author     Maxime Ripard <maxime@cerno.tech>  2020-02-17 12:34:34 +0300
committer  Maxime Ripard <maxime@cerno.tech>  2020-02-17 12:34:34 +0300
commit     28f2aff1caa4997f58ca31179cad1b4a84a62827 (patch)
tree       69fb4b0a752f3660ce022a4313f8c7b276bbcceb /drivers
parent     3e8a3844fefbaad911c596f02dd48c39188ffa81 (diff)
parent     11a48a5a18c63fd7621bb050228cebf13566e4d8 (diff)
download   linux-28f2aff1caa4997f58ca31179cad1b4a84a62827.tar.xz
Merge v5.6-rc2 into drm-misc-next
Lyude needs some patches in 5.6-rc2 and we didn't bring drm-misc-next forward yet, so it looks like a good occasion.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 1
-rw-r--r--  drivers/acpi/acpi_apd.c | 6
-rw-r--r--  drivers/acpi/acpi_lpit.c | 2
-rw-r--r--  drivers/acpi/acpi_processor.c | 182
-rw-r--r--  drivers/acpi/acpi_video.c | 2
-rw-r--r--  drivers/acpi/acpica/acapps.h | 4
-rw-r--r--  drivers/acpi/acpica/accommon.h | 2
-rw-r--r--  drivers/acpi/acpica/acconvert.h | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 2
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 2
-rw-r--r--  drivers/acpi/acpica/acevents.h | 2
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/achware.h | 4
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 2
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r--  drivers/acpi/acpica/acobject.h | 5
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 2
-rw-r--r--  drivers/acpi/acpica/acparser.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 2
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 2
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 2
-rw-r--r--  drivers/acpi/acpica/dbhistry.c | 2
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 2
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 2
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r--  drivers/acpi/acpica/dsdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 4
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 2
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 2
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 4
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 2
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload.c | 23
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/dswscope.c | 2
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 2
-rw-r--r--  drivers/acpi/acpica/evglock.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r--  drivers/acpi/acpica/evhandler.c | 2
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 2
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r--  drivers/acpi/acpica/evxface.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 34
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 2
-rw-r--r--  drivers/acpi/acpica/exconcat.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 2
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 2
-rw-r--r--  drivers/acpi/acpica/excreate.c | 2
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/exdump.c | 2
-rw-r--r--  drivers/acpi/acpica/exfield.c | 12
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 2
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/exmutex.c | 2
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r--  drivers/acpi/acpica/exprep.c | 2
-rw-r--r--  drivers/acpi/acpica/exregion.c | 2
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 2
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 2
-rw-r--r--  drivers/acpi/acpica/exresop.c | 2
-rw-r--r--  drivers/acpi/acpica/exserial.c | 2
-rw-r--r--  drivers/acpi/acpica/exstore.c | 2
-rw-r--r--  drivers/acpi/acpica/exstoren.c | 2
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 2
-rw-r--r--  drivers/acpi/acpica/exsystem.c | 2
-rw-r--r--  drivers/acpi/acpica/extrace.c | 2
-rw-r--r--  drivers/acpi/acpica/exutils.c | 2
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 73
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 2
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 2
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 2
-rw-r--r--  drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 2
-rw-r--r--  drivers/acpi/acpica/nspredef.c | 2
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 2
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/nswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psloop.c | 2
-rw-r--r--  drivers/acpi/acpica/psobject.c | 2
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/psparse.c | 2
-rw-r--r--  drivers/acpi/acpica/psscope.c | 2
-rw-r--r--  drivers/acpi/acpica/pstree.c | 2
-rw-r--r--  drivers/acpi/acpica/psutils.c | 2
-rw-r--r--  drivers/acpi/acpica/pswalk.c | 2
-rw-r--r--  drivers/acpi/acpica/psxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 2
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r--  drivers/acpi/acpica/utaddress.c | 2
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/utascii.c | 2
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r--  drivers/acpi/acpica/utcache.c | 2
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 2
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 2
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 2
-rw-r--r--  drivers/acpi/acpica/uteval.c | 2
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 2
-rw-r--r--  drivers/acpi/acpica/uthex.c | 2
-rw-r--r--  drivers/acpi/acpica/utids.c | 2
-rw-r--r--  drivers/acpi/acpica/utinit.c | 2
-rw-r--r--  drivers/acpi/acpica/utlock.c | 2
-rw-r--r--  drivers/acpi/acpica/utobject.c | 2
-rw-r--r--  drivers/acpi/acpica/utosi.c | 2
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 2
-rw-r--r--  drivers/acpi/acpica/utprint.c | 2
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utuuid.c | 2
-rw-r--r--  drivers/acpi/acpica/utxface.c | 2
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 2
-rw-r--r--  drivers/acpi/arm64/iort.c | 79
-rw-r--r--  drivers/acpi/battery.c | 90
-rw-r--r--  drivers/acpi/button.c | 11
-rw-r--r--  drivers/acpi/device_pm.c | 1
-rw-r--r--  drivers/acpi/dptf/dptf_power.c | 1
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 4
-rw-r--r--  drivers/acpi/ec.c | 60
-rw-r--r--  drivers/acpi/fan.c | 97
-rw-r--r--  drivers/acpi/pptt.c | 29
-rw-r--r--  drivers/acpi/proc.c | 15
-rw-r--r--  drivers/acpi/processor_idle.c | 174
-rw-r--r--  drivers/acpi/scan.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 53
-rw-r--r--  drivers/acpi/thermal.c | 34
-rw-r--r--  drivers/acpi/video_detect.c | 29
-rw-r--r--  drivers/android/binder.c | 43
-rw-r--r--  drivers/ata/acard-ahci.c | 4
-rw-r--r--  drivers/ata/ahci.c | 7
-rw-r--r--  drivers/ata/ahci_brcm.c | 185
-rw-r--r--  drivers/ata/libahci_platform.c | 6
-rw-r--r--  drivers/ata/libata-core.c | 45
-rw-r--r--  drivers/ata/libata-scsi.c | 9
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 8
-rw-r--r--  drivers/ata/pata_macio.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 6
-rw-r--r--  drivers/ata/pata_pcmcia.c | 1
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 2
-rw-r--r--  drivers/ata/sata_fsl.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/ata/sata_nv.c | 2
-rw-r--r--  drivers/atm/eni.c | 12
-rw-r--r--  drivers/atm/firestream.c | 3
-rw-r--r--  drivers/atm/fore200e.c | 25
-rw-r--r--  drivers/base/Kconfig | 2
-rw-r--r--  drivers/base/arch_topology.c | 20
-rw-r--r--  drivers/base/attribute_container.c | 103
-rw-r--r--  drivers/base/base.h | 19
-rw-r--r--  drivers/base/bus.c | 1
-rw-r--r--  drivers/base/class.c | 1
-rw-r--r--  drivers/base/component.c | 11
-rw-r--r--  drivers/base/dd.c | 5
-rw-r--r--  drivers/base/devtmpfs.c | 83
-rw-r--r--  drivers/base/driver.c | 1
-rw-r--r--  drivers/base/firmware_loader/builtin/Makefile | 2
-rw-r--r--  drivers/base/firmware_loader/fallback.c | 11
-rw-r--r--  drivers/base/firmware_loader/firmware.h | 16
-rw-r--r--  drivers/base/firmware_loader/main.c | 2
-rw-r--r--  drivers/base/memory.c | 34
-rw-r--r--  drivers/base/platform.c | 12
-rw-r--r--  drivers/base/power/domain.c | 38
-rw-r--r--  drivers/base/power/main.c | 42
-rw-r--r--  drivers/base/power/qos-test.c | 2
-rw-r--r--  drivers/base/power/runtime.c | 13
-rw-r--r--  drivers/base/power/wakeup.c | 3
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 10
-rw-r--r--  drivers/base/regmap/regmap.c | 17
-rw-r--r--  drivers/base/swnode.c | 154
-rw-r--r--  drivers/base/test/Kconfig | 3
-rw-r--r--  drivers/base/test/Makefile | 2
-rw-r--r--  drivers/base/test/property-entry-test.c | 475
-rw-r--r--  drivers/base/test/test_async_driver_probe.c | 3
-rw-r--r--  drivers/base/transport_class.c | 11
-rw-r--r--  drivers/bcma/driver_chipcommon_b.c | 2
-rw-r--r--  drivers/bcma/driver_pci_host.c | 6
-rw-r--r--  drivers/bcma/host_soc.c | 2
-rw-r--r--  drivers/bcma/scan.c | 13
-rw-r--r--  drivers/block/Kconfig | 10
-rw-r--r--  drivers/block/aoe/aoeblk.c | 1
-rw-r--r--  drivers/block/brd.c | 22
-rw-r--r--  drivers/block/drbd/drbd_int.h | 2
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 3
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 2
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 4
-rw-r--r--  drivers/block/floppy.c | 3
-rw-r--r--  drivers/block/nbd.c | 16
-rw-r--r--  drivers/block/null_blk_main.c | 56
-rw-r--r--  drivers/block/null_blk_zoned.c | 9
-rw-r--r--  drivers/block/paride/pcd.c | 3
-rw-r--r--  drivers/block/paride/pd.c | 1
-rw-r--r--  drivers/block/paride/pf.c | 1
-rw-r--r--  drivers/block/pktcdvd.c | 26
-rw-r--r--  drivers/block/rbd.c | 35
-rw-r--r--  drivers/block/sunvdc.c | 1
-rw-r--r--  drivers/block/umem.c | 2
-rw-r--r--  drivers/block/virtio_blk.c | 115
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 42
-rw-r--r--  drivers/block/xen-blkback/common.h | 1
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 82
-rw-r--r--  drivers/block/xen-blkfront.c | 13
-rw-r--r--  drivers/block/zram/zram_drv.c | 10
-rw-r--r--  drivers/bluetooth/btbcm.c | 48
-rw-r--r--  drivers/bluetooth/btbcm.h | 16
-rw-r--r--  drivers/bluetooth/btrtl.c | 20
-rw-r--r--  drivers/bluetooth/btsdio.c | 19
-rw-r--r--  drivers/bluetooth/btusb.c | 13
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 73
-rw-r--r--  drivers/bluetooth/hci_h4.c | 1
-rw-r--r--  drivers/bluetooth/hci_h5.c | 3
-rw-r--r--  drivers/bluetooth/hci_qca.c | 418
-rw-r--r--  drivers/bluetooth/hci_uart.h | 7
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 1
-rw-r--r--  drivers/bus/Kconfig | 1
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 6
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 4
-rw-r--r--  drivers/bus/moxtet.c | 5
-rw-r--r--  drivers/bus/ti-sysc.c | 35
-rw-r--r--  drivers/cdrom/cdrom.c | 35
-rw-r--r--  drivers/cdrom/gdrom.c | 3
-rw-r--r--  drivers/char/agp/generic.c | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 2
-rw-r--r--  drivers/char/agp/isoch.c | 9
-rw-r--r--  drivers/char/applicom.c | 4
-rw-r--r--  drivers/char/hpet.c | 4
-rw-r--r--  drivers/char/hw_random/Kconfig | 2
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 2
-rw-r--r--  drivers/char/hw_random/intel-rng.c | 2
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 1
-rw-r--r--  drivers/char/hw_random/octeon-rng.c | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 4
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 33
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 10
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 2
-rw-r--r--  drivers/char/random.c | 317
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 10
-rw-r--r--  drivers/char/tpm/tpm-dev.h | 2
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 34
-rw-r--r--  drivers/char/tpm/tpm.h | 1
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 1
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c | 22
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 3
-rw-r--r--  drivers/char/ttyprintk.c | 15
-rw-r--r--  drivers/clk/Kconfig | 30
-rw-r--r--  drivers/clk/Makefile | 2
-rw-r--r--  drivers/clk/at91/at91sam9260.c | 2
-rw-r--r--  drivers/clk/at91/at91sam9rl.c | 2
-rw-r--r--  drivers/clk/at91/at91sam9x5.c | 2
-rw-r--r--  drivers/clk/at91/clk-sam9x60-pll.c | 8
-rw-r--r--  drivers/clk/at91/pmc.c | 2
-rw-r--r--  drivers/clk/at91/sam9x60.c | 1
-rw-r--r--  drivers/clk/at91/sama5d2.c | 2
-rw-r--r--  drivers/clk/at91/sama5d4.c | 2
-rw-r--r--  drivers/clk/clk-asm9260.c | 8
-rw-r--r--  drivers/clk/clk-bd718x7.c | 50
-rw-r--r--  drivers/clk/clk-bm1880.c | 3
-rw-r--r--  drivers/clk/clk-composite.c | 56
-rw-r--r--  drivers/clk/clk-divider.c | 91
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 113
-rw-r--r--  drivers/clk/clk-fsl-sai.c | 92
-rw-r--r--  drivers/clk/clk-gate.c | 35
-rw-r--r--  drivers/clk/clk-gpio.c | 172
-rw-r--r--  drivers/clk/clk-mux.c | 58
-rw-r--r--  drivers/clk/clk-plldig.c | 288
-rw-r--r--  drivers/clk/clk-qoriq.c | 29
-rw-r--r--  drivers/clk/clk-scmi.c | 2
-rw-r--r--  drivers/clk/clk.c | 182
-rw-r--r--  drivers/clk/imx/Kconfig | 6
-rw-r--r--  drivers/clk/imx/Makefile | 3
-rw-r--r--  drivers/clk/imx/clk-composite-7ulp.c | 2
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c | 6
-rw-r--r--  drivers/clk/imx/clk-divider-gate.c | 12
-rw-r--r--  drivers/clk/imx/clk-frac-pll.c | 7
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 5
-rw-r--r--  drivers/clk/imx/clk-imx7ulp.c | 183
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 565
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 498
-rw-r--r--  drivers/clk/imx/clk-imx8mp.c | 764
-rw-r--r--  drivers/clk/imx/clk-imx8mq.c | 584
-rw-r--r--  drivers/clk/imx/clk-imx8qxp-lpcg.c | 11
-rw-r--r--  drivers/clk/imx/clk-pfdv2.c | 2
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 31
-rw-r--r--  drivers/clk/imx/clk-pllv1.c | 14
-rw-r--r--  drivers/clk/imx/clk-pllv2.c | 14
-rw-r--r--  drivers/clk/imx/clk-pllv4.c | 2
-rw-r--r--  drivers/clk/imx/clk-sscg-pll.c (renamed from drivers/clk/imx/clk-sccg-pll.c) | 152
-rw-r--r--  drivers/clk/imx/clk.c | 12
-rw-r--r--  drivers/clk/imx/clk.h | 162
-rw-r--r--  drivers/clk/mediatek/Kconfig | 44
-rw-r--r--  drivers/clk/meson/Makefile | 2
-rw-r--r--  drivers/clk/meson/clk-mpll.c | 4
-rw-r--r--  drivers/clk/meson/clk-phase.c | 4
-rw-r--r--  drivers/clk/meson/clk-pll.c | 13
-rw-r--r--  drivers/clk/meson/g12a.c | 1
-rw-r--r--  drivers/clk/meson/meson8-ddr.c | 149
-rw-r--r--  drivers/clk/meson/meson8b.c | 124
-rw-r--r--  drivers/clk/meson/sclk-div.c | 4
-rw-r--r--  drivers/clk/microchip/clk-core.c | 8
-rw-r--r--  drivers/clk/mmp/clk-frac.c | 4
-rw-r--r--  drivers/clk/mmp/clk-mix.c | 4
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 8
-rw-r--r--  drivers/clk/mvebu/Kconfig | 2
-rw-r--r--  drivers/clk/qcom/Kconfig | 47
-rw-r--r--  drivers/clk/qcom/Makefile | 5
-rw-r--r--  drivers/clk/qcom/apcs-msm8916.c | 13
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 91
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.h | 5
-rw-r--r--  drivers/clk/qcom/clk-hfpll.c | 6
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 1
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 88
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 10
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 5
-rw-r--r--  drivers/clk/qcom/dispcc-sc7180.c | 761
-rw-r--r--  drivers/clk/qcom/dispcc-sdm845.c | 214
-rw-r--r--  drivers/clk/qcom/gcc-ipq6018.c | 4635
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 35
-rw-r--r--  drivers/clk/qcom/gcc-msm8998.c | 14
-rw-r--r--  drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-sc7180.c | 6
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c | 7
-rw-r--r--  drivers/clk/qcom/gpucc-msm8998.c | 2
-rw-r--r--  drivers/clk/qcom/gpucc-sc7180.c | 264
-rw-r--r--  drivers/clk/qcom/hfpll.c | 21
-rw-r--r--  drivers/clk/qcom/mmcc-msm8974.c | 13
-rw-r--r--  drivers/clk/qcom/mmcc-msm8998.c | 2913
-rw-r--r--  drivers/clk/qcom/videocc-sc7180.c | 257
-rw-r--r--  drivers/clk/renesas/Kconfig | 4
-rw-r--r--  drivers/clk/renesas/clk-rz.c | 4
-rw-r--r--  drivers/clk/renesas/r7s9210-cpg-mssr.c | 1
-rw-r--r--  drivers/clk/renesas/rcar-gen2-cpg.h | 8
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 6
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 28
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 8
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 28
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.h | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c | 16
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r.c | 21
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 6
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.h | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-apb0-gates.c | 6
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 3
-rw-r--r--  drivers/clk/tegra/clk-divider.c | 9
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 6
-rw-r--r--  drivers/clk/tegra/clk-tegra20.c | 4
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 4
-rw-r--r--  drivers/clk/tegra/clk.c | 4
-rw-r--r--  drivers/clk/ti/clk-44xx.c | 13
-rw-r--r--  drivers/clk/ti/clk-54xx.c | 28
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 62
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c | 1
-rw-r--r--  drivers/clk/ti/clk.c | 4
-rw-r--r--  drivers/clk/ti/clkctrl.c | 96
-rw-r--r--  drivers/clk/ti/clock.h | 2
-rw-r--r--  drivers/clk/ti/clockdomain.c | 8
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-peri.c | 13
-rw-r--r--  drivers/clk/ux500/u8500_of_clk.c | 2
-rw-r--r--  drivers/clk/versatile/Kconfig | 2
-rw-r--r--  drivers/clk/zynqmp/clkc.c | 3
-rw-r--r--  drivers/clk/zynqmp/divider.c | 118
-rw-r--r--  drivers/clk/zynqmp/pll.c | 6
-rw-r--r--  drivers/clocksource/Kconfig | 76
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 5
-rw-r--r--  drivers/clocksource/em_sti.c | 7
-rw-r--r--  drivers/clocksource/exynos_mct.c | 2
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 84
-rw-r--r--  drivers/clocksource/sh_cmt.c | 2
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 2
-rw-r--r--  drivers/clocksource/sh_tmu.c | 2
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c | 26
-rw-r--r--  drivers/clocksource/timer-davinci.c | 8
-rw-r--r--  drivers/clocksource/timer-microchip-pit64b.c | 451
-rw-r--r--  drivers/clocksource/timer-riscv.c | 2
-rw-r--r--  drivers/clocksource/timer-ti-dm.c | 20
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 152
-rw-r--r--  drivers/cpufreq/freq_table.c | 4
-rw-r--r--  drivers/cpufreq/gx-suspmod.c | 2
-rw-r--r--  drivers/cpufreq/imx-cpufreq-dt.c | 6
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 40
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/longrun.c | 6
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 12
-rw-r--r--  drivers/cpuidle/Makefile | 4
-rw-r--r--  drivers/cpuidle/coupled.c | 9
-rw-r--r--  drivers/cpuidle/cpuidle-clps711x.c | 5
-rw-r--r--  drivers/cpuidle/cpuidle-kirkwood.c | 5
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c | 308
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c | 161
-rw-r--r--  drivers/cpuidle/cpuidle-psci.h | 17
-rw-r--r--  drivers/cpuidle/cpuidle.c | 9
-rw-r--r--  drivers/cpuidle/driver.c | 46
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 5
-rw-r--r--  drivers/cpuidle/governors/teo.c | 2
-rw-r--r--  drivers/cpuidle/sysfs.c | 16
-rw-r--r--  drivers/crypto/Kconfig | 89
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 24
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 5
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 9
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 6
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 8
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 31
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 37
-rw-r--r--  drivers/crypto/amlogic/Kconfig | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 4
-rw-r--r--  drivers/crypto/atmel-aes.c | 359
-rw-r--r--  drivers/crypto/atmel-authenc.h | 3
-rw-r--r--  drivers/crypto/atmel-sha.c | 473
-rw-r--r--  drivers/crypto/atmel-tdes.c | 375
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 10
-rw-r--r--  drivers/crypto/bcm/cipher.c | 17
-rw-r--r--  drivers/crypto/caam/Kconfig | 14
-rw-r--r--  drivers/crypto/caam/caamalg.c | 33
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 44
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 206
-rw-r--r--  drivers/crypto/caam/caamhash.c | 167
-rw-r--r--  drivers/crypto/caam/ctrl.c | 15
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 12
-rw-r--r--  drivers/crypto/ccp/Makefile | 4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 1
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 1042
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 51
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 1077
-rw-r--r--  drivers/crypto/ccp/sev-dev.h | 63
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 17
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 43
-rw-r--r--  drivers/crypto/ccp/tee-dev.c | 375
-rw-r--r--  drivers/crypto/ccp/tee-dev.h | 110
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 43
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 58
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 24
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 6
-rw-r--r--  drivers/crypto/ccree/cc_fips.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 8
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 39
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 17
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 103
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h | 8
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 30
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 10
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h | 7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c | 59
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.h | 21
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c | 65
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c | 28
-rw-r--r--  drivers/crypto/geode-aes.c | 24
-rw-r--r--  drivers/crypto/hifn_795x.c | 2
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 11
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 141
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 60
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 53
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 963
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 22
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 19
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 17
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 4
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 92
-rw-r--r--  drivers/crypto/img-hash.c | 6
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 12
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 34
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 600
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 36
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 130
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 31
-rw-r--r--  drivers/crypto/marvell/cipher.c | 4
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 4
-rw-r--r--  drivers/crypto/mxs-dcp.c | 12
-rw-r--r--  drivers/crypto/n2_core.c | 1
-rw-r--r--  drivers/crypto/omap-aes-gcm.c | 223
-rw-r--r--  drivers/crypto/omap-aes.c | 142
-rw-r--r--  drivers/crypto/omap-aes.h | 12
-rw-r--r--  drivers/crypto/omap-crypto.c | 37
-rw-r--r--  drivers/crypto/omap-des.c | 13
-rw-r--r--  drivers/crypto/omap-sham.c | 191
-rw-r--r--  drivers/crypto/padlock-aes.c | 9
-rw-r--r--  drivers/crypto/padlock-sha.c | 26
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 30
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 6
-rw-r--r--  drivers/crypto/qce/Makefile | 7
-rw-r--r--  drivers/crypto/qce/common.c | 244
-rw-r--r--  drivers/crypto/qce/core.c | 4
-rw-r--r--  drivers/crypto/qce/dma.c | 6
-rw-r--r--  drivers/crypto/qce/dma.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c | 41
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4
-rw-r--r--  drivers/crypto/sahara.c | 9
-rw-r--r--  drivers/crypto/stm32/Kconfig | 6
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 6
-rw-r--r--  drivers/crypto/talitos.c | 15
-rw-r--r--  drivers/crypto/ux500/Kconfig | 16
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 8
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 3
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/devfreq/Kconfig | 26
-rw-r--r--  drivers/devfreq/Makefile | 1
-rw-r--r--  drivers/devfreq/devfreq-event.c | 4
-rw-r--r--  drivers/devfreq/devfreq.c | 166
-rw-r--r--  drivers/devfreq/event/Kconfig | 6
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 2
-rw-r--r--  drivers/devfreq/event/exynos-nocp.h | 2
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 15
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.h | 2
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c | 5
-rw-r--r--  drivers/devfreq/exynos-bus.c | 155
-rw-r--r--  drivers/devfreq/imx8m-ddrc.c | 471
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 19
-rw-r--r--  drivers/dma/Kconfig | 30
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/altera-msgdma.c | 4
-rw-r--r--  drivers/dma/bcm2835-dma.c | 5
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 10
-rw-r--r--  drivers/dma/dma-jz4780.c | 10
-rw-r--r--  drivers/dma/dmaengine.c | 631
-rw-r--r--  drivers/dma/dmaengine.h | 11
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 8
-rw-r--r--  drivers/dma/fsl-edma-common.c | 5
-rw-r--r--  drivers/dma/fsl-edma-common.h | 1
-rw-r--r--  drivers/dma/fsl-edma.c | 8
-rw-r--r--  drivers/dma/fsl-qdma.c | 2
-rw-r--r--  drivers/dma/hisi_dma.c | 611
-rw-r--r--  drivers/dma/idxd/Makefile | 2
-rw-r--r--  drivers/dma/idxd/cdev.c | 302
-rw-r--r--  drivers/dma/idxd/device.c | 693
-rw-r--r--  drivers/dma/idxd/dma.c | 217
-rw-r--r--  drivers/dma/idxd/idxd.h | 316
-rw-r--r--  drivers/dma/idxd/init.c | 533
-rw-r--r--  drivers/dma/idxd/irq.c | 261
-rw-r--r--  drivers/dma/idxd/registers.h | 336
-rw-r--r--  drivers/dma/idxd/submit.c | 95
-rw-r--r--  drivers/dma/idxd/sysfs.c | 1528
-rw-r--r--  drivers/dma/imx-sdma.c | 37
-rw-r--r--  drivers/dma/ioat/dma.c | 3
-rw-r--r--  drivers/dma/ioat/init.c | 38
-rw-r--r--  drivers/dma/k3dma.c | 12
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c | 3
-rw-r--r--  drivers/dma/mv_xor_v2.c | 2
-rw-r--r--  drivers/dma/of-dma.c | 2
-rw-r--r--  drivers/dma/owl-dma.c | 3
-rw-r--r--  drivers/dma/pl330.c | 16
-rw-r--r--  drivers/dma/plx_dma.c | 639
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 24
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c | 4
-rw-r--r--  drivers/dma/sun4i-dma.c | 48
-rw-r--r--  drivers/dma/ti/Kconfig | 24
-rw-r--r--  drivers/dma/ti/Makefile | 3
-rw-r--r--  drivers/dma/ti/edma.c | 39
-rw-r--r--  drivers/dma/ti/k3-psil-am654.c | 175
-rw-r--r--  drivers/dma/ti/k3-psil-j721e.c | 222
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h | 43
-rw-r--r--  drivers/dma/ti/k3-psil.c | 90
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c | 1198
-rw-r--r--  drivers/dma/ti/k3-udma-private.c | 133
-rw-r--r--  drivers/dma/ti/k3-udma.c | 3432
-rw-r--r--  drivers/dma/ti/k3-udma.h | 151
-rw-r--r--  drivers/dma/ti/omap-dma.c | 288
-rw-r--r--  drivers/dma/virt-dma.c | 13
-rw-r--r--  drivers/dma/virt-dma.h | 27
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 24
-rw-r--r--  drivers/edac/Kconfig | 5
-rw-r--r--  drivers/edac/amd64_edac.c | 65
-rw-r--r--  drivers/edac/amd64_edac.h | 3
-rw-r--r--  drivers/edac/aspeed_edac.c | 4
-rw-r--r--  drivers/edac/edac_mc.c | 12
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 18
-rw-r--r--  drivers/edac/i3000_edac.c | 2
-rw-r--r--  drivers/edac/i3200_edac.c | 2
-rw-r--r--  drivers/edac/i5100_edac.c | 7
-rw-r--r--  drivers/edac/i82975x_edac.c | 2
-rw-r--r--  drivers/edac/ie31200_edac.c | 2
-rw-r--r--  drivers/edac/mce_amd.c | 105
-rw-r--r--  drivers/edac/sifive_edac.c | 6
-rw-r--r--  drivers/edac/skx_common.c | 2
-rw-r--r--  drivers/edac/x38_edac.c | 2
-rw-r--r--  drivers/extcon/extcon-arizona.c | 354
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 10
-rw-r--r--  drivers/firewire/nosy.c | 2
-rw-r--r--  drivers/firmware/Kconfig | 8
-rw-r--r--  drivers/firmware/Makefile | 5
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 29
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 2
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 110
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/power.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/scmi_pm_domain.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 2
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c | 2
-rw-r--r--  drivers/firmware/broadcom/tee_bnxt_fw.c | 1
-rw-r--r--  drivers/firmware/efi/Kconfig | 22
-rw-r--r--  drivers/firmware/efi/arm-init.c | 107
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 2
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 1
-rw-r--r--  drivers/firmware/efi/earlycon.c | 48
-rw-r--r--  drivers/firmware/efi/efi.c | 30
-rw-r--r--  drivers/firmware/efi/fake_mem.c | 43
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 110
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 70
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 32
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 290
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 48
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 53
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 215
-rw-r--r--  drivers/firmware/efi/libstub/pci.c | 114
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 79
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c | 11
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 48
-rw-r--r--  drivers/firmware/efi/memmap.c | 95
-rw-r--r--  drivers/firmware/efi/rci2-table.c | 3
-rw-r--r--  drivers/firmware/google/coreboot_table.c | 7
-rw-r--r--  drivers/firmware/google/gsmi.c | 25
-rw-r--r--  drivers/firmware/imx/Kconfig | 2
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 9
-rw-r--r--  drivers/firmware/psci/psci.c | 18
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 671
-rw-r--r--  drivers/firmware/qcom_scm-64.c | 579
-rw-r--r--  drivers/firmware/qcom_scm-legacy.c | 242
-rw-r--r--  drivers/firmware/qcom_scm-smc.c | 151
-rw-r--r--  drivers/firmware/qcom_scm.c | 854
-rw-r--r--  drivers/firmware/qcom_scm.h | 178
-rw-r--r--  drivers/firmware/stratix10-svc.c | 4
-rw-r--r--  drivers/firmware/turris-mox-rwtm.c | 2
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 45
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 2
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 2
-rw-r--r--  drivers/fpga/ts73xx-fpga.c | 4
-rw-r--r--  drivers/fpga/xilinx-pr-decoupler.c | 3
-rw-r--r--  drivers/gpio/Kconfig | 47
-rw-r--r--  drivers/gpio/Makefile | 5
-rw-r--r--  drivers/gpio/TODO | 46
-rw-r--r--  drivers/gpio/gpio-altera.c | 2
-rw-r--r--  drivers/gpio/gpio-aspeed-sgpio.c | 4
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 2
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 12
-rw-r--r--  drivers/gpio/gpio-bd71828.c | 149
-rw-r--r--  drivers/gpio/gpio-creg-snps.c | 4
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 15
-rw-r--r--  drivers/gpio/gpio-logicvc.c | 170
-rw-r--r--  drivers/gpio/gpio-lynxpoint.c | 471
-rw-r--r--  drivers/gpio/gpio-mockup.c | 27
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 3
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 8
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 31
-rw-r--r--  drivers/gpio/gpio-sama5d2-piobu.c | 1
-rw-r--r--  drivers/gpio/gpio-sifive.c | 252
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 1
-rw-r--r--  drivers/gpio/gpio-tegra.c | 21
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 13
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 36
-rw-r--r--  drivers/gpio/gpio-vx855.c | 2
-rw-r--r--  drivers/gpio/gpio-wcd934x.c | 121
-rw-r--r--  drivers/gpio/gpio-xgs-iproc.c | 5
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 5
-rw-r--r--  drivers/gpio/gpio-xtensa.c | 7
-rw-r--r--  drivers/gpio/gpio-zynq.c | 8
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 51
-rw-r--r--  drivers/gpio/gpiolib-devres.c | 2
-rw-r--r--  drivers/gpio/gpiolib-of.c | 56
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 7
-rw-r--r--  drivers/gpio/gpiolib.c | 227
-rw-r--r--  drivers/gpio/gpiolib.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 149
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 326
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 211
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_df.h | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 211
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 894
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 330
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 244
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 566
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 978
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c) | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 159
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 795
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 195
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 196
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 662
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 497
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 420
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 207
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 146
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 149
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 73
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 220
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h) | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 133
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 155
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 180
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 358
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 84
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 188
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 195
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 191
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 132
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h (renamed from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 107
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 123
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 107
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 133
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 313
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 7
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h | 9
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h | 22
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h | 16
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h | 647
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h | 3912
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 12
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h | 264
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h | 748
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h | 128
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h | 33
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h | 91
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 14
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 355
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 150
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h | 46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 297
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.h | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_internal.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 131
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 41
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 2
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 72
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 12
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 45
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 579
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 11
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 18
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 11
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/.gitignore | 1
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 38
-rw-r--r--  drivers/gpu/drm/i915/display/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 218
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 1066
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 114
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 207
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 243
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 55
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 67
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 67
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 227
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 79
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 428
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 483
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.h | 76
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 724
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 184
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 1486
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 598
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 587
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 619
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 218
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 306
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 75
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.h | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 64
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 78
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 3560
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 630
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 1212
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 332
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 230
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 105
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h | 294
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 411
-rw-r--r--  drivers/gpu/drm/i915/intel_region_lmem.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 9
-rw-r--r--  drivers/gpu/drm/i915/oa/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 78
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_perf_selftests.h | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_atomic.c | 47
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_atomic.h | 41
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 33
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 9
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 5
-rw-r--r--  drivers/gpu/drm/mediatek/Makefile | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 76
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 43
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 204
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 184
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 56
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 86
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 47
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 67
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 11
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h | 52
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 32
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 81
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 52
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 16
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 11
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 66
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 186
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 73
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 73
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 241
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 38
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 92
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 26
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 22
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 36
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c34
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c24
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c46
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c62
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c14
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h7
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c23
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c244
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c17
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/acr.h152
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/flcn.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/fw.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/ls.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/pmu.h98
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/sec2.h60
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0008.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h77
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h126
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h786
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h786
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c311
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c130
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c (renamed from drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h)54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c109
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c312
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c577
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c436
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c264
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c411
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c470
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c134
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c281
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c111
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c180
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c253
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c215
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c101
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c1241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c229
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c418
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c168
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c213
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c262
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c264
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h161
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c177
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h65
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c92
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c124
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h47
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c26
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c61
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c34
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c9
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c5
-rw-r--r--drivers/gpu/drm/radeon/cik.c4
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c89
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c6
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c12
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c15
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/tegra/dc.c147
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c53
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c177
-rw-r--r--drivers/gpu/drm/tegra/gem.c10
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c4
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c118
-rw-r--r--drivers/gpu/drm/tegra/hub.c198
-rw-r--r--drivers/gpu/drm/tegra/hub.h2
-rw-r--r--drivers/gpu/drm/tegra/output.c16
-rw-r--r--drivers/gpu/drm/tegra/plane.c44
-rw-r--r--drivers/gpu/drm/tegra/sor.c210
-rw-r--r--drivers/gpu/drm/tegra/vic.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c22
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c8
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c9
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c76
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c90
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c4
-rw-r--r--drivers/gpu/host1x/bus.c79
-rw-r--r--drivers/gpu/host1x/dev.c4
-rw-r--r--drivers/gpu/host1x/job.c34
-rw-r--r--drivers/gpu/host1x/syncpt.c2
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c16
-rw-r--r--drivers/hid/hid-ite.c3
-rw-r--r--drivers/hid/hid-logitech-hidpp.c247
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-steam.c4
-rw-r--r--drivers/hid/hidraw.c16
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c16
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c2
-rw-r--r--drivers/hid/uhid.c5
-rw-r--r--drivers/hid/usbhid/hiddev.c97
-rw-r--r--drivers/hid/wacom_wac.c6
-rw-r--r--drivers/hv/channel_mgmt.c21
-rw-r--r--drivers/hv/hv_balloon.c13
-rw-r--r--drivers/hv/hv_fcopy.c54
-rw-r--r--drivers/hv/hv_kvp.c43
-rw-r--r--drivers/hv/hv_snapshot.c55
-rw-r--r--drivers/hv/hv_util.c156
-rw-r--r--drivers/hv/hyperv_vmbus.h6
-rw-r--r--drivers/hv/vmbus_drv.c4
-rw-r--r--drivers/hwmon/Kconfig37
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adm1177.c288
-rw-r--r--drivers/hwmon/adt7475.c5
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c15
-rw-r--r--drivers/hwmon/drivetemp.c574
-rw-r--r--drivers/hwmon/hwmon.c85
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/k10temp.c489
-rw-r--r--drivers/hwmon/max31730.c440
-rw-r--r--drivers/hwmon/nct7802.c75
-rw-r--r--drivers/hwmon/pmbus/Kconfig32
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c89
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c4
-rw-r--r--drivers/hwmon/pmbus/max20730.c372
-rw-r--r--drivers/hwmon/pmbus/max20751.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.c6
-rw-r--r--drivers/hwmon/pmbus/pmbus.h15
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c22
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c44
-rw-r--r--drivers/hwmon/pmbus/tps53679.c46
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c39
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c117
-rw-r--r--drivers/hwmon/pwm-fan.c15
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/hwmon/w83627ehf.c1969
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c32
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c28
-rw-r--r--drivers/hwspinlock/sirf_hwspinlock.c46
-rw-r--r--drivers/hwspinlock/stm32_hwspinlock.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c13
-rw-r--r--drivers/hwtracing/intel_th/core.c7
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h2
-rw-r--r--drivers/hwtracing/intel_th/msu.c14
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/i2c/busses/Kconfig107
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c41
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c17
-rw-r--r--drivers/i2c/busses/i2c-cadence.c32
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c6
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c1
-rw-r--r--drivers/i2c/busses/i2c-highlander.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c10
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c12
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c156
-rw-r--r--drivers/i2c/busses/i2c-meson.c97
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c5
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c6
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c267
-rw-r--r--drivers/i2c/busses/i2c-parport.c113
-rw-r--r--drivers/i2c/busses/i2c-parport.h106
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c8
-rw-r--r--drivers/i2c/busses/i2c-pnx.c4
-rw-r--r--drivers/i2c/busses/i2c-powermac.c8
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c117
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c4
-rw-r--r--drivers/i2c/busses/i2c-tegra.c216
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c8
-rw-r--r--drivers/i2c/busses/i2c-xiic.c69
-rw-r--r--drivers/i2c/i2c-core-acpi.c12
-rw-r--r--drivers/i2c/i2c-core-base.c46
-rw-r--r--drivers/i2c/i2c-core-of.c7
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c29
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c69
-rw-r--r--drivers/i3c/master.c4
-rw-r--r--drivers/i3c/master/dw-i3c-master.c20
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c53
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ide/cmd64x.c3
-rw-r--r--drivers/ide/ht6560b.c2
-rw-r--r--drivers/ide/ide-cd.c38
-rw-r--r--drivers/ide/ide-disk.c1
-rw-r--r--drivers/ide/ide-floppy.c4
-rw-r--r--drivers/ide/ide-floppy.h2
-rw-r--r--drivers/ide/ide-floppy_ioctl.c35
-rw-r--r--drivers/ide/ide-gd.c17
-rw-r--r--drivers/ide/ide-ioctls.c47
-rw-r--r--drivers/ide/ide-iops.c1
-rw-r--r--drivers/ide/ide-proc.c21
-rw-r--r--drivers/ide/ide-tape.c11
-rw-r--r--drivers/ide/pmac.c3
-rw-r--r--drivers/ide/qd65xx.c2
-rw-r--r--drivers/ide/serverworks.c6
-rw-r--r--drivers/ide/siimage.c3
-rw-r--r--drivers/ide/tx4939ide.c2
-rw-r--r--drivers/ide/via82cxxx.c3
-rw-r--r--drivers/idle/intel_idle.c508
-rw-r--r--drivers/iio/accel/Kconfig20
-rw-r--r--drivers/iio/accel/Makefile2
-rw-r--r--drivers/iio/accel/adis16201.c8
-rw-r--r--drivers/iio/accel/adis16209.c8
-rw-r--r--drivers/iio/accel/bma180.c225
-rw-r--r--drivers/iio/accel/bma400.h99
-rw-r--r--drivers/iio/accel/bma400_core.c853
-rw-r--r--drivers/iio/accel/bma400_i2c.c61
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c27
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_i2c.c8
-rw-r--r--drivers/iio/accel/st_accel_spi.c9
-rw-r--r--drivers/iio/adc/Kconfig17
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7091r-base.c298
-rw-r--r--drivers/iio/adc/ad7091r-base.h26
-rw-r--r--drivers/iio/adc/ad7091r5.c113
-rw-r--r--drivers/iio/adc/ad7124.c14
-rw-r--r--drivers/iio/adc/ad7266.c29
-rw-r--r--drivers/iio/adc/ad7780.c1
-rw-r--r--drivers/iio/adc/ad7791.c1
-rw-r--r--drivers/iio/adc/ad7793.c1
-rw-r--r--drivers/iio/adc/ad7887.c82
-rw-r--r--drivers/iio/adc/ad7923.c64
-rw-r--r--drivers/iio/adc/ad799x.c66
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c6
-rw-r--r--drivers/iio/adc/ltc2496.c108
-rw-r--r--drivers/iio/adc/ltc2497-core.c243
-rw-r--r--drivers/iio/adc/ltc2497.c234
-rw-r--r--drivers/iio/adc/ltc2497.h18
-rw-r--r--drivers/iio/adc/max9611.c36
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c6
-rw-r--r--drivers/iio/adc/qcom-vadc-common.h1
-rw-r--r--drivers/iio/adc/stm32-adc-core.c23
-rw-r--r--drivers/iio/adc/stm32-adc-core.h9
-rw-r--r--drivers/iio/adc/stm32-adc.c71
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c55
-rw-r--r--drivers/iio/adc/ti-ads1015.c73
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c30
-rw-r--r--drivers/iio/buffer/kfifo_buf.c5
-rw-r--r--drivers/iio/chemical/Kconfig1
-rw-r--r--drivers/iio/chemical/Makefile2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c (renamed from drivers/iio/chemical/atlas-ph-sensor.c)24
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c1
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c1
-rw-r--r--drivers/iio/common/ssp_sensors/ssp.h14
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c29
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c45
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c21
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c12
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c3
-rw-r--r--drivers/iio/dac/ad5592r-base.c1
-rw-r--r--drivers/iio/dac/ad7303.c25
-rw-r--r--drivers/iio/dac/stm32-dac-core.c19
-rw-r--r--drivers/iio/frequency/adf4350.c30
-rw-r--r--drivers/iio/gyro/Kconfig32
-rw-r--r--drivers/iio/gyro/adis16136.c72
-rw-r--r--drivers/iio/gyro/adis16260.c14
-rw-r--r--drivers/iio/gyro/itg3200_core.c1
-rw-r--r--drivers/iio/gyro/st_gyro.h2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c75
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c9
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c9
-rw-r--r--drivers/iio/humidity/dht11.c1
-rw-r--r--drivers/iio/humidity/hts221_core.c19
-rw-r--r--drivers/iio/iio_core.h8
-rw-r--r--drivers/iio/imu/adis.c139
-rw-r--r--drivers/iio/imu/adis16400.c115
-rw-r--r--drivers/iio/imu/adis16460.c7
-rw-r--r--drivers/iio/imu/adis16480.c92
-rw-r--r--drivers/iio/imu/adis_buffer.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c237
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h22
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c80
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h49
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c27
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c124
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c76
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c3
-rw-r--r--drivers/iio/industrialio-buffer.c16
-rw-r--r--drivers/iio/industrialio-core.c25
-rw-r--r--drivers/iio/light/apds9960.c2
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c1
-rw-r--r--drivers/iio/light/lm3533-als.c2
-rw-r--r--drivers/iio/light/si1145.c1
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c1
-rw-r--r--drivers/iio/light/vcnl4000.c3
-rw-r--r--drivers/iio/magnetometer/ak8975.c107
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c9
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c9
-rw-r--r--drivers/iio/pressure/Kconfig12
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c18
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c1
-rw-r--r--drivers/iio/pressure/dlhl60d.c375
-rw-r--r--drivers/iio/pressure/st_pressure.h2
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c22
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c9
-rw-r--r--drivers/iio/proximity/Kconfig15
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c3
-rw-r--r--drivers/iio/proximity/ping.c335
-rw-r--r--drivers/iio/resolver/ad2s1200.c1
-rw-r--r--drivers/iio/temperature/max31856.c134
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c44
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c3
-rw-r--r--drivers/infiniband/core/Makefile9
-rw-r--r--drivers/infiniband/core/addr.c2
-rw-r--r--drivers/infiniband/core/cache.c151
-rw-r--r--drivers/infiniband/core/cm.c1000
-rw-r--r--drivers/infiniband/core/cm_msgs.h755
-rw-r--r--drivers/infiniband/core/cma.c90
-rw-r--r--drivers/infiniband/core/cma_trace.c16
-rw-r--r--drivers/infiniband/core/cma_trace.h391
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/cq.c27
-rw-r--r--drivers/infiniband/core/device.c42
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c2
-rw-r--r--drivers/infiniband/core/nldev.c3
-rw-r--r--drivers/infiniband/core/rdma_core.c235
-rw-r--r--drivers/infiniband/core/rdma_core.h45
-rw-r--r--drivers/infiniband/core/sa_query.c4
-rw-r--r--drivers/infiniband/core/security.c24
-rw-r--r--drivers/infiniband/core/trace.c14
-rw-r--r--drivers/infiniband/core/umem.c55
-rw-r--r--drivers/infiniband/core/umem_odp.c64
-rw-r--r--drivers/infiniband/core/user_mad.c5
-rw-r--r--drivers/infiniband/core/uverbs.h48
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c335
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c45
-rw-r--r--drivers/infiniband/core/uverbs_main.c301
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c45
-rw-r--r--drivers/infiniband/core/uverbs_std_types_async_fd.c52
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c19
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c38
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c7
-rw-r--r--drivers/infiniband/core/verbs.c96
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c14
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c4
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h37
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c19
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c198
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h8
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h1
-rw-r--r--drivers/infiniband/hw/hfi1/common.h3
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c237
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c64
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h200
-rw-r--r--drivers/infiniband/hw/hfi1/init.c87
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c4
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c106
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h1
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c9
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h15
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h8
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h2
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c5
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c4
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c17
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h44
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c51
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c874
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h159
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c106
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c94
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c19
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c29
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c20
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c3
-rw-r--r--drivers/infiniband/hw/mlx4/main.c20
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c9
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c184
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c3
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c3
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c28
-rw-r--r--drivers/infiniband/hw/mlx5/main.c287
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c25
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h50
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c101
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c77
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c180
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c22
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c8
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c84
-rw-r--r--drivers/infiniband/sw/rdmavt/rc.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h2
-rw-r--r--drivers/infiniband/sw/siw/siw.h26
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c13
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c61
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c12
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c2
-rw-r--r--drivers/input/evdev.c19
-rw-r--r--drivers/input/input.c54
-rw-r--r--drivers/input/keyboard/goldfish_events.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/keyboard/imx_sc_key.c8
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c4
-rw-r--r--drivers/input/misc/axp20x-pek.c45
-rw-r--r--drivers/input/misc/keyspan_remote.c9
-rw-r--r--drivers/input/misc/max77650-onkey.c7
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c2
-rw-r--r--drivers/input/misc/uinput.c19
-rw-r--r--drivers/input/mouse/cyapa_gen5.c8
-rw-r--r--drivers/input/mouse/psmouse-smbus.c8
-rw-r--r--drivers/input/mouse/pxa930_trkball.c2
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/rmi4/rmi_f11.c14
-rw-r--r--drivers/input/rmi4/rmi_f54.c43
-rw-r--r--drivers/input/rmi4/rmi_smbus.c2
-rw-r--r--drivers/input/serio/Kconfig10
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/gscps2.c2
-rw-r--r--drivers/input/serio/hyperv-keyboard.c27
-rw-r--r--drivers/input/serio/ioc3kbd.c216
-rw-r--r--drivers/input/tablet/aiptek.c8
-rw-r--r--drivers/input/tablet/gtco.c13
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c15
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c55
-rw-r--r--drivers/input/touchscreen/elants_i2c.c77
-rw-r--r--drivers/input/touchscreen/ili210x.c34
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c6
-rw-r--r--drivers/input/touchscreen/sur40.c2
-rw-r--r--drivers/interconnect/Makefile1
-rw-r--r--drivers/interconnect/core.c168
-rw-r--r--drivers/interconnect/internal.h42
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/msm8916.c554
-rw-r--r--drivers/interconnect/qcom/msm8974.c32
-rw-r--r--drivers/interconnect/qcom/qcs404.c32
-rw-r--r--drivers/interconnect/qcom/sdm845.c16
-rw-r--r--drivers/interconnect/trace.h88
-rw-r--r--drivers/iommu/Kconfig35
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c19
-rw-r--r--drivers/iommu/amd_iommu_init.c81
-rw-r--r--drivers/iommu/amd_iommu_types.h7
-rw-r--r--drivers/iommu/arm-smmu-impl.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c600
-rw-r--r--drivers/iommu/arm-smmu.c334
-rw-r--r--drivers/iommu/arm-smmu.h228
-rw-r--r--drivers/iommu/dma-iommu.c26
-rw-r--r--drivers/iommu/dmar.c44
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c75
-rw-r--r--drivers/iommu/intel-iommu.c414
-rw-r--r--drivers/iommu/intel-pasid.c97
-rw-r--r--drivers/iommu/intel-pasid.h6
-rw-r--r--drivers/iommu/intel-svm.c177
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c22
-rw-r--r--drivers/iommu/io-pgtable-arm.c164
-rw-r--r--drivers/iommu/io-pgtable.c2
-rw-r--r--drivers/iommu/iommu-sysfs.c5
-rw-r--r--drivers/iommu/iommu.c60
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/ipmmu-vmsa.c2
-rw-r--r--drivers/iommu/msm_iommu.c4
-rw-r--r--drivers/iommu/mtk_iommu.c4
-rw-r--r--drivers/iommu/of_iommu.c25
-rw-r--r--drivers/iommu/qcom_iommu.c25
-rw-r--r--drivers/iommu/virtio-iommu.c14
-rw-r--r--drivers/ipack/carriers/tpci200.c4
-rw-r--r--drivers/ipack/devices/ipoctal.c6
-rw-r--r--drivers/irqchip/Kconfig18
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c239
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c794
-rw-r--r--drivers/irqchip/irq-gic-v3.c33
-rw-r--r--drivers/irqchip/irq-imx-intmux.c309
-rw-r--r--drivers/irqchip/irq-ingenic.c6
-rw-r--r--drivers/irqchip/irq-mbigen.c1
-rw-r--r--drivers/irqchip/irq-meson-gpio.c137
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/irq-nvic.c15
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c2
-rw-r--r--drivers/irqchip/irq-sifive-plic.c32
-rw-r--r--drivers/isdn/Makefile2
-rw-r--r--drivers/isdn/capi/Kconfig32
-rw-r--r--drivers/isdn/capi/Makefile18
-rw-r--r--drivers/isdn/capi/capi.c14
-rw-r--r--drivers/isdn/capi/capilib.c202
-rw-r--r--drivers/isdn/capi/capiutil.c231
-rw-r--r--drivers/isdn/capi/kcapi.c409
-rw-r--r--drivers/isdn/capi/kcapi.h149
-rw-r--r--drivers/isdn/capi/kcapi_proc.c36
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class.c97
-rw-r--r--drivers/leds/leds-as3645a.c3
-rw-r--r--drivers/leds/leds-bd2802.c27
-rw-r--r--drivers/leds/leds-gpio.c10
-rw-r--r--drivers/leds/leds-lm3532.c11
-rw-r--r--drivers/leds/leds-lm3642.c37
-rw-r--r--drivers/leds/leds-lm3692x.c180
-rw-r--r--drivers/leds/leds-max77650.c7
-rw-r--r--drivers/leds/leds-pca963x.c8
-rw-r--r--drivers/leds/leds-rb532.c1
-rw-r--r--drivers/leds/leds-tps6105x.c89
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c4
-rw-r--r--drivers/lightnvm/pblk-trace.h8
-rw-r--r--drivers/macintosh/Kconfig6
-rw-r--r--drivers/macintosh/via-pmu.c17
-rw-r--r--drivers/mailbox/armada-37xx-rwtm-mailbox.c5
-rw-r--r--drivers/md/bcache/alloc.c18
-rw-r--r--drivers/md/bcache/bcache.h5
-rw-r--r--drivers/md/bcache/bset.c5
-rw-r--r--drivers/md/bcache/bset.h3
-rw-r--r--drivers/md/bcache/btree.c37
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/journal.c78
-rw-r--r--drivers/md/bcache/request.c17
-rw-r--r--drivers/md/bcache/stats.c10
-rw-r--r--drivers/md/bcache/super.c174
-rw-r--r--drivers/md/bcache/sysfs.c22
-rw-r--r--drivers/md/dm-bio-prison-v2.c2
-rw-r--r--drivers/md/dm-crypt.c335
-rw-r--r--drivers/md/dm-dust.c6
-rw-r--r--drivers/md/dm-mpath.c68
-rw-r--r--drivers/md/dm-raid.c43
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-thin-metadata.c22
-rw-r--r--drivers/md/dm-thin.c36
-rw-r--r--drivers/md/dm-verity-target.c18
-rw-r--r--drivers/md/dm-writecache.c71
-rw-r--r--drivers/md/dm-zoned-metadata.c23
-rw-r--r--drivers/md/dm.c9
-rw-r--r--drivers/md/md-bitmap.c41
-rw-r--r--drivers/md/md.c269
-rw-r--r--drivers/md/md.h45
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c27
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h2
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c6
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c5
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c111
-rw-r--r--drivers/md/raid5.c21
-rw-r--r--drivers/media/cec/cec-adap.c40
-rw-r--r--drivers/media/cec/cec-core.c21
-rw-r--r--drivers/media/cec/cec-notifier.c37
-rw-r--r--drivers/media/cec/cec-priv.h2
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c1
-rw-r--r--drivers/media/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c3
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/dib0070.c23
-rw-r--r--drivers/media/dvb-frontends/dib0090.c3
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c91
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.h12
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c4
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h8
-rw-r--r--drivers/media/i2c/adv7604.c32
-rw-r--r--drivers/media/i2c/mt9v032.c10
-rw-r--r--drivers/media/i2c/mt9v111.c2
-rw-r--r--drivers/media/i2c/ov5640.c41
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c198
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c3
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h1
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c6
-rw-r--r--drivers/media/pci/cobalt/cobalt-alsa-pcm.c69
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c75
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c8
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c4
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c114
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c1
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c1
-rw-r--r--drivers/media/pci/cx88/cx88-input.c2
-rw-r--r--drivers/media/pci/ivtv/Kconfig5
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c76
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c9
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h1
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c6
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c1
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c24
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c24
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c16
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c94
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c42
-rw-r--r--drivers/media/platform/atmel/atmel-isi.h2
-rw-r--r--drivers/media/platform/coda/coda-bit.c29
-rw-r--r--drivers/media/platform/coda/coda-common.c45
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c746
-rw-r--r--drivers/media/platform/coda/coda.h3
-rw-r--r--drivers/media/platform/coda/coda_regs.h83
-rw-r--r--drivers/media/platform/coda/trace.h10
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c1
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/isif.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c31
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c66
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h7
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c23
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h8
-rw-r--r--drivers/media/platform/omap3isp/isp.c8
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c12
-rw-r--r--drivers/media/platform/pxa_camera.c2
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c6
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c13
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c5
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c57
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h6
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c20
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c5
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c2
-rw-r--r--drivers/media/platform/ti-vpe/cal.c773
-rw-r--r--drivers/media/platform/ti-vpe/cal_regs.h221
-rw-r--r--drivers/media/platform/ti-vpe/csc.c32
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c166
-rw-r--r--drivers/media/platform/vivid/Makefile3
-rw-r--r--drivers/media/platform/vivid/vivid-core.c203
-rw-r--r--drivers/media/platform/vivid/vivid-core.h20
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c11
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-touch.c181
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-touch.h13
-rw-r--r--drivers/media/platform/vivid/vivid-touch-cap.c341
-rw-r--r--drivers/media/platform/vivid/vivid-touch-cap.h39
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c2
-rw-r--r--drivers/media/rc/iguanair.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c79
-rw-r--r--drivers/media/rc/rc-main.c27
-rw-r--r--drivers/media/rc/serial_ir.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c79
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c19
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c42
-rw-r--r--drivers/media/usb/dvb-usb-v2/zd1301.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c33
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c8
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c8
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c21
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c87
-rw-r--r--drivers/media/usb/go7007/s2250-board.c1
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c60
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c768
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c82
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c29
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c37
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h1
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c476
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c213
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c26
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c5
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c8
-rw-r--r--drivers/memory/mvebu-devbus.c4
-rw-r--r--drivers/memory/samsung/Kconfig2
-rw-r--r--drivers/memory/samsung/exynos-srom.c2
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c7
-rw-r--r--drivers/memory/tegra/Makefile3
-rw-r--r--drivers/memory/tegra/tegra124-emc.c185
-rw-r--r--drivers/memory/tegra/tegra186-emc.c293
-rw-r--r--drivers/memory/tegra/tegra186.c1067
-rw-r--r--drivers/memory/tegra/tegra20-emc.c175
-rw-r--r--drivers/memory/tegra/tegra210.c2
-rw-r--r--drivers/memory/tegra/tegra30-emc.c352
-rw-r--r--drivers/message/fusion/mptctl.c213
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/mfd/Kconfig43
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab8500-core.c18
-rw-r--r--drivers/mfd/atmel-hlcdc.c18
-rw-r--r--drivers/mfd/axp20x.c2
-rw-r--r--drivers/mfd/cros_ec_dev.c23
-rw-r--r--drivers/mfd/cs47l15-tables.c1
-rw-r--r--drivers/mfd/da9062-core.c18
-rw-r--r--drivers/mfd/db8500-prcmu.c122
-rw-r--r--drivers/mfd/dln2.c13
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c19
-rw-r--r--drivers/mfd/ioc3.c669
-rw-r--r--drivers/mfd/madera-core.c33
-rw-r--r--drivers/mfd/rn5t618.c1
-rw-r--r--drivers/mfd/rohm-bd70528.c3
-rw-r--r--drivers/mfd/rohm-bd71828.c344
-rw-r--r--drivers/mfd/rohm-bd718x7.c43
-rw-r--r--drivers/mfd/sm501.c19
-rw-r--r--drivers/mfd/syscon.c31
-rw-r--r--drivers/mfd/tqmx86.c3
-rw-r--r--drivers/mfd/wcd934x.c306
-rw-r--r--drivers/misc/cardreader/alcor_pci.c8
-rw-r--r--drivers/misc/cardreader/rts5261.c11
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/cxl/context.c2
-rw-r--r--drivers/misc/eeprom/at24.c72
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/genwqe/card_ddcb.c8
-rw-r--r--drivers/misc/habanalabs/command_submission.c5
-rw-r--r--drivers/misc/habanalabs/context.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c15
-rw-r--r--drivers/misc/isl29020.c1
-rw-r--r--drivers/misc/lkdtm/bugs.c12
-rw-r--r--drivers/misc/mei/bus.c10
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c33
-rw-r--r--drivers/misc/mei/hw-me-regs.h6
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mic/card/mic_debugfs.c3
-rw-r--r--drivers/misc/mic/cosm/cosm_debugfs.c3
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c3
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c2
-rw-r--r--drivers/misc/ocxl/Kconfig1
-rw-r--r--drivers/misc/ocxl/context.c8
-rw-r--r--drivers/misc/ocxl/file.c23
-rw-r--r--drivers/misc/pti.c6
-rw-r--r--drivers/misc/pvpanic.c12
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c42
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/sram-exec.c21
-rw-r--r--drivers/misc/ti-st/st_core.c4
-rw-r--r--drivers/misc/tsl2550.c12
-rw-r--r--drivers/misc/vmw_balloon.c1
-rw-r--r--drivers/misc/xilinx_sdfec.c12
-rw-r--r--drivers/mmc/core/block.c6
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/core/host.c33
-rw-r--r--drivers/mmc/core/mmc_ops.c34
-rw-r--r--drivers/mmc/core/slot-gpio.c31
-rw-r--r--drivers/mmc/host/Kconfig6
-rw-r--r--drivers/mmc/host/atmel-mci.c2
-rw-r--r--drivers/mmc/host/au1xmmc.c7
-rw-r--r--drivers/mmc/host/bcm2835.c12
-rw-r--r--drivers/mmc/host/cavium-thunderx.c16
-rw-r--r--drivers/mmc/host/davinci_mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c8
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c10
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c4
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/mmci.c114
-rw-r--r--drivers/mmc/host/mmci.h10
-rw-r--r--drivers/mmc/host/mtk-sd.c5
-rw-r--r--drivers/mmc/host/mvsdio.c6
-rw-r--r--drivers/mmc/host/mxcmmc.c11
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c10
-rw-r--r--drivers/mmc/host/owl-mmc.c6
-rw-r--r--drivers/mmc/host/pxamci.c26
-rw-r--r--drivers/mmc/host/renesas_sdhi.h10
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c22
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c25
-rw-r--r--drivers/mmc/host/s3cmci.c4
-rw-r--r--drivers/mmc/host/sdhci-acpi.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c270
-rw-r--r--drivers/mmc/host/sdhci-cadence.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c18
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c8
-rw-r--r--drivers/mmc/host/sdhci-msm.c167
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c112
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c263
-rw-r--r--drivers/mmc/host/sdhci-omap.c60
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c14
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci-sirf.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mmc/host/sdhci.c408
-rw-r--r--drivers/mmc/host/sdhci.h15
-rw-r--r--drivers/mmc/host/sdhci_am654.c58
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c4
-rw-r--r--drivers/mmc/host/sh_mmcif.c12
-rw-r--r--drivers/mmc/host/sunxi-mmc.c3
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mmc/host/uniphier-sd.c14
-rw-r--r--drivers/mmc/host/usdhi6rol0.c27
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c8
-rw-r--r--drivers/mtd/maps/amd76xrom.c2
-rw-r--r--drivers/mtd/maps/ck804xrom.c2
-rw-r--r--drivers/mtd/maps/esb2rom.c2
-rw-r--r--drivers/mtd/maps/ichxrom.c2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c4
-rw-r--r--drivers/mtd/maps/l440gx.c2
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c8
-rw-r--r--drivers/mtd/maps/pci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c7
-rw-r--r--drivers/mtd/maps/physmap-core.c20
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/scb2_flash.c2
-rw-r--r--drivers/mtd/maps/ts5500_flash.c4
-rw-r--r--drivers/mtd/mtdconcat.c5
-rw-r--r--drivers/mtd/nand/onenand/Kconfig14
-rw-r--r--drivers/mtd/nand/onenand/Makefile4
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c96
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c (renamed from drivers/mtd/nand/onenand/omap2.c)14
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c (renamed from drivers/mtd/nand/onenand/samsung_mtd.c)17
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c20
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c10
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c13
-rw-r--r--drivers/mtd/nand/raw/denali.c14
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c56
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c6
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c11
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c11
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c38
-rw-r--r--drivers/mtd/nand/spi/toshiba.c10
-rw-r--r--drivers/mtd/parsers/sharpslpart.c4
-rw-r--r--drivers/mtd/sm_ftl.c3
-rw-r--r--drivers/mtd/spi-nor/Kconfig4
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c4
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c4
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c6
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c2
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c134
-rw-r--r--drivers/mtd/ubi/attach.c2
-rw-r--r--drivers/mtd/ubi/build.c33
-rw-r--r--drivers/mtd/ubi/fastmap.c23
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/vtbl.c8
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h2
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c122
-rw-r--r--drivers/net/bonding/bond_alb.c44
-rw-r--r--drivers/net/bonding/bond_main.c42
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/cc770/cc770_isa.c2
-rw-r--r--drivers/net/can/flexcan.c73
-rw-r--r--drivers/net/can/m_can/tcan4x5x.c83
-rw-r--r--drivers/net/can/mscan/mscan.c21
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c2
-rw-r--r--drivers/net/can/slcan.c12
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c8
-rw-r--r--drivers/net/can/xilinx_can.c7
-rw-r--r--drivers/net/dsa/Kconfig5
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c89
-rw-r--r--drivers/net/dsa/b53/b53_priv.h4
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/dsa_loop.c3
-rw-r--r--drivers/net/dsa/lan9303-core.c3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c3
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c3
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c6
-rw-r--r--drivers/net/dsa/mt7530.c3
-rw-r--r--drivers/net/dsa/mv88e6060.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c100
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h9
-rw-r--r--drivers/net/dsa/ocelot/Kconfig3
-rw-r--r--drivers/net/dsa/ocelot/felix.c271
-rw-r--r--drivers/net/dsa/ocelot/felix.h16
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c521
-rw-r--r--drivers/net/dsa/qca/Kconfig9
-rw-r--r--drivers/net/dsa/qca/Makefile2
-rw-r--r--drivers/net/dsa/qca/ar9331.c856
-rw-r--r--drivers/net/dsa/qca8k.c3
-rw-r--r--drivers/net/dsa/rtl8366rb.c3
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c133
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c42
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c5
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c5
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c8
-rw-r--r--drivers/net/ethernet/3com/typhoon.c2
-rw-r--r--drivers/net/ethernet/8390/8390.c4
-rw-r--r--drivers/net/ethernet/8390/8390.h4
-rw-r--r--drivers/net/ethernet/8390/8390p.c4
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c4
-rw-r--r--drivers/net/ethernet/8390/lib8390.c2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c4
-rw-r--r--drivers/net/ethernet/agere/et131x.c13
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c17
-rw-r--r--drivers/net/ethernet/alteon/acenic.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c96
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c74
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c975
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h75
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/7990.h2
-rw-r--r--drivers/net/ethernet/amd/a2065.c13
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c21
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c14
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c19
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c7
-rw-r--r--drivers/net/ethernet/broadcom/b44.c11
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c20
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c171
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c319
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c54
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c133
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c14
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.h15
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c136
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c253
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c80
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c71
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c18
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c4
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c4
-rw-r--r--drivers/net/ethernet/dnet.c15
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c13
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c55
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c34
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h11
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c120
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c43
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c47
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c11
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c14
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c12
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c7
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c268
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_trace.h139
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c86
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c527
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c441
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c6
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c4
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c73
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c53
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c19
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c3
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c255
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c105
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c655
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c2563
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h112
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c1275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c459
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c292
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c538
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c51
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h47
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h102
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c2945
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c716
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h37
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c7
-rw-r--r--drivers/net/ethernet/jme.c2
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/lantiq_etop.c13
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c68
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c53
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c1410
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h615
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c662
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c1349
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h147
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h276
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c848
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h162
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c314
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c758
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c181
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c502
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h153
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c184
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c592
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c911
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h9
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c7
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ana.h625
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot_dev.h275
-rw-r--r--drivers/net/ethernet/mscc/ocelot_qsys.h270
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c4
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c382
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h46
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/crypto.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/fw.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h106
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h38
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c260
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c144
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c498
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c48
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c116
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c2
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c4
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c113
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c58
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h99
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c249
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c23
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h69
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c361
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h130
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c3891
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c129
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h2564
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c521
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c47
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c36
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c149
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Makefile2
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h78
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c1491
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c1307
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c58
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c8
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/Makefile9
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2822
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c2502
-rw-r--r--drivers/net/ethernet/sfc/efx.h65
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c1234
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h55
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1102
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h73
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c446
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c457
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h30
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c2270
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h159
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c386
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.h32
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c558
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c568
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.h57
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h24
-rw-r--r--drivers/net/ethernet/sfc/nic.h7
-rw-r--r--drivers/net/ethernet/sfc/rx.c592
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c851
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h97
-rw-r--r--drivers/net/ethernet/sfc/selftest.c9
-rw-r--r--drivers/net/ethernet/sfc/selftest.h2
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c1
-rw-r--r--drivers/net/ethernet/sfc/tx.c398
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c404
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h36
-rw-r--r--drivers/net/ethernet/sgi/Kconfig5
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c549
-rw-r--r--drivers/net/ethernet/sgi/meth.c4
-rw-r--r--drivers/net/ethernet/silan/sc92031.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c4
-rw-r--r--drivers/net/ethernet/smsc/epic100.c11
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c11
-rw-r--r--drivers/net/ethernet/socionext/netsec.c55
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c47
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c119
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c405
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c152
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c162
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c34
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c16
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c14
-rw-r--r--drivers/net/ethernet/via/via-velocity.h1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c17
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c4
-rw-r--r--drivers/net/ethernet/xscale/Kconfig14
-rw-r--r--drivers/net/ethernet/xscale/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp46x_ts.h68
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c213
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c (renamed from drivers/ptp/ptp_ixp46x.c)3
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c16
-rw-r--r--drivers/net/fjes/fjes_hw.c2
-rw-r--r--drivers/net/fjes/fjes_main.c7
-rw-r--r--drivers/net/fjes/fjes_trace.h2
-rw-r--r--drivers/net/gtp.c143
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hyperv/Makefile2
-rw-r--r--drivers/net/hyperv/hyperv_net.h24
-rw-r--r--drivers/net/hyperv/netvsc.c31
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c218
-rw-r--r--drivers/net/hyperv/netvsc_drv.c190
-rw-r--r--drivers/net/hyperv/rndis_filter.c20
-rw-r--r--drivers/net/macsec.c787
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/netdevsim/bpf.c10
-rw-r--r--drivers/net/netdevsim/bus.c64
-rw-r--r--drivers/net/netdevsim/dev.c35
-rw-r--r--drivers/net/netdevsim/fib.c674
-rw-r--r--drivers/net/netdevsim/health.c6
-rw-r--r--drivers/net/netdevsim/netdevsim.h4
-rw-r--r--drivers/net/netdevsim/sdev.c69
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin.c12
-rw-r--r--drivers/net/phy/aquantia_main.c9
-rw-r--r--drivers/net/phy/at803x.c11
-rw-r--r--drivers/net/phy/bcm84881.c269
-rw-r--r--drivers/net/phy/dp83640.c217
-rw-r--r--drivers/net/phy/dp83822.c18
-rw-r--r--drivers/net/phy/dp83867.c70
-rw-r--r--drivers/net/phy/dp83869.c2
-rw-r--r--drivers/net/phy/fixed_phy.c11
-rw-r--r--drivers/net/phy/lxt.c24
-rw-r--r--drivers/net/phy/marvell.c209
-rw-r--r--drivers/net/phy/marvell10g.c13
-rw-r--r--drivers/net/phy/mdio-i2c.c28
-rw-r--r--drivers/net/phy/mdio-mux-meson-g12a.c4
-rw-r--r--drivers/net/phy/mdio_bus.c267
-rw-r--r--drivers/net/phy/mii_timestamper.c132
-rw-r--r--drivers/net/phy/mscc.c1139
-rw-r--r--drivers/net/phy/mscc_fc_buffer.h64
-rw-r--r--drivers/net/phy/mscc_mac.h159
-rw-r--r--drivers/net/phy/mscc_macsec.h266
-rw-r--r--drivers/net/phy/phy-core.c4
-rw-r--r--drivers/net/phy/phy.c29
-rw-r--r--drivers/net/phy/phy_device.c123
-rw-r--r--drivers/net/phy/phylink.c351
-rw-r--r--drivers/net/phy/realtek.c59
-rw-r--r--drivers/net/phy/sfp-bus.c124
-rw-r--r--drivers/net/phy/sfp.c199
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/uPD60620.c7
-rw-r--r--drivers/net/ppp/ppp_async.c18
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pptp.c5
-rw-r--r--drivers/net/slip/slip.c14
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/ax88172a.c13
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/ch9200.c24
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c40
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c42
-rw-r--r--drivers/net/usb/r8152.c155
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c4
-rw-r--r--drivers/net/vxlan.c25
-rw-r--r--drivers/net/wan/Kconfig3
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c41
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.h2
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_x25.c93
-rw-r--r--drivers/net/wan/ixp4xx_hss.c39
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/wanxl.c4
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wireguard/Makefile18
-rw-r--r--drivers/net/wireguard/allowedips.c377
-rw-r--r--drivers/net/wireguard/allowedips.h59
-rw-r--r--drivers/net/wireguard/cookie.c236
-rw-r--r--drivers/net/wireguard/cookie.h59
-rw-r--r--drivers/net/wireguard/device.c458
-rw-r--r--drivers/net/wireguard/device.h65
-rw-r--r--drivers/net/wireguard/main.c63
-rw-r--r--drivers/net/wireguard/messages.h128
-rw-r--r--drivers/net/wireguard/netlink.c640
-rw-r--r--drivers/net/wireguard/netlink.h12
-rw-r--r--drivers/net/wireguard/noise.c832
-rw-r--r--drivers/net/wireguard/noise.h137
-rw-r--r--drivers/net/wireguard/peer.c240
-rw-r--r--drivers/net/wireguard/peer.h83
-rw-r--r--drivers/net/wireguard/peerlookup.c221
-rw-r--r--drivers/net/wireguard/peerlookup.h64
-rw-r--r--drivers/net/wireguard/queueing.c53
-rw-r--r--drivers/net/wireguard/queueing.h194
-rw-r--r--drivers/net/wireguard/ratelimiter.c223
-rw-r--r--drivers/net/wireguard/ratelimiter.h19
-rw-r--r--drivers/net/wireguard/receive.c595
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c683
-rw-r--r--drivers/net/wireguard/selftest/counter.c104
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c226
-rw-r--r--drivers/net/wireguard/send.c413
-rw-r--r--drivers/net/wireguard/socket.c438
-rw-r--r--drivers/net/wireguard/socket.h44
-rw-r--r--drivers/net/wireguard/timers.c243
-rw-r--r--drivers/net/wireguard/timers.h31
-rw-r--r--drivers/net/wireguard/version.h1
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h23
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c65
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c232
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig35
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile25
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c1003
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c808
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h183
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c795
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h826
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c1075
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h279
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.c4570
-rw-r--r--drivers/net/wireless/ath/ath11k/debug_htt_stats.h1662
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c543
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c899
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h1535
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c4195
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h86
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c962
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h40
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c1124
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h897
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2468
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c1190
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h332
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c154
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h69
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c773
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h313
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h127
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c5907
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h147
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c236
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2433
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h445
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c702
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h1212
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c199
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.h29
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode_i.h50
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h113
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c5810
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h4764
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c43
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c88
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h33
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c165
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c54
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c70
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c118
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c5
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c15
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c224
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c80
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c4
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c14
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c7
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.h2
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c75
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/airtime.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c13
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c73
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h52
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/ray_cs.c20
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h103
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c48
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h61
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c299
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h853
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c112
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h102
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h115
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c389
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h186
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c91
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h72
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c60
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h29
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c24
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c889
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.h58
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c49
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/hash.c6
-rw-r--r--drivers/net/xen-netback/interface.c34
-rw-r--r--drivers/net/xen-netback/netback.c20
-rw-r--r--drivers/net/xen-netback/xenbus.c349
-rw-r--r--drivers/nfc/nxp-nci/i2c.c2
-rw-r--r--drivers/nfc/pn533/i2c.c1
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nfc/port100.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c5
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvme/host/Kconfig1
-rw-r--r--drivers/nvme/host/core.c14
-rw-r--r--drivers/nvme/host/hwmon.c13
-rw-r--r--drivers/nvme/host/pci.c46
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/tcp.c9
-rw-r--r--drivers/nvme/target/admin-cmd.c12
-rw-r--r--drivers/nvme/target/core.c80
-rw-r--r--drivers/nvme/target/fabrics-cmd.c15
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/io-cmd-file.c2
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvmem/Kconfig8
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c27
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c16
-rw-r--r--drivers/nvmem/imx-ocotp.c79
-rw-r--r--drivers/nvmem/nvmem.h2
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c192
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/base.c166
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/dynamic.c2
-rw-r--r--drivers/of/of_mdio.c42
-rw-r--r--drivers/of/of_private.h6
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/opp/core.c48
-rw-r--r--drivers/opp/of.c31
-rw-r--r--drivers/opp/opp.h6
-rw-r--r--drivers/opp/ti-opp-supply.c2
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c8
-rw-r--r--drivers/parisc/led.c17
-rw-r--r--drivers/parisc/sba_iommu.c4
-rw-r--r--drivers/pci/ats.c8
-rw-r--r--drivers/pci/controller/Kconfig10
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/dwc/Kconfig11
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c2
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c56
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h12
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c545
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c150
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c31
-rw-r--r--drivers/pci/controller/pci-tegra.c6
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c1015
-rw-r--r--drivers/pci/controller/pcie-iproc.c24
-rw-r--r--drivers/pci/controller/vmd.c156
-rw-r--r--drivers/pci/hotplug/pnv_php.c82
-rw-r--r--drivers/pci/iov.c9
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/p2pdma.c3
-rw-r--r--drivers/pci/pci.c51
-rw-r--r--drivers/pci/pci.h3
-rw-r--r--drivers/pci/pcie/aer.c1
-rw-r--r--drivers/pci/pcie/err.c12
-rw-r--r--drivers/pci/proc.c25
-rw-r--r--drivers/pci/quirks.c140
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-bus.c163
-rw-r--r--drivers/pci/switch/switchtec.c370
-rw-r--r--drivers/pcmcia/i82092.c648
-rw-r--r--drivers/pcmcia/i82092aa.h11
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c6
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c58
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/Kconfig3
-rw-r--r--drivers/phy/broadcom/Kconfig4
-rw-r--r--drivers/phy/broadcom/Makefile2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c120
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c414
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c226
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h148
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c269
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c709
-rw-r--r--drivers/phy/hisilicon/Kconfig16
-rw-r--r--drivers/phy/intel/Kconfig9
-rw-r--r--drivers/phy/intel/Makefile2
-rw-r--r--drivers/phy/intel/phy-intel-emmc.c284
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c2
-rw-r--r--drivers/phy/marvell/Kconfig8
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c20
-rw-r--r--drivers/phy/mediatek/Kconfig25
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c128
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c11
-rw-r--r--drivers/phy/phy-core.c53
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c9
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c319
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c4
-rw-r--r--drivers/phy/samsung/Kconfig6
-rw-r--r--drivers/phy/ti/Kconfig20
-rw-r--r--drivers/phy/ti/Makefile1
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c959
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c18
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c170
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c212
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c411
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c50
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h38
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c10
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c2
-rw-r--r--drivers/pinctrl/cirrus/Kconfig1
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c1
-rw-r--r--drivers/pinctrl/core.c74
-rw-r--r--drivers/pinctrl/core.h4
-rw-r--r--drivers/pinctrl/devicetree.c4
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mp.c345
-rw-r--r--drivers/pinctrl/intel/Kconfig13
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c491
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c112
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c101
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h44
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c975
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c547
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c7
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c16
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c637
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/pinmux.c2
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c78
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/Kconfig16
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c57
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c11
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77950.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c)26
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77951.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7795.c)39
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c33
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c39
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h4
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c13
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c18
-rw-r--r--drivers/platform/chrome/cros_ec.c3
-rw-r--r--drivers/platform/chrome/cros_ec.h19
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c1
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c1
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c2
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c4
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c1
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c3
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c6
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c2
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c1
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c2
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c1
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c97
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h26
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c1
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/Kconfig3
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/keyboard_leds.c32
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c6
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c35
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c19
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c14
-rw-r--r--drivers/platform/mips/Kconfig2
-rw-r--r--drivers/platform/x86/Kconfig18
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c1
-rw-r--r--drivers/platform/x86/asus-wmi.c139
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c27
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/intel-hid.c1
-rw-r--r--drivers/platform/x86/intel-uncore-frequency.c437
-rw-r--r--drivers/platform/x86/intel_atomisp2_pm.c25
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c81
-rw-r--r--drivers/platform/x86/intel_ips.h2
-rw-r--r--drivers/platform/x86/intel_menlow.c9
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c5
-rw-r--r--drivers/platform/x86/intel_pmc_core.c141
-rw-r--r--drivers/platform/x86/intel_pmc_core.h6
-rw-r--r--drivers/platform/x86/intel_pmc_core_pltdrv.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c114
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c414
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_common.c3
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c14
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c64
-rw-r--r--drivers/platform/x86/mlx-platform.c564
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c63
-rw-r--r--drivers/platform/x86/pmc_atom.c10
-rw-r--r--drivers/platform/x86/samsung-laptop.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c15
-rw-r--r--drivers/platform/x86/toshiba_acpi.c60
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c82
-rw-r--r--drivers/pnp/isapnp/core.c25
-rw-r--r--drivers/pnp/isapnp/proc.c9
-rw-r--r--drivers/pnp/pnpbios/proc.c17
-rw-r--r--drivers/power/avs/Kconfig16
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/qcom-cpr.c1794
-rw-r--r--drivers/power/avs/rockchip-io-domain.c6
-rw-r--r--drivers/power/reset/Kconfig20
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c72
-rw-r--r--drivers/power/reset/gpio-restart.c8
-rw-r--r--drivers/power/supply/Kconfig30
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/ab8500_fg.c14
-rw-r--r--drivers/power/supply/abx500_chargalg.c2
-rw-r--r--drivers/power/supply/axp20x_ac_power.c131
-rw-r--r--drivers/power/supply/axp20x_usb_power.c219
-rw-r--r--drivers/power/supply/bq25890_charger.c103
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c11
-rw-r--r--drivers/power/supply/ingenic-battery.c15
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c6
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c2
-rw-r--r--drivers/power/supply/max17040_battery.c122
-rw-r--r--drivers/power/supply/max17042_battery.c17
-rw-r--r--drivers/power/supply/max77650-charger.c7
-rw-r--r--drivers/power/supply/pda_power.c4
-rw-r--r--drivers/power/supply/power_supply_core.c67
-rw-r--r--drivers/power/supply/sbs-battery.c35
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c49
-rw-r--r--drivers/power/supply/ucs1002_power.c42
-rw-r--r--drivers/powercap/intel_rapl_common.c5
-rw-r--r--drivers/ptp/Kconfig26
-rw-r--r--drivers/ptp/Makefile4
-rw-r--r--drivers/ptp/idt8a340_reg.h2
-rw-r--r--drivers/ptp/ptp_clock.c41
-rw-r--r--drivers/ptp/ptp_clockmatrix.c79
-rw-r--r--drivers/ptp/ptp_ines.c852
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/ptp/ptp_qoriq.c15
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/core.c13
-rw-r--r--drivers/pwm/pwm-atmel.c87
-rw-r--r--drivers/pwm/pwm-cros-ec.c58
-rw-r--r--drivers/pwm/pwm-imx27.c147
-rw-r--r--drivers/pwm/pwm-mxs.c101
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c54
-rw-r--r--drivers/pwm/pwm-pca9685.c4
-rw-r--r--drivers/pwm/pwm-rcar.c5
-rw-r--r--drivers/pwm/pwm-stm32.c4
-rw-r--r--drivers/pwm/pwm-sun4i.c194
-rw-r--r--drivers/regulator/Kconfig44
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/axp20x-regulator.c11
-rw-r--r--drivers/regulator/bd70528-regulator.c1
-rw-r--r--drivers/regulator/bd71828-regulator.c807
-rw-r--r--drivers/regulator/bd718x7-regulator.c226
-rw-r--r--drivers/regulator/core.c18
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/da9211-regulator.c5
-rw-r--r--drivers/regulator/helpers.c14
-rw-r--r--drivers/regulator/isl9305.c5
-rw-r--r--drivers/regulator/lp3971.c5
-rw-r--r--drivers/regulator/ltc3676.c5
-rw-r--r--drivers/regulator/max77650-regulator.c7
-rw-r--r--drivers/regulator/mp8859.c156
-rw-r--r--drivers/regulator/mpq7920.c330
-rw-r--r--drivers/regulator/mpq7920.h69
-rw-r--r--drivers/regulator/mt6311-regulator.c5
-rw-r--r--drivers/regulator/pv88060-regulator.c5
-rw-r--r--drivers/regulator/pv88090-regulator.c5
-rw-r--r--drivers/regulator/rk808-regulator.c2
-rw-r--r--drivers/regulator/rn5t618-regulator.c1
-rw-r--r--drivers/regulator/rohm-regulator.c95
-rw-r--r--drivers/regulator/s2mpa01.c2
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/regulator/slg51000-regulator.c5
-rw-r--r--drivers/regulator/sy8106a-regulator.c5
-rw-r--r--drivers/regulator/sy8824x.c5
-rw-r--r--drivers/regulator/ti-abb-regulator.c4
-rw-r--r--drivers/regulator/tps65132-regulator.c5
-rw-r--r--drivers/regulator/vctrl-regulator.c38
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c101
-rw-r--r--drivers/remoteproc/Kconfig10
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/mtk_common.h94
-rw-r--r--drivers/remoteproc/mtk_scp.c663
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c219
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c236
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c260
-rw-r--r--drivers/remoteproc/qcom_sysmon.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c6
-rw-r--r--drivers/reset/Kconfig25
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/core.c41
-rw-r--r--drivers/reset/reset-brcmstb-rescal.c107
-rw-r--r--drivers/reset/reset-brcmstb.c6
-rw-r--r--drivers/reset/reset-intel-gw.c262
-rw-r--r--drivers/reset/reset-npcm.c291
-rw-r--r--drivers/reset/reset-qcom-aoss.c3
-rw-r--r--drivers/reset/reset-scmi.c2
-rw-r--r--drivers/reset/reset-uniphier.c13
-rw-r--r--drivers/rpmsg/Kconfig9
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/mtk_rpmsg.c414
-rw-r--r--drivers/rtc/Kconfig27
-rw-r--r--drivers/rtc/rtc-abx80x.c7
-rw-r--r--drivers/rtc/rtc-asm9260.c3
-rw-r--r--drivers/rtc/rtc-at91rm9200.c119
-rw-r--r--drivers/rtc/rtc-at91rm9200.h71
-rw-r--r--drivers/rtc/rtc-bd70528.c220
-rw-r--r--drivers/rtc/rtc-cmos.c12
-rw-r--r--drivers/rtc/rtc-cros-ec.c1
-rw-r--r--drivers/rtc/rtc-ds1343.c10
-rw-r--r--drivers/rtc/rtc-hym8563.c19
-rw-r--r--drivers/rtc/rtc-m48t35.c11
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c15
-rw-r--r--drivers/rtc/rtc-moxart.c5
-rw-r--r--drivers/rtc/rtc-mt6397.c49
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c6
-rw-r--r--drivers/rtc/rtc-pcf85063.c16
-rw-r--r--drivers/rtc/rtc-pcf8523.c6
-rw-r--r--drivers/rtc/rtc-pcf8563.c40
-rw-r--r--drivers/rtc/rtc-rv3028.c17
-rw-r--r--drivers/rtc/rtc-rv3029c2.c442
-rw-r--r--drivers/rtc/rtc-rv8803.c16
-rw-r--r--drivers/rtc/rtc-rx8010.c25
-rw-r--r--drivers/rtc/rtc-rx8025.c27
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-stm32.c5
-rw-r--r--drivers/rtc/rtc-sun6i.c16
-rw-r--r--drivers/rtc/rtc-tps6586x.c4
-rw-r--r--drivers/rtc/rtc-zynqmp.c4
-rw-r--r--drivers/s390/block/dasd_eckd.c28
-rw-r--r--drivers/s390/block/dasd_fba.h2
-rw-r--r--drivers/s390/block/dasd_proc.c17
-rw-r--r--drivers/s390/cio/blacklist.c14
-rw-r--r--drivers/s390/cio/css.c11
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/qdio.h4
-rw-r--r--drivers/s390/cio/qdio_debug.c5
-rw-r--r--drivers/s390/cio/qdio_main.c29
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h4
-rw-r--r--drivers/s390/crypto/Makefile3
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/ap_bus.h6
-rw-r--r--drivers/s390/crypto/ap_card.c8
-rw-r--r--drivers/s390/crypto/ap_queue.c11
-rw-r--r--drivers/s390/crypto/pkey_api.c472
-rw-r--r--drivers/s390/crypto/zcrypt_api.c43
-rw-r--r--drivers/s390/crypto/zcrypt_api.h1
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c4
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c274
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c1293
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h124
-rw-r--r--drivers/s390/net/qeth_core.h98
-rw-r--r--drivers/s390/net/qeth_core_main.c565
-rw-r--r--drivers/s390/net/qeth_core_mpc.h26
-rw-r--r--drivers/s390/net/qeth_core_sys.c4
-rw-r--r--drivers/s390/net/qeth_l2.h1
-rw-r--r--drivers/s390/net/qeth_l2_main.c177
-rw-r--r--drivers/s390/net/qeth_l2_sys.c33
-rw-r--r--drivers/s390/net/qeth_l3.h6
-rw-r--r--drivers/s390/net/qeth_l3_main.c309
-rw-r--r--drivers/s390/net/qeth_l3_sys.c212
-rw-r--r--drivers/scsi/BusLogic.c110
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c4
-rw-r--r--drivers/scsi/ch.c9
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c9
-rw-r--r--drivers/scsi/esp_scsi.c22
-rw-r--r--drivers/scsi/esp_scsi.h41
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_dev.c20
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c74
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c41
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c57
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/isci/init.c3
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/lasi700.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_port.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c8
-rw-r--r--drivers/scsi/libsas/sas_task.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c108
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h17
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c115
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c134
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h18
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h6
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h19
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_image.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c341
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h45
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c46
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c220
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c11
-rw-r--r--drivers/scsi/mvsas/mv_init.c3
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrb.h4
-rw-r--r--drivers/scsi/myrs.c2
-rw-r--r--drivers/scsi/myrs.h4
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/qla1280.c20
-rw-r--r--drivers/scsi/qla1280.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h50
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c175
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h24
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c51
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c72
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c15
-rw-r--r--drivers/scsi/scsi_ioctl.c54
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_logging.h2
-rw-r--r--drivers/scsi/scsi_proc.c29
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c44
-rw-r--r--drivers/scsi/sd.c63
-rw-r--r--drivers/scsi/sd_zbc.c38
-rw-r--r--drivers/scsi/sg.c200
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/sr.c53
-rw-r--r--drivers/scsi/st.c51
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/sun3x_esp.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.c4
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c107
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c206
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h32
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c22
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.h4
-rw-r--r--drivers/scsi/ufs/ufs.h31
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h9
-rw-r--r--drivers/scsi/ufs/ufshcd.c715
-rw-r--r--drivers/scsi/ufs/ufshcd.h34
-rw-r--r--drivers/scsi/ufs/unipro.h11
-rw-r--r--drivers/scsi/vmw_pvscsi.c20
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/scsi/zorro_esp.c6
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/userimask.c2
-rw-r--r--drivers/siox/siox.h2
-rw-r--r--drivers/slimbus/qcom-ctrl.c2
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c20
-rw-r--r--drivers/slimbus/slimbus.h2
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c24
-rw-r--r--drivers/soc/atmel/soc.c5
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c30
-rw-r--r--drivers/soc/fsl/qe/Kconfig3
-rw-r--r--drivers/soc/fsl/qe/gpio.c36
-rw-r--r--drivers/soc/fsl/qe/qe.c104
-rw-r--r--drivers/soc/fsl/qe/qe_common.c50
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c285
-rw-r--r--drivers/soc/fsl/qe/qe_ic.h99
-rw-r--r--drivers/soc/fsl/qe/qe_io.c70
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c8
-rw-r--r--drivers/soc/fsl/qe/ucc.c27
-rw-r--r--drivers/soc/fsl/qe/ucc_fast.c86
-rw-r--r--drivers/soc/fsl/qe/ucc_slow.c60
-rw-r--r--drivers/soc/fsl/qe/usb.c2
-rw-r--r--drivers/soc/imx/Kconfig2
-rw-r--r--drivers/soc/imx/soc-imx8.c9
-rw-r--r--drivers/soc/lantiq/fpi-bus.c4
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c149
-rw-r--r--drivers/soc/qcom/Kconfig30
-rw-r--r--drivers/soc/qcom/qmi_interface.c8
-rw-r--r--drivers/soc/qcom/rpmhpd.c56
-rw-r--r--drivers/soc/renesas/Kconfig14
-rw-r--r--drivers/soc/renesas/rcar-rst.c2
-rw-r--r--drivers/soc/samsung/Kconfig2
-rw-r--r--drivers/soc/samsung/exynos-chipid.c2
-rw-r--r--drivers/soc/samsung/exynos-pmu.c6
-rw-r--r--drivers/soc/samsung/exynos-pmu.h2
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c2
-rw-r--r--drivers/soc/samsung/exynos4-pmu.c2
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c2
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c2
-rw-r--r--drivers/soc/sifive/Kconfig10
-rw-r--r--drivers/soc/sifive/Makefile3
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c178
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/flowctrl.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c5
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c32
-rw-r--r--drivers/soc/tegra/fuse/fuse.h4
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c38
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soc/tegra/regulators-tegra20.c8
-rw-r--r--drivers/soc/tegra/regulators-tegra30.c6
-rw-r--r--drivers/soc/ti/Kconfig11
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/k3-ringacc.c1157
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c7
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c4
-rw-r--r--drivers/soc/xilinx/Kconfig6
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c4
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c120
-rw-r--r--drivers/soundwire/Kconfig9
-rw-r--r--drivers/soundwire/Makefile4
-rw-r--r--drivers/soundwire/bus.c55
-rw-r--r--drivers/soundwire/cadence_master.c66
-rw-r--r--drivers/soundwire/intel.c23
-rw-r--r--drivers/soundwire/intel.h13
-rw-r--r--drivers/soundwire/intel_init.c32
-rw-r--r--drivers/soundwire/qcom.c861
-rw-r--r--drivers/soundwire/stream.c8
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-atmel.c29
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-bcm2835.c47
-rw-r--r--drivers/spi/spi-bitbang.c21
-rw-r--r--drivers/spi/spi-cadence.c6
-rw-r--r--drivers/spi/spi-cavium-thunderx.c2
-rw-r--r--drivers/spi/spi-dw-mid.c2
-rw-r--r--drivers/spi/spi-dw.c26
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-fsl-dspi.c36
-rw-r--r--drivers/spi/spi-fsl-lpspi.c36
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c45
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c284
-rw-r--r--drivers/spi/spi-img-spfi.c18
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-jcore.c2
-rw-r--r--drivers/spi/spi-meson-spicc.c25
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-npcm-fiu.c2
-rw-r--r--drivers/spi/spi-npcm-pspi.c57
-rw-r--r--drivers/spi/spi-nxp-fspi.c2
-rw-r--r--drivers/spi/spi-oc-tiny.c50
-rw-r--r--drivers/spi/spi-orion.c3
-rw-r--r--drivers/spi/spi-pxa2xx.c35
-rw-r--r--drivers/spi/spi-qcom-qspi.c9
-rw-r--r--drivers/spi/spi-rspi.c23
-rw-r--r--drivers/spi/spi-sh-msiof.c471
-rw-r--r--drivers/spi/spi-sirf.c12
-rw-r--r--drivers/spi/spi-sprd.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c30
-rw-r--r--drivers/spi/spi-stm32.c79
-rw-r--r--drivers/spi/spi-tegra114.c4
-rw-r--r--drivers/spi/spi-ti-qspi.c93
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-uniphier.c258
-rw-r--r--drivers/spi/spi.c46
-rw-r--r--drivers/spmi/spmi-pmic-arb.c4
-rw-r--r--drivers/ssb/driver_extif.c2
-rw-r--r--drivers/ssb/driver_pcicore.c6
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/android/ashmem.c6
-rw-r--r--drivers/staging/axis-fifo/Kconfig2
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c160
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c4
-rw-r--r--drivers/staging/comedi/drivers/das6402.c2
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c12
-rw-r--r--drivers/staging/exfat/Kconfig26
-rw-r--r--drivers/staging/exfat/Makefile2
-rw-r--r--drivers/staging/exfat/exfat.h93
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c16
-rw-r--r--drivers/staging/exfat/exfat_core.c211
-rw-r--r--drivers/staging/exfat/exfat_super.c175
-rw-r--r--drivers/staging/gasket/gasket_core.c2
-rw-r--r--drivers/staging/hp/hp100.c11
-rw-r--r--drivers/staging/iio/accel/adis16203.c8
-rw-r--r--drivers/staging/iio/accel/adis16240.c15
-rw-r--r--drivers/staging/isdn/Kconfig12
-rw-r--r--drivers/staging/isdn/Makefile8
-rw-r--r--drivers/staging/isdn/TODO22
-rw-r--r--drivers/staging/isdn/avm/Kconfig65
-rw-r--r--drivers/staging/isdn/avm/Makefile12
-rw-r--r--drivers/staging/isdn/avm/avm_cs.c166
-rw-r--r--drivers/staging/isdn/avm/avmcard.h581
-rw-r--r--drivers/staging/isdn/avm/b1.c819
-rw-r--r--drivers/staging/isdn/avm/b1dma.c981
-rw-r--r--drivers/staging/isdn/avm/b1isa.c243
-rw-r--r--drivers/staging/isdn/avm/b1pci.c416
-rw-r--r--drivers/staging/isdn/avm/b1pcmcia.c224
-rw-r--r--drivers/staging/isdn/avm/c4.c1317
-rw-r--r--drivers/staging/isdn/avm/t1isa.c594
-rw-r--r--drivers/staging/isdn/avm/t1pci.c259
-rw-r--r--drivers/staging/isdn/gigaset/Kconfig62
-rw-r--r--drivers/staging/isdn/gigaset/Makefile17
-rw-r--r--drivers/staging/isdn/gigaset/asyncdata.c606
-rw-r--r--drivers/staging/isdn/gigaset/bas-gigaset.c2672
-rw-r--r--drivers/staging/isdn/gigaset/capi.c2517
-rw-r--r--drivers/staging/isdn/gigaset/common.c1153
-rw-r--r--drivers/staging/isdn/gigaset/dummyll.c74
-rw-r--r--drivers/staging/isdn/gigaset/ev-layer.c1910
-rw-r--r--drivers/staging/isdn/gigaset/gigaset.h827
-rw-r--r--drivers/staging/isdn/gigaset/interface.c613
-rw-r--r--drivers/staging/isdn/gigaset/isocdata.c1006
-rw-r--r--drivers/staging/isdn/gigaset/proc.c77
-rw-r--r--drivers/staging/isdn/gigaset/ser-gigaset.c796
-rw-r--r--drivers/staging/isdn/gigaset/usb-gigaset.c959
-rw-r--r--drivers/staging/isdn/hysdn/Kconfig15
-rw-r--r--drivers/staging/isdn/hysdn/Makefile12
-rw-r--r--drivers/staging/isdn/hysdn/boardergo.c445
-rw-r--r--drivers/staging/isdn/hysdn/boardergo.h100
-rw-r--r--drivers/staging/isdn/hysdn/hycapi.c785
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_boot.c400
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_defs.h282
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_init.c213
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_net.c330
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_pof.h78
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_procconf.c411
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_proclog.c357
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_sched.c197
-rw-r--r--drivers/staging/isdn/hysdn/ince1pc.h134
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c4
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c122
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c2
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c2
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c4
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-core.c4
-rw-r--r--drivers/staging/media/hantro/Makefile1
-rw-r--r--drivers/staging/media/hantro/hantro.h66
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c11
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c4
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c6
-rw-r--r--drivers/staging/media/hantro/hantro_g1_regs.h53
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c6
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c2
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h17
-rw-r--r--drivers/staging/media/hantro/hantro_postproc.c148
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c109
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c10
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c4
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c4
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c3
-rw-r--r--drivers/staging/media/ipu3/TODO6
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c2
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c5
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c18
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h1
-rw-r--r--drivers/staging/media/meson/vdec/vdec_1.c29
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c60
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.h4
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml76
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig13
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Makefile2
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/TODO6
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c388
-rw-r--r--drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml192
-rw-r--r--drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst23
-rw-r--r--drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst22
-rw-r--r--drivers/staging/media/rkisp1/Kconfig17
-rw-r--r--drivers/staging/media/rkisp1/Makefile8
-rw-r--r--drivers/staging/media/rkisp1/TODO23
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-capture.c1437
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-common.c37
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-common.h337
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-dev.c574
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-isp.c1164
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-params.c1630
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-regs.h1264
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-resizer.c775
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-stats.c530
-rw-r--r--drivers/staging/media/rkisp1/uapi/rkisp1-config.h819
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c26
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h1
-rw-r--r--drivers/staging/media/tegra-vde/Kconfig2
-rw-r--r--drivers/staging/media/tegra-vde/vde.c6
-rw-r--r--drivers/staging/media/tegra-vde/vde.h2
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/cdev/Makefile1
-rw-r--r--drivers/staging/most/cdev/cdev.c5
-rw-r--r--drivers/staging/most/configfs.c59
-rw-r--r--drivers/staging/most/core.c204
-rw-r--r--drivers/staging/most/dim2/Makefile1
-rw-r--r--drivers/staging/most/dim2/dim2.c5
-rw-r--r--drivers/staging/most/i2c/Makefile1
-rw-r--r--drivers/staging/most/i2c/i2c.c2
-rw-r--r--drivers/staging/most/most.h (renamed from drivers/staging/most/core.h)30
-rw-r--r--drivers/staging/most/net/Makefile1
-rw-r--r--drivers/staging/most/net/net.c17
-rw-r--r--drivers/staging/most/sound/Makefile1
-rw-r--r--drivers/staging/most/sound/sound.c54
-rw-r--r--drivers/staging/most/usb/Makefile1
-rw-r--r--drivers/staging/most/usb/usb.c26
-rw-r--r--drivers/staging/most/video/Makefile1
-rw-r--r--drivers/staging/most/video/video.c6
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi2
-rw-r--r--drivers/staging/nvec/nvec_kbd.c2
-rw-r--r--drivers/staging/octeon-usb/Kconfig11
-rw-r--r--drivers/staging/octeon-usb/Makefile2
-rw-r--r--drivers/staging/octeon-usb/TODO8
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3737
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h1847
-rw-r--r--drivers/staging/octeon/Kconfig16
-rw-r--r--drivers/staging/octeon/Makefile19
-rw-r--r--drivers/staging/octeon/TODO9
-rw-r--r--drivers/staging/octeon/ethernet-defines.h40
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c178
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h28
-rw-r--r--drivers/staging/octeon/ethernet-mem.c154
-rw-r--r--drivers/staging/octeon/ethernet-mem.h9
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c158
-rw-r--r--drivers/staging/octeon/ethernet-rx.c538
-rw-r--r--drivers/staging/octeon/ethernet-rx.h31
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c30
-rw-r--r--drivers/staging/octeon/ethernet-spi.c226
-rw-r--r--drivers/staging/octeon/ethernet-tx.c717
-rw-r--r--drivers/staging/octeon/ethernet-tx.h14
-rw-r--r--drivers/staging/octeon/ethernet-util.h47
-rw-r--r--drivers/staging/octeon/ethernet.c992
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h107
-rw-r--r--drivers/staging/octeon/octeon-stubs.h1433
-rw-r--r--drivers/staging/qlge/qlge.h15
-rw-r--r--drivers/staging/qlge/qlge_dbg.c32
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c39
-rw-r--r--drivers/staging/qlge/qlge_main.c221
-rw-r--r--drivers/staging/qlge/qlge_mpi.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c20
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c200
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c8
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_rtl8188e.c82
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c41
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c97
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h2
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h16
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c15
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c15
-rw-r--r--drivers/staging/rtl8192u/Makefile4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile27
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c62
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c15
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c54
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.c30
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h8
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c23
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c44
-rw-r--r--drivers/staging/rts5208/Makefile2
-rw-r--r--drivers/staging/rts5208/rtsx.c9
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/uwb/whc-rc.c6
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c19
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c9
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c2
-rw-r--r--drivers/staging/vt6655/rf.c2
-rw-r--r--drivers/staging/vt6656/baseband.c12
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/vt6656/device.h20
-rw-r--r--drivers/staging/vt6656/dpc.c114
-rw-r--r--drivers/staging/vt6656/firmware.c14
-rw-r--r--drivers/staging/vt6656/int.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c4
-rw-r--r--drivers/staging/vt6656/rxtx.c26
-rw-r--r--drivers/staging/vt6656/usbpipe.c27
-rw-r--r--drivers/staging/vt6656/usbpipe.h7
-rw-r--r--drivers/staging/vt6656/wcmd.c1
-rw-r--r--drivers/staging/wfx/TODO71
-rw-r--r--drivers/staging/wfx/bh.c3
-rw-r--r--drivers/staging/wfx/bus_spi.c9
-rw-r--r--drivers/staging/wfx/data_rx.c85
-rw-r--r--drivers/staging/wfx/data_rx.h4
-rw-r--r--drivers/staging/wfx/data_tx.c355
-rw-r--r--drivers/staging/wfx/data_tx.h32
-rw-r--r--drivers/staging/wfx/debug.c2
-rw-r--r--drivers/staging/wfx/fwio.c28
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h35
-rw-r--r--drivers/staging/wfx/hif_api_mib.h35
-rw-r--r--drivers/staging/wfx/hif_rx.c115
-rw-r--r--drivers/staging/wfx/hif_tx.c164
-rw-r--r--drivers/staging/wfx/hif_tx.h28
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h184
-rw-r--r--drivers/staging/wfx/hwio.h15
-rw-r--r--drivers/staging/wfx/main.c12
-rw-r--r--drivers/staging/wfx/queue.c217
-rw-r--r--drivers/staging/wfx/queue.h10
-rw-r--r--drivers/staging/wfx/scan.c321
-rw-r--r--drivers/staging/wfx/scan.h26
-rw-r--r--drivers/staging/wfx/secure_link.h8
-rw-r--r--drivers/staging/wfx/sta.c1058
-rw-r--r--drivers/staging/wfx/sta.h20
-rw-r--r--drivers/staging/wfx/traces.h14
-rw-r--r--drivers/staging/wfx/wfx.h43
-rw-r--r--drivers/staging/wilc1000/fw.h119
-rw-r--r--drivers/staging/wilc1000/hif.c90
-rw-r--r--drivers/staging/wilc1000/hif.h19
-rw-r--r--drivers/staging/wilc1000/netdev.c63
-rw-r--r--drivers/staging/wilc1000/netdev.h1
-rw-r--r--drivers/staging/wilc1000/sdio.c178
-rw-r--r--drivers/staging/wilc1000/spi.c285
-rw-r--r--drivers/staging/wilc1000/wlan.c192
-rw-r--r--drivers/staging/wilc1000/wlan.h2
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.c152
-rw-r--r--drivers/staging/wilc1000/wlan_if.h1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/tcm_fc/tfc_io.c1
-rw-r--r--drivers/tc/tc-driver.c5
-rw-r--r--drivers/tc/tc.c2
-rw-r--r--drivers/tee/Kconfig4
-rw-r--r--drivers/tee/Makefile1
-rw-r--r--drivers/tee/amdtee/Kconfig8
-rw-r--r--drivers/tee/amdtee/Makefile5
-rw-r--r--drivers/tee/amdtee/amdtee_if.h183
-rw-r--r--drivers/tee/amdtee/amdtee_private.h159
-rw-r--r--drivers/tee/amdtee/call.c373
-rw-r--r--drivers/tee/amdtee/core.c518
-rw-r--r--drivers/tee/amdtee/shm_pool.c93
-rw-r--r--drivers/tee/optee/Kconfig1
-rw-r--r--drivers/tee/optee/core.c153
-rw-r--r--drivers/tee/optee/shm_pool.c15
-rw-r--r--drivers/thermal/Kconfig35
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/amlogic_thermal.c6
-rw-r--r--drivers/thermal/armada_thermal.c7
-rw-r--r--drivers/thermal/broadcom/Kconfig7
-rw-r--r--drivers/thermal/broadcom/Makefile1
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c123
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c96
-rw-r--r--drivers/thermal/clock_cooling.c2
-rw-r--r--drivers/thermal/cpufreq_cooling.c (renamed from drivers/thermal/cpu_cooling.c)7
-rw-r--r--drivers/thermal/cpuidle_cooling.c232
-rw-r--r--drivers/thermal/db8500_thermal.c4
-rw-r--r--drivers/thermal/devfreq_cooling.c3
-rw-r--r--drivers/thermal/fair_share.c4
-rw-r--r--drivers/thermal/gov_bang_bang.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c7
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c13
-rw-r--r--drivers/thermal/max77620_thermal.c2
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/of-thermal.c70
-rw-r--r--drivers/thermal/qcom/tsens.c3
-rw-r--r--drivers/thermal/qoriq_thermal.c337
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c4
-rw-r--r--drivers/thermal/rcar_thermal.c9
-rw-r--r--drivers/thermal/rockchip_thermal.c34
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c9
-rw-r--r--drivers/thermal/st/stm_thermal.c388
-rw-r--r--drivers/thermal/step_wise.c4
-rw-r--r--drivers/thermal/sun8i_thermal.c639
-rw-r--r--drivers/thermal/tegra/soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c20
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_hwmon.c28
-rw-r--r--drivers/thermal/thermal_hwmon.h7
-rw-r--r--drivers/thermal/user_space.c4
-rw-r--r--drivers/thermal/zx2967_thermal.c1
-rw-r--r--drivers/thunderbolt/Kconfig11
-rw-r--r--drivers/thunderbolt/Makefile4
-rw-r--r--drivers/thunderbolt/cap.c11
-rw-r--r--drivers/thunderbolt/ctl.c19
-rw-r--r--drivers/thunderbolt/ctl.h3
-rw-r--r--drivers/thunderbolt/eeprom.c137
-rw-r--r--drivers/thunderbolt/nhi.c3
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c441
-rw-r--r--drivers/thunderbolt/tb.c227
-rw-r--r--drivers/thunderbolt/tb.h101
-rw-r--r--drivers/thunderbolt/tb_msgs.h6
-rw-r--r--drivers/thunderbolt/tb_regs.h65
-rw-r--r--drivers/thunderbolt/tmu.c383
-rw-r--r--drivers/thunderbolt/tunnel.c169
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c764
-rw-r--r--drivers/thunderbolt/xdomain.c6
-rw-r--r--drivers/tty/cyclades.c10
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/n_gsm.c2
-rw-r--r--drivers/tty/n_hdlc.c11
-rw-r--r--drivers/tty/serdev/core.c24
-rw-r--r--drivers/tty/serial/21285.c55
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c5
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c50
-rw-r--r--drivers/tty/serial/8250/8250_core.c1
-rw-r--r--drivers/tty/serial/8250/8250_exar.c6
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c2
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c98
-rw-r--r--drivers/tty/serial/8250/8250_of.c4
-rw-r--r--drivers/tty/serial/8250/8250_omap.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_port.c10
-rw-r--r--drivers/tty/serial/8250/Kconfig21
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/amba-pl010.c5
-rw-r--r--drivers/tty/serial/amba-pl011.c13
-rw-r--r--drivers/tty/serial/apbuart.c5
-rw-r--r--drivers/tty/serial/arc_uart.c5
-rw-r--r--drivers/tty/serial/atmel_serial.c84
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/clps711x.c5
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c9
-rw-r--r--drivers/tty/serial/dz.c7
-rw-r--r--drivers/tty/serial/efm32-uart.c5
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c8
-rw-r--r--drivers/tty/serial/fsl_lpuart.c16
-rw-r--r--drivers/tty/serial/imx.c58
-rw-r--r--drivers/tty/serial/ip22zilog.c7
-rw-r--r--drivers/tty/serial/kgdb_nmi.c4
-rw-r--r--drivers/tty/serial/lantiq.c2
-rw-r--r--drivers/tty/serial/meson_uart.c72
-rw-r--r--drivers/tty/serial/milbeaut_usio.c5
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c11
-rw-r--r--drivers/tty/serial/msm_serial.c36
-rw-r--r--drivers/tty/serial/mux.c7
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/omap-serial.c12
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c12
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c5
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c7
-rw-r--r--drivers/tty/serial/pxa.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c128
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/sa1100.c7
-rw-r--r--drivers/tty/serial/samsung.h147
-rw-r--r--drivers/tty/serial/samsung_tty.c315
-rw-r--r--drivers/tty/serial/sb1250-duart.c9
-rw-r--r--drivers/tty/serial/sccnxp.c5
-rw-r--r--drivers/tty/serial/serial-tegra.c94
-rw-r--r--drivers/tty/serial/serial_core.c85
-rw-r--r--drivers/tty/serial/serial_txx9.c5
-rw-r--r--drivers/tty/serial/sh-sci.c12
-rw-r--r--drivers/tty/serial/sprd_serial.c8
-rw-r--r--drivers/tty/serial/st-asc.c17
-rw-r--r--drivers/tty/serial/stm32-usart.c5
-rw-r--r--drivers/tty/serial/sunhv.c5
-rw-r--r--drivers/tty/serial/sunsab.c5
-rw-r--r--drivers/tty/serial/sunsu.c5
-rw-r--r--drivers/tty/serial/sunzilog.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c389
-rw-r--r--drivers/tty/serial/vr41xx_siu.c5
-rw-r--r--drivers/tty/serial/vt8500_serial.c5
-rw-r--r--drivers/tty/serial/xilinx_uartps.c51
-rw-r--r--drivers/tty/serial/zs.c7
-rw-r--r--drivers/tty/synclink.c6
-rw-r--r--drivers/tty/synclink_gt.c24
-rw-r--r--drivers/tty/synclinkmp.c34
-rw-r--r--drivers/tty/sysrq.c17
-rw-r--r--drivers/tty/tty_baudrate.c28
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/vt/.gitignore1
-rw-r--r--drivers/tty/vt/Makefile6
-rw-r--r--drivers/tty/vt/conmakehash.c290
-rw-r--r--drivers/tty/vt/vt.c8
-rw-r--r--drivers/uio/uio_dmem_genirq.c6
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c216
-rw-r--r--drivers/usb/cdns3/debug.h2
-rw-r--r--drivers/usb/cdns3/gadget.c550
-rw-r--r--drivers/usb/cdns3/gadget.h26
-rw-r--r--drivers/usb/cdns3/trace.h93
-rw-r--r--drivers/usb/chipidea/Kconfig1
-rw-r--r--drivers/usb/chipidea/ci.h10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c9
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.c4
-rw-r--r--drivers/usb/chipidea/host.h2
-rw-r--r--drivers/usb/core/config.c82
-rw-r--r--drivers/usb/core/devio.c4
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/dwc2/core_intr.c7
-rw-r--r--drivers/usb/dwc2/debugfs.c3
-rw-r--r--drivers/usb/dwc2/gadget.c25
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc3/core.c3
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/dwc3/gadget.c63
-rw-r--r--drivers/usb/dwc3/gadget.h14
-rw-r--r--drivers/usb/dwc3/host.c6
-rw-r--r--drivers/usb/early/xhci-dbc.c2
-rw-r--r--drivers/usb/gadget/Kconfig28
-rw-r--r--drivers/usb/gadget/configfs.c43
-rw-r--r--drivers/usb/gadget/function/f_ecm.c16
-rw-r--r--drivers/usb/gadget/function/f_fs.c13
-rw-r--r--drivers/usb/gadget/function/f_ncm.c17
-rw-r--r--drivers/usb/gadget/function/rndis.c17
-rw-r--r--drivers/usb/gadget/function/u_audio.c29
-rw-r--r--drivers/usb/gadget/legacy/Kconfig28
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c2
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/multi.c2
-rw-r--r--drivers/usb/gadget/legacy/ncm.c2
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c16
-rw-r--r--drivers/usb/gadget/udc/net2272.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.c2
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c6
-rw-r--r--drivers/usb/host/Kconfig56
-rw-r--r--drivers/usb/host/ehci-exynos.c4
-rw-r--r--drivers/usb/host/ehci-mv.c21
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c6
-rw-r--r--drivers/usb/host/ehci-q.c13
-rw-r--r--drivers/usb/host/ehci-sh.c7
-rw-r--r--drivers/usb/host/ehci-tegra.c16
-rw-r--r--drivers/usb/host/ohci-da8xx.c8
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c5
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c440
-rw-r--r--drivers/usb/isp1760/isp1760-if.c4
-rw-r--r--drivers/usb/misc/usb3503.c94
-rw-r--r--drivers/usb/musb/Kconfig12
-rw-r--r--drivers/usb/musb/Makefile4
-rw-r--r--drivers/usb/musb/davinci.c57
-rw-r--r--drivers/usb/musb/jz4740.c82
-rw-r--r--drivers/usb/musb/mediatek.c582
-rw-r--r--drivers/usb/musb/musb_am335x.c44
-rw-r--r--drivers/usb/musb/musb_core.c199
-rw-r--r--drivers/usb/musb/musb_core.h20
-rw-r--r--drivers/usb/musb/musb_dma.h9
-rw-r--r--drivers/usb/musb/musb_host.c46
-rw-r--r--drivers/usb/musb/musb_io.h18
-rw-r--r--drivers/usb/musb/musb_trace.h33
-rw-r--r--drivers/usb/musb/musbhsdma.c58
-rw-r--r--drivers/usb/musb/omap2430.c164
-rw-r--r--drivers/usb/musb/sunxi.c6
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/ux500_dma.c4
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c26
-rw-r--r--drivers/usb/phy/phy-am335x.c2
-rw-r--r--drivers/usb/phy/phy-generic.c39
-rw-r--r--drivers/usb/phy/phy-generic.h3
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c96
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c896
-rw-r--r--drivers/usb/phy/phy-ulpi.c48
-rw-r--r--drivers/usb/phy/phy.c13
-rw-r--r--drivers/usb/renesas_usbhs/common.c22
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c4
-rw-r--r--drivers/usb/renesas_usbhs/rza2.c2
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c2
-rw-r--r--drivers/usb/serial/Kconfig3
-rw-r--r--drivers/usb/serial/ch341.c6
-rw-r--r--drivers/usb/serial/cyberjack.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c16
-rw-r--r--drivers/usb/serial/ir-usb.c185
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/opticon.c63
-rw-r--r--drivers/usb/serial/option.c16
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/usb-serial-simple.c2
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/serial/usb-wwan.h1
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/typec/altmodes/displayport.c5
-rw-r--r--drivers/usb/typec/bus.c42
-rw-r--r--drivers/usb/typec/class.c52
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c5
-rw-r--r--drivers/usb/typec/tcpm/Kconfig1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c26
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c6
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/displayport.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c95
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h32
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c191
-rw-r--r--drivers/usb/usbip/usbip_common.c3
-rw-r--r--drivers/usb/usbip/vhci_rx.c13
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c8
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c8
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c4
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c35
-rw-r--r--drivers/video/backlight/ams369fg06.c1
-rw-r--r--drivers/video/backlight/bd6107.c24
-rw-r--r--drivers/video/backlight/qcom-wled.c4
-rw-r--r--drivers/video/console/Kconfig1
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/carminefb.c4
-rw-r--r--drivers/video/fbdev/hyperv_fb.c184
-rw-r--r--drivers/video/fbdev/i810/i810_main.c2
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c4
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c4
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c4
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c4
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c4
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c9
-rw-r--r--drivers/video/fbdev/sh7760fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/fbdev/sstfb.c4
-rw-r--r--drivers/video/fbdev/stifb.c4
-rw-r--r--drivers/video/fbdev/tdfxfb.c2
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c4
-rw-r--r--drivers/video/fbdev/valkyriefb.c2
-rw-r--r--drivers/video/fbdev/vermilion/cr_pll.c2
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c4
-rw-r--r--drivers/video/fbdev/via/via-core.c2
-rw-r--r--drivers/video/fbdev/via/viafbdev.c105
-rw-r--r--drivers/video/fbdev/w100fb.c6
-rw-r--r--drivers/video/logo/Makefile2
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c1
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c1
-rw-r--r--drivers/virtio/virtio_balloon.c21
-rw-r--r--drivers/virtio/virtio_mmio.c15
-rw-r--r--drivers/virtio/virtio_pci_common.c2
-rw-r--r--drivers/visorbus/visorchipset.c11
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_fake.c30
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c348
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/at91sam9_wdt.h21
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/cadence_wdt.c5
-rw-r--r--drivers/watchdog/da9062_wdt.c35
-rw-r--r--drivers/watchdog/dw_wdt.c11
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c2
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c2
-rw-r--r--drivers/watchdog/it87_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c105
-rw-r--r--drivers/watchdog/orion_wdt.c4
-rw-r--r--drivers/watchdog/qcom-wdt.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--drivers/watchdog/rn5t618_wdt.c1
-rw-r--r--drivers/watchdog/sama5d4_wdt.c109
-rw-r--r--drivers/watchdog/stm32_iwdg.c18
-rw-r--r--drivers/watchdog/w83627hf_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c35
-rw-r--r--drivers/watchdog/watchdog_dev.c36
-rw-r--r--drivers/xen/gntdev.c24
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/preempt.c4
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.c37
-rw-r--r--drivers/xen/xen-pciback/conf_space.h7
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c89
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c19
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c66
-rw-r--r--drivers/xen/xen-pciback/pciback.h1
-rw-r--r--drivers/xen/xenbus/xenbus.h2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c43
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c39
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c24
-rw-r--r--drivers/zorro/Makefile2
-rw-r--r--drivers/zorro/proc.c9
4726 files changed, 272266 insertions, 122151 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index aaef17cc6512..31cf17dee252 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -171,7 +171,7 @@ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_PERF_EVENTS) += perf/
obj-$(CONFIG_RAS) += ras/
-obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
+obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 002838d23b86..cc57bab146b5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS
config ACPI_PROCESSOR_CSTATE
def_bool y
+ depends on ACPI_PROCESSOR
depends on IA64 || X86
config ACPI_PROCESSOR_IDLE
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 71511ae2dfcd..ba2612e9a0eb 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -161,6 +161,11 @@ static const struct apd_device_desc hip08_i2c_desc = {
.fixed_clk_rate = 250000000,
};
+static const struct apd_device_desc hip08_lite_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 125000000,
+};
+
static const struct apd_device_desc thunderx2_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 125000000,
@@ -243,6 +248,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "CAV9007", APD_ADDR(thunderx2_i2c_desc) },
{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
+ { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) },
{ "HISI0173", APD_ADDR(hip08_spi_desc) },
{ "NXP0001", APD_ADDR(nxp_i2c_desc) },
#endif
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 433376e819bb..953437a216f6 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -104,7 +104,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
info->gaddr = lpit_native->residency_counter;
if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- info->iomem_addr = ioremap_nocache(info->gaddr.address,
+ info->iomem_addr = ioremap(info->gaddr.address,
info->gaddr.bit_width / 8);
if (!info->iomem_addr)
return;
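
This hunk is part of the tree-wide removal of ioremap_nocache(): plain ioremap() is uncached by default on all supported architectures, making it a drop-in replacement. A minimal sketch of the pattern, with a hypothetical helper name and placeholder arguments:

	#include <linux/io.h>

	/* Hypothetical helper: ioremap() now provides the uncached MMIO
	 * mapping that ioremap_nocache() used to; unmap with iounmap(). */
	static void __iomem *example_map_counter(resource_size_t addr, size_t len)
	{
		return ioremap(addr, len);
	}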
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2c4dda0787e8..5379bc3f275d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -705,3 +705,185 @@ void __init acpi_processor_init(void)
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
acpi_scan_add_handler(&processor_container_handler);
}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+ static bool cst_control_claimed;
+ acpi_status status;
+
+ if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+ return true;
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("ACPI: Failed to claim processor _CST control\n");
+ return false;
+ }
+
+ cst_control_claimed = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
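
A hedged sketch of the intended call pattern (hypothetical caller, not part of this patch): the SMI handshake runs at most once, its result is cached in cst_control_claimed, and callers simply skip _CST processing when the platform refuses to hand over control.

	#include <linux/errno.h>
	#include <acpi/processor.h>

	/* Hypothetical caller: give up on _CST if the platform keeps control;
	 * repeated calls are cheap because the claimed state is cached. */
	static int example_prepare_cst(void)
	{
		return acpi_processor_claim_cst_control() ? 0 : -ENODEV;
	}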
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the _CST
+ * control method under the corresponding ACPI processor object (or processor
+ * device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cst;
+ acpi_status status;
+ u64 count;
+ int last_index = 0;
+ int i, ret = 0;
+
+ status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "No _CST\n");
+ return -ENODEV;
+ }
+
+ cst = buffer.pointer;
+
+ /* There must be at least 2 elements. */
+ if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+ acpi_handle_warn(handle, "Invalid _CST output\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ count = cst->package.elements[0].integer.value;
+
+ /* Validate the number of C-states. */
+ if (count < 1 || count != cst->package.count - 1) {
+ acpi_handle_warn(handle, "Inconsistent _CST data\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ for (i = 1; i <= count; i++) {
+ union acpi_object *element;
+ union acpi_object *obj;
+ struct acpi_power_register *reg;
+ struct acpi_processor_cx cx;
+
+ /*
+ * If there is not enough space for all C-states, skip the
+ * excess ones and log a warning.
+ */
+ if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+ acpi_handle_warn(handle,
+ "No room for more idle states (limit: %d)\n",
+ ACPI_PROCESSOR_MAX_POWER - 1);
+ break;
+ }
+
+ memset(&cx, 0, sizeof(cx));
+
+ element = &cst->package.elements[i];
+ if (element->type != ACPI_TYPE_PACKAGE)
+ continue;
+
+ if (element->package.count != 4)
+ continue;
+
+ obj = &element->package.elements[0];
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ continue;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+ obj = &element->package.elements[1];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.type = obj->integer.value;
+ /*
+ * There are known cases in which the _CST output does not
+ * contain C1, so if the type of the first state found is not
+ * C1, leave an empty slot for C1 to be filled in later.
+ */
+ if (i == 1 && cx.type != ACPI_STATE_C1)
+ last_index = 1;
+
+ cx.address = reg->address;
+ cx.index = last_index + 1;
+
+ if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+ /*
+ * In the majority of cases _CST describes C1 as
+ * a FIXED_HARDWARE C-state, but if the command
+ * line forbids using MWAIT, use CSTATE_HALT for
+ * C1 regardless.
+ */
+ if (cx.type == ACPI_STATE_C1 &&
+ boot_option_idle_override == IDLE_NOMWAIT) {
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ }
+ } else if (cx.type == ACPI_STATE_C1) {
+ /*
+ * In the special case of C1, FIXED_HARDWARE can
+ * be handled by executing the HLT instruction.
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ continue;
+ }
+ } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+ cx.address);
+ } else {
+ continue;
+ }
+
+ if (cx.type == ACPI_STATE_C1)
+ cx.valid = 1;
+
+ obj = &element->package.elements[2];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.latency = obj->integer.value;
+
+ obj = &element->package.elements[3];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ memcpy(&info->states[++last_index], &cx, sizeof(cx));
+ }
+
+ acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+ info->count = last_index;
+
+ end:
+ kfree(buffer.pointer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
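
For context, a hypothetical usage sketch (not part of this patch) that populates an acpi_processor_power object from _CST and logs the result; note that, per the function above, the states array is filled starting at index 1:

	#include <linux/acpi.h>
	#include <acpi/processor.h>

	static void example_log_cstates(acpi_handle handle, u32 cpu)
	{
		struct acpi_processor_power info = { };
		int i;

		if (acpi_processor_evaluate_cst(handle, cpu, &info))
			return;

		/* states[0] is left unused; valid entries start at index 1. */
		for (i = 1; i <= info.count; i++)
			pr_info("C%d: type %d, latency %u\n", i,
				info.states[i].type, info.states[i].latency);
	}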
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 2f380e7381d6..15c5b272e698 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2187,7 +2187,7 @@ int acpi_video_register(void)
if (register_count) {
/*
* if the function of acpi_video_register is already called,
- * don't register the acpi_vide_bus again and return no error.
+ * don't register the acpi_video_bus again and return no error.
*/
goto leave;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 863ade9add6d..173447d50acf 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 54f81eac7ec9..89101e53324b 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index d5478cd4a857..ede4b9cc9e85 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 694cf206fa9a..a676daaa2da5 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 82f81501566b..7ba6e308f146 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c8652f91054e..79f292687bd6 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index fd3beea93421..38ffa2c0a496 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index bcf8f7501db7..6ad0517553d5 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+u8 acpi_hw_check_all_gpes(void);
+
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 20706adbc148..a6d896cda2a5 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 1ea52576f0a2..af58cd2dc9d3 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 283614e82a20..2269e10bc21b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 7da1864798a0..e618ddfab2fd 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8def0e3d690f..9f0219a8cb98 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -260,7 +260,8 @@ struct acpi_object_index_field {
/* The buffer_field is different in that it is part of a Buffer, not an op_region */
struct acpi_object_buffer_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */
+ union acpi_operand_object *buffer_obj; /* Containing Buffer object */
};
/******************************************************************************
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 9d78134428e3..8825394be9ab 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6e32c97cba6c..bc00b85c0a8f 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 387163b962a7..cd0f5df0ea23 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 422cd8f2b92e..6de8a1650d3d 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 2043dff370b1..4c900c108f3f 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index dfbf1dbd4033..734624facda3 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 5fb50634e08e..7c89b470ec81 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 49e412edd7c6..1d541bbac4a3 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 7c3bd4ab60fc..e5234e001acf 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 47d2e5059849..bb9600b867ee 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index e1632b340182..aa71f65395d2 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer,
if (ACPI_FAILURE(status)
|| temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
acpi_os_printf
- ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ("Invalid address space ID: must be between 0 and %u inclusive\n",
ACPI_NUM_PREDEFINED_REGIONS - 1);
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 85b34d02233e..ad17f62e51d9 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 5034fab9cf69..4b5b6e859f62 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 0d3e1ced1f57..63bc5f19fb82 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index faa38a22263a..c901f5aec739 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -243,7 +243,7 @@ cleanup:
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
- * ` walk_state - Current method state
+ * walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a1ffed29903b..9be2a309424c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index f59b4d944f7f..cf67caff878a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 179129a2deb1..c0a14a6a2c20 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 10f32b62608e..d9c26e720cb7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
}
obj_desc->buffer_field.buffer_obj = buffer_desc;
+ obj_desc->buffer_field.is_create_field =
+ aml_opcode == AML_CREATE_FIELD_OP;
/* Reference count for buffer_desc inherits obj_desc count */
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 997faa10f615..d869568d55c2 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d75aae304595..5e81a1ae44cf 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index c88fd31208a5..697974e37edf 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
+ /*
+ * Disassembler: handle create field operators here.
+ *
+ * create_buffer_field is a deferred op that is typically processed in load
+ * pass 2. However, disassembly of control method contents walks the parse
+ * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
+ * in a later walk. This is a problem when there is a control method that
+ * has the same name as the AML_CREATE object. In this case, any use of the
+ * name segment will be detected as a method call rather than a reference
+ * to a buffer field.
+ *
+ * Creating the named object here during disassembly solves this issue by
+ * inserting it in the ACPI namespace, so that references to this name
+ * resolve to a name string rather than a method call.
+ */
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+ (walk_state->op_info->flags & AML_CREATE)) {
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ return_ACPI_STATUS(status);
+ }
+
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 935a8e2623e4..b31457ca926c 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 39acf7b286da..9c397642fed7 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index de79f835a373..809a0c0536b5 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e2f5a05c066..8c83d8c620dc 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5c77bee5d31f..0ced84ae13e4 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 344feba29063..3e39907fedd9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c7adaa7b582..132adff1e131 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 70d21d5ec5f3..6effd8076dcc 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 917892227e09..738873e876ca 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 3ef4e27995f0..5884eba047f7 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index aa98fe07cd1b..ce1eda6beb84 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 1ff126460007..738d4b231f34 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index aee09640d710..aefc0145e583 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 279ef0557aa3..e4e012297eee 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index e528fe56b755..1a15b0087379 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 04a40d563dd6..f2de66bfd8a7 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
+/******************************************************************************
+ *
+ * FUNCTION: acpi_any_gpe_status_set
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Whether or not the status bit is set for any GPE
+ *
+ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
+ * of them is set, or FALSE otherwise.
+ *
+ ******************************************************************************/
+u32 acpi_any_gpe_status_set(void)
+{
+ acpi_status status;
+ u8 ret;
+
+ ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ ret = acpi_hw_check_all_gpes();
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return (ret);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
+
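
This interface looks tailored to suspend-to-idle style polling, where the kernel wants to know whether any enabled GPE has triggered without actually dispatching it. A simplified, hypothetical wait loop under that assumption:

	#include <linux/acpi.h>

	/* Hypothetical, simplified loop; a real caller would enter a
	 * low-power wait between checks rather than spinning. */
	static void example_wait_for_gpe(void)
	{
		while (!acpi_any_gpe_status_set())
			;
	}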
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 47265b073e6f..da97fd0c6b51 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index c7af07566b7b..43711412722f 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 46a8baf28bd0..68efd704e2dc 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index ca2966bacb50..50c7aad2e86d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index f376fc00064e..a17482428b46 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index b1aeec8cac55..a5223dcaee70 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index a9bc938a3b55..47a4d9a40d6b 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d3d2dbfba680..e85eb31e5075 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
* RETURN: Status
*
* DESCRIPTION: Read from a named field. Returns either an Integer or a
- * Buffer, depending on the size of the field.
+ * Buffer, depending on the size of the field and whether the
+ * field was created by the create_field() operator.
*
******************************************************************************/
@@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
* the use of arithmetic operators on the returned value if the
* field size is equal or smaller than an Integer.
*
+ * However, all buffer fields created by the create_field() operator
+ * need to remain buffers to match other AML interpreter implementations.
+ *
* Note: Field.length is in bits.
*/
buffer_length =
(acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
- if (buffer_length > acpi_gbl_integer_byte_width) {
+ if (buffer_length > acpi_gbl_integer_byte_width ||
+ (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+ obj_desc->buffer_field.is_create_field)) {
/* Field is too large for an Integer, create a Buffer instead */
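
The new condition makes the return-type rule two-fold: a field read yields a Buffer when the data does not fit in an Integer, or when the buffer field was created by create_field(). A standalone restatement of that rule (an assumed simplification, not the ACPICA source):

	#include <stdbool.h>
	#include <stddef.h>

	/* Assumed simplification of the check in acpi_ex_read_data_from_field(). */
	static bool field_read_returns_buffer(size_t buffer_length,
					      size_t integer_byte_width,
					      bool is_create_field)
	{
		return buffer_length > integer_byte_width || is_create_field;
	}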
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 95a0dcb4f7b9..ade35ff1c7ba 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 60e854965af9..717e3998fd77 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 775cd62af5b3..9ff247cba571 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 6b76be5212a4..74f8b0d0452b 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 06e35ea09823..a46d685a3ffc 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 5e4a31a11df4..03241d18ac1d 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index a4ebce417930..c8d0d75fc450 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 31385a0b2dab..55d0fa056fe7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 728d752f7adc..a4e306690a21 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c08521194b29..d15a66de26c0 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index b223d01e6bf8..3e4018678c09 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 36da5c0ef69c..912a078c60a4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index bdfe4d33b483..4d1b22971d58 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index c5aa4b0deb70..760bc7cef55a 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 7f3c3571c292..3adc0a29d890 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 4e43c8277f07..8c34f4e2ab8f 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc9e2b1c1ad9..dc66696080a5 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index a538f7799b78..f329b01672bb 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index db7f93ca539f..832a47885b99 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 75380be1c2ef..8fefa6feac2f 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 926f7e080f22..9b9aac27ff7e 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index dee3affaca49..d9be5d0545d4 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 565bd3f29f31..f4c285c2f595 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -446,6 +446,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/******************************************************************************
*
+ * FUNCTION: acpi_hw_get_gpe_block_status
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Success
+ *
+ * DESCRIPTION: Produce a combined GPE status bit mask for the given block.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ret_ptr)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ u64 in_enable, in_status;
+ acpi_status status;
+ u8 *ret = ret_ptr;
+ u32 i;
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+ gpe_register_info = &gpe_block->register_info[i];
+
+ status = acpi_hw_read(&in_enable,
+ &gpe_register_info->enable_address);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+
+ status = acpi_hw_read(&in_status,
+ &gpe_register_info->status_address);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+
+ *ret |= in_enable & in_status;
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_hw_disable_all_gpes
*
* PARAMETERS: None
@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
return_ACPI_STATUS(status);
}
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_check_all_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Combined status of all GPEs
+ *
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * status bit is set for at least one of them, or FALSE otherwise.
+ *
+ ******************************************************************************/
+
+u8 acpi_hw_check_all_gpes(void)
+{
+ u8 ret = 0;
+
+ ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
+
+ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+
+ return (ret != 0);
+}
+
#endif /* !ACPI_REDUCED_HARDWARE */
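
The per-block walk above reduces each register pair to (enable & status) and ORs the results together, so the final value is nonzero exactly when at least one enabled GPE has fired. A standalone sketch of that reduction (an assumed simplification; the hardware registers are 8 bits wide but are read into u64 values here):

	#include <stdint.h>
	#include <stddef.h>

	/* OR (enable & status) across all registers; a nonzero accumulator
	 * means some enabled GPE has its status bit set. */
	static uint8_t any_enabled_gpe_pending(const uint64_t *enable,
					       const uint64_t *status, size_t n)
	{
		uint64_t acc = 0;
		size_t i;

		for (i = 0; i < n; i++)
			acc |= enable[i] & status[i];

		return acc != 0;
	}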
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index b62db8ec446f..243a25add28f 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2fb9f75d71c5..07473ddfa9a9 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index cd576153257c..4d94861e6093 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c4fd97104024..134dbfadcd15 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 2919746c9041..a4b66f4b2714 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 0e97ed38973f..d5e8405e9d8f 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index c86d0770ed6e..c86c82939ebb 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 9ad340f644a1..994f0b556c60 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 73e5c83c8c9f..b691fe20e384 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 61e9dfc9fe8c..e16f6a0c2c3f 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index d7c4d6e8e21e..9ba17891edb6 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index f16cf5e4742c..7e74a765e785 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 2f9d93122d0c..0cea9c363ace 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 9a80e3b23496..237b3ddeb075 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index be86fea8e4d4..90db2d85e7f5 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 663d85e0adba..125143c41bb8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b8d007c84d32..e66abdab8f31 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index ceea6af79d12..b7f3e8603ad8 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 161e60ddfb69..984129dcaa0c 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e62c7897fdf1..3b40db4ad9f3 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 207805047bc4..3cf0687b9915 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ded2779fc8ea..2480c26c5171 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 43775c5ce17c..28af49263ebf 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 15e7563829f1..ab9327f6a63c 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9b386530ffbe..c780046bf294 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index f153ca804740..fceb311995e9 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 22d8a2becdd0..c8aef0694864 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 2512f584fa3c..00efae2f95ba 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index cf91841297c2..0fe3adf6b0e5 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index ee2ee2c858f2..1bbfc8def388 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 2cf36451e46f..523b1e9b98d4 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 0041bfba9abc..907edc5edba7 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index b2abb40023a6..56d81e490a5c 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index ef1ffd36ab3f..0bb15add2245 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 4764f849cb78..0b3494ad9a70 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index c5f0b8ec70cc..dfe1ac3ae34a 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 1640685bf4ae..f8403d480318 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0782acf85722..bcba993d4dac 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index e2859d09ca2e..0edc6ef5d46d 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index bb260376bd59..99fa48722cf6 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index d64da4d9e8d0..303ab51b4fcf 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index f6cd7d4f698b..d78656d960e8 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index db897af1de05..f2ec427f4e29 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 8533fce7fa93..1b03a2747401 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1fb8327f3c3b..41bdd0278dd8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5b169b5f0f1a..0c8cb0612414 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 65beaa237669..befdd13b403b 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 558a9f3b0678..8180d1a458f5 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index b0622ec4bb85..e6dcbdc3fc6e 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index b6da135d5f41..0e02f12513dc 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 30198c828ab6..3bb06935a2ad 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6f33e7c72327..fdbc397c038d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 8b4ff11d617a..46be549539e7 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index eee97a902696..3e60bdac2200 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index ad2b218039d0..0a01c08dad8a 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 1b0f68f5ed8c..05fe3470fb93 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 5839f2fa7400..a874dac7db5c 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 14de4d15e618..d366be431a84 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 0a7cf8007643..b8039954b0d1 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index f497c4b30e65..ca7c9f0144ef 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index cf769e94fe0f..653e3bb20036 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 8906c80175e6..103acbbfcf9a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
- timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);
+ timer_setup(&ghes->timer, ghes_poll_func, 0);
ghes_add_timer(ghes);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 33f71983e001..ed3d2d1a7ae9 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "ACPI: IORT: " fmt
#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -298,6 +299,59 @@ out:
return status;
}
+struct iort_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static bool apply_id_count_workaround;
+
+static struct iort_workaround_oem_info wa_info[] __initdata = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static void __init
+iort_check_id_count_workaround(struct acpi_table_header *tbl)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision) {
+ apply_id_count_workaround = true;
+ pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
+ break;
+ }
+ }
+}
+
+static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
+{
+ u32 map_max = map->input_base + map->id_count;
+
+ /*
+ * The IORT specification revision D (Section 3, table 4, page 9) says
+ * Number of IDs = The number of IDs in the range minus one, but the
+ * IORT code ignored the "minus one", and some firmware did that too,
+ * so apply a workaround here to keep compatible with both the spec
+ * compliant and non-spec compliant firmwares.
+ */
+ if (apply_id_count_workaround)
+ map_max--;
+
+ return map_max;
+}
+
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
u32 *rid_out)
{
@@ -314,8 +368,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return -ENXIO;
}
- if (rid_in < map->input_base ||
- (rid_in >= map->input_base + map->id_count))
+ if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
return -ENXIO;
*rid_out = map->output_base + (rid_in - map->input_base);
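The workaround makes the upper bound of the range check depend on the firmware quirk. A standalone sketch of the resulting mapping logic, with illustrative types in place of struct acpi_iort_id_mapping:

#include <stdio.h>

struct id_mapping { unsigned int input_base, id_count, output_base; };

/* Mirrors iort_id_map() after this change: map_max is inclusive, and
 * the HiSilicon workaround drops the extra ID that the old "ignore
 * the minus one" reading allowed. */
static int id_map(const struct id_mapping *map, int workaround,
                  unsigned int rid_in, unsigned int *rid_out)
{
        unsigned int map_max = map->input_base + map->id_count;

        if (workaround)
                map_max--;
        if (rid_in < map->input_base || rid_in > map_max)
                return -1;                      /* -ENXIO in the driver */
        *rid_out = map->output_base + (rid_in - map->input_base);
        return 0;
}

int main(void)
{
        struct id_mapping m = { .input_base = 0, .id_count = 4, .output_base = 0x100 };
        unsigned int out;

        printf("%d\n", id_map(&m, 0, 4, &out)); /* 0: spec reading, 5 IDs (0..4) */
        printf("%d\n", id_map(&m, 1, 4, &out)); /* -1: quirk reading, 4 IDs (0..3) */
        return 0;
}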
@@ -850,9 +903,9 @@ static inline bool iort_iommu_driver_enabled(u8 type)
{
switch (type) {
case ACPI_IORT_NODE_SMMU_V3:
- return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
+ return IS_ENABLED(CONFIG_ARM_SMMU_V3);
case ACPI_IORT_NODE_SMMU:
- return IS_BUILTIN(CONFIG_ARM_SMMU);
+ return IS_ENABLED(CONFIG_ARM_SMMU);
default:
pr_warn("IORT node type %u does not describe an SMMU\n", type);
return false;
@@ -924,6 +977,20 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
return iort_iommu_xlate(info->dev, parent, streamid);
}
+static void iort_named_component_init(struct device *dev,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_named_component *nc;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (!fwspec)
+ return;
+
+ nc = (struct acpi_iort_named_component *)node->node_data;
+ fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
+ nc->node_flags);
+}
+
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
@@ -978,6 +1045,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (parent)
err = iort_iommu_xlate(dev, parent, streamid);
} while (parent && !err);
+
+ if (!err)
+ iort_named_component_init(dev, node);
}
/*
@@ -1631,5 +1701,6 @@ void __init acpi_iort_init(void)
return;
}
+ iort_check_id_count_workaround(iort_table);
iort_init_platform_devices();
}
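iort_named_component_init() pulls the PASID width out of node_flags with FIELD_GET() from the newly included linux/bitfield.h. A userspace rendition of that extraction; the 0x3e mask follows the (31 << 1) layout of ACPI_IORT_NC_PASID_BITS in the ACPICA headers, but treat the constants here as illustrative:

#include <stdio.h>

/* Minimal FIELD_GET(): mask out the field, then shift it down by
 * dividing by the mask's lowest set bit. */
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & ~((mask) - 1)))

int main(void)
{
        unsigned int node_flags = 0x14U << 1;   /* PASID width stored at bits [5:1] */

        printf("pasid bits: %u\n", FIELD_GET(0x3eU, node_flags)); /* 20 */
        return 0;
}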
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 8f0e0c8d8c3d..111a407dcc77 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -38,6 +38,8 @@
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
+#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
+ ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
#define ACPI_BATTERY_DEVICE_NAME "Battery"
@@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
- return battery->full_charge_capacity && battery->design_capacity &&
+ return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}
@@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int ret = 0;
+ int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
@@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
@@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (battery->capacity_now && battery->full_charge_capacity)
- val->intval = battery->capacity_now * 100/
- battery->full_charge_capacity;
+ if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
+ full_capacity = battery->full_charge_capacity;
+ else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_capacity = battery->design_capacity;
+
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
+ full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
else
- val->intval = 0;
+ val->intval = battery->capacity_now * 100/
+ full_capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
+ bool full_cap_broken = false;
+
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_cap_broken = true;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
- battery->bat_desc.properties = charge_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(charge_battery_props);
- } else if (battery->full_charge_capacity == 0) {
- battery->bat_desc.properties =
- energy_battery_full_cap_broken_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ charge_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = charge_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_props);
+ }
} else {
- battery->bat_desc.properties = energy_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = energy_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_props);
+ }
}
battery->bat_desc.name = acpi_device_bid(battery->device);
@@ -1165,13 +1202,12 @@ static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
return single_open(file, acpi_battery_alarm_proc_show, PDE_DATA(inode));
}
-static const struct file_operations acpi_battery_alarm_fops = {
- .owner = THIS_MODULE,
- .open = acpi_battery_alarm_proc_open,
- .read = seq_read,
- .write = acpi_battery_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops acpi_battery_alarm_proc_ops = {
+ .proc_open = acpi_battery_alarm_proc_open,
+ .proc_read = seq_read,
+ .proc_write = acpi_battery_write_alarm,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
static int acpi_battery_add_fs(struct acpi_device *device)
@@ -1191,7 +1227,7 @@ static int acpi_battery_add_fs(struct acpi_device *device)
acpi_battery_state_proc_show, acpi_driver_data(device)))
return -ENODEV;
if (!proc_create_data("alarm", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device), &acpi_battery_alarm_fops,
+ acpi_device_dir(device), &acpi_battery_alarm_proc_ops,
acpi_driver_data(device)))
return -ENODEV;
return 0;
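The POWER_SUPPLY_PROP_CAPACITY branch now falls back from an unusable full-charge capacity to the design capacity before reporting an error. A minimal userspace sketch of that fallback order:

#include <stdio.h>

#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFFu
#define CAPACITY_VALID(c) ((c) != 0 && (c) != ACPI_BATTERY_VALUE_UNKNOWN)

static int capacity_percent(unsigned int now, unsigned int full,
                            unsigned int design)
{
        unsigned int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN;

        if (CAPACITY_VALID(full))
                full_capacity = full;           /* learned capacity first */
        else if (CAPACITY_VALID(design))
                full_capacity = design;         /* then the design value */

        if (now == ACPI_BATTERY_VALUE_UNKNOWN ||
            full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
                return -1;                      /* -ENODEV in the driver */

        return now * 100 / full_capacity;
}

int main(void)
{
        /* full-charge capacity reads 0, so the design value is used */
        printf("%d%%\n", capacity_percent(2500, 0, 5000));      /* 50% */
        return 0;
}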
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index b758b45737f5..f6925f16c4a2 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+ * Razer Blade Stealth 13 late 2019, notification of the LID device
+ * only happens on close, not on open and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 5e4a8860a9c0..b64c62bfcea5 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1321,6 +1321,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
+ {"INT1044", }, /* Fan for Tiger Lake generation */
{"INT3404", }, /* Fan */
{}
};
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index eb58fc475a03..387f27ef3368 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3407_device_ids[] = {
+ {"INT1047", 0},
{"INT3407", 0},
{"", 0},
};
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 5c7a90186e3c..1ec7b6900662 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -13,6 +13,10 @@
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
+ {"INT1040"},
+ {"INT1043"},
+ {"INT1044"},
+ {"INT1047"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d05be13c1022..d1f1cf5d4bf0 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt = false;
+static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
@@ -469,7 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
ec_dbg_evt("Command(%s) submitted/blocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
ec->nr_pending_queries++;
- schedule_work(&ec->work);
+ queue_work(ec_wq, &ec->work);
}
}
@@ -535,7 +536,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
- flush_scheduled_work(); /* flush ec->work */
+ drain_workqueue(ec_wq); /* flush ec->work */
flush_workqueue(ec_query_wq); /* flush queries */
}
@@ -556,8 +557,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
void acpi_ec_flush_work(void)
{
- /* Without ec_query_wq there is nothing to flush. */
- if (!ec_query_wq)
+ /* Without ec_wq there is nothing to flush. */
+ if (!ec_wq)
return;
__acpi_ec_flush_work();
@@ -1053,28 +1054,20 @@ void acpi_ec_unblock_transactions(void)
Event Management
-------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
- if (handler)
- kref_get(&handler->kref);
- return handler;
-}
-
-static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
- bool found = false;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
- found = true;
- break;
+ kref_get(&handler->kref);
+ mutex_unlock(&ec->mutex);
+ return handler;
}
}
mutex_unlock(&ec->mutex);
- return found ? acpi_ec_get_query_handler(handler) : NULL;
+ return NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
@@ -2115,25 +2108,33 @@ static struct acpi_driver acpi_ec_driver = {
.drv.pm = &acpi_ec_pm,
};
-static inline int acpi_ec_query_init(void)
+static void acpi_ec_destroy_workqueues(void)
{
- if (!ec_query_wq) {
- ec_query_wq = alloc_workqueue("kec_query", 0,
- ec_max_queries);
- if (!ec_query_wq)
- return -ENODEV;
+ if (ec_wq) {
+ destroy_workqueue(ec_wq);
+ ec_wq = NULL;
}
- return 0;
-}
-
-static inline void acpi_ec_query_exit(void)
-{
if (ec_query_wq) {
destroy_workqueue(ec_query_wq);
ec_query_wq = NULL;
}
}
+static int acpi_ec_init_workqueues(void)
+{
+ if (!ec_wq)
+ ec_wq = alloc_ordered_workqueue("kec", 0);
+
+ if (!ec_query_wq)
+ ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
+
+ if (!ec_wq || !ec_query_wq) {
+ acpi_ec_destroy_workqueues();
+ return -ENODEV;
+ }
+ return 0;
+}
+
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
{
.ident = "Thinkpad X1 Carbon 6th",
@@ -2164,8 +2165,7 @@ int __init acpi_ec_init(void)
int result;
int ecdt_fail, dsdt_fail;
- /* register workqueue for _Qxx evaluations */
- result = acpi_ec_query_init();
+ result = acpi_ec_init_workqueues();
if (result)
return result;
@@ -2196,6 +2196,6 @@ static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
- acpi_ec_query_exit();
+ acpi_ec_destroy_workqueues();
}
#endif /* 0 */
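The private queues also change what the flush path has to wait for: flush_scheduled_work() waits on everything in the system workqueue, while drain_workqueue() on a dedicated queue waits only for EC event work, including work items that re-queue themselves. The flush helper with that reading spelled out (same body as the patch, comments added):

static void __acpi_ec_flush_work(void)
{
        drain_workqueue(ec_wq);         /* EC event work, even if it re-queues */
        flush_workqueue(ec_query_wq);   /* then any in-flight _Qxx handlers */
}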
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 816b0803f7fb..aaf4e8f348cf 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
+ {"INT1044", 0},
{"INT3404", 0},
{"", 0},
};
@@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = {
#define FAN_PM_OPS_PTR NULL
#endif
+#define ACPI_FPS_NAME_LEN 20
+
struct acpi_fan_fps {
u64 control;
u64 trip_point;
u64 speed;
u64 noise_level;
u64 power;
+ char name[ACPI_FPS_NAME_LEN];
+ struct device_attribute dev_attr;
};
struct acpi_fan_fif {
@@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b)
return fps1->speed - fps2->speed;
}
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+ int count;
+
+ if (fps->control == 0xFFFFFFFF || fps->control > 100)
+ count = snprintf(buf, PAGE_SIZE, "not-defined:");
+ else
+ count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+ if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+ count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point);
+
+ if (fps->speed == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed);
+
+ if (fps->noise_level == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100);
+
+ if (fps->power == 0xFFFFFFFF)
+ count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n");
+ else
+ count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power);
+
+ return count;
+}
+
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
@@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device)
}
for (i = 0; i < fan->fps_count; i++) {
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
- struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
+ struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name),
+ &fan->fps[i] };
status = acpi_extract_package(&obj->package.elements[i + 1],
&format, &fps);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Invalid _FPS element\n");
- break;
+ goto err;
}
}
@@ -308,6 +347,24 @@ static int acpi_fan_get_fps(struct acpi_device *device)
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
+ for (i = 0; i < fan->fps_count; ++i) {
+ struct acpi_fan_fps *fps = &fan->fps[i];
+
+ snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ fps->dev_attr.show = show_state;
+ fps->dev_attr.store = NULL;
+ fps->dev_attr.attr.name = fps->name;
+ fps->dev_attr.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+ if (status) {
+ int j;
+
+ for (j = 0; j < i; ++j)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+ break;
+ }
+ }
+
err:
kfree(obj);
return status;
@@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan);
if (acpi_fan_is_acpi4(device)) {
- if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
- goto end;
+ result = acpi_fan_get_fif(device);
+ if (result)
+ return result;
+
+ result = acpi_fan_get_fps(device);
+ if (result)
+ return result;
+
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
- goto end;
+ goto err_end;
}
}
@@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
- goto end;
+ goto err_end;
}
dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
@@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+ goto err_end;
+ }
+
+ return 0;
+
+err_end:
+ if (fan->acpi4) {
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
-end:
return result;
}
@@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
+ if (fan->acpi4) {
+ struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
thermal_cooling_device_unregister(fan->cdev);
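Each _FPS entry now carries a name buffer and an embedded struct device_attribute, so acpi_extract_package() is told to stop at offsetof(struct acpi_fan_fps, name) and show_state() recovers its entry with container_of(). A runnable userspace sketch of that embedded-attribute idiom, with simplified types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dev_attr { const char *name; };

struct fan_fps {
        unsigned long long control;
        char name[20];
        struct dev_attr attr;   /* one sysfs attribute embedded per state */
};

/* Stand-in for show_state(): given only the attribute pointer sysfs
 * hands back, walk to the enclosing per-state record. */
static void show(struct dev_attr *a)
{
        struct fan_fps *fps = container_of(a, struct fan_fps, attr);

        printf("%s: control=%llu\n", fps->name, fps->control);
}

int main(void)
{
        struct fan_fps s = { .control = 75, .name = "state3", .attr = { "state3" } };

        show(&s.attr);          /* prints "state3: control=75" */
        return 0;
}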
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index f31544d3656e..4ae93350b70d 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
*
* Return: The cache structure and the level we terminated with.
*/
-static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
- int local_level,
- struct acpi_subtable_header *res,
- struct acpi_pptt_cache **found,
- int level, int type)
+static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
+ unsigned int local_level,
+ struct acpi_subtable_header *res,
+ struct acpi_pptt_cache **found,
+ unsigned int level, int type)
{
struct acpi_pptt_cache *cache;
@@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
- pr_debug("Found cache @ level %d\n", level);
+ pr_debug("Found cache @ level %u\n", level);
*found = cache;
/*
* continue looking at this node's resource list
@@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
return local_level;
}
-static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- int *starting_level, int level,
- int type)
+static struct acpi_pptt_cache *
+acpi_find_cache_level(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *starting_level, unsigned int level,
+ int type)
{
struct acpi_subtable_header *res;
- int number_of_levels = *starting_level;
+ unsigned int number_of_levels = *starting_level;
int resource = 0;
struct acpi_pptt_cache *ret = NULL;
- int local_level;
+ unsigned int local_level;
/* walk down from processor node */
while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
@@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
unsigned int level,
struct acpi_pptt_processor **node)
{
- int total_levels = 0;
+ unsigned int total_levels = 0;
struct acpi_pptt_cache *found = NULL;
struct acpi_pptt_processor *cpu_node;
u8 acpi_type = acpi_cache_type(type);
- pr_debug("Looking for CPU %d's level %d cache type %d\n",
+ pr_debug("Looking for CPU %d's level %u cache type %d\n",
acpi_cpu_id, level, acpi_type);
cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 652f19e6c541..0e62ef265ce4 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -136,18 +136,17 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
PDE_DATA(inode));
}
-static const struct file_operations acpi_system_wakeup_device_fops = {
- .owner = THIS_MODULE,
- .open = acpi_system_wakeup_device_open_fs,
- .read = seq_read,
- .write = acpi_system_write_wakeup_device,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
+ .proc_open = acpi_system_wakeup_device_open_fs,
+ .proc_read = seq_read,
+ .proc_write = acpi_system_write_wakeup_device,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_wakeup_device_fops);
+ acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
}
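This hunk and the battery.c one above are the same mechanical conversion: procfs in 5.6 registers a slim struct proc_ops instead of struct file_operations, and the .owner field has no proc_ops counterpart to carry over. The shape of the conversion in isolation; example_open is a hypothetical stand-in for a driver's open callback:

static const struct proc_ops example_proc_ops = {
        .proc_open    = example_open,   /* was .open */
        .proc_read    = seq_read,       /* was .read */
        .proc_lseek   = seq_lseek,      /* was .llseek */
        .proc_release = single_release, /* was .release */
};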
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2ae95df2e74f..dcc289e30166 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
- acpi_status status;
- u64 count;
- int current_count;
- int i, ret = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *cst;
+ int ret;
if (nocst)
return -ENODEV;
- current_count = 0;
-
- status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
- return -ENODEV;
- }
-
- cst = buffer.pointer;
-
- /* There must be at least 2 elements */
- if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
- pr_err("not enough elements in _CST\n");
- ret = -EFAULT;
- goto end;
- }
-
- count = cst->package.elements[0].integer.value;
+ ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
+ if (ret)
+ return ret;
- /* Validate number of power states. */
- if (count < 1 || count != cst->package.count - 1) {
- pr_err("count given by _CST is not valid\n");
- ret = -EFAULT;
- goto end;
- }
+ /*
+ * It is expected that there will be at least 2 states, C1 and
+ * something else (C2 or C3), so fail if that is not the case.
+ */
+ if (pr->power.count < 2)
+ return -EFAULT;
- /* Tell driver that at least _CST is supported. */
pr->flags.has_cst = 1;
-
- for (i = 1; i <= count; i++) {
- union acpi_object *element;
- union acpi_object *obj;
- struct acpi_power_register *reg;
- struct acpi_processor_cx cx;
-
- memset(&cx, 0, sizeof(cx));
-
- element = &(cst->package.elements[i]);
- if (element->type != ACPI_TYPE_PACKAGE)
- continue;
-
- if (element->package.count != 4)
- continue;
-
- obj = &(element->package.elements[0]);
-
- if (obj->type != ACPI_TYPE_BUFFER)
- continue;
-
- reg = (struct acpi_power_register *)obj->buffer.pointer;
-
- if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
- (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
- continue;
-
- /* There should be an easy way to extract an integer... */
- obj = &(element->package.elements[1]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.type = obj->integer.value;
- /*
- * Some buggy BIOSes won't list C1 in _CST -
- * Let acpi_processor_get_power_info_default() handle them later
- */
- if (i == 1 && cx.type != ACPI_STATE_C1)
- current_count++;
-
- cx.address = reg->address;
- cx.index = current_count + 1;
-
- cx.entry_method = ACPI_CSTATE_SYSTEMIO;
- if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
- if (acpi_processor_ffh_cstate_probe
- (pr->id, &cx, reg) == 0) {
- cx.entry_method = ACPI_CSTATE_FFH;
- } else if (cx.type == ACPI_STATE_C1) {
- /*
- * C1 is a special case where FIXED_HARDWARE
- * can be handled in non-MWAIT way as well.
- * In that case, save this _CST entry info.
- * Otherwise, ignore this info and continue.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- } else {
- continue;
- }
- if (cx.type == ACPI_STATE_C1 &&
- (boot_option_idle_override == IDLE_NOMWAIT)) {
- /*
- * In most cases the C1 space_id obtained from
- * _CST object is FIXED_HARDWARE access mode.
- * But when the option of idle=halt is added,
- * the entry_method type should be changed from
- * CSTATE_FFH to CSTATE_HALT.
- * When the option of idle=nomwait is added,
- * the C1 entry_method type should be
- * CSTATE_HALT.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- }
- } else {
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
- cx.address);
- }
-
- if (cx.type == ACPI_STATE_C1) {
- cx.valid = 1;
- }
-
- obj = &(element->package.elements[2]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.latency = obj->integer.value;
-
- obj = &(element->package.elements[3]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- current_count++;
- memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
-
- /*
- * We support total ACPI_PROCESSOR_MAX_POWER - 1
- * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
- */
- if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
- pr_warn("Limiting number of power states to max (%d)\n",
- ACPI_PROCESSOR_MAX_POWER);
- pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
- break;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
- current_count));
-
- /* Validate number of power states discovered */
- if (current_count < 2)
- ret = -EFAULT;
-
- end:
- kfree(buffer.pointer);
-
- return ret;
+ return 0;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
static inline void acpi_processor_cstate_first_run_checks(void)
{
- acpi_status status;
static int first_run;
if (first_run)
@@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void)
max_cstate);
first_run++;
- if (acpi_gbl_FADT.cst_control && !nocst) {
- status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
- acpi_gbl_FADT.cst_control, 8);
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Notifying BIOS of _CST ability failed"));
- }
+ if (nocst)
+ return;
+
+ acpi_processor_claim_cst_control();
}
#else
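The open-coded _CST walk moves into acpi_processor_evaluate_cst(), and only the "C1 plus at least one deeper state" rule stays local. The count rule the removed code enforced, as a standalone check (element 0 of a _CST package holds the state count):

#include <stdio.h>

/* Valid when the declared count names at least one state and matches
 * the number of remaining package elements. */
static int cst_count_valid(unsigned long long count, unsigned int pkg_count)
{
        return count >= 1 && count == (unsigned long long)pkg_count - 1;
}

int main(void)
{
        printf("%d\n", cst_count_valid(3, 4));  /* 1: count element + 3 states */
        printf("%d\n", cst_count_valid(0, 1));  /* 0: no states at all */
        return 0;
}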
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 915650bf519f..6d3448895382 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1462,7 +1462,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
iort_dma_setup(dev, &dma_addr, &size);
iommu = iort_iommu_configure(dev);
- if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+ if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
arch_setup_dma_ops(dev, dma_addr, size,
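Dropping IS_ERR() here is safe because PTR_ERR() of a valid pointer can never equal -EPROBE_DEFER: error pointers are confined to the top 4095 bytes of the address space. A userspace rendering of that convention:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

int main(void)
{
        void *iommu = (void *)(long)-517;       /* ERR_PTR(-EPROBE_DEFER) */

        printf("%d %ld\n", IS_ERR(iommu), PTR_ERR(iommu)); /* 1 -517 */
        return 0;
}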
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6747a279621b..152f7fc0b200 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -61,8 +61,11 @@ static struct notifier_block tts_notifier = {
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
+ unsigned long acpi_wakeup_address;
+
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
+ acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
acpi_set_waking_vector(acpi_wakeup_address);
@@ -987,21 +990,34 @@ static void acpi_s2idle_sync(void)
acpi_os_wait_events_complete(); /* synchronize Notify handling */
}
-static void acpi_s2idle_wake(void)
+static bool acpi_s2idle_wake(void)
{
- /*
- * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
- * not triggered while suspended, so bail out.
- */
- if (!acpi_sci_irq_valid() ||
- irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
- return;
+ if (!acpi_sci_irq_valid())
+ return pm_wakeup_pending();
+
+ while (pm_wakeup_pending()) {
+ /*
+ * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
+ * SCI has not triggered while suspended, so bail out (the
+ * wakeup is pending anyway and the SCI is not the source of
+ * it).
+ */
+ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ return true;
+
+ /*
+ * If there are no EC events to process and at least one of the
+ * other enabled GPEs is active, the wakeup is regarded as a
+ * genuine one.
+ *
+ * Note that the checks below must be carried out in this order
+ * to avoid returning prematurely due to a change of the EC GPE
+ * status bit from unset to set between the checks with the
+ * status bits of all the other GPEs unset.
+ */
+ if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+ return true;
- /*
- * If there are EC events to process, the wakeup may be a spurious one
- * coming from the EC.
- */
- if (acpi_ec_dispatch_gpe()) {
/*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.
@@ -1014,8 +1030,19 @@ static void acpi_s2idle_wake(void)
acpi_s2idle_sync();
+ /*
+ * The SCI is in the "suspended" state now and it cannot produce
+ * new wakeup events till the rearming below, so if any of them
+ * are pending here, they must be resulting from the processing
+ * of EC events above or coming from somewhere else.
+ */
+ if (pm_wakeup_pending())
+ return true;
+
rearm_wake_irq(acpi_sci_irq);
}
+
+ return false;
}
static void acpi_s2idle_restore_early(void)
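One pass of the new wake loop reduces to ordered checks; the cancel/redispatch/rearm steps between passes are omitted here. A runnable sketch of the decision order, where the three flags stand in for irqd_is_wakeup_armed(), acpi_any_gpe_status_set() and acpi_ec_dispatch_gpe():

#include <stdbool.h>
#include <stdio.h>

static bool genuine_wakeup(bool sci_armed, bool gpe_status, bool ec_handled)
{
        if (sci_armed)
                return true;    /* SCI never fired: some other wake source */
        if (gpe_status && !ec_handled)
                return true;    /* a GPE other than the EC's is active */
        return false;           /* EC-only event: spurious for this pass */
}

int main(void)
{
        printf("%d\n", genuine_wakeup(false, true, true));  /* 0: EC noise */
        printf("%d\n", genuine_wakeup(false, true, false)); /* 1: real GPE */
        return 0;
}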
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d831a61e0010..19067a5e5293 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#define PREFIX "ACPI: "
@@ -172,7 +173,7 @@ struct acpi_thermal {
struct acpi_handle_list devices;
struct thermal_zone_device *thermal_zone;
int tz_enabled;
- int kelvin_offset;
+ int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
};
@@ -297,7 +298,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (crt == -1) {
tz->trips.critical.flags.valid = 0;
} else if (crt > 0) {
- unsigned long crt_k = CELSIUS_TO_DECI_KELVIN(crt);
+ unsigned long crt_k = celsius_to_deci_kelvin(crt);
+
/*
* Allow override critical threshold
*/
@@ -333,7 +335,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (psv == -1) {
status = AE_SUPPORT;
} else if (psv > 0) {
- tmp = CELSIUS_TO_DECI_KELVIN(psv);
+ tmp = celsius_to_deci_kelvin(psv);
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
@@ -413,7 +415,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
break;
if (i == 1)
tz->trips.active[0].temperature =
- CELSIUS_TO_DECI_KELVIN(act);
+ celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
@@ -421,9 +423,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
*/
tz->trips.active[i - 1].temperature =
(tz->trips.active[i - 2].temperature <
- CELSIUS_TO_DECI_KELVIN(act) ?
+ celsius_to_deci_kelvin(act) ?
tz->trips.active[i - 2].temperature :
- CELSIUS_TO_DECI_KELVIN(act));
+ celsius_to_deci_kelvin(act));
break;
} else {
tz->trips.active[i].temperature = tmp;
@@ -519,7 +521,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
if (result)
return result;
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature,
+ *temp = deci_kelvin_to_millicelsius_with_offset(tz->temperature,
tz->kelvin_offset);
return 0;
}
@@ -624,7 +626,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.critical.temperature,
tz->kelvin_offset);
return 0;
@@ -634,7 +636,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.hot.flags.valid) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.hot.temperature,
tz->kelvin_offset);
return 0;
@@ -644,7 +646,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.passive.temperature,
tz->kelvin_offset);
return 0;
@@ -655,7 +657,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
tz->trips.active[i].flags.valid; i++) {
if (!trip) {
- *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.active[i].temperature,
tz->kelvin_offset);
return 0;
@@ -672,7 +674,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
struct acpi_thermal *tz = thermal->devdata;
if (tz->trips.critical.flags.valid) {
- *temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ *temperature = deci_kelvin_to_millicelsius_with_offset(
tz->trips.critical.temperature,
tz->kelvin_offset);
return 0;
@@ -692,7 +694,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
if (type == THERMAL_TRIP_ACTIVE) {
int trip_temp;
- int temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
+ int temp = deci_kelvin_to_millicelsius_with_offset(
tz->temperature, tz->kelvin_offset);
if (thermal_get_trip_temp(thermal, trip, &trip_temp))
return -EINVAL;
@@ -1043,9 +1045,9 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
{
if (tz->trips.critical.flags.valid &&
(tz->trips.critical.temperature % 5) == 1)
- tz->kelvin_offset = 2731;
+ tz->kelvin_offset = 273100;
else
- tz->kelvin_offset = 2732;
+ tz->kelvin_offset = 273200;
}
static void acpi_thermal_check_fn(struct work_struct *work)
@@ -1087,7 +1089,7 @@ static int acpi_thermal_add(struct acpi_device *device)
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
- acpi_device_bid(device), DECI_KELVIN_TO_CELSIUS(tz->temperature));
+ acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
goto end;
free_memory:
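The kelvin_offset change from decidegrees (2731/2732) to millidegrees (273100/273200) matches the <linux/units.h> helpers used above, which take the offset on the millidegree scale. A worked example with a hypothetical reading:

/*
 * A zone reporting 3010 deci-kelvin with kelvin_offset = 273100:
 *
 *   deci_kelvin_to_millicelsius_with_offset(3010, 273100)
 *     = 3010 * 100 - 273100
 *     = 27900 millidegrees Celsius, i.e. 27.9 C
 */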
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 31014c7d3793..419f814d596a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -303,6 +303,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
.ident = "Apple MacBook Pro 12,1",
@@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+
+ /*
+ * Desktops which falsely report a backlight and which our heuristics
+	 * Desktops that falsely report a backlight and that our heuristics
+	 * for this do not catch.
{
.callback = video_detect_force_none,
.ident = "Dell OptiPlex 9020M",
@@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "MSI MS-7721",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+ },
+ },
{ },
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b2dad43dbf82..a6b2082c24f8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2249,10 +2249,12 @@ static void binder_deferred_fd_close(int fd)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
__close_fd_get_file(fd, &twcb->file);
- if (twcb->file)
+ if (twcb->file) {
+ filp_close(twcb->file, current->files);
task_work_add(current, &twcb->twork, true);
- else
+ } else {
kfree(twcb);
+ }
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
@@ -5199,10 +5201,11 @@ err_bad_arg:
static int binder_open(struct inode *nodp, struct file *filp)
{
- struct binder_proc *proc;
+ struct binder_proc *proc, *itr;
struct binder_device *binder_dev;
struct binderfs_info *info;
struct dentry *binder_binderfs_dir_entry_proc = NULL;
+ bool existing_pid = false;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
@@ -5235,19 +5238,24 @@ static int binder_open(struct inode *nodp, struct file *filp)
filp->private_data = proc;
mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
+ if (itr->pid == proc->pid) {
+ existing_pid = true;
+ break;
+ }
+ }
hlist_add_head(&proc->proc_node, &binder_procs);
mutex_unlock(&binder_procs_lock);
- if (binder_debugfs_dir_entry_proc) {
+ if (binder_debugfs_dir_entry_proc && !existing_pid) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
/*
- * proc debug entries are shared between contexts, so
- * this will fail if the process tries to open the driver
- * again with a different context. The priting code will
- * anyway print all contexts that a given PID has, so this
- * is not a problem.
+ * proc debug entries are shared between contexts.
+			 * Only create the entry for the first PID to avoid debugfs log
+			 * spamming. The printing code will print all contexts for a
+			 * given PID anyway, so this is not a problem.
*/
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
@@ -5255,19 +5263,16 @@ static int binder_open(struct inode *nodp, struct file *filp)
&proc_fops);
}
- if (binder_binderfs_dir_entry_proc) {
+ if (binder_binderfs_dir_entry_proc && !existing_pid) {
char strbuf[11];
struct dentry *binderfs_entry;
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
/*
* Similar to debugfs, the process specific log file is shared
- * between contexts. If the file has already been created for a
- * process, the following binderfs_create_file() call will
- * fail with error code EEXIST if another context of the same
- * process invoked binder_open(). This is ok since same as
- * debugfs, the log file will contain information on all
- * contexts of a given PID.
+	 * between contexts. Only create the file for the first PID.
+	 * This is OK since, as with debugfs, the log file will contain
+	 * information on all contexts of a given PID.
*/
binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
@@ -5277,10 +5282,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
int error;
error = PTR_ERR(binderfs_entry);
- if (error != -EEXIST) {
- pr_warn("Unable to create file %s in binderfs (error %d)\n",
- strbuf, error);
- }
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
+ strbuf, error);
}
}
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 46dc54d18f0b..2a04e8abd397 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
/*
* Fill in command table information. First, the header,
@@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
- n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+ acard_ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 4bfd1b14b390..11ea1aff40db 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -81,6 +81,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
+static void ahci_shutdown_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -606,6 +607,7 @@ static struct pci_driver ahci_pci_driver = {
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ahci_remove_one,
+ .shutdown = ahci_shutdown_one,
.driver = {
.pm = &ahci_pci_pm_ops,
},
@@ -1877,6 +1879,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
+static void ahci_shutdown_one(struct pci_dev *pdev)
+{
+ ata_pci_shutdown_one(pdev);
+}
+
static void ahci_remove_one(struct pci_dev *pdev)
{
pm_runtime_get_noresume(&pdev->dev);
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f41744b9b38a..6853dbb4131d 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -73,11 +73,11 @@ enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
BRCM_SATA_NSP,
+ BRCM_SATA_BCM7216,
};
enum brcm_ahci_quirks {
- BRCM_AHCI_QUIRK_NO_NCQ = BIT(0),
- BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
+ BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
};
struct brcm_ahci_priv {
@@ -213,19 +213,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
brcm_sata_phy_disable(priv, i);
}
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
struct brcm_ahci_priv *priv)
{
- void __iomem *ahci;
- struct resource *res;
u32 impl;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
- ahci = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ahci))
- return 0;
-
- impl = readl(ahci + HOST_PORTS_IMPL);
+ impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
if (fls(impl) > SATA_TOP_MAX_PHYS)
dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -233,9 +226,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
else if (!impl)
dev_info(priv->dev, "no ports found\n");
- devm_iounmap(&pdev->dev, ahci);
- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
return impl;
}
@@ -285,6 +275,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Perform the SATA PHY reset sequence */
brcm_sata_phy_disable(priv, ap->port_no);
+ /* Reset the SATA clock */
+ ahci_platform_disable_clks(hpriv);
+ msleep(10);
+
+ ahci_platform_enable_clks(hpriv);
+ msleep(10);
+
/* Bring the PHY back on */
brcm_sata_phy_enable(priv, ap->port_no);
@@ -341,7 +338,6 @@ static const struct ata_port_info ahci_brcm_port_info = {
.port_ops = &ahci_brcm_platform_ops,
};
-#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -349,23 +345,70 @@ static int brcm_ahci_suspend(struct device *dev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret;
- ret = ahci_platform_suspend(dev);
brcm_sata_phys_disable(priv);
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ ret = ahci_platform_suspend(dev);
+ else
+ ret = 0;
+
+ if (priv->version != BRCM_SATA_BCM7216)
+ reset_control_assert(priv->rcdev);
+
return ret;
}
-static int brcm_ahci_resume(struct device *dev)
+static int __maybe_unused brcm_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret = 0;
+
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ /* Make sure clocks are turned on before re-configuration */
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ return ret;
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
- return ahci_platform_resume(dev);
+
+	/* Since we had to enable the clocks earlier on, we cannot use
+	 * ahci_platform_resume() as-is, because a second call to
+	 * ahci_platform_enable_resources() would bump up the resource
+	 * (regulators, clocks, PHYs) counts artificially, so we copy the
+	 * part after ahci_platform_enable_resources().
+	 */
+ ret = ahci_platform_enable_phys(hpriv);
+ if (ret)
+ goto out_disable_phys;
+
+ ret = ahci_platform_resume_host(dev);
+ if (ret)
+ goto out_disable_platform_phys;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+ ahci_platform_disable_clks(hpriv);
+ return ret;
}
-#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
@@ -376,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+ {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -384,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
+ const char *reset_name = NULL;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
@@ -405,49 +450,86 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- /* Reset is optional depending on platform */
- priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
- if (!IS_ERR_OR_NULL(priv->rcdev))
- reset_control_deassert(priv->rcdev);
+ /* Reset is optional depending on platform and named differently */
+ if (priv->version == BRCM_SATA_BCM7216)
+ reset_name = "rescal";
+ else
+ reset_name = "ahci";
+
+ priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+ if (IS_ERR(priv->rcdev))
+ return PTR_ERR(priv->rcdev);
+
+ hpriv = ahci_platform_get_resources(pdev, 0);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
- if ((priv->version == BRCM_SATA_BCM7425) ||
- (priv->version == BRCM_SATA_NSP)) {
- priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+ hpriv->plat_data = priv;
+ hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+ switch (priv->version) {
+ case BRCM_SATA_BCM7425:
+ hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+ /* fall through */
+ case BRCM_SATA_NSP:
+ hpriv->flags |= AHCI_HFLAG_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+ break;
+ default:
+ break;
}
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ goto out_reset;
+
+ /* Must be first so as to configure endianness including that
+ * of the standard AHCI register space.
+ */
brcm_sata_init(priv);
- priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
- if (!priv->port_mask)
- return -ENODEV;
+ /* Initializes priv->port_mask which is used below */
+ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ if (!priv->port_mask) {
+ ret = -ENODEV;
+ goto out_disable_clks;
+ }
+ /* Must be done before ahci_platform_enable_phys() */
brcm_sata_phys_enable(priv);
- hpriv = ahci_platform_get_resources(pdev, 0);
- if (IS_ERR(hpriv))
- return PTR_ERR(hpriv);
- hpriv->plat_data = priv;
- hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
-
brcm_sata_alpm_init(hpriv);
- ret = ahci_platform_enable_resources(hpriv);
+ ret = ahci_platform_enable_phys(hpriv);
if (ret)
- return ret;
-
- if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
- hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+ goto out_disable_phys;
ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
&ahci_platform_sht);
if (ret)
- return ret;
+ goto out_disable_platform_phys;
dev_info(dev, "Broadcom AHCI SATA3 registered\n");
return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+out_reset:
+ if (priv->version != BRCM_SATA_BCM7216)
+ reset_control_assert(priv->rcdev);
+ return ret;
}
static int brcm_ahci_remove(struct platform_device *pdev)
@@ -457,20 +539,35 @@ static int brcm_ahci_remove(struct platform_device *pdev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret;
+ brcm_sata_phys_disable(priv);
+
ret = ata_platform_remove_one(pdev);
if (ret)
return ret;
- brcm_sata_phys_disable(priv);
-
return 0;
}
+static void brcm_ahci_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+	/* All resource releasing happens via devres, but our device, unlike
+	 * in a proper remove, is not disappearing, so using
+	 * brcm_ahci_suspend() here, which does explicit power management,
+	 * is appropriate.
+	 */
+ ret = brcm_ahci_suspend(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
.remove = brcm_ahci_remove,
+ .shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 8befce036af8..129556fcf6be 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
* RETURNS:
* 0 on success otherwise a negative error code
*/
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
int rc, i;
@@ -74,6 +74,7 @@ disable_phys:
}
return rc;
}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
/**
* ahci_platform_disable_phys - Disable PHYs
@@ -81,7 +82,7 @@ disable_phys:
*
* This function disables all PHYs found in hpriv->phys.
*/
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
int i;
@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
phy_exit(hpriv->phys[i]);
}
}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
/**
* ahci_platform_enable_clks - Enable platform clocks
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e9017c570bc5..42c8728f6117 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5329,6 +5329,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
}
/**
+ * ata_qc_get_active - get bitmask of active qcs
+ * @ap: port in question
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+ u64 qc_active = ap->qc_active;
+
+ /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+ if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+ qc_active |= (1 << 0);
+ qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+ }
+
+ return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
+/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
@@ -6743,6 +6767,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
ata_host_detach(host);
}
+void ata_pci_shutdown_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ap->pflags |= ATA_PFLAG_FROZEN;
+
+ /* Disable port interrupts */
+ if (ap->ops->freeze)
+ ap->ops->freeze(ap);
+
+ /* Stop the port DMA engines */
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+}
+
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
@@ -7363,6 +7407,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
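The tag folding done by ata_qc_get_active() is easiest to see with concrete masks; a hypothetical snapshot:

/*
 * Internal command (ATA_TAG_INTERNAL == 32) plus NCQ commands on
 * tags 1 and 2 in flight:
 *
 *   ap->qc_active         = 0x100000006   (bits 32, 2, 1)
 *   ata_qc_get_active(ap) = 0x000000007   (bits 0, 2, 1)
 *
 * Bit 32 folds onto bit 0 because the hardware issues the internal
 * command as tag 0, so the result can be XOR-ed directly against a
 * hardware completion mask, as the sata_fsl, sata_mv and sata_nv
 * hunks below do.
 */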
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 58e09ffe8b9c..eb2eb599e602 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -17,6 +17,7 @@
* - http://www.t13.org/
*/
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
@@ -761,6 +762,10 @@ static int ata_ioc32(struct ata_port *ap)
return 0;
}
+/*
+ * This handles both native and compat commands, so anything added
+ * here must have a compatible argument, or check in_compat_syscall().
+ */
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
unsigned int cmd, void __user *arg)
{
@@ -773,6 +778,10 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ return put_user(val, (compat_ulong_t __user *)arg);
+#endif
return put_user(val, (unsigned long __user *)arg);
case HDIO_SET_32BIT:
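put_user() sizes its store by the pointer type, which is what the compat branch above relies on: a 32-bit caller passes a buffer sized for a 4-byte long, so storing a native 8-byte long through it would overrun the buffer or misplace the value. A minimal sketch of the pattern for a hypothetical ioctl helper:

/* Hypothetical helper; assumes <linux/compat.h> and <linux/uaccess.h>. */
static int ioctl_put_ulong(void __user *arg, unsigned long val)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())	/* 32-bit caller: store 4 bytes */
		return put_user((compat_ulong_t)val,
				(compat_ulong_t __user *)arg);
#endif
	return put_user(val, (unsigned long __user *)arg);
}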
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 135173c8d138..e9cf31f38450 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -526,9 +526,10 @@ static void data_xfer(struct work_struct *work)
/* request dma channels */
/* dma_request_channel may sleep, so calling from process context */
- acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data");
- if (!acdev->dma_chan) {
+ acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
+ if (IS_ERR(acdev->dma_chan)) {
dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+ acdev->dma_chan = NULL;
goto chan_request_fail;
}
@@ -539,6 +540,7 @@ static void data_xfer(struct work_struct *work)
}
dma_release_channel(acdev->dma_chan);
+ acdev->dma_chan = NULL;
/* data xferred successfully */
if (!ret) {
@@ -824,7 +826,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
acdev->pbase = res->start;
- acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ acdev->vbase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!acdev->vbase) {
dev_warn(&pdev->dev, "ioremap fail\n");
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 1bfd0154dad5..e47a28271f5b 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv)
priv->aapl_bus_id = bidp ? *bidp : 0;
/* Fixup missing Apple bus ID in case of media-bay */
- if (priv->mediabay && bidp == 0)
+ if (priv->mediabay && !bidp)
priv->aapl_bus_id = 1;
}
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index d3d851b014a3..bd87476ab481 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -891,7 +891,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
of_node_put(dma_node);
return -EINVAL;
}
- cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+ cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
of_node_put(dma_node);
@@ -909,7 +909,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!res_cs1)
return -EINVAL;
- cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
+ cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
resource_size(res_cs1));
if (!cs1)
return rv;
@@ -925,7 +925,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!res_cs0)
return -EINVAL;
- cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+ cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
resource_size(res_cs0));
if (!cs0)
return rv;
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3fe0754c0d52..8eb066abbd9c 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -309,6 +309,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
+ PCMCIA_DEVICE_MANF_CARD(0x00f1, 0x0101), /* SanDisk High (>8G) CFA */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index deae466395de..479c4b29b856 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -140,7 +140,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
info->gpio_line = gpiod;
info->irq = irq;
- info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+ info->iobase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!info->iobase)
return -ENOMEM;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 9239615d8a04..d55ee244d693 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA));
}
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
return;
} else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 277f11909fc1..d7228f8e9297 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
}
if (work_done) {
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
/* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f3e62f5528bd..eb9dc14e5147 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
check_commands = 0;
check_commands &= ~(1 << pos);
}
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
}
}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b23d1e4bad33..17d47ad03ab7 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -31,12 +31,6 @@
#include "suni.h"
#include "eni.h"
-#if !defined(__i386__) && !defined(__x86_64__)
-#ifndef ioremap_nocache
-#define ioremap_nocache(X,Y) ioremap(X,Y)
-#endif
-#endif
-
/*
* TODO:
*
@@ -374,7 +368,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
<< MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
if (!eff) size += skip;
@@ -447,7 +441,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
if (size != eff) {
dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
(vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
if (!j || j > 2*RX_DMA_BUF) {
printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
@@ -1725,7 +1719,7 @@ static int eni_do_init(struct atm_dev *dev)
}
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,",
dev->number,pci_dev->revision,real_base,eni_dev->irq);
- if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) {
+ if (!(base = ioremap(real_base,MAP_MAX_SIZE))) {
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
"mapping\n",dev->number);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index aad00d2b28f5..cc87004d5e2d 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f1a500205313..8fbd36eb8941 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1414,12 +1414,14 @@ fore200e_open(struct atm_vcc *vcc)
static void
fore200e_close(struct atm_vcc* vcc)
{
- struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
struct fore200e_vcc* fore200e_vcc;
+ struct fore200e* fore200e;
struct fore200e_vc_map* vc_map;
unsigned long flags;
ASSERT(vcc);
+ fore200e = FORE200E_DEV(vcc->dev);
+
ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
@@ -1464,10 +1466,10 @@ fore200e_close(struct atm_vcc* vcc)
static int
fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
- struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
- struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
+ struct fore200e* fore200e;
+ struct fore200e_vcc* fore200e_vcc;
struct fore200e_vc_map* vc_map;
- struct host_txq* txq = &fore200e->host_txq;
+ struct host_txq* txq;
struct host_txq_entry* entry;
struct tpd* tpd;
struct tpd_haddr tpd_haddr;
@@ -1480,9 +1482,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
unsigned char* data;
unsigned long flags;
- ASSERT(vcc);
- ASSERT(fore200e);
- ASSERT(fore200e_vcc);
+ if (!vcc)
+ return -EINVAL;
+
+ fore200e = FORE200E_DEV(vcc->dev);
+ fore200e_vcc = FORE200E_VCC(vcc);
+
+ if (!fore200e)
+ return -EINVAL;
+
+ txq = &fore200e->host_txq;
+ if (!fore200e_vcc)
+ return -EINVAL;
if (!test_bit(ATM_VF_READY, &vcc->flags)) {
		DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c3b3b5c0b0da..5f0bc74d2409 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -150,7 +150,7 @@ config DEBUG_TEST_DRIVER_REMOVE
config PM_QOS_KUNIT_TEST
bool "KUnit Test for PM QoS features"
- depends on KUNIT
+ depends on KUNIT=y
config HMEM_REPORTING
bool
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1eb81f113786..6119e11a9f95 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -248,6 +248,16 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * This function returns the logical CPU number of the given node.
+ * There are three kinds of return values:
+ * (1) The logical CPU number, which is >= 0.
+ * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
+ * there is no possible logical CPU in the kernel to match it. This happens
+ * when CONFIG_NR_CPUS is configured to be smaller than the number of
+ * CPU nodes in the DT. This case just needs to be ignored.
+ * (3) -1 if the node does not exist in the device tree.
+ */
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@@ -261,7 +271,8 @@ static int __init get_cpu_for_node(struct device_node *node)
if (cpu >= 0)
topology_parse_cpu_capacity(cpu_node, cpu);
else
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+		pr_info("CPU node for %pOF exists, but the possible cpu range is %*pbl\n",
+ cpu_node, cpumask_pr_args(cpu_possible_mask));
of_node_put(cpu_node);
return cpu;
@@ -286,9 +297,8 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
- } else {
- pr_err("%pOF: Can't get CPU for thread\n",
- t);
+ } else if (cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for thread\n", t);
of_node_put(t);
return -EINVAL;
}
@@ -307,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id,
cpu_topology[cpu].package_id = package_id;
cpu_topology[cpu].core_id = core_id;
- } else if (leaf) {
+ } else if (leaf && cpu != -ENODEV) {
pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 20736aaa0e69..f7bd0f4db13d 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -236,6 +236,109 @@ attribute_container_remove_device(struct device *dev,
mutex_unlock(&attribute_container_mutex);
}
+static int
+do_attribute_container_device_trigger_safe(struct device *dev,
+ struct attribute_container *cont,
+ int (*fn)(struct attribute_container *,
+ struct device *, struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *, struct device *))
+{
+ int ret;
+ struct internal_container *ic, *failed;
+ struct klist_iter iter;
+
+ if (attribute_container_no_classdevs(cont))
+ return fn(cont, dev, NULL);
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (dev == ic->classdev.parent) {
+ ret = fn(cont, dev, &ic->classdev);
+ if (ret) {
+ failed = ic;
+ klist_iter_exit(&iter);
+ goto fail;
+ }
+ }
+ }
+ return 0;
+
+fail:
+ if (!undo)
+ return ret;
+
+ /* Attempt to undo the work partially done. */
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (ic == failed) {
+ klist_iter_exit(&iter);
+ break;
+ }
+ if (dev == ic->classdev.parent)
+ undo(cont, dev, &ic->classdev);
+ }
+ return ret;
+}
+
+/**
+ * attribute_container_device_trigger_safe - execute a trigger for each
+ * matching classdev or fail all of them.
+ *
+ * @dev: The generic device to run the trigger for
+ * @fn: the function to execute for each classdev.
+ * @undo: a function to undo the work previously done in case of error.
+ *
+ * This function is a safe version of
+ * attribute_container_device_trigger(). It stops at the first error
+ * and undoes the partial work already done on the previous classdevs.
+ * It is guaranteed that either all of them succeed or none of them
+ * do.
+ */
+int
+attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *))
+{
+ struct attribute_container *cont, *failed = NULL;
+ int ret = 0;
+
+ mutex_lock(&attribute_container_mutex);
+
+ list_for_each_entry(cont, &attribute_container_list, node) {
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ ret = do_attribute_container_device_trigger_safe(dev, cont,
+ fn, undo);
+ if (ret) {
+ failed = cont;
+ break;
+ }
+ }
+
+ if (ret && !WARN_ON(!undo)) {
+ list_for_each_entry(cont, &attribute_container_list, node) {
+
+ if (failed == cont)
+ break;
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ do_attribute_container_device_trigger_safe(dev, cont,
+ undo, NULL);
+ }
+ }
+
+ mutex_unlock(&attribute_container_mutex);
+ return ret;
+
+}
+
/**
* attribute_container_device_trigger - execute a trigger for each matching classdev
*
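A usage sketch of the all-or-nothing semantics with a hypothetical fn/undo pair (my_alloc_resource() and my_free_resource() are stand-ins, not existing helpers):

static int my_add(struct attribute_container *cont,
		  struct device *dev, struct device *classdev)
{
	return my_alloc_resource(classdev);	/* hypothetical */
}

static int my_del(struct attribute_container *cont,
		  struct device *dev, struct device *classdev)
{
	my_free_resource(classdev);		/* hypothetical */
	return 0;
}

	/* Either every matching classdev got its resource, or none did: */
	err = attribute_container_device_trigger_safe(dev, my_add, my_del);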
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0d32544b6f91..40fb069a8a7e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,4 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2012 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * Core driver model functions and structures that should not be
+ * shared outside of the drivers/base/ directory.
+ *
+ */
#include <linux/notifier.h>
/**
@@ -175,3 +186,11 @@ extern void device_links_unbind_consumers(struct device *dev);
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
+
+#ifdef CONFIG_DEVTMPFS
+int devtmpfs_create_node(struct device *dev);
+int devtmpfs_delete_node(struct device *dev);
+#else
+static inline int devtmpfs_create_node(struct device *dev) { return 0; }
+static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
+#endif
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index a1d1e8256324..886e9054999a 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -9,6 +9,7 @@
*/
#include <linux/async.h>
+#include <linux/device/bus.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d8a6a5864c2e..bcd410e6d70a 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -8,6 +8,7 @@
* Copyright (c) 2003-2004 IBM Corp.
*/
+#include <linux/device/class.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 532a3a5d8f63..c7879f5ae2fb 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -102,11 +101,11 @@ static int component_devices_show(struct seq_file *s, void *data)
seq_printf(s, "%-40s %20s\n", "device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
- struct device *d = (struct device *)match->compare[i].data;
+ struct component *component = match->compare[i].component;
- seq_printf(s, "%-40s %20s\n", dev_name(d),
- match->compare[i].component ?
- "registered" : "not registered");
+ seq_printf(s, "%-40s %20s\n",
+ component ? dev_name(component->dev) : "(unknown)",
+ component ? (component->bound ? "bound" : "not bound") : "not registered");
}
mutex_unlock(&component_mutex);
@@ -775,5 +774,3 @@ void component_del(struct device *dev, const struct component_ops *ops)
kfree(component);
}
EXPORT_SYMBOL_GPL(component_del);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d811e60610d3..b25bcab2a26b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -516,7 +516,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
- WARN_ON(!list_empty(&dev->devres_head));
+ if (!list_empty(&dev->devres_head)) {
+ dev_crit(dev, "Resources present before probing\n");
+ return -EBUSY;
+ }
re_probe:
dev->driver = drv;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 6cdbf1531238..c9017e0584c0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -30,11 +30,7 @@
static struct task_struct *thread;
-#if defined CONFIG_DEVTMPFS_MOUNT
-static int mount_dev = 1;
-#else
-static int mount_dev;
-#endif
+static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
static DEFINE_SPINLOCK(req_lock);
@@ -71,10 +67,10 @@ static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
.init_fs_context = shmem_init_fs_context,
- .parameters = &shmem_fs_parameters,
+ .parameters = shmem_fs_parameters,
#else
.init_fs_context = ramfs_init_fs_context,
- .parameters = &ramfs_fs_parameters,
+ .parameters = ramfs_fs_parameters,
#endif
.kill_sb = kill_litter_super,
};
@@ -93,6 +89,23 @@ static inline int is_blockdev(struct device *dev)
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
+static int devtmpfs_submit_req(struct req *req, const char *tmp)
+{
+ init_completion(&req->done);
+
+ spin_lock(&req_lock);
+ req->next = requests;
+ requests = req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req->done);
+
+ kfree(tmp);
+
+ return req->err;
+}
+
int devtmpfs_create_node(struct device *dev)
{
const char *tmp = NULL;
@@ -117,19 +130,7 @@ int devtmpfs_create_node(struct device *dev)
req.dev = dev;
- init_completion(&req.done);
-
- spin_lock(&req_lock);
- req.next = requests;
- requests = &req;
- spin_unlock(&req_lock);
-
- wake_up_process(thread);
- wait_for_completion(&req.done);
-
- kfree(tmp);
-
- return req.err;
+ return devtmpfs_submit_req(&req, tmp);
}
int devtmpfs_delete_node(struct device *dev)
@@ -147,18 +148,7 @@ int devtmpfs_delete_node(struct device *dev)
req.mode = 0;
req.dev = dev;
- init_completion(&req.done);
-
- spin_lock(&req_lock);
- req.next = requests;
- requests = &req;
- spin_unlock(&req_lock);
-
- wake_up_process(thread);
- wait_for_completion(&req.done);
-
- kfree(tmp);
- return req.err;
+ return devtmpfs_submit_req(&req, tmp);
}
static int dev_mkdir(const char *name, umode_t mode)
@@ -359,7 +349,7 @@ static int handle_remove(const char *nodename, struct device *dev)
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
-int devtmpfs_mount(void)
+int __init devtmpfs_mount(void)
{
int err;
@@ -388,18 +378,30 @@ static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
return handle_remove(name, dev);
}
-static int devtmpfsd(void *p)
+static int devtmpfs_setup(void *p)
{
- int *err = p;
- *err = ksys_unshare(CLONE_NEWNS);
- if (*err)
+ int err;
+
+ err = ksys_unshare(CLONE_NEWNS);
+ if (err)
goto out;
- *err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
- if (*err)
+ err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ if (err)
goto out;
ksys_chdir("/.."); /* will traverse into overmounted root */
ksys_chroot(".");
+out:
+ *(int *)p = err;
complete(&setup_done);
+ return err;
+}
+
+static int devtmpfsd(void *p)
+{
+ int err = devtmpfs_setup(p);
+
+ if (err)
+ return err;
while (1) {
spin_lock(&req_lock);
while (requests) {
@@ -420,9 +422,6 @@ static int devtmpfsd(void *p)
schedule();
}
return 0;
-out:
- complete(&setup_done);
- return *err;
}
/*
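devtmpfs_submit_req() factors out a common kthread-request pattern; the key point is that the request lives on the caller's stack, so the caller must block until the devtmpfsd kthread is done with it. In isolation (a sketch, assuming the req_lock/requests/thread globals shown above):

static int submit_req_and_wait(struct req *req)
{
	init_completion(&req->done);

	spin_lock(&req_lock);
	req->next = requests;		/* push onto the shared list */
	requests = req;
	spin_unlock(&req_lock);

	wake_up_process(thread);	 /* kick the worker kthread */
	wait_for_completion(&req->done); /* worker sets req->err, signals done */

	return req->err;
}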
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 4e5ca632f35e..57c68769e157 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -8,6 +8,7 @@
* Copyright (c) 2007 Novell Inc.
*/
+#include <linux/device/driver.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 4a66888e7253..5fa7ce3745a0 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
filechk_fwbin = \
echo "/* Generated by $(src)/Makefile */" ;\
echo " .section .rodata" ;\
- echo " .p2align $(ASM_ALIGN)" ;\
+ echo " .p2align 4" ;\
echo "_fw_$(FWSTR)_bin:" ;\
echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
echo "_fw_end:" ;\
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 62ee90b4db56..8704e1bae175 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -606,7 +606,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
return false;
}
- if ((opt_flags & FW_OPT_NOFALLBACK))
+ if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
return false;
/* Also permit LSMs and IMA to fail firmware sysfs fallback */
@@ -630,10 +630,11 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
* interface. Userspace is in charge of loading the firmware through the sysfs
* loading interface. This sysfs fallback mechanism may be disabled completely
* on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
- * If this false we check if the internal API caller set the @FW_OPT_NOFALLBACK
- * flag, if so it would also disable the fallback mechanism. A system may want
- * to enfoce the sysfs fallback mechanism at all times, it can do this by
- * setting ignore_sysfs_fallback to false and force_sysfs_fallback to true.
+ * If this is false, we check whether the internal API caller set the
+ * @FW_OPT_NOFALLBACK_SYSFS flag; if so, the fallback mechanism is disabled
+ * as well. A system may want to enforce the sysfs fallback mechanism at all
+ * times; it can do this by setting ignore_sysfs_fallback to false and
+ * force_sysfs_fallback to true.
* Enabling force_sysfs_fallback is functionally equivalent to build a kernel
* with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
**/
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7ecd590e67fe..8656e5239a80 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -27,16 +27,16 @@
* firmware file lookup on storage is avoided. Used for calls where the
* file may be too big, or where the driver takes charge of its own
* firmware caching mechanism.
- * @FW_OPT_NOFALLBACK: Disable the fallback mechanism. Takes precedence over
- * &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
+ * precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
*/
enum fw_opt {
- FW_OPT_UEVENT = BIT(0),
- FW_OPT_NOWAIT = BIT(1),
- FW_OPT_USERHELPER = BIT(2),
- FW_OPT_NO_WARN = BIT(3),
- FW_OPT_NOCACHE = BIT(4),
- FW_OPT_NOFALLBACK = BIT(5),
+ FW_OPT_UEVENT = BIT(0),
+ FW_OPT_NOWAIT = BIT(1),
+ FW_OPT_USERHELPER = BIT(2),
+ FW_OPT_NO_WARN = BIT(3),
+ FW_OPT_NOCACHE = BIT(4),
+ FW_OPT_NOFALLBACK_SYSFS = BIT(5),
};
enum fw_status {
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 249add8c5e05..57133a9dad09 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -877,7 +877,7 @@ int request_firmware_direct(const struct firmware **firmware_p,
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, NULL, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN |
- FW_OPT_NOFALLBACK);
+ FW_OPT_NOFALLBACK_SYSFS);
module_put(THIS_MODULE);
return ret;
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 799b43191dea..b9f474c11393 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -70,20 +70,6 @@ void unregister_memory_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_notifier);
-static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
-
-int register_memory_isolate_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(register_memory_isolate_notifier);
-
-void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
- atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
-}
-EXPORT_SYMBOL(unregister_memory_isolate_notifier);
-
static void memory_block_release(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
@@ -175,11 +161,6 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}
-int memory_isolate_notify(unsigned long val, void *v)
-{
- return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
-}
-
/*
* The probe routines leave the pages uninitialized, just as the bootmem code
* does. Make sure we do not access them, but instead use only information from
@@ -225,7 +206,7 @@ static bool pages_correctly_probed(unsigned long start_pfn)
*/
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
- int online_type)
+ int online_type, int nid)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
@@ -238,7 +219,7 @@ memory_block_action(unsigned long start_section_nr, unsigned long action,
if (!pages_correctly_probed(start_pfn))
return -EBUSY;
- ret = online_pages(start_pfn, nr_pages, online_type);
+ ret = online_pages(start_pfn, nr_pages, online_type, nid);
break;
case MEM_OFFLINE:
ret = offline_pages(start_pfn, nr_pages);
@@ -264,7 +245,7 @@ static int memory_block_change_state(struct memory_block *mem,
mem->state = MEM_GOING_OFFLINE;
ret = memory_block_action(mem->start_section_nr, to_state,
- mem->online_type);
+ mem->online_type, mem->nid);
mem->state = ret ? from_state_req : to_state;
@@ -395,7 +376,6 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- unsigned long valid_start_pfn, valid_end_pfn;
struct zone *default_zone;
int nid;
@@ -408,11 +388,11 @@ static ssize_t valid_zones_show(struct device *dev,
	 * A block that contains more than one zone can not be offlined.
* This can happen e.g. for ZONE_DMA and ZONE_DMA32
*/
- if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
- &valid_start_pfn, &valid_end_pfn))
+ default_zone = test_pages_in_a_zone(start_pfn,
+ start_pfn + nr_pages);
+ if (!default_zone)
return sprintf(buf, "none\n");
- start_pfn = valid_start_pfn;
- strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
+ strcat(buf, default_zone->name);
goto out;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index cf6b6b722e5c..7fa654f1288b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -27,6 +27,7 @@
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
+#include <linux/types.h>
#include "base.h"
#include "power/power.h"
@@ -48,7 +49,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -255,7 +256,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
unsigned int type,
const char *name)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -501,7 +502,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
*/
int platform_device_add(struct platform_device *pdev)
{
- int i, ret;
+ u32 i;
+ int ret;
if (!pdev)
return -EINVAL;
@@ -569,7 +571,7 @@ int platform_device_add(struct platform_device *pdev)
pdev->id = PLATFORM_DEVID_AUTO;
}
- while (--i >= 0) {
+ while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
@@ -590,7 +592,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
*/
void platform_device_del(struct platform_device *pdev)
{
- int i;
+ u32 i;
if (!IS_ERR_OR_NULL(pdev)) {
device_del(&pdev->dev);
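The index loop rewrites above follow from i becoming u32: with an unsigned index, --i >= 0 is always true and the cleanup loop would never terminate, so the post-decrement idiom is used instead. In isolation (undo() is a hypothetical per-index cleanup):

	u32 i = n;

	while (i--)		/* visits n-1, n-2, ..., 0, then stops */
		undo(i);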
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 8e5725b11ee8..959d6d5eb000 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2303,6 +2303,44 @@ out:
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
/**
+ * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks-up a parent PM domain and subdomain based upon phandle args
+ * provided and removes the subdomain from the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ struct generic_pm_domain *parent, *subdomain;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ parent = genpd_get_from_provider(parent_spec);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto out;
+ }
+
+ subdomain = genpd_get_from_provider(subdomain_spec);
+ if (IS_ERR(subdomain)) {
+ ret = PTR_ERR(subdomain);
+ goto out;
+ }
+
+ ret = pm_genpd_remove_subdomain(parent, subdomain);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
+
+/**
* of_genpd_remove_last - Remove the last PM domain registered for a provider
* @provider: Pointer to device structure associated with provider
*
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 134a8af51511..0e99a760aebd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -273,10 +273,38 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
device_links_read_unlock(idx);
}
-static void dpm_wait_for_superior(struct device *dev, bool async)
+static bool dpm_wait_for_superior(struct device *dev, bool async)
{
- dpm_wait(dev->parent, async);
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -621,7 +649,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
skip_resume = dev_pm_may_skip_resume(dev);
@@ -829,7 +858,8 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
callback = dpm_subsys_resume_early_cb(dev, state, &info);
@@ -944,7 +974,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
index 3115db08d56b..79fc6c4418da 100644
--- a/drivers/base/power/qos-test.c
+++ b/drivers/base/power/qos-test.c
@@ -114,4 +114,4 @@ static struct kunit_suite pm_qos_test_module = {
.name = "qos-kunit-test",
.test_cases = pm_qos_test_cases,
};
-kunit_test_suite(pm_qos_test_module);
+kunit_test_suites(&pm_qos_test_module);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 48616f358854..16134a69bf6f 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev)
retval = dev->power.disable_depth > 0 ? -EINVAL :
dev->power.runtime_status == RPM_ACTIVE
&& atomic_inc_not_zero(&dev->power.usage_count);
+ trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
@@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
}
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 70a9edb5f525..27f3e60608e5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
break;
}
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
return next_ws;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index ac9b31c57967..008f8da69d97 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_byte = {
+static const struct regmap_bus regmap_smbus_byte = {
.reg_write = regmap_smbus_byte_reg_write,
.reg_read = regmap_smbus_byte_reg_read,
};
@@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
return i2c_smbus_write_word_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word = {
+static const struct regmap_bus regmap_smbus_word = {
.reg_write = regmap_smbus_word_reg_write,
.reg_read = regmap_smbus_word_reg_read,
};
@@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
return i2c_smbus_write_word_swapped(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word_swapped = {
+static const struct regmap_bus regmap_smbus_word_swapped = {
.reg_write = regmap_smbus_word_write_swapped,
.reg_read = regmap_smbus_word_read_swapped,
};
@@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context,
return -EIO;
}
-static struct regmap_bus regmap_i2c = {
+static const struct regmap_bus regmap_i2c = {
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
@@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
return -EIO;
}
-static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 19f57ccfbe1d..59f911e57719 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
- /* Check for unwritable registers before we start */
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_writeable(map,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ /* Check for unwritable or noinc registers in range
+ * before we start
+ */
+ if (!regmap_writeable_noinc(map, reg)) {
+ for (i = 0; i < val_len / map->format.val_bytes; i++) {
+ unsigned int element =
+ reg + regmap_get_offset(map, i);
+ if (!regmap_writeable(map, element) ||
+ regmap_writeable_noinc(map, element))
+ return -EINVAL;
+ }
+ }
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
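For background, a "noinc" register is a FIFO-style data port whose address must not advance between accesses; the rewritten check permits a raw write that targets such a register directly while still rejecting ranges that merely cross one. A sketch of how a driver might mark one, with all names illustrative rather than taken from this patch:

static bool foo_writeable_noinc(struct device *dev, unsigned int reg)
{
	/* FOO_FIFO_DATA accepts back-to-back writes at a single address. */
	return reg == FOO_FIFO_DATA;
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.writeable_noinc_reg	= foo_writeable_noinc,
};

Such a register is normally written through regmap_noinc_write() rather than regmap_raw_write().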
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index d8d0dc0ca5ac..0b081dee1e95 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop)
if (!prop->length)
return NULL;
- if (prop->is_array)
- return prop->pointer;
-
- return &prop->value;
+ return prop->is_inline ? &prop->value : prop->pointer;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
- const void *pointer = property_get_pointer(p);
const char * const *src_str;
size_t i, nval;
- if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer) {
- src_str = p->pointer;
- nval = p->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(src_str[i]);
- }
- kfree(pointer);
- } else if (p->type == DEV_PROP_STRING) {
- kfree(p->value.str);
+ if (p->type == DEV_PROP_STRING) {
+ src_str = property_get_pointer(p);
+ nval = p->length / sizeof(*src_str);
+ for (i = 0; i < nval; i++)
+ kfree(src_str[i]);
}
+
+ if (!p->is_inline)
+ kfree(p->pointer);
+
kfree(p->name);
}
-static const char * const *
-property_copy_string_array(const struct property_entry *src)
+static bool property_copy_string_array(const char **dst_ptr,
+ const char * const *src_ptr,
+ size_t nval)
{
- const char **d;
- const char * const *src_str = src->pointer;
- size_t nval = src->length / sizeof(*d);
int i;
- d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
- if (!d)
- return NULL;
-
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src_str[i], GFP_KERNEL);
- if (!d[i] && src_str[i]) {
+ dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
+ if (!dst_ptr[i] && src_ptr[i]) {
while (--i >= 0)
- kfree(d[i]);
- kfree(d);
- return NULL;
+ kfree(dst_ptr[i]);
+ return false;
}
}
- return d;
+ return true;
}
static int property_entry_copy_data(struct property_entry *dst,
const struct property_entry *src)
{
const void *pointer = property_get_pointer(src);
- const void *new;
-
- if (src->is_array) {
- if (!src->length)
- return -ENODATA;
-
- if (src->type == DEV_PROP_STRING) {
- new = property_copy_string_array(src);
- if (!new)
- return -ENOMEM;
- } else {
- new = kmemdup(pointer, src->length, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- }
+ void *dst_ptr;
+ size_t nval;
+
+ /*
+ * Properties with no data should not be marked as stored
+ * out of line.
+ */
+ if (!src->is_inline && !src->length)
+ return -ENODATA;
+
+ /*
+ * Reference properties are never stored inline as
+ * they are too big.
+ */
+ if (src->type == DEV_PROP_REF && src->is_inline)
+ return -EINVAL;
- dst->is_array = true;
- dst->pointer = new;
- } else if (src->type == DEV_PROP_STRING) {
- new = kstrdup(src->value.str, GFP_KERNEL);
- if (!new && src->value.str)
+ if (src->length <= sizeof(dst->value)) {
+ dst_ptr = &dst->value;
+ dst->is_inline = true;
+ } else {
+ dst_ptr = kmalloc(src->length, GFP_KERNEL);
+ if (!dst_ptr)
return -ENOMEM;
+ dst->pointer = dst_ptr;
+ }
- dst->value.str = new;
+ if (src->type == DEV_PROP_STRING) {
+ nval = src->length / sizeof(const char *);
+ if (!property_copy_string_array(dst_ptr, pointer, nval)) {
+ if (!dst->is_inline)
+ kfree(dst->pointer);
+ return -ENOMEM;
+ }
} else {
- dst->value = src->value;
+ memcpy(dst_ptr, pointer, src->length);
}
dst->length = src->length;
dst->type = src->type;
dst->name = kstrdup(src->name, GFP_KERNEL);
- if (!dst->name)
- goto out_free_data;
+ if (!dst->name) {
+ property_entry_free_data(dst);
+ return -ENOMEM;
+ }
return 0;
-
-out_free_data:
- property_entry_free_data(dst);
- return -ENOMEM;
}
/**
@@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
struct fwnode_reference_args *args)
{
struct swnode *swnode = to_swnode(fwnode);
- const struct software_node_reference *ref;
+ const struct software_node_ref_args *ref_array;
+ const struct software_node_ref_args *ref;
const struct property_entry *prop;
struct fwnode_handle *refnode;
+ u32 nargs_prop_val;
+ int error;
int i;
- if (!swnode || !swnode->node->references)
+ if (!swnode)
return -ENOENT;
- for (ref = swnode->node->references; ref->name; ref++)
- if (!strcmp(ref->name, propname))
- break;
+ prop = property_entry_get(swnode->node->properties, propname);
+ if (!prop)
+ return -ENOENT;
+
+ if (prop->type != DEV_PROP_REF)
+ return -EINVAL;
- if (!ref->name || index > (ref->nrefs - 1))
+ /*
+ * We expect that references are never stored inline, even
+ * single ones, as they are too big.
+ */
+ if (prop->is_inline)
+ return -EINVAL;
+
+ if (index * sizeof(*ref) >= prop->length)
return -ENOENT;
- refnode = software_node_fwnode(ref->refs[index].node);
+ ref_array = prop->pointer;
+ ref = &ref_array[index];
+
+ refnode = software_node_fwnode(ref->node);
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- prop = property_entry_get(swnode->node->properties, nargs_prop);
- if (!prop)
- return -EINVAL;
+ error = property_entry_read_int_array(swnode->node->properties,
+ nargs_prop, sizeof(u32),
+ &nargs_prop_val, 1);
+ if (error)
+ return error;
- nargs = prop->value.u32_data;
+ nargs = nargs_prop_val;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
@@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
args->nargs = nargs;
for (i = 0; i < nargs; i++)
- args->args[i] = ref->refs[index].args[i];
+ args->args[i] = ref->args[i];
return 0;
}
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 86e85daa80bf..305c7751184a 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE
The module name will be test_async_driver_probe.ko
If unsure say N.
+config KUNIT_DRIVER_PE_TEST
+ bool "KUnit Tests for property entry API"
+ depends on KUNIT=y
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 0f1f7277a013..3ca56367c84b 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
+
+obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
new file mode 100644
index 000000000000..abe03315180f
--- /dev/null
+++ b/drivers/base/test/property-entry-test.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+// Unit tests for property entries API
+//
+// Copyright 2019 Google LLC.
+
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+static void pe_test_uints(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8("prop-u8", 8),
+ PROPERTY_ENTRY_U16("prop-u16", 16),
+ PROPERTY_ENTRY_U32("prop-u32", 32),
+ PROPERTY_ENTRY_U64("prop-u64", 64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[2];
+ u16 val_u16, array_u16[2];
+ u32 val_u32, array_u32[2];
+ u64 val_u64, array_u64[2];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_uint_arrays(struct kunit *test)
+{
+ static const u8 a_u8[16] = { 8, 9 };
+ static const u16 a_u16[16] = { 16, 17 };
+ static const u32 a_u32[16] = { 32, 33 };
+ static const u64 a_u64[16] = { 64, 65 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
+ PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
+ PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
+ PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[32];
+ u16 val_u16, array_u16[32];
+ u32 val_u32, array_u32[32];
+ u64 val_u64, array_u64[32];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_strings(struct kunit *test)
+{
+ static const char *strings[] = {
+ "string-a",
+ "string-b",
+ };
+
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING("str", "single"),
+ PROPERTY_ENTRY_STRING("empty", ""),
+ PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ const char *str;
+ const char *strs[10];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_string(node, "str", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "single");
+
+ error = fwnode_property_read_string_array(node, "str", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ /* asking for more data returns what we have */
+ error = fwnode_property_read_string_array(node, "str", strs, 2);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ error = fwnode_property_read_string(node, "no-str", &str);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_string_array(node, "no-str", strs, 1);
+ KUNIT_EXPECT_LT(test, error, 0);
+
+ error = fwnode_property_read_string(node, "empty", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 3);
+ KUNIT_EXPECT_EQ(test, error, 2);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+ KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+
+ /* NULL argument -> returns size */
+ error = fwnode_property_read_string_array(node, "strs", NULL, 0);
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ /* accessing array as single value */
+ error = fwnode_property_read_string(node, "strs", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "string-a");
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_bool(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_BOOL("prop"),
+ { }
+ };
+
+ struct fwnode_handle *node;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
+ KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
+
+ fwnode_remove_software_node(node);
+}
+
+/* Verifies that small U8 array is stored inline when property is copied */
+static void pe_test_move_inline_u8(struct kunit *test)
+{
+ static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
+ static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
+ PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
+ { }
+ };
+
+ struct property_entry *copy;
+ const u8 *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ data_ptr = (u8 *)&copy[0].value;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2);
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6);
+
+ property_entries_free(copy);
+}
+
+/* Verifies that single string array is stored inline when property is copied */
+static void pe_test_move_inline_str(struct kunit *test)
+{
+ static char *str_array_small[] = { "a" };
+ static char *str_array_big[] = { "b", "c", "d", "e" };
+ static char *str_array_small_empty[] = { "" };
+ static struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
+ PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
+ PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
+ { }
+ };
+
+ struct property_entry *copy;
+ const char * const *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
+ KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
+
+ KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
+
+ property_entries_free(copy);
+}
+
+/* Handling of reference properties */
+static void pe_test_reference(struct kunit *test)
+{
+ static const struct software_node nodes[] = {
+ { .name = "1", },
+ { .name = "2", },
+ { }
+ };
+
+ static const struct software_node_ref_args refs[] = {
+ {
+ .node = &nodes[0],
+ .nargs = 0,
+ },
+ {
+ .node = &nodes[1],
+ .nargs = 2,
+ .args = { 3, 4 },
+ },
+ };
+
+ const struct property_entry entries[] = {
+ PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
+ PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ struct fwnode_reference_args ref;
+ int error;
+
+ error = software_node_register_nodes(nodes);
+ KUNIT_ASSERT_EQ(test, error, 0);
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 1, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+
+ /* asking for more args, padded with zero data */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 2, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* array of references */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* second reference in the array */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 2, 1, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 2, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+ software_node_unregister_nodes(nodes);
+}
+
+static struct kunit_case property_entry_test_cases[] = {
+ KUNIT_CASE(pe_test_uints),
+ KUNIT_CASE(pe_test_uint_arrays),
+ KUNIT_CASE(pe_test_strings),
+ KUNIT_CASE(pe_test_bool),
+ KUNIT_CASE(pe_test_move_inline_u8),
+ KUNIT_CASE(pe_test_move_inline_str),
+ KUNIT_CASE(pe_test_reference),
+ { }
+};
+
+static struct kunit_suite property_entry_test_suite = {
+ .name = "property-entry",
+ .test_cases = property_entry_test_cases,
+};
+
+kunit_test_suites(&property_entry_test_suite);
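Assuming the usual KUnit workflow, the new suite runs under the UML wrapper once the option is enabled in a .kunitconfig; a sketch:

# .kunitconfig fragment
CONFIG_KUNIT=y
CONFIG_KUNIT_DRIVER_PE_TEST=y

$ ./tools/testing/kunit/kunit.py run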
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index f4b1d8e54daf..3bb7beb127a9 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -44,7 +44,8 @@ static int test_probe(struct platform_device *pdev)
* performing an async init on that node.
*/
if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
- if (dev_to_node(dev) != numa_node_id()) {
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ dev_to_node(dev) != numa_node_id()) {
dev_warn(dev, "NUMA node mismatch %d != %d\n",
dev_to_node(dev), numa_node_id());
atomic_inc(&warnings);
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 5ed86ded6e6b..ccc86206e508 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -30,6 +30,10 @@
#include <linux/attribute_container.h>
#include <linux/transport_class.h>
+static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev);
+
/**
* transport_class_register - register an initial transport class
*
@@ -172,10 +176,11 @@ static int transport_add_class_device(struct attribute_container *cont,
* routine is simply a trigger point used to add the device to the
* system and register attributes for it.
*/
-
-void transport_add_device(struct device *dev)
+int transport_add_device(struct device *dev)
{
- attribute_container_device_trigger(dev, transport_add_class_device);
+ return attribute_container_device_trigger_safe(dev,
+ transport_add_class_device,
+ transport_remove_classdev);
}
EXPORT_SYMBOL_GPL(transport_add_device);
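Because the trigger can now fail part-way, with transport_remove_classdev() undoing the classdevs already added, callers are expected to propagate the result. A minimal sketch of the new call-site shape:

	int error;

	error = transport_add_device(dev);
	if (error)
		return error;	/* partially added classdevs were rolled back */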
diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c
index 57f10b58b47c..c153c96a6145 100644
--- a/drivers/bcma/driver_chipcommon_b.c
+++ b/drivers/bcma/driver_chipcommon_b.c
@@ -48,7 +48,7 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
return 0;
ccb->setup_done = 1;
- ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE);
+ ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE);
if (!ccb->mii)
return -ENOMEM;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index c42cec7c7ecc..88a93c266c19 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -115,7 +115,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, sizeof(val));
+ mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
@@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, sizeof(val));
+ mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
@@ -515,7 +515,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
- io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+ io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start,
resource_size(&pc_host->mem_resource));
pc_host->pci_controller.io_map_base = io_map_base;
set_io_port_base(pc_host->pci_controller.io_map_base);
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index c8073b509a2b..90d5bdc12e03 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -172,7 +172,7 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
/* iomap only first core. We have to read some register on this core
* to scan the bus.
*/
- bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
+ bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
if (!bus->mmio)
return -ENOMEM;
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 4a2d1b235fb5..1a942f734188 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -219,7 +219,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
u32 type, u8 port)
{
- u32 addrl, addrh, sizel, sizeh = 0;
+ u32 addrl, addrh, sizeh = 0;
u32 size;
u32 ent = bcma_erom_get_ent(bus, eromptr);
@@ -239,12 +239,9 @@ static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
size = bcma_erom_get_ent(bus, eromptr);
- sizel = size & SCAN_SIZE_SZ;
if (size & SCAN_SIZE_SG32)
sizeh = bcma_erom_get_ent(bus, eromptr);
- } else
- sizel = SCAN_ADDR_SZ_BASE <<
- ((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT);
+ }
return addrl;
}
@@ -425,11 +422,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
}
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+ core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE);
if (!core->io_addr)
return -ENOMEM;
if (core->wrap) {
- core->io_wrap = ioremap_nocache(core->wrap,
+ core->io_wrap = ioremap(core->wrap,
BCMA_CORE_SIZE);
if (!core->io_wrap) {
iounmap(core->io_addr);
@@ -472,7 +469,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
- eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ eromptr = ioremap(erombase, BCMA_CORE_SIZE);
if (!eromptr)
return -ENOMEM;
} else {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 1bb8ec575352..025b1b77b11a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -432,16 +432,6 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
-config VIRTIO_BLK_SCSI
- bool "SCSI passthrough request for the Virtio block driver"
- depends on VIRTIO_BLK
- select BLK_SCSI_REQUEST
- ---help---
- Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
- virtio-blk devices. This is only supported for the legacy
- virtio protocol and not enabled by default by any hypervisor.
- You probably want to use virtio-scsi instead.
-
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index bd19f8af950b..7b32fb673375 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -329,6 +329,7 @@ static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
.ioctl = aoeblk_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a8730cc4db10..220c5e18aba0 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -473,6 +473,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
return kobj;
}
+static inline void brd_check_and_reset_par(void)
+{
+ if (unlikely(!max_part))
+ max_part = 1;
+
+ /*
+	 * Make sure (1U << MINORBITS) is divisible by max_part; otherwise
+	 * it is possible to get the same dev_t when adding partitions.
+ */
+ if ((1U << MINORBITS) % max_part != 0)
+ max_part = 1UL << fls(max_part);
+
+ if (max_part > DISK_MAX_PARTS) {
+ pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
+ DISK_MAX_PARTS, DISK_MAX_PARTS);
+ max_part = DISK_MAX_PARTS;
+ }
+}
+
static int __init brd_init(void)
{
struct brd_device *brd, *next;
@@ -496,8 +515,7 @@ static int __init brd_init(void)
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
return -EIO;
- if (unlikely(!max_part))
- max_part = 1;
+ brd_check_and_reset_par();
for (i = 0; i < rd_nr; i++) {
brd = brd_alloc(i);
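As a worked check of brd_check_and_reset_par(): with MINORBITS = 20, max_part=3 leaves (1U << 20) % 3 != 0, so it is rounded up to 1UL << fls(3) = 4, while max_part=4 divides 1U << 20 evenly and is kept. A self-contained user-space rendering of the divisibility arithmetic (the DISK_MAX_PARTS clamp is omitted, and the local fls() mirrors the kernel's semantics):

#include <stdio.h>

#define MINORBITS 20

static int fls(unsigned int x)	/* highest set bit, 1-based; fls(0) == 0 */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int clamp_max_part(unsigned int max_part)
{
	if (!max_part)
		max_part = 1;
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1U << fls(max_part);	/* round up to a power of two */
	return max_part;
}

int main(void)
{
	printf("3 -> %u\n", clamp_max_part(3));	/* prints 3 -> 4 */
	printf("4 -> %u\n", clamp_max_part(4));	/* prints 4 -> 4 */
	return 0;
}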
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ddbf56014c51..aae99a2d7bd4 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -622,7 +622,7 @@ struct fifo_buffer {
int total; /* sum of all values */
int values[0];
};
-extern struct fifo_buffer *fifo_alloc(int fifo_size);
+extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
/* flag bits per connection */
enum {
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index de2f94d0103a..da4a3ebe04ef 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1575,7 +1575,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
struct drbd_device *device;
struct disk_conf *new_disk_conf, *old_disk_conf;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
- int err, fifo_size;
+ int err;
+ unsigned int fifo_size;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 2b3103c30857..79e216446030 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3887,7 +3887,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
- int fifo_size = 0;
+ unsigned int fifo_size = 0;
int err;
peer_device = conn_peer_device(connection, pi->vnr);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 5bdcc70ad589..b7f605c6e231 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -482,11 +482,11 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
fb->values[i] += value;
}
-struct fifo_buffer *fifo_alloc(int fifo_size)
+struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
{
struct fifo_buffer *fb;
- fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+ fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
if (!fb)
return NULL;
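struct_size() computes the size of a structure with a trailing flexible array and checks the multiplication for overflow, saturating to SIZE_MAX so the allocation fails outright instead of coming back undersized. A hedged sketch with an illustrative structure:

#include <linux/overflow.h>
#include <linux/slab.h>

struct sample_fifo {
	unsigned int size;
	int values[];			/* flexible array member */
};

static struct sample_fifo *sample_fifo_alloc(unsigned int n)
{
	struct sample_fifo *fb;

	/* sizeof(*fb) + n * sizeof(fb->values[0]), overflow-checked. */
	fb = kzalloc(struct_size(fb, values, n), GFP_KERNEL);
	if (fb)
		fb->size = n;
	return fb;
}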
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 485865fd0412..cd3612e4e2e1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3879,6 +3879,9 @@ static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
{
int drive = (long)bdev->bd_disk->private_data;
switch (cmd) {
+ case CDROMEJECT: /* CD-ROM eject */
+ case 0x6470: /* SunOS floppy eject */
+
case FDMSGON:
case FDMSGOFF:
case FDSETEMSGTRESH:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 57532465fb83..78181908f0df 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1265,6 +1265,16 @@ static int nbd_start_device(struct nbd_device *nbd)
args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args) {
sock_shutdown(nbd);
+ /*
+			 * If there are m connections (m > 2) and the first n
+			 * (1 < n < m) kzalloc() calls succeeded before this one
+			 * failed, n recv threads are already running. Flush the
+			 * workqueue here so a recv thread cannot drop the last
+			 * config_refs and try to destroy the workqueue from
+			 * inside the workqueue.
+ */
+ if (i)
+ flush_workqueue(nbd->recv_workq);
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
@@ -1296,10 +1306,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret) {
+ if (ret)
sock_shutdown(nbd);
- flush_workqueue(nbd->recv_workq);
- }
+ flush_workqueue(nbd->recv_workq);
+
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index ae8d4bc532b0..16510795e377 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -263,34 +263,34 @@ static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
}
/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
-#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
-static ssize_t \
-nullb_device_##NAME##_show(struct config_item *item, char *page) \
-{ \
- return nullb_device_##TYPE##_attr_show( \
- to_nullb_device(item)->NAME, page); \
-} \
-static ssize_t \
-nullb_device_##NAME##_store(struct config_item *item, const char *page, \
- size_t count) \
-{ \
- int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY; \
- struct nullb_device *dev = to_nullb_device(item); \
- TYPE new_value; \
- int ret; \
- \
- ret = nullb_device_##TYPE##_attr_store(&new_value, page, count); \
- if (ret < 0) \
- return ret; \
- if (apply_fn) \
- ret = apply_fn(dev, new_value); \
- else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
- ret = -EBUSY; \
- if (ret < 0) \
- return ret; \
- dev->NAME = new_value; \
- return count; \
-} \
+#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
+static ssize_t \
+nullb_device_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return nullb_device_##TYPE##_attr_show( \
+ to_nullb_device(item)->NAME, page); \
+} \
+static ssize_t \
+nullb_device_##NAME##_store(struct config_item *item, const char *page, \
+ size_t count) \
+{ \
+ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
+ struct nullb_device *dev = to_nullb_device(item); \
+ TYPE uninitialized_var(new_value); \
+ int ret; \
+ \
+ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
+ if (ret < 0) \
+ return ret; \
+ if (apply_fn) \
+ ret = apply_fn(dev, new_value); \
+ else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
+ ret = -EBUSY; \
+ if (ret < 0) \
+ return ret; \
+ dev->NAME = new_value; \
+ return count; \
+} \
CONFIGFS_ATTR(nullb_device_, NAME);
static int nullb_apply_submit_queues(struct nullb_device *dev,
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index d4d88b581822..ed34785dd64b 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
/* Writes must be at the write pointer position */
if (sector != zone->wp)
return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_EMPTY)
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone->wp += nr_sectors;
@@ -186,7 +188,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
if (zone->cond == BLK_ZONE_COND_FULL)
return BLK_STS_IOERR;
- zone->cond = BLK_ZONE_COND_CLOSED;
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
break;
case REQ_OP_ZONE_FINISH:
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
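Taken together, these hunks make null_blk follow the standard zone state machine: sequential zones accept writes only at the write pointer, a successful write implicitly opens an empty or closed zone (an explicitly opened zone stays EXP_OPEN), and closing a zone whose write pointer never moved returns it to EMPTY rather than CLOSED. A worked sequence under those rules, for one sequential zone starting at sector 0 with illustrative sizes:

/*
 * cond = EMPTY,  wp = 0
 * write 0..15   -> accepted (sector == wp), wp = 16, cond = IMP_OPEN
 * zone close    -> wp != start,             cond = CLOSED
 * write 16..31  -> accepted (sector == wp), wp = 32, cond = IMP_OPEN
 * write 64..79  -> rejected: sector != wp,  BLK_STS_IOERR
 * reset + close -> wp == start,             cond = EMPTY, not CLOSED
 */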
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 636bfea2de6f..117cfc8cd05a 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -275,6 +275,9 @@ static const struct block_device_operations pcd_bdops = {
.open = pcd_block_open,
.release = pcd_block_release,
.ioctl = pcd_block_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= blkdev_compat_ptr_ioctl,
+#endif
.check_events = pcd_block_check_events,
};
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 6f9ad3fc716f..c0967507d085 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -874,6 +874,7 @@ static const struct block_device_operations pd_fops = {
.open = pd_open,
.release = pd_release,
.ioctl = pd_ioctl,
+ .compat_ioctl = pd_ioctl,
.getgeo = pd_getgeo,
.check_events = pd_check_events,
.revalidate_disk= pd_revalidate
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 6b7d4cab3687..bb09f21ce21a 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -276,6 +276,7 @@ static const struct block_device_operations pf_fops = {
.open = pf_open,
.release = pf_release,
.ioctl = pf_ioctl,
+ .compat_ioctl = pf_ioctl,
.getgeo = pf_getgeo,
.check_events = pf_check_events,
};
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ee67bf929fac..5f970a7d32c0 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2663,28 +2663,6 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return ret;
}
-#ifdef CONFIG_COMPAT
-static int pkt_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- /* compatible */
- case CDROMEJECT:
- case CDROMMULTISESSION:
- case CDROMREADTOCENTRY:
- case SCSI_IOCTL_SEND_COMMAND:
- return pkt_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
-
-
- /* FIXME: no handler so far */
- case CDROM_LAST_WRITTEN:
- /* handled in compat_blkdev_driver_ioctl */
- case CDROM_SEND_PACKET:
- default:
- return -ENOIOCTLCMD;
- }
-}
-#endif
-
static unsigned int pkt_check_events(struct gendisk *disk,
unsigned int clearing)
{
@@ -2706,9 +2684,7 @@ static const struct block_device_operations pktcdvd_ops = {
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl		= pkt_compat_ioctl,
-#endif
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = pkt_check_events,
};
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2b184563cd32..6343402c09e6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -848,7 +848,7 @@ enum {
Opt_notrim,
};
-static const struct fs_parameter_spec rbd_param_specs[] = {
+static const struct fs_parameter_spec rbd_parameters[] = {
fsparam_u32 ("alloc_size", Opt_alloc_size),
fsparam_flag ("exclusive", Opt_exclusive),
fsparam_flag ("lock_on_read", Opt_lock_on_read),
@@ -863,11 +863,6 @@ static const struct fs_parameter_spec rbd_param_specs[] = {
{}
};
-static const struct fs_parameter_description rbd_parameters = {
- .name = "rbd",
- .specs = rbd_param_specs,
-};
-
struct rbd_options {
int queue_depth;
int alloc_size;
@@ -2662,7 +2657,7 @@ static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
u64 off, u64 len)
{
struct ceph_file_extent ex = { off, len };
- union rbd_img_fill_iter dummy;
+ union rbd_img_fill_iter dummy = {};
struct rbd_img_fill_ctx fctx = {
.pos_type = OBJ_REQUEST_NODATA,
.pos = &dummy,
@@ -6353,19 +6348,19 @@ static int rbd_parse_param(struct fs_parameter *param,
{
struct rbd_options *opt = pctx->opts;
struct fs_parse_result result;
+ struct p_log log = {.prefix = "rbd"};
int token, ret;
ret = ceph_parse_param(param, pctx->copts, NULL);
if (ret != -ENOPARAM)
return ret;
- token = fs_parse(NULL, &rbd_parameters, param, &result);
+ token = __fs_parse(&log, rbd_parameters, param, &result);
dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
if (token < 0) {
- if (token == -ENOPARAM) {
- return invalf(NULL, "rbd: Unknown parameter '%s'",
- param->key);
- }
+ if (token == -ENOPARAM)
+ return inval_plog(&log, "Unknown parameter '%s'",
+ param->key);
return token;
}
@@ -6378,9 +6373,8 @@ static int rbd_parse_param(struct fs_parameter *param,
case Opt_alloc_size:
if (result.uint_32 < SECTOR_SIZE)
goto out_of_range;
- if (!is_power_of_2(result.uint_32)) {
- return invalf(NULL, "rbd: alloc_size must be a power of 2");
- }
+ if (!is_power_of_2(result.uint_32))
+ return inval_plog(&log, "alloc_size must be a power of 2");
opt->alloc_size = result.uint_32;
break;
case Opt_lock_timeout:
@@ -6416,7 +6410,7 @@ static int rbd_parse_param(struct fs_parameter *param,
return 0;
out_of_range:
- return invalf(NULL, "rbd: %s out of range", param->key);
+ return inval_plog(&log, "%s out of range", param->key);
}
/*
@@ -6433,7 +6427,7 @@ static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
if (*key) {
struct fs_parameter param = {
.key = key,
- .type = fs_value_is_string,
+ .type = fs_value_is_flag,
};
char *value = strchr(key, '=');
size_t v_len = 0;
@@ -6443,14 +6437,11 @@ static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
continue;
*value++ = 0;
v_len = strlen(value);
- }
-
-
- if (v_len > 0) {
param.string = kmemdup_nul(value, v_len,
GFP_KERNEL);
if (!param.string)
return -ENOMEM;
+ param.type = fs_value_is_string;
}
param.size = v_len;
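With this change, an options string such as "alloc_size=65536,exclusive" produces two parameters: {key = "alloc_size", type = fs_value_is_string, string = "65536"} and {key = "exclusive", type = fs_value_is_flag, string = NULL}. __fs_parse() can then distinguish flag-style options from valued ones directly, instead of rbd passing every key as a string with an empty value.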
@@ -7143,7 +7134,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
if (rc)
goto err_out_image_lock;
- add_disk(rbd_dev->disk);
+ device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
/* see rbd_init_disk() */
blk_put_queue(rbd_dev->disk->queue);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 571612e233fe..39aeebc6837d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -171,6 +171,7 @@ static const struct block_device_operations vdc_fops = {
.owner = THIS_MODULE,
.getgeo = vdc_getgeo,
.ioctl = vdc_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
};
static void vdc_blk_queue_start(struct vdc_port *port)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 1f3f9e0f02a8..4eaf97d7a170 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -827,7 +827,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_csr;
}
- card->csr_remap = ioremap_nocache(csr_base, csr_len);
+ card->csr_remap = ioremap(csr_base, csr_len);
if (!card->csr_remap) {
dev_printk(KERN_ERR, &card->dev->dev,
"Unable to remap memory region\n");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ffd719d89de..54158766334b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -11,7 +11,6 @@
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
-#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
@@ -56,11 +55,6 @@ struct virtio_blk {
};
struct virtblk_req {
-#ifdef CONFIG_VIRTIO_BLK_SCSI
- struct scsi_request sreq; /* for SCSI passthrough, must be first */
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- struct virtio_scsi_inhdr in_hdr;
-#endif
struct virtio_blk_outhdr out_hdr;
u8 status;
struct scatterlist sg[];
@@ -78,80 +72,6 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
-/*
- * If this is a packet command we need a couple of additional headers. Behind
- * the normal outhdr we put a segment with the scsi command block, and before
- * the normal inhdr we put the sense data and the inhdr with additional status
- * information.
- */
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
- struct scatterlist *data_sg, bool have_data)
-{
- struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
- unsigned int num_out = 0, num_in = 0;
-
- sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
- sgs[num_out++] = &hdr;
- sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
- sgs[num_out++] = &cmd;
-
- if (have_data) {
- if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
- sgs[num_out++] = data_sg;
- else
- sgs[num_out + num_in++] = data_sg;
- }
-
- sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
- sgs[num_out + num_in++] = &sense;
- sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
- sgs[num_out + num_in++] = &inhdr;
- sg_init_one(&status, &vbr->status, sizeof(vbr->status));
- sgs[num_out + num_in++] = &status;
-
- return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
-}
-
-static inline void virtblk_scsi_request_done(struct request *req)
-{
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- struct virtio_blk *vblk = req->q->queuedata;
- struct scsi_request *sreq = &vbr->sreq;
-
- sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
- sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
- sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
-}
-
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long data)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct virtio_blk *vblk = disk->private_data;
-
- /*
- * Only allow the generic SCSI ioctls if the host can support it.
- */
- if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
- return -ENOTTY;
-
- return scsi_cmd_blk_ioctl(bdev, mode, cmd,
- (void __user *)data);
-}
-#else
-static inline int virtblk_add_req_scsi(struct virtqueue *vq,
- struct virtblk_req *vbr, struct scatterlist *data_sg,
- bool have_data)
-{
- return -EIO;
-}
-static inline void virtblk_scsi_request_done(struct request *req)
-{
-}
-#define virtblk_ioctl NULL
-#endif /* CONFIG_VIRTIO_BLK_SCSI */
-
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{
@@ -216,13 +136,6 @@ static inline void virtblk_request_done(struct request *req)
req->special_vec.bv_offset);
}
- switch (req_op(req)) {
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- virtblk_scsi_request_done(req);
- break;
- }
-
blk_mq_end_request(req, virtblk_result(vbr));
}
@@ -299,10 +212,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- type = VIRTIO_BLK_T_SCSI_CMD;
- break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
@@ -333,10 +242,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- if (blk_rq_is_scsi(req))
- err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
- else
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
@@ -404,7 +310,6 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
}
static const struct block_device_operations virtblk_fops = {
- .ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
};
@@ -683,9 +588,6 @@ static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
struct virtio_blk *vblk = set->driver_data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
-#ifdef CONFIG_VIRTIO_BLK_SCSI
- vbr->sreq.sense = vbr->sense;
-#endif
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}
@@ -698,23 +600,11 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
vblk->vdev, 0);
}
-#ifdef CONFIG_VIRTIO_BLK_SCSI
-static void virtblk_initialize_rq(struct request *req)
-{
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-
- scsi_req_init(&vbr->sreq);
-}
-#endif
-
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
-#ifdef CONFIG_VIRTIO_BLK_SCSI
- .initialize_rq_fn = virtblk_initialize_rq,
-#endif
.map_queues = virtblk_map_queues,
};
@@ -991,9 +881,6 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-#ifdef CONFIG_VIRTIO_BLK_SCSI
- VIRTIO_BLK_F_SCSI,
-#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 716b99aa2307..c2f71265af4b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -62,8 +62,8 @@
* IO workloads.
*/
-static int xen_blkif_max_buffer_pages = 1024;
-module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
+static int max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");
@@ -78,8 +78,8 @@ MODULE_PARM_DESC(max_buffer_pages,
* algorithm.
*/
-static int xen_blkif_max_pgrants = 1056;
-module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
+static int max_pgrants = 1056;
+module_param_named(max_persistent_grants, max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
"Maximum number of grants to map persistently");
@@ -88,8 +88,8 @@ MODULE_PARM_DESC(max_persistent_grants,
* use. The time is in seconds, 0 means indefinitely long.
*/
-static unsigned int xen_blkif_pgrant_timeout = 60;
-module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+static unsigned int pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
"Time in seconds an unused persistent grant is allowed to "
@@ -137,9 +137,8 @@ module_param(log_stats, int, 0644);
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
- return xen_blkif_pgrant_timeout &&
- (jiffies - persistent_gnt->last_used >=
- HZ * xen_blkif_pgrant_timeout);
+ return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
+ HZ * pgrant_timeout);
}
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
@@ -234,7 +233,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *this;
struct xen_blkif *blkif = ring->blkif;
- if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
+ if (ring->persistent_gnt_c >= max_pgrants) {
if (!blkif->vbd.overflow_max_grants)
blkif->vbd.overflow_max_grants = 1;
return -EBUSY;
@@ -397,14 +396,13 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
goto out;
}
- if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
- (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+ if (ring->persistent_gnt_c < max_pgrants ||
+ (ring->persistent_gnt_c == max_pgrants &&
!ring->blkif->vbd.overflow_max_grants)) {
num_clean = 0;
} else {
- num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
- num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
- num_clean;
+ num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
+ num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
num_clean = min(ring->persistent_gnt_c, num_clean);
pr_debug("Going to purge at least %u persistent grants\n",
num_clean);
@@ -599,8 +597,7 @@ static void print_stats(struct xen_blkif_ring *ring)
current->comm, ring->st_oo_req,
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
- ring->persistent_gnt_c,
- xen_blkif_max_pgrants);
+ ring->persistent_gnt_c, max_pgrants);
ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
@@ -656,8 +653,11 @@ purge_gnt_list:
ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
}
- /* Shrink if we have more than xen_blkif_max_buffer_pages */
- shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
+ /* Shrink the free pages pool if it is too large. */
+ if (time_before(jiffies, blkif->buffer_squeeze_end))
+ shrink_free_pagepool(ring, 0);
+ else
+ shrink_free_pagepool(ring, max_buffer_pages);
if (log_stats && time_after(jiffies, ring->st_print))
print_stats(ring);
@@ -884,7 +884,7 @@ again:
continue;
}
if (use_persistent_gnts &&
- ring->persistent_gnt_c < xen_blkif_max_pgrants) {
+ ring->persistent_gnt_c < max_pgrants) {
/*
* We are using persistent grants, the grant is
* not mapped but we might have room for it.
@@ -911,7 +911,7 @@ again:
pages[seg_idx]->persistent_gnt = persistent_gnt;
pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, ring->persistent_gnt_c,
- xen_blkif_max_pgrants);
+ max_pgrants);
goto next;
}
if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
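To make the purge sizing concrete: with the default max_pgrants = 1056 and assuming LRU_PERCENT_CLEAN is 5 (the constant is defined elsewhere in this file; its value is an assumption here), a ring holding 1100 persistent grants computes num_clean = (1056 / 100) * 5 = 50, then 1100 - 1056 + 50 = 94, and min(1100, 94) = 94, so the LRU pass tries to drop at least 94 grants.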
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 49132b0adbbe..a3eeccf3ac5f 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -319,6 +319,7 @@ struct xen_blkif {
/* All rings for this device. */
struct xen_blkif_ring *rings;
unsigned int nr_rings;
+ unsigned long buffer_squeeze_end;
};
struct seg_buf {
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index d6a6adfd5159..42944d41aea0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -190,6 +190,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
{
int err;
struct xen_blkif *blkif = ring->blkif;
+ const struct blkif_common_sring *sring_common;
+ RING_IDX rsp_prod, req_prod;
+ unsigned int size;
/* Already connected through? */
if (ring->irq)
@@ -200,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
if (err < 0)
return err;
+ sring_common = (struct blkif_common_sring *)ring->blk_ring;
+ rsp_prod = READ_ONCE(sring_common->rsp_prod);
+ req_prod = READ_ONCE(sring_common->req_prod);
+
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
- struct blkif_sring *sring;
- sring = (struct blkif_sring *)ring->blk_ring;
- BACK_RING_INIT(&ring->blk_rings.native, sring,
- XEN_PAGE_SIZE * nr_grefs);
+ struct blkif_sring *sring_native =
+ (struct blkif_sring *)ring->blk_ring;
+
+ BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
+ rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+ size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
- struct blkif_x86_32_sring *sring_x86_32;
- sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
- BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
- XEN_PAGE_SIZE * nr_grefs);
+ struct blkif_x86_32_sring *sring_x86_32 =
+ (struct blkif_x86_32_sring *)ring->blk_ring;
+
+ BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
+ rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+ size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
break;
}
case BLKIF_PROTOCOL_X86_64:
{
- struct blkif_x86_64_sring *sring_x86_64;
- sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
- BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
- XEN_PAGE_SIZE * nr_grefs);
+ struct blkif_x86_64_sring *sring_x86_64 =
+ (struct blkif_x86_64_sring *)ring->blk_ring;
+
+ BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
+ rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+ size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
break;
}
default:
BUG();
}
+ err = -EIO;
+ if (req_prod - rsp_prod > size)
+ goto fail;
+
err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
xen_blkif_be_int, 0,
"blkif-backend", ring);
- if (err < 0) {
- xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
- ring->blk_rings.common.sring = NULL;
- return err;
- }
+ if (err < 0)
+ goto fail;
ring->irq = err;
return 0;
+
+fail:
+ xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
+ ring->blk_rings.common.sring = NULL;
+ return err;
}
static int xen_blkif_disconnect(struct xen_blkif *blkif)
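For context, the new req_prod/rsp_prod check above guards BACK_RING_ATTACH against a frontend that published inconsistent producer indices, using the size computed via __RING_SIZE. A minimal sketch of the invariant being enforced (ring_indices_sane is a hypothetical helper, not part of the patch):

/* A shared ring is sane only if the number of outstanding requests
 * fits in the ring.  RING_IDX is unsigned, so the subtraction is
 * wrap-safe even after the counters roll over. */
static bool ring_indices_sane(RING_IDX req_prod, RING_IDX rsp_prod,
			      unsigned int ring_size)
{
	return (RING_IDX)(req_prod - rsp_prod) <= ring_size;
}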
@@ -448,7 +467,6 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
device_remove_file(&dev->dev, &dev_attr_physical_device);
}
-
static void xen_vbd_free(struct xen_vbd *vbd)
{
if (vbd->bdev)
@@ -505,6 +523,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
handle, blkif->domid);
return 0;
}
+
static int xen_blkbk_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -588,6 +607,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
if (err)
dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
+
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state)
{
@@ -672,7 +692,6 @@ fail:
return err;
}
-
/*
* Callback received when the hotplug scripts have placed the physical-device
* node. Read it and the mode node, and create a vbd. If the frontend is
@@ -764,7 +783,6 @@ static void backend_changed(struct xenbus_watch *watch,
}
}
-
/*
* Callback received when the frontend's state changes.
*/
@@ -839,9 +857,27 @@ static void frontend_changed(struct xenbus_device *dev,
}
}
+/* Once memory pressure is detected, squeeze the free page pools for a while. */
+static unsigned int buffer_squeeze_duration_ms = 10;
+module_param_named(buffer_squeeze_duration_ms,
+ buffer_squeeze_duration_ms, int, 0644);
+MODULE_PARM_DESC(buffer_squeeze_duration_ms,
+"Duration in ms to squeeze pages buffer when a memory pressure is detected");
-/* ** Connection ** */
+/*
+ * Callback received when memory pressure is detected.
+ */
+static void reclaim_memory(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ if (!be)
+ return;
+ be->blkif->buffer_squeeze_end = jiffies +
+ msecs_to_jiffies(buffer_squeeze_duration_ms);
+}
+/* ** Connection ** */
/*
* Write the physical details regarding the block device to the store, and
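The reclaim_memory() callback above opens a squeeze window that the worker loop earlier in this patch checks on every pass. A hedged sketch (shrink_target is a hypothetical helper) of how buffer_squeeze_end selects the shrink target:

/* While the squeeze window opened by reclaim_memory() is still
 * running, the worker drains the free page pool completely;
 * afterwards it only trims the pool back to max_buffer_pages. */
static unsigned int shrink_target(const struct xen_blkif *blkif)
{
	if (time_before(jiffies, blkif->buffer_squeeze_end))
		return 0;		/* under memory pressure */
	return max_buffer_pages;	/* steady state */
}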
@@ -1131,7 +1167,9 @@ static struct xenbus_driver xen_blkbk_driver = {
.ids = xen_blkbk_ids,
.probe = xen_blkbk_probe,
.remove = xen_blkbk_remove,
- .otherend_changed = frontend_changed
+ .otherend_changed = frontend_changed,
+ .allow_rebind = true,
+ .reclaim_memory = reclaim_memory,
};
int xen_blkif_xenbus_init(void)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a74d03913822..e2ad6bba2281 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -151,9 +151,6 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
#define BLK_RING_SIZE(info) \
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
-#define BLK_MAX_RING_SIZE \
- __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
-
/*
* ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
* characters are enough. Define to 20 to keep consistent with backend.
@@ -177,12 +174,12 @@ struct blkfront_ring_info {
unsigned int evtchn, irq;
struct work_struct work;
struct gnttab_free_callback callback;
- struct blk_shadow shadow[BLK_MAX_RING_SIZE];
struct list_head indirect_pages;
struct list_head grants;
unsigned int persistent_gnts_c;
unsigned long shadow_free;
struct blkfront_info *dev_info;
+ struct blk_shadow shadow[];
};
/*
@@ -1113,8 +1110,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (!VDEV_IS_EXTENDED(info->vdevice)) {
err = xen_translate_vdev(info->vdevice, &minor, &offset);
if (err)
- return err;
- nr_parts = PARTS_PER_DISK;
+ return err;
+ nr_parts = PARTS_PER_DISK;
} else {
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
@@ -1915,7 +1912,8 @@ static int negotiate_mq(struct blkfront_info *info)
info->nr_rings = 1;
info->rinfo = kvcalloc(info->nr_rings,
- sizeof(struct blkfront_ring_info),
+ struct_size(info->rinfo, shadow,
+ BLK_RING_SIZE(info)),
GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
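The rinfo allocation above replaces the fixed shadow[BLK_MAX_RING_SIZE] array with a flexible array sized by struct_size(). A self-contained userspace sketch of the pattern (toy types, assuming nothing from the driver):

#include <stdlib.h>

struct ring_info {
	unsigned int nr;	/* header fields ... */
	unsigned long shadow[];	/* flexible array member */
};

/* One allocation covering the header plus n trailing elements; in
 * the kernel, struct_size() computes this with overflow checking. */
static struct ring_info *ring_info_alloc(unsigned int n)
{
	struct ring_info *ri =
		calloc(1, sizeof(*ri) + n * sizeof(ri->shadow[0]));

	if (ri)
		ri->nr = n;
	return ri;
}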
@@ -2632,6 +2630,7 @@ static const struct block_device_operations xlvbd_block_fops =
.release = blkif_release,
.getgeo = blkif_getgeo,
.ioctl = blkif_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4285e75e52c3..1bdb5793842b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -207,14 +207,17 @@ static inline void zram_fill_page(void *ptr, unsigned long len,
static bool page_same_filled(void *ptr, unsigned long *element)
{
- unsigned int pos;
unsigned long *page;
unsigned long val;
+ unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
page = (unsigned long *)ptr;
val = page[0];
- for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+ if (val != page[last_pos])
+ return false;
+
+ for (pos = 1; pos < last_pos; pos++) {
if (val != page[pos])
return false;
}
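The reordering above makes page_same_filled() compare the last word first, since nearly-uniform pages tend to differ at the end. A hedged userspace model of the same logic:

#include <stdbool.h>
#include <stddef.h>

/* n is the number of machine words in the page (n >= 2).  Checking
 * p[n - 1] up front rejects most non-uniform pages with one compare. */
static bool same_filled(const unsigned long *p, size_t n)
{
	size_t i;

	if (p[0] != p[n - 1])
		return false;
	for (i = 1; i < n - 1; i++)
		if (p[0] != p[i])
			return false;
	return true;
}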
@@ -626,7 +629,7 @@ static ssize_t writeback_store(struct device *dev,
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
- ssize_t ret;
+ ssize_t ret = len;
int mode;
unsigned long blk_idx = 0;
@@ -762,7 +765,6 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
- ret = len;
__free_page(page);
release_init_lock:
up_read(&zram->init_lock);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 8e05706fe5d9..1f498f358f60 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -36,6 +36,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
+
bt_dev_err(hdev, "BCM: Reading device address failed (%d)", err);
return err;
}
@@ -107,6 +108,52 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
+int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "BCM: Read PCM int params failed (%d)", err);
+ return err;
+ }
+
+ if (skb->len != 6 || skb->data[0]) {
+ bt_dev_err(hdev, "BCM: Read PCM int params length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ if (params)
+ memcpy(params, skb->data + 1, 5);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_read_pcm_int_params);
+
+int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1c, 5, params, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "BCM: Write PCM int params failed (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btbcm_write_pcm_int_params);
+
int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
{
const struct hci_command_hdr *cmd;
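A hedged usage sketch of the two new helpers, mirroring how hci_bcm consumes them later in this diff (bcm_apply_pcm_int_params is a hypothetical wrapper): read the current parameters, overwrite them with the five configured bytes, write them back.

static int bcm_apply_pcm_int_params(struct hci_dev *hdev,
				    const u8 cfg[5])
{
	struct bcm_set_pcm_int_params params;
	int err;

	err = btbcm_read_pcm_int_params(hdev, &params);
	if (err)
		return err;

	memcpy(&params, cfg, 5);
	return btbcm_write_pcm_int_params(hdev, &params);
}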
@@ -177,6 +224,7 @@ static int btbcm_reset(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
+
bt_dev_err(hdev, "BCM: Reset failed (%d)", err);
return err;
}
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d204be8a84bf..014ef847a486 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -54,6 +54,10 @@ struct bcm_set_pcm_format_params {
int btbcm_check_bdaddr(struct hci_dev *hdev);
int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw);
+int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params);
+int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params);
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
@@ -74,6 +78,18 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
+static inline int btbcm_read_pcm_int_params(struct hci_dev *hdev,
+ struct bcm_set_pcm_int_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btbcm_write_pcm_int_params(struct hci_dev *hdev,
+ const struct bcm_set_pcm_int_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
{
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index f838537f9f89..577cfa3329db 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
* the end.
*/
len = patch_length;
- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
- GFP_KERNEL);
+ buf = kvmalloc(patch_length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
*_buf = buf;
@@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
if (ret < 0)
return ret;
ret = fw->size;
- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
- if (!*buff)
+ *buff = kvmalloc(fw->size, GFP_KERNEL);
+ if (*buff)
+ memcpy(*buff, fw->data, ret);
+ else
ret = -ENOMEM;
release_firmware(fw);
@@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
goto out;
if (btrtl_dev->cfg_len > 0) {
- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
if (!tbuff) {
ret = -ENOMEM;
goto out;
}
memcpy(tbuff, fw_data, ret);
- kfree(fw_data);
+ kvfree(fw_data);
memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
ret += btrtl_dev->cfg_len;
@@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
ret = rtl_download_firmware(hdev, fw_data, ret);
out:
- kfree(fw_data);
+ kvfree(fw_data);
return ret;
}
void btrtl_free(struct btrtl_device_info *btrtl_dev)
{
- kfree(btrtl_dev->fw_data);
- kfree(btrtl_dev->cfg_data);
+ kvfree(btrtl_dev->fw_data);
+ kvfree(btrtl_dev->cfg_data);
kfree(btrtl_dev);
}
EXPORT_SYMBOL_GPL(btrtl_free);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index fd9571d5fdac..199e8f7d426d 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -145,11 +145,20 @@ static int btsdio_rx_packet(struct btsdio_data *data)
data->hdev->stat.byte_rx += len;
- hci_skb_pkt_type(skb) = hdr[3];
-
- err = hci_recv_frame(data->hdev, skb);
- if (err < 0)
- return err;
+ switch (hdr[3]) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
+ hci_skb_pkt_type(skb) = hdr[3];
+ err = hci_recv_frame(data->hdev, skb);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ kfree_skb(skb);
+ return -EINVAL;
+ }
sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 70e385987d41..f5924f3e8b8d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -266,6 +266,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
@@ -552,9 +553,9 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
}
bt_dev_err(hdev, "Reset Realtek device via gpio");
- gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(200);
gpiod_set_value_cansleep(reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 0);
}
static inline void btusb_free_frags(struct btusb_data *data)
@@ -2602,7 +2603,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
* and being processed the events from there then.
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
- data->evt_skb = skb_clone(skb, GFP_KERNEL);
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb)
goto err_out;
}
@@ -2867,7 +2868,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- return err;
+ goto err_release_fw;
}
/* Wait a few moments for firmware activation done */
@@ -3832,6 +3833,10 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
+
+ err = usb_autopm_get_interface(intf);
+ if (err < 0)
+ goto out_free_dev;
}
if (id->driver_info & BTUSB_AMP) {
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index d2a6a4afdbbb..b236cb11c0dc 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
@@ -48,6 +49,15 @@
#define BCM_NUM_SUPPLIES 2
/**
+ * struct bcm_device_data - device specific data
+ * @no_early_set_baudrate: Disallow set baudrate before driver setup()
+ * @drive_rts_on_open: drive RTS signal on open, if requested
+ */
+struct bcm_device_data {
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
+};
+
+/**
* struct bcm_device - device driver resources
* @serdev_hu: HCI UART controller struct
* @list: bcm_device_list node
@@ -79,6 +89,7 @@
* @hu: pointer to HCI UART controller struct,
* used to disable flow control during runtime suspend and system sleep
* @is_suspended: whether flow control is currently disabled
+ * @no_early_set_baudrate: don't set_baudrate before setup()
*/
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
@@ -112,6 +123,9 @@ struct bcm_device {
struct hci_uart *hu;
bool is_suspended;
#endif
+ bool no_early_set_baudrate;
+ bool drive_rts_on_open;
+ u8 pcm_int_params[5];
};
/* generic bcm uart resources */
@@ -445,11 +459,22 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
- hci_uart_set_flow_control(hu, true);
+ if (bcm->dev->drive_rts_on_open)
+ hci_uart_set_flow_control(hu, true);
+
hu->init_speed = bcm->dev->init_speed;
- hu->oper_speed = bcm->dev->oper_speed;
+
+ /* If oper_speed is set, ldisc/serdev will set the baudrate
+ * before calling setup()
+ */
+ if (!bcm->dev->no_early_set_baudrate)
+ hu->oper_speed = bcm->dev->oper_speed;
+
err = bcm_gpio_set_power(bcm->dev, true);
- hci_uart_set_flow_control(hu, false);
+
+ if (bcm->dev->drive_rts_on_open)
+ hci_uart_set_flow_control(hu, false);
+
if (err)
goto err_unset_hu;
}
@@ -565,6 +590,8 @@ static int bcm_setup(struct hci_uart *hu)
/* Operational speed if any */
if (hu->oper_speed)
speed = hu->oper_speed;
+ else if (bcm->dev && bcm->dev->oper_speed)
+ speed = bcm->dev->oper_speed;
else if (hu->proto->oper_speed)
speed = hu->proto->oper_speed;
else
@@ -576,6 +603,16 @@ static int bcm_setup(struct hci_uart *hu)
host_set_baudrate(hu, speed);
}
+ /* PCM parameters if provided */
+ if (bcm->dev && bcm->dev->pcm_int_params[0] != 0xff) {
+ struct bcm_set_pcm_int_params params;
+
+ btbcm_read_pcm_int_params(hu->hdev, &params);
+
+ memcpy(&params, bcm->dev->pcm_int_params, 5);
+ btbcm_write_pcm_int_params(hu->hdev, &params);
+ }
+
finalize:
release_firmware(fw);
@@ -1113,6 +1150,10 @@ static int bcm_acpi_probe(struct bcm_device *dev)
static int bcm_of_probe(struct bcm_device *bdev)
{
device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed);
+ device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params",
+ bdev->pcm_int_params, 5);
+ bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup");
+
return 0;
}
@@ -1128,6 +1169,9 @@ static int bcm_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->irq = platform_get_irq(pdev, 0);
+ /* Initialize routing field to an unused value */
+ dev->pcm_int_params[0] = 0xff;
+
if (has_acpi_companion(&pdev->dev)) {
ret = bcm_acpi_probe(dev);
if (ret)
@@ -1374,6 +1418,7 @@ static struct platform_driver bcm_driver = {
static int bcm_serdev_probe(struct serdev_device *serdev)
{
struct bcm_device *bcmdev;
+ const struct bcm_device_data *data;
int err;
bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
@@ -1387,6 +1432,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
bcmdev->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, bcmdev);
+ /* Initialize routing field to an unused value */
+ bcmdev->pcm_int_params[0] = 0xff;
+
if (has_acpi_companion(&serdev->dev))
err = bcm_acpi_probe(bcmdev);
else
@@ -1408,6 +1456,12 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
if (err)
dev_err(&serdev->dev, "Failed to power down\n");
+ data = device_get_match_data(bcmdev->dev);
+ if (data) {
+ bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
+ bcmdev->drive_rts_on_open = data->drive_rts_on_open;
+ }
+
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
@@ -1419,12 +1473,21 @@ static void bcm_serdev_remove(struct serdev_device *serdev)
}
#ifdef CONFIG_OF
+static struct bcm_device_data bcm4354_device_data = {
+ .no_early_set_baudrate = true,
+};
+
+static struct bcm_device_data bcm43438_device_data = {
+ .drive_rts_on_open = true,
+};
+
static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm20702a1" },
+ { .compatible = "brcm,bcm4329-bt" },
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
- { .compatible = "brcm,bcm43438-bt" },
- { .compatible = "brcm,bcm43540-bt" },
+ { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
+ { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
{ },
};
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 19ba52005009..6dc1fbeb564b 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -103,6 +103,7 @@ static const struct h4_recv_pkt h4_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
};
/* Recv data */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index dacf297baf59..0b14547482a7 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -385,6 +385,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);
/* Remove Three-wire header */
@@ -594,6 +595,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
break;
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
skb_queue_tail(&h5->unrel, skb);
break;
@@ -636,6 +638,7 @@ static bool valid_packet_type(u8 type)
case HCI_ACLDATA_PKT:
case HCI_COMMAND_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
case HCI_3WIRE_LINK_PKT:
case HCI_3WIRE_ACK_PKT:
return true;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f10bdf8e1fc5..d6e0c99ee5eb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
@@ -46,6 +47,7 @@
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
+#define MEMDUMP_TIMEOUT_MS 8000
/* susclk rate */
#define SUSCLK_RATE_32KHZ 32768
@@ -53,12 +55,24 @@
/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC
+/* max retry count when init fails */
+#define MAX_INIT_RETRIES 3
+
+/* Controller dump header */
+#define QCA_SSR_DUMP_HANDLE 0x0108
+#define QCA_DUMP_PACKET_SIZE 255
+#define QCA_LAST_SEQUENCE_NUM 0xFFFF
+#define QCA_CRASHBYTE_PACKET_LEN 1096
+#define QCA_MEMDUMP_BYTE 0xFB
+
enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
+ QCA_MEMDUMP_COLLECTION
};
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -81,11 +95,40 @@ enum hci_ibs_clock_state_vote {
HCI_IBS_RX_VOTE_CLOCK_OFF,
};
+/* Controller memory dump states */
+enum qca_memdump_states {
+ QCA_MEMDUMP_IDLE,
+ QCA_MEMDUMP_COLLECTING,
+ QCA_MEMDUMP_COLLECTED,
+ QCA_MEMDUMP_TIMEOUT,
+};
+
+struct qca_memdump_data {
+ char *memdump_buf_head;
+ char *memdump_buf_tail;
+ u32 current_seq_no;
+ u32 received_dump;
+};
+
+struct qca_memdump_event_hdr {
+ __u8 evt;
+ __u8 plen;
+ __u16 opcode;
+ __u16 seq_no;
+ __u8 reserved;
+} __packed;
+
+
+struct qca_dump_size {
+ u32 dump_size;
+} __packed;
+
struct qca_data {
struct hci_uart *hu;
struct sk_buff *rx_skb;
struct sk_buff_head txq;
struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
+ struct sk_buff_head rx_memdump_q; /* Memdump wait queue */
spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
u8 rx_ibs_state; /* HCI_IBS receive side power state */
@@ -95,14 +138,18 @@ struct qca_data {
u32 tx_idle_delay;
struct timer_list wake_retrans_timer;
u32 wake_retrans;
+ struct timer_list memdump_timer;
struct workqueue_struct *workqueue;
struct work_struct ws_awake_rx;
struct work_struct ws_awake_device;
struct work_struct ws_rx_vote_off;
struct work_struct ws_tx_vote_off;
+ struct work_struct ctrl_memdump_evt;
+ struct qca_memdump_data *qca_memdump;
unsigned long flags;
struct completion drop_ev_comp;
wait_queue_head_t suspend_wait_q;
+ enum qca_memdump_states memdump_state;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -167,6 +214,7 @@ static int qca_regulator_enable(struct qca_serdev *qcadev);
static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
+static void qca_controller_memdump(struct work_struct *work);
static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
@@ -474,12 +522,28 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
hci_uart_tx_wakeup(hu);
}
+static void hci_memdump_timeout(struct timer_list *t)
+{
+ struct qca_data *qca = from_timer(qca, t, memdump_timer);
+ struct hci_uart *hu = qca->hu;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = qca_memdump->memdump_buf_tail;
+
+ bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+ /* Inject hw error event to reset the device and driver. */
+ hci_reset_dev(hu->hdev);
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ del_timer(&qca->memdump_timer);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+}
+
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
struct qca_data *qca;
- int ret;
BT_DBG("hu %p qca_open", hu);
@@ -492,6 +556,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->txq);
skb_queue_head_init(&qca->tx_wait_q);
+ skb_queue_head_init(&qca->rx_memdump_q);
spin_lock_init(&qca->hci_ibs_lock);
qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
@@ -504,7 +569,7 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
-
+ INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
init_waitqueue_head(&qca->suspend_wait_q);
qca->hu = hu;
@@ -519,23 +584,10 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
if (hu->serdev) {
-
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (!qca_is_wcn399x(qcadev->btsoc_type)) {
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
- /* Controller needs time to bootup. */
- msleep(150);
- } else {
+ if (qca_is_wcn399x(qcadev->btsoc_type)) {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
- ret = qca_regulator_enable(qcadev);
- if (ret) {
- destroy_workqueue(qca->workqueue);
- kfree_skb(qca->rx_skb);
- hu->priv = NULL;
- kfree(qca);
- return ret;
- }
}
}
@@ -544,6 +596,7 @@ static int qca_open(struct hci_uart *hu)
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
+ timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -613,7 +666,6 @@ static int qca_flush(struct hci_uart *hu)
/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
- struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
BT_DBG("hu %p qca close", hu);
@@ -622,19 +674,14 @@ static int qca_close(struct hci_uart *hu)
skb_queue_purge(&qca->tx_wait_q);
skb_queue_purge(&qca->txq);
+ skb_queue_purge(&qca->rx_memdump_q);
del_timer(&qca->tx_idle_timer);
del_timer(&qca->wake_retrans_timer);
+ del_timer(&qca->memdump_timer);
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
- if (hu->serdev) {
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qca_is_wcn399x(qcadev->btsoc_type))
- qca_power_shutdown(hu);
- else
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
-
- }
+ qca_power_shutdown(hu);
kfree_skb(qca->rx_skb);
@@ -900,6 +947,125 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
+static void qca_controller_memdump(struct work_struct *work)
+{
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ctrl_memdump_evt);
+ struct hci_uart *hu = qca->hu;
+ struct sk_buff *skb;
+ struct qca_memdump_event_hdr *cmd_hdr;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ struct qca_dump_size *dump;
+ char *memdump_buf;
+ char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
+ u16 seq_no;
+ u32 dump_size;
+
+ while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
+
+ if (!qca_memdump) {
+ qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
+ GFP_ATOMIC);
+ if (!qca_memdump)
+ return;
+
+ qca->qca_memdump = qca_memdump;
+ }
+
+ qca->memdump_state = QCA_MEMDUMP_COLLECTING;
+ cmd_hdr = (void *) skb->data;
+ seq_no = __le16_to_cpu(cmd_hdr->seq_no);
+ skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
+
+ if (!seq_no) {
+
+ /* This is the first frame of the memdump packet from
+ * the controller. Disable IBS so the dump is received
+ * without interruption; the controller ideally needs
+ * 8 seconds to send the whole dump, so start a timer
+ * to handle this asynchronous activity.
+ */
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ dump = (void *) skb->data;
+ dump_size = __le32_to_cpu(dump->dump_size);
+ if (!(dump_size)) {
+ bt_dev_err(hu->hdev, "Rx invalid memdump size");
+ kfree_skb(skb);
+ return;
+ }
+
+ bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
+ dump_size);
+ mod_timer(&qca->memdump_timer, (jiffies +
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));
+
+ skb_pull(skb, sizeof(dump_size));
+ memdump_buf = vmalloc(dump_size);
+ qca_memdump->memdump_buf_head = memdump_buf;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ }
+
+ memdump_buf = qca_memdump->memdump_buf_tail;
+
+ /* If sequence number 0 was missed then there is no
+ * point in accepting the remaining sequences.
+ */
+ if (!memdump_buf) {
+ bt_dev_err(hu->hdev, "QCA: Discarding other packets");
+ kfree(qca_memdump);
+ kfree_skb(skb);
+ qca->qca_memdump = NULL;
+ return;
+ }
+
+ /* Some packets from the controller may be missed. In
+ * that case store dummy packets in the buffer so the
+ * following packets land at the right offset.
+ */
+ while ((seq_no > qca_memdump->current_seq_no + 1) &&
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
+ qca_memdump->current_seq_no);
+ memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
+ memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
+ qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
+ qca_memdump->current_seq_no++;
+ }
+
+ memcpy(memdump_buf, (unsigned char *) skb->data, skb->len);
+ memdump_buf = memdump_buf + skb->len;
+ qca_memdump->memdump_buf_tail = memdump_buf;
+ qca_memdump->current_seq_no = seq_no + 1;
+ qca_memdump->received_dump += skb->len;
+ qca->qca_memdump = qca_memdump;
+ kfree_skb(skb);
+ if (seq_no == QCA_LAST_SEQUENCE_NUM) {
+ bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
+ qca_memdump->received_dump);
+ memdump_buf = qca_memdump->memdump_buf_head;
+ dev_coredumpv(&hu->serdev->dev, memdump_buf,
+ qca_memdump->received_dump, GFP_KERNEL);
+ del_timer(&qca->memdump_timer);
+ kfree(qca->qca_memdump);
+ qca->qca_memdump = NULL;
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ }
+ }
+
+}
+
+int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ skb_queue_tail(&qca->rx_memdump_q, skb);
+ queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
+
+ return 0;
+}
+
static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
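The gap handling in qca_controller_memdump() above pads missed sequence numbers with zeroed blocks so that later packets still land at the right offset (the driver additionally special-cases the final sequence number). A reduced sketch of that step, with pad_missed_packets as a hypothetical helper:

/* Advance the write cursor over blk-byte zero blocks until the
 * buffer is positioned for sequence number seq. */
static char *pad_missed_packets(char *dst, u32 *cur_seq, u16 seq,
				size_t blk)
{
	while (seq > *cur_seq + 1) {
		memset(dst, 0, blk);
		dst += blk;
		(*cur_seq)++;
	}
	return dst;
}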
@@ -925,6 +1091,14 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+ /* We receive the chip memory dump as event packets with
+ * a dedicated handle, followed by a hardware error event.
+ * When this event is received we store the dump before
+ * closing hci. This dump will help in triaging the issue.
+ */
+ if ((skb->data[0] == HCI_VENDOR_PKT) &&
+ (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
+ return qca_controller_memdump_event(hdev, skb);
return hci_recv_frame(hdev, skb);
}
@@ -1203,6 +1377,91 @@ error:
return ret;
}
+static int qca_send_crashbuffer(struct hci_uart *hu)
+{
+ struct qca_data *qca = hu->priv;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
+ return -ENOMEM;
+ }
+
+ /* We forcefully crash the controller by sending the 0xfb
+ * byte 1024 times. Since some of that data might get lost,
+ * to be on the safe side we send 1096 bytes to the SoC.
+ */
+ memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
+ QCA_CRASHBYTE_PACKET_LEN);
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+ bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
+ skb_queue_tail(&qca->txq, skb);
+ hci_uart_tx_wakeup(hu);
+
+ return 0;
+}
+
+static void qca_wait_for_dump_collection(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = NULL;
+
+ wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
+ TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
+
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
+ if (qca_memdump)
+ memdump_buf = qca_memdump->memdump_buf_tail;
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ del_timer(&qca->memdump_timer);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+ }
+}
+
+static void qca_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
+
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ /* If the hardware error event was received for something
+ * other than a QCA SoC memory dump event, we must crash
+ * the SoC ourselves and wait here for up to 8 seconds for
+ * the dump packets. This blocks the calling thread until
+ * the dump has been collected.
+ */
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ qca_send_crashbuffer(hu);
+ qca_wait_for_dump_collection(hdev);
+ } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
+ /* Wait here until the memory dump is collected or the
+ * memdump timer expires.
+ */
+ bt_dev_info(hdev, "waiting for dump to complete");
+ qca_wait_for_dump_collection(hdev);
+ }
+}
+
+static void qca_cmd_timeout(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE)
+ qca_send_crashbuffer(hu);
+ else
+ bt_dev_info(hdev, "Dump collection is in process");
+}
+
static int qca_wcn3990_init(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1253,11 +1512,37 @@ static int qca_wcn3990_init(struct hci_uart *hu)
return 0;
}
+static int qca_power_on(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
+ struct qca_serdev *qcadev;
+ int ret = 0;
+
+ /* A non-serdev device is usually powered externally and
+ * needs no additional power-on action in the driver.
+ */
+ if (!hu->serdev)
+ return 0;
+
+ if (qca_is_wcn399x(soc_type)) {
+ ret = qca_wcn3990_init(hu);
+ } else {
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to bootup. */
+ msleep(150);
+ }
+
+ return ret;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+ unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
@@ -1275,24 +1560,21 @@ static int qca_setup(struct hci_uart *hu)
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- if (qca_is_wcn399x(soc_type)) {
- bt_dev_info(hdev, "setting up wcn3990");
+ bt_dev_info(hdev, "setting up %s",
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME");
- /* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
- * setup for every hci up.
- */
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+retry:
+ ret = qca_power_on(hdev);
+ if (ret)
+ return ret;
+
+ if (qca_is_wcn399x(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- hu->hdev->shutdown = qca_power_off;
- ret = qca_wcn3990_init(hu);
- if (ret)
- return ret;
ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
if (ret)
return ret;
} else {
- bt_dev_info(hdev, "ROME setup");
qca_set_speed(hu, QCA_INIT_SPEED);
}
@@ -1320,6 +1602,8 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ hu->hdev->hw_error = qca_hw_error;
+ hu->hdev->cmd_timeout = qca_cmd_timeout;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
@@ -1329,6 +1613,20 @@ static int qca_setup(struct hci_uart *hu)
* patch/nvm-config is found, so run with original fw/config.
*/
ret = 0;
+ } else {
+ if (retries < MAX_INIT_RETRIES) {
+ qca_power_shutdown(hu);
+ if (hu->serdev) {
+ serdev_device_close(hu->serdev);
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hdev, "failed to open port");
+ return ret;
+ }
+ }
+ retries++;
+ goto retry;
+ }
}
/* Setup bdaddr */
@@ -1393,6 +1691,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
qcadev = serdev_device_get_drvdata(hu->serdev);
@@ -1405,20 +1704,36 @@ static void qca_power_shutdown(struct hci_uart *hu)
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
- host_set_baudrate(hu, 2400);
- qca_send_power_pulse(hu, false);
- qca_regulator_disable(qcadev);
+ hu->hdev->hw_error = NULL;
+ hu->hdev->cmd_timeout = NULL;
+
+ /* A non-serdev device is usually powered externally and
+ * needs no additional power-down action in the driver.
+ */
+ if (!hu->serdev)
+ return;
+
+ if (qca_is_wcn399x(soc_type)) {
+ host_set_baudrate(hu, 2400);
+ qca_send_power_pulse(hu, false);
+ qca_regulator_disable(qcadev);
+ } else {
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ }
}
static int qca_power_off(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
- /* Perform pre shutdown command */
- qca_send_pre_shutdown_cmd(hdev);
-
- usleep_range(8000, 10000);
+ /* Don't send the pre-shutdown command if the SoC has crashed. */
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ qca_send_pre_shutdown_cmd(hdev);
+ usleep_range(8000, 10000);
+ }
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
qca_power_shutdown(hu);
return 0;
}
@@ -1493,6 +1808,7 @@ static int qca_init_regulators(struct qca_power *qca,
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
+ struct hci_dev *hdev;
const struct qca_vreg_data *data;
int err;
@@ -1501,7 +1817,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->serdev_hu.serdev = serdev;
- data = of_device_get_match_data(&serdev->dev);
+ data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
device_property_read_string(&serdev->dev, "firmware-name",
&qcadev->firmware_name);
@@ -1518,7 +1834,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
data->num_vregs);
if (err) {
BT_ERR("Failed to init regulators:%d", err);
- goto out;
+ return err;
}
qcadev->bt_power->vregs_on = false;
@@ -1531,7 +1847,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
- goto out;
+ return err;
}
} else {
qcadev->btsoc_type = QCA_ROME;
@@ -1557,12 +1873,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
- if (err)
+ if (err) {
+ BT_ERR("Rome serdev registration failed");
clk_disable_unprepare(qcadev->susclk);
+ return err;
+ }
}
-out: return err;
+ hdev = qcadev->serdev_hu.hdev;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hdev->shutdown = qca_power_off;
+ return 0;
}
static void qca_serdev_remove(struct serdev_device *serdev)
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 6ab631101019..4e039d7a16f8 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -143,6 +143,13 @@ struct h4_recv_pkt {
.lsize = 1, \
.maxlen = HCI_MAX_EVENT_SIZE
+#define H4_RECV_ISO \
+ .type = HCI_ISODATA_PKT, \
+ .hlen = HCI_ISO_HDR_SIZE, \
+ .loff = 2, \
+ .lsize = 2, \
+ .maxlen = HCI_MAX_FRAME_SIZE \
+
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
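For orientation, H4_RECV_ISO tells h4_recv_buf() where the payload length lives: a 2-byte little-endian field at offset 2 of the 4-byte ISO header. A hedged sketch of the length decode the descriptor drives (h4_payload_len is illustrative, not the actual h4_recv_buf() body):

static size_t h4_payload_len(const u8 *hdr, u8 loff, u8 lsize)
{
	switch (lsize) {
	case 0:				/* fixed-size packet */
		return 0;
	case 1:				/* single length octet */
		return hdr[loff];
	default:			/* little-endian 16 bits */
		return hdr[loff] | (hdr[loff + 1] << 8);
	}
}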
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 65e41c1d760f..8ab26dec5f6e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -178,6 +178,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
if (!data->hdev) {
kfree_skb(skb);
return -ENODEV;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 50200d1c06ea..6095b6df8a81 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -139,7 +139,6 @@ config TEGRA_ACONNECT
tristate "Tegra ACONNECT Bus Driver"
depends on ARCH_TEGRA_210_SOC
depends on OF && PM
- select PM_CLK
help
Driver for the Tegra ACONNECT bus which is used to interface with
the devices inside the Audio Processing Engine (APE) for Tegra210.
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a07cc19becdb..c78d10ea641f 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -715,9 +715,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
{
struct fsl_mc_device *mc_bus_dev, *endpoint;
- struct fsl_mc_obj_desc endpoint_desc = { 0 };
- struct dprc_endpoint endpoint1 = { 0 };
- struct dprc_endpoint endpoint2 = { 0 };
+ struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+ struct dprc_endpoint endpoint1 = {{ 0 }};
+ struct dprc_endpoint endpoint2 = {{ 0 }};
int state, err;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index d9629fc13a15..6ae48ad80409 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -97,12 +97,12 @@ int __must_check fsl_create_mc_io(struct device *dev,
return -EBUSY;
}
- mc_portal_virt_addr = devm_ioremap_nocache(dev,
+ mc_portal_virt_addr = devm_ioremap(dev,
mc_portal_phys_addr,
mc_portal_size);
if (!mc_portal_virt_addr) {
dev_err(dev,
- "devm_ioremap_nocache failed for MC portal %pa\n",
+ "devm_ioremap failed for MC portal %pa\n",
&mc_portal_phys_addr);
return -ENXIO;
}
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 36cf13eee6b8..b20fdcbd035b 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -102,12 +102,11 @@ static int moxtet_match(struct device *dev, struct device_driver *drv)
return 0;
}
-struct bus_type moxtet_bus_type = {
+static struct bus_type moxtet_bus_type = {
.name = "moxtet",
.dev_groups = moxtet_dev_groups,
.match = moxtet_match,
};
-EXPORT_SYMBOL_GPL(moxtet_bus_type);
int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv)
@@ -466,7 +465,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
- u8 hex[sizeof(buf) * 2 + 1];
+ u8 hex[sizeof(bin) * 2 + 1];
int ret, n;
ret = moxtet_spi_read(moxtet, bin);
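The moxtet fix above is the classic sizeof-on-a-parameter trap: buf is a char __user * in that function, so sizeof(buf) yields the size of a pointer, not of the destination array. A minimal illustration:

#include <stdio.h>

static void demo(char buf[64])		/* decays to char * */
{
	char bin[10];
	char hex[sizeof(bin) * 2 + 1];	/* 21 bytes, as intended */

	/* sizeof(buf) is sizeof(char *), often 8 -- never 64. */
	printf("%zu %zu\n", sizeof(buf), sizeof(hex));
}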
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 56887c6877a7..f702c85c81b6 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata)
return -EINVAL;
}
+ /* Always add a slot for main clocks fck and ick even if unused */
+ if (!nr_fck)
+ ddata->nr_clocks++;
+ if (!nr_ick)
+ ddata->nr_clocks++;
+
ddata->clocks = devm_kcalloc(ddata->dev,
ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
@@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata)
struct clk *clock;
int i, error;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return 0;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
struct clk *clock;
int i;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -473,7 +479,7 @@ static void sysc_clkdm_deny_idle(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
- if (ddata->legacy_mode)
+ if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
return;
pdata = dev_get_platdata(ddata->dev);
@@ -485,7 +491,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
- if (ddata->legacy_mode)
+ if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
return;
pdata = dev_get_platdata(ddata->dev);
@@ -503,10 +509,8 @@ static int sysc_init_resets(struct sysc *ddata)
{
ddata->rsts =
devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
- if (IS_ERR(ddata->rsts))
- return PTR_ERR(ddata->rsts);
- return 0;
+ return PTR_ERR_OR_ZERO(ddata->rsts);
}
/**
@@ -981,7 +985,8 @@ static int sysc_disable_module(struct device *dev)
return ret;
}
- if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+ if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
+ ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
best_mode = SYSC_IDLE_FORCE;
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
@@ -1209,10 +1214,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
- SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000030, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
@@ -1244,6 +1245,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* Quirks that need to be set based on detected module */
SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
SYSC_MODULE_QUIRK_AESS),
+ SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
SYSC_MODULE_QUIRK_HDQ1W),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
@@ -1583,6 +1590,10 @@ static int sysc_reset(struct sysc *ddata)
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
+ if (ddata->cfg.srst_udelay)
+ usleep_range(ddata->cfg.srst_udelay,
+ ddata->cfg.srst_udelay * 2);
+
if (ddata->clk_enable_quirk)
ddata->clk_enable_quirk(ddata);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index eebdcbef0578..faca0f346fff 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3017,9 +3017,31 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
struct cdrom_read_audio ra;
int lba;
- if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
- sizeof(ra)))
- return -EFAULT;
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_cdrom_read_audio {
+ union cdrom_addr addr;
+ u8 addr_format;
+ compat_int_t nframes;
+ compat_caddr_t buf;
+ } ra32;
+
+ if (copy_from_user(&ra32, arg, sizeof(ra32)))
+ return -EFAULT;
+
+ ra = (struct cdrom_read_audio) {
+ .addr = ra32.addr,
+ .addr_format = ra32.addr_format,
+ .nframes = ra32.nframes,
+ .buf = compat_ptr(ra32.buf),
+ };
+ } else
+#endif
+ {
+ if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
+ sizeof(ra)))
+ return -EFAULT;
+ }
if (ra.addr_format == CDROM_MSF)
lba = msf_to_lba(ra.addr.msf.minute,
@@ -3271,9 +3293,10 @@ static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
ret = cdrom_get_last_written(cdi, &last);
if (ret)
return ret;
- if (copy_to_user((long __user *)arg, &last, sizeof(last)))
- return -EFAULT;
- return 0;
+ if (in_compat_syscall())
+ return put_user(last, (__s32 __user *)arg);
+
+ return put_user(last, (long __user *)arg);
}
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
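Both cdrom hunks above use the in_compat_syscall() idiom: declare the 32-bit user layout, copy it in, then widen it into the native struct, converting compat_caddr_t with compat_ptr(). A reduced sketch with hypothetical struct and function names:

struct req { int nframes; void __user *buf; };

static int get_req(struct req *req, void __user *arg)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct { compat_int_t nframes; compat_caddr_t buf; } r32;

		if (copy_from_user(&r32, arg, sizeof(r32)))
			return -EFAULT;
		req->nframes = r32.nframes;
		req->buf = compat_ptr(r32.buf);
		return 0;
	}
#endif
	return copy_from_user(req, arg, sizeof(*req)) ? -EFAULT : 0;
}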
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 5b21dc421c94..886b2638c730 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -518,6 +518,9 @@ static const struct block_device_operations gdrom_bdops = {
.release = gdrom_bdops_release,
.check_events = gdrom_bdops_check_events,
.ioctl = gdrom_bdops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+#endif
};
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index ab154a75acf0..9e84239f88d4 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -941,7 +941,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table = (u32 __iomem *)table;
#else
- bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+ bridge->gatt_table = ioremap(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6271ce250b3..66a62d17a3f5 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1087,7 +1087,7 @@ static void intel_i9xx_setup_flush(void)
}
if (intel_private.ifp_resource.start)
- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
if (!intel_private.i9xx_flush_page)
dev_err(&intel_private.pcidev->dev,
"can't ioremap flush page - no chipset flushing\n");
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index 31c374b1b91b..7ecf20a6d19c 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
unsigned int cdev = 0;
u32 mnistat, tnistat, tstatus, mcmd;
u16 tnicmd, mnicmd;
- u8 mcapndx;
u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
u32 step, rem, rem_isoch, rem_async;
int ret = 0;
@@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
cur = list_entry(pos, struct agp_3_5_dev, list);
dev = cur->dev;
- mcapndx = cur->capndx;
-
pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
master[cdev].maxbw = (mnistat >> 16) & 0xff;
@@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
cur = master[cdev].dev;
dev = cur->dev;
- mcapndx = cur->capndx;
-
master[cdev].rq += (cdev == ndevs - 1)
? (rem_async + rem_isoch) : step;
@@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
{
struct pci_dev *td = bridge->dev, *dev = NULL;
u8 mcapndx;
- u32 isoch, arqsz;
+ u32 isoch;
u32 tstatus, mstatus, ncapid;
u32 mmajor;
u16 mpstat;
@@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
if (isoch == 0) /* isoch xfers not available, bail out. */
return -ENODEV;
- arqsz = (tstatus >> 13) & 0x7;
-
/*
* Allocate a head for our AGP 3.5 device list
* (multiple AGP v3 devices are allowed behind a single bridge).
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index eb108b3c619a..51121a4b82c7 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -204,7 +204,7 @@ static int __init applicom_init(void)
if (pci_enable_device(dev))
return -EIO;
- RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
+ RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -259,7 +259,7 @@ static int __init applicom_init(void)
/* Now try the specified ISA cards */
for (i = 0; i < MAX_ISA_BOARD; i++) {
- RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+ RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ac6671bb514..ed3b7dab678d 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -110,7 +110,7 @@ struct hpets {
unsigned long hp_delta;
unsigned int hp_ntimer;
unsigned int hp_which;
- struct hpet_dev hp_dev[1];
+ struct hpet_dev hp_dev[];
};
static struct hpets *hpets;
@@ -855,7 +855,7 @@ int hpet_alloc(struct hpet_data *hdp)
return 0;
}
- hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
+ hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
GFP_KERNEL);
if (!hpetp)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8486c29d8324..914e293ba62b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index d2a5791eb49f..cbf5eaea662c 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -157,7 +157,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
/* Clock is optional on most platforms */
priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
priv->rng.name = pdev->name;
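The simplification above relies on a property of error pointers: PTR_ERR() is only a cast, and a valid (or NULL) pointer can never land in the reserved top-page errno range, so comparing against -EPROBE_DEFER alone implies IS_ERR(). Roughly:

/* PTR_ERR(valid_ptr) is never a small negative errno, so this test
 * is equivalent to IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER. */
static bool need_defer(struct clk *clk)
{
	return PTR_ERR(clk) == -EPROBE_DEFER;
}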
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 290c880266bf..9f205bd1acc0 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -317,7 +317,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
return -EBUSY;
}
- intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+ intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
if (intel_rng_hw->mem == NULL)
return -EBUSY;
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 899ff25f4f28..32d9fe61a225 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -213,6 +213,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 8c78aa090492..7be8067ac4e8 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -81,13 +81,13 @@ static int octeon_rng_probe(struct platform_device *pdev)
return -ENOENT;
- rng->control_status = devm_ioremap_nocache(&pdev->dev,
+ rng->control_status = devm_ioremap(&pdev->dev,
res_ports->start,
sizeof(u64));
if (!rng->control_status)
return -ENOENT;
- rng->result = devm_ioremap_nocache(&pdev->dev,
+ rng->result = devm_ioremap(&pdev->dev,
res_result->start,
sizeof(u64));
if (!rng->result)
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 0ed07d16ec8e..65952393e1bb 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -476,7 +476,7 @@ static int omap_rng_probe(struct platform_device *pdev)
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(priv->clk)) {
ret = clk_prepare_enable(priv->clk);
@@ -488,7 +488,7 @@ static int omap_rng_probe(struct platform_device *pdev)
}
priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
- if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
+ if (PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(priv->clk_reg)) {
ret = clk_prepare_enable(priv->clk_reg);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 1ff4fb1def7c..382b28f1cf2f 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -19,7 +19,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
-#define MAX_MSG_LEN 128
+#define MAX_MSG_LEN 240
#define IPMB_REQUEST_LEN_MIN 7
#define NETFN_RSP_BIT_MASK 0x4
#define REQUEST_QUEUE_MAX_LEN 256
@@ -63,6 +63,7 @@ struct ipmb_dev {
spinlock_t lock;
wait_queue_head_t wait_queue;
struct mutex file_mutex;
+ bool is_i2c_protocol;
};
static inline struct ipmb_dev *to_ipmb_dev(struct file *file)
@@ -112,6 +113,25 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
return ret < 0 ? ret : count;
}
+static int ipmb_i2c_write(struct i2c_client *client, u8 *msg, u8 addr)
+{
+ struct i2c_msg i2c_msg;
+
+ /*
+ * subtract 1 byte (rq_sa) from the length of the msg passed to
+ * raw i2c_transfer
+ */
+ i2c_msg.len = msg[IPMB_MSG_LEN_IDX] - 1;
+
+ /* Assign message to buffer except first 2 bytes (length and address) */
+ i2c_msg.buf = msg + 2;
+
+ i2c_msg.addr = addr;
+ i2c_msg.flags = client->flags & I2C_CLIENT_PEC;
+
+ return i2c_transfer(client->adapter, &i2c_msg, 1);
+}
+
static ssize_t ipmb_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -133,6 +153,12 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
netf_rq_lun = msg[NETFN_LUN_IDX];
+ /* Check i2c block transfer vs smbus */
+ if (ipmb_dev->is_i2c_protocol) {
+ ret = ipmb_i2c_write(ipmb_dev->client, msg, rq_sa);
+ return (ret == 1) ? count : ret;
+ }
+
/*
* subtract rq_sa and netf_rq_lun from the length of the msg passed to
* i2c_smbus_xfer
@@ -253,7 +279,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
+ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
break;
buf[++ipmb_dev->msg_idx] = *val;
@@ -302,6 +328,9 @@ static int ipmb_probe(struct i2c_client *client,
if (ret)
return ret;
+ ipmb_dev->is_i2c_protocol
+ = device_property_read_bool(&client->dev, "i2c-protocol");
+
ipmb_dev->client = client;
i2c_set_clientdata(client, ipmb_dev);
ret = i2c_slave_register(client, ipmb_slave_cb);
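[Editor's note] The new i2c-protocol path bypasses the SMBus layer, presumably because SMBus block writes are capped at I2C_SMBUS_BLOCK_MAX (32 bytes) while MAX_MSG_LEN now allows 240-byte IPMB frames. A self-contained sketch of the raw-write message as ipmb_i2c_write() builds it (buffer layout assumed from the patch):

    /* Hypothetical standalone version of the raw-write path */
    static int example_raw_write(struct i2c_client *client,
                                 u8 *msg, u8 msg_len, u8 rq_sa)
    {
            struct i2c_msg m = {
                    .addr  = rq_sa,        /* 7-bit responder address */
                    .flags = 0,            /* plain I2C write */
                    .len   = msg_len - 1,  /* rq_sa travels in .addr, not .buf */
                    .buf   = msg + 2,      /* skip the length and address bytes */
            };

            /* returns the number of messages transferred, i.e. 1 on success */
            return i2c_transfer(client->adapter, &m, 1);
    }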
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 22c6a2e61236..8ac390c2b514 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
msg = ssif_info->curr_msg;
if (msg) {
+ if (data) {
+ if (len > IPMI_MAX_MSG_LENGTH)
+ len = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, len);
+ } else {
+ len = 0;
+ }
msg->rsp_size = len;
- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
- msg->rsp_size = IPMI_MAX_MSG_LENGTH;
- memcpy(msg->rsp, data, msg->rsp_size);
ssif_info->curr_msg = NULL;
}
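[Editor's note] The reordering above is a robustness fix: msg_done_handler() can be called with data == NULL on error paths, and the old code passed data to memcpy() unconditionally. The fixed version copies only when a payload exists and records length 0 otherwise, clamping first. The idiom as a generic helper (a sketch, not driver code):

    /* Clamp-and-copy: tolerate a missing payload and an oversized length */
    static size_t copy_bounded(void *dst, size_t dst_size,
                               const void *src, size_t len)
    {
            if (!src)
                    return 0;
            if (len > dst_size)   /* never trust a device-reported length */
                    len = dst_size;
            memcpy(dst, src, len);
            return len;
    }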
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 82f9a6a814ae..e342daa73d1b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4169,7 +4169,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
MGSLPC_INFO *info = dev_to_port(dev);
unsigned long flags;
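[Editor's note] This signature change tracks a v5.6 core change: the netdev_ops .ndo_tx_timeout callback gained an unsigned int txqueue argument identifying the stalled queue, and every driver was converted in one sweep. Sketch of the new prototype (hypothetical driver):

    static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            netdev_warn(dev, "tx timeout on queue %u\n", txqueue);
            /* reset the hardware, then let the stack retransmit */
            netif_wake_queue(dev);
    }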
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 909e0c3d82ea..c7f9584de2c8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -307,6 +307,8 @@
* Eastlake, Steve Crocker, and Jeff Schiller.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -354,7 +356,6 @@
#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT 10
#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
@@ -371,12 +372,6 @@
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
/*
- * The minimum number of bits of entropy before we wake up a read on
- * /dev/random. Should be enough to do a significant reseed.
- */
-static int random_read_wakeup_bits = 64;
-
-/*
* If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write
* access to /dev/random.
@@ -436,42 +431,11 @@ static const struct poolinfo {
/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
{ S(128), 104, 76, 51, 25, 1 },
- /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
- /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
- { S(32), 26, 19, 14, 7, 1 },
-#if 0
- /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
- { S(2048), 1638, 1231, 819, 411, 1 },
-
- /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
- { S(1024), 817, 615, 412, 204, 1 },
-
- /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
- { S(1024), 819, 616, 410, 207, 2 },
-
- /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
- { S(512), 411, 308, 208, 104, 1 },
-
- /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
- { S(512), 409, 307, 206, 102, 2 },
- /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
- { S(512), 409, 309, 205, 103, 2 },
-
- /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
- { S(256), 205, 155, 101, 52, 1 },
-
- /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
- { S(128), 103, 78, 51, 27, 2 },
-
- /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
- { S(64), 52, 39, 26, 14, 1 },
-#endif
};
/*
* Static global variables
*/
-static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
@@ -530,11 +494,8 @@ struct entropy_store {
const struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
- struct entropy_store *pull;
- struct work_struct push_work;
/* read-write data: */
- unsigned long last_pulled;
spinlock_t lock;
unsigned short add_ptr;
unsigned short input_rotate;
@@ -550,9 +511,7 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int fips);
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
@@ -561,16 +520,6 @@ static struct entropy_store input_pool = {
.pool = input_pool_data
};
-static struct entropy_store blocking_pool = {
- .poolinfo = &poolinfo_table[1],
- .name = "blocking",
- .pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
- .pool = blocking_pool_data,
- .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
- push_to_pool),
-};
-
static __u32 const twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -759,22 +708,17 @@ retry:
} while (unlikely(entropy_count < pool_size-2 && pnfrac));
}
- if (unlikely(entropy_count < 0)) {
- pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+ if (WARN_ON(entropy_count < 0)) {
+ pr_warn("negative entropy/overflow: pool %s count %d\n",
r->name, entropy_count);
- WARN_ON(1);
entropy_count = 0;
} else if (entropy_count > pool_size)
entropy_count = pool_size;
- if ((r == &blocking_pool) && !r->initialized &&
- (entropy_count >> ENTROPY_SHIFT) > 128)
- has_initialized = 1;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
if (has_initialized) {
r->initialized = 1;
- wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
@@ -783,36 +727,13 @@ retry:
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
- struct entropy_store *other = &blocking_pool;
if (crng_init < 2) {
if (entropy_bits < 128)
return;
crng_reseed(&primary_crng, r);
- entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
- }
-
- /* initialize the blocking pool if necessary */
- if (entropy_bits >= random_read_wakeup_bits &&
- !other->initialized) {
- schedule_work(&other->push_work);
- return;
- }
-
- /* should we wake readers? */
- if (entropy_bits >= random_read_wakeup_bits &&
- wq_has_sleeper(&random_read_wait)) {
- wake_up_interruptible(&random_read_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
+ entropy_bits = ENTROPY_BITS(r);
}
- /* If the input pool is getting full, and the blocking
- * pool has room, send some entropy to the blocking
- * pool.
- */
- if (!work_pending(&other->push_work) &&
- (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
- (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
- schedule_work(&other->push_work);
}
}
@@ -884,7 +805,7 @@ static void crng_initialize(struct crng_state *crng)
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
- pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ pr_notice("crng done (trusting CPU's manufacturer)\n");
}
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
@@ -946,8 +867,7 @@ static int crng_fast_load(const char *cp, size_t len)
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
invalidate_batched_entropy();
crng_init = 1;
- wake_up_interruptible(&crng_init_wait);
- pr_notice("random: fast init done\n");
+ pr_notice("fast init done\n");
}
return 1;
}
@@ -1032,16 +952,15 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
- pr_notice("random: crng init done\n");
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
if (unseeded_warning.missed) {
- pr_notice("random: %d get_random_xx warning(s) missed "
- "due to ratelimiting\n",
+ pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
unseeded_warning.missed);
unseeded_warning.missed = 0;
}
if (urandom_warning.missed) {
- pr_notice("random: %d urandom warning(s) missed "
- "due to ratelimiting\n",
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
urandom_warning.missed);
urandom_warning.missed = 0;
}
@@ -1246,7 +1165,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
/*
* delta is now minimum absolute delta.
* Round down by 1 bit on general principles,
- * and limit entropy entimate to 12 bits.
+ * and limit entropy estimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
@@ -1390,57 +1309,6 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
*********************************************************************/
/*
- * This utility inline function is responsible for transferring entropy
- * from the primary pool to the secondary extraction pool. We make
- * sure we pull enough for a 'catastrophic reseed'.
- */
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
-static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
- if (!r->pull ||
- r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
- r->entropy_count > r->poolinfo->poolfracbits)
- return;
-
- _xfer_secondary_pool(r, nbytes);
-}
-
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
- __u32 tmp[OUTPUT_POOL_WORDS];
-
- int bytes = nbytes;
-
- /* pull at least as much as a wakeup */
- bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
- /* but never more than the buffer size */
- bytes = min_t(int, bytes, sizeof(tmp));
-
- trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
- ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
- bytes = extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_bits / 8, 0);
- mix_pool_bytes(r, tmp, bytes);
- credit_entropy_bits(r, bytes*8);
-}
-
-/*
- * Used as a workqueue function so that when the input pool is getting
- * full, we can "spill over" some entropy to the output pools. That
- * way the output pools can store some of the excess entropy instead
- * of letting it go to waste.
- */
-static void push_to_pool(struct work_struct *work)
-{
- struct entropy_store *r = container_of(work, struct entropy_store,
- push_work);
- BUG_ON(!r);
- _xfer_secondary_pool(r, random_read_wakeup_bits/8);
- trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
- r->pull->entropy_count >> ENTROPY_SHIFT);
-}
-
-/*
* This function decides how many bytes to actually take from the
* given pool, and also debits the entropy count accordingly.
*/
@@ -1465,10 +1333,9 @@ retry:
if (ibytes < min)
ibytes = 0;
- if (unlikely(entropy_count < 0)) {
- pr_warn("random: negative entropy count: pool %s count %d\n",
+ if (WARN_ON(entropy_count < 0)) {
+ pr_warn("negative entropy count: pool %s count %d\n",
r->name, entropy_count);
- WARN_ON(1);
entropy_count = 0;
}
nfrac = ibytes << (ENTROPY_SHIFT + 3);
@@ -1481,8 +1348,7 @@ retry:
goto retry;
trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes &&
- (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
+ if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1603,7 +1469,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
spin_unlock_irqrestore(&r->lock, flags);
trace_extract_entropy(r->name, EXTRACT_SIZE,
ENTROPY_BITS(r), _RET_IP_);
- xfer_secondary_pool(r, EXTRACT_SIZE);
extract_buf(r, tmp);
spin_lock_irqsave(&r->lock, flags);
memcpy(r->last_data, tmp, EXTRACT_SIZE);
@@ -1612,60 +1477,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
}
trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
return _extract_entropy(r, buf, nbytes, fips_enabled);
}
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a userspace buffer.
- */
-static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
- size_t nbytes)
-{
- ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- int large_request = (nbytes > 256);
-
- trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- if (!r->initialized && r->pull) {
- xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
- if (!r->initialized)
- return 0;
- }
- xfer_secondary_pool(r, nbytes);
- nbytes = account(r, nbytes, 0, 0);
-
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current)) {
- if (ret == 0)
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
- }
-
- extract_buf(r, tmp);
- i = min_t(int, nbytes, EXTRACT_SIZE);
- if (copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
- }
-
- nbytes -= i;
- buf += i;
- ret += i;
- }
-
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
-}
-
#define warn_unseeded_randomness(previous) \
_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
@@ -1687,8 +1503,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true;
#endif
if (__ratelimit(&unseeded_warning))
- pr_notice("random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
+ printk_deferred(KERN_NOTICE "random: %s called from %pS "
+ "with crng_init=%d\n", func_name, caller,
+ crng_init);
}
/*
@@ -1931,7 +1748,6 @@ static void __init init_std_data(struct entropy_store *r)
ktime_t now = ktime_get_real();
unsigned long rv;
- r->last_pulled = jiffies;
mix_pool_bytes(r, &now, sizeof(now));
for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_seed_long(&rv) &&
@@ -1955,7 +1771,6 @@ static void __init init_std_data(struct entropy_store *r)
int __init rand_initialize(void)
{
init_std_data(&input_pool);
- init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
@@ -1983,40 +1798,15 @@ void rand_initialize_disk(struct gendisk *disk)
#endif
static ssize_t
-_random_read(int nonblock, char __user *buf, size_t nbytes)
+urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
- ssize_t n;
-
- if (nbytes == 0)
- return 0;
-
- nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
- while (1) {
- n = extract_entropy_user(&blocking_pool, buf, nbytes);
- if (n < 0)
- return n;
- trace_random_read(n*8, (nbytes-n)*8,
- ENTROPY_BITS(&blocking_pool),
- ENTROPY_BITS(&input_pool));
- if (n > 0)
- return n;
-
- /* Pool is (near) empty. Maybe wait and retry. */
- if (nonblock)
- return -EAGAIN;
-
- wait_event_interruptible(random_read_wait,
- blocking_pool.initialized &&
- (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
-}
+ int ret;
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
-{
- return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
+ nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+ ret = extract_crng_user(buf, nbytes);
+ trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+ return ret;
}
static ssize_t
@@ -2024,22 +1814,29 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
unsigned long flags;
static int maxwarn = 10;
- int ret;
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
if (__ratelimit(&urandom_warning))
- printk(KERN_NOTICE "random: %s: uninitialized "
- "urandom read (%zd bytes read)\n",
- current->comm, nbytes);
+ pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
+ current->comm, nbytes);
spin_lock_irqsave(&primary_crng.lock, flags);
crng_init_cnt = 0;
spin_unlock_irqrestore(&primary_crng.lock, flags);
}
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
- ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
- return ret;
+
+ return urandom_read_nowarn(file, buf, nbytes, ppos);
+}
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ int ret;
+
+ ret = wait_for_random_bytes();
+ if (ret != 0)
+ return ret;
+ return urandom_read_nowarn(file, buf, nbytes, ppos);
}
static __poll_t
@@ -2047,10 +1844,10 @@ random_poll(struct file *file, poll_table * wait)
{
__poll_t mask;
- poll_wait(file, &random_read_wait, wait);
+ poll_wait(file, &crng_init_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
- if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
+ if (crng_ready())
mask |= EPOLLIN | EPOLLRDNORM;
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
mask |= EPOLLOUT | EPOLLWRNORM;
@@ -2141,7 +1938,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
input_pool.entropy_count = 0;
- blocking_pool.entropy_count = 0;
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
@@ -2175,6 +1971,7 @@ const struct file_operations urandom_fops = {
.read = urandom_read,
.write = random_write,
.unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
};
@@ -2184,23 +1981,27 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
{
int ret;
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
+ if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+ return -EINVAL;
+
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
return -EINVAL;
if (count > INT_MAX)
count = INT_MAX;
- if (flags & GRND_RANDOM)
- return _random_read(flags & GRND_NONBLOCK, buf, count);
-
- if (!crng_ready()) {
+ if (!(flags & GRND_INSECURE) && !crng_ready()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
if (unlikely(ret))
return ret;
}
- return urandom_read(NULL, buf, count, NULL);
+ return urandom_read_nowarn(NULL, buf, count, NULL);
}
/********************************************************************
@@ -2213,8 +2014,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h>
-static int min_read_thresh = 8, min_write_thresh;
-static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+static int min_write_thresh;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
@@ -2290,15 +2090,6 @@ struct ctl_table random_table[] = {
.data = &input_pool.entropy_count,
},
{
- .procname = "read_wakeup_threshold",
- .data = &random_read_wakeup_bits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_read_thresh,
- .extra2 = &max_read_thresh,
- },
- {
.procname = "write_wakeup_threshold",
.data = &random_write_wakeup_bits,
.maxlen = sizeof(int),
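[Editor's note] The net effect of this random.c rework: the separate blocking pool is gone, /dev/random now blocks only until the CRNG is seeded (via wait_for_random_bytes()) and then serves CRNG output like /dev/urandom, and poll() readiness keys off crng_ready() rather than an entropy threshold. The new GRND_INSECURE flag lets callers read CRNG output even before initialization, but combining it with GRND_RANDOM is rejected as contradictory. A userspace sketch (GRND_INSECURE requires v5.6+ UAPI headers; older libcs may not define it):

    #include <sys/random.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char buf[16];

            /* never blocks and never warns, but may return unseeded output */
            ssize_t n = getrandom(buf, sizeof(buf), GRND_INSECURE);
            if (n < 0) {
                    /* GRND_INSECURE | GRND_RANDOM would fail with EINVAL */
                    perror("getrandom");
                    return 1;
            }
            return 0;
    }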
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 2ec47a69a2a6..87f449340202 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work)
mutex_lock(&priv->buffer_mutex);
priv->command_enqueued = false;
+ ret = tpm_try_get_ops(priv->chip);
+ if (ret) {
+ priv->response_length = ret;
+ goto out;
+ }
+
ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
@@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work)
priv->response_length = ret;
mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
}
+out:
mutex_unlock(&priv->buffer_mutex);
wake_up_interruptible(&priv->async_wait);
}
@@ -123,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
priv->response_read = true;
ret_size = min_t(ssize_t, size, priv->response_length);
- if (!ret_size) {
+ if (ret_size <= 0) {
priv->response_length = 0;
goto out;
}
@@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
if (file->f_flags & O_NONBLOCK) {
priv->command_enqueued = true;
queue_work(tpm_dev_wq, &priv->async_work);
+ tpm_put_ops(priv->chip);
mutex_unlock(&priv->buffer_mutex);
return size;
}
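[Editor's note] The tpm-dev-common.c change rebalances ops references across the async boundary: tpm_common_write() now drops its reference before returning on the O_NONBLOCK path, and the worker takes and releases its own via tpm_try_get_ops()/tpm_put_ops(). The patched worker, reassembled in one piece for readability (fields and calls as in the hunks above; the ret > 0 condition is assumed from the surrounding driver):

    static void example_async_work(struct work_struct *work)
    {
            struct file_priv *priv = container_of(work, struct file_priv,
                                                  async_work);
            ssize_t ret;

            mutex_lock(&priv->buffer_mutex);
            priv->command_enqueued = false;
            ret = tpm_try_get_ops(priv->chip);   /* returns 0 on success */
            if (ret) {
                    priv->response_length = ret; /* surface error to read() */
                    goto out;
            }
            ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                                   sizeof(priv->data_buffer));
            tpm_put_ops(priv->chip);             /* drop the worker's reference */
            if (ret > 0) {
                    priv->response_length = ret;
                    mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
            }
    out:
            mutex_unlock(&priv->buffer_mutex);
            wake_up_interruptible(&priv->async_wait);
    }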
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index 1089fc0bb290..f3742bcc73e3 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -14,7 +14,7 @@ struct file_priv {
struct work_struct timeout_work;
struct work_struct async_work;
wait_queue_head_t async_wait;
- size_t response_length;
+ ssize_t response_length;
bool response_read;
bool command_enqueued;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 3b53b3e5ec3e..d52bf4df0bca 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(timeouts);
-static struct attribute *tpm_dev_attrs[] = {
+static ssize_t tpm_version_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+ ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
&dev_attr_enabled.attr,
@@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = {
&dev_attr_cancel.attr,
&dev_attr_durations.attr,
&dev_attr_timeouts.attr,
+ &dev_attr_tpm_version_major.attr,
NULL,
};
-static const struct attribute_group tpm_dev_group = {
- .attrs = tpm_dev_attrs,
+static struct attribute *tpm2_dev_attrs[] = {
+ &dev_attr_tpm_version_major.attr,
+ NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+ .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+ .attrs = tpm2_dev_attrs,
};
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
WARN_ON(chip->groups_cnt != 0);
- chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+ else
+ chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index b9e1547be6b5..5620747da0cf 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -218,7 +218,6 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
-void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index fdb457704aa7..13696deceae8 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -362,6 +362,7 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
tpm_transmit_cmd(chip, &buf, 0, "flushing context");
tpm_buf_destroy(&buf);
}
+EXPORT_SYMBOL_GPL(tpm2_flush_context);
struct tpm2_get_cap_out {
u8 more_data;
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 6640a14dbe48..22bf553ccf9d 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid =
0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
/**
- * ftpm_tee_tpm_op_recv - retrieve fTPM response.
+ * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
* @buf: the buffer to store data.
* @count: the number of bytes to read.
@@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
/**
- * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
* @buf: the buffer to send.
* @len: the number of bytes to send.
@@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
}
/**
- * ftpm_tee_probe - initialize the fTPM
+ * ftpm_tee_probe() - initialize the fTPM
* @pdev: the platform_device description.
*
* Return:
@@ -298,7 +298,7 @@ out_tee_session:
}
/**
- * ftpm_tee_remove - remove the TPM device
+ * ftpm_tee_remove() - remove the TPM device
* @pdev: the platform_device description.
*
* Return:
@@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev)
return 0;
}
+/**
+ * ftpm_tee_shutdown() - shutdown the TPM device
+ * @pdev: the platform_device description.
+ */
+static void ftpm_tee_shutdown(struct platform_device *pdev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+ tee_shm_free(pvt_data->shm);
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+ tee_client_close_context(pvt_data->ctx);
+}
+
static const struct of_device_id of_ftpm_tee_ids[] = {
{ .compatible = "microsoft,ftpm" },
{ }
@@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = {
},
.probe = ftpm_tee_probe,
.remove = ftpm_tee_remove,
+ .shutdown = ftpm_tee_shutdown,
};
module_platform_driver(ftpm_tee_driver);
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 8af2cee1a762..27c6ca031e23 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1059,8 +1059,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
goto out_err;
}
- tpm_chip_start(chip);
- chip->flags |= TPM_CHIP_FLAG_IRQ;
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
@@ -1070,7 +1068,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
} else {
tpm_tis_probe_irq(chip, intmask);
}
- tpm_chip_stop(chip);
}
rc = tpm_chip_register(chip);
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4f24e46ebe7c..56db949a7b70 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -15,10 +15,11 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
struct ttyprintk_port {
struct tty_port port;
- struct mutex port_write_mutex;
+ spinlock_t spinlock;
};
static struct ttyprintk_port tpk_port;
@@ -99,11 +100,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
- mutex_lock(&tpkp->port_write_mutex);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
/* flush tpk_printk buffer */
tpk_printk(NULL, 0);
- mutex_unlock(&tpkp->port_write_mutex);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
tty_port_close(&tpkp->port, tty, filp);
}
@@ -115,13 +117,14 @@ static int tpk_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
int ret;
/* exclusive use of tpk_printk within this tty */
- mutex_lock(&tpkp->port_write_mutex);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
- mutex_unlock(&tpkp->port_write_mutex);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
return ret;
}
@@ -171,7 +174,7 @@ static int __init ttyprintk_init(void)
{
int ret = -ENOMEM;
- mutex_init(&tpk_port.port_write_mutex);
+ spin_lock_init(&tpk_port.spinlock);
ttyprintk_driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 45653a0e6ecd..bcb257baed06 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -27,7 +27,7 @@ config COMMON_CLK_WM831X
tristate "Clock driver for WM831x/2x PMICs"
depends on MFD_WM831X
---help---
- Supports the clocking subsystem of the WM831x/2x series of
+ Supports the clocking subsystem of the WM831x/2x series of
PMICs from Wolfson Microelectronics.
source "drivers/clk/versatile/Kconfig"
@@ -174,6 +174,18 @@ config COMMON_CLK_CS2000_CP
help
If you say yes here you get support for the CS2000 clock multiplier.
+config COMMON_CLK_FSL_SAI
+ bool "Clock driver for BCLK of Freescale SAI cores"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ help
+ This driver supports the Freescale SAI (Synchronous Audio Interface)
+ to be used as a generic clock output. Some SoCs have restrictions
+ regarding the possible pin multiplexer settings. E.g. on some SoCs
+ two SAI interfaces can only be enabled together. If just one is
+ needed, the BCLK pin of the second one can be used as general
+ purpose clock output. Ideally, it can be used to drive an audio
+ codec (sometimes known as MCLK).
+
config COMMON_CLK_GEMINI
bool "Clock driver for Cortina Systems Gemini SoC"
depends on ARCH_GEMINI || COMPILE_TEST
@@ -225,6 +237,16 @@ config CLK_QORIQ
This adds the clock driver support for Freescale QorIQ platforms
using common clock framework.
+config CLK_LS1028A_PLLDIG
+ tristate "Clock driver for LS1028A Display output"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ default ARCH_LAYERSCAPE
+ help
+ This driver supports the Display output interface (LCD, DPHY) pixel clocks
+ of the QorIQ Layerscape LS1028A, implemented as a TSMC CLN28HPM PLL. Not all
+ features of the PLL are currently supported by the driver. By default, the
+ PLL is configured in bypass mode.
+
config COMMON_CLK_XGENE
bool "Clock driver for APM XGene SoC"
default ARCH_XGENE
@@ -305,10 +327,10 @@ config COMMON_CLK_MMP2
Support for Marvell MMP2 and MMP3 SoC clocks
config COMMON_CLK_BD718XX
- tristate "Clock driver for ROHM BD718x7 PMIC"
- depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528
+ tristate "Clock driver for 32K clk gates on ROHM PMICs"
+ depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528 || MFD_ROHM_BD71828
help
- This driver supports ROHM BD71837, ROHM BD71847 and
+ This driver supports ROHM BD71837, ROHM BD71847, ROHM BD71828 and
ROHM BD70528 PMICs clock gates.
config COMMON_CLK_FIXED_MMIO
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 0696a0c1ab58..f4169cc2fd31 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
+obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o
@@ -44,6 +45,7 @@ obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_COMMON_CLK_OXNAS) += clk-oxnas.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
+obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o
obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index 0aabe49aed09..a9d4234758d7 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 0ac34cdaa106..77fe83a73bf4 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 0855f3a80cc7..086cf0b4955c 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 34b817825b22..dfb354a5ff18 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -25,7 +25,8 @@
#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
#define PMC_PLL_ACR 0x18
-#define PMC_PLL_ACR_DEFAULT 0x1b040010UL
+#define PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL
+#define PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL
#define PMC_PLL_ACR_UTMIVR BIT(12)
#define PMC_PLL_ACR_UTMIBG BIT(13)
#define PMC_PLL_ACR_LOOP_FILTER_MSK GENMASK(31, 24)
@@ -88,7 +89,10 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
}
/* Recommended value for PMC_PLL_ACR */
- val = PMC_PLL_ACR_DEFAULT;
+ if (pll->characteristics->upll)
+ val = PMC_PLL_ACR_DEFAULT_UPLL;
+ else
+ val = PMC_PLL_ACR_DEFAULT_PLLA;
regmap_write(regmap, PMC_PLL_ACR, val);
regmap_write(regmap, PMC_PLL_CTRL1,
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 0b03cfae3a9d..b71515acdec1 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -275,7 +275,7 @@ static int __init pmc_register_ops(void)
np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
- pmcreg = syscon_node_to_regmap(np);
+ pmcreg = device_node_to_regmap(np);
if (IS_ERR(pmcreg))
return PTR_ERR(pmcreg);
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 86238d5ecb4d..77398aefeb6d 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -47,6 +47,7 @@ static const struct clk_programmable_layout sam9x60_programmable_layout = {
.pres_shift = 8,
.css_mask = 0x1f,
.have_slck_mck = 0,
+ .is_pres_direct = 1,
};
static const struct clk_pcr_layout sam9x60_pcr_layout = {
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 0de1108737db..ff7e3f727082 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 25b156d4e645..a6dee4a3b6e4 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -136,7 +136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
return;
mainxtal_name = of_clk_get_parent_name(np, i);
- regmap = syscon_node_to_regmap(np);
+ regmap = device_node_to_regmap(np);
if (IS_ERR(regmap))
return;
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index dd0f90c9dd0e..536b59aabd2c 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -260,7 +260,6 @@ static void __init asm9260_acc_init(struct device_node *np)
const char *ref_clk, *pll_clk = "pll";
u32 rate;
int n;
- u32 accuracy = 0;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
if (!clk_data)
@@ -275,10 +274,11 @@ static void __init asm9260_acc_init(struct device_node *np)
/* register pll */
rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
+ /* TODO: Convert to DT parent scheme */
ref_clk = of_clk_get_parent_name(np, 0);
- accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
- hw = clk_hw_register_fixed_rate_with_accuracy(NULL, pll_clk,
- ref_clk, 0, rate, accuracy);
+ hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
+ ref_clk, NULL, NULL, 0, rate, 0,
+ CLK_FIXED_RATE_PARENT_ACCURACY);
if (IS_ERR(hw))
panic("%pOFn: can't register REFCLK. Check DT!", np);
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
index 00926c587390..b52e8d6f660c 100644
--- a/drivers/clk/clk-bd718x7.c
+++ b/drivers/clk/clk-bd718x7.c
@@ -7,12 +7,25 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/mfd/rohm-bd718x7.h>
-#include <linux/mfd/rohm-bd70528.h>
+#include <linux/mfd/rohm-generic.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/regmap.h>
+/* clk control registers */
+/* BD70528 */
+#define BD70528_REG_OUT32K 0x2c
+/* BD71828 */
+#define BD71828_REG_OUT32K 0x4B
+/* BD71837 and BD71847 */
+#define BD718XX_REG_OUT32K 0x2E
+
+/*
+ * BD71837, BD71847, BD70528 and BD71828 all use bit [0] to clk output control
+ */
+#define CLK_OUT_EN_MASK BIT(0)
+
+
struct bd718xx_clk {
struct clk_hw hw;
u8 reg;
@@ -21,10 +34,8 @@ struct bd718xx_clk {
struct rohm_regmap_dev *mfd;
};
-static int bd71837_clk_set(struct clk_hw *hw, int status)
+static int bd71837_clk_set(struct bd718xx_clk *c, unsigned int status)
{
- struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
-
return regmap_update_bits(c->mfd->regmap, c->reg, c->mask, status);
}
@@ -33,14 +44,16 @@ static void bd71837_clk_disable(struct clk_hw *hw)
int rv;
struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
- rv = bd71837_clk_set(hw, 0);
+ rv = bd71837_clk_set(c, 0);
if (rv)
dev_dbg(&c->pdev->dev, "Failed to disable 32K clk (%d)\n", rv);
}
static int bd71837_clk_enable(struct clk_hw *hw)
{
- return bd71837_clk_set(hw, 1);
+ struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
+
+ return bd71837_clk_set(c, 0xffffffff);
}
static int bd71837_clk_is_enabled(struct clk_hw *hw)
@@ -74,6 +87,7 @@ static int bd71837_clk_probe(struct platform_device *pdev)
.name = "bd718xx-32k-out",
.ops = &bd71837_clk_ops,
};
+ enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL);
if (!c)
@@ -87,15 +101,19 @@ static int bd71837_clk_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No parent clk found\n");
return -EINVAL;
}
- switch (mfd->chip_type) {
+ switch (chip) {
case ROHM_CHIP_TYPE_BD71837:
case ROHM_CHIP_TYPE_BD71847:
c->reg = BD718XX_REG_OUT32K;
- c->mask = BD718XX_OUT32K_EN;
+ c->mask = CLK_OUT_EN_MASK;
+ break;
+ case ROHM_CHIP_TYPE_BD71828:
+ c->reg = BD71828_REG_OUT32K;
+ c->mask = CLK_OUT_EN_MASK;
break;
case ROHM_CHIP_TYPE_BD70528:
- c->reg = BD70528_REG_CLK_OUT;
- c->mask = BD70528_CLK_OUT_EN_MASK;
+ c->reg = BD70528_REG_OUT32K;
+ c->mask = CLK_OUT_EN_MASK;
break;
default:
dev_err(&pdev->dev, "Unknown clk chip\n");
@@ -121,11 +139,21 @@ static int bd71837_clk_probe(struct platform_device *pdev)
return rval;
}
+static const struct platform_device_id bd718x7_clk_id[] = {
+ { "bd71837-clk", ROHM_CHIP_TYPE_BD71837 },
+ { "bd71847-clk", ROHM_CHIP_TYPE_BD71847 },
+ { "bd70528-clk", ROHM_CHIP_TYPE_BD70528 },
+ { "bd71828-clk", ROHM_CHIP_TYPE_BD71828 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, bd718x7_clk_id);
+
static struct platform_driver bd71837_clk = {
.driver = {
.name = "bd718xx-clk",
},
.probe = bd71837_clk_probe,
+ .id_table = bd718x7_clk_id,
};
module_platform_driver(bd71837_clk);
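[Editor's note] Two patterns worth noting in the bd718x7 conversion: chip variants are now distinguished through platform_device_id .driver_data instead of an MFD-specific field, and the enable path passes an all-ones value because regmap_update_bits() applies the mask first, so only the bits in CLK_OUT_EN_MASK are touched. Sketch (register names as in the hunks above):

    /* regmap_update_bits(map, reg, mask, val) writes (old & ~mask) | (val & mask),
     * so 0xffffffff sets exactly the bits in CLK_OUT_EN_MASK */
    regmap_update_bits(c->mfd->regmap, c->reg, CLK_OUT_EN_MASK, 0xffffffff);

    /* probe side: recover the chip type matched from the id table */
    enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;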
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 4cd175afce9b..e6d6599d310a 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -474,11 +474,10 @@ static struct bm1880_composite_clock bm1880_composite_clks[] = {
static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
{
u64 numerator;
- u32 fbdiv, fref, refdiv;
+ u32 fbdiv, refdiv;
u32 postdiv1, postdiv2, denominator;
fbdiv = (regval >> 16) & 0xfff;
- fref = parent_rate;
refdiv = regval & 0x1f;
postdiv1 = (regval >> 8) & 0x7;
postdiv2 = (regval >> 12) & 0x7;
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 3e9c3e608769..7376f573bfdb 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -199,8 +199,9 @@ static void clk_composite_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
-struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
- const char * const *parent_names, int num_parents,
+static struct clk_hw *__clk_hw_register_composite(struct device *dev,
+ const char *name, const char * const *parent_names,
+ const struct clk_parent_data *pdata, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -218,7 +219,10 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
init.name = name;
init.flags = flags;
- init.parent_names = parent_names;
+ if (parent_names)
+ init.parent_names = parent_names;
+ else
+ init.parent_data = pdata;
init.num_parents = num_parents;
hw = &composite->hw;
@@ -312,6 +316,34 @@ err:
return hw;
}
+struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
+ const char * const *parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ return __clk_hw_register_composite(dev, name, parent_names, NULL,
+ num_parents, mux_hw, mux_ops,
+ rate_hw, rate_ops, gate_hw,
+ gate_ops, flags);
+}
+
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ return __clk_hw_register_composite(dev, name, NULL, parent_data,
+ num_parents, mux_hw, mux_ops,
+ rate_hw, rate_ops, gate_hw,
+ gate_ops, flags);
+}
+
struct clk *clk_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
@@ -329,6 +361,24 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
return hw->clk;
}
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+ const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_composite_pdata(dev, name, parent_data,
+ num_parents, mux_hw, mux_ops, rate_hw, rate_ops,
+ gate_hw, gate_ops, flags);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
+}
+
void clk_unregister_composite(struct clk *clk)
{
struct clk_composite *composite;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 098b2b01f0af..8de12cb0c43d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -463,11 +463,12 @@ const struct clk_ops clk_divider_ro_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-static struct clk_hw *_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock)
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_divider *div;
struct clk_hw *hw;
@@ -514,55 +515,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
return hw;
}
-
-/**
- * clk_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk *clk_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock)
-{
- struct clk_hw *hw;
-
- hw = _register_divider(dev, name, parent_name, flags, reg, shift,
- width, clk_divider_flags, NULL, lock);
- if (IS_ERR(hw))
- return ERR_CAST(hw);
- return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_divider);
-
-/**
- * clk_hw_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock)
-{
- return _register_divider(dev, name, parent_name, flags, reg, shift,
- width, clk_divider_flags, NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_divider);
+EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
/**
* clk_register_divider_table - register a table based divider clock with
@@ -586,39 +539,15 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
{
struct clk_hw *hw;
- hw = _register_divider(dev, name, parent_name, flags, reg, shift,
- width, clk_divider_flags, table, lock);
+ hw = __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
+ NULL, flags, reg, shift, width, clk_divider_flags,
+ table, lock);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);
-/**
- * clk_hw_register_divider_table - register a table based divider clock with
- * the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @table: array of divider/value pairs ending with a div set to 0
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock)
-{
- return _register_divider(dev, name, parent_name, flags, reg, shift,
- width, clk_divider_flags, table, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_divider_table);
-
void clk_unregister_divider(struct clk *clk)
{
struct clk_divider *div;
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 2c4486c09040..77499a27c8fb 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -24,6 +24,8 @@
* parent - fixed parent. No clk_set_parent support
*/
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -33,7 +35,12 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
static unsigned long clk_fixed_rate_recalc_accuracy(struct clk_hw *hw,
unsigned long parent_accuracy)
{
- return to_clk_fixed_rate(hw)->fixed_accuracy;
+ struct clk_fixed_rate *fixed = to_clk_fixed_rate(hw);
+
+ if (fixed->flags & CLK_FIXED_RATE_PARENT_ACCURACY)
+ return parent_accuracy;
+
+ return fixed->fixed_accuracy;
}
const struct clk_ops clk_fixed_rate_ops = {
@@ -42,24 +49,17 @@ const struct clk_ops clk_fixed_rate_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
-/**
- * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
- * the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- * @fixed_accuracy: non-adjustable clock rate
- */
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy)
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy,
+ unsigned long clk_fixed_flags)
{
struct clk_fixed_rate *fixed;
struct clk_hw *hw;
struct clk_init_data init = {};
- int ret;
+ int ret = -EINVAL;
/* allocate fixed-rate clock */
fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
@@ -69,17 +69,26 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
init.name = name;
init.ops = &clk_fixed_rate_ops;
init.flags = flags;
- init.parent_names = (parent_name ? &parent_name: NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.parent_hws = parent_hw ? &parent_hw : NULL;
+ init.parent_data = parent_data;
+ if (parent_name || parent_hw || parent_data)
+ init.num_parents = 1;
+ else
+ init.num_parents = 0;
/* struct clk_fixed_rate assignments */
+ fixed->flags = clk_fixed_flags;
fixed->fixed_rate = fixed_rate;
fixed->fixed_accuracy = fixed_accuracy;
fixed->hw.init = &init;
/* register the clock */
hw = &fixed->hw;
- ret = clk_hw_register(dev, hw);
+ if (dev || !np)
+ ret = clk_hw_register(dev, hw);
+ else if (np)
+ ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(fixed);
hw = ERR_PTR(ret);
@@ -87,47 +96,20 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
return hw;
}
-EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate_with_accuracy);
+EXPORT_SYMBOL_GPL(__clk_hw_register_fixed_rate);
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy)
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate)
{
struct clk_hw *hw;
hw = clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
- flags, fixed_rate, fixed_accuracy);
+ flags, fixed_rate, 0);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
-EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
-
-/**
- * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
- * framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- */
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate)
-{
- return clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
- flags, fixed_rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate);
-
-struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate)
-{
- return clk_register_fixed_rate_with_accuracy(dev, name, parent_name,
- flags, fixed_rate, 0);
-}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
void clk_unregister_fixed_rate(struct clk *clk)
@@ -155,9 +137,9 @@ void clk_hw_unregister_fixed_rate(struct clk_hw *hw)
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate);
#ifdef CONFIG_OF
-static struct clk *_of_fixed_clk_setup(struct device_node *node)
+static struct clk_hw *_of_fixed_clk_setup(struct device_node *node)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *clk_name = node->name;
u32 rate;
u32 accuracy = 0;
@@ -170,18 +152,18 @@ static struct clk *_of_fixed_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
- clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
0, rate, accuracy);
- if (IS_ERR(clk))
- return clk;
+ if (IS_ERR(hw))
+ return hw;
- ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
if (ret) {
- clk_unregister(clk);
+ clk_hw_unregister_fixed_rate(hw);
return ERR_PTR(ret);
}
- return clk;
+ return hw;
}
/**
@@ -195,27 +177,27 @@ CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
static int of_fixed_clk_remove(struct platform_device *pdev)
{
- struct clk *clk = platform_get_drvdata(pdev);
+ struct clk_hw *hw = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
- clk_unregister_fixed_rate(clk);
+ clk_hw_unregister_fixed_rate(hw);
return 0;
}
static int of_fixed_clk_probe(struct platform_device *pdev)
{
- struct clk *clk;
+ struct clk_hw *hw;
/*
* This function is not executed when of_fixed_clk_setup
* succeeded.
*/
- clk = _of_fixed_clk_setup(pdev->dev.of_node);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ hw = _of_fixed_clk_setup(pdev->dev.of_node);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
- platform_set_drvdata(pdev, clk);
+ platform_set_drvdata(pdev, hw);
return 0;
}
@@ -224,7 +206,6 @@ static const struct of_device_id of_fixed_clk_ids[] = {
{ .compatible = "fixed-clock" },
{ }
};
-MODULE_DEVICE_TABLE(of, of_fixed_clk_ids);
static struct platform_driver of_fixed_clk_driver = {
.driver = {
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
new file mode 100644
index 000000000000..0221180a4dd7
--- /dev/null
+++ b/drivers/clk/clk-fsl-sai.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale SAI BCLK as a generic clock driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#define I2S_CSR 0x00
+#define I2S_CR2 0x08
+#define CSR_BCE_BIT 28
+#define CR2_BCD BIT(24)
+#define CR2_DIV_SHIFT 0
+#define CR2_DIV_WIDTH 8
+
+struct fsl_sai_clk {
+ struct clk_divider div;
+ struct clk_gate gate;
+ spinlock_t lock;
+};
+
+static int fsl_sai_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fsl_sai_clk *sai_clk;
+ struct clk_parent_data pdata = { .index = 0 };
+ void __iomem *base;
+ struct clk_hw *hw;
+ struct resource *res;
+
+ sai_clk = devm_kzalloc(dev, sizeof(*sai_clk), GFP_KERNEL);
+ if (!sai_clk)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ spin_lock_init(&sai_clk->lock);
+
+ sai_clk->gate.reg = base + I2S_CSR;
+ sai_clk->gate.bit_idx = CSR_BCE_BIT;
+ sai_clk->gate.lock = &sai_clk->lock;
+
+ sai_clk->div.reg = base + I2S_CR2;
+ sai_clk->div.shift = CR2_DIV_SHIFT;
+ sai_clk->div.width = CR2_DIV_WIDTH;
+ sai_clk->div.lock = &sai_clk->lock;
+
+ /* set clock direction, we are the BCLK master */
+ writel(CR2_BCD, base + I2S_CR2);
+
+ hw = clk_hw_register_composite_pdata(dev, dev->of_node->name,
+ &pdata, 1, NULL, NULL,
+ &sai_clk->div.hw,
+ &clk_divider_ops,
+ &sai_clk->gate.hw,
+ &clk_gate_ops,
+ CLK_SET_RATE_GATE);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const struct of_device_id of_fsl_sai_clk_ids[] = {
+ { .compatible = "fsl,vf610-sai-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_fsl_sai_clk_ids);
+
+static struct platform_driver fsl_sai_clk_driver = {
+ .probe = fsl_sai_clk_probe,
+ .driver = {
+ .name = "fsl-sai-clk",
+ .of_match_table = of_fsl_sai_clk_ids,
+ },
+};
+module_platform_driver(fsl_sai_clk_driver);
+
+MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fsl-sai-clk");
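The driver above models the SAI bit clock as a gate (CSR bit 28) plus a divider (CR2 bits 7:0) in the same register block, serialized by one spinlock. As a usage illustration, a minimal consumer-side sketch; the "bclk" connection name and the rate are hypothetical, and CLK_SET_RATE_GATE means the rate must be set while the clock is still unprepared:

/* Hypothetical consumer of the registered SAI bit clock. */
#include <linux/clk.h>
#include <linux/device.h>

static int example_enable_bclk(struct device *dev)
{
        struct clk *bclk;
        int ret;

        bclk = devm_clk_get(dev, "bclk");       /* name from clock-names */
        if (IS_ERR(bclk))
                return PTR_ERR(bclk);

        /* CLK_SET_RATE_GATE: change the rate before preparing the clock */
        ret = clk_set_rate(bclk, 1536000);
        if (ret)
                return ret;

        return clk_prepare_enable(bclk);
}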
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 670053c58c1a..2ca1f2ac38a6 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -123,26 +123,18 @@ const struct clk_ops clk_gate_ops = {
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
-/**
- * clk_hw_register_gate - register a gate clock with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @flags: framework-specific flags for this clock
- * @reg: register address to control gating of this clock
- * @bit_idx: which bit in the register controls gating of this clock
- * @clk_gate_flags: gate-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct clk_gate *gate;
struct clk_hw *hw;
struct clk_init_data init = {};
- int ret;
+ int ret = -EINVAL;
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
if (bit_idx > 15) {
@@ -160,7 +152,12 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
init.ops = &clk_gate_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
+ init.parent_hws = parent_hw ? &parent_hw : NULL;
+ init.parent_data = parent_data;
+ if (parent_name || parent_hw || parent_data)
+ init.num_parents = 1;
+ else
+ init.num_parents = 0;
/* struct clk_gate assignments */
gate->reg = reg;
@@ -170,15 +167,19 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
gate->hw.init = &init;
hw = &gate->hw;
- ret = clk_hw_register(dev, hw);
+ if (dev || !np)
+ ret = clk_hw_register(dev, hw);
+ else if (np)
+ ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(gate);
hw = ERR_PTR(ret);
}
return hw;
}
-EXPORT_SYMBOL_GPL(clk_hw_register_gate);
+EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
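With __clk_hw_register_gate() a caller identifies the single parent by name, by clk_hw pointer, or by clk_parent_data, and num_parents becomes 1 when any of the three is given. A sketch of the pointer-based form, calling the internal entry point directly (the accompanying header changes are expected to wrap this in convenience macros; names below are hypothetical):

/* Sketch: parent_hw, reg and lock are assumed to exist already. */
static struct clk_hw *sketch_register_gate(struct device *dev,
                                           const struct clk_hw *parent_hw,
                                           void __iomem *reg,
                                           spinlock_t *lock)
{
        /* parent_name and parent_data stay NULL: exactly one parent form */
        return __clk_hw_register_gate(dev, NULL, "example_gate",
                                      NULL, parent_hw, NULL,
                                      0, reg, 5 /* illustrative bit */,
                                      0, lock);
}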
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 13304cf5f2a8..70397b4b5ffe 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -28,6 +28,26 @@
* parent - fixed parent. No clk_set_parent support
*/
+/**
+ * struct clk_gpio - gpio gated clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @gpiod: gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock
+ * or switching between two parents by asserting or deasserting the gpio.
+ *
+ * Implements .enable, .disable and .is_enabled or
+ * .get_parent, .set_parent and .determine_rate depending on which clk_ops
+ * is used.
+ */
+struct clk_gpio {
+ struct clk_hw hw;
+ struct gpio_desc *gpiod;
+};
+
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
static int clk_gpio_gate_enable(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
@@ -51,12 +71,11 @@ static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
return gpiod_get_value(clk->gpiod);
}
-const struct clk_ops clk_gpio_gate_ops = {
+static const struct clk_ops clk_gpio_gate_ops = {
.enable = clk_gpio_gate_enable,
.disable = clk_gpio_gate_disable,
.is_enabled = clk_gpio_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw)
{
@@ -111,67 +130,49 @@ static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-const struct clk_ops clk_gpio_mux_ops = {
+static const struct clk_ops clk_gpio_mux_ops = {
.get_parent = clk_gpio_mux_get_parent,
.set_parent = clk_gpio_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
-EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
-static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
- unsigned long flags, const struct clk_ops *clk_gpio_ops)
+static struct clk_hw *clk_register_gpio(struct device *dev, u8 num_parents,
+ struct gpio_desc *gpiod,
+ const struct clk_ops *clk_gpio_ops)
{
struct clk_gpio *clk_gpio;
struct clk_hw *hw;
struct clk_init_data init = {};
int err;
+ const struct clk_parent_data gpio_parent_data[] = {
+ { .index = 0 },
+ { .index = 1 },
+ };
- if (dev)
- clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL);
- else
- clk_gpio = kzalloc(sizeof(*clk_gpio), GFP_KERNEL);
-
+ clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio), GFP_KERNEL);
if (!clk_gpio)
return ERR_PTR(-ENOMEM);
- init.name = name;
+ init.name = dev->of_node->name;
init.ops = clk_gpio_ops;
- init.flags = flags;
- init.parent_names = parent_names;
+ init.parent_data = gpio_parent_data;
init.num_parents = num_parents;
+ init.flags = CLK_SET_RATE_PARENT;
clk_gpio->gpiod = gpiod;
clk_gpio->hw.init = &init;
hw = &clk_gpio->hw;
- if (dev)
- err = devm_clk_hw_register(dev, hw);
- else
- err = clk_hw_register(NULL, hw);
-
- if (!err)
- return hw;
-
- if (!dev) {
- kfree(clk_gpio);
- }
+ err = devm_clk_hw_register(dev, hw);
+ if (err)
+ return ERR_PTR(err);
- return ERR_PTR(err);
+ return hw;
}
-/**
- * clk_hw_register_gpio_gate - register a gpio clock gate with the clock
- * framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @gpiod: gpio descriptor to gate this clock
- * @flags: clock flags
- */
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpiod,
- unsigned long flags)
+static struct clk_hw *clk_hw_register_gpio_gate(struct device *dev,
+ int num_parents,
+ struct gpio_desc *gpiod)
{
const struct clk_ops *ops;
@@ -180,88 +181,36 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
else
ops = &clk_gpio_gate_ops;
- return clk_register_gpio(dev, name,
- (parent_name ? &parent_name : NULL),
- (parent_name ? 1 : 0), gpiod, flags, ops);
+ return clk_register_gpio(dev, num_parents, gpiod, ops);
}
-EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpiod,
- unsigned long flags)
+static struct clk_hw *clk_hw_register_gpio_mux(struct device *dev,
+ struct gpio_desc *gpiod)
{
- struct clk_hw *hw;
-
- hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpiod, flags);
- if (IS_ERR(hw))
- return ERR_CAST(hw);
- return hw->clk;
+ return clk_register_gpio(dev, 2, gpiod, &clk_gpio_mux_ops);
}
-EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
-
-/**
- * clk_hw_register_gpio_mux - register a gpio clock mux with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_names: names of this clock's parents
- * @num_parents: number of parents listed in @parent_names
- * @gpiod: gpio descriptor to gate this clock
- * @flags: clock flags
- */
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
- unsigned long flags)
-{
- if (num_parents != 2) {
- pr_err("mux-clock %s must have 2 parents\n", name);
- return ERR_PTR(-EINVAL);
- }
-
- return clk_register_gpio(dev, name, parent_names, num_parents,
- gpiod, flags, &clk_gpio_mux_ops);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
-
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
- unsigned long flags)
-{
- struct clk_hw *hw;
-
- hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
- gpiod, flags);
- if (IS_ERR(hw))
- return ERR_CAST(hw);
- return hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
static int gpio_clk_driver_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
- const char **parent_names, *gpio_name;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const char *gpio_name;
unsigned int num_parents;
struct gpio_desc *gpiod;
- struct clk *clk;
+ struct clk_hw *hw;
bool is_mux;
int ret;
+ is_mux = of_device_is_compatible(node, "gpio-mux-clock");
+
num_parents = of_clk_get_parent_count(node);
- if (num_parents) {
- parent_names = devm_kcalloc(&pdev->dev, num_parents,
- sizeof(char *), GFP_KERNEL);
- if (!parent_names)
- return -ENOMEM;
-
- of_clk_parent_fill(node, parent_names, num_parents);
- } else {
- parent_names = NULL;
+ if (is_mux && num_parents != 2) {
+ dev_err(dev, "mux-clock must have 2 parents\n");
+ return -EINVAL;
}
- is_mux = of_device_is_compatible(node, "gpio-mux-clock");
-
gpio_name = is_mux ? "select" : "enable";
- gpiod = devm_gpiod_get(&pdev->dev, gpio_name, GPIOD_OUT_LOW);
+ gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
if (ret == -EPROBE_DEFER)
@@ -275,16 +224,13 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
}
if (is_mux)
- clk = clk_register_gpio_mux(&pdev->dev, node->name,
- parent_names, num_parents, gpiod, 0);
+ hw = clk_hw_register_gpio_mux(dev, gpiod);
else
- clk = clk_register_gpio_gate(&pdev->dev, node->name,
- parent_names ? parent_names[0] : NULL, gpiod,
- CLK_SET_RATE_PARENT);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ hw = clk_hw_register_gpio_gate(dev, num_parents, gpiod);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
- return of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}
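The probe path now describes its parents positionally through clk_parent_data { .index }, which the core resolves against the device's "clocks" DT property, so no parent-name parsing is needed. For reference, a sketch of the DT shape those indices map onto (node and labels hypothetical, following the gpio-mux-clock binding):

/*
 * Hypothetical mux node: .index = 0 and .index = 1 in gpio_parent_data
 * select the first and second phandle of "clocks" respectively.
 *
 *      clk_mux: clock {
 *              compatible = "gpio-mux-clock";
 *              clocks = <&parent_a>, <&parent_b>;
 *              select-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
 *              #clock-cells = <0>;
 *      };
 */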
static const struct of_device_id gpio_clk_match_table[] = {
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 570b6e5b603b..e54e79714818 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -145,17 +145,19 @@ const struct clk_ops clk_mux_ro_ops = {
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk_hw *hw;
struct clk_init_data init = {};
u8 width = 0;
- int ret;
+ int ret = -EINVAL;
if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
width = fls(mask) - ffs(mask) + 1;
@@ -177,6 +179,8 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
init.ops = &clk_mux_ops;
init.flags = flags;
init.parent_names = parent_names;
+ init.parent_data = parent_data;
+ init.parent_hws = parent_hws;
init.num_parents = num_parents;
/* struct clk_mux assignments */
@@ -189,7 +193,10 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
mux->hw.init = &init;
hw = &mux->hw;
- ret = clk_hw_register(dev, hw);
+ if (dev || !np)
+ ret = clk_hw_register(dev, hw);
+ else if (np)
+ ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(mux);
hw = ERR_PTR(ret);
@@ -197,53 +204,24 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
return hw;
}
-EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
+EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
struct clk_hw *hw;
- hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
- flags, reg, shift, mask, clk_mux_flags,
- table, lock);
+ hw = clk_hw_register_mux_table(dev, name, parent_names,
+ num_parents, flags, reg, shift, mask,
+ clk_mux_flags, table, lock);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);
-struct clk *clk_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock)
-{
- u32 mask = BIT(width) - 1;
-
- return clk_register_mux_table(dev, name, parent_names, num_parents,
- flags, reg, shift, mask, clk_mux_flags,
- NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_register_mux);
-
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock)
-{
- u32 mask = BIT(width) - 1;
-
- return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
- flags, reg, shift, mask, clk_mux_flags,
- NULL, lock);
-}
-EXPORT_SYMBOL_GPL(clk_hw_register_mux);
-
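The deleted clk_register_mux()/clk_hw_register_mux() wrappers only computed mask = BIT(width) - 1 before delegating, so they can survive as thin wrappers around the new __clk_hw_register_mux(). A sketch of that shape (not the literal header change, which this series makes in clk-provider.h):

/* Sketch of a header-side wrapper over the consolidated entry point. */
static inline struct clk_hw *
sketch_clk_hw_register_mux(struct device *dev, const char *name,
                           const char * const *parent_names, u8 num_parents,
                           unsigned long flags, void __iomem *reg, u8 shift,
                           u8 width, u8 clk_mux_flags, spinlock_t *lock)
{
        return __clk_hw_register_mux(dev, NULL, name, num_parents,
                                     parent_names, NULL, NULL, flags,
                                     reg, shift, BIT(width) - 1,
                                     clk_mux_flags, NULL, lock);
}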
void clk_unregister_mux(struct clk *clk)
{
struct clk_mux *mux;
diff --git a/drivers/clk/clk-plldig.c b/drivers/clk/clk-plldig.c
new file mode 100644
index 000000000000..25020164b89e
--- /dev/null
+++ b/drivers/clk/clk-plldig.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ *
+ * Clock driver for LS1028A Display output interfaces (LCD, DPHY).
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+/* PLLDIG register offsets and bit masks */
+#define PLLDIG_REG_PLLSR 0x24
+#define PLLDIG_LOCK_MASK BIT(2)
+#define PLLDIG_REG_PLLDV 0x28
+#define PLLDIG_MFD_MASK GENMASK(7, 0)
+#define PLLDIG_RFDPHI1_MASK GENMASK(30, 25)
+#define PLLDIG_REG_PLLFM 0x2c
+#define PLLDIG_SSCGBYP_ENABLE BIT(30)
+#define PLLDIG_REG_PLLFD 0x30
+#define PLLDIG_FDEN BIT(30)
+#define PLLDIG_FRAC_MASK GENMASK(15, 0)
+#define PLLDIG_REG_PLLCAL1 0x38
+#define PLLDIG_REG_PLLCAL2 0x3c
+
+/* Range of the VCO frequencies, in Hz */
+#define PLLDIG_MIN_VCO_FREQ 650000000
+#define PLLDIG_MAX_VCO_FREQ 1300000000
+
+/* Range of the output frequencies, in Hz */
+#define PHI1_MIN_FREQ 27000000UL
+#define PHI1_MAX_FREQ 600000000UL
+
+/* Maximum value of the reduced frequency divider */
+#define MAX_RFDPHI1 63UL
+
+/* Best value of multiplication factor divider */
+#define PLLDIG_DEFAULT_MFD 44
+
+/*
+ * Denominator part of the fractional part of the
+ * loop multiplication factor.
+ */
+#define MFDEN 20480
+
+static const struct clk_parent_data parent_data[] = {
+ { .index = 0 },
+};
+
+struct clk_plldig {
+ struct clk_hw hw;
+ void __iomem *regs;
+ unsigned int vco_freq;
+};
+
+#define to_clk_plldig(_hw) container_of(_hw, struct clk_plldig, hw)
+
+static int plldig_enable(struct clk_hw *hw)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+ u32 val;
+
+ val = readl(data->regs + PLLDIG_REG_PLLFM);
+ /*
+ * Use bypass mode with PLL off by default; the frequency overshoot
+ * detector output is disabled. SSCG bypass mode should be enabled.
+ */
+ val |= PLLDIG_SSCGBYP_ENABLE;
+ writel(val, data->regs + PLLDIG_REG_PLLFM);
+
+ return 0;
+}
+
+static void plldig_disable(struct clk_hw *hw)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+ u32 val;
+
+ val = readl(data->regs + PLLDIG_REG_PLLFM);
+
+ val &= ~PLLDIG_SSCGBYP_ENABLE;
+ val |= FIELD_PREP(PLLDIG_SSCGBYP_ENABLE, 0x0);
+
+ writel(val, data->regs + PLLDIG_REG_PLLFM);
+}
+
+static int plldig_is_enabled(struct clk_hw *hw)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+
+ return readl(data->regs + PLLDIG_REG_PLLFM) &
+ PLLDIG_SSCGBYP_ENABLE;
+}
+
+static unsigned long plldig_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+ u32 val, rfdphi1;
+
+ val = readl(data->regs + PLLDIG_REG_PLLDV);
+
+ /* Check if PLL is bypassed */
+ if (val & PLLDIG_SSCGBYP_ENABLE)
+ return parent_rate;
+
+ rfdphi1 = FIELD_GET(PLLDIG_RFDPHI1_MASK, val);
+
+ /*
+ * A register value of 0 in RFDPHI1 means the VCO frequency is
+ * divided by one.
+ */
+ if (!rfdphi1)
+ rfdphi1 = 1;
+
+ return DIV_ROUND_UP(data->vco_freq, rfdphi1);
+}
+
+static unsigned long plldig_calc_target_div(unsigned long vco_freq,
+ unsigned long target_rate)
+{
+ unsigned long div;
+
+ div = DIV_ROUND_CLOSEST(vco_freq, target_rate);
+ div = clamp(div, 1UL, MAX_RFDPHI1);
+
+ return div;
+}
+
+static int plldig_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+ unsigned int div;
+
+ req->rate = clamp(req->rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
+ div = plldig_calc_target_div(data->vco_freq, req->rate);
+ req->rate = DIV_ROUND_UP(data->vco_freq, div);
+
+ return 0;
+}
+
+static int plldig_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+ unsigned int val, cond;
+ unsigned int rfdphi1;
+
+ rate = clamp(rate, PHI1_MIN_FREQ, PHI1_MAX_FREQ);
+ rfdphi1 = plldig_calc_target_div(data->vco_freq, rate);
+
+ /* update the divider value */
+ val = readl(data->regs + PLLDIG_REG_PLLDV);
+ val &= ~PLLDIG_RFDPHI1_MASK;
+ val |= FIELD_PREP(PLLDIG_RFDPHI1_MASK, rfdphi1);
+ writel(val, data->regs + PLLDIG_REG_PLLDV);
+
+ /* wait for the old lock state to clear */
+ udelay(200);
+
+ /* Wait until PLL is locked or timeout */
+ return readl_poll_timeout_atomic(data->regs + PLLDIG_REG_PLLSR, cond,
+ cond & PLLDIG_LOCK_MASK, 0,
+ USEC_PER_MSEC);
+}
+
+static const struct clk_ops plldig_clk_ops = {
+ .enable = plldig_enable,
+ .disable = plldig_disable,
+ .is_enabled = plldig_is_enabled,
+ .recalc_rate = plldig_recalc_rate,
+ .determine_rate = plldig_determine_rate,
+ .set_rate = plldig_set_rate,
+};
+
+static int plldig_init(struct clk_hw *hw)
+{
+ struct clk_plldig *data = to_clk_plldig(hw);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate;
+ unsigned long val;
+ unsigned long long lltmp;
+ unsigned int mfd, fracdiv = 0;
+
+ if (!parent)
+ return -EINVAL;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ if (data->vco_freq) {
+ mfd = data->vco_freq / parent_rate;
+ lltmp = data->vco_freq % parent_rate;
+ lltmp *= MFDEN;
+ do_div(lltmp, parent_rate);
+ fracdiv = lltmp;
+ } else {
+ mfd = PLLDIG_DEFAULT_MFD;
+ data->vco_freq = parent_rate * mfd;
+ }
+
+ val = FIELD_PREP(PLLDIG_MFD_MASK, mfd);
+ writel(val, data->regs + PLLDIG_REG_PLLDV);
+
+ /* Enable fractional divider */
+ if (fracdiv) {
+ val = FIELD_PREP(PLLDIG_FRAC_MASK, fracdiv);
+ val |= PLLDIG_FDEN;
+ writel(val, data->regs + PLLDIG_REG_PLLFD);
+ }
+
+ return 0;
+}
+
+static int plldig_clk_probe(struct platform_device *pdev)
+{
+ struct clk_plldig *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ data->hw.init = CLK_HW_INIT_PARENTS_DATA("dpclk",
+ parent_data,
+ &plldig_clk_ops,
+ 0);
+
+ ret = devm_clk_hw_register(dev, &data->hw);
+ if (ret) {
+ dev_err(dev, "failed to register %s clock\n",
+ dev->of_node->name);
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &data->hw);
+ if (ret) {
+ dev_err(dev, "unable to add clk provider\n");
+ return ret;
+ }
+
+ /*
+ * The frequency of the VCO cannot be changed at runtime, so let the
+ * user specify the desired frequency up front.
+ */
+ if (!of_property_read_u32(dev->of_node, "fsl,vco-hz",
+ &data->vco_freq)) {
+ if (data->vco_freq < PLLDIG_MIN_VCO_FREQ ||
+ data->vco_freq > PLLDIG_MAX_VCO_FREQ)
+ return -EINVAL;
+ }
+
+ return plldig_init(&data->hw);
+}
+
+static const struct of_device_id plldig_clk_id[] = {
+ { .compatible = "fsl,ls1028a-plldig" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, plldig_clk_id);
+
+static struct platform_driver plldig_clk_driver = {
+ .driver = {
+ .name = "plldig-clock",
+ .of_match_table = plldig_clk_id,
+ },
+ .probe = plldig_clk_probe,
+};
+module_platform_driver(plldig_clk_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wen He <wen.he_1@nxp.com>");
+MODULE_DESCRIPTION("LS1028A Display output interface pixel clock driver");
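The arithmetic in plldig_init() follows VCO = parent * (MFD + fracdiv/MFDEN), and the PHI1 output is VCO/RFDPHI1 with the divider clamped to [1, 63]. A standalone sketch with hypothetical numbers (24 MHz reference, 650 MHz VCO request) makes the fractional step concrete:

/* Standalone sketch of the MFD/fracdiv computation; values hypothetical. */
#include <stdio.h>

#define MFDEN 20480ULL

int main(void)
{
        unsigned long long parent = 24000000ULL;   /* reference clock, Hz */
        unsigned long long vco = 650000000ULL;     /* requested VCO, Hz */
        unsigned long long mfd, fracdiv, achieved;

        mfd = vco / parent;                        /* 27 */
        fracdiv = (vco % parent) * MFDEN / parent; /* 1706 */

        /* parent * (mfd + fracdiv/MFDEN) ~= 649999218 Hz */
        achieved = parent * mfd + parent * fracdiv / MFDEN;

        printf("mfd=%llu fracdiv=%llu vco=%llu\n", mfd, fracdiv, achieved);
        return 0;
}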
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index bed140f7375f..d5946f7486d6 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -342,6 +342,32 @@ static const struct clockgen_muxinfo ls1046a_hwa2 = {
},
};
+static const struct clockgen_muxinfo ls1088a_hwa1 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1088a_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
static const struct clockgen_muxinfo ls1012a_cmux = {
{
[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
@@ -607,6 +633,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_groups = {
&clockgen2_cmux_cga12
},
+ .hwaccel = {
+ &ls1088a_hwa1, &ls1088a_hwa2
+ },
.cmux_to_group = {
0, 0, -1
},
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 886f7c5df51a..c491f5de0f3f 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -176,7 +176,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_CLOCK },
+ { SCMI_PROTOCOL_CLOCK, "clocks" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b68e200829f2..f0f2b599fd7e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -429,7 +429,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
parent = ERR_PTR(-EPROBE_DEFER);
} else {
parent = clk_core_get(core, index);
- if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
+ if (PTR_ERR(parent) == -ENOENT && entry->name)
parent = clk_core_lookup(entry->name);
}
@@ -2996,6 +2996,41 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_dump);
+#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+/*
+ * This can be dangerous, therefore don't provide any real compile time
+ * configuration option for this feature.
+ * People who want to use this will need to modify the source code directly.
+ */
+static int clk_rate_set(void *data, u64 val)
+{
+ struct clk_core *core = data;
+ int ret;
+
+ clk_prepare_lock();
+ ret = clk_core_set_rate_nolock(core, val);
+ clk_prepare_unlock();
+
+ return ret;
+}
+
+#define clk_rate_mode 0644
+#else
+#define clk_rate_set NULL
+#define clk_rate_mode 0444
+#endif
+
+static int clk_rate_get(void *data, u64 *val)
+{
+ struct clk_core *core = data;
+
+ *val = core->rate;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
+
static const struct {
unsigned long flag;
const char *name;
@@ -3145,7 +3180,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
root = debugfs_create_dir(core->name, pdentry);
core->dentry = root;
- debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
+ debugfs_create_file("clk_rate", clk_rate_mode, root, core,
+ &clk_rate_fops);
debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
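As the comment in the hunk above states, writable clk_rate is deliberately kept away from Kconfig; turning it on is a local source edit. A sketch of the intended workflow, where the clock name and rate are hypothetical:

/*
 * In drivers/clk/clk.c, flip the guard:
 *
 *      -#undef CLOCK_ALLOW_WRITE_DEBUGFS
 *      +#define CLOCK_ALLOW_WRITE_DEBUGFS
 *
 * After a rebuild, clk_rate is created with mode 0644 and a write such
 * as
 *
 *      echo 148500000 > /sys/kernel/debug/clk/pll_video_div/clk_rate
 *
 * lands in clk_rate_set(), which takes the prepare lock and calls
 * clk_core_set_rate_nolock() on that clock.
 */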
@@ -3249,6 +3285,34 @@ static inline void clk_debug_unregister(struct clk_core *core)
}
#endif
+static void clk_core_reparent_orphans_nolock(void)
+{
+ struct clk_core *orphan;
+ struct hlist_node *tmp2;
+
+ /*
+ * walk the list of orphan clocks and reparent any that newly find a
+ * parent.
+ */
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
+ struct clk_core *parent = __clk_init_parent(orphan);
+
+ /*
+ * We need to use __clk_set_parent_before() and _after() to
+ * properly migrate any prepare/enable count of the orphan
+ * clock. This is important for CLK_IS_CRITICAL clocks, which
+ * are enabled during init but might not have a parent yet.
+ */
+ if (parent) {
+ /* update the clk tree topology */
+ __clk_set_parent_before(orphan, parent);
+ __clk_set_parent_after(orphan, parent, NULL);
+ __clk_recalc_accuracies(orphan);
+ __clk_recalc_rates(orphan, 0);
+ }
+ }
+}
+
/**
* __clk_core_init - initialize the data structures in a struct clk_core
* @core: clk_core being initialized
@@ -3259,8 +3323,6 @@ static inline void clk_debug_unregister(struct clk_core *core)
static int __clk_core_init(struct clk_core *core)
{
int ret;
- struct clk_core *orphan;
- struct hlist_node *tmp2;
unsigned long rate;
if (!core)
@@ -3312,6 +3374,26 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
+ /*
+ * optional platform-specific magic
+ *
+ * The .init callback is not used by any of the basic clock types, but
+ * exists for weird hardware that must perform initialization magic so
+ * that CCF gets an accurate view of the clock in any other callback.
+ * It may also be used if the driver needs to perform dynamic
+ * allocations. Such allocations must be freed in the terminate()
+ * callback.
+ * This callback shall not be used to initialize the parameters state,
+ * such as rate, parent, etc ...
+ *
+ * If it exists, this callback should be called before any other
+ * callback of the clock.
+ */
+ if (core->ops->init) {
+ ret = core->ops->init(core->hw);
+ if (ret)
+ goto out;
+ }
+
core->parent = __clk_init_parent(core);
/*
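The contract in the comment above pairs .init with .terminate: whatever .init allocates, .terminate (invoked from clk_unregister(), see the hunk further down) must free. A sketch of a driver using the pair; every name here is hypothetical:

/* Hypothetical clk_ops using the .init/.terminate pair. */
static int example_clk_init(struct clk_hw *hw)
{
        struct example_clk *p = to_example_clk(hw); /* assumed helper */

        p->scratch = kzalloc(sizeof(*p->scratch), GFP_KERNEL);
        if (!p->scratch)
                return -ENOMEM;

        return 0; /* runs before any other callback of this clock */
}

static void example_clk_terminate(struct clk_hw *hw)
{
        struct example_clk *p = to_example_clk(hw);

        kfree(p->scratch); /* release what .init allocated */
}

static const struct clk_ops example_clk_ops = {
        .init      = example_clk_init,
        .terminate = example_clk_terminate,
        /* .recalc_rate etc. as the hardware requires */
};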
@@ -3337,17 +3419,6 @@ static int __clk_core_init(struct clk_core *core)
}
/*
- * optional platform-specific magic
- *
- * The .init callback is not used by any of the basic clock types, but
- * exists for weird hardware that must perform initialization magic.
- * Please consider other ways of solving initialization problems before
- * using this callback, as its use is discouraged.
- */
- if (core->ops->init)
- core->ops->init(core->hw);
-
- /*
* Set clk's accuracy. The preferred method is to use
* .recalc_accuracy. For simple clocks and lazy developers the default
* fallback is to use the parent's accuracy. If a clock doesn't have a
@@ -3400,34 +3471,26 @@ static int __clk_core_init(struct clk_core *core)
if (core->flags & CLK_IS_CRITICAL) {
unsigned long flags;
- clk_core_prepare(core);
+ ret = clk_core_prepare(core);
+ if (ret) {
+ pr_warn("%s: critical clk '%s' failed to prepare\n",
+ __func__, core->name);
+ goto out;
+ }
flags = clk_enable_lock();
- clk_core_enable(core);
+ ret = clk_core_enable(core);
clk_enable_unlock(flags);
+ if (ret) {
+ pr_warn("%s: critical clk '%s' failed to enable\n",
+ __func__, core->name);
+ clk_core_unprepare(core);
+ goto out;
+ }
}
- /*
- * walk the list of orphan clocks and reparent any that newly finds a
- * parent.
- */
- hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- struct clk_core *parent = __clk_init_parent(orphan);
+ clk_core_reparent_orphans_nolock();
- /*
- * We need to use __clk_set_parent_before() and _after() to
- * to properly migrate any prepare/enable count of the orphan
- * clock. This is important for CLK_IS_CRITICAL clocks, which
- * are enabled during init but might not have a parent yet.
- */
- if (parent) {
- /* update the clk tree topology */
- __clk_set_parent_before(orphan, parent);
- __clk_set_parent_after(orphan, parent, NULL);
- __clk_recalc_accuracies(orphan);
- __clk_recalc_rates(orphan, 0);
- }
- }
kref_init(&core->ref);
out:
@@ -3720,6 +3783,28 @@ fail_out:
}
/**
+ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
+ * @dev: Device to get device node of
+ *
+ * Return: device node pointer of @dev, or the device node pointer of
+ * @dev->parent if @dev doesn't have a device node, or NULL if neither
+ * @dev nor @dev->parent has a device node.
+ */
+static struct device_node *dev_or_parent_of_node(struct device *dev)
+{
+ struct device_node *np;
+
+ if (!dev)
+ return NULL;
+
+ np = dev_of_node(dev);
+ if (!np)
+ np = dev_of_node(dev->parent);
+
+ return np;
+}
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -3734,7 +3819,7 @@ fail_out:
*/
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
- return __clk_register(dev, dev_of_node(dev), hw);
+ return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);
@@ -3750,7 +3835,8 @@ EXPORT_SYMBOL_GPL(clk_register);
*/
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
- return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
+ return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
+ hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
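dev_or_parent_of_node() mostly benefits MFD-style children: the sub-device registering a clock has no DT node of its own, but its parent was probed from DT, so DT-based parent lookups can still be scoped to the right node. A sketch of the situation, names hypothetical:

/*
 * Hypothetical MFD layout:
 *
 *      mfd parent (probed from DT, dev->of_node != NULL)
 *       `-- child platform device (dev->of_node == NULL)
 *
 * clk_hw_register(&child->dev, hw) now resolves to
 * __clk_register(dev, dev_or_parent_of_node(dev), hw) and therefore
 * uses the parent's of_node when looking up DT parents for the clock.
 */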
@@ -3853,6 +3939,7 @@ static void clk_core_evict_parent_cache(struct clk_core *core)
void clk_unregister(struct clk *clk)
{
unsigned long flags;
+ const struct clk_ops *ops;
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
@@ -3861,7 +3948,8 @@ void clk_unregister(struct clk *clk)
clk_prepare_lock();
- if (clk->core->ops == &clk_nodrv_ops) {
+ ops = clk->core->ops;
+ if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
goto unlock;
@@ -3874,6 +3962,9 @@ void clk_unregister(struct clk *clk)
clk->core->ops = &clk_nodrv_ops;
clk_enable_unlock(flags);
+ if (ops->terminate)
+ ops->terminate(clk->core->hw);
+
if (!hlist_empty(&clk->core->children)) {
struct clk_core *child;
struct hlist_node *t;
@@ -4179,6 +4270,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
#ifdef CONFIG_OF
+static void clk_core_reparent_orphans(void)
+{
+ clk_prepare_lock();
+ clk_core_reparent_orphans_nolock();
+ clk_prepare_unlock();
+}
+
/**
* struct of_clk_provider - Clock provider registration structure
* @link: Entry in global list of clock providers
@@ -4274,6 +4372,8 @@ int of_clk_add_provider(struct device_node *np,
mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %pOF\n", np);
+ clk_core_reparent_orphans();
+
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
@@ -4309,6 +4409,8 @@ int of_clk_add_hw_provider(struct device_node *np,
mutex_unlock(&of_clk_mutex);
pr_debug("Added clk_hw provider from %pOF\n", np);
+ clk_core_reparent_orphans();
+
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 1ac0c7990392..01eadee88d66 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -20,6 +20,12 @@ config CLK_IMX8MN
help
Build the driver for i.MX8MN CCM Clock Driver
+config CLK_IMX8MP
+ bool "IMX8MP CCM Clock Driver"
+ depends on ARCH_MXC && ARM64
+ help
+ Build the driver for i.MX8MP CCM Clock Driver
+
config CLK_IMX8MQ
bool "IMX8MQ CCM Clock Driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 77a3d714f1d5..928f874c73d2 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_MXC_CLK) += \
clk-pllv2.o \
clk-pllv3.o \
clk-pllv4.o \
- clk-sccg-pll.o \
+ clk-sscg-pll.o \
clk-pll14xx.o
obj-$(CONFIG_MXC_CLK_SCU) += \
@@ -27,6 +27,7 @@ obj-$(CONFIG_MXC_CLK_SCU) += \
obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
+obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index 060f8600ea0d..b9efcc8a855d 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -21,7 +21,7 @@
#define PCG_PCD_WIDTH 3
#define PCG_PCD_MASK 0x7
-struct clk_hw *imx7ulp_clk_composite(const char *name,
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, bool mux_present,
bool rate_present, bool gate_present,
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 388bdb94f841..20f7c91c03d2 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
.set_rate = imx8m_clk_composite_divider_set_rate,
};
-struct clk *imx8m_clk_composite_flags(const char *name,
+struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
unsigned long flags)
@@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
mux->reg = reg;
mux->shift = PCG_PCS_SHIFT;
mux->mask = PCG_PCS_MASK;
+ mux->lock = &imx_ccm_lock;
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
@@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
gate_hw = &gate->hw;
gate->reg = reg;
gate->bit_idx = PCG_CGC_SHIFT;
+ gate->lock = &imx_ccm_lock;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ops, div_hw,
@@ -169,7 +171,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
if (IS_ERR(hw))
goto fail;
- return hw->clk;
+ return hw;
fail:
kfree(gate);
diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c
index 2a8352a316c7..0322a843d245 100644
--- a/drivers/clk/imx/clk-divider-gate.c
+++ b/drivers/clk/imx/clk-divider-gate.c
@@ -43,7 +43,7 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
{
struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
struct clk_divider *div = to_clk_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
unsigned int val;
spin_lock_irqsave(div->lock, flags);
@@ -75,7 +75,7 @@ static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
struct clk_divider *div = to_clk_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
int value;
u32 val;
@@ -104,7 +104,7 @@ static int clk_divider_enable(struct clk_hw *hw)
{
struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
struct clk_divider *div = to_clk_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
u32 val;
if (!div_gate->cached_val) {
@@ -127,7 +127,7 @@ static void clk_divider_disable(struct clk_hw *hw)
{
struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
struct clk_divider *div = to_clk_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
u32 val;
spin_lock_irqsave(div->lock, flags);
@@ -167,13 +167,13 @@ static const struct clk_ops clk_divider_gate_ops = {
};
/*
- * NOTE: In order to resue the most code from the common divider,
+ * NOTE: In order to reuse the most code from the common divider,
* we also design our divider following the way that provides an extra
* clk_divider_flags, however it's fixed to CLK_DIVIDER_ONE_BASED by
* default as our HW is. Besides that it supports only CLK_DIVIDER_READ_ONLY
* flag which can be specified by user flexibly.
*/
-struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg,
u8 shift, u8 width, u8 clk_divider_flags,
const struct clk_div_table *table,
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index fece503e3610..101e0a300376 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -201,8 +201,9 @@ static const struct clk_ops clk_frac_pll_ops = {
.set_rate = clk_pll_set_rate,
};
-struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
- void __iomem *base)
+struct clk_hw *imx_clk_hw_frac_pll(const char *name,
+ const char *parent_name,
+ void __iomem *base)
{
struct clk_init_data init;
struct clk_frac_pll *pll;
@@ -230,5 +231,5 @@ struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
return ERR_PTR(ret);
}
- return hw->clk;
+ return hw;
}
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 60f2de851f39..ba33c79158de 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -598,7 +598,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
}
hws[IMX6QDL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
- hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+ if (clk_on_imx6q() || clk_on_imx6qp())
+ hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = imx_clk_hw_fixed_factor("pll4_audio_div", "pll4_post_div", 1, 1);
+ else
+ hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
hws[IMX6QDL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX6QDL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 3fdf3d494f0a..0620d6c8c072 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = {
{ .val = 5, .div = 16, },
{ .val = 6, .div = 32, },
{ .val = 7, .div = 64, },
+ { /* sentinel */ },
};
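The added sentinel matters because the clk_divider table helpers walk entries until .div is zero; without a terminator the walk runs off the end of the array. A sketch of that convention (hypothetical helper name):

/* Sketch: table walkers in the divider code stop at the zeroed entry. */
static unsigned int sketch_max_div(const struct clk_div_table *table)
{
        unsigned int maxdiv = 0;

        for (; table->div; table++)
                if (table->div > maxdiv)
                        maxdiv = table->div;

        return maxdiv;
}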
static const int pcc2_uart_clk_ids[] __initconst = {
@@ -58,7 +59,7 @@ static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata
static void __init imx7ulp_clk_scg1_init(struct device_node *np)
{
struct clk_hw_onecell_data *clk_data;
- struct clk_hw **clks;
+ struct clk_hw **hws;
void __iomem *base;
clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END),
@@ -67,76 +68,76 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
return;
clk_data->num = IMX7ULP_CLK_SCG1_END;
- clks = clk_data->hws;
+ hws = clk_data->hws;
- clks[IMX7ULP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+ hws[IMX7ULP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- clks[IMX7ULP_CLK_ROSC] = imx_obtain_fixed_clk_hw(np, "rosc");
- clks[IMX7ULP_CLK_SOSC] = imx_obtain_fixed_clk_hw(np, "sosc");
- clks[IMX7ULP_CLK_SIRC] = imx_obtain_fixed_clk_hw(np, "sirc");
- clks[IMX7ULP_CLK_FIRC] = imx_obtain_fixed_clk_hw(np, "firc");
- clks[IMX7ULP_CLK_UPLL] = imx_obtain_fixed_clk_hw(np, "upll");
+ hws[IMX7ULP_CLK_ROSC] = imx_obtain_fixed_clk_hw(np, "rosc");
+ hws[IMX7ULP_CLK_SOSC] = imx_obtain_fixed_clk_hw(np, "sosc");
+ hws[IMX7ULP_CLK_SIRC] = imx_obtain_fixed_clk_hw(np, "sirc");
+ hws[IMX7ULP_CLK_FIRC] = imx_obtain_fixed_clk_hw(np, "firc");
+ hws[IMX7ULP_CLK_UPLL] = imx_obtain_fixed_clk_hw(np, "upll");
/* SCG1 */
base = of_iomap(np, 0);
WARN_ON(!base);
/* NOTE: xPLL config can't be changed when xPLL is enabled */
- clks[IMX7ULP_CLK_APLL_PRE_SEL] = imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
- clks[IMX7ULP_CLK_SPLL_PRE_SEL] = imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ hws[IMX7ULP_CLK_APLL_PRE_SEL] = imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ hws[IMX7ULP_CLK_SPLL_PRE_SEL] = imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
/* name parent_name reg shift width flags */
- clks[IMX7ULP_CLK_APLL_PRE_DIV] = imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508, 8, 3, CLK_SET_RATE_GATE);
- clks[IMX7ULP_CLK_SPLL_PRE_DIV] = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608, 8, 3, CLK_SET_RATE_GATE);
+ hws[IMX7ULP_CLK_APLL_PRE_DIV] = imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508, 8, 3, CLK_SET_RATE_GATE);
+ hws[IMX7ULP_CLK_SPLL_PRE_DIV] = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608, 8, 3, CLK_SET_RATE_GATE);
/* name parent_name base */
- clks[IMX7ULP_CLK_APLL] = imx_clk_pllv4("apll", "apll_pre_div", base + 0x500);
- clks[IMX7ULP_CLK_SPLL] = imx_clk_pllv4("spll", "spll_pre_div", base + 0x600);
+ hws[IMX7ULP_CLK_APLL] = imx_clk_hw_pllv4("apll", "apll_pre_div", base + 0x500);
+ hws[IMX7ULP_CLK_SPLL] = imx_clk_hw_pllv4("spll", "spll_pre_div", base + 0x600);
/* APLL PFDs */
- clks[IMX7ULP_CLK_APLL_PFD0] = imx_clk_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
- clks[IMX7ULP_CLK_APLL_PFD1] = imx_clk_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
- clks[IMX7ULP_CLK_APLL_PFD2] = imx_clk_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
- clks[IMX7ULP_CLK_APLL_PFD3] = imx_clk_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
+ hws[IMX7ULP_CLK_APLL_PFD0] = imx_clk_hw_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
+ hws[IMX7ULP_CLK_APLL_PFD1] = imx_clk_hw_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
+ hws[IMX7ULP_CLK_APLL_PFD2] = imx_clk_hw_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
+ hws[IMX7ULP_CLK_APLL_PFD3] = imx_clk_hw_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
/* SPLL PFDs */
- clks[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
- clks[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
- clks[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
- clks[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
+ hws[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_hw_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
+ hws[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_hw_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
+ hws[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_hw_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
+ hws[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_hw_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
/* PLL Mux */
- clks[IMX7ULP_CLK_APLL_PFD_SEL] = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
- clks[IMX7ULP_CLK_SPLL_PFD_SEL] = imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
- clks[IMX7ULP_CLK_APLL_SEL] = imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
- clks[IMX7ULP_CLK_SPLL_SEL] = imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ hws[IMX7ULP_CLK_APLL_PFD_SEL] = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ hws[IMX7ULP_CLK_SPLL_PFD_SEL] = imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ hws[IMX7ULP_CLK_APLL_SEL] = imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ hws[IMX7ULP_CLK_SPLL_SEL] = imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
- clks[IMX7ULP_CLK_SPLL_BUS_CLK] = imx_clk_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
+ hws[IMX7ULP_CLK_SPLL_BUS_CLK] = imx_clk_hw_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
/* scs/ddr/nic select different clock source requires that clock to be enabled first */
- clks[IMX7ULP_CLK_SYS_SEL] = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
- clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
- clks[IMX7ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
- clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ hws[IMX7ULP_CLK_SYS_SEL] = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+ hws[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+ hws[IMX7ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
+ hws[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
- clks[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
- clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
+ hws[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
+ hws[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
- clks[IMX7ULP_CLK_DDR_DIV] = imx_clk_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
+ hws[IMX7ULP_CLK_DDR_DIV] = imx_clk_hw_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
0, ulp_div_table, &imx_ccm_lock);
- clks[IMX7ULP_CLK_NIC0_DIV] = imx_clk_hw_divider_flags("nic0_clk", "nic_sel", base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX7ULP_CLK_NIC1_DIV] = imx_clk_hw_divider_flags("nic1_clk", "nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX7ULP_CLK_NIC1_BUS_DIV] = imx_clk_hw_divider_flags("nic1_bus_clk", "nic0_clk", base + 0x40, 4, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ hws[IMX7ULP_CLK_NIC0_DIV] = imx_clk_hw_divider_flags("nic0_clk", "nic_sel", base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ hws[IMX7ULP_CLK_NIC1_DIV] = imx_clk_hw_divider_flags("nic1_clk", "nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ hws[IMX7ULP_CLK_NIC1_BUS_DIV] = imx_clk_hw_divider_flags("nic1_bus_clk", "nic0_clk", base + 0x40, 4, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
- clks[IMX7ULP_CLK_GPU_DIV] = imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
+ hws[IMX7ULP_CLK_GPU_DIV] = imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
- clks[IMX7ULP_CLK_SOSC_BUS_CLK] = imx_clk_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
+ hws[IMX7ULP_CLK_SOSC_BUS_CLK] = imx_clk_hw_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
- clks[IMX7ULP_CLK_FIRC_BUS_CLK] = imx_clk_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
+ hws[IMX7ULP_CLK_FIRC_BUS_CLK] = imx_clk_hw_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
- imx_check_clk_hws(clks, clk_data->num);
+ imx_check_clk_hws(hws, clk_data->num);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
@@ -145,7 +146,7 @@ CLK_OF_DECLARE(imx7ulp_clk_scg1, "fsl,imx7ulp-scg1", imx7ulp_clk_scg1_init);
static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
{
struct clk_hw_onecell_data *clk_data;
- struct clk_hw **clks;
+ struct clk_hw **hws;
void __iomem *base;
int i;
@@ -155,42 +156,42 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
return;
clk_data->num = IMX7ULP_CLK_PCC2_END;
- clks = clk_data->hws;
+ hws = clk_data->hws;
/* PCC2 */
base = of_iomap(np, 0);
WARN_ON(!base);
- clks[IMX7ULP_CLK_DMA1] = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
- clks[IMX7ULP_CLK_RGPIO2P1] = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
- clks[IMX7ULP_CLK_DMA_MUX1] = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
- clks[IMX7ULP_CLK_CAAM] = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
- clks[IMX7ULP_CLK_LPTPM4] = imx7ulp_clk_composite("lptpm4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
- clks[IMX7ULP_CLK_LPTPM5] = imx7ulp_clk_composite("lptpm5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
- clks[IMX7ULP_CLK_LPIT1] = imx7ulp_clk_composite("lpit1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
- clks[IMX7ULP_CLK_LPSPI2] = imx7ulp_clk_composite("lpspi2", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
- clks[IMX7ULP_CLK_LPSPI3] = imx7ulp_clk_composite("lpspi3", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
- clks[IMX7ULP_CLK_LPI2C4] = imx7ulp_clk_composite("lpi2c4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
- clks[IMX7ULP_CLK_LPI2C5] = imx7ulp_clk_composite("lpi2c5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
- clks[IMX7ULP_CLK_LPUART4] = imx7ulp_clk_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
- clks[IMX7ULP_CLK_LPUART5] = imx7ulp_clk_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
- clks[IMX7ULP_CLK_FLEXIO1] = imx7ulp_clk_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
- clks[IMX7ULP_CLK_USB0] = imx7ulp_clk_composite("usb0", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xcc);
- clks[IMX7ULP_CLK_USB1] = imx7ulp_clk_composite("usb1", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xd0);
- clks[IMX7ULP_CLK_USB_PHY] = imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
- clks[IMX7ULP_CLK_USDHC0] = imx7ulp_clk_composite("usdhc0", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xdc);
- clks[IMX7ULP_CLK_USDHC1] = imx7ulp_clk_composite("usdhc1", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xe0);
- clks[IMX7ULP_CLK_WDG1] = imx7ulp_clk_composite("wdg1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0xf4);
- clks[IMX7ULP_CLK_WDG2] = imx7ulp_clk_composite("sdg2", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0x10c);
-
- imx_check_clk_hws(clks, clk_data->num);
+ hws[IMX7ULP_CLK_DMA1] = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
+ hws[IMX7ULP_CLK_RGPIO2P1] = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
+ hws[IMX7ULP_CLK_DMA_MUX1] = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
+ hws[IMX7ULP_CLK_CAAM] = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
+ hws[IMX7ULP_CLK_LPTPM4] = imx7ulp_clk_hw_composite("lptpm4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+ hws[IMX7ULP_CLK_LPTPM5] = imx7ulp_clk_hw_composite("lptpm5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+ hws[IMX7ULP_CLK_LPIT1] = imx7ulp_clk_hw_composite("lpit1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+ hws[IMX7ULP_CLK_LPSPI2] = imx7ulp_clk_hw_composite("lpspi2", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
+ hws[IMX7ULP_CLK_LPSPI3] = imx7ulp_clk_hw_composite("lpspi3", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
+ hws[IMX7ULP_CLK_LPI2C4] = imx7ulp_clk_hw_composite("lpi2c4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
+ hws[IMX7ULP_CLK_LPI2C5] = imx7ulp_clk_hw_composite("lpi2c5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
+ hws[IMX7ULP_CLK_LPUART4] = imx7ulp_clk_hw_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
+ hws[IMX7ULP_CLK_LPUART5] = imx7ulp_clk_hw_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
+ hws[IMX7ULP_CLK_FLEXIO1] = imx7ulp_clk_hw_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
+ hws[IMX7ULP_CLK_USB0] = imx7ulp_clk_hw_composite("usb0", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xcc);
+ hws[IMX7ULP_CLK_USB1] = imx7ulp_clk_hw_composite("usb1", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xd0);
+ hws[IMX7ULP_CLK_USB_PHY] = imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
+ hws[IMX7ULP_CLK_USDHC0] = imx7ulp_clk_hw_composite("usdhc0", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xdc);
+ hws[IMX7ULP_CLK_USDHC1] = imx7ulp_clk_hw_composite("usdhc1", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xe0);
+ hws[IMX7ULP_CLK_WDG1] = imx7ulp_clk_hw_composite("wdg1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0xf4);
+ hws[IMX7ULP_CLK_WDG2] = imx7ulp_clk_hw_composite("wdg2", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0x10c);
+
+ imx_check_clk_hws(hws, clk_data->num);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
int index = pcc2_uart_clk_ids[i];
- pcc2_uart_clks[i] = &clks[index]->clk;
+ pcc2_uart_clks[i] = &hws[index]->clk;
}
imx_register_uart_clocks(pcc2_uart_clks);
@@ -200,7 +201,7 @@ CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
{
struct clk_hw_onecell_data *clk_data;
- struct clk_hw **clks;
+ struct clk_hw **hws;
void __iomem *base;
int i;
@@ -210,41 +211,41 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
return;
clk_data->num = IMX7ULP_CLK_PCC3_END;
- clks = clk_data->hws;
+ hws = clk_data->hws;
/* PCC3 */
base = of_iomap(np, 0);
WARN_ON(!base);
- clks[IMX7ULP_CLK_LPTPM6] = imx7ulp_clk_composite("lptpm6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
- clks[IMX7ULP_CLK_LPTPM7] = imx7ulp_clk_composite("lptpm7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
+ hws[IMX7ULP_CLK_LPTPM6] = imx7ulp_clk_hw_composite("lptpm6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
+ hws[IMX7ULP_CLK_LPTPM7] = imx7ulp_clk_hw_composite("lptpm7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
- clks[IMX7ULP_CLK_MMDC] = clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ hws[IMX7ULP_CLK_MMDC] = clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
base + 0xac, 30, 0, &imx_ccm_lock);
- clks[IMX7ULP_CLK_LPI2C6] = imx7ulp_clk_composite("lpi2c6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
- clks[IMX7ULP_CLK_LPI2C7] = imx7ulp_clk_composite("lpi2c7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
- clks[IMX7ULP_CLK_LPUART6] = imx7ulp_clk_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
- clks[IMX7ULP_CLK_LPUART7] = imx7ulp_clk_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
- clks[IMX7ULP_CLK_DSI] = imx7ulp_clk_composite("dsi", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0xa4);
- clks[IMX7ULP_CLK_LCDIF] = imx7ulp_clk_composite("lcdif", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xa8);
+ hws[IMX7ULP_CLK_LPI2C6] = imx7ulp_clk_hw_composite("lpi2c6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
+ hws[IMX7ULP_CLK_LPI2C7] = imx7ulp_clk_hw_composite("lpi2c7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+ hws[IMX7ULP_CLK_LPUART6] = imx7ulp_clk_hw_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+ hws[IMX7ULP_CLK_LPUART7] = imx7ulp_clk_hw_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+ hws[IMX7ULP_CLK_DSI] = imx7ulp_clk_hw_composite("dsi", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0xa4);
+ hws[IMX7ULP_CLK_LCDIF] = imx7ulp_clk_hw_composite("lcdif", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xa8);
- clks[IMX7ULP_CLK_VIU] = imx_clk_hw_gate("viu", "nic1_clk", base + 0xa0, 30);
- clks[IMX7ULP_CLK_PCTLC] = imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
- clks[IMX7ULP_CLK_PCTLD] = imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
- clks[IMX7ULP_CLK_PCTLE] = imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
- clks[IMX7ULP_CLK_PCTLF] = imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
+ hws[IMX7ULP_CLK_VIU] = imx_clk_hw_gate("viu", "nic1_clk", base + 0xa0, 30);
+ hws[IMX7ULP_CLK_PCTLC] = imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
+ hws[IMX7ULP_CLK_PCTLD] = imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
+ hws[IMX7ULP_CLK_PCTLE] = imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
+ hws[IMX7ULP_CLK_PCTLF] = imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
- clks[IMX7ULP_CLK_GPU3D] = imx7ulp_clk_composite("gpu3d", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
- clks[IMX7ULP_CLK_GPU2D] = imx7ulp_clk_composite("gpu2d", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
+ hws[IMX7ULP_CLK_GPU3D] = imx7ulp_clk_hw_composite("gpu3d", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
+ hws[IMX7ULP_CLK_GPU2D] = imx7ulp_clk_hw_composite("gpu2d", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
- imx_check_clk_hws(clks, clk_data->num);
+ imx_check_clk_hws(hws, clk_data->num);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
int index = pcc3_uart_clk_ids[i];
- pcc3_uart_clks[i] = &clks[index]->clk;
+ pcc3_uart_clks[i] = &hws[index]->clk;
}
imx_register_uart_clocks(pcc3_uart_clks);
@@ -254,7 +255,7 @@ CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
static void __init imx7ulp_clk_smc1_init(struct device_node *np)
{
struct clk_hw_onecell_data *clk_data;
- struct clk_hw **clks;
+ struct clk_hw **hws;
void __iomem *base;
clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END),
@@ -263,15 +264,15 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
return;
clk_data->num = IMX7ULP_CLK_SMC1_END;
- clks = clk_data->hws;
+ hws = clk_data->hws;
/* SMC1 */
base = of_iomap(np, 0);
WARN_ON(!base);
- clks[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
+ hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
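+ /* the core clock mux must never be gated, hence CLK_IS_CRITICAL */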
- imx_check_clk_hws(clks, clk_data->num);
+ imx_check_clk_hws(hws, clk_data->num);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 030b15d7c0ce..2ed93fc25087 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "clk.h"
@@ -285,118 +286,126 @@ static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }
static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
"vpu_pll", "sys_pll1_80m", };
-static struct clk *clks[IMX8MM_CLK_END];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
-static struct clk ** const uart_clks[] = {
- &clks[IMX8MM_CLK_UART1_ROOT],
- &clks[IMX8MM_CLK_UART2_ROOT],
- &clks[IMX8MM_CLK_UART3_ROOT],
- &clks[IMX8MM_CLK_UART4_ROOT],
- NULL
+static const int uart_clk_ids[] = {
+ IMX8MM_CLK_UART1_ROOT,
+ IMX8MM_CLK_UART2_ROOT,
+ IMX8MM_CLK_UART3_ROOT,
+ IMX8MM_CLK_UART4_ROOT,
};
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
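+/* static storage zeroes the extra slot, NULL-terminating the list for imx_register_uart_clocks() */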
static int imx8mm_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
- int ret;
+ int ret, i;
- clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clks[IMX8MM_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
- clks[IMX8MM_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
- clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
- clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
- clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
- clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+ clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ IMX8MM_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+ clk_hw_data->num = IMX8MM_CLK_END;
+ hws = clk_hw_data->hws;
+
+ hws[IMX8MM_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
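+ /*
+ * The oscillator and ext clocks come from DT; imx_obtain_fixed_clk_hw()
+ * resolves them by name and returns the underlying clk_hw.
+ */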
+ hws[IMX8MM_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+ hws[IMX8MM_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+ hws[IMX8MM_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+ hws[IMX8MM_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+ hws[IMX8MM_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+ hws[IMX8MM_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
base = of_iomap(np, 0);
if (WARN_ON(!base))
return -ENOMEM;
- clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-
- clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
- clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
- clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
- clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
- clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
- clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
- clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
- clks[IMX8MM_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
- clks[IMX8MM_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
- clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
+ hws[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ hws[IMX8MM_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ hws[IMX8MM_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ hws[IMX8MM_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ hws[IMX8MM_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+ hws[IMX8MM_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ hws[IMX8MM_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ hws[IMX8MM_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ hws[IMX8MM_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+ hws[IMX8MM_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+ hws[IMX8MM_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
- clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
/* PLL out gate */
- clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
- clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
- clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
- clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
- clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
- clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
- clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
- clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+ hws[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ hws[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+ hws[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+ hws[IMX8MM_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ hws[IMX8MM_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+ hws[IMX8MM_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
+ hws[IMX8MM_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+ hws[IMX8MM_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
/* SYS PLL1 fixed output */
- clks[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
- clks[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
- clks[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
- clks[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
- clks[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
- clks[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
- clks[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
- clks[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
- clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
-
- clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
- clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
- clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
- clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
- clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
- clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
- clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
- clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
- clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+ hws[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+ hws[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+ hws[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+ hws[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+ hws[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+ hws[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+ hws[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+ hws[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+ hws[IMX8MM_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+ hws[IMX8MM_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ hws[IMX8MM_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ hws[IMX8MM_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ hws[IMX8MM_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ hws[IMX8MM_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ hws[IMX8MM_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ hws[IMX8MM_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ hws[IMX8MM_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+ hws[IMX8MM_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
/* SYS PLL2 fixed output */
- clks[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
- clks[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
- clks[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
- clks[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
- clks[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
- clks[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
- clks[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
- clks[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
- clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
-
- clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
- clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
- clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
- clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
- clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
- clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
- clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
- clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
- clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+ hws[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+ hws[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+ hws[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+ hws[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+ hws[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+ hws[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+ hws[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+ hws[IMX8MM_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+ hws[IMX8MM_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ hws[IMX8MM_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ hws[IMX8MM_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ hws[IMX8MM_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ hws[IMX8MM_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ hws[IMX8MM_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ hws[IMX8MM_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+ hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
@@ -404,204 +413,213 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
return PTR_ERR(base);
/* Core Slice */
- clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
- clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
- clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
- clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
- clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
- clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
- clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
- clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
- clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
- clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
- clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+ hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
+ hws[IMX8MM_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
+ hws[IMX8MM_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
+ hws[IMX8MM_CLK_GPU3D_SRC] = imx_clk_hw_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
+ hws[IMX8MM_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
+ hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+ hws[IMX8MM_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+ hws[IMX8MM_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+ hws[IMX8MM_CLK_GPU3D_CG] = imx_clk_hw_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
+ hws[IMX8MM_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
+ hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MM_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+ hws[IMX8MM_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+ hws[IMX8MM_CLK_GPU3D_DIV] = imx_clk_hw_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
+ hws[IMX8MM_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
/* BUS */
- clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
- clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
- clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
- clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
- clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
- clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
- clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
- clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
- clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
- clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
- clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
- clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+ hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
+ hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+ hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+ hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+ hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+ hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
/* AHB */
- clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
- clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+ hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
/* IPG */
- clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
- clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+ hws[IMX8MM_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ hws[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+ /*
+ * DRAM clocks are manipulated from TF-A outside of the clock framework.
+ * Mark them with CLK_GET_RATE_NOCACHE so the divider value is always
+ * read back from the hardware.
+ */
+ hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+ hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
/* IP */
- clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
- clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
- clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
- clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
- clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
- clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
- clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
- clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
- clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
- clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
- clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
- clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
- clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
- clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
- clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
- clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
- clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
- clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
- clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
- clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
- clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
- clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
- clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00);
- clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
- clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
- clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
- clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
- clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
- clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
- clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
- clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
- clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
- clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
- clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
- clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
- clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
- clks[IMX8MM_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mm_gic_sels, base + 0xb200);
- clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
- clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
- clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
- clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
- clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
- clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
- clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
- clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
- clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
- clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
- clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
- clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
- clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
- clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
- clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
- clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
- clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
- clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
- clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
- clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
- clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
- clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
- clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
- clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
- clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
- clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
+ hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
+ hws[IMX8MM_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180);
+ hws[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_hw_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200);
+ hws[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_hw_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280);
+ hws[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_hw_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300);
+ hws[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380);
+ hws[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400);
+ hws[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480);
+ hws[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500);
+ hws[IMX8MM_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mm_sai1_sels, base + 0xa580);
+ hws[IMX8MM_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mm_sai2_sels, base + 0xa600);
+ hws[IMX8MM_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mm_sai3_sels, base + 0xa680);
+ hws[IMX8MM_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mm_sai4_sels, base + 0xa700);
+ hws[IMX8MM_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mm_sai5_sels, base + 0xa780);
+ hws[IMX8MM_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mm_sai6_sels, base + 0xa800);
+ hws[IMX8MM_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880);
+ hws[IMX8MM_CLK_SPDIF2] = imx8m_clk_hw_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900);
+ hws[IMX8MM_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980);
+ hws[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00);
+ hws[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80);
+ hws[IMX8MM_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mm_nand_sels, base + 0xab00);
+ hws[IMX8MM_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mm_qspi_sels, base + 0xab80);
+ hws[IMX8MM_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00);
+ hws[IMX8MM_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80);
+ hws[IMX8MM_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00);
+ hws[IMX8MM_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80);
+ hws[IMX8MM_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00);
+ hws[IMX8MM_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80);
+ hws[IMX8MM_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mm_uart1_sels, base + 0xaf00);
+ hws[IMX8MM_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mm_uart2_sels, base + 0xaf80);
+ hws[IMX8MM_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mm_uart3_sels, base + 0xb000);
+ hws[IMX8MM_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mm_uart4_sels, base + 0xb080);
+ hws[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100);
+ hws[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180);
+ hws[IMX8MM_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mm_gic_sels, base + 0xb200);
+ hws[IMX8MM_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280);
+ hws[IMX8MM_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300);
+ hws[IMX8MM_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380);
+ hws[IMX8MM_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400);
+ hws[IMX8MM_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480);
+ hws[IMX8MM_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500);
+ hws[IMX8MM_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580);
+ hws[IMX8MM_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
+ hws[IMX8MM_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
+ hws[IMX8MM_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+ hws[IMX8MM_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
+ hws[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
+ hws[IMX8MM_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
+ hws[IMX8MM_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80);
+ hws[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_hw_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00);
+ hws[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80);
+ hws[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_hw_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00);
+ hws[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_hw_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80);
+ hws[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00);
+ hws[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80);
+ hws[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000);
+ hws[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080);
+ hws[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_hw_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100);
+ hws[IMX8MM_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180);
+ hws[IMX8MM_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mm_pdm_sels, base + 0xc200);
+ hws[IMX8MM_CLK_VPU_H1] = imx8m_clk_hw_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280);
/* CCGR */
- clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
- clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
- clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
- clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
- clks[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
- clks[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
- clks[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
- clks[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
- clks[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
- clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
- clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
- clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
- clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
- clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
- clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
- clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
- clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
- clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
- clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
- clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
- clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
- clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
- clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
- clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
- clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
- clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
- clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
- clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
- clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
- clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
- clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
- clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
- clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
- clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
- clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
- clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
- clks[IMX8MM_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
- clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
- clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
- clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
- clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
- clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
- clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
- clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
- clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
- clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
- clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
- clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
- clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
- clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
- clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
- clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
- clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
- clks[IMX8MM_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
- clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
- clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
- clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
- clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
- clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
- clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
- clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
- clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
-
- clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8);
-
- clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
- clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
-
- clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
- clks[IMX8MM_CLK_A53_DIV],
- clks[IMX8MM_CLK_A53_SRC],
- clks[IMX8MM_ARM_PLL_OUT],
- clks[IMX8MM_SYS_PLL1_800M]);
-
- imx_check_clocks(clks, ARRAY_SIZE(clks));
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ hws[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ hws[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ hws[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ hws[IMX8MM_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ hws[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+ hws[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+ hws[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+ hws[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+ hws[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+ hws[IMX8MM_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+ hws[IMX8MM_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ hws[IMX8MM_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ hws[IMX8MM_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ hws[IMX8MM_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ hws[IMX8MM_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ hws[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ hws[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_hw_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+ hws[IMX8MM_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ hws[IMX8MM_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ hws[IMX8MM_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ hws[IMX8MM_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ hws[IMX8MM_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+ hws[IMX8MM_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MM_CLK_SAI1_ROOT] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+ hws[IMX8MM_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+ hws[IMX8MM_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ hws[IMX8MM_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+ hws[IMX8MM_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ hws[IMX8MM_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+ hws[IMX8MM_CLK_SAI4_ROOT] = imx_clk_hw_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+ hws[IMX8MM_CLK_SAI4_IPG] = imx_clk_hw_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+ hws[IMX8MM_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ hws[IMX8MM_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ hws[IMX8MM_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MM_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MM_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
+ hws[IMX8MM_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ hws[IMX8MM_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ hws[IMX8MM_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ hws[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+ hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+ hws[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ hws[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ hws[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ hws[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ hws[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ hws[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_hw_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0);
+ hws[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+ hws[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_hw_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0);
+ hws[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0);
+ hws[IMX8MM_CLK_PDM_ROOT] = imx_clk_hw_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+ hws[IMX8MM_CLK_PDM_IPG] = imx_clk_hw_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+ hws[IMX8MM_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_hw_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+ hws[IMX8MM_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+ hws[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_hw_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0);
+ hws[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ hws[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+ hws[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+ hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+ hws[IMX8MM_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+
+ hws[IMX8MM_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
+
+ hws[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+ hws[IMX8MM_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
+
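+ /*
+ * Virtual CPU clock: while arm_pll is reprogrammed, arm_a53_src is
+ * temporarily reparented to the sys_pll1_800m "step" clock so the
+ * cores never lose their clock.
+ */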
+ hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+ hws[IMX8MM_CLK_A53_DIV]->clk,
+ hws[IMX8MM_CLK_A53_SRC]->clk,
+ hws[IMX8MM_ARM_PLL_OUT]->clk,
+ hws[IMX8MM_SYS_PLL1_800M]->clk);
+
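+ /* report any clk_hw in the array that failed to register */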
+ imx_check_clk_hws(hws, IMX8MM_CLK_END);
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (ret < 0) {
- pr_err("failed to register clks for i.MX8MM\n");
- goto unregister_clks;
+ dev_err(dev, "failed to register clks for i.MX8MM\n");
+ goto unregister_hws;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+ int index = uart_clk_ids[i];
+
+ uart_hws[i] = &hws[index]->clk;
}
- imx_register_uart_clocks(uart_clks);
+ imx_register_uart_clocks(uart_hws);
return 0;
-unregister_clks:
- imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+ imx_unregister_hw_clocks(hws, IMX8MM_CLK_END);
return ret;
}
@@ -616,6 +634,11 @@ static struct platform_driver imx8mm_clk_driver = {
.probe = imx8mm_clocks_probe,
.driver = {
.name = "imx8mm-ccm",
+ /*
+ * Disable bind attributes: clocks are not removed and
+ * reloading the driver will crash or break devices.
+ */
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(imx8mm_clk_of_match),
},
};
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 9f5a5a56b45e..c5e7316b4c66 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "clk.h"
@@ -280,284 +281,302 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
"sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
"video_pll1_out", "osc_32k", };
-static struct clk *clks[IMX8MN_CLK_END];
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
-static struct clk ** const uart_clks[] = {
- &clks[IMX8MN_CLK_UART1_ROOT],
- &clks[IMX8MN_CLK_UART2_ROOT],
- &clks[IMX8MN_CLK_UART3_ROOT],
- &clks[IMX8MN_CLK_UART4_ROOT],
- NULL
+static const int uart_clk_ids[] = {
+ IMX8MN_CLK_UART1_ROOT,
+ IMX8MN_CLK_UART2_ROOT,
+ IMX8MN_CLK_UART3_ROOT,
+ IMX8MN_CLK_UART4_ROOT,
};
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
static int imx8mn_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
- int ret;
+ int ret, i;
- clks[IMX8MN_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clks[IMX8MN_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
- clks[IMX8MN_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
- clks[IMX8MN_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
- clks[IMX8MN_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
- clks[IMX8MN_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
- clks[IMX8MN_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
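+ /* struct_size() covers the onecell header plus IMX8MN_CLK_END hw pointers. */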
+ clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ IMX8MN_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+ clk_hw_data->num = IMX8MN_CLK_END;
+ hws = clk_hw_data->hws;
+
+ hws[IMX8MN_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+ hws[IMX8MN_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+ hws[IMX8MN_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+ hws[IMX8MN_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+ hws[IMX8MN_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+ hws[IMX8MN_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+ hws[IMX8MN_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
base = of_iomap(np, 0);
if (WARN_ON(!base)) {
ret = -ENOMEM;
- goto unregister_clks;
+ goto unregister_hws;
}
- clks[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-
- clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
- clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
- clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
- clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
- clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
- clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
- clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
- clks[IMX8MN_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
- clks[IMX8MN_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
- clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
+ hws[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ hws[IMX8MN_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ hws[IMX8MN_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+ hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+ hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+ hws[IMX8MN_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
- clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
/* PLL out gate */
- clks[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
- clks[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
- clks[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
- clks[IMX8MN_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
- clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
- clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
- clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
- clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+ hws[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ hws[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+ hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+ hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+ hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
+ hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+ hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
/* SYS PLL1 fixed output */
- clks[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
- clks[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
- clks[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
- clks[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
- clks[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
- clks[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
- clks[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
- clks[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
- clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
-
- clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
- clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
- clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
- clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
- clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
- clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
- clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
- clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
- clks[IMX8MN_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+ hws[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+ hws[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+ hws[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+ hws[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+ hws[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+ hws[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+ hws[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+ hws[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+ hws[IMX8MN_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+ hws[IMX8MN_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ hws[IMX8MN_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ hws[IMX8MN_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ hws[IMX8MN_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ hws[IMX8MN_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ hws[IMX8MN_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ hws[IMX8MN_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ hws[IMX8MN_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
+ hws[IMX8MN_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
/* SYS PLL2 fixed output */
- clks[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
- clks[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
- clks[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
- clks[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
- clks[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
- clks[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
- clks[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
- clks[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
- clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
-
- clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
- clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
- clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
- clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
- clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
- clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
- clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
- clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
- clks[IMX8MN_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+ hws[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+ hws[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+ hws[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+ hws[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+ hws[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+ hws[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+ hws[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+ hws[IMX8MN_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+ hws[IMX8MN_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ hws[IMX8MN_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ hws[IMX8MN_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ hws[IMX8MN_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ hws[IMX8MN_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ hws[IMX8MN_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ hws[IMX8MN_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
+ hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base))) {
ret = PTR_ERR(base);
- goto unregister_clks;
+ goto unregister_hws;
}
/* CORE */
- clks[IMX8MN_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
- clks[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
- clks[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels, ARRAY_SIZE(imx8mn_gpu_shader_sels));
- clks[IMX8MN_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- clks[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
- clks[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+ hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
+ hws[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
+ hws[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels, ARRAY_SIZE(imx8mn_gpu_shader_sels));
+ hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+ hws[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+ hws[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
- clks[IMX8MN_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- clks[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
- clks[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+ hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+ hws[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
/* BUS */
- clks[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
- clks[IMX8MN_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
- clks[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
- clks[IMX8MN_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
- clks[IMX8MN_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
- clks[IMX8MN_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
- clks[IMX8MN_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
- clks[IMX8MN_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
- clks[IMX8MN_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
-
- clks[IMX8MN_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
- clks[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
- clks[IMX8MN_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
- clks[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
- clks[IMX8MN_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
- clks[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
- clks[IMX8MN_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
- clks[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
- clks[IMX8MN_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
- clks[IMX8MN_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
- clks[IMX8MN_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
- clks[IMX8MN_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
- clks[IMX8MN_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
- clks[IMX8MN_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
- clks[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
- clks[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
- clks[IMX8MN_CLK_NAND] = imx8m_clk_composite("nand", imx8mn_nand_sels, base + 0xab00);
- clks[IMX8MN_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
- clks[IMX8MN_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
- clks[IMX8MN_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
- clks[IMX8MN_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
- clks[IMX8MN_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
- clks[IMX8MN_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
- clks[IMX8MN_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
- clks[IMX8MN_CLK_UART1] = imx8m_clk_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
- clks[IMX8MN_CLK_UART2] = imx8m_clk_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
- clks[IMX8MN_CLK_UART3] = imx8m_clk_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
- clks[IMX8MN_CLK_UART4] = imx8m_clk_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
- clks[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
- clks[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
- clks[IMX8MN_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mn_gic_sels, base + 0xb200);
- clks[IMX8MN_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
- clks[IMX8MN_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
- clks[IMX8MN_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
- clks[IMX8MN_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
- clks[IMX8MN_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
- clks[IMX8MN_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
- clks[IMX8MN_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
- clks[IMX8MN_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
- clks[IMX8MN_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
- clks[IMX8MN_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
- clks[IMX8MN_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
- clks[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
- clks[IMX8MN_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
- clks[IMX8MN_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
- clks[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
- clks[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
- clks[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
- clks[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
- clks[IMX8MN_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
- clks[IMX8MN_CLK_PDM] = imx8m_clk_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
- clks[IMX8MN_CLK_SAI7] = imx8m_clk_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
-
- clks[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
- clks[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
- clks[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
- clks[IMX8MN_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
- clks[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
- clks[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
- clks[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
- clks[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
- clks[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
- clks[IMX8MN_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
- clks[IMX8MN_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
- clks[IMX8MN_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
- clks[IMX8MN_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
- clks[IMX8MN_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
- clks[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
- clks[IMX8MN_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
- clks[IMX8MN_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
- clks[IMX8MN_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
- clks[IMX8MN_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
- clks[IMX8MN_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
- clks[IMX8MN_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
- clks[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
- clks[IMX8MN_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
- clks[IMX8MN_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
- clks[IMX8MN_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
- clks[IMX8MN_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
- clks[IMX8MN_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
- clks[IMX8MN_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
- clks[IMX8MN_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
- clks[IMX8MN_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
- clks[IMX8MN_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
- clks[IMX8MN_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
- clks[IMX8MN_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
- clks[IMX8MN_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
- clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
- clks[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
- clks[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
- clks[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
- clks[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
- clks[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
- clks[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
- clks[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
- clks[IMX8MN_CLK_ASRC_ROOT] = imx_clk_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
- clks[IMX8MN_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
- clks[IMX8MN_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
- clks[IMX8MN_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MN_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
- clks[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
- clks[IMX8MN_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
- clks[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
- clks[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
- clks[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
- clks[IMX8MN_CLK_SAI7_ROOT] = imx_clk_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
-
- clks[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
-
- clks[IMX8MN_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
- clks[IMX8MN_CLK_A53_DIV],
- clks[IMX8MN_CLK_A53_SRC],
- clks[IMX8MN_ARM_PLL_OUT],
- clks[IMX8MN_SYS_PLL1_800M]);
-
- imx_check_clocks(clks, ARRAY_SIZE(clks));
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
- ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
+ hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
+ hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
+
+ hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
+ hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+ hws[IMX8MN_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
+
+ /*
+ * DRAM clocks are manipulated by TF-A outside the clock framework.
+ * Mark them CLK_GET_RATE_NOCACHE so the divider value is always read
+ * back from hardware.
+ */
+ hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+ hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+
+ hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
+ hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
+ hws[IMX8MN_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
+ hws[IMX8MN_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
+ hws[IMX8MN_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
+ hws[IMX8MN_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
+ hws[IMX8MN_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
+ hws[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
+ hws[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
+ hws[IMX8MN_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mn_nand_sels, base + 0xab00);
+ hws[IMX8MN_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
+ hws[IMX8MN_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
+ hws[IMX8MN_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
+ hws[IMX8MN_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
+ hws[IMX8MN_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
+ hws[IMX8MN_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
+ hws[IMX8MN_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
+ hws[IMX8MN_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
+ hws[IMX8MN_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
+ hws[IMX8MN_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
+ hws[IMX8MN_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
+ hws[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
+ hws[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
+ hws[IMX8MN_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mn_gic_sels, base + 0xb200);
+ hws[IMX8MN_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
+ hws[IMX8MN_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
+ hws[IMX8MN_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
+ hws[IMX8MN_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
+ hws[IMX8MN_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
+ hws[IMX8MN_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
+ hws[IMX8MN_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
+ hws[IMX8MN_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
+ hws[IMX8MN_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
+ hws[IMX8MN_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
+ hws[IMX8MN_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
+ hws[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
+ hws[IMX8MN_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
+ hws[IMX8MN_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
+ hws[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_hw_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
+ hws[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
+ hws[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
+ hws[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
+ hws[IMX8MN_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
+ hws[IMX8MN_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
+ hws[IMX8MN_CLK_SAI7] = imx8m_clk_hw_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
+
+ hws[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ hws[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ hws[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ hws[IMX8MN_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ hws[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+ hws[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+ hws[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+ hws[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+ hws[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+ hws[IMX8MN_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ hws[IMX8MN_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ hws[IMX8MN_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ hws[IMX8MN_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ hws[IMX8MN_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ hws[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ hws[IMX8MN_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ hws[IMX8MN_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ hws[IMX8MN_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ hws[IMX8MN_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ hws[IMX8MN_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+ hws[IMX8MN_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MN_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ hws[IMX8MN_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+ hws[IMX8MN_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ hws[IMX8MN_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+ hws[IMX8MN_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ hws[IMX8MN_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ hws[IMX8MN_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ hws[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+ hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+ hws[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ hws[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ hws[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ hws[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ hws[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ hws[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+ hws[IMX8MN_CLK_ASRC_ROOT] = imx_clk_hw_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
+ hws[IMX8MN_CLK_PDM_ROOT] = imx_clk_hw_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+ hws[IMX8MN_CLK_PDM_IPG] = imx_clk_hw_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+ hws[IMX8MN_CLK_DISP_AXI_ROOT] = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MN_CLK_DISP_APB_ROOT] = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_hw_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_hw_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
+ hws[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+ hws[IMX8MN_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+ hws[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+ hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+ hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+
+ hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+
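+ /* Same step-clock dance as on i.MX8MM: park on sys_pll1_800m while the ARM PLL retunes. */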
+ hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+ hws[IMX8MN_CLK_A53_DIV]->clk,
+ hws[IMX8MN_CLK_A53_SRC]->clk,
+ hws[IMX8MN_ARM_PLL_OUT]->clk,
+ hws[IMX8MN_SYS_PLL1_800M]->clk);
+
+ imx_check_clk_hws(hws, IMX8MN_CLK_END);
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (ret < 0) {
- dev_err(dev, "failed to register clks for i.MX8MN\n");
- goto unregister_clks;
+ dev_err(dev, "failed to register hws for i.MX8MN\n");
+ goto unregister_hws;
+ }
+
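+ /* As on i.MX8MM, the static NULL sentinel in uart_hws terminates the list. */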
+ for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+ int index = uart_clk_ids[i];
+
+ uart_hws[i] = &hws[index]->clk;
}
- imx_register_uart_clocks(uart_clks);
+ imx_register_uart_clocks(uart_hws);
return 0;
-unregister_clks:
- imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+ imx_unregister_hw_clocks(hws, IMX8MN_CLK_END);
return ret;
}
@@ -572,6 +591,11 @@ static struct platform_driver imx8mn_clk_driver = {
.probe = imx8mn_clocks_probe,
.driver = {
.name = "imx8mn-ccm",
+ /*
+ * Disable bind attributes: clocks are not removed and
+ * reloading the driver will crash or break devices.
+ */
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(imx8mn_clk_of_match),
},
};
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
new file mode 100644
index 000000000000..f6c120cca0d4
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/clock/imx8mp-clock.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+static u32 share_count_nand;
+static u32 share_count_media;
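+/* Refcounts for gate enable bits shared by several logical clocks. */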
+
+static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char * const sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
+static const char * const sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
+static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
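+/*
+ * Each root-slice *_sels table lists parents in hardware mux-select
+ * order; the CCM root muxes are three bits wide, hence eight entries.
+ */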
+static const char * const imx8mp_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m",
+ "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
+ "audio_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m",
+ "vpu_pll_out", "sys_pll1_800m", "audio_pll1_out",
+ "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_ml_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu3d_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu3d_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_audio_axi_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_hsio_axi_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+ "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+ "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_media_isp_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll1_400m", "audio_pll2_out",
+ "clk_ext1", "sys_pll2_500m", };
+
+static const char * const imx8mp_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m",
+ "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "sys_pll1_100m",};
+
+static const char * const imx8mp_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out",
+ "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mp_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll1_out", };
+
+static const char * const imx8mp_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out",
+ "audio_pll2_out", "sys_pll3_out", "sys_pll2_1000m",
+ "sys_pll2_200m", "sys_pll1_100m", };
+
+static const char * const imx8mp_media_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+ "clk_ext1", "sys_pll2_500m", };
+
+static const char * const imx8mp_media_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+ "clk_ext1", "sys_pll1_133m", };
+
+static const char * const imx8mp_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_noc_io_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ml_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ml_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m",
+ "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out",
+ "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out",
+ "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_mipi_dsi_esc_rx_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m",
+ "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out",
+ "audio_pll1_out", "sys_pll1_266m", };
+
+static const char * const imx8mp_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m",
+ "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll1_100m", "sys_pll2_125m",
+ "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_can1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_can2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_memrepair_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_pcie_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
+ "clk_ext1", "clk_ext2", "clk_ext3",
+ "clk_ext4", "sys_pll1_400m", };
+
+static const char * const imx8mp_pcie_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m",
+ "sys_pll3_out", "sys_pll2_100m", "sys_pll1_80m",
+ "sys_pll1_160m", "sys_pll1_200m", };
+
+static const char * const imx8mp_i2c5_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c6_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext1", "clk_ext2", };
+
+static const char * const imx8mp_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext2", "clk_ext3", };
+
+static const char * const imx8mp_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext1", "clk_ext2", };
+
+static const char * const imx8mp_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext2", "clk_ext3", };
+
+static const char * const imx8mp_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_enet_qos_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+ "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+ "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mp_enet_qos_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ "clk_ext1", "clk_ext2", "clk_ext3",
+ "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mp_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+ "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+ "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mp_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ "clk_ext1", "clk_ext2", "clk_ext3",
+ "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mp_enet_phy_ref_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m",
+ "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mp_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out",
+ "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out",
+ "sys_pll2_250m", "video_pll1_out", };
+
+static const char * const imx8mp_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m",
+ "sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m",
+ "sys_pll3_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+ "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+ "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mp_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_usb_core_ref_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+ "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_usb_phy_ref_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+ "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_gic_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll2_100m", "sys_pll1_800m",
+ "sys_pll2_500m", "clk_ext4", "audio_pll2_out" };
+
+static const char * const imx8mp_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mp_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1" };
+
+static const char * const imx8mp_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext2" };
+
+static const char * const imx8mp_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext3" };
+
+static const char * const imx8mp_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext1" };
+
+static const char * const imx8mp_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext2" };
+
+static const char * const imx8mp_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+ "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
+ "audio_pll1_out", "clk_ext3" };
+
+static const char * const imx8mp_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
+ "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
+ "sys_pll1_80m", "sys_pll2_166m" };
+
+static const char * const imx8mp_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out",
+ "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll2_500m", "sys_pll1_100m" };
+
+static const char * const imx8mp_ipp_do_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_133m",
+ "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m",
+ "vpu_pll_out", "sys_pll1_80m" };
+
+static const char * const imx8mp_ipp_do_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
+ "sys_pll1_166m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "osc_32k" };
+
+static const char * const imx8mp_hdmi_fdcc_tst_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mp_hdmi_27m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mp_hdmi_ref_266m_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out",
+ "sys_pll2_333m", "sys_pll1_266m", "sys_pll2_200m",
+ "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mp_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+ "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mp_media_cam1_pix_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll3_out", "audio_pll2_out",
+ "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_phy1_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "clk_ext2", "audio_pll2_out",
+ "video_pll1_out", };
+
+static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
+ "audio_pll1_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", };
+
+static const char * const imx8mp_media_cam2_pix_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll3_out", "audio_pll2_out",
+ "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_phy2_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "clk_ext2", "audio_pll2_out",
+ "video_pll1_out", };
+
+static const char * const imx8mp_media_mipi_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll3_out", "clk_ext3",
+ "audio_pll2_out", };
+
+static const char * const imx8mp_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m",
+ "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_500m",
+ "sys_pll2_333m", "sys_pll3_out", };
+
+static const char * const imx8mp_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
+ "clk_ext1", "clk_ext2", "clk_ext3",
+ "clk_ext4", "sys_pll1_400m", };
+
+static const char * const imx8mp_media_mipi_test_byte_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m",
+ "sys_pll3_out", "sys_pll2_100m",
+ "sys_pll1_80m", "sys_pll1_160m",
+ "sys_pll1_200m", };
+
+static const char * const imx8mp_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mp_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mp_vpu_vc8000e_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m",
+ "sys_pll2_1000m", "audio_pll2_out", "sys_pll2_125m",
+ "sys_pll3_out", "audio_pll1_out", };
+
+static const char * const imx8mp_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
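+/* Provider state: clk_hw_data is the onecell table handed to the DT framework; hws aliases its embedded hw array for convenient indexing. */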
+static struct clk_hw **hws;
+static struct clk_hw_onecell_data *clk_hw_data;
+
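+/* The extra slot leaves a NULL terminator for imx_register_uart_clocks(), which, per the shared clk-imx code, keeps the UART root clocks enabled while an early console is active. */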
+static const int uart_clk_ids[] = {
+ IMX8MP_CLK_UART1_ROOT,
+ IMX8MP_CLK_UART2_ROOT,
+ IMX8MP_CLK_UART3_ROOT,
+ IMX8MP_CLK_UART4_ROOT,
+};
+static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
+
+static int imx8mp_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *anatop_base, *ccm_base;
+ int i;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+ anatop_base = of_iomap(np, 0);
+ if (WARN_ON(!anatop_base))
+ return -ENOMEM;
+
+ np = dev->of_node;
+ ccm_base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(ccm_base))) {
+ iounmap(anatop_base);
+ return PTR_ERR(ccm_base);
+ }
+
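+ /* Single allocation covering the onecell header plus IMX8MP_CLK_END hw pointers; struct_size() sizes the flexible array. */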
+ clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data)) {
+ iounmap(anatop_base);
+ return -ENOMEM;
+ }
+
+ clk_hw_data->num = IMX8MP_CLK_END;
+ hws = clk_hw_data->hws;
+
+ hws[IMX8MP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+ hws[IMX8MP_CLK_24M] = imx_obtain_fixed_clk_hw(np, "osc_24m");
+ hws[IMX8MP_CLK_32K] = imx_obtain_fixed_clk_hw(np, "osc_32k");
+ hws[IMX8MP_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+ hws[IMX8MP_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+ hws[IMX8MP_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+ hws[IMX8MP_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
+
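+ /* Anatop PLL reference selectors: one 2-bit mux per PLL, choosing among the pll_ref_sels parents. */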
+ hws[IMX8MP_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", anatop_base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", anatop_base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", anatop_base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", anatop_base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", anatop_base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", anatop_base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", anatop_base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_SYS_PLL1_REF_SEL] = imx_clk_hw_mux("sys_pll1_ref_sel", anatop_base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_SYS_PLL2_REF_SEL] = imx_clk_hw_mux("sys_pll2_ref_sel", anatop_base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MP_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", anatop_base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ hws[IMX8MP_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", anatop_base, &imx_1443x_pll);
+ hws[IMX8MP_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", anatop_base + 0x14, &imx_1443x_pll);
+ hws[IMX8MP_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", anatop_base + 0x28, &imx_1443x_pll);
+ hws[IMX8MP_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", anatop_base + 0x50, &imx_1443x_dram_pll);
+ hws[IMX8MP_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", anatop_base + 0x64, &imx_1416x_pll);
+ hws[IMX8MP_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", anatop_base + 0x74, &imx_1416x_pll);
+ hws[IMX8MP_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", anatop_base + 0x84, &imx_1416x_pll);
+ hws[IMX8MP_SYS_PLL1] = imx_clk_hw_pll14xx("sys_pll1", "sys_pll1_ref_sel", anatop_base + 0x94, &imx_1416x_pll);
+ hws[IMX8MP_SYS_PLL2] = imx_clk_hw_pll14xx("sys_pll2", "sys_pll2_ref_sel", anatop_base + 0x104, &imx_1416x_pll);
+ hws[IMX8MP_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", anatop_base + 0x114, &imx_1416x_pll);
+
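+ /* Bypass muxes at bit 4 of each PLL register; CLK_SET_RATE_PARENT lets rate requests fall through the bypass to the PLL itself. */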
+ hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
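+ /* PLL output gates: bit 13 on the 1443x-type PLLs (audio/video/dram), bit 11 on the 1416x type. */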
+ hws[IMX8MP_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", anatop_base, 13);
+ hws[IMX8MP_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", anatop_base + 0x14, 13);
+ hws[IMX8MP_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", anatop_base + 0x28, 13);
+ hws[IMX8MP_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", anatop_base + 0x50, 13);
+ hws[IMX8MP_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", anatop_base + 0x64, 11);
+ hws[IMX8MP_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", anatop_base + 0x74, 11);
+ hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11);
+ hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
+ hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
+ hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11);
+
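+ /* SYS PLL1 (nominally 800 MHz) and SYS PLL2 (nominally 1 GHz) expose fixed taps, modeled as fixed-factor dividers from /20 down to /1. */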
+ hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+ hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+ hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+ hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+ hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+ hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+ hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+ hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+
+ hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+ hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+ hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+ hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+ hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+ hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+ hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+ hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+
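+ /* Core slices: 3-bit source mux at bit 24, gate at bit 28 and 3-bit post-divider at bit 0 of each root register; the mux2/gate3/divider2 variants (see clk.h) also set CLK_OPS_PARENT_ENABLE. */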
+ hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels));
+ hws[IMX8MP_CLK_M7_SRC] = imx_clk_hw_mux2("arm_m7_src", ccm_base + 0x8080, 24, 3, imx8mp_m7_sels, ARRAY_SIZE(imx8mp_m7_sels));
+ hws[IMX8MP_CLK_ML_SRC] = imx_clk_hw_mux2("ml_src", ccm_base + 0x8100, 24, 3, imx8mp_ml_sels, ARRAY_SIZE(imx8mp_ml_sels));
+ hws[IMX8MP_CLK_GPU3D_CORE_SRC] = imx_clk_hw_mux2("gpu3d_core_src", ccm_base + 0x8180, 24, 3, imx8mp_gpu3d_core_sels, ARRAY_SIZE(imx8mp_gpu3d_core_sels));
+ hws[IMX8MP_CLK_GPU3D_SHADER_SRC] = imx_clk_hw_mux2("gpu3d_shader_src", ccm_base + 0x8200, 24, 3, imx8mp_gpu3d_shader_sels, ARRAY_SIZE(imx8mp_gpu3d_shader_sels));
+ hws[IMX8MP_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", ccm_base + 0x8280, 24, 3, imx8mp_gpu2d_sels, ARRAY_SIZE(imx8mp_gpu2d_sels));
+ hws[IMX8MP_CLK_AUDIO_AXI_SRC] = imx_clk_hw_mux2("audio_axi_src", ccm_base + 0x8300, 24, 3, imx8mp_audio_axi_sels, ARRAY_SIZE(imx8mp_audio_axi_sels));
+ hws[IMX8MP_CLK_HSIO_AXI_SRC] = imx_clk_hw_mux2("hsio_axi_src", ccm_base + 0x8380, 24, 3, imx8mp_hsio_axi_sels, ARRAY_SIZE(imx8mp_hsio_axi_sels));
+ hws[IMX8MP_CLK_MEDIA_ISP_SRC] = imx_clk_hw_mux2("media_isp_src", ccm_base + 0x8400, 24, 3, imx8mp_media_isp_sels, ARRAY_SIZE(imx8mp_media_isp_sels));
+ hws[IMX8MP_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", ccm_base + 0x8000, 28);
+ hws[IMX8MP_CLK_M4_CG] = imx_clk_hw_gate3("arm_m7_cg", "arm_m7_src", ccm_base + 0x8080, 28);
+ hws[IMX8MP_CLK_ML_CG] = imx_clk_hw_gate3("ml_cg", "ml_src", ccm_base + 0x8100, 28);
+ hws[IMX8MP_CLK_GPU3D_CORE_CG] = imx_clk_hw_gate3("gpu3d_core_cg", "gpu3d_core_src", ccm_base + 0x8180, 28);
+ hws[IMX8MP_CLK_GPU3D_SHADER_CG] = imx_clk_hw_gate3("gpu3d_shader_cg", "gpu3d_shader_src", ccm_base + 0x8200, 28);
+ hws[IMX8MP_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", ccm_base + 0x8280, 28);
+ hws[IMX8MP_CLK_AUDIO_AXI_CG] = imx_clk_hw_gate3("audio_axi_cg", "audio_axi_src", ccm_base + 0x8300, 28);
+ hws[IMX8MP_CLK_HSIO_AXI_CG] = imx_clk_hw_gate3("hsio_axi_cg", "hsio_axi_src", ccm_base + 0x8380, 28);
+ hws[IMX8MP_CLK_MEDIA_ISP_CG] = imx_clk_hw_gate3("media_isp_cg", "media_isp_src", ccm_base + 0x8400, 28);
+ hws[IMX8MP_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", ccm_base + 0x8000, 0, 3);
+ hws[IMX8MP_CLK_M7_DIV] = imx_clk_hw_divider2("arm_m7_div", "arm_m7_cg", ccm_base + 0x8080, 0, 3);
+ hws[IMX8MP_CLK_ML_DIV] = imx_clk_hw_divider2("ml_div", "ml_cg", ccm_base + 0x8100, 0, 3);
+ hws[IMX8MP_CLK_GPU3D_CORE_DIV] = imx_clk_hw_divider2("gpu3d_core_div", "gpu3d_core_cg", ccm_base + 0x8180, 0, 3);
+ hws[IMX8MP_CLK_GPU3D_SHADER_DIV] = imx_clk_hw_divider2("gpu3d_shader_div", "gpu3d_shader_cg", ccm_base + 0x8200, 0, 3);
+ hws[IMX8MP_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", ccm_base + 0x8280, 0, 3);
+ hws[IMX8MP_CLK_AUDIO_AXI_DIV] = imx_clk_hw_divider2("audio_axi_div", "audio_axi_cg", ccm_base + 0x8300, 0, 3);
+ hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
+ hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+
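+ /* Bus and peripheral roots are composites: mux, dividers and gate driven from a single register; the _critical variants flag rails that must never be gated. */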
+ hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
+ hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
+ hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
+ hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
+ hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
+ hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
+ hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
+ hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80);
+ hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
+ hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
+ hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
+ hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
+ hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
+ hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
+
+ hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
+ hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
+ hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
+
+ hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
+ hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
+
+ hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+ hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+ hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
+ hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
+ hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
+ hws[IMX8MP_CLK_CAN2] = imx8m_clk_hw_composite("can2", imx8mp_can2_sels, ccm_base + 0xa280);
+ hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite("memrepair", imx8mp_memrepair_sels, ccm_base + 0xa300);
+ hws[IMX8MP_CLK_PCIE_PHY] = imx8m_clk_hw_composite("pcie_phy", imx8mp_pcie_phy_sels, ccm_base + 0xa380);
+ hws[IMX8MP_CLK_PCIE_AUX] = imx8m_clk_hw_composite("pcie_aux", imx8mp_pcie_aux_sels, ccm_base + 0xa400);
+ hws[IMX8MP_CLK_I2C5] = imx8m_clk_hw_composite("i2c5", imx8mp_i2c5_sels, ccm_base + 0xa480);
+ hws[IMX8MP_CLK_I2C6] = imx8m_clk_hw_composite("i2c6", imx8mp_i2c6_sels, ccm_base + 0xa500);
+ hws[IMX8MP_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mp_sai1_sels, ccm_base + 0xa580);
+ hws[IMX8MP_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mp_sai2_sels, ccm_base + 0xa600);
+ hws[IMX8MP_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mp_sai3_sels, ccm_base + 0xa680);
+ hws[IMX8MP_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mp_sai4_sels, ccm_base + 0xa700);
+ hws[IMX8MP_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mp_sai5_sels, ccm_base + 0xa780);
+ hws[IMX8MP_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mp_sai6_sels, ccm_base + 0xa800);
+ hws[IMX8MP_CLK_ENET_QOS] = imx8m_clk_hw_composite("enet_qos", imx8mp_enet_qos_sels, ccm_base + 0xa880);
+ hws[IMX8MP_CLK_ENET_QOS_TIMER] = imx8m_clk_hw_composite("enet_qos_timer", imx8mp_enet_qos_timer_sels, ccm_base + 0xa900);
+ hws[IMX8MP_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mp_enet_ref_sels, ccm_base + 0xa980);
+ hws[IMX8MP_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mp_enet_timer_sels, ccm_base + 0xaa00);
+ hws[IMX8MP_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy_ref", imx8mp_enet_phy_ref_sels, ccm_base + 0xaa80);
+ hws[IMX8MP_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mp_nand_sels, ccm_base + 0xab00);
+ hws[IMX8MP_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mp_qspi_sels, ccm_base + 0xab80);
+ hws[IMX8MP_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mp_usdhc1_sels, ccm_base + 0xac00);
+ hws[IMX8MP_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mp_usdhc2_sels, ccm_base + 0xac80);
+ hws[IMX8MP_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mp_i2c1_sels, ccm_base + 0xad00);
+ hws[IMX8MP_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mp_i2c2_sels, ccm_base + 0xad80);
+ hws[IMX8MP_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mp_i2c3_sels, ccm_base + 0xae00);
+ hws[IMX8MP_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mp_i2c4_sels, ccm_base + 0xae80);
+
+ hws[IMX8MP_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mp_uart1_sels, ccm_base + 0xaf00);
+ hws[IMX8MP_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mp_uart2_sels, ccm_base + 0xaf80);
+ hws[IMX8MP_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mp_uart3_sels, ccm_base + 0xb000);
+ hws[IMX8MP_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mp_uart4_sels, ccm_base + 0xb080);
+ hws[IMX8MP_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mp_usb_core_ref_sels, ccm_base + 0xb100);
+ hws[IMX8MP_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mp_usb_phy_ref_sels, ccm_base + 0xb180);
+ hws[IMX8MP_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mp_gic_sels, ccm_base + 0xb200);
+ hws[IMX8MP_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mp_ecspi1_sels, ccm_base + 0xb280);
+ hws[IMX8MP_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mp_ecspi2_sels, ccm_base + 0xb300);
+ hws[IMX8MP_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mp_pwm1_sels, ccm_base + 0xb380);
+ hws[IMX8MP_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mp_pwm2_sels, ccm_base + 0xb400);
+ hws[IMX8MP_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mp_pwm3_sels, ccm_base + 0xb480);
+ hws[IMX8MP_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mp_pwm4_sels, ccm_base + 0xb500);
+
+ hws[IMX8MP_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mp_gpt1_sels, ccm_base + 0xb580);
+ hws[IMX8MP_CLK_GPT2] = imx8m_clk_hw_composite("gpt2", imx8mp_gpt2_sels, ccm_base + 0xb600);
+ hws[IMX8MP_CLK_GPT3] = imx8m_clk_hw_composite("gpt3", imx8mp_gpt3_sels, ccm_base + 0xb680);
+ hws[IMX8MP_CLK_GPT4] = imx8m_clk_hw_composite("gpt4", imx8mp_gpt4_sels, ccm_base + 0xb700);
+ hws[IMX8MP_CLK_GPT5] = imx8m_clk_hw_composite("gpt5", imx8mp_gpt5_sels, ccm_base + 0xb780);
+ hws[IMX8MP_CLK_GPT6] = imx8m_clk_hw_composite("gpt6", imx8mp_gpt6_sels, ccm_base + 0xb800);
+ hws[IMX8MP_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mp_wdog_sels, ccm_base + 0xb900);
+ hws[IMX8MP_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mp_wrclk_sels, ccm_base + 0xb980);
+ hws[IMX8MP_CLK_IPP_DO_CLKO1] = imx8m_clk_hw_composite("ipp_do_clko1", imx8mp_ipp_do_clko1_sels, ccm_base + 0xba00);
+ hws[IMX8MP_CLK_IPP_DO_CLKO2] = imx8m_clk_hw_composite("ipp_do_clko2", imx8mp_ipp_do_clko2_sels, ccm_base + 0xba80);
+ hws[IMX8MP_CLK_HDMI_FDCC_TST] = imx8m_clk_hw_composite("hdmi_fdcc_tst", imx8mp_hdmi_fdcc_tst_sels, ccm_base + 0xbb00);
+ hws[IMX8MP_CLK_HDMI_27M] = imx8m_clk_hw_composite("hdmi_27m", imx8mp_hdmi_27m_sels, ccm_base + 0xbb80);
+ hws[IMX8MP_CLK_HDMI_REF_266M] = imx8m_clk_hw_composite("hdmi_ref_266m", imx8mp_hdmi_ref_266m_sels, ccm_base + 0xbc00);
+ hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
+ hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
+ hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
+ hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00);
+ hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
+ hws[IMX8MP_CLK_MEDIA_MIPI_PHY2_REF] = imx8m_clk_hw_composite("media_mipi_phy2_ref", imx8mp_media_mipi_phy2_ref_sels, ccm_base + 0xbf00);
+ hws[IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC] = imx8m_clk_hw_composite("media_mipi_csi2_esc", imx8mp_media_mipi_csi2_esc_sels, ccm_base + 0xbf80);
+ hws[IMX8MP_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mp_pcie2_ctrl_sels, ccm_base + 0xc000);
+ hws[IMX8MP_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mp_pcie2_phy_sels, ccm_base + 0xc080);
+ hws[IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE] = imx8m_clk_hw_composite("media_mipi_test_byte", imx8mp_media_mipi_test_byte_sels, ccm_base + 0xc100);
+ hws[IMX8MP_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mp_ecspi3_sels, ccm_base + 0xc180);
+ hws[IMX8MP_CLK_PDM] = imx8m_clk_hw_composite("pdm", imx8mp_pdm_sels, ccm_base + 0xc200);
+ hws[IMX8MP_CLK_VPU_VC8000E] = imx8m_clk_hw_composite("vpu_vc8000e", imx8mp_vpu_vc8000e_sels, ccm_base + 0xc280);
+ hws[IMX8MP_CLK_SAI7] = imx8m_clk_hw_composite("sai7", imx8mp_sai7_sels, ccm_base + 0xc300);
+
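+ /* DRAM core clock: a 1-bit mux between dram_pll_out and the /4 alt root, marked CLK_IS_CRITICAL since DDR must never lose its clock. */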
+ hws[IMX8MP_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+ hws[IMX8MP_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", ccm_base + 0x9800, 24, 1, imx8mp_dram_core_sels, ARRAY_SIZE(imx8mp_dram_core_sels), CLK_IS_CRITICAL);
+
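+ /* Leaf gates in the CCGR range (offsets 0x4000 and up, bit 0). The gate2_shared2 entries share a refcount so either user of the register bit keeps it enabled. */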
+ hws[IMX8MP_CLK_DRAM1_ROOT] = imx_clk_hw_gate4_flags("dram1_root_clk", "dram_core_clk", ccm_base + 0x4050, 0, CLK_IS_CRITICAL);
+ hws[IMX8MP_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", ccm_base + 0x4070, 0);
+ hws[IMX8MP_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", ccm_base + 0x4080, 0);
+ hws[IMX8MP_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", ccm_base + 0x4090, 0);
+ hws[IMX8MP_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", ccm_base + 0x40a0, 0);
+ hws[IMX8MP_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", ccm_base + 0x40b0, 0);
+ hws[IMX8MP_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", ccm_base + 0x40c0, 0);
+ hws[IMX8MP_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", ccm_base + 0x40d0, 0);
+ hws[IMX8MP_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", ccm_base + 0x40e0, 0);
+ hws[IMX8MP_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", ccm_base + 0x40f0, 0);
+ hws[IMX8MP_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", ccm_base + 0x4100, 0);
+ hws[IMX8MP_CLK_GPT2_ROOT] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2", ccm_base + 0x4110, 0);
+ hws[IMX8MP_CLK_GPT3_ROOT] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3", ccm_base + 0x4120, 0);
+ hws[IMX8MP_CLK_GPT4_ROOT] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4", ccm_base + 0x4130, 0);
+ hws[IMX8MP_CLK_GPT5_ROOT] = imx_clk_hw_gate4("gpt5_root_clk", "gpt5", ccm_base + 0x4140, 0);
+ hws[IMX8MP_CLK_GPT6_ROOT] = imx_clk_hw_gate4("gpt6_root_clk", "gpt6", ccm_base + 0x4150, 0);
+ hws[IMX8MP_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", ccm_base + 0x4170, 0);
+ hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0);
+ hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0);
+ hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0);
+ hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0);
+ hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0);
+ hws[IMX8MP_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", ccm_base + 0x4290, 0);
+ hws[IMX8MP_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", ccm_base + 0x42a0, 0);
+ hws[IMX8MP_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", ccm_base + 0x42b0, 0);
+ hws[IMX8MP_CLK_QOS_ROOT] = imx_clk_hw_gate4("qos_root_clk", "ipg_root", ccm_base + 0x42c0, 0);
+ hws[IMX8MP_CLK_QOS_ENET_ROOT] = imx_clk_hw_gate4("qos_enet_root_clk", "ipg_root", ccm_base + 0x42e0, 0);
+ hws[IMX8MP_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", ccm_base + 0x42f0, 0);
+ hws[IMX8MP_CLK_NAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", ccm_base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", ccm_base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MP_CLK_I2C5_ROOT] = imx_clk_hw_gate2("i2c5_root_clk", "i2c5", ccm_base + 0x4330, 0);
+ hws[IMX8MP_CLK_I2C6_ROOT] = imx_clk_hw_gate2("i2c6_root_clk", "i2c6", ccm_base + 0x4340, 0);
+ hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
+ hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
+ hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
+ hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0);
+ hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
+ hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
+ hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
+ hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0);
+ hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
+ hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
+ hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
+ hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
+ hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
+ hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
+ hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
+ hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
+ hws[IMX8MP_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", ccm_base + 0x4530, 0);
+ hws[IMX8MP_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", ccm_base + 0x4540, 0);
+ hws[IMX8MP_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", ccm_base + 0x4550, 0);
+ hws[IMX8MP_CLK_VPU_G1_ROOT] = imx_clk_hw_gate4("vpu_g1_root_clk", "vpu_g1", ccm_base + 0x4560, 0);
+ hws[IMX8MP_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", ccm_base + 0x4570, 0);
+ hws[IMX8MP_CLK_VPU_VC8KE_ROOT] = imx_clk_hw_gate4("vpu_vc8ke_root_clk", "vpu_vc8000e", ccm_base + 0x4590, 0);
+ hws[IMX8MP_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", ccm_base + 0x45a0, 0);
+ hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_div", ccm_base + 0x45b0, 0);
+ hws[IMX8MP_CLK_HSIO_ROOT] = imx_clk_hw_gate4("hsio_root_clk", "ipg_root", ccm_base + 0x45c0, 0);
+ hws[IMX8MP_CLK_MEDIA_APB_ROOT] = imx_clk_hw_gate2_shared2("media_apb_root_clk", "media_apb", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_AXI_ROOT] = imx_clk_hw_gate2_shared2("media_axi_root_clk", "media_axi", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam1_pix_root_clk", "media_cam1_pix", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp_div", ccm_base + 0x45d0, 0, &share_count_media);
+
+ hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
+ hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
+ hws[IMX8MP_CLK_TSENSOR_ROOT] = imx_clk_hw_gate4("tsensor_root_clk", "ipg_root", ccm_base + 0x4620, 0);
+ hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
+ hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
+
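+ /* Virtual CPU clock: per the shared imx clk-cpu helper, set_rate parks the A53 mux on sys_pll1_800m while arm_pll relocks, then switches back. */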
+ hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+ hws[IMX8MP_CLK_A53_DIV]->clk,
+ hws[IMX8MP_CLK_A53_SRC]->clk,
+ hws[IMX8MP_ARM_PLL_OUT]->clk,
+ hws[IMX8MP_SYS_PLL1_800M]->clk);
+
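+ /* imx_check_clk_hws() logs any table entry whose registration failed. */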
+ imx_check_clk_hws(hws, IMX8MP_CLK_END);
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+
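+ /* Collect struct clk pointers for the UART roots; uart_clks keeps its trailing NULL as the list terminator. */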
+ for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+ int index = uart_clk_ids[i];
+
+ uart_clks[i] = &hws[index]->clk;
+ }
+
+ imx_register_uart_clocks(uart_clks);
+
+ return 0;
+}
+
+static const struct of_device_id imx8mp_clk_of_match[] = {
+ { .compatible = "fsl,imx8mp-ccm" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_clk_of_match);
+
+static struct platform_driver imx8mp_clk_driver = {
+ .probe = imx8mp_clocks_probe,
+ .driver = {
+ .name = "imx8mp-ccm",
+ /*
+ * Disable bind attributes: clocks are not removed and
+ * reloading the driver will crash or break devices.
+ */
+ .suppress_bind_attrs = true,
+ .of_match_table = of_match_ptr(imx8mp_clk_of_match),
+ },
+};
+module_platform_driver(imx8mp_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 5f10a606d836..4c0edca1a6d0 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/platform_device.h>
#include "clk.h"
@@ -24,8 +25,6 @@ static u32 share_count_sai6;
static u32 share_count_dcss;
static u32 share_count_nand;
-static struct clk *clks[IMX8MQ_CLK_END];
-
static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
@@ -269,124 +268,133 @@ static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "os
static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
"sys3_pll_out", "audio_pll1_out", "video_pll1_out", "ckil", };
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_hw_data;
+static struct clk_hw **hws;
-static struct clk ** const uart_clks[] = {
- &clks[IMX8MQ_CLK_UART1_ROOT],
- &clks[IMX8MQ_CLK_UART2_ROOT],
- &clks[IMX8MQ_CLK_UART3_ROOT],
- &clks[IMX8MQ_CLK_UART4_ROOT],
- NULL
+static const int uart_clk_ids[] = {
+ IMX8MQ_CLK_UART1_ROOT,
+ IMX8MQ_CLK_UART2_ROOT,
+ IMX8MQ_CLK_UART3_ROOT,
+ IMX8MQ_CLK_UART4_ROOT,
};
+static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
static int imx8mq_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
- int err;
+ int err, i;
+
+ clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ IMX8MQ_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+ clk_hw_data->num = IMX8MQ_CLK_END;
+ hws = clk_hw_data->hws;
- clks[IMX8MQ_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clks[IMX8MQ_CLK_32K] = of_clk_get_by_name(np, "ckil");
- clks[IMX8MQ_CLK_25M] = of_clk_get_by_name(np, "osc_25m");
- clks[IMX8MQ_CLK_27M] = of_clk_get_by_name(np, "osc_27m");
- clks[IMX8MQ_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
- clks[IMX8MQ_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
- clks[IMX8MQ_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
- clks[IMX8MQ_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+ hws[IMX8MQ_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+ hws[IMX8MQ_CLK_32K] = imx_obtain_fixed_clk_hw(np, "ckil");
+ hws[IMX8MQ_CLK_25M] = imx_obtain_fixed_clk_hw(np, "osc_25m");
+ hws[IMX8MQ_CLK_27M] = imx_obtain_fixed_clk_hw(np, "osc_27m");
+ hws[IMX8MQ_CLK_EXT1] = imx_obtain_fixed_clk_hw(np, "clk_ext1");
+ hws[IMX8MQ_CLK_EXT2] = imx_obtain_fixed_clk_hw(np, "clk_ext2");
+ hws[IMX8MQ_CLK_EXT3] = imx_obtain_fixed_clk_hw(np, "clk_ext3");
+ hws[IMX8MQ_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
base = of_iomap(np, 0);
if (WARN_ON(!base))
return -ENOMEM;
- clks[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_SYS3_PLL1_REF_SEL] = imx_clk_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_DRAM_PLL1_REF_SEL] = imx_clk_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-
- clks[IMX8MQ_ARM_PLL_REF_DIV] = imx_clk_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
- clks[IMX8MQ_GPU_PLL_REF_DIV] = imx_clk_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
- clks[IMX8MQ_VPU_PLL_REF_DIV] = imx_clk_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
- clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
- clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
- clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
-
- clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
- clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
- clks[IMX8MQ_VPU_PLL] = imx_clk_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
- clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
- clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
- clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
+ hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_SYS3_PLL1_REF_SEL] = imx_clk_hw_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_DRAM_PLL1_REF_SEL] = imx_clk_hw_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_hw_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ hws[IMX8MQ_ARM_PLL_REF_DIV] = imx_clk_hw_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
+ hws[IMX8MQ_GPU_PLL_REF_DIV] = imx_clk_hw_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
+ hws[IMX8MQ_VPU_PLL_REF_DIV] = imx_clk_hw_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
+ hws[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_hw_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
+ hws[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_hw_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
+ hws[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_hw_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
+
+ hws[IMX8MQ_ARM_PLL] = imx_clk_hw_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
+ hws[IMX8MQ_GPU_PLL] = imx_clk_hw_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
+ hws[IMX8MQ_VPU_PLL] = imx_clk_hw_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
+ hws[IMX8MQ_AUDIO_PLL1] = imx_clk_hw_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
+ hws[IMX8MQ_AUDIO_PLL2] = imx_clk_hw_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
+ hws[IMX8MQ_VIDEO_PLL1] = imx_clk_hw_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
/* PLL bypass out */
- clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
- clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
- clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
- clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
- clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
+ hws[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_hw_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
+ hws[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_hw_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
+ hws[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
+ hws[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
+ hws[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
/* PLL OUT GATE */
- clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
- clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
- clks[IMX8MQ_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
- clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
- clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
- clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
-
- clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_fixed("sys1_pll_out", 800000000);
- clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_fixed("sys2_pll_out", 1000000000);
- clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
- clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
- clks[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_sccg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
+ hws[IMX8MQ_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
+ hws[IMX8MQ_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
+ hws[IMX8MQ_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
+ hws[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
+ hws[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
+ hws[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
+
+ hws[IMX8MQ_SYS1_PLL_OUT] = imx_clk_hw_fixed("sys1_pll_out", 800000000);
+ hws[IMX8MQ_SYS2_PLL_OUT] = imx_clk_hw_fixed("sys2_pll_out", 1000000000);
+ hws[IMX8MQ_SYS3_PLL_OUT] = imx_clk_hw_sscg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
+ hws[IMX8MQ_DRAM_PLL_OUT] = imx_clk_hw_sscg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ hws[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_hw_sscg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
/* SYS PLL1 fixed output */
- clks[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
- clks[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
- clks[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
- clks[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
- clks[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
- clks[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
- clks[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
- clks[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
- clks[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
-
- clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
- clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
- clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
- clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
- clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
- clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
- clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
- clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
- clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
+ hws[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_hw_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
+ hws[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_hw_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
+ hws[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_hw_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
+ hws[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_hw_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
+ hws[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_hw_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
+ hws[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_hw_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
+ hws[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_hw_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
+ hws[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_hw_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
+ hws[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_hw_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
+
+ hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
+ hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
+ hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
+ hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
+ hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
+ hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
+ hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
+ hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
+ hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
/* SYS PLL2 fixed output */
- clks[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
- clks[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
- clks[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
- clks[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
- clks[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
- clks[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
- clks[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
- clks[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
- clks[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
-
- clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
- clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
- clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
- clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
- clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
- clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
- clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
- clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
- clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
+ hws[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_hw_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
+ hws[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_hw_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
+ hws[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_hw_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
+ hws[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_hw_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
+ hws[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_hw_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
+ hws[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_hw_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
+ hws[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_hw_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
+ hws[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_hw_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
+ hws[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_hw_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
+
+ hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
+ hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
+ hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
+ hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
+ hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
+ hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
+ hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
+ hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
+ hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
@@ -394,206 +402,213 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
return PTR_ERR(base);
/* CORE */
- clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
- clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
- clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
- clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
- clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
-
- clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
- clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
- clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
- clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
-
- clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
- clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
- clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+ hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
+ hws[IMX8MQ_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
+ hws[IMX8MQ_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
+ hws[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
+ hws[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
+
+ hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
+ hws[IMX8MQ_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+ hws[IMX8MQ_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+ hws[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+ hws[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+
+ hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MQ_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+ hws[IMX8MQ_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+ hws[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+ hws[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
/* BUS */
- clks[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
- clks[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
- clks[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
- clks[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
- clks[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
- clks[IMX8MQ_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
- clks[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
- clks[IMX8MQ_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
- clks[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
- clks[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
- clks[IMX8MQ_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
- clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+ hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
+ hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
+ hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
+ hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
+ hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
+ hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
/* AHB */
/* AHB clock is used by the AHB bus therefore marked as critical */
- clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
- clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+ hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
/* IPG */
- clks[IMX8MQ_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
- clks[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+ hws[IMX8MQ_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ hws[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+ /*
+ * DRAM clocks are manipulated from TF-A outside the clock framework.
+ * Mark them with CLK_GET_RATE_NOCACHE so the divider value is always
+ * read back from hardware.
+ */
+ hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
+ hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
+ hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
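
/*
 * Illustrative sketch, not part of the patch: CLK_GET_RATE_NOCACHE makes
 * clk_get_rate() invoke ->recalc_rate() on every query instead of
 * returning the cached rate, so a divider reprogrammed by TF-A behind
 * the kernel's back is still reported correctly.
 */
#include <linux/clk.h>

static unsigned long dram_apb_current_rate(struct clk *dram_apb)
{
	/* Re-reads the divider from hardware on each call. */
	return clk_get_rate(dram_apb);
}
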
/* IP */
- clks[IMX8MQ_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
-
- clks[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
- clks[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
- clks[IMX8MQ_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
- clks[IMX8MQ_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
- clks[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
- clks[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
- clks[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
- clks[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
- clks[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
- clks[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
- clks[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
- clks[IMX8MQ_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
- clks[IMX8MQ_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
- clks[IMX8MQ_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
- clks[IMX8MQ_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
- clks[IMX8MQ_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
- clks[IMX8MQ_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
- clks[IMX8MQ_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
- clks[IMX8MQ_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
- clks[IMX8MQ_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
- clks[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
- clks[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
- clks[IMX8MQ_CLK_NAND] = imx8m_clk_composite("nand", imx8mq_nand_sels, base + 0xab00);
- clks[IMX8MQ_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
- clks[IMX8MQ_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
- clks[IMX8MQ_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
- clks[IMX8MQ_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
- clks[IMX8MQ_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
- clks[IMX8MQ_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
- clks[IMX8MQ_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
- clks[IMX8MQ_CLK_UART1] = imx8m_clk_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
- clks[IMX8MQ_CLK_UART2] = imx8m_clk_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
- clks[IMX8MQ_CLK_UART3] = imx8m_clk_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
- clks[IMX8MQ_CLK_UART4] = imx8m_clk_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
- clks[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
- clks[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
- clks[IMX8MQ_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mq_gic_sels, base + 0xb200);
- clks[IMX8MQ_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
- clks[IMX8MQ_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
- clks[IMX8MQ_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
- clks[IMX8MQ_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
- clks[IMX8MQ_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
- clks[IMX8MQ_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
- clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
- clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
- clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
- clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
- clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
- clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
- clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
- clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
- clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
- clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
- clks[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
- clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
- clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
- clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
- clks[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
- clks[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
- clks[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
- clks[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
- clks[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
- clks[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
- clks[IMX8MQ_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
-
- clks[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
- clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
- clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
- clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
- clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
- clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
- clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
- clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
- clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
- clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
- clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
- clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
- clks[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
- clks[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
- clks[IMX8MQ_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
- clks[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
- clks[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
- clks[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
- clks[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
- clks[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
- clks[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
- clks[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
- clks[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
- clks[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
- clks[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
- clks[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
- clks[IMX8MQ_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
- clks[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
- clks[IMX8MQ_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
- clks[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
- clks[IMX8MQ_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
- clks[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
- clks[IMX8MQ_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
- clks[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
- clks[IMX8MQ_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
- clks[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
- clks[IMX8MQ_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
- clks[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
- clks[IMX8MQ_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
- clks[IMX8MQ_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
- clks[IMX8MQ_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
- clks[IMX8MQ_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
- clks[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
- clks[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0);
- clks[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
- clks[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
- clks[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
- clks[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
- clks[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
- clks[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
- clks[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
- clks[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
- clks[IMX8MQ_CLK_GPU_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
- clks[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
- clks[IMX8MQ_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
- clks[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
- clks[IMX8MQ_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
- clks[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
- clks[IMX8MQ_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
- clks[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
- clks[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
- clks[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
- clks[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
- clks[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
-
- clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
- clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
-
- clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
- clks[IMX8MQ_CLK_A53_DIV],
- clks[IMX8MQ_CLK_A53_SRC],
- clks[IMX8MQ_ARM_PLL_OUT],
- clks[IMX8MQ_SYS1_PLL_800M]);
-
- imx_check_clocks(clks, ARRAY_SIZE(clks));
-
- clk_data.clks = clks;
- clk_data.clk_num = ARRAY_SIZE(clks);
-
- err = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
+ hws[IMX8MQ_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
+ hws[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_hw_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
+ hws[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_hw_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
+ hws[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_hw_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
+ hws[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_hw_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
+ hws[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_hw_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
+ hws[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_hw_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
+ hws[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_hw_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
+ hws[IMX8MQ_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
+ hws[IMX8MQ_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
+ hws[IMX8MQ_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
+ hws[IMX8MQ_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
+ hws[IMX8MQ_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
+ hws[IMX8MQ_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
+ hws[IMX8MQ_CLK_SPDIF1] = imx8m_clk_hw_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
+ hws[IMX8MQ_CLK_SPDIF2] = imx8m_clk_hw_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
+ hws[IMX8MQ_CLK_ENET_REF] = imx8m_clk_hw_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
+ hws[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_hw_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
+ hws[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_hw_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
+ hws[IMX8MQ_CLK_NAND] = imx8m_clk_hw_composite("nand", imx8mq_nand_sels, base + 0xab00);
+ hws[IMX8MQ_CLK_QSPI] = imx8m_clk_hw_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
+ hws[IMX8MQ_CLK_USDHC1] = imx8m_clk_hw_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
+ hws[IMX8MQ_CLK_USDHC2] = imx8m_clk_hw_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
+ hws[IMX8MQ_CLK_I2C1] = imx8m_clk_hw_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
+ hws[IMX8MQ_CLK_I2C2] = imx8m_clk_hw_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
+ hws[IMX8MQ_CLK_I2C3] = imx8m_clk_hw_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
+ hws[IMX8MQ_CLK_I2C4] = imx8m_clk_hw_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
+ hws[IMX8MQ_CLK_UART1] = imx8m_clk_hw_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
+ hws[IMX8MQ_CLK_UART2] = imx8m_clk_hw_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
+ hws[IMX8MQ_CLK_UART3] = imx8m_clk_hw_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
+ hws[IMX8MQ_CLK_UART4] = imx8m_clk_hw_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
+ hws[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_hw_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
+ hws[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_hw_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
+ hws[IMX8MQ_CLK_GIC] = imx8m_clk_hw_composite_critical("gic", imx8mq_gic_sels, base + 0xb200);
+ hws[IMX8MQ_CLK_ECSPI1] = imx8m_clk_hw_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
+ hws[IMX8MQ_CLK_ECSPI2] = imx8m_clk_hw_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
+ hws[IMX8MQ_CLK_PWM1] = imx8m_clk_hw_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
+ hws[IMX8MQ_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
+ hws[IMX8MQ_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
+ hws[IMX8MQ_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
+ hws[IMX8MQ_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
+ hws[IMX8MQ_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
+ hws[IMX8MQ_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
+ hws[IMX8MQ_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mq_clko1_sels, base + 0xba00);
+ hws[IMX8MQ_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
+ hws[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
+ hws[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
+ hws[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
+ hws[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_hw_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
+ hws[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_hw_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
+ hws[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_hw_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
+ hws[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_hw_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
+ hws[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_hw_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
+ hws[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_hw_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
+ hws[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_hw_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
+ hws[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_hw_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
+ hws[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_hw_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
+ hws[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
+ hws[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
+ hws[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_hw_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
+ hws[IMX8MQ_CLK_ECSPI3] = imx8m_clk_hw_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
+
+ hws[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ hws[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ hws[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ hws[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_hw_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ hws[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_hw_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+ hws[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_hw_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+ hws[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+ hws[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+ hws[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+ hws[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+ hws[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ hws[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ hws[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ hws[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ hws[IMX8MQ_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ hws[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ hws[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_hw_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+ hws[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_hw_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
+ hws[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ hws[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ hws[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ hws[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ hws[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_hw_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+ hws[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_hw_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ hws[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+ hws[IMX8MQ_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+ hws[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ hws[IMX8MQ_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
+ hws[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ hws[IMX8MQ_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
+ hws[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_hw_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+ hws[IMX8MQ_CLK_SAI4_IPG] = imx_clk_hw_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+ hws[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_hw_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ hws[IMX8MQ_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ hws[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MQ_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
+ hws[IMX8MQ_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ hws[IMX8MQ_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ hws[IMX8MQ_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ hws[IMX8MQ_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ hws[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+ hws[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_hw_gate4("usb2_ctrl_root_clk", "usb_bus", base + 0x44e0, 0);
+ hws[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_hw_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
+ hws[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_hw_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
+ hws[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ hws[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ hws[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ hws[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ hws[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ hws[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_hw_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
+ hws[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_hw_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ hws[IMX8MQ_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
+ hws[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
+ hws[IMX8MQ_CLK_DISP_APB_ROOT] = imx_clk_hw_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
+ hws[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_hw_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
+ hws[IMX8MQ_CLK_TMU_ROOT] = imx_clk_hw_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+ hws[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_hw_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ hws[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+ hws[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_hw_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
+ hws[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ hws[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+
+ hws[IMX8MQ_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc_25m", 1, 8);
+ hws[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+
+ hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
+ hws[IMX8MQ_CLK_A53_DIV]->clk,
+ hws[IMX8MQ_CLK_A53_SRC]->clk,
+ hws[IMX8MQ_ARM_PLL_OUT]->clk,
+ hws[IMX8MQ_SYS1_PLL_800M]->clk);
+
+ imx_check_clk_hws(hws, IMX8MQ_CLK_END);
+
+ err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
- dev_err(dev, "failed to register clks for i.MX8MQ\n");
- goto unregister_clks;
+ dev_err(dev, "failed to register hws for i.MX8MQ\n");
+ goto unregister_hws;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+ int index = uart_clk_ids[i];
+
+ uart_hws[i] = &hws[index]->clk;
}
- imx_register_uart_clocks(uart_clks);
+ imx_register_uart_clocks(uart_hws);
return 0;
-unregister_clks:
- imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+unregister_hws:
+ imx_unregister_hw_clocks(hws, IMX8MQ_CLK_END);
return err;
}
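
/*
 * Assumed shape (set up earlier in the patch, outside this excerpt) of
 * the data that of_clk_hw_onecell_get() indexes: a clk_hw_onecell_data
 * whose num covers IMX8MQ_CLK_END and whose hws[] member is the table
 * filled in above. Names mirror the probe code but this is a sketch only.
 */
#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;

static int example_alloc_hw_data(void)
{
	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MQ_CLK_END),
			      GFP_KERNEL);
	if (!clk_hw_data)
		return -ENOMEM;

	clk_hw_data->num = IMX8MQ_CLK_END;
	hws = clk_hw_data->hws;
	return 0;
}
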
@@ -609,6 +624,11 @@ static struct platform_driver imx8mq_clk_driver = {
.probe = imx8mq_clocks_probe,
.driver = {
.name = "imx8mq-ccm",
+ /*
+ * Disable bind attributes: the clocks are never removed, so
+ * rebinding the driver would crash or leave devices broken.
+ */
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(imx8mq_clk_of_match),
},
};
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index c0aff7ca6374..04c8ee35e14c 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -173,6 +173,17 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
if (!ss_lpcg)
return -ENODEV;
+ /*
+ * Please don't replace this with devm_platform_ioremap_resource.
+ *
+ * devm_platform_ioremap_resource calls devm_ioremap_resource, which
+ * differs from devm_ioremap by also calling devm_request_mem_region
+ * and thereby preventing other mappings in the same area.
+ *
+ * On imx8 the LPCG nodes map entire subsystems and overlap
+ * peripherals; using devm_platform_ioremap_resource would therefore
+ * cause many devices, including serial ports, to fail to probe.
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
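
/*
 * Minimal sketch (assumed function name, not from this patch) of the
 * non-exclusive mapping the comment above asks for: devm_ioremap()
 * only maps the range, while devm_platform_ioremap_resource() would
 * also reserve it and make overlapping peripheral mappings fail.
 */
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *lpcg_map_shared(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* Map without reserving, so other drivers can map the same area. */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}
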
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index a03bbed662c6..de93ce73101b 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -166,7 +166,7 @@ static const struct clk_ops clk_pfdv2_ops = {
.is_enabled = clk_pfdv2_is_enabled,
};
-struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
void __iomem *reg, u8 idx)
{
struct clk_init_data init;
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 5c458199060a..5b0519a81a7a 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -67,6 +67,13 @@ struct imx_pll14xx_clk imx_1443x_pll = {
.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
+struct imx_pll14xx_clk imx_1443x_dram_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx_pll1443x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
+ .flags = CLK_GET_RATE_NOCACHE,
+};
+
struct imx_pll14xx_clk imx_1416x_pll = {
.type = PLL_1416X,
.rate_table = imx_pll1416x_tbl,
@@ -159,7 +166,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
u32 val;
- return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0,
+ return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
LOCK_TIMEOUT_US);
}
@@ -369,13 +376,14 @@ static const struct clk_ops clk_pll1443x_ops = {
.set_rate = clk_pll1443x_set_rate,
};
-struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk)
+struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
{
struct clk_pll14xx *pll;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
u32 val;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
@@ -412,12 +420,15 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
val &= ~BYPASS_MASK;
writel_relaxed(val, pll->base + GNRL_CTL);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll %s %lu\n",
- __func__, name, PTR_ERR(clk));
+ hw = &pll->hw;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ pr_err("%s: failed to register pll %s %d\n",
+ __func__, name, ret);
kfree(pll);
+ return ERR_PTR(ret);
}
- return clk;
+ return hw;
}
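
/*
 * Distilled sketch (illustrative names only) of the conversion idiom
 * shared by the PLL hunks above: clk_hw_register() returns an int
 * rather than a struct clk pointer, so the provider hands back its
 * embedded clk_hw and the error path switches from IS_ERR(clk) to a
 * plain return code.
 */
#include <linux/clk-provider.h>
#include <linux/slab.h>

struct example_pll {
	struct clk_hw hw;
	void __iomem *base;
};

static struct clk_hw *example_pll_register(const struct clk_init_data *init)
{
	struct example_pll *pll;
	int ret;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pll->hw.init = init;

	ret = clk_hw_register(NULL, &pll->hw);
	if (ret) {
		/* Registration failed: free the allocation, return ERR_PTR. */
		kfree(pll);
		return ERR_PTR(ret);
	}

	return &pll->hw;
}
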
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 4ba9973d4c18..de4f8a41a7d0 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -111,12 +111,13 @@ static const struct clk_ops clk_pllv1_ops = {
.recalc_rate = clk_pllv1_recalc_rate,
};
-struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
+struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base)
{
struct clk_pllv1 *pll;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
pll = kmalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
@@ -132,10 +133,13 @@ struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
init.num_parents = 1;
pll->hw.init = &init;
+ hw = &pll->hw;
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
kfree(pll);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index eeba3cb14e2d..ff17f0664faa 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -239,12 +239,13 @@ static const struct clk_ops clk_pllv2_ops = {
.set_rate = clk_pllv2_set_rate,
};
-struct clk *imx_clk_pllv2(const char *name, const char *parent,
+struct clk_hw *imx_clk_hw_pllv2(const char *name, const char *parent,
void __iomem *base)
{
struct clk_pllv2 *pll;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
@@ -259,10 +260,13 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent,
init.num_parents = 1;
pll->hw.init = &init;
+ hw = &pll->hw;
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
kfree(pll);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 8155b12cf0e1..f51a800c268c 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -206,7 +206,7 @@ static const struct clk_ops clk_pllv4_ops = {
.is_enabled = clk_pllv4_is_enabled,
};
-struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
void __iomem *base)
{
struct clk_pllv4 *pll;
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
index 5d65f65c512e..acd1b9002be6 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -67,7 +67,7 @@
#define PLL_SCCG_LOCK_TIMEOUT 70
-struct clk_sccg_pll_setup {
+struct clk_sscg_pll_setup {
int divr1, divf1;
int divr2, divf2;
int divq;
@@ -83,22 +83,22 @@ struct clk_sccg_pll_setup {
int fout_error;
};
-struct clk_sccg_pll {
+struct clk_sscg_pll {
struct clk_hw hw;
const struct clk_ops ops;
void __iomem *base;
- struct clk_sccg_pll_setup setup;
+ struct clk_sscg_pll_setup setup;
u8 parent;
u8 bypass1;
u8 bypass2;
};
-#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
+#define to_clk_sscg_pll(_hw) container_of(_hw, struct clk_sscg_pll, hw)
-static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
+static int clk_sscg_pll_wait_lock(struct clk_sscg_pll *pll)
{
u32 val;
@@ -112,15 +112,15 @@ static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll)
return 0;
}
-static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup)
+static int clk_sscg_pll2_check_match(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup)
{
int new_diff = temp_setup->fout - temp_setup->fout_request;
int diff = temp_setup->fout_error;
if (abs(diff) > abs(new_diff)) {
temp_setup->fout_error = new_diff;
- memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup));
+ memcpy(setup, temp_setup, sizeof(struct clk_sscg_pll_setup));
if (temp_setup->fout_request == temp_setup->fout)
return 0;
@@ -128,8 +128,8 @@ static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup,
return -1;
}
-static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup)
+static int clk_sscg_divq_lookup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup)
{
int ret = -EINVAL;
@@ -144,7 +144,7 @@ static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
temp_setup->fout = temp_setup->vco2;
do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));
- ret = clk_sccg_pll2_check_match(setup, temp_setup);
+ ret = clk_sscg_pll2_check_match(setup, temp_setup);
if (!ret) {
temp_setup->bypass = PLL_BYPASS1;
return ret;
@@ -155,14 +155,14 @@ static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup,
return ret;
}
-static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup)
+static int clk_sscg_divf2_lookup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup)
{
int ret = -EINVAL;
for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
temp_setup->divf2++) {
- ret = clk_sccg_divq_lookup(setup, temp_setup);
+ ret = clk_sscg_divq_lookup(setup, temp_setup);
if (!ret)
return ret;
}
@@ -170,8 +170,8 @@ static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup,
return ret;
}
-static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup)
+static int clk_sscg_divr2_lookup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup)
{
int ret = -EINVAL;
@@ -181,7 +181,7 @@ static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
- ret = clk_sccg_divf2_lookup(setup, temp_setup);
+ ret = clk_sscg_divf2_lookup(setup, temp_setup);
if (!ret)
return ret;
}
@@ -190,8 +190,8 @@ static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup,
return ret;
}
-static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup,
+static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup,
uint64_t ref)
{
@@ -202,12 +202,12 @@ static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup,
temp_setup->vco1 = ref;
- ret = clk_sccg_divr2_lookup(setup, temp_setup);
+ ret = clk_sscg_divr2_lookup(setup, temp_setup);
return ret;
}
-static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup)
+static int clk_sscg_divf1_lookup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup)
{
int ret = -EINVAL;
@@ -219,7 +219,7 @@ static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
vco1 *= 2;
vco1 *= temp_setup->divf1 + 1;
- ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1);
+ ret = clk_sscg_pll2_find_setup(setup, temp_setup, vco1);
if (!ret) {
temp_setup->bypass = PLL_BYPASS_NONE;
return ret;
@@ -229,8 +229,8 @@ static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup,
return ret;
}
-static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup)
+static int clk_sscg_divr1_lookup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup)
{
int ret = -EINVAL;
@@ -240,7 +240,7 @@ static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
- ret = clk_sccg_divf1_lookup(setup, temp_setup);
+ ret = clk_sscg_divf1_lookup(setup, temp_setup);
if (!ret)
return ret;
}
@@ -249,8 +249,8 @@ static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup,
return ret;
}
-static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
- struct clk_sccg_pll_setup *temp_setup,
+static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
+ struct clk_sscg_pll_setup *temp_setup,
uint64_t ref)
{
@@ -261,20 +261,20 @@ static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup,
temp_setup->ref = ref;
- ret = clk_sccg_divr1_lookup(setup, temp_setup);
+ ret = clk_sscg_divr1_lookup(setup, temp_setup);
return ret;
}
-static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
+static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
uint64_t prate,
uint64_t rate, int try_bypass)
{
- struct clk_sccg_pll_setup temp_setup;
+ struct clk_sscg_pll_setup temp_setup;
int ret = -EINVAL;
- memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup));
- memset(setup, 0, sizeof(struct clk_sccg_pll_setup));
+ memset(&temp_setup, 0, sizeof(struct clk_sscg_pll_setup));
+ memset(setup, 0, sizeof(struct clk_sscg_pll_setup));
temp_setup.fout_error = PLL_OUT_MAX_FREQ;
temp_setup.fout_request = rate;
@@ -290,11 +290,11 @@ static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
break;
case PLL_BYPASS1:
- ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate);
+ ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate);
break;
case PLL_BYPASS_NONE:
- ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate);
+ ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate);
break;
}
@@ -302,30 +302,30 @@ static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup,
}
-static int clk_sccg_pll_is_prepared(struct clk_hw *hw)
+static int clk_sscg_pll_is_prepared(struct clk_hw *hw)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
u32 val = readl_relaxed(pll->base + PLL_CFG0);
return (val & PLL_PD_MASK) ? 0 : 1;
}
-static int clk_sccg_pll_prepare(struct clk_hw *hw)
+static int clk_sscg_pll_prepare(struct clk_hw *hw)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
u32 val;
val = readl_relaxed(pll->base + PLL_CFG0);
val &= ~PLL_PD_MASK;
writel_relaxed(val, pll->base + PLL_CFG0);
- return clk_sccg_pll_wait_lock(pll);
+ return clk_sscg_pll_wait_lock(pll);
}
-static void clk_sccg_pll_unprepare(struct clk_hw *hw)
+static void clk_sscg_pll_unprepare(struct clk_hw *hw)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
u32 val;
val = readl_relaxed(pll->base + PLL_CFG0);
@@ -333,10 +333,10 @@ static void clk_sccg_pll_unprepare(struct clk_hw *hw)
writel_relaxed(val, pll->base + PLL_CFG0);
}
-static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
+static unsigned long clk_sscg_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
u32 val, divr1, divf1, divr2, divf2, divq;
u64 temp64;
@@ -364,11 +364,11 @@ static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
return temp64;
}
-static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+static int clk_sscg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- struct clk_sccg_pll_setup *setup = &pll->setup;
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+ struct clk_sscg_pll_setup *setup = &pll->setup;
u32 val;
/* set bypass here too since the parent might be the same */
@@ -387,12 +387,12 @@ static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
writel_relaxed(val, pll->base + PLL_CFG2);
- return clk_sccg_pll_wait_lock(pll);
+ return clk_sscg_pll_wait_lock(pll);
}
-static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
+static u8 clk_sscg_pll_get_parent(struct clk_hw *hw)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
u32 val;
u8 ret = pll->parent;
@@ -404,9 +404,9 @@ static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
return ret;
}
-static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
+static int clk_sscg_pll_set_parent(struct clk_hw *hw, u8 index)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
u32 val;
val = readl(pll->base + PLL_CFG0);
@@ -414,18 +414,18 @@ static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
writel(val, pll->base + PLL_CFG0);
- return clk_sccg_pll_wait_lock(pll);
+ return clk_sscg_pll_wait_lock(pll);
}
-static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
+static int __clk_sscg_pll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req,
uint64_t min,
uint64_t max,
uint64_t rate,
int bypass)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- struct clk_sccg_pll_setup *setup = &pll->setup;
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+ struct clk_sscg_pll_setup *setup = &pll->setup;
struct clk_hw *parent_hw = NULL;
int bypass_parent_index;
int ret = -EINVAL;
@@ -448,7 +448,7 @@ static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
ret = __clk_determine_rate(parent_hw, req);
if (!ret) {
- ret = clk_sccg_pll_find_setup(setup, req->rate,
+ ret = clk_sscg_pll_find_setup(setup, req->rate,
rate, bypass);
}
@@ -459,11 +459,11 @@ static int __clk_sccg_pll_determine_rate(struct clk_hw *hw,
return ret;
}
-static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
+static int clk_sscg_pll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
- struct clk_sccg_pll_setup *setup = &pll->setup;
+ struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
+ struct clk_sscg_pll_setup *setup = &pll->setup;
uint64_t rate = req->rate;
uint64_t min = req->min_rate;
uint64_t max = req->max_rate;
@@ -472,18 +472,18 @@ static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
return ret;
- ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate,
+ ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate,
rate, PLL_BYPASS2);
if (!ret)
return ret;
- ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
+ ret = __clk_sscg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
PLL_STAGE1_REF_MAX_FREQ, rate,
PLL_BYPASS1);
if (!ret)
return ret;
- ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
+ ret = __clk_sscg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
PLL_REF_MAX_FREQ, rate,
PLL_BYPASS_NONE);
if (!ret)
@@ -495,25 +495,25 @@ static int clk_sccg_pll_determine_rate(struct clk_hw *hw,
return ret;
}
-static const struct clk_ops clk_sccg_pll_ops = {
- .prepare = clk_sccg_pll_prepare,
- .unprepare = clk_sccg_pll_unprepare,
- .is_prepared = clk_sccg_pll_is_prepared,
- .recalc_rate = clk_sccg_pll_recalc_rate,
- .set_rate = clk_sccg_pll_set_rate,
- .set_parent = clk_sccg_pll_set_parent,
- .get_parent = clk_sccg_pll_get_parent,
- .determine_rate = clk_sccg_pll_determine_rate,
+static const struct clk_ops clk_sscg_pll_ops = {
+ .prepare = clk_sscg_pll_prepare,
+ .unprepare = clk_sscg_pll_unprepare,
+ .is_prepared = clk_sscg_pll_is_prepared,
+ .recalc_rate = clk_sscg_pll_recalc_rate,
+ .set_rate = clk_sscg_pll_set_rate,
+ .set_parent = clk_sscg_pll_set_parent,
+ .get_parent = clk_sscg_pll_get_parent,
+ .determine_rate = clk_sscg_pll_determine_rate,
};
-struct clk *imx_clk_sccg_pll(const char *name,
+struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
const char * const *parent_names,
u8 num_parents,
u8 parent, u8 bypass1, u8 bypass2,
void __iomem *base,
unsigned long flags)
{
- struct clk_sccg_pll *pll;
+ struct clk_sscg_pll *pll;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
@@ -528,7 +528,7 @@ struct clk *imx_clk_sccg_pll(const char *name,
pll->base = base;
init.name = name;
- init.ops = &clk_sccg_pll_ops;
+ init.ops = &clk_sscg_pll_ops;
init.flags = flags;
init.parent_names = parent_names;
@@ -545,5 +545,5 @@ struct clk *imx_clk_sccg_pll(const char *name,
return ERR_PTR(ret);
}
- return hw->clk;
+ return hw;
}
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index cfc05e43c343..87ab8db3d282 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -22,6 +22,14 @@ void imx_unregister_clocks(struct clk *clks[], unsigned int count)
clk_unregister(clks[i]);
}
+void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ clk_hw_unregister(hws[i]);
+}
+
void __init imx_mmdc_mask_handshake(void __iomem *ccm_base,
unsigned int chn)
{
@@ -94,8 +102,8 @@ struct clk_hw * __init imx_obtain_fixed_clock_hw(
return __clk_get_hw(clk);
}
-struct clk_hw * __init imx_obtain_fixed_clk_hw(struct device_node *np,
- const char *name)
+struct clk_hw * imx_obtain_fixed_clk_hw(struct device_node *np,
+ const char *name)
{
struct clk *clk;
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index bc5bb6ac8636..b05213b91dcf 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -12,6 +12,7 @@ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
void imx_register_uart_clocks(struct clk ** const clks[]);
void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn);
void imx_unregister_clocks(struct clk *clks[], unsigned int count);
+void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count);
extern void imx_cscmr1_fixup(u32 *val);
@@ -24,7 +25,7 @@ enum imx_pllv1_type {
IMX_PLLV1_IMX35,
};
-enum imx_sccg_pll_type {
+enum imx_sscg_pll_type {
SCCG_PLL1,
SCCG_PLL2,
};
@@ -52,64 +53,98 @@ struct imx_pll14xx_clk {
extern struct imx_pll14xx_clk imx_1416x_pll;
extern struct imx_pll14xx_clk imx_1443x_pll;
+extern struct imx_pll14xx_clk imx_1443x_dram_pll;
#define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
- imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
+ to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
#define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
cgr_val, clk_gate_flags, lock, share_count) \
- clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
- cgr_val, clk_gate_flags, lock, share_count)->clk
+ to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
+ cgr_val, clk_gate_flags, lock, share_count))
#define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
- imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk
+ to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
#define imx_clk_pfd(name, parent_name, reg, idx) \
- imx_clk_hw_pfd(name, parent_name, reg, idx)->clk
+ to_clk(imx_clk_hw_pfd(name, parent_name, reg, idx))
#define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \
- imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk
+ to_clk(imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask))
+
+#define imx_clk_fixed(name, rate) \
+ to_clk(imx_clk_hw_fixed(name, rate))
#define imx_clk_fixed_factor(name, parent, mult, div) \
- imx_clk_hw_fixed_factor(name, parent, mult, div)->clk
+ to_clk(imx_clk_hw_fixed_factor(name, parent, mult, div))
+
+#define imx_clk_divider(name, parent, reg, shift, width) \
+ to_clk(imx_clk_hw_divider(name, parent, reg, shift, width))
#define imx_clk_divider2(name, parent, reg, shift, width) \
- imx_clk_hw_divider2(name, parent, reg, shift, width)->clk
+ to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
+
+#define imx_clk_divider_flags(name, parent, reg, shift, width, flags) \
+ to_clk(imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags))
+
+#define imx_clk_gate(name, parent, reg, shift) \
+ to_clk(imx_clk_hw_gate(name, parent, reg, shift))
#define imx_clk_gate_dis(name, parent, reg, shift) \
- imx_clk_hw_gate_dis(name, parent, reg, shift)->clk
+ to_clk(imx_clk_hw_gate_dis(name, parent, reg, shift))
#define imx_clk_gate2(name, parent, reg, shift) \
- imx_clk_hw_gate2(name, parent, reg, shift)->clk
+ to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
#define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
- imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk
+ to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
#define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
- imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk
+ to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
#define imx_clk_gate3(name, parent, reg, shift) \
- imx_clk_hw_gate3(name, parent, reg, shift)->clk
+ to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
#define imx_clk_gate4(name, parent, reg, shift) \
- imx_clk_hw_gate4(name, parent, reg, shift)->clk
+ to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
#define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
- imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk
+ to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
+
+#define imx_clk_pllv1(type, name, parent, base) \
+ to_clk(imx_clk_hw_pllv1(type, name, parent, base))
+
+#define imx_clk_pllv2(name, parent, base) \
+ to_clk(imx_clk_hw_pllv2(name, parent, base))
+
+#define imx_clk_frac_pll(name, parent_name, base) \
+ to_clk(imx_clk_hw_frac_pll(name, parent_name, base))
+
+#define imx_clk_sscg_pll(name, parent_names, num_parents, parent,\
+ bypass1, bypass2, base, flags) \
+ to_clk(imx_clk_hw_sscg_pll(name, parent_names, num_parents, parent,\
+ bypass1, bypass2, base, flags))
struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
-struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
+#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
+ to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
+
+struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk);
+
+struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base);
-struct clk *imx_clk_pllv2(const char *name, const char *parent,
+struct clk_hw *imx_clk_hw_pllv2(const char *name, const char *parent,
void __iomem *base);
-struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_frac_pll(const char *name, const char *parent_name,
void __iomem *base);
-struct clk *imx_clk_sccg_pll(const char *name,
+struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
const char * const *parent_names,
u8 num_parents,
u8 parent, u8 bypass1, u8 bypass2,
@@ -149,7 +184,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
.kdiv = (_k), \
}
-struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
void __iomem *base);
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
@@ -173,7 +208,7 @@ struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name,
void __iomem *reg, u8 idx);
-struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
void __iomem *reg, u8 idx);
struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
@@ -184,7 +219,7 @@ struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift
u8 width, void __iomem *busy_reg, u8 busy_shift,
const char * const *parent_names, int num_parents);
-struct clk_hw *imx7ulp_clk_composite(const char *name,
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, bool mux_present,
bool rate_present, bool gate_present,
@@ -198,9 +233,11 @@ struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
int num_parents, void (*fixup)(u32 *val));
-static inline struct clk *imx_clk_fixed(const char *name, int rate)
+static inline struct clk *to_clk(struct clk_hw *hw)
{
- return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+ if (IS_ERR_OR_NULL(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
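A usage note, not part of the patch: every legacy imx_clk_*() macro above now expands through to_clk(), so a failure in the clk_hw layer still reaches old-style callers as an ERR_PTR instead of a bad hw->clk dereference. A minimal sketch, with the clock name, parent and register offset invented:

	/* Sketch only: "uart1_gate", "uart1_podf" and base + 0x74 are placeholders. */
	struct clk *clk;

	clk = imx_clk_gate2("uart1_gate", "uart1_podf", base + 0x74, 24);
	if (IS_ERR(clk))	/* error forwarded by to_clk() */
		return PTR_ERR(clk);
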
static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
@@ -224,13 +261,6 @@ static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
CLK_SET_RATE_PARENT, mult, div);
}
-static inline struct clk *imx_clk_divider(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width)
-{
- return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
static inline struct clk_hw *imx_clk_hw_divider(const char *name,
const char *parent,
void __iomem *reg, u8 shift,
@@ -240,14 +270,6 @@ static inline struct clk_hw *imx_clk_hw_divider(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
-static inline struct clk *imx_clk_divider_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift, u8 width,
- unsigned long flags)
-{
- return clk_register_divider(NULL, name, parent, flags,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
const char *parent,
void __iomem *reg, u8 shift,
@@ -274,13 +296,6 @@ static inline struct clk *imx_clk_divider2_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
-static inline struct clk *imx_clk_gate(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
static inline struct clk_hw *imx_clk_hw_gate_flags(const char *name, const char *parent,
void __iomem *reg, u8 shift, unsigned long flags)
{
@@ -355,15 +370,18 @@ static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *pare
reg, shift, 0, &imx_ccm_lock);
}
-static inline struct clk *imx_clk_gate3_flags(const char *name,
+static inline struct clk_hw *imx_clk_hw_gate3_flags(const char *name,
const char *parent, void __iomem *reg, u8 shift,
unsigned long flags)
{
- return clk_register_gate(NULL, name, parent,
+ return clk_hw_register_gate(NULL, name, parent,
flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
reg, shift, 0, &imx_ccm_lock);
}
+#define imx_clk_gate3_flags(name, parent, reg, shift, flags) \
+ to_clk(imx_clk_hw_gate3_flags(name, parent, reg, shift, flags))
+
static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -372,15 +390,18 @@ static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *pare
reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
}
-static inline struct clk *imx_clk_gate4_flags(const char *name,
+static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
const char *parent, void __iomem *reg, u8 shift,
unsigned long flags)
{
- return clk_register_gate2(NULL, name, parent,
+ return clk_hw_register_gate2(NULL, name, parent,
flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
}
+#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
+ to_clk(imx_clk_hw_gate4_flags(name, parent, reg, shift, flags))
+
static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
int num_parents)
@@ -420,6 +441,16 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
&imx_ccm_lock);
}
+static inline struct clk_hw *imx_clk_hw_mux2_flags(const char *name,
+ void __iomem *reg, u8 shift, u8 width,
+ const char * const *parents,
+ int num_parents, unsigned long flags)
+{
+ return clk_hw_register_mux(NULL, name, parents, num_parents,
+ flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_mux2_flags(const char *name,
void __iomem *reg, u8 shift, u8 width,
const char * const *parents,
@@ -446,23 +477,38 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
-struct clk *imx8m_clk_composite_flags(const char *name,
- const char * const *parent_names,
- int num_parents, void __iomem *reg,
- unsigned long flags);
+struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+ const char * const *parent_names,
+ int num_parents,
+ void __iomem *reg,
+ unsigned long flags);
-#define __imx8m_clk_composite(name, parent_names, reg, flags) \
- imx8m_clk_composite_flags(name, parent_names, \
+#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
+ flags) \
+ to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
+ num_parents, reg, flags))
+
+#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
+ imx8m_clk_hw_composite_flags(name, parent_names, \
ARRAY_SIZE(parent_names), reg, \
flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+#define __imx8m_clk_composite(name, parent_names, reg, flags) \
+ to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
+
+#define imx8m_clk_hw_composite(name, parent_names, reg) \
+ __imx8m_clk_hw_composite(name, parent_names, reg, 0)
+
#define imx8m_clk_composite(name, parent_names, reg) \
__imx8m_clk_composite(name, parent_names, reg, 0)
+#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
+ __imx8m_clk_hw_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+
#define imx8m_clk_composite_critical(name, parent_names, reg) \
__imx8m_clk_composite(name, parent_names, reg, CLK_IS_CRITICAL)
-struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock);
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 7efc3617bbd5..ea3c70d1307e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -174,36 +174,36 @@ config COMMON_CLK_MT6779_AUDSYS
This driver supports MediaTek MT6779 audsys clocks.
config COMMON_CLK_MT6797
- bool "Clock driver for MediaTek MT6797"
- depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
- select COMMON_CLK_MEDIATEK
- default ARCH_MEDIATEK && ARM64
- ---help---
- This driver supports MediaTek MT6797 basic clocks.
+ bool "Clock driver for MediaTek MT6797"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ ---help---
+ This driver supports MediaTek MT6797 basic clocks.
config COMMON_CLK_MT6797_MMSYS
- bool "Clock driver for MediaTek MT6797 mmsys"
- depends on COMMON_CLK_MT6797
- ---help---
- This driver supports MediaTek MT6797 mmsys clocks.
+ bool "Clock driver for MediaTek MT6797 mmsys"
+ depends on COMMON_CLK_MT6797
+ ---help---
+ This driver supports MediaTek MT6797 mmsys clocks.
config COMMON_CLK_MT6797_IMGSYS
- bool "Clock driver for MediaTek MT6797 imgsys"
- depends on COMMON_CLK_MT6797
- ---help---
- This driver supports MediaTek MT6797 imgsys clocks.
+ bool "Clock driver for MediaTek MT6797 imgsys"
+ depends on COMMON_CLK_MT6797
+ ---help---
+ This driver supports MediaTek MT6797 imgsys clocks.
config COMMON_CLK_MT6797_VDECSYS
- bool "Clock driver for MediaTek MT6797 vdecsys"
- depends on COMMON_CLK_MT6797
- ---help---
- This driver supports MediaTek MT6797 vdecsys clocks.
+ bool "Clock driver for MediaTek MT6797 vdecsys"
+ depends on COMMON_CLK_MT6797
+ ---help---
+ This driver supports MediaTek MT6797 vdecsys clocks.
config COMMON_CLK_MT6797_VENCSYS
- bool "Clock driver for MediaTek MT6797 vencsys"
- depends on COMMON_CLK_MT6797
- ---help---
- This driver supports MediaTek MT6797 vencsys clocks.
+ bool "Clock driver for MediaTek MT6797 vencsys"
+ depends on COMMON_CLK_MT6797
+ ---help---
+ This driver supports MediaTek MT6797 vencsys clocks.
config COMMON_CLK_MT7622
bool "Clock driver for MediaTek MT7622"
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 3939f218587a..6eca2a406ee3 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -18,4 +18,4 @@ obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
-obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
+obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o meson8-ddr.o
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 2d39a8bc367c..fc9df4860872 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -129,7 +129,7 @@ static int mpll_set_rate(struct clk_hw *hw,
return 0;
}
-static void mpll_init(struct clk_hw *hw)
+static int mpll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
@@ -151,6 +151,8 @@ static void mpll_init(struct clk_hw *hw)
/* Set the magic misc bit if required */
if (MESON_PARM_APPLICABLE(&mpll->misc))
meson_parm_write(clk->map, &mpll->misc, 1);
+
+ return 0;
}
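The same void-to-int conversion of the .init callback recurs throughout this series (clk-phase, clk-pll, sclk-div, microchip, mmp and the qcom hfpll below). A hedged sketch of the new contract, with invented register names:

	/* Sketch: .init may now report failure; returning 0 keeps the old
	 * void semantics. The EXAMPLE_* names are placeholders. */
	static int example_clk_init(struct clk_hw *hw)
	{
		struct example_clk *p = to_example_clk(hw);

		if (readl(p->base + EXAMPLE_STATUS) & EXAMPLE_BAD_FUSE)
			return -ENODEV;	/* surfaced at clk registration time */

		writel(EXAMPLE_DEFAULT_CFG, p->base + EXAMPLE_CFG);
		return 0;
	}
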
const struct clk_ops meson_clk_mpll_ro_ops = {
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index 80c3ada193a4..fe22e171121a 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -78,7 +78,7 @@ meson_clk_triphase_data(struct clk_regmap *clk)
return (struct meson_clk_triphase_data *)clk->data;
}
-static void meson_clk_triphase_sync(struct clk_hw *hw)
+static int meson_clk_triphase_sync(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
@@ -88,6 +88,8 @@ static void meson_clk_triphase_sync(struct clk_hw *hw)
val = meson_parm_read(clk->map, &tph->ph0);
meson_parm_write(clk->map, &tph->ph1, val);
meson_parm_write(clk->map, &tph->ph2, val);
+
+ return 0;
}
static int meson_clk_triphase_get_phase(struct clk_hw *hw)
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index ddb1e5634739..b17a13e9337c 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -77,6 +77,15 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
unsigned int m, n, frac;
n = meson_parm_read(clk->map, &pll->n);
+
+ /*
+ * On some HW, N is set to zero on init. This value is invalid as
+ * it would result in a division by zero. The rate can't be
+ * calculated in this case
+ */
+ if (n == 0)
+ return 0;
+
m = meson_parm_read(clk->map, &pll->m);
frac = MESON_PARM_APPLICABLE(&pll->frac) ?
@@ -277,7 +286,7 @@ static int meson_clk_pll_wait_lock(struct clk_hw *hw)
return -ETIMEDOUT;
}
-static void meson_clk_pll_init(struct clk_hw *hw)
+static int meson_clk_pll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
@@ -288,6 +297,8 @@ static void meson_clk_pll_init(struct clk_hw *hw)
pll->init_count);
meson_parm_write(clk->map, &pll->rst, 0);
}
+
+ return 0;
}
static int meson_clk_pll_is_enabled(struct clk_hw *hw)
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index b3af61cc6fb9..d2760a021301 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&g12a_bt656,
&g12a_usb1_to_ddr,
&g12a_mmc_pclk,
+ &g12a_uart2,
&g12a_vpu_intr,
&g12a_gic,
&g12a_sd_emmc_a_clk0,
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
new file mode 100644
index 000000000000..4b73ea244b63
--- /dev/null
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Amlogic Meson8 DDR clock controller
+ *
+ * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <dt-bindings/clock/meson8-ddr-clkc.h>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-regmap.h"
+#include "clk-pll.h"
+
+#define AM_DDR_PLL_CNTL 0x00
+#define AM_DDR_PLL_CNTL1 0x04
+#define AM_DDR_PLL_CNTL2 0x08
+#define AM_DDR_PLL_CNTL3 0x0c
+#define AM_DDR_PLL_CNTL4 0x10
+#define AM_DDR_PLL_STS 0x14
+#define DDR_CLK_CNTL 0x18
+#define DDR_CLK_STS 0x1c
+
+static struct clk_regmap meson8_ddr_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = AM_DDR_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = AM_DDR_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = AM_DDR_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .l = {
+ .reg_off = AM_DDR_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = AM_DDR_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ddr_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap meson8_ddr_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = AM_DDR_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "ddr_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8_ddr_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
+ .hws = {
+ [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
+ [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
+ },
+ .num = 2,
+};
+
+static struct clk_regmap *const meson8_ddr_clk_regmaps[] = {
+ &meson8_ddr_pll_dco,
+ &meson8_ddr_pll,
+};
+
+static const struct regmap_config meson8_ddr_clkc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = DDR_CLK_STS,
+};
+
+static int meson8_ddr_clkc_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int ret, i;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &meson8_ddr_clkc_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Populate regmap */
+ for (i = 0; i < ARRAY_SIZE(meson8_ddr_clk_regmaps); i++)
+ meson8_ddr_clk_regmaps[i]->map = regmap;
+
+ /* Register all clks */
+ for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
+ hw = meson8_ddr_clk_hw_onecell_data.hws[i];
+
+ ret = devm_clk_hw_register(&pdev->dev, hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Clock registration failed\n");
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ &meson8_ddr_clk_hw_onecell_data);
+}
+
+static const struct of_device_id meson8_ddr_clkc_match_table[] = {
+ { .compatible = "amlogic,meson8-ddr-clkc" },
+ { .compatible = "amlogic,meson8b-ddr-clkc" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson8_ddr_clkc_driver = {
+ .probe = meson8_ddr_clkc_probe,
+ .driver = {
+ .name = "meson8-ddr-clkc",
+ .of_match_table = meson8_ddr_clkc_match_table,
+ },
+};
+
+builtin_platform_driver(meson8_ddr_clkc_driver);
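For orientation, the onecell provider registered above is consumed like any other DT clock. A consumer-side sketch; the "ddr_pll" clock-names entry is an assumption, not taken from a binding:

	/* Sketch: a consumer reading back the read-only DDR PLL rate. */
	struct clk *ddr_pll = devm_clk_get(&pdev->dev, "ddr_pll");

	if (IS_ERR(ddr_pll))
		return PTR_ERR(ddr_pll);
	dev_info(&pdev->dev, "DDR PLL rate: %lu Hz\n", clk_get_rate(ddr_pll));
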
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 67e6691e080c..9fd31f23b2a9 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -97,8 +97,10 @@ static struct clk_regmap meson8b_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .name = "xtal",
+ .index = -1,
},
.num_parents = 1,
},
@@ -162,8 +164,10 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
/* sometimes also called "HPLL" or "HPLL PLL" */
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .name = "xtal",
+ .index = -1,
},
.num_parents = 1,
},
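A note on the clk_parent_data form introduced above: the core resolves a parent by trying the .hw pointer first, then the firmware lookup (.fw_name matched against "clock-names", or a positional .index into "clocks"), and finally the legacy global .name. Setting .index = -1 disables the positional path, so old .dtbs without a "clock-names" property still fall back to the legacy name. A condensed sketch with invented identifiers:

	/* Sketch: three ways of describing a parent in clk_parent_data. */
	static const struct clk_parent_data example_parents[] = {
		{ .fw_name = "xtal", .name = "xtal", .index = -1 },	/* DT lookup, legacy fallback */
		{ .hw = &example_pll.hw },				/* direct in-driver link */
		{ .index = 0 },						/* first "clocks" entry */
	};
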
@@ -237,8 +241,10 @@ static struct clk_regmap meson8b_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .name = "xtal",
+ .index = -1,
},
.num_parents = 1,
},
@@ -631,9 +637,9 @@ static struct clk_regmap meson8b_cpu_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "cpu_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw,
- &meson8b_sys_pll.hw,
+ .parent_data = (const struct clk_parent_data[]) {
+ { .fw_name = "xtal", .name = "xtal", .index = -1, },
+ { .hw = &meson8b_sys_pll.hw, },
},
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
@@ -736,9 +742,9 @@ static struct clk_regmap meson8b_cpu_clk = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw,
- &meson8b_cpu_scale_out_sel.hw,
+ .parent_data = (const struct clk_parent_data[]) {
+ { .fw_name = "xtal", .name = "xtal", .index = -1, },
+ { .hw = &meson8b_cpu_scale_out_sel.hw, },
},
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
@@ -758,12 +764,12 @@ static struct clk_regmap meson8b_nand_clk_sel = {
.name = "nand_clk_sel",
.ops = &clk_regmap_mux_ops,
/* FIXME all other parents are unknown: */
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_fclk_div4.hw,
- &meson8b_fclk_div3.hw,
- &meson8b_fclk_div5.hw,
- &meson8b_fclk_div7.hw,
- &meson8b_xtal.hw,
+ .parent_data = (const struct clk_parent_data[]) {
+ { .hw = &meson8b_fclk_div4.hw, },
+ { .hw = &meson8b_fclk_div3.hw, },
+ { .hw = &meson8b_fclk_div5.hw, },
+ { .hw = &meson8b_fclk_div7.hw, },
+ { .fw_name = "xtal", .name = "xtal", .index = -1, },
},
.num_parents = 5,
.flags = CLK_SET_RATE_PARENT,
@@ -1721,8 +1727,10 @@ static struct clk_regmap meson8b_hdmi_sys_sel = {
.name = "hdmi_sys_sel",
.ops = &clk_regmap_mux_ro_ops,
/* FIXME: all other parents are unknown */
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .name = "xtal",
+ .index = -1,
},
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
@@ -1764,17 +1772,20 @@ static struct clk_regmap meson8b_hdmi_sys = {
/*
* The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
- * has mali_0 and no glitch-free mux.
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
+ * actually manage this glitch-free mux because it updates each clock
+ * tree from top to bottom and switches to the "inactive" one when
+ * CLK_SET_RATE_GATE is set.
+ * Meson8 only has mali_0 and no glitch-free mux.
*/
-static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = {
- &meson8b_xtal.hw,
- &meson8b_mpll2.hw,
- &meson8b_mpll1.hw,
- &meson8b_fclk_div7.hw,
- &meson8b_fclk_div4.hw,
- &meson8b_fclk_div3.hw,
- &meson8b_fclk_div5.hw,
+static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+ { .fw_name = "xtal", .name = "xtal", .index = -1, },
+ { .hw = &meson8b_mpll2.hw, },
+ { .hw = &meson8b_mpll1.hw, },
+ { .hw = &meson8b_fclk_div7.hw, },
+ { .hw = &meson8b_fclk_div4.hw, },
+ { .hw = &meson8b_fclk_div3.hw, },
+ { .hw = &meson8b_fclk_div5.hw, },
};
static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
@@ -1789,8 +1800,8 @@ static struct clk_regmap meson8b_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_mali_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
+ .parent_data = meson8b_mali_0_1_parent_data,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1830,7 +1841,7 @@ static struct clk_regmap meson8b_mali_0 = {
&meson8b_mali_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -1844,8 +1855,8 @@ static struct clk_regmap meson8b_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_mali_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
+ .parent_data = meson8b_mali_0_1_parent_data,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1885,7 +1896,7 @@ static struct clk_regmap meson8b_mali_1 = {
&meson8b_mali_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -1944,8 +1955,10 @@ static struct clk_regmap meson8m2_gp_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_xtal.hw
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .name = "xtal",
+ .index = -1,
},
.num_parents = 1,
},
@@ -3585,7 +3598,7 @@ static const struct reset_control_ops meson8b_clk_reset_ops = {
struct meson8b_nb_data {
struct notifier_block nb;
- struct clk_hw_onecell_data *onecell_data;
+ struct clk_hw *cpu_clk;
};
static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
@@ -3593,30 +3606,25 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
{
struct meson8b_nb_data *nb_data =
container_of(nb, struct meson8b_nb_data, nb);
- struct clk_hw **hws = nb_data->onecell_data->hws;
- struct clk_hw *cpu_clk_hw, *parent_clk_hw;
- struct clk *cpu_clk, *parent_clk;
+ struct clk_hw *parent_clk;
int ret;
switch (event) {
case PRE_RATE_CHANGE:
- parent_clk_hw = hws[CLKID_XTAL];
+ /* xtal */
+ parent_clk = clk_hw_get_parent_by_index(nb_data->cpu_clk, 0);
break;
case POST_RATE_CHANGE:
- parent_clk_hw = hws[CLKID_CPU_SCALE_OUT_SEL];
+ /* cpu_scale_out_sel */
+ parent_clk = clk_hw_get_parent_by_index(nb_data->cpu_clk, 1);
break;
default:
return NOTIFY_DONE;
}
- cpu_clk_hw = hws[CLKID_CPUCLK];
- cpu_clk = __clk_lookup(clk_hw_get_name(cpu_clk_hw));
-
- parent_clk = __clk_lookup(clk_hw_get_name(parent_clk_hw));
-
- ret = clk_set_parent(cpu_clk, parent_clk);
+ ret = clk_hw_set_parent(nb_data->cpu_clk, parent_clk);
if (ret)
return notifier_from_errno(ret);
@@ -3682,20 +3690,26 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
meson8b_clk_regmaps[i]->map = map;
/*
- * register all clks
- * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
+ * always skip CLKID_UNUSED and also skip XTAL if the .dtb provides the
+ * XTAL clock as input.
*/
- for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
+ if (!IS_ERR(of_clk_get_by_name(np, "xtal")))
+ i = CLKID_PLL_FIXED;
+ else
+ i = CLKID_XTAL;
+
+ /* register all clks */
+ for (; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
if (!clk_hw_onecell_data->hws[i])
continue;
- ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]);
+ ret = of_clk_hw_register(np, clk_hw_onecell_data->hws[i]);
if (ret)
return;
}
- meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data;
+ meson8b_cpu_nb_data.cpu_clk = clk_hw_onecell_data->hws[CLKID_CPUCLK];
/*
* FIXME we shouldn't program the muxes in notifier handlers. The
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 3acf03780221..76d31c0a3342 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -216,7 +216,7 @@ static int sclk_div_is_enabled(struct clk_hw *hw)
return 0;
}
-static void sclk_div_init(struct clk_hw *hw)
+static int sclk_div_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
@@ -231,6 +231,8 @@ static void sclk_div_init(struct clk_hw *hw)
sclk->cached_div = val + 1;
sclk_div_get_duty_cycle(hw, &sclk->cached_duty);
+
+ return 0;
}
const struct clk_ops meson_sclk_div_ops = {
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 567755d6f844..1b4f023cdc8b 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -266,10 +266,12 @@ static void roclk_disable(struct clk_hw *hw)
writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
}
-static void roclk_init(struct clk_hw *hw)
+static int roclk_init(struct clk_hw *hw)
{
/* initialize clock in disabled state */
roclk_disable(hw);
+
+ return 0;
}
static u8 roclk_get_parent(struct clk_hw *hw)
@@ -880,7 +882,7 @@ static int sclk_set_parent(struct clk_hw *hw, u8 index)
return err;
}
-static void sclk_init(struct clk_hw *hw)
+static int sclk_init(struct clk_hw *hw)
{
struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
unsigned long flags;
@@ -899,6 +901,8 @@ static void sclk_init(struct clk_hw *hw)
writel(v, sclk->slew_reg);
spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
}
+
+ return 0;
}
/* sclk with post-divider */
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 90bf181f191a..fabc09aca6c4 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -109,7 +109,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static void clk_factor_init(struct clk_hw *hw)
+static int clk_factor_init(struct clk_hw *hw)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
@@ -146,6 +146,8 @@ static void clk_factor_init(struct clk_hw *hw)
if (factor->lock)
spin_unlock_irqrestore(factor->lock, flags);
+
+ return 0;
}
static const struct clk_ops clk_factor_ops = {
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 90814b2613c0..d2cd36c54474 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -419,12 +419,14 @@ static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
}
}
-static void mmp_clk_mix_init(struct clk_hw *hw)
+static int mmp_clk_mix_init(struct clk_hw *hw)
{
struct mmp_clk_mix *mix = to_clk_mix(hw);
if (mix->table)
_filter_clk_table(mix, mix->table, mix->table_size);
+
+ return 0;
}
const struct clk_ops mmp_clk_mix_ops = {
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index a60a1be937ad..6e71591e63a0 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,6 +53,8 @@
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
+#define APMU_USBHSIC0 0xf8
+#define APMU_USBHSIC1 0xfc
#define MPMU_UART_PLL 0x14
struct mmp2_clk_unit {
@@ -134,7 +136,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
@@ -194,6 +196,8 @@ static struct mmp_clk_mix_config sdh_mix_config = {
};
static DEFINE_SPINLOCK(usb_lock);
+static DEFINE_SPINLOCK(usbhsic0_lock);
+static DEFINE_SPINLOCK(usbhsic1_lock);
static DEFINE_SPINLOCK(disp0_lock);
static DEFINE_SPINLOCK(disp1_lock);
@@ -224,6 +228,8 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+ {MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock},
+ {MMP2_CLK_USBHSIC1, "usbhsic1_clk", "usb_pll", 0, APMU_USBHSIC1, 0x1b, 0x1b, 0x0, 0, &usbhsic1_lock},
/* The gate clocks have a mux parent. */
{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 415e6906a113..ded07b0bd0d5 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -29,7 +29,7 @@ config ARMADA_39X_CLK
select MVEBU_CLK_COMMON
config ARMADA_37XX_CLK
- bool
+ bool
config ARMADA_XP_CLK
bool
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 3b33ef129274..15cdcdc9b3b8 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config KRAIT_CLOCKS
- bool
- select KRAIT_L2_ACCESSORS
+ bool
+ select KRAIT_L2_ACCESSORS
config QCOM_GDSC
bool
@@ -14,6 +14,7 @@ menuconfig COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
depends on ARCH_QCOM || COMPILE_TEST
+ select RATIONAL
select REGMAP_MMIO
select RESET_CONTROLLER
@@ -95,6 +96,14 @@ config IPQ_GCC_4019
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config IPQ_GCC_6018
+ tristate "IPQ6018 Global Clock Controller"
+ help
+ Support for the global clock controller on IPQ6018 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, SD/eMMC, etc. Select this for the root clock
+ of IPQ6018.
+
config IPQ_GCC_806X
tristate "IPQ806x Global Clock Controller"
help
@@ -229,6 +238,15 @@ config MSM_GPUCC_8998
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config MSM_MMCC_8998
+ tristate "MSM8998 Multimedia Clock Controller"
+ select MSM_GCC_8998
+ select QCOM_GDSC
+ help
+ Support for the multimedia clock controller on MSM8998 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -236,6 +254,15 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SC_DISPCC_7180
+ tristate "SC7180 Display Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc.
+ SC7180 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SC_GCC_7180
tristate "SC7180 Global Clock Controller"
select QCOM_GDSC
@@ -245,6 +272,22 @@ config SC_GCC_7180
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
+config SC_GPUCC_7180
+ tristate "SC7180 Graphics Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the graphics clock controller on SC7180 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config SC_VIDEOCC_7180
+ tristate "SC7180 Video Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the video clock controller on SC7180 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SDM_CAMCC_845
tristate "SDM845 Camera Clock Controller"
select SDM_GCC_845
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index d899661d0f44..656a87e629d4 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -20,6 +20,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_MSM_GPUCC_8998) += gpucc-msm8998.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
@@ -45,7 +47,10 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
+obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index a6c89a310b18..cf69a97d0439 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -19,9 +19,9 @@
static const u32 gpll0_a53cc_map[] = { 4, 5 };
-static const char * const gpll0_a53cc[] = {
- "gpll0_vote",
- "a53pll",
+static const struct clk_parent_data pdata[] = {
+ { .fw_name = "aux", .name = "gpll0_vote", },
+ { .fw_name = "pll", .name = "a53pll", },
};
/*
@@ -62,8 +62,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
return -ENOMEM;
init.name = "a53mux";
- init.parent_names = gpll0_a53cc;
- init.num_parents = ARRAY_SIZE(gpll0_a53cc);
+ init.parent_data = pdata;
+ init.num_parents = ARRAY_SIZE(pdata);
init.ops = &clk_regmap_mux_div_ops;
init.flags = CLK_SET_RATE_PARENT;
@@ -79,7 +79,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
a53cc->pclk = devm_clk_get(parent, NULL);
if (IS_ERR(a53cc->pclk)) {
ret = PTR_ERR(a53cc->pclk);
- dev_err(dev, "failed to get clk: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get clk: %d\n", ret);
return ret;
}
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 055318f97991..7c2936da9b14 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -878,6 +878,14 @@ static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return clamp(rate, min_freq, max_freq);
}
+const struct clk_ops clk_alpha_pll_fixed_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_ops);
+
const struct clk_ops clk_alpha_pll_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
@@ -1024,6 +1032,25 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
regmap_write(regmap, PLL_CONFIG_CTL(pll),
config->config_ctl_val);
+ if (config->config_ctl_hi_val)
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (config->user_ctl_val)
+ regmap_write(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+
+ if (config->user_ctl_hi_val)
+ regmap_write(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+
+ if (config->test_ctl_val)
+ regmap_write(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+
+ if (config->test_ctl_hi_val)
+ regmap_write(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+
if (config->post_div_mask) {
mask = config->post_div_mask;
val = config->post_div_val;
@@ -1141,14 +1168,9 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- u32 val, l, alpha_width = pll_alpha_width(pll);
+ u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long rrate;
- int ret = 0;
-
- ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
- if (ret)
- return ret;
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
@@ -1167,7 +1189,64 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_alpha_pll_update_latch(pll);
}
+static int alpha_pll_fabia_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *vco;
+ struct clk_hw *parent_hw;
+ unsigned long cal_freq, rrate;
+ u32 cal_l, val, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ int ret;
+
+ /* Check if calibration needs to be done, i.e. the PLL is in reset */
+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (ret)
+ return ret;
+
+ /* Return early if calibration is not needed. */
+ if (val & PLL_RESET_N)
+ return 0;
+
+ vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ cal_freq = DIV_ROUND_CLOSEST((pll->vco_table[0].min_freq +
+ pll->vco_table[0].max_freq) * 54, 100);
+
+ parent_hw = clk_hw_get_parent(hw);
+ if (!parent_hw)
+ return -EINVAL;
+
+ rrate = alpha_pll_round_rate(cal_freq, clk_hw_get_rate(parent_hw),
+ &cal_l, &a, alpha_width);
+ /*
+ * Due to a limited number of bits for fractional rate programming, the
+ * rounded-up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (cal_freq + FABIA_PLL_RATE_MARGIN) || rrate < cal_freq)
+ return -EINVAL;
+
+ /* Set up the PLL for the calibration frequency */
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
+
+ /* Bring up the PLL at the calibration frequency */
+ ret = clk_alpha_pll_enable(hw);
+ if (ret) {
+ pr_err("alpha pll calibration failed\n");
+ return ret;
+ }
+
+ clk_alpha_pll_disable(hw);
+
+ return 0;
+}
+
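A worked example of the calibration frequency above, using the Fabia VCO band from dispcc-sc7180 later in this patch ({ 249600000, 2000000000 }): cal_freq = DIV_ROUND_CLOSEST((249600000 + 2000000000) * 54, 100) = 1214784000 Hz, i.e. 54% of the sum of the band edges, which sits a little above the midpoint of the 249.6 MHz to 2 GHz range.
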
const struct clk_ops clk_alpha_pll_fabia_ops = {
+ .prepare = alpha_pll_fabia_prepare,
.enable = alpha_pll_fabia_enable,
.disable = alpha_pll_fabia_disable,
.is_enabled = clk_alpha_pll_is_enabled,
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 15f27f4b06df..fbc1f67c7a26 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -94,6 +94,10 @@ struct alpha_pll_config {
u32 alpha_hi;
u32 config_ctl_val;
u32 config_ctl_hi_val;
+ u32 user_ctl_val;
+ u32 user_ctl_hi_val;
+ u32 test_ctl_val;
+ u32 test_ctl_hi_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -109,6 +113,7 @@ struct alpha_pll_config {
};
extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_ops;
extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_alpha_pll_huayra_ops;
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
index 3c04805f2a55..e847d586a73a 100644
--- a/drivers/clk/qcom/clk-hfpll.c
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -196,7 +196,7 @@ static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
return l_val * parent_rate;
}
-static void clk_hfpll_init(struct clk_hw *hw)
+static int clk_hfpll_init(struct clk_hw *hw)
{
struct clk_hfpll *h = to_clk_hfpll(hw);
struct hfpll_data const *hd = h->d;
@@ -206,7 +206,7 @@ static void clk_hfpll_init(struct clk_hw *hw)
regmap_read(regmap, hd->mode_reg, &mode);
if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
__clk_hfpll_init_once(hw);
- return;
+ return 0;
}
if (hd->status_reg) {
@@ -218,6 +218,8 @@ static void clk_hfpll_init(struct clk_hw *hw)
__clk_hfpll_init_once(hw);
}
}
+
+ return 0;
}
static int hfpll_is_enabled(struct clk_hw *hw)
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 78358b81d249..86d2b8b90173 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -161,6 +161,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_dp_ops;
struct clk_rcg_dfs_data {
struct clk_rcg2 *rcg;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8f4b9bec2956..357159fe85b5 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>
@@ -217,6 +218,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
clk_flags = clk_hw_get_flags(hw);
p = clk_hw_get_parent_by_index(hw, index);
+ if (!p)
+ return -EINVAL;
+
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = f->freq;
if (f->pre_div) {
@@ -952,7 +956,7 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
struct clk_hw *p;
unsigned long prate = 0;
- u32 val, mask, cfg, mode;
+ u32 val, mask, cfg, mode, src;
int i, num_parents;
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
@@ -962,12 +966,12 @@ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
if (cfg & mask)
f->pre_div = cfg & mask;
- cfg &= CFG_SRC_SEL_MASK;
- cfg >>= CFG_SRC_SEL_SHIFT;
+ src = cfg & CFG_SRC_SEL_MASK;
+ src >>= CFG_SRC_SEL_SHIFT;
num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++) {
- if (cfg == rcg->parent_map[i].cfg) {
+ if (src == rcg->parent_map[i].cfg) {
f->src = rcg->parent_map[i].src;
p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
prate = clk_hw_get_rate(p);
@@ -1124,3 +1128,79 @@ int qcom_cc_register_rcg_dfs(struct regmap *regmap,
return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
+
+static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ unsigned long num, den;
+
+ rational_best_approximation(parent_rate, rate,
+ GENMASK(rcg->mnd_width - 1, 0),
+ GENMASK(rcg->mnd_width - 1, 0), &den, &num);
+
+ if (!num || !den)
+ return -EINVAL;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ hid_div = cfg;
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
+ }
+
+ f.pre_div = hid_div;
+ f.pre_div >>= CFG_SRC_DIV_SHIFT;
+ f.pre_div &= mask;
+
+ if (num != den) {
+ f.m = num;
+ f.n = den;
+ } else {
+ f.m = 0;
+ f.n = 0;
+ }
+
+ return clk_rcg2_configure(rcg, &f);
+}
+
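For intuition about the M/N derivation above, a standalone sketch with invented rates: rational_best_approximation(parent_rate, rate, max, max, &den, &num) fills den/num with the best rational fit to parent_rate/rate within the mnd_width bit budget. With a hypothetical 810 MHz DP link clock and a 148.5 MHz pixel clock:

	/* Sketch: 810000000/148500000 reduces exactly to 60/11, so with
	 * 16-bit M/N fields the call yields den = 60, num = 11. */
	unsigned long num, den;

	rational_best_approximation(810000000, 148500000,
				    GENMASK(15, 0), GENMASK(15, 0),
				    &den, &num);
	/* f.m = num = 11, f.n = den = 60: 810 MHz * 11 / 60 = 148.5 MHz */
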
+static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
+}
+
+static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = *req;
+ int ret;
+
+ ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
+ if (ret)
+ return ret;
+
+ req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+const struct clk_ops clk_dp_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_rcg2_dp_set_rate,
+ .set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
+ .determine_rate = clk_rcg2_dp_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_dp_ops);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 7ed313ad6e43..98a118c1e244 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -396,6 +396,7 @@ static struct clk_hw *sc7180_rpmh_clocks[] = {
[RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
[RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
[RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
@@ -431,11 +432,16 @@ static int clk_rpmh_probe(struct platform_device *pdev)
hw_clks = desc->clks;
for (i = 0; i < desc->num_clks; i++) {
- const char *name = hw_clks[i]->init->name;
+ const char *name;
u32 res_addr;
size_t aux_data_len;
const struct bcm_db *data;
+ if (!hw_clks[i])
+ continue;
+
+ name = hw_clks[i]->init->name;
+
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
if (!res_addr) {
@@ -481,9 +487,9 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
- { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 930fa4a4c52a..0bbfef9fa6de 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -485,6 +485,8 @@ static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk,
[RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk,
[RPM_SMD_BIMC_CLK] = &msm8974_bimc_clk,
+ [RPM_SMD_GFX3D_CLK_SRC] = &msm8974_gfx3d_clk_src,
+ [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8974_gfx3d_a_clk_src,
[RPM_SMD_BIMC_A_CLK] = &msm8974_bimc_a_clk,
[RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk,
[RPM_SMD_OCMEMGX_A_CLK] = &msm8974_ocmemgx_a_clk,
@@ -648,6 +650,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
};
/* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
@@ -671,6 +674,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
static struct clk_smd_rpm *msm8998_clks[] = {
+ [RPM_SMD_BIMC_CLK] = &msm8998_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8998_bimc_a_clk,
[RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
[RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
new file mode 100644
index 000000000000..dd7af41e47eb
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_DISP_CC_PLL0_OUT_EVEN,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_even[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_disp_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dp_phy_pll_link_clk" },
+ { .fw_name = "dp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dsi0_phy_pll_out_byteclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL0_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .fw_name = "gcc_disp_gpll0_clk_src" },
+ { .hw = &disp_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "gcc_disp_gpll0_clk_src" },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dsi0_phy_pll_out_dsiclk" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x22bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x2110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x21dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2194,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_crypto_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x2178,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x21ac,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x2148,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(345000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(460000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x20c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2098,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x20e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
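+/*
+ * Note the split in ops above: the RCGs using clk_rcg2_shared_ops (ahb,
+ * mdp, rot, vsync) are parked on the XO parent while disabled so their
+ * PLL sources can power down, whereas the byte, pixel and DP sources use
+ * the clk_byte2/clk_pixel/clk_dp ops and follow the rate delivered by
+ * the DSI/DP PHY PLLs.
+ */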
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
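+/*
+ * Every branch (gate) clock here follows the same pattern:
+ * clk_branch2_ops set BIT(0) of the CBCR at enable_reg, then poll
+ * halt_reg (the same CBCR) until the CLK_OFF status bit shows the clock
+ * running; BRANCH_HALT selects that polarity for the check, and
+ * CLK_SET_RATE_PARENT forwards rate requests to the RCG parent.
+ */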
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x2128,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
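+/*
+ * The regmap dividers store (divisor - 1) in the register field, so a
+ * 2-bit field spans divide-by-1 through divide-by-4; e.g. a raw value of
+ * 0x3 means divide-by-4.
+ */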
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+ .reg = 0x2190,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x2054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+ .halt_reg = 0x2048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_crypto_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x204c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x201c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
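+/*
+ * mdss_gdsc above is the MDSS power domain: PWRSTS_OFF_ON means only
+ * full off/on is supported (no retention state), and HW_CTRL lets the
+ * display hardware itself collapse and restore the domain once software
+ * has enabled it.
+ */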
+static struct gdsc *disp_cc_sc7180_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static struct clk_regmap *disp_cc_sc7180_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] =
+ &disp_cc_mdss_dp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL0_OUT_EVEN] = &disp_cc_pll0_out_even.clkr,
+};
+
+static const struct regmap_config disp_cc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
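+/*
+ * 32-bit registers on a 4-byte stride; fast_io makes regmap take a
+ * spinlock rather than a mutex, which fits these MMIO-only accesses.
+ */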
+static const struct qcom_cc_desc disp_cc_sc7180_desc = {
+ .config = &disp_cc_sc7180_regmap_config,
+ .clks = disp_cc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sc7180_clocks),
+ .gdscs = disp_cc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id disp_cc_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sc7180_match_table);
+
+static int disp_cc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config disp_cc_pll_config = {};
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sc7180_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* 1380 MHz configuration */
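+ /*
+  * With the 19.2 MHz TCXO reference this decodes to
+  * 19.2 MHz * (0x47 + 0xe000 / 2^16) = 19.2 MHz * 71.875 = 1380 MHz,
+  * which the MDP table above divides down to 345 MHz and 460 MHz.
+  */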
+ disp_cc_pll_config.l = 0x47;
+ disp_cc_pll_config.alpha = 0xe000;
+ disp_cc_pll_config.user_ctl_val = 0x00000001;
+ disp_cc_pll_config.user_ctl_hi_val = 0x00004805;
+
+ clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll_config);
+
+ return qcom_cc_really_probe(pdev, &disp_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sc7180_driver = {
+ .probe = disp_cc_sc7180_probe,
+ .driver = {
+ .name = "sc7180-dispcc",
+ .of_match_table = disp_cc_sc7180_match_table,
+ },
+};
+
+static int __init disp_cc_sc7180_init(void)
+{
+ return platform_driver_register(&disp_cc_sc7180_driver);
+}
+subsys_initcall(disp_cc_sc7180_init);
+
+static void __exit disp_cc_sc7180_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sc7180_driver);
+}
+module_exit(disp_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI DISP_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 0cc4909b5dbe..5c932cd17b14 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -29,6 +29,8 @@ enum {
P_DSI1_PHY_PLL_OUT_DSICLK,
P_GPLL0_OUT_MAIN,
P_GPLL0_OUT_MAIN_DIV,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
};
static const struct parent_map disp_cc_parent_map_0[] = {
@@ -45,6 +47,20 @@ static const char * const disp_cc_parent_names_0[] = {
"core_bi_pll_test_se",
};
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+ "bi_tcxo",
+ "dp_link_clk_divsel_ten",
+ "dp_vco_divided_clk_src_mux",
+ "core_bi_pll_test_se",
+};
+
static const struct parent_map disp_cc_parent_map_2[] = {
{ P_BI_TCXO, 0 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
@@ -128,6 +144,81 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
},
};
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x219c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_names = disp_cc_parent_names_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_crypto_clk_src",
+ .parent_names = disp_cc_parent_names_1,
+ .num_parents = 4,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x2138,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_names = disp_cc_parent_names_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+ .cmd_rcgr = 0x2184,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel1_clk_src",
+ .parent_names = disp_cc_parent_names_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x216c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_names = disp_cc_parent_names_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
{ }
@@ -391,6 +482,114 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
},
};
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x2054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_dp_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+ .halt_reg = 0x2048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_crypto_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_dp_crypto_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_dp_link_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* reset state of disp_cc_mdss_dp_link_div_clk_src divider is 0x3 (div 4) */
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_dp_link_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+ .halt_reg = 0x2050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel1_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_dp_pixel1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x204c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_dp_pixel_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch disp_cc_mdss_esc0_clk = {
.halt_reg = 0x2038,
.halt_check = BRANCH_HALT,
@@ -589,6 +788,19 @@ static struct clk_regmap *disp_cc_sdm845_clocks[] = {
[DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
[DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] =
&disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] =
+ &disp_cc_mdss_dp_crypto_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] =
+ &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
[DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
new file mode 100644
index 000000000000..3f9c2f61a5d9
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -0,0 +1,4635 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gcc-ipq6018.h>
+#include <dt-bindings/reset/qcom,gcc-ipq6018.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
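+/*
+ * The (2 * h - 1) encoding lets the frequency tables express the RCG
+ * half-integer pre-dividers directly: h = 2.5 becomes a register value
+ * of 4, h = 1 becomes 1, and so on.
+ */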
+enum {
+ P_XO,
+ P_BIAS_PLL,
+ P_UNIPHY0_RX,
+ P_UNIPHY0_TX,
+ P_UNIPHY1_RX,
+ P_BIAS_PLL_NSS_NOC,
+ P_UNIPHY1_TX,
+ P_PCIE20_PHY0_PIPE,
+ P_USB3PHY_0_PIPE,
+ P_GPLL0,
+ P_GPLL0_DIV2,
+ P_GPLL2,
+ P_GPLL4,
+ P_GPLL6,
+ P_SLEEP_CLK,
+ P_UBI32_PLL,
+ P_NSS_CRYPTO_PLL,
+ P_PI_SLEEP,
+};
+
+static struct clk_alpha_pll gpll0_main = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_main",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_out_main_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main_div2",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
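+/*
+ * GPLL0 is modelled as three clocks: the PLL itself, a fixed /2
+ * fixed-factor tap and the programmable post-divider. The post-divider
+ * uses the read-only ops since the boot firmware programs it and Linux
+ * only reads the value back.
+ */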
+static const struct clk_parent_data gcc_xo_gpll0_gpll0_out_main_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_alpha_pll ubi32_pll_main = {
+ .offset = 0x25000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "ubi32_pll_main",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
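+/*
+ * ubi32_pll feeds the NSS UBI cores (see nss_ubi0_clk_src below);
+ * SUPPORTS_DYNAMIC_UPDATE marks the Huayra PLL as able to switch
+ * frequency on the fly, without an intervening disable/re-enable.
+ */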
+static struct clk_alpha_pll_postdiv ubi32_pll = {
+ .offset = 0x25000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA],
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ubi32_pll",
+ .parent_hws = (const struct clk_hw *[]){
+ &ubi32_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll gpll6_main = {
+ .offset = 0x37000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_main",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll6 = {
+ .offset = 0x37000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll gpll4_main = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_main",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_pcnoc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2_main = {
+ .offset = 0x4a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_main",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x4a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll nss_crypto_pll_main = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_crypto_pll_main",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_crypto_pll",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_tsctr_clk_src[] = {
+ F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(600000000, P_GPLL4, 2, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll6_gpll0_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL6, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x29064,
+ .freq_tbl = ftbl_qdss_tsctr_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_tsctr_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor qdss_dap_sync_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "qdss_dap_sync_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_at_clk_src[] = {
+ F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+ F(240000000, P_GPLL4, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qdss_at_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .freq_tbl = ftbl_qdss_at_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_at_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "qdss_tsctr_div2_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_tsctr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_ppe_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(300000000, P_BIAS_PLL, 1, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_bias_gpll0_gpll4_nss_ubi32[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "bias_pll_cc_clk" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &nss_crypto_pll.clkr.hw },
+ { .hw = &ubi32_pll.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_bias_gpll0_gpll4_nss_ubi32_map[] = {
+ { P_XO, 0 },
+ { P_BIAS_PLL, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL4, 3 },
+ { P_NSS_CRYPTO_PLL, 4 },
+ { P_UBI32_PLL, 5 },
+};
+
+static struct clk_rcg2 nss_ppe_clk_src = {
+ .cmd_rcgr = 0x68080,
+ .freq_tbl = ftbl_nss_ppe_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_bias_gpll0_gpll4_nss_ubi32_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_ppe_clk_src",
+ .parent_data = gcc_xo_bias_gpll0_gpll4_nss_ubi32,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_xo_clk_src = {
+ .halt_reg = 0x30018,
+ .clkr = {
+ .enable_reg = 0x30018,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_nss_ce_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static struct clk_rcg2 nss_ce_clk_src = {
+ .cmd_rcgr = 0x68098,
+ .freq_tbl = ftbl_nss_ce_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_ce_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_sleep_clk_src = {
+ .halt_reg = 0x30000,
+ .clkr = {
+ .enable_reg = 0x30000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_snoc_nssnoc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll0_gpll6_gpll0_out_main_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 3 },
+};
+
+static struct clk_rcg2 snoc_nssnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x76054,
+ .freq_tbl = ftbl_snoc_nssnoc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "snoc_nssnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll0_out_main_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
+ F(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
+ F(156250000, P_UNIPHY1_RX, 2, 0, 0),
+ F(312500000, P_UNIPHY1_RX, 1, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data
+gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "uniphy0_gcc_rx_clk" },
+ { .fw_name = "uniphy0_gcc_tx_clk" },
+ { .fw_name = "uniphy1_gcc_rx_clk" },
+ { .fw_name = "uniphy1_gcc_tx_clk" },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map
+gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
+ { P_XO, 0 },
+ { P_UNIPHY0_RX, 1 },
+ { P_UNIPHY0_TX, 2 },
+ { P_UNIPHY1_RX, 3 },
+ { P_UNIPHY1_TX, 4 },
+ { P_UBI32_PLL, 5 },
+ { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port5_rx_clk_src = {
+ .cmd_rcgr = 0x68060,
+ .freq_tbl = ftbl_nss_port5_rx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port5_rx_clk_src",
+ .parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
+ F(78125000, P_UNIPHY1_TX, 4, 0, 0),
+ F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
+ F(156250000, P_UNIPHY1_TX, 2, 0, 0),
+ F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data
+gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "uniphy0_gcc_tx_clk" },
+ { .fw_name = "uniphy0_gcc_rx_clk" },
+ { .fw_name = "uniphy1_gcc_tx_clk" },
+ { .fw_name = "uniphy1_gcc_rx_clk" },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map
+gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
+ { P_XO, 0 },
+ { P_UNIPHY0_TX, 1 },
+ { P_UNIPHY0_RX, 2 },
+ { P_UNIPHY1_TX, 3 },
+ { P_UNIPHY1_RX, 4 },
+ { P_UBI32_PLL, 5 },
+ { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port5_tx_clk_src = {
+ .cmd_rcgr = 0x68068,
+ .freq_tbl = ftbl_nss_port5_tx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port5_tx_clk_src",
+ .parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_axi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(240000000, P_GPLL4, 5, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 2 },
+};
+
+static struct clk_rcg2 pcie0_axi_clk_src = {
+ .cmd_rcgr = 0x75054,
+ .freq_tbl = ftbl_pcie_axi_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie0_axi_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb0_master_clk_src[] = {
+ F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_out_main_div2_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main_div2.hw },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_out_main_div2_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_DIV2, 2 },
+ { P_GPLL0, 1 },
+};
+
+static struct clk_rcg2 usb0_master_clk_src = {
+ .cmd_rcgr = 0x3e00c,
+ .freq_tbl = ftbl_usb0_master_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb0_master_clk_src",
+ .parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div apss_ahb_postdiv_clk_src = {
+ .reg = 0x46018,
+ .shift = 4,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &apss_ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_div4_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_port1_rx_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_uniphy0_rx_tx_ubi32_bias[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "uniphy0_gcc_rx_clk" },
+ { .fw_name = "uniphy0_gcc_tx_clk" },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map gcc_xo_uniphy0_rx_tx_ubi32_bias_map[] = {
+ { P_XO, 0 },
+ { P_UNIPHY0_RX, 1 },
+ { P_UNIPHY0_TX, 2 },
+ { P_UBI32_PLL, 5 },
+ { P_BIAS_PLL, 6 },
+};
+
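+/*
+ * Select values in a parent_map need not be contiguous: this mux only
+ * wires up inputs 0, 1, 2, 5 and 6, so the unused selector values are
+ * simply left out of the table.
+ */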
+static struct clk_rcg2 nss_port1_rx_clk_src = {
+ .cmd_rcgr = 0x68020,
+ .freq_tbl = ftbl_nss_port1_rx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port1_rx_clk_src",
+ .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_port1_tx_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_uniphy0_tx_rx_ubi32_bias[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "uniphy0_gcc_tx_clk" },
+ { .fw_name = "uniphy0_gcc_rx_clk" },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .fw_name = "bias_pll_cc_clk" },
+};
+
+static const struct parent_map gcc_xo_uniphy0_tx_rx_ubi32_bias_map[] = {
+ { P_XO, 0 },
+ { P_UNIPHY0_TX, 1 },
+ { P_UNIPHY0_RX, 2 },
+ { P_UBI32_PLL, 5 },
+ { P_BIAS_PLL, 6 },
+};
+
+static struct clk_rcg2 nss_port1_tx_clk_src = {
+ .cmd_rcgr = 0x68028,
+ .freq_tbl = ftbl_nss_port1_tx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port1_tx_clk_src",
+ .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_port2_rx_clk_src = {
+ .cmd_rcgr = 0x68030,
+ .freq_tbl = ftbl_nss_port1_rx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port2_rx_clk_src",
+ .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_port2_tx_clk_src = {
+ .cmd_rcgr = 0x68038,
+ .freq_tbl = ftbl_nss_port1_tx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port2_tx_clk_src",
+ .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_port3_rx_clk_src = {
+ .cmd_rcgr = 0x68040,
+ .freq_tbl = ftbl_nss_port1_rx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port3_rx_clk_src",
+ .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_port3_tx_clk_src = {
+ .cmd_rcgr = 0x68048,
+ .freq_tbl = ftbl_nss_port1_tx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port3_tx_clk_src",
+ .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_port4_rx_clk_src = {
+ .cmd_rcgr = 0x68050,
+ .freq_tbl = ftbl_nss_port1_rx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_rx_tx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port4_rx_clk_src",
+ .parent_data = gcc_xo_uniphy0_rx_tx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 nss_port4_tx_clk_src = {
+ .cmd_rcgr = 0x68058,
+ .freq_tbl = ftbl_nss_port1_tx_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_uniphy0_tx_rx_ubi32_bias_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_port4_tx_clk_src",
+ .parent_data = gcc_xo_uniphy0_tx_rx_ubi32_bias,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div nss_port5_rx_div_clk_src = {
+ .reg = 0x68440,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port5_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port5_tx_div_clk_src = {
+ .reg = 0x68444,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port5_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_tx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_apss_axi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_DIV2, 4, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(533000000, P_GPLL0, 1.5, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_ubi32_gpll0_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map
+gcc_xo_gpll0_gpll6_ubi32_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_UBI32_PLL, 3 },
+ { P_GPLL0_DIV2, 6 },
+};
+
+static struct clk_rcg2 apss_axi_clk_src = {
+ .cmd_rcgr = 0x38048,
+ .freq_tbl = ftbl_apss_axi_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_ubi32_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_axi_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_ubi32_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_crypto_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(300000000, P_NSS_CRYPTO_PLL, 2, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_nss_crypto_pll_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &nss_crypto_pll.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_nss_crypto_pll_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_NSS_CRYPTO_PLL, 1 },
+ { P_GPLL0, 2 },
+};
+
+static struct clk_rcg2 nss_crypto_clk_src = {
+ .cmd_rcgr = 0x68144,
+ .freq_tbl = ftbl_nss_crypto_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_nss_crypto_pll_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_crypto_clk_src",
+ .parent_data = gcc_xo_nss_crypto_pll_gpll0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div nss_port1_rx_div_clk_src = {
+ .reg = 0x68400,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port1_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port1_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port1_tx_div_clk_src = {
+ .reg = 0x68404,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port1_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port1_tx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port2_rx_div_clk_src = {
+ .reg = 0x68410,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port2_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port2_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port2_tx_div_clk_src = {
+ .reg = 0x68414,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port2_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port2_tx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port3_rx_div_clk_src = {
+ .reg = 0x68420,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port3_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port3_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port3_tx_div_clk_src = {
+ .reg = 0x68424,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port3_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port3_tx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port4_rx_div_clk_src = {
+ .reg = 0x68430,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port4_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port4_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap_div nss_port4_tx_div_clk_src = {
+ .reg = 0x68434,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_port4_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port4_tx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_nss_ubi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(149760000, P_UBI32_PLL, 10, 0, 0),
+ F(187200000, P_UBI32_PLL, 8, 0, 0),
+ F(249600000, P_UBI32_PLL, 6, 0, 0),
+ F(374400000, P_UBI32_PLL, 4, 0, 0),
+ F(748800000, P_UBI32_PLL, 2, 0, 0),
+ F(1497600000, P_UBI32_PLL, 1, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data
+ gcc_xo_ubi32_pll_gpll0_gpll2_gpll4_gpll6[] = {
+ { .fw_name = "xo" },
+ { .hw = &ubi32_pll.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_ubi32_gpll0_gpll2_gpll4_gpll6_map[] = {
+ { P_XO, 0 },
+ { P_UBI32_PLL, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL2, 3 },
+ { P_GPLL4, 4 },
+ { P_GPLL6, 5 },
+};
+
+static struct clk_rcg2 nss_ubi0_clk_src = {
+ .cmd_rcgr = 0x68104,
+ .freq_tbl = ftbl_nss_ubi_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_ubi32_gpll0_gpll2_gpll4_gpll6_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "nss_ubi0_clk_src",
+ .parent_data = gcc_xo_ubi32_pll_gpll0_gpll2_gpll4_gpll6,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_adss_pwm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c008,
+ .freq_tbl = ftbl_adss_pwm_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "adss_pwm_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_i2c_apps_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 2, 5),
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2, 4, 5),
+ F(12500000, P_GPLL0_DIV2, 16, 1, 2),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
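+/*
+ * Frequency tables use F(rate, src, pre_div, m, n). The pre-divider is
+ * stored as (2 * div - 1), which is how half-integer values such as the
+ * 12.5 below are expressed. With GPLL0 at 800 MHz and GPLL0_DIV2 at
+ * 400 MHz (as the entries themselves imply), e.g.
+ * F(3686400, P_GPLL0_DIV2, 1, 144, 15625) gives
+ * 400 MHz * 144 / 15625 = 3.6864 MHz. Every table must end with an
+ * empty sentinel entry, since the rate lookup walks until .freq == 0.
+ */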
+static const struct freq_tbl ftbl_blsp1_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0_DIV2, 1, 144, 15625),
+ F(7372800, P_GPLL0_DIV2, 1, 288, 15625),
+ F(14745600, P_GPLL0_DIV2, 1, 576, 15625),
+ F(16000000, P_GPLL0_DIV2, 5, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ F(64000000, P_GPLL0, 12.5, 1, 1),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x04034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x05034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x06034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x07034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(40000000, P_GPLL0_DIV2, 10, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666666, P_GPLL0, 3, 0, 0),
+ { }
+};
+
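+/*
+ * Each clk_parent_data array is paired with a parent_map of the same
+ * length and order: entry i names parent i, and the map supplies the
+ * hardware mux select value for that parent. Skipped select values
+ * (3 and 5 below) are source inputs this RCG leaves unused.
+ */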
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_sleep_clk[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 4 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .freq_tbl = ftbl_gp_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .freq_tbl = ftbl_gp_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .freq_tbl = ftbl_gp_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll0_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll0_sleep_clk,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
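+/*
+ * clk_fixed_factor has no registers of its own; it only models a
+ * hard-wired divide-by-4 of the PPE clock so consumers see the right
+ * rate, while CLK_SET_RATE_PARENT lets rate requests pass through to
+ * the parent RCG.
+ */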
+static struct clk_fixed_factor nss_ppe_cdiv_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_ppe_cdiv_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
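+/*
+ * clk_regmap_div models a register divider (a 4-bit field at 0x68118
+ * here). The _ro_ops variant never writes the field -- it only reads
+ * the divider back to recalculate the rate, leaving the programming to
+ * whoever configured it (boot firmware, presumably).
+ */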
+static struct clk_regmap_div nss_ubi0_div_clk_src = {
+ .reg = 0x68118,
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_ubi0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ubi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_core_pi_sleep_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 2 },
+ { P_PI_SLEEP, 6 },
+};
+
+static struct clk_rcg2 pcie0_aux_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie0_aux_clk_src",
+ .parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
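+/*
+ * The PCIe pipe clock is generated by the PHY; GCC merely muxes
+ * between that pipe clock and XO. clk_regmap_mux with the _closest_ops
+ * picks the parent whose rate is nearest the request, and
+ * CLK_SET_RATE_PARENT forwards the rate to the selected parent.
+ */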
+static const struct clk_parent_data gcc_pcie20_phy0_pipe_clk_xo[] = {
+ { .fw_name = "pcie20_phy0_pipe_clk" },
+ { .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_pcie20_phy0_pipe_clk_xo_map[] = {
+ { P_PCIE20_PHY0_PIPE, 0 },
+ { P_XO, 2 },
+};
+
+static struct clk_regmap_mux pcie0_pipe_clk_src = {
+ .reg = 0x7501c,
+ .shift = 8,
+ .width = 2,
+ .parent_map = gcc_pcie20_phy0_pipe_clk_xo_map,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie0_pipe_clk_src",
+ .parent_data = gcc_pcie20_phy0_pipe_clk_xo,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
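+/*
+ * Example of the M/N counter at work: F(144000, P_XO, 16, 12, 125) is
+ * 24 MHz / 16 * (12 / 125) = 144 kHz, one of the low SD/eMMC
+ * initialisation rates.
+ */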
+static const struct freq_tbl ftbl_sdcc_apps_clk_src[] = {
+ F(144000, P_XO, 16, 12, 125),
+ F(400000, P_XO, 12, 1, 5),
+ F(24000000, P_GPLL2, 12, 1, 4),
+ F(48000000, P_GPLL2, 12, 1, 2),
+ F(96000000, P_GPLL2, 12, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ F(192000000, P_GPLL2, 6, 0, 0),
+ F(384000000, P_GPLL2, 3, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll0_gpll2_gpll0_out_main_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .freq_tbl = ftbl_sdcc_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_aux_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb0_aux_clk_src = {
+ .cmd_rcgr = 0x3e05c,
+ .freq_tbl = ftbl_usb_aux_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb0_aux_clk_src",
+ .parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_mock_utmi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL6, 6, 1, 3),
+ { }
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll6_gpll0_gpll0_out_main_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL6, 1 },
+ { P_GPLL0, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 usb0_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3e020,
+ .freq_tbl = ftbl_usb_mock_utmi_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb0_mock_utmi_clk_src",
+ .parent_data = gcc_xo_gpll6_gpll0_gpll0_out_main_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct clk_parent_data gcc_usb3phy_0_cc_pipe_clk_xo[] = {
+ { .fw_name = "usb3phy_0_cc_pipe_clk" },
+ { .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_usb3phy_0_cc_pipe_clk_xo_map[] = {
+ { P_USB3PHY_0_PIPE, 0 },
+ { P_XO, 2 },
+};
+
+static struct clk_regmap_mux usb0_pipe_clk_src = {
+ .reg = 0x3e048,
+ .shift = 8,
+ .width = 2,
+ .parent_map = gcc_usb3phy_0_cc_pipe_clk_xo_map,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "usb0_pipe_clk_src",
+ .parent_data = gcc_usb3phy_0_cc_pipe_clk_xo,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+	F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+	F(160000000, P_GPLL0, 5, 0, 0),
+	F(216000000, P_GPLL6, 5, 0, 0),
+	F(308570000, P_GPLL6, 3.5, 0, 0),
+	{ }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+ { .fw_name = "xo"},
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x5d000,
+ .freq_tbl = ftbl_sdcc_ice_core_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll0_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_stm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qdss_stm_clk_src = {
+ .cmd_rcgr = 0x2902C,
+ .freq_tbl = ftbl_qdss_stm_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_stm_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_qdss_traceclkin_clk_src[] = {
+ F(80000000, P_GPLL0_DIV2, 5, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(300000000, P_GPLL4, 4, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data gcc_xo_gpll4_gpll0_gpll0_div2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_main_div2.hw },
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_rcg2 qdss_traceclkin_clk_src = {
+ .cmd_rcgr = 0x29048,
+ .freq_tbl = ftbl_qdss_traceclkin_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_traceclkin_clk_src",
+ .parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb1_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3f020,
+ .freq_tbl = ftbl_usb_mock_utmi_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll6_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb1_mock_utmi_clk_src",
+ .parent_data = gcc_xo_gpll6_gpll0_gpll0_out_main_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
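+/*
+ * The clk_branch entries are simple gates: enable_mask in enable_reg
+ * gates the clock, and halt_reg is polled after an enable/disable to
+ * confirm the branch really toggled. CLK_SET_RATE_PARENT makes each
+ * leaf forward set_rate requests to its RCG.
+ */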
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c020,
+ .clkr = {
+ .enable_reg = 0x1c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_adss_pwm_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &adss_pwm_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
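+/*
+ * BRANCH_HALT_VOTED marks a voted branch: enabling sets this master's
+ * vote bit in a shared enable register (0x0b004 here), and the halt
+ * bit is not polled on disable, since another voter may legitimately
+ * keep the clock running.
+ */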
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x4601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &apss_ahb_postdiv_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_system_noc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_DIV2, 8, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x26004,
+ .freq_tbl = ftbl_system_noc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6_gpll0_out_main_div2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ubi32_mem_noc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(307670000, P_BIAS_PLL_NSS_NOC, 1.5, 0, 0),
+ F(533333333, P_GPLL0, 1.5, 0, 0),
+ { }
+};
+
+static const struct clk_parent_data
+ gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .fw_name = "bias_pll_nss_noc_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 3 },
+ { P_BIAS_PLL_NSS_NOC, 4 },
+};
+
+static struct clk_rcg2 ubi32_mem_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x68088,
+ .freq_tbl = ftbl_ubi32_mem_noc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ubi32_mem_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_bias_pll_nss_noc_clk,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+ .halt_reg = 0x46020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &apss_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04010,
+ .clkr = {
+ .enable_reg = 0x04010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0400c,
+ .clkr = {
+ .enable_reg = 0x0400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05010,
+ .clkr = {
+ .enable_reg = 0x05010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0500c,
+ .clkr = {
+ .enable_reg = 0x0500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06010,
+ .clkr = {
+ .enable_reg = 0x06010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0600c,
+ .clkr = {
+ .enable_reg = 0x0600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0700c,
+ .clkr = {
+ .enable_reg = 0x0700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x0402c,
+ .clkr = {
+ .enable_reg = 0x0402c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart3_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x0502c,
+ .clkr = {
+ .enable_reg = 0x0502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart4_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x0602c,
+ .clkr = {
+ .enable_reg = 0x0602c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart5_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x0702c,
+ .clkr = {
+ .enable_reg = 0x0702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &blsp1_uart6_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll6_out_main_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_main_div2",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x30030,
+ .clkr = {
+ .enable_reg = 0x30030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gp2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gp3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+ .halt_reg = 0x58004,
+ .clkr = {
+ .enable_reg = 0x58004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdio_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ppe_clk = {
+ .halt_reg = 0x68310,
+ .clkr = {
+ .enable_reg = 0x68310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ppe_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ce_apb_clk = {
+ .halt_reg = 0x68174,
+ .clkr = {
+ .enable_reg = 0x68174,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_ce_apb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ce_axi_clk = {
+ .halt_reg = 0x68170,
+ .clkr = {
+ .enable_reg = 0x68170,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_ce_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_cfg_clk = {
+ .halt_reg = 0x68160,
+ .clkr = {
+ .enable_reg = 0x68160,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_cfg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_crypto_clk = {
+ .halt_reg = 0x68164,
+ .clkr = {
+ .enable_reg = 0x68164,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_csr_clk = {
+ .halt_reg = 0x68318,
+ .clkr = {
+ .enable_reg = 0x68318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_csr_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_edma_cfg_clk = {
+ .halt_reg = 0x6819C,
+ .clkr = {
+ .enable_reg = 0x6819C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_edma_cfg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_edma_clk = {
+ .halt_reg = 0x68198,
+ .clkr = {
+ .enable_reg = 0x68198,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_edma_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_noc_clk = {
+ .halt_reg = 0x68168,
+ .clkr = {
+ .enable_reg = 0x68168,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_noc_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ubi0_utcm_clk = {
+ .halt_reg = 0x2606c,
+ .clkr = {
+ .enable_reg = 0x2606c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ubi0_utcm_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_nssnoc_clk = {
+ .halt_reg = 0x26070,
+ .clkr = {
+ .enable_reg = 0x26070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_nssnoc_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_wcss_ahb_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_q6_axi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss_ahb_clk_src = {
+ .cmd_rcgr = 0x59020,
+ .freq_tbl = ftbl_wcss_ahb_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "wcss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_gpll6[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_gpll6_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL4, 3 },
+ { P_GPLL6, 4 },
+};
+
+static struct clk_rcg2 q6_axi_clk_src = {
+ .cmd_rcgr = 0x59120,
+ .freq_tbl = ftbl_q6_axi_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll4_gpll6_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "q6_axi_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_gpll4_gpll6,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_core_axim_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_core_axim_clk_src = {
+ .cmd_rcgr = 0x1F020,
+ .freq_tbl = ftbl_lpass_core_axim_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpass_core_axim_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_snoc_cfg_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_snoc_cfg_clk_src = {
+ .cmd_rcgr = 0x1F040,
+ .freq_tbl = ftbl_lpass_snoc_cfg_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpass_snoc_cfg_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_lpass_q6_axim_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 lpass_q6_axim_clk_src = {
+ .cmd_rcgr = 0x1F008,
+ .freq_tbl = ftbl_lpass_q6_axim_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "lpass_q6_axim_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_wcss_clk_src = {
+ .cmd_rcgr = 0x3a00c,
+ .freq_tbl = ftbl_rbcpr_wcss_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_wcss_clk_src",
+ .parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_lpass_core_axim_clk = {
+ .halt_reg = 0x1F028,
+ .clkr = {
+ .enable_reg = 0x1F028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_core_axim_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_core_axim_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_snoc_cfg_clk = {
+ .halt_reg = 0x1F048,
+ .clkr = {
+ .enable_reg = 0x1F048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_snoc_cfg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_snoc_cfg_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6_axim_clk = {
+ .halt_reg = 0x1F010,
+ .clkr = {
+ .enable_reg = 0x1F010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_axim_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_q6_axim_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6_atbm_at_clk = {
+ .halt_reg = 0x1F018,
+ .clkr = {
+ .enable_reg = 0x1F018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_atbm_at_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_at_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6_pclkdbg_clk = {
+ .halt_reg = 0x1F01C,
+ .clkr = {
+ .enable_reg = 0x1F01C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_pclkdbg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_dap_sync_clk_src.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6ss_tsctr_1to2_clk = {
+ .halt_reg = 0x1F014,
+ .clkr = {
+ .enable_reg = 0x1F014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6ss_tsctr_1to2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_tsctr_div2_clk_src.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_q6ss_trig_clk = {
+ .halt_reg = 0x1F038,
+ .clkr = {
+ .enable_reg = 0x1F038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6ss_trig_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_dap_sync_clk_src.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_tbu_clk = {
+ .halt_reg = 0x12094,
+ .clkr = {
+ .enable_reg = 0xb00c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_tbu_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_q6_axim_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcnoc_lpass_clk = {
+ .halt_reg = 0x27020,
+ .clkr = {
+ .enable_reg = 0x27020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcnoc_lpass_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_core_axim_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_lpass_clk = {
+ .halt_reg = 0x1D044,
+ .clkr = {
+ .enable_reg = 0x1D044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mem_noc_lpass_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_q6_axim_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_lpass_cfg_clk = {
+ .halt_reg = 0x26074,
+ .clkr = {
+ .enable_reg = 0x26074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_lpass_cfg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &lpass_snoc_cfg_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mem_noc_ubi32_clk = {
+ .halt_reg = 0x1D03C,
+ .clkr = {
+ .enable_reg = 0x1D03C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mem_noc_ubi32_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &ubi32_mem_noc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port1_rx_clk = {
+ .halt_reg = 0x68240,
+ .clkr = {
+ .enable_reg = 0x68240,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port1_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port1_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port1_tx_clk = {
+ .halt_reg = 0x68244,
+ .clkr = {
+ .enable_reg = 0x68244,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port1_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port1_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port2_rx_clk = {
+ .halt_reg = 0x68248,
+ .clkr = {
+ .enable_reg = 0x68248,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port2_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port2_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port2_tx_clk = {
+ .halt_reg = 0x6824c,
+ .clkr = {
+ .enable_reg = 0x6824c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port2_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port2_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port3_rx_clk = {
+ .halt_reg = 0x68250,
+ .clkr = {
+ .enable_reg = 0x68250,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port3_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port3_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port3_tx_clk = {
+ .halt_reg = 0x68254,
+ .clkr = {
+ .enable_reg = 0x68254,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port3_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port3_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port4_rx_clk = {
+ .halt_reg = 0x68258,
+ .clkr = {
+ .enable_reg = 0x68258,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port4_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port4_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port4_tx_clk = {
+ .halt_reg = 0x6825c,
+ .clkr = {
+ .enable_reg = 0x6825c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port4_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port4_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port5_rx_clk = {
+ .halt_reg = 0x68260,
+ .clkr = {
+ .enable_reg = 0x68260,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port5_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_port5_tx_clk = {
+ .halt_reg = 0x68264,
+ .clkr = {
+ .enable_reg = 0x68264,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_port5_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ppe_cfg_clk = {
+ .halt_reg = 0x68194,
+ .clkr = {
+ .enable_reg = 0x68194,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_ppe_cfg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ppe_clk = {
+ .halt_reg = 0x68190,
+ .clkr = {
+ .enable_reg = 0x68190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_ppe_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ppe_ipe_clk = {
+ .halt_reg = 0x68338,
+ .clkr = {
+ .enable_reg = 0x68338,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_ppe_ipe_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ptp_ref_clk = {
+ .halt_reg = 0x6816C,
+ .clkr = {
+ .enable_reg = 0x6816C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nss_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_cdiv_clk_src.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_ce_apb_clk = {
+ .halt_reg = 0x6830C,
+ .clkr = {
+ .enable_reg = 0x6830C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_ce_apb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_ce_axi_clk = {
+ .halt_reg = 0x68308,
+ .clkr = {
+ .enable_reg = 0x68308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_ce_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_crypto_clk = {
+ .halt_reg = 0x68314,
+ .clkr = {
+ .enable_reg = 0x68314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_ppe_cfg_clk = {
+ .halt_reg = 0x68304,
+ .clkr = {
+ .enable_reg = 0x68304,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_ppe_cfg_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_ppe_clk = {
+ .halt_reg = 0x68300,
+ .clkr = {
+ .enable_reg = 0x68300,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_ppe_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+ .halt_reg = 0x68180,
+ .clkr = {
+ .enable_reg = 0x68180,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_qosgen_ref_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+ .halt_reg = 0x68188,
+ .clkr = {
+ .enable_reg = 0x68188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_snoc_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &system_noc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+ .halt_reg = 0x68184,
+ .clkr = {
+ .enable_reg = 0x68184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_timeout_ref_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_div4_clk_src.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_ubi0_ahb_clk = {
+ .halt_reg = 0x68270,
+ .clkr = {
+ .enable_reg = 0x68270,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_nssnoc_ubi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_port1_mac_clk = {
+ .halt_reg = 0x68320,
+ .clkr = {
+ .enable_reg = 0x68320,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_port1_mac_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_port2_mac_clk = {
+ .halt_reg = 0x68324,
+ .clkr = {
+ .enable_reg = 0x68324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_port2_mac_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_port3_mac_clk = {
+ .halt_reg = 0x68328,
+ .clkr = {
+ .enable_reg = 0x68328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_port3_mac_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_port4_mac_clk = {
+ .halt_reg = 0x6832c,
+ .clkr = {
+ .enable_reg = 0x6832c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_port4_mac_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_port5_mac_clk = {
+ .halt_reg = 0x68330,
+ .clkr = {
+ .enable_reg = 0x68330,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_port5_mac_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ppe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
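+/*
+ * BRANCH_HALT_DELAY: these UBI branches have no reliable halt status,
+ * so the driver just waits briefly after toggling the enable bit
+ * instead of polling halt_reg.
+ */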
+static struct clk_branch gcc_ubi0_ahb_clk = {
+ .halt_reg = 0x6820C,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6820C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ubi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ce_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ubi0_axi_clk = {
+ .halt_reg = 0x68200,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x68200,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ubi0_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &ubi32_mem_noc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ubi0_nc_axi_clk = {
+ .halt_reg = 0x68204,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x68204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ubi0_nc_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &snoc_nssnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ubi0_core_clk = {
+ .halt_reg = 0x68210,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x68210,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ubi0_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_ubi0_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_ahb_clk = {
+ .halt_reg = 0x75010,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_aux_clk = {
+ .halt_reg = 0x75014,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_m_clk = {
+ .halt_reg = 0x75008,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_axi_m_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_clk = {
+ .halt_reg = 0x7500c,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_axi_s_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_pcie0_axi_clk = {
+ .halt_reg = 0x26048,
+ .clkr = {
+ .enable_reg = 0x26048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_pcie0_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_pipe_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_pipe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x29084,
+ .clkr = {
+ .enable_reg = 0x29084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_dap_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &qdss_dap_sync_clk_src.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x57020,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &sdcc1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+ .halt_reg = 0x56008,
+ .clkr = {
+ .enable_reg = 0x56008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port1_rx_clk = {
+ .halt_reg = 0x56010,
+ .clkr = {
+ .enable_reg = 0x56010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port1_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port1_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port1_tx_clk = {
+ .halt_reg = 0x56014,
+ .clkr = {
+ .enable_reg = 0x56014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port1_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port1_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port2_rx_clk = {
+ .halt_reg = 0x56018,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port2_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port2_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port2_tx_clk = {
+ .halt_reg = 0x5601c,
+ .clkr = {
+ .enable_reg = 0x5601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port2_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port2_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port3_rx_clk = {
+ .halt_reg = 0x56020,
+ .clkr = {
+ .enable_reg = 0x56020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port3_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port3_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port3_tx_clk = {
+ .halt_reg = 0x56024,
+ .clkr = {
+ .enable_reg = 0x56024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port3_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port3_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port4_rx_clk = {
+ .halt_reg = 0x56028,
+ .clkr = {
+ .enable_reg = 0x56028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port4_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port4_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port4_tx_clk = {
+ .halt_reg = 0x5602c,
+ .clkr = {
+ .enable_reg = 0x5602c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port4_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port4_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port5_rx_clk = {
+ .halt_reg = 0x56030,
+ .clkr = {
+ .enable_reg = 0x56030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port5_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_port5_tx_clk = {
+ .halt_reg = 0x56034,
+ .clkr = {
+ .enable_reg = 0x56034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_port5_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+ .halt_reg = 0x5600C,
+ .clkr = {
+ .enable_reg = 0x5600C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy0_sys_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+ .halt_reg = 0x56108,
+ .clkr = {
+ .enable_reg = 0x56108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_port5_rx_clk = {
+ .halt_reg = 0x56110,
+ .clkr = {
+ .enable_reg = 0x56110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy1_port5_rx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_rx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_port5_tx_clk = {
+ .halt_reg = 0x56114,
+ .clkr = {
+ .enable_reg = 0x56114,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy1_port5_tx_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &nss_port5_tx_div_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+ .halt_reg = 0x5610C,
+ .clkr = {
+ .enable_reg = 0x5610C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_uniphy1_sys_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+ .halt_reg = 0x3e044,
+ .clkr = {
+ .enable_reg = 0x3e044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb0_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb0_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+ .halt_reg = 0x3e000,
+ .clkr = {
+ .enable_reg = 0x3e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb0_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb0_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
+ .halt_reg = 0x47014,
+ .clkr = {
+ .enable_reg = 0x47014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_bus_timeout2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb0_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
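+/*
+ * Root clock generator (RCG) for the PCIe rate-change clock, muxing
+ * between XO and GPLL0 with a 5-bit half-integer divider.
+ */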
+static struct clk_rcg2 pcie0_rchng_clk_src = {
+ .cmd_rcgr = 0x75070,
+ .freq_tbl = ftbl_pcie_rchng_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie0_rchng_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_pcie0_rchng_clk = {
+ .halt_reg = 0x75070,
+ .clkr = {
+ .enable_reg = 0x75070,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_rchng_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
+ .halt_reg = 0x75048,
+ .clkr = {
+ .enable_reg = 0x75048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcie0_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb0_axi_clk = {
+ .halt_reg = 0x26040,
+ .clkr = {
+ .enable_reg = 0x26040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb0_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb0_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+ .halt_reg = 0x3e008,
+ .clkr = {
+ .enable_reg = 0x3e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb0_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb0_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+ .halt_reg = 0x3e080,
+ .clkr = {
+ .enable_reg = 0x3e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb0_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_pipe_clk = {
+ .halt_reg = 0x3e040,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x3e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb0_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb0_pipe_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+ .halt_reg = 0x3e004,
+ .clkr = {
+ .enable_reg = 0x3e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb0_sleep_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sleep_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_master_clk = {
+ .halt_reg = 0x3f000,
+ .clkr = {
+ .enable_reg = 0x3f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb1_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_mock_utmi_clk = {
+ .halt_reg = 0x3f008,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb1_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &usb1_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_phy_cfg_ahb_clk = {
+ .halt_reg = 0x3f080,
+ .clkr = {
+ .enable_reg = 0x3f080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb1_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_sleep_clk = {
+ .halt_reg = 0x3f004,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb1_sleep_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sleep_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+ .halt_reg = 0x56308,
+ .clkr = {
+ .enable_reg = 0x56308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cmn_12gpll_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+ .halt_reg = 0x5630c,
+ .clkr = {
+ .enable_reg = 0x5630c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cmn_12gpll_sys_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_xo_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x5d014,
+ .clkr = {
+ .enable_reg = 0x5d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &sdcc1_ice_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
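+/*
+ * PLL configuration: 'l' is the integer feedback multiplier and
+ * 'alpha' the fractional part applied to the XO reference; the
+ * remaining fields seed the CONFIG_CTL and output/divider controls.
+ */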
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x57,
+ .config_ctl_val = 0x240d6aa8,
+ .config_ctl_hi_val = 0x3c2,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .alpha_hi = 0x0,
+ .config_ctl_val = 0x4001055b,
+ .main_output_mask = BIT(0),
+ .pre_div_val = 0x0,
+ .pre_div_mask = GENMASK(14, 12),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .vco_mask = GENMASK(21, 20),
+ .vco_val = 0x0,
+ .alpha_en_mask = BIT(24),
+};
+
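+/* Fixed-factor clocks registered as bare clk_hws (no regmap access) */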
+static struct clk_hw *gcc_ipq6018_hws[] = {
+ &gpll0_out_main_div2.hw,
+ &gcc_xo_div4_clk_src.hw,
+ &nss_ppe_cdiv_clk_src.hw,
+ &gpll6_out_main_div2.hw,
+ &qdss_dap_sync_clk_src.hw,
+ &qdss_tsctr_div2_clk_src.hw,
+};
+
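+/* Clocks indexed by the IDs from dt-bindings/clock/qcom,gcc-ipq6018.h */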
+static struct clk_regmap *gcc_ipq6018_clks[] = {
+ [GPLL0_MAIN] = &gpll0_main.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
+ [UBI32_PLL] = &ubi32_pll.clkr,
+ [GPLL6_MAIN] = &gpll6_main.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL4_MAIN] = &gpll4_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [GPLL2_MAIN] = &gpll2_main.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [NSS_CRYPTO_PLL_MAIN] = &nss_crypto_pll_main.clkr,
+ [NSS_CRYPTO_PLL] = &nss_crypto_pll.clkr,
+ [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
+ [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
+ [NSS_PPE_CLK_SRC] = &nss_ppe_clk_src.clkr,
+ [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [SNOC_NSSNOC_BFDCD_CLK_SRC] = &snoc_nssnoc_bfdcd_clk_src.clkr,
+ [NSS_CE_CLK_SRC] = &nss_ce_clk_src.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [NSS_PORT5_RX_CLK_SRC] = &nss_port5_rx_clk_src.clkr,
+ [NSS_PORT5_TX_CLK_SRC] = &nss_port5_tx_clk_src.clkr,
+ [UBI32_MEM_NOC_BFDCD_CLK_SRC] = &ubi32_mem_noc_bfdcd_clk_src.clkr,
+ [PCIE0_AXI_CLK_SRC] = &pcie0_axi_clk_src.clkr,
+ [USB0_MASTER_CLK_SRC] = &usb0_master_clk_src.clkr,
+ [APSS_AHB_POSTDIV_CLK_SRC] = &apss_ahb_postdiv_clk_src.clkr,
+ [NSS_PORT1_RX_CLK_SRC] = &nss_port1_rx_clk_src.clkr,
+ [NSS_PORT1_TX_CLK_SRC] = &nss_port1_tx_clk_src.clkr,
+ [NSS_PORT2_RX_CLK_SRC] = &nss_port2_rx_clk_src.clkr,
+ [NSS_PORT2_TX_CLK_SRC] = &nss_port2_tx_clk_src.clkr,
+ [NSS_PORT3_RX_CLK_SRC] = &nss_port3_rx_clk_src.clkr,
+ [NSS_PORT3_TX_CLK_SRC] = &nss_port3_tx_clk_src.clkr,
+ [NSS_PORT4_RX_CLK_SRC] = &nss_port4_rx_clk_src.clkr,
+ [NSS_PORT4_TX_CLK_SRC] = &nss_port4_tx_clk_src.clkr,
+ [NSS_PORT5_RX_DIV_CLK_SRC] = &nss_port5_rx_div_clk_src.clkr,
+ [NSS_PORT5_TX_DIV_CLK_SRC] = &nss_port5_tx_div_clk_src.clkr,
+ [APSS_AXI_CLK_SRC] = &apss_axi_clk_src.clkr,
+ [NSS_CRYPTO_CLK_SRC] = &nss_crypto_clk_src.clkr,
+ [NSS_PORT1_RX_DIV_CLK_SRC] = &nss_port1_rx_div_clk_src.clkr,
+ [NSS_PORT1_TX_DIV_CLK_SRC] = &nss_port1_tx_div_clk_src.clkr,
+ [NSS_PORT2_RX_DIV_CLK_SRC] = &nss_port2_rx_div_clk_src.clkr,
+ [NSS_PORT2_TX_DIV_CLK_SRC] = &nss_port2_tx_div_clk_src.clkr,
+ [NSS_PORT3_RX_DIV_CLK_SRC] = &nss_port3_rx_div_clk_src.clkr,
+ [NSS_PORT3_TX_DIV_CLK_SRC] = &nss_port3_tx_div_clk_src.clkr,
+ [NSS_PORT4_RX_DIV_CLK_SRC] = &nss_port4_rx_div_clk_src.clkr,
+ [NSS_PORT4_TX_DIV_CLK_SRC] = &nss_port4_tx_div_clk_src.clkr,
+ [NSS_UBI0_CLK_SRC] = &nss_ubi0_clk_src.clkr,
+ [ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [NSS_UBI0_DIV_CLK_SRC] = &nss_ubi0_div_clk_src.clkr,
+ [PCIE0_AUX_CLK_SRC] = &pcie0_aux_clk_src.clkr,
+ [PCIE0_PIPE_CLK_SRC] = &pcie0_pipe_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [USB0_AUX_CLK_SRC] = &usb0_aux_clk_src.clkr,
+ [USB0_MOCK_UTMI_CLK_SRC] = &usb0_mock_utmi_clk_src.clkr,
+ [USB0_PIPE_CLK_SRC] = &usb0_pipe_clk_src.clkr,
+ [USB1_MOCK_UTMI_CLK_SRC] = &usb1_mock_utmi_clk_src.clkr,
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+ [GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
+ [GCC_NSS_CE_APB_CLK] = &gcc_nss_ce_apb_clk.clkr,
+ [GCC_NSS_CE_AXI_CLK] = &gcc_nss_ce_axi_clk.clkr,
+ [GCC_NSS_CFG_CLK] = &gcc_nss_cfg_clk.clkr,
+ [GCC_NSS_CRYPTO_CLK] = &gcc_nss_crypto_clk.clkr,
+ [GCC_NSS_CSR_CLK] = &gcc_nss_csr_clk.clkr,
+ [GCC_NSS_EDMA_CFG_CLK] = &gcc_nss_edma_cfg_clk.clkr,
+ [GCC_NSS_EDMA_CLK] = &gcc_nss_edma_clk.clkr,
+ [GCC_NSS_NOC_CLK] = &gcc_nss_noc_clk.clkr,
+ [GCC_UBI0_UTCM_CLK] = &gcc_ubi0_utcm_clk.clkr,
+ [GCC_SNOC_NSSNOC_CLK] = &gcc_snoc_nssnoc_clk.clkr,
+ [GCC_NSS_PORT1_RX_CLK] = &gcc_nss_port1_rx_clk.clkr,
+ [GCC_NSS_PORT1_TX_CLK] = &gcc_nss_port1_tx_clk.clkr,
+ [GCC_NSS_PORT2_RX_CLK] = &gcc_nss_port2_rx_clk.clkr,
+ [GCC_NSS_PORT2_TX_CLK] = &gcc_nss_port2_tx_clk.clkr,
+ [GCC_NSS_PORT3_RX_CLK] = &gcc_nss_port3_rx_clk.clkr,
+ [GCC_NSS_PORT3_TX_CLK] = &gcc_nss_port3_tx_clk.clkr,
+ [GCC_NSS_PORT4_RX_CLK] = &gcc_nss_port4_rx_clk.clkr,
+ [GCC_NSS_PORT4_TX_CLK] = &gcc_nss_port4_tx_clk.clkr,
+ [GCC_NSS_PORT5_RX_CLK] = &gcc_nss_port5_rx_clk.clkr,
+ [GCC_NSS_PORT5_TX_CLK] = &gcc_nss_port5_tx_clk.clkr,
+ [GCC_NSS_PPE_CFG_CLK] = &gcc_nss_ppe_cfg_clk.clkr,
+ [GCC_NSS_PPE_CLK] = &gcc_nss_ppe_clk.clkr,
+ [GCC_NSS_PPE_IPE_CLK] = &gcc_nss_ppe_ipe_clk.clkr,
+ [GCC_NSS_PTP_REF_CLK] = &gcc_nss_ptp_ref_clk.clkr,
+ [GCC_NSSNOC_CE_APB_CLK] = &gcc_nssnoc_ce_apb_clk.clkr,
+ [GCC_NSSNOC_CE_AXI_CLK] = &gcc_nssnoc_ce_axi_clk.clkr,
+ [GCC_NSSNOC_CRYPTO_CLK] = &gcc_nssnoc_crypto_clk.clkr,
+ [GCC_NSSNOC_PPE_CFG_CLK] = &gcc_nssnoc_ppe_cfg_clk.clkr,
+ [GCC_NSSNOC_PPE_CLK] = &gcc_nssnoc_ppe_clk.clkr,
+ [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+ [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+ [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+ [GCC_NSSNOC_UBI0_AHB_CLK] = &gcc_nssnoc_ubi0_ahb_clk.clkr,
+ [GCC_PORT1_MAC_CLK] = &gcc_port1_mac_clk.clkr,
+ [GCC_PORT2_MAC_CLK] = &gcc_port2_mac_clk.clkr,
+ [GCC_PORT3_MAC_CLK] = &gcc_port3_mac_clk.clkr,
+ [GCC_PORT4_MAC_CLK] = &gcc_port4_mac_clk.clkr,
+ [GCC_PORT5_MAC_CLK] = &gcc_port5_mac_clk.clkr,
+ [GCC_UBI0_AHB_CLK] = &gcc_ubi0_ahb_clk.clkr,
+ [GCC_UBI0_AXI_CLK] = &gcc_ubi0_axi_clk.clkr,
+ [GCC_UBI0_NC_AXI_CLK] = &gcc_ubi0_nc_axi_clk.clkr,
+ [GCC_UBI0_CORE_CLK] = &gcc_ubi0_core_clk.clkr,
+ [GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr,
+ [GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
+ [GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
+ [GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr,
+ [GCC_SYS_NOC_PCIE0_AXI_CLK] = &gcc_sys_noc_pcie0_axi_clk.clkr,
+ [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+ [GCC_UNIPHY0_PORT1_RX_CLK] = &gcc_uniphy0_port1_rx_clk.clkr,
+ [GCC_UNIPHY0_PORT1_TX_CLK] = &gcc_uniphy0_port1_tx_clk.clkr,
+ [GCC_UNIPHY0_PORT2_RX_CLK] = &gcc_uniphy0_port2_rx_clk.clkr,
+ [GCC_UNIPHY0_PORT2_TX_CLK] = &gcc_uniphy0_port2_tx_clk.clkr,
+ [GCC_UNIPHY0_PORT3_RX_CLK] = &gcc_uniphy0_port3_rx_clk.clkr,
+ [GCC_UNIPHY0_PORT3_TX_CLK] = &gcc_uniphy0_port3_tx_clk.clkr,
+ [GCC_UNIPHY0_PORT4_RX_CLK] = &gcc_uniphy0_port4_rx_clk.clkr,
+ [GCC_UNIPHY0_PORT4_TX_CLK] = &gcc_uniphy0_port4_tx_clk.clkr,
+ [GCC_UNIPHY0_PORT5_RX_CLK] = &gcc_uniphy0_port5_rx_clk.clkr,
+ [GCC_UNIPHY0_PORT5_TX_CLK] = &gcc_uniphy0_port5_tx_clk.clkr,
+ [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+ [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+ [GCC_UNIPHY1_PORT5_RX_CLK] = &gcc_uniphy1_port5_rx_clk.clkr,
+ [GCC_UNIPHY1_PORT5_TX_CLK] = &gcc_uniphy1_port5_tx_clk.clkr,
+ [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+ [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+ [GCC_SYS_NOC_USB0_AXI_CLK] = &gcc_sys_noc_usb0_axi_clk.clkr,
+ [GCC_SNOC_BUS_TIMEOUT2_AHB_CLK] = &gcc_snoc_bus_timeout2_ahb_clk.clkr,
+ [GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+ [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+ [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+ [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
+ [GCC_USB1_MASTER_CLK] = &gcc_usb1_master_clk.clkr,
+ [GCC_USB1_MOCK_UTMI_CLK] = &gcc_usb1_mock_utmi_clk.clkr,
+ [GCC_USB1_PHY_CFG_AHB_CLK] = &gcc_usb1_phy_cfg_ahb_clk.clkr,
+ [GCC_USB1_SLEEP_CLK] = &gcc_usb1_sleep_clk.clkr,
+ [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
+ [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
+ [WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr,
+ [Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr,
+ [RBCPR_WCSS_CLK_SRC] = &rbcpr_wcss_clk_src.clkr,
+ [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+ [LPASS_CORE_AXIM_CLK_SRC] = &lpass_core_axim_clk_src.clkr,
+ [GCC_LPASS_SNOC_CFG_CLK] = &gcc_lpass_snoc_cfg_clk.clkr,
+ [LPASS_SNOC_CFG_CLK_SRC] = &lpass_snoc_cfg_clk_src.clkr,
+ [GCC_LPASS_Q6_AXIM_CLK] = &gcc_lpass_q6_axim_clk.clkr,
+ [LPASS_Q6_AXIM_CLK_SRC] = &lpass_q6_axim_clk_src.clkr,
+ [GCC_LPASS_Q6_ATBM_AT_CLK] = &gcc_lpass_q6_atbm_at_clk.clkr,
+ [GCC_LPASS_Q6_PCLKDBG_CLK] = &gcc_lpass_q6_pclkdbg_clk.clkr,
+ [GCC_LPASS_Q6SS_TSCTR_1TO2_CLK] = &gcc_lpass_q6ss_tsctr_1to2_clk.clkr,
+ [GCC_LPASS_Q6SS_TRIG_CLK] = &gcc_lpass_q6ss_trig_clk.clkr,
+ [GCC_LPASS_TBU_CLK] = &gcc_lpass_tbu_clk.clkr,
+ [GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
+ [GCC_MEM_NOC_UBI32_CLK] = &gcc_mem_noc_ubi32_clk.clkr,
+ [GCC_MEM_NOC_LPASS_CLK] = &gcc_mem_noc_lpass_clk.clkr,
+ [GCC_SNOC_LPASS_CFG_CLK] = &gcc_snoc_lpass_cfg_clk.clkr,
+ [QDSS_STM_CLK_SRC] = &qdss_stm_clk_src.clkr,
+ [QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr,
+};
+
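+/* Reset map entries are { register offset, bit } pairs */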
+static const struct qcom_reset_map gcc_ipq6018_resets[] = {
+ [GCC_BLSP1_BCR] = { 0x01000, 0 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000, 0 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038, 0 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008, 0 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028, 0 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04008, 0 },
+ [GCC_BLSP1_UART3_BCR] = { 0x04028, 0 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05008, 0 },
+ [GCC_BLSP1_UART4_BCR] = { 0x05028, 0 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06008, 0 },
+ [GCC_BLSP1_UART5_BCR] = { 0x06028, 0 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07008, 0 },
+ [GCC_BLSP1_UART6_BCR] = { 0x07028, 0 },
+ [GCC_IMEM_BCR] = { 0x0e000, 0 },
+ [GCC_SMMU_BCR] = { 0x12000, 0 },
+ [GCC_APSS_TCU_BCR] = { 0x12050, 0 },
+ [GCC_SMMU_XPU_BCR] = { 0x12054, 0 },
+ [GCC_PCNOC_TBU_BCR] = { 0x12058, 0 },
+ [GCC_SMMU_CFG_BCR] = { 0x1208c, 0 },
+ [GCC_PRNG_BCR] = { 0x13000, 0 },
+ [GCC_BOOT_ROM_BCR] = { 0x13008, 0 },
+ [GCC_CRYPTO_BCR] = { 0x16000, 0 },
+ [GCC_WCSS_BCR] = { 0x18000, 0 },
+ [GCC_WCSS_Q6_BCR] = { 0x18100, 0 },
+ [GCC_NSS_BCR] = { 0x19000, 0 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+ [GCC_ADSS_BCR] = { 0x1c000, 0 },
+ [GCC_DDRSS_BCR] = { 0x1e000, 0 },
+ [GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
+ [GCC_PCNOC_BCR] = { 0x27018, 0 },
+ [GCC_TCSR_BCR] = { 0x28000, 0 },
+ [GCC_QDSS_BCR] = { 0x29000, 0 },
+ [GCC_DCD_BCR] = { 0x2a000, 0 },
+ [GCC_MSG_RAM_BCR] = { 0x2b000, 0 },
+ [GCC_MPM_BCR] = { 0x2c000, 0 },
+ [GCC_SPDM_BCR] = { 0x2f000, 0 },
+ [GCC_RBCPR_BCR] = { 0x33000, 0 },
+ [GCC_RBCPR_MX_BCR] = { 0x33014, 0 },
+ [GCC_TLMM_BCR] = { 0x34000, 0 },
+ [GCC_RBCPR_WCSS_BCR] = { 0x3a000, 0 },
+ [GCC_USB0_PHY_BCR] = { 0x3e034, 0 },
+ [GCC_USB3PHY_0_PHY_BCR] = { 0x3e03c, 0 },
+ [GCC_USB0_BCR] = { 0x3e070, 0 },
+ [GCC_USB1_BCR] = { 0x3f070, 0 },
+ [GCC_QUSB2_0_PHY_BCR] = { 0x4103c, 0 },
+ [GCC_QUSB2_1_PHY_BCR] = { 0x41040, 0 },
+ [GCC_SDCC1_BCR] = { 0x42000, 0 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000, 0 },
+ [GCC_SNOC_BUS_TIMEOUT1_BCR] = { 0x47008, 0 },
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x47010, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048, 0 },
+ [GCC_UNIPHY0_BCR] = { 0x56000, 0 },
+ [GCC_UNIPHY1_BCR] = { 0x56100, 0 },
+ [GCC_CMN_12GPLL_BCR] = { 0x56300, 0 },
+ [GCC_QPIC_BCR] = { 0x57018, 0 },
+ [GCC_MDIO_BCR] = { 0x58000, 0 },
+ [GCC_WCSS_CORE_TBU_BCR] = { 0x66000, 0 },
+ [GCC_WCSS_Q6_TBU_BCR] = { 0x67000, 0 },
+ [GCC_USB0_TBU_BCR] = { 0x6a000, 0 },
+ [GCC_PCIE0_TBU_BCR] = { 0x6b000, 0 },
+ [GCC_NSS_NOC_TBU_BCR] = { 0x6e000, 0 },
+ [GCC_PCIE0_BCR] = { 0x75004, 0 },
+ [GCC_PCIE0_PHY_BCR] = { 0x75038, 0 },
+ [GCC_PCIE0PHY_PHY_BCR] = { 0x7503c, 0 },
+ [GCC_PCIE0_LINK_DOWN_BCR] = { 0x75044, 0 },
+ [GCC_DCC_BCR] = { 0x77000, 0 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x78000, 0 },
+ [GCC_SMMU_CATS_BCR] = { 0x7c000, 0 },
+ [GCC_UBI0_AXI_ARES] = { 0x68010, 0 },
+ [GCC_UBI0_AHB_ARES] = { 0x68010, 1 },
+ [GCC_UBI0_NC_AXI_ARES] = { 0x68010, 2 },
+ [GCC_UBI0_DBG_ARES] = { 0x68010, 3 },
+ [GCC_UBI0_CORE_CLAMP_ENABLE] = { 0x68010, 4 },
+ [GCC_UBI0_CLKRST_CLAMP_ENABLE] = { 0x68010, 5 },
+ [GCC_UBI0_UTCM_ARES] = { 0x68010, 6 },
+ [GCC_UBI0_CORE_ARES] = { 0x68010, 7 },
+ [GCC_NSS_CFG_ARES] = { 0x68010, 16 },
+ [GCC_NSS_NOC_ARES] = { 0x68010, 18 },
+ [GCC_NSS_CRYPTO_ARES] = { 0x68010, 19 },
+ [GCC_NSS_CSR_ARES] = { 0x68010, 20 },
+ [GCC_NSS_CE_APB_ARES] = { 0x68010, 21 },
+ [GCC_NSS_CE_AXI_ARES] = { 0x68010, 22 },
+ [GCC_NSSNOC_CE_APB_ARES] = { 0x68010, 23 },
+ [GCC_NSSNOC_CE_AXI_ARES] = { 0x68010, 24 },
+ [GCC_NSSNOC_UBI0_AHB_ARES] = { 0x68010, 25 },
+ [GCC_NSSNOC_SNOC_ARES] = { 0x68010, 27 },
+ [GCC_NSSNOC_CRYPTO_ARES] = { 0x68010, 28 },
+ [GCC_NSSNOC_ATB_ARES] = { 0x68010, 29 },
+ [GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x68010, 30 },
+ [GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x68010, 31 },
+ [GCC_PCIE0_PIPE_ARES] = { 0x75040, 0 },
+ [GCC_PCIE0_SLEEP_ARES] = { 0x75040, 1 },
+ [GCC_PCIE0_CORE_STICKY_ARES] = { 0x75040, 2 },
+ [GCC_PCIE0_AXI_MASTER_ARES] = { 0x75040, 3 },
+ [GCC_PCIE0_AXI_SLAVE_ARES] = { 0x75040, 4 },
+ [GCC_PCIE0_AHB_ARES] = { 0x75040, 5 },
+ [GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 },
+ [GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 },
+ [GCC_PPE_FULL_RESET] = { 0x68014, 0 },
+ [GCC_UNIPHY0_SOFT_RESET] = { 0x56004, 0 },
+ [GCC_UNIPHY0_XPCS_RESET] = { 0x56004, 2 },
+ [GCC_UNIPHY1_SOFT_RESET] = { 0x56104, 0 },
+ [GCC_UNIPHY1_XPCS_RESET] = { 0x56104, 2 },
+ [GCC_EDMA_HW_RESET] = { 0x68014, 0 },
+ [GCC_NSSPORT1_RESET] = { 0x68014, 0 },
+ [GCC_NSSPORT2_RESET] = { 0x68014, 0 },
+ [GCC_NSSPORT3_RESET] = { 0x68014, 0 },
+ [GCC_NSSPORT4_RESET] = { 0x68014, 0 },
+ [GCC_NSSPORT5_RESET] = { 0x68014, 0 },
+ [GCC_UNIPHY0_PORT1_ARES] = { 0x56004, 0 },
+ [GCC_UNIPHY0_PORT2_ARES] = { 0x56004, 0 },
+ [GCC_UNIPHY0_PORT3_ARES] = { 0x56004, 0 },
+ [GCC_UNIPHY0_PORT4_ARES] = { 0x56004, 0 },
+ [GCC_UNIPHY0_PORT5_ARES] = { 0x56004, 0 },
+ [GCC_UNIPHY0_PORT_4_5_RESET] = { 0x56004, 0 },
+ [GCC_UNIPHY0_PORT_4_RESET] = { 0x56004, 0 },
+ [GCC_LPASS_BCR] = { 0x1f000, 0 },
+ [GCC_UBI32_TBU_BCR] = { 0x65000, 0 },
+ [GCC_LPASS_TBU_BCR] = { 0x6c000, 0 },
+ [GCC_WCSSAON_RESET] = { 0x59010, 0 },
+ [GCC_LPASS_Q6_AXIM_ARES] = { 0x1f004, 0 },
+ [GCC_LPASS_Q6SS_TSCTR_1TO2_ARES] = { 0x1f004, 1 },
+ [GCC_LPASS_Q6SS_TRIG_ARES] = { 0x1f004, 2 },
+ [GCC_LPASS_Q6_ATBM_AT_ARES] = { 0x1f004, 3 },
+ [GCC_LPASS_Q6_PCLKDBG_ARES] = { 0x1f004, 4 },
+ [GCC_LPASS_CORE_AXIM_ARES] = { 0x1f004, 5 },
+ [GCC_LPASS_SNOC_CFG_ARES] = { 0x1f004, 6 },
+ [GCC_WCSS_DBG_ARES] = { 0x59008, 0 },
+ [GCC_WCSS_ECAHB_ARES] = { 0x59008, 1 },
+ [GCC_WCSS_ACMT_ARES] = { 0x59008, 2 },
+ [GCC_WCSS_DBG_BDG_ARES] = { 0x59008, 3 },
+ [GCC_WCSS_AHB_S_ARES] = { 0x59008, 4 },
+ [GCC_WCSS_AXI_M_ARES] = { 0x59008, 5 },
+ [GCC_Q6SS_DBG_ARES] = { 0x59110, 0 },
+ [GCC_Q6_AHB_S_ARES] = { 0x59110, 1 },
+ [GCC_Q6_AHB_ARES] = { 0x59110, 2 },
+ [GCC_Q6_AXIM2_ARES] = { 0x59110, 3 },
+ [GCC_Q6_AXIM_ARES] = { 0x59110, 4 },
+};
+
+static const struct of_device_id gcc_ipq6018_match_table[] = {
+ { .compatible = "qcom,gcc-ipq6018" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq6018_match_table);
+
+static const struct regmap_config gcc_ipq6018_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fffc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_ipq6018_desc = {
+ .config = &gcc_ipq6018_regmap_config,
+ .clks = gcc_ipq6018_clks,
+ .num_clks = ARRAY_SIZE(gcc_ipq6018_clks),
+ .resets = gcc_ipq6018_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq6018_resets),
+ .clk_hws = gcc_ipq6018_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq6018_hws),
+};
+
+static int gcc_ipq6018_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_ipq6018_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Disable SW_COLLAPSE for USB0 GDSCR */
+ regmap_update_bits(regmap, 0x3e078, BIT(0), 0x0);
+ /* Enable SW_OVERRIDE for USB0 GDSCR */
+ regmap_update_bits(regmap, 0x3e078, BIT(2), BIT(2));
+ /* Disable SW_COLLAPSE for USB1 GDSCR */
+ regmap_update_bits(regmap, 0x3f078, BIT(0), 0x0);
+ /* Enable SW_OVERRIDE for USB1 GDSCR */
+ regmap_update_bits(regmap, 0x3f078, BIT(2), BIT(2));
+
+ /* SW Workaround for UBI Huayra PLL */
+ regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+
+ clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+ &nss_crypto_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_ipq6018_desc, regmap);
+}
+
+static struct platform_driver gcc_ipq6018_driver = {
+ .probe = gcc_ipq6018_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq6018",
+ .of_match_table = gcc_ipq6018_match_table,
+ },
+};
+
+static int __init gcc_ipq6018_init(void)
+{
+ return platform_driver_register(&gcc_ipq6018_driver);
+}
+core_initcall(gcc_ipq6018_init);
+
+static void __exit gcc_ipq6018_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq6018_driver);
+}
+module_exit(gcc_ipq6018_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GCC IPQ6018 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index d004cdaa0e39..3c3a7ff04562 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -3046,7 +3046,10 @@ static struct clk_branch gcc_usb3_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3060,7 +3063,10 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hdmi_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3074,7 +3080,10 @@ static struct clk_branch gcc_edp_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_edp_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3088,7 +3097,10 @@ static struct clk_branch gcc_ufs_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3102,7 +3114,10 @@ static struct clk_branch gcc_pcie_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3116,7 +3131,10 @@ static struct clk_branch gcc_rx2_usb2_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_rx2_usb2_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3130,7 +3148,10 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_rx1_usb2_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "cxo2",
+ .name = "xo",
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index cf31b5d03270..df1d7056436c 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1996,6 +1996,19 @@ static struct clk_branch gcc_gp3_clk = {
},
};
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x46040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x46040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gpu_bimc_gfx_clk = {
.halt_reg = 0x71010,
.halt_check = BRANCH_HALT,
@@ -2810,6 +2823,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
[GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
[GCC_GPU_BIMC_GFX_SRC_CLK] = &gcc_gpu_bimc_gfx_src_clk.clkr,
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 9b0c4ce2ef4e..46d314d69250 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -330,7 +330,7 @@ static struct clk_alpha_pll gpll0_ao_out_main = {
.parent_names = (const char *[]){ "cxo" },
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_ops,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 38424e63bcae..7f59fb8da033 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -2186,7 +2186,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
.pd = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON | VOTABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -2194,7 +2195,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.pd = {
.name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON | VOTABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc *gcc_sc7180_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f7b370f3acef..f6ce888098be 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
@@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
@@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
@@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
@@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
@@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct clk_regmap *gcc_sdm845_clocks[] = {
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index e5e2492b20c5..9b3923af02a1 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -242,10 +242,12 @@ static struct clk_branch gfx3d_isense_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x1004,
+ .gds_hw_ctrl = 0x1008,
.pd = {
.name = "gpu_cx",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc gpu_gx_gdsc = {
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
new file mode 100644
index 000000000000..a96c0b945de2
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK 0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xF
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+#define CLK_DIS_WAIT_SHIFT 12
+#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT)
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL1_OUT_EVEN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_ODD,
+};
+
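+/* Fabia PLL VCO operating range: 249.6 MHz to 2 GHz */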
+static const struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
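+/* Frequency table entries: F(rate, source, pre-divider, M, N) */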
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc *gpu_cc_sc7180_gdscs[] = {
+ [CX_GDSC] = &cx_gdsc,
+};
+
+static struct clk_regmap *gpu_cc_sc7180_clocks[] = {
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static const struct regmap_config gpu_cc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sc7180_desc = {
+ .config = &gpu_cc_sc7180_regmap_config,
+ .clks = gpu_cc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sc7180_clocks),
+ .gdscs = gpu_cc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sc7180_match_table);
+
+static int gpu_cc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config gpu_cc_pll_config = {};
+ unsigned int value, mask;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sc7180_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* 360MHz Configuration */
+ gpu_cc_pll_config.l = 0x12;
+ gpu_cc_pll_config.alpha = 0xc000;
+ gpu_cc_pll_config.config_ctl_val = 0x20485699;
+ gpu_cc_pll_config.config_ctl_hi_val = 0x00002067;
+ gpu_cc_pll_config.user_ctl_val = 0x00000001;
+ gpu_cc_pll_config.user_ctl_hi_val = 0x00004805;
+ gpu_cc_pll_config.test_ctl_hi_val = 0x40000000;
+
+ clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll_config);
+
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, 0x1098, mask, value);
+
+ /* Configure clk_dis_wait for gpu_cx_gdsc */
+ regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
+ 8 << CLK_DIS_WAIT_SHIFT);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sc7180_driver = {
+ .probe = gpu_cc_sc7180_probe,
+ .driver = {
+ .name = "sc7180-gpucc",
+ .of_match_table = gpu_cc_sc7180_match_table,
+ },
+};
+
+static int __init gpu_cc_sc7180_init(void)
+{
+ return platform_driver_register(&gpu_cc_sc7180_driver);
+}
+subsys_initcall(gpu_cc_sc7180_init);
+
+static void __exit gpu_cc_sc7180_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sc7180_driver);
+}
+module_exit(gpu_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index a6de7101430c..5ff7f5a60620 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -53,10 +53,18 @@ static int qcom_hfpll_probe(struct platform_device *pdev)
struct regmap *regmap;
struct clk_hfpll *h;
struct clk_init_data init = {
- .parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.ops = &clk_ops_hfpll,
+ /*
+ * Rather than marking the clock critical and forcing it to be
+ * always enabled, only make sure it is not disabled as unused:
+ * the firmware remains responsible for enabling this clock (see
+ * the commit log for more information).
+ */
+ .flags = CLK_IGNORE_UNUSED,
};
+ int ret;
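+ /* .index = 0 picks the first parent from the DT "clocks" property */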
+ struct clk_parent_data pdata = { .index = 0 };
h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
if (!h)
@@ -75,11 +83,20 @@ static int qcom_hfpll_probe(struct platform_device *pdev)
0, &init.name))
return -ENODEV;
+ init.parent_data = &pdata;
+
h->d = &hdata;
h->clkr.hw.init = &init;
spin_lock_init(&h->lock);
- return devm_clk_register_regmap(&pdev->dev, &h->clkr);
+ ret = devm_clk_register_regmap(dev, &h->clkr);
+ if (ret) {
+ dev_err(dev, "failed to register regmap clock: %d\n", ret);
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &h->clkr.hw);
}
static struct platform_driver qcom_hfpll_driver = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bcb0a397ef91..015426262d08 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -452,18 +452,6 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct clk_rcg2 gfx3d_clk_src = {
- .cmd_rcgr = 0x4000,
- .hid_width = 5,
- .parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gfx3d_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
- .num_parents = 5,
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -2411,7 +2399,6 @@ static struct clk_regmap *mmcc_msm8974_clocks[] = {
[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
[MDP_CLK_SRC] = &mdp_clk_src.clkr,
- [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
[JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
[JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
new file mode 100644
index 000000000000..dd68983fe22e
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -0,0 +1,2913 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL0_DIV,
+ P_MMPLL0_OUT_EVEN,
+ P_MMPLL1_OUT_EVEN,
+ P_MMPLL3_OUT_EVEN,
+ P_MMPLL4_OUT_EVEN,
+ P_MMPLL5_OUT_EVEN,
+ P_MMPLL6_OUT_EVEN,
+ P_MMPLL7_OUT_EVEN,
+ P_MMPLL10_OUT_EVEN,
+ P_DSI0PLL,
+ P_DSI1PLL,
+ P_DSI0PLL_BYTE,
+ P_DSI1PLL_BYTE,
+ P_HDMIPLL,
+ P_DPVCO,
+ P_DPLINK,
+ P_CORE_BI_PLL_TEST_SE,
+};
+
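+/* GPLL0 divided by two for the multimedia subsystem */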
+static struct clk_fixed_factor gpll0_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_gpll0_div",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "gpll0",
+ .name = "gpll0"
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
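+/* Fabia even-output post-divider: register value to divide ratio */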
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll mmpll0 = {
+ .offset = 0xc000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x1e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll0_out_even = {
+ .offset = 0xc000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll1 = {
+ .offset = 0xc050,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x1e0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll1_out_even = {
+ .offset = 0xc050,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll1.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll3 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll3_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll3.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll4 = {
+ .offset = 0x50,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll4_out_even = {
+ .offset = 0x50,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll4.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll5 = {
+ .offset = 0xa0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll5_out_even = {
+ .offset = 0xa0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll5_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll5.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll6 = {
+ .offset = 0xf0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll6_out_even = {
+ .offset = 0xf0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll6_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll6.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll7 = {
+ .offset = 0x140,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll7_out_even = {
+ .offset = 0x140,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll7_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll7.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll mmpll10 = {
+ .offset = 0x190,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll10_out_even = {
+ .offset = 0x190,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll10_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &mmpll10.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
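+/*
+ * Parent tables: each parent_map[] pairs a P_* selector with the value
+ * programmed into the RCG mux field, and the matching clk_parent_data[]
+ * lists the same parents in the same order.  Parents are resolved either
+ * by DT clock name (fw_name, with the global .name as fallback) or by a
+ * direct clk_hw pointer for clocks owned by this driver.
+ */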
+static const struct parent_map mmss_xo_hdmi_map[] = {
+ { P_XO, 0 },
+ { P_HDMIPLL, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "hdmipll", .name = "hdmipll" },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "dsi0dsi", .name = "dsi0dsi" },
+ { .fw_name = "dsi1dsi", .name = "dsi1dsi" },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "dsi0byte", .name = "dsi0byte" },
+ { .fw_name = "dsi1byte", .name = "dsi1byte" },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_dp_map[] = {
+ { P_XO, 0 },
+ { P_DPLINK, 1 },
+ { P_DPVCO, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_dp[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "dplink", .name = "dplink" },
+ { .fw_name = "dpvco", .name = "dpvco" },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0_OUT_EVEN, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll0_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0_OUT_EVEN, 1 },
+ { P_MMPLL1_OUT_EVEN, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll0_out_even.clkr.hw },
+ { .hw = &mmpll1_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0_OUT_EVEN, 1 },
+ { P_MMPLL5_OUT_EVEN, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll0_out_even.clkr.hw },
+ { .hw = &mmpll5_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0_OUT_EVEN, 1 },
+ { P_MMPLL3_OUT_EVEN, 3 },
+ { P_MMPLL6_OUT_EVEN, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll0_out_even.clkr.hw },
+ { .hw = &mmpll3_out_even.clkr.hw },
+ { .hw = &mmpll6_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL4_OUT_EVEN, 1 },
+ { P_MMPLL7_OUT_EVEN, 2 },
+ { P_MMPLL10_OUT_EVEN, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll4_out_even.clkr.hw },
+ { .hw = &mmpll7_out_even.clkr.hw },
+ { .hw = &mmpll10_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0_OUT_EVEN, 1 },
+ { P_MMPLL7_OUT_EVEN, 2 },
+ { P_MMPLL10_OUT_EVEN, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll0_out_even.clkr.hw },
+ { .hw = &mmpll7_out_even.clkr.hw },
+ { .hw = &mmpll10_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0_OUT_EVEN, 1 },
+ { P_MMPLL4_OUT_EVEN, 2 },
+ { P_MMPLL7_OUT_EVEN, 3 },
+ { P_MMPLL10_OUT_EVEN, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo" },
+ { .hw = &mmpll0_out_even.clkr.hw },
+ { .hw = &mmpll4_out_even.clkr.hw },
+ { .hw = &mmpll7_out_even.clkr.hw },
+ { .hw = &mmpll10_out_even.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
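+/*
+ * RCG2 root clock generators: cmd_rcgr is the base of the CMD/CFG/M/N/D
+ * register group, hid_width is the width of the half-integer divider
+ * field, and mnd_width (where present) enables the M/N:D fractional
+ * counter.  The display byte/pixel sources carry no freq_tbl; their
+ * byte2/pixel ops derive rates from the DSI PHY PLL parent instead.
+ */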
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = 4,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = 4,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
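+/*
+ * Frequency table entries are F(rate, parent, pre-divider, m, n); the
+ * qcom F() macro stores the pre-divider as 2 * div - 1, which is how
+ * half-integer dividers such as 3.5 are expressed.  For example,
+ * F(37500000, P_GPLL0, 16, 0, 0) below is 600 MHz gpll0 / 16 = 37.5 MHz.
+ */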
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+ F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+ F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+ F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+ F(164571429, P_MMPLL10_OUT_EVEN, 3.5, 0, 0),
+ F(256000000, P_MMPLL4_OUT_EVEN, 3, 0, 0),
+ F(274290000, P_MMPLL7_OUT_EVEN, 3.5, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+ F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csiphy_clk_src[] = {
+ F(164571429, P_MMPLL10_OUT_EVEN, 3.5, 0, 0),
+ F(256000000, P_MMPLL4_OUT_EVEN, 3, 0, 0),
+ F(274290000, P_MMPLL7_OUT_EVEN, 3.5, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csiphy_clk_src = {
+ .cmd_rcgr = 0x3800,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csiphy_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csiphytimer_clk_src[] = {
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(269333333, P_MMPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_aux_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_aux_clk_src = {
+ .cmd_rcgr = 0x2260,
+ .hid_width = 5,
+ .parent_map = mmss_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_aux_clk_src",
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_crypto_clk_src[] = {
+ F(101250, P_DPLINK, 1, 5, 16),
+ F(168750, P_DPLINK, 1, 5, 16),
+ F(337500, P_DPLINK, 1, 5, 16),
+ { }
+};
+
+static struct clk_rcg2 dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2220,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dp_map,
+ .freq_tbl = ftbl_dp_crypto_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_crypto_clk_src",
+ .parent_data = mmss_xo_dp,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_link_clk_src[] = {
+ F(162000, P_DPLINK, 2, 0, 0),
+ F(270000, P_DPLINK, 2, 0, 0),
+ F(540000, P_DPLINK, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_link_clk_src = {
+ .cmd_rcgr = 0x2200,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dp_map,
+ .freq_tbl = ftbl_dp_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_link_clk_src",
+ .parent_data = mmss_xo_dp,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_pixel_clk_src[] = {
+ F(154000000, P_DPVCO, 1, 0, 0),
+ F(337500000, P_DPVCO, 2, 0, 0),
+ F(675000000, P_DPVCO, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_pixel_clk_src = {
+ .cmd_rcgr = 0x2240,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dp_map,
+ .freq_tbl = ftbl_dp_pixel_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_pixel_clk_src",
+ .parent_data = mmss_xo_dp,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_esc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .freq_tbl = ftbl_esc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .freq_tbl = ftbl_esc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_extpclk_clk_src[] = {
+ { .src = P_HDMIPLL },
+ { }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_hdmi_map,
+ .freq_tbl = ftbl_extpclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "extpclk_clk_src",
+ .parent_data = mmss_xo_hdmi,
+ .num_parents = 3,
+ .ops = &clk_byte_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_fd_core_clk_src[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+ F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+ F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 fd_core_clk_src = {
+ .cmd_rcgr = 0x3b00,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_fd_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "fd_core_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hdmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+ .cmd_rcgr = 0x2100,
+ .hid_width = 5,
+ .parent_map = mmss_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_hdmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_clk_src",
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(320000000, P_MMPLL7_OUT_EVEN, 3, 0, 0),
+ F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_maxi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(75000000, P_GPLL0_DIV, 4, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(323200000, P_MMPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(406000000, P_MMPLL1_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 maxi_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_maxi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "maxi_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
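+/*
+ * The mclk table relies on the M/N counter for fractional rates.  With
+ * gpll0 at its nominal 600 MHz (gpll0_div = 300 MHz), for example,
+ * F(6000000, P_GPLL0_DIV, 10, 1, 5) is 300 MHz / 10 * 1/5 = 6 MHz and
+ * F(8000000, P_GPLL0_DIV, 1, 2, 75) is 300 MHz * 2/75 = 8 MHz.
+ */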
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(8000000, P_GPLL0_DIV, 1, 2, 75),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16666667, P_GPLL0_DIV, 2, 1, 9),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_DIV, 1, 2, 25),
+ F(33333333, P_GPLL0_DIV, 1, 2, 9),
+ F(48000000, P_GPLL0, 1, 2, 25),
+ F(66666667, P_GPLL0, 1, 2, 9),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(85714286, P_GPLL0, 7, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(275000000, P_MMPLL5_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(330000000, P_MMPLL5_OUT_EVEN, 2.5, 0, 0),
+ F(412500000, P_MMPLL5_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .hid_width = 5,
+ .parent_map = mmss_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(80800000, P_MMPLL0_OUT_EVEN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ahb_clk_src",
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(323200000, P_MMPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(406000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+/* RO (read-only) to Linux */
+static struct clk_rcg2 axi_clk_src = {
+ .cmd_rcgr = 0xd000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "axi_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = 4,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = 4,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_rot_clk_src[] = {
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(275000000, P_MMPLL5_OUT_EVEN, 3, 0, 0),
+ F(330000000, P_MMPLL5_OUT_EVEN, 2.5, 0, 0),
+ F(412500000, P_MMPLL5_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rot_clk_src = {
+ .cmd_rcgr = 0x21a0,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rot_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_core_clk_src[] = {
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(269330000, P_MMPLL0_OUT_EVEN, 3, 0, 0),
+ F(355200000, P_MMPLL6_OUT_EVEN, 2.5, 0, 0),
+ F(444000000, P_MMPLL6_OUT_EVEN, 2, 0, 0),
+ F(533000000, P_MMPLL3_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_core_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_core_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_subcore0_clk_src = {
+ .cmd_rcgr = 0x1060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_subcore0_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_subcore1_clk_src = {
+ .cmd_rcgr = 0x1080,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_subcore1_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe_clk_src[] = {
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(320000000, P_MMPLL7_OUT_EVEN, 3, 0, 0),
+ F(384000000, P_MMPLL4_OUT_EVEN, 2, 0, 0),
+ F(404000000, P_MMPLL0_OUT_EVEN, 2, 0, 0),
+ F(480000000, P_MMPLL7_OUT_EVEN, 2, 0, 0),
+ F(576000000, P_MMPLL10_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 8,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
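+/*
+ * Branch (gate) clocks: halt_reg and enable_reg share the same offset
+ * and the gate is BIT(0) of that register.  CLK_SET_RATE_PARENT lets a
+ * rate request on the branch propagate to its RCG source; the AXI
+ * branches leave it out since axi_clk_src is read-only to Linux.
+ */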
+static struct clk_branch misc_ahb_clk = {
+ .halt_reg = 0x328,
+ .clkr = {
+ .enable_reg = 0x328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "misc_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch video_core_clk = {
+ .halt_reg = 0x1028,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_core_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch video_ahb_clk = {
+ .halt_reg = 0x1030,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch video_axi_clk = {
+ .halt_reg = 0x1034,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_maxi_clk = {
+ .halt_reg = 0x1038,
+ .clkr = {
+ .enable_reg = 0x1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_maxi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch video_subcore0_clk = {
+ .halt_reg = 0x1048,
+ .clkr = {
+ .enable_reg = 0x1048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_subcore0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_subcore0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch video_subcore1_clk = {
+ .halt_reg = 0x104c,
+ .clkr = {
+ .enable_reg = 0x104c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_subcore1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_subcore1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_dp_ahb_clk = {
+ .halt_reg = 0x230c,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_dp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+ .halt_reg = 0x2320,
+ .clkr = {
+ .enable_reg = 0x2320,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+ .halt_reg = 0x2324,
+ .clkr = {
+ .enable_reg = 0x2324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_extpclk_clk",
+ .parent_hws = (const struct clk_hw *[]){ &extpclk_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vsync_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+ .halt_reg = 0x2338,
+ .clkr = {
+ .enable_reg = 0x2338,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &hdmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_rot_clk = {
+ .halt_reg = 0x2350,
+ .clkr = {
+ .enable_reg = 0x2350,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_rot_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rot_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_link_clk = {
+ .halt_reg = 0x2354,
+ .clkr = {
+ .enable_reg = 0x2354,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2358,
+ .clkr = {
+ .enable_reg = 0x2358,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_crypto_clk = {
+ .halt_reg = 0x235c,
+ .clkr = {
+ .enable_reg = 0x235c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_pixel_clk = {
+ .halt_reg = 0x2360,
+ .clkr = {
+ .enable_reg = 0x2360,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_pixel_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_aux_clk = {
+ .halt_reg = 0x2364,
+ .clkr = {
+ .enable_reg = 0x2364,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_intf_clk = {
+ .halt_reg = 0x2374,
+ .clkr = {
+ .enable_reg = 0x2374,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_intf_clk = {
+ .halt_reg = 0x2378,
+ .clkr = {
+ .enable_reg = 0x2378,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0phytimer_clk = {
+ .halt_reg = 0x3024,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1phytimer_clk = {
+ .halt_reg = 0x3054,
+ .clkr = {
+ .enable_reg = 0x3054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2phytimer_clk = {
+ .halt_reg = 0x3084,
+ .clkr = {
+ .enable_reg = 0x3084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_clk = {
+ .halt_reg = 0x30b4,
+ .clkr = {
+ .enable_reg = 0x30b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+ .halt_reg = 0x30bc,
+ .clkr = {
+ .enable_reg = 0x30bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+ .halt_reg = 0x30d4,
+ .clkr = {
+ .enable_reg = 0x30d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+ .halt_reg = 0x30e4,
+ .clkr = {
+ .enable_reg = 0x30e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_clk = {
+ .halt_reg = 0x3124,
+ .clkr = {
+ .enable_reg = 0x3124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+ .halt_reg = 0x3128,
+ .clkr = {
+ .enable_reg = 0x3128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+ .halt_reg = 0x3144,
+ .clkr = {
+ .enable_reg = 0x3144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+ .halt_reg = 0x3154,
+ .clkr = {
+ .enable_reg = 0x3154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_clk = {
+ .halt_reg = 0x3184,
+ .clkr = {
+ .enable_reg = 0x3184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+ .halt_reg = 0x3188,
+ .clkr = {
+ .enable_reg = 0x3188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+ .halt_reg = 0x31a4,
+ .clkr = {
+ .enable_reg = 0x31a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+ .halt_reg = 0x31b4,
+ .clkr = {
+ .enable_reg = 0x31b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_clk = {
+ .halt_reg = 0x31e4,
+ .clkr = {
+ .enable_reg = 0x31e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+ .halt_reg = 0x31e8,
+ .clkr = {
+ .enable_reg = 0x31e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+ .halt_reg = 0x3204,
+ .clkr = {
+ .enable_reg = 0x3204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+ .halt_reg = 0x3214,
+ .clkr = {
+ .enable_reg = 0x3214,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+ .halt_reg = 0x3224,
+ .clkr = {
+ .enable_reg = 0x3224,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_clk = {
+ .halt_reg = 0x3344,
+ .clkr = {
+ .enable_reg = 0x3344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cci_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_ahb_clk = {
+ .halt_reg = 0x3348,
+ .clkr = {
+ .enable_reg = 0x3348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+ .halt_reg = 0x3384,
+ .clkr = {
+ .enable_reg = 0x3384,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+ .halt_reg = 0x33b4,
+ .clkr = {
+ .enable_reg = 0x33b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+ .halt_reg = 0x33e4,
+ .clkr = {
+ .enable_reg = 0x33e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+ .halt_reg = 0x3414,
+ .clkr = {
+ .enable_reg = 0x3414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+ .halt_reg = 0x3484,
+ .clkr = {
+ .enable_reg = 0x3484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_ahb_clk = {
+ .halt_reg = 0x348c,
+ .clkr = {
+ .enable_reg = 0x348c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+ .halt_reg = 0x3494,
+ .clkr = {
+ .enable_reg = 0x3494,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg0_clk = {
+ .halt_reg = 0x35a8,
+ .clkr = {
+ .enable_reg = 0x35a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_ahb_clk = {
+ .halt_reg = 0x35b4,
+ .clkr = {
+ .enable_reg = 0x35b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_axi_clk = {
+ .halt_reg = 0x35b8,
+ .clkr = {
+ .enable_reg = 0x35b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_ahb_clk = {
+ .halt_reg = 0x3668,
+ .clkr = {
+ .enable_reg = 0x3668,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_ahb_clk = {
+ .halt_reg = 0x3678,
+ .clkr = {
+ .enable_reg = 0x3678,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_clk = {
+ .halt_reg = 0x36a8,
+ .clkr = {
+ .enable_reg = 0x36a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_clk = {
+ .halt_reg = 0x36ac,
+ .clkr = {
+ .enable_reg = 0x36ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_clk = {
+ .halt_reg = 0x36b0,
+ .clkr = {
+ .enable_reg = 0x36b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cpp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_ahb_clk = {
+ .halt_reg = 0x36b4,
+ .clkr = {
+ .enable_reg = 0x36b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vbif_ahb_clk = {
+ .halt_reg = 0x36b8,
+ .clkr = {
+ .enable_reg = 0x36b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vbif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vbif_axi_clk = {
+ .halt_reg = 0x36bc,
+ .clkr = {
+ .enable_reg = 0x36bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vbif_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_axi_clk = {
+ .halt_reg = 0x36c4,
+ .clkr = {
+ .enable_reg = 0x36c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_vbif_ahb_clk = {
+ .halt_reg = 0x36c8,
+ .clkr = {
+ .enable_reg = 0x36c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_vbif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+ .halt_reg = 0x3704,
+ .clkr = {
+ .enable_reg = 0x3704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+ .halt_reg = 0x3714,
+ .clkr = {
+ .enable_reg = 0x3714,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_stream_clk = {
+ .halt_reg = 0x3720,
+ .clkr = {
+ .enable_reg = 0x3720,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_stream_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_stream_clk = {
+ .halt_reg = 0x3724,
+ .clkr = {
+ .enable_reg = 0x3724,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_stream_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid0_clk = {
+ .halt_reg = 0x3730,
+ .clkr = {
+ .enable_reg = 0x3730,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid1_clk = {
+ .halt_reg = 0x3734,
+ .clkr = {
+ .enable_reg = 0x3734,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid2_clk = {
+ .halt_reg = 0x3738,
+ .clkr = {
+ .enable_reg = 0x3738,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid3_clk = {
+ .halt_reg = 0x373c,
+ .clkr = {
+ .enable_reg = 0x373c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy0_clk = {
+ .halt_reg = 0x3740,
+ .clkr = {
+ .enable_reg = 0x3740,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy1_clk = {
+ .halt_reg = 0x3744,
+ .clkr = {
+ .enable_reg = 0x3744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy2_clk = {
+ .halt_reg = 0x3748,
+ .clkr = {
+ .enable_reg = 0x3748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch fd_core_clk = {
+ .halt_reg = 0x3b68,
+ .clkr = {
+ .enable_reg = 0x3b68,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_core_clk",
+ .parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch fd_core_uar_clk = {
+ .halt_reg = 0x3b6c,
+ .clkr = {
+ .enable_reg = 0x3b6c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw *[]){ &fd_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch fd_ahb_clk = {
+ .halt_reg = 0x3b74,
+ .clkr = {
+ .enable_reg = 0x3b74,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "fd_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch mnoc_ahb_clk = {
+ .halt_reg = 0x5024,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mnoc_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch bimc_smmu_ahb_clk = {
+ .halt_reg = 0xe004,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_smmu_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch bimc_smmu_axi_clk = {
+ .halt_reg = 0xe008,
+ .clkr = {
+ .enable_reg = 0xe008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_smmu_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mnoc_maxi_clk = {
+ .halt_reg = 0xf004,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mnoc_maxi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch vmem_maxi_clk = {
+ .halt_reg = 0xf064,
+ .clkr = {
+ .enable_reg = 0xf064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vmem_maxi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &maxi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch vmem_ahb_clk = {
+ .halt_reg = 0xf068,
+ .clkr = {
+ .enable_reg = 0xf068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vmem_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
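+/* gpll0_div (defined earlier) has no regmap backing, so it is registered via the clk_hws list */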
+static struct clk_hw *mmcc_msm8998_hws[] = {
+ &gpll0_div.hw,
+};
+
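+/*
+ * GDSC power domains; a subcore's .parent pointer chains it under its
+ * top-level domain so genpd powers the parent up first.
+ */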
+static struct gdsc video_top_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "video_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc video_subcore0_gdsc = {
+ .gdscr = 0x1040,
+ .pd = {
+ .name = "video_subcore0",
+ },
+ .parent = &video_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc video_subcore1_gdsc = {
+ .gdscr = 0x1044,
+ .pd = {
+ .name = "video_subcore1",
+ },
+ .parent = &video_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .cxcs = (unsigned int []){ 0x2310, 0x2350, 0x231c, 0x2320 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+ .gdscr = 0x34a0,
+ .cxcs = (unsigned int []){ 0x35b8, 0x36c4, 0x3704, 0x3714, 0x3494,
+ 0x35a8, 0x3868 },
+ .cxc_count = 7,
+ .pd = {
+ .name = "camss_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe0_gdsc = {
+ .gdscr = 0x3664,
+ .pd = {
+ .name = "camss_vfe0",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe1_gdsc = {
+ .gdscr = 0x3674,
+ .pd = {
+ .name = "camss_vfe1_gdsc",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_cpp_gdsc = {
+ .gdscr = 0x36d4,
+ .pd = {
+ .name = "camss_cpp",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc bimc_smmu_gdsc = {
+ .gdscr = 0xe020,
+ .gds_hw_ctrl = 0xe024,
+ .pd = {
+ .name = "bimc_smmu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
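+/* Index positions follow the IDs in the dt-bindings mmcc-msm8998 header */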
+static struct clk_regmap *mmcc_msm8998_clocks[] = {
+ [MMPLL0] = &mmpll0.clkr,
+ [MMPLL0_OUT_EVEN] = &mmpll0_out_even.clkr,
+ [MMPLL1] = &mmpll1.clkr,
+ [MMPLL1_OUT_EVEN] = &mmpll1_out_even.clkr,
+ [MMPLL3] = &mmpll3.clkr,
+ [MMPLL3_OUT_EVEN] = &mmpll3_out_even.clkr,
+ [MMPLL4] = &mmpll4.clkr,
+ [MMPLL4_OUT_EVEN] = &mmpll4_out_even.clkr,
+ [MMPLL5] = &mmpll5.clkr,
+ [MMPLL5_OUT_EVEN] = &mmpll5_out_even.clkr,
+ [MMPLL6] = &mmpll6.clkr,
+ [MMPLL6_OUT_EVEN] = &mmpll6_out_even.clkr,
+ [MMPLL7] = &mmpll7.clkr,
+ [MMPLL7_OUT_EVEN] = &mmpll7_out_even.clkr,
+ [MMPLL10] = &mmpll10.clkr,
+ [MMPLL10_OUT_EVEN] = &mmpll10_out_even.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+ [CSIPHY_CLK_SRC] = &csiphy_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [DP_AUX_CLK_SRC] = &dp_aux_clk_src.clkr,
+ [DP_CRYPTO_CLK_SRC] = &dp_crypto_clk_src.clkr,
+ [DP_LINK_CLK_SRC] = &dp_link_clk_src.clkr,
+ [DP_PIXEL_CLK_SRC] = &dp_pixel_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+ [FD_CORE_CLK_SRC] = &fd_core_clk_src.clkr,
+ [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MAXI_CLK_SRC] = &maxi_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [AHB_CLK_SRC] = &ahb_clk_src.clkr,
+ [AXI_CLK_SRC] = &axi_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [ROT_CLK_SRC] = &rot_clk_src.clkr,
+ [VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
+ [VIDEO_SUBCORE0_CLK_SRC] = &video_subcore0_clk_src.clkr,
+ [VIDEO_SUBCORE1_CLK_SRC] = &video_subcore1_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [MISC_AHB_CLK] = &misc_ahb_clk.clkr,
+ [VIDEO_CORE_CLK] = &video_core_clk.clkr,
+ [VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
+ [VIDEO_AXI_CLK] = &video_axi_clk.clkr,
+ [VIDEO_MAXI_CLK] = &video_maxi_clk.clkr,
+ [VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
+ [VIDEO_SUBCORE1_CLK] = &video_subcore1_clk.clkr,
+ [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+ [MDSS_HDMI_DP_AHB_CLK] = &mdss_hdmi_dp_ahb_clk.clkr,
+ [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+ [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+ [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+ [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+ [MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+ [MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+ [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+ [MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+ [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+ [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+ [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+ [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+ [MDSS_ROT_CLK] = &mdss_rot_clk.clkr,
+ [MDSS_DP_LINK_CLK] = &mdss_dp_link_clk.clkr,
+ [MDSS_DP_LINK_INTF_CLK] = &mdss_dp_link_intf_clk.clkr,
+ [MDSS_DP_CRYPTO_CLK] = &mdss_dp_crypto_clk.clkr,
+ [MDSS_DP_PIXEL_CLK] = &mdss_dp_pixel_clk.clkr,
+ [MDSS_DP_AUX_CLK] = &mdss_dp_aux_clk.clkr,
+ [MDSS_BYTE0_INTF_CLK] = &mdss_byte0_intf_clk.clkr,
+ [MDSS_BYTE1_INTF_CLK] = &mdss_byte1_intf_clk.clkr,
+ [CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
+ [CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
+ [CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
+ [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+ [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+ [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+ [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+ [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+ [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+ [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+ [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+ [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+ [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+ [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+ [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+ [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+ [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+ [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+ [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+ [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+ [CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
+ [CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
+ [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+ [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+ [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+ [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+ [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+ [CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+ [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+ [CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
+ [CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
+ [CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
+ [CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
+ [CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
+ [CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
+ [CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
+ [CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
+ [CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
+ [CAMSS_VFE_VBIF_AHB_CLK] = &camss_vfe_vbif_ahb_clk.clkr,
+ [CAMSS_VFE_VBIF_AXI_CLK] = &camss_vfe_vbif_axi_clk.clkr,
+ [CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
+ [CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
+ [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+ [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+ [CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
+ [CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
+ [CAMSS_CPHY_CSID0_CLK] = &camss_cphy_csid0_clk.clkr,
+ [CAMSS_CPHY_CSID1_CLK] = &camss_cphy_csid1_clk.clkr,
+ [CAMSS_CPHY_CSID2_CLK] = &camss_cphy_csid2_clk.clkr,
+ [CAMSS_CPHY_CSID3_CLK] = &camss_cphy_csid3_clk.clkr,
+ [CAMSS_CSIPHY0_CLK] = &camss_csiphy0_clk.clkr,
+ [CAMSS_CSIPHY1_CLK] = &camss_csiphy1_clk.clkr,
+ [CAMSS_CSIPHY2_CLK] = &camss_csiphy2_clk.clkr,
+ [FD_CORE_CLK] = &fd_core_clk.clkr,
+ [FD_CORE_UAR_CLK] = &fd_core_uar_clk.clkr,
+ [FD_AHB_CLK] = &fd_ahb_clk.clkr,
+ [MNOC_AHB_CLK] = &mnoc_ahb_clk.clkr,
+ [BIMC_SMMU_AHB_CLK] = &bimc_smmu_ahb_clk.clkr,
+ [BIMC_SMMU_AXI_CLK] = &bimc_smmu_axi_clk.clkr,
+ [MNOC_MAXI_CLK] = &mnoc_maxi_clk.clkr,
+ [VMEM_MAXI_CLK] = &vmem_maxi_clk.clkr,
+ [VMEM_AHB_CLK] = &vmem_ahb_clk.clkr,
+};
+
+static struct gdsc *mmcc_msm8998_gdscs[] = {
+ [VIDEO_TOP_GDSC] = &video_top_gdsc,
+ [VIDEO_SUBCORE0_GDSC] = &video_subcore0_gdsc,
+ [VIDEO_SUBCORE1_GDSC] = &video_subcore1_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+ [CAMSS_VFE0_GDSC] = &camss_vfe0_gdsc,
+ [CAMSS_VFE1_GDSC] = &camss_vfe1_gdsc,
+ [CAMSS_CPP_GDSC] = &camss_cpp_gdsc,
+ [BIMC_SMMU_GDSC] = &bimc_smmu_gdsc,
+};
+
+static const struct qcom_reset_map mmcc_msm8998_resets[] = {
+ [SPDM_BCR] = { 0x200 },
+ [SPDM_RM_BCR] = { 0x300 },
+ [MISC_BCR] = { 0x320 },
+ [VIDEO_TOP_BCR] = { 0x1020 },
+ [THROTTLE_VIDEO_BCR] = { 0x1180 },
+ [MDSS_BCR] = { 0x2300 },
+ [THROTTLE_MDSS_BCR] = { 0x2460 },
+ [CAMSS_PHY0_BCR] = { 0x3020 },
+ [CAMSS_PHY1_BCR] = { 0x3050 },
+ [CAMSS_PHY2_BCR] = { 0x3080 },
+ [CAMSS_CSI0_BCR] = { 0x30b0 },
+ [CAMSS_CSI0RDI_BCR] = { 0x30d0 },
+ [CAMSS_CSI0PIX_BCR] = { 0x30e0 },
+ [CAMSS_CSI1_BCR] = { 0x3120 },
+ [CAMSS_CSI1RDI_BCR] = { 0x3140 },
+ [CAMSS_CSI1PIX_BCR] = { 0x3150 },
+ [CAMSS_CSI2_BCR] = { 0x3180 },
+ [CAMSS_CSI2RDI_BCR] = { 0x31a0 },
+ [CAMSS_CSI2PIX_BCR] = { 0x31b0 },
+ [CAMSS_CSI3_BCR] = { 0x31e0 },
+ [CAMSS_CSI3RDI_BCR] = { 0x3200 },
+ [CAMSS_CSI3PIX_BCR] = { 0x3210 },
+ [CAMSS_ISPIF_BCR] = { 0x3220 },
+ [CAMSS_CCI_BCR] = { 0x3340 },
+ [CAMSS_TOP_BCR] = { 0x3480 },
+ [CAMSS_AHB_BCR] = { 0x3488 },
+ [CAMSS_MICRO_BCR] = { 0x3490 },
+ [CAMSS_JPEG_BCR] = { 0x35a0 },
+ [CAMSS_VFE0_BCR] = { 0x3660 },
+ [CAMSS_VFE1_BCR] = { 0x3670 },
+ [CAMSS_VFE_VBIF_BCR] = { 0x36a0 },
+ [CAMSS_CPP_TOP_BCR] = { 0x36c0 },
+ [CAMSS_CPP_BCR] = { 0x36d0 },
+ [CAMSS_CSI_VFE0_BCR] = { 0x3700 },
+ [CAMSS_CSI_VFE1_BCR] = { 0x3710 },
+ [CAMSS_FD_BCR] = { 0x3b60 },
+ [THROTTLE_CAMSS_BCR] = { 0x3c30 },
+ [MNOCAHB_BCR] = { 0x5020 },
+ [MNOCAXI_BCR] = { 0xd020 },
+ [BMIC_SMMU_BCR] = { 0xe000 },
+ [MNOC_MAXI_BCR] = { 0xf000 },
+ [VMEM_BCR] = { 0xf060 },
+ [BTO_BCR] = { 0x10004 },
+};
+
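+/* fast_io makes regmap use a spinlock rather than a mutex for these MMIO accesses */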
+static const struct regmap_config mmcc_msm8998_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10004,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc mmcc_msm8998_desc = {
+ .config = &mmcc_msm8998_regmap_config,
+ .clks = mmcc_msm8998_clocks,
+ .num_clks = ARRAY_SIZE(mmcc_msm8998_clocks),
+ .resets = mmcc_msm8998_resets,
+ .num_resets = ARRAY_SIZE(mmcc_msm8998_resets),
+ .gdscs = mmcc_msm8998_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8998_gdscs),
+ .clk_hws = mmcc_msm8998_hws,
+ .num_clk_hws = ARRAY_SIZE(mmcc_msm8998_hws),
+};
+
+static const struct of_device_id mmcc_msm8998_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8998" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8998_match_table);
+
+static int mmcc_msm8998_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &mmcc_msm8998_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return qcom_cc_really_probe(pdev, &mmcc_msm8998_desc, regmap);
+}
+
+static struct platform_driver mmcc_msm8998_driver = {
+ .probe = mmcc_msm8998_probe,
+ .driver = {
+ .name = "mmcc-msm8998",
+ .of_match_table = mmcc_msm8998_match_table,
+ },
+};
+module_platform_driver(mmcc_msm8998_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC MSM8998 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
new file mode 100644
index 000000000000..c363c3cc544e
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sc7180.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_VIDEO_PLL0_OUT_EVEN,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL0_OUT_ODD,
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
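+/* Single Fabia VCO band: 249.6 MHz to 2 GHz */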
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+ F(270000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(340000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(434000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(500000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
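+/*
+ * clk_rcg2_shared_ops parks the RCG on the XO parent while the clock is
+ * disabled, keeping the mux on a safe source across enable cycles.
+ */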
+static struct clk_rcg2 video_cc_venus_clk_src = {
+ .cmd_rcgr = 0x7f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+ .halt_reg = 0x9ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+ .halt_reg = 0x890,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x890,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_vcodec0_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0xa4c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa4c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+ .halt_reg = 0x9cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+ .halt_reg = 0x850,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x850,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_venus_ctl_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x814,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
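+/* HW_CTRL lets hardware collapse/restore vcodec0 once the GDSC is enabled */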
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x874,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
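+/* Index positions follow dt-bindings/clock/qcom,videocc-sc7180.h */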
+static struct clk_regmap *video_cc_sc7180_clocks[] = {
+ [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+ [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+ [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+ [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_sc7180_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+};
+
+static const struct regmap_config video_cc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb94,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sc7180_desc = {
+ .config = &video_cc_sc7180_regmap_config,
+ .clks = video_cc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sc7180_clocks),
+ .gdscs = video_cc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id video_cc_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sc7180_match_table);
+
+static int video_cc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config video_pll0_config = {};
+
+ regmap = qcom_cc_map(pdev, &video_cc_sc7180_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
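+	/* L = 0x1f with 16-bit alpha 0x4000 (0.25): 31.25 * 19.2 MHz XO = 600 MHz */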
+ video_pll0_config.l = 0x1f;
+ video_pll0_config.alpha = 0x4000;
+ video_pll0_config.user_ctl_val = 0x00000001;
+ video_pll0_config.user_ctl_hi_val = 0x00004805;
+
+ clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
+ regmap_update_bits(regmap, 0x984, 0x1, 0x1);
+
+ return qcom_cc_really_probe(pdev, &video_cc_sc7180_desc, regmap);
+}
+
+static struct platform_driver video_cc_sc7180_driver = {
+ .probe = video_cc_sc7180_probe,
+ .driver = {
+ .name = "sc7180-videocc",
+ .of_match_table = video_cc_sc7180_match_table,
+ },
+};
+
+static int __init video_cc_sc7180_init(void)
+{
+ return platform_driver_register(&video_cc_sc7180_driver);
+}
+subsys_initcall(video_cc_sc7180_init);
+
+static void __exit video_cc_sc7180_exit(void)
+{
+ platform_driver_unregister(&video_cc_sc7180_driver);
+}
+module_exit(video_cc_sc7180_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI VIDEOCC SC7180 Driver");
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 4cd846bc98cc..250d8165167a 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -20,8 +20,8 @@ config CLK_RENESAS
select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
- select CLK_R8A7795 if ARCH_R8A7795
- select CLK_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 || ARCH_R8A7795
+ select CLK_R8A77960 if ARCH_R8A77960
select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965
select CLK_R8A77970 if ARCH_R8A77970
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index fbc34beafc78..7b703f14e20b 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -37,8 +37,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
void __iomem *ppr0, *pibc0;
u16 modes;
- ppr0 = ioremap_nocache(PPR0, 2);
- pibc0 = ioremap_nocache(PIBC0, 2);
+ ppr0 = ioremap(PPR0, 2);
+ pibc0 = ioremap(PIBC0, 2);
BUG_ON(!ppr0 || !pibc0);
iowrite16(4, pibc0); /* enable input buffer */
modes = ioread16(ppr0);
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index cf65d4e0e116..443bff08df4c 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -93,6 +93,7 @@ static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = {
DEF_MOD_STB("ether1", 64, R7S9210_CLK_B),
DEF_MOD_STB("ether0", 65, R7S9210_CLK_B),
+ DEF_MOD_STB("spibsc", 83, R7S9210_CLK_P1),
DEF_MOD_STB("i2c3", 84, R7S9210_CLK_P1),
DEF_MOD_STB("i2c2", 85, R7S9210_CLK_P1),
DEF_MOD_STB("i2c1", 86, R7S9210_CLK_P1),
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index db2f57ef2f99..bdcd4a38d48d 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -24,10 +24,10 @@ enum rcar_gen2_clk_types {
};
struct rcar_gen2_cpg_pll_config {
- unsigned int extal_div;
- unsigned int pll1_mult;
- unsigned int pll3_mult;
- unsigned int pll0_mult; /* leave as zero if PLL0CR exists */
+ u8 extal_div;
+ u8 pll1_mult;
+ u8 pll3_mult;
+ u8 pll0_mult; /* leave as zero if PLL0CR exists */
};
struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index c97b647db9b6..488f8b3980c5 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -470,7 +470,8 @@ static struct clk * __init cpg_rpc_clk_register(const char *name,
clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
&rpc->div.hw, &clk_divider_ops,
- &rpc->gate.hw, &clk_gate_ops, 0);
+ &rpc->gate.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
if (IS_ERR(clk)) {
kfree(rpc);
return clk;
@@ -506,7 +507,8 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
&rpcd2->fixed.hw, &clk_fixed_factor_ops,
- &rpcd2->gate.hw, &clk_gate_ops, 0);
+ &rpcd2->gate.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
if (IS_ERR(clk))
kfree(rpcd2);
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 198417d56300..10560d963baf 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -282,7 +282,7 @@ static int rockchip_rk3036_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3036_PLLCON1_PWRDOWN);
}
-static void rockchip_rk3036_pll_init(struct clk_hw *hw)
+static int rockchip_rk3036_pll_init(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
@@ -290,14 +290,14 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
unsigned long drate;
if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return;
+ return 0;
drate = clk_hw_get_rate(hw);
rate = rockchip_get_pll_settings(pll, drate);
/* when no rate setting for the current rate, rely on clk_set_rate */
if (!rate)
- return;
+ return 0;
rockchip_rk3036_pll_get_params(pll, &cur);
@@ -319,13 +319,15 @@ static void rockchip_rk3036_pll_init(struct clk_hw *hw)
if (!parent) {
pr_warn("%s: parent of %s not available\n",
__func__, __clk_get_name(hw->clk));
- return;
+ return 0;
}
pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
__func__, __clk_get_name(hw->clk));
rockchip_rk3036_pll_set_params(pll, rate);
}
+
+ return 0;
}
static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
@@ -515,7 +517,7 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3066_PLLCON3_PWRDOWN);
}
-static void rockchip_rk3066_pll_init(struct clk_hw *hw)
+static int rockchip_rk3066_pll_init(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
@@ -523,14 +525,14 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
unsigned long drate;
if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return;
+ return 0;
drate = clk_hw_get_rate(hw);
rate = rockchip_get_pll_settings(pll, drate);
/* when no rate setting for the current rate, rely on clk_set_rate */
if (!rate)
- return;
+ return 0;
rockchip_rk3066_pll_get_params(pll, &cur);
@@ -543,6 +545,8 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
__func__, clk_hw_get_name(hw));
rockchip_rk3066_pll_set_params(pll, rate);
}
+
+ return 0;
}
static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
@@ -761,7 +765,7 @@ static int rockchip_rk3399_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3399_PLLCON3_PWRDOWN);
}
-static void rockchip_rk3399_pll_init(struct clk_hw *hw)
+static int rockchip_rk3399_pll_init(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
@@ -769,14 +773,14 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
unsigned long drate;
if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return;
+ return 0;
drate = clk_hw_get_rate(hw);
rate = rockchip_get_pll_settings(pll, drate);
/* when no rate setting for the current rate, rely on clk_set_rate */
if (!rate)
- return;
+ return 0;
rockchip_rk3399_pll_get_params(pll, &cur);
@@ -798,13 +802,15 @@ static void rockchip_rk3399_pll_init(struct clk_hw *hw)
if (!parent) {
pr_warn("%s: parent of %s not available\n",
__func__, __clk_get_name(hw->clk));
- return;
+ return 0;
}
pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
__func__, __clk_get_name(hw->clk));
rockchip_rk3399_pll_set_params(pll, rate);
}
+
+ return 0;
}
static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 3a991ca1ee36..c9e5a1fb6653 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include "clk.h"
#include "clk-cpu.h"
@@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_subcmus);
}
+ /*
+ * Keep top part of G3D clock path enabled permanently to ensure
+	 * that the internal buses get their clock regardless of the
+ * main G3D clock enablement status.
+ */
+ clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+
samsung_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 49bd7a4c015c..5f66bf879772 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -921,11 +921,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets),
};
+static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ /* copy from pll_cpux_clk */
+ .enable = BIT(31),
+ .lock = BIT(28),
+};
+
+static struct ccu_mux_nb sun50i_a64_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1, /* > 8 clock cycles at 24 MHz */
+ .bypass_index = 1, /* index of 24 MHz oscillator */
+};
+
static int sun50i_a64_ccu_probe(struct platform_device *pdev)
{
struct resource *res;
void __iomem *reg;
u32 val;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, res);
@@ -939,7 +954,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+ ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Gate then ungate PLL CPU after any rate changes */
+ ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
+
+ /* Reparent CPU during PLL CPU rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_a64_cpu_nb);
+
+ return 0;
}
static const struct of_device_id sun50i_a64_ccu_ids[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 979929276709..116e6f826d04 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -36,7 +36,6 @@
#define CLK_PLL_HSIC 18
#define CLK_PLL_DE 19
#define CLK_PLL_DDR1 20
-#define CLK_CPUX 21
#define CLK_AXI 22
#define CLK_APB 23
#define CLK_AHB1 24
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 45a1ed3fe674..50f8d1bc7046 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -23,9 +23,9 @@
*/
static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
- "pll-periph0", "iosc" };
+ "iosc", "pll-periph0" };
static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
- { .index = 2, .shift = 0, .width = 5 },
+ { .index = 3, .shift = 0, .width = 5 },
};
static struct ccu_div ar100_clk = {
@@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div r_apb1_clk = {
- .div = _SUNXI_CCU_DIV(0, 2),
-
- .common = {
- .reg = 0x00c,
- .hw.init = CLK_HW_INIT("r-apb1",
- "r-ahb",
- &ccu_div_ops,
- 0),
- },
-};
+static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0);
static struct ccu_div r_apb2_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
index a361388b4670..3ed2a59b0dc6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
@@ -32,7 +32,9 @@
/* The PLL_VIDEO1_2X clock is exported */
#define CLK_PLL_GPU 14
-#define CLK_PLL_MIPI 15
+
+/* The PLL_MIPI clock is exported */
+
#define CLK_PLL9 16
#define CLK_PLL10 17
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h b/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
index 72df69291cc6..5bf5c4d13b4c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23-a33.h
@@ -24,7 +24,9 @@
#define CLK_PLL_PERIPH 10
#define CLK_PLL_PERIPH_2X 11
#define CLK_PLL_GPU 12
-#define CLK_PLL_MIPI 13
+
+/* The PLL MIPI clock is exported */
+
#define CLK_PLL_HSIC 14
#define CLK_PLL_DE 15
#define CLK_PLL_DDR1 16
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 4646fdc61053..4c8c491b87c2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div apb0_clk = {
- .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
-
- .common = {
- .reg = 0x0c,
- .hw.init = CLK_HW_INIT_HW("apb0",
- &ahb0_clk.hw,
- &ccu_div_ops,
- 0),
- },
-};
-
-static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
/*
* Define the parent as an array that can be reused to save space
@@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = {
static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
&ar100_clk.common,
- &a83t_apb0_clk.common,
+ &apb0_clk.common,
&apb0_pio_clk.common,
&apb0_ir_clk.common,
&apb0_timer_clk.common,
@@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
[CLK_AHB0] = &ahb0_clk.hw,
- [CLK_APB0] = &a83t_apb0_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
[CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
[CLK_APB0_IR] = &apb0_ir_clk.common.hw,
[CLK_APB0_TIMER] = &apb0_timer_clk.common.hw,
@@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
{
- /* Fix apb0 bus gate parents here */
- apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
-
sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 897490800102..23bfe1d12f21 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = {
.reg = 0x1f0,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outa", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
@@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = {
.reg = 0x1f4,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outb", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
index a69637b6b0c1..6f7071df8e1c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.h
@@ -55,10 +55,6 @@
/* Some more module clocks are exported */
-#define CLK_MBUS 155
-
-/* Another bunch of module clocks are exported */
-
#define CLK_NUMBER (CLK_OUTB + 1)
#endif /* _CCU_SUN8I_R40_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 5c779eec454b..0e36ca3bf3d5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_PLL_DDR1 + 1,
};
static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
@@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_I2S0 + 1,
};
static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
index b0160d305a67..108eeeedcbf7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
@@ -51,6 +51,4 @@
#define CLK_PLL_DDR1 74
-#define CLK_NUMBER (CLK_I2S0 + 1)
-
#endif /* _CCU_SUN8I_H3_H_ */
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index a165e7172346..4c75b0770c74 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -37,7 +37,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct clk_onecell_data *clk_data;
- const struct of_device_id *device;
const struct gates_data *data;
const char *clk_parent;
const char *clk_name;
@@ -50,10 +49,9 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- device = of_match_device(sun6i_a31_apb0_gates_clk_dt_ids, &pdev->dev);
- if (!device)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
- data = device->data;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index c051d92c2bbf..cfbaa90c7adb 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1487,7 +1487,6 @@ static int dfll_init(struct tegra_dfll *td)
td->last_unrounded_rate = 0;
pm_runtime_enable(td->dev);
- pm_runtime_irq_safe(td->dev);
pm_runtime_get_sync(td->dev);
dfll_set_mode(td, DFLL_DISABLED);
@@ -1516,7 +1515,7 @@ di_err1:
/**
* tegra_dfll_suspend - check DFLL is disabled
- * @dev: DFLL device *
+ * @dev: DFLL instance
*
* DFLL clock should be disabled by the CPUFreq driver. So, make
* sure it is disabled and disable all clocks needed by the DFLL.
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index ca0de5f11f84..38daf483ddf1 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -40,8 +40,13 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
int div, mul;
u64 rate = parent_rate;
- reg = readl_relaxed(divider->reg) >> divider->shift;
- div = reg & div_mask(divider);
+ reg = readl_relaxed(divider->reg);
+
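+	/* The UART divider takes effect only when its DIV_ENB bit is set */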
+ if ((divider->flags & TEGRA_DIVIDER_UART) &&
+ !(reg & PERIPH_CLK_UART_DIV_ENB))
+ return rate;
+
+ div = (reg >> divider->shift) & div_mask(divider);
mul = get_mul(divider);
div += mul;
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 0d07c0ba49b6..2b2a3b81c16b 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -777,7 +777,11 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
- GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
+ /*
+ * Critical for RAM re-repair operation, which must occur on resume
+ * from LP1 system suspend and as part of CCPLEX cluster switching.
+ */
+ GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 4d8222f5c638..fff5cba87637 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1046,11 +1046,9 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_SBC3, TEGRA20_CLK_PLL_P, 100000000, 0 },
{ TEGRA20_CLK_SBC4, TEGRA20_CLK_PLL_P, 100000000, 0 },
{ TEGRA20_CLK_HOST1X, TEGRA20_CLK_PLL_C, 150000000, 0 },
- { TEGRA20_CLK_DISP1, TEGRA20_CLK_PLL_P, 600000000, 0 },
- { TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0 },
{ TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
- { TEGRA20_CLK_VDE, TEGRA20_CLK_CLK_MAX, 300000000, 0 },
+ { TEGRA20_CLK_VDE, TEGRA20_CLK_PLL_C, 300000000, 0 },
/* must be the last entry */
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index c8bc18e4d7e5..b20891489e11 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1251,14 +1251,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_SBC6, TEGRA30_CLK_PLL_P, 100000000, 0 },
{ TEGRA30_CLK_PLL_C, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
{ TEGRA30_CLK_HOST1X, TEGRA30_CLK_PLL_C, 150000000, 0 },
- { TEGRA30_CLK_DISP1, TEGRA30_CLK_PLL_P, 600000000, 0 },
- { TEGRA30_CLK_DISP2, TEGRA30_CLK_PLL_P, 600000000, 0 },
{ TEGRA30_CLK_TWD, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
- { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 600000000, 0 },
{ TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index e6bd6d1ea012..f6cdce441cf7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
periph_banks = banks;
clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
- if (!clks)
+ if (!clks) {
kfree(periph_clk_enb_refcnt);
+ return NULL;
+ }
clk_num = num;
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 2b4dab632318..312a20f8ec0e 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -604,6 +604,18 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
{ 0 },
};
+static const struct
+omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = {
+ { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { 0 },
+};
+
static const struct omap_clkctrl_bit_data omap4_gpio1_bit_data[] __initconst = {
{ 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
{ 0 },
@@ -691,6 +703,7 @@ const struct omap_clkctrl_data omap4_clkctrl_data[] __initconst = {
{ 0x4a009220, omap4_l3_gfx_clkctrl_regs },
{ 0x4a009320, omap4_l3_init_clkctrl_regs },
{ 0x4a009420, omap4_l4_per_clkctrl_regs },
+ { 0x4a0095a0, omap4_l4_secure_clkctrl_regs },
{ 0x4a307820, omap4_l4_wkup_clkctrl_regs },
{ 0x4a307a20, omap4_emu_sys_clkctrl_regs },
{ 0 },
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index c9608e5d993a..92bf2dda95b9 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -35,6 +35,20 @@ static const struct omap_clkctrl_reg_data omap5_dsp_clkctrl_regs[] __initconst =
{ 0 },
};
+static const char * const omap5_aess_fclk_parents[] __initconst = {
+ "abe_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_aess_fclk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap5_aess_fclk_parents, &omap5_aess_fclk_data },
+ { 0 },
+};
+
static const char * const omap5_dmic_gfclk_parents[] __initconst = {
"abe_cm:clk:0018:26",
"pad_clks_ck",
@@ -122,6 +136,7 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
{ OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
+ { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
{ OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
{ OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
{ OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
@@ -286,6 +301,18 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst
{ 0 },
};
+static const struct
+omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = {
+ { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { 0 },
+};
+
static const struct omap_clkctrl_reg_data omap5_iva_clkctrl_regs[] __initconst = {
{ OMAP5_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
{ OMAP5_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
@@ -508,6 +535,7 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
{ 0x4a008d20, omap5_l4cfg_clkctrl_regs },
{ 0x4a008e20, omap5_l3instr_clkctrl_regs },
{ 0x4a009020, omap5_l4per_clkctrl_regs },
+ { 0x4a0091a0, omap5_l4_secure_clkctrl_regs },
{ 0x4a009220, omap5_iva_clkctrl_regs },
{ 0x4a009420, omap5_dss_clkctrl_regs },
{ 0x4a009520, omap5_gpu_clkctrl_regs },
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 5f46782cebeb..14b645093107 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -146,6 +146,29 @@ static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst =
{ 0 },
};
+static const char * const dra7_cam_gfclk_mux_parents[] __initconst = {
+ "l3_iclk_div",
+ "core_iss_main_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_cam_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_cam_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_cam_clkctrl_regs[] __initconst = {
+ { DRA7_CAM_VIP1_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_CAM_VIP2_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_CAM_VIP3_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_vpe_clkctrl_regs[] __initconst = {
+ { DRA7_VPE_VPE_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h23x2_ck" },
+ { 0 },
+};
+
static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
{ DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
{ DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
@@ -275,6 +298,40 @@ static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst =
{ 0 },
};
+static const char * const dra7_gpu_core_mux_parents[] __initconst = {
+ "dpll_core_h14x2_ck",
+ "dpll_per_h14x2_ck",
+ "dpll_gpu_m2_ck",
+ NULL,
+};
+
+static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
+ "dpll_core_h14x2_ck",
+ "dpll_per_h14x2_ck",
+ "dpll_gpu_m2_ck",
+ NULL,
+};
+
+static const char * const dra7_gpu_sys_clk_parents[] __initconst = {
+ "sys_clkin",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
+ { 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
+ { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24", },
+ { 0 },
+};
+
static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
"func_128m_clk",
"dpll_per_m2x2_ck",
@@ -405,7 +462,7 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
- { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" },
+ { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
{ 0 },
};
@@ -769,6 +826,7 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
{ 0x4a005550, dra7_ipu_clkctrl_regs },
{ 0x4a005620, dra7_dsp2_clkctrl_regs },
{ 0x4a005720, dra7_rtc_clkctrl_regs },
+ { 0x4a005760, dra7_vpe_clkctrl_regs },
{ 0x4a008620, dra7_coreaon_clkctrl_regs },
{ 0x4a008720, dra7_l3main1_clkctrl_regs },
{ 0x4a008920, dra7_ipu2_clkctrl_regs },
@@ -777,7 +835,9 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
{ 0x4a008c00, dra7_atl_clkctrl_regs },
{ 0x4a008d20, dra7_l4cfg_clkctrl_regs },
{ 0x4a008e20, dra7_l3instr_clkctrl_regs },
+ { 0x4a009020, dra7_cam_clkctrl_regs },
{ 0x4a009120, dra7_dss_clkctrl_regs },
+ { 0x4a009220, dra7_gpu_clkctrl_regs },
{ 0x4a009320, dra7_l3init_clkctrl_regs },
{ 0x4a0093b0, dra7_pcie_clkctrl_regs },
{ 0x4a0093d0, dra7_gmac_clkctrl_regs },
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index f65e16c4f3c4..8d4c08b034bd 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
cinfo->iobase = of_iomap(node, 0);
cinfo->dev = &pdev->dev;
pm_runtime_enable(cinfo->dev);
- pm_runtime_irq_safe(cinfo->dev);
pm_runtime_get_sync(cinfo->dev);
atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index e0b8ed3a1e80..3da33c786d77 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -171,7 +171,9 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
node = of_find_node_by_name(NULL, buf);
if (num_args && compat_mode) {
parent = node;
- node = of_get_child_by_name(parent, "clk");
+ node = of_get_child_by_name(parent, "clock");
+ if (!node)
+ node = of_get_child_by_name(parent, "clk");
of_node_put(parent);
}
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 17b9a761242f..062266034d84 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -440,6 +440,63 @@ static void __init _clkctrl_add_provider(void *data,
of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}
+/* Get clock name based on compatible string for clkctrl */
+static char * __init clkctrl_get_name(struct device_node *np)
+{
+ struct property *prop;
+ const int prefix_len = 11;
+ const char *compat;
+ char *name;
+
+ of_property_for_each_string(np, "compatible", prop, compat) {
+ if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
+ /* Two letter minimum name length for l3, l4 etc */
+ if (strnlen(compat + prefix_len, 16) < 2)
+ continue;
+ name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
+ if (!name)
+ continue;
+ strreplace(name, '-', '_');
+
+ return name;
+ }
+ }
+
+ return NULL;
+}
+
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+ const char *clkctrl_name,
+ int offset, int index,
+ bool legacy_naming)
+{
+ char *clock_name;
+
+ /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+ if (clkctrl_name && !legacy_naming) {
+ clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+ clkctrl_name, offset, index);
+ strreplace(clock_name, '_', '-');
+
+ return clock_name;
+ }
+
+ /* l4per:1234:0 old style naming based on clkctrl_name */
+ if (clkctrl_name)
+ return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+ clkctrl_name, offset, index);
+
+ /* l4per_cm:1234:0 old style naming based on parent node name */
+ if (legacy_naming)
+ return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+ np->parent, offset, index);
+
+ /* l4per-clkctrl:1234:0 style naming based on node name */
+ return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
struct omap_clkctrl_provider *provider;
@@ -448,8 +505,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
struct clk_init_data init = { NULL };
struct clk_hw_omap *hw;
struct clk *clk;
- struct omap_clkctrl_clk *clkctrl_clk;
+ struct omap_clkctrl_clk *clkctrl_clk = NULL;
const __be32 *addrp;
+ bool legacy_naming;
+ char *clkctrl_name;
u32 addr;
int ret;
char *c;
@@ -537,7 +596,19 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
provider->base = of_iomap(node, 0);
- if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
+ legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
+ clkctrl_name = clkctrl_get_name(node);
+ if (clkctrl_name) {
+ provider->clkdm_name = kasprintf(GFP_KERNEL,
+ "%s_clkdm", clkctrl_name);
+ goto clkdm_found;
+ }
+
+ /*
+ * The code below can be removed when all clkctrl nodes use domain
+	 * specific compatible property and standard clock node naming
+ */
+ if (legacy_naming) {
provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
if (!provider->clkdm_name) {
kfree(provider);
@@ -573,7 +644,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
*c = '_';
c++;
}
-
+clkdm_found:
INIT_LIST_HEAD(&provider->clocks);
/* Generate clocks */
@@ -612,15 +683,15 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
init.flags = 0;
if (reg_data->flags & CLKF_SET_RATE_PARENT)
init.flags |= CLK_SET_RATE_PARENT;
- if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
- init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
- node->parent, node,
- reg_data->offset, 0);
- else
- init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
- node, reg_data->offset, 0);
+
+ init.name = clkctrl_get_clock_name(node, clkctrl_name,
+ reg_data->offset, 0,
+ legacy_naming);
+ if (!init.name)
+ goto cleanup;
+
clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
- if (!init.name || !clkctrl_clk)
+ if (!clkctrl_clk)
goto cleanup;
init.ops = &omap4_clkctrl_clk_ops;
@@ -642,11 +713,14 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
if (ret == -EPROBE_DEFER)
ti_clk_retry_init(node, provider, _clkctrl_add_provider);
+ kfree(clkctrl_name);
+
return;
cleanup:
kfree(hw);
kfree(init.name);
+ kfree(clkctrl_name);
kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index e6995c04001e..f1dd62de2bfc 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -253,7 +253,7 @@ extern const struct clk_ops omap_gate_clk_ops;
extern struct ti_clk_features ti_clk_features;
-void omap2_init_clk_clkdm(struct clk_hw *hw);
+int omap2_init_clk_clkdm(struct clk_hw *hw);
int omap2_clkops_enable_clkdm(struct clk_hw *hw);
void omap2_clkops_disable_clkdm(struct clk_hw *hw);
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 423a99b9f10c..ee56306f79d5 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -101,16 +101,16 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw)
*
* Convert a clockdomain name stored in a struct clk 'clk' into a
* clockdomain pointer, and save it into the struct clk. Intended to be
- * called during clk_register(). No return value.
+ * called during clk_register(). Returns 0 on success or a negative error code otherwise.
*/
-void omap2_init_clk_clkdm(struct clk_hw *hw)
+int omap2_init_clk_clkdm(struct clk_hw *hw)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct clockdomain *clkdm;
const char *clk_name;
if (!clk->clkdm_name)
- return;
+ return 0;
clk_name = __clk_get_name(hw->clk);
@@ -123,6 +123,8 @@ void omap2_init_clk_clkdm(struct clk_hw *hw)
pr_debug("clock: could not associate clk %s to clkdm %s\n",
clk_name, clk->clkdm_name);
}
+
+ return 0;
}
static void __init of_ti_clockdomain_setup(struct device_node *node)
diff --git a/drivers/clk/uniphier/clk-uniphier-peri.c b/drivers/clk/uniphier/clk-uniphier-peri.c
index 9caa52944b1c..3e32db9dad81 100644
--- a/drivers/clk/uniphier/clk-uniphier-peri.c
+++ b/drivers/clk/uniphier/clk-uniphier-peri.c
@@ -18,8 +18,8 @@
#define UNIPHIER_PERI_CLK_FI2C(idx, ch) \
UNIPHIER_CLK_GATE("i2c" #ch, (idx), "i2c", 0x24, 24 + (ch))
-#define UNIPHIER_PERI_CLK_SCSSI(idx) \
- UNIPHIER_CLK_GATE("scssi", (idx), "spi", 0x20, 17)
+#define UNIPHIER_PERI_CLK_SCSSI(idx, ch) \
+ UNIPHIER_CLK_GATE("scssi" #ch, (idx), "spi", 0x20, 17 + (ch))
#define UNIPHIER_PERI_CLK_MCSSI(idx) \
UNIPHIER_CLK_GATE("mcssi", (idx), "spi", 0x24, 14)
@@ -35,7 +35,7 @@ const struct uniphier_clk_data uniphier_ld4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_I2C(6, 2),
UNIPHIER_PERI_CLK_I2C(7, 3),
UNIPHIER_PERI_CLK_I2C(8, 4),
- UNIPHIER_PERI_CLK_SCSSI(11),
+ UNIPHIER_PERI_CLK_SCSSI(11, 0),
{ /* sentinel */ }
};
@@ -51,7 +51,10 @@ const struct uniphier_clk_data uniphier_pro4_peri_clk_data[] = {
UNIPHIER_PERI_CLK_FI2C(8, 4),
UNIPHIER_PERI_CLK_FI2C(9, 5),
UNIPHIER_PERI_CLK_FI2C(10, 6),
- UNIPHIER_PERI_CLK_SCSSI(11),
- UNIPHIER_PERI_CLK_MCSSI(12),
+ UNIPHIER_PERI_CLK_SCSSI(11, 0),
+ UNIPHIER_PERI_CLK_SCSSI(12, 1),
+ UNIPHIER_PERI_CLK_SCSSI(13, 2),
+ UNIPHIER_PERI_CLK_SCSSI(14, 3),
+ UNIPHIER_PERI_CLK_MCSSI(15),
{ /* sentinel */ }
};
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 72ed97c6662a..0aedd42fad52 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -99,8 +99,10 @@ static void u8500_clk_init(struct device_node *np)
if (fw_version != NULL) {
switch (fw_version->project) {
case PRCMU_FW_PROJECT_U8500_C2:
+ case PRCMU_FW_PROJECT_U8500_MBL:
case PRCMU_FW_PROJECT_U8520:
case PRCMU_FW_PROJECT_U8420:
+ case PRCMU_FW_PROJECT_U8420_SYSCLK:
sgaclk_parent = "soc0_pll";
break;
default:
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index ac766855ba16..c2618f1477a2 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -9,7 +9,7 @@ config COMMON_CLK_VERSATILE
COMPILE_TEST
select REGMAP_MMIO
---help---
- Supports clocking on ARM Reference designs:
+ Supports clocking on ARM Reference designs:
- Integrator/AP and Integrator/CP
- RealView PB1176, EB, PB11MP and PBX
- Versatile Express
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index a11f93ecbf34..10e89f23880b 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -2,7 +2,7 @@
/*
* Zynq UltraScale+ MPSoC clock controller
*
- * Copyright (C) 2016-2018 Xilinx
+ * Copyright (C) 2016-2019 Xilinx
*
* Based on drivers/clk/zynq/clkc.c
*/
@@ -749,6 +749,7 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
static const struct of_device_id zynqmp_clock_of_match[] = {
{.compatible = "xlnx,zynqmp-clk"},
+ {.compatible = "xlnx,versal-clk"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_clock_of_match);
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index d8f5b70d2709..4be2cc76aa2e 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -2,7 +2,7 @@
/*
* Zynq UltraScale+ MPSoC Divider support
*
- * Copyright (C) 2016-2018 Xilinx
+ * Copyright (C) 2016-2019 Xilinx
*
* Adjustable divider clock implementation
*/
@@ -41,12 +41,30 @@ struct zynqmp_clk_divider {
bool is_frac;
u32 clk_id;
u32 div_type;
+ u16 max_div;
};
static inline int zynqmp_divider_get_val(unsigned long parent_rate,
- unsigned long rate)
+ unsigned long rate, u16 flags)
{
- return DIV_ROUND_CLOSEST(parent_rate, rate);
+ int up, down;
+ unsigned long up_rate, down_rate;
+
+ if (flags & CLK_DIVIDER_POWER_OF_TWO) {
+ up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ down = DIV_ROUND_DOWN_ULL((u64)parent_rate, rate);
+
+ up = __roundup_pow_of_two(up);
+ down = __rounddown_pow_of_two(down);
+
+ up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
+ down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);
+
+ return (rate - up_rate) <= (down_rate - rate) ? up : down;
+
+ } else {
+ return DIV_ROUND_CLOSEST(parent_rate, rate);
+ }
}
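
The power-of-two branch above evaluates both the nearest power of two above and below the ideal divider and keeps whichever yields a rate closer to the request. A minimal user-space sketch of that selection strategy; the rates in main() are illustrative, not taken from the driver:

#include <stdio.h>

static unsigned long roundup_pow2(unsigned long x)
{
        unsigned long p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

static unsigned long rounddown_pow2(unsigned long x)
{
        unsigned long p = 1;

        while ((p << 1) <= x)
                p <<= 1;
        return p;
}

/* Mirrors the CLK_DIVIDER_POWER_OF_TWO branch above. */
static unsigned long pow2_divider(unsigned long parent, unsigned long rate)
{
        unsigned long up = roundup_pow2((parent + rate - 1) / rate);
        unsigned long down = rounddown_pow2(parent / rate);
        unsigned long up_rate = (parent + up - 1) / up;
        unsigned long down_rate = (parent + down - 1) / down;

        /* Pick whichever power of two lands closer to the requested rate. */
        return (rate - up_rate) <= (down_rate - rate) ? up : down;
}

int main(void)
{
        /* 100 MHz parent, 30 MHz request: /4 gives 25 MHz, /2 gives 50 MHz. */
        printf("divider = %lu\n", pow2_divider(100000000UL, 30000000UL));
        return 0;
}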
/**
@@ -78,6 +96,9 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
else
value = div >> 16;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ value = 1 << value;
+
if (!value) {
WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
@@ -88,6 +109,42 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL(parent_rate, value);
}
+static void zynqmp_get_divider2_val(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ struct zynqmp_clk_divider *divider,
+ int *bestdiv)
+{
+ int div1;
+ int div2;
+ long error = LONG_MAX;
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+
+ if (!pdivider)
+ return;
+
+ *bestdiv = 1;
+ for (div1 = 1; div1 <= pdivider->max_div;) {
+ for (div2 = 1; div2 <= divider->max_div;) {
+ long new_error = ((parent_rate / div1) / div2) - rate;
+
+ if (abs(new_error) < abs(error)) {
+ *bestdiv = div2;
+ error = new_error;
+ }
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div2 = div2 << 1;
+ else
+ div2++;
+ }
+ if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div1 = div1 << 1;
+ else
+ div1++;
+ }
+}
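
The two-divider search above is a plain brute-force scan: every (div1, div2) pair is tried and the div2 of the pair with the smallest rate error is kept, stepping in powers of two where a divider demands it. A runnable sketch of the linear case; the divider limits here are invented for the example, whereas the real limits come from firmware via zynqmp_clk_get_max_divisor() further down:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Exhaustively search both dividers; keep the second divider of the best pair. */
static int best_div2(unsigned long parent, unsigned long rate,
                     int pdiv_max, int div_max)
{
        long best_err = LONG_MAX;
        int best = 1;

        for (int d1 = 1; d1 <= pdiv_max; d1++) {
                for (int d2 = 1; d2 <= div_max; d2++) {
                        long err = (long)((parent / d1) / d2) - (long)rate;

                        if (labs(err) < labs(best_err)) {
                                best = d2;
                                best_err = err;
                        }
                }
        }
        return best;
}

int main(void)
{
        /* 1 GHz parent, 33 MHz target, chained dividers up to 63 and 255. */
        printf("div2 = %d\n", best_div2(1000000000UL, 33000000UL, 63, 255));
        return 0;
}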
+
/**
* zynqmp_clk_divider_round_rate() - Round rate of divider clock
* @hw: handle between common and hardware-specific interfaces
@@ -120,10 +177,23 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
else
bestdiv = bestdiv >> 16;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ bestdiv = 1 << bestdiv;
+
return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
}
- bestdiv = zynqmp_divider_get_val(*prate, rate);
+ bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+
+ /*
+ * In case of two divisors, compute best divider values and return
+ * divider2 value based on compute value. div1 will be automatically
+ * set to optimum based on required total divider value.
+ */
+ if (div_type == TYPE_DIV2 &&
+ (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ zynqmp_get_divider2_val(hw, rate, *prate, divider, &bestdiv);
+ }
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
@@ -151,7 +221,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
int ret;
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- value = zynqmp_divider_get_val(parent_rate, rate);
+ value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
if (div_type == TYPE_DIV1) {
div = value & 0xFFFF;
div |= 0xffff << 16;
@@ -160,6 +230,9 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
div |= value << 16;
}
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div = __ffs(div);
+
ret = eemi_ops->clock_setdivider(clk_id, div);
if (ret)
@@ -176,6 +249,35 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
};
/**
+ * zynqmp_clk_get_max_divisor() - Get maximum supported divisor from firmware.
+ * @clk_id: Id of clock
+ * @type: Divider type
+ *
+ * Return: Maximum divisor of a clock if the query is successful,
+ * U16_MAX if the query is not successful.
+ */
+u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
+ qdata.arg1 = clk_id;
+ qdata.arg2 = type;
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ /*
+	 * To maintain backward compatibility, return the maximum possible
+	 * value (0xFFFF) if the query for the max divisor is not successful.
+ */
+ if (ret)
+ return U16_MAX;
+
+ return ret_payload[1];
+}
+
+/**
* zynqmp_clk_register_divider() - Register a divider clock
* @name: Name of this clock
* @clk_id: Id of clock
@@ -215,6 +317,12 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
div->clk_id = clk_id;
div->div_type = nodes->type;
+ /*
+ * To achieve best possible rate, maximum limit of divider is required
+ * while computation.
+ */
+ div->max_div = zynqmp_clk_get_max_divisor(clk_id, nodes->type);
+
hw = &div->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index a541397a172c..89b599530105 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -188,10 +188,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
frac = (parent_rate * f) / FRAC_DIV;
ret = eemi_ops->clock_setdivider(clk_id, m);
- if (ret)
+ if (ret == -EUSERS)
+		WARN(1, "More devices than allowed are using %s, which is forbidden\n",
+ clk_name);
+ else if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
-
eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
return rate + frac;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5fdd76cb1768..cc909e465823 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -88,7 +88,7 @@ config ROCKCHIP_TIMER
select TIMER_OF
select CLKSRC_MMIO
help
- Enables the support for the rockchip timer driver.
+ Enables the support for the Rockchip timer driver.
config ARMADA_370_XP_TIMER
bool "Armada 370 and XP timer driver" if COMPILE_TEST
@@ -162,13 +162,13 @@ config NPCM7XX_TIMER
select CLKSRC_MMIO
help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
- While TIMER0 serves as clockevent and TIMER1 serves as clocksource.
+ where TIMER0 serves as clockevent and TIMER1 serves as clocksource.
config CADENCE_TTC_TIMER
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK
help
- Enables support for the cadence ttc driver.
+ Enables support for the Cadence TTC driver.
config ASM9260_TIMER
bool "ASM9260 timer driver" if COMPILE_TEST
@@ -190,10 +190,10 @@ config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on HAS_IOMEM
help
- Use the always on PRCMU Timer as clocksource
+ Use the always on PRCMU Timer as clocksource.
config CLPS711X_TIMER
- bool "Cirrus logic timer driver" if COMPILE_TEST
+ bool "Cirrus Logic timer driver" if COMPILE_TEST
select CLKSRC_MMIO
help
Enables support for the Cirrus Logic PS711 timer.
@@ -205,11 +205,11 @@ config ATLAS7_TIMER
Enables support for the Atlas7 timer.
config MXS_TIMER
- bool "Mxs timer driver" if COMPILE_TEST
+ bool "MXS timer driver" if COMPILE_TEST
select CLKSRC_MMIO
select STMP_DEVICE
help
- Enables support for the Mxs timer.
+ Enables support for the MXS timer.
config PRIMA2_TIMER
bool "Prima2 timer driver" if COMPILE_TEST
@@ -238,10 +238,10 @@ config KEYSTONE_TIMER
Enables support for the Keystone timer.
config INTEGRATOR_AP_TIMER
- bool "Integrator-ap timer driver" if COMPILE_TEST
+ bool "Integrator-AP timer driver" if COMPILE_TEST
select CLKSRC_MMIO
help
- Enables support for the Integrator-ap timer.
+ Enables support for the Integrator-AP timer.
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
@@ -283,8 +283,8 @@ config CLKSRC_NPS
select TIMER_OF if OF
help
NPS400 clocksource support.
- Got 64 bit counter with update rate up to 1000MHz.
- This counter is accessed via couple of 32 bit memory mapped registers.
+ It has a 64-bit counter with update rate up to 1000MHz.
+	  This counter is accessed via a couple of 32-bit memory-mapped registers.
config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
@@ -305,14 +305,14 @@ config ARC_TIMERS
help
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
(ARC700 as well as ARC HS38).
- TIMER0 serves as clockevent while TIMER1 provides clocksource
+ TIMER0 serves as clockevent while TIMER1 provides clocksource.
config ARC_TIMERS_64BIT
bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
depends on ARC_TIMERS
select TIMER_OF
help
- This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
+ This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP).
RTC is implemented inside the core, while GFRC sits outside the core in the
ARConnect IP block. The driver automatically picks one of them for the
clocksource as appropriate.
@@ -390,7 +390,7 @@ config ARM_GLOBAL_TIMER
select TIMER_OF if OF
depends on ARM
help
- This options enables support for the ARM global timer unit
+ This option enables support for the ARM global timer unit.
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module" if COMPILE_TEST
@@ -403,14 +403,14 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
depends on ARM_GLOBAL_TIMER
default y
help
- Use ARM global timer clock source as sched_clock
+ Use ARM global timer clock source as sched_clock.
config ARMV7M_SYSTICK
bool "Support for the ARMv7M system time" if COMPILE_TEST
select TIMER_OF if OF
select CLKSRC_MMIO
help
- This options enables support for the ARMv7M system timer unit
+ This option enables support for the ARMv7M system timer unit.
config ATMEL_PIT
bool "Atmel PIT support" if COMPILE_TEST
@@ -460,7 +460,7 @@ config VF_PIT_TIMER
bool
select CLKSRC_MMIO
help
- Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
config OXNAS_RPS_TIMER
bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
@@ -470,7 +470,7 @@ config OXNAS_RPS_TIMER
This enables support for the Oxford Semiconductor OXNAS RPS timers.
config SYS_SUPPORTS_SH_CMT
- bool
+ bool
config MTK_TIMER
bool "Mediatek timer driver" if COMPILE_TEST
@@ -490,13 +490,13 @@ config SPRD_TIMER
Enables support for the Spreadtrum timer driver.
config SYS_SUPPORTS_SH_MTU2
- bool
+ bool
config SYS_SUPPORTS_SH_TMU
- bool
+ bool
config SYS_SUPPORTS_EM_STI
- bool
+ bool
config CLKSRC_JCORE_PIT
bool "J-Core PIT timer driver" if COMPILE_TEST
@@ -523,7 +523,7 @@ config SH_TIMER_MTU2
help
This enables build of a clockevent driver for the Multi-Function
Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
- This hardware comes with 16 bit-timer registers.
+ This hardware comes with 16-bit timer registers.
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
@@ -580,7 +580,7 @@ config CLKSRC_TANGO_XTAL
select TIMER_OF
select CLKSRC_MMIO
help
- This enables the clocksource for Tango SoC
+ This enables the clocksource for Tango SoC.
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
@@ -591,24 +591,24 @@ config CLKSRC_PXA
platforms.
config H8300_TMR8
- bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the 8-bit timer for the H8300 platform.
config H8300_TMR16
- bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the 16-bit timer for the H8300 platform with the
- H83069 cpu.
+ H83069 CPU.
config H8300_TPU
- bool "Clocksource for the H8300 platform" if COMPILE_TEST
- depends on HAS_IOMEM
+ bool "Clocksource for the H8300 platform" if COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables the clocksource for the H8300 platform with the
- H8S2678 cpu.
+ H8S2678 CPU.
config CLKSRC_IMX_GPT
bool "Clocksource using i.MX GPT" if COMPILE_TEST
@@ -666,8 +666,8 @@ config CSKY_MP_TIMER
help
Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP
system.
- csky,mptimer is not only used in SMP system, it also could be used
- single core system. It's not a mmio reg and it use mtcr/mfcr instruction.
+	  csky,mptimer is not only used in SMP systems; it could also be used in
+	  single-core systems. It's not an mmio reg and it uses the mtcr/mfcr
+	  instructions.
config GX6605S_TIMER
bool "Gx6605s SOC system timer driver" if COMPILE_TEST
@@ -697,4 +697,14 @@ config INGENIC_TIMER
help
Support for the timer/counter unit of the Ingenic JZ SoCs.
+config MICROCHIP_PIT64B
+ bool "Microchip PIT64B support"
+ depends on OF || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+	  This option enables the Microchip PIT64B timer for Atmel-based
+	  systems. It supports oneshot and periodic modes as well as high
+	  resolution. It is used as a clocksource and a clockevent.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 4dfe4225ece7..713686faa549 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
+obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 2b196cbfadb6..b235f446ee50 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
- goto err_iounmap;
+ goto err_timer_free;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
return 0;
+err_timer_free:
+ kfree(timer);
+
err_iounmap:
iounmap(base);
return ret;
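
The fix above illustrates the usual rule for goto-style error paths: unwind labels must release resources in the reverse order of acquisition, and each failure jumps to the label that frees only what already exists. A standalone sketch of the pattern, with malloc()/free() standing in for ioremap()/kfree() from the hunk:

#include <stdio.h>
#include <stdlib.h>

static int init(void)
{
        void *base = malloc(16);        /* stands in for ioremap() */
        if (!base)
                return -1;

        void *timer = malloc(32);       /* acquired after base */
        if (!timer)
                goto err_unmap;

        if (1) {                        /* pretend setup_irq() failed */
                fprintf(stderr, "setup failed\n");
                goto err_timer_free;    /* free the newest resource first */
        }

        return 0;

err_timer_free:
        free(timer);
err_unmap:
        free(base);
        return -1;
}

int main(void)
{
        return init() ? 1 : 0;
}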
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 9039df4f90e2..ab190dffb1ed 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -279,9 +279,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
- struct resource *res;
- int irq;
- int ret;
+ int irq, ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL)
@@ -295,8 +293,7 @@ static int em_sti_probe(struct platform_device *pdev)
return irq;
/* map memory, let base point to the STI instance */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(&pdev->dev, res);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
return PTR_ERR(p->base);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 74cb299f5089..a267fe31ef13 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -4,7 +4,7 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 MCT(Multi-Core Timer) support
+ * Exynos4 MCT(Multi-Core Timer) support
*/
#include <linux/interrupt.h>
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 287d8d58c21a..9d808d595ca8 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta,
{
u64 current_tick;
- current_tick = hyperv_cs->read(NULL);
+ current_tick = hv_read_reference_counter();
current_tick += delta;
hv_init_timer(0, current_tick);
return 0;
@@ -302,22 +302,33 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
* the other that uses the TSC reference page feature as defined in the
* TLFS. The MSR version is for compatibility with old versions of
* Hyper-V and 32-bit x86. The TSC reference page version is preferred.
+ *
+ * The Hyper-V clocksource ratings of 250 are chosen to be below the
+ * TSC clocksource rating of 300. In configurations where Hyper-V offers
+ * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
+ * is available and preferred. With the higher rating, it will be the
+ * default. On older hardware and Hyper-V versions, the TSC is marked
+ * "unstable", so no TSC clocksource is created and the selected Hyper-V
+ * clocksource will be the default.
*/
-struct clocksource *hyperv_cs;
-EXPORT_SYMBOL_GPL(hyperv_cs);
+u64 (*hv_read_reference_counter)(void);
+EXPORT_SYMBOL_GPL(hv_read_reference_counter);
-static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE);
+static union {
+ struct ms_hyperv_tsc_page page;
+ u8 reserved[PAGE_SIZE];
+} tsc_pg __aligned(PAGE_SIZE);
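
The union above is a size guarantee, not just an alignment one: the page handed to the hypervisor must not share space with any other kernel data, so the object is padded out to exactly PAGE_SIZE. A small user-space model of the trick; struct tsc_page here is a stand-in, not the real ms_hyperv_tsc_page layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

struct tsc_page {               /* illustrative stand-in */
        uint32_t sequence;
        uint64_t scale;
        int64_t offset;
};

/* The union pads the object to a full page, so nothing else shares it. */
static union {
        struct tsc_page page;
        uint8_t reserved[PAGE_SIZE];
} tsc_pg __attribute__((aligned(PAGE_SIZE)));

int main(void)
{
        static_assert(sizeof(tsc_pg) == PAGE_SIZE, "must span a whole page");
        printf("size=%zu align=%zu\n", sizeof(tsc_pg), _Alignof(tsc_pg));
        return 0;
}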
struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
- return &tsc_pg;
+ return &tsc_pg.page;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
-static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
+static u64 notrace read_hv_clock_tsc(void)
{
- u64 current_tick = hv_read_tsc_page(&tsc_pg);
+ u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
if (current_tick == U64_MAX)
hv_get_time_ref_count(current_tick);
@@ -325,20 +336,50 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
return current_tick;
}
+static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
+{
+ return read_hv_clock_tsc();
+}
+
static u64 read_hv_sched_clock_tsc(void)
{
- return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
+ return read_hv_clock_tsc() - hv_sched_clock_offset;
+}
+
+static void suspend_hv_clock_tsc(struct clocksource *arg)
+{
+ u64 tsc_msr;
+
+ /* Disable the TSC page */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= ~BIT_ULL(0);
+ hv_set_reference_tsc(tsc_msr);
+}
+
+
+static void resume_hv_clock_tsc(struct clocksource *arg)
+{
+ phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
+ u64 tsc_msr;
+
+ /* Re-enable the TSC page */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= GENMASK_ULL(11, 0);
+ tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
+ hv_set_reference_tsc(tsc_msr);
}
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
- .rating = 400,
- .read = read_hv_clock_tsc,
+ .rating = 250,
+ .read = read_hv_clock_tsc_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.suspend = suspend_hv_clock_tsc,
+ .resume = resume_hv_clock_tsc,
};
-static u64 notrace read_hv_clock_msr(struct clocksource *arg)
+static u64 notrace read_hv_clock_msr(void)
{
u64 current_tick;
/*
@@ -350,15 +391,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
return current_tick;
}
+static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
+{
+ return read_hv_clock_msr();
+}
+
static u64 read_hv_sched_clock_msr(void)
{
- return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
+ return read_hv_clock_msr() - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
- .rating = 400,
- .read = read_hv_clock_msr,
+ .rating = 250,
+ .read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -371,8 +417,8 @@ static bool __init hv_init_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return false;
- hyperv_cs = &hyperv_cs_tsc;
- phys_addr = virt_to_phys(&tsc_pg);
+ hv_read_reference_counter = read_hv_clock_tsc;
+ phys_addr = virt_to_phys(hv_get_tsc_page());
/*
* The Hyper-V TLFS specifies to preserve the value of reserved
@@ -389,7 +435,7 @@ static bool __init hv_init_tsc_clocksource(void)
hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_tsc);
return true;
@@ -411,10 +457,10 @@ void __init hv_init_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
return;
- hyperv_cs = &hyperv_cs_msr;
+ hv_read_reference_counter = read_hv_clock_msr;
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 9cde50cb3220..12ac75f7571f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -905,7 +905,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
return -ENXIO;
}
- cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
+ cmt->mapbase = ioremap(mem->start, resource_size(mem));
if (cmt->mapbase == NULL) {
dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 64526e50d471..bfccb31e94ad 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -377,7 +377,7 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
return -ENXIO;
}
- mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ mtu->mapbase = ioremap(res->start, resource_size(res));
if (mtu->mapbase == NULL)
return -ENXIO;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index d49690d15536..d41df9ba3725 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -486,7 +486,7 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
return -ENXIO;
}
- tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
+ tmu->mapbase = ioremap(res->start, resource_size(res));
if (tmu->mapbase == NULL)
return -ENXIO;
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 88fe2e9ba9a3..38858e141731 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -15,6 +15,8 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
/*
* This driver configures the 2 16/32-bit count-up timers as follows:
@@ -464,13 +466,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
return 0;
}
-/**
- * ttc_timer_init - Initialize the timer
- *
- * Initializes the timer hardware and register the clock source and clock event
- * timers with Linux kernal timer framework
- */
-static int __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_probe(struct platform_device *pdev)
{
unsigned int irq;
void __iomem *timer_baseaddr;
@@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer)
static int initialized;
int clksel, ret;
u32 timer_width = 16;
+ struct device_node *timer = pdev->dev.of_node;
if (initialized)
return 0;
@@ -532,4 +529,17 @@ static int __init ttc_timer_init(struct device_node *timer)
return 0;
}
-TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+static const struct of_device_id ttc_timer_of_match[] = {
+ {.compatible = "cdns,ttc"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ttc_timer_of_match);
+
+static struct platform_driver ttc_timer_driver = {
+ .driver = {
+ .name = "cdns_ttc_timer",
+ .of_match_table = ttc_timer_of_match,
+ },
+};
+builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe);
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index 62745c962049..e421946a91c5 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -302,10 +302,6 @@ int __init davinci_timer_register(struct clk *clk,
return rv;
}
- clockevents_config_and_register(&clockevent->dev, tick_rate,
- DAVINCI_TIMER_MIN_DELTA,
- DAVINCI_TIMER_MAX_DELTA);
-
davinci_clocksource.dev.rating = 300;
davinci_clocksource.dev.read = davinci_clocksource_read;
davinci_clocksource.dev.mask =
@@ -323,6 +319,10 @@ int __init davinci_timer_register(struct clk *clk,
davinci_clocksource_init_tim34(base);
}
+ clockevents_config_and_register(&clockevent->dev, tick_rate,
+ DAVINCI_TIMER_MIN_DELTA,
+ DAVINCI_TIMER_MAX_DELTA);
+
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
pr_err("Unable to register clocksource");
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
new file mode 100644
index 000000000000..bd63d3484838
--- /dev/null
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 64-bit Periodic Interval Timer driver
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define MCHP_PIT64B_CR 0x00 /* Control Register */
+#define MCHP_PIT64B_CR_START BIT(0)
+#define MCHP_PIT64B_CR_SWRST BIT(8)
+
+#define MCHP_PIT64B_MR 0x04 /* Mode Register */
+#define MCHP_PIT64B_MR_CONT BIT(0)
+#define MCHP_PIT64B_MR_ONE_SHOT (0)
+#define MCHP_PIT64B_MR_SGCLK BIT(3)
+#define MCHP_PIT64B_MR_PRES GENMASK(11, 8)
+
+#define MCHP_PIT64B_LSB_PR 0x08 /* LSB Period Register */
+
+#define MCHP_PIT64B_MSB_PR 0x0C /* MSB Period Register */
+
+#define MCHP_PIT64B_IER 0x10 /* Interrupt Enable Register */
+#define MCHP_PIT64B_IER_PERIOD BIT(0)
+
+#define MCHP_PIT64B_ISR 0x1C /* Interrupt Status Register */
+
+#define MCHP_PIT64B_TLSBR 0x20 /* Timer LSB Register */
+
+#define MCHP_PIT64B_TMSBR 0x24 /* Timer MSB Register */
+
+#define MCHP_PIT64B_PRES_MAX 0x10
+#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0)
+#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8))
+#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
+#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */
+#define MCHP_PIT64B_DEF_CE_FREQ	32768		/* 32 kHz */
+
+#define MCHP_PIT64B_NAME "pit64b"
+
+/**
+ * struct mchp_pit64b_timer - PIT64B timer data structure
+ * @base: base address of PIT64B hardware block
+ * @pclk: PIT64B's peripheral clock
+ * @gclk: PIT64B's generic clock
+ * @mode: precomputed value for mode register
+ */
+struct mchp_pit64b_timer {
+ void __iomem *base;
+ struct clk *pclk;
+ struct clk *gclk;
+ u32 mode;
+};
+
+/**
+ * struct mchp_pit64b_clkevt - PIT64B clockevent data structure
+ * @timer: PIT64B timer
+ * @clkevt: clockevent
+ */
+struct mchp_pit64b_clkevt {
+ struct mchp_pit64b_timer timer;
+ struct clock_event_device clkevt;
+};
+
+#define to_mchp_pit64b_timer(x) \
+ ((struct mchp_pit64b_timer *)container_of(x,\
+ struct mchp_pit64b_clkevt, clkevt))
+
+/* Base address for clocksource timer. */
+static void __iomem *mchp_pit64b_cs_base;
+/* Default cycles for clockevent timer. */
+static u64 mchp_pit64b_ce_cycles;
+
+static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
+{
+ unsigned long flags;
+ u32 low, high;
+
+ raw_local_irq_save(flags);
+
+ /*
+	 * When using a 64-bit period, TLSB must be read first, followed by
+	 * the read of TMSB. This sequence generates an atomic read of the
+	 * 64-bit timer value whatever the lapse of time between the accesses.
+ */
+ low = readl_relaxed(base + MCHP_PIT64B_TLSBR);
+ high = readl_relaxed(base + MCHP_PIT64B_TMSBR);
+
+ raw_local_irq_restore(flags);
+
+ return (((u64)high << 32) | low);
+}
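
The read sequence above relies on a hardware latch: reading TLSB captures TMSB, so the pair stays consistent no matter how long the two accesses take. A toy user-space model of that latching contract; the simulated device below is an assumption for illustration, matching the comment's description:

#include <stdint.h>
#include <stdio.h>

static uint64_t counter = 0x00000001ffffffffULL;   /* simulated 64-bit counter */
static uint32_t latched_msb;

static uint32_t read_lsb(void)
{
        latched_msb = (uint32_t)(counter >> 32);   /* hardware latches MSB here */
        return (uint32_t)counter;
}

static uint32_t read_msb(void)
{
        return latched_msb;                        /* returns the latched value */
}

static uint64_t read_counter64(void)
{
        uint32_t low = read_lsb();                 /* must come first */
        uint32_t high = read_msb();

        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        uint64_t v = read_counter64();

        counter += 2;                              /* counter keeps ticking */
        printf("read %#llx\n", (unsigned long long)v);
        return 0;
}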
+
+static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
+ u64 cycles, u32 mode, u32 irqs)
+{
+ u32 low, high;
+
+ low = cycles & MCHP_PIT64B_LSBMASK;
+ high = cycles >> 32;
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR);
+ writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR);
+ writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR);
+ writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER);
+ writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
+}
+
+static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static u64 mchp_pit64b_sched_read_clk(void)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+
+ return 0;
+}
+
+static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
+ MCHP_PIT64B_IER_PERIOD);
+
+ return 0;
+}
+
+static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
+ struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
+ MCHP_PIT64B_IER_PERIOD);
+
+ return 0;
+}
+
+static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer->gclk);
+ clk_disable_unprepare(timer->pclk);
+}
+
+static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
+{
+ struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+ clk_prepare_enable(timer->pclk);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_prepare_enable(timer->gclk);
+}
+
+static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
+{
+ struct mchp_pit64b_clkevt *irq_data = dev_id;
+
+ /* Need to clear the interrupt. */
+ readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR);
+
+ irq_data->clkevt.event_handler(&irq_data->clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
+ u32 max_rate)
+{
+ u32 tmp;
+
+ for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) {
+ tmp = clk_rate / (*pres + 1);
+ if (tmp <= max_rate)
+ break;
+ }
+
+	/* Use the biggest prescaler if we didn't match one. */
+ if (*pres == MCHP_PIT64B_PRES_MAX)
+ *pres = MCHP_PIT64B_PRES_MAX - 1;
+}
+
+/**
+ * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at
+ * runtime; this includes prescaler and SGCLK bit
+ *
+ * The PIT64B timer may be fed by gclk or pclk. When gclk is used, its rate
+ * has to be at least 3 times lower than pclk's rate. The pclk rate is fixed;
+ * the gclk rate could be changed via the clock APIs. The chosen clock (pclk
+ * or gclk) could be divided by the internal PIT64B divider.
+ *
+ * This function first tries to use GCLK by requesting the desired rate from
+ * the PMC and then using the internal PIT64B prescaler, if any, to reach the
+ * requested rate. If PCLK/GCLK < 3 (a condition required by the PIT64B
+ * hardware), the function falls back on using PCLK as the clock source for
+ * the PIT64B timer, choosing the highest prescaler if it cannot find one
+ * that matches the requested frequency.
+ *
+ * Below is the PIT64B block in relation to the PMC:
+ *
+ * PIT64B
+ * PMC +------------------------------------+
+ * +----+ | +-----+ |
+ * | |-->gclk -->|-->| | +---------+ +-----+ |
+ * | | | | MUX |--->| Divider |->|timer| |
+ * | |-->pclk -->|-->| | +---------+ +-----+ |
+ * +----+ | +-----+ |
+ * | ^ |
+ * | sel |
+ * +------------------------------------+
+ *
+ * Where:
+ * - gclk rate <= pclk rate/3
+ * - gclk rate could be requested from PMC
+ * - pclk rate is fixed (cannot be requested from PMC)
+ */
+static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer,
+ unsigned long max_rate)
+{
+ unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX;
+ long gclk_round = 0;
+ u32 pres, best_pres = 0;
+
+ pclk_rate = clk_get_rate(timer->pclk);
+ if (!pclk_rate)
+ return -EINVAL;
+
+ timer->mode = 0;
+
+ /* Try using GCLK. */
+ gclk_round = clk_round_rate(timer->gclk, max_rate);
+ if (gclk_round < 0)
+ goto pclk;
+
+ if (pclk_rate / gclk_round < 3)
+ goto pclk;
+
+ mchp_pit64b_pres_compute(&pres, gclk_round, max_rate);
+ best_diff = abs(gclk_round / (pres + 1) - max_rate);
+ best_pres = pres;
+
+ if (!best_diff) {
+ timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ goto done;
+ }
+
+pclk:
+ /* Check if requested rate could be obtained using PCLK. */
+ mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate);
+ diff = abs(pclk_rate / (pres + 1) - max_rate);
+
+ if (best_diff > diff) {
+ /* Use PCLK. */
+ best_pres = pres;
+ } else {
+ /* Use GCLK. */
+ timer->mode |= MCHP_PIT64B_MR_SGCLK;
+ clk_set_rate(timer->gclk, gclk_round);
+ }
+
+done:
+ timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres);
+
+ pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n",
+ timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres,
+ timer->mode & MCHP_PIT64B_MR_SGCLK ?
+ gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1));
+
+ return 0;
+}
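
A compressed numeric walk-through of the selection above: for each candidate clock, find the smallest prescaler that brings the output rate at or below the target, then keep the clock with the smaller residual error (gclk wins ties, as in the driver, since pclk is only chosen when strictly better). The rates below are invented for the example:

#include <stdio.h>
#include <stdlib.h>

#define PRES_MAX 16     /* mirrors MCHP_PIT64B_PRES_MAX */

/* Smallest prescaler whose output rate is at or below max_rate. */
static unsigned int pres_compute(unsigned long clk_rate, unsigned long max_rate)
{
        unsigned int pres;

        for (pres = 0; pres < PRES_MAX; pres++)
                if (clk_rate / (pres + 1) <= max_rate)
                        break;
        return pres == PRES_MAX ? PRES_MAX - 1 : pres;
}

int main(void)
{
        unsigned long pclk = 200000000UL, gclk = 15000000UL, want = 5000000UL;
        unsigned int gp = pres_compute(gclk, want);
        unsigned int pp = pres_compute(pclk, want);
        long gdiff = labs((long)(gclk / (gp + 1)) - (long)want);
        long pdiff = labs((long)(pclk / (pp + 1)) - (long)want);

        /* gclk is eligible here since pclk/gclk > 3; compare residual errors. */
        printf("use %s, pres=%u\n", gdiff <= pdiff ? "gclk" : "pclk",
               gdiff <= pdiff ? gp : pp);
        return 0;
}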
+
+static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
+ u32 clk_rate)
+{
+ int ret;
+
+ mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
+
+ mchp_pit64b_cs_base = timer->base;
+
+ ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate,
+ 210, 64, mchp_pit64b_clksrc_read);
+ if (ret) {
+ pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
+
+ /* Stop timer. */
+ writel_relaxed(MCHP_PIT64B_CR_SWRST,
+ timer->base + MCHP_PIT64B_CR);
+
+ return ret;
+ }
+
+ sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
+
+ return 0;
+}
+
+static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
+ u32 clk_rate, u32 irq)
+{
+ struct mchp_pit64b_clkevt *ce;
+ int ret;
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ);
+
+ ce->timer.base = timer->base;
+ ce->timer.pclk = timer->pclk;
+ ce->timer.gclk = timer->gclk;
+ ce->timer.mode = timer->mode;
+ ce->clkevt.name = MCHP_PIT64B_NAME;
+ ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
+ ce->clkevt.rating = 150;
+ ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown;
+ ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic;
+ ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event;
+ ce->clkevt.suspend = mchp_pit64b_clkevt_suspend;
+ ce->clkevt.resume = mchp_pit64b_clkevt_resume;
+ ce->clkevt.cpumask = cpumask_of(0);
+ ce->clkevt.irq = irq;
+
+ ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER,
+ "pit64b_tick", ce);
+ if (ret) {
+ pr_debug("clkevt: Failed to setup PIT64B IRQ\n");
+ kfree(ce);
+ return ret;
+ }
+
+ clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
+ bool clkevt)
+{
+ u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ;
+ struct mchp_pit64b_timer timer;
+ unsigned long clk_rate;
+ u32 irq = 0;
+ int ret;
+
+ /* Parse DT node. */
+ timer.pclk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(timer.pclk))
+ return PTR_ERR(timer.pclk);
+
+ timer.gclk = of_clk_get_by_name(node, "gclk");
+ if (IS_ERR(timer.gclk))
+ return PTR_ERR(timer.gclk);
+
+ timer.base = of_iomap(node, 0);
+ if (!timer.base)
+ return -ENXIO;
+
+ if (clkevt) {
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto io_unmap;
+ }
+ }
+
+	/* Initialize the mode (prescaler + SGCLK bit) to be used at runtime. */
+ ret = mchp_pit64b_init_mode(&timer, freq);
+ if (ret)
+ goto irq_unmap;
+
+ ret = clk_prepare_enable(timer.pclk);
+ if (ret)
+ goto irq_unmap;
+
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK) {
+ ret = clk_prepare_enable(timer.gclk);
+ if (ret)
+ goto pclk_unprepare;
+
+ clk_rate = clk_get_rate(timer.gclk);
+ } else {
+ clk_rate = clk_get_rate(timer.pclk);
+ }
+ clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1);
+
+ if (clkevt)
+ ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq);
+ else
+ ret = mchp_pit64b_init_clksrc(&timer, clk_rate);
+
+ if (ret)
+ goto gclk_unprepare;
+
+ return 0;
+
+gclk_unprepare:
+ if (timer.mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer.gclk);
+pclk_unprepare:
+ clk_disable_unprepare(timer.pclk);
+irq_unmap:
+ irq_dispose_mapping(irq);
+io_unmap:
+ iounmap(timer.base);
+
+ return ret;
+}
+
+static int __init mchp_pit64b_dt_init(struct device_node *node)
+{
+ static int inits;
+
+ switch (inits++) {
+ case 0:
+ /* 1st request, register clockevent. */
+ return mchp_pit64b_dt_init_timer(node, true);
+ case 1:
+ /* 2nd request, register clocksource. */
+ return mchp_pit64b_dt_init_timer(node, false);
+ }
+
+ /* The rest, don't care. */
+ return -EINVAL;
+}
+
+TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 4e54856ce2a5..c4f15c4068c0 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
return get_cycles64();
}
-static u64 riscv_sched_clock(void)
+static u64 notrace riscv_sched_clock(void)
{
return get_cycles64();
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 5394d9dbdfbc..269a994d6a99 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -780,7 +780,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
struct omap_dm_timer *timer;
- struct resource *mem, *irq;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
@@ -796,24 +795,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (unlikely(!irq)) {
- dev_err(dev, "%s: no IRQ resource.\n", __func__);
- return -ENODEV;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!mem)) {
- dev_err(dev, "%s: no memory resource.\n", __func__);
- return -ENODEV;
- }
-
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
+ timer->irq = platform_get_irq(pdev, 0);
+ if (timer->irq < 0)
+ return timer->irq;
+
timer->fclk = ERR_PTR(-ENODEV);
- timer->io_base = devm_ioremap_resource(dev, mem);
+ timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
@@ -836,7 +827,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (pdata)
timer->errata = pdata->timer_errata;
- timer->irq = irq->start;
timer->pdev = pdev;
pm_runtime_enable(dev);
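
The probe conversion above follows a common consolidation: platform_get_irq() already logs lookup failures itself, and devm_platform_ioremap_resource() folds platform_get_resource() plus devm_ioremap_resource() into a single call. A condensed sketch of the resulting probe shape; struct foo and its fields are hypothetical, and kernel context is assumed (this is not a standalone program):

struct foo {
	void __iomem *base;
	int irq;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);	/* logs its own error */
	if (priv->irq < 0)
		return priv->irq;

	/* Looks up IORESOURCE_MEM 0 and ioremaps it in one step. */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}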
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 77b0e5d0fb13..4f86ce2db34f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct private_data *priv = policy->driver_data;
+ cpufreq_cpu_put(policy);
+
return brcm_avs_get_frequency(priv->base);
}
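
The two added lines matter because cpufreq_cpu_get() takes a reference on the policy (and pins the driver module) that the caller must drop with cpufreq_cpu_put(); the ->get() callback was leaking that reference on every call. A sketch of the balanced pattern; read_hw_freq() is a hypothetical stand-in for the driver's register read:

static unsigned int foo_cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq;

	if (!policy)
		return 0;

	freq = read_hw_freq(policy->driver_data);	/* hypothetical helper */
	cpufreq_cpu_put(policy);			/* balance the reference */

	return freq;
}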
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8d8da763adc5..bda0b2406fba 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -39,7 +39,7 @@
static struct cppc_cpudata **all_cpu_data;
struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE +1];
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
@@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void)
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision)
+ wa_info[i].oem_revision == tbl->oem_revision) {
apply_hisi_workaround = true;
+ break;
+ }
}
+
+ acpi_put_table(tbl);
}
/* Callback function used to retrieve the max frequency from DMI */
@@ -217,7 +221,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
return ret;
}
-static int cppc_verify_policy(struct cpufreq_policy *policy)
+static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index f1d170dcf4d3..f2ae9cd455c1 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
{ .compatible = "marvell,armadaxp", },
@@ -121,6 +122,8 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt8176", },
{ .compatible = "mediatek,mt8183", },
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index cd53272e2fa2..f7a7bcf6f52e 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -291,7 +291,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
* nforce2_verify - verifies a new CPUFreq policy
* @policy: new policy
*/
-static int nforce2_verify(struct cpufreq_policy *policy)
+static int nforce2_verify(struct cpufreq_policy_data *policy)
{
unsigned int fsb_pol_max;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 77114a3897fb..cbe6c94bf158 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,6 +74,9 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -102,6 +105,8 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
+static struct kobject *cpufreq_global_kobject;
+
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@@ -616,25 +621,22 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
-static int cpufreq_parse_policy(char *str_governor,
- struct cpufreq_policy *policy)
+static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
- }
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
- }
- return -EINVAL;
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_PERFORMANCE;
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_POWERSAVE;
+
+ return CPUFREQ_POLICY_UNKNOWN;
}
/**
* cpufreq_parse_governor - parse a governor string only for has_target()
+ * @str_governor: Governor name.
*/
-static int cpufreq_parse_governor(char *str_governor,
- struct cpufreq_policy *policy)
+static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
struct cpufreq_governor *t;
@@ -648,7 +650,7 @@ static int cpufreq_parse_governor(char *str_governor,
ret = request_module("cpufreq_%s", str_governor);
if (ret)
- return -EINVAL;
+ return NULL;
mutex_lock(&cpufreq_governor_mutex);
@@ -659,12 +661,7 @@ static int cpufreq_parse_governor(char *str_governor,
mutex_unlock(&cpufreq_governor_mutex);
- if (t) {
- policy->governor = t;
- return 0;
- }
-
- return -EINVAL;
+ return t;
}
/**
@@ -765,28 +762,33 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
+ char str_governor[16];
int ret;
- char str_governor[16];
- struct cpufreq_policy new_policy;
-
- memcpy(&new_policy, policy, sizeof(*policy));
ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
if (cpufreq_driver->setpolicy) {
- if (cpufreq_parse_policy(str_governor, &new_policy))
+ unsigned int new_pol;
+
+ new_pol = cpufreq_parse_policy(str_governor);
+ if (!new_pol)
return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, NULL, new_pol);
} else {
- if (cpufreq_parse_governor(str_governor, &new_policy))
+ struct cpufreq_governor *new_gov;
+
+ new_gov = cpufreq_parse_governor(str_governor);
+ if (!new_gov)
return -EINVAL;
- }
- ret = cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, new_gov,
+ CPUFREQ_POLICY_UNKNOWN);
- if (new_policy.governor)
- module_put(new_policy.governor->owner);
+ module_put(new_gov->owner);
+ }
return ret ? ret : count;
}
@@ -1053,40 +1055,33 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *gov = NULL, *def_gov = NULL;
- struct cpufreq_policy new_policy;
-
- memcpy(&new_policy, policy, sizeof(*policy));
-
- def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *gov = NULL;
+ unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
if (has_target()) {
- /*
- * Update governor of new_policy to the governor used before
- * hotplug
- */
+ /* Update policy governor to the one used before hotplug. */
gov = find_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, policy->cpu);
- } else {
- if (!def_gov)
- return -ENODATA;
+ policy->governor->name, policy->cpu);
+ } else if (def_gov) {
gov = def_gov;
+ } else {
+ return -ENODATA;
}
- new_policy.governor = gov;
} else {
/* Use the default policy if there is no last_policy. */
if (policy->last_policy) {
- new_policy.policy = policy->last_policy;
+ pol = policy->last_policy;
+ } else if (def_gov) {
+ pol = cpufreq_parse_policy(def_gov->name);
} else {
- if (!def_gov)
- return -ENODATA;
- cpufreq_parse_policy(def_gov->name, &new_policy);
+ return -ENODATA;
}
}
- return cpufreq_set_policy(policy, &new_policy);
+ return cpufreq_set_policy(policy, gov, pol);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1114,13 +1109,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
void refresh_frequency_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy new_policy;
-
if (!policy_is_inactive(policy)) {
- new_policy = *policy;
pr_debug("updating policy for CPU %u\n", policy->cpu);
- cpufreq_set_policy(policy, &new_policy);
+ cpufreq_set_policy(policy, policy->governor, policy->policy);
}
}
EXPORT_SYMBOL(refresh_frequency_limits);
@@ -2364,46 +2356,49 @@ EXPORT_SYMBOL(cpufreq_get_policy);
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
- * @new_policy: New policy data.
+ * @new_gov: Policy governor pointer.
+ * @new_pol: Policy value (for drivers with built-in governors).
*
- * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
- * min and max parameters of @new_policy to @policy and either invoke the
- * driver's ->setpolicy() callback (if present) or carry out a governor update
- * for @policy. That is, run the current governor's ->limits() callback (if the
- * governor field in @new_policy points to the same object as the one in
- * @policy) or replace the governor for @policy with the new one stored in
- * @new_policy.
+ * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
+ * limits to be set for the policy, update @policy with the verified limits
+ * values and either invoke the driver's ->setpolicy() callback (if present) or
+ * carry out a governor update for @policy. That is, run the current governor's
+ * ->limits() callback (if @new_gov points to the same object as the one in
+ * @policy) or replace the governor for @policy with @new_gov.
*
* The cpuinfo part of @policy is not updated by this function.
*/
-int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol)
{
+ struct cpufreq_policy_data new_data;
struct cpufreq_governor *old_gov;
int ret;
- pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
- new_policy->cpu, new_policy->min, new_policy->max);
-
- memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
-
+ memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+ new_data.freq_table = policy->freq_table;
+ new_data.cpu = policy->cpu;
/*
* The PM QoS framework collects all the requests from users and provides us
* the final aggregated value here.
*/
- new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
- new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+ new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_data.cpu, new_data.min, new_data.max);
/*
* Verify that the CPU speed can be set within these limits and make sure
* that min <= max.
*/
- ret = cpufreq_driver->verify(new_policy);
+ ret = cpufreq_driver->verify(&new_data);
if (ret)
return ret;
- policy->min = new_policy->min;
- policy->max = new_policy->max;
+ policy->min = new_data.min;
+ policy->max = new_data.max;
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
@@ -2412,12 +2407,12 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
- policy->policy = new_policy->policy;
+ policy->policy = new_pol;
pr_debug("setting range\n");
return cpufreq_driver->setpolicy(policy);
}
- if (new_policy->governor == policy->governor) {
+ if (new_gov == policy->governor) {
pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
@@ -2434,7 +2429,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
}
/* start new governor */
- policy->governor = new_policy->governor;
+ policy->governor = new_gov;
ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);
@@ -2752,9 +2747,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
static int __init cpufreq_core_init(void)
{
if (cpufreq_disabled())
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index ded427e0a488..e117b0059123 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -60,7 +60,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
* Generic routine to verify policy & frequency table, requires driver to set
* policy->freq_table prior to it.
*/
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
{
if (!policy->freq_table)
return -ENODEV;
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index e97b5733aa24..75b3ef7ec679 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -328,7 +328,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
* for the hardware supported by the driver.
*/
-static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
{
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 85a6efd6b68f..6cb8193421ea 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
else
@@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = 0xb;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d2fa3e9ccd97..c81e1ff29069 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -172,7 +172,7 @@ struct vid_data {
/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
- * @turbo_disabled: Whethet or not turbo P-states are available at all,
+ * @turbo_disabled: Whether or not turbo P-states are available at all,
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
@@ -2036,8 +2036,9 @@ static int intel_pstate_get_max_freq(struct cpudata *cpu)
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
-static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
- struct cpudata *cpu)
+static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ unsigned int policy_min,
+ unsigned int policy_max)
{
int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
@@ -2056,18 +2057,17 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
turbo_max = cpu->pstate.turbo_pstate;
}
- max_policy_perf = max_state * policy->max / max_freq;
- if (policy->max == policy->min) {
+ max_policy_perf = max_state * policy_max / max_freq;
+ if (policy_max == policy_min) {
min_policy_perf = max_policy_perf;
} else {
- min_policy_perf = max_state * policy->min / max_freq;
+ min_policy_perf = max_state * policy_min / max_freq;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}
pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
- policy->cpu, max_state,
- min_policy_perf, max_policy_perf);
+ cpu->cpu, max_state, min_policy_perf, max_policy_perf);
/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
@@ -2081,7 +2081,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);
- pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+ pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
global_min, global_max);
cpu->min_perf_ratio = max(min_policy_perf, global_min);
@@ -2094,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
cpu->max_perf_ratio);
}
- pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+ pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
cpu->max_perf_ratio,
cpu->min_perf_ratio);
}
@@ -2114,7 +2114,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
- intel_pstate_update_perf_limits(policy, cpu);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
@@ -2143,8 +2143,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
- struct cpudata *cpu)
+static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
{
if (!hwp_active &&
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
@@ -2155,7 +2155,7 @@ static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
}
}
-static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2163,11 +2163,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
- if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
- policy->policy != CPUFREQ_POLICY_PERFORMANCE)
- return -EINVAL;
-
- intel_pstate_adjust_policy_max(policy, cpu);
+ intel_pstate_adjust_policy_max(cpu, policy);
return 0;
}
@@ -2268,7 +2264,7 @@ static struct cpufreq_driver intel_pstate = {
.name = "intel_pstate",
};
-static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2276,9 +2272,9 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
- intel_pstate_adjust_policy_max(policy, cpu);
+ intel_pstate_adjust_policy_max(cpu, policy);
- intel_pstate_update_perf_limits(policy, cpu);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
return 0;
}
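
The arithmetic behind intel_pstate_update_perf_limits() is a straight proportional mapping from policy frequencies to P-state ratios; a worked sketch with made-up numbers:

/*
 * perf_ratio = max_state * policy_freq / max_freq
 *
 * e.g. max_state = 40, max_freq = 4000000 kHz:
 *   policy_max = 3000000 kHz -> max_policy_perf = 40 * 3000000 / 4000000 = 30
 *   policy_min = 1000000 kHz -> min_policy_perf = 40 * 1000000 / 4000000 = 10
 */
static int32_t example_policy_perf(int max_state, unsigned int policy_freq,
				   int max_freq)
{
	return max_state * policy_freq / max_freq;
}
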
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index cb74bdc5baaa..70ad8fe1d78b 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct resource *res;
int err;
priv.dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv.base = devm_ioremap_resource(&pdev->dev, res);
+ priv.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
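
The conversion pattern used here (and in the tegra186, clps711x and kirkwood-cpuidle hunks below) collapses the two-step platform_get_resource() + devm_ioremap_resource() into a single call. A minimal probe sketch:

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM index 0 and ioremaps it, device-managed. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
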
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 64b8689f7a4a..0b08be8bff76 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -122,7 +122,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
-static int longrun_verify_policy(struct cpufreq_policy *policy)
+static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
if (!policy)
return -EINVAL;
@@ -130,10 +130,6 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
policy->cpu = 0;
cpufreq_verify_within_cpu_limits(policy);
- if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
- (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
- return -EINVAL;
-
return 0;
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e9caa9586982..909f40fbcde2 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void)
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
- cpu_freq = LOONGSON_CHIPCFG(0);
- LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
+ cpu_freq = readl(LOONGSON_CHIPCFG);
+ /* Put CPU into wait mode */
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+ /* Restore CPU state */
+ writel(cpu_freq, LOONGSON_CHIPCFG);
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index fdc767fdbe6a..5789fe7a94bd 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -109,7 +109,7 @@ struct pcc_cpu {
static struct pcc_cpu __percpu *pcc_cpu_info;
-static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
+static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
@@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void)
goto out_free;
}
- pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
+ pcch_virt_addr = ioremap(mem_resource->minimum,
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 106910351c41..5c221bc90210 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
int ret;
+ struct cpufreq_policy *policy;
mutex_lock(&cpufreq_lock);
@@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
*/
if (s3c_freq->is_dvs) {
pr_debug("cpufreq: leave dvs on reboot\n");
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
+ cpufreq_cpu_put(policy);
+
if (ret < 0)
return NOTIFY_BAD;
}
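
cpufreq_cpu_get() can return NULL and takes a reference on the policy; the fix above (and the matching s5pv210 one below) checks the pointer and drops the reference after use. The pattern in isolation:

static int example_target_cpu0(unsigned int target_freq)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = cpufreq_cpu_get(0);
	if (!policy)
		return -ENODEV;

	ret = cpufreq_driver_target(policy, target_freq, 0);
	cpufreq_cpu_put(policy);	/* balance the reference */

	return ret;
}
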
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5d10030f2560..e84281e2561d 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
int ret;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+ cpufreq_cpu_put(policy);
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index e6182c89df79..61623e2ff149 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -261,7 +261,7 @@ static void scmi_cpufreq_remove(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_PERF },
+ { SCMI_PROTOCOL_PERF, "cpufreq" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 5096c0ab781b..0ac265d47ef0 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,7 +87,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
}
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
struct cpufreq_frequency_table *freq_table;
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index bcecb068b51b..2e233ad72758 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- struct resource *res;
unsigned int i = 0, err;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
goto put_bpmp;
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 707dbc1b7ac8..98d392196df2 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -22,7 +22,7 @@ static struct cpufreq_driver ucv2_driver;
/* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway.
*/
-static int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy_data *policy)
{
if (policy->cpu)
return -EINVAL;
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 506e3f2bf53a..83c85d3d67e3 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -434,7 +434,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
if (cur_cluster < MAX_CLUSTERS) {
int cpu;
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
for_each_cpu(cpu, policy->cpus)
per_cpu(physical_cluster, cpu) = cur_cluster;
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index a224d33dda7f..62272ecfa771 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
+ depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
depends on MCPM && !ARM64
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
@@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
- depends on MACH_KIRKWOOD && !ARM64
+ depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64
help
This adds the CPU Idle driver for Marvell Kirkwood SoCs.
config ARM_ZYNQ_CPUIDLE
bool "CPU Idle Driver for Xilinx Zynq processors"
- depends on ARCH_ZYNQ && !ARM64
+ depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Xilinx Zynq processors.
@@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
- depends on ARCH_AT91 && !ARM64
+ depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle for AT91 processors.
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
- depends on ARCH_EXYNOS && !ARM64
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
Select this to enable cpuidle for Exynos processors.
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
- depends on ARCH_MVEBU && !ARM64
+ depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index ee70d5cc5b99..cc8c769d7fa9 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -21,7 +21,9 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
-obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
+cpuidle_psci-y := cpuidle-psci.o
+cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index b607278df25b..04003b90dc49 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -89,6 +89,7 @@
* @coupled_cpus: mask of cpus that are part of the coupled set
* @requested_state: array of requested states for cpus in the coupled set
* @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @abort_barrier: synchronisation point for abort cases
* @online_count: count of cpus that are online
* @refcnt: reference count of cpuidle devices that are using this struct
* @prevent: flag to prevent coupled idle while a cpu is hotplugging
@@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu)
/**
* cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
- * @dev: struct cpuidle_device for this cpu
+ * @this_cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Calls cpuidle_coupled_poke on all other online cpus.
@@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
/**
* cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
* @next_state: the index in drv->states of the requested state for this cpu
*
@@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu,
/**
* cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Removes the requested idle state for the specified cpuidle device.
@@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
/**
* cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
- * @cpu - this cpu
+ * @cpu: this cpu
*
* Turns on interrupts and spins until any outstanding poke interrupts have
* been processed and the poke bit has been cleared.
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c
index 6e36740f5719..fc22c59b6c73 100644
--- a/drivers/cpuidle/cpuidle-clps711x.c
+++ b/drivers/cpuidle/cpuidle-clps711x.c
@@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = {
static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
+ clps711x_halt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clps711x_halt))
return PTR_ERR(clps711x_halt);
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index d23d8f468c12..511c4f46027a 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = {
/* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+ ddr_operation_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddr_operation_base))
return PTR_ERR(ddr_operation_base);
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
new file mode 100644
index 000000000000..423f03bbeb74
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PM domains for CPUs via genpd - managed by cpuidle-psci.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "cpuidle-psci.h"
+
+struct psci_pd_provider {
+ struct list_head link;
+ struct device_node *node;
+};
+
+static LIST_HEAD(psci_pd_providers);
+static bool osi_mode_enabled __initdata;
+
+static int psci_pd_power_off(struct generic_pm_domain *pd)
+{
+ struct genpd_power_state *state = &pd->states[pd->state_idx];
+ u32 *pd_state;
+
+ if (!state->data)
+ return 0;
+
+ /* OSI mode is enabled, set the corresponding domain state. */
+ pd_state = state->data;
+ psci_set_domain_state(*pd_state);
+
+ return 0;
+}
+
+static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states,
+ int state_count)
+{
+ int i, ret;
+ u32 psci_state, *psci_state_buf;
+
+ for (i = 0; i < state_count; i++) {
+ ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
+ &psci_state);
+ if (ret)
+ goto free_state;
+
+ psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!psci_state_buf) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+ *psci_state_buf = psci_state;
+ states[i].data = psci_state_buf;
+ }
+
+ return 0;
+
+free_state:
+ i--;
+ for (; i >= 0; i--)
+ kfree(states[i].data);
+ return ret;
+}
+
+static int __init psci_pd_parse_states(struct device_node *np,
+ struct genpd_power_state **states, int *state_count)
+{
+ int ret;
+
+ /* Parse the domain idle states. */
+ ret = of_genpd_parse_idle_states(np, states, state_count);
+ if (ret)
+ return ret;
+
+ /* Fill out the PSCI specifics for each found state. */
+ ret = psci_pd_parse_state_nodes(*states, *state_count);
+ if (ret)
+ kfree(*states);
+
+ return ret;
+}
+
+static void psci_pd_free_states(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ int i;
+
+ for (i = 0; i < state_count; i++)
+ kfree(states[i].data);
+ kfree(states);
+}
+
+static int __init psci_pd_init(struct device_node *np)
+{
+ struct generic_pm_domain *pd;
+ struct psci_pd_provider *pd_provider;
+ struct dev_power_governor *pd_gov;
+ struct genpd_power_state *states = NULL;
+ int ret = -ENOMEM, state_count = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ goto out;
+
+ pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
+ goto free_pd;
+
+ pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+ if (!pd->name)
+ goto free_pd_prov;
+
+ /*
+ * Parse the domain idle states and let genpd manage the state selection
+ * for those being compatible with "domain-idle-state".
+ */
+ ret = psci_pd_parse_states(np, &states, &state_count);
+ if (ret)
+ goto free_name;
+
+ pd->free_states = psci_pd_free_states;
+ pd->name = kbasename(pd->name);
+ pd->power_off = psci_pd_power_off;
+ pd->states = states;
+ pd->state_count = state_count;
+ pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+ /* Use governor for CPU PM domains if it has some states to manage. */
+ pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+ ret = pm_genpd_init(pd, pd_gov, false);
+ if (ret) {
+ psci_pd_free_states(states, state_count);
+ goto free_name;
+ }
+
+ ret = of_genpd_add_provider_simple(np, pd);
+ if (ret)
+ goto remove_pd;
+
+ pd_provider->node = of_node_get(np);
+ list_add(&pd_provider->link, &psci_pd_providers);
+
+ pr_debug("init PM domain %s\n", pd->name);
+ return 0;
+
+remove_pd:
+ pm_genpd_remove(pd);
+free_name:
+ kfree(pd->name);
+free_pd_prov:
+ kfree(pd_provider);
+free_pd:
+ kfree(pd);
+out:
+ pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+ return ret;
+}
+
+static void __init psci_pd_remove(void)
+{
+ struct psci_pd_provider *pd_provider, *it;
+ struct generic_pm_domain *genpd;
+
+ list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+ of_genpd_del_provider(pd_provider->node);
+
+ genpd = of_genpd_remove_last(pd_provider->node);
+ if (!IS_ERR(genpd))
+ kfree(genpd);
+
+ of_node_put(pd_provider->node);
+ list_del(&pd_provider->link);
+ kfree(pd_provider);
+ }
+}
+
+static int __init psci_pd_init_topology(struct device_node *np, bool add)
+{
+ struct device_node *node;
+ struct of_phandle_args child, parent;
+ int ret;
+
+ for_each_child_of_node(np, node) {
+ if (of_parse_phandle_with_args(node, "power-domains",
+ "#power-domain-cells", 0, &parent))
+ continue;
+
+ child.np = node;
+ child.args_count = 0;
+
+ ret = add ? of_genpd_add_subdomain(&parent, &child) :
+ of_genpd_remove_subdomain(&parent, &child);
+ of_node_put(parent.np);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int __init psci_pd_add_topology(struct device_node *np)
+{
+ return psci_pd_init_topology(np, true);
+}
+
+static void __init psci_pd_remove_topology(struct device_node *np)
+{
+ psci_pd_init_topology(np, false);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci-1.0" },
+ {}
+};
+
+static int __init psci_idle_init_domains(void)
+{
+ struct device_node *np = of_find_matching_node(NULL, psci_of_match);
+ struct device_node *node;
+ int ret = 0, pd_count = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ /* For now, limit use of the hierarchical topology to OSI mode. */
+ if (!psci_has_osi_support())
+ goto out;
+
+ /*
+ * Parse child nodes for the "#power-domain-cells" property and
+ * initialize a genpd/genpd-of-provider pair when it's found.
+ */
+ for_each_child_of_node(np, node) {
+ if (!of_find_property(node, "#power-domain-cells", NULL))
+ continue;
+
+ ret = psci_pd_init(node);
+ if (ret)
+ goto put_node;
+
+ pd_count++;
+ }
+
+ /* Bail out if not using the hierarchical CPU topology. */
+ if (!pd_count)
+ goto out;
+
+ /* Link genpd masters/subdomains to model the CPU topology. */
+ ret = psci_pd_add_topology(np);
+ if (ret)
+ goto remove_pd;
+
+ /* Try to enable OSI mode. */
+ ret = psci_set_osi_mode();
+ if (ret) {
+ pr_warn("failed to enable OSI mode: %d\n", ret);
+ psci_pd_remove_topology(np);
+ goto remove_pd;
+ }
+
+ osi_mode_enabled = true;
+ of_node_put(np);
+ pr_info("Initialized CPU PM domain topology\n");
+ return pd_count;
+
+put_node:
+ of_node_put(node);
+remove_pd:
+ if (pd_count)
+ psci_pd_remove();
+ pr_err("failed to create CPU PM domains ret=%d\n", ret);
+out:
+ of_node_put(np);
+ return ret;
+}
+subsys_initcall(psci_idle_init_domains);
+
+struct device __init *psci_dt_attach_cpu(int cpu)
+{
+ struct device *dev;
+
+ if (!osi_mode_enabled)
+ return NULL;
+
+ dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
+ if (IS_ERR_OR_NULL(dev))
+ return dev;
+
+ pm_runtime_irq_safe(dev);
+ if (cpu_online(cpu))
+ pm_runtime_get_sync(dev);
+
+ return dev;
+}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index f3c1a2396f98..edd7a54ef0d3 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
@@ -16,21 +17,107 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/psci.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <asm/cpuidle.h>
+#include "cpuidle-psci.h"
#include "dt_idle_states.h"
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+struct psci_cpuidle_data {
+ u32 *psci_states;
+ struct device *dev;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
+static DEFINE_PER_CPU(u32, domain_state);
+static bool psci_cpuidle_use_cpuhp __initdata;
+
+void psci_set_domain_state(u32 state)
+{
+ __this_cpu_write(domain_state, state);
+}
+
+static inline u32 psci_get_domain_state(void)
+{
+ return __this_cpu_read(domain_state);
+}
+
+static inline int psci_enter_state(int idx, u32 state)
+{
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
+}
+
+static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
+ u32 *states = data->psci_states;
+ struct device *pd_dev = data->dev;
+ u32 state;
+ int ret;
+
+ /* Do runtime PM to manage a hierarchical CPU topology. */
+ pm_runtime_put_sync_suspend(pd_dev);
+
+ state = psci_get_domain_state();
+ if (!state)
+ state = states[idx];
+
+ ret = psci_enter_state(idx, state);
+
+ pm_runtime_get_sync(pd_dev);
+
+ /* Clear the domain state to start fresh when back from idle. */
+ psci_set_domain_state(0);
+ return ret;
+}
+
+static int psci_idle_cpuhp_up(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+ if (pd_dev)
+ pm_runtime_get_sync(pd_dev);
+
+ return 0;
+}
+
+static int psci_idle_cpuhp_down(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+ if (pd_dev) {
+ pm_runtime_put_sync(pd_dev);
+ /* Clear domain state to start fresh at next online. */
+ psci_set_domain_state(0);
+ }
+
+ return 0;
+}
+
+static void __init psci_idle_init_cpuhp(void)
+{
+ int err;
+
+ if (!psci_cpuidle_use_cpuhp)
+ return;
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+ "cpuidle/psci:online",
+ psci_idle_cpuhp_up,
+ psci_idle_cpuhp_down);
+ if (err)
+ pr_warn("Failed %d while setup cpuhp state\n", err);
+}
static int psci_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- u32 *state = __this_cpu_read(psci_power_state);
+ u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
- idx, state[idx - 1]);
+ return psci_enter_state(idx, state[idx]);
}
static struct cpuidle_driver psci_idle_driver __initdata = {
@@ -56,7 +143,7 @@ static const struct of_device_id psci_idle_state_match[] __initconst = {
{ },
};
-static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
@@ -73,28 +160,25 @@ static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
return 0;
}
-static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
+ struct device_node *cpu_node,
+ unsigned int state_count, int cpu)
{
- int i, ret = 0, count = 0;
+ int i, ret = 0;
u32 *psci_states;
struct device_node *state_node;
+ struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
- /* Count idle states */
- while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
- count))) {
- count++;
- of_node_put(state_node);
- }
-
- if (!count)
- return -ENODEV;
-
- psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ state_count++; /* Add WFI state too */
+ psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
if (!psci_states)
return -ENOMEM;
- for (i = 0; i < count; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ for (i = 1; i < state_count; i++) {
+ state_node = of_get_cpu_state_node(cpu_node, i - 1);
+ if (!state_node)
+ break;
+
ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
of_node_put(state_node);
@@ -104,8 +188,33 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
}
- /* Idle states parsed correctly, initialize per-cpu pointer */
- per_cpu(psci_power_state, cpu) = psci_states;
+ if (i != state_count) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ /* For now, limit use of the hierarchical topology to OSI mode. */
+ if (psci_has_osi_support()) {
+ data->dev = psci_dt_attach_cpu(cpu);
+ if (IS_ERR(data->dev)) {
+ ret = PTR_ERR(data->dev);
+ goto free_mem;
+ }
+
+ /*
+ * Use the deepest state for the CPU to trigger a potential
+ * selection of a shared state for the domain; this assumes
+ * the domain states are all deeper states.
+ */
+ if (data->dev) {
+ drv->states[state_count - 1].enter =
+ psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+ }
+ }
+
+ /* Idle states parsed correctly, store them in the per-cpu struct. */
+ data->psci_states = psci_states;
return 0;
free_mem:
@@ -113,7 +222,8 @@ free_mem:
return ret;
}
-static __init int psci_cpu_init_idle(unsigned int cpu)
+static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
+ unsigned int cpu, unsigned int state_count)
{
struct device_node *cpu_node;
int ret;
@@ -129,7 +239,7 @@ static __init int psci_cpu_init_idle(unsigned int cpu)
if (!cpu_node)
return -ENODEV;
- ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+ ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
of_node_put(cpu_node);
@@ -185,7 +295,7 @@ static int __init psci_idle_init_cpu(int cpu)
/*
* Initialize PSCI idle states.
*/
- ret = psci_cpu_init_idle(cpu);
+ ret = psci_cpu_init_idle(drv, cpu, ret);
if (ret) {
pr_err("CPU %d failed to PSCI idle\n", cpu);
goto out_kfree_drv;
@@ -221,6 +331,7 @@ static int __init psci_idle_init(void)
goto out_fail;
}
+ psci_idle_init_cpuhp();
return 0;
out_fail:
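
The per-CPU domain-state plumbing added above boils down to a write from the genpd ->power_off() path and a read on idle entry; a condensed sketch with hypothetical names:

static DEFINE_PER_CPU(u32, example_domain_state);

static void example_set_domain_state(u32 state)
{
	__this_cpu_write(example_domain_state, state);
}

static u32 example_pick_state(u32 *states, int idx)
{
	u32 state = __this_cpu_read(example_domain_state);

	/* Fall back to the per-index PSCI parameter if no domain state is set. */
	return state ? state : states[idx];
}
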
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
new file mode 100644
index 000000000000..7299a04dd467
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CPUIDLE_PSCI_H
+#define __CPUIDLE_PSCI_H
+
+struct device_node;
+
+void psci_set_domain_state(u32 state);
+int __init psci_dt_parse_state_node(struct device_node *np, u32 *state);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+struct device __init *psci_dt_attach_cpu(int cpu);
+#else
+static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; }
+#endif
+
+#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 33d19c8eb027..de81298051b3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns)
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
+ *
+ * Return: the index of the deepest available idle state.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
@@ -572,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
if (!try_module_get(drv->owner))
return -EINVAL;
- for (i = 0; i < drv->state_count; i++)
+ for (i = 0; i < drv->state_count; i++) {
if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ }
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index ce6a5f80fb83..4070e573bf43 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
- drv->refcnt = 0;
-
/*
* Use all possible CPUs as the default, because if the kernel boots
* with some CPUs offline and then we online one of them, the CPU
@@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
*/
static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (WARN_ON(drv->refcnt > 0))
- return;
-
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
@@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
/**
- * cpuidle_driver_ref - get a reference to the driver.
- *
- * Increment the reference counter of the cpuidle driver associated with
- * the current CPU.
- *
- * Returns a pointer to the driver, or NULL if the current CPU has no driver.
- */
-struct cpuidle_driver *cpuidle_driver_ref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv)
- drv->refcnt++;
-
- spin_unlock(&cpuidle_driver_lock);
- return drv;
-}
-
-/**
- * cpuidle_driver_unref - puts down the refcount for the driver
- *
- * Decrement the reference counter of the cpuidle driver associated with
- * the current CPU.
- */
-void cpuidle_driver_unref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv && !WARN_ON(drv->refcnt <= 0))
- drv->refcnt--;
-
- spin_unlock(&cpuidle_driver_lock);
-}
-
-/**
* cpuidle_driver_state_disabled - Disable or enable an idle state
* @drv: cpuidle driver owning the state
* @idx: State index
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index d06d21a9525d..252f2a9686a6 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -111,8 +111,7 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
cpu_node = of_cpu_device_node_get(cpu);
- curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
- idx);
+ curr_state_node = of_get_cpu_state_node(cpu_node, idx);
if (state_node != curr_state_node)
valid = false;
@@ -170,7 +169,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));
for (i = 0; ; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ state_node = of_get_cpu_state_node(cpu_node, i);
if (!state_node)
break;
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index de7e706efd46..6deaaf5f05b5 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* pattern detection.
*/
cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
- if (cpu_data->interval_idx > INTERVALS)
+ if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
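
The teo fix is the standard ring-buffer wraparound: after a post-increment the index must wrap as soon as it reaches the array size, otherwise one out-of-bounds slot is touched before wrapping. In the abstract:

	buf[idx++] = value;
	if (idx >= BUF_LEN)	/* '>' would let idx == BUF_LEN slip through once */
		idx = 0;
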
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 38ef770be90d..cdeedbf02646 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = {
/**
* cpuidle_add_interface - add CPU global sysfs attributes
+ * @dev: the target device
*/
int cpuidle_add_interface(struct device *dev)
{
@@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev)
/**
* cpuidle_remove_interface - remove CPU global sysfs attributes
+ * @dev: the target device
*/
void cpuidle_remove_interface(struct device *dev)
{
@@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state,
return size;
}
+static ssize_t show_state_default_status(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
+}
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
@@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
define_one_state_ro(above, show_state_above);
define_one_state_ro(below, show_state_below);
+define_one_state_ro(default_status, show_state_default_status);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
@@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_disable.attr,
&attr_above.attr,
&attr_below.attr,
+ &attr_default_status.attr,
NULL
};
@@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = {
/**
* cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
@@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
/**
* cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
{
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 91eb768d4221..c2767ed54dfe 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -248,15 +248,15 @@ config CRYPTO_DEV_MARVELL_CESA
This driver supports CPU offload through DMA transfers.
config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ depends on SPARC64
+ help
Each core of a Niagara2 processor contains a Stream
Processing Unit, which itself contains several cryptographic
sub-units. One set provides the Modular Arithmetic Unit,
@@ -357,7 +357,7 @@ config CRYPTO_DEV_OMAP
depends on ARCH_OMAP2PLUS
help
OMAP processors have various crypto HW accelerators. Select this if
- you want to use the OMAP modules for any of the crypto algorithms.
+ you want to use the OMAP modules for any of the crypto algorithms.
if CRYPTO_DEV_OMAP
@@ -430,7 +430,7 @@ config CRYPTO_DEV_SAHARA
found in some Freescale i.MX chips.
config CRYPTO_DEV_EXYNOS_RNG
- tristate "EXYNOS HW pseudo random number generator support"
+ tristate "Exynos HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on HAS_IOMEM
select CRYPTO_RNG
@@ -618,6 +618,14 @@ config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
+ help
+ This driver supports Qualcomm crypto engine accelerator
+ hardware. To compile this driver as a module, choose M here. The
+ module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+ bool
+ depends on CRYPTO_DEV_QCE
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_ECB
@@ -625,10 +633,57 @@ config CRYPTO_DEV_QCE
select CRYPTO_XTS
select CRYPTO_CTR
select CRYPTO_SKCIPHER
- help
- This driver supports Qualcomm crypto engine accelerator
- hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SHA
+ bool
+ depends on CRYPTO_DEV_QCE
+
+choice
+ prompt "Algorithms enabled for QCE acceleration"
+ default CRYPTO_DEV_QCE_ENABLE_ALL
+ depends on CRYPTO_DEV_QCE
+ help
+ This option allows you to choose whether to build support for all
+ algorithms (default), hashes-only, or skciphers-only.
+
+ The QCE engine does not appear to scale as well as the CPU to handle
+ multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+ QCE handles only 2 requests in parallel.
+
+ IPsec throughput seems to improve when disabling either family of
+ algorithms, sharing the load with the CPU. Enabling skciphers-only
+ appears to work best.
+
+ config CRYPTO_DEV_QCE_ENABLE_ALL
+ bool "All supported algorithms"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable all supported algorithms:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (CBC, ECB)
+ - DES (CBC, ECB)
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+ config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+ bool "Symmetric-key ciphers only"
+ select CRYPTO_DEV_QCE_SKCIPHER
+ help
+ Enable symmetric-key ciphers only:
+ - AES (CBC, CTR, ECB, XTS)
+ - 3DES (ECB, CBC)
+ - DES (ECB, CBC)
+
+ config CRYPTO_DEV_QCE_ENABLE_SHA
+ bool "Hash/HMAC only"
+ select CRYPTO_DEV_QCE_SHA
+ help
+ Enable hashes/HMAC algorithms only:
+ - SHA1, HMAC-SHA1
+ - SHA256, HMAC-SHA256
+
+endchoice
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
@@ -639,7 +694,7 @@ config CRYPTO_DEV_QCOM_RNG
Generator hardware found on Qualcomm SoCs.
To compile this driver as a module, choose M here. The
- module will be called qcom-rng. If unsure, say N.
+ module will be called qcom-rng. If unsure, say N.
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
@@ -716,7 +771,7 @@ source "drivers/crypto/stm32/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"
- depends on OF || PCI || COMPILE_TEST
+ depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM
select CRYPTO_LIB_AES
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index cb2b0874f68f..7f22d305178e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
op->keylen = keylen;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 814cd12149a9..a2b67f7f8a81 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
@@ -22,6 +23,14 @@
#include "sun4i-ss.h"
+static const struct ss_variant ss_a10_variant = {
+ .sha1_in_be = false,
+};
+
+static const struct ss_variant ss_a33_variant = {
+ .sha1_in_be = true,
+};
+
static struct sun4i_ss_alg_template ss_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.mode = SS_OP_MD5,
@@ -273,7 +282,7 @@ err_enable:
return err;
}
-const struct dev_pm_ops sun4i_ss_pm_ops = {
+static const struct dev_pm_ops sun4i_ss_pm_ops = {
SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL)
};
@@ -323,6 +332,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
return PTR_ERR(ss->base);
}
+ ss->variant = of_device_get_match_data(&pdev->dev);
+ if (!ss->variant) {
+ dev_err(&pdev->dev, "Missing Security System variant\n");
+ return -EINVAL;
+ }
+
ss->ssclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(ss->ssclk)) {
err = PTR_ERR(ss->ssclk);
@@ -484,7 +499,12 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
static const struct of_device_id a20ss_crypto_of_match_table[] = {
- { .compatible = "allwinner,sun4i-a10-crypto" },
+ { .compatible = "allwinner,sun4i-a10-crypto",
+ .data = &ss_a10_variant
+ },
+ { .compatible = "allwinner,sun8i-a33-crypto",
+ .data = &ss_a33_variant
+ },
{}
};
MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
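
The variant support added here follows the common OF match-data pattern: each compatible entry carries a .data pointer describing the hardware quirks, and probe() retrieves it via of_device_get_match_data(). A sketch with hypothetical compatibles and variants:

static const struct ss_variant example_variant_a = { .sha1_in_be = false };
static const struct ss_variant example_variant_b = { .sha1_in_be = true };

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,soc-a", .data = &example_variant_a },
	{ .compatible = "vendor,soc-b", .data = &example_variant_b },
	{}
};

static int example_probe(struct platform_device *pdev)
{
	const struct ss_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	/* ... stash variant in the driver context ... */
	return 0;
}
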
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index fdc0e6cdbb85..dc35edd90034 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -479,7 +479,10 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+ if (ss->variant->sha1_in_be)
+ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ else
+ v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
memcpy(areq->result + i * 4, &v, 4);
}
} else {
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 60425ac75d90..2b4c6333eb67 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -131,7 +131,16 @@
#define SS_SEED_LEN 192
#define SS_DATA_LEN 160
+/*
+ * struct ss_variant - Describe SS hardware variant
+ * @sha1_in_be: The SHA1 digest is given by SS in BE, and so needs to be byte-swapped.
+ */
+struct ss_variant {
+ bool sha1_in_be;
+};
+
struct sun4i_ss_ctx {
+ const struct ss_variant *variant;
void __iomem *base;
int irq;
struct clk *busclk;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 37d0b6c386a0..a5fd8975f3d3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -144,11 +144,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
- chan->op_dir = rctx->op_dir;
- chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
- chan->keylen = op->keylen;
-
addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
cet->t_key = cpu_to_le32(addr_key);
if (dma_mapping_error(ce->dev, addr_key)) {
@@ -394,7 +389,6 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 73a7649f915d..f72346a44e69 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -555,7 +555,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return -EINVAL;
}
- ce->base = devm_platform_ioremap_resource(pdev, 0);;
+ ce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ce->base))
return PTR_ERR(ce->base);
@@ -624,7 +624,7 @@ error_alg:
error_irq:
sun8i_ce_pm_exit(ce);
error_pm:
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
}
@@ -638,7 +638,7 @@ static int sun8i_ce_remove(struct platform_device *pdev)
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
- sun8i_ce_free_chanlist(ce, MAXFLOW);
+ sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
sun8i_ce_pm_exit(ce);
return 0;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 43db49ceafe4..8f8404c84a4d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -131,12 +131,8 @@ struct ce_task {
* @engine: ptr to the crypto_engine for this flow
* @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv
- * @keylen: keylen for this flow operation
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
- * @method: current method for flow
- * @op_dir: direction (encrypt vs decrypt) of this flow
- * @op_mode: op_mode for this flow
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
* @stat_req: number of request done by this flow
@@ -145,12 +141,8 @@ struct sun8i_ce_flow {
struct crypto_engine *engine;
void *bounce_iv;
unsigned int ivlen;
- unsigned int keylen;
struct completion complete;
int status;
- u32 method;
- u32 op_dir;
- u32 op_mode;
dma_addr_t t_phy;
int timeout;
struct ce_task *tl;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index f222979a5623..84d52fc3a2da 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
@@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 90997cc509b8..6b301afffd11 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -595,7 +595,7 @@ error_alg:
error_irq:
sun8i_ss_pm_exit(ss);
error_pm:
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
return err;
}
@@ -609,7 +609,7 @@ static int sun8i_ss_remove(struct platform_device *pdev)
debugfs_remove_recursive(ss->dbgfs_dir);
#endif
- sun8i_ss_free_flows(ss, MAXFLOW);
+ sun8i_ss_free_flows(ss, MAXFLOW - 1);
sun8i_ss_pm_exit(ss);
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a42f8619589d..f7fc0c464125 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
struct dynamic_sa_ctl *sa;
int rc;
- if (keylen != AES_KEYSIZE_256 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
/* Create SA */
if (ctx->sa_in || ctx->sa_out)
@@ -292,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
- crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_skcipher_set_flags(cipher,
- crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
}
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -379,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
const u8 *key,
unsigned int keylen)
{
- int rc;
-
crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(ctx->sw_cipher.aead,
crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
- crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(cipher,
- crypto_aead_get_flags(ctx->sw_cipher.aead) &
- CRYPTO_TFM_RES_MASK);
-
- return rc;
+ return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
/**
@@ -551,10 +532,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
struct dynamic_sa_ctl *sa;
int rc = 0;
- if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
return -EINVAL;
- }
rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 7d6b695c4ab3..981de43ea5e2 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -169,7 +169,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
int i;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
- &dev->pdr_pa, GFP_ATOMIC);
+ &dev->pdr_pa, GFP_KERNEL);
if (!dev->pdr)
return -ENOMEM;
@@ -185,13 +185,13 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!dev->shadow_sa_pool)
return -ENOMEM;
dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
- &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+ &dev->shadow_sr_pool_pa, GFP_KERNEL);
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
@@ -277,7 +277,7 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
dev->gdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ &dev->gdr_pa, GFP_KERNEL);
if (!dev->gdr)
return -ENOMEM;
@@ -286,7 +286,8 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
- dma_free_coherent(dev->core_dev->device,
+ if (dev->gdr)
+ dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_gd) * PPC4XX_NUM_GD,
dev->gdr, dev->gdr_pa);
}
@@ -354,20 +355,20 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- /* alloc memory for scatter descriptor ring */
- dev->sdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- &dev->sdr_pa, GFP_ATOMIC);
- if (!dev->sdr)
- return -ENOMEM;
-
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
- &dev->scatter_buffer_pa, GFP_ATOMIC);
+ &dev->scatter_buffer_pa, GFP_KERNEL);
if (!dev->scatter_buffer_va)
return -ENOMEM;
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_KERNEL);
+ if (!dev->sdr)
+ return -ENOMEM;
+
for (i = 0; i < PPC4XX_NUM_SD; i++) {
dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
@@ -1439,16 +1440,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
+ goto err_build_sdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_pdr;
-
- rc = crypto4xx_build_sdr(core_dev->dev);
- if (rc)
goto err_build_sdr;
/* Init tasklet for bottom half processing */
@@ -1493,7 +1493,6 @@ err_iomap:
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_pdr:
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
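
The GFP_ATOMIC to GFP_KERNEL switches above reflect allocation context: these rings are allocated from probe(), which runs in process context and may sleep. As a rule-of-thumb sketch:

	/* Process context (probe, ioctl): may sleep, may reclaim. */
	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);

	/* Atomic context (IRQ handler, spinlock held): must not sleep. */
	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_ATOMIC);
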
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index b90850d18965..cf9547602670 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
+ depends on HAS_IOMEM
default y if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e589015aac1c..9819dd50fbad 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
break;
default:
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (op->key) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index fa05fce1c0de..9d4ead2f7ebb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -289,7 +289,7 @@ static int meson_crypto_probe(struct platform_device *pdev)
error_alg:
meson_unregister_algs(mc);
error_flow:
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return err;
}
@@ -304,7 +304,7 @@ static int meson_crypto_remove(struct platform_device *pdev)
meson_unregister_algs(mc);
- meson_free_chanlist(mc, MAXFLOW);
+ meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
return 0;
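meson_free_chanlist() treats its second argument as the last valid index and frees channels 0..i inclusive, so passing MAXFLOW walked one element past the array; MAXFLOW - 1 is the correct bound. A standalone sketch of the inclusive-index convention, with a hypothetical channel list:

#include <stdlib.h>

struct example_chan { void *buf; };

/* Sketch: 'last' names the highest index to free, not a count, so a
 * caller that allocated MAXFLOW channels passes MAXFLOW - 1. */
static void example_free_chanlist(struct example_chan *chans, int last)
{
	int i;

	for (i = 0; i <= last; i++)	/* <=, inclusive of 'last' */
		free(chans[i].buf);
}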
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 91092504bc96..a6e14491e080 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -37,8 +38,6 @@
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
-#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"
@@ -89,7 +88,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
- bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -122,6 +120,7 @@ struct atmel_aes_ctr_ctx {
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
+ u32 blocks;
};
struct atmel_aes_gcm_ctx {
@@ -514,8 +513,37 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
}
}
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+ return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
+{
+ struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ int i;
+
+ /*
+	 * The CTR transfer works in fragments of at most 1 MByte because of
+	 * the 16-bit CTR counter embedded in the IP. By the time we get here,
+	 * ctx->blocks holds the number of blocks of the last fragment
+	 * processed, so there is no need to explicitly cast it to u16.
+ */
+ for (i = 0; i < ctx->blocks; i++)
+ crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+
+ memcpy(req->iv, ctx->iv, ivsize);
+}
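atmel_aes_ctr_update_req_iv() replays one counter increment per block of the last fragment so req->iv leaves the driver holding the next counter value, as the skcipher API expects. A standalone, runnable sketch of the increment, mirroring what crypto_inc() does on a big-endian counter block:

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* Big-endian byte-wise increment: bump the last byte, carry leftwards
 * only while a byte wraps to zero (same semantics as crypto_inc()). */
static void ctr_inc(uint8_t *b, unsigned int size)
{
	for (; size; size--)
		if (++b[size - 1])
			break;
}

int main(void)
{
	uint8_t iv[AES_BLOCK_SIZE] = { [14] = 0xff, [15] = 0xfe };
	uint32_t blocks = 3, i;

	for (i = 0; i < blocks; i++)
		ctr_inc(iv, AES_BLOCK_SIZE);

	printf("%02x %02x %02x\n", iv[13], iv[14], iv[15]);	/* 01 00 01 */
	return 0;
}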
+
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->ctx->is_aead)
atmel_aes_authenc_complete(dd, err);
@@ -524,8 +552,13 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
- if (!dd->ctx->is_aead)
- atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && !dd->ctx->is_aead &&
+ (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
+ if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
+ atmel_aes_set_iv_as_last_ciphertext_block(dd);
+ else
+ atmel_aes_ctr_update_req_iv(dd);
+ }
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -790,7 +823,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
int err;
memset(&config, 0, sizeof(config));
- config.direction = dir;
config.src_addr_width = addr_width;
config.dst_addr_width = addr_width;
config.src_maxburst = maxburst;
@@ -830,27 +862,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
return 0;
}
-static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
- enum dma_transfer_direction dir)
-{
- struct atmel_aes_dma *dma;
-
- switch (dir) {
- case DMA_MEM_TO_DEV:
- dma = &dd->src;
- break;
-
- case DMA_DEV_TO_MEM:
- dma = &dd->dst;
- break;
-
- default:
- return;
- }
-
- dmaengine_terminate_all(dma->chan);
-}
-
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
struct scatterlist *src,
struct scatterlist *dst,
@@ -909,25 +920,18 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
return -EINPROGRESS;
output_transfer_stop:
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+ dmaengine_terminate_sync(dd->dst.chan);
unmap:
atmel_aes_unmap(dd);
exit:
return atmel_aes_complete(dd, err);
}
-static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
-{
- atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
- atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
- atmel_aes_unmap(dd);
-}
-
static void atmel_aes_dma_callback(void *data)
{
struct atmel_aes_dev *dd = data;
- atmel_aes_dma_stop(dd);
+ atmel_aes_unmap(dd);
dd->is_async = true;
(void)dd->resume(dd);
}
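The callback rework is a correctness fix as much as a cleanup: by the time the completion callback runs the transfer has already finished, so terminating channels there did nothing useful, and dmaengine_terminate_sync() may not be called from that (atomic) context anyway. Termination is only needed on the failed-submit path, for the channel that was already started. Sketched with a hypothetical submit helper:

#include <linux/dmaengine.h>

/* example_submit() stands in for prep + submit + issue_pending. */
static int example_submit(struct dma_chan *chan);

static int example_dma_start(struct dma_chan *out, struct dma_chan *in)
{
	int err;

	err = example_submit(out);	/* device-to-memory side first */
	if (err)
		return err;

	err = example_submit(in);	/* memory-to-device side */
	if (err) {
		/* terminate only the channel that already started */
		dmaengine_terminate_sync(out);
		return err;
	}
	return 0;
}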
@@ -1004,19 +1008,14 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
atmel_aes_transfer_complete);
}
-static inline struct atmel_aes_ctr_ctx *
-atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct atmel_aes_ctr_ctx, base);
-}
-
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct skcipher_request *req = skcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
- u32 ctr, blocks;
size_t datalen;
+ u32 ctr;
+ u16 start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -1026,29 +1025,19 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
/* Compute data length. */
datalen = req->cryptlen - ctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+ ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
- if (dd->caps.has_ctr32) {
- /* Check 32bit counter overflow. */
- u32 start = ctr;
- u32 end = start + blocks - 1;
-
- if (end < start) {
- ctr |= 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
- } else {
- /* Check 16bit counter overflow. */
- u16 start = ctr & 0xffff;
- u16 end = start + (u16)blocks - 1;
-
- if (blocks >> 16 || end < start) {
- ctr |= 0xffff;
- datalen = AES_BLOCK_SIZE * (0x10000-start);
- fragmented = true;
- }
+
+ /* Check 16bit counter overflow. */
+ start = ctr & 0xffff;
+ end = start + ctx->blocks - 1;
+
+ if (ctx->blocks >> 16 || end < start) {
+ ctr |= 0xffff;
+ datalen = AES_BLOCK_SIZE * (0x10000 - start);
+ fragmented = true;
}
+
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
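Dropping has_ctr32 means every IP version now takes the 16-bit overflow path: when the low 16 bits of the counter would wrap within the request, the transfer is truncated at the wrap point and the remainder is handled as a fresh fragment. A standalone, runnable sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	uint32_t ctr = 0xabcdfffe;	/* IV word 3, about to wrap */
	uint32_t blocks = 5;
	size_t datalen = (size_t)blocks * AES_BLOCK_SIZE;
	uint16_t start = ctr & 0xffff;
	uint16_t end = start + blocks - 1;	/* wraps in 16 bits */

	if (blocks >> 16 || end < start) {
		/* process only up to the wrap, resume with a new fragment */
		datalen = (size_t)AES_BLOCK_SIZE * (0x10000 - start);
		printf("fragmented: %zu bytes this pass\n", datalen);	/* 32 */
	}
	return 0;
}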
@@ -1131,7 +1120,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
- if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+ !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -1150,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_128 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1275,12 +1263,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "atmel-ecb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1292,12 +1276,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "atmel-cbc-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1310,12 +1290,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1328,12 +1304,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1346,12 +1318,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb32(aes)",
.base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1364,12 +1332,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb16(aes)",
.base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1382,12 +1346,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "cfb8(aes)",
.base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1400,12 +1360,8 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_ctr_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1420,12 +1376,8 @@ static struct skcipher_alg aes_algs[] = {
static struct skcipher_alg aes_cfb64_alg = {
.base.cra_name = "cfb64(aes)",
.base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB64_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.init = atmel_aes_init_tfm,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1762,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_128) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -1776,21 +1726,7 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 4:
- case 8:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return crypto_gcm_check_authsize(authsize);
}
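crypto_gcm_check_authsize() from <crypto/gcm.h> encodes the same rule the open-coded switch did: GCM tags may be 4, 8, or 12..16 bytes. Its logic, restated as a sketch:

#include <linux/errno.h>

static int example_gcm_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12 ... 16:	/* GCC case-range, as in the kernel helper */
		return 0;
	default:
		return -EINVAL;
	}
}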
static int atmel_aes_gcm_encrypt(struct aead_request *req)
@@ -1825,12 +1761,8 @@ static struct aead_alg aes_gcm_alg = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "atmel-gcm-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
};
@@ -1947,12 +1879,8 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aes_xts_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "atmel-xts-aes",
- .base.cra_priority = ATMEL_AES_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2113,7 +2041,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_authenc_keys keys;
- u32 flags;
int err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -2123,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
goto badkey;
/* Save auth key. */
- flags = crypto_aead_get_flags(tfm);
err = atmel_sha_authenc_setkey(ctx->auth,
keys.authkey, keys.authkeylen,
- &flags);
- crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+ crypto_aead_get_flags(tfm));
if (err) {
memzero_explicit(&keys, sizeof(keys));
return err;
@@ -2141,7 +2066,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -2252,12 +2176,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2272,12 +2192,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2292,12 +2208,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2312,12 +2224,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
{
@@ -2332,12 +2240,8 @@ static struct aead_alg aes_authenc_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
- .cra_priority = ATMEL_AES_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
},
},
};
@@ -2364,47 +2268,30 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
free_page((unsigned long)dd->buf);
}
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- struct at_dma_slave *slave;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- slave = &pdata->dma_slave->rxdata;
- dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "tx");
- if (!dd->src.chan)
+ dd->src.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->src.chan)) {
+ ret = PTR_ERR(dd->src.chan);
goto err_dma_in;
+ }
- slave = &pdata->dma_slave->txdata;
- dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
- slave, dd->dev, "rx");
- if (!dd->dst.chan)
+ dd->dst.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dst.chan)) {
+ ret = PTR_ERR(dd->dst.chan);
goto err_dma_out;
+ }
return 0;
err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
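Unlike the old dma_request_slave_channel_compat(), which could only return NULL, dma_request_chan() reports a real error code, so probe can propagate -EPROBE_DEFER when the DMA controller has not bound yet instead of failing hard with -ENODEV. The pattern, with illustrative names:

#include <linux/dmaengine.h>

static int example_dma_init(struct device *dev, struct dma_chan **tx,
			    struct dma_chan **rx)
{
	*tx = dma_request_chan(dev, "tx");
	if (IS_ERR(*tx))
		return PTR_ERR(*tx);	/* may be -EPROBE_DEFER */

	*rx = dma_request_chan(dev, "rx");
	if (IS_ERR(*rx)) {
		dma_release_channel(*tx);
		return PTR_ERR(*rx);
	}
	return 0;
}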
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2469,29 +2356,45 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
crypto_unregister_skcipher(&aes_algs[i]);
}
+static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
+{
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_alignmask = 0xf;
+ alg->cra_priority = ATMEL_AES_PRIORITY;
+ alg->cra_module = THIS_MODULE;
+}
+
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
int err, i, j;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_algs[i].base);
+
err = crypto_register_skcipher(&aes_algs[i]);
if (err)
goto err_aes_algs;
}
if (dd->caps.has_cfb64) {
+ atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
+
err = crypto_register_skcipher(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
if (dd->caps.has_gcm) {
+ atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
+
err = crypto_register_aead(&aes_gcm_alg);
if (err)
goto err_aes_gcm_alg;
}
if (dd->caps.has_xts) {
+ atmel_aes_crypto_alg_init(&aes_xts_alg.base);
+
err = crypto_register_skcipher(&aes_xts_alg);
if (err)
goto err_aes_xts_alg;
@@ -2500,6 +2403,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc) {
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+ atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
+
err = crypto_register_aead(&aes_authenc_algs[i]);
if (err)
goto err_aes_authenc_alg;
@@ -2533,7 +2438,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
- dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2544,7 +2448,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2553,7 +2456,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
@@ -2577,65 +2479,18 @@ static const struct of_device_id atmel_aes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-
-static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave) {
- devm_kfree(&pdev->dev, pdata);
- return ERR_PTR(-ENOMEM);
- }
-
- return pdata;
-}
-#else
-static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
int err;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_aes_of_init(pdev);
- if (IS_ERR(pdata)) {
- err = PTR_ERR(pdata);
- goto aes_dd_err;
- }
- }
-
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto aes_dd_err;
- }
-
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
- if (aes_dd == NULL) {
- err = -ENOMEM;
- goto aes_dd_err;
- }
+ if (!aes_dd)
+ return -ENOMEM;
aes_dd->dev = dev;
@@ -2656,7 +2511,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (!aes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2664,14 +2519,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd->irq = platform_get_irq(pdev, 0);
if (aes_dd->irq < 0) {
err = aes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
IRQF_SHARED, "atmel-aes", aes_dd);
if (err) {
dev_err(dev, "unable to request aes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2679,40 +2534,40 @@ static int atmel_aes_probe(struct platform_device *pdev)
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(aes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
err = atmel_aes_hw_version_init(aes_dd);
if (err)
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
- goto iclk_unprepare;
+ goto err_iclk_unprepare;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
- goto err_aes_buff;
+ goto err_iclk_unprepare;
- err = atmel_aes_dma_init(aes_dd, pdata);
+ err = atmel_aes_dma_init(aes_dd);
if (err)
- goto err_aes_dma;
+ goto err_buff_cleanup;
spin_lock(&atmel_aes.lock);
list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
@@ -2733,17 +2588,13 @@ err_algs:
list_del(&aes_dd->list);
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
-err_aes_dma:
+err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
-err_aes_buff:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(aes_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
-aes_dd_err:
- if (err != -EPROBE_DEFER)
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index d6de810df44f..c6530a1c8c20 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void);
struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode);
void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags);
+ const u8 *key, unsigned int keylen, u32 flags);
int atmel_sha_authenc_schedule(struct ahash_request *req,
struct atmel_sha_authenc_ctx *auth,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8ea0e4bcde0d..e536e2a6bbd8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -36,10 +37,11 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"
+#define ATMEL_SHA_PRIORITY 300
+
/* SHA flags */
#define SHA_FLAGS_BUSY BIT(0)
#define SHA_FLAGS_FINAL BIT(1)
@@ -134,7 +136,6 @@ struct atmel_sha_dev {
void __iomem *io_base;
spinlock_t lock;
- int err;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
@@ -851,7 +852,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
0, final);
}
-static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -870,8 +871,6 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
ctx->block_size, DMA_TO_DEVICE);
}
-
- return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
@@ -1025,7 +1024,6 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
dd->flags |= SHA_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -1036,9 +1034,13 @@ static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
-static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
- atmel_sha_hw_init(dd);
+ int err;
+
+ err = atmel_sha_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_sha_get_version(dd);
@@ -1046,6 +1048,8 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable(dd->iclk);
+
+ return 0;
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1248,130 +1252,66 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void atmel_sha_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_cra_init;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->finup = atmel_sha_finup;
+ alg->digest = atmel_sha_digest;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
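Filling the boilerplate in atmel_sha_alg_init() at registration time shrinks each array entry below to just the fields that differ per algorithm. The registration loop this enables, sketched with hypothetical names:

#include <crypto/internal/hash.h>

/* example_alg_init() stands in for the common-field initializer above. */
static void example_alg_init(struct ahash_alg *alg);

static int example_register_hashes(struct ahash_alg *algs, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		example_alg_init(&algs[i]);	/* shared boilerplate */
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		crypto_unregister_ahash(&algs[i]);
	return err;
}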
+
static struct ahash_alg sha_1_256_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "atmel-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha1",
+ .halg.base.cra_driver_name = "atmel-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "atmel-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha256",
+ .halg.base.cra_driver_name = "atmel-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
};
static struct ahash_alg sha_224_alg = {
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "atmel-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha224",
+ .halg.base.cra_driver_name = "atmel-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
};
static struct ahash_alg sha_384_512_algs[] = {
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "atmel-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha384",
+ .halg.base.cra_driver_name = "atmel-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .finup = atmel_sha_finup,
- .digest = atmel_sha_digest,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "atmel-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_ctx),
- .cra_alignmask = 0x3,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_cra_init,
- }
- }
+ .halg.base.cra_name = "sha512",
+ .halg.base.cra_driver_name = "atmel-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .halg.base.cra_alignmask = 0x3,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -1395,10 +1335,6 @@ static int atmel_sha_done(struct atmel_sha_dev *dd)
if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
atmel_sha_update_dma_stop(dd);
- if (dd->err) {
- err = dd->err;
- goto finish;
- }
}
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
/* hash or semi-hash ready */
@@ -1493,7 +1429,6 @@ static void atmel_sha_dma_callback2(void *data)
struct scatterlist *sg;
int nents;
- dmaengine_terminate_all(dma->chan);
dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
sg = dma->sg;
@@ -1918,12 +1853,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return 0;
+ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
}
static int atmel_sha_hmac_init(struct ahash_request *req)
@@ -2084,131 +2014,61 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
atmel_sha_hmac_key_release(&hmac->hkey);
}
+static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
+{
+ alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
+ alg->halg.base.cra_module = THIS_MODULE;
+ alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
+ alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
+
+ alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
+
+ alg->init = atmel_sha_hmac_init;
+ alg->update = atmel_sha_update;
+ alg->final = atmel_sha_final;
+ alg->digest = atmel_sha_hmac_digest;
+ alg->setkey = atmel_sha_hmac_setkey;
+ alg->export = atmel_sha_export;
+ alg->import = atmel_sha_import;
+}
+
static struct ahash_alg sha_hmac_algs[] = {
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "atmel-hmac-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha1)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha1",
+ .halg.base.cra_blocksize = SHA1_BLOCK_SIZE,
+
+ .halg.digestsize = SHA1_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "atmel-hmac-sha224",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha224)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha224",
+ .halg.base.cra_blocksize = SHA224_BLOCK_SIZE,
+
+ .halg.digestsize = SHA224_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "atmel-hmac-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha256)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha256",
+ .halg.base.cra_blocksize = SHA256_BLOCK_SIZE,
+
+ .halg.digestsize = SHA256_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "atmel-hmac-sha384",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha384)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha384",
+ .halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
+
+ .halg.digestsize = SHA384_DIGEST_SIZE,
},
{
- .init = atmel_sha_hmac_init,
- .update = atmel_sha_update,
- .final = atmel_sha_final,
- .digest = atmel_sha_hmac_digest,
- .setkey = atmel_sha_hmac_setkey,
- .export = atmel_sha_export,
- .import = atmel_sha_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct atmel_sha_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "atmel-hmac-sha512",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_sha_hmac_cra_init,
- .cra_exit = atmel_sha_hmac_cra_exit,
- }
- }
+ .halg.base.cra_name = "hmac(sha512)",
+ .halg.base.cra_driver_name = "atmel-hmac-sha512",
+ .halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
+
+ .halg.digestsize = SHA512_DIGEST_SIZE,
},
};
@@ -2347,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
- const u8 *key, unsigned int keylen,
- u32 *flags)
+ const u8 *key, unsigned int keylen, u32 flags)
{
struct crypto_ahash *tfm = auth->tfm;
- int err;
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(tfm, key, keylen);
- *flags = crypto_ahash_get_flags(tfm);
-
- return err;
+ crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
+ return crypto_ahash_setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
@@ -2561,12 +2416,16 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ atmel_sha_alg_init(&sha_1_256_algs[i]);
+
err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
goto err_sha_1_256_algs;
}
if (dd->caps.has_sha224) {
+ atmel_sha_alg_init(&sha_224_alg);
+
err = crypto_register_ahash(&sha_224_alg);
if (err)
goto err_sha_224_algs;
@@ -2574,6 +2433,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_sha_384_512) {
for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ atmel_sha_alg_init(&sha_384_512_algs[i]);
+
err = crypto_register_ahash(&sha_384_512_algs[i]);
if (err)
goto err_sha_384_512_algs;
@@ -2582,6 +2443,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
if (dd->caps.has_hmac) {
for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
+ atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
+
err = crypto_register_ahash(&sha_hmac_algs[i]);
if (err)
goto err_sha_hmac_algs;
@@ -2608,35 +2471,14 @@ err_sha_1_256_algs:
return err;
}
-static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ dev_err(dd->dev, "DMA channel is not available\n");
+ return PTR_ERR(dd->dma_lch_in.chan);
}
-}
-static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask_in;
-
- /* Try to grab DMA channel */
- dma_cap_zero(mask_in);
- dma_cap_set(DMA_SLAVE, mask_in);
-
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
- atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan) {
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
- }
-
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
SHA_REG_DIN(0);
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -2709,49 +2551,18 @@ static const struct of_device_id atmel_sha_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-
-static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
int err;
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
- if (sha_dd == NULL) {
- err = -ENOMEM;
- goto sha_dd_err;
- }
+ if (!sha_dd)
+ return -ENOMEM;
sha_dd->dev = dev;
@@ -2772,7 +2583,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (!sha_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2780,14 +2591,14 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd->irq = platform_get_irq(pdev, 0);
if (sha_dd->irq < 0) {
err = sha_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
IRQF_SHARED, "atmel-sha", sha_dd);
if (err) {
dev_err(dev, "unable to request sha irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -2795,41 +2606,30 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(sha_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
- goto res_err;
+ goto err_tasklet_kill;
- atmel_sha_hw_version_init(sha_dd);
+ err = atmel_sha_hw_version_init(sha_dd);
+ if (err)
+ goto err_iclk_unprepare;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_sha_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto iclk_unprepare;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto iclk_unprepare;
- }
- err = atmel_sha_dma_init(sha_dd, pdata);
+ err = atmel_sha_dma_init(sha_dd);
if (err)
- goto err_sha_dma;
+ goto err_iclk_unprepare;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@@ -2855,14 +2655,11 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
-err_sha_dma:
-iclk_unprepare:
+err_iclk_unprepare:
clk_unprepare(sha_dd->iclk);
-res_err:
+err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
-sha_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 0c1f79b30fc1..ed40dbb98c6b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -30,31 +31,32 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
+#define ATMEL_TDES_PRIORITY 300
+
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x00ff
-#define TDES_FLAGS_ENCRYPT BIT(0)
-#define TDES_FLAGS_CBC BIT(1)
-#define TDES_FLAGS_CFB BIT(2)
-#define TDES_FLAGS_CFB8 BIT(3)
-#define TDES_FLAGS_CFB16 BIT(4)
-#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_CFB64 BIT(6)
-#define TDES_FLAGS_OFB BIT(7)
-
-#define TDES_FLAGS_INIT BIT(16)
-#define TDES_FLAGS_FAST BIT(17)
-#define TDES_FLAGS_BUSY BIT(18)
-#define TDES_FLAGS_DMA BIT(19)
+/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */
+#define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC
+#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
+#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
+#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
+#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
+#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
+#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
+#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
+#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
+
+#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
+
+#define TDES_FLAGS_INIT BIT(3)
+#define TDES_FLAGS_FAST BIT(4)
+#define TDES_FLAGS_BUSY BIT(5)
+#define TDES_FLAGS_DMA BIT(6)
#define ATMEL_TDES_QUEUE_LENGTH 50
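Redefining the TDES flags as the literal TDES_MR_* field encodings is what lets atmel_tdes_write_ctrl(), later in this patch, replace its if/else ladder with a single valmr |= dd->flags & TDES_FLAGS_MODE_MASK. A standalone, runnable sketch of the trick, with made-up field values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register field encodings, reused as software flags. */
#define MR_OPMOD_MASK	(3u << 12)
#define MR_OPMOD_ECB	(0u << 12)
#define MR_OPMOD_CBC	(1u << 12)
#define MR_CYPHER_ENC	(1u << 0)

#define FLAGS_MODE_MASK	(MR_OPMOD_MASK | MR_CYPHER_ENC)

int main(void)
{
	uint32_t flags = MR_OPMOD_CBC | MR_CYPHER_ENC;	/* requested mode */
	uint32_t valmr = 0;

	valmr |= flags & FLAGS_MODE_MASK;	/* no per-mode if/else ladder */
	printf("MR = 0x%08x\n", (unsigned int)valmr);	/* 0x00001001 */
	return 0;
}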
@@ -100,7 +102,6 @@ struct atmel_tdes_dev {
int irq;
unsigned long flags;
- int err;
spinlock_t lock;
struct crypto_queue queue;
@@ -189,7 +190,7 @@ static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
}
static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
- u32 *value, int count)
+ const u32 *value, int count)
{
for (; count--; value++, offset += 4)
atmel_tdes_write(dd, offset, *value);
@@ -226,7 +227,6 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
if (!(dd->flags & TDES_FLAGS_INIT)) {
atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
dd->flags |= TDES_FLAGS_INIT;
- dd->err = 0;
}
return 0;
@@ -237,9 +237,13 @@ static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
-static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
- atmel_tdes_hw_init(dd);
+ int err;
+
+ err = atmel_tdes_hw_init(dd);
+ if (err)
+ return err;
dd->hw_version = atmel_tdes_get_version(dd);
@@ -247,6 +251,8 @@ static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
"version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
+
+ return 0;
}
static void atmel_tdes_dma_callback(void *data)
@@ -260,7 +266,7 @@ static void atmel_tdes_dma_callback(void *data)
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
- u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+ u32 valmr = TDES_MR_SMOD_PDC;
err = atmel_tdes_hw_init(dd);
@@ -282,36 +288,15 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_TDESMOD_DES;
}
- if (dd->flags & TDES_FLAGS_CBC) {
- valmr |= TDES_MR_OPMOD_CBC;
- } else if (dd->flags & TDES_FLAGS_CFB) {
- valmr |= TDES_MR_OPMOD_CFB;
-
- if (dd->flags & TDES_FLAGS_CFB8)
- valmr |= TDES_MR_CFBS_8b;
- else if (dd->flags & TDES_FLAGS_CFB16)
- valmr |= TDES_MR_CFBS_16b;
- else if (dd->flags & TDES_FLAGS_CFB32)
- valmr |= TDES_MR_CFBS_32b;
- else if (dd->flags & TDES_FLAGS_CFB64)
- valmr |= TDES_MR_CFBS_64b;
- } else if (dd->flags & TDES_FLAGS_OFB) {
- valmr |= TDES_MR_OPMOD_OFB;
- }
-
- if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
- valmr |= TDES_MR_CYPHER_ENC;
+ valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
- atmel_tdes_write(dd, TDES_CR, valcr);
atmel_tdes_write(dd, TDES_MR, valmr);
atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
dd->ctx->keylen >> 2);
- if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
- (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
+ if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
- }
return 0;
}
@@ -397,11 +382,11 @@ static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -411,12 +396,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
len32 = DIV_ROUND_UP(length, sizeof(u8));
- else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+ break;
+
+ case TDES_FLAGS_CFB16:
len32 = DIV_ROUND_UP(length, sizeof(u16));
- else
+ break;
+
+ default:
len32 = DIV_ROUND_UP(length, sizeof(u32));
+ break;
+ }
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -433,13 +425,14 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
- dma_addr_t dma_addr_out, int length)
+static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct atmel_tdes_dev *dd = ctx->dd;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
+ enum dma_slave_buswidth addr_width;
dd->dma_size = length;
@@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
DMA_TO_DEVICE);
}
- if (dd->flags & TDES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & TDES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- } else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
+ addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+
+ case TDES_FLAGS_CFB16:
+ addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+
+ default:
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
}
+ dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
+ dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
+
dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
@@ -504,8 +497,6 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(
- crypto_skcipher_reqtfm(dd->req));
int err, fast = 0, in, out;
size_t count;
dma_addr_t addr_in, addr_out;
@@ -561,9 +552,9 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
dd->total -= count;
if (dd->caps.has_dma)
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
else
- err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+ err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
@@ -600,12 +591,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct skcipher_request *req = dd->req;
+ struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
clk_disable_unprepare(dd->iclk);
dd->flags &= ~TDES_FLAGS_BUSY;
- atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+ if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
+ atmel_tdes_set_iv_as_last_ciphertext_block(dd);
req->base.complete(&req->base, err);
}
@@ -699,35 +692,44 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
- if (mode & TDES_FLAGS_CFB8) {
+ switch (mode & TDES_FLAGS_OPMODE_MASK) {
+ case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB16) {
+ break;
+
+ case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
- } else if (mode & TDES_FLAGS_CFB32) {
+ break;
+
+ case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
- } else {
+ break;
+
+ default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
+ break;
}
rctx->mode = mode;
- if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+ if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
+ !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
if (req->cryptlen >= ivsize)
@@ -739,33 +741,17 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
return atmel_tdes_handle_queue(ctx->dd, req);
}
-static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
{
- struct at_dma_slave *sl = slave;
-
- if (sl && sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
- struct crypto_platform_data *pdata)
-{
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ int ret;
/* Try to grab 2 DMA channels */
- dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
- if (!dd->dma_lch_in.chan)
+ dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_in.chan)) {
+ ret = PTR_ERR(dd->dma_lch_in.chan);
goto err_dma_in;
+ }
- dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
TDES_IDATA1R;
dd->dma_lch_in.dma_conf.src_maxburst = 1;
@@ -776,12 +762,12 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
- dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
- atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
- if (!dd->dma_lch_out.chan)
+ dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_out.chan)) {
+ ret = PTR_ERR(dd->dma_lch_out.chan);
goto err_dma_out;
+ }
- dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
TDES_ODATA1R;
dd->dma_lch_out.dma_conf.src_maxburst = 1;
@@ -797,8 +783,8 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
- dev_warn(dd->dev, "no DMA channel available\n");
- return -ENODEV;
+ dev_err(dd->dev, "no DMA channel available\n");
+ return ret;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -841,17 +827,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, 0);
+ return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}
static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
@@ -860,50 +846,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
}
static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
}
static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
}
static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
}
static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
- TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
}
static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
{
- return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+ return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
}
static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
@@ -925,18 +908,23 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
+static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
+{
+ alg->base.cra_priority = ATMEL_TDES_PRIORITY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
+ alg->base.cra_module = THIS_MODULE;
+
+ alg->init = atmel_tdes_init_tfm;
+}
+
static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "atmel-ecb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = atmel_des_setkey,
@@ -946,14 +934,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "atmel-cbc-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -964,14 +947,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb(des)",
.base.cra_driver_name = "atmel-cfb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -982,14 +960,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb8(des)",
.base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1000,14 +973,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb16(des)",
.base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x1,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1018,14 +986,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cfb32(des)",
.base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x3,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1036,14 +999,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -1054,14 +1012,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1071,14 +1024,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "atmel-cbc-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1089,14 +1037,9 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des3_ede)",
.base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
.base.cra_alignmask = 0x7,
- .base.cra_module = THIS_MODULE,
- .init = atmel_tdes_init_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1123,8 +1066,6 @@ static void atmel_tdes_done_task(unsigned long data)
else
err = atmel_tdes_crypt_dma_stop(dd);
- err = dd->err ? : err;
-
if (dd->total && !err) {
if (dd->flags & TDES_FLAGS_FAST) {
dd->in_sg = sg_next(dd->in_sg);
@@ -1173,6 +1114,8 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
int err, i, j;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+ atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
+
err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
@@ -1214,49 +1157,18 @@ static const struct of_device_id atmel_tdes_dt_ids[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-
-static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct crypto_platform_data *pdata;
-
- if (!np) {
- dev_err(&pdev->dev, "device node not found\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->dma_slave = devm_kzalloc(&pdev->dev,
- sizeof(*(pdata->dma_slave)),
- GFP_KERNEL);
- if (!pdata->dma_slave)
- return ERR_PTR(-ENOMEM);
-
- return pdata;
-}
-#else /* CONFIG_OF */
-static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
-{
- return ERR_PTR(-EINVAL);
-}
#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
- struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
int err;
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
- if (tdes_dd == NULL) {
- err = -ENOMEM;
- goto tdes_dd_err;
- }
+ if (!tdes_dd)
+ return -ENOMEM;
tdes_dd->dev = dev;
@@ -1277,7 +1189,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (!tdes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1285,14 +1197,14 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->irq = platform_get_irq(pdev, 0);
if (tdes_dd->irq < 0) {
err = tdes_dd->irq;
- goto res_err;
+ goto err_tasklet_kill;
}
err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
IRQF_SHARED, "atmel-tdes", tdes_dd);
if (err) {
dev_err(dev, "unable to request tdes irq.\n");
- goto res_err;
+ goto err_tasklet_kill;
}
/* Initializing the clock */
@@ -1300,41 +1212,30 @@ static int atmel_tdes_probe(struct platform_device *pdev)
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
- goto res_err;
+ goto err_tasklet_kill;
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
- goto res_err;
+ goto err_tasklet_kill;
}
- atmel_tdes_hw_version_init(tdes_dd);
+ err = atmel_tdes_hw_version_init(tdes_dd);
+ if (err)
+ goto err_tasklet_kill;
atmel_tdes_get_cap(tdes_dd);
err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_buff;
+ goto err_tasklet_kill;
if (tdes_dd->caps.has_dma) {
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- pdata = atmel_tdes_of_init(pdev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "platform data not available\n");
- err = PTR_ERR(pdata);
- goto err_pdata;
- }
- }
- if (!pdata->dma_slave) {
- err = -ENXIO;
- goto err_pdata;
- }
- err = atmel_tdes_dma_init(tdes_dd, pdata);
+ err = atmel_tdes_dma_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_buff_cleanup;
dev_info(dev, "using %s, %s for DMA transfers\n",
dma_chan_name(tdes_dd->dma_lch_in.chan),
@@ -1359,15 +1260,11 @@ err_algs:
spin_unlock(&atmel_tdes.lock);
if (tdes_dd->caps.has_dma)
atmel_tdes_dma_cleanup(tdes_dd);
-err_tdes_dma:
-err_pdata:
+err_buff_cleanup:
atmel_tdes_buff_cleanup(tdes_dd);
-err_tdes_buff:
-res_err:
+err_tasklet_kill:
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
-tdes_dd_err:
- dev_err(dev, "initialization failed.\n");
return err;
}
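
The probe path above renames its error labels after the cleanup they perform (err_buff_cleanup, err_tasklet_kill) rather than after the failure site (err_tdes_buff, res_err), so each goto reads as "undo everything that succeeded so far". A minimal sketch of the convention; every name here is a placeholder:

#include <linux/platform_device.h>

/* All helpers are placeholders; the label-naming pattern is the point. */
static int example_setup_tasklets(struct platform_device *pdev);
static int example_setup_buffers(struct platform_device *pdev);
static int example_setup_dma(struct platform_device *pdev);
static void example_buff_cleanup(struct platform_device *pdev);
static void example_tasklet_kill(struct platform_device *pdev);

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = example_setup_tasklets(pdev);
	if (err)
		return err;

	err = example_setup_buffers(pdev);
	if (err)
		goto err_tasklet_kill;	/* undo only what succeeded */

	err = example_setup_dma(pdev);
	if (err)
		goto err_buff_cleanup;

	return 0;

err_buff_cleanup:
	example_buff_cleanup(pdev);
err_tasklet_kill:
	example_tasklet_kill(pdev);
	return err;
}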
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 4b20606983a4..fcf1effc7661 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
{
struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
- if (len != 16 && len != 24 && len != 32) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -1;
- }
+ if (len != 16 && len != 24 && len != 32)
+ return -EINVAL;
ctx->key_length = len;
@@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 32:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
case 64:
break;
default:
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
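
Throughout this series, setkey paths stop raising CRYPTO_TFM_RES_BAD_KEY_LEN and simply return -EINVAL; the crypto core no longer requires the flag to accompany the error. A minimal sketch of the resulting shape (function name and key lengths are placeholders):

#include <linux/errno.h>
#include <crypto/skcipher.h>

/* Placeholder setkey: a bad key length is reported with a bare -EINVAL,
 * with no CRYPTO_TFM_RES_* flag set alongside it.
 */
static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	if (keylen != 16 && keylen != 24 && keylen != 32)
		return -EINVAL;

	/* ... store/program the key for this tfm ... */
	return 0;
}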
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1564a6f8c9cb..c8b9408541a9 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -2894,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2916,7 +2910,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2967,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
keylen + ctx->salt_len);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2992,7 +2980,6 @@ badkey:
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 87053e46c788..fac5b2e26610 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -130,13 +130,13 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
scatterlist crypto API to the SEC4 via job ring.
config CRYPTO_DEV_FSL_CAAM_PKC_API
- bool "Register public key cryptography implementations with Crypto API"
- default y
- select CRYPTO_RSA
- help
- Selecting this will allow SEC Public key support for RSA.
- Supported cryptographic primitives: encryption, decryption,
- signature and verification.
+ bool "Register public key cryptography implementations with Crypto API"
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
config CRYPTO_DEV_FSL_CAAM_RNG_API
bool "Register caam device for hwrng API"
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2912006b946b..ef1a65f4fc92 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -619,7 +617,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int err;
err = aes_check_keylen(keylen - 4);
- if (err) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 8e3449670d2f..4a29e0ef9d63 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -268,7 +268,6 @@ skip_split_key:
memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
@@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -693,11 +680,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(jrdev, "key size mismatch\n");
- goto badkey;
+ return -EINVAL;
}
ctx->cdata.keylen = keylen;
@@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_enc);
if (ret) {
dev_err(jrdev, "driver enc context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
@@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->sh_desc_dec);
if (ret) {
dev_err(jrdev, "driver dec context update failed\n");
- goto badkey;
+ return -EINVAL;
}
}
return ret;
-badkey:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 3443f6d6dd83..28669cbecf77 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
- goto badkey;
+ goto out;
err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
aead_setkey(aead, key, keylen);
@@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int ivsize = crypto_aead_ivsize(aead);
unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
- if (keylen != CHACHA_KEY_SIZE + saltlen) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
return -EINVAL;
- }
ctx->cdata.key_virt = key;
ctx->cdata.keylen = keylen - saltlen;
@@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
int ret;
ret = aes_check_keylen(keylen - 4);
- if (ret) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
ctx1_iv_off = 16;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
@@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(skcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != CHACHA_KEY_SIZE)
return -EINVAL;
- }
return skcipher_setkey(skcipher, key, keylen, 0);
}
@@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(dev, "key size mismatch\n");
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2481,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi2",
.cra_blocksize = DES_BLOCK_SIZE,
},
@@ -2998,15 +2972,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
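
The ping-pong buf_0/buf_1 pair and its current_buf selector collapse into a single buf with buflen, plus a next_buflen for the residue of the in-flight request; the copy of the unhashed tail correspondingly moves into the completion callbacks further down. A simplified, software-only sketch of the bookkeeping (no DMA, hypothetical names, and it assumes at least one full block is being hashed, i.e. the driver's to_hash > 0 path):

#include <linux/string.h>
#include <linux/types.h>

struct example_hash_state {
	u8 buf[64];			/* residue carried between updates */
	unsigned int buflen;		/* bytes currently held in buf */
	unsigned int next_buflen;	/* residue of the in-flight request */
};

/* Hash (buflen + len) minus the new residue, then -- once the job has
 * completed -- save that residue from the tail of the new data.
 */
static void example_save_residue(struct example_hash_state *st,
				 const u8 *data, unsigned int len,
				 unsigned int blocksize)
{
	unsigned int in_len = st->buflen + len;

	st->next_buflen = in_len & (blocksize - 1);
	/* ... submit in_len - st->next_buflen bytes for hashing ... */
	memcpy(st->buf, data + len - st->next_buflen, st->next_buflen);
	st->buflen = st->next_buflen;
}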
struct caam_export_state {
@@ -3018,42 +2990,17 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
struct dpaa2_sg_entry *qm_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(dev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, state->buf_dma)) {
dev_err(dev, "unable to map buf\n");
@@ -3304,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
return ret;
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -3321,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -3383,9 +3329,17 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3440,9 +3394,17 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = caam_qi2_strstatus(ctx->dev, status);
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
- switch_buf(state);
qi_cache_free(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -3464,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state), last_buflen;
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
@@ -3524,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req)
if (mapped_nents) {
sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
} else {
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
true);
@@ -3566,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -3592,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -3663,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, qm_sg_src_index;
int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -3852,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = -ENOMEM;
@@ -3925,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int in_len = *buflen + req->nbytes, to_hash;
int qm_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -3977,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
-
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -4029,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4055,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
int qm_sg_bytes, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -4151,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req)
struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -4220,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
@@ -4257,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -4288,10 +4229,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -4321,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4348,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 65399cb2a770..8d9143407fc5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,15 +107,13 @@ struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
int ctx_dma_len;
- u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_0;
- u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
- int buflen_1;
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen;
+ int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
- int current_buf;
};
struct caam_export_state {
@@ -127,31 +125,6 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
-static inline void switch_buf(struct caam_hash_state *state)
-{
- state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
- return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
- return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
static inline bool is_cmac_aes(u32 algtype)
{
return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
@@ -183,12 +156,12 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
struct caam_hash_state *state)
{
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
if (!buflen)
return 0;
- state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+ state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map buf\n");
@@ -500,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return ahash_set_sh_desc(ahash);
bad_free_key:
kfree(hashed_key);
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -510,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *jrdev = ctx->jrdev;
- if (keylen != AES_KEYSIZE_128) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != AES_KEYSIZE_128)
return -EINVAL;
- }
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
@@ -533,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
int err;
err = aes_check_keylen(keylen);
- if (err) {
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (err)
return err;
- }
/* key is immediate data for all cmac shared descriptors */
ctx->adata.key_virt = key;
@@ -578,7 +546,7 @@ static inline void ahash_unmap(struct device *dev,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
if (state->buf_dma) {
- dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ dma_unmap_single(dev, state->buf_dma, state->buflen,
DMA_TO_DEVICE);
state->buf_dma = 0;
}
@@ -643,9 +611,17 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -703,9 +679,17 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
- switch_buf(state);
kfree(edesc);
+ scatterwalk_map_and_copy(state->buf, req->src,
+ req->nbytes - state->next_buflen,
+ state->next_buflen, 0);
+ state->buflen = state->next_buflen;
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+ state->buflen, 1);
+
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
@@ -786,18 +770,16 @@ static int ahash_update_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
- u8 *next_buf = alt_buf(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- last_buflen = *next_buflen;
*next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
@@ -868,10 +850,6 @@ static int ahash_update_ctx(struct ahash_request *req)
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -901,14 +879,11 @@ static int ahash_update_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = last_buflen;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -925,7 +900,7 @@ static int ahash_final_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -991,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
@@ -1148,8 +1123,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int buflen = *current_buflen(state);
+ u8 *buf = state->buf;
+ int buflen = state->buflen;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -1207,11 +1182,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = current_buf(state);
- int *buflen = current_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int blocksize = crypto_ahash_blocksize(ahash);
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -1278,12 +1252,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
- if (*next_buflen) {
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- }
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1317,14 +1285,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
- *next_buflen = 0;
- }
- print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1342,7 +1307,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int buflen = *current_buflen(state);
+ int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1428,8 +1393,9 @@ static int ahash_update_first(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = alt_buf(state);
- int *next_buflen = alt_buflen(state);
+ u8 *buf = state->buf;
+ int *buflen = &state->buflen;
+ int *next_buflen = &state->next_buflen;
int to_hash;
int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
@@ -1491,10 +1457,6 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src, to_hash,
- *next_buflen, 0);
-
desc = edesc->hw_desc;
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1517,14 +1479,14 @@ static int ahash_update_first(struct ahash_request *req)
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
- scatterwalk_map_and_copy(next_buf, req->src, 0,
+ scatterwalk_map_and_copy(buf, req->src, 0,
req->nbytes, 0);
- switch_buf(state);
- }
+ *buflen = *next_buflen;
- print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
- 1);
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf,
+ *buflen, 1);
+ }
return ret;
unmap_ctx:
@@ -1548,10 +1510,9 @@ static int ahash_init(struct ahash_request *req)
state->ctx_dma = 0;
state->ctx_dma_len = 0;
- state->current_buf = 0;
state->buf_dma = 0;
- state->buflen_0 = 0;
- state->buflen_1 = 0;
+ state->buflen = 0;
+ state->next_buflen = 0;
return 0;
}
@@ -1581,16 +1542,8 @@ static int ahash_export(struct ahash_request *req, void *out)
{
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_export_state *export = out;
- int len;
- u8 *buf;
-
- if (state->current_buf) {
- buf = state->buf_1;
- len = state->buflen_1;
- } else {
- buf = state->buf_0;
- len = state->buflen_0;
- }
+ u8 *buf = state->buf;
+ int len = state->buflen;
memcpy(export->buf, buf, len);
memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -1608,9 +1561,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
const struct caam_export_state *export = in;
memset(state, 0, sizeof(*state));
- memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->buf, export->buf, export->buflen);
memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
- state->buflen_0 = export->buflen;
+ state->buflen = export->buflen;
state->update = export->update;
state->final = export->final;
state->finup = export->finup;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d7c3c3805693..7139366da016 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -99,10 +99,13 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1 ||
/*
- * Apparently on i.MX8MQ it doesn't matter if virt_en == 1
+ * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq")) {
+ of_machine_is_compatible("fsl,imx8mq") ||
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -508,7 +511,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
- { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
@@ -671,11 +674,9 @@ static int caam_probe(struct platform_device *pdev)
of_node_put(np);
if (!ctrlpriv->mc_en)
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST);
handle_imx6_err005766(&ctrl->mcr);
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1ad66677d88e..1be1adffff1d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
} else {
- crypto_skcipher_set_flags(cipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 6f80cc3b5c84..dce5423a5883 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
union fc_ctx_flags flags;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
/* fill crypto context */
fctx = nctx->u.fctx;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 97af4d50d003..18088b0a2257 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int aes_keylen;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
@@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
keylen /= 2;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
fctx = nctx->u.fctx;
/* copy KEY2 */
@@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (aes_keylen < 0)
return -EINVAL;
- }
return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
}
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 6b86f1e6d634..db362fe472ea 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,7 +8,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o
ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
-ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
+ sev-dev.o \
+ tee-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 32f19f402073..5eba7ee49e81 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff50ee80d223..9e8f07c1afac 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 33328a153225..51e12fbd1159 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.type = CCP_AES_TYPE_256;
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->u.aes.mode = alg->mode;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 453b9797f93f..474e6f1a6a84 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
key_len = digest_size;
} else {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0186b3df4c87..0d5576f6ad21 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv3 = {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 7ca2d3408e7a..e95e7aa5dbf1 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -2,57 +2,20 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/spinlock_types.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/hw_random.h>
-#include <linux/ccp.h>
-#include <linux/firmware.h>
-
-#include <asm/smp.h>
+#include <linux/irqreturn.h>
#include "sp-dev.h"
#include "psp-dev.h"
+#include "sev-dev.h"
+#include "tee-dev.h"
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
-#define SEV_FW_NAME_SIZE 64
-
-static DEFINE_MUTEX(sev_cmd_mutex);
-static struct sev_misc_dev *misc_dev;
-static struct psp_device *psp_master;
-
-static int psp_cmd_timeout = 100;
-module_param(psp_cmd_timeout, int, 0644);
-MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
-
-static int psp_probe_timeout = 5;
-module_param(psp_probe_timeout, int, 0644);
-MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
-
-static bool psp_dead;
-static int psp_timeout;
-
-static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
-{
- if (psp_master->api_major > maj)
- return true;
- if (psp_master->api_major == maj && psp_master->api_minor >= min)
- return true;
- return false;
-}
+struct psp_device *psp_master;
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
@@ -75,902 +38,95 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
{
struct psp_device *psp = data;
unsigned int status;
- int reg;
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
- /* Check if it is command completion: */
- if (!(status & PSP_CMD_COMPLETE))
- goto done;
+ /* invoke subdevice interrupt handlers */
+ if (status) {
+ if (psp->sev_irq_handler)
+ psp->sev_irq_handler(irq, psp->sev_irq_data, status);
- /* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
- psp->sev_int_rcvd = 1;
- wake_up(&psp->sev_int_queue);
+ if (psp->tee_irq_handler)
+ psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
-done:
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
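
The reworked psp_irq_handler() is now a thin demultiplexer: it reads a single status word and forwards it to whichever sub-device callbacks are registered, rather than decoding SEV command completion inline. A minimal userspace sketch of that dispatch pattern, assuming a simplified struct and a faked status value (neither is the kernel's actual layout):

    #include <stdio.h>

    typedef void (*psp_irq_handler_t)(int irq, void *data, unsigned int status);

    struct psp {
        psp_irq_handler_t sev_irq_handler;
        void *sev_irq_data;
        psp_irq_handler_t tee_irq_handler;
        void *tee_irq_data;
    };

    static void sev_handler(int irq, void *data, unsigned int status)
    {
        printf("SEV sub-handler: irq=%d status=%#x\n", irq, status);
    }

    int main(void)
    {
        struct psp psp = { .sev_irq_handler = sev_handler };
        unsigned int status = 0x2;   /* pretend value read from intsts_reg */

        /* Fan the one status word out to whichever handlers are registered. */
        if (status) {
            if (psp.sev_irq_handler)
                psp.sev_irq_handler(7, psp.sev_irq_data, status);
            if (psp.tee_irq_handler)
                psp.tee_irq_handler(7, psp.tee_irq_data, status);
        }
        return 0;
    }
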
-static int sev_wait_cmd_ioc(struct psp_device *psp,
- unsigned int *reg, unsigned int timeout)
-{
- int ret;
-
- ret = wait_event_timeout(psp->sev_int_queue,
- psp->sev_int_rcvd, timeout * HZ);
- if (!ret)
- return -ETIMEDOUT;
-
- *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
-
- return 0;
-}
-
-static int sev_cmd_buffer_len(int cmd)
-{
- switch (cmd) {
- case SEV_CMD_INIT: return sizeof(struct sev_data_init);
- case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
- case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
- case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
- case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
- case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
- case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
- case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
- case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
- case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
- case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
- case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
- case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
- case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
- case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
- case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
- case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
- case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
- case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
- case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
- case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
- case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
- case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
- case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
- case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
- case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
- default: return 0;
- }
-
- return 0;
-}
-
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
-{
- struct psp_device *psp = psp_master;
- unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp_dead)
- return -EBUSY;
-
- /* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
-
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
- cmd, phys_msb, phys_lsb, psp_timeout);
-
- print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
- iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
-
- psp->sev_int_rcvd = 0;
-
- reg = cmd;
- reg <<= PSP_CMDRESP_CMD_SHIFT;
- reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
-
- /* wait for command completion */
- ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
- if (ret) {
- if (psp_ret)
- *psp_ret = 0;
-
- dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
- psp_dead = true;
-
- return ret;
- }
-
- psp_timeout = psp_cmd_timeout;
-
- if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
-
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
- ret = -EIO;
- }
-
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- sev_cmd_buffer_len(cmd), false);
-
- return ret;
-}
-
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_do_cmd_locked(cmd, data, psp_ret);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int __sev_platform_init_locked(int *error)
-{
- struct psp_device *psp = psp_master;
- int rc = 0;
-
- if (!psp)
- return -ENODEV;
-
- if (psp->sev_state == SEV_STATE_INIT)
- return 0;
-
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
- if (rc)
- return rc;
-
- psp->sev_state = SEV_STATE_INIT;
-
- /* Prepare for first SEV guest launch after INIT */
- wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
- return rc;
-
- dev_dbg(psp->dev, "SEV firmware initialized\n");
-
- return rc;
-}
-
-int sev_platform_init(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(sev_platform_init);
-
-static int __sev_platform_shutdown_locked(int *error)
-{
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
- return ret;
-
- psp_master->sev_state = SEV_STATE_UNINIT;
- dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
-
- return ret;
-}
-
-static int sev_platform_shutdown(int *error)
+static unsigned int psp_get_capability(struct psp_device *psp)
{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
-static int sev_get_platform_state(int *state, int *error)
-{
- int rc;
-
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
- &psp_master->status_cmd_buf, error);
- if (rc)
- return rc;
-
- *state = psp_master->status_cmd_buf.state;
- return rc;
-}
-
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
-{
- int state, rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
/*
- * The SEV spec requires that FACTORY_RESET must be issued in
- * UNINIT state. Before we go further lets check if any guest is
- * active.
- *
- * If FW is in WORKING state then deny the request otherwise issue
- * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET.
- *
- */
- rc = sev_get_platform_state(&state, &argp->error);
- if (rc)
- return rc;
-
- if (state == SEV_STATE_WORKING)
- return -EBUSY;
-
- if (state == SEV_STATE_INIT) {
- rc = __sev_platform_shutdown_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_status *data = &psp_master->status_cmd_buf;
- int ret;
-
- ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
- if (ret)
- return ret;
-
- if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
- ret = -EFAULT;
-
- return ret;
-}
-
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
-{
- int rc;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
- if (rc)
- return rc;
- }
-
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
-}
-
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
-{
- struct sev_user_data_pek_csr input;
- struct sev_data_pek_csr *data;
- void *blob = NULL;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* userspace wants to query CSR length */
- if (!input.address || !input.length)
- goto cmd;
-
- /* allocate a physically contiguous buffer to store the CSR blob */
- if (!access_ok(input.address, input.length) ||
- input.length > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EFAULT;
- goto e_free;
- }
-
- blob = kmalloc(input.length, GFP_KERNEL);
- if (!blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->address = __psp_pa(blob);
- data->len = input.length;
-
-cmd:
- if (psp_master->sev_state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_blob;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
-
- /* If we query the CSR length, FW responded with expected data. */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_blob;
- }
-
- if (blob) {
- if (copy_to_user((void __user *)input.address, blob, input.length))
- ret = -EFAULT;
- }
-
-e_free_blob:
- kfree(blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
-{
- if (!uaddr || !len)
- return ERR_PTR(-EINVAL);
-
- /* verify that blob length does not exceed our limit */
- if (len > SEV_FW_BLOB_MAX_SIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user((void __user *)(uintptr_t)uaddr, len);
-}
-EXPORT_SYMBOL_GPL(psp_copy_user_blob);
-
-static int sev_get_api_version(void)
-{
- struct sev_user_data_status *status;
- int error = 0, ret;
-
- status = &psp_master->status_cmd_buf;
- ret = sev_platform_status(status, &error);
- if (ret) {
- dev_err(psp_master->dev,
- "SEV: failed to get status. Error: %#x\n", error);
- return 1;
- }
-
- psp_master->api_major = status->api_major;
- psp_master->api_minor = status->api_minor;
- psp_master->build = status->build;
- psp_master->sev_state = status->state;
-
- return 0;
-}
-
-static int sev_get_firmware(struct device *dev,
- const struct firmware **firmware)
-{
- char fw_name_specific[SEV_FW_NAME_SIZE];
- char fw_name_subset[SEV_FW_NAME_SIZE];
-
- snprintf(fw_name_specific, sizeof(fw_name_specific),
- "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- snprintf(fw_name_subset, sizeof(fw_name_subset),
- "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
- boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
-
- /* Check for SEV FW for a particular model.
- * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
- *
- * or
- *
- * Check for SEV FW common to a subset of models.
- * Ex. amd_sev_fam17h_model0xh.sbin for
- * Family 17h Model 00h -- Family 17h Model 0Fh
- *
- * or
- *
- * Fall-back to using generic name: sev.fw
+	 * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
*/
- if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
- (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
- (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return 0;
-
- return -ENOENT;
-}
-
-/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
-static int sev_update_firmware(struct device *dev)
-{
- struct sev_data_download_firmware *data;
- const struct firmware *firmware;
- int ret, error, order;
- struct page *p;
- u64 data_size;
-
- if (sev_get_firmware(dev, &firmware) == -ENOENT) {
- dev_dbg(dev, "No SEV firmware file present\n");
- return -1;
- }
-
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
- p = alloc_pages(GFP_KERNEL, order);
- if (!p) {
- ret = -1;
- goto fw_err;
}
- /*
- * Copy firmware data to a kernel allocated contiguous
- * memory region.
- */
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
-
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
-
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
- if (ret)
- dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
-
- __free_pages(p, order);
-
-fw_err:
- release_firmware(firmware);
-
- return ret;
+ return val;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int psp_check_sev_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_pek_cert_import input;
- struct sev_data_pek_cert_import *data;
- void *pek_blob, *oca_blob;
- int ret;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* copy PEK certificate blobs from userspace */
- pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
- if (IS_ERR(pek_blob)) {
- ret = PTR_ERR(pek_blob);
- goto e_free;
- }
-
- data->pek_cert_address = __psp_pa(pek_blob);
- data->pek_cert_len = input.pek_cert_len;
-
- /* copy PEK certificate blobs from userspace */
- oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
- if (IS_ERR(oca_blob)) {
- ret = PTR_ERR(oca_blob);
- goto e_free_pek;
- }
-
- data->oca_cert_address = __psp_pa(oca_blob);
- data->oca_cert_len = input.oca_cert_len;
-
- /* If platform is not in INIT state then transition it to INIT */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- goto e_free_oca;
+ /* Check if device supports SEV feature */
+ if (!(capability & 1)) {
+ dev_dbg(psp->dev, "psp does not support SEV\n");
+ return -ENODEV;
}
- ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
-
-e_free_oca:
- kfree(oca_blob);
-e_free_pek:
- kfree(pek_blob);
-e_free:
- kfree(data);
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+static int psp_check_tee_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_user_data_get_id2 input;
- struct sev_data_get_id *data;
- void *id_blob = NULL;
- int ret;
-
- /* SEV GET_ID is available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- /* Check if we have write access to the userspace buffer */
- if (input.address &&
- input.length &&
- !access_ok(input.address, input.length))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (input.address && input.length) {
- id_blob = kmalloc(input.length, GFP_KERNEL);
- if (!id_blob) {
- kfree(data);
- return -ENOMEM;
- }
-
- data->address = __psp_pa(id_blob);
- data->len = input.length;
- }
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
-
- /*
- * Firmware will return the length of the ID value (either the minimum
- * required length or the actual length written), return it to the user.
- */
- input.length = data->len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free;
- }
-
- if (id_blob) {
- if (copy_to_user((void __user *)input.address,
- id_blob, data->len)) {
- ret = -EFAULT;
- goto e_free;
- }
+ /* Check if device supports TEE feature */
+ if (!(capability & 2)) {
+ dev_dbg(psp->dev, "psp does not support TEE\n");
+ return -ENODEV;
}
-e_free:
- kfree(id_blob);
- kfree(data);
-
- return ret;
+ return 0;
}
-static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+static int psp_check_support(struct psp_device *psp,
+ unsigned int capability)
{
- struct sev_data_get_id *data;
- u64 data_size, user_size;
- void *id_blob, *mem;
- int ret;
+ int sev_support = psp_check_sev_support(psp, capability);
+ int tee_support = psp_check_tee_support(psp, capability);
- /* SEV GET_ID available from SEV API v0.16 and up */
- if (!sev_version_greater_or_equal(0, 16))
- return -ENOTSUPP;
-
- /* SEV FW expects the buffer it fills with the ID to be
- * 8-byte aligned. Memory allocated should be enough to
- * hold data structure + alignment padding + memory
- * where SEV FW writes the ID.
- */
- data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
- user_size = sizeof(struct sev_user_data_get_id);
-
- mem = kzalloc(data_size + user_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- data = mem;
- id_blob = mem + data_size;
-
- data->address = __psp_pa(id_blob);
- data->len = user_size;
-
- ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
- if (!ret) {
- if (copy_to_user((void __user *)argp->data, id_blob, data->len))
- ret = -EFAULT;
- }
-
- kfree(mem);
+	/* Return an error if the device supports neither SEV nor TEE */
+ if (sev_support && tee_support)
+ return -ENODEV;
- return ret;
+ return 0;
}
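
Taken together, psp_get_capability() and the two check helpers implement a small bitmask policy: bit 0 of the feature register advertises SEV, bit 1 advertises TEE, an all-ones read means the register is unreachable (capability 0), and the probe fails only when neither feature is present. A self-contained sketch of that decision table; the PSP_CAP_* names are invented for illustration:

    #include <stdio.h>

    #define PSP_CAP_SEV (1u << 0)   /* bit 0: SEV supported (capability & 1) */
    #define PSP_CAP_TEE (1u << 1)   /* bit 1: TEE supported (capability & 2) */

    static unsigned int get_capability(unsigned int feature_reg)
    {
        /* An all-ones read means the BIOS blocked access: report nothing. */
        if (feature_reg == 0xffffffff)
            return 0;
        return feature_reg;
    }

    int main(void)
    {
        unsigned int regs[] = { 0x1, 0x2, 0x3, 0x0, 0xffffffff };

        for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
            unsigned int cap = get_capability(regs[i]);
            int sev = !!(cap & PSP_CAP_SEV);
            int tee = !!(cap & PSP_CAP_TEE);

            /* The probe fails only when neither sub-device is supported. */
            printf("reg=%#010x -> sev=%d tee=%d -> %s\n", regs[i], sev, tee,
                   (sev || tee) ? "init" : "-ENODEV");
        }
        return 0;
    }
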
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int psp_init(struct psp_device *psp, unsigned int capability)
{
- struct sev_user_data_pdh_cert_export input;
- void *pdh_blob = NULL, *cert_blob = NULL;
- struct sev_data_pdh_cert_export *data;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (psp_master->sev_state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
+ if (!psp_check_sev_support(psp, capability)) {
+ ret = sev_dev_init(psp);
if (ret)
return ret;
}
- if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
- return -EFAULT;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Userspace wants to query the certificate length. */
- if (!input.pdh_cert_address ||
- !input.pdh_cert_len ||
- !input.cert_chain_address)
- goto cmd;
-
- /* Allocate a physically contiguous buffer to store the PDH blob. */
- if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- /* Allocate a physically contiguous buffer to store the cert chain blob. */
- if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
- !access_ok(input.cert_chain_address, input.cert_chain_len)) {
- ret = -EFAULT;
- goto e_free;
- }
-
- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
- if (!pdh_blob) {
- ret = -ENOMEM;
- goto e_free;
- }
-
- data->pdh_cert_address = __psp_pa(pdh_blob);
- data->pdh_cert_len = input.pdh_cert_len;
-
- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
- if (!cert_blob) {
- ret = -ENOMEM;
- goto e_free_pdh;
- }
-
- data->cert_chain_address = __psp_pa(cert_blob);
- data->cert_chain_len = input.cert_chain_len;
-
-cmd:
- ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
-
- /* If we query the length, FW responded with expected data. */
- input.cert_chain_len = data->cert_chain_len;
- input.pdh_cert_len = data->pdh_cert_len;
-
- if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
- ret = -EFAULT;
- goto e_free_cert;
- }
-
- if (pdh_blob) {
- if (copy_to_user((void __user *)input.pdh_cert_address,
- pdh_blob, input.pdh_cert_len)) {
- ret = -EFAULT;
- goto e_free_cert;
- }
- }
-
- if (cert_blob) {
- if (copy_to_user((void __user *)input.cert_chain_address,
- cert_blob, input.cert_chain_len))
- ret = -EFAULT;
- }
-
-e_free_cert:
- kfree(cert_blob);
-e_free_pdh:
- kfree(pdh_blob);
-e_free:
- kfree(data);
- return ret;
-}
-
-static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct sev_issue_cmd input;
- int ret = -EFAULT;
-
- if (!psp_master)
- return -ENODEV;
-
- if (ioctl != SEV_ISSUE_CMD)
- return -EINVAL;
-
- if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
- return -EFAULT;
-
- if (input.cmd > SEV_MAX)
- return -EINVAL;
-
- mutex_lock(&sev_cmd_mutex);
-
- switch (input.cmd) {
-
- case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
- break;
- case SEV_PLATFORM_STATUS:
- ret = sev_ioctl_do_platform_status(&input);
- break;
- case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
- break;
- case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
- break;
- case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
- break;
- case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
- break;
- case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
- break;
- case SEV_GET_ID:
- pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
- ret = sev_ioctl_do_get_id(&input);
- break;
- case SEV_GET_ID2:
- ret = sev_ioctl_do_get_id2(&input);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
- ret = -EFAULT;
-out:
- mutex_unlock(&sev_cmd_mutex);
-
- return ret;
-}
-
-static const struct file_operations sev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sev_ioctl,
-};
-
-int sev_platform_status(struct sev_user_data_status *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_platform_status);
-
-int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_deactivate);
-
-int sev_guest_activate(struct sev_data_activate *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_activate);
-
-int sev_guest_decommission(struct sev_data_decommission *data, int *error)
-{
- return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_decommission);
-
-int sev_guest_df_flush(int *error)
-{
- return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
-}
-EXPORT_SYMBOL_GPL(sev_guest_df_flush);
-
-static void sev_exit(struct kref *ref)
-{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
- misc_deregister(&misc_dev->misc);
-}
-
-static int sev_misc_init(struct psp_device *psp)
-{
- struct device *dev = psp->dev;
- int ret;
-
- /*
- * SEV feature support can be detected on multiple devices but the SEV
- * FW commands must be issued on the master. During probe, we do not
- * know the master hence we create /dev/sev on the first device probe.
- * sev_do_cmd() finds the right master device to which to issue the
- * command to the firmware.
- */
- if (!misc_dev) {
- struct miscdevice *misc;
-
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
- if (!misc_dev)
- return -ENOMEM;
-
- misc = &misc_dev->misc;
- misc->minor = MISC_DYNAMIC_MINOR;
- misc->name = DEVICE_NAME;
- misc->fops = &sev_fops;
-
- ret = misc_register(misc);
+ if (!psp_check_tee_support(psp, capability)) {
+ ret = tee_dev_init(psp);
if (ret)
return ret;
-
- kref_init(&misc_dev->refcount);
- } else {
- kref_get(&misc_dev->refcount);
- }
-
- init_waitqueue_head(&psp->sev_int_queue);
- psp->sev_misc = misc_dev;
- dev_dbg(dev, "registered SEV device\n");
-
- return 0;
-}
-
-static int psp_check_sev_support(struct psp_device *psp)
-{
- unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
-
- /*
- * Check for a access to the registers. If this read returns
- * 0xffffffff, it's likely that the system is running a broken
- * BIOS which disallows access to the device. Stop here and
- * fail the PSP initialization (but not the load, as the CCP
- * could get properly initialized).
- */
- if (val == 0xffffffff) {
- dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
- return -ENODEV;
- }
-
- if (!(val & 1)) {
- /* Device does not support the SEV feature */
- dev_dbg(psp->dev, "psp does not support SEV\n");
- return -ENODEV;
}
return 0;
@@ -980,6 +136,7 @@ int psp_dev_init(struct sp_device *sp)
{
struct device *dev = sp->dev;
struct psp_device *psp;
+ unsigned int capability;
int ret;
ret = -ENOMEM;
@@ -998,7 +155,11 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
- ret = psp_check_sev_support(psp);
+ capability = psp_get_capability(psp);
+ if (!capability)
+ goto e_disable;
+
+ ret = psp_check_support(psp, capability);
if (ret)
goto e_disable;
@@ -1013,7 +174,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = sev_misc_init(psp);
+ ret = psp_init(psp, capability);
if (ret)
goto e_irq;
@@ -1049,83 +210,52 @@ void psp_dev_destroy(struct sp_device *sp)
if (!psp)
return;
- if (psp->sev_misc)
- kref_put(&misc_dev->refcount, sev_exit);
+ sev_dev_destroy(psp);
+
+ tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
}
-int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
- void *data, int *error)
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
{
- if (!filep || filep->f_op != &sev_fops)
- return -EBADF;
-
- return sev_do_cmd(cmd, data, error);
+ psp->sev_irq_data = data;
+ psp->sev_irq_handler = handler;
}
-EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
-void psp_pci_init(void)
+void psp_clear_sev_irq_handler(struct psp_device *psp)
{
- struct sp_device *sp;
- int error, rc;
-
- sp = sp_get_psp_master_device();
- if (!sp)
- return;
-
- psp_master = sp->psp_data;
-
- psp_timeout = psp_probe_timeout;
+ psp_set_sev_irq_handler(psp, NULL, NULL);
+}
- if (sev_get_api_version())
- goto err;
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data)
+{
+ psp->tee_irq_data = data;
+ psp->tee_irq_handler = handler;
+}
- /*
- * If platform is not in UNINIT state then firmware upgrade and/or
- * platform INIT command will fail. These command require UNINIT state.
- *
- * In a normal boot we should never run into case where the firmware
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
- * may not go through a typical shutdown sequence and may leave the
- * firmware in INIT or WORKING state.
- */
+void psp_clear_tee_irq_handler(struct psp_device *psp)
+{
+ psp_set_tee_irq_handler(psp, NULL, NULL);
+}
- if (psp_master->sev_state != SEV_STATE_UNINIT) {
- sev_platform_shutdown(NULL);
- psp_master->sev_state = SEV_STATE_UNINIT;
- }
+struct psp_device *psp_get_master_device(void)
+{
+ struct sp_device *sp = sp_get_psp_master_device();
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(psp_master->dev) == 0)
- sev_get_api_version();
+ return sp ? sp->psp_data : NULL;
+}
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sp->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
+void psp_pci_init(void)
+{
+ psp_master = psp_get_master_device();
- if (rc) {
- dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_master)
return;
- }
-
- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
- psp_master->api_minor, psp_master->build);
-
- return;
-err:
- psp_master = NULL;
+ sev_pci_init();
}
void psp_pci_exit(void)
@@ -1133,5 +263,5 @@ void psp_pci_exit(void)
if (!psp_master)
return;
- sev_platform_shutdown(NULL);
+ sev_pci_exit();
}
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index dd516b35ba86..ef38e4135d81 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
@@ -11,35 +11,20 @@
#define __PSP_DEV_H__
#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/dmapool.h>
-#include <linux/hw_random.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/psp-sev.h>
-#include <linux/miscdevice.h>
-#include <linux/capability.h>
#include "sp-dev.h"
-#define PSP_CMD_COMPLETE BIT(1)
-
-#define PSP_CMDRESP_CMD_SHIFT 16
-#define PSP_CMDRESP_IOC BIT(0)
#define PSP_CMDRESP_RESP BIT(31)
#define PSP_CMDRESP_ERR_MASK 0xffff
#define MAX_PSP_NAME_LEN 16
-struct sev_misc_dev {
- struct kref refcount;
- struct miscdevice misc;
-};
+extern struct psp_device *psp_master;
+
+typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
struct psp_device {
struct list_head entry;
@@ -52,16 +37,24 @@ struct psp_device {
void __iomem *io_regs;
- int sev_state;
- unsigned int sev_int_rcvd;
- wait_queue_head_t sev_int_queue;
- struct sev_misc_dev *sev_misc;
- struct sev_user_data_status status_cmd_buf;
- struct sev_data_init init_cmd_buf;
+ psp_irq_handler_t sev_irq_handler;
+ void *sev_irq_data;
+
+ psp_irq_handler_t tee_irq_handler;
+ void *tee_irq_data;
- u8 api_major;
- u8 api_minor;
- u8 build;
+ void *sev_data;
+ void *tee_data;
};
+void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_sev_irq_handler(struct psp_device *psp);
+
+void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
+ void *data);
+void psp_clear_tee_irq_handler(struct psp_device *psp);
+
+struct psp_device *psp_get_master_device(void);
+
#endif /* __PSP_DEV_H */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
new file mode 100644
index 000000000000..e467860f797d
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Encrypted Virtualization (SEV) interface
+ *
+ * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+#include <linux/firmware.h>
+
+#include <asm/smp.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
+static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ if (sev->api_major > maj)
+ return true;
+
+ if (sev->api_major == maj && sev->api_minor >= min)
+ return true;
+
+ return false;
+}
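
sev_version_greater_or_equal() gates optional features (GET_ID needs API 0.16+ and firmware download needs 0.15+, per the call sites below) on the firmware's reported major/minor pair. The comparison reduces to a single boolean expression; a tiny standalone sketch:

    #include <stdio.h>

    /* Same major/minor comparison as above, as a pure function. */
    static int version_geq(unsigned char api_major, unsigned char api_minor,
                           unsigned char maj, unsigned char min)
    {
        return api_major > maj || (api_major == maj && api_minor >= min);
    }

    int main(void)
    {
        printf("0.17 >= 0.16? %d\n", version_geq(0, 17, 0, 16)); /* 1 */
        printf("0.14 >= 0.15? %d\n", version_geq(0, 14, 0, 15)); /* 0 */
        printf("1.00 >= 0.16? %d\n", version_geq(1, 0, 0, 16));  /* 1 */
        return 0;
    }
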
+
+static void sev_irq_handler(int irq, void *data, unsigned int status)
+{
+ struct sev_device *sev = data;
+ int reg;
+
+ /* Check if it is command completion: */
+ if (!(status & SEV_CMD_COMPLETE))
+ return;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (reg & PSP_CMDRESP_RESP) {
+ sev->int_rcvd = 1;
+ wake_up(&sev->int_queue);
+ }
+}
+
+static int sev_wait_cmd_ioc(struct sev_device *sev,
+ unsigned int *reg, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(sev->int_queue,
+ sev->int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+
+ return 0;
+}
+
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ default: return 0;
+ }
+
+ return 0;
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg, ret = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ sev = psp->sev_data;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+
+ sev->int_rcvd = 0;
+
+ reg = cmd;
+ reg <<= SEV_CMDRESP_CMD_SHIFT;
+ reg |= SEV_CMDRESP_IOC;
+ iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
+
+ /* wait for command completion */
+ ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
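
__sev_do_cmd_locked() drives a simple doorbell mailbox: the command buffer's physical address is split across the lo/hi registers, then one 32-bit word carrying the command index (bits 16 and up) plus the interrupt-on-complete bit is written to cmdresp_reg; in the reply, bit 31 signals completion and the low 16 bits carry the SEV status code. A sketch of just the register-word encoding, mirroring the SEV_CMDRESP_*/PSP_CMDRESP_* constants defined in the headers below (the command id is arbitrary):

    #include <stdio.h>

    #define SEV_CMDRESP_CMD_SHIFT 16          /* command index in bits 16+ */
    #define SEV_CMDRESP_IOC       (1u << 0)   /* interrupt-on-complete */
    #define PSP_CMDRESP_RESP      (1u << 31)  /* firmware has responded */
    #define PSP_CMDRESP_ERR_MASK  0xffffu     /* status code in low 16 bits */

    int main(void)
    {
        unsigned int cmd = 0x01;              /* arbitrary command id */
        unsigned int req = (cmd << SEV_CMDRESP_CMD_SHIFT) | SEV_CMDRESP_IOC;

        /* Simulated firmware reply: RESP set, error field 0 means success. */
        unsigned int reply = PSP_CMDRESP_RESP | 0x0;

        printf("request word: %#010x\n", req);
        printf("responded:    %s\n", (reply & PSP_CMDRESP_RESP) ? "yes" : "no");
        printf("status code:  %#x\n", reply & PSP_CMDRESP_ERR_MASK);
        return 0;
    }
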
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc = 0;
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ sev->state = SEV_STATE_INIT;
+
+ /* Prepare for first SEV guest launch after INIT */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ dev_dbg(sev->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ sev->state = SEV_STATE_UNINIT;
+ dev_dbg(sev->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(NULL);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &sev->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = sev->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+	 * The SEV spec requires that FACTORY_RESET must be issued in
+	 * UNINIT state. Before we go further, let's check if any guest is
+	 * active.
+	 *
+	 * If the FW is in the WORKING state, deny the request; otherwise,
+	 * issue a SHUTDOWN command to transition from INIT to UNINIT before
+	 * issuing the FACTORY_RESET.
+	 */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *data = &sev->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (sev->state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query CSR length */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (sev->state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+ /* If we query the CSR length, FW responded with expected data. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+static int sev_get_api_version(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_status *status;
+ int error = 0, ret;
+
+ status = &sev->status_cmd_buf;
+ ret = sev_platform_status(status, &error);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to get status. Error: %#x\n", error);
+ return 1;
+ }
+
+ sev->api_major = status->api_major;
+ sev->api_minor = status->api_minor;
+ sev->build = status->build;
+ sev->state = status->state;
+
+ return 0;
+}
+
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+	 * Fall back to using the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
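
The lookup order is most-specific first: an exact family/model file, then a file shared by a model subset, then the generic amd/sev.fw. A small sketch that prints the three candidate names for a hypothetical Family 17h Model 01h part (the family/model values are invented for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int family = 0x17, model = 0x01;  /* made-up CPU ids */
        char specific[64], subset[64];

        snprintf(specific, sizeof(specific),
                 "amd/amd_sev_fam%.2xh_model%.2xh.sbin", family, model);
        snprintf(subset, sizeof(subset),
                 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
                 family, (model & 0xf0) >> 4);

        /* Tried in order: the exact-model file, the model-subset file,
         * then the generic fallback. */
        printf("%s\n%s\namd/sev.fw\n", specific, subset);
        return 0;
    }
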
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+ struct sev_data_download_firmware *data;
+ const struct firmware *firmware;
+ int ret, error, order;
+ struct page *p;
+ u64 data_size;
+
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
+ return -1;
+ }
+
+ /*
+	 * SEV FW expects the physical address given to it to be
+	 * 32-byte aligned. The allocated memory has the data structure
+	 * placed at the beginning, followed by the firmware image being
+	 * passed to the SEV FW. Allocate enough memory for the data
+	 * structure + alignment padding + SEV FW.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+ order = get_order(firmware->size + data_size);
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ ret = -1;
+ goto fw_err;
+ }
+
+ /*
+ * Copy firmware data to a kernel allocated contiguous
+ * memory region.
+ */
+ data = page_address(p);
+ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+ data->address = __psp_pa(page_address(p) + data_size);
+ data->len = firmware->size;
+
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ if (ret)
+ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+ else
+ dev_info(dev, "SEV firmware update successful\n");
+
+ __free_pages(p, order);
+
+fw_err:
+ release_firmware(firmware);
+
+ return ret;
+}
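
The allocation trick above deserves spelling out: the command descriptor occupies the start of a page-aligned allocation, its size is rounded up to a multiple of 32, and the firmware image is copied immediately after it, so the address handed to the SEV FW is automatically 32-byte aligned. A sketch of the arithmetic; struct fw_desc is a hypothetical stand-in, not the real descriptor layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct fw_desc {            /* stand-in for the download descriptor */
        uint64_t address;
        uint32_t len;
    };

    int main(void)
    {
        size_t fw_size = 12345; /* made-up firmware image size */
        size_t data_size = ALIGN_UP(sizeof(struct fw_desc), 32);

        printf("descriptor %zu bytes, padded to %zu so the image that\n",
               sizeof(struct fw_desc), data_size);
        printf("follows it starts 32-byte aligned; total = %zu bytes\n",
               data_size + fw_size);
        return 0;
    }
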
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* copy PEK certificate blobs from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+ /* copy PEK certificate blobs from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (sev->state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+	 * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+ struct sev_data_get_id *data;
+ u64 data_size, user_size;
+ void *id_blob, *mem;
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ /* SEV FW expects the buffer it fills with the ID to be
+	 * 8-byte aligned. The allocated memory should be enough to
+	 * hold the data structure + alignment padding + the memory
+	 * where the SEV FW writes the ID.
+ */
+ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+ user_size = sizeof(struct sev_user_data_get_id);
+
+ mem = kzalloc(data_size + user_size, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ data = mem;
+ id_blob = mem + data_size;
+
+ data->address = __psp_pa(id_blob);
+ data->len = user_size;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+ if (!ret) {
+ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+ ret = -EFAULT;
+ }
+
+ kfree(mem);
+
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ return ret;
+ }
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+ /* If we query the length, FW responded with expected data. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
+ ret = sev_ioctl_do_get_id(&input);
+ break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct sev_device *sev)
+{
+ struct device *dev = sev->dev;
+ int ret;
+
+ /*
+	 * SEV feature support can be detected on multiple devices, but the
+	 * SEV FW commands must be issued on the master. During probe we do
+	 * not yet know the master, so we create /dev/sev on the first device
+	 * probe; sev_do_cmd() then finds the right master device to issue
+	 * the firmware command to.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&sev->int_queue);
+ sev->misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
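
sev_misc_init() keeps a single /dev/sev node alive across multiple PSP devices by treating the misc device as a kref-counted singleton: the first probe registers it, later probes only take a reference, and the last kref_put() (see sev_exit() above) deregisters it. A userspace model of that lifecycle, with invented helper names and kref semantics simulated by a plain counter:

    #include <stdio.h>

    struct sev_misc { int refcount; };

    static struct sev_misc *misc_dev;

    static void sev_misc_get(void)
    {
        static struct sev_misc dev;

        if (!misc_dev) {
            misc_dev = &dev;
            misc_dev->refcount = 1;      /* kref_init() + misc_register() */
            printf("registered /dev/sev\n");
        } else {
            misc_dev->refcount++;        /* kref_get() */
        }
    }

    static void sev_misc_put(void)
    {
        if (--misc_dev->refcount == 0) { /* kref_put() -> sev_exit() */
            printf("deregistered /dev/sev\n");
            misc_dev = NULL;
        }
    }

    int main(void)
    {
        sev_misc_get();  /* first PSP device probe */
        sev_misc_get();  /* second device reuses the node */
        sev_misc_put();
        sev_misc_put();  /* last reference tears it down */
        return 0;
    }
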
+
+int sev_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sev_device *sev;
+ int ret = -ENOMEM;
+
+ sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ goto e_err;
+
+ psp->sev_data = sev;
+
+ sev->dev = dev;
+ sev->psp = psp;
+
+ sev->io_regs = psp->io_regs;
+
+ sev->vdata = (struct sev_vdata *)psp->vdata->sev;
+ if (!sev->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "sev: missing driver data\n");
+ goto e_err;
+ }
+
+ psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+
+ ret = sev_misc_init(sev);
+ if (ret)
+ goto e_irq;
+
+ dev_notice(dev, "sev enabled\n");
+
+ return 0;
+
+e_irq:
+ psp_clear_sev_irq_handler(psp);
+e_err:
+ psp->sev_data = NULL;
+
+ dev_notice(dev, "sev initialization failed\n");
+
+ return ret;
+}
+
+void sev_dev_destroy(struct psp_device *psp)
+{
+ struct sev_device *sev = psp->sev_data;
+
+ if (!sev)
+ return;
+
+ if (sev->misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ psp_clear_sev_irq_handler(psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void sev_pci_init(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ int error, rc;
+
+ if (!sev)
+ return;
+
+ psp_timeout = psp_probe_timeout;
+
+ if (sev_get_api_version())
+ goto err;
+
+ /*
+	 * If the platform is not in UNINIT state then firmware upgrade and/or
+	 * platform INIT commands will fail; these commands require UNINIT state.
+ *
+	 * In a normal boot we should never run into a case where the firmware
+ * is not in UNINIT state on boot. But in case of kexec boot, a reboot
+ * may not go through a typical shutdown sequence and may leave the
+ * firmware in INIT or WORKING state.
+ */
+
+ if (sev->state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ sev->state = SEV_STATE_UNINIT;
+ }
+
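+	/* the DOWNLOAD_FIRMWARE command is available from SEV API 0.15 onward */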
+ if (sev_version_greater_or_equal(0, 15) &&
+ sev_update_firmware(sev->dev) == 0)
+ sev_get_api_version();
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
+ /*
+ * INIT command returned an integrity check failure
+ * status code, meaning that firmware load and
+ * validation of SEV related persistent data has
+ * failed and persistent state has been erased.
+ * Retrying INIT command here should succeed.
+ */
+ dev_dbg(sev->dev, "SEV: retrying INIT command\n");
+ rc = sev_platform_init(&error);
+ }
+
+ if (rc) {
+ dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ return;
+ }
+
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return;
+
+err:
+ psp_master->sev_data = NULL;
+}
+
+void sev_pci_exit(void)
+{
+ if (!psp_master->sev_data)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
new file mode 100644
index 000000000000..dd5c4fe82914
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __SEV_DEV_H__
+#define __SEV_DEV_H__
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+#include <linux/capability.h>
+
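+/*
+ * Command/response mailbox layout: the driver writes the command id at
+ * SEV_CMDRESP_CMD_SHIFT and may set SEV_CMDRESP_IOC to request an
+ * interrupt on completion; SEV_CMD_COMPLETE is reported in the
+ * interrupt status register.
+ */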
+#define SEV_CMD_COMPLETE BIT(1)
+#define SEV_CMDRESP_CMD_SHIFT 16
+#define SEV_CMDRESP_IOC BIT(0)
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sev_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ void __iomem *io_regs;
+
+ struct sev_vdata *vdata;
+
+ int state;
+ unsigned int int_rcvd;
+ wait_queue_head_t int_queue;
+ struct sev_misc_dev *misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+
+ u8 api_major;
+ u8 api_minor;
+ u8 build;
+};
+
+int sev_dev_init(struct psp_device *psp);
+void sev_dev_destroy(struct psp_device *psp);
+
+void sev_pci_init(void);
+void sev_pci_exit(void);
+
+#endif /* __SEV_DEV_H__ */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 53c12562d31e..423594608ad1 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -39,10 +39,23 @@ struct ccp_vdata {
const unsigned int rsamax;
};
-struct psp_vdata {
+struct sev_vdata {
const unsigned int cmdresp_reg;
const unsigned int cmdbuff_addr_lo_reg;
const unsigned int cmdbuff_addr_hi_reg;
+};
+
+struct tee_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int ring_wptr_reg;
+ const unsigned int ring_rptr_reg;
+};
+
+struct psp_vdata {
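+	/* either pointer may be NULL when the PSP variant lacks that interface */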
+ const struct sev_vdata *sev;
+ const struct tee_vdata *tee;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b29d2e663e10..56c1f61c0f84 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -2,7 +2,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -262,19 +262,42 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata pspv1 = {
+static const struct sev_vdata sevv1 = {
.cmdresp_reg = 0x10580,
.cmdbuff_addr_lo_reg = 0x105e0,
.cmdbuff_addr_hi_reg = 0x105e4,
+};
+
+static const struct sev_vdata sevv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+};
+
+static const struct tee_vdata teev1 = {
+ .cmdresp_reg = 0x10544,
+ .cmdbuff_addr_lo_reg = 0x10548,
+ .cmdbuff_addr_hi_reg = 0x1054c,
+ .ring_wptr_reg = 0x10550,
+ .ring_rptr_reg = 0x10554,
+};
+
+static const struct psp_vdata pspv1 = {
+ .sev = &sevv1,
.feature_reg = 0x105fc,
.inten_reg = 0x10610,
.intsts_reg = 0x10614,
};
static const struct psp_vdata pspv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .sev = &sevv2,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
+};
+
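+/* TEE-only PSP variant; it exposes no SEV interface */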
+static const struct psp_vdata pspv3 = {
+ .tee = &teev1,
.feature_reg = 0x109fc,
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
@@ -312,12 +335,22 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv2,
#endif
},
+ { /* 4 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv3,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
new file mode 100644
index 000000000000..5e697a90ea7f
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: MIT
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-tee.h>
+
+#include "psp-dev.h"
+#include "tee-dev.h"
+
+static bool psp_dead;
+
+static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+ void *start_addr;
+
+ if (!ring_size)
+ return -EINVAL;
+
+ /* We need the actual physical address instead of a DMA address, since
+ * the Trusted OS running on the AMD Secure Processor will map this region
+ */
+ start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size));
+ if (!start_addr)
+ return -ENOMEM;
+
+ rb_mgr->ring_start = start_addr;
+ rb_mgr->ring_size = ring_size;
+ rb_mgr->ring_pa = __psp_pa(start_addr);
+ mutex_init(&rb_mgr->mutex);
+
+ return 0;
+}
+
+static void tee_free_ring(struct psp_tee_device *tee)
+{
+ struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
+
+ if (!rb_mgr->ring_start)
+ return;
+
+ free_pages((unsigned long)rb_mgr->ring_start,
+ get_order(rb_mgr->ring_size));
+
+ rb_mgr->ring_start = NULL;
+ rb_mgr->ring_size = 0;
+ rb_mgr->ring_pa = 0;
+ mutex_destroy(&rb_mgr->mutex);
+}
+
+static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
+ unsigned int *reg)
+{
+ /* ~10ms sleep per loop => nloop = timeout * 100 */
+ int nloop = timeout * 100;
+
+ while (--nloop) {
+ *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ usleep_range(10000, 10100);
+ }
+
+ dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
+static
+struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
+{
+ struct tee_init_ring_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa);
+ cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa);
+ cmd->size = tee->rb_mgr.ring_size;
+
+ dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n",
+ cmd->hi_addr, cmd->low_addr, cmd->size);
+
+ return cmd;
+}
+
+static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+static int tee_init_ring(struct psp_tee_device *tee)
+{
+ int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
+ struct tee_init_ring_cmd *cmd;
+ phys_addr_t cmd_buffer;
+ unsigned int reg;
+ int ret;
+
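+	/* each ring entry is fixed at 1 KB, so 32 entries make a 32 KB ring */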
+ BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
+
+ ret = tee_alloc_ring(tee, ring_size);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring allocation failed %d\n", ret);
+ return ret;
+ }
+
+ tee->rb_mgr.wptr = 0;
+
+ cmd = tee_alloc_cmd_buffer(tee);
+ if (!cmd) {
+ tee_free_ring(tee);
+ return -ENOMEM;
+ }
+
+ cmd_buffer = __psp_pa((void *)cmd);
+
+ /* Send the command buffer details to the Trusted OS by writing
+ * to the CPU-PSP message registers
+ */
+
+ iowrite32(lower_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(upper_32_bits(cmd_buffer),
+ tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
+ iowrite32(TEE_RING_INIT_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring init command timed out\n");
+ tee_free_ring(tee);
+ goto free_buf;
+ }
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ tee_free_ring(tee);
+ ret = -EIO;
+ }
+
+free_buf:
+ tee_free_cmd_buffer(cmd);
+
+ return ret;
+}
+
+static void tee_destroy_ring(struct psp_tee_device *tee)
+{
+ unsigned int reg;
+ int ret;
+
+ if (!tee->rb_mgr.ring_start)
+ return;
+
+ if (psp_dead)
+ goto free_ring;
+
+ iowrite32(TEE_RING_DESTROY_CMD,
+ tee->io_regs + tee->vdata->cmdresp_reg);
+
+ ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ if (ret) {
+ dev_err(tee->dev, "tee: ring destroy command timed out\n");
+ } else if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
+ reg & PSP_CMDRESP_ERR_MASK);
+ }
+
+free_ring:
+ tee_free_ring(tee);
+}
+
+int tee_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_tee_device *tee;
+ int ret;
+
+ ret = -ENOMEM;
+ tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL);
+ if (!tee)
+ goto e_err;
+
+ psp->tee_data = tee;
+
+ tee->dev = dev;
+ tee->psp = psp;
+
+ tee->io_regs = psp->io_regs;
+
+ tee->vdata = (struct tee_vdata *)psp->vdata->tee;
+ if (!tee->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "tee: missing driver data\n");
+ goto e_err;
+ }
+
+ ret = tee_init_ring(tee);
+ if (ret) {
+ dev_err(dev, "tee: failed to init ring buffer\n");
+ goto e_err;
+ }
+
+ dev_notice(dev, "tee enabled\n");
+
+ return 0;
+
+e_err:
+ psp->tee_data = NULL;
+
+ dev_notice(dev, "tee initialization failed\n");
+
+ return ret;
+}
+
+void tee_dev_destroy(struct psp_device *psp)
+{
+ struct psp_tee_device *tee = psp->tee_data;
+
+ if (!tee)
+ return;
+
+ tee_destroy_ring(tee);
+}
+
+static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ void *buf, size_t len, struct tee_ring_cmd **resp)
+{
+ struct tee_ring_cmd *cmd;
+ u32 rptr, wptr;
+ int nloop = 1000, ret = 0;
+
+ *resp = NULL;
+
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ wptr = tee->rb_mgr.wptr;
+
+ /* Check if ring buffer is full */
+ do {
+ rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+
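+		/* the ring is full when writing one more entry would land on rptr */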
+ if (wptr + sizeof(struct tee_ring_cmd) != rptr)
+ break;
+
+ dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+
+ /* Wait if ring buffer is full */
+ mutex_unlock(&tee->rb_mgr.mutex);
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ mutex_lock(&tee->rb_mgr.mutex);
+
+ } while (--nloop);
+
+ if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, wptr);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Pointer to the next empty entry in the ring buffer */
+ cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+
+ /* Write command data into ring buffer */
+ cmd->cmd_id = cmd_id;
+ cmd->cmd_state = TEE_CMD_STATE_INIT;
+ memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ memcpy(&cmd->buf[0], buf, len);
+
+ /* Update local copy of write pointer */
+ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+ tee->rb_mgr.wptr = 0;
+
+ /* Trigger interrupt to Trusted OS */
+ iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg);
+
+ /* The response is provided by the Trusted OS in the same ring
+ * buffer location as the submitted data entry.
+ */
+ *resp = cmd;
+
+unlock:
+ mutex_unlock(&tee->rb_mgr.mutex);
+
+ return ret;
+}
+
+static int tee_wait_cmd_completion(struct psp_tee_device *tee,
+ struct tee_ring_cmd *resp,
+ unsigned int timeout)
+{
+ /* ~5ms sleep per loop => nloop = timeout * 200 */
+ int nloop = timeout * 200;
+
+ while (--nloop) {
+ if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
+ return 0;
+
+ usleep_range(5000, 5100);
+ }
+
+ dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
+ resp->cmd_id);
+
+ psp_dead = true;
+
+ return -ETIMEDOUT;
+}
+
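+/*
+ * Submit a TEE command through the ring buffer and wait for the Trusted
+ * OS to complete it. On success the response payload is copied back into
+ * @buf and the TEE return code is stored in @status.
+ */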
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_tee_device *tee;
+ struct tee_ring_cmd *resp;
+ int ret;
+
+ if (!buf || !status || !len || len > sizeof(resp->buf))
+ return -EINVAL;
+
+ *status = 0;
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ if (psp_dead)
+ return -EBUSY;
+
+ tee = psp->tee_data;
+
+ ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp);
+ if (ret)
+ return ret;
+
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &resp->buf[0], len);
+ *status = resp->status;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_tee_process_cmd);
+
+int psp_check_tee_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->tee_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_tee_status);
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
new file mode 100644
index 000000000000..f09960112115
--- /dev/null
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+ *
+ */
+
+/* This file describes the TEE communication interface between the host
+ * and the AMD Secure Processor.
+ */
+
+#ifndef __TEE_DEV_H__
+#define __TEE_DEV_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+
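+/* command timeout, in seconds */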
+#define TEE_DEFAULT_TIMEOUT 10
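+/* 1024-byte ring entry minus the 32-byte command header */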
+#define MAX_BUFFER_SIZE 992
+
+/**
+ * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+ * @TEE_RING_INIT_CMD: Initialize ring buffer
+ * @TEE_RING_DESTROY_CMD: Destroy ring buffer
+ * @TEE_RING_MAX_CMD: Maximum command id
+ */
+enum tee_ring_cmd_id {
+ TEE_RING_INIT_CMD = 0x00010000,
+ TEE_RING_DESTROY_CMD = 0x00020000,
+ TEE_RING_MAX_CMD = 0x000F0000,
+};
+
+/**
+ * struct tee_init_ring_cmd - Command to init TEE ring buffer
+ * @low_addr: bits [31:0] of the physical address of ring buffer
+ * @hi_addr: bits [63:32] of the physical address of ring buffer
+ * @size: size of ring buffer in bytes
+ */
+struct tee_init_ring_cmd {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+#define MAX_RING_BUFFER_ENTRIES 32
+
+/**
+ * struct ring_buf_manager - Helper structure to manage ring buffer.
+ * @ring_start: starting address of ring buffer
+ * @ring_size: size of ring buffer in bytes
+ * @ring_pa: physical address of ring buffer
+ * @wptr: byte offset at which the next command will be written in the ring
+ */
+struct ring_buf_manager {
+ struct mutex mutex; /* synchronizes access to ring buffer */
+ void *ring_start;
+ u32 ring_size;
+ phys_addr_t ring_pa;
+ u32 wptr;
+};
+
+struct psp_tee_device {
+ struct device *dev;
+ struct psp_device *psp;
+ void __iomem *io_regs;
+ struct tee_vdata *vdata;
+ struct ring_buf_manager rb_mgr;
+};
+
+/**
+ * enum tee_cmd_state - TEE command states for the ring buffer interface
+ * @TEE_CMD_STATE_INIT: initial state of command when sent from host
+ * @TEE_CMD_STATE_PROCESS: command being processed by TEE environment
+ * @TEE_CMD_STATE_COMPLETED: command processing completed
+ */
+enum tee_cmd_state {
+ TEE_CMD_STATE_INIT,
+ TEE_CMD_STATE_PROCESS,
+ TEE_CMD_STATE_COMPLETED,
+};
+
+/**
+ * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+ * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
+ * interface
+ * @cmd_state: refers to &enum tee_cmd_state
+ * @status: status of TEE command execution
+ * @res0: reserved region
+ * @pdata: private data (currently unused)
+ * @res1: reserved region
+ * @buf: TEE command specific buffer
+ */
+struct tee_ring_cmd {
+ u32 cmd_id;
+ u32 cmd_state;
+ u32 status;
+ u32 res0[1];
+ u64 pdata;
+ u32 res1[2];
+ u8 buf[MAX_BUFFER_SIZE];
+
+ /* Total size: 1024 bytes */
+} __packed;
+
+int tee_dev_init(struct psp_device *psp);
+void tee_dev_destroy(struct psp_device *psp);
+
+#endif /* __TEE_DEV_H__ */
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 64d318dc0d47..2fc0e0da790b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
* revealed the decrypted message --> zero its memory.
*/
sg_zero_buffer(areq->dst, sg_nents(areq->dst),
- areq->cryptlen, 0);
+ areq->cryptlen, areq->assoclen);
err = -EBADMSG;
}
/*ENCRYPT*/
@@ -385,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
return -EINVAL;
break;
default:
- dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -399,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
if (ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_192 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
ctx->enc_keylen);
return -EINVAL;
}
@@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = crypto_authenc_extractkeys(&keys, key, keylen);
if (rc)
- goto badkey;
+ return rc;
enckey = keys.enckey;
authkey = keys.authkey;
ctx->enc_keylen = keys.enckeylen;
@@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
- rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
- goto badkey;
+ return -EINVAL;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
@@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = validate_keys_sizes(ctx);
if (rc)
- goto badkey;
+ return rc;
/* STAT_PHASE_1: Copy key to ctx */
@@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
- goto badkey;
+ return rc;
}
/* STAT_PHASE_2: Create sequence */
@@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
break; /* No auth. key setup */
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- rc = -ENOTSUPP;
- goto badkey;
+ return -ENOTSUPP;
}
/* STAT_PHASE_3: Submit sequence to HW */
@@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- goto setkey_error;
+ return rc;
}
}
/* Update STAT_PHASE_3 */
return rc;
-
-badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
- return rc;
}
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
@@ -1567,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1925,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req,
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, areq_ctx->assoclen);
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2065,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2115,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2234,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2265,7 +2256,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2299,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2330,7 +2321,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3112b58d0bb1..7d6252d892d7 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* This check the size of the protected key token */
if (keylen != sizeof(hki)) {
dev_err(dev, "Unsupported protected key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -303,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
keylen = hki.keylen;
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -394,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_err(dev, "Unsupported key size %d.\n", keylen);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
return -EINVAL;
}
@@ -523,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
}
}
+
static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
- dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
@@ -571,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+
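+/*
+ * Load the XEX key and tweak for XTS/ESSIV/BITLOCKER modes; this is
+ * deliberately a no-op for the other cipher modes.
+ */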
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -836,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req,
/* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
- dev_err(dev, "Unsupported data size %d.\n", nbytes);
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
goto exit_process;
}
@@ -881,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup IV and XEX key used */
+ /* Setup state (IV) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Setup MLLI line, if needed */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+ /* Setup state (IV and XEX key) */
+ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
/* Read next IV */
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..532bc95a8373 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -133,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
- /* if driver suspended return, probebly shared interrupt */
+ /* if the driver is suspended, return; most likely a shared interrupt */
if (cc_pm_is_dev_suspended(dev))
return IRQ_NONE;
@@ -271,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
struct clk *clk;
+ int irq;
int rc = 0;
new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -337,9 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
&req_mem_cc_regs->start, new_drvdata->cc_base);
/* Then IRQ */
- new_drvdata->irq = platform_get_irq(plat_dev, 0);
- if (new_drvdata->irq < 0)
- return new_drvdata->irq;
+ irq = platform_get_irq(plat_dev, 0);
+ if (irq < 0)
+ return irq;
init_completion(&new_drvdata->hw_queue_avail);
@@ -442,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
/* register the driver isr function */
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+ new_drvdata);
if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -465,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_fips_init(new_drvdata);
if (rc) {
- dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
goto post_debugfs_err;
}
rc = cc_sram_mgr_init(new_drvdata);
@@ -490,13 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_buffer_mgr_init(new_drvdata);
if (rc) {
- dev_err(dev, "buffer_mgr_init failed\n");
+ dev_err(dev, "cc_buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
if (rc) {
- dev_err(dev, "ssi_power_mgr_init failed\n");
+ dev_err(dev, "cc_pm_init failed\n");
goto post_buf_mgr_err;
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index ab31d4a68c80..c227718ba992 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -28,7 +28,6 @@
/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
-#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"
@@ -133,13 +132,11 @@ struct cc_crypto_req {
/**
* struct cc_drvdata - driver private data context
* @cc_base: virt address of the CC registers
- * @irq: device IRQ number
- * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @irq: bitmap indicating source of last interrupt
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
- u32 irq_mask;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
@@ -161,6 +158,7 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
+ bool pm_on;
};
struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 4c8bce33abcf..702aefc21447 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -120,7 +120,7 @@ static void fips_dsr(unsigned long devarg)
cc_tee_handle_fips_error(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
val = (CC_REG(HOST_IMR) & ~irq);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..912e5ce5079d 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
out:
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
if (ctx->key_params.key_dma_addr) {
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
- if (rc)
- crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -2358,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode)
case DRV_HASH_SHA256:
case DRV_HASH_MD5:
return digest_len_addr;
-#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
return digest_len_addr + sizeof(cc_digest_len_init);
-#endif
default:
return digest_len_addr; /*to avoid kernel crash*/
}
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index dbc508fb719b..24c368b866f6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- rc = cc_suspend_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
- return rc;
- }
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -48,7 +42,7 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
- /* wait for Crytpcell reset completion */
+ /* wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
return -EBUSY;
@@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
- rc = cc_resume_req_queue(drvdata);
- if (rc) {
- dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
- return rc;
- }
-
- /* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
return 0;
@@ -80,28 +67,20 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (cc_req_queue_suspended(drvdata))
+ if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
- else
- pm_runtime_get_noresume(dev);
- return rc;
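+	/* pm_runtime_get_sync() returns 1 when the device was already active */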
+ return (rc == 1 ? 0 : rc);
}
-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
{
- int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- if (!cc_req_queue_suspended(drvdata)) {
+ if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
- rc = pm_runtime_put_autosuspend(dev);
- } else {
- /* Something wrong happens*/
- dev_err(dev, "request to suspend already suspended queue");
- rc = -EBUSY;
+ pm_runtime_put_autosuspend(dev);
}
- return rc;
}
bool cc_pm_is_dev_suspended(struct device *dev)
@@ -114,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- /* must be before the enabling to avoid resdundent suspending */
+ /* must come before enabling runtime PM to avoid redundant suspends */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
- /* activate the PM module */
+ /* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}
@@ -125,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = true;
}
void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
+ drvdata->pm_on = false;
}
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-static inline int cc_pm_suspend(struct device *dev)
-{
- return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
- return 0;
-}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
}
-static inline int cc_pm_put_suspend(struct device *dev)
-{
- return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}
static inline bool cc_pm_is_dev_suspended(struct device *dev)
{
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index a947d5a2cf35..9d61e6f12478 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
- bool is_runtime_suspended;
};
struct cc_bl_item {
@@ -230,7 +229,7 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
- * be chaned during the poll because the spinlock_bh
+ * be changed during the poll because the spinlock_bh
* is held by the thread
*/
if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
@@ -275,12 +274,11 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
* \param len The crypto sequence length
* \param add_comp If "true": add an artificial dout DMA to mark completion
*
- * \return int Returns -EINPROGRESS or error code
*/
-static int cc_do_send_request(struct cc_drvdata *drvdata,
- struct cc_crypto_req *cc_req,
- struct cc_hw_desc *desc, unsigned int len,
- bool add_comp)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
@@ -303,8 +301,8 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
@@ -328,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
/* Update the free slots in HW queue */
req_mgr_h->q_free_slots -= total_seq_len;
}
-
- /* Operation still in process */
- return -EINPROGRESS;
}
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
@@ -390,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
return;
}
- rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
- bli->len, false);
-
+ cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+ false);
spin_unlock(&mgr->hw_lock);
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- creq->user_cb(dev, req, rc);
- }
-
/* Remove ourselves from the backlog list */
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
+ kfree(bli);
}
spin_unlock(&mgr->bl_lock);
@@ -422,7 +412,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -451,8 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
return -EBUSY;
}
- if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
+ if (!rc) {
+ cc_do_send_request(drvdata, cc_req, desc, len, false);
+ rc = -EINPROGRESS;
+ }
spin_unlock_bh(&mgr->hw_lock);
return rc;
@@ -472,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
rc = cc_pm_get(dev);
if (rc) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
@@ -492,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
reinit_completion(&drvdata->hw_queue_avail);
}
- rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
+ cc_do_send_request(drvdata, cc_req, desc, len, true);
spin_unlock_bh(&mgr->hw_lock);
-
- if (rc != -EINPROGRESS) {
- cc_pm_put_suspend(dev);
- return rc;
- }
-
wait_for_completion(&cc_req->seq_compl);
return 0;
}
@@ -532,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
/*
* We are about to push command to the HW via the command registers
- * that may refernece hsot memory. We need to issue a memory barrier
- * to make sure there are no outstnading memory writes
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
*/
wmb();
enqueue_seq(drvdata, desc, len);
@@ -668,7 +654,7 @@ static void comp_handler(unsigned long devarg)
request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
- /* after verifing that there is nothing to do,
+ /* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
@@ -677,52 +663,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}
-
-/*
- * resume the queue configuration - no need to take the lock as this happens
- * inside the spin lock protection
- */
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- spin_lock_bh(&request_mgr_handle->hw_lock);
- request_mgr_handle->is_runtime_suspended = false;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-/*
- * suspend the queue configuration. Since it is used for the runtime suspend
- * only verify that the queue can be suspended.
- */
-int cc_suspend_req_queue(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- /* lock the send_request */
- spin_lock_bh(&request_mgr_handle->hw_lock);
- if (request_mgr_handle->req_queue_head !=
- request_mgr_handle->req_queue_tail) {
- spin_unlock_bh(&request_mgr_handle->hw_lock);
- return -EBUSY;
- }
- request_mgr_handle->is_runtime_suspended = true;
- spin_unlock_bh(&request_mgr_handle->hw_lock);
-
- return 0;
-}
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
-{
- struct cc_req_mgr_handle *request_mgr_handle =
- drvdata->request_mgr_handle;
-
- return request_mgr_handle->is_runtime_suspended;
-}
-
-#endif
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index f46cf766fe4d..ff7746aaaf35 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);
void cc_req_mgr_fini(struct cc_drvdata *drvdata);
-#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct cc_drvdata *drvdata);
-
-int cc_suspend_req_queue(struct cc_drvdata *drvdata);
-
-bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
-#endif
-
#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 91e424378217..f078b2686418 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO
will be called chcr.
config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- ---help---
- Enable support for IPSec Tx Inline.
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for IPSec Tx Inline.
config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- ---help---
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS_TOE
+ select CRYPTO_DEV_CHELSIO
+ ---help---
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
- To compile this driver as a module, choose M here: the module
- will be called chtls.
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 1b4a5664e604..b4b9b22125d1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- int err = 0;
crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
- return err;
+ return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx)
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
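+	/* round-robin across all Tx channels instead of toggling between two */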
+ ctx->dev->tx_channel_id =
+ (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
return 0;
badkey_err:
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
return err;
@@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_SW;
break;
default:
-
- crypto_tfm_set_flags((struct crypto_tfm *) tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
aeadctx->mayverify = VERIFY_HW;
break;
default:
- crypto_tfm_set_flags((struct crypto_tfm *)tfm,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
return chcr_ccm_common_setkey(aead, key, keylen);
@@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
int error;
if (keylen < 3) {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
aeadctx->enckey_len = 0;
return -EINVAL;
}
@@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (error)
return error;
keylen -= 3;
@@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
& CRYPTO_TFM_REQ_MASK);
ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (ret)
goto out;
@@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- crypto_tfm_set_flags((struct crypto_tfm *)aead,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
@@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
if (get_alg_config(&param, max_authsize)) {
pr_err("chcr : Unsupported digest size\n");
@@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
& CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
- & CRYPTO_TFM_RES_MASK);
if (err)
goto out;
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
- }
+
subtype = get_aead_subtype(authenc);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..e937605670ac 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- struct adapter *adap;
-
mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
@@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
list_move(&u_ctx->entry, &drv_data.inact_dev);
if (list_empty(&drv_data.act_dev))
drv_data.last_dev = NULL;
- adap = padap(&u_ctx->dev);
- memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
atomic_dec(&drv_data.dev_count);
mutex_unlock(&drv_data.drv_mutex);
@@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void)
static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
stop_crypto();
-
cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
mutex_lock(&drv_data.drv_mutex);
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
list_del(&u_ctx->entry);
kfree(u_ctx);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index d2bc655ab931..459442704eb1 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -179,7 +179,10 @@ struct chtls_hws {
u32 copied_seq;
u64 tx_seq_no;
struct tls_scmd scmd;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ } crypto_info;
};
struct chtls_sock {
@@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index aca75237bbcf..9b2745ad9e38 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
+ if (csk->wr_credits != csk->wr_max_credits) {
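+		/* un-acked work requests are still queued; free them */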
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
@@ -1273,7 +1286,7 @@ static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
ctx = (struct listen_ctx *)data;
lsk = ctx->lsk;
- if (unlikely(tid >= cdev->tids->ntids)) {
+ if (unlikely(tid_out_of_range(cdev->tids, tid))) {
pr_info("passive open TID %u too large\n", tid);
return 1;
}
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+ kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
kfree_skb(skb);
}
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev, int status, int queue)
{
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
if (!reply_skb) {
req->status = (queue << 1);
- send_defer_abort_rpl(cdev, skb);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
return;
}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}
-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
- defer_handler_t handler)
-{
- DEFERRED_SKB_CB(skb)->handler = handler;
- spin_lock_bh(&cdev->deferq.lock);
- __skb_queue_tail(&cdev->deferq, skb);
- if (skb_queue_len(&cdev->deferq) == 1)
- schedule_work(&cdev->deferq_task);
- spin_unlock_bh(&cdev->deferq.lock);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
- struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct sk_buff *skb = csk->wr_skb_head;
-
- if (likely(skb)) {
- /* Don't bother clearing the tail */
- csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
- WR_SKB_CB(skb)->next_wr = NULL;
- }
- return skb;
-}
-
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
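The chtls_cm.c hunks above move t4_defer_reply() ahead of send_abort_rpl() so an abort reply whose allocation failed can be replayed from process context. A minimal sketch of that defer-to-workqueue pattern, with illustrative demo_* names and queue/work initialization elided: the handler rides in the skb control block, and the worker is kicked only on the empty-to-non-empty transition, so one work item drains a whole batch.

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct sk_buff_head deferq;
	struct work_struct deferq_task;
};

typedef void (*demo_handler_t)(struct demo_dev *dev, struct sk_buff *skb);

struct demo_skb_cb {
	demo_handler_t handler;	/* stashed in skb->cb, as the driver does */
};
#define DEMO_SKB_CB(skb) ((struct demo_skb_cb *)(skb)->cb)

static void demo_defer_reply(struct sk_buff *skb, struct demo_dev *dev,
			     demo_handler_t handler)
{
	DEMO_SKB_CB(skb)->handler = handler;
	spin_lock_bh(&dev->deferq.lock);
	__skb_queue_tail(&dev->deferq, skb);
	/* Only the first enqueue kicks the worker; later skbs ride along. */
	if (skb_queue_len(&dev->deferq) == 1)
		schedule_work(&dev->deferq_task);
	spin_unlock_bh(&dev->deferq.lock);
}

static void demo_deferq_worker(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, deferq_task);
	struct sk_buff *skb;

	/* skb_dequeue() takes deferq.lock itself; no extra locking here. */
	while ((skb = skb_dequeue(&dev->deferq)) != NULL)
		DEMO_SKB_CB(skb)->handler(dev, skb);
}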
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 129d7ac649a9..3fac0c74a41f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+}
+
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
csk->wr_skb_tail = skb;
}
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb = NULL;
+
+ skb = csk->wr_skb_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+ WR_SKB_CB(skb)->next_wr = NULL;
+ }
+ return skb;
+}
#endif
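dequeue_wr() moves from chtls_cm.c into this header so the teardown path can reuse it: pending work requests form a singly linked list threaded through each skb's control block, with head and tail kept on the socket. A standalone plain-C sketch of the same list discipline, with illustrative demo_* names (the driver keeps next_wr in skb->cb and frees with kfree_skb()):

#include <stdlib.h>

struct demo_skb {
	struct demo_skb *next_wr;	/* lives in skb->cb in the driver */
};

struct demo_csk {
	struct demo_skb *wr_skb_head;
	struct demo_skb *wr_skb_tail;
};

static void demo_enqueue_wr(struct demo_csk *csk, struct demo_skb *skb)
{
	skb->next_wr = NULL;
	if (!csk->wr_skb_head)
		csk->wr_skb_head = skb;
	else
		csk->wr_skb_tail->next_wr = skb;
	csk->wr_skb_tail = skb;
}

static struct demo_skb *demo_dequeue_wr(struct demo_csk *csk)
{
	struct demo_skb *skb = csk->wr_skb_head;

	if (skb) {
		/* The tail is deliberately left stale: enqueue only reads
		 * it when the head is non-NULL, so it is never observed. */
		csk->wr_skb_head = skb->next_wr;
		skb->next_wr = NULL;
	}
	return skb;
}

static void demo_purge_wr_queue(struct demo_csk *csk)
{
	struct demo_skb *skb;

	while ((skb = demo_dequeue_wr(csk)) != NULL)
		free(skb);	/* kfree_skb() in the driver */
}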
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 2a34035d3cfb..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
static int chtls_key_info(struct chtls_sock *csk,
struct _key_ctx *kctx,
- u32 keylen, u32 optname)
+ u32 keylen, u32 optname,
+ int cipher_type)
{
- unsigned char key[AES_KEYSIZE_128];
- struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+ unsigned char key[AES_MAX_KEY_SIZE];
+ unsigned char *key_p, *salt;
unsigned char ghash_h[AEAD_H_SIZE];
- int ck_size, key_ctx_size;
+ int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
struct crypto_aes_ctx aes;
int ret;
- gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
- &csk->tlshws.crypto_info;
-
key_ctx_size = sizeof(struct _key_ctx) +
roundup(keylen, 16) + AEAD_H_SIZE;
- if (keylen == AES_KEYSIZE_128) {
- ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
- } else {
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * prepare the key context based on the GCM cipher type
+ */

+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+ (struct tls12_crypto_info_aes_gcm_128 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_128->key, keylen);
+
+ key_p = gcm_ctx_128->key;
+ salt = gcm_ctx_128->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+ (struct tls12_crypto_info_aes_gcm_256 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_256->key, keylen);
+
+ key_p = gcm_ctx_256->key;
+ salt = gcm_ctx_256->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ break;
+ }
+ default:
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
}
- memcpy(key, gcm_ctx->key, keylen);
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
@@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk,
key_ctx = ((key_ctx_size >> 4) << 3);
kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx);
chtls_rxkey_ivauth(kctx);
} else {
kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
- CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ kctx_mackey_size,
0, 0, key_ctx_size >> 4);
}
- memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(kctx->key, gcm_ctx->key, keylen);
+ memcpy(kctx->salt, salt, salt_size);
+ memcpy(kctx->key, key_p, keylen);
memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
/* erase key info from driver */
- memset(gcm_ctx->key, 0, keylen);
+ memset(key_p, 0, keylen);
return 0;
}
@@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
SCMD_TLS_FRAG_ENABLE_V(1);
}
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ u32 optname, int cipher_type)
{
struct tls_key_req *kwr;
struct chtls_dev *cdev;
@@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
kwr->sc_imm.len = cpu_to_be32(klen);
+ lock_sock(sk);
/* key info */
kctx = (struct _key_ctx *)(kwr + 1);
- ret = chtls_key_info(csk, kctx, keylen, optname);
+ ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
if (ret)
goto out_notcb;
@@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
csk->tlshws.txkey = keyid;
}
+ release_sock(sk);
return ret;
out_notcb:
+ release_sock(sk);
free_tls_keyid(sk);
out_nokey:
kfree_skb(skb);
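The chtls_hw.c hunk turns the hard-coded AES-GCM-128 key handling into a switch that binds key, salt, and size constants per cipher type, after which key-context construction is shared. A hedged userspace sketch of that selection over the uapi TLS structs; the demo_* names are illustrative.

#include <linux/tls.h>

struct demo_key_params {
	const unsigned char *key;
	const unsigned char *salt;
	unsigned int keylen;
	unsigned int saltlen;
};

static int demo_pick_gcm_params(const struct tls_crypto_info *info,
				struct demo_key_params *p)
{
	switch (info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		const struct tls12_crypto_info_aes_gcm_128 *c =
			(const void *)info;

		p->key = c->key;
		p->salt = c->salt;
		p->keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		p->saltlen = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		return 0;
	}
	case TLS_CIPHER_AES_GCM_256: {
		const struct tls12_crypto_info_aes_gcm_256 *c =
			(const void *)info;

		p->key = c->key;
		p->salt = c->salt;
		p->keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		p->saltlen = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		return 0;
	}
	default:
		return -1;	/* -EINVAL in the driver */
	}
}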
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 18996935d8ba..a038de90b2ea 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
struct chtls_listen *clisten;
- int err;
if (sk->sk_protocol != IPPROTO_TCP)
return -EPROTONOSUPPORT;
@@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
clisten->cdev = cdev;
clisten->sk = sk;
mutex_lock(&notify_mutex);
- err = raw_notifier_call_chain(&listen_notify_list,
+ raw_notifier_call_chain(&listen_notify_list,
CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
- return err;
+ return 0;
}
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
@@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
struct tls_crypto_info *crypto_info, tmp_crypto_info;
struct chtls_sock *csk;
int keylen;
+ int cipher_type;
int rc = 0;
csk = rcu_dereference_sk_user_data(sk);
@@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+ /* GCM mode of AES supports 128 and 256 bit encryption, so
+ * copy keys from user based on GCM cipher type.
+ */
switch (tmp_crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
/* Obtain version and type from previous copy */
@@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
}
keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
- rc = chtls_setkey(csk, keylen, optname);
+ cipher_type = TLS_CIPHER_AES_GCM_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ crypto_info[0] = tmp_crypto_info;
+ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+ optval + sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_256)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_256;
break;
}
default:
rc = -EINVAL;
goto out;
}
+ rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
return rc;
}
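do_chtls_setsockopt() now does a two-stage user copy: the fixed tls_crypto_info header is fetched first to learn the cipher type, then only the type-specific tail is copied in behind it. A minimal kernel-style sketch of the idiom, with illustrative demo_* names:

#include <linux/errno.h>
#include <linux/tls.h>
#include <linux/uaccess.h>

/* dst must have room for the largest per-cipher struct. */
static int demo_copy_crypto_info(struct tls_crypto_info *dst,
				 const void __user *optval)
{
	struct tls_crypto_info hdr;
	size_t total;

	if (copy_from_user(&hdr, optval, sizeof(hdr)))
		return -EFAULT;

	switch (hdr.cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		total = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		total = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	default:
		return -EINVAL;
	}

	*dst = hdr;	/* keep version/type from the first copy */
	if (copy_from_user((char *)dst + sizeof(hdr),
			   (const char __user *)optval + sizeof(hdr),
			   total - sizeof(hdr)))
		return -EFAULT;

	return 0;
}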
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 73a899e6f837..f4f18bfc2247 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -119,11 +118,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -132,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
tctx->fallback.cip->base.crt_flags |=
(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
- if (ret) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
- return ret;
+ return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}
static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
tctx->keylen = len;
@@ -154,11 +144,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return 0;
}
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
/* not supported at all */
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
- }
/*
* The requested key size is not supported by HW, do a fallback
@@ -168,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(tctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
- crypto_skcipher_set_flags(tfm,
- crypto_skcipher_get_flags(tctx->fallback.skcipher) &
- CRYPTO_TFM_RES_MASK);
- return ret;
+ return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}
static void
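With the CRYPTO_TFM_RES_* flags gone from the crypto API, the geode fallback paths shrink to forwarding the request flags and returning the fallback's own error. A short sketch of the surviving idiom; demo_* names are illustrative.

#include <crypto/skcipher.h>

static int demo_setkey_fallback(struct crypto_skcipher *tfm,
				struct crypto_skcipher *fallback,
				const u8 *key, unsigned int len)
{
	/* Mirror only the request flags onto the fallback tfm. */
	crypto_skcipher_clear_flags(fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(fallback, key, len);
}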
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 4e7323884ae3..354836468c5d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2507,7 +2507,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
addr = pci_resource_start(pdev, i);
size = pci_resource_len(pdev, i);
- dev->bar[i] = ioremap_nocache(addr, size);
+ dev->bar[i] = ioremap(addr, size);
if (!dev->bar[i]) {
err = -ENOMEM;
goto err_out_unmap_bars;
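ioremap_nocache() was retired this cycle because ioremap() is already uncached on every architecture, so the hifn change is a mechanical rename. A minimal sketch of the BAR-mapping idiom it leaves behind; demo_* names are illustrative.

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t addr = pci_resource_start(pdev, bar);
	resource_size_t size = pci_resource_len(pdev, bar);

	return ioremap(addr, size);	/* NULL on failure */
}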
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index c0e7a85fe129..8851161f722f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -16,16 +16,22 @@ config CRYPTO_DEV_HISI_SEC
config CRYPTO_DEV_HISI_SEC2
tristate "Support for HiSilicon SEC2 crypto block cipher accelerator"
- select CRYPTO_BLKCIPHER
+ select CRYPTO_SKCIPHER
select CRYPTO_ALGAPI
select CRYPTO_LIB_DES
select CRYPTO_DEV_HISI_QM
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
It provides AES, SM4, and 3DES algorithms with ECB
- CBC, and XTS cipher mode.
+ CBC, and XTS cipher mode, and AEAD algorithms.
To compile this as a module, choose M here: the module
will be called hisi_sec2.
@@ -44,7 +50,6 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
select CRYPTO_DEV_HISI_QM
- select SG_SPLIT
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 98f037e6ea3e..5d400d69e8e4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
- if (id < 0)
+ if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
@@ -174,8 +174,8 @@ static struct hisi_qp *hpre_get_qp_and_start(void)
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
- if (dma_mapping_error(dev, *tmp)) {
+ if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
+ struct scatterlist *data, unsigned int len,
+ int is_src, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = HPRE_DEV(ctx);
@@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int shift;
shift = ctx->key_sz - len;
- if (shift < 0)
+ if (unlikely(shift < 0))
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
- if (!ptr)
+ if (unlikely(!ptr))
return -ENOMEM;
if (is_src) {
@@ -226,12 +226,12 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
}
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
+ struct scatterlist *data, unsigned int len,
+ int is_src, int is_dh)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp;
+ dma_addr_t tmp = 0;
int ret;
/* when the data is dh's source, we should format it */
@@ -241,7 +241,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
else
ret = hpre_prepare_dma_buf(hpre_req, data, len,
is_src, &tmp);
- if (ret)
+ if (unlikely(ret))
return ret;
if (is_src)
@@ -253,15 +253,16 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
}
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst, struct scatterlist *src)
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
{
struct device *dev = HPRE_DEV(ctx);
struct hpre_sqe *sqe = &req->req;
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (src) {
@@ -274,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (!tmp)
+ if (unlikely(!tmp))
return;
if (req->dst) {
@@ -288,7 +289,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
+ void **kreq)
{
struct hpre_asym_request *req;
int err, id, done;
@@ -308,7 +309,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
- if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
+ if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
@@ -375,7 +376,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_sqe *sqe = resp;
- ctx->req_list[sqe->tag]->cb(ctx, resp);
+ ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -454,33 +455,30 @@ static int hpre_dh_compute_value(struct kpp_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
ret = hpre_msg_request_set(ctx, req, false);
- if (ret)
+ if (unlikely(ret))
return ret;
if (req->src) {
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
if (ctx->crt_g2_mode && !req->src)
- msg->dw0 |= HPRE_ALG_DH_G2;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
- msg->dw0 |= HPRE_ALG_DH;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -520,12 +518,12 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
return -EINVAL;
if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
+ HPRE_BITS_2_BYTES_SHIFT))
return -EINVAL;
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
+ &ctx->dh.dma_xa_p, GFP_KERNEL);
if (!ctx->dh.xa_p)
return -ENOMEM;
@@ -559,13 +557,12 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) {
- memset(ctx->dh.g, 0, sz);
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
ctx->dh.g = NULL;
}
if (ctx->dh.xa_p) {
- memset(ctx->dh.xa_p, 0, sz);
+ memzero_explicit(ctx->dh.xa_p, sz);
dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
ctx->dh.dma_xa_p);
ctx->dh.xa_p = NULL;
@@ -661,9 +658,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -673,22 +667,22 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.pubkey)
+ if (unlikely(!ctx->rsa.pubkey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -696,7 +690,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -716,9 +710,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
int ctr = 0;
int ret;
- if (!ctx)
- return -EINVAL;
-
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
@@ -728,27 +719,29 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
}
- if (!ctx->rsa.prikey)
+ if (unlikely(!ctx->rsa.prikey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
- if (ret)
+ if (unlikely(ret))
return ret;
if (ctx->crt_g2_mode) {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
- msg->dw0 |= HPRE_ALG_NC_CRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_CRT);
} else {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
- msg->dw0 |= HPRE_ALG_NC_NCRT;
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
+ HPRE_ALG_NC_NCRT);
}
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (ret)
+ if (unlikely(ret))
goto clear_all;
do {
@@ -756,7 +749,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
- if (!ret)
+ if (likely(!ret))
return -EINPROGRESS;
clear_all:
@@ -811,10 +804,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
- ctx->rsa.pubkey = NULL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
return -EINVAL;
- }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -836,17 +827,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
return 0;
}
-static int hpre_crt_para_get(char *para, const char *raw,
- unsigned int raw_sz, unsigned int para_size)
+static int hpre_crt_para_get(char *para, size_t para_sz,
+ const char *raw, size_t raw_sz)
{
const char *ptr = raw;
size_t len = raw_sz;
hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_size)
+ if (!len || len > para_sz)
return -EINVAL;
- memcpy(para + para_size - len, ptr, len);
+ memcpy(para + para_sz - len, ptr, len);
return 0;
}
@@ -864,32 +855,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
if (!ctx->rsa.crt_prikey)
return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
- rsa_key->dq_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
+ rsa_key->dq, rsa_key->dq_sz);
if (ret)
goto free_key;
offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
- rsa_key->dp_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->dp, rsa_key->dp_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->q, rsa_key->q_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->q, rsa_key->q_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->p, rsa_key->p_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->p, rsa_key->p_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
- rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+ ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+ rsa_key->qinv, rsa_key->qinv_sz);
if (ret)
goto free_key;
@@ -899,7 +890,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
free_key:
offset = hlf_ksz * HPRE_CRT_PRMS;
- memset(ctx->rsa.crt_prikey, 0, offset);
+ memzero_explicit(ctx->rsa.crt_prikey, offset);
dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
@@ -924,14 +915,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
}
if (ctx->rsa.crt_prikey) {
- memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+ memzero_explicit(ctx->rsa.crt_prikey,
+ half_key_sz * HPRE_CRT_PRMS);
dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
ctx->rsa.crt_prikey = NULL;
}
if (ctx->rsa.prikey) {
- memset(ctx->rsa.prikey, 0, ctx->key_sz);
+ memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
ctx->rsa.dma_prikey);
ctx->rsa.prikey = NULL;
@@ -1043,6 +1035,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->rsa.soft_tfm)) {
@@ -1050,7 +1043,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- return hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx);
+ if (ret)
+ crypto_free_akcipher(ctx->rsa.soft_tfm);
+
+ return ret;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
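Several hpre_crypto.c hunks above replace a plain `msg->dw0 |= FLAG` with a round trip through le32_to_cpu()/cpu_to_le32(): dw0 is a little-endian hardware field, so a direct OR would operate on byte-swapped data on big-endian hosts. A minimal sketch of the endian-safe read-modify-write; demo_* names are illustrative.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_sqe {
	__le32 dw0;	/* descriptor word consumed by the device */
};

static inline void demo_sqe_set_alg(struct demo_sqe *sqe, u32 alg_bits)
{
	/* Wrong on big-endian: sqe->dw0 |= alg_bits; */
	sqe->dw0 = cpu_to_le32(le32_to_cpu(sqe->dw0) | alg_bits);
}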
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 34e0424410bf..401747de67a8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -73,7 +73,7 @@
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define HPRE_PCI_DEVICE_ID 0xa258
#define HPRE_PCI_VF_DEVICE_ID 0xa259
-#define HPRE_ADDR(qm, offset) (qm->io_base + (offset))
+#define HPRE_ADDR(qm, offset) ((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK 0xfffffffe
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
- { .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
- { .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
- { .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
- { .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
- { .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
- { .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
- { .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
- { .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
- { .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
- { .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
- { .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
- { .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+ { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
+ { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
+ { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
+ { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
+ { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
+ { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
+ { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
+ { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
+ { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
+ { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
+ { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
{ /* sentinel */ }
};
@@ -490,7 +490,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -557,7 +557,7 @@ static const struct file_operations hpre_ctrl_debug_fops = {
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
if (dir)
file_dir = dir;
@@ -571,10 +571,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -585,7 +583,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -595,10 +592,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
return 0;
}
@@ -609,15 +603,14 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- sprintf(buf, "cluster%d", i);
-
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+ if (ret < 0)
+ return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -627,9 +620,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -668,9 +659,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
int ret;
dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
qm->debug.debug_root = dir;
ret = hisi_qm_debug_init(qm);
@@ -1014,8 +1002,6 @@ static void hpre_register_debugfs(void)
return;
hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
- if (IS_ERR_OR_NULL(hpre_debugfs_root))
- hpre_debugfs_root = NULL;
}
static void hpre_unregister_debugfs(void)
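The hpre_main.c hunks adopt two current kernel idioms: debugfs_create_*() calls are fire-and-forget, since their failure only degrades debugging and the return values need no checking, and names are built with a bounded snprintf() instead of sprintf(). A short sketch; demo_* names are illustrative.

#include <linux/debugfs.h>
#include <linux/kernel.h>

#define DEMO_NAME_LEN 20

static void demo_create_cluster_dirs(struct dentry *root, int nr_clusters)
{
	char buf[DEMO_NAME_LEN];
	int i;

	for (i = 0; i < nr_clusters; i++) {
		/* Defensive bound check, mirroring the driver. */
		if (snprintf(buf, DEMO_NAME_LEN, "cluster%d", i) < 0)
			continue;
		/* No return-value check: debugfs errors are not fatal. */
		debugfs_create_dir(buf, root);
	}
}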
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 26754d0570ba..13e2d8d7be94 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -9,10 +9,12 @@
#include "../qm.h"
#include "sec_crypto.h"
-/* Cipher resource per hardware SEC queue */
-struct sec_cipher_res {
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
u8 *c_ivin;
dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
};
/* Cipher request of SEC private */
@@ -21,33 +23,35 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
};
+struct sec_aead_req {
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+ struct aead_request *aead_req;
+};
+
/* SEC request of Crypto */
struct sec_req {
struct sec_sqe sec_sqe;
struct sec_ctx *ctx;
struct sec_qp_ctx *qp_ctx;
- /* Cipher supported only at present */
struct sec_cipher_req c_req;
+ struct sec_aead_req aead_req;
+
int err_type;
int req_id;
/* Status of the SEC request */
- int fake_busy;
+ bool fake_busy;
};
/**
* struct sec_req_op - Operations for SEC request
- * @get_res: Get resources for TFM on the SEC device
- * @resource_alloc: Allocate resources for queue context on the SEC device
- * @resource_free: Free resources for queue context on the SEC device
* @buf_map: DMA map the SGL buffers of the request
* @buf_unmap: DMA unmap the SGL buffers of the request
* @bd_fill: Fill the SEC queue BD
@@ -56,18 +60,25 @@ struct sec_req {
* @process: Main processing logic of Skcipher
*/
struct sec_req_op {
- int (*get_res)(struct sec_ctx *ctx, struct sec_req *req);
- int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
- void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx);
int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
- void (*callback)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
+/* SEC auth context */
+struct sec_auth_ctx {
+ dma_addr_t a_key_dma;
+ u8 *a_key;
+ u8 a_key_len;
+ u8 mac_len;
+ u8 a_alg;
+ struct crypto_shash *hash_tfm;
+};
+
/* SEC cipher context which defines cipher's relatives */
struct sec_cipher_ctx {
u8 *c_key;
@@ -83,9 +94,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req **req_list;
+ struct sec_req *req_list[QM_Q_DEPTH];
struct idr req_idr;
- void *alg_meta_data;
+ struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
struct hisi_acc_sgl_pool *c_in_pool;
@@ -93,6 +104,11 @@ struct sec_qp_ctx {
atomic_t pending_reqs;
};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
/* SEC Crypto TFM context which defines queue and cipher etc. relatives */
struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
@@ -110,7 +126,10 @@ struct sec_ctx {
/* Current cyclic index to select a queue for decipher */
atomic_t dec_qcyclic;
+
+ enum sec_alg_type alg_type;
struct sec_cipher_ctx c_ctx;
+ struct sec_auth_ctx a_ctx;
};
enum sec_endian {
@@ -132,8 +151,8 @@ struct sec_debug_file {
};
struct sec_dfx {
- u64 send_cnt;
- u64 recv_cnt;
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
struct sec_debug {
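sec.h switches the dfx counters from plain u64 fields bumped with GCC __sync builtins to atomic64_t with atomic64_inc(), the portable kernel primitive (it also works where plain 64-bit stores are not atomic). A minimal sketch; demo_* names are illustrative.

#include <linux/atomic.h>
#include <linux/types.h>

struct demo_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
};

static inline void demo_count_send(struct demo_dfx *dfx)
{
	/* was: __sync_add_and_fetch(&dfx->send_cnt, 1); */
	atomic64_inc(&dfx->send_cnt);
}

static inline u64 demo_read_send(struct demo_dfx *dfx)
{
	return atomic64_read(&dfx->send_cnt);
}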
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 62b04e19067c..a2cfcc9ccd94 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -3,7 +3,11 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/authenc.h>
#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
@@ -27,6 +31,10 @@
#define SEC_SRC_SGL_OFFSET 7
#define SEC_CKEY_OFFSET 9
#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
@@ -35,12 +43,19 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-static DEFINE_MUTEX(sec_algs_lock);
-static unsigned int sec_active_devs;
+static atomic_t sec_active_devs;
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
@@ -50,7 +65,7 @@ static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
ctx->hlf_q_num;
}
-static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
if (req->c_req.encrypt)
atomic_dec(&ctx->enc_qcyclic);
@@ -67,7 +82,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
- if (req_id < 0) {
+ if (unlikely(req_id < 0)) {
dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
return req_id;
}
@@ -82,7 +97,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (req_id < 0 || req_id >= QM_Q_DEPTH) {
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
@@ -95,36 +110,66 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
+static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
+ size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac = mac_out + SEC_MAX_MAC_LEN;
+ struct scatterlist *sgl = aead_req->src;
+ size_t sz;
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
+ aead_req->cryptlen + aead_req->assoclen -
+ authsize);
+ if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
+ dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
u16 done, flag;
+ int err = 0;
u8 type;
- struct sec_req *req;
type = bd->type_cipher_auth & SEC_TYPE_MASK;
- if (type == SEC_BD_TYPE2) {
- req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
- req->err_type = bd->type2.error_type;
-
- done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
- flag = (le16_to_cpu(bd->type2.done_flag) &
- SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
- if (req->err_type || done != 0x1 || flag != 0x2)
- dev_err(SEC_CTX_DEV(req->ctx),
- "err_type[%d],done[%d],flag[%d]\n",
- req->err_type, done, flag);
- } else {
+ if (unlikely(type != SEC_BD_TYPE2)) {
pr_err("err bd type [%d]\n", type);
return;
}
- __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
+ (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
+ dev_err(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
+ }
+
+ if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
+ err = sec_aead_verify(req, qp_ctx);
- req->ctx->req_op->buf_unmap(req->ctx, req);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
- req->ctx->req_op->callback(req->ctx, req);
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
}
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
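On the decrypt path the new sec_aead_verify() copies the transmitted MAC from the tail of the source scatterlist and compares it with the MAC the engine produced. A hedged sketch of that check with illustrative demo_* names; the 64-byte buffer mirrors SEC_MAX_MAC_LEN, and a constant-time compare such as crypto_memneq() would be a defensible alternative to memcmp().

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static int demo_aead_verify(struct scatterlist *src, unsigned int total_len,
			    const u8 *engine_mac, size_t authsize)
{
	u8 rx_mac[64];	/* SEC_MAX_MAC_LEN in the driver */
	size_t sz;

	if (authsize > sizeof(rx_mac))
		return -EINVAL;

	/* The tag sits at the very end of the ciphertext. */
	sz = sg_pcopy_to_buffer(src, sg_nents(src), rx_mac, authsize,
				total_len - authsize);
	if (sz != authsize || memcmp(engine_mac, rx_mac, authsize))
		return -EBADMSG;	/* authentication failure */

	return 0;
}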
@@ -135,9 +180,9 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
- __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- if (ret == -EBUSY)
+ if (unlikely(ret == -EBUSY))
return -ENOBUFS;
if (!ret) {
@@ -150,6 +195,91 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
return ret;
}
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
+}
+
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
+
+static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int i;
+
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ &res->out_mac_dma, GFP_KERNEL);
+ if (!res->out_mac)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].out_mac_dma = res->out_mac_dma +
+ i * (SEC_MAX_MAC_LEN << 1);
+ res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
+ }
+
+ return 0;
+}
+
+static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->out_mac)
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac, res->out_mac_dma);
+}
+
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
+ int ret;
+
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->alg_type == SEC_AEAD) {
+ ret = sec_alloc_mac_resource(dev, res);
+ if (ret)
+ goto get_fail;
+ }
+
+ return 0;
+get_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
+}
+
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
@@ -173,15 +303,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list)
- goto err_destroy_idr;
-
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_free_req_list;
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
@@ -191,7 +317,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
goto err_free_c_in_pool;
}
- ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -202,13 +328,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_req_list:
- kfree(qp_ctx->req_list);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
hisi_qm_release_qp(qp);
@@ -222,66 +346,42 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->resource_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- kfree(qp_ctx->req_list);
hisi_qm_release_qp(qp_ctx->qp);
}
-static int sec_skcipher_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx;
struct sec_dev *sec;
- struct device *dev;
- struct hisi_qm *qm;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
-
sec = sec_find_device(cpu_to_node(smp_processor_id()));
if (!sec) {
- pr_err("find no Hisilicon SEC device!\n");
+ pr_err("Can not find proper Hisilicon SEC device!\n");
return -ENODEV;
}
ctx->sec = sec;
- qm = &sec->qm;
- dev = &qm->pdev->dev;
- ctx->hlf_q_num = sec->ctx_q_num >> 0x1;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx)
return -ENOMEM;
for (i = 0; i < sec->ctx_q_num; i++) {
- ret = sec_create_qp_ctx(qm, ctx, i, 0);
+ ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
- c_ctx = &ctx->c_ctx;
- c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
- if (c_ctx->ivsize > SEC_IV_SIZE) {
- dev_err(dev, "get error iv size!\n");
- ret = -EINVAL;
- goto err_sec_release_qp_ctx;
- }
- c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
- &c_ctx->c_key_dma, GFP_KERNEL);
- if (!c_ctx->c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
return 0;
-
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -290,17 +390,9 @@ err_sec_release_qp_ctx:
return ret;
}
-static void sec_skcipher_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- int i = 0;
-
- if (c_ctx->c_key) {
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
- c_ctx->c_key, c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -308,6 +400,85 @@ static void sec_skcipher_exit(struct crypto_skcipher *tfm)
kfree(ctx->qp_ctx);
}
+static int sec_cipher_init(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_cipher_uninit(struct sec_ctx *ctx)
+{
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
+}
+
+static int sec_auth_init(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &a_ctx->a_key_dma, GFP_KERNEL);
+ if (!a_ctx->a_key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_auth_uninit(struct sec_ctx *ctx)
+{
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+ memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key, a_ctx->a_key_dma);
+}
+
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ctx = crypto_skcipher_ctx(tfm);
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
+
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
+{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
const u32 keylen,
const enum sec_cmode c_mode)
@@ -420,62 +591,8 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_skcipher_get_res(struct sec_ctx *ctx,
- struct sec_req *req)
-{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
- struct sec_cipher_req *c_req = &req->c_req;
- int req_id = req->req_id;
-
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
-
- return 0;
-}
-
-static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_res *res;
- int i;
-
- res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
- &res->c_ivin_dma, GFP_KERNEL);
- if (!res->c_ivin) {
- kfree(res);
- return -ENOMEM;
- }
-
- for (i = 1; i < QM_Q_DEPTH; i++) {
- res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
- res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
- }
- qp_ctx->alg_meta_data = res;
-
- return 0;
-}
-
-static void sec_skcipher_resource_free(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_cipher_res *res = qp_ctx->alg_meta_data;
- struct device *dev = SEC_CTX_DEV(ctx);
-
- if (!res)
- return;
-
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
- kfree(res);
-}
-
-static int sec_skcipher_map(struct device *dev, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static int sec_cipher_map(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -509,12 +626,20 @@ static int sec_skcipher_map(struct device *dev, struct sec_req *req,
return 0;
}
+static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+
+ hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+}
+
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
- c_req->sk_req->src, c_req->sk_req->dst);
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
@@ -523,10 +648,127 @@ static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_req *c_req = &req->c_req;
struct skcipher_request *sk_req = c_req->sk_req;
- if (sk_req->dst != sk_req->src)
- hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);
+ sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+}
+
+static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
+ struct crypto_authenc_keys *keys)
+{
+ switch (keys->enckeylen) {
+ case AES_KEYSIZE_128:
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
+ break;
+ case AES_KEYSIZE_192:
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
+ break;
+ case AES_KEYSIZE_256:
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
+ break;
+ default:
+ pr_err("hisi_sec2: aead aes key error!\n");
+ return -EINVAL;
+ }
+ memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
+
+ return 0;
+}
+
+static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
+ struct crypto_authenc_keys *keys)
+{
+ struct crypto_shash *hash_tfm = ctx->hash_tfm;
+ SHASH_DESC_ON_STACK(shash, hash_tfm);
+ int blocksize, ret;
- hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
+ if (!keys->authkeylen) {
+ pr_err("hisi_sec2: aead auth key error!\n");
+ return -EINVAL;
+ }
+
+ blocksize = crypto_shash_blocksize(hash_tfm);
+ if (keys->authkeylen > blocksize) {
+ ret = crypto_shash_digest(shash, keys->authkey,
+ keys->authkeylen, ctx->a_key);
+ if (ret) {
+ pr_err("hisi_sec2: aead auth digest error!\n");
+ return -EINVAL;
+ }
+ ctx->a_key_len = blocksize;
+ } else {
+ memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+ ctx->a_key_len = keys->authkeylen;
+ }
+
+ return 0;
+}
+
+static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ const u32 keylen, const enum sec_hash_alg a_alg,
+ const enum sec_calg c_alg,
+ const enum sec_mac_len mac_len,
+ const enum sec_cmode c_mode)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ ctx->a_ctx.a_alg = a_alg;
+ ctx->c_ctx.c_alg = c_alg;
+ ctx->a_ctx.mac_len = mac_len;
+ c_ctx->c_mode = c_mode;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ ret = sec_aead_aes_set_key(c_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ goto bad_key;
+ }
+
+ return 0;
+bad_key:
+ memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
+
+ return -EINVAL;
+}
+
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
+ u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+ SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+ SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+ SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+
+static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+}
+
+static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_cipher_req *cq = &req->c_req;
+ struct aead_request *aq = req->aead_req.aead_req;
+
+ sec_cipher_unmap(dev, cq, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
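The aead setkey path above splits the authenc() key blob with crypto_authenc_extractkeys() and, per standard HMAC key preprocessing, digests an auth key longer than the hash block size down to block size. A hedged sketch with illustrative demo_* names; note the explicit shash->tfm assignment, which SHASH_DESC_ON_STACK() does not perform by itself.

#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <linux/string.h>

static int demo_load_auth_key(struct crypto_shash *hash_tfm, u8 *a_key,
			      unsigned int *a_key_len,
			      const u8 *key, unsigned int keylen)
{
	SHASH_DESC_ON_STACK(shash, hash_tfm);
	struct crypto_authenc_keys keys;
	int blocksize, ret;

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret)
		return ret;

	blocksize = crypto_shash_blocksize(hash_tfm);
	if (keys.authkeylen > blocksize) {
		shash->tfm = hash_tfm;	/* not set by the on-stack macro */
		ret = crypto_shash_digest(shash, keys.authkey,
					  keys.authkeylen, a_key);
		*a_key_len = blocksize;
	} else {
		memcpy(a_key, keys.authkey, keys.authkeylen);
		*a_key_len = keys.authkeylen;
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
}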
@@ -534,13 +776,13 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ctx->req_op->do_transfer(ctx, req);
ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto unmap_req_buf;
return ret;
@@ -559,10 +801,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
- c_req->c_len = sk_req->cryptlen;
- memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -570,14 +811,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_sqe *sec_sqe = &req->sec_sqe;
- u8 de = 0;
u8 scene, sa_type, da_type;
u8 bd_type, cipher;
+ u8 de = 0;
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -611,25 +853,37 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
return 0;
}
-static void sec_update_iv(struct sec_req *req)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct skcipher_request *sk_req = req->c_req.sk_req;
u32 iv_size = req->ctx->c_ctx.ivsize;
struct scatterlist *sgl;
+ unsigned int cryptlen;
size_t sz;
+ u8 *iv;
if (req->c_req.encrypt)
- sgl = sk_req->dst;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
else
- sgl = sk_req->src;
+ sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
- sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
- iv_size, sk_req->cryptlen - iv_size);
- if (sz != iv_size)
+ if (alg_type == SEC_SKCIPHER) {
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
+ } else {
+ iv = aead_req->iv;
+ cryptlen = aead_req->cryptlen;
+ }
+
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -638,13 +892,109 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
sec_free_req_id(req);
/* IV output on encrypt in CBC mode */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
- sec_update_iv(req);
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ if (req->fake_busy)
sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- sk_req->base.complete(&sk_req->base, req->err_type);
+ sk_req->base.complete(&sk_req->base, err);
+}
+
+static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+
+ memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+}
+
+static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct aead_request *aq = a_req->aead_req;
+
+ sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+
+ sec_sqe->type2.mac_key_alg =
+ cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)((ctx->a_key_len) /
+ SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+
+ sec_sqe->type2.mac_key_alg |=
+ cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
+
+ sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
+
+ if (dir)
+ sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+ else
+ sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+ sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
+
+ sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+ sec_sqe->type2.mac_addr =
+ cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+}
+
+static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ int ret;
+
+ ret = sec_skcipher_bd_fill(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ return ret;
+ }
+
+ sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+
+ return 0;
+}
+
+static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+{
+ struct aead_request *a_req = req->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_cipher_req *c_req = &req->c_req;
+ size_t authsize = crypto_aead_authsize(tfm);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ size_t sz;
+
+ atomic_dec(&qp_ctx->pending_reqs);
+
+ if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
+ sec_update_iv(req, SEC_AEAD);
+
+ /* Copy output mac */
+ if (!err && c_req->encrypt) {
+ struct scatterlist *sgl = a_req->dst;
+
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+ qp_ctx->res[req->req_id].out_mac,
+ authsize, a_req->cryptlen +
+ a_req->assoclen);
+
+ if (unlikely(sz != authsize)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ err = -EINVAL;
+ }
+ }
+
+ sec_free_req_id(req);
+
+ if (req->fake_busy)
+ a_req->base.complete(&a_req->base, -EINPROGRESS);
+
+ a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
@@ -653,37 +1003,30 @@ static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
- sec_put_queue_id(ctx, req);
+ sec_free_queue_id(ctx, req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int issue_id, ret;
+ int queue_id;
/* To load balance */
- issue_id = sec_get_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[issue_id];
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id < 0) {
- sec_put_queue_id(ctx, req);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
return req->req_id;
}
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = 1;
+ req->fake_busy = true;
else
- req->fake_busy = 0;
-
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- atomic_dec(&qp_ctx->pending_reqs);
- sec_request_uninit(ctx, req);
- dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
- }
+ req->fake_busy = false;
- return ret;
+ return 0;
}
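
The fake_busy flag set above implements the usual crypto backlog pattern: once pending requests reach fake_req_limit the request is still queued, but the caller is told -EBUSY and later receives an extra -EINPROGRESS completion (see the callbacks above). A small sketch of the counter logic, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending_reqs;	/* stand-in for qp_ctx->pending_reqs */

static bool request_init(int fake_req_limit)
{
	/* atomic_inc_return() equivalent: add, then read the new value */
	int pending = atomic_fetch_add(&pending_reqs, 1) + 1;

	/* Past the soft limit the request is still accepted but marked
	 * "fake busy": the caller must wait for the -EINPROGRESS
	 * completion before submitting more work. */
	return fake_req_limit <= pending;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("fake_busy=%d\n", request_init(3));
	return 0;
}
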
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
@@ -691,20 +1034,20 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
int ret;
ret = sec_request_init(ctx, req);
- if (ret)
+ if (unlikely(ret))
return ret;
ret = sec_request_transfer(ctx, req);
- if (ret)
+ if (unlikely(ret))
goto err_uninit_req;
/* Output the IV on decryption */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- sec_update_iv(req);
+ sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -712,9 +1055,16 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
err_send_req:
/* On failure, restore the IV from the user request */
- if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
- memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
- ctx->c_ctx.ivsize);
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ else
+ memcpy(req->aead_req.aead_req->iv,
+ req->qp_ctx->res[req->req_id].c_ivin,
+ ctx->c_ctx.ivsize);
+ }
sec_request_untransfer(ctx, req);
err_uninit_req:
@@ -723,10 +1073,7 @@ err_uninit_req:
return ret;
}
-static struct sec_req_op sec_req_ops_tbl = {
- .get_res = sec_skcipher_get_res,
- .resource_alloc = sec_skcipher_resource_alloc,
- .resource_free = sec_skcipher_resource_free,
+static const struct sec_req_op sec_skcipher_req_ops = {
.buf_map = sec_skcipher_sgl_map,
.buf_unmap = sec_skcipher_sgl_unmap,
.do_transfer = sec_skcipher_copy_iv,
@@ -736,39 +1083,139 @@ static struct sec_req_op sec_req_ops_tbl = {
.process = sec_process,
};
+static const struct sec_req_op sec_aead_req_ops = {
+ .buf_map = sec_aead_sgl_map,
+ .buf_unmap = sec_aead_sgl_unmap,
+ .do_transfer = sec_aead_copy_iv,
+ .bd_fill = sec_aead_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_aead_callback,
+ .process = sec_process,
+};
+
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->req_op = &sec_req_ops_tbl;
+ ctx->req_op = &sec_skcipher_req_ops;
return sec_skcipher_init(tfm);
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
- sec_skcipher_exit(tfm);
+ sec_skcipher_uninit(tfm);
}
-static int sec_skcipher_param_check(struct sec_ctx *ctx,
- struct skcipher_request *sk_req)
+static int sec_aead_init(struct crypto_aead *tfm)
{
- u8 c_alg = ctx->c_ctx.c_alg;
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->alg_type = SEC_AEAD;
+ ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "invalid aead iv size!\n");
+ return -EINVAL;
+ }
+
+ ctx->req_op = &sec_aead_req_ops;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
+
+ ret = sec_auth_init(ctx);
+ if (ret)
+ goto err_auth_init;
+
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
+
+ return ret;
+
+err_cipher_init:
+ sec_auth_uninit(ctx);
+err_auth_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
+}
+
+static void sec_aead_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ sec_cipher_uninit(ctx);
+ sec_auth_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
+}
+
+static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ int ret;
+
+ ret = sec_aead_init(tfm);
+ if (ret) {
+ pr_err("hisi_sec2: aead init error!\n");
+ return ret;
+ }
+
+ auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(auth_ctx->hash_tfm)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ sec_aead_exit(tfm);
+ return PTR_ERR(auth_ctx->hash_tfm);
+ }
+
+ return 0;
+}
+
+static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+{
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+}
+
+static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha1");
+}
+
+static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha256");
+}
+
+static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
+{
+ return sec_aead_ctx_init(tfm, "sha512");
+}
+
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- if (!sk_req->src || !sk_req->dst) {
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
-
+ sreq->c_req.c_len = sk_req->cryptlen;
if (c_alg == SEC_CALG_3DES) {
- if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
return -EINVAL;
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher aes input length error!\n");
return -EINVAL;
}
@@ -789,14 +1236,14 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
- ret = sec_skcipher_param_check(ctx, sk_req);
- if (ret)
- return ret;
-
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
return ctx->req_op->process(ctx, req);
}
@@ -837,7 +1284,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_algs[] = {
+static struct skcipher_alg sec_skciphers[] = {
SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
AES_BLOCK_SIZE, 0)
@@ -867,23 +1314,133 @@ static struct skcipher_alg sec_algs[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ u8 c_alg = ctx->c_ctx.c_alg;
+ struct aead_request *req = sreq->aead_req.aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ size_t authsize = crypto_aead_authsize(tfm);
+
+ if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ return -EINVAL;
+ }
+
+ /* Support AES only */
+ if (unlikely(c_alg != SEC_CALG_AES)) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ return -EINVAL;
+ }
+
+ if (sreq->c_req.encrypt)
+ sreq->c_req.c_len = req->cryptlen;
+ else
+ sreq->c_req.c_len = req->cryptlen - authsize;
+
+ if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
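
On decryption the authentication tag trails the ciphertext, so the cipher length is cryptlen minus authsize; either way it must be block aligned for CBC. A standalone sketch of the same length derivation:

#include <stddef.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16u

/* Mirrors the c_len computation and alignment check above. */
static int aead_cipher_len(size_t cryptlen, size_t authsize, int encrypt,
			   size_t *c_len)
{
	*c_len = encrypt ? cryptlen : cryptlen - authsize;
	if (*c_len & (AES_BLOCK_SIZE - 1))
		return -1;	/* not a whole number of AES blocks */
	return 0;
}

int main(void)
{
	size_t c_len;

	/* decrypt of a 48-byte payload carrying a 16-byte ICV */
	if (!aead_cipher_len(48, 16, 0, &c_len))
		printf("c_len=%zu\n", c_len);	/* prints 32 */
	return 0;
}
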
+
+static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ req->aead_req.aead_req = a_req;
+ req->c_req.encrypt = encrypt;
+ req->ctx = ctx;
+
+ ret = sec_aead_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
+
+ return ctx->req_op->process(ctx, req);
+}
+
+static int sec_aead_encrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, true);
+}
+
+static int sec_aead_decrypt(struct aead_request *a_req)
+{
+ return sec_aead_crypto(a_req, false);
+}
+
+#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+ ctx_exit, blk_size, iv_size, max_authsize)\
+{\
+ .base = {\
+ .cra_name = sec_cra_name,\
+ .cra_driver_name = "hisi_sec_"sec_cra_name,\
+ .cra_priority = SEC_PRIORITY,\
+ .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_blocksize = blk_size,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
+ .cra_module = THIS_MODULE,\
+ },\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
+ .setkey = sec_set_key,\
+ .decrypt = sec_aead_decrypt,\
+ .encrypt = sec_aead_encrypt,\
+ .ivsize = iv_size,\
+ .maxauthsize = max_authsize,\
+}
+
+#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
+ SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
+ sec_aead_ctx_exit, blksize, ivsize, authsize)
+
+static struct aead_alg sec_aeads[] = {
+ SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
+ sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
+ sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+
+ SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
+ sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+};
+
int sec_register_to_crypto(void)
{
int ret = 0;
/* Avoid repeated registration */
- mutex_lock(&sec_algs_lock);
- if (++sec_active_devs == 1)
- ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ goto reg_aead_fail;
+ }
+
+ return ret;
+
+reg_aead_fail:
+ crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- mutex_lock(&sec_algs_lock);
- if (--sec_active_devs == 0)
- crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
- mutex_unlock(&sec_algs_lock);
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ }
}
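
Registration is now guarded by a plain reference count instead of the old mutex plus counter pair: the first device registers the algorithms and the last one unregisters them. A userspace sketch of the pattern with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_devs;	/* stand-in for sec_active_devs */

static void register_to_crypto(void)
{
	if (atomic_fetch_add(&active_devs, 1) + 1 == 1)
		printf("registering skciphers + aeads\n");
}

static void unregister_from_crypto(void)
{
	if (atomic_fetch_sub(&active_devs, 1) - 1 == 0)
		printf("unregistering skciphers + aeads\n");
}

int main(void)
{
	register_to_crypto();		/* registers */
	register_to_crypto();		/* no-op */
	unregister_from_crypto();	/* no-op */
	unregister_from_crypto();	/* unregisters */
	return 0;
}
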
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 097dce828340..b2786e17d8fe 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -14,6 +14,18 @@ enum sec_calg {
SEC_CALG_SM4 = 0x3,
};
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
@@ -34,6 +46,12 @@ enum sec_bd_type {
SEC_BD_TYPE2 = 0x2,
};
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
enum sec_cipher_dir {
SEC_CIPHER_ENC = 0x1,
SEC_CIPHER_DEC = 0x2,
@@ -48,8 +66,8 @@ enum sec_addr_type {
struct sec_sqe_type2 {
/*
- * mac_len: 0~5 bits
- * a_key_len: 6~10 bits
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
* a_alg: 11~16 bits
*/
__le32 mac_key_alg;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 74f0654028c9..2bbaf1e2dae7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -32,6 +32,7 @@
#define SEC_PF_DEF_Q_NUM 64
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -221,7 +222,7 @@ static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
if (ret)
return -EINVAL;
- if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) {
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
}
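
The validation above accepts only non-zero, even queue counts up to the new SEC_CTX_Q_NUM_MAX. A trivial standalone check with the same constraints:

#include <stdio.h>
#include <stdlib.h>

#define SEC_CTX_Q_NUM_MAX 32u

/* Same constraints as sec_ctx_q_num_set() above: non-zero, even,
 * and no larger than the per-ctx maximum. */
static int ctx_q_num_valid(unsigned long q)
{
	return q && q <= SEC_CTX_Q_NUM_MAX && !(q & 0x1);
}

int main(int argc, char **argv)
{
	unsigned long q = argc > 1 ? strtoul(argv[1], NULL, 0) : 24;

	printf("%lu -> %s\n", q, ctx_q_num_valid(q) ? "ok" : "invalid");
	return 0;
}
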
@@ -235,7 +236,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
+MODULE_PARM_DESC(ctx_q_num, "Number of queues in ctx (default 24; even values 2, 4, ..., 32)");
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -608,6 +609,14 @@ static const struct file_operations sec_dbg_fops = {
.write = sec_debug_write,
};
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
static int sec_core_debug_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -628,9 +637,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 012023c347b1..0e8c7e324fb4 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -202,18 +202,21 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, ret, sg_n;
+ int i, sg_n, sg_n_mapped;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
- if (sg_n > pool->sge_nr)
+
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ if (!sg_n_mapped)
return ERR_PTR(-EINVAL);
- ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!ret)
+ if (sg_n_mapped > pool->sge_nr) {
+ dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-EINVAL);
+ }
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
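
The reordering above is the point of this hunk: an IOMMU may coalesce entries during dma_map_sg(), so the hardware SGL pool capacity has to be checked against the mapped count rather than sg_nents(), and a failed check must unmap before returning. A conceptual sketch with hypothetical helpers (not kernel API):

#include <stdio.h>

/* Validate the *mapped* entry count, which may be smaller than the
 * original nents, and undo the mapping on the error path. */
static int map_to_hw_sgl(int sg_n, int sge_nr)
{
	int sg_n_mapped = sg_n > 2 ? sg_n - 1 : sg_n; /* fake coalescing */

	if (!sg_n_mapped)
		return -22;		/* -EINVAL */
	if (sg_n_mapped > sge_nr) {
		/* dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); */
		return -22;
	}
	return sg_n_mapped;		/* loop over this many entries */
}

int main(void)
{
	printf("%d\n", map_to_hw_sgl(10, 8));	/* still 9 > 8: error */
	printf("%d\n", map_to_hw_sgl(9, 8));	/* coalesced to 8: ok */
	return 0;
}
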
@@ -224,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
- for_each_sg(sgl, sg, sg_n, i) {
+ for_each_sg(sgl, sg, sg_n_mapped, i) {
sg_map_to_hw_sg(sg, curr_hw_sge);
inc_hw_sgl_sge(curr_hw_sgl);
curr_hw_sge++;
@@ -260,7 +263,3 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_DESCRIPTION("HiSilicon Accelerator SGL support");
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 79fc4dd3fe00..bc1db26598bb 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -11,6 +11,10 @@
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB 0x02
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 795428c1d07e..9815d5e3ccd0 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -46,10 +46,8 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- struct scatterlist *src;
- struct scatterlist *dst;
- size_t slen;
- size_t dlen;
+ int sskip;
+ int dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
@@ -119,13 +117,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen)
+ u32 dlen, int sskip, int dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen;
+ sqe->input_data_length = slen - sskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen;
+ sqe->dest_avail_out = dlen - dskip;
sqe->source_addr_l = lower_32_bits(s_addr);
sqe->source_addr_h = upper_32_bits(s_addr);
sqe->dest_addr_l = lower_32_bits(d_addr);
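
Instead of splitting the compression header out of the scatterlist, the hardware is now told to skip sskip input and dskip output bytes via the dw7/dw8 offset fields. A userspace analogue of the FIELD_PREP() packing, using the GENMASK(23, 0) masks from the zip.h hunk above:

#include <stdint.h>
#include <stdio.h>

#define HZIP_IN_SGE_DATA_OFFSET_M	0x00ffffffu	/* GENMASK(23, 0) */
#define HZIP_OUT_SGE_DATA_OFFSET_M	0x00ffffffu	/* GENMASK(23, 0) */

/* Program skip offsets and shrink the usable lengths accordingly. */
static void fill_offsets(uint32_t *dw7, uint32_t *dw8,
			 uint32_t *in_len, uint32_t *avail_out,
			 uint32_t slen, uint32_t dlen,
			 uint32_t sskip, uint32_t dskip)
{
	*dw7 = sskip & HZIP_IN_SGE_DATA_OFFSET_M;
	*dw8 = dskip & HZIP_OUT_SGE_DATA_OFFSET_M;
	*in_len = slen - sskip;
	*avail_out = dlen - dskip;
}

int main(void)
{
	uint32_t dw7, dw8, in_len, avail_out;

	/* decompress: skip a 10-byte gzip header on the input side */
	fill_offsets(&dw7, &dw8, &in_len, &avail_out, 1034, 4096, 10, 0);
	printf("in=%u out=%u dw7=%u dw8=%u\n", in_len, avail_out, dw7, dw8);
	return 0;
}
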
@@ -327,11 +327,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP)
- kfree(req->dst);
- else
- kfree(req->src);
-
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
memset(req, 0, sizeof(struct hisi_zip_req));
@@ -359,8 +354,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
}
dlen = sqe->produced;
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
acomp_req->dlen = dlen + head_size;
@@ -454,20 +449,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
}
}
-static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes,
- size_t remains, struct scatterlist **out)
-{
-#define SPLIT_NUM 2
- size_t split_sizes[SPLIT_NUM];
- int out_mapped_nents[SPLIT_NUM];
-
- split_sizes[0] = bytes;
- split_sizes[1] = remains;
-
- return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out,
- out_mapped_nents, GFP_KERNEL);
-}
-
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_qp_ctx *qp_ctx,
size_t head_size, bool is_comp)
@@ -475,31 +456,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *q = req_q->q;
struct hisi_zip_req *req_cache;
- struct scatterlist *out[2];
- struct scatterlist *sgl;
- size_t len;
- int ret, req_id;
-
- /*
- * remove/add zlib/gzip head, as hardware operations do not include
- * comp head. so split req->src to get sgl without heads in acomp, or
- * add comp head to req->dst ahead of that hardware output compressed
- * data in sgl splited from req->dst without comp head.
- */
- if (is_comp) {
- sgl = req->dst;
- len = req->dlen - head_size;
- } else {
- sgl = req->src;
- len = req->slen - head_size;
- }
-
- ret = get_sg_skip_bytes(sgl, head_size, len, out);
- if (ret)
- return ERR_PTR(ret);
-
- /* sgl for comp head is useless, so free it now */
- kfree(out[0]);
+ int req_id;
write_lock(&req_q->req_lock);
@@ -507,7 +464,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- kfree(out[1]);
return ERR_PTR(-EBUSY);
}
set_bit(req_id, req_q->req_bitmap);
@@ -515,16 +471,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
+
if (is_comp) {
- req_cache->src = req->src;
- req_cache->dst = out[1];
- req_cache->slen = req->slen;
- req_cache->dlen = req->dlen - head_size;
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
} else {
- req_cache->src = out[1];
- req_cache->dst = req->dst;
- req_cache->slen = req->slen - head_size;
- req_cache->dlen = req->dlen;
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
}
write_unlock(&req_q->req_lock);
@@ -536,6 +489,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_qp_ctx *qp_ctx)
{
struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
+ struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
@@ -543,16 +497,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
dma_addr_t output;
int ret;
- if (!req->src || !req->slen || !req->dst || !req->dlen)
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
return -EINVAL;
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool,
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
if (IS_ERR(req->hw_src))
return PTR_ERR(req->hw_src);
req->dma_src = input;
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool,
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
&output);
if (IS_ERR(req->hw_dst)) {
@@ -561,8 +515,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
}
req->dma_dst = output;
- hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen,
- req->dlen);
+ hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen,
+ a_req->dlen, req->sskip, req->dskip);
hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
hisi_zip_config_tag(zip_sqe, req->req_id);
@@ -574,9 +528,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
return ret;
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..25d5227f74a1 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -332,10 +332,10 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
struct dma_slave_config dma_conf;
int err = -EINVAL;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
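
dma_request_chan() reports errors through the pointer itself (for example -EPROBE_DEFER), so the code must propagate PTR_ERR() rather than hardcode -EBUSY. A userspace sketch of the IS_ERR()/PTR_ERR() convention:

#include <stdio.h>

/* Kernel error pointers live in the top MAX_ERRNO addresses. */
#define MAX_ERRNO 4095

static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *p)
{
	return (long)p;
}

int main(void)
{
	void *chan = (void *)(long)-517;	/* pretend -EPROBE_DEFER */

	if (is_err(chan))
		printf("propagate %ld, do not hardcode -EBUSY\n",
		       ptr_err(chan));
	return 0;
}
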
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 64894d8b442a..2cb53fbae841 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -501,8 +501,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) |
- priv->config.cd_size,
+ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
+ (priv->config.cd_offset << 14) | priv->config.cd_size,
EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((cd_fetch_cnt *
(cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
@@ -974,16 +974,18 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
+ struct safexcel_token *dmmy;
int ret = 0;
/* Prepare command descriptor */
- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
+ &dmmy);
if (IS_ERR(cdesc))
return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
- cdesc->control_data.refresh = 0;
+ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
/* Prepare result descriptor */
@@ -1331,6 +1333,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
/* res token is behind the descr, but ofs must be rounded to buswdth */
priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
@@ -1341,6 +1344,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
/* convert dwords to bytes */
priv->config.cd_offset *= sizeof(u32);
+ priv->config.cdsh_offset *= sizeof(u32);
priv->config.rd_offset *= sizeof(u32);
priv->config.res_offset *= sizeof(u32);
}
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index b4624b5687ce..94016c505abb 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -40,7 +40,8 @@
/* Static configuration */
#define EIP197_DEFAULT_RING_SIZE 400
-#define EIP197_MAX_TOKENS 19
+#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */
+#define EIP197_MAX_TOKENS 16
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_DEPTH 2
#define EIP197_MAX_BATCH_SZ 64
@@ -207,6 +208,7 @@
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
+#define EIP197_CDR_DESC_MODE_ADCP BIT(30)
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
@@ -277,9 +279,9 @@
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
-#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29))
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29)
#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29)
-#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30)
/* EIP197_HIA_DFE/DSE_THR_CTRL */
#define EIP197_DxE_THR_CTRL_EN BIT(30)
@@ -553,6 +555,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
{
token->opcode = EIP197_TOKEN_OPCODE_NOOP;
token->packet_length = BIT(2);
+ token->stat = 0;
+ token->instructions = 0;
}
/* Instructions */
@@ -574,14 +578,13 @@ struct safexcel_control_data_desc {
u16 application_id;
u16 rsvd;
- u8 refresh:2;
- u32 context_lo:30;
+ u32 context_lo;
u32 context_hi;
u32 control0;
u32 control1;
- u32 token[EIP197_MAX_TOKENS];
+ u32 token[EIP197_EMB_TOKENS];
} __packed;
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
@@ -591,7 +594,10 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
+#define EIP197_TYPE_BCLA 0x0
#define EIP197_TYPE_EXTENDED 0x3
+#define EIP197_CONTEXT_SMALL 0x2
+#define EIP197_CONTEXT_SIZE_MASK 0x3
/* Basic Command Descriptor format */
struct safexcel_command_desc {
@@ -599,13 +605,16 @@ struct safexcel_command_desc {
u8 rsvd0:5;
u8 last_seg:1;
u8 first_seg:1;
- u16 additional_cdata_size:8;
+ u8 additional_cdata_size:8;
u32 rsvd1;
u32 data_lo;
u32 data_hi;
+ u32 atok_lo;
+ u32 atok_hi;
+
struct safexcel_control_data_desc control_data;
} __packed;
@@ -629,15 +638,20 @@ enum eip197_fw {
struct safexcel_desc_ring {
void *base;
+ void *shbase;
void *base_end;
+ void *shbase_end;
dma_addr_t base_dma;
+ dma_addr_t shbase_dma;
/* write and read pointers */
void *write;
+ void *shwrite;
void *read;
/* descriptor element offset */
- unsigned offset;
+ unsigned int offset;
+ unsigned int shoffset;
};
enum safexcel_alg_type {
@@ -652,6 +666,7 @@ struct safexcel_config {
u32 cd_size;
u32 cd_offset;
+ u32 cdsh_offset;
u32 rd_size;
u32 rd_offset;
@@ -862,7 +877,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 len,
u32 full_data_len,
- dma_addr_t context);
+ dma_addr_t context,
+ struct safexcel_token **atoken);
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index c02995694b41..0c5e80c3f6e3 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -47,8 +47,12 @@ struct safexcel_cipher_ctx {
u32 mode;
enum safexcel_cipher_alg alg;
- char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
- char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aead; /* !=0=AEAD, 2=IPsec ESP AEAD, 3=IPsec ESP GMAC */
+ u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ u8 aadskip;
+ u8 blocksz;
+ u32 ivmask;
+ u32 ctrinit;
__le32 key[16];
u32 nonce;
@@ -72,251 +76,298 @@ struct safexcel_cipher_req {
int nr_src, nr_dst;
};
-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
- struct safexcel_command_desc *cdesc)
+static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
{
- u32 block_sz = 0;
-
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
- ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
/* 64 bit IV part */
memcpy(&cdesc->control_data.token[1], iv, 8);
-
- if (ctx->alg == SAFEXCEL_CHACHA20 ||
- ctx->xcm == EIP197_XCM_MODE_CCM) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- cdesc->control_data.token[3] =
- (__force u32)cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_GCM ||
- (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* 96 bit IV part */
- memcpy(&cdesc->control_data.token[0], iv, 12);
-
- if (ctx->alg == SAFEXCEL_CHACHA20) {
- /* 32 bit counter, starting at 0 */
- cdesc->control_data.token[3] = 0;
- } else {
- /* 32 bit counter, start at 1 (big endian!) */
- *(__be32 *)&cdesc->control_data.token[3] =
- cpu_to_be32(1);
- }
-
- return;
- } else if (ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return 4;
+ }
+ if (ctx->alg == SAFEXCEL_CHACHA20) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
/* 96 bit nonce part */
memcpy(&cdesc->control_data.token[0], &iv[4], 12);
/* 32 bit counter */
cdesc->control_data.token[3] = *(u32 *)iv;
-
- return;
- } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
-
- /* Variable length IV part */
- memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
- /* Start variable length counter at 0 */
- memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
- 0, iv[0] + 1);
-
- return;
+ return 4;
}
- if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
- switch (ctx->alg) {
- case SAFEXCEL_DES:
- block_sz = DES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_3DES:
- block_sz = DES3_EDE_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_SM4:
- block_sz = SM4_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- case SAFEXCEL_AES:
- block_sz = AES_BLOCK_SIZE;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- break;
- default:
- break;
- }
- memcpy(cdesc->control_data.token, iv, block_sz);
- }
+ cdesc->control_data.options |= ctx->ivmask;
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
+ return ctx->blocksz / sizeof(u32);
}
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
u32 length)
{
struct safexcel_token *token;
+ int ivlen;
- safexcel_cipher_token(ctx, iv, cdesc);
+ ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
+ if (ivlen == 4) {
+ /* No space in cdesc, instruction moves to atoken */
+ cdesc->additional_cdata_size = 1;
+ token = atoken;
+ } else {
+ /* Everything fits in cdesc */
+ token = (struct safexcel_token *)(cdesc->control_data.token + 2);
+ /* Need to pad with NOP */
+ eip197_noop_token(&token[1]);
+ }
- /* skip over worst case IV of 4 dwords, no need to be exact */
- token = (struct safexcel_token *)(cdesc->control_data.token + 4);
+ token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token->packet_length = length;
+ token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
+ token->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
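
A conceptual sketch of the placement decision above, under the assumption spelled out in the comments: a 4-dword IV fills the entire embedded token area, so the DIRECTION instruction has to move to the additional token block and additional_cdata_size is bumped, while shorter IVs leave room inside the command descriptor, padded with a NOP:

#include <stdio.h>

#define EIP197_EMB_TOKENS 4	/* embedded token dwords per descriptor */

static const char *token_home(int ivlen_dwords)
{
	return ivlen_dwords == EIP197_EMB_TOKENS ?
	       "atoken (additional_cdata_size = 1)" :
	       "cdesc->control_data.token + ivlen (pad with NOP)";
}

int main(void)
{
	printf("AES-CBC (4 dwords): %s\n", token_home(4));
	printf("DES-CBC (2 dwords): %s\n", token_home(2));
	return 0;
}
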
- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
- EIP197_TOKEN_STAT_LAST_HASH;
- token[0].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc)
+{
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
+ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
+ /* 32 bit nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
+ /* 96 bit IV part */
+ memcpy(&cdesc->control_data.token[0], iv, 12);
+ /* 32 bit counter, start at 0 or 1 (big endian!) */
+ cdesc->control_data.token[3] =
+ (__force u32)cpu_to_be32(ctx->ctrinit);
+ return;
+ }
+ /* CBC */
+ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc,
+ struct safexcel_token *atoken,
enum safexcel_cipher_direction direction,
u32 cryptlen, u32 assoclen, u32 digestsize)
{
- struct safexcel_token *token;
+ struct safexcel_token *aadref;
+ int atoksize = 2; /* Start with minimum size */
+ int assocadj = assoclen - ctx->aadskip, aadalign;
- safexcel_cipher_token(ctx, iv, cdesc);
+ /* Always 4 dwords of embedded IV for AEAD modes */
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- if (direction == SAFEXCEL_ENCRYPT) {
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 14);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
- } else {
+ if (direction == SAFEXCEL_DECRYPT)
cryptlen -= digestsize;
- /* align end of instruction sequence to end of token */
- token = (struct safexcel_token *)(cdesc->control_data.token +
- EIP197_MAX_TOKENS - 15);
-
- token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
- token[13].packet_length = digestsize;
- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
-
- token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY;
- token[14].packet_length = digestsize |
- EIP197_TOKEN_HASH_RESULT_VERIFY;
- token[14].stat = EIP197_TOKEN_STAT_LAST_HASH |
- EIP197_TOKEN_STAT_LAST_PACKET;
- token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
- }
-
- if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
- /* For ESP mode (and not GMAC), skip over the IV */
- token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
-
- assoclen -= EIP197_AEAD_IPSEC_IV_SIZE;
- }
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
+ /* Construct IV block B0 for the CBC-MAC */
+ u8 *final_iv = (u8 *)cdesc->control_data.token;
+ u8 *cbcmaciv = (u8 *)&atoken[1];
+ __le32 *aadlen = (__le32 *)&atoken[5];
+
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* Length + nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* Fixup flags byte */
+ *(__le32 *)cbcmaciv =
+ cpu_to_le32(ctx->nonce |
+ ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2));
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+ memcpy(cbcmaciv + 4, iv, 8);
+ /* Start counter at 0 */
+ cdesc->control_data.token[3] = 0;
+ /* Message length */
+ *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
+ } else {
+ /* Variable length IV part */
+ memcpy(final_iv, iv, 15 - iv[0]);
+ memcpy(cbcmaciv, iv, 15 - iv[0]);
+ /* Start variable length counter at 0 */
+ memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
+ memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+ /* fixup flags byte */
+ cbcmaciv[0] |= ((assocadj > 0) << 6) |
+ ((digestsize - 2) << 2);
+ /* insert lower 2 bytes of message length */
+ cbcmaciv[14] = cryptlen >> 8;
+ cbcmaciv[15] = cryptlen & 255;
+ }
- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[6].packet_length = assoclen;
- token[6].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE +
+ ((assocadj > 0) << 1);
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+ EIP197_TOKEN_INS_TYPE_HASH;
+
+ if (likely(assocadj)) {
+ *aadlen = cpu_to_le32((assocadj >> 8) |
+ (assocadj & 255) << 8);
+ atoken += 6;
+ atoksize += 7;
+ } else {
+ atoken += 5;
+ atoksize += 6;
+ }
- if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
- token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[11].packet_length = cryptlen;
- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
- if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
- /* Do not send to crypt engine in case of GMAC */
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ atoken++;
+
+ /* For CCM only, align AAD data towards hash engine */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ aadalign = (assocadj + 2) & 15;
+ atoken->packet_length = assocadj && aadalign ?
+ 16 - aadalign :
+ 0;
+ if (likely(cryptlen)) {
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[11].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYPTO |
- EIP197_TOKEN_INS_TYPE_HASH |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
- } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
- token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ } else {
+ safexcel_aead_iv(ctx, iv, cdesc);
+
+ /* Process AAD data */
+ aadref = atoken;
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = assocadj;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH;
}
+ atoken++;
- if (!ctx->xcm)
- return;
+ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
+ /* For ESP mode (and not GMAC), skip over the IV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
+ direction == SAFEXCEL_DECRYPT)) {
+ /* Poly-chacha decryption needs a dummy NOP here ... */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16; /* According to Op Manual */
+ atoken->stat = 0;
+ atoken->instructions = 0;
+ atoken++;
+ atoksize++;
+ }
- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
- token[9].packet_length = 0;
- token[9].instructions = AES_BLOCK_SIZE;
+ if (ctx->xcm) {
+ /* For GCM and CCM, obtain enc(Y0) */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+ atoken->packet_length = 0;
+ atoken->stat = 0;
+ atoken->instructions = AES_BLOCK_SIZE;
+ atoken++;
+
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = AES_BLOCK_SIZE;
+ atoken->stat = 0;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_TYPE_CRYPTO;
+ atoken++;
+ atoksize += 2;
+ }
- token[10].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[10].packet_length = AES_BLOCK_SIZE;
- token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
- EIP197_TOKEN_INS_TYPE_CRYPTO;
+ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
+ /* Fixup stat field for AAD direction instruction */
+ aadref->stat = 0;
- if (ctx->xcm != EIP197_XCM_MODE_GCM) {
- u8 *final_iv = (u8 *)cdesc->control_data.token;
- u8 *cbcmaciv = (u8 *)&token[1];
- __le32 *aadlen = (__le32 *)&token[5];
+ /* Process crypto data */
+ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ atoken->packet_length = cryptlen;
- /* Construct IV block B0 for the CBC-MAC */
- token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
- token[0].packet_length = AES_BLOCK_SIZE +
- ((assoclen > 0) << 1);
- token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
- EIP197_TOKEN_INS_TYPE_HASH;
- /* Variable length IV part */
- memcpy(cbcmaciv, final_iv, 15 - final_iv[0]);
- /* fixup flags byte */
- cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
- /* Clear upper bytes of variable message length to 0 */
- memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1);
- /* insert lower 2 bytes of message length */
- cbcmaciv[14] = cryptlen >> 8;
- cbcmaciv[15] = cryptlen & 255;
+ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
+ /* Fixup instruction field for AAD dir instruction */
+ aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
- if (assoclen) {
- *aadlen = cpu_to_le32((assoclen >> 8) |
- ((assoclen & 0xff) << 8));
- assoclen += 2;
+ /* Do not send to crypt engine in case of GMAC */
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
+ } else {
+ atoken->instructions = EIP197_TOKEN_INS_LAST |
+ EIP197_TOKEN_INS_TYPE_CRYPTO |
+ EIP197_TOKEN_INS_TYPE_HASH |
+ EIP197_TOKEN_INS_TYPE_OUTPUT;
}
- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align AAD data towards hash engine */
- token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
- assoclen &= 15;
- token[7].packet_length = assoclen ? 16 - assoclen : 0;
-
- if (likely(cryptlen)) {
- token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
-
- /* Align crypto data towards hash engine */
- token[11].stat = 0;
-
- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
- cryptlen &= 15;
- token[12].packet_length = cryptlen ? 16 - cryptlen : 0;
- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ cryptlen &= 15;
+ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
+ atoken->stat = 0;
+ /* For CCM only, pad crypto data to the hash engine */
+ atoken++;
+ atoksize++;
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = 16 - cryptlen;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
- token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
- token[7].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_HASH;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
}
+ atoken++;
+ atoksize++;
}
+
+ if (direction == SAFEXCEL_ENCRYPT) {
+ /* Append ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ } else {
+ /* Extract ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+ atoken->packet_length = digestsize;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+ atoken++;
+ atoksize++;
+
+ /* Verify ICV */
+ atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
+ atoken->packet_length = digestsize |
+ EIP197_TOKEN_HASH_RESULT_VERIFY;
+ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+ }
+
+ /* Fixup length of the token in the command descriptor */
+ cdesc->additional_cdata_size = atoksize;
}
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
@@ -329,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
@@ -382,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
case SAFEXCEL_DES:
err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_3DES:
err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
if (unlikely(err))
- goto badkey_expflags;
+ goto badkey;
break;
case SAFEXCEL_AES:
err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
@@ -450,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
}
- crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_RES_MASK);
-
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
@@ -470,8 +516,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-badkey_expflags:
memzero_explicit(&keys, sizeof(keys));
return err;
}
@@ -656,6 +700,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
+ struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
@@ -730,56 +775,60 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
- /* The EIP cannot deal with zero length input packets! */
- if (totlen == 0)
- totlen = 1;
+ if (!totlen) {
+ /*
+ * The EIP97 cannot deal with zero length input packets!
+ * So stuff a dummy command descriptor indicating a 1 byte
+ * (dummy) input packet, using the context record as source.
+ */
+ first_cdesc = safexcel_add_cdesc(priv, ring,
+ 1, 1, ctx->base.ctxr_dma,
+ 1, 1, ctx->base.ctxr_dma,
+ &atoken);
+ if (IS_ERR(first_cdesc)) {
+ /* No space left in the command descriptor ring */
+ ret = PTR_ERR(first_cdesc);
+ goto cdesc_rollback;
+ }
+ n_cdesc = 1;
+ goto skip_cdesc;
+ }
/* command descriptors */
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - len < 0)
+ if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - len),
sg_dma_address(sg), len, totlen,
- ctx->base.ctxr_dma);
+ ctx->base.ctxr_dma, &atoken);
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
goto cdesc_rollback;
}
- n_cdesc++;
- if (n_cdesc == 1) {
+ if (!n_cdesc)
first_cdesc = cdesc;
- }
+ n_cdesc++;
queued -= len;
if (!queued)
break;
}
-
- if (unlikely(!n_cdesc)) {
- /*
- * Special case: zero length input buffer.
- * The engine always needs the 1st command descriptor, however!
- */
- first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
- ctx->base.ctxr_dma);
- n_cdesc = 1;
- }
-
+skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
if (ctx->aead)
- safexcel_aead_token(ctx, iv, first_cdesc,
+ safexcel_aead_token(ctx, iv, first_cdesc, atoken,
sreq->direction, cryptlen,
assoclen, digestsize);
else
- safexcel_skcipher_token(ctx, iv, first_cdesc,
+ safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
/* result descriptors */
@@ -1166,6 +1215,8 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
return 0;
}
@@ -1230,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1264,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1300,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -1336,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -1381,10 +1437,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
/* exclude the nonce here */
keylen = len - CTR_RFC3686_NONCE_SIZE;
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -1410,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -1445,6 +1500,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1452,7 +1508,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1468,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1505,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1537,6 +1597,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1544,7 +1605,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
return err;
/* if context exits and key changed, need to invalidate it */
- if (ctx->base.ctxr_dma)
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, len))
ctx->base.needs_inv = true;
@@ -1560,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -1597,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1652,6 +1717,9 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
ctx->priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
+ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->ctrinit = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
@@ -1840,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1874,6 +1944,8 @@ static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1908,6 +1980,8 @@ static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1942,6 +2016,8 @@ static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -1976,6 +2052,8 @@ static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2010,6 +2088,8 @@ static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha1_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2044,6 +2124,8 @@ static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha256_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2078,6 +2160,8 @@ static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha224_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2112,6 +2196,8 @@ static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha512_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2146,6 +2232,8 @@ static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
safexcel_aead_sha384_cra_init(tfm);
ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -2362,10 +2450,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* Only half of the key data is cipher key */
keylen = (len >> 1);
ret = aes_expandkey(&aes, key, keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2381,10 +2467,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
/* The other half is the tweak key */
ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < keylen / sizeof(u32); i++) {
@@ -2412,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
ctx->xts = 1;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
return 0;
@@ -2472,7 +2557,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2496,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2532,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->hkaes))
- return PTR_ERR(ctx->hkaes);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->hkaes);
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
@@ -2589,7 +2668,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
ret = aes_expandkey(&aes, key, len);
if (ret) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&aes, sizeof(aes));
return ret;
}
@@ -2632,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
ctx->state_sz = 3 * AES_BLOCK_SIZE;
ctx->xcm = EIP197_XCM_MODE_CCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+ ctx->ctrinit = 0;
return 0;
}
@@ -2719,10 +2798,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- if (len != CHACHA_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2734,6 +2812,7 @@ static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_CHACHA20;
+ ctx->ctrinit = 0;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
return 0;
}
@@ -2775,10 +2854,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
ctx->nonce = *(u32 *)(key + len);
}
- if (len != CHACHA_KEY_SIZE) {
- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != CHACHA_KEY_SIZE)
return -EINVAL;
- }
+
safexcel_chacha20_setkey(ctx, key);
return 0;
@@ -2885,6 +2963,7 @@ static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
ctx->alg = SAFEXCEL_CHACHA20;
ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
+ ctx->ctrinit = 0;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
ctx->state_sz = 0; /* Precomputed by HW */
return 0;
@@ -2933,6 +3012,7 @@ static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_chachapoly_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -2971,10 +3051,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- if (len != SM4_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len != SM4_KEY_SIZE)
return -EINVAL;
- }
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, SM4_KEY_SIZE))
@@ -3013,6 +3091,8 @@ static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+ ctx->blocksz = 0;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
return 0;
}
@@ -3047,6 +3127,7 @@ static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
return 0;
}
@@ -3083,6 +3164,7 @@ static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
return 0;
}
@@ -3119,6 +3201,7 @@ static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
return 0;
}
@@ -3169,6 +3252,7 @@ static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
safexcel_skcipher_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
return 0;
}
@@ -3228,6 +3312,7 @@ static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
safexcel_aead_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -3335,6 +3420,7 @@ static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
safexcel_aead_fallback_cra_init(tfm);
ctx->alg = SAFEXCEL_SM4;
+ ctx->blocksz = SM4_BLOCK_SIZE;
ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
ctx->state_sz = SM3_DIGEST_SIZE;
return 0;
@@ -3473,6 +3559,7 @@ static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_gcm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
@@ -3607,6 +3694,7 @@ static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
ret = safexcel_aead_ccm_cra_init(tfm);
ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
+ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
return ret;
}
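
The safexcel_cipher.c hunks above repeat one pattern: each cra_init now records the per-algorithm block size and IV option mask in the context, and each setkey only flags the context record for invalidation when the engine actually has a transform record cache (EIP197_TRC_CACHE) and the key really changed. Below is a minimal userspace model of that setkey check, with illustrative names standing in for the driver's context and flag fields:

#include <stdbool.h>
#include <string.h>

#define TRC_CACHE 0x1			/* stand-in for EIP197_TRC_CACHE */

struct model_ctx {
	unsigned long hw_flags;		/* engine capability flags */
	bool ctxr_valid;		/* stand-in for ctx->base.ctxr_dma */
	bool needs_inv;
	unsigned char key[32];
	unsigned int keylen;
};

static void model_setkey(struct model_ctx *ctx, const unsigned char *key,
			 unsigned int len)
{
	/* Invalidate only if a cached record exists and the key changed */
	if ((ctx->hw_flags & TRC_CACHE) && ctx->ctxr_valid &&
	    memcmp(ctx->key, key, len))
		ctx->needs_inv = true;

	memcpy(ctx->key, key, len);
	ctx->keylen = len;
}
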
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2134daef24f6..43962bc709c6 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -87,12 +87,14 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
input_length &= 15;
if (unlikely(cbcmac && input_length)) {
+ token[0].stat = 0;
token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
token[1].packet_length = 16 - input_length;
token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
} else {
token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ eip197_noop_token(&token[1]);
}
token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
@@ -101,6 +103,8 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
token[2].packet_length = result_length;
token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+ eip197_noop_token(&token[3]);
}
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
@@ -111,6 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
+ cdesc->control_data.control1 = 0;
/*
* Copy the input digest if needed, and setup the context
@@ -277,7 +282,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
sreq->processed = sreq->block_sz;
sreq->hmac = 0;
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE)
+ ctx->base.needs_inv = true;
areq->nbytes = 0;
safexcel_ahash_enqueue(areq);
@@ -314,6 +320,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
+ struct safexcel_token *dmmy;
int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
u64 queued, len;
@@ -397,7 +404,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
req->cache_dma, cache_len,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma,
+ &dmmy);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
goto unmap_cache;
@@ -436,7 +444,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
!(queued - sglen),
sg_dma_address(sg) + skip, sglen,
- len, ctx->base.ctxr_dma);
+ len, ctx->base.ctxr_dma, &dmmy);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
goto unmap_sg;
@@ -1911,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- if (keylen != sizeof(u32)) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
memcpy(ctx->ipad, key, sizeof(u32));
return 0;
@@ -1987,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
@@ -2057,18 +2061,14 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
/* precompute the XCBC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2088,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ret = crypto_cipher_setkey(ctx->kaes,
(u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
AES_MIN_KEY_SIZE);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
@@ -2160,10 +2158,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
int ret, i;
ret = aes_expandkey(&aes, key, len);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
for (i = 0; i < len / sizeof(u32); i++)
ctx->ipad[i + 8] =
@@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(ctx->kaes, key, len);
- crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) &
- CRYPTO_TFM_RES_MASK);
if (ret)
return ret;
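
In the safexcel_hash.c token changes above, a partial final CBC-MAC block is padded by an INSERT token of 16 - input_length bytes so the engine always hashes whole AES blocks. The same arithmetic in a self-contained sketch (not driver code; the pad is modeled here as zero bytes, and the caller is assumed to have reserved room for it):

#include <stddef.h>
#include <string.h>

static size_t cbcmac_pad(unsigned char *buf, size_t len)
{
	size_t rem = len & 15;		/* bytes used in the final block */
	size_t pad = rem ? 16 - rem : 0;

	memset(buf + len, 0, pad);	/* zero-fill up to the block edge */
	return len + pad;		/* now a multiple of 16 */
}
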
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 9237ba745c2f..e454c3d44f07 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -14,6 +14,11 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *cdr,
struct safexcel_desc_ring *rdr)
{
+ int i;
+ struct safexcel_command_desc *cdesc;
+ dma_addr_t atok;
+
+ /* Actual command descriptor ring */
cdr->offset = priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
cdr->offset * EIP197_DEFAULT_RING_SIZE,
@@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
+ /* Command descriptor shadow ring for storing additional token data */
+ cdr->shoffset = priv->config.cdsh_offset;
+ cdr->shbase = dmam_alloc_coherent(priv->dev,
+ cdr->shoffset *
+ EIP197_DEFAULT_RING_SIZE,
+ &cdr->shbase_dma, GFP_KERNEL);
+ if (!cdr->shbase)
+ return -ENOMEM;
+ cdr->shwrite = cdr->shbase;
+ cdr->shbase_end = cdr->shbase + cdr->shoffset *
+ (EIP197_DEFAULT_RING_SIZE - 1);
+
+ /*
+ * Populate command descriptors with physical pointers to shadow descs.
+ * Note that we only need to do this once if we don't overwrite them.
+ */
+ cdesc = cdr->base;
+ atok = cdr->shbase_dma;
+ for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
+ cdesc->atok_lo = lower_32_bits(atok);
+ cdesc->atok_hi = upper_32_bits(atok);
+ cdesc = (void *)cdesc + cdr->offset;
+ atok += cdr->shoffset;
+ }
+
rdr->offset = priv->config.rd_offset;
+ /* Use shoffset for result token offset here */
+ rdr->shoffset = priv->config.res_offset;
rdr->base = dmam_alloc_coherent(priv->dev,
rdr->offset * EIP197_DEFAULT_RING_SIZE,
&rdr->base_dma, GFP_KERNEL);
@@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
-static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_desc_ring *ring)
+static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ bool first,
+ struct safexcel_token **atoken)
{
void *ptr = ring->write;
+ if (first)
+ *atoken = ring->shwrite;
+
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+
+ if (ring->write == ring->base_end) {
+ ring->write = ring->base;
+ ring->shwrite = ring->shbase;
+ } else {
+ ring->write += ring->offset;
+ ring->shwrite += ring->shoffset;
+ }
+
+ return ptr;
+}
+
+static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
+ struct safexcel_desc_ring *ring,
+ struct result_data_desc **rtoken)
+{
+ void *ptr = ring->write;
+
+ /* Result token at relative offset shoffset */
+ *rtoken = ring->write + ring->shoffset;
+
if ((ring->write == ring->read - ring->offset) ||
(ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
@@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
if (ring->write == ring->read)
return;
- if (ring->write == ring->base)
+ if (ring->write == ring->base) {
ring->write = ring->base_end;
- else
+ ring->shwrite = ring->shbase_end;
+ } else {
ring->write -= ring->offset;
+ ring->shwrite -= ring->shoffset;
+ }
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
bool first, bool last,
dma_addr_t data, u32 data_len,
u32 full_data_len,
- dma_addr_t context) {
+ dma_addr_t context,
+ struct safexcel_token **atoken)
+{
struct safexcel_command_desc *cdesc;
- int i;
- cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+ cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
+ first, atoken);
if (IS_ERR(cdesc))
return cdesc;
- memset(cdesc, 0, sizeof(struct safexcel_command_desc));
-
- cdesc->first_seg = first;
- cdesc->last_seg = last;
cdesc->particle_size = data_len;
+ cdesc->rsvd0 = 0;
+ cdesc->last_seg = last;
+ cdesc->first_seg = first;
+ cdesc->additional_cdata_size = 0;
+ cdesc->rsvd1 = 0;
cdesc->data_lo = lower_32_bits(data);
cdesc->data_hi = upper_32_bits(data);
- if (first && context) {
- struct safexcel_token *token =
- (struct safexcel_token *)cdesc->control_data.token;
-
+ if (first) {
/*
* Note that the length here MUST be >0 or else the EIP(1)97
* may hang. Newer EIP197 firmware actually incorporates this
@@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
cdesc->control_data.packet_length = full_data_len ?: 1;
cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
EIP197_OPTION_64BIT_CTX |
- EIP197_OPTION_CTX_CTRL_IN_CMD;
- cdesc->control_data.context_lo =
- (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+ EIP197_OPTION_CTX_CTRL_IN_CMD |
+ EIP197_OPTION_RC_AUTO;
+ cdesc->control_data.type = EIP197_TYPE_BCLA;
+ cdesc->control_data.context_lo = lower_32_bits(context) |
+ EIP197_CONTEXT_SMALL;
cdesc->control_data.context_hi = upper_32_bits(context);
-
- if (priv->version == EIP197B_MRVL ||
- priv->version == EIP197D_MRVL)
- cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
-
- /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
- cdesc->control_data.refresh = 2;
-
- for (i = 0; i < EIP197_MAX_TOKENS; i++)
- eip197_noop_token(&token[i]);
}
return cdesc;
@@ -171,19 +227,27 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
dma_addr_t data, u32 len)
{
struct safexcel_result_desc *rdesc;
+ struct result_data_desc *rtoken;
- rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+ rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
+ &rtoken);
if (IS_ERR(rdesc))
return rdesc;
- memset(rdesc, 0, sizeof(struct safexcel_result_desc));
-
- rdesc->first_seg = first;
+ rdesc->particle_size = len;
+ rdesc->rsvd0 = 0;
+ rdesc->descriptor_overflow = 0;
+ rdesc->buffer_overflow = 0;
rdesc->last_seg = last;
+ rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
- rdesc->particle_size = len;
+ rdesc->rsvd1 = 0;
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
+ /* Clear length & error code in result token */
+ rtoken->packet_length = 0;
+ rtoken->error_code = 0;
+
return rdesc;
}
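
safexcel_ring_next_cwptr() above advances the command-descriptor write pointer and its shadow-ring twin in lockstep, so every command descriptor keeps a fixed companion slot for its token data; both wrap from base_end back to base together. An index-based userspace model of the same full-check and wrap logic (the ring size and names are illustrative, and byte offsets are reduced to slot indices):

#include <errno.h>

#define RING_SIZE 8

struct model_ring {
	unsigned int read, write;	/* slot indices, not byte offsets */
};

static int ring_next(struct model_ring *r, unsigned int *cmd_slot,
		     unsigned int *shadow_slot)
{
	/* Full when the write pointer sits one slot behind the read pointer */
	if ((r->write + 1) % RING_SIZE == r->read)
		return -ENOMEM;

	*cmd_slot = r->write;		/* command descriptor slot */
	*shadow_slot = r->write;	/* matching shadow token slot */
	r->write = (r->write + 1) % RING_SIZE;
	return 0;
}
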
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 391e3b4df364..ad73fc946682 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -740,7 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ int err;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
@@ -757,12 +757,13 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
case 24: keylen_cfg = MOD_AES192; break;
case 32: keylen_cfg = MOD_AES256; break;
default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else {
- crypto_des_verify_key(tfm, key);
+ err = crypto_des_verify_key(tfm, key);
+ if (err)
+ return err;
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
@@ -819,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
@@ -835,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1096,7 +1086,6 @@ free_buf_src:
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
- u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_maxauthsize(tfm);
int ret;
@@ -1120,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
- if (ret)
- goto out;
-
- if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
- ret = -EINVAL;
- goto out;
- } else {
- *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
- }
- }
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
@@ -1169,7 +1147,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d8e8c857770c..c24f34a48cef 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
int i;
ret = aes_expandkey(&ctx->aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return ret;
- }
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 90880a81c534..78d660d963e2 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
break;
default:
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
break;
default:
- crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -1033,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
- crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f438b425c655..435ac1c83df9 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- unsigned int ret;
/*
* AES 128 is supported by the hardware, store key into temporary
@@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
- if (!ret)
- return 0;
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
+ return crypto_sync_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 63bd565048f4..f5c468f2cc82 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
ctx->enc_type |= ENC_TYPE_ALG_AES256;
break;
default:
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9bbedbccfadf..32dc00dc570b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -13,6 +13,7 @@
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
struct aead_request *req = dd->aead_req;
- dd->flags &= ~FLAGS_BUSY;
dd->in_sg = NULL;
dd->out_sg = NULL;
- req->base.complete(&req->base, ret);
+ crypto_finalize_aead_request(dd->engine, req, ret);
+
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
}
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
}
omap_aes_gcm_finish_req(dd, ret);
- omap_aes_gcm_handle_queue(dd, NULL);
}
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -120,11 +122,16 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_ASSOC_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
if (cryptlen) {
tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
+ if (nsg)
+ sg_unmark_end(dd->in_sgl);
+
ret = omap_crypto_align_sg(&tmp, cryptlen,
AES_BLOCK_SIZE, &dd->in_sgl[nsg],
OMAP_CRYPTO_COPY_DATA |
@@ -132,6 +139,8 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
FLAGS_IN_DATA_ST_SHIFT,
&dd->flags);
+ if (ret)
+ return ret;
}
dd->in_sg = dd->in_sgl;
@@ -142,18 +151,20 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->out_sg = req->dst;
dd->orig_out = req->dst;
- dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+ dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
flags = 0;
if (req->src == req->dst || dd->out_sg == sg_arr)
flags |= OMAP_CRYPTO_FORCE_COPY;
- ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
- AES_BLOCK_SIZE, &dd->out_sgl,
- flags,
- FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
- if (ret)
- return ret;
+ if (cryptlen) {
+ ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
+ AES_BLOCK_SIZE, &dd->out_sgl,
+ flags,
+ FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+ if (ret)
+ return ret;
+ }
dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
@@ -161,62 +172,12 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
return 0;
}
-static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
-{
- struct omap_aes_gcm_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
- struct scatterlist iv_sg, tag_sg;
- struct skcipher_request *sk_req;
- struct omap_aes_gcm_result result;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- int ret = 0;
-
- sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
- if (!sk_req) {
- pr_err("skcipher: Failed to allocate request\n");
- return -ENOMEM;
- }
-
- init_completion(&result.completion);
-
- sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
- sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
- skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- omap_aes_gcm_complete, &result);
- ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
- skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
- NULL);
- ret = crypto_skcipher_encrypt(sk_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret) {
- ret = result.err;
- if (!ret) {
- reinit_completion(&result.completion);
- break;
- }
- }
- /* fall through */
- default:
- pr_err("Encryption of IV failed for GCM mode\n");
- break;
- }
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- skcipher_request_free(sk_req);
- return ret;
+ aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
+ return 0;
}
void omap_aes_gcm_dma_out_callback(void *data)
@@ -246,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
struct aead_request *req)
{
- struct omap_aes_ctx *ctx;
- struct aead_request *backlog;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = aead_enqueue_request(&dd->aead_queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
-
- backlog = aead_get_backlog(&dd->aead_queue);
- req = aead_dequeue_request(&dd->aead_queue);
- if (req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!req)
- return ret;
+ return crypto_transfer_aead_request_to_engine(dd->engine, req);
- if (backlog)
- backlog->base.complete(&backlog->base, -EINPROGRESS);
+ return 0;
+}
- ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int err;
- dd->ctx = ctx;
- rctx->dd = dd;
dd->aead_req = req;
rctx->mode &= FLAGS_MODE_MASK;
@@ -286,16 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
if (err)
return err;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
+ dd->ctx = &ctx->octx;
- if (err) {
- omap_aes_gcm_finish_req(dd, err);
- omap_aes_gcm_handle_queue(dd, NULL);
- }
-
- return ret;
+ return omap_aes_write_ctrl(dd);
}
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -350,36 +288,39 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, ctx->nonce, 4);
+ memcpy(rctx->iv, ctx->octx.nonce, 4);
memcpy(rctx->iv + 4, req->iv, 8);
- return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
+ return crypto_ipsec_check_assoclen(req->assoclen) ?:
+ omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
- memcpy(ctx->key, key, keylen);
- ctx->keylen = keylen;
+ memcpy(ctx->octx.key, key, keylen);
+ ctx->octx.keylen = keylen;
return 0;
}
@@ -387,19 +328,63 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (keylen < 4)
return -EINVAL;
-
keylen -= 4;
- if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256)
- return -EINVAL;
- memcpy(ctx->key, key, keylen);
- memcpy(ctx->nonce, key + keylen, 4);
- ctx->keylen = keylen;
+ ret = aes_expandkey(&ctx->actx, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(ctx->octx.key, key, keylen);
+ memcpy(ctx->octx.nonce, key + keylen, 4);
+ ctx->octx.keylen = keylen;
+
+ return 0;
+}
+
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd = rctx->dd;
+ int ret = 0;
+
+ if (!dd)
+ return -ENODEV;
+
+ if (dd->in_sg_len)
+ ret = omap_aes_crypt_dma_start(dd);
+ else
+ omap_aes_gcm_dma_out_callback(dd);
+
+ return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
}
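
do_encrypt_iv() above shrinks to a single synchronous library call: the key schedule is expanded once in setkey via aes_expandkey(), so encrypting the one-block IV into the tag no longer needs an skcipher request or a completion. The same use of the lib/crypto AES helper, sketched in isolation (kernel context assumed):

#include <crypto/aes.h>

/* ctx->actx was filled earlier by aes_expandkey(&ctx->actx, key, keylen) */
static void encrypt_iv_block(const struct crypto_aes_ctx *actx,
			     u8 tag[AES_BLOCK_SIZE],
			     const u8 iv[AES_BLOCK_SIZE])
{
	aes_encrypt(actx, tag, iv);	/* single-block encrypt, cannot fail */
}
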
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a1fc03ed01f3..824ddf2a66ff 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
struct scatterlist *out_sg,
int in_sg_len, int out_sg_len)
{
- struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
struct dma_slave_config cfg;
int ret;
if (dd->pio_only) {
scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ if (out_sg_len)
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
/* No callback necessary */
tx_in->callback_param = dd;
+ tx_in->callback = NULL;
/* OUT */
- ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
- if (ret) {
- dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
- ret);
- return ret;
- }
+ if (out_sg_len) {
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
- tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!tx_out) {
- dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
- return -EINVAL;
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+ out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ cb_desc = tx_out;
+ } else {
+ cb_desc = tx_in;
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ cb_desc->callback = omap_aes_gcm_dma_out_callback;
else
- tx_out->callback = omap_aes_dma_out_callback;
- tx_out->callback_param = dd;
+ cb_desc->callback = omap_aes_dma_out_callback;
+ cb_desc->callback_param = dd;
+
dmaengine_submit(tx_in);
- dmaengine_submit(tx_out);
+ if (tx_out)
+ dmaengine_submit(tx_out);
dma_async_issue_pending(dd->dma_lch_in);
- dma_async_issue_pending(dd->dma_lch_out);
+ if (out_sg_len)
+ dma_async_issue_pending(dd->dma_lch_out);
/* start DMA */
dd->pdata->trigger(dd, dd->total);
@@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
return -EINVAL;
}
- err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
- if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- return -EINVAL;
+ if (dd->out_sg_len) {
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
}
}
@@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
+ if (dd->out_sg_len)
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
}
return err;
@@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
return omap_aes_crypt_dma_start(dd);
}
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
+}
+
static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
@@ -494,12 +517,16 @@ static void omap_aes_done_task(unsigned long data)
omap_aes_crypt_dma_stop(dd);
}
- omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+ omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
FLAGS_IN_DATA_ST_SHIFT, dd->flags);
- omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+ omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ /* Update IV output */
+ if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+ omap_aes_copy_ivout(dd, dd->req->iv);
+
omap_aes_finish_req(dd, 0);
pr_debug("exit\n");
@@ -513,6 +540,9 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct omap_aes_dev *dd;
int ret;
+ if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+ return -EINVAL;
+
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
@@ -627,36 +657,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
-{
- struct omap_aes_dev *dd = NULL;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int err;
-
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
-
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
-
- tfm->reqsize = sizeof(struct omap_aes_reqctx);
- ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- if (IS_ERR(ctx->ctr)) {
- pr_warn("could not load aes driver for encrypting IV\n");
- return PTR_ERR(ctx->ctr);
- }
-
- return 0;
-}
-
static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -667,19 +667,6 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
ctx->fallback = NULL;
}
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
-
- ctx->fallback = NULL;
-
- if (ctx->ctr)
- crypto_free_skcipher(ctx->ctr);
-}
-
/* ********************** ALGS ************************************ */
static struct skcipher_alg algs_ecb_cbc[] = {
@@ -732,7 +719,7 @@ static struct skcipher_alg algs_ctr[] = {
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct omap_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -763,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
.encrypt = omap_aes_gcm_encrypt,
.decrypt = omap_aes_gcm_decrypt,
},
@@ -783,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
},
@@ -1296,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
+
+ sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
return 0;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d3575231e31..2d111bf906e1 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -9,6 +9,7 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/aes.h>
#include <crypto/engine.h>
#define DST_MAXBURST 4
@@ -79,7 +80,6 @@
#define FLAGS_INIT BIT(5)
#define FLAGS_FAST BIT(6)
-#define FLAGS_BUSY BIT(7)
#define FLAGS_IN_DATA_ST_SHIFT 8
#define FLAGS_OUT_DATA_ST_SHIFT 10
@@ -98,7 +98,11 @@ struct omap_aes_ctx {
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
struct crypto_sync_skcipher *fallback;
- struct crypto_skcipher *ctr;
+};
+
+struct omap_aes_gcm_ctx {
+ struct omap_aes_ctx octx;
+ struct crypto_aes_ctx actx;
};
struct omap_aes_reqctx {
@@ -202,8 +206,12 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int omap_aes_gcm_encrypt(struct aead_request *req);
int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
int omap_aes_4106gcm_encrypt(struct aead_request *req);
int omap_aes_4106gcm_decrypt(struct aead_request *req);
+int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize);
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm);
int omap_aes_write_ctrl(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 7d592d93bb1c..cc88b7362bc2 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -154,6 +154,41 @@ int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
+static void omap_crypto_copy_data(struct scatterlist *src,
+ struct scatterlist *dst,
+ int offset, int len)
+{
+ int amt;
+ void *srcb, *dstb;
+ int srco = 0, dsto = offset;
+
+ while (src && dst && len) {
+ if (srco >= src->length) {
+ srco -= src->length;
+ src = sg_next(src);
+ continue;
+ }
+
+ if (dsto >= dst->length) {
+ dsto -= dst->length;
+ dst = sg_next(dst);
+ continue;
+ }
+
+ amt = min(src->length - srco, dst->length - dsto);
+ amt = min(len, amt);
+
+ srcb = sg_virt(src) + srco;
+ dstb = sg_virt(dst) + dsto;
+
+ memcpy(dstb, srcb, amt);
+
+ srco += amt;
+ dsto += amt;
+ len -= amt;
+ }
+}
+
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
int offset, int len, u8 flags_shift,
unsigned long flags)
@@ -171,7 +206,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
pages = get_order(len);
if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
- scatterwalk_map_and_copy(buf, orig, offset, len, 1);
+ omap_crypto_copy_data(sg, orig, offset, len);
if (flags & OMAP_CRYPTO_DATA_COPIED)
free_pages((unsigned long)buf, pages);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 4c4dbc2b377e..8eda43319204 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -597,6 +597,7 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
static void omap_des_done_task(unsigned long data)
{
struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ int i;
pr_debug("enter done_task\n");
@@ -615,6 +616,11 @@ static void omap_des_done_task(unsigned long data)
omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ if ((dd->flags & FLAGS_CBC) && dd->req->iv)
+ for (i = 0; i < 2; i++)
+ ((u32 *)dd->req->iv)[i] =
+ omap_des_read(dd, DES_REG_IV(dd, i));
+
omap_des_finish_req(dd, 0);
pr_debug("exit\n");
@@ -631,10 +637,11 @@ static int omap_des_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
+ if (!req->cryptlen)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
return -EINVAL;
- }
dd = omap_des_find_dev(ctx);
if (!dd)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ac80bc6af093..4f915a4ef5b0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,6 +112,8 @@
#define FLAGS_BE32_SHA1 8
#define FLAGS_SGS_COPIED 9
#define FLAGS_SGS_ALLOCED 10
+#define FLAGS_HUGE 11
+
/* context flags */
#define FLAGS_FINUP 16
@@ -136,6 +138,8 @@
#define BUFLEN SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD 256
+#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048)
+
struct omap_sham_dev;
struct omap_sham_reqctx {
@@ -644,6 +648,8 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
struct scatterlist *tmp;
int offset = ctx->offset;
+ ctx->total = new_len;
+
if (ctx->bufcnt)
n++;
@@ -661,15 +667,16 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
tmp = sg_next(tmp);
ctx->sg_len++;
+ new_len -= ctx->bufcnt;
}
while (sg && new_len) {
int len = sg->length - offset;
- if (offset) {
+ if (len <= 0) {
offset -= sg->length;
- if (offset < 0)
- offset = 0;
+ sg = sg_next(sg);
+ continue;
}
if (new_len < len)
@@ -677,33 +684,37 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
if (len > 0) {
new_len -= len;
- sg_set_page(tmp, sg_page(sg), len, sg->offset);
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
+ offset = 0;
+ ctx->offset = 0;
+ ctx->sg_len++;
if (new_len <= 0)
- sg_mark_end(tmp);
+ break;
tmp = sg_next(tmp);
- ctx->sg_len++;
}
sg = sg_next(sg);
}
+ if (tmp)
+ sg_mark_end(tmp);
+
set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
- struct scatterlist *sg, int bs, int new_len)
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
{
int pages;
void *buf;
- int len;
-
- len = new_len + ctx->bufcnt;
- pages = get_order(ctx->total);
+ pages = get_order(new_len);
buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
if (!buf) {
@@ -715,14 +726,15 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
- ctx->total - ctx->bufcnt, 0);
+ min(new_len, ctx->total) - ctx->bufcnt, 0);
sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, buf, len);
+ sg_set_buf(ctx->sgl, buf, new_len);
ctx->sg = ctx->sgl;
set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
ctx->sg_len = 1;
+ ctx->offset += new_len - ctx->bufcnt;
ctx->bufcnt = 0;
- ctx->offset = 0;
+ ctx->total = new_len;
return 0;
}
@@ -737,6 +749,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
struct scatterlist *sg_tmp = sg;
int new_len;
int offset = rctx->offset;
+ int bufcnt = rctx->bufcnt;
if (!sg || !sg->length || !nbytes)
return 0;
@@ -751,12 +764,28 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
else
new_len = (new_len - 1) / bs * bs;
+ if (!new_len)
+ return 0;
+
if (nbytes != new_len)
list_ok = false;
while (nbytes > 0 && sg_tmp) {
n++;
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ list_ok = false;
+
+ continue;
+ }
+
#ifdef CONFIG_ZONE_DMA
if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
aligned = false;
@@ -794,13 +823,27 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
}
}
+ if (new_len > OMAP_SHA_MAX_DMA_LEN) {
+ new_len = OMAP_SHA_MAX_DMA_LEN;
+ aligned = false;
+ }
+
if (!aligned)
return omap_sham_copy_sgs(rctx, sg, bs, new_len);
else if (!list_ok)
return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+ rctx->total = new_len;
+ rctx->offset += new_len;
rctx->sg_len = n;
- rctx->sg = sg;
+ if (rctx->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
return 0;
}
@@ -810,31 +853,35 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
- int nbytes;
+ unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
- int xmit_len, hash_later;
+ int hash_later;
bs = get_block_size(rctx);
+ nbytes = rctx->bufcnt;
+
if (update)
- nbytes = req->nbytes;
- else
- nbytes = 0;
+ nbytes += req->nbytes - rctx->offset;
- rctx->total = nbytes + rctx->bufcnt;
+ dev_dbg(rctx->dd->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset,
+ rctx->bufcnt);
- if (!rctx->total)
+ if (!nbytes)
return 0;
- if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
int len = bs - rctx->bufcnt % bs;
- if (len > nbytes)
- len = nbytes;
+ if (len > req->nbytes)
+ len = req->nbytes;
scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
0, len, 0);
rctx->bufcnt += len;
- nbytes -= len;
rctx->offset = len;
}
@@ -845,64 +892,25 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (ret)
return ret;
- xmit_len = rctx->total;
-
- if (!IS_ALIGNED(xmit_len, bs)) {
- if (final)
- xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
- else
- xmit_len = xmit_len / bs * bs;
- } else if (!final) {
- xmit_len -= bs;
- }
-
- hash_later = rctx->total - xmit_len;
+ hash_later = nbytes - rctx->total;
if (hash_later < 0)
hash_later = 0;
- if (rctx->bufcnt && nbytes) {
- /* have data from previous operation and current */
- sg_init_table(rctx->sgl, 2);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
-
- sg_chain(rctx->sgl, 2, req->src);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len++;
- } else if (rctx->bufcnt) {
- /* have buffered data only */
- sg_init_table(rctx->sgl, 1);
- sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
-
- rctx->sg = rctx->sgl;
-
- rctx->sg_len = 1;
- }
-
if (hash_later) {
- int offset = 0;
-
- if (hash_later > req->nbytes) {
- memcpy(rctx->buffer, rctx->buffer + xmit_len,
- hash_later - req->nbytes);
- offset = hash_later - req->nbytes;
- }
-
- if (req->nbytes) {
- scatterwalk_map_and_copy(rctx->buffer + offset,
- req->src,
- offset + req->nbytes -
- hash_later, hash_later, 0);
- }
+ scatterwalk_map_and_copy(rctx->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
rctx->bufcnt = hash_later;
} else {
rctx->bufcnt = 0;
}
- if (!final)
- rctx->total = xmit_len;
+ if (hash_later > rctx->buflen)
+ set_bit(FLAGS_HUGE, &rctx->dd->flags);
+
+ rctx->total = min(nbytes, rctx->total);
return 0;
}
@@ -998,10 +1006,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
- bool final = ctx->flags & BIT(FLAGS_FINUP);
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
ctx->total < dd->fallback_sz)
@@ -1024,6 +1033,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
+ if (dd->flags & BIT(FLAGS_HUGE))
+ return 0;
+
if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
/*
* faster to handle last block with cpu or
@@ -1083,7 +1095,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length + ctx->bufcnt));
+ get_order(ctx->sg->length));
if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
kfree(ctx->sg);
@@ -1092,6 +1104,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ if (dd->flags & BIT(FLAGS_HUGE)) {
+ dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
+ omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS &&
+ (ctx->flags & BIT(FLAGS_FINUP)))
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ omap_sham_final_req(dd);
+ }
+ return;
+ }
+
if (!err) {
dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1107,6 +1134,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
+ ctx->offset = 0;
+
if (req->base.complete)
req->base.complete(&req->base, err);
}
@@ -1158,7 +1187,7 @@ retry:
/* request has changed - restore hash */
dd->pdata->copy_hash(req, 0);
- if (ctx->op == OP_UPDATE) {
+ if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
err = omap_sham_update_req(dd);
if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
@@ -1730,6 +1759,8 @@ static void omap_sham_done_task(unsigned long data)
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
int err = 0;
+ dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
+
if (!test_bit(FLAGS_BUSY, &dd->flags)) {
omap_sham_handle_queue(dd, NULL);
return;
@@ -2223,6 +2254,8 @@ static int omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
+ sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
+
return 0;
}
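
When omap_sham_align_sgs() above has leftover bytes in the driver's bounce buffer, it prepends them to the caller's scatterlist with sg_chain() instead of copying the payload. That chaining step in isolation (kernel context assumed; the helper name is illustrative):

#include <linux/scatterlist.h>

static void chain_bounce_buf(struct scatterlist sgl[2], void *buf,
			     unsigned int buflen,
			     struct scatterlist *payload)
{
	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], buf, buflen);	/* buffered bytes first */
	sg_chain(sgl, 2, payload);		/* then the request data */
}
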
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index c5b60f50e1b5..594d6b1695d5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
- u32 *flags = &tfm->crt_flags;
struct crypto_aes_ctx gen_aes;
int cpu;
- if (key_len % 8) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (key_len % 8)
return -EINVAL;
- }
/*
* If the hardware is capable of generating the extended key
@@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ctx->cword.encrypt.keygen = 1;
ctx->cword.decrypt.keygen = 1;
- if (aes_expandkey(&gen_aes, in_key, key_len)) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (aes_expandkey(&gen_aes, in_key, key_len))
return -EINVAL;
- }
memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index ddf1b549fdca..c826abe79e79 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -190,13 +190,11 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
return padlock_sha256_finup(desc, buf, 0, out);
}
-static int padlock_cra_init(struct crypto_tfm *tfm)
+static int padlock_init_tfm(struct crypto_shash *hash)
{
- struct crypto_shash *hash = __crypto_shash_cast(tfm);
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
struct crypto_shash *fallback_tfm;
- int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
@@ -204,21 +202,17 @@ static int padlock_cra_init(struct crypto_tfm *tfm)
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
+ return PTR_ERR(fallback_tfm);
}
ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-
-out:
- return err;
}
-static void padlock_cra_exit(struct crypto_tfm *tfm)
+static void padlock_exit_tfm(struct crypto_shash *hash)
{
- struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
crypto_free_shash(ctx->fallback);
}
@@ -231,6 +225,8 @@ static struct shash_alg sha1_alg = {
.final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha1_state),
.base = {
@@ -241,8 +237,6 @@ static struct shash_alg sha1_alg = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
@@ -254,6 +248,8 @@ static struct shash_alg sha256_alg = {
.final = padlock_sha256_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
+ .init_tfm = padlock_init_tfm,
+ .exit_tfm = padlock_exit_tfm,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha256_state),
.base = {
@@ -264,8 +260,6 @@ static struct shash_alg sha256_alg = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
- .cra_init = padlock_cra_init,
- .cra_exit = padlock_cra_exit,
}
};
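
padlock-sha is converted from the generic cra_init/cra_exit hooks to the shash-specific .init_tfm/.exit_tfm, which receive a struct crypto_shash directly and so drop the __crypto_shash_cast()/crypto_tfm_ctx() casts. A minimal sketch of the pattern (demo_sha_ctx is hypothetical):

struct demo_sha_ctx {
	struct crypto_shash *fallback;
};

static int demo_init_tfm(struct crypto_shash *hash)
{
	struct demo_sha_ctx *ctx = crypto_shash_ctx(hash);

	ctx->fallback = crypto_alloc_shash(crypto_shash_alg_name(hash), 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* reserve room behind our descriptor for the fallback's state */
	hash->descsize += crypto_shash_descsize(ctx->fallback);
	return 0;
}

static void demo_exit_tfm(struct crypto_shash *hash)
{
	struct demo_sha_ctx *ctx = crypto_shash_ctx(hash);

	crypto_free_shash(ctx->fallback);
}

Returning PTR_ERR() directly also removes the out: label and the unused -ENOMEM default seen in the old code.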
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 29da449b3e9e..7384e91c8b32 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK);
if (err)
return err;
@@ -490,7 +487,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
badkey:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -780,10 +776,8 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
int err = 0;
- if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (len > AES_MAX_KEY_SIZE)
return -EINVAL;
- }
/*
* IPSec engine only supports 128 and 256 bit AES keys. If we get a
@@ -805,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
CRYPTO_TFM_REQ_MASK);
err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
- CRYPTO_TFM_RES_MASK;
-
if (err)
goto sw_setkey_failed;
}
@@ -830,7 +818,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
int err = 0;
if (len > AES_MAX_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
err = -EINVAL;
goto out;
}
@@ -1595,6 +1582,11 @@ static const struct of_device_id spacc_of_id_table[] = {
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
+static void spacc_tasklet_kill(void *data)
+{
+ tasklet_kill(data);
+}
+
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret;
@@ -1637,6 +1629,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
+ &engine->complete);
+ if (ret)
+ return ret;
+
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1694,8 +1694,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
platform_set_drvdata(pdev, engine);
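
Moving tasklet_init() ahead of the IRQ request and registering tasklet_kill() as a devm action fixes an ordering hazard: devm releases resources in reverse order of registration, so the IRQ is freed before the tasklet is killed, and the handler can no longer schedule a tasklet that is being torn down. A minimal sketch of the idiom (demo_* names are hypothetical):

struct demo_engine {
	struct tasklet_struct complete;
};

static void demo_tasklet_kill(void *data)
{
	tasklet_kill(data);
}

static int demo_init_irq(struct device *dev, struct demo_engine *engine,
			 int irq, void (*fn)(unsigned long),
			 irq_handler_t handler)
{
	int ret;

	tasklet_init(&engine->complete, fn, (unsigned long)engine);

	/* registered before the IRQ, so it runs after the IRQ is freed */
	ret = devm_add_action(dev, demo_tasklet_kill, &engine->complete);
	if (ret)
		return ret;

	return devm_request_irq(dev, irq, handler, 0, "demo", engine);
}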
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 35bca76b640f..833bb1d3a11b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
@@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
- goto bad_key;
+ return -EINVAL;
qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 8caa04e1ec43..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- skcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index da1188abc9ba..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,6 +225,87 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 0a44a6eeacf5..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 40a59214d2e1..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 95ab16fc8fd6..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index fee07323f8f9..4217b745f124 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data)
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data)
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
@@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (keylen) {
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
break;
@@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
@@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm)
memset(ctx, 0, sizeof(*ctx));
crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
+
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ qce_skcipher_init(tfm);
ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
@@ -270,6 +282,7 @@ struct qce_skcipher_def {
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
def->drv_name);
alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
alg->ivsize = def->ivsize;
alg->min_keysize = def->min_keysize;
alg->max_keysize = def->max_keysize;
@@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
alg->base.cra_module = THIS_MODULE;
- alg->init = qce_skcipher_init;
- alg->exit = qce_skcipher_exit;
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
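
The qce XTS fixes all follow from one fact: an xts(aes) key blob is two AES keys back to back, so validation has to look at keylen / 2 and the advertised min/max key sizes double. A minimal sketch of the check (IS_XTS() is the driver's own flag helper; constants from <crypto/aes.h>):

static int demo_check_aes_keylen(unsigned long flags, unsigned int keylen)
{
	/* e.g. a 64-byte xts(aes) key is two concatenated 256-bit keys */
	unsigned int single = IS_XTS(flags) ? keylen >> 1 : keylen;

	if (single != AES_KEYSIZE_128 && single != AES_KEYSIZE_256)
		return -EINVAL;

	return 0;
}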
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index ca4de4ddfe1f..4a75c8e1fa6c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
- keylen != AES_KEYSIZE_256) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ keylen != AES_KEYSIZE_256)
return -EINVAL;
- }
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d4ea2f11ca68..466e30bd529c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- int ret;
ctx->keylen = keylen;
@@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-
- tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
- return ret;
+ return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 1aba9372cd23..4ef3eb11361c 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -4,7 +4,7 @@ config CRYPTO_DEV_STM32_CRC
depends on ARCH_STM32
select CRYPTO_HASH
help
- This enables support for the CRC32 hw accelerator which can be found
+ This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_HASH
@@ -17,7 +17,7 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_SHA256
select CRYPTO_ENGINE
help
- This enables support for the HASH hw accelerator which can be found
+ This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
config CRYPTO_DEV_STM32_CRYP
@@ -27,5 +27,5 @@ config CRYPTO_DEV_STM32_CRYP
select CRYPTO_ENGINE
select CRYPTO_LIB_DES
help
- This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 9e11c3480353..8e92e4ac79f1 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != sizeof(u32))
return -EINVAL;
- }
mctx->key = get_unaligned_le32(key);
return 0;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cfc8e0e37bee..167b80eec437 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -518,10 +518,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.dst_maxburst = hdev->dma_maxburst;
dma_conf.device_fc = false;
- hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
- if (!hdev->dma_lch) {
+ hdev->dma_lch = dma_request_chan(hdev->dev, "in");
+ if (IS_ERR(hdev->dma_lch)) {
dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
- return -EBUSY;
+ return PTR_ERR(hdev->dma_lch);
}
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
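
dma_request_chan() returns an ERR_PTR() instead of NULL on failure, so the real error code - including -EPROBE_DEFER - propagates to the caller rather than being flattened to -EBUSY. A minimal sketch of the conversion pattern (demo names hypothetical):

static int demo_dma_init(struct device *dev, struct dma_chan **lch)
{
	*lch = dma_request_chan(dev, "in");
	if (IS_ERR(*lch)) {
		/* stay quiet on probe deferral; the driver core retries */
		if (PTR_ERR(*lch) != -EPROBE_DEFER)
			dev_err(dev, "couldn't acquire a slave DMA channel\n");
		return PTR_ERR(*lch);
	}

	return 0;
}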
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index d71d65846e47..9c6db7f698c4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc,
return 0;
badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
- goto badkey;
+ goto out;
err = -EINVAL;
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
+ goto out;
err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
if (err)
@@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
out:
memzero_explicit(&keys, sizeof(keys));
return err;
-
-badkey:
- crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
- goto out;
}
static void talitos_sg_unmap(struct device *dev,
@@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
keylen == AES_KEYSIZE_256)
return skcipher_setkey(cipher, key, keylen);
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
return -EINVAL;
}
@@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Must get the hash of the long key */
ret = keyhash(tfm, key, keylen, hash);
- if (ret) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (ret)
return -EINVAL;
- }
keysize = digestsize;
memcpy(ctx->key, hash, digestsize);
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index b731895aa241..f56d65c56ccf 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -11,18 +11,18 @@ config CRYPTO_DEV_UX500_CRYP
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
help
- This selects the crypto driver for the UX500_CRYP hardware. It supports
- AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
+ This selects the crypto driver for the UX500_CRYP hardware. It supports
+ AES-ECB, CBC and CTR with key sizes of 128, 192 and 256 bits.
config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
+ help
+ This selects the hash driver for the UX500_HASH hardware.
+ Depends on UX500/STM DMA if running in DMA mode.
config CRYPTO_DEV_UX500_DEBUG
bool "Activate ux500 platform debug-mode for crypto and hash block"
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 95fb694a2667..800dfc4d16c4 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
@@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher,
default:
pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 4b71e80951b7..fd045e64972a 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions(
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
- goto bad_key;
+ return -EINVAL;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
- goto bad_key;
+ return -EINVAL;
/* Create encryption session */
ret = virtio_crypto_alg_skcipher_init_session(ctx,
@@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions(
return ret;
}
return 0;
-
-bad_key:
- crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
/* Note: kernel crypto API realization */
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index d59e736882f6..9fee1b1532a4 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
u8 tweak[AES_BLOCK_SIZE];
int ret;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
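
The new check encodes an XTS invariant: even with ciphertext stealing, a message needs at least one full block, because stealing can only extend the final full block to cover a trailing partial one. For example, 17 bytes works (one full block plus one stolen byte) but 15 bytes has no full block to steal from. A tiny sketch of the guard:

static bool demo_xts_len_ok(unsigned int cryptlen)
{
	/* ciphertext stealing needs one full block to borrow from */
	return cryptlen >= AES_BLOCK_SIZE;
}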
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 26a654dbc69a..0aa4b6bc5101 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -61,7 +61,7 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
if (!blk_queue_dax(bdev->bd_queue))
return NULL;
- return fs_dax_get_by_host(bdev->bd_disk->disk_name);
+ return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index defe1d438710..0b1df12e0f21 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -77,13 +77,12 @@ config DEVFREQ_GOV_PASSIVE
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ
- tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_PASSIVE
select DEVFREQ_EVENT_EXYNOS_PPMU
select PM_DEVFREQ_EVENT
- select PM_OPP
help
This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
Memory bus has one more group of memory bus (e.g, MIF and INT block).
@@ -92,13 +91,23 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_IMX8M_DDRC_DEVFREQ
+ tristate "i.MX8M DDRC DEVFREQ Driver"
+ depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows
+ adjusting DRAM frequency.
+
config ARM_TEGRA_DEVFREQ
tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver"
depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \
ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
ARCH_TEGRA_210_SOC || \
COMPILE_TEST
- select PM_OPP
+ depends on COMMON_CLK
help
This adds the DEVFREQ driver for the Tegra family of SoCs.
It reads ACTMON counters of memory controllers and adjusts the
@@ -109,7 +118,6 @@ config ARM_TEGRA20_DEVFREQ
depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
depends on COMMON_CLK
select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select PM_OPP
help
This adds the DEVFREQ driver for the Tegra20 family of SoCs.
It reads Memory Controller counters and adjusts the operating
@@ -117,15 +125,15 @@ config ARM_TEGRA20_DEVFREQ
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
- depends on ARCH_ROCKCHIP
+ depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT
- select PM_OPP
help
- This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
- It sets the frequency for the memory controller and reads the usage counts
- from hardware.
+ This adds the DEVFREQ driver for the RK3399 DMC (Dynamic Memory Controller).
+ It sets the frequency for the memory controller and reads the usage counts
+ from hardware.
source "drivers/devfreq/event/Kconfig"
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 338ae8440db6..3eb4d5e6635c 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3dc5fd6065a3..8c31b0f2e28f 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
/**
* devfreq_event_remove_edev() - Remove the devfreq-event device registered.
- * @dev : the devfreq-event device
+ * @edev : the devfreq-event device
*
- * Note that this function remove the registered devfreq-event device.
+ * Note that this function removes the registered devfreq-event device.
*/
int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 57f6944d65a6..cceee8bc3c2f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -33,6 +34,7 @@
#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
+static struct dentry *devfreq_debugfs;
/*
* devfreq core provides delayed work based load monitoring helper
@@ -209,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq)
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
int lev, prev_lev, ret = 0;
- unsigned long cur_time;
+ u64 cur_time;
lockdep_assert_held(&devfreq->lock);
- cur_time = jiffies;
+ cur_time = get_jiffies_64();
/* Immediately exit if previous_freq is not initialized yet. */
if (!devfreq->previous_freq)
@@ -224,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
goto out;
}
- devfreq->time_in_state[prev_lev] +=
- cur_time - devfreq->last_stat_updated;
+ devfreq->stats.time_in_state[prev_lev] +=
+ cur_time - devfreq->stats.last_update;
lev = devfreq_get_freq_level(devfreq, freq);
if (lev < 0) {
@@ -234,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
}
if (lev != prev_lev) {
- devfreq->trans_table[(prev_lev *
- devfreq->profile->max_state) + lev]++;
- devfreq->total_trans++;
+ devfreq->stats.trans_table[
+ (prev_lev * devfreq->profile->max_state) + lev]++;
+ devfreq->stats.total_trans++;
}
out:
- devfreq->last_stat_updated = cur_time;
+ devfreq->stats.last_update = cur_time;
return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
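
Switching the statistics to get_jiffies_64() matters on 32-bit kernels, where an unsigned long jiffies counter wraps after roughly 49.7 days at HZ=1000 and would make time_in_state deltas go backwards. A minimal sketch of the accounting with 64-bit timestamps (names hypothetical):

static void demo_account_time(u64 *time_in_state, u64 *last_update, int lev)
{
	u64 now = get_jiffies_64();	/* 64 bits even on 32-bit kernels */

	/* unsigned arithmetic keeps the delta correct across any wrap */
	time_in_state[lev] += now - *last_update;
	*last_update = now;
}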
@@ -535,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
msecs_to_jiffies(devfreq->profile->polling_ms));
out_update:
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.last_update = get_jiffies_64();
devfreq->stop_polling = false;
if (devfreq->profile->get_cur_freq &&
@@ -807,28 +809,29 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
- devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
array3_size(sizeof(unsigned int),
devfreq->profile->max_state,
devfreq->profile->max_state),
GFP_KERNEL);
- if (!devfreq->trans_table) {
+ if (!devfreq->stats.trans_table) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+ devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
devfreq->profile->max_state,
- sizeof(unsigned long),
+ sizeof(*devfreq->stats.time_in_state),
GFP_KERNEL);
- if (!devfreq->time_in_state) {
+ if (!devfreq->stats.time_in_state) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.total_trans = 0;
+ devfreq->stats.last_update = get_jiffies_64();
srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -1259,6 +1262,14 @@ err_out:
}
EXPORT_SYMBOL(devfreq_remove_governor);
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+}
+static DEVICE_ATTR_RO(name);
+
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1461,6 +1472,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%lu\n", min_freq);
}
+static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1501,7 +1513,6 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1580,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev,
devfreq->profile->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
- devfreq->trans_table[(i * max_state) + j]);
- len += sprintf(buf + len, "%10u\n",
- jiffies_to_msecs(devfreq->time_in_state[i]));
+ devfreq->stats.trans_table[(i * max_state) + j]);
+
+ len += sprintf(buf + len, "%10llu\n", (u64)
+ jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
}
len += sprintf(buf + len, "Total transition : %u\n",
- devfreq->total_trans);
+ devfreq->stats.total_trans);
return len;
}
-static DEVICE_ATTR_RO(trans_stat);
+
+static ssize_t trans_stat_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int err, value;
+
+ if (df->profile->max_state == 0)
+ return count;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err || value != 0)
+ return -EINVAL;
+
+ mutex_lock(&df->lock);
+ memset(df->stats.time_in_state, 0, (df->profile->max_state *
+ sizeof(*df->stats.time_in_state)));
+ memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
+ df->profile->max_state,
+ df->profile->max_state));
+ df->stats.total_trans = 0;
+ df->stats.last_update = get_jiffies_64();
+ mutex_unlock(&df->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(trans_stat);
static struct attribute *devfreq_attrs[] = {
+ &dev_attr_name.attr,
&dev_attr_governor.attr,
&dev_attr_available_governors.attr,
&dev_attr_cur_freq.attr,
@@ -1605,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = {
};
ATTRIBUTE_GROUPS(devfreq);
+/**
+ * devfreq_summary_show() - Show the summary of the devfreq devices
+ * @s: seq_file instance to show the summary of devfreq devices
+ * @data: not used
+ *
+ * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
+ * It lets the user inspect the detailed information of the devfreq devices.
+ *
+ * Always returns 0 because it only shows information and changes no data.
+ */
+static int devfreq_summary_show(struct seq_file *s, void *data)
+{
+ struct devfreq *devfreq;
+ struct devfreq *p_devfreq = NULL;
+ unsigned long cur_freq, min_freq, max_freq;
+ unsigned int polling_ms;
+
+ seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
+ "dev_name",
+ "dev",
+ "parent_dev",
+ "governor",
+ "polling_ms",
+ "cur_freq_Hz",
+ "min_freq_Hz",
+ "max_freq_Hz");
+ seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+ "------------------------------",
+ "----------",
+ "----------",
+ "---------------",
+ "----------",
+ "------------",
+ "------------",
+ "------------");
+
+ mutex_lock(&devfreq_list_lock);
+
+ list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+ if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
+ DEVFREQ_NAME_LEN)) {
+ struct devfreq_passive_data *data = devfreq->data;
+
+ if (data)
+ p_devfreq = data->parent;
+ } else {
+ p_devfreq = NULL;
+ }
+#endif
+
+ mutex_lock(&devfreq->lock);
+ cur_freq = devfreq->previous_freq;
+ get_freq_range(devfreq, &min_freq, &max_freq);
+ polling_ms = devfreq->profile->polling_ms;
+ mutex_unlock(&devfreq->lock);
+
+ seq_printf(s,
+ "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
+ dev_name(devfreq->dev.parent),
+ dev_name(&devfreq->dev),
+ p_devfreq ? dev_name(&p_devfreq->dev) : "null",
+ devfreq->governor_name,
+ polling_ms,
+ cur_freq,
+ min_freq,
+ max_freq);
+ }
+
+ mutex_unlock(&devfreq_list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
+
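
DEFINE_SHOW_ATTRIBUTE() hides the usual single_open() boilerplate; for devfreq_summary_show it expands to roughly the following (simplified sketch of the <linux/seq_file.h> helper):

static int devfreq_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, devfreq_summary_show, inode->i_private);
}

static const struct file_operations devfreq_summary_fops = {
	.owner		= THIS_MODULE,
	.open		= devfreq_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

The generated devfreq_summary_fops is what devfreq_init() below hands to debugfs_create_file().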
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -1621,6 +1736,11 @@ static int __init devfreq_init(void)
}
devfreq_class->dev_groups = devfreq_groups;
+ devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
+ debugfs_create_file("devfreq_summary", 0444,
+ devfreq_debugfs, NULL,
+ &devfreq_summary_fops);
+
return 0;
}
subsys_initcall(devfreq_init);
@@ -1814,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res)
/**
* devm_devfreq_register_notifier()
- - Resource-managed devfreq_register_notifier()
+ * - Resource-managed devfreq_register_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
@@ -1850,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
/**
* devm_devfreq_unregister_notifier()
- - Resource-managed devfreq_unregister_notifier()
+ * - Resource-managed devfreq_unregister_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index cef2cf5347ca..878825372f6f 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_NOCP
- tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
select REGMAP_MMIO
@@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
config DEVFREQ_EVENT_EXYNOS_PPMU
- tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
help
@@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
config DEVFREQ_EVENT_ROCKCHIP_DFI
tristate "ROCKCHIP DFI DEVFREQ event Driver"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
This add the devfreq-event driver for Rockchip SoC. It provides DFI
(DDR Monitor Module) driver to count ddr load.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 1c565926db9f..ccc531ee6938 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
index 55cc96284a36..2d6f08cfd0c5 100644
--- a/drivers/devfreq/event/exynos-nocp.h
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 85c7a77bf3f0..17ed980d9099 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
*
* Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
@@ -101,17 +101,22 @@ static struct __exynos_ppmu_events {
PPMU_EVENT(dmc1_1),
};
-static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
{
int i;
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
- if (!strcmp(edev->desc->name, ppmu_events[i].name))
+ if (!strcmp(edev_name, ppmu_events[i].name))
return ppmu_events[i].id;
return -EINVAL;
}
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+ return __exynos_ppmu_find_ppmu_id(edev->desc->name);
+}
+
/*
* The devfreq-event ops structure for PPMU v1.1
*/
@@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np,
* use default if not.
*/
if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
- struct devfreq_event_dev edev;
int id;
/* Not all registers take the same value for
* read+write data count.
*/
- edev.desc = &desc[j];
- id = exynos_ppmu_find_ppmu_id(&edev);
+ id = __exynos_ppmu_find_ppmu_id(desc[j].name);
switch (id) {
case PPMU_PMNCNT0:
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
index 284420047455..97f667d0cbdd 100644
--- a/drivers/devfreq/event/exynos-ppmu.h
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos_ppmu.h - EXYNOS PPMU header file
+ * exynos_ppmu.h - Exynos PPMU header file
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 5d1042188727..9a88faaf8b27 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_dfi *data;
- struct resource *res;
struct devfreq_event_desc *desc;
struct device_node *np = pdev->dev.of_node, *node;
@@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
@@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(data->regmap_pmu))
return PTR_ERR(data->regmap_pmu);
}
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c832673273a2..8fa8eb541373 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -15,11 +15,10 @@
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#define DEFAULT_SATURATION_RATIO 40
@@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
ret = exynos_bus_get_event(bus, &edata);
if (ret < 0) {
+ dev_err(dev, "failed to get event from devfreq-event devices\n");
stat->total_time = stat->busy_time = 0;
goto err;
}
@@ -287,52 +287,12 @@ err_clk:
return ret;
}
-static int exynos_bus_probe(struct platform_device *pdev)
+static int exynos_bus_profile_init(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node, *node;
- struct devfreq_dev_profile *profile;
+ struct device *dev = bus->dev;
struct devfreq_simple_ondemand_data *ondemand_data;
- struct devfreq_passive_data *passive_data;
- struct devfreq *parent_devfreq;
- struct exynos_bus *bus;
- int ret, max_state;
- unsigned long min_freq, max_freq;
- bool passive = false;
-
- if (!np) {
- dev_err(dev, "failed to find devicetree node\n");
- return -EINVAL;
- }
-
- bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
- if (!bus)
- return -ENOMEM;
- mutex_init(&bus->lock);
- bus->dev = &pdev->dev;
- platform_set_drvdata(pdev, bus);
-
- profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
- if (!profile)
- return -ENOMEM;
-
- node = of_parse_phandle(dev->of_node, "devfreq", 0);
- if (node) {
- of_node_put(node);
- passive = true;
- } else {
- ret = exynos_bus_parent_parse_of(np, bus);
- if (ret < 0)
- return ret;
- }
-
- /* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
- if (ret < 0)
- goto err_reg;
-
- if (passive)
- goto passive;
+ int ret;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
@@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
profile->exit = exynos_bus_exit;
ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
- if (!ondemand_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!ondemand_data)
+ return -ENOMEM;
+
ondemand_data->upthreshold = 40;
ondemand_data->downdifferential = 5;
@@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev)
ondemand_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev, "failed to add devfreq device\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
}
/* Register opp_notifier to catch the change of OPP */
ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
if (ret < 0) {
dev_err(dev, "failed to register opp notifier\n");
- goto err;
+ return ret;
}
/*
@@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev)
ret = exynos_bus_enable_edev(bus);
if (ret < 0) {
dev_err(dev, "failed to enable devfreq-event devices\n");
- goto err;
+ return ret;
}
ret = exynos_bus_set_event(bus);
if (ret < 0) {
dev_err(dev, "failed to set event to devfreq-event devices\n");
- goto err;
+ goto err_edev;
}
- goto out;
-passive:
+ return 0;
+
+err_edev:
+ if (exynos_bus_disable_edev(bus))
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+ return ret;
+}
+
+static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
+{
+ struct device *dev = bus->dev;
+ struct devfreq_passive_data *passive_data;
+ struct devfreq *parent_devfreq;
+
/* Initialize the struct profile and governor data for passive device */
profile->target = exynos_bus_target;
profile->exit = exynos_bus_passive_exit;
/* Get the instance of parent devfreq device */
parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
- if (IS_ERR(parent_devfreq)) {
- ret = -EPROBE_DEFER;
- goto err;
- }
+ if (IS_ERR(parent_devfreq))
+ return -EPROBE_DEFER;
passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
- if (!passive_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!passive_data)
+ return -ENOMEM;
+
passive_data->parent = parent_devfreq;
/* Add devfreq device for exynos bus with passive governor */
@@ -407,11 +376,61 @@ passive:
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
+ }
+
+ return 0;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *node;
+ struct devfreq_dev_profile *profile;
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
+ bool passive = false;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
}
-out:
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ mutex_init(&bus->lock);
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
+ passive = true;
+ } else {
+ ret = exynos_bus_parent_parse_of(np, bus);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+ goto err_reg;
+
+ if (passive)
+ ret = exynos_bus_profile_init_passive(bus, profile);
+ else
+ ret = exynos_bus_profile_init(bus, profile);
+
+ if (ret < 0)
+ goto err;
+
max_state = bus->devfreq->profile->max_state;
min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
diff --git a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c
new file mode 100644
index 000000000000..bc82d3653bff
--- /dev/null
+++ b/drivers/devfreq/imx8m-ddrc.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/arm-smccc.h>
+
+#define IMX_SIP_DDR_DVFS 0xc2000004
+
+/* Query available frequencies. */
+#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
+#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
+
+/*
+ * This should map 1:1 onto the devicetree OPPs, but the
+ * firmware provides additional info.
+ */
+struct imx8m_ddrc_freq {
+ unsigned long rate;
+ unsigned long smcarg;
+ int dram_core_parent_index;
+ int dram_alt_parent_index;
+ int dram_apb_parent_index;
+};
+
+/* Hardware limitation */
+#define IMX8M_DDRC_MAX_FREQ_COUNT 4
+
+/*
+ * i.MX8M DRAM Controller clocks have the following structure (abridged):
+ *
+ * +----------+ |\ +------+
+ * | dram_pll |-------|M| dram_core | |
+ * +----------+ |U|---------->| D |
+ * /--|X| | D |
+ * dram_alt_root | |/ | R |
+ * | | C |
+ * +---------+ | |
+ * |FIX DIV/4| | |
+ * +---------+ | |
+ * composite: | | |
+ * +----------+ | | |
+ * | dram_alt |----/ | |
+ * +----------+ | |
+ * | dram_apb |-------------------->| |
+ * +----------+ +------+
+ *
+ * The dram_pll is used for higher rates and dram_alt is used for lower rates.
+ *
+ * Frequency switching is implemented in TF-A (via SMC call) and can change the
+ * configuration of the clocks, including mux parents. The dram_alt and
+ * dram_apb clocks are "imx composite" and their parent can change too.
+ *
+ * We need to prepare/enable the new mux parents ahead of switching and update
+ * their information afterwards.
+ */
+struct imx8m_ddrc {
+ struct devfreq_dev_profile profile;
+ struct devfreq *devfreq;
+
+ /* For frequency switching: */
+ struct clk *dram_core;
+ struct clk *dram_pll;
+ struct clk *dram_alt;
+ struct clk *dram_apb;
+
+ int freq_count;
+ struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT];
+};
+
+static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv,
+ unsigned long rate)
+{
+ struct imx8m_ddrc_freq *freq;
+ int i;
+
+ /*
+ * Firmware reports values in MT/s, so we round down from Hz.
+ * Rounding is extra generous to ensure a match.
+ */
+ rate = DIV_ROUND_CLOSEST(rate, 250000);
+ for (i = 0; i < priv->freq_count; ++i) {
+ freq = &priv->freq_table[i];
+ if (freq->rate == rate ||
+ freq->rate + 1 == rate ||
+ freq->rate - 1 == rate)
+ return freq;
+ }
+
+ return NULL;
+}
+
+static void imx8m_ddrc_smc_set_freq(int target_freq)
+{
+ struct arm_smccc_res res;
+ u32 online_cpus = 0;
+ int cpu;
+
+ local_irq_disable();
+
+ for_each_online_cpu(cpu)
+ online_cpus |= (1 << (cpu * 8));
+
+ /* change the DDR frequency */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus,
+ 0, 0, 0, 0, 0, &res);
+
+ local_irq_enable();
+}
+
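
The online_cpus word packs one flag byte per CPU (bit 8 * cpu), so with cores 0-3 online the mask is 0x01010101 - presumably letting the firmware know which cores to hold while DRAM is retrained. A tiny illustration (hypothetical helper, valid for the four cores of an i.MX8M):

static u32 demo_online_cpu_mask(void)
{
	u32 mask = 0;
	int cpu;

	/* cpu 0 -> 0x00000001, cpu 1 -> 0x00000100, ..., 0-3 -> 0x01010101 */
	for_each_online_cpu(cpu)
		mask |= 1 << (cpu * 8);

	return mask;
}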
+static struct clk *clk_get_parent_by_index(struct clk *clk, int index)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index);
+
+ return hw ? hw->clk : NULL;
+}
+
+static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct clk *new_dram_core_parent;
+ struct clk *new_dram_alt_parent;
+ struct clk *new_dram_apb_parent;
+ int ret;
+
+ /*
+ * Fetch new parents
+ *
+ * new_dram_alt_parent and new_dram_apb_parent are optional but
+ * new_dram_core_parent is not.
+ */
+ new_dram_core_parent = clk_get_parent_by_index(
+ priv->dram_core, freq->dram_core_parent_index - 1);
+ if (!new_dram_core_parent) {
+ dev_err(dev, "failed to fetch new dram_core parent\n");
+ return -EINVAL;
+ }
+ if (freq->dram_alt_parent_index) {
+ new_dram_alt_parent = clk_get_parent_by_index(
+ priv->dram_alt,
+ freq->dram_alt_parent_index - 1);
+ if (!new_dram_alt_parent) {
+ dev_err(dev, "failed to fetch new dram_alt parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_alt_parent = NULL;
+
+ if (freq->dram_apb_parent_index) {
+ new_dram_apb_parent = clk_get_parent_by_index(
+ priv->dram_apb,
+ freq->dram_apb_parent_index - 1);
+ if (!new_dram_apb_parent) {
+ dev_err(dev, "failed to fetch new dram_apb parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_apb_parent = NULL;
+
+ /* Increase reference counts and ensure the clocks are on before the switch. */
+ ret = clk_prepare_enable(new_dram_core_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_core parent: %d\n",
+ ret);
+ goto out;
+ }
+ ret = clk_prepare_enable(new_dram_alt_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_alt parent: %d\n",
+ ret);
+ goto out_disable_core_parent;
+ }
+ ret = clk_prepare_enable(new_dram_apb_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_apb parent: %d\n",
+ ret);
+ goto out_disable_alt_parent;
+ }
+
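+ /* Let TF-A perform the actual switch while the new parents are on. */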
+ imx8m_ddrc_smc_set_freq(freq->smcarg);
+
+ /* Update the parents in the clk tree after the switch. */
+ ret = clk_set_parent(priv->dram_core, new_dram_core_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_core parent: %d\n", ret);
+ if (new_dram_alt_parent) {
+ ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_alt parent: %d\n",
+ ret);
+ }
+ if (new_dram_apb_parent) {
+ ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_apb parent: %d\n",
+ ret);
+ }
+
+ /*
+ * Explicitly refresh dram PLL rate.
+ *
+ * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be
+ * automatically refreshed when clk_get_rate is called on children.
+ */
+ clk_get_rate(priv->dram_pll);
+
+ /*
+ * clk_set_parent transfers the reference count from the old parent,
+ * so we can now drop the extra references taken during the switch.
+ */
+ clk_disable_unprepare(new_dram_apb_parent);
+out_disable_alt_parent:
+ clk_disable_unprepare(new_dram_alt_parent);
+out_disable_core_parent:
+ clk_disable_unprepare(new_dram_core_parent);
+out:
+ return ret;
+}
+
+static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq;
+ int ret;
+
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ ret = PTR_ERR(new_opp);
+ dev_err(dev, "failed to get recommended opp: %d\n", ret);
+ return ret;
+ }
+ dev_pm_opp_put(new_opp);
+
+ old_freq = clk_get_rate(priv->dram_core);
+ if (*freq == old_freq)
+ return 0;
+
+ freq_info = imx8m_ddrc_find_freq(priv, *freq);
+ if (!freq_info)
+ return -EINVAL;
+
+ /*
+ * Read back the clk rate after switching to verify the switch was
+ * applied and so the actual rate can be reported on all paths.
+ */
+ ret = imx8m_ddrc_set_freq(dev, freq_info);
+
+ new_freq = clk_get_rate(priv->dram_core);
+ if (ret)
+ dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n",
+ *freq, old_freq, ret, new_freq);
+ else if (*freq != new_freq)
+ dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n",
+ *freq, old_freq, new_freq);
+ else
+ dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n",
+ *freq, old_freq);
+
+ return ret;
+}
+
+static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
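+ /* No DDRC load counters are read here; report zero busy/total time. */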
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ stat->current_frequency = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_init_freq_info(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
+ int index;
+
+ /* An error here means the DDR DVFS API is not supported by the firmware. */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT,
+ 0, 0, 0, 0, 0, 0, &res);
+ priv->freq_count = res.a0;
+ if (priv->freq_count <= 0 ||
+ priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT)
+ return -ENODEV;
+
+ for (index = 0; index < priv->freq_count; ++index) {
+ struct imx8m_ddrc_freq *freq = &priv->freq_table[index];
+
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
+ index, 0, 0, 0, 0, 0, &res);
+ /* Result should be strictly positive */
+ if ((long)res.a0 <= 0)
+ return -ENODEV;
+
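+ /* a0 = rate in MT/s, a1..a3 = 1-based mux parent indices */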
+ freq->rate = res.a0;
+ freq->smcarg = index;
+ freq->dram_core_parent_index = res.a1;
+ freq->dram_alt_parent_index = res.a2;
+ freq->dram_apb_parent_index = res.a3;
+
+ /* dram_core has 2 options: dram_pll or dram_alt_root */
+ if (freq->dram_core_parent_index != 1 &&
+ freq->dram_core_parent_index != 2)
+ return -ENODEV;
+ /* dram_apb and dram_alt have exactly 8 possible parents */
+ if (freq->dram_alt_parent_index > 8 ||
+ freq->dram_apb_parent_index > 8)
+ return -ENODEV;
+ /* dram_core from alt requires explicit dram_alt parent */
+ if (freq->dram_core_parent_index == 2 &&
+ freq->dram_alt_parent_index == 0)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int imx8m_ddrc_check_opps(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int i, opp_count;
+
+ /* Enumerate DT OPPs and disable those not supported by firmware */
+ opp_count = dev_pm_opp_get_opp_count(dev);
+ if (opp_count < 0)
+ return opp_count;
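+ /* find_freq_ceil plus the ++freq step walks the OPPs in ascending order */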
+ for (i = 0, freq = 0; i < opp_count; ++i, ++freq) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed enumerating OPPs: %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+ dev_pm_opp_put(opp);
+
+ freq_info = imx8m_ddrc_find_freq(priv, freq);
+ if (!freq_info) {
+ dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n",
+ freq, DIV_ROUND_CLOSEST(freq, 250000));
+ dev_pm_opp_disable(dev, freq);
+ }
+ }
+
+ return 0;
+}
+
+static void imx8m_ddrc_exit(struct device *dev)
+{
+ dev_pm_opp_of_remove_table(dev);
+}
+
+static int imx8m_ddrc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx8m_ddrc *priv;
+ const char *gov = DEVFREQ_GOV_USERSPACE;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = imx8m_ddrc_init_freq_info(dev);
+ if (ret) {
+ dev_err(dev, "failed to init firmware freq info: %d\n", ret);
+ return ret;
+ }
+
+ priv->dram_core = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->dram_core)) {
+ ret = PTR_ERR(priv->dram_core);
+ dev_err(dev, "failed to fetch core clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_pll = devm_clk_get(dev, "pll");
+ if (IS_ERR(priv->dram_pll)) {
+ ret = PTR_ERR(priv->dram_pll);
+ dev_err(dev, "failed to fetch pll clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_alt = devm_clk_get(dev, "alt");
+ if (IS_ERR(priv->dram_alt)) {
+ ret = PTR_ERR(priv->dram_alt);
+ dev_err(dev, "failed to fetch alt clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(priv->dram_apb)) {
+ ret = PTR_ERR(priv->dram_apb);
+ dev_err(dev, "failed to fetch apb clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ return ret;
+ }
+
+ ret = imx8m_ddrc_check_opps(dev);
+ if (ret < 0)
+ goto err;
+
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = imx8m_ddrc_target;
+ priv->profile.get_dev_status = imx8m_ddrc_get_dev_status;
+ priv->profile.exit = imx8m_ddrc_exit;
+ priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq;
+ priv->profile.initial_freq = clk_get_rate(priv->dram_core);
+
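+ /* With the userspace governor, switches happen only on explicit request. */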
+ priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+ gov, NULL);
+ if (IS_ERR(priv->devfreq)) {
+ ret = PTR_ERR(priv->devfreq);
+ dev_err(dev, "failed to add devfreq device: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+}
+
+static const struct of_device_id imx8m_ddrc_of_match[] = {
+ { .compatible = "fsl,imx8m-ddrc", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match);
+
+static struct platform_driver imx8m_ddrc_platdrv = {
+ .probe = imx8m_ddrc_probe,
+ .driver = {
+ .name = "imx8m-ddrc-devfreq",
+ .of_match_table = of_match_ptr(imx8m_ddrc_of_match),
+ },
+};
+module_platform_driver(imx8m_ddrc_platdrv);
+
+MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e65d7279d79..24f04f78285b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
if (res.a0) {
dev_err(dev, "Failed to set dram param: %ld\n",
res.a0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
}
}
@@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
- if (IS_ERR(data->regmap_pmu))
- return PTR_ERR(data->regmap_pmu);
+ of_node_put(node);
+ if (IS_ERR(data->regmap_pmu)) {
+ ret = PTR_ERR(data->regmap_pmu);
+ goto err_edev;
+ }
}
regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
@@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
};
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
@@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
*/
if (dev_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
of_property_read_u32(np, "upthreshold",
@@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
err_free_opp:
dev_pm_opp_of_remove_table(&pdev->dev);
+err_edev:
+ devfreq_event_disable_edev(data->edev);
+
return ret;
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6fa1eba9d477..5142da401db3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -239,6 +239,14 @@ config FSL_RAID
the capability to offload memcpy, xor and pq computation
for raid5/6.
+config HISI_DMA
+ tristate "HiSilicon DMA Engine support"
+ depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support HiSilicon Kunpeng DMA engine.
+
config IMG_MDC_DMA
tristate "IMG MDC support"
depends on MIPS || COMPILE_TEST
@@ -273,6 +281,19 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD
+ tristate "Intel Data Accelerators support"
+ depends on PCI && X86_64
+ select DMA_ENGINE
+ select SBITMAP
+ help
+ Enable support for the Intel(R) data accelerators present
+ in Intel Xeon CPU.
+
+ Say Y if you have such a platform.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
@@ -497,6 +518,15 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
+config PLX_DMA
+ tristate "PLX ExpressLane PEX Switch DMA Engine Support"
+ depends on PCI
+ select DMA_ENGINE
+ help
+ Some PLX ExpressLane PCI Switches support additional DMA engines.
+ These are exposed via extra functions on the switch's
+ upstream port. Each function exposes one DMA channel.
+
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 42d7e2fc64fa..1d908394fbea 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HISI_DMA) += hisi_dma.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
@@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PLX_DMA) += plx_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 832aefbe7af9..539e785039ca 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -772,10 +772,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
return -EBUSY;
}
- *ptr = devm_ioremap_nocache(device, region->start,
+ *ptr = devm_ioremap(device, region->start,
resource_size(region));
if (*ptr == NULL) {
- dev_err(device, "ioremap_nocache of %s failed!", name);
+ dev_err(device, "ioremap of %s failed!", name);
return -ENOMEM;
}
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e4c593f48575..4768ef26013b 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
/* stop DMA activity */
if (c->desc) {
- if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
- vchan_terminate_vdesc(&c->desc->vd);
- else
- vchan_vdesc_fini(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index a0ee404b736e..f1d149e32839 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
+ struct regmap *regmap;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dmac);
- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+ &axi_dmac_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_irq;
+ }
return 0;
+err_free_irq:
+ free_irq(dmac->irq, dmac);
err_unregister_of:
of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index fa626acdc9b9..448f663da89c 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
- .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+ JZ_SOC_DATA_BREAK_LINKS,
};
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
@@ -1020,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+ { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 03ac4b96117c..c3b1283b6d31 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -60,6 +60,8 @@ static long dmaengine_ref_count;
/* --- sysfs implementation --- */
+#define DMA_SLAVE_NAME "slave"
+
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev - device node
@@ -164,11 +166,152 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
-#define dma_device_satisfies_mask(device, mask) \
- __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
- const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ * the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ * the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (!chan->client_count)
+ continue;
+ if (!min || chan->table_count < min->table_count)
+ min = chan;
+
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
+ }
+ }
+
+ chan = localmin ? localmin : min;
+
+ if (chan)
+ chan->table_count++;
+
+ return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case. Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
+
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
+
+ /* redistribute available channels */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ chan = min_chan(cap, cpu);
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
@@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
- return chan->device->dev->driver->owner;
+ return chan->device->owner;
}
/**
@@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan)
}
}
+static void dma_device_release(struct kref *ref)
+{
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
+
+ if (device->device_release)
+ device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+ lockdep_assert_held(&dma_list_mutex);
+ kref_put(&device->ref, dma_device_release);
+}
+
/**
* dma_chan_get - try to grab a dma channel's parent driver module
* @chan - channel to grab
@@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan)
if (!try_module_get(owner))
return -ENODEV;
+ ret = kref_get_unless_zero(&chan->device->ref);
+ if (!ret) {
+ ret = -ENODEV;
+ goto module_put_out;
+ }
+
/* allocate upon first client reference */
if (chan->device->device_alloc_chan_resources) {
ret = chan->device->device_alloc_chan_resources(chan);
@@ -233,6 +399,8 @@ out:
return 0;
err_out:
+ dma_device_put(chan->device);
+module_put_out:
module_put(owner);
return ret;
}
@@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan)
return;
chan->client_count--;
- module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan)
chan->router = NULL;
chan->route_data = NULL;
}
+
+ dma_device_put(chan->device);
+ module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
EXPORT_SYMBOL(dma_sync_wait);
/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
- struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
- enum dma_transaction_type cap;
- int err = 0;
-
- bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
- /* 'interrupt', 'private', and 'slave' are channel capabilities,
- * but are not associated with an operation so they do not need
- * an entry in the channel_table
- */
- clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
- clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
- clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
- for_each_dma_cap_mask(cap, dma_cap_mask_all) {
- channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
- if (!channel_table[cap]) {
- err = -ENOMEM;
- break;
- }
- }
-
- if (err) {
- pr_err("initialization failure\n");
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- free_percpu(channel_table[cap]);
- }
-
- return err;
-}
-arch_initcall(dma_channel_table_init);
-
-/**
* dma_find_channel - find a channel to carry out the operation
* @tx_type: transaction type
*/
@@ -369,97 +488,6 @@ void dma_issue_pending_all(void)
}
EXPORT_SYMBOL(dma_issue_pending_all);
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
- int node = dev_to_node(chan->device->dev);
- return node == NUMA_NO_NODE ||
- cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
- struct dma_device *device;
- struct dma_chan *chan;
- struct dma_chan *min = NULL;
- struct dma_chan *localmin = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (!dma_has_cap(cap, device->cap_mask) ||
- dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node) {
- if (!chan->client_count)
- continue;
- if (!min || chan->table_count < min->table_count)
- min = chan;
-
- if (dma_chan_is_local(chan, cpu))
- if (!localmin ||
- chan->table_count < localmin->table_count)
- localmin = chan;
- }
- }
-
- chan = localmin ? localmin : min;
-
- if (chan)
- chan->table_count++;
-
- return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
- struct dma_chan *chan;
- struct dma_device *device;
- int cpu;
- int cap;
-
- /* undo the last distribution */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_possible_cpu(cpu)
- per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node)
- chan->table_count = 0;
- }
-
- /* don't populate the channel_table if no clients are available */
- if (!dmaengine_ref_count)
- return;
-
- /* redistribute available channels */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_online_cpu(cpu) {
- chan = min_chan(cap, cpu);
- per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
- }
-}
-
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
struct dma_device *device;
@@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
{
struct dma_chan *chan;
- if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
return NULL;
}
@@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
if (has_acpi_companion(dev) && !chan)
chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan) {
- /* Valid channel found or requester needs to be deferred */
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- return chan;
- }
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
/* Try to find the channel via the DMA filter map(s) */
mutex_lock(&dma_list_mutex);
@@ -728,7 +756,22 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
}
mutex_unlock(&dma_list_mutex);
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR_OR_NULL(chan))
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+
+found:
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+ if (!chan->name)
+ return chan;
+ chan->slave = dev;
+
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+ DMA_SLAVE_NAME))
+ dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+ dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
+
+ return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
@@ -786,6 +829,14 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+
+ if (chan->slave) {
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
+ kfree(chan->name);
+ chan->name = NULL;
+ chan->slave = NULL;
+ }
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get);
*/
void dmaengine_put(void)
{
- struct dma_device *device;
+ struct dma_device *device, *_d;
struct dma_chan *chan;
mutex_lock(&dma_list_mutex);
dmaengine_ref_count--;
BUG_ON(dmaengine_ref_count < 0);
/* drop channel references */
- list_for_each_entry(device, &dma_device_list, global_node) {
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
continue;
list_for_each_entry(chan, &device->channels, device_node)
@@ -900,15 +951,118 @@ static int get_dma_id(struct dma_device *device)
return 0;
}
+static int __dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan,
+ int chan_id)
+{
+ int rc = 0;
+ int chancnt = device->chancnt;
+ atomic_t *idr_ref;
+ struct dma_chan *tchan;
+
+ tchan = list_first_entry_or_null(&device->channels,
+ struct dma_chan, device_node);
+ if (!tchan)
+ return -ENODEV;
+
+ if (tchan->dev) {
+ idr_ref = tchan->dev->idr_ref;
+ } else {
+ idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+ if (!idr_ref)
+ return -ENOMEM;
+ atomic_set(idr_ref, 0);
+ }
+
+ chan->local = alloc_percpu(typeof(*chan->local));
+ if (!chan->local)
+ goto err_out;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (!chan->dev) {
+ free_percpu(chan->local);
+ chan->local = NULL;
+ goto err_out;
+ }
+
+ /*
+ * When the chan_id is a negative value, we are dynamically adding
+ * the channel. Otherwise we are enumerating statically.
+ */
+ chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->idr_ref = idr_ref;
+ chan->dev->dev_id = device->dev_id;
+ atomic_inc(idr_ref);
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
+
+ rc = device_register(&chan->dev->device);
+ if (rc)
+ goto err_out;
+ chan->client_count = 0;
+ device->chancnt = chan->chan_id + 1;
+
+ return 0;
+
+ err_out:
+ free_percpu(chan->local);
+ kfree(chan->dev);
+ if (atomic_dec_return(idr_ref) == 0)
+ kfree(idr_ref);
+ return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ rc = __dma_async_device_channel_register(device, chan, -1);
+ if (rc < 0)
+ return rc;
+
+ dma_channel_rebalance();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ list_del(&chan->device_node);
+ device->chancnt--;
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ __dma_async_device_channel_unregister(device, chan);
+ dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
*/
int dma_async_device_register(struct dma_device *device)
{
- int chancnt = 0, rc;
+ int rc, i = 0;
struct dma_chan* chan;
- atomic_t *idr_ref;
if (!device)
return -ENODEV;
@@ -919,6 +1073,8 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ device->owner = device->dev->driver->owner;
+
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
@@ -994,65 +1150,29 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (!device->device_release)
+ dev_warn(device->dev,
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+ kref_init(&device->ref);
+
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
- if (!idr_ref)
- return -ENOMEM;
rc = get_dma_id(device);
- if (rc != 0) {
- kfree(idr_ref);
+ if (rc != 0)
return rc;
- }
-
- atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = -ENOMEM;
- chan->local = alloc_percpu(typeof(*chan->local));
- if (chan->local == NULL)
- goto err_out;
- chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
- if (chan->dev == NULL) {
- free_percpu(chan->local);
- chan->local = NULL;
+ rc = __dma_async_device_channel_register(device, chan, i++);
+ if (rc < 0)
goto err_out;
- }
-
- chan->chan_id = chancnt++;
- chan->dev->device.class = &dma_devclass;
- chan->dev->device.parent = device->dev;
- chan->dev->chan = chan;
- chan->dev->idr_ref = idr_ref;
- chan->dev->dev_id = device->dev_id;
- atomic_inc(idr_ref);
- dev_set_name(&chan->dev->device, "dma%dchan%d",
- device->dev_id, chan->chan_id);
-
- rc = device_register(&chan->dev->device);
- if (rc) {
- free_percpu(chan->local);
- chan->local = NULL;
- kfree(chan->dev);
- atomic_dec(idr_ref);
- goto err_out;
- }
- chan->client_count = 0;
}
- if (!chancnt) {
- dev_err(device->dev, "%s: device has no channels!\n", __func__);
- rc = -ENODEV;
- goto err_out;
- }
-
- device->chancnt = chancnt;
-
mutex_lock(&dma_list_mutex);
/* take references on public channels */
if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,9 +1200,8 @@ int dma_async_device_register(struct dma_device *device)
err_out:
/* if we never registered a channel just release the idr */
- if (atomic_read(idr_ref) == 0) {
+ if (!device->chancnt) {
ida_free(&dma_ida, device->dev_id);
- kfree(idr_ref);
return rc;
}
@@ -1108,23 +1227,20 @@ EXPORT_SYMBOL(dma_async_device_register);
*/
void dma_async_device_unregister(struct dma_device *device)
{
- struct dma_chan *chan;
+ struct dma_chan *chan, *n;
+
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
+ __dma_async_device_channel_unregister(device, chan);
mutex_lock(&dma_list_mutex);
- list_del_rcu(&device->global_node);
+ /*
+ * setting DMA_PRIVATE ensures the device being torn down will not
+ * be used in the channel_table
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance();
+ dma_device_put(device);
mutex_unlock(&dma_list_mutex);
-
- list_for_each_entry(chan, &device->channels, device_node) {
- WARN_ONCE(chan->client_count,
- "%s called while %d clients hold a reference\n",
- __func__, chan->client_count);
- mutex_lock(&dma_list_mutex);
- chan->dev->chan = NULL;
- mutex_unlock(&dma_list_mutex);
- device_unregister(&chan->dev->device);
- free_percpu(chan->local);
- }
}
EXPORT_SYMBOL(dma_async_device_unregister);
@@ -1302,6 +1418,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+static inline int desc_check_and_set_metadata_mode(
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+ /* Make sure that the metadata mode is not mixed */
+ if (!desc->desc_metadata_mode) {
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+ desc->desc_metadata_mode = mode;
+ else
+ return -ENOTSUPP;
+ } else if (desc->desc_metadata_mode != mode) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ int ret;
+
+ if (!desc)
+ return ERR_PTR(-EINVAL);
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+ return ERR_PTR(-ENOTSUPP);
+
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
/* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on
*/
@@ -1373,5 +1562,3 @@ static int __init dma_bus_init(void)
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
-
-
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 501c0b063f85..e8a320c9e57c 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
state->last = complete;
state->used = used;
state->residue = 0;
+ state->in_flight_bytes = 0;
}
return dma_async_is_complete(cookie, complete, used);
}
@@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
struct dmaengine_desc_callback {
dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
@@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
return (cb->callback) ? true : false;
}
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
#endif
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a1ce307c502f..14c1ac26f866 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
vchan_get_all_descriptors(&chan->vc, &head);
- /*
- * As vchan_dma_desc_free_list can access to desc_allocated list
- * we need to call it in vc.lock context.
- */
- vchan_dma_desc_free_list(&chan->vc, &head);
-
spin_unlock_irqrestore(&chan->vc.lock, flags);
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
return 0;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b1a7ca91701a..5697c3622699 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned int chans_per_mux, ch_off;
+ int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
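+ /* mux_swap: mirror the channel within each 4-byte group (0<->3, 1<->2) */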
+ if (fsl_chan->edma->drvdata->mux_swap)
+ ch_off += endian_diff[ch_off % 4];
+
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 5eaa2902ed39..67e422590c9a 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -147,6 +147,7 @@ struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
bool has_dmaclk;
+ bool mux_swap;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index b626c06ac2e0..eff7ebd8cf35 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = {
.setup_irq = fsl_edma_irq_init,
};
+static struct fsl_edma_drvdata ls1028a_data = {
+ .version = v1,
+ .dmamuxs = DMAMUX_NR,
+ .mux_swap = true,
+ .setup_irq = fsl_edma_irq_init,
+};
+
static struct fsl_edma_drvdata imx7ulp_data = {
.version = v3,
.dmamuxs = 1,
@@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = {
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+ { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 89792083d62c..95cc0256b387 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
+ if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
return;
list_for_each_entry_safe(comp_temp, _comp_temp,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
new file mode 100644
index 000000000000..ed3619266a48
--- /dev/null
+++ b/drivers/dma/hisi_dma.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019 HiSilicon Limited. */
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "virt-dma.h"
+
+#define HISI_DMA_SQ_BASE_L 0x0
+#define HISI_DMA_SQ_BASE_H 0x4
+#define HISI_DMA_SQ_DEPTH 0x8
+#define HISI_DMA_SQ_TAIL_PTR 0xc
+#define HISI_DMA_CQ_BASE_L 0x10
+#define HISI_DMA_CQ_BASE_H 0x14
+#define HISI_DMA_CQ_DEPTH 0x18
+#define HISI_DMA_CQ_HEAD_PTR 0x1c
+#define HISI_DMA_CTRL0 0x20
+#define HISI_DMA_CTRL0_QUEUE_EN_S 0
+#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4
+#define HISI_DMA_CTRL1 0x24
+#define HISI_DMA_CTRL1_QUEUE_RESET_S 0
+#define HISI_DMA_Q_FSM_STS 0x30
+#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0)
+#define HISI_DMA_INT_STS 0x40
+#define HISI_DMA_INT_STS_MASK GENMASK(12, 0)
+#define HISI_DMA_INT_MSK 0x44
+#define HISI_DMA_MODE 0x217c
+#define HISI_DMA_OFFSET 0x100
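+/* registers of channel n live at (base + n * HISI_DMA_OFFSET) */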
+
+#define HISI_DMA_MSI_NUM 30
+#define HISI_DMA_CHAN_NUM 30
+#define HISI_DMA_Q_DEPTH_VAL 1024
+
+#define PCI_BAR_2 2
+
+enum hisi_dma_mode {
+ EP = 0,
+ RC,
+};
+
+enum hisi_dma_chan_status {
+ DISABLE = -1,
+ IDLE = 0,
+ RUN,
+ CPL,
+ PAUSE,
+ HALT,
+ ABORT,
+ WAIT,
+ BUFFCLR,
+};
+
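+/*
+ * Each channel owns a submission queue (SQ) of hisi_dma_sqe filled by the
+ * driver and a completion queue (CQ) of hisi_dma_cqe filled by hardware;
+ * sq_tail and cq_head mirror the per-channel doorbell registers.
+ */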
+struct hisi_dma_sqe {
+ __le32 dw0;
+#define OPCODE_MASK GENMASK(3, 0)
+#define OPCODE_SMALL_PACKAGE 0x1
+#define OPCODE_M2M 0x4
+#define LOCAL_IRQ_EN BIT(8)
+#define ATTR_SRC_MASK GENMASK(14, 12)
+ __le32 dw1;
+ __le32 dw2;
+#define ATTR_DST_MASK GENMASK(26, 24)
+ __le32 length;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct hisi_dma_cqe {
+ __le32 rsv0;
+ __le32 rsv1;
+ __le16 sq_head;
+ __le16 rsv2;
+ __le16 rsv3;
+ __le16 w0;
+#define STATUS_MASK GENMASK(15, 1)
+#define STATUS_SUCC 0x0
+#define VALID_BIT BIT(0)
+};
+
+struct hisi_dma_desc {
+ struct virt_dma_desc vd;
+ struct hisi_dma_sqe sqe;
+};
+
+struct hisi_dma_chan {
+ struct virt_dma_chan vc;
+ struct hisi_dma_dev *hdma_dev;
+ struct hisi_dma_sqe *sq;
+ struct hisi_dma_cqe *cq;
+ dma_addr_t sq_dma;
+ dma_addr_t cq_dma;
+ u32 sq_tail;
+ u32 cq_head;
+ u32 qp_num;
+ enum hisi_dma_chan_status status;
+ struct hisi_dma_desc *desc;
+};
+
+struct hisi_dma_dev {
+ struct pci_dev *pdev;
+ void __iomem *base;
+ struct dma_device dma_dev;
+ u32 chan_num;
+ u32 chan_depth;
+ struct hisi_dma_chan chan[];
+};
+
+static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct hisi_dma_chan, vc.chan);
+}
+
+static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct hisi_dma_desc, vd);
+}
+
+static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
+ u32 val)
+{
+ writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+}
+
+static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr);
+ tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+ writel_relaxed(tmp, addr);
+}
+
+static void hisi_dma_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool pause)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+}
+
+static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool enable)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+}
+
+static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
+ HISI_DMA_INT_STS_MASK);
+}
+
+static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ void __iomem *base = hdma_dev->base;
+
+ hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
+ HISI_DMA_INT_STS_MASK);
+ hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+}
+
+static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+}
+
+static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ u32 index = chan->qp_num, tmp;
+ int ret;
+
+ hisi_dma_pause_dma(hdma_dev, index, true);
+ hisi_dma_enable_dma(hdma_dev, index, false);
+ hisi_dma_mask_irq(hdma_dev, index);
+
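+ /* wait for the queue FSM to leave the RUN state before resetting */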
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
+ WARN_ON(1);
+ }
+
+ hisi_dma_do_reset(hdma_dev, index);
+ hisi_dma_reset_qp_point(hdma_dev, index);
+ hisi_dma_pause_dma(hdma_dev, index, false);
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
+ WARN_ON(1);
+ }
+}
+
+static void hisi_dma_free_chan_resources(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+
+ hisi_dma_reset_hw_chan(chan);
+ vchan_free_chan_resources(&chan->vc);
+
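+ /* wipe both rings so the next client starts from a clean state */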
+ memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
+ memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
+ chan->sq_tail = 0;
+ chan->cq_head = 0;
+ chan->status = DISABLE;
+}
+
+static void hisi_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_hisi_dma_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *
+hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->sqe.length = cpu_to_le32(len);
+ desc->sqe.src_addr = cpu_to_le64(src);
+ desc->sqe.dst_addr = cpu_to_le64(dst);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
+
+static enum dma_status
+hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(c, cookie, txstate);
+}
+
+static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
+ chan->desc = NULL;
+ return;
+ }
+ list_del(&vd->node);
+ desc = to_hisi_dma_desc(vd);
+ chan->desc = desc;
+
+ memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
+
+ /* update the remaining fields in the sqe */
+ sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
+ sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
+
+ /* make sure data has been updated in sqe */
+ wmb();
+
+ /* update sq tail, point to new sqe position */
+ chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
+
+ /* update sq_tail to trigger a new task */
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
+ chan->sq_tail);
+}
+
+static void hisi_dma_issue_pending(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (vchan_issue_pending(&chan->vc))
+ hisi_dma_start_transfer(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int hisi_dma_terminate_all(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vd);
+ chan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);
+
+ return 0;
+}
+
+static void hisi_dma_synchronize(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
+{
+ size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
+ size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
+ struct device *dev = &hdma_dev->pdev->dev;
+ struct hisi_dma_chan *chan;
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ chan = &hdma_dev->chan[i];
+ chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
+ GFP_KERNEL);
+ if (!chan->sq)
+ return -ENOMEM;
+
+ chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
+ GFP_KERNEL);
+ if (!chan->cq)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+ u32 hw_depth = hdma_dev->chan_depth - 1;
+ void __iomem *base = hdma_dev->base;
+
+ /* set sq, cq base */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+ lower_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+ upper_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+ lower_32_bits(chan->cq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+ upper_32_bits(chan->cq_dma));
+
+ /* set sq, cq depth */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+
+ /* init sq tail and cq head */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_init_hw_qp(hdma_dev, qp_index);
+ hisi_dma_unmask_irq(hdma_dev, qp_index);
+ hisi_dma_enable_dma(hdma_dev, qp_index, true);
+}
+
+static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+}
+
+static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hdma_dev->chan[i].qp_num = i;
+ hdma_dev->chan[i].hdma_dev = hdma_dev;
+ hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
+ vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
+ hisi_dma_enable_qp(hdma_dev, i);
+ }
+}
+
+static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hisi_dma_disable_qp(hdma_dev, i);
+ tasklet_kill(&hdma_dev->chan[i].vc.task);
+ }
+}
+
+static irqreturn_t hisi_dma_irq(int irq, void *data)
+{
+ struct hisi_dma_chan *chan = data;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct hisi_dma_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ desc = chan->desc;
+ cqe = chan->cq + chan->cq_head;
+ if (desc) {
+ if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
+ chan->cq_head = (chan->cq_head + 1) %
+ hdma_dev->chan_depth;
+ hisi_dma_chan_write(hdma_dev->base,
+ HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
+ chan->cq_head);
+ vchan_cookie_complete(&desc->vd);
+ } else {
+ dev_err(&hdma_dev->pdev->dev, "task error!\n");
+ }
+
+ chan->desc = NULL;
+ }
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
+{
+ struct pci_dev *pdev = hdma_dev->pdev;
+ int i, ret;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ hisi_dma_irq, IRQF_SHARED, "hisi_dma",
+ &hdma_dev->chan[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* This function enables all hw channels in a device */
+static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
+{
+ int ret;
+
+ ret = hisi_dma_alloc_qps_mem(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
+ return ret;
+ }
+
+ ret = hisi_dma_request_qps_irq(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
+ return ret;
+ }
+
+ hisi_dma_enable_qps(hdma_dev);
+
+ return 0;
+}
+
+static void hisi_dma_disable_hw_channels(void *data)
+{
+ hisi_dma_disable_qps(data);
+}
+
+static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
+ enum hisi_dma_mode mode)
+{
+ writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+}
+
+static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_dma_dev *hdma_dev;
+ struct dma_device *dma_dev;
+ size_t dev_size;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
+ if (ret) {
+ dev_err(dev, "failed to remap I/O region!\n");
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM +
+ sizeof(*hdma_dev);
+ hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
+ if (!hdma_dev)
+ return -ENOMEM;
+
+ hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
+ hdma_dev->pdev = pdev;
+ hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
+ hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+
+ pci_set_drvdata(pdev, hdma_dev);
+ pci_set_master(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
+ PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocate MSI vectors!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
+ if (ret)
+ return ret;
+
+ dma_dev = &hdma_dev->dma_dev;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+ dma_dev->device_tx_status = hisi_dma_tx_status;
+ dma_dev->device_issue_pending = hisi_dma_issue_pending;
+ dma_dev->device_terminate_all = hisi_dma_terminate_all;
+ dma_dev->device_synchronize = hisi_dma_synchronize;
+ dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+ dma_dev->dev = dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ hisi_dma_set_mode(hdma_dev, RC);
+
+ ret = hisi_dma_enable_hw_channels(hdma_dev);
+ if (ret < 0) {
+		dev_err(dev, "failed to enable hw channels!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
+ hdma_dev);
+ if (ret)
+ return ret;
+
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret < 0)
+ dev_err(dev, "failed to register device!\n");
+
+ return ret;
+}
+
+static const struct pci_device_id hisi_dma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
+ { 0, }
+};
+
+static struct pci_driver hisi_dma_pci_driver = {
+ .name = "hisi_dma",
+ .id_table = hisi_dma_pci_tbl,
+ .probe = hisi_dma_probe,
+};
+
+module_pci_driver(hisi_dma_pci_driver);
+
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
new file mode 100644
index 000000000000..8978b898d777
--- /dev/null
+++ b/drivers/dma/idxd/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IDXD) += idxd.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
new file mode 100644
index 000000000000..1d7347825b95
--- /dev/null
+++ b/drivers/dma/idxd/cdev.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+ const char *name;
+ dev_t devt;
+ struct ida minor_ida;
+};
+
+/*
+ * ictx is an array of cdev contexts, one per accelerator type;
+ * enum idxd_type is used as the index.
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+ { .name = "dsa" },
+};
+
+struct idxd_user_context {
+ struct idxd_wq *wq;
+ struct task_struct *task;
+ unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+ CDEV_NORMAL = 0,
+ CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+ dev_dbg(dev, "releasing cdev device\n");
+ kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+ .name = "idxd_cdev",
+ .release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+ struct cdev *cdev = inode->i_cdev;
+
+ return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+ return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+ return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct idxd_user_context *ctx;
+ struct idxd_device *idxd;
+ struct idxd_wq *wq;
+ struct device *dev;
+ struct idxd_cdev *idxd_cdev;
+
+ wq = inode_wq(inode);
+ idxd = wq->idxd;
+ dev = &idxd->pdev->dev;
+ idxd_cdev = &wq->idxd_cdev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+ return -EBUSY;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->wq = wq;
+ filp->private_data = ctx;
+ idxd_wq_get(wq);
+ return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+ struct idxd_user_context *ctx = filep->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+ filep->private_data = NULL;
+
+ kfree(ctx);
+ idxd_wq_put(wq);
+ return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ dev_info_ratelimited(dev,
+ "%s: %s: mapping too large: %lu\n",
+ current->comm, func,
+ vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+ unsigned long pfn;
+ int rc;
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+	rc = check_vma(wq, vma, __func__);
+	if (rc < 0)
+		return rc;
+
+ vma->vm_flags |= VM_DONTCOPY;
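+	/* User space gets only the limited portal page for this WQ. */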
+ pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+ IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_private_data = ctx;
+
+ return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+}
+
+static __poll_t idxd_cdev_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ unsigned long flags;
+ __poll_t out = 0;
+
+ poll_wait(filp, &idxd_cdev->err_queue, wait);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ if (idxd->sw_err.valid)
+ out = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return out;
+}
+
+static const struct file_operations idxd_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = idxd_cdev_open,
+ .release = idxd_cdev_release,
+ .mmap = idxd_cdev_mmap,
+ .poll = idxd_cdev_poll,
+};
+
+int idxd_cdev_get_major(struct idxd_device *idxd)
+{
+ return MAJOR(ictx[idxd->type].devt);
+}
+
+static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+ struct device *dev;
+ int minor, rc;
+
+ idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+ if (!idxd_cdev->dev)
+ return -ENOMEM;
+
+ dev = idxd_cdev->dev;
+ dev->parent = &idxd->pdev->dev;
+ dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+ idxd->id, wq->id);
+ dev->bus = idxd_get_bus_type(idxd);
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto ida_err;
+ }
+
+ dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+ dev->type = &idxd_cdev_device_type;
+ rc = device_register(dev);
+ if (rc < 0) {
+ dev_err(&idxd->pdev->dev, "device register failed\n");
+ put_device(dev);
+ goto dev_reg_err;
+ }
+ idxd_cdev->minor = minor;
+
+ return 0;
+
+ dev_reg_err:
+ ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ ida_err:
+ kfree(dev);
+ idxd_cdev->dev = NULL;
+ return rc;
+}
+
+static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+ enum idxd_cdev_cleanup cdev_state)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ if (cdev_state == CDEV_NORMAL)
+ cdev_del(&idxd_cdev->cdev);
+ device_unregister(idxd_cdev->dev);
+ /*
+	 * The device_type->release() callback runs once the last reference
+	 * is dropped and frees the allocated struct device, so just clear
+	 * our pointer.
+ */
+ ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ idxd_cdev->dev = NULL;
+ idxd_cdev->minor = -1;
+}
+
+int idxd_wq_add_cdev(struct idxd_wq *wq)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct cdev *cdev = &idxd_cdev->cdev;
+ struct device *dev;
+ int rc;
+
+ rc = idxd_wq_cdev_dev_setup(wq);
+ if (rc < 0)
+ return rc;
+
+ dev = idxd_cdev->dev;
+ cdev_init(cdev, &idxd_cdev_fops);
+ cdev_set_parent(cdev, &dev->kobj);
+ rc = cdev_add(cdev, dev->devt, 1);
+ if (rc) {
+ dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+ idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+ return rc;
+ }
+
+ init_waitqueue_head(&idxd_cdev->err_queue);
+ return 0;
+}
+
+void idxd_wq_del_cdev(struct idxd_wq *wq)
+{
+ idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+}
+
+int idxd_cdev_register(void)
+{
+ int rc, i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ ida_init(&ictx[i].minor_ida);
+ rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
+ ictx[i].name);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cdev_remove(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+ ida_destroy(&ictx[i].minor_ida);
+ }
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
new file mode 100644
index 000000000000..ada69e722f84
--- /dev/null
+++ b/drivers/dma/idxd/device.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+
+/* Interrupt control bits */
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 1;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_mask_msix_vectors(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ int i, rc;
+
+ for (i = 0; i < msixcnt; i++) {
+ rc = idxd_mask_msix_vector(idxd, i);
+ if (rc < 0)
+ dev_warn(&pdev->dev,
+ "Failed disabling msix vec %d\n", i);
+ }
+}
+
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 0;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_unmask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 1;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+void idxd_mask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 0;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+static void free_hw_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->hw_descs[i]);
+
+ kfree(wq->hw_descs);
+}
+
+static int alloc_hw_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs[i]) {
+ free_hw_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void free_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->descs[i]);
+
+ kfree(wq->descs);
+}
+
+static int alloc_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
+ GFP_KERNEL, node);
+ if (!wq->descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->descs[i]) {
+ free_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/* WQ control bits */
+int idxd_wq_alloc_resources(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_group *group = wq->group;
+ struct device *dev = &idxd->pdev->dev;
+ int rc, num_descs, i;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return 0;
+
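+	/*
+	 * Size the descriptor pool for the WQ depth plus the maximum
+	 * number of descriptors each engine in the group can hold.
+	 */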
+ num_descs = wq->size +
+ idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
+ wq->num_descs = num_descs;
+
+ rc = alloc_hw_descs(wq, num_descs);
+ if (rc < 0)
+ return rc;
+
+ wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
+ wq->compls = dma_alloc_coherent(dev, wq->compls_size,
+ &wq->compls_addr, GFP_KERNEL);
+ if (!wq->compls) {
+ rc = -ENOMEM;
+ goto fail_alloc_compls;
+ }
+
+ rc = alloc_descs(wq, num_descs);
+ if (rc < 0)
+ goto fail_alloc_descs;
+
+ rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
+ dev_to_node(dev));
+ if (rc < 0)
+ goto fail_sbitmap_init;
+
+ for (i = 0; i < num_descs; i++) {
+ struct idxd_desc *desc = wq->descs[i];
+
+ desc->hw = wq->hw_descs[i];
+ desc->completion = &wq->compls[i];
+ desc->compl_dma = wq->compls_addr +
+ sizeof(struct dsa_completion_record) * i;
+ desc->id = i;
+ desc->wq = wq;
+
+ dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+ desc->txd.tx_submit = idxd_dma_tx_submit;
+ }
+
+ return 0;
+
+ fail_sbitmap_init:
+ free_descs(wq);
+ fail_alloc_descs:
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ fail_alloc_compls:
+ free_hw_descs(wq);
+ return rc;
+}
+
+void idxd_wq_free_resources(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return;
+
+ free_hw_descs(wq);
+ free_descs(wq);
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ sbitmap_free(&wq->sbmap);
+}
+
+int idxd_wq_enable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d already enabled\n", wq->id);
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
+ dev_dbg(dev, "WQ enable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_ENABLED;
+ dev_dbg(dev, "WQ %d enabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_disable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status, operand;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ dev_dbg(dev, "Disabling WQ %d\n", wq->id);
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return 0;
+ }
+
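+	/*
+	 * The operand packs a WQ bitmask: bits 15:0 select WQs within a
+	 * bank of 16 and bits 31:16 hold the bank index.
+	 */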
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "WQ disable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_DISABLED;
+ dev_dbg(dev, "WQ %d disabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_map_portal(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ resource_size_t start;
+
+ start = pci_resource_start(pdev, IDXD_WQ_BAR);
+ start = start + wq->id * IDXD_PORTAL_SIZE;
+
+ wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ if (!wq->dportal)
+ return -ENOMEM;
+ dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
+
+ return 0;
+}
+
+void idxd_wq_unmap_portal(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ devm_iounmap(dev, wq->dportal);
+}
+
+/* Device control bits */
+static inline bool idxd_is_enabled(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
+ return true;
+ return false;
+}
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+{
+ u32 sts, to = timeout;
+
+ lockdep_assert_held(&idxd->dev_lock);
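+	/* Busy-poll CMDSTS until the ACTIVE bit clears or the count expires. */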
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ while (sts & IDXD_CMDSTS_ACTIVE && --to) {
+ cpu_relax();
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ }
+
+ if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
+ dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
+ *status = 0;
+ return -EBUSY;
+ }
+
+ *status = sts;
+ return 0;
+}
+
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+{
+ union idxd_command_reg cmd;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = cmd_code;
+ cmd.operand = operand;
+ dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
+ __func__, cmd_code, operand);
+ iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+ return 0;
+}
+
+int idxd_device_enable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device already enabled\n");
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* If the command is successful or if the device was enabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ return -ENXIO;
+ }
+
+ idxd->state = IDXD_DEV_ENABLED;
+ return 0;
+}
+
+int idxd_device_disable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (!idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device is not enabled\n");
+ return 0;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* If the command is successful or if the device was disabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ rc = -ENXIO;
+ return rc;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+ return 0;
+}
+
+int __idxd_device_reset(struct idxd_device *idxd)
+{
+ u32 status;
+ int rc;
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int idxd_device_reset(struct idxd_device *idxd)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = __idxd_device_reset(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return rc;
+}
+
+/* Device configuration bits */
+static void idxd_group_config_write(struct idxd_group *group)
+{
+ struct idxd_device *idxd = group->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+ u32 grpcfg_offset;
+
+ dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
+
+ /* setup GRPWQCFG */
+ for (i = 0; i < 4; i++) {
+ grpcfg_offset = idxd->grpcfg_offset +
+ group->id * 64 + i * sizeof(u64);
+ iowrite64(group->grpcfg.wqs[i],
+ idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+ group->id, i, grpcfg_offset,
+ ioread64(idxd->reg_base + grpcfg_offset));
+ }
+
+ /* setup GRPENGCFG */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+ iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+ grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
+
+ /* setup GRPFLAGS */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+ iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->id, grpcfg_offset,
+ ioread32(idxd->reg_base + grpcfg_offset));
+}
+
+static int idxd_groups_config_write(struct idxd_device *idxd)
+{
+ union gencfg_reg reg;
+ int i;
+ struct device *dev = &idxd->pdev->dev;
+
+ /* Setup bandwidth token limit */
+ if (idxd->token_limit) {
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ reg.token_limit = idxd->token_limit;
+ iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+ }
+
+ dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
+ ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ idxd_group_config_write(group);
+ }
+
+ return 0;
+}
+
+static int idxd_wq_config_write(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 wq_offset;
+ int i;
+
+ if (!wq->group)
+ return 0;
+
+ memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+
+	/* bytes 0-3 */
+ wq->wqcfg.wq_size = wq->size;
+
+ if (wq->size == 0) {
+ dev_warn(dev, "Incorrect work queue size: 0\n");
+ return -EINVAL;
+ }
+
+ /* bytes 4-7 */
+ wq->wqcfg.wq_thresh = wq->threshold;
+
+	/* bytes 8-11 */
+ wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
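+	/* mode 1 = dedicated; shared WQs are rejected in idxd_wqs_setup() */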
+ wq->wqcfg.mode = 1;
+
+ wq->wqcfg.priority = wq->priority;
+
+ /* bytes 12-15 */
+ wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+ wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+
+ dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+ for (i = 0; i < 8; i++) {
+ wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ wq->id, i, wq_offset,
+ ioread32(idxd->reg_base + wq_offset));
+ }
+
+ return 0;
+}
+
+static int idxd_wqs_config_write(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ rc = idxd_wq_config_write(wq);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void idxd_group_flags_setup(struct idxd_device *idxd)
+{
+ int i;
+
+ /* TC-A 0 and TC-B 1 should be defaults */
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ if (group->tc_a == -1)
+ group->grpcfg.flags.tc_a = 0;
+ else
+ group->grpcfg.flags.tc_a = group->tc_a;
+ if (group->tc_b == -1)
+ group->grpcfg.flags.tc_b = 1;
+ else
+ group->grpcfg.flags.tc_b = group->tc_b;
+ group->grpcfg.flags.use_token_limit = group->use_token_limit;
+ group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
+ if (group->tokens_allowed)
+ group->grpcfg.flags.tokens_allowed =
+ group->tokens_allowed;
+ else
+ group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+ }
+}
+
+static int idxd_engines_setup(struct idxd_device *idxd)
+{
+ int i, engines = 0;
+ struct idxd_engine *eng;
+ struct idxd_group *group;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ group->grpcfg.engines = 0;
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ eng = &idxd->engines[i];
+ group = eng->group;
+
+ if (!group)
+ continue;
+
+ group->grpcfg.engines |= BIT(eng->id);
+ engines++;
+ }
+
+ if (!engines)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int idxd_wqs_setup(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct idxd_group *group;
+ int i, j, configured = 0;
+ struct device *dev = &idxd->pdev->dev;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ for (j = 0; j < 4; j++)
+ group->grpcfg.wqs[j] = 0;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = &idxd->wqs[i];
+ group = wq->group;
+
+ if (!wq->group)
+ continue;
+ if (!wq->size)
+ continue;
+
+ if (!wq_dedicated(wq)) {
+ dev_warn(dev, "No shared workqueue support.\n");
+ return -EINVAL;
+ }
+
+ group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
+ configured++;
+ }
+
+ if (configured == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int idxd_device_config(struct idxd_device *idxd)
+{
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_wqs_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_engines_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ idxd_group_flags_setup(idxd);
+
+ rc = idxd_wqs_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_groups_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
new file mode 100644
index 000000000000..c64c1429d160
--- /dev/null
+++ b/drivers/dma/idxd/dma.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+ return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct dmaengine_result res;
+ int complete = 1;
+
+ if (desc->completion->status == DSA_COMP_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (desc->completion->status)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else if (comp_type == IDXD_COMPLETE_ABORT)
+ res.result = DMA_TRANS_ABORTED;
+ else
+ complete = 0;
+
+ tx = &desc->txd;
+ if (complete && tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+}
+
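+/*
+ * Every descriptor requests a completion record (CRAV | RCR); a completion
+ * interrupt (RCI) is requested only when DMA_PREP_INTERRUPT is set.
+ */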
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+ *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+ if (flags & DMA_PREP_INTERRUPT)
+ *desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+ u64 *compl_addr)
+{
+ *compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+ struct dsa_hw_desc *hw, char opcode,
+ u64 addr_f1, u64 addr_f2, u64 len,
+ u64 compl, u32 flags)
+{
+ struct idxd_device *idxd = wq->idxd;
+
+ hw->flags = flags;
+ hw->opcode = opcode;
+ hw->src_addr = addr_f1;
+ hw->dst_addr = addr_f2;
+ hw->xfer_size = len;
+ hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ hw->completion_addr = compl;
+
+ /*
+	 * Descriptor completion vectors are 1 through 8 for MSI-X. We
+	 * round-robin through the 8 vectors.
+ */
+ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ hw->int_handle = wq->vec_ptr;
+}
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ if (len > idxd->max_xfer_bytes)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+ dma_src, dma_dest, len, desc->compl_dma,
+ desc_flags);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_get(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+ return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_put(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *c = tx->chan;
+ struct idxd_wq *wq = to_idxd_wq(c);
+ dma_cookie_t cookie;
+ int rc;
+ struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+ cookie = dma_cookie_assign(tx);
+
+ rc = idxd_submit_desc(wq, desc);
+ if (rc < 0) {
+ idxd_free_desc(wq, desc);
+ return rc;
+ }
+
+ return cookie;
+}
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+ struct dma_device *dma = &idxd->dma_dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->dev = &idxd->pdev->dev;
+
+ dma->device_release = idxd_dma_release;
+
+ if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+ }
+
+ dma->device_tx_status = idxd_dma_tx_status;
+ dma->device_issue_pending = idxd_dma_issue_pending;
+ dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+ return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+ dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct dma_device *dma = &idxd->dma_dev;
+ struct dma_chan *chan = &wq->dma_chan;
+ int rc;
+
+ memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+ chan->device = dma;
+ list_add_tail(&chan->device_node, &dma->channels);
+ rc = dma_async_device_channel_register(dma, chan);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
new file mode 100644
index 000000000000..b8f8a363b4a7
--- /dev/null
+++ b/drivers/dma/idxd/idxd.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_H_
+#define _IDXD_H_
+
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include "registers.h"
+
+#define IDXD_DRIVER_VERSION "1.00"
+
+extern struct kmem_cache *idxd_desc_pool;
+
+#define IDXD_REG_TIMEOUT 50
+#define IDXD_DRAIN_TIMEOUT 5000
+
+enum idxd_type {
+ IDXD_TYPE_UNKNOWN = -1,
+ IDXD_TYPE_DSA = 0,
+ IDXD_TYPE_MAX
+};
+
+#define IDXD_NAME_SIZE 128
+
+struct idxd_device_driver {
+ struct device_driver drv;
+};
+
+struct idxd_irq_entry {
+ struct idxd_device *idxd;
+ int id;
+ struct llist_head pending_llist;
+ struct list_head work_list;
+};
+
+struct idxd_group {
+ struct device conf_dev;
+ struct idxd_device *idxd;
+ struct grpcfg grpcfg;
+ int id;
+ int num_engines;
+ int num_wqs;
+ bool use_token_limit;
+ u8 tokens_allowed;
+ u8 tokens_reserved;
+ int tc_a;
+ int tc_b;
+};
+
+#define IDXD_MAX_PRIORITY 0xf
+
+enum idxd_wq_state {
+ IDXD_WQ_DISABLED = 0,
+ IDXD_WQ_ENABLED,
+};
+
+enum idxd_wq_flag {
+ WQ_FLAG_DEDICATED = 0,
+};
+
+enum idxd_wq_type {
+ IDXD_WQT_NONE = 0,
+ IDXD_WQT_KERNEL,
+ IDXD_WQT_USER,
+};
+
+struct idxd_cdev {
+ struct cdev cdev;
+ struct device *dev;
+ int minor;
+ struct wait_queue_head err_queue;
+};
+
+#define IDXD_ALLOCATED_BATCH_SIZE 128U
+#define WQ_NAME_SIZE 1024
+#define WQ_TYPE_SIZE 10
+
+enum idxd_op_type {
+ IDXD_OP_BLOCK = 0,
+ IDXD_OP_NONBLOCK = 1,
+};
+
+enum idxd_complete_type {
+ IDXD_COMPLETE_NORMAL = 0,
+ IDXD_COMPLETE_ABORT,
+};
+
+struct idxd_wq {
+ void __iomem *dportal;
+ struct device conf_dev;
+ struct idxd_cdev idxd_cdev;
+ struct idxd_device *idxd;
+ int id;
+ enum idxd_wq_type type;
+ struct idxd_group *group;
+ int client_count;
+ struct mutex wq_lock; /* mutex for workqueue */
+ u32 size;
+ u32 threshold;
+ u32 priority;
+ enum idxd_wq_state state;
+ unsigned long flags;
+ union wqcfg wqcfg;
+ atomic_t dq_count; /* dedicated queue flow control */
+ u32 vec_ptr; /* interrupt steering */
+ struct dsa_hw_desc **hw_descs;
+ int num_descs;
+ struct dsa_completion_record *compls;
+ dma_addr_t compls_addr;
+ int compls_size;
+ struct idxd_desc **descs;
+ struct sbitmap sbmap;
+ struct dma_chan dma_chan;
+ struct percpu_rw_semaphore submit_lock;
+ wait_queue_head_t submit_waitq;
+ char name[WQ_NAME_SIZE + 1];
+};
+
+struct idxd_engine {
+ struct device conf_dev;
+ int id;
+ struct idxd_group *group;
+ struct idxd_device *idxd;
+};
+
+/* shadow registers */
+struct idxd_hw {
+ u32 version;
+ union gen_cap_reg gen_cap;
+ union wq_cap_reg wq_cap;
+ union group_cap_reg group_cap;
+ union engine_cap_reg engine_cap;
+ struct opcap opcap;
+};
+
+enum idxd_device_state {
+ IDXD_DEV_HALTED = -1,
+ IDXD_DEV_DISABLED = 0,
+ IDXD_DEV_CONF_READY,
+ IDXD_DEV_ENABLED,
+};
+
+enum idxd_device_flag {
+ IDXD_FLAG_CONFIGURABLE = 0,
+};
+
+struct idxd_device {
+ enum idxd_type type;
+ struct device conf_dev;
+ struct list_head list;
+ struct idxd_hw hw;
+ enum idxd_device_state state;
+ unsigned long flags;
+ int id;
+ int major;
+
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+
+ spinlock_t dev_lock; /* spinlock for device */
+ struct idxd_group *groups;
+ struct idxd_wq *wqs;
+ struct idxd_engine *engines;
+
+ int num_groups;
+
+ u32 msix_perm_offset;
+ u32 wqcfg_offset;
+ u32 grpcfg_offset;
+ u32 perfmon_offset;
+
+ u64 max_xfer_bytes;
+ u32 max_batch_size;
+ int max_groups;
+ int max_engines;
+ int max_tokens;
+ int max_wqs;
+ int max_wq_size;
+ int token_limit;
+ int nr_tokens; /* non-reserved tokens */
+
+ union sw_err_reg sw_err;
+
+ struct msix_entry *msix_entries;
+ int num_wq_irqs;
+ struct idxd_irq_entry *irq_entries;
+
+ struct dma_device dma_dev;
+};
+
+/* IDXD software descriptor */
+struct idxd_desc {
+ struct dsa_hw_desc *hw;
+ dma_addr_t desc_dma;
+ struct dsa_completion_record *completion;
+ dma_addr_t compl_dma;
+ struct dma_async_tx_descriptor txd;
+ struct llist_node llnode;
+ struct list_head list;
+ int id;
+ struct idxd_wq *wq;
+};
+
+#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
+#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+
+extern struct bus_type dsa_bus_type;
+
+static inline bool wq_dedicated(struct idxd_wq *wq)
+{
+ return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+enum idxd_portal_prot {
+ IDXD_PORTAL_UNLIMITED = 0,
+ IDXD_PORTAL_LIMITED,
+};
+
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+ return prot * 0x1000;
+}
+
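+/*
+ * Each WQ owns four portal pages in the WQ BAR; the limited portal page
+ * sits 0x1000 bytes past the unlimited one.
+ */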
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+ enum idxd_portal_prot prot)
+{
+ return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
+
+static inline void idxd_set_type(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+ idxd->type = IDXD_TYPE_DSA;
+ else
+ idxd->type = IDXD_TYPE_UNKNOWN;
+}
+
+static inline void idxd_wq_get(struct idxd_wq *wq)
+{
+ wq->client_count++;
+}
+
+static inline void idxd_wq_put(struct idxd_wq *wq)
+{
+ wq->client_count--;
+}
+
+static inline int idxd_wq_refcount(struct idxd_wq *wq)
+{
+ return wq->client_count;
+}
+
+const char *idxd_get_dev_name(struct idxd_device *idxd);
+int idxd_register_bus_type(void);
+void idxd_unregister_bus_type(void);
+int idxd_setup_sysfs(struct idxd_device *idxd);
+void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_driver(void);
+void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+/* device interrupt control */
+irqreturn_t idxd_irq_handler(int vec, void *data);
+irqreturn_t idxd_misc_thread(int vec, void *data);
+irqreturn_t idxd_wq_thread(int irq, void *data);
+void idxd_mask_error_interrupts(struct idxd_device *idxd);
+void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+void idxd_mask_msix_vectors(struct idxd_device *idxd);
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+
+/* device control */
+int idxd_device_enable(struct idxd_device *idxd);
+int idxd_device_disable(struct idxd_device *idxd);
+int idxd_device_reset(struct idxd_device *idxd);
+int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_cleanup(struct idxd_device *idxd);
+int idxd_device_config(struct idxd_device *idxd);
+void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+
+/* work queue control */
+int idxd_wq_alloc_resources(struct idxd_wq *wq);
+void idxd_wq_free_resources(struct idxd_wq *wq);
+int idxd_wq_enable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_map_portal(struct idxd_wq *wq);
+void idxd_wq_unmap_portal(struct idxd_wq *wq);
+
+/* submission */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
+/* dmaengine */
+int idxd_register_dma_device(struct idxd_device *idxd);
+void idxd_unregister_dma_device(struct idxd_device *idxd);
+int idxd_register_dma_channel(struct idxd_wq *wq);
+void idxd_unregister_dma_channel(struct idxd_wq *wq);
+void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type);
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
+#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
new file mode 100644
index 000000000000..7778c05deb5d
--- /dev/null
+++ b/drivers/dma/idxd/init.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <uapi/linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+
+#define DRV_NAME "idxd"
+
+static struct idr idxd_idrs[IDXD_TYPE_MAX];
+static struct mutex idxd_idr_lock;
+
+static struct pci_device_id idxd_pci_tbl[] = {
+ /* DSA ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+
+static char *idxd_name[] = {
+ "dsa",
+};
+
+const char *idxd_get_dev_name(struct idxd_device *idxd)
+{
+ return idxd_name[idxd->type];
+}
+
+static int idxd_setup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct msix_entry *msix;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+ int rc = 0;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt < 0) {
+ dev_err(dev, "Not MSI-X interrupt capable.\n");
+ goto err_no_irq;
+ }
+
+	idxd->msix_entries = devm_kcalloc(dev, msixcnt,
+					  sizeof(struct msix_entry), GFP_KERNEL);
+ if (!idxd->msix_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++)
+ idxd->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+ if (rc) {
+ dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+
+ /*
+	 * We implement one completion list per MSI-X entry, except for
+	 * entry 0, which is reserved for errors and other miscellaneous
+	 * events.
+ */
+ idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+ sizeof(struct idxd_irq_entry),
+ GFP_KERNEL);
+ if (!idxd->irq_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++) {
+ idxd->irq_entries[i].id = i;
+ idxd->irq_entries[i].idxd = idxd;
+ }
+
+ msix = &idxd->msix_entries[0];
+ irq_entry = &idxd->irq_entries[0];
+ rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+ idxd_misc_thread, 0, "idxd-misc",
+ irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate misc interrupt.\n");
+ goto err_no_irq;
+ }
+
+ dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+ msix->vector);
+
+ /* first MSI-X entry is not for wq interrupts */
+ idxd->num_wq_irqs = msixcnt - 1;
+
+ for (i = 1; i < msixcnt; i++) {
+ msix = &idxd->msix_entries[i];
+ irq_entry = &idxd->irq_entries[i];
+
+ init_llist_head(&idxd->irq_entries[i].pending_llist);
+ INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+ rc = devm_request_threaded_irq(dev, msix->vector,
+ idxd_irq_handler,
+ idxd_wq_thread, 0,
+ "idxd-portal", irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate irq %d.\n",
+ msix->vector);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+ i, msix->vector);
+ }
+
+ idxd_unmask_error_interrupts(idxd);
+
+ return 0;
+
+ err_no_irq:
+ /* Disable error interrupt generation */
+ idxd_mask_error_interrupts(idxd);
+ pci_disable_msix(pdev);
+ dev_err(dev, "No usable interrupts\n");
+ return rc;
+}
+
+static void idxd_wqs_free_lock(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ percpu_free_rwsem(&wq->submit_lock);
+ }
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+ sizeof(struct idxd_group), GFP_KERNEL);
+ if (!idxd->groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ idxd->groups[i].idxd = idxd;
+ idxd->groups[i].id = i;
+ idxd->groups[i].tc_a = -1;
+ idxd->groups[i].tc_b = -1;
+ }
+
+ idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+ GFP_KERNEL);
+ if (!idxd->wqs)
+ return -ENOMEM;
+
+ idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+ sizeof(struct idxd_engine), GFP_KERNEL);
+ if (!idxd->engines)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+ int rc;
+
+ wq->id = i;
+ wq->idxd = idxd;
+ mutex_init(&wq->wq_lock);
+ atomic_set(&wq->dq_count, 0);
+ init_waitqueue_head(&wq->submit_waitq);
+ wq->idxd_cdev.minor = -1;
+ rc = percpu_init_rwsem(&wq->submit_lock);
+ if (rc < 0) {
+ idxd_wqs_free_lock(idxd);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ idxd->engines[i].idxd = idxd;
+ idxd->engines[i].id = i;
+ }
+
+ return 0;
+}
+
+static void idxd_read_table_offsets(struct idxd_device *idxd)
+{
+ union offsets_reg offsets;
+ struct device *dev = &idxd->pdev->dev;
+
+ offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
+ + sizeof(u64));
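+	/* Hardware reports each table offset in multiples of 0x100 bytes. */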
+ idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
+ idxd->wqcfg_offset = offsets.wqcfg * 0x100;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
+ idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * 0x100;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
+ idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * 0x100;
+ dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
+}
+
+static void idxd_read_caps(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ /* reading generic capabilities */
+ idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+ dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+ idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
+ dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
+ idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+ dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
+ if (idxd->hw.gen_cap.config_en)
+ set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
+
+ /* reading group capabilities */
+ idxd->hw.group_cap.bits =
+ ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
+ dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
+ idxd->max_groups = idxd->hw.group_cap.num_groups;
+ dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
+ idxd->max_tokens = idxd->hw.group_cap.total_tokens;
+ dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
+ idxd->nr_tokens = idxd->max_tokens;
+
+ /* read engine capabilities */
+ idxd->hw.engine_cap.bits =
+ ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
+ dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
+ idxd->max_engines = idxd->hw.engine_cap.num_engines;
+ dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
+
+ /* read workqueue capabilities */
+ idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
+ dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
+ idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
+ dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+ idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+ dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+
+ /* reading operation capabilities */
+ for (i = 0; i < 4; i++) {
+ idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
+ IDXD_OPCAP_OFFSET + i * sizeof(u64));
+ dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
+ }
+}
+
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
+ void __iomem * const *iomap)
+{
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+
+ idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ if (!idxd)
+ return NULL;
+
+ idxd->pdev = pdev;
+ idxd->reg_base = iomap[IDXD_MMIO_BAR];
+ spin_lock_init(&idxd->dev_lock);
+
+ return idxd;
+}
+
+static int idxd_probe(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ dev_dbg(dev, "%s entered and resetting device\n", __func__);
+ rc = idxd_device_reset(idxd);
+ if (rc < 0)
+ return rc;
+ dev_dbg(dev, "IDXD reset complete\n");
+
+ idxd_read_caps(idxd);
+ idxd_read_table_offsets(idxd);
+
+ rc = idxd_setup_internals(idxd);
+ if (rc)
+ goto err_setup;
+
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ goto err_setup;
+
+ dev_dbg(dev, "IDXD interrupt setup complete.\n");
+
+ mutex_lock(&idxd_idr_lock);
+ idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+ mutex_unlock(&idxd_idr_lock);
+ if (idxd->id < 0) {
+ rc = -ENOMEM;
+ goto err_idr_fail;
+ }
+
+ idxd->major = idxd_cdev_get_major(idxd);
+
+ dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+ return 0;
+
+ err_idr_fail:
+ idxd_mask_error_interrupts(idxd);
+ idxd_mask_msix_vectors(idxd);
+ err_setup:
+ return rc;
+}
+
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+ int rc;
+ unsigned int mask;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Mapping BARs\n");
+ mask = (1 << IDXD_MMIO_BAR);
+ rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
+ if (rc)
+ return rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, iomap);
+ if (!idxd)
+ return -ENOMEM;
+
+ idxd_set_type(idxd);
+
+ dev_dbg(dev, "Set PCI master\n");
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, idxd);
+
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ return -ENODEV;
+ }
+
+ rc = idxd_setup_sysfs(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ return -ENODEV;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+
+ dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ idxd->hw.version);
+
+ return 0;
+}
+
+static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *itr;
+ struct llist_node *head;
+
+ head = llist_del_all(&ie->pending_llist);
+ if (!head)
+ return;
+
+ llist_for_each_entry_safe(desc, itr, head, llnode) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_flush_work_list(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *iter;
+
+ list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+ list_del(&desc->list);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_shutdown(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ int rc, i;
+ struct idxd_irq_entry *irq_entry;
+ int msixcnt = pci_msix_vec_count(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ if (rc)
+ dev_err(&pdev->dev, "Disabling device failed\n");
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_mask_msix_vectors(idxd);
+ idxd_mask_error_interrupts(idxd);
+
+ for (i = 0; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ synchronize_irq(idxd->msix_entries[i].vector);
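+		/* entry 0 is the misc/error vector and has no descriptor lists */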
+ if (i == 0)
+ continue;
+ idxd_flush_pending_llist(irq_entry);
+ idxd_flush_work_list(irq_entry);
+ }
+}
+
+static void idxd_remove(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_cleanup_sysfs(idxd);
+ idxd_shutdown(pdev);
+ idxd_wqs_free_lock(idxd);
+ mutex_lock(&idxd_idr_lock);
+ idr_remove(&idxd_idrs[idxd->type], idxd->id);
+ mutex_unlock(&idxd_idr_lock);
+}
+
+static struct pci_driver idxd_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = idxd_pci_tbl,
+ .probe = idxd_pci_probe,
+ .remove = idxd_remove,
+ .shutdown = idxd_shutdown,
+};
+
+static int __init idxd_init_module(void)
+{
+ int err, i;
+
+ /*
+	 * If the CPU does not support MOVDIR64B (the 64-byte direct-store
+	 * instruction used to submit descriptors), there is no point in
+	 * enumerating the device; we cannot utilize it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+		pr_warn("idxd driver failed to load: CPU lacks MOVDIR64B\n");
+ return -ENODEV;
+ }
+
+ pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
+ DRV_NAME, IDXD_DRIVER_VERSION);
+
+ mutex_init(&idxd_idr_lock);
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ idr_init(&idxd_idrs[i]);
+
+ err = idxd_register_bus_type();
+ if (err < 0)
+ return err;
+
+ err = idxd_register_driver();
+ if (err < 0)
+ goto err_idxd_driver_register;
+
+ err = idxd_cdev_register();
+ if (err)
+ goto err_cdev_register;
+
+ err = pci_register_driver(&idxd_pci_driver);
+ if (err)
+ goto err_pci_register;
+
+ return 0;
+
+err_pci_register:
+ idxd_cdev_remove();
+err_cdev_register:
+ idxd_unregister_driver();
+err_idxd_driver_register:
+ idxd_unregister_bus_type();
+ return err;
+}
+module_init(idxd_init_module);
+
+static void __exit idxd_exit_module(void)
+{
+ pci_unregister_driver(&idxd_pci_driver);
+ idxd_cdev_remove();
+ idxd_unregister_bus_type();
+}
+module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
new file mode 100644
index 000000000000..d6fcd2e60103
--- /dev/null
+++ b/drivers/dma/idxd/irq.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->state = IDXD_WQ_DISABLED;
+ }
+}
+
+static int idxd_restart(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ rc = __idxd_device_reset(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_config(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_warn(&idxd->pdev->dev,
+ "Unable to re-enable wq %s\n",
+ dev_name(&wq->conf_dev));
+ }
+ }
+ }
+
+ return 0;
+
+ out:
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ return rc;
+}
+
+irqreturn_t idxd_irq_handler(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+
+ idxd_mask_msix_vector(idxd, irq_entry->id);
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ union gensts_reg gensts;
+ u32 cause, val = 0;
+ int i, rc;
+ bool err = false;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ if (cause & IDXD_INTC_ERR) {
+ spin_lock_bh(&idxd->dev_lock);
+ for (i = 0; i < 4; i++)
+ idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+ IDXD_SWERR_OFFSET + i * sizeof(u64));
+ iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ int id = idxd->sw_err.wq_idx;
+ struct idxd_wq *wq = &idxd->wqs[id];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ } else {
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ }
+ }
+
+ spin_unlock_bh(&idxd->dev_lock);
+ val |= IDXD_INTC_ERR;
+
+ for (i = 0; i < 4; i++)
+ dev_warn(dev, "err[%d]: %#16.16llx\n",
+ i, idxd->sw_err.bits[i]);
+ err = true;
+ }
+
+ if (cause & IDXD_INTC_CMD) {
+		/* Command completion is polled, so just acknowledge the bit */
+ val |= IDXD_INTC_CMD;
+ }
+
+ if (cause & IDXD_INTC_OCCUPY) {
+ /* Driver does not utilize occupancy interrupt */
+ val |= IDXD_INTC_OCCUPY;
+ }
+
+ if (cause & IDXD_INTC_PERFMON_OVFL) {
+ /*
+ * Driver does not utilize perfmon counter overflow interrupt
+ * yet.
+ */
+ val |= IDXD_INTC_PERFMON_OVFL;
+ }
+
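+	/* XOR away the handled bits; anything left in val was unexpected. */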
+ val ^= cause;
+ if (val)
+ dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+ val);
+
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (!err)
+ return IRQ_HANDLED;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ spin_lock_bh(&idxd->dev_lock);
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ rc = idxd_restart(idxd);
+ if (rc < 0)
+ dev_err(&idxd->pdev->dev,
+					"idxd restart failed, device halted\n");
+ } else {
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need %s.\n",
+ gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+ "FLR" : "system reset");
+ }
+ spin_unlock_bh(&idxd->dev_lock);
+ }
+
+ idxd_unmask_msix_vector(idxd, irq_entry->id);
+ return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct idxd_desc *desc, *t;
+ struct llist_node *head;
+ int queued = 0;
+
+ head = llist_del_all(&irq_entry->pending_llist);
+ if (!head)
+ return 0;
+
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ list_add_tail(&desc->list, &irq_entry->work_list);
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct list_head *node, *next;
+ int queued = 0;
+
+ if (list_empty(&irq_entry->work_list))
+ return 0;
+
+ list_for_each_safe(node, next, &irq_entry->work_list) {
+ struct idxd_desc *desc =
+ container_of(node, struct idxd_desc, list);
+
+ if (desc->completion->status) {
+ list_del(&desc->list);
+ /* process and callback */
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ int rc, processed = 0, retry = 0;
+
+ /*
+ * There are two lists we are processing. The pending_llist is where
+ * the submitter adds all the submitted descriptors after sending them
+ * to the workqueue. It's a lockless singly linked list. The work_list
+ * is the common Linux doubly linked list. We are in a scenario of
+ * multiple producers and a single consumer. The producers are all
+ * the kernel submitters of descriptors, and the consumer is the
+ * kernel irq handler thread for the MSI-X vector when using threaded
+ * irq. To work within the restrictions of llist and remain lockless,
+ * we take the following steps:
+ * 1. Iterate through the work_list and process any completed
+ * descriptors. Delete the completed entries during iteration.
+ * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was deleted from the pending list
+ * and process the completed entries.
+ * 4. If an entry is still waiting on hardware, list_add_tail() it to
+ * the work_list.
+ * 5. Repeat until no more descriptors remain.
+ */
+ do {
+ rc = irq_process_work_list(irq_entry, &processed);
+ if (rc != 0) {
+ retry++;
+ continue;
+ }
+
+ rc = irq_process_pending_llist(irq_entry, &processed);
+ } while (rc != 0 && retry != 10);
+
+ idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+
+ if (processed == 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
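+
+/*
+ * Producer-side pairing: submitters push descriptors onto the lockless
+ * pending_llist that idxd_wq_thread() drains above (see
+ * idxd_submit_desc() in submit.c):
+ *
+ * llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
+ *
+ * llist_add() is safe for multiple concurrent producers, so only the
+ * single consumer needs the drain-then-iterate scheme described in
+ * idxd_wq_thread().
+ */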
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
new file mode 100644
index 000000000000..a39e7ae6b3d9
--- /dev/null
+++ b/drivers/dma/idxd/registers.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_REGISTERS_H_
+#define _IDXD_REGISTERS_H_
+
+/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+
+#define IDXD_MMIO_BAR 0
+#define IDXD_WQ_BAR 2
+#define IDXD_PORTAL_SIZE 0x4000
+
+/* MMIO Device BAR0 Registers */
+#define IDXD_VER_OFFSET 0x00
+#define IDXD_VER_MAJOR_MASK 0xf0
+#define IDXD_VER_MINOR_MASK 0x0f
+#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4)
+#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK)
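+/*
+ * Example: a (hypothetical) version register value of 0x21 decodes to
+ * GET_IDXD_VER_MAJOR() == 2 and GET_IDXD_VER_MINOR() == 1.
+ */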
+
+union gen_cap_reg {
+ struct {
+ u64 block_on_fault:1;
+ u64 overlap_copy:1;
+ u64 cache_control_mem:1;
+ u64 cache_control_cache:1;
+ u64 rsvd:3;
+ u64 int_handle_req:1;
+ u64 dest_readback:1;
+ u64 drain_readback:1;
+ u64 rsvd2:6;
+ u64 max_xfer_shift:5;
+ u64 max_batch_shift:4;
+ u64 max_ims_mult:6;
+ u64 config_en:1;
+ u64 max_descs_per_engine:8;
+ u64 rsvd3:24;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GENCAP_OFFSET 0x10
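+
+/*
+ * Capability registers are read as raw 64-bit values and decoded
+ * through the union bitfields. A minimal sketch of the expected read
+ * path (reader code assumed, not defined in this header):
+ *
+ * union gen_cap_reg gen_cap;
+ *
+ * gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+ * max_xfer_bytes = 1ULL << gen_cap.max_xfer_shift;
+ */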
+
+union wq_cap_reg {
+ struct {
+ u64 total_wq_size:16;
+ u64 num_wqs:8;
+ u64 rsvd:24;
+ u64 shared_mode:1;
+ u64 dedicated_mode:1;
+ u64 rsvd2:1;
+ u64 priority:1;
+ u64 occupancy:1;
+ u64 occupancy_int:1;
+ u64 rsvd3:10;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_WQCAP_OFFSET 0x20
+
+union group_cap_reg {
+ struct {
+ u64 num_groups:8;
+ u64 total_tokens:8;
+ u64 token_en:1;
+ u64 token_limit:1;
+ u64 rsvd:46;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GRPCAP_OFFSET 0x30
+
+union engine_cap_reg {
+ struct {
+ u64 num_engines:8;
+ u64 rsvd:56;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_ENGCAP_OFFSET 0x38
+
+#define IDXD_OPCAP_NOOP 0x0001
+#define IDXD_OPCAP_BATCH 0x0002
+#define IDXD_OPCAP_MEMMOVE 0x0008
+struct opcap {
+ u64 bits[4];
+};
+
+#define IDXD_OPCAP_OFFSET 0x40
+
+#define IDXD_TABLE_OFFSET 0x60
+union offsets_reg {
+ struct {
+ u64 grpcfg:16;
+ u64 wqcfg:16;
+ u64 msix_perm:16;
+ u64 ims:16;
+ u64 perfmon:16;
+ u64 rsvd:48;
+ };
+ u64 bits[2];
+} __packed;
+
+#define IDXD_GENCFG_OFFSET 0x80
+union gencfg_reg {
+ struct {
+ u32 token_limit:8;
+ u32 rsvd:4;
+ u32 user_int_en:1;
+ u32 rsvd2:19;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENCTRL_OFFSET 0x88
+union genctrl_reg {
+ struct {
+ u32 softerr_int_en:1;
+ u32 rsvd:31;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENSTATS_OFFSET 0x90
+union gensts_reg {
+ struct {
+ u32 state:2;
+ u32 reset_type:2;
+ u32 rsvd:28;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_device_status_state {
+ IDXD_DEVICE_STATE_DISABLED = 0,
+ IDXD_DEVICE_STATE_ENABLED,
+ IDXD_DEVICE_STATE_DRAIN,
+ IDXD_DEVICE_STATE_HALT,
+};
+
+enum idxd_device_reset_type {
+ IDXD_DEVICE_RESET_SOFTWARE = 0,
+ IDXD_DEVICE_RESET_FLR,
+ IDXD_DEVICE_RESET_WARM,
+ IDXD_DEVICE_RESET_COLD,
+};
+
+#define IDXD_INTCAUSE_OFFSET 0x98
+#define IDXD_INTC_ERR 0x01
+#define IDXD_INTC_CMD 0x02
+#define IDXD_INTC_OCCUPY 0x04
+#define IDXD_INTC_PERFMON_OVFL 0x08
+
+#define IDXD_CMD_OFFSET 0xa0
+union idxd_command_reg {
+ struct {
+ u32 operand:20;
+ u32 cmd:5;
+ u32 rsvd:6;
+ u32 int_req:1;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_cmd {
+ IDXD_CMD_ENABLE_DEVICE = 1,
+ IDXD_CMD_DISABLE_DEVICE,
+ IDXD_CMD_DRAIN_ALL,
+ IDXD_CMD_ABORT_ALL,
+ IDXD_CMD_RESET_DEVICE,
+ IDXD_CMD_ENABLE_WQ,
+ IDXD_CMD_DISABLE_WQ,
+ IDXD_CMD_DRAIN_WQ,
+ IDXD_CMD_ABORT_WQ,
+ IDXD_CMD_RESET_WQ,
+ IDXD_CMD_DRAIN_PASID,
+ IDXD_CMD_ABORT_PASID,
+ IDXD_CMD_REQUEST_INT_HANDLE,
+};
+
+#define IDXD_CMDSTS_OFFSET 0xa8
+union cmdsts_reg {
+ struct {
+ u8 err;
+ u16 result;
+ u8 rsvd:7;
+ u8 active:1;
+ };
+ u32 bits;
+} __packed;
+#define IDXD_CMDSTS_ACTIVE 0x80000000
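+
+/*
+ * Sketch of the command protocol implied by the layout above (the
+ * actual helpers are assumed to live in device.c): compose the command
+ * union, write the raw bits, then poll CMDSTS until ACTIVE clears:
+ *
+ * union idxd_command_reg cmd = { .cmd = IDXD_CMD_ENABLE_WQ,
+ *        .operand = wq->id };
+ *
+ * iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+ * while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
+ *        IDXD_CMDSTS_ACTIVE)
+ *        cpu_relax();
+ */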
+
+enum idxd_cmdsts_err {
+ IDXD_CMDSTS_SUCCESS = 0,
+ IDXD_CMDSTS_INVAL_CMD,
+ IDXD_CMDSTS_INVAL_WQIDX,
+ IDXD_CMDSTS_HW_ERR,
+ /* enable device errors */
+ IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
+ IDXD_CMDSTS_ERR_CONFIG,
+ IDXD_CMDSTS_ERR_BUSMASTER_EN,
+ IDXD_CMDSTS_ERR_PASID_INVAL,
+ IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
+ IDXD_CMDSTS_ERR_GRP_CONFIG,
+ IDXD_CMDSTS_ERR_GRP_CONFIG2,
+ IDXD_CMDSTS_ERR_GRP_CONFIG3,
+ IDXD_CMDSTS_ERR_GRP_CONFIG4,
+ /* enable wq errors */
+ IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
+ IDXD_CMDSTS_ERR_WQ_ENABLED,
+ IDXD_CMDSTS_ERR_WQ_SIZE,
+ IDXD_CMDSTS_ERR_WQ_PRIOR,
+ IDXD_CMDSTS_ERR_WQ_MODE,
+ IDXD_CMDSTS_ERR_BOF_EN,
+ IDXD_CMDSTS_ERR_PASID_EN,
+ IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
+ IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
+ /* disable device errors */
+ IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
+ /* disable WQ, drain WQ, abort WQ, reset WQ */
+ IDXD_CMDSTS_ERR_DEV_NOT_EN,
+ /* request interrupt handle */
+ IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
+ IDXD_CMDSTS_ERR_NO_HANDLE,
+};
+
+#define IDXD_SWERR_OFFSET 0xc0
+#define IDXD_SWERR_VALID 0x00000001
+#define IDXD_SWERR_OVERFLOW 0x00000002
+#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
+union sw_err_reg {
+ struct {
+ u64 valid:1;
+ u64 overflow:1;
+ u64 desc_valid:1;
+ u64 wq_idx_valid:1;
+ u64 batch:1;
+ u64 fault_rw:1;
+ u64 priv:1;
+ u64 rsvd:1;
+ u64 error:8;
+ u64 wq_idx:8;
+ u64 rsvd2:8;
+ u64 operation:8;
+ u64 pasid:20;
+ u64 rsvd3:4;
+
+ u64 batch_idx:16;
+ u64 rsvd4:16;
+ u64 invalid_flags:32;
+
+ u64 fault_addr;
+
+ u64 rsvd5;
+ };
+ u64 bits[4];
+} __packed;
+
+union msix_perm {
+ struct {
+ u32 rsvd:2;
+ u32 ignore:1;
+ u32 pasid_en:1;
+ u32 rsvd2:8;
+ u32 pasid:20;
+ };
+ u32 bits;
+} __packed;
+
+union group_flags {
+ struct {
+ u32 tc_a:3;
+ u32 tc_b:3;
+ u32 rsvd:1;
+ u32 use_token_limit:1;
+ u32 tokens_reserved:8;
+ u32 rsvd2:4;
+ u32 tokens_allowed:8;
+ u32 rsvd3:4;
+ };
+ u32 bits;
+} __packed;
+
+struct grpcfg {
+ u64 wqs[4];
+ u64 engines;
+ union group_flags flags;
+} __packed;
+
+union wqcfg {
+ struct {
+ /* bytes 0-3 */
+ u16 wq_size;
+ u16 rsvd;
+
+ /* bytes 4-7 */
+ u16 wq_thresh;
+ u16 rsvd1;
+
+ /* bytes 8-11 */
+ u32 mode:1; /* shared or dedicated */
+ u32 bof:1; /* block on fault */
+ u32 rsvd2:2;
+ u32 priority:4;
+ u32 pasid:20;
+ u32 pasid_en:1;
+ u32 priv:1;
+ u32 rsvd3:2;
+
+ /* bytes 12-15 */
+ u32 max_xfer_shift:5;
+ u32 max_batch_shift:4;
+ u32 rsvd4:23;
+
+ /* bytes 16-19 */
+ u16 occupancy_inth;
+ u16 occupancy_table_sel:1;
+ u16 rsvd5:15;
+
+ /* bytes 20-23 */
+ u16 occupancy_limit;
+ u16 occupancy_int_en:1;
+ u16 rsvd6:15;
+
+ /* bytes 24-27 */
+ u16 occupancy;
+ u16 occupancy_int:1;
+ u16 rsvd7:12;
+ u16 mode_support:1;
+ u16 wq_state:2;
+
+ /* bytes 28-31 */
+ u32 rsvd8;
+ };
+ u32 bits[8];
+} __packed;
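+
+/*
+ * The WQ config table is programmed one 32-bit word at a time through
+ * the bits[] view of the union. Sketch (wq_cfg_offset is a placeholder
+ * for the per-WQ offset derived from IDXD_TABLE_OFFSET):
+ *
+ * for (i = 0; i < 8; i++)
+ *        iowrite32(wq->wqcfg.bits[i],
+ *        idxd->reg_base + wq_cfg_offset + i * sizeof(u32));
+ */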
+#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644
index 000000000000..45a0c5869a0a
--- /dev/null
+++ b/drivers/dma/idxd/submit.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+ struct idxd_desc *desc;
+ int idx;
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+
+ if (optype == IDXD_OP_BLOCK)
+ percpu_down_read(&wq->submit_lock);
+ else if (!percpu_down_read_trylock(&wq->submit_lock))
+ return ERR_PTR(-EBUSY);
+
+ if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+ int rc;
+
+ if (optype == IDXD_OP_NONBLOCK) {
+ percpu_up_read(&wq->submit_lock);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ percpu_up_read(&wq->submit_lock);
+ percpu_down_write(&wq->submit_lock);
+ rc = wait_event_interruptible(wq->submit_waitq,
+ atomic_add_unless(&wq->dq_count,
+ 1, wq->size) ||
+ idxd->state != IDXD_DEV_ENABLED);
+ percpu_up_write(&wq->submit_lock);
+ if (rc < 0)
+ return ERR_PTR(-EINTR);
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+ } else {
+ percpu_up_read(&wq->submit_lock);
+ }
+
+ idx = sbitmap_get(&wq->sbmap, 0, false);
+ if (idx < 0) {
+ atomic_dec(&wq->dq_count);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ desc = wq->descs[idx];
+ memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+ memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+ return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ atomic_dec(&wq->dq_count);
+
+ sbitmap_clear_bit(&wq->sbmap, desc->id);
+ wake_up(&wq->submit_waitq);
+}
+
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int vec = desc->hw->int_handle;
+ void __iomem *portal;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -EIO;
+
+ portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+ /*
+ * The wmb() flushes writes to coherent DMA data before possibly
+ * triggering a DMA read. The wmb() is necessary even on UP because
+ * the recipient is a device.
+ */
+ wmb();
+ iosubmit_cmds512(portal, desc->hw, 1);
+
+ /*
+ * Add the descriptor to the lockless pending list of the irq_entry
+ * that the descriptor was assigned to.
+ */
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+ llist_add(&desc->llnode,
+ &idxd->irq_entries[vec].pending_llist);
+
+ return 0;
+}
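+
+/*
+ * Putting the helpers above together, a kernel client submission looks
+ * roughly like this (illustrative sketch; the dsa_hw_desc field names
+ * come from uapi/linux/idxd.h):
+ *
+ * struct idxd_desc *desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ *
+ * if (IS_ERR(desc))
+ *        return PTR_ERR(desc);
+ * desc->hw->opcode = DSA_OPCODE_MEMMOVE;
+ * desc->hw->src_addr = src_dma;
+ * desc->hw->dst_addr = dst_dma;
+ * desc->hw->xfer_size = len;
+ * rc = idxd_submit_desc(wq, desc);
+ */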
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
new file mode 100644
index 000000000000..6d907fe150aa
--- /dev/null
+++ b/drivers/dma/idxd/sysfs.c
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+static char *idxd_wq_type_names[] = {
+ [IDXD_WQT_NONE] = "none",
+ [IDXD_WQT_KERNEL] = "kernel",
+ [IDXD_WQT_USER] = "user",
+};
+
+static void idxd_conf_device_release(struct device *dev)
+{
+ dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+}
+
+static struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+};
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return dev ? dev->type == &dsa_device_type : false;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return dev ? dev->type == &idxd_wq_device_type : false;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL &&
+ strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_USER;
+}
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ int matched = 0;
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY)
+ return 0;
+ matched = 1;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state < IDXD_DEV_CONF_READY)
+ return 0;
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "%s not disabled\n", dev_name(dev));
+ return 0;
+ }
+ matched = 1;
+ }
+
+ if (matched)
+ dev_dbg(dev, "%s matched\n", dev_name(dev));
+
+ return matched;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY) {
+ dev_warn(dev, "Device not ready for config\n");
+ return -EBUSY;
+ }
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENXIO;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+
+ /* Perform IDXD configuration and enabling */
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device config failed: %d\n", rc);
+ return rc;
+ }
+
+ /* start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device enable failed: %d\n", rc);
+ return rc;
+ }
+
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_info(dev, "Device %s enabled\n", dev_name(dev));
+
+ rc = idxd_register_dma_device(idxd);
+ if (rc < 0) {
+ dev_dbg(dev, "Failed to register dmaengine device\n");
+ return rc;
+ }
+ return 0;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Enabling while device not enabled.\n");
+ return -EPERM;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+ return -EBUSY;
+ }
+
+ if (!wq->group) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ not attached to group.\n");
+ return -EINVAL;
+ }
+
+ if (strlen(wq->name) == 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ name not set.\n");
+ return -EINVAL;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ resource alloc failed\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Writing WQ %d config failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d enabling failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ dev_warn(dev, "IDXD wq disable failed\n");
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+
+ wq->client_count = 0;
+
+ dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+ if (is_idxd_wq_dmaengine(wq)) {
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "DMA channel register failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ } else if (is_idxd_wq_cdev(wq)) {
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Cdev creation failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ unsigned long flags;
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+ if (wq->state == IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ return;
+ }
+
+ if (is_idxd_wq_dmaengine(wq))
+ idxd_unregister_dma_channel(wq);
+ else if (is_idxd_wq_cdev(wq))
+ idxd_wq_del_cdev(wq);
+
+ if (idxd_wq_refcount(wq))
+ dev_warn(dev, "Clients has claim on wq %d: %d\n",
+ wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_wq_disable(wq);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ idxd_wq_free_resources(wq);
+ wq->client_count = 0;
+ mutex_unlock(&wq->wq_lock);
+
+ if (rc < 0)
+ dev_warn(dev, "Failed to disable %s: %d\n",
+ dev_name(&wq->conf_dev), rc);
+ else
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+ /* disable workqueue here */
+ if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ disable_wq(wq);
+ } else if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+ int i;
+
+ dev_dbg(dev, "%s removing dev %s\n", __func__,
+ dev_name(&idxd->conf_dev));
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+ dev_warn(dev, "Active wq %d on disable %s.\n", i,
+ dev_name(&idxd->conf_dev));
+ device_release_driver(&wq->conf_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
+ if (rc < 0)
+ dev_warn(dev, "Device disable failed\n");
+ else
+ dev_info(dev, "Device %s disabled\n", dev_name(dev));
+
+ }
+
+ return 0;
+}
+
+static void idxd_config_bus_shutdown(struct device *dev)
+{
+ dev_dbg(dev, "%s called\n", __func__);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+ .shutdown = idxd_config_bus_shutdown,
+};
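+
+/*
+ * With this bus registered, configuration is driven from userspace via
+ * sysfs. An illustrative (not normative) shell session to configure
+ * and enable a device and one wq:
+ *
+ * echo 0 > /sys/bus/dsa/devices/wq0.0/group_id
+ * echo 16 > /sys/bus/dsa/devices/wq0.0/size
+ * echo "user" > /sys/bus/dsa/devices/wq0.0/type
+ * echo "app1" > /sys/bus/dsa/devices/wq0.0/name
+ * echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
+ * echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
+ */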
+
+static struct bus_type *idxd_bus_types[] = {
+ &dsa_bus_type
+};
+
+static struct idxd_device_driver dsa_drv = {
+ .drv = {
+ .name = "dsa",
+ .bus = &dsa_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
+static struct idxd_device_driver *idxd_drvs[] = {
+ &dsa_drv
+};
+
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+{
+ return idxd_bus_types[idxd->type];
+}
+
+static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ return &dsa_device_type;
+ else
+ return NULL;
+}
+
+/* IDXD generic driver setup */
+int idxd_register_driver(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = driver_register(&idxd_drvs[i]->drv);
+ if (rc < 0)
+ goto drv_fail;
+ }
+
+ return 0;
+
+drv_fail:
+ while (--i >= 0)
+ driver_unregister(&idxd_drvs[i]->drv);
+ return rc;
+}
+
+void idxd_unregister_driver(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ driver_unregister(&idxd_drvs[i]->drv);
+}
+
+/* IDXD engine attributes */
+static ssize_t engine_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+
+ if (engine->group)
+ return sprintf(buf, "%d\n", engine->group->id);
+ else
+ return sprintf(buf, "%d\n", -1);
+}
+
+static ssize_t engine_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_device *idxd = engine->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (engine->group) {
+ engine->group->num_engines--;
+ engine->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = engine->group;
+
+ if (prevg)
+ prevg->num_engines--;
+ engine->group = group;
+ engine->group->num_engines++;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_engine_group =
+ __ATTR(group_id, 0644, engine_group_id_show,
+ engine_group_id_store);
+
+static struct attribute *idxd_engine_attributes[] = {
+ &dev_attr_engine_group.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_engine_attribute_group = {
+ .attrs = idxd_engine_attributes,
+};
+
+static const struct attribute_group *idxd_engine_attribute_groups[] = {
+ &idxd_engine_attribute_group,
+ NULL,
+};
+
+/* Group attributes */
+
+static void idxd_set_free_tokens(struct idxd_device *idxd)
+{
+ int i, tokens;
+
+ for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *g = &idxd->groups[i];
+
+ tokens += g->tokens_reserved;
+ }
+
+ idxd->nr_tokens = idxd->max_tokens - tokens;
+}
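+
+/*
+ * Worked example with hypothetical numbers: if max_tokens == 96 and
+ * two groups each reserve 24 tokens, nr_tokens becomes 96 - 48 = 48,
+ * which bounds subsequent tokens_reserved writes below.
+ */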
+
+static ssize_t group_tokens_reserved_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_reserved);
+}
+
+static ssize_t group_tokens_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ if (val > idxd->max_tokens)
+ return -EINVAL;
+
+ if (val > idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_reserved = val;
+ idxd_set_free_tokens(idxd);
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_reserved =
+ __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
+ group_tokens_reserved_store);
+
+static ssize_t group_tokens_allowed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_allowed);
+}
+
+static ssize_t group_tokens_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+ if (val < 4 * group->num_engines ||
+ val > group->tokens_reserved + idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_allowed = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_allowed =
+ __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
+ group_tokens_allowed_store);
+
+static ssize_t group_use_token_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->use_token_limit);
+}
+
+static ssize_t group_use_token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ group->use_token_limit = !!val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_use_token_limit =
+ __ATTR(use_token_limit, 0644, group_use_token_limit_show,
+ group_use_token_limit_store);
+
+static ssize_t group_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ if (!engine->group)
+ continue;
+
+ if (engine->group->id == group->id)
+ rc += sprintf(tmp + rc, "engine%d.%d ",
+ idxd->id, engine->id);
+ }
+
+ /* drop the trailing space, if any, before terminating with '\n' */
+ if (rc)
+ rc--;
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_engines =
+ __ATTR(engines, 0444, group_engines_show, NULL);
+
+static ssize_t group_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (!wq->group)
+ continue;
+
+ if (wq->group->id == group->id)
+ rc += sprintf(tmp + rc, "wq%d.%d ",
+ idxd->id, wq->id);
+ }
+
+ /* drop the trailing space, if any, before terminating with '\n' */
+ if (rc)
+ rc--;
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_work_queues =
+ __ATTR(work_queues, 0444, group_work_queues_show, NULL);
+
+static ssize_t group_traffic_class_a_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_a);
+}
+
+static ssize_t group_traffic_class_a_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_a = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_a =
+ __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
+ group_traffic_class_a_store);
+
+static ssize_t group_traffic_class_b_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_b);
+}
+
+static ssize_t group_traffic_class_b_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_b = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_b =
+ __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
+ group_traffic_class_b_store);
+
+static struct attribute *idxd_group_attributes[] = {
+ &dev_attr_group_work_queues.attr,
+ &dev_attr_group_engines.attr,
+ &dev_attr_group_use_token_limit.attr,
+ &dev_attr_group_tokens_allowed.attr,
+ &dev_attr_group_tokens_reserved.attr,
+ &dev_attr_group_traffic_class_a.attr,
+ &dev_attr_group_traffic_class_b.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_group_attribute_group = {
+ .attrs = idxd_group_attributes,
+};
+
+static const struct attribute_group *idxd_group_attribute_groups[] = {
+ &idxd_group_attribute_group,
+ NULL,
+};
+
+/* IDXD work queue attribs */
+static ssize_t wq_clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->client_count);
+}
+
+static struct device_attribute dev_attr_wq_clients =
+ __ATTR(clients, 0444, wq_clients_show, NULL);
+
+static ssize_t wq_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->state) {
+ case IDXD_WQ_DISABLED:
+ return sprintf(buf, "disabled\n");
+ case IDXD_WQ_ENABLED:
+ return sprintf(buf, "enabled\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+
+static struct device_attribute dev_attr_wq_state =
+ __ATTR(state, 0444, wq_state_show, NULL);
+
+static ssize_t wq_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->group)
+ return sprintf(buf, "%u\n", wq->group->id);
+ else
+ return sprintf(buf, "-1\n");
+}
+
+static ssize_t wq_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (wq->group) {
+ wq->group->num_wqs--;
+ wq->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = wq->group;
+
+ if (prevg)
+ prevg->num_wqs--;
+ wq->group = group;
+ group->num_wqs++;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_group_id =
+ __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
+
+static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n",
+ wq_dedicated(wq) ? "dedicated" : "shared");
+}
+
+static ssize_t wq_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (sysfs_streq(buf, "dedicated")) {
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ wq->threshold = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_mode =
+ __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
+
+static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->size);
+}
+
+static ssize_t wq_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long size;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &size);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (size > idxd->max_wq_size)
+ return -EINVAL;
+
+ wq->size = size;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_size =
+ __ATTR(size, 0644, wq_size_show, wq_size_store);
+
+static ssize_t wq_priority_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->priority);
+}
+
+static ssize_t wq_priority_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long prio;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &prio);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (prio > IDXD_MAX_PRIORITY)
+ return -EINVAL;
+
+ wq->priority = prio;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_priority =
+ __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+
+static ssize_t wq_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->type) {
+ case IDXD_WQT_KERNEL:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_KERNEL]);
+ case IDXD_WQT_USER:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_USER]);
+ case IDXD_WQT_NONE:
+ default:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_NONE]);
+ }
+}
+
+static ssize_t wq_type_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ enum idxd_wq_type old_type;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ old_type = wq->type;
+ if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+ wq->type = IDXD_WQT_KERNEL;
+ else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
+ wq->type = IDXD_WQT_USER;
+ else
+ wq->type = IDXD_WQT_NONE;
+
+ /* If we are changing queue type, clear the name */
+ if (wq->type != old_type)
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_type =
+ __ATTR(type, 0644, wq_type_show, wq_type_store);
+
+static ssize_t wq_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n", wq->name);
+}
+
+static ssize_t wq_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
+ return -EINVAL;
+
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+ strncpy(wq->name, buf, WQ_NAME_SIZE);
+ strreplace(wq->name, '\n', '\0');
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_name =
+ __ATTR(name, 0644, wq_name_show, wq_name_store);
+
+static ssize_t wq_cdev_minor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+}
+
+static struct device_attribute dev_attr_wq_cdev_minor =
+ __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+
+static struct attribute *idxd_wq_attributes[] = {
+ &dev_attr_wq_clients.attr,
+ &dev_attr_wq_state.attr,
+ &dev_attr_wq_group_id.attr,
+ &dev_attr_wq_mode.attr,
+ &dev_attr_wq_size.attr,
+ &dev_attr_wq_priority.attr,
+ &dev_attr_wq_type.attr,
+ &dev_attr_wq_name.attr,
+ &dev_attr_wq_cdev_minor.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_wq_attribute_group = {
+ .attrs = idxd_wq_attributes,
+};
+
+static const struct attribute_group *idxd_wq_attribute_groups[] = {
+ &idxd_wq_attribute_group,
+ NULL,
+};
+
+/* IDXD device attribs */
+static ssize_t max_work_queues_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wq_size);
+}
+static DEVICE_ATTR_RO(max_work_queues_size);
+
+static ssize_t max_groups_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_groups);
+}
+static DEVICE_ATTR_RO(max_groups);
+
+static ssize_t max_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wqs);
+}
+static DEVICE_ATTR_RO(max_work_queues);
+
+static ssize_t max_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_engines);
+}
+static DEVICE_ATTR_RO(max_engines);
+
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t max_batch_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_batch_size);
+}
+static DEVICE_ATTR_RO(max_batch_size);
+
+static ssize_t max_transfer_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+}
+static DEVICE_ATTR_RO(max_transfer_size);
+
+static ssize_t op_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+}
+static DEVICE_ATTR_RO(op_cap);
+
+static ssize_t configurable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n",
+ test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+}
+static DEVICE_ATTR_RO(configurable);
+
+static ssize_t clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long flags;
+ int count = 0, i;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ count += wq->client_count;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return sprintf(buf, "%d\n", count);
+}
+static DEVICE_ATTR_RO(clients);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ switch (idxd->state) {
+ case IDXD_DEV_DISABLED:
+ case IDXD_DEV_CONF_READY:
+ return sprintf(buf, "disabled\n");
+ case IDXD_DEV_ENABLED:
+ return sprintf(buf, "enabled\n");
+ case IDXD_DEV_HALTED:
+ return sprintf(buf, "halted\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ int i, out = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < 4; i++)
+ out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ out--;
+ out += sprintf(buf + out, "\n");
+ return out;
+}
+static DEVICE_ATTR_RO(errors);
+
+static ssize_t max_tokens_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_tokens);
+}
+static DEVICE_ATTR_RO(max_tokens);
+
+static ssize_t token_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->token_limit);
+}
+
+static ssize_t token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (!idxd->hw.group_cap.token_limit)
+ return -EPERM;
+
+ if (val > idxd->hw.group_cap.total_tokens)
+ return -EINVAL;
+
+ idxd->token_limit = val;
+ return count;
+}
+static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t cdev_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->major);
+}
+static DEVICE_ATTR_RO(cdev_major);
+
+static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_max_groups.attr,
+ &dev_attr_max_work_queues.attr,
+ &dev_attr_max_work_queues_size.attr,
+ &dev_attr_max_engines.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_max_batch_size.attr,
+ &dev_attr_max_transfer_size.attr,
+ &dev_attr_op_cap.attr,
+ &dev_attr_configurable.attr,
+ &dev_attr_clients.attr,
+ &dev_attr_state.attr,
+ &dev_attr_errors.attr,
+ &dev_attr_max_tokens.attr,
+ &dev_attr_token_limit.attr,
+ &dev_attr_cdev_major.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_device_attribute_group = {
+ .attrs = idxd_device_attributes,
+};
+
+static const struct attribute_group *idxd_attribute_groups[] = {
+ &idxd_device_attribute_group,
+ NULL,
+};
+
+static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ engine->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&engine->conf_dev, "engine%d.%d",
+ idxd->id, engine->id);
+ engine->conf_dev.bus = idxd_get_bus_type(idxd);
+ engine->conf_dev.groups = idxd_engine_attribute_groups;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ dev_dbg(dev, "Engine device register: %s\n",
+ dev_name(&engine->conf_dev));
+ rc = device_register(&engine->conf_dev);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ group->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&group->conf_dev, "group%d.%d",
+ idxd->id, group->id);
+ group->conf_dev.bus = idxd_get_bus_type(idxd);
+ group->conf_dev.groups = idxd_group_attribute_groups;
+ group->conf_dev.type = &idxd_group_device_type;
+ dev_dbg(dev, "Group device register: %s\n",
+ dev_name(&group->conf_dev));
+ rc = device_register(&group->conf_dev);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ wq->conf_dev.bus = idxd_get_bus_type(idxd);
+ wq->conf_dev.groups = idxd_wq_attribute_groups;
+ wq->conf_dev.type = &idxd_wq_device_type;
+ dev_dbg(dev, "WQ device register: %s\n",
+ dev_name(&wq->conf_dev));
+ rc = device_register(&wq->conf_dev);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ char devname[IDXD_NAME_SIZE];
+
+ sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+ idxd->conf_dev.parent = dev;
+ dev_set_name(&idxd->conf_dev, "%s", devname);
+ idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+ idxd->conf_dev.groups = idxd_attribute_groups;
+ idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+ dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+ rc = device_register(&idxd->conf_dev);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+
+ rc = idxd_setup_device_sysfs(idxd);
+ if (rc < 0) {
+ dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_wq_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_group_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_engine_sysfs(idxd);
+ if (rc < 0) {
+ /* unregister conf dev */
+ dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+
+ device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = bus_register(idxd_bus_types[i]);
+ if (rc < 0)
+ goto bus_err;
+ }
+
+ return 0;
+
+bus_err:
+ for (; i > 0; i--)
+ bus_unregister(idxd_bus_types[i]);
+ return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ bus_unregister(idxd_bus_types[i]);
+}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c27e206a764c..066b21a32232 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
return;
}
sdmac->desc = desc = to_sdma_desc(&vd->tx);
- /*
- * Do not delete the node in desc_issued list in cyclic mode, otherwise
- * the desc allocated will never be freed in vchan_dma_desc_free_list
- */
- if (!(sdmac->flags & IMX_DMA_SG_LOOP))
- list_del(&vd->node);
+
+ list_del(&vd->node);
sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work)
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
- sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
sdmac->context_loaded = false;
}
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
sdma_disable_channel(chan);
- if (sdmac->desc)
+ if (sdmac->desc) {
+ vchan_terminate_vdesc(&sdmac->desc->vd);
+ sdmac->desc = NULL;
schedule_work(&sdmac->terminate_worker);
+ }
+
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
return 0;
}
@@ -1324,7 +1327,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel_async(chan);
+ sdma_terminate_all(chan);
sdma_channel_synchronize(chan);
@@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_desc *desc;
+ struct sdma_desc *desc = NULL;
u32 residue;
struct virt_dma_desc *vd;
enum dma_status ret;
@@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
return ret;
spin_lock_irqsave(&sdmac->vc.lock, flags);
+
vd = vchan_find_desc(&sdmac->vc, cookie);
- if (vd) {
+ if (vd)
desc = to_sdma_desc(&vd->tx);
+ else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+ desc = sdmac->desc;
+
+ if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP)
residue = (desc->num_bd - desc->buf_ptail) *
desc->period_len - desc->chn_real_count;
else
residue = desc->chn_count - desc->chn_real_count;
- } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
- residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
} else {
residue = 0;
}
+
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
@@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+ sdma->dma_device.device_terminate_all = sdma_terminate_all;
sdma->dma_device.device_synchronize = sdma_channel_synchronize;
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a422a8b43cf..18c011e57592 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
SZ_2M, &descs->hw, flags);
- if (!descs->virt && (i > 0)) {
+ if (!descs->virt) {
int idx;
for (idx = 0; idx < i; idx++) {
+ descs = &ioat_chan->descs[idx];
dma_free_coherent(to_dev(ioat_chan), SZ_2M,
descs->virt, descs->hw);
descs->virt = NULL;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index a6a6dc432db8..60e9afbb896c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
ioat_kobject_del(ioat_dma);
dma_async_device_unregister(dma);
-
- dma_pool_destroy(ioat_dma->completion_pool);
-
- INIT_LIST_HEAD(&dma->channels);
}
/**
@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
- ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+ ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
return;
ioat_stop(ioat_chan);
- ioat_reset_hw(ioat_chan);
- /* Put LTR to idle */
- if (ioat_dma->version >= IOAT_VER_3_4)
- writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
- ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+ if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+ ioat_reset_hw(ioat_chan);
+
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
.err_handler = &ioat_err_handler,
};
+static void release_ioatdma(struct dma_device *device)
+{
+ struct ioatdma_device *d = to_ioatdma_device(device);
+ int i;
+
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(d->idx[i]);
+
+ dma_pool_destroy(d->completion_pool);
+ kfree(d);
+}
+
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
- struct device *dev = &pdev->dev;
- struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
d->pdev = pdev;
d->reg_base = iobase;
+ d->dma_dev.device_release = release_ioatdma;
return d;
}
@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
+ ioat_shutdown(pdev);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index adecea51814f..c5c1aa0dcaed 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
c = p->vchan;
if (c && (tc1 & BIT(i))) {
spin_lock_irqsave(&c->vc.lock, flags);
- vchan_cookie_complete(&p->ds_run->vd);
- p->ds_done = p->ds_run;
- p->ds_run = NULL;
+ if (p->ds_run != NULL) {
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ p->ds_run = NULL;
+ }
spin_unlock_irqrestore(&c->vc.lock, flags);
}
if (c && (tc2 & BIT(i))) {
@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
return -EAGAIN;
+ /* Avoid losing track of ds_run if a transaction is in flight */
+ if (c->phy->ds_run)
+ return -EAGAIN;
+
if (vd) {
struct k3_dma_desc_sw *ds =
container_of(vd, struct k3_dma_desc_sw, vd);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index c20e6bd4e298..29f1223b285a 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
vchan_get_all_descriptors(&c->vc, &head);
- vchan_dma_desc_free_list(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index e3850f04f676..157c959311ea 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -750,7 +750,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
}
xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+ if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto disable_reg_clk;
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index c2d779daa4b5..b2c2b5e8093c 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_dma.h>
+#include "dmaengine.h"
+
static LIST_HEAD(of_dma_list);
static DEFINE_MUTEX(of_dma_lock);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 023f951189a7..c683051257fd 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
}
vchan_get_all_descriptors(&vchan->vc, &head);
- vchan_dma_desc_free_list(&vchan->vc, &head);
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6cce9ef61b29..88b884cbb7c1 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
- pm_runtime_disable(dev);
-
- if (!pm_runtime_status_suspended(dev)) {
- /* amba did not disable the clock */
- amba_pclk_disable(pcdev);
- }
+ pm_runtime_force_suspend(dev);
amba_pclk_unprepare(pcdev);
return 0;
@@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev)
if (ret)
return ret;
- if (!pm_runtime_status_suspended(dev))
- ret = amba_pclk_enable(pcdev);
-
- pm_runtime_enable(dev);
+ pm_runtime_force_resume(dev);
return ret;
}
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
new file mode 100644
index 000000000000..db4c5fd453a9
--- /dev/null
+++ b/drivers/dma/plx_dma.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLX ExpressLane PEX PCI Switch DMA Engine driver
+ * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
+ * Copyright (c) 2019, GigaIO Networks, Inc
+ */
+
+#include "dmaengine.h"
+
+#include <linux/circ_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logan Gunthorpe");
+
+#define PLX_REG_DESC_RING_ADDR 0x214
+#define PLX_REG_DESC_RING_ADDR_HI 0x218
+#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C
+#define PLX_REG_DESC_RING_COUNT 0x220
+#define PLX_REG_DESC_RING_LAST_ADDR 0x224
+#define PLX_REG_DESC_RING_LAST_SIZE 0x228
+#define PLX_REG_PREF_LIMIT 0x234
+#define PLX_REG_CTRL 0x238
+#define PLX_REG_CTRL2 0x23A
+#define PLX_REG_INTR_CTRL 0x23C
+#define PLX_REG_INTR_STATUS 0x23E
+
+#define PLX_REG_PREF_LIMIT_PREF_FOUR 8
+
+#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0)
+#define PLX_REG_CTRL_ABORT BIT(1)
+#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2)
+#define PLX_REG_CTRL_START BIT(3)
+#define PLX_REG_CTRL_RING_STOP_MODE BIT(4)
+#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5)
+#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5)
+#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5)
+#define PLX_REG_CTRL_DESC_INVALID BIT(8)
+#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9)
+#define PLX_REG_CTRL_ABORT_DONE BIT(10)
+#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12)
+#define PLX_REG_CTRL_IN_PROGRESS BIT(30)
+
+#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
+ PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
+ PLX_REG_CTRL_ABORT_DONE | \
+ PLX_REG_CTRL_IMM_PAUSE_DONE)
+
+#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
+ PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
+ PLX_REG_CTRL_START | \
+ PLX_REG_CTRL_RESET_VAL)
+
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7
+
+#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0)
+#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1)
+#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3)
+#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4)
+#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5)
+
+#define PLX_REG_INTR_STATUS_ERROR BIT(0)
+#define PLX_REG_INTR_STATUS_INV_DESC BIT(1)
+#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2)
+#define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3)
+
+struct plx_dma_hw_std_desc {
+ __le32 flags_and_size;
+ __le16 dst_addr_hi;
+ __le16 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 src_addr_lo;
+};
+
+#define PLX_DESC_SIZE_MASK 0x7ffffff
+#define PLX_DESC_FLAG_VALID BIT(31)
+#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30)
+
+#define PLX_DESC_WB_SUCCESS BIT(30)
+#define PLX_DESC_WB_RD_FAIL BIT(29)
+#define PLX_DESC_WB_WR_FAIL BIT(28)
+
+#define PLX_DMA_RING_COUNT 2048
+
+struct plx_dma_desc {
+ struct dma_async_tx_descriptor txd;
+ struct plx_dma_hw_std_desc *hw;
+ u32 orig_size;
+};
+
+struct plx_dma_dev {
+ struct dma_device dma_dev;
+ struct dma_chan dma_chan;
+ struct pci_dev __rcu *pdev;
+ void __iomem *bar;
+ struct tasklet_struct desc_task;
+
+ spinlock_t ring_lock;
+ bool ring_active;
+ int head;
+ int tail;
+ struct plx_dma_hw_std_desc *hw_ring;
+ dma_addr_t hw_ring_dma;
+ struct plx_dma_desc **desc_ring;
+};
+
+static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
+{
+ return container_of(c, struct plx_dma_dev, dma_chan);
+}
+
+static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct plx_dma_desc, txd);
+}
+
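+/*
+ * head and tail are free-running indices; PLX_DMA_RING_COUNT is a power
+ * of two, so masking with (count - 1) wraps them onto the ring (for
+ * example, index 2049 maps to slot 1).
+ */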
+static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
+{
+ return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
+}
+
+static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+ u32 flags;
+
+ spin_lock_bh(&plxdev->ring_lock);
+
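+ /*
+ * On completion the hardware clears PLX_DESC_FLAG_VALID and writes
+ * back the status bits and the transferred byte count, so a
+ * descriptor that is still valid marks the end of the finished batch.
+ */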
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));
+
+ if (flags & PLX_DESC_FLAG_VALID)
+ break;
+
+ res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);
+
+ if (flags & PLX_DESC_WB_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (flags & PLX_DESC_WB_WR_FAIL)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else
+ res.result = DMA_TRANS_READ_FAILED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+
+ plx_dma_process_desc(plxdev);
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ res.residue = desc->orig_size;
+ res.result = DMA_TRANS_ABORTED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void __plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
+ return;
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ while (!time_after(jiffies, timeout)) {
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
+ break;
+
+ cpu_relax();
+ }
+
+ if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
+ dev_err(plxdev->dma_dev.dev,
+ "Timeout waiting for graceful pause!\n");
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+}
+
+static void plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ __plx_dma_stop(plxdev);
+
+ rcu_read_unlock();
+}
+
+static void plx_dma_desc_task(unsigned long data)
+{
+ struct plx_dma_dev *plxdev = (void *)data;
+
+ plx_dma_process_desc(plxdev);
+}
+
+static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
+ dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+ __acquires(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
+ struct plx_dma_desc *plxdesc;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ if (!plxdev->ring_active)
+ goto err_unlock;
+
+ if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
+ goto err_unlock;
+
+ if (len > PLX_DESC_SIZE_MASK)
+ goto err_unlock;
+
+ plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
+ plxdev->head++;
+
+ plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
+ plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
+ plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
+ plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));
+
+ plxdesc->orig_size = len;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ len |= PLX_DESC_FLAG_INT_WHEN_DONE;
+
+ plxdesc->hw->flags_and_size = cpu_to_le32(len);
+ plxdesc->txd.flags = flags;
+
+ /* return with the lock held, it will be released in tx_submit */
+
+ return &plxdesc->txd;
+
+err_unlock:
+ /*
+ * Keep sparse happy by restoring an even lock count on
+ * this lock.
+ */
+ __acquire(plxdev->ring_lock);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+ return NULL;
+}
+
+static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
+ __releases(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
+ struct plx_dma_desc *plxdesc = to_plx_desc(desc);
+ dma_cookie_t cookie;
+
+ cookie = dma_cookie_assign(desc);
+
+ /*
+ * Ensure the descriptor updates are visible to the dma device
+ * before setting the valid bit.
+ */
+ wmb();
+
+ plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ return cookie;
+}
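+
+/*
+ * Illustrative use from a generic dmaengine client (not part of this
+ * driver); prep returns with ring_lock held and dmaengine_submit(),
+ * which invokes plx_dma_tx_submit(), releases it:
+ *
+ *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+ *					DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(txd);
+ *	dma_async_issue_pending(chan);
+ */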
+
+static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ plx_dma_process_desc(plxdev);
+
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void plx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /*
+ * Ensure the valid bits are visible before starting the
+ * DMA engine.
+ */
+ wmb();
+
+ writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);
+
+ rcu_read_unlock();
+}
+
+static irqreturn_t plx_dma_isr(int irq, void *devid)
+{
+ struct plx_dma_dev *plxdev = devid;
+ u32 status;
+
+ status = readw(plxdev->bar + PLX_REG_INTR_STATUS);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
+ tasklet_schedule(&plxdev->desc_task);
+
+ writew(status, plxdev->bar + PLX_REG_INTR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
+{
+ struct plx_dma_desc *desc;
+ int i;
+
+ plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
+ sizeof(*plxdev->desc_ring), GFP_KERNEL);
+ if (!plxdev->desc_ring)
+ return -ENOMEM;
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ goto free_and_exit;
+
+ dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
+ desc->txd.tx_submit = plx_dma_tx_submit;
+ desc->hw = &plxdev->hw_ring[i];
+
+ plxdev->desc_ring[i] = desc;
+ }
+
+ return 0;
+
+free_and_exit:
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+ kfree(plxdev->desc_ring);
+ return -ENOMEM;
+}
+
+static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ int rc;
+
+ plxdev->head = plxdev->tail = 0;
+ plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
+ &plxdev->hw_ring_dma, GFP_KERNEL);
+ if (!plxdev->hw_ring)
+ return -ENOMEM;
+
+ rc = plx_dma_alloc_desc(plxdev);
+ if (rc)
+ goto out_free_hw_ring;
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ rc = -ENODEV;
+ goto out_free_hw_ring;
+ }
+
+ writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(upper_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+ writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
+
+ plxdev->ring_active = true;
+
+ rcu_read_unlock();
+
+ return PLX_DMA_RING_COUNT;
+
+out_free_hw_ring:
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+ return rc;
+}
+
+static void plx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ struct pci_dev *pdev;
+ int irq = -1;
+ int i;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ plx_dma_stop(plxdev);
+
+ rcu_read_lock();
+ pdev = rcu_dereference(plxdev->pdev);
+ if (pdev)
+ irq = pci_irq_vector(pdev, 0);
+ rcu_read_unlock();
+
+ if (irq > 0)
+ synchronize_irq(irq);
+
+ tasklet_kill(&plxdev->desc_task);
+
+ plx_dma_abort_desc(plxdev);
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+
+ kfree(plxdev->desc_ring);
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+}
+
+static void plx_dma_release(struct dma_device *dma_dev)
+{
+ struct plx_dma_dev *plxdev =
+ container_of(dma_dev, struct plx_dma_dev, dma_dev);
+
+ put_device(dma_dev->dev);
+ kfree(plxdev);
+}
+
+static int plx_dma_create(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev;
+ struct dma_device *dma;
+ struct dma_chan *chan;
+ int rc;
+
+ plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
+ if (!plxdev)
+ return -ENOMEM;
+
+ rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+ KBUILD_MODNAME, plxdev);
+ if (rc) {
+ kfree(plxdev);
+ return rc;
+ }
+
+ spin_lock_init(&plxdev->ring_lock);
+ tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
+ (unsigned long)plxdev);
+
+ RCU_INIT_POINTER(plxdev->pdev, pdev);
+ plxdev->bar = pcim_iomap_table(pdev)[0];
+
+ dma = &plxdev->dma_dev;
+ dma->chancnt = 1;
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma->dev = get_device(&pdev->dev);
+
+ dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = plx_dma_free_chan_resources;
+ dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
+ dma->device_issue_pending = plx_dma_issue_pending;
+ dma->device_tx_status = plx_dma_tx_status;
+ dma->device_release = plx_dma_release;
+
+ chan = &plxdev->dma_chan;
+ chan->device = dma;
+ dma_cookie_init(chan);
+ list_add_tail(&chan->device_node, &dma->channels);
+
+ rc = dma_async_device_register(dma);
+ if (rc) {
+ pci_err(pdev, "Failed to register dma device: %d\n", rc);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+ kfree(plxdev);
+ return rc;
+ }
+
+ pci_set_drvdata(pdev, plxdev);
+
+ return 0;
+}
+
+static int plx_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
+ if (rc)
+ return rc;
+
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (rc <= 0)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = plx_dma_create(pdev);
+ if (rc)
+ goto err_free_irq_vectors;
+
+ pci_info(pdev, "PLX DMA Channel Registered\n");
+
+ return 0;
+
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return rc;
+}
+
+static void plx_dma_remove(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);
+
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+
+ rcu_assign_pointer(plxdev->pdev, NULL);
+ synchronize_rcu();
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ __plx_dma_stop(plxdev);
+ plx_dma_abort_desc(plxdev);
+
+ plxdev->bar = NULL;
+ dma_async_device_unregister(&plxdev->dma_dev);
+
+ pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id plx_dma_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x87D0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_SYSTEM_OTHER << 8,
+ .class_mask = 0xFFFFFFFF,
+ },
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
+
+static struct pci_driver plx_dma_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = plx_dma_pci_tbl,
+ .probe = plx_dma_probe,
+ .remove = plx_dma_remove,
+};
+module_pci_driver(plx_dma_pci_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 43da8eeb18ef..8e14c72d03f0 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
s3c24xx_dma_start_next_sg(s3cchan, txd);
}
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
- struct s3c24xx_dma_chan *s3cchan)
-{
- LIST_HEAD(head);
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
/*
* Try to allocate a physical channel. When successful, assign it to
* this virtual channel, and initiate the next descriptor. The
@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ LIST_HEAD(head);
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&s3cchan->vc.lock, flags);
@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs not yet fired as well */
- s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+ vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+ return 0;
+
unlock:
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
@@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
/* Basic sanity check */
if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+ dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
pdata->num_phy_channels, MAX_DMA_CHANNELS);
return -EINVAL;
}
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 465256fe8b1f..6d0bec947636 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
kfree(chan->desc);
chan->desc = NULL;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
sf_pdma_disclaim_chan(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
}
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
chan->desc = NULL;
chan->xfer_err = false;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e397a50058c8..bbc2bda3b902 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dma_addr_t src, dest;
u32 endpoints;
int nr_periods, offset, plength, i;
+ u8 ram_type, io_mode, linear_mode;
if (!is_slave_direction(dir)) {
dev_err(chan2dev(chan), "Invalid DMA direction\n");
return NULL;
}
- if (vchan->is_dedicated) {
- /*
- * As we are using this just for audio data, we need to use
- * normal DMA. There is nothing stopping us from supporting
- * dedicated DMA here as well, so if a client comes up and
- * requires it, it will be simple to implement it.
- */
- dev_err(chan2dev(chan),
- "Cyclic transfers are only supported on Normal DMA\n");
- return NULL;
- }
-
contract = generate_dma_contract();
if (!contract)
return NULL;
contract->is_cyclic = 1;
- /* Figure out the endpoints and the address we need */
+ if (vchan->is_dedicated) {
+ io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ } else {
+ io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ }
+
if (dir == DMA_MEM_TO_DEV) {
src = buf;
dest = sconfig->dst_addr;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
} else {
src = sconfig->src_addr;
dest = buf;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
}
/*
@@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dest = buf + offset;
/* Make the promise */
- promise = generate_ndma_promise(chan, src, dest,
- plength, sconfig, dir);
+ if (vchan->is_dedicated)
+ promise = generate_ddma_promise(chan, src, dest,
+ plength, sconfig);
+ else
+ promise = generate_ndma_promise(chan, src, dest,
+ plength, sconfig, dir);
+
if (!promise) {
/* TODO: should we free everything? */
return NULL;
@@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
}
spin_lock_irqsave(&vchan->vc.lock, flags);
- vchan_dma_desc_free_list(&vchan->vc, &head);
/* Clear these so the vchan is usable again */
vchan->processing = NULL;
vchan->pchan = NULL;
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index d507c24fbf31..f76e06651f80 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -34,5 +34,29 @@ config DMA_OMAP
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
+config TI_K3_UDMA
+ bool "Texas Instruments UDMA support"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL
+ depends on TI_SCI_INTA_IRQCHIP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_K3_RINGACC
+ select TI_K3_PSIL
+ help
+ Enable support for the TI UDMA (Unified DMA) controller. This
+ DMA engine is used in AM65x and j721e.
+
+config TI_K3_UDMA_GLUE_LAYER
+ bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_K3_UDMA
+ help
+ Say y here to support the K3 NAVSS DMA glue interface.
+ If unsure, say N.
+
+config TI_K3_PSIL
+ bool
+
config TI_DMA_CROSSBAR
bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 113e59ec9c32..9a29a107e374 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -2,4 +2,7 @@
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
+obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 756a3c951dc7..03a7f647f7b2 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev)
if (!info)
return -ENODEV;
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- return ret;
- }
-
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ecc);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
/* Get eDMA3 configuration from IP */
ret = edma_setup_from_hw(dev, info, ecc);
if (ret)
- return ret;
+ goto err_disable_pm;
/* Allocate memory based on the information we got from the IP */
ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
sizeof(*ecc->slave_chans), GFP_KERNEL);
- if (!ecc->slave_chans)
- return -ENOMEM;
ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->slot_inuse)
- return -ENOMEM;
ecc->channels_mask = devm_kcalloc(dev,
BITS_TO_LONGS(ecc->num_channels),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->channels_mask)
- return -ENOMEM;
+ if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
/* Mark all channels available initially */
bitmap_fill(ecc->channels_mask, ecc->num_channels);
@@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev)
ecc);
if (ret) {
dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccint = irq;
}
@@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev)
ecc);
if (ret) {
dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccerrint = irq;
}
@@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev)
ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(dev, "Can't allocate PaRAM dummy slot\n");
- return ecc->dummy_slot;
+ ret = ecc->dummy_slot;
+ goto err_disable_pm;
}
queue_priority_mapping = info->queue_priority_mapping;
@@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev)
err_reg1:
edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev)
if (ecc->dma_memcpy)
dma_async_device_unregister(ecc->dma_memcpy);
edma_free_slot(ecc, ecc->dummy_slot);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return 0;
}
diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c
new file mode 100644
index 000000000000..a896a15908cf
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am654.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am654_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0x4300),
+ PSIL_ETHERNET(0x4301),
+ PSIL_ETHERNET(0x4302),
+ PSIL_ETHERNET(0x4303),
+ /* PDMA0 - McASPs */
+ PSIL_PDMA_XY_TR(0x4400),
+ PSIL_PDMA_XY_TR(0x4401),
+ PSIL_PDMA_XY_TR(0x4402),
+ /* PDMA1 - SPI0-4 */
+ PSIL_PDMA_XY_PKT(0x4500),
+ PSIL_PDMA_XY_PKT(0x4501),
+ PSIL_PDMA_XY_PKT(0x4502),
+ PSIL_PDMA_XY_PKT(0x4503),
+ PSIL_PDMA_XY_PKT(0x4504),
+ PSIL_PDMA_XY_PKT(0x4505),
+ PSIL_PDMA_XY_PKT(0x4506),
+ PSIL_PDMA_XY_PKT(0x4507),
+ PSIL_PDMA_XY_PKT(0x4508),
+ PSIL_PDMA_XY_PKT(0x4509),
+ PSIL_PDMA_XY_PKT(0x450a),
+ PSIL_PDMA_XY_PKT(0x450b),
+ PSIL_PDMA_XY_PKT(0x450c),
+ PSIL_PDMA_XY_PKT(0x450d),
+ PSIL_PDMA_XY_PKT(0x450e),
+ PSIL_PDMA_XY_PKT(0x450f),
+ PSIL_PDMA_XY_PKT(0x4510),
+ PSIL_PDMA_XY_PKT(0x4511),
+ PSIL_PDMA_XY_PKT(0x4512),
+ PSIL_PDMA_XY_PKT(0x4513),
+ /* PDMA1 - USART0-2 */
+ PSIL_PDMA_XY_PKT(0x4514),
+ PSIL_PDMA_XY_PKT(0x4515),
+ PSIL_PDMA_XY_PKT(0x4516),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 - ADCs */
+ PSIL_PDMA_XY_TR(0x7100),
+ PSIL_PDMA_XY_TR(0x7101),
+ PSIL_PDMA_XY_TR(0x7102),
+ PSIL_PDMA_XY_TR(0x7103),
+ /* MCU_PDMA1 - MCU_SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ PSIL_PDMA_XY_PKT(0x7208),
+ PSIL_PDMA_XY_PKT(0x7209),
+ PSIL_PDMA_XY_PKT(0x720a),
+ PSIL_PDMA_XY_PKT(0x720b),
+ /* MCU_PDMA1 - MCU_USART0 */
+ PSIL_PDMA_XY_PKT(0x7212),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am654_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0xc300),
+ PSIL_ETHERNET(0xc301),
+ PSIL_ETHERNET(0xc302),
+ PSIL_ETHERNET(0xc303),
+ PSIL_ETHERNET(0xc304),
+ PSIL_ETHERNET(0xc305),
+ PSIL_ETHERNET(0xc306),
+ PSIL_ETHERNET(0xc307),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+};
+
+struct psil_ep_map am654_ep_map = {
+ .name = "am654",
+ .src = am654_src_ep_map,
+ .src_count = ARRAY_SIZE(am654_src_ep_map),
+ .dst = am654_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am654_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
new file mode 100644
index 000000000000..e3cfd5f66842
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721e_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ PSIL_PDMA_MCASP(0x4503),
+ PSIL_PDMA_MCASP(0x4504),
+ PSIL_PDMA_MCASP(0x4505),
+ PSIL_PDMA_MCASP(0x4506),
+ PSIL_PDMA_MCASP(0x4507),
+ PSIL_PDMA_MCASP(0x4508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0x4624),
+ PSIL_PDMA_XY_PKT(0x4625),
+ PSIL_PDMA_XY_PKT(0x4626),
+ PSIL_PDMA_XY_PKT(0x4627),
+ PSIL_PDMA_XY_PKT(0x4628),
+ PSIL_PDMA_XY_PKT(0x4629),
+ PSIL_PDMA_XY_PKT(0x4630),
+ PSIL_PDMA_XY_PKT(0x463a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW9 */
+ PSIL_ETHERNET(0x4a00),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721e_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* CPSW9 */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+};
+
+struct psil_ep_map j721e_ep_map = {
+ .name = "j721e",
+ .src = j721e_src_ep_map,
+ .src_count = ARRAY_SIZE(j721e_src_ep_map),
+ .dst = j721e_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721e_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
new file mode 100644
index 000000000000..a1f389ca371e
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include <linux/dma/k3-psil.h>
+
+struct psil_ep {
+ u32 thread_id;
+ struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name: Name of the map, set it to the name of the SoC
+ * @src: Array of source PSI-L thread configurations
+ * @src_count: Number of entries in the src array
+ * @dst: Array of destination PSI-L thread configurations
+ * @dst_count: Number of entries in the dst array
+ *
+ * In case of symmetric configuration for a matching src/dst thread (for
+ * example 0x4400 and 0xc400) only the src configuration needs to be
+ * present. If no dst configuration is found, the code looks up
+ * (dst_thread_id & ~0x8000) to find the symmetric match.
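+ *
+ * For example, if only the src thread 0x4400 is described, a dst lookup
+ * for 0xc400 (0x4400 | 0x8000) falls back to the 0x4400 configuration.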
+ */
+struct psil_ep_map {
+ char *name;
+ struct psil_ep *src;
+ int src_count;
+ struct psil_ep *dst;
+ int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644
index 000000000000..d7b965049ccb
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+ int i;
+
+ mutex_lock(&ep_map_mutex);
+ if (!soc_ep_map) {
+ if (of_machine_is_compatible("ti,am654")) {
+ soc_ep_map = &am654_ep_map;
+ } else if (of_machine_is_compatible("ti,j721e")) {
+ soc_ep_map = &j721e_ep_map;
+ } else {
+ pr_err("PSIL: No compatible machine found for map\n");
+ mutex_unlock(&ep_map_mutex);
+ return ERR_PTR(-ENOTSUPP);
+ }
+ pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+ }
+ mutex_unlock(&ep_map_mutex);
+
+ if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+ /* check in destination thread map */
+ for (i = 0; i < soc_ep_map->dst_count; i++) {
+ if (soc_ep_map->dst[i].thread_id == thread_id)
+ return &soc_ep_map->dst[i].ep_config;
+ }
+ }
+
+ thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+ if (soc_ep_map->src) {
+ for (i = 0; i < soc_ep_map->src_count; i++) {
+ if (soc_ep_map->src[i].thread_id == thread_id)
+ return &soc_ep_map->src[i].ep_config;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config)
+{
+ struct psil_endpoint_config *dst_ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int index;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ index = of_property_match_string(dev->of_node, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+ index, &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ dst_ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(dst_ep_config)) {
+ pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+ thread_id);
+ of_node_put(dma_spec.np);
+ return PTR_ERR(dst_ep_config);
+ }
+
+ memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+ of_node_put(dma_spec.np);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644
index 000000000000..c1511298ece2
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct k3_udma_glue_common {
+ struct device *dev;
+ struct udma_dev *udmax;
+ const struct udma_tisci_rm *tisci_rm;
+ struct k3_ringacc *ringacc;
+ u32 src_thread;
+ u32 dst_thread;
+
+ u32 hdesc_size;
+ bool epib;
+ u32 psdata_size;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_tchan *udma_tchanx;
+ int udma_tchan_id;
+
+ struct k3_ring *ringtx;
+ struct k3_ring *ringtxcq;
+
+ bool psil_paired;
+
+ int virq;
+
+ atomic_t free_pkts;
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+};
+
+struct k3_udma_glue_rx_flow {
+ struct udma_rflow *udma_rflow;
+ int udma_rflow_id;
+ struct k3_ring *ringrx;
+ struct k3_ring *ringrxfdq;
+
+ int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_rchan *udma_rchanx;
+ int udma_rchan_id;
+ bool remote;
+
+ bool psil_paired;
+
+ u32 swdata_size;
+ int flow_id_base;
+
+ struct k3_udma_glue_rx_flow *flows;
+ u32 flow_num;
+ u32 flows_ready;
+};
+
+#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
+
+static int of_k3_udma_glue_parse(struct device_node *udmax_np,
+ struct k3_udma_glue_common *common)
+{
+ common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
+ "ti,ringacc");
+ if (IS_ERR(common->ringacc))
+ return PTR_ERR(common->ringacc);
+
+ common->udmax = of_xudma_dev_get(udmax_np, NULL);
+ if (IS_ERR(common->udmax))
+ return PTR_ERR(common->udmax);
+
+ common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
+
+ return 0;
+}
+
+static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
+ const char *name, struct k3_udma_glue_common *common,
+ bool tx_chn)
+{
+ struct psil_endpoint_config *ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int ret = 0;
+ int index;
+
+ if (unlikely(!name))
+ return -EINVAL;
+
+ index = of_property_match_string(chn_np, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
+ &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
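+ /*
+ * dst (TX) PSI-L thread IDs carry K3_PSIL_DST_THREAD_ID_OFFSET;
+ * reject a binding whose direction does not match the channel.
+ */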
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ /* get psil endpoint config */
+ ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ ret = PTR_ERR(ep_config);
+ goto out_put_spec;
+ }
+
+ common->epib = ep_config->needs_epib;
+ common->psdata_size = ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
+
+static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ struct device *dev = tx_chn->common.dev;
+
+ dev_dbg(dev, "dump_tx_chn:\n"
+ "udma_tchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n",
+ tx_chn->udma_tchan_id,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+}
+
+static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ xudma_tchanrt_read(chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
+}
+
+static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = tx_chn->udma_tchan_id;
+ if (tx_chn->tx_pause_on_err)
+ req.tx_pause_on_err = 1;
+ if (tx_chn->tx_filt_einfo)
+ req.tx_filt_einfo = 1;
+ if (tx_chn->tx_filt_pswords)
+ req.tx_filt_pswords = 1;
+ req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ if (tx_chn->tx_supr_tdpkt)
+ req.tx_supr_tdpkt = 1;
+ req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
+ req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+ return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
+}
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse of udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
+ tx_chn->common.psdata_size,
+ tx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP TX channel */
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ if (IS_ERR(tx_chn->udma_tchanx)) {
+ ret = PTR_ERR(tx_chn->udma_tchanx);
+ dev_err(dev, "UDMAX tchanx get err %d\n", ret);
+ goto err;
+ }
+ tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+
+ atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+
+ /* request and cfg rings */
+ tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ tx_chn->udma_tchan_id, 0);
+ if (!tx_chn->ringtx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TX ring %u\n",
+ tx_chn->udma_tchan_id);
+ goto err;
+ }
+
+ tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ -1, 0);
+ if (!tx_chn->ringtxcq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TXCQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ /* request and cfg psi-l */
+ tx_chn->common.src_thread =
+ xudma_dev_get_psil_base(tx_chn->common.udmax) +
+ tx_chn->udma_tchan_id;
+
+ ret = k3_udma_glue_cfg_tx_chn(tx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg tchan %d\n", ret);
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->psil_paired = true;
+
+ /* reset TX RT registers */
+ k3_udma_glue_disable_tx_chn(tx_chn);
+
+ k3_udma_glue_dump_tx_chn(tx_chn);
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
+
+ if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
+ xudma_tchan_put(tx_chn->common.udmax,
+ tx_chn->udma_tchanx);
+
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_free(tx_chn->ringtxcq);
+
+ if (tx_chn->ringtx)
+ k3_ringacc_ring_free(tx_chn->ringtx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
+
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma)
+{
+ u32 ringtxcq_id;
+
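+ /*
+ * free_pkts is seeded with the TXCQ size and counts available
+ * completion-queue slots; refuse the push when the completion ring
+ * would overflow.
+ */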
+ if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
+ return -ENOMEM;
+
+ ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+ cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
+
+ return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
+
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma)
+{
+ int ret;
+
+ ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
+ if (!ret)
+ atomic_inc(&tx_chn->free_pkts);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
+
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ u32 txrt_ctl;
+
+ txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ txrt_ctl);
+
+ txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ txrt_ctl);
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
+
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
+
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(tx_chn->common.dev, "TX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
+
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+ dma_addr_t desc_dma;
+ int occ_tx, i, ret;
+
+ /* reset TXCQ as it is not input for udma - expected to be empty */
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
+ /*
+ * The TXQ reset needs special handling as it is an input ring for udma
+ * and its state is cached by udma, so:
+ * 1) save the TXQ occupancy
+ * 2) clean up the TXQ and call the .cleanup() callback for each desc
+ * 3) reset the TXQ in a special way
+ */
+ occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
+ dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+
+ for (i = 0; i < occ_tx; i++) {
+ ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
+
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return tx_chn->common.hdesc_size;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
+
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
+
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+
+ return tx_chn->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+
+static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = rx_chn->udma_rchan_id;
+ req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
+ /*
+ * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
+ * and udmax impl, so just configure it to an invalid value.
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
+ */
+ req.rxcq_qnum = 0xFFFF;
+ if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ /* Default flow + extra ones */
+ req.flowid_start = rx_chn->flow_id_base;
+ req.flowid_cnt = rx_chn->flow_num;
+ }
+ req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
+ if (ret)
+ dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
+ rx_chn->udma_rchan_id, ret);
+
+ return ret;
+}
+
+static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ if (IS_ERR_OR_NULL(flow->udma_rflow))
+ return;
+
+ if (flow->ringrxfdq)
+ k3_ringacc_ring_free(flow->ringrxfdq);
+
+ if (flow->ringrx)
+ k3_ringacc_ring_free(flow->ringrx);
+
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ flow->udma_rflow = NULL;
+ rx_chn->flows_ready--;
+}
+
+static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ if (IS_ERR(flow->udma_rflow)) {
+ ret = PTR_ERR(flow->udma_rflow);
+ dev_err(dev, "UDMAX rflow get err %d\n", ret);
+ goto err;
+ }
+
+ if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ return -ENODEV;
+ }
+
+ /* request and cfg rings */
+ flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxq_id, 0);
+ if (!flow->ringrx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RX ring\n");
+ goto err;
+ }
+
+ flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxfdq0_id, 0);
+ if (!flow->ringrxfdq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RXFDQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
+ goto err;
+ }
+
+ if (rx_chn->remote) {
+ rx_ring_id = TI_SCI_RESOURCE_NULL;
+ rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
+ } else {
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ if (rx_chn->common.epib)
+ req.rx_einfo_present = 1;
+ if (rx_chn->common.psdata_size)
+ req.rx_psinfo_present = 1;
+ if (flow_cfg->rx_error_handling)
+ req.rx_error_handling = 1;
+ req.rx_desc_type = 0;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_src_tag_hi_sel = 0;
+ req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
+ req.rx_dest_tag_hi_sel = 0;
+ req.rx_dest_tag_lo_sel = 0;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
+ ret);
+ goto err;
+ }
+
+ rx_chn->flows_ready++;
+ dev_dbg(dev, "flow%d config done. ready:%d\n",
+ flow->udma_rflow_id, rx_chn->flows_ready);
+
+ return 0;
+err:
+ k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+ return ret;
+}
+
+static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "dump_rx_chn:\n"
+ "udma_rchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n"
+ "epib: %d\n"
+ "hdesc_size: %u\n"
+ "psdata_size: %u\n"
+ "swdata_size: %u\n"
+ "flow_id_base: %d\n"
+ "flow_num: %d\n",
+ chn->udma_rchan_id,
+ chn->common.src_thread,
+ chn->common.dst_thread,
+ chn->common.epib,
+ chn->common.hdesc_size,
+ chn->common.psdata_size,
+ chn->common.swdata_size,
+ chn->flow_id_base,
+ chn->flow_num);
+}
+
+static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ xudma_rchanrt_read(chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
+}
+
+static int
+k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ int ret;
+
+ /* default rflow */
+ if (cfg->flow_id_use_rxchan_id)
+ return 0;
+
+ /* not GP rflows */
+ if (rx_chn->flow_id_base != -1 &&
+ !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ return 0;
+
+ /* Allocate range of GP rflows */
+ ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+ if (ret < 0) {
+ dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+ rx_chn->flow_id_base, rx_chn->flow_num, ret);
+ return ret;
+ }
+ rx_chn->flow_id_base = ret;
+
+ return 0;
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0)
+ return ERR_PTR(-EINVAL);
+
+ if (cfg->flow_id_num != 1 &&
+ (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
+ return ERR_PTR(-EINVAL);
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = false;
+
+ /* parse the udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP RX channel */
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ if (IS_ERR(rx_chn->udma_rchanx)) {
+ ret = PTR_ERR(rx_chn->udma_rchanx);
+ dev_err(dev, "UDMAX rchanx get err %d\n", ret);
+ goto err;
+ }
+ rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
+
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ /* request and cfg psi-l */
+ rx_chn->common.dst_thread =
+ xudma_dev_get_psil_base(rx_chn->common.udmax) +
+ rx_chn->udma_rchan_id;
+
+ ret = k3_udma_glue_cfg_rx_chn(rx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg rchan %d\n", ret);
+ goto err;
+ }
+
+ /* init the default RX flow; only valid when flow_num == 1 */
+ if (cfg->def_flow_cfg) {
+ ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
+ if (ret)
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->psil_paired = true;
+
+ /* reset RX RT registers */
+ k3_udma_glue_disable_rx_chn(rx_chn);
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * A remote RX channel is under control of a remote CPU core, so
+ * Linux can only request it and manipulate it through its dedicated
+ * RX flows.
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ /* parse the udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ if (cfg->remote)
+ return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
+ else
+ return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
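+
+/*
+ * A minimal request sketch (hypothetical client code; only fields consumed
+ * by this file are shown, the ring sizes are example values):
+ *
+ *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
+ *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
+ *	struct k3_udma_glue_rx_channel *rx_chn;
+ *
+ *	flow_cfg.ring_rxq_id = -1;
+ *	flow_cfg.ring_rxfdq0_id = -1;
+ *	flow_cfg.rx_cfg.size = 128;
+ *	flow_cfg.rxfdq_cfg.size = 128;
+ *	flow_cfg.rx_error_handling = true;
+ *
+ *	cfg.swdata_size = sizeof(void *);
+ *	cfg.flow_id_num = 1;
+ *	cfg.flow_id_use_rxchan_id = true;
+ *	cfg.def_flow_cfg = &flow_cfg;
+ *
+ *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
+ *	if (IS_ERR(rx_chn))
+ *		return PTR_ERR(rx_chn);
+ */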
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rx_chn->common.udmax))
+ return;
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ k3_udma_glue_release_rx_flow(rx_chn, i);
+
+ if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ xudma_free_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+
+ if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
+ xudma_rchan_put(rx_chn->common.udmax,
+ rx_chn->udma_rchanx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
+
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
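+
+/*
+ * For a multi-flow channel (flow_id_num > 1, no def_flow_cfg) each flow is
+ * brought up individually (hypothetical client code):
+ *
+ *	for (i = 0; i < cfg.flow_id_num; i++) {
+ *		ret = k3_udma_glue_rx_flow_init(rx_chn, i, &flow_cfg[i]);
+ *		if (ret)
+ *			return ret;
+ *	}
+ */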
+
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ flow = &rx_chn->flows[flow_idx];
+
+ return k3_ringacc_get_ring_id(flow->ringrxfdq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
+
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ return rx_chn->flow_id_base;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
+
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
+
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ u32 rxrt_ctl;
+
+ if (rx_chn->remote)
+ return -EINVAL;
+
+ if (rx_chn->flows_ready < rx_chn->flow_num)
+ return -EINVAL;
+
+ rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+ rxrt_ctl);
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ 0);
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ if (rx_chn->remote)
+ return;
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+ struct device *dev = rx_chn->common.dev;
+ dma_addr_t desc_dma;
+ int occ_rx, i, ret;
+
+ /* Reset the RXCQ: it is not an input ring for UDMA and is expected to be empty */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+ dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+ if (flow->ringrx)
+ k3_ringacc_ring_reset(flow->ringrx);
+
+ /* Skip RX FDQ in case one FDQ is used for the set of flows */
+ if (skip_fdq)
+ return;
+
+ /*
+ * The RX FDQ has to be reset in a special way, as it is the input ring
+ * for UDMA and its state is cached by UDMA, so:
+ * 1) save RX FDQ occ
+ * 2) clean up RX FDQ and call callback .cleanup() for each desc
+ * 3) reset RX FDQ in a special way
+ */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+ dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+ for (i = 0; i < occ_rx; i++) {
+ ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+ if (ret) {
+ dev_err(dev, "RX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
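+
+/*
+ * A teardown sketch (hypothetical client code): when one FDQ is shared by
+ * all flows, clean it up for the first flow only and skip it afterwards:
+ *
+ *	for (i = 0; i < flow_num; i++)
+ *		k3_udma_glue_reset_rx_chn(rx_chn, i, priv, my_rx_cleanup,
+ *					  i != 0);
+ */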
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+ dma_addr_t desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ flow = &rx_chn->flows[flow_num];
+
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+ return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
new file mode 100644
index 000000000000..0b8f3dd6b146
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+ struct device_node *udma_node = np;
+ struct platform_device *pdev;
+ struct udma_dev *ud;
+
+ if (property) {
+ udma_node = of_parse_phandle(np, property, 0);
+ if (!udma_node) {
+ pr_err("UDMA node is not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ pdev = of_find_device_by_node(udma_node);
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (np != udma_node)
+ of_node_put(udma_node);
+
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+ return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+ return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_free_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+ return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
+
+#define XUDMA_GET_PUT_RESOURCE(res) \
+struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \
+{ \
+ return __udma_reserve_##res(ud, false, id); \
+} \
+EXPORT_SYMBOL(xudma_##res##_get); \
+ \
+void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \
+{ \
+ clear_bit(p->id, ud->res##_map); \
+} \
+EXPORT_SYMBOL(xudma_##res##_put)
+XUDMA_GET_PUT_RESOURCE(tchan);
+XUDMA_GET_PUT_RESOURCE(rchan);
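+
+/*
+ * For example, XUDMA_GET_PUT_RESOURCE(tchan) above expands to
+ * xudma_tchan_get()/xudma_tchan_put(), wrapping __udma_reserve_tchan()
+ * and the tchan_map bitmap of the UDMA device.
+ */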
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+ return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+ __udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
+
+#define XUDMA_GET_RESOURCE_ID(res) \
+int xudma_##res##_get_id(struct udma_##res *p) \
+{ \
+ return p->id; \
+} \
+EXPORT_SYMBOL(xudma_##res##_get_id)
+XUDMA_GET_RESOURCE_ID(tchan);
+XUDMA_GET_RESOURCE_ID(rchan);
+XUDMA_GET_RESOURCE_ID(rflow);
+
+/* Exported register access functions */
+#define XUDMA_RT_IO_FUNCTIONS(res) \
+u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \
+{ \
+ return udma_##res##rt_read(p, reg); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_read); \
+ \
+void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
+{ \
+ udma_##res##rt_write(p, reg, val); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_write)
+XUDMA_RT_IO_FUNCTIONS(tchan);
+XUDMA_RT_IO_FUNCTIONS(rchan);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
new file mode 100644
index 000000000000..ea79c2df28e0
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.c
@@ -0,0 +1,3432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "../virt-dma.h"
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct udma_static_tr {
+ u8 elsize; /* RPSTR0 */
+ u16 elcnt; /* RPSTR0 */
+ u16 bstcnt; /* RPSTR1 */
+};
+
+#define K3_UDMA_MAX_RFLOWS 1024
+#define K3_UDMA_DEFAULT_RING_SIZE 16
+
+/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
+#define UDMA_RFLOW_SRCTAG_NONE 0
+#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
+#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
+#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
+
+#define UDMA_RFLOW_DSTTAG_NONE 0
+#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
+#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
+#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
+#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
+
+struct udma_chan;
+
+enum udma_mmr {
+ MMR_GCFG = 0,
+ MMR_RCHANRT,
+ MMR_TCHANRT,
+ MMR_LAST,
+};
+
+static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+
+struct udma_tchan {
+ void __iomem *reg_rt;
+
+ int id;
+ struct k3_ring *t_ring; /* Transmit ring */
+ struct k3_ring *tc_ring; /* Transmit Completion ring */
+};
+
+struct udma_rflow {
+ int id;
+ struct k3_ring *fd_ring; /* Free Descriptor ring */
+ struct k3_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+ void __iomem *reg_rt;
+
+ int id;
+};
+
+#define UDMA_FLAG_PDMA_ACC32 BIT(0)
+#define UDMA_FLAG_PDMA_BURST BIT(1)
+
+struct udma_match_data {
+ u32 psil_base;
+ bool enable_memcpy_support;
+ u32 flags;
+ u32 statictr_z_mask;
+ u32 rchan_oes_offset;
+
+ u8 tpl_levels;
+ u32 level_start_idx[];
+};
+
+struct udma_dev {
+ struct dma_device ddev;
+ struct device *dev;
+ void __iomem *mmrs[MMR_LAST];
+ const struct udma_match_data *match_data;
+
+ size_t desc_align; /* alignment to use for descriptors */
+
+ struct udma_tisci_rm tisci_rm;
+
+ struct k3_ringacc *ringacc;
+
+ struct work_struct purge_work;
+ struct list_head desc_to_purge;
+ spinlock_t lock;
+
+ int tchan_cnt;
+ int echan_cnt;
+ int rchan_cnt;
+ int rflow_cnt;
+ unsigned long *tchan_map;
+ unsigned long *rchan_map;
+ unsigned long *rflow_gp_map;
+ unsigned long *rflow_gp_map_allocated;
+ unsigned long *rflow_in_use;
+
+ struct udma_tchan *tchans;
+ struct udma_rchan *rchans;
+ struct udma_rflow *rflows;
+
+ struct udma_chan *channels;
+ u32 psil_base;
+};
+
+struct udma_hwdesc {
+ size_t cppi5_desc_size;
+ void *cppi5_desc_vaddr;
+ dma_addr_t cppi5_desc_paddr;
+
+ /* TR descriptor internal pointers */
+ void *tr_req_base;
+ struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_desc {
+ struct virt_dma_desc vd;
+
+ bool terminated;
+
+ enum dma_transfer_direction dir;
+
+ struct udma_static_tr static_tr;
+ u32 residue;
+
+ unsigned int sglen;
+ unsigned int desc_idx; /* Only used for cyclic in packet mode */
+ unsigned int tr_idx;
+
+ u32 metadata_size;
+ void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
+
+ unsigned int hwdesc_count;
+ struct udma_hwdesc hwdesc[0];
+};
+
+enum udma_chan_state {
+ UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
+ UDMA_CHAN_IS_ACTIVE, /* Normal operation */
+ UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
+};
+
+struct udma_tx_drain {
+ struct delayed_work work;
+ unsigned long jiffie;
+ u32 residue;
+};
+
+struct udma_chan_config {
+ bool pkt_mode; /* TR or packet */
+ bool needs_epib; /* EPIB is needed for the communication or not */
+ u32 psd_size; /* size of Protocol Specific Data */
+ u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
+ u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+ bool notdpkt; /* Suppress sending TDC packet */
+ int remote_thread_id;
+ u32 src_thread;
+ u32 dst_thread;
+ enum psil_endpoint_type ep_type;
+ bool enable_acc32;
+ bool enable_burst;
+ enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+
+ enum dma_transfer_direction dir;
+};
+
+struct udma_chan {
+ struct virt_dma_chan vc;
+ struct dma_slave_config cfg;
+ struct udma_dev *ud;
+ struct udma_desc *desc;
+ struct udma_desc *terminated_desc;
+ struct udma_static_tr static_tr;
+ char *name;
+
+ struct udma_tchan *tchan;
+ struct udma_rchan *rchan;
+ struct udma_rflow *rflow;
+
+ bool psil_paired;
+
+ int irq_num_ring;
+ int irq_num_udma;
+
+ bool cyclic;
+ bool paused;
+
+ enum udma_chan_state state;
+ struct completion teardown_completed;
+
+ struct udma_tx_drain tx_drain;
+
+ u32 bcnt; /* number of bytes completed since the start of the channel */
+ u32 in_ring_cnt; /* number of descriptors in flight */
+
+ /* Channel configuration parameters */
+ struct udma_chan_config config;
+
+ /* dmapool for packet mode descriptors */
+ bool use_dma_pool;
+ struct dma_pool *hdesc_pool;
+
+ u32 id;
+};
+
+static inline struct udma_dev *to_udma_dev(struct dma_device *d)
+{
+ return container_of(d, struct udma_dev, ddev);
+}
+
+static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct udma_chan, vc.chan);
+}
+
+static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct udma_desc, vd.tx);
+}
+
+/* Generic register access functions */
+static inline u32 udma_read(void __iomem *base, int reg)
+{
+ return readl(base + reg);
+}
+
+static inline void udma_write(void __iomem *base, int reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static inline void udma_update_bits(void __iomem *base, int reg,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(base + reg);
+ tmp = orig & ~mask;
+ tmp |= (val & mask);
+
+ if (tmp != orig)
+ writel(tmp, base + reg);
+}
+
+/* TCHANRT */
+static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
+{
+ if (!tchan)
+ return 0;
+ return udma_read(tchan->reg_rt, reg);
+}
+
+static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
+ u32 val)
+{
+ if (!tchan)
+ return;
+ udma_write(tchan->reg_rt, reg, val);
+}
+
+static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!tchan)
+ return;
+ udma_update_bits(tchan->reg_rt, reg, mask, val);
+}
+
+/* RCHANRT */
+static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
+{
+ if (!rchan)
+ return 0;
+ return udma_read(rchan->reg_rt, reg);
+}
+
+static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
+ u32 val)
+{
+ if (!rchan)
+ return;
+ udma_write(rchan->reg_rt, reg, val);
+}
+
+static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!rchan)
+ return;
+ udma_update_bits(rchan->reg_rt, reg, mask, val);
+}
+
+static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
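+ /*
+ * A PSI-L destination thread id is the peer thread id with the
+ * destination offset bit set, to tell it apart from source threads.
+ */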
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+ memset(&uc->config, 0, sizeof(uc->config));
+ uc->config.remote_thread_id = -1;
+ uc->state = UDMA_CHAN_IS_IDLE;
+}
+
+static void udma_dump_chan_stdata(struct udma_chan *uc)
+{
+ struct device *dev = uc->ud->dev;
+ u32 offset;
+ int i;
+
+ if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "TCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_tchanrt_read(uc->tchan, offset));
+ }
+ }
+
+ if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "RCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_rchanrt_read(uc->rchan, offset));
+ }
+ }
+}
+
+static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
+ int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_paddr;
+}
+
+static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_vaddr;
+}
+
+static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
+ dma_addr_t paddr)
+{
+ struct udma_desc *d = uc->terminated_desc;
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+
+ if (!d) {
+ d = uc->desc;
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+ }
+
+ return d;
+}
+
+static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
+{
+ if (uc->use_dma_pool) {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_pool_free(uc->hdesc_pool,
+ d->hwdesc[i].cppi5_desc_vaddr,
+ d->hwdesc[i].cppi5_desc_paddr);
+
+ d->hwdesc[i].cppi5_desc_vaddr = NULL;
+ }
+ } else if (d->hwdesc[0].cppi5_desc_vaddr) {
+ struct udma_dev *ud = uc->ud;
+
+ dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ d->hwdesc[0].cppi5_desc_vaddr,
+ d->hwdesc[0].cppi5_desc_paddr);
+
+ d->hwdesc[0].cppi5_desc_vaddr = NULL;
+ }
+}
+
+static void udma_purge_desc_work(struct work_struct *work)
+{
+ struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
+ struct virt_dma_desc *vd, *_vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_splice_tail_init(&ud->desc_to_purge, &head);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+
+ udma_free_hwdesc(uc, d);
+ list_del(&vd->node);
+ kfree(d);
+ }
+
+ /* If more to purge, schedule the work again */
+ if (!list_empty(&ud->desc_to_purge))
+ schedule_work(&ud->purge_work);
+}
+
+static void udma_desc_free(struct virt_dma_desc *vd)
+{
+ struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+ unsigned long flags;
+
+ if (uc->terminated_desc == d)
+ uc->terminated_desc = NULL;
+
+ if (uc->use_dma_pool) {
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return;
+ }
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_add_tail(&vd->node, &ud->desc_to_purge);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ schedule_work(&ud->purge_work);
+}
+
+static bool udma_is_chan_running(struct udma_chan *uc)
+{
+ u32 trt_ctl = 0;
+ u32 rrt_ctl = 0;
+
+ if (uc->tchan)
+ trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ if (uc->rchan)
+ rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+ if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
+ return true;
+
+ return false;
+}
+
+static bool udma_is_chan_paused(struct udma_chan *uc)
+{
+ u32 val, pause_mask;
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ val = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_DEV:
+ val = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_MEM:
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
+ break;
+ default:
+ return false;
+ }
+
+ if (val & pause_mask)
+ return true;
+
+ return false;
+}
+
+static void udma_sync_for_device(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ if (uc->cyclic && uc->config.pkt_mode) {
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[idx].cppi5_desc_paddr,
+ d->hwdesc[idx].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ } else {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[i].cppi5_desc_paddr,
+ d->hwdesc[i].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int udma_push_to_ring(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ struct k3_ring *ring = NULL;
+ int ret = -EINVAL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->fd_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->t_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring) {
+ dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+
+ wmb(); /* Ensure that writes are not moved over this point */
+ udma_sync_for_device(uc, idx);
+ ret = k3_ringacc_ring_push(ring, &desc_addr);
+ uc->in_ring_cnt++;
+ }
+
+ return ret;
+}
+
+static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
+{
+ struct k3_ring *ring = NULL;
+ int ret = -ENOENT;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->r_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->tc_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring && k3_ringacc_ring_get_occ(ring)) {
+ struct udma_desc *d = NULL;
+
+ ret = k3_ringacc_ring_pop(ring, addr);
+ if (ret)
+ return ret;
+
+ /* Teardown completion */
+ if (cppi5_desc_is_tdcm(*addr))
+ return ret;
+
+ d = udma_udma_desc_from_paddr(uc, *addr);
+
+ if (d)
+ dma_sync_single_for_cpu(uc->ud->dev, *addr,
+ d->hwdesc[0].cppi5_desc_size,
+ DMA_FROM_DEVICE);
+ rmb(); /* Ensure that reads are not moved before this point */
+
+ if (!ret)
+ uc->in_ring_cnt--;
+ }
+
+ return ret;
+}
+
+static void udma_reset_rings(struct udma_chan *uc)
+{
+ struct k3_ring *ring1 = NULL;
+ struct k3_ring *ring2 = NULL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ if (uc->rchan) {
+ ring1 = uc->rflow->fd_ring;
+ ring2 = uc->rflow->r_ring;
+ }
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ if (uc->tchan) {
+ ring1 = uc->tchan->t_ring;
+ ring2 = uc->tchan->tc_ring;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (ring1)
+ k3_ringacc_ring_reset_dma(ring1,
+ k3_ringacc_ring_get_occ(ring1));
+ if (ring2)
+ k3_ringacc_ring_reset(ring2);
+
+ /* Make sure we are not leaking memory through a stalled descriptor */
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ uc->in_ring_cnt = 0;
+}
+
+static void udma_reset_counters(struct udma_chan *uc)
+{
+ u32 val;
+
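+ /*
+ * The RT counters decrement by the value written to them, so
+ * writing back the value just read zeroes each counter.
+ */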
+ if (uc->tchan) {
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ if (uc->rchan) {
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ uc->bcnt = 0;
+}
+
+static int udma_reset_chan(struct udma_chan *uc, bool hard)
+{
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reset all counters */
+ udma_reset_counters(uc);
+
+ /* Hard reset: re-initialize the channel resources from scratch */
+ if (hard) {
+ struct udma_chan_config ucc_backup;
+ int ret;
+
+ memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
+ uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
+
+ /* restore the channel configuration */
+ memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
+ ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting forced teardown after forced reset helps recovering
+ * the rchan.
+ */
+ if (uc->config.dir == DMA_DEV_TO_MEM)
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN |
+ UDMA_CHAN_RT_CTL_FTDOWN);
+ }
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ return 0;
+}
+
+static void udma_start_desc(struct udma_chan *uc)
+{
+ struct udma_chan_config *ucc = &uc->config;
+
+ if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ int i;
+
+ /* Push all descriptors to ring for packet mode cyclic or RX */
+ for (i = 0; i < uc->desc->sglen; i++)
+ udma_push_to_ring(uc, i);
+ } else {
+ udma_push_to_ring(uc, 0);
+ }
+}
+
+static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
+{
+ /* Only PDMAs have staticTR */
+ if (uc->config.ep_type == PSIL_EP_NATIVE)
+ return false;
+
+ /* Check if the staticTR configuration has changed for TX */
+ if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
+ return true;
+
+ return false;
+}
+
+static int udma_start(struct udma_chan *uc)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
+
+ if (!vd) {
+ uc->desc = NULL;
+ return -ENOENT;
+ }
+
+ list_del(&vd->node);
+
+ uc->desc = to_udma_desc(&vd->tx);
+
+ /* Channel is already running and does not need reconfiguration */
+ if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
+ udma_start_desc(uc);
+ goto out;
+ }
+
+ /* Make sure that we clear the teardown bit, if it is set */
+ udma_reset_chan(uc, false);
+
+ /* Push descriptors before we start the channel */
+ udma_start_desc(uc);
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+ const struct udma_match_data *match_data =
+ uc->ud->match_data;
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
+ PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
+ match_data->statictr_z_mask));
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ /* Enable remote */
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_tchanrt_write(uc->tchan,
+ UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ /* Enable remote */
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ uc->state = UDMA_CHAN_IS_ACTIVE;
+out:
+
+ return 0;
+}
+
+static int udma_stop(struct udma_chan *uc)
+{
+ enum udma_chan_state old_state = uc->state;
+
+ uc->state = UDMA_CHAN_IS_TERMINATING;
+ reinit_completion(&uc->teardown_completed);
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_TEARDOWN);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_FLUSH);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ default:
+ uc->state = old_state;
+ complete_all(&uc->teardown_completed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
+{
+ struct udma_desc *d = uc->desc;
+ struct cppi5_host_desc_t *h_desc;
+
+ h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
+ cppi5_hdesc_reset_to_original(h_desc);
+ udma_push_to_ring(uc, d->desc_idx);
+ d->desc_idx = (d->desc_idx + 1) % d->sglen;
+}
+
+static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
+{
+ struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ memcpy(d->metadata, h_desc->epib, d->metadata_size);
+}
+
+static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
+{
+ u32 peer_bcnt, bcnt;
+
+ /* Only TX towards PDMA is affected */
+ if (uc->config.ep_type == PSIL_EP_NATIVE ||
+ uc->config.dir != DMA_MEM_TO_DEV)
+ return true;
+
+ peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+
+ if (peer_bcnt < bcnt) {
+ uc->tx_drain.residue = bcnt - peer_bcnt;
+ uc->tx_drain.jiffie = jiffies;
+ return false;
+ }
+
+ return true;
+}
+
+static void udma_check_tx_completion(struct work_struct *work)
+{
+ struct udma_chan *uc = container_of(work, typeof(*uc),
+ tx_drain.work.work);
+ bool desc_done = true;
+ u32 residue_diff;
+ unsigned long jiffie_diff, delay;
+
+ if (uc->desc) {
+ residue_diff = uc->tx_drain.residue;
+ jiffie_diff = uc->tx_drain.jiffie;
+ desc_done = udma_is_desc_really_done(uc, uc->desc);
+ }
+
+ if (!desc_done) {
+ jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+ residue_diff -= uc->tx_drain.residue;
+ if (residue_diff) {
+ /* Try to guess when we should check next time */
+ residue_diff /= jiffie_diff;
+ delay = uc->tx_drain.residue / residue_diff / 3;
+ if (jiffies_to_msecs(delay) < 5)
+ delay = 0;
+ } else {
+ /* No progress, check again in 1 second */
+ delay = HZ;
+ }
+
+ schedule_delayed_work(&uc->tx_drain.work, delay);
+ } else if (uc->desc) {
+ struct udma_desc *d = uc->desc;
+
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+}
+
+static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+ dma_addr_t paddr = 0;
+
+ if (udma_pop_from_ring(uc, &paddr) || !paddr)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* Teardown completion message */
+ if (cppi5_desc_is_tdcm(paddr)) {
+ /* Compensate our internal pop/push counter */
+ uc->in_ring_cnt++;
+
+ complete_all(&uc->teardown_completed);
+
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ if (!uc->desc)
+ udma_start(uc);
+
+ goto out;
+ }
+
+ d = udma_udma_desc_from_paddr(uc, paddr);
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+ if (desc_paddr != paddr) {
+ dev_err(uc->ud->dev, "not matching descriptors!\n");
+ goto out;
+ }
+
+ if (uc->cyclic) {
+ /* push the descriptor back to the ring */
+ if (d == uc->desc) {
+ udma_cyclic_packet_elapsed(uc);
+ vchan_cyclic_callback(&d->vd);
+ }
+ } else {
+ bool desc_done = false;
+
+ if (d == uc->desc) {
+ desc_done = udma_is_desc_really_done(uc, d);
+
+ if (desc_done) {
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ } else {
+ schedule_delayed_work(&uc->tx_drain.work,
+ 0);
+ }
+ }
+
+ if (desc_done)
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+ d = uc->desc;
+ if (d) {
+ d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+ if (uc->cyclic) {
+ vchan_cyclic_callback(&d->vd);
+ } else {
+ /* TODO: figure out the real amount of data */
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. Those flows can be
+ * requested only by explicit flow id number. If @from is set to -1 it will
+ * try to find the first free range; if @from is a positive value it will
+ * force allocation only of the specified range of flows.
+ *
+ * Returns the start flow id on success, or
+ * -ENOMEM if no free range can be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ int start, tmp_from;
+ DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+ tmp_from = from;
+ if (tmp_from < 0)
+ tmp_from = ud->rchan_cnt;
+ /* default flows can't be allocated; they are accessible only by id */
+ if (tmp_from < ud->rchan_cnt)
+ return -EINVAL;
+
+ if (tmp_from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+ ud->rflow_cnt);
+
+ start = bitmap_find_next_zero_area(tmp,
+ ud->rflow_cnt,
+ tmp_from, cnt, 0);
+ if (start >= ud->rflow_cnt)
+ return -ENOMEM;
+
+ if (from >= 0 && start != from)
+ return -EEXIST;
+
+ bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+ return start;
+}
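+
+/*
+ * For example: __udma_alloc_gp_rflow_range(ud, -1, 8) reserves the first
+ * free run of eight GP flows and returns its start id, while
+ * __udma_alloc_gp_rflow_range(ud, 100, 8) succeeds only if flows 100..107
+ * form a free range starting exactly at 100.
+ */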
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ if (from < ud->rchan_cnt)
+ return -EINVAL;
+ if (from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+ return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+ /*
+ * Any rflow that is not in use can be requested by ID, on the
+ * assumption that the caller knows what it is doing. TI-SCI FW will
+ * perform an additional permission check anyway, so it is safe.
+ */
+
+ if (id < 0 || id >= ud->rflow_cnt)
+ return ERR_PTR(-ENOENT);
+
+ if (test_bit(id, ud->rflow_in_use))
+ return ERR_PTR(-ENOENT);
+
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+
+ dev_dbg(ud->dev, "get rflow%d\n", id);
+ set_bit(id, ud->rflow_in_use);
+ return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+ if (!test_bit(rflow->id, ud->rflow_in_use)) {
+ dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+ return;
+ }
+
+ dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+ clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res) \
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
+ enum udma_tp_level tpl, \
+ int id) \
+{ \
+ if (id >= 0) { \
+ if (test_bit(id, ud->res##_map)) { \
+ dev_err(ud->dev, "res##%d is in use\n", id); \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } else { \
+ int start; \
+ \
+ if (tpl >= ud->match_data->tpl_levels) \
+ tpl = ud->match_data->tpl_levels - 1; \
+ \
+ start = ud->match_data->level_start_idx[tpl]; \
+ \
+ id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
+ start); \
+ if (id == ud->res##_cnt) { \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } \
+ \
+ set_bit(id, ud->res##_map); \
+ return &ud->res##s[id]; \
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
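+
+/*
+ * The invocations above define __udma_reserve_tchan() and
+ * __udma_reserve_rchan(), used by udma_get_tchan()/udma_get_rchan() below.
+ */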
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->tchan))
+ return PTR_ERR(uc->tchan);
+
+ return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return 0;
+ }
+
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->rchan))
+ return PTR_ERR(uc->rchan);
+
+ return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ const struct udma_match_data *match_data = ud->match_data;
+ int chan_id, end;
+
+ if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+ dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ if (uc->tchan) {
+ dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return -EBUSY;
+ } else if (uc->rchan) {
+ dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return -EBUSY;
+ }
+
+ /* Can be optimized, but let's have it like this for now */
+ end = min(ud->tchan_cnt, ud->rchan_cnt);
+ /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+ chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+ for (; chan_id < end; chan_id++) {
+ if (!test_bit(chan_id, ud->tchan_map) &&
+ !test_bit(chan_id, ud->rchan_map))
+ break;
+ }
+
+ if (chan_id == end)
+ return -ENOENT;
+
+ set_bit(chan_id, ud->tchan_map);
+ set_bit(chan_id, ud->rchan_map);
+ uc->tchan = &ud->tchans[chan_id];
+ uc->rchan = &ud->rchans[chan_id];
+
+ return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (!uc->rchan) {
+ dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+ return -EINVAL;
+ }
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+ uc->id, uc->rflow->id);
+ return 0;
+ }
+
+ uc->rflow = __udma_get_rflow(ud, flow_id);
+ if (IS_ERR(uc->rflow))
+ return PTR_ERR(uc->rflow);
+
+ return 0;
+}
+
+static void udma_put_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+ uc->rchan->id);
+ clear_bit(uc->rchan->id, ud->rchan_map);
+ uc->rchan = NULL;
+ }
+}
+
+static void udma_put_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+ uc->tchan->id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ }
+}
+
+static void udma_put_rflow(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+ uc->rflow->id);
+ __udma_put_rflow(ud, uc->rflow);
+ uc->rflow = NULL;
+ }
+}
+
+static void udma_free_tx_resources(struct udma_chan *uc)
+{
+ if (!uc->tchan)
+ return;
+
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->t_ring = NULL;
+ uc->tchan->tc_ring = NULL;
+
+ udma_put_tchan(uc);
+}
+
+static int udma_alloc_tx_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = udma_get_tchan(uc);
+ if (ret)
+ return ret;
+
+ uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
+ uc->tchan->id, 0);
+ if (!uc->tchan->t_ring) {
+ ret = -EBUSY;
+ goto err_tx_ring;
+ }
+
+ uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!uc->tchan->tc_ring) {
+ ret = -EBUSY;
+ goto err_txc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->tc_ring = NULL;
+err_txc_ring:
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ uc->tchan->t_ring = NULL;
+err_tx_ring:
+ udma_put_tchan(uc);
+
+ return ret;
+}
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+ if (!uc->rchan)
+ return;
+
+ if (uc->rflow) {
+ struct udma_rflow *rflow = uc->rflow;
+
+ k3_ringacc_ring_free(rflow->fd_ring);
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->fd_ring = NULL;
+ rflow->r_ring = NULL;
+
+ udma_put_rflow(uc);
+ }
+
+ udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct k3_ring_cfg ring_cfg;
+ struct udma_rflow *rflow;
+ int fd_ring_id;
+ int ret;
+
+ ret = udma_get_rchan(uc);
+ if (ret)
+ return ret;
+
+ /* For MEM_TO_MEM we don't need rflow or rings */
+ if (uc->config.dir == DMA_MEM_TO_MEM)
+ return 0;
+
+ ret = udma_get_rflow(uc, uc->rchan->id);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_rflow;
+ }
+
+ rflow = uc->rflow;
+ fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
+ if (!rflow->fd_ring) {
+ ret = -EBUSY;
+ goto err_rx_ring;
+ }
+
+ rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!rflow->r_ring) {
+ ret = -EBUSY;
+ goto err_rxc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->r_ring = NULL;
+err_rxc_ring:
+ k3_ringacc_ring_free(rflow->fd_ring);
+ rflow->fd_ring = NULL;
+err_rx_ring:
+ udma_put_rflow(uc);
+err_rflow:
+ udma_put_rchan(uc);
+
+ return ret;
+}
+
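+/*
+ * valid_params bitmasks for the TISCI channel configuration messages:
+ * the resource manager firmware only acts on the fields whose VALID
+ * bit is set, the rest of the message is ignored.
+ */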
+#define TISCI_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+
+#define TISCI_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+
+static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct udma_rchan *rchan = uc->rchan;
+ int ret = 0;
+
+ /* Non-synchronized - mem to mem transfer */
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+ req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret) {
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+ return ret;
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_rx.rxcq_qnum = tc_ring;
+ req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = mode;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ req_tx.tx_fetch_size = fetch_size >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+ int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = fetch_size >> 2;
+ req_rx.rxcq_qnum = rx_ring;
+ req_rx.rx_chan_type = mode;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+ return ret;
+ }
+
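+ /*
+ * Configure the rflow that matches the rchan id: flows with the same
+ * id as an rchan are reserved as that channel's default flow (see
+ * udma_setup_resources()), and all four free descriptor queues are
+ * pointed at the same fd_ring here.
+ */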
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = rchan->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+ flow_req.rx_dest_qnum = rx_ring;
+ flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+ flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+ flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+ flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+ flow_req.rx_fdq0_sz0_qnum = fd_ring;
+ flow_req.rx_fdq1_qnum = fd_ring;
+ flow_req.rx_fdq2_qnum = fd_ring;
+ flow_req.rx_fdq3_qnum = fd_ring;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_match_data *match_data = ud->match_data;
+ struct k3_ring *irq_ring;
+ u32 irq_udma_idx;
+ int ret;
+
+ if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->use_dma_pool = true;
+ /* in case of MEM_TO_MEM we have a maximum of two TRs */
+ if (uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+ uc->config.pkt_mode = false;
+ }
+ }
+
+ if (uc->use_dma_pool) {
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non-synchronized - mem to mem transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_get_chan_pair(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ udma_free_tx_resources(uc);
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->rflow->r_ring;
+ irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+ ret = udma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Can not happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_stop(uc);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ goto err_res_free;
+ }
+ }
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ k3_ringacc_get_ring_id(irq_ring));
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from UDMA (TR events) only needed for slave TR mode channels */
+ if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
+static int udma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
+
+ return 0;
+}
+
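+/*
+ * A TR mode descriptor is a single allocation laid out as
+ * [CPPI5 TR descriptor header][tr_count TR request records]
+ * [tr_count TR response records]; the request and response base
+ * pointers below are derived from that layout.
+ */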
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+ size_t tr_size, int tr_count,
+ enum dma_transfer_direction dir)
+{
+ struct udma_hwdesc *hwdesc;
+ struct cppi5_desc_hdr_t *tr_desc;
+ struct udma_desc *d;
+ u32 reload_count = 0;
+ u32 ring_id;
+
+ switch (tr_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+ return NULL;
+ }
+
+ /* We have only one descriptor containing multiple TRs */
+ d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = tr_count;
+
+ d->hwdesc_count = 1;
+ hwdesc = &d->hwdesc[0];
+
+ /* Allocate memory for DMA ring descriptor */
+ if (uc->use_dma_pool) {
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ } else {
+ hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+ tr_count);
+ hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+ uc->ud->desc_align);
+ hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+ hwdesc->cppi5_desc_size,
+ &hwdesc->cppi5_desc_paddr,
+ GFP_NOWAIT);
+ }
+
+ if (!hwdesc->cppi5_desc_vaddr) {
+ kfree(d);
+ return NULL;
+ }
+
+ /* Start of the TR req records */
+ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+ /* Start address of the TR response array */
+ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+ tr_desc = hwdesc->cppi5_desc_vaddr;
+
+ if (uc->cyclic)
+ reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+ cppi5_desc_set_pktids(tr_desc, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req = NULL;
+ unsigned int i;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->residue += sg_dma_len(sgent);
+
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[i].addr = sg_dma_address(sgent);
+ tr_req[i].icnt0 = burst * dev_width;
+ tr_req[i].dim1 = burst * dev_width;
+ tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+ }
+
+ cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+ enum dma_slave_buswidth dev_width,
+ u16 elcnt)
+{
+ if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+ return 0;
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ d->static_tr.elsize = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ d->static_tr.elsize = 1;
+ break;
+ case DMA_SLAVE_BUSWIDTH_3_BYTES:
+ d->static_tr.elsize = 2;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ d->static_tr.elsize = 3;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ d->static_tr.elsize = 4;
+ break;
+ default: /* not reached */
+ return -EINVAL;
+ }
+
+ d->static_tr.elcnt = elcnt;
+
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * In TR mode, when the channel is not cyclic, we also need PDMA to
+ * close the packet, otherwise the transfer will stall because PDMA
+ * holds on to the data it has received from the peripheral.
+ */
+ if (uc->config.pkt_mode || !uc->cyclic) {
+ unsigned int div = dev_width * elcnt;
+
+ if (uc->cyclic)
+ d->static_tr.bstcnt = d->residue / d->sglen / div;
+ else
+ d->static_tr.bstcnt = d->residue / div;
+
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+ } else {
+ d->static_tr.bstcnt = 0;
+ }
+
+ return 0;
+}
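+
+/*
+ * Worked (illustrative) example for the non-cyclic case: an 8-bit
+ * peripheral (dev_width = 1, so elsize = 0) doing a 4096 byte transfer
+ * with burst = 8 gets elcnt = 8 and bstcnt = 4096 / (1 * 8) = 512,
+ * which fits the 12-bit statictr_z_mask of am654.
+ */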
+
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_host_desc_t *h_desc = NULL;
+ struct udma_desc *d;
+ u32 ring_id;
+ unsigned int i;
+
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+ d->hwdesc_count = sglen;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for_each_sg(sgl, sgent, sglen, i) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+ struct cppi5_host_desc_t *desc;
+ size_t sg_len = sg_dma_len(sgent);
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ d->residue += sg_len;
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ desc = hwdesc->cppi5_desc_vaddr;
+
+ if (i == 0) {
+ cppi5_hdesc_init(desc, 0, 0);
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+ } else {
+ cppi5_hdesc_reset_hbdesc(desc);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+ }
+
+ /* attach the sg buffer to the descriptor */
+ cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+ /* Attach link as host buffer descriptor */
+ if (h_desc)
+ cppi5_hdesc_link_hbdesc(h_desc,
+ hwdesc->cppi5_desc_paddr);
+
+ if (dir == DMA_MEM_TO_DEV)
+ h_desc = desc;
+ }
+
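+ /*
+ * The packet length field in the CPPI5 host descriptor header is
+ * 22 bits wide, which is where the 4M limit below comes from.
+ */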
+ if (d->residue >= SZ_4M) {
+ dev_err(uc->ud->dev,
+ "%s: Transfer size %u is over the supported 4M range\n",
+ __func__, d->residue);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+ return d;
+}
+
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (!data || len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ if (d->dir == DMA_MEM_TO_DEV)
+ memcpy(h_desc->epib, data, len);
+
+ if (uc->config.needs_epib)
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ d->metadata = data;
+ d->metadata_size = len;
+ if (uc->config.needs_epib)
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return ERR_PTR(-ENOTSUPP);
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ *max_len = uc->config.metadata_size;
+
+ *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
+ CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
+ *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
+
+ return h_desc->epib;
+}
+
+static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = payload_len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (payload_len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ if (uc->config.needs_epib) {
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+ }
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
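+/*
+ * Attached to each descriptor via vd.tx.metadata_ops so that clients
+ * can pass EPIB/PS metadata either as a prepared buffer
+ * (DESC_METADATA_CLIENT, dmaengine_desc_attach_metadata()) or by
+ * writing into the descriptor directly (DESC_METADATA_ENGINE, the
+ * get_ptr/set_len pair).
+ */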
+static struct dma_descriptor_metadata_ops metadata_ops = {
+ .attach = udma_attach_metadata,
+ .get_ptr = udma_get_metadata_ptr,
+ .set_len = udma_set_metadata_len,
+};
+
+static struct dma_async_tx_descriptor *
+udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
+ context);
+ else
+ d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
+ context);
+
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req;
+ unsigned int i;
+ unsigned int periods = buf_len / period_len;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+ if (!d)
+ return NULL;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for (i = 0; i < periods; i++) {
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[i].addr = buf_addr + period_len * i;
+ tr_req[i].icnt0 = dev_width;
+ tr_req[i].icnt1 = period_len / dev_width;
+ tr_req[i].dim1 = dev_width;
+
+ if (!(flags & DMA_PREP_INTERRUPT))
+ cppi5_tr_csf_set(&tr_req[i].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ }
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct udma_desc *d;
+ u32 ring_id;
+ int i;
+ int periods = buf_len / period_len;
+
+ if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+ return NULL;
+
+ if (period_len >= SZ_4M)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->hwdesc_count = periods;
+
+ /* TODO: re-check this... */
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for (i = 0; i < periods; i++) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t period_addr = buf_addr + (period_len * i);
+ struct cppi5_host_desc_t *h_desc;
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ h_desc = hwdesc->cppi5_desc_vaddr;
+
+ cppi5_hdesc_init(h_desc, 0, 0);
+ cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+ /* attach each period to a new descriptor */
+ cppi5_hdesc_attach_buf(h_desc,
+ period_addr, period_len,
+ period_addr, period_len);
+ }
+
+ return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ uc->cyclic = true;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+ else
+ d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+
+ if (!d)
+ return NULL;
+
+ d->sglen = buf_len / period_len;
+
+ d->dir = dir;
+ d->residue = buf_len;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_desc *d;
+ struct cppi5_tr_type15_t *tr_req;
+ int num_tr;
+ size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+
+ if (uc->config.dir != DMA_MEM_TO_MEM) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(DMA_MEM_TO_MEM));
+ return NULL;
+ }
+
+ if (len < SZ_64K) {
+ num_tr = 1;
+ tr0_cnt0 = len;
+ tr0_cnt1 = 1;
+ } else {
+ unsigned long align_to = __ffs(src | dest);
+
+ if (align_to > 3)
+ align_to = 3;
+ /*
+ * Keep it simple: tr0 moves SZ_64K-aligned blocks and tr1 moves
+ * the remainder.
+ */
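+ /*
+ * Illustrative numbers: copying 1 MiB between 8 byte aligned
+ * buffers gives align_to = 3, so tr0_cnt0 = SZ_64K - 8 = 65528,
+ * tr0_cnt1 = 1048576 / 65528 = 16 and
+ * tr1_cnt0 = 1048576 - 16 * 65528 = 128.
+ */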
+ num_tr = 2;
+ tr0_cnt0 = (SZ_64K - BIT(align_to));
+ if (len / tr0_cnt0 >= SZ_64K) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return NULL;
+ }
+
+ tr0_cnt1 = len / tr0_cnt0;
+ tr1_cnt0 = len % tr0_cnt0;
+ }
+
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
+ if (!d)
+ return NULL;
+
+ d->dir = DMA_MEM_TO_MEM;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+ d->residue = len;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+
+ cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[0].addr = src;
+ tr_req[0].icnt0 = tr0_cnt0;
+ tr_req[0].icnt1 = tr0_cnt1;
+ tr_req[0].icnt2 = 1;
+ tr_req[0].icnt3 = 1;
+ tr_req[0].dim1 = tr0_cnt0;
+
+ tr_req[0].daddr = dest;
+ tr_req[0].dicnt0 = tr0_cnt0;
+ tr_req[0].dicnt1 = tr0_cnt1;
+ tr_req[0].dicnt2 = 1;
+ tr_req[0].dicnt3 = 1;
+ tr_req[0].ddim1 = tr0_cnt0;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].icnt0 = tr1_cnt0;
+ tr_req[1].icnt1 = 1;
+ tr_req[1].icnt2 = 1;
+ tr_req[1].icnt3 = 1;
+
+ tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].dicnt0 = tr1_cnt0;
+ tr_req[1].dicnt1 = 1;
+ tr_req[1].dicnt2 = 1;
+ tr_req[1].dicnt3 = 1;
+ }
+
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static void udma_issue_pending(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* If we have something pending and no active descriptor */
+ if (vchan_issue_pending(&uc->vc) && !uc->desc) {
+ /*
+ * start a descriptor, unless the channel is both marked as
+ * terminating and still running (i.e. teardown has not
+ * completed yet).
+ */
+ if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
+ udma_is_chan_running(uc)))
+ udma_start(uc);
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+}
+
+static enum dma_status udma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
+ ret = DMA_PAUSED;
+
+ if (ret == DMA_COMPLETE || !txstate)
+ goto out;
+
+ if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
+ u32 peer_bcnt = 0;
+ u32 bcnt = 0;
+ u32 residue = uc->desc->residue;
+ u32 delay = 0;
+
+ if (uc->desc->dir == DMA_MEM_TO_DEV) {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_SBCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_BCNT_REG);
+
+ if (bcnt > peer_bcnt)
+ delay = bcnt - peer_bcnt;
+ }
+ } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_BCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_BCNT_REG);
+
+ if (peer_bcnt > bcnt)
+ delay = peer_bcnt - bcnt;
+ }
+ } else {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_BCNT_REG);
+ }
+
+ bcnt -= uc->bcnt;
+ if (bcnt && !(bcnt % uc->desc->residue))
+ residue = 0;
+ else
+ residue -= bcnt % uc->desc->residue;
+
+ if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
+ ret = DMA_COMPLETE;
+ delay = 0;
+ }
+
+ dma_set_residue(txstate, residue);
+ dma_set_in_flight_bytes(txstate, delay);
+
+ } else {
+ ret = DMA_COMPLETE;
+ }
+
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ return ret;
+}
+
+static int udma_pause(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* pause the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE,
+ UDMA_CHAN_RT_CTL_PAUSE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_resume(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* resume the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_terminate_all(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ if (udma_is_chan_running(uc))
+ udma_stop(uc);
+
+ if (uc->desc) {
+ uc->terminated_desc = uc->desc;
+ uc->desc = NULL;
+ uc->terminated_desc->terminated = true;
+ cancel_delayed_work(&uc->tx_drain.work);
+ }
+
+ uc->paused = false;
+
+ vchan_get_all_descriptors(&uc->vc, &head);
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ vchan_dma_desc_free_list(&uc->vc, &head);
+
+ return 0;
+}
+
+static void udma_synchronize(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ vchan_synchronize(&uc->vc);
+
+ if (uc->state == UDMA_CHAN_IS_TERMINATING) {
+ timeout = wait_for_completion_timeout(&uc->teardown_completed,
+ timeout);
+ if (!timeout) {
+ dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
+ uc->id);
+ udma_dump_chan_stdata(uc);
+ udma_reset_chan(uc, true);
+ }
+ }
+
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc))
+ dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ udma_reset_rings(uc);
+}
+
+static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd,
+ struct dmaengine_result *result)
+{
+ struct udma_chan *uc = to_udma_chan(&vc->chan);
+ struct udma_desc *d;
+
+ if (!vd)
+ return;
+
+ d = to_udma_desc(&vd->tx);
+
+ if (d->metadata_size)
+ udma_fetch_epib(uc, d);
+
+ /* Provide residue information for the client */
+ if (result) {
+ void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+ if (cppi5_desc_get_type(desc_vaddr) ==
+ CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ result->residue = d->residue -
+ cppi5_hdesc_get_pktlen(desc_vaddr);
+ if (result->residue)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
+ } else {
+ result->residue = 0;
+ result->result = DMA_TRANS_NOERROR;
+ }
+ }
+}
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void udma_vchan_complete(unsigned long arg)
+{
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_desc *vd, *_vd;
+ struct dmaengine_desc_callback cb;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
+ }
+ spin_unlock_irq(&vc->lock);
+
+ udma_desc_pre_callback(vc, vd, NULL);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct dmaengine_result result;
+
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+
+ list_del(&vd->node);
+
+ udma_desc_pre_callback(vc, vd, &result);
+ dmaengine_desc_callback_invoke(&cb, &result);
+
+ vchan_vdesc_fini(vd);
+ }
+}
+
+static void udma_free_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+
+ udma_terminate_all(chan);
+ if (uc->terminated_desc) {
+ udma_reset_chan(uc, false);
+ udma_reset_rings(uc);
+ }
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ destroy_delayed_work_on_stack(&uc->tx_drain.work);
+
+ if (uc->irq_num_ring > 0) {
+ free_irq(uc->irq_num_ring, uc);
+
+ uc->irq_num_ring = 0;
+ }
+ if (uc->irq_num_udma > 0) {
+ free_irq(uc->irq_num_udma, uc);
+
+ uc->irq_num_udma = 0;
+ }
+
+ /* Release PSI-L pairing */
+ if (uc->psil_paired) {
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+ }
+
+ vchan_free_chan_resources(&uc->vc);
+ tasklet_kill(&uc->vc.task);
+
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+}
+
+static struct platform_driver udma_driver;
+
+static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct udma_chan_config *ucc;
+ struct psil_endpoint_config *ep_config;
+ struct udma_chan *uc;
+ struct udma_dev *ud;
+ u32 *args;
+
+ if (chan->device->dev->driver != &udma_driver.driver)
+ return false;
+
+ uc = to_udma_chan(chan);
+ ucc = &uc->config;
+ ud = uc->ud;
+ args = param;
+
+ ucc->remote_thread_id = args[0];
+
+ if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ ucc->dir = DMA_MEM_TO_DEV;
+ else
+ ucc->dir = DMA_DEV_TO_MEM;
+
+ ep_config = psil_get_ep_config(ucc->remote_thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ return false;
+ }
+
+ ucc->pkt_mode = ep_config->pkt_mode;
+ ucc->channel_tpl = ep_config->channel_tpl;
+ ucc->notdpkt = ep_config->notdpkt;
+ ucc->ep_type = ep_config->ep_type;
+
+ if (ucc->ep_type != PSIL_EP_NATIVE) {
+ const struct udma_match_data *match_data = ud->match_data;
+
+ if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
+ ucc->enable_acc32 = ep_config->pdma_acc32;
+ if (match_data->flags & UDMA_FLAG_PDMA_BURST)
+ ucc->enable_burst = ep_config->pdma_burst;
+ }
+
+ ucc->needs_epib = ep_config->needs_epib;
+ ucc->psd_size = ep_config->psd_size;
+ ucc->metadata_size =
+ (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
+ ucc->psd_size;
+
+ if (ucc->pkt_mode)
+ ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+ ucc->metadata_size, ud->desc_align);
+
+ dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
+ ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
+
+ return true;
+}
+
+static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct udma_dev *ud = ofdma->of_dma_data;
+ dma_cap_mask_t mask = ud->ddev.cap_mask;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = __dma_request_channel(&mask, udma_dma_filter_fn,
+ &dma_spec->args[0], ofdma->of_node);
+ if (!chan) {
+ dev_err(ud->dev, "get channel fail in %s.\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return chan;
+}
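+
+/*
+ * Consumers reference a channel with a single dma-cell holding the
+ * PSI-L thread id, e.g. (illustrative, K3_PSIL_DST_THREAD_ID_OFFSET
+ * in the id marks a MEM_TO_DEV thread as in the filter above):
+ *
+ *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+ *	dma-names = "tx", "rx";
+ */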
+
+static struct udma_match_data am654_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 8, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data am654_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = true, /* TEST: DMA domains */
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 3,
+ .level_start_idx = {
+ [0] = 16, /* Normal channels */
+ [1] = 4, /* High Throughput channels */
+ [2] = 0, /* Ultra High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static const struct of_device_id udma_of_match[] = {
+ {
+ .compatible = "ti,am654-navss-main-udmap",
+ .data = &am654_main_data,
+ },
+ {
+ .compatible = "ti,am654-navss-mcu-udmap",
+ .data = &am654_mcu_data,
+ }, {
+ .compatible = "ti,j721e-navss-main-udmap",
+ .data = &j721e_main_data,
+ }, {
+ .compatible = "ti,j721e-navss-mcu-udmap",
+ .data = &j721e_mcu_data,
+ },
+ { /* Sentinel */ },
+};
+
+static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < MMR_LAST; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mmr_names[i]);
+ ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ud->mmrs[i]))
+ return PTR_ERR(ud->mmrs[i]);
+ }
+
+ return 0;
+}
+
+static int udma_setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret, i, j;
+ u32 cap2, cap3;
+ struct ti_sci_resource_desc *rm_desc;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ static const char * const range_names[] = { "ti,sci-rm-range-tchan",
+ "ti,sci-rm-range-rchan",
+ "ti,sci-rm-range-rflow" };
+
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ ud->rflow_cnt = cap3 & 0x3fff;
+ ud->tchan_cnt = cap2 & 0x1ff;
+ ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+ ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+ ch_count = ud->tchan_cnt + ud->rchan_cnt;
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+ BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+ !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+ !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /*
+ * RX flows with the same Ids as RX channels are reserved to be used
+ * as default flows if remote HW can't generate flow_ids. Those
+ * RX flows can be requested only explicitly by id.
+ */
+ bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+ /* by default no GP rflows are assigned to Linux */
+ bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++)
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->tchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* rchan and matching default flow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ irq_res.sets += rm_res->sets;
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
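+ /*
+ * rchan completion event indexes are offset by rchan_oes_offset
+ * (0x2000 on am654, 0x400 on j721e) from the tchan ones, so the MSI
+ * descriptors for the rchan ranges are shifted accordingly.
+ */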
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->match_data->rchan_oes_offset;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ /* GP rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all gp flows are assigned exclusively to Linux */
+ bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
+ ud->rflow_cnt - ud->rchan_cnt);
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rflow_gp_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+ ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+ if (!ch_count)
+ return -ENODEV;
+
+ ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+ GFP_KERNEL);
+ if (!ud->channels)
+ return -ENOMEM;
+
+ dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+
+ return ch_count;
+}
+
+#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int udma_probe(struct platform_device *pdev)
+{
+ struct device_node *navss_node = pdev->dev.parent->of_node;
+ struct device *dev = &pdev->dev;
+ struct udma_dev *ud;
+ const struct of_device_id *match;
+ int i, ret;
+ int ch_count;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret)
+ dev_err(dev, "failed to set dma mask stuff\n");
+
+ ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
+ if (!ud)
+ return -ENOMEM;
+
+ ret = udma_get_mmrs(pdev, ud);
+ if (ret)
+ return ret;
+
+ ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
+ if (IS_ERR(ud->tisci_rm.tisci))
+ return PTR_ERR(ud->tisci_rm.tisci);
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+ pdev->id = ud->tisci_rm.tisci_dev_id;
+
+ ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_navss_dev_id);
+ if (ret) {
+ dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+
+ ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
+ ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
+
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (IS_ERR(ud->ringacc))
+ return PTR_ERR(ud->ringacc);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ ud->match_data = match->data;
+
+ dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+
+ ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
+ ud->ddev.device_config = udma_slave_config;
+ ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ ud->ddev.device_issue_pending = udma_issue_pending;
+ ud->ddev.device_tx_status = udma_tx_status;
+ ud->ddev.device_pause = udma_pause;
+ ud->ddev.device_resume = udma_resume;
+ ud->ddev.device_terminate_all = udma_terminate_all;
+ ud->ddev.device_synchronize = udma_synchronize;
+
+ ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+ ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
+ DESC_METADATA_ENGINE;
+ if (ud->match_data->enable_memcpy_support) {
+ dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
+ ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
+ }
+
+ ud->ddev.dev = dev;
+ ud->dev = dev;
+ ud->psil_base = ud->match_data->psil_base;
+
+ INIT_LIST_HEAD(&ud->ddev.channels);
+ INIT_LIST_HEAD(&ud->desc_to_purge);
+
+ ch_count = udma_setup_resources(ud);
+ if (ch_count <= 0)
+ return ch_count;
+
+ spin_lock_init(&ud->lock);
+ INIT_WORK(&ud->purge_work, udma_purge_desc_work);
+
+ ud->desc_align = 64;
+ if (ud->desc_align < dma_get_cache_alignment())
+ ud->desc_align = dma_get_cache_alignment();
+
+ for (i = 0; i < ud->tchan_cnt; i++) {
+ struct udma_tchan *tchan = &ud->tchans[i];
+
+ tchan->id = i;
+ tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rchan_cnt; i++) {
+ struct udma_rchan *rchan = &ud->rchans[i];
+
+ rchan->id = i;
+ rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rflow_cnt; i++) {
+ struct udma_rflow *rflow = &ud->rflows[i];
+
+ rflow->id = i;
+ }
+
+ for (i = 0; i < ch_count; i++) {
+ struct udma_chan *uc = &ud->channels[i];
+
+ uc->ud = ud;
+ uc->vc.desc_free = udma_desc_free;
+ uc->id = i;
+ uc->tchan = NULL;
+ uc->rchan = NULL;
+ uc->config.remote_thread_id = -1;
+ uc->config.dir = DMA_MEM_TO_MEM;
+ uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+ dev_name(dev), i);
+
+ vchan_init(&uc->vc, &ud->ddev);
+ /* Use custom vchan completion handling */
+ tasklet_init(&uc->vc.task, udma_vchan_complete,
+ (unsigned long)&uc->vc);
+ init_completion(&uc->teardown_completed);
+ }
+
+ ret = dma_async_device_register(&ud->ddev);
+ if (ret) {
+ dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ud);
+
+ ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
+ if (ret) {
+ dev_err(dev, "failed to register of_dma controller\n");
+ dma_async_device_unregister(&ud->ddev);
+ }
+
+ return ret;
+}
+
+static struct platform_driver udma_driver = {
+ .driver = {
+ .name = "ti-udma",
+ .of_match_table = udma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(udma_driver);
+
+/* Private interfaces to UDMA */
+#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
new file mode 100644
index 000000000000..128d8744a435
--- /dev/null
+++ b/drivers/dma/ti/k3-udma.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_H_
+#define K3_UDMA_H_
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* Global registers */
+#define UDMA_REV_REG 0x0
+#define UDMA_PERF_CTL_REG 0x4
+#define UDMA_EMU_CTL_REG 0x8
+#define UDMA_PSIL_TO_REG 0x10
+#define UDMA_UTC_CTL_REG 0x1c
+#define UDMA_CAP_REG(i) (0x20 + ((i) * 4))
+#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
+#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
+
+/* TX chan RT regs */
+#define UDMA_TCHAN_RT_CTL_REG 0x0
+#define UDMA_TCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_TCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_TCHAN_RT_PEER_BCNT_REG \
+ UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_TCHAN_RT_PEER_RT_EN_REG \
+ UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_TCHAN_RT_PCNT_REG 0x400
+#define UDMA_TCHAN_RT_BCNT_REG 0x408
+#define UDMA_TCHAN_RT_SBCNT_REG 0x410
+
+/* RX chan RT regs */
+#define UDMA_RCHAN_RT_CTL_REG 0x0
+#define UDMA_RCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_RCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_RCHAN_RT_PEER_BCNT_REG \
+ UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_RCHAN_RT_PEER_RT_EN_REG \
+ UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_RCHAN_RT_PCNT_REG 0x400
+#define UDMA_RCHAN_RT_BCNT_REG 0x408
+#define UDMA_RCHAN_RT_SBCNT_REG 0x410
+
+/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */
+#define UDMA_CHAN_RT_CTL_EN BIT(31)
+#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
+#define UDMA_CHAN_RT_CTL_PAUSE BIT(29)
+#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28)
+#define UDMA_CHAN_RT_CTL_ERROR BIT(0)
+
+/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */
+#define UDMA_PEER_RT_EN_ENABLE BIT(31)
+#define UDMA_PEER_RT_EN_TEARDOWN BIT(30)
+#define UDMA_PEER_RT_EN_PAUSE BIT(29)
+#define UDMA_PEER_RT_EN_FLUSH BIT(28)
+#define UDMA_PEER_RT_EN_IDLE BIT(1)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
+ */
+#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24)
+#define PDMA_STATIC_TR_X_SHIFT (24)
+#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0)
+#define PDMA_STATIC_TR_Y_SHIFT (0)
+
+#define PDMA_STATIC_TR_Y(x) \
+ (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK)
+#define PDMA_STATIC_TR_X(x) \
+ (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK)
+
+#define PDMA_STATIC_TR_XY_ACC32 BIT(30)
+#define PDMA_STATIC_TR_XY_BURST BIT(31)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
+ */
+#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+
+struct udma_dev;
+struct udma_tchan;
+struct udma_rchan;
+struct udma_rflow;
+
+enum udma_rm_range {
+ RM_RANGE_TCHAN = 0,
+ RM_RANGE_RCHAN,
+ RM_RANGE_RFLOW,
+ RM_RANGE_LAST,
+};
+
+struct udma_tisci_rm {
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
+ u32 tisci_dev_id;
+
+ /* tisci information for PSI-L thread pairing/unpairing */
+ const struct ti_sci_rm_psil_ops *tisci_psil_ops;
+ u32 tisci_navss_dev_id;
+
+ struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
+};
+
+/* Direct access to UDMA low level resources for the glue layer */
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+void xudma_dev_put(struct udma_dev *ud);
+u32 xudma_dev_get_psil_base(struct udma_dev *ud);
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+
+struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id);
+
+void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p);
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p);
+
+int xudma_tchan_get_id(struct udma_tchan *p);
+int xudma_rchan_get_id(struct udma_rchan *p);
+int xudma_rflow_get_id(struct udma_rflow *p);
+
+u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
+void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
+u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
+void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+
+#endif /* K3_UDMA_H_ */
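The prototypes above form the contract between the core UDMA driver and the K3 glue layer. A sketch of a glue-side setup path, assuming of_xudma_dev_get() and xudma_tchan_get() follow the ERR_PTR convention and that an id of -1 requests any free channel (both assumptions; the function and property names marked below are illustrative):

	static int foo_glue_setup(struct device_node *np, u32 src_thread,
				  u32 dst_thread)
	{
		struct udma_dev *ud;
		struct udma_tchan *tchan;
		int ret;

		/* "ti,udma" is an illustrative phandle property name */
		ud = of_xudma_dev_get(np, "ti,udma");
		if (IS_ERR(ud))
			return PTR_ERR(ud);

		/* -1 is assumed to mean "any free transmit channel" */
		tchan = xudma_tchan_get(ud, -1);
		if (IS_ERR(tchan)) {
			ret = PTR_ERR(tchan);
			goto err_put_dev;
		}

		/* Pair the PSI-L source and destination threads */
		ret = xudma_navss_psil_pair(ud, src_thread, dst_thread);
		if (ret)
			goto err_put_chan;

		return 0;

	err_put_chan:
		xudma_tchan_put(ud, tchan);
	err_put_dev:
		xudma_dev_put(ud);
		return ret;
	}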
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 6b6ba238b81a..a014ab96e673 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -2,6 +2,7 @@
/*
* OMAP DMAengine support
*/
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -23,12 +24,33 @@
#define OMAP_SDMA_REQUESTS 127
#define OMAP_SDMA_CHANNELS 32
+struct omap_dma_config {
+ int lch_end;
+ unsigned int rw_priority:1;
+ unsigned int needs_busy_check:1;
+ unsigned int may_lose_context:1;
+ unsigned int needs_lch_clear:1;
+};
+
+struct omap_dma_context {
+ u32 irqenable_l0;
+ u32 irqenable_l1;
+ u32 ocp_sysconfig;
+ u32 gcr;
+};
+
struct omap_dmadev {
struct dma_device ddev;
spinlock_t lock;
void __iomem *base;
const struct omap_dma_reg *reg_map;
struct omap_system_dma_plat_info *plat;
+ const struct omap_dma_config *cfg;
+ struct notifier_block nb;
+ struct omap_dma_context context;
+ int lch_count;
+ DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
+ struct mutex lch_lock; /* for assigning logical channels */
bool legacy;
bool ll123_supported;
struct dma_pool *desc_pool;
@@ -376,6 +398,19 @@ static unsigned omap_dma_get_csr(struct omap_chan *c)
return val;
}
+static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
+{
+ struct omap_chan *c;
+ int i;
+
+ c = od->lch_map[lch];
+ if (!c)
+ return;
+
+ for (i = CSDP; i <= od->cfg->lch_end; i++)
+ omap_dma_chan_write(c, i, 0);
+}
+
static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
unsigned lch)
{
@@ -633,6 +668,37 @@ static irqreturn_t omap_dma_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
+{
+ int channel;
+
+ mutex_lock(&od->lch_lock);
+ channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
+ if (channel >= od->lch_count)
+ goto out_busy;
+ set_bit(channel, od->lch_bitmap);
+ mutex_unlock(&od->lch_lock);
+
+ omap_dma_clear_lch(od, channel);
+ *lch = channel;
+
+ return 0;
+
+out_busy:
+ mutex_unlock(&od->lch_lock);
+ *lch = -EINVAL;
+
+ return -EBUSY;
+}
+
+static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
+{
+ omap_dma_clear_lch(od, lch);
+ mutex_lock(&od->lch_lock);
+ clear_bit(lch, od->lch_bitmap);
+ mutex_unlock(&od->lch_lock);
+}
+
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -644,8 +710,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
ret = omap_request_dma(c->dma_sig, "DMA engine",
omap_dma_callback, c, &c->dma_ch);
} else {
- ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
- &c->dma_ch);
+ ret = omap_dma_get_lch(od, &c->dma_ch);
}
dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
@@ -702,7 +767,11 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
c->channel_base = NULL;
od->lch_map[c->dma_ch] = NULL;
vchan_free_chan_resources(&c->vc);
- omap_free_dma(c->dma_ch);
+
+ if (od->legacy)
+ omap_free_dma(c->dma_ch);
+ else
+ omap_dma_put_lch(od, c->dma_ch);
dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
c->dma_sig);
@@ -1453,16 +1522,128 @@ static void omap_dma_free(struct omap_dmadev *od)
}
}
+/* Currently only used for omap2. For omap1, a check for lcd_dma is also needed */
+static int omap_dma_busy_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ struct omap_dmadev *od;
+ struct omap_chan *c;
+ int lch = -1;
+
+ od = container_of(nb, struct omap_dmadev, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ while (1) {
+ lch = find_next_bit(od->lch_bitmap, od->lch_count,
+ lch + 1);
+ if (lch >= od->lch_count)
+ break;
+ c = od->lch_map[lch];
+ if (!c)
+ continue;
+ if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+ return NOTIFY_BAD;
+ }
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * We are using IRQENABLE_L1, while the legacy DMA code was using
+ * IRQENABLE_L0. As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now. Context save currently seems to be needed only
+ * on omap3.
+ */
+static void omap_dma_context_save(struct omap_dmadev *od)
+{
+ od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
+ od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
+ od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+ od->context.gcr = omap_dma_glbl_read(od, GCR);
+}
+
+static void omap_dma_context_restore(struct omap_dmadev *od)
+{
+ int i;
+
+ omap_dma_glbl_write(od, GCR, od->context.gcr);
+ omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
+ omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
+ omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);
+
+ /* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
+ if (od->plat->errata & DMA_ROMCODE_BUG)
+ omap_dma_glbl_write(od, IRQSTATUS_L0, 0);
+
+ /* Clear dma channels */
+ for (i = 0; i < od->lch_count; i++)
+ omap_dma_clear_lch(od, i);
+}
+
+/* Currently only used for omap3 */
+static int omap_dma_context_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ struct omap_dmadev *od;
+
+ od = container_of(nb, struct omap_dmadev, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ omap_dma_context_save(od);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ omap_dma_context_restore(od);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
+ int max_fifo_depth, int tparams)
+{
+ u32 val;
+
+ /* Set only for omap2430 and later */
+ if (!od->cfg->rw_priority)
+ return;
+
+ if (max_fifo_depth == 0)
+ max_fifo_depth = 1;
+ if (arb_rate == 0)
+ arb_rate = 1;
+
+ val = 0xff & max_fifo_depth;
+ val |= (0x3 & tparams) << 12;
+ val |= (arb_rate & 0xff) << 16;
+
+ omap_dma_glbl_write(od, GCR, val);
+}
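For reference, the packing in omap_dma_init_gcr() implies the following GCR layout (inferred from the shifts above, not from a TRM):

	/* GCR[7:0]   - max FIFO depth (forced to at least 1)
	 * GCR[13:12] - transfer parameters (tparams)
	 * GCR[23:16] - arbitration rate (forced to at least 1)
	 */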
+
#define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+/*
+ * No flags currently set for default configuration as omap1 is still
+ * using platform data.
+ */
+static const struct omap_dma_config default_cfg;
+
static int omap_dma_probe(struct platform_device *pdev)
{
+ const struct omap_dma_config *conf;
struct omap_dmadev *od;
struct resource *res;
int rc, i, irq;
- u32 lch_count;
+ u32 val;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
@@ -1473,9 +1654,21 @@ static int omap_dma_probe(struct platform_device *pdev)
if (IS_ERR(od->base))
return PTR_ERR(od->base);
- od->plat = omap_get_plat_info();
- if (!od->plat)
- return -EPROBE_DEFER;
+ conf = of_device_get_match_data(&pdev->dev);
+ if (conf) {
+ od->cfg = conf;
+ od->plat = dev_get_platdata(&pdev->dev);
+ if (!od->plat) {
+ dev_err(&pdev->dev, "omap_system_dma_plat_info is missing\n");
+ return -ENODEV;
+ }
+ } else {
+ od->cfg = &default_cfg;
+
+ od->plat = omap_get_plat_info();
+ if (!od->plat)
+ return -EPROBE_DEFER;
+ }
od->reg_map = od->plat->reg_map;
@@ -1507,6 +1700,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
+ mutex_init(&od->lch_lock);
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
@@ -1522,18 +1716,30 @@ static int omap_dma_probe(struct platform_device *pdev)
/* Number of available logical channels */
if (!pdev->dev.of_node) {
- lch_count = od->plat->dma_attr->lch_count;
- if (unlikely(!lch_count))
- lch_count = OMAP_SDMA_CHANNELS;
+ od->lch_count = od->plat->dma_attr->lch_count;
+ if (unlikely(!od->lch_count))
+ od->lch_count = OMAP_SDMA_CHANNELS;
} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
- &lch_count)) {
+ &od->lch_count)) {
dev_info(&pdev->dev,
"Missing dma-channels property, using %u.\n",
OMAP_SDMA_CHANNELS);
- lch_count = OMAP_SDMA_CHANNELS;
+ od->lch_count = OMAP_SDMA_CHANNELS;
+ }
+
+ /* Mask of allowed logical channels */
+ if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
+ "dma-channel-mask",
+ &val)) {
+ /* Tag channels not in mask as reserved */
+ val = ~val;
+ bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
}
+ if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
+ bitmap_set(od->lch_bitmap, 0, 2);
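An illustration of the mask handling, reusing the names from the hunk above with made-up values: a "dma-channel-mask" of 0x3fffffff allows channels 0..29 only, and HS_CHANNELS_RESERVED additionally blocks channels 0 and 1:

	val = 0x3fffffff;			/* DT: channels 0..29 allowed */
	val = ~val;				/* 0xc0000000 */
	bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
	bitmap_set(od->lch_bitmap, 0, 2);	/* HS channels reserved */
	/* lch_bitmap now has bits 0, 1, 30 and 31 set, so
	 * omap_dma_get_lch() will never hand out those channels.
	 */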
- od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+ od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
+ sizeof(*od->lch_map),
GFP_KERNEL);
if (!od->lch_map)
return -ENOMEM;
@@ -1605,6 +1811,16 @@ static int omap_dma_probe(struct platform_device *pdev)
}
}
+ omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);
+
+ if (od->cfg->needs_busy_check) {
+ od->nb.notifier_call = omap_dma_busy_notifier;
+ cpu_pm_register_notifier(&od->nb);
+ } else if (od->cfg->may_lose_context) {
+ od->nb.notifier_call = omap_dma_context_notifier;
+ cpu_pm_register_notifier(&od->nb);
+ }
+
dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
@@ -1616,6 +1832,9 @@ static int omap_dma_remove(struct platform_device *pdev)
struct omap_dmadev *od = platform_get_drvdata(pdev);
int irq;
+ if (od->cfg->may_lose_context)
+ cpu_pm_unregister_notifier(&od->nb);
+
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
@@ -1637,12 +1856,45 @@ static int omap_dma_remove(struct platform_device *pdev)
return 0;
}
+static const struct omap_dma_config omap2420_data = {
+ .lch_end = CCFN,
+ .rw_priority = true,
+ .needs_lch_clear = true,
+ .needs_busy_check = true,
+};
+
+static const struct omap_dma_config omap2430_data = {
+ .lch_end = CCFN,
+ .rw_priority = true,
+ .needs_lch_clear = true,
+};
+
+static const struct omap_dma_config omap3430_data = {
+ .lch_end = CCFN,
+ .rw_priority = true,
+ .needs_lch_clear = true,
+ .may_lose_context = true,
+};
+
+static const struct omap_dma_config omap3630_data = {
+ .lch_end = CCDN,
+ .rw_priority = true,
+ .needs_lch_clear = true,
+ .may_lose_context = true,
+};
+
+static const struct omap_dma_config omap4_data = {
+ .lch_end = CCDN,
+ .rw_priority = true,
+ .needs_lch_clear = true,
+};
+
static const struct of_device_id omap_dma_match[] = {
- { .compatible = "ti,omap2420-sdma", },
- { .compatible = "ti,omap2430-sdma", },
- { .compatible = "ti,omap3430-sdma", },
- { .compatible = "ti,omap3630-sdma", },
- { .compatible = "ti,omap4430-sdma", },
+ { .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
+ { .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
+ { .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
+ { .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
+ { .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index ec4adf4260a0..23e33a85f033 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
- vchan_vdesc_fini(vd);
-
dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+ vchan_vdesc_fini(vd);
}
}
@@ -115,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
struct virt_dma_desc *vd, *_vd;
list_for_each_entry_safe(vd, _vd, head, node) {
- if (dmaengine_desc_test_reuse(&vd->tx)) {
- list_move_tail(&vd->node, &vc->desc_allocated);
- } else {
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- list_del(&vd->node);
- vc->desc_free(vd);
- }
+ list_del(&vd->node);
+ vchan_vdesc_fini(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -135,6 +129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
+ INIT_LIST_HEAD(&vc->desc_terminated);
tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index ab158bac03a7..e9f5250fbe4d 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -31,9 +31,9 @@ struct virt_dma_chan {
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
+ struct list_head desc_terminated;
struct virt_dma_desc *cyclic;
- struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- if (dmaengine_desc_test_reuse(&vd->tx))
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
list_add(&vd->node, &vc->desc_allocated);
- else
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
vc->desc_free(vd);
+ }
}
/**
@@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- /* free up stuck descriptor */
- if (vc->vd_terminated)
- vchan_vdesc_fini(vc->vd_terminated);
+ list_add_tail(&vd->node, &vc->desc_terminated);
- vc->vd_terminated = vd;
if (vc->cyclic == vd)
vc->cyclic = NULL;
}
@@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
}
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
@@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ LIST_HEAD(head);
unsigned long flags;
tasklet_kill(&vc->task);
spin_lock_irqsave(&vc->lock, flags);
- if (vc->vd_terminated) {
- vchan_vdesc_fini(vc->vd_terminated);
- vc->vd_terminated = NULL;
- }
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
}
#endif
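With the single vd_terminated pointer replaced by the desc_terminated list, the usual driver pattern looks like the sketch below. The types and names are hypothetical; the flow mirrors what in-tree virt-dma users do:

	struct foo_desc {
		struct virt_dma_desc vd;
	};

	struct foo_chan {
		struct virt_dma_chan vc;
		struct foo_desc *active;	/* descriptor on the hardware */
	};

	static int foo_dma_terminate_all(struct dma_chan *chan)
	{
		struct foo_chan *c = container_of(to_virt_chan(chan),
						  struct foo_chan, vc);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->active) {
			/* stop the hardware here, then park the in-flight
			 * descriptor on desc_terminated instead of freeing
			 * it under the lock
			 */
			vchan_terminate_vdesc(&c->active->vd);
			c->active = NULL;
		}
		vchan_get_all_descriptors(&c->vc, &head);
		spin_unlock_irqrestore(&c->vc.lock, flags);

		vchan_dma_desc_free_list(&c->vc, &head);
		return 0;
	}

	static void foo_dma_synchronize(struct dma_chan *chan)
	{
		/* frees whatever is still parked on desc_terminated */
		vchan_synchronize(to_virt_chan(chan));
	}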
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9c845c07b107..d47749a35863 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -123,10 +123,12 @@
/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL 0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
- u32 val;
+ u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) |
- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+ burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) |
- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- chan->src_burst_len = config->src_maxburst;
- chan->dst_burst_len = config->dst_maxburst;
+ chan->src_burst_len = clamp(config->src_maxburst, 1U,
+ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+ ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0;
}
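A worked example of the new clamp-plus-log2 path, with illustrative numbers:

	/* dmaengine_slave_config() with src_maxburst = 200, dst_maxburst = 0:
	 *   chan->src_burst_len = clamp(200, 1, 32768) -> 200
	 *   chan->dst_burst_len = clamp(0, 1, 32768)   -> 1
	 *
	 * zynqmp_dma_config() then programs log2 of those lengths:
	 *   ARLEN field = __ilog2_u32(200) = 7
	 *   AWLEN field = __ilog2_u32(1)   = 0
	 */

Note that the new 32768 default gives __ilog2_u32(32768) = 15 = 0xF, the same field value the old ZYNQMP_DMA_ARLEN_RST_VAL/ZYNQMP_DMA_AWLEN_RST_VAL reset defaults produced, so the register fields now hold log2 of the burst length rather than the raw value.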
@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 417dad635526..b3c99bb5fe77 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -462,7 +462,7 @@ config EDAC_ALTERA_SDMMC
config EDAC_SIFIVE
bool "Sifive platform EDAC driver"
- depends on EDAC=y && RISCV
+ depends on EDAC=y && SIFIVE_L2
help
Support for error detection and correction on the SiFive SoCs.
@@ -491,8 +491,7 @@ config EDAC_TI
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
help
- Support for error detection and correction on the
- TI SoCs.
+ Support for error detection and correction on the TI SoCs.
config EDAC_QCOM
tristate "QCOM EDAC Controller"
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 428ce98f6776..9fbad908a854 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17 || pvt->fam == 0x18) {
+ if (pvt->umc) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
@@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
- switch (pvt->fam) {
- case 0x15:
- /* Erratum #505 */
- if (pvt->model < 0x10)
- f15h_select_dct(pvt, 0);
-
- if (pvt->model == 0x60)
- amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- break;
-
- case 0x17:
- case 0x18:
+ if (pvt->umc) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
} else {
scrubval = 0;
}
- break;
+ } else if (pvt->fam == 0x15) {
+ /* Erratum #505 */
+ if (pvt->model < 0x10)
+ f15h_select_dct(pvt, 0);
- default:
+ if (pvt->model == 0x60)
+ amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ } else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
- break;
}
scrubval = scrubval & 0x001F;
@@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
+ if (pvt->umc) {
+ if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+ pvt->dram_type = MEM_LRDDR4;
+ else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+ pvt->dram_type = MEM_RDDR4;
+ else
+ pvt->dram_type = MEM_DDR4;
+ return;
+ }
+
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
@@ -1100,16 +1103,6 @@ static void determine_memory_type(struct amd64_pvt *pvt)
case 0x16:
goto ddr3;
- case 0x17:
- case 0x18:
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
-
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
@@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
+ [F19_CPUS] = {
+ .ctl_name = "F19h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+ .max_mcs = 8,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
};
/*
@@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
family_types[F17_CPUS].ctl_name = "F18h";
break;
+ case 0x19:
+ fam_type = &family_types[F19_CPUS];
+ pvt->ops = &family_types[F19_CPUS].ops;
+ family_types[F19_CPUS].ctl_name = "F19h";
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
@@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&F3->dev);
- WARN_ON(!mci);
-
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&F3->dev);
if (!mci)
@@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 9be31688110b..abbf3c274d74 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -122,6 +122,8 @@
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
/*
* Function 1 - Address Map
@@ -292,6 +294,7 @@ enum amd_families {
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M70H_CPUS,
+ F19_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 09a9e3de9595..b194658b8b5c 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci)
if (!np) {
dev_err(mci->pdev, "dt: missing /memory node\n");
return -ENODEV;
- };
+ }
rc = of_address_to_resource(np, 0, &r);
@@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci)
if (rc) {
dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
return rc;
- };
+ }
dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
r.start, resource_size(&r), PAGE_SHIFT);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7243b88f81d8..69e0d90460e6 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -505,16 +505,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
- /* If we're not yet registered with sysfs free only what was allocated
- * in edac_mc_alloc().
- */
- if (!device_is_registered(&mci->dev)) {
- _edac_mc_free(mci);
- return;
- }
+ if (device_is_registered(&mci->dev))
+ edac_unregister_sysfs(mci);
- /* the mci instance is freed here, when the sysfs object is dropped */
- edac_unregister_sysfs(mci);
+ _edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0367554e7437..c70ec0a306d8 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
static void csrow_attr_release(struct device *dev)
{
- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
-
- edac_dbg(1, "device %s released\n", dev_name(dev));
- kfree(csrow);
+ /* release device with _edac_mc_free() */
}
static const struct device_type csrow_attr_type = {
@@ -447,8 +444,7 @@ error:
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
-
- device_del(&mci->csrows[i]->dev);
+ device_unregister(&mci->csrows[i]->dev);
}
return err;
@@ -608,10 +604,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
static void dimm_attr_release(struct device *dev)
{
- struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
-
- edac_dbg(1, "device %s released\n", dev_name(dev));
- kfree(dimm);
+ /* release device with _edac_mc_free() */
}
static const struct device_type dimm_attr_type = {
@@ -893,10 +886,7 @@ static const struct attribute_group *mci_attr_groups[] = {
static void mci_attr_release(struct device *dev)
{
- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
-
- edac_dbg(1, "device %s released\n", dev_name(dev));
- kfree(mci);
+ /* release device with _edac_mc_free() */
}
static const struct device_type mci_attr_type = {
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index f564a4a8a4ae..5c1eea96230c 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -324,7 +324,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
mchbar &= I3000_MCHBAR_MASK;
- window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+ window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE);
if (!window) {
printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
mchbar);
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 432b375a4075..a8988db6d423 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -280,7 +280,7 @@ static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 0ddc41e47a96..191aa7c19ded 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a)
return a & ((1 << 16) - 1);
}
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
- return a & ((1 << 18) - 1);
-}
-
static inline u32 i5100_recmema_merr(u32 a)
{
return i5100_nrecmema_merr(a);
@@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 dw;
u32 dw2;
unsigned syndrome = 0;
- unsigned ecc_loc = 0;
unsigned merr;
unsigned bank;
unsigned rank;
@@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
syndrome = dw2;
pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
- ecc_loc = i5100_redmemb_ecc_locator(dw2);
}
if (i5100_validlog_recmemvalid(dw)) {
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 7c6a2d4d2360..6be99e0d850d 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -485,7 +485,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail0;
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
- mch_window = ioremap_nocache(mchbar, 0x1000);
+ mch_window = ioremap(mchbar, 0x1000);
if (!mch_window) {
edac_dbg(3, "error ioremapping MCHBAR!\n");
goto fail0;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4f65073f230b..d68346a8e141 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -357,7 +357,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ea622c6f3a39..ea980c556f2e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -6,7 +6,7 @@
#include "mce_amd.h"
-static struct amd_decoder_ops *fam_ops;
+static struct amd_decoder_ops fam_ops;
static u8 xec_mask = 0xf;
@@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = {
"L2 Fill Data error",
};
+static const char * const smca_ls2_mce_desc[] = {
+ "An ECC error was detected on a data cache read by a probe or victimization",
+ "An ECC error or L2 poison was detected on a data cache read by a load",
+ "An ECC error was detected on a data cache read-modify-write by a store",
+ "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
+ "An ECC error or poison bit mismatch was detected on a tag read by a load",
+ "An ECC error or poison bit mismatch was detected on a tag read by a store",
+ "An ECC error was detected on an EMEM read by a load",
+ "An ECC error was detected on an EMEM read-modify-write by a store",
+ "A parity error was detected in an L1 TLB entry by any access",
+ "A parity error was detected in an L2 TLB entry by any access",
+ "A parity error was detected in a PWC entry by any access",
+ "A parity error was detected in an STQ entry by any access",
+ "A parity error was detected in an LDQ entry by any access",
+ "A parity error was detected in a MAB entry by any access",
+ "A parity error was detected in an SCB entry state field by any access",
+ "A parity error was detected in an SCB entry address field by any access",
+ "A parity error was detected in an SCB entry data field by any access",
+ "A parity error was detected in a WCB entry by any access",
+ "A poisoned line was detected in an SCB entry by any access",
+ "A SystemReadDataError error was reported on read data returned from L2 for a load",
+ "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
+ "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
+ "A hardware assertion error was reported",
+ "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
+};
+
static const char * const smca_if_mce_desc[] = {
"Op Cache Microtag Probe Port Parity Error",
"IC Microtag or Full Tag Multi-hit Error",
@@ -378,6 +405,7 @@ struct smca_mce_desc {
static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
+ [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
[SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
[SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
[SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
@@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m)
: (xec ? "multimatch" : "parity")));
return;
}
- } else if (fam_ops->mc0_mce(ec, xec))
+ } else if (fam_ops.mc0_mce(ec, xec))
;
else
pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
@@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m)
pr_cont("Hardware Assert.\n");
else
goto wrong_mc1_mce;
- } else if (fam_ops->mc1_mce(ec, xec))
+ } else if (fam_ops.mc1_mce(ec, xec))
;
else
goto wrong_mc1_mce;
@@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m)
pr_emerg(HW_ERR "MC2 Error: ");
- if (!fam_ops->mc2_mce(ec, xec))
+ if (!fam_ops.mc2_mce(ec, xec))
pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
@@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->tsc)
pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
- if (!fam_ops)
+ /* Doesn't matter which member to test. */
+ if (!fam_ops.mc0_mce)
goto err_code;
switch (m->bank) {
@@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void)
c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
- fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
- if (!fam_ops)
- return -ENOMEM;
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ xec_mask = 0x3f;
+ goto out;
+ }
switch (c->x86) {
case 0xf:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x10:
- fam_ops->mc0_mce = f10h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f10h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x11:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x12:
- fam_ops->mc0_mce = f12h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f12h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x14:
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
- fam_ops->mc0_mce = f15h_mc0_mce;
- fam_ops->mc1_mce = f15h_mc1_mce;
- fam_ops->mc2_mce = f15h_mc2_mce;
+ fam_ops.mc0_mce = f15h_mc0_mce;
+ fam_ops.mc1_mce = f15h_mc1_mce;
+ fam_ops.mc2_mce = f15h_mc2_mce;
break;
case 0x16:
xec_mask = 0x1f;
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = f16h_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = f16h_mc2_mce;
break;
case 0x17:
case 0x18:
- xec_mask = 0x3f;
- if (!boot_cpu_has(X86_FEATURE_SMCA)) {
- printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
- goto err_out;
- }
- break;
+ pr_warn("Decoding supported only on Scalable MCA processors.\n");
+ return -EINVAL;
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- goto err_out;
+ return -EINVAL;
}
+out:
pr_info("MCE: In-kernel MCE decoding enabled.\n");
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
-
-err_out:
- kfree(fam_ops);
- fam_ops = NULL;
- return -EINVAL;
}
early_initcall(mce_amd_init);
@@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init);
static void __exit mce_amd_exit(void)
{
mce_unregister_decode_chain(&amd_mce_dec_nb);
- kfree(fam_ops);
}
MODULE_DESCRIPTION("AMD MCE decoder");
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index 413cdb4a591d..3a3dcb14ed99 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -10,7 +10,7 @@
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
#define DRVNAME "sifive_edac"
@@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
1, 1, NULL, 0,
edac_device_alloc_index());
- if (IS_ERR(p->dci))
- return PTR_ERR(p->dci);
+ if (!p->dci)
+ return -ENOMEM;
p->dci->dev = &pdev->dev;
p->dci->mod_name = "Sifive ECC Manager";
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 95662a4ff4c4..99bbaf629b8d 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
if (!pdev) {
- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+ edac_dbg(2, "Can't get tolm/tohm\n");
return -ENODEV;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index cc779f3f9e2d..a65e2f78a402 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -266,7 +266,7 @@ static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index e970134c95fa..7401733db08b 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -77,8 +77,6 @@ struct arizona_extcon_info {
const struct arizona_micd_range *micd_ranges;
int num_micd_ranges;
- int micd_timeout;
-
bool micd_reva;
bool micd_clamp;
@@ -310,9 +308,13 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
}
if (info->micd_reva) {
- regmap_write(arizona->regmap, 0x80, 0x3);
- regmap_write(arizona->regmap, 0x294, 0);
- regmap_write(arizona->regmap, 0x80, 0x0);
+ const struct reg_sequence reva[] = {
+ { 0x80, 0x3 },
+ { 0x294, 0x0 },
+ { 0x80, 0x0 },
+ };
+
+ regmap_multi_reg_write(arizona->regmap, reva, ARRAY_SIZE(reva));
}
if (info->detecting && arizona->pdata.micd_software_compare)
@@ -361,9 +363,13 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
snd_soc_dapm_sync(dapm);
if (info->micd_reva) {
- regmap_write(arizona->regmap, 0x80, 0x3);
- regmap_write(arizona->regmap, 0x294, 2);
- regmap_write(arizona->regmap, 0x80, 0x0);
+ const struct reg_sequence reva[] = {
+ { 0x80, 0x3 },
+ { 0x294, 0x2 },
+ { 0x80, 0x0 },
+ };
+
+ regmap_multi_reg_write(arizona->regmap, reva, ARRAY_SIZE(reva));
}
ret = regulator_allow_bypass(info->micvdd, true);
@@ -527,67 +533,65 @@ static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading,
struct arizona *arizona = info->arizona;
int id_gpio = arizona->pdata.hpdet_id_gpio;
+ if (!arizona->pdata.hpdet_acc_id)
+ return 0;
+
/*
* If we're using HPDET for accessory identification we need
* to take multiple measurements, step through them in sequence.
*/
- if (arizona->pdata.hpdet_acc_id) {
- info->hpdet_res[info->num_hpdet_res++] = *reading;
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
- /* Only check the mic directly if we didn't already ID it */
- if (id_gpio && info->num_hpdet_res == 1) {
- dev_dbg(arizona->dev, "Measuring mic\n");
+ /* Only check the mic directly if we didn't already ID it */
+ if (id_gpio && info->num_hpdet_res == 1) {
+ dev_dbg(arizona->dev, "Measuring mic\n");
- regmap_update_bits(arizona->regmap,
- ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK |
- ARIZONA_ACCDET_SRC,
- ARIZONA_ACCDET_MODE_HPR |
- info->micd_modes[0].src);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK |
+ ARIZONA_ACCDET_SRC,
+ ARIZONA_ACCDET_MODE_HPR |
+ info->micd_modes[0].src);
- gpio_set_value_cansleep(id_gpio, 1);
+ gpio_set_value_cansleep(id_gpio, 1);
- regmap_update_bits(arizona->regmap,
- ARIZONA_HEADPHONE_DETECT_1,
- ARIZONA_HP_POLL, ARIZONA_HP_POLL);
- return -EAGAIN;
- }
-
- /* OK, got both. Now, compare... */
- dev_dbg(arizona->dev, "HPDET measured %d %d\n",
- info->hpdet_res[0], info->hpdet_res[1]);
+ regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
- /* Take the headphone impedance for the main report */
- *reading = info->hpdet_res[0];
+ /* OK, got both. Now, compare... */
+ dev_dbg(arizona->dev, "HPDET measured %d %d\n",
+ info->hpdet_res[0], info->hpdet_res[1]);
- /* Sometimes we get false readings due to slow insert */
- if (*reading >= ARIZONA_HPDET_MAX && !info->hpdet_retried) {
- dev_dbg(arizona->dev, "Retrying high impedance\n");
- info->num_hpdet_res = 0;
- info->hpdet_retried = true;
- arizona_start_hpdet_acc_id(info);
- pm_runtime_put(info->dev);
- return -EAGAIN;
- }
+ /* Take the headphone impedance for the main report */
+ *reading = info->hpdet_res[0];
- /*
- * If we measure the mic as high impedance
- */
- if (!id_gpio || info->hpdet_res[1] > 50) {
- dev_dbg(arizona->dev, "Detected mic\n");
- *mic = true;
- info->detecting = true;
- } else {
- dev_dbg(arizona->dev, "Detected headphone\n");
- }
+ /* Sometimes we get false readings due to slow insert */
+ if (*reading >= ARIZONA_HPDET_MAX && !info->hpdet_retried) {
+ dev_dbg(arizona->dev, "Retrying high impedance\n");
+ info->num_hpdet_res = 0;
+ info->hpdet_retried = true;
+ arizona_start_hpdet_acc_id(info);
+ pm_runtime_put(info->dev);
+ return -EAGAIN;
+ }
- /* Make sure everything is reset back to the real polarity */
- regmap_update_bits(arizona->regmap,
- ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_SRC,
- info->micd_modes[0].src);
+ /*
+ * If we measure the mic as high impedance, we have a microphone
+ */
+ if (!id_gpio || info->hpdet_res[1] > 50) {
+ dev_dbg(arizona->dev, "Detected mic\n");
+ *mic = true;
+ info->detecting = true;
+ } else {
+ dev_dbg(arizona->dev, "Detected headphone\n");
}
+ /* Make sure everything is reset back to the real polarity */
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC, info->micd_modes[0].src);
+
return 0;
}
@@ -662,11 +666,6 @@ done:
if (id_gpio)
gpio_set_value_cansleep(id_gpio, 0);
- /* Revert back to MICDET mode */
- regmap_update_bits(arizona->regmap,
- ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
-
/* If we have a mic then reenable MICDET */
if (mic || info->mic)
arizona_start_mic(info);
@@ -699,8 +698,7 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
info->hpdet_active = true;
- if (info->mic)
- arizona_stop_mic(info);
+ arizona_stop_mic(info);
arizona_extcon_hp_clamp(info, true);
@@ -724,8 +722,8 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
return;
err:
- regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+ arizona_extcon_hp_clamp(info, false);
+ pm_runtime_put_autosuspend(info->dev);
/* Just report headphone */
ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
@@ -781,9 +779,6 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
return;
err:
- regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
- ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
-
/* Just report headphone */
ret = extcon_set_state_sync(info->edev, EXTCON_JACK_HEADPHONE, true);
if (ret != 0)
@@ -806,75 +801,58 @@ static void arizona_micd_timeout_work(struct work_struct *work)
arizona_identify_headphone(info);
- arizona_stop_mic(info);
-
mutex_unlock(&info->lock);
}
-static void arizona_micd_detect(struct work_struct *work)
+static int arizona_micd_adc_read(struct arizona_extcon_info *info)
{
- struct arizona_extcon_info *info = container_of(work,
- struct arizona_extcon_info,
- micd_detect_work.work);
struct arizona *arizona = info->arizona;
- unsigned int val = 0, lvl;
- int ret, i, key;
-
- cancel_delayed_work_sync(&info->micd_timeout_work);
+ unsigned int val;
+ int ret;
- mutex_lock(&info->lock);
+ /* Must disable MICD before we read the ADCVAL */
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0);
- /* If the cable was removed while measuring ignore the result */
- ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
- if (ret < 0) {
- dev_err(arizona->dev, "Failed to check cable state: %d\n",
- ret);
- mutex_unlock(&info->lock);
- return;
- } else if (!ret) {
- dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
- mutex_unlock(&info->lock);
- return;
+ ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to read MICDET_ADCVAL: %d\n", ret);
+ return ret;
}
- if (info->detecting && arizona->pdata.micd_software_compare) {
- /* Must disable MICD before we read the ADCVAL */
- regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA, 0);
- ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
- if (ret != 0) {
- dev_err(arizona->dev,
- "Failed to read MICDET_ADCVAL: %d\n",
- ret);
- mutex_unlock(&info->lock);
- return;
- }
+ dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
- dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
+ val &= ARIZONA_MICDET_ADCVAL_MASK;
+ if (val < ARRAY_SIZE(arizona_micd_levels))
+ val = arizona_micd_levels[val];
+ else
+ val = INT_MAX;
+
+ if (val <= QUICK_HEADPHONE_MAX_OHM)
+ val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
+ else if (val <= MICROPHONE_MIN_OHM)
+ val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
+ else if (val <= MICROPHONE_MAX_OHM)
+ val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
+ else
+ val = ARIZONA_MICD_LVL_8;
- val &= ARIZONA_MICDET_ADCVAL_MASK;
- if (val < ARRAY_SIZE(arizona_micd_levels))
- val = arizona_micd_levels[val];
- else
- val = INT_MAX;
-
- if (val <= QUICK_HEADPHONE_MAX_OHM)
- val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
- else if (val <= MICROPHONE_MIN_OHM)
- val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
- else if (val <= MICROPHONE_MAX_OHM)
- val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
- else
- val = ARIZONA_MICD_LVL_8;
- }
+ return val;
+}
+
+static int arizona_micd_read(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val = 0;
+ int ret, i;
for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev,
"Failed to read MICDET: %d\n", ret);
- mutex_unlock(&info->lock);
- return;
+ return ret;
}
dev_dbg(arizona->dev, "MICDET: %x\n", val);
@@ -882,29 +860,44 @@ static void arizona_micd_detect(struct work_struct *work)
if (!(val & ARIZONA_MICD_VALID)) {
dev_warn(arizona->dev,
"Microphone detection state invalid\n");
- mutex_unlock(&info->lock);
- return;
+ return -EINVAL;
}
}
if (i == 10 && !(val & MICD_LVL_0_TO_8)) {
dev_err(arizona->dev, "Failed to get valid MICDET value\n");
- mutex_unlock(&info->lock);
- return;
+ return -EINVAL;
}
+ return val;
+}
+
+static int arizona_micdet_reading(void *priv)
+{
+ struct arizona_extcon_info *info = priv;
+ struct arizona *arizona = info->arizona;
+ int ret, val;
+
+ if (info->detecting && arizona->pdata.micd_software_compare)
+ ret = arizona_micd_adc_read(info);
+ else
+ ret = arizona_micd_read(info);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+
/* Due to jack detect this should never happen */
if (!(val & ARIZONA_MICD_STS)) {
dev_warn(arizona->dev, "Detected open circuit\n");
info->mic = false;
- arizona_stop_mic(info);
info->detecting = false;
arizona_identify_headphone(info);
- goto handled;
+ return 0;
}
/* If we got a high impedance we should have a headset, report it. */
- if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
+ if (val & ARIZONA_MICD_LVL_8) {
info->mic = true;
info->detecting = false;
@@ -923,7 +916,7 @@ static void arizona_micd_detect(struct work_struct *work)
ret);
}
- goto handled;
+ return 0;
}
/* If we detected a lower impedance during initial startup
@@ -932,15 +925,13 @@ static void arizona_micd_detect(struct work_struct *work)
* plain headphones. If both polarities report a low
* impedance then give up and report headphones.
*/
- if (info->detecting && (val & MICD_LVL_1_TO_7)) {
+ if (val & MICD_LVL_1_TO_7) {
if (info->jack_flips >= info->micd_num_modes * 10) {
dev_dbg(arizona->dev, "Detected HP/line\n");
info->detecting = false;
arizona_identify_headphone(info);
-
- arizona_stop_mic(info);
} else {
info->micd_mode++;
if (info->micd_mode == info->micd_num_modes)
@@ -948,13 +939,45 @@ static void arizona_micd_detect(struct work_struct *work)
arizona_extcon_set_mode(info, info->micd_mode);
info->jack_flips++;
+
+ if (arizona->pdata.micd_software_compare)
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA,
+ ARIZONA_MICD_ENA);
+
+ queue_delayed_work(system_power_efficient_wq,
+ &info->micd_timeout_work,
+ msecs_to_jiffies(arizona->pdata.micd_timeout));
}
- goto handled;
+ return 0;
}
/*
* If we're still detecting and we detect a short then we've
+ * got a headphone.
+ */
+ dev_dbg(arizona->dev, "Headphone detected\n");
+ info->detecting = false;
+
+ arizona_identify_headphone(info);
+
+ return 0;
+}
+
+static int arizona_button_reading(void *priv)
+{
+ struct arizona_extcon_info *info = priv;
+ struct arizona *arizona = info->arizona;
+ int val, key, lvl, i;
+
+ val = arizona_micd_read(info);
+ if (val < 0)
+ return val;
+
+ /*
+ * If we're still detecting and we detect a short then we've
* got a headphone. Otherwise it's a button press.
*/
if (val & MICD_LVL_0_TO_7) {
@@ -968,20 +991,13 @@ static void arizona_micd_detect(struct work_struct *work)
input_report_key(info->input,
info->micd_ranges[i].key, 0);
- WARN_ON(!lvl);
- WARN_ON(ffs(lvl) - 1 >= info->num_micd_ranges);
if (lvl && ffs(lvl) - 1 < info->num_micd_ranges) {
key = info->micd_ranges[ffs(lvl) - 1].key;
input_report_key(info->input, key, 1);
input_sync(info->input);
+ } else {
+ dev_err(arizona->dev, "Button out of range\n");
}
-
- } else if (info->detecting) {
- dev_dbg(arizona->dev, "Headphone detected\n");
- info->detecting = false;
- arizona_stop_mic(info);
-
- arizona_identify_headphone(info);
} else {
dev_warn(arizona->dev, "Button with no mic: %x\n",
val);
@@ -995,19 +1011,39 @@ static void arizona_micd_detect(struct work_struct *work)
arizona_extcon_pulse_micbias(info);
}
-handled:
- if (info->detecting) {
- if (arizona->pdata.micd_software_compare)
- regmap_update_bits(arizona->regmap,
- ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_ENA,
- ARIZONA_MICD_ENA);
+ return 0;
+}
- queue_delayed_work(system_power_efficient_wq,
- &info->micd_timeout_work,
- msecs_to_jiffies(info->micd_timeout));
+static void arizona_micd_detect(struct work_struct *work)
+{
+ struct arizona_extcon_info *info = container_of(work,
+ struct arizona_extcon_info,
+ micd_detect_work.work);
+ struct arizona *arizona = info->arizona;
+ int ret;
+
+ cancel_delayed_work_sync(&info->micd_timeout_work);
+
+ mutex_lock(&info->lock);
+
+ /* If the cable was removed while measuring, ignore the result */
+ ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ mutex_unlock(&info->lock);
+ return;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
+ mutex_unlock(&info->lock);
+ return;
}
+ if (info->detecting)
+ arizona_micdet_reading(info);
+ else
+ arizona_button_reading(info);
+
pm_runtime_mark_last_busy(info->dev);
mutex_unlock(&info->lock);
}
@@ -1125,7 +1161,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
msecs_to_jiffies(HPDET_DEBOUNCE));
if (cancelled_mic) {
- int micd_timeout = info->micd_timeout;
+ int micd_timeout = arizona->pdata.micd_timeout;
queue_delayed_work(system_power_efficient_wq,
&info->micd_timeout_work,
@@ -1145,11 +1181,11 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
dev_err(arizona->dev, "Mechanical report failed: %d\n",
ret);
- if (!arizona->pdata.hpdet_acc_id) {
- info->detecting = true;
- info->mic = false;
- info->jack_flips = 0;
+ info->detecting = true;
+ info->mic = false;
+ info->jack_flips = 0;
+ if (!arizona->pdata.hpdet_acc_id) {
arizona_start_mic(info);
} else {
queue_delayed_work(system_power_efficient_wq,
@@ -1202,11 +1238,6 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
}
- if (arizona->pdata.micd_timeout)
- info->micd_timeout = arizona->pdata.micd_timeout;
- else
- info->micd_timeout = DEFAULT_MICD_TIMEOUT;
-
out:
/* Clear trig_sts to make sure DCVDD is not forced up */
regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
@@ -1435,6 +1466,9 @@ static int arizona_extcon_probe(struct platform_device *pdev)
info->input->name = "Headset";
info->input->phys = "arizona/extcon";
+ if (!pdata->micd_timeout)
+ pdata->micd_timeout = DEFAULT_MICD_TIMEOUT;
+
if (pdata->num_micd_configs) {
info->micd_modes = pdata->micd_configs;
info->micd_num_modes = pdata->num_micd_configs;
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index bcf65aaca5d2..106d4da647bd 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -249,7 +249,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
con_sw);
return -EINVAL;
- };
+ }
switch (vbus_sw) {
case VBUSIN_SWITCH_OPEN:
@@ -268,7 +268,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
default:
dev_err(info->dev, "Unknown VBUS switch type (%d)\n", vbus_sw);
return -EINVAL;
- };
+ }
return 0;
}
@@ -357,13 +357,13 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
"cannot identify the cable type: adc(0x%x)\n",
adc);
return -EINVAL;
- };
+ }
break;
default:
dev_err(info->dev,
"failed to identify the cable type: adc(0x%x)\n", adc);
return -EINVAL;
- };
+ }
return cable_type;
}
@@ -405,7 +405,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
dev_dbg(info->dev,
"cannot handle this cable_type (0x%x)\n", cable_type);
return 0;
- };
+ }
/* Change internal hardware path(DM_CON/DP_CON, VBUSIN) */
ret = sm5502_muic_set_path(info, con_sw, vbus_sw, attached);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0cc746673677..6ca2f5ab6c57 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -551,7 +551,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
INIT_LIST_HEAD(&lynx->client_list);
kref_init(&lynx->kref);
- lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
+ lynx->registers = ioremap(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
if (lynx->registers == NULL) {
dev_err(&dev->dev, "Failed to map registers\n");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e40a77bfe821..ea869addc89b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -239,14 +239,6 @@ config QCOM_SCM
depends on ARM || ARM64
select RESET_CONTROLLER
-config QCOM_SCM_32
- def_bool y
- depends on QCOM_SCM && ARM
-
-config QCOM_SCM_64
- def_bool y
- depends on QCOM_SCM && ARM64
-
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
bool "Qualcomm download mode enabled by default"
depends on QCOM_SCM
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 3fcb91975bdc..e9fb838af4df 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -17,10 +17,7 @@ obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
-obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
-obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
-obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
-CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 7a30952b463d..db55c43a2cbd 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -28,8 +28,12 @@ scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
return NULL;
for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id)
- return id;
+ if (id->protocol_id == scmi_dev->protocol_id) {
+ if (!id->name)
+ return id;
+ else if (!strcmp(id->name, scmi_dev->name))
+ return id;
+ }
return NULL;
}
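With the optional name match above, a single protocol can now back more than one device. A sketch of a driver-side id table making use of this (entries are illustrative; struct scmi_device_id carries a protocol id plus an optional name):

	static const struct scmi_device_id foo_id_table[] = {
		{ SCMI_PROTOCOL_CLOCK, "clocks" },	/* protocol + name */
		{ SCMI_PROTOCOL_PERF },		/* NULL name: any device */
		{ },
	};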
@@ -56,6 +60,11 @@ static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
return fn(handle);
}
+static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+{
+ return 0;
+}
+
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -74,6 +83,10 @@ static int scmi_dev_probe(struct device *dev)
if (ret)
return ret;
+ /* Skip protocol initialisation for additional devices */
+ idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
+ scmi_dev->protocol_id);
+
return scmi_drv->probe(scmi_dev);
}
@@ -125,7 +138,8 @@ static void scmi_device_release(struct device *dev)
}
struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol)
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+ const char *name)
{
int id, retval;
struct scmi_device *scmi_dev;
@@ -134,8 +148,15 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
if (!scmi_dev)
return NULL;
+ scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+ if (!scmi_dev->name) {
+ kfree(scmi_dev);
+ return NULL;
+ }
+
id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
if (id < 0) {
+ kfree_const(scmi_dev->name);
kfree(scmi_dev);
return NULL;
}
@@ -154,6 +175,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
return scmi_dev;
put_dev:
+ kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, id);
return NULL;
@@ -161,6 +183,7 @@ put_dev:
void scmi_device_destroy(struct scmi_device *scmi_dev)
{
+ kfree_const(scmi_dev->name);
scmi_handle_put(scmi_dev->handle);
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 32526a793f3a..4c2227662b26 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -65,6 +65,7 @@ struct scmi_clock_set_rate {
};
struct clock_info {
+ u32 version;
int num_clocks;
int max_async_req;
atomic_t cur_async_req;
@@ -340,6 +341,7 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle)
scmi_clock_describe_rates_get(handle, clkid, clk);
}
+ cinfo->version = version;
handle->clk_ops = &clk_ops;
handle->clk_priv = cinfo;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 5237c2ff79fe..df35358ff324 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -81,6 +81,7 @@ struct scmi_msg {
/**
* struct scmi_xfer - Structure representing a message flow
*
+ * @transfer_id: Unique ID for debug & profiling purposes
* @hdr: Transmit message header
* @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store
@@ -90,6 +91,7 @@ struct scmi_msg {
* @async: pointer to delayed response message received event completion
*/
struct scmi_xfer {
+ int transfer_id;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3eb0382491ce..2c96f6b5a7d8 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -29,6 +29,9 @@
#include "common.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/scmi.h>
+
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -61,6 +64,8 @@ enum scmi_error_codes {
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
+/* Track the unique id for the transfers for debug & profiling purposes */
+static atomic_t transfer_last_id;
/**
* struct scmi_xfers_info - Structure to manage transfer information
@@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
xfer = &minfo->xfer_block[xfer_id];
xfer->hdr.seq = xfer_id;
reinit_completion(&xfer->done);
+ xfer->transfer_id = atomic_inc_return(&transfer_last_id);
return xfer;
}
@@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
scmi_fetch_response(xfer, mem);
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ msg_type);
+
if (msg_type == MSG_TYPE_DELAYED_RESP)
complete(xfer->async_done);
else
@@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
if (unlikely(!cinfo))
return -EINVAL;
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
ret = mbox_send_message(cinfo->chan, xfer);
if (ret < 0) {
dev_dbg(dev, "mbox send fail %d\n", ret);
@@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
*/
mbox_client_txdone(cinfo->chan, ret);
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.status);
+
return ret;
}
@@ -735,6 +753,11 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
+ /* check if already allocated, used for multiple devices per protocol */
+ cinfo = idr_find(idr, prot_id);
+ if (cinfo)
+ return 0;
+
if (scmi_mailbox_check(np, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
@@ -803,11 +826,11 @@ scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id)
+ int prot_id, const char *name)
{
struct scmi_device *sdev;
- sdev = scmi_device_create(np, info->dev, prot_id);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
@@ -824,6 +847,40 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
scmi_set_handle(sdev);
}
+#define MAX_SCMI_DEV_PER_PROTOCOL 2
+struct scmi_prot_devnames {
+ int protocol_id;
+ char *names[MAX_SCMI_DEV_PER_PROTOCOL];
+};
+
+static struct scmi_prot_devnames devnames[] = {
+ { SCMI_PROTOCOL_POWER, { "genpd" },},
+ { SCMI_PROTOCOL_PERF, { "cpufreq" },},
+ { SCMI_PROTOCOL_CLOCK, { "clocks" },},
+ { SCMI_PROTOCOL_SENSOR, { "hwmon" },},
+ { SCMI_PROTOCOL_RESET, { "reset" },},
+};
+
+static inline void
+scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
+ int prot_id)
+{
+ int loop, cnt;
+
+ for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
+ if (devnames[loop].protocol_id != prot_id)
+ continue;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
+ const char *name = devnames[loop].names[cnt];
+
+ if (name)
+ scmi_create_protocol_device(np, info, prot_id,
+ name);
+ }
+ }
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -892,7 +949,7 @@ static int scmi_probe(struct platform_device *pdev)
continue;
}
- scmi_create_protocol_device(child, info, prot_id);
+ scmi_create_protocol_devices(child, info, prot_id);
}
return 0;
@@ -940,6 +997,52 @@ static int scmi_remove(struct platform_device *pdev)
return ret;
}
+static ssize_t protocol_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u.%u\n", info->version.major_ver,
+ info->version.minor_ver);
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", info->version.impl_ver);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor_id);
+
+static ssize_t sub_vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.sub_vendor_id);
+}
+static DEVICE_ATTR_RO(sub_vendor_id);
+
+static struct attribute *versions_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_protocol_version.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_sub_vendor_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
static const struct scmi_desc scmi_generic_desc = {
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
@@ -958,6 +1061,7 @@ static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
.of_match_table = scmi_of_match,
+ .dev_groups = versions_groups,
},
.probe = scmi_probe,
.remove = scmi_remove,
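Each entry in devnames[] can carry up to MAX_SCMI_DEV_PER_PROTOCOL names, so a second consumer of a protocol would be one more string in the inner array, e.g. (the "perf-stats" name is purely hypothetical):

	{ SCMI_PROTOCOL_PERF, { "cpufreq", "perf-stats" },},

scmi_create_protocol_devices() registers one scmi_device per non-NULL name, and the idr_find() check added to scmi_mbox_chan_setup() lets the second device reuse the mailbox channel already set up for the first.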
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 601af4edad5e..ec81e6f7e7a4 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -145,6 +145,7 @@ struct perf_dom_info {
};
struct scmi_perf_info {
+ u32 version;
int num_domains;
bool power_scale_mw;
u64 stats_addr;
@@ -736,6 +737,7 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
+ pinfo->version = version;
handle->perf_ops = &perf_ops;
handle->perf_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 5abef7079c0a..214886ce84f1 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -50,6 +50,7 @@ struct power_dom_info {
};
struct scmi_power_info {
+ u32 version;
int num_domains;
u64 stats_addr;
u32 stats_size;
@@ -207,6 +208,7 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
scmi_power_domain_attributes_get(handle, domain, dom);
}
+ pinfo->version = version;
handle->power_ops = &power_ops;
handle->power_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index ab42c21c5517..de73054554f3 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -48,6 +48,7 @@ struct reset_dom_info {
};
struct scmi_reset_info {
+ u32 version;
int num_domains;
struct reset_dom_info *dom_info;
};
@@ -217,6 +218,7 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
scmi_reset_domain_attributes_get(handle, domain, dom);
}
+ pinfo->version = version;
handle->reset_ops = &reset_ops;
handle->reset_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 87f737e01473..bafbfe358f97 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -112,7 +112,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_POWER },
+ { SCMI_PROTOCOL_POWER, "genpd" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index a400ea805fc2..eba61b9c1f53 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -68,6 +68,7 @@ struct scmi_msg_sensor_reading_get {
};
struct sensors_info {
+ u32 version;
int num_sensors;
int max_requests;
u64 reg_addr;
@@ -294,6 +295,7 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
scmi_sensor_description_get(handle, sinfo);
+ sinfo->version = version;
handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index da04fdae62a1..835ece9c00f1 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
void __iomem *iobase;
int err;
- iobase = ioremap_nocache(base, lim);
+ iobase = ioremap(base, lim);
if (!iobase)
return -ENOMEM;
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
index 5b7ef89eb701..ed10da5313e8 100644
--- a/drivers/firmware/broadcom/tee_bnxt_fw.c
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev)
fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(fw_shm_pool)) {
- tee_client_close_context(pvt_data.ctx);
dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
err = PTR_ERR(fw_shm_pool);
goto out_sess;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index bcc378c19ebe..ecc83e2f032c 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -215,6 +215,28 @@ config EFI_RCI2_TABLE
Say Y here for Dell EMC PowerEdge systems.
+config EFI_DISABLE_PCI_DMA
+ bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
+ help
+ Disable the busmaster bit in the control register on all PCI bridges
+ while calling ExitBootServices() and passing control to the runtime
+ kernel. System firmware may configure the IOMMU to prevent malicious
+ PCI devices from being able to attack the OS via DMA. However, since
+ firmware can't guarantee that the OS is IOMMU-aware, it will tear
+ down IOMMU configuration when ExitBootServices() is called. This
+ leaves a window in which a hostile device could still cause
+ damage before Linux configures the IOMMU again.
+
+ If you say Y here, the EFI stub will clear the busmaster bit on all
+ PCI bridges before ExitBootServices() is called. This will prevent
+ any malicious PCI devices from being able to perform DMA until the
+ kernel reenables busmastering after configuring the IOMMU.
+
+ This option can cause failures with some poorly behaved hardware
+ and should not be enabled without testing. The kernel command-line
+ options "efi=disable_early_pci_dma" and "efi=no_disable_early_pci_dma"
+ may be used to override this option.
+
endmenu
config UEFI_CPER
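Clearing bus mastering is a read-modify-write of the bridge's PCI command register. The stub performs it through the EFI PCI I/O protocol in libstub/pci.c; the sketch below only shows the shape of the operation in ordinary kernel C, not the stub's actual code:

	#include <linux/pci.h>

	static void clear_bridge_busmaster(struct pci_dev *bridge)
	{
		u16 cmd;

		pci_read_config_word(bridge, PCI_COMMAND, &cmd);
		cmd &= ~PCI_COMMAND_MASTER;	/* stop forwarding DMA */
		pci_write_config_word(bridge, PCI_COMMAND, cmd);
	}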
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 904fa09e6a6b..d99f5b0c8a09 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -10,10 +10,12 @@
#define pr_fmt(fmt) "efi: " fmt
#include <linux/efi.h>
+#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
@@ -276,15 +278,112 @@ void __init efi_init(void)
efi_memmap_unmap();
}
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+ u64 fb_base = screen_info.lfb_base;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+ return fb_base >= range->cpu_addr &&
+ fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int err;
+
+ err = of_pci_range_parser_init(&parser, np);
+ if (err) {
+ pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+ continue;
+ }
+
+ for_each_of_pci_range(&parser, &range)
+ if (efifb_overlaps_pci_range(&range))
+ return np;
+ }
+ return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ struct device_node *sup_np;
+ struct device *sup_dev;
+
+ sup_np = find_pci_overlap_node();
+
+ /*
+ * If there's no PCI graphics controller backing the efifb, we are
+ * done here.
+ */
+ if (!sup_np)
+ return 0;
+
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ of_node_put(sup_np);
+
+ /*
+ * Return -ENODEV if the PCI graphics controller device hasn't been
+ * registered yet. This ensures that efifb isn't allowed to probe,
+ * and that this function is retried when new devices are
+ * registered.
+ */
+ if (!sup_dev)
+ return -ENODEV;
+
+ /*
+ * If this fails, retrying this function at a later point won't
+ * change anything. So, don't return an error after this.
+ */
+ if (!device_link_add(dev, sup_dev, 0))
+ dev_warn(dev, "device_link_add() failed\n");
+
+ put_device(sup_dev);
+
+ return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+ .add_links = efifb_add_links,
+};
+
+static struct fwnode_handle efifb_fwnode = {
+ .ops = &efifb_fwnode_ops,
+};
+
static int __init register_gop_device(void)
{
- void *pd;
+ struct platform_device *pd;
+ int err;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
return 0;
- pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
- &screen_info, sizeof(screen_info));
- return PTR_ERR_OR_ZERO(pd);
+ pd = platform_device_alloc("efi-framebuffer", 0);
+ if (!pd)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_PCI))
+ pd->dev.fwnode = &efifb_fwnode;
+
+ err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
+ if (err)
+ return err;
+
+ return platform_device_add(pd);
}
subsys_initcall(register_gop_device);
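The add_links() hook above boils down to the driver core's ordinary device-link call, device_link_add(consumer, supplier, flags); efifb_add_links() passes flags of 0 and only warns on failure, since a failed link merely loses the probe-ordering guarantee. A stand-alone sketch of the same pattern (function name hypothetical):

	#include <linux/device.h>

	/* Ensure 'supplier' probes before 'consumer'. */
	static int declare_supplier(struct device *consumer,
				    struct device *supplier)
	{
		if (!device_link_add(consumer, supplier, 0))
			return -EINVAL;	/* link refused, ordering not enforced */
		return 0;
	}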
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 899b803842bb..9dda2602c862 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -27,7 +27,7 @@
extern u64 efi_system_table;
-#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
#include <asm/ptdump.h>
static struct ptdump_info efi_ptdump_info = {
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index b1395133389e..d3067cbd5114 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/highmem.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index c9a0efca17b0..5d4f84781aa0 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -13,18 +13,58 @@
#include <asm/early_ioremap.h>
+static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
static u32 efi_x, efi_y;
static u64 fb_base;
-static pgprot_t fb_prot;
+static bool fb_wb;
+static void *efi_fb;
+
+/*
+ * EFI earlycon needs to use early_memremap() to map the framebuffer.
+ * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon',
+ * memremap() should be used instead. memremap() will be available after
+ * paging_init() which is earlier than initcall callbacks. Thus adding this
+ * early initcall function early_efi_map_fb() to map the whole EFI framebuffer.
+ */
+static int __init efi_earlycon_remap_fb(void)
+{
+ /* bail if there is no bootconsole or it has been disabled already */
+ if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
+ return 0;
+
+ efi_fb = memremap(fb_base, screen_info.lfb_size,
+ fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
+
+ return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(efi_earlycon_remap_fb);
+
+static int __init efi_earlycon_unmap_fb(void)
+{
+ /* unmap the bootconsole fb unless keep_bootcon has left it enabled */
+ if (efi_fb && !(earlycon_console->flags & CON_ENABLED))
+ memunmap(efi_fb);
+ return 0;
+}
+late_initcall(efi_earlycon_unmap_fb);
static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
{
+ pgprot_t fb_prot;
+
+ if (efi_fb)
+ return efi_fb + start;
+
+ fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
}
static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
{
+ if (efi_fb)
+ return;
+
early_memunmap(addr, len);
}
@@ -176,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- if (opt && !strcmp(opt, "ram"))
- fb_prot = PAGE_KERNEL;
- else
- fb_prot = pgprot_writecombine(PAGE_KERNEL);
+ fb_wb = opt && !strcmp(opt, "ram");
si = &screen_info;
xres = si->lfb_width;
@@ -201,6 +238,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
efi_earlycon_scroll_up();
device->con->write = efi_earlycon_write;
+ earlycon_console = device->con;
return 0;
}
EARLYCON_DECLARE(efifb, efi_earlycon_setup);
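One fb_wb flag now drives both mapping paths, which keeps the cache attributes of the early fixmap mapping and the permanent mapping consistent; condensed from the code above:

	/* "ram" option => framebuffer is real memory, map it write-back */
	fb_wb = opt && !strcmp(opt, "ram");

	/* boot-time path, through early fixmap slots */
	fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);

	/* permanent path, once memremap() works after paging_init() */
	efi_fb = memremap(fb_base, screen_info.lfb_size,
			  fb_wb ? MEMREMAP_WB : MEMREMAP_WC);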
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 407816da9fcb..621220ab3d0e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr)
*
* Search in the EFI memory map for the region covering @phys_addr.
* Returns the EFI memory type if the region was found in the memory
- * map, EFI_RESERVED_TYPE (zero) otherwise.
+ * map, -EINVAL otherwise.
*/
int efi_mem_type(unsigned long phys_addr)
{
@@ -979,6 +979,24 @@ static int __init efi_memreserve_map_root(void)
return 0;
}
+static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
+{
+ struct resource *res, *parent;
+
+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+ res->start = addr;
+ res->end = addr + size - 1;
+
+ /* we expect a conflict with a 'System RAM' region */
+ parent = request_resource_conflict(&iomem_resource, res);
+ return parent ? request_resource(parent, res) : 0;
+}
+
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
struct linux_efi_memreserve *rsv;
@@ -1003,7 +1021,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
rsv->entry[index].size = size;
memunmap(rsv);
- return 0;
+ return efi_mem_reserve_iomem(addr, size);
}
memunmap(rsv);
}
@@ -1013,6 +1031,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
if (!rsv)
return -ENOMEM;
+ rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
+ if (rc) {
+ free_page((unsigned long)rsv);
+ return rc;
+ }
+
/*
* The memremap() call above assumes that a linux_efi_memreserve entry
* never crosses a page boundary, so let's ensure that this remains true
@@ -1029,7 +1053,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
efi_memreserve_root->next = __pa(rsv);
spin_unlock(&efi_mem_reserve_persistent_lock);
- return 0;
+ return efi_mem_reserve_iomem(addr, size);
}
static int __init efi_memreserve_root_init(void)
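When the reservation overlaps RAM, request_resource_conflict() returns the conflicting 'System RAM' resource and the follow-up request_resource() inserts the reservation beneath it, so /proc/iomem nests roughly like this (addresses hypothetical):

	80000000-bfffffff : System RAM
	  9f000000-9f000fff : reserved

If nothing conflicts, request_resource_conflict() has already inserted the resource at the top level and the helper simply returns 0.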
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index bb9fc70d0cfa..6e0f34a38171 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
return 0;
}
-void __init efi_fake_memmap(void)
+static void __init efi_fake_range(struct efi_mem_range *efi_range)
{
+ struct efi_memory_map_data data = { 0 };
int new_nr_map = efi.memmap.nr_map;
efi_memory_desc_t *md;
- phys_addr_t new_memmap_phy;
void *new_memmap;
- int i;
-
- if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
- return;
/* count up the number of EFI memory descriptors */
- for (i = 0; i < nr_fake_mem; i++) {
- for_each_efi_memory_desc(md) {
- struct range *r = &efi_fake_mems[i].range;
-
- new_nr_map += efi_memmap_split_count(md, r);
- }
- }
+ for_each_efi_memory_desc(md)
+ new_nr_map += efi_memmap_split_count(md, &efi_range->range);
/* allocate memory for new EFI memmap */
- new_memmap_phy = efi_memmap_alloc(new_nr_map);
- if (!new_memmap_phy)
+ if (efi_memmap_alloc(new_nr_map, &data) != 0)
return;
/* create new EFI memmap */
- new_memmap = early_memremap(new_memmap_phy,
- efi.memmap.desc_size * new_nr_map);
+ new_memmap = early_memremap(data.phys_map, data.size);
if (!new_memmap) {
- memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
+ __efi_memmap_free(data.phys_map, data.size, data.flags);
return;
}
- for (i = 0; i < nr_fake_mem; i++)
- efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
+ efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
/* swap into new EFI memmap */
- early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+ early_memunmap(new_memmap, data.size);
+
+ efi_memmap_install(&data);
+}
+
+void __init efi_fake_memmap(void)
+{
+ int i;
- efi_memmap_install(new_memmap_phy, new_nr_map);
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ for (i = 0; i < nr_fake_mem; i++)
+ efi_fake_range(&efi_fake_mems[i]);
/* print new EFI memmap */
efi_print_memmap();
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c35f893897e1..98a81576213d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
- random.o
+ random.o pci.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 817237ce2420..7bbef4a67350 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -37,16 +37,14 @@
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-void efi_char16_printk(efi_system_table_t *sys_table_arg,
- efi_char16_t *str)
-{
- struct efi_simple_text_output_protocol *out;
+static efi_system_table_t *__efistub_global sys_table;
- out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
- out->output_string(out, str);
+__pure efi_system_table_t *efi_system_table(void)
+{
+ return sys_table;
}
-static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+static struct screen_info *setup_graphics(void)
{
efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
efi_status_t status;
@@ -55,27 +53,27 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
struct screen_info *si = NULL;
size = 0;
- status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info(sys_table_arg);
+ si = alloc_screen_info();
if (!si)
return NULL;
- efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+ efi_setup_gop(si, &gop_proto, size);
}
return si;
}
-void install_memreserve_table(efi_system_table_t *sys_table_arg)
+void install_memreserve_table(void)
{
struct linux_efi_memreserve *rsv;
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
efi_status_t status;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
- (void **)&rsv);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+ pr_efi_err("Failed to allocate memreserve entry!\n");
return;
}
@@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
rsv->size = 0;
atomic_set(&rsv->count, 0);
- status = efi_call_early(install_configuration_table,
- &memreserve_table_guid,
- rsv);
+ status = efi_bs_call(install_configuration_table,
+ &memreserve_table_guid, rsv);
if (status != EFI_SUCCESS)
- pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+ pr_efi_err("Failed to install memreserve config table!\n");
}
@@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
* must be reserved. On failure it is required to free all
* allocations it has made.
*/
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* for both architectures, with the arch-specific code provided in the
* handle_kernel_image() function.
*/
-unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
+unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
unsigned long *image_addr)
{
efi_loaded_image_t *image;
@@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
enum efi_secureboot_mode secure_boot;
struct screen_info *si;
+ sys_table = sys_table_arg;
+
/* Check if we were booted by the EFI firmware */
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
goto fail;
- status = check_platform_features(sys_table);
+ status = check_platform_features();
if (status != EFI_SUCCESS)
goto fail;
@@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
status = sys_table->boottime->handle_protocol(handle,
&loaded_image_proto, (void *)&image);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to get loaded image protocol\n");
+ pr_efi_err("Failed to get loaded image protocol\n");
goto fail;
}
- dram_base = get_dram_base(sys_table);
+ dram_base = get_dram_base();
if (dram_base == EFI_ERROR) {
- pr_efi_err(sys_table, "Failed to find DRAM base\n");
+ pr_efi_err("Failed to find DRAM base\n");
goto fail;
}
@@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
if (!cmdline_ptr) {
- pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
+ pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
goto fail;
}
@@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
efi_parse_options(cmdline_ptr);
- pr_efi(sys_table, "Booting Linux Kernel...\n");
+ pr_efi("Booting Linux Kernel...\n");
- si = setup_graphics(sys_table);
+ si = setup_graphics();
- status = handle_kernel_image(sys_table, image_addr, &image_size,
+ status = handle_kernel_image(image_addr, &image_size,
&reserve_addr,
&reserve_size,
dram_base, image);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ pr_efi_err("Failed to relocate kernel\n");
goto fail_free_cmdline;
}
- efi_retrieve_tpm2_eventlog(sys_table);
+ efi_retrieve_tpm2_eventlog();
/* Ask the firmware to clear memory on unclean shutdown */
- efi_enable_reset_attack_mitigation(sys_table);
+ efi_enable_reset_attack_mitigation();
- secure_boot = efi_get_secureboot(sys_table);
+ secure_boot = efi_get_secureboot();
/*
* Unauthenticated device tree data is a security hazard, so ignore
@@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
secure_boot != efi_secureboot_mode_disabled) {
if (strstr(cmdline_ptr, "dtb="))
- pr_efi(sys_table, "Ignoring DTB from command line.\n");
+ pr_efi("Ignoring DTB from command line.\n");
} else {
- status = handle_cmdline_files(sys_table, image, cmdline_ptr,
- "dtb=",
+ status = handle_cmdline_files(image, cmdline_ptr, "dtb=",
~0UL, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to load device tree!\n");
+ pr_efi_err("Failed to load device tree!\n");
goto fail_free_image;
}
}
if (fdt_addr) {
- pr_efi(sys_table, "Using DTB from command line\n");
+ pr_efi("Using DTB from command line\n");
} else {
/* Look for a device tree configuration table entry. */
- fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size);
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
if (fdt_addr)
- pr_efi(sys_table, "Using DTB from configuration table\n");
+ pr_efi("Using DTB from configuration table\n");
}
if (!fdt_addr)
- pr_efi(sys_table, "Generating empty DTB\n");
+ pr_efi("Generating empty DTB\n");
- status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=",
+ status = handle_cmdline_files(image, cmdline_ptr, "initrd=",
efi_get_max_initrd_addr(dram_base,
*image_addr),
(unsigned long *)&initrd_addr,
(unsigned long *)&initrd_size);
if (status != EFI_SUCCESS)
- pr_efi_err(sys_table, "Failed initrd from command line!\n");
+ pr_efi_err("Failed initrd from command line!\n");
- efi_random_get_seed(sys_table);
+ efi_random_get_seed();
/* hibernation expects the runtime regions to stay in the same place */
if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
@@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
EFI_RT_VIRTUAL_SIZE;
u32 rnd;
- status = efi_get_random_bytes(sys_table, sizeof(rnd),
- (u8 *)&rnd);
+ status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
if (status == EFI_SUCCESS) {
virtmap_base = EFI_RT_VIRTUAL_BASE +
(((headroom >> 21) * rnd) >> (32 - 21));
}
}
- install_memreserve_table(sys_table);
+ install_memreserve_table();
new_fdt_addr = fdt_addr;
- status = allocate_new_fdt_and_exit_boot(sys_table, handle,
+ status = allocate_new_fdt_and_exit_boot(handle,
&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
initrd_addr, initrd_size, cmdline_ptr,
fdt_addr, fdt_size);
@@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status == EFI_SUCCESS)
return new_fdt_addr;
- pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n");
+ pr_efi_err("Failed to update FDT and exit boot services\n");
- efi_free(sys_table, initrd_size, initrd_addr);
- efi_free(sys_table, fdt_size, fdt_addr);
+ efi_free(initrd_size, initrd_addr);
+ efi_free(fdt_size, fdt_addr);
fail_free_image:
- efi_free(sys_table, image_size, *image_addr);
- efi_free(sys_table, reserve_size, reserve_addr);
+ efi_free(image_size, *image_addr);
+ efi_free(reserve_size, reserve_addr);
fail_free_cmdline:
- free_screen_info(sys_table, si);
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
+ free_screen_info(si);
+ efi_free(cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
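With the system table cached in a global, every boot-services call collapses into the efi_bs_call() wrapper; on ARM the wrapper amounts to roughly the following (a sketch of the pattern, not the exact header definition):

	#define efi_bs_call(func, ...) \
		efi_system_table()->boottime->func(__VA_ARGS__)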
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 4566640de650..7b2a6382b647 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -7,7 +7,7 @@
#include "efistub.h"
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
{
int block;
@@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
/* LPAE kernels need compatible hardware */
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
- pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+ pr_efi_err("This LPAE kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
@@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+struct screen_info *alloc_screen_info(void)
{
struct screen_info *si;
efi_status_t status;
@@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
* its contents while we hand over to the kernel proper from the
* decompressor.
*/
- status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*si), (void **)&si);
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*si), (void **)&si);
if (status != EFI_SUCCESS)
return NULL;
- status = efi_call_early(install_configuration_table,
- &screen_info_guid, si);
+ status = efi_bs_call(install_configuration_table,
+ &screen_info_guid, si);
if (status == EFI_SUCCESS)
return si;
- efi_call_early(free_pool, si);
+ efi_bs_call(free_pool, si);
return NULL;
}
-void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+void free_screen_info(struct screen_info *si)
{
if (!si)
return;
- efi_call_early(install_configuration_table, &screen_info_guid, NULL);
- efi_call_early(free_pool, si);
+ efi_bs_call(install_configuration_table, &screen_info_guid, NULL);
+ efi_bs_call(free_pool, si);
}
-static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
- unsigned long dram_base,
+static efi_status_t reserve_kernel_base(unsigned long dram_base,
unsigned long *reserve_addr,
unsigned long *reserve_size)
{
@@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
*/
alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE;
nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
- EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
if (status == EFI_SUCCESS) {
if (alloc_addr == dram_base) {
*reserve_addr = alloc_addr;
@@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
* released to the OS after ExitBootServices(), the decompressor can
* safely overwrite them.
*/
- status = efi_get_memory_map(sys_table_arg, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg,
- "reserve_kernel_base(): Unable to retrieve memory map.\n");
+ pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
return status;
}
@@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
start = max(start, (u64)dram_base);
end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE);
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- (end - start) / EFI_PAGE_SIZE,
- &start);
+ status = efi_bs_call(allocate_pages,
+ EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ (end - start) / EFI_PAGE_SIZE,
+ &start);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg,
- "reserve_kernel_base(): alloc failed.\n");
+ pr_efi_err("reserve_kernel_base(): alloc failed.\n");
goto out;
}
break;
@@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
status = EFI_SUCCESS;
out:
- efi_call_early(free_pool, memory_map);
+ efi_bs_call(free_pool, memory_map);
return status;
}
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
*/
kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
- status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
- reserve_size);
+ status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
+ pr_efi_err("Unable to allocate memory for uncompressed kernel.\n");
return status;
}
@@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* memory window.
*/
*image_size = image->image_size;
- status = efi_relocate_kernel(sys_table, image_addr, *image_size,
- *image_size,
+ status = efi_relocate_kernel(image_addr, *image_size, *image_size,
kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel.\n");
- efi_free(sys_table, *reserve_size, *reserve_addr);
+ pr_efi_err("Failed to relocate kernel.\n");
+ efi_free(*reserve_size, *reserve_addr);
*reserve_size = 0;
return status;
}
@@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* address at which the zImage is loaded.
*/
if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
- pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
- efi_free(sys_table, *reserve_size, *reserve_addr);
+ pr_efi_err("Failed to relocate kernel, no low memory available.\n");
+ efi_free(*reserve_size, *reserve_addr);
*reserve_size = 0;
- efi_free(sys_table, *image_size, *image_addr);
+ efi_free(*image_size, *image_addr);
*image_size = 0;
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 1550d244e996..2915b44132e6 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -21,7 +21,7 @@
#include "efistub.h"
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
{
u64 tg;
@@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
- pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+ pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
- pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+ pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n");
return EFI_UNSUPPORTED;
}
return EFI_SUCCESS;
}
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
@@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
if (!nokaslr()) {
- status = efi_get_random_bytes(sys_table_arg,
- sizeof(phys_seed),
+ status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
} else if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+ pr_efi_err("efi_get_random_bytes() failed\n");
return status;
}
} else {
- pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+ pr_efi("KASLR disabled on kernel command line\n");
}
}
@@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
* locate the kernel at a randomized offset in physical memory.
*/
*reserve_size = kernel_memsize + offset;
- status = efi_random_alloc(sys_table_arg, *reserve_size,
+ status = efi_random_alloc(*reserve_size,
MIN_KIMG_ALIGN, reserve_addr,
(u32)phys_seed);
@@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
*image_addr = *reserve_addr = preferred_offset;
*reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- *reserve_size / EFI_PAGE_SIZE,
- (efi_physical_addr_t *)reserve_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ *reserve_size / EFI_PAGE_SIZE,
+ (efi_physical_addr_t *)reserve_addr);
}
if (status != EFI_SUCCESS) {
*reserve_size = kernel_memsize + TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, *reserve_size,
+ status = efi_low_alloc(*reserve_size,
MIN_KIMG_ALIGN, reserve_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ pr_efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e02579907f2e..74ddfb496140 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -27,24 +27,26 @@
*/
#define EFI_READ_CHUNK_SIZE (1024 * 1024)
-static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE;
-static int __section(.data) __nokaslr;
-static int __section(.data) __quiet;
-static int __section(.data) __novamap;
-static bool __section(.data) efi_nosoftreserve;
+static bool __efistub_global efi_nokaslr;
+static bool __efistub_global efi_quiet;
+static bool __efistub_global efi_novamap;
+static bool __efistub_global efi_nosoftreserve;
+static bool __efistub_global efi_disable_pci_dma =
+ IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
-int __pure nokaslr(void)
+bool __pure nokaslr(void)
{
- return __nokaslr;
+ return efi_nokaslr;
}
-int __pure is_quiet(void)
+bool __pure is_quiet(void)
{
- return __quiet;
+ return efi_quiet;
}
-int __pure novamap(void)
+bool __pure novamap(void)
{
- return __novamap;
+ return efi_novamap;
}
bool __pure __efi_soft_reserve_enabled(void)
{
@@ -58,7 +60,7 @@ struct file_info {
u64 size;
};
-void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+void efi_printk(char *str)
{
char *s8;
@@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
ch[0] = *s8;
if (*s8 == '\n') {
efi_char16_t nl[2] = { '\r', 0 };
- efi_char16_printk(sys_table_arg, nl);
+ efi_char16_printk(nl);
}
- efi_char16_printk(sys_table_arg, ch);
+ efi_char16_printk(ch);
}
}
@@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size,
return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
}
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
@@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
*map->map_size = *map->desc_size * 32;
*map->buff_size = *map->map_size;
again:
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- *map->map_size, (void **)&m);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ *map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
*map->desc_size = 0;
key = 0;
- status = efi_call_early(get_memory_map, map->map_size, m,
- &key, map->desc_size, &desc_version);
+ status = efi_bs_call(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL ||
!mmap_has_headroom(*map->buff_size, *map->map_size,
*map->desc_size)) {
- efi_call_early(free_pool, m);
+ efi_bs_call(free_pool, m);
/*
* Make sure there are some entries of headroom so that the
* buffer can be reused for a new map after allocations are
@@ -122,7 +123,7 @@ again:
}
if (status != EFI_SUCCESS)
- efi_call_early(free_pool, m);
+ efi_bs_call(free_pool, m);
if (map->key_ptr && status == EFI_SUCCESS)
*map->key_ptr = key;
@@ -135,7 +136,7 @@ fail:
}
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+unsigned long get_dram_base(void)
{
efi_status_t status;
unsigned long map_size, buff_size;
@@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
return membase;
@@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
}
}
- efi_call_early(free_pool, map.map);
+ efi_bs_call(free_pool, map.map);
return membase;
}
@@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
/*
* Allocate at the highest possible address that is not above 'max'.
*/
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
unsigned long map_size, desc_size, buff_size;
@@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -251,9 +251,8 @@ again:
if (!max_addr)
status = EFI_NOT_FOUND;
else {
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &max_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &max_addr);
if (status != EFI_SUCCESS) {
max = max_addr;
max_addr = 0;
@@ -263,7 +262,7 @@ again:
*addr = max_addr;
}
- efi_call_early(free_pool, map);
+ efi_bs_call(free_pool, map);
fail:
return status;
}
@@ -271,8 +270,7 @@ fail:
/*
* Allocate at the lowest possible address that is not below 'min'.
*/
-efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
unsigned long map_size, desc_size, buff_size;
@@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &boot_map);
+ status = efi_get_memory_map(&boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if ((start + size) > end)
continue;
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &start);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
if (status == EFI_SUCCESS) {
*addr = start;
break;
@@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if (i == map_size / desc_size)
status = EFI_NOT_FOUND;
- efi_call_early(free_pool, map);
+ efi_bs_call(free_pool, map);
fail:
return status;
}
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
- unsigned long addr)
+void efi_free(unsigned long size, unsigned long addr)
{
unsigned long nr_pages;
@@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
return;
nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- efi_call_early(free_pages, addr, nr_pages);
+ efi_bs_call(free_pages, addr, nr_pages);
}
-static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
- efi_char16_t *filename_16, void **handle,
- u64 *file_sz)
+static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16,
+ void **handle, u64 *file_sz)
{
efi_file_handle_t *h, *fh = __fh;
efi_file_info_t *info;
@@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
efi_guid_t info_guid = EFI_FILE_INFO_ID;
unsigned long info_sz;
- status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
- EFI_FILE_MODE_READ, (u64)0);
+ status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to open file: ");
- efi_char16_printk(sys_table_arg, filename_16);
- efi_printk(sys_table_arg, "\n");
+ efi_printk("Failed to open file: ");
+ efi_char16_printk(filename_16);
+ efi_printk("\n");
return status;
}
*handle = h;
info_sz = 0;
- status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
- &info_sz, NULL);
+ status = h->get_info(h, &info_guid, &info_sz, NULL);
if (status != EFI_BUFFER_TOO_SMALL) {
- efi_printk(sys_table_arg, "Failed to get file info size\n");
+ efi_printk("Failed to get file info size\n");
return status;
}
grow:
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- info_sz, (void **)&info);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz,
+ (void **)&info);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+ efi_printk("Failed to alloc mem for file info\n");
return status;
}
- status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
- &info_sz, info);
+ status = h->get_info(h, &info_guid, &info_sz, info);
if (status == EFI_BUFFER_TOO_SMALL) {
- efi_call_early(free_pool, info);
+ efi_bs_call(free_pool, info);
goto grow;
}
*file_sz = info->file_size;
- efi_call_early(free_pool, info);
+ efi_bs_call(free_pool, info);
if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to get initrd info\n");
+ efi_printk("Failed to get initrd info\n");
return status;
}
-static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+static efi_status_t efi_file_read(efi_file_handle_t *handle,
+ unsigned long *size, void *addr)
{
- return efi_call_proto(efi_file_handle, read, handle, size, addr);
+ return handle->read(handle, size, addr);
}
-static efi_status_t efi_file_close(void *handle)
+static efi_status_t efi_file_close(efi_file_handle_t *handle)
{
- return efi_call_proto(efi_file_handle, close, handle);
+ return handle->close(handle);
}
-static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
efi_file_handle_t **__fh)
{
efi_file_io_interface_t *io;
efi_file_handle_t *fh;
efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
efi_status_t status;
- void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
- device_handle,
- image);
+ efi_handle_t handle = image->device_handle;
- status = efi_call_early(handle_protocol, handle,
- &fs_proto, (void **)&io);
+ status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+ efi_printk("Failed to handle fs_proto\n");
return status;
}
- status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+ status = io->open_volume(io, &fh);
if (status != EFI_SUCCESS)
- efi_printk(sys_table_arg, "Failed to open volume\n");
+ efi_printk("Failed to open volume\n");
else
*__fh = fh;
@@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline)
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- __nokaslr = 1;
+ efi_nokaslr = true;
str = strstr(cmdline, "quiet");
if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
- __quiet = 1;
+ efi_quiet = true;
/*
* If no EFI parameters were specified on the cmdline we've got
@@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline)
while (*str && *str != ' ') {
if (!strncmp(str, "nochunk", 7)) {
str += strlen("nochunk");
- __chunk_size = -1UL;
+ efi_chunk_size = -1UL;
}
if (!strncmp(str, "novamap", 7)) {
str += strlen("novamap");
- __novamap = 1;
+ efi_novamap = true;
}
if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
!strncmp(str, "nosoftreserve", 13)) {
str += strlen("nosoftreserve");
- efi_nosoftreserve = 1;
+ efi_nosoftreserve = true;
+ }
+
+ if (!strncmp(str, "disable_early_pci_dma", 21)) {
+ str += strlen("disable_early_pci_dma");
+ efi_disable_pci_dma = true;
+ }
+
+ if (!strncmp(str, "no_disable_early_pci_dma", 24)) {
+ str += strlen("no_disable_early_pci_dma");
+ efi_disable_pci_dma = false;
}
/* Group words together, delimited by "," */
@@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline)
* We only support loading a file from the same filesystem as
* the kernel image.
*/
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
char *cmd_line, char *option_string,
unsigned long max_addr,
unsigned long *load_addr,
@@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
if (!nr_files)
return EFI_SUCCESS;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- nr_files * sizeof(*files), (void **)&files);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ nr_files * sizeof(*files), (void **)&files);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n");
+ pr_efi_err("Failed to alloc mem for file handle list\n");
goto fail;
}
@@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
/* Only open the volume once. */
if (!i) {
- status = efi_open_volume(sys_table_arg, image, &fh);
+ status = efi_open_volume(image, &fh);
if (status != EFI_SUCCESS)
goto free_files;
}
- status = efi_file_size(sys_table_arg, fh, filename_16,
- (void **)&file->handle, &file->size);
+ status = efi_file_size(fh, filename_16, (void **)&file->handle,
+ &file->size);
if (status != EFI_SUCCESS)
goto close_handles;
@@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
* so allocate enough memory for all the files. This is used
* for loading multiple files.
*/
- status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
- &file_addr, max_addr);
+ status = efi_high_alloc(file_size_total, 0x1000, &file_addr,
+ max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n");
+ pr_efi_err("Failed to alloc highmem for files\n");
goto close_handles;
}
/* We've run out of free low memory. */
if (file_addr > max_addr) {
- pr_efi_err(sys_table_arg, "We've run out of free low memory\n");
+ pr_efi_err("We've run out of free low memory\n");
status = EFI_INVALID_PARAMETER;
goto free_file_total;
}
@@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
while (size) {
unsigned long chunksize;
- if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
- chunksize = __chunk_size;
+ if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size)
+ chunksize = efi_chunk_size;
else
chunksize = size;
@@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
&chunksize,
(void *)addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to read file\n");
+ pr_efi_err("Failed to read file\n");
goto free_file_total;
}
addr += chunksize;
@@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
}
- efi_call_early(free_pool, files);
+ efi_bs_call(free_pool, files);
*load_addr = file_addr;
*load_size = file_size_total;
@@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
return status;
free_file_total:
- efi_free(sys_table_arg, file_size_total, file_addr);
+ efi_free(file_size_total, file_addr);
close_handles:
for (k = j; k < i; k++)
efi_file_close(files[k].handle);
free_files:
- efi_call_early(free_pool, files);
+ efi_bs_call(free_pool, files);
fail:
*load_addr = 0;
*load_size = 0;
@@ -707,8 +705,7 @@ fail:
* address is not available the lowest available address will
* be used.
*/
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
@@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* as possible while respecting the required alignment.
*/
nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages,
- EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
- nr_pages, &efi_addr);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
new_addr = efi_addr;
/*
* If preferred address allocation failed allocate as low as
* possible.
*/
if (status != EFI_SUCCESS) {
- status = efi_low_alloc_above(sys_table_arg, alloc_size,
- alignment, &new_addr, min_addr);
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
}
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
+ pr_efi_err("Failed to allocate usable memory for kernel.\n");
return status;
}
@@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
+char *efi_convert_cmdline(efi_loaded_image_t *image,
int *cmd_line_len)
{
const u16 *s2;
@@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_high_alloc(sys_table_arg, options_bytes, 0,
- &cmdline_addr, MAX_CMDLINE_ADDRESS);
+ status = efi_high_alloc(options_bytes, 0, &cmdline_addr,
+ MAX_CMDLINE_ADDRESS);
if (status != EFI_SUCCESS)
return NULL;
@@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
* specific structure may be passed to the function via priv. The client
* function may be called multiple times.
*/
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
- void *handle,
+efi_status_t efi_exit_boot_services(void *handle,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func)
{
efi_status_t status;
- status = efi_get_memory_map(sys_table_arg, map);
+ status = efi_get_memory_map(map);
if (status != EFI_SUCCESS)
goto fail;
- status = priv_func(sys_table_arg, map, priv);
+ status = priv_func(map, priv);
if (status != EFI_SUCCESS)
goto free_map;
- status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ if (efi_disable_pci_dma)
+ efi_pci_disable_bridge_busmaster();
+
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
if (status == EFI_INVALID_PARAMETER) {
/*
@@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
* to get_memory_map() is expected to succeed here.
*/
*map->map_size = *map->buff_size;
- status = efi_call_early(get_memory_map,
- map->map_size,
- *map->map,
- map->key_ptr,
- map->desc_size,
- map->desc_ver);
+ status = efi_bs_call(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
- status = priv_func(sys_table_arg, map, priv);
+ status = priv_func(map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
- status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
}
/* exit_boot_services() was called, thus cannot free */
@@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
return EFI_SUCCESS;
free_map:
- efi_call_early(free_pool, *map->map);
+ efi_bs_call(free_pool, *map->map);
fail:
return status;
}
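
Two changes meet in this hunk: the new efi_disable_pci_dma hook fires immediately before the first exit_boot_services() attempt, and the pre-existing retry path copes with a stale map key. Per the UEFI spec, once ExitBootServices() has failed with EFI_INVALID_PARAMETER the loader may only call GetMemoryMap() and ExitBootServices() again, which is why the map is re-read into the buffer already owned rather than reallocated. The contract stripped down to a sketch (handle, key, map_size, map, desc_size and desc_ver assumed to be locals):

        status = efi_bs_call(exit_boot_services, handle, key);
        if (status == EFI_INVALID_PARAMETER) {
                /* Firmware is partially shut down: refresh the key via
                 * the buffer we already own, then try exactly once more. */
                status = efi_bs_call(get_memory_map, &map_size, map, &key,
                                     &desc_size, &desc_ver);
                if (status == EFI_SUCCESS)
                        status = efi_bs_call(exit_boot_services, handle,
                                             key);
        }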
-#define GET_EFI_CONFIG_TABLE(bits) \
-static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
- efi_guid_t guid) \
-{ \
- efi_system_table_##bits##_t *sys_table; \
- efi_config_table_##bits##_t *tables; \
- int i; \
- \
- sys_table = (typeof(sys_table))_sys_table; \
- tables = (typeof(tables))(unsigned long)sys_table->tables; \
- \
- for (i = 0; i < sys_table->nr_tables; i++) { \
- if (efi_guidcmp(tables[i].guid, guid) != 0) \
- continue; \
- \
- return (void *)(unsigned long)tables[i].table; \
- } \
- \
- return NULL; \
+void *get_efi_config_table(efi_guid_t guid)
+{
+ unsigned long tables = efi_table_attr(efi_system_table(), tables);
+ int nr_tables = efi_table_attr(efi_system_table(), nr_tables);
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ efi_config_table_t *t = (void *)tables;
+
+ if (efi_guidcmp(t->guid, guid) == 0)
+ return efi_table_attr(t, table);
+
+ tables += efi_is_native() ? sizeof(efi_config_table_t)
+ : sizeof(efi_config_table_32_t);
+ }
+ return NULL;
}
-GET_EFI_CONFIG_TABLE(32)
-GET_EFI_CONFIG_TABLE(64)
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+void efi_char16_printk(efi_char16_t *str)
{
- if (efi_is_64bit())
- return get_efi_config_table64(sys_table, guid);
- else
- return get_efi_config_table32(sys_table, guid);
+ efi_call_proto(efi_table_attr(efi_system_table(), con_out),
+ output_string, str);
}
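
The two macro-generated walkers collapse into a single loop that advances by the config-table entry size of whichever firmware is present, native or 32-bit. Callers change only by losing the system-table argument; a representative consumer, modelled on get_fdt() in fdt.c below (the wrapper name is illustrative):

        static void *stub_find_dtb(unsigned long *size)
        {
                void *fdt = get_efi_config_table(DEVICE_TREE_GUID);

                if (!fdt)
                        return NULL;    /* firmware published no DTB */

                *size = fdt_totalsize(fdt);
                return fdt;
        }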
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 05739ae013c8..c244b165005e 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -25,22 +25,30 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
-extern int __pure nokaslr(void);
-extern int __pure is_quiet(void);
-extern int __pure novamap(void);
+#ifdef CONFIG_ARM
+#define __efistub_global __section(.data)
+#else
+#define __efistub_global
+#endif
+
+extern bool __pure nokaslr(void);
+extern bool __pure is_quiet(void);
+extern bool __pure novamap(void);
+
+extern __pure efi_system_table_t *efi_system_table(void);
-#define pr_efi(sys_table, msg) do { \
- if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
+#define pr_efi(msg) do { \
+ if (!is_quiet()) efi_printk("EFI stub: "msg); \
} while (0)
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
+#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
-void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+void efi_char16_printk(efi_char16_t *);
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
+unsigned long get_dram_base(void);
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
- void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
@@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size);
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size);
+void *get_fdt(unsigned long *fdt_size);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
- unsigned long size, u8 *out);
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
+efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed);
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t check_platform_features(void);
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+void *get_efi_config_table(efi_guid_t guid);
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
@@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
#endif
+#define get_efi_var(name, vendor, ...) \
+ efi_rt_call(get_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_rt_call(set_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
#endif
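
get_efi_var()/set_efi_var() move from per-file copies (deleted below in secureboot.c and tpm.c) into the shared header, now expressed via efi_rt_call(). A usage sketch that mirrors the SecureBoot read in secureboot.c; the fallback on error is illustrative only:

        efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
        unsigned long size = sizeof(u8);
        u8 secboot;
        u32 attr;

        if (get_efi_var(L"SecureBoot", &guid, &attr, &size,
                        &secboot) != EFI_SUCCESS)
                secboot = 0;    /* treat unreadable as disabled here */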
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0bf0190917e0..0a91e5232127 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,7 +16,7 @@
#define EFI_DT_ADDR_CELLS_DEFAULT 2
#define EFI_DT_SIZE_CELLS_DEFAULT 2
-static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+static void fdt_update_cell_size(void *fdt)
{
int offset;
@@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
}
-static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
+static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
u64 initrd_addr, u64 initrd_size)
{
@@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
- pr_efi_err(sys_table, "Device Tree header not valid!\n");
+ pr_efi_err("Device Tree header not valid!\n");
return EFI_LOAD_ERROR;
}
/*
@@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
* configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
- pr_efi_err(sys_table, "Truncated device tree! foo!\n");
+ pr_efi_err("Truncated device tree! foo!\n");
return EFI_LOAD_ERROR;
}
}
@@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
* Any failure from the following function is
* non-critical:
*/
- fdt_update_cell_size(sys_table, fdt);
+ fdt_update_cell_size(fdt);
}
}
@@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
@@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
efi_status_t efi_status;
- efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+ efi_status = efi_get_random_bytes(sizeof(fdt_val64),
(u8 *)&fdt_val64);
if (efi_status == EFI_SUCCESS) {
status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
@@ -210,8 +209,7 @@ struct exit_boot_struct {
void *new_fdt_addr;
};
-static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map,
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
void *priv)
{
struct exit_boot_struct *p = priv;
@@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
* with the final memory map in it.
*/
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
- void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
@@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
- status = efi_get_memory_map(sys_table, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+ pr_efi_err("Unable to retrieve UEFI memory map.\n");
return status;
}
- pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
+ pr_efi("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
- status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
+ status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN,
new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
+ pr_efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* Now that we have done our final memory allocation (and free)
* we can get the memory map key needed for exit_boot_services().
*/
- status = efi_get_memory_map(sys_table, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
- status = update_fdt(sys_table, (void *)fdt_addr, fdt_size,
+ status = update_fdt((void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
initrd_addr, initrd_size);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to construct new device tree.\n");
+ pr_efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
@@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
priv.runtime_entry_count = &runtime_entry_count;
priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
@@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return EFI_SUCCESS;
/* Install the new virtual address map */
- svam = sys_table->runtime->set_virtual_address_map;
+ svam = efi_system_table()->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
desc_ver, runtime_map);
@@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return EFI_SUCCESS;
}
- pr_efi_err(sys_table, "Exit boot services failed.\n");
+ pr_efi_err("Exit boot services failed.\n");
fail_free_new_fdt:
- efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr);
+ efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- sys_table->boottime->free_pool(runtime_map);
+ efi_system_table()->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
+void *get_fdt(unsigned long *fdt_size)
{
void *fdt;
- fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
return NULL;
if (fdt_check_header(fdt) != 0) {
- pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
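
allocate_new_fdt_and_exit_boot() now reaches the firmware through efi_system_table() instead of threading a pointer down the call chain. Note that exit_boot_func() may run twice when the first exit_boot_services() attempt hits a stale key, so callbacks must stay idempotent; a minimal callback under the new two-argument prototype (struct and names illustrative):

        struct my_boot_state {
                void *new_fdt;
        };

        static efi_status_t my_exit_boot_func(struct efi_boot_memmap *map,
                                              void *priv)
        {
                struct my_boot_state *s = priv;

                /* Boot services are still live here; keep the body
                 * idempotent, as it can be invoked a second time. */
                (void)s;
                return EFI_SUCCESS;
        }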
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 0101ca4c13b1..55e6b3f286fe 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -10,6 +10,8 @@
#include <asm/efi.h>
#include <asm/setup.h>
+#include "efistub.h"
+
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
{
u8 first, len;
@@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
static void
setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
- struct efi_pixel_bitmask pixel_info, int pixel_format)
+ efi_pixel_bitmask_t pixel_info, int pixel_format)
{
if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
si->lfb_depth = 32;
@@ -83,189 +85,44 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_status_t
-__gop_query32(efi_system_table_t *sys_table_arg,
- struct efi_graphics_output_protocol_32 *gop32,
- struct efi_graphics_output_mode_info **info,
- unsigned long *size, u64 *fb_base)
-{
- struct efi_graphics_output_protocol_mode_32 *mode;
- efi_graphics_output_protocol_query_mode query_mode;
- efi_status_t status;
- unsigned long m;
-
- m = gop32->mode;
- mode = (struct efi_graphics_output_protocol_mode_32 *)m;
- query_mode = (void *)(unsigned long)gop32->query_mode;
-
- status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
- info);
- if (status != EFI_SUCCESS)
- return status;
-
- *fb_base = mode->frame_buffer_base;
- return status;
-}
-
-static efi_status_t
-setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
- efi_guid_t *proto, unsigned long size, void **gop_handle)
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
{
- struct efi_graphics_output_protocol_32 *gop32, *first_gop;
- unsigned long nr_gops;
+ efi_graphics_output_protocol_t *gop, *first_gop;
u16 width, height;
u32 pixels_per_scan_line;
u32 ext_lfb_base;
- u64 fb_base;
- struct efi_pixel_bitmask pixel_info;
+ efi_physical_addr_t fb_base;
+ efi_pixel_bitmask_t pixel_info;
int pixel_format;
- efi_status_t status = EFI_NOT_FOUND;
- u32 *handles = (u32 *)(unsigned long)gop_handle;
- int i;
-
- first_gop = NULL;
- gop32 = NULL;
-
- nr_gops = size / sizeof(u32);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_mode_info *info = NULL;
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
- bool conout_found = false;
- void *dummy = NULL;
- efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
- u64 current_fb_base;
-
- status = efi_call_early(handle_protocol, h,
- proto, (void **)&gop32);
- if (status != EFI_SUCCESS)
- continue;
-
- status = efi_call_early(handle_protocol, h,
- &conout_proto, &dummy);
- if (status == EFI_SUCCESS)
- conout_found = true;
-
- status = __gop_query32(sys_table_arg, gop32, &info, &size,
- &current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
- info->pixel_format != PIXEL_BLT_ONLY) {
- /*
- * Systems that use the UEFI Console Splitter may
- * provide multiple GOP devices, not all of which are
- * backed by real hardware. The workaround is to search
- * for a GOP implementing the ConOut protocol, and if
- * one isn't found, to just fall back to the first GOP.
- */
- width = info->horizontal_resolution;
- height = info->vertical_resolution;
- pixel_format = info->pixel_format;
- pixel_info = info->pixel_information;
- pixels_per_scan_line = info->pixels_per_scan_line;
- fb_base = current_fb_base;
-
- /*
- * Once we've found a GOP supporting ConOut,
- * don't bother looking any further.
- */
- first_gop = gop32;
- if (conout_found)
- break;
- }
- }
-
- /* Did we find any GOPs? */
- if (!first_gop)
- goto out;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_width = width;
- si->lfb_height = height;
- si->lfb_base = fb_base;
-
- ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
- if (ext_lfb_base) {
- si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- si->ext_lfb_base = ext_lfb_base;
- }
-
- si->pages = 1;
-
- setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
- si->lfb_size = si->lfb_linelength * si->lfb_height;
-
- si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
- return status;
-}
-
-static efi_status_t
-__gop_query64(efi_system_table_t *sys_table_arg,
- struct efi_graphics_output_protocol_64 *gop64,
- struct efi_graphics_output_mode_info **info,
- unsigned long *size, u64 *fb_base)
-{
- struct efi_graphics_output_protocol_mode_64 *mode;
- efi_graphics_output_protocol_query_mode query_mode;
efi_status_t status;
- unsigned long m;
-
- m = gop64->mode;
- mode = (struct efi_graphics_output_protocol_mode_64 *)m;
- query_mode = (void *)(unsigned long)gop64->query_mode;
-
- status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
- info);
- if (status != EFI_SUCCESS)
- return status;
-
- *fb_base = mode->frame_buffer_base;
- return status;
-}
-
-static efi_status_t
-setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
- efi_guid_t *proto, unsigned long size, void **gop_handle)
-{
- struct efi_graphics_output_protocol_64 *gop64, *first_gop;
- unsigned long nr_gops;
- u16 width, height;
- u32 pixels_per_scan_line;
- u32 ext_lfb_base;
- u64 fb_base;
- struct efi_pixel_bitmask pixel_info;
- int pixel_format;
- efi_status_t status = EFI_NOT_FOUND;
- u64 *handles = (u64 *)(unsigned long)gop_handle;
+ efi_handle_t h;
int i;
first_gop = NULL;
- gop64 = NULL;
+ gop = NULL;
- nr_gops = size / sizeof(u64);
- for (i = 0; i < nr_gops; i++) {
- struct efi_graphics_output_mode_info *info = NULL;
+ for_each_efi_handle(h, handles, size, i) {
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
void *dummy = NULL;
- efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
- u64 current_fb_base;
+ efi_physical_addr_t current_fb_base;
- status = efi_call_early(handle_protocol, h,
- proto, (void **)&gop64);
+ status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
if (status != EFI_SUCCESS)
continue;
- status = efi_call_early(handle_protocol, h,
- &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
if (status == EFI_SUCCESS)
conout_found = true;
- status = __gop_query64(sys_table_arg, gop64, &info, &size,
- &current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ current_fb_base = efi_table_attr(mode, frame_buffer_base);
+
+ if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@@ -285,7 +142,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- first_gop = gop64;
+ first_gop = gop;
if (conout_found)
break;
}
@@ -293,7 +150,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
- goto out;
+ return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -315,40 +172,32 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
- return status;
+
+ return EFI_SUCCESS;
}
/*
* See if we have Graphics Output Protocol
*/
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
- struct screen_info *si, efi_guid_t *proto,
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
unsigned long size)
{
efi_status_t status;
void **gop_handle = NULL;
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- size, (void **)&gop_handle);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&gop_handle);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_early(locate_handle,
- EFI_LOCATE_BY_PROTOCOL,
- proto, NULL, &size, gop_handle);
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
+ &size, gop_handle);
if (status != EFI_SUCCESS)
goto free_handle;
- if (efi_is_64bit()) {
- status = setup_gop64(sys_table_arg, si, proto, size,
- gop_handle);
- } else {
- status = setup_gop32(sys_table_arg, si, proto, size,
- gop_handle);
- }
+ status = setup_gop(si, proto, size, gop_handle);
free_handle:
- efi_call_early(free_pool, gop_handle);
+ efi_bs_call(free_pool, gop_handle);
return status;
}
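
setup_gop32()/setup_gop64() merge into one setup_gop(): for_each_efi_handle() replaces the manual u32/u64 handle widening, and the mode and framebuffer fields are read through efi_table_attr() instead of an explicit query_mode call. The ConOut-preference heuristic is unchanged. The same handle walk, reused in a sketch that counts ConOut-capable GOPs (function name illustrative):

        static int count_conout_gops(void **handles, unsigned long size)
        {
                efi_guid_t conout = EFI_CONSOLE_OUT_DEVICE_GUID;
                efi_handle_t h;
                void *dummy = NULL;
                int i, n = 0;

                for_each_efi_handle(h, handles, size, i) {
                        if (efi_bs_call(handle_protocol, h, &conout,
                                        &dummy) == EFI_SUCCESS)
                                n++;
                }
                return n;
        }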
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
new file mode 100644
index 000000000000..b025e59b94df
--- /dev/null
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI-related functions used by the EFI stub on multiple
+ * architectures.
+ *
+ * Copyright 2019 Google, LLC
+ */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+void efi_pci_disable_bridge_busmaster(void)
+{
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long pci_handle_size = 0;
+ efi_handle_t *pci_handle = NULL;
+ efi_handle_t handle;
+ efi_status_t status;
+ u16 class, command;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ pr_efi_err("Failed to locate PCI I/O handles'\n");
+ return;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
+ (void **)&pci_handle);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, pci_handle);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err("Failed to locate PCI I/O handles'\n");
+ goto free_handle;
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+ unsigned long segment_nr, bus_nr, device_nr, func_nr;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ /*
+ * Disregard devices living on bus 0 - these are not behind a
+ * bridge so no point in disconnecting them from their drivers.
+ */
+ status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr,
+ &device_nr, &func_nr);
+ if (status != EFI_SUCCESS || bus_nr == 0)
+ continue;
+
+ /*
+ * Don't disconnect VGA controllers so we don't risk losing
+ * access to the framebuffer. Drivers for true PCIe graphics
+ * controllers that are behind a PCIe root port do not use
+ * DMA to implement the GOP framebuffer anyway [although they
+ * may use it in their implementation of Gop->Blt()], and so
+ * disabling DMA in the PCI bridge should not interfere with
+ * normal operation of the device.
+ */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+ if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA)
+ continue;
+
+ /* Disconnect this handle from all its drivers */
+ efi_bs_call(disconnect_controller, handle, NULL, NULL);
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+
+ if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI)
+ continue;
+
+ /* Disable busmastering */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER))
+ continue;
+
+ command &= ~PCI_COMMAND_MASTER;
+ status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS)
+ pr_efi_err("Failed to disable PCI busmastering\n");
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
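
The two passes are deliberate: every non-VGA device behind a bridge is first disconnected from its firmware driver, and only afterwards is bus-mastering cleared on the bridges, so no driver can still be issuing DMA through a bridge when the bit drops. The class test itself is a single 16-bit config read at PCI_CLASS_DEVICE (offset 0x0a), which yields (base class << 8) | subclass, so the constants compare directly (helper names illustrative):

        #include <linux/pci_ids.h>

        static bool class_is_bridge(u16 class)
        {
                return class == PCI_CLASS_BRIDGE_PCI;   /* 0x0604 */
        }

        static bool class_is_vga(u16 class)
        {
                return class == PCI_CLASS_DISPLAY_VGA;  /* 0x0300 */
        }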
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 35edd7cfb6a1..316ce9ff0193 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -9,38 +9,34 @@
#include "efistub.h"
-typedef struct efi_rng_protocol efi_rng_protocol_t;
-
-typedef struct {
- u32 get_info;
- u32 get_rng;
-} efi_rng_protocol_32_t;
-
-typedef struct {
- u64 get_info;
- u64 get_rng;
-} efi_rng_protocol_64_t;
-
-struct efi_rng_protocol {
- efi_status_t (*get_info)(struct efi_rng_protocol *,
- unsigned long *, efi_guid_t *);
- efi_status_t (*get_rng)(struct efi_rng_protocol *,
- efi_guid_t *, unsigned long, u8 *out);
+typedef union efi_rng_protocol efi_rng_protocol_t;
+
+union efi_rng_protocol {
+ struct {
+ efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *,
+ unsigned long *,
+ efi_guid_t *);
+ efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *,
+ efi_guid_t *, unsigned long,
+ u8 *out);
+ };
+ struct {
+ u32 get_info;
+ u32 get_rng;
+ } mixed_mode;
};
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
- unsigned long size, u8 *out)
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_status_t status;
- struct efi_rng_protocol *rng;
+ efi_rng_protocol_t *rng = NULL;
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
- return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out);
+ return efi_call_proto(rng, get_rng, NULL, size, out);
}
/*
@@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
*/
#define MD_NUM_SLOTS(md) ((md)->virt_addr)
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size,
+efi_status_t efi_random_alloc(unsigned long size,
unsigned long align,
unsigned long *addr,
unsigned long random_seed)
@@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
map.key_ptr = NULL;
map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map);
+ status = efi_get_memory_map(&map);
if (status != EFI_SUCCESS)
return status;
@@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
target = round_up(md->phys_addr, align) + target_slot * align;
pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, pages, &target);
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
if (status == EFI_SUCCESS)
*addr = target;
break;
}
- efi_call_early(free_pool, memory_map);
+ efi_bs_call(free_pool, memory_map);
return status;
}
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+efi_status_t efi_random_get_seed(void)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
- struct efi_rng_protocol *rng;
- struct linux_efi_random_seed *seed;
+ efi_rng_protocol_t *rng = NULL;
+ struct linux_efi_random_seed *seed = NULL;
efi_status_t status;
- status = efi_call_early(locate_protocol, &rng_proto, NULL,
- (void **)&rng);
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
- (void **)&seed);
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
+ (void **)&seed);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw,
+ status = efi_call_proto(rng, get_rng, &rng_algo_raw,
EFI_RANDOM_SEED_SIZE, seed->bits);
if (status == EFI_UNSUPPORTED)
@@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL,
- EFI_RANDOM_SEED_SIZE, seed->bits);
+ status = efi_call_proto(rng, get_rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
seed->size = EFI_RANDOM_SEED_SIZE;
- status = efi_call_early(install_configuration_table, &rng_table_guid,
- seed);
+ status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
if (status != EFI_SUCCESS)
goto err_freepool;
return EFI_SUCCESS;
err_freepool:
- efi_call_early(free_pool, seed);
+ efi_bs_call(free_pool, seed);
return status;
}
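
The RNG protocol adopts the new libstub idiom: a union whose anonymous first member carries the native __efiapi function pointers and whose mixed_mode member carries the 32-bit entry points, so efi_call_proto() can dispatch correctly in either mode. The same shape applied to a made-up two-method protocol, for illustration:

        typedef union demo_protocol demo_protocol_t;

        union demo_protocol {
                struct {
                        efi_status_t (__efiapi *reset)(demo_protocol_t *);
                        efi_status_t (__efiapi *read)(demo_protocol_t *,
                                                      unsigned long *,
                                                      void *);
                };
                struct {
                        u32 reset;
                        u32 read;
                } mixed_mode;
        };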
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index edba5e7a3743..a765378ad18c 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
-#define get_efi_var(name, vendor, ...) \
- efi_call_runtime(get_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__);
-
/*
* Determine whether we're in secure boot mode.
*
* Please keep the logic in sync with
* arch/x86/xen/efi.c:xen_efi_get_secureboot().
*/
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+enum efi_secureboot_mode efi_get_secureboot(void)
{
u32 attr;
u8 secboot, setupmode, moksbstate;
@@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
- pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+ pr_efi("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
out_efi_err:
- pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+ pr_efi_err("Could not determine UEFI Secure Boot status.\n");
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index eb9af83e4d59..1d59e103a2e3 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
#define MEMORY_ONLY_RESET_CONTROL_GUID \
EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
-#define get_efi_var(name, vendor, ...) \
- efi_call_runtime(get_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__)
-
-#define set_efi_var(name, vendor, ...) \
- efi_call_runtime(set_variable, \
- (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
- __VA_ARGS__)
-
/*
* Enable reboot attack mitigation. This requests that the firmware clear the
* RAM on next reboot before proceeding with boot, ensuring that any secrets
* are cleared. If userland has ensured that all secrets have been removed
* from RAM before reboot it can simply reset this variable.
*/
-void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+void efi_enable_reset_attack_mitigation(void)
{
u8 val = 1;
efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
@@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
#endif
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(void)
{
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
@@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
size_t log_size, last_entry_size;
efi_bool_t truncated;
int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
- void *tcg2_protocol = NULL;
+ efi_tcg2_protocol_t *tcg2_protocol = NULL;
int final_events_size = 0;
- status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
- &tcg2_protocol);
+ status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
+ (void **)&tcg2_protocol);
if (status != EFI_SUCCESS)
return;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log,
- tcg2_protocol, version, &log_location,
- &log_last_entry, &truncated);
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry, &truncated);
if (status != EFI_SUCCESS || !log_location) {
version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log,
- tcg2_protocol, version, &log_location,
- &log_last_entry, &truncated);
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry,
+ &truncated);
if (status != EFI_SUCCESS || !log_location)
return;
@@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
}
/* Allocate space for the logs and copy them. */
- status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- sizeof(*log_tbl) + log_size,
- (void **) &log_tbl);
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
- efi_printk(sys_table_arg,
- "Unable to allocate memory for event log\n");
+ efi_printk("Unable to allocate memory for event log\n");
return;
}
@@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
* Figure out whether any events have already been logged to the
* final events structure, and if so how much space they take up
*/
- final_events_table = get_efi_config_table(sys_table_arg,
- LINUX_EFI_TPM_FINAL_LOG_GUID);
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
int offset;
@@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
log_tbl->version = version;
memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
- status = efi_call_early(install_configuration_table,
- &linux_eventlog_guid, log_tbl);
+ status = efi_bs_call(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
if (status != EFI_SUCCESS)
goto err_free;
return;
err_free:
- efi_call_early(free_pool, log_tbl);
+ efi_bs_call(free_pool, log_tbl);
}
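
Stripped of the TPM specifics, the tail of this function is the standard pattern for handing a boot-time blob over to the kernel proper: allocate from the loader pool, copy, install under a GUID, and free on failure. A generic sketch under those assumptions (function and parameter names illustrative):

        static void publish_table(efi_guid_t *guid, void *src,
                                  unsigned long len)
        {
                void *tbl = NULL;
                efi_status_t status;

                status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len,
                                     &tbl);
                if (status != EFI_SUCCESS)
                        return;

                memcpy(tbl, src, len);
                status = efi_bs_call(install_configuration_table, guid, tbl);
                if (status != EFI_SUCCESS)
                        efi_bs_call(free_pool, tbl);    /* undo on failure */
        }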
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 38b686c67b17..2ff1883dc788 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
return PFN_PHYS(page_to_pfn(p));
}
+void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
+{
+ if (flags & EFI_MEMMAP_MEMBLOCK) {
+ if (slab_is_available())
+ memblock_free_late(phys, size);
+ else
+ memblock_free(phys, size);
+ } else if (flags & EFI_MEMMAP_SLAB) {
+ struct page *p = pfn_to_page(PHYS_PFN(phys));
+ unsigned int order = get_order(size);
+
+ free_pages((unsigned long) page_address(p), order);
+ }
+}
+
+static void __init efi_memmap_free(void)
+{
+ __efi_memmap_free(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map,
+ efi.memmap.flags);
+}
+
/**
* efi_memmap_alloc - Allocate memory for the EFI memory map
* @num_entries: Number of entries in the allocated map.
+ * @data: efi memmap installation parameters
*
* Depending on whether mm_init() has already been invoked or not,
* either memblock or "normal" page allocation is used.
@@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
* Returns the physical address of the allocated memory map on
* success, zero on failure.
*/
-phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+int __init efi_memmap_alloc(unsigned int num_entries,
+ struct efi_memory_map_data *data)
{
- unsigned long size = num_entries * efi.memmap.desc_size;
-
- if (slab_is_available())
- return __efi_memmap_alloc_late(size);
+ /* Expect allocation parameters to be zero-initialized */
+ WARN_ON(data->phys_map || data->size);
+
+ data->size = num_entries * efi.memmap.desc_size;
+ data->desc_version = efi.memmap.desc_version;
+ data->desc_size = efi.memmap.desc_size;
+ data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
+ data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
+
+ if (slab_is_available()) {
+ data->flags |= EFI_MEMMAP_SLAB;
+ data->phys_map = __efi_memmap_alloc_late(data->size);
+ } else {
+ data->flags |= EFI_MEMMAP_MEMBLOCK;
+ data->phys_map = __efi_memmap_alloc_early(data->size);
+ }
- return __efi_memmap_alloc_early(size);
+ if (!data->phys_map)
+ return -ENOMEM;
+ return 0;
}
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
- * @late: Use early or late mapping function?
*
* This function takes care of figuring out which function to use to
* map the EFI memory map in efi.memmap based on how far into the boot
* we are.
*
- * During bootup @late should be %false since we only have access to
- * the early_memremap*() functions as the vmalloc space isn't setup.
- * Once the kernel is fully booted we can fallback to the more robust
- * memremap*() API.
+ * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
+ * only have access to the early_memremap*() functions as the vmalloc
+ * space isn't set up. Once the kernel is fully booted we can fall back
+ * to the more robust memremap*() API.
*
* Returns zero on success, a negative error code on failure.
*/
-static int __init
-__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
struct efi_memory_map map;
phys_addr_t phys_map;
@@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
phys_map = data->phys_map;
- if (late)
+ if (data->flags & EFI_MEMMAP_LATE)
map.map = memremap(phys_map, data->size, MEMREMAP_WB);
else
map.map = early_memremap(phys_map, data->size);
@@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
return -ENOMEM;
}
+ /* NOP if (data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) == 0 */
+ efi_memmap_free();
+
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
map.desc_version = data->desc_version;
map.desc_size = data->desc_size;
- map.late = late;
+ map.flags = data->flags;
set_bit(EFI_MEMMAP, &efi.flags);
@@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
/* Cannot go backwards */
- WARN_ON(efi.memmap.late);
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
- return __efi_memmap_init(data, false);
+ data->flags = 0;
+ return __efi_memmap_init(data);
}
void __init efi_memmap_unmap(void)
@@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void)
if (!efi_enabled(EFI_MEMMAP))
return;
- if (!efi.memmap.late) {
+ if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
unsigned long size;
size = efi.memmap.desc_size * efi.memmap.nr_map;
@@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
struct efi_memory_map_data data = {
.phys_map = addr,
.size = size,
+ .flags = EFI_MEMMAP_LATE,
};
/* Did we forget to unmap the early EFI memmap? */
WARN_ON(efi.memmap.map);
/* Were we already called? */
- WARN_ON(efi.memmap.late);
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
/*
* It makes no sense to allow callers to register different
@@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
data.desc_version = efi.memmap.desc_version;
data.desc_size = efi.memmap.desc_size;
- return __efi_memmap_init(&data, true);
+ return __efi_memmap_init(&data);
}
/**
* efi_memmap_install - Install a new EFI memory map in efi.memmap
- * @addr: Physical address of the memory map
- * @nr_map: Number of entries in the memory map
+ * @data: map allocation parameters (address, size, flags)
*
* Unlike efi_memmap_init_*(), this function does not allow the caller
* to switch from early to late mappings. It simply uses the existing
@@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
*
* Returns zero on success, a negative error code on failure.
*/
-int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
+int __init efi_memmap_install(struct efi_memory_map_data *data)
{
- struct efi_memory_map_data data;
-
efi_memmap_unmap();
- data.phys_map = addr;
- data.size = efi.memmap.desc_size * nr_map;
- data.desc_version = efi.memmap.desc_version;
- data.desc_size = efi.memmap.desc_size;
-
- return __efi_memmap_init(&data, efi.memmap.late);
+ return __efi_memmap_init(data);
}
/**
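
efi_memmap_alloc() and efi_memmap_install() now communicate through a single efi_memory_map_data: the allocator records whether memblock or the page allocator backed the map (EFI_MEMMAP_MEMBLOCK vs. EFI_MEMMAP_SLAB) so that a later __efi_memmap_init() can free the previous map correctly. Caller-side sketch of the new contract (nr_entries and the surrounding function assumed):

        struct efi_memory_map_data data = {};   /* must start zeroed */
        int ret;

        ret = efi_memmap_alloc(nr_entries, &data);
        if (ret)
                return ret;

        /* ... write the updated descriptors at data.phys_map ... */

        return efi_memmap_install(&data);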
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
index 76b0c354a027..de1a9a1f9f14 100644
--- a/drivers/firmware/efi/rci2-table.c
+++ b/drivers/firmware/efi/rci2-table.c
@@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void)
struct kobject *tables_kobj;
int ret = -ENOMEM;
+ if (rci2_table_phys == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
rci2_base = memremap(rci2_table_phys,
sizeof(struct rci2_table_global_hdr),
MEMREMAP_WB);
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 8d132e4f008a..0205987a4fd4 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -163,8 +163,15 @@ static int coreboot_table_probe(struct platform_device *pdev)
return ret;
}
+static int __cb_dev_unregister(struct device *dev, void *dummy)
+{
+ device_unregister(dev);
+ return 0;
+}
+
static int coreboot_table_remove(struct platform_device *pdev)
{
+ bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
bus_unregister(&coreboot_bus_type);
return 0;
}
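
Previously remove() tore down only the bus itself, leaking the child devices that probe had registered; the new helper unregisters each one first. bus_for_each_dev() invokes the callback for every device on the bus and stops early on a non-zero return, e.g. (counting sketch, names illustrative):

        static int __cb_dev_count(struct device *dev, void *data)
        {
                (*(int *)data)++;
                return 0;       /* non-zero would stop the walk early */
        }

        static int coreboot_count_devs(void)
        {
                int n = 0;

                bus_for_each_dev(&coreboot_bus_type, NULL, &n,
                                 __cb_dev_count);
                return n;
        }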
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index edaa4e5d84ad..5b2011ebbe26 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -76,6 +76,7 @@
#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
#define GSMI_CMD_CLEAR_CONFIG 0x20
#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
+#define GSMI_CMD_RESERVED 0xff
/* Magic entry type for kernel events */
#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD
@@ -746,6 +747,7 @@ MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
static __init int gsmi_system_valid(void)
{
u32 hash;
+ u16 cmd, result;
if (!dmi_check_system(gsmi_dmi_table))
return -ENODEV;
@@ -780,6 +782,23 @@ static __init int gsmi_system_valid(void)
return -ENODEV;
}
+ /* Test the SMI handler with a bogus command. If it leaves the
+ * calling argument in %ax untouched, there is no handler for
+ * GSMI commands.
+ */
+ cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8;
+ asm volatile (
+ "outb %%al, %%dx\n\t"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (acpi_gbl_FADT.smi_command)
+ : "memory", "cc"
+ );
+ if (cmd == result) {
+ pr_info("gsmi: no gsmi handler in firmware\n");
+ return -ENODEV;
+ }
+
/* Found */
return 0;
}
@@ -1016,6 +1035,9 @@ out_err:
dma_pool_destroy(gsmi_dev.dma_pool);
platform_device_unregister(gsmi_dev.pdev);
pr_info("gsmi: failed to load: %d\n", ret);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
return ret;
}
@@ -1037,6 +1059,9 @@ static void __exit gsmi_exit(void)
gsmi_buf_free(gsmi_dev.name_buf);
dma_pool_destroy(gsmi_dev.dma_pool);
platform_device_unregister(gsmi_dev.pdev);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
}
module_init(gsmi_init);
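
The probe relies on the convention that a missing SMI handler leaves %ax exactly as the outb instruction wrote it, whereas a real handler is expected to overwrite it with a result code. The command word packs the callback selector into the low byte and the deliberately reserved command number into the high byte (GSMI_CALLBACK is defined earlier in the file):

        u16 cmd = GSMI_CALLBACK | (GSMI_CMD_RESERVED << 8);
        /* on entry to the handler: %al = GSMI_CALLBACK, %ah = 0xff */

The paired platform_driver_unregister() additions balance the corresponding driver registration on both the error path and module exit.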
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 0dbee32da4c6..1d2e5b85d7ca 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config IMX_DSP
- bool "IMX DSP Protocol driver"
+ tristate "IMX DSP Protocol driver"
depends on IMX_MBOX
help
This enables DSP IPC protocol between host AP (Linux)
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 7e12cbdf957c..96758b71a8db 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -104,6 +104,7 @@ struct ibft_control {
u16 tgt0_off;
u16 nic1_off;
u16 tgt1_off;
+ u16 expansion[0];
} __attribute__((__packed__));
struct ibft_initiator {
@@ -235,7 +236,7 @@ static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
"found %d instead!\n", t, id, hdr->id);
return -ENODEV;
}
- if (hdr->length != length) {
+ if (length && hdr->length != length) {
printk(KERN_ERR "iBFT error: We expected the %s " \
"field header.length to have %d but " \
"found %d instead!\n", t, length, hdr->length);
@@ -749,16 +750,16 @@ static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
control = (void *)header + sizeof(*header);
end = (void *)control + control->hdr.length;
eot_offset = (void *)header + header->header.length - (void *)control;
- rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control,
- sizeof(*control));
+ rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0);
/* iBFT table safety checking */
rc |= ((control->hdr.index) ? -ENODEV : 0);
+ rc |= ((control->hdr.length < sizeof(*control)) ? -ENODEV : 0);
if (rc) {
printk(KERN_ERR "iBFT error: Control header is invalid!\n");
return rc;
}
- for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) {
+ for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) {
offset = *(u16 *)ptr;
if (offset && offset < header->header.length &&
offset < eot_offset) {
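
Two hardening fixes here: the control header now only needs to be at least sizeof(*control), since the new zero-length expansion[] member makes larger vendor-extended headers legal, and the offset loop only advances while a full u16 still fits, so a truncated table can no longer cause an overread past end. The new loop bound as a standalone predicate (helper illustrative):

        static bool u16_fits(void *ptr, void *end)
        {
                /* both bytes of *(u16 *)ptr must lie before 'end' */
                return ptr + sizeof(u16) <= end;
        }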
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index b3b6c15e7b36..2937d44b5df4 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -97,7 +97,7 @@ static inline bool psci_has_ext_power_state(void)
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
-static inline bool psci_has_osi_support(void)
+bool psci_has_osi_support(void)
{
return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
}
@@ -162,6 +162,15 @@ static u32 psci_get_version(void)
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
+int psci_set_osi_mode(void)
+{
+ int err;
+
+ err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
+ PSCI_1_0_SUSPEND_MODE_OSI, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
int err;
@@ -544,9 +553,14 @@ static int __init psci_1_0_init(struct device_node *np)
if (err)
return err;
- if (psci_has_osi_support())
+ if (psci_has_osi_support()) {
pr_info("OSI mode supported.\n");
+ /* Default to PC mode. */
+ invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
+ PSCI_1_0_SUSPEND_MODE_PC, 0, 0);
+ }
+
return 0;
}
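
psci_has_osi_support() loses its static so cpuidle code can query it, and the new psci_set_osi_mode() lets an OS-initiated cpuidle driver opt in, with the firmware result translated to a Linux errno. Init now also programs platform-coordinated (PC) mode explicitly, so the kernel always starts from a known suspend mode. Consumer-side sketch:

        if (psci_has_osi_support()) {
                int err = psci_set_osi_mode();

                if (err)
                        pr_warn("PSCI: failed to enable OSI mode: %d\n",
                                err);
        }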
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
deleted file mode 100644
index 48e2ef794ea3..000000000000
--- a/drivers/firmware/qcom_scm-32.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- */
-
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/qcom_scm.h>
-#include <linux/dma-mapping.h>
-
-#include "qcom_scm.h"
-
-#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
-#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
-#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
-#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
-
-#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
-#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
-#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
-#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
-
-struct qcom_scm_entry {
- int flag;
- void *entry;
-};
-
-static struct qcom_scm_entry qcom_scm_wb[] = {
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
-};
-
-static DEFINE_MUTEX(qcom_scm_lock);
-
-/**
- * struct qcom_scm_command - one SCM command buffer
- * @len: total available memory for command and response
- * @buf_offset: start of command buffer
- * @resp_hdr_offset: start of response buffer
- * @id: command to be executed
- * @buf: buffer returned from qcom_scm_get_command_buffer()
- *
- * An SCM command is laid out in memory as follows:
- *
- * ------------------- <--- struct qcom_scm_command
- * | command header |
- * ------------------- <--- qcom_scm_get_command_buffer()
- * | command buffer |
- * ------------------- <--- struct qcom_scm_response and
- * | response header | qcom_scm_command_to_response()
- * ------------------- <--- qcom_scm_get_response_buffer()
- * | response buffer |
- * -------------------
- *
- * There can be arbitrary padding between the headers and buffers so
- * you should always use the appropriate qcom_scm_get_*_buffer() routines
- * to access the buffers in a safe manner.
- */
-struct qcom_scm_command {
- __le32 len;
- __le32 buf_offset;
- __le32 resp_hdr_offset;
- __le32 id;
- __le32 buf[0];
-};
-
-/**
- * struct qcom_scm_response - one SCM response buffer
- * @len: total available memory for response
- * @buf_offset: start of response data relative to start of qcom_scm_response
- * @is_complete: indicates if the command has finished processing
- */
-struct qcom_scm_response {
- __le32 len;
- __le32 buf_offset;
- __le32 is_complete;
-};
-
-/**
- * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
- * @cmd: command
- *
- * Returns a pointer to a response for a command.
- */
-static inline struct qcom_scm_response *qcom_scm_command_to_response(
- const struct qcom_scm_command *cmd)
-{
- return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
-}
-
-/**
- * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
- * @cmd: command
- *
- * Returns a pointer to the command buffer of a command.
- */
-static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
-{
- return (void *)cmd->buf;
-}
-
-/**
- * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
- * @rsp: response
- *
- * Returns a pointer to a response buffer of a response.
- */
-static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
-{
- return (void *)rsp + le32_to_cpu(rsp->buf_offset);
-}
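
For the legacy convention being deleted here, the layout diagram above reduces to two stored offsets; the (also removed) qcom_scm_call() fills them in as below, reproduced as a sketch with cmd, cmd_len and resp_len assumed from that function:

        size_t hdr_len = sizeof(struct qcom_scm_command);

        cmd->len             = cpu_to_le32(hdr_len + cmd_len +
                                           sizeof(struct qcom_scm_response) +
                                           resp_len);
        cmd->buf_offset      = cpu_to_le32(hdr_len);
        cmd->resp_hdr_offset = cpu_to_le32(hdr_len + cmd_len);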
-
-static u32 smc(u32 cmd_addr)
-{
- int context_id;
- register u32 r0 asm("r0") = 1;
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = cmd_addr;
- do {
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
-#ifdef REQUIRES_SEC
- ".arch_extension sec\n"
-#endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2)
- : "r3", "r12");
- } while (r0 == QCOM_SCM_INTERRUPTED);
-
- return r0;
-}
-
-/**
- * qcom_scm_call() - Send an SCM command
- * @dev: struct device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @cmd_buf: command buffer
- * @cmd_len: length of the command buffer
- * @resp_buf: response buffer
- * @resp_len: length of the response buffer
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- *
- * A note on cache maintenance:
- * Note that any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking qcom_scm_call and invalidated in the cache
- * immediately after qcom_scm_call returns. Cache maintenance on the command
- * and response buffers is taken care of by qcom_scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const void *cmd_buf, size_t cmd_len, void *resp_buf,
- size_t resp_len)
-{
- int ret;
- struct qcom_scm_command *cmd;
- struct qcom_scm_response *rsp;
- size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
- dma_addr_t cmd_phys;
-
- cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- cmd->len = cpu_to_le32(alloc_len);
- cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
- cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
-
- cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
- if (cmd_buf)
- memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
-
- rsp = qcom_scm_command_to_response(cmd);
-
- cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cmd_phys)) {
- kfree(cmd);
- return -ENOMEM;
- }
-
- mutex_lock(&qcom_scm_lock);
- ret = smc(cmd_phys);
- if (ret < 0)
- ret = qcom_scm_remap_error(ret);
- mutex_unlock(&qcom_scm_lock);
- if (ret)
- goto out;
-
- do {
- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
- sizeof(*rsp), DMA_FROM_DEVICE);
- } while (!rsp->is_complete);
-
- if (resp_buf) {
- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
- le32_to_cpu(rsp->buf_offset),
- resp_len, DMA_FROM_DEVICE);
- memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
- resp_len);
- }
-out:
- dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
- kfree(cmd);
- return ret;
-}
-
-#define SCM_CLASS_REGISTER (0x2 << 8)
-#define SCM_MASK_IRQS BIT(5)
-#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
- SCM_CLASS_REGISTER | \
- SCM_MASK_IRQS | \
- (n & 0xf))
-
-/**
- * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @arg1: first argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptable, atomic and SMP safe.
- */
-static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
-{
- int context_id;
-
- register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = arg1;
-
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
-#ifdef REQUIRES_SEC
- ".arch_extension sec\n"
-#endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2)
- : "r3", "r12");
- return r0;
-}
-
-/**
- * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @arg1: first argument
- * @arg2: second argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptible, atomic and SMP safe.
- */
-static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
-{
- int context_id;
-
- register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = arg1;
- register u32 r3 asm("r3") = arg2;
-
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
- __asmeq("%4", "r3")
-#ifdef REQUIRES_SEC
- ".arch_extension sec\n"
-#endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
- : "r12");
- return r0;
-}
-
-u32 qcom_scm_get_version(void)
-{
- int context_id;
- static u32 version = -1;
- register u32 r0 asm("r0");
- register u32 r1 asm("r1");
-
- if (version != -1)
- return version;
-
- mutex_lock(&qcom_scm_lock);
-
- r0 = 0x1 << 8;
- r1 = (u32)&context_id;
- do {
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r1")
- __asmeq("%2", "r0")
- __asmeq("%3", "r1")
-#ifdef REQUIRES_SEC
- ".arch_extension sec\n"
-#endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0), "=r" (r1)
- : "r" (r0), "r" (r1)
- : "r2", "r3", "r12");
- } while (r0 == QCOM_SCM_INTERRUPTED);
-
- version = r1;
- mutex_unlock(&qcom_scm_lock);
-
- return version;
-}
-EXPORT_SYMBOL(qcom_scm_get_version);
-
-/**
- * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
- int flags = 0;
- int cpu;
- int scm_cb_flags[] = {
- QCOM_SCM_FLAG_COLDBOOT_CPU0,
- QCOM_SCM_FLAG_COLDBOOT_CPU1,
- QCOM_SCM_FLAG_COLDBOOT_CPU2,
- QCOM_SCM_FLAG_COLDBOOT_CPU3,
- };
-
-	if (!cpus || cpumask_empty(cpus))
- return -EINVAL;
-
- for_each_cpu(cpu, cpus) {
- if (cpu < ARRAY_SIZE(scm_cb_flags))
- flags |= scm_cb_flags[cpu];
- else
- set_cpu_present(cpu, false);
- }
-
- return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
- flags, virt_to_phys(entry));
-}
-
-/**
- * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @dev: device used for the SCM call
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
- const cpumask_t *cpus)
-{
- int ret;
- int flags = 0;
- int cpu;
- struct {
- __le32 flags;
- __le32 addr;
- } cmd;
-
- /*
- * Reassign only if we are switching from hotplug entry point
- * to cpuidle entry point or vice versa.
- */
- for_each_cpu(cpu, cpus) {
- if (entry == qcom_scm_wb[cpu].entry)
- continue;
- flags |= qcom_scm_wb[cpu].flag;
- }
-
- /* No change in entry function */
- if (!flags)
- return 0;
-
- cmd.addr = cpu_to_le32(virt_to_phys(entry));
- cmd.flags = cpu_to_le32(flags);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
- &cmd, sizeof(cmd), NULL, 0);
- if (!ret) {
- for_each_cpu(cpu, cpus)
- qcom_scm_wb[cpu].entry = entry;
- }
-
- return ret;
-}
-
-/**
- * __qcom_scm_cpu_power_down() - Power down the cpu
- * @flags: Flags to flush cache
- *
- * This is an endpoint for powering down the cpu. If there is a pending
- * interrupt, control returns from this function; otherwise the cpu jumps
- * to the warm boot entry point set for this cpu upon reset.
- */
-void __qcom_scm_cpu_power_down(u32 flags)
-{
- qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
- flags & QCOM_SCM_FLUSH_FLAG_MASK);
-}
-
-int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
-{
- int ret;
- __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
- __le32 ret_val = 0;
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
- &svc_cmd, sizeof(svc_cmd), &ret_val,
- sizeof(ret_val));
- if (ret)
- return ret;
-
- return le32_to_cpu(ret_val);
-}
-
-int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
- u32 req_cnt, u32 *resp)
-{
- if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
- return -ERANGE;
-
- return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
- req, req_cnt * sizeof(*req), resp, sizeof(*resp));
-}
-
-int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
- u32 mode)
-{
- struct ocmem_tz_lock {
- __le32 id;
- __le32 offset;
- __le32 size;
- __le32 mode;
- } request;
-
- request.id = cpu_to_le32(id);
- request.offset = cpu_to_le32(offset);
- request.size = cpu_to_le32(size);
- request.mode = cpu_to_le32(mode);
-
- return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD,
- &request, sizeof(request), NULL, 0);
-}
-
-int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
-{
- struct ocmem_tz_unlock {
- __le32 id;
- __le32 offset;
- __le32 size;
- } request;
-
- request.id = cpu_to_le32(id);
- request.offset = cpu_to_le32(offset);
- request.size = cpu_to_le32(size);
-
- return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD,
- &request, sizeof(request), NULL, 0);
-}
-
-void __qcom_scm_init(void)
-{
-}
-
-bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
-{
- __le32 out;
- __le32 in;
- int ret;
-
- in = cpu_to_le32(peripheral);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_IS_SUPPORTED_CMD,
- &in, sizeof(in),
- &out, sizeof(out));
-
- return ret ? false : !!out;
-}
-
-int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
- dma_addr_t metadata_phys)
-{
- __le32 scm_ret;
- int ret;
- struct {
- __le32 proc;
- __le32 image_addr;
- } request;
-
- request.proc = cpu_to_le32(peripheral);
- request.image_addr = cpu_to_le32(metadata_phys);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_INIT_IMAGE_CMD,
- &request, sizeof(request),
- &scm_ret, sizeof(scm_ret));
-
- return ret ? : le32_to_cpu(scm_ret);
-}
-
-int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
- phys_addr_t addr, phys_addr_t size)
-{
- __le32 scm_ret;
- int ret;
- struct {
- __le32 proc;
- __le32 addr;
- __le32 len;
- } request;
-
- request.proc = cpu_to_le32(peripheral);
- request.addr = cpu_to_le32(addr);
- request.len = cpu_to_le32(size);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_MEM_SETUP_CMD,
- &request, sizeof(request),
- &scm_ret, sizeof(scm_ret));
-
- return ret ? : le32_to_cpu(scm_ret);
-}
-
-int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
-{
- __le32 out;
- __le32 in;
- int ret;
-
- in = cpu_to_le32(peripheral);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
- &in, sizeof(in),
- &out, sizeof(out));
-
- return ret ? : le32_to_cpu(out);
-}
-
-int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
-{
- __le32 out;
- __le32 in;
- int ret;
-
- in = cpu_to_le32(peripheral);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_SHUTDOWN_CMD,
- &in, sizeof(in),
- &out, sizeof(out));
-
- return ret ? : le32_to_cpu(out);
-}
-
-int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
-{
- __le32 out;
- __le32 in = cpu_to_le32(reset);
- int ret;
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
- &in, sizeof(in),
- &out, sizeof(out));
-
- return ret ? : le32_to_cpu(out);
-}
-
-int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
-{
- return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
- enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
-}
-
-int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
-{
- struct {
- __le32 state;
- __le32 id;
- } req;
- __le32 scm_ret = 0;
- int ret;
-
- req.state = cpu_to_le32(state);
- req.id = cpu_to_le32(id);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
- &req, sizeof(req), &scm_ret, sizeof(scm_ret));
-
- return ret ? : le32_to_cpu(scm_ret);
-}
-
-int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
- size_t mem_sz, phys_addr_t src, size_t src_sz,
- phys_addr_t dest, size_t dest_sz)
-{
- return -ENODEV;
-}
-
-int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
- u32 spare)
-{
- struct msm_scm_sec_cfg {
- __le32 id;
- __le32 ctx_bank_num;
- } cfg;
- int ret, scm_ret = 0;
-
- cfg.id = cpu_to_le32(device_id);
- cfg.ctx_bank_num = cpu_to_le32(spare);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
- &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret));
-
- if (ret || scm_ret)
- return ret ? ret : -EINVAL;
-
- return 0;
-}
-
-int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
- size_t *size)
-{
- return -ENODEV;
-}
-
-int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
- u32 spare)
-{
- return -ENODEV;
-}
-
-int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
- unsigned int *val)
-{
- int ret;
-
- ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
- if (ret >= 0)
- *val = ret;
-
- return ret < 0 ? ret : 0;
-}
-
-int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
-{
- return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
- addr, val);
-}
-
-int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
-{
- return -ENODEV;
-}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
deleted file mode 100644
index 3c5850350974..000000000000
--- a/drivers/firmware/qcom_scm-64.c
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/qcom_scm.h>
-#include <linux/arm-smccc.h>
-#include <linux/dma-mapping.h>
-
-#include "qcom_scm.h"
-
-#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
-
-#define MAX_QCOM_SCM_ARGS 10
-#define MAX_QCOM_SCM_RETS 3
-
-enum qcom_scm_arg_types {
- QCOM_SCM_VAL,
- QCOM_SCM_RO,
- QCOM_SCM_RW,
- QCOM_SCM_BUFVAL,
-};
-
-#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
- (((a) & 0x3) << 4) | \
- (((b) & 0x3) << 6) | \
- (((c) & 0x3) << 8) | \
- (((d) & 0x3) << 10) | \
- (((e) & 0x3) << 12) | \
- (((f) & 0x3) << 14) | \
- (((g) & 0x3) << 16) | \
- (((h) & 0x3) << 18) | \
- (((i) & 0x3) << 20) | \
- (((j) & 0x3) << 22) | \
- ((num) & 0xf))
-
-#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
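A worked expansion, using the enum values above (QCOM_SCM_VAL = 0, QCOM_SCM_RW = 2), shows how the argument count and per-argument types end up in one word:

/*
 * QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW)
 *   = ((0 & 0x3) << 4) | ((2 & 0x3) << 6) | (2 & 0xf)
 *   = 0x00 | 0x80 | 0x02
 *   = 0x82    (two arguments: a value, then a read-write buffer)
 */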
-
-/**
- * struct qcom_scm_desc - descriptor for an SCM secure syscall
- * @arginfo: Metadata describing the arguments in args[]
- * @args: The array of arguments for the secure syscall
- */
-struct qcom_scm_desc {
- u32 arginfo;
- u64 args[MAX_QCOM_SCM_ARGS];
-};
-
-static u64 qcom_smccc_convention = -1;
-static DEFINE_MUTEX(qcom_scm_lock);
-
-#define QCOM_SCM_EBUSY_WAIT_MS 30
-#define QCOM_SCM_EBUSY_MAX_RETRY 20
-
-#define N_EXT_QCOM_SCM_ARGS 7
-#define FIRST_EXT_ARG_IDX 3
-#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
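With MAX_QCOM_SCM_ARGS = 10 and N_EXT_QCOM_SCM_ARGS = 7 this gives N_REGISTER_ARGS = 4, i.e. the register layout sketched below (derived from __qcom_scm_call_do() further down):

/*
 *   x0 = SMCCC function id              x1 = desc->arginfo
 *   x2 = args[0]   x3 = args[1]   x4 = args[2]
 *   x5 = args[3], or, when more than N_REGISTER_ARGS arguments are
 *        passed, the physical address of a buffer holding args[3..9]
 */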
-
-static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res, u32 fn_id,
- u64 x5, u32 type)
-{
- u64 cmd;
- struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
-
- cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
- ARM_SMCCC_OWNER_SIP, fn_id);
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5,
- quirk.state.a6, 0, res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- cmd = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-}
-
-static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res, u32 fn_id,
- u64 x5, bool atomic)
-{
- int retry_count = 0;
-
- if (atomic) {
- __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
- return;
- }
-
- do {
- mutex_lock(&qcom_scm_lock);
-
- __qcom_scm_call_do(desc, res, fn_id, x5,
- ARM_SMCCC_STD_CALL);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
-}
-
-static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res, bool atomic)
-{
- int arglen = desc->arginfo & 0xf;
- int i;
- u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
- u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
- dma_addr_t args_phys = 0;
- void *args_virt = NULL;
- size_t alloc_len;
- gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
-
- if (unlikely(arglen > N_REGISTER_ARGS)) {
- alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
-
- if (!args_virt)
- return -ENOMEM;
-
- if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
- __le32 *args = args_virt;
-
- for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
- args[i] = cpu_to_le32(desc->args[i +
- FIRST_EXT_ARG_IDX]);
- } else {
- __le64 *args = args_virt;
-
- for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
- args[i] = cpu_to_le64(desc->args[i +
- FIRST_EXT_ARG_IDX]);
- }
-
- args_phys = dma_map_single(dev, args_virt, alloc_len,
- DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, args_phys)) {
- kfree(args_virt);
- return -ENOMEM;
- }
-
- x5 = args_phys;
- }
-
- qcom_scm_call_do(desc, res, fn_id, x5, atomic);
-
- if (args_virt) {
- dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
- kfree(args_virt);
- }
-
- if ((long)res->a0 < 0)
- return qcom_scm_remap_error(res->a0);
-
- return 0;
-}
-
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
- */
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
-{
- might_sleep();
- return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
-}
-
-/**
- * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- * @res: Structure containing results from SMC/HVC call
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This can be called in atomic context.
- */
-static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
-{
- return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
-}
-
-/**
- * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
- return -ENOTSUPP;
-}
-
-/**
- * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @dev: Device pointer
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
- const cpumask_t *cpus)
-{
- return -ENOTSUPP;
-}
-
-/**
- * __qcom_scm_cpu_power_down() - Power down the cpu
- * @flags: Flags to flush cache
- *
- * This is an endpoint for powering down the cpu. If there is a pending
- * interrupt, control returns from this function; otherwise the cpu jumps
- * to the warm boot entry point set for this cpu upon reset.
- */
-void __qcom_scm_cpu_power_down(u32 flags)
-{
-}
-
-int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.arginfo = QCOM_SCM_ARGS(1);
- desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
- (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
- u32 req_cnt, u32 *resp)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
- return -ERANGE;
-
- desc.args[0] = req[0].addr;
- desc.args[1] = req[0].val;
- desc.args[2] = req[1].addr;
- desc.args[3] = req[1].val;
- desc.args[4] = req[2].addr;
- desc.args[5] = req[2].val;
- desc.args[6] = req[3].addr;
- desc.args[7] = req[3].val;
- desc.args[8] = req[4].addr;
- desc.args[9] = req[4].val;
- desc.arginfo = QCOM_SCM_ARGS(10);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
- &res);
- *resp = res.a1;
-
- return ret;
-}
-
-int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset,
- uint32_t size, uint32_t mode)
-{
- return -ENOTSUPP;
-}
-
-int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset,
- uint32_t size)
-{
- return -ENOTSUPP;
-}
-
-void __qcom_scm_init(void)
-{
- u64 cmd;
- struct arm_smccc_res res;
- u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
-
- /* First try a SMC64 call */
- cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
- ARM_SMCCC_OWNER_SIP, function);
-
- arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
- 0, 0, 0, 0, 0, &res);
-
- if (!res.a0 && res.a1)
- qcom_smccc_convention = ARM_SMCCC_SMC_64;
- else
- qcom_smccc_convention = ARM_SMCCC_SMC_32;
-}
-
-bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = peripheral;
- desc.arginfo = QCOM_SCM_ARGS(1);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_IS_SUPPORTED_CMD,
- &desc, &res);
-
- return ret ? false : !!res.a1;
-}
-
-int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
- dma_addr_t metadata_phys)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = peripheral;
- desc.args[1] = metadata_phys;
- desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
- phys_addr_t addr, phys_addr_t size)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = peripheral;
- desc.args[1] = addr;
- desc.args[2] = size;
- desc.arginfo = QCOM_SCM_ARGS(3);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = peripheral;
- desc.arginfo = QCOM_SCM_ARGS(1);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = peripheral;
- desc.arginfo = QCOM_SCM_ARGS(1);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
- int ret;
-
- desc.args[0] = reset;
- desc.args[1] = 0;
- desc.arginfo = QCOM_SCM_ARGS(2);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
- &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
- int ret;
-
- desc.args[0] = state;
- desc.args[1] = id;
- desc.arginfo = QCOM_SCM_ARGS(2);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
- size_t mem_sz, phys_addr_t src, size_t src_sz,
- phys_addr_t dest, size_t dest_sz)
-{
- int ret;
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = mem_region;
- desc.args[1] = mem_sz;
- desc.args[2] = src;
- desc.args[3] = src_sz;
- desc.args[4] = dest;
- desc.args[5] = dest_sz;
- desc.args[6] = 0;
-
- desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
- QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
- QCOM_SCM_VAL, QCOM_SCM_VAL);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
- QCOM_MEM_PROT_ASSIGN_ID,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
- int ret;
-
- desc.args[0] = device_id;
- desc.args[1] = spare;
- desc.arginfo = QCOM_SCM_ARGS(2);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
- &desc, &res);
-
- return ret ? : res.a1;
-}
-
-int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
- size_t *size)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
- int ret;
-
- desc.args[0] = spare;
- desc.arginfo = QCOM_SCM_ARGS(1);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
- QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);
-
- if (size)
- *size = res.a1;
-
- return ret ? : res.a2;
-}
-
-int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
- u32 spare)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
- int ret;
-
- desc.args[0] = addr;
- desc.args[1] = size;
- desc.args[2] = spare;
- desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
- QCOM_SCM_VAL);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
- QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);
-
- /* the pg table has been initialized already, ignore the error */
- if (ret == -EPERM)
- ret = 0;
-
- return ret;
-}
-
-int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
- desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
- desc.arginfo = QCOM_SCM_ARGS(2);
-
- return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
- &desc, &res);
-}
-
-int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
- unsigned int *val)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
- int ret;
-
- desc.args[0] = addr;
- desc.arginfo = QCOM_SCM_ARGS(1);
-
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
- &desc, &res);
- if (ret >= 0)
- *val = res.a1;
-
- return ret < 0 ? ret : 0;
-}
-
-int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = addr;
- desc.args[1] = val;
- desc.arginfo = QCOM_SCM_ARGS(2);
-
- return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
- &desc, &res);
-}
-
-int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
-{
- struct qcom_scm_desc desc = {0};
- struct arm_smccc_res res;
-
- desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
- desc.args[1] = en;
- desc.arginfo = QCOM_SCM_ARGS(2);
-
- return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
- QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
-}
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
new file mode 100644
index 000000000000..8532e7c78ef7
--- /dev/null
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+/**
+ * struct scm_legacy_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_legacy_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ * ------------------- <--- struct scm_legacy_command
+ * | command header |
+ * ------------------- <--- scm_legacy_get_command_buffer()
+ * | command buffer |
+ * ------------------- <--- struct scm_legacy_response and
+ * | response header | scm_legacy_command_to_response()
+ * ------------------- <--- scm_legacy_get_response_buffer()
+ * | response buffer |
+ * -------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_legacy_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_legacy_command {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 resp_hdr_offset;
+ __le32 id;
+ __le32 buf[0];
+};
+
+/**
+ * struct scm_legacy_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_legacy_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_legacy_response {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 is_complete;
+};
+
+/**
+ * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_legacy_response *scm_legacy_command_to_response(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_legacy_get_command_buffer(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd->buf;
+}
+
+/**
+ * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_legacy_get_response_buffer(
+ const struct scm_legacy_response *rsp)
+{
+ return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
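As a worked example of the layout above (editorial sketch): sizeof(struct scm_legacy_command) is 16 bytes, since the flexible buf[] member contributes nothing, and sizeof(struct scm_legacy_response) is 12.

/*
 * For a call with two u32 arguments:
 *
 *      cmd->buf_offset      = 16                (arguments start here)
 *      cmd->resp_hdr_offset = 16 + 2 * 4 = 24   (response header)
 *      response data        = 24 + le32_to_cpu(rsp->buf_offset)
 *
 * rsp->buf_offset is filled in by the secure world, relative to the
 * response header, which is why the accessors must be used rather than
 * assuming a fixed layout.
 */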
+
+static void __scm_legacy_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ do {
+ arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ smc->args[6], smc->args[7], res);
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+/**
+ * scm_legacy_call() - Send a command to the SCM and wait for it to
+ *		       finish processing
+ * @dev: device, used to map the command buffer for the secure world
+ * @desc: descriptor with the service, command and arguments
+ * @res: values returned by the secure world
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_legacy_call() and invalidated in the
+ * cache immediately after scm_legacy_call() returns. Cache maintenance on the
+ * command and response buffers is taken care of by scm_legacy_call(); however,
+ * callers are responsible for any other cached buffers passed over to the
+ * secure world.
+ */
+int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ u8 arglen = desc->arginfo & 0xf;
+ int ret = 0, context_id;
+ unsigned int i;
+ struct scm_legacy_command *cmd;
+ struct scm_legacy_response *rsp;
+ struct arm_smccc_args smc = {0};
+ struct arm_smccc_res smc_res;
+ const size_t cmd_len = arglen * sizeof(__le32);
+ const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
+ size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+ dma_addr_t cmd_phys;
+ __le32 *arg_buf;
+ const __le32 *res_buf;
+
+ cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = cpu_to_le32(alloc_len);
+ cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+ cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+ cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
+
+ arg_buf = scm_legacy_get_command_buffer(cmd);
+ for (i = 0; i < arglen; i++)
+ arg_buf[i] = cpu_to_le32(desc->args[i]);
+
+ rsp = scm_legacy_command_to_response(cmd);
+
+ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cmd_phys)) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ smc.args[0] = 1;
+ smc.args[1] = (unsigned long)&context_id;
+ smc.args[2] = cmd_phys;
+
+ mutex_lock(&qcom_scm_lock);
+ __scm_legacy_do(&smc, &smc_res);
+ if (smc_res.a0)
+ ret = qcom_scm_remap_error(smc_res.a0);
+ mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ goto out;
+
+ do {
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+ sizeof(*rsp), DMA_FROM_DEVICE);
+ } while (!rsp->is_complete);
+
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+ le32_to_cpu(rsp->buf_offset),
+ resp_len, DMA_FROM_DEVICE);
+
+ if (res) {
+ res_buf = scm_legacy_get_response_buffer(rsp);
+ for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
+ res->result[i] = le32_to_cpu(res_buf[i]);
+ }
+out:
+ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
+ return ret;
+}
+
+#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
+#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
+#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
+#define SCM_LEGACY_MASK_IRQS BIT(5)
+#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
+ ((SCM_LEGACY_FNID(svc, cmd) << 12) | \
+ SCM_LEGACY_CLASS_REGISTER | \
+ SCM_LEGACY_MASK_IRQS | \
+ (n & 0xf))
+
+/**
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to
+ * 5 arguments and 3 return values
+ * @unused: device, unused on the legacy atomic path
+ * @desc: SCM call descriptor containing arguments
+ * @res:  SCM call return values
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+int scm_legacy_call_atomic(struct device *unused,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ int context_id;
+ struct arm_smccc_res smc_res;
+ size_t arglen = desc->arginfo & 0xf;
+
+ BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
+
+ arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
+ (unsigned long)&context_id,
+ desc->args[0], desc->args[1], desc->args[2],
+ desc->args[3], desc->args[4], 0, &smc_res);
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return smc_res.a0;
+}
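An illustrative caller, mirroring qcom_scm_cpu_power_down() in qcom_scm.c (example_power_down() is a made-up name): the device argument is unused on this path, so NULL is acceptable.

static void example_power_down(u32 flags)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        /* no return values needed, and no device on the legacy path */
        scm_legacy_call_atomic(NULL, &desc, NULL);
}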
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
new file mode 100644
index 000000000000..497c13ba98d6
--- /dev/null
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define SCM_SMC_N_REG_ARGS 4
+#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
+#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
+#define SCM_SMC_FIRST_REG_IDX 2
+#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
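The constants above encode the following register layout (a summary of what scm_smc_call() below actually does):

/*
 *   x0 = SMCCC function id          x1 = desc->arginfo
 *   x2..x5 = desc->args[0..3]       (SCM_SMC_FIRST_REG_IDX = 2)
 *
 * With more than SCM_SMC_N_REG_ARGS (4) arguments, args[3..9] are
 * copied into a DMA-mapped buffer and x5 (SCM_SMC_LAST_REG_IDX)
 * carries that buffer's physical address instead of args[3].
 */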
+
+static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ unsigned long a0 = smc->args[0];
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ quirk.state.a6, smc->args[7], res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ a0 = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void __scm_smc_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __scm_smc_do_quirk(smc, res);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __scm_smc_do_quirk(smc, res);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res, bool atomic)
+{
+ int arglen = desc->arginfo & 0xf;
+ int i;
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+ u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+ u32 qcom_smccc_convention =
+ (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ struct arm_smccc_res smc_res;
+ struct arm_smccc_args smc = {0};
+
+ smc.args[0] = ARM_SMCCC_CALL_VAL(
+ smccc_call_type,
+ qcom_smccc_convention,
+ desc->owner,
+ SCM_SMC_FNID(desc->svc, desc->cmd));
+ smc.args[1] = desc->arginfo;
+ for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
+ smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+
+ if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
+
+ if (!args_virt)
+ return -ENOMEM;
+
+ if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+ __le32 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le32(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ } else {
+ __le64 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le64(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ }
+
+ args_phys = dma_map_single(dev, args_virt, alloc_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, args_phys)) {
+ kfree(args_virt);
+ return -ENOMEM;
+ }
+
+ smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+ }
+
+ __scm_smc_do(&smc, &smc_res, atomic);
+
+ if (args_virt) {
+ dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(args_virt);
+ }
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+}
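A minimal caller sketch, condensed from __query_convention() in qcom_scm.c below; example_probe_convention() is a made-up name. A single register argument needs no DMA-mapped buffer, so the device may be NULL and the call can be made atomically.

static int example_probe_convention(void)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_INFO,
                .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
                .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
                                        QCOM_SCM_INFO_IS_CALL_AVAIL) |
                           (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
                .arginfo = QCOM_SCM_ARGS(1),
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        ret = scm_smc_call(NULL, &desc, &res, true);

        return ret ? : res.result[0];   /* 1 if the probe call exists */
}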
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 1ba0df4b97ab..059bb0fbae9e 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1,8 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Qualcomm SCM driver
- *
- * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/platform_device.h>
@@ -19,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
+#include <linux/arm-smccc.h>
#include "qcom_scm.h"
@@ -52,6 +50,35 @@ struct qcom_scm_mem_map_info {
__le64 mem_size;
};
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
+
+struct qcom_scm_wb_entry {
+ int flag;
+ void *entry;
+};
+
+static struct qcom_scm_wb_entry qcom_scm_wb[] = {
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static const char *qcom_scm_convention_names[] = {
+ [SMC_CONVENTION_UNKNOWN] = "unknown",
+ [SMC_CONVENTION_ARM_32] = "smc arm 32",
+ [SMC_CONVENTION_ARM_64] = "smc arm 64",
+ [SMC_CONVENTION_LEGACY] = "smc legacy",
+};
+
static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
@@ -87,149 +114,308 @@ static void qcom_scm_clk_disable(void)
clk_disable_unprepare(__scm->bus_clk);
}
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id);
+
+enum qcom_scm_convention qcom_scm_convention;
+static bool has_queried __read_mostly;
+static DEFINE_SPINLOCK(query_lock);
+
+static void __query_convention(void)
{
- return __qcom_scm_set_cold_boot_addr(entry, cpus);
+ unsigned long flags;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+ QCOM_SCM_INFO_IS_CALL_AVAIL) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ spin_lock_irqsave(&query_lock, flags);
+ if (has_queried)
+ goto out;
+
+ qcom_scm_convention = SMC_CONVENTION_ARM_64;
+	/*
+	 * No device is required here: the probe passes a single register
+	 * argument, so nothing needs to be dma_map_single()'d for the
+	 * secure world.
+	 */
+ ret = scm_smc_call(NULL, &desc, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto out;
+
+ qcom_scm_convention = SMC_CONVENTION_ARM_32;
+ ret = scm_smc_call(NULL, &desc, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto out;
+
+ qcom_scm_convention = SMC_CONVENTION_LEGACY;
+out:
+ has_queried = true;
+ spin_unlock_irqrestore(&query_lock, flags);
+ pr_info("qcom_scm: convention: %s\n",
+ qcom_scm_convention_names[qcom_scm_convention]);
}
-EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+static inline enum qcom_scm_convention __get_convention(void)
{
- return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+ if (unlikely(!has_queried))
+ __query_convention();
+ return qcom_scm_convention;
}
-EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
/**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
*
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
*/
-void qcom_scm_cpu_power_down(u32 flags)
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
{
- __qcom_scm_cpu_power_down(flags);
+ might_sleep();
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, false);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
}
-EXPORT_SYMBOL(qcom_scm_cpu_power_down);
/**
- * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
*
- * Return true if HDCP is supported, false if not.
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
*/
-bool qcom_scm_hdcp_available(void)
+static int qcom_scm_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
{
- int ret = qcom_scm_clk_enable();
-
- if (ret)
- return ret;
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, true);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call_atomic(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
- QCOM_SCM_CMD_HDCP);
+static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+ break;
+ case SMC_CONVENTION_LEGACY:
+ desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+ break;
+ default:
+ pr_err("Unknown SMC convention being used\n");
+ return -EINVAL;
+ }
- qcom_scm_clk_disable();
+ ret = qcom_scm_call(dev, &desc, &res);
- return ret > 0 ? true : false;
+ return ret ? : res.result[0];
}
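Callers treat a positive return as "available"; qcom_scm_pas_supported() further down gates its real call on exactly this probe, roughly:

	if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					 QCOM_SCM_PIL_PAS_IS_SUPPORTED) <= 0)
		return false;	/* errors and "not available" both bail */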
-EXPORT_SYMBOL(qcom_scm_hdcp_available);
/**
- * qcom_scm_hdcp_req() - Send HDCP request.
- * @req: HDCP request array
- * @req_cnt: HDCP request array count
- * @resp: response buffer passed to SCM
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
*
- * Write HDCP register(s) through SCM.
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
-int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
- int ret = qcom_scm_clk_enable();
+ int ret;
+ int flags = 0;
+ int cpu;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_ADDR,
+ .arginfo = QCOM_SCM_ARGS(2),
+ };
- if (ret)
- return ret;
+ /*
+ * Reassign only if we are switching from hotplug entry point
+ * to cpuidle entry point or vice versa.
+ */
+ for_each_cpu(cpu, cpus) {
+ if (entry == qcom_scm_wb[cpu].entry)
+ continue;
+ flags |= qcom_scm_wb[cpu].flag;
+ }
+
+ /* No change in entry function */
+ if (!flags)
+ return 0;
+
+ desc.args[0] = flags;
+ desc.args[1] = virt_to_phys(entry);
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret) {
+ for_each_cpu(cpu, cpus)
+ qcom_scm_wb[cpu].entry = entry;
+ }
- ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
- qcom_scm_clk_disable();
return ret;
}
-EXPORT_SYMBOL(qcom_scm_hdcp_req);
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
/**
- * qcom_scm_pas_supported() - Check if the peripheral authentication service is
- * available for the given peripheral
- * @peripheral: peripheral id
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
*
- * Returns true if PAS is supported for this peripheral, otherwise false.
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
*/
-bool qcom_scm_pas_supported(u32 peripheral)
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
- int ret;
+ int flags = 0;
+ int cpu;
+ int scm_cb_flags[] = {
+ QCOM_SCM_FLAG_COLDBOOT_CPU0,
+ QCOM_SCM_FLAG_COLDBOOT_CPU1,
+ QCOM_SCM_FLAG_COLDBOOT_CPU2,
+ QCOM_SCM_FLAG_COLDBOOT_CPU3,
+ };
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_ADDR,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+	if (!cpus || cpumask_empty(cpus))
+ return -EINVAL;
- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_IS_SUPPORTED_CMD);
- if (ret <= 0)
- return false;
+ for_each_cpu(cpu, cpus) {
+ if (cpu < ARRAY_SIZE(scm_cb_flags))
+ flags |= scm_cb_flags[cpu];
+ else
+ set_cpu_present(cpu, false);
+ }
+
+ desc.args[0] = flags;
+ desc.args[1] = virt_to_phys(entry);
- return __qcom_scm_pas_supported(__scm->dev, peripheral);
+ return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
-EXPORT_SYMBOL(qcom_scm_pas_supported);
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
/**
- * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an endpoint for powering down the cpu. If there is a pending
+ * interrupt, control returns from this function; otherwise the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
*/
-bool qcom_scm_ocmem_lock_available(void)
+void qcom_scm_cpu_power_down(u32 flags)
{
- return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
- QCOM_SCM_OCMEM_LOCK_CMD);
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
+ .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
-EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+EXPORT_SYMBOL(qcom_scm_cpu_power_down);
-/**
- * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
- * region to the specified initiator
- *
- * @id: tz initiator id
- * @offset: OCMEM offset
- * @size: OCMEM size
- * @mode: access mode (WIDE/NARROW)
- */
-int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
- u32 mode)
+int qcom_scm_set_remote_state(u32 state, u32 id)
{
- return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = state,
+ .args[1] = id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
}
-EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+EXPORT_SYMBOL(qcom_scm_set_remote_state);
-/**
- * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
- * region from the specified initiator
- *
- * @id: tz initiator id
- * @offset: OCMEM offset
- * @size: OCMEM size
- */
-int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
- return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+ bool avail;
+ int ret = 0;
+
+ avail = __qcom_scm_is_call_available(__scm->dev,
+ QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE);
+ if (avail) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ } else if (__scm->dload_mode_addr) {
+ ret = qcom_scm_io_writel(__scm->dload_mode_addr,
+ enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+
+ if (ret)
+ dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
-EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
/**
* qcom_scm_pas_init_image() - Initialize peripheral authentication service
@@ -248,6 +434,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
dma_addr_t mdata_phys;
void *mdata_buf;
int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
/*
* During the scm call memory protection will be enabled for the meta
@@ -266,14 +460,16 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
if (ret)
goto free_metadata;
- ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+ desc.args[1] = mdata_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
free_metadata:
dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
- return ret;
+ return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
@@ -289,15 +485,25 @@ EXPORT_SYMBOL(qcom_scm_pas_init_image);
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
+ .arginfo = QCOM_SCM_ARGS(3),
+ .args[0] = peripheral,
+ .args[1] = addr,
+ .args[2] = size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
- ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
- return ret;
+ return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
@@ -311,15 +517,23 @@ EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
- ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_clk_disable();
- return ret;
+ return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
@@ -332,18 +546,75 @@ EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
int qcom_scm_pas_shutdown(u32 peripheral)
{
int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
- ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
qcom_scm_clk_disable();
- return ret;
+ return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PIL_PAS_IS_SUPPORTED);
+ if (ret <= 0)
+ return false;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? false : !!res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = reset,
+ .args[1] = 0,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
@@ -367,6 +638,43 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
.deassert = qcom_scm_pas_reset_deassert,
};
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_IO,
+ .cmd = QCOM_SCM_IO_READ,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = addr,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret >= 0)
+ *val = res.result[0];
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_IO,
+ .cmd = QCOM_SCM_IO_WRITE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = addr,
+ .args[1] = val,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
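Both helpers take physical addresses and are meant for registers the normal world cannot access directly; a hypothetical read-modify-write looks like this (phys_addr is a placeholder):

	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(phys_addr, &val);
	if (!ret)
		ret = qcom_scm_io_writel(phys_addr, val | BIT(0));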
+
/**
* qcom_scm_restore_sec_cfg_available() - Check if secure environment
* supports restore security config interface.
@@ -376,108 +684,106 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
bool qcom_scm_restore_sec_cfg_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
- QCOM_SCM_RESTORE_SEC_CFG);
+ QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
- return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
-}
-EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
-
-int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
-{
- return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
-}
-EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
-
-int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
-{
- return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
-}
-EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = device_id,
+ .args[1] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
-int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
-{
- return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
-}
-EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
-int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
-{
- return __qcom_scm_io_readl(__scm->dev, addr, val);
+ return ret ? : res.result[0];
}
-EXPORT_SYMBOL(qcom_scm_io_readl);
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
-int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
- return __qcom_scm_io_writel(__scm->dev, addr, val);
-}
-EXPORT_SYMBOL(qcom_scm_io_writel);
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
-static void qcom_scm_set_download_mode(bool enable)
-{
- bool avail;
- int ret = 0;
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
- avail = __qcom_scm_is_call_available(__scm->dev,
- QCOM_SCM_SVC_BOOT,
- QCOM_SCM_SET_DLOAD_MODE);
- if (avail) {
- ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
- } else if (__scm->dload_mode_addr) {
- ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
- enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
- } else {
- dev_err(__scm->dev,
- "No available mechanism for setting download mode\n");
- }
+ if (size)
+ *size = res.result[0];
- if (ret)
- dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+ return ret ? : res.result[1];
}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
-static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
- struct device_node *tcsr;
- struct device_node *np = dev->of_node;
- struct resource res;
- u32 offset;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
+ .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_VAL),
+ .args[0] = addr,
+ .args[1] = size,
+ .args[2] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
int ret;
- tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
- if (!tcsr)
- return 0;
-
- ret = of_address_to_resource(tcsr, 0, &res);
- of_node_put(tcsr);
- if (ret)
- return ret;
-
- ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
- if (ret < 0)
- return ret;
- *addr = res.start + offset;
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
- return 0;
-}
+	/* the page table has been initialized already, ignore the error */
+ if (ret == -EPERM)
+ ret = 0;
-/**
- * qcom_scm_is_available() - Checks if SCM is available
- */
-bool qcom_scm_is_available(void)
-{
- return !!__scm;
+ return ret;
}
-EXPORT_SYMBOL(qcom_scm_is_available);
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
-int qcom_scm_set_remote_state(u32 state, u32 id)
+static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
{
- return __qcom_scm_set_remote_state(__scm->dev, state, id);
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_ASSIGN,
+ .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[0] = mem_region,
+ .args[1] = mem_sz,
+ .args[2] = src,
+ .args[3] = src_sz,
+ .args[4] = dest,
+ .args[5] = dest_sz,
+ .args[6] = 0,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_call(dev, &desc, &res);
+
+ return ret ? : res.result[0];
}
-EXPORT_SYMBOL(qcom_scm_set_remote_state);
/**
* qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
@@ -561,6 +867,184 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
+ QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ * @mode: access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_OCMEM,
+ .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
+ .args[0] = id,
+ .args[1] = offset,
+ .args[2] = size,
+ .args[3] = mode,
+ .arginfo = QCOM_SCM_ARGS(4),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_OCMEM,
+ .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
+ .args[0] = id,
+ .args[1] = offset,
+ .args[2] = size,
+ .arginfo = QCOM_SCM_ARGS(3),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
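A hedged usage sketch (not part of the patch): the OCMEM allocator pairs these calls around a region's lifetime. The client id comes from enum qcom_scm_ocmem_client in <linux/qcom_scm.h>; the offset, size and mode values below are illustrative.

	#include <linux/qcom_scm.h>
	#include <linux/sizes.h>

	static int example_use_ocmem(void)	/* hypothetical */
	{
		int ret;

		/* Lock 256 KiB of OCMEM at offset 0 for the graphics initiator. */
		ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_256K, 0);
		if (ret)
			return ret;

		/* ... initiator uses the region ... */

		return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_256K);
	}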
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+ int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return false;
+
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ QCOM_SCM_HDCP_INVOKE);
+
+ qcom_scm_clk_disable();
+
+	return ret > 0;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_HDCP,
+ .cmd = QCOM_SCM_HDCP_INVOKE,
+ .arginfo = QCOM_SCM_ARGS(10),
+ .args = {
+ req[0].addr,
+ req[0].val,
+ req[1].addr,
+ req[1].val,
+ req[2].addr,
+ req[2].val,
+ req[3].addr,
+ req[3].val,
+ req[4].addr,
+ req[4].val
+ },
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ *resp = res.result[0];
+
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
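A hedged caller sketch (not part of the patch; the register address and value are illustrative). struct qcom_scm_hdcp_req is the addr/val pair declared in <linux/qcom_scm.h>; unused trailing entries may stay zeroed since the call only consumes req_cnt of them:

	#include <linux/qcom_scm.h>

	static int example_hdcp_write(void)	/* hypothetical */
	{
		struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
			{ .addr = 0x1000, .val = 0x1 },	/* illustrative */
		};
		u32 resp;

		return qcom_scm_hdcp_req(req, 1, &resp);
	}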
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+ .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+ .args[1] = en,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+ struct device_node *tcsr;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ u32 offset;
+ int ret;
+
+ tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+ if (!tcsr)
+ return 0;
+
+ ret = of_address_to_resource(tcsr, 0, &res);
+ of_node_put(tcsr);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+ if (ret < 0)
+ return ret;
+
+ *addr = res.start + offset;
+
+ return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
@@ -631,7 +1115,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm = scm;
__scm->dev = &pdev->dev;
- __qcom_scm_init();
+ __query_convention();
/*
* If requested enable "download mode", from this point on warmboot
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 81dcf5f1138e..d9ed670da222 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -1,71 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_SCM_INT_H
#define __QCOM_SCM_INT_H
-#define QCOM_SCM_SVC_BOOT 0x1
-#define QCOM_SCM_BOOT_ADDR 0x1
-#define QCOM_SCM_SET_DLOAD_MODE 0x10
-#define QCOM_SCM_BOOT_ADDR_MC 0x11
-#define QCOM_SCM_SET_REMOTE_STATE 0xa
-extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
-extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);
-
-#define QCOM_SCM_FLAG_HLOS 0x01
-#define QCOM_SCM_FLAG_COLDBOOT_MC 0x02
-#define QCOM_SCM_FLAG_WARMBOOT_MC 0x04
-extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
- const cpumask_t *cpus);
-extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
-
-#define QCOM_SCM_CMD_TERMINATE_PC 0x2
+enum qcom_scm_convention {
+ SMC_CONVENTION_UNKNOWN,
+ SMC_CONVENTION_LEGACY,
+ SMC_CONVENTION_ARM_32,
+ SMC_CONVENTION_ARM_64,
+};
+
+extern enum qcom_scm_convention qcom_scm_convention;
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0x3) << 4) | \
+ (((b) & 0x3) << 6) | \
+ (((c) & 0x3) << 8) | \
+ (((d) & 0x3) << 10) | \
+ (((e) & 0x3) << 12) | \
+ (((f) & 0x3) << 14) | \
+ (((g) & 0x3) << 16) | \
+ (((h) & 0x3) << 18) | \
+ (((i) & 0x3) << 20) | \
+ (((j) & 0x3) << 22) | \
+ ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
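To make the packing concrete, a worked expansion (annotation only, not part of the header):

	/*
	 * The low nibble carries the argument count; each argument type takes
	 * two bits starting at bit 4 (QCOM_SCM_VAL is 0, so by-value arguments
	 * vanish from the mask):
	 *
	 *   QCOM_SCM_ARGS(1)                                        == 0x01
	 *   QCOM_SCM_ARGS(2, QCOM_SCM_RO)                           == 0x12
	 *   QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL) == 0x23
	 */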
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+ u32 svc;
+ u32 cmd;
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+ u32 owner;
+};
+
+/**
+ * struct qcom_scm_res
+ * @result: The values returned by the secure syscall
+ */
+struct qcom_scm_res {
+ u64 result[MAX_QCOM_SCM_RETS];
+};
+
+#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res, bool atomic);
+
+#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
+extern int scm_legacy_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+
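For example, the IO-read call packs as follows under the two conventions (worked values, annotation only):

	/*
	 * SCM_SMC_FNID(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ)
	 *	== ((0x05 & 0xFF) << 8) | (0x01 & 0xFF) == 0x0501
	 * SCM_LEGACY_FNID(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ)
	 *	== (0x05 << 10) | (0x01 & 0x3ff)        == 0x1401
	 */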
+#define QCOM_SCM_SVC_BOOT 0x01
+#define QCOM_SCM_BOOT_SET_ADDR 0x01
+#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
+#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
+#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
-#define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10
-extern void __qcom_scm_cpu_power_down(u32 flags);
-#define QCOM_SCM_SVC_IO 0x5
-#define QCOM_SCM_IO_READ 0x1
-#define QCOM_SCM_IO_WRITE 0x2
-extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
-extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+#define QCOM_SCM_SVC_PIL 0x02
+#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01
+#define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02
+#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05
+#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06
+#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07
+#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a
+
+#define QCOM_SCM_SVC_IO 0x05
+#define QCOM_SCM_IO_READ 0x01
+#define QCOM_SCM_IO_WRITE 0x02
+
+#define QCOM_SCM_SVC_INFO 0x06
+#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01
-#define QCOM_SCM_SVC_INFO 0x6
-#define QCOM_IS_CALL_AVAIL_CMD 0x1
-extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
- u32 cmd_id);
+#define QCOM_SCM_SVC_MP 0x0c
+#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
+#define QCOM_SCM_MP_ASSIGN 0x16
+
+#define QCOM_SCM_SVC_OCMEM 0x0f
+#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
+#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02
#define QCOM_SCM_SVC_HDCP 0x11
-#define QCOM_SCM_CMD_HDCP 0x01
-extern int __qcom_scm_hdcp_req(struct device *dev,
- struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+#define QCOM_SCM_HDCP_INVOKE 0x01
-extern void __qcom_scm_init(void);
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
-#define QCOM_SCM_OCMEM_SVC 0xf
-#define QCOM_SCM_OCMEM_LOCK_CMD 0x1
-#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x2
-
-extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset,
- u32 size, u32 mode);
-extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset,
- u32 size);
-
-#define QCOM_SCM_SVC_PIL 0x2
-#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
-#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
-#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5
-#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6
-#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7
-#define QCOM_SCM_PAS_MSS_RESET 0xa
-extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
-extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
- dma_addr_t metadata_phys);
-extern int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
- phys_addr_t addr, phys_addr_t size);
-extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
-extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
-extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
+extern void __qcom_scm_init(void);
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
@@ -94,25 +139,4 @@ static inline int qcom_scm_remap_error(int err)
return -EINVAL;
}
-#define QCOM_SCM_SVC_MP 0xc
-#define QCOM_SCM_RESTORE_SEC_CFG 2
-extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
- u32 spare);
-#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
-#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
-#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
-#define QCOM_SCM_CONFIG_ERRATA1 0x3
-#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
-extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
- size_t *size);
-extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
- u32 size, u32 spare);
-extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
- bool enable);
-#define QCOM_MEM_PROT_ASSIGN_ID 0x16
-extern int __qcom_scm_assign_mem(struct device *dev,
- phys_addr_t mem_region, size_t mem_sz,
- phys_addr_t src, size_t src_sz,
- phys_addr_t dest, size_t dest_sz);
-
#endif
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c6c31402848d..7ffb42b0775e 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -268,7 +268,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
*/
msleep(1000);
count_in_sec--;
- };
+ }
if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
@@ -512,7 +512,7 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
- };
+ }
kfree(cbdata);
kfree(pdata);
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 72be58960e54..e27f68437b56 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -197,7 +197,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
rwtm->serial_number = reply->status[1];
rwtm->serial_number <<= 32;
rwtm->serial_number |= reply->status[0];
- rwtm->board_version = reply->status[2];
+ rwtm->board_version = reply->status[2];
rwtm->ram_size = reply->status[3];
reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
reply->status[5]);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 75bdfaa08380..ecc339d846de 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -26,6 +26,9 @@
static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+static bool feature_check_enabled;
+static u32 zynqmp_pm_features[PM_API_MAX];
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -44,10 +47,14 @@ static int zynqmp_pm_ret_code(u32 ret_status)
case XST_PM_SUCCESS:
case XST_PM_DOUBLE_REQ:
return 0;
+ case XST_PM_NO_FEATURE:
+ return -ENOTSUPP;
case XST_PM_NO_ACCESS:
return -EACCES;
case XST_PM_ABORT_SUSPEND:
return -ECANCELED;
+ case XST_PM_MULT_USER:
+ return -EUSERS;
case XST_PM_INTERNAL:
case XST_PM_CONFLICT:
case XST_PM_INVALID_NODE:
@@ -127,6 +134,39 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
}
/**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u64 smc_arg[2];
+
+ if (!feature_check_enabled)
+ return 0;
+
+ /* Return value if feature is already checked */
+ if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+ return zynqmp_pm_features[api_id];
+
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret) {
+ zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+ return PM_FEATURE_INVALID;
+ }
+
+ zynqmp_pm_features[api_id] = ret_payload[1];
+
+ return zynqmp_pm_features[api_id];
+}
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -160,6 +200,9 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
*/
u64 smc_arg[4];
+ if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+ return -ENOTSUPP;
+
smc_arg[0] = PM_SIP_SVC | pm_api_id;
smc_arg[1] = ((u64)arg1 << 32) | arg0;
smc_arg[2] = ((u64)arg3 << 32) | arg2;
@@ -715,6 +758,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
if (!np)
return 0;
+
+ feature_check_enabled = true;
}
of_node_put(np);
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index e4a34dc7947f..65437b6a6842 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -813,10 +813,8 @@ static int afu_dev_init(struct platform_device *pdev)
static int afu_dev_destroy(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct dfl_afu *afu;
mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
afu_mmio_region_destroy(pdata);
afu_dma_region_destroy(pdata);
dfl_fpga_pdata_set_private(pdata, NULL);
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 7c930e6b314d..1d4690c99268 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -675,10 +675,8 @@ static int fme_dev_init(struct platform_device *pdev)
static void fme_dev_destroy(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct dfl_fme *fme;
mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
dfl_fpga_pdata_set_private(pdata, NULL);
mutex_unlock(&pdata->lock);
}
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 9a17fe98c1b0..2888ff000e4d 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -119,10 +119,8 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->io_base = devm_ioremap_resource(kdev, res);
- if (IS_ERR(priv->io_base)) {
- dev_err(kdev, "unable to remap registers\n");
+ if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
- }
mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
&ts73xx_fpga_ops, priv);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index af9b387c56d3..7d69af230567 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -101,7 +101,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&pdev->dev, "aclk");
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "input clock not found\n");
+ if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
return PTR_ERR(priv->clk);
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8adffd42f8cb..b8013cf90064 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -312,6 +312,12 @@ config GPIO_IXP4XX
IXP4xx series of chips.
If unsure, say N.
+config GPIO_LOGICVC
+ tristate "Xylon LogiCVC GPIO support"
+ depends on MFD_SYSCON && OF
+ help
+ Say yes here to support GPIO functionality of the Xylon LogiCVC
+ programmable logic block.
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
@@ -335,14 +341,6 @@ config GPIO_LPC32XX
Select this option to enable GPIO driver for
NXP LPC32XX devices.
-config GPIO_LYNXPOINT
- tristate "Intel Lynxpoint GPIO support"
- depends on ACPI && X86
- select GPIOLIB_IRQCHIP
- help
- driver for GPIO functionality on Intel Lynxpoint PCH chipset
- Requires ACPI device enumeration code to set up a platform device.
-
config GPIO_MB86S7X
tristate "GPIO support for Fujitsu MB86S7x Platforms"
help
@@ -479,6 +477,15 @@ config GPIO_SAMA5D2_PIOBU
The difference from regular GPIOs is that they
maintain their value during backup/self-refresh.
+config GPIO_SIFIVE
+ bool "SiFive GPIO support"
+ depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
+ help
+ Say yes here to support the GPIO device on SiFive SoCs.
+
config GPIO_SIOX
tristate "SIOX GPIO support"
depends on SIOX
@@ -553,8 +560,8 @@ config GPIO_TEGRA
config GPIO_TEGRA186
tristate "NVIDIA Tegra186 GPIO support"
- default ARCH_TEGRA_186_SOC
- depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+ default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
+ depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -613,6 +620,13 @@ config GPIO_VX855
additional drivers must be enabled in order to use the
functionality of the device.
+config GPIO_WCD934X
+ tristate "Qualcomm Technologies Inc WCD9340/WCD9341 gpio controller driver"
+ depends on MFD_WCD934X && OF_GPIO
+ help
+	  This driver supports the GPIO block found on the Qualcomm Technologies
+	  Inc WCD9340/WCD9341 Audio Codec.
+
config GPIO_XGENE
bool "APM X-Gene GPIO controller support"
depends on ARM64 && OF_GPIO
@@ -1021,6 +1035,18 @@ config GPIO_BD70528
This driver can also be built as a module. If so, the module
will be called gpio-bd70528.
+config GPIO_BD71828
+ tristate "ROHM BD71828 GPIO support"
+ depends on MFD_ROHM_BD71828
+ help
+ Support for GPIOs on ROHM BD71828 PMIC. There are three GPIOs
+ available on the ROHM PMIC in total. The GPIOs are limited to
+ outputs only and pins must be configured to GPIO outputs by
+ OTP. Enable this only if you want to use these pins as outputs.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-bd71828.
+
config GPIO_BD9571MWV
tristate "ROHM BD9571 GPIO support"
depends on MFD_BD9571MWV
@@ -1148,6 +1174,7 @@ config GPIO_MADERA
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
+ select GPIOLIB_IRQCHIP
help
GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 34eb8b2b12dd..0b571264ddbc 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD70528) += gpio-bd70528.o
+obj-$(CONFIG_GPIO_BD71828) += gpio-bd71828.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
@@ -69,6 +70,7 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
+obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
@@ -76,7 +78,6 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
-obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -124,6 +125,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
@@ -157,6 +159,7 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 76f8c7ff18ff..3a44e6ae52bd 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -10,6 +10,28 @@ approach. This means that GPIO consumers, drivers and machine descriptions
ideally have no use or idea of the global GPIO numberspace that has/was
used in the inception of the GPIO subsystem.
+The numberspace issue is the same reason the irq subsystem is moving away
+from irq numbers to IRQ descriptors.
+
+The underlying motivation for this is that the GPIO numberspace has become
+unmanageable: machine board files tend to become full of macros trying to
+establish the numberspace at compile-time, making it hard to add any numbers
+in the middle (such as if you missed a pin on a chip) without the numberspace
+breaking.
+
+Machine descriptions such as device tree or ACPI do not have a concept of the
+Linux GPIO number, as those descriptions are external to the Linux kernel
+and treat GPIO lines as abstract entities.
+
+The runtime-assigned GPIO numberspace (what you get if you assign the GPIO
+base as -1 in struct gpio_chip) has also become unpredictable: probe ordering
+and the introduction of -EPROBE_DEFER make the probe ordering of independent
+GPIO chips essentially arbitrary, so their base numbers are handed out on a
+first-come, first-served basis.
+
+The best way to get out of the problem is to make the global GPIO numbers
+unimportant by simply not using them. GPIO descriptors deal with this.
+
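As a concrete illustration (a minimal sketch, assuming a "reset-gpios"
property in the consumer's device tree node), a descriptor-based consumer
never touches a global number:

    static int foo_probe(struct platform_device *pdev)
    {
            struct gpio_desc *reset;

            reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            /* Logical value; polarity is handled by gpiolib */
            gpiod_set_value(reset, 1);
            return 0;
    }
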
Work items:
- Convert all GPIO device drivers to only #include <linux/gpio/driver.h>
@@ -33,7 +55,7 @@ This header and helpers appeared at one point when there was no proper
driver infrastructure for doing simpler MMIO GPIO devices and there was
no core support for parsing device tree GPIOs from the core library with
the [devm_]gpiod_get() calls we have today that will implicitly go into
-the device tree back-end.
+the device tree back-end. It is legacy and should not be used in new code.
Work items:
@@ -59,6 +81,15 @@ Work items:
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+Get rid of <linux/gpio.h>
+
+This legacy header is a one-stop shop for anything GPIO and is closely tied
+to the global GPIO numberspace. The endgame of the above refactorings will
+be the removal of <linux/gpio.h> and from that point only the specialized
+headers under <linux/gpio/*.h> will be used. This requires all the above to
+be completed and is expected to take a long time.
+
+
Collect drivers
Collect GPIO drivers from arch/* and other places that should be placed
@@ -109,7 +140,7 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
int irq; /* from platform etc */
struct my_gpio *g;
- struct gpio_irq_chip *girq
+ struct gpio_irq_chip *girq;
/* Set up the irqchip dynamically */
g->irq.name = "my_gpio_irq";
@@ -137,9 +168,14 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-- Support generic hierarchical GPIO interrupts: these are for the
- non-cascading case where there is one IRQ per GPIO line, there is
- currently no common infrastructure for this.
+- Drop gpiochip_set_chained_irqchip() when all the chained irqchips
+ have been converted to the above infrastructure.
+
+- Add more infrastructure to make it possible to also pass a threaded
+ irqchip in struct gpio_irq_chip.
+
+- Drop gpiochip_irqchip_add_nested() when all the chained irqchips
+ have been converted to the above infrastructure.
Increase integration with pin control
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9f2e6b04c361..cc4ba71e4fe3 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -266,7 +266,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->mmchip.gc.owner = THIS_MODULE;
altera_gc->mmchip.gc.parent = &pdev->dev;
- altera_gc->mapped_irq = platform_get_irq(pdev, 0);
+ altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
if (altera_gc->mapped_irq < 0)
goto skip_irq;
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 7e99860ca447..d16645c1d8d9 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
default:
		/* actually, if the code runs to here, it's an error case */
- BUG_ON(1);
+ BUG();
}
}
@@ -391,7 +391,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
gpio->irq = rc;
- /* Disable IRQ and clear Interrupt status registers for all SPGIO Pins. */
+ /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
bank = &aspeed_sgpio_banks[i];
/* disable irq enable bits */
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index f1037b61f763..879db23d8454 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -978,7 +978,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
/**
- * aspeed_gpio_copro_set_ops - Sets the callbacks used for handhsaking with
+ * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with
* the coprocessor for shared GPIO banks
* @ops: The callbacks
* @data: Pointer passed back to the callbacks
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 4122683eb1f9..baee8c3f06ad 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/gpio/driver.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -586,11 +585,18 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
kona_gpio->gpio_chip = template_chip;
chip = &kona_gpio->gpio_chip;
- kona_gpio->num_bank = of_irq_count(dev->of_node);
- if (kona_gpio->num_bank == 0) {
+ ret = platform_irq_count(pdev);
+ if (!ret) {
dev_err(dev, "Couldn't determine # GPIO banks\n");
return -ENOENT;
+ } else if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't determine GPIO banks: (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
}
+ kona_gpio->num_bank = ret;
+
if (kona_gpio->num_bank > GPIO_MAX_BANK_NUM) {
dev_err(dev, "Too many GPIO banks configured (max=%d)\n",
GPIO_MAX_BANK_NUM);
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
new file mode 100644
index 000000000000..3dbbc638e9a9
--- /dev/null
+++ b/drivers/gpio/gpio-bd71828.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2018 ROHM Semiconductors
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define GPIO_OUT_REG(off) (BD71828_REG_GPIO_CTRL1 + (off))
+#define HALL_GPIO_OFFSET 3
+
+struct bd71828_gpio {
+ struct rohm_regmap_dev chip;
+ struct gpio_chip gpio;
+};
+
+static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ int ret;
+ struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
+ u8 val = (value) ? BD71828_GPIO_OUT_HI : BD71828_GPIO_OUT_LO;
+
+ /*
+ * The HALL input pin can only be used as input. If this is the pin
+	 * we are dealing with, we are done.
+ */
+ if (offset == HALL_GPIO_OFFSET)
+ return;
+
+ ret = regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ BD71828_GPIO_OUT_MASK, val);
+ if (ret)
+ dev_err(bdgpio->chip.dev, "Could not set gpio to %d\n", value);
+}
+
+static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+ unsigned int val;
+ struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
+
+ if (offset == HALL_GPIO_OFFSET)
+ ret = regmap_read(bdgpio->chip.regmap, BD71828_REG_IO_STAT,
+ &val);
+ else
+ ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ &val);
+ if (!ret)
+ ret = (val & BD71828_GPIO_OUT_MASK);
+
+ return ret;
+}
+
+static int bd71828_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
+
+ if (offset == HALL_GPIO_OFFSET)
+ return -ENOTSUPP;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return regmap_update_bits(bdgpio->chip.regmap,
+ GPIO_OUT_REG(offset),
+ BD71828_GPIO_DRIVE_MASK,
+ BD71828_GPIO_OPEN_DRAIN);
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return regmap_update_bits(bdgpio->chip.regmap,
+ GPIO_OUT_REG(offset),
+ BD71828_GPIO_DRIVE_MASK,
+ BD71828_GPIO_PUSH_PULL);
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
+static int bd71828_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ /*
+ * Pin usage is selected by OTP data. We can't read it runtime. Hence
+ * we trust that if the pin is not excluded by "gpio-reserved-ranges"
+ * the OTP configuration is set to OUT. (Other pins but HALL input pin
+	 * the OTP configuration is set to OUT. (Pins other than the HALL input
+	 * pin on BD71828 can't really be used for general purpose input - input
+	 * states are used for specific cases like regulator control or
+	 * PMIC_ON_REQ.)
+ if (offset == HALL_GPIO_OFFSET)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+ struct bd71828_gpio *bdgpio;
+ struct rohm_regmap_dev *bd71828;
+
+ bd71828 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd71828) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ bdgpio = devm_kzalloc(&pdev->dev, sizeof(*bdgpio),
+ GFP_KERNEL);
+ if (!bdgpio)
+ return -ENOMEM;
+
+ bdgpio->chip.dev = &pdev->dev;
+ bdgpio->gpio.parent = pdev->dev.parent;
+ bdgpio->gpio.label = "bd71828-gpio";
+ bdgpio->gpio.owner = THIS_MODULE;
+ bdgpio->gpio.get_direction = bd71828_get_direction;
+ bdgpio->gpio.set_config = bd71828_gpio_set_config;
+ bdgpio->gpio.can_sleep = true;
+ bdgpio->gpio.get = bd71828_gpio_get;
+ bdgpio->gpio.set = bd71828_gpio_set;
+ bdgpio->gpio.base = -1;
+
+ /*
+ * See if we need some implementation to mark some PINs as
+ * not controllable based on DT info or if core can handle
+ * "gpio-reserved-ranges" and exclude them from control
+ */
+ bdgpio->gpio.ngpio = 4;
+ bdgpio->gpio.of_node = pdev->dev.parent->of_node;
+ bdgpio->chip.regmap = bd71828->regmap;
+
+ return devm_gpiochip_add_data(&pdev->dev, &bdgpio->gpio,
+ bdgpio);
+}
+
+static struct platform_driver bd71828_gpio = {
+ .driver = {
+ .name = "bd71828-gpio"
+ },
+ .probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_gpio);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-gpio");
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index ff19a8ad5663..1d0827e79703 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -64,11 +64,11 @@ static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
if (layout->bit_per_gpio[i] < 1 || layout->bit_per_gpio[i] > 8)
return -EINVAL;
- /* Check that on valiue fits it's placeholder */
+ /* Check that on value fits its placeholder */
if (GENMASK(31, layout->bit_per_gpio[i]) & layout->on[i])
return -EINVAL;
- /* Check that off valiue fits it's placeholder */
+ /* Check that off value fits its placeholder */
if (GENMASK(31, layout->bit_per_gpio[i]) & layout->off[i])
return -EINVAL;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 08234e64993a..f954359c9544 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -253,17 +253,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
-
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
+ spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
uirq->refcnt++;
@@ -309,8 +308,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
- if (uirq->refcnt == 0)
+ if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
free_irq(uirq->uirq, priv);
+ return;
+ }
}
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
@@ -433,12 +435,9 @@ static int grgpio_probe(struct platform_device *ofdev)
static int grgpio_remove(struct platform_device *ofdev)
{
struct grgpio_priv *priv = platform_get_drvdata(ofdev);
- unsigned long flags;
int i;
int ret = 0;
- spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
if (priv->domain) {
for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
if (priv->uirqs[i].refcnt != 0) {
@@ -454,8 +453,6 @@ static int grgpio_remove(struct platform_device *ofdev)
irq_domain_remove(priv->domain);
out:
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
new file mode 100644
index 000000000000..015632cf159f
--- /dev/null
+++ b/drivers/gpio/gpio-logicvc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define LOGICVC_CTRL_REG 0x40
+#define LOGICVC_CTRL_GPIO_SHIFT 11
+#define LOGICVC_CTRL_GPIO_BITS 5
+
+#define LOGICVC_POWER_CTRL_REG 0x78
+#define LOGICVC_POWER_CTRL_GPIO_SHIFT 0
+#define LOGICVC_POWER_CTRL_GPIO_BITS 4
+
+struct logicvc_gpio {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+};
+
+static void logicvc_gpio_offset(struct logicvc_gpio *logicvc, unsigned offset,
+ unsigned int *reg, unsigned int *bit)
+{
+ if (offset >= LOGICVC_CTRL_GPIO_BITS) {
+ *reg = LOGICVC_POWER_CTRL_REG;
+
+ /* To the (virtual) power ctrl offset. */
+ offset -= LOGICVC_CTRL_GPIO_BITS;
+ /* To the actual bit offset in reg. */
+ offset += LOGICVC_POWER_CTRL_GPIO_SHIFT;
+ } else {
+ *reg = LOGICVC_CTRL_REG;
+
+ /* To the actual bit offset in reg. */
+ offset += LOGICVC_CTRL_GPIO_SHIFT;
+ }
+
+ *bit = BIT(offset);
+}
+
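A worked mapping (annotation, not part of the driver): with five CTRL GPIOs and a CTRL shift of 11, offsets 0-4 land in the control register and offsets 5-8 wrap into the power-control register:

	/*
	 * offset 2 -> LOGICVC_CTRL_REG,       *bit == BIT(2 + 11)    == BIT(13)
	 * offset 6 -> LOGICVC_POWER_CTRL_REG, *bit == BIT(6 - 5 + 0) == BIT(1)
	 */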
+static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+ unsigned int reg, bit, value;
+ int ret;
+
+ logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+ ret = regmap_read(logicvc->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ return !!(value & bit);
+}
+
+static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
+ unsigned int reg, bit;
+
+ logicvc_gpio_offset(logicvc, offset, &reg, &bit);
+
+ regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+}
+
+static int logicvc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* Pins are always configured as output, so just set the value. */
+ logicvc_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
+static struct regmap_config logicvc_gpio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "logicvc-gpio",
+};
+
+static int logicvc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = dev->of_node;
+ struct logicvc_gpio *logicvc;
+ int ret;
+
+ logicvc = devm_kzalloc(dev, sizeof(*logicvc), GFP_KERNEL);
+ if (!logicvc)
+ return -ENOMEM;
+
+ /* Try to get regmap from parent first. */
+ logicvc->regmap = syscon_node_to_regmap(of_node->parent);
+
+ /* Grab our own regmap if that fails. */
+ if (IS_ERR(logicvc->regmap)) {
+ struct resource res;
+ void __iomem *base;
+
+ ret = of_address_to_resource(of_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get resource from address\n");
+ return ret;
+ }
+
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to map I/O base\n");
+ return PTR_ERR(base);
+ }
+
+ logicvc_gpio_regmap_config.max_register = resource_size(&res) -
+ logicvc_gpio_regmap_config.reg_stride;
+
+ logicvc->regmap =
+ devm_regmap_init_mmio(dev, base,
+ &logicvc_gpio_regmap_config);
+ if (IS_ERR(logicvc->regmap)) {
+ dev_err(dev, "Failed to create regmap for I/O\n");
+ return PTR_ERR(logicvc->regmap);
+ }
+ }
+
+ logicvc->chip.parent = dev;
+ logicvc->chip.owner = THIS_MODULE;
+ logicvc->chip.label = dev_name(dev);
+ logicvc->chip.base = -1;
+ logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
+ LOGICVC_POWER_CTRL_GPIO_BITS;
+ logicvc->chip.get = logicvc_gpio_get;
+ logicvc->chip.set = logicvc_gpio_set;
+ logicvc->chip.direction_output = logicvc_gpio_direction_output;
+
+ platform_set_drvdata(pdev, logicvc);
+
+ return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
+}
+
+static const struct of_device_id logicvc_gpio_of_table[] = {
+ {
+ .compatible = "xylon,logicvc-3.02.a-gpio",
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, logicvc_gpio_of_table);
+
+static struct platform_driver logicvc_gpio_driver = {
+ .driver = {
+ .name = "gpio-logicvc",
+		.of_match_table = logicvc_gpio_of_table,
+ },
+ .probe = logicvc_gpio_probe,
+};
+
+module_platform_driver(logicvc_gpio_driver);
+
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_DESCRIPTION("Xylon LogiCVC GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
deleted file mode 100644
index 490ce7bae25e..000000000000
--- a/drivers/gpio/gpio-lynxpoint.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GPIO controller driver for Intel Lynxpoint PCH chipset>
- * Copyright (c) 2012, Intel Corporation.
- *
- * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-/* LynxPoint chipset has support for 94 gpio pins */
-
-#define LP_NUM_GPIO 94
-
-/* Bitmapped register offsets */
-#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
-#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
-#define LP_INT_STAT 0x80
-#define LP_INT_ENABLE 0x90
-
-/* Each pin has two 32 bit config registers, starting at 0x100 */
-#define LP_CONFIG1 0x100
-#define LP_CONFIG2 0x104
-
-/* LP_CONFIG1 reg bits */
-#define OUT_LVL_BIT BIT(31)
-#define IN_LVL_BIT BIT(30)
-#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
-#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
-#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
-#define USE_SEL_BIT BIT(0) /* 0: Native, 1: GPIO */
-
-/* LP_CONFIG2 reg bits */
-#define GPINDIS_BIT BIT(2) /* disable input sensing */
-#define GPIWP_BIT (BIT(0) | BIT(1)) /* weak pull options */
-
-struct lp_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- spinlock_t lock;
- unsigned long reg_base;
-};
-
-/*
- * Lynxpoint gpios are controlled through both bitmapped registers and
- * per gpio specific registers. The bitmapped registers are in chunks of
- * 3 x 32bit registers to cover all 94 gpios
- *
- * per gpio specific registers consist of two 32bit registers per gpio
- * (LP_CONFIG1 and LP_CONFIG2), with 94 gpios there's a total of
- * 188 config registers.
- *
- * A simplified view of the register layout look like this:
- *
- * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
- * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
- * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 63-94
- * ...
- * LP_INT_ENABLE[31:0] ...
- * LP_INT_ENABLE[63:31] ...
- * LP_INT_ENABLE[94:64] ...
- * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
- * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
- * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
- * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
- * LP2_CONFIG1 (gpio 2) ...
- * LP2_CONFIG2 (gpio 2) ...
- * ...
- * LP94_CONFIG1 (gpio 94) ...
- * LP94_CONFIG2 (gpio 94) ...
- */
-
-static unsigned long lp_gpio_reg(struct gpio_chip *chip, unsigned offset,
- int reg)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- int reg_offset;
-
- if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
- /* per gpio specific config registers */
- reg_offset = offset * 8;
- else
- /* bitmapped registers */
- reg_offset = (offset / 32) * 4;
-
- return lg->reg_base + reg + reg_offset;
-}
-
-static int lp_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
- unsigned long acpi_use = lp_gpio_reg(chip, offset, LP_ACPI_OWNED);
-
- pm_runtime_get(&lg->pdev->dev); /* should we put if failed */
-
- /* Fail if BIOS reserved pin for ACPI use */
- if (!(inl(acpi_use) & BIT(offset % 32))) {
- dev_err(&lg->pdev->dev, "gpio %d reserved for ACPI\n", offset);
- return -EBUSY;
- }
- /* Fail if pin is in alternate function mode (not GPIO mode) */
- if (!(inl(reg) & USE_SEL_BIT))
- return -ENODEV;
-
- /* enable input sensing */
- outl(inl(conf2) & ~GPINDIS_BIT, conf2);
-
-
- return 0;
-}
-
-static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long conf2 = lp_gpio_reg(chip, offset, LP_CONFIG2);
-
- /* disable input sensing */
- outl(inl(conf2) | GPINDIS_BIT, conf2);
-
- pm_runtime_put(&lg->pdev->dev);
-}
-
-static int lp_irq_type(struct irq_data *d, unsigned type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long flags;
- u32 value;
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
-
- if (hwirq >= lg->chip.ngpio)
- return -EINVAL;
-
- spin_lock_irqsave(&lg->lock, flags);
- value = inl(reg);
-
- /* set both TRIG_SEL and INV bits to 0 for rising edge */
- if (type & IRQ_TYPE_EDGE_RISING)
- value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
-
- /* TRIG_SEL bit 0, INV bit 1 for falling edge */
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
-
- /* TRIG_SEL bit 1, INV bit 0 for level low */
- if (type & IRQ_TYPE_LEVEL_LOW)
- value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
-
- /* TRIG_SEL bit 1, INV bit 1 for level high */
- if (type & IRQ_TYPE_LEVEL_HIGH)
- value |= TRIG_SEL_BIT | INT_INV_BIT;
-
- outl(value, reg);
-
- if (type & IRQ_TYPE_EDGE_BOTH)
- irq_set_handler_locked(d, handle_edge_irq);
- else if (type & IRQ_TYPE_LEVEL_MASK)
- irq_set_handler_locked(d, handle_level_irq);
-
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static int lp_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- return !!(inl(reg) & IN_LVL_BIT);
-}
-
-static void lp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
-
- if (value)
- outl(inl(reg) | OUT_LVL_BIT, reg);
- else
- outl(inl(reg) & ~OUT_LVL_BIT, reg);
-
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) | DIR_BIT, reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static int lp_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
-
- lp_gpio_set(chip, offset, value);
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) & ~DIR_BIT, reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-
- return 0;
-}
-
-static void lp_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- unsigned long reg, ena, pending;
- u32 base, pin;
-
- /* check from GPIO controller which pin triggered the interrupt */
- for (base = 0; base < lg->chip.ngpio; base += 32) {
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
- ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
-
- /* Only interrupts that are enabled */
- pending = inl(reg) & inl(ena);
-
- for_each_set_bit(pin, &pending, 32) {
- unsigned irq;
-
- /* Clear before handling so we don't lose an edge */
- outl(BIT(pin), reg);
-
- irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
- generic_handle_irq(irq);
- }
- }
- chip->irq_eoi(data);
-}
-
-static void lp_irq_unmask(struct irq_data *d)
-{
-}
-
-static void lp_irq_mask(struct irq_data *d)
-{
-}
-
-static void lp_irq_enable(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) | BIT(hwirq % 32), reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static void lp_irq_disable(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct lp_gpio *lg = gpiochip_get_data(gc);
- u32 hwirq = irqd_to_hwirq(d);
- unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
-
- spin_lock_irqsave(&lg->lock, flags);
- outl(inl(reg) & ~BIT(hwirq % 32), reg);
- spin_unlock_irqrestore(&lg->lock, flags);
-}
-
-static struct irq_chip lp_irqchip = {
- .name = "LP-GPIO",
- .irq_mask = lp_irq_mask,
- .irq_unmask = lp_irq_unmask,
- .irq_enable = lp_irq_enable,
- .irq_disable = lp_irq_disable,
- .irq_set_type = lp_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
-{
- struct lp_gpio *lg = gpiochip_get_data(chip);
- unsigned long reg;
- unsigned base;
-
- for (base = 0; base < lg->chip.ngpio; base += 32) {
- /* disable gpio pin interrupts */
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
- outl(0, reg);
- /* Clear interrupt status register */
- reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
- outl(0xffffffff, reg);
- }
-
- return 0;
-}
-
-static int lp_gpio_probe(struct platform_device *pdev)
-{
- struct lp_gpio *lg;
- struct gpio_chip *gc;
- struct resource *io_rc, *irq_rc;
- struct device *dev = &pdev->dev;
- unsigned long reg_len;
- int ret = -ENODEV;
-
- lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
- if (!lg)
- return -ENOMEM;
-
- lg->pdev = pdev;
- platform_set_drvdata(pdev, lg);
-
- io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
- if (!io_rc) {
- dev_err(dev, "missing IO resources\n");
- return -EINVAL;
- }
-
- lg->reg_base = io_rc->start;
- reg_len = resource_size(io_rc);
-
- if (!devm_request_region(dev, lg->reg_base, reg_len, "lp-gpio")) {
- dev_err(dev, "failed requesting IO region 0x%x\n",
- (unsigned int)lg->reg_base);
- return -EBUSY;
- }
-
- spin_lock_init(&lg->lock);
-
- gc = &lg->chip;
- gc->label = dev_name(dev);
- gc->owner = THIS_MODULE;
- gc->request = lp_gpio_request;
- gc->free = lp_gpio_free;
- gc->direction_input = lp_gpio_direction_input;
- gc->direction_output = lp_gpio_direction_output;
- gc->get = lp_gpio_get;
- gc->set = lp_gpio_set;
- gc->base = -1;
- gc->ngpio = LP_NUM_GPIO;
- gc->can_sleep = false;
- gc->parent = dev;
-
- /* set up interrupts */
- if (irq_rc && irq_rc->start) {
- struct gpio_irq_chip *girq;
-
- girq = &gc->irq;
- girq->chip = &lp_irqchip;
- girq->init_hw = lp_gpio_irq_init_hw;
- girq->parent_handler = lp_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = (unsigned)irq_rc->start;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
- }
-
- ret = devm_gpiochip_add_data(dev, gc, lg);
- if (ret) {
- dev_err(dev, "failed adding lp-gpio chip\n");
- return ret;
- }
-
- pm_runtime_enable(dev);
-
- return 0;
-}
-
-static int lp_gpio_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int lp_gpio_resume(struct device *dev)
-{
- struct lp_gpio *lg = dev_get_drvdata(dev);
- unsigned long reg;
- int i;
-
- /* on some hardware suspend clears input sensing, re-enable it here */
- for (i = 0; i < lg->chip.ngpio; i++) {
- if (gpiochip_is_requested(&lg->chip, i) != NULL) {
- reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
- outl(inl(reg) & ~GPINDIS_BIT, reg);
- }
- }
- return 0;
-}
-
-static const struct dev_pm_ops lp_gpio_pm_ops = {
- .runtime_suspend = lp_gpio_runtime_suspend,
- .runtime_resume = lp_gpio_runtime_resume,
- .resume = lp_gpio_resume,
-};
-
-static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
- { "INT33C7", 0 },
- { "INT3437", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
-
-static int lp_gpio_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver lp_gpio_driver = {
- .probe = lp_gpio_probe,
- .remove = lp_gpio_remove,
- .driver = {
- .name = "lp_gpio",
- .pm = &lp_gpio_pm_ops,
- .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
- },
-};
-
-static int __init lp_gpio_init(void)
-{
- return platform_driver_register(&lp_gpio_driver);
-}
-
-static void __exit lp_gpio_exit(void)
-{
- platform_driver_unregister(&lp_gpio_driver);
-}
-
-subsys_initcall(lp_gpio_init);
-module_exit(lp_gpio_exit);
-
-MODULE_AUTHOR("Mathias Nyman (Intel)");
-MODULE_DESCRIPTION("GPIO interface for Intel Lynxpoint");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 56d647a30e3e..7d343bea784a 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPIO Testing Device Driver
*
@@ -7,18 +7,18 @@
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gpio/driver.h>
+#include <linux/debugfs.h>
#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
#include "gpiolib.h"
@@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
mutex_lock(&chip->lock);
if (test_bit(FLAG_REQUESTED, &desc->flags) &&
- !test_bit(FLAG_IS_OUT, &desc->flags)) {
+ !test_bit(FLAG_IS_OUT, &desc->flags)) {
curr = __gpio_mockup_get(chip, offset);
if (curr == value)
goto out;
@@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
irq_type = irq_get_trigger_type(irq);
if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
irq_sim_fire(sim, offset);
}
@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
int direction;
mutex_lock(&chip->lock);
- direction = !chip->lines[offset].dir;
+ direction = chip->lines[offset].dir;
mutex_unlock(&chip->lock);
return direction;
@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct device *dev;
const char *name;
- int rv, base;
+ int rv, base, i;
u16 ngpio;
dev = &pdev->dev;
@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
if (!chip->lines)
return -ENOMEM;
+ for (i = 0; i < gc->ngpio; i++)
+ chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
+
if (device_property_read_bool(dev, "named-gpio-lines")) {
rv = gpio_mockup_name_lines(dev, chip);
if (rv)
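
The mockup change above works because get_direction() callbacks are expected to return the gpiolib constants GPIO_LINE_DIRECTION_IN (1) and GPIO_LINE_DIRECTION_OUT (0) rather than a driver-private flag, and because every line is now initialized to GPIO_LINE_DIRECTION_IN at probe time. A minimal sketch of the convention, with a hypothetical demo_chip wrapped around it:

    /* get_direction() must return GPIO_LINE_DIRECTION_IN (1) or
     * GPIO_LINE_DIRECTION_OUT (0); storing the constant directly in
     * the per-line state removes the inversion the old code needed. */
    static int demo_get_direction(struct gpio_chip *gc, unsigned int offset)
    {
            struct demo_chip *chip = gpiochip_get_data(gc);
            int dir;

            mutex_lock(&chip->lock);
            dir = chip->lines[offset].dir;
            mutex_unlock(&chip->lock);

            return dir;
    }
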
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index f1e164cecff8..604dfec353a1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -296,6 +296,7 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
.gpio_dir_in_init = ls1028a_gpio_dir_in_init,
+ .irq_set_type = mpc8xxx_irq_set_type,
};
static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
@@ -346,6 +347,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return -ENOMEM;
gc = &mpc8xxx_gc->gc;
+ gc->parent = &pdev->dev;
if (of_property_read_bool(np, "little-endian")) {
ret = bgpio_init(gc, &pdev->dev, 4,
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d1d785f983a7..b992321bb852 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -253,8 +253,7 @@ mediatek_gpio_bank_probe(struct device *dev,
/*
* Directly request the irq here instead of passing
- * a flow-handler to gpiochip_set_chained_irqchip,
- * because the irq is shared.
+ * a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 993bbeb3c006..d2b999c7987f 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -46,7 +46,6 @@
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -432,6 +431,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
u32 mask = d->mask;
irq_gc_lock(gc);
+ mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
@@ -1102,7 +1102,11 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
/* Some gpio controllers do not provide irq support */
- have_irqs = of_irq_count(np) != 0;
+ err = platform_irq_count(pdev);
+ if (err < 0)
+ return err;
+
+ have_irqs = err != 0;
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
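
Unlike of_irq_count(), platform_irq_count() resolves the interrupts through the platform device and can return -EPROBE_DEFER when the parent interrupt controller is not ready yet, so the error must be propagated rather than treated as "no interrupts". A hedged sketch of the probe-time pattern (the demo_ names are hypothetical):

    static int demo_probe(struct platform_device *pdev)
    {
            int nirqs = platform_irq_count(pdev);

            if (nirqs < 0)
                    return nirqs;           /* may be -EPROBE_DEFER */

            /* nirqs == 0 simply means no irq support on this controller */
            return demo_setup(pdev, nirqs); /* hypothetical */
    }
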
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 6652bee01966..5638b4e5355f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
+ clear_bit(hwirq, chip->irq_mask);
}
static void pca953x_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
+ set_bit(hwirq, chip->irq_mask);
}
static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- int bank_nb = d->hwirq / BANK_SZ;
- u8 mask = BIT(d->hwirq % BANK_SZ);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- if (type & IRQ_TYPE_EDGE_FALLING)
- chip->irq_trig_fall[bank_nb] |= mask;
- else
- chip->irq_trig_fall[bank_nb] &= ~mask;
-
- if (type & IRQ_TYPE_EDGE_RISING)
- chip->irq_trig_raise[bank_nb] |= mask;
- else
- chip->irq_trig_raise[bank_nb] &= ~mask;
+ assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
+ assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
return 0;
}
@@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 mask = BIT(d->hwirq % BANK_SZ);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
- chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+ clear_bit(hwirq, chip->irq_trig_raise);
+ clear_bit(hwirq, chip->irq_trig_fall);
}
static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
@@ -770,8 +764,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pca953x_irq_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT |
- IRQF_SHARED,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
@@ -861,8 +854,6 @@ out:
return ret;
}
-static const struct of_device_id pca953x_dt_ids[];
-
static int pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
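
The pca953x conversion treats irq_mask and the irq_trig_* fields as standard kernel bitmaps (DECLARE_BITMAP() arrays of unsigned long), so the open-coded bank arithmetic collapses into the generic bit helpers. A sketch of the equivalence; DEMO_NLINES stands in for the driver's real line count:

    #include <linux/bitops.h>
    #include <linux/types.h>

    DECLARE_BITMAP(irq_mask, DEMO_NLINES);  /* was: u8 irq_mask[NBANK] */

    /* before: irq_mask[hwirq / BANK_SZ] |=  BIT(hwirq % BANK_SZ); */
    set_bit(hwirq, irq_mask);

    /* before: irq_mask[hwirq / BANK_SZ] &= ~BIT(hwirq % BANK_SZ); */
    clear_bit(hwirq, irq_mask);

    /* set or clear depending on a condition, in a single call */
    assign_bit(hwirq, irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
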
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index b04c561f3280..4d47b2c41186 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -244,7 +244,6 @@ static struct platform_driver sama5d2_piobu_driver = {
module_platform_driver(sama5d2_piobu_driver);
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SAMA5D2 PIOBU controller driver");
MODULE_AUTHOR("Andrei Stefanescu <andrei.stefanescu@microchip.com>");
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
new file mode 100644
index 000000000000..c54dd08f2cbf
--- /dev/null
+++ b/drivers/gpio/gpio-sifive.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+
+#define SIFIVE_GPIO_INPUT_VAL 0x00
+#define SIFIVE_GPIO_INPUT_EN 0x04
+#define SIFIVE_GPIO_OUTPUT_EN 0x08
+#define SIFIVE_GPIO_OUTPUT_VAL 0x0C
+#define SIFIVE_GPIO_RISE_IE 0x18
+#define SIFIVE_GPIO_RISE_IP 0x1C
+#define SIFIVE_GPIO_FALL_IE 0x20
+#define SIFIVE_GPIO_FALL_IP 0x24
+#define SIFIVE_GPIO_HIGH_IE 0x28
+#define SIFIVE_GPIO_HIGH_IP 0x2C
+#define SIFIVE_GPIO_LOW_IE 0x30
+#define SIFIVE_GPIO_LOW_IP 0x34
+#define SIFIVE_GPIO_OUTPUT_XOR 0x40
+
+#define SIFIVE_GPIO_MAX 32
+#define SIFIVE_GPIO_IRQ_OFFSET 7
+
+struct sifive_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ struct regmap *regs;
+ unsigned long irq_state;
+ unsigned int trigger[SIFIVE_GPIO_MAX];
+ unsigned int irq_parent[SIFIVE_GPIO_MAX];
+};
+
+static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
+{
+ unsigned long flags;
+ unsigned int trigger;
+
+ spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
+ (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_FALL_IE, BIT(offset),
+ (trigger & IRQ_TYPE_EDGE_FALLING) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_HIGH_IE, BIT(offset),
+ (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
+ regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
+ (trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
+ spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
+}
+
+static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d);
+
+ if (offset < 0 || offset >= gc->ngpio)
+ return -EINVAL;
+
+ chip->trigger[offset] = trigger;
+ sifive_gpio_set_ie(chip, offset);
+ return 0;
+}
+
+static void sifive_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+ u32 bit = BIT(offset);
+ unsigned long flags;
+
+ irq_chip_enable_parent(d);
+
+ /* Switch to input */
+ gc->direction_input(gc, offset);
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ /* Enable interrupts */
+ assign_bit(offset, &chip->irq_state, 1);
+ sifive_gpio_set_ie(chip, offset);
+}
+
+static void sifive_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+
+ assign_bit(offset, &chip->irq_state, 0);
+ sifive_gpio_set_ie(chip, offset);
+ irq_chip_disable_parent(d);
+}
+
+static void sifive_gpio_irq_eoi(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+ u32 bit = BIT(offset);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ irq_chip_eoi_parent(d);
+}
+
+static struct irq_chip sifive_gpio_irqchip = {
+ .name = "sifive-gpio",
+ .irq_set_type = sifive_gpio_irq_set_type,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_enable = sifive_gpio_irq_enable,
+ .irq_disable = sifive_gpio_irq_disable,
+ .irq_eoi = sifive_gpio_irq_eoi,
+};
+
+static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ *parent_type = IRQ_TYPE_NONE;
+ *parent = child + SIFIVE_GPIO_IRQ_OFFSET;
+ return 0;
+}
+
+static const struct regmap_config sifive_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .disable_locking = true,
+};
+
+static int sifive_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *irq_parent;
+ struct irq_domain *parent;
+ struct gpio_irq_chip *girq;
+ struct sifive_gpio *chip;
+ int ret, ngpio;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base)) {
+ dev_err(dev, "failed to map device memory\n");
+ return PTR_ERR(chip->base);
+ }
+
+ chip->regs = devm_regmap_init_mmio(dev, chip->base,
+ &sifive_gpio_regmap_config);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ngpio = of_irq_count(node);
+ if (ngpio >= SIFIVE_GPIO_MAX) {
+ dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
+ SIFIVE_GPIO_MAX);
+ return -ENXIO;
+ }
+
+ irq_parent = of_irq_find_parent(node);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ ret = bgpio_init(&chip->gc, dev, 4,
+ chip->base + SIFIVE_GPIO_INPUT_VAL,
+ chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ NULL,
+ chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ chip->base + SIFIVE_GPIO_INPUT_EN,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ /* Disable all GPIO interrupts before enabling parent interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IE, 0);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
+ chip->irq_state = 0;
+
+ chip->gc.base = -1;
+ chip->gc.ngpio = ngpio;
+ chip->gc.label = dev_name(dev);
+ chip->gc.parent = dev;
+ chip->gc.owner = THIS_MODULE;
+ girq = &chip->gc.irq;
+ girq->chip = &sifive_gpio_irqchip;
+ girq->fwnode = of_node_to_fwnode(node);
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+
+ platform_set_drvdata(pdev, chip);
+ return gpiochip_add_data(&chip->gc, chip);
+}
+
+static const struct of_device_id sifive_gpio_match[] = {
+ { .compatible = "sifive,gpio0" },
+ { .compatible = "sifive,fu540-c000-gpio" },
+ { },
+};
+
+static struct platform_driver sifive_gpio_driver = {
+ .probe = sifive_gpio_probe,
+ .driver = {
+ .name = "sifive_gpio",
+ .of_match_table = of_match_ptr(sifive_gpio_match),
+ },
+};
+builtin_platform_driver(sifive_gpio_driver)
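
sifive_gpio_set_ie() above leans on regmap_update_bits(map, reg, mask, val), a read-modify-write helper that reads reg, clears the bits in mask, ORs in val & mask, and writes the result back. Reduced to a single trigger type, the enable path looks roughly like this (the demo_ prefix is illustrative; register names come from the driver above):

    /* Enable or disable the rising-edge interrupt for one line. */
    static void demo_set_rise_ie(struct sifive_gpio *chip,
                                 unsigned int offset, bool enable)
    {
            regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE,
                               BIT(offset),               /* bits owned */
                               enable ? BIT(offset) : 0); /* new value  */
    }
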
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5e375186f90e..866201cf5f65 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -243,4 +243,3 @@ static struct platform_driver tb10x_gpio_driver = {
module_platform_driver(tb10x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("tb10x gpio.");
-MODULE_VERSION("0.0.1");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 6fdfe4c5303e..acb99eff9939 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -96,12 +96,12 @@ struct tegra_gpio_info {
static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
u32 val, u32 reg)
{
- __raw_writel(val, tgi->regs + reg);
+ writel_relaxed(val, tgi->regs + reg);
}
static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
{
- return __raw_readl(tgi->regs + reg);
+ return readl_relaxed(tgi->regs + reg);
}
static unsigned int tegra_gpio_compose(unsigned int bank, unsigned int port,
@@ -416,11 +416,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
static int tegra_gpio_resume(struct device *dev)
{
struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
- unsigned long flags;
unsigned int b, p;
- local_irq_save(flags);
-
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
@@ -448,17 +445,14 @@ static int tegra_gpio_resume(struct device *dev)
}
}
- local_irq_restore(flags);
return 0;
}
static int tegra_gpio_suspend(struct device *dev)
{
struct tegra_gpio_info *tgi = dev_get_drvdata(dev);
- unsigned long flags;
unsigned int b, p;
- local_irq_save(flags);
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
@@ -488,7 +482,7 @@ static int tegra_gpio_suspend(struct device *dev)
GPIO_INT_ENB(tgi, gpio));
}
}
- local_irq_restore(flags);
+
return 0;
}
@@ -497,6 +491,11 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
unsigned int gpio = d->hwirq;
u32 port, bit, mask;
+ int err;
+
+ err = irq_set_irq_wake(bank->irq, enable);
+ if (err)
+ return err;
port = GPIO_PORT(gpio);
bit = GPIO_BIT(gpio);
@@ -507,7 +506,7 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
else
bank->wake_enb[port] &= ~mask;
- return irq_set_irq_wake(bank->irq, enable);
+ return 0;
}
#endif
@@ -557,7 +556,7 @@ static inline void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
#endif
static const struct dev_pm_ops tegra_gpio_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
static int tegra_gpio_probe(struct platform_device *pdev)
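
Two independent fixes meet in this file: writel_relaxed()/readl_relaxed() keep the endian conversion of writel()/readl() but drop the barriers, which makes them the safe replacement for the __raw_*() accessors, and the wake path now asks the parent first, so local state is only updated when the parent accepted the request. A hedged sketch of that ordering (demo_ names hypothetical):

    static int demo_irq_set_wake(struct irq_data *d, unsigned int enable)
    {
            struct demo_bank *bank = irq_data_get_irq_chip_data(d);
            int err;

            /* propagate to the parent first ... */
            err = irq_set_irq_wake(bank->parent_irq, enable);
            if (err)
                    return err;

            /* ... and only track the wake mask on success */
            demo_update_wake_mask(bank, d->hwirq, enable); /* hypothetical */
            return 0;
    }
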
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 55b43b7ce88d..de241263d4be 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -448,17 +448,24 @@ static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
return 0;
}
-static void tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+static void *tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ struct irq_fwspec *fwspec;
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 3;
fwspec->param[0] = gpio->soc->instance;
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = parent_type;
+
+ return fwspec;
}
static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
@@ -621,7 +628,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
irq->chip = &gpio->intc;
irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
- irq->populate_parent_fwspec = tegra186_gpio_populate_parent_fwspec;
+ irq->populate_parent_alloc_arg = tegra186_gpio_populate_parent_fwspec;
irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate;
irq->handler = handle_simple_irq;
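
The callback rename reflects a contract change: instead of filling in a caller-provided struct irq_fwspec, populate_parent_alloc_arg implementations kmalloc() an allocation argument and return it as a void *, which gpiolib kfree()s after irq_domain_alloc_irqs_parent(). That indirection is what lets an MSI parent hand back a msi_alloc_info_t instead of an irq_fwspec (see the thunderx diff below). A sketch of the two-cell case, matching the gpiolib helper further down:

    static void *demo_populate_parent_alloc_arg(struct gpio_chip *gc,
                                                unsigned int parent_hwirq,
                                                unsigned int parent_type)
    {
            struct irq_fwspec *fwspec;

            fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
            if (!fwspec)
                    return NULL;

            fwspec->fwnode = gc->irq.parent_domain->fwnode;
            fwspec->param_count = 2;
            fwspec->param[0] = parent_hwirq;
            fwspec->param[1] = parent_type;
            return fwspec;  /* freed by gpiolib after the allocation */
    }
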
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index d08d86a22b1f..9f66deab46ea 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <asm-generic/msi.h>
#define GPIO_RX_DAT 0x0
@@ -395,12 +396,32 @@ static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int *parent_type)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-
- *parent = txgpio->base_msi + (2 * child);
+ struct irq_data *irqd;
+ unsigned int irq;
+
+ irq = txgpio->msix_entries[child].vector;
+ irqd = irq_domain_get_irq_data(gc->irq.parent_domain, irq);
+ if (!irqd)
+ return -EINVAL;
+ *parent = irqd_to_hwirq(irqd);
*parent_type = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
+static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ msi_alloc_info_t *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->hwirq = parent_hwirq;
+ return info;
+}
+
static int thunderx_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -515,6 +536,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
girq->parent_domain =
irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
+ girq->populate_parent_alloc_arg = thunderx_gpio_populate_parent_alloc_info;
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -524,9 +546,15 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
- err = irq_domain_push_irq(chip->irq.domain,
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = of_node_to_fwnode(dev->of_node);
+ fwspec.param_count = 2;
+ fwspec.param[0] = i;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+ err = irq_domain_push_irq(girq->domain,
txgpio->msix_entries[i].vector,
- chip);
+ &fwspec);
if (err < 0)
dev_err(dev, "irq_domain_push_irq: %d\n", err);
}
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 4ff146ca32fe..3bf397b8dfbc 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -71,7 +71,7 @@ static inline u_int32_t gpio_o_bit(int i)
return 1 << (i + 13);
}
-/* Mapping betwee numeric GPIO ID and the actual GPIO hardware numbering:
+/* Mapping between numeric GPIO ID and the actual GPIO hardware numbering:
* 0..13 GPI 0..13
* 14..26 GPO 0..12
* 27..41 GPIO 0..14
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
new file mode 100644
index 000000000000..74913f2e5697
--- /dev/null
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+
+#define WCD_PIN_MASK(p) BIT((p) - 1)
+#define WCD_REG_DIR_CTL_OFFSET 0x42
+#define WCD_REG_VAL_CTL_OFFSET 0x43
+#define WCD934X_NPINS 5
+
+struct wcd_gpio_data {
+ struct regmap *map;
+ struct gpio_chip chip;
+};
+
+static int wcd_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->map, WCD_REG_DIR_CTL_OFFSET, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & WCD_PIN_MASK(pin))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int wcd_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ return regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), 0);
+}
+
+static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
+ int val)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+
+ return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin),
+ val ? WCD_PIN_MASK(pin) : 0);
+}
+
+static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ unsigned int value;
+
+ regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
+
+ return !!(value & WCD_PIN_MASK(pin));
+}
+
+static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+{
+ wcd_gpio_direction_output(chip, pin, val);
+}
+
+static int wcd_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wcd_gpio_data *data;
+ struct gpio_chip *chip;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->map = dev_get_regmap(dev->parent, NULL);
+ if (!data->map) {
+ dev_err(dev, "%s: failed to get regmap\n", __func__);
+ return -EINVAL;
+ }
+
+ chip = &data->chip;
+ chip->direction_input = wcd_gpio_direction_input;
+ chip->direction_output = wcd_gpio_direction_output;
+ chip->get_direction = wcd_gpio_get_direction;
+ chip->get = wcd_gpio_get;
+ chip->set = wcd_gpio_set;
+ chip->parent = dev;
+ chip->base = -1;
+ chip->ngpio = WCD934X_NPINS;
+ chip->label = dev_name(dev);
+ chip->of_gpio_n_cells = 2;
+ chip->can_sleep = false;
+
+ return devm_gpiochip_add_data(dev, chip, data);
+}
+
+static const struct of_device_id wcd_gpio_of_match[] = {
+ { .compatible = "qcom,wcd9340-gpio" },
+ { .compatible = "qcom,wcd9341-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcd_gpio_of_match);
+
+static struct platform_driver wcd_gpio_driver = {
+ .driver = {
+ .name = "wcd934x-gpio",
+ .of_match_table = wcd_gpio_of_match,
+ },
+ .probe = wcd_gpio_probe,
+};
+
+module_platform_driver(wcd_gpio_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc WCD GPIO control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 773e5c24309e..ad5489a65d54 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -251,8 +251,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
/*
* Directly request the irq here instead of passing
- * a flow-handler to gpiochip_set_chained_irqchip,
- * because the irq is shared.
+ * a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
IRQF_SHARED, chip->gc.label, &chip->gc);
@@ -280,7 +279,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int __exit iproc_gpio_remove(struct platform_device *pdev)
+static int iproc_gpio_remove(struct platform_device *pdev)
{
struct iproc_gpio_chip *chip;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index a9748b5198e6..67f9f82e0db0 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
break;
+ /* Once finished with an index write it out to the register */
if (index != xgpio_index(chip, i)) {
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, i),
+ index * XGPIO_CHANNEL_OFFSET,
chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
index = xgpio_index(chip, i);
@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
}
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, i), chip->gpio_state[index]);
+ index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
}
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 08d7c3b32038..c8af34a6368f 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
unsigned long flags;
local_irq_save(flags);
- RSR_CPENABLE(*cpenable);
- WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
-
+ *cpenable = xtensa_get_sr(cpenable);
+ xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
return flags;
}
static inline void disable_cp(unsigned long flags, unsigned long cpenable)
{
- WSR_CPENABLE(cpenable);
+ xtensa_set_sr(cpenable, cpenable);
local_irq_restore(flags);
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 4c3f6370eab4..05ba16fffdad 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
unsigned int bank_num;
for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
+ ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
writel_relaxed(gpio->context.datalsw[bank_num],
gpio->base_addr +
ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
@@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
writel_relaxed(gpio->context.dirm[bank_num],
gpio->base_addr +
ZYNQ_GPIO_DIRM_OFFSET(bank_num));
- writel_relaxed(gpio->context.int_en[bank_num],
- gpio->base_addr +
- ZYNQ_GPIO_INTEN_OFFSET(bank_num));
writel_relaxed(gpio->context.int_type[bank_num],
gpio->base_addr +
ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
@@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
writel_relaxed(gpio->context.int_any[bank_num],
gpio->base_addr +
ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ writel_relaxed(~(gpio->context.int_en[bank_num]),
+ gpio->base_addr +
+ ZYNQ_GPIO_INTEN_OFFSET(bank_num));
}
}
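
The complement in the final write is intentional, assuming the save path stores the interrupt *mask* status (1 = masked) in context.int_en while INT_EN takes 1 = enable; a hedged sketch of the pairing, with hypothetical names:

    /* save side (sketch): record which lines are currently masked */
    ctx->int_en[bank] = readl_relaxed(base + DEMO_INTMASK_OFFSET(bank));

    /* restore side (as in the hunk above): disable everything, rewrite
     * the state registers, then enable the complement of the saved mask */
    writel_relaxed(~ctx->int_en[bank], base + DEMO_INTEN_OFFSET(bank));
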
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d30e57dc755c..31fee5e918b7 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -21,11 +21,19 @@
#include "gpiolib.h"
#include "gpiolib-acpi.h"
+#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
+#define QUIRK_NO_WAKEUP 0x02l
+
static int run_edge_events_on_boot = -1;
module_param(run_edge_events_on_boot, int, 0444);
MODULE_PARM_DESC(run_edge_events_on_boot,
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+static int honor_wakeup = -1;
+module_param(honor_wakeup, int, 0444);
+MODULE_PARM_DESC(honor_wakeup,
+ "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
event->handle = evt_handle;
event->handler = handler;
event->irq = irq;
- event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin;
event->desc = desc;
@@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
},
{
/*
@@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+ },
+ {
+ /*
+ * Various HP X2 10 Cherry Trail models use an external
+ * embedded-controller connected via I2C + an ACPI GPIO
+ * event handler. The embedded controller generates various
+ * spurious wakeup events when suspended. So disable wakeup
+ * for its handler (it uses the only ACPI GPIO event handler).
+ * This breaks wakeup when opening the lid; the user needs
+ * to press the power button to wake the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = (void *)QUIRK_NO_WAKEUP,
},
{} /* Terminating entry */
};
static int acpi_gpio_setup_params(void)
{
+ const struct dmi_system_id *id;
+ long quirks = 0;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirks = (long)id->driver_data;
+
if (run_edge_events_on_boot < 0) {
- if (dmi_check_system(run_edge_events_on_boot_blacklist))
+ if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
run_edge_events_on_boot = 0;
else
run_edge_events_on_boot = 1;
}
+ if (honor_wakeup < 0) {
+ if (quirks & QUIRK_NO_WAKEUP)
+ honor_wakeup = 0;
+ else
+ honor_wakeup = 1;
+ }
+
return 0;
}
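
Both parameters default to auto (-1) and are resolved from the DMI quirk table at late init, but they remain user-overridable. Since this code is normally built in, the override goes on the kernel command line with the usual file-name prefix (assuming the standard KBUILD_MODNAME here):

    gpiolib_acpi.honor_wakeup=0 gpiolib_acpi.run_edge_events_on_boot=1
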
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4421be09b960..72b6001c56ef 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -308,7 +308,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
struct gpio_descs *descs;
descs = devm_gpiod_get_array(dev, con_id, flags);
- if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+ if (PTR_ERR(descs) == -ENOENT)
return NULL;
return descs;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index dc27b1a88e93..c6d30f73df07 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -23,6 +23,29 @@
#include "gpiolib.h"
#include "gpiolib-of.h"
+/**
+ * of_gpio_spi_cs_get_count() - special GPIO counting for SPI
+ * Some older GPIO controllers need special quirks. Currently we handle
+ * the Freescale GPIO controller with bindings that don't use the
+ * established "cs-gpios" for chip selects but instead rely on
+ * "gpios" for the chip select lines. If we detect this, we redirect
+ * the counting of "cs-gpios" to count "gpios" transparently to the
+ * driver.
+ */
+static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!IS_ENABLED(CONFIG_SPI_MASTER))
+ return 0;
+ if (!con_id || strcmp(con_id, "cs"))
+ return 0;
+ if (!of_device_is_compatible(np, "fsl,spi") &&
+ !of_device_is_compatible(np, "aeroflexgaisler,spictrl"))
+ return 0;
+ return of_gpio_named_count(np, "gpios");
+}
+
/*
* This is used by external users of of_gpio_count() from <linux/of_gpio.h>
*
@@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id)
char propname[32];
unsigned int i;
+ ret = of_gpio_spi_cs_get_count(dev, con_id);
+ if (ret > 0)
+ return ret;
+
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(propname, sizeof(propname), "%s-%s",
@@ -105,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
int index)
{
/*
- * Handle MMC "cd-inverted" and "wp-inverted" semantics.
- */
- if (IS_ENABLED(CONFIG_MMC)) {
- /*
- * Active low is the default according to the
- * SDHCI specification and the device tree
- * bindings. However the code in the current
- * kernel was written such that the phandle
- * flags were always respected, and "cd-inverted"
- * would invert the flag from the device phandle.
- */
- if (!strcmp(propname, "cd-gpios")) {
- if (of_property_read_bool(np, "cd-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- if (!strcmp(propname, "wp-gpios")) {
- if (of_property_read_bool(np, "wp-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- }
- /*
* Some GPIO fixed regulator quirks.
* Note that active low is the default.
*/
@@ -478,24 +484,24 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
- if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ if (PTR_ERR(desc) == -ENOENT) {
/* Special handling for SPI GPIOs if used */
desc = of_find_spi_gpio(dev, con_id, &of_flags);
}
- if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ if (PTR_ERR(desc) == -ENOENT) {
/* This quirk looks up flags and all */
desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
if (!IS_ERR(desc))
return desc;
}
- if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ if (PTR_ERR(desc) == -ENOENT) {
/* Special handling for regulator GPIOs if used */
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
}
- if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+ if (PTR_ERR(desc) == -ENOENT)
desc = of_find_arizona_gpio(dev, con_id, &of_flags);
if (IS_ERR(desc))
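
Dropping the IS_ERR() half of these checks is safe because PTR_ERR() is just a cast: ERR_PTR() encodes -MAX_ERRNO..-1 in the last page of the address space, so no valid descriptor pointer can compare equal to -ENOENT. A hedged sketch of the idiom (the demo_ helpers are hypothetical):

    #include <linux/err.h>

    static struct gpio_desc *demo_lookup_with_fallback(struct gpio_desc *desc)
    {
            /* only ERR_PTR(-ENOENT) satisfies this comparison, so it
             * already implies IS_ERR(desc) */
            if (PTR_ERR(desc) == -ENOENT)
                    desc = demo_fallback_lookup();

            return desc;
    }
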
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index fbf6b1a0a4fa..23e3d335cd54 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -762,10 +762,9 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
parent = &gdev->dev;
/* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent,
- MKDEV(0, 0),
- chip, gpiochip_groups,
- "gpiochip%d", chip->base);
+ dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), chip,
+ gpiochip_groups, GPIOCHIP_NAME "%d",
+ chip->base);
if (IS_ERR(dev))
return PTR_ERR(dev);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9913886ede90..4d0106ceeba7 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
* in the given chip for the specified hardware number.
*/
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
- u16 hwnum)
+ unsigned int hwnum)
{
struct gpio_device *gdev = chip->gpiodev;
@@ -220,19 +220,27 @@ int gpiod_get_direction(struct gpio_desc *desc)
chip = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
+ /*
+ * Open drain emulation using input mode may incorrectly report
+ * input here, fix that up.
+ */
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+ test_bit(FLAG_IS_OUT, &desc->flags))
+ return 0;
+
if (!chip->get_direction)
return -ENOTSUPP;
ret = chip->get_direction(chip, offset);
- if (ret > 0) {
- /* GPIOF_DIR_IN, or other positive */
+ if (ret < 0)
+ return ret;
+
+ /* GPIOF_DIR_IN or other positive, otherwise GPIOF_DIR_OUT */
+ if (ret > 0)
ret = 1;
- clear_bit(FLAG_IS_OUT, &desc->flags);
- }
- if (ret == 0) {
- /* GPIOF_DIR_OUT */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+
+ assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(gpiod_get_direction);
@@ -484,15 +492,6 @@ static int linehandle_validate_flags(u32 flags)
return 0;
}
-static void linehandle_configure_flag(unsigned long *flagsp,
- u32 bit, bool active)
-{
- if (active)
- set_bit(bit, flagsp);
- else
- clear_bit(bit, flagsp);
-}
-
static long linehandle_set_config(struct linehandle_state *lh,
void __user *ip)
{
@@ -514,22 +513,22 @@ static long linehandle_set_config(struct linehandle_state *lh,
desc = lh->descs[i];
flagsp = &desc->flags;
- linehandle_configure_flag(flagsp, FLAG_ACTIVE_LOW,
+ assign_bit(FLAG_ACTIVE_LOW, flagsp,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- linehandle_configure_flag(flagsp, FLAG_OPEN_DRAIN,
+ assign_bit(FLAG_OPEN_DRAIN, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- linehandle_configure_flag(flagsp, FLAG_OPEN_SOURCE,
+ assign_bit(FLAG_OPEN_SOURCE, flagsp,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- linehandle_configure_flag(flagsp, FLAG_PULL_UP,
+ assign_bit(FLAG_PULL_UP, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- linehandle_configure_flag(flagsp, FLAG_PULL_DOWN,
+ assign_bit(FLAG_PULL_DOWN, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- linehandle_configure_flag(flagsp, FLAG_BIAS_DISABLE,
+ assign_bit(FLAG_BIAS_DISABLE, flagsp,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
/*
@@ -678,14 +677,13 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- struct gpio_desc *desc;
+ struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
- if (offset >= gdev->ngpio) {
- ret = -EINVAL;
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
goto out_free_descs;
}
- desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
goto out_free_descs;
@@ -1010,8 +1008,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
- if (offset >= gdev->ngpio)
- return -EINVAL;
+ desc = gpiochip_get_desc(gdev->chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
/* Return an error if a unknown flag is set */
if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
@@ -1049,7 +1048,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
goto out_free_label;
@@ -1176,10 +1174,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset >= gdev->ngpio)
- return -EINVAL;
- desc = &gdev->descs[lineinfo.line_offset];
+ desc = gpiochip_get_desc(chip, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
if (desc->name) {
strncpy(lineinfo.name, desc->name,
sizeof(lineinfo.name));
@@ -1419,7 +1418,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
+ dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (chip->parent && chip->parent->driver)
@@ -1444,7 +1443,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (chip->ngpio > FASTPATH_NGPIO)
chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
- chip->ngpio, FASTPATH_NGPIO);
+ chip->ngpio, FASTPATH_NGPIO);
gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
@@ -1487,11 +1486,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_label;
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
for (i = 0; i < chip->ngpio; i++)
gdev->descs[i].gdev = gdev;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1516,15 +1515,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
- if (!chip->get_direction(chip, i))
- set_bit(FLAG_IS_OUT, &desc->flags);
- else
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ assign_bit(FLAG_IS_OUT,
+ &desc->flags, !chip->get_direction(chip, i));
} else {
- if (!chip->direction_input)
- set_bit(FLAG_IS_OUT, &desc->flags);
- else
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ assign_bit(FLAG_IS_OUT,
+ &desc->flags, !chip->direction_input);
}
}
@@ -1805,7 +1800,7 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
* @gc: the gpiochip to set the irqchip chain to
* @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
+ * cascaded irqchip
* @parent_handler: the parent interrupt handler for the accumulated IRQ
* coming out of the gpiochip. If the interrupt is nested rather than
* cascaded, pass NULL in this handler argument
@@ -1848,29 +1843,6 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
}
/**
- * gpiochip_set_chained_irqchip() - connects a chained irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip chain to
- * @irqchip: the irqchip to chain to the gpiochip
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * chained irqchip
- * @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip.
- */
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler)
-{
- if (gpiochip->irq.threaded) {
- chip_err(gpiochip, "tried to chain a threaded gpiochip\n");
- return;
- }
-
- gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, parent_handler);
-}
-EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
-
-/**
* gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip nested handler to
* @irqchip: the irqchip to nest to the gpiochip
@@ -1995,7 +1967,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
+ void *parent_arg;
unsigned int parent_hwirq;
unsigned int parent_type;
struct gpio_irq_chip *girq = &gc->irq;
@@ -2011,7 +1983,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_info(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+ chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
ret = girq->child_to_parent_hwirq(gc, hwirq, type,
&parent_hwirq, &parent_type);
@@ -2019,7 +1991,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
chip_err(gc, "can't look up hwirq %lu\n", hwirq);
return ret;
}
- chip_info(gc, "found parent hwirq %u\n", parent_hwirq);
+ chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
/*
* We set handle_bad_irq because the .set_type() should
@@ -2034,23 +2006,27 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
NULL, NULL);
irq_set_probe(irq);
- /*
- * Create a IRQ fwspec to send up to the parent irqdomain:
- * specify the hwirq we address on the parent and tie it
- * all together up the chain.
- */
- parent_fwspec.fwnode = d->parent->fwnode;
/* This parent only handles asserted level IRQs */
- girq->populate_parent_fwspec(gc, &parent_fwspec, parent_hwirq,
- parent_type);
- chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ parent_arg = girq->populate_parent_alloc_arg(gc, parent_hwirq, parent_type);
+ if (!parent_arg)
+ return -ENOMEM;
+
+ chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
irq, parent_hwirq);
- ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+ irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+ ret = irq_domain_alloc_irqs_parent(d, irq, 1, parent_arg);
+ /*
+ * If the parent irqdomain is msi, the interrupts have already
+ * been allocated, so the EEXIST is good.
+ */
+ if (irq_domain_is_msi(d->parent) && (ret == -EEXIST))
+ ret = 0;
if (ret)
chip_err(gc,
"failed to allocate parent hwirq %d for hwirq %lu\n",
parent_hwirq, hwirq);
+ kfree(parent_arg);
return ret;
}
@@ -2087,8 +2063,8 @@ static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
if (!gc->irq.child_offset_to_irq)
gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop;
- if (!gc->irq.populate_parent_fwspec)
- gc->irq.populate_parent_fwspec =
+ if (!gc->irq.populate_parent_alloc_arg)
+ gc->irq.populate_parent_alloc_arg =
gpiochip_populate_parent_fwspec_twocell;
gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
@@ -2114,27 +2090,43 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
return !!gc->irq.parent_domain;
}
-void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 2;
fwspec->param[0] = parent_hwirq;
fwspec->param[1] = parent_type;
+
+ return fwspec;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
-void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
fwspec->param_count = 4;
fwspec->param[0] = 0;
fwspec->param[1] = parent_hwirq;
fwspec->param[2] = 0;
fwspec->param[3] = parent_type;
+
+ return fwspec;
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
@@ -2990,7 +2982,8 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
* code on failure.
*/
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+ unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags)
@@ -3042,14 +3035,22 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
+static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ if (!gc->set_config)
+ return -ENOTSUPP;
+
+ return gc->set_config(gc, offset, config);
+}
+
+static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
enum pin_config_param mode)
{
unsigned long config;
unsigned arg;
switch (mode) {
- case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
arg = 1;
@@ -3060,7 +3061,7 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
}
config = PIN_CONF_PACKED(mode, arg);
- return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
+ return gpio_do_set_config(gc, offset, config);
}
static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
@@ -3294,15 +3295,9 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
- if (!chip->set || !chip->set_config) {
- gpiod_dbg(desc,
- "%s: missing set() or set_config() operations\n",
- __func__);
- return -ENOTSUPP;
- }
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -3326,10 +3321,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
* Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- if (transitory)
- set_bit(FLAG_TRANSITORY, &desc->flags);
- else
- clear_bit(FLAG_TRANSITORY, &desc->flags);
+ assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
chip = desc->gdev->chip;
@@ -3339,7 +3331,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = chip->set_config(chip, gpio, packed);
+ rc = gpio_do_set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
@@ -3363,6 +3355,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+/**
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to change
+ */
+void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ VALIDATE_DESC_VOID(desc);
+ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
*
@@ -3785,10 +3788,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
- if (value)
- __set_bit(hwgpio, bits);
- else
- __clear_bit(hwgpio, bits);
+ __assign_bit(hwgpio, bits, value);
count++;
}
i++;
@@ -4472,8 +4472,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (chip->ngpio <= p->chip_hwnum) {
dev_err(dev,
- "requested GPIO %d is out of range [0..%d] for chip %s\n",
- idx, chip->ngpio, chip->label);
+ "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+ idx, p->chip_hwnum, chip->ngpio - 1,
+ chip->label);
return ERR_PTR(-EINVAL);
}
@@ -5058,7 +5059,7 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
struct gpio_descs *descs;
descs = gpiod_get_array(dev, con_id, flags);
- if (IS_ERR(descs) && (PTR_ERR(descs) == -ENOENT))
+ if (PTR_ERR(descs) == -ENOENT)
return NULL;
return descs;
@@ -5104,7 +5105,7 @@ static int __init gpiolib_dev_init(void)
return ret;
}
- ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, "gpiochip");
+ ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
bus_unregister(&gpio_bus_type);
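
gpiod_toggle_active_low() exists so that consumers with legacy inversion properties, such as the MMC "cd-inverted"/"wp-inverted" semantics whose quirk was removed from gpiolib-of.c above, can apply the inversion on a descriptor they already hold. A hedged consumer-side sketch:

    /* Sketch: honor a legacy "cd-inverted" property after lookup. */
    static int demo_get_cd_gpio(struct device *dev)
    {
            struct gpio_desc *cd = devm_gpiod_get_optional(dev, "cd", GPIOD_IN);

            if (IS_ERR_OR_NULL(cd))
                    return PTR_ERR_OR_ZERO(cd);

            if (device_property_read_bool(dev, "cd-inverted"))
                    gpiod_toggle_active_low(cd);

            return 0;
    }
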
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index ca9bc1e4803c..3e0aab2945d8 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/cdev.h>
+#define GPIOCHIP_NAME "gpiochip"
+
/**
* struct gpio_device - internal state container for GPIO devices
* @id: numerical ID number for the GPIO chip
@@ -78,7 +80,8 @@ struct gpio_array {
unsigned long invert_mask[];
};
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+ unsigned int hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7ae3b22c5628..c2bbcdd9c875 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -120,6 +120,7 @@ amdgpu-y += \
amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o \
+ gfx_v9_4.o \
gfx_v10_0.o
# add async DMA block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9aff914608b8..2f9da6d182f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -90,6 +90,7 @@
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
+#include "amdgpu_df.h"
#define MAX_GPU_INSTANCE 16
@@ -636,9 +637,8 @@ struct amdgpu_fw_vram_usage {
struct amdgpu_bo *reserved_bo;
void *va;
- /* Offset on the top of VRAM, used as c2p write buffer.
+ /* GDDR6 training support flag.
*/
- u64 mem_train_fb_loc;
bool mem_train_support;
};
@@ -665,29 +665,6 @@ struct amdgpu_mmio_remap {
resource_size_t bus_addr;
};
-struct amdgpu_df_funcs {
- void (*sw_init)(struct amdgpu_device *adev);
- void (*sw_fini)(struct amdgpu_device *adev);
- void (*enable_broadcast_mode)(struct amdgpu_device *adev,
- bool enable);
- u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
- u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
- bool enable);
- int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
- int is_enable);
- int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
- int is_disable);
- void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
- uint64_t *count);
- uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
- void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
- uint32_t ficadl_val, uint32_t ficadh_val);
-};
/* Define the HW IP blocks will be used in driver , add more if necessary */
enum amd_hw_ip_block_type {
GC_HWIP = 1,
@@ -931,6 +908,9 @@ struct amdgpu_device {
bool enable_mes;
struct amdgpu_mes mes;
+ /* df */
+ struct amdgpu_df df;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -944,8 +924,6 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_df_funcs *df_funcs;
-
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -994,8 +972,6 @@ struct amdgpu_device {
bool pm_sysfs_en;
bool ucode_sysfs_en;
-
- bool in_baco;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1033,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
+#define AMDGPU_REGS_KIQ (1<<2)
#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
+#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 82155ac3288a..12247a32f9ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = state == AMD_PG_STATE_GATE ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d3da9dde4ee1..8609287620ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -613,15 +613,9 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- if (is_support_sw_smu(adev))
- smu_switch_power_profile(&adev->smu,
- PP_SMC_POWER_PROFILE_COMPUTE,
- !idle);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->switch_power_profile)
- amdgpu_dpm_switch_power_profile(adev,
- PP_SMC_POWER_PROFILE_COMPUTE,
- !idle);
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE,
+ !idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
@@ -634,6 +628,38 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ if (adev->family == AMDGPU_FAMILY_AI) {
+ int i;
+
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+ } else {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
+ }
+
+ return 0;
+}
+
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint32_t flush_type = 0;
+ bool all_hub = false;
+
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20)
+ flush_type = 2;
+
+ if (adev->family == AMDGPU_FAMILY_AI)
+ all_hub = true;
+
+ return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+}
+
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
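A hedged sketch of how KFD is expected to drive the two new wrappers; the caller and its arguments are illustrative, while the flush-type and all-hub selection described in the comments is the logic added above:

static int example_flush_process_tlbs(struct kgd_dev *kgd, uint16_t pasid,
				      uint16_t vmid)
{
	int r;

	/* Picks the heavyweight flush (type 2) on Vega20 XGMI hives and
	 * flushes all VM hubs on Family AI parts, per the wrapper above. */
	r = amdgpu_amdkfd_flush_gpu_tlb_pasid(kgd, pasid);
	if (r)
		return r;

	/* Per-VMID variant: every hub on Family AI, GFXHUB 0 otherwise. */
	return amdgpu_amdkfd_flush_gpu_tlb_vmid(kgd, vmid);
}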
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 069d5d230810..47b0f2957d1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,6 +136,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index b6713e0ed1b2..4bcc175a149d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -46,6 +46,8 @@
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -69,32 +71,56 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t sdma_engine_reg_base[8] = {
- SOC15_REG_OFFSET(SDMA0, 0,
- mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA1, 0,
- mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA2, 0,
- mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA3, 0,
- mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA4, 0,
- mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA5, 0,
- mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA6, 0,
- mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA7, 0,
- mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
- };
-
- uint32_t retval = sdma_engine_reg_base[engine_id]
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ /* fall through */
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
+ break;
+ case 2:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ break;
+ case 3:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
+ break;
+ case 4:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
+ mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
+ break;
+ case 5:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
+ mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
+ break;
+ case 6:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
+ mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
+ break;
+ case 7:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
+ mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
- queue_id, retval);
+ queue_id, sdma_rlc_reg_offset);
- return retval;
+ return sdma_rlc_reg_offset;
}
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
@@ -258,11 +284,28 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
+static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+ vmid);
+ return;
+ }
+
+ mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -277,8 +320,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
- .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
- .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
- .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
+ .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
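To make the register arithmetic above concrete, a small sketch of how the computed offset is consumed; this mirrors how the SDMA load/dump helpers in this file address the block, and the wrapper itself is hypothetical:

static uint32_t example_read_rlc_rb_cntl(struct amdgpu_device *adev,
					 uint32_t sdma_rlc_reg_offset)
{
	/* sdma_rlc_reg_offset = engine instance base + queue_id * stride,
	 * where the stride is (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL).
	 * Any RLC register of that queue is then offset + its RLC0 name: */
	return RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
}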
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 61cd707158e4..a7b17c8deb00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -107,13 +107,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
lock_srbm(kgd, mec, pipe, queue_id, 0);
}
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
- unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
- queue_id) & 31;
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
- return ((uint32_t)1) << bit;
+ return 1ull << bit;
}
static void release_queue(struct kgd_dev *kgd)
@@ -268,21 +268,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
acquire_queue(kgd, pipe_id, queue_id);
- /* HIQ is set during driver init period with vmid set to 0*/
- if (m->cp_hqd_vmid == 0) {
- uint32_t value, mec, pipe;
-
- mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
- pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
- pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
- mec, pipe, queue_id);
- value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
- value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
- ((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
- }
-
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -332,9 +317,10 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
lower_32_bits((uint64_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uint64_t)wptr));
- pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id));
+ pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
- get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -350,6 +336,59 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
+static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct v10_compute_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+			PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ release_queue(kgd);
+
+ return r;
+}
+
static int kgd_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
@@ -686,71 +725,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
-{
- signed long r;
- uint32_t seq;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
- amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
- amdgpu_ring_write(ring,
- PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
- PACKET3_INVALIDATE_TLBS_PASID(pasid));
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
-
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- uint16_t queried_pasid;
- bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- if (amdgpu_emu_mode == 0 && ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid);
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
- &queried_pasid);
- if (ret && queried_pasid == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return 0;
- }
-
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
- return 0;
-}
-
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
return 0;
@@ -817,6 +791,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
+ .hiq_mqd_load = kgd_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -832,7 +807,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
get_atc_vmid_pasid_mapping_info,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
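The get_queue_mask() widening above deserves a note: the old code clamped the bit index with "& 31" and shifted a 32-bit constant, so any (pipe, queue) pair mapping past bit 31 would silently alias a lower queue. A sketch of the fixed computation (numbers and names invented):

static uint64_t example_queue_bit(unsigned int num_queue_per_pipe,
				  uint32_t pipe_id, uint32_t queue_id)
{
	/* e.g. with 8 queues per pipe, pipe 4 / queue 0 is bit 32, which
	 * the old 32-bit version would have wrapped back onto bit 0. */
	return 1ull << (pipe_id * num_queue_per_pipe + queue_id);
}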
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 6e6f0a99ec06..8f052e98a3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -696,45 +696,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
lower_32_bits(page_table_base));
}
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- unsigned int tmp;
-
- if (adev->in_gpu_reset)
- return -EIO;
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
- (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid\n");
- return 0;
- }
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- return 0;
-}
-
/**
* read_vmid_from_vmfault_reg - read vmid from register
*
@@ -771,7 +732,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index bfbddedb2380..19a10db93d68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -657,45 +657,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
lower_32_bits(page_table_base));
}
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid;
- unsigned int tmp;
-
- if (adev->in_gpu_reset)
- return -EIO;
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
- (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- break;
- }
- }
-
- return 0;
-}
-
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return -EINVAL;
- }
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
- RREG32(mmVM_INVALIDATE_RESPONSE);
- return 0;
-}
-
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -717,6 +678,4 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 6f1a4676ddde..8562afe5b761 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -40,7 +40,6 @@
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"
-#include "mmhub_v9_4.h"
enum hqd_dequeue_request_type {
@@ -104,13 +103,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
lock_srbm(kgd, mec, pipe, queue_id, 0);
}
-static uint32_t get_queue_mask(struct amdgpu_device *adev,
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
- unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
- queue_id) & 31;
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
- return ((uint32_t)1) << bit;
+ return 1ull << bit;
}
static void release_queue(struct kgd_dev *kgd)
@@ -259,21 +258,6 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
acquire_queue(kgd, pipe_id, queue_id);
- /* HIQ is set during driver init period with vmid set to 0*/
- if (m->cp_hqd_vmid == 0) {
- uint32_t value, mec, pipe;
-
- mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
- pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
-
- pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
- mec, pipe, queue_id);
- value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
- value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
- ((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
- }
-
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
@@ -324,7 +308,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
- get_queue_mask(adev, pipe_id, queue_id));
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
@@ -340,6 +324,59 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct v9_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+			PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ release_queue(kgd);
+
+ return r;
+}
+
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
@@ -618,100 +655,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
- uint32_t flush_type)
-{
- signed long r;
- uint32_t seq;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
-
- spin_lock(&adev->gfx.kiq.ring_lock);
- amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
- amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
- amdgpu_ring_write(ring,
- PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
- PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
- PACKET3_INVALIDATE_TLBS_PASID(pasid) |
- PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
-
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int vmid, i;
- uint16_t queried_pasid;
- bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- uint32_t flush_type = 0;
-
- if (adev->in_gpu_reset)
- return -EIO;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
- if (ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
-
- for (vmid = 0; vmid < 16; vmid++) {
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
- continue;
-
- ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
- &queried_pasid);
- if (ret && queried_pasid == pasid) {
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- break;
- }
- }
-
- return 0;
-}
-
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- int i;
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("non kfd vmid %d\n", vmid);
- return 0;
- }
-
- /* Use legacy mode tlb invalidation.
- *
- * Currently on Raven the code below is broken for anything but
- * legacy mode due to a MMHUB power gating problem. A workaround
- * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
- * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
- * bit.
- *
- * TODO 1: agree on the right set of invalidation registers for
- * KFD use. Use the last one for now. Invalidate both GC and
- * MMHUB.
- *
- * TODO 2: support range-based invalidation, requires kfg2kgd
- * interface change
- */
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
-
- return 0;
-}
-
int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
return 0;
@@ -758,8 +701,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
+static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -769,14 +712,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- if (adev->asic_type == CHIP_ARCTURUS) {
- mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
- } else
- mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
@@ -786,6 +722,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
@@ -801,7 +738,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
- .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
- .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
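A hedged sketch of the caller side of the new hook: KFD's MQD manager can now load the HIQ through hiq_mqd_load() instead of relying on the vmid == 0 special case that hqd_load() used to carry. The wrapper and its locals are illustrative:

static int example_load_hiq(struct kgd_dev *kgd,
			    const struct kfd2kgd_calls *f2g, void *hiq_mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	/* Maps the HIQ via a KIQ MAP_QUEUES packet (engine_sel = hiq)
	 * rather than programming RLC_CP_SCHEDULERS over MMIO. */
	return f2g->hiq_mqd_load(kgd, hiq_mqd, pipe_id, queue_id,
				 doorbell_off);
}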
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index d9e9ad22b2bd..63d3e6683dfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -33,6 +33,9 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
+int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off);
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs);
@@ -57,9 +60,5 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b2487f4f271b..fa8ac9d19a7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
return -ENOMEM;
mutex_init(&(*mem)->lock);
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
(*mem)->process_info = process_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9ba80d828876..fdd52d86a4d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -2022,7 +2022,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
- ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_info(adev);
if (ret) {
DRM_ERROR("Failed to get mem train fb location.\n");
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff4eb96bdfb5..58f9d8c3a17a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -525,16 +525,12 @@ static int gddr6_mem_train_support(struct amdgpu_device *adev)
return ret;
}
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
- unsigned char *bios = ctx->bios;
- struct vram_reserve_block *reserved_block;
- int index, block_number;
+ int index;
uint8_t frev, crev;
uint16_t data_offset, size;
- uint32_t start_address_in_kb;
- uint64_t offset;
int ret;
adev->fw_vram_usage.mem_train_support = false;
@@ -569,32 +565,6 @@ int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
return -EINVAL;
}
- reserved_block = (struct vram_reserve_block *)
- (bios + data_offset + sizeof(struct atom_common_table_header));
- block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
- / sizeof(struct vram_reserve_block);
- reserved_block += (block_number > 0) ? block_number-1 : 0;
- DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
- block_number,
- le32_to_cpu(reserved_block->start_address_in_kb),
- le16_to_cpu(reserved_block->used_by_firmware_in_kb),
- le16_to_cpu(reserved_block->used_by_driver_in_kb));
- if (reserved_block->used_by_firmware_in_kb > 0) {
- start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
- offset = (uint64_t)start_address_in_kb * ONE_KiB;
- if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
- offset -= ONE_MiB;
- }
-
- offset &= ~(ONE_MiB - 1);
- adev->fw_vram_usage.mem_train_fb_loc = offset;
- adev->fw_vram_usage.mem_train_support = true;
- DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
- ret = 0;
- } else {
- DRM_ERROR("used_by_firmware_in_kb is 0!\n");
- ret = -EINVAL;
- }
-
- return ret;
+ adev->fw_vram_usage.mem_train_support = true;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index f871af5ea6f3..434fe2fa0089 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -31,7 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a97fb759e2f4..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
bool d3_supported = false;
struct pci_dev *parent_pdev;
- while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+ parent_pdev = pci_upstream_bridge(pdev);
+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
+ }
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5b330f69194b..a52a084158b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -909,6 +909,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (parser->entity && parser->entity != entity)
return -EINVAL;
+	/* Return if there is no run queue associated with this entity.
+	 * Possibly because of a disabled HW IP. */
+ if (entity->rq == NULL)
+ return -EINVAL;
+
parser->entity = entity;
ring = to_amdgpu_ring(entity->rq->sched);
@@ -1229,7 +1234,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
goto error_abort;
}
- job->owner = p->filp;
p->fence = dma_fence_get(&job->base.s_fence->finished);
amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 1d2bbf10614e..94a6c42f29ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
-static int amdgpu_ctx_total_num_entities(void)
-{
- unsigned i, num_entities = 0;
-
- for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
- num_entities += amdgpu_ctx_num_entities[i];
-
- return num_entities;
-}
-
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum drm_sched_priority priority)
{
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
/* NORMAL and below are accessible by everyone */
if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
@@ -68,47 +61,94 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ struct amdgpu_ctx_entity *entity;
+ struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
+ unsigned num_scheds = 0;
+ enum drm_sched_priority priority;
+ int r;
+
+ entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
+ GFP_KERNEL);
+ if (!entity)
+ return -ENOMEM;
+
+ entity->sequence = 1;
+ priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+ switch (hw_ip) {
+ case AMDGPU_HW_IP_GFX:
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ scheds = adev->gfx.compute_sched;
+ num_scheds = adev->gfx.num_compute_sched;
+ break;
+ case AMDGPU_HW_IP_DMA:
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ scheds = adev->vcn.vcn_dec_sched;
+ num_scheds = adev->vcn.num_vcn_dec_sched;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ scheds = adev->vcn.vcn_enc_sched;
+ num_scheds = adev->vcn.num_vcn_enc_sched;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
+ break;
+ }
+
+ r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+ &ctx->guilty);
+ if (r)
+ goto error_free_entity;
+
+ ctx->entities[hw_ip][ring] = entity;
+ return 0;
+
+error_free_entity:
+ kfree(entity);
+
+ return r;
+}
+
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
- unsigned i, j, k;
int r;
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
- return -EINVAL;
-
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
memset(ctx, 0, sizeof(*ctx));
- ctx->adev = adev;
-
- ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
- sizeof(struct dma_fence*), GFP_KERNEL);
- if (!ctx->fences)
- return -ENOMEM;
-
- ctx->entities[0] = kcalloc(num_entities,
- sizeof(struct amdgpu_ctx_entity),
- GFP_KERNEL);
- if (!ctx->entities[0]) {
- r = -ENOMEM;
- goto error_free_fences;
- }
-
- for (i = 0; i < num_entities; ++i) {
- struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
- entity->sequence = 1;
- entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
- }
- for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
- ctx->entities[i] = ctx->entities[i - 1] +
- amdgpu_ctx_num_entities[i - 1];
+ ctx->adev = adev;
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
@@ -120,114 +160,49 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
- for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
- struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
- struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
- unsigned num_rings = 0;
- unsigned num_rqs = 0;
-
- switch (i) {
- case AMDGPU_HW_IP_GFX:
- rings[0] = &adev->gfx.gfx_ring[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- for (j = 0; j < adev->gfx.num_compute_rings; ++j)
- rings[j] = &adev->gfx.compute_ring[j];
- num_rings = adev->gfx.num_compute_rings;
- break;
- case AMDGPU_HW_IP_DMA:
- for (j = 0; j < adev->sdma.num_instances; ++j)
- rings[j] = &adev->sdma.instance[j].ring;
- num_rings = adev->sdma.num_instances;
- break;
- case AMDGPU_HW_IP_UVD:
- rings[0] = &adev->uvd.inst[0].ring;
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- rings[0] = &adev->vce.ring[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- rings[0] = &adev->uvd.inst[0].ring_enc[0];
- num_rings = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
- }
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
- for (k = 0; k < adev->vcn.num_enc_rings; ++k)
- rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
- }
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
- if (adev->jpeg.harvest_config & (1 << j))
- continue;
- rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
- }
- break;
- }
+ return 0;
- for (j = 0; j < num_rings; ++j) {
- if (!rings[j]->adev)
- continue;
+}
- rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
- }
+static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+{
- for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
- r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rqs, &ctx->guilty);
- if (r)
- goto error_cleanup_entities;
- }
+ int i;
- return 0;
+ if (!entity)
+ return;
-error_cleanup_entities:
- for (i = 0; i < num_entities; ++i)
- drm_sched_entity_destroy(&ctx->entities[0][i].entity);
- kfree(ctx->entities[0]);
+ for (i = 0; i < amdgpu_sched_jobs; ++i)
+ dma_fence_put(entity->fences[i]);
-error_free_fences:
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
+ kfree(entity);
}
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
- unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
if (!adev)
return;
- for (i = 0; i < num_entities; ++i)
- for (j = 0; j < amdgpu_sched_jobs; ++j)
- dma_fence_put(ctx->entities[0][i].fences[j]);
- kfree(ctx->fences);
- kfree(ctx->entities[0]);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
+ amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+ ctx->entities[i][j] = NULL;
+ }
+ }
mutex_destroy(&ctx->lock);
-
kfree(ctx);
}
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
+ int r;
+
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
return -EINVAL;
@@ -244,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
return -EINVAL;
}
- *entity = &ctx->entities[hw_ip][ring].entity;
+ if (ctx->entities[hw_ip][ring] == NULL) {
+ r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
+ if (r)
+ return r;
+ }
+
+ *entity = &ctx->entities[hw_ip][ring]->entity;
return 0;
}
@@ -284,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
- unsigned num_entities;
- u32 i;
+ u32 i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ if (!ctx->entities[i][j])
+ continue;
- num_entities = amdgpu_ctx_total_num_entities();
- for (i = 0; i < num_entities; i++)
- drm_sched_entity_destroy(&ctx->entities[0][i].entity);
+ drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
+ }
+ }
amdgpu_ctx_fini(ref);
}
@@ -521,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
enum drm_sched_priority ctx_prio;
- unsigned i;
+ unsigned i, j;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
- for (i = 0; i < num_entities; i++) {
- struct drm_sched_entity *entity = &ctx->entities[0][i].entity;
+ if (!ctx->entities[i][j])
+ continue;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_set_priority(entity, ctx_prio);
+ }
}
}
@@ -569,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id, i;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- for (i = 0; i < num_entities; i++) {
- struct drm_sched_entity *entity;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
- entity = &ctx->entities[0][i].entity;
- timeout = drm_sched_entity_flush(entity, timeout);
+ entity = &ctx->entities[i][j]->entity;
+ timeout = drm_sched_entity_flush(entity, timeout);
+ }
}
}
mutex_unlock(&mgr->lock);
@@ -591,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
- unsigned num_entities = amdgpu_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id, i;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
@@ -604,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
}
- for (i = 0; i < num_entities; i++)
- drm_sched_entity_fini(&ctx->entities[0][i].entity);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
+
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_fini(entity);
+ }
+ }
}
}
@@ -627,3 +627,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
+
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+ adev->gfx.num_gfx_sched++;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
+ adev->gfx.num_compute_sched++;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+ adev->sdma.num_sdma_sched++;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+ &adev->vcn.inst[i].ring_dec.sched;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+ adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+ &adev->vcn.inst[i].ring_enc[j].sched;
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+ adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+ &adev->jpeg.inst[i].ring_dec.sched;
+ }
+}
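For reference, a minimal sketch of the path a first submission now takes; only amdgpu_ctx_get_entity() and the HW IP constant are from this patch, the wrapper is hypothetical:

static int example_lazy_entity(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity **entity)
{
	/* The first lookup of (COMPUTE, ring 0) calls
	 * amdgpu_ctx_init_entity(), which allocates the entity with its
	 * amdgpu_sched_jobs fence slots and binds it to the compute
	 * scheduler list filled in by amdgpu_ctx_init_sched(). */
	return amdgpu_ctx_get_entity(ctx, AMDGPU_HW_IP_COMPUTE, 0, 0, entity);
}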
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index da808633732b..de490f183af2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -29,10 +29,12 @@ struct drm_device;
struct drm_file;
struct amdgpu_fpriv;
+#define AMDGPU_MAX_ENTITY_NUM 4
+
struct amdgpu_ctx_entity {
uint64_t sequence;
- struct dma_fence **fences;
struct drm_sched_entity entity;
+ struct dma_fence *fences[];
};
struct amdgpu_ctx {
@@ -42,8 +44,7 @@ struct amdgpu_ctx {
unsigned reset_counter_query;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
- struct dma_fence **fences;
- struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM];
+ struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
enum drm_sched_priority init_priority;
enum drm_sched_priority override_priority;
@@ -87,4 +88,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
+
+
#endif
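Moving fences into a flexible array tail lets one allocation carry the entity and its fence ring; a sketch of the idiom, matching the kcalloc() in amdgpu_ctx_init_entity() above:

static struct amdgpu_ctx_entity *example_alloc_entity(void)
{
	struct amdgpu_ctx_entity *entity;

	/* The flexible "fences[]" member adds no size of its own;
	 * offsetof() sizes the block for amdgpu_sched_jobs pointers
	 * placed directly behind the fixed fields. */
	entity = kcalloc(1, offsetof(typeof(*entity),
				     fences[amdgpu_sched_jobs]), GFP_KERNEL);
	return entity;
}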
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8e6726e0d035..f24ed9a1a3e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -26,6 +26,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_debugfs.h>
@@ -129,7 +130,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
sh_bank = 0xFFFFFFFF;
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
- use_bank = 1;
+ use_bank = true;
} else if (*pos & (1ULL << 61)) {
me = (*pos & GENMASK_ULL(33, 24)) >> 24;
@@ -137,17 +138,24 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
- use_ring = 1;
+ use_ring = true;
} else {
- use_bank = use_ring = 0;
+ use_bank = use_ring = false;
}
*pos &= (1UL << 22) - 1;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
@@ -193,6 +201,9 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -237,13 +248,20 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -251,6 +269,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -276,12 +297,19 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_PCIE(*pos >> 2, value);
@@ -291,6 +319,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -316,13 +347,20 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -330,6 +368,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -355,12 +396,19 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_DIDT(*pos >> 2, value);
@@ -370,6 +418,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -395,13 +446,20 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
result += 4;
buf += 4;
@@ -409,6 +467,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -434,12 +495,19 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
r = get_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
WREG32_SMC(*pos, value);
@@ -449,6 +517,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
size -= 4;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return result;
}
@@ -572,7 +643,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
idx = *pos >> 2;
valuesize = sizeof(values);
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -633,6 +713,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -644,6 +728,9 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (!x)
return -EINVAL;
@@ -711,6 +798,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (!data)
return -ENOMEM;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -726,6 +817,9 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
while (size) {
uint32_t value;
@@ -859,6 +953,10 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
int r = 0, i;
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
+
/* Avoid accidentally unparking the sched thread during GPU reset */
mutex_lock(&adev->lock_reset);
@@ -889,6 +987,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
mutex_unlock(&adev->lock_reset);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -907,8 +1008,17 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
@@ -917,8 +1027,17 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
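Every debugfs entry point in this file now follows the same runtime-PM bracket; a hedged template, with the register access in the middle left as a placeholder:

static int example_debugfs_access(struct amdgpu_device *adev)
{
	int r;

	/* Wake the device (blocking until resumed) before touching MMIO;
	 * a negative return means the resume itself failed. */
	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	/* ... register reads/writes go here ... */

	/* Re-arm the autosuspend timer, then drop the usage count so the
	 * device may suspend again once idle. */
	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return 0;
}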
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e37fa52f22f7..53fdafd85cb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -66,6 +66,7 @@
#include "amdgpu_pmu.h"
#include <linux/suspend.h>
+#include <drm/task_barrier.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -215,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_virt_kiq_rreg(adev, reg);
+ if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ return amdgpu_kiq_rreg(adev, reg);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -293,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_virt_kiq_wreg(adev, reg, v);
+ if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ return amdgpu_kiq_wreg(adev, reg, v);
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -984,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
struct sysinfo si;
- bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+ bool is_os_64 = (sizeof(void *) == 8);
uint64_t total_memory;
uint64_t dram_size_seven_GB = 0x1B8000000;
uint64_t dram_size_three_GB = 0xB8000000;
@@ -1031,8 +1032,6 @@ def_value:
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
- int ret = 0;
-
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -1072,7 +1071,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- return ret;
+ return 0;
}
/**
@@ -1810,7 +1809,8 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
}
}
- r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
+ r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
return r;
}
@@ -2345,14 +2345,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- if (is_support_sw_smu(adev)) {
- r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
- } else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_mp1_state) {
- r = adev->powerplay.pp_funcs->set_mp1_state(
- adev->powerplay.pp_handle,
- adev->mp1_state);
- }
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
adev->mp1_state, r);
@@ -2439,7 +2432,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_IP_BLOCK_TYPE_VCE
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_VCN
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2454,7 +2448,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
block->status.hw)
continue;
- r = block->version->funcs->hw_init(adev);
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
+ r = block->version->funcs->resume(adev);
+ else
+ r = block->version->funcs->hw_init(adev);
+
DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
@@ -2663,14 +2661,38 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
- if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
- adev->asic_reset_res = (adev->in_baco == false) ?
- amdgpu_device_baco_enter(adev->ddev) :
- amdgpu_device_baco_exit(adev->ddev);
- else
- adev->asic_reset_res = amdgpu_asic_reset(adev);
+ /* It's a bug to not have a hive within this function */
+ if (WARN_ON(!hive))
+ return;
+
+	/*
+	 * Use a task barrier to synchronize all XGMI reset works across the
+	 * hive. task_barrier_enter and task_barrier_exit block until all
+	 * the threads running the XGMI reset works reach those points,
+	 * while task_barrier_full blocks at both.
+	 */
+ if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+
+ task_barrier_enter(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+
+ task_barrier_exit(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+ } else {
+ task_barrier_full(&hive->tb);
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ }
+
+fail:
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev->ddev->unique);
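
For reference, a minimal sketch of how the three barrier entry points compose, assuming the <drm/task_barrier.h> API included above (task_barrier_enter(), task_barrier_exit() and task_barrier_full(), each taking a struct task_barrier *); example_node_reset() is a hypothetical helper, not part of the patch:

#include <drm/task_barrier.h>

/* Each XGMI node runs this from its own xgmi_reset_work worker. */
static void example_node_reset(struct task_barrier *tb, bool split_phases)
{
	if (split_phases) {
		task_barrier_enter(tb);	/* all nodes arrive before BACO enter */
		/* ... enter BACO on this node ... */
		task_barrier_exit(tb);	/* all nodes arrive before BACO exit */
		/* ... exit BACO on this node ... */
	} else {
		task_barrier_full(tb);	/* enter + exit back to back */
		/* ... full ASIC reset on this node ... */
	}
}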
@@ -2785,7 +2807,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
- adev->vm_manager.vm_pte_num_rqs = 0;
+ adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -2826,6 +2848,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
r = amdgpu_device_check_arguments(adev);
if (r)
@@ -3029,6 +3052,14 @@ fence_driver_init:
goto failed;
}
+ DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_cu_per_sh,
+ adev->gfx.cu_info.number);
+
+ amdgpu_ctx_init_sched(adev);
+
adev->accel_working = true;
amdgpu_vm_check_compute_bug(adev);
@@ -3660,8 +3691,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_amdkfd_pre_reset(adev);
-
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3730,6 +3759,11 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
break;
default:
goto disabled;
@@ -3790,18 +3824,13 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
- struct amdgpu_hive_info *hive,
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
struct list_head *device_list_handle,
bool *need_full_reset_arg)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset = *need_full_reset_arg, vram_lost = false;
int r = 0;
- int cpu = smp_processor_id();
- bool use_baco =
- (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
- true : false;
/*
* ASIC reset has to be done on all HGMI hive nodes ASAP
@@ -3809,62 +3838,22 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
*/
if (need_full_reset) {
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- /*
- * For XGMI run all resets in parallel to speed up the
- * process by scheduling the highpri wq on different
- * cpus. For XGMI with baco reset, all nodes must enter
- * baco within close proximity before anyone exit.
- */
+ /* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!queue_work_on(cpu, system_highpri_wq,
- &tmp_adev->xgmi_reset_work))
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY;
- cpu = cpumask_next(cpu, cpu_online_mask);
} else
r = amdgpu_asic_reset(tmp_adev);
- if (r)
- break;
- }
-
- /* For XGMI wait for all work to complete before proceed */
- if (!r) {
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- flush_work(&tmp_adev->xgmi_reset_work);
- r = tmp_adev->asic_reset_res;
- if (r)
- break;
- if (use_baco)
- tmp_adev->in_baco = true;
- }
- }
- }
-
- /*
- * For XGMI with baco reset, need exit baco phase by scheduling
- * xgmi_reset_work one more time. PSP reset and sGPU skips this
- * phase. Not assume the situation that PSP reset and baco reset
- * coexist within an XGMI hive.
- */
- if (!r && use_baco) {
- cpu = smp_processor_id();
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!queue_work_on(cpu,
- system_highpri_wq,
- &tmp_adev->xgmi_reset_work))
- r = -EALREADY;
- if (r)
- break;
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
+ if (r) {
+ DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+ r, tmp_adev->ddev->unique);
+ break;
}
}
- if (!r && use_baco) {
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
list_for_each_entry(tmp_adev, device_list_handle,
gmc.xgmi.head) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -3872,16 +3861,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
r = tmp_adev->asic_reset_res;
if (r)
break;
- tmp_adev->in_baco = false;
}
}
}
-
- if (r) {
- DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
- r, tmp_adev->ddev->unique);
- goto end;
- }
}
if (!r && amdgpu_ras_intr_triggered())
@@ -3974,7 +3956,7 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -3994,7 +3976,7 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
@@ -4175,8 +4157,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(adev, hive, device_list_handle,
- &need_full_reset);
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
if (r && r == -EAGAIN)
goto retry;
}
@@ -4377,55 +4358,21 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
if (ras && ras->supported)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
- int ret;
-
- ret = smu_baco_enter(smu);
- if (ret)
- return ret;
- } else {
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
- }
-
- return 0;
+ return amdgpu_dpm_baco_enter(adev);
}
int amdgpu_device_baco_exit(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
if (!amdgpu_device_supports_baco(adev->ddev))
return -ENOTSUPP;
- if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
- int ret;
-
- ret = smu_baco_exit(smu);
- if (ret)
- return ret;
-
- } else {
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
- }
+ ret = amdgpu_dpm_baco_exit(adev);
+ if (ret)
+ return ret;
if (ras && ras->supported)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
new file mode 100644
index 000000000000..057f6ea645d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DF_H__
+#define __AMDGPU_DF_H__
+
+struct amdgpu_df_hash_status {
+ bool hash_64k;
+ bool hash_2m;
+ bool hash_1g;
+};
+
+struct amdgpu_df_funcs {
+ void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
+ void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+ bool enable);
+ u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+ u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+ bool enable);
+ int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+ int is_enable);
+ int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+ int is_disable);
+ void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+ uint64_t *count);
+ uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
+ void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
+ uint32_t ficadl_val, uint32_t ficadh_val);
+ uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
+ uint32_t df_inst);
+ uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_df {
+ struct amdgpu_df_hash_status hash_status;
+ const struct amdgpu_df_funcs *funcs;
+};
+
+#endif /* __AMDGPU_DF_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index ad9c9546a64f..84cee27cd7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -513,13 +513,23 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
* will not allow USWC mappings.
 * Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
*/
- if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN &&
- (adev->flags & AMD_IS_APU) &&
- (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
- amdgpu_device_asic_has_dc_support(adev->asic_type))
- domain |= AMDGPU_GEM_DOMAIN_GTT;
+ amdgpu_device_asic_has_dc_support(adev->asic_type)) {
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ case CHIP_RAVEN:
+ /* enable S/G on PCO and RV2 */
+ if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ default:
+ break;
+ }
+ }
#endif
return domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 9cc270efee7c..a2e8c3dfb4f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -946,20 +946,63 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
bool swsmu = is_support_sw_smu(adev);
switch (block_type) {
- case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_UVD:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_VCE:
+ if (swsmu) {
+ ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+ } else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ /*
+ * TODO: need a better lock mechanism
+ *
+ * Here adev->pm.mutex lock protection is enforced on
+ * UVD and VCE cases only. Since for other cases, there
+ * may be already lock protection in amdgpu_pm.c.
+ * This is a quick fix for the deadlock issue below.
+ * INFO: task ocltst:2028 blocked for more than 120 seconds.
+ *       Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
+ * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ * ocltst D 0 2028 2026 0x00000000
+ * Call Trace:
+ * __schedule+0x2c0/0x870
+ * schedule+0x2c/0x70
+ * schedule_preempt_disabled+0xe/0x10
+ * __mutex_lock.isra.9+0x26d/0x4e0
+ * __mutex_lock_slowpath+0x13/0x20
+ * ? __mutex_lock_slowpath+0x13/0x20
+ * mutex_lock+0x2f/0x40
+ * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
+ * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
+ * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
+ * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
+ * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
+ * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
+ */
+ mutex_lock(&adev->pm.mutex);
+ ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate));
+ mutex_unlock(&adev->pm.mutex);
+ }
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
if (swsmu)
ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- else
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ if (swsmu)
+ ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+ break;
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
+ ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
default:
@@ -968,3 +1011,163 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
return ret;
}
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_enter(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_exit(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* exit BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+ enum pp_mp1_state mp1_state)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_set_mp1_state(&adev->smu, mp1_state);
+ } else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_mp1_state) {
+ ret = adev->powerplay.pp_funcs->set_mp1_state(
+ adev->powerplay.pp_handle,
+ mp1_state);
+ }
+
+ return ret;
+}
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ bool baco_cap;
+
+ if (is_support_sw_smu(adev)) {
+ return smu_baco_is_support(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+ return false;
+
+ if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+ return false;
+
+ return baco_cap ? true : false;
+ }
+}
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev)) {
+ return smu_mode2_reset(smu);
+ } else {
+ if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+ return -ENOENT;
+
+ return pp_funcs->asic_reset_mode_2(pp_handle);
+ }
+}
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_baco_enter(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_baco_exit(smu);
+ if (ret)
+ return ret;
+ } else {
+ if (!pp_funcs
+ || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ if (ret)
+ return ret;
+
+ /* exit BACO state */
+ ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_switch_power_profile(&adev->smu, type, en);
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->switch_power_profile)
+ ret = adev->powerplay.pp_funcs->switch_power_profile(
+ adev->powerplay.pp_handle, type, en);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu_xgmi(adev))
+ ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_xgmi_pstate)
+ ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+ pstate);
+
+ return ret;
+}
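
The helpers above fold the is_support_sw_smu() versus powerplay dispatch into one place, so callers such as amdgpu_device_baco_enter()/amdgpu_device_baco_exit() and the suspend path no longer open-code it. A minimal sketch of a caller, assuming only the wrappers added in this hunk; reset_via_baco() is a hypothetical helper:

static int reset_via_baco(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_dpm_is_baco_supported(adev))
		return -ENOTSUPP;

	r = amdgpu_dpm_baco_enter(adev);
	if (r)
		return r;

	/* hardware is now in BACO; leaving it completes the reset cycle */
	return amdgpu_dpm_baco_exit(adev);
}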
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 2cfb677272af..902ca6c00cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -341,10 +341,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
-#define amdgpu_dpm_switch_power_profile(adev, type, en) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
- (adev)->powerplay.pp_handle, type, en))
-
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
@@ -517,4 +513,24 @@ extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+ uint32_t pstate);
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en);
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
+
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+ enum pp_mp1_state mp1_state);
+
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9baa1ddf8693..4598836c5fa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
-int amdgpu_noretry = 1;
+int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
@@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes,
module_param_named(mes, amdgpu_mes, int, 0444);
MODULE_PARM_DESC(noretry,
- "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+ "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
module_param_named(noretry, amdgpu_noretry, int, 0644);
/**
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
- {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
@@ -1203,13 +1203,23 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_dev->dev_private;
- int ret;
+ int ret, i;
if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
+ /* wait for all rings to drain before suspending */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready) {
+ ret = amdgpu_fence_wait_empty(ring);
+ if (ret)
+ return -EBUSY;
+ }
+ }
+
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -1371,7 +1381,8 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_GEM |
- DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 377fe20bce23..3c01252b1e0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -34,6 +34,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_debugfs.h>
@@ -154,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
-
+ pm_runtime_get_noresume(adev->ddev->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -234,6 +235,7 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
int r;
@@ -274,6 +276,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
dma_fence_put(fence);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
} while (last_seq != seq);
return true;
@@ -737,10 +741,18 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return 0;
seq_printf(m, "gpu recover\n");
amdgpu_device_gpu_recover(adev, NULL);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
}
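
The same runtime PM bracket recurs throughout this series: every debugfs/sysfs entry point takes a reference with pm_runtime_get_sync() before touching hardware and drops it through the autosuspend path afterwards, while fence emission holds a no-resume reference until the fence signals in amdgpu_fence_process(). A minimal sketch of the bracket, assuming only the standard <linux/pm_runtime.h> API; show_example() is hypothetical:

#include <linux/pm_runtime.h>

static int show_example(struct device *dev)
{
	int r;

	r = pm_runtime_get_sync(dev);	/* resumes the device if suspended */
	if (r < 0)
		return r;

	/* ... read registers, print state ... */

	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the ref; suspend after the delay */
	return 0;
}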
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e00b46180d2e..0f960b498792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
if (r)
return r;
@@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
- amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -543,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
- if (!is_support_sw_smu(adev) &&
- (!adev->powerplay.pp_funcs ||
- !adev->powerplay.pp_funcs->set_powergating_by_smu))
- return;
-
-
mutex_lock(&adev->gfx.gfx_off_mutex);
if (!enable)
@@ -641,7 +635,7 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
if (adev->gfx.funcs->query_ras_error_count)
adev->gfx.funcs->query_ras_error_count(adev, err_data);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
return AMDGPU_RAS_SUCCESS;
}
@@ -664,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_rreg(ring, reg);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait any longer in the GPU reset case, because doing so may
+	 * block the gpu_recover() routine forever: this KIQ read can be
+	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting here, which makes gpu_recover()
+	 * hang.
+	 *
+	 * Also don't wait when called from IRQ context.
+	 */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+ pr_err("failed to read reg:%x\n", reg);
+ return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_wreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_wreg(ring, reg, v);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait any longer in the GPU reset case, because doing so may
+	 * block the gpu_recover() routine forever: this KIQ write can be
+	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting here, which makes gpu_recover()
+	 * hang.
+	 *
+	 * Also don't wait when called from IRQ context.
+	 */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_write;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_write;
+
+ return;
+
+failed_kiq_write:
+ pr_err("failed to write reg:%x\n", reg);
+}
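
Condensed from the amdgpu_mm_rreg() change earlier in this series, a hedged sketch of how a register read now routes through these KIQ helpers: the KIQ path is taken when the caller passes AMDGPU_REGS_KIQ explicitly, or when running as an SR-IOV guest and AMDGPU_REGS_NO_KIQ is not set; read_reg_example() is hypothetical:

uint32_t read_reg_example(struct amdgpu_device *adev, uint32_t reg,
			  uint32_t acc_flags)
{
	if ((acc_flags & AMDGPU_REGS_KIQ) ||
	    (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
		return amdgpu_kiq_rreg(adev, reg);	/* ring-based, may sleep */

	return readl(((void __iomem *)adev->rmmio) + (reg * 4));	/* direct MMIO */
}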
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 0ae0a2715b0d..ca17ffb01301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -76,11 +76,15 @@ struct kiq_pm4_funcs {
struct amdgpu_ring *ring,
u64 addr,
u64 seq);
+ void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub);
/* Packet sizes */
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
int query_status_size;
+ int invalidate_tlbs_size;
};
struct amdgpu_kiq {
@@ -90,6 +94,7 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
+ uint32_t reg_val_offs;
};
/*
@@ -269,8 +274,12 @@ struct amdgpu_gfx {
bool me_fw_write_wait;
bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+ struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
+ uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
+ uint32_t num_compute_sched;
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -367,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a12f33c0f5df..5884ab590486 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -223,7 +223,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
u64 size_af, size_bf;
if (amdgpu_sriov_vf(adev)) {
- mc->agp_start = 0xffffffff;
+ mc->agp_start = 0xffffffffffff;
mc->agp_end = 0x0;
mc->agp_size = 0;
@@ -333,3 +333,43 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
amdgpu_mmhub_ras_fini(adev);
amdgpu_xgmi_ras_fini(adev);
}
+
+ /*
+ * The latest engine allocation on gfx9/10 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
+ GFXHUB_FREE_VM_INV_ENGS_BITMAP};
+ unsigned i;
+ unsigned vmhub, inv_eng;
+
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ ring->vm_inv_eng = inv_eng - 1;
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+ }
+
+ return 0;
+}
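
To make the allocator above concrete, a worked walk-through for the first three rings on one hub, starting from GFXHUB_FREE_VM_INV_ENGS_BITMAP = 0x1FFF3 (bits 0, 1 and 4..16 set; engines 2, 3 and 17 stay reserved):

	ffs(0x1FFF3) = 1  ->  engine 0, bitmap becomes 0x1FFF2
	ffs(0x1FFF2) = 2  ->  engine 1, bitmap becomes 0x1FFF0
	ffs(0x1FFF0) = 5  ->  engine 4, bitmap becomes 0x1FFE0

ffs() returns the 1-based index of the lowest set bit, hence the inv_eng - 1 above; a return of 0 means the hub has run out of free engines and the function fails with -EINVAL.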
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b499a3de8bb6..d3c27a3c43f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -92,6 +92,9 @@ struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type);
+ /* flush the vm tlb via pasid */
+ int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type, bool all_hub);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -216,6 +219,9 @@ struct amdgpu_gmc {
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
+#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
+ ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
+ ((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
@@ -267,5 +273,6 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
#endif
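
A minimal sketch of a caller of the new PASID flush hook, assuming only the macro and function pointer added in this hunk; flush_for_pasid() is a hypothetical helper, not an API from the patch:

static int flush_for_pasid(struct amdgpu_device *adev, uint16_t pasid)
{
	/* the hook is optional; ASICs without it leave the pointer NULL */
	if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
		return -ENOENT;

	/* flush_type 0 (legacy), and hit every hub (gfxhub + mmhub) */
	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, 0, true);
}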
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 73328d0c741d..d42be880a236 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -153,7 +153,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
- job->owner = owner;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
priority = job->base.s_priority;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index dc7ee9358dcd..3f7b8433d179 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -49,7 +49,6 @@ struct amdgpu_job {
uint32_t preamble_status;
uint32_t preemption_status;
uint32_t num_ibs;
- void *owner;
bool vm_needs_flush;
uint64_t vm_pd_addr;
unsigned vmid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 5131a0a1bc8a..bd9ef9cc86de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -43,6 +43,8 @@ struct amdgpu_jpeg {
uint8_t num_jpeg_inst;
struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
struct amdgpu_jpeg_reg internal;
+ struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
+ uint32_t num_jpeg_sched;
unsigned harvest_config;
struct delayed_work idle_work;
enum amd_powergating_state cur_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b32adda70bbc..b03b1eb7ba04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -37,6 +37,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include <linux/pm_runtime.h>
#include "hwmgr.h"
#define WIDTH_4K 3840
@@ -158,10 +159,15 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
pm = smu_get_current_power_state(&adev->smu);
@@ -173,6 +179,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
pm = adev->pm.dpm.user_state;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -186,6 +195,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
@@ -196,10 +206,12 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
state = POWER_STATE_TYPE_BALANCED;
else if (strncmp("performance", buf, strlen("performance")) == 0)
state = POWER_STATE_TYPE_PERFORMANCE;
- else {
- count = -EINVAL;
- goto fail;
- }
+ else
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
@@ -212,12 +224,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
- /* Can't set dpm state when the card is off */
- if (!(adev->flags & AMD_IS_PX) ||
- (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_pm_compute_clocks(adev);
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -288,13 +299,14 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level = 0xff;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev))
level = smu_get_performance_level(&adev->smu);
@@ -303,6 +315,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
else
level = adev->pm.dpm.forced_level;
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -329,11 +344,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
- /* Can't force performance level when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -353,17 +363,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
} else {
- count = -EINVAL;
- goto fail;
+ return -EINVAL;
}
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
current_level = smu_get_performance_level(&adev->smu);
else if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
- if (current_level == level)
+ if (current_level == level) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
+ }
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
@@ -372,29 +388,40 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
(level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
pr_err("Currently not in any profile mode!\n");
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
if (is_support_sw_smu(adev)) {
ret = smu_force_performance_level(&adev->smu, level);
- if (ret)
- count = -EINVAL;
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ }
} else if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
- count = -EINVAL;
mutex_unlock(&adev->pm.mutex);
- goto fail;
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret)
- count = -EINVAL;
- else
+ if (ret) {
+ mutex_unlock(&adev->pm.mutex);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ } else {
adev->pm.dpm.forced_level = level;
+ }
mutex_unlock(&adev->pm.mutex);
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
-fail:
return count;
}
@@ -407,6 +434,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
@@ -414,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
} else if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
@@ -439,6 +473,10 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
pm = smu_get_current_power_state(smu);
ret = smu_get_power_num_states(smu, &data);
@@ -450,6 +488,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
amdgpu_dpm_get_pp_num_states(adev, &data);
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
@@ -500,14 +541,18 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states)) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret || idx >= ARRAY_SIZE(data.states))
+ return -EINVAL;
+
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
/* only set user selected power states */
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
@@ -515,8 +560,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
AMD_PP_TASK_ENABLE_USER_STATE, &state);
adev->pp_force_state_enabled = true;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
}
-fail:
+
return count;
}
@@ -538,20 +585,32 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size;
+ int size, ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (size < 0)
return size;
- }
- else if (adev->powerplay.pp_funcs->get_pp_table)
+ } else if (adev->powerplay.pp_funcs->get_pp_table) {
size = amdgpu_dpm_get_pp_table(adev, &table);
- else
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ if (size < 0)
+ return size;
+ } else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return 0;
+ }
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
@@ -573,13 +632,23 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
} else if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -703,18 +772,28 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_od_edit_dpm_table(&adev->smu, type,
parameter, parameter_size);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
} else {
if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
parameter, parameter_size);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
@@ -722,12 +801,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
} else {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
}
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
}
@@ -738,27 +823,33 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint32_t size = 0;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
- return size;
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- return size;
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return size;
}
/**
@@ -796,15 +887,27 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
pr_debug("featuremask = 0x%llx\n", featuremask);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
+ }
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
return count;
}
@@ -815,16 +918,27 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev))
return 0;
- if (is_support_sw_smu(adev)) {
- return smu_sys_get_pp_feature_mask(&adev->smu, buf);
- } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
- return amdgpu_dpm_get_ppfeature_status(adev, buf);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_support_sw_smu(adev))
+ size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
+ else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+ size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "\n");
+ return size;
}
/**
@@ -863,16 +977,27 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
/*
@@ -928,11 +1053,18 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (ret)
return -EINVAL;
@@ -945,16 +1077,27 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -964,8 +1107,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret;
uint32_t mask = 0;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
@@ -974,11 +1117,18 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (ret)
return -EINVAL;
@@ -991,16 +1141,27 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -1020,10 +1181,19 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1037,16 +1207,27 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1066,10 +1247,19 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1083,16 +1273,27 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1112,10 +1313,19 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1129,16 +1339,27 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
+ size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
- return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -1158,10 +1379,19 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (ret)
return ret;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
@@ -1176,15 +1406,23 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
+ int ret;
if (amdgpu_sriov_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -1203,10 +1441,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
ret = kstrtol(buf, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1222,7 +1462,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
}
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -1233,15 +1475,23 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
+ int ret;
if (amdgpu_sriov_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -1260,10 +1510,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
ret = kstrtol(buf, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1279,7 +1531,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
}
}
-fail:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
}
@@ -1309,16 +1563,27 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
- return smu_get_power_profile_mode(&adev->smu, buf);
+ size = smu_get_power_profile_mode(&adev->smu, buf);
else if (adev->powerplay.pp_funcs->get_power_profile_mode)
- return amdgpu_dpm_get_power_profile_mode(adev, buf);
+ size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "\n");
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
}
@@ -1343,7 +1608,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
- goto fail;
+ return -EINVAL;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
@@ -1358,23 +1623,30 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
+ if (ret)
+ return -EINVAL;
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
}
parameter[parameter_size] = profile_mode;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev))
ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (!ret)
return count;
-fail:
+
return -EINVAL;
}
@@ -1397,10 +1669,17 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0)
+ return r;
+
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&value, &size);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (r)
return r;
@@ -1426,10 +1705,17 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ r = pm_runtime_get_sync(ddev->dev);
+ if (r < 0)
+ return r;
+
/* read the memory busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
(void *)&value, &size);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
if (r)
return r;
@@ -1455,11 +1741,20 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint64_t count0, count1;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0)
+ return ret;
+
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
}
@@ -1547,42 +1842,43 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
- /* Can't get temperature when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
if (channel >= PP_TEMP_MAX)
return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
break;
case PP_TEMP_MEM:
/* get current memory temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
(void *)&temp, &size);
- if (r)
- return r;
+ break;
+ default:
+ r = -EINVAL;
break;
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (r)
+ return r;
+
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
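
The temperature handler gets the same treatment, but with a switch over several sensors the early returns inside each case had to go: the error is recorded, the runtime-PM reference is released exactly once, and only then is the error reported. A condensed sketch of that single-exit shape, with read_sensor() as a hypothetical stand-in for amdgpu_dpm_read_sensor():

static ssize_t example_show_temp(struct device *dev, int channel, char *buf)
{
	int r, temp = 0;

	r = pm_runtime_get_sync(dev);
	if (r < 0)
		return r;

	switch (channel) {
	case 0:
		r = read_sensor(dev, &temp);	/* hypothetical sensor read */
		break;
	default:
		r = -EINVAL;			/* unknown channel */
		break;
	}

	pm_runtime_mark_last_busy(dev);		/* runs on success and failure alike */
	pm_runtime_put_autosuspend(dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}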
@@ -1678,16 +1974,27 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -1697,27 +2004,32 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
+ int err, ret;
int value;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, value);
} else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
amdgpu_dpm_set_fan_control_mode(adev, value);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return count;
}
@@ -1744,34 +2056,43 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
else
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
}
err = kstrtou32(buf, 10, &value);
- if (err)
+ if (err) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
value = (value * 100) / 255;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_set_fan_speed_percent(&adev->smu, value);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
@@ -1784,20 +2105,22 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_percent(&adev->smu, &speed);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
speed = (speed * 255) / 100;
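
The hwmon pwm1 interface is scaled 0-255 while the SMU fan controls work in percent, so the handlers convert with truncating integer division in both directions; a written-back value can therefore read back one step low. A worked example of the round trip:

	u32 pwm = 128;
	u32 percent = (pwm * 100) / 255;	/* 12800 / 255 = 50 */
	u32 back = (percent * 255) / 100;	/* 12750 / 100 = 127, one step below 128 */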
@@ -1812,20 +2135,22 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &speed);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return sprintf(buf, "%i\n", speed);
}
@@ -1839,8 +2164,16 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1856,8 +2189,16 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -1872,20 +2213,22 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return sprintf(buf, "%i\n", rpm);
}
@@ -1899,32 +2242,40 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
else
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+ if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -ENODATA;
-
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ }
err = kstrtou32(buf, 10, &value);
- if (err)
+ if (err) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
- if (is_support_sw_smu(adev)) {
+ if (is_support_sw_smu(adev))
err = smu_set_fan_speed_rpm(&adev->smu, value);
- if (err)
- return err;
- } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- if (err)
- return err;
- }
+ else
+ err = -EINVAL;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (err)
+ return err;
return count;
}
@@ -1935,15 +2286,27 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -1957,12 +2320,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- /* Can't adjust fan when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
-
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -1974,14 +2331,24 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, pwm_mode);
} else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return -EINVAL;
+ }
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
}
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
return count;
}
@@ -1990,18 +2357,20 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 vddgfx;
int r, size = sizeof(vddgfx);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2020,7 +2389,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 vddnb;
int r, size = sizeof(vddnb);
@@ -2028,14 +2396,17 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2054,19 +2425,21 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
u32 query = 0;
int r, size = sizeof(u32);
unsigned uw;
- /* Can't get power when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the power */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2089,16 +2462,27 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
+ ssize_t size;
+ int r;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, true, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return size;
}
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
@@ -2107,16 +2491,27 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
+ ssize_t size;
+ int r;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
if (is_support_sw_smu(adev)) {
smu_get_power_limit(&adev->smu, &limit, false, true);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
- return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
- return snprintf(buf, PAGE_SIZE, "\n");
+ size = snprintf(buf, PAGE_SIZE, "\n");
}
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return size;
}
@@ -2138,13 +2533,20 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert from microwatts to watts */
- if (is_support_sw_smu(adev)) {
+
+ err = pm_runtime_get_sync(adev->ddev->dev);
+ if (err < 0)
+ return err;
+
+ if (is_support_sw_smu(adev))
err = smu_set_power_limit(&adev->smu, value);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+ else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- } else {
+ else
err = -EINVAL;
- }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
if (err)
return err;
@@ -2157,18 +2559,20 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
uint32_t sclk;
int r, size = sizeof(sclk);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the sclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2187,18 +2591,20 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
uint32_t mclk;
int r, size = sizeof(mclk);
- /* Can't get voltage when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
/* get the mclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk, &size);
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
if (r)
return r;
@@ -2762,17 +3168,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /* enable/disable UVD */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- mutex_unlock(&adev->pm.mutex);
- }
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+
/* enable/disable Low Memory PState for UVD (4k videos) */
if (adev->asic_type == CHIP_STONEY &&
adev->uvd.decode_image_width >= WIDTH_4K) {
@@ -2789,17 +3190,11 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /* enable/disable VCE */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- mutex_unlock(&adev->pm.mutex);
- }
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
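
Note the !enable in these calls: amdgpu_dpm_set_powergating_by_smu() takes a gate flag, and gating a block powers it down, so enabling UVD, VCE, or JPEG means requesting no gating. A hedged sketch making the inversion explicit, with amdgpu_dpm_enable_block() as a hypothetical wrapper name:

static void amdgpu_dpm_enable_block(struct amdgpu_device *adev,
				    enum amd_ip_block_type block, bool enable)
{
	/* "enable" maps to "do not gate": a gated block is powered down */
	int ret = amdgpu_dpm_set_powergating_by_smu(adev, block, !enable);

	if (ret)
		DRM_ERROR("dpm %s of block %d failed, ret = %d\n",
			  enable ? "enable" : "disable", block, ret);
}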
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -2818,12 +3213,10 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG, enable);
- if (ret)
- DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n",
- enable ? "true" : "false", ret);
- }
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
@@ -3233,8 +3626,12 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_device *ddev = adev->ddev;
u32 flags = 0;
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+ return r;
amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
@@ -3243,23 +3640,28 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return 0;
}
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
- seq_printf(m, "PX asic powered off\n");
- } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+
+ if (!is_support_sw_smu(adev) &&
+ adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
+ r = 0;
} else {
- return amdgpu_debugfs_pm_info_pp(m, adev);
+ r = amdgpu_debugfs_pm_info_pp(m, adev);
}
- return 0;
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return r;
}
static const struct drm_info_list amdgpu_pm_info_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 0e6dba9f60f0..1311d6aec5d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -52,7 +52,7 @@ static int amdgpu_perf_event_init(struct perf_event *event)
return -ENOENT;
/* update the hw_perf_event struct with config data */
- hwc->conf = event->attr.config;
+ hwc->config = event->attr.config;
return 0;
}
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
- pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
- pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -101,13 +101,13 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
+ pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
&count);
break;
default:
count = 0;
break;
- };
+ }
} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);
local64_add(count - prev, &event->count);
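
amdgpu_perf_read() accumulates deltas locklessly: it re-reads the hardware counter until it can swing prev_count forward from the value it sampled, so two concurrent readers never credit the same delta twice. The core of that loop, with read_hw_counter() as a hypothetical stand-in for the DF pmc_get_count() call:

	u64 prev, now;

	do {
		prev = local64_read(&hwc->prev_count);
		now = read_hw_counter();	/* hypothetical counter read */
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	local64_add(now - prev, &event->count);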
@@ -126,11 +126,11 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
break;
default:
break;
- };
+ }
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -156,11 +156,12 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+ retval = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 1);
break;
default:
return 0;
- };
+ }
if (retval)
return retval;
@@ -184,11 +185,11 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
break;
default:
break;
- };
+ }
perf_event_update_userpage(event);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c14f2ccd0677..3a1570dafe34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -191,9 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+ DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
psp->cmd_buf_mem->cmd_id,
- psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+ psp->cmd_buf_mem->resp.status);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -365,11 +365,11 @@ static int psp_asd_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t asd_session_id)
+static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t session_id)
{
cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = asd_session_id;
+ cmd->cmd.cmd_unload_ta.session_id = session_id;
}
static int psp_asd_unload(struct psp_context *psp)
@@ -387,7 +387,7 @@ static int psp_asd_unload(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_asd_unload_cmd_buf(cmd, psp->asd_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -427,18 +427,20 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
return ret;
}
-static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
- uint32_t xgmi_ta_size, uint32_t shared_size)
+static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ta_bin_mc,
+ uint32_t ta_bin_size,
+ uint64_t ta_shared_mc,
+ uint32_t ta_shared_size)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}
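
Careful with the argument order here: the old per-TA builders took (ta_mc, mc_shared, ta_size, shared_size), while the generic helper groups each address with its size as (bin_mc, bin_size, shared_mc, shared_size), so every converted call site below reorders accordingly. The XGMI load path, for instance, becomes:

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,		/* TA binary address */
				 psp->ta_xgmi_ucode_size,	/* TA binary size */
				 psp->xgmi_context.xgmi_shared_mc_addr,
				 PSP_XGMI_SHARED_MEM_SIZE);	/* shared buffer */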
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
@@ -458,6 +460,36 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
return ret;
}
+static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+}
+
+int psp_ta_invoke(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
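
The per-TA invoke builders removed below were identical apart from the session id, which is what lets this generic psp_ta_invoke() replace them: each TA's invoke path collapses to a one-line wrapper, as the XGMI conversion later in this diff shows:

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}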
static int psp_xgmi_load(struct psp_context *psp)
{
int ret;
@@ -466,8 +498,6 @@ static int psp_xgmi_load(struct psp_context *psp)
/*
* TODO: bypass the loading in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
@@ -476,9 +506,11 @@ static int psp_xgmi_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
- psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->xgmi_context.xgmi_shared_mc_addr,
- psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_xgmi_ucode_size,
+ psp->xgmi_context.xgmi_shared_mc_addr,
+ PSP_XGMI_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -493,29 +525,25 @@ static int psp_xgmi_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t xgmi_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
-}
-
static int psp_xgmi_unload(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* XGMI TA unload is not currently supported on Arcturus */
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return 0;
/*
* TODO: bypass the unloading in sriov for now
*/
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
- psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -525,40 +553,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t xgmi_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->xgmi_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
static int psp_xgmi_terminate(struct psp_context *psp)
@@ -614,20 +611,6 @@ static int psp_xgmi_initialize(struct psp_context *psp)
}
// ras begin
-static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t ras_ta_mc, uint64_t ras_mc_shared,
- uint32_t ras_ta_size, uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -663,15 +646,17 @@ static int psp_ras_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
- psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->ras.ras_shared_mc_addr,
- psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_ras_ucode_size,
+ psp->ras.ras_shared_mc_addr,
+ PSP_RAS_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->ras.ras_initialized = 1;
+ psp->ras.ras_initialized = true;
psp->ras.session_id = cmd->resp.session_id;
}
@@ -680,13 +665,6 @@ static int psp_ras_load(struct psp_context *psp)
return ret;
}
-static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ras_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
-}
-
static int psp_ras_unload(struct psp_context *psp)
{
int ret;
@@ -702,7 +680,7 @@ static int psp_ras_unload(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -712,40 +690,15 @@ static int psp_ras_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t ras_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->ras.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
}
int psp_ras_enable_features(struct psp_context *psp,
@@ -791,7 +744,7 @@ static int psp_ras_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->ras.ras_initialized = 0;
+ psp->ras.ras_initialized = false;
/* free ras shared memory */
amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
@@ -832,24 +785,6 @@ static int psp_ras_initialize(struct psp_context *psp)
// ras end
// HDCP start
-static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t hdcp_ta_mc,
- uint64_t hdcp_mc_shared,
- uint32_t hdcp_ta_size,
- uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
- lower_32_bits(hdcp_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
- upper_32_bits(hdcp_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -886,15 +821,16 @@ static int psp_hdcp_load(struct psp_context *psp)
memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
psp->ta_hdcp_ucode_size);
- psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->hdcp_context.hdcp_shared_mc_addr,
- psp->ta_hdcp_ucode_size,
- PSP_HDCP_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ PSP_HDCP_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->hdcp_context.hdcp_initialized = 1;
+ psp->hdcp_context.hdcp_initialized = true;
psp->hdcp_context.session_id = cmd->resp.session_id;
}
@@ -930,12 +866,6 @@ static int psp_hdcp_initialize(struct psp_context *psp)
return 0;
}
-static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t hdcp_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
- cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
-}
static int psp_hdcp_unload(struct psp_context *psp)
{
@@ -952,7 +882,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+ psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
@@ -961,39 +891,15 @@ static int psp_hdcp_unload(struct psp_context *psp)
return ret;
}
-static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t hdcp_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->hdcp_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
}
static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1013,7 +919,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->hdcp_context.hdcp_initialized = 0;
+ psp->hdcp_context.hdcp_initialized = false;
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
@@ -1025,22 +931,6 @@ static int psp_hdcp_terminate(struct psp_context *psp)
// HDCP end
// DTM start
-static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t dtm_ta_mc,
- uint64_t dtm_mc_shared,
- uint32_t dtm_ta_size,
- uint32_t shared_size)
-{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
- cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
int ret;
@@ -1076,15 +966,16 @@ static int psp_dtm_load(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
- psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->dtm_context.dtm_shared_mc_addr,
- psp->ta_dtm_ucode_size,
- PSP_DTM_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_dtm_ucode_size,
+ psp->dtm_context.dtm_shared_mc_addr,
+ PSP_DTM_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
- psp->dtm_context.dtm_initialized = 1;
+ psp->dtm_context.dtm_initialized = true;
psp->dtm_context.session_id = cmd->resp.session_id;
}
@@ -1122,39 +1013,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
return 0;
}
-static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint32_t ta_cmd_id,
- uint32_t dtm_session_id)
-{
- cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
- cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
- cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
- /* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
- psp->dtm_context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- kfree(cmd);
-
- return ret;
+ return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}
static int psp_dtm_terminate(struct psp_context *psp)
@@ -1174,7 +1041,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (ret)
return ret;
- psp->dtm_context.dtm_initialized = 0;
+ psp->dtm_context.dtm_initialized = false;
/* free dtm shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
@@ -1310,6 +1177,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_VCN:
*type = GFX_FW_TYPE_VCN;
break;
+ case AMDGPU_UCODE_ID_VCN1:
+ *type = GFX_FW_TYPE_VCN1;
+ break;
case AMDGPU_UCODE_ID_DMCU_ERAM:
*type = GFX_FW_TYPE_DMCU_ERAM;
break;
@@ -1454,7 +1324,8 @@ out:
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
/* skip ucode loading in SRIOV VF */
continue;
@@ -1472,7 +1343,7 @@ out:
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
- AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+ AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -1503,16 +1374,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (!psp->cmd)
return -ENOMEM;
- /* this fw pri bo is not used under SRIOV */
- if (!amdgpu_sriov_vf(psp->adev)) {
- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
- if (ret)
- goto failed;
- }
+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
+ if (ret)
+ goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 5f8fd3e3535b..611021514c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -172,6 +172,8 @@ struct psp_dtm_context {
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+/* Define the VRAM size that will be encroached by BIST training. */
+#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
enum psp_memory_training_init_flag {
PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
@@ -202,7 +204,6 @@ struct psp_memory_training_context {
/*vram offset of the p2c training data*/
u64 p2c_train_data_offset;
- struct amdgpu_bo *p2c_bo;
/*vram offset of the c2p training data*/
u64 c2p_train_data_offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 04394c45aa03..cef94e2169fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -315,7 +315,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
default:
ret = -EINVAL;
break;
- };
+ }
if (ret)
return -EINVAL;
@@ -686,6 +686,7 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
+ int i;
if (!obj)
return -EINVAL;
@@ -700,6 +701,13 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
if (adev->umc.funcs->query_ras_error_address)
adev->umc.funcs->query_ras_error_address(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ if (adev->sdma.funcs->query_ras_error_count) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->sdma.funcs->query_ras_error_count(adev, i,
+ &err_data);
+ }
+ break;
case AMDGPU_RAS_BLOCK__GFX:
if (adev->gfx.funcs->query_ras_error_count)
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
@@ -734,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
+uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+{
+ uint32_t df_inst_id;
+
+ if ((!adev->df.funcs) ||
+ (!adev->df.funcs->get_df_inst_id) ||
+ (!adev->df.funcs->get_dram_base_addr))
+ return addr;
+
+ df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+
+ return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+}
+
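
On boards with more than one physical XGMI node the injection address is node-relative, so the new helper shifts it by this die's DRAM base as reported by the DF before the error is injected. A hedged worked example with purely illustrative numbers:

	/* If get_dram_base_addr() reports 0x100000000 for this die, a
	 * node-relative address of 0x2000 is injected as 0x100002000. */
	uint64_t base = 0x100000000ULL;		/* illustrative DF DRAM base */
	uint64_t rel = 0x2000;			/* node-relative error address */
	uint64_t addr = rel + base;		/* 0x100002000 */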
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -751,6 +773,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
if (!obj)
return -EINVAL;
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ block_info.address = get_xgmi_relative_phy_addr(adev,
+ block_info.address);
+ }
+
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__GFX:
if (adev->gfx.funcs->ras_error_inject)
@@ -1311,6 +1339,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
data = con->eh_data;
if (!data || data->count == 0) {
*bps = NULL;
+ ret = -EINVAL;
goto out;
}
@@ -1344,7 +1373,8 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
- amdgpu_device_gpu_recover(ras->adev, 0);
+ if (amdgpu_device_should_recover_gpu(ras->adev))
+ amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0);
}
@@ -1870,7 +1900,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
* See feature_enable_on_boot
*/
amdgpu_ras_disable_all_features(adev, 1);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
}
@@ -1933,6 +1963,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
- amdgpu_ras_reset_gpu(adev, false);
+ amdgpu_ras_reset_gpu(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index d4ade4739245..a5fe29a9373e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -494,8 +494,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
- bool is_baco)
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
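
With the is_baco parameter gone, callers no longer pick a reset type; they simply request a recovery and the RAS core decides how to perform it. The SDMA ECC handler later in this diff becomes simply:

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev);		/* was amdgpu_ras_reset_gpu(adev, 0) */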
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 6010999d9020..a2ee30b16212 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -160,7 +160,7 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
return AMDGPU_RAS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 761ff8be6314..485335267d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,8 +50,18 @@ struct amdgpu_sdma_instance {
bool burst_nop;
};
+struct amdgpu_sdma_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev,
+ void *ras_ih_info);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ uint32_t instance, void *ras_error_status);
+};
+
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+ struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
+ uint32_t num_sdma_sched;
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
@@ -59,6 +69,7 @@ struct amdgpu_sdma {
uint32_t srbm_soft_reset;
bool has_page_queue;
struct ras_common_if *ras_if;
+ const struct amdgpu_sdma_ras_funcs *funcs;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7c4b1cbd9a50..3ab46d4647e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -41,6 +41,7 @@
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
+#include <linux/sizes.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -1708,12 +1709,17 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
ctx->c2p_bo = NULL;
- amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
- ctx->p2c_bo = NULL;
-
return 0;
}
+static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+{
+ if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
+ vram_size -= SZ_1M;
+
+ return ALIGN(vram_size, SZ_1M);
+}
+
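
The new helper pins the c2p training buffer to a MiB boundary at the top of VRAM, stepping back a full MiB when the tail fragment is too small to hold the 4 KiB training block. A hedged worked example assuming a MiB-aligned 8 GiB part:

	u64 vram_size = 8ULL << 30;		/* 0x200000000, remainder mod 1 MiB is 0 */
	u64 off = amdgpu_ttm_training_get_c2p_offset(vram_size);
	/* 0 < SZ_4K + 1, so one MiB is subtracted before aligning:
	 * off == 0x1FFF00000, the last whole MiB below the top of VRAM */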
/**
* amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
*
@@ -1732,7 +1738,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
return 0;
}
- ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
@@ -1742,17 +1748,6 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
ctx->c2p_train_data_offset);
ret = amdgpu_bo_create_kernel_at(adev,
- ctx->p2c_train_data_offset,
- ctx->train_data_size,
- AMDGPU_GEM_DOMAIN_VRAM,
- &ctx->p2c_bo,
- NULL);
- if (ret) {
- DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
- goto Err_out;
- }
-
- ret = amdgpu_bo_create_kernel_at(adev,
ctx->c2p_train_data_offset,
ctx->train_data_size,
AMDGPU_GEM_DOMAIN_VRAM,
@@ -1760,15 +1755,12 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
NULL);
if (ret) {
DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
- goto Err_out;
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
return 0;
-
-Err_out:
- amdgpu_ttm_training_reserve_vram_fini(adev);
- return ret;
}
/**
@@ -1981,11 +1973,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
if (enable) {
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->mman.entity,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
r);
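Every drm_sched_entity_init() call site in this merge gets the same mechanical conversion: the entity no longer receives an array of run-queue pointers; it receives a priority plus an array of scheduler pointers and resolves the run queue internally. The shape of the conversion, distilled from the TTM hunk above (illustrative fragment, not buildable on its own):

    /* before: callers picked the run queue themselves */
    struct drm_sched_rq *rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
    r = drm_sched_entity_init(&entity, &rq, 1, NULL);

    /* after: callers pass the priority and the scheduler list;
     * the scheduler core resolves the run queue internally */
    struct drm_gpu_scheduler *sched = &ring->sched;
    r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_KERNEL,
                              &sched, 1, NULL);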
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index eaf2d5b9c92f..b0e656409c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -300,10 +300,10 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC2_JT,
AMDGPU_UCODE_ID_CP_MES,
AMDGPU_UCODE_ID_CP_MES_DATA,
- AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+ AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index d4fb9cf27e21..f4d40855147b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -95,13 +95,6 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- /* When "Full RAS" is enabled, the per-IP interrupt sources should
- * be disabled and the driver should only look for the aggregated
- * interrupt via sync flood
- */
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return AMDGPU_RAS_SUCCESS;
-
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
if (adev->umc.funcs &&
adev->umc.funcs->query_ras_error_count)
@@ -113,6 +106,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
+
/* still call query_ras_error_address to clear error status
* even if a NOMEM error is encountered
*/
@@ -132,7 +126,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
err_data->err_addr_cnt))
DRM_WARN("Failed to add ras bad page!\n");
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_ras_reset_gpu(adev);
}
kfree(err_data->err_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3283032a78e5..a615a1eb750b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,38 +21,6 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
-/* implement 64 bits REG operations via 32 bits interface */
-#define RREG64_UMC(reg) (RREG32(reg) | \
- ((uint64_t)RREG32((reg) + 1) << 32))
-#define WREG64_UMC(reg, v) \
- do { \
- WREG32((reg), lower_32_bits(v)); \
- WREG32((reg) + 1, upper_32_bits(v)); \
- } while (0)
-
-/*
- * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data,
- * uint32_t umc_reg_offset, uint32_t channel_index)
- */
-#define amdgpu_umc_for_each_channel(func) \
- struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \
- uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \
- for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \
- /* enable the index mode to query eror count per channel */ \
- adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \
- for (channel_inst = 0; \
- channel_inst < adev->umc.channel_inst_num; \
- channel_inst++) { \
- /* calc the register offset according to channel instance */ \
- umc_reg_offset = adev->umc.channel_offs * channel_inst; \
- /* get channel index of interleaved memory */ \
- channel_index = adev->umc.channel_idx_tbl[ \
- umc_inst * adev->umc.channel_inst_num + channel_inst]; \
- (func)(adev, err_data, umc_reg_offset, channel_index); \
- } \
- } \
- adev->umc.funcs->disable_umc_index_mode(adev);
-
struct amdgpu_umc_funcs {
void (*err_cnt_init)(struct amdgpu_device *adev);
int (*ras_late_init)(struct amdgpu_device *adev);
@@ -60,9 +28,6 @@ struct amdgpu_umc_funcs {
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
- void (*enable_umc_index_mode)(struct amdgpu_device *adev,
- uint32_t umc_instance);
- void (*disable_umc_index_mode)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d587ffe2af8e..a92f3b18e657 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int r;
ring = &adev->uvd.inst[0].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 46b590af2fd2..59ddba137946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int r;
ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
@@ -651,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
if ((addr + (uint64_t)size) >
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+ DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 428cfd58b37d..f96464e2c157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -28,19 +28,10 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm.h>
-
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
-
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
@@ -84,6 +75,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case CHIP_ARCTURUS:
fw_name = FIRMWARE_ARCTURUS;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
break;
case CHIP_RENOIR:
fw_name = FIRMWARE_RENOIR;
@@ -174,15 +168,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
}
- }
- if (adev->vcn.indirect_sram) {
- r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
- &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
- return r;
+ if (adev->vcn.indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
}
}
@@ -195,15 +189,14 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (adev->vcn.indirect_sram) {
- amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
- &adev->vcn.dpg_sram_gpu_addr,
- (void **)&adev->vcn.dpg_sram_cpu_addr);
- }
-
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+ if (adev->vcn.indirect_sram) {
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
+ &adev->vcn.inst[j].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+ }
kvfree(adev->vcn.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
@@ -294,6 +287,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
}
@@ -306,26 +300,17 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec))
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- else
- new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
- adev->vcn.pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, j, &new_state);
}
- fence[j] += amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec);
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
fences += fence[j];
}
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
- if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
- else
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@@ -338,11 +323,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
- if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
- else
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -358,17 +340,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->jpeg.inst[ring->me].ring_dec))
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- else
- new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
- new_state.jpeg = VCN_DPG_STATE__PAUSE;
- adev->vcn.pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
}
@@ -518,9 +493,14 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
+ /* temporarily disable ib test for sriov */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -676,10 +656,15 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;
+ /* temporarily disable ib test for sriov */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 402a5046b985..6fe057329de2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -31,6 +31,7 @@
#define AMDGPU_VCN_MAX_ENC_RINGS 3
#define AMDGPU_MAX_VCN_INSTANCES 2
+#define AMDGPU_MAX_VCN_ENC_RINGS (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)
#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
@@ -56,33 +57,41 @@
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
- ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
+#define mmUVD_REG_XX_MASK 0x026c
+#define mmUVD_REG_XX_MASK_BASE_IDX 1
+
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
UVD_DPG_LMA_CTL__MASK_EN_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
- RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \
+ RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \
do { \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
} while (0)
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
bool video_range, aon_range; \
\
- addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
@@ -100,27 +109,27 @@
internal_reg_offset >>= 2; \
})
-#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) \
- ({ \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
- (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); \
+#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \
+ ({ \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect) \
- do { \
- if (!indirect) { \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
- (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- } else { \
- *adev->vcn.dpg_sram_curr_addr++ = offset; \
- *adev->vcn.dpg_sram_curr_addr++ = value; \
- } \
+#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
+ } \
} while (0)
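In indirect mode the macro does not touch the LMA registers at all; it appends (offset, value) u32 pairs to the per-instance DPG scratch buffer through dpg_sram_curr_addr, matching the 64 * 2 * 4-byte allocation made in amdgpu_vcn_sw_init() above. A self-contained sketch of that cursor pattern (buffer size is taken from the allocation; the sample values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* the 512-byte buffer allocated per VCN instance (64 * 2 * 4 bytes)
     * holds up to 64 (offset, value) u32 pairs */
    #define DPG_SRAM_U32S (64 * 2)

    int main(void)
    {
        uint32_t sram[DPG_SRAM_U32S];
        uint32_t *curr = sram;

        /* the indirect branch of WREG32_SOC15_DPG_MODE_2_0 appends a pair */
        *curr++ = 0x1234;    /* register offset */
        *curr++ = 0xcafe;    /* value to program */

        assert(curr - sram == 2);
        assert(sram[0] == 0x1234 && sram[1] == 0xcafe);
        return 0;
    }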
enum engine_status_constants {
@@ -169,6 +178,11 @@ struct amdgpu_vcn_inst {
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
+ struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
+ void *dpg_sram_cpu_addr;
+ uint64_t dpg_sram_gpu_addr;
+ uint32_t *dpg_sram_curr_addr;
};
struct amdgpu_vcn {
@@ -177,21 +191,19 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
- struct dpg_pause_state pause_state;
-
bool indirect_sram;
- struct amdgpu_bo *dpg_sram_bo;
- void *dpg_sram_cpu_addr;
- uint64_t dpg_sram_gpu_addr;
- uint32_t *dpg_sram_curr_addr;
uint8_t num_vcn_inst;
- struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- struct amdgpu_vcn_reg internal;
+ struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct amdgpu_vcn_reg internal;
+ struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
+ struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
+ uint32_t num_vcn_enc_sched;
+ uint32_t num_vcn_dec_sched;
unsigned harvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
};
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 103033f96f13..adc813cde8e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->pg_flags = 0;
}
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_rreg);
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for gpu reset case because this way may
- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
- * never return if we keep waiting in virt_kiq_rreg, which cause
- * gpu_recover() hang there.
- *
- * also don't wait anymore for IRQ context
- * */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
- goto failed_kiq_read;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq_read;
-
- return adev->wb.wb[adev->virt.reg_val_offs];
-
-failed_kiq_read:
- pr_err("failed to read reg:%x\n", reg);
- return ~0;
-}
-
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_wreg);
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for gpu reset case because this way may
- * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
- * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
- * never return if we keep waiting in virt_kiq_rreg, which cause
- * gpu_recover() hang there.
- *
- * also don't wait anymore for IRQ context
- * */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
- goto failed_kiq_write;
-
- might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq_write;
-
- return;
-
-failed_kiq_write:
- pr_err("failed to write reg:%x\n", reg);
-}
-
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 4d1ac7612967..daaf909d009a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -287,8 +287,6 @@ static inline bool is_virtual_machine(void)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t rreg1,
uint32_t ref, uint32_t mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8f26504a59a7..d16231d6a790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -83,6 +83,32 @@ struct amdgpu_prt_cb {
};
/**
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+ mutex_lock(&vm->eviction_lock);
+ vm->saved_flags = memalloc_nofs_save();
+}
+
+static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+ if (mutex_trylock(&vm->eviction_lock)) {
+ vm->saved_flags = memalloc_nofs_save();
+ return 1;
+ }
+ return 0;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+ memalloc_nofs_restore(vm->saved_flags);
+ mutex_unlock(&vm->eviction_lock);
+}
+
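The lock wrappers pair the mutex with a NOFS allocation scope: memalloc_nofs_save() must come after mutex_lock() and memalloc_nofs_restore() before mutex_unlock(), and the saved flags live in the VM so the scope can span functions. A hypothetical caller, mirroring the amdgpu_vm_bo_update_mapping() hunk below (sketch only, not buildable outside the kernel):

    /* hypothetical caller, mirroring amdgpu_vm_bo_update_mapping() */
    static int example_update(struct amdgpu_vm *vm)
    {
        int r = 0;

        amdgpu_vm_eviction_lock(vm);  /* takes mutex, then memalloc_nofs_save() */
        if (vm->evicting) {
            r = -EBUSY;
            goto out;
        }
        /* ... page-table updates; any allocation here is implicitly NOFS ... */
    out:
        amdgpu_vm_eviction_unlock(vm);  /* memalloc_nofs_restore(), then unlock */
        return r;
    }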
+/**
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
@@ -678,9 +704,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
}
- mutex_lock(&vm->eviction_lock);
+ amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
- mutex_unlock(&vm->eviction_lock);
+ amdgpu_vm_eviction_unlock(vm);
return 0;
}
@@ -1559,7 +1585,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD;
- mutex_lock(&vm->eviction_lock);
+ amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
goto error_unlock;
@@ -1576,7 +1602,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, fence);
error_unlock:
- mutex_unlock(&vm->eviction_lock);
+ amdgpu_vm_eviction_unlock(vm);
return r;
}
@@ -2533,18 +2559,18 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Try to block ongoing updates */
- if (!mutex_trylock(&bo_base->vm->eviction_lock))
+ if (!amdgpu_vm_eviction_trylock(bo_base->vm))
return false;
/* Don't evict VM page tables while they are updated */
if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
!dma_fence_is_signaled(bo_base->vm->last_delayed)) {
- mutex_unlock(&bo_base->vm->eviction_lock);
+ amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
bo_base->vm->evicting = true;
- mutex_unlock(&bo_base->vm->eviction_lock);
+ amdgpu_vm_eviction_unlock(bo_base->vm);
return true;
}
@@ -2753,14 +2779,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
+
/* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
- adev->vm_manager.vm_pte_num_rqs, NULL);
+ r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
- r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
- adev->vm_manager.vm_pte_num_rqs, NULL);
+ r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 7e0eb36da27d..b4640ab38c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,6 +30,7 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -239,9 +240,12 @@ struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
- /* Lock to prevent eviction while we are updating page tables */
+ /* Lock to prevent eviction while we are updating page tables
+ * use vm_eviction_lock/unlock(vm)
+ */
struct mutex eviction_lock;
bool evicting;
+ unsigned int saved_flags;
/* BOs who needs a validation */
struct list_head evicted;
@@ -327,8 +331,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rqs;
+ struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_scheds;
struct amdgpu_ring *page_fault;
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 61d13d8b7b20..a97af422575a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -146,16 +146,16 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
- fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in);
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
if (fica_out != 0x1f)
pr_err("xGMI error counters not enabled!\n");
- fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in);
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
if ((fica_out & 0xffff) == 2)
error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
- adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+ adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
}
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
INIT_LIST_HEAD(&tmp->device_list);
mutex_init(&tmp->hive_lock);
mutex_init(&tmp->reset_lock);
+ task_barrier_init(&tmp->tb);
if (lock)
mutex_lock(&tmp->hive_lock);
@@ -290,13 +291,7 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
- if (is_support_sw_smu_xgmi(adev))
- ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_xgmi_pstate)
- ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
- pstate);
-
+ ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
if (ret) {
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
@@ -408,6 +403,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
top_info->num_nodes = count;
hive->number_devices = count;
+ task_barrier_add_task(&hive->tb);
+
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
/* update node list for other device in the hive */
@@ -470,6 +467,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
} else {
+ task_barrier_rem_task(&hive->tb);
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bbf504ff7051..74011fbc2251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
#ifndef __AMDGPU_XGMI_H__
#define __AMDGPU_XGMI_H__
+#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
struct device_attribute dev_attr;
struct amdgpu_device *adev;
int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+ struct task_barrier tb;
};
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index d9cc746af5e6..847ca9b3ce4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_VEGA20:
case CHIP_RAVEN:
athub_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
athub_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index ceb9aa4df0e7..921a69abda55 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_NAVI14:
case CHIP_NAVI12:
athub_v2_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
athub_v2_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index e9822ea8bb19..006f21ef7ddf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1312,19 +1312,13 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
static bool cik_asic_supports_baco(struct amdgpu_device *adev)
{
- bool baco_support;
-
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
- smu7_asic_get_baco_capability(adev, &baco_support);
- break;
+ return amdgpu_dpm_is_baco_supported(adev);
default:
- baco_support = false;
- break;
+ return false;
}
-
- return baco_support;
}
static enum amd_reset_method
@@ -1366,7 +1360,7 @@ static int cik_asic_reset(struct amdgpu_device *adev)
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
- r = smu7_asic_baco_reset(adev);
+ r = amdgpu_dpm_baco_reset(adev);
} else {
r = cik_asic_pci_config_reset(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 9870bf27870e..f91ab4c246b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,7 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c45304f1047c..580d3f93d670 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
u32 extra_bits = vmid & 0xf;
/* IB packet must end on an 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
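Both padding changes rely on the same identity: for a power-of-two modulus m, (m - (x % m)) % m equals (-x) & (m - 1) in unsigned arithmetic, and in the emit_ib case 12 ≡ 4 (mod 8) folds the constant as well. A self-contained check for m = 8:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t x;

        /* old form on the left, new form on the right */
        for (x = 0; x < 64; x++)
            assert(((8 - (x & 7)) % 8) == ((-x) & 7));
        return 0;
    }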
@@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index d6221298b477..d6aca1c08068 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -31,6 +31,9 @@ static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
+ adev->df.hash_status.hash_64k = false;
+ adev->df.hash_status.hash_2m = false;
+ adev->df.hash_status.hash_1g = false;
}
static void df_v1_7_sw_fini(struct amdgpu_device *adev)
@@ -66,7 +69,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
- fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
return df_v1_7_channel_number[fb_channel_number];
}
@@ -77,7 +80,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
u32 tmp;
/* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
+ adev->df.funcs->enable_broadcast_mode(adev, true);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
@@ -92,7 +95,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
/* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ adev->df.funcs->enable_broadcast_mode(adev, false);
}
static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..5a1bd8ed1a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -27,6 +27,9 @@
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
+#define DF_3_6_SMN_REG_INST_DIST 0x8
+#define DF_3_6_INST_CNT 8
+
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
16, 32, 0, 0, 0, 2, 4, 8};
@@ -183,6 +186,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+/* same as perfmon_wreg, but returns a status based on a read-back check of the written values */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
+{
+ unsigned long flags, address, data;
+ uint32_t lo_val_rb, hi_val_rb;
+
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, lo_addr);
+ WREG32(data, lo_val);
+ WREG32(address, hi_addr);
+ WREG32(data, hi_val);
+
+ WREG32(address, lo_addr);
+ lo_val_rb = RREG32(data);
+ WREG32(address, hi_addr);
+ hi_val_rb = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+ return -EBUSY;
+
+ return 0;
+}
+
+
+/*
+ * Retry arming the counters every 100 usecs within a 1 millisecond window.
+ * If arming still fails when the timeout expires, return an error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL 100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t hi_val)
+{
+ int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+ while (countdown) {
+
+ if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+ hi_addr, hi_val))
+ break;
+
+ countdown -= ARM_RETRY_USEC_INTERVAL;
+ udelay(ARM_RETRY_USEC_INTERVAL);
+ }
+
+ return countdown > 0 ? 0 : -ETIME;
+}
+
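The retry loop burns the timeout budget in fixed udelay() slices and reports -ETIME only if every attempt failed. A userspace rendering of the same countdown pattern, with the register read-back stubbed out (try_arm() is a stand-in, not driver code):

    #include <errno.h>
    #include <stdio.h>

    #define ARM_RETRY_USEC_TIMEOUT  1000
    #define ARM_RETRY_USEC_INTERVAL  100

    /* stand-in for the register read-back check; fails the first two tries */
    static int try_arm(int *attempts)
    {
        return ++(*attempts) < 3 ? -EBUSY : 0;
    }

    int main(void)
    {
        int countdown = ARM_RETRY_USEC_TIMEOUT;
        int attempts = 0;

        while (countdown) {
            if (!try_arm(&attempts))
                break;
            countdown -= ARM_RETRY_USEC_INTERVAL;
            /* udelay(ARM_RETRY_USEC_INTERVAL) in the kernel version */
        }
        printf("%s after %d attempts\n",
               countdown > 0 ? "armed" : "timed out", attempts);
        return countdown > 0 ? 0 : 1;  /* -ETIME in the kernel version */
    }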
/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
struct device_attribute *attr,
@@ -207,6 +265,32 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
+static void df_v3_6_query_hashes(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ adev->df.hash_status.hash_64k = false;
+ adev->df.hash_status.hash_2m = false;
+ adev->df.hash_status.hash_1g = false;
+
+ if (adev->asic_type != CHIP_ARCTURUS)
+ return;
+
+ /* encoding for hash-enabled on Arcturus */
+ if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
+ tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
+ adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl64K);
+ adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl2M);
+ adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
+ DF_CS_UMC_AON0_DfGlobalCtrl,
+ GlbHashIntlvCtl1G);
+ }
+}
+
/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
@@ -218,6 +302,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
adev->df_perfmon_config_assign_mask[i] = 0;
+
+ df_v3_6_query_hashes(adev);
}
static void df_v3_6_sw_fini(struct amdgpu_device *adev)
@@ -256,7 +342,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
- fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
fb_channel_number = 0;
@@ -270,7 +356,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
/* Put DF on broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, true);
+ adev->df.funcs->enable_broadcast_mode(adev, true);
if (enable) {
tmp = RREG32_SOC15(DF, 0,
@@ -289,7 +375,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
/* Exit broadcast mode */
- adev->df_funcs->enable_broadcast_mode(adev, false);
+ adev->df.funcs->enable_broadcast_mode(adev, false);
}
}
@@ -334,20 +420,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
switch (target_cntr) {
case 0:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break;
case 1:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break;
case 2:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break;
case 3:
- *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
- *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break;
}
@@ -422,6 +508,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
return -ENOSPC;
}
+#define DEFERRED_ARM_MASK (1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+ uint64_t config, bool is_deferred)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ if (target_cntr < 0)
+ return -EINVAL;
+
+ if (is_deferred)
+ adev->df_perfmon_config_assign_mask[target_cntr] |=
+ DEFERRED_ARM_MASK;
+ else
+ adev->df_perfmon_config_assign_mask[target_cntr] &=
+ ~DEFERRED_ARM_MASK;
+
+ return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+ uint64_t config)
+{
+ int target_cntr;
+
+ target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+ /*
+ * we never get target_cntr < 0 since this function is only called from
+ * pmc_get_count for now, but we should check anyway.
+ */
+ return (target_cntr >= 0 &&
+ (adev->df_perfmon_config_assign_mask[target_cntr]
+ & DEFERRED_ARM_MASK));
+
+}
+
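The deferred flag rides in bit 31 of the per-counter entry in df_perfmon_config_assign_mask, alongside the assigned config bits. A minimal sketch of the set/test/clear round trip (1ULL keeps the shift well-defined here; the sample mask value is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define DEFERRED_ARM_MASK (1ULL << 31)

    int main(void)
    {
        /* stand-in for adev->df_perfmon_config_assign_mask[target_cntr] */
        uint64_t assign_mask = 0x00c3;  /* low bits: the assigned config */

        assign_mask |= DEFERRED_ARM_MASK;          /* set_deferred(true) */
        assert(assign_mask & DEFERRED_ARM_MASK);   /* is_deferred() */

        assign_mask &= ~DEFERRED_ARM_MASK;         /* set_deferred(false) */
        assert(!(assign_mask & DEFERRED_ARM_MASK));
        assert(assign_mask == 0x00c3);             /* config bits untouched */
        return 0;
    }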
/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
uint64_t config)
@@ -451,29 +575,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
int is_enable)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
- int ret = 0;
+ int err = 0, ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
+ if (is_enable)
+ return df_v3_6_pmc_add_cntr(adev, config);
df_v3_6_reset_perfmon_cntr(adev, config);
- if (is_enable) {
- ret = df_v3_6_pmc_add_cntr(adev, config);
- } else {
- ret = df_v3_6_pmc_get_ctrl_settings(adev,
+ ret = df_v3_6_pmc_get_ctrl_settings(adev,
config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
&hi_val);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+
+ err = df_v3_6_perfmon_arm_with_retry(adev,
+ lo_base_addr,
+ lo_val,
+ hi_base_addr,
+ hi_val);
- df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
- hi_base_addr, hi_val);
- }
+ if (err)
+ ret = df_v3_6_pmc_set_deferred(adev, config, true);
break;
default:
@@ -501,7 +629,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
if (ret)
return ret;
- df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+ df_v3_6_reset_perfmon_cntr(adev, config);
if (is_disable)
df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +646,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
*count = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
-
df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
&hi_base_addr);
if ((lo_base_addr == 0) || (hi_base_addr == 0))
return;
+ /* rearm the counter, or throw the count value away if rearming fails */
+ if (df_v3_6_pmc_is_deferred(adev, config)) {
+ int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+ lo_base_addr, lo_val,
+ hi_base_addr, hi_val);
+
+ if (rearm_err)
+ return;
+
+ df_v3_6_pmc_set_deferred(adev, config, false);
+ }
+
df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
hi_base_addr, &hi_val);
@@ -542,12 +681,63 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
config, lo_base_addr, hi_base_addr, lo_val, hi_val);
break;
-
default:
break;
}
}
+static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
+ uint32_t df_inst)
+{
+ uint32_t base_addr_reg_val = 0;
+ uint64_t base_addr = 0;
+
+ base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
+ df_inst * DF_3_6_SMN_REG_INST_DIST);
+
+ if (REG_GET_FIELD(base_addr_reg_val,
+ DF_CS_UMC_AON0_DramBaseAddress0,
+ AddrRngVal) == 0) {
+ DRM_WARN("address range not valid");
+ return 0;
+ }
+
+ base_addr = REG_GET_FIELD(base_addr_reg_val,
+ DF_CS_UMC_AON0_DramBaseAddress0,
+ DramBaseAddr);
+
+ return base_addr << 28;
+}
+
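DramBaseAddress0 stores the base in 256 MiB (2^28 byte) granularity, which is why the field value is shifted left by 28 on the way out. For example:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* DramBaseAddr field value of 0x4 => 4 * 256 MiB = 1 GiB */
        uint64_t base_addr = 0x4;

        assert((base_addr << 28) == 0x40000000ULL);
        return 0;
    }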
+static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
+{
+ uint32_t xgmi_node_id = 0;
+ uint32_t df_inst_id = 0;
+
+ /* Walk through DF dst nodes to find current XGMI node */
+ for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
+
+ xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
+ df_inst_id * DF_3_6_SMN_REG_INST_DIST);
+ xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
+ DF_CS_UMC_AON0_DramLimitAddress0,
+ DstFabricID);
+
+ /* TODO: establish why the dest fabric id is offset by 7 */
+ xgmi_node_id = xgmi_node_id >> 7;
+
+ if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
+ break;
+ }
+
+ if (df_inst_id == DF_3_6_INST_CNT) {
+ DRM_WARN("cant match df dst id with gpu node");
+ return 0;
+ }
+
+ return df_inst_id;
+}
+
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -561,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_stop = df_v3_6_pmc_stop,
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
- .set_fica = df_v3_6_set_fica
+ .set_fica = df_v3_6_set_fica,
+ .get_dram_base_addr = df_v3_6_get_dram_base_addr,
+ .get_df_inst_id = df_v3_6_get_df_inst_id
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 98db25215d0d..1785fdad6ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -40,6 +40,7 @@
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
@@ -120,7 +121,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -168,7 +169,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -345,15 +346,29 @@ static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
+static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
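Each *_size field in kiq_pm4_funcs is the number of dwords the matching emitter writes, letting callers reserve ring space before emitting; invalidate_tlbs_size = 2 matches the header plus one payload dword above. A stubbed, self-contained check of that bookkeeping (the ring mock and sample values are assumptions, not driver code):

    #include <assert.h>
    #include <stdint.h>

    static int emitted;

    /* mock of amdgpu_ring_write(): just count dwords */
    static void ring_write(uint32_t dw)
    {
        (void)dw;
        emitted++;
    }

    int main(void)
    {
        const int invalidate_tlbs_size = 2;

        ring_write(0xdead0000);  /* PACKET3(PACKET3_INVALIDATE_TLBS, 0) header */
        ring_write(0x0000beef);  /* dst_sel | all_hub | pasid | flush_type */

        assert(emitted == invalidate_tlbs_size);
        return 0;
    }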
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx10_kiq_set_resources,
.kiq_map_queues = gfx10_kiq_map_queues,
.kiq_unmap_queues = gfx10_kiq_unmap_queues,
.kiq_query_status = gfx10_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
+ .invalidate_tlbs_size = 2,
};
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -471,18 +486,10 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
else
udelay(1);
}
- if (i < adev->usec_timeout) {
- if (amdgpu_emu_mode == 1)
- DRM_INFO("ring test on %d succeeded in %d msecs\n",
- ring->idx, i);
- else
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -532,14 +539,10 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -588,8 +591,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
}
if (adev->gfx.cp_fw_write_wait == false)
- DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
- GRBM requires 1-cycle delay in cp firmware\n");
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
}
@@ -820,10 +822,11 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
+ if (info->fw) {
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
if (adev->gfx.rlc.is_rlc_v2_1 &&
adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
@@ -1963,7 +1966,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
rlc_toc++;
- };
+ }
return 0;
}
@@ -3334,8 +3337,11 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3606,23 +3612,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
- i, ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->sched.ready = true;
- DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
- i, ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_test_ring(ring);
+ r = amdgpu_ring_test_helper(ring);
if (r)
- ring->sched.ready = false;
+ return r;
}
return 0;
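amdgpu_ring_test_helper() centralizes what these loops used to open-code: run the ring test, log a failure, and record the outcome in ring->sched.ready so the scheduler skips dead rings. A hedged sketch of the wrapper's shape (the real helper lives in amdgpu_ring.c and its logging may differ):

    /* sketch, not verbatim driver code */
    int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
    {
        int r = amdgpu_ring_test_ring(ring);

        if (r)
            DRM_DEV_ERROR(ring->adev->dev, "ring %s test failed (%d)\n",
                          ring->name, r);
        ring->sched.ready = !r;
        return r;
    }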
@@ -4230,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -4256,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
gfx_v10_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4738,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4746,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cfc1403fc855..fa245973de12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4558,8 +4558,11 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -6446,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -6454,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2616f1b59bbd..b33a4eb39193 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -48,8 +48,8 @@
#include "amdgpu_ras.h"
-#include "sdma0/sdma0_4_0_offset.h"
-#include "sdma1/sdma1_4_0_offset.h"
+#include "gfx_v9_4.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -738,9 +738,138 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ /* vmid_mask:0* queue_type:0 (KIQ) */
+ PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+ amdgpu_ring_write(kiq_ring,
+ lower_32_bits(queue_mask)); /* queue mask lo */
+ amdgpu_ring_write(kiq_ring,
+ upper_32_bits(queue_mask)); /* queue mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* oac mask */
+ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+ PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+ PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+ /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+ /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+ PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+ /* num_queues: must be 1 */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
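+/* For PREEMPT_QUEUES_NO_UNMAP the trailing three dwords presumably carry
+ * a fence address/value pair (gpu_addr/seq) for the CP to signal once the
+ * queue has been preempted; for plain unmap actions they are padded with
+ * zeroes, as the helper below shows.
+ */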
+static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* action, queue_sel: 0, eng_sel, num_queues: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(action) |
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+ if (action == PREEMPT_QUEUES_NO_UNMAP) {
+ amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, seq);
+ } else {
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+}
+
+static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+ PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+ PACKET3_QUERY_STATUS_COMMAND(2));
+ /* doorbell offset and engine select of the queue being queried */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+ PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
+ .kiq_set_resources = gfx_v9_0_kiq_set_resources,
+ .kiq_map_queues = gfx_v9_0_kiq_map_queues,
+ .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
+ .kiq_query_status = gfx_v9_0_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .set_resources_size = 8,
+ .map_queues_size = 7,
+ .unmap_queues_size = 6,
+ .query_status_size = 7,
+ .invalidate_tlbs_size = 2,
+};
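+/* The *_size fields are the dword counts of the packets emitted by the
+ * helpers above, PACKET3 header included (e.g. map_queues_size = 7 is one
+ * header plus six payload writes in gfx_v9_0_kiq_map_queues()), so generic
+ * code such as amdgpu_gfx_enable_kcq() can size its ring allocation before
+ * emitting them.
+ */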
+
+static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+}
+
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -981,8 +1110,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46))
- DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
- GRBM requires 1-cycle delay in cp firmware\n");
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -1034,25 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
}
}
+struct amdgpu_gfxoff_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
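+/* The all-zero entry terminates the quirk list; the lookup below walks it
+ * until chip_device == 0, matching the full PCI identity (vendor/device/
+ * subsystem IDs and revision) of the probed device.
+ */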
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+ const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
+ if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- /* Disable GFXOFF on original raven. There are combinations
- * of sbios and platforms that are not stable.
- */
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- &&((adev->gfx.rlc_fw_version != 106 &&
- adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
- (adev->gfx.rlc_feature_version < 1) ||
- !adev->gfx.rlc.is_rlc_v2_1))
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ ((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
@@ -1831,6 +1988,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.query_ras_error_count = &gfx_v9_0_query_ras_error_count
};
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count
+};
+
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -1882,6 +2050,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARCTURUS:
+ adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2272,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+ DISABLE_BARRIER_WAITCNT, 1);
+ WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -2317,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_init_compute_vmid(adev);
gfx_v9_0_init_gds_vmid(adev);
+ gfx_v9_0_init_sq_config(adev);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -3116,74 +3302,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
-static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- uint64_t queue_mask = 0;
- int r, i;
-
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
- continue;
-
- /* This situation may be hit in the future if a new HW
- * generation exposes more than 64 queues. If so, the
- * definition of queue_mask needs updating */
- if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
- break;
- }
-
- queue_mask |= (1ull << i);
- }
-
- r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- /* set resources */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
- amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
- PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
- amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
- amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* oac mask */
- amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
- uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
- /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
- PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
- PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
- PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
- PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
- PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
- PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
- PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
- amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
- }
-
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
-
- return r;
-}
-
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3320,8 +3438,11 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3590,7 +3711,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = gfx_v9_0_kiq_kcq_enable(adev);
+ r = amdgpu_gfx_enable_kcq(adev);
done:
return r;
}
@@ -3647,6 +3768,23 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
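+/* Presumably this mirrors the data-fabric address-hashing state (the
+ * 64K/2M/1G flags in adev->df.hash_status) into TCP_ADDR_CONFIG so the
+ * texture-cache address decode matches the DF interleaving on Arcturus.
+ */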
+static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->asic_type != CHIP_ARCTURUS)
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
+ adev->df.hash_status.hash_64k);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
+ adev->df.hash_status.hash_2m);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
+ adev->df.hash_status.hash_1g);
+ WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
+}
+
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
if (adev->asic_type != CHIP_ARCTURUS)
@@ -3664,6 +3802,8 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_constants_init(adev);
+ gfx_v9_0_init_tcp_config(adev);
+
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3675,36 +3815,6 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
-static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
-{
- int r, i;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-
- r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
- if (r)
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
- PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
- PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- }
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ disable failed\n");
-
- return r;
-}
-
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3716,7 +3826,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
/* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ amdgpu_gfx_disable_kcq(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3933,46 +4043,61 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+/* When the register arrays below are changed, please update gpr_reg_size
+ * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds()
+ * so that they cover all gfx9 ASICs. */
static const struct soc15_reg_entry vgpr_init_regs[] = {
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
static const struct soc15_reg_entry sgpr1_init_regs[] = {
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};
static const struct soc15_reg_entry sgpr2_init_regs[] = {
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -4006,8 +4131,6 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
- { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), 0, 1, 1},
- { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_EDC_COUNTER), 0, 1, 1},
{ SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
@@ -4063,10 +4186,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- int r, i, j, k;
+ int r, i;
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
+ int compute_dim_x = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se;
+ int sgpr_work_group_size = 5;
+ int gpr_reg_size = compute_dim_x / 16 + 6;
+
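+ /* Sizing sketch (illustrative numbers; assuming a part with 4 SEs,
+ * 1 SH per SE and 16 CUs per SH, as on Vega20): compute_dim_x =
+ * 4 * 16 * 1 = 64, so gpr_reg_size = 64 / 16 + 6 = 10, i.e. the six
+ * common dispatch registers plus one COMPUTE_STATIC_THREAD_MGMT_SE
+ * entry per shader engine from the init arrays above.
+ */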
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
@@ -4076,11 +4205,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
return 0;
total_size =
- ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
- ((ARRAY_SIZE(sgpr1_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
total_size +=
- ((ARRAY_SIZE(sgpr2_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
@@ -4107,7 +4236,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* VGPR */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
- PACKET3_SET_SH_REG_START;
@@ -4123,7 +4252,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 0x40*2; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4135,7 +4264,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* SGPR1 */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
- PACKET3_SET_SH_REG_START;
@@ -4151,7 +4280,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 0xA0*2; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4163,7 +4292,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* SGPR2 */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
- PACKET3_SET_SH_REG_START;
@@ -4179,7 +4308,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 0xA0*2; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4203,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- /* read back registers to clear the counters */
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
- gfx_v9_0_select_se_sh(adev, j, 0x0, k);
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
- }
- }
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ gfx_v9_0_clear_ras_edc_counter(adev);
+ break;
+ case CHIP_ARCTURUS:
+ gfx_v9_4_clear_ras_edc_counter(adev);
+ break;
+ default:
+ break;
}
- WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
- mutex_unlock(&adev->grbm_idx_mutex);
fail:
amdgpu_ib_free(adev, &ib, NULL);
@@ -4232,6 +4360,7 @@ static int gfx_v9_0_early_init(void *handle)
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
gfx_v9_0_set_gds_init(adev);
@@ -4245,9 +4374,17 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = gfx_v9_0_do_edc_gds_workarounds(adev);
- if (r)
- return r;
+ /*
+ * Temporary workaround: on several cards the CP firmware fails to
+ * update the read pointer when CPDMA writes the clearing operation
+ * to GDS during the suspend/resume sequence, so limit this
+ * operation to the cold-boot sequence.
+ */
+ if (!adev->in_suspend) {
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
+ if (r)
+ return r;
+ }
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
@@ -4576,7 +4713,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->asic_type) {
case CHIP_RAVEN:
@@ -4638,7 +4775,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
case CHIP_ARCTURUS:
case CHIP_RENOIR:
gfx_v9_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4655,12 +4792,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
@@ -4669,18 +4806,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
- data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
if (adev->asic_type != CHIP_ARCTURUS) {
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
@@ -5151,6 +5288,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -5159,9 +5297,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ kiq->reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5483,7 +5621,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
}
-static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = {
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
@@ -5931,7 +6069,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
int ret;
struct ta_ras_trigger_error_input block_info = { 0 };
- if (adev->asic_type != CHIP_VEGA20)
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
@@ -6056,7 +6194,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
@@ -6075,7 +6213,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
}
}
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
@@ -6096,7 +6234,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
}
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
@@ -6108,7 +6246,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
}
}
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
@@ -6135,36 +6273,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
- for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) {
- if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset ||
- gc_ras_fields_vg20[i].seg != reg->seg ||
- gc_ras_fields_vg20[i].inst != reg->inst)
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
+ if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_0_ras_fields[i].seg != reg->seg ||
+ gfx_v9_0_ras_fields[i].inst != reg->inst)
continue;
sec_cnt = (value &
- gc_ras_fields_vg20[i].sec_count_mask) >>
- gc_ras_fields_vg20[i].sec_count_shift;
+ gfx_v9_0_ras_fields[i].sec_count_mask) >>
+ gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
- gc_ras_fields_vg20[i].name,
+ gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value &
- gc_ras_fields_vg20[i].ded_count_mask) >>
- gc_ras_fields_vg20[i].ded_count_shift;
+ gfx_v9_0_ras_fields[i].ded_count_mask) >>
+ gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
- gc_ras_fields_vg20[i].name,
+ gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
@@ -6174,6 +6312,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
+static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+ int i, j, k;
+
+ /* read back registers to clear the counters */
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+}
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
@@ -6182,7 +6372,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
uint32_t i, j, k;
uint32_t reg_value;
- if (adev->asic_type != CHIP_VEGA20)
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
err_data->ue_count = 0;
@@ -6190,14 +6380,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
gfx_v9_0_select_se_sh(adev, j, 0, k);
reg_value =
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
- __get_ras_error_count(&sec_ded_counter_registers[i],
+ gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
j, k, reg_value,
&sec_count, &ded_count);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 000000000000..f099f13d7f1e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,978 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
+
+#include "gc/gc_9_4_1_offset.h"
+#include "gc/gc_9_4_1_sh_mask.h"
+#include "soc15_common.h"
+
+#include "gfx_v9_4.h"
+#include "amdgpu_ras.h"
+
+static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
+ /* CPC */
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
+ /* DC */
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
+ /* CPF */
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
+ /* GDS */
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+ /* SPI */
+ { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
+ /* SQ */
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+ /* SQC */
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
+ /* TA */
+ { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
+ /* TCA */
+ { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
+ /* TCC */
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
+ /* TCI */
+ { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
+ /* TCP */
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 },
+ /* TD */
+ { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 },
+ /* GCEA */
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 },
+ /* RLC */
+ { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 },
+};
+
+static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+ instance);
+
+ if (se_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
+}
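+/* Passing 0xffffffff for se_num/sh_num/instance sets the corresponding
+ * *_BROADCAST_WRITES bit so the GRBM index targets every SE/SH/instance
+ * at once; otherwise the explicit index is selected, which the RAS
+ * counter walkers in this file rely on to read one instance at a time.
+ */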
+
+static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = {
+ /* CPC */
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+ { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_RESTORE_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+ { "CPC_DC_RESTORE_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+ /* CPF */
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+ { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+ /* GDS */
+ { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+ { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE0_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE1_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE2_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE3_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+
+ /* SPI */
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+ { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+ { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+ { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) },
+ { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+ /* SQ */
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+ /* SQC */
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_HIT_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ DATA_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ INST_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_HIT_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_MISS_FIFO",
+ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
+ DATA_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+
+ /* TA */
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+
+ /* TCA */
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+ /* TCC */
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+
+ /* TCI */
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+ /* TCP */
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+ /* TD */
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+ /* EA */
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+ { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+ { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+ { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+ { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+ { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+ { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+ { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+ { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+ { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+
+ /* RLC */
+ { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+ { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+ { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+ { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+ { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+ { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+ { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+};
+
+static const char * const vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+ "UTC_VML2_IFIFO_GROUP0",
+ "UTC_VML2_IFIFO_GROUP1",
+ "UTC_VML2_IFIFO_GROUP2",
+ "UTC_VML2_IFIFO_GROUP3",
+ "UTC_VML2_IFIFO_GROUP4",
+ "UTC_VML2_IFIFO_GROUP5",
+ "UTC_VML2_IFIFO_GROUP6",
+ "UTC_VML2_IFIFO_GROUP7",
+ "UTC_VML2_IFIFO_GROUP8",
+ "UTC_VML2_IFIFO_GROUP9",
+ "UTC_VML2_IFIFO_GROUP10",
+ "UTC_VML2_IFIFO_GROUP11",
+ "UTC_VML2_IFIFO_GROUP12",
+ "UTC_VML2_IFIFO_GROUP13",
+ "UTC_VML2_IFIFO_GROUP14",
+ "UTC_VML2_IFIFO_GROUP15",
+ "UTC_VML2_IFIFO_GROUP16",
+ "UTC_VML2_IFIFO_GROUP17",
+ "UTC_VML2_IFIFO_GROUP18",
+ "UTC_VML2_IFIFO_GROUP19",
+ "UTC_VML2_IFIFO_GROUP20",
+ "UTC_VML2_IFIFO_GROUP21",
+ "UTC_VML2_IFIFO_GROUP22",
+ "UTC_VML2_IFIFO_GROUP23",
+ "UTC_VML2_IFIFO_GROUP24",
+};
+
+static const char * const vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_ARADDRS",
+ "UTC_VML2_RDIF_LOG_FIFO",
+ "UTC_VML2_QUEUE_REQ",
+ "UTC_VML2_QUEUE_RET",
+};
+
+static const char * const utcl2_router_mems[] = {
+ "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0",
+ "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1",
+ "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2",
+ "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3",
+ "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4",
+ "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5",
+ "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6",
+ "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7",
+ "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8",
+ "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9",
+ "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10",
+ "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11",
+ "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12",
+ "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13",
+ "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14",
+ "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15",
+ "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16",
+ "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17",
+ "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18",
+ "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19",
+ "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20",
+ "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21",
+ "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22",
+ "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23",
+ "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24",
+};
+
+static const char * const atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char * const atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+
+ sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ utcl2_router_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ utcl2_router_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+
+ sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_2m_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+
+ sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+
+ return 0;
+}
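
The per-bank walk above follows a common index/data register idiom: a bank
selector is written to the *_INDEX register, the paired *_CNTL register is
then read for that bank's counters (writing 0 clears it), and the index is
parked at 255 afterwards, which appears to act as a deselect/reset value.
A minimal standalone sketch of the idiom, with plain arrays standing in for
the MMIO registers and an invented counter layout:

/*
 * Illustrative model of the INDEX/CNTL register-pair walk above. The
 * "registers" are plain arrays, and the field layout (SEC count in
 * bits 0-7, DED count in bits 8-15) is invented for the example; it
 * is not the real VML2_MEM_ECC_CNTL layout.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_BANKS 4

static uint32_t ecc_index;		/* stands in for the INDEX register */
static uint32_t ecc_cntl[NUM_BANKS];	/* stands in for per-bank CNTL state */

static void wreg_index(uint32_t v) { ecc_index = v % NUM_BANKS; }
static uint32_t rreg_cntl(void) { return ecc_cntl[ecc_index]; }

int main(void)
{
	uint32_t total_sec = 0, total_ded = 0;

	ecc_cntl[1] = 0x0003;	/* pretend bank 1 saw 3 correctable errors */
	ecc_cntl[2] = 0x0100;	/* pretend bank 2 saw 1 uncorrectable error */

	for (uint32_t i = 0; i < NUM_BANKS; i++) {
		uint32_t data;

		wreg_index(i);		/* select the bank... */
		data = rreg_cntl();	/* ...then read its counters */

		total_sec += data & 0xff;	 /* SEC, example layout */
		total_ded += (data >> 8) & 0xff; /* DED, example layout */
	}

	printf("ce=%u ue=%u\n", (unsigned)total_sec, (unsigned)total_ded);
	return 0;
}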
+
+static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id,
+ uint32_t value, uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) {
+ if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_4_ras_fields[i].seg != reg->seg ||
+ gfx_v9_4_ras_fields[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
+ gfx_v9_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
+ gfx_v9_4_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ gfx_v9_4_ras_fields[i].name, se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
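
gfx_v9_4_ras_error_count() above is table-driven: every entry of
gfx_v9_4_ras_fields that matches the register being read supplies a mask and
shift for its SEC and DED fields, and the count is recovered as
(value & mask) >> shift. A hedged standalone sketch of that decode, with
invented field positions and an invented register value:

/*
 * Standalone sketch of the table-driven SEC/DED decode; the mask and
 * shift pairs below are made up for the example and do not match any
 * real EDC counter register.
 */
#include <stdint.h>
#include <stdio.h>

struct ras_field {
	const char *name;
	uint32_t sec_mask, sec_shift;
	uint32_t ded_mask, ded_shift;
};

static const struct ras_field fields[] = {
	{ "BLOCK_A", 0x000000ff, 0,  0x0000ff00, 8 },
	{ "BLOCK_B", 0x00ff0000, 16, 0xff000000, 24 },
};

int main(void)
{
	uint32_t value = 0x01000302;	/* pretend EDC counter register read */
	uint32_t sec = 0, ded = 0;

	for (size_t i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t s = (value & fields[i].sec_mask) >> fields[i].sec_shift;
		uint32_t d = (value & fields[i].ded_mask) >> fields[i].ded_shift;

		if (s)
			printf("%s: SEC %u\n", fields[i].name, (unsigned)s);
		if (d)
			printf("%s: DED %u\n", fields[i].name, (unsigned)d);
		sec += s;
		ded += d;
	}

	printf("total: ce=%u ue=%u\n", (unsigned)sec, (unsigned)ded);
	return 0;
}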
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_select_se_sh(adev, j, 0, k);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_edc_counter_regs[i]));
+ if (reg_value)
+ gfx_v9_4_ras_error_count(
+ &gfx_v9_4_edc_counter_regs[i],
+ j, k, reg_value, &sec_count,
+ &ded_count);
+ }
+ }
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ gfx_v9_4_query_utc_edc_status(adev, err_data);
+
+ return 0;
+}
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+{
+ int i, j, k;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+ }
+
+ WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+}
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+ int ret;
+ struct ta_ras_trigger_error_input block_info = { 0 };
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+ block_info.sub_block_index = info->head.sub_block_index;
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+ block_info.address = info->address;
+ block_info.value = info->value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index fde6328c6d71..2e3f6f755ad4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,17 +14,22 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
+#ifndef __GFX_V9_4_H__
+#define __GFX_V9_4_H__
+
+void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
+
+int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+
+int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if);
-int
-gp102_nvdec_new(struct nvkm_device *device, int index,
- struct nvkm_nvdec **pnvdec)
-{
- return nvkm_nvdec_new_(device, index, pnvdec);
-}
+#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index e91bd7945777..1a2f18b908fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -75,40 +75,45 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.fb_end >> 18) + 0x1,
- adev->gmc.agp_end >> 18));
- else
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
-
- /* Program "protection fault". */
- WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page_addr >> 44));
-
- WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ /*
+ * Raven2 has a HW issue that makes it unable to use
+ * vram beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
+ * workaround, increase the system aperture high
+ * address (add 1) to get rid of the VM fault and
+ * hardware hang.
+ */
+ WREG32_SOC15_RLC(GC, 0,
+ mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max((adev->gmc.fb_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
+ else
+ WREG32_SOC15_RLC(
+ GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ }
}
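
The arithmetic in the Raven2 branch above deserves a note: the aperture
registers take page numbers at 256KB granularity (hence the >> 18 shifts),
and raising the high bound by one page keeps vram that reaches exactly up to
MC_VM_SYSTEM_APERTURE_HIGH_ADDR usable. A standalone sketch of the
computation, using made-up addresses:

/*
 * Sketch of the system aperture bound computation; the addresses are
 * invented. The >> 18 converts byte addresses into the 256KB units
 * the aperture registers expect.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint64_t fb_start  = 0x008000000000ull;	/* example values */
	uint64_t fb_end    = 0x0080ffffffffull;
	uint64_t agp_start = 0x008100000000ull;
	uint64_t agp_end   = 0x0081ffffffffull;
	int is_raven2 = 1;

	uint64_t low = MIN(fb_start, agp_start) >> 18;
	uint64_t high;

	if (is_raven2)
		/* add one page so vram at the aperture edge stays usable */
		high = MAX((fb_end >> 18) + 0x1, agp_end >> 18);
	else
		high = MAX(fb_end, agp_end) >> 18;

	printf("aperture: low=0x%llx high=0x%llx\n",
	       (unsigned long long)low, (unsigned long long)high);
	return 0;
}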
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
@@ -264,7 +269,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
/*
* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
* VF copy registers so vbios post doesn't program them, for
@@ -280,10 +285,12 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_init_gart_aperture_regs(adev);
gfxhub_v1_0_init_system_aperture_regs(adev);
gfxhub_v1_0_init_tlb_regs(adev);
- gfxhub_v1_0_init_cache_regs(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_0_init_cache_regs(adev);
gfxhub_v1_0_enable_system_domain(adev);
- gfxhub_v1_0_disable_identity_aperture(adev);
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_0_disable_identity_aperture(adev);
gfxhub_v1_0_setup_vmid_config(adev);
gfxhub_v1_0_program_invalidation(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f5725336a5f2..9775eca6fe43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -30,6 +30,8 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
@@ -37,6 +39,7 @@
#include "navi10_enum.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v2_3.h"
@@ -234,6 +237,19 @@ static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
(!amdgpu_sriov_vf(adev)));
}
+static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
+ struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
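
gmc_v10_0_get_atc_vmid_pasid_mapping_info() above splits the mapping
register into a pasid in the low bits and a valid flag. A standalone model
of the decode; the mask values below are illustrative placeholders, not the
actual ATC_VMID0_PASID_MAPPING__* definitions from the register headers:

/*
 * Model of the pasid-mapping decode. PASID_MASK and VALID_MASK are
 * assumed layouts (16-bit pasid in bits 0-15, valid flag in bit 31)
 * chosen for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PASID_MASK 0x0000ffffu
#define VALID_MASK 0x80000000u

static bool decode_pasid_mapping(uint32_t value, uint16_t *p_pasid)
{
	*p_pasid = value & PASID_MASK;
	return !!(value & VALID_MASK);
}

int main(void)
{
	uint32_t reg = 0x80000123u;	/* pretend mapping register read */
	uint16_t pasid;

	if (decode_pasid_mapping(reg, &pasid))
		printf("vmid maps to pasid 0x%x\n", pasid);
	else
		printf("mapping not valid\n");
	return 0;
}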
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -246,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 tmp;
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
@@ -273,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to be cleared
@@ -380,6 +397,64 @@ error_alloc:
DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
+/**
+ * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid, i;
+ signed long r;
+ uint32_t seq;
+ uint16_t queried_pasid;
+ bool ret;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ if (amdgpu_emu_mode == 0 && ring->sched.ready) {
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ /* 2 dwords flush + 8 dwords fence */
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, flush_type, all_hub);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ if (r < 1) {
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ return -ETIME;
+ }
+
+ return 0;
+ }
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ if (all_hub) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ gmc_v10_0_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ } else {
+ gmc_v10_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, flush_type);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
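
When the KIQ ring cannot be used, the function above falls back to scanning
vmids 1..15 for one whose pasid mapping matches, then flushes that vmid on a
single hub or on every hub. A standalone sketch of that fallback, with stubs
standing in for the register I/O:

/*
 * Sketch of the pasid-based flush fallback. The pasid table and the
 * flush are stubs; a real implementation reads the ATC mapping
 * registers and issues the per-hub invalidation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_VMIDS  16
#define NUM_VMHUBS 2

/* example state: vmid 3 currently carries pasid 0x456 */
static uint16_t vmid_pasid[NUM_VMIDS] = { 0, 0x123, 0, 0x456 };

static bool query_pasid(int vmid, uint16_t *pasid)
{
	*pasid = vmid_pasid[vmid];
	return *pasid != 0;	/* treat nonzero as "mapping valid" */
}

static void flush_tlb(int vmid, int hub)
{
	printf("flush vmid %d on hub %d\n", vmid, hub);
}

static void flush_tlb_pasid(uint16_t pasid, bool all_hub)
{
	for (int vmid = 1; vmid < NUM_VMIDS; vmid++) {
		uint16_t queried;

		if (query_pasid(vmid, &queried) && queried == pasid) {
			if (all_hub) {
				for (int hub = 0; hub < NUM_VMHUBS; hub++)
					flush_tlb(vmid, hub);
			} else {
				flush_tlb(vmid, 0);
			}
			break;	/* mirror the driver: stop at first match */
		}
	}
}

int main(void)
{
	flush_tlb_pasid(0x456, true);
	return 0;
}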
+
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
@@ -531,6 +606,7 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
.map_mtype = gmc_v10_0_map_mtype,
@@ -564,22 +640,13 @@ static int gmc_v10_0_early_init(void *handle)
static int gmc_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
- unsigned i;
-
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ int r;
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
- dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
- ring->idx, ring->name, ring->vm_inv_eng,
- ring->funcs->vmhub);
- }
+ amdgpu_bo_late_init(adev);
- /* Engine 17 is used for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ r = amdgpu_gmc_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -731,6 +798,10 @@ static int gmc_v10_0_sw_init(void *handle)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -743,15 +814,6 @@ static int gmc_v10_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- /*
- * Reserve 8M stolen memory for navi10 like vega10
- * TODO: will check if it's really needed on asic.
- */
- if (amdgpu_emu_mode == 1)
- adev->gmc.stolen_size = 0;
- else
- adev->gmc.stolen_size = 9 * 1024 *1024;
-
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index f08e5330642d..9da9596a3638 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if (adev->flags & AMD_IS_APU &&
+ adev->gmc.real_vram_size > adev->gmc.aper_size) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
@@ -418,6 +419,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid;
+ unsigned int tmp;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1333,6 +1366,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 6d96d40fbcb8..27d83204fa2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -620,6 +620,39 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid;
+ unsigned int tmp;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -1700,6 +1733,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index fa025ceeea0f..90216abf14a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -38,10 +38,12 @@
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"
@@ -207,6 +209,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
{
u32 bits, i, tmp, reg;
+	/* Devices newer than VEGA10/12 shall have these programming
+ sequences performed by PSP BL */
+ if (adev->asic_type >= CHIP_VEGA20)
+ return 0;
+
bits = 0x7f;
switch (state) {
@@ -393,8 +400,10 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
- adev->gmc.ecc_irq.num_types = 1;
- adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+ }
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -434,6 +443,18 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
adev->pdev->device == 0x15d8)));
}
+static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -455,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- u32 j, tmp;
+ u32 j, inv_req, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub];
- tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
@@ -472,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t req = hub->vm_inv_eng0_req + eng;
uint32_t ack = hub->vm_inv_eng0_ack + eng;
- amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+ amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
return;
}
@@ -500,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to be cleared
@@ -532,6 +553,68 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
+/**
+ * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ int vmid, i;
+ signed long r;
+ uint32_t seq;
+ uint16_t queried_pasid;
+ bool ret;
+ struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ if (adev->in_gpu_reset)
+ return -EIO;
+
+ if (ring->sched.ready) {
+ spin_lock(&adev->gfx.kiq.ring_lock);
+ /* 2 dwords flush + 8 dwords fence */
+ amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, flush_type, all_hub);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ if (r < 1) {
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ return -ETIME;
+ }
+
+ return 0;
+ }
+
+ for (vmid = 1; vmid < 16; vmid++) {
+
+ ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ if (all_hub) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ } else {
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, flush_type);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
@@ -693,6 +776,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+ .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
.map_mtype = gmc_v9_0_map_mtype,
@@ -790,36 +874,6 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
-static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
- {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
- GFXHUB_FREE_VM_INV_ENGS_BITMAP};
- unsigned i;
- unsigned vmhub, inv_eng;
-
- for (i = 0; i < adev->num_rings; ++i) {
- ring = adev->rings[i];
- vmhub = ring->funcs->vmhub;
-
- inv_eng = ffs(vm_inv_engs[vmhub]);
- if (!inv_eng) {
- dev_err(adev->dev, "no VM inv eng for ring %s\n",
- ring->name);
- return -EINVAL;
- }
-
- ring->vm_inv_eng = inv_eng - 1;
- vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
-
- dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
- ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
- }
-
- return 0;
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -828,7 +882,7 @@ static int gmc_v9_0_late_init(void *handle)
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
- r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
return r;
/* Check if ecc is available */
@@ -840,8 +894,8 @@ static int gmc_v9_0_late_init(void *handle)
r = amdgpu_atomfirmware_mem_ecc_supported(adev);
if (!r) {
DRM_INFO("ECC is not present.\n");
- if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
- adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
} else {
DRM_INFO("ECC is active.\n");
}
@@ -1046,7 +1100,7 @@ static int gmc_v9_0_sw_init(void *handle)
else
chansize = 128;
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
+ numchan = adev->df.funcs->get_hbm_channel_number(adev);
adev->gmc.vram_width = numchan * chansize;
}
@@ -1112,11 +1166,13 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- /* interrupt sent to DF. */
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
- &adev->gmc.ecc_irq);
- if (r)
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+ }
/* Set the internal MC address mask
* This is the max address of the GPU's
@@ -1302,12 +1358,13 @@ static int gmc_v9_0_hw_init(void *handle)
else
value = true;
- gfxhub_v1_0_set_fault_enable_default(adev, value);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_set_fault_enable_default(adev, value);
- else
- mmhub_v1_0_set_fault_enable_default(adev, value);
-
+ if (!amdgpu_sriov_vf(adev)) {
+ gfxhub_v1_0_set_fault_enable_default(adev, value);
+ if (adev->asic_type == CHIP_ARCTURUS)
+ mmhub_v9_4_set_fault_enable_default(adev, value);
+ else
+ mmhub_v1_0_set_fault_enable_default(adev, value);
+ }
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index 49e8be761214..e0585e8c6c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,16 +24,6 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 2, 3: firmware
- * Engine 0, 1, 4~16: amdgpu ring,
- * subject to change when ring number changes
- * Engine 17: Gart flushes
- */
-#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index a141408dfb23..0debfd9f428c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -25,6 +25,7 @@
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
+#include "vcn_v1_0.h"
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
@@ -561,7 +562,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index a78292d84854..ff2e6e1ccde7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -690,7 +690,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
if (jpeg_v2_0_is_idle(handle))
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 2c58939e6ad0..c6d046df4b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -469,7 +469,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index adfd8a6171eb..49a3a56ec017 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -523,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_RAVEN:
case CHIP_RENOIR:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v1_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index a7cb185d639a..bde189680521 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_NAVI14:
case CHIP_NAVI12:
mmhub_v2_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v2_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index d9301e80522a..a5281df8d84f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -128,45 +128,53 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
adev->gmc.agp_start >> 24);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
- WREG32_SOC15_OFFSET(MMHUB, 0,
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(value >> 12));
- WREG32_SOC15_OFFSET(MMHUB, 0,
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(value >> 44));
- /* Program "protection fault". */
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
- (u32)((u64)adev->dummy_page_addr >> 44));
+ /* Program "protection fault". */
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15_OFFSET(
+ MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ (u32)((u64)adev->dummy_page_addr >> 44));
- tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
- mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
- hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+ tmp = RREG32_SOC15_OFFSET(
+ MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
+ tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+ hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+ tmp);
+ }
}
static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
@@ -368,30 +376,16 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase
- * they are VF copy registers so vbios post doesn't
- * program them, for SRIOV driver need to program them
- */
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE,
- i * MMHUB_INSTANCE_REGISTER_OFFSET,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15_OFFSET(MMHUB, 0,
- mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP,
- i * MMHUB_INSTANCE_REGISTER_OFFSET,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v9_4_init_gart_aperture_regs(adev, i);
mmhub_v9_4_init_system_aperture_regs(adev, i);
mmhub_v9_4_init_tlb_regs(adev, i);
- mmhub_v9_4_init_cache_regs(adev, i);
+ if (!amdgpu_sriov_vf(adev))
+ mmhub_v9_4_init_cache_regs(adev, i);
mmhub_v9_4_enable_system_domain(adev, i);
- mmhub_v9_4_disable_identity_aperture(adev, i);
+ if (!amdgpu_sriov_vf(adev))
+ mmhub_v9_4_disable_identity_aperture(adev, i);
mmhub_v9_4_setup_vmid_config(adev, i);
mmhub_v9_4_program_invalidation(adev, i);
}
@@ -631,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_ARCTURUS:
mmhub_v9_4_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
mmhub_v9_4_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -669,6 +663,7 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
}
static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
+ /* MMHUB Range 0 */
{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
@@ -757,6 +752,24 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
+ { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 1 */
{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
@@ -844,16 +857,686 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 2 */
+ { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 3 */
+ { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 4 */
+ { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 5 */
+ { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 6 */
+ { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+
+	/* MMHUB Range 7 */
+ { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT),
}
};
static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
- { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
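
For orientation, each soc15_ras_field_entry in the table above pairs one EDC counter register with SEC/DED mask-shift pairs produced by SOC15_REG_FIELD(), and the second table lists the raw counter registers to sweep. A minimal sketch of how one field entry is typically consumed by code like mmhub_v9_4_get_ras_error_count() below, assuming the sec_count_mask/shift and ded_count_mask/shift layout from soc15.h; the helper name is hypothetical:

	/* Accumulate the SEC/DED counts that one field entry contributes
	 * to a freshly read EDC counter register value. Entries with a
	 * zero mask (the "0, 0" slots above) contribute nothing.
	 */
	static void soc15_count_field(uint32_t reg_value,
				      const struct soc15_ras_field_entry *f,
				      uint32_t *sec, uint32_t *ded)
	{
		if (f->sec_count_mask)
			*sec += (reg_value & f->sec_count_mask) >>
				f->sec_count_shift;
		if (f->ded_count_mask)
			*ded += (reg_value & f->ded_count_mask) >>
				f->ded_count_shift;
	}
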
static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
index 8af0bddf85e4..20958639b601 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
@@ -47,6 +47,18 @@ struct mmsch_v1_0_init_header {
uint32_t uvd_table_size;
};
+struct mmsch_vf_eng_init_header {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v1_1_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_vf_eng_init_header eng[2];
+};
+
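
The new v1.1 header prepends a version and total size, then carries one status/offset/size triple per engine. A hypothetical sketch of locating an engine's init table from the header; whether table_offset counts bytes or dwords from the start of the shared table is an assumption here (dwords below), as the patch itself does not say:

	/* Resolve the init table for engine index eng, assuming dword
	 * offsets (an assumption, not stated by the patch).
	 */
	static uint32_t *mmsch_v1_1_eng_table(uint32_t *table_base,
					      struct mmsch_v1_1_init_header *hdr,
					      int eng)
	{
		return table_base + hdr->eng[eng].table_offset;
	}
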
struct mmsch_v1_0_cmd_direct_reg_header {
uint32_t reg_offset : 28;
uint32_t command_type : 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 43305afa3d6f..5fd67e1cc2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -250,7 +250,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
*/
locked = mutex_trylock(&adev->lock_reset);
if (locked)
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -262,7 +262,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
flr_done:
if (locked) {
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 0d8767eb7a70..237fa5e16b7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -252,7 +252,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
*/
locked = mutex_trylock(&adev->lock_reset);
if (locked)
- adev->in_gpu_reset = 1;
+ adev->in_gpu_reset = true;
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -264,12 +264,16 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
flr_done:
if (locked) {
- adev->in_gpu_reset = 0;
+ adev->in_gpu_reset = false;
mutex_unlock(&adev->lock_reset);
}
/* Trigger recovery for world switch failure if no TDR */
- if (amdgpu_device_should_recover_gpu(adev))
+ if (amdgpu_device_should_recover_gpu(adev)
+ && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
+ adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
amdgpu_device_gpu_recover(adev, NULL);
}
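
The reworked condition makes the FLR worker trigger recovery only when at least one engine class still has an infinite timeout, i.e. when TDR cannot be relied on to recover that engine. Pulled out as a hypothetical helper for readability:

	/* True when at least one engine class has TDR disabled
	 * (infinite timeout); only then must the FLR worker drive
	 * recovery itself.
	 */
	static bool xgpu_nv_any_tdr_disabled(struct amdgpu_device *adev)
	{
		return adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		       adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		       adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		       adev->video_timeout == MAX_SCHEDULE_TIMEOUT;
	}
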
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9af73567e716..cf557a428298 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -110,7 +110,6 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
- int ret = 0;
u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
u32 tmp;
@@ -179,7 +178,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
navi10_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
@@ -427,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
navi10_ih_update_clockgating_state(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index bb701dbfd472..65eb378fa035 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -339,7 +339,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
- amdgpu_ras_reset_gpu(adev, true);
+ amdgpu_ras_reset_gpu(adev);
}
}
@@ -456,10 +456,8 @@ static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
&adev->nbio.ras_controller_irq);
- if (r)
- return r;
- return 0;
+ return r;
}
static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev)
@@ -476,10 +474,8 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
&adev->nbio.ras_err_event_athub_irq);
- if (r)
- return r;
- return 0;
+ return r;
}
#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index b0229543e887..2d1bebdf1603 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -478,7 +478,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -489,7 +489,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
@@ -502,7 +502,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -513,7 +513,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
@@ -726,6 +726,12 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB;
+		/* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
+		 * as a consequence, rev_id and external_rev_id are wrong.
+		 * Work around this by hardcoding rev_id to 0 (the default value).
+		 */
+ if (amdgpu_sriov_vf(adev))
+ adev->rev_id = 0;
adev->external_rev_id = adev->rev_id + 0xa;
break;
default:
@@ -944,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
nv_update_hdp_mem_power_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
nv_update_hdp_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 74a9fe8e0cfb..36b65797434e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -242,6 +242,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
+ GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index c66ca8cc2ebd..0829188c1a5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -43,10 +44,13 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi10_ta.bin");
MODULE_FIRMWARE("amdgpu/navi14_sos.bin");
MODULE_FIRMWARE("amdgpu/navi14_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
+MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
@@ -233,6 +237,29 @@ out:
return err;
}
+int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+		/* Wait for the bootloader to signify that it is
+		ready by setting bit 31 of C2PMSG_35 to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -258,9 +285,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
return 0;
}
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -276,9 +301,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
return ret;
}
@@ -298,9 +321,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
return 0;
}
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -319,8 +340,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
return ret;
}
@@ -337,9 +357,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
if (psp_v11_0_is_sos_alive(psp))
return 0;
- /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
return ret;
@@ -954,10 +972,13 @@ Err_out:
*/
static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
{
- int ret;
- uint32_t p2c_header[4];
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t p2c_header[4];
+ uint32_t sz;
+ void *buf;
+ int ret;
if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
DRM_DEBUG("Memory training is not supported.\n");
@@ -972,7 +993,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
return 0;
}
- amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
pcache[0], pcache[1], pcache[2], pcache[3],
p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
@@ -1009,11 +1030,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
DRM_DEBUG("Memory training ops:%x.\n", ops);
if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+		/*
+		 * Long training will encroach on a certain amount of bottom
+		 * VRAM, so save the content of this bottom VRAM to system
+		 * memory before training, and restore it after training to
+		 * avoid VRAM corruption.
+		 */
+ sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
+
+ if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
+ DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
+ adev->gmc.visible_vram_size,
+ adev->mman.aper_base_kaddr);
+ return -EINVAL;
+ }
+
+ buf = vmalloc(sz);
+ if (!buf) {
+ DRM_ERROR("failed to allocate system memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
if (ret) {
DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
return ret;
}
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
+ vfree(buf);
}
if (ops & PSP_MEM_TRAIN_SAVE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a10175838013..7d509a40076f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
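
The rewritten pad_count is the same quantity as before: both expressions compute the distance from length_dw up to the next multiple of 8, since (8 - (n & 7)) % 8 == (-n) & 7 for unsigned n. A standalone check (illustrative, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t n;

		/* exhaustive over all remainders mod 8 */
		for (n = 0; n < 64; n++)
			assert(((8 - (n & 0x7)) % 8) == ((-n) & 7));
		return 0;
	}
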
@@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 5f4e2c616241..b6109a99fc43 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4ef4d31f5231..e55884d204bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -82,6 +82,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
@@ -254,7 +255,106 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
+};
+
+static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
+ { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
+ 0, 0,
+ },
+ { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
};
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
@@ -698,7 +798,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on a 8 DW boundary */
- sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1579,7 +1679,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1686,6 +1786,7 @@ static int sdma_v4_0_early_init(void *handle)
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
sdma_v4_0_set_irq_funcs(adev);
+ sdma_v4_0_set_ras_funcs(adev);
return 0;
}
@@ -1700,8 +1801,18 @@ static int sdma_v4_0_late_init(void *handle)
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
+ int i;
- return amdgpu_sdma_ras_late_init(adev, &ih_info);
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+ }
+
+ if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
+ return adev->sdma.funcs->ras_late_init(adev, &ih_info);
+ else
+ return 0;
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1773,7 +1884,8 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- amdgpu_sdma_ras_fini(adev);
+ if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
+ adev->sdma.funcs->ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -2064,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
case CHIP_ARCTURUS:
case CHIP_RENOIR:
sdma_v4_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v4_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -2409,10 +2521,73 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
sched = &adev->sdma.instance[i].page.sched;
else
sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] = sched;
+ }
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+static void sdma_v4_0_get_ras_error_count(uint32_t value,
+ uint32_t instance,
+ uint32_t *sec_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt;
+
+	/* double bit (multiple bit) error detection is not supported */
+ for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
+		/* The SDMA_EDC_COUNTER register in each SDMA instance
+		 * shares the same SED mask/shift layout.
+		 */
+ sec_cnt = (value &
+ sdma_v4_0_ras_fields[i].sec_count_mask) >>
+ sdma_v4_0_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("Detected %s in SDMA%d, SED %d\n",
+ sdma_v4_0_ras_fields[i].name,
+ instance, sec_cnt);
+ *sec_count += sec_cnt;
+ }
+ }
+}
+
+static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t instance, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0;
+ uint32_t reg_value = 0;
+
+ reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_0_get_ras_error_count(reg_value,
+ instance, &sec_count);
+ /* err_data->ce_count should be initialized to 0
+ * before calling into this function */
+ err_data->ce_count += sec_count;
+	/* double bit error is not supported;
+	 * set the ue count to 0 */
+ err_data->ue_count = 0;
+
+ return 0;
+};
+
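
A hypothetical caller sketch for the query path above: err_data must start zeroed, correctable counts accumulate across SDMA instances, and the uncorrectable count stays 0:

	struct ras_err_data err_data = { 0 };
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		sdma_v4_0_query_ras_error_count(adev, i, &err_data);
	/* err_data.ce_count now holds the summed SED counts */
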
+static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
+ .ras_late_init = amdgpu_sdma_ras_late_init,
+ .ras_fini = amdgpu_sdma_ras_fini,
+ .query_ras_error_count = sdma_v4_0_query_ras_error_count,
+};
+
+static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+ break;
+ default:
+ break;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index f4ad2990f973..67b9830b7c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -382,8 +382,15 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
- /* IB packet must end on a 8 DW boundary */
- sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	/* An IB packet must end on an 8 DW boundary: the next dword
+	 * must be on an 8-dword boundary. Our IB packet below is 6
+	 * dwords long, so add x NOPs such that, in modular arithmetic,
+	 * wptr + 6 + x = 8k, k >= 0, which in C is
+	 * (wptr + 6 + x) % 8 == 0.
+	 * The expression below is a solution for x.
+	 */
+ sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
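
The comment's arithmetic checks out: x = (2 - wptr) & 7 is the unique solution of (wptr + 6 + x) % 8 == 0 with 0 <= x < 8, because 2 is congruent to -6 mod 8. A standalone verification (illustrative only):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t wptr;

		for (wptr = 0; wptr < 32; wptr++) {
			uint32_t x = (2 - wptr) & 7;

			assert((wptr + 6 + x) % 8 == 0);
		}
		return 0;
	}
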
@@ -907,16 +914,9 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
udelay(1);
}
- if (i < adev->usec_timeout) {
- if (amdgpu_emu_mode == 1)
- DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
- else
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
amdgpu_device_wb_free(adev, index);
return r;
@@ -981,13 +981,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1086,10 +1083,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
- * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
- *
+ * sdma_v5_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
*
+ * Pad the IB with NOPs so its length becomes a multiple of 8 dwords.
*/
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
@@ -1097,7 +1094,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1528,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
sdma_v5_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v5_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -1721,17 +1718,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index bdda8b4e03f0..42d5601b6bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle,
bool enable;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ enable = (state == AMD_CG_STATE_GATE);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
- struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version si_dma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 5bd6ae7a52fe..15f3424a1ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -479,62 +479,18 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
-static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
- if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
-
- *cap = smu_baco_is_support(smu);
- return 0;
- } else {
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
-
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
- }
-}
-
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
/* avoid NBIF got stuck when do RAS recovery in BACO reset */
if (ras && ras->supported)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- dev_info(adev->dev, "GPU BACO reset\n");
-
- if (is_support_sw_smu(adev)) {
- struct smu_context *smu = &adev->smu;
- int ret;
-
- ret = smu_baco_enter(smu);
- if (ret)
- return ret;
-
- ret = smu_baco_exit(smu);
- if (ret)
- return ret;
- } else {
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
-
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
- }
+ ret = amdgpu_dpm_baco_reset(adev);
+ if (ret)
+ return ret;
/* re-enable doorbell interrupt after BACO exit */
if (ras && ras->supported)
@@ -543,17 +499,6 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
return 0;
}
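The helpers removed above are folded into common amdgpu_dpm wrappers. Reassembled from the deleted open-coded paths, amdgpu_dpm_baco_reset() plausibly takes roughly this shape; this is a sketch from the removed code, not the committed implementation:

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(&adev->smu);
		if (ret)
			return ret;
		return smu_baco_exit(&adev->smu);
	}

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter, then exit, BACO */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;
	return pp_funcs->set_asic_baco_state(pp_handle, 0) ? -EIO : 0;
}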
-static int soc15_mode2_reset(struct amdgpu_device *adev)
-{
- if (is_support_sw_smu(adev))
- return smu_mode2_reset(&adev->smu);
- if (!adev->powerplay.pp_funcs ||
- !adev->powerplay.pp_funcs->asic_reset_mode_2)
- return -ENOENT;
-
- return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
-}
-
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
@@ -567,11 +512,11 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_ARCTURUS:
- soc15_asic_get_baco_capability(adev, &baco_reset);
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
case CHIP_VEGA20:
if (adev->psp.sos_fw_version >= 0x80067)
- soc15_asic_get_baco_capability(adev, &baco_reset);
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
/*
* 1. PMFW version > 0x284300: all cases use baco
@@ -592,13 +537,17 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
+ /* original raven doesn't have full asic reset */
+ if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+ return 0;
+
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
- return soc15_mode2_reset(adev);
+ return amdgpu_dpm_mode2_reset(adev);
default:
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
@@ -608,24 +557,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
static bool soc15_supports_baco(struct amdgpu_device *adev)
{
- bool baco_support;
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
- soc15_asic_get_baco_capability(adev, &baco_support);
- break;
+ case CHIP_ARCTURUS:
+ return amdgpu_dpm_is_baco_supported(adev);
case CHIP_VEGA20:
if (adev->psp.sos_fw_version >= 0x80067)
- soc15_asic_get_baco_capability(adev, &baco_support);
- else
- baco_support = false;
- break;
+ return amdgpu_dpm_is_baco_supported(adev);
+ return false;
default:
return false;
}
-
- return baco_support;
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -738,9 +681,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
- adev->df_funcs = &df_v3_6_funcs;
+ adev->df.funcs = &df_v3_6_funcs;
else
- adev->df_funcs = &df_v1_7_funcs;
+ adev->df.funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
adev->nbio.funcs->detect_hw_virt(adev);
@@ -827,11 +770,14 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ } else {
amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ }
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
break;
@@ -841,8 +787,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
- if (is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -1306,7 +1251,7 @@ static int soc15_common_sw_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
- adev->df_funcs->sw_init(adev);
+ adev->df.funcs->sw_init(adev);
return 0;
}
@@ -1316,7 +1261,7 @@ static int soc15_common_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_nbio_ras_fini(adev);
- adev->df_funcs->sw_fini(adev);
+ adev->df.funcs->sw_fini(adev);
return 0;
}
@@ -1526,38 +1471,38 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA12:
case CHIP_VEGA20:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_rom_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- adev->df_funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
+ adev->df.funcs->update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
soc15_update_rom_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
soc15_update_hdp_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -1595,7 +1540,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
- adev->df_funcs->get_clockgating_state(adev, flags);
+ adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
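Alongside these hunks, the bare df_funcs pointer moves into a dedicated adev->df container, so DF-wide state can sit next to its function table. A hedged sketch of the container; only the funcs member is confirmed by this diff:

struct amdgpu_df {
	const struct amdgpu_df_funcs *funcs;
	/* room for DF-wide state alongside the function table */
};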
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 515eb50cd0f8..793bf70e64b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -28,27 +28,24 @@
#include "rsmu/rsmu_0_0_2_sh_mask.h"
#include "umc/umc_6_1_1_offset.h"
#include "umc/umc_6_1_1_sh_mask.h"
+#include "umc/umc_6_1_2_offset.h"
-#define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10
-
-/* UMC 6_1_2 register offsets */
-#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360
-#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1
-#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361
-#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1
-#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2
-#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1
+#define UMC_6_INST_DIST 0x40000
/*
* (addr / 256) * 8192, the higher 26 bits in ErrorAddr
* is the index of 8KB block
*/
-#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
/* channel index is the index of 256B block */
#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
/* offset in 256B block */
#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
{2, 18, 11, 27}, {4, 20, 13, 29},
@@ -57,24 +54,10 @@ const uint32_t
{9, 25, 0, 16}, {15, 31, 6, 22}
};
-static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev,
- uint32_t umc_instance)
+static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
- uint32_t rsmu_umc_index;
-
- rsmu_umc_index = RREG32_SOC15(RSMU, 0,
- mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
- rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 1);
- rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- RSMU_UMC_INDEX_INSTANCE, umc_instance);
- rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- RSMU_UMC_INDEX_WREN, 1 << umc_instance);
- WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- rsmu_umc_index);
}
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
@@ -83,15 +66,23 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
RSMU_UMC_INDEX_MODE_EN, 0);
}
-static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
uint32_t rsmu_umc_index;
rsmu_umc_index = RREG32_SOC15(RSMU, 0,
- mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+
return REG_GET_FIELD(rsmu_umc_index,
- RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
- RSMU_UMC_INDEX_INSTANCE);
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_MODE_EN);
+}
+
+static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst;
}
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
@@ -105,7 +96,6 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_ARCTURUS) {
/* UMC 6_1_2 registers */
-
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
ecc_err_cnt_addr =
@@ -114,7 +104,6 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
} else {
/* UMC 6_1_1 registers */
-
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
ecc_err_cnt_addr =
@@ -124,31 +113,31 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
}
/* select the lower chip and check the error count */
- ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
- ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
/* clear the lower chip err count */
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
- ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
/* clear the higher chip err count */
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* check for SRAM correctable error
MCUMC_STATUS is a 64 bit register */
- mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -164,18 +153,16 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev
if (adev->asic_type == CHIP_ARCTURUS) {
/* UMC 6_1_2 registers */
-
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
} else {
/* UMC 6_1_1 registers */
-
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
}
/* check the MCUMC_STATUS */
- mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -185,58 +172,78 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev
*error_count += 1;
}
-static void umc_v6_1_query_error_count(struct amdgpu_device *adev,
- struct ras_err_data *err_data, uint32_t umc_reg_offset,
- uint32_t channel_index)
-{
- umc_v6_1_query_correctable_error_count(adev, umc_reg_offset,
- &(err_data->ce_count));
- umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset,
- &(err_data->ue_count));
-}
-
static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
- amdgpu_umc_for_each_channel(umc_v6_1_query_error_count);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v6_1_querry_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
}
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t channel_index)
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
struct eeprom_table_record *err_rec;
+ uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
if (adev->asic_type == CHIP_ARCTURUS) {
/* UMC 6_1_2 registers */
-
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT);
} else {
/* UMC 6_1_1 registers */
-
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
}
/* skip error address process if -ENOMEM */
if (!err_data->err_addr) {
/* clear umc status */
- WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
return;
}
err_rec = &err_data->err_addr[err_data->err_addr_cnt];
- mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4);
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
@@ -257,39 +264,60 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
err_rec->cu = 0;
err_rec->mem_channel = channel_index;
- err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+ err_rec->mcumc_id = umc_inst;
err_data->err_addr_cnt++;
}
}
/* clear umc status */
- WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
- amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
}
static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t channel_index)
+ uint32_t umc_reg_offset)
{
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
uint32_t ecc_err_cnt_addr;
if (adev->asic_type == CHIP_ARCTURUS) {
/* UMC 6_1_2 registers */
-
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
ecc_err_cnt_addr =
SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
} else {
/* UMC 6_1_1 registers */
-
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
ecc_err_cnt_addr =
@@ -297,28 +325,44 @@ static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
}
/* select the lower chip and check the error count */
- ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
/* set ce error interrupt type to APIC based interrupt */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrInt, 0x1);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
/* set error count to initial value */
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
- WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
- WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
}
static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
- void *ras_error_status = NULL;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
+ }
- amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
}
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
@@ -326,6 +370,4 @@ const struct amdgpu_umc_funcs umc_v6_1_funcs = {
.ras_late_init = amdgpu_umc_ras_late_init,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
- .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
- .disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
};
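The umc_v6_1 rework replaces the amdgpu_umc_for_each_channel() callback walker with explicit loops, saves and restores the RSMU index mode around each walk, computes flat per-channel offsets, and reads registers through the PCIE aperture; since RREG32_PCIE/WREG32_PCIE take byte addresses, each dword register offset is scaled by 4. A condensed sketch of the traversal, assembled from the macros and helper in the diff above:

uint32_t umc_inst, ch_inst, umc_reg_offset, val;
uint32_t ecc_err_cnt_addr = SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt);
uint32_t saved = umc_v6_1_get_umc_index_mode_state(adev);

if (saved)
	umc_v6_1_disable_umc_index_mode(adev);	/* use flat addressing */

LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
	umc_reg_offset = adev->umc.channel_offs * ch_inst +
			 UMC_6_INST_DIST * umc_inst;
	/* byte-addressed read of a 32-bit register */
	val = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
}

if (saved)
	umc_v6_1_enable_umc_index_mode(adev);	/* restore prior mode */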
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 01e62fb8e6e0..0fa8aae2d78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 217084d56ab8..e0aadcaf6c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 475ae68f38f5..217db187207c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 683701cf7270..3fd102efb7af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
int i;
if ((adev->asic_type == CHIP_POLARIS10) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 652cecc030b3..71f61afdc655 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
@@ -38,10 +39,10 @@
#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
-#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK 0x05ac
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1
+#define mmUVD_REG_XX_MASK_1_0 0x05ac
+#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
@@ -49,7 +50,9 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
+
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
/**
* vcn_v1_0_early_init - set function pointers
@@ -105,6 +108,9 @@ static int vcn_v1_0_sw_init(void *handle)
if (r)
return r;
+ /* Override the work func */
+ adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -829,9 +835,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
vcn_v1_0_mc_resume_spg_mode(adev);
- WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
- WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
- RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
+ WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
+ RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
/* enable VCPU clock */
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
@@ -1193,7 +1199,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
}
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
+ int inst_idx, struct dpg_pause_state *new_state)
{
int ret_code;
uint32_t reg_data = 0;
@@ -1201,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1252,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1312,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.jpeg = new_state->jpeg;
+ adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
return 0;
@@ -1340,7 +1348,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
@@ -1758,6 +1766,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
return ret;
}
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vcn.idle_work.work);
+ unsigned int fences = 0, i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ }
+
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
+
+ if (fences == 0) {
+ amdgpu_gfx_off_ctrl(adev, true);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+ else
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_GATE);
+ } else {
+ schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ }
+}
+
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (set_clocks) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+ unsigned int fences = 0, i;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+
+ adev->vcn.pause_dpg_mode(adev, 0, &new_state);
+ }
+}
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1804,7 +1892,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
@@ -1836,7 +1924,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vcn_v1_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = amdgpu_vcn_ring_begin_use,
+ .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
index 2a497a7a4840..f67d7391fc21 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
@@ -24,6 +24,8 @@
#ifndef __VCN_V1_0_H__
#define __VCN_V1_0_H__
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+
extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
#endif
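vcn_v1_0 now owns its idle handling: begin_use cancels the shared delayed work and, if the block had already gone idle, restores clocks and disables gfxoff, while the overridden handler re-gates once no fences remain. The handshake reduces to this pattern; the function names here are illustrative, not driver API:

void example_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* cancel returns false when the work was not pending, i.e. the
	 * idle handler already ran and powered the block down */
	if (!cancel_delayed_work_sync(&adev->vcn.idle_work))
		amdgpu_gfx_off_ctrl(adev, false);	/* power back up */
}

void example_ring_end_use(struct amdgpu_ring *ring)
{
	/* re-arm the timer; the handler gates power if no fences remain */
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}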
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index d76ece38c97b..c387c81f8695 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -47,18 +47,13 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
-#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK 0x026c
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
-
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state);
+ int inst_idx, struct dpg_pause_state *new_state);
/**
* vcn_v2_0_early_init - set function pointers
@@ -356,88 +351,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
offset = size;
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
/* VCN global tiling registers */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
@@ -583,19 +578,19 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
@@ -759,7 +754,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
if (indirect)
- adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr;
+ adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;
/* enable clock gating */
vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
@@ -768,11 +763,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interrupt */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
@@ -784,28 +779,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
@@ -813,29 +808,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* release VCPU reset to boot */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
/* enable LMI MC and UMC channels */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL2),
0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
/* enable master interrupt */
- WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
+ WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
- psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.dpg_sram_cpu_addr));
+ psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
+ (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -1135,16 +1130,16 @@ power_off:
}
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
+ int inst_idx, struct dpg_pause_state *new_state)
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1190,7 +1185,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
@@ -1218,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
@@ -1629,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index ef749b02ded9..6c9de1882428 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val);
+extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring);
extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);
extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
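WREG32_SOC15_DPG_MODE_2_0 gains a leading instance index throughout, so the indirect programming path can target either VCN instance. In indirect mode the writes are staged in per-instance SRAM and flushed through PSP, a sequence visible in the diff and condensed here as a sketch:

/* rewind the staging cursor for instance i */
adev->vcn.inst[i].dpg_sram_curr_addr =
	(uint32_t *)adev->vcn.inst[i].dpg_sram_cpu_addr;

/* ... WREG32_SOC15_DPG_MODE_2_0(i, reg, val, ...) appends entries ... */

/* hand the staged block to PSP: size = cursor - base, in bytes */
psp_update_vcn_sram(adev, i, adev->vcn.inst[i].dpg_sram_gpu_addr,
	(uint32_t)((uintptr_t)adev->vcn.inst[i].dpg_sram_curr_addr -
		   (uintptr_t)adev->vcn.inst[i].dpg_sram_cpu_addr));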
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index f67fca38c1a9..2d64ba1adf99 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -29,6 +29,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
+#include "mmsch_v1_0.h"
#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
@@ -54,6 +55,9 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -88,7 +92,13 @@ static int vcn_v2_5_early_init(void *handle)
} else
adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.num_vcn_inst = 2;
+ adev->vcn.harvest_config = 0;
+ adev->vcn.num_enc_rings = 1;
+ } else {
+ adev->vcn.num_enc_rings = 2;
+ }
vcn_v2_5_set_dec_ring_funcs(adev);
vcn_v2_5_set_enc_ring_funcs(adev);
@@ -176,7 +186,9 @@ static int vcn_v2_5_sw_init(void *handle)
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
+
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
if (r)
@@ -185,7 +197,10 @@ static int vcn_v2_5_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
+
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
+
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
if (r)
@@ -193,6 +208,15 @@ static int vcn_v2_5_sw_init(void *handle)
}
}
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+
return 0;
}
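The doorbell assignments above encode two layouts: bare metal strides 8 doorbells per VCN instance (dec at +0, enc rings from +2), while SR-IOV packs 2 per instance (dec at +0, the single enc ring at +1). A hypothetical helper making the arithmetic explicit:

static unsigned int vcn25_dec_doorbell_index(struct amdgpu_device *adev, int j)
{
	unsigned int base = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	/* 2 doorbells per instance under SR-IOV, 8 on bare metal */
	return base + (amdgpu_sriov_vf(adev) ? 2 * j : 8 * j);
}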
@@ -208,6 +232,9 @@ static int vcn_v2_5_sw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -228,31 +255,44 @@ static int vcn_v2_5_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i, j, r;
+ int i, j, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ r = vcn_v2_5_sriov_start(adev);
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
- ring = &adev->vcn.inst[j].ring_dec;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ring->doorbell_index, j);
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.inst[j].ring_enc[0].sched.ready = true;
+ adev->vcn.inst[j].ring_enc[1].sched.ready = false;
+ adev->vcn.inst[j].ring_enc[2].sched.ready = false;
+ adev->vcn.inst[j].ring_dec.sched.ready = true;
+ } else {
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
+ ring = &adev->vcn.inst[j].ring_dec;
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, j);
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ ring = &adev->vcn.inst[j].ring_enc[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
}
}
done:
if (!r)
- DRM_INFO("VCN decode and encode initialized successfully.\n");
+ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
return r;
}
@@ -275,7 +315,9 @@ static int vcn_v2_5_hw_fini(void *handle)
continue;
ring = &adev->vcn.inst[i].ring_dec;
- if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->sched.ready = false;
@@ -350,9 +392,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
@@ -384,6 +426,99 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
/**
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
@@ -502,6 +637,54 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
+ uint8_t sram_sel, int inst_idx, uint8_t indirect)
+{
+ uint32_t reg_data = 0;
+
+ /* enable sw clock gating control */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+
+ /* turn off clock gating */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+
+ /* turn on SUVD clock gating */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+
+ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+}
+
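The reg_data value above is composed with the generated <FIELD>__SHIFT and <FIELD>_MASK register constants. A compact model of that idiom, with made-up field positions (the real ones come from the ASIC register headers):

#include <assert.h>
#include <stdint.h>

#define DYN_CLOCK_MODE__SHIFT     0
#define CLK_GATE_DLY_TIMER__SHIFT 2
#define CLK_OFF_DELAY__SHIFT      4
#define SYS_MODE_MASK             (1u << 12)

int main(void)
{
	uint32_t reg = 0;

	reg  = 1u << DYN_CLOCK_MODE__SHIFT;     /* dynamic (sw) gating */
	reg |= 1u << CLK_GATE_DLY_TIMER__SHIFT; /* gate delay timer = 1 */
	reg |= 4u << CLK_OFF_DELAY__SHIFT;      /* clock-off delay = 4 */
	reg &= ~SYS_MODE_MASK;                  /* force one unit to sw mode */

	assert(reg == 0x45);
	return 0;
}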
/**
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
@@ -564,6 +747,138 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
}
}
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ struct amdgpu_ring *ring;
+ uint32_t rb_bufsz, tmp;
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+
+ if (indirect)
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+
+ /* enable clock gating */
+ vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup mmUVD_LMI_CTRL */
+ tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_CNTL),
+ 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_SET_MUXA0),
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_SET_MUXB0),
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MPC_SET_MUX),
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
+
+ vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+
+ /* unblock VCPU register access */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+ UVD, 0, mmUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+ (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_dec;
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* set the write pointer delay */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+ (upper_32_bits(ring->gpu_addr) >> 2));
+
+ /* program the RB_BASE for the ring buffer */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+
+ WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+
+ ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+
+ return 0;
+}
+
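When indirect is set, the WREG32_SOC15_DPG_MODE_2_0() calls above do not touch the hardware; they append to a CPU-side SRAM image whose final byte count is the cursor-minus-base difference passed to psp_update_vcn_sram(). A sketch of that bookkeeping, with hypothetical names rather than the driver's:

#include <stdint.h>
#include <stdio.h>

static uint32_t sram_image[256];
static uint32_t *sram_curr = sram_image;

/* record one (offset, value) pair instead of writing the register */
static void record_wreg(uint32_t reg_offset, uint32_t value)
{
	*sram_curr++ = reg_offset;
	*sram_curr++ = value;
}

int main(void)
{
	record_wreg(0x1234, 0x1);
	record_wreg(0x1238, 0xff);

	uint32_t nbytes = (uint32_t)((uintptr_t)sram_curr -
				     (uintptr_t)sram_image);
	printf("image size handed to PSP: %u bytes\n", nbytes); /* 16 */
	return 0;
}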
static int vcn_v2_5_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
@@ -576,6 +891,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -585,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return 0;
+
/* SW clock gating */
vcn_v2_5_disable_clock_gating(adev);
@@ -741,6 +1064,233 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop = 0, size = 0;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v1_1_init_header *header = NULL;
+
+ header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
+ size = header->total_size;
+
+ /*
+ * 1, write the GPU mc addr of the memory descriptor location to the
+ * mmMMSCH_VF_CTX_ADDR_LO/HI registers
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ /*
+ * 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 10;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(100);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ dev_err(adev->dev,
+ "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
+ data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
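The mailbox handshake above is a bounded poll: re-read the response register until the expected ack bits appear or the retry budget runs out, then fail with -EBUSY. A userspace sketch of the same pattern, where read_resp() stands in for the RREG32 of mmMMSCH_VF_MAILBOX_RESP:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_resp;

static uint32_t read_resp(void)
{
	/* pretend the MMSCH acks on the third read */
	static int reads;
	if (++reads >= 3)
		fake_resp = 0x10000002;
	return fake_resp;
}

static int poll_mailbox(uint32_t want, int retries)
{
	while (retries--) {
		if ((read_resp() & want) == want)
			return 0;
		/* udelay(100) between reads in the driver */
	}
	return -EBUSY;
}

int main(void)
{
	printf("poll: %d\n", poll_mailbox(0x10000002, 10)); /* 0 on success */
	return 0;
}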
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint32_t offset, size, tmp, i, rb_bufsz;
+ uint32_t table_size = 0;
+ struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
+ struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
+ struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
+ struct mmsch_v1_0_cmd_end end = { { 0 } };
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
+
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ header->version = MMSCH_VERSION;
+ header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
+ init_table += header->total_size;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ header->eng[i].table_offset = header->total_size;
+ header->eng[i].init_status = 0;
+ header->eng[i].table_size = 0;
+
+ table_size = 0;
+
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ /* mc resume */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+ offset = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+
+ ring = &adev->vcn.inst[i].ring_dec;
+ ring->wptr = 0;
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V1_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+ table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+ init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+
+ /* update the header with this engine's table size */
+ header->eng[i].table_size = table_size;
+ header->total_size += table_size;
+ }
+
+ return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
+}
+
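vcn_v2_5_sriov_start() assembles the init table the MMSCH consumes: a header with per-engine {offset, size} slots (all counts in dwords), followed by each engine's packed command stream and an end packet. A simplified stand-alone model of that assembly, with structures that are stand-ins for the mmsch_v1_* definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eng_slot { uint32_t table_offset, init_status, table_size; };
struct init_header { uint32_t version, total_size; struct eng_slot eng[2]; };
struct cmd_end { uint32_t command_type; };

static uint32_t table[1024];

int main(void)
{
	struct init_header *hdr = (struct init_header *)table;
	uint32_t *cursor = table;
	struct cmd_end end = { .command_type = 0xf };

	hdr->version = 1;
	hdr->total_size = sizeof(*hdr) / 4;     /* header size in dwords */
	cursor += hdr->total_size;

	for (int i = 0; i < 2; i++) {
		uint32_t table_size = 0;

		hdr->eng[i].table_offset = hdr->total_size;
		/* ... direct-write commands would be appended here ... */
		memcpy(cursor, &end, sizeof(end));
		table_size += sizeof(end) / 4;
		cursor += sizeof(end) / 4;

		hdr->eng[i].table_size = table_size;
		hdr->total_size += table_size;
	}
	printf("total table size: %u dwords\n", hdr->total_size);
	return 0;
}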
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ int ret_code = 0;
+ uint32_t tmp;
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ return 0;
+}
+
static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
uint32_t tmp;
@@ -749,6 +1299,11 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v2_5_stop_dpg_mode(adev, i);
+ continue;
+ }
+
/* wait for vcn idle */
SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
if (r)
@@ -804,6 +1359,67 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state)
+{
+ struct amdgpu_ring *ring;
+ uint32_t reg_data = 0;
+ int ret_code;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
+ DRM_DEBUG("dpg pause state changed %d -> %d",
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
+ reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+
+ /* wait for ACK */
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+ WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+
+ SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
+ }
+
+ return 0;
+}
+
/**
* vcn_v2_5_dec_ring_get_rptr - get read pointer
*
@@ -846,6 +1462,10 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr) | 0x80000000);
+
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -871,7 +1491,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_dec_ring_test_ring,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,
@@ -1046,7 +1666,10 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
if (enable) {
if (vcn_v2_5_is_idle(handle))
@@ -1065,6 +1688,9 @@ static int vcn_v2_5_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if(state == adev->vcn.cur_state)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 5cb7e231de5f..407c6093c2ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -234,16 +234,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- if (adev->irq.ih.use_bus_addr) {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
- } else {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1);
- }
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
-
if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
@@ -253,10 +246,19 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
}
- if ((adev->asic_type == CHIP_ARCTURUS
- && adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
- || adev->asic_type == CHIP_RENOIR)
+ if ((adev->asic_type == CHIP_ARCTURUS &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+ adev->asic_type == CHIP_RENOIR) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ } else {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_FBPA_ENABLE, 1);
+ }
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+ }
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
@@ -715,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
vega10_ih_update_clockgating_state(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
return 0;
}
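Several hunks above lean on REG_SET_FIELD() to update one field of a register value without disturbing its neighbours. A minimal model of the idiom, with an invented field layout:

#include <assert.h>
#include <stdint.h>

#define FIELD_SHIFT 4
#define FIELD_MASK  (0x3u << FIELD_SHIFT)

/* clear the field, then or in the new value at the field's shift */
static uint32_t reg_set_field(uint32_t reg, uint32_t val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	uint32_t reg = 0xffffffffu;

	reg = reg_set_field(reg, 1);
	assert((reg & FIELD_MASK) == (1u << FIELD_SHIFT));
	assert((reg & ~FIELD_MASK) == (0xffffffffu & ~FIELD_MASK));
	return 0;
}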
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index e4f4201b3c34..78b35901643b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,40 +689,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
-
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-}
-
-int smu7_asic_baco_reset(struct amdgpu_device *adev)
-{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
-
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
-
- dev_info(adev->dev, "GPU BACO reset\n");
-
- return 0;
-}
-
/**
* vi_asic_pci_config_reset - soft reset GPU
*
@@ -747,8 +713,6 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
- bool baco_support;
-
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_TONGA:
@@ -756,14 +720,10 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
- smu7_asic_get_baco_capability(adev, &baco_support);
- break;
+ return amdgpu_dpm_is_baco_supported(adev);
default:
- baco_support = false;
- break;
+ return false;
}
-
- return baco_support;
}
static enum amd_reset_method
@@ -778,7 +738,7 @@ vi_asic_reset_method(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
- smu7_asic_get_baco_capability(adev, &baco_reset);
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
default:
baco_reset = false;
@@ -807,7 +767,7 @@ static int vi_asic_reset(struct amdgpu_device *adev)
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
if (!adev->in_suspend)
amdgpu_inc_vram_lost(adev);
- r = smu7_asic_baco_reset(adev);
+ r = amdgpu_dpm_baco_reset(adev);
} else {
r = vi_asic_pci_config_reset(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 40d4174913a4..defb4aaf929a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,7 +31,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
-int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
-int smu7_asic_baco_reset(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index b6ba0697c531..3f0300e53727 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -42,6 +42,7 @@
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
+static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);
static const char kfd_dev_name[] = "kfd";
@@ -51,6 +52,7 @@ static const struct file_operations kfd_fops = {
.unlocked_ioctl = kfd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
+ .release = kfd_release,
.mmap = kfd_mmap,
};
@@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- if (kfd_is_locked())
+ if (kfd_is_locked()) {
+ kfd_unref_process(process);
return -EAGAIN;
+ }
+
+ /* filep now owns the reference returned by kfd_create_process */
+ filep->private_data = process;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep)
return 0;
}
+static int kfd_release(struct inode *inode, struct file *filep)
+{
+ struct kfd_process *process = filep->private_data;
+
+ if (process)
+ kfd_unref_process(process);
+
+ return 0;
+}
+
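Together with the kfd_open() hunk, this establishes clear reference ownership: open takes a reference on the process object and parks it in filep->private_data, and release drops it, including on the kfd_is_locked() error path. A plain-C sketch of that pairing (no kref here; the counter only shows the balance):

#include <stdio.h>

struct process { int refcount; };

static void unref(struct process *p) { p->refcount--; }

static struct process *open_fd(struct process *p, int locked)
{
	p->refcount++;              /* reference returned by "create" */
	if (locked) {
		unref(p);           /* error path must drop it */
		return NULL;
	}
	return p;                   /* fd now owns the reference */
}

static void release_fd(struct process *p)
{
	if (p)
		unref(p);           /* pairs with the open-time get */
}

int main(void)
{
	struct process proc = { .refcount = 1 };
	struct process *fd = open_fd(&proc, 0);

	release_fd(fd);
	printf("refcount back to %d\n", proc.refcount); /* 1 */
	return 0;
}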
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
@@ -1801,9 +1818,14 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
- process = kfd_get_process(current);
- if (IS_ERR(process)) {
- dev_dbg(kfd_device, "no process\n");
+ /* Get the process struct from the filep. Only the process
+ * that opened /dev/kfd can use the file descriptor. Child
+ * processes need to create their own KFD device context.
+ */
+ process = filep->private_data;
+ if (process->lead_thread != current->group_leader) {
+ dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
+ retcode = -EBADF;
goto err_i1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 15c523027285..511712c2e382 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
- debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
NULL, &kfd_debugfs_hang_hws_fops);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 209bfc849352..2a9e40131735 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -728,6 +728,9 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return 0;
+
+ kfd->dqm->ops.pre_reset(kfd->dqm);
+
kgd2kfd_suspend(kfd);
kfd_signal_reset_event(kfd);
@@ -822,6 +825,21 @@ dqm_start_error:
return err;
}
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ int cpu, new_cpu;
+
+ cpu = new_cpu = smp_processor_id();
+ do {
+ new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+ if (cpu_to_node(new_cpu) == numa_node_id())
+ break;
+ } while (cpu != new_cpu);
+
+ queue_work_on(new_cpu, wq, work);
+}
+
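kfd_queue_work() walks cpu_online_mask with wraparound, looking for the next online CPU on the local NUMA node and falling back to the starting CPU if the scan comes full circle. A userspace sketch of the same walk over an invented topology:

#include <stdio.h>

#define NR_CPUS 8

/* invented topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int pick_cpu(int cpu, int local_node)
{
	int new_cpu = cpu;

	do {
		new_cpu = (new_cpu + 1) % NR_CPUS;  /* scan with wraparound */
		if (cpu_node[new_cpu] == local_node)
			break;
	} while (cpu != new_cpu);               /* give up after one lap */

	return new_cpu;
}

int main(void)
{
	/* interrupt taken on CPU 6, device local to node 0 */
	printf("queue work on CPU %d\n", pick_cpu(6, 0)); /* CPU 0 */
	return 0;
}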
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -844,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(kfd,
is_patched ? patched_ihre : ih_ring_entry))
- queue_work(kfd->ih_wq, &kfd->interrupt_work);
+ kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f7f6df40875e..80d22bf702e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -930,11 +930,11 @@ static void uninitialize(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqd_mgrs[i]);
mutex_destroy(&dqm->lock_hidden);
- kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ pr_info("SW scheduler is used");
init_interrupts(dqm);
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
@@ -947,12 +947,19 @@ static int start_nocpsch(struct device_queue_manager *dqm)
static int stop_nocpsch(struct device_queue_manager *dqm)
{
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, false);
dqm->sched_running = false;
return 0;
}
+static void pre_reset(struct device_queue_manager *dqm)
+{
+ dqm_lock(dqm);
+ dqm->is_resetting = true;
+ dqm_unlock(dqm);
+}
+
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q)
{
@@ -1100,6 +1107,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
/* clear hang status when the driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->is_resetting = false;
dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1107,20 +1115,24 @@ static int start_cpsch(struct device_queue_manager *dqm)
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ bool hanging;
+
dqm_lock(dqm);
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ if (!dqm->is_hws_hang)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, hanging);
return 0;
}
@@ -1225,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+ dqm->xgmi_sdma_queue_count++;
+
if (q->properties.is_active) {
dqm->queue_count++;
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -1352,8 +1366,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
queue_preemption_timeout_ms);
- if (retval)
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ /* It's possible we're detecting a HWS hang in the
+ * middle of a GPU reset. No need to schedule another
+ * reset in this case.
+ */
+ if (!dqm->is_resetting)
+ schedule_work(&dqm->hw_exception_work);
return retval;
+ }
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
@@ -1371,12 +1394,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->is_hws_hang)
return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
- if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
- dqm->is_hws_hang = true;
- schedule_work(&dqm->hw_exception_work);
+ if (retval)
return retval;
- }
return map_queues_cpsch(dqm);
}
@@ -1770,6 +1789,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -1788,6 +1808,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index a8c37e6da027..871d3b628d2d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -104,6 +104,7 @@ struct device_queue_manager_ops {
int (*initialize)(struct device_queue_manager *dqm);
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
+ void (*pre_reset)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
@@ -190,7 +191,6 @@ struct device_queue_manager {
/* the pasid mapping for each kfd vmid */
uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
- struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
unsigned int *fence_addr;
struct kfd_mem_obj *fence_mem;
@@ -199,6 +199,7 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
+ bool is_resetting;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2d56dc534459..bae706462f96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -195,9 +195,9 @@ err_get_kernel_doorbell:
}
/* Uninitialize a kernel queue and free all its memory usages. */
-static void kq_uninitialize(struct kernel_queue *kq)
+static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
{
- if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
@@ -337,9 +337,9 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
return NULL;
}
-void kernel_queue_uninit(struct kernel_queue *kq)
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
{
- kq_uninitialize(kq);
+ kq_uninitialize(kq, hanging);
kfree(kq);
}
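The new hanging flag threads from stop_cpsch() through pm_uninit() and kernel_queue_uninit() down to kq_uninitialize(): when the hardware scheduler is hung, teardown skips the HIQ MQD destroy that would have to talk to dead hardware and only releases host-side resources. A sketch of that split, with stand-in types rather than the KFD ones:

#include <stdbool.h>
#include <stdio.h>

static void destroy_mqd_on_hw(void) { puts("HIQ preempted on HW"); }
static void free_host_memory(void) { puts("host memory freed"); }

static void kq_uninit(bool hanging)
{
	if (!hanging)
		destroy_mqd_on_hw();  /* unsafe if the scheduler is hung */
	free_host_memory();
}

int main(void)
{
	kq_uninit(false);  /* normal stop: preempt, then free */
	kq_uninit(true);   /* GPU reset path: free only */
	return 0;
}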
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 7832ec6e480b..d1d68a51bfb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -153,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -409,7 +417,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
+ mqd->load_mqd = hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index aa9010995eaf..436b7f518979 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -191,6 +191,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
wptr_shift, 0, mms);
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -449,7 +457,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
+ mqd->load_mqd = hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 6cabed06ef5d..dc406e6dee23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -264,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
return 0;
}
-void pm_uninit(struct packet_manager *pm)
+void pm_uninit(struct packet_manager *pm, bool hanging)
{
mutex_destroy(&pm->lock);
- kernel_queue_uninit(pm->priv_queue);
+ kernel_queue_uninit(pm->priv_queue, hanging);
}
int pm_send_set_resources(struct packet_manager *pm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index fc61b5ec068e..6af1b5881f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -883,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq);
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
@@ -972,7 +972,7 @@ extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm);
+void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8276601a122f..25b90f70aecd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -32,6 +32,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"
+#include "amdgpu.h"
struct mm_struct;
@@ -324,6 +325,8 @@ struct kfd_process *kfd_create_process(struct file *filep)
(int)process->lead_thread->pid);
}
out:
+ if (!IS_ERR(process))
+ kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
return process;
@@ -1150,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
- const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
- f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+ amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+ pdd->qpd.vmid);
} else {
- f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+ pdd->process->pasid);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 1152490bbf53..31fcd1b51f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -374,7 +374,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
- kernel_queue_uninit(pqn->kq);
+ kernel_queue_uninit(pqn->kq, false);
}
if (pqn->q) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 69bd0628fdc6..203c823d65f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -486,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_engines);
sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
dev->node_props.num_sdma_xgmi_engines);
+ sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine",
+ dev->node_props.num_sdma_queues_per_engine);
+ sysfs_show_32bit_prop(buffer, "num_cp_queues",
+ dev->node_props.num_cp_queues);
if (dev->gpu) {
log_max_watch_addr =
@@ -1309,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
dev->node_props.num_sdma_xgmi_engines =
gpu->device_info->num_xgmi_sdma_engines;
+ dev->node_props.num_sdma_queues_per_engine =
+ gpu->device_info->num_sdma_queues_per_engine;
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+ dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 15843e0fc756..74e9b1682af8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -81,6 +81,8 @@ struct kfd_node_properties {
int32_t drm_render_minor;
uint32_t num_sdma_engines;
uint32_t num_sdma_xgmi_engines;
+ uint32_t num_sdma_queues_per_engine;
+ uint32_t num_cp_queues;
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 096db863c345..87858bc57e64 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ea55f4160c80..df1535543fde 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -98,6 +98,12 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
/**
* DOC: overview
*
@@ -741,28 +747,27 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
- const unsigned int psp_header_bytes = 0x100;
- const unsigned int psp_footer_bytes = 0x100;
const struct dmcub_firmware_header_v1_0 *hdr;
struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
const struct firmware *dmub_fw = adev->dm.dmub_fw;
struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
struct abm *abm = adev->dm.dc->res_pool->abm;
- struct dmub_srv_region_params region_params;
- struct dmub_srv_region_info region_info;
- struct dmub_srv_fb_params fb_params;
- struct dmub_srv_fb_info fb_info;
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
- uint32_t i;
- int r;
+ uint32_t i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
if (!dmub_srv)
/* DMUB isn't supported on the ASIC. */
return 0;
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
if (!dmub_fw) {
/* Firmware required for DMUB support. */
DRM_ERROR("No firmware provided for DMUB.\n");
@@ -782,60 +787,36 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
- /* Calculate the size of all the regions for the DMUB service. */
- memset(&region_params, 0, sizeof(region_params));
-
- region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
- psp_header_bytes - psp_footer_bytes;
- region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
- region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size;
-
- status = dmub_srv_calc_region_info(dmub_srv, &region_params,
- &region_info);
-
- if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
- return -EINVAL;
- }
-
- /*
- * Allocate a framebuffer based on the total size of all the regions.
- * TODO: Move this into GART.
- */
- r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
- &adev->dm.dmub_bo_gpu_addr,
- &adev->dm.dmub_bo_cpu_addr);
- if (r)
- return r;
-
- /* Rebase the regions on the framebuffer address. */
- memset(&fb_params, 0, sizeof(fb_params));
- fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
- fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
- fb_params.region_info = &region_info;
-
- status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info);
- if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
- return -EINVAL;
- }
-
fw_inst_const = dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
- psp_header_bytes;
+ PSP_HEADER_BYTES;
fw_bss_data = dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes);
/* Copy firmware and bios info into FB memory. */
- memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
- region_params.inst_const_size);
- memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
- region_params.bss_data_size);
- memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr,
- adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size);
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
+ fw_bss_data_size);
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Zero the mailbox, trace buffer and firmware-state regions. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
/* Initialize hardware. */
memset(&hw_params, 0, sizeof(hw_params));
@@ -845,8 +826,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (dmcu)
hw_params.psp_version = dmcu->psp_version;
- for (i = 0; i < fb_info.num_fb; ++i)
- hw_params.fb[i] = &fb_info.fb[i];
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
@@ -925,13 +906,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
- /*
- * TODO debug why this doesn't work on Raven
- */
- if (adev->flags & AMD_IS_APU &&
- adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN)
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
+ break;
+ default:
+ break;
+ }
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
@@ -956,14 +940,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
- dc_hardware_init(adev->dm.dc);
-
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
+ dc_hardware_init(adev->dm.dc);
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -1174,6 +1158,11 @@ static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_fb_params fb_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
const char *fw_name_dmub;
enum dmub_asic dmub_asic;
@@ -1191,24 +1180,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
- adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
- if (!adev->dm.dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
- return -ENOMEM;
- }
-
- memset(&create_params, 0, sizeof(create_params));
- create_params.user_ctx = adev;
- create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
- create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
- create_params.asic = dmub_asic;
-
- status = dmub_srv_create(adev->dm.dmub_srv, &create_params);
- if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
- return -EINVAL;
- }
-
r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
if (r) {
DRM_ERROR("DMUB firmware loading failed: %d\n", r);
@@ -1238,6 +1209,80 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&fb_params, 0, sizeof(fb_params));
+ fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+ fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+ fb_params.region_info = &region_info;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
return 0;
}
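The code moved into dm_dmub_sw_init() follows a common carve-up pattern: sum the region sizes once, back them with a single allocation, then rebase each region at its recorded offset. A self-contained sketch with invented regions and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { WIN_INST, WIN_BSS, WIN_VBIOS, NUM_WIN };

int main(void)
{
	size_t size[NUM_WIN] = { 0x20000, 0x8000, 0x10000 };
	size_t offset[NUM_WIN], total = 0;

	for (int i = 0; i < NUM_WIN; i++) {   /* "calc_region_info" pass */
		offset[i] = total;
		total += size[i];
	}

	uint8_t *fb = malloc(total);          /* one backing allocation */
	if (!fb)
		return 1;

	for (int i = 0; i < NUM_WIN; i++)     /* "calc_fb_info": rebase */
		printf("window %d at %p\n", i, (void *)(fb + offset[i]));

	free(fb);
	return 0;
}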
@@ -1257,6 +1302,9 @@ static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
@@ -1559,7 +1607,7 @@ static int dm_resume(void *handle)
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
- int i;
+ int i, r;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
@@ -1567,6 +1615,11 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
+ /* Before powering on DC we need to re-initialize DMUB. */
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3654,27 +3707,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
-
- timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
- const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
{
+ enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
normalized_clk /= 2;
/* Adjust the pixel clock per the HDMI spec based on colour depth */
- switch (timing_out->display_color_depth) {
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
case COLOR_DEPTH_101010:
normalized_clk = (normalized_clk * 30) / 24;
break;
@@ -3685,14 +3732,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
normalized_clk = (normalized_clk * 48) / 24;
break;
default:
- return;
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
}
- if (normalized_clk <= info->max_tmds_clock)
- return;
- reduce_mode_colour_depth(timing_out);
-
- } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
}
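The loop above scales the normalized clock by bits per component before comparing against the sink's max TMDS clock: x30/24 for 10 bpc, x36/24 for 12 bpc, x48/24 for 16 bpc (halved first for YCbCr 4:2:0). A worked example with illustrative numbers, showing why a 594 MHz mode steps down to 8 bpc on a 600 MHz sink:

#include <stdio.h>

int main(void)
{
	int pix_clk_khz = 594000;          /* hypothetical 4K@60 4:4:4 mode */
	int max_tmds_khz = 600000;         /* hypothetical sink limit */
	int scale_num[] = { 24, 30, 36 };  /* 8, 10, 12 bpc */
	const char *name[] = { "8 bpc", "10 bpc", "12 bpc" };

	/* walk the depths high to low, as the driver loop does */
	for (int i = 2; i >= 0; i--) {
		long clk = (long)pix_clk_khz * scale_num[i] / 24;
		printf("%s: %ld kHz -> %s\n", name[i], clk,
		       clk <= max_tmds_khz ? "fits" : "too fast");
	}
	return 0;
}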
static void fill_stream_properties_from_drm_display_mode(
@@ -3773,8 +3821,14 @@ static void fill_stream_properties_from_drm_display_mode(
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- adjust_colour_depth_from_display_info(timing_out, info);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -4025,7 +4079,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
- dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
&dsc_caps);
#endif
@@ -4884,12 +4939,13 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
is_y420);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
- dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
}
dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
mst_mgr,
mst_port,
- dm_new_connector_state->pbn);
+ dm_new_connector_state->pbn,
+ 0);
if (dm_new_connector_state->vcpi_slots < 0) {
DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
return dm_new_connector_state->vcpi_slots;
@@ -4902,6 +4958,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state, *old_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, clock, bpp;
+ int vcpi, pbn_div, pbn = 0;
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ if (stream->timing.flags.DSC != 1) {
+ drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ dm_conn_state->pbn,
+ 0,
+ false);
+ continue;
+ }
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ bpp = stream->timing.dsc_cfg.bits_per_pixel;
+ clock = stream->timing.pix_clk_100hz / 10;
+ pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state,
+ aconnector->port,
+ pbn, pbn_div,
+ true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+#endif
+
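A rough, standalone model of what drm_dp_calc_pbn_mode(clock, bpp, true) works out to in the DSC branch above: with the DSC flag set, the bpp argument is in 1/16th-bit units, matching dsc_cfg.bits_per_pixel. The timing, target bpp, and exact rounding here are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int clock_khz = 533250; /* assumed 4K@60 CVT-RB pixel clock */
	int bpp_x16 = 12 * 16;  /* assumed DSC target of 12 bpp, x16 units */

	uint64_t kbps = (uint64_t)clock_khz * bpp_x16 / 16;
	/* 0.6% margin (1006/1000), then PBN units of 54/64 MBps, round up */
	uint64_t pbn = (kbps * 1006 / 1000 * 64 + 54 * 8 * 1000 - 1) /
		       (54 * 8 * 1000);

	printf("pbn = %llu\n", (unsigned long long)pbn);
	return 0;
}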
static void dm_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -5564,9 +5685,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
- /* This defaults to the max in the range, but we want 8bpc. */
- aconnector->base.state->max_bpc = 8;
- aconnector->base.state->max_requested_bpc = 8;
+ /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
+ aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -7641,24 +7762,27 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
struct dc_stream_status *status = NULL;
-
- struct dc_surface_update *updates;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_info_bundle {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
- updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
- if (!updates) {
- DRM_ERROR("Failed to allocate plane updates\n");
+ if (!bundle) {
+ DRM_ERROR("Failed to allocate update bundle\n");
/* Set type to FULL to avoid crashing in DC */
update_type = UPDATE_TYPE_FULL;
goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dc_scaling_info scaling_info;
- struct dc_stream_update stream_update;
- memset(&stream_update, 0, sizeof(stream_update));
+ memset(bundle, 0, sizeof(struct surface_info_bundle));
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -7675,8 +7799,9 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(new_plane_state->fb);
- struct dc_plane_info plane_info;
- struct dc_flip_addrs flip_addr;
+ struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
+ struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
+ struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
uint64_t tiling_flags;
new_plane_crtc = new_plane_state->crtc;
@@ -7694,49 +7819,48 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
if (crtc != new_plane_crtc)
continue;
- updates[num_plane].surface = new_dm_plane_state->dc_state;
+ bundle->surface_updates[num_plane].surface =
+ new_dm_plane_state->dc_state;
if (new_crtc_state->mode_changed) {
- stream_update.dst = new_dm_crtc_state->stream->dst;
- stream_update.src = new_dm_crtc_state->stream->src;
+ bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
+ bundle->stream_update.src = new_dm_crtc_state->stream->src;
}
if (new_crtc_state->color_mgmt_changed) {
- updates[num_plane].gamma =
+ bundle->surface_updates[num_plane].gamma =
new_dm_plane_state->dc_state->gamma_correction;
- updates[num_plane].in_transfer_func =
+ bundle->surface_updates[num_plane].in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
- stream_update.gamut_remap =
+ bundle->stream_update.gamut_remap =
&new_dm_crtc_state->stream->gamut_remap_matrix;
- stream_update.output_csc_transform =
+ bundle->stream_update.output_csc_transform =
&new_dm_crtc_state->stream->csc_color_matrix;
- stream_update.out_transfer_func =
+ bundle->stream_update.out_transfer_func =
new_dm_crtc_state->stream->out_transfer_func;
}
ret = fill_dc_scaling_info(new_plane_state,
- &scaling_info);
+ scaling_info);
if (ret)
goto cleanup;
- updates[num_plane].scaling_info = &scaling_info;
+ bundle->surface_updates[num_plane].scaling_info = scaling_info;
if (amdgpu_fb) {
ret = get_fb_info(amdgpu_fb, &tiling_flags);
if (ret)
goto cleanup;
- memset(&flip_addr, 0, sizeof(flip_addr));
-
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
- &plane_info,
- &flip_addr.address);
+ plane_info,
+ &flip_addr->address);
if (ret)
goto cleanup;
- updates[num_plane].plane_info = &plane_info;
- updates[num_plane].flip_addr = &flip_addr;
+ bundle->surface_updates[num_plane].plane_info = plane_info;
+ bundle->surface_updates[num_plane].flip_addr = flip_addr;
}
num_plane++;
@@ -7757,14 +7881,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
- stream_update.stream = new_dm_crtc_state->stream;
+ bundle->stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
*/
mutex_lock(&dm->dc_lock);
- update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
- &stream_update, status);
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, bundle->surface_updates, num_plane,
+ &bundle->stream_update, status);
mutex_unlock(&dm->dc_lock);
if (update_type > UPDATE_TYPE_MED) {
@@ -7774,12 +7899,35 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
}
cleanup:
- kfree(updates);
+ kfree(bundle);
*out_type = update_type;
return ret;
}
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->crtc != crtc)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->port || !aconnector->mst_port)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -7832,6 +7980,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ if (adev->asic_type >= CHIP_NAVI10) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ }
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
@@ -7935,11 +8093,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /* Perform validation of MST topology in the state*/
- ret = drm_dp_mst_atomic_check(state);
- if (ret)
- goto fail;
-
if (state->legacy_cursor_update) {
/*
* This is a fast cursor update coming from the plane update
@@ -8008,6 +8161,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+ goto fail;
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+ if (ret)
+ goto fail;
+#endif
+
+ /*
+ * Perform validation of MST topology in the state:
+ * We need to perform MST atomic check before calling
+ * dc_validate_global_state(), or there is a chance
+ * to get stuck in an infinite loop and hang eventually.
+ */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ goto fail;
+
if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL;
goto fail;
@@ -8234,17 +8406,38 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- struct dc_static_screen_events triggers = {0};
+ unsigned int vsync_rate_hz = 0;
+ struct dc_static_screen_params params = {0};
+ /*
+ * Calculate the number of static frames before generating an
+ * interrupt to enter PSR. Initialize with a fail-safe of 2 static
+ * frames.
+ */
+ unsigned int num_frames_static = 2;
DRM_DEBUG_DRIVER("Enabling psr...\n");
- triggers.cursor_update = true;
- triggers.overlay_update = true;
- triggers.surface_update = true;
+ vsync_rate_hz = div64_u64(div64_u64((
+ stream->timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ /*
+ * Calculate the number of frames such that at least 30 ms of time
+ * has passed, rounding up.
+ */
+ if (vsync_rate_hz != 0) {
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+ num_frames_static = (30000 / frame_time_microsec) + 1;
+ }
+
+ params.triggers.cursor_update = true;
+ params.triggers.overlay_update = true;
+ params.triggers.surface_update = true;
+ params.num_frames = num_frames_static;
- dc_stream_set_static_screen_events(link->ctx->dc,
+ dc_stream_set_static_screen_params(link->ctx->dc,
&stream, 1,
- &triggers);
+ &params);
return dc_link_set_psr_allow_active(link, true, false);
}
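As a quick check of the static-frame arithmetic introduced above, here is a standalone sketch with an assumed 1080p@60 timing; it lands on the 2-frame fail-safe, since one ~16.7 ms frame already covers half of the 30 ms window:

#include <stdio.h>

int main(void)
{
	/* assumed 1080p@60 timing: 148.5 MHz, 2200 x 1125 total */
	unsigned long long pix_clk_100hz = 1485000;
	unsigned int v_total = 1125, h_total = 2200;

	unsigned int vsync_rate_hz =
		pix_clk_100hz * 100 / v_total / h_total;      /* -> 60 */
	unsigned int frame_time_us = 1000000 / vsync_rate_hz; /* -> 16666 */
	/* frames covering at least 30 ms, rounded up as above */
	unsigned int num_frames_static = 30000 / frame_time_us + 1;

	printf("vsync %u Hz, frame %u us, %u static frames\n",
	       vsync_rate_hz, frame_time_us, num_frames_static);
	return 0;
}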
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index a8fc90a927d6..7ea9acb0358d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -133,6 +133,13 @@ struct amdgpu_display_manager {
struct dmub_srv *dmub_srv;
/**
+ * @dmub_fb_info:
+ *
+ * Framebuffer regions for the DMUB.
+ */
+ struct dmub_srv_fb_info *dmub_fb_info;
+
+ /**
* @dmub_fw:
*
* DMUB firmware, required on hardware that has DMUB support.
@@ -323,6 +330,7 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
+ struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index ae329335dfcc..0acd3409dd6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -135,6 +135,20 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mutex_unlock(&hdcp_w->mutex);
}
+static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
+ unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
+
+ process_output(hdcp_w);
+ mutex_unlock(&hdcp_w->mutex);
+}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -303,6 +317,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
memset(link, 0, sizeof(*link));
display->index = aconnector->base.index;
+
+ if (config->dpms_off) {
+ hdcp_remove_display(hdcp_work, link_index, aconnector);
+ return;
+ }
display->state = MOD_HDCP_DISPLAY_ACTIVE;
if (aconnector->dc_sink != NULL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 66f266a5e10b..318b474ff20e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -37,6 +37,7 @@
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_mst_types.h"
#include "dm_helpers.h"
@@ -215,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
}
- ret = drm_dp_update_payload_part1(mst_mgr);
+ /* It's OK for this to fail */
+ drm_dp_update_payload_part1(mst_mgr);
/* mst_mgr->payloads are VC payloads that notify the MST branch using DPCD or
 * AUX messages. Slots 1-63 are allocated in sequence for each
@@ -224,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
get_payload_table(aconnector, proposed_table);
- if (ret)
- return false;
-
return true;
}
@@ -284,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
- int ret;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -298,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
if (!mst_mgr->mst_state)
return false;
- ret = drm_dp_update_payload_part2(mst_mgr);
-
- if (ret)
- return false;
+ /* It's OK for this to fail */
+ drm_dp_update_payload_part2(mst_mgr);
if (!enable)
drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
@@ -516,8 +512,24 @@ bool dm_helpers_dp_write_dsc_enable(
)
{
uint8_t enable_dsc = enable ? 1 : 0;
+ struct amdgpu_dm_connector *aconnector;
+
+ if (!stream)
+ return false;
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector->dsc_aux)
+ return false;
+
+ return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+ }
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
- return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
+ return false;
}
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 64445c4cc4c2..cbcf504f73a5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
*/
static void dm_irq_work_func(struct work_struct *work)
{
- struct list_head *entry;
struct irq_list_head *irq_list_head =
container_of(work, struct irq_list_head, work);
struct list_head *handler_list = &irq_list_head->head;
struct amdgpu_dm_irq_handler_data *handler_data;
- list_for_each(entry, handler_list) {
- handler_data = list_entry(entry,
- struct amdgpu_dm_irq_handler_data,
- list);
-
+ list_for_each_entry(handler_data, handler_list, list) {
DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
handler_data->irq_source);
@@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
enum dc_irq_source irq_source)
{
struct amdgpu_dm_irq_handler_data *handler_data;
- struct list_head *entry;
unsigned long irq_table_flags;
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- list_for_each(
- entry,
- &adev->dm.irq_handler_list_high_tab[irq_source]) {
-
- handler_data = list_entry(entry,
- struct amdgpu_dm_irq_handler_data,
- list);
-
+ list_for_each_entry(handler_data,
+ &adev->dm.irq_handler_list_high_tab[irq_source],
+ list) {
/* Call a subcomponent which registered for immediate
* interrupt notification */
handler_data->handler(handler_data->handler_arg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 81367c869134..5672f7765919 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -25,6 +25,7 @@
#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -39,6 +40,12 @@
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#include "dc/dcn20/dcn20_resource.h"
+#endif
+
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
@@ -180,6 +187,30 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *dc_sink = aconnector->dc_sink;
+ struct drm_dp_mst_port *port = aconnector->port;
+ u8 dsc_caps[16] = { 0 };
+
+ aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+
+ if (!aconnector->dsc_aux)
+ return false;
+
+ if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
+ return false;
+
+ if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ dsc_caps, NULL,
+ &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ return false;
+
+ return true;
+}
+#endif
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -222,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!validate_dsc_caps_on_connector(aconnector))
+ memset(&aconnector->dc_sink->sink_dsc_caps,
+ 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+#endif
+ }
}
drm_connector_update_edid_property(
@@ -466,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->connector_id);
}
+int dm_mst_get_pbn_divider(struct dc_link *link)
+{
+ if (!link)
+ return 0;
+
+ return dc_link_bandwidth_kbps(link,
+ dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+}
+
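To put a number on the divider: for an assumed 4-lane HBR2 link (4.32 Gbps of 8b/10b payload per lane), the division above works out to the familiar 40 PBN per MTP timeslot. A standalone sketch:

#include <stdio.h>

int main(void)
{
	/* assumed: 4 lanes at HBR2; 8b/10b payload is 4320000 kbps/lane */
	int link_bw_kbps = 4 * 4320000;

	/* same division as dm_mst_get_pbn_divider() above -> 40 */
	printf("pbn per timeslot = %d\n", link_bw_kbps / (8 * 1000 * 54));
	return 0;
}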
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_params {
+ struct dc_crtc_timing *timing;
+ struct dc_sink *sink;
+ struct dc_dsc_bw_range bw_range;
+ bool compression_possible;
+ struct drm_dp_mst_port *port;
+};
+
+struct dsc_mst_fairness_vars {
+ int pbn;
+ bool dsc_enabled;
+ int bpp_x16;
+};
+
+static int kbps_to_peak_pbn(int kbps)
+{
+ u64 peak_kbps = kbps;
+
+ peak_kbps *= 1006;
+ peak_kbps = div_u64(peak_kbps, 1000);
+ return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+}
+
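The helper above folds two steps into one: a 0.6% peak-bandwidth margin (1006/1000), then a round-up conversion from kbps to PBN units, where one PBN is 54/64 MBps of payload. A standalone restatement with an assumed input rate:

#include <stdio.h>
#include <stdint.h>

/* standalone restatement of kbps_to_peak_pbn() above */
static int kbps_to_peak_pbn(int kbps)
{
	uint64_t peak_kbps = (uint64_t)kbps * 1006 / 1000; /* +0.6% margin */

	/* one PBN is 54/64 MBps of payload; round up */
	return (int)((peak_kbps * 64 + 54 * 8 * 1000 - 1) / (54 * 8 * 1000));
}

int main(void)
{
	printf("pbn = %d\n", kbps_to_peak_pbn(8900000)); /* assumed ~8.9 Gbps */
	return 0;
}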
+static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
+ if (vars[i].dsc_enabled && dc_dsc_compute_config(
+ params[i].sink->ctx->dc->res_pool->dscs[0],
+ &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
+ 0,
+ params[i].timing,
+ &params[i].timing->dsc_cfg)) {
+ params[i].timing->flags.DSC = 1;
+ params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+ } else {
+ params[i].timing->flags.DSC = 0;
+ }
+ }
+}
+
+static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
+{
+ struct dc_dsc_config dsc_config;
+ u64 kbps;
+
+ kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ dc_dsc_compute_config(
+ param.sink->ctx->dc->res_pool->dscs[0],
+ &param.sink->sink_dsc_caps.dsc_dec_caps,
+ param.sink->ctx->dc->debug.dsc_min_slice_height_override,
+ (int) kbps, param.timing, &dsc_config);
+
+ return dsc_config.bits_per_pixel;
+}
+
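Note the 994 above: it is roughly 1000 * 1000 / 1006, so the peak margin that kbps_to_peak_pbn() applied on the way in is stripped again on the way back to kbps. A standalone round trip with an assumed stream rate (sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int kbps = 8900000; /* assumed stream rate */

	/* forward: kbps -> PBN with the 0.6% margin, rounded up */
	uint64_t pbn = ((uint64_t)kbps * 1006 / 1000 * 64 + 54 * 8 * 1000 - 1) /
		       (54 * 8 * 1000);
	/* inverse, as in bpp_x16_from_pbn(): 994/1000 strips the margin */
	uint64_t back = pbn * 994 * 8 * 54 / 64;

	printf("pbn = %llu, kbps back = %llu\n",
	       (unsigned long long)pbn, (unsigned long long)back);
	return 0;
}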
+static void increase_dsc_bpp(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+ bool bpp_increased[MAX_PIPES];
+ int initial_slack[MAX_PIPES];
+ int min_initial_slack;
+ int next_index;
+ int remaining_to_increase = 0;
+ int pbn_per_timeslot;
+ int link_timeslots_used;
+ int fair_pbn_alloc;
+
+ for (i = 0; i < count; i++) {
+ if (vars[i].dsc_enabled) {
+ initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
+ bpp_increased[i] = false;
+ remaining_to_increase += 1;
+ } else {
+ initial_slack[i] = 0;
+ bpp_increased[i] = true;
+ }
+ }
+
+ pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
+ dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
+
+ while (remaining_to_increase) {
+ next_index = -1;
+ min_initial_slack = -1;
+ for (i = 0; i < count; i++) {
+ if (!bpp_increased[i]) {
+ if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
+ min_initial_slack = initial_slack[i];
+ next_index = i;
+ }
+ }
+ }
+
+ if (next_index == -1)
+ break;
+
+ link_timeslots_used = 0;
+
+ for (i = 0; i < count; i++)
+ link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
+
+ fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+
+ if (initial_slack[next_index] > fair_pbn_alloc) {
+ vars[next_index].pbn += fair_pbn_alloc;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
+ } else {
+ vars[next_index].pbn -= fair_pbn_alloc;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+ } else {
+ vars[next_index].pbn += initial_slack[next_index];
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
+ } else {
+ vars[next_index].pbn -= initial_slack[next_index];
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+ }
+
+ bpp_increased[next_index] = true;
+ remaining_to_increase--;
+ }
+}
+
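The greedy pass above always grows the stream with the least headroom first, and the per-round budget comes from the timeslots still unused on the link. A toy instance of the fair_pbn_alloc line with assumed numbers:

#include <stdio.h>

int main(void)
{
	/* assumed: 4-lane HBR2 link (40 PBN/slot), 30 slots in use,
	 * two streams still eligible to grow */
	int pbn_per_timeslot = 40;
	int link_timeslots_used = 30;
	int remaining_to_increase = 2;

	/* same expression as the fair_pbn_alloc line above */
	int fair_pbn_alloc = (63 - link_timeslots_used) /
			     remaining_to_increase * pbn_per_timeslot;

	printf("each remaining stream may grow by %d PBN this round\n",
	       fair_pbn_alloc);
	return 0;
}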
+static void try_disable_dsc(struct drm_atomic_state *state,
+ struct dc_link *dc_link,
+ struct dsc_mst_fairness_params *params,
+ struct dsc_mst_fairness_vars *vars,
+ int count)
+{
+ int i;
+ bool tried[MAX_PIPES];
+ int kbps_increase[MAX_PIPES];
+ int max_kbps_increase;
+ int next_index;
+ int remaining_to_try = 0;
+
+ for (i = 0; i < count; i++) {
+ if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
+ kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
+ tried[i] = false;
+ remaining_to_try += 1;
+ } else {
+ kbps_increase[i] = 0;
+ tried[i] = true;
+ }
+ }
+
+ while (remaining_to_try) {
+ next_index = -1;
+ max_kbps_increase = -1;
+ for (i = 0; i < count; i++) {
+ if (!tried[i]) {
+ if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
+ max_kbps_increase = kbps_increase[i];
+ next_index = i;
+ }
+ }
+ }
+
+ if (next_index == -1)
+ break;
+
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ 0) < 0)
+ return;
+
+ if (!drm_dp_mst_atomic_check(state)) {
+ vars[next_index].dsc_enabled = false;
+ vars[next_index].bpp_x16 = 0;
+ } else {
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[next_index].port->mgr,
+ params[next_index].port,
+ vars[next_index].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return;
+ }
+
+ tried[next_index] = true;
+ remaining_to_try--;
+ }
+}
+
+static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dc_link *dc_link)
+{
+ int i;
+ struct dc_stream_state *stream;
+ struct dsc_mst_fairness_params params[MAX_PIPES];
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+ int count = 0;
+
+ memset(params, 0, sizeof(params));
+
+ /* Set up params */
+ for (i = 0; i < dc_state->stream_count; i++) {
+ struct dc_dsc_policy dsc_policy = {0};
+
+ stream = dc_state->streams[i];
+
+ if (stream->link != dc_link)
+ continue;
+
+ stream->timing.flags.DSC = 0;
+
+ params[count].timing = &stream->timing;
+ params[count].sink = stream->sink;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ params[count].port = aconnector->port;
+ params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
+ if (!dc_dsc_compute_bandwidth_range(
+ stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp,
+ dsc_policy.max_target_bpp,
+ &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->timing, &params[count].bw_range))
+ params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ count++;
+ }
+ /* Try no compression */
+ for (i = 0; i < count; i++) {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i].dsc_enabled = false;
+ vars[i].bpp_x16 = 0;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ 0) < 0)
+ return false;
+ }
+ if (!drm_dp_mst_atomic_check(state)) {
+ set_dsc_configs_from_fairness_vars(params, vars, count);
+ return true;
+ }
+
+ /* Try max compression */
+ for (i = 0; i < count; i++) {
+ if (params[i].compression_possible) {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i].dsc_enabled = true;
+ vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ dm_mst_get_pbn_divider(dc_link)) < 0)
+ return false;
+ } else {
+ vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i].dsc_enabled = false;
+ vars[i].bpp_x16 = 0;
+ if (drm_dp_atomic_find_vcpi_slots(state,
+ params[i].port->mgr,
+ params[i].port,
+ vars[i].pbn,
+ 0) < 0)
+ return false;
+ }
+ }
+ if (drm_dp_mst_atomic_check(state))
+ return false;
+
+ /* Optimize degree of compression */
+ increase_dsc_bpp(state, dc_link, params, vars, count);
+
+ try_disable_dsc(state, dc_link, params, vars, count);
+
+ set_dsc_configs_from_fairness_vars(params, vars, count);
+
+ return true;
+}
+
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ int i, j;
+ struct dc_stream_state *stream;
+ bool computed_streams[MAX_PIPES];
+ struct amdgpu_dm_connector *aconnector;
+
+ for (i = 0; i < dc_state->stream_count; i++)
+ computed_streams[i] = false;
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ stream = dc_state->streams[i];
+
+ if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
+ continue;
+
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector || !aconnector->dc_sink)
+ continue;
+
+ if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ continue;
+
+ if (computed_streams[i])
+ continue;
+
+ mutex_lock(&aconnector->mst_mgr.lock);
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+ mutex_unlock(&aconnector->mst_mgr.lock);
+ return false;
+ }
+ mutex_unlock(&aconnector->mst_mgr.lock);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (dc_state->streams[j]->link == stream->link)
+ computed_streams[j] = true;
+ }
+ }
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ stream = dc_state->streams[i];
+
+ if (stream->timing.flags.DSC == 1)
+ dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+ }
+
+ return true;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..d6813ce67bbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -29,7 +29,14 @@
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
+int dm_mst_get_pbn_divider(struct dc_link *link);
+
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ struct dc_state *dc_state);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2cb7a4288cb7..c4ba6e84db65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -89,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5(
struct bios_parser *bp,
struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_fallback(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
static void init_dig_encoder_control(struct bios_parser *bp)
{
uint32_t version =
@@ -100,7 +104,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
break;
default:
dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
- bp->cmd_tbl.dig_encoder_control = NULL;
+ bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
break;
}
}
@@ -184,6 +188,18 @@ static enum bp_result encoder_control_digx_v1_5(
return result;
}
+static enum bp_result encoder_control_fallback(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return encoder_control_digx_v1_5(bp, cntl);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
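Each command-table init in this file now follows the same shape: rather than leaving the function pointer NULL on an unknown VBIOS table revision, it falls back to the newest known version, guarded on DMUB being present to service the call. A toy, self-contained rendering of that pattern (stand-in types, not the driver's):

#include <stdio.h>
#include <stdbool.h>

struct ctx { bool dmub_srv; bool dmub_command_table; }; /* toy stand-in */

enum bp_result { BP_RESULT_OK, BP_RESULT_FAILURE };

static enum bp_result encoder_control_v1_5(struct ctx *c)
{
	(void)c; /* would program the encoder via the DMUB command table */
	return BP_RESULT_OK;
}

/* same guard as the *_fallback helpers in this hunk */
static enum bp_result encoder_control_fallback(struct ctx *c)
{
	if (c->dmub_srv && c->dmub_command_table)
		return encoder_control_v1_5(c);
	return BP_RESULT_FAILURE;
}

int main(void)
{
	struct ctx c = { .dmub_srv = true, .dmub_command_table = false };

	printf("%s\n", encoder_control_fallback(&c) == BP_RESULT_OK ?
	       "forwarded to v1.5" : "failed safely");
	return 0;
}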
/*****************************************************************************
******************************************************************************
**
@@ -196,6 +212,10 @@ static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_fallback(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
static void init_transmitter_control(struct bios_parser *bp)
{
uint8_t frev;
@@ -209,7 +229,7 @@ static void init_transmitter_control(struct bios_parser *bp)
break;
default:
dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
- bp->cmd_tbl.transmitter_control = NULL;
+ bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
break;
}
}
@@ -273,6 +293,18 @@ static enum bp_result transmitter_control_v1_6(
return result;
}
+static enum bp_result transmitter_control_fallback(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return transmitter_control_v1_6(bp, cntl);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/******************************************************************************
******************************************************************************
**
@@ -285,6 +317,10 @@ static enum bp_result set_pixel_clock_v7(
struct bios_parser *bp,
struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_fallback(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
static void init_set_pixel_clock(struct bios_parser *bp)
{
switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
@@ -294,7 +330,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
default:
dm_output_to_console("Don't have set_pixel_clock for v%d\n",
BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
- bp->cmd_tbl.set_pixel_clock = NULL;
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
break;
}
}
@@ -400,6 +436,18 @@ static enum bp_result set_pixel_clock_v7(
return result;
}
+static enum bp_result set_pixel_clock_fallback(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return set_pixel_clock_v7(bp, bp_params);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/******************************************************************************
******************************************************************************
**
@@ -632,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1(
enum controller_id crtc_id,
enum bp_pipe_control_action action);
+static enum bp_result enable_disp_power_gating_fallback(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
static void init_enable_disp_power_gating(
struct bios_parser *bp)
{
@@ -643,7 +696,7 @@ static void init_enable_disp_power_gating(
default:
dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
- bp->cmd_tbl.enable_disp_power_gating = NULL;
+ bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback;
break;
}
}
@@ -695,6 +748,19 @@ static enum bp_result enable_disp_power_gating_v2_1(
return result;
}
+static enum bp_result enable_disp_power_gating_fallback(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ return enable_disp_power_gating_v2_1(bp, crtc_id, action);
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
/******************************************************************************
*******************************************************************************
**
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index d0714a3d63c8..4674aca8f206 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -1,5 +1,6 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,13 @@
# It calculates Bandwidth and Watermarks values for HW programming
#
+ifdef CONFIG_X86
calcs_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+calcs_ccflags := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -40,6 +48,7 @@ calcs_ccflags += -mpreferred-stack-boundary=4
else
calcs_ccflags += -msse2
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index a1d49256fab7..5d081c42e81b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -154,14 +154,14 @@ static void calculate_bandwidth(
- if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
- else {
- d0_underlay_enable = 1;
- }
- if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
- else {
- d1_underlay_enable = 1;
- }
+ if (data->d0_underlay_mode == bw_def_none)
+ d0_underlay_enable = false;
+ else
+ d0_underlay_enable = true;
+ if (data->d1_underlay_mode == bw_def_none)
+ d1_underlay_enable = false;
+ else
+ d1_underlay_enable = true;
data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
switch (data->underlay_surface_type) {
case bw_def_420:
@@ -286,8 +286,8 @@ static void calculate_bandwidth(
data->cursor_width_pixels[2] = bw_int_to_fixed(0);
data->cursor_width_pixels[3] = bw_int_to_fixed(0);
/* graphics surface parameters from spreadsheet*/
- fbc_enabled = 0;
- lpt_enabled = 0;
+ fbc_enabled = false;
+ lpt_enabled = false;
for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
if (i < data->number_of_displays + 4) {
if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
@@ -338,9 +338,9 @@ static void calculate_bandwidth(
data->access_one_channel_only[i] = 0;
}
if (data->fbc_en[i] == 1) {
- fbc_enabled = 1;
+ fbc_enabled = true;
if (data->lpt_en[i] == 1) {
- lpt_enabled = 1;
+ lpt_enabled = true;
}
}
data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index a4ddd657598f..1a37550731de 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1,5 +1,6 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -622,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
{
bool updated = false;
- kernel_fpu_begin();
+ DC_FP_START();
if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
&& dc->debug.sr_exit_time_ns) {
updated = true;
@@ -658,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
dc->dcn_soc->dram_clock_change_latency =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
+ DC_FP_END();
return updated;
}
@@ -704,8 +705,8 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
{
- /* for dali, the highest voltage level we want is 0 */
- if (ASICREV_IS_DALI(hw_internal_rev))
+ /* for dali & pollock, the highest voltage level we want is 0 */
+ if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
return 0;
/* we are ok with all levels */
@@ -738,7 +739,7 @@ bool dcn_validate_bandwidth(
dcn_bw_sync_calcs_and_dml(dc);
memset(v, 0, sizeof(*v));
- kernel_fpu_begin();
+ DC_FP_START();
v->sr_exit_time = dc->dcn_soc->sr_exit_time;
v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
@@ -1271,7 +1272,7 @@ bool dcn_validate_bandwidth(
bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
- kernel_fpu_end();
+ DC_FP_END();
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
@@ -1434,37 +1435,49 @@ void dcn_bw_update_from_pplib(struct dc *dc)
struct dc_context *ctx = dc->ctx;
struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
bool res;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
- kernel_fpu_begin();
+ DC_FP_START();
if (res)
res = verify_clock_values(&fclks);
if (res) {
- ASSERT(fclks.num_levels >= 3);
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
- (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
- (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
- (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
+ ASSERT(fclks.num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks.num_levels -
+ (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks.num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
} else
BREAK_TO_DEBUGGER();
- kernel_fpu_end();
+ DC_FP_END();
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
- kernel_fpu_begin();
+ DC_FP_START();
if (res)
res = verify_clock_values(&dcfclks);
@@ -1477,7 +1490,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
- kernel_fpu_end();
+ DC_FP_END();
}
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
@@ -1492,11 +1505,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- kernel_fpu_begin();
+ DC_FP_START();
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
- kernel_fpu_end();
+ DC_FP_END();
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
@@ -1547,7 +1560,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
- kernel_fpu_begin();
+ DC_FP_START();
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
@@ -1736,5 +1749,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
- kernel_fpu_end();
+ DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 3cd283195091..c0f6a8c7de7d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -87,6 +87,12 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
+# prevent build errors regarding soft-float vs hard-float FP ABI tags
+# this code is currently unused on ppc64, as it applies to Renoir APUs only
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
+endif
+
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 6d60ef822619..a78e5c74c79c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -134,13 +134,13 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
- if (ASICREV_IS_DALI(asic_id.hw_internal_rev)) {
+ if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
+ ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
/* TEMP: this check has to come before ASICREV_IS_RENOIR */
- /* which also incorrectly returns true for Dali */
+ /* which also incorrectly returns true for Dali/Pollock */
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
}
-
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 25d7b7c6681c..49ce46b543ea 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -27,6 +27,7 @@
#include "clk_mgr_internal.h"
#include "dce100/dce_clk_mgr.h"
+#include "dcn20_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider)
}
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
- struct dc_state *context)
+ struct dc_state *context, bool safe_to_lower)
{
int i;
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
+ int dpp_inst, dppclk_khz, prev_dppclk_khz;
/* Loop index will match dpp->inst if resource exists,
* and we want to avoid dependency on dpp object
@@ -114,8 +115,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz);
+ prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ }
}
}
@@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
dc->debug.force_clock_mode & 0x1) {
//this is from resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK, but still need to set them for S3.
force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+
//force_clock_mode 0x1: force reset the clock even if it is the same clock, as long as it is in Passive level.
}
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn20_update_clocks_update_dentist(clk_mgr);
} else {
// if clock is being raised, increase refclk before lowering DTO
@@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
dcn20_update_clocks_update_dentist(clk_mgr);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
@@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
}
}
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dispclk_wdivider;
+ uint32_t dppclk_wdivider;
+ int disp_divider;
+ int dpp_divider;
+
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
+
+ disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+ dpp_divider = dentist_get_divider_from_did(dppclk_wdivider);
+
+ if (disp_divider && dpp_divider) {
+ /* Calculate the current DFS clock, in kHz.*/
+ clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+ clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
+ }
+
+}
+
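For scale: dentist dividers decode to quarter steps, so assuming a DENTIST_DIVIDER_RANGE_SCALE_FACTOR of 4 and a 3.6 GHz VCO (both assumptions here), a decoded divider of 8 (i.e. 2.0) yields 1.8 GHz. A standalone sketch of the formula above:

#include <stdio.h>

/* assumption: dividers decode to quarter steps, scale factor 4 */
#define DENTIST_DIVIDER_RANGE_SCALE_FACTOR 4

int main(void)
{
	int dentist_vco_freq_khz = 3600000; /* assumed 3.6 GHz VCO */
	int disp_divider = 8;               /* a DID decoding to 2.0 */

	/* same formula as dcn2_read_clocks_from_hw_dentist() above */
	printf("dispclk = %d kHz\n",
	       DENTIST_DIVIDER_RANGE_SCALE_FACTOR * dentist_vco_freq_khz /
	       disp_divider);
	return 0;
}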
void dcn2_get_clock(struct clk_mgr *clk_mgr,
struct dc_state *context,
enum dc_clock_type clock_type,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index c9fd824f3c23..0b9c045b0c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower);
void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
- struct dc_state *context);
+ struct dc_state *context, bool safe_to_lower);
void dcn2_init_clocks(struct clk_mgr *clk_mgr);
@@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
struct dc_clock_config *clock_cfg);
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
+
+
#endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index de51ef12e33a..9ef3f7b91a1d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -59,14 +59,16 @@ int rn_get_active_display_cnt_wa(
struct dc_state *context)
{
int i, display_count;
- bool hdmi_present = false;
+ bool tmds_present = false;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- hdmi_present = true;
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
}
for (i = 0; i < dc->link_count; i++) {
@@ -85,7 +87,7 @@ int rn_get_active_display_cnt_wa(
}
/* WA for hang on HDMI after display is turned off then back on */
- if (display_count == 0 && hdmi_present)
+ if (display_count == 0 && tmds_present)
display_count = 1;
return display_count;
@@ -149,6 +151,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
+ // workaround: limit dppclk to at least 100 MHz to avoid underflow when switching from a lower-resolution eDP panel to eDP plus a 4K monitor.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+ }
+
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
@@ -164,16 +172,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (dpp_clock_lowered) {
- // if clock is being lowered, increase DTO before lowering refclk
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ // increase per DPP DTO before lowering global dppclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
- // if clock is being raised, increase refclk before lowering DTO
+ // increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
if (update_dispclk &&
@@ -409,20 +417,20 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
continue;
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
- ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
+ ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* dcfclk will be used to select WM */
if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 39fe38cb39b6..04441dbcba76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -66,6 +66,9 @@
#include "dce/dce_i2c.h"
+#define CTX \
+ dc->ctx
+
#define DC_LOGGER \
dc->ctx->logger
@@ -284,7 +287,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream && pipe->stream_res.tg) {
- pipe->stream->adjust = *adjust;
dc->hwss.set_drr(&pipe,
1,
adjust->v_total_min,
@@ -508,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
return ret;
}
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
struct dc_stream_state **streams,
int num_streams,
- const struct dc_static_screen_events *events)
+ const struct dc_static_screen_params *params)
{
int i = 0;
int j = 0;
@@ -530,7 +532,7 @@ void dc_stream_set_static_screen_events(struct dc *dc,
}
}
- dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
@@ -579,6 +581,40 @@ static void dc_destruct(struct dc *dc)
}
+static bool dc_construct_ctx(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ if (!dc_ctx)
+ return false;
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
+ dc_ctx->dc_stream_id_count = 0;
+ dc_ctx->dce_environment = init_params->dce_environment;
+
+ /* Create logger */
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+ dc_ctx->dce_version = dc_version;
+
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+
+ dc->ctx = dc_ctx;
+
+ return true;
+}
+
static bool dc_construct(struct dc *dc,
const struct dc_init_data *init_params)
{
@@ -590,7 +626,6 @@ static bool dc_construct(struct dc *dc,
struct dcn_ip_params *dcn_ip;
#endif
- enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc->config = init_params->flags;
// Allocate memory for the vm_helper
@@ -636,26 +671,12 @@ static bool dc_construct(struct dc *dc,
dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
- dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
- if (!dc_ctx) {
+ if (!dc_construct_ctx(dc, init_params)) {
dm_error("%s: failed to create ctx\n", __func__);
goto fail;
}
- dc_ctx->cgs_device = init_params->cgs_device;
- dc_ctx->driver_context = init_params->driver;
- dc_ctx->dc = dc;
- dc_ctx->asic_id = init_params->asic_id;
- dc_ctx->dc_sink_id_count = 0;
- dc_ctx->dc_stream_id_count = 0;
- dc->ctx = dc_ctx;
-
- /* Create logger */
-
- dc_ctx->dce_environment = init_params->dce_environment;
-
- dc_version = resource_parse_asic_id(init_params->asic_id);
- dc_ctx->dce_version = dc_version;
+ dc_ctx = dc->ctx;
/* Resource should construct all asic specific resources.
* This should be the only place where we need to parse the asic id
@@ -670,7 +691,7 @@ static bool dc_construct(struct dc *dc,
bp_init_data.bios = init_params->asic_id.atombios_base_address;
dc_ctx->dc_bios = dal_bios_parser_create(
- &bp_init_data, dc_version);
+ &bp_init_data, dc_ctx->dce_version);
if (!dc_ctx->dc_bios) {
ASSERT_CRITICAL(false);
@@ -678,17 +699,13 @@ static bool dc_construct(struct dc *dc,
}
dc_ctx->created_bios = true;
- }
-
- dc_ctx->perf_trace = dc_perf_trace_create();
- if (!dc_ctx->perf_trace) {
- ASSERT_CRITICAL(false);
- goto fail;
}
+
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
- dc_version,
+ dc_ctx->dce_version,
dc_ctx->dce_environment,
dc_ctx);
@@ -697,7 +714,7 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
if (!dc->res_pool)
goto fail;
@@ -728,8 +745,6 @@ static bool dc_construct(struct dc *dc,
return true;
fail:
-
- dc_destruct(dc);
return false;
}
@@ -783,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc_release_state(current_ctx);
}
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ int count = 0;
+ struct pipe_ctx *pipe;
+ PERF_TRACE();
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+ PERF_TRACE();
+}
+
/*******************************************************************************
* Public functions
******************************************************************************/
@@ -795,28 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (NULL == dc)
goto alloc_fail;
- if (false == dc_construct(dc, init_params))
- goto construct_fail;
+ if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+ if (false == dc_construct_ctx(dc, init_params)) {
+ dc_destruct(dc);
+ goto construct_fail;
+ }
+ } else {
+ if (false == dc_construct(dc, init_params)) {
+ dc_destruct(dc);
+ goto construct_fail;
+ }
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
- full_pipe_count = dc->res_pool->pipe_count;
- if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
- full_pipe_count--;
- dc->caps.max_streams = min(
- full_pipe_count,
- dc->res_pool->stream_enc_count);
+ dc->optimize_seamless_boot_streams = 0;
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+ dc->caps.linear_pitch_alignment = 64;
- dc->caps.max_links = dc->link_count;
- dc->caps.max_audios = dc->res_pool->audio_count;
- dc->caps.linear_pitch_alignment = 64;
+ dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
- dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+ }
/* Populate versioning information */
dc->versions.dc_ver = DC_VER;
- if (dc->res_pool->dmcu != NULL)
- dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -834,7 +886,8 @@ alloc_fail:
void dc_hardware_init(struct dc *dc)
{
- dc->hwss.init_hw(dc);
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
+ dc->hwss.init_hw(dc);
}
void dc_init_callbacks(struct dc *dc,
@@ -1148,10 +1201,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization)
- dc->optimize_seamless_boot = true;
+ dc->optimize_seamless_boot_streams++;
}
- if (!dc->optimize_seamless_boot)
+ if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
@@ -1224,9 +1277,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (!dc->optimize_seamless_boot)
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
+ if (dc->optimize_seamless_boot_streams == 0) {
+ /* Must wait for no flips to be pending before doing optimize bw */
+ wait_for_no_pipes_pending(dc, context);
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
+ }
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
@@ -1262,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+bool dc_is_hw_initialized(struct dc *dc)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ return dcb->funcs->is_accelerated_mode(dcb);
+}
+
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot)
+ if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
@@ -1543,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.scaling_change = 1;
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
- && u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
/* Making src rect bigger requires a bandwidth change */
update_flags->bits.clock_change = 1;
}
@@ -1557,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.position_change = 1;
if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change)
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.scaling_change
- || update_flags->bits.position_change)
+ if (update_flags->bits.position_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
@@ -2051,7 +2113,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.optimize_bandwidth(dc, dc->current_state);
} else {
- if (!dc->optimize_seamless_boot)
+ if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
@@ -2092,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (dc->optimize_seamless_boot && surface_count > 0) {
+ if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -2101,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc,
*/
if (stream->apply_seamless_boot_optimization) {
stream->apply_seamless_boot_optimization = false;
- dc->optimize_seamless_boot = false;
- dc->optimized_required = true;
+ dc->optimize_seamless_boot_streams--;
+
+ if (dc->optimize_seamless_boot_streams == 0)
+ dc->optimized_required = true;
}
}
- if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
+ if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -2398,12 +2462,7 @@ void dc_set_power_state(
enum dc_acpi_cm_power_state power_state)
{
struct kref refcount;
- struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
- GFP_KERNEL);
-
- ASSERT(dml);
- if (!dml)
- return;
+ struct display_mode_lib *dml;
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
@@ -2426,6 +2485,12 @@ void dc_set_power_state(
* clean state, and dc hw programming optimizations will not
* cause any trouble.
*/
+ dml = kzalloc(sizeof(struct display_mode_lib),
+ GFP_KERNEL);
+
+ ASSERT(dml);
+ if (!dml)
+ return;
/* Preserve refcount */
refcount = dc->current_state->refcount;
@@ -2439,10 +2504,10 @@ void dc_set_power_state(
dc->current_state->refcount = refcount;
dc->current_state->bw_ctx.dml = *dml;
+ kfree(dml);
+
break;
}
-
- kfree(dml);
}
void dc_resume(struct dc *dc)
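
Two behavioral changes in dc.c are worth calling out: optimize_seamless_boot becomes a per-stream counter, and optimize_bandwidth now runs only after wait_for_no_pipes_pending(), a bounded busy-wait that clears the flip-pending flag, asks the hardware to refresh it, and gives up after roughly 100 ms. A self-contained sketch of that polling pattern, with hypothetical stand-ins for the driver's status query and 1 us delay:

    #include <stdbool.h>

    bool flip_still_pending(int pipe);  /* stand-in for update_pending_status() */
    void delay_1us(void);               /* stand-in for udelay(1) */

    /* Poll until no flip is pending; time out after 100000 x 1 us = 100 ms. */
    static bool wait_flip_done(int pipe)
    {
            for (int count = 0; count < 100000; count++) {
                    if (!flip_still_pending(pipe))
                            return true;
                    delay_1us();
            }
            return false; /* timed out; the real driver ASSERTs here */
    }
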
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index cef8c1ba9797..a09119c10d7c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,6 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
+#include "../dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -817,8 +818,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
- read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
+ read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
@@ -850,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link,
if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
same_dpcd = false;
}
- /* Active dongle plug in without display or downstream unplug*/
+ /* Active dongle downstream unplug */
if (link->type == dc_connection_active_dongle &&
link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink != NULL) {
+ if (prev_sink != NULL)
/* Downstream unplug */
dc_sink_release(prev_sink);
- } else {
- /* Empty dongle plug in */
- dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
- }
return true;
}
@@ -968,8 +963,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
- reason != DETECT_REASON_HPDRX) {
+ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
@@ -2404,10 +2398,11 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
-
-
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ if ((psr != NULL) && link->psr_feature_enabled)
+ psr->funcs->set_psr_enable(psr, allow_active);
+ else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
link->psr_allow_active = allow_active;
@@ -2419,8 +2414,11 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
- if (dmcu != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->get_psr_state(psr_state);
+ else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
return true;
@@ -2467,6 +2465,7 @@ bool dc_link_setup_psr(struct dc_link *link,
{
struct dc *dc;
struct dmcu *dmcu;
+ struct dmub_psr *psr;
int i;
/* updateSinkPsrDpcdConfig*/
union dpcd_psr_configuration psr_configuration;
@@ -2478,8 +2477,9 @@ bool dc_link_setup_psr(struct dc_link *link,
dc = link->ctx->dc;
dmcu = dc->res_pool->dmcu;
+ psr = dc->res_pool->psr;
- if (!dmcu)
+ if (!dmcu && !psr)
return false;
@@ -2535,7 +2535,7 @@ bool dc_link_setup_psr(struct dc_link *link,
transmitter_to_phy_id(link->link_enc->transmitter);
psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
timing.pix_clk_100hz * 100),
stream->timing.v_total),
stream->timing.h_total);
@@ -2588,7 +2588,10 @@ bool dc_link_setup_psr(struct dc_link *link,
*/
psr_context->frame_delay = 0;
- link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ if (psr)
+ link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ else
+ link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
/* psr_enabled == 0 indicates setup_psr did not succeed, but this
* should not happen since firmware should be running at this point
@@ -2863,6 +2866,52 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+ // Clear all of the MST payload, then reallocate
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* The driver enables split pipes for external monitors,
+ * so we have to check whether pipe_ctx is a split pipe.
+ * If it is, the driver uses the top pipe to
+ * reallocate.
+ */
+ if (!pipe_ctx || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ deallocate_mst_payload(pipe_ctx);
+ }
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* enabling/disabling the PHY clears the connection between BE and FE;
+ * we need to restore it.
+ */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
+ return DC_OK;
+}
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
@@ -3361,3 +3410,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
return &link->preferred_link_setting;
return &link->verified_link_cap;
}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ dp_overwrite_extended_receiver_cap(link);
+}
+
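
dc_link.c now prefers the DMUB-based PSR implementation whenever dc->res_pool->psr exists, and only falls back to the DMCU path otherwise. A minimal sketch of that dispatch order, with stand-in backend types rather than the real dmub_psr/dmcu structs:

    #include <stdbool.h>

    struct psr_backend {
            bool present;
            void (*set_enable)(bool allow_active);
    };

    /* Prefer the DMUB backend when it exists; otherwise fall back to DMCU. */
    static void psr_set_allow_active(struct psr_backend *dmub_psr,
                                     struct psr_backend *dmcu,
                                     bool feature_enabled, bool allow_active)
    {
            if (dmub_psr && dmub_psr->present && feature_enabled)
                    dmub_psr->set_enable(allow_active);
            else if (dmcu && dmcu->present && feature_enabled)
                    dmcu->set_enable(allow_active);
    }
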
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index c2c136b12184..a49c10d5df26 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -590,7 +590,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
struct aux_payload *payload)
{
uint32_t retrieved = 0;
- bool ret = 0;
+ bool ret = false;
if (!ddc)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 42aa889fd0f5..cb731c1d30b1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -983,7 +983,7 @@ static enum link_training_result perform_clock_recovery_sequence(
offset);
/* 2. update DPCD of the receiver*/
- if (!retries_cr)
+ if (!retry_count)
/* EPR #361076 - write as a 5-byte burst,
* but only for the 1-st iteration.*/
dpcd_set_lt_pattern_and_lane_settings(
@@ -1217,24 +1217,33 @@ static void configure_lttpr_mode(struct dc_link *link)
uint8_t repeater_cnt;
uint32_t aux_interval_address;
uint8_t repeater_id;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- core_link_write_dpcd(link,
+ result = core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
if (!link->is_lttpr_mode_transparent) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- core_link_write_dpcd(link,
+ result = core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
@@ -1883,6 +1892,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
+ /* Temporary Renoir-specific workaround for SWDEV-215184;
+ * the PHY will sometimes be in a bad state when hotplugging a display from certain USB-C dongles,
+ * so add an extra cycle of enabling and disabling the PHY before the first link training.
+ */
+ if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ link->dc->debug.usbc_combo_phy_reset_wa) {
+ dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
+ dp_disable_link_phy(link, link->connector_signal);
+ }
+
dp_cs_id = get_clock_source_id(link);
/* link training starts with the maximum common settings
@@ -2854,10 +2873,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
/* For now we only handle 'Downstream port status' case.
* If we got sink count changed it means
* Downstream port status changed,
- * then DM should call DC to do the detection. */
- if (hpd_rx_irq_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+ * then DM should call DC to do the detection.
+ * NOTE: Do not handle link loss on eDP since it is an internal link */
+ if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+ hpd_rx_irq_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
@@ -2874,18 +2895,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
return false;
previous_link_settings = link->cur_link_settings;
- dp_disable_link_phy(link, pipe_ctx->stream->signal);
perform_link_training_with_retries(&previous_link_settings,
true, LINK_TRAINING_ATTEMPTS,
pipe_ctx,
pipe_ctx->stream->signal);
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- dc_link_allocate_mst_payload(pipe_ctx);
- }
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_link_reallocate_mst_payload(link);
status = false;
if (out_link_loss)
@@ -3267,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link)
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
link->dpcd_caps.ext_receiver_cap_field_present =
- aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
@@ -3426,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link)
return true;
}
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[16];
+ uint32_t read_dpcd_retry_cnt = 3;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ union dp_downstream_port_present ds_port = { 0 };
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+
+ int i;
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ return true;
+}
+
bool detect_dp_sink_caps(struct dc_link *link)
{
return retrieve_link_cap(link);
@@ -3601,6 +3680,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *odm_pipe;
enum controller_dp_color_space controller_color_space;
int opp_cnt = 1;
+ int count;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3644,6 +3724,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
NULL,
width,
height);
+ /* wait for dpg to blank pixel data with test pattern */
+ for (count = 0; count < 1000; count++) {
+ if (opp->funcs->dpg_is_blanked(opp))
+ break;
+ udelay(100);
+ }
}
}
break;
@@ -3839,8 +3925,38 @@ bool dc_link_dp_set_test_pattern(
sizeof(training_pattern));
}
} else {
- /* CRTC Patterns */
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
+ break;
+
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ color_space = COLOR_SPACE_YCBCR601;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ color_space = COLOR_SPACE_YCBCR709;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+ default:
+ break;
+ }
+ /* update MSA to requested color space */
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream->timing,
+ color_space,
+ pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+ link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
+ /* CRTC Patterns */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
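
The dc_link_dp.c test-pattern hunk derives the MSA color space from the requested test-pattern color space, dropping to the limited-range variant for CEA color squares. The same mapping restated as a small standalone function (enum names are illustrative, not the driver's):

    enum test_cs { TEST_CS_RGB, TEST_CS_YCBCR601, TEST_CS_YCBCR709 };
    enum stream_cs {
            CS_UNKNOWN, CS_SRGB, CS_SRGB_LIMITED,
            CS_YCBCR601, CS_YCBCR601_LIMITED,
            CS_YCBCR709, CS_YCBCR709_LIMITED
    };

    static enum stream_cs map_test_cs(enum test_cs in, int is_cea_color_squares)
    {
            switch (in) {
            case TEST_CS_RGB:
                    return is_cea_color_squares ? CS_SRGB_LIMITED : CS_SRGB;
            case TEST_CS_YCBCR601:
                    return is_cea_color_squares ? CS_YCBCR601_LIMITED : CS_YCBCR601;
            case TEST_CS_YCBCR709:
                    return is_cea_color_squares ? CS_YCBCR709_LIMITED : CS_YCBCR709;
            default:
                    return CS_UNKNOWN;
            }
    }
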
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 548aac02ca11..ddb855045767 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -173,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)
}
bool edp_receiver_ready_T7(struct dc_link *link)
{
- unsigned int tries = 0;
unsigned char sinkstatus = 0;
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
+ /* use an absolute timestamp to constrain max T7 */
+ unsigned long long enter_timestamp = 0;
+ unsigned long long finish_timestamp = 0;
+ unsigned long long time_taken_in_ns = 0;
+
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
if (result == DC_OK && edpRev < DP_EDP_12)
return true;
 /* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ enter_timestamp = dm_get_timestamp(link->ctx);
do {
sinkstatus = 0;
result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -189,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
break;
if (result != DC_OK)
break;
- udelay(25); //MAx T7 is 50ms
- } while (++tries < 300);
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50 ms
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
@@ -518,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
uint8_t dsc_packed_pps[128];
+ memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+ memset(dsc_packed_pps, 0, 128);
+
/* Enable DSC hw block */
dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
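
edp_receiver_ready_T7() now bounds the wait by elapsed time instead of iteration count, so the 50 ms T7 budget holds even when individual DPCD reads are slow. A sketch of the pattern, assuming hypothetical clock and sleep helpers in place of dm_get_timestamp()/udelay():

    #include <stdint.h>
    #include <stdbool.h>

    uint64_t now_ns(void);          /* hypothetical monotonic clock */
    bool sink_ready(void);          /* hypothetical SINK_STATUS read */
    void sleep_us(unsigned int us);

    static bool wait_t7(void)
    {
            const uint64_t limit_ns = 50ULL * 1000000ULL; /* max T7 = 50 ms */
            uint64_t start = now_ns();

            do {
                    if (sink_ready())
                            return true;
                    sleep_us(25);
            } while (now_ns() - start < limit_ns);

            return false;
    }
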
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 0c19de678339..a0eb9e533a61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -940,30 +940,43 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
}
-static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+/*
+ * When handling 270 degree rotation in mixed SLS mode,
+ * stream->timing.h_border_left is non-zero. If we are doing
+ * pipe-splitting, this h_border_left value gets added to recout.x, and the
+ * subsequent calls to calculate_inits_and_adj_vp() and
+ * adjust_vp_and_init_for_seamless_clip() can then compute an incorrect
+ * viewport.height for a pipe.
+ *
+ * To fix this, instead of using stream->timing.h_border_left, we can use
+ * stream->dst.x to represent the border. So we set h_border_left to 0,
+ * shift the appropriate amount into stream->dst.x, perform all calculations
+ * in resource_build_scaling_params() based on that, and then restore
+ * h_border_left and stream->dst.x to their original values.
+ *
+ * shift_border_left_to_dst() shifts the amount of h_border_left into
+ * stream->dst.x and sets h_border_left to 0. restore_border_left_from_dst()
+ * restores h_border_left and stream->dst.x back to their original values.
+ * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
+ * original h_border_left value in its calculation.
+ */
+int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
{
- unsigned int integer_multiple = 1;
-
- if (pipe_ctx->plane_state->scaling_quality.integer_scaling) {
- // calculate maximum # of replication of src onto addressable
- integer_multiple = min(
- pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
- pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height);
+ int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
- //scale dst
- pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width;
- pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
-
- //center dst onto addressable
- pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
- pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
-
- //We are guaranteed that we are scaling in integer ratio
- pipe_ctx->plane_state->scaling_quality.v_taps = 1;
- pipe_ctx->plane_state->scaling_quality.h_taps = 1;
- pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
- pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+ if (store_h_border_left) {
+ pipe_ctx->stream->timing.h_border_left = 0;
+ pipe_ctx->stream->dst.x += store_h_border_left;
}
+ return store_h_border_left;
+}
+
+void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+ int store_h_border_left)
+{
+ pipe_ctx->stream->dst.x -= store_h_border_left;
+ pipe_ctx->stream->timing.h_border_left = store_h_border_left;
}
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
@@ -971,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
bool res = false;
+ int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
@@ -979,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
- calculate_integer_scaling(pipe_ctx);
-
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
+ pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+ if (store_h_border_left) {
+ restore_border_left_from_dst(pipe_ctx,
+ store_h_border_left);
+ }
return false;
+ }
calculate_recout(pipe_ctx);
@@ -999,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
- pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
- pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+ pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
+ store_h_border_left + timing->h_border_right;
+ pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
+ timing->v_border_top + timing->v_border_bottom;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -1047,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);
+ if (store_h_border_left)
+ restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+
return res;
}
@@ -1894,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state(
pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
pipe_ctx->stream_res.opp = pool->opps[tg_inst];
- if (pool->dpps[tg_inst])
+ if (pool->dpps[tg_inst]) {
pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
+
+ // Read DPP->MPCC->OPP Pipe from HW State
+ if (pool->mpc->funcs->read_mpcc_state) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+
+ if (s.dpp_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
+
+ if (s.bot_mpcc_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+ &pool->mpc->mpcc_array[s.bot_mpcc_id];
+
+ if (s.opp_id < MAX_OPP)
+ pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ }
+ }
pipe_ctx->pipe_idx = tg_inst;
pipe_ctx->stream = stream;
@@ -2025,6 +2066,13 @@ void dc_resource_state_construct(
dst_ctx->clk_mgr = dc->clk_mgr;
}
+
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
+{
+ return dc->res_pool->res_cap->num_dsc > 0;
+}
+
+
/**
* dc_validate_global_state() - Determine if HW can support a given state
* Checks HW resource availability and bandwidth requirement.
@@ -2281,7 +2329,7 @@ static void set_avi_info_frame(
if (color_space == COLOR_SPACE_SRGB ||
color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
@@ -2811,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
return -1;
}
}
+static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
+{
+ if (modes) {
+ if (modes->sample_rates.rate.RATE_192)
+ return 192000;
+ if (modes->sample_rates.rate.RATE_176_4)
+ return 176400;
+ if (modes->sample_rates.rate.RATE_96)
+ return 96000;
+ if (modes->sample_rates.rate.RATE_88_2)
+ return 88200;
+ if (modes->sample_rates.rate.RATE_48)
+ return 48000;
+ if (modes->sample_rates.rate.RATE_44_1)
+ return 44100;
+ if (modes->sample_rates.rate.RATE_32)
+ return 32000;
+ }
+ /* original logic when no audio info */
+ return 441000;
+}
+
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *audio_chk)
+{
+ unsigned int i;
+ unsigned int max_sample_rate = 0;
+
+ if (aud_modes) {
+ audio_chk->audio_packet_type = 0x2; /* audio sample packet AP = .25 for layout0, 1 for layout1 */
+
+ audio_chk->max_audiosample_rate = 0;
+ for (i = 0; i < aud_modes->mode_count; i++) {
+ max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
+ if (audio_chk->max_audiosample_rate < max_sample_rate)
+ audio_chk->max_audiosample_rate = max_sample_rate;
+ /* DTS takes the same as type 2: AP = 0.25 */
+ }
+ /* check which one takes more bandwidth */
+ if (audio_chk->max_audiosample_rate > 192000)
+ audio_chk->audio_packet_type = 0x9; /* AP = 1 */
+ audio_chk->acat = 0; /* not supported */
+ }
+}
+
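
resource_build_scaling_params() now brackets its work with shift_border_left_to_dst()/restore_border_left_from_dst(), and the restore must run on every exit path, including the early return for sub-16-pixel viewports. The save-shift-restore idea on a toy struct:

    struct toy_stream {
            int h_border_left;
            int dst_x;
    };

    /* Move the border into dst.x for the duration of the computation. */
    static int shift_border(struct toy_stream *s)
    {
            int saved = s->h_border_left;

            if (saved) {
                    s->h_border_left = 0;
                    s->dst_x += saved;
            }
            return saved;
    }

    /* Undo the shift; must be paired with every shift_border() call. */
    static void restore_border(struct toy_stream *s, int saved)
    {
            s->dst_x -= saved;
            s->h_border_left = saved;
    }
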
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b43a4b115fd8..6ddbb00ed37a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -406,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc,
stream->writeback_info[stream->num_wb_info++] = *wb_info;
}
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* enable writeback */
if (dc->hwss.enable_writeback) {
struct dc_stream_status *stream_status = dc_stream_get_status(stream);
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ dwb->otg_inst = stream_status->primary_otg_inst;
+ }
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
- if (dwb->funcs->is_enabled(dwb)) {
- /* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
- } else {
- /* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+ } else {
+ /* Enable writeback pipe from scratch */
+ dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+ }
}
}
-
return true;
}
@@ -463,19 +468,29 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
stream->num_wb_info = j;
- /* recalculate and apply DML parameters */
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* disable writeback */
- if (dc->hwss.disable_writeback)
- dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+ /* disable writeback */
+ if (dc->hwss.disable_writeback)
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ }
return true;
}
+bool dc_stream_warmup_writeback(struct dc *dc,
+ int num_dwb,
+ struct dc_writeback_info *wb_info)
+{
+ if (dc->hwss.mmhubbub_warmup)
+ return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
+ else
+ return false;
+}
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index c24639080371..8ff25b5dd2f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.62"
+#define DC_VER "3.2.69"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -157,11 +157,14 @@ struct dc_surface_dcc_cap {
bool const_color_support;
};
-struct dc_static_screen_events {
- bool force_trigger;
- bool cursor_update;
- bool surface_update;
- bool overlay_update;
+struct dc_static_screen_params {
+ struct {
+ bool force_trigger;
+ bool cursor_update;
+ bool surface_update;
+ bool overlay_update;
+ } triggers;
+ unsigned int num_frames;
};
@@ -367,6 +370,7 @@ struct dc_debug_options {
bool disable_hubp_power_gate;
bool disable_dsc_power_gate;
int dsc_min_slice_height_override;
+ int dsc_bpp_increment_div;
bool native422_support;
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
@@ -419,6 +423,9 @@ struct dc_debug_options {
bool nv12_iflip_vm_wa;
bool disable_dram_clock_change_vactive_support;
bool validate_dml_output;
+ bool enable_dmcub_surface_flip;
+ bool usbc_combo_phy_reset_wa;
+ bool disable_dsc;
};
struct dc_debug_data {
@@ -513,7 +520,7 @@ struct dc {
bool optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
- bool optimize_seamless_boot;
+ int optimize_seamless_boot_streams;
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -909,6 +916,8 @@ void dc_resource_state_copy_construct_current(
void dc_resource_state_destruct(struct dc_state *context);
+bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+
/*
* TODO update to make it about validation sets
* Set up streams and links associated to drive sinks
@@ -1066,6 +1075,7 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
+bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
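
dc_static_screen_params groups the old dc_static_screen_events booleans under a .triggers sub-struct and adds num_frames. A hypothetical caller of the renamed dc_stream_set_static_screen_params() would now fill it like this (struct restated locally so the snippet stands alone):

    #include <stdbool.h>

    struct static_screen_params {   /* mirrors dc_static_screen_params */
            struct {
                    bool force_trigger;
                    bool cursor_update;
                    bool surface_update;
                    bool overlay_update;
            } triggers;
            unsigned int num_frames;
    };

    int main(void)
    {
            struct static_screen_params p = {0};

            p.triggers.cursor_update = true;   /* formerly flat booleans */
            p.triggers.surface_update = true;
            p.num_frames = 2;                  /* new: static frames required */
            return 0;
    }
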
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 8ec09813ee17..3800340a5b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -53,7 +53,8 @@ struct dc_dsc_policy {
uint32_t min_target_bpp;
};
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
+ const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
@@ -77,4 +78,6 @@ bool dc_dsc_compute_config(
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
struct dc_dsc_policy *policy);
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 02a63e9cb62f..737048d8a96c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -552,6 +552,36 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
return value;
}
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ ...)
+{
+ uint32_t shift, mask, *field_value;
+ uint32_t value = 0;
+ int i = 1;
+
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
+ *field_value1 = get_reg_field_value_ex(value, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t *);
+
+ *field_value = get_reg_field_value_ex(value, mask, shift);
+ i++;
+ }
+
+ va_end(ap);
+
+ return value;
+}
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
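
generic_indirect_reg_get() consumes its variadic arguments in (shift, mask, out-pointer) triplets, one per requested field after the first. A minimal standalone sketch of that va_arg pattern:

    #include <stdarg.h>
    #include <stdint.h>

    static uint32_t get_field(uint32_t value, uint32_t mask, uint32_t shift)
    {
            return (value & mask) >> shift;
    }

    /* Decode n fields from one register value; fields 2..n arrive as
     * (shift, mask, out-pointer) triplets on the vararg list. */
    static uint32_t decode_fields(uint32_t value, int n,
                                  uint32_t shift1, uint32_t mask1,
                                  uint32_t *field_value1, ...)
    {
            va_list ap;

            va_start(ap, field_value1);
            *field_value1 = get_field(value, mask1, shift1);

            for (int i = 1; i < n; i++) {
                    uint32_t shift = va_arg(ap, uint32_t);
                    uint32_t mask = va_arg(ap, uint32_t);
                    uint32_t *out = va_arg(ap, uint32_t *);

                    *out = get_field(value, mask, shift);
            }
            va_end(ap);
            return value;
    }
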
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 1ff79f703734..d25603128394 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -133,6 +133,7 @@ struct dc_link {
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -204,6 +205,7 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -300,6 +302,9 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link);
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link);
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 3ea54321b045..92096de79dec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -344,10 +344,17 @@ bool dc_add_all_planes_for_stream(
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info);
+
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
+
+bool dc_stream_warmup_writeback(struct dc *dc,
+ int num_dwb,
+ struct dc_writeback_info *wb_info);
+
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
+
bool dc_stream_set_dynamic_metadata(struct dc *dc,
struct dc_stream_state *stream,
struct dc_dmdata_attributes *dmdata_attr);
@@ -432,10 +439,10 @@ bool dc_stream_get_crc(struct dc *dc,
uint32_t *g_y,
uint32_t *b_cb);
-void dc_stream_set_static_screen_events(struct dc *dc,
+void dc_stream_set_static_screen_params(struct dc *dc,
struct dc_stream_state **stream,
int num_streams,
- const struct dc_static_screen_events *events);
+ const struct dc_static_screen_params *params);
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
enum dc_dynamic_expansion option);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 2b92bfa28bde..e59532d98cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -60,7 +60,12 @@ enum dce_environment {
DCE_ENV_FPGA_MAXIMUS,
/* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
* requirements of Diagnostics team. */
- DCE_ENV_DIAG
+ DCE_ENV_DIAG,
+ /*
+ * Guest VM system, DC HW may exist but is not virtualized and
+ * should not be used. SW support for VDI only.
+ */
+ DCE_ENV_VIRTUAL_HW
};
/* Note: use these macro definitions instead of direct comparison! */
@@ -224,6 +229,7 @@ struct dc_panel_patch {
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms;
+ unsigned int manage_secondary_link;
};
struct dc_edid_caps {
@@ -598,7 +604,11 @@ struct audio_info {
/* this field must be last in this struct */
struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
};
-
+struct audio_check {
+ unsigned int audio_packet_type;
+ unsigned int max_audiosample_rate;
+ unsigned int acat;
+};
enum dc_infoframe_type {
DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
DC_HDMI_INFOFRAME_TYPE_AVI = 0x82,
@@ -719,7 +729,7 @@ struct psr_context {
/* The VSync rate in Hz used to calculate the
* step size for smooth brightness feature
*/
- unsigned int vsyncRateHz;
+ unsigned int vsync_rate_hz;
unsigned int skipPsrWaitForPllLock;
unsigned int numberOfControllers;
/* Unused, for future use. To indicate that first changed frame from
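
The new audio_check struct above is filled by get_audio_check() in dc_resource.c: take the highest sample rate across all audio modes, then switch to the larger packet type past 192 kHz. A condensed sketch with a plain rate array standing in for the driver's audio_mode list:

    struct audio_check_sketch {
            unsigned int audio_packet_type;
            unsigned int max_audiosample_rate;
    };

    static void fill_audio_check(struct audio_check_sketch *chk,
                                 const unsigned int *rates_hz, int n)
    {
            chk->audio_packet_type = 0x2;   /* sample packet, AP = .25 (layout0) */
            chk->max_audiosample_rate = 0;
            for (int i = 0; i < n; i++)
                    if (rates_hz[i] > chk->max_audiosample_rate)
                            chk->max_audiosample_rate = rates_hz[i];
            if (chk->max_audiosample_rate > 192000)
                    chk->audio_packet_type = 0x9;   /* AP = 1 */
    }
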
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index f1a5d2c6aa37..68c4049cbc2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -400,7 +400,7 @@ static bool acquire(
{
enum gpio_result result;
- if (!is_engine_available(engine))
+ if ((engine == NULL) || !is_engine_available(engine))
return false;
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index e619e67e6b51..30d953acd016 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -537,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
if (dmcu->dmcu_state != DMCU_RUNNING)
return;
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (psr_state == 0 && !enable)
- return;
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 1cd4d8fc361f..066188ba7949 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size(
dce_i2c_hw->buffer_used_bytes;
}
-static uint32_t get_speed(
- const struct dce_i2c_hw *dce_i2c_hw)
-{
- uint32_t pre_scale = 0;
-
- REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
-
- /* [anaumov] it seems following is unnecessary */
- /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
- return pre_scale ?
- dce_i2c_hw->reference_frequency / pre_scale :
- dce_i2c_hw->default_speed;
-}
-
static void process_channel_reply(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *reply)
@@ -278,16 +264,25 @@ static void set_speed(
struct dce_i2c_hw *dce_i2c_hw,
uint32_t speed)
{
+ uint32_t xtal_ref_div = 0;
+ uint32_t prescale = 0;
+
+ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+ if (xtal_ref_div == 0)
+ xtal_ref_div = 2;
+
+ prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
if (speed) {
if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
REG_UPDATE_N(SPEED, 3,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
else
REG_UPDATE_N(SPEED, 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
}
@@ -344,9 +339,7 @@ static void release_engine(
bool safe_to_reset;
/* Restore original HW engine speed */
-
- set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
-
+ set_speed(dce_i2c_hw, dce_i2c_hw->default_speed);
/* Reset HW engine */
{
@@ -378,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
{
uint32_t counter = 0;
enum gpio_result result;
- uint32_t current_speed;
struct dce_i2c_hw *dce_i2c_hw = NULL;
if (!ddc)
@@ -416,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
dce_i2c_hw->ddc = ddc;
- current_speed = get_speed(dce_i2c_hw);
-
- if (current_speed)
- dce_i2c_hw->original_speed = current_speed;
-
if (!setup_engine(dce_i2c_hw)) {
release_engine(dce_i2c_hw);
return NULL;
@@ -478,13 +465,9 @@ static void submit_channel_request_hw(
static uint32_t get_transaction_timeout_hw(
const struct dce_i2c_hw *dce_i2c_hw,
- uint32_t length)
+ uint32_t length,
+ uint32_t speed)
{
-
- uint32_t speed = get_speed(dce_i2c_hw);
-
-
-
uint32_t period_timeout;
uint32_t num_of_clock_stretches;
@@ -504,7 +487,8 @@ static uint32_t get_transaction_timeout_hw(
bool dce_i2c_hw_engine_submit_payload(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *payload,
- bool middle_of_transaction)
+ bool middle_of_transaction,
+ uint32_t speed)
{
struct i2c_request_transaction_data request;
@@ -542,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload(
/* obtain timeout value before submitting request */
transaction_timeout = get_transaction_timeout_hw(
- dce_i2c_hw, payload->length + 1);
+ dce_i2c_hw, payload->length + 1, speed);
submit_channel_request_hw(
dce_i2c_hw, &request);
@@ -588,13 +572,11 @@ bool dce_i2c_submit_command_hw(
struct i2c_payload *payload = cmd->payloads + index_of_payload;
if (!dce_i2c_hw_engine_submit_payload(
- dce_i2c_hw, payload, mot)) {
+ dce_i2c_hw, payload, mot, cmd->speed)) {
result = false;
break;
}
-
-
++index_of_payload;
}
@@ -625,7 +607,6 @@ void dce_i2c_hw_construct(
dce_i2c_hw->buffer_used_bytes = 0;
dce_i2c_hw->transaction_count = 0;
dce_i2c_hw->engine_keep_power_up_count = 1;
- dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED;
dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED;
dce_i2c_hw->send_reset_length = 0;
dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
@@ -640,9 +621,6 @@ void dce100_i2c_hw_construct(
const struct dce_i2c_shift *shifts,
const struct dce_i2c_mask *masks)
{
-
- uint32_t xtal_ref_div = 0;
-
dce_i2c_hw_construct(dce_i2c_hw,
ctx,
engine_id,
@@ -650,21 +628,6 @@ void dce100_i2c_hw_construct(
shifts,
masks);
dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100;
-
- REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
-
- if (xtal_ref_div == 0)
- xtal_ref_div = 2;
-
- /*Calculating Reference Clock by divding original frequency by
- * XTAL_REF_DIV.
- * At upper level, uint32_t reference_frequency =
- * dal_dce_i2c_get_reference_clock(as) >> 1
- * which already divided by 2. So we need x2 to get original
- * reference clock from ppll_info
- */
- dce_i2c_hw->reference_frequency =
- (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div;
}
void dce112_i2c_hw_construct(
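
set_speed() now derives the I2C prescale at call time instead of pre-scaling reference_frequency in dce100_i2c_hw_construct(): reference_frequency arrives already halved, so it is doubled back, divided by XTAL_REF_DIV (defaulting to 2 when the register reads 0), then divided by the target speed. The arithmetic as a standalone helper:

    #include <stdint.h>

    static uint32_t i2c_prescale(uint32_t reference_frequency_khz,
                                 uint32_t xtal_ref_div, uint32_t speed_khz)
    {
            if (xtal_ref_div == 0)
                    xtal_ref_div = 2;       /* HW default when unprogrammed */
            return ((reference_frequency_khz * 2) / xtal_ref_div) / speed_khz;
    }

    /* e.g. 48000 kHz ref (already halved), div 2, 100 kHz target -> 480 */
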
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index d4b2037f7d74..fb055e6883c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -256,7 +256,6 @@ struct i2c_request_transaction_data {
struct dce_i2c_hw {
struct ddc *ddc;
- uint32_t original_speed;
uint32_t engine_keep_power_up_count;
uint32_t transaction_count;
uint32_t buffer_used_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
new file mode 100644
index 000000000000..225955ec6d39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_psr.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "../../dmub/inc/dmub_srv.h"
+#include "dmub_fw_state.h"
+#include "core_types.h"
+#include "ipp.h"
+
+#define MAX_PIPES 6
+
+/**
+ * Get PSR state from firmware.
+ */
+static void dmub_get_psr_state(uint32_t *psr_state)
+{
+ // Not yet implemented
+ // Trigger GPINT interrupt from firmware
+}
+
+/**
+ * Enable/Disable PSR.
+ */
+static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_enable.header.type = DMUB_CMD__PSR;
+
+ if (enable)
+ cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE;
+ else
+ cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE;
+
+ cmd.psr_enable.header.payload_bytes = 0; // Send header only
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set PSR level.
+ */
+static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t psr_state = 0;
+ struct dc_context *dc = dmub->ctx;
+
+ dmub_get_psr_state(&psr_state);
+
+ if (psr_state == 0)
+ return;
+
+ cmd.psr_set_level.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
+ cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
+ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+/**
+ * Set up PSR by programming PHY registers and sending PSR HW context values to the firmware.
+ */
+static bool dmub_setup_psr(struct dmub_psr *dmub,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ struct dmub_cmd_psr_copy_settings_data *copy_settings_data
+ = &cmd.psr_copy_settings.psr_copy_settings_data;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipe_ctx ||
+ !&pipe_ctx->plane_res ||
+ !&pipe_ctx->stream_res)
+ return false;
+
+ // Program DP DPHY fast training registers
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ // Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
+ cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
+ cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
+
+ // Hw insts
+ copy_settings_data->dpphy_inst = psr_context->phyType;
+ copy_settings_data->aux_inst = psr_context->channel;
+ copy_settings_data->digfe_inst = psr_context->engineId;
+ copy_settings_data->digbe_inst = psr_context->transmitterId;
+
+ copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+
+ if (pipe_ctx->plane_res.hubp)
+ copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
+ else
+ copy_settings_data->hubp_inst = 0;
+ if (pipe_ctx->plane_res.dpp)
+ copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
+ else
+ copy_settings_data->dpp_inst = 0;
+ if (pipe_ctx->stream_res.opp)
+ copy_settings_data->opp_inst = pipe_ctx->stream_res.opp->inst;
+ else
+ copy_settings_data->opp_inst = 0;
+ if (pipe_ctx->stream_res.tg)
+ copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst;
+ else
+ copy_settings_data->otg_inst = 0;
+
+ // Misc
+ copy_settings_data->psr_level = psr_context->psr_level.u32all;
+ copy_settings_data->hyst_frames = psr_context->timehyst_frames;
+ copy_settings_data->hyst_lines = psr_context->hyst_lines;
+ copy_settings_data->phy_type = psr_context->phyType;
+ copy_settings_data->aux_repeat = psr_context->aux_repeats;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
+ copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->frame_delay = psr_context->frame_delay;
+ copy_settings_data->smu_phy_id = psr_context->smuPhyId;
+ copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
+ copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+ copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
+ copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static const struct dmub_psr_funcs psr_funcs = {
+ .set_psr_enable = dmub_set_psr_enable,
+ .setup_psr = dmub_setup_psr,
+ .get_psr_state = dmub_get_psr_state,
+ .set_psr_level = dmub_set_psr_level,
+};
+
+/**
+ * Construct PSR object.
+ */
+static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
+{
+ psr->ctx = ctx;
+ psr->funcs = &psr_funcs;
+}
+
+/**
+ * Allocate and initialize PSR object.
+ */
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
+{
+ struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL);
+
+ if (psr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_psr_construct(psr, ctx);
+
+ return psr;
+}
+
+/**
+ * Deallocate PSR object.
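+ *
+ * Illustrative usage (names as defined in this file):
+ *	struct dmub_psr *psr = dmub_psr_create(ctx);
+ *	...
+ *	dmub_psr_destroy(&psr);	// frees the object and NULLs the caller's pointer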
+ */
+void dmub_psr_destroy(struct dmub_psr **dmub)
+{
+	kfree(*dmub);
+ *dmub = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index c87b1ba7590e..229958de3035 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,51 +23,25 @@
*
*/
-#ifndef _DMUB_FW_STATE_H_
-#define _DMUB_FW_STATE_H_
+#ifndef _DMUB_PSR_H_
+#define _DMUB_PSR_H_
-#include "dmub_types.h"
+#include "os_types.h"
-#pragma pack(push, 1)
-
-struct dmub_fw_state {
- /**
- * @phy_initialized_during_fw_boot:
- *
- * Detects if VBIOS/VBL has ran before firmware boot.
- * A value of 1 will usually mean S0i3 boot.
- */
- uint8_t phy_initialized_during_fw_boot;
-
- /**
- * @intialized_phy:
- *
- * Bit vector of initialized PHY.
- */
- uint8_t initialized_phy;
-
- /**
- * @enabled_phy:
- *
- * Bit vector of enabled PHY for DP alt mode switch tracking.
- */
- uint8_t enabled_phy;
-
- /**
- * @dmcu_fw_loaded:
- *
- * DMCU auto load state.
- */
- uint8_t dmcu_fw_loaded;
+struct dmub_psr {
+ struct dc_context *ctx;
+ const struct dmub_psr_funcs *funcs;
+};
- /**
- * @psr_state:
- *
- * PSR state tracking.
- */
- uint8_t psr_state;
+struct dmub_psr_funcs {
+ void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
+ bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*get_psr_state)(uint32_t *psr_state);
+ void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
};
-#pragma pack(pop)
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
+void dmub_psr_destroy(struct dmub_psr **dmub);
+
-#endif /* _DMUB_FW_STATE_H_ */
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4939cf3b316f..5b689273ff44 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1373,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+	/* Event triggers and num frames are initialized for DRR, but can be
+	 * updated later for PSR use. Note that DRR trigger events are
+	 * generated regardless of whether the num_frames threshold is met.
+	 */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
@@ -1706,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
struct drr_params params = {0};
// DRR should set trigger event to monitor surface update event
unsigned int event_triggers = 0x80;
+	// Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
+ unsigned int num_frames = 2;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
@@ -1721,7 +1727,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
if (vmax != 0 && vmin != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
- event_triggers);
+ event_triggers, num_frames);
}
}
@@ -1738,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx,
}
static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events)
+ int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
- unsigned int value = 0;
+ unsigned int triggers = 0;
- if (events->overlay_update)
- value |= 0x100;
- if (events->surface_update)
- value |= 0x80;
- if (events->cursor_update)
- value |= 0x2;
- if (events->force_trigger)
- value |= 0x1;
+ if (params->triggers.overlay_update)
+ triggers |= 0x100;
+ if (params->triggers.surface_update)
+ triggers |= 0x80;
+ if (params->triggers.cursor_update)
+ triggers |= 0x2;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
if (num_pipes) {
struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
if (dc->fbc_compressor)
- value |= 0x84;
+ triggers |= 0x84;
}
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 5f7c2c5641c4..1ea7db8eeb98 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr(
void dce110_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t static_screen_cntl = 0;
uint32_t addr = 0;
+	// Per the register spec, the frame count field is only 8 bits wide
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
static_screen_cntl = dm_read_reg(tg->ctx, addr);
set_reg_field_value(static_screen_cntl,
- value,
+ event_triggers,
CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_EVENT_MASK);
set_reg_field_value(static_screen_cntl,
- 2,
+ num_frames,
CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_FRAME_COUNT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 768ccf27ada9..d8a5ed7b485d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr(
void dce110_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void dce110_timing_generator_get_crtc_scanoutpos(
struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 098e56962f2a..82bc4e192bbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg,
static void dce120_timing_generator_set_static_screen_control(
struct timing_generator *tg,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+	// Per the register spec, the frame count field is only 8 bits wide
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
- CRTC_STATIC_SCREEN_EVENT_MASK, value,
- CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+ CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers,
+ CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
void dce120_timing_generator_set_test_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 935c892622a0..4d3f7d5e1473 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,26 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
- {COLOR_SPACE_SRGB,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_SRGB_LIMITED,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_YCBCR601,
- {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
- 0, 0x2000, 0x38b4, 0xe3a6} },
- {COLOR_SPACE_YCBCR601_LIMITED,
- {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
- 0, 0x2568, 0x40de, 0xdd3a} },
- {COLOR_SPACE_YCBCR709,
- {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
- 0x2000, 0x3b61, 0xe24f} },
-
- {COLOR_SPACE_YCBCR709_LIMITED,
- {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
- 0x2568, 0x43ee, 0xdbb2} }
-};
-
static void program_gamut_remap(
struct dcn10_dpp *dpp,
const uint16_t *regval,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index a02c10e23e0d..f36a0d8cedfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 4d1301e5eaf5..31b64733d693 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -810,8 +810,7 @@ static void hubp1_set_vm_context0_settings(struct hubp *hubp,
void min_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
- const struct rect *viewport_c,
- enum dc_rotation_angle rotation)
+ const struct rect *viewport_c)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index e44eaae5033b..780af5b3c16f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -749,9 +749,7 @@ void hubp1_set_blank(struct hubp *hubp, bool blank);
void min_set_viewport(struct hubp *hubp,
const struct rect *viewport,
- const struct rect *viewport_c,
- enum dc_rotation_angle rotation);
-/* rotation angle added for use by hubp21_set_viewport */
+ const struct rect *viewport_c);
void hubp1_clk_cntl(struct hubp *hubp, bool enable);
void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 3996fef56948..1008ac8a0f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -479,10 +479,10 @@ void dcn10_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -860,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -867,8 +868,14 @@ static void dcn10_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
+ link = pipe_ctx->stream->link;
+		/* DPMS may already be disabled, or dpms_off may be stale due
+		 * to the fastboot feature: when the system resumes from S4
+		 * with "second screen only", dpms_off is true even though the
+		 * VBIOS lit up the eDP panel, so check the link status too.
+		 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1156,7 +1163,8 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+	/* num_opp is equal to the number of MPCCs */
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Cannot reset the MPC mux if seamless boot */
@@ -1180,8 +1188,14 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
if (can_apply_seamless_boot &&
pipe_ctx->stream != NULL &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
- pipe_ctx->stream_res.tg))
+ pipe_ctx->stream_res.tg)) {
+			// Enable double buffering for OTG_BLANK regardless of
+			// whether seamless boot is enabled, to suppress global
+			// sync signals when the OTG is blanked. This prevents
+			// the pipe from requesting data while in PSR.
+ tg->funcs->tg_init(tg);
continue;
+ }
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2291,8 +2305,7 @@ static void dcn10_update_dchubp_dpp(
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
- &pipe_ctx->plane_res.scl_data.viewport_c,
- plane_state->rotation);
+ &pipe_ctx->plane_res.scl_data.viewport_c);
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
@@ -2697,6 +2710,8 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
struct drr_params params = {0};
// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
unsigned int event_triggers = 0x800;
+	// Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
+ unsigned int num_frames = 2;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
@@ -2713,7 +2728,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
if (vmax != 0 && vmin != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
- event_triggers);
+ event_triggers, num_frames);
}
}
@@ -2730,21 +2745,22 @@ void dcn10_get_position(struct pipe_ctx **pipe_ctx,
}
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events)
+ int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
- unsigned int value = 0;
+ unsigned int triggers = 0;
- if (events->surface_update)
- value |= 0x80;
- if (events->cursor_update)
- value |= 0x2;
- if (events->force_trigger)
- value |= 0x1;
+ if (params->triggers.surface_update)
+ triggers |= 0x80;
+ if (params->triggers.cursor_update)
+ triggers |= 0x2;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
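+	/* e.g. surface_update + cursor_update yields triggers == 0x82
+	 * (illustrative combination of the bits assigned above).
+	 */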
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
}
static void dcn10_config_stereo_parameters(
@@ -2895,6 +2911,33 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
hubbub->funcs->update_dchub(hubbub, dh_data);
}
+static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *test_pipe;
+ const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
+ int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+
+	/*
+ * Disable the cursor if there's another pipe above this with a
+ * plane that contains this pipe's viewport to prevent double cursor
+ * and incorrect scaling artifacts.
+ */
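+	/* Illustrative check: a recout of {0, 0, 1920x1080} lies entirely
+	 * inside {0, 0, 3840x2160}, so the cursor on the lower pipe would
+	 * be disabled.
+	 */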
+ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ test_pipe = test_pipe->top_pipe) {
+		if (!test_pipe->plane_state ||
+		    !test_pipe->plane_state->visible)
+ continue;
+
+ r2 = &test_pipe->plane_res.scl_data.recout;
+ r2_r = r2->x + r2->width;
+ r2_b = r2->y + r2->height;
+
+ if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
+ return true;
+ }
+
+ return false;
+}
+
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2909,6 +2952,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
+ (pipe_ctx->bottom_pipe != NULL);
int x_plane = pipe_ctx->plane_state->dst_rect.x;
int y_plane = pipe_ctx->plane_state->dst_rect.y;
@@ -2938,9 +2983,13 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
+ if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+ pos_cpy.enable = false;
+
// Swap axis and mirror horizontally
if (param.rotation == ROTATION_ANGLE_90) {
uint32_t temp_x = pos_cpy.x;
+
pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
pos_cpy.y = temp_x;
@@ -2948,26 +2997,44 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
// Swap axis and mirror vertically
else if (param.rotation == ROTATION_ANGLE_270) {
uint32_t temp_y = pos_cpy.y;
- if (pos_cpy.x > pipe_ctx->plane_res.scl_data.viewport.height) {
- pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height;
- pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
- } else {
- pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
- }
+ int viewport_height =
+ pipe_ctx->plane_res.scl_data.viewport.height;
+
+ if (pipe_split_on) {
+ if (pos_cpy.x > viewport_height) {
+ pos_cpy.x = pos_cpy.x - viewport_height;
+ pos_cpy.y = viewport_height - pos_cpy.x;
+ } else {
+ pos_cpy.y = 2 * viewport_height - pos_cpy.x;
+ }
+ } else
+ pos_cpy.y = viewport_height - pos_cpy.x;
pos_cpy.x = temp_y;
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) {
- pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width
- - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x;
- } else {
- uint32_t temp_x = pos_cpy.x;
- pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x;
- if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width
- || pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_width =
+ pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_x =
+ pipe_ctx->plane_res.scl_data.viewport.x;
+
+ if (pipe_split_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
}
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 55b8f3b2fc4e..4d20f6586bb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -132,7 +132,7 @@ void dcn10_get_position(struct pipe_ctx **pipe_ctx,
int num_pipes,
struct crtc_position *position);
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_events *events);
+ int num_pipes, const struct dc_static_screen_params *params);
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc);
void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn10_log_hw_state(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 7493a630f4dc..eb13589b9a81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -124,6 +124,26 @@ struct dcn10_link_enc_registers {
uint32_t RDPCSTX_PHY_CNTL13;
uint32_t RDPCSTX_PHY_CNTL14;
uint32_t RDPCSTX_PHY_CNTL15;
+ uint32_t RDPCSTX_CNTL;
+ uint32_t RDPCSTX_CLOCK_CNTL;
+ uint32_t RDPCSTX_PHY_CNTL0;
+ uint32_t RDPCSTX_PHY_CNTL2;
+ uint32_t RDPCSTX_PLL_UPDATE_DATA;
+ uint32_t RDPCS_TX_CR_ADDR;
+ uint32_t RDPCS_TX_CR_DATA;
+ uint32_t DPCSTX_TX_CLOCK_CNTL;
+ uint32_t DPCSTX_TX_CNTL;
+ uint32_t RDPCSTX_INTERRUPT_CONTROL;
+ uint32_t RDPCSTX_PHY_FUSE0;
+ uint32_t RDPCSTX_PHY_FUSE1;
+ uint32_t RDPCSTX_PHY_FUSE2;
+ uint32_t RDPCSTX_PHY_FUSE3;
+ uint32_t RDPCSTX_PHY_RX_LD_VAL;
+ uint32_t DPCSTX_DEBUG_CONFIG;
+ uint32_t RDPCSTX_DEBUG_CONFIG;
+ uint32_t RDPCSTX0_RDPCSTX_SCRATCH;
+ uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG;
+ uint32_t DCIO_SOFT_RESET;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index cd7412dc42d1..a9a43b397db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -789,21 +789,26 @@ void optc1_set_early_control(
void optc1_set_static_screen_control(
struct timing_generator *optc,
- uint32_t value)
+ uint32_t event_triggers,
+ uint32_t num_frames)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+	// Per the register spec, the frame count field is only 8 bits wide
+ if (num_frames > 0xFF)
+ num_frames = 0xFF;
+
/* Bit 8 is no longer applicable in RV for PSR case,
* set bit 8 to 0 if given
*/
- if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+ if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
!= 0)
- value = value &
+ event_triggers = event_triggers &
~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
- OTG_STATIC_SCREEN_EVENT_MASK, value,
- OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+ OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
+ OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
void optc1_setup_manual_trigger(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 3afeb1a30f21..f277656d5464 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -625,7 +625,8 @@ void optc1_set_drr(
void optc1_set_static_screen_control(
struct timing_generator *optc,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index fd52862d6624..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -9,7 +9,13 @@ DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o d
DCN20 += dcn20_dsc.o
+ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
+endif
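+
+# dcn20_resource.c carries floating-point bandwidth/DML calculations, so
+# each supported architecture needs its own matching hard-float flags.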
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -17,6 +23,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -25,6 +32,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
endif
+endif
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 1e1151356e60..50bffbfdd394 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
- ASSERT(req_dppclk <= ref_dppclk);
- /* need to clamp to 8 bits */
- if (ref_dppclk > 0xff) {
- int divider = (ref_dppclk + 0xfe) / 0xff;
+ // phase / modulo = dpp pipe clk / dpp global clk
+		modulo = 0xff; // max value for the 8-bit DTO modulo field
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
- ref_dppclk /= divider;
- req_dppclk = (req_dppclk + divider - 1) / divider;
- if (req_dppclk > ref_dppclk)
- req_dppclk = ref_dppclk;
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
}
+
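+		// Worked example (illustrative clocks): ref_dppclk = 600,
+		// req_dppclk = 480 -> phase = ceil(255 * 480 / 600) = 204,
+		// so the DTO passes 204/255 of the reference, ~480 MHz.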
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 4d7e45892f08..13e057d7ee93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -104,7 +104,7 @@ static void dpp2_cnv_setup (
uint32_t pixel_format = 0;
uint32_t alpha_en = 1;
enum dc_color_space color_space = COLOR_SPACE_SRGB;
- enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
bool force_disable_cursor = false;
struct out_csc_color_matrix tbl_entry;
uint32_t is_2bit = 0;
@@ -145,25 +145,25 @@ static void dpp2_cnv_setup (
force_disable_cursor = false;
pixel_format = 65;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
force_disable_cursor = true;
pixel_format = 64;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
force_disable_cursor = true;
pixel_format = 67;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
force_disable_cursor = true;
pixel_format = 66;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
pixel_format = 22;
@@ -177,7 +177,7 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
pixel_format = 12;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
@@ -188,13 +188,13 @@ static void dpp2_cnv_setup (
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
pixel_format = 115;
color_space = COLOR_SPACE_YCBCR709;
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
is_2bit = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
@@ -227,13 +227,13 @@ static void dpp2_cnv_setup (
tbl_entry.color_space = input_color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
- select = INPUT_CSC_SELECT_ICSC;
+ select = DCN2_ICSC_SELECT_ICSC_A;
else
- select = INPUT_CSC_SELECT_BYPASS;
+ select = DCN2_ICSC_SELECT_BYPASS;
- dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
} else
- dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+ dpp2_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
@@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
- .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
.dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 5b03b737b1d6..27610251c57f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -150,6 +150,16 @@
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
SRI(CM_SHAPER_LUT_INDEX, CM, id)
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(CM_ICSC_B_C11_C12, CM, id), \
+ SRI(CM_ICSC_B_C33_C34, CM, id)
+
#define TF_REG_LIST_DCN20(id) \
TF_REG_LIST_DCN(id), \
TF_REG_LIST_DCN20_COMMON(id), \
@@ -572,10 +582,29 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+/* DPP CM debug status register:
+ *
+ * The status index that reports the current ICSC and gamut remap
+ * modes is 9.
+ * ICSC Mode:        bits [4:3]
+ * Gamut Remap Mode: bits [10:9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
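+/* Example decode of a raw CM_TEST_DEBUG_DATA read (illustrative):
+ * 0x208 & 0x18  -> 0x08 >> 3 = 1, ICSC coefficient set A is live;
+ * 0x208 & 0x600 -> 0x200 >> 9 = 1, gamut remap set A is live.
+ */
+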
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+ TF_DEBUG_REG_LIST_SH_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+ TF_DEBUG_REG_LIST_MASK_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
+ type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
type FORMAT_CNV16; \
type CNVC_BYPASS_MSB_ALIGN; \
type CLAMP_POSITIVE; \
@@ -630,11 +659,22 @@ struct dcn2_dpp_mask {
uint32_t COLOR_KEYER_RED; \
uint32_t COLOR_KEYER_GREEN; \
uint32_t COLOR_KEYER_BLUE; \
- uint32_t OBUF_MEM_PWR_CTRL;\
+ uint32_t OBUF_MEM_PWR_CTRL; \
uint32_t DSCL_MEM_PWR_CTRL
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_ICSC_B_C11_C12; \
+ uint32_t CM_ICSC_B_C33_C34
+
struct dcn2_dpp_registers {
DPP_DCN2_REG_VARIABLE_LIST;
+ DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
};
struct dcn20_dpp {
@@ -656,6 +696,18 @@ struct dcn20_dpp {
struct pwl_params pwl_data;
};
+enum dcn20_input_csc_select {
+ DCN2_ICSC_SELECT_BYPASS = 0,
+ DCN2_ICSC_SELECT_ICSC_A = 1,
+ DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+ DCN2_GAMUT_REMAP_BYPASS = 0,
+ DCN2_GAMUT_REMAP_COEF_A = 1,
+ DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
void dpp20_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -667,6 +719,16 @@ void dpp2_set_degamma(
struct dpp *dpp_base,
enum ipp_degamma_mode mode);
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
bool dpp20_program_blnd_lut(
struct dpp *dpp_base, const struct pwl_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 05a3e7f97ef0..8dc3d1f73984 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -36,6 +36,9 @@
#define REG(reg)\
dpp->tf_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
dpp->base.ctx
@@ -44,9 +47,6 @@
dpp->tf_shift->field_name, dpp->tf_mask->field_name
-
-
-
static void dpp2_enable_cm_block(
struct dpp *dpp_base)
{
@@ -158,6 +158,155 @@ void dpp2_set_degamma(
}
}
+static void program_gamut_remap(
+ struct dcn20_dpp *dpp,
+ const uint16_t *regval,
+ enum dcn20_gamut_remap_select select)
+{
+ uint32_t cur_select = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ /* determine which gamut_remap coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the update so gamut_remap is updated on frame boundary
+ */
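+	/* e.g. if the read below reports set A is live, the B registers are
+	 * programmed and CM_GAMUT_REMAP_MODE is set to 2; hardware latches
+	 * the new mode at the next frame boundary.
+	 */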
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+	/* the debug register reports the coefficient set currently
+	 * latched by hardware; pick the other set
+	 */
+ if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+ select = DCN2_GAMUT_REMAP_COEF_A;
+ else
+ select = DCN2_GAMUT_REMAP_COEF_B;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+ } else {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i = 0;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+ }
+}
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i;
+	int arr_size = ARRAY_SIZE(dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn20_input_csc_select select;
+ struct color_matrices_reg icsc_regs;
+
+ if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+ if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_ICSC_B;
+
+ icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+ if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ } else {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &icsc_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
+}
+
static void dpp20_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 0111545dac75..6bdfee20b6a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -206,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
struct dsc_reg_values dsc_reg_vals;
struct dsc_optc_config dsc_optc_cfg;
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+ memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
dsc_config_log(dsc, dsc_cfg);
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 8b8438566101..9235f7d29454 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
output->grph.rgb.max_compressed_blk_size = 64;
output->grph.rgb.independent_64b_blks = true;
break;
+ default:
+ ASSERT(false);
+ break;
}
output->capable = true;
output->const_color_support = true;
@@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub2_program_watermarks,
- .allow_self_refresh_control = hubbub1_allow_self_refresh_control
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 32878a65bdd7..a444fed94184 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -183,10 +183,10 @@ void dcn20_enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
{
- bool force_on = 1; /* disable power gating */
+ bool force_on = true; /* disable power gating */
if (enable)
- force_on = 0;
+ force_on = false;
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
@@ -572,7 +572,6 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
- dc->optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
@@ -686,9 +685,13 @@ enum dc_status dcn20_enable_stream_timing(
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
event_triggers = 0x80;
+	/* Event triggers and num frames are initialized for DRR, but can be
+	 * updated later for PSR use. Note that DRR trigger events are
+	 * generated regardless of whether the num_frames threshold is met.
+	 */
if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx->stream_res.tg, event_triggers);
+ pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/
/* TODO program FMT */
@@ -941,6 +944,9 @@ void dcn20_blank_pixel_data(
int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ if (stream->link->test_pattern_enabled)
+ return;
+
/* get opp dpg blank color */
color_space_to_black_color(dc, color_space, &black_color);
@@ -1305,6 +1311,7 @@ static void dcn20_update_dchubp_dpp(
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool viewport_changed = false;
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1355,9 +1362,9 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
-		// MPCC inst is equal to pipe index in practice
-		int mpcc_inst = pipe_ctx->pipe_idx;
+		// MPCC inst is equal to the HUBP instance in practice
+		int mpcc_inst = hubp->inst;
int opp_inst;
- int opp_count = dc->res_pool->res_cap->num_opp;
+ int opp_count = dc->res_pool->pipe_count;
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
@@ -1383,15 +1390,18 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
- (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
- &pipe_ctx->plane_res.scl_data.viewport_c,
- plane_state->rotation);
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
- if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -1441,9 +1451,14 @@ static void dcn20_update_dchubp_dpp(
hubp->power_gated = false;
}
+ if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
+ hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
+
if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
hws->funcs.update_plane_addr(dc, pipe_ctx);
+
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
}
@@ -1629,9 +1644,9 @@ void dcn20_program_front_end_for_ctx(
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
- msleep(1);
+			udelay(1);
}
}
@@ -1731,7 +1746,6 @@ bool dcn20_update_bandwidth(
void dcn20_enable_writeback(
struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
@@ -1745,8 +1759,7 @@ void dcn20_enable_writeback(
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
/* set the OPTC source mux */
- ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
- optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
@@ -1995,6 +2008,7 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
+ struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -2002,8 +2016,14 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable */
- if (!pipe_ctx->stream->dpms_off)
+ link = pipe_ctx->stream->link;
+		/* DPMS may already be disabled, or dpms_off may be stale due
+		 * to the fastboot feature: when the system resumes from S4
+		 * with "second screen only", dpms_off is true even though the
+		 * VBIOS lit up the eDP panel, so check the link status too.
+		 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index eecd7a26ec4c..02c9be5ebd47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -104,7 +104,6 @@ void dcn20_program_triple_buffer(
bool enable_triple_buffer);
void dcn20_enable_writeback(
struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context);
void dcn20_disable_writeback(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 62dfd34c69f1..8cab8107fd94 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -33,7 +33,142 @@
SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
#define UNIPHY_MASK_SH_LIST(mask_sh)\
- LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh)
+ LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
@@ -63,6 +198,49 @@
SRI(CLOCK_ENABLE, SYMCLK, id), \
SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+ SRI(DIG_LANE_ENABLE, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
+ SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+
+#define DPCS_DCN2_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+ LE_DCN10_REG_LIST(id), \
+ SR(DCIO_SOFT_RESET)
+
struct mpll_cfg {
uint32_t mpllb_ana_v2i;
uint32_t mpllb_ana_freq_vco;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index f90031ed58a6..de9c857ab3e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -33,6 +33,9 @@
#define REG(reg)\
mpc20->mpc_regs->reg
+#define IND_REG(index) \
+ (index)
+
#define CTX \
mpc20->base.ctx
@@ -132,19 +135,33 @@ void mpc2_set_output_csc(
const uint16_t *regval,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
struct color_matrices_reg ocsc_regs;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
-
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
+ /* Determine which CSC coefficient set (A or B) the hardware is
+ * currently using, then select the alternate set so the update is
+ * double buffered and takes effect on a frame boundary.
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -157,10 +174,13 @@ void mpc2_set_output_csc(
ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
}
+
cm_helper_program_color_matrices(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
void mpc2_set_ocsc_default(
@@ -169,14 +189,16 @@ void mpc2_set_ocsc_default(
enum dc_color_space color_space,
enum mpc_output_csc_mode ocsc_mode)
{
+ uint32_t cur_mode;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
uint32_t arr_size;
struct color_matrices_reg ocsc_regs;
const uint16_t *regval = NULL;
- REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
- if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
return;
+ }
regval = find_color_matrix(color_space, &arr_size);
@@ -185,6 +207,19 @@ void mpc2_set_ocsc_default(
return;
}
+ /* Determine which CSC coefficient set (A or B) the hardware is
+ * currently using, then select the alternate set so the update is
+ * double buffered and takes effect on a frame boundary.
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
@@ -203,6 +238,8 @@ void mpc2_set_ocsc_default(
mpc20->base.ctx,
regval,
&ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
}
static void mpc2_ogam_get_reg_field(
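The mpc2_set_output_csc() and mpc2_set_ocsc_default() hunks above move the MPC_OCSC_MODE write to the end and use the debug-bus read to find the active coefficient set. A standalone sketch of the A/B selection they implement (names are illustrative, not the DC API):

#include <assert.h>

enum csc_mode { CSC_DISABLE, CSC_COEF_A, CSC_COEF_B };

static enum csc_mode pick_inactive(enum csc_mode cur)
{
	/* Mirrors "if (cur_mode != COEF_A) use A; else use B" above. */
	return (cur != CSC_COEF_A) ? CSC_COEF_A : CSC_COEF_B;
}

int main(void)
{
	assert(pick_inactive(CSC_COEF_A) == CSC_COEF_B);
	assert(pick_inactive(CSC_COEF_B) == CSC_COEF_A);
	assert(pick_inactive(CSC_DISABLE) == CSC_COEF_A);
	return 0;
}

Programming the inactive set first means the hardware keeps scanning out with the old coefficients until the mode switch latches at the next frame boundary.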
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index 9f53192da2dc..c78fd5123497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -80,6 +80,10 @@
SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
+#define MPC_DBG_REG_LIST_DCN2_0() \
+ SR(MPC_OCSC_TEST_DEBUG_DATA),\
+ SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
#define MPC_REG_VARIABLE_LIST_DCN2_0 \
MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
@@ -118,6 +122,8 @@
uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+ uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+ uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
uint32_t CSC_MODE[MAX_OPP]; \
uint32_t CSC_C11_C12_A[MAX_OPP]; \
uint32_t CSC_C33_C34_A[MAX_OPP]; \
@@ -134,6 +140,7 @@
SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
@@ -174,6 +181,19 @@
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+/*
+ * DCN2 MPC_OCSC debug status register:
+ *
+ * The status index that reports the current OCSC mode is 1.
+ * The OCSC mode field occupies bits [1:0].
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
#define MPC_REG_FIELD_LIST_DCN2_0(type) \
MPC_REG_FIELD_LIST(type)\
@@ -182,6 +202,8 @@
type MPCC_TOP_GAIN;\
type MPCC_BOT_GAIN_INSIDE;\
type MPCC_BOT_GAIN_OUTSIDE;\
+ type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+ type MPC_OCSC_TEST_DEBUG_INDEX;\
type MPC_OCSC_MODE;\
type MPC_OCSC_C11_A;\
type MPC_OCSC_C12_A;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 673c83e2afd4..d875b0c38fde 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -236,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */
int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
/ opp_cnt;
- int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf;
+ uint32_t memory_mask;
uint32_t data_fmt = 0;
+ ASSERT(opp_cnt == 2);
+
/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
* Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
@@ -249,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
* MASTER_UPDATE_LOCK_DB_X, 160,
* MASTER_UPDATE_LOCK_DB_Y, 240);
*/
+
+ /* 2 pieces of memory are required for display widths up to 5120,
+ * 4 for widths up to 8192; however, for ODM combine we can simplify
+ * by always using 4. To make sure there is no overlap, each instance
+ * "reserves" 2 memories, and they are combined uniquely here.
+ */
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+
if (REG(OPTC_MEMORY_CONFIG))
REG_SET(OPTC_MEMORY_CONFIG, 0,
- OPTC_MEM_SEL, memory_mask << (optc->inst * 4));
+ OPTC_MEM_SEL, memory_mask);
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
data_fmt = 1;
@@ -260,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
- ASSERT(opp_cnt == 2);
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
OPTC_SEG0_SRC_SEL, opp_id[0],
@@ -382,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
- MANUAL_FLOW_CONTROL, 1);
-
- REG_SET(OTG_GLOBAL_CONTROL2, 0,
- MANUAL_FLOW_CONTROL_SEL, optc->inst);
-
REG_SET_8(OTG_TRIGA_CNTL, 0,
- OTG_TRIGA_SOURCE_SELECT, 22,
+ OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
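As a concrete check of the memory_mask arithmetic in the optc2_set_odm_combine() hunk above (hypothetical OPP pair; not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int opp_id[2] = { 1, 2 };	/* hypothetical ODM pair */
	uint32_t memory_mask = 0x3 << (opp_id[0] * 2) |
			       0x3 << (opp_id[1] * 2);

	/* Each OPP reserves two memories: 0x3 << 2 = 0xc (memories 2,3)
	 * and 0x3 << 4 = 0x30 (memories 4,5); no overlap. */
	assert(memory_mask == 0x3c);
	return 0;
}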
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index ac93fbfaee03..239cc40ae474 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -106,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc);
void optc2_triplebuffer_unlock(struct timing_generator *optc);
void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index cfc69919ef9e..85f90f3e24cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -65,6 +66,8 @@
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
#include "nbio/nbio_2_3_offset.h"
@@ -548,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -561,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
};
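These initializers rely on the mask_sh X-macro convention: one LE_SF field list expands against either the __SHIFT or the _MASK register-header definitions. A minimal, self-contained sketch of the mechanism with illustrative names (not real DCN registers):

#include <assert.h>

#define MY_REG__MY_FIELD__SHIFT 0x4
#define MY_REG__MY_FIELD_MASK   0x30

#define FIELD_LIST(mask_sh) \
	.MY_FIELD = MY_REG__MY_FIELD##mask_sh

struct shifts { unsigned char MY_FIELD; };
struct masks  { unsigned int  MY_FIELD; };

static const struct shifts s = { FIELD_LIST(__SHIFT) };
static const struct masks  m = { FIELD_LIST(_MASK) };

int main(void)
{
	/* The same field list fills both structs from one definition. */
	assert(s.MY_FIELD == 0x4 && m.MY_FIELD == 0x30);
	return 0;
}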
#define ipp_regs(id)\
@@ -632,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -645,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = {
static const struct dcn2_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
- TF_DEBUG_REG_LIST_SH_DCN10
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN20(_MASK),
- TF_DEBUG_REG_LIST_MASK_DCN10
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
@@ -700,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(3),
MPC_OUT_MUX_REG_LIST_DCN2_0(4),
MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define tg_regs(id)\
@@ -1563,7 +1573,7 @@ static void release_dsc(struct resource_context *res_ctx,
-static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
struct dc_state *dc_ctx,
struct dc_stream_state *dc_stream)
{
@@ -1578,6 +1588,9 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
+ if (pipe_ctx->stream_res.dsc)
+ continue;
+
acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
@@ -1626,7 +1639,7 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
/* Get a DSC if required and available */
if (result == DC_OK && dc_stream->timing.flags.DSC)
- result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
+ result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
if (result == DC_OK)
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
@@ -1848,6 +1861,22 @@ void dcn20_populate_dml_writeback_from_context(
}
+static int get_num_odm_heads(struct pipe_ctx *pipe)
+{
+ int odm_head_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_head_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_head_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_head_count ? odm_head_count + 1 : 0;
+}
+
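get_num_odm_heads() above walks the ODM chain in both directions and returns the total head count, or 0 for a pipe with no ODM neighbors. A standalone sketch with stubbed pipe structs shows the expected results for a 2to1 combine and a standalone pipe:

#include <assert.h>
#include <stddef.h>

struct pipe_ctx {
	struct pipe_ctx *next_odm_pipe;
	struct pipe_ctx *prev_odm_pipe;
};

static int get_num_odm_heads(struct pipe_ctx *pipe)
{
	int n = 0;
	struct pipe_ctx *p;

	for (p = pipe->next_odm_pipe; p; p = p->next_odm_pipe)
		n++;
	for (p = pipe->prev_odm_pipe; p; p = p->prev_odm_pipe)
		n++;
	return n ? n + 1 : 0;
}

int main(void)
{
	struct pipe_ctx a = { 0 }, b = { 0 }, alone = { 0 };

	a.next_odm_pipe = &b;
	b.prev_odm_pipe = &a;
	assert(get_num_odm_heads(&a) == 2);	/* maps to 2to1 combine */
	assert(get_num_odm_heads(&b) == 2);	/* same from either end */
	assert(get_num_odm_heads(&alone) == 0);	/* combine disabled */
	return 0;
}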
int dcn20_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
@@ -1874,17 +1903,21 @@ int dcn20_populate_dml_pipes_from_context(
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
unsigned int v_total;
+ unsigned int front_porch;
int output_bpc;
if (!res_ctx->pipe_ctx[i].stream)
continue;
v_total = timing->v_total;
+ front_porch = timing->v_front_porch;
/* todo:
pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
pipes[pipe_cnt].pipe.src.dcc = 0;
pipes[pipe_cnt].pipe.src.vm = 0;*/
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
/* todo: rotation?*/
pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
@@ -1906,7 +1939,7 @@ int dcn20_populate_dml_pipes_from_context(
- timing->h_addressable
- timing->h_border_left
- timing->h_border_right;
- pipes[pipe_cnt].pipe.dest.vblank_start = v_total - timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch;
pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
- timing->v_addressable
- timing->v_border_top
@@ -1923,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe
- || res_ctx->pipe_ctx[i].next_odm_pipe;
+ switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
+ }
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
== res_ctx->pipe_ctx[i].plane_state)
@@ -2034,6 +2072,9 @@ int dcn20_populate_dml_pipes_from_context(
if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
pipes[pipe_cnt].pipe.src.viewport_height = 1080;
pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
@@ -2067,7 +2108,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
@@ -2481,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
}
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
@@ -2886,12 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
- double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
- context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support;
+ double p_state_latency_us;
- if (fast_validate)
- return dcn20_validate_bandwidth_internal(dc, context, true);
+ DC_FP_START();
+ p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
+ context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
+ dc->debug.disable_dram_clock_change_vactive_support;
+ if (fast_validate) {
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
+
+ DC_FP_END();
+ return voltage_supported;
+ }
// Best case, we support full UCLK switch latency
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
@@ -2899,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
(voltage_supported && full_pstate_supported)) {
- context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
goto restore_dml_state;
}
@@ -2920,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+ DC_FP_END();
return voltage_supported;
}
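The hunk above shows the DC_FP_START()/DC_FP_END() bracketing rule that replaces the kernel_fpu_begin()/end() calls elsewhere in this series: every return path between the markers must pass through the END. A minimal sketch of the pattern with stubbed macros (the real macros manage the kernel FPU context) and a placeholder do_fp_math():

#include <stdbool.h>

#define DC_FP_START()	do { } while (0)	/* stub: kernel enables FPU */
#define DC_FP_END()	do { } while (0)	/* stub: kernel disables FPU */

static bool do_fp_math(bool fast)		/* placeholder workload */
{
	(void)fast;
	return true;
}

static bool validate_with_fpu(bool fast_path)
{
	bool supported;

	DC_FP_START();
	if (fast_path) {
		supported = do_fp_math(true);
		DC_FP_END();	/* balance before the early return */
		return supported;
	}
	supported = do_fp_math(false);
	DC_FP_END();
	return supported;
}

int main(void)
{
	return validate_with_fpu(true) ? 0 : 1;
}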
@@ -3211,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
- kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
@@ -3235,7 +3286,6 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
- kernel_fpu_end();
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3441,6 +3491,8 @@ static bool dcn20_resource_construct(
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
+ DC_FP_START();
+
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
@@ -3738,10 +3790,12 @@ static bool dcn20_resource_construct(
pool->base.oem_device = NULL;
}
+ DC_FP_END();
return true;
create_fail:
+ DC_FP_END();
dcn20_resource_destruct(pool);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 840ca66c34e1..f5893840b79b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -157,6 +157,7 @@ void dcn20_calculate_dlg_params(
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 4763721fb1c9..07684d3e375a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -5,7 +5,13 @@
DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
dcn21_hwseq.o dcn21_link_encoder.o
+ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -13,6 +19,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -21,6 +28,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
endif
+endif
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 332bf3d3a664..cf09b9335728 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -29,6 +29,8 @@
#include "dm_services.h"
#include "reg_helper.h"
+#include "dc_dmub_srv.h"
+
#define DC_LOGGER_INIT(logger)
#define REG(reg)\
@@ -169,12 +171,9 @@ static void hubp21_setup(
void hubp21_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
- const struct rect *viewport_c,
- enum dc_rotation_angle rotation)
+ const struct rect *viewport_c)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- int patched_viewport_height = 0;
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
PRI_VIEWPORT_WIDTH, viewport->width,
@@ -193,31 +192,10 @@ void hubp21_set_viewport(
SEC_VIEWPORT_X_START, viewport->x,
SEC_VIEWPORT_Y_START, viewport->y);
- /*
- * Work around for underflow issue with NV12 + rIOMMU translation
- * + immediate flip. This will cause hubp underflow, but will not
- * be user visible since underflow is in blank region
- * Disable w/a when rotated 180 degrees, causes vertical chroma offset
- */
- patched_viewport_height = viewport_c->height;
- if (debug->nv12_iflip_vm_wa && viewport_c->height > 512 &&
- rotation != ROTATION_ANGLE_180) {
- int pte_row_height = 0;
- int pte_rows = 0;
-
- REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
- PTE_ROW_HEIGHT_LINEAR_C, &pte_row_height);
-
- pte_row_height = 1 << (pte_row_height + 3);
- pte_rows = (viewport_c->height / pte_row_height) + 1;
- patched_viewport_height = pte_rows * pte_row_height + 1;
- }
-
-
/* DC supports NV12 only at the moment */
REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
PRI_VIEWPORT_WIDTH_C, viewport_c->width,
- PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
+ PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
@@ -225,13 +203,123 @@ void hubp21_set_viewport(
REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
SEC_VIEWPORT_WIDTH_C, viewport_c->width,
- SEC_VIEWPORT_HEIGHT_C, patched_viewport_height);
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
SEC_VIEWPORT_X_START_C, viewport_c->x,
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
+static void hubp21_apply_PLAT_54186_wa(
+ struct hubp *hubp,
+ const struct dc_plane_address *address)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+ unsigned int chroma_bpe = 2;
+ unsigned int luma_addr_high_part = 0;
+ unsigned int row_height = 0;
+ unsigned int chroma_pitch = 0;
+ unsigned int viewport_c_height = 0;
+ unsigned int viewport_c_width = 0;
+ unsigned int patched_viewport_height = 0;
+ unsigned int patched_viewport_width = 0;
+ unsigned int rotation_angle = 0;
+ unsigned int pix_format = 0;
+ unsigned int h_mirror_en = 0;
+ unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */
+
+
+ if (!debug->nv12_iflip_vm_wa)
+ return;
+
+ REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
+ PTE_ROW_HEIGHT_LINEAR_C, &row_height);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+ PRI_VIEWPORT_WIDTH_C, &viewport_c_width,
+ PRI_VIEWPORT_HEIGHT_C, &viewport_c_height);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part);
+
+ REG_GET(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, &chroma_pitch);
+
+ chroma_pitch += 1;
+
+ REG_GET_3(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &pix_format,
+ ROTATION_ANGLE, &rotation_angle,
+ H_MIRROR_EN, &h_mirror_en);
+
+ /* reset persistent cached data */
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ /* Apply the workaround only for NV12 surfaces with scatter/gather
+ * enabled and a viewport larger than 512 lines in the vertical
+ * direction. */
+ if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ address->video_progressive.luma_addr.high_part == 0xf4)
+ return;
+
+ if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180)
+ && viewport_c_height <= 512)
+ return;
+
+ if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270)
+ && viewport_c_width <= 512)
+ return;
+
+ switch (rotation_angle) {
+ case ROTATION_ANGLE_0: /* 0 degree rotation */
+ row_height = 128;
+ patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
+ patched_viewport_width = viewport_c_width;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ break;
+ case ROTATION_ANGLE_180: /* 180 degree rotation */
+ row_height = 128;
+ patched_viewport_height = viewport_c_height + row_height;
+ patched_viewport_width = viewport_c_width;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
+ break;
+ case ROTATION_ANGLE_90: /* 90 degree rotation */
+ row_height = 256;
+ if (h_mirror_en) {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ } else {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+ }
+ break;
+ case ROTATION_ANGLE_270: /* 270 degree rotation */
+ row_height = 256;
+ if (h_mirror_en) {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
+ } else {
+ patched_viewport_height = viewport_c_height;
+ patched_viewport_width = viewport_c_width + row_height;
+ hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
+ }
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ /* Catch cases where the viewport keeps growing */
+ ASSERT(patched_viewport_height && patched_viewport_height < 5000);
+ ASSERT(patched_viewport_width && patched_viewport_width < 5000);
+
+ REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
+ PRI_VIEWPORT_WIDTH_C, patched_viewport_width,
+ PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
+}
+
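The 0-degree branch of hubp21_apply_PLAT_54186_wa() above rounds the chroma viewport up to a whole PTE row plus one line. A worked check of that formula with a hypothetical 540-line chroma viewport (not driver code):

#include <assert.h>

int main(void)
{
	unsigned int row_height = 128;		/* PTE row height, 0 degrees */
	unsigned int viewport_c_height = 540;	/* hypothetical NV12 chroma */
	unsigned int patched =
		(viewport_c_height / row_height + 1) * row_height + 1;

	/* 540 / 128 = 4, so (4 + 1) * 128 + 1 = 641 lines. */
	assert(patched == 641);
	return 0;
}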
void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
@@ -602,6 +690,219 @@ void hubp21_validate_dml_output(struct hubp *hubp,
dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l);
}
+static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+
+ REG_UPDATE_3(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_regs->immediate,
+ SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo,
+ SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo);
+
+ REG_UPDATE(VMID_SETTINGS_0,
+ VMID, flip_regs->vmid);
+
+ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface,
+ SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS);
+
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);
+}
+
+void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs)
+{
+ struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
+
+ PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+ PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+ PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+ PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+ PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_cmd_execute(dmcub);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+ dc_dmub_srv_wait_idle(dmcub);
+ PERF_TRACE(); // TODO: remove after performance is stable.
+}
+
+bool hubp21_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dc_debug_options *debug = &hubp->ctx->dc->debug;
+ struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ struct surface_flip_registers flip_regs = { 0 };
+
+ flip_regs.vmid = address->vmid;
+
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->grph.meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph.meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->grph.addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->grph.addr.high_part;
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->video_progressive.luma_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->video_progressive.luma_meta_addr.high_part;
+
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C =
+ address->video_progressive.chroma_meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->video_progressive.luma_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->video_progressive.luma_addr.high_part;
+
+ if (debug->nv12_iflip_vm_wa) {
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset;
+ } else
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_addr.low_part;
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+ address->video_progressive.chroma_addr.high_part;
+
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ flip_regs.grph_stereo = true;
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS =
+ address->grph_stereo.right_meta_addr.low_part;
+ flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.right_meta_addr.high_part;
+ }
+
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS =
+ address->grph_stereo.left_meta_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.left_meta_addr.high_part;
+ }
+
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ address->grph_stereo.left_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.left_addr.high_part;
+
+ flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS =
+ address->grph_stereo.right_addr.low_part;
+ flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH =
+ address->grph_stereo.right_addr.high_part;
+
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ flip_regs.tmz_surface = address->tmz_surface;
+ flip_regs.immediate = flip_immediate;
+
+ if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ dmcub_PLAT_54186_wa(hubp, &flip_regs);
+ else
+ program_surface_flip_and_addr(hubp, &flip_regs);
+
+ hubp->request_address = *address;
+
+ return true;
+}
+
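hubp21_program_surface_flip_and_addr() above applies the cached PLAT_54186 chroma offset to the chroma surface address; for the 180-degree case computed earlier, the offset backs the address up by one full PTE row. A numeric check with a hypothetical pitch (not driver code):

#include <assert.h>

int main(void)
{
	unsigned int chroma_pitch = 960;	/* hypothetical NV12 pitch */
	unsigned int row_height = 128;		/* PTE row height, 180 degrees */
	unsigned int chroma_bpe = 2;		/* bytes per chroma element */
	int offset = 0 - (int)(chroma_pitch * row_height * chroma_bpe);

	/* 960 * 128 * 2 = 245760 bytes, i.e. one PTE row backwards. */
	assert(offset == -245760);
	return 0;
}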
void hubp21_init(struct hubp *hubp)
{
// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
@@ -614,7 +915,7 @@ void hubp21_init(struct hubp *hubp)
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
- .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
+ .hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
@@ -623,6 +924,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
.mem_program_viewport = hubp21_set_viewport,
+ .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.hubp_clk_cntl = hubp1_clk_cntl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
index aeda719a2a13..9873b6cbc5ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
@@ -108,6 +108,7 @@ struct dcn21_hubp {
const struct dcn_hubp2_registers *hubp_regs;
const struct dcn_hubp2_shift *hubp_shift;
const struct dcn_hubp2_mask *hubp_mask;
+ int PLAT_54186_wa_chroma_addr_offset;
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
index 1d7a1a51f13d..033d5d76f195 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -33,6 +33,45 @@ struct dcn21_link_encoder {
struct dpcssys_phy_seq_cfg phy_seq_cfg;
};
+#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\
+ DPCS_DCN2_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+
+#define DPCS_DCN21_REG_LIST(id) \
+ DPCS_DCN2_REG_LIST(id),\
+ SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index c865b95d5c0e..33d0a176841a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1,5 +1,6 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -59,9 +60,12 @@
#include "dcn20/dcn20_dccg.h"
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
+#include "dce110/dce110_resource.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
+#include "dpcs/dpcs_2_1_0_offset.h"
+#include "dpcs/dpcs_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
#include "dcn/dcn_2_1_0_offset.h"
@@ -80,6 +84,7 @@
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "../dce/dmub_psr.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -462,15 +467,18 @@ static const struct dcn20_mpc_registers mpc_regs = {
MPC_OUT_MUX_REG_LIST_DCN2_0(0),
MPC_OUT_MUX_REG_LIST_DCN2_0(1),
MPC_OUT_MUX_REG_LIST_DCN2_0(2),
- MPC_OUT_MUX_REG_LIST_DCN2_0(3)
+ MPC_OUT_MUX_REG_LIST_DCN2_0(3),
+ MPC_DBG_REG_LIST_DCN2_0()
};
static const struct dcn20_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn20_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
};
#define hubp_regs(id)\
@@ -605,6 +613,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = {
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}
static const struct dcn2_dpp_registers tf_regs[] = {
@@ -615,11 +624,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN20
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN20
};
#define stream_enc_regs(id)\
@@ -820,12 +831,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 3840,
+ .max_downscale_src_width = 4096,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
.disable_48mhz_pwrdwn = false,
- .nv12_iflip_vm_wa = true
+ .nv12_iflip_vm_wa = true,
+ .usbc_combo_phy_reset_wa = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -845,6 +857,7 @@ static const struct dc_debug_options debug_defaults_diags = {
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -993,7 +1006,8 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
{
int i;
- kernel_fpu_begin();
+ DC_FP_START();
+
if (dc->bb_overrides.sr_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1019,7 +1033,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
}
}
- kernel_fpu_end();
+ DC_FP_END();
}
void dcn21_calculate_wm(
@@ -1319,12 +1333,6 @@ struct display_stream_compressor *dcn21_dsc_create(
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- /*
- TODO: Fix this function to calcualte correct values.
- There are known issues with this function currently
- that will need to be investigated. Use hardcoded known good values for now.
-
-
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i;
@@ -1339,11 +1347,14 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
+ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
}
- dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i];
+ dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
dcn2_1_soc.num_states = i;
- */
+
+ // Diags does not retrieve proper values from the SMU; do not update the DML instance for diags.
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
/* Temporary Place holder until we can get them from fuse */
@@ -1497,8 +1508,9 @@ static const struct encoder_feature_support link_enc_feature = {
#define link_regs(id, phyid)\
[id] = {\
- LE_DCN10_REG_LIST(id), \
+ LE_DCN2_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN21_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -1537,11 +1549,13 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
};
static const struct dcn10_link_enc_shift le_shift = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN21_MASK_SH_LIST(__SHIFT)
};
static const struct dcn10_link_enc_mask le_mask = {
- LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN21_MASK_SH_LIST(_MASK)
};
static int map_transmitter_id_to_phy_instance(
@@ -1706,6 +1720,10 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
@@ -1740,6 +1758,10 @@ static bool dcn21_resource_construct(
goto create_fail;
}
+ // Leave as NULL so the current DMCU PSR programming sequence is
+ // unaffected; enable once the functionality is confirmed working.
+ pool->base.psr = NULL;
+
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
@@ -1776,41 +1798,41 @@ static bool dcn21_resource_construct(
if ((pipe_fuses & (1 << i)) != 0)
continue;
- pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
- if (pool->base.hubps[i] == NULL) {
+ pool->base.hubps[j] = dcn21_hubp_create(ctx, i);
+ if (pool->base.hubps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
goto create_fail;
}
- pool->base.ipps[i] = dcn21_ipp_create(ctx, i);
- if (pool->base.ipps[i] == NULL) {
+ pool->base.ipps[j] = dcn21_ipp_create(ctx, i);
+ if (pool->base.ipps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create input pixel processor!\n");
goto create_fail;
}
- pool->base.dpps[i] = dcn21_dpp_create(ctx, i);
- if (pool->base.dpps[i] == NULL) {
+ pool->base.dpps[j] = dcn21_dpp_create(ctx, i);
+ if (pool->base.dpps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpps!\n");
goto create_fail;
}
- pool->base.opps[i] = dcn21_opp_create(ctx, i);
- if (pool->base.opps[i] == NULL) {
+ pool->base.opps[j] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
goto create_fail;
}
- pool->base.timing_generators[i] = dcn21_timing_generator_create(
+ pool->base.timing_generators[j] = dcn21_timing_generator_create(
ctx, i);
- if (pool->base.timing_generators[i] == NULL) {
+ if (pool->base.timing_generators[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
goto create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index a3d1be20dd9d..b52ba6ffabe1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -220,6 +220,7 @@ struct dm_bl_data_point {
};
/* Total size of the structure should not exceed 256 bytes */
+#define BL_DATA_POINTS 99
struct dm_acpi_atif_backlight_caps {
uint16_t size; /* Bytes 0-1 (2 bytes) */
uint16_t flags; /* Bytes 2-3 (2 bytes) */
@@ -229,7 +230,7 @@ struct dm_acpi_atif_backlight_caps {
uint8_t min_input_signal; /* Byte 7 */
uint8_t max_input_signal; /* Byte 8 */
uint8_t num_data_points; /* Byte 9 */
- struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
+ struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes) */
};
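The layout comments put 10 header bytes plus 99 two-byte data points at 208 bytes, within the stated 256-byte limit. A compile-time guard of the kind the kernel uses could enforce this; a sketch, where the member names and the packed attribute are assumptions for illustration:

#include <stdint.h>

struct bl_data_point { uint8_t luminance; uint8_t signal_level; };

struct atif_backlight_caps {
	uint16_t size;			/* bytes 0-1 */
	uint16_t flags;			/* bytes 2-3 */
	uint8_t  error_code;		/* byte 4 */
	uint8_t  ac_level;		/* byte 5 */
	uint8_t  dc_level;		/* byte 6 */
	uint8_t  min_input_signal;	/* byte 7 */
	uint8_t  max_input_signal;	/* byte 8 */
	uint8_t  num_data_points;	/* byte 9 */
	struct bl_data_point data_points[99];	/* bytes 10-207 */
} __attribute__((packed));

/* 10 + 99 * 2 = 208 bytes, comfortably under the ACPI limit. */
_Static_assert(sizeof(struct atif_backlight_caps) <= 256,
	       "ATIF backlight caps must fit in 256 bytes");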
enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index fb6358036be8..7ee8b8460a9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -1,5 +1,6 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,13 @@
# It provides the general basic services required by other DAL
# subcomponents.
+ifdef CONFIG_X86
dml_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+dml_ccflags := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -40,6 +48,7 @@ dml_ccflags += -mpreferred-stack-boundary=4
else
dml_ccflags += -msse2
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 3b224b155e8c..45f028986a8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -38,6 +38,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -937,7 +938,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1335,11 +1336,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2848,12 +2849,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
@@ -3348,7 +3349,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3446,10 +3447,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
- locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+ locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3460,7 +3461,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3472,7 +3473,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3483,7 +3484,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3521,12 +3522,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
- + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
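[Note: the ROB support test re-indexed in the hunk above is a latency-hiding comparison: the time the reordering buffer (minus one pixel chunk) can absorb returning data must exceed the worst-case urgent round trip. A compilable sketch of that predicate follows; the flattened parameter names are simplified stand-ins for the mode_lib->vba fields, not driver code.]

#include <stdbool.h>

/* Illustrative only: same predicate as the hunk above, simplified names. */
static bool rob_supported(double rob_kbytes, double pixel_chunk_kbytes,
			  double return_bw,          /* ReturnBWPerState[i][0] */
			  double round_trip_cycles, double dcfclk,
			  double ooo_return_per_channel, int num_channels)
{
	/* Worst-case urgent round trip plus out-of-order return time. */
	double urgent_latency = (round_trip_cycles + 32.0) / dcfclk
		+ ooo_return_per_channel * num_channels / return_bw;
	/* Time the ROB can buffer return data at the state's return BW. */
	double rob_fill_time = (rob_kbytes - pixel_chunk_kbytes) * 1024.0
		/ return_bw;

	return rob_fill_time > urgent_latency;
}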
@@ -3894,16 +3895,22 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
- locals->ODMCombineEnablePerState[i][k] = false;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
- } else {
- locals->ODMCombineEnablePerState[i][k] = true;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+ locals->ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ if (mode_lib->vba.ODMCapability) {
+ if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ }
}
+
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
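[Note: restated outside the i/k loop nest, the restructured ODM policy in the hunk above is: leave combine disabled by default, then enable 2:1 combine when the plane's DISPCLK without combine exceeds the rounded-down maximum, or when a 4:2:0 output is wider than DCN20_MAX_420_IMAGE_WIDTH. A minimal sketch under those assumptions; the helper name and parameters are illustrative, not the driver's.]

#include <stdbool.h>

#define MAX_420_IMAGE_WIDTH 4096 /* mirrors DCN20_MAX_420_IMAGE_WIDTH */

enum odm_mode { odm_disabled = 0, odm_2to1 = 1 };

static enum odm_mode choose_odm_combine(bool odm_capable,
		double dispclk_no_combine, double max_dispclk,
		unsigned int h_active, bool output_is_420)
{
	if (!odm_capable)
		return odm_disabled;
	/* Combine when a single pipe cannot reach the required DISPCLK... */
	if (dispclk_no_combine > max_dispclk)
		return odm_2to1;
	/* ...or when a 4:2:0 output is wider than one pipe can drive. */
	if (output_is_420 && h_active > MAX_420_IMAGE_WIDTH)
		return odm_2to1;
	return odm_disabled;
}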
@@ -3992,16 +3999,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
@@ -4183,8 +4190,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4207,7 +4213,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4249,7 +4255,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4292,7 +4298,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true)
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4345,28 +4351,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
- locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
- dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+ dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
@@ -4406,14 +4412,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
- mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4423,9 +4429,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4436,9 +4442,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4448,9 +4454,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4460,9 +4466,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4473,9 +4479,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4511,7 +4517,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4550,7 +4556,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
@@ -4564,14 +4570,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4598,14 +4604,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
}
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4655,7 +4661,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
@@ -4700,7 +4706,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.DSCDelayPerState[i][k],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.ScalerEnabled[k],
@@ -4718,7 +4724,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
@@ -4728,15 +4734,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
@@ -4767,19 +4773,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+ if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
- locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+ locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
- if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+ if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
- locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+ locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
@@ -4798,13 +4804,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4829,7 +4835,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.ReturnBWPerState[i];
+ mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4843,9 +4849,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- + mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k];
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ + mode_lib->vba.MetaRowBytes[0][0][k]
+ + mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4873,9 +4879,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
@@ -4900,7 +4906,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > mode_lib->vba.ReturnBWPerState[i]) {
+ > mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4919,13 +4925,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k];
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+ mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
- if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+ if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
@@ -5013,7 +5019,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5023,7 +5029,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5043,7 +5049,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5089,7 +5095,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
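[Note: the bulk of this file's churn is mechanical. Per-state results such as ReturnBWPerState, ROBSupport, ViewportSizeSupport and TotalVerticalActiveBandwidthSupport gain a second dimension read through index 0, and per-plane prefetch intermediates such as PrefetchLinesY, MetaRowBytes and MaximumVStartup gain two leading [0][0] indices, presumably to line the 2.0 code up with newer DML interfaces that sweep a second axis. A toy before/after to make the shape change concrete; the struct and bound are simplified stand-ins for the real vba definitions.]

#define NUM_STATES 9 /* stand-in; the real bound comes from the DML headers */

struct vba_old {
	double ReturnBWPerState[NUM_STATES + 1];      /* one value per state */
};

struct vba_new {
	/*
	 * Second axis reserved for additional configurations; this patch
	 * always reads and writes column 0, so behavior is unchanged:
	 *   bw = vba->ReturnBWPerState[i][0];
	 */
	double ReturnBWPerState[NUM_STATES + 1][2];
};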
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 6482d7b99bae..485a9c62ec58 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -39,6 +39,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN20_MAX_420_IMAGE_WIDTH 4096
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -997,7 +998,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1395,11 +1396,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
else
mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2885,12 +2886,12 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
@@ -3385,7 +3386,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3483,10 +3484,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->FabricAndDRAMBandwidthPerState[i] * 1000)
* locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
- locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState;
+ locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState;
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3497,7 +3498,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3509,7 +3510,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000);
if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency /
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
/ (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i]
@@ -3520,7 +3521,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024);
if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) {
- locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0],
4 * locals->ReturnBWToDCNPerState *
(locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024
* locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency /
@@ -3558,12 +3559,12 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i]
- + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
@@ -3935,18 +3936,25 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false ||
- (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
- && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
- locals->ODMCombineEnablePerState[i][k] = false;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
- } else {
- locals->ODMCombineEnablePerState[i][k] = true;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
+ locals->ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ if (mode_lib->vba.ODMCapability) {
+ if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ }
}
+
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
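[Note: the 20v2 variant of the policy sketched earlier adds a DSC clause: combine is also forced when DSC is enabled and the active width exceeds DCN20_MAX_DSC_IMAGE_WIDTH. As a boolean predicate, with the same caveat that the names are illustrative rather than driver code.]

#include <stdbool.h>

/* Illustrative predicate equivalent to the 20v2 hunk above. */
static bool force_odm_combine_20v2(bool odm_capable,
		double dispclk_no_combine, double max_dispclk,
		bool dsc_enabled, unsigned int h_active, bool output_is_420)
{
	if (!odm_capable)
		return false;
	return dispclk_no_combine > max_dispclk
		|| (dsc_enabled && h_active > 5184 /* DCN20_MAX_DSC_IMAGE_WIDTH */)
		|| (output_is_420 && h_active > 4096 /* DCN20_MAX_420_IMAGE_WIDTH */);
}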
@@ -4035,16 +4043,16 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
@@ -4226,8 +4234,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4250,7 +4257,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4292,7 +4299,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4335,7 +4342,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true)
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1)
locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k]));
else
locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k];
@@ -4388,28 +4395,28 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min(
locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] *
- locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i],
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesChroma),
locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]);
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
- dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]),
+ dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]),
locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) -
locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 *
- dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]));
+ dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]));
}
}
}
@@ -4454,14 +4461,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k];
locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k];
locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k];
- mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.PixelClock[k] / 16.0);
if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4471,9 +4478,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4484,9 +4491,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
} else {
if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4496,9 +4503,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETY[k],
@@ -4508,9 +4515,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.RequiredDPPCLK[i][j][k]);
}
if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4521,9 +4528,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
* mode_lib->vba.PixelClock[k]
/ mode_lib->vba.NoOfDPP[i][j][k]);
} else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] =
dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
1.1
* dml_ceil(
mode_lib->vba.BytePerPixelInDETC[k],
@@ -4559,7 +4566,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k],
&mode_lib->vba.dpte_row_height[k],
&mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4598,7 +4605,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k],
&mode_lib->vba.dpte_row_height_chroma[k],
&mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k] / 2.0,
mode_lib->vba.VTAPsChroma[k],
@@ -4612,14 +4619,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4646,14 +4653,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j]
* mode_lib->vba.MetaChunkSize)
* 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
if (mode_lib->vba.GPUVMEnable == true) {
mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ mode_lib->vba.TotalNumberOfActiveDPP[i][j]
* mode_lib->vba.PTEGroupSize
- / mode_lib->vba.ReturnBWPerState[i];
+ / mode_lib->vba.ReturnBWPerState[i][0];
}
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -4703,7 +4710,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
}
@@ -4743,7 +4750,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
}
- CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
+ CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth,
mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k],
mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal,
@@ -4757,14 +4764,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.RequiredDPPCLK[i][j][k],
mode_lib->vba.RequiredDISPCLK[i][j],
mode_lib->vba.PixelClock[k],
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
mode_lib->vba.NoOfDPP[i][j][k],
mode_lib->vba.NumberOfCursors[k],
mode_lib->vba.VTotal[k]
- mode_lib->vba.VActive[k],
mode_lib->vba.HTotal[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
mode_lib->vba.DynamicMetadataEnable[k],
@@ -4774,15 +4781,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.UrgentLatencyPixelDataOnly,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
+ mode_lib->vba.PrefetchLinesY[0][0][k],
mode_lib->vba.SwathWidthYPerState[i][j][k],
mode_lib->vba.BytePerPixelInDETY[k],
mode_lib->vba.PrefillY[k],
mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.PrefetchLinesC[0][0][k],
mode_lib->vba.BytePerPixelInDETC[k],
mode_lib->vba.PrefillC[k],
mode_lib->vba.MaxNumSwC[k],
@@ -4812,19 +4819,19 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->prefetch_vm_bw_valid = true;
locals->prefetch_row_bw_valid = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0)
+ if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0)
locals->prefetch_vm_bw[k] = 0;
else if (locals->LinesForMetaPTE[k] > 0)
- locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k]
+ locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k]
/ (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_vm_bw[k] = 0;
locals->prefetch_vm_bw_valid = false;
}
- if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0)
+ if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0)
locals->prefetch_row_bw[k] = 0;
else if (locals->LinesForMetaAndDPTERow[k] > 0)
- locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k])
+ locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k])
/ (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]);
else {
locals->prefetch_row_bw[k] = 0;
@@ -4843,13 +4850,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])
+ mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) {
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) {
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) {
locals->PrefetchSupported[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4874,7 +4881,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
if (mode_lib->vba.PrefetchSupported[i][j] == true
&& mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.ReturnBWPerState[i];
+ mode_lib->vba.ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip =
mode_lib->vba.BandwidthAvailableForImmediateFlip
@@ -4888,9 +4895,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
mode_lib->vba.ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- + mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k];
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k]
+ + mode_lib->vba.MetaRowBytes[0][0][k]
+ + mode_lib->vba.DPTEBytesPerRow[0][0][k];
}
}
mode_lib->vba.TotImmediateFlipBytes = 0.0;
@@ -4918,9 +4925,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
/ mode_lib->vba.PixelClock[k],
mode_lib->vba.VRatio[k],
mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k],
+ mode_lib->vba.MetaRowBytes[0][0][k],
+ mode_lib->vba.DPTEBytesPerRow[0][0][k],
mode_lib->vba.DCCEnable[k],
mode_lib->vba.dpte_row_height[k],
mode_lib->vba.meta_row_height[k],
@@ -4945,7 +4952,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
}
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > mode_lib->vba.ReturnBWPerState[i]) {
+ > mode_lib->vba.ReturnBWPerState[i][0]) {
mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4961,13 +4968,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
/*Vertical Active BW support*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth *
+ mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth *
mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) *
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100;
- if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i])
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true;
+ if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0])
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true;
else
- mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false;
+ mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false;
}
/*PTE Buffer Size Check*/
@@ -5055,7 +5062,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5065,7 +5072,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
status = DML_FAIL_DSC_CLK_REQUIRED;
} else if (locals->UrgentLatencySupport[i][j] != true) {
status = DML_FAIL_URGENT_LATENCY;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5085,7 +5092,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
status = DML_FAIL_PITCH_SUPPORT;
} else if (locals->PrefetchSupported[i][j] != true) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->VRatioInPrefetchSupported[i][j] != true) {
status = DML_FAIL_V_RATIO_PREFETCH;
@@ -5131,7 +5138,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 9df24ececcec..ca807846032f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
@@ -958,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // TODO
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
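[Note: beyond the 0/1-to-false/true cleanup, the dual-plane branch of handle_det_buf_split above implements a simple fit test: two luma plus two chroma swaths must fit in the detile buffer for full 256-byte requests, otherwise the luma swath is halved and luma drops to 128-byte requests. A self-contained sketch of that branch; names simplified, single-plane handling omitted.]

#include <stdbool.h>

static void pick_request_size(unsigned int full_swath_bytes_l,
		unsigned int full_swath_bytes_c,
		unsigned int detile_buf_size_in_bytes,
		bool *req128_l, bool *req128_c)
{
	/* Double-buffered: two luma and two chroma swaths must fit. */
	unsigned int total = 2 * full_swath_bytes_l + 2 * full_swath_bytes_c;

	if (total <= detile_buf_size_in_bytes) {
		/* Everything fits: keep full 256-byte requests. */
		*req128_l = false;
		*req128_c = false;
	} else {
		/* Halve the luma swath: 128-byte requests for luma only. */
		*req128_l = true;
		*req128_c = false;
	}
}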
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 1e6aeb1bd2bf..287b7a0ad108 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
const display_pipe_source_params_st pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // TODO
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 945291d5ad98..e6617c958bb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -65,6 +65,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
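+/* widest 4:2:0 timing a single pipe can output before ODM combine is required */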
+#define DCN21_MAX_420_IMAGE_WIDTH 4096
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -197,7 +198,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
- long *dpte_group_bytes,
+ unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
@@ -295,7 +296,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double UrgentOutOfOrderReturn,
double ReturnBW,
bool GPUVMEnable,
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
@@ -309,13 +310,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
- unsigned int SwathWidthSingleDPPY[],
+ double SwathWidthSingleDPPY[],
unsigned int SwathHeightY[],
double ReadBandwidthPlaneLuma[],
unsigned int SwathHeightC[],
double ReadBandwidthPlaneChroma[],
unsigned int LBBitPerPixel[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
double HRatio[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
@@ -344,7 +345,7 @@ static void CalculateDCFCLKDeepSleep(
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double VRatio[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
@@ -435,7 +436,7 @@ static void CalculateMetaAndPTETimes(
unsigned int meta_row_height[],
unsigned int meta_req_width[],
unsigned int meta_req_height[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
@@ -477,7 +478,7 @@ static double CalculateExtraLatency(
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
int HostVMMaxPageTableLevels,
@@ -1280,7 +1281,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int *meta_row_width,
unsigned int *meta_row_height,
unsigned int *vm_group_bytes,
- long *dpte_group_bytes,
+ unsigned int *dpte_group_bytes,
unsigned int *PixelPTEReqWidth,
unsigned int *PixelPTEReqHeight,
unsigned int *PTERequestSize,
@@ -1338,7 +1339,7 @@ static unsigned int CalculateVMAndRowBytes(
*MetaRowByte = 0;
}
- if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) {
+ if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) {
MacroTileSizeBytes = 256;
MacroTileHeight = BlockHeight256Bytes;
} else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
@@ -1683,11 +1684,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
else
locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1)
MainPlaneDoesODMCombine = true;
if (MainPlaneDoesODMCombine == true)
@@ -2940,12 +2941,12 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
SwathWidth = mode_lib->vba.ViewportHeight[k];
}
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ && mode_lib->vba.ODMCombineEnabled[j] == dm_odm_combine_mode_2to1) {
MainPlaneDoesODMCombine = true;
}
}
@@ -3453,7 +3454,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
== dm_420_10))
|| (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl
|| mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
+ == dm_sw_gfx7_2d_thin_l_vp)
&& !((mode_lib->vba.SourcePixelFormat[k]
== dm_444_64
|| mode_lib->vba.SourcePixelFormat[k]
@@ -3542,17 +3543,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->IdealSDPPortBandwidthPerState[i] = dml_min3(
+ locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3(
mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
* mode_lib->vba.DRAMChannelWidth,
mode_lib->vba.FabricClockPerState[i]
* mode_lib->vba.FabricDatapathToDCNDataReturn);
if (mode_lib->vba.HostVMEnable == false) {
- locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i]
+ locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0;
} else {
- locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i]
+ locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0]
* mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0;
}
}
@@ -3589,12 +3590,12 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly)
- * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i]
+ * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0]
> locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- locals->ROBSupport[i] = true;
+ locals->ROBSupport[i][0] = true;
} else {
- locals->ROBSupport[i] = false;
+ locals->ROBSupport[i][0] = false;
}
}
/*Writeback Mode Support Check*/
@@ -3971,18 +3972,25 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false ||
- (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
- && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
- locals->ODMCombineEnablePerState[i][k] = false;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
- } else {
- locals->ODMCombineEnablePerState[i][k] = true;
- mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+
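+ /* Default to no ODM combine; enable it when DISPCLK would exceed the
+ * rounded-down maximum, when DSC runs on a surface wider than one DSC
+ * engine can handle, or when a 4:2:0 output exceeds the single-pipe
+ * width limit.
+ */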
+ locals->ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ if (mode_lib->vba.ODMCapability) {
+ if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+ locals->ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ }
}
+
if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
&& locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]
- && locals->ODMCombineEnablePerState[i][k] == false) {
+ && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] =
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
@@ -4071,16 +4079,16 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Viewport Size Check*/
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
- locals->ViewportSizeSupport[i] = true;
+ locals->ViewportSizeSupport[i][0] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]))
> locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
} else {
if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) {
- locals->ViewportSizeSupport[i] = false;
+ locals->ViewportSizeSupport[i][0] = false;
}
}
}
@@ -4121,11 +4129,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->RequiresDSC[i][k] = 0;
+ locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = 0;
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.Output[k] == dm_hdmi) {
- locals->RequiresDSC[i][k] = 0;
+ locals->RequiresDSC[i][k] = false;
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
@@ -4269,8 +4277,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DSCFormatFactor = 1;
}
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k]
- == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor
> (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) {
locals->DSCCLKRequiredMoreThanSupported[i] =
@@ -4293,7 +4300,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (locals->RequiresDSC[i][k] == true) {
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
mode_lib->vba.TotalDSCUnitsRequired =
mode_lib->vba.TotalDSCUnitsRequired + 2.0;
} else {
@@ -4335,7 +4342,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.bpp = locals->OutputBppPerState[i][k];
}
if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (locals->ODMCombineEnablePerState[i][k] == false) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
locals->DSCDelayPerState[i][k] =
dscceComputeDelay(
mode_lib->vba.DSCInputBitPerComponent[k],
@@ -4399,7 +4406,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k];
locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k];
- if (locals->ODMCombineEnablePerState[i][k] == true) {
+ if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
locals->SwathWidthYThisState[k] =
dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]));
} else {
@@ -4451,7 +4458,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PSCL_FACTOR,
locals->PSCL_FACTOR_CHROMA,
locals->RequiredDPPCLKThisState,
- &mode_lib->vba.ProjectedDCFCLKDeepSleep);
+ &mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]);
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
@@ -4496,7 +4503,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PTERequestSizeC,
locals->dpde0_bytes_per_frame_ub_c,
locals->meta_pte_bytes_per_frame_ub_c);
- locals->PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k]/2,
mode_lib->vba.VTAPsChroma[k],
@@ -4511,7 +4518,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
mode_lib->vba.MetaRowBytesC = 0.0;
mode_lib->vba.DPTEBytesPerRowC = 0.0;
- locals->PrefetchLinesC[k] = 0.0;
+ locals->PrefetchLinesC[0][0][k] = 0.0;
locals->PTEBufferSizeNotExceededC[i][j][k] = true;
locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma;
}
@@ -4552,7 +4559,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PTERequestSizeY,
locals->dpde0_bytes_per_frame_ub_l,
locals->meta_pte_bytes_per_frame_ub_l);
- locals->PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines(
mode_lib,
mode_lib->vba.VRatio[k],
mode_lib->vba.vtaps[k],
@@ -4562,10 +4569,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ViewportYStartY[k],
&locals->PrefillY[k],
&locals->MaxNumSwY[k]);
- locals->PDEAndMetaPTEBytesPerFrame[k] =
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k] =
mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
- locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
+ locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC;
+ locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC;
CalculateActiveRowBandwidth(
mode_lib->vba.GPUVMEnable,
@@ -4591,7 +4598,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelChunkSizeInKByte,
locals->TotalNumberOfDCCActiveDPP[i][j],
mode_lib->vba.MetaChunkSize,
- locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0],
mode_lib->vba.GPUVMEnable,
mode_lib->vba.HostVMEnable,
mode_lib->vba.NumberOfActivePlanes,
@@ -4602,7 +4609,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels);
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
if (mode_lib->vba.WritebackEnable[k] == true) {
@@ -4644,15 +4651,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
}
- mode_lib->vba.MaxMaxVStartup = 0;
+ mode_lib->vba.MaxMaxVStartup[0][0] = 0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0));
- mode_lib->vba.MaxMaxVStartup = dml_max(mode_lib->vba.MaxMaxVStartup, locals->MaximumVStartup[k]);
+ mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]);
}
mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode;
- mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup;
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
do {
mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode;
mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup;
@@ -4693,7 +4700,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
myPipe.PixelClock = mode_lib->vba.PixelClock[k];
- myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
myPipe.SourceScan = mode_lib->vba.SourceScan[k];
@@ -4727,8 +4734,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[k]),
- locals->MaximumVStartup[k],
+ dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
+ locals->MaximumVStartup[0][0][k],
mode_lib->vba.GPUVMMaxPageTableLevels,
mode_lib->vba.GPUVMEnable,
&myHostVM,
@@ -4739,15 +4746,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentLatency,
mode_lib->vba.ExtraLatency,
mode_lib->vba.TimeCalc,
- locals->PDEAndMetaPTEBytesPerFrame[k],
- locals->MetaRowBytes[k],
- locals->DPTEBytesPerRow[k],
- locals->PrefetchLinesY[k],
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
+ locals->PrefetchLinesY[0][0][k],
locals->SwathWidthYThisState[k],
locals->BytePerPixelInDETY[k],
locals->PrefillY[k],
locals->MaxNumSwY[k],
- locals->PrefetchLinesC[k],
+ locals->PrefetchLinesC[0][0][k],
locals->BytePerPixelInDETC[k],
locals->PrefillC[k],
locals->MaxNumSwC[k],
@@ -4836,14 +4843,14 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k]
+ locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]);
}
- locals->BandwidthWithoutPrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]
+ locals->BandwidthWithoutPrefetchSupported[i][0] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]
|| locals->NotEnoughUrgentLatencyHiding == 1) {
- locals->BandwidthWithoutPrefetchSupported[i] = false;
+ locals->BandwidthWithoutPrefetchSupported[i][0] = false;
}
locals->PrefetchSupported[i][j] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]
|| locals->NotEnoughUrgentLatencyHiding == 1
|| locals->NotEnoughUrgentLatencyHidingPre == 1) {
locals->PrefetchSupported[i][j] = false;
@@ -4872,17 +4879,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) {
- mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup;
+ mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0];
mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1;
} else {
mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1;
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
- && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup
+ && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
|| mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
- mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i];
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip
- dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k]
@@ -4895,7 +4902,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.TotImmediateFlipBytes = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes
- + locals->PDEAndMetaPTEBytesPerFrame[k] + locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k];
+ + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k];
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4910,9 +4917,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.HostVMMaxPageTableLevels,
mode_lib->vba.HostVMCachedPageTableLevels,
mode_lib->vba.GPUVMEnable,
- locals->PDEAndMetaPTEBytesPerFrame[k],
- locals->MetaRowBytes[k],
- locals->DPTEBytesPerRow[k],
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
mode_lib->vba.BandwidthAvailableForImmediateFlip,
mode_lib->vba.TotImmediateFlipBytes,
mode_lib->vba.SourcePixelFormat[k],
@@ -4943,7 +4950,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
locals->ImmediateFlipSupportedForState[i][j] = true;
if (mode_lib->vba.total_dcn_read_bw_with_flip
- > locals->ReturnBWPerState[i]) {
+ > locals->ReturnBWPerState[i][0]) {
locals->ImmediateFlipSupportedForState[i][j] = false;
}
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
@@ -4970,7 +4977,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.WritebackInterfaceChromaBufferSize,
mode_lib->vba.DCFCLKPerState[i],
mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels,
- locals->ReturnBWPerState[i],
+ locals->ReturnBWPerState[i][0],
mode_lib->vba.GPUVMEnable,
locals->dpte_group_bytes,
mode_lib->vba.MetaChunkSize,
@@ -4982,7 +4989,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.SRExitTime,
mode_lib->vba.SREnterPlusExitTime,
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0],
locals->NoOfDPPThisState,
mode_lib->vba.DCCEnable,
locals->RequiredDPPCLKThisState,
@@ -5025,8 +5032,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k];
}
for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) {
- locals->MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(
- locals->IdealSDPPortBandwidthPerState[i] *
+ locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(
+ locals->IdealSDPPortBandwidthPerState[i][0] *
mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation
/ 100.0, mode_lib->vba.DRAMSpeedPerState[i] *
mode_lib->vba.NumberOfChannels *
@@ -5034,10 +5041,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
/ 100.0);
- if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i]) {
- locals->TotalVerticalActiveBandwidthSupport[i] = true;
+ if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) {
+ locals->TotalVerticalActiveBandwidthSupport[i][0] = true;
} else {
- locals->TotalVerticalActiveBandwidthSupport[i] = false;
+ locals->TotalVerticalActiveBandwidthSupport[i][0] = false;
}
}
}
@@ -5116,7 +5123,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_SCALE_RATIO_TAP;
} else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i] != true) {
+ } else if (locals->ViewportSizeSupport[i][0] != true) {
status = DML_FAIL_VIEWPORT_SIZE;
} else if (locals->DIOSupport[i] != true) {
status = DML_FAIL_DIO_SUPPORT;
@@ -5124,7 +5131,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_NOT_ENOUGH_DSC;
} else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
status = DML_FAIL_DSC_CLK_REQUIRED;
- } else if (locals->ROBSupport[i] != true) {
+ } else if (locals->ROBSupport[i][0] != true) {
status = DML_FAIL_REORDERING_BUFFER;
} else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
status = DML_FAIL_DISPCLK_DPPCLK;
@@ -5142,7 +5149,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
status = DML_FAIL_CURSOR_SUPPORT;
} else if (mode_lib->vba.PitchSupport != true) {
status = DML_FAIL_PITCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) {
+ } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
} else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
status = DML_FAIL_PTE_BUFFER_SIZE;
@@ -5198,13 +5205,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
- mode_lib->vba.ODMCombineEnabled[k] = 0;
+ mode_lib->vba.ODMCombineEnabled[k] = false;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
@@ -5227,7 +5234,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double UrgentOutOfOrderReturn,
double ReturnBW,
bool GPUVMEnable,
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
@@ -5241,13 +5248,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
- unsigned int SwathWidthSingleDPPY[],
+ double SwathWidthSingleDPPY[],
unsigned int SwathHeightY[],
double ReadBandwidthPlaneLuma[],
unsigned int SwathHeightC[],
double ReadBandwidthPlaneChroma[],
unsigned int LBBitPerPixel[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
double HRatio[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
@@ -5503,7 +5510,7 @@ static void CalculateDCFCLKDeepSleep(
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double VRatio[],
- unsigned int SwathWidthY[],
+ double SwathWidthY[],
int DPPPerPlane[],
double HRatio[],
double PixelClock[],
@@ -5831,7 +5838,7 @@ static void CalculateMetaAndPTETimes(
unsigned int meta_row_height[],
unsigned int meta_req_width[],
unsigned int meta_req_height[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
unsigned int PTERequestSizeY[],
unsigned int PTERequestSizeC[],
unsigned int PixelPTEReqWidthY[],
@@ -6087,7 +6094,7 @@ static double CalculateExtraLatency(
bool HostVMEnable,
int NumberOfActivePlanes,
int NumberOfDPP[],
- long dpte_group_bytes[],
+ int dpte_group_bytes[],
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
int HostVMMaxPageTableLevels,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index e60af383b4db..a38baa73d484 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -82,10 +82,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format
static bool is_dual_plane(enum source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -222,8 +222,8 @@ static void handle_det_buf_split(
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -248,13 +248,13 @@ static void handle_det_buf_split(
total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l / 2;
swath_bytes_c = full_swath_bytes_packed_c;
}
@@ -264,9 +264,9 @@ static void handle_det_buf_split(
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
+ req128_l = false;
else
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = total_swath_bytes;
swath_bytes_c = 0;
@@ -679,7 +679,7 @@ static void get_surf_rq_param(
const display_pipe_params_st pipe_param,
bool is_chroma)
{
- bool mode_422 = 0;
+ bool mode_422 = false;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
@@ -1010,7 +1010,7 @@ static void dml_rq_dlg_get_dlg_params(
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class) (src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = false; // FIXME
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 55d4cb23a073..bfc2f39bd1ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -85,7 +85,7 @@ enum dm_swizzle_mode {
dm_sw_var_s_x = 29,
dm_sw_var_d_x = 30,
dm_sw_64kb_r_x,
- dm_sw_gfx7_2d_thin_lvp,
+ dm_sw_gfx7_2d_thin_l_vp,
dm_sw_gfx7_2d_thin_gl,
};
enum lb_depth {
@@ -119,6 +119,10 @@ enum mpc_combine_affinity {
dm_mpc_never
};
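+/* request sizes used by the rq/dlg detile-buffer split calculations */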
+enum RequestType {
+ REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
+};
+
enum self_refresh_affinity {
dm_try_to_allow_self_refresh_and_mclk_switch,
dm_allow_self_refresh_and_mclk_switch,
@@ -165,4 +169,16 @@ enum odm_combine_mode {
dm_odm_combine_mode_4to1,
};
+enum odm_combine_policy {
+ dm_odm_combine_policy_dal,
+ dm_odm_combine_policy_none,
+ dm_odm_combine_policy_2to1,
+ dm_odm_combine_policy_4to1,
+};
+
+enum immediate_flip_requirement {
+ dm_immediate_flip_not_required,
+ dm_immediate_flip_required,
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 220d5e610f1f..658f81e757e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st {
double dispclk_mhz;
double phyclk_mhz;
double dppclk_mhz;
+ double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
@@ -214,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_format;
unsigned char dcc;
unsigned int dcc_rate;
+ unsigned int dcc_rate_chroma;
unsigned char dcc_use_global;
unsigned char vm;
bool gpuvm; // gpuvm enabled
@@ -225,7 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_scan;
int sw_mode;
int macro_tile_size;
+ unsigned int surface_width_y;
unsigned int surface_height_y;
+ unsigned int surface_width_c;
+ unsigned int surface_height_c;
unsigned int viewport_width;
unsigned int viewport_height;
unsigned int viewport_y_y;
@@ -278,6 +283,7 @@ struct _vcs_dpi_display_output_params_st {
int output_type;
int output_format;
int dsc_slices;
+ int max_audio_sample_rate;
struct writeback_st wb;
};
@@ -323,7 +329,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
- unsigned char odm_combine;
+ unsigned int odm_combine;
unsigned char use_maximum_vstartup;
unsigned int vtotal_max;
unsigned int vtotal_min;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 15b72a8b5174..b3c96d9b472f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -264,7 +264,10 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts;
//mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+ mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
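+ /* take the voltage range from the SoC bounding box instead of a hardcoded maximum */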
+ mode_lib->vba.MinVoltageLevel = 0;
+ mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
@@ -306,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes;
mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size;
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = 5;
mode_lib->vba.WritebackChromaLineBufferWidth =
ip->writeback_chroma_line_buffer_width_pixels;
@@ -423,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
ip->dcc_supported : src->dcc && ip->dcc_supported;
mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */
- mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0;
- mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0;
+ mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+ mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;
mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum source_format_class) (src->source_format);
@@ -436,8 +437,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
- mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
- dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
@@ -454,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->dp_lanes;
/* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */
mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] =
- 44.1 * 1000;
+ dout->max_audio_sample_rate;
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
@@ -590,6 +589,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest;
+ display_output_params_st *dout_k = &pipes[k].dout;
if (src_k->is_hsplit && !visited[k]
&& src->hsplit_grp == src_k->hsplit_grp) {
@@ -600,12 +600,18 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_width;
+ mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_width;
mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=
dst_k->recout_width;
} else {
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_height;
+ mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_height;
}
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+ dout_k->dsc_slices;
visited[k] = true;
}
@@ -811,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
unsigned int total_pipes = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
- mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
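+ /* fall back to the MPC-combine-off entry if this combine level was never populated */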
+ if (mode_lib->vba.ReturnBW == 0)
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0];
mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
fetch_socbb_params(mode_lib);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 3eb657ed5714..2875efd85467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -157,6 +157,7 @@ struct vba_vars_st {
bool DummyPStateCheck;
bool DRAMClockChangeSupportsVActive;
bool PrefetchModeSupported;
+ bool PrefetchAndImmediateFlipSupported;
enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only
double XFCRemoteSurfaceFlipDelay;
double TInitXFill;
@@ -318,8 +319,7 @@ struct vba_vars_st {
unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
double DCCRate[DC__NUM_DPP__MAX];
double AverageDCCCompressionRate;
- bool ODMCombineEnabled[DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineTypeEnabled[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX];
double OutputBpp[DC__NUM_DPP__MAX];
bool DSCEnabled[DC__NUM_DPP__MAX];
unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
@@ -347,6 +347,7 @@ struct vba_vars_st {
unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
double BandwidthAvailableForImmediateFlip;
unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2];
+ unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2];
unsigned int MinPrefetchMode;
unsigned int MaxPrefetchMode;
bool AnyLinesForVMOrRowTooLarge;
@@ -396,6 +397,7 @@ struct vba_vars_st {
bool WritebackLumaAndChromaScalingSupported;
bool Cursor64BppSupport;
double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+ double DCFCLKState[DC__VOLTAGE_STATES + 1][2];
double FabricClockPerState[DC__VOLTAGE_STATES + 1];
double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
@@ -444,7 +446,7 @@ struct vba_vars_st {
double OutputLinkDPLanes[DC__NUM_DPP__MAX];
double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only
double ImmediateFlipBW[DC__NUM_DPP__MAX];
- double MaxMaxVStartup;
+ double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2];
double WritebackLumaVExtra;
double WritebackChromaVExtra;
@@ -471,7 +473,7 @@ struct vba_vars_st {
double RoundedUpMaxSwathSizeBytesC;
double EffectiveDETLBLinesLuma;
double EffectiveDETLBLinesChroma;
- double ProjectedDCFCLKDeepSleep;
+ double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2];
double PDEAndMetaPTEBytesPerFrameY;
double PDEAndMetaPTEBytesPerFrameC;
unsigned int MetaRowBytesY;
@@ -489,12 +491,11 @@ struct vba_vars_st {
double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output
/* ms locals */
- double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1];
+ double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2];
unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
int NoOfDPPThisState[DC__NUM_DPP__MAX];
- bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineTypeEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathWidthYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX];
@@ -506,7 +507,7 @@ struct vba_vars_st {
double RequiredDPPCLKThisState[DC__NUM_DPP__MAX];
bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2];
bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2];
@@ -515,22 +516,22 @@ struct vba_vars_st {
unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2];
unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2];
bool ModeSupport[DC__VOLTAGE_STATES + 1][2];
- double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+ double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];
bool DIOSupport[DC__VOLTAGE_STATES + 1];
bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
- bool ROBSupport[DC__VOLTAGE_STATES + 1];
+ bool ROBSupport[DC__VOLTAGE_STATES + 1][2];
bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2];
- bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1];
- double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1];
+ bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2];
+ double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2];
double PrefetchBW[DC__NUM_DPP__MAX];
- double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
- double MetaRowBytes[DC__NUM_DPP__MAX];
- double DPTEBytesPerRow[DC__NUM_DPP__MAX];
- double PrefetchLinesY[DC__NUM_DPP__MAX];
- double PrefetchLinesC[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
double PrefillY[DC__NUM_DPP__MAX];
@@ -539,7 +540,7 @@ struct vba_vars_st {
double LinesForMetaPTE[DC__NUM_DPP__MAX];
double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
- unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
double BytePerPixelInDETY[DC__NUM_DPP__MAX];
double BytePerPixelInDETC[DC__NUM_DPP__MAX];
bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
@@ -547,7 +548,7 @@ struct vba_vars_st {
double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2];
unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
@@ -562,7 +563,7 @@ struct vba_vars_st {
double WriteBandwidth[DC__NUM_DPP__MAX];
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
- double MaximumVStartup[DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
@@ -579,7 +580,7 @@ struct vba_vars_st {
bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2];
double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
unsigned int vm_group_bytes[DC__NUM_DPP__MAX];
- long dpte_group_bytes[DC__NUM_DPP__MAX];
+ unsigned int dpte_group_bytes[DC__NUM_DPP__MAX];
unsigned int dpte_row_height[DC__NUM_DPP__MAX];
unsigned int meta_req_height[DC__NUM_DPP__MAX];
unsigned int meta_req_width[DC__NUM_DPP__MAX];
@@ -605,14 +606,14 @@ struct vba_vars_st {
double UrgentBurstFactorChroma[DC__NUM_DPP__MAX];
double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX];
+
bool MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
double SwathWidthCSingleDPP[DC__NUM_DPP__MAX];
double MaximumSwathWidthInLineBufferLuma;
double MaximumSwathWidthInLineBufferChroma;
double MaximumSwathWidthLuma[DC__NUM_DPP__MAX];
double MaximumSwathWidthChroma[DC__NUM_DPP__MAX];
- bool odm_combine_dummy[DC__NUM_DPP__MAX];
- enum odm_combine_mode odm_combine_mode_dummy[DC__NUM_DPP__MAX];
+ enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX];
double dummy1[DC__NUM_DPP__MAX];
double dummy2[DC__NUM_DPP__MAX];
double dummy3[DC__NUM_DPP__MAX];
@@ -622,9 +623,9 @@ struct vba_vars_st {
double dummy7[DC__NUM_DPP__MAX];
double dummy8[DC__NUM_DPP__MAX];
unsigned int dummyinteger1ms[DC__NUM_DPP__MAX];
- unsigned int dummyinteger2ms[DC__NUM_DPP__MAX];
+ double dummyinteger2ms[DC__NUM_DPP__MAX];
unsigned int dummyinteger3[DC__NUM_DPP__MAX];
- unsigned int dummyinteger4;
+ unsigned int dummyinteger4[DC__NUM_DPP__MAX];
unsigned int dummyinteger5;
unsigned int dummyinteger6;
unsigned int dummyinteger7;
@@ -637,7 +638,6 @@ struct vba_vars_st {
unsigned int dummyintegerarr2[DC__NUM_DPP__MAX];
unsigned int dummyintegerarr3[DC__NUM_DPP__MAX];
unsigned int dummyintegerarr4[DC__NUM_DPP__MAX];
- long dummylongarr1[DC__NUM_DPP__MAX];
bool dummysinglestring;
bool SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double PlaneRequiredDISPCLKWithODMCombine2To1;
@@ -645,20 +645,19 @@ struct vba_vars_st {
unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2];
bool LinkDSCEnable;
bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1];
- bool ODMCombineEnableThisState[DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineEnableTypeThisState[DC__NUM_DPP__MAX];
- unsigned int SwathWidthCThisState[DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX];
+ double SwathWidthCThisState[DC__NUM_DPP__MAX];
bool ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitchY[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitchC[DC__NUM_DPP__MAX];
unsigned int NotEnoughUrgentLatencyHiding;
unsigned int NotEnoughUrgentLatencyHidingPre;
- long PTEBufferSizeInRequestsForLuma;
- long PTEBufferSizeInRequestsForChroma;
+ int PTEBufferSizeInRequestsForLuma;
+ int PTEBufferSizeInRequestsForChroma;
// Missing from VBA
- long dpte_group_bytes_chroma;
+ int dpte_group_bytes_chroma;
unsigned int vm_group_bytes_chroma;
double dst_x_after_scaler;
double dst_y_after_scaler;
@@ -683,8 +682,8 @@ struct vba_vars_st {
double MinTTUVBlank[DC__NUM_DPP__MAX];
double BytePerPixelDETY[DC__NUM_DPP__MAX];
double BytePerPixelDETC[DC__NUM_DPP__MAX];
- unsigned int SwathWidthY[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+ double SwathWidthY[DC__NUM_DPP__MAX];
+ double SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
double CursorRequestDeliveryTime[DC__NUM_DPP__MAX];
double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX];
double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
@@ -760,12 +759,12 @@ struct vba_vars_st {
double LinesInDETY[DC__NUM_DPP__MAX];
double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
- unsigned int SwathWidthC[DC__NUM_DPP__MAX];
+ double SwathWidthSingleDPPC[DC__NUM_DPP__MAX];
+ double SwathWidthC[DC__NUM_DPP__MAX];
unsigned int BytePerPixelY[DC__NUM_DPP__MAX];
unsigned int BytePerPixelC[DC__NUM_DPP__MAX];
- long dummyinteger1;
- long dummyinteger2;
+ unsigned int dummyinteger1;
+ unsigned int dummyinteger2;
double FinalDRAMClockChangeLatency;
double Tdmdl_vm[DC__NUM_DPP__MAX];
double Tdmdl[DC__NUM_DPP__MAX];
@@ -779,6 +778,7 @@ struct vba_vars_st {
unsigned int DCCCMaxCompressedBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX];
double VStartupMargin;
+ bool NotEnoughTimeForDynamicMetadata;
/* Missing from VBA */
unsigned int MaximumMaxVStartupLines;
@@ -814,7 +814,7 @@ struct vba_vars_st {
unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX];
double HRatioChroma[DC__NUM_DPP__MAX];
double VRatioChroma[DC__NUM_DPP__MAX];
- long WritebackSourceWidth[DC__NUM_DPP__MAX];
+ int WritebackSourceWidth[DC__NUM_DPP__MAX];
bool ModeIsSupported;
bool ODMCombine4To1Supported;
@@ -850,6 +850,58 @@ struct vba_vars_st {
unsigned int MaxNumHDMIFRLOutputs;
int AudioSampleRate[DC__NUM_DPP__MAX];
int AudioSampleLayout[DC__NUM_DPP__MAX];
+
+ int PercentMarginOverMinimumRequiredDCFCLK;
+ bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2];
+ enum immediate_flip_requirement ImmediateFlipRequirement;
+ double DETBufferSizeYThisState[DC__NUM_DPP__MAX];
+ double DETBufferSizeCThisState[DC__NUM_DPP__MAX];
+ bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX];
+ bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX];
+ double UrgLatency[DC__VOLTAGE_STATES + 1];
+ double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2];
+ unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
+ bool UseMinimumRequiredDCFCLK;
+ double WritebackDelayTime[DC__NUM_DPP__MAX];
+ unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
+ unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
+ unsigned int dummyinteger15;
+ unsigned int dummyinteger16;
+ unsigned int dummyinteger17;
+ unsigned int dummyinteger18;
+ unsigned int dummyinteger19;
+ unsigned int dummyinteger20;
+ unsigned int dummyinteger21;
+ unsigned int dummyinteger22;
+ unsigned int dummyinteger23;
+ unsigned int dummyinteger24;
+ unsigned int dummyinteger25;
+ unsigned int dummyinteger26;
+ unsigned int dummyinteger27;
+ unsigned int dummyinteger28;
+ unsigned int dummyinteger29;
+ bool dummystring[DC__NUM_DPP__MAX];
+ double BPP;
+ enum odm_combine_policy ODMCombinePolicy;
};
bool CalculateMinAndMaxPrefetchMode(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
index b953b02a1512..723af0b2dda0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -24,7 +24,7 @@
*/
#include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
#include "dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index eca140da13d8..ded71ea82413 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -27,7 +27,7 @@
#define __DML_INLINE_DEFS_H__
#include "dml_common_defs.h"
-#include "../calcs/dcn_calc_math.h"
+#include "dcn_calc_math.h"
#include "dml_logger.h"
static inline double dml_min(double a, double b)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 641ffb7cfaed..3f66868df171 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -2,7 +2,13 @@
#
# Makefile for the 'dsc' sub-component of DAL.
+ifdef CONFIG_X86
dsc_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+dsc_ccflags := -mhard-float -maltivec
+endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -10,6 +16,7 @@ IS_OLD_GCC = 1
endif
endif
+ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -18,6 +25,7 @@ dsc_ccflags += -mpreferred-stack-boundary=4
else
dsc_ccflags += -msse2
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d2423ad1fac2..87d682d25278 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -29,6 +29,9 @@
/* This module's internal functions */
+/* Default DSC policy target bitrate limit is 16 bpp. */
+static uint32_t dsc_policy_max_target_bpp_limit = 16;
+
static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing)
{
@@ -221,7 +224,8 @@ static void get_dsc_enc_caps(
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
if (dsc) {
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ if (!dsc->ctx->dc->debug.disable_dsc)
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
if (dsc->ctx->dc->debug.native422_support)
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
@@ -757,7 +761,7 @@ done:
return is_dsc_possible;
}
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
{
if (!dpcd_dsc_basic_data)
return false;
@@ -810,6 +814,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div))
return false;
+ if (dc->debug.dsc_bpp_increment_div) {
+ /* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid values,
+ * we'll accept all and clamp them into range. This also makes the above check against 0 redundant,
+ * but that check makes it explicit that the override is only used when it is non-zero.
+ */
+ if (dc->debug.dsc_bpp_increment_div >= 1)
+ dsc_sink_caps->bpp_increment_div = 1;
+ if (dc->debug.dsc_bpp_increment_div >= 2)
+ dsc_sink_caps->bpp_increment_div = 2;
+ if (dc->debug.dsc_bpp_increment_div >= 4)
+ dsc_sink_caps->bpp_increment_div = 4;
+ if (dc->debug.dsc_bpp_increment_div >= 8)
+ dsc_sink_caps->bpp_increment_div = 8;
+ if (dc->debug.dsc_bpp_increment_div >= 16)
+ dsc_sink_caps->bpp_increment_div = 16;
+ }
+
/* Extended caps */
if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
@@ -951,7 +972,12 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc
default:
return;
}
- /* internal upper limit to 16 bpp */
- if (policy->max_target_bpp > 16)
- policy->max_target_bpp = 16;
+ /* internal upper limit, default 16 bpp */
+ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
+ policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
+}
+
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
+{
+ dsc_policy_max_target_bpp_limit = limit;
}
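
The override above follows a simple module-scoped clamp-plus-setter pattern: a static default that a setter can replace, applied as an upper bound on the computed policy. A minimal standalone sketch of the same idea (the names here are illustrative, not the driver's API beyond what the hunk shows):

#include <stdint.h>

/* Module-scoped default that a setter can override, applied as an
 * upper clamp on the policy value. Placeholder names. */
static uint32_t policy_max_bpp_limit = 16; /* default: 16 bpp */

static void set_policy_max_bpp_limit(uint32_t limit)
{
	policy_max_bpp_limit = limit;
}

static uint32_t clamp_policy_bpp(uint32_t requested_bpp)
{
	return requested_bpp > policy_max_bpp_limit ?
		policy_max_bpp_limit : requested_bpp;
}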
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 16f6ef22367b..f285b76888fb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,6 +212,7 @@ struct resource_pool {
struct abm *abm;
struct dmcu *dmcu;
+ struct dmub_psr *psr;
const struct resource_funcs *funcs;
const struct resource_caps *res_cap;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 6198bccd6199..8b1f0ce6c2a7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -76,6 +76,8 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
index 45a07eeffbb6..45a07eeffbb6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 125e42dbd3c5..45ef390ae052 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -47,6 +47,26 @@ struct dpp_input_csc_matrix {
uint16_t regval[12];
};
+static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
struct dpp_grph_csc_adjustment {
struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
enum graphics_gamut_adjust_type gamut_adjust_type;
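
The register values in the table above are fixed-point CSC coefficients. The sRGB identity rows put 0x2000 on the diagonal, and 0x2cdd/8192 ≈ 1.402 matches the YCbCr601 Cr-to-R gain, which points to a format with 13 fractional bits where 0x2000 represents 1.0 and negative coefficients are stored two's-complement in 16 bits. A conversion sketch under that assumption:

#include <stdint.h>

/* Assumes 13 fractional bits, so 0x2000 == 1.0 (consistent with the
 * sRGB identity rows above); e.g. 0xe991 decodes as -5743/8192 ~= -0.70.
 * Illustration only, not the hardware specification. */
static uint16_t csc_coeff_to_regval(double coeff)
{
	int32_t fixed = (int32_t)(coeff * 8192.0 + (coeff >= 0 ? 0.5 : -0.5));

	return (uint16_t)fixed; /* truncate to the 16-bit register field */
}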
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 735f41901b88..459f95f52486 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -113,7 +113,8 @@ struct dwbc {
int wb_src_plane_inst;/*hubp, mpcc, inst*/
bool update_privacymask;
uint32_t mask_id;
-
+ int otg_inst;
+ bool mvc_cfg;
};
struct dwbc_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 85a34dde8526..2cb8466e657b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -63,6 +63,26 @@ struct hubp {
bool power_gated;
};
+struct surface_flip_registers {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+ bool tmz_surface;
+ bool immediate;
+ uint8_t vmid;
+ bool grph_stereo;
+};
+
struct hubp_funcs {
void (*hubp_setup)(
struct hubp *hubp,
@@ -82,9 +102,10 @@ struct hubp_funcs {
void (*mem_program_viewport)(
struct hubp *hubp,
const struct rect *viewport,
- const struct rect *viewport_c,
- enum dc_rotation_angle rotation);
- /* rotation needed for Renoir workaround */
+ const struct rect *viewport_c);
+
+ void (*apply_PLAT_54186_wa)(struct hubp *hubp,
+ const struct dc_plane_address *address);
bool (*hubp_program_surface_flip_and_addr)(
struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 2d3efd71fa51..e5e7d94026fc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -208,7 +208,8 @@ struct timing_generator_funcs {
bool enable, const struct dc_crtc_timing *timing);
void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
void (*set_static_screen_control)(struct timing_generator *tg,
- uint32_t value);
+ uint32_t event_triggers,
+ uint32_t num_frames);
void (*set_test_pattern)(
struct timing_generator *tg,
enum controller_dp_test_pattern test_pattern,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e9c6021a5372..209118f9f193 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -42,7 +42,7 @@ struct dc_state;
struct dc_stream_status;
struct dc_writeback_info;
struct dchub_init_data;
-struct dc_static_screen_events;
+struct dc_static_screen_params;
struct resource_pool;
struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
@@ -102,7 +102,7 @@ struct hw_sequencer_funcs {
unsigned int vmid, unsigned int vmid_frame_number);
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
int num_pipes,
- const struct dc_static_screen_events *events);
+ const struct dc_static_screen_params *events);
/* Stream Related */
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
@@ -149,16 +149,18 @@ struct hw_sequencer_funcs {
/* Writeback Related */
void (*update_writeback)(struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context);
void (*enable_writeback)(struct dc *dc,
- const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info,
struct dc_state *context);
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
+ bool (*mmhubbub_warmup)(struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info);
+
/* Clock Related */
enum dc_status (*set_clock)(struct dc *dc,
enum dc_clock_type clock_type,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 8ba06f015975..ecf566378ccd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -51,7 +51,7 @@ struct dc_state;
struct dc_stream_status;
struct dc_writeback_info;
struct dchub_init_data;
-struct dc_static_screen_events;
+struct dc_static_screen_params;
struct resource_pool;
struct resource_context;
struct stream_resource;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 47e307388581..2470405e996b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
#define IX_REG_READ(index_reg_name, data_reg_name, index) \
generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) \
+ generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \
+ IND_REG(index), \
+ n, __VA_ARGS__)
+#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \
+ IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \
+ FN(data_reg_name, field), val)
#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) \
generic_indirect_reg_update_ex(CTX, \
@@ -479,6 +486,12 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
uint32_t index);
+uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
+ uint32_t addr_index, uint32_t addr_data,
+ uint32_t index, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ ...);
+
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint32_t addr_index, uint32_t addr_data,
uint32_t index, uint32_t reg_val, int n,
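
IX_REG_GET/IX_REG_GET_N mirror the direct REG_GET family but go through an index/data register pair, handing (shift, mask, pointer) triples to generic_indirect_reg_get(). After the indexed read, the per-field extraction reduces to a mask-and-shift; a standalone sketch of that core step:

#include <stdint.h>

/* Core of what generic_indirect_reg_get() does per field once the
 * data register has been read through the index register: mask the
 * field out, then shift it down. The real helper repeats this for
 * up to N (shift, mask, value-pointer) vararg triples. */
static uint32_t indirect_field_get(uint32_t reg_val,
				   uint8_t shift, uint32_t mask)
{
	return (reg_val & mask) >> shift;
}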
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 7a85abc53d05..5ae8ada154ef 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -177,4 +177,6 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *aud_chk);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 13b9a9bb32c8..c34eba19860a 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -1,5 +1,6 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -50,7 +51,38 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_X86)
#include <asm/fpu/api.h>
+#define DC_FP_START() kernel_fpu_begin()
+#define DC_FP_END() kernel_fpu_end()
+#elif defined(CONFIG_PPC64)
+#include <asm/switch_to.h>
+#include <asm/cputable.h>
+#define DC_FP_START() { \
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+ preempt_disable(); \
+ enable_kernel_vsx(); \
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+ preempt_disable(); \
+ enable_kernel_altivec(); \
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+ preempt_disable(); \
+ enable_kernel_fp(); \
+ } \
+}
+#define DC_FP_END() { \
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
+ disable_kernel_vsx(); \
+ preempt_enable(); \
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
+ disable_kernel_altivec(); \
+ preempt_enable(); \
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
+ disable_kernel_fp(); \
+ preempt_enable(); \
+ } \
+}
+#endif
#endif
/*
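
With the PPC64 variants above, floating-point sections in DC are bracketed the same way on both architectures. A minimal usage sketch, where dcn_bw_calc() stands in for whichever FP-heavy helper is being called:

/* FP-safe calling pattern: DC_FP_START() disables preemption and
 * makes FPU/VSX/AltiVec state usable in kernel context before any
 * double-precision math runs; DC_FP_END() restores it. */
static void do_bandwidth_calc(struct dc *dc, struct dc_state *context)
{
	DC_FP_START();
	dcn_bw_calc(dc, context);	/* placeholder FP-heavy helper */
	DC_FP_END();
}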
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b10728f33f62..cd9532b4f14d 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,7 @@
#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
#define REG_SET_MASK 0xFFFF
+
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -47,6 +48,7 @@ enum dmub_cmd_type {
DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
DMUB_CMD__REG_REG_WAIT = 4,
+ DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
DMUB_CMD__VBIOS = 128,
};
@@ -145,6 +147,32 @@ struct dmub_rb_cmd_reg_wait {
struct dmub_cmd_reg_wait_data reg_wait;
};
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+struct dmub_cmd_PLAT_54186_wa {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ struct {
+ uint8_t hubp_inst : 4;
+ uint8_t tmz_surface : 1;
+ uint8_t immediate : 1;

+ uint8_t vmid : 4;
+ uint8_t grph_stereo : 1;
+ uint32_t reserved : 21;
+ } flip_params;
+ uint32_t reserved[9];
+};
+
+struct dmub_rb_cmd_PLAT_54186_wa {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_PLAT_54186_wa flip;
+};
+
struct dmub_cmd_digx_encoder_control_data {
union dig_encoder_control_parameters_v1_5 dig;
};
@@ -187,9 +215,28 @@ struct dmub_rb_cmd_dpphy_init {
};
struct dmub_cmd_psr_copy_settings_data {
- uint32_t reg1;
- uint32_t reg2;
- uint32_t reg3;
+ uint16_t psr_level;
+ uint8_t hubp_inst;
+ uint8_t dpp_inst;
+ uint8_t mpcc_inst;
+ uint8_t opp_inst;
+ uint8_t otg_inst;
+ uint8_t digfe_inst;
+ uint8_t digbe_inst;
+ uint8_t dpphy_inst;
+ uint8_t aux_inst;
+ uint8_t hyst_frames;
+ uint8_t hyst_lines;
+ uint8_t phy_num;
+ uint8_t phy_type;
+ uint8_t aux_repeat;
+ uint8_t smu_optimizations_en;
+ uint8_t skip_wait_for_pll_lock;
+ uint8_t frame_delay;
+ uint8_t smu_phy_id;
+ uint8_t num_of_controllers;
+ uint8_t link_rate;
+ uint8_t frame_cap_ind;
};
struct dmub_rb_cmd_psr_copy_settings {
@@ -206,31 +253,17 @@ struct dmub_rb_cmd_psr_set_level {
struct dmub_cmd_psr_set_level_data psr_set_level_data;
};
-struct dmub_rb_cmd_psr_disable {
- struct dmub_cmd_header header;
-};
-
struct dmub_rb_cmd_psr_enable {
struct dmub_cmd_header header;
};
-struct dmub_cmd_psr_notify_vblank_data {
- uint32_t vblank_int; // Which vblank interrupt was triggered
-};
-
-struct dmub_rb_cmd_notify_vblank {
- struct dmub_cmd_header header;
- struct dmub_cmd_psr_notify_vblank_data psr_notify_vblank_data;
-};
-
-struct dmub_cmd_psr_notify_static_state_data {
- uint32_t ss_int; // Which static screen interrupt was triggered
- uint32_t ss_enter; // Enter (1) or exit (0) static screen
+struct dmub_cmd_psr_setup_data {
+ enum psr_version version; // PSR version 1 or 2
};
-struct dmub_rb_cmd_psr_notify_static_state {
+struct dmub_rb_cmd_psr_setup {
struct dmub_cmd_header header;
- struct dmub_cmd_psr_notify_static_state_data psr_notify_static_state_data;
+ struct dmub_cmd_psr_setup_data psr_setup_data;
};
union dmub_rb_cmd {
@@ -245,9 +278,10 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_dpphy_init dpphy_init;
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
struct dmub_rb_cmd_psr_enable psr_enable;
- struct dmub_rb_cmd_psr_disable psr_disable;
struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
struct dmub_rb_cmd_psr_set_level psr_set_level;
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
+ struct dmub_rb_cmd_psr_setup psr_setup;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 14f13e8a6f3b..7b69eb37f762 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,10 +32,17 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_ENABLE = 0,
- DMUB_CMD__PSR_DISABLE = 1,
- DMUB_CMD__PSR_COPY_SETTINGS = 2,
- DMUB_CMD__PSR_SET_LEVEL = 3,
+ DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
+};
+
+enum psr_version {
+ PSR_VERSION_1 = 0x10, // PSR Version 1
+ PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
+ PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
};
#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
new file mode 100644
index 000000000000..242ec257998c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef _DMUB_META_H_
+#define _DMUB_META_H_
+
+#include "dmub_types.h"
+
+#pragma pack(push, 1)
+
+/* Magic value for identifying dmub_fw_meta_info */
+#define DMUB_FW_META_MAGIC 0x444D5542
+
+/* Offset from the end of the file to the dmub_fw_meta_info */
+#define DMUB_FW_META_OFFSET 0x24
+
+/**
+ * struct dmub_fw_meta_info - metadata associated with fw binary
+ *
+ * NOTE: This should be considered a stable API. Fields should
+ * not be repurposed or reordered. New fields should be
+ * added instead to extend the structure.
+ *
+ * @magic_value: magic value identifying DMUB firmware meta info
+ * @fw_region_size: size of the firmware state region
+ * @trace_buffer_size: size of the tracebuffer region
+ */
+struct dmub_fw_meta_info {
+ uint32_t magic_value;
+ uint32_t fw_region_size;
+ uint32_t trace_buffer_size;
+};
+
+/* Ensure that the structure remains 64 bytes. */
+union dmub_fw_meta {
+ struct dmub_fw_meta_info info;
+ uint8_t reserved[64];
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_META_H_ */
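
The metadata union is appended near the end of the firmware image, DMUB_FW_META_OFFSET bytes back from the end. A sketch of the lookup, mirroring the dmub_get_fw_meta_info() helper added to dmub_srv.c later in this series (assumes the definitions from dmub_fw_meta.h above):

#include <stddef.h>
#include <stdint.h>

/* Locate dmub_fw_meta_info inside a firmware bss blob: the union sits
 * sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET bytes from the end
 * and is only valid if the magic matches. */
static const struct dmub_fw_meta_info *
find_fw_meta(const uint8_t *blob, size_t size)
{
	const union dmub_fw_meta *meta;

	if (!blob || size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
		return NULL;

	meta = (const union dmub_fw_meta *)
		(blob + size - DMUB_FW_META_OFFSET - sizeof(union dmub_fw_meta));

	return meta->info.magic_value == DMUB_FW_META_MAGIC ?
		&meta->info : NULL;
}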
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
index ac22744eaa94..df875fdd2ab0 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -73,12 +73,17 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const struct dmub_cmd_header *cmd)
{
- uint8_t *wt_ptr = (uint8_t *)(rb->base_address) + rb->wrpt;
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ int i;
if (dmub_rb_full(rb))
return false;
- dmub_memcpy(wt_ptr, cmd, DMUB_RB_CMD_SIZE);
+ // Copy the command into the ring one aligned 64-bit word at a time.
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
rb->wrpt += DMUB_RB_CMD_SIZE;
if (rb->wrpt >= rb->capacity)
@@ -113,6 +118,26 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
return true;
}
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+ uint32_t rptr = rb->rptr;
+ uint32_t wptr = rb->wrpt;
+
+ while (rptr != wptr) {
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ int i;
+
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
+
+ rptr += DMUB_RB_CMD_SIZE;
+ if (rptr >= rb->capacity)
+ rptr %= rb->capacity;
+ }
+}
+
static inline void dmub_rb_init(struct dmub_rb *rb,
struct dmub_rb_init_params *init_params)
{
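
dmub_rb_push_front() and dmub_rb_flush_pending() above both touch ring memory through volatile 64-bit pointers instead of dmub_memcpy(): DMUB_RB_CMD_SIZE is a multiple of 8, so commands can be moved in aligned 64-bit words, and the volatile accesses keep the compiler from eliding the read-back that flushes pending writes out to framebuffer memory. A reduced sketch of the copy loop:

#include <stdint.h>

/* Copy one command (cmd_size bytes, a multiple of 8) into ring memory
 * using aligned 64-bit volatile stores. Sketch only; the real helpers
 * also handle write-pointer advance and wraparound. */
static void rb_copy_cmd(volatile uint64_t *dst, const uint64_t *src,
			uint32_t cmd_size)
{
	uint32_t i;

	for (i = 0; i < cmd_size / sizeof(uint64_t); i++)
		*dst++ = *src++;
}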
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index 528243e35add..f8917594036a 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -67,7 +67,6 @@
#include "dmub_types.h"
#include "dmub_cmd.h"
#include "dmub_rb.h"
-#include "dmub_fw_state.h"
#if defined(__cplusplus)
extern "C" {
@@ -76,7 +75,7 @@ extern "C" {
/* Forward declarations */
struct dmub_srv;
struct dmub_cmd_header;
-struct dmcu;
+struct dmub_srv_common_regs;
/* enum dmub_status - return code for dmcub functions */
enum dmub_status {
@@ -145,11 +144,13 @@ struct dmub_fb {
* @inst_const_size: size of the fw inst const section
* @bss_data_size: size of the fw bss data section
* @vbios_size: size of the vbios data
+ * @fw_bss_data: raw firmware bss data section
*/
struct dmub_srv_region_params {
uint32_t inst_const_size;
uint32_t bss_data_size;
uint32_t vbios_size;
+ const uint8_t *fw_bss_data;
};
/**
@@ -230,6 +231,8 @@ struct dmub_srv_base_funcs {
struct dmub_srv_hw_funcs {
/* private: internal use only */
+ void (*init)(struct dmub_srv *dmub);
+
void (*reset)(struct dmub_srv *dmub);
void (*reset_release)(struct dmub_srv *dmub);
@@ -307,6 +310,8 @@ struct dmub_srv {
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
+ const struct dmub_srv_common_regs *regs;
+
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
struct dmub_rb inbox1_rb;
@@ -414,6 +419,21 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
const struct dmub_srv_hw_params *params);
/**
+ * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized
+ * @dmub: the dmub service
+ *
+ * Before destroying the DMUB service or releasing the backing framebuffer
+ * memory we'll need to put the DMCUB into reset first.
+ *
+ * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+
+/**
* dmub_srv_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
@@ -442,25 +462,6 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
/**
- * dmub_srv_cmd_submit() - submits a command to the DMUB immediately
- * @dmub: the dmub service
- * @cmd: the command to submit
- * @timeout_us: the maximum number of microseconds to wait
- *
- * Submits a command to the DMUB with an optional timeout.
- * If timeout_us is given then the service will attempt to
- * resubmit for the given number of microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - wait for submit timed out
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd,
- uint32_t timeout_us);
-
-/**
* dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
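
A sketch of the teardown ordering the dmub_srv_hw_reset() kerneldoc above calls for; free_fb() and driver_destroy_dmub() are placeholders for driver-side helpers, not real API:

/* Put the DMCUB back in reset before releasing the memory it executes
 * from, then destroy the service. */
static void shutdown_dmub(struct dmub_srv *dmub, struct dmub_fb *fb)
{
	if (dmub_srv_hw_reset(dmub) != DMUB_STATUS_OK)
		return;			/* service was never sw-initialized */

	free_fb(fb);			/* placeholder */
	driver_destroy_dmub(dmub);	/* placeholder */
}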
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 951ea7053c7e..b2ca8e0dbac9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -25,6 +25,7 @@
#include "../inc/dmub_srv.h"
#include "dmub_reg.h"
+#include "dmub_dcn20.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
@@ -33,36 +34,90 @@
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn20_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
+ uint64_t *fb_base,
+ uint64_t *fb_offset)
+{
+ uint32_t tmp;
+
+ REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
+ *fb_base = (uint64_t)tmp << 24;
+
+ REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
+ *fb_offset = (uint64_t)tmp << 24;
+}
+
+static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
+ uint64_t fb_base,
+ uint64_t fb_offset,
+ union dmub_addr *addr_out)
+{
+ addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
+}
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_WRITE(DMCUB_INBOX1_RPTR, 0);
+ REG_WRITE(DMCUB_INBOX1_WPTR, 0);
}
void dmub_dcn20_reset_release(struct dmub_srv *dmub)
{
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0);
}
-void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, struct dmub_window *cw0,
- struct dmub_window *cw1)
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1)
{
+ union dmub_addr offset;
+ uint64_t fb_base, fb_offset;
+
+ dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x4,
- DMCUB_MEM_WRITE_SPACE, 0x4);
+ REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3,
+ DMCUB_MEM_WRITE_SPACE, 0x3);
+
+ dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
- REG_WRITE(DMCUB_REGION3_CW0_OFFSET, cw0->offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, cw0->offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
DMCUB_REGION3_CW0_ENABLE, 1);
- REG_WRITE(DMCUB_REGION3_CW1_OFFSET, cw1->offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, cw1->offset.u.high_part);
+ dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
@@ -79,37 +134,51 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
{
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET, cw2->offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, cw2->offset.u.high_part);
+ union dmub_addr offset;
+ uint64_t fb_base, fb_offset;
+
+ dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
+ dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
DMCUB_REGION3_CW2_ENABLE, 1);
- REG_WRITE(DMCUB_REGION3_CW3_OFFSET, cw3->offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, cw3->offset.u.high_part);
+ dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
DMCUB_REGION3_CW3_ENABLE, 1);
/* TODO: Move this to CW4. */
+ dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
- REG_WRITE(DMCUB_REGION4_OFFSET, cw4->offset.u.low_part);
- REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, cw4->offset.u.high_part);
+ REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
1);
- REG_WRITE(DMCUB_REGION3_CW5_OFFSET, cw5->offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, cw5->offset.u.high_part);
+ dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
DMCUB_REGION3_CW5_ENABLE, 1);
- REG_WRITE(DMCUB_REGION3_CW6_OFFSET, cw6->offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, cw6->offset.u.high_part);
+ dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
@@ -123,8 +192,6 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
- REG_WRITE(DMCUB_INBOX1_RPTR, 0);
- REG_WRITE(DMCUB_INBOX1_WPTR, 0);
}
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index e70a57573467..04b0fa13153d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -30,6 +30,129 @@
struct dmub_srv;
+/* DCN20 register definitions. */
+
+#define DMUB_COMMON_REGS() \
+ DMUB_SR(DMCUB_CNTL) \
+ DMUB_SR(DMCUB_MEM_CNTL) \
+ DMUB_SR(DMCUB_SEC_CNTL) \
+ DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_INBOX1_SIZE) \
+ DMUB_SR(DMCUB_INBOX1_RPTR) \
+ DMUB_SR(DMCUB_INBOX1_WPTR) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \
+ DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION4_OFFSET) \
+ DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_SCRATCH0) \
+ DMUB_SR(DMCUB_SCRATCH1) \
+ DMUB_SR(DMCUB_SCRATCH2) \
+ DMUB_SR(DMCUB_SCRATCH3) \
+ DMUB_SR(DMCUB_SCRATCH4) \
+ DMUB_SR(DMCUB_SCRATCH5) \
+ DMUB_SR(DMCUB_SCRATCH6) \
+ DMUB_SR(DMCUB_SCRATCH7) \
+ DMUB_SR(DMCUB_SCRATCH8) \
+ DMUB_SR(DMCUB_SCRATCH9) \
+ DMUB_SR(DMCUB_SCRATCH10) \
+ DMUB_SR(DMCUB_SCRATCH11) \
+ DMUB_SR(DMCUB_SCRATCH12) \
+ DMUB_SR(DMCUB_SCRATCH13) \
+ DMUB_SR(DMCUB_SCRATCH14) \
+ DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(CC_DC_PIPE_DIS) \
+ DMUB_SR(MMHUBBUB_SOFT_RESET) \
+ DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
+ DMUB_SR(DCN_VM_FB_OFFSET)
+
+#define DMUB_COMMON_FIELDS() \
+ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \
+ DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \
+ DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \
+ DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
+ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
+ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
+ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
+ DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
+
+struct dmub_srv_common_reg_offset {
+#define DMUB_SR(reg) uint32_t reg;
+ DMUB_COMMON_REGS()
+#undef DMUB_SR
+};
+
+struct dmub_srv_common_reg_shift {
+#define DMUB_SF(reg, field) uint8_t reg##__##field;
+ DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_reg_mask {
+#define DMUB_SF(reg, field) uint32_t reg##__##field;
+ DMUB_COMMON_FIELDS()
+#undef DMUB_SF
+};
+
+struct dmub_srv_common_regs {
+ const struct dmub_srv_common_reg_offset offset;
+ const struct dmub_srv_common_reg_mask mask;
+ const struct dmub_srv_common_reg_shift shift;
+};
+
+extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs;
+
/* Hardware functions. */
void dmub_dcn20_init(struct dmub_srv *dmub);
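
The DMUB_SR()/DMUB_SF() lists above are an X-macro table: one list of names is expanded several times under different macro definitions to generate the offset, shift and mask structs and their per-ASIC initializers. A minimal self-contained illustration of the technique, with made-up register names:

#include <stdint.h>

#define REG_LIST() \
	X(REG_A)   \
	X(REG_B)

/* First expansion: one struct field per listed register. */
struct reg_offsets {
#define X(reg) uint32_t reg;
	REG_LIST()
#undef X
};

/* Second expansion: a designated initializer over the same list;
 * the addresses here are zero placeholders. */
static const struct reg_offsets offsets = {
#define X(reg) .reg = 0,
	REG_LIST()
#undef X
};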
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 9cea7a2d8dbf..5bed9fcd6b5c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -25,6 +25,7 @@
#include "../inc/dmub_srv.h"
#include "dmub_reg.h"
+#include "dmub_dcn21.h"
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
@@ -32,103 +33,25 @@
#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg
#define CTX dmub
+#define REGS dmub->regs
-static inline void dmub_dcn21_translate_addr(const union dmub_addr *addr_in,
- uint64_t fb_base,
- uint64_t fb_offset,
- union dmub_addr *addr_out)
-{
- addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
-}
-
-void dmub_dcn21_backdoor_load(struct dmub_srv *dmub,
- const struct dmub_window *cw0,
- const struct dmub_window *cw1)
-{
- union dmub_addr offset;
- uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset;
-
- REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3,
- DMCUB_MEM_WRITE_SPACE, 0x3);
-
- dmub_dcn21_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
- REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
- DMCUB_REGION3_CW0_ENABLE, 1);
-
- dmub_dcn21_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
- REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
- DMCUB_REGION3_CW1_ENABLE, 1);
-
- REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
- 0x20);
-}
-
-void dmub_dcn21_setup_windows(struct dmub_srv *dmub,
- const struct dmub_window *cw2,
- const struct dmub_window *cw3,
- const struct dmub_window *cw4,
- const struct dmub_window *cw5,
- const struct dmub_window *cw6)
-{
- union dmub_addr offset;
- uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset;
-
- dmub_dcn21_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
- REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
- DMCUB_REGION3_CW2_ENABLE, 1);
+/* Registers. */
- dmub_dcn21_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
+const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
- REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
- REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
- DMCUB_REGION3_CW3_ENABLE, 1);
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
- /* TODO: Move this to CW4. */
- dmub_dcn21_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
- REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
- REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
- cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
- 1);
-
- dmub_dcn21_translate_addr(&cw5->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
- REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
- DMCUB_REGION3_CW5_ENABLE, 1);
-
- dmub_dcn21_translate_addr(&cw6->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
- REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
- DMCUB_REGION3_CW6_ENABLE, 1);
-}
+/* Shared functions. */
bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
index f7a93a5dcfa5..2bbea237137b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -28,18 +28,11 @@
#include "dmub_dcn20.h"
-/* Hardware functions. */
+/* Registers. */
-void dmub_dcn21_backdoor_load(struct dmub_srv *dmub,
- const struct dmub_window *cw0,
- const struct dmub_window *cw1);
+extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
-void dmub_dcn21_setup_windows(struct dmub_srv *dmub,
- const struct dmub_window *cw2,
- const struct dmub_window *cw3,
- const struct dmub_window *cw4,
- const struct dmub_window *cw5,
- const struct dmub_window *cw6);
+/* Hardware functions. */
bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
index bac4ee8f745f..c1f4030929a4 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
@@ -34,11 +34,15 @@ struct dmub_srv;
#define BASE(seg) BASE_INNER(seg)
-#define REG_OFFSET(base_index, addr) (BASE(base_index) + addr)
+#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name)
-#define REG(reg_name) REG_OFFSET(mm ## reg_name ## _BASE_IDX, mm ## reg_name)
+#define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT
-#define FD(reg_field) reg_field ## __SHIFT, reg_field ## _MASK
+#define FD_MASK(reg_name, field) reg_name##__##field##_MASK
+
+#define REG(reg) (REGS)->offset.reg
+
+#define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field
#define FN(reg_name, field) FD(reg_name##__##field)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 5f39166d3c08..85a518bf8a76 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -26,7 +26,7 @@
#include "../inc/dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
-#include "dmub_trace_buffer.h"
+#include "dmub_fw_meta.h"
#include "os_types.h"
/*
* Note: the DMUB service is standalone. No additional headers should be
@@ -46,6 +46,11 @@
/* Mailbox size */
#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
+/* Default state size if meta is absent. */
+#define DMUB_FW_STATE_SIZE (1024)
+
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (1024)
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
@@ -62,6 +67,47 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
return (val + factor - 1) / factor * factor;
}
+static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+{
+ const uint8_t *base = (const uint8_t *)fb->cpu_addr;
+ uint8_t buf[64];
+ uint32_t pos, end;
+
+ /**
+ * Read 64-byte chunks since we don't want to store a
+ * large temporary buffer for this purpose.
+ */
+ end = fb->size / sizeof(buf) * sizeof(buf);
+
+ for (pos = 0; pos < end; pos += sizeof(buf))
+ dmub_memcpy(buf, base + pos, sizeof(buf));
+
+ /* Read anything leftover into the buffer. */
+ if (end < fb->size)
+ dmub_memcpy(buf, base + pos, fb->size - end);
+}
+
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+{
+ const union dmub_fw_meta *meta;
+
+ if (fw_bss_data == NULL)
+ return NULL;
+
+ if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+ return NULL;
+
+ meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
+ DMUB_FW_META_OFFSET -
+ sizeof(union dmub_fw_meta));
+
+ if (meta->info.magic_value != DMUB_FW_META_MAGIC)
+ return NULL;
+
+ return &meta->info;
+}
+
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
@@ -69,6 +115,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
+ dmub->regs = &dmub_srv_dcn20_regs;
+
funcs->reset = dmub_dcn20_reset;
funcs->reset_release = dmub_dcn20_reset_release;
funcs->backdoor_load = dmub_dcn20_backdoor_load;
@@ -80,8 +128,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->is_hw_init = dmub_dcn20_is_hw_init;
if (asic == DMUB_ASIC_DCN21) {
- funcs->backdoor_load = dmub_dcn21_backdoor_load;
- funcs->setup_windows = dmub_dcn21_setup_windows;
+ dmub->regs = &dmub_srv_dcn21_regs;
+
funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
funcs->is_phy_init = dmub_dcn21_is_phy_init;
}
@@ -160,6 +208,9 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ const struct dmub_fw_meta_info *fw_info;
+ uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -174,6 +225,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
data->base = dmub_align(inst->top, 256);
data->top = data->base + params->bss_data_size;
+ /*
+ * All cache windows below should be aligned to the size
+ * of the DMCUB cache line, 64 bytes.
+ */
+
stack->base = dmub_align(data->top, 256);
stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
@@ -183,14 +239,19 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
mail->base = dmub_align(bios->top, 256);
mail->top = mail->base + DMUB_MAILBOX_SIZE;
+ fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
+ params->bss_data_size);
+
+ if (fw_info) {
+ fw_state_size = fw_info->fw_region_size;
+ trace_buffer_size = fw_info->trace_buffer_size;
+ }
+
trace_buff->base = dmub_align(mail->top, 256);
- trace_buff->top = trace_buff->base + TRACE_BUF_SIZE;
+ trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
fw_state->base = dmub_align(trace_buff->top, 256);
-
- /* Align firmware state to size of cache line. */
- fw_state->top =
- fw_state->base + dmub_align(sizeof(struct dmub_fw_state), 64);
+ fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
out->fb_size = dmub_align(fw_state->top, 4096);
@@ -251,6 +312,9 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
+ if (!dmub->hw_init)
+ return DMUB_STATUS_OK;
+
if (dmub->hw_funcs.is_hw_init)
*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
@@ -288,6 +352,13 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw1.region.base = DMUB_CW1_BASE;
cw1.region.top = cw1.region.base + stack_fb->size - 1;
+ /**
+ * Read back all the instruction memory so we don't hang the
+ * DMCUB when backdoor loading if the write from x86 hasn't been
+ * flushed yet. This only occurs in backdoor loading.
+ */
+ dmub_flush_buffer_mem(inst_fb);
+
if (params->load_inst_const && dmub->hw_funcs.backdoor_load)
dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
}
@@ -347,6 +418,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
+enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_init == false)
+ return DMUB_STATUS_OK;
+
+ if (dmub->hw_funcs.reset)
+ dmub->hw_funcs.reset(dmub);
+
+ dmub->hw_init = false;
+
+ return DMUB_STATUS_OK;
+}
+
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
const struct dmub_cmd_header *cmd)
{
@@ -364,33 +451,17 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
+ /**
+ * Read back all the queued commands to ensure that they've
+ * been flushed to framebuffer memory. Otherwise DMCUB might
+ * read back stale, fully invalid or partially invalid data.
+ */
+ dmub_rb_flush_pending(&dmub->inbox1_rb);
+
dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd,
- uint32_t timeout_us)
-{
- uint32_t i = 0;
-
- if (!dmub->hw_init)
- return DMUB_STATUS_INVALID;
-
- for (i = 0; i <= timeout_us; ++i) {
- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) {
- dmub->hw_funcs.set_inbox1_wptr(dmub,
- dmub->inbox1_rb.wrpt);
- return DMUB_STATUS_OK;
- }
-
- udelay(1);
- }
-
- return DMUB_STATUS_TIMEOUT;
-}
-
enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
uint32_t timeout_us)
{
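
With dmub_srv_cmd_submit() removed, the remaining flow is queue-then-execute: dmub_srv_cmd_queue() pushes into inbox1, and dmub_srv_cmd_execute() flushes the queued commands and publishes the write pointer to the DMCUB. A sketch of a caller (command construction is elided; real callers fill a union dmub_rb_cmd):

static enum dmub_status send_cmd(struct dmub_srv *dmub,
				 const struct dmub_cmd_header *cmd)
{
	enum dmub_status status = dmub_srv_cmd_queue(dmub, cmd);

	if (status != DMUB_STATUS_OK)
		return status;			/* e.g. ring full */

	return dmub_srv_cmd_execute(dmub);	/* flush + bump wptr */
}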
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 72b659c63aea..a2903985b9e8 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,17 +134,31 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
+#define RAVEN2_15D8_REV_94 0x94
+#define RAVEN2_15D8_REV_95 0x95
#define RAVEN2_15D8_REV_E3 0xE3
#define RAVEN2_15D8_REV_E4 0xE4
+#define RAVEN2_15D8_REV_E9 0xE9
+#define RAVEN2_15D8_REV_EA 0xEA
+#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
-
+#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
+#endif
+
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
+#ifndef ASICREV_IS_RAVEN2
#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
|| (eChipRev == RAVEN2_15D8_REV_E4))
+#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
+ || eChipRev == RAVEN2_15D8_REV_95 \
+ || eChipRev == RAVEN2_15D8_REV_E9 \
+ || eChipRev == RAVEN2_15D8_REV_EA \
+ || eChipRev == RAVEN2_15D8_REV_EB)
#define FAMILY_RV 142 /* DCN 1*/
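
Note that the Pollock and Dali revision values fall inside the half-open Raven2 range [RAVEN2_A0, RAVEN1_F0), so callers that need to tell the variants apart must test the most specific macro first. A usage sketch:

#include <stdint.h>

/* Dispatch on a raw revision byte; most specific check first, since
 * the Pollock/Dali revisions also satisfy ASICREV_IS_RAVEN2(). */
static const char *raven2_flavor(uint8_t rev)
{
	if (ASICREV_IS_POLLOCK(rev))
		return "Pollock";
	if (ASICREV_IS_DALI(rev))
		return "Dali";
	if (ASICREV_IS_RAVEN2(rev))
		return "Raven2";
	return "not DCN1.01";
}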
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index b52c4d379651..cac09d500fda 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -364,8 +364,10 @@ static struct fixed31_32 translate_from_linear_space(
scratch_2 = dc_fixpt_mul(gamma_of_2,
pow_buffer[pow_buffer_ptr%16]);
- pow_buffer[pow_buffer_ptr%16] = scratch_2;
- pow_buffer_ptr++;
+ if (pow_buffer_ptr != -1) {
+ pow_buffer[pow_buffer_ptr%16] = scratch_2;
+ pow_buffer_ptr++;
+ }
scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
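
The guard above turns pow_buffer_ptr == -1 into a "caching disabled" sentinel, so the 16-entry power cache is only written when a caller has explicitly primed it. A reduced sketch of the sentinel pattern, with placeholder names:

/* Sentinel-guarded memo buffer: index -1 means "do not cache".
 * Mirrors the pow_buffer guard above; the cached value itself is
 * computed elsewhere. */
static double pow_cache[16];
static int pow_cache_ptr = -1;	/* -1: caching disabled */

static double cache_value(double fresh)
{
	if (pow_cache_ptr != -1) {
		pow_cache[pow_cache_ptr % 16] = fresh;
		pow_cache_ptr++;
	}
	return fresh;
}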
@@ -1671,129 +1673,6 @@ static bool map_regamma_hw_to_x_user(
#define _EXTRA_POINTS 3
-bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct freesync_hdr_tf_params *fs_params)
-{
- struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
- struct dividers dividers;
-
- struct pwl_float_data *rgb_user = NULL;
- struct pwl_float_data_ex *rgb_regamma = NULL;
- struct gamma_pixel *axis_x = NULL;
- struct pixel_gamma_point *coeff = NULL;
- enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
- bool ret = false;
-
- if (output_tf->type == TF_TYPE_BYPASS)
- return false;
-
- /* we can use hardcoded curve for plain SRGB TF */
- if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
- output_tf->tf == TRANSFER_FUNCTION_SRGB) {
- if (ramp == NULL)
- return true;
- if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
- return true;
- }
-
- output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-
- if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
- (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
- rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
- sizeof(*rgb_user),
- GFP_KERNEL);
- if (!rgb_user)
- goto rgb_user_alloc_fail;
-
- axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
- GFP_KERNEL);
- if (!axis_x)
- goto axis_x_alloc_fail;
-
- dividers.divider1 = dc_fixpt_from_fraction(3, 2);
- dividers.divider2 = dc_fixpt_from_int(2);
- dividers.divider3 = dc_fixpt_from_fraction(5, 2);
-
- build_evenly_distributed_points(
- axis_x,
- ramp->num_entries,
- dividers);
-
- if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
- scale_gamma(rgb_user, ramp, dividers);
- else if (ramp->type == GAMMA_RGB_FLOAT_1024)
- scale_gamma_dx(rgb_user, ramp, dividers);
- }
-
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
-
- coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
- GFP_KERNEL);
- if (!coeff)
- goto coeff_alloc_fail;
-
- tf = output_tf->tf;
- if (tf == TRANSFER_FUNCTION_PQ) {
- tf_pts->end_exponent = 7;
- tf_pts->x_point_at_y1_red = 125;
- tf_pts->x_point_at_y1_green = 125;
- tf_pts->x_point_at_y1_blue = 125;
-
- build_pq(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x,
- output_tf->sdr_ref_white_level);
- } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
- fs_params != NULL && fs_params->skip_tm == 0) {
- build_freesync_hdr(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x,
- fs_params);
- } else if (tf == TRANSFER_FUNCTION_HLG) {
- build_freesync_hdr(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x,
- fs_params);
-
- } else {
- tf_pts->end_exponent = 0;
- tf_pts->x_point_at_y1_red = 1;
- tf_pts->x_point_at_y1_green = 1;
- tf_pts->x_point_at_y1_blue = 1;
-
- build_regamma(rgb_regamma,
- MAX_HW_POINTS,
- coordinates_x, tf);
- }
- map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axis_x, rgb_regamma,
- MAX_HW_POINTS, tf_pts,
- (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
- (ramp && ramp->type != GAMMA_CS_TFM_1D));
-
- if (ramp && ramp->type == GAMMA_CS_TFM_1D)
- apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
-
- ret = true;
-
- kvfree(coeff);
-coeff_alloc_fail:
- kvfree(rgb_regamma);
-rgb_regamma_alloc_fail:
- kvfree(axis_x);
-axis_x_alloc_fail:
- kvfree(rgb_user);
-rgb_user_alloc_fail:
- return ret;
-}
-
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
const struct regamma_lut *regamma)
{
@@ -2041,14 +1920,14 @@ rgb_user_alloc_fail:
return ret;
}
-
-bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+static bool calculate_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points,
+ struct pwl_float_data_ex *rgb_regamma,
+ const struct freesync_hdr_tf_params *fs_params,
uint32_t sdr_ref_white_level)
{
uint32_t i;
bool ret = false;
- struct pwl_float_data_ex *rgb_regamma = NULL;
if (trans == TRANSFER_FUNCTION_UNITY ||
trans == TRANSFER_FUNCTION_LINEAR) {
@@ -2058,68 +1937,33 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
points->x_point_at_y1_blue = 1;
for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = coordinates_x[i].x;
- points->green[i] = coordinates_x[i].x;
- points->blue[i] = coordinates_x[i].x;
+ rgb_regamma[i].r = coordinates_x[i].x;
+ rgb_regamma[i].g = coordinates_x[i].x;
+ rgb_regamma[i].b = coordinates_x[i].x;
}
+
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
points->end_exponent = 7;
points->x_point_at_y1_red = 125;
points->x_point_at_y1_green = 125;
points->x_point_at_y1_blue = 125;
-
build_pq(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
sdr_ref_white_level);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_regamma[i].r;
- points->green[i] = rgb_regamma[i].g;
- points->blue[i] = rgb_regamma[i].b;
- }
- ret = true;
- kvfree(rgb_regamma);
- } else if (trans == TRANSFER_FUNCTION_SRGB ||
- trans == TRANSFER_FUNCTION_BT709 ||
- trans == TRANSFER_FUNCTION_GAMMA22 ||
- trans == TRANSFER_FUNCTION_GAMMA24 ||
- trans == TRANSFER_FUNCTION_GAMMA26) {
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
- points->end_exponent = 0;
- points->x_point_at_y1_red = 1;
- points->x_point_at_y1_green = 1;
- points->x_point_at_y1_blue = 1;
-
- build_regamma(rgb_regamma,
+ ret = true;
+ } else if (trans == TRANSFER_FUNCTION_GAMMA22 &&
+ fs_params != NULL && fs_params->skip_tm == 0) {
+ build_freesync_hdr(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
- trans);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_regamma[i].r;
- points->green[i] = rgb_regamma[i].g;
- points->blue[i] = rgb_regamma[i].b;
- }
- ret = true;
+ fs_params);
- kvfree(rgb_regamma);
+ ret = true;
} else if (trans == TRANSFER_FUNCTION_HLG) {
- rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
points->end_exponent = 4;
points->x_point_at_y1_red = 12;
points->x_point_at_y1_green = 12;
@@ -2129,18 +1973,127 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
MAX_HW_POINTS,
coordinates_x,
80, 1000);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_regamma[i].r;
- points->green[i] = rgb_regamma[i].g;
- points->blue[i] = rgb_regamma[i].b;
- }
+
+ ret = true;
+ } else {
+ // trans == TRANSFER_FUNCTION_SRGB
+ // trans == TRANSFER_FUNCTION_BT709
+ // trans == TRANSFER_FUNCTION_GAMMA22
+ // trans == TRANSFER_FUNCTION_GAMMA24
+ // trans == TRANSFER_FUNCTION_GAMMA26
+ points->end_exponent = 0;
+ points->x_point_at_y1_red = 1;
+ points->x_point_at_y1_green = 1;
+ points->x_point_at_y1_blue = 1;
+
+ build_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ trans);
+
ret = true;
- kvfree(rgb_regamma);
}
-rgb_regamma_alloc_fail:
+
return ret;
}
+bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+ const struct freesync_hdr_tf_params *fs_params)
+{
+ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+ struct dividers dividers;
+
+ struct pwl_float_data *rgb_user = NULL;
+ struct pwl_float_data_ex *rgb_regamma = NULL;
+ struct gamma_pixel *axis_x = NULL;
+ struct pixel_gamma_point *coeff = NULL;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ bool ret = false;
+
+ if (output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ /* we can use hardcoded curve for plain SRGB TF */
+ if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+ output_tf->tf == TRANSFER_FUNCTION_SRGB) {
+ if (ramp == NULL)
+ return true;
+ if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
+ (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ return true;
+ }
+
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+ if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
+ (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
+ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+
+ axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x),
+ GFP_KERNEL);
+ if (!axis_x)
+ goto axis_x_alloc_fail;
+
+ dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+ dividers.divider2 = dc_fixpt_from_int(2);
+ dividers.divider3 = dc_fixpt_from_fraction(5, 2);
+
+ build_evenly_distributed_points(
+ axis_x,
+ ramp->num_entries,
+ dividers);
+
+ if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+ scale_gamma(rgb_user, ramp, dividers);
+ else if (ramp->type == GAMMA_RGB_FLOAT_1024)
+ scale_gamma_dx(rgb_user, ramp, dividers);
+ }
+
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+ coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+ GFP_KERNEL);
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+ tf = output_tf->tf;
+
+ ret = calculate_curve(tf,
+ tf_pts,
+ rgb_regamma,
+ fs_params,
+ output_tf->sdr_ref_white_level);
+
+ if (ret) {
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axis_x, rgb_regamma,
+ MAX_HW_POINTS, tf_pts,
+ (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
+ (ramp && ramp->type != GAMMA_CS_TFM_1D));
+
+ if (ramp && ramp->type == GAMMA_CS_TFM_1D)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
+ }
+
+ kvfree(coeff);
+coeff_alloc_fail:
+ kvfree(rgb_regamma);
+rgb_regamma_alloc_fail:
+ kvfree(axis_x);
+axis_x_alloc_fail:
+ kvfree(rgb_user);
+rgb_user_alloc_fail:
+ return ret;
+}
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 44ddea58523a..9994817a9a03 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -103,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
-bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
- struct dc_transfer_func_distributed_points *points,
- uint32_t sdr_ref_white_level);
-
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index a94700940fd6..b9992ebf77a6 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/* Threshold to exit fixed refresh rate */
@@ -254,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
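+ /* Half the BTR margin on each side of the threshold forms a hysteresis band against rapid enter/exit toggling */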
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
@@ -327,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
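+ /* Use the ceiling count when its duration stays above the minimum
+ * and it is closer to the mid point (or flooring would leave < 2 frames).
+ */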
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -381,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
- //Compute the exit refresh rate and exit frame duration
+ /* Compute the exit refresh rate and exit frame duration */
unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us)
+ (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
@@ -796,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
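+ /* Derive the BTR margin from the size of the VRR window, capped at BTR_MAX_MARGIN */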
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
@@ -811,6 +816,9 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+ in_out_vrr->fixed.fixed_active = false;
+ in_out_vrr->fixed.target_refresh_in_uhz = 0;
+
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
@@ -826,6 +834,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->adjust.v_total_max = stream->timing.v_total;
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
refresh_range >= MIN_REFRESH_RANGE_IN_US) {
+
in_out_vrr->adjust.v_total_min =
calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index f98d3d9ecb6d..af78e4f1be68 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -63,7 +63,7 @@ struct mod_hdcp_transition_input_hdcp1 {
uint8_t hdcp_capable_dp;
uint8_t binfo_read_dp;
uint8_t r0p_available_dp;
- uint8_t link_integiry_check;
+ uint8_t link_integrity_check;
uint8_t reauth_request_check;
uint8_t stream_encryption_dp;
};
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 04845e43df15..37670db64855 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -283,8 +283,8 @@ static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
hdcp, "bstatus_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
- &input->link_integiry_check, &status,
- hdcp, "link_integiry_check"))
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
goto out;
if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
@@ -431,8 +431,8 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
hdcp, "bstatus_read"))
goto out;
if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
- &input->link_integiry_check, &status,
- hdcp, "link_integiry_check"))
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
goto out;
if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 136b8011ff3f..76edcbe51f71 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -67,11 +67,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
break;
case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
if (input->bcaps_read != PASS ||
- input->r0p_read != PASS ||
- input->rx_validation != PASS ||
- (!conn->is_repeater && input->encryption != PASS)) {
+ input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
/* 1A-06: consider invalid r0' a failure */
/* 1A-08: consider bksv listed in SRM a failure */
+ /*
+ * Some slow RX will fail rx validation when it is
+ * not yet ready; give it more time to react before retrying.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (!conn->is_repeater && input->encryption != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
}
@@ -212,7 +220,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
* after 3 attempts.
* 1A-08: consider bksv listed in SRM a failure
*/
- fail_and_restart_in_ms(0, &status, output);
+ /*
+ * Some slow RX will fail rx validation when it is
+ * not yet ready; give it more time to react before retrying.
+ */
+ fail_and_restart_in_ms(1000, &status, output);
}
break;
} else if ((!conn->is_repeater && input->encryption != PASS) ||
@@ -229,7 +241,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D1_A4_AUTHENTICATED:
- if (input->link_integiry_check != PASS ||
+ if (input->link_integrity_check != PASS ||
input->reauth_request_check != PASS) {
/* 1A-07: restart hdcp on a link integrity failure */
fail_and_restart_in_ms(0, &status, output);
@@ -237,7 +249,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D1_A6_WAIT_FOR_READY:
- if (input->link_integiry_check == FAIL ||
+ if (input->link_integrity_check == FAIL ||
input->reauth_request_check == FAIL) {
fail_and_restart_in_ms(0, &status, output);
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index e8043c903a84..8cae3e3aacd5 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -114,7 +114,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1A-11-3: consider h' timeout a failure */
- fail_and_restart_in_ms(0, &status, output);
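+ /* Wait 1s before restarting so a slow RX has time to recover */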
+ fail_and_restart_in_ms(1000, &status, output);
} else {
/* continue h' polling */
callback_in_ms(100, output);
@@ -166,7 +166,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
/* 1A-11-2: consider h' timeout a failure */
- fail_and_restart_in_ms(0, &status, output);
+ fail_and_restart_in_ms(1000, &status, output);
} else {
/* continue h' polling */
callback_in_ms(20, output);
@@ -439,7 +439,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
/* 1A-10-3: consider h' timeout a failure */
- fail_and_restart_in_ms(0, &status, output);
+ fail_and_restart_in_ms(1000, &status, output);
else
increment_stay_counter(hdcp);
break;
@@ -484,7 +484,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
if (event_ctx->event ==
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
/* 1A-10-2: consider h' timeout a failure */
- fail_and_restart_in_ms(0, &status, output);
+ fail_and_restart_in_ms(1000, &status, output);
else
increment_stay_counter(hdcp);
break;
@@ -630,7 +630,10 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
break;
} else if (input->prepare_stream_manage != PASS ||
input->stream_manage_write != PASS) {
- fail_and_restart_in_ms(0, &status, output);
+ if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
break;
}
callback_in_ms(100, output);
@@ -655,10 +658,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
*/
if (hdcp->auth.count.stream_management_retry_count > 10) {
fail_and_restart_in_ms(0, &status, output);
- } else {
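+ /* Only a callback event triggers another retry; other events just bump the stay counter */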
+ } else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) {
hdcp->auth.count.stream_management_retry_count++;
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT);
+ } else {
+ increment_stay_counter(hdcp);
}
break;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index ef4eb55f4474..7911dc157d5a 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -145,10 +145,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
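+ /* Store the session handle before the status check so it is available even if creation fails */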
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
- hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
sizeof(hdcp->auth.msg.hdcp1.aksv));
@@ -510,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
@@ -794,7 +795,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- return (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) &&
+ return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
(msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
? MOD_HDCP_STATUS_SUCCESS
: MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4e2f615c3566..e75a4bb94488 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -662,7 +662,11 @@ bool dmcu_load_iram(struct dmcu *dmcu,
memset(&ram_table, 0, sizeof(ram_table));
- if (dmcu->dmcu_version.abm_version == 0x23) {
+ if (dmcu->dmcu_version.abm_version == 0x24) {
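+ /* ABM 2.4 reuses the v2.3 iram fill and the v2.2 reserve area */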
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ } else if (dmcu->dmcu_version.abm_version == 0x23) {
fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
result = dmcu->funcs->load_iram(
@@ -687,3 +691,4 @@ bool dmcu_load_iram(struct dmcu *dmcu,
return result;
}
+
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
index cff8f91555d3..e9b2bd84cfed 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h
@@ -8134,6 +8134,10 @@
#define mmMPC_OUT5_CSC_C33_C34_B 0x1604
#define mmMPC_OUT5_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
// base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
index 10c83fecd147..dc8ce7aaa0cf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h
@@ -28263,7 +28263,14 @@
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
#define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
-
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON17_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index eddf83ec1c39..7cd0ee61c030 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -7103,7 +7103,10 @@
#define mmMPC_OUT3_CSC_C31_C32_B_BASE_IDX 2
#define mmMPC_OUT3_CSC_C33_C34_B 0x15ea
#define mmMPC_OUT3_CSC_C33_C34_B_BASE_IDX 2
-
+#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b
+#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c
// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
// base address: 0x5964
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
index faa0e76e32b4..2f780aefc722 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h
@@ -56634,5 +56634,13 @@
#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
#define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//MPC_OCSC_TEST_DEBUG_DATA
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index c2bd25589e84..bb2c9c7a18df 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -27,6 +27,9 @@
#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl 0x00fe
+#define mmDF_CS_UMC_AON0_DfGlobalCtrl_BASE_IDX 0
+
#define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0
@@ -38,6 +41,14 @@
#define smnPerfMonCtlHi2 0x01d464UL
#define smnPerfMonCtlLo3 0x01d470UL
#define smnPerfMonCtlHi3 0x01d474UL
+#define smnPerfMonCtlLo4 0x01d880UL
+#define smnPerfMonCtlHi4 0x01d884UL
+#define smnPerfMonCtlLo5 0x01d888UL
+#define smnPerfMonCtlHi5 0x01d88cUL
+#define smnPerfMonCtlLo6 0x01d890UL
+#define smnPerfMonCtlHi6 0x01d894UL
+#define smnPerfMonCtlLo7 0x01d898UL
+#define smnPerfMonCtlHi7 0x01d89cUL
#define smnPerfMonCtrLo0 0x01d448UL
#define smnPerfMonCtrHi0 0x01d44cUL
@@ -47,9 +58,20 @@
#define smnPerfMonCtrHi2 0x01d46cUL
#define smnPerfMonCtrLo3 0x01d478UL
#define smnPerfMonCtrHi3 0x01d47cUL
+#define smnPerfMonCtrLo4 0x01d790UL
+#define smnPerfMonCtrHi4 0x01d794UL
+#define smnPerfMonCtrLo5 0x01d798UL
+#define smnPerfMonCtrHi5 0x01d79cUL
+#define smnPerfMonCtrLo6 0x01d7a0UL
+#define smnPerfMonCtrHi6 0x01d7a4UL
+#define smnPerfMonCtrLo7 0x01d7a8UL
+#define smnPerfMonCtrHi7 0x01d7acUL
#define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL
#define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL
#define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL
+#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL
+#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index 06fac509e987..7afa87c7ff54 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -33,6 +33,14 @@
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
+/* DF_CS_UMC_AON0_DfGlobalCtrl */
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K__SHIFT 0x14
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M__SHIFT 0x15
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G__SHIFT 0x16
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K_MASK 0x00100000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M_MASK 0x00200000L
+#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G_MASK 0x00400000L
+
/* DF_CS_AON0_DramBaseAddress0 */
#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
@@ -45,4 +53,12 @@
#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L
#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+//DF_CS_UMC_AON0_DramLimitAddress0
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT 0x0
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT 0xa
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT 0xc
+#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK 0x000003FFL
+#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L
+#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
new file mode 100644
index 000000000000..36ae5b7fe88e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
@@ -0,0 +1,647 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_OFFSET_HEADER
+#define _dpcs_2_0_0_OFFSET_HEADER
+
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL 0x2936
+#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL 0x2937
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2 0x2938
+#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d
+#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0 0x294f
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1 0x2950
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2 0x2951
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3 0x2952
+#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 0x2953
+#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2954
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2955
+#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 0x2956
+#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+// base address: 0x0
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR 0x2934
+#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA 0x2935
+#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL 0x2a0e
+#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL 0x2a0f
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2 0x2a10
+#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15
+#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0 0x2a27
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1 0x2a28
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2 0x2a29
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3 0x2a2a
+#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 0x2a2b
+#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2a2c
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2a2d
+#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 0x2a2e
+#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+// base address: 0x360
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR 0x2a0c
+#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA 0x2a0d
+#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+// base address: 0x6c0
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x2ad8
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_TX_CNTL 0x2ad9
+#define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x2ada
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL 0x2adb
+#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x2adc
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+// base address: 0x6c0
+#define mmRDPCSTX2_RDPCSTX_CNTL 0x2ae0
+#define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL 0x2ae1
+#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 0x2ae2
+#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 0x2ae3
+#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR 0x2ae4
+#define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA 0x2ae5
+#define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL 0x2ae6
+#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL 0x2ae7
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 0x2ae8
+#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_SCRATCH 0x2ae9
+#define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed
+#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2 0x2af2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3 0x2af3
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4 0x2af4
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5 0x2af5
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6 0x2af6
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7 0x2af7
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8 0x2af8
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9 0x2af9
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10 0x2afa
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11 0x2afb
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12 0x2afc
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13 0x2afd
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14 0x2afe
+#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0 0x2aff
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1 0x2b00
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2 0x2b01
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3 0x2b02
+#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 0x2b03
+#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2b04
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2b05
+#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 0x2b06
+#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+// base address: 0x6c0
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR 0x2ae4
+#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA 0x2ae5
+#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+// base address: 0xa20
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x2bb0
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_TX_CNTL 0x2bb1
+#define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x2bb2
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL 0x2bb3
+#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x2bb4
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+// base address: 0xa20
+#define mmRDPCSTX3_RDPCSTX_CNTL 0x2bb8
+#define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL 0x2bb9
+#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL 0x2bba
+#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA 0x2bbb
+#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR 0x2bbc
+#define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA 0x2bbd
+#define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL 0x2bbe
+#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL 0x2bbf
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2 0x2bc0
+#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_SCRATCH 0x2bc1
+#define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5
+#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2 0x2bca
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3 0x2bcb
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4 0x2bcc
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5 0x2bcd
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6 0x2bce
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7 0x2bcf
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8 0x2bd0
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9 0x2bd1
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10 0x2bd2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11 0x2bd3
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12 0x2bd4
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13 0x2bd5
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14 0x2bd6
+#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0 0x2bd7
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1 0x2bd8
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2 0x2bd9
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3 0x2bda
+#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 0x2bdb
+#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2bdc
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2bdd
+#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 0x2bde
+#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+// base address: 0xa20
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR 0x2bbc
+#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA 0x2bbd
+#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+// base address: 0x0
+#define mmDPCSRX_PHY_CNTL 0x2c76
+#define mmDPCSRX_PHY_CNTL_BASE_IDX 2
+#define mmDPCSRX_RX_CLOCK_CNTL 0x2c78
+#define mmDPCSRX_RX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSRX_RX_CNTL 0x2c7a
+#define mmDPCSRX_RX_CNTL_BASE_IDX 2
+#define mmDPCSRX_CBUS_CNTL 0x2c7b
+#define mmDPCSRX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSRX_REG_ERROR_STATUS 0x2c7c
+#define mmDPCSRX_REG_ERROR_STATUS_BASE_IDX 2
+#define mmDPCSRX_RX_ERROR_STATUS 0x2c7d
+#define mmDPCSRX_RX_ERROR_STATUS_BASE_IDX 2
+#define mmDPCSRX_INDEX_MODE_ADDR 0x2c80
+#define mmDPCSRX_INDEX_MODE_ADDR_BASE_IDX 2
+#define mmDPCSRX_INDEX_MODE_DATA 0x2c81
+#define mmDPCSRX_INDEX_MODE_DATA_BASE_IDX 2
+#define mmDPCSRX_DEBUG_CONFIG 0x2c82
+#define mmDPCSRX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+// base address: 0xd80
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x2c88
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_TX_CNTL 0x2c89
+#define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x2c8a
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL 0x2c8b
+#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x2c8c
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+// base address: 0xd80
+#define mmRDPCSTX4_RDPCSTX_CNTL 0x2c90
+#define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL 0x2c91
+#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 0x2c92
+#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 0x2c93
+#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR 0x2c94
+#define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA 0x2c95
+#define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL 0x2c96
+#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL 0x2c97
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 0x2c98
+#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_SCRATCH 0x2c99
+#define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d
+#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2 0x2ca2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3 0x2ca3
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4 0x2ca4
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5 0x2ca5
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6 0x2ca6
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7 0x2ca7
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8 0x2ca8
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9 0x2ca9
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10 0x2caa
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11 0x2cab
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12 0x2cac
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13 0x2cad
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14 0x2cae
+#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0 0x2caf
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1 0x2cb0
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2 0x2cb1
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3 0x2cb2
+#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 0x2cb3
+#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2cb4
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2cb5
+#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 0x2cb6
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+// base address: 0xd80
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR 0x2c94
+#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA 0x2c95
+#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+// base address: 0x10e0
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL 0x2d60
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_TX_CNTL 0x2d61
+#define mmDPCSTX5_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL 0x2d62
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL 0x2d63
+#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR 0x2d64
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+// base address: 0x10e0
+#define mmRDPCSTX5_RDPCSTX_CNTL 0x2d68
+#define mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL 0x2d69
+#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL 0x2d6a
+#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA 0x2d6b
+#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR 0x2d6c
+#define mmRDPCSTX5_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA 0x2d6d
+#define mmRDPCSTX5_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL 0x2d6e
+#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL 0x2d6f
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2 0x2d70
+#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_SCRATCH 0x2d71
+#define mmRDPCSTX5_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75
+#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2 0x2d7a
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3 0x2d7b
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4 0x2d7c
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5 0x2d7d
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6 0x2d7e
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7 0x2d7f
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8 0x2d80
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9 0x2d81
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10 0x2d82
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11 0x2d83
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12 0x2d84
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13 0x2d85
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14 0x2d86
+#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0 0x2d87
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1 0x2d88
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2 0x2d89
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3 0x2d8a
+#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL 0x2d8b
+#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2d8c
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2d8d
+#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG 0x2d8e
+#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+// base address: 0x10e0
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR 0x2d6c
+#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX 2
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA 0x2d6d
+#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA_BASE_IDX 2
+
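+/*
+ * Editorial sketch, not part of the generated header: every register above
+ * is described by a DWORD offset plus a BASE_IDX that selects a segment
+ * base address. Under that assumption, the disabled helper below shows how
+ * a driver might combine such a pair into an absolute register index; the
+ * dpcs_base[] table is a hypothetical stand-in for the per-IP base-address
+ * tables the real driver keeps (amdgpu resolves this lookup through its
+ * SOC15_REG_OFFSET() helper).
+ */
+#if 0	/* illustration only, kept out of the build */
+static const unsigned int dpcs_base[] = { 0x0, 0x0, 0x12c0 };	/* hypothetical bases */
+
+static inline unsigned int dpcs_cr5_addr_reg_index(void)
+{
+	/* BASE_IDX indexes the segment table; the offset is added to it. */
+	return dpcs_base[mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX] +
+	       mmDPCSSYS_CR5_DPCSSYS_CR_ADDR;
+}
+#endif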
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
new file mode 100644
index 000000000000..25e0569e05c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
@@ -0,0 +1,3912 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dpcs_2_0_0_SH_MASK_HEADER
+#define _dpcs_2_0_0_SH_MASK_HEADER
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
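+/*
+ * Editorial sketch, not part of the generated header: the __SHIFT/_MASK
+ * pairs above are intended for open-coded field accesses such as the
+ * disabled helper below, which mirrors the read-modify-write pattern of
+ * the kernel's REG_SET_FIELD()/REG_GET_FIELD() helpers. The reg_val
+ * parameter is assumed to hold a value already read from the
+ * DPCSTX0_DPCSTX_TX_CNTL register.
+ */
+#if 0	/* illustration only, kept out of the build */
+static inline unsigned int dpcstx0_tx_cntl_set_fifo_en(unsigned int reg_val)
+{
+	/* Clear the one-bit field, then OR in the new value at its shift. */
+	reg_val &= ~DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK;
+	reg_val |= (1U << DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT) &
+		   DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK;
+	return reg_val;
+}
+#endif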
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX1_DPCSTX_DEBUG_CONFIG
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE1
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE2
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX1_RDPCSTX_PHY_FUSE3
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr1_dispdec
+//DPCSSYS_CR1_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR1_DPCSSYS_CR_DATA
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
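+// Editor's note (illustrative sketch, not part of the generated header):
+// the CR_ADDR/CR_DATA pair above presumably follows the common indirect
+// address/data convention: software writes the 16-bit target address into
+// CR_ADDR, then reads or writes the 16-bit payload through CR_DATA. A
+// minimal sketch under that assumption, using the kernel's readl()/writel()
+// MMIO accessors from <linux/io.h>; the helper name and the ioremapped
+// register pointers are assumptions introduced for this example.
+
+static inline u16 dpcssys_cr1_read(void __iomem *cr_addr_reg,
+				   void __iomem *cr_data_reg, u16 addr)
+{
+	/* latch the target address, masked to the 16-bit ADDR field */
+	writel(addr & DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK,
+	       cr_addr_reg);
+	/* the data register then reflects the addressed location */
+	return readl(cr_data_reg) &
+	       DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK;
+}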
+
+// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec
+//DPCSTX2_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX2_DPCSTX_TX_CNTL
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_CBUS_CNTL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX2_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX2_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX2_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX2_DPCSTX_DEBUG_CONFIG
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec
+//RDPCSTX2_RDPCSTX_CNTL
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX2_RDPCS_TX_CR_ADDR
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_CR_DATA
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX2_RDPCSTX_SCRATCH
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX2_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX2_RDPCSTX_PHY_CNTL2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX2_RDPCSTX_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL5
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX2_RDPCSTX_PHY_CNTL13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE1
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE2
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE3
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
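+// Usage sketch (illustrative, not part of the generated list): each field in
+// these headers is described by a <FIELD>__SHIFT bit offset and a matching
+// <FIELD>_MASK, so a field is read by masking the 32-bit register word and
+// shifting it down; 'val' below is assumed to hold a previously read
+// RDPCSTX2_RDPCSTX_PHY_CNTL5 value:
+//
+//   uint32_t rate = (val & RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK)
+//                   >> RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT;
+//
+// Writing a field is the reverse: clear MASK in the word, then OR in the new
+// value shifted left by __SHIFT.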
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+//DPCSSYS_CR2_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR2_DPCSSYS_CR_DATA
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
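+// Note (inference from the field names and 16-bit widths, not stated in the
+// header): the DPCSSYS_CR_ADDR/DPCSSYS_CR_DATA pair above presumably forms an
+// address/data style indirect interface to the PHY configuration space:
+// software programs the target register address through RDPCS_TX_CR_ADDR and
+// then transfers the 16-bit payload through RDPCS_TX_CR_DATA.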
+
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+//DPCSTX3_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX3_DPCSTX_TX_CNTL
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_CBUS_CNTL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
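+// Note (inference from the field names, not stated in the header): a PLL
+// update is presumably staged through this ADDR/DATA pair and committed by
+// setting DPCS_TX_PLL_UPDATE_REQ in DPCSTX3_DPCSTX_TX_CNTL, with
+// DPCS_TX_PLL_UPDATE_PENDING indicating that the update has not yet landed.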
+//DPCSTX3_DPCSTX_DEBUG_CONFIG
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+//RDPCSTX3_RDPCSTX_CNTL
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX3_RDPCS_TX_CR_ADDR
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_CR_DATA
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX3_RDPCSTX_SCRATCH
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX3_RDPCSTX_PHY_CNTL2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX3_RDPCSTX_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX3_RDPCSTX_PHY_CNTL13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE1
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE2
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE3
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+//DPCSSYS_CR3_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR3_DPCSSYS_CR_DATA
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec
+//DPCSRX_PHY_CNTL
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET__SHIFT 0x0
+#define DPCSRX_PHY_CNTL__DPCS_PHY_RESET_MASK 0x00000001L
+//DPCSRX_RX_CLOCK_CNTL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS__SHIFT 0x0
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN__SHIFT 0x1
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL__SHIFT 0x2
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON__SHIFT 0x4
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS__SHIFT 0x10
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN__SHIFT 0x11
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON__SHIFT 0x12
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS__SHIFT 0x14
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN__SHIFT 0x15
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON__SHIFT 0x16
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS__SHIFT 0x18
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN__SHIFT 0x19
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON__SHIFT 0x1a
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS__SHIFT 0x1c
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN__SHIFT 0x1d
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON__SHIFT 0x1e
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS_MASK 0x00000001L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN_MASK 0x00000002L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL_MASK 0x0000000CL
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON_MASK 0x00000010L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS_MASK 0x00010000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN_MASK 0x00020000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON_MASK 0x00040000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS_MASK 0x00100000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN_MASK 0x00200000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON_MASK 0x00400000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS_MASK 0x01000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN_MASK 0x02000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON_MASK 0x04000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS_MASK 0x10000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN_MASK 0x20000000L
+#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON_MASK 0x40000000L
+//DPCSRX_RX_CNTL
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN__SHIFT 0x0
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN__SHIFT 0x1
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN__SHIFT 0x2
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN__SHIFT 0x3
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN__SHIFT 0x4
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START__SHIFT 0x5
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY__SHIFT 0x8
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET__SHIFT 0x1f
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK 0x00000001L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK 0x00000002L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN_MASK 0x00000004L
+#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN_MASK 0x00000008L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK 0x00000010L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START_MASK 0x00000020L
+#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY_MASK 0x00000F00L
+#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET_MASK 0x80000000L
+//DPCSRX_CBUS_CNTL
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x0000000FL
+#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0x0000FF00L
+#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSRX_REG_ERROR_STATUS
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+//DPCSRX_RX_ERROR_STATUS
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR__SHIFT 0x0
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR__SHIFT 0x1
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR__SHIFT 0x2
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR__SHIFT 0x3
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR__SHIFT 0x8
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK__SHIFT 0xc
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK 0x00000001L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR_MASK 0x00000002L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR_MASK 0x00000004L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR_MASK 0x00000008L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK 0x00000100L
+#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK_MASK 0x00001000L
+//DPCSRX_INDEX_MODE_ADDR
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0
+#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x0003FFFFL
+//DPCSRX_INDEX_MODE_DATA
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0
+#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xFFFFFFFFL
+//DPCSRX_DEBUG_CONFIG
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL__SHIFT 0x6
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL_MASK 0x000000C0L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x00003800L
+#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec
+//DPCSTX4_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX4_DPCSTX_TX_CNTL
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_CBUS_CNTL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX4_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX4_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX4_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX4_DPCSTX_DEBUG_CONFIG
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec
+//RDPCSTX4_RDPCSTX_CNTL
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX4_RDPCS_TX_CR_ADDR
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_CR_DATA
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX4_RDPCSTX_SCRATCH
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX4_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX4_RDPCSTX_PHY_CNTL2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX4_RDPCSTX_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL5
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL9
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX4_RDPCSTX_PHY_CNTL11
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL12
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX4_RDPCSTX_PHY_CNTL13
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX4_RDPCSTX_PHY_CNTL14
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE1
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE2
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX4_RDPCSTX_PHY_FUSE3
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
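+/*
+ * Each field in the registers above is described by a *__SHIFT/*_MASK
+ * pair.  A minimal sketch of the usual read-modify-write idiom for such
+ * fields follows; reg_read()/reg_write() are hypothetical placeholders
+ * for the driver's MMIO accessors, the mm* offset macro is assumed to
+ * come from the companion *_offset.h header, and "val" is the raw field
+ * value to program:
+ *
+ *   u32 v = reg_read(mmRDPCSTX4_RDPCSTX_PHY_CNTL5);
+ *   v &= ~RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK;
+ *   v |= (val << RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT) &
+ *        RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK;
+ *   reg_write(mmRDPCSTX4_RDPCSTX_PHY_CNTL5, v);
+ */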
+
+// addressBlock: dpcssys_dpcssys_cr4_dispdec
+//DPCSSYS_CR4_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR4_DPCSSYS_CR_DATA
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
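+/*
+ * DPCSSYS_CR_ADDR/DPCSSYS_CR_DATA above appear to form the usual
+ * address/data pair for indirect access to the PHY's internal CR
+ * register space: the 16-bit target address is written to CR_ADDR and
+ * the payload is then transferred through CR_DATA.  A sketch of a read,
+ * assuming that conventional indexed protocol plus the same hypothetical
+ * reg_read()/reg_write() helpers and mm* offset macros as above:
+ *
+ *   reg_write(mmDPCSSYS_CR4_DPCSSYS_CR_ADDR,
+ *             cr_addr & DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK);
+ *   u16 cr_val = reg_read(mmDPCSSYS_CR4_DPCSSYS_CR_DATA) &
+ *                DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK;
+ */
+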
+// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec
+//DPCSTX5_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX5_DPCSTX_TX_CNTL
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX5_DPCSTX_CBUS_CNTL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX5_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX5_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX5_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX5_DPCSTX_DEBUG_CONFIG
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec
+//RDPCSTX5_RDPCSTX_CNTL
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX5_RDPCS_TX_CR_ADDR
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_CR_DATA
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX5_RDPCSTX_SCRATCH
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX5_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX5_RDPCSTX_PHY_CNTL2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX5_RDPCSTX_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX5_RDPCSTX_PHY_CNTL13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE1
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE2
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE3
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+//DPCSSYS_CR5_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR5_DPCSSYS_CR_DATA
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+#endif
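
Editor's aside: every register in these generated headers follows the same `<REG>__<FIELD>__SHIFT` / `<REG>__<FIELD>_MASK` pairing, so all field access reduces to mask-and-shift. A minimal sketch of the idiom, assuming the dpcs header above is included; the accessors are hypothetical stand-ins, not the driver's real API:

#include <stdint.h>

/* Extract a field: mask first, then shift down. */
static inline uint32_t rx_vco_ld_val(uint32_t regval)
{
	return (regval & RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK)
	       >> RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT;
}

/* Update a field: clear its mask, then OR in the shifted new value. */
static inline uint32_t set_rx_ref_ld_val(uint32_t regval, uint32_t val)
{
	regval &= ~RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK;
	regval |= (val << RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT)
		  & RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK;
	return regval;
}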
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
index 945bb6101a9d..945bb6101a9d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
index 6e039f2208e1..6e039f2208e1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index 2bfaaa8157d0..d984c916df80 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -2224,6 +2224,14 @@
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a
#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4 0x0e25
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5 0x0e26
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6 0x0e27
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7 0x0e28
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
#define mmCOMPUTE_RESTART_X 0x0e1b
#define mmCOMPUTE_RESTART_X_BASE_IDX 0
#define mmCOMPUTE_RESTART_Y 0x0e1c
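
Editor's aside: the new `mmCOMPUTE_STATIC_THREAD_MGMT_SE4..7` entries follow the usual pairing of a dword offset plus a `_BASE_IDX` that selects a per-IP base segment. A sketch of how the two compose into a final register offset, assuming a hypothetical base table (amdgpu's real lookup lives in its SOC15 helpers):

#include <stdint.h>

/* Hypothetical per-IP base-segment table indexed by the _BASE_IDX value. */
extern const uint32_t gc_base_segment[];

static inline uint32_t gc_reg_offset(uint32_t reg, uint32_t base_idx)
{
	/* Final dword offset = IP base segment + register offset. */
	return gc_base_segment[base_idx] + reg;
}

/* Usage: gc_reg_offset(mmCOMPUTE_STATIC_THREAD_MGMT_SE4,
 *                      mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX); */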
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index d4c613a85352..ea316d8dcb37 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -2060,7 +2060,8 @@
// addressBlock: gc_sqdec
//SQ_CONFIG
-#define SQ_CONFIG__UNUSED__SHIFT 0x0
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0
+#define SQ_CONFIG__UNUSED__SHIFT 0x1
#define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
#define SQ_CONFIG__DEBUG_EN__SHIFT 0x8
#define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9
@@ -2079,7 +2080,8 @@
#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
-#define SQ_CONFIG__UNUSED_MASK 0x0000007FL
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L
+#define SQ_CONFIG__UNUSED_MASK 0x0000007EL
#define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
#define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
#define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L
@@ -8739,10 +8741,16 @@
#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
+#define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT 0xb
+#define TCP_ADDR_CONFIG__ENABLE2MHASH__SHIFT 0xc
+#define TCP_ADDR_CONFIG__ENABLE1GHASH__SHIFT 0xd
#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+#define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK 0x00000800L
+#define TCP_ADDR_CONFIG__ENABLE2MHASH_MASK 0x00001000L
+#define TCP_ADDR_CONFIG__ENABLE1GHASH_MASK 0x00002000L
//TCP_CREDIT
#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
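
Editor's aside: both hunks above repartition existing bitfields — SQ_CONFIG gains DISABLE_BARRIER_WAITCNT at bit 0 (which is why UNUSED shrinks to bits 6:1 and its mask changes from 0x7F to 0x7E), and TCP_ADDR_CONFIG gains three hash-enable bits. A read-modify-write sketch using the new fields, assuming hypothetical `rreg32`/`wreg32` accessors and caller-supplied offsets:

#include <stdint.h>

extern uint32_t rreg32(uint32_t offset);          /* hypothetical */
extern void wreg32(uint32_t offset, uint32_t v);  /* hypothetical */

static void sq_disable_barrier_waitcnt(uint32_t sq_config_offset)
{
	uint32_t v = rreg32(sq_config_offset);

	v &= ~SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK;
	v |= 1 << SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT;
	wreg32(sq_config_offset, v);
}

static void tcp_enable_64k_hash(uint32_t tcp_addr_config_offset)
{
	uint32_t v = rreg32(tcp_addr_config_offset);

	v |= TCP_ADDR_CONFIG__ENABLE64KHASH_MASK;  /* single-bit field */
	wreg32(tcp_addr_config_offset, v);
}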
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
new file mode 100644
index 000000000000..f41556abfbbc
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_OFFSET_HEADER
+#define _gc_9_4_1_OFFSET_HEADER
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define mmGRBM_CNTL 0x0000
+#define mmGRBM_CNTL_BASE_IDX 0
+#define mmGRBM_SKEW_CNTL 0x0001
+#define mmGRBM_SKEW_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS2 0x0002
+#define mmGRBM_STATUS2_BASE_IDX 0
+#define mmGRBM_PWR_CNTL 0x0003
+#define mmGRBM_PWR_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS 0x0004
+#define mmGRBM_STATUS_BASE_IDX 0
+#define mmGRBM_STATUS_SE0 0x0005
+#define mmGRBM_STATUS_SE0_BASE_IDX 0
+#define mmGRBM_STATUS_SE1 0x0006
+#define mmGRBM_STATUS_SE1_BASE_IDX 0
+#define mmGRBM_SOFT_RESET 0x0008
+#define mmGRBM_SOFT_RESET_BASE_IDX 0
+#define mmGRBM_GFX_CLKEN_CNTL 0x000c
+#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define mmGRBM_WAIT_IDLE_CLOCKS 0x000d
+#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define mmGRBM_STATUS_SE2 0x000e
+#define mmGRBM_STATUS_SE2_BASE_IDX 0
+#define mmGRBM_STATUS_SE3 0x000f
+#define mmGRBM_STATUS_SE3_BASE_IDX 0
+#define mmGRBM_READ_ERROR 0x0016
+#define mmGRBM_READ_ERROR_BASE_IDX 0
+#define mmGRBM_READ_ERROR2 0x0017
+#define mmGRBM_READ_ERROR2_BASE_IDX 0
+#define mmGRBM_INT_CNTL 0x0018
+#define mmGRBM_INT_CNTL_BASE_IDX 0
+#define mmGRBM_TRAP_OP 0x0019
+#define mmGRBM_TRAP_OP_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR 0x001a
+#define mmGRBM_TRAP_ADDR_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR_MSK 0x001b
+#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define mmGRBM_TRAP_WD 0x001c
+#define mmGRBM_TRAP_WD_BASE_IDX 0
+#define mmGRBM_TRAP_WD_MSK 0x001d
+#define mmGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define mmGRBM_DSM_BYPASS 0x001e
+#define mmGRBM_DSM_BYPASS_BASE_IDX 0
+#define mmGRBM_WRITE_ERROR 0x001f
+#define mmGRBM_WRITE_ERROR_BASE_IDX 0
+#define mmGRBM_IOV_ERROR 0x0020
+#define mmGRBM_IOV_ERROR_BASE_IDX 0
+#define mmGRBM_CHIP_REVISION 0x0021
+#define mmGRBM_CHIP_REVISION_BASE_IDX 0
+#define mmGRBM_GFX_CNTL 0x0022
+#define mmGRBM_GFX_CNTL_BASE_IDX 0
+#define mmGRBM_RSMU_CFG 0x0023
+#define mmGRBM_RSMU_CFG_BASE_IDX 0
+#define mmGRBM_IH_CREDIT 0x0024
+#define mmGRBM_IH_CREDIT_BASE_IDX 0
+#define mmGRBM_PWR_CNTL2 0x0025
+#define mmGRBM_PWR_CNTL2_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026
+#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027
+#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define mmGRBM_RSMU_READ_ERROR 0x0028
+#define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define mmGRBM_CHICKEN_BITS 0x0029
+#define mmGRBM_CHICKEN_BITS_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE0 0x002a
+#define mmGRBM_FENCE_RANGE0_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE1 0x002b
+#define mmGRBM_FENCE_RANGE1_BASE_IDX 0
+#define mmGRBM_NOWHERE 0x003f
+#define mmGRBM_NOWHERE_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG0 0x0040
+#define mmGRBM_SCRATCH_REG0_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG1 0x0041
+#define mmGRBM_SCRATCH_REG1_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG2 0x0042
+#define mmGRBM_SCRATCH_REG2_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG3 0x0043
+#define mmGRBM_SCRATCH_REG3_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG4 0x0044
+#define mmGRBM_SCRATCH_REG4_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG5 0x0045
+#define mmGRBM_SCRATCH_REG5_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG6 0x0046
+#define mmGRBM_SCRATCH_REG6_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG7 0x0047
+#define mmGRBM_SCRATCH_REG7_BASE_IDX 0
+
+// addressBlock: gc_cppdec2
+// base address: 0xc600
+#define mmCPF_EDC_TAG_CNT 0x1189
+#define mmCPF_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPF_EDC_ROQ_CNT 0x118a
+#define mmCPF_EDC_ROQ_CNT_BASE_IDX 0
+#define mmCPG_EDC_TAG_CNT 0x118b
+#define mmCPG_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPG_EDC_DMA_CNT 0x118d
+#define mmCPG_EDC_DMA_CNT_BASE_IDX 0
+#define mmCPC_EDC_SCRATCH_CNT 0x118e
+#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0
+#define mmCPC_EDC_UCODE_CNT 0x118f
+#define mmCPC_EDC_UCODE_CNT_BASE_IDX 0
+#define mmDC_EDC_STATE_CNT 0x1191
+#define mmDC_EDC_STATE_CNT_BASE_IDX 0
+#define mmDC_EDC_CSINVOC_CNT 0x1192
+#define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0
+#define mmDC_EDC_RESTORE_CNT 0x1193
+#define mmDC_EDC_RESTORE_CNT_BASE_IDX 0
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define mmGDS_EDC_CNT 0x05c5
+#define mmGDS_EDC_CNT_BASE_IDX 0
+#define mmGDS_EDC_GRBM_CNT 0x05c6
+#define mmGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define mmGDS_EDC_OA_DED 0x05c7
+#define mmGDS_EDC_OA_DED_BASE_IDX 0
+#define mmGDS_EDC_OA_PHY_CNT 0x05cb
+#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define mmGDS_EDC_OA_PIPE_CNT 0x05cc
+#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define mmSPI_EDC_CNT 0x0445
+#define mmSPI_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define mmSQC_EDC_CNT2 0x032c
+#define mmSQC_EDC_CNT2_BASE_IDX 0
+#define mmSQC_EDC_CNT3 0x032d
+#define mmSQC_EDC_CNT3_BASE_IDX 0
+#define mmSQC_EDC_PARITY_CNT3 0x032e
+#define mmSQC_EDC_PARITY_CNT3_BASE_IDX 0
+#define mmSQC_EDC_CNT 0x03a2
+#define mmSQC_EDC_CNT_BASE_IDX 0
+#define mmSQ_EDC_SEC_CNT 0x03a3
+#define mmSQ_EDC_SEC_CNT_BASE_IDX 0
+#define mmSQ_EDC_DED_CNT 0x03a4
+#define mmSQ_EDC_DED_CNT_BASE_IDX 0
+#define mmSQ_EDC_INFO 0x03a5
+#define mmSQ_EDC_INFO_BASE_IDX 0
+#define mmSQ_EDC_CNT 0x03a6
+#define mmSQ_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tcdec
+// base address: 0xac00
+#define mmTCP_EDC_CNT 0x0b17
+#define mmTCP_EDC_CNT_BASE_IDX 0
+#define mmTCP_EDC_CNT_NEW 0x0b18
+#define mmTCP_EDC_CNT_NEW_BASE_IDX 0
+#define mmTCP_ATC_EDC_GATCL1_CNT 0x12b1
+#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0
+#define mmTCI_EDC_CNT 0x0b60
+#define mmTCI_EDC_CNT_BASE_IDX 0
+#define mmTCC_EDC_CNT 0x0b82
+#define mmTCC_EDC_CNT_BASE_IDX 0
+#define mmTCC_EDC_CNT2 0x0b83
+#define mmTCC_EDC_CNT2_BASE_IDX 0
+#define mmTCA_EDC_CNT 0x0bc5
+#define mmTCA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTD_EDC_CNT 0x052e
+#define mmTD_EDC_CNT_BASE_IDX 0
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_EDC_CNT 0x0706
+#define mmGCEA_EDC_CNT_BASE_IDX 0
+#define mmGCEA_EDC_CNT2 0x0707
+#define mmGCEA_EDC_CNT2_BASE_IDX 0
+#define mmGCEA_EDC_CNT3 0x071b
+#define mmGCEA_EDC_CNT3_BASE_IDX 0
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define mmSCRATCH_REG0 0x2040
+#define mmSCRATCH_REG0_BASE_IDX 1
+#define mmSCRATCH_REG1 0x2041
+#define mmSCRATCH_REG1_BASE_IDX 1
+#define mmSCRATCH_REG2 0x2042
+#define mmSCRATCH_REG2_BASE_IDX 1
+#define mmSCRATCH_REG3 0x2043
+#define mmSCRATCH_REG3_BASE_IDX 1
+#define mmSCRATCH_REG4 0x2044
+#define mmSCRATCH_REG4_BASE_IDX 1
+#define mmSCRATCH_REG5 0x2045
+#define mmSCRATCH_REG5_BASE_IDX 1
+#define mmSCRATCH_REG6 0x2046
+#define mmSCRATCH_REG6_BASE_IDX 1
+#define mmSCRATCH_REG7 0x2047
+#define mmSCRATCH_REG7_BASE_IDX 1
+#define mmGRBM_GFX_INDEX 0x2200
+#define mmGRBM_GFX_INDEX_BASE_IDX 1
+
+// addressBlock: gc_utcl2_atcl2dec
+// base address: 0xa000
+#define mmATC_L2_CACHE_4K_DSM_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_DSM_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_DSM_CNTL 0x0810
+#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_DSM_CNTL 0x0811
+#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0
+
+// addressBlock: gc_utcl2_vml2pfdec
+// base address: 0xa100
+#define mmVML2_MEM_ECC_INDEX 0x0860
+#define mmVML2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVML2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmUTCL2_MEM_ECC_INDEX 0x0862
+#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX 0
+
+#define mmVML2_MEM_ECC_CNTL 0x0863
+#define mmVML2_MEM_ECC_CNTL_BASE_IDX 0
+#define mmVML2_WALKER_MEM_ECC_CNTL 0x0864
+#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0
+#define mmUTCL2_MEM_ECC_CNTL 0x0865
+#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX 0
+
+// addressBlock: gc_rlcpdec
+// base address: 0x3b000
+#define mmRLC_EDC_CNT 0x4d40
+#define mmRLC_EDC_CNT_BASE_IDX 1
+#define mmRLC_EDC_CNT2 0x4d41
+#define mmRLC_EDC_CNT2_BASE_IDX 1
+
+#endif
\ No newline at end of file
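
Editor's aside: the offsets above exist so RAS code can poll the per-block EDC (error detection and correction) counters. A minimal dump-loop sketch, assuming the gc_9_4_1 offsets above are in scope and a hypothetical `rreg32` accessor; all registers listed use `_BASE_IDX` 0, so the raw dword offsets are used directly for illustration:

#include <stdint.h>
#include <stdio.h>

extern uint32_t rreg32(uint32_t offset);  /* hypothetical accessor */

static const struct { const char *name; uint32_t offset; } edc_regs[] = {
	{ "CPF_EDC_TAG_CNT", mmCPF_EDC_TAG_CNT },
	{ "GDS_EDC_CNT",     mmGDS_EDC_CNT },
	{ "SPI_EDC_CNT",     mmSPI_EDC_CNT },
	{ "SQ_EDC_CNT",      mmSQ_EDC_CNT },
};

static void dump_edc_counters(void)
{
	size_t i;

	for (i = 0; i < sizeof(edc_regs) / sizeof(edc_regs[0]); i++)
		printf("%s = 0x%08x\n", edc_regs[i].name,
		       rreg32(edc_regs[i].offset));
}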
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
new file mode 100644
index 000000000000..f26246a600c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_SH_MASK_HEADER
+#define _gc_9_4_1_SH_MASK_HEADER
+
+// addressBlock: gc_cppdec2
+//CPF_EDC_TAG_CNT
+#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPF_EDC_ROQ_CNT
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT 0x4
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT 0x6
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK 0x00000030L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK 0x000000C0L
+//CPG_EDC_TAG_CNT
+#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPG_EDC_DMA_CNT
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT 0x0
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x4
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x6
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK 0x0000000CL
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x00000030L
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x000000C0L
+//CPC_EDC_SCRATCH_CNT
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPC_EDC_UCODE_CNT
+#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL
+//DC_EDC_STATE_CNT
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+//DC_EDC_CSINVOC_CNT
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT 0x4
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT 0x6
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK 0x00000030L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L
+//DC_EDC_RESTORE_CNT
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT 0x4
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT 0x6
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK 0x00000030L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L
+
+// addressBlock: gc_gdsdec
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
+
+// addressBlock: gc_shsdec
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT 0xc
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT 0xe
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12
+#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L
+#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L
+#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L
+#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK 0x00003000L
+#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK 0x0000C000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L
+#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L
+
+// addressBlock: gc_sqdec
+//SQC_EDC_CNT2
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L
+//SQC_EDC_CNT3
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L
+//SQC_EDC_PARITY_CNT3
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L
+#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L
+//SQC_EDC_CNT
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L
+#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L
+#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L
+#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L
+#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L
+#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L
+#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L
+#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L
+//SQ_EDC_SEC_CNT
+#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0
+#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8
+#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10
+#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL
+#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L
+#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L
+//SQ_EDC_DED_CNT
+#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0
+#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8
+#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10
+#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL
+#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L
+#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L
+//SQ_EDC_INFO
+#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0
+#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4
+#define SQ_EDC_INFO__SOURCE__SHIFT 0x6
+#define SQ_EDC_INFO__VM_ID__SHIFT 0x9
+#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL
+#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L
+#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L
+#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L
+//SQ_EDC_CNT
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0
+#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4
+#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6
+#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8
+#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc
+#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10
+#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14
+#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18
+#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a
+#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L
+#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL
+#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L
+#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L
+#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L
+#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L
+#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L
+#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L
+#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L
+#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L
+#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L
+#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L
+#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L
+#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L
+
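Editor's aside: the EDC counters above pack 2-bit SEC (single-error-corrected) and DED (double-error-detected) counts side by side, so decoding is one mask-and-shift per field. A sketch over SQ_EDC_CNT, assuming the sh_mask definitions above are included; the helper macro is illustrative, not driver API:

#include <stdint.h>

#define EDC_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Sum the correctable (SEC) counts packed into SQ_EDC_CNT. */
static uint32_t sq_edc_sec_total(uint32_t val)
{
	return EDC_FIELD(val, SQ_EDC_CNT, LDS_D_SEC_COUNT) +
	       EDC_FIELD(val, SQ_EDC_CNT, LDS_I_SEC_COUNT) +
	       EDC_FIELD(val, SQ_EDC_CNT, SGPR_SEC_COUNT) +
	       EDC_FIELD(val, SQ_EDC_CNT, VGPR0_SEC_COUNT) +
	       EDC_FIELD(val, SQ_EDC_CNT, VGPR1_SEC_COUNT) +
	       EDC_FIELD(val, SQ_EDC_CNT, VGPR2_SEC_COUNT) +
	       EDC_FIELD(val, SQ_EDC_CNT, VGPR3_SEC_COUNT);
}
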
+// addressBlock: gc_tpdec
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L
+
+// addressBlock: gc_tcdec
+//TCP_EDC_CNT
+#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0
+#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8
+#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10
+#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL
+#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L
+#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L
+//TCP_EDC_CNT_NEW
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT 0x8
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT 0xa
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xc
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xe
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0x10
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x12
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x14
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x16
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x18
+#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L
+#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L
+#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK 0x00000300L
+#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK 0x00000C00L
+#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00003000L
+#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x0000C000L
+#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x00030000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x000C0000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x00300000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00C00000L
+#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x03000000L
+//TCP_ATC_EDC_GATCL1_CNT
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0
+#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL
+//TCI_EDC_CNT
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT 0x0
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT 0x2
+#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK 0x00000003L
+#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK 0x0000000CL
+//TCA_EDC_CNT
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT 0x0
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT 0x2
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT 0x4
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT 0x6
+#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK 0x00000003L
+#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK 0x0000000CL
+#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK 0x00000030L
+#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK 0x000000C0L
+//TCC_EDC_CNT
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT 0x14
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT 0x16
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT 0x18
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT 0x1a
+#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L
+#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL
+#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L
+#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L
+#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L
+#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L
+#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L
+#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L
+#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L
+#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK 0x00300000L
+#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK 0x00C00000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK 0x03000000L
+#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK 0x0C000000L
+//TCC_EDC_CNT2
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT 0x0
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT 0x2
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT 0x4
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT 0x6
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT 0x8
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT 0xa
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT 0xc
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT 0xe
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT 0x10
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT 0x12
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT 0x14
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT 0x16
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT 0x18
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT 0x1a
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT 0x1c
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT 0x1e
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK 0x00000003L
+#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK 0x0000000CL
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK 0x00000030L
+#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK 0x000000C0L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK 0x00000300L
+#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK 0x00000C00L
+#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK 0x00003000L
+#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK 0x0000C000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK 0x00030000L
+#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK 0x000C0000L
+#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK 0x00300000L
+#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK 0x00C00000L
+#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK 0x03000000L
+#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK 0x0C000000L
+#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK 0x30000000L
+#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK 0xC0000000L
+
+// addressBlock: gc_tpdec
+//TD_EDC_CNT
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT 0x8
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT 0xa
+#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L
+#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL
+#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L
+#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L
+#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK 0x00000300L
+#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK 0x00000C00L
+//TA_EDC_CNT
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12
+#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L
+#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL
+#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L
+#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L
+#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L
+#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L
+#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L
+#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L
+#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L
+#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L
+
+// addressBlock: gc_ea_gceadec2
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L
+
+// addressBlock: gc_gfxudec
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+
+// addressBlock: gc_utcl2_atcl2dec
+//ATC_L2_CNTL
+//ATC_L2_CACHE_4K_DSM_INDEX
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_2M_DSM_INDEX
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0
+#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL
+//ATC_L2_CACHE_4K_DSM_CNTL
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+//ATC_L2_CACHE_2M_DSM_CNTL
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf
+#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L
+#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L
+
+// addressBlock: gc_utcl2_vml2pfdec
+//VML2_MEM_ECC_INDEX
+#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_WALKER_MEM_ECC_INDEX
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//UTCL2_MEM_ECC_INDEX
+#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VML2_MEM_ECC_CNTL
+#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+//VML2_WALKER_MEM_ECC_CNTL
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+//UTCL2_MEM_ECC_CNTL
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe
+#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L
+#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L
+
+// addressBlock: gc_rlcpdec
+//RLC_EDC_CNT
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT 0x0
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT 0x2
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT 0x6
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT 0x8
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT 0xa
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT 0xe
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT 0x10
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT 0x12
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT 0x16
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT 0x18
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT 0x1a
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT 0x1c
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT 0x1e
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK 0x00000003L
+#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK 0x0000000CL
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L
+#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK 0x00000300L
+#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK 0x00000C00L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L
+#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK 0x00030000L
+#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK 0x000C0000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L
+#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK 0x03000000L
+#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK 0x0C000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK 0x30000000L
+#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK 0xC0000000L
+//RLC_EDC_CNT2
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT 0x0
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT 0x2
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT 0x6
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT 0x8
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT 0xa
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT 0xe
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT 0x10
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT 0x12
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT 0x16
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT 0x18
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT 0x1a
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT 0x1c
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT 0x1e
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK 0x00000003L
+#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK 0x0000000CL
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L
+#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK 0x00000300L
+#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK 0x00000C00L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L
+#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK 0x00030000L
+#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK 0x000C0000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L
+#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK 0x03000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK 0x0C000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK 0x30000000L
+#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK 0xC0000000L
+
+#endif
\ No newline at end of file
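
The SHIFT/MASK pairs above follow the usual amdgpu register-header convention: each SEC/SED/DED counter is a 2-bit field, read by masking the raw register value and shifting it down, so any single counter saturates at 3. A minimal sketch, assuming the header above is included; the helper name is illustrative and not part of this patch:

#include <stdint.h>

/* Extract the 2-bit MAM D0MEM single-error-detect count from a raw
 * GCEA_EDC_CNT2 value (mask 0x00030000, shift 0x10 per the defines above). */
static inline uint32_t gcea_mam_d0mem_sed_count(uint32_t reg_val)
{
	return (reg_val & GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK) >>
	       GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT;
}
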
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 40dfbf16bd34..111a71b434e2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -11185,6 +11185,14 @@
#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -11193,6 +11201,14 @@
#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA0_DSM_CNTL
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -14197,6 +14213,14 @@
#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -14205,6 +14229,14 @@
#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA1_DSM_CNTL
#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -17209,6 +17241,14 @@
#define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -17217,6 +17257,14 @@
#define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA2_DSM_CNTL
#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -20221,6 +20269,14 @@
#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -20229,6 +20285,14 @@
#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA3_DSM_CNTL
#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -23233,6 +23297,14 @@
#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -23241,6 +23313,14 @@
#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA4_DSM_CNTL
#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -34952,6 +35032,14 @@
#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -34960,6 +35048,14 @@
#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA5_DSM_CNTL
#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -37964,6 +38060,14 @@
#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -37972,6 +38076,14 @@
#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA6_DSM_CNTL
#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -40976,6 +41088,14 @@
#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
#define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
#define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
@@ -40984,6 +41104,14 @@
#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
#define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
#define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
//MMEA7_DSM_CNTL
#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
index 043aa695d63f..0d6b594be775 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h
@@ -27,5 +27,7 @@
#define mmUMCCH0_0_EccErrCnt_BASE_IDX 0
#define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2
#define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 0
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
new file mode 100644
index 000000000000..ce005c674a18
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_OFFSET_HEADER
+#define _umc_6_1_2_OFFSET_HEADER
+
+#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360
+#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1
+#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361
+#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
new file mode 100644
index 000000000000..a5a8c993ec3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_6_1_2_SH_MASK_HEADER
+#define _umc_6_1_2_SH_MASK_HEADER
+
+//UMCCH0_0_EccErrCntSel_ARCT
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel__SHIFT 0x0
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt__SHIFT 0xc
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn__SHIFT 0xf
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel_MASK 0x0000000FL
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt_MASK 0x00003000L
+#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn_MASK 0x00008000L
+//UMCCH0_0_EccErrCnt_ARCT
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt__SHIFT 0x0
+#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt_MASK 0x0000FFFFL
+//MCA_UMC_UMC0_MCUMC_STATUST0_ARCT
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt__SHIFT 0x10
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0__SHIFT 0x16
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId__SHIFT 0x20
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1__SHIFT 0x26
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub__SHIFT 0x28
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2__SHIFT 0x29
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison__SHIFT 0x2b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred__SHIFT 0x2c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC__SHIFT 0x2d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC__SHIFT 0x2e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3__SHIFT 0x2f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent__SHIFT 0x34
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV__SHIFT 0x35
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4__SHIFT 0x36
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC__SHIFT 0x37
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC__SHIFT 0x39
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV__SHIFT 0x3a
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV__SHIFT 0x3b
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En__SHIFT 0x3c
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC__SHIFT 0x3d
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val__SHIFT 0x3f
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode_MASK 0x000000000000FFFFL
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt_MASK 0x00000000003F0000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0_MASK 0x00000000FFC00000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId_MASK 0x0000003F00000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1_MASK 0x000000C000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub_MASK 0x0000010000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2_MASK 0x0000060000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison_MASK 0x0000080000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred_MASK 0x0000100000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC_MASK 0x0000200000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC_MASK 0x0000400000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3_MASK 0x000F800000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent_MASK 0x0010000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV_MASK 0x0020000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4_MASK 0x0040000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC_MASK 0x0080000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal_MASK 0x0100000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC_MASK 0x0200000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV_MASK 0x0400000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV_MASK 0x0800000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En_MASK 0x1000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC_MASK 0x2000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow_MASK 0x4000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK 0x8000000000000000L
+//MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB_MASK 0x3F00000000000000L
+#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved_MASK 0xC000000000000000L
+
+#endif
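
As a hedged sketch (not part of this patch) of how the 64-bit _ARCT status and address masks above might be consumed: Val gates the whole record, CECC/UECC distinguish correctable from uncorrectable ECC errors, and ErrorAddr occupies the low 56 bits of ADDRT0. The function names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* A record is a correctable ECC error when both Val and CECC are set. */
static bool mca_is_correctable_ecc(uint64_t status)
{
	return (status & MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK) &&
	       (status & MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC_MASK);
}

/* ErrorAddr sits at bits [55:0] of ADDRT0, so the shift is zero. */
static uint64_t mca_error_addr(uint64_t addr_reg)
{
	return (addr_reg & MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK) >>
	       MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT;
}
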
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index dd7cbc00a0aa..70146518174c 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -672,20 +672,6 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
-/* This is part of vram_usagebyfirmware_v2_1 */
-struct vram_reserve_block
-{
- uint32_t start_address_in_kb;
- uint16_t used_by_firmware_in_kb;
- uint16_t used_by_driver_in_kb;
-};
-
-/* Definitions for constance */
-enum atomfirmware_internal_constants
-{
- ONE_KiB = 0x400,
- ONE_MiB = 0x100000,
-};
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 2cd217e60125..a607b1034962 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -256,6 +256,10 @@ struct kfd2kgd_calls {
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
+ int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off);
+
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
@@ -307,8 +311,6 @@ struct kfd2kgd_calls {
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
- int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
- int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
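
The new hiq_mqd_load hook mirrors hqd_load but targets the HIQ, taking a doorbell offset instead of wptr bookkeeping. A hypothetical call site, assuming kernel context; only the hook's signature comes from this patch:

/* Illustrative wrapper: load an HIQ MQD through the new hook, guarding
 * against backends that do not implement it. */
static int load_hiq(struct kgd_dev *kgd, const struct kfd2kgd_calls *f,
		    void *mqd, uint32_t pipe_id, uint32_t queue_id,
		    uint32_t doorbell_off)
{
	if (!f->hiq_mqd_load)
		return -EOPNOTSUPP;
	return f->hiq_mqd_load(kgd, mqd, pipe_id, queue_id, doorbell_off);
}
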
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 5087d6bdba60..c195575366a3 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -275,9 +275,6 @@ static int pp_dpm_load_fw(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr->not_vf)
- return 0;
-
if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
return -EINVAL;
@@ -930,9 +927,12 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr)
return -EINVAL;
+ if (!hwmgr->pm_en)
+ return 0;
+
if (hwmgr->hwmgr_func->set_mp1_state)
return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 6dddd7818558..99ad4ddbe12f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -21,6 +21,7 @@
*/
#include <linux/firmware.h>
+#include <linux/pci.h>
#include "pp_debug.h"
#include "amdgpu.h"
@@ -356,6 +357,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min_value, uint32_t *max_value)
+{
+ int ret = 0;
+ uint32_t level_count = 0;
+
+ if (!min_value && !max_value)
+ return -EINVAL;
+
+ if (min_value) {
+ /* by default, level 0 clock value as min value */
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
+ if (ret)
+ return ret;
+ }
+
+ if (max_value) {
+ ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
enum smu_feature_mask feature_id = 0;
@@ -404,10 +434,10 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
- ret = smu_dpm_set_uvd_enable(smu, gate);
+ ret = smu_dpm_set_uvd_enable(smu, !gate);
break;
case AMD_IP_BLOCK_TYPE_VCE:
- ret = smu_dpm_set_vce_enable(smu, gate);
+ ret = smu_dpm_set_vce_enable(smu, !gate);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
@@ -416,7 +446,7 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
ret = smu_powergate_sdma(smu, gate);
break;
case AMD_IP_BLOCK_TYPE_JPEG:
- ret = smu_dpm_set_jpeg_enable(smu, gate);
+ ret = smu_dpm_set_jpeg_enable(smu, !gate);
break;
default:
break;
@@ -490,26 +520,25 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
{
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
- struct smu_table *table = NULL;
- int ret = 0;
+ struct smu_table *table = &smu_table->driver_table;
int table_id = smu_table_get_index(smu, table_index);
+ uint32_t table_size;
+ int ret = 0;
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
- table = &smu_table->tables[table_index];
+ table_size = smu_table->tables[table_index].size;
- if (drv2smu)
- memcpy(table->cpu_addr, table_data, table->size);
+ if (drv2smu) {
+ memcpy(table->cpu_addr, table_data, table_size);
+ /*
+	 * Flush HDP cache: to ensure the content seen by the
+	 * GPU is consistent with what the CPU wrote.
+ */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ }
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(table->mc_address));
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(table->mc_address));
- if (ret)
- return ret;
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
@@ -517,11 +546,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
if (ret)
return ret;
- /* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
-
- if (!drv2smu)
- memcpy(table_data, table->cpu_addr, table->size);
+ if (!drv2smu) {
+ amdgpu_asic_flush_hdp(adev, NULL);
+ memcpy(table_data, table->cpu_addr, table_size);
+ }
return ret;
}
@@ -531,7 +559,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS) {
- if (amdgpu_sriov_vf(adev))
+		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return false;
else
return true;
@@ -643,12 +671,11 @@ int smu_feature_init_dpm(struct smu_context *smu)
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
- struct amdgpu_device *adev = smu->adev;
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
int ret = 0;
- if (adev->flags & AMD_IS_APU)
+ if (smu->is_apu)
return 1;
feature_id = smu_feature_get_index(smu, mask);
@@ -872,6 +899,7 @@ static int smu_sw_init(void *handle)
smu->smu_baco.platform_support = false;
mutex_init(&smu->sensor_lock);
+ mutex_init(&smu->metrics_lock);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -947,32 +975,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ uint32_t max_table_size = 0;
int ret, i;
- for (i = 0; i < SMU_TABLE_COUNT; i++) {
- if (tables[i].size == 0)
- continue;
+ /* VRAM allocation for tool table */
+ if (tables[SMU_TABLE_PMSTATUSLOG].size) {
ret = amdgpu_bo_create_kernel(adev,
- tables[i].size,
- tables[i].align,
- tables[i].domain,
- &tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- if (ret)
- goto failed;
+ tables[SMU_TABLE_PMSTATUSLOG].size,
+ tables[SMU_TABLE_PMSTATUSLOG].align,
+ tables[SMU_TABLE_PMSTATUSLOG].domain,
+ &tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for tool table failed!\n");
+ return ret;
+ }
}
- return 0;
-failed:
- while (--i >= 0) {
+ /* VRAM allocation for driver table */
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
+ if (i == SMU_TABLE_PMSTATUSLOG)
+ continue;
+
+ if (max_table_size < tables[i].size)
+ max_table_size = tables[i].size;
}
+
+ driver_table->size = max_table_size;
+ driver_table->align = PAGE_SIZE;
+ driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ driver_table->size,
+ driver_table->align,
+ driver_table->domain,
+ &driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for driver table failed!\n");
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+ }
+
return ret;
}
@@ -980,18 +1032,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t i = 0;
+ struct smu_table *driver_table = &(smu_table->driver_table);
if (!tables)
return 0;
- for (i = 0; i < SMU_TABLE_COUNT; i++) {
- if (tables[i].size == 0)
- continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- }
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+ amdgpu_bo_free_kernel(&driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
return 0;
}
@@ -1061,28 +1114,48 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
/* smu_dump_pptable(smu); */
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
- /*
- * Copy pptable bo in the vram to smc with SMU MSGs such as
- * SetDriverDramAddr and TransferTableDram2Smu.
- */
- ret = smu_write_pptable(smu);
- if (ret)
- return ret;
-
- /* issue Run*Btc msg */
- ret = smu_run_btc(smu);
- if (ret)
- return ret;
+ /*
+ * Copy pptable bo in the vram to smc with SMU MSGs such as
+ * SetDriverDramAddr and TransferTableDram2Smu.
+ */
+ ret = smu_write_pptable(smu);
+ if (ret)
+ return ret;
- ret = smu_feature_set_allowed_mask(smu);
- if (ret)
- return ret;
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
+ if (ret)
+ return ret;
+ ret = smu_feature_set_allowed_mask(smu);
+ if (ret)
+ return ret;
- ret = smu_system_features_control(smu, true);
- if (ret)
- return ret;
+ ret = smu_system_features_control(smu, true);
+ if (ret)
+ return ret;
+ if (adev->asic_type == CHIP_NAVI10) {
+ if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
+ adev->pdev->revision == 0xc3 ||
+ adev->pdev->revision == 0xca ||
+ adev->pdev->revision == 0xcb)) ||
+ (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
+ adev->pdev->revision == 0xf4 ||
+ adev->pdev->revision == 0xf5 ||
+ adev->pdev->revision == 0xf6))) {
+ ret = smu_disable_umc_cdr_12gbps_workaround(smu);
+ if (ret) {
+ pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
+ return ret;
+ }
+ }
+ }
+ }
if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu);
if (ret)
@@ -1135,8 +1208,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
/*
* Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
*/
- ret = smu_set_tool_table_location(smu);
-
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_set_tool_table_location(smu);
+ }
if (!smu_is_dpm_running(smu))
pr_info("dpm has been disabled\n");
@@ -1241,13 +1315,16 @@ static int smu_hw_init(void *handle)
return ret;
}
- if (adev->flags & AMD_IS_APU) {
+ if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
smu_powergate_jpeg(&adev->smu, false);
smu_set_gfx_cgpg(&adev->smu, true);
}
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
if (!smu->pm_enabled)
return 0;
@@ -1290,7 +1367,7 @@ failed:
static int smu_stop_dpms(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+ return smu_system_features_control(smu, false);
}
static int smu_hw_fini(void *handle)
@@ -1300,37 +1377,45 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
- if (adev->flags & AMD_IS_APU) {
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, true);
smu_powergate_vcn(&adev->smu, true);
smu_powergate_jpeg(&adev->smu, true);
}
- ret = smu_stop_thermal_control(smu);
- if (ret) {
- pr_warn("Fail to stop thermal control!\n");
- return ret;
- }
+ if (!smu->pm_enabled)
+ return 0;
- /*
- * For custom pptable uploading, skip the DPM features
- * disable process on Navi1x ASICs.
- * - As the gfx related features are under control of
- * RLC on those ASICs. RLC reinitialization will be
- * needed to reenable them. That will cost much more
- * efforts.
- *
- * - SMU firmware can handle the DPM reenablement
- * properly.
- */
- if (!smu->uploading_custom_pp_table ||
- !((adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_NAVI12))) {
- ret = smu_stop_dpms(smu);
+	if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_stop_thermal_control(smu);
if (ret) {
- pr_warn("Fail to stop Dpms!\n");
+ pr_warn("Fail to stop thermal control!\n");
return ret;
}
+
+ /*
+ * For custom pptable uploading, skip the DPM features
+ * disable process on Navi1x ASICs.
+ * - As the gfx related features are under control of
+ * RLC on those ASICs. RLC reinitialization will be
+ * needed to reenable them. That will cost much more
+ * efforts.
+ *
+ * - SMU firmware can handle the DPM reenablement
+ * properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+ pr_warn("Fail to stop Dpms!\n");
+ return ret;
+ }
+ }
}
kfree(table_context->driver_pptable);
@@ -1376,7 +1461,10 @@ static int smu_suspend(void *handle)
struct smu_context *smu = &adev->smu;
bool baco_feature_is_enabled = false;
- if(!(adev->flags & AMD_IS_APU))
+ if (!smu->pm_enabled)
+ return 0;
+
+ if(!smu->is_apu)
baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
@@ -1408,6 +1496,12 @@ static int smu_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
+ if (!smu->pm_enabled)
+ return 0;
+
pr_info("SMU is resuming...\n");
ret = smu_start_smc_engine(smu);
@@ -1606,43 +1700,6 @@ static int smu_enable_umd_pstate(void *handle,
return 0;
}
-static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
-{
- int ret = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
- break;
- case AMD_DPM_FORCED_LEVEL_MANUAL:
- case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
- default:
- break;
- }
- return ret;
-}
-
int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings)
@@ -1670,7 +1727,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (!skip_display_settings) {
- ret = smu_notify_smc_dispaly_config(smu);
+ ret = smu_notify_smc_display_config(smu);
if (ret) {
pr_err("Failed to notify smc display config!");
return ret;
@@ -1680,11 +1737,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
- ret = smu_default_set_performance_level(smu, level);
- if (ret) {
- pr_err("Failed to set performance level!");
- return ret;
- }
+ pr_err("Failed to set performance level!");
+ return ret;
}
/* update the saved copy */
@@ -1926,26 +1980,25 @@ int smu_set_df_cstate(struct smu_context *smu,
int smu_write_watermarks_table(struct smu_context *smu)
{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+ void *watermarks_table = smu->smu_table.watermarks_table;
- if (!table->cpu_addr)
+ if (!watermarks_table)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ return smu_update_table(smu,
+ SMU_TABLE_WATERMARKS,
+ 0,
+ watermarks_table,
true);
-
- return ret;
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
+ void *table = smu->smu_table.watermarks_table;
+
+ if (!table)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -2284,13 +2337,9 @@ int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_active_display_count)
ret = smu->ppt_funcs->set_active_display_count(smu, count);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2437,7 +2486,7 @@ bool smu_baco_is_support(struct smu_context *smu)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->baco_is_support)
+ if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
ret = smu->ppt_funcs->baco_is_support(smu);
mutex_unlock(&smu->mutex);
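
A usage sketch (assumed, not taken from this patch) for the smu_get_dpm_level_range() helper added in this file: either output pointer may be NULL when only one bound is wanted, and passing both as NULL is rejected with -EINVAL:

	/* Query the lowest (level 0) and highest (last level) sclk values. */
	uint32_t min_clk = 0, max_clk = 0;
	int err = smu_get_dpm_level_range(smu, SMU_SCLK, &min_clk, &max_clk);
	if (!err)
		pr_info("sclk dpm range: %u..%u\n", min_clk, max_clk);
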
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 17eeb546c550..14ba6aa876e2 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -179,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(OVERDRIVE),
TAB_MAP(I2C_COMMANDS),
+ TAB_MAP(ACTIVITY_MONITOR_COEFF),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -302,6 +303,10 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -867,18 +872,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time ||
time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
@@ -1310,6 +1318,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
static int arcturus_get_power_profile_mode(struct smu_context *smu,
char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
+ DpmActivityMonitorCoeffInt_t activity_monitor;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1319,14 +1329,35 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
"COMPUTE",
"CUSTOM"};
static const char *title[] = {
- "PROFILE_INDEX(NAME)"};
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "UseRlcBusy",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
uint32_t i, size = 0;
int16_t workload_type = 0;
+ int result = 0;
+ uint32_t smu_version;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
- size += sprintf(buf + size, "%16s\n",
+ result = smu_get_smc_version(smu, NULL, &smu_version);
+ if (result)
+ return result;
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev))
+ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9], title[10]);
+ else
+ size += sprintf(buf + size, "%16s\n",
title[0]);
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -1338,8 +1369,50 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
continue;
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+ result = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type,
+ (void *)(&activity_monitor),
+ false);
+ if (result) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+ }
+
size += sprintf(buf + size, "%2d %14s%s\n",
i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) {
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor.Gfx_FPS,
+ activity_monitor.Gfx_UseRlcBusy,
+ activity_monitor.Gfx_MinActiveFreqType,
+ activity_monitor.Gfx_MinActiveFreq,
+ activity_monitor.Gfx_BoosterFreqType,
+ activity_monitor.Gfx_BoosterFreq,
+ activity_monitor.Gfx_PD_Data_limit_c,
+ activity_monitor.Gfx_PD_Data_error_coeff,
+ activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "UCLK",
+ activity_monitor.Mem_FPS,
+ activity_monitor.Mem_UseRlcBusy,
+ activity_monitor.Mem_MinActiveFreqType,
+ activity_monitor.Mem_MinActiveFreq,
+ activity_monitor.Mem_BoosterFreqType,
+ activity_monitor.Mem_BoosterFreq,
+ activity_monitor.Mem_PD_Data_limit_c,
+ activity_monitor.Mem_PD_Data_error_coeff,
+ activity_monitor.Mem_PD_Data_error_rate_coeff);
+ }
}
return size;
@@ -1349,18 +1422,69 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
long *input,
uint32_t size)
{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type = 0;
uint32_t profile_mode = input[size];
int ret = 0;
-
- if (!smu->pm_enabled)
- return -EINVAL;
+ uint32_t smu_version;
if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
pr_err("Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
+	    (smu_version >= 0x360d00)) {
+ ret = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ false);
+ if (ret) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[1];
+ activity_monitor.Gfx_UseRlcBusy = input[2];
+ activity_monitor.Gfx_MinActiveFreqType = input[3];
+ activity_monitor.Gfx_MinActiveFreq = input[4];
+ activity_monitor.Gfx_BoosterFreqType = input[5];
+ activity_monitor.Gfx_BoosterFreq = input[6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 1: /* Uclk */
+ activity_monitor.Mem_FPS = input[1];
+ activity_monitor.Mem_UseRlcBusy = input[2];
+ activity_monitor.Mem_MinActiveFreqType = input[3];
+ activity_monitor.Mem_MinActiveFreq = input[4];
+ activity_monitor.Mem_BoosterFreqType = input[5];
+ activity_monitor.Mem_BoosterFreq = input[6];
+ activity_monitor.Mem_PD_Data_limit_c = input[7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ break;
+ }
+
+ ret = smu_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ true);
+ if (ret) {
+ pr_err("[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
@@ -1899,7 +2023,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
SwI2cRequest_t req;
struct amdgpu_device *adev = to_amdgpu_device(control);
struct smu_table_context *smu_table = &adev->smu.smu_table;
- struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+ struct smu_table *table = &smu_table->driver_table;
memset(&req, 0, sizeof(req));
arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
@@ -2053,8 +2177,12 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_context *smu = &adev->smu;
int res;
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2070,6 +2198,12 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->pm_enabled)
+ return;
+
i2c_del_adapter(control);
}
@@ -2114,6 +2248,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
+ .set_performance_level = smu_v11_0_set_performance_level,
/* debug (internal used) */
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
@@ -2137,6 +2272,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
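
For the CUSTOM path of arcturus_set_power_profile_mode() above, the input array is interpreted as follows: input[0] selects the clock domain (0 = GFXCLK, 1 = UCLK), input[1]..input[9] map onto the activity-monitor coefficients in declaration order, and input[size] carries the profile mode itself. An illustrative buffer with assumed coefficient values:

	/* Illustrative only: the coefficient values here are made up. */
	long input[11] = {
		0,	/* clock domain: 0 = GFXCLK */
		60,	/* Gfx_FPS */
		1,	/* Gfx_UseRlcBusy */
		0,	/* Gfx_MinActiveFreqType */
		300,	/* Gfx_MinActiveFreq */
		0,	/* Gfx_BoosterFreqType */
		0,	/* Gfx_BoosterFreq */
		0,	/* Gfx_PD_Data_limit_c */
		0,	/* Gfx_PD_Data_error_coeff */
		0,	/* Gfx_PD_Data_error_rate_coeff */
		PP_SMC_POWER_PROFILE_CUSTOM,	/* input[size], size == 10 */
	};
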
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 253860d30b20..9454ab50f9a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -99,6 +99,9 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
+ if (!hwmgr->not_vf)
+ return 0;
+
if (!smum_is_dpm_running(hwmgr)) {
pr_info("dpm has been disabled\n");
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index e2b82c902948..f48fdc7f0382 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -282,10 +282,7 @@ err:
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
- if (!hwmgr->not_vf)
- return 0;
-
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
return 0;
phm_stop_thermal_controller(hwmgr);
@@ -305,10 +302,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (!hwmgr->not_vf)
- return 0;
-
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
return 0;
phm_disable_smc_firmware_ctf(hwmgr);
@@ -327,13 +321,10 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (!hwmgr->not_vf)
- return 0;
-
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en)
+ if (!hwmgr->not_vf || !hwmgr->pm_en)
return 0;
ret = phm_setup_asic(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 4e8ab139bb3b..689072a312a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
- clocks->data[i].latency_in_us = latency_required ?
- smu10_get_mem_latency(hwmgr,
- pclk_vol_table->entries[i].clk) :
- 0;
- clocks->num_levels++;
+ if (pclk_vol_table->entries[i].clk) {
+ clocks->data[clocks->num_levels].clocks_in_khz =
+ pclk_vol_table->entries[i].clk * 10;
+ clocks->data[clocks->num_levels].latency_in_us = latency_required ?
+ smu10_get_mem_latency(hwmgr,
+ pclk_vol_table->entries[i].clk) :
+ 0;
+ clocks->num_levels++;
+ }
}
return 0;
@@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
- clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
- clocks->num_levels++;
+ if (pclk_vol_table->entries[i].clk) {
+ clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
+ clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
+ clocks->num_levels++;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d70abada66bf..bf04cfefb283 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -720,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
/* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+ data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
}
data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
@@ -730,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
/* Initialize Vddci DPM table based on allow Mclk values */
for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+ data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
}
data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
}
@@ -744,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
*/
for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+ data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
}
data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 148446570e21..92a65e3daff4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3538,7 +3538,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
- if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+ if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
+ && hwmgr->not_vf) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 5bcf0d684151..3b3ec5666051 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -872,7 +872,7 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
- data->pcie_parameters_override = 1;
+ data->pcie_parameters_override = true;
data->pcie_gen_level1 = pcie_gen;
data->pcie_width_level1 = pcie_width;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ca3fdc6777cf..97b6714e83e6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -254,15 +254,26 @@ struct smu_table_context
unsigned long metrics_time;
void *metrics_table;
void *clocks_table;
+ void *watermarks_table;
void *max_sustainable_clocks;
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
+ /*
+ * The driver table is just a staging buffer for
+ * uploading content to and downloading content from the SMU.
+ *
+ * The table_id passed with SMU_MSG_TransferTableSmu2Dram/
+ * SMU_MSG_TransferTableDram2Smu tells the SMU which
+ * table content the driver is interested in.
+ */
+ struct smu_table driver_table;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
void *overdrive_table;
+ void *boot_overdrive_table;
};
struct smu_dpm_context {
@@ -350,6 +361,7 @@ struct smu_context
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
struct mutex sensor_lock;
+ struct mutex metrics_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -443,7 +455,7 @@ struct pptable_funcs {
int (*pre_display_config_changed)(struct smu_context *smu);
int (*display_config_changed)(struct smu_context *smu);
int (*apply_clocks_adjust_rules)(struct smu_context *smu);
- int (*notify_smc_dispaly_config)(struct smu_context *smu);
+ int (*notify_smc_display_config)(struct smu_context *smu);
int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
int (*unforce_dpm_levels)(struct smu_context *smu);
int (*get_profiling_clk_mask)(struct smu_context *smu,
@@ -496,6 +508,7 @@ struct pptable_funcs {
int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
int (*write_pptable)(struct smu_context *smu);
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_driver_table_location)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
@@ -553,6 +566,7 @@ struct pptable_funcs {
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
int (*override_pcie_parameters)(struct smu_context *smu);
uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
+ int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
};
int smu_load_microcode(struct smu_context *smu);
@@ -696,6 +710,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min_value, uint32_t *max_value);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
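
(Note on the driver_table comment in the hunk above: a rough, self-contained model of that staging pattern in plain C. transfer_table() stands in for the SMU_MSG_TransferTableSmu2Dram/SMU_MSG_TransferTableDram2Smu message pair; the buffer sizes and names here are illustrative, not the kernel's.)

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TABLE_SMU_METRICS 5

    static unsigned char staging[4096];        /* models the shared driver_table */
    static unsigned char smu_side[16][4096];   /* models SMU-owned table copies */

    /* drv2smu=1 models SMU_MSG_TransferTableDram2Smu, 0 models ...Smu2Dram. */
    static int transfer_table(int table_id, void *buf, size_t size, int drv2smu)
    {
        if (table_id < 0 || table_id >= 16 || size > sizeof(staging))
            return -1;
        if (drv2smu) {
            memcpy(staging, buf, size);                /* driver fills staging  */
            memcpy(smu_side[table_id], staging, size); /* "SMU" pulls it in     */
        } else {
            memcpy(staging, smu_side[table_id], size); /* "SMU" pushes to DRAM  */
            memcpy(buf, staging, size);                /* driver reads staging  */
        }
        return 0;
    }

    int main(void)
    {
        uint32_t metrics = 42;

        transfer_table(TABLE_SMU_METRICS, &metrics, sizeof(metrics), 1);
        metrics = 0;
        transfer_table(TABLE_SMU_METRICS, &metrics, sizeof(metrics), 0);
        printf("round-tripped metrics word: %u\n", metrics); /* prints 42 */
        return 0;
    }

(Because every logical table shares one staging buffer, callers must serialize transfers; that is also why the buffer's DMA address is programmed just once via SMU_MSG_SetDriverDramAddrHigh/Low.)
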
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index a886f0644d24..ce5b5011c122 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -622,8 +622,14 @@ typedef struct {
uint16_t PccThresholdHigh;
uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+ // OOB Settings
+ uint16_t BasePerformanceCardPower;
+ uint16_t MaxPerformanceCardPower;
+ uint16_t BasePerformanceFrequencyCap; //In MHz
+ uint16_t MaxPerformanceFrequencyCap; //In MHz
+
// SECTION: Reserved
- uint32_t Reserved[11];
+ uint32_t Reserved[9];
// SECTION: BOARD PARAMETERS
@@ -823,7 +829,6 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
-/* NOT CURRENTLY USED
typedef struct {
uint8_t Gfx_ActiveHystLimit;
uint8_t Gfx_IdleHystLimit;
@@ -866,7 +871,6 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffInt_t;
-*/
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
@@ -878,11 +882,11 @@ typedef struct {
#define TABLE_PMSTATUSLOG 4
#define TABLE_SMU_METRICS 5
#define TABLE_DRIVER_SMU_CONFIG 6
-//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
#define TABLE_I2C_COMMANDS 9
-#define TABLE_COUNT 10
+#define TABLE_ACTIVITY_MONITOR_COEFF 10
+#define TABLE_COUNT 11
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
index c27c82851468..2f85a34c0591 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 10
+#define SMU12_DRIVER_IF_VERSION 11
typedef struct {
int32_t value;
@@ -192,6 +192,11 @@ typedef struct {
uint16_t SocTemperature; //[centi-Celsius]
uint16_t ThrottlerStatus;
uint16_t spare;
+
+ uint16_t StapmOriginalLimit; //[mW]
+ uint16_t StapmCurrentLimit; //[mW]
+ uint16_t ApuPower; //[mW]
+ uint16_t dGpuPower; //[mW]
} SmuMetrics_t;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index d8c9b7f91fcc..a5b4df146713 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -170,6 +170,8 @@
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
__SMU_DUMMY_MAP(DFCstateControl), \
+ __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
+ __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 786de7741990..d5314d12628a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
@@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+int smu_v11_0_set_driver_table_location(struct smu_context *smu);
+
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
@@ -262,4 +264,7 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
index 373861ddccd0..406bfd187ce8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -120,7 +120,10 @@
#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45
#define PPSMC_MSG_BacoAudioD3PME 0x48
-#define PPSMC_Message_Count 0x49
+#define PPSMC_MSG_DALDisableDummyPstateChange 0x49
+#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A
+
+#define PPSMC_Message_Count 0x4B
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index b2f96a101124..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -39,21 +39,39 @@
#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+enum SMU_11_0_ODFEATURE_CAP {
+ SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
+ SMU_11_0_ODCAP_GFXCLK_CURVE,
+ SMU_11_0_ODCAP_UCLK_MAX,
+ SMU_11_0_ODCAP_POWER_LIMIT,
+ SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
+ SMU_11_0_ODCAP_FAN_SPEED_MIN,
+ SMU_11_0_ODCAP_TEMPERATURE_FAN,
+ SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
+ SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
+ SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODCAP_AUTO_UV_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_MEMORY,
+ SMU_11_0_ODCAP_FAN_CURVE,
+ SMU_11_0_ODCAP_COUNT,
+};
+
enum SMU_11_0_ODFEATURE_ID {
- SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
- SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
- SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
- SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
- SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
- SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
- SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
- SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
- SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
- SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
- SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
- SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_11_0_ODFEATURE_COUNT = 14,
};
#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
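
(Note: the new SMU_11_0_ODFEATURE_CAP enum gives each overdrive feature a plain array index, and each SMU_11_0_ODFEATURE_ID bit is now derived as 1 << cap, so navi10_od_feature_is_supported() can index od_table->cap[] directly. A small self-contained check of that invariant, in illustrative C rather than driver code:)

    #include <assert.h>
    #include <stdint.h>

    enum od_cap {
        ODCAP_GFXCLK_LIMITS = 0,
        ODCAP_GFXCLK_CURVE,
        ODCAP_UCLK_MAX,
        ODCAP_COUNT,
    };

    /* The mask form is derived from the cap index, so the two cannot drift. */
    #define ODFEATURE(cap) (1u << (cap))

    int main(void)
    {
        uint8_t cap[ODCAP_COUNT] = { 1, 0, 1 }; /* a per-board capability table */
        uint32_t mask = 0;

        for (int i = 0; i < ODCAP_COUNT; i++)
            if (cap[i])
                mask |= ODFEATURE(i);

        assert(mask == (ODFEATURE(ODCAP_GFXCLK_LIMITS) | ODFEATURE(ODCAP_UCLK_MAX)));
        return 0;
    }
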
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 3f1cd06e273c..d79e54b5ebf6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -90,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 15403b7979d6..0d73a49166af 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -119,6 +119,10 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3),
+ MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
+ MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange),
+ MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm),
+ MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive),
};
static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
@@ -555,6 +559,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -564,17 +572,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
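
(Note: the metrics_lock taken above guards a time-bounded cache: the SMU metrics table is re-fetched at most once per 100 ms, and readers otherwise copy the cached snapshot. A minimal userspace sketch of the same pattern, assuming pthreads and a monotonic clock in place of the kernel's mutex and jiffies; all names below are ours:)

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>
    #include <time.h>

    #define CACHE_MS 100

    static pthread_mutex_t metrics_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t metrics_time_ms;        /* 0 means "never fetched" */
    static uint32_t metrics_table[64];      /* cached copy of the SMU table */

    static uint64_t now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Stand-in for smu_update_table(..., SMU_TABLE_SMU_METRICS, ...). */
    static int fetch_from_smu(uint32_t *dst) { dst[0]++; return 0; }

    int get_metrics_table(uint32_t *out)
    {
        int ret = 0;

        pthread_mutex_lock(&metrics_lock);
        if (!metrics_time_ms || now_ms() > metrics_time_ms + CACHE_MS) {
            ret = fetch_from_smu(metrics_table);
            if (ret) {
                pthread_mutex_unlock(&metrics_lock);
                return ret;
            }
            metrics_time_ms = now_ms();
        }
        memcpy(out, metrics_table, sizeof(metrics_table));
        pthread_mutex_unlock(&metrics_lock);
        return ret;
    }

    int main(void)
    {
        uint32_t snap[64];
        return get_metrics_table(snap);
    }
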
@@ -725,11 +736,20 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
-static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
- return od_table->cap[feature];
+ return od_table->cap[cap];
}
+static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
+ enum SMU_11_0_ODSETTING_ID setting,
+ uint32_t *min, uint32_t *max)
+{
+ if (min)
+ *min = od_table->min[setting];
+ if (max)
+ *max = od_table->max[setting];
+}
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
@@ -748,6 +768,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+ uint32_t min_value, max_value;
switch (clk_type) {
case SMU_GFXCLK:
@@ -825,7 +846,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
break;
size += sprintf(buf + size, "OD_SCLK:\n");
size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
@@ -833,15 +854,15 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
break;
size += sprintf(buf + size, "OD_MCLK:\n");
- size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+ size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
break;
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
break;
size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
@@ -861,6 +882,55 @@ static int navi10_print_clk_levels(struct smu_context *smu,
size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
}
break;
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ &min_value, &max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ min_value, max_value);
+ }
+
+ break;
default:
break;
}
@@ -942,6 +1012,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
case SMU_GFXCLK:
case SMU_DCEFCLK:
case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
if (ret)
return ret;
@@ -1374,7 +1446,7 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
return ret;
}
-static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
+static int navi10_notify_smc_display_config(struct smu_context *smu)
{
struct smu_clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
@@ -1579,12 +1651,44 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
return 0;
}
-static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+static int navi10_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
+static int navi10_set_standard_performance_level(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
+ uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
+ break;
+ case CHIP_NAVI14:
+ sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK;
+ uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK;
+ break;
+ default:
+ /* by default, this is the same as the auto performance level */
+ return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
+ }
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int navi10_set_peak_performance_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t sclk_freq = 0, uclk_freq = 0;
- uint32_t uclk_level = 0;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -1625,14 +1729,16 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
break;
}
break;
+ case CHIP_NAVI12:
+ sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
+ break;
default:
- return -EINVAL;
+ ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
+ if (ret)
+ return ret;
}
- ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
- if (ret)
- return ret;
- ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+ ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
if (ret)
return ret;
@@ -1646,19 +1752,45 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
return ret;
}
-static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int navi10_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
{
int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = navi10_set_standard_performance_level(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = navi10_set_peak_clock_by_device(smu);
+ ret = navi10_set_peak_performance_level(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- ret = -EINVAL;
break;
}
-
return ret;
}
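
(Note: in the PROFILE_MIN_SCLK/PROFILE_MIN_MCLK cases above, the level index from smu_get_profiling_clk_mask() becomes a one-hot bitmask via 1 << mask, so smu_force_clk_levels() pins exactly one DPM level. A toy illustration with a hypothetical stand-in for the mask helper:)

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for smu_get_profiling_clk_mask(): MIN_* levels
     * pin level 0, a PEAK-style request would pin the top level. */
    static void profiling_clk_mask(int num_levels, int want_min, uint32_t *level)
    {
        *level = want_min ? 0 : (uint32_t)(num_levels - 1);
    }

    int main(void)
    {
        uint32_t sclk_level;

        profiling_clk_mask(8, 1, &sclk_level);
        /* force_clk_levels() then receives a one-hot mask: */
        uint32_t mask = 1u << sclk_level;
        assert(mask == 0x1);        /* only DPM level 0 stays allowed */
        return 0;
    }
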
@@ -1804,6 +1936,28 @@ static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_tab
return 0;
}
+static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+ uint16_t *voltage,
+ uint32_t freq)
+{
+ uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
+ uint32_t value = 0;
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetVoltageByDpm,
+ param);
+ if (ret) {
+ pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &value);
+ *voltage = (uint16_t)value;
+
+ return 0;
+}
+
static int navi10_setup_od_limits(struct smu_context *smu) {
struct smu_11_0_overdrive_table *overdrive_table = NULL;
struct smu_11_0_powerplay_table *powerplay_table = NULL;
@@ -1823,23 +1977,54 @@ static int navi10_setup_od_limits(struct smu_context *smu) {
}
static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
- OverDriveTable_t *od_table;
+ OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
if (ret)
return ret;
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
if (initialize) {
ret = navi10_setup_od_limits(smu);
if (ret) {
pr_err("Failed to retrieve board OD limits\n");
return ret;
}
+ if (od_table) {
+ if (!od_table->GfxclkVolt1) {
+ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+ &od_table->GfxclkVolt1,
+ od_table->GfxclkFreq1);
+ if (ret)
+ od_table->GfxclkVolt1 = 0;
+ if (boot_od_table)
+ boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1;
+ }
+
+ if (!od_table->GfxclkVolt2) {
+ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+ &od_table->GfxclkVolt2,
+ od_table->GfxclkFreq2);
+ if (ret)
+ od_table->GfxclkVolt2 = 0;
+ if (boot_od_table)
+ boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2;
+ }
+ if (!od_table->GfxclkVolt3) {
+ ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
+ &od_table->GfxclkVolt3,
+ od_table->GfxclkFreq3);
+ if (ret)
+ od_table->GfxclkVolt3 = 0;
+ if (boot_od_table)
+ boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3;
+ }
+ }
}
- od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
if (od_table) {
navi10_dump_od_table(od_table);
}
@@ -1871,7 +2056,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
pr_warn("GFXCLK_LIMITS not supported!\n");
return -ENOTSUPP;
}
@@ -1917,7 +2102,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
pr_warn("UCLK_MAX not supported!\n");
return -ENOTSUPP;
}
@@ -1935,6 +2120,13 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
return ret;
od_table->UclkFmax = input[1];
break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ pr_err("Overdrive table was not initialized!\n");
+ return -EINVAL;
+ }
+ memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
+ break;
case PP_OD_COMMIT_DPM_TABLE:
navi10_dump_od_table(od_table);
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
@@ -1951,7 +2143,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_VDDC_CURVE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
pr_warn("GFXCLK_CURVE not supported!\n");
return -ENOTSUPP;
}
@@ -2024,6 +2216,61 @@ static int navi10_run_btc(struct smu_context *smu)
return ret;
}
+static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
+{
+ int result = 0;
+
+ if (!enable)
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+ else
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+
+ return result;
+}
+
+static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
+{
+ uint32_t uclk_count, uclk_min, uclk_max;
+ uint32_t smu_version;
+ int ret = 0;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ /* This workaround is only available for SMC firmware version 42.50 or later */
+ if (smu_version < 0x2A3200)
+ return 0;
+
+ ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+ if (ret)
+ return ret;
+
+ /* Force UCLK out of the highest DPM */
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
+ if (ret)
+ return ret;
+
+ /* Revert the UCLK Hardmax */
+ ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
+ if (ret)
+ return ret;
+
+ /*
+ * In this case, the SMU already disabled the dummy pstate during the
+ * enablement of UCLK DPM, so we have to re-enable it.
+ */
+ return navi10_dummy_pstate_control(smu, true);
+}
+
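
(Note: the 0x2A3200 cutoff in navi10_disable_umc_cdr_12gbps_workaround() matches the "42.50" firmware named in the comment if the SMU version word packs major.minor.patch one byte each; that packing is our assumption here, as the hunk itself does not spell out the encoding:)

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed packing: 0x00MMmmpp -> major.minor.patch. */
    static uint32_t pack_smu_version(uint32_t major, uint32_t minor, uint32_t patch)
    {
        return (major << 16) | (minor << 8) | patch;
    }

    int main(void)
    {
        assert(pack_smu_version(42, 50, 0) == 0x2A3200);   /* 42 = 0x2A, 50 = 0x32 */
        printf("42.50.0 -> 0x%06X\n", pack_smu_version(42, 50, 0));
        return 0;
    }
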
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -2047,7 +2294,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
.pre_display_config_changed = navi10_pre_display_config_changed,
.display_config_changed = navi10_display_config_changed,
- .notify_smc_dispaly_config = navi10_notify_smc_dispaly_config,
+ .notify_smc_display_config = navi10_notify_smc_display_config,
.force_dpm_limit_value = navi10_force_dpm_limit_value,
.unforce_dpm_levels = navi10_unforce_dpm_levels,
.is_dpm_running = navi10_is_dpm_running,
@@ -2080,6 +2327,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
@@ -2117,6 +2365,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.od_edit_dpm_table = navi10_od_edit_dpm_table,
.get_pptable_power_limit = navi10_get_pptable_power_limit,
.run_btc = navi10_run_btc,
+ .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index ec03c7992f6d..2abb4ba01db1 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,12 +27,26 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI10_UMD_PSTATE_PROFILING_GFXCLK (1300)
+#define NAVI10_UMD_PSTATE_PROFILING_SOCCLK (980)
+#define NAVI10_UMD_PSTATE_PROFILING_MEMCLK (625)
+#define NAVI10_UMD_PSTATE_PROFILING_VCLK (980)
+#define NAVI10_UMD_PSTATE_PROFILING_DCLK (850)
+
#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PROFILING_GFXCLK (1200)
+#define NAVI14_UMD_PSTATE_PROFILING_SOCCLK (900)
+#define NAVI14_UMD_PSTATE_PROFILING_MEMCLK (600)
+#define NAVI14_UMD_PSTATE_PROFILING_VCLK (900)
+#define NAVI14_UMD_PSTATE_PROFILING_DCLK (800)
+
+#define NAVI12_UMD_PSTATE_PEAK_GFXCLK (1100)
+
#define NAVI10_VOLTAGE_SCALE (4)
#define smnPCIE_LC_SPEED_CNTL 0x11140290
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 89a54f8e08d3..861e6410363b 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -171,17 +171,20 @@ static int renoir_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
@@ -206,6 +209,10 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -239,8 +246,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
memset(&metrics, 0, sizeof(metrics));
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
- (void *)&metrics, false);
+ ret = renoir_get_metrics_table(smu, &metrics);
if (ret)
return ret;
@@ -706,19 +712,43 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
return ret;
}
-static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int renoir_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
{
int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = renoir_set_peak_clock_by_device(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- ret = -EINVAL;
break;
}
-
return ret;
}
@@ -777,9 +807,17 @@ static int renoir_set_watermarks_table(
}
/* pass data to smu controller */
- ret = smu_write_watermarks_table(smu);
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
- return ret;
+ return 0;
}
static int renoir_get_power_profile_mode(struct smu_context *smu,
@@ -882,6 +920,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+ .set_driver_table_location = smu_v12_0_set_driver_table_location,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 60ce1fccaeb5..7bd200ffcda8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -61,6 +61,8 @@
((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
#define smu_set_min_dcef_deep_sleep(smu) \
((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_driver_table_location(smu) \
+ ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
#define smu_set_tool_table_location(smu) \
((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
#define smu_notify_memory_pool_location(smu) \
@@ -129,8 +131,8 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
#define smu_apply_clocks_adjust_rules(smu) \
((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_notify_smc_display_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_display_config ? (smu)->ppt_funcs->notify_smc_display_config((smu)) : 0)
#define smu_force_dpm_limit_value(smu, highest) \
((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
#define smu_unforce_dpm_levels(smu) \
@@ -205,4 +207,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+#define smu_disable_umc_cdr_12gbps_workaround(smu) \
+ ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+
#endif
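
(Note: the new smu_set_driver_table_location() and smu_disable_umc_cdr_12gbps_workaround() wrappers follow the file's existing convention: a missing ppt_funcs callback quietly evaluates to 0, i.e. success/no-op. A compilable miniature of that convention, with the types simplified:)

    #include <stdio.h>

    struct funcs { int (*set_driver_table_location)(void *smu); };
    struct smu { const struct funcs *ppt_funcs; };

    /* Same shape as the smu_internal.h wrappers: absent callback == success. */
    #define smu_set_driver_table_location(smu) \
        ((smu)->ppt_funcs->set_driver_table_location ? \
         (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)

    static int real_impl(void *smu) { (void)smu; return 0; }

    int main(void)
    {
        struct funcs with = { real_impl }, without = { 0 };
        struct smu a = { &with }, b = { &without };

        printf("%d %d\n", smu_set_driver_table_location(&a),
                          smu_set_driver_table_location(&b)); /* prints: 0 0 */
        return 0;
    }
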
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 7781d245f8ef..0dc49479a7eb 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -450,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
smu_table->tables = NULL;
smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
ret = smu_v11_0_fini_dpm_context(smu);
@@ -774,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
+}
+
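
(Note: both the v11 and v12 set_driver_table_location() helpers split the 64-bit mc_address across two 32-bit message parameters. A self-contained round-trip check with portable stand-ins for upper_32_bits()/lower_32_bits(); the sample address is made up:)

    #include <assert.h>
    #include <stdint.h>

    /* Portable equivalents of the kernel's upper_32_bits()/lower_32_bits(). */
    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower_32(uint64_t v) { return (uint32_t)(v & 0xFFFFFFFFu); }

    int main(void)
    {
        uint64_t mc_address = 0x0000008012345000ull; /* hypothetical GPU address */
        uint32_t hi = upper_32(mc_address);  /* SMU_MSG_SetDriverDramAddrHigh */
        uint32_t lo = lower_32(mc_address);  /* SMU_MSG_SetDriverDramAddrLow  */

        assert((((uint64_t)hi << 32) | lo) == mc_address);
        return 0;
    }
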
int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
@@ -835,27 +855,33 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
- if (ret)
- return ret;
+ if (bitmap_empty(feature->enabled, feature->feature_num)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
- if (ret)
- return ret;
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+ } else {
+ bitmap_copy((unsigned long *)feature_mask, feature->enabled,
+ feature->feature_num);
+ }
return ret;
}
@@ -867,21 +893,24 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
uint32_t feature_mask[2];
int ret = 0;
- if (smu->pm_enabled) {
- ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
- if (ret)
- return ret;
- }
-
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
if (ret)
return ret;
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
+ if (en) {
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ } else {
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+ }
return ret;
}
@@ -1125,11 +1154,12 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
- high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
- range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);
if (low > high)
return -EINVAL;
@@ -1852,6 +1882,12 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
pr_err("Failed to export overdrive table!\n");
return ret;
}
+ if (!table_context->boot_overdrive_table) {
+ table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
+ if (!table_context->boot_overdrive_table) {
+ return -ENOMEM;
+ }
+ }
}
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
if (ret) {
@@ -1860,3 +1896,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
}
return ret;
}
+
+int smu_v11_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 2ac7f2f231b6..870e6db2907e 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -159,7 +159,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
- if (!(smu->adev->flags & AMD_IS_APU))
+ if (!smu->is_apu)
return 0;
if (gate)
@@ -170,7 +170,7 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
- if (!(smu->adev->flags & AMD_IS_APU))
+ if (!smu->is_apu)
return 0;
if (gate)
@@ -181,7 +181,7 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
{
- if (!(smu->adev->flags & AMD_IS_APU))
+ if (!smu->is_apu)
return 0;
if (gate)
@@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
- if (!table)
- return -EINVAL;
-
- if (!table->cpu_addr)
- return -EINVAL;
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
@@ -514,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
return ret;
}
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index aa0ee2b46135..2319400a3fcb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -150,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -161,6 +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
smu10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 39427ca32a15..715564009089 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -70,6 +70,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv = hwmgr->smu_backend;
+ struct amdgpu_device *adev = hwmgr->adev;
/* under sriov, vbios or hypervisor driver
* has already copy table to smc so here only skip it
@@ -87,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 90c782c132d2..275dbf65f1a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -84,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
"Invalid SMU Table ID!", return -EINVAL);
@@ -95,6 +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
@@ -125,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
if (enable) {
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return -EINVAL);
}
@@ -155,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return -EINVAL);
smc_features_low = smu9_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return -EINVAL);
smc_features_high = smu9_get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index f604612f411f..49e5ef3e3876 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -207,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -219,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
@@ -242,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
+ amdgpu_asic_flush_hdp(adev, NULL);
+
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
@@ -290,7 +296,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ amdgpu_asic_flush_hdp(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
@@ -310,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
if (enable) {
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return ret);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
- "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return ret);
} else {
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return ret);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
- "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+ "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return ret);
}
@@ -341,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return ret);
smc_features_low = vega20_get_argument(hwmgr);
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
- "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return ret);
smc_features_high = vega20_get_argument(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 12bcc3e3ba99..4ad8d6c14ee5 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ return -ENOMEM;
+
return 0;
}
@@ -1678,17 +1682,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
+ mutex_lock(&smu->metrics_lock);
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
smu_table->metrics_time = jiffies;
}
memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ mutex_unlock(&smu->metrics_lock);
return ret;
}
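The vega20_get_metrics_table() hunk above introduces two things: a metrics_lock that serializes concurrent readers, and a one-millisecond (HZ / 1000) cache so that back-to-back queries do not each trigger an SMU round trip. A minimal userspace sketch of the same pattern, with invented names, pthreads in place of the kernel mutex, and CLOCK_MONOTONIC milliseconds in place of jiffies:

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define CACHE_MS 1 /* refresh interval, mirroring HZ / 1000 */

struct metrics { int temperature; };

static pthread_mutex_t metrics_lock = PTHREAD_MUTEX_INITIALIZER;
static struct metrics cached;
static long long cached_at_ms = -1;

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Stand-in for the expensive smu_update_table() round trip */
static int read_hw_metrics(struct metrics *m)
{
	m->temperature = 42;
	return 0;
}

static int get_metrics(struct metrics *out)
{
	int ret = 0;

	pthread_mutex_lock(&metrics_lock);
	/* refresh the cached copy only if it is stale */
	if (cached_at_ms < 0 || now_ms() - cached_at_ms > CACHE_MS) {
		ret = read_hw_metrics(&cached);
		if (ret) {
			pthread_mutex_unlock(&metrics_lock);
			return ret;
		}
		cached_at_ms = now_ms();
	}
	memcpy(out, &cached, sizeof(cached));
	pthread_mutex_unlock(&metrics_lock);
	return ret;
}

int main(void)
{
	struct metrics m;

	get_metrics(&m);
	printf("temperature: %d\n", m.temperature);
	return 0;
}

As in the patch, the copy-out happens under the lock, so a reader can never observe a half-updated table.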
@@ -1699,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
int ret;
- if (initialize) {
- if (table_context->overdrive_table)
- return -EINVAL;
-
- table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
-
- if (!table_context->overdrive_table)
- return -ENOMEM;
-
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
- table_context->overdrive_table, false);
- if (ret) {
- pr_err("Failed to export over drive table!\n");
- return ret;
- }
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+ if (initialize) {
ret = vega20_set_default_od8_setttings(smu);
if (ret)
return ret;
@@ -2232,7 +2228,7 @@ static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
}
static int
-vega20_notify_smc_dispaly_config(struct smu_context *smu)
+vega20_notify_smc_display_config(struct smu_context *smu)
{
struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
@@ -2771,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
- if (ret) {
- pr_err("Failed to export over drive table!\n");
- return ret;
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ pr_err("Overdrive table was not initialized!\n");
+ return -EINVAL;
}
-
+ memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -3191,6 +3186,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_od_percentage = vega20_get_od_percentage,
.get_power_profile_mode = vega20_get_power_profile_mode,
.set_power_profile_mode = vega20_set_power_profile_mode,
+ .set_performance_level = smu_v11_0_set_performance_level,
.set_od_percentage = vega20_set_od_percentage,
.set_default_od_settings = vega20_set_default_od_settings,
.od_edit_dpm_table = vega20_odn_edit_dpm_table,
@@ -3200,7 +3196,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.pre_display_config_changed = vega20_pre_display_config_changed,
.display_config_changed = vega20_display_config_changed,
.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
- .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+ .notify_smc_display_config = vega20_notify_smc_display_config,
.force_dpm_limit_value = vega20_force_dpm_limit_value,
.unforce_dpm_levels = vega20_unforce_dpm_levels,
.get_profiling_clk_mask = vega20_get_profiling_clk_mask,
@@ -3228,6 +3224,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 875a3a9eabfa..7d0e7b031e44 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
.get_modes = malidp_mw_connector_get_modes,
.mode_valid = malidp_mw_connector_mode_valid,
};
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 4f03be9b2a83..841794e19eb6 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -115,6 +115,33 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
}
static struct drm_display_mode *
+drm_connector_get_tiled_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay == connector->tile_h_size &&
+ mode->vdisplay == connector->tile_v_size)
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay == connector->tile_h_size &&
+ mode->vdisplay == connector->tile_v_size)
+ continue;
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
@@ -348,8 +375,15 @@ static bool drm_client_target_preferred(struct drm_connector **connectors,
struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
+ int num_tiled_conns = 0;
int i;
+ for (i = 0; i < connector_count; i++) {
+ if (connectors[i]->has_tile &&
+ connectors[i]->status == connector_status_connected)
+ num_tiled_conns++;
+ }
+
retry:
for (i = 0; i < connector_count; i++) {
connector = connectors[i];
@@ -399,6 +433,28 @@ retry:
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
+ /*
+ * In case of tiled modes, if not all tiles are present, fall
+ * back to the first available non-tiled mode.
+ * Once all tiles are present, try to find the tiled mode for
+ * all of them; if the tiled mode is not present due to fbcon
+ * size limitations, use the first non-tiled mode only for
+ * tile 0,0 and set no mode for all other tiles.
+ */
+ if (connector->has_tile) {
+ if (num_tiled_conns <
+ connector->num_h_tile * connector->num_v_tile ||
+ (connector->tile_h_loc == 0 &&
+ connector->tile_v_loc == 0 &&
+ !drm_connector_get_tiled_mode(connector))) {
+ DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+ connector->base.id);
+ modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ } else {
+ modes[i] = drm_connector_get_tiled_mode(connector);
+ }
+ }
+
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
conn_configured |= BIT_ULL(i);
@@ -515,6 +571,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ int num_tiled_conns = 0;
struct drm_modeset_acquire_ctx ctx;
if (!drm_drv_uses_atomic_modeset(dev))
@@ -532,6 +589,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
+ for (i = 0; i < count; i++) {
+ if (connectors[i]->has_tile &&
+ connectors[i]->status == connector_status_connected)
+ num_tiled_conns++;
+ }
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
@@ -631,6 +693,16 @@ retry:
connector->name);
modes[i] = &connector->state->crtc->mode;
}
+ /*
+ * In case of tiled modes, if not all tiles are present
+ * then fall back to a non-tiled mode.
+ */
+ if (connector->has_tile &&
+ num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+ DRM_DEBUG_KMS("Falling back to non tiled mode on Connector %d\n",
+ connector->base.id);
+ modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ }
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0cfb386754c3..2510717d5a08 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -163,11 +163,7 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
break;
}
- if (aux_dev->aux->is_remote)
- res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf,
- todo);
- else
- res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+ res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
@@ -215,11 +211,7 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
break;
}
- if (aux_dev->aux->is_remote)
- res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf,
- todo);
- else
- res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+ res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 43e9f1968af4..a4b98f8055f4 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_dp_mst_helper.h>
#include "drm_crtc_helper_internal.h"
@@ -266,7 +267,7 @@ unlock:
/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
* @buffer: buffer to store the register values
* @size: number of bytes in @buffer
@@ -295,13 +296,18 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* We just have to do it before any DPCD access and hope that the
* monitor doesn't power down exactly after the throw away read.
*/
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
- 1);
- if (ret != 1)
- goto out;
+ if (!aux->is_remote) {
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
+ buffer, 1);
+ if (ret != 1)
+ goto out;
+ }
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
- size);
+ if (aux->is_remote)
+ ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size);
+ else
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
+ buffer, size);
out:
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
@@ -311,7 +317,7 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
/**
* drm_dp_dpcd_write() - write a series of bytes to the DPCD
- * @aux: DisplayPort AUX channel
+ * @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to write
* @buffer: buffer containing the values to write
* @size: number of bytes in @buffer
@@ -328,8 +334,12 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
{
int ret;
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
- size);
+ if (aux->is_remote)
+ ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
+ else
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset,
+ buffer, size);
+
drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
return ret;
}
@@ -1027,6 +1037,19 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
}
/**
+ * drm_dp_remote_aux_init() - minimally initialise a remote aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Used for remote aux channels in general. Merely initializes the CRC
+ * work struct.
+ */
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux)
+{
+ INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
+}
+EXPORT_SYMBOL(drm_dp_remote_aux_init);
+
+/**
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
@@ -1213,6 +1236,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
/* CH7511 seems to leave SINK_COUNT zeroed */
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
+ /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
};
#undef OUI
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 4104f15f4594..a811247cecfe 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -398,7 +398,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
@@ -853,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
{
int idx = 1;
repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+ repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
idx++;
if (idx > raw->curlen)
goto fail_len;
@@ -1208,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
+ mgr->is_waiting_for_dwn_reply = false;
+
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1217,6 +1220,7 @@ out:
}
mutex_unlock(&mgr->qlock);
+ drm_dp_mst_kick_tx(mgr);
return ret;
}
@@ -1931,73 +1935,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+ switch (pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ return true;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ /* For sst branch device */
+ if (!mcs)
+ return true;
+
+ return false;
+ }
+ return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+ bool new_mcs)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
struct drm_dp_mst_branch *mstb;
u8 rad[8], lct;
int ret = 0;
- if (port->pdt == new_pdt)
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
return 0;
/* Teardown the old pdt, if there is one */
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /*
- * If the new PDT would also have an i2c bus, don't bother
- * with reregistering it
- */
- if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- new_pdt == DP_PEER_DEVICE_SST_SINK) {
- port->pdt = new_pdt;
- return 0;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /*
+ * If the new PDT would also have an i2c bus,
+ * don't bother with reregistering it
+ */
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+ return 0;
+ }
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mutex_lock(&mgr->lock);
- drm_dp_mst_topology_put_mstb(port->mstb);
- port->mstb = NULL;
- mutex_unlock(&mgr->lock);
- break;
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ } else {
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ }
}
port->pdt = new_pdt;
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
- break;
+ port->mcs = new_mcs;
- case DP_PEER_DEVICE_MST_BRANCHING:
- lct = drm_dp_calculate_rad(port, rad);
- mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (!mstb) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to create MSTB for port %p", port);
- goto out;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ } else {
+ lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p",
+ port);
+ goto out;
+ }
- mutex_lock(&mgr->lock);
- port->mstb = mstb;
- mstb->mgr = port->mgr;
- mstb->port_parent = port;
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
- mutex_unlock(&mgr->lock);
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
- /* And make sure we send a link address for this */
- ret = 1;
- break;
+ /* And make sure we send a link address for this */
+ ret = 1;
+ }
}
out:
@@ -2150,9 +2171,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
goto error;
}
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2174,6 +2194,7 @@ drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
mutex_lock(&mgr->lock);
+ port->parent->num_ports--;
list_del(&port->next);
mutex_unlock(&mgr->lock);
drm_dp_mst_topology_put_port(port);
@@ -2198,6 +2219,9 @@ drm_dp_mst_add_port(struct drm_device *dev,
port->aux.dev = dev->dev;
port->aux.is_remote = true;
+ /* initialize the MST downstream port's AUX crc work queue */
+ drm_dp_remote_aux_init(&port->aux);
+
/*
* Make sure the memory allocation for our parent branch stays
* around until our own memory allocation is released
@@ -2216,6 +2240,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2260,7 +2285,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
- port->mcs = port_msg->mcs;
+ new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
@@ -2273,6 +2298,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
+ mstb->num_ports++;
mutex_unlock(&mgr->lock);
}
@@ -2287,7 +2313,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
}
}
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
@@ -2301,7 +2327,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* we're coming out of suspend. In this case, always resend the link
* address if there's an MSTB on this port
*/
- if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
send_link_addr = true;
if (port->connector)
@@ -2336,8 +2363,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps, ret;
+ int old_ddps, old_input, ret, i;
u8 new_pdt;
+ bool new_mcs;
bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2367,8 +2395,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
old_ddps = port->ddps;
+ old_input = port->input;
port->input = conn_stat->input_port;
- port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -2381,8 +2409,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ new_mcs = conn_stat->message_capability_status;
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
@@ -2391,6 +2419,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
dowork = false;
}
+ if (!old_input && old_ddps != port->ddps && !port->ddps) {
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_mst_port *port_validated;
+
+ if (!vcpi)
+ continue;
+
+ port_validated =
+ container_of(vcpi, struct drm_dp_mst_port, vcpi);
+ port_validated =
+ drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+ if (!port_validated) {
+ mutex_lock(&mgr->payload_lock);
+ vcpi->num_slots = 0;
+ mutex_unlock(&mgr->payload_lock);
+ } else {
+ drm_dp_mst_topology_put_port(port_validated);
+ }
+ }
+ }
+
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
@@ -2753,9 +2803,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
/* txmsg is sent it should be in the slots now */
+ mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2795,7 +2847,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq))
+ if (list_is_singular(&mgr->tx_msg_downq) &&
+ !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -2951,6 +3004,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
path_res->avail_payload_bw_number);
port->available_pbn =
path_res->avail_payload_bw_number;
+ port->fec_capable = path_res->fec_capable;
}
}
@@ -3740,6 +3794,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL;
+ mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
@@ -3749,6 +3804,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
no_msg:
drm_dp_mst_topology_put_mstb(mstb);
clear_down_rep_recv:
+ mutex_lock(&mgr->qlock);
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
@@ -3771,7 +3829,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ if (guid)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
@@ -3958,6 +4017,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
+ if (!port->mcs)
+ ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
@@ -4080,6 +4141,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
* @mgr: MST topology manager for the port
* @port: port to find vcpi slots for
* @pbn: bandwidth required for the mode in PBN
+ * @pbn_div: divider for DSC mode that takes FEC into account
*
* Allocates VCPI slots to @port, replacing any previous VCPI allocations it
* may have had. Any atomic drivers which support MST must call this function
@@ -4106,11 +4168,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
*/
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn)
+ struct drm_dp_mst_port *port, int pbn,
+ int pbn_div)
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots;
+ int prev_slots, prev_bw, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -4121,6 +4184,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
if (pos->port == port) {
vcpi = pos;
prev_slots = vcpi->vcpi;
+ prev_bw = vcpi->pbn;
/*
* This should never happen, unless the driver tries
@@ -4136,14 +4200,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
break;
}
}
- if (!vcpi)
+ if (!vcpi) {
prev_slots = 0;
+ prev_bw = 0;
+ }
- req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+ if (pbn_div <= 0)
+ pbn_div = mgr->pbn_div;
+
+ req_slots = DIV_ROUND_UP(pbn, pbn_div);
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_slots, req_slots);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_bw, pbn);
/* Add the new allocation to the state */
if (!vcpi) {
@@ -4156,6 +4228,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
list_add(&vcpi->next, &topology_state->vcpis);
}
vcpi->vcpi = req_slots;
+ vcpi->pbn = pbn;
return req_slots;
}
@@ -4406,10 +4479,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
* @clock: dot clock for the mode
* @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4420,7 +4494,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
* peak_kbps *= (1006/1000)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
+ *
+ * If the bpp is in units of 1/16, further divide by 16. Put this
+ * factor in the numerator rather than the denominator to avoid
+ * integer overflow
*/
+
+ if (dsc)
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+ 8 * 54 * 1000 * 1000);
+
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
8 * 54 * 1000 * 1000);
}
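To make the margin and unit factors in the comment above concrete, here is a hedged standalone sketch of the same arithmetic for a hypothetical 1920x1080@60 mode (dot clock 148500 kHz, bpp 24); the DSC path is the same formula with bpp pre-divided by 16:

#include <stdio.h>
#include <stdint.h>

/* PBN = ceil(clock * bpp * 64 * 1006 / (8 * 54 * 1000 * 1000)) */
static int calc_pbn(uint64_t clock_khz, uint64_t bpp)
{
	uint64_t num = clock_khz * bpp * 64 * 1006;
	uint64_t den = 8ULL * 54 * 1000 * 1000;

	return (num + den - 1) / den; /* round up */
}

int main(void)
{
	/* 148500 * 24 * 64 * 1006 / 432000000 = 531.17 -> 532 PBN */
	printf("PBN = %d\n", calc_pbn(148500, 24));
	return 0;
}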
@@ -4559,7 +4642,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq))
+ if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -4570,7 +4653,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
if (port->connector)
port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
}
@@ -4722,9 +4805,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
kfree(mst_state);
}
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *branch)
+{
+ while (port->parent) {
+ if (port->parent == branch)
+ return true;
+
+ if (port->parent->port_parent)
+ port = port->parent->port_parent;
+ else
+ break;
+ }
+ return false;
+}
+
+static inline
+int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
+ struct drm_dp_mst_topology_state *mst_state)
+{
+ struct drm_dp_mst_port *port;
+ struct drm_dp_vcpi_allocation *vcpi;
+ int pbn_limit = 0, pbn_used = 0;
+
+ list_for_each_entry(port, &branch->ports, next) {
+ if (port->mstb)
+ if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
+ return -ENOSPC;
+
+ if (port->available_pbn > 0)
+ pbn_limit = port->available_pbn;
+ }
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
+ branch, pbn_limit);
+
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+ if (!vcpi->pbn)
+ continue;
+
+ if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
+ pbn_used += vcpi->pbn;
+ }
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
+ branch, pbn_used);
+
+ if (pbn_used > pbn_limit) {
+ DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
+ branch);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
static inline int
-drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
{
struct drm_dp_vcpi_allocation *vcpi;
int avail_slots = 63, payload_count = 0;
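The drm_dp_mst_atomic_check_bw_limit() helper added above recurses depth-first through the branch tree, takes the PBN advertised by a branch's ports as that branch's limit, sums the PBN of every VCPI allocation downstream of it, and fails with -ENOSPC on oversubscription. A hedged userspace analogue of that walk over a toy two-port tree (all names invented, payloads attached directly to ports for brevity):

#include <stdio.h>

struct toy_branch;

struct toy_port {
	int available_pbn;       /* limit advertised by this port */
	int payload_pbn;         /* PBN consumed below this port */
	struct toy_branch *mstb; /* downstream branch, if any */
};

struct toy_branch {
	struct toy_port *ports;
	int num_ports;
};

/* Depth-first walk: recurse into downstream branches first, then
 * compare the PBN consumed under this branch against its limit. */
static int check_bw(struct toy_branch *branch)
{
	int pbn_limit = 0, pbn_used = 0, i;

	for (i = 0; i < branch->num_ports; i++) {
		struct toy_port *port = &branch->ports[i];

		if (port->mstb && check_bw(port->mstb))
			return -1;
		if (port->available_pbn > 0)
			pbn_limit = port->available_pbn;
		pbn_used += port->payload_pbn;
	}
	return pbn_used > pbn_limit ? -1 : 0;
}

int main(void)
{
	struct toy_port leaves[2] = {
		{ .available_pbn = 500, .payload_pbn = 300 },
		{ .available_pbn = 500, .payload_pbn = 300 },
	};
	struct toy_branch hub = { leaves, 2 };

	/* 600 PBN used against a 500 PBN limit: expect failure (-1) */
	printf("check_bw: %d\n", check_bw(&hub));
	return 0;
}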
@@ -4762,6 +4897,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
}
/**
+ * drm_dp_mst_add_affected_dsc_crtcs() - flag modesets on CRTCs affected by DSC
+ * @state: Pointer to the new struct drm_dp_mst_topology_state
+ * @mgr: MST topology manager
+ *
+ * Whenever there is a change in the MST topology, the DSC configuration
+ * has to be recalculated, so we need to trigger a modeset on all affected
+ * CRTCs in that topology.
+ *
+ * See also:
+ * drm_dp_mst_atomic_enable_dsc()
+ */
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_vcpi_allocation *pos;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+
+ if (IS_ERR(mst_state))
+ return -EINVAL;
+
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
+
+ connector = pos->port->connector;
+
+ if (!connector)
+ return -EINVAL;
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ crtc = conn_state->crtc;
+
+ if (WARN_ON(!crtc))
+ return -EINVAL;
+
+ if (!drm_dp_mst_dsc_aux_for_port(pos->port))
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
+
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+ mgr, crtc);
+
+ crtc_state->mode_changed = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+
+/**
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bw required for link with DSC enabled
+ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables or disables DSC on the given port by
+ * recalculating its VCPI from the PBN provided, and sets the
+ * dsc_enabled flag to keep track of which ports have DSC enabled.
+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, int pbn_div,
+ bool enable)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_vcpi_allocation *pos;
+ bool found = false;
+ int vcpi = 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
+ if (pos->port == port) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+ port, mst_state);
+ return -EINVAL;
+ }
+
+ if (pos->dsc_enabled == enable) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+ port, enable, pos->vcpi);
+ vcpi = pos->vcpi;
+ }
+
+ if (enable) {
+ vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+ port, vcpi);
+ if (vcpi < 0)
+ return -EINVAL;
+ }
+
+ pos->dsc_enabled = enable;
+
+ return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
+/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
* @state: Pointer to the new &struct drm_dp_mst_topology_state
@@ -4789,7 +5046,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
int i, ret = 0;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+ if (!mgr->mst_state)
+ continue;
+
+ ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+ if (ret)
+ break;
+ ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
if (ret)
break;
}
@@ -5053,3 +5316,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
+
+/**
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
+ * @port: The port to check
+ *
+ * A single physical MST hub object can be represented in the topology
+ * by multiple branches, with virtual ports between those branches.
+ *
+ * As of DP1.4, an MST hub with internal (virtual) ports must expose
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
+ * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
+ *
+ * May acquire mgr->lock
+ *
+ * Returns:
+ * true if the port is a virtual DP peer device, false otherwise
+ */
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *downstream_port;
+
+ if (!port || port->dpcd_rev < DP_DPCD_REV_14)
+ return false;
+
+ /* Virtual DP Sink (Internal Display Panel) */
+ if (port->port_num >= 8)
+ return true;
+
+ /* DP-to-HDMI Protocol Converter */
+ if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ !port->mcs &&
+ port->ldps)
+ return true;
+
+ /* DP-to-DP */
+ mutex_lock(&port->mgr->lock);
+ if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mstb &&
+ port->mstb->num_ports == 2) {
+ list_for_each_entry(downstream_port, &port->mstb->ports, next) {
+ if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
+ !downstream_port->input) {
+ mutex_unlock(&port->mgr->lock);
+ return true;
+ }
+ }
+ }
+ mutex_unlock(&port->mgr->lock);
+
+ return false;
+}
+
+/**
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
+ * @port: The port to check. A leaf of the MST tree with an attached display.
+ *
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
+ * the immediately upstream aux, or the connector's physical aux.
+ *
+ * This is both the correct aux to read DSC_CAPABILITY and the
+ * correct aux to write DSC_ENABLED.
+ *
+ * This operation can be expensive (up to four aux reads), so
+ * the caller should cache the returned value.
+ *
+ * Returns:
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
+ */
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *immediate_upstream_port;
+ struct drm_dp_mst_port *fec_port;
+ struct drm_dp_desc desc = { 0 };
+ u8 endpoint_fec;
+ u8 endpoint_dsc;
+
+ if (!port)
+ return NULL;
+
+ if (port->parent->port_parent)
+ immediate_upstream_port = port->parent->port_parent;
+ else
+ immediate_upstream_port = NULL;
+
+ fec_port = immediate_upstream_port;
+ while (fec_port) {
+ /*
+ * Each physical link (i.e. not a virtual port) between the
+ * output and the primary device must support FEC
+ */
+ if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
+ !fec_port->fec_capable)
+ return NULL;
+
+ fec_port = fec_port->parent->port_parent;
+ }
+
+ /* DP-to-DP peer device */
+ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
+ u8 upstream_dsc;
+
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ return NULL;
+
+ /* Endpoint decompression with DP-to-DP peer device */
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+ (endpoint_fec & DP_FEC_CAPABLE) &&
+ (upstream_dsc & 0x2) /* DSC passthrough */)
+ return &port->aux;
+
+ /* Virtual DPCD decompression with DP-to-DP peer device */
+ return &immediate_upstream_port->aux;
+ }
+
+ /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
+ if (drm_dp_mst_is_virtual_dpcd(port))
+ return &port->aux;
+
+ /*
+ * Synaptics quirk
+ * Applies to ports for which:
+ * - Physical aux has Synaptics OUI
+ * - DPv1.4 or higher
+ * - Port is on primary branch device
+ * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
+ */
+ if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+ return NULL;
+
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+ port->parent == port->mgr->mst_primary) {
+ u8 downstreamport;
+
+ if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
+ &downstreamport, 1) < 0)
+ return NULL;
+
+ if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
+ ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+ != DP_DWN_STRM_PORT_TYPE_ANALOG))
+ return port->mgr->aux;
+ }
+
+ /*
+ * The check below verifies if the MST sink
+ * connected to the GPU is capable of DSC -
+ * therefore the endpoint needs to be
+ * both DSC and FEC capable.
+ */
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ return NULL;
+ if (drm_dp_dpcd_read(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ return NULL;
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+ (endpoint_fec & DP_FEC_CAPABLE))
+ return &port->aux;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 097e54a4379e..ec10dc3e859c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3251,7 +3251,7 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
-static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
{
BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f47454df0780..490a99de6ec1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1257,7 +1257,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+ if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
@@ -1283,6 +1283,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
+ * Likewise, bits_per_pixel should be rounded up to a supported value.
+ */
+ var->bits_per_pixel = fb->format->cpp[0] * 8;
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
@@ -1551,7 +1556,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
- if (connector->has_tile) {
+ if (connector->has_tile &&
+ desired_mode->hdisplay == connector->tile_h_size &&
+ desired_mode->vdisplay == connector->tile_v_size) {
lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
/* cloning to multiple tiles is just crazy-talk, so: */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 1f9c01be40d7..6b43c1c94e8f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -65,12 +65,13 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
if (gpu) {
- rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ sched = &gpu->sched;
drm_sched_entity_init(&ctx->sched_entity[i],
- &rq, 1, NULL);
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
}
}
@@ -282,11 +283,6 @@ static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
args->flags, &args->handle);
}
-#define TS(t) ((struct timespec){ \
- .tv_sec = (t).tv_sec, \
- .tv_nsec = (t).tv_nsec \
-})
-
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -301,7 +297,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
drm_gem_object_put_unlocked(obj);
@@ -354,7 +350,7 @@ static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
{
struct drm_etnaviv_wait_fence *args = data;
struct etnaviv_drm_private *priv = dev->dev_private;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct etnaviv_gpu *gpu;
if (args->flags & ~(ETNA_WAIT_NONBLOCK))
@@ -403,7 +399,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_wait *args = data;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct drm_gem_object *obj;
struct etnaviv_gpu *gpu;
int ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 32cfa5a48d42..efc656efeb0f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -61,7 +61,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -107,11 +107,12 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
* between the specified timeout and the current CLOCK_MONOTONIC time.
*/
static inline unsigned long etnaviv_timeout_to_jiffies(
- const struct timespec *timeout)
+ const struct drm_etnaviv_timespec *timeout)
{
- struct timespec64 ts, to;
-
- to = timespec_to_timespec64(*timeout);
+ struct timespec64 ts, to = {
+ .tv_sec = timeout->tv_sec,
+ .tv_nsec = timeout->tv_nsec,
+ };
ktime_get_ts64(&ts);
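The hunk above swaps the old kernel struct timespec for the new UAPI struct drm_etnaviv_timespec, converting it to a timespec64 before computing how long remains until the absolute CLOCK_MONOTONIC deadline. A minimal userspace analogue of that absolute-to-relative conversion (invented names, milliseconds rather than jiffies):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Remaining time until an absolute CLOCK_MONOTONIC deadline, in
 * milliseconds; 0 if the deadline has already passed. */
static int64_t timeout_to_msecs(const struct timespec *deadline)
{
	struct timespec now;
	int64_t delta;

	clock_gettime(CLOCK_MONOTONIC, &now);
	delta = (int64_t)(deadline->tv_sec - now.tv_sec) * 1000 +
		(deadline->tv_nsec - now.tv_nsec) / 1000000;
	return delta > 0 ? delta : 0;
}

int main(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 2; /* deadline two seconds from now */
	printf("%lld ms remaining\n", (long long)timeout_to_msecs(&deadline));
	return 0;
}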
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index cb1faaac380a..6adea180d629 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -373,7 +373,7 @@ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
}
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
@@ -431,7 +431,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
}
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index d6270acce619..6b68fe16041b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -112,7 +112,7 @@ struct etnaviv_gem_submit {
void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d47d1a8e0219..799ec20b267d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1132,7 +1132,7 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
* Cmdstream submission/retirement:
*/
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 id, struct timespec *timeout)
+ u32 id, struct drm_etnaviv_timespec *timeout)
{
struct dma_fence *fence;
int ret;
@@ -1179,7 +1179,8 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* that lock in this function while waiting.
*/
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout)
{
unsigned long remaining;
long ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8f9bd4edc96a..97bb48042b4d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -169,9 +169,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 fence, struct timespec *timeout);
+ u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6f7d3b3b3628..6417f374b923 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_EXYNOS
- tristate "DRM Support for Samsung SoC EXYNOS Series"
+ tristate "DRM Support for Samsung SoC Exynos Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
help
- Choose this option if you have a Samsung SoC EXYNOS chipset.
+ Choose this option if you have a Samsung SoC Exynos chipset.
If M is selected the module will be called exynosdrm.
if DRM_EXYNOS
@@ -62,7 +62,7 @@ config DRM_EXYNOS_DSI
This enables support for Exynos MIPI-DSI device.
config DRM_EXYNOS_DP
- bool "EXYNOS specific extensions for Analogix DP driver"
+ bool "Exynos specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
select DRM_ANALOGIX_DP
default DRM_EXYNOS
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 2d5cbfda3ca7..8428ae12dfa5 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -510,7 +510,7 @@ static void decon_swreset(struct decon_context *ctx)
ctx->addr + DECON_CRCCTRL);
}
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -523,7 +523,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
decon_commit(ctx->crtc);
}
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
@@ -599,8 +599,8 @@ static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
- .enable = decon_enable,
- .disable = decon_disable,
+ .atomic_enable = decon_atomic_enable,
+ .atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -651,7 +651,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
- decon_disable(ctx->crtc);
+ decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f0640950bd46..ff59c641fa80 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,7 +526,7 @@ static void decon_init(struct decon_context *ctx)
writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}
-static void decon_enable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -546,7 +546,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
ctx->suspended = false;
}
-static void decon_disable(struct exynos_drm_crtc *crtc)
+static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
@@ -568,8 +568,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
- .enable = decon_enable,
- .disable = decon_disable,
+ .atomic_enable = decon_atomic_enable,
+ .atomic_disable = decon_atomic_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -653,7 +653,7 @@ static void decon_unbind(struct device *dev, struct device *master,
{
struct decon_context *ctx = dev_get_drvdata(dev);
- decon_disable(ctx->crtc);
+ decon_atomic_disable(ctx->crtc);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 77ce78986408..1c03485676ef 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -23,8 +23,8 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (exynos_crtc->ops->enable)
- exynos_crtc->ops->enable(exynos_crtc);
+ if (exynos_crtc->ops->atomic_enable)
+ exynos_crtc->ops->atomic_enable(exynos_crtc);
drm_crtc_vblank_on(crtc);
}
@@ -36,8 +36,8 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- if (exynos_crtc->ops->disable)
- exynos_crtc->ops->disable(exynos_crtc);
+ if (exynos_crtc->ops->atomic_disable)
+ exynos_crtc->ops->atomic_disable(exynos_crtc);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d4014ba592fd..d4d21d8cfb90 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -118,8 +118,8 @@ struct exynos_drm_plane_config {
/*
* Exynos drm crtc ops
*
- * @enable: enable the device
- * @disable: disable the device
+ * @atomic_enable: enable the device
+ * @atomic_disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @mode_valid: specific driver callback for mode validation
@@ -133,8 +133,8 @@ struct exynos_drm_plane_config {
*/
struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
- void (*enable)(struct exynos_drm_crtc *crtc);
- void (*disable)(struct exynos_drm_crtc *crtc);
+ void (*atomic_enable)(struct exynos_drm_crtc *crtc);
+ void (*atomic_disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 8d0a929104e5..21aec38702fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -894,7 +894,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
fimd_enable_shadow_channel_path(ctx, win, false);
}
-static void fimd_enable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -912,7 +912,7 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
fimd_commit(ctx->crtc);
}
-static void fimd_disable(struct exynos_drm_crtc *crtc)
+static void fimd_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
int i;
@@ -1006,8 +1006,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
- .enable = fimd_enable,
- .disable = fimd_disable,
+ .atomic_enable = fimd_atomic_enable,
+ .atomic_disable = fimd_atomic_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.atomic_begin = fimd_atomic_begin,
@@ -1098,7 +1098,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
{
struct fimd_context *ctx = dev_get_drvdata(dev);
- fimd_disable(ctx->crtc);
+ fimd_atomic_disable(ctx->crtc);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7ae087b0504d..88b6fcaa20be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
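
The gsc hunk reorders teardown so it mirrors probe in reverse: the component is deleted before runtime PM is disabled, closing the window in which the component framework could still call into a suspended device. A sketch of the resulting remove path (the trailing return is assumed, not shown in the hunk):

static int gsc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* unregister from the component framework first ... */
	component_del(dev, &gsc_component_ops);
	/* ... then it is safe to tear down runtime PM */
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);

	return 0;
}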
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 65b891cb9c50..b320b3a21ad4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -120,7 +120,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
-static void vidi_enable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -133,7 +133,7 @@ static void vidi_enable(struct exynos_drm_crtc *crtc)
drm_crtc_vblank_on(&crtc->base);
}
-static void vidi_disable(struct exynos_drm_crtc *crtc)
+static void vidi_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -147,8 +147,8 @@ static void vidi_disable(struct exynos_drm_crtc *crtc)
}
static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
- .enable = vidi_enable,
- .disable = vidi_disable,
+ .atomic_enable = vidi_atomic_enable,
+ .atomic_disable = vidi_atomic_disable,
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
.update_plane = vidi_update_plane,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6cfdb95fef2f..38ae9c32feef 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -986,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
exynos_crtc_handle_event(crtc);
}
-static void mixer_enable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
@@ -1015,7 +1015,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
set_bit(MXR_BIT_POWERED, &ctx->flags);
}
-static void mixer_disable(struct exynos_drm_crtc *crtc)
+static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
int i;
@@ -1109,8 +1109,8 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
}
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
- .enable = mixer_enable,
- .disable = mixer_disable,
+ .atomic_enable = mixer_atomic_enable,
+ .atomic_disable = mixer_atomic_disable,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.atomic_begin = mixer_atomic_begin,
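
All of the exynos hunks above apply one mechanical rename: each driver's enable/disable pair becomes atomic_enable/atomic_disable, so the vendor ops table mirrors the drm_crtc_helper_funcs naming it is called from. A minimal sketch of the resulting shape (the foo driver and stub bodies are hypothetical):

static void foo_atomic_enable(struct exynos_drm_crtc *crtc)
{
	/* power up and program the CRTC hardware */
}

static void foo_atomic_disable(struct exynos_drm_crtc *crtc)
{
	/* flush pending updates, then power down */
}

static const struct exynos_drm_crtc_ops foo_crtc_ops = {
	.atomic_enable	= foo_atomic_enable,
	.atomic_disable	= foo_atomic_disable,
	/* vblank and plane hooks keep their names */
};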
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index afaf4bea21cf..9278bcfad1bf 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
* Map the GTT and the stolen memory area
*/
if (!resume)
- dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 36cb292fdebe..2411eb9827b8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
- dev_priv->aux_reg = ioremap_nocache(resource_start,
+ dev_priv->aux_reg = ioremap(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 11d1b0761c9a..4a8a4cfb4b75 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -213,7 +213,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
- priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
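
The gma500 and hibmc hunks are part of the tree-wide v5.6 conversion: ioremap() is uncached by default on all architectures, so the _nocache variants are dropped as 1:1 replacements with no behavioral change. A sketch of the managed form (the dev/res variables are hypothetical):

void __iomem *regs;

/* devm_ioremap() already returns an uncached MMIO mapping */
regs = devm_ioremap(dev, res->start, resource_size(res));
if (!regs)
	return -ENOMEM;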
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 000000000000..d9a77f3b59b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0c53661f62b..b8c5f8934dbd 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,9 +31,6 @@ CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
@@ -73,11 +70,12 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
-obj-y += gt/
gt-y += \
gt/debugfs_engines.o \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
+ gt/gen6_ppgtt.o \
+ gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
@@ -85,14 +83,17 @@ gt-y += \
gt/intel_engine_pm.o \
gt/intel_engine_pool.o \
gt/intel_engine_user.o \
+ gt/intel_ggtt.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
+ gt/intel_gtt.o \
gt/intel_llc.o \
gt/intel_lrc.o \
gt/intel_mocs.o \
+ gt/intel_ppgtt.o \
gt/intel_rc6.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
@@ -111,7 +112,6 @@ gt-y += \
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
-obj-y += gem/
gem-y += \
gem/i915_gem_busy.o \
gem/i915_gem_clflush.o \
@@ -157,7 +157,6 @@ i915-y += \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
-obj-y += gt/uc/
i915-y += gt/uc/intel_uc.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
@@ -170,7 +169,6 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_huc_fw.o
# modesetting core code
-obj-y += display/
i915-y += \
display/intel_atomic.o \
display/intel_atomic_plane.o \
@@ -235,7 +233,6 @@ i915-y += \
display/vlv_dsi_pll.o
# perf code
-obj-y += oa/
i915-y += \
oa/i915_oa_hsw.o \
oa/i915_oa_bdw.o \
@@ -260,6 +257,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
gem/selftests/igt_gem_utils.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
+ selftests/igt_atomic.o \
selftests/igt_flush_test.o \
selftests/igt_live_test.o \
selftests/igt_mmap.o \
@@ -276,3 +274,27 @@ endif
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+ display/intel_vbt_defs.h \
+ gvt/execlist.h \
+ gvt/fb_decoder.h \
+ gvt/gtt.h \
+ gvt/gvt.h \
+ gvt/interrupt.h \
+ gvt/mmio_context.h \
+ gvt/mpt.h \
+ gvt/scheduler.h
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+ $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
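
The Makefile hunk replaces the removed kbuild header-test infrastructure with a local rule: for every header not on the exclusion list, it compiles an empty translation unit with the header force-included via -include, so any header that is not self-contained breaks a CONFIG_DRM_I915_WERROR build. Conceptually, each generated .hdrtest target is equivalent to compiling a stub like this (header name chosen for illustration):

/* equivalent of: $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include <header> */
#include "display/intel_atomic.h"	/* the header under test */
/* intentionally empty: compiling this proves the header is standalone */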
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
deleted file mode 100644
index 173c305d7866..000000000000
--- a/drivers/gpu/drm/i915/display/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-header-test- := intel_vbt_defs.h
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 4450d98c98a1..a7457303c62e 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -77,7 +77,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
@@ -202,7 +202,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp;
int lane;
@@ -267,7 +267,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 dss_ctl1;
dss_ctl1 = I915_READ(DSS_CTL1);
@@ -306,7 +306,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
static int afe_clk(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp;
if (crtc_state->dsc.compression_enable)
@@ -321,7 +321,7 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
int afe_clk_khz;
u32 esc_clk_div_m;
@@ -360,7 +360,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -376,7 +376,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
for_each_dsi_phy(phy, intel_dsi->phys)
@@ -387,7 +387,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp;
int lane;
@@ -436,7 +436,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
@@ -488,7 +488,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
@@ -509,7 +509,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
enum phy phy;
@@ -575,7 +575,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
@@ -591,7 +591,7 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
@@ -608,7 +608,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy;
u32 val;
@@ -640,7 +640,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
@@ -789,7 +789,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
enum port port;
@@ -923,7 +923,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -945,7 +945,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
@@ -1026,7 +1026,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
enum transcoder dsi_trans;
@@ -1077,7 +1077,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
/* step3b */
gen11_dsi_map_pll(encoder, pipe_config);
@@ -1104,7 +1104,7 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -1126,7 +1126,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
@@ -1139,7 +1139,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
u32 tmp;
@@ -1180,7 +1180,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum port port;
@@ -1202,7 +1202,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -1229,7 +1229,7 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@@ -1259,7 +1259,7 @@ static void gen11_dsi_post_disable(struct intel_encoder *encoder,
intel_dsc_disable(old_crtc_state);
- skylake_scaler_disable(old_crtc_state);
+ skl_scaler_disable(old_crtc_state);
}
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
@@ -1272,7 +1272,7 @@ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector
static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
@@ -1313,7 +1313,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsc_get_config(encoder, pipe_config);
@@ -1417,7 +1417,8 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- get_dsi_io_power_domains(i915, enc_to_intel_dsi(&encoder->base));
+ get_dsi_io_power_domains(i915,
+ enc_to_intel_dsi(encoder));
if (crtc_state->dsc.compression_enable)
intel_display_power_get(i915,
@@ -1428,7 +1429,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
enum port port;
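
The icl_dsi.c churn above comes from one signature change: the enc_to_*() cast helpers now take struct intel_encoder * directly, so callers stop detouring through &encoder->base (a struct drm_encoder). A plausible shape of the updated helper, assuming struct intel_dsi embeds the encoder as its base member:

static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
	/* recover the containing intel_dsi from its embedded encoder */
	return container_of(encoder, struct intel_dsi, base);
}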
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index fd0026fc3618..c362eecdd414 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -37,6 +37,7 @@
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_psr.h"
#include "intel_sprite.h"
/**
@@ -129,6 +130,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_crtc_state *crtc_state;
intel_hdcp_atomic_check(conn, old_state, new_state);
+ intel_psr_atomic_check(conn, old_state, new_state);
if (!new_state->crtc)
return 0;
@@ -175,6 +177,38 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
}
/**
+ * intel_connector_needs_modeset - check if connector needs a modeset
+ */
+bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state, *new_conn_state;
+
+ old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
+ new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);
+
+ return old_conn_state->crtc != new_conn_state->crtc ||
+ (new_conn_state->crtc &&
+ drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
+ new_conn_state->crtc)));
+}
+
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ &connector->base);
+ if (IS_ERR(conn_state))
+ return ERR_CAST(conn_state);
+
+ return to_intel_digital_connector_state(conn_state);
+}
+
+/**
* intel_crtc_duplicate_state - duplicate crtc state
* @crtc: drm crtc
*
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 7b49623419ba..74c749dbfb4f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -17,6 +17,7 @@ struct drm_device;
struct drm_i915_private;
struct drm_property;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
@@ -32,6 +33,11 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state);
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector);
+bool intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector);
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector);
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
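
intel_connector_needs_modeset() answers one question for encoder code: is this connector touched by the commit? It returns true when the connector moves between CRTCs, or when it keeps a CRTC whose new state requires a full modeset. A sketch of the intended call pattern (the surrounding check function is hypothetical):

static int foo_encoder_check(struct intel_atomic_state *state,
			     struct drm_connector *conn)
{
	if (!intel_connector_needs_modeset(state, conn))
		return 0;	/* connector untouched by this commit */

	/* ... revalidate link configuration ... */
	return 0;
}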
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 27710098d056..b18040793d9e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -707,8 +707,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
connector->name,
- connector->encoder->base.id,
- connector->encoder->name);
+ encoder->base.base.id,
+ encoder->base.name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 8beac06e3f10..ef4017a1baba 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -357,14 +357,16 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
panel_fixed_mode->hdisplay + dtd->hfront_porch;
panel_fixed_mode->hsync_end =
panel_fixed_mode->hsync_start + dtd->hsync;
- panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
+ panel_fixed_mode->htotal =
+ panel_fixed_mode->hdisplay + dtd->hblank;
panel_fixed_mode->vdisplay = dtd->vactive;
panel_fixed_mode->vsync_start =
panel_fixed_mode->vdisplay + dtd->vfront_porch;
panel_fixed_mode->vsync_end =
panel_fixed_mode->vsync_start + dtd->vsync;
- panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
+ panel_fixed_mode->vtotal =
+ panel_fixed_mode->vdisplay + dtd->vblank;
panel_fixed_mode->clock = dtd->pixel_clock;
panel_fixed_mode->width_mm = dtd->width_mm;
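
The intel_bios.c hunk fixes the generic DTD parser: htotal/vtotal were set to the sync end, silently dropping the back porch, and are now derived from the full blank period. With hypothetical DTD values the difference is easy to see:

/* hypothetical DTD: 1920 active, 48 front porch, 32 sync, 160 blank */
int hsync_start = 1920 + 48;		/* 1968 */
int hsync_end   = hsync_start + 32;	/* 2000 */
int htotal_old  = hsync_end;		/* 2000: 80-pixel back porch lost */
int htotal_new  = 1920 + 160;		/* 2080: full blanking accounted */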
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index dcb66a33be9b..b228671d5a5d 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -486,3 +486,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
return 0;
}
+
+void intel_bw_cleanup(struct drm_i915_private *dev_priv)
+{
+ drm_atomic_private_obj_fini(&dev_priv->bw_obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 9db10af012f4..20b9ad241802 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -25,6 +25,7 @@ struct intel_bw_state {
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
+void intel_bw_cleanup(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
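
intel_bw_cleanup() gives intel_bw_init() a teardown counterpart: the private atomic object allocated at init is now finalized on driver removal instead of leaking. A sketch of the intended pairing at the (hypothetical) call sites:

ret = intel_bw_init(dev_priv);		/* modeset init */
if (ret)
	return ret;
/* ... driver lifetime ... */
intel_bw_cleanup(dev_priv);		/* modeset cleanup */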
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 7d1ab1e5b7c3..0ce5926006ca 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2004,6 +2004,18 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
/* Account for additional needs from the planes */
min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+ /*
+ * HACK: on TGL we initially compute min_cdclk from
+ * pixel_rate divided by 2, also accounting for plane
+ * requirements, but in some cases the lowest possible
+ * CDCLK doesn't work and causes underruns. Clamping to
+ * the full pixel rate is a workaround rather than a
+ * final solution.
+ */
+ if (IS_TIGERLAKE(dev_priv))
+ min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index b2b1336ecdb6..f976b800b245 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -65,7 +65,7 @@ static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
return container_of(encoder, struct intel_crt, base);
}
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
{
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
@@ -247,7 +247,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_disable_transcoder_func(old_crtc_state);
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -351,7 +351,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev_priv) &&
- (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
/* HSW/BDW FDI limited to 4k */
@@ -427,10 +427,10 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return 0;
}
-static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(dev);
u32 adpa;
bool ret;
@@ -440,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
- crt->force_hotplug_required = 0;
+ crt->force_hotplug_required = false;
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -477,7 +477,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(dev);
bool reenable_hpd;
u32 adpa;
@@ -535,7 +535,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
int i, tries = 0;
if (HAS_PCH_SPLIT(dev_priv))
- return intel_ironlake_crt_detect_hotplug(connector);
+ return ilk_crt_detect_hotplug(connector);
if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
@@ -609,7 +609,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
@@ -795,7 +795,7 @@ intel_crt_detect(struct drm_connector *connector,
bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
int status, ret;
@@ -886,7 +886,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
intel_wakeref_t wakeref;
struct i2c_adapter *i2c;
@@ -925,7 +925,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
+ crt->force_hotplug_required = true;
}
}
@@ -1063,7 +1063,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
/*
* Configure the automatic hotplug detection stuff
*/
- crt->force_hotplug_required = 0;
+ crt->force_hotplug_required = false;
/*
* TODO: find a proper way to discover whether we need to set the
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c9ba7d7f3787..33f1dc3d7c1a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -34,6 +34,7 @@
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
@@ -1237,9 +1238,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
@@ -1899,8 +1900,13 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (INTEL_GEN(dev_priv) >= 12)
- temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder master;
+
+ master = crtc_state->mst_master_transcoder;
+ WARN_ON(master == INVALID_TRANSCODER);
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
+ }
} else {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
@@ -1944,17 +1950,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
- u32 val = I915_READ(reg);
+ u32 val;
+
+ val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~TRANS_DDI_FUNC_ENABLE;
if (INTEL_GEN(dev_priv) >= 12) {
- val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
- TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ if (!intel_dp_mst_is_master_trans(crtc_state))
+ val &= ~TGL_TRANS_DDI_PORT_MASK;
} else {
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
- TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ val &= ~TRANS_DDI_PORT_MASK;
}
- I915_WRITE(reg, val);
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -2217,7 +2224,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
/*
@@ -2287,7 +2294,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u8 iboost;
@@ -2358,7 +2365,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
@@ -2497,7 +2504,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
width = 4;
rate = 0; /* Rate is always < than 6GHz for HDMI */
} else {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
@@ -2623,7 +2630,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
width = 4;
/* Rate is always < than 6GHz for HDMI */
} else {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
@@ -3161,57 +3168,6 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
static void
-icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val, bits;
- int ln;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
-
- for (ln = 0; ln < 2; ln++) {
- if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
- val = I915_READ(DKL_DP_MODE(tc_port));
- } else {
- val = I915_READ(MG_DP_MODE(ln, tc_port));
- }
-
- if (enable)
- val |= bits;
- else
- val &= ~bits;
-
- if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(DKL_DP_MODE(tc_port), val);
- else
- I915_WRITE(MG_DP_MODE(ln, tc_port), val);
- }
-
- if (INTEL_GEN(dev_priv) == 11) {
- bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- if (enable)
- val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
- else
- val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
- }
-}
-
-static void
icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
const struct intel_crtc_state *crtc_state)
{
@@ -3317,7 +3273,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
if (!crtc_state->fec_enable)
return;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3337,7 +3293,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
if (!crtc_state->fec_enable)
return;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
@@ -3428,10 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -3458,14 +3414,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* (DFLEXDPSP.DPX4TXLATC)
*
* This was done before tgl_ddi_pre_enable_dp by
- * haswell_crtc_enable()->intel_encoders_pre_pll_enable().
+ * hsw_crtc_enable()->intel_encoders_pre_pll_enable().
*/
/*
* 4. Enable the port PLL.
*
* The PLL enabling itself was already done before this function by
- * haswell_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
* configure the PLL to port mapping here.
*/
intel_ddi_clk_select(encoder, crtc_state);
@@ -3509,12 +3465,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* down this function.
*/
- /*
- * 7.d Type C with DP alternate or fixed/legacy/static connection -
- * Disable PHY clock gating per Type-C DDI Buffer page
- */
- icl_phy_set_clock_gating(dig_port, false);
-
/* 7.e Configure voltage swing and related IO settings */
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
encoder->type);
@@ -3566,15 +3516,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
if (!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
- /*
- * TODO: enable clock gating
- *
- * It is not written in DP enabling sequence but "PHY Clockgating
- * programming" states that clock gating should be enabled after the
- * link training but doing so causes all the following trainings to fail
- * so not enabling it for now.
- */
-
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
@@ -3584,15 +3525,18 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ if (INTEL_GEN(dev_priv) < 11)
+ WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ else
+ WARN_ON(is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3610,7 +3554,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
- icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3644,8 +3587,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_fec(encoder, crtc_state);
- icl_phy_set_clock_gating(dig_port, true);
-
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
@@ -3674,12 +3615,12 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int level = intel_ddi_hdmi_level(dev_priv, port);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3687,7 +3628,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
- icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 12)
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3702,8 +3642,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- icl_phy_set_clock_gating(dig_port, true);
-
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@@ -3746,12 +3684,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
} else {
struct intel_lspcon *lspcon =
- enc_to_intel_lspcon(&encoder->base);
+ enc_to_intel_lspcon(encoder);
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3776,7 +3714,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
}
if (intel_crtc_has_dp_encoder(crtc_state)) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
@@ -3796,7 +3734,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
@@ -3808,8 +3746,19 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
*/
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- if (INTEL_GEN(dev_priv) < 12 && !is_mst)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_mst) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ u32 val;
+
+ val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~TGL_TRANS_DDI_PORT_MASK;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ }
+ } else {
+ if (!is_mst)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+ }
intel_disable_ddi_buf(encoder, old_crtc_state);
@@ -3838,7 +3787,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
dig_port->set_infoframes(encoder, false,
@@ -3860,8 +3809,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg;
- u32 trans_ddi_func_ctl2_val;
if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
return;
@@ -3869,10 +3816,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
transcoder_name(old_crtc_state->cpu_transcoder));
- reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
- trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
- PORT_SYNC_MODE_MASTER_SELECT_MASK);
- I915_WRITE(reg, trans_ddi_func_ctl2_val);
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
}
static void intel_ddi_post_disable(struct intel_encoder *encoder,
@@ -3880,25 +3824,27 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- intel_crtc_vblank_off(old_crtc_state);
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
+ intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_pipe(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_disable_transcoder_port_sync(old_crtc_state);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
- intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
- intel_dsc_disable(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9)
- skylake_scaler_disable(old_crtc_state);
- else
- ironlake_pfit_disable(old_crtc_state);
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+ }
/*
* When called from DP MST code:
@@ -3970,7 +3916,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4011,7 +3957,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -4088,7 +4034,7 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp->link_trained = false;
@@ -4136,7 +4082,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_ddi_set_dp_msa(crtc_state, conn_state);
@@ -4200,7 +4146,8 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
WARN_ON(crtc && crtc->active);
- intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
+ intel_tc_port_get_link(enc_to_dig_port(encoder),
+ required_lanes);
if (crtc_state && crtc_state->hw.active)
intel_update_active_dpll(state, crtc, encoder);
}
@@ -4210,7 +4157,7 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
+ intel_tc_port_put_link(enc_to_dig_port(encoder));
}
static void
@@ -4219,7 +4166,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
@@ -4405,6 +4352,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ pipe_config->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
+
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
@@ -4518,7 +4470,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
@@ -4585,7 +4537,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -4657,7 +4609,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
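
The intel_ddi.c changes thread one new piece of state through enable, disable, and readout: on gen12, TRANS_DDI_MST_TRANSPORT_SELECT must name the MST master transcoder, and only the master stream may clear the port selection on disable. A sketch of the disable split the hunks implement (register and helper names taken from the diff):

/* TRANS_DDI_FUNC_ENABLE is always cleared, but the port selection
 * survives until the MST master stream itself is torn down.
 */
val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
val &= ~TRANS_DDI_FUNC_ENABLE;
if (!intel_dp_mst_is_master_trans(crtc_state))
	val &= ~TGL_TRANS_DDI_PORT_MASK;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);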
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index fbd56248efa8..05b6656cfcfb 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -46,6 +46,7 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
+#include "display/intel_dp_mst.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
@@ -145,8 +146,8 @@ static const u64 cursor_format_modifiers[] = {
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
@@ -157,15 +158,15 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
@@ -369,7 +370,7 @@ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
},
};
-static const struct intel_limit intel_limits_pineview_sdvo = {
+static const struct intel_limit pnv_limits_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
@@ -384,7 +385,7 @@ static const struct intel_limit intel_limits_pineview_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const struct intel_limit intel_limits_pineview_lvds = {
+static const struct intel_limit pnv_limits_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
@@ -402,7 +403,7 @@ static const struct intel_limit intel_limits_pineview_lvds = {
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
-static const struct intel_limit intel_limits_ironlake_dac = {
+static const struct intel_limit ilk_limits_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
@@ -415,7 +416,7 @@ static const struct intel_limit intel_limits_ironlake_dac = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const struct intel_limit intel_limits_ironlake_single_lvds = {
+static const struct intel_limit ilk_limits_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -428,7 +429,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const struct intel_limit intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit ilk_limits_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -442,7 +443,7 @@ static const struct intel_limit intel_limits_ironlake_dual_lvds = {
};
/* LVDS 100MHz refclk limits. */
-static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit ilk_limits_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
@@ -455,7 +456,7 @@ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -553,13 +554,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
}
static bool
-is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
-{
- return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
-}
-
-static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
return crtc_state->master_transcoder != INVALID_TRANSCODER;
@@ -1637,7 +1631,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1735,8 +1729,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("Failed to enable PCH transcoder\n");
}
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
i915_reg_t reg;
u32 val;
@@ -1944,7 +1938,8 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
static bool is_gen12_ccs_modifier(u64 modifier)
{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
}
static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
@@ -1977,8 +1973,7 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
}
/* Return either the main plane's CCS or - if not a CCS FB - UV plane */
-static int
-intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
if (is_ccs_modifier(fb->modifier))
return main_to_ccs_plane(fb, main_plane);
@@ -1994,6 +1989,13 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
+static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ color_plane == 1;
+}
+
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
@@ -2013,6 +2015,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
/* fall through */
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
/* fall through */
@@ -2068,6 +2071,16 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
*tile_height = intel_tile_height(fb, color_plane);
}
+static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ unsigned int tile_width, tile_height;
+
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ return fb->pitches[color_plane] * tile_height;
+}
+
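The new intel_tile_row_size() helper is simply the plane stride times the tile height. A standalone sketch of the arithmetic (numbers invented for illustration; a Y tile is 128 bytes by 32 rows):

#include <assert.h>

/* Illustrative only: with a Y tile of 128 bytes x 32 rows, a plane with
 * an 8192-byte stride has a tile row of 8192 * 32 = 262144 bytes; gen12
 * semiplanar UV planes are aligned to this value further below. */
static unsigned int tile_row_size(unsigned int pitch_bytes,
				  unsigned int tile_height)
{
	return pitch_bytes * tile_height;
}

int main(void)
{
	assert(tile_row_size(8192, 32) == 262144);
	return 0;
}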
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height)
@@ -2142,7 +2155,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (is_aux_plane(fb, color_plane))
+ if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
+ is_ccs_plane(fb, color_plane))
return 4096;
switch (fb->modifier) {
@@ -2152,11 +2166,19 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_semiplanar_uv_plane(fb, color_plane))
+ return intel_tile_row_size(fb, color_plane);
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ return intel_tile_row_size(fb, color_plane);
+ /* fall through */
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
@@ -2193,6 +2215,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
+ if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
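The added WARN_ON rejects alignments that are not powers of two before pinning, presumably (our inference; the patch does not say) because the pinning path still treats alignment as a bitmask, unlike the rounddown()-based offset code later in this diff. is_power_of_2() is the usual bit trick, sketched here:

#include <assert.h>
#include <stdbool.h>

/* The usual power-of-two test (the kernel provides its own version). */
static bool pow2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	assert(pow2(4096));			/* page alignment: fine */
	assert(!pow2(8192 * 32 + 4096));	/* a tile-row-ish size: rejected */
	return 0;
}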
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
@@ -2431,9 +2455,6 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
- if (alignment)
- alignment--;
-
if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2455,17 +2476,24 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
*x %= tile_width;
offset = (tile_rows * pitch_tiles + tiles) * tile_size;
- offset_aligned = offset & ~alignment;
+
+ offset_aligned = offset;
+ if (alignment)
+ offset_aligned = rounddown(offset_aligned, alignment);
intel_adjust_tile_offset(x, y, tile_width, tile_height,
tile_size, pitch_tiles,
offset, offset_aligned);
} else {
offset = *y * pitch + *x * cpp;
- offset_aligned = offset & ~alignment;
-
- *y = (offset & alignment) / pitch;
- *x = ((offset & alignment) - *y * pitch) / cpp;
+ offset_aligned = offset;
+ if (alignment) {
+ offset_aligned = rounddown(offset_aligned, alignment);
+ *y = (offset % alignment) / pitch;
+ *x = ((offset % alignment) - *y * pitch) / cpp;
+ } else {
+ *y = *x = 0;
+ }
}
return offset_aligned;
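This hunk replaces the old "alignment--; offset & ~alignment" mask trick with rounddown(), which also handles the non-power-of-two alignments (tile-row sizes) introduced above. A minimal self-contained comparison, with sample values of our choosing:

#include <stdio.h>

/* Sketch, not driver code: the mask trick assumes a power-of-two
 * alignment; rounddown()-style arithmetic works for any alignment,
 * e.g. a tile-row size that is not a power of two. */
static unsigned int align_down_mask(unsigned int offset, unsigned int alignment)
{
	return offset & ~(alignment - 1);	/* correct only for powers of two */
}

static unsigned int align_down_any(unsigned int offset, unsigned int alignment)
{
	return offset - (offset % alignment);	/* correct for any alignment */
}

int main(void)
{
	/* alignment = 6144 (not a power of two): the mask trick goes wrong */
	printf("mask: %u, rounddown: %u\n",
	       align_down_mask(10000, 6144), align_down_any(10000, 6144));
	return 0;
}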
@@ -2498,9 +2526,17 @@ static int intel_fb_offset_to_xy(int *x, int *y,
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int height;
+ u32 alignment;
+
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ alignment = intel_tile_row_size(fb, color_plane);
+ else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ alignment = intel_tile_size(dev_priv);
+ else
+ alignment = 0;
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ if (alignment != 0 && fb->offsets[color_plane] % alignment) {
DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
fb->offsets[color_plane], color_plane);
return -EINVAL;
@@ -2537,6 +2573,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
@@ -2588,6 +2625,30 @@ static const struct drm_format_info gen12_ccs_formats[] = {
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUYV, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .num_planes = 4,
+ .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P010, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P012, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P016, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
};
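With is_ccs_modifier() implying four planes for semiplanar CCS formats (see intel_format_info_is_yuv_semiplanar() earlier in this diff), the table entries above suggest a layout of main Y, main UV, then one CCS plane per main plane. A hedged sketch of that indexing; the helper names are ours, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Assumed plane order for a 4-plane semiplanar CCS fb:
 * 0 = Y, 1 = UV, 2 = CCS for Y, 3 = CCS for UV. */
static bool plane_is_ccs(int num_planes, int plane)
{
	return plane >= num_planes / 2;
}

static int ccs_to_main(int num_planes, int ccs_plane)
{
	return ccs_plane - num_planes / 2;
}

int main(void)
{
	int p;

	for (p = 0; p < 4; p++)
		printf("plane %d: %s\n", p,
		       plane_is_ccs(4, p) ? "CCS" : "main");
	printf("CCS plane 3 belongs to main plane %d\n", ccs_to_main(4, 3));
	return 0;
}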
static const struct drm_format_info *
@@ -2614,6 +2675,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
ARRAY_SIZE(skl_ccs_formats),
cmd->pixel_format);
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return lookup_format_info(gen12_ccs_formats,
ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
@@ -2625,6 +2687,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
@@ -2698,7 +2761,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
}
tile_width = intel_tile_width_bytes(fb, color_plane);
- if (is_ccs_modifier(fb->modifier) && color_plane == 0) {
+ if (is_ccs_modifier(fb->modifier)) {
/*
* Display WA #0531: skl,bxt,kbl,glk
*
@@ -2708,7 +2771,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if (IS_GEN(dev_priv, 9) && fb->width > 3840)
+ if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
tile_width *= 4;
/*
* The main surface pitch must be padded to a multiple of four
@@ -2876,11 +2939,15 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
static void
intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
{
+ int main_plane = is_ccs_plane(fb, color_plane) ?
+ ccs_to_main_plane(fb, color_plane) : 0;
+ int main_hsub, main_vsub;
int hsub, vsub;
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
- *w = fb->width / hsub;
- *h = fb->height / vsub;
+ *w = fb->width / main_hsub / hsub;
+ *h = fb->height / main_vsub / vsub;
}
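The reworked intel_fb_plane_dims() divides by the main plane's subsampling and then by the plane's own packing. A toy numeric check using NV12's well-known 2x2 chroma subsampling (values ours):

#include <stdio.h>

int main(void)
{
	int fb_w = 1920, fb_h = 1080;
	int main_hsub = 1, main_vsub = 1;	/* Y plane: not subsampled */
	int hsub = 2, vsub = 2;			/* NV12 UV plane: 2x2 */

	/* mirrors the two-step division added above: prints 960x540 */
	printf("UV plane: %dx%d\n",
	       fb_w / main_hsub / hsub, fb_h / main_vsub / vsub);
	return 0;
}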
/*
@@ -3598,6 +3665,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
return 5120;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
/* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
@@ -3656,11 +3724,12 @@ static int icl_max_plane_height(void)
return 4320;
}
-static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
- int main_x, int main_y, u32 main_offset)
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset,
+ int ccs_plane)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ccs_plane = main_to_ccs_plane(fb, 0);
int aux_x = plane_state->color_plane[ccs_plane].x;
int aux_y = plane_state->color_plane[ccs_plane].y;
u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
@@ -3737,6 +3806,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
+ if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ return -EINVAL;
/*
* AUX surface offset is specified as the distance from the
@@ -3772,7 +3843,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* they match with the main surface x/y offsets.
*/
if (is_ccs_modifier(fb->modifier)) {
- while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, aux_plane)) {
if (offset == 0)
break;
@@ -3805,7 +3877,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
- int max_width = skl_max_plane_width(fb, 1, rotation);
+ int uv_plane = 1;
+ int max_width = skl_max_plane_width(fb, uv_plane, rotation);
int max_height = 4096;
int x = plane_state->uapi.src.x1 >> 17;
int y = plane_state->uapi.src.y1 >> 17;
@@ -3813,8 +3886,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
int h = drm_rect_height(&plane_state->uapi.src) >> 17;
u32 offset;
- intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+ intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state, uv_plane);
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
@@ -3823,9 +3897,39 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
- plane_state->color_plane[1].offset = offset;
- plane_state->color_plane[1].x = x;
- plane_state->color_plane[1].y = y;
+ if (is_ccs_modifier(fb->modifier)) {
+ int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+ int aux_offset = plane_state->color_plane[ccs_plane].offset;
+ int alignment = intel_surf_alignment(fb, uv_plane);
+
+ if (offset > aux_offset)
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset,
+ aux_offset & ~(alignment - 1));
+
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, ccs_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->color_plane[ccs_plane].x ||
+ y != plane_state->color_plane[ccs_plane].y) {
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ plane_state->color_plane[uv_plane].offset = offset;
+ plane_state->color_plane[uv_plane].x = x;
+ plane_state->color_plane[uv_plane].y = y;
return 0;
}
@@ -3835,21 +3939,40 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_x = plane_state->uapi.src.x1 >> 16;
int src_y = plane_state->uapi.src.y1 >> 16;
- int hsub;
- int vsub;
- int x;
- int y;
u32 offset;
+ int ccs_plane;
+
+ for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+ int x, y;
+
+ if (!is_ccs_plane(fb, ccs_plane))
+ continue;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+ ccs_to_main_plane(fb, ccs_plane));
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ hsub *= main_hsub;
+ vsub *= main_vsub;
+ x = src_x / hsub;
+ y = src_y / vsub;
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1);
- x = src_x / hsub;
- y = src_y / vsub;
- intel_add_fb_offsets(&x, &y, plane_state, 1);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
+ intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
- plane_state->color_plane[1].offset = offset;
- plane_state->color_plane[1].x = x * hsub + src_x % hsub;
- plane_state->color_plane[1].y = y * vsub + src_y % vsub;
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane);
+
+ plane_state->color_plane[ccs_plane].offset = offset;
+ plane_state->color_plane[ccs_plane].x = (x * hsub +
+ src_x % hsub) /
+ main_hsub;
+ plane_state->color_plane[ccs_plane].y = (y * vsub +
+ src_y % vsub) /
+ main_vsub;
+ }
return 0;
}
@@ -3858,6 +3981,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
+ bool needs_aux = false;
ret = intel_plane_compute_gtt(plane_state);
if (ret)
@@ -3867,22 +3991,32 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
/*
- * Handle the AUX surface first since
- * the main surface setup depends on it.
+ * Handle the AUX surface first since the main surface setup depends on
+ * it.
*/
+ if (is_ccs_modifier(fb->modifier)) {
+ needs_aux = true;
+ ret = skl_check_ccs_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
if (intel_format_info_is_yuv_semiplanar(fb->format,
fb->modifier)) {
+ needs_aux = true;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
- } else if (is_ccs_modifier(fb->modifier)) {
- ret = skl_check_ccs_aux_surface(plane_state);
- if (ret)
- return ret;
- } else {
- plane_state->color_plane[1].offset = ~0xfff;
- plane_state->color_plane[1].x = 0;
- plane_state->color_plane[1].y = 0;
+ }
+
+ if (!needs_aux) {
+ int i;
+
+ for (i = 1; i < fb->format->num_planes; i++) {
+ plane_state->color_plane[i].offset = ~0xfff;
+ plane_state->color_plane[i].x = 0;
+ plane_state->color_plane[i].y = 0;
+ }
}
ret = skl_check_main_surface(plane_state);
@@ -4472,6 +4606,8 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_Y |
PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -4869,8 +5005,8 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
}
/* The FDI link training functions for ILK/Ibexpeak. */
-static void ironlake_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5222,7 +5358,7 @@ train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
@@ -5259,7 +5395,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
}
}
-static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5289,7 +5425,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void ironlake_fdi_disable(struct intel_crtc *crtc)
+static void ilk_fdi_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -5496,8 +5632,8 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
desired_divisor << auxdiv);
}
-static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
- enum pipe pch_transcoder)
+static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+ enum pipe pch_transcoder)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5540,7 +5676,7 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5601,8 +5737,8 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+static void ilk_pch_enable(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
@@ -5613,7 +5749,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(crtc_state);
+ ivb_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -5650,7 +5786,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(crtc_state, pipe);
+ ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -5682,7 +5818,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(crtc_state);
+ ilk_enable_pch_transcoder(crtc_state);
}
static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -5697,7 +5833,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
+ ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
@@ -6001,7 +6137,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
return 0;
}
-void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state)
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
int i;
@@ -6010,7 +6146,7 @@ void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state)
skl_detach_scaler(crtc, i);
}
-static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -6047,7 +6183,7 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
}
}
-static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -6411,45 +6547,29 @@ intel_connector_primary_encoder(struct intel_connector *connector)
if (connector->mst_port)
return &dp_to_dig_port(connector->mst_port)->base;
- encoder = intel_attached_encoder(&connector->base);
+ encoder = intel_attached_encoder(connector);
WARN_ON(!encoder);
return encoder;
}
-static bool
-intel_connector_needs_modeset(struct intel_atomic_state *state,
- const struct drm_connector_state *old_conn_state,
- const struct drm_connector_state *new_conn_state)
-{
- struct intel_crtc *old_crtc = old_conn_state->crtc ?
- to_intel_crtc(old_conn_state->crtc) : NULL;
- struct intel_crtc *new_crtc = new_conn_state->crtc ?
- to_intel_crtc(new_conn_state->crtc) : NULL;
-
- return new_crtc != old_crtc ||
- (new_crtc &&
- needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
-}
-
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
- struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
- struct drm_connector *conn;
+ struct drm_connector *connector;
int i;
- for_each_oldnew_connector_in_state(&state->base, conn,
- old_conn_state, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
- if (!intel_connector_needs_modeset(state,
- old_conn_state,
- new_conn_state))
+ if (!intel_connector_needs_modeset(state, connector))
continue;
- encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
if (!encoder->update_prepare)
continue;
@@ -6461,22 +6581,21 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
static void intel_encoders_update_complete(struct intel_atomic_state *state)
{
- struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
- struct drm_connector *conn;
+ struct drm_connector *connector;
int i;
- for_each_oldnew_connector_in_state(&state->base, conn,
- old_conn_state, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
- if (!intel_connector_needs_modeset(state,
- old_conn_state,
- new_conn_state))
+ if (!intel_connector_needs_modeset(state, connector))
continue;
- encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
if (!encoder->update_complete)
continue;
@@ -6643,8 +6762,8 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat
plane->disable_plane(plane, crtc_state);
}
-static void ironlake_crtc_enable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void ilk_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -6680,7 +6799,7 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state,
intel_cpu_transcoder_set_m_n(new_crtc_state,
&new_crtc_state->fdi_m_n, NULL);
- ironlake_set_pipeconf(new_crtc_state);
+ ilk_set_pipeconf(new_crtc_state);
crtc->active = true;
@@ -6690,13 +6809,13 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state,
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(new_crtc_state);
+ ilk_fdi_pll_enable(new_crtc_state);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(new_crtc_state);
+ ilk_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -6712,7 +6831,7 @@ static void ironlake_crtc_enable(struct intel_atomic_state *state,
intel_enable_pipe(new_crtc_state);
if (new_crtc_state->has_pch_encoder)
- ironlake_pch_enable(state, new_crtc_state);
+ ilk_pch_enable(state, new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -6787,8 +6906,8 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
I915_WRITE(reg, val);
}
-static void haswell_crtc_enable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void hsw_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -6829,7 +6948,7 @@ static void haswell_crtc_enable(struct intel_atomic_state *state,
if (!transcoder_is_dsi(cpu_transcoder)) {
hsw_set_frame_start_delay(new_crtc_state);
- haswell_set_pipeconf(new_crtc_state);
+ hsw_set_pipeconf(new_crtc_state);
}
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
@@ -6844,9 +6963,9 @@ static void haswell_crtc_enable(struct intel_atomic_state *state,
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(new_crtc_state);
+ skl_pfit_enable(new_crtc_state);
else
- ironlake_pfit_enable(new_crtc_state);
+ ilk_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -6895,7 +7014,7 @@ static void haswell_crtc_enable(struct intel_atomic_state *state,
}
}
-void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -6910,8 +7029,8 @@ void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
}
-static void ironlake_crtc_disable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void ilk_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -6932,15 +7051,15 @@ static void ironlake_crtc_disable(struct intel_atomic_state *state,
intel_disable_pipe(old_crtc_state);
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
if (old_crtc_state->has_pch_encoder)
- ironlake_fdi_disable(crtc);
+ ilk_fdi_disable(crtc);
intel_encoders_post_disable(state, crtc);
if (old_crtc_state->has_pch_encoder) {
- ironlake_disable_pch_transcoder(dev_priv, pipe);
+ ilk_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
@@ -6960,15 +7079,15 @@ static void ironlake_crtc_disable(struct intel_atomic_state *state,
I915_WRITE(PCH_DPLL_SEL, temp);
}
- ironlake_fdi_pll_disable(crtc);
+ ilk_fdi_pll_disable(crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void haswell_crtc_disable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void hsw_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
/*
* FIXME collapse everything to one hook.
@@ -7505,8 +7624,8 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
return 0;
}
-static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->uapi.state;
@@ -7578,8 +7697,8 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
#define RETRY 1
-static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
+static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -7598,15 +7717,15 @@ retry:
fdi_dotclock = adjusted_mode->crtc_clock;
- lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n, false, false);
- ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EDEADLK)
return ret;
@@ -7812,7 +7931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
if (pipe_config->has_pch_encoder)
- return ironlake_fdi_compute_config(crtc, pipe_config);
+ return ilk_fdi_compute_config(crtc, pipe_config);
return 0;
}
@@ -8795,9 +8914,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- limit = &intel_limits_pineview_lvds;
+ limit = &pnv_limits_lvds;
} else {
- limit = &intel_limits_pineview_sdvo;
+ limit = &pnv_limits_sdvo;
}
if (!crtc_state->clock_set &&
@@ -9224,7 +9343,7 @@ out:
return ret;
}
-static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
int i;
@@ -9722,12 +9841,12 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ironlake_init_pch_refclk(dev_priv);
+ ilk_init_pch_refclk(dev_priv);
else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev_priv);
}
-static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -9783,7 +9902,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -9871,7 +9990,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
}
}
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
* Account for spread spectrum to avoid
@@ -9882,14 +10001,14 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static void ironlake_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
@@ -9909,7 +10028,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
fp |= FP_CB_TUNE;
if (reduced_clock) {
@@ -9989,8 +10108,8 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp1 = fp2;
}
-static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
@@ -10014,17 +10133,17 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
- limit = &intel_limits_ironlake_dual_lvds_100m;
+ limit = &ilk_limits_dual_lvds_100m;
else
- limit = &intel_limits_ironlake_dual_lvds;
+ limit = &ilk_limits_dual_lvds;
} else {
if (refclk == 100000)
- limit = &intel_limits_ironlake_single_lvds_100m;
+ limit = &ilk_limits_single_lvds_100m;
else
- limit = &intel_limits_ironlake_single_lvds;
+ limit = &ilk_limits_single_lvds;
}
} else {
- limit = &intel_limits_ironlake_dac;
+ limit = &ilk_limits_dac;
}
if (!crtc_state->clock_set &&
@@ -10034,7 +10153,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- ironlake_compute_dpll(crtc, crtc_state, NULL);
+ ilk_compute_dpll(crtc, crtc_state, NULL);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
@@ -10109,15 +10228,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
&pipe_config->dp_m2_n2);
}
-static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
}
-static void skylake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void skl_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10148,8 +10267,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
}
static void
-skylake_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10210,6 +10329,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
I915_FORMAT_MOD_Y_TILED_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
@@ -10276,8 +10397,8 @@ error:
kfree(intel_fb);
}
-static void ironlake_get_pfit_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10300,8 +10421,8 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool ilk_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10372,7 +10493,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ ilk_get_fdi_m_n_config(crtc, pipe_config);
if (HAS_PCH_IBX(dev_priv)) {
/*
@@ -10400,7 +10521,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
- ironlake_pch_clock_get(crtc, pipe_config);
+ ilk_pch_clock_get(crtc, pipe_config);
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -10408,7 +10529,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ironlake_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(crtc, pipe_config);
ret = true;
@@ -10417,8 +10538,8 @@ out:
return ret;
}
-static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
@@ -10439,9 +10561,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
-static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
@@ -10455,9 +10576,8 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum phy phy = intel_port_to_phy(dev_priv, port);
enum icl_port_dpll_id port_dpll_id;
@@ -10516,9 +10636,8 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 temp;
@@ -10532,9 +10651,8 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
-static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
+ struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
@@ -10722,8 +10840,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
-static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
@@ -10743,15 +10861,15 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 11)
- icelake_get_ddi_pll(dev_priv, port, pipe_config);
+ icl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
- cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
+ cnl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_BC(dev_priv))
- skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ skl_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
- haswell_get_ddi_pll(dev_priv, port, pipe_config);
+ hsw_get_ddi_pll(dev_priv, port, pipe_config);
pll = pipe_config->shared_dpll;
if (pll) {
@@ -10772,7 +10890,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ ilk_get_fdi_m_n_config(crtc, pipe_config);
}
}
@@ -10794,7 +10912,7 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
return master_select - 1;
}
-static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders;
@@ -10829,8 +10947,8 @@ static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_sta
crtc_state->sync_mode_slaves_mask);
}
-static bool haswell_get_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool hsw_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
@@ -10865,7 +10983,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
INTEL_GEN(dev_priv) >= 11) {
- haswell_get_ddi_port_state(crtc, pipe_config);
+ hsw_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -10922,9 +11040,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_get_pfit_config(crtc, pipe_config);
+ skl_get_pfit_config(crtc, pipe_config);
else
- ironlake_get_pfit_config(crtc, pipe_config);
+ ilk_get_pfit_config(crtc, pipe_config);
}
if (hsw_crtc_supports_ips(crtc)) {
@@ -10950,7 +11068,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (INTEL_GEN(dev_priv) >= 11 &&
!transcoder_is_dsi(pipe_config->cpu_transcoder))
- icelake_get_trans_port_sync_config(pipe_config);
+ icl_get_trans_port_sync_config(pipe_config);
out:
for_each_power_domain(power_domain, power_domain_mask)
@@ -11570,7 +11688,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
- intel_attached_encoder(connector);
+ intel_attached_encoder(to_intel_connector(connector));
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
@@ -11724,7 +11842,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *intel_encoder =
- intel_attached_encoder(connector);
+ intel_attached_encoder(to_intel_connector(connector));
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_atomic_state *state = old->restore_state;
int ret;
@@ -11867,8 +11985,8 @@ int intel_dotclock_calculate(int link_freq,
return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
-static void ironlake_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void ilk_pch_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -11897,6 +12015,7 @@ static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -12247,6 +12366,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
/* Copy parameters to slave plane */
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
+ linked_state->view = plane_state->view;
memcpy(linked_state->color_plane, plane_state->color_plane,
sizeof(linked_state->color_plane));
@@ -12278,88 +12398,121 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
+static bool
+intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+ if (connector->has_tile &&
+ connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ return true;
+ }
+
+ return false;
+}
+
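intel_atomic_is_master_connector() picks the connector at the last horizontal and last vertical tile position as the port sync master. The coordinate test in isolation:

#include <assert.h>
#include <stdbool.h>

/* The master sits at tile (num_h - 1, num_v - 1), i.e. bottom-right. */
static bool is_master_tile(int h_loc, int v_loc, int num_h, int num_v)
{
	return h_loc == num_h - 1 && v_loc == num_v - 1;
}

int main(void)
{
	/* a 2x1 tiled display: tile (1, 0) is the master */
	assert(is_master_tile(1, 0, 2, 1));
	assert(!is_master_tile(0, 0, 2, 1));
	return 0;
}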
+static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask = 0;
+}
+
+static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state,
+ int num_tiled_conns)
{
struct drm_crtc *crtc = crtc_state->uapi.crtc;
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct drm_connector *master_connector, *connector;
- struct drm_connector_state *connector_state;
+ struct drm_connector *master_connector;
struct drm_connector_list_iter conn_iter;
struct drm_crtc *master_crtc = NULL;
struct drm_crtc_state *master_crtc_state;
struct intel_crtc_state *master_pipe_config;
- int i, tile_group_id;
if (INTEL_GEN(dev_priv) < 11)
return 0;
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
/*
* In case of tiled displays there could be one or more slaves but there is
* only one master. Let's make the CRTC used by the connector corresponding
* to the last horizontal and last vertical tile a master/genlock CRTC.
* All the other CRTCs corresponding to other tiles of the same Tile group
* are the slave CRTCs and hold a pointer to their genlock CRTC.
+ * If all the tiles are not present, do not make master/slave assignments.
*/
- for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
- if (connector_state->crtc != crtc)
- continue;
- if (!connector->has_tile)
+ if (!connector->has_tile ||
+ crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
+ num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
+ reset_port_sync_mode_state(crtc_state);
+ return 0;
+ }
+ /* The connector for the last horizontal and last vertical tile is the
+ * master; its crtc state is already populated in the slaves for port sync.
+ */
+ if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ return 0;
+
+ /* Loop through all connectors and configure the Slave crtc_state
+ * to point to the correct master.
+ */
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(master_connector, &conn_iter) {
+ struct drm_connector_state *master_conn_state = NULL;
+
+ if (!(master_connector->has_tile &&
+ master_connector->tile_group->id == connector->tile_group->id))
continue;
- if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
- crtc_state->hw.mode.vdisplay != connector->tile_v_size)
- return 0;
- if (connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
+ if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+ master_connector->tile_v_loc != master_connector->num_v_tile - 1)
continue;
- crtc_state->sync_mode_slaves_mask = 0;
- tile_group_id = connector->tile_group->id;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(master_connector, &conn_iter) {
- struct drm_connector_state *master_conn_state = NULL;
-
- if (!master_connector->has_tile)
- continue;
- if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
- master_connector->tile_v_loc != master_connector->num_v_tile - 1)
- continue;
- if (master_connector->tile_group->id != tile_group_id)
- continue;
- master_conn_state = drm_atomic_get_connector_state(&state->base,
- master_connector);
- if (IS_ERR(master_conn_state)) {
- drm_connector_list_iter_end(&conn_iter);
- return PTR_ERR(master_conn_state);
- }
- if (master_conn_state->crtc) {
- master_crtc = master_conn_state->crtc;
- break;
- }
+ master_conn_state = drm_atomic_get_connector_state(&state->base,
+ master_connector);
+ if (IS_ERR(master_conn_state)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return PTR_ERR(master_conn_state);
}
- drm_connector_list_iter_end(&conn_iter);
-
- if (!master_crtc) {
- DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
- connector_state->crtc->base.id);
- return -EINVAL;
+ if (master_conn_state->crtc) {
+ master_crtc = master_conn_state->crtc;
+ break;
}
+ }
+ drm_connector_list_iter_end(&conn_iter);
- master_crtc_state = drm_atomic_get_crtc_state(&state->base,
- master_crtc);
- if (IS_ERR(master_crtc_state))
- return PTR_ERR(master_crtc_state);
-
- master_pipe_config = to_intel_crtc_state(master_crtc_state);
- crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
- master_pipe_config->sync_mode_slaves_mask |=
- BIT(crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
- transcoder_name(crtc_state->master_transcoder),
- crtc_state->uapi.crtc->base.id,
- master_pipe_config->sync_mode_slaves_mask);
+ if (!master_crtc) {
+ DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+ crtc->base.id);
+ return -EINVAL;
}
+ master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+ master_crtc);
+ if (IS_ERR(master_crtc_state))
+ return PTR_ERR(master_crtc_state);
+
+ master_pipe_config = to_intel_crtc_state(master_crtc_state);
+ crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+ master_pipe_config->sync_mode_slaves_mask |=
+ BIT(crtc_state->cpu_transcoder);
+ DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+ transcoder_name(crtc_state->master_transcoder),
+ crtc->base.id,
+ master_pipe_config->sync_mode_slaves_mask);
+
return 0;
}
@@ -12755,6 +12908,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->csc_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
+ DRM_DEBUG_KMS("MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
dump_planes:
if (!state)
return;
@@ -12901,9 +13057,11 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
saved_state->wm = crtc_state->wm;
/*
* Save the slave bitmask which gets filled for master crtc state during
- * slave atomic check call.
+ * slave atomic check call. For all other CRTCs, reset the port sync
+ * variables; crtc_state->master_transcoder needs to be set to INVALID.
*/
- if (is_trans_port_sync_master(crtc_state))
+ reset_port_sync_mode_state(saved_state);
+ if (intel_atomic_is_master_connector(crtc_state))
saved_state->sync_mode_slaves_mask =
crtc_state->sync_mode_slaves_mask;
@@ -12924,7 +13082,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int base_bpp, ret;
- int i;
+ int i, tile_group_id = -1, num_tiled_conns = 0;
bool retry = true;
pipe_config->cpu_transcoder =
@@ -12994,13 +13152,22 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
- /* Set the crtc_state defaults for trans_port_sync */
- pipe_config->master_transcoder = INVALID_TRANSCODER;
- ret = icl_add_sync_mode_crtcs(pipe_config);
- if (ret) {
- DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
- ret);
- return ret;
+ /* Get tile_group_id of tiled connector */
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc == crtc &&
+ connector->has_tile) {
+ tile_group_id = connector->tile_group->id;
+ break;
+ }
+ }
+
+ /* Get total number of tiled connectors in state that belong to
+ * this tile group.
+ */
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector->has_tile &&
+ connector->tile_group->id == tile_group_id)
+ num_tiled_conns++;
}
/* Pass our mode to the connectors and the CRTC to give them a chance to
@@ -13011,6 +13178,14 @@ encoder_retry:
if (connector_state->crtc != crtc)
continue;
+ ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
+ num_tiled_conns);
+ if (ret) {
+ DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+ ret);
+ return ret;
+ }
+
encoder = to_intel_encoder(connector_state->best_encoder);
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
@@ -13535,6 +13710,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.dsc_split);
PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+ PIPE_CONF_CHECK_I(mst_master_transcoder);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -14048,7 +14225,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
-static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
+static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
@@ -14143,7 +14320,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
- return haswell_mode_set_planes_workaround(state);
+ return hsw_mode_set_planes_workaround(state);
return 0;
}
@@ -14173,7 +14350,11 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->uapi.mode_changed = false;
new_crtc_state->update_pipe = true;
+}
+static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
/*
* If we're not doing the full modeset we want to
* keep the current M/N values as they may be
@@ -14296,6 +14477,93 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
return 0;
}
+static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
+ u8 transcoders)
+{
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ transcoders & BIT(new_crtc_state->cpu_transcoder) &&
+ needs_modeset(new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+static int
+intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_grp_id)
+ continue;
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ if (!conn_state->crtc)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(&state->base,
+ conn_state->crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+ crtc_state->mode_changed = true;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ conn_state->crtc);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int
+intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i, ret;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * If a tiled connector needs a modeset, mark all the CRTCs in its
+ * tile group as needing a modeset too.
+ */
+ for_each_oldnew_connector_in_state(&state->base, connector,
+ old_conn_state, new_conn_state, i) {
+ if (!connector->has_tile)
+ continue;
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ ret = intel_modeset_all_tiles(state, connector->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
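
As a rough model of what intel_modeset_all_tiles() does above, the sketch below walks a flat connector table and flags every CRTC driving a member of the given tile group; the struct and the flag array are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct conn_sketch {
        bool has_tile;
        int tile_group_id;
        int crtc;               /* -1 when the connector is idle */
};

/* Force a full modeset on every CRTC driving a member of the tile group. */
static void modeset_all_tiles(const struct conn_sketch *conns, int n,
                              int tile_grp_id, bool *crtc_mode_changed)
{
        for (int i = 0; i < n; i++) {
                if (!conns[i].has_tile ||
                    conns[i].tile_group_id != tile_grp_id ||
                    conns[i].crtc < 0)
                        continue;
                crtc_mode_changed[conns[i].crtc] = true;
        }
}

int main(void)
{
        struct conn_sketch conns[] = {
                { true, 7, 0 }, { true, 7, 1 }, { false, 0, 2 },
        };
        bool mode_changed[3] = { false };

        modeset_all_tiles(conns, 3, 7, mode_changed);
        for (int i = 0; i < 3; i++)     /* prints 1 1 0 */
                printf("%d ", mode_changed[i]);
        printf("\n");
        return 0;
}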
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -14323,6 +14591,21 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ /*
+ * This check adds all the connectors in the current state that belong
+ * to the same tile group to a full modeset.
+ * It sets mode_changed directly to true and also calls
+ * drm_atomic_add_affected_connectors(); hence we do not explicitly
+ * call drm_atomic_helper_check_modeset() after this.
+ *
+ * FIXME: Handle the corner case where one of the tiled connectors gets
+ * disconnected and its tile info is lost: since it was previously
+ * synced to the other connectors, it still needs to be added to the
+ * modeset.
+ */
+ ret = intel_atomic_check_tiled_conns(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state)) {
@@ -14334,8 +14617,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (!new_crtc_state->uapi.enable) {
intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
-
- any_ms = true;
continue;
}
@@ -14348,9 +14629,54 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ /*
+ * Check if fastset is allowed by external dependencies like other
+ * pipes and transcoders.
+ *
+ * Right now it only forces a full modeset when the MST master
+ * transcoder did not change but the pipe of the master transcoder
+ * needs a full modeset, so all slaves also need to do a full modeset.
+ * Likewise for port synced CRTCs: if one of the synced CRTCs needs a
+ * full modeset, all the other synced CRTCs should be forced to do a
+ * full modeset as well.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
+ enum transcoder master = new_crtc_state->mst_master_transcoder;
+
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ u8 trans = new_crtc_state->sync_mode_slaves_mask |
+ BIT(new_crtc_state->master_transcoder);
+
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+ }
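
The two loops above reduce to a bitmask predicate: gather the transcoders a CRTC is synced with (slaves plus master) and drop the fastset if any of them is already committed to a full modeset. A toy version of that predicate, with simplified, assumed state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sync_state {
        bool enabled;
        bool needs_modeset;
        int cpu_transcoder;
        uint8_t sync_mode_slaves_mask;
        int master_transcoder;  /* -1 when not port synced */
};

/* True if any enabled state in the set uses one of @transcoders and is
 * already committed to a full modeset. */
static bool transcoders_need_modeset(const struct sync_state *s, int n,
                                     uint8_t transcoders)
{
        for (int i = 0; i < n; i++)
                if (s[i].enabled && s[i].needs_modeset &&
                    (transcoders & (1u << s[i].cpu_transcoder)))
                        return true;
        return false;
}

/* Decide whether a port-synced CRTC may keep its fastset. */
static bool can_keep_fastset(const struct sync_state *all, int n,
                             const struct sync_state *me)
{
        uint8_t trans = me->sync_mode_slaves_mask;

        if (me->master_transcoder >= 0)
                trans |= 1u << me->master_transcoder;
        return !transcoders_need_modeset(all, n, trans);
}

int main(void)
{
        struct sync_state states[] = {
                /* master on transcoder 0, mid-modeset */
                { true, true,  0, 0x2, -1 },
                /* slave on transcoder 1, hoping for a fastset */
                { true, false, 1, 0x0,  0 },
        };

        /* prints 0: the slave must follow its master into a full modeset */
        printf("%d\n", can_keep_fastset(states, 2, &states[1]));
        return 0;
}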
- if (needs_modeset(new_crtc_state))
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (needs_modeset(new_crtc_state)) {
any_ms = true;
+ continue;
+ }
+
+ if (!new_crtc_state->update_pipe)
+ continue;
+
+ intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -14472,12 +14798,12 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(new_crtc_state);
+ skl_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(new_crtc_state);
+ ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
}
if (INTEL_GEN(dev_priv) >= 11)
@@ -14619,7 +14945,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
u32 handled = 0;
int i;
- /* Only disable port sync slaves */
+ /* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state))
@@ -14633,7 +14959,8 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
* slave CRTCs are disabled first and then master CRTC since
* slave vblanks are masked until master vblanks.
*/
- if (!is_trans_port_sync_slave(old_crtc_state))
+ if (!is_trans_port_sync_slave(old_crtc_state) &&
+ !intel_dp_mst_is_slave_trans(old_crtc_state))
continue;
intel_pre_plane_update(state, crtc);
@@ -14694,10 +15021,14 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
if (conn_state->crtc == &crtc->base)
break;
}
- intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
+ intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
intel_dp_stop_link_train(intel_dp);
}
+/*
+ * TODO: This is only called from port sync, and it is identical to what
+ * will be executed again in intel_update_crtc() over the port sync pipes.
+ */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
struct intel_atomic_state *state)
{
@@ -14786,15 +15117,21 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- u8 dirty_pipes = 0;
+ const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
+ u8 update_pipes = 0, modeset_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
+
/* ignore allocations for CRTCs that have been turned off. */
- if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active)
+ if (!needs_modeset(new_crtc_state)) {
entries[i] = old_crtc_state->wm.skl.ddb;
- if (new_crtc_state->hw.active)
- dirty_pipes |= BIT(crtc->pipe);
+ update_pipes |= BIT(crtc->pipe);
+ } else {
+ modeset_pipes |= BIT(crtc->pipe);
+ }
}
/* If 2nd DBuf slice required, enable it here */
@@ -14804,38 +15141,29 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
- * never overlap with eachother inbetween CRTC updates. Otherwise we'll
+ * never overlap with each other between CRTC updates. Otherwise we'll
* cause pipe underruns and other bad stuff.
+ *
+ * So first let's enable all pipes that do not need a full modeset, as
+ * those don't have any external dependencies.
*/
- while (dirty_pipes) {
+ while (update_pipes) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
- bool modeset = needs_modeset(new_crtc_state);
- if ((dirty_pipes & BIT(pipe)) == 0)
+ if ((update_pipes & BIT(pipe)) == 0)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries,
- INTEL_NUM_PIPES(dev_priv), i))
+ entries, num_pipes, i))
continue;
entries[i] = new_crtc_state->wm.skl.ddb;
- dirty_pipes &= ~BIT(pipe);
-
- if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
- if (is_trans_port_sync_master(new_crtc_state))
- intel_update_trans_port_sync_crtcs(crtc,
- state,
- old_crtc_state,
- new_crtc_state);
- else
- continue;
- } else {
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
- }
+ update_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
/*
* If this is an already active pipe, its DDB changed,
@@ -14845,11 +15173,72 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
*/
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
- !modeset && dirty_pipes)
+ (update_pipes | modeset_pipes))
intel_wait_for_vblank(dev_priv, pipe);
}
}
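
The "never overlap between CRTC updates" rule in the comment above amounts to a small scheduling loop: keep committing any pipe whose new DDB range does not collide with what any other pipe currently has programmed, until none are pending. A self-contained sketch over half-open ranges, assuming a conflict-free order exists (the driver arranges this by deferring full-modeset pipes to later passes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ddb_range { int start, end; };  /* half-open [start, end) */

static bool ranges_overlap(struct ddb_range a, struct ddb_range b)
{
        return a.start < b.end && b.start < a.end;
}

/* Commit new allocations one pipe at a time, never letting a pipe's new
 * range overlap what any other pipe currently has programmed. */
static void commit_in_safe_order(struct ddb_range *cur,
                                 const struct ddb_range *new, int n)
{
        uint8_t pending = (1u << n) - 1;

        while (pending) {
                for (int i = 0; i < n; i++) {
                        bool clash = false;

                        if (!(pending & (1u << i)))
                                continue;
                        for (int j = 0; j < n; j++)
                                if (j != i && ranges_overlap(new[i], cur[j]))
                                        clash = true;
                        if (clash)
                                continue;
                        cur[i] = new[i];        /* hardware write goes here */
                        pending &= ~(1u << i);
                        printf("committed pipe %d\n", i);
                }
        }
}

int main(void)
{
        /* Two pipes swapping halves of the DDB: pipe 1 must shrink first. */
        struct ddb_range cur[] = { { 0, 4 }, { 4, 8 } };
        struct ddb_range new[] = { { 0, 6 }, { 6, 8 } };

        commit_in_safe_order(cur, new, 2);      /* pipe 1, then pipe 0 */
        return 0;
}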
+ /*
+ * Enable all pipes that need a modeset and do not depend on other
+ * pipes.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
+ is_trans_port_sync_slave(new_crtc_state))
+ continue;
+
+ WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i));
+
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ modeset_pipes &= ~BIT(pipe);
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ struct intel_crtc *slave_crtc;
+
+ intel_update_trans_port_sync_crtcs(crtc, state,
+ old_crtc_state,
+ new_crtc_state);
+
+ slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ /* TODO: update entries[] of slave */
+ modeset_pipes &= ~BIT(slave_crtc->pipe);
+
+ } else {
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
+ }
+ }
+
+ /*
+ * Finally enable all pipes that need a modeset and depend on other
+ * pipes. Right now that is only MST slaves, as both port sync slave
+ * and master are enabled together.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, num_pipes, i));
+
+ entries[i] = new_crtc_state->wm.skl.ddb;
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ }
+
+ WARN_ON(modeset_pipes);
+
/* If 2nd DBuf slice is no more required disable it */
if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
icl_dbuf_slices_update(dev_priv, required_slices);
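
Stepping back, the rewritten enable path above is a three-phase scheduler: fastsets first, then modesets nothing depends on, then dependent pipes such as MST slaves. A condensed, illustrative skeleton of that ordering (the enum and helper names are invented):

#include <stdio.h>

enum role { FASTSET, INDEPENDENT_MODESET, DEPENDENT_MODESET };

/* Enable pipes in dependency-safe order: fastsets first, then modesets
 * nothing depends on, and finally modesets that need another pipe up
 * (e.g. MST slaves waiting for their master transcoder). */
static void commit_enables(const enum role *role, int n)
{
        for (int phase = FASTSET; phase <= DEPENDENT_MODESET; phase++)
                for (int pipe = 0; pipe < n; pipe++)
                        if (role[pipe] == phase)
                                printf("phase %d: enabling pipe %d\n",
                                       phase, pipe);
}

int main(void)
{
        /* pipe 0: MST master (independent), pipe 1: its slave (dependent),
         * pipe 2: unrelated fastset. */
        enum role roles[] = { INDEPENDENT_MODESET, DEPENDENT_MODESET, FASTSET };

        commit_enables(roles, 3);       /* order: pipe 2, pipe 0, pipe 1 */
        return 0;
}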
@@ -16593,8 +16982,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0)
+ if (mode_cmd->offsets[0] != 0) {
+ DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
goto err;
+ }
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -16821,29 +17213,28 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_init_cdclk_hooks(dev_priv);
if (INTEL_GEN(dev_priv) >= 9) {
- dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- skylake_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- haswell_crtc_compute_clock;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
+ skl_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ dev_priv->display.crtc_enable = hsw_crtc_enable;
+ dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
- haswell_crtc_compute_clock;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
+ hsw_crtc_compute_clock;
+ dev_priv->display.crtc_enable = hsw_crtc_enable;
+ dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_pipe_config = ilk_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
- ironlake_crtc_compute_clock;
- dev_priv->display.crtc_enable = ironlake_crtc_enable;
- dev_priv->display.crtc_disable = ironlake_crtc_disable;
+ ilk_crtc_compute_clock;
+ dev_priv->display.crtc_enable = ilk_crtc_enable;
+ dev_priv->display.crtc_disable = ilk_crtc_disable;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -16889,7 +17280,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
} else if (IS_GEN(dev_priv, 6)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
} else if (IS_IVYBRIDGE(dev_priv)) {
@@ -17834,8 +18225,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
- /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ /*
+ * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
+ * Also known as Wa_14010480278.
+ */
+ if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
DARBF_GATING_DIS);
@@ -17935,7 +18329,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* We need to sanitize only the MST primary port. */
if (encoder->type != INTEL_OUTPUT_DP_MST &&
intel_phy_is_tc(dev_priv, phy))
- intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
+ intel_tc_port_sanitize(enc_to_dig_port(encoder));
}
get_encoder_power_domains(dev_priv);
@@ -18108,6 +18502,8 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
intel_gmbus_teardown(i915);
+ intel_bw_cleanup(i915);
+
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 0fef9263cddc..028aab728514 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -474,6 +474,7 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
bool is_ccs_modifier(u64 modifier);
+int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
@@ -521,7 +522,7 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
@@ -578,8 +579,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state);
-void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 679457156797..21561acfa3ac 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -514,7 +514,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
if (encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
if (WARN_ON(!dig_port))
continue;
@@ -1664,8 +1664,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 3fd822d536c9..fdd943a17de3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -90,8 +90,8 @@ struct intel_framebuffer {
/* for each plane in the normal GTT view */
struct {
unsigned int x, y;
- } normal[2];
- /* for each plane in the rotated GTT view */
+ } normal[4];
+ /* for each plane in the rotated GTT view for no-CCS formats */
struct {
unsigned int x, y;
unsigned int pitch; /* pixels */
@@ -555,7 +555,7 @@ struct intel_plane_state {
*/
u32 stride;
int x, y;
- } color_plane[2];
+ } color_plane[4];
/* plane control register */
u32 ctl;
@@ -1054,6 +1054,9 @@ struct intel_crtc_state {
/* Bitmask to indicate slaves attached */
u8 sync_mode_slaves_mask;
+
+ /* Only valid on TGL+ */
+ enum transcoder mst_master_transcoder;
};
struct intel_crtc {
@@ -1433,9 +1436,9 @@ struct intel_load_detect_pipe {
};
static inline struct intel_encoder *
-intel_attached_encoder(struct drm_connector *connector)
+intel_attached_encoder(struct intel_connector *connector)
{
- return to_intel_connector(connector)->encoder;
+ return connector->encoder;
}
static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
@@ -1452,12 +1455,12 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
}
static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+enc_to_dig_port(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_encoder *intel_encoder = encoder;
if (intel_encoder_is_dig_port(intel_encoder))
- return container_of(encoder, struct intel_digital_port,
+ return container_of(&encoder->base, struct intel_digital_port,
base.base);
else
return NULL;
@@ -1466,16 +1469,17 @@ enc_to_dig_port(struct drm_encoder *encoder)
static inline struct intel_digital_port *
conn_to_dig_port(struct intel_connector *connector)
{
- return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+ return enc_to_dig_port(intel_attached_encoder(connector));
}
static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct drm_encoder *encoder)
+enc_to_mst(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+ return container_of(&encoder->base, struct intel_dp_mst_encoder,
+ base.base);
}
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
}
@@ -1488,14 +1492,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
return true;
case INTEL_OUTPUT_DDI:
/* Skip pure HDMI/DVI DDI encoders */
- return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+ return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg);
default:
return false;
}
}
static inline struct intel_lspcon *
-enc_to_intel_lspcon(struct drm_encoder *encoder)
+enc_to_intel_lspcon(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->lspcon;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 20ccc9422107..4074d83b1a5f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -146,9 +146,9 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
- return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ return enc_to_intel_dp(intel_attached_encoder(connector));
}
static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -614,7 +614,7 @@ static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -834,7 +834,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* Pick one that's not used by other ports.
*/
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -1031,7 +1031,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
*/
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
@@ -2034,7 +2034,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u8 line_buf_depth;
int ret;
@@ -2205,7 +2205,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
int ret;
@@ -2366,8 +2366,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2482,7 +2482,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -2509,7 +2509,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
*
* CPT PCH is quite different, having many bits moved
* to the TRANS_DP_CTL register instead. That
- * configuration happens (oddly) in ironlake_pch_enable
+ * configuration happens (oddly) in ilk_pch_enable
*/
/* Preserve the BIOS-computed detected bit. This is
@@ -2653,7 +2653,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
* is locked
*/
-static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
@@ -2703,7 +2703,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
pp_stat_reg = _pp_stat_reg(intel_dp);
@@ -2768,7 +2768,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
intel_dig_port->base.base.base.id,
intel_dig_port->base.base.name);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2864,7 +2864,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_power_cycle(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@@ -2919,7 +2919,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
dig_port->base.base.base.id, dig_port->base.base.name);
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
@@ -2968,7 +2968,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2980,7 +2980,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -3004,7 +3004,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(pp_ctrl_reg, pp);
@@ -3018,7 +3018,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -3036,13 +3036,13 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
- struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
is_enabled = false;
with_pps_lock(intel_dp, wakeref)
- is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@@ -3079,13 +3079,13 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
@@ -3119,13 +3119,13 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
udelay(200);
}
-static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
@@ -3258,7 +3258,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -3279,7 +3279,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -3363,7 +3363,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp->link_trained = false;
@@ -3397,7 +3397,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
/*
@@ -3410,7 +3410,7 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_off(intel_dp, old_crtc_state);
+ ilk_edp_pll_off(intel_dp, old_crtc_state);
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
@@ -3548,7 +3548,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
@@ -3608,14 +3608,14 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
intel_dp_prepare(encoder, pipe_config);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_on(intel_dp, pipe_config);
+ ilk_edp_pll_on(intel_dp, pipe_config);
}
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -3658,7 +3658,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->pps_mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
@@ -3681,7 +3681,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4203,7 +4203,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
u32 DP = intel_dp->DP;
@@ -4903,7 +4903,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
return DP_TEST_ACK;
}
@@ -4947,7 +4947,7 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
}
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
return test_result;
}
@@ -5096,7 +5096,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
@@ -5536,7 +5536,7 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
static bool icp_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (intel_phy_is_combo(dev_priv, phy))
@@ -5651,7 +5651,7 @@ intel_dp_detect(struct drm_connector *connector,
bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
@@ -5755,7 +5755,7 @@ out:
static void
intel_dp_force(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
@@ -5790,7 +5790,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (intel_dp_is_edp(intel_attached_dp(connector)) &&
+ if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -5808,7 +5808,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
int ret;
ret = intel_connector_register(connector);
@@ -5830,7 +5830,7 @@ intel_dp_connector_register(struct drm_connector *connector)
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
@@ -5839,7 +5839,7 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
@@ -5868,12 +5868,12 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
- kfree(enc_to_dig_port(encoder));
+ kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -5904,7 +5904,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
.address = DP_AUX_HDCP_AKSV,
@@ -6514,7 +6514,7 @@ static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
intel_wakeref_t wakeref;
@@ -6693,7 +6693,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
intel_pps_get_registers(intel_dp, &regs);
- pp_ctl = ironlake_get_pp_control(intel_dp);
+ pp_ctl = ilk_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
@@ -6863,7 +6863,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
* soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
- u32 pp = ironlake_get_pp_control(intel_dp);
+ u32 pp = ilk_get_pp_control(intel_dp);
WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
@@ -7663,7 +7663,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (!intel_dp->can_mst)
continue;
@@ -7684,7 +7684,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_DDI)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (!intel_dp->can_mst)
continue;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 020422da2ae2..7c653f8c307f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,7 +57,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
*/
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 read_val[2] = { 0x0 };
u16 level = 0;
@@ -82,7 +82,7 @@ static void
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -110,7 +110,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
u8 pn, pn_min, pn_max;
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -222,13 +222,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
{
- set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false);
+ set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct intel_panel *panel = &connector->panel;
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
@@ -247,7 +248,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 7aa0975c33b7..cba68c5a80fa 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -43,7 +43,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct link_config_limits *limits)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
@@ -61,10 +61,11 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = bpp;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- crtc_state->pipe_bpp);
+ crtc_state->pipe_bpp,
+ false);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- port, crtc_state->pbn);
+ port, crtc_state->pbn, 0);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -87,12 +88,58 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+/*
+ * Iterate over all connectors and return the smallest transcoder in the
+ * MST stream.
+ */
+static enum transcoder
+intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ enum pipe ret = I915_MAX_PIPES;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return INVALID_TRANSCODER;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state->uapi.active)
+ continue;
+
+ /*
+ * Using crtc->pipe because crtc_state->cpu_transcoder is
+ * computed, so other CRTCs could have a non-computed
+ * cpu_transcoder.
+ */
+ if (crtc->pipe < ret)
+ ret = crtc->pipe;
+ }
+
+ if (ret == I915_MAX_PIPES)
+ return INVALID_TRANSCODER;
+
+ /* Simple cast works because TGL doesn't have an eDP transcoder */
+ return (enum transcoder)ret;
+}
+
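
The master election above is simply "smallest active pipe in the stream wins", with the pipe number doubling as the transcoder on TGL. A compact model of that selection, with assumed names and sentinel:

#include <stdio.h>

#define MAX_PIPES 4
#define NO_PIPE   MAX_PIPES     /* sentinel: no active pipe found */

struct stream_crtc {
        int pipe;
        int active;     /* nonzero when this CRTC drives the MST stream */
};

/* Pick the lowest active pipe; on TGL that pipe number doubles as the
 * master transcoder because there is no separate eDP transcoder. */
static int mst_master_pipe(const struct stream_crtc *c, int n)
{
        int ret = NO_PIPE;

        for (int i = 0; i < n; i++)
                if (c[i].active && c[i].pipe < ret)
                        ret = c[i].pipe;
        return ret;
}

int main(void)
{
        struct stream_crtc crtcs[] = { { 2, 1 }, { 1, 1 }, { 0, 0 } };

        printf("master pipe = %d\n", mst_master_pipe(crtcs, 3)); /* 1 */
        return 0;
}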
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
@@ -154,24 +201,91 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+
+ return 0;
+}
+
+/*
+ * If one of the connectors in an MST stream needs a modeset, mark all CRTCs
+ * that share the same MST stream as mode changed;
+ * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care of
+ * doing a fastset when possible.
+ */
+static int
+intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter connector_list_iter;
+ struct intel_connector *connector_iter;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, &connector->base))
+ return 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
+ struct intel_digital_connector_state *conn_iter_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ if (connector_iter->mst_port != connector->mst_port ||
+ connector_iter == connector)
+ continue;
+
+ conn_iter_state = intel_atomic_get_digital_connector_state(state,
+ connector_iter);
+ if (IS_ERR(conn_iter_state)) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return PTR_ERR(conn_iter_state);
+ }
+
+ if (!conn_iter_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_iter_state->base.crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state)) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return PTR_ERR(crtc_state);
+ }
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret) {
+ drm_connector_list_iter_end(&connector_list_iter);
+ return ret;
+ }
+ crtc_state->uapi.mode_changed = true;
+ }
+ drm_connector_list_iter_end(&connector_list_iter);
+
return 0;
}
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
+ drm_atomic_get_new_connector_state(&state->base, connector);
struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
+ drm_atomic_get_old_connector_state(&state->base, connector);
struct intel_connector *intel_connector =
to_intel_connector(connector);
struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
- ret = intel_digital_connector_atomic_check(connector, state);
+ ret = intel_digital_connector_atomic_check(connector, &state->base);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
if (ret)
return ret;
@@ -182,12 +296,9 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
* connector
*/
if (new_crtc) {
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(intel_state,
- intel_crtc);
+ intel_atomic_get_new_crtc_state(state, intel_crtc);
if (!crtc_state ||
!drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
@@ -195,8 +306,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return 0;
}
- mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
- ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+ mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
+ ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
intel_connector->port);
return ret;
@@ -206,7 +317,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
@@ -230,29 +341,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
bool last_mst_stream;
+ u32 val;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
+ WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+ val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT, 1))
+ DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_scaler_disable(old_crtc_state);
+ skl_scaler_disable(old_crtc_state);
else
- ironlake_pfit_disable(old_crtc_state);
+ ilk_pfit_disable(old_crtc_state);
/*
+ * Power down mst path before disabling the port, otherwise we end
+ * up getting interrupts from the sink upon detecting link loss.
+ */
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ false);
+ /*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
*
@@ -262,19 +395,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
intel_ddi_disable_pipe_clock(old_crtc_state);
- /* this can fail */
- drm_dp_check_act_status(&intel_dp->mst_mgr);
- /* and this can also fail */
- drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
- drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
-
- /*
- * Power down mst path before disabling the port, otherwise we end
- * up getting interrupts from the sink upon detecting link loss.
- */
- drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
- false);
intel_mst->connector = NULL;
if (last_mst_stream)
@@ -288,7 +408,7 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -301,7 +421,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -317,6 +437,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
+ WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -359,7 +481,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -380,7 +502,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
if (intel_mst->connector)
return true;
@@ -390,7 +512,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
intel_ddi_get_config(&intel_dig_port->base, pipe_config);
@@ -498,7 +620,7 @@ static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_fun
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
drm_encoder_cleanup(encoder);
kfree(intel_mst);
@@ -722,3 +844,14 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
}
+
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
+}
+
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+}
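
The two helpers added above encode the gen12 MST rule: the stream whose transcoder equals mst_master_transcoder is the master, and any other stream with a valid master recorded is a slave. A minimal caller sketch, assuming only these helpers and the intel_crtc_state fields used above (the function itself is illustrative, not from this patch):

/* Illustrative caller; only the is_master/is_slave helpers are real API. */
static void sketch_log_mst_role(const struct intel_crtc_state *crtc_state)
{
	if (intel_dp_mst_is_master_trans(crtc_state))
		DRM_DEBUG_KMS("transcoder %d carries the MST master stream\n",
			      crtc_state->cpu_transcoder);
	else if (intel_dp_mst_is_slave_trans(crtc_state))
		DRM_DEBUG_KMS("transcoder %d is an MST slave of %d\n",
			      crtc_state->cpu_transcoder,
			      crtc_state->mst_master_transcoder);
}

This matches the WARN_ON added in intel_mst_pre_enable_dp() above, which insists that on gen12+ the first enabled stream runs on the master transcoder.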
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f660ad80db04..854724f68f09 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,10 +6,15 @@
#ifndef __INTEL_DP_MST_H__
#define __INTEL_DP_MST_H__
+#include <linux/types.h>
+
struct intel_digital_port;
+struct intel_crtc_state;
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 704f38681c4b..6fb1f7a7364e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -642,7 +642,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
@@ -738,7 +738,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -781,7 +781,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -861,7 +861,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -940,7 +940,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dport->release_cl2_override) {
@@ -989,7 +989,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
@@ -1014,7 +1014,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
@@ -1043,7 +1043,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1073,7 +1073,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 728a4b045de7..c75e34d87111 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2972,8 +2972,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
- enc_to_mst(&encoder->base)->primary :
- enc_to_dig_port(&encoder->base);
+ enc_to_mst(encoder)->primary :
+ enc_to_dig_port(encoder);
if (primary_port &&
(primary_port->tc_mode == TC_PORT_DP_ALT ||
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index b15be5814599..19f78a4022d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -45,8 +45,9 @@ struct intel_dsi {
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
intel_wakeref_t io_wakeref[I915_MAX_PORTS];
- /* GPIO Desc for CRC based Panel control */
+ /* GPIO Desc for panel and backlight control */
struct gpio_desc *gpio_panel;
+ struct gpio_desc *gpio_backlight;
struct intel_connector *attached_connector;
@@ -68,6 +69,9 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
+ /* i2c bus associated with the slave device */
+ int i2c_bus_num;
+
/*
* video mode pixel format
*
@@ -141,9 +145,9 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
#define for_each_dsi_phy(__phy, __phys_mask) \
for_each_phy_masked(__phy, __phys_mask)
-static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dsi, base.base);
+ return container_of(&encoder->base, struct intel_dsi, base.base);
}
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
@@ -158,7 +162,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
{
- return enc_to_intel_dsi(&encoder->base)->ports;
+ return enc_to_intel_dsi(encoder)->ports;
}
/* icl_dsi.c */
@@ -203,6 +207,8 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
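
The enc_to_intel_dsi() change above is one instance of a theme running through this merge: the enc_to_*() and intel_attached_*() helpers now take the i915 wrapper types (struct intel_encoder, struct intel_connector) rather than the drm base types, so the to_intel_encoder()/to_intel_connector() conversion happens once at the drm boundary instead of &encoder->base being passed around. A reduced standalone sketch of the idiom, with struct bodies elided and the drm headers assumed:

#include <linux/kernel.h>	/* container_of() */
#include <drm/drm_encoder.h>

/* Layout mirrors i915: the base type is embedded as the first member. */
struct intel_encoder { struct drm_encoder base; /* ... */ };
struct intel_dsi     { struct intel_encoder base; /* ... */ };

/* Convert from the drm core type exactly once, at the boundary. */
static inline struct intel_encoder *to_intel_encoder(struct drm_encoder *enc)
{
	return container_of(enc, struct intel_encoder, base);
}

/* Driver-internal helpers then take the wrapper type directly. */
static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
{
	return container_of(&encoder->base, struct intel_dsi, base.base);
}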
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index bb3fd8b786a2..c87838843d0b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@
static u32 dcs_get_backlight(struct intel_connector *connector)
{
struct intel_encoder *encoder = connector->encoder;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi_device;
u8 data = 0;
enum port port;
@@ -64,7 +64,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
u8 data = level;
enum port port;
@@ -79,7 +79,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -113,7 +113,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(conn_state->best_encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index f90946c912ee..04f953ba8f00 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -25,7 +25,10 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <asm/intel-mid.h>
@@ -83,6 +86,12 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
+struct i2c_adapter_lookup {
+ u16 slave_addr;
+ struct intel_dsi *intel_dsi;
+ acpi_handle dev_handle;
+};
+
#define CHV_GPIO_IDX_START_N 0
#define CHV_GPIO_IDX_START_E 73
#define CHV_GPIO_IDX_START_SW 100
@@ -375,11 +384,112 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
+#ifdef CONFIG_ACPI
+static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
+{
+ struct i2c_adapter_lookup *lookup = data;
+ struct intel_dsi *intel_dsi = lookup->intel_dsi;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_adapter *adapter;
+ acpi_handle adapter_handle;
+ acpi_status status;
+
+ if (!i2c_acpi_get_i2c_resource(ares, &sb))
+ return 1;
+
+ if (lookup->slave_addr != sb->slave_address)
+ return 1;
+
+ status = acpi_get_handle(lookup->dev_handle,
+ sb->resource_source.string_ptr,
+ &adapter_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+ if (adapter)
+ intel_dsi->i2c_bus_num = adapter->nr;
+
+ return 1;
+}
+
+static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct acpi_device *acpi_dev;
+ struct list_head resource_list;
+ struct i2c_adapter_lookup lookup;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (acpi_dev) {
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.slave_addr = slave_addr;
+ lookup.intel_dsi = intel_dsi;
+ lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(acpi_dev, &resource_list,
+ i2c_adapter_lookup,
+ &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+ }
+}
+#else
+static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+}
+#endif
+
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping I2C element execution\n");
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct i2c_adapter *adapter;
+ struct i2c_msg msg;
+ int ret;
+ u8 vbt_i2c_bus_num = *(data + 2);
+ u16 slave_addr = *(u16 *)(data + 3);
+ u8 reg_offset = *(data + 5);
+ u8 payload_size = *(data + 6);
+ u8 *payload_data;
+
+ if (intel_dsi->i2c_bus_num < 0) {
+ intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
+ i2c_acpi_find_adapter(intel_dsi, slave_addr);
+ }
+
+ adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
+ if (!adapter) {
+ DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+ goto err_bus;
+ }
- return data + *(data + 6) + 7;
+ payload_data = kzalloc(payload_size + 1, GFP_KERNEL);
+ if (!payload_data)
+ goto err_alloc;
+
+ payload_data[0] = reg_offset;
+ memcpy(&payload_data[1], (data + 7), payload_size);
+
+ msg.addr = slave_addr;
+ msg.flags = 0;
+ msg.len = payload_size + 1;
+ msg.buf = payload_data;
+
+ ret = i2c_transfer(adapter, &msg, 1);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
+
+ kfree(payload_data);
+err_alloc:
+ i2c_put_adapter(adapter);
+err_bus:
+ return data + payload_size + 7;
}
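
For reference, the fixed header of the VBT MIPI-sequence I2C element as mipi_exec_i2c() above consumes it; the struct is an illustrative overlay, not a name from the VBT spec, and bytes 0-1 are simply never read by this parser:

struct vbt_i2c_element_sketch {
	u8  skipped[2];		/* not read by mipi_exec_i2c() */
	u8  vbt_i2c_bus_num;	/* *(data + 2) */
	u16 slave_addr;		/* *(u16 *)(data + 3), little endian */
	u8  reg_offset;		/* *(data + 5) */
	u8  payload_size;	/* *(data + 6) */
	u8  payload[];		/* data + 7, payload_size bytes */
} __packed;

Every exit path returns data + payload_size + 7, so a missing adapter or a failed transfer logs an error without desynchronizing the sequence walk; i2c_bus_num starts at -1 (set in intel_dsi_vbt_init() below), so the ACPI lookup runs once and may override the bus number from the VBT.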
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
@@ -453,8 +563,8 @@ static const char *sequence_name(enum mipi_seq seq_id)
return "(unknown)";
}
-void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
- enum mipi_seq seq_id)
+static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
@@ -519,6 +629,22 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
+{
+ if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1);
+
+ intel_dsi_vbt_exec(intel_dsi, seq_id);
+
+ if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+}
+
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
@@ -664,6 +790,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
+ intel_dsi->i2c_bus_num = -1;
+
/* a regular driver would get the device in probe */
for_each_dsi_port(port, intel_dsi->ports) {
mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
@@ -671,3 +799,110 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
return true;
}
+
+/*
+ * On some BYT/CHT devices some VBT sequences are incomplete and we need to
+ * control some GPIOs manually. A GPIO lookup table must be registered before
+ * those GPIOs can be requested. If the GOP did not initialize the panel
+ * (e.g. because an HDMI monitor was connected) we may also need to change
+ * the pinmux of the SoC's PWM0 pin from GPIO to PWM.
+ */
+static struct gpiod_lookup_table pmic_panel_gpio_table = {
+	/* Intel GFX is the consumer */
+ .dev_id = "0000:00:02.0",
+ .table = {
+ /* Panel EN/DISABLE */
+ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table soc_panel_gpio_table = {
+ .dev_id = "0000:00:02.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
+ PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00",
+ "pwm0_grp", "pwm"),
+};
+
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ bool want_backlight_gpio = false;
+ bool want_panel_gpio = false;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC) {
+ gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ want_panel_gpio = true;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ gpiod_add_lookup_table(&soc_panel_gpio_table);
+ want_panel_gpio = true;
+ want_backlight_gpio = true;
+
+ /* Ensure PWM0 pin is muxed as PWM instead of GPIO */
+ ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
+ ARRAY_SIZE(soc_pwm_pinctrl_map));
+ if (ret)
+ DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ if (IS_ERR(pinctrl))
+ DRM_ERROR("Failed to set pinmux to PWM\n");
+ }
+
+ if (want_panel_gpio) {
+ intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ if (IS_ERR(intel_dsi->gpio_panel)) {
+ DRM_ERROR("Failed to own gpio for panel control\n");
+ intel_dsi->gpio_panel = NULL;
+ }
+ }
+
+ if (want_backlight_gpio) {
+ intel_dsi->gpio_backlight =
+ gpiod_get(dev->dev, "backlight", flags);
+ if (IS_ERR(intel_dsi->gpio_backlight)) {
+ DRM_ERROR("Failed to own gpio for backlight control\n");
+ intel_dsi->gpio_backlight = NULL;
+ }
+ }
+}
+
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+
+ if (intel_dsi->gpio_panel) {
+ gpiod_put(intel_dsi->gpio_panel);
+ intel_dsi->gpio_panel = NULL;
+ }
+
+ if (intel_dsi->gpio_backlight) {
+ gpiod_put(intel_dsi->gpio_backlight);
+ intel_dsi->gpio_backlight = NULL;
+ }
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC)
+ gpiod_remove_lookup_table(&pmic_panel_gpio_table);
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
+ gpiod_remove_lookup_table(&soc_panel_gpio_table);
+ }
+}
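
The lookup tables above bind the function names "panel" and "backlight" to concrete pins for the GFX PCI device ("0000:00:02.0"); that registration is what lets the plain gpiod_get(dev->dev, "panel", flags) calls in intel_dsi_vbt_gpio_init() resolve without DT or ACPI mappings. A minimal consumer-side sketch under the same assumption (the function name is illustrative):

#include <linux/gpio/consumer.h>

static int sketch_claim_panel_gpio(struct device *dev, bool panel_is_on)
{
	/* Mirror the firmware's state so a live panel is not blanked. */
	enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
	struct gpio_desc *panel;

	panel = gpiod_get(dev, "panel", flags);	/* name from the table */
	if (IS_ERR(panel))
		return PTR_ERR(panel);

	gpiod_set_value_cansleep(panel, 1);	/* assert panel enable */
	gpiod_put(panel);
	return 0;
}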
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index a74dc5b915d1..86a337c9d85d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -125,7 +125,7 @@ static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
return container_of(encoder, struct intel_dvo, base);
}
-static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
{
return enc_to_dvo(intel_attached_encoder(connector));
}
@@ -134,7 +134,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -220,7 +220,7 @@ static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -311,7 +311,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index ab61f88d1d33..6c83b350525d 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -126,8 +126,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 bit = (pipe == PIPE_A) ?
@@ -139,7 +139,7 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, bit);
}
-static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
+static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -157,9 +157,9 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}
-static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe,
- bool enable, bool old)
+static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable,
+ bool old)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
@@ -180,8 +180,8 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
- enum pipe pipe, bool enable)
+static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -264,11 +264,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (HAS_GMCH(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
- ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ ilk_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN(dev_priv, 7))
- ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (INTEL_GEN(dev_priv) >= 8)
- broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
+ bdw_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
}
@@ -427,7 +427,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv))
i9xx_check_fifo_underruns(crtc);
else if (IS_GEN(dev_priv, 7))
- ivybridge_check_fifo_underruns(crtc);
+ ivb_check_fifo_underruns(crtc);
}
spin_unlock_irq(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 685589064d10..93ac0f296852 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -85,16 +85,17 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
"HDMI transcoder function enabled, expecting disabled\n");
}
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
- container_of(encoder, struct intel_digital_port, base.base);
+ container_of(&encoder->base, struct intel_digital_port,
+ base.base);
return &intel_dig_port->hdmi;
}
-static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
{
- return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+ return enc_to_intel_hdmi(intel_attached_encoder(connector));
}
static u32 g4x_infoframe_index(unsigned int type)
@@ -602,7 +603,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val, ret = 0;
int i;
@@ -646,7 +647,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
const union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -675,7 +676,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
int ret;
@@ -855,7 +856,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
@@ -1038,7 +1039,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1097,7 +1098,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1146,7 +1147,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1737,7 +1738,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 hdmi_val;
@@ -1774,7 +1775,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -1793,7 +1794,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -1874,7 +1875,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1896,7 +1897,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1947,7 +1948,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -2007,7 +2008,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
@@ -2160,7 +2161,7 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
@@ -2316,7 +2317,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int clock, bool force_dvi)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int bpc;
for (bpc = 12; bpc >= 10; bpc -= 2) {
@@ -2334,7 +2335,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
bool force_dvi)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
@@ -2404,7 +2405,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
@@ -2496,7 +2497,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
static void
intel_hdmi_unset_edid(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
@@ -2512,7 +2513,7 @@ static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -2559,7 +2560,7 @@ static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_wakeref_t wakeref;
struct edid *edid;
bool connected = false;
@@ -2600,7 +2601,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
@@ -2663,7 +2664,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ enc_to_dig_port(encoder);
intel_hdmi_prepare(encoder, pipe_config);
@@ -2676,7 +2677,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2746,7 +2747,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dport = enc_to_dig_port(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2772,7 +2773,7 @@ static struct i2c_adapter *
intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
}
@@ -2816,7 +2817,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+ struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
cec_notifier_conn_unregister(n);
@@ -2906,7 +2907,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool scrambling)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
struct i2c_adapter *adapter =
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index cf1ea5427639..d3659d0b408b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -29,7 +29,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index fc29046d48ea..99d3a3c7989e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -302,7 +302,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
return intel_encoder_is_dig_port(encoder) &&
- enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+ enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
static void i915_digport_work_func(struct work_struct *work)
@@ -335,7 +335,7 @@ static void i915_digport_work_func(struct work_struct *work)
if (!long_hpd && !short_hpd)
continue;
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 5145ff8b962b..d807c5648c87 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -434,8 +434,8 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
const void *frame, ssize_t len)
{
bool ret;
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
/* LSPCON only needs AVI IF */
if (type != HDMI_INFOFRAME_TYPE_AVI)
@@ -472,7 +472,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
ssize_t ret;
union hdmi_infoframe frame;
u8 buf[VIDEO_DIP_DATA_SIZE];
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &dig_port->lspcon;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -522,7 +522,7 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
/* FIXME actually read this from the hw */
- return enc_to_intel_lspcon(&encoder->base)->active;
+ return enc_to_intel_lspcon(encoder)->active;
}
void lspcon_resume(struct intel_lspcon *lspcon)
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 2746512f4466..520408e83681 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -98,7 +98,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
- dig_port = enc_to_dig_port(&encoder->base);
+ dig_port = enc_to_dig_port(encoder);
switch (dig_port->base.port) {
case PORT_B:
*source = INTEL_PIPE_CRC_SOURCE_DP_B;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 16e9ff47d519..89c9cf5f38d2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1523,3 +1523,27 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
return ret;
}
+
+void intel_psr_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *intel_connector;
+ struct intel_digital_port *dig_port;
+ struct drm_crtc_state *crtc_state;
+
+ if (!CAN_PSR(dev_priv) || !new_state->crtc ||
+ dev_priv->psr.initially_probed)
+ return;
+
+ intel_connector = to_intel_connector(connector);
+ dig_port = enc_to_dig_port(intel_connector->encoder);
+ if (dev_priv->psr.dp != &dig_port->dp)
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ crtc_state->mode_changed = true;
+ dev_priv->psr.initially_probed = true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 46e4de8b8cd5..c58a1d438808 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -8,6 +8,8 @@
#include "intel_frontbuffer.h"
+struct drm_connector;
+struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_dp;
@@ -35,5 +37,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
bool intel_psr_enabled(struct intel_dp *intel_dp);
+void intel_psr_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
#endif /* __INTEL_PSR_H__ */
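
intel_psr_atomic_check() in the .c hunk above forces crtc_state->mode_changed once, guarded by psr.initially_probed, so the first commit after driver init performs a full modeset and PSR is probed from a fully initialized state rather than whatever the firmware left enabled. A sketch of how a connector's .atomic_check might chain into it; the wrapper is illustrative, while the drm_atomic_get_*_connector_state() helpers are standard drm API:

#include <drm/drm_atomic.h>

static int sketch_connector_atomic_check(struct drm_connector *connector,
					 struct drm_atomic_state *state)
{
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);

	intel_psr_atomic_check(connector, old_state, new_state);
	return 0;
}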
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 8758ee2a4442..e8819fd21e03 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -180,7 +180,7 @@ static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
return container_of(encoder, struct intel_sdvo, base);
}
-static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector)
{
return to_sdvo(intel_attached_encoder(connector));
}
@@ -1551,7 +1551,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
u16 active_outputs = 0;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@@ -1823,7 +1823,7 @@ static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1941,7 +1941,7 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
static struct edid *
intel_sdvo_get_edid(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
return drm_get_edid(connector, &sdvo->ddc);
}
@@ -1959,7 +1959,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
enum drm_connector_status status;
@@ -2028,7 +2028,7 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
u16 response;
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
@@ -2175,7 +2175,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
u32 reply = 0, format_map = 0;
@@ -2215,7 +2215,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
@@ -2379,7 +2379,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
static int
intel_sdvo_connector_register(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
int ret;
ret = intel_connector_register(connector);
@@ -2394,7 +2394,7 @@ intel_sdvo_connector_register(struct drm_connector *connector)
static void
intel_sdvo_connector_unregister(struct drm_connector *connector)
{
- struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
sysfs_remove_link(&connector->kdev->kobj,
sdvo->ddc.dev.kobj.name);
@@ -2932,7 +2932,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
- if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+ if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
drm_connector_unregister(connector);
intel_connector_destroy(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 3f7b8f2ff671..fca77ec1e0dd 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -583,15 +583,16 @@ skl_program_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->color_plane[color_plane].offset;
u32 stride = skl_plane_stride(plane_state, color_plane);
- u32 aux_dist = plane_state->color_plane[1].offset - surf_addr;
- u32 aux_stride = skl_plane_stride(plane_state, 1);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = intel_main_to_aux_plane(fb, color_plane);
+ u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
+ u32 aux_stride = skl_plane_stride(plane_state, aux_plane);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 x = plane_state->color_plane[color_plane].x;
u32 y = plane_state->color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- const struct drm_framebuffer *fb = plane_state->hw.fb;
u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0;
unsigned long irqflags;
@@ -2106,7 +2107,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
@@ -2578,7 +2580,16 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static const u64 gen12_plane_format_modifiers_ccs[] = {
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
@@ -2743,10 +2754,21 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
+static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+{
+ return plane_id < PLANE_SPRITE4;
+}
+
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
switch (modifier) {
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (!gen12_plane_supports_mc_ccs(plane->id))
+ return false;
+ /* fall through */
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
@@ -2764,11 +2786,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
if (is_ccs_modifier(modifier))
return true;
/* fall through */
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2777,6 +2794,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
+ if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XVYU2101010:
case DRM_FORMAT_C8:
case DRM_FORMAT_XBGR16161616F:
@@ -2910,6 +2935,14 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
+static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+{
+ if (gen12_plane_supports_mc_ccs(plane_id))
+ return gen12_plane_format_modifiers_mc_ccs;
+ else
+ return gen12_plane_format_modifiers_rc_ccs;
+}
+
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2975,7 +3008,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (INTEL_GEN(dev_priv) >= 12) {
- modifiers = gen12_plane_format_modifiers_ccs;
+ modifiers = gen12_get_plane_modifiers(plane_id);
plane_funcs = &gen12_plane_funcs;
} else {
if (plane->has_ccs)
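
Media-compressed CCS is only supported on plane ids below PLANE_SPRITE4, hence gen12_plane_supports_mc_ccs() and the per-plane choice between the _mc_ccs and _rc_ccs modifier lists; the reordered /* fall through */ cases in gen12_plane_format_mod_supported() let the YUV formats accept the new MC modifier while RGB565 and the 2101010 formats keep only the uncompressed tilings. An illustrative dump of what a given plane would advertise, assuming the helpers from this hunk:

/* Illustrative only; gen12_get_plane_modifiers() is the helper added above. */
static void sketch_dump_gen12_modifiers(enum plane_id plane_id)
{
	const u64 *mods = gen12_get_plane_modifiers(plane_id);
	int i;

	for (i = 0; mods[i] != DRM_FORMAT_MOD_INVALID; i++)
		DRM_DEBUG_KMS("plane %d: modifier 0x%016llx\n",
			      plane_id, mods[i]);
}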
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 50703536436c..c75e0ceecee6 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -898,7 +898,7 @@ static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
return container_of(encoder, struct intel_tv, base);
}
-static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
{
return enc_to_tv(intel_attached_encoder(connector));
}
@@ -1527,7 +1527,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_pipe_disabled(dev_priv, intel_crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* Filter ctl must be set before TV_WIN_SIZE */
tv_filter_ctl = TV_AUTO_SCALE;
@@ -1662,7 +1662,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i;
@@ -1689,7 +1689,7 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 6bab08db5d75..9e6aaa302e40 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -943,7 +943,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
struct drm_dsc_picture_parameter_set pps;
enum port port;
@@ -961,7 +961,7 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 5b96051f4f7c..4514529ff6f3 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -23,7 +23,6 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
-#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
@@ -319,7 +318,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
bool cold_boot = false;
@@ -367,7 +366,7 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -438,7 +437,7 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -465,7 +464,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -516,7 +515,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -546,7 +545,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 tmp;
@@ -579,7 +578,7 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -625,7 +624,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
@@ -681,7 +680,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
@@ -745,7 +744,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -793,9 +792,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (!IS_GEMINILAKE(dev_priv))
intel_dsi_prepare(encoder, pipe_config);
- /* Power on, try both CRC pmic gpio and VBT */
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
@@ -850,7 +846,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -886,7 +882,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -895,7 +891,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
- skylake_scaler_disable(old_crtc_state);
+ skl_scaler_disable(old_crtc_state);
}
if (is_vid_mode(intel_dsi)) {
@@ -945,11 +941,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
/* Assert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- /* Power off, try both CRC pmic gpio and VBT */
intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
/*
* FIXME As we do with eDP, just make a note of the time here
@@ -962,7 +955,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
bool active = false;
@@ -1041,7 +1034,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
@@ -1234,7 +1227,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
@@ -1322,7 +1315,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1512,7 +1505,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
@@ -1539,12 +1532,9 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
- /* dispose of the gpios */
- if (intel_dsi->gpio_panel)
- gpiod_put(intel_dsi->gpio_panel);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ intel_dsi_vbt_gpio_cleanup(intel_dsi);
intel_encoder_destroy(encoder);
}
@@ -1824,6 +1814,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_connector *connector;
struct drm_display_mode *current_mode, *fixed_mode;
enum port port;
+ enum pipe pipe;
DRM_DEBUG_KMS("\n");
@@ -1922,20 +1913,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
vlv_dphy_param_init(intel_dsi);
- /*
- * In case of BYT with CRC PMIC, we need to use GPIO for
- * Panel control.
- */
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
- intel_dsi->gpio_panel =
- gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
-
- if (IS_ERR(intel_dsi->gpio_panel)) {
- DRM_ERROR("Failed to own gpio for panel control\n");
- intel_dsi->gpio_panel = NULL;
- }
- }
+ intel_dsi_vbt_gpio_init(intel_dsi,
+ intel_dsi_get_hw_state(intel_encoder, &pipe));
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
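
The open-coded CRC-PMIC block removed above is not dropped but
centralised: intel_dsi_vbt_gpio_init()/intel_dsi_vbt_gpio_cleanup() in
intel_dsi_vbt.c now own the panel-control GPIO (hence the dropped
<linux/gpio/consumer.h> include), and the power on/off toggles run from
the VBT MIPI_SEQ_POWER_ON/OFF sequences instead of being open-coded in
pre_enable/post_disable. Feeding intel_dsi_get_hw_state() into the init
call lets the helper request the GPIO at the level matching the current
panel state, so a panel lit by the firmware is not blanked during
probe. A hedged sketch of the init side (the real helper handles more
than this):

void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
	struct drm_device *dev = intel_dsi->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
		/* request at the current level: no glitch on takeover */
		intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
		if (IS_ERR(intel_dsi->gpio_panel)) {
			DRM_ERROR("Failed to own gpio for panel control\n");
			intel_dsi->gpio_panel = NULL;
		}
	}
}
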
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 95f39cd0ce02..6b89e67b120f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -117,7 +117,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int ret;
u32 dsi_clk;
@@ -255,7 +255,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
@@ -321,7 +321,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
u32 pclk;
u32 dsi_clk;
u32 dsi_ratio;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -341,7 +341,7 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
temp = I915_READ(MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
@@ -455,7 +455,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -503,7 +503,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
u32 val;
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
deleted file mode 100644
index 7e73aa587967..000000000000
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 3d4f5775a4ba..25235ef630c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -9,16 +9,16 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
return 0;
/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
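
The u8 -> u16 widening tracks engine->uabi_class, which grew to u16;
the BUILD_BUG_ON(typecheck(...)) above is exactly what catches such a
mismatch at compile time. The encoding itself is unchanged: one read
bit per engine class in the upper 16 bits, the writing class + 1 in
the lower 16 bits, and a writer is always reported among the readers.
A worked example:

#include <linux/types.h>

static u32 busy_example(void)
{
	u32 busy = 0;

	busy |= 0x10000u << 0;			/* class 0 (render) reading */
	busy |= 0x10000u << 2;			/* class 2 (video) reading */
	busy |= (1 + 1) | (0x10000u << 1);	/* class 1 (copy) writing,
						 * which implies reading */

	return busy;				/* 0x00070002 */
}
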
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dc90b044a217..a2e57e62af30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,6 +69,7 @@
#include <drm/i915_drm.h>
+#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
@@ -705,7 +706,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -760,12 +761,6 @@ void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
flush_work(&i915->gem.contexts.free_work);
}
-static int context_idr_cleanup(int id, void *p, void *data)
-{
- context_close(p);
- return 0;
-}
-
static int vm_idr_cleanup(int id, void *p, void *data)
{
i915_vm_put(p);
@@ -773,7 +768,8 @@ static int vm_idr_cleanup(int id, void *p, void *data)
}
static int gem_context_register(struct i915_gem_context *ctx,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ u32 *id)
{
struct i915_address_space *vm;
int ret;
@@ -791,14 +787,10 @@ static int gem_context_register(struct i915_gem_context *ctx,
current->comm, pid_nr(ctx->pid));
/* And finally expose ourselves to userspace via the idr */
- mutex_lock(&fpriv->context_idr_lock);
- ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
- mutex_unlock(&fpriv->context_idr_lock);
- if (ret >= 0)
- goto out;
+ ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (ret)
+ put_pid(fetch_and_zero(&ctx->pid));
- put_pid(fetch_and_zero(&ctx->pid));
-out:
return ret;
}
@@ -808,11 +800,11 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
int err;
+ u32 id;
- mutex_init(&file_priv->context_idr_lock);
- mutex_init(&file_priv->vm_idr_lock);
+ xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
- idr_init(&file_priv->context_idr);
+ mutex_init(&file_priv->vm_idr_lock);
idr_init_base(&file_priv->vm_idr, 1);
ctx = i915_gem_create_context(i915, 0);
@@ -821,21 +813,19 @@ int i915_gem_context_open(struct drm_i915_private *i915,
goto err;
}
- err = gem_context_register(ctx, file_priv);
+ err = gem_context_register(ctx, file_priv, &id);
if (err < 0)
goto err_ctx;
- GEM_BUG_ON(err > 0);
-
+ GEM_BUG_ON(id);
return 0;
err_ctx:
context_close(ctx);
err:
idr_destroy(&file_priv->vm_idr);
- idr_destroy(&file_priv->context_idr);
+ xa_destroy(&file_priv->context_xa);
mutex_destroy(&file_priv->vm_idr_lock);
- mutex_destroy(&file_priv->context_idr_lock);
return err;
}
@@ -843,10 +833,12 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_gem_context *ctx;
+ unsigned long idx;
- idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
- idr_destroy(&file_priv->context_idr);
- mutex_destroy(&file_priv->context_idr_lock);
+ xa_for_each(&file_priv->context_xa, idx, ctx)
+ context_close(ctx);
+ xa_destroy(&file_priv->context_xa);
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
@@ -870,7 +862,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1244,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
* image, or into the registers directory, does not stick). Pristine
* and idle contexts will be configured on pinning.
*/
- if (!intel_context_is_pinned(ce))
+ if (!intel_context_pin_if_active(ce))
return 0;
rq = intel_engine_create_kernel_request(ce->engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
/* Serialise with the remote context */
ret = intel_context_prepare_remote_request(ce, rq);
@@ -1257,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
ret = gen8_emit_rpcs_config(rq, ce, sseu);
i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
return ret;
}
@@ -2187,6 +2183,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_context_create_ext *args = data;
struct create_ext ext_data;
int ret;
+ u32 id;
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return -ENODEV;
@@ -2218,11 +2215,11 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_ctx;
}
- ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+ ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
if (ret < 0)
goto err_ctx;
- args->ctx_id = ret;
+ args->ctx_id = id;
DRM_DEBUG("HW context %d created\n", args->ctx_id);
return 0;
@@ -2245,11 +2242,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (!args->ctx_id)
return -ENOENT;
- if (mutex_lock_interruptible(&file_priv->context_idr_lock))
- return -EINTR;
-
- ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
- mutex_unlock(&file_priv->context_idr_lock);
+ ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
if (!ctx)
return -ENOENT;
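
The context registry conversion swaps a mutex-protected IDR for an
allocating XArray: xa_alloc() does its own locking, so
context_idr_lock disappears, and because xa_alloc() returns 0 or
-errno rather than the allocated id, gem_context_register() grows the
u32 *id out-parameter seen above (hence GEM_BUG_ON(id) asserting that
the very first, default context lands at id 0). The idiom in
isolation, with illustrative names:

#include <linux/xarray.h>

struct registry {
	struct xarray xa;
};

static void registry_init(struct registry *r)
{
	xa_init_flags(&r->xa, XA_FLAGS_ALLOC);	/* ids allocated from 0 */
}

static int registry_add(struct registry *r, void *entry, u32 *id)
{
	return xa_alloc(&r->xa, id, entry, xa_limit_32b, GFP_KERNEL);
}

static void *registry_remove(struct registry *r, u32 id)
{
	return xa_erase(&r->xa, id);		/* NULL if absent */
}

Teardown follows the same shape as i915_gem_context_close() above:
xa_for_each() to release each entry, then xa_destroy().
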
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 14f3cc1b7583..3ae61a355d87 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -13,7 +13,6 @@
#include "i915_drv.h"
#include "i915_gem.h"
-#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cbd2bcade3c8..60c984e10c4a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1981,9 +1981,20 @@ static int __eb_parse(struct dma_fence_work *work)
pw->trampoline);
}
+static void __eb_parse_release(struct dma_fence_work *work)
+{
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ if (pw->trampoline)
+ i915_active_release(&pw->trampoline->active);
+ i915_active_release(&pw->shadow->active);
+ i915_active_release(&pw->batch->active);
+}
+
static const struct dma_fence_work_ops eb_parse_ops = {
.name = "eb_parse",
.work = __eb_parse,
+ .release = __eb_parse_release,
};
static int eb_parse_pipeline(struct i915_execbuffer *eb,
@@ -1997,6 +2008,20 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
+ err = i915_active_acquire(&eb->batch->active);
+ if (err)
+ goto err_free;
+
+ err = i915_active_acquire(&shadow->active);
+ if (err)
+ goto err_batch;
+
+ if (trampoline) {
+ err = i915_active_acquire(&trampoline->active);
+ if (err)
+ goto err_shadow;
+ }
+
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
@@ -2006,7 +2031,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
pw->shadow = shadow;
pw->trampoline = trampoline;
- dma_resv_lock(pw->batch->resv, NULL);
+ err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
+ if (err)
+ goto err_trampoline;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
@@ -2034,6 +2061,14 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
err_batch_unlock:
dma_resv_unlock(pw->batch->resv);
+err_trampoline:
+ if (trampoline)
+ i915_active_release(&trampoline->active);
+err_shadow:
+ i915_active_release(&shadow->active);
+err_batch:
+ i915_active_release(&eb->batch->active);
+err_free:
kfree(pw);
return err;
}
@@ -2173,7 +2208,7 @@ static int eb_submit(struct i915_execbuffer *eb)
}
if (intel_context_nopreempt(eb->context))
- eb->request->flags |= I915_REQUEST_NOPREEMPT;
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
return 0;
}
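
Two things happen in the execbuffer hunks above: the batch parser's
work item now takes i915_active references on batch, shadow and
trampoline before it is queued and drops them from the new .release
hook once the work completes, closing a window where the objects could
be retired under the async worker; and the reservation lock becomes
interruptible so a signal can back the ioctl out. The error ladder
mirrors the acquisitions in strict reverse order, a pattern worth
spelling out generically (struct res and res_acquire()/res_release()
are placeholders):

static int acquire_all(struct res *a, struct res *b, struct res *c)
{
	int err;

	err = res_acquire(a);
	if (err)
		return err;

	err = res_acquire(b);
	if (err)
		goto err_a;

	if (c) {			/* optional, like the trampoline */
		err = res_acquire(c);
		if (err)
			goto err_b;
	}

	return 0;

err_b:
	res_release(b);
err_a:
	res_release(a);
	return err;
}
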
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 520cc9cac471..70543c83df06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -16,46 +16,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-/* XXX: Time to vfunc your life up? */
-void __iomem *
-i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- resource_size_t offset;
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- resource_size_t offset;
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned long size)
-{
- resource_size_t offset;
-
- GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
-}
-
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_lmem_obj_ops;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 7c176b8b7d2f..fc3f15580fe3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,14 +14,6 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
-void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
- unsigned long n, unsigned long size);
-void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
- unsigned long n);
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
- unsigned long n);
-
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 879fff8adc48..0b6a442108de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -4,6 +4,7 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
@@ -212,6 +213,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case -EIO: /* shmemfs failure from swap device */
case -EFAULT: /* purged object */
case -ENODEV: /* bad object, how did you get here! */
+ case -ENXIO: /* unable to access backing store (on device) */
return VM_FAULT_SIGBUS;
case -ENOSPC: /* shmemfs allocation failure */
@@ -236,42 +238,38 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
- unsigned long i, size = area->vm_end - area->vm_start;
- bool write = area->vm_flags & VM_WRITE;
- vm_fault_t ret = VM_FAULT_SIGBUS;
+ resource_size_t iomap;
int err;
- if (!i915_gem_object_has_struct_page(obj))
- return ret;
-
/* Sanity check that we allow writing into this object */
- if (i915_gem_object_is_readonly(obj) && write)
- return ret;
+ if (unlikely(i915_gem_object_is_readonly(obj) &&
+ area->vm_flags & VM_WRITE))
+ return VM_FAULT_SIGBUS;
err = i915_gem_object_pin_pages(obj);
if (err)
- return i915_error_to_vmf_fault(err);
+ goto out;
- /* PTEs are revoked in obj->ops->put_pages() */
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- struct page *page = i915_gem_object_get_page(obj, i);
-
- ret = vmf_insert_pfn(area,
- (unsigned long)area->vm_start + i * PAGE_SIZE,
- page_to_pfn(page));
- if (ret != VM_FAULT_NOPAGE)
- break;
+ iomap = -1;
+ if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+ iomap = obj->mm.region->iomap.base;
+ iomap -= obj->mm.region->region.start;
}
- if (write) {
+ /* PTEs are revoked in obj->ops->put_pages() */
+ err = remap_io_sg(area,
+ area->vm_start, area->vm_end - area->vm_start,
+ obj->mm.pages->sgl, iomap);
+
+ if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- obj->cache_dirty = true; /* XXX flush after PAT update? */
obj->mm.dirty = true;
}
i915_gem_object_unpin_pages(obj);
- return ret;
+out:
+ return i915_error_to_vmf_fault(err);
}
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
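
vm_fault_cpu() above no longer inserts PTEs one vmf_insert_pfn() at a
time: the whole scatterlist goes to remap_io_sg(), which covers both
struct-page-backed objects (iomap == -1) and device-local memory,
where the dma address is rebased onto the CPU-visible BAR. Every
failure now funnels through i915_error_to_vmf_fault(), with the new
-ENXIO case for backing stores that cannot be CPU-mapped. A heavily
hedged sketch of how such a remap can be built on
apply_to_page_range() (next_pfn() is a placeholder for the sg walker;
the driver's remap_io_sg() differs in detail):

struct remap_ctx {
	struct mm_struct *mm;
	pgprot_t prot;
	/* sg iterator state and optional BAR rebase live here */
};

static int remap_one_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_ctx *ctx = data;

	set_pte_at(ctx->mm, addr, pte,
		   pte_mkspecial(pfn_pte(next_pfn(ctx), ctx->prot)));
	return 0;
}

/* then: apply_to_page_range(vma->vm_mm, start, size, remap_one_pte, &ctx) */
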
@@ -457,10 +455,11 @@ out:
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct i915_mmap_offset *mmo;
+ struct i915_mmap_offset *mmo, *mn;
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets, offset) {
/*
* vma_node_unmap for GTT mmaps handled already in
* __i915_gem_object_release_mmap_gtt
@@ -490,6 +489,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
}
static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type)
+{
+ struct rb_node *rb;
+
+ spin_lock(&obj->mmo.lock);
+ rb = obj->mmo.offsets.rb_node;
+ while (rb) {
+ struct i915_mmap_offset *mmo =
+ rb_entry(rb, typeof(*mmo), offset);
+
+ if (mmo->mmap_type == mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ return mmo;
+ }
+
+ if (mmo->mmap_type < mmap_type)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+ spin_unlock(&obj->mmo.lock);
+
+ return NULL;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+ struct rb_node *rb, **p;
+
+ spin_lock(&obj->mmo.lock);
+ rb = NULL;
+ p = &obj->mmo.offsets.rb_node;
+ while (*p) {
+ struct i915_mmap_offset *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), offset);
+
+ if (pos->mmap_type == mmo->mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ return pos;
+ }
+
+ if (pos->mmap_type < mmo->mmap_type)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&mmo->offset, rb, p);
+ rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+}
+
+static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
struct drm_file *file)
@@ -498,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;
+ mmo = lookup_mmo(obj, mmap_type);
+ if (mmo)
+ goto out;
+
mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
if (!mmo)
return ERR_PTR(-ENOMEM);
mmo->obj = obj;
- mmo->dev = obj->base.dev;
- mmo->file = file;
mmo->mmap_type = mmap_type;
drm_vma_node_reset(&mmo->vma_node);
- err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
- obj->base.size / PAGE_SIZE);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (likely(!err))
- goto out;
+ goto insert;
/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -519,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto err;
i915_gem_drain_freed_objects(i915);
- err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
- obj->base.size / PAGE_SIZE);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (err)
goto err;
+insert:
+ mmo = insert_mmo(obj, mmo);
+ GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
drm_vma_node_allow(&mmo->vma_node, file);
-
- spin_lock(&obj->mmo.lock);
- list_add(&mmo->offset, &obj->mmo.offsets);
- spin_unlock(&obj->mmo.lock);
-
return mmo;
err:
@@ -560,7 +620,9 @@ __assign_mmap_offset(struct drm_file *file,
}
if (mmap_type != I915_MMAP_TYPE_GTT &&
- !i915_gem_object_has_struct_page(obj)) {
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM)) {
err = -ENODEV;
goto out;
}
@@ -694,6 +756,46 @@ static const struct vm_operations_struct vm_ops_cpu = {
.close = vm_close,
};
+static int singleton_release(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = file->private_data;
+
+ cmpxchg(&i915->gem.mmap_singleton, file, NULL);
+ drm_dev_put(&i915->drm);
+
+ return 0;
+}
+
+static const struct file_operations singleton_fops = {
+ .owner = THIS_MODULE,
+ .release = singleton_release,
+};
+
+static struct file *mmap_singleton(struct drm_i915_private *i915)
+{
+ struct file *file;
+
+ rcu_read_lock();
+ file = i915->gem.mmap_singleton;
+ if (file && !get_file_rcu(file))
+ file = NULL;
+ rcu_read_unlock();
+ if (file)
+ return file;
+
+ file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
+ if (IS_ERR(file))
+ return file;
+
+ /* Everyone shares a single global address space */
+ file->f_mapping = i915->drm.anon_inode->i_mapping;
+
+ smp_store_mb(i915->gem.mmap_singleton, file);
+ drm_dev_get(&i915->drm);
+
+ return file;
+}
+
/*
* This overcomes the limitation in drm_gem_mmap's assignment of a
* drm_gem_object as the vma->vm_private_data. Since we need to
@@ -705,59 +807,60 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_gem_object *obj = NULL;
struct i915_mmap_offset *mmo = NULL;
- struct drm_gem_object *obj = NULL;
+ struct file *anon;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
+ rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
- if (likely(node)) {
- mmo = container_of(node, struct i915_mmap_offset,
- vma_node);
- /*
- * In our dependency chain, the drm_vma_offset_node
- * depends on the validity of the mmo, which depends on
- * the gem object. However the only reference we have
- * at this point is the mmo (as the parent of the node).
- * Try to check if the gem object was at least cleared.
- */
- if (!mmo || !mmo->obj) {
- drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
- return -EINVAL;
- }
+ if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as it is in the process of being
* destroyed and will be invalid when the vma manager lock
* is released.
*/
- obj = &mmo->obj->base;
- if (!kref_get_unless_zero(&obj->refcount))
- obj = NULL;
+ mmo = container_of(node, struct i915_mmap_offset, vma_node);
+ obj = i915_gem_object_get_rcu(mmo->obj);
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ rcu_read_unlock();
if (!obj)
- return -EINVAL;
+ return node ? -EACCES : -EINVAL;
- if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put_unlocked(obj);
- return -EACCES;
- }
-
- if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+ if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
- drm_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
}
+ anon = mmap_singleton(to_i915(dev));
+ if (IS_ERR(anon)) {
+ i915_gem_object_put(obj);
+ return PTR_ERR(anon);
+ }
+
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = mmo;
+ /*
+ * We keep the ref on mmo->obj, not vm_file, but we require
+ * vma->vm_file->f_mapping, see vma_link(), for later revocation.
+ * Our userspace is accustomed to having per-file resource cleanup
+ * (i.e. contexts, objects and requests) on their close(fd), which
+ * requires avoiding extraneous references to their filp, hence why
+ * we prefer to use an anonymous file for their mmaps.
+ */
+ fput(vma->vm_file);
+ vma->vm_file = anon;
+
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
vma->vm_page_prot =
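		pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

An object can have at most one mmap offset per mmap type, so the
offsets move from a list to an rb-tree keyed on mmap_type, and
mmap_offset_attach() becomes lookup-then-insert: the new node is
allocated optimistically outside the lock, and insert_mmo() returns
the existing node if a concurrent attach won the race, freeing the
loser's allocation. The idiom with simplified types:

#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct node { struct rb_node rb; int key; };

static struct node *tree_insert_or_reuse(struct rb_root *root,
					 spinlock_t *lock, struct node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	spin_lock(lock);
	while (*p) {
		struct node *pos = rb_entry(*p, struct node, rb);

		parent = *p;
		if (pos->key == new->key) {
			spin_unlock(lock);
			return pos;	/* lost the race: caller frees new */
		}
		p = pos->key < new->key ? &parent->rb_right : &parent->rb_left;
	}
	rb_link_node(&new->rb, parent, p);
	rb_insert_color(&new->rb, root);
	spin_unlock(lock);

	return new;
}

The other half of the hunk, mmap_singleton(), replaces the per-object
drm file reference in the vma with one shared anonymous file whose
f_mapping aliases the device's, so unmap_mapping_range() can still
revoke user PTEs while close(fd) keeps tearing down per-file contexts,
objects and requests promptly.
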
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46bacc82ddc4..35985218bd85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->mmo.lock);
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
init_rcu_head(&obj->rcu);
@@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
- struct i915_mmap_offset *mmo;
LIST_HEAD(close);
i915_gem_object_lock(obj);
@@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
i915_gem_object_unlock(obj);
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
- if (mmo->file != file)
- continue;
-
- spin_unlock(&obj->mmo.lock);
+ rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
drm_vma_node_revoke(&mmo->vma_node, file);
- spin_lock(&obj->mmo.lock);
- }
spin_unlock(&obj->mmo.lock);
list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
i915_gem_object_release_mmap(obj);
- list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
}
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 858f8bf49a04..9c86f2dea947 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,6 +16,7 @@
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
+#include "i915_vma_types.h"
void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -69,14 +70,22 @@ i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
}
static inline struct drm_i915_gem_object *
+i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
+{
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+
+ return obj;
+}
+
+static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
struct drm_i915_gem_object *obj;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
+ obj = i915_gem_object_get_rcu(obj);
rcu_read_unlock();
return obj;
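
i915_gem_object_get_rcu() gives a name to the standard RCU
lookup-and-reference idiom: an object found under rcu_read_lock() may
already have dropped to zero references and be queued for RCU-deferred
freeing, so the reference is taken only if the count is still live.
In caller form (lookup_table() is a placeholder for any RCU-safe
lookup):

rcu_read_lock();
obj = lookup_table(handle);	/* may race with the final put */
if (obj && !kref_get_unless_zero(&obj->base.refcount))
	obj = NULL;		/* too late: object is being freed */
rcu_read_unlock();

A plain kref_get() here would resurrect a dying object, which is why
the mmap path above pairs the helper with rcu_read_lock() around the
vma-offset lookup.
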
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 88e268633fdc..f64ad77e6b1e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -71,13 +71,11 @@ enum i915_mmap_type {
};
struct i915_mmap_offset {
- struct drm_device *dev;
struct drm_vma_offset_node vma_node;
struct drm_i915_gem_object *obj;
- struct drm_file *file;
enum i915_mmap_type mmap_type;
- struct list_head offset;
+ struct rb_node offset;
};
struct drm_i915_gem_object {
@@ -137,7 +135,7 @@ struct drm_i915_gem_object {
struct {
spinlock_t lock; /* Protects access to mmo offsets */
- struct list_head offsets;
+ struct rb_root offsets;
} mmo;
I915_SELFTEST_DECLARE(struct list_head st_link);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 75197ca696a8..54aca5c9101e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -158,9 +158,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
- if (i915_gem_object_is_lmem(obj))
- io_mapping_unmap((void __force __iomem *)ptr);
- else if (is_vmalloc_addr(ptr))
+ if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
@@ -236,46 +234,44 @@ unlock:
return err;
}
+static inline pte_t iomap_pte(resource_size_t base,
+ dma_addr_t offset,
+ pgprot_t prot)
+{
+ return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
+}
+
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
- unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
- struct sgt_iter sgt_iter;
- struct page *page;
- struct page *stack_pages[32];
- struct page **pages = stack_pages;
- unsigned long i = 0;
+ pte_t *stack[32], **mem;
+ struct vm_struct *area;
pgprot_t pgprot;
- void *addr;
- if (i915_gem_object_is_lmem(obj)) {
- void __iomem *io;
-
- if (type != I915_MAP_WC)
- return NULL;
-
- io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
- return (void __force *)io;
- }
+ if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+ return NULL;
/* A single page can always be kmapped */
- if (n_pages == 1 && type == I915_MAP_WB)
+ if (n_pte == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
- if (n_pages > ARRAY_SIZE(stack_pages)) {
+ mem = stack;
+ if (n_pte > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
- pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
+ mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return NULL;
}
- for_each_sgt_page(page, sgt_iter, sgt)
- pages[i++] = page;
-
- /* Check that we have the expected number of pages */
- GEM_BUG_ON(i != n_pages);
+ area = alloc_vm_area(obj->base.size, mem);
+ if (!area) {
+ if (mem != stack)
+ kvfree(mem);
+ return NULL;
+ }
switch (type) {
default:
@@ -288,12 +284,31 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
break;
}
- addr = vmap(pages, n_pages, 0, pgprot);
- if (pages != stack_pages)
- kvfree(pages);
+ if (i915_gem_object_has_struct_page(obj)) {
+ struct sgt_iter iter;
+ struct page *page;
+ pte_t **ptes = mem;
+
+ for_each_sgt_page(page, iter, sgt)
+ **ptes++ = mk_pte(page, pgprot);
+ } else {
+ resource_size_t iomap;
+ struct sgt_iter iter;
+ pte_t **ptes = mem;
+ dma_addr_t addr;
+
+ iomap = obj->mm.region->iomap.base;
+ iomap -= obj->mm.region->region.start;
+
+ for_each_sgt_daddr(addr, iter, sgt)
+ **ptes++ = iomap_pte(iomap, addr, pgprot);
+ }
+
+ if (mem != stack)
+ kvfree(mem);
- return addr;
+ return area->addr;
}
/* get, pin, and map the pages of the object into kernel space */
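
i915_gem_object_map() stops special-casing lmem by constructing the
kernel mapping from PTEs directly: alloc_vm_area() reserves the
virtual range and hands back one pte_t pointer per page, which the
code then fills with mk_pte() for shmem-backed pages or iomap_pte()
for device memory. The arithmetic behind iomap_pte(), assuming (as in
this series) that lmem dma addresses fall inside
[region.start, region.end]:

static pte_t lmem_pte(resource_size_t bar_base,	    /* region->iomap.base */
		      resource_size_t region_start, /* region->region.start */
		      dma_addr_t addr, pgprot_t prot)
{
	/* rebase the device address onto the CPU-visible BAR */
	resource_size_t cpu_phys = bar_base + (addr - region_start);

	return pte_mkspecial(pfn_pte(cpu_phys >> PAGE_SHIFT, prot));
}

This is why the callers precompute iomap = iomap.base - region.start
and then add the raw dma address to it.
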
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index d50adac12249..1515384d7e0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -107,7 +107,10 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
{
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
+
obj->flags |= flags;
+ if (obj->base.size <= mem->min_page_size)
+ obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
mutex_lock(&mem->objects.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4d69c3fc3439..a2a980d9d241 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -594,6 +594,8 @@ static int init_shmem(struct intel_memory_region *mem)
err);
}
+ intel_memory_region_set_name(mem, "system");
+
return 0; /* Don't error, we can simply fallback to the kernel mnt */
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index afb08a1704a2..451f3078d60d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -645,6 +645,8 @@ i915_gem_object_create_stolen(struct drm_i915_private *i915,
static int init_stolen(struct intel_memory_region *mem)
{
+ intel_memory_region_set_name(mem, "stolen");
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index e5558af111e2..580319b7bf1a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -403,7 +403,7 @@ struct get_pages_work {
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+ struct page **pvec, unsigned long num_pages)
{
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
@@ -449,9 +449,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- const int npages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+ unsigned long pinned;
struct page **pvec;
- int pinned, ret;
+ int ret;
ret = -ENOMEM;
pinned = 0;
@@ -559,7 +560,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
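
The int -> unsigned long switch for the userptr page counts is an
overflow guard rather than a cleanup. The bound, with 4 KiB pages
assumed:

/*
 * INT_MAX + 1 pages = 2^31 pages * 2^12 bytes = 2^43 bytes = 8 TiB,
 * so an 'int' page count wraps for objects of 8 TiB and up, while
 * unsigned long is 64-bit on the platforms this driver targets.
 */
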
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
index 549c1394bcdc..b8cf31b7bf14 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
@@ -7,6 +7,12 @@
#ifndef __HUGE_GEM_OBJECT_H
#define __HUGE_GEM_OBJECT_H
+#include <linux/types.h>
+
+#include "gem/i915_gem_object_types.h"
+
+struct drm_i915_private;
+
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 2479395c1873..9311250d7d6f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1017,38 +1017,33 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
- unsigned long n;
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
- for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 __iomem *base;
- u32 read_val;
-
- base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
- read_val = ioread32(base + dword);
- io_mapping_unmap_atomic(base);
- if (read_val != val) {
- pr_err("n=%lu base[%u]=%u, val=%u\n",
- n, dword, read_val, val);
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
err = -EINVAL;
break;
}
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
}
- i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unpin_map(obj);
return err;
}
@@ -1056,10 +1051,8 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
if (i915_gem_object_has_struct_page(obj))
return __cpu_check_shmem(obj, dword, val);
- else if (i915_gem_object_is_lmem(obj))
- return __cpu_check_lmem(obj, dword, val);
-
- return -ENODEV;
+ else
+ return __cpu_check_vmap(obj, dword, val);
}
static int __igt_write_huge(struct intel_context *ce,
@@ -1872,7 +1865,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 49edc51111d5..3f6079e1dfb6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -325,7 +325,10 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
ctx.engine = random_engine(i915, &prng);
- GEM_BUG_ON(!ctx.engine);
+ if (!ctx.engine) {
+ err = -ENODEV;
+ goto out_free;
+ }
pr_info("%s: using %s\n", __func__, ctx.engine->name);
intel_engine_pm_get(ctx.engine);
@@ -354,7 +357,7 @@ static int igt_gem_coherency(void *arg)
ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(ctx.obj)) {
err = PTR_ERR(ctx.obj);
- goto free;
+ goto out_pm;
}
i915_random_reorder(offsets, ncachelines, &prng);
@@ -405,14 +408,15 @@ static int igt_gem_coherency(void *arg)
}
}
}
-free:
+out_pm:
intel_engine_pm_put(ctx.engine);
+out_free:
kfree(offsets);
return err;
put_object:
i915_gem_object_put(ctx.obj);
- goto free;
+ goto out_pm;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index cbf796da64e3..ef7c74cff28a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -9,6 +9,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -725,114 +726,359 @@ err_obj:
goto out;
}
-#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
-static int igt_mmap(void *arg, enum i915_mmap_type type)
+static int gtt_set(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct i915_mmap_offset *mmo;
- struct vm_area_struct *area;
- unsigned long addr;
- void *vaddr;
- int err = 0, i;
+ struct i915_vma *vma;
+ void __iomem *map;
+ int err = 0;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
- return 0;
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ intel_gt_pm_get(vma->vm->gt);
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ memset_io(map, POISON_INUSE, obj->base.size);
+ i915_vma_unpin_iomap(vma);
+
+out:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
+}
+
+static int gtt_check(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ void __iomem *map;
+ int err = 0;
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ intel_gt_pm_get(vma->vm->gt);
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
goto out;
}
- memset(vaddr, POISON_INUSE, PAGE_SIZE);
+
+ if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
+ pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ }
+ i915_vma_unpin_iomap(vma);
+
+out:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
+}
+
+static int wc_set(struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ memset(vaddr, POISON_INUSE, obj->base.size);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- mmo = mmap_offset_attach(obj, type, NULL);
- if (IS_ERR(mmo)) {
- err = PTR_ERR(mmo);
- goto out;
+ return 0;
+}
+
+static int wc_check(struct drm_i915_gem_object *obj)
+{
+ void *vaddr;
+ int err = 0;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
+ pr_err("%s: Write via mmap did not land in backing store (WC)\n",
+ obj->mm.region->name);
+ err = -EINVAL;
}
+ i915_gem_object_unpin_map(obj);
+
+ return err;
+}
+
+static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
+{
+ if (type == I915_MMAP_TYPE_GTT &&
+ !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+ return false;
+
+ if (type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM))
+ return false;
+
+ return true;
+}
+
+#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
+static int __igt_mmap(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
+{
+ struct i915_mmap_offset *mmo;
+ struct vm_area_struct *area;
+ unsigned long addr;
+ int err, i;
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ err = wc_set(obj);
+ if (err == -ENXIO)
+ err = gtt_set(obj);
+ if (err)
+ return err;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
- if (IS_ERR_VALUE(addr)) {
- err = addr;
- goto out;
- }
+ if (IS_ERR_VALUE(addr))
+ return addr;
- pr_debug("igt_mmap() @ %lx\n", addr);
+ pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
area = find_vma(current->mm, addr);
if (!area) {
- pr_err("Did not create a vm_area_struct for the mmap\n");
+ pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+ obj->mm.region->name);
err = -EINVAL;
goto out_unmap;
}
if (area->vm_private_data != mmo) {
- pr_err("vm_area_struct did not point back to our mmap_offset object!\n");
+ pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
+ obj->mm.region->name);
err = -EINVAL;
goto out_unmap;
}
- for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
+ for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
if (get_user(x, ux)) {
- pr_err("Unable to read from mmap, offset:%zd\n",
- i * sizeof(x));
+ pr_err("%s: Unable to read from mmap, offset:%zd\n",
+ obj->mm.region->name, i * sizeof(x));
err = -EFAULT;
- break;
+ goto out_unmap;
}
if (x != expand32(POISON_INUSE)) {
- pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+ pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+ obj->mm.region->name,
i * sizeof(x), x, expand32(POISON_INUSE));
err = -EINVAL;
- break;
+ goto out_unmap;
}
x = expand32(POISON_FREE);
if (put_user(x, ux)) {
- pr_err("Unable to write to mmap, offset:%zd\n",
- i * sizeof(x));
+ pr_err("%s: Unable to write to mmap, offset:%zd\n",
+ obj->mm.region->name, i * sizeof(x));
err = -EFAULT;
- break;
+ goto out_unmap;
}
}
+ if (type == I915_MMAP_TYPE_GTT)
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ err = wc_check(obj);
+ if (err == -ENXIO)
+ err = gtt_check(obj);
out_unmap:
- vm_munmap(addr, PAGE_SIZE);
+ vm_munmap(addr, obj->base.size);
+ return err;
+}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto out;
- }
- if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) {
- pr_err("Write via mmap did not land in backing store\n");
- err = -EINVAL;
+static int igt_mmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ unsigned long sizes[] = {
+ PAGE_SIZE,
+ mr->min_page_size,
+ SZ_4M,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, sizes[i], 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
}
- i915_gem_object_unpin_map(obj);
-out:
- i915_gem_object_put(obj);
- return err;
+ return 0;
}
-static int igt_mmap_gtt(void *arg)
+static int __igt_mmap_gpu(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
{
- return igt_mmap(arg, I915_MMAP_TYPE_GTT);
+ struct intel_engine_cs *engine;
+ struct i915_mmap_offset *mmo;
+ unsigned long addr;
+ u32 __user *ux;
+ u32 bbe;
+ int err;
+
+ /*
+ * Verify that the mmap access into the backing store aligns with
+ * that of the GPU, i.e. that mmap is indeed writing into the same
+ * page as being read by the GPU.
+ */
+
+ if (!can_mmap(obj, type))
+ return 0;
+
+ err = wc_set(obj);
+ if (err == -ENXIO)
+ err = gtt_set(obj);
+ if (err)
+ return err;
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ ux = u64_to_user_ptr((u64)addr);
+ bbe = MI_BATCH_BUFFER_END;
+ if (put_user(bbe, ux)) {
+ pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
+ err = -EFAULT;
+ goto out_unmap;
+ }
+
+ if (type == I915_MMAP_TYPE_GTT)
+ intel_gt_flush_ggtt_writes(&i915->gt);
+
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unmap;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_unmap;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s(%s, %s): Failed to execute batch\n",
+ __func__, engine->name, obj->mm.region->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out_unpin:
+ i915_vma_unpin(vma);
+ if (err)
+ goto out_unmap;
+ }
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+ return err;
}
-static int igt_mmap_cpu(void *arg)
+static int igt_mmap_gpu(void *arg)
{
- return igt_mmap(arg, I915_MMAP_TYPE_WC);
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
@@ -887,32 +1133,24 @@ static int prefault_range(u64 start, u64 len)
return __get_user(c, end - 1);
}
-static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
+static int __igt_mmap_revoke(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj,
+ enum i915_mmap_type type)
{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
struct i915_mmap_offset *mmo;
unsigned long addr;
int err;
- if (!i915_ggtt_has_aperture(&i915->ggtt))
+ if (!can_mmap(obj, type))
return 0;
- obj = i915_gem_object_create_internal(i915, SZ_4M);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
mmo = mmap_offset_attach(obj, type, NULL);
- if (IS_ERR(mmo)) {
- err = PTR_ERR(mmo);
- goto out;
- }
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
- if (IS_ERR_VALUE(addr)) {
- err = addr;
- goto out;
- }
+ if (IS_ERR_VALUE(addr))
+ return addr;
err = prefault_range(addr, obj->base.size);
if (err)
@@ -922,8 +1160,10 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
!atomic_read(&obj->bind_count));
err = check_present(addr, obj->base.size);
- if (err)
+ if (err) {
+ pr_err("%s: was not present\n", obj->mm.region->name);
goto out_unmap;
+ }
/*
* After unbinding the object from the GGTT, its address may be reused
@@ -947,24 +1187,43 @@ static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
}
err = check_absent(addr, obj->base.size);
- if (err)
+ if (err) {
+ pr_err("%s: was not absent\n", obj->mm.region->name);
goto out_unmap;
+ }
out_unmap:
vm_munmap(addr, obj->base.size);
-out:
- i915_gem_object_put(obj);
return err;
}
-static int igt_mmap_gtt_revoke(void *arg)
+static int igt_mmap_revoke(void *arg)
{
- return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT);
-}
+ struct drm_i915_private *i915 = arg;
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
-static int igt_mmap_cpu_revoke(void *arg)
-{
- return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC);
+ for_each_memory_region(mr, i915, id) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+ if (obj == ERR_PTR(-ENODEV))
+ continue;
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
+ if (err == 0)
+ err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
+
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
@@ -973,10 +1232,9 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
- SUBTEST(igt_mmap_gtt),
- SUBTEST(igt_mmap_cpu),
- SUBTEST(igt_mmap_gtt_revoke),
- SUBTEST(igt_mmap_cpu_revoke),
+ SUBTEST(igt_mmap),
+ SUBTEST(igt_mmap_revoke),
+ SUBTEST(igt_mmap_gpu),
};
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 7d7e13dc2fdf..384143aa7776 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -77,12 +77,13 @@ live_context(struct drm_i915_private *i915, struct file *file)
{
struct i915_gem_context *ctx;
int err;
+ u32 id;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
- err = gem_context_register(ctx, to_drm_file(file)->driver_priv);
+ err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
if (err < 0)
goto err_ctx;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
index 370360b4a148..688511afa883 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
@@ -7,6 +7,8 @@
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
+#include "gem/i915_gem_object_types.h"
+
struct mock_object {
struct drm_i915_gem_object base;
};
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
deleted file mode 100644
index 7e73aa587967..000000000000
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
new file mode 100644
index 000000000000..f4fec7eb4064
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen6_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
+{
+ /* Caller needs to make sure the write completes if necessary */
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
+}
+
+void gen7_ppgtt_enable(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 ecochk;
+
+ intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ if (IS_HASWELL(i915)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
+
+ for_each_engine(engine, gt, id) {
+ /* GFX_MODE is per-ring on gen7+ */
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ }
+}
+
+void gen6_ppgtt_enable(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_rmw(uncore,
+ GAC_ECO_BITS,
+ 0,
+ ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
+
+ intel_uncore_rmw(uncore,
+ GAB_CTL,
+ 0,
+ GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ intel_uncore_rmw(uncore,
+ GAM_ECOCHK,
+ 0,
+ ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+ intel_uncore_write(uncore,
+ GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+}
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ const gen6_pte_t scratch_pte = vm->scratch[0].encode;
+ unsigned int pde = first_entry / GEN6_PTES;
+ unsigned int pte = first_entry % GEN6_PTES;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+
+ while (num_entries) {
+ struct i915_page_table * const pt =
+ i915_pt_entry(ppgtt->base.pd, pde++);
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > atomic_read(&pt->used));
+ if (!atomic_sub_return(count, &pt->used))
+ ppgtt->scan_for_unused_pt = true;
+
+ /*
+ * Note that the hw doesn't support removing PDE on the fly
+ * (they are cached inside the context with no means to
+ * invalidate the cache), so we can only reset the PTE
+ * entries back to scratch.
+ */
+
+ vaddr = kmap_atomic_px(pt);
+ memset32(vaddr + pte, scratch_pte, count);
+ kunmap_atomic(vaddr);
+
+ pte = 0;
+ }
+}
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pd = ppgtt->pd;
+ unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
+ unsigned int act_pt = first_entry / GEN6_PTES;
+ unsigned int act_pte = first_entry % GEN6_PTES;
+ const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+ struct sgt_dma iter = sgt_dma(vma);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
+
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ do {
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
+ vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
+
+ iter.dma += I915_GTT_PAGE_SIZE;
+ if (iter.dma == iter.max) {
+ iter.sg = __sg_next(iter.sg);
+ if (!iter.sg)
+ break;
+
+ iter.dma = sg_dma_address(iter.sg);
+ iter.max = iter.dma + iter.sg->length;
+ }
+
+ if (++act_pte == GEN6_PTES) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ act_pte = 0;
+ }
+ } while (1);
+ kunmap_atomic(vaddr);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ start = round_down(start, SZ_64K);
+ end = round_up(end, SZ_64K) - start;
+
+ mutex_lock(&ppgtt->flush);
+
+ gen6_for_each_pde(pt, pd, start, end, pde)
+ gen6_write_pde(ppgtt, pde, pt);
+
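+	/*
+	 * The PDE writes above go through a WC mapping of the GGTT, so
+	 * presumably the mb() and the posting read of the last PDE written
+	 * are what flush them out before the GGTT invalidate makes the
+	 * updated page directory visible to the GPU.
+	 */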
+ mb();
+ ioread32(ppgtt->pd_addr + pde - 1);
+ gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
+ mb();
+
+ mutex_unlock(&ppgtt->flush);
+}
+
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt, *alloc = NULL;
+ intel_wakeref_t wakeref;
+ u64 from = start;
+ unsigned int pde;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ spin_lock(&pd->lock);
+ gen6_for_each_pde(pt, pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
+ if (px_base(pt) == px_base(&vm->scratch[1])) {
+ spin_unlock(&pd->lock);
+
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto unwind_out;
+ }
+
+ fill32_px(pt, vm->scratch[0].encode);
+
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == &vm->scratch[1]) {
+ pd->entry[pde] = pt;
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
+ }
+ }
+
+ atomic_add(count, &pt->used);
+ }
+ spin_unlock(&pd->lock);
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
+ gen6_flush_pd(ppgtt, from, start);
+
+ goto out;
+
+unwind_out:
+ gen6_ppgtt_clear_range(vm, from, start - from);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return ret;
+}
+
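+/*
+ * A rough sketch of the scratch layout built below: scratch[0] is a single
+ * scratch page and scratch[1] is a page table whose PTEs all point at
+ * scratch[0]; every PDE slot starts out referencing scratch[1], so reads
+ * from unallocated ranges should resolve harmlessly to the scratch page.
+ */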
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ int ret;
+
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ if (ret)
+ return ret;
+
+ vm->scratch[0].encode =
+ vm->pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
+ cleanup_scratch_page(vm);
+ return -ENOMEM;
+ }
+
+ fill32_px(&vm->scratch[1], vm->scratch[0].encode);
+ memset_p(pd->entry, &vm->scratch[1], I915_PDES);
+
+ return 0;
+}
+
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
+ struct i915_page_table *pt;
+ u32 pde;
+
+ gen6_for_all_pdes(pt, pd, pde)
+ if (px_base(pt) != scratch)
+ free_px(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+
+ __i915_vma_put(ppgtt->vma);
+
+ gen6_ppgtt_free_pd(ppgtt);
+ free_scratch(vm);
+
+ mutex_destroy(&ppgtt->flush);
+ mutex_destroy(&ppgtt->pin_mutex);
+ kfree(ppgtt->base.pd);
+}
+
+static int pd_vma_set_pages(struct i915_vma *vma)
+{
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ vma->pages = NULL;
+}
+
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
+
+ px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
+
+ gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
+ return 0;
+}
+
+static void pd_vma_unbind(struct i915_vma *vma)
+{
+ struct gen6_ppgtt *ppgtt = vma->private;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+	/* Free all page tables that are no longer used */
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+ if (px_base(pt) == scratch || atomic_read(&pt->used))
+ continue;
+
+ free_px(&ppgtt->base.vm, pt);
+ pd->entry[pde] = scratch;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
+}
+
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
+
+static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
+{
+ struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
+
+ vma = i915_vma_alloc();
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ i915_active_init(&vma->active, NULL, NULL);
+
+ kref_init(&vma->ref);
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(&ggtt->vm);
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
+
+ vma->size = size;
+ vma->fence_size = size;
+ atomic_set(&vma->flags, I915_VMA_GGTT);
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
+
+ INIT_LIST_HEAD(&vma->obj_link);
+ INIT_LIST_HEAD(&vma->closed_link);
+
+ return vma;
+}
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+
+ /*
+ * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
+ return 0;
+
+ if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+ return -EINTR;
+
+ /*
+	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ err = 0;
+ if (!atomic_read(&ppgtt->pin_count))
+ err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+ mutex_unlock(&ppgtt->pin_mutex);
+
+ return err;
+}
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
+}
+
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ if (!atomic_read(&ppgtt->pin_count))
+ return;
+
+ i915_vma_unpin(ppgtt->vma);
+ atomic_set(&ppgtt->pin_count, 0);
+}
+
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ggtt * const ggtt = gt->ggtt;
+ struct gen6_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&ppgtt->flush);
+ mutex_init(&ppgtt->pin_mutex);
+
+ ppgtt_init(&ppgtt->base, gt);
+ ppgtt->base.vm.top = 1;
+
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
+ if (!ppgtt->base.pd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_pd;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ free_scratch(&ppgtt->base.vm);
+err_pd:
+ kfree(ppgtt->base.pd);
+err_free:
+ mutex_destroy(&ppgtt->pin_mutex);
+ kfree(ppgtt);
+ return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
new file mode 100644
index 000000000000..72e481806c96
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_PPGTT_H__
+#define __GEN6_PPGTT_H__
+
+#include "intel_gtt.h"
+
+struct gen6_ppgtt {
+ struct i915_ppgtt base;
+
+ struct mutex flush;
+ struct i915_vma *vma;
+ gen6_pte_t __iomem *pd_addr;
+
+ atomic_t pin_count;
+ struct mutex pin_mutex;
+
+ bool scan_for_unused_pt;
+};
+
+static inline u32 gen6_pte_index(u32 addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pte_count(u32 addr, u32 length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pde_index(u32 addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
+
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
+
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
+ */
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+ for (iter = gen6_pde_index(start); \
+ length > 0 && iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ temp = min(temp - start, length); \
+ start += temp, length -= temp; }), ++iter)
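+
+/*
+ * A rough worked example (assuming GEN6_PDE_SHIFT == 22, i.e. each PDE
+ * spans 4MiB): gen6_for_each_pde(pt, pd, 0, SZ_8M, iter) visits pde 0 and
+ * pde 1, and gen6_pte_count() reports GEN6_PTES entries for each, since
+ * both 4MiB chunks are fully covered.
+ */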
+
+#define gen6_for_all_pdes(pt, pd, iter) \
+ for (iter = 0; \
+ iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ++iter)
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
+void gen6_ppgtt_enable(struct intel_gt *gt);
+void gen7_ppgtt_enable(struct intel_gt *gt);
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
new file mode 100644
index 000000000000..4d1de2d97d5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gen8_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
+{
+ u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (level != I915_CACHE_NONE)
+ pde |= PPAT_CACHED_PDE;
+ else
+ pde |= PPAT_UNCACHED;
+
+ return pde;
+}
+
+static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+{
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
+ enum vgt_g2v_type msg;
+ int i;
+
+ if (create)
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
+ else
+ atomic_dec(px_used(ppgtt->pd));
+
+ mutex_lock(&i915->vgpu.lock);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+ msg = create ?
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].lo),
+ lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].hi),
+ upper_32_bits(daddr));
+ }
+
+ msg = create ?
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
+ }
+
+ /* g2v_notify atomically (via hv trap) consumes the message packet. */
+ intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
+
+ mutex_unlock(&i915->vgpu.lock);
+}
+
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
+
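+/*
+ * A quick sanity sketch of the arithmetic above: GEN8_PDES = 4096 / 8 =
+ * 512 entries per level, so gen8_pd_shift(1) == 9 and __gen8_pte_shift()
+ * yields 12/21/30/39 for lvl 0..3 -- a 4-level vm (vm->top == 3) thus
+ * covers a 48b address space with 512 top-level entries.
+ */
+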
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
+static inline unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
+{
+ const int shift = gen8_pd_shift(lvl);
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ end += ~mask >> gen8_pd_shift(1);
+
+ *idx = i915_pde_index(start, shift);
+ if ((start ^ end) & mask)
+ return GEN8_PDES - *idx;
+ else
+ return i915_pde_index(end, shift) - *idx;
+}
+
+static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+{
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ return (start ^ end) & mask && (start & ~mask) == 0;
+}
+
+static inline unsigned int gen8_pt_count(u64 start, u64 end)
+{
+ GEM_BUG_ON(start >= end);
+ if ((start ^ end) >> gen8_pd_shift(1))
+ return GEN8_PDES - (start & (GEN8_PDES - 1));
+ else
+ return end - start;
+}
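+
+/*
+ * E.g. (sketch): gen8_pt_count(10, 20) stays within one page table and
+ * returns end - start = 10, while gen8_pt_count(510, 1030) crosses a
+ * 512-entry boundary and returns only 512 - 510 = 2, the entries left in
+ * the first table.
+ */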
+
+static inline unsigned int
+gen8_pd_top_count(const struct i915_address_space *vm)
+{
+ unsigned int shift = __gen8_pte_shift(vm->top);
+ return (vm->total + (1ull << shift) - 1) >> shift;
+}
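+
+/*
+ * E.g. (sketch): with vm->top == 3 and a 48b total, shift == 39 and the
+ * top level holds 512 entries; with vm->top == 2 and a 32b total,
+ * shift == 30 and this works out to the 4 PDP entries of the 3-level
+ * layout (GEN8_3LVL_PDPES).
+ */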
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (vm->top == 2)
+ return ppgtt->pd;
+ else
+ return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+ return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
+{
+ if (lvl) {
+ void **pde = pd->entry;
+
+ do {
+ if (!*pde)
+ continue;
+
+ __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+ } while (pde++, --count);
+ }
+
+ free_px(vm, pd);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
+
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ free_scratch(vm);
+}
+
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 start, const u64 end, int lvl)
+{
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ unsigned int idx, len;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
+
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+ gen8_pd_contains(start, end, lvl)) {
+ DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+ __func__, vm, lvl + 1, idx, start, end);
+ clear_pd_entry(pd, idx, scratch);
+ __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+ start += (u64)I915_PDES << gen8_pd_shift(lvl);
+ continue;
+ }
+
+ if (lvl) {
+ start = __gen8_ppgtt_clear(vm, as_pd(pt),
+ start, end, lvl);
+ } else {
+ unsigned int count;
+ u64 *vaddr;
+
+ count = gen8_pt_count(start, end);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
+ __func__, vm, lvl, start, end,
+ gen8_pd_index(start, 0), count,
+ atomic_read(&pt->used));
+ GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+
+ vaddr = kmap_atomic_px(pt);
+ memset64(vaddr + gen8_pd_index(start, 0),
+ vm->scratch[0].encode,
+ count);
+ kunmap_atomic(vaddr);
+
+ atomic_sub(count, &pt->used);
+ start += count;
+ }
+
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ } while (idx++, --len);
+
+ return start;
+}
+
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ start, start + length, vm->top);
+}
+
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
+{
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ struct i915_page_table *alloc = NULL;
+ unsigned int idx, len;
+ int ret = 0;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, *start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+ spin_lock(&pd->lock);
+ GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+ __func__, vm, lvl + 1, idx);
+
+ pt = fetch_and_zero(&alloc);
+ if (lvl) {
+ if (!pt) {
+ pt = &alloc_pd(vm)->pt;
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ fill_px(pt, vm->scratch[lvl].encode);
+ } else {
+ if (!pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ if (intel_vgpu_active(vm->i915) ||
+ gen8_pt_count(*start, end) < I915_PDES)
+ fill_px(pt, vm->scratch[lvl].encode);
+ }
+
+ spin_lock(&pd->lock);
+ if (likely(!pd->entry[idx]))
+ set_pd_entry(pd, idx, pt);
+ else
+ alloc = pt, pt = pd->entry[idx];
+ }
+
+ if (lvl) {
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+ start, end, lvl);
+ if (unlikely(ret)) {
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ goto out;
+ }
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ GEM_BUG_ON(!atomic_read(&pt->used));
+ } else {
+ unsigned int count = gen8_pt_count(*start, end);
+
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
+ __func__, vm, lvl, *start, end,
+ gen8_pd_index(*start, 0), count,
+ atomic_read(&pt->used));
+
+ atomic_add(count, &pt->used);
+ /* All other pdes may be simultaneously removed */
+ GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
+ *start += count;
+ }
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ return ret;
+}
+
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ u64 from;
+ int err;
+
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+ from = start;
+
+ err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
+ if (unlikely(err && from != start))
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ from, start, vm->top);
+
+ return err;
+}
+
+static __always_inline u64
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
+ struct sgt_dma *iter,
+ u64 idx,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_page_directory *pd;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ gen8_pte_t *vaddr;
+
+ pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ do {
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+
+ iter->dma += I915_GTT_PAGE_SIZE;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg) {
+ idx = 0;
+ break;
+ }
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + iter->sg->length;
+ }
+
+ if (gen8_pd_index(++idx, 0) == 0) {
+ if (gen8_pd_index(idx, 1) == 0) {
+ /* Limited by sg length for 3lvl */
+ if (gen8_pd_index(idx, 2) == 0)
+ break;
+
+ pd = pdp->entry[gen8_pd_index(idx, 2)];
+ }
+
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ }
+ } while (1);
+ kunmap_atomic(vaddr);
+
+ return idx;
+}
+
+static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ u64 start = vma->node.start;
+ dma_addr_t rem = iter->sg->length;
+
+ GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vma->vm, start);
+ struct i915_page_directory * const pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
+ gen8_pte_t encode = pte_encode;
+ unsigned int maybe_64K = -1;
+ unsigned int page_size;
+ gen8_pte_t *vaddr;
+ u16 index;
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
+ encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ vaddr = kmap_atomic_px(pd);
+ } else {
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
+
+ index = __gen8_pte_index(start, 0);
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+ maybe_64K = __gen8_pte_index(start, 1);
+
+ vaddr = kmap_atomic_px(pt);
+ }
+
+ do {
+ GEM_BUG_ON(iter->sg->length < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = iter->sg->length;
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K != -1 && index < I915_PDES &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+ maybe_64K = -1;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < I915_PDES);
+
+ kunmap_atomic(vaddr);
+
+ /*
+		 * Is it safe to mark the 2M block as 64K? -- Either we have
+		 * filled the whole page-table with 64K entries, or we have
+		 * filled part of it, reached the end of the sg table, and
+		 * have enough padding.
+ */
+ if (maybe_64K != -1 &&
+ (index == I915_PDES ||
+ (i915_vm_has_scratch_64K(vma->vm) &&
+ !iter->sg && IS_ALIGNED(vma->node.start +
+ vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = kmap_atomic_px(pd);
+ vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+ kunmap_atomic(vaddr);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+
+ /*
+ * We write all 4K page entries, even when using 64K
+ * pages. In order to verify that the HW isn't cheating
+ * by using the 4K PTE instead of the 64K PTE, we want
+ * to remove all the surplus entries. If the HW skipped
+ * the 64K PTE, it will read/write into the scratch page
+ * instead - which we detect as missing results during
+ * selftests.
+ */
+ if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+ u16 i;
+
+ encode = vma->vm->scratch[0].encode;
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+
+ for (i = 1; i < index; i += 16)
+ memset64(vaddr + i, encode, 15);
+
+ kunmap_atomic(vaddr);
+ }
+ }
+
+ vma->page_sizes.gtt |= page_size;
+ } while (iter->sg);
+}
+
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+ struct sgt_dma iter = sgt_dma(vma);
+
+ if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ } else {
+ u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+
+ idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+ cache_level, flags);
+ } while (idx);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ }
+}
+
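+/*
+ * A rough sketch of the scratch chain built below: scratch[0] is a plain
+ * page and each scratch[i], for 1 <= i <= vm->top, is a directory filled
+ * with entries pointing at scratch[i - 1], so an empty tree resolves every
+ * lookup down to the scratch page.
+ */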
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+ int ret;
+ int i;
+
+ /*
+	 * If everybody agrees not to write into the scratch page,
+	 * we can reuse it for all vms, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+ struct i915_address_space *clone = vm->gt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_order = clone->scratch_order;
+ memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
+ px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
+ return 0;
+ }
+
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ if (ret)
+ return ret;
+
+ vm->scratch[0].encode =
+ gen8_pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_LLC, vm->has_read_only);
+
+ for (i = 1; i <= vm->top; i++) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ goto free_scratch;
+
+ fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
+ vm->scratch[i].encode =
+ gen8_pde_encode(px_dma(&vm->scratch[i]),
+ I915_CACHE_LLC);
+ }
+
+ return 0;
+
+free_scratch:
+ free_scratch(vm);
+ return -ENOMEM;
+}
+
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+{
+ struct i915_address_space *vm = &ppgtt->vm;
+ struct i915_page_directory *pd = ppgtt->pd;
+ unsigned int idx;
+
+ GEM_BUG_ON(vm->top != 2);
+ GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
+
+ for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+ struct i915_page_directory *pde;
+
+ pde = alloc_pd(vm);
+ if (IS_ERR(pde))
+ return PTR_ERR(pde);
+
+ fill_px(pde, vm->scratch[1].encode);
+ set_pd_entry(pd, idx, pde);
+ atomic_inc(px_used(pde)); /* keep pinned */
+ }
+ wmb();
+
+ return 0;
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+ const unsigned int count = gen8_pd_top_count(vm);
+ struct i915_page_directory *pd;
+
+ GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+
+ pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ atomic_inc(px_used(pd)); /* mark as pinned */
+ return pd;
+}
+
+/*
+ * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
+ * registers with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ */
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ppgtt_init(ppgtt, gt);
+ ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
+ */
+ ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+
+ /*
+	 * There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And we are not sure about the latter, so play safe for now.
+ */
+ if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
+ ppgtt->vm.pt_kmap_wc = true;
+
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
+
+ ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(ppgtt->pd)) {
+ err = PTR_ERR(ppgtt->pd);
+ goto err_free_scratch;
+ }
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_free_pd;
+ }
+
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
+ if (intel_vgpu_active(gt->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, true);
+
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+
+ return ppgtt;
+
+err_free_pd:
+ __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+ gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
+err_free_scratch:
+ free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
new file mode 100644
index 000000000000..76a08b9c1f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN8_PPGTT_H__
+#define __GEN8_PPGTT_H__
+
+struct intel_gt;
+
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index fbaa9df6f436..57e8a051ddc2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -43,65 +43,115 @@ intel_context_create(struct intel_engine_cs *engine)
return ce;
}
-int __intel_context_do_pin(struct intel_context *ce)
+int intel_context_alloc_state(struct intel_context *ce)
{
- int err;
+ int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
return -EINTR;
- if (likely(!atomic_read(&ce->pin_count))) {
- intel_wakeref_t wakeref;
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ err = ce->ops->alloc(ce);
+ if (unlikely(err))
+ goto unlock;
+
+ set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ }
+
+unlock:
+ mutex_unlock(&ce->pin_mutex);
+ return err;
+}
+
+static int intel_context_active_acquire(struct intel_context *ce)
+{
+ int err;
+
+ __i915_active_acquire(&ce->active);
- if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
- err = ce->ops->alloc(ce);
- if (unlikely(err))
- goto err;
+ if (intel_context_is_barrier(ce))
+ return 0;
+
+ /* Preallocate tracking nodes */
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err)
+ i915_active_release(&ce->active);
+
+ return err;
+}
- __set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
- }
+static void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active_acquire() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
+}
- err = 0;
- with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
- err = ce->ops->pin(ce);
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ int err;
+
+ if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ err = intel_context_alloc_state(ce);
if (err)
- goto err;
+ return err;
+ }
+
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex)) {
+ err = -EINTR;
+ goto out_release;
+ }
+
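+	/*
+	 * Note (sketch of the fast path): atomic_add_unless() only bumps
+	 * pin_count when it is already non-zero; if that fails we are the
+	 * first pinner and must acquire the active tracker and ops->pin()
+	 * while holding pin_mutex.
+	 */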
+ if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
+ err = intel_context_active_acquire(ce);
+ if (unlikely(err))
+ goto out_unlock;
+
+ err = ce->ops->pin(ce);
+ if (unlikely(err))
+ goto err_active;
CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
+ atomic_inc(&ce->pin_count);
}
- atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ goto out_unlock;
+err_active:
+ intel_context_active_release(ce);
+out_unlock:
mutex_unlock(&ce->pin_mutex);
- return 0;
-
-err:
- mutex_unlock(&ce->pin_mutex);
+out_release:
+ i915_active_release(&ce->active);
return err;
}
void intel_context_unpin(struct intel_context *ce)
{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ if (!atomic_dec_and_test(&ce->pin_count))
return;
- /* We may be called from inside intel_context_pin() to evict another */
- intel_context_get(ce);
- mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
- if (likely(atomic_dec_and_test(&ce->pin_count))) {
- CE_TRACE(ce, "retire\n");
+ CE_TRACE(ce, "unpin\n");
+ ce->ops->unpin(ce);
- ce->ops->unpin(ce);
-
- intel_context_active_release(ce);
- }
-
- mutex_unlock(&ce->pin_mutex);
+ /*
+ * Once released, we may asynchronously drop the active reference.
+ * As that may be the only reference keeping the context alive,
+	 * take an extra reference now so that it is not freed before we
+	 * finish dereferencing it.
+ */
+ intel_context_get(ce);
+ intel_context_active_release(ce);
intel_context_put(ce);
}
@@ -114,6 +164,10 @@ static int __context_pin_state(struct i915_vma *vma)
if (err)
return err;
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
/*
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
@@ -122,14 +176,44 @@ static int __context_pin_state(struct i915_vma *vma)
vma->obj->mm.dirty = true;
return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
+static int __ring_active(struct intel_ring *ring)
+{
+ int err;
+
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ return err;
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_active;
+
+ return 0;
+
+err_active:
+ i915_active_release(&ring->vma->active);
+ return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+ intel_ring_unpin(ring);
+ i915_active_release(&ring->vma->active);
+}
+
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
@@ -142,7 +226,7 @@ static void __intel_context_retire(struct i915_active *active)
__context_unpin_state(ce->state);
intel_timeline_unpin(ce->timeline);
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
intel_context_put(ce);
}
@@ -152,9 +236,11 @@ static int __intel_context_active(struct i915_active *active)
struct intel_context *ce = container_of(active, typeof(*ce), active);
int err;
+ CE_TRACE(ce, "active\n");
+
intel_context_get(ce);
- err = intel_ring_pin(ce->ring);
+ err = __ring_active(ce->ring);
if (err)
goto err_put;
@@ -174,40 +260,12 @@ static int __intel_context_active(struct i915_active *active)
err_timeline:
intel_timeline_unpin(ce->timeline);
err_ring:
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
err_put:
intel_context_put(ce);
return err;
}
-int intel_context_active_acquire(struct intel_context *ce)
-{
- int err;
-
- err = i915_active_acquire(&ce->active);
- if (err)
- return err;
-
- /* Preallocate tracking nodes */
- if (!intel_context_is_barrier(ce)) {
- err = i915_active_acquire_preallocate_barrier(&ce->active,
- ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
- }
-
- return 0;
-}
-
-void intel_context_active_release(struct intel_context *ce)
-{
- /* Nodes preallocated in intel_context_active() */
- i915_active_acquire_barrier(&ce->active);
- i915_active_release(&ce->active);
-}
-
void
intel_context_init(struct intel_context *ce,
struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 1d4a1b1357cf..30bd248827d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -19,7 +19,7 @@
#define CE_TRACE(ce, fmt, ...) do { \
const struct intel_context *ce__ = (ce); \
- ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \
+ ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \
ce__->timeline->fence_context, \
##__VA_ARGS__); \
} while (0)
@@ -31,6 +31,8 @@ void intel_context_fini(struct intel_context *ce);
struct intel_context *
intel_context_create(struct intel_engine_cs *engine);
+int intel_context_alloc_state(struct intel_context *ce);
+
void intel_context_free(struct intel_context *ce);
/**
@@ -76,9 +78,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
int __intel_context_do_pin(struct intel_context *ce);
+static inline bool intel_context_pin_if_active(struct intel_context *ce)
+{
+ return atomic_inc_not_zero(&ce->pin_count);
+}
+
static inline int intel_context_pin(struct intel_context *ce)
{
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ if (likely(intel_context_pin_if_active(ce)))
return 0;
return __intel_context_do_pin(ce);
@@ -116,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce)
ce->ops->exit(ce);
}
-int intel_context_active_acquire(struct intel_context *ce);
-void intel_context_active_release(struct intel_context *ce);
-
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 9527a659546c..ca1420fb8b53 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,8 @@
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#define CONTEXT_REDZONE POISON_INUSE
+
struct i915_gem_context;
struct i915_vma;
struct intel_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 79ecac5ac0ab..5df003061e44 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -202,7 +202,7 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone);
void intel_engine_init_execlists(struct intel_engine_cs *engine);
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engine_flush_submission(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engines_reset_default_submission(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ddf9543b1261..06ff7695fa29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -671,6 +671,7 @@ void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
INIT_LIST_HEAD(&engine->active.requests);
+ INIT_LIST_HEAD(&engine->active.hold);
spin_lock_init(&engine->active.lock);
lockdep_set_subclass(&engine->active.lock, subclass);
@@ -914,8 +915,8 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
static u32
-read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
- i915_reg_t reg)
+read_subslice_reg(const struct intel_engine_cs *engine,
+ int slice, int subslice, i915_reg_t reg)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
@@ -959,7 +960,7 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
}
/* NB: please notice the memset */
-void intel_engine_get_instdone(struct intel_engine_cs *engine,
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1047,10 +1048,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-bool intel_engine_flush_submission(struct intel_engine_cs *engine)
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
- bool active = tasklet_is_locked(t);
if (__tasklet_is_scheduled(t)) {
local_bh_disable();
@@ -1061,13 +1061,10 @@ bool intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t);
}
local_bh_enable();
- active = true;
}
/* Otherwise flush the tasklet if it was running on another cpu */
tasklet_unlock_wait(t);
-
- return active;
}
/**
@@ -1426,6 +1423,17 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1495,6 +1503,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
+ drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 742628e40201..6c6fd185457c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
goto out_unlock;
}
- rq->flags |= I915_REQUEST_SENTINEL;
+ __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
idle_pulse(engine, rq);
__i915_request_commit(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 010620b78202..ea90ab3e396e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -20,6 +20,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
+ struct intel_context *ce;
void *map;
ENGINE_TRACE(engine, "\n");
@@ -34,6 +35,27 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (!IS_ERR_OR_NULL(map))
engine->pinned_default_state = map;
+ /* Discard stale context state from across idling */
+ ce = engine->kernel_context;
+ if (ce) {
+ GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
+
+ /* First poison the image to verify we never fully trust it */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ int type = i915_coherent_map_type(engine->i915);
+
+ map = i915_gem_object_pin_map(obj, type);
+ if (!IS_ERR(map)) {
+ memset(map, CONTEXT_REDZONE, obj->base.size);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+ }
+ }
+
+ ce->ops->reset(ce);
+ }
+
if (engine->unpark)
engine->unpark(engine);
@@ -123,16 +145,16 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
unsigned long flags;
bool result = true;
+ /* GPU is pointing to the void, as good as in the kernel context. */
+ if (intel_gt_is_wedged(engine->gt))
+ return true;
+
GEM_BUG_ON(!intel_context_is_barrier(ce));
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
return true;
- /* GPU is pointing to the void, as good as in the kernel context. */
- if (intel_gt_is_wedged(engine->gt))
- return true;
-
/*
* Note, we do this without taking the timeline->mutex. We cannot
* as we may be called while retiring the kernel context and so
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 00287515e7af..92be41a6903c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -278,8 +278,8 @@ struct intel_engine_cs {
u8 class;
u8 instance;
- u8 uabi_class;
- u8 uabi_instance;
+ u16 uabi_class;
+ u16 uabi_instance;
u32 uabi_capabilities;
u32 context_size;
@@ -295,6 +295,7 @@ struct intel_engine_cs {
struct {
spinlock_t lock;
struct list_head requests;
+ struct list_head hold; /* ready requests, but on hold */
} active;
struct llist_head barrier_tasks;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 7f7150a733f4..9e7f12bef828 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
+#include "intel_gt.h"
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
@@ -200,6 +201,9 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
uabi_node);
char old[sizeof(engine->name)];
+ if (intel_gt_has_init_error(engine->gt))
+ continue; /* ignore incomplete engines */
+
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
new file mode 100644
index 000000000000..531d501be01f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -0,0 +1,1486 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/stop_machine.h>
+
+#include <asm/set_memory.h>
+#include <asm/smp.h>
+
+#include "intel_gt.h"
+#include "i915_drv.h"
+#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
+
+#include "intel_gtt.h"
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+ if (i915_node_color_differs(node, color))
+ *start += I915_GTT_PAGE_SIZE;
+
+ /*
+ * Also leave a space between the unallocated reserved node after the
+ * GTT and any objects within the GTT, i.e. we use the color adjustment
+ * to insert a guard page to prevent prefetches crossing over the
+ * GTT boundary.
+ */
+ node = list_next_entry(node, node_list);
+ if (node->color != color)
+ *end -= I915_GTT_PAGE_SIZE;
+}
+
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ ggtt->vm.is_ggtt = true;
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+
+ if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
+
+ i915_ggtt_init_fences(ggtt);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @i915: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ stash_init(&i915->mm.wc_stash);
+
+ /*
+ * Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
+ */
+ ret = ggtt_init_hw(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+}
+
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ /*
+ * Don't bother messing with faults pre GEN6 as we have little
+ * documentation supporting that it's a good idea.
+ */
+ if (INTEL_GEN(i915) < 6)
+ return;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_suspend_mappings(&i915->ggtt);
+}
+
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
+ spin_unlock_irq(&uncore->lock);
+}
+
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
+ */
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ gen8_ggtt_invalidate(ggtt);
+
+ if (INTEL_GEN(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
+ gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ gen8_set_pte(gtt_entries++, pte_encode | addr);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
+ unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ for_each_sgt_daddr(addr, iter, vma->pages)
+ iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0].encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
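+/*
+ * The *__BKL variants below funnel every GGTT update through
+ * stop_machine() so that PTE writes cannot overlap concurrent aperture
+ * accesses on hardware where that races (see gen8_gmch_probe()); the
+ * suffix is presumably a nod to the old Big Kernel Lock hammer.
+ */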
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+ struct i915_address_space *vm;
+ u64 start;
+ u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+ struct clear_range *arg = _arg;
+
+ gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+ u64 start,
+ u64 length)
+{
+ struct clear_range arg = { vm, start, length };
+
+ stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0].encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ flags);
+}
+
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static int ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 pte_flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+
+ return 0;
+}
+
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
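+/*
+ * The GuC cannot access addresses above GUC_GGTT_TOP, so take that tail
+ * of the GGTT out of the allocator with an unevictable reservation (see
+ * also the comment in init_ggtt()).
+ */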
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+ u64 size;
+ int ret;
+
+ if (!USES_GUC(ggtt->vm.i915))
+ return 0;
+
+ GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+ size = ggtt->vm.total - GUC_GGTT_TOP;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+ PIN_NOEVICT);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+
+ return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
+
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+ ggtt_release_guc_top(ggtt);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
+{
+ /*
+ * Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
+ unsigned long hole_start, hole_end;
+ struct drm_mm_node *entry;
+ int ret;
+
+ /*
+ * GuC requires all resources that we're sharing with it to be placed in
+ * non-WOPCM memory. If GuC is not present or not in use we still need a
+ * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+ * why.
+ */
+ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+ intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
+
+ ret = intel_vgt_balloon(ggtt);
+ if (ret)
+ return ret;
+
+ mutex_init(&ggtt->error_mutex);
+ if (ggtt->mappable_end) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ PAGE_SIZE, 0,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The upper portion of the GuC address space has a sizeable hole
+ * (several MB) that is inaccessible by GuC. Reserve this range within
+ * GGTT as it can comfortably hold GuC/HuC firmware images.
+ */
+ ret = ggtt_reserve_guc_top(ggtt);
+ if (ret)
+ goto err;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
+ DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
+ }
+
+ /* And finally clear the reserved guard page */
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
+
+ return 0;
+
+err:
+ cleanup_init_ggtt(ggtt);
+ return ret;
+}
+
+static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+ int ret;
+
+ /* Currently applicable only to VLV */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(vma->obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ if (flags & I915_VMA_LOCAL_BIND) {
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+
+ if (flags & I915_VMA_ALLOC) {
+ ret = alias->vm.allocate_va_range(&alias->vm,
+ vma->node.start,
+ vma->size);
+ if (ret)
+ return ret;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ }
+
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+ __i915_vma_flags(vma)));
+ alias->vm.insert_entries(&alias->vm, vma,
+ cache_level, pte_flags);
+ }
+
+ if (flags & I915_VMA_GLOBAL_BIND)
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ return 0;
+}
+
+static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+{
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ struct i915_address_space *vm = vma->vm;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
+
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+ struct i915_address_space *vm =
+ &i915_vm_to_ggtt(vma->vm)->alias->vm;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
+}
+
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = i915_ppgtt_create(ggtt->vm.gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
+ err = -ENODEV;
+ goto err_ppgtt;
+ }
+
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
+
+ ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
+
+ return 0;
+
+err_ppgtt:
+ i915_vm_put(&ppgtt->vm);
+ return err;
+}
+
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = fetch_and_zero(&ggtt->alias);
+ if (!ppgtt)
+ return;
+
+ i915_vm_put(&ppgtt->vm);
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = init_ggtt(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+ ret = init_aliasing_ppgtt(&i915->ggtt);
+ if (ret)
+ cleanup_init_ggtt(&i915->ggtt);
+ }
+
+ return 0;
+}
+
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma, *vn;
+
+ atomic_set(&ggtt->vm.open, 0);
+
+ rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
+ flush_workqueue(ggtt->vm.i915->wq);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
+ WARN_ON(__i915_vma_unbind(vma));
+
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+
+ ggtt_release_guc_top(ggtt);
+ intel_vgt_deballoon(ggtt);
+
+ ggtt->vm.cleanup(&ggtt->vm);
+
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
+
+ arch_phys_wc_del(ggtt->mtrr);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+ struct pagevec *pvec;
+
+ fini_aliasing_ppgtt(&i915->ggtt);
+
+ ggtt_cleanup_hw(&i915->ggtt);
+
+ pvec = &i915->mm.wc_stash.pvec;
+ if (pvec->nr) {
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ __pagevec_release(pvec);
+ }
+}
+
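+/*
+ * Decode the GGMS field of the GMCH control register into the size of
+ * the GTT in bytes: SNB encodes it directly in MiB, while BDW+ and CHV
+ * use a log2 encoding (a value of 0 meaning no GGTT at all).
+ */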
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ phys_addr_t phys_addr;
+ int ret;
+
+ /* For modern GENs the PTEs and register space are split in the BAR */
+ phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+
+ /*
+ * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ DRM_ERROR("Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ ggtt->vm.scratch[0].encode =
+ ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, 0);
+
+ return 0;
+}
+
+int ggtt_set_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ GEM_BUG_ON(vma->pages);
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ cleanup_scratch_page(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int size;
+ u16 snb_gmch_ctl;
+ int err;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ if (!IS_DGFX(i915)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+ if (intel_ggtt_update_needs_vtd_wa(i915) ||
+ IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ ggtt->vm.pte_encode = gen8_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
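+/*
+ * The gen6-style PTE encoders below all set the valid bit and encode
+ * the physical address; they differ in the platform-specific
+ * cacheability (and, on Valleyview, writability) bits applied on top.
+ */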
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = i915->drm.pdev;
+ unsigned int size;
+ u16 snb_gmch_ctl;
+ int err;
+
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
+ DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (INTEL_GEN(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static void i915_gmch_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->do_idle_maps = needs_idle_maps(i915);
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+ if (unlikely(ggtt->do_idle_maps))
+ dev_notice(i915->drm.dev,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ int ret;
+
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = i915;
+ ggtt->vm.dma = &i915->drm.pdev->dev;
+
+ if (INTEL_GEN(i915) <= 5)
+ ret = i915_gmch_probe(ggtt);
+ else if (INTEL_GEN(i915) < 8)
+ ret = gen6_gmch_probe(ggtt);
+ else
+ ret = gen8_gmch_probe(ggtt);
+ if (ret)
+ return ret;
+
+ if ((ggtt->vm.total - 1) >> 32) {
+ DRM_ERROR("We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
+ }
+
+ if (ggtt->mappable_end > ggtt->vm.total) {
+ DRM_ERROR("mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
+ }
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ if (ret)
+ return ret;
+
+ if (intel_vtd_active())
+ dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+
+ return 0;
+}
+
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
+{
+ GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+
+ ggtt->invalidate = guc_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+{
+ /* XXX Temporary pardon for error unload */
+ if (ggtt->invalidate == gen8_ggtt_invalidate)
+ return;
+
+ /* We should only be called after i915_ggtt_enable_guc() */
+ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma;
+ bool flush = false;
+ int open;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ /* First fill our portion of the GTT with scratch pages */
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
+
+ /* clflush objects bound into the GGTT and rebind them. */
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ continue;
+
+ clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_GLOBAL, NULL));
+ if (obj) { /* only used during resume => exclusive access */
+ flush |= fetch_and_zero(&obj->write_domain);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ }
+ }
+
+ atomic_set(&ggtt->vm.open, open);
+ ggtt->invalidate(ggtt);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ if (flush)
+ wbinvd_on_all_cpus();
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ ggtt_restore_mappings(ggtt);
+
+ if (INTEL_GEN(i915) >= 8)
+ setup_private_pat(ggtt->vm.gt->uncore);
+}
+
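+/*
+ * Walk the object column by column, starting from the bottom row of
+ * each column and stepping up by one stride per page, so that the
+ * resulting sg list describes a 90 degree rotated view of the pages.
+ */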
+static struct scatterlist *
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int column, row;
+ unsigned int src_idx;
+
+ for (column = 0; column < width; column++) {
+ src_idx = stride * (height - 1) + column + offset;
+ for (row = 0; row < height; row++) {
+ st->nents++;
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need are DMA addresses.
+ */
+ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
+ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
+ sg = sg_next(sg);
+ src_idx -= stride;
+ }
+ }
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_rotation_info_size(rot_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
+ rot_info->plane[i].width, rot_info->plane[i].height,
+ rot_info->plane[i].stride, st, sg);
+ }
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
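+/*
+ * Linearize a width x height rectangle of the object into the sg list,
+ * one row at a time, coalescing physically contiguous pages into a
+ * single entry and skipping the (stride - width) gap between rows.
+ */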
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /*
+ * We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only thing we need are DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += stride - width;
+ }
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
+ sg = remap_pages(obj, rem_info->plane[i].offset,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].stride, st, sg);
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
+static noinline struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->partial.size;
+ unsigned int offset;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+ GEM_BUG_ON(!iter);
+
+ sg = st->sgl;
+ st->nents = 0;
+ do {
+ unsigned int len;
+
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
+
+ st->nents++;
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
+ return st;
+ }
+
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ /*
+ * The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+ * must be the vma->pages. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
+ switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ /* fall through */
+ case I915_GGTT_VIEW_NORMAL:
+ vma->pages = vma->obj->mm.pages;
+ return 0;
+
+ case I915_GGTT_VIEW_ROTATED:
+ vma->pages =
+ intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_REMAPPED:
+ vma->pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ break;
+ }
+
+ ret = 0;
+ if (IS_ERR(vma->pages)) {
+ ret = PTR_ERR(vma->pages);
+ vma->pages = NULL;
+ DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ec84b5e62fef..da2b6e2ae692 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -38,8 +38,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
gt->ggtt = ggtt;
-
- intel_gt_sanitize(gt, false);
}
static void init_unused_ring(struct intel_gt *gt, u32 base)
@@ -77,10 +75,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
int ret;
- ret = intel_gt_terminally_wedged(gt);
- if (ret)
- return ret;
-
gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
@@ -372,7 +366,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
- return &i915_ppgtt_create(gt->i915)->vm;
+ return &i915_ppgtt_create(gt)->vm;
else
return i915_vm_get(&gt->ggtt->vm);
}
@@ -410,14 +404,13 @@ static int __engines_record_defaults(struct intel_gt *gt)
struct intel_context *ce;
struct i915_request *rq;
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+
err = intel_renderstate_init(&so, engine);
if (err)
goto out;
- /* We must be able to switch to something! */
- GEM_BUG_ON(!engine->kernel_context);
- engine->serial++; /* force the kernel context switch */
-
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 2355cf129e9c..1dac441cb8f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -58,9 +58,14 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
return i915_ggtt_offset(gt->scratch) + field;
}
-static inline bool intel_gt_is_wedged(struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
return __intel_reset_failed(&gt->reset);
}
+static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+{
+ return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 45b68a17da4d..d1c2f034296a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -126,17 +126,7 @@ static bool reset_engines(struct intel_gt *gt)
return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}
-/**
- * intel_gt_sanitize: called after the GPU has lost power
- * @gt: the i915 GT container
- * @force: ignore a failed reset and sanitize engine state anyway
- *
- * Anytime we reset the GPU, either with an explicit GPU reset or through a
- * PCI power cycle, the GPU loses state and we must reset our state tracking
- * to match. Note that calling intel_gt_sanitize() if the GPU has not
- * been reset results in much confusion!
- */
-void intel_gt_sanitize(struct intel_gt *gt, bool force)
+static void gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -189,6 +179,10 @@ int intel_gt_resume(struct intel_gt *gt)
enum intel_engine_id id;
int err;
+ err = intel_gt_has_init_error(gt);
+ if (err)
+ return err;
+
GT_TRACE(gt, "\n");
/*
@@ -201,30 +195,26 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
+ gt_sanitize(gt, true);
+ if (intel_gt_is_wedged(gt)) {
+ err = -EIO;
+ goto out_fw;
+ }
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
dev_err(gt->i915->drm.dev,
"Failed to initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(gt);
- goto err_fw;
+ goto err_wedged;
}
intel_rps_enable(&gt->rps);
intel_llc_enable(&gt->llc);
for_each_engine(engine, gt, id) {
- struct intel_context *ce;
-
intel_engine_pm_get(engine);
- ce = engine->kernel_context;
- if (ce) {
- GEM_BUG_ON(!intel_context_is_pinned(ce));
- ce->ops->reset(ce);
- }
-
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@@ -233,7 +223,7 @@ int intel_gt_resume(struct intel_gt *gt)
dev_err(gt->i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
- break;
+ goto err_wedged;
}
}
@@ -243,11 +233,14 @@ int intel_gt_resume(struct intel_gt *gt)
user_forcewake(gt, false);
-err_fw:
+out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
-
return err;
+
+err_wedged:
+ intel_gt_set_wedged(gt);
+ goto out_fw;
}
static void wait_for_suspend(struct intel_gt *gt)
@@ -315,7 +308,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
intel_llc_disable(&gt->llc);
}
- intel_gt_sanitize(gt, false);
+ gt_sanitize(gt, false);
GT_TRACE(gt, "\n");
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 4a9e48c12bd4..60f0e2fbe55c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -51,8 +51,6 @@ void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_pm_init(struct intel_gt *gt);
void intel_gt_pm_fini(struct intel_gt *gt);
-void intel_gt_sanitize(struct intel_gt *gt, bool force);
-
void intel_gt_suspend_prepare(struct intel_gt *gt);
void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index b4f04614230e..7ef1d37970f6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -14,13 +14,16 @@
#include "intel_gt_requests.h"
#include "intel_timeline.h"
-static void retire_requests(struct intel_timeline *tl)
+static bool retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
- break;
+ return false;
+
+ /* And check nothing new was submitted */
+ return !i915_active_fence_isset(&tl->last_request);
}
static bool flush_submission(struct intel_gt *gt)
@@ -29,9 +32,13 @@ static bool flush_submission(struct intel_gt *gt)
enum intel_engine_id id;
bool active = false;
+ if (!intel_gt_pm_is_awake(gt))
+ return false;
+
for_each_engine(engine, gt, id) {
- active |= intel_engine_flush_submission(engine);
+ intel_engine_flush_submission(engine);
active |= flush_work(&engine->retire_work);
+ active |= flush_work(&engine->wakeref.work);
}
return active;
@@ -120,7 +127,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
timeout = -timeout, interruptible = false;
flush_submission(gt); /* kick the ksoftirqd tasklets */
-
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
@@ -145,7 +151,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
}
}
- retire_requests(tl);
+ if (!retire_requests(tl) || flush_submission(gt))
+ active_count++;
spin_lock(&timelines->lock);
@@ -153,8 +160,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- else
- active_count += i915_active_fence_isset(&tl->last_request);
mutex_unlock(&tl->mutex);
@@ -169,9 +174,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
- if (flush_submission(gt))
- active_count++;
-
return active_count ? timeout : 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
new file mode 100644
index 000000000000..16acdc5d6734
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h> /* fault-inject.h is not standalone! */
+
+#include <linux/fault-inject.h>
+
+#include "i915_trace.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
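+/*
+ * Transfer as many pages as will fit from the tail of @pvec into the
+ * stash; whatever does not fit is left in @pvec for the caller to
+ * release.
+ */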
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ unsigned int nr;
+
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
+static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
+{
+ struct pagevec stack;
+ struct page *page;
+
+ if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
+ i915_gem_shrink_all(vm->i915);
+
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
+
+ if (!vm->pt_kmap_wc)
+ return alloc_page(gfp);
+
+ /* Look in our global stash of WC pages... */
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
+
+ /*
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
+ *
+ * We have to be careful as page allocation may trigger the shrinker
+ * (via direct reclaim) which will fill up the WC stash underneath us.
+ * So we add our WB pages into a temporary pvec on the stack and merge
+ * them into the WC stash after all the allocations are complete.
+ */
+ pagevec_init(&stack);
+ do {
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ break;
+
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
+
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
+
+ /* Merge spare WC pages to the global stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
+ }
+
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
+}
+
+static void vm_free_pages_release(struct i915_address_space *vm,
+ bool immediate)
+{
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+
+ lockdep_assert_held(&vm->free_pages.lock);
+ GEM_BUG_ON(!pagevec_count(pvec));
+
+ if (vm->pt_kmap_wc) {
+ /*
+ * When we use WC, first fill up the global stash and then
+ * only if full immediately free the overflow.
+ */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
+
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash for
+ * others to use it as we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+
+ pvec = &stack;
+ set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
+ }
+
+ __pagevec_release(pvec);
+}
+
+static void vm_free_page(struct i915_address_space *vm, struct page *page)
+{
+ /*
+ * On !llc, we need to change the pages back to WB. We only do so
+ * in bulk, so we rarely need to change the page attributes here,
+ * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+ * To make detection of the possible sleep more likely, use an
+ * unconditional might_sleep() for everybody.
+ */
+ might_sleep();
+ spin_lock(&vm->free_pages.lock);
+ while (!pagevec_space(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, false);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+ pagevec_add(&vm->free_pages.pvec, page);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+void __i915_vm_close(struct i915_address_space *vm)
+{
+ struct i915_vma *vma, *vn;
+
+ mutex_lock(&vm->mutex);
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ /* Keep the obj (and hence the vma) alive as _we_ destroy it */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ __i915_vma_put(vma);
+
+ i915_gem_object_put(obj);
+ }
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+ struct i915_address_space *vm =
+ container_of(work, struct i915_address_space, rcu.work);
+
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
+
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
+
+ queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass)
+{
+ kref_init(&vm->ref);
+ INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ atomic_set(&vm->open, 1);
+
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ lockdep_set_subclass(&vm->mutex, subclass);
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->bound_list);
+}
+
+void clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
+static int __setup_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p,
+ gfp_t gfp)
+{
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
+ if (unlikely(!p->page))
+ return -ENOMEM;
+
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
+ if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
+ vm_free_page(vm, p->page);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
+}
+
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+{
+ dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ vm_free_page(vm, p->page);
+}
+
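+/* Fill the page with @count copies of the 64b value @val via a transient kmap. */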
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
+{
+ kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
+}
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
+{
+ unsigned long size;
+
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch (read-only) between all vm, we create one 64k scratch page
+ * for all.
+ */
+ size = I915_GTT_PAGE_SIZE_4K;
+ if (i915_vm_is_4lvl(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ size = I915_GTT_PAGE_SIZE_64K;
+ gfp |= __GFP_NOWARN;
+ }
+ gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+ do {
+ unsigned int order = get_order(size);
+ struct page *page;
+ dma_addr_t addr;
+
+ page = alloc_pages(gfp, order);
+ if (unlikely(!page))
+ goto skip;
+
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
+ if (unlikely(dma_mapping_error(vm->dma, addr)))
+ goto free_page;
+
+ if (unlikely(!IS_ALIGNED(addr, size)))
+ goto unmap_page;
+
+ vm->scratch[0].base.page = page;
+ vm->scratch[0].base.daddr = addr;
+ vm->scratch_order = order;
+ return 0;
+
+unmap_page:
+ dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+ __free_pages(page, order);
+skip:
+ if (size == I915_GTT_PAGE_SIZE_4K)
+ return -ENOMEM;
+
+ size = I915_GTT_PAGE_SIZE_4K;
+ gfp &= ~__GFP_NOWARN;
+ } while (1);
+}
+
+void cleanup_scratch_page(struct i915_address_space *vm)
+{
+ struct i915_page_dma *p = px_base(&vm->scratch[0]);
+ unsigned int order = vm->scratch_order;
+
+ dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(p->page, order);
+}
+
+void free_scratch(struct i915_address_space *vm)
+{
+ int i;
+
+ if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
+ return;
+
+ for (i = 1; i <= vm->top; i++) {
+ if (!px_dma(&vm->scratch[i]))
+ break;
+ cleanup_page_dma(vm, px_base(&vm->scratch[i]));
+ }
+
+ cleanup_scratch_page(vm);
+}
+
+void gtt_write_workarounds(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ /*
+ * This function is for gtt related workarounds. This function is
+ * called on driver load and after a GPU reset, so you can place
+ * workarounds here even if they get overwritten by GPU reset.
+ */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
+ if (IS_BROADWELL(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_GEN9_LP(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+ * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced from BDW+, although technically they
+ * only *work* from gen9+. For pre-BDW we instead have the option for
+ * 32K pages, but we don't currently have any support for it in our
+ * driver.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(i915) <= 10)
+ intel_uncore_rmw(uncore,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ 0,
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+ if (IS_GEN_RANGE(i915, 8, 11)) {
+ bool can_use_gtt_cache = true;
+
+ /*
+ * According to the BSpec if we use 2M/1G pages then we also
+ * need to disable the GTT cache. At least on BDW we can see
+ * visual corruption when using 2M pages, and not disabling the
+ * GTT cache.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+ can_use_gtt_cache = false;
+
+ /* WaGttCachingOffByDefault */
+ intel_uncore_write(uncore,
+ HSW_GTT_CACHE_EN,
+ can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+ WARN_ON_ONCE(can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
+ }
+}
+
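+/*
+ * Gen8+ PTEs are full 64b entries: present and writable by default,
+ * with PTE_READ_ONLY stripping the RW bit and the cache level selecting
+ * one of the PPAT indices programmed by setup_private_pat().
+ */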
+u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ /* TGL doesn't support LLC or AGE settings */
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+}
+
+/*
+ * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases.
+ */
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
+ */
+
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+void setup_private_pat(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ if (INTEL_GEN(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (INTEL_GEN(i915) >= 10)
+ cnl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
+ else
+ bdw_setup_private_ppat(uncore);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_gtt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
new file mode 100644
index 000000000000..7da7681c20b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -0,0 +1,587 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
+ */
+
+#ifndef __INTEL_GTT_H__
+#define __INTEL_GTT_H__
+
+#include <linux/io-mapping.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/pagevec.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
+
+#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_selftest.h"
+#include "i915_vma_types.h"
+
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
+
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+typedef u32 gen6_pte_t;
+typedef u64 gen8_pte_t;
+
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
+
+#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+
+/* gen6-hsw has bits 11-4 for physical addr bits 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_VALID REG_BIT(0)
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
+#define GEN6_PDE_VALID REG_BIT(0)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
+#define BYT_PTE_WRITEABLE REG_BIT(1)
+
+/*
+ * Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
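+/*
+ * For example, HSW_CACHEABILITY_CONTROL(0x2) packs 010b into PTE
+ * bits 3:1, yielding 0x4, while HSW_CACHEABILITY_CONTROL(0x8) sets
+ * only the fourth bit, which lands in PTE bit 11, yielding 0x800.
+ */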
+#define HSW_PTE_UNCACHED (0)
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
+
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference compared to a normal x86 3-level page table is that
+ * the PDPEs are programmed via register.
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
+ * PML4E | PDPE | PDE | PTE | offset
+ */
+#define GEN8_3LVL_PDPES 4
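+/*
+ * Each level thus indexes up to 512 entries, e.g. for a 48b address:
+ * PML4E = (addr >> 39) & 0x1ff, PDPE = (addr >> 30) & 0x1ff,
+ * PDE = (addr >> 21) & 0x1ff, PTE = (addr >> 12) & 0x1ff. The 32b
+ * layout has only the four register-loaded PDPEs (GEN8_3LVL_PDPES).
+ */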
+
+#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE 0 /* WB LLC */
+#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP REG_BIT(6)
+#define GEN8_PPAT_AGE(x) ((x)<<4)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
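+/*
+ * Each PAT index occupies one byte of the 64b value written to the
+ * GEN8_PRIVATE_PAT_LO/HI register pair, e.g. GEN8_PPAT(2,
+ * GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) places 0x0a in bits 23:16.
+ */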
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M BIT(7)
+
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
+
+struct i915_page_dma {
+ struct page *page;
+ union {
+ dma_addr_t daddr;
+
+ /*
+ * For gen6/gen7 only. This is the offset in the GGTT
+ * where the page directory entries for PPGTT begin.
+ */
+ u32 ggtt_offset;
+ };
+};
+
+struct i915_page_scratch {
+ struct i915_page_dma base;
+ u64 encode;
+};
+
+struct i915_page_table {
+ struct i915_page_dma base;
+ atomic_t used;
+};
+
+struct i915_page_directory {
+ struct i915_page_table pt;
+ spinlock_t lock;
+ void *entry[512];
+};
+
+#define __px_choose_expr(x, type, expr, other) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), type) || \
+ __builtin_types_compatible_p(typeof(x), const type), \
+ ({ type __x = (type)(x); expr; }), \
+ other)
+
+#define px_base(px) \
+ __px_choose_expr(px, struct i915_page_dma *, __x, \
+ __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_table *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
+ (void)0))))
+#define px_dma(px) (px_base(px)->daddr)
+
+#define px_pt(px) \
+ __px_choose_expr(px, struct i915_page_table *, __x, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+ (void)0))
+#define px_used(px) (&px_pt(px)->used)
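+/*
+ * The __px_choose_expr() chain above provides compile-time
+ * overloading: __builtin_choose_expr() selects the branch whose type
+ * matches px, so px_base()/px_pt() accept any of the page-table
+ * structs and resolve to the embedded member, while an unsupported
+ * type falls through to (void)0 and fails to compile at the caller.
+ */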
+
+enum i915_cache_level;
+
+struct drm_i915_file_private;
+struct drm_i915_gem_object;
+struct i915_vma;
+struct intel_gt;
+
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
+
+void stash_init(struct pagestash *stash);
+
+struct i915_address_space {
+ struct kref ref;
+ struct rcu_work rcu;
+
+ struct drm_mm mm;
+ struct intel_gt *gt;
+ struct drm_i915_private *i915;
+ struct device *dma;
+ /*
+ * Every address space belongs to a struct file - except for the global
+ * GTT that is owned by the driver (and so @file is set to NULL). In
+ * principle, no information should leak from one context to another
+ * (or between files/processes etc) unless explicitly shared by the
+ * owner. Tracking the owner is important in order to free up per-file
+ * objects along with the file, to aid resource tracking, and to
+ * assign blame.
+ */
+ struct drm_i915_file_private *file;
+ u64 total; /* size addr space maps (e.g. 2GB for ggtt) */
+ u64 reserved; /* size addr space reserved */
+
+ unsigned int bind_async_flags;
+
+ /*
+ * Each active user context has its own address space (in full-ppgtt).
+ * Since the vm may be shared between multiple contexts, we count how
+ * many contexts keep us "open". Once open hits zero, we are closed
+ * and do not allow any new attachments, and proceed to shutdown our
+ * vma and page directories.
+ */
+ atomic_t open;
+
+ struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
+
+ struct i915_page_scratch scratch[4];
+ unsigned int scratch_order;
+ unsigned int top;
+
+ /**
+ * List of vma currently bound.
+ */
+ struct list_head bound_list;
+
+ struct pagestash free_pages;
+
+ /* Global GTT */
+ bool is_ggtt:1;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
+
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
+#define PTE_READ_ONLY BIT(0)
+
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*clear_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*cleanup)(struct i915_address_space *vm);
+
+ struct i915_vma_ops vma_ops;
+
+ I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+ I915_SELFTEST_DECLARE(bool scrub_64K);
+};
+
+/*
+ * The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_ggtt {
+ struct i915_address_space vm;
+
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+ void (*invalidate)(struct i915_ggtt *ggtt);
+
+ /** PPGTT used for aliasing the global GTT */
+ struct i915_ppgtt *alias;
+
+ bool do_idle_maps;
+
+ int mtrr;
+
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
+ u32 pin_bias;
+
+ unsigned int num_fences;
+ struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct list_head fence_list;
+
+ /**
+ * List of all objects in gtt_space, currently mmapped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /* Manual runtime pm autosuspend delay for user GGTT mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
+ struct mutex error_mutex;
+ struct drm_mm_node error_capture;
+ struct drm_mm_node uc_fw;
+};
+
+struct i915_ppgtt {
+ struct i915_address_space vm;
+
+ struct i915_page_directory *pd;
+};
+
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+
+static inline bool
+i915_vm_is_4lvl(const struct i915_address_space *vm)
+{
+ return (vm->total - 1) >> 32;
+}
+
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, vm);
+}
+
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ppgtt, vm);
+}
+
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
+{
+ kref_get(&vm->ref);
+ return vm;
+}
+
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
+{
+ kref_put(&vm->ref, i915_vm_release);
+}
+
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ atomic_inc(&vm->open);
+ return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+ if (atomic_add_unless(&vm->open, 1, 0))
+ return i915_vm_get(vm);
+
+ return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ if (atomic_dec_and_test(&vm->open))
+ __i915_vm_close(vm);
+
+ i915_vm_put(vm);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass);
+void i915_address_space_fini(struct i915_address_space *vm);
+
+static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
+{
+ const u32 mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/*
+ * Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
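+/*
+ * E.g. with GEN6_PDE_SHIFT (22), addr = 0x3ff000 and length = 0x2000
+ * straddle a 4M boundary, so only the single PTE up to 0x400000 is
+ * counted, leaving the remainder to the caller's next iteration.
+ */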
+static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
+{
+ const u64 mask = ~((1ULL << pde_shift) - 1);
+ u64 end;
+
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
+
+static inline u32 i915_pde_index(u64 addr, u32 shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+ const unsigned short n)
+{
+ return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+ const unsigned short n)
+{
+ return pdp->entry[n];
+}
+
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
+{
+ struct i915_page_dma *pt = ppgtt->pd->entry[n];
+
+ return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
+
+int i915_ggtt_probe_hw(struct drm_i915_private *i915);
+int i915_ggtt_init_hw(struct drm_i915_private *i915);
+int i915_ggtt_enable_hw(struct drm_i915_private *i915);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *i915);
+void i915_ggtt_driver_release(struct drm_i915_private *i915);
+
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt);
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
+
+u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags);
+
+int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
+
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
+
+void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);
+
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do { \
+ u64 v__ = lower_32_bits(v); \
+ fill_px((px), v__ << 32 | v__); \
+} while (0)
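+/*
+ * fill32_px() replicates the low 32 bits of v into both halves of
+ * each 64b entry, e.g. v = 0x12345678 fills the page with
+ * 0x1234567812345678 qwords.
+ */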
+
+int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
+void cleanup_scratch_page(struct i915_address_space *vm);
+void free_scratch(struct i915_address_space *vm);
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm);
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
+struct i915_page_directory *__alloc_pd(size_t sz);
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);
+
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level));
+
+#define set_pd_entry(pd, idx, to) \
+ __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch);
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch);
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+
+int ggtt_set_pages(struct i915_vma *vma);
+int ppgtt_set_pages(struct i915_vma *vma);
+void clear_pages(struct i915_vma *vma);
+
+void gtt_write_workarounds(struct intel_gt *gt);
+
+void setup_private_pat(struct intel_uncore *uncore);
+
+static inline struct sgt_dma {
+ struct scatterlist *sg;
+ dma_addr_t dma, max;
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+
+ return (struct sgt_dma){ sg, addr, addr + sg->length };
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 4fb70a7716e3..a13a8c4b65ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -488,17 +488,23 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
return desc;
}
-static u32 *set_offsets(u32 *regs,
+static inline unsigned int dword_in_page(void *addr)
+{
+ return offset_in_page(addr) / sizeof(u32);
+}
+
+static void set_offsets(u32 *regs,
const u8 *data,
- const struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine,
+ bool clear)
#define NOP(x) (BIT(7) | (x))
-#define LRI(count, flags) ((flags) << 6 | (count))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END() 0
+#define END(x) 0, (x)
{
const u32 base = engine->mmio_base;
@@ -506,7 +512,10 @@ static u32 *set_offsets(u32 *regs,
u8 count, flags;
if (*data & BIT(7)) { /* skip */
- regs += *data++ & ~BIT(7);
+ count = *data++ & ~BIT(7);
+ if (clear)
+ memset32(regs, MI_NOOP, count);
+ regs += count;
continue;
}
@@ -532,12 +541,25 @@ static u32 *set_offsets(u32 *regs,
offset |= v & ~BIT(7);
} while (v & BIT(7));
- *regs = base + (offset << 2);
+ regs[0] = base + (offset << 2);
+ if (clear)
+ regs[1] = 0;
regs += 2;
} while (--count);
}
- return regs;
+ if (clear) {
+ u8 count = *++data;
+
+ /* Clear past the tail for HW access */
+ GEM_BUG_ON(dword_in_page(regs) > count);
+ memset32(regs, MI_NOOP, count - dword_in_page(regs));
+
+ /* Close the batch; used mainly by live_lrc_layout() */
+ *regs = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(engine->i915) >= 10)
+ *regs |= BIT(0);
+ }
}
static const u8 gen8_xcs_offsets[] = {
@@ -572,7 +594,7 @@ static const u8 gen8_xcs_offsets[] = {
REG16(0x200),
REG(0x028),
- END(),
+ END(80)
};
static const u8 gen9_xcs_offsets[] = {
@@ -656,7 +678,7 @@ static const u8 gen9_xcs_offsets[] = {
REG16(0x67c),
REG(0x068),
- END(),
+ END(176)
};
static const u8 gen12_xcs_offsets[] = {
@@ -688,7 +710,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END(),
+ END(80)
};
static const u8 gen8_rcs_offsets[] = {
@@ -725,7 +747,91 @@ static const u8 gen8_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(),
+ END(80)
+};
+
+static const u8 gen9_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x34),
+ REG(0x30),
+ REG(0x38),
+ REG(0x3c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0xc8),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x28),
+ REG(0x9c),
+ REG(0xc0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x68),
+
+ END(176)
};
static const u8 gen11_rcs_offsets[] = {
@@ -766,7 +872,7 @@ static const u8 gen11_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(),
+ END(80)
};
static const u8 gen12_rcs_offsets[] = {
@@ -807,7 +913,7 @@ static const u8 gen12_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(),
+ END(80)
};
#undef END
@@ -832,6 +938,8 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
return gen12_rcs_offsets;
else if (INTEL_GEN(engine->i915) >= 11)
return gen11_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_rcs_offsets;
else
return gen8_rcs_offsets;
} else {
@@ -877,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
@@ -1108,7 +1218,7 @@ __execlists_schedule_in(struct i915_request *rq)
/* We don't need a strict matching tag, just different values */
ce->lrc_desc &= ~GENMASK_ULL(47, 37);
ce->lrc_desc |=
- (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+ (u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
GEN11_SW_CTX_ID_SHIFT;
BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
}
@@ -1243,10 +1353,6 @@ static u64 execlists_update_context(struct i915_request *rq)
*/
wmb();
- /* Wa_1607138340:tgl */
- if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
- desc |= CTX_DESC_FORCE_RESTORE;
-
ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
return desc;
}
@@ -1430,8 +1536,9 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->flags ^ next->flags) &
- (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+ if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
if (!can_merge_ctx(prev->context, next->context))
@@ -1443,7 +1550,7 @@ static bool can_merge_rq(const struct i915_request *prev,
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
- set_offsets(regs, reg_offsets(engine), engine);
+ set_offsets(regs, reg_offsets(engine), engine, false);
}
static bool virtual_matches(const struct virtual_engine *ve,
@@ -1528,8 +1635,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
!i915_request_completed(rq));
GEM_BUG_ON(i915_request_is_active(w));
- if (list_empty(&w->sched.link))
- continue; /* Not yet submitted; unready */
+ if (!i915_request_is_ready(w))
+ continue;
if (rq_prio(w) < rq_prio(rq))
continue;
@@ -1590,7 +1697,7 @@ active_timeslice(const struct intel_engine_cs *engine)
{
const struct i915_request *rq = *engine->execlists.active;
- if (i915_request_completed(rq))
+ if (!rq || i915_request_completed(rq))
return 0;
if (engine->execlists.switch_priority_hint < effective_prio(rq))
@@ -1636,6 +1743,11 @@ static void set_preempt_timeout(struct intel_engine_cs *engine)
active_preempt_timeout(engine));
}
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1996,10 +2108,9 @@ done:
goto skip_submit;
}
+ clear_ports(port + 1, last_port - port);
- memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists_submit_ports(engine);
-
set_preempt_timeout(engine);
} else {
skip_submit:
@@ -2014,13 +2125,14 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
for (port = execlists->pending; *port; port++)
execlists_schedule_out(*port);
- memset(execlists->pending, 0, sizeof(execlists->pending));
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
/* Mark the end of active before we overwrite *active */
for (port = xchg(&execlists->active, execlists->pending); *port; port++)
execlists_schedule_out(*port);
- WRITE_ONCE(execlists->active,
- memset(execlists->inflight, 0, sizeof(execlists->inflight)));
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ WRITE_ONCE(execlists->active, execlists->inflight);
}
static inline void
@@ -2176,7 +2288,6 @@ static void process_csb(struct intel_engine_cs *engine)
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
- set_timeslice(engine);
if (!inject_preempt_hang(execlists))
ring_set_paused(engine, 0);
@@ -2217,6 +2328,7 @@ static void process_csb(struct intel_engine_cs *engine)
} while (head != tail);
execlists->csb_head = head;
+ set_timeslice(engine);
/*
* Gen11 has proven to fail wrt global observation point between
@@ -2242,6 +2354,310 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
}
}
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ RQ_TRACE(rq, "on hold\n");
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (i915_request_completed(w))
+ continue;
+
+ if (i915_request_on_hold(rq))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ if (i915_request_completed(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ if (rq->engine != engine) { /* preempted virtual engine */
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+
+ /*
+ * intel_context_inflight() is only protected by virtue
+ * of process_csb() being called only by the tasklet (or
+ * directly from inside reset while the tasklet is suspended).
+ * Assert that neither of those are allowed to run while we
+ * poke at the request queues.
+ */
+ GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+
+ /*
+ * An unsubmitted request along a virtual engine will
+ * remain on the active (this) engine until we are able
+ * to process the context switch away (and so mark the
+ * context as no longer in flight). That cannot have happened
+ * yet, otherwise we would not be hanging!
+ */
+ spin_lock(&ve->base.active.lock);
+ GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
+ GEM_BUG_ON(ve->request != rq);
+ ve->request = NULL;
+ spin_unlock(&ve->base.active.lock);
+ i915_request_put(rq);
+
+ rq->engine = engine;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ if (i915_request_on_hold(s))
+ return true;
+ }
+
+ return false;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ RQ_TRACE(rq, "hold release\n");
+
+ /* Also release any children on this engine that are ready */
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(rq))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(rq))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request, and all of its children and grandchildren
+ * that were suspended along with it, back to the priority queue.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static bool execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return true;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return true;
+
+ cap->rq = execlists_active(&engine->execlists);
+ GEM_BUG_ON(!cap->rq);
+
+ rcu_read_lock();
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ rcu_read_unlock();
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return true;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+ return false;
+}
+
static noinline void preempt_reset(struct intel_engine_cs *engine)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
@@ -2259,7 +2675,12 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
READ_ONCE(engine->props.preempt_timeout_ms),
jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
- intel_engine_reset(engine, "preemption time out");
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ if (execlists_capture(engine))
+ intel_engine_reset(engine, "preemption time out");
+ else
+ ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -2321,11 +2742,12 @@ static void execlists_preempt(struct timer_list *timer)
}
static void queue_request(struct intel_engine_cs *engine,
- struct i915_sched_node *node,
- int prio)
+ struct i915_request *rq)
{
- GEM_BUG_ON(!list_empty(&node->link));
- list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -2353,6 +2775,13 @@ static void submit_queue(struct intel_engine_cs *engine,
__submit_queue_imm(engine);
}
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -2361,12 +2790,17 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
- queue_request(engine, &request->sched, rq_prio(request));
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
- submit_queue(engine, request);
+ submit_queue(engine, request);
+ }
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -2399,7 +2833,7 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
- memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
static void
@@ -2410,7 +2844,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
- if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
dev_err_once(engine->i915->drm.dev,
"%s context redzone overwritten!\n",
engine->name);
@@ -2422,7 +2856,6 @@ static void execlists_context_unpin(struct intel_context *ce)
ce->engine);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
@@ -2453,33 +2886,21 @@ __execlists_context_pin(struct intel_context *ce,
struct intel_engine_cs *engine)
{
void *vaddr;
- int ret;
GEM_BUG_ON(!ce->state);
-
- ret = intel_context_active_acquire(ce);
- if (ret)
- goto err;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto unpin_active;
- }
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
- ce->lrc_desc = lrc_descriptor(ce, engine);
+ ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
return 0;
-
-unpin_active:
- intel_context_active_release(ce);
-err:
- return ret;
}
static int execlists_context_pin(struct intel_context *ce)
@@ -2494,6 +2915,9 @@ static int execlists_context_alloc(struct intel_context *ce)
static void execlists_context_reset(struct intel_context *ce)
{
+ CE_TRACE(ce, "reset\n");
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* Because we emit WA_TAIL_DWORDS there may be a disparity
* between our bookkeeping in ce->ring->head and ce->ring->tail and
@@ -2510,8 +2934,14 @@ static void execlists_context_reset(struct intel_context *ce)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- intel_ring_reset(ce->ring, 0);
+ intel_ring_reset(ce->ring, ce->ring->emit);
+
+ /* Scrub away the garbage */
+ execlists_init_reg_state(ce->lrc_reg_state,
+ ce, ce->engine, ce->ring, true);
__execlists_update_reg_state(ce, ce->engine);
+
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
static const struct intel_context_ops execlists_context_ops = {
@@ -2730,6 +3160,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
@@ -2925,6 +3363,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
RING_HWS_PGA,
i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ engine->context_tag = 0;
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -3030,10 +3470,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
&execlists->csb_status[reset_value]);
}
-static void __execlists_reset_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- u32 *regs = ce->lrc_reg_state;
int x;
x = lrc_ring_mi_mode(engine);
@@ -3043,6 +3481,14 @@ static void __execlists_reset_reg_state(const struct intel_context *ce,
}
}
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ __reset_stop_ring(regs, engine);
+}
+
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -3203,6 +3649,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
i915_priolist_free(p);
}
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ mark_eio(rq);
+
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =
@@ -3795,7 +4245,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
- engine->release = execlists_release;
engine->resume = execlists_resume;
engine->cops = &execlists_context_ops;
@@ -3910,6 +4359,9 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
reset_csb_pointers(engine);
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = execlists_release;
+
return 0;
}
@@ -3949,18 +4401,21 @@ static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
static void init_common_reg_state(u32 * const regs,
const struct intel_engine_cs *engine,
- const struct intel_ring *ring)
+ const struct intel_ring *ring,
+ bool inhibit)
{
- regs[CTX_CONTEXT_CONTROL] =
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ u32 ctl;
+
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
if (INTEL_GEN(engine->i915) < 11)
- regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- regs[CTX_BB_STATE] = RING_BB_PPGTT;
}
static void init_wa_bb_reg_state(u32 * const regs,
@@ -4016,7 +4471,7 @@ static void execlists_init_reg_state(u32 *regs,
const struct intel_context *ce,
const struct intel_engine_cs *engine,
const struct intel_ring *ring,
- bool close)
+ bool inhibit)
{
/*
* A context is actually a big batch buffer with several
@@ -4028,21 +4483,17 @@ static void execlists_init_reg_state(u32 *regs,
*
* Must keep consistent with virtual_update_register_offsets().
*/
- u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- if (close) { /* Close the batch; used mainly by live_lrc_layout() */
- *bbe = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
- *bbe |= BIT(0);
- }
-
- init_common_reg_state(regs, engine, ring);
+ init_common_reg_state(regs, engine, ring, inhibit);
init_ppgtt_reg_state(regs, vm_alias(ce->vm));
init_wa_bb_reg_state(regs, engine,
INTEL_GEN(engine->i915) >= 12 ?
GEN12_CTX_BB_PER_CTX_PTR :
CTX_BB_PER_CTX_PTR);
+
+ __reset_stop_ring(regs, engine);
}
static int
@@ -4053,7 +4504,6 @@ populate_lr_context(struct intel_context *ce,
{
bool inhibit = true;
void *vaddr;
- u32 *regs;
int ret;
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
@@ -4083,11 +4533,8 @@ populate_lr_context(struct intel_context *ce,
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ce, engine, ring, inhibit);
- if (inhibit)
- regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+ ce, engine, ring, inhibit);
ret = 0;
err_unpin_ctx:
@@ -4481,9 +4928,11 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
+
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 893249ea48d4..eeef90b55c64 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -127,7 +127,7 @@ struct drm_i915_mocs_table {
LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
L3_3_WB)
-static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+static const struct drm_i915_mocs_entry skl_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
@@ -233,7 +233,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC)
-static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
+static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
/* Base - Error (Reserved for Non-Use) */
MOCS_ENTRY(0, 0x0, 0x0),
/* Base - Reserved */
@@ -267,7 +267,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
L3_3_WB),
};
-static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+static const struct drm_i915_mocs_entry icl_mocs_table[] = {
/* Base - Uncached (Deprecated) */
MOCS_ENTRY(I915_MOCS_UNCACHED,
LE_1_UC | LE_TC_1_LLC,
@@ -284,17 +284,17 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
if (INTEL_GEN(i915) >= 12) {
- table->size = ARRAY_SIZE(tigerlake_mocs_table);
- table->table = tigerlake_mocs_table;
+ table->size = ARRAY_SIZE(tgl_mocs_table);
+ table->table = tgl_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
} else if (IS_GEN(i915, 11)) {
- table->size = ARRAY_SIZE(icelake_mocs_table);
- table->table = icelake_mocs_table;
+ table->size = ARRAY_SIZE(icl_mocs_table);
+ table->table = icl_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
- table->size = ARRAY_SIZE(skylake_mocs_table);
+ table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- table->table = skylake_mocs_table;
+ table->table = skl_mocs_table;
} else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
new file mode 100644
index 000000000000..f86f7e68ce5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include "i915_trace.h"
+#include "intel_gtt.h"
+#include "gen6_ppgtt.h"
+#include "gen8_ppgtt.h"
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm)
+{
+ struct i915_page_table *pt;
+
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pt))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, &pt->base))) {
+ kfree(pt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&pt->used, 0);
+ return pt;
+}
+
+struct i915_page_directory *__alloc_pd(size_t sz)
+{
+ struct i915_page_directory *pd;
+
+ pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd))
+ return NULL;
+
+ spin_lock_init(&pd->lock);
+ return pd;
+}
+
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+{
+ struct i915_page_directory *pd;
+
+ pd = __alloc_pd(sizeof(*pd));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
+void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+{
+ cleanup_page_dma(vm, pd);
+ kfree(pd);
+}
+
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+ const unsigned short idx,
+ const u64 encoded_entry)
+{
+ u64 * const vaddr = kmap_atomic(pdma->page);
+
+ vaddr[idx] = encoded_entry;
+ kunmap_atomic(vaddr);
+}
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+ /* Each thread pre-pins the pd, and we may have a thread per pde. */
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+
+ atomic_inc(px_used(pd));
+ pd->entry[idx] = to;
+ write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+}
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch)
+{
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
+
+ write_dma_entry(px_base(pd), idx, scratch->encode);
+ pd->entry[idx] = NULL;
+ atomic_dec(px_used(pd));
+}
+
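+/*
+ * Drop a reference on @pt and, on the final reference, clear its
+ * entry in @pd. atomic_add_unless() handles the common case as a
+ * lockless decrement; only the potential final decrement retries
+ * under pd->lock, so that clearing the entry cannot race with a
+ * concurrent lookup re-acquiring the page table.
+ */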
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch)
+{
+ bool free = false;
+
+ if (atomic_add_unless(&pt->used, -1, 1))
+ return false;
+
+ spin_lock(&pd->lock);
+ if (atomic_dec_and_test(&pt->used)) {
+ clear_pd_entry(pd, idx, scratch);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+
+ return free;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ gtt_write_workarounds(gt);
+
+ if (IS_GEN(i915, 6))
+ gen6_ppgtt_enable(gt);
+ else if (IS_GEN(i915, 7))
+ gen7_ppgtt_enable(gt);
+
+ return 0;
+}
+
+static struct i915_ppgtt *
+__ppgtt_create(struct intel_gt *gt)
+{
+ if (INTEL_GEN(gt->i915) < 8)
+ return gen6_ppgtt_create(gt);
+ else
+ return gen8_ppgtt_create(gt);
+}
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = __ppgtt_create(gt);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
+
+ trace_i915_ppgtt_create(&ppgtt->vm);
+
+ return ppgtt;
+}
+
+static int ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+ int err;
+
+ if (flags & I915_VMA_ALLOC) {
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
+ }
+
+ /* Applicable to VLV, and gen8+ */
+ pte_flags = 0;
+ if (i915_gem_object_is_readonly(vma->obj))
+ pte_flags |= PTE_READ_ONLY;
+
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ wmb();
+
+ return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+}
+
+int ppgtt_set_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pages);
+
+ vma->pages = vma->obj->mm.pages;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ ppgtt->vm.gt = gt;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 1c51296646e0..beee0cf89bce 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -147,11 +147,7 @@ static void mark_innocent(struct i915_request *rq)
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
- GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
- rq->engine->name,
- rq->fence.context,
- rq->fence.seqno,
- yesno(guilty));
+ RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
GEM_BUG_ON(i915_request_completed(rq));
@@ -251,9 +247,8 @@ out:
return ret;
}
-static int ironlake_do_reset(struct intel_gt *gt,
- intel_engine_mask_t engine_mask,
- unsigned int retry)
+static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
+ unsigned int retry)
{
struct intel_uncore *uncore = gt->uncore;
int ret;
@@ -597,7 +592,7 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
else if (INTEL_GEN(i915) >= 6)
return gen6_reset_engines;
else if (INTEL_GEN(i915) >= 5)
- return ironlake_do_reset;
+ return ilk_do_reset;
else if (IS_G4X(i915))
return g4x_do_reset;
else if (IS_G33(i915) || IS_PINEVIEW(i915))
@@ -625,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
- GEM_TRACE("engine_mask=%x\n", engine_mask);
+ GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
preempt_disable();
ret = reset(gt, engine_mask, retry);
preempt_enable();
@@ -785,8 +780,7 @@ static void nop_submit_request(struct i915_request *request)
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
- engine->name, request->fence.context, request->fence.seqno);
+ RQ_TRACE(request, "-EIO\n");
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
@@ -813,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
- GEM_TRACE("start\n");
+ GT_TRACE(gt, "start\n");
/*
* First, stop submission to hw, but do not yet complete requests by
@@ -844,7 +838,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
reset_finish(gt, awake);
- GEM_TRACE("end\n");
+ GT_TRACE(gt, "end\n");
}
void intel_gt_set_wedged(struct intel_gt *gt)
@@ -870,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
return false;
- GEM_TRACE("start\n");
+ GT_TRACE(gt, "start\n");
/*
* Before unwedging, make sure that all pending operations
@@ -932,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
intel_engines_reset_default_submission(gt);
- GEM_TRACE("end\n");
+ GT_TRACE(gt, "end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &gt->reset.flags);
@@ -1007,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t awake;
int ret;
- GEM_TRACE("flags=%lx\n", gt->reset.flags);
+ GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
@@ -1236,7 +1230,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(gt->i915, engine_mask, msg);
+ i915_capture_error_state(gt->i915);
intel_gt_clear_error_registers(gt, engine_mask);
}
@@ -1329,10 +1323,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!intel_gt_is_wedged(gt))
return 0;
- /* Reset still in progress? Maybe we will recover? */
- if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ if (intel_gt_has_init_error(gt))
return -EIO;
+ /* Reset still in progress? Maybe we will recover? */
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
@@ -1354,6 +1348,9 @@ void intel_gt_init_reset(struct intel_gt *gt)
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+
+ /* no GPU until we are ready! */
+ __set_bit(I915_WEDGED, &gt->reset.flags);
}
void intel_gt_fini_reset(struct intel_gt *gt)
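The GEM_TRACE to GT_TRACE/RQ_TRACE conversion above moves the identity prefix into the macro itself, so call sites no longer repeat the engine/fence boilerplate. A minimal userspace C sketch of that pattern (the struct layouts and macro bodies here are illustrative, not the actual i915 macros):

#include <stdio.h>

struct gt { int id; };
struct rq { unsigned long long ctx; long long seqno; };

/* The macro supplies the object's identity, so each call site only
 * passes the message itself. */
#define GT_TRACE(gt, fmt, ...) \
	printf("gt%d: " fmt, (gt)->id, ##__VA_ARGS__)
#define RQ_TRACE(rq, fmt, ...) \
	printf("rq %llx:%lld: " fmt, (rq)->ctx, (rq)->seqno, ##__VA_ARGS__)

int main(void)
{
	struct gt gt = { .id = 0 };
	struct rq rq = { .ctx = 0x1234, .seqno = 2 };

	GT_TRACE(&gt, "engine_mask=%x\n", 0x3u);
	RQ_TRACE(&rq, "guilty? %s\n", "yes");
	return 0;
}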
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 81f872f9ef03..bc44fe8e5ffa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -33,6 +33,7 @@
#include "gem/i915_gem_context.h"
+#include "gen6_ppgtt.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -1328,26 +1329,12 @@ static int ring_context_alloc(struct intel_context *ce)
static int ring_context_pin(struct intel_context *ce)
{
- int err;
-
- err = intel_context_active_acquire(ce);
- if (err)
- return err;
-
- err = __context_pin_ppgtt(ce);
- if (err)
- goto err_active;
-
- return 0;
-
-err_active:
- intel_context_active_release(ce);
- return err;
+ return __context_pin_ppgtt(ce);
}
static void ring_context_reset(struct intel_context *ce)
{
- intel_ring_reset(ce->ring, 0);
+ intel_ring_reset(ce->ring, ce->ring->emit);
}
static const struct intel_context_ops ring_context_ops = {
@@ -1394,7 +1381,7 @@ static int load_pd_dir(struct i915_request *rq,
intel_ring_advance(rq, cs);
- return 0;
+ return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
static inline int mi_set_context(struct i915_request *rq, u32 flags)
@@ -1408,14 +1395,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
int len;
u32 *cs;
- flags |= MI_MM_SPACE_GTT;
- if (IS_HASWELL(i915))
- /* These flags are for resource streamer on HSW+ */
- flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
- else
- /* We need to save the extended state for powersaving modes */
- flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
@@ -1592,7 +1571,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
if (ret)
return ret;
- return rq->engine->emit_flush(rq, EMIT_FLUSH);
+ return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
static int switch_context(struct i915_request *rq)
@@ -1607,15 +1586,21 @@ static int switch_context(struct i915_request *rq)
return ret;
if (ce->state) {
- u32 hw_flags;
+ u32 flags;
GEM_BUG_ON(rq->engine->id != RCS0);
- hw_flags = 0;
- if (!test_bit(CONTEXT_VALID_BIT, &ce->flags))
- hw_flags = MI_RESTORE_INHIBIT;
+ /* For resource streamer on HSW+ and power context elsewhere */
+ BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+ BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+ flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+ if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
+ flags |= MI_RESTORE_EXT_STATE_EN;
+ else
+ flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, hw_flags);
+ ret = mi_set_context(rq, flags);
if (ret)
return ret;
}
@@ -1842,8 +1827,6 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
- engine->release = ring_release;
-
engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
@@ -2009,6 +1992,9 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = ring_release;
+
return 0;
err_ring:
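Moving engine->release to the end of intel_ring_submission_setup() means the engine owns its teardown only once construction has fully succeeded; until then, the error path unwinds partial state by hand. A sketch of that ownership handover, with invented types for illustration:

#include <stdlib.h>

struct engine {
	void *ring;
	void (*release)(struct engine *e);
};

static void engine_release(struct engine *e)
{
	free(e->ring);
}

static int engine_setup(struct engine *e)
{
	e->ring = malloc(4096);
	if (!e->ring)
		return -1;	/* unwind partial state here, not in ->release */

	/* Only now take ownership and responsibility for cleanup */
	e->release = engine_release;
	return 0;
}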
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index f232036c3c7a..d2a3d935d186 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq)
spin_lock_irqsave(&rq->lock, flags);
if (!i915_request_has_waitboost(rq) &&
!dma_fence_is_signaled_locked(&rq->fence)) {
- rq->flags |= I915_REQUEST_WAITBOOST;
+ set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
if (!atomic_fetch_inc(&rps->num_waiters) &&
READ_ONCE(rps->cur_freq) < rps->boost_freq)
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index ee5dc4fbdeb9..87716529cd2f 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -348,7 +348,6 @@ void intel_timeline_enter(struct intel_timeline *tl)
* use atomic to manipulate tl->active_count.
*/
lockdep_assert_held(&tl->mutex);
- GEM_BUG_ON(!atomic_read(&tl->pin_count));
if (atomic_add_unless(&tl->active_count, 1, 0))
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 195ccf7db272..4e292d4bf7b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -254,7 +254,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaDisableDopClockGating:bdw
*
- * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+ * Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 4e1eafa94be9..f2806381733f 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -59,11 +59,26 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
+ ring->vma = i915_vma_alloc();
+ if (!ring->vma) {
+ kfree(ring);
+ return NULL;
+ }
+ i915_active_init(&ring->vma->active, NULL, NULL);
+
intel_ring_update_space(ring);
return ring;
}
+static void mock_ring_free(struct intel_ring *ring)
+{
+ i915_active_fini(&ring->vma->active);
+ i915_vma_free(ring->vma);
+
+ kfree(ring);
+}
+
static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@@ -121,7 +136,7 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
- kfree(ce->ring);
+ mock_ring_free(ce->ring);
mock_timeline_unpin(ce->timeline);
}
@@ -149,7 +164,11 @@ static int mock_context_alloc(struct intel_context *ce)
static int mock_context_pin(struct intel_context *ce)
{
- return intel_context_active_acquire(ce);
+ return 0;
+}
+
+static void mock_context_reset(struct intel_context *ce)
+{
}
static const struct intel_context_ops mock_context_ops = {
@@ -161,6 +180,7 @@ static const struct intel_context_ops mock_context_ops = {
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
+ .reset = mock_context_reset,
.destroy = mock_context_destroy,
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 5dbda2a74272..3e5e6c86e843 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1312,7 +1312,7 @@ static int igt_reset_evict_ppgtt(void *arg)
if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
return 0;
- ppgtt = i915_ppgtt_create(gt->i915);
+ ppgtt = i915_ppgtt_create(gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1498,7 +1498,7 @@ static int igt_handle_error(void *arg)
struct intel_engine_cs *engine = gt->engine[RCS0];
struct hang h;
struct i915_request *rq;
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
int err;
/* Check that we can issue a global GPU and engine reset */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 9ec9833c9c7b..65718ca2326e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -285,6 +285,107 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But the request is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ engine_heartbeat_enable(engine, heartbeat);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -527,13 +628,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
return rq;
}
-static void wait_for_submit(struct intel_engine_cs *engine,
- struct i915_request *rq)
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
{
+ timeout += jiffies;
do {
cond_resched();
intel_engine_flush_submission(engine);
- } while (!i915_request_is_active(rq));
+ if (i915_request_is_active(rq))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
}
static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -601,7 +708,12 @@ static int live_timeslice_queue(void *arg)
goto err_heartbeat;
}
engine->schedule(rq, &attr);
- wait_for_submit(engine, rq);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
/* ELSP[1]: nop request */
nop = nop_request(engine);
@@ -609,8 +721,13 @@ static int live_timeslice_queue(void *arg)
err = PTR_ERR(nop);
goto err_rq;
}
- wait_for_submit(engine, nop);
+ err = wait_for_submit(engine, nop, HZ / 2);
i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
GEM_BUG_ON(i915_request_completed(rq));
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
@@ -1137,7 +1254,7 @@ static int live_nopreempt(void *arg)
}
/* Low priority client, but unpreemptable! */
- rq_a->flags |= I915_REQUEST_NOPREEMPT;
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
@@ -3293,12 +3410,168 @@ static int live_virtual_bond(void *arg)
return 0;
}
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ unsigned long *heartbeat;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
+ if (!heartbeat)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; it fails, of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != ve->engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But the request is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_enable(siblings[n], heartbeat[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+out_free:
+ kfree(heartbeat);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure no
+ * request is lost.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_hold_reset),
SUBTEST(live_timeslice_preempt),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
@@ -3317,6 +3590,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
};
if (!HAS_EXECLISTS(i915))
@@ -3362,7 +3636,7 @@ static int live_lrc_layout(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 *mem;
+ u32 *lrc;
int err;
/*
@@ -3370,13 +3644,13 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!mem)
+ lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!lrc)
return -ENOMEM;
err = 0;
for_each_engine(engine, gt, id) {
- u32 *hw, *lrc;
+ u32 *hw;
int dw;
if (!engine->default_state)
@@ -3390,8 +3664,7 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
- lrc = memset(mem, 0, PAGE_SIZE);
- execlists_init_reg_state(lrc,
+ execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context,
engine,
engine->kernel_context->ring,
@@ -3406,6 +3679,13 @@ static int live_lrc_layout(void *arg)
continue;
}
+ if (lrc[dw] == 0) {
+ pr_debug("%s: skipped instruction %x at dword %d\n",
+ engine->name, lri, dw);
+ dw++;
+ continue;
+ }
+
if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
@@ -3454,7 +3734,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(mem);
+ kfree(lrc);
return err;
}
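The wait_for_submit() change above converts an unbounded busy-wait into a bounded poll that reports -ETIME, so a stuck request fails the selftest instead of hanging it. A self-contained userspace sketch of the same deadline-polling shape (names here are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll the condition until a deadline and report -ETIME, instead of
 * spinning forever on a request that never becomes active. */
static int wait_for(bool (*cond)(void *data), void *data, int timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	do {
		if (cond(data))
			return 0;
	} while (now_ms() < deadline);

	return -ETIME;
}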
diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile
deleted file mode 100644
index bec94d434cb6..000000000000
--- a/drivers/gpu/drm/i915/gt/uc/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/../..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 3ffc6267f96e..64934a876a50 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -12,6 +12,9 @@
#include "i915_drv.h"
+static const struct intel_uc_ops uc_ops_off;
+static const struct intel_uc_ops uc_ops_on;
+
/*
 * Reset GuC, providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -89,6 +92,11 @@ void intel_uc_init_early(struct intel_uc *uc)
intel_huc_init_early(&uc->huc);
__confirm_options(uc);
+
+ if (intel_uc_uses_guc(uc))
+ uc->ops = &uc_ops_on;
+ else
+ uc->ops = &uc_ops_off;
}
void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -245,12 +253,11 @@ static void guc_disable_communication(struct intel_guc *guc)
DRM_INFO("GuC communication disabled\n");
}
-void intel_uc_fetch_firmwares(struct intel_uc *uc)
+static void __uc_fetch_firmwares(struct intel_uc *uc)
{
int err;
- if (!intel_uc_uses_guc(uc))
- return;
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
@@ -260,20 +267,19 @@ void intel_uc_fetch_firmwares(struct intel_uc *uc)
intel_uc_fw_fetch(&uc->huc.fw);
}
-void intel_uc_cleanup_firmwares(struct intel_uc *uc)
+static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-void intel_uc_init(struct intel_uc *uc)
+static void __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- if (!intel_uc_uses_guc(uc))
- return;
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
/* XXX: GuC submission is unavailable for now */
GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
@@ -288,7 +294,7 @@ void intel_uc_init(struct intel_uc *uc)
intel_huc_init(huc);
}
-void intel_uc_fini(struct intel_uc *uc)
+static void __uc_fini(struct intel_uc *uc)
{
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
@@ -309,14 +315,6 @@ static int __uc_sanitize(struct intel_uc *uc)
return __intel_uc_reset_hw(uc);
}
-void intel_uc_sanitize(struct intel_uc *uc)
-{
- if (!intel_uc_supports_guc(uc))
- return;
-
- __uc_sanitize(uc);
-}
-
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
@@ -380,13 +378,8 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc)
(intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}
-int intel_uc_init_hw(struct intel_uc *uc)
+static int __uc_check_hw(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- struct intel_guc *guc = &uc->guc;
- struct intel_huc *huc = &uc->huc;
- int ret, attempts;
-
if (!intel_uc_supports_guc(uc))
return 0;
@@ -395,11 +388,24 @@ int intel_uc_init_hw(struct intel_uc *uc)
* before on this system after reboot, otherwise we risk GPU hangs.
* To check if GuC was loaded before we look at WOPCM registers.
*/
- if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
- return 0;
+ if (uc_is_wopcm_locked(uc))
+ return -EIO;
+
+ return 0;
+}
+
+static int __uc_init_hw(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret, attempts;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!intel_uc_uses_guc(uc));
if (!intel_uc_fw_is_available(&guc->fw)) {
- ret = uc_is_wopcm_locked(uc) ||
+ ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
intel_uc_supports_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
@@ -495,7 +501,7 @@ err_out:
return -EIO;
}
-void intel_uc_fini_hw(struct intel_uc *uc)
+static void __uc_fini_hw(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
@@ -595,3 +601,20 @@ int intel_uc_runtime_resume(struct intel_uc *uc)
*/
return __uc_resume(uc, true);
}
+
+static const struct intel_uc_ops uc_ops_off = {
+ .init_hw = __uc_check_hw,
+};
+
+static const struct intel_uc_ops uc_ops_on = {
+ .sanitize = __uc_sanitize,
+
+ .init_fw = __uc_fetch_firmwares,
+ .fini_fw = __uc_cleanup_firmwares,
+
+ .init = __uc_init,
+ .fini = __uc_fini,
+
+ .init_hw = __uc_init_hw,
+ .fini_hw = __uc_fini_hw,
+};
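The refactor above replaces the per-function "if (!intel_uc_uses_guc(uc)) return;" guards with two const vtables selected once at early init. A self-contained C sketch of the same pattern (all names invented for illustration):

#include <stdio.h>

struct uc;

struct uc_ops {
	int (*init_hw)(struct uc *uc);
	void (*fini_hw)(struct uc *uc);
};

struct uc {
	const struct uc_ops *ops;
	int use_guc;
};

static int full_init_hw(struct uc *uc) { puts("load firmware"); return 0; }
static void full_fini_hw(struct uc *uc) { puts("teardown"); }
static int check_hw_only(struct uc *uc) { puts("verify state"); return 0; }

static const struct uc_ops uc_ops_on = {
	.init_hw = full_init_hw,
	.fini_hw = full_fini_hw,
};

static const struct uc_ops uc_ops_off = {
	.init_hw = check_hw_only,	/* fini_hw intentionally NULL */
};

/* Select the vtable once, from configuration; later call sites
 * dispatch through uc->ops without re-testing the condition. */
static void uc_init_early(struct uc *uc)
{
	uc->ops = uc->use_guc ? &uc_ops_on : &uc_ops_off;
}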
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 527995c21196..49c913524686 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -10,7 +10,20 @@
#include "intel_huc.h"
#include "i915_params.h"
+struct intel_uc;
+
+struct intel_uc_ops {
+ int (*sanitize)(struct intel_uc *uc);
+ void (*init_fw)(struct intel_uc *uc);
+ void (*fini_fw)(struct intel_uc *uc);
+ void (*init)(struct intel_uc *uc);
+ void (*fini)(struct intel_uc *uc);
+ int (*init_hw)(struct intel_uc *uc);
+ void (*fini_hw)(struct intel_uc *uc);
+};
+
struct intel_uc {
+ struct intel_uc_ops const *ops;
struct intel_guc guc;
struct intel_huc huc;
@@ -21,13 +34,6 @@ struct intel_uc {
void intel_uc_init_early(struct intel_uc *uc);
void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
-void intel_uc_fetch_firmwares(struct intel_uc *uc);
-void intel_uc_cleanup_firmwares(struct intel_uc *uc);
-void intel_uc_sanitize(struct intel_uc *uc);
-void intel_uc_init(struct intel_uc *uc);
-int intel_uc_init_hw(struct intel_uc *uc);
-void intel_uc_fini_hw(struct intel_uc *uc);
-void intel_uc_fini(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
void intel_uc_runtime_suspend(struct intel_uc *uc);
@@ -64,4 +70,20 @@ static inline bool intel_uc_uses_huc(struct intel_uc *uc)
return intel_huc_is_enabled(&uc->huc);
}
+#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
+static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
+{ \
+ if (uc->ops->_OPS) \
+ return uc->ops->_OPS(uc); \
+ return _RET; \
+}
+intel_uc_ops_function(sanitize, sanitize, int, 0);
+intel_uc_ops_function(fetch_firmwares, init_fw, void, );
+intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
+intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(fini, fini, void, );
+intel_uc_ops_function(init_hw, init_hw, int, 0);
+intel_uc_ops_function(fini_hw, fini_hw, void, );
+#undef intel_uc_ops_function
+
#endif
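The intel_uc_ops_function() macro above stamps out inline wrappers that tolerate NULL slots and fall back to a default return; note how an empty _RET argument yields a bare "return;" for void hooks. A standalone sketch of the generator (struct names invented here; returning a void expression from a void function is a GNU C extension, as used in the kernel):

struct uc;

struct uc_ops {
	int (*init_hw)(struct uc *uc);
	void (*fini_hw)(struct uc *uc);
};

struct uc {
	const struct uc_ops *ops;
};

/* One macro stamps out a NULL-tolerant wrapper per hook, so sparse
 * vtables (like uc_ops_off above) may leave most slots unset. */
#define uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE uc_##_NAME(struct uc *uc) \
{ \
	if (uc->ops->_OPS) \
		return uc->ops->_OPS(uc); \
	return _RET; \
}
uc_ops_function(init_hw, init_hw, int, 0)
uc_ops_function(fini_hw, fini_hw, void, )
#undef uc_ops_function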
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index e451298d11c3..2477a1e5a166 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -36,13 +36,32 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+ unsigned long size,
+ dma_addr_t dma_addr)
+{
+ int ret = 0;
+
+ if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
+{
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct intel_vgpu *vgpu;
struct sg_table *st;
struct scatterlist *sg;
- int i, ret;
+ int i, j, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
if (WARN_ON(!fb_info))
return -ENODEV;
+ vgpu = fb_info->obj->vgpu;
+ if (WARN_ON(!vgpu))
+ return -ENODEV;
+
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (unlikely(!st))
return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
+ dma_addr_t dma_addr =
+ GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+ if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
sg->offset = 0;
sg->length = PAGE_SIZE;
- sg_dma_address(sg) =
- GEN8_DECODE_PTE(readq(&gtt_entries[i]));
sg_dma_len(sg) = PAGE_SIZE;
+ sg_dma_address(sg) = dma_addr;
}
__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+ if (ret) {
+ dma_addr_t dma_addr;
+
+ for_each_sg(st->sgl, sg, i, j) {
+ dma_addr = sg_dma_address(sg);
+ if (dma_addr)
+ vgpu_unpin_dma_address(vgpu, dma_addr);
+ }
+ sg_free_table(st);
+ kfree(st);
+ }
+
+ return ret;
- return 0;
}
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct scatterlist *sg;
+
+ if (obj->base.dma_buf) {
+ struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+ struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+ struct intel_vgpu *vgpu = obj->vgpu;
+ int i;
+
+ for_each_sg(pages->sgl, sg, fb_info->size, i)
+ vgpu_unpin_dma_address(vgpu,
+ sg_dma_address(sg));
+ }
+
sg_free_table(pages);
kfree(pages);
}
@@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+ i915_gem_object_set_readonly(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
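The vgpu_gem_get_pages() change above pins each page's DMA mapping and, on failure, unwinds exactly the pins taken so far. A small C sketch of that pin-all-or-unwind shape (the pin/unpin operations are simulated with counters for illustration):

#include <errno.h>

/* Pin n resources; on failure, release exactly those already
 * pinned, in the spirit of the vgpu_gem_get_pages() error path. */
static int pin_all(int *pin_count, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (pin_count[i] < 0) {		/* simulated pin failure */
			while (i--)
				pin_count[i]--;	/* unwind prior pins */
			return -EINVAL;
		}
		pin_count[i]++;
	}
	return 0;
}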
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 049775e8e350..b0c1fda32977 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -146,7 +146,7 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
- kfree(gvt->firmware.mmio);
+ vfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
@@ -229,7 +229,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->cfg_space = mem;
- mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ mem = vmalloc(info->mmio_size);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 34cb404ba4b7..4a4828074cb7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
if (mm->type == INTEL_GVT_MM_PPGTT) {
list_del(&mm->ppgtt_mm.list);
+
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_del(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bb9fe6bf5275..6d28d72e6c7e 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
engine_mask |= BIT(VCS1);
}
+ if (data & GEN9_GRDOM_GUC) {
+ gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+ vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+ }
engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
@@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
+static int guc_status_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ /* keep MIA_IN_RESET before clearing */
+ read_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -2672,10 +2686,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
return 0;
}
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
@@ -3364,20 +3380,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
if (IS_BROADWELL(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_COFFEELAKE(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
} else if (IS_BROXTON(dev_priv)) {
- ret = init_broadwell_mmio_info(gvt);
+ ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
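The guc_status_read() handler above implements read-then-clear semantics: the latched GS_MIA_IN_RESET bit is reported once, then dropped, so the guest observes the reset event exactly one time. A tiny C sketch of the pattern:

/* Report the latched bit once, then clear it, so the reader sees
 * the event exactly one time. */
static unsigned int read_and_clear(unsigned int *reg, unsigned int bit)
{
	unsigned int val = *reg;

	*reg &= ~bit;
	return val;
}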
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 9599c0a762b2..b17c4a1599cd 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -66,6 +66,8 @@ struct intel_gvt_mpt {
unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+ int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 04a5a0d90823..3259a1fa69e1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1916,6 +1916,28 @@ err_unlock:
return ret;
}
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct gvt_dma *entry;
+ int ret = 0;
+
+ if (!handle_valid(handle))
+ return -ENODEV;
+
+ info = (struct kvmgt_guest_info *)handle;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ if (entry)
+ kref_get(&entry->ref);
+ else
+ ret = -ENOMEM;
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+ return ret;
+}
+
static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+ .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
.set_opregion = kvmgt_set_opregion,
.set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 0f9440128123..9ad224df9c68 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -255,6 +255,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
}
/**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin a guest page for DMA
+ * @vgpu: a vGPU
+ * @dma_addr: guest DMA address
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
+{
+ return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
+/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b3299f88e24e..685d1e04a5ff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1224,7 +1224,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
enum intel_engine_id i;
int ret;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index d5a6e4e3d0fd..85bd9bf4f6ee 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = true;
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cfe09964622b..b0a499753526 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -416,13 +416,15 @@ int i915_active_acquire(struct i915_active *ref)
if (err)
return err;
- if (!atomic_read(&ref->count) && ref->active)
- err = ref->active(ref);
- if (!err) {
- spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
- debug_active_activate(ref);
- atomic_inc(&ref->count);
- spin_unlock_irq(&ref->tree_lock);
+ if (likely(!i915_active_acquire_if_busy(ref))) {
+ if (ref->active)
+ err = ref->active(ref);
+ if (!err) {
+ spin_lock_irq(&ref->tree_lock); /* __active_retire() */
+ debug_active_activate(ref);
+ atomic_inc(&ref->count);
+ spin_unlock_irq(&ref->tree_lock);
+ }
}
mutex_unlock(&ref->mutex);
@@ -605,12 +607,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
intel_engine_mask_t tmp, mask = engine->mask;
+ struct llist_node *first = NULL, *last = NULL;
struct intel_gt *gt = engine->gt;
- struct llist_node *pos, *next;
int err;
GEM_BUG_ON(i915_active_is_idle(ref));
- GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+ /* Wait until the previous preallocation is completed */
+ while (!llist_empty(&ref->preallocated_barriers))
+ cond_resched();
/*
* Preallocate a node for each physical engine supporting the target
@@ -620,6 +625,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
*/
for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
+ struct llist_node *prev = first;
struct active_node *node;
node = reuse_idle_barrier(ref, idx);
@@ -653,15 +659,23 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
- llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
+ first = barrier_to_ll(node);
+ first->next = prev;
+ if (!last)
+ last = first;
intel_engine_pm_get(engine);
}
+ GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+ llist_add_batch(first, last, &ref->preallocated_barriers);
+
return 0;
unwind:
- llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
- struct active_node *node = barrier_from_ll(pos);
+ while (first) {
+ struct active_node *node = barrier_from_ll(first);
+
+ first = first->next;
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));
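The barrier preallocation above now builds a private chain (first/last) and publishes it in one step via llist_add_batch(), rather than adding nodes to the shared list one at a time. A userspace sketch of build-privately-then-publish, including the manual unwind on failure:

#include <stdlib.h>

struct node { struct node *next; int v; };

/* Build a private chain first, then splice it into the shared list
 * in one step (the llist_add_batch() analogue); a partial failure
 * unwinds only the nodes built so far. */
static struct node *build_chain(int n)
{
	struct node *first = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct node *node = malloc(sizeof(*node));

		if (!node)
			goto unwind;
		node->v = i;
		node->next = first;	/* prepend, as the patch does */
		first = node;
	}
	return first;

unwind:
	while (first) {
		struct node *next = first->next;

		free(first);
		first = next;
	}
	return NULL;
}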
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b571f675c795..51e1e854ca55 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -188,6 +188,12 @@ int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
+static inline void __i915_active_acquire(struct i915_active *ref)
+{
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ atomic_inc(&ref->count);
+}
+
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
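__i915_active_acquire() above is a "get" that is only legal while a reference is already held: the GEM_BUG_ON documents the precondition rather than handling the racy 0 -> 1 activation. A plain C sketch:

#include <assert.h>

struct ref { int count; };

/* Caller must already own a reference; asserting count != 0 makes
 * the precondition explicit instead of racing a 0 -> 1 activation. */
static inline void ref_get_held(struct ref *r)
{
	assert(r->count);
	r->count++;
}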
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index e9d4200ce3bc..66883af64ca1 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -262,8 +262,10 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
struct i915_buddy_block *block, *on;
- list_for_each_entry_safe(block, on, objects, link)
+ list_for_each_entry_safe(block, on, objects, link) {
i915_buddy_free(mm, block);
+ cond_resched();
+ }
INIT_LIST_HEAD(objects);
}
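Freeing a very long buddy list without ever yielding can hog the CPU, so the patch adds cond_resched() per iteration. A userspace sketch of the same idea, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdlib.h>

struct block { struct block *next; };

/* Yield once per iteration while tearing down a long list, so other
 * work is not starved for the duration of the loop. */
static void free_blocks(struct block *head)
{
	while (head) {
		struct block *next = head->next;

		free(head);
		sched_yield();
		head = next;
	}
}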
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d28468eaed57..d5a9b8a964c2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m,
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- intel_context_lock_pinned(ce);
- if (intel_context_is_pinned(ce)) {
+ if (intel_context_pin_if_active(ce)) {
rcu_read_lock();
if (ce->state)
per_file_stats(0,
ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
rcu_read_unlock();
+ intel_context_unpin(ce);
}
- intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -367,12 +366,16 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
-
+ for_each_memory_region(mr, i915, id)
+ seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
+ mr->name, &mr->total, &mr->avail);
seq_putc(m, '\n');
print_context_stats(m, i915);
@@ -682,7 +685,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
ssize_t ret;
void *buf;
@@ -695,7 +698,7 @@ static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
if (!buf)
return -ENOMEM;
- ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
if (ret <= 0)
goto out;
@@ -711,19 +714,19 @@ out:
static int gpu_state_release(struct inode *inode, struct file *file)
{
- i915_gpu_state_put(file->private_data);
+ i915_gpu_coredump_put(file->private_data);
return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_state *gpu;
+ struct i915_gpu_coredump *gpu;
intel_wakeref_t wakeref;
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_capture_gpu_state(i915);
+ gpu = i915_gpu_coredump(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -745,7 +748,7 @@ i915_error_state_write(struct file *filp,
size_t cnt,
loff_t *ppos)
{
- struct i915_gpu_state *error = filp->private_data;
+ struct i915_gpu_coredump *error = filp->private_data;
if (!error)
return 0;
@@ -758,7 +761,7 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
error = i915_first_error_state(inode->i_private);
if (IS_ERR(error))
@@ -1001,7 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ironlake_drpc_info(struct seq_file *m)
+static int ilk_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore *uncore = &i915->uncore;
@@ -1209,7 +1212,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
else if (INTEL_GEN(dev_priv) >= 6)
err = gen6_drpc_info(m);
else
- err = ironlake_drpc_info(m);
+ err = ilk_drpc_info(m);
}
return err;
@@ -1509,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- intel_context_lock_pinned(ce);
- if (intel_context_is_pinned(ce)) {
+ if (intel_context_pin_if_active(ce)) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
+ intel_context_unpin(ce);
}
- intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -1977,7 +1979,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
struct drm_connector *connector = m->private;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
int ret;
if (!CAN_PSR(dev_priv)) {
@@ -2389,7 +2391,7 @@ static void intel_dp_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
@@ -2409,7 +2411,7 @@ static void intel_dp_mst_info(struct seq_file *m,
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(&intel_encoder->base);
+ enc_to_mst(intel_encoder);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
@@ -2422,7 +2424,7 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
if (intel_connector->hdcp.shim) {
@@ -3012,11 +3014,11 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
- intel_encoder = intel_attached_encoder(connector);
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ intel_dig_port = enc_to_dig_port(intel_encoder);
if (!intel_dig_port->dp.can_mst)
continue;
@@ -3066,7 +3068,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
@@ -3075,9 +3077,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
- intel_dp->compliance.test_active = 1;
+ intel_dp->compliance.test_active = true;
else
- intel_dp->compliance.test_active = 0;
+ intel_dp->compliance.test_active = false;
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -3110,7 +3112,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
@@ -3160,7 +3162,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
@@ -3204,7 +3206,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
continue;
if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
@@ -3815,8 +3817,8 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
#undef SS_MAX
}
-static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
+static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
@@ -3901,7 +3903,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
- broadwell_sseu_device_status(dev_priv, &sseu);
+ bdw_sseu_device_status(dev_priv, &sseu);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_device_status(dev_priv, &sseu);
else if (INTEL_GEN(dev_priv) >= 10)
@@ -4142,14 +4144,14 @@ static int i915_drrs_ctl_set(void *data, u64 val)
drm_connector_mask(connector)))
continue;
- encoder = intel_attached_encoder(connector);
+ encoder = intel_attached_encoder(to_intel_connector(connector));
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
val ? "en" : "dis", val);
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(encoder);
if (val)
intel_edp_drrs_enable(intel_dp,
crtc_state);
@@ -4353,7 +4355,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
u8 buf[16];
ssize_t err;
int i;
@@ -4388,7 +4390,7 @@ static int i915_panel_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4466,7 +4468,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
} else if (ret) {
break;
}
- intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
yesno(crtc_state->dsc.compression_enable));
@@ -4493,8 +4495,8 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
int ret;
struct drm_connector *connector =
((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (len == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3bfc777a7a34..801197fb40c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -469,6 +469,12 @@ static void vlv_free_s0ix_state(struct drm_i915_private *i915)
i915->vlv_s0ix_state = NULL;
}
+static void sanitize_gpu(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(&i915->gt, ALL_ENGINES);
+}
+
/**
* i915_driver_early_probe - setup state not requiring device access
* @dev_priv: device private
@@ -602,6 +608,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_uncore;
+ /* As early as possible, scrub existing GPU state before clobbering */
+ sanitize_gpu(dev_priv);
+
return 0;
err_uncore:
@@ -1817,7 +1826,7 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_gt_sanitize(&dev_priv->gt, true);
+ sanitize_gpu(dev_priv);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d05a968227f7..077af22b8340 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -46,6 +46,7 @@
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
+#include <linux/xarray.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
@@ -110,8 +111,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20191223"
-#define DRIVER_TIMESTAMP 1577120893
+#define DRIVER_DATE "20200114"
+#define DRIVER_TIMESTAMP 1579001978
struct drm_i915_gem_object;
@@ -201,8 +202,7 @@ struct drm_i915_file_private {
struct list_head request_list;
} mm;
- struct idr context_idr;
- struct mutex context_idr_lock; /* guards context_idr */
+ struct xarray context_xa;
struct idr vm_idr;
struct mutex vm_idr_lock; /* guards vm_idr */
@@ -505,6 +505,7 @@ struct i915_psr {
bool dc3co_enabled;
u32 dc3co_exit_delay;
struct delayed_work idle_work;
+ bool initially_probed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -1252,6 +1253,16 @@ struct drm_i915_private {
struct llist_head free_list;
struct work_struct free_work;
} contexts;
+
+ /*
+ * We replace the local file with a global mapping, as the
+ * backing storage for the mmap is on the device and not
+ * on the struct file, and we do not want to prolong the
+ * lifetime of the local fd. To minimise the number of
+ * anonymous inodes we create, we use a global singleton to
+ * share the global mapping.
+ */
+ struct file *mmap_singleton;
} gem;
u8 pch_ssc_use;
@@ -1657,8 +1668,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
/* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- IS_GEN_RANGE(dev_priv, 9, 10)
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+ (IS_CANNONLAKE(dev_priv) || \
+ IS_SKL_GT3(dev_priv) || \
+ IS_SKL_GT4(dev_priv))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -1861,7 +1874,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
}
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
- struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine)
{
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
@@ -1889,7 +1902,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
- return idr_find(&file_priv->context_idr, id);
+ return xa_load(&file_priv->context_xa, id);
}
static inline struct i915_gem_context *
@@ -2015,6 +2028,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase);
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9ddcf17230e6..c2de2f45b459 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -45,6 +45,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -200,7 +201,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
static int
i915_gem_create(struct drm_file *file,
- struct drm_i915_private *dev_priv,
+ struct intel_memory_region *mr,
u64 *size_p,
u32 *handle_p)
{
@@ -209,12 +210,16 @@ i915_gem_create(struct drm_file *file,
u64 size;
int ret;
- size = round_up(*size_p, PAGE_SIZE);
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
if (size == 0)
return -EINVAL;
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
/* Allocate the new object */
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_region(mr, size, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -234,6 +239,7 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ enum intel_memory_type mem_type;
int cpp = DIV_ROUND_UP(args->bpp, 8);
u32 format;
@@ -259,8 +265,18 @@ i915_gem_dumb_create(struct drm_file *file,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
- args->size = args->pitch * args->height;
- return i915_gem_create(file, to_i915(dev),
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
&args->size, &args->handle);
}
@@ -274,12 +290,14 @@ int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create *args = data;
- i915_gem_flush_free_objects(dev_priv);
+ i915_gem_flush_free_objects(i915);
- return i915_gem_create(file, dev_priv,
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
&args->size, &args->handle);
}
@@ -1172,6 +1190,8 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
+ i915_gem_driver_release__contexts(dev_priv);
+
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
@@ -1179,8 +1199,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_gem_driver_release__contexts(dev_priv);
-
i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 71efccfde122..d9c34a23cd67 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -412,6 +412,9 @@ int i915_vma_pin_fence(struct i915_vma *vma)
{
int err;
+ if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
+ return 0;
+
/*
* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
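The i915_vma_pin_fence() hunk above adds a fast path: an untiled object with no fence allocated needs no work at all, so the heavy path is skipped entirely. A generic sketch of the cheap early-out (types invented for illustration):

struct vma {
	void *fence;
	int tiled;
};

/* Cheap early-out before the expensive path: nothing to pin when
 * no fence exists and the object is untiled. */
static int pin_fence(struct vma *v)
{
	if (!v->fence && !v->tiled)
		return 0;

	/* ... heavy path: wait for, allocate or steal a fence ... */
	return 0;
}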
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1efe58ad0ce9..e039eb56900f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1,26 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2010 Daniel Vetter
- * Copyright © 2011-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2020 Intel Corporation
*/
#include <linux/slab.h> /* fault-inject.h is not standalone! */
@@ -45,2116 +26,6 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
-#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
-
-#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
-#define DBG(...) trace_printk(__VA_ARGS__)
-#else
-#define DBG(...)
-#endif
-
-#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
-
-/**
- * DOC: Global GTT views
- *
- * Background and previous state
- *
- * Historically objects could exist (be bound) in global GTT space only as
- * singular instances with a view representing all of the object's backing pages
- * in a linear fashion. This view will be called a normal view.
- *
- * To support multiple views of the same object, where the number of mapped
- * pages is not equal to the backing store, or where the layout of the pages
- * is not linear, the concept of a GGTT view was added.
- *
- * One example of an alternative view is a stereo display driven by a single
- * image. In this case we would have a framebuffer looking like this
- * (2x2 pages):
- *
- * 12
- * 34
- *
- * The above would represent a normal GGTT view as normally mapped for GPU or
- * CPU rendering. In contrast, fed to the display engine would be an
- * alternative view which could look something like this:
- *
- * 1212
- * 3434
- *
- * In this example both the size and layout of pages in the alternative view
- * are different from the normal view.
- *
- * Implementation and usage
- *
- * GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
- *
- * A new flavour of core GEM functions which work with GGTT bound objects was
- * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
- * renaming in large amounts of code. They take the struct i915_ggtt_view
- * parameter encapsulating all metadata required to implement a view.
- *
- * As a helper for callers which are only interested in the normal view, a
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
- * GEM API functions, the ones not taking the view parameter, operate on the
- * normal GGTT view.
- *
- * Code wanting to add or use a new GGTT view needs to:
- *
- * 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
- * 3. Add support to i915_get_vma_pages().
- *
- * New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
- * exists for the lifetime of a VMA.
- *
- * The core API is designed to have copy semantics, which means that a passed-in
- * struct i915_ggtt_view does not need to be persistent (left around after
- * calling the core API functions).
- *
- */
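
The stereo example above maps the four pages of the normal view onto an alternative view twice as wide, repeating each row. A standalone illustration of that page remap (hypothetical helper, not driver code; real views build a scatter-gather table in i915_get_vma_pages()):

#include <stdio.h>

/* Normal view is a 2x2 page grid "12 / 34"; the alternative view
 * repeats each row to give "1212 / 3434". Pages are identified by
 * their index in the normal view. */
int main(void)
{
	const int rows = 2, cols = 2;
	int r, c;

	for (r = 0; r < rows; r++) {
		for (c = 0; c < 2 * cols; c++)	/* each row doubled in width */
			printf("%d", r * cols + (c % cols) + 1);
		printf("\n");
	}
	return 0;
}
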
-
-#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
-static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
- spin_unlock_irq(&uncore->lock);
-}
-
-static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
-
- /*
-	 * Note that, as an uncached mmio write, this will flush the
-	 * write-combining buffer (WCB) of the GGTT PTE writes before it
-	 * triggers the invalidate.
- */
- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-}
-
-static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- gen8_ggtt_invalidate(ggtt);
-
- if (INTEL_GEN(i915) >= 12)
- intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
- GEN12_GUC_TLB_INV_CR_INVALIDATE);
- else
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-}
-
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- intel_gtt_chipset_flush();
-}
-
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 pte_flags;
- int err;
-
- if (flags & I915_VMA_ALLOC) {
- err = vma->vm->allocate_va_range(vma->vm,
- vma->node.start, vma->size);
- if (err)
- return err;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- /* Applicable to VLV, and gen8+ */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- wmb();
-
- return 0;
-}
-
-static void ppgtt_unbind_vma(struct i915_vma *vma)
-{
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int ppgtt_set_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->pages);
-
- vma->pages = vma->obj->mm.pages;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
-static void clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- if (vma->pages != vma->obj->mm.pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
-}
-
-static u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
-
-static u64 gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
-{
- u64 pde = _PAGE_PRESENT | _PAGE_RW;
- pde |= addr;
- if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE;
- else
- pde |= PPAT_UNCACHED;
- return pde;
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static void stash_init(struct pagestash *stash)
-{
- pagevec_init(&stash->pvec);
- spin_lock_init(&stash->lock);
-}
-
-static struct page *stash_pop_page(struct pagestash *stash)
-{
- struct page *page = NULL;
-
- spin_lock(&stash->lock);
- if (likely(stash->pvec.nr))
- page = stash->pvec.pages[--stash->pvec.nr];
- spin_unlock(&stash->lock);
-
- return page;
-}
-
-static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
-{
- unsigned int nr;
-
- spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
-
- nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
- memcpy(stash->pvec.pages + stash->pvec.nr,
- pvec->pages + pvec->nr - nr,
- sizeof(pvec->pages[0]) * nr);
- stash->pvec.nr += nr;
-
- spin_unlock(&stash->lock);
-
- pvec->nr -= nr;
-}
-
-static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
-{
- struct pagevec stack;
- struct page *page;
-
- if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
- i915_gem_shrink_all(vm->i915);
-
- page = stash_pop_page(&vm->free_pages);
- if (page)
- return page;
-
- if (!vm->pt_kmap_wc)
- return alloc_page(gfp);
-
- /* Look in our global stash of WC pages... */
- page = stash_pop_page(&vm->i915->mm.wc_stash);
- if (page)
- return page;
-
- /*
- * Otherwise batch allocate pages to amortize cost of set_pages_wc.
- *
- * We have to be careful as page allocation may trigger the shrinker
- * (via direct reclaim) which will fill up the WC stash underneath us.
- * So we add our WB pages into a temporary pvec on the stack and merge
- * them into the WC stash after all the allocations are complete.
- */
- pagevec_init(&stack);
- do {
- struct page *page;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- break;
-
- stack.pages[stack.nr++] = page;
- } while (pagevec_space(&stack));
-
- if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
- page = stack.pages[--stack.nr];
-
- /* Merge spare WC pages to the global stash */
- if (stack.nr)
- stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
-
- /* Push any surplus WC pages onto the local VM stash */
- if (stack.nr)
- stash_push_pagevec(&vm->free_pages, &stack);
- }
-
- /* Return unwanted leftovers */
- if (unlikely(stack.nr)) {
- WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
- __pagevec_release(&stack);
- }
-
- return page;
-}
-
-static void vm_free_pages_release(struct i915_address_space *vm,
- bool immediate)
-{
- struct pagevec *pvec = &vm->free_pages.pvec;
- struct pagevec stack;
-
- lockdep_assert_held(&vm->free_pages.lock);
- GEM_BUG_ON(!pagevec_count(pvec));
-
- if (vm->pt_kmap_wc) {
- /*
- * When we use WC, first fill up the global stash and then
- * only if full immediately free the overflow.
- */
- stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
-
- /*
- * As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
- return;
-
- /*
- * We have to drop the lock to allow ourselves to sleep,
- * so take a copy of the pvec and clear the stash for
- * others to use it as we sleep.
- */
- stack = *pvec;
- pagevec_reinit(pvec);
- spin_unlock(&vm->free_pages.lock);
-
- pvec = &stack;
- set_pages_array_wb(pvec->pages, pvec->nr);
-
- spin_lock(&vm->free_pages.lock);
- }
-
- __pagevec_release(pvec);
-}
-
-static void vm_free_page(struct i915_address_space *vm, struct page *page)
-{
- /*
- * On !llc, we need to change the pages back to WB. We only do so
- * in bulk, so we rarely need to change the page attributes here,
- * but doing so requires a stop_machine() from deep inside arch/x86/mm.
- * To make detection of the possible sleep more likely, use an
- * unconditional might_sleep() for everybody.
- */
- might_sleep();
- spin_lock(&vm->free_pages.lock);
- while (!pagevec_space(&vm->free_pages.pvec))
- vm_free_pages_release(vm, false);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
- pagevec_add(&vm->free_pages.pvec, page);
- spin_unlock(&vm->free_pages.lock);
-}
-
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
- spin_lock(&vm->free_pages.lock);
- if (pagevec_count(&vm->free_pages.pvec))
- vm_free_pages_release(vm, true);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
- spin_unlock(&vm->free_pages.lock);
-
- drm_mm_takedown(&vm->mm);
-
- mutex_destroy(&vm->mutex);
-}
-
-void __i915_vm_close(struct i915_address_space *vm)
-{
- struct i915_vma *vma, *vn;
-
- mutex_lock(&vm->mutex);
- list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- /* Keep the obj (and hence the vma) alive as _we_ destroy it */
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
-
- atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
- WARN_ON(__i915_vma_unbind(vma));
- __i915_vma_put(vma);
-
- i915_gem_object_put(obj);
- }
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- mutex_unlock(&vm->mutex);
-}
-
-static void __i915_vm_release(struct work_struct *work)
-{
- struct i915_address_space *vm =
- container_of(work, struct i915_address_space, rcu.work);
-
- vm->cleanup(vm);
- i915_address_space_fini(vm);
-
- kfree(vm);
-}
-
-void i915_vm_release(struct kref *kref)
-{
- struct i915_address_space *vm =
- container_of(kref, struct i915_address_space, ref);
-
- GEM_BUG_ON(i915_is_ggtt(vm));
- trace_i915_ppgtt_release(vm);
-
- queue_rcu_work(vm->i915->wq, &vm->rcu);
-}
-
-static void i915_address_space_init(struct i915_address_space *vm, int subclass)
-{
- kref_init(&vm->ref);
- INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
- atomic_set(&vm->open, 1);
-
- /*
- * The vm->mutex must be reclaim safe (for use in the shrinker).
- * Do a dummy acquire now under fs_reclaim so that any allocation
- * attempt holding the lock is immediately reported by lockdep.
- */
- mutex_init(&vm->mutex);
- lockdep_set_subclass(&vm->mutex, subclass);
- i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
-
- GEM_BUG_ON(!vm->total);
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
-
- stash_init(&vm->free_pages);
-
- INIT_LIST_HEAD(&vm->bound_list);
-}
-
-static int __setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- gfp_t gfp)
-{
- p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
- if (unlikely(!p->page))
- return -ENOMEM;
-
- p->daddr = dma_map_page_attrs(vm->dma,
- p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
- vm_free_page(vm, p->page);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
-{
- return __setup_page_dma(vm, p, __GFP_HIGHMEM);
-}
-
-static void cleanup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
-{
- dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- vm_free_page(vm, p->page);
-}
-
-#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-
-static void
-fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
-{
- kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
-}
-
-#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
-#define fill32_px(px, v) do { \
- u64 v__ = lower_32_bits(v); \
- fill_px((px), v__ << 32 | v__); \
-} while (0)
-
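
fill32_px() above duplicates a 32-bit encoding into both halves of each 64-bit entry, so the memset64()-based fill_px() path can be reused for 32-bit PTEs. A standalone sketch of the replication step (replicate32 is a hypothetical name):

#include <inttypes.h>
#include <stdio.h>

/* Replicate a 32-bit value into both halves of a 64-bit word,
 * as the fill32_px() macro does before calling fill_px(). */
static uint64_t replicate32(uint32_t v)
{
	uint64_t v64 = v;

	return v64 << 32 | v64;
}

int main(void)
{
	printf("0x%016" PRIx64 "\n", replicate32(0xdeadbeef));
	/* prints 0xdeadbeefdeadbeef */
	return 0;
}
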
-static int
-setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
-{
- unsigned long size;
-
- /*
- * In order to utilize 64K pages for an object with a size < 2M, we will
- * need to support a 64K scratch page, given that every 16th entry for a
- * page-table operating in 64K mode must point to a properly aligned 64K
- * region, including any PTEs which happen to point to scratch.
- *
- * This is only relevant for the 48b PPGTT where we support
- * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
- * scratch (read-only) between all vm, we create one 64k scratch page
- * for all.
- */
- size = I915_GTT_PAGE_SIZE_4K;
- if (i915_vm_is_4lvl(vm) &&
- HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
- size = I915_GTT_PAGE_SIZE_64K;
- gfp |= __GFP_NOWARN;
- }
- gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
-
- do {
- unsigned int order = get_order(size);
- struct page *page;
- dma_addr_t addr;
-
- page = alloc_pages(gfp, order);
- if (unlikely(!page))
- goto skip;
-
- addr = dma_map_page_attrs(vm->dma,
- page, 0, size,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, addr)))
- goto free_page;
-
- if (unlikely(!IS_ALIGNED(addr, size)))
- goto unmap_page;
-
- vm->scratch[0].base.page = page;
- vm->scratch[0].base.daddr = addr;
- vm->scratch_order = order;
- return 0;
-
-unmap_page:
- dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
-free_page:
- __free_pages(page, order);
-skip:
- if (size == I915_GTT_PAGE_SIZE_4K)
- return -ENOMEM;
-
- size = I915_GTT_PAGE_SIZE_4K;
- gfp &= ~__GFP_NOWARN;
- } while (1);
-}
-
-static void cleanup_scratch_page(struct i915_address_space *vm)
-{
- struct i915_page_dma *p = px_base(&vm->scratch[0]);
- unsigned int order = vm->scratch_order;
-
- dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
- PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, order);
-}
-
-static void free_scratch(struct i915_address_space *vm)
-{
- int i;
-
- if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
- return;
-
- for (i = 1; i <= vm->top; i++) {
- if (!px_dma(&vm->scratch[i]))
- break;
- cleanup_page_dma(vm, px_base(&vm->scratch[i]));
- }
-
- cleanup_scratch_page(vm);
-}
-
-static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
-{
- struct i915_page_table *pt;
-
- pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
- if (unlikely(!pt))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_page_dma(vm, &pt->base))) {
- kfree(pt);
- return ERR_PTR(-ENOMEM);
- }
-
- atomic_set(&pt->used, 0);
- return pt;
-}
-
-static struct i915_page_directory *__alloc_pd(size_t sz)
-{
- struct i915_page_directory *pd;
-
- pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
- if (unlikely(!pd))
- return NULL;
-
- spin_lock_init(&pd->lock);
- return pd;
-}
-
-static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
-{
- struct i915_page_directory *pd;
-
- pd = __alloc_pd(sizeof(*pd));
- if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
-
- return pd;
-}
-
-static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
-{
- cleanup_page_dma(vm, pd);
- kfree(pd);
-}
-
-#define free_px(vm, px) free_pd(vm, px_base(px))
-
-static inline void
-write_dma_entry(struct i915_page_dma * const pdma,
- const unsigned short idx,
- const u64 encoded_entry)
-{
- u64 * const vaddr = kmap_atomic(pdma->page);
-
- vaddr[idx] = encoded_entry;
- kunmap_atomic(vaddr);
-}
-
-static inline void
-__set_pd_entry(struct i915_page_directory * const pd,
- const unsigned short idx,
- struct i915_page_dma * const to,
- u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
-{
- /* Each thread pre-pins the pd, and we may have a thread per pde. */
- GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
-
- atomic_inc(px_used(pd));
- pd->entry[idx] = to;
- write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
-}
-
-#define set_pd_entry(pd, idx, to) \
- __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
-
-static inline void
-clear_pd_entry(struct i915_page_directory * const pd,
- const unsigned short idx,
- const struct i915_page_scratch * const scratch)
-{
- GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
-
- write_dma_entry(px_base(pd), idx, scratch->encode);
- pd->entry[idx] = NULL;
- atomic_dec(px_used(pd));
-}
-
-static bool
-release_pd_entry(struct i915_page_directory * const pd,
- const unsigned short idx,
- struct i915_page_table * const pt,
- const struct i915_page_scratch * const scratch)
-{
- bool free = false;
-
- if (atomic_add_unless(&pt->used, -1, 1))
- return false;
-
- spin_lock(&pd->lock);
- if (atomic_dec_and_test(&pt->used)) {
- clear_pd_entry(pd, idx, scratch);
- free = true;
- }
- spin_unlock(&pd->lock);
-
- return free;
-}
-
-static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
-{
- struct drm_i915_private *dev_priv = ppgtt->vm.i915;
- enum vgt_g2v_type msg;
- int i;
-
- if (create)
- atomic_inc(px_used(ppgtt->pd)); /* never remove */
- else
- atomic_dec(px_used(ppgtt->pd));
-
- mutex_lock(&dev_priv->vgpu.lock);
-
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- const u64 daddr = px_dma(ppgtt->pd);
-
- I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
-
- msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
- } else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++) {
- const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
- }
-
- msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
- }
-
- /* g2v_notify atomically (via hv trap) consumes the message packet. */
- I915_WRITE(vgtif_reg(g2v_notify), msg);
-
- mutex_unlock(&dev_priv->vgpu.lock);
-}
-
-/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
-#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
-#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
-#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
-#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
-#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
-#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
-#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
-
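
With 4K pages and 8-byte entries, GEN8_PDES is 512, so each page-table level consumes 9 address bits above the 12-bit page offset. A standalone sketch of the index arithmetic these macros perform, assuming i915_pde_index() selects 9 bits at the given shift (which is how the macros above use it):

#include <inttypes.h>
#include <stdio.h>

#define GEN8_PDES	512	/* 4K page / 8-byte entries */
#define PTE_SHIFT	12	/* ilog2(4K) */
#define LVL_BITS	9	/* ilog2(GEN8_PDES) */

/* Assumed behaviour of i915_pde_index(): 9 bits at 'shift'. */
static unsigned int pd_index(uint64_t addr, int lvl)
{
	return (addr >> (PTE_SHIFT + lvl * LVL_BITS)) & (GEN8_PDES - 1);
}

int main(void)
{
	uint64_t addr = 0x0000123456789000ull;
	int lvl;

	/* lvl 3..0: pml4e, pdpe, pde, pte indices of a 48b walk */
	for (lvl = 3; lvl >= 0; lvl--)
		printf("lvl %d index = %u\n", lvl, pd_index(addr, lvl));
	return 0;
}
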
-static inline unsigned int
-gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
-{
- const int shift = gen8_pd_shift(lvl);
- const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
-
- GEM_BUG_ON(start >= end);
- end += ~mask >> gen8_pd_shift(1);
-
- *idx = i915_pde_index(start, shift);
- if ((start ^ end) & mask)
- return GEN8_PDES - *idx;
- else
- return i915_pde_index(end, shift) - *idx;
-}
-
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
-{
- const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
-
- GEM_BUG_ON(start >= end);
- return (start ^ end) & mask && (start & ~mask) == 0;
-}
-
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
-{
- GEM_BUG_ON(start >= end);
- if ((start ^ end) >> gen8_pd_shift(1))
- return GEN8_PDES - (start & (GEN8_PDES - 1));
- else
- return end - start;
-}
-
-static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
-{
- unsigned int shift = __gen8_pte_shift(vm->top);
- return (vm->total + (1ull << shift) - 1) >> shift;
-}
-
-static inline struct i915_page_directory *
-gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
-{
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
-
- if (vm->top == 2)
- return ppgtt->pd;
- else
- return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
-}
-
-static inline struct i915_page_directory *
-gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
-{
- return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
-}
-
-static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- int count, int lvl)
-{
- if (lvl) {
- void **pde = pd->entry;
-
- do {
- if (!*pde)
- continue;
-
- __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
- } while (pde++, --count);
- }
-
- free_px(vm, pd);
-}
-
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
- if (intel_vgpu_active(vm->i915))
- gen8_ppgtt_notify_vgt(ppgtt, false);
-
- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
- free_scratch(vm);
-}
-
-static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
- struct i915_page_directory * const pd,
- u64 start, const u64 end, int lvl)
-{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
- unsigned int idx, len;
-
- GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
-
- len = gen8_pd_range(start, end, lvl--, &idx);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
- __func__, vm, lvl + 1, start, end,
- idx, len, atomic_read(px_used(pd)));
- GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
-
- do {
- struct i915_page_table *pt = pd->entry[idx];
-
- if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
- gen8_pd_contains(start, end, lvl)) {
- DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
- __func__, vm, lvl + 1, idx, start, end);
- clear_pd_entry(pd, idx, scratch);
- __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
- start += (u64)I915_PDES << gen8_pd_shift(lvl);
- continue;
- }
-
- if (lvl) {
- start = __gen8_ppgtt_clear(vm, as_pd(pt),
- start, end, lvl);
- } else {
- unsigned int count;
- u64 *vaddr;
-
- count = gen8_pt_count(start, end);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
- __func__, vm, lvl, start, end,
- gen8_pd_index(start, 0), count,
- atomic_read(&pt->used));
- GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
-
- vaddr = kmap_atomic_px(pt);
- memset64(vaddr + gen8_pd_index(start, 0),
- vm->scratch[0].encode,
- count);
- kunmap_atomic(vaddr);
-
- atomic_sub(count, &pt->used);
- start += count;
- }
-
- if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
- } while (idx++, --len);
-
- return start;
-}
-
-static void gen8_ppgtt_clear(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(range_overflows(start, length, vm->total));
-
- start >>= GEN8_PTE_SHIFT;
- length >>= GEN8_PTE_SHIFT;
- GEM_BUG_ON(length == 0);
-
- __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
- start, start + length, vm->top);
-}
-
-static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
- struct i915_page_directory * const pd,
- u64 * const start, const u64 end, int lvl)
-{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
- struct i915_page_table *alloc = NULL;
- unsigned int idx, len;
- int ret = 0;
-
- GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
-
- len = gen8_pd_range(*start, end, lvl--, &idx);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
- __func__, vm, lvl + 1, *start, end,
- idx, len, atomic_read(px_used(pd)));
- GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
-
- spin_lock(&pd->lock);
- GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
- do {
- struct i915_page_table *pt = pd->entry[idx];
-
- if (!pt) {
- spin_unlock(&pd->lock);
-
- DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
- __func__, vm, lvl + 1, idx);
-
- pt = fetch_and_zero(&alloc);
- if (lvl) {
- if (!pt) {
- pt = &alloc_pd(vm)->pt;
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- fill_px(pt, vm->scratch[lvl].encode);
- } else {
- if (!pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- if (intel_vgpu_active(vm->i915) ||
- gen8_pt_count(*start, end) < I915_PDES)
- fill_px(pt, vm->scratch[lvl].encode);
- }
-
- spin_lock(&pd->lock);
- if (likely(!pd->entry[idx]))
- set_pd_entry(pd, idx, pt);
- else
- alloc = pt, pt = pd->entry[idx];
- }
-
- if (lvl) {
- atomic_inc(&pt->used);
- spin_unlock(&pd->lock);
-
- ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
- start, end, lvl);
- if (unlikely(ret)) {
- if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
- goto out;
- }
-
- spin_lock(&pd->lock);
- atomic_dec(&pt->used);
- GEM_BUG_ON(!atomic_read(&pt->used));
- } else {
- unsigned int count = gen8_pt_count(*start, end);
-
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
- __func__, vm, lvl, *start, end,
- gen8_pd_index(*start, 0), count,
- atomic_read(&pt->used));
-
- atomic_add(count, &pt->used);
- /* All other pdes may be simultaneously removed */
- GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
- *start += count;
- }
- } while (idx++, --len);
- spin_unlock(&pd->lock);
-out:
- if (alloc)
- free_px(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- u64 from;
- int err;
-
- GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
- GEM_BUG_ON(range_overflows(start, length, vm->total));
-
- start >>= GEN8_PTE_SHIFT;
- length >>= GEN8_PTE_SHIFT;
- GEM_BUG_ON(length == 0);
- from = start;
-
- err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
- &start, start + length, vm->top);
- if (unlikely(err && from != start))
- __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
- from, start, vm->top);
-
- return err;
-}
-
-static inline struct sgt_dma {
- struct scatterlist *sg;
- dma_addr_t dma, max;
-} sgt_dma(struct i915_vma *vma) {
- struct scatterlist *sg = vma->pages->sgl;
- dma_addr_t addr = sg_dma_address(sg);
- return (struct sgt_dma) { sg, addr, addr + sg->length };
-}
-
-static __always_inline u64
-gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
- struct i915_page_directory *pdp,
- struct sgt_dma *iter,
- u64 idx,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
- gen8_pte_t *vaddr;
-
- pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
- do {
- vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
-
- iter->dma += I915_GTT_PAGE_SIZE;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg) {
- idx = 0;
- break;
- }
-
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + iter->sg->length;
- }
-
- if (gen8_pd_index(++idx, 0) == 0) {
- if (gen8_pd_index(idx, 1) == 0) {
- /* Limited by sg length for 3lvl */
- if (gen8_pd_index(idx, 2) == 0)
- break;
-
- pd = pdp->entry[gen8_pd_index(idx, 2)];
- }
-
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
- }
- } while (1);
- kunmap_atomic(vaddr);
-
- return idx;
-}
-
-static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
- struct sgt_dma *iter,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
- u64 start = vma->node.start;
- dma_addr_t rem = iter->sg->length;
-
- GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
-
- do {
- struct i915_page_directory * const pdp =
- gen8_pdp_for_page_address(vma->vm, start);
- struct i915_page_directory * const pd =
- i915_pd_entry(pdp, __gen8_pte_index(start, 2));
- gen8_pte_t encode = pte_encode;
- unsigned int maybe_64K = -1;
- unsigned int page_size;
- gen8_pte_t *vaddr;
- u16 index;
-
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
- IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
- rem >= I915_GTT_PAGE_SIZE_2M &&
- !__gen8_pte_index(start, 0)) {
- index = __gen8_pte_index(start, 1);
- encode |= GEN8_PDE_PS_2M;
- page_size = I915_GTT_PAGE_SIZE_2M;
-
- vaddr = kmap_atomic_px(pd);
- } else {
- struct i915_page_table *pt =
- i915_pt_entry(pd, __gen8_pte_index(start, 1));
-
- index = __gen8_pte_index(start, 0);
- page_size = I915_GTT_PAGE_SIZE;
-
- if (!index &&
- vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
- IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
- (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
- maybe_64K = __gen8_pte_index(start, 1);
-
- vaddr = kmap_atomic_px(pt);
- }
-
- do {
- GEM_BUG_ON(iter->sg->length < page_size);
- vaddr[index++] = encode | iter->dma;
-
- start += page_size;
- iter->dma += page_size;
- rem -= page_size;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg)
- break;
-
- rem = iter->sg->length;
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + rem;
-
- if (maybe_64K != -1 && index < I915_PDES &&
- !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
- (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
- maybe_64K = -1;
-
- if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
- break;
- }
- } while (rem >= page_size && index < I915_PDES);
-
- kunmap_atomic(vaddr);
-
- /*
-		 * Is it safe to mark the 2M block as 64K? -- Either we have
-		 * filled the whole page-table with 64K entries, or filled part of
- * it and have reached the end of the sg table and we have
- * enough padding.
- */
- if (maybe_64K != -1 &&
- (index == I915_PDES ||
- (i915_vm_has_scratch_64K(vma->vm) &&
- !iter->sg && IS_ALIGNED(vma->node.start +
- vma->node.size,
- I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
- vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
- page_size = I915_GTT_PAGE_SIZE_64K;
-
- /*
- * We write all 4K page entries, even when using 64K
- * pages. In order to verify that the HW isn't cheating
- * by using the 4K PTE instead of the 64K PTE, we want
- * to remove all the surplus entries. If the HW skipped
- * the 64K PTE, it will read/write into the scratch page
- * instead - which we detect as missing results during
- * selftests.
- */
- if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
- u16 i;
-
- encode = vma->vm->scratch[0].encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
-
- for (i = 1; i < index; i += 16)
- memset64(vaddr + i, encode, 15);
-
- kunmap_atomic(vaddr);
- }
- }
-
- vma->page_sizes.gtt |= page_size;
- } while (iter->sg);
-}
-
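
A 2M PDE is only written when everything lines up. Condensed, the test above amounts to this standalone sketch (can_use_2M is a hypothetical name; the real loop additionally requires the 2M bit in vma->page_sizes.sg and derives the GPU-side alignment from a zero PTE index):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SZ_2M		(2u << 20)
#define ALIGNED_2M(x)	(((x) & (SZ_2M - 1)) == 0)

/* Condensed 2M eligibility test: GPU address, DMA address and the
 * remaining sg length must all line up on a 2M boundary. */
static bool can_use_2M(uint64_t gpu_addr, uint64_t dma_addr, uint64_t rem)
{
	return ALIGNED_2M(gpu_addr) && ALIGNED_2M(dma_addr) && rem >= SZ_2M;
}

int main(void)
{
	assert(can_use_2M(0, 0, SZ_2M));
	assert(!can_use_2M(0, 4096, SZ_2M));	/* DMA misaligned */
	assert(!can_use_2M(0, 0, SZ_2M - 1));	/* segment too short */
	return 0;
}
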
-static void gen8_ppgtt_insert(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
-
- if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
- } else {
- u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
-
- do {
- struct i915_page_directory * const pdp =
- gen8_pdp_for_page_index(vm, idx);
-
- idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
- cache_level, flags);
- } while (idx);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
- }
-}
-
-static int gen8_init_scratch(struct i915_address_space *vm)
-{
- int ret;
- int i;
-
- /*
-	 * If everybody agrees not to write into the scratch page,
- * we can reuse it for all vm, keeping contexts and processes separate.
- */
- if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
- struct i915_address_space *clone = vm->gt->vm;
-
- GEM_BUG_ON(!clone->has_read_only);
-
- vm->scratch_order = clone->scratch_order;
- memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
- px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
- return 0;
- }
-
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
- if (ret)
- return ret;
-
- vm->scratch[0].encode =
- gen8_pte_encode(px_dma(&vm->scratch[0]),
- I915_CACHE_LLC, vm->has_read_only);
-
- for (i = 1; i <= vm->top; i++) {
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
- goto free_scratch;
-
- fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
- vm->scratch[i].encode =
- gen8_pde_encode(px_dma(&vm->scratch[i]),
- I915_CACHE_LLC);
- }
-
- return 0;
-
-free_scratch:
- free_scratch(vm);
- return -ENOMEM;
-}
-
-static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pd = ppgtt->pd;
- unsigned int idx;
-
- GEM_BUG_ON(vm->top != 2);
- GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
-
- for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
- struct i915_page_directory *pde;
-
- pde = alloc_pd(vm);
- if (IS_ERR(pde))
- return PTR_ERR(pde);
-
- fill_px(pde, vm->scratch[1].encode);
- set_pd_entry(pd, idx, pde);
- atomic_inc(px_used(pde)); /* keep pinned */
- }
- wmb();
-
- return 0;
-}
-
-static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- ppgtt->vm.gt = gt;
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
-}
-
-static struct i915_page_directory *
-gen8_alloc_top_pd(struct i915_address_space *vm)
-{
- const unsigned int count = gen8_pd_top_count(vm);
- struct i915_page_directory *pd;
-
- GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
-
- pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
- if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
-
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
-
- fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
- atomic_inc(px_used(pd)); /* mark as pinned */
- return pd;
-}
-
-/*
- * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
- * registers, with a net effect resembling a 2-level page table in normal x86
- * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
- * legacy 32b address space.
- *
- */
-static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ppgtt *ppgtt;
- int err;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- ppgtt_init(ppgtt, &i915->gt);
- ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
-
- /*
- * From bdw, there is hw support for read-only pages in the PPGTT.
- *
- * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
- * for now.
- *
- * Gen12 has inherited the same read-only fault issue from gen11.
- */
- ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12);
-
-	/* There are only a few exceptions for gen >= 6: chv and bxt.
-	 * And we are not sure about the latter, so play safe for now.
- */
- if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
- ppgtt->vm.pt_kmap_wc = true;
-
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_free_scratch;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
-
- ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
- ppgtt->vm.insert_entries = gen8_ppgtt_insert;
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
- ppgtt->vm.clear_range = gen8_ppgtt_clear;
-
- if (intel_vgpu_active(i915))
- gen8_ppgtt_notify_vgt(ppgtt, true);
-
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
- return ppgtt;
-
-err_free_pd:
- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
- free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
- return ERR_PTR(err);
-}
-
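
Spelled out, the arithmetic in the comment above gen8_ppgtt_create() checks out: 4 PDPs, each pointing at a page directory of 512 PDEs, each PDE covering a page table of 512 PTEs, each PTE mapping a 4096-byte page. A quick standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 4 PDPs x 512 PDEs x 512 PTEs x 4K pages = 4GB of 32b space */
	uint64_t total = 4ull * 512 * 512 * 4096;

	assert(total == 1ull << 32);	 /* 4GB */
	assert(total / 4 == 1ull << 30); /* each PDP covers 1GB */
	return 0;
}
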
-/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
-{
- /* Caller needs to make sure the write completes if necessary */
- iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-static void gen7_ppgtt_enable(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 ecochk;
-
- intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
-
- ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
- if (IS_HASWELL(i915)) {
- ecochk |= ECOCHK_PPGTT_WB_HSW;
- } else {
- ecochk |= ECOCHK_PPGTT_LLC_IVB;
- ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
- }
- intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, gt, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
-}
-
-static void gen6_ppgtt_enable(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
-
- intel_uncore_rmw(uncore,
- GAC_ECO_BITS,
- 0,
- ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
-
- intel_uncore_rmw(uncore,
- GAB_CTL,
- 0,
- GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- intel_uncore_rmw(uncore,
- GAM_ECOCHK,
- 0,
- ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
-
- if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
- intel_uncore_write(uncore,
- GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-}
-
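
The enable paths above lean on intel_uncore_rmw(). Assuming the usual clear-then-set read-modify-write semantics for its (reg, clear, set) arguments, each call is equivalent to this sketch (uncore_rmw_sketch is a hypothetical name):

/* Assumed semantics of the intel_uncore_rmw() calls above. */
static void uncore_rmw_sketch(struct intel_uncore *uncore,
			      i915_reg_t reg, u32 clear, u32 set)
{
	u32 val = intel_uncore_read(uncore, reg);

	val &= ~clear;	/* clear the requested bits first... */
	val |= set;	/* ...then set the new ones */
	intel_uncore_write(uncore, reg, val);
}
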
-/* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch[0].encode;
- unsigned int pde = first_entry / GEN6_PTES;
- unsigned int pte = first_entry % GEN6_PTES;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-
- while (num_entries) {
- struct i915_page_table * const pt =
- i915_pt_entry(ppgtt->base.pd, pde++);
- const unsigned int count = min(num_entries, GEN6_PTES - pte);
- gen6_pte_t *vaddr;
-
- GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
-
- num_entries -= count;
-
- GEM_BUG_ON(count > atomic_read(&pt->used));
- if (!atomic_sub_return(count, &pt->used))
- ppgtt->scan_for_unused_pt = true;
-
- /*
- * Note that the hw doesn't support removing PDE on the fly
- * (they are cached inside the context with no means to
- * invalidate the cache), so we can only reset the PTE
- * entries back to scratch.
- */
-
- vaddr = kmap_atomic_px(pt);
- memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
-
- pte = 0;
- }
-}
-
-static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pd = ppgtt->pd;
- unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
- unsigned act_pt = first_entry / GEN6_PTES;
- unsigned act_pte = first_entry % GEN6_PTES;
- const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter = sgt_dma(vma);
- gen6_pte_t *vaddr;
-
- GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
-
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
- do {
- vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
-
- iter.dma += I915_GTT_PAGE_SIZE;
- if (iter.dma == iter.max) {
- iter.sg = __sg_next(iter.sg);
- if (!iter.sg)
- break;
-
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
- }
-
- if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
- act_pte = 0;
- }
- } while (1);
- kunmap_atomic(vaddr);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
-{
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt;
- unsigned int pde;
-
- start = round_down(start, SZ_64K);
- end = round_up(end, SZ_64K) - start;
-
- mutex_lock(&ppgtt->flush);
-
- gen6_for_each_pde(pt, pd, start, end, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- mb();
- ioread32(ppgtt->pd_addr + pde - 1);
- gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
- mb();
-
- mutex_unlock(&ppgtt->flush);
-}
-
-static int gen6_alloc_va_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt, *alloc = NULL;
- intel_wakeref_t wakeref;
- u64 from = start;
- unsigned int pde;
- int ret = 0;
-
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
- spin_lock(&pd->lock);
- gen6_for_each_pde(pt, pd, start, length, pde) {
- const unsigned int count = gen6_pte_count(start, length);
-
- if (px_base(pt) == px_base(&vm->scratch[1])) {
- spin_unlock(&pd->lock);
-
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind_out;
- }
-
- fill32_px(pt, vm->scratch[0].encode);
-
- spin_lock(&pd->lock);
- if (pd->entry[pde] == &vm->scratch[1]) {
- pd->entry[pde] = pt;
- } else {
- alloc = pt;
- pt = pd->entry[pde];
- }
- }
-
- atomic_add(count, &pt->used);
- }
- spin_unlock(&pd->lock);
-
- if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
- gen6_flush_pd(ppgtt, from, start);
-
- goto out;
-
-unwind_out:
- gen6_ppgtt_clear_range(vm, from, start - from);
-out:
- if (alloc)
- free_px(vm, alloc);
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
- return ret;
-}
-
-static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
-{
- struct i915_address_space * const vm = &ppgtt->base.vm;
- struct i915_page_directory * const pd = ppgtt->base.pd;
- int ret;
-
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
- if (ret)
- return ret;
-
- vm->scratch[0].encode =
- vm->pte_encode(px_dma(&vm->scratch[0]),
- I915_CACHE_NONE, PTE_READ_ONLY);
-
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
- cleanup_scratch_page(vm);
- return -ENOMEM;
- }
-
- fill32_px(&vm->scratch[1], vm->scratch[0].encode);
- memset_p(pd->entry, &vm->scratch[1], I915_PDES);
-
- return 0;
-}
-
-static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_dma * const scratch =
- px_base(&ppgtt->base.vm.scratch[1]);
- struct i915_page_table *pt;
- u32 pde;
-
- gen6_for_all_pdes(pt, pd, pde)
- if (px_base(pt) != scratch)
- free_px(&ppgtt->base.vm, pt);
-}
-
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-
- __i915_vma_put(ppgtt->vma);
-
- gen6_ppgtt_free_pd(ppgtt);
- free_scratch(vm);
-
- mutex_destroy(&ppgtt->flush);
- mutex_destroy(&ppgtt->pin_mutex);
- kfree(ppgtt->base.pd);
-}
-
-static int pd_vma_set_pages(struct i915_vma *vma)
-{
- vma->pages = ERR_PTR(-ENODEV);
- return 0;
-}
-
-static void pd_vma_clear_pages(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
-
- vma->pages = NULL;
-}
-
-static int pd_vma_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
- struct gen6_ppgtt *ppgtt = vma->private;
- u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
-
- px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
-
- gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
- return 0;
-}
-
-static void pd_vma_unbind(struct i915_vma *vma)
-{
- struct gen6_ppgtt *ppgtt = vma->private;
- struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_dma * const scratch =
- px_base(&ppgtt->base.vm.scratch[1]);
- struct i915_page_table *pt;
- unsigned int pde;
-
- if (!ppgtt->scan_for_unused_pt)
- return;
-
- /* Free all no longer used page tables */
- gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
- if (px_base(pt) == scratch || atomic_read(&pt->used))
- continue;
-
- free_px(&ppgtt->base.vm, pt);
- pd->entry[pde] = scratch;
- }
-
- ppgtt->scan_for_unused_pt = false;
-}
-
-static const struct i915_vma_ops pd_vma_ops = {
- .set_pages = pd_vma_set_pages,
- .clear_pages = pd_vma_clear_pages,
- .bind_vma = pd_vma_bind,
- .unbind_vma = pd_vma_unbind,
-};
-
-static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
-{
- struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(size > ggtt->vm.total);
-
- vma = i915_vma_alloc();
- if (!vma)
- return ERR_PTR(-ENOMEM);
-
- i915_active_init(&vma->active, NULL, NULL);
-
- kref_init(&vma->ref);
- mutex_init(&vma->pages_mutex);
- vma->vm = i915_vm_get(&ggtt->vm);
- vma->ops = &pd_vma_ops;
- vma->private = ppgtt;
-
- vma->size = size;
- vma->fence_size = size;
- atomic_set(&vma->flags, I915_VMA_GGTT);
- vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
-
- INIT_LIST_HEAD(&vma->obj_link);
- INIT_LIST_HEAD(&vma->closed_link);
-
- return vma;
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- int err = 0;
-
- GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
-
- /*
- * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
- * which will be pinned into every active context.
- * (When vma->pin_count becomes atomic, I expect we will naturally
- * need a larger, unpacked, type and kill this redundancy.)
- */
- if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
- return 0;
-
- if (mutex_lock_interruptible(&ppgtt->pin_mutex))
- return -EINTR;
-
- /*
-	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
-	if (!atomic_read(&ppgtt->pin_count))
-		err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
- if (!err)
- atomic_inc(&ppgtt->pin_count);
- mutex_unlock(&ppgtt->pin_mutex);
-
- return err;
-}
-
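
Pinning is refcounted: only the first gen6_ppgtt_pin() maps the PDEs into the GGTT (at the top, PIN_HIGH), and only the last gen6_ppgtt_unpin() releases the vma. A hypothetical caller sketch of the expected pairing (use_gen6_ppgtt is an illustrative name):

/* Sketch only: every successful pin must be balanced by an unpin. */
static int use_gen6_ppgtt(struct i915_ppgtt *base)
{
	int err;

	err = gen6_ppgtt_pin(base);	/* first pin maps the PDEs */
	if (err)
		return err;

	/* ... run work that walks this ppgtt ... */

	gen6_ppgtt_unpin(base);		/* last unpin releases the vma */
	return 0;
}
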
-void gen6_ppgtt_unpin(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
- GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
- if (atomic_dec_and_test(&ppgtt->pin_count))
- i915_vma_unpin(ppgtt->vma);
-}
-
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
-{
- struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
-
- if (!atomic_read(&ppgtt->pin_count))
- return;
-
- i915_vma_unpin(ppgtt->vma);
- atomic_set(&ppgtt->pin_count, 0);
-}
-
-static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ggtt * const ggtt = &i915->ggtt;
- struct gen6_ppgtt *ppgtt;
- int err;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&ppgtt->flush);
- mutex_init(&ppgtt->pin_mutex);
-
- ppgtt_init(&ppgtt->base, &i915->gt);
- ppgtt->base.vm.top = 1;
-
- ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
- ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
- ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
-
- ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
-
- ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
- if (!ppgtt->base.pd) {
- err = -ENOMEM;
- goto err_free;
- }
-
- err = gen6_ppgtt_init_scratch(ppgtt);
- if (err)
- goto err_pd;
-
- ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
- if (IS_ERR(ppgtt->vma)) {
- err = PTR_ERR(ppgtt->vma);
- goto err_scratch;
- }
-
- return &ppgtt->base;
-
-err_scratch:
- free_scratch(&ppgtt->base.vm);
-err_pd:
- kfree(ppgtt->base.pd);
-err_free:
- mutex_destroy(&ppgtt->pin_mutex);
- kfree(ppgtt);
- return ERR_PTR(err);
-}
-
-static void gtt_write_workarounds(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
-
-	/* This function is for GTT-related workarounds. It is called on
-	 * driver load and after a GPU reset, so you can place workarounds
-	 * here even if they get overwritten by a GPU reset.
- */
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
- if (IS_BROADWELL(i915))
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(i915))
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_LP(i915))
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
- intel_uncore_write(uncore,
- GEN8_L3_LRA_1_GPGPU,
- GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
-
- /*
- * To support 64K PTEs we need to first enable the use of the
-	 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
- * mmio, otherwise the page-walker will simply ignore the IPS bit. This
- * shouldn't be needed after GEN10.
- *
-	 * 64K pages were first introduced with BDW, although technically they
- * only *work* from gen9+. For pre-BDW we instead have the option for
- * 32K pages, but we don't currently have any support for it in our
- * driver.
- */
- if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(i915) <= 10)
- intel_uncore_rmw(uncore,
- GEN8_GAMW_ECO_DEV_RW_IA,
- 0,
- GAMW_ECO_ENABLE_64K_IPS_FIELD);
-
- if (IS_GEN_RANGE(i915, 8, 11)) {
- bool can_use_gtt_cache = true;
-
- /*
- * According to the BSpec if we use 2M/1G pages then we also
- * need to disable the GTT cache. At least on BDW we can see
- * visual corruption when using 2M pages, and not disabling the
- * GTT cache.
- */
- if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
- can_use_gtt_cache = false;
-
- /* WaGttCachingOffByDefault */
- intel_uncore_write(uncore,
- HSW_GTT_CACHE_EN,
- can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- WARN_ON_ONCE(can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
- }
-}
-
-int i915_ppgtt_init_hw(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- gtt_write_workarounds(gt);
-
- if (IS_GEN(i915, 6))
- gen6_ppgtt_enable(gt);
- else if (IS_GEN(i915, 7))
- gen7_ppgtt_enable(gt);
-
- return 0;
-}
-
-static struct i915_ppgtt *
-__ppgtt_create(struct drm_i915_private *i915)
-{
- if (INTEL_GEN(i915) < 8)
- return gen6_ppgtt_create(i915);
- else
- return gen8_ppgtt_create(i915);
-}
-
-struct i915_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915)
-{
- struct i915_ppgtt *ppgtt;
-
- ppgtt = __ppgtt_create(i915);
- if (IS_ERR(ppgtt))
- return ppgtt;
-
- trace_i915_ppgtt_create(&ppgtt->vm);
-
- return ppgtt;
-}
-
-/* Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *dev_priv)
-{
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
-}
-
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- /* Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(i915) < 6)
- return;
-
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
-
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
- ggtt->invalidate(ggtt);
-}
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
- ggtt_suspend_mappings(&i915->ggtt);
-}
-
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -2181,368 +52,6 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
return -ENOSPC;
}
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read-only page.
- */
-
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(ggtt);
-}
-
-/*
- * Binds an object into the global GTT with the specified cache level. The object
- * will be accessible to the GPU via commands whose operands reference offsets
- * within the global GTT as well as accessible by the GPU through the GMADR
- * mapped BAR (dev_priv->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
- struct sgt_iter iter;
- dma_addr_t addr;
- for_each_sgt_daddr(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0].encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- struct drm_i915_private *dev_priv = vm->i915;
-
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma *vma;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0].encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
- flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static int ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
- intel_wakeref_t wakeref;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
-
- return 0;
-}
-
-static void ggtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-}
-
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- u32 pte_flags;
- int ret;
-
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (i915_gem_object_is_readonly(vma->obj))
- pte_flags |= PTE_READ_ONLY;
-
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
-
- if (flags & I915_VMA_ALLOC) {
- ret = alias->vm.allocate_va_range(&alias->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
- __i915_vma_flags(vma)));
- alias->vm.insert_entries(&alias->vm, vma,
- cache_level, pte_flags);
- }
-
- if (flags & I915_VMA_GLOBAL_BIND) {
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- vma->vm->insert_entries(vma->vm, vma,
- cache_level, pte_flags);
- }
- }
-
- return 0;
-}
-
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- struct i915_address_space *vm =
- &i915_vm_to_ggtt(vma->vm)->alias->vm;
-
- vm->clear_range(vm, vma->node.start, vma->size);
- }
-}
-
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -2563,1070 +72,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
-static int ggtt_set_pages(struct i915_vma *vma)
-{
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
-}
-
-static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
-{
- if (i915_node_color_differs(node, color))
- *start += I915_GTT_PAGE_SIZE;
-
- /* Also leave a space between the unallocated reserved node after the
- * GTT and any objects within the GTT, i.e. we use the color adjustment
- * to insert a guard page to prevent prefetches crossing over the
- * GTT boundary.
- */
- node = list_next_entry(node, node_list);
- if (node->color != color)
- *end -= I915_GTT_PAGE_SIZE;
-}
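
The net effect of the colour adjustment is easiest to see with concrete numbers. The sketch below is a minimal userspace model of the arithmetic only (hole bounds and neighbour colours are hypothetical), not the drm_mm callback itself:

#include <stdint.h>
#include <stdio.h>

#define GUARD 4096 /* one I915_GTT_PAGE_SIZE guard page */

int main(void)
{
        uint64_t start = 0x100000, end = 0x200000; /* hypothetical hole */
        int prev_differs = 1, next_differs = 1;    /* neighbour colours differ */

        if (prev_differs)
                start += GUARD; /* keep a guard page after the previous node */
        if (next_differs)
                end -= GUARD;   /* keep a guard page before the next node */

        printf("usable hole: [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}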
-
-static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
-{
- struct i915_ppgtt *ppgtt;
- int err;
-
- ppgtt = i915_ppgtt_create(ggtt->vm.i915);
- if (IS_ERR(ppgtt))
- return PTR_ERR(ppgtt);
-
- if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
- err = -ENODEV;
- goto err_ppgtt;
- }
-
- /*
- * Note we only preallocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference in size is
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
- if (err)
- goto err_ppgtt;
-
- ggtt->alias = ppgtt;
- ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
-
- GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
- ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
-
- GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
- ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
-
- return 0;
-
-err_ppgtt:
- i915_vm_put(&ppgtt->vm);
- return err;
-}
-
-static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
-{
- struct i915_ppgtt *ppgtt;
-
- ppgtt = fetch_and_zero(&ggtt->alias);
- if (!ppgtt)
- return;
-
- i915_vm_put(&ppgtt->vm);
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-}
-
-static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
-{
- u64 size;
- int ret;
-
- if (!USES_GUC(ggtt->vm.i915))
- return 0;
-
- GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
- size = ggtt->vm.total - GUC_GGTT_TOP;
-
- ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
-
- return ret;
-}
-
-static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
-{
- if (drm_mm_node_allocated(&ggtt->uc_fw))
- drm_mm_remove_node(&ggtt->uc_fw);
-}
-
-static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
-{
- ggtt_release_guc_top(ggtt);
- if (drm_mm_node_allocated(&ggtt->error_capture))
- drm_mm_remove_node(&ggtt->error_capture);
-}
-
-static int init_ggtt(struct i915_ggtt *ggtt)
-{
- /* Let GEM manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently prefetches
- * past the end of the object, and we've seen multiple hangs with the
- * GPU head pointer stuck in a batchbuffer bound at the last page of the
- * aperture. One page should be enough to keep any prefetching inside
- * of the aperture.
- */
- unsigned long hole_start, hole_end;
- struct drm_mm_node *entry;
- int ret;
-
- /*
- * GuC requires all resources that we're sharing with it to be placed in
- * non-WOPCM memory. If GuC is not present or not in use we still need a
- * small bias as ring wraparound at offset 0 sometimes hangs. No idea
- * why.
- */
- ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
-
- ret = intel_vgt_balloon(ggtt);
- if (ret)
- return ret;
-
- if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
- }
-
- /*
- * The upper portion of the GuC address space has a sizeable hole
- * (several MB) that is inaccessible to GuC. Reserve this range within
- * GGTT as it can comfortably hold GuC/HuC firmware images.
- */
- ret = ggtt_reserve_guc_top(ggtt);
- if (ret)
- goto err;
-
- /* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
- ggtt->vm.clear_range(&ggtt->vm, hole_start,
- hole_end - hole_start);
- }
-
- /* And finally clear the reserved guard page */
- ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
-
- return 0;
-
-err:
- cleanup_init_ggtt(ggtt);
- return ret;
-}
-
-int i915_init_ggtt(struct drm_i915_private *i915)
-{
- int ret;
-
- ret = init_ggtt(&i915->ggtt);
- if (ret)
- return ret;
-
- if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(&i915->ggtt);
- if (ret)
- cleanup_init_ggtt(&i915->ggtt);
- }
-
- return 0;
-}
-
-static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
-{
- struct i915_vma *vma, *vn;
-
- atomic_set(&ggtt->vm.open, 0);
-
- rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
- flush_workqueue(ggtt->vm.i915->wq);
-
- mutex_lock(&ggtt->vm.mutex);
-
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
- WARN_ON(__i915_vma_unbind(vma));
-
- if (drm_mm_node_allocated(&ggtt->error_capture))
- drm_mm_remove_node(&ggtt->error_capture);
-
- ggtt_release_guc_top(ggtt);
- intel_vgt_deballoon(ggtt);
-
- ggtt->vm.cleanup(&ggtt->vm);
-
- mutex_unlock(&ggtt->vm.mutex);
- i915_address_space_fini(&ggtt->vm);
-
- arch_phys_wc_del(ggtt->mtrr);
-
- if (ggtt->iomap.size)
- io_mapping_fini(&ggtt->iomap);
-}
-
-/**
- * i915_ggtt_driver_release - Clean up GGTT hardware initialization
- * @i915: i915 device
- */
-void i915_ggtt_driver_release(struct drm_i915_private *i915)
-{
- struct pagevec *pvec;
-
- fini_aliasing_ppgtt(&i915->ggtt);
-
- ggtt_cleanup_hw(&i915->ggtt);
-
- pvec = &i915->mm.wc_stash.pvec;
- if (pvec->nr) {
- set_pages_array_wb(pvec->pages, pvec->nr);
- __pagevec_release(pvec);
- }
-}
-
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
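
Taken together, these decoders turn a config-space field into bytes of PTE space; the GGTT virtual size then follows from the PTE size, as gen8_gmch_probe() does below. A self-contained sketch of that arithmetic, with a hypothetical field value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t bdw_gmch_ctl = 3; /* hypothetical GGMS field, already masked */
        /* gen8 decode: 1 << ctl, in MB of PTE space */
        uint64_t size = (1ull << bdw_gmch_ctl) << 20; /* 8 MB of PTEs */
        uint64_t total = size / 8 * 4096; /* 8-byte gen8 PTEs, 4K pages each */

        printf("PTE space %llu MB -> GGTT %llu GB\n",
               (unsigned long long)(size >> 20),
               (unsigned long long)(total >> 30));
        return 0;
}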
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- phys_addr_t phys_addr;
- int ret;
-
- /* For modern GENs the PTEs and register space are split in the BAR */
- phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
-
- /*
- * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- ggtt->gsm = ioremap_nocache(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
- if (ret) {
- DRM_ERROR("Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- ggtt->vm.scratch[0].encode =
- ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
- I915_CACHE_NONE, 0);
-
- return 0;
-}
-
-static void tgl_setup_private_ppat(struct intel_uncore *uncore)
-{
- /* TGL doesn't support LLC or AGE settings */
- intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
- intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
-}
-
-static void cnl_setup_private_ppat(struct intel_uncore *uncore)
-{
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(0),
- GEN8_PPAT_WB | GEN8_PPAT_LLC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(1),
- GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(2),
- GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(3),
- GEN8_PPAT_UC);
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(4),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(5),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(6),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- intel_uncore_write(uncore,
- GEN10_PAT_INDEX(7),
- GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_uncore *uncore)
-{
- u64 pat;
-
- pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
- GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
- GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
- GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
- GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
- GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
- GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
- GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
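
Since GEN8_PPAT(i, x) is just ((u64)x << (i * 8)), the eight one-byte entries pack into a single 64-bit value that is then split across the lo/hi register writes. A minimal standalone sketch of the packing (entry values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PPAT(i, x) ((uint64_t)(x) << ((i) * 8)) /* mirrors GEN8_PPAT() */

int main(void)
{
        /* entry 0 = WB|LLC (0x07), entry 2 = WT|LLCELLC (0x0a) */
        uint64_t pat = PPAT(0, 0x07) | PPAT(2, 0x0a);

        printf("lo=0x%08x hi=0x%08x\n",
               (uint32_t)pat, (uint32_t)(pat >> 32));
        return 0;
}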
-
-static void chv_setup_private_ppat(struct intel_uncore *uncore)
-{
- u64 pat;
-
- /*
- * Map WB on BDW to snooped on CHV.
- *
- * Only the snoop bit has meaning for CHV, the rest is
- * ignored.
- *
- * The hardware will never snoop for certain types of accesses:
- * - CPU GTT (GMADR->GGTT->no snoop->memory)
- * - PPGTT page tables
- * - some other special cycles
- *
- * As with BDW, we also need to consider the following for GT accesses:
- * "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * Which means we must set the snoop bit in PAT entry 0
- * in order to keep the global status page working.
- */
-
- pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
- GEN8_PPAT(1, 0) |
- GEN8_PPAT(2, 0) |
- GEN8_PPAT(3, 0) |
- GEN8_PPAT(4, CHV_PPAT_SNOOP) |
- GEN8_PPAT(5, CHV_PPAT_SNOOP) |
- GEN8_PPAT(6, CHV_PPAT_SNOOP) |
- GEN8_PPAT(7, CHV_PPAT_SNOOP);
-
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- cleanup_scratch_page(vm);
-}
-
-static void setup_private_pat(struct intel_uncore *uncore)
-{
- struct drm_i915_private *i915 = uncore->i915;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- if (INTEL_GEN(i915) >= 12)
- tgl_setup_private_ppat(uncore);
- else if (INTEL_GEN(i915) >= 10)
- cnl_setup_private_ppat(uncore);
- else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
- chv_setup_private_ppat(uncore);
- else
- bdw_setup_private_ppat(uncore);
-}
-
-static struct resource pci_resource(struct pci_dev *pdev, int bar)
-{
- return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
-}
-
-static int gen8_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- if (!IS_DGFX(dev_priv)) {
- ggtt->gmadr = pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
- }
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(dev_priv))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->vm.clear_range != nop_clear_range)
- ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
- }
-
- ggtt->invalidate = gen8_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- ggtt->vm.pte_encode = gen8_pte_encode;
-
- setup_private_pat(ggtt->vm.gt->uncore);
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static int gen6_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
-
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /* 64/512MB is the current min/max we actually know of, but this is just
- * a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
- return -ENXIO;
- }
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(dev_priv))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(dev_priv))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(dev_priv))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static int i915_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *dev_priv = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(gmadr_base,
- ggtt->mappable_end);
-
- ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->vm.insert_page = i915_ggtt_insert_page;
- ggtt->vm.insert_entries = i915_ggtt_insert_entries;
- ggtt->vm.clear_range = i915_ggtt_clear_range;
- ggtt->vm.cleanup = i915_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
-
- if (unlikely(ggtt->do_idle_maps))
- dev_notice(dev_priv->drm.dev,
- "Applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
-static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- int ret;
-
- ggtt->vm.gt = gt;
- ggtt->vm.i915 = i915;
- ggtt->vm.dma = &i915->drm.pdev->dev;
-
- if (INTEL_GEN(i915) <= 5)
- ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(i915) < 8)
- ret = gen6_gmch_probe(ggtt);
- else
- ret = gen8_gmch_probe(ggtt);
- if (ret)
- return ret;
-
- if ((ggtt->vm.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->vm.total >> 20);
- ggtt->vm.total = 1ULL << 32;
- ggtt->mappable_end =
- min_t(u64, ggtt->mappable_end, ggtt->vm.total);
- }
-
- if (ggtt->mappable_end > ggtt->vm.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->vm.total);
- ggtt->mappable_end = ggtt->vm.total;
- }
-
- /* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("DSM size = %lluM\n",
- (u64)resource_size(&intel_graphics_stolen_res) >> 20);
-
- return 0;
-}
-
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @i915: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *i915)
-{
- int ret;
-
- ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
- if (ret)
- return ret;
-
- if (intel_vtd_active())
- dev_info(i915->drm.dev, "VT-d active for gfx access\n");
-
- return 0;
-}
-
-static int ggtt_init_hw(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
- ggtt->vm.is_ggtt = true;
-
- /* Only VLV supports read-only GGTT mappings */
- ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
-
- if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
- ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
-
- if (ggtt->mappable_end) {
- if (!io_mapping_init_wc(&ggtt->iomap,
- ggtt->gmadr.start,
- ggtt->mappable_end)) {
- ggtt->vm.cleanup(&ggtt->vm);
- return -EIO;
- }
-
- ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
- ggtt->mappable_end);
- }
-
- i915_ggtt_init_fences(ggtt);
-
- return 0;
-}
-
-/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- stash_init(&dev_priv->mm.wc_stash);
-
- /* Note that we use page colouring to enforce a guard page at the
- * end of the address space. This is required as the CS may prefetch
- * beyond the end of the batch buffer, across the page boundary,
- * and beyond the end of the GTT if we do not provide a guard.
- */
- ret = ggtt_init_hw(&dev_priv->ggtt);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
-}
-
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
-{
- GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
-
- ggtt->invalidate = guc_ggtt_invalidate;
-
- ggtt->invalidate(ggtt);
-}
-
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
-{
- /* XXX Temporary pardon for error unload */
- if (ggtt->invalidate == gen8_ggtt_invalidate)
- return;
-
- /* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
-
- ggtt->invalidate = gen8_ggtt_invalidate;
-
- ggtt->invalidate(ggtt);
-}
-
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
-{
- struct i915_vma *vma, *vn;
- bool flush = false;
- int open;
-
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
-
- mutex_lock(&ggtt->vm.mutex);
-
- /* First fill our portion of the GTT with scratch pages */
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
-
- /* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- continue;
-
- if (!__i915_vma_unbind(vma))
- continue;
-
- clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
- WARN_ON(i915_vma_bind(vma,
- obj ? obj->cache_level : 0,
- PIN_GLOBAL, NULL));
- if (obj) { /* only used during resume => exclusive access */
- flush |= fetch_and_zero(&obj->write_domain);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- }
- }
-
- atomic_set(&ggtt->vm.open, open);
- ggtt->invalidate(ggtt);
-
- mutex_unlock(&ggtt->vm.mutex);
-
- if (flush)
- wbinvd_on_all_cpus();
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
- ggtt_restore_mappings(ggtt);
-
- if (INTEL_GEN(i915) >= 8)
- setup_private_pat(ggtt->vm.gt->uncore);
-}
-
-static struct scatterlist *
-rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * All we need are the DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= stride;
- }
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_rotation_info_size(rot_info);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].stride, st, sg);
- }
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
-{
- unsigned int row;
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * All we need are the DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += stride - width;
- }
-
- return sg;
-}
-
-static noinline struct sg_table *
-intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
-{
- unsigned int size = intel_remapped_info_size(rem_info);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- sg = remap_pages(obj, rem_info->plane[i].offset,
- rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].stride, st, sg);
- }
-
- i915_sg_trim(st);
-
- return st;
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
-
- DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
-}
-
-static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
-{
- struct sg_table *st;
- struct scatterlist *sg, *iter;
- unsigned int count = view->partial.size;
- unsigned int offset;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
-
- sg = st->sgl;
- st->nents = 0;
- do {
- unsigned int len;
-
- len = min(iter->length - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
- }
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
-
-err_sg_alloc:
- kfree(st);
-err_st_alloc:
- return ERR_PTR(ret);
-}
-
-static int
-i915_get_ggtt_vma_pages(struct i915_vma *vma)
-{
- int ret;
-
- /* The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the
- * vma->pages must be too. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
-
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- /* fall through */
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
-
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
-
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
-
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
-
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
-}
-
/**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
@@ -3848,6 +293,5 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 31a4a96ddd0d..f6226df9f972 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -1,639 +1,21 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Please try to maintain the following order within this file unless it makes
- * sense to do otherwise. From top to bottom:
- * 1. typedefs
- * 2. #defines, and macros
- * 3. structure definitions
- * 4. function prototypes
- *
- * Within each section, please try to order by generation in ascending order,
- * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ * Copyright © 2020 Intel Corporation
*/
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/pagevec.h>
-#include <linux/workqueue.h>
+#include <linux/types.h>
#include <drm/drm_mm.h>
-#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_request.h"
+#include "gt/intel_gtt.h"
#include "i915_scatterlist.h"
-#include "i915_selftest.h"
-#include "gt/intel_timeline.h"
-#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
-#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
-#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
-
-#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
-#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
-
-#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
-
-#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
-
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
-
-struct drm_i915_file_private;
struct drm_i915_gem_object;
-struct i915_vma;
-struct intel_gt;
-
-typedef u32 gen6_pte_t;
-typedef u64 gen8_pte_t;
-
-#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
-
-/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
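
The encode above folds physical address bits 39:32 down into PTE bits 11:4 (addr >> 28 places bit 32 at bit 4, and 0xff0 masks off bits 11:4). A standalone check with a hypothetical address:

#include <stdint.h>
#include <stdio.h>

#define GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) /* gen6-hsw */

int main(void)
{
        uint64_t addr = 0x12ull << 32; /* hypothetical: PA bits 39:32 = 0x12 */
        uint64_t pte = GTT_ADDR_ENCODE(addr);

        printf("PTE bits 11:4 = 0x%02llx\n",
               (unsigned long long)((pte >> 4) & 0xff));
        return 0;
}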
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define GEN6_PTE_VALID (1 << 0)
-
-#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
-#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
-#define I915_PDES 512
-#define I915_PDE_MASK (I915_PDES - 1)
-#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
-
-#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
-#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
-#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
-#define GEN6_PDE_SHIFT 22
-#define GEN6_PDE_VALID (1 << 0)
-
-#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
-
-#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-#define BYT_PTE_WRITEABLE (1 << 1)
-
-/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
- * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
- */
-#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
- (((bits) & 0x8) << (11 - 3)))
-#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
-#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
-#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
-#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
-#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
-#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
-#define HSW_PTE_UNCACHED (0)
-#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
-#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
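
A quick check of that split, as a standalone sketch: the low three bits of the 4-bit control value land in PTE[3:1], and the top bit jumps to PTE[11].

#include <stdio.h>

/* same shape as HSW_CACHEABILITY_CONTROL() above */
#define HSW_CC(bits) ((((bits) & 0x7) << 1) | (((bits) & 0x8) << (11 - 3)))

int main(void)
{
        printf("CC(0x8) -> 0x%03x\n", HSW_CC(0x8)); /* 0x800: only PTE[11] set */
        printf("CC(0x7) -> 0x%03x\n", HSW_CC(0x7)); /* 0x00e: PTE[3:1] set */
        return 0;
}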
-
-/*
- * GEN8 32b style address is defined as a 3 level page table:
- * 31:30 | 29:21 | 20:12 | 11:0
- * PDPE | PDE | PTE | offset
- * The difference as compared to normal x86 3 level page table is the PDPEs are
- * programmed via register.
- *
- * GEN8 48b style address is defined as a 4 level page table:
- * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
- * PML4E | PDPE | PDE | PTE | offset
- */
-#define GEN8_3LVL_PDPES 4
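
For the 48b layout above, each level index is nine bits wide. The sketch below (userspace, address hypothetical) slices a canonical GPU address into its PML4E/PDPE/PDE/PTE indices:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x0000123456789abcull; /* hypothetical 48b GPU VA */

        unsigned pml4e = (addr >> 39) & 0x1ff;
        unsigned pdpe  = (addr >> 30) & 0x1ff;
        unsigned pde   = (addr >> 21) & 0x1ff;
        unsigned pte   = (addr >> 12) & 0x1ff;
        unsigned off   = addr & 0xfff;

        printf("pml4e=%u pdpe=%u pde=%u pte=%u offset=0x%03x\n",
               pml4e, pdpe, pde, pte, off);
        return 0;
}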
-
-#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE 0 /* WB LLC */
-#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
-
-#define CHV_PPAT_SNOOP (1<<6)
-#define GEN8_PPAT_AGE(x) ((x)<<4)
-#define GEN8_PPAT_LLCeLLC (3<<2)
-#define GEN8_PPAT_LLCELLC (2<<2)
-#define GEN8_PPAT_LLC (1<<2)
-#define GEN8_PPAT_WB (3<<0)
-#define GEN8_PPAT_WT (2<<0)
-#define GEN8_PPAT_WC (1<<0)
-#define GEN8_PPAT_UC (0<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
-#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
-
-#define GEN8_PDE_IPS_64K BIT(11)
-#define GEN8_PDE_PS_2M BIT(7)
-
-#define for_each_sgt_daddr(__dp, __iter, __sgt) \
- __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
-
-struct intel_remapped_plane_info {
- /* in gtt pages */
- unsigned int width, height, stride, offset;
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[2];
- unsigned int unused_mbz;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-struct intel_partial_info {
- u64 offset;
- unsigned int size;
-} __packed;
-
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
-
- /* Check that rotation/remapped shares offsets for simplicity */
- BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
- offsetof(struct intel_rotation_info, plane[0]));
- BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
- offsetofend(struct intel_rotation_info, plane[1]));
-
- /* As we encode the size of each branch inside the union into its type,
- * we have to be careful that each branch has a unique size.
- */
- switch ((enum i915_ggtt_view_type)0) {
- case I915_GGTT_VIEW_NORMAL:
- case I915_GGTT_VIEW_PARTIAL:
- case I915_GGTT_VIEW_ROTATED:
- case I915_GGTT_VIEW_REMAPPED:
- /* gcc complains if these are identical cases */
- break;
- }
-}
-
-struct i915_ggtt_view {
- enum i915_ggtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_partial_info partial;
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
-
-enum i915_cache_level;
-
-struct i915_vma;
-
-struct i915_page_dma {
- struct page *page;
- union {
- dma_addr_t daddr;
-
- /* For gen6/gen7 only. This is the offset in the GGTT
- * where the page directory entries for PPGTT begin
- */
- u32 ggtt_offset;
- };
-};
-
-struct i915_page_scratch {
- struct i915_page_dma base;
- u64 encode;
-};
-
-struct i915_page_table {
- struct i915_page_dma base;
- atomic_t used;
-};
-
-struct i915_page_directory {
- struct i915_page_table pt;
- spinlock_t lock;
- void *entry[512];
-};
-
-#define __px_choose_expr(x, type, expr, other) \
- __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), type) || \
- __builtin_types_compatible_p(typeof(x), const type), \
- ({ type __x = (type)(x); expr; }), \
- other)
-
-#define px_base(px) \
- __px_choose_expr(px, struct i915_page_dma *, __x, \
- __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
- __px_choose_expr(px, struct i915_page_table *, &__x->base, \
- __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
- (void)0))))
-#define px_dma(px) (px_base(px)->daddr)
-
-#define px_pt(px) \
- __px_choose_expr(px, struct i915_page_table *, __x, \
- __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
- (void)0))
-#define px_used(px) (&px_pt(px)->used)
-
-struct i915_vma_ops {
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- /*
- * Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page.
- */
- void (*unbind_vma)(struct i915_vma *vma);
-
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
-};
-
-struct pagestash {
- spinlock_t lock;
- struct pagevec pvec;
-};
-
-struct i915_address_space {
- struct kref ref;
- struct rcu_work rcu;
-
- struct drm_mm mm;
- struct intel_gt *gt;
- struct drm_i915_private *i915;
- struct device *dma;
- /* Every address space belongs to a struct file - except for the global
- * GTT that is owned by the driver (and so @file is set to NULL). In
- * principle, no information should leak from one context to another
- * (or between files/processes etc) unless explicitly shared by the
- * owner. Tracking the owner is important in order to free up per-file
- * objects along with the file, to aid resource tracking, and to
- * assign blame.
- */
- struct drm_i915_file_private *file;
- u64 total; /* size addr space maps (ex. 2GB for ggtt) */
- u64 reserved; /* size addr space reserved */
-
- unsigned int bind_async_flags;
-
- /*
- * Each active user context has its own address space (in full-ppgtt).
- * Since the vm may be shared between multiple contexts, we count how
- * many contexts keep us "open". Once open hits zero, we are closed
- * and do not allow any new attachments, and proceed to shutdown our
- * vma and page directories.
- */
- atomic_t open;
-
- struct mutex mutex; /* protects vma and our lists */
-#define VM_CLASS_GGTT 0
-#define VM_CLASS_PPGTT 1
-
- struct i915_page_scratch scratch[4];
- unsigned int scratch_order;
- unsigned int top;
-
- /**
- * List of vma currently bound.
- */
- struct list_head bound_list;
-
- struct pagestash free_pages;
-
- /* Global GTT */
- bool is_ggtt:1;
-
- /* Some systems require uncached updates of the page directories */
- bool pt_kmap_wc:1;
-
- /* Some systems support read-only mappings for GGTT and/or PPGTT */
- bool has_read_only:1;
-
- u64 (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags); /* Create a valid PTE */
-#define PTE_READ_ONLY (1<<0)
-
- int (*allocate_va_range)(struct i915_address_space *vm,
- u64 start, u64 length);
- void (*clear_range)(struct i915_address_space *vm,
- u64 start, u64 length);
- void (*insert_page)(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 flags);
- void (*insert_entries)(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- void (*cleanup)(struct i915_address_space *vm);
-
- struct i915_vma_ops vma_ops;
-
- I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
- I915_SELFTEST_DECLARE(bool scrub_64K);
-};
-
-#define i915_is_ggtt(vm) ((vm)->is_ggtt)
-
-static inline bool
-i915_vm_is_4lvl(const struct i915_address_space *vm)
-{
- return (vm->total - 1) >> 32;
-}
-
-static inline bool
-i915_vm_has_scratch_64K(struct i915_address_space *vm)
-{
- return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
-}
-
-static inline bool
-i915_vm_has_cache_coloring(struct i915_address_space *vm)
-{
- return i915_is_ggtt(vm) && vm->mm.color_adjust;
-}
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations, GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_ggtt {
- struct i915_address_space vm;
-
- struct io_mapping iomap; /* Mapping to our CPU mappable region */
- struct resource gmadr; /* GMADR resource */
- resource_size_t mappable_end; /* End offset that we can CPU map */
-
- /** "Graphics Stolen Memory" holds the global PTEs */
- void __iomem *gsm;
- void (*invalidate)(struct i915_ggtt *ggtt);
-
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_ppgtt *alias;
-
- bool do_idle_maps;
-
- int mtrr;
-
- /** Bit 6 swizzling required for X tiling */
- u32 bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- u32 bit_6_swizzle_y;
-
- u32 pin_bias;
-
- unsigned int num_fences;
- struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
- struct list_head fence_list;
-
- /** List of all objects in gtt_space, currently mmaped by userspace.
- * All objects within this list must also be on bound_list.
- */
- struct list_head userfault_list;
-
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
- struct drm_mm_node error_capture;
- struct drm_mm_node uc_fw;
-};
-
-struct i915_ppgtt {
- struct i915_address_space vm;
-
- struct i915_page_directory *pd;
-};
-
-struct gen6_ppgtt {
- struct i915_ppgtt base;
-
- struct mutex flush;
- struct i915_vma *vma;
- gen6_pte_t __iomem *pd_addr;
-
- atomic_t pin_count;
- struct mutex pin_mutex;
-
- bool scan_for_unused_pt;
-};
-
-#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
-
-static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
-{
- BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
- return __to_gen6_ppgtt(base);
-}
-
-/*
- * gen6_for_each_pde() iterates over every pde from start until start+length.
- * If start and start+length are not perfectly divisible, the macro will round
- * down and up as needed. Start=0 and length=2G effectively iterates over
- * every PDE in the system. The macro modifies ALL its parameters except 'pd',
- * so each of the other parameters should preferably be a simple variable, or
- * at most an lvalue with no side-effects!
- */
-#define gen6_for_each_pde(pt, pd, start, length, iter) \
- for (iter = gen6_pde_index(start); \
- length > 0 && iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
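
What the macro is really doing is chunking [start, start + length) into PDE-sized (4M on gen6) pieces, clamping the first and last. A standalone model of that arithmetic, with hypothetical inputs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t start = 0x003ff000, length = 0x00802000; /* hypothetical range */

        while (length) {
                uint32_t mask = (1u << 22) - 1;               /* GEN6_PDE_SHIFT */
                uint32_t temp = ((start + 1) + mask) & ~mask; /* ALIGN(start + 1, 4M) */
                uint32_t chunk = temp - start;

                if (chunk > length)
                        chunk = length;
                printf("pde %u: 0x%x bytes\n", (unsigned)(start >> 22), (unsigned)chunk);
                start += chunk;
                length -= chunk;
        }
        return 0;
}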
-
-#define gen6_for_all_pdes(pt, pd, iter) \
- for (iter = 0; \
- iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ++iter)
-
-static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
-{
- const u32 mask = NUM_PTE(pde_shift) - 1;
-
- return (address >> PAGE_SHIFT) & mask;
-}
-
-/* Helper to count the number of PTEs within the given length. This count
- * does not cross a page table boundary, so the max value would be
- * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
- */
-static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
-{
- const u64 mask = ~((1ULL << pde_shift) - 1);
- u64 end;
-
- GEM_BUG_ON(length == 0);
- GEM_BUG_ON(offset_in_page(addr | length));
-
- end = addr + length;
-
- if ((addr & mask) != (end & mask))
- return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
-
- return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
-}
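
As a worked example of that clamping (a standalone sketch hard-coding the gen6 parameters: pde_shift = 22, so each PDE covers 1024 PTEs), a three-page range that starts on the last page of a PDE yields a count of one:

#include <stdint.h>
#include <stdio.h>

static uint32_t pte_index(uint64_t addr) { return (addr >> 12) & 1023; }

static uint32_t pte_count(uint64_t addr, uint64_t length)
{
        const uint64_t mask = ~((1ull << 22) - 1); /* PDE-aligned portion */
        uint64_t end = addr + length;

        if ((addr & mask) != (end & mask)) /* range crosses a PDE boundary */
                return 1024 - pte_index(addr);

        return pte_index(end) - pte_index(addr);
}

int main(void)
{
        /* last page of PDE 0 plus two pages in PDE 1: only 1 PTE counted here */
        printf("%u\n", pte_count(0x3ff000, 0x3000));
        return 0;
}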
-
-static inline u32 i915_pde_index(u64 addr, u32 shift)
-{
- return (addr >> shift) & I915_PDE_MASK;
-}
-
-static inline u32 gen6_pte_index(u32 addr)
-{
- return i915_pte_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pte_count(u32 addr, u32 length)
-{
- return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
-}
-
-static inline u32 gen6_pde_index(u32 addr)
-{
- return i915_pde_index(addr, GEN6_PDE_SHIFT);
-}
-
-static inline struct i915_page_table *
-i915_pt_entry(const struct i915_page_directory * const pd,
- const unsigned short n)
-{
- return pd->entry[n];
-}
-
-static inline struct i915_page_directory *
-i915_pd_entry(const struct i915_page_directory * const pdp,
- const unsigned short n)
-{
- return pdp->entry[n];
-}
-
-static inline dma_addr_t
-i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
-{
- struct i915_page_dma *pt = ppgtt->pd->entry[n];
-
- return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
-}
-
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, vm);
-}
-
-static inline struct i915_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
- GEM_BUG_ON(i915_is_ggtt(vm));
- return container_of(vm, struct i915_ppgtt, vm);
-}
-
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
-int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
-int i915_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
-
-static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
-{
- return ggtt->mappable_end > 0;
-}
-
-int i915_ppgtt_init_hw(struct intel_gt *gt);
-
-struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-
-static inline struct i915_address_space *
-i915_vm_get(struct i915_address_space *vm)
-{
- kref_get(&vm->ref);
- return vm;
-}
-
-void i915_vm_release(struct kref *kref);
-
-static inline void i915_vm_put(struct i915_address_space *vm)
-{
- kref_put(&vm->ref, i915_vm_release);
-}
-
-static inline struct i915_address_space *
-i915_vm_open(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- atomic_inc(&vm->open);
- return i915_vm_get(vm);
-}
-
-static inline bool
-i915_vm_tryopen(struct i915_address_space *vm)
-{
- if (atomic_add_unless(&vm->open, 1, 0))
- return i915_vm_get(vm);
-
- return false;
-}
-
-void __i915_vm_close(struct i915_address_space *vm);
-
-static inline void
-i915_vm_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
-
- i915_vm_put(vm);
-}
-
-int gen6_ppgtt_pin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin(struct i915_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
+struct i915_address_space;
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -664,6 +46,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
-#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
+#define PIN_OFFSET_MASK I915_GTT_PAGE_MASK
#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index fda0977d2059..594341e27a47 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -41,6 +41,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
@@ -232,14 +233,13 @@ static void pool_free(struct pagevec *pv, void *addr)
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
-struct compress {
+struct i915_vma_compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
- bool wc;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
{
struct z_stream_s *zstream = &c->zstream;
@@ -261,7 +261,7 @@ static bool compress_init(struct compress *c)
return true;
}
-static bool compress_start(struct compress *c)
+static bool compress_start(struct i915_vma_compress *c)
{
struct z_stream_s *zstream = &c->zstream;
void *workspace = zstream->workspace;
@@ -272,8 +272,8 @@ static bool compress_start(struct compress *c)
return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}
-static void *compress_next_page(struct compress *c,
- struct drm_i915_error_object *dst)
+static void *compress_next_page(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
void *page;
@@ -287,14 +287,15 @@ static void *compress_next_page(struct compress *c,
return dst->pages[dst->page_count++] = page;
}
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
void *src,
- struct drm_i915_error_object *dst)
+ struct i915_vma_coredump *dst,
+ bool wc)
{
struct z_stream_s *zstream = &c->zstream;
zstream->next_in = src;
- if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
@@ -318,8 +319,8 @@ static int compress_page(struct compress *c,
return 0;
}
-static int compress_flush(struct compress *c,
- struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
struct z_stream_s *zstream = &c->zstream;
@@ -347,12 +348,12 @@ end:
return 0;
}
-static void compress_finish(struct compress *c)
+static void compress_finish(struct i915_vma_compress *c)
{
zlib_deflateEnd(&c->zstream);
}
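For reference, compress_page() pushes each 4 KiB page into one long deflate stream and compress_flush() later drains it with Z_FINISH. The kernel zlib API (zlib_deflateInit() against a preallocated workspace) is not the userspace one, but the stream discipline is the same; a hedged userspace sketch with <zlib.h>, compressing a single page-sized buffer:

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
            unsigned char src[4096], out[8192];
            z_stream zs;

            memset(src, 'A', sizeof(src)); /* stand-in for a captured page */
            memset(&zs, 0, sizeof(zs));
            if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
                    return 1;

            zs.next_in = src;
            zs.avail_in = sizeof(src);
            zs.next_out = out;
            zs.avail_out = sizeof(out);
            /* Flush the stream, as compress_flush() does at the end. */
            if (deflate(&zs, Z_FINISH) != Z_STREAM_END) {
                    deflateEnd(&zs);
                    return 1;
            }

            printf("4096 bytes -> %lu bytes\n", (unsigned long)zs.total_out);
            deflateEnd(&zs);
            return 0;
    }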
-static void compress_fini(struct compress *c)
+static void compress_fini(struct i915_vma_compress *c)
{
kfree(c->zstream.workspace);
if (c->tmp)
@@ -367,24 +368,24 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
-struct compress {
+struct i915_vma_compress {
struct pagevec pool;
- bool wc;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct i915_vma_compress *c)
{
return pool_init(&c->pool, ALLOW_FAIL) == 0;
}
-static bool compress_start(struct compress *c)
+static bool compress_start(struct i915_vma_compress *c)
{
return true;
}
-static int compress_page(struct compress *c,
+static int compress_page(struct i915_vma_compress *c,
void *src,
- struct drm_i915_error_object *dst)
+ struct i915_vma_coredump *dst,
+ bool wc)
{
void *ptr;
@@ -392,24 +393,24 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
- if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
+ if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
return 0;
}
-static int compress_flush(struct compress *c,
- struct drm_i915_error_object *dst)
+static int compress_flush(struct i915_vma_compress *c,
+ struct i915_vma_coredump *dst)
{
return 0;
}
-static void compress_finish(struct compress *c)
+static void compress_finish(struct i915_vma_compress *c)
{
}
-static void compress_fini(struct compress *c)
+static void compress_fini(struct i915_vma_compress *c)
{
pool_fini(&c->pool);
}
@@ -422,7 +423,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#endif
static void error_print_instdone(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee)
+ const struct intel_engine_coredump *ee)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
@@ -453,40 +454,56 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
- const struct drm_i915_error_request *erq,
- const unsigned long epoch)
+ const struct i915_request_coredump *erq)
{
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&erq->flags) ? "+" : "",
erq->sched_attr.priority,
- jiffies_to_msecs(erq->jiffies - epoch),
erq->start, erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
- const struct drm_i915_error_context *ctx)
+ const struct i915_gem_context_coredump *ctx)
{
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active);
}
+static struct i915_vma_coredump *
+__find_vma(struct i915_vma_coredump *vma, const char *name)
+{
+ while (vma) {
+ if (strcmp(vma->name, name) == 0)
+ return vma;
+ vma = vma->next;
+ }
+
+ return NULL;
+}
+
+static struct i915_vma_coredump *
+find_batch(const struct intel_engine_coredump *ee)
+{
+ return __find_vma(ee->vma, "batch");
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
- const struct drm_i915_error_engine *ee,
- const unsigned long epoch)
+ const struct intel_engine_coredump *ee)
{
+ struct i915_vma_coredump *batch;
int n;
err_printf(m, "%s command stream:\n", ee->engine->name);
- err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
+ err_printf(m, " CCID: 0x%08x\n", ee->ccid);
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -501,9 +518,10 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_instdone(m, ee);
- if (ee->batchbuffer) {
- u64 start = ee->batchbuffer->gtt_offset;
- u64 end = start + ee->batchbuffer->gtt_size;
+ batch = find_batch(ee);
+ if (batch) {
+ u64 start = batch->gtt_offset;
+ u64 end = start + batch->gtt_size;
err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
upper_32_bits(start), lower_32_bits(start),
@@ -535,13 +553,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
- err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
- error_print_request(m, " ", &ee->execlist[n], epoch);
+ error_print_request(m, " ", &ee->execlist[n]);
}
error_print_context(m, " Active context: ", &ee->context);
@@ -556,38 +572,35 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static void print_error_obj(struct drm_i915_error_state_buf *m,
+static void print_error_vma(struct drm_i915_error_state_buf *m,
const struct intel_engine_cs *engine,
- const char *name,
- const struct drm_i915_error_object *obj)
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
int page;
- if (!obj)
+ if (!vma)
return;
- if (name) {
- err_printf(m, "%s --- %s = 0x%08x %08x\n",
- engine ? engine->name : "global", name,
- upper_32_bits(obj->gtt_offset),
- lower_32_bits(obj->gtt_offset));
- }
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", vma->name,
+ upper_32_bits(vma->gtt_offset),
+ lower_32_bits(vma->gtt_offset));
- if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
- err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes);
+ if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+ err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
err_compression_marker(m);
- for (page = 0; page < obj->page_count; page++) {
+ for (page = 0; page < vma->page_count; page++) {
int i, len;
len = PAGE_SIZE;
- if (page == obj->page_count - 1)
- len -= obj->unused;
+ if (page == vma->page_count - 1)
+ len -= vma->unused;
len = ascii85_encode_len(len);
for (i = 0; i < len; i++)
- err_puts(m, ascii85_encode(obj->pages[page][i], out));
+ err_puts(m, ascii85_encode(vma->pages[page][i], out));
}
err_puts(m, "\n");
}
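Each captured page is printed as ascii85 after the compression marker, so the error state stays plain text; an all-zero word collapses to a single 'z'. The encoder is the one from include/linux/ascii85.h; a userspace mirror with stdint types:

    #include <stdint.h>
    #include <stdio.h>

    static const char *ascii85_encode(uint32_t in, char *out)
    {
            int i;

            if (in == 0)
                    return "z"; /* shorthand for an all-zero word */

            out[5] = '\0';
            for (i = 5; i--; ) {
                    out[i] = '!' + in % 85; /* base-85 digits, offset from '!' */
                    in /= 85;
            }
            return out;
    }

    int main(void)
    {
            char buf[6];

            printf("%s\n", ascii85_encode(0, buf)); /* "z" */
            printf("%s\n", ascii85_encode(0x12345678u, buf));
            return 0;
    }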
@@ -626,18 +639,13 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
}
static void err_print_uc(struct drm_i915_error_state_buf *m,
- const struct i915_error_uc *error_uc)
+ const struct intel_uc_coredump *error_uc)
{
struct drm_printer p = i915_error_printer(m);
- const struct i915_gpu_state *error =
- container_of(error_uc, typeof(*error), uc);
-
- if (!error->device_info.has_gt_uc)
- return;
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
+ print_error_vma(m, NULL, error_uc->guc_log);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -657,12 +665,69 @@ static void err_free_sgl(struct scatterlist *sgl)
}
}
+static void err_print_gt(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ const struct intel_engine_coredump *ee;
+ int i;
+
+ err_printf(m, "GT awake: %s\n", yesno(gt->awake));
+ err_printf(m, "EIR: 0x%08x\n", gt->eir);
+ err_printf(m, "IER: 0x%08x\n", gt->ier);
+ for (i = 0; i < gt->ngtier; i++)
+ err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+
+ for (i = 0; i < gt->nfence; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+
+ if (IS_GEN_RANGE(m->i915, 6, 11)) {
+ err_printf(m, "ERROR: 0x%08x\n", gt->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
+ }
+
+ if (INTEL_GEN(m->i915) >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ gt->fault_data1, gt->fault_data0);
+
+ if (IS_GEN(m->i915, 7))
+ err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
+
+ if (IS_GEN_RANGE(m->i915, 8, 11))
+ err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
+
+ if (IS_GEN(m->i915, 12))
+ err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
+
+ if (INTEL_GEN(m->i915) >= 12) {
+ int i;
+
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
+ gt->sfc_done[i]);
+
+ err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
+ }
+
+ for (ee = gt->engine; ee; ee = ee->next) {
+ const struct i915_vma_coredump *vma;
+
+ error_print_engine(m, ee);
+ for (vma = ee->vma; vma; vma = vma->next)
+ print_error_vma(m, ee->engine, vma);
+ }
+
+ if (gt->uc)
+ err_print_uc(m, gt->uc);
+}
+
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
- struct i915_gpu_state *error)
+ struct i915_gpu_coredump *error)
{
- const struct drm_i915_error_engine *ee;
+ const struct intel_engine_coredump *ee;
struct timespec64 ts;
- int i, j;
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
@@ -682,7 +747,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
error->capture, jiffies_to_msecs(jiffies - error->capture));
- for (ee = error->engine; ee; ee = ee->next)
+ for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
ee->engine->name,
ee->context.comm,
@@ -708,90 +773,11 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
CSR_VERSION_MINOR(csr->version));
}
- err_printf(m, "GT awake: %s\n", yesno(error->awake));
err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
- err_printf(m, "EIR: 0x%08x\n", error->eir);
- err_printf(m, "IER: 0x%08x\n", error->ier);
- for (i = 0; i < error->ngtier; i++)
- err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
- err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
- err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
- for (i = 0; i < error->nfence; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
- if (IS_GEN_RANGE(m->i915, 6, 11)) {
- err_printf(m, "ERROR: 0x%08x\n", error->error);
- err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
- }
- if (INTEL_GEN(m->i915) >= 8)
- err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
- error->fault_data1, error->fault_data0);
-
- if (IS_GEN(m->i915, 7))
- err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
- if (IS_GEN_RANGE(m->i915, 8, 11))
- err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
-
- if (IS_GEN(m->i915, 12))
- err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err);
-
- if (INTEL_GEN(m->i915) >= 12) {
- int i;
-
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
- err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
- error->sfc_done[i]);
-
- err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done);
- }
-
- for (ee = error->engine; ee; ee = ee->next)
- error_print_engine(m, ee, error->capture);
-
- for (ee = error->engine; ee; ee = ee->next) {
- const struct drm_i915_error_object *obj;
-
- obj = ee->batchbuffer;
- if (obj) {
- err_puts(m, ee->engine->name);
- if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d])",
- ee->context.comm,
- ee->context.pid);
- err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
- upper_32_bits(obj->gtt_offset),
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, ee->engine, NULL, obj);
- }
-
- for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, ee->engine, "user", ee->user_bo[j]);
-
- if (ee->num_requests) {
- err_printf(m, "%s --- %d requests\n",
- ee->engine->name,
- ee->num_requests);
- for (j = 0; j < ee->num_requests; j++)
- error_print_request(m, " ",
- &ee->requests[j],
- error->capture);
- }
-
- print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
- print_error_obj(m, ee->engine, "HW Status", ee->hws_page);
- print_error_obj(m, ee->engine, "HW context", ee->ctx);
- print_error_obj(m, ee->engine, "WA context", ee->wa_ctx);
- print_error_obj(m, ee->engine,
- "WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, ee->engine,
- "NULL context", ee->default_state);
- }
+ if (error->gt)
+ err_print_gt(m, error->gt);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -802,10 +788,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->runtime_info,
&error->driver_caps);
err_print_params(m, &error->params);
- err_print_uc(m, &error->uc);
}
-static int err_print_to_sgl(struct i915_gpu_state *error)
+static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
struct drm_i915_error_state_buf m;
@@ -842,8 +827,8 @@ static int err_print_to_sgl(struct i915_gpu_state *error)
return 0;
}
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
- char *buf, loff_t off, size_t rem)
+ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+ char *buf, loff_t off, size_t rem)
{
struct scatterlist *sg;
size_t count;
@@ -906,85 +891,88 @@ ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
return count;
}
-static void i915_error_object_free(struct drm_i915_error_object *obj)
+static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
- int page;
+ while (vma) {
+ struct i915_vma_coredump *next = vma->next;
+ int page;
- if (obj == NULL)
- return;
+ for (page = 0; page < vma->page_count; page++)
+ free_page((unsigned long)vma->pages[page]);
- for (page = 0; page < obj->page_count; page++)
- free_page((unsigned long)obj->pages[page]);
-
- kfree(obj);
+ kfree(vma);
+ vma = next;
+ }
}
-
-static void cleanup_params(struct i915_gpu_state *error)
+static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
}
-static void cleanup_uc_state(struct i915_gpu_state *error)
+static void cleanup_uc(struct intel_uc_coredump *uc)
{
- struct i915_error_uc *error_uc = &error->uc;
+ kfree(uc->guc_fw.path);
+ kfree(uc->huc_fw.path);
+ i915_vma_coredump_free(uc->guc_log);
- kfree(error_uc->guc_fw.path);
- kfree(error_uc->huc_fw.path);
- i915_error_object_free(error_uc->guc_log);
+ kfree(uc);
}
-void __i915_gpu_state_free(struct kref *error_ref)
+static void cleanup_gt(struct intel_gt_coredump *gt)
{
- struct i915_gpu_state *error =
- container_of(error_ref, typeof(*error), ref);
- long i;
+ while (gt->engine) {
+ struct intel_engine_coredump *ee = gt->engine;
+
+ gt->engine = ee->next;
+
+ i915_vma_coredump_free(ee->vma);
+ kfree(ee);
+ }
- while (error->engine) {
- struct drm_i915_error_engine *ee = error->engine;
+ if (gt->uc)
+ cleanup_uc(gt->uc);
- error->engine = ee->next;
+ kfree(gt);
+}
- for (i = 0; i < ee->user_bo_count; i++)
- i915_error_object_free(ee->user_bo[i]);
- kfree(ee->user_bo);
+void __i915_gpu_coredump_free(struct kref *error_ref)
+{
+ struct i915_gpu_coredump *error =
+ container_of(error_ref, typeof(*error), ref);
- i915_error_object_free(ee->batchbuffer);
- i915_error_object_free(ee->wa_batchbuffer);
- i915_error_object_free(ee->ringbuffer);
- i915_error_object_free(ee->hws_page);
- i915_error_object_free(ee->ctx);
- i915_error_object_free(ee->wa_ctx);
+ while (error->gt) {
+ struct intel_gt_coredump *gt = error->gt;
- kfree(ee->requests);
- kfree(ee);
+ error->gt = gt->next;
+ cleanup_gt(gt);
}
kfree(error->overlay);
kfree(error->display);
cleanup_params(error);
- cleanup_uc_state(error);
err_free_sgl(error->sgl);
kfree(error);
}
-static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *i915,
- struct i915_vma *vma,
- struct compress *compress)
+static struct i915_vma_coredump *
+i915_vma_coredump_create(const struct intel_gt *gt,
+ const struct i915_vma *vma,
+ const char *name,
+ struct i915_vma_compress *compress)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = gt->ggtt;
const u64 slot = ggtt->error_capture.start;
- struct drm_i915_error_object *dst;
+ struct i915_vma_coredump *dst;
unsigned long num_pages;
struct sgt_iter iter;
int ret;
might_sleep();
- if (!vma || !vma->pages)
+ if (!vma || !vma->pages || !compress)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -998,6 +986,9 @@ i915_error_object_create(struct drm_i915_private *i915,
return NULL;
}
+ strcpy(dst->name, name);
+ dst->next = NULL;
+
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
dst->gtt_page_sizes = vma->page_sizes.gtt;
@@ -1005,9 +996,6 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
- compress->wc = i915_gem_object_is_lmem(vma->obj) ||
- drm_mm_node_allocated(&ggtt->error_capture);
-
ret = -EINVAL;
if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
@@ -1016,9 +1004,12 @@ i915_error_object_create(struct drm_i915_private *i915,
for_each_sgt_daddr(dma, iter, vma->pages) {
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
I915_CACHE_NONE, 0);
+ mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
+ ret = compress_page(compress,
+ (void __force *)s, dst,
+ true);
io_mapping_unmap(s);
if (ret)
break;
@@ -1031,7 +1022,9 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
+ ret = compress_page(compress,
+ (void __force *)s, dst,
+ true);
io_mapping_unmap(s);
if (ret)
break;
@@ -1045,7 +1038,7 @@ i915_error_object_create(struct drm_i915_private *i915,
drm_clflush_pages(&page, 1);
s = kmap(page);
- ret = compress_page(compress, s, dst);
+ ret = compress_page(compress, s, dst, false);
kunmap(page);
drm_clflush_pages(&page, 1);
@@ -1066,77 +1059,56 @@ i915_error_object_create(struct drm_i915_private *i915,
return dst;
}
-/*
- * Generate a semi-unique error code. The code is not meant to have meaning, The
- * code's only purpose is to try to prevent false duplicated bug reports by
- * grossly estimating a GPU error state.
- *
- * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
- * the hang if we could strip the GTT offset information from it.
- *
- * It's only a small step better than a random number in its current form.
- */
-static u32 i915_error_generate_code(struct i915_gpu_state *error)
-{
- const struct drm_i915_error_engine *ee = error->engine;
-
- /*
- * IPEHR would be an ideal way to detect errors, as it's the gross
- * measure of "the command that hung." However, has some very common
- * synchronization commands which almost always appear in the case
- * strictly a client bug. Use instdone to differentiate those some.
- */
- return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
-}
-
-static void gem_record_fences(struct i915_gpu_state *error)
+static void gt_record_fences(struct intel_gt_coredump *gt)
{
- struct drm_i915_private *dev_priv = error->i915;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+ struct intel_uncore *uncore = gt->_gt->uncore;
int i;
- if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ if (INTEL_GEN(uncore->i915) >= 6) {
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_GEN6_LO(i));
- } else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ } else if (INTEL_GEN(uncore->i915) >= 4) {
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->ggtt.num_fences; i++)
- error->fence[i] =
+ for (i = 0; i < ggtt->num_fences; i++)
+ gt->fence[i] =
intel_uncore_read(uncore, FENCE_REG(i));
}
- error->nfence = i;
+ gt->nfence = i;
}
-static void error_record_engine_registers(struct i915_gpu_state *error,
- struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
+static void engine_record_registers(struct intel_engine_coredump *ee)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ const struct intel_engine_cs *engine = ee->engine;
+ struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(dev_priv) >= 12)
- ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG);
- else if (INTEL_GEN(dev_priv) >= 8)
- ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
+ if (INTEL_GEN(i915) >= 12)
+ ee->fault_reg = intel_uncore_read(engine->uncore,
+ GEN12_RING_FAULT_REG);
+ else if (INTEL_GEN(i915) >= 8)
+ ee->fault_reg = intel_uncore_read(engine->uncore,
+ GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (INTEL_GEN(i915) >= 4) {
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
ee->instps = ENGINE_READ(engine, RING_INSTPS);
ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
- if (INTEL_GEN(dev_priv) >= 8) {
+ ee->ccid = ENGINE_READ(engine, CCID);
+ if (INTEL_GEN(i915) >= 8) {
ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
@@ -1155,13 +1127,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->head = ENGINE_READ(engine, RING_HEAD);
ee->tail = ENGINE_READ(engine, RING_TAIL);
ee->ctl = ENGINE_READ(engine, RING_CTL);
- if (INTEL_GEN(dev_priv) > 2)
+ if (INTEL_GEN(i915) > 2)
ee->mode = ENGINE_READ(engine, RING_MI_MODE);
- if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
+ if (!HWS_NEEDS_PHYSICAL(i915)) {
i915_reg_t mmio;
- if (IS_GEN(dev_priv, 7)) {
+ if (IS_GEN(i915, 7)) {
switch (engine->id) {
default:
MISSING_CASE(engine->id);
@@ -1186,40 +1158,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
mmio = RING_HWS_PGA(engine->mmio_base);
}
- ee->hws = I915_READ(mmio);
+ ee->hws = intel_uncore_read(engine->uncore, mmio);
}
- ee->idle = intel_engine_is_idle(engine);
- ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
- engine);
+ ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
- if (HAS_PPGTT(dev_priv)) {
+ if (HAS_PPGTT(i915)) {
int i;
ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_GEN(i915, 6)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
- } else if (IS_GEN(dev_priv, 7)) {
+ } else if (IS_GEN(i915, 7)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE);
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (INTEL_GEN(i915) >= 8) {
u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(base, i));
+ intel_uncore_read(engine->uncore,
+ GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(base, i));
+ intel_uncore_read(engine->uncore,
+ GEN8_RING_PDP_LDW(base, i));
}
}
}
}
static void record_request(const struct i915_request *request,
- struct drm_i915_error_request *erq)
+ struct i915_request_coredump *erq)
{
const struct i915_gem_context *ctx;
@@ -1227,7 +1199,6 @@ static void record_request(const struct i915_request *request,
erq->context = request->fence.context;
erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
erq->tail = request->tail;
@@ -1240,59 +1211,10 @@ static void record_request(const struct i915_request *request,
rcu_read_unlock();
}
-static void engine_record_requests(struct intel_engine_cs *engine,
- struct i915_request *first,
- struct drm_i915_error_engine *ee)
+static void engine_record_execlists(struct intel_engine_coredump *ee)
{
- struct i915_request *request;
- int count;
-
- count = 0;
- request = first;
- list_for_each_entry_from(request, &engine->active.requests, sched.link)
- count++;
- if (!count)
- return;
-
- ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
- if (!ee->requests)
- return;
-
- ee->num_requests = count;
-
- count = 0;
- request = first;
- list_for_each_entry_from(request,
- &engine->active.requests, sched.link) {
- if (count >= ee->num_requests) {
- /*
- * If the ring request list was changed in
- * between the point where the error request
- * list was created and dimensioned and this
- * point then just exit early to avoid crashes.
- *
- * We don't need to communicate that the
- * request list changed state during error
- * state capture and that the error state is
- * slightly incorrect as a consequence since we
- * are typically only interested in the request
- * list state at the point of error state
- * capture, not in any changes happening during
- * the capture.
- */
- break;
- }
-
- record_request(request, &ee->requests[count++]);
- }
- ee->num_requests = count;
-}
-
-static void error_record_engine_execlists(const struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- const struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request * const *port = execlists->active;
+ const struct intel_engine_execlists * const el = &ee->engine->execlists;
+ struct i915_request * const *port = el->active;
unsigned int n = 0;
while (*port)
@@ -1301,7 +1223,7 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
ee->num_ports = n;
}
-static bool record_context(struct drm_i915_error_context *e,
+static bool record_context(struct i915_gem_context_coredump *e,
const struct i915_request *rq)
{
struct i915_gem_context *ctx;
@@ -1334,23 +1256,24 @@ static bool record_context(struct drm_i915_error_context *e,
return capture;
}
-struct capture_vma {
- struct capture_vma *next;
- void **slot;
+struct intel_engine_capture_vma {
+ struct intel_engine_capture_vma *next;
+ struct i915_vma *vma;
+ char name[16];
};
-static struct capture_vma *
-capture_vma(struct capture_vma *next,
+static struct intel_engine_capture_vma *
+capture_vma(struct intel_engine_capture_vma *next,
struct i915_vma *vma,
- struct drm_i915_error_object **out)
+ const char *name,
+ gfp_t gfp)
{
- struct capture_vma *c;
+ struct intel_engine_capture_vma *c;
- *out = NULL;
if (!vma)
return next;
- c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL);
+ c = kmalloc(sizeof(*c), gfp);
if (!c)
return next;
@@ -1359,54 +1282,31 @@ capture_vma(struct capture_vma *next,
return next;
}
- c->slot = (void **)out;
- *c->slot = i915_vma_get(vma);
+ strcpy(c->name, name);
+ c->vma = i915_vma_get(vma);
c->next = next;
return c;
}
-static struct capture_vma *
-request_record_user_bo(struct i915_request *request,
- struct drm_i915_error_engine *ee,
- struct capture_vma *capture)
+static struct intel_engine_capture_vma *
+capture_user(struct intel_engine_capture_vma *capture,
+ const struct i915_request *rq,
+ gfp_t gfp)
{
struct i915_capture_list *c;
- struct drm_i915_error_object **bo;
- long count, max;
-
- max = 0;
- for (c = request->capture_list; c; c = c->next)
- max++;
- if (!max)
- return capture;
-
- bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
- if (!bo) {
- /* If we can't capture everything, try to capture something. */
- max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
- bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
- }
- if (!bo)
- return capture;
- count = 0;
- for (c = request->capture_list; c; c = c->next) {
- capture = capture_vma(capture, c->vma, &bo[count]);
- if (++count == max)
- break;
- }
-
- ee->user_bo = bo;
- ee->user_bo_count = count;
+ for (c = rq->capture_list; c; c = c->next)
+ capture = capture_vma(capture, c->vma, "user", gfp);
return capture;
}
-static struct drm_i915_error_object *
-capture_object(struct drm_i915_private *dev_priv,
+static struct i915_vma_coredump *
+capture_object(const struct intel_gt *gt,
struct drm_i915_gem_object *obj,
- struct compress *compress)
+ const char *name,
+ struct i915_vma_compress *compress)
{
if (obj && i915_gem_object_has_pages(obj)) {
struct i915_vma fake = {
@@ -1416,127 +1316,175 @@ capture_object(struct drm_i915_private *dev_priv,
.obj = obj,
};
- return i915_error_object_create(dev_priv, &fake, compress);
+ return i915_vma_coredump_create(gt, &fake, name, compress);
} else {
return NULL;
}
}
-static void
-gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
+static void add_vma(struct intel_engine_coredump *ee,
+ struct i915_vma_coredump *vma)
{
- struct drm_i915_private *i915 = error->i915;
- struct intel_engine_cs *engine;
- struct drm_i915_error_engine *ee;
+ if (vma) {
+ vma->next = ee->vma;
+ ee->vma = vma;
+ }
+}
+
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+ struct intel_engine_coredump *ee;
- ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ ee = kzalloc(sizeof(*ee), gfp);
if (!ee)
- return;
+ return NULL;
- for_each_uabi_engine(engine, i915) {
- struct capture_vma *capture = NULL;
- struct i915_request *request;
- unsigned long flags;
+ ee->engine = engine;
- /* Refill our page pool before entering atomic section */
- pool_refill(&compress->pool, ALLOW_FAIL);
+ engine_record_registers(ee);
+ engine_record_execlists(ee);
- spin_lock_irqsave(&engine->active.lock, flags);
- request = intel_engine_find_active_request(engine);
- if (!request) {
- spin_unlock_irqrestore(&engine->active.lock, flags);
- continue;
- }
+ return ee;
+}
- error->simulated |= record_context(&ee->context, request);
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp)
+{
+ struct intel_engine_capture_vma *vma = NULL;
- /*
- * We need to copy these to an anonymous buffer
- * as the simplest method to avoid being overwritten
- * by userspace.
- */
- capture = capture_vma(capture,
- request->batch,
- &ee->batchbuffer);
+ ee->simulated |= record_context(&ee->context, rq);
+ if (ee->simulated)
+ return NULL;
- if (HAS_BROKEN_CS_TLB(i915))
- capture = capture_vma(capture,
- engine->gt->scratch,
- &ee->wa_batchbuffer);
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ vma = capture_vma(vma, rq->batch, "batch", gfp);
+ vma = capture_user(vma, rq, gfp);
+ vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
+ vma = capture_vma(vma, rq->context->state, "HW context", gfp);
- capture = request_record_user_bo(request, ee, capture);
+ ee->rq_head = rq->head;
+ ee->rq_post = rq->postfix;
+ ee->rq_tail = rq->tail;
- capture = capture_vma(capture,
- request->context->state,
- &ee->ctx);
+ return vma;
+}
- capture = capture_vma(capture,
- request->ring->vma,
- &ee->ringbuffer);
+void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress)
+{
+ const struct intel_engine_cs *engine = ee->engine;
- ee->cpu_ring_head = request->ring->head;
- ee->cpu_ring_tail = request->ring->tail;
+ while (capture) {
+ struct intel_engine_capture_vma *this = capture;
+ struct i915_vma *vma = this->vma;
- ee->rq_head = request->head;
- ee->rq_post = request->postfix;
- ee->rq_tail = request->tail;
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ vma, this->name,
+ compress));
- engine_record_requests(engine, request, ee);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ i915_active_release(&vma->active);
+ i915_vma_put(vma);
- error_record_engine_registers(error, engine, ee);
- error_record_engine_execlists(engine, ee);
+ capture = this->next;
+ kfree(this);
+ }
- while (capture) {
- struct capture_vma *this = capture;
- struct i915_vma *vma = *this->slot;
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ engine->status_page.vma,
+ "HW Status",
+ compress));
- *this->slot =
- i915_error_object_create(i915, vma, compress);
+ add_vma(ee,
+ i915_vma_coredump_create(engine->gt,
+ engine->wa_ctx.vma,
+ "WA context",
+ compress));
- i915_active_release(&vma->active);
- i915_vma_put(vma);
+ add_vma(ee,
+ capture_object(engine->gt,
+ engine->default_state,
+ "NULL context",
+ compress));
+}
- capture = this->next;
- kfree(this);
- }
+static struct intel_engine_coredump *
+capture_engine(struct intel_engine_cs *engine,
+ struct i915_vma_compress *compress)
+{
+ struct intel_engine_capture_vma *capture = NULL;
+ struct intel_engine_coredump *ee;
+ struct i915_request *rq;
+ unsigned long flags;
- ee->hws_page =
- i915_error_object_create(i915,
- engine->status_page.vma,
- compress);
+ ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ if (!ee)
+ return NULL;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+ rq = intel_engine_find_active_request(engine);
+ if (rq)
+ capture = intel_engine_coredump_add_request(ee, rq,
+ ATOMIC_MAYFAIL);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ if (!capture) {
+ kfree(ee);
+ return NULL;
+ }
- ee->wa_ctx =
- i915_error_object_create(i915,
- engine->wa_ctx.vma,
- compress);
+ intel_engine_coredump_add_vma(ee, capture, compress);
- ee->default_state =
- capture_object(i915, engine->default_state, compress);
+ return ee;
+}
- ee->engine = engine;
+static void
+gt_record_engines(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- ee->next = error->engine;
- error->engine = ee;
+ for_each_engine(engine, gt->_gt, id) {
+ struct intel_engine_coredump *ee;
- ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ /* Refill our page pool before entering atomic section */
+ pool_refill(&compress->pool, ALLOW_FAIL);
+
+ ee = capture_engine(engine, compress);
if (!ee)
- return;
- }
+ continue;
- kfree(ee);
+ gt->simulated |= ee->simulated;
+ if (ee->simulated) {
+ kfree(ee);
+ continue;
+ }
+
+ ee->next = gt->engine;
+ gt->engine = ee;
+ }
}
-static void
-capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
+static struct intel_uc_coredump *
+gt_record_uc(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
{
- struct drm_i915_private *i915 = error->i915;
- struct i915_error_uc *error_uc = &error->uc;
- struct intel_uc *uc = &i915->gt.uc;
+ const struct intel_uc *uc = &gt->_gt->uc;
+ struct intel_uc_coredump *error_uc;
- /* Capturing uC state won't be useful if there is no GuC */
- if (!error->device_info.has_gt_uc)
- return;
+ error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
+ if (!error_uc)
+ return NULL;
memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
@@ -1547,19 +1495,42 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
*/
error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log = i915_error_object_create(i915,
- uc->guc.log.vma,
- compress);
+ error_uc->guc_log =
+ i915_vma_coredump_create(gt->_gt,
+ uc->guc.log.vma, "GuC log buffer",
+ compress);
+
+ return error_uc;
+}
+
+static void gt_capture_prepare(struct intel_gt_coredump *gt)
+{
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+
+ mutex_lock(&ggtt->error_mutex);
+}
+
+static void gt_capture_finish(struct intel_gt_coredump *gt)
+{
+ struct i915_ggtt *ggtt = gt->_gt->ggtt;
+
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ ggtt->vm.clear_range(&ggtt->vm,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ mutex_unlock(&ggtt->error_mutex);
}
/* Capture all registers which don't fit into another category. */
-static void capture_reg_state(struct i915_gpu_state *error)
+static void gt_record_regs(struct intel_gt_coredump *gt)
{
- struct drm_i915_private *i915 = error->i915;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
int i;
- /* General organization
+ /*
+ * General organization
* 1. Registers specific to a single generation
* 2. Registers which belong to multiple generations
* 3. Feature specific registers.
@@ -1569,138 +1540,162 @@ static void capture_reg_state(struct i915_gpu_state *error)
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(i915)) {
- error->gtier[0] = intel_uncore_read(uncore, GTIER);
- error->ier = intel_uncore_read(uncore, VLV_IER);
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ier = intel_uncore_read(uncore, VLV_IER);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
if (IS_GEN(i915, 7))
- error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
+ gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
if (INTEL_GEN(i915) >= 12) {
- error->fault_data0 = intel_uncore_read(uncore,
- GEN12_FAULT_TLB_DATA0);
- error->fault_data1 = intel_uncore_read(uncore,
- GEN12_FAULT_TLB_DATA1);
+ gt->fault_data0 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA0);
+ gt->fault_data1 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA1);
} else if (INTEL_GEN(i915) >= 8) {
- error->fault_data0 = intel_uncore_read(uncore,
- GEN8_FAULT_TLB_DATA0);
- error->fault_data1 = intel_uncore_read(uncore,
- GEN8_FAULT_TLB_DATA1);
+ gt->fault_data0 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA0);
+ gt->fault_data1 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA1);
}
if (IS_GEN(i915, 6)) {
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
- error->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
- error->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
+ gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
+ gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
if (INTEL_GEN(i915) >= 7)
- error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
+ gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
if (INTEL_GEN(i915) >= 6) {
- error->derrmr = intel_uncore_read(uncore, DERRMR);
+ gt->derrmr = intel_uncore_read(uncore, DERRMR);
if (INTEL_GEN(i915) < 12) {
- error->error = intel_uncore_read(uncore, ERROR_GEN6);
- error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ gt->error = intel_uncore_read(uncore, ERROR_GEN6);
+ gt->done_reg = intel_uncore_read(uncore, DONE_REG);
}
}
- if (INTEL_GEN(i915) >= 5)
- error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE));
-
/* 3: Feature specific registers */
if (IS_GEN_RANGE(i915, 6, 7)) {
- error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
- error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
+ gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
if (IS_GEN_RANGE(i915, 8, 11))
- error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+ gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
if (IS_GEN(i915, 12))
- error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+ gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
if (INTEL_GEN(i915) >= 12) {
for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
- error->sfc_done[i] =
+ gt->sfc_done[i] =
intel_uncore_read(uncore, GEN12_SFC_DONE(i));
}
- error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
+ gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
}
/* 4: Everything else */
if (INTEL_GEN(i915) >= 11) {
- error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- error->gtier[0] =
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ gt->gtier[0] =
intel_uncore_read(uncore,
GEN11_RENDER_COPY_INTR_ENABLE);
- error->gtier[1] =
+ gt->gtier[1] =
intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
- error->gtier[2] =
+ gt->gtier[2] =
intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
- error->gtier[3] =
+ gt->gtier[3] =
intel_uncore_read(uncore,
GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- error->gtier[4] =
+ gt->gtier[4] =
intel_uncore_read(uncore,
GEN11_CRYPTO_RSVD_INTR_ENABLE);
- error->gtier[5] =
+ gt->gtier[5] =
intel_uncore_read(uncore,
GEN11_GUNIT_CSME_INTR_ENABLE);
- error->ngtier = 6;
+ gt->ngtier = 6;
} else if (INTEL_GEN(i915) >= 8) {
- error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
- error->gtier[i] = intel_uncore_read(uncore,
- GEN8_GT_IER(i));
- error->ngtier = 4;
+ gt->gtier[i] =
+ intel_uncore_read(uncore, GEN8_GT_IER(i));
+ gt->ngtier = 4;
} else if (HAS_PCH_SPLIT(i915)) {
- error->ier = intel_uncore_read(uncore, DEIER);
- error->gtier[0] = intel_uncore_read(uncore, GTIER);
- error->ngtier = 1;
+ gt->ier = intel_uncore_read(uncore, DEIER);
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
} else if (IS_GEN(i915, 2)) {
- error->ier = intel_uncore_read16(uncore, GEN2_IER);
+ gt->ier = intel_uncore_read16(uncore, GEN2_IER);
} else if (!IS_VALLEYVIEW(i915)) {
- error->ier = intel_uncore_read(uncore, GEN2_IER);
+ gt->ier = intel_uncore_read(uncore, GEN2_IER);
}
- error->eir = intel_uncore_read(uncore, EIR);
- error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+ gt->eir = intel_uncore_read(uncore, EIR);
+ gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
-static const char *
-error_msg(struct i915_gpu_state *error,
- intel_engine_mask_t engines, const char *msg)
+/*
+ * Generate a semi-unique error code. The code is not meant to have meaning; the
+ * code's only purpose is to try to prevent false duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * It's only a small step better than a random number in its current form.
+ */
+static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
+ /*
+ * IPEHR would be an ideal way to detect errors, as it's the gross
+ * measure of "the command that hung." However, it has some very common
+ * synchronization commands which almost always appear in cases that
+ * are strictly a client bug. Use instdone to help differentiate those.
+ */
+ return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
+}
+
+static const char *error_msg(struct i915_gpu_coredump *error)
+{
+ struct intel_engine_coredump *first = NULL;
+ struct intel_gt_coredump *gt;
+ intel_engine_mask_t engines;
int len;
+ engines = 0;
+ for (gt = error->gt; gt; gt = gt->next) {
+ struct intel_engine_coredump *cs;
+
+ if (gt->engine && !first)
+ first = gt->engine;
+
+ for (cs = gt->engine; cs; cs = cs->next)
+ engines |= cs->engine->mask;
+ }
+
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%x:0x%08x",
+ "GPU HANG: ecode %d:%x:%08x",
INTEL_GEN(error->i915), engines,
- i915_error_generate_code(error));
- if (error->engine) {
+ generate_ecode(first));
+ if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine->context.comm,
- error->engine->context.pid);
+ first->context.comm, first->context.pid);
}
- if (msg)
- len += scnprintf(error->error_msg + len,
- sizeof(error->error_msg) - len,
- ", %s", msg);
return error->error_msg;
}
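error_msg() builds the single dmesg line with the bounded-append idiom: scnprintf() returns the number of characters actually written, so len can be advanced without overrunning error_msg[]. A userspace sketch of the same pattern; snprintf() returns the would-be length rather than the truncated one, hence the extra clamp, and all values here are made up:

    #include <stdio.h>

    int main(void)
    {
            char msg[128];
            unsigned int ecode = 0x12345678u ^ 0x9abcdef0u; /* ipehr ^ instdone */
            int len;

            len = snprintf(msg, sizeof(msg), "GPU HANG: ecode %d:%x:%08x",
                           9, 0x1, ecode);
            /* Append the guilty process, if one was captured. */
            if (len < (int)sizeof(msg))
                    len += snprintf(msg + len, sizeof(msg) - len,
                                    ", in %s [%d]", "glxgears", 1234);

            puts(msg);
            return 0;
    }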
-static void capture_gen_state(struct i915_gpu_state *error)
+static void capture_gen(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915 = error->i915;
- error->awake = i915->gt.awake;
error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
error->suspended = i915->runtime_pm.suspended;
@@ -1711,6 +1706,7 @@ static void capture_gen_state(struct i915_gpu_state *error)
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
+ i915_params_copy(&error->params, &i915_modparams);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -1720,115 +1716,138 @@ static void capture_gen_state(struct i915_gpu_state *error)
error->driver_caps = i915->caps;
}
-static void capture_params(struct i915_gpu_state *error)
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
- i915_params_copy(&error->params, &i915_modparams);
+ struct i915_gpu_coredump *error;
+
+ if (!i915_modparams.error_capture)
+ return NULL;
+
+ error = kzalloc(sizeof(*error), gfp);
+ if (!error)
+ return NULL;
+
+ kref_init(&error->ref);
+ error->i915 = i915;
+
+ error->time = ktime_get_real();
+ error->boottime = ktime_get_boottime();
+ error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->capture = jiffies;
+
+ capture_gen(error);
+
+ return error;
}
-static void capture_finish(struct i915_gpu_state *error)
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
- struct i915_ggtt *ggtt = &error->i915->ggtt;
+ struct intel_gt_coredump *gc;
- if (drm_mm_node_allocated(&ggtt->error_capture)) {
- const u64 slot = ggtt->error_capture.start;
+ gc = kzalloc(sizeof(*gc), gfp);
+ if (!gc)
+ return NULL;
+
+ gc->_gt = gt;
+ gc->awake = intel_gt_pm_is_awake(gt);
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ gt_record_regs(gc);
+ gt_record_fences(gc);
+
+ return gc;
+}
+
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+ struct i915_vma_compress *compress;
+
+ compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
+ if (!compress)
+ return NULL;
+
+ if (!compress_init(compress)) {
+ kfree(compress);
+ return NULL;
}
+
+ gt_capture_prepare(gt);
+
+ return compress;
}
-#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+ if (!compress)
+ return;
+
+ gt_capture_finish(gt);
-struct i915_gpu_state *
-i915_capture_gpu_state(struct drm_i915_private *i915)
+ compress_fini(compress);
+ kfree(compress);
+}
+
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
- struct compress compress;
+ struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
- error = kzalloc(sizeof(*error), ALLOW_FAIL);
- if (!error) {
- i915_disable_error_state(i915, -ENOMEM);
+ error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
+ if (!error)
return ERR_PTR(-ENOMEM);
- }
- if (!compress_init(&compress)) {
- kfree(error);
- i915_disable_error_state(i915, -ENOMEM);
- return ERR_PTR(-ENOMEM);
- }
+ error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
+ if (error->gt) {
+ struct i915_vma_compress *compress;
- kref_init(&error->ref);
- error->i915 = i915;
+ compress = i915_vma_capture_prepare(error->gt);
+ if (!compress) {
+ kfree(error->gt);
+ kfree(error);
+ return ERR_PTR(-ENOMEM);
+ }
- error->time = ktime_get_real();
- error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
- error->capture = jiffies;
+ gt_record_engines(error->gt, compress);
- capture_params(error);
- capture_gen_state(error);
- capture_uc_state(error, &compress);
- capture_reg_state(error);
- gem_record_fences(error);
- gem_record_rings(error, &compress);
+ if (INTEL_INFO(i915)->has_gt_uc)
+ error->gt->uc = gt_record_uc(error->gt, compress);
+
+ i915_vma_capture_finish(error->gt, compress);
+
+ error->simulated |= error->gt->simulated;
+ }
error->overlay = intel_overlay_capture_error_state(i915);
error->display = intel_display_capture_error_state(i915);
- capture_finish(error);
- compress_fini(&compress);
-
return error;
}
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @i915: i915 device
- * @engine_mask: the mask of engines triggering the hang
- * @msg: a message to insert into the error capture header
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-void i915_capture_error_state(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- const char *msg)
+void i915_error_state_store(struct i915_gpu_coredump *error)
{
+ struct drm_i915_private *i915;
static bool warned;
- struct i915_gpu_state *error;
- unsigned long flags;
- if (!i915_modparams.error_capture)
+ if (IS_ERR_OR_NULL(error))
return;
- if (READ_ONCE(i915->gpu_error.first_error))
- return;
+ i915 = error->i915;
+ dev_info(i915->drm.dev, "%s\n", error_msg(error));
- error = i915_capture_gpu_state(i915);
- if (IS_ERR(error))
+ if (error->simulated ||
+ cmpxchg(&i915->gpu_error.first_error, NULL, error))
return;
- dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg));
-
- if (!error->simulated) {
- spin_lock_irqsave(&i915->gpu_error.lock, flags);
- if (!i915->gpu_error.first_error) {
- i915->gpu_error.first_error = error;
- error = NULL;
- }
- spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
- }
-
- if (error) {
- __i915_gpu_state_free(&error->ref);
- return;
- }
+ i915_gpu_coredump_get(error);
if (!xchg(&warned, true) &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
@@ -1841,15 +1860,38 @@ void i915_capture_error_state(struct drm_i915_private *i915,
}
}
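The store path above publishes at most one coredump: cmpxchg() installs the error into gpu_error.first_error only while the slot is still NULL, so later hangs are dropped until the first state is read and cleared. A sketch of the same publish-once pattern, with C11 atomics standing in for the kernel's cmpxchg():

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(void *) first_error;

    static int store_once(void *err)
    {
            void *expected = NULL;

            /* Succeeds only for the first reporter; everyone else backs off. */
            return atomic_compare_exchange_strong(&first_error, &expected, err);
    }

    int main(void)
    {
            int a, b;

            printf("%d %d\n", store_once(&a), store_once(&b)); /* prints: 1 0 */
            return 0;
    }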
-struct i915_gpu_state *
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @i915: i915 device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_i915_private *i915)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_gpu_coredump(i915);
+ if (IS_ERR(error)) {
+ cmpxchg(&i915->gpu_error.first_error, NULL, error);
+ return;
+ }
+
+ i915_error_state_store(error);
+ i915_gpu_coredump_put(error);
+}
+
+struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
if (!IS_ERR_OR_NULL(error))
- i915_gpu_state_get(error);
+ i915_gpu_coredump_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
return error;
@@ -1857,7 +1899,7 @@ i915_first_error_state(struct drm_i915_private *i915)
void i915_reset_error_state(struct drm_i915_private *i915)
{
- struct i915_gpu_state *error;
+ struct i915_gpu_coredump *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
@@ -1866,7 +1908,7 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_unlock_irq(&i915->gpu_error.lock);
if (!IS_ERR_OR_NULL(error))
- i915_gpu_state_put(error);
+ i915_gpu_coredump_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5d2c3372ff99..e4a6afed3bbf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -25,43 +25,100 @@
#include "i915_scheduler.h"
struct drm_i915_private;
+struct i915_vma_compress;
+struct intel_engine_capture_vma;
struct intel_overlay_error_state;
struct intel_display_error_state;
-struct i915_gpu_state {
- struct kref ref;
- ktime_t time;
- ktime_t boottime;
- ktime_t uptime;
- unsigned long capture;
+struct i915_vma_coredump {
+ struct i915_vma_coredump *next;
- struct drm_i915_private *i915;
+ char name[20];
+
+ u64 gtt_offset;
+ u64 gtt_size;
+ u32 gtt_page_sizes;
+
+ int num_pages;
+ int page_count;
+ int unused;
+ u32 *pages[0];
+};
+
+struct i915_request_coredump {
+ unsigned long flags;
+ pid_t pid;
+ u32 context;
+ u32 seqno;
+ u32 start;
+ u32 head;
+ u32 tail;
+ struct i915_sched_attr sched_attr;
+};
+
+struct intel_engine_coredump {
+ const struct intel_engine_cs *engine;
- char error_msg[128];
bool simulated;
- bool awake;
- bool wakelock;
- bool suspended;
- int iommu;
u32 reset_count;
- u32 suspend_count;
- struct intel_device_info device_info;
- struct intel_runtime_info runtime_info;
- struct intel_driver_caps driver_caps;
- struct i915_params params;
- struct i915_error_uc {
- struct intel_uc_fw guc_fw;
- struct intel_uc_fw huc_fw;
- struct drm_i915_error_object *guc_log;
- } uc;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
+ /* Register state */
+ u32 ccid;
+ u32 start;
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 mode;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u64 faddr;
+ u32 rc_psmi; /* sleep state */
+ struct intel_instdone instdone;
+
+ struct i915_gem_context_coredump {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ int active;
+ int guilty;
+ struct i915_sched_attr sched_attr;
+ } context;
+
+ struct i915_vma_coredump *vma;
+
+ struct i915_request_coredump execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+
+ struct intel_engine_coredump *next;
+};
+
+struct intel_gt_coredump {
+ const struct intel_gt *_gt;
+ bool awake;
+ bool simulated;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 gtier[6], ngtier;
- u32 ccid;
u32 derrmr;
u32 forcewake;
u32 error; /* gen6+ */
@@ -80,91 +137,45 @@ struct i915_gpu_state {
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
+
+ struct intel_engine_coredump *engine;
+
+ struct intel_uc_coredump {
+ struct intel_uc_fw guc_fw;
+ struct intel_uc_fw huc_fw;
+ struct i915_vma_coredump *guc_log;
+ } *uc;
+
+ struct intel_gt_coredump *next;
+};
+
+struct i915_gpu_coredump {
+ struct kref ref;
+ ktime_t time;
+ ktime_t boottime;
+ ktime_t uptime;
+ unsigned long capture;
+
+ struct drm_i915_private *i915;
+
+ struct intel_gt_coredump *gt;
+
+ char error_msg[128];
+ bool simulated;
+ bool wakelock;
+ bool suspended;
+ int iommu;
+ u32 reset_count;
+ u32 suspend_count;
+
+ struct intel_device_info device_info;
+ struct intel_runtime_info runtime_info;
+ struct intel_driver_caps driver_caps;
+ struct i915_params params;
+
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
- struct drm_i915_error_engine {
- const struct intel_engine_cs *engine;
-
- /* Software tracked state */
- bool idle;
- int num_requests;
- u32 reset_count;
-
- /* position of active request inside the ring */
- u32 rq_head, rq_post, rq_tail;
-
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head;
- u32 cpu_ring_tail;
-
- /* Register state */
- u32 start;
- u32 tail;
- u32 head;
- u32 ctl;
- u32 mode;
- u32 hws;
- u32 ipeir;
- u32 ipehr;
- u32 bbstate;
- u32 instpm;
- u32 instps;
- u64 bbaddr;
- u64 acthd;
- u32 fault_reg;
- u64 faddr;
- u32 rc_psmi; /* sleep state */
- struct intel_instdone instdone;
-
- struct drm_i915_error_context {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- int active;
- int guilty;
- struct i915_sched_attr sched_attr;
- } context;
-
- struct drm_i915_error_object {
- u64 gtt_offset;
- u64 gtt_size;
- u32 gtt_page_sizes;
- int num_pages;
- int page_count;
- int unused;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
- struct drm_i915_error_object **user_bo;
- long user_bo_count;
-
- struct drm_i915_error_object *wa_ctx;
- struct drm_i915_error_object *default_state;
-
- struct drm_i915_error_request {
- unsigned long flags;
- long jiffies;
- pid_t pid;
- u32 context;
- u32 seqno;
- u32 start;
- u32 head;
- u32 tail;
- struct i915_sched_attr sched_attr;
- } *requests, execlist[EXECLIST_MAX_PORTS];
- unsigned int num_ports;
-
- struct {
- u32 gfx_mode;
- union {
- u64 pdp[4];
- u32 pp_dir_base;
- };
- } vm_info;
-
- struct drm_i915_error_engine *next;
- } *engine;
-
struct scatterlist *sgl, *fit;
};
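
The replacement layout is a tree of singly linked lists: one i915_gpu_coredump holds a chain of intel_gt_coredump, each holding a chain of intel_engine_coredump, each carrying its captured i915_vma_coredump list. A minimal consumer sketch (not part of the patch), assuming the engine's name field and using pr_info() as a stand-in for the real error-state printer:

```c
/* Sketch: walking the new coredump tree, one loop per level. */
static void walk_coredump(const struct i915_gpu_coredump *error)
{
	const struct intel_gt_coredump *gt;

	for (gt = error->gt; gt; gt = gt->next) {
		const struct intel_engine_coredump *ee;

		for (ee = gt->engine; ee; ee = ee->next) {
			const struct i915_vma_coredump *vma;

			for (vma = ee->vma; vma; vma = vma->next)
				pr_info("%s: %s at 0x%08llx\n",
					ee->engine->name, vma->name,
					(unsigned long long)vma->gtt_offset);
		}
	}
}
```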
@@ -172,7 +183,7 @@ struct i915_gpu_error {
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
- struct i915_gpu_state *first_error;
+ struct i915_gpu_coredump *first_error;
atomic_t pending_fb_pin;
@@ -200,41 +211,118 @@ struct drm_i915_error_state_buf {
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
- intel_engine_mask_t engine_mask,
- const char *error_msg);
+struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915);
+void i915_capture_error_state(struct drm_i915_private *i915);
+
+struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
+
+struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp);
+
+struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
+
+struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp);
+
+void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress);
-static inline struct i915_gpu_state *
-i915_gpu_state_get(struct i915_gpu_state *gpu)
+struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt);
+
+void i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress);
+
+void i915_error_state_store(struct i915_gpu_coredump *error);
+
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_get(struct i915_gpu_coredump *gpu)
{
kref_get(&gpu->ref);
return gpu;
}
-ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
- char *buf, loff_t offset, size_t count);
+ssize_t
+i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
+ char *buf, loff_t offset, size_t count);
-void __i915_gpu_state_free(struct kref *kref);
-static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+void __i915_gpu_coredump_free(struct kref *kref);
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
if (gpu)
- kref_put(&gpu->ref, __i915_gpu_state_free);
+ kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
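
The get/put helpers follow the usual kref pattern, and the put side tolerates NULL, so readers need only one error branch. A sketch of the reader side, mirroring the sysfs error_state_read() hunk later in this patch (buf/off/count are assumed to come from the surrounding read handler):

```c
/* Sketch: reader-side usage of the get/put helpers. */
static ssize_t read_error_state(struct drm_i915_private *i915,
				char *buf, loff_t off, size_t count)
{
	struct i915_gpu_coredump *error;
	ssize_t ret = 0;

	error = i915_first_error_state(i915);	/* takes a reference */
	if (IS_ERR(error))			/* -ENODEV if capture is compiled out */
		return PTR_ERR(error);

	if (error)
		ret = i915_gpu_coredump_copy_to_buffer(error, buf, off, count);

	i915_gpu_coredump_put(error);		/* put() accepts NULL */
	return ret;
}
```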
-struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *error_msg)
+static inline void i915_capture_error_state(struct drm_i915_private *i915)
+{
+}
+
+static inline struct i915_gpu_coredump *
+i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_gt_coredump *
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_engine_coredump *
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline struct intel_engine_capture_vma *
+intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
+ struct i915_request *rq,
+ gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline void
+intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ struct intel_engine_capture_vma *capture,
+ struct i915_vma_compress *compress)
+{
+}
+
+static inline struct i915_vma_compress *
+i915_vma_capture_prepare(struct intel_gt_coredump *gt)
+{
+ return NULL;
+}
+
+static inline void
+i915_vma_capture_finish(struct intel_gt_coredump *gt,
+ struct i915_vma_compress *compress)
+{
+}
+
+static inline void
+i915_error_state_store(struct i915_gpu_coredump *error)
+{
+}
+
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
-static inline struct i915_gpu_state *
+static inline struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fb9ba4059f0d..3245f7c5c84f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -903,7 +903,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
}
/**
- * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * ivb_parity_work - Workqueue called when a parity error interrupt
* occurred.
* @work: workqueue struct
*
@@ -911,7 +911,7 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
*/
-static void ivybridge_parity_work(struct work_struct *work)
+static void ivb_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
@@ -2041,7 +2041,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
* 4 - Process the interrupt(s) that had bits set in the IIRs.
* 5 - Re-enable Master Interrupt Control.
*/
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
@@ -2752,7 +2752,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
/* drm_dma.h hooks
*/
-static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
+static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3235,7 +3235,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
@@ -3909,7 +3909,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
intel_hpd_init_work(dev_priv);
- INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
@@ -3990,7 +3990,7 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 8)
return gen8_irq_handler;
else
- return ironlake_irq_handler;
+ return ilk_irq_handler;
}
}
@@ -4013,7 +4013,7 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 8)
gen8_irq_reset(dev_priv);
else
- ironlake_irq_reset(dev_priv);
+ ilk_irq_reset(dev_priv);
}
}
@@ -4036,7 +4036,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 8)
gen8_irq_postinstall(dev_priv);
else
- ironlake_irq_postinstall(dev_priv);
+ ilk_irq_postinstall(dev_priv);
}
}
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 318562ce64c0..b6376b25ef63 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -33,6 +33,9 @@ struct remap_pfn {
struct mm_struct *mm;
unsigned long pfn;
pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
};
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -46,6 +49,35 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
return 0;
}
+#define use_dma(io) ((io) != -1)
+
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.pfn))
+ return -EINVAL;
+
+	/* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
/**
* remap_io_mapping - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -80,3 +112,40 @@ int remap_io_mapping(struct vm_area_struct *vma,
return 0;
}
+
+/**
+ * remap_io_sg - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @size: size of map area
+ * @sgl: Start sg entry
+ * @iobase: Use the stored DMA address offset by this value, or the CPU pfn if -1
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, resource_size_t iobase)
+{
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
+ int err;
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+ if (!use_dma(iobase))
+ flush_cache_range(vma, addr, size);
+
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
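
A hypothetical caller sketch: mapping an object's backing pages into a user vma from a fault handler. The object type and its sg_table are assumptions, iobase = -1 selects the CPU pfn path, and a real handler would translate individual errnos rather than collapsing them to SIGBUS:

```c
/* Sketch: hypothetical fault handler built on remap_io_sg(). */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct example_obj *obj = vma->vm_private_data;	/* assumed type */
	int err;

	err = remap_io_sg(vma, vma->vm_start,
			  vma->vm_end - vma->vm_start,
			  obj->pages->sgl, -1);
	return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}
```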
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 9571611b4b16..83f01401b8b5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -193,23 +193,23 @@
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i830_info = {
+static const struct intel_device_info i830_info = {
I830_FEATURES,
PLATFORM(INTEL_I830),
};
-static const struct intel_device_info intel_i845g_info = {
+static const struct intel_device_info i845g_info = {
I845_FEATURES,
PLATFORM(INTEL_I845G),
};
-static const struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info i85x_info = {
I830_FEATURES,
PLATFORM(INTEL_I85X),
.display.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
};
@@ -228,7 +228,7 @@ static const struct intel_device_info intel_i865g_info = {
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
@@ -239,7 +239,7 @@ static const struct intel_device_info intel_i915g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
@@ -252,7 +252,7 @@ static const struct intel_device_info intel_i915gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
.display.has_hotplug = 1,
@@ -263,7 +263,7 @@ static const struct intel_device_info intel_i945g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
@@ -277,21 +277,21 @@ static const struct intel_device_info intel_i945gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info = {
+static const struct intel_device_info g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_g_info = {
+static const struct intel_device_info pnv_g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_m_info = {
+static const struct intel_device_info pnv_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
@@ -314,7 +314,7 @@ static const struct intel_device_info intel_pineview_m_info = {
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
.display.has_overlay = 1,
@@ -322,7 +322,7 @@ static const struct intel_device_info intel_i965g_info = {
.has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
.is_mobile = 1,
@@ -333,14 +333,14 @@ static const struct intel_device_info intel_i965gm_info = {
.has_snoop = false,
};
-static const struct intel_device_info intel_g45_info = {
+static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
-static const struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
@@ -365,12 +365,12 @@ static const struct intel_device_info intel_gm45_info = {
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info ilk_d_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
};
-static const struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info ilk_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
@@ -400,12 +400,12 @@ static const struct intel_device_info intel_ironlake_m_info = {
GEN6_FEATURES, \
PLATFORM(INTEL_SANDYBRIDGE)
-static const struct intel_device_info intel_sandybridge_d_gt1_info = {
+static const struct intel_device_info snb_d_gt1_info = {
SNB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_d_gt2_info = {
+static const struct intel_device_info snb_d_gt2_info = {
SNB_D_PLATFORM,
.gt = 2,
};
@@ -416,12 +416,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info = {
.is_mobile = 1
-static const struct intel_device_info intel_sandybridge_m_gt1_info = {
+static const struct intel_device_info snb_m_gt1_info = {
SNB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_gt2_info = {
+static const struct intel_device_info snb_m_gt2_info = {
SNB_M_PLATFORM,
.gt = 2,
};
@@ -450,12 +450,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
PLATFORM(INTEL_IVYBRIDGE), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_d_gt1_info = {
+static const struct intel_device_info ivb_d_gt1_info = {
IVB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_d_gt2_info = {
+static const struct intel_device_info ivb_d_gt2_info = {
IVB_D_PLATFORM,
.gt = 2,
};
@@ -466,17 +466,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info = {
.is_mobile = 1, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_m_gt1_info = {
+static const struct intel_device_info ivb_m_gt1_info = {
IVB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_gt2_info = {
+static const struct intel_device_info ivb_m_gt2_info = {
IVB_M_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info = {
+static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
@@ -484,7 +484,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info = {
+static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
@@ -523,17 +523,17 @@ static const struct intel_device_info intel_valleyview_info = {
PLATFORM(INTEL_HASWELL), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_haswell_gt1_info = {
+static const struct intel_device_info hsw_gt1_info = {
HSW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_haswell_gt2_info = {
+static const struct intel_device_info hsw_gt2_info = {
HSW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_haswell_gt3_info = {
+static const struct intel_device_info hsw_gt3_info = {
HSW_PLATFORM,
.gt = 3,
};
@@ -551,17 +551,17 @@ static const struct intel_device_info intel_haswell_gt3_info = {
GEN8_FEATURES, \
PLATFORM(INTEL_BROADWELL)
-static const struct intel_device_info intel_broadwell_gt1_info = {
+static const struct intel_device_info bdw_gt1_info = {
BDW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_broadwell_gt2_info = {
+static const struct intel_device_info bdw_gt2_info = {
BDW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_broadwell_rsvd_info = {
+static const struct intel_device_info bdw_rsvd_info = {
BDW_PLATFORM,
.gt = 3,
/* According to the device ID those devices are GT3, they were
@@ -569,14 +569,14 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
*/
};
-static const struct intel_device_info intel_broadwell_gt3_info = {
+static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
.engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
-static const struct intel_device_info intel_cherryview_info = {
+static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
@@ -621,12 +621,12 @@ static const struct intel_device_info intel_cherryview_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_SKYLAKE)
-static const struct intel_device_info intel_skylake_gt1_info = {
+static const struct intel_device_info skl_gt1_info = {
SKL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_skylake_gt2_info = {
+static const struct intel_device_info skl_gt2_info = {
SKL_PLATFORM,
.gt = 2,
};
@@ -637,12 +637,12 @@ static const struct intel_device_info intel_skylake_gt2_info = {
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
-static const struct intel_device_info intel_skylake_gt3_info = {
+static const struct intel_device_info skl_gt3_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 3,
};
-static const struct intel_device_info intel_skylake_gt4_info = {
+static const struct intel_device_info skl_gt4_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 4,
};
@@ -679,13 +679,13 @@ static const struct intel_device_info intel_skylake_gt4_info = {
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
-static const struct intel_device_info intel_broxton_info = {
+static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_BROXTON),
.ddb_size = 512,
};
-static const struct intel_device_info intel_geminilake_info = {
+static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
.ddb_size = 1024,
@@ -696,17 +696,17 @@ static const struct intel_device_info intel_geminilake_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_KABYLAKE)
-static const struct intel_device_info intel_kabylake_gt1_info = {
+static const struct intel_device_info kbl_gt1_info = {
KBL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt2_info = {
+static const struct intel_device_info kbl_gt2_info = {
KBL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_kabylake_gt3_info = {
+static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
.engine_mask =
@@ -717,17 +717,17 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
GEN9_FEATURES, \
PLATFORM(INTEL_COFFEELAKE)
-static const struct intel_device_info intel_coffeelake_gt1_info = {
+static const struct intel_device_info cfl_gt1_info = {
CFL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_coffeelake_gt2_info = {
+static const struct intel_device_info cfl_gt2_info = {
CFL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info = {
+static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
.engine_mask =
@@ -742,7 +742,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
.has_coherent_ggtt = false, \
GLK_COLORS
-static const struct intel_device_info intel_cannonlake_info = {
+static const struct intel_device_info cnl_info = {
GEN10_FEATURES,
PLATFORM(INTEL_CANNONLAKE),
.gt = 2,
@@ -777,14 +777,14 @@ static const struct intel_device_info intel_cannonlake_info = {
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
-static const struct intel_device_info intel_icelake_11_info = {
+static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
-static const struct intel_device_info intel_elkhartlake_info = {
+static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
@@ -815,7 +815,7 @@ static const struct intel_device_info intel_elkhartlake_info = {
.has_global_mocs = 1, \
.display.has_dsb = 1
-static const struct intel_device_info intel_tigerlake_12_info = {
+static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
@@ -840,70 +840,70 @@ static const struct intel_device_info intel_tigerlake_12_info = {
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
static const struct pci_device_id pciidlist[] = {
- INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_i845g_info),
- INTEL_I85X_IDS(&intel_i85x_info),
- INTEL_I865G_IDS(&intel_i865g_info),
- INTEL_I915G_IDS(&intel_i915g_info),
- INTEL_I915GM_IDS(&intel_i915gm_info),
- INTEL_I945G_IDS(&intel_i945g_info),
- INTEL_I945GM_IDS(&intel_i945gm_info),
- INTEL_I965G_IDS(&intel_i965g_info),
- INTEL_G33_IDS(&intel_g33_info),
- INTEL_I965GM_IDS(&intel_i965gm_info),
- INTEL_GM45_IDS(&intel_gm45_info),
- INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
- INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
- INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
- INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
- INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
- INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
- INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
- INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
- INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
- INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
- INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
- INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
- INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
- INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
- INTEL_VLV_IDS(&intel_valleyview_info),
- INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
- INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
- INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
- INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
- INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
- INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
- INTEL_BXT_IDS(&intel_broxton_info),
- INTEL_GLK_IDS(&intel_geminilake_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
- INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
- INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
- INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CML_U_GT1_IDS(&intel_coffeelake_gt1_info),
- INTEL_CML_U_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CNL_IDS(&intel_cannonlake_info),
- INTEL_ICL_11_IDS(&intel_icelake_11_info),
- INTEL_EHL_IDS(&intel_elkhartlake_info),
- INTEL_TGL_12_IDS(&intel_tigerlake_12_info),
+ INTEL_I830_IDS(&i830_info),
+ INTEL_I845G_IDS(&i845g_info),
+ INTEL_I85X_IDS(&i85x_info),
+ INTEL_I865G_IDS(&i865g_info),
+ INTEL_I915G_IDS(&i915g_info),
+ INTEL_I915GM_IDS(&i915gm_info),
+ INTEL_I945G_IDS(&i945g_info),
+ INTEL_I945GM_IDS(&i945gm_info),
+ INTEL_I965G_IDS(&i965g_info),
+ INTEL_G33_IDS(&g33_info),
+ INTEL_I965GM_IDS(&i965gm_info),
+ INTEL_GM45_IDS(&gm45_info),
+ INTEL_G45_IDS(&g45_info),
+ INTEL_PINEVIEW_G_IDS(&pnv_g_info),
+ INTEL_PINEVIEW_M_IDS(&pnv_m_info),
+ INTEL_IRONLAKE_D_IDS(&ilk_d_info),
+ INTEL_IRONLAKE_M_IDS(&ilk_m_info),
+ INTEL_SNB_D_GT1_IDS(&snb_d_gt1_info),
+ INTEL_SNB_D_GT2_IDS(&snb_d_gt2_info),
+ INTEL_SNB_M_GT1_IDS(&snb_m_gt1_info),
+ INTEL_SNB_M_GT2_IDS(&snb_m_gt2_info),
+ INTEL_IVB_Q_IDS(&ivb_q_info), /* must be first IVB */
+ INTEL_IVB_M_GT1_IDS(&ivb_m_gt1_info),
+ INTEL_IVB_M_GT2_IDS(&ivb_m_gt2_info),
+ INTEL_IVB_D_GT1_IDS(&ivb_d_gt1_info),
+ INTEL_IVB_D_GT2_IDS(&ivb_d_gt2_info),
+ INTEL_HSW_GT1_IDS(&hsw_gt1_info),
+ INTEL_HSW_GT2_IDS(&hsw_gt2_info),
+ INTEL_HSW_GT3_IDS(&hsw_gt3_info),
+ INTEL_VLV_IDS(&vlv_info),
+ INTEL_BDW_GT1_IDS(&bdw_gt1_info),
+ INTEL_BDW_GT2_IDS(&bdw_gt2_info),
+ INTEL_BDW_GT3_IDS(&bdw_gt3_info),
+ INTEL_BDW_RSVD_IDS(&bdw_rsvd_info),
+ INTEL_CHV_IDS(&chv_info),
+ INTEL_SKL_GT1_IDS(&skl_gt1_info),
+ INTEL_SKL_GT2_IDS(&skl_gt2_info),
+ INTEL_SKL_GT3_IDS(&skl_gt3_info),
+ INTEL_SKL_GT4_IDS(&skl_gt4_info),
+ INTEL_BXT_IDS(&bxt_info),
+ INTEL_GLK_IDS(&glk_info),
+ INTEL_KBL_GT1_IDS(&kbl_gt1_info),
+ INTEL_KBL_GT2_IDS(&kbl_gt2_info),
+ INTEL_KBL_GT3_IDS(&kbl_gt3_info),
+ INTEL_KBL_GT4_IDS(&kbl_gt3_info),
+ INTEL_AML_KBL_GT2_IDS(&kbl_gt2_info),
+ INTEL_CFL_S_GT1_IDS(&cfl_gt1_info),
+ INTEL_CFL_S_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_H_GT1_IDS(&cfl_gt1_info),
+ INTEL_CFL_H_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CFL_U_GT3_IDS(&cfl_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&cfl_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
+ INTEL_CML_GT1_IDS(&cfl_gt1_info),
+ INTEL_CML_GT2_IDS(&cfl_gt2_info),
+ INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
+ INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CNL_IDS(&cnl_info),
+ INTEL_ICL_11_IDS(&icl_info),
+ INTEL_EHL_IDS(&ehl_info),
+ INTEL_TGL_12_IDS(&tgl_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
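
The INTEL_*_IDS() wrappers stash the intel_device_info pointer in each entry's driver_data, which is how the probe callback recovers the per-platform description; a minimal sketch (example_probe is hypothetical):

```c
/* Sketch: how the id table is consumed at probe time. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *info =
		(const struct intel_device_info *)ent->driver_data;

	dev_info(&pdev->dev, "GT%u\n", info->gt);	/* e.g. 2 for kbl_gt2_info */
	return 0;
}
```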
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 84350c7bc711..0f556d80ba36 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2159,8 +2159,6 @@ static int gen8_modify_context(struct intel_context *ce,
struct i915_request *rq;
int err;
- lockdep_assert_held(&ce->pin_mutex);
-
rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2203,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
if (ce->engine->class != RENDER_CLASS)
continue;
- err = intel_context_lock_pinned(ce);
- if (err)
- break;
+ /* Otherwise OA settings will be set upon first use */
+ if (!intel_context_pin_if_active(ce))
+ continue;
flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+ err = gen8_modify_context(ce, flex, count);
- /* Otherwise OA settings will be set upon first use */
- if (intel_context_is_pinned(ce))
- err = gen8_modify_context(ce, flex, count);
-
- intel_context_unlock_pinned(ce);
+ intel_context_unpin(ce);
if (err)
break;
}
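
The hunk replaces the lock/check-pinned/unlock dance with intel_context_pin_if_active(), which takes an extra pin only when the context is already pinned; unpinned contexts simply pick up the OA configuration on their next pin. A sketch of the idiom, reusing the names from the hunk:

```c
/* Sketch of the conditional-pin idiom (ce/flex/count/err as above). */
if (!intel_context_pin_if_active(ce))
	return 0;	/* not pinned: state will be applied on first use */

err = gen8_modify_context(ce, flex, count);
intel_context_unpin(ce);	/* drop the extra pin we just took */
return err;
```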
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f3ef6700a5f2..ec0299490dd4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -637,8 +637,10 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -648,6 +650,14 @@ static void i915_pmu_enable(struct perf_event *event)
BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+
+ if (pmu->enable_count[bit] == 0 &&
+ config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sleep_last = ktime_get();
+ }
+
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -688,6 +698,8 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
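
The enable path is now bracketed by a runtime-PM wakeref so that __get_rc6() can safely read hardware counters. The same idiom appears conditionally in i915_vma_pin()/i915_vma_unbind() later in this patch, where the wakeref is initialised to 0 and only put when actually taken. The unconditional form, as a sketch:

```c
/* Sketch: the wakeref bracket idiom used above. */
intel_wakeref_t wakeref;

wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* ... hardware access that must not race runtime suspend ... */
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
```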
static void i915_pmu_disable(struct perf_event *event)
@@ -1117,12 +1129,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
- if (!is_igp(i915))
+ if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
- "i915-%s",
+ "i915_%s",
dev_name(i915->drm.dev));
- else
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
pmu->name = "i915";
+ }
if (!pmu->name)
goto err;
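
tools/perf parses ':' as an event-modifier separator, so a PMU name derived from a PCI device name such as "0000:00:02.0" must be sanitised before registration. A sketch of the two calls involved:

```c
/* Sketch: derive a perf-safe PMU name from the PCI device name. */
char *name = kasprintf(GFP_KERNEL, "i915_%s", dev_name(i915->drm.dev));

if (name)
	strreplace(name, ':', '_');	/* "0000:00:02.0" -> "0000_00_02.0" */
```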
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bbfedeb00b7f..6cc55c103f67 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2244,26 +2244,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
-#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
-#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4)
-#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3)
-#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2)
-#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1)
-
-#define MG_MISC_SUS0_PORT1 0x168814
-#define MG_MISC_SUS0_PORT2 0x169814
-#define MG_MISC_SUS0_PORT3 0x16A814
-#define MG_MISC_SUS0_PORT4 0x16B814
-#define MG_MISC_SUS0(tc_port) \
- _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2))
-#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14)
-#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14)
-#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12)
-#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11)
-#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10)
-#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7)
-#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6)
-#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5)
/* The spec defines this only for BXT PHY0, but lets assume that this
* would exist for PHY1 too if it had a second channel.
@@ -4177,7 +4157,13 @@ enum {
#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
-#define VFUNIT_CLKGATE_DIS (1 << 20)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
#define CGPSF_CLKGATE_DIS (1 << 3)
@@ -6808,6 +6794,7 @@ enum {
#define PLANE_CTL_TILED_Y (4 << 10)
#define PLANE_CTL_TILED_YF (5 << 10)
#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
+#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 44a0d1a950c5..78a5f5d3c070 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -221,6 +221,8 @@ static void remove_from_engine(struct i915_request *rq)
locked = engine;
}
list_del_init(&rq->sched.link);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
spin_unlock_irq(&locked->active.lock);
}
@@ -408,8 +410,10 @@ bool __i915_request_submit(struct i915_request *request)
xfer: /* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+ if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ }
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -658,7 +662,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
- rq->flags = 0;
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 565322640378..f57eadcf3583 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -51,7 +51,7 @@ struct i915_capture_list {
#define RQ_TRACE(rq, fmt, ...) do { \
const struct i915_request *rq__ = (rq); \
- ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt, \
+ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
rq__->fence.context, rq__->fence.seqno, \
hwsp_seqno(rq__), ##__VA_ARGS__); \
} while (0)
@@ -71,12 +71,63 @@ enum {
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
/*
+ * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+ *
+ * Using the scheduler, when a request is ready for execution it is put
+ * into the priority queue, and removed from that queue when transferred
+ * to the HW runlists. We want to track its membership within the
+ * priority queue so that we can easily check before rescheduling.
+ *
+ * See i915_request_in_priority_queue()
+ */
+ I915_FENCE_FLAG_PQUEUE,
+
+ /*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
* Internal bookkeeping used by the breadcrumb code to track when
* a request is on the various signal_list.
*/
I915_FENCE_FLAG_SIGNAL,
+
+ /*
+ * I915_FENCE_FLAG_HOLD - this request is currently on hold
+ *
+ * This request has been suspended, pending an ongoing investigation.
+ */
+ I915_FENCE_FLAG_HOLD,
+
+ /*
+ * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
+ *
+ * The execution of some requests should not be interrupted. This is
+ * a sensitive operation as it makes the request super important,
+ * blocking other higher priority work. Abuse of this flag will
+ * lead to quality of service issues.
+ */
+ I915_FENCE_FLAG_NOPREEMPT,
+
+ /*
+ * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
+ *
+ * A high priority sentinel request may be submitted to clear the
+ * submission queue. As it will be the only request in-flight, upon
+ * execution all other active requests will have been preempted and
+ * unsubmitted. This preemptive pulse is used to re-evaluate the
+ * in-flight requests, particularly in cases where an active context
+ * is banned and those active requests need to be cancelled.
+ */
+ I915_FENCE_FLAG_SENTINEL,
+
+ /*
+ * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
+ *
+ * Some requests are more important than others! In particular, a
+ * request that the user is waiting on is typically required for
+	 * interactive latency, which we want to minimise by upclocking
+ * the GPU. Here we track such boost requests on a per-request basis.
+ */
+ I915_FENCE_FLAG_BOOST,
};
/**
@@ -225,11 +276,6 @@ struct i915_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- unsigned long flags;
-#define I915_REQUEST_WAITBOOST BIT(0)
-#define I915_REQUEST_NOPREEMPT BIT(1)
-#define I915_REQUEST_SENTINEL BIT(2)
-
/** timeline->request entry for this request */
struct list_head link;
@@ -334,6 +380,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -427,6 +478,27 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
return __i915_request_has_started(rq);
}
+/**
+ * i915_request_is_ready - check if the request is ready for execution
+ * @rq: the request
+ *
+ * Upon construction, the request is instructed to wait upon various
+ * signals before it is ready to be executed by the HW. That is, we do
+ * not want to start execution and read data before it is written. In practice,
+ * this is controlled with a mixture of interrupts and semaphores. Once
+ * the submit fence is completed, the backend scheduler will place the
+ * request into its queue and from there submit it for execution. So we
+ * can detect when a request is eligible for execution (and is under control
+ * of the scheduler) by querying where it is in any of the scheduler's lists.
+ *
+ * Returns true if the request is ready for execution (it may be inflight),
+ * false otherwise.
+ */
+static inline bool i915_request_is_ready(const struct i915_request *rq)
+{
+ return !list_empty(&rq->sched.link);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
if (i915_request_signaled(rq))
@@ -442,18 +514,33 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
- return rq->flags & I915_REQUEST_WAITBOOST;
+ return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}
static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
/* Preemption should only be disabled very rarely */
- return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+ return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}
static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
- return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+ return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
+}
+
+static inline bool i915_request_on_hold(const struct i915_request *rq)
+{
+ return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
+}
+
+static inline void i915_request_set_hold(struct i915_request *rq)
+{
+ set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
+static inline void i915_request_clear_hold(struct i915_request *rq)
+{
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
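
With per-request state now carried in fence.flags, policy checks compose from the helpers above. A sketch of such a composition; the policy itself is illustrative, not the driver's actual preemption logic:

```c
/* Sketch: composing the new flag helpers in a scheduling decision. */
static bool example_may_reschedule(const struct i915_request *rq)
{
	if (i915_request_on_hold(rq))		/* suspended, leave alone */
		return false;

	if (i915_request_has_nopreempt(rq))	/* explicitly shielded */
		return false;

	/* only requests the scheduler owns can be reordered */
	return i915_request_is_ready(rq);
}
```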
static inline struct intel_timeline *
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index bf87c70bfdd9..5d96cfba40f8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
node->attr.priority = prio;
- if (list_empty(&node->link)) {
- /*
- * If the request is not in the priolist queue because
- * it is not yet runnable, then it doesn't contribute
- * to our preemption decisions. On the other hand,
- * if the request is on the HW, it too is not in the
- * queue; but in that case we may still need to reorder
- * the inflight requests.
- */
+ /*
+ * Once the request is ready, it will be placed into the
+ * priority lists and then onto the HW runlist. Before the
+ * request is ready, it does not contribute to our preemption
+	 * decisions and we can safely ignore it, as it, and any
+	 * preemption it requires, will be dealt with upon submission.
+ * See engine->submit_request()
+ */
+ if (list_empty(&node->link))
continue;
- }
- if (!intel_engine_is_virtual(engine) &&
- !i915_request_is_active(node_to_request(node))) {
+ if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ad2b1b833d7b..0cef3130db05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -498,15 +498,15 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_state *gpu;
+ struct i915_gpu_coredump *gpu;
ssize_t ret;
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
- ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_state_put(gpu);
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
} else {
const char *str = "No error state collected\n";
size_t len = strlen(str);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index cbd783c31adb..4ff380770b32 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -423,8 +423,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
int err;
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
@@ -456,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err_unpin;
i915_vma_set_ggtt_write(vma);
+
+ /* NB Access through the GTT requires the device to be awake. */
return ptr;
err_unpin:
@@ -858,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma)
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
+ intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
@@ -883,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
}
+ if (flags & PIN_GLOBAL)
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
/* No more allocations allowed once we hold vm->mutex */
err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
@@ -946,6 +950,8 @@ err_unlock:
err_fence:
if (work)
dma_fence_work_commit(&work->base);
+ if (wakeref)
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
vma_put_pages(vma);
return err;
@@ -1196,16 +1202,26 @@ int __i915_vma_unbind(struct i915_vma *vma)
if (ret)
return ret;
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
- GEM_BUG_ON(i915_vma_is_active(vma));
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
if (!drm_mm_node_allocated(&vma->node))
return 0;
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
@@ -1246,11 +1262,16 @@ int __i915_vma_unbind(struct i915_vma *vma)
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
int err;
if (!drm_mm_node_allocated(&vma->node))
return 0;
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ /* XXX not always required: nop_clear_range */
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
return err;
@@ -1258,6 +1279,9 @@ int i915_vma_unbind(struct i915_vma *vma)
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5fffa3c58908..02b31a62951e 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,148 +30,14 @@
#include <drm/drm_mm.h>
+#include "gem/i915_gem_object.h"
+
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
-#include "gem/i915_gem_object.h"
#include "i915_active.h"
#include "i915_request.h"
-
-enum i915_cache_level;
-
-/**
- * DOC: Virtual Memory Address
- *
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
-
- struct i915_address_space *vm;
- const struct i915_vma_ops *ops;
-
- struct drm_i915_gem_object *obj;
- struct dma_resv *resv; /** Alias of obj->resv */
-
- struct sg_table *pages;
- void __iomem *iomap;
- void *private; /* owned by creator */
-
- struct i915_fence_reg *fence;
-
- u64 size;
- u64 display_alignment;
- struct i915_page_sizes page_sizes;
-
- /* mmap-offset associated with fencing for this vma */
- struct i915_mmap_offset *mmo;
-
- u32 fence_size;
- u32 fence_alignment;
-
- /**
- * Count of the number of times this vma has been opened by different
- * handles (but same file) for execbuf, i.e. the number of aliases
- * that exist in the ctx->handle_vmas LUT for this vma.
- */
- struct kref ref;
- atomic_t open_count;
- atomic_t flags;
- /**
- * How many users have pinned this object in GTT space.
- *
- * This is a tightly bound, fairly small number of users, so we
- * stuff inside the flags field so that we can both check for overflow
- * and detect a no-op i915_vma_pin() in a single check, while also
- * pinning the vma.
- *
- * The worst case display setup would have the same vma pinned for
- * use on each plane on each crtc, while also building the next atomic
- * state and holding a pin for the length of the cleanup queue. In the
- * future, the flip queue may be increased from 1.
- * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
- *
- * For GEM, the number of concurrent users for pwrite/pread is
- * unbounded. For execbuffer, it is currently one but will in future
- * be extended to allow multiple clients to pin vma concurrently.
- *
- * We also use suballocated pages, with each suballocation claiming
- * its own pin on the shared vma. At present, this is limited to
- * exclusive cachelines of a single page, so a maximum of 64 possible
- * users.
- */
-#define I915_VMA_PIN_MASK 0x3ff
-#define I915_VMA_OVERFLOW 0x200
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND_BIT 10
-#define I915_VMA_LOCAL_BIND_BIT 11
-
-#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
-#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
-
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
-
-#define I915_VMA_ALLOC_BIT 12
-#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
-
-#define I915_VMA_ERROR_BIT 13
-#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
-
-#define I915_VMA_GGTT_BIT 14
-#define I915_VMA_CAN_FENCE_BIT 15
-#define I915_VMA_USERFAULT_BIT 16
-#define I915_VMA_GGTT_WRITE_BIT 17
-
-#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
-#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
-#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
-#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
-
- struct i915_active active;
-
-#define I915_VMA_PAGES_BIAS 24
-#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
- atomic_t pages_count; /* number of active binds to the pages */
- struct mutex pages_mutex; /* protect acquire/release of backing pages */
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
- struct rb_node obj_node;
- struct hlist_node obj_hash;
-
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
- /** This vma's place in the eviction list */
- struct list_head evict_link;
-
- struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
-};
+#include "i915_vma_types.h"
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
new file mode 100644
index 000000000000..e0942efd5236
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_TYPES_H__
+#define __I915_VMA_TYPES_H__
+
+#include <linux/rbtree.h>
+
+#include <drm/drm_mm.h>
+
+#include "gem/i915_gem_object_types.h"
+
+enum i915_cache_level;
+
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing pages
+ * in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ * 12
+ * 34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ * 1212
+ * 3434
+ *
+ * In this example both the size and layout of pages in the alternative view are
+ * different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view,
+ * a globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * GEM API functions, the ones not taking the view parameter, operate on,
+ * or with, the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * Core API is designed to have copy semantics which means that passed in
+ * struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
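
A sketch of the row/stride walk such plane parameters imply, with emit() standing in for appending pages to the view's scatterlist; the real builder lives in the GGTT view code, so treat this as an approximation:

```c
/* Sketch: emit the source page indices of one remapped plane,
 * all quantities in GTT page units (see the struct below). */
static void walk_remapped_plane(const struct intel_remapped_plane_info *p,
				void (*emit)(unsigned int src_page))
{
	unsigned int x, y;

	for (y = 0; y < p->height; y++)
		for (x = 0; x < p->width; x++)
			emit(p->offset + y * p->stride + x);
}
```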
+
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ unsigned int width, height, stride, offset;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[2];
+ unsigned int unused_mbz;
+} __packed;
+
+struct intel_rotation_info {
+ struct intel_remapped_plane_info plane[2];
+} __packed;
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+static inline void assert_i915_gem_gtt_types(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+
+	/* Check that rotation/remapped share offsets for simplicity */
+ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+ offsetof(struct intel_rotation_info, plane[0]));
+ BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+ offsetofend(struct intel_rotation_info, plane[1]));
+
+ /* As we encode the size of each branch inside the union into its type,
+ * we have to be careful that each branch has a unique size.
+ */
+ switch ((enum i915_ggtt_view_type)0) {
+ case I915_GGTT_VIEW_NORMAL:
+ case I915_GGTT_VIEW_PARTIAL:
+ case I915_GGTT_VIEW_ROTATED:
+ case I915_GGTT_VIEW_REMAPPED:
+ /* gcc complains if these are identical cases */
+ break;
+ }
+}
+
+struct i915_ggtt_view {
+ enum i915_ggtt_view_type type;
+ union {
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
+ struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
+ };
+};
+
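
A minimal standalone sketch (not part of this patch, and not the i915 implementation) of the trick behind enum i915_ggtt_view_type above: each enum value equals the sizeof() of its union member, so the type doubles as the number of metadata bytes that are significant, and comparing two views reduces to a single memcmp(). All names below are local to the sketch.

#include <stdio.h>
#include <string.h>

struct partial_info {
	unsigned long long offset;
	unsigned int size;
} __attribute__((packed));

enum view_type {
	VIEW_NORMAL = 0,
	VIEW_PARTIAL = sizeof(struct partial_info),
};

struct view {
	enum view_type type;
	union {
		struct partial_info partial;
	};
};

/* The type value tells us how many bytes of metadata to compare;
 * for VIEW_NORMAL that is zero bytes, so two normal views always match.
 */
static int view_equal(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return 0;
	return memcmp(&a->partial, &b->partial, a->type) == 0;
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL, .partial = { 4096, 16 } };
	struct view b = a;

	printf("views equal: %d\n", view_equal(&a, &b));
	return 0;
}

This is also why assert_i915_gem_gtt_types() above insists that each branch has a unique size: two different view types must never compare equal by accident.
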
+/**
+ * DOC: Virtual Memory Address
+ *
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before the object is bound into, or
+ * after it is unbound from, the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+
+ struct i915_address_space *vm;
+ const struct i915_vma_ops *ops;
+
+ struct drm_i915_gem_object *obj;
+ struct dma_resv *resv; /** Alias of obj->resv */
+
+ struct sg_table *pages;
+ void __iomem *iomap;
+ void *private; /* owned by creator */
+
+ struct i915_fence_reg *fence;
+
+ u64 size;
+ u64 display_alignment;
+ struct i915_page_sizes page_sizes;
+
+ /* mmap-offset associated with fencing for this vma */
+ struct i915_mmap_offset *mmo;
+
+ u32 fence_size;
+ u32 fence_alignment;
+
+ /**
+ * Count of the number of times this vma has been opened by different
+ * handles (but same file) for execbuf, i.e. the number of aliases
+ * that exist in the ctx->handle_vmas LUT for this vma.
+ */
+ struct kref ref;
+ atomic_t open_count;
+ atomic_t flags;
+ /**
+ * How many users have pinned this object in GTT space.
+ *
+ * This is a tightly bound, fairly small number of users, so we
+ * stuff it inside the flags field so that we can both check for overflow
+ * and detect a no-op i915_vma_pin() in a single check, while also
+ * pinning the vma.
+ *
+ * The worst case display setup would have the same vma pinned for
+ * use on each plane on each crtc, while also building the next atomic
+ * state and holding a pin for the length of the cleanup queue. In the
+ * future, the flip queue may be increased from 1.
+ * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
+ *
+ * For GEM, the number of concurrent users for pwrite/pread is
+ * unbounded. For execbuffer, it is currently one but will in future
+ * be extended to allow multiple clients to pin vma concurrently.
+ *
+ * We also use suballocated pages, with each suballocation claiming
+ * its own pin on the shared vma. At present, this is limited to
+ * exclusive cachelines of a single page, so a maximum of 64 possible
+ * users.
+ */
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT 11
+
+#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT 12
+#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT 14
+#define I915_VMA_CAN_FENCE_BIT 15
+#define I915_VMA_USERFAULT_BIT 16
+#define I915_VMA_GGTT_WRITE_BIT 17
+
+#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+
+ struct i915_active active;
+
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+ atomic_t pages_count; /* number of active binds to the pages */
+ struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also
+ * assumed in GEM functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+ struct hlist_node obj_hash;
+
+ /** This vma's place in the execbuf reservation list */
+ struct list_head exec_link;
+ struct list_head reloc_link;
+
+ /** This vma's place in the eviction list */
+ struct list_head evict_link;
+
+ struct list_head closed_link;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ unsigned int *exec_flags;
+ struct hlist_node exec_node;
+ u32 exec_handle;
+};
+
+#endif
+
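A standalone sketch (not the driver's code; the real i915_vma_pin() differs in detail) of the single-check idea from the pin-count comment in struct i915_vma above: keeping the pin count in the low bits of one atomic flags word lets a single atomic add detect both overflow and whether this was the first pin. The bit values mirror I915_VMA_PIN_MASK and I915_VMA_OVERFLOW, but the exact handling here is illustrative only.

#include <stdatomic.h>
#include <stdio.h>

#define PIN_MASK	0x3ff
#define PIN_OVERFLOW	0x200	/* mirrors I915_VMA_OVERFLOW */

static atomic_int flags;	/* stands in for vma->flags */

/* Returns 1 if this was the first pin, 0 otherwise, -1 on overflow. */
static int try_pin(void)
{
	int old = atomic_fetch_add(&flags, 1);

	if (old & PIN_OVERFLOW) {
		/* too many users: back out the speculative pin */
		atomic_fetch_sub(&flags, 1);
		return -1;
	}
	return (old & PIN_MASK) == 0;
}

int main(void)
{
	printf("first pin: %d\n", try_pin());
	printf("second pin: %d\n", try_pin());
	return 0;
}
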
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 1acb5db77431..6670a0763be2 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -519,7 +519,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
}
}
-static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
@@ -600,7 +600,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
-static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
@@ -1021,11 +1021,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
/* Initialize slice/subslice/EU info */
if (IS_HASWELL(dev_priv))
- haswell_sseu_info_init(dev_priv);
+ hsw_sseu_info_init(dev_priv);
else if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
- broadwell_sseu_info_init(dev_priv);
+ bdw_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 9))
gen9_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 10))
@@ -1093,7 +1093,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
* hooked up to an SFC (Scaler & Format Converter) unit.
* In TGL each VDBOX has access to an SFC.
*/
- if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
+ if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index e24c280e5930..d0d038b3cd79 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -16,6 +16,20 @@ const u32 intel_region_map[] = {
[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type)
+{
+ struct intel_memory_region *mr;
+ int id;
+
+ for_each_memory_region(mr, i915, id)
+ if (mr->type == mem_type)
+ return mr;
+
+ return NULL;
+}
+
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
struct list_head *blocks)
@@ -37,7 +51,7 @@ __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks)
{
mutex_lock(&mem->mm_lock);
- intel_memory_region_free_pages(mem, blocks);
+ mem->avail += intel_memory_region_free_pages(mem, blocks);
mutex_unlock(&mem->mm_lock);
}
@@ -106,6 +120,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
break;
} while (1);
+ mem->avail -= size;
mutex_unlock(&mem->mm_lock);
return 0;
@@ -164,6 +179,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->io_start = io_start;
mem->min_page_size = min_page_size;
mem->ops = ops;
+ mem->total = size;
+ mem->avail = mem->total;
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
@@ -185,6 +202,16 @@ err_free:
return ERR_PTR(err);
}
+void intel_memory_region_set_name(struct intel_memory_region *mem,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
+ va_end(ap);
+}
+
static void __intel_memory_region_destroy(struct kref *kref)
{
struct intel_memory_region *mem =
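
A standalone sketch of the varargs-forwarding pattern used by intel_memory_region_set_name() above: the caller's format and arguments are passed straight into a bounded vsnprintf() over a small fixed buffer, and the __printf()-style attribute lets the compiler type-check the format string. Names below are local to the sketch.

#include <stdarg.h>
#include <stdio.h>

struct region {
	char name[8];	/* same bounded buffer idea as mem->name */
};

static void region_set_name(struct region *r, const char *fmt, ...)
	__attribute__((format(printf, 2, 3)));

/* Forward the caller's format and arguments into the bounded buffer;
 * vsnprintf() truncates safely if the result does not fit.
 */
static void region_set_name(struct region *r, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(r->name, sizeof(r->name), fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct region r;

	region_set_name(&r, "local%d", 0);
	printf("%s\n", r.name);
	return 0;
}
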
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 238722009677..232490d89a83 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -47,6 +47,10 @@ enum intel_region_id {
#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
#define I915_ALLOC_CONTIGUOUS BIT(1)
+#define for_each_memory_region(mr, i915, id) \
+ for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
+ for_each_if((mr) = (i915)->mm.regions[id])
+
/**
* Memory regions encoded as type | instance
*/
@@ -82,10 +86,13 @@ struct intel_memory_region {
resource_size_t io_start;
resource_size_t min_page_size;
+ resource_size_t total;
+ resource_size_t avail;
unsigned int type;
unsigned int instance;
unsigned int id;
+ char name[8];
dma_addr_t remap_addr;
@@ -125,5 +132,12 @@ void intel_memory_region_put(struct intel_memory_region *mem);
int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type);
+
+__printf(2, 3) void
+intel_memory_region_set_name(struct intel_memory_region *mem,
+ const char *fmt, ...);
#endif
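
A standalone sketch of the iteration pattern behind the new for_each_memory_region() macro above: walk a fixed array by index and skip NULL slots with the kernel's for_each_if() idiom (an if/else wrapper that keeps the construct usable as a single statement). Everything below is local to the sketch.

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Kernel idiom: the empty if-branch swallows NULL slots while the
 * caller's loop body attaches to the else.
 */
#define for_each_if(cond) if (!(cond)) {} else

struct region { const char *name; };

#define for_each_region(r, regions, id) \
	for ((id) = 0; (id) < ARRAY_SIZE(regions); (id)++) \
		for_each_if((r) = (regions)[id])

int main(void)
{
	struct region smem = { "system" }, lmem = { "local" };
	struct region *regions[] = { &smem, NULL, &lmem };
	struct region *r;
	size_t id;

	for_each_region(r, regions, id)
		printf("region %zu: %s\n", id, r->name);
	return 0;
}
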
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 43b68b5fc562..4ed60e1f01db 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -12,90 +12,91 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
WARN_ON(!IS_TIGERLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Jasper Lake PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_JSP;
default:
@@ -145,9 +146,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
- DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+ drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
else
- DRM_DEBUG_KMS("Assuming no PCH\n");
+ drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
return id;
}
@@ -201,13 +202,14 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* display.
*/
if (pch && !HAS_DISPLAY(dev_priv)) {
- DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found.\n");
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
pci_dev_put(pch);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 31ec82337e4f..bd2d30ecc030 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -140,7 +140,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
}
-static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
+static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
@@ -178,7 +178,7 @@ static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
+static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
u16 ddrpll, csipll;
@@ -199,8 +199,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->mem_freq = 1600;
break;
default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
}
@@ -228,8 +228,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq = 6400;
break;
default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
}
@@ -314,7 +314,8 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
@@ -383,9 +384,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
- DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
- enableddisabled(enable),
- enableddisabled(was_enabled));
+ drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ enableddisabled(enable),
+ enableddisabled(was_enabled));
return was_enabled;
}
@@ -510,8 +511,8 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -527,8 +528,8 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -542,41 +543,45 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pnv_display_wm = {
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_cursor_wm = {
+
+static const struct intel_watermark_params pnv_cursor_wm = {
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@@ -584,6 +589,7 @@ static const struct intel_watermark_params i965_cursor_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i945_wm_info = {
.fifo_size = I945_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -591,6 +597,7 @@ static const struct intel_watermark_params i945_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i915_wm_info = {
.fifo_size = I915_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -598,6 +605,7 @@ static const struct intel_watermark_params i915_wm_info = {
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i830_a_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -605,6 +613,7 @@ static const struct intel_watermark_params i830_a_wm_info = {
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i830_bc_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM/2,
@@ -612,6 +621,7 @@ static const struct intel_watermark_params i830_bc_wm_info = {
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
+
static const struct intel_watermark_params i845_wm_info = {
.fifo_size = I830_FIFO_SIZE,
.max_wm = I915_MAX_WM,
@@ -848,7 +858,7 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pineview_update_wm(struct intel_crtc *unused_crtc)
+static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
@@ -861,7 +871,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
dev_priv->fsb_freq,
dev_priv->mem_freq);
if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -876,18 +887,18 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
/* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_display_wm,
+ pnv_display_wm.fifo_size,
cpp, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_cursor_wm,
+ pnv_display_wm.fifo_size,
4, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -895,8 +906,8 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
@@ -904,14 +915,14 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
+ wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
} else {
@@ -1202,6 +1213,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
enum plane_id plane_id = plane->id;
bool dirty = false;
@@ -1254,16 +1266,18 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
- plane->base.name,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
}
return dirty;
@@ -1781,6 +1795,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
int level;
@@ -1808,11 +1823,12 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
- plane->base.name,
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
return dirty;
}
@@ -2227,8 +2243,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
entries = intel_wm_method2(clock, htotal,
crtc->base.cursor->state->crtc_w, 4,
@@ -2241,8 +2258,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
cxsr_enabled = true;
} else {
@@ -2251,8 +2269,9 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
/* 965 has limitations... */
I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
@@ -2342,7 +2361,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planeb_wm = wm_info->max_wm;
}
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ drm_dbg_kms(&dev_priv->drm,
+ "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
@@ -2384,7 +2404,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
@@ -2396,8 +2417,9 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
@@ -2433,7 +2455,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
@@ -2832,7 +2855,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
&val, NULL);
if (ret) {
- DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ drm_err(&dev_priv->drm,
+ "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -2850,7 +2874,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
- DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+ drm_err(&dev_priv->drm,
+ "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -2968,8 +2993,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
unsigned int latency = wm[level];
if (latency == 0) {
- DRM_DEBUG_KMS("%s WM%d latency not provided\n",
- name, level);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
continue;
}
@@ -2982,9 +3008,9 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
else if (level > 0)
latency *= 5;
- DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
- name, level, wm[level],
- latency / 10, latency % 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
}
}
@@ -3018,7 +3044,8 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
if (!changed)
return;
- DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "WM latency values increased to avoid potential underruns\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3046,7 +3073,8 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
dev_priv->wm.spr_latency[3] = 0;
dev_priv->wm.cur_latency[3] = 0;
- DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LP3 watermarks disabled due to potential for lost interrupts\n");
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -3096,7 +3124,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
- DRM_DEBUG_KMS("LP0 watermark invalid\n");
+ drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
return false;
}
@@ -3673,7 +3701,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
return;
}
- DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n");
+ drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
} else if (IS_GEN(dev_priv, 11)) {
dev_priv->sagv_block_time_us = 10;
return;
@@ -3713,7 +3741,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
- DRM_DEBUG_KMS("Enabling SAGV\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -3724,11 +3752,11 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
* don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to enable SAGV\n");
+ drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
return ret;
}
@@ -3747,7 +3775,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
- DRM_DEBUG_KMS("Disabling SAGV\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -3758,11 +3786,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
* don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
return ret;
}
@@ -4331,9 +4359,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
}
if (level < 0) {
- DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
- DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
- alloc_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Requested display configuration exceeds system DDB limitations");
+ drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
+ blocks, alloc_size);
return -EINVAL;
}
@@ -4561,7 +4590,8 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
/* only planar format has two planes */
if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
- DRM_DEBUG_KMS("Non planar format have single plane\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Non planar format have single plane\n");
return -EINVAL;
}
@@ -5260,10 +5290,11 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
- plane->base.base.id, plane->base.name,
- old->start, old->end, new->start, new->end,
- skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -5276,70 +5307,74 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
- enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
- enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
- enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
- enast(old_wm->trans_wm.plane_en),
- enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
- enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
- enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
- enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
- enast(new_wm->trans_wm.plane_en));
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+ enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+ enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+ enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+ enast(old_wm->trans_wm.plane_en),
+ enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+ enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+ enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+ enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+ enast(new_wm->trans_wm.plane_en));
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
-
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
- old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
- old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
- old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
- old_wm->trans_wm.plane_res_b,
- new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
- new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
- new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
- new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
- new_wm->trans_wm.plane_res_b);
-
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc);
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+ old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+ old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+ old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+ old_wm->trans_wm.plane_res_b,
+ new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+ new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+ new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+ new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+ new_wm->trans_wm.plane_res_b);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc);
}
}
}
@@ -5931,19 +5966,22 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
- wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
- yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -6035,8 +6073,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
- "assuming DDR DVFS is disabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
@@ -6087,16 +6126,18 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0],
- wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -6412,8 +6453,9 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
- DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
- tmp);
+ drm_dbg_kms(&dev_priv->drm,
+ "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+ tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6590,6 +6632,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaEnable32PlaneMode:icl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
+ 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, PSDUNIT_CLKGATE_DIS);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6597,6 +6650,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
+ /* Wa_1408615072:tgl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, VSUNIT_CLKGATE_DIS_TGL);
+
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -7113,7 +7170,8 @@ void intel_suspend_hw(struct drm_i915_private *dev_priv)
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No clock gating settings or workarounds applied.\n");
}
/**
@@ -7180,9 +7238,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
{
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
- i915_pineview_get_mem_freq(dev_priv);
+ pnv_get_mem_freq(dev_priv);
else if (IS_GEN(dev_priv, 5))
- i915_ironlake_get_mem_freq(dev_priv);
+ ilk_get_mem_freq(dev_priv);
if (intel_has_sagv(dev_priv))
skl_setup_sagv_block_time(dev_priv);
@@ -7208,8 +7266,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.optimize_watermarks =
ilk_optimize_watermarks;
} else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to read display plane latency. "
+ "Disable CxSR\n");
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
@@ -7229,7 +7288,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
+ drm_info(&dev_priv->drm,
+ "failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
(dev_priv->is_ddr3 == 1) ? "3" : "2",
@@ -7238,7 +7298,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
- dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.update_wm = pnv_update_wm;
} else if (IS_GEN(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
} else if (IS_GEN(dev_priv, 3)) {
@@ -7253,7 +7313,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
} else {
- DRM_ERROR("unexpected fall-through in intel_init_pm\n");
+ drm_err(&dev_priv->drm,
+ "unexpected fall-through in %s\n", __func__);
}
}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index eddb392917aa..14b59b899c9b 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -90,6 +90,8 @@ region_lmem_init(struct intel_memory_region *mem)
if (ret)
io_mapping_fini(&mem->iomap);
+ intel_memory_region_set_name(mem, "local");
+
return ret;
}
@@ -123,10 +125,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
io_start,
&intel_region_lmem_ops);
if (!IS_ERR(mem)) {
- DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
- DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
- (u64)mem->io_start);
- DRM_INFO("Intel graphics fake LMEM size: %llx\n",
+ drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
+ &mem->region);
+ drm_info(&i915->drm,
+ "Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
(u64)resource_size(&mem->region));
}
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index e06b35b844a0..cbfb7171d62d 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -105,8 +105,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
if (intel_wait_for_register(uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
- DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
- is_read ? "read" : "write");
+ drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
return -EAGAIN;
}
@@ -129,8 +129,8 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
err = 0;
} else {
- DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
- is_read ? "read" : "write");
+ drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
err = -ETIMEDOUT;
}
@@ -283,7 +283,8 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
if (intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
- DRM_ERROR("timeout waiting for SBI to become ready\n");
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to become ready\n");
return -EBUSY;
}
@@ -301,12 +302,13 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
if (__intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100, 100, &cmd)) {
- DRM_ERROR("timeout waiting for SBI to complete read\n");
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to complete read\n");
return -ETIMEDOUT;
}
if (cmd & SBI_RESPONSE_FAIL) {
- DRM_ERROR("error during SBI read of reg %x\n", reg);
+ drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
return -ENXIO;
}
@@ -426,8 +428,9 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
mutex_unlock(&i915->sb_lock);
if (err) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), err);
+ drm_dbg(&i915->drm,
+ "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
}
return err;
@@ -447,8 +450,9 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
mutex_unlock(&i915->sb_lock);
if (err) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), err);
+ drm_dbg(&i915->drm,
+ "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
}
return err;
@@ -519,7 +523,8 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
- DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ drm_dbg_kms(&i915->drm,
+ "PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 94a97bf8c021..5f2cf6f43b8b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -359,7 +359,8 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
if (wait_for_atomic((n = fifo_free_entries(uncore)) >
GT_FIFO_NUM_RESERVED_ENTRIES,
GT_FIFO_TIMEOUT_MS)) {
- DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
+ drm_dbg(&uncore->i915->drm,
+ "GT_FIFO timeout, entries: %u\n", n);
return;
}
}
@@ -432,7 +433,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
break;
if (--retry_count == 0) {
- DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+ drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
break;
}
@@ -490,7 +491,7 @@ gen6_check_for_fifo_debug(struct intel_uncore *uncore)
fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
- DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
+ drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
@@ -562,7 +563,7 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
unsigned int restore_forcewake;
if (intel_uncore_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+ drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
if (!intel_uncore_has_forcewake(uncore))
return;
@@ -1595,8 +1596,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
spin_unlock_irq(&uncore->lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
+ drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
+ drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
@@ -1683,8 +1684,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
mmio_size = 2 * 1024 * 1024;
uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
if (uncore->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
+ drm_err(&i915->drm, "failed to map registers\n");
return -EIO;
}
@@ -1807,7 +1807,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
/* clear out unclaimed reg detection bit */
if (intel_uncore_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+ drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
return 0;
@@ -2072,9 +2072,10 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
if (unlikely(check_for_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
- DRM_DEBUG("Unclaimed register detected, "
- "enabling oneshot unclaimed register reporting. "
- "Please use i915.mmio_debug=N for more information.\n");
+ drm_dbg(&uncore->i915->drm,
+ "Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
uncore->debug->unclaimed_mmio_check--;
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 59aa1b6f1827..8fbf6f4d3f26 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -95,16 +95,17 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct intel_runtime_pm *rpm,
const struct intel_wakeref_ops *ops,
- struct lock_class_key *key)
+ struct intel_wakeref_lockclass *key)
{
wf->rpm = rpm;
wf->ops = ops;
- __mutex_init(&wf->mutex, "wakeref", key);
+ __mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
atomic_set(&wf->count, 0);
wf->wakeref = 0;
INIT_WORK(&wf->work, __intel_wakeref_put_work);
+ lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 8d945db94b7a..7d1e676b71ef 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -44,12 +44,17 @@ struct intel_wakeref {
struct work_struct work;
};
+struct intel_wakeref_lockclass {
+ struct lock_class_key mutex;
+ struct lock_class_key work;
+};
+
void __intel_wakeref_init(struct intel_wakeref *wf,
struct intel_runtime_pm *rpm,
const struct intel_wakeref_ops *ops,
- struct lock_class_key *key);
+ struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do { \
- static struct lock_class_key __key; \
+ static struct intel_wakeref_lockclass __key; \
\
__intel_wakeref_init((wf), (rpm), (ops), &__key); \
} while (0)
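
The wakeref change splits the single lockdep class key into one key for the mutex and one for the worker, so lockdep tracks their dependency chains separately; the static key inside the init macro still gives each intel_wakeref_init() call site its own class. The same pattern for a hypothetical object, as a sketch:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

struct foo {
	struct mutex lock;
	struct work_struct work;
};

static void foo_work_fn(struct work_struct *wrk) { }

static void __foo_init(struct foo *foo, struct foo_lockclass *key)
{
	/* Distinct keys: the mutex and the work get distinct classes. */
	__mutex_init(&foo->lock, "foo.lock", &key->mutex);
	INIT_WORK(&foo->work, foo_work_fn);
	lockdep_init_map(&foo->work.lockdep_map, "foo.work", &key->work, 0);
}

/* One static key per call site, as in intel_wakeref_init() above. */
#define foo_init(foo) do { \
	static struct foo_lockclass __key; \
	__foo_init((foo), &__key); \
} while (0)
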
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
deleted file mode 100644
index df028e2b0d64..000000000000
--- a/drivers/gpu/drm/i915/oa/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# For building individual subdir files on the command line
-subdir-ccflags-y += -I$(srctree)/$(src)/..
-
-# Extra header tests
-header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index b37fc53973cc..78f36faf2bbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -124,8 +124,6 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(&i915->gt, false);
-
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(&i915->ggtt);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 80cde5bda922..b342bef5e7c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -34,6 +34,7 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+#include "mock_gtt.h"
#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
@@ -151,7 +152,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = __ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -206,8 +207,7 @@ err_ppgtt_cleanup:
return err;
}
-static int lowlevel_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int lowlevel_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -256,7 +256,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -291,7 +291,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma->node.size = BIT_ULL(size);
mock_vma->node.start = addr;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
vm->insert_entries(vm, mock_vma,
I915_CACHE_NONE, 0);
}
@@ -303,7 +303,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
vm->clear_range(vm, addr, BIT_ULL(size));
}
@@ -312,7 +312,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
kfree(order);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
kfree(mock_vma);
@@ -340,8 +340,7 @@ static void close_object_list(struct list_head *objects,
}
}
-static int fill_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int fill_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -374,7 +373,7 @@ static int fill_hole(struct drm_i915_private *i915,
{ }
}, *p;
- obj = fake_dma_object(i915, full_size);
+ obj = fake_dma_object(vm->i915, full_size);
if (IS_ERR(obj))
break;
@@ -542,7 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
@@ -552,8 +551,7 @@ err:
return err;
}
-static int walk_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int walk_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -575,7 +573,7 @@ static int walk_hole(struct drm_i915_private *i915,
u64 addr;
int err = 0;
- obj = fake_dma_object(i915, size << PAGE_SHIFT);
+ obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
if (IS_ERR(obj))
break;
@@ -630,14 +628,13 @@ err_put:
if (err)
return err;
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
}
-static int pot_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int pot_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -651,7 +648,7 @@ static int pot_hole(struct drm_i915_private *i915,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
- obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -712,8 +709,7 @@ err_obj:
return err;
}
-static int drunk_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int drunk_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -758,7 +754,7 @@ static int drunk_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -816,14 +812,13 @@ err_obj:
if (err)
return err;
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
}
-static int __shrink_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int __shrink_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -840,7 +835,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
u64 size = BIT_ULL(order++);
size = min(size, hole_end - addr);
- obj = fake_dma_object(i915, size);
+ obj = fake_dma_object(vm->i915, size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
@@ -894,12 +889,11 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
close_object_list(&objects, vm);
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
return err;
}
-static int shrink_hole(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int shrink_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -911,7 +905,7 @@ static int shrink_hole(struct drm_i915_private *i915,
for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
vm->fault_attr.interval = prime;
- err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+ err = __shrink_hole(vm, hole_start, hole_end, end_time);
if (err)
break;
}
@@ -921,8 +915,7 @@ static int shrink_hole(struct drm_i915_private *i915,
return err;
}
-static int shrink_boom(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+static int shrink_boom(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
@@ -944,7 +937,7 @@ static int shrink_boom(struct drm_i915_private *i915,
unsigned int size = sizes[i];
struct i915_vma *vma;
- purge = fake_dma_object(i915, size);
+ purge = fake_dma_object(vm->i915, size);
if (IS_ERR(purge))
return PTR_ERR(purge);
@@ -961,7 +954,7 @@ static int shrink_boom(struct drm_i915_private *i915,
/* Should now be ripe for purging */
i915_vma_unpin(vma);
- explode = fake_dma_object(i915, size);
+ explode = fake_dma_object(vm->i915, size);
if (IS_ERR(explode)) {
err = PTR_ERR(explode);
goto err_purge;
@@ -987,7 +980,7 @@ static int shrink_boom(struct drm_i915_private *i915,
i915_gem_object_put(explode);
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
- cleanup_freed_objects(i915);
+ cleanup_freed_objects(vm->i915);
}
return 0;
@@ -1001,8 +994,7 @@ err_purge:
}
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
@@ -1018,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_free;
@@ -1026,7 +1018,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
- err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+ err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
@@ -1082,8 +1074,7 @@ static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
}
static int exercise_ggtt(struct drm_i915_private *i915,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
@@ -1105,7 +1096,7 @@ restart:
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
+ err = func(&ggtt->vm, hole_start, hole_end, end_time);
if (err)
break;
@@ -1252,8 +1243,7 @@ static void track_vma_bind(struct i915_vma *vma)
}
static int exercise_mock(struct drm_i915_private *i915,
- int (*func)(struct drm_i915_private *i915,
- struct i915_address_space *vm,
+ int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
@@ -1268,7 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
return -ENOMEM;
vm = i915_gem_context_get_vm_rcu(ctx);
- err = func(i915, vm, 0, min(vm->total, limit), end_time);
+ err = func(vm, 0, min(vm->total, limit), end_time);
i915_vm_put(vm);
mock_context_close(ctx);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 476fba2ed8bb..34138c7bdd15 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index aa5a0e7f5d9e..5b39bab4da1d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index f7129a243daa..5a577a1332f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -1,5 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal for a C identifier.
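
These selftest headers are X-macro lists: each consumer defines selftest() to whatever it needs before including the file, and the new #ifndef stub makes a bare include harmless when no definition is supplied (as in header self-checks). Roughly how such a list is consumed, with hypothetical names:

/* my_selftests.h -- the list, guarded like the headers above */
#ifndef selftest
#define selftest(x, y)
#endif
selftest(alpha, run_alpha)
selftest(beta, run_beta)

/* consumer: expand the same list into an enum and a table */
static int run_alpha(void) { return 0; }
static int run_beta(void) { return 0; }

#define selftest(name, func) sel_##name,
enum {
#include "my_selftests.h"
};
#undef selftest

#define selftest(name, func) { #name, func },
static const struct {
	const char *name;
	int (*fn)(void);
} my_tests[] = {
#include "my_selftests.h"
};
#undef selftest
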
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 35cc69a3a1b9..05364eca20f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -25,6 +25,7 @@
#ifndef __I915_SELFTESTS_RANDOM_H__
#define __I915_SELFTESTS_RANDOM_H__
+#include <linux/math64.h>
#include <linux/random.h>
#include "../i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c
new file mode 100644
index 000000000000..fb506b699095
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+#include "igt_atomic.h"
+
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+const struct igt_atomic_section igt_atomic_phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+};
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
index 93ec89f487ec..1991798abf4b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_atomic.h
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -6,51 +6,12 @@
#ifndef IGT_ATOMIC_H
#define IGT_ATOMIC_H
-#include <linux/preempt.h>
-#include <linux/bottom_half.h>
-#include <linux/irqflags.h>
-
-static void __preempt_begin(void)
-{
- preempt_disable();
-}
-
-static void __preempt_end(void)
-{
- preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
- local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
- local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
- local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
- local_irq_enable();
-}
-
struct igt_atomic_section {
const char *name;
void (*critical_section_begin)(void);
void (*critical_section_end)(void);
};
-static const struct igt_atomic_section igt_atomic_phases[] = {
- { "preempt", __preempt_begin, __preempt_end },
- { "softirq", __softirq_begin, __softirq_end },
- { "hardirq", __hardirq_begin, __hardirq_end },
- { }
-};
+extern const struct igt_atomic_section igt_atomic_phases[];
#endif /* IGT_ATOMIC_H */
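
Moving the phase table and its helpers out of igt_atomic.h fixes the usual header pitfall: a static table defined in a header gets a private copy, and its static helpers trigger unused warnings, in every file that includes it. The shape of the fix in general, as a sketch:

/* bar.h: declare only */
struct bar {
	const char *name;
};
extern const struct bar bar_table[];

/* bar.c: the single shared definition */
const struct bar bar_table[] = {
	{ "one" },
	{ "two" },
	{ }	/* sentinel, as in igt_atomic_phases */
};
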
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index c0e9f99d50de..36ed42736c52 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,7 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
-#include "../i915_gem.h"
+#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 04d0aa7b349e..3ef3620e0da5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -270,36 +270,31 @@ static int igt_gpu_write_dw(struct intel_context *ce,
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
- unsigned long n;
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
- for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 __iomem *base;
- u32 read_val;
-
- base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
- read_val = ioread32(base + dword);
- io_mapping_unmap_atomic(base);
- if (read_val != val) {
- pr_err("n=%lu base[%u]=%u, val=%u\n",
- n, dword, read_val, val);
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
err = -EINVAL;
break;
}
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
}
- i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unpin_map(obj);
return err;
}
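
The rewritten igt_cpu_check() pins one write-combining CPU map of the whole object and verifies the same dword in every page by striding the pointer a page at a time; because ptr is a u32 *, one page is PAGE_SIZE / sizeof(*ptr) elements. The addressing on its own, as a standalone sketch:

#include <linux/kernel.h>

/* Check dword `dword' of each of `npages' pages mapped at `map'. */
static int check_dword_per_page(u32 *map, unsigned long npages,
				u32 dword, u32 val)
{
	u32 *ptr = map + dword;

	while (npages--) {
		if (*ptr != val)
			return -EINVAL;
		ptr += PAGE_SIZE / sizeof(*ptr);	/* next page, same dword */
	}

	return 0;
}
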
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index ac641f5360e1..3b8986983afc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -58,6 +58,8 @@ static void mock_device_release(struct drm_device *dev)
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
+ i915_gem_driver_release__contexts(i915);
+
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
@@ -184,6 +186,7 @@ struct drm_i915_private *mock_gem_device(void)
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
+ __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
intel_engines_driver_register(i915);
return i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 20ac3844edec..edc5e3dda8ca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -55,6 +55,11 @@ static void mock_cleanup(struct i915_address_space *vm)
{
}
+static void mock_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
{
struct i915_ppgtt *ppgtt;
@@ -70,7 +75,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
- ppgtt->vm.clear_range = nop_clear_range;
+ ppgtt->vm.clear_range = mock_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
ppgtt->vm.insert_entries = mock_insert_entries;
ppgtt->vm.cleanup = mock_cleanup;
@@ -107,7 +112,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->vm.total = 4096 * PAGE_SIZE;
- ggtt->vm.clear_range = nop_clear_range;
+ ggtt->vm.clear_range = mock_clear_range;
ggtt->vm.insert_page = mock_insert_page;
ggtt->vm.insert_entries = mock_insert_entries;
ggtt->vm.cleanup = mock_cleanup;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 5f9c54475799..3886999b4533 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
struct lima_sched_context *context,
atomic_t *guilty)
{
- struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+ struct drm_gpu_scheduler *sched = &pipe->base;
- return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+ return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, guilty);
}
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5044dfb8e3d6..b7a82ed5788f 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi.o \
mtk_hdmi_ddc.o \
- mtk_mt2701_hdmi_phy.o \
+ mtk_mt2701_hdmi_phy.o \
mtk_mt8173_hdmi_phy.o \
mtk_hdmi_phy.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 59de2a46aa49..6fb0d6983a4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_color *color = comp_to_color(comp);
- writel(w, comp->regs + DISP_COLOR_WIDTH(color));
- writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
+ mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
+ mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 4a55bb6e2213..891d80c73e04 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -124,14 +125,15 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
if (w != 0 && h != 0)
- writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+ mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+ DISP_REG_OVL_ROI_SIZE);
+ mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
- writel(0x1, comp->regs + DISP_REG_OVL_RST);
- writel(0x0, comp->regs + DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
}
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
@@ -175,16 +177,16 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
return 0;
}
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- unsigned int reg;
unsigned int gmc_thrshd_l;
unsigned int gmc_thrshd_h;
unsigned int gmc_value;
struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
-
+ mtk_ddp_write(cmdq_pkt, 0x1, comp,
+ DISP_REG_OVL_RDMA_CTRL(idx));
gmc_thrshd_l = GMC_THRESHOLD_LOW >>
(GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
@@ -194,22 +196,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
else
gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
- writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
-
- reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
- reg = reg | BIT(idx);
- writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+ mtk_ddp_write(cmdq_pkt, gmc_value,
+ comp, DISP_REG_OVL_RDMA_GMC(idx));
+ mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+ DISP_REG_OVL_SRC_CON, BIT(idx));
}
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- unsigned int reg;
-
- reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
- reg = reg & ~BIT(idx);
- writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
-
- writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+ mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ DISP_REG_OVL_SRC_CON, BIT(idx));
+ mtk_ddp_write(cmdq_pkt, 0, comp,
+ DISP_REG_OVL_RDMA_CTRL(idx));
}
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
@@ -249,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
}
static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
struct mtk_plane_pending_state *pending = &state->pending;
@@ -260,11 +260,13 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int con;
- if (!pending->enable)
- mtk_ovl_layer_off(comp, idx);
+ if (!pending->enable) {
+ mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+ return;
+ }
con = ovl_fmt_convert(ovl, fmt);
- if (idx != 0)
+ if (state->base.fb->format->has_alpha)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -277,14 +279,18 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
addr += pending->pitch - 1;
}
- writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
- writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
- writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
- writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
- writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
-
- if (pending->enable)
- mtk_ovl_layer_on(comp, idx);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+ DISP_REG_OVL_CON(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+ DISP_REG_OVL_PITCH(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+ DISP_REG_OVL_SRC_SIZE(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+ DISP_REG_OVL_OFFSET(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+ DISP_REG_OVL_ADDR(ovl, idx));
+
+ mtk_ovl_layer_on(comp, idx, cmdq_pkt);
}
static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
@@ -313,8 +319,6 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.disable_vblank = mtk_ovl_disable_vblank,
.supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
- .layer_on = mtk_ovl_layer_on,
- .layer_off = mtk_ovl_layer_off,
.layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 405afef31407..0cb848d64206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
unsigned int height, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
unsigned int threshold;
unsigned int reg;
struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
+ mtk_ddp_write_mask(cmdq_pkt, width, comp,
+ DISP_REG_RDMA_SIZE_CON_0, 0xfff);
+ mtk_ddp_write_mask(cmdq_pkt, height, comp,
+ DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
@@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
reg = RDMA_FIFO_UNDERFLOW_EN |
RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
- writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
+ mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
}
static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
}
static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
struct mtk_plane_pending_state *pending = &state->pending;
@@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
con = rdma_fmt_convert(rdma, fmt);
- writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_INT_MTX_SEL,
- RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+ DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
+ comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL);
} else {
- rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
- RDMA_MATRIX_ENABLE, 0);
+ mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE);
}
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
+ mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+ DISP_RDMA_MEM_GMC_SETTING_0);
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+ DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
- writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
- writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
- writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
- RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
}
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index f80a8ba75977..0dfcd1787e65 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -42,11 +43,20 @@ struct mtk_drm_crtc {
struct drm_plane *planes;
unsigned int layer_nr;
bool pending_planes;
+ bool pending_async_planes;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_client *cmdq_client;
+ u32 cmdq_event;
+#endif
void __iomem *config_regs;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
+
+ /* lock for display hardware access */
+ struct mutex hw_lock;
};
struct mtk_crtc_state {
@@ -215,11 +225,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp;
int i, count = 0;
+ unsigned int local_index = plane - mtk_crtc->planes;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
comp = mtk_crtc->ddp_comp[i];
- if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
- *local_layer = plane->index - count;
+ if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+ *local_layer = local_index - count;
return comp;
}
count += mtk_ddp_comp_layer_nr(comp);
@@ -229,6 +240,13 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
return NULL;
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+ cmdq_pkt_destroy(data.data);
+}
+#endif
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -297,7 +315,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
if (i == 1)
mtk_ddp_comp_bgclr_in_on(comp);
- mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
+ mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
mtk_ddp_comp_start(comp);
}
@@ -310,7 +328,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
plane_state = to_mtk_plane_state(plane->state);
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
- mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state, NULL);
}
return 0;
@@ -325,6 +345,7 @@ err_pm_runtime_put:
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
+ struct drm_crtc *crtc = &mtk_crtc->base;
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -350,9 +371,17 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_disp_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
-static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
+ struct cmdq_pkt *cmdq_handle)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
@@ -368,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
if (state->pending_config) {
mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
- state->pending_vrefresh, 0);
+ state->pending_vrefresh, 0,
+ cmdq_handle);
state->pending_config = false;
}
@@ -386,12 +416,84 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
&local_layer);
- mtk_ddp_comp_layer_config(comp, local_layer,
- plane_state);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state,
+ cmdq_handle);
plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (!plane_state->pending.async_config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state,
+ cmdq_handle);
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+}
+
+static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct cmdq_pkt *cmdq_handle;
+#endif
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+ unsigned int pending_planes = 0, pending_async_planes = 0;
+ int i;
+
+ mutex_lock(&mtk_crtc->hw_lock);
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ if (plane_state->pending.dirty) {
+ plane_state->pending.config = true;
+ plane_state->pending.dirty = false;
+ pending_planes |= BIT(i);
+ } else if (plane_state->pending.async_dirty) {
+ plane_state->pending.async_config = true;
+ plane_state->pending.async_dirty = false;
+ pending_async_planes |= BIT(i);
+ }
+ }
+ if (pending_planes)
+ mtk_crtc->pending_planes = true;
+ if (pending_async_planes)
+ mtk_crtc->pending_async_planes = true;
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc, NULL);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client) {
+ cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
+ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
+ mtk_crtc_ddp_config(crtc, cmdq_handle);
+ cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ }
+#endif
+ mutex_unlock(&mtk_crtc->hw_lock);
}
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
@@ -401,7 +503,23 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_ddp_comp *comp;
comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
- return mtk_ddp_comp_layer_check(comp, local_layer, state);
+ if (comp)
+ return mtk_ddp_comp_layer_check(comp, local_layer, state);
+ return 0;
+}
+
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ const struct drm_plane_helper_funcs *plane_helper_funcs =
+ plane->helper_private;
+
+ if (!mtk_crtc->enabled)
+ return;
+
+ plane_helper_funcs->atomic_update(plane, new_state);
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -451,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
mtk_crtc->pending_planes = true;
+ mtk_drm_crtc_hw_config(mtk_crtc);
/* Wait for planes to be disabled */
drm_crtc_wait_one_vblank(crtc);
@@ -482,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_drm_private *priv = crtc->dev->dev_private;
- unsigned int pending_planes = 0;
int i;
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < mtk_crtc->layer_nr; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.dirty) {
- plane_state->pending.config = true;
- plane_state->pending.dirty = false;
- pending_planes |= BIT(i);
- }
- }
- if (pending_planes)
- mtk_crtc->pending_planes = true;
if (crtc->state->color_mgmt_changed)
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
-
- if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
- mtk_crtc_ddp_config(crtc);
- mtk_disp_mutex_release(mtk_crtc->mutex);
- }
+ mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
+ }
+ mtk_drm_crtc_hw_config(mtk_crtc);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -559,8 +660,12 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
if (!priv->data->shadow_register)
- mtk_crtc_ddp_config(crtc);
+#endif
+ mtk_crtc_ddp_config(crtc, NULL);
mtk_drm_finish_page_flip(mtk_crtc);
}
@@ -627,6 +732,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
int pipe = priv->num_pipes;
int ret;
int i;
+ bool has_ctm = false;
+ uint gamma_lut_size = 0;
if (!path)
return 0;
@@ -677,6 +784,14 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->ddp_comp[i] = comp;
+
+ if (comp->funcs) {
+ if (comp->funcs->gamma_set)
+ gamma_lut_size = MTK_LUT_SIZE;
+
+ if (comp->funcs->ctm_set)
+ has_ctm = true;
+ }
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
@@ -697,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
NULL, pipe);
if (ret < 0)
return ret;
- drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
- drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
- priv->num_pipes++;
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+ priv->num_pipes++;
+ mutex_init(&mtk_crtc->hw_lock);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ mtk_crtc->cmdq_client =
+ cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ 2000);
+ if (IS_ERR(mtk_crtc->cmdq_client)) {
+		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing registers by CPU\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mtk_crtc->cmdq_client = NULL;
+ }
+ ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ drm_crtc_index(&mtk_crtc->base),
+ &mtk_crtc->cmdq_event);
+ if (ret)
+ dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+ drm_crtc_index(&mtk_crtc->base));
+#endif
return 0;
}
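
On the CMDQ path above, register writes are batched into a GCE command packet instead of being issued by the CPU: the packet clears and then waits on the CRTC's hardware event so the writes apply at the next frame boundary, and the flush callback frees the packet once it has executed. The skeleton of that sequence (all cmdq_* calls are the mtk-cmdq API used above; error handling elided):

#include <linux/soc/mediatek/mtk-cmdq.h>

static void flush_done(struct cmdq_cb_data data)
{
	cmdq_pkt_destroy(data.data);	/* the packet, passed as cb data */
}

static void flush_config(struct cmdq_client *client, u32 event)
{
	struct cmdq_pkt *pkt = cmdq_pkt_create(client, PAGE_SIZE);

	cmdq_pkt_clear_event(pkt, event);	/* ignore a stale event */
	cmdq_pkt_wfe(pkt, event);		/* wait for the next one */
	/* ... cmdq_pkt_write()/cmdq_pkt_write_mask() register updates ... */
	cmdq_pkt_flush_async(pkt, flush_done, pkt);
}
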
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 6afe1c19557a..a2b4677a451c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -21,5 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
unsigned int path_len);
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state);
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 7f21307cda75..1f5a112bb034 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -12,7 +12,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
@@ -37,7 +37,15 @@
#define CCORR_EN BIT(0)
#define DISP_CCORR_CFG 0x0020
#define CCORR_RELAY_MODE BIT(0)
+#define CCORR_ENGINE_EN BIT(1)
+#define CCORR_GAMMA_OFF BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP BIT(3)
#define DISP_CCORR_SIZE 0x0030
+#define DISP_CCORR_COEF_0 0x0080
+#define DISP_CCORR_COEF_1 0x0084
+#define DISP_CCORR_COEF_2 0x0088
+#define DISP_CCORR_COEF_3 0x008C
+#define DISP_CCORR_COEF_4 0x0090
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
@@ -76,36 +84,84 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt)
+ cmdq_pkt_write(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value);
+ else
+#endif
+ writel(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp,
+ unsigned int offset)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt)
+ cmdq_pkt_write(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value);
+ else
+#endif
+ writel_relaxed(value, comp->regs + offset);
+}
+
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
+ unsigned int value,
+ struct mtk_ddp_comp *comp,
+ unsigned int offset,
+ unsigned int mask)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (cmdq_pkt) {
+ cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
+ comp->regs_pa + offset, value, mask);
+ } else {
+#endif
+ u32 tmp = readl(comp->regs + offset);
+
+ tmp = (tmp & ~mask) | (value & mask);
+ writel(tmp, comp->regs + offset);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ }
+#endif
+}
+
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG)
+ unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
{
	/* If bpc is 0, the dithering function is not enabled */
if (bpc == 0)
return;
if (bpc >= MTK_MIN_BPC) {
- writel(0, comp->regs + DISP_DITHER_5);
- writel(0, comp->regs + DISP_DITHER_7);
- writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
- DITHER_NEW_BIT_MODE,
- comp->regs + DISP_DITHER_15);
- writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
- DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
- DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- comp->regs + DISP_DITHER_16);
- writel(DISP_DITHERING, comp->regs + CFG);
+ mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt,
+ DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_NEW_BIT_MODE,
+ comp, DISP_DITHER_15);
+ mtk_ddp_write(cmdq_pkt,
+ DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+ comp, DISP_DITHER_16);
+ mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
}
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
- writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
- mtk_dither_set(comp, bpc, DISP_OD_CFG);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
+ mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
}
static void mtk_od_start(struct mtk_ddp_comp *comp)
@@ -120,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
}
static void mtk_aal_start(struct mtk_ddp_comp *comp)
@@ -137,10 +193,10 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
- writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
+ mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
}
static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
@@ -153,12 +209,63 @@ static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
}
+/* Converts a DRM S31.32 value to the HW S1.10 format. */
+static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+{
+ u16 r;
+
+ /* Sign bit. */
+ r = in & BIT_ULL(63) ? BIT(11) : 0;
+
+ if ((in & GENMASK_ULL(62, 33)) > 0) {
+		/* The identity value 0x100000000 maps to 0x400; */
+		/* anything larger is clamped to the max, 0x7ff. */
+ r |= GENMASK(10, 0);
+ } else {
+		/* take the 11 most significant magnitude bits. */
+ r |= (in >> 22) & GENMASK(10, 0);
+ }
+
+ return r;
+}
+
+static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ struct drm_property_blob *blob = state->ctm;
+ struct drm_color_ctm *ctm;
+ const u64 *input;
+ uint16_t coeffs[9] = { 0 };
+ int i;
+ struct cmdq_pkt *cmdq_pkt = NULL;
+
+ if (!blob)
+ return;
+
+ ctm = (struct drm_color_ctm *)blob->data;
+ input = ctm->matrix;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+ coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
+
+ mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+ comp, DISP_CCORR_COEF_0);
+ mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+ comp, DISP_CCORR_COEF_1);
+ mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+ comp, DISP_CCORR_COEF_2);
+ mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+ comp, DISP_CCORR_COEF_3);
+ mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+ comp, DISP_CCORR_COEF_4);
+}
+
static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
- writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
}
static void mtk_dither_start(struct mtk_ddp_comp *comp)
@@ -173,10 +280,10 @@ static void mtk_dither_stop(struct mtk_ddp_comp *comp)
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
- unsigned int bpc)
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
- mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
+ mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
}
static void mtk_gamma_start(struct mtk_ddp_comp *comp)
@@ -223,6 +330,7 @@ static const struct mtk_ddp_comp_funcs ddp_ccorr = {
.config = mtk_ccorr_config,
.start = mtk_ccorr_start,
.stop = mtk_ccorr_stop,
+ .ctm_set = mtk_ccorr_ctm_set,
};
static const struct mtk_ddp_comp_funcs ddp_dither = {
@@ -326,6 +434,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct resource res;
+ struct cmdq_client_reg cmdq_reg;
+ int ret;
+#endif
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
@@ -379,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
comp->larb_dev = &larb_pdev->dev;
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (of_address_to_resource(node, 0, &res) != 0) {
+ dev_err(dev, "Missing reg in %s node\n", node->full_name);
+ return -EINVAL;
+ }
+ comp->regs_pa = res.start;
+
+ ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+ else
+ comp->subsys = cmdq_reg.subsys;
+#endif
return 0;
}
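
mtk_ctm_s31_32_to_s1_10() above packs DRM's CTM coefficients, which are S31.32 sign-magnitude values, into the hardware's sign + 1.10 format: bit 63 becomes the sign (bit 11 of the result), any magnitude of 2.0 or more (a set bit in 62:33) clamps to the all-ones 0x7ff, and otherwise the top eleven magnitude bits are kept by shifting right by 22. Worked through on concrete values:

/*
 *  1.0 = 0x0000000100000000 -> (in >> 22) & 0x7ff = 0x400 (the identity)
 *  0.5 = 0x0000000080000000 -> (in >> 22) & 0x7ff = 0x200
 *  2.0 = 0x0000000200000000 -> bit 33 set, clamped = 0x7ff
 * -1.0 = 0x8000000100000000 -> BIT(11) | 0x400     = 0xc00
 */
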
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 2f1e9e75b8da..debe36395fe7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -69,27 +69,29 @@ enum mtk_ddp_comp_id {
};
struct mtk_ddp_comp;
-
+struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh, unsigned int bpc);
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
void (*start)(struct mtk_ddp_comp *comp);
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
- void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
- void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
int (*layer_check)(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state);
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
+ void (*ctm_set)(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state);
};
struct mtk_ddp_comp {
@@ -99,14 +101,17 @@ struct mtk_ddp_comp {
struct device *larb_dev;
enum mtk_ddp_comp_id id;
const struct mtk_ddp_comp_funcs *funcs;
+ resource_size_t regs_pa;
+ u8 subsys;
};
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
- unsigned int vrefresh, unsigned int bpc)
+ unsigned int vrefresh, unsigned int bpc,
+ struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->config)
- comp->funcs->config(comp, w, h, vrefresh, bpc);
+ comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
}
static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -151,20 +156,6 @@ static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
return 0;
}
-static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
- unsigned int idx)
-{
- if (comp->funcs && comp->funcs->layer_on)
- comp->funcs->layer_on(comp, idx);
-}
-
-static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
- unsigned int idx)
-{
- if (comp->funcs && comp->funcs->layer_off)
- comp->funcs->layer_off(comp, idx);
-}
-
static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state)
@@ -176,10 +167,11 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
unsigned int idx,
- struct mtk_plane_state *state)
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->layer_config)
- comp->funcs->layer_config(comp, idx, state);
+ comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
}
static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
@@ -201,6 +193,13 @@ static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
comp->funcs->bgclr_in_off(comp);
}
+static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ if (comp->funcs && comp->funcs->ctm_set)
+ comp->funcs->ctm_set(comp, state);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -209,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG);
-
+ unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
+void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset);
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct mtk_ddp_comp *comp, unsigned int offset,
+ unsigned int mask);
#endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2b1c122066ea..0563c6813333 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -37,84 +37,9 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static void mtk_atomic_schedule(struct mtk_drm_private *private,
- struct drm_atomic_state *state)
-{
- private->commit.state = state;
- schedule_work(&private->commit.work);
-}
-
-static void mtk_atomic_complete(struct mtk_drm_private *private,
- struct drm_atomic_state *state)
-{
- struct drm_device *drm = private->drm;
-
- drm_atomic_helper_wait_for_fences(drm, state, false);
-
- /*
- * Mediatek drm supports runtime PM, so plane registers cannot be
- * written when their crtc is disabled.
- *
- * The comment for drm_atomic_helper_commit states:
- * For drivers supporting runtime PM the recommended sequence is
- *
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state,
- * DRM_PLANE_COMMIT_ACTIVE_ONLY);
- *
- * See the kerneldoc entries for these three functions for more details.
- */
- drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
- drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_put(state);
-}
-
-static void mtk_atomic_work(struct work_struct *work)
-{
- struct mtk_drm_private *private = container_of(work,
- struct mtk_drm_private, commit.work);
-
- mtk_atomic_complete(private, private->commit.state);
-}
-
-static int mtk_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state,
- bool async)
-{
- struct mtk_drm_private *private = drm->dev_private;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(drm, state);
- if (ret)
- return ret;
-
- mutex_lock(&private->commit.lock);
- flush_work(&private->commit.work);
-
- ret = drm_atomic_helper_swap_state(state, true);
- if (ret) {
- mutex_unlock(&private->commit.lock);
- drm_atomic_helper_cleanup_planes(drm, state);
- return ret;
- }
-
- drm_atomic_state_get(state);
- if (async)
- mtk_atomic_schedule(private, state);
- else
- mtk_atomic_complete(private, state);
-
- mutex_unlock(&private->commit.lock);
-
- return 0;
-}
+static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
static struct drm_framebuffer *
mtk_drm_mode_fb_create(struct drm_device *dev,
@@ -132,7 +57,7 @@ mtk_drm_mode_fb_create(struct drm_device *dev,
static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
.fb_create = mtk_drm_mode_fb_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = mtk_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
@@ -250,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &mtk_drm_mode_config_helpers;
ret = component_bind_all(drm->dev, drm);
if (ret)
@@ -509,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!private)
return -ENOMEM;
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, mtk_atomic_work);
private->data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index e03fea12ff59..17bc99b9f5d4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -43,13 +43,6 @@ struct mtk_drm_private {
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
-
- struct {
- struct drm_atomic_state *state;
- struct work_struct work;
- struct mutex lock;
- } commit;
-
struct drm_atomic_state *suspend_state;
bool dma_parms_allocated;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f0b0325381e0..914cc7619cd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -75,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
kfree(to_mtk_plane_state(state));
}
+static int mtk_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane != state->crtc->cursor)
+ return -EINVAL;
+
+ if (!plane->state)
+ return -EINVAL;
+
+ if (!plane->state->fb)
+ return -EINVAL;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ else /* Special case for asynchronous cursor updates. */
+ crtc_state = state->crtc->state;
+
+ return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+}
+
+static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+ state->pending.async_dirty = true;
+
+ mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
+}
+
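A note on the direct writes above: for async (cursor) updates the core never swaps in the new plane state -- the new_state object is discarded after this hook returns -- so the helper has to copy the coordinates into plane->state itself and mark the result with async_dirty for mtk_drm_crtc_async_update() to flush just that plane.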
static const struct drm_plane_funcs mtk_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -163,6 +208,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
.atomic_disable = mtk_plane_atomic_disable,
+ .atomic_async_update = mtk_plane_atomic_async_update,
+ .atomic_async_check = mtk_plane_atomic_async_check,
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 760885e35b27..d454bece9535 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -22,6 +22,8 @@ struct mtk_plane_pending_state {
unsigned int height;
unsigned int rotation;
bool dirty;
+ bool async_dirty;
+ bool async_config;
};
struct mtk_plane_state {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 3b5e016d16c4..5fa1073cf26b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- u32 ui, cycle_time;
+ u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
- cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+ timing->da_hs_prepare;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->lpx = NS_TO_CYCLE(60, cycle_time);
- timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
- timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
- timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = 2 * timing->lpx;
-
- timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
- timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
-
- timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
- timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
- timing->clk_hs_exit = 2 * timing->lpx;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
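As a sanity check on the new integer-only formulas, with an assumed per-lane rate of 500 Mbit/s (so data_rate_mhz = 500) the byte-clock cycle counts come out as:

    lpx           = 60 * 500 / 8000 + 1                 = 4
    da_hs_prepare = (80 * 500 + 4000) / 8000            = 5
    da_hs_zero    = (170 * 500 + 10000) / 8000 + 1 - 5  = 7
    da_hs_trail   = da_hs_prepare + 1                   = 6

(all division truncating; one byte clock is 8 UI, hence the /8000 to convert the ns-scale constants into byte clocks).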
@@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
dsi_tmp_buf_bpp - 10);
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit + 2;
+ timing->da_hs_zero + timing->da_hs_exit + 3;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
data_phy_cycles * dsi->lanes + 18) {
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp -
- data_phy_cycles *
- dsi->lanes - 18;
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+
+ horizontal_backporch_byte =
+ horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 18) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
} else {
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
horizontal_frontporch_byte = vm->hfront_porch *
dsi_tmp_buf_bpp;
}
} else {
- if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
data_phy_cycles * dsi->lanes + 12) {
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp -
- data_phy_cycles *
- dsi->lanes - 12;
+ horizontal_frontporch_byte =
+ vm->hfront_porch * dsi_tmp_buf_bpp -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hfront_porch /
+ (vm->hfront_porch + vm->hback_porch);
+ horizontal_backporch_byte = horizontal_backporch_byte -
+ (data_phy_cycles * dsi->lanes + 12) *
+ vm->hback_porch /
+ (vm->hfront_porch + vm->hback_porch);
} else {
DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
horizontal_frontporch_byte = vm->hfront_porch *
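To see what the proportional split buys, take assumed values hfront_porch = 40, hback_porch = 60, dsi_tmp_buf_bpp = 3, 4 lanes and data_phy_cycles = 28 in non-burst mode (overhead constant 12):

    overhead  = 28 * 4 + 12 = 124 byte cycles
    HFP share = 124 * 40 / 100 = 49  ->  horizontal_frontporch_byte = 40 * 3 - 49 = 71
    HBP share = 124 * 60 / 100 = 74  ->  horizontal_backporch_byte -= 74

The old code took the full 124 from the front porch alone and had to fall back (with the under-60Hz warning) whenever the HFP by itself could not cover it; charging each porch pro rata lets the combined check pass in more cases.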
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 7ad14937fcdf..b67f88872726 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -506,6 +506,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
return gpu;
fail:
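For scale: Bps_to_icc() converts bytes/s into the kBps units the interconnect framework votes in, so with an assumed gpu->fast_rate of 450 MHz the call above is icc_set_bw(path, 0, Bps_to_icc(450000000) * 8), i.e. an average-bandwidth vote of 0 and a peak vote of 3,600,000 kBps (450 MHz x 8-byte bus width = 3.6 GB/s).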
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b01388a9e89e..253d8d85daad 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -591,6 +591,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
goto fail;
}
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
return gpu;
fail:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b02e2042547f..7d9e63e20ded 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -753,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu->funcs->flush(gpu, gpu->rb[0]);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
- } else {
- /* Print a warning so if we die, we know why */
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ } else {
+ return ret;
}
/* Last step - yield the ringbuffer */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index f44553ec3193..ed78fee2a262 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -16,11 +16,11 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -2519,6 +2519,54 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
+
+#define REG_A6XX_GBIF_HALT 0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
+
#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 85f14feafdec..983afeaee737 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/interconnect.h>
@@ -149,6 +149,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
if (freq == gmu->gpu_freqs[perf_index])
break;
+ gmu->current_perf_index = perf_index;
+
__a6xx_gmu_set_freq(gmu, perf_index);
}
@@ -433,6 +435,8 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
@@ -480,20 +484,34 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ if (adreno_is_a618(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
@@ -741,8 +759,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
enable_irq(gmu->hfi_irq);
- /* Set the GPU to the highest power frequency */
- __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /* Set the GPU to the current freq */
+ __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
@@ -1166,6 +1184,8 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+ gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 39a26dd63674..2af91ed7ed0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -63,6 +63,9 @@ struct a6xx_gmu {
struct clk_bulk_data *clocks;
struct clk *core_clk;
+ /* current performance index set externally */
+ int current_perf_index;
+
int nr_gpu_freqs;
unsigned long gpu_freqs[16];
u32 gx_arc_votes[16];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index dc8ec2c94301..daf07800cde0 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include "msm_gem.h"
@@ -378,6 +378,18 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
+ /*
+ * During a previous slumber, GBIF halt is asserted to ensure
+ * that no further transactions can go through the GPU before
+ * the GPU headswitch is turned off.
+ *
+ * This halt is deasserted once the headswitch goes off, but in
+ * case the headswitch doesn't go off, clear the GBIF halt here
+ * to ensure GPU wake-up doesn't fail because of halted GPU
+ * transactions.
+ */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
@@ -406,12 +418,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
- /* enable hardware clockgating */
- a6xx_set_hwcg(gpu, true);
+ /*
+ * enable hardware clockgating
+ * For now enable clock gating only for a630
+ */
+ if (adreno_is_a630(adreno_gpu))
+ a6xx_set_hwcg(gpu, true);
- /* VBIF start */
- gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ /* VBIF/GBIF start */
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ if (adreno_is_a630(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
@@ -537,12 +554,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_flush(gpu, gpu->rb[0]);
if (!a6xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
- } else {
- /* Print a warning so if we die, we know why */
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
ret = 0;
+ } else {
+ return ret;
}
out:
@@ -724,6 +748,39 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /*
+ * GMU needs DDR access in slumber path. Deassert GBIF halt now
+ * to allow for GMU to access system memory.
+ */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -748,6 +805,16 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+ * may use VBIF)
+ */
+ a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
+
return a6xx_gmu_stop(a6xx_gpu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 64399554f2dd..7239b8b60939 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__
@@ -42,6 +42,13 @@ struct a6xx_gpu {
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+ if (adreno_is_a630(gpu))
+ return false;
+
+ return true;
+}
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 691c1a277d91..d6023ba8033c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#include <linux/ascii85.h>
#include "msm_gem.h"
@@ -320,6 +320,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
{
struct resource *res;
void __iomem *cxdbg = NULL;
+ int nr_debugbus_blocks;
/* Set up the GX debug bus */
@@ -374,9 +375,11 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
- a6xx_state->debugbus = state_kcalloc(a6xx_state,
- ARRAY_SIZE(a6xx_debugbus_blocks),
- sizeof(*a6xx_state->debugbus));
+ nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+ (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
if (a6xx_state->debugbus) {
int i;
@@ -388,15 +391,31 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
&a6xx_state->debugbus[i]);
a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+ /*
+ * GBIF has the same debugbus as the other GPU blocks; fall back
+ * to the default path if the GPU uses GBIF. GBIF also uses
+ * exactly the same ID as VBIF.
+ */
+ if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_get_debugbus_block(gpu, a6xx_state,
+ &a6xx_gbif_debugbus_block,
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus += 1;
+ }
}
- a6xx_state->vbif_debugbus =
- state_kcalloc(a6xx_state, 1,
- sizeof(*a6xx_state->vbif_debugbus));
+ /* Dump the VBIF debugbus on applicable targets */
+ if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
- if (a6xx_state->vbif_debugbus)
- a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
- a6xx_state->vbif_debugbus);
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+ }
if (cxdbg) {
a6xx_state->cx_debugbus =
@@ -770,14 +789,16 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[1]);
}
+#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
ARRAY_SIZE(a6xx_reglist) +
- ARRAY_SIZE(a6xx_hlsq_reglist);
+ ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
a6xx_state->registers = state_kcalloc(a6xx_state,
count, sizeof(*a6xx_state->registers));
@@ -792,6 +813,15 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state, &a6xx_ahb_reglist[i],
&a6xx_state->registers[index++]);
+ if (a6xx_has_gbif(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_vbif_reglist,
+ &a6xx_state->registers[index++]);
+
for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
a6xx_get_crashdumper_registers(gpu,
a6xx_state, &a6xx_reglist[i],
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 68cccfa2870a..e67c20c415af 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
#ifndef _A6XX_CRASH_DUMP_H_
#define _A6XX_CRASH_DUMP_H_
@@ -307,11 +307,20 @@ static const u32 a6xx_vbif_registers[] = {
0x3410, 0x3410, 0x3800, 0x3801,
};
+static const u32 a6xx_gbif_registers[] = {
+ 0x3C00, 0x3C0B, 0x3C40, 0x3C47, 0x3CC0, 0x3CD1, 0xE3A, 0xE3A,
+};
+
static const struct a6xx_registers a6xx_ahb_reglist[] = {
REGS(a6xx_ahb_registers, 0, 0),
- REGS(a6xx_vbif_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_vbif_reglist =
+ REGS(a6xx_vbif_registers, 0, 0);
+
+static const struct a6xx_registers a6xx_gbif_reglist =
+ REGS(a6xx_gbif_registers, 0, 0);
+
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -422,6 +431,9 @@ static const struct a6xx_debugbus_block {
DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
};
+static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
+ DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
+
static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index fbbdf86504f5..cb3a6e597d76 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -167,6 +167,17 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
+ .rev = ADRENO_REV(6, 1, 8, ANY_ID),
+ .revn = 618,
+ .name = "A618",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ }, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
.name = "A630",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 0783e4b5486a..7fd29829b2fa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -26,6 +26,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
{
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
+ const char *signed_fwname = NULL;
struct device_node *np, *mem_np;
struct resource r;
phys_addr_t mem_phys;
@@ -58,8 +59,43 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
mem_phys = r.start;
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ /*
+ * Check for a firmware-name property. This is the new scheme
+ * to handle firmware that may be signed with device specific
+ * keys, allowing us to have a different zap fw path for different
+ * devices.
+ *
+ * If the firmware-name property is found, we bypass the
+ * adreno_request_fw() mechanism, because we don't need to handle
+ * the /lib/firmware/qcom/... vs /lib/firmware/... case.
+ *
+ * If the firmware-name property is not found, for backwards
+ * compatibility we fall back to the fwname from the gpulist
+ * table.
+ */
+ of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
+ if (signed_fwname) {
+ fwname = signed_fwname;
+ ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
+ if (ret)
+ fw = ERR_PTR(ret);
+ } else if (fwname) {
+ /* Request the MDT file from the default location: */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ } else {
+ /*
+ * For new targets, we require the firmware-name property,
+ * if a zap-shader is required, rather than falling back
+ * to a firmware name specified in gpulist.
+ *
+ * Because the firmware is signed with a (potentially)
+ * device specific key, having the name come from gpulist
+ * was a bad idea, and is only provided for backwards
+ * compatibility for older targets.
+ */
+ return -ENODEV;
+ }
+
if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
return PTR_ERR(fw);
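With this in place a board can carry its device-specific signed blob in the GPU's zap-shader DT node, e.g. (hypothetical path) firmware-name = "qcom/mydevice/a630_zap.mbn";, and the driver only falls back to adreno_request_fw() with the gpulist name when that property is absent.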
@@ -95,7 +131,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
* not. But since we've already gotten through adreno_request_fw()
* we know which of the two cases it is:
*/
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
ret = qcom_mdt_load(dev, fw, fwname, pasid,
mem_region, mem_phys, mem_size, NULL);
} else {
@@ -146,14 +182,6 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return -EPROBE_DEFER;
}
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- zap_available = false;
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
@@ -826,7 +854,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -887,10 +915,21 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
/* Check for an interconnect path for the bus */
- gpu->icc_path = of_icc_get(dev, NULL);
+ gpu->icc_path = of_icc_get(dev, "gfx-mem");
+ if (!gpu->icc_path) {
+ /*
+ * Keep compatibility with device trees that don't have an
+ * interconnect-names property.
+ */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ }
if (IS_ERR(gpu->icc_path))
gpu->icc_path = NULL;
+ gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
+ if (IS_ERR(gpu->ocmem_icc_path))
+ gpu->ocmem_icc_path = NULL;
+
return 0;
}
@@ -977,6 +1016,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->fw[i]);
icc_put(gpu->icc_path);
+ icc_put(gpu->ocmem_icc_path);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e71a7570ef72..9ff4e550e7bd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -3,7 +3,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
*/
#ifndef __ADRENO_GPU_H__
@@ -227,6 +227,16 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline int adreno_is_a618(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 618;
+}
+
+static inline int adreno_is_a630(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 630;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -330,10 +340,7 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
static inline bool adreno_reg_check(struct adreno_gpu *gpu,
enum adreno_regs offset_name)
{
- if (offset_name >= REG_ADRENO_REGISTER_MAX ||
- !gpu->reg_offsets[offset_name]) {
- BUG();
- }
+ BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
/*
* REG_SKIP is a special value that tell us that the register in
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index b177d5052c5e..17448505a9b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -197,10 +197,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
DPU_DEBUG("%s\n", dpu_crtc->name);
for (i = 0; i < cstate->num_mixers; i++) {
- if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
- DPU_ERROR("invalid lm or ctl assigned to mixer\n");
- return;
- }
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
@@ -1113,14 +1109,9 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
for (i = 0; i < cstate->num_mixers; ++i) {
m = &cstate->mixers[i];
- if (!m->hw_lm)
- seq_printf(s, "\tmixer[%d] has no lm\n", i);
- else if (!m->lm_ctl)
- seq_printf(s, "\tmixer[%d] has no ctl\n", i);
- else
- seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
- m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
- out_width, mode->vdisplay);
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
}
seq_puts(s, "\n");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 6197261e22c1..58d3400668f5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -58,7 +58,7 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_VDISPLAY_SPLIT 1080
+#define MAX_HDISPLAY_SPLIT 1080
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -233,7 +233,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
u32 irq_status;
int ret;
- if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ if (!wait_info || intr_idx >= INTR_IDX_MAX) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -308,7 +308,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_irq *irq;
int ret = 0;
- if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ if (intr_idx >= INTR_IDX_MAX) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
@@ -363,10 +363,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_irq *irq;
int ret;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
irq = &phys_enc->irq[intr_idx];
/* silently skip irqs that weren't registered */
@@ -415,7 +411,7 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.get_hw_resources)
+ if (phys->ops.get_hw_resources)
phys->ops.get_hw_resources(phys, hw_res);
}
}
@@ -438,7 +434,7 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.destroy) {
+ if (phys->ops.destroy) {
phys->ops.destroy(phys);
--dpu_enc->num_phys_encs;
dpu_enc->phys_encs[i] = NULL;
@@ -464,7 +460,7 @@ void dpu_encoder_helper_split_config(
struct dpu_hw_mdp *hw_mdptop;
struct msm_display_info *disp_info;
- if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ if (!phys_enc->hw_mdptop || !phys_enc->parent) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -533,8 +529,23 @@ static struct msm_display_topology dpu_encoder_get_topology(
if (dpu_enc->phys_encs[i])
intf_count++;
- /* User split topology for width > 1080 */
- topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ /* Datapath topology selection
+ *
+ * Dual display
+ * 2 LM, 2 INTF (split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ */
+ if (intf_count == 2)
+ topology.num_lm = 2;
+ else if (!dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = 1;
+ else
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+
topology.num_enc = 0;
topology.num_intf = intf_count;
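Concretely: an assumed single 3840x2160 interface on a catalog with has_3d_merge gives intf_count = 1 and hdisplay 3840 > MAX_HDISPLAY_SPLIT (1080), so num_lm = 2 with num_intf = 1 (two layer mixers stream-merged into one interface); the same mode without 3D merge support gets 1 LM, and a dual-interface split display always takes 2 LM, 2 INTF via the first branch.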
@@ -582,10 +593,10 @@ static int dpu_encoder_virt_atomic_check(
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.atomic_check)
+ if (phys->ops.atomic_check)
ret = phys->ops.atomic_check(phys, crtc_state,
conn_state);
- else if (phys && phys->ops.mode_fixup)
+ else if (phys->ops.mode_fixup)
if (!phys->ops.mode_fixup(phys, mode, adj_mode))
ret = -EINVAL;
@@ -681,7 +692,7 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.irq_control)
+ if (phys->ops.irq_control)
phys->ops.irq_control(phys, enable);
}
@@ -1031,46 +1042,43 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
- "at idx: %d\n", i);
- goto error;
- }
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ goto error;
+ }
- if (!hw_ctl[i]) {
- DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
- "at idx: %d\n", i);
- goto error;
- }
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ goto error;
+ }
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = hw_ctl[i];
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
- struct dpu_hw_intf *hw_intf;
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
- if (hw_intf->idx == phys->intf_idx)
- phys->hw_intf = hw_intf;
- }
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
- if (!phys->hw_intf) {
- DPU_ERROR_ENC(dpu_enc,
- "no intf block assigned at idx: %d\n",
- i);
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n", i);
goto error;
- }
-
- phys->connector = conn->state->connector;
- if (phys->ops.mode_set)
- phys->ops.mode_set(phys, mode, adj_mode);
}
+
+ phys->connector = conn->state->connector;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
}
dpu_enc->mode_set_complete = true;
@@ -1202,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.disable)
+ if (phys->ops.disable)
phys->ops.disable(phys);
}
@@ -1215,8 +1223,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- if (dpu_enc->phys_encs[i])
- dpu_enc->phys_encs[i]->connector = NULL;
+ dpu_enc->phys_encs[i]->connector = NULL;
}
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
@@ -1306,7 +1313,7 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.control_vblank_irq)
+ if (phys->ops.control_vblank_irq)
phys->ops.control_vblank_irq(phys, enable);
}
}
@@ -1418,7 +1425,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
}
ctl = phys->hw_ctl;
- if (!ctl || !ctl->ops.trigger_flush) {
+ if (!ctl->ops.trigger_flush) {
DPU_ERROR("missing trigger cb\n");
return;
}
@@ -1462,13 +1469,8 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
ctl = phys_enc->hw_ctl;
- if (ctl && ctl->ops.trigger_start) {
+ if (ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
}
@@ -1505,14 +1507,10 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.reset)
+ if (!ctl->ops.reset)
return;
DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
@@ -1549,12 +1547,10 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ if (phys->enable_state == DPU_ENC_DISABLED)
continue;
ctl = phys->hw_ctl;
- if (!ctl)
- continue;
/*
* This is cleared in frame_done worker, which isn't invoked
@@ -1602,17 +1598,15 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->hw_ctl) {
- ctl = phys->hw_ctl;
- if (ctl->ops.clear_pending_flush)
- ctl->ops.clear_pending_flush(ctl);
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
- /* update only for command mode primary ctl */
- if ((phys == dpu_enc->cur_master) &&
- (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
- && ctl->ops.trigger_pending)
- ctl->ops.trigger_pending(ctl);
- }
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
}
}
@@ -1772,12 +1766,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys);
- if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
- needs_hw_reset = true;
- }
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
}
DPU_ATRACE_END("enc_prepare_for_kickoff");
@@ -1818,7 +1810,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.handle_post_kickoff)
+ if (phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
@@ -1847,7 +1839,7 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.prepare_commit)
+ if (phys->ops.prepare_commit)
phys->ops.prepare_commit(phys);
}
}
@@ -1862,9 +1854,6 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys)
- continue;
-
seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
phys->intf_idx - INTF_0,
atomic_read(&phys->vsync_cnt),
@@ -1923,8 +1912,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
for (i = 0; i < dpu_enc->num_phys_encs; i++)
- if (dpu_enc->phys_encs[i] &&
- dpu_enc->phys_encs[i]->ops.late_register)
+ if (dpu_enc->phys_encs[i]->ops.late_register)
dpu_enc->phys_encs[i]->ops.late_register(
dpu_enc->phys_encs[i],
dpu_enc->debugfs_root);
@@ -2093,11 +2081,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys) {
- atomic_set(&phys->vsync_cnt, 0);
- atomic_set(&phys->underrun_cnt, 0);
- }
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
}
mutex_unlock(&dpu_enc->enc_lock);
@@ -2239,8 +2224,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!phys)
- continue;
switch (event) {
case MSM_ENC_COMMIT_DONE:
@@ -2256,7 +2239,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
return -EINVAL;
- };
+ }
if (fn_wait) {
DPU_ATRACE_BEGIN("wait_for_completion_event");
@@ -2273,7 +2256,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
if (!encoder) {
DPU_ERROR("invalid encoder\n");
@@ -2284,12 +2266,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
if (dpu_enc->cur_master)
return dpu_enc->cur_master->intf_mode;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys)
- return phys->intf_mode;
- }
+ if (dpu_enc->num_phys_encs)
+ return dpu_enc->phys_encs[0]->intf_mode;
return INTF_MODE_NONE;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 047960949fbb..39e1e280ba44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -45,8 +45,7 @@ static bool dpu_encoder_phys_cmd_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (phys_enc)
- DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
return true;
}
@@ -58,11 +57,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
- if (!phys_enc)
- return;
-
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.setup_intf_cfg)
+ if (!ctl->ops.setup_intf_cfg)
return;
intf_cfg.intf = phys_enc->intf_idx;
@@ -79,7 +75,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
int new_cnt;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("pp_done_irq");
@@ -106,7 +102,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("rd_ptr_irq");
@@ -125,9 +121,6 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc || !phys_enc->hw_ctl)
- return;
-
DPU_ATRACE_BEGIN("ctl_start_irq");
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
@@ -141,9 +134,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc)
- return;
-
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
@@ -179,7 +169,7 @@ static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !mode || !adj_mode) {
+ if (!mode || !adj_mode) {
DPU_ERROR("invalid args\n");
return;
}
@@ -198,7 +188,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
- if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ if (!phys_enc->hw_pp)
return -EINVAL;
cmd_enc->pp_timeout_report_cnt++;
@@ -247,11 +237,6 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -273,7 +258,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
int ret = 0;
int refcount;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
@@ -314,9 +299,6 @@ end:
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- if (!phys_enc)
- return;
-
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
@@ -351,7 +333,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
u32 vsync_hz;
struct dpu_kms *dpu_kms;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -428,8 +410,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
- || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
@@ -458,7 +439,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -480,7 +461,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid phys encoder\n");
return;
}
@@ -499,8 +480,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
static void _dpu_encoder_phys_cmd_connect_te(
struct dpu_encoder_phys *phys_enc, bool enable)
{
- if (!phys_enc || !phys_enc->hw_pp ||
- !phys_enc->hw_pp->ops.connect_external_te)
+ if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
@@ -518,7 +498,7 @@ static int dpu_encoder_phys_cmd_get_line_count(
{
struct dpu_hw_pingpong *hw_pp;
- if (!phys_enc || !phys_enc->hw_pp)
+ if (!phys_enc->hw_pp)
return -EINVAL;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
@@ -536,7 +516,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -559,10 +539,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
kfree(cmd_enc);
}
@@ -580,7 +556,7 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
to_dpu_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc || !phys_enc->hw_pp) {
+ if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
@@ -614,11 +590,6 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc || !phys_enc->hw_ctl) {
- DPU_ERROR("invalid argument(s)\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -639,9 +610,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
{
int rc;
- if (!phys_enc)
- return -EINVAL;
-
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
@@ -658,9 +626,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
- if (!phys_enc)
- return -EINVAL;
-
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
@@ -681,9 +646,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
struct dpu_encoder_phys_cmd *cmd_enc;
struct dpu_encoder_wait_info wait_info;
- if (!phys_enc)
- return -EINVAL;
-
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
@@ -715,9 +677,6 @@ static void dpu_encoder_phys_cmd_handle_post_kickoff(
static void dpu_encoder_phys_cmd_trigger_start(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return;
-
dpu_encoder_helper_trigger_start(phys_enc);
}
@@ -816,6 +775,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
return phys_enc;
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 3123ef873cdf..c71c18de5966 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -220,8 +220,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (phys_enc)
- DPU_DEBUG_VIDENC(phys_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -239,7 +238,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
- if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
return;
}
@@ -280,6 +279,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
+
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
@@ -293,12 +300,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
- if (!phys_enc)
- return;
-
hw_ctl = phys_enc->hw_ctl;
- if (!hw_ctl)
- return;
DPU_ATRACE_BEGIN("vblank_irq");
@@ -314,7 +316,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- if (hw_ctl && hw_ctl->ops.get_flush_register)
+ if (hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
@@ -335,9 +337,6 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (!phys_enc)
- return;
-
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
@@ -374,11 +373,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- if (!phys_enc) {
- DPU_ERROR("invalid encoder/kms\n");
- return;
- }
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
@@ -395,11 +389,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
int ret = 0;
int refcount;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
@@ -435,6 +424,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
+ u32 intf_flush_mask = 0;
ctl = phys_enc->hw_ctl;
@@ -459,10 +449,18 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
+ if (ctl->ops.get_bitmask_active_intf)
+ ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask,
+ phys_enc->hw_intf->idx);
+
+ if (ctl->ops.update_pending_intf_flush)
+ ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
+
skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
- "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ "update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n",
+ ctl->idx - CTL_0, flush_mask, intf_flush_mask);
+
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -471,11 +469,6 @@ skip_flush:
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
DPU_DEBUG_VIDENC(phys_enc, "\n");
kfree(phys_enc);
}
@@ -493,11 +486,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_wait_info wait_info;
int ret;
- if (!phys_enc) {
- pr_err("invalid encoder\n");
- return -EINVAL;
- }
-
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -543,13 +531,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc) {
- DPU_ERROR("invalid encoder/parameters\n");
- return;
- }
-
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.wait_reset_status)
+ if (!ctl->ops.wait_reset_status)
return;
/*
@@ -569,12 +552,12 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
unsigned long lock_flags;
int ret;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
+ if (!phys_enc->parent || !phys_enc->parent->dev) {
DPU_ERROR("invalid encoder/device\n");
return;
}
- if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
@@ -639,9 +622,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
{
int ret;
- if (!phys_enc)
- return;
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_intf->idx - INTF_0,
enable,
@@ -662,9 +642,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return -EINVAL;
-
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
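
The enable path above only issues the new interface flush when the CTL block actually exposes the V1 ops, so older catalogs keep their single MDP flush. A minimal standalone sketch of that guarded-op pattern; all types, ops and values here are simplified stand-ins, not the driver's real structures:

#include <stdio.h>

struct ctl;
struct ctl_ops {
	/* Present only on hardware with an active-CTL interface flush. */
	void (*get_bitmask_active_intf)(struct ctl *ctl, unsigned int *mask, int intf);
	void (*update_pending_intf_flush)(struct ctl *ctl, unsigned int mask);
};

struct ctl {
	struct ctl_ops ops;
	unsigned int pending_intf_flush;
};

static void get_active_intf_v1(struct ctl *ctl, unsigned int *mask, int intf)
{
	(void)ctl;
	*mask |= 1u << intf;		/* one bit per interface on V1 hardware */
}

static void update_intf_flush_v1(struct ctl *ctl, unsigned int mask)
{
	ctl->pending_intf_flush |= mask;
}

static void encoder_enable(struct ctl *ctl, int intf_idx)
{
	unsigned int intf_flush_mask = 0;

	/* Older CTL revisions leave these ops NULL; skip them cleanly. */
	if (ctl->ops.get_bitmask_active_intf)
		ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask, intf_idx);
	if (ctl->ops.update_pending_intf_flush)
		ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
}

int main(void)
{
	struct ctl sdm845 = { .ops = { 0, 0 }, .pending_intf_flush = 0 };
	struct ctl sc7180 = { .ops = { get_active_intf_v1, update_intf_flush_v1 },
			      .pending_intf_flush = 0 };

	encoder_enable(&sdm845, 1);	/* ops absent: nothing recorded */
	encoder_enable(&sc7180, 1);	/* records BIT(1) for the interface */
	printf("sdm845=%#x sc7180=%#x\n",
	       sdm845.pending_intf_flush, sc7180.pending_intf_flush);
	return 0;
}
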
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 24ab6249083a..528632690f1e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -489,12 +489,28 @@ static const struct dpu_format dpu_format_map_ubwc[] = {
true, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+ /* ARGB8888 and ABGR8888 purposely have the same color
+ * ordering. The hardware only supports ABGR8888 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, DPU_FORMAT_FLAG_COMPRESSED,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
@@ -550,7 +566,9 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
{
static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
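
The new ARGB/XRGB entries only work because they reuse the ABGR component order, so the linear lookup that resolves a fourcc to a native UBWC layout can alias all four 8:8:8:8 variants onto one media color. A rough standalone model of that table scan; the format and color names are invented stand-ins:

#include <stdio.h>
#include <stddef.h>

/* Invented stand-ins for the DRM fourccs and the native UBWC color codes. */
enum { FMT_ABGR8888, FMT_ARGB8888, FMT_XBGR8888, FMT_XRGB8888, FMT_BGR565 };
enum { COLOR_RGBA8888_UBWC = 12, COLOR_RGB565_UBWC = 13 };

struct media_color_map {
	unsigned int format;
	unsigned int color;
};

/* All four 8:8:8:8 variants alias the one layout the hardware fetches. */
static const struct media_color_map ubwc_map[] = {
	{ FMT_ABGR8888, COLOR_RGBA8888_UBWC },
	{ FMT_ARGB8888, COLOR_RGBA8888_UBWC },
	{ FMT_XBGR8888, COLOR_RGBA8888_UBWC },
	{ FMT_XRGB8888, COLOR_RGBA8888_UBWC },
	{ FMT_BGR565,   COLOR_RGB565_UBWC },
};

static int get_media_color_ubwc(unsigned int format)
{
	size_t i;

	for (i = 0; i < sizeof(ubwc_map) / sizeof(ubwc_map[0]); i++)
		if (ubwc_map[i].format == format)
			return (int)ubwc_map[i].color;
	return -1;	/* format has no compressed layout */
}

int main(void)
{
	printf("XRGB8888 -> %d, unknown -> %d\n",
	       get_media_color_ubwc(FMT_XRGB8888), get_media_color_ubwc(99));
	return 0;
}
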
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 04c8c44f5b9c..c567917541e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -11,11 +11,17 @@
#include "dpu_hw_catalog_format.h"
#include "dpu_kms.h"
-#define VIG_SDM845_MASK \
- (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+#define VIG_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_SDM845_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SC7180_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -27,6 +33,9 @@
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+#define MIXER_SC7180_MASK \
+ (BIT(DPU_DIM_LAYER))
+
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
#define PINGPONG_SDM845_SPLIT_MASK \
@@ -58,9 +67,20 @@ static const struct dpu_caps sdm845_dpu_caps = {
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
+ .has_3d_merge = true,
+};
+
+static const struct dpu_caps sc7180_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
};
-static struct dpu_mdp_cfg sdm845_mdp[] = {
+static const struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x45C,
@@ -85,10 +105,27 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
},
};
+static const struct dpu_mdp_cfg sc7180_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x3,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ },
+};
+
/*************************************************************
* CTL sub blocks config
*************************************************************/
-static struct dpu_ctl_cfg sdm845_ctl[] = {
+static const struct dpu_ctl_cfg sdm845_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0xE4,
@@ -116,6 +153,24 @@ static struct dpu_ctl_cfg sdm845_ctl[] = {
},
};
+static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = BIT(DPU_CTL_ACTIVE_CFG)
+ },
+};
+
/*************************************************************
* SSPP sub blocks config
*************************************************************/
@@ -128,7 +183,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.maxvdeciexp = MAX_VERT_DECIMATION,
};
-#define _VIG_SBLK(num, sdma_pri) \
+#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
.common = &sdm845_sspp_common, \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
@@ -137,7 +192,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
- .id = DPU_SSPP_SCALER_QSEED3, \
+ .id = qseed_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = STRCAT("sspp_csc", num), \
.id = DPU_SSPP_CSC_10BIT, \
@@ -162,10 +217,14 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
@@ -184,7 +243,7 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
.clk_ctrl = _clkctrl \
}
-static struct dpu_sspp_cfg sdm845_sspp[] = {
+static const struct dpu_sspp_cfg sdm845_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
@@ -203,9 +262,26 @@ static struct dpu_sspp_cfg sdm845_sspp[] = {
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+ _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+};
+
/*************************************************************
* MIXER sub blocks config
*************************************************************/
+
+/* SDM845 */
+
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -215,23 +291,46 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
- .features = MIXER_SDM845_MASK, \
- .sblk = &sdm845_lm_sblk, \
+ .features = _fmask, \
+ .sblk = _sblk, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
-static struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
- LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
- LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
- LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
- LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
- LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
+static const struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_3, LM_2),
+};
+
+/* SC7180 */
+
+static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
+ },
+};
+
+static const struct dpu_lm_cfg sc7180_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_1, LM_0),
};
/*************************************************************
@@ -264,13 +363,18 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.sblk = &sdm845_pp_sblk \
}
-static struct dpu_pingpong_cfg sdm845_pp[] = {
+static const struct dpu_pingpong_cfg sdm845_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
};
+static const struct dpu_pingpong_cfg sc7180_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+};
+
/*************************************************************
* INTF sub blocks config
*************************************************************/
@@ -278,26 +382,32 @@ static struct dpu_pingpong_cfg sdm845_pp[] = {
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
+ .features = BIT(DPU_CTL_ACTIVE_CFG), \
.type = _type, \
.controller_id = _ctrl_id, \
.prog_fetch_lines_worst_case = 24 \
}
-static struct dpu_intf_cfg sdm845_intf[] = {
+static const struct dpu_intf_cfg sdm845_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
};
+static const struct dpu_intf_cfg sc7180_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+};
+
/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
-static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
-static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
-static struct dpu_vbif_cfg sdm845_vbif[] = {
+static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
.name = "vbif_0", .id = VBIF_0,
.base = 0, .len = 0x1040,
@@ -316,7 +426,7 @@ static struct dpu_vbif_cfg sdm845_vbif[] = {
},
};
-static struct dpu_reg_dma_cfg sdm845_regdma = {
+static const struct dpu_reg_dma_cfg sdm845_regdma = {
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
};
@@ -325,7 +435,7 @@ static struct dpu_reg_dma_cfg sdm845_regdma = {
*************************************************************/
/* SSPP QOS LUTs */
-static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 4, .lut = 0x357},
{.fl = 5, .lut = 0x3357},
{.fl = 6, .lut = 0x23357},
@@ -340,7 +450,11 @@ static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 0, .lut = 0x11222222223357}
};
-static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 10, .lut = 0x344556677},
{.fl = 11, .lut = 0x3344556677},
{.fl = 12, .lut = 0x23344556677},
@@ -349,11 +463,19 @@ static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 0, .lut = 0x112233344556677},
};
-static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
+ {.fl = 0, .lut = 0x0011223344556677},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
-static struct dpu_perf_cfg sdm845_perf_data = {
+static const struct dpu_perf_cfg sdm845_perf_data = {
.max_bw_low = 6800000,
.max_bw_high = 6800000,
.min_core_ib = 2400000,
@@ -392,6 +514,30 @@ static struct dpu_perf_cfg sdm845_perf_data = {
},
};
+static const struct dpu_perf_cfg sc7180_perf_data = {
+ .max_bw_low = 3900000,
+ .max_bw_high = 5500000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
/*************************************************************
* Hardware catalog init
*************************************************************/
@@ -421,12 +567,43 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.reg_dma_count = 1,
.dma_cfg = sdm845_regdma,
.perf = sdm845_perf_data,
+ .mdss_irqs = 0x3ff,
+ };
+}
+
+/*
+ * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sc7180_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7180_mdp),
+ .mdp = sc7180_mdp,
+ .ctl_count = ARRAY_SIZE(sc7180_ctl),
+ .ctl = sc7180_ctl,
+ .sspp_count = ARRAY_SIZE(sc7180_sspp),
+ .sspp = sc7180_sspp,
+ .mixer_count = ARRAY_SIZE(sc7180_lm),
+ .mixer = sc7180_lm,
+ .pingpong_count = ARRAY_SIZE(sc7180_pp),
+ .pingpong = sc7180_pp,
+ .intf_count = ARRAY_SIZE(sc7180_intf),
+ .intf = sc7180_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sc7180_perf_data,
+ .mdss_irqs = 0x3f,
};
}
-static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
};
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
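
Catalog selection itself stays a linear walk of this handler table, matching the probed hardware revision against each entry and calling its cfg_init; SC7180 support is one extra row. A compact standalone model of the dispatch, with revision encodings invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct mdss_cfg { const char *soc; };

static void sdm845_init(struct mdss_cfg *cfg) { cfg->soc = "sdm845"; }
static void sc7180_init(struct mdss_cfg *cfg) { cfg->soc = "sc7180"; }

struct hw_cfg_handler {
	unsigned int hw_rev;
	void (*cfg_init)(struct mdss_cfg *cfg);
};

/* Invented revision encodings standing in for DPU_HW_VER_4xx/620. */
static const struct hw_cfg_handler handlers[] = {
	{ 0x400, sdm845_init },
	{ 0x401, sdm845_init },
	{ 0x620, sc7180_init },
};

static int catalog_init(struct mdss_cfg *cfg, unsigned int hw_rev)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (handlers[i].hw_rev == hw_rev) {
			handlers[i].cfg_init(cfg);
			return 0;
		}
	}
	return -1;	/* unsupported DPU revision */
}

int main(void)
{
	struct mdss_cfg cfg = { "unknown" };

	if (!catalog_init(&cfg, 0x620))
		printf("catalog selected: %s\n", cfg.soc);
	return 0;
}
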
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index ec76b8687a98..09df7d87dd43 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -38,6 +38,7 @@
#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
@@ -45,6 +46,7 @@
#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
#define DPU_HW_BLK_NAME_LEN 16
@@ -92,6 +94,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of Color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
@@ -110,6 +113,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
@@ -166,6 +170,7 @@ enum {
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_ACTIVE_CFG,
DPU_CTL_MAX
};
@@ -269,7 +274,7 @@ struct dpu_qos_lut_entry {
*/
struct dpu_qos_lut_tbl {
u32 nentry;
- struct dpu_qos_lut_entry *entries;
+ const struct dpu_qos_lut_entry *entries;
};
/**
@@ -283,6 +288,7 @@ struct dpu_qos_lut_tbl {
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_3d_merge indicate if 3D merge is supported
*/
struct dpu_caps {
u32 max_mixer_width;
@@ -293,6 +299,7 @@ struct dpu_caps {
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
+ bool has_3d_merge;
};
/**
@@ -320,6 +327,7 @@ struct dpu_sspp_blks_common {
* @maxupscale: maxupscale ratio supported
* @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
* @src_blk:
* @scaler_blk:
* @csc_blk:
@@ -340,6 +348,7 @@ struct dpu_sspp_sub_blks {
u32 maxupscale;
u32 smart_dma_priority;
u32 max_per_pipe_bw;
+ u32 qseed_ver;
struct dpu_src_blk src_blk;
struct dpu_scaler_blk scaler_blk;
struct dpu_pp_blk csc_blk;
@@ -511,7 +520,7 @@ struct dpu_vbif_dynamic_ot_cfg {
*/
struct dpu_vbif_dynamic_ot_tbl {
u32 count;
- struct dpu_vbif_dynamic_ot_cfg *cfg;
+ const struct dpu_vbif_dynamic_ot_cfg *cfg;
};
/**
@@ -521,7 +530,7 @@ struct dpu_vbif_dynamic_ot_tbl {
*/
struct dpu_vbif_qos_tbl {
u32 npriority_lvl;
- u32 *priority_lvl;
+ const u32 *priority_lvl;
};
/**
@@ -646,6 +655,7 @@ struct dpu_perf_cfg {
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
+ * @mdss_irqs: Bitmap with the irqs supported by the target
*/
struct dpu_mdss_cfg {
u32 hwversion;
@@ -653,25 +663,25 @@ struct dpu_mdss_cfg {
const struct dpu_caps *caps;
u32 mdp_count;
- struct dpu_mdp_cfg *mdp;
+ const struct dpu_mdp_cfg *mdp;
u32 ctl_count;
- struct dpu_ctl_cfg *ctl;
+ const struct dpu_ctl_cfg *ctl;
u32 sspp_count;
- struct dpu_sspp_cfg *sspp;
+ const struct dpu_sspp_cfg *sspp;
u32 mixer_count;
- struct dpu_lm_cfg *mixer;
+ const struct dpu_lm_cfg *mixer;
u32 pingpong_count;
- struct dpu_pingpong_cfg *pingpong;
+ const struct dpu_pingpong_cfg *pingpong;
u32 intf_count;
- struct dpu_intf_cfg *intf;
+ const struct dpu_intf_cfg *intf;
u32 vbif_count;
- struct dpu_vbif_cfg *vbif;
+ const struct dpu_vbif_cfg *vbif;
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
@@ -681,9 +691,11 @@ struct dpu_mdss_cfg {
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
- struct dpu_format_extended *dma_formats;
- struct dpu_format_extended *cursor_formats;
- struct dpu_format_extended *vig_formats;
+ const struct dpu_format_extended *dma_formats;
+ const struct dpu_format_extended *cursor_formats;
+ const struct dpu_format_extended *vig_formats;
+
+ unsigned long mdss_irqs;
};
struct dpu_mdss_hw_cfg_handler {
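
Constifying the catalog pointers above moves every per-SoC table into read-only data, so an accidental store through a catalog pointer now fails to compile instead of silently patching state shared across devices. A tiny illustration with a made-up ctl table:

/* Catalog entries as read-only data: the array lands in .rodata. */
struct ctl_cfg { const char *name; unsigned int base; };

static const struct ctl_cfg sc7180_ctl_model[] = {
	{ "ctl_0", 0x1000 },
	{ "ctl_1", 0x1200 },
};

static unsigned int first_base(const struct ctl_cfg *ctl)
{
	/* ctl[0].base = 0; would no longer compile: the pointee is const */
	return ctl[0].base;
}

int main(void)
{
	return first_base(sc7180_ctl_model) == 0x1000 ? 0 : 1;
}
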
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index bb6112c949ae..3766f0fd0bf0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -6,8 +6,12 @@
static const uint32_t qcom_compressed_supported_formats[] = {
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
};
static const uint32_t plane_formats[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 179e8d52cadb..831e5f7a9b7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -22,14 +22,18 @@
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
+#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_INTF_FLUSH 0x110
+#define CTL_INTF_MASTER 0x134
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
+#define INTF_IDX 31
-static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
- struct dpu_mdss_cfg *m,
+static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -100,11 +104,27 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= flushbits;
}
+static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_intf_flush_mask |= flushbits;
+}
+
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
return ctx->pending_flush_mask;
}
+static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
+{
+
+ if (ctx->pending_flush_mask & BIT(INTF_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
+ ctx->pending_intf_flush_mask);
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
@@ -222,6 +242,36 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
+static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ case INTF_1:
+ *flushbits |= BIT(31);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(0);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(1);
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -422,6 +472,24 @@ exit:
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
+
+static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 mode_sel = 0;
+
+ if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
+ mode_sel |= BIT(17);
+
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+}
+
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
@@ -455,31 +523,41 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
+ ops->get_bitmask_active_intf =
+ dpu_hw_ctl_active_get_bitmask_intf;
+ ops->update_pending_intf_flush =
+ dpu_hw_ctl_update_pending_intf_flush;
+ } else {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ }
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
- ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
- ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
- ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_ctl *c;
- struct dpu_ctl_cfg *cfg;
+ const struct dpu_ctl_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
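
The key structural move in this file is that _setup_ctl_ops() resolves the DPU_CTL_ACTIVE_CFG capability once at init time and installs the matching function pointers, so hot paths never re-test the feature bit. A standalone sketch of that one-time vtable selection, with types and names simplified:

#include <stdio.h>

#define CAP_ACTIVE_CFG	(1ul << 1)	/* stand-in for BIT(DPU_CTL_ACTIVE_CFG) */

struct ctl_ops {
	void (*trigger_flush)(void);
};

static void trigger_flush_legacy(void) { puts("legacy single flush"); }
static void trigger_flush_v1(void)     { puts("active-CTL flush with INTF stage"); }

/* Resolve the feature bit once; hot paths just call through the pointer. */
static void setup_ctl_ops(struct ctl_ops *ops, unsigned long cap)
{
	if (cap & CAP_ACTIVE_CFG)
		ops->trigger_flush = trigger_flush_v1;
	else
		ops->trigger_flush = trigger_flush_legacy;
}

int main(void)
{
	struct ctl_ops sdm845, sc7180;

	setup_ctl_ops(&sdm845, 0);
	setup_ctl_ops(&sc7180, CAP_ACTIVE_CFG);
	sdm845.trigger_flush();
	sc7180.trigger_flush();
	return 0;
}
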
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index d3ae939ef9f8..09e1263c72e2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -91,6 +91,15 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
+ * OR in the given flushbits to the cached pending_intf_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -130,11 +139,24 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
+ /**
+ * Query the value of the intf flush mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_intf blk);
/**
+ * Query the value of the intf active flush mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf blk);
+
+ /**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
*/
@@ -159,6 +181,7 @@ struct dpu_hw_ctl_ops {
* @mixer_count: number of mixers
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @pending_intf_flush_mask: pending INTF flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -171,6 +194,7 @@ struct dpu_hw_ctl {
int mixer_count;
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
+ u32 pending_intf_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
@@ -195,7 +219,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_ctl_destroy(): Destroys ctl driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 8bfa7d0eede6..d84a84f7fe1a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -800,8 +800,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
start_idx = reg_idx * 32;
end_idx = start_idx + 32;
- if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
- end_idx > ARRAY_SIZE(dpu_irq_map))
+ if (!test_bit(reg_idx, &intr->irq_mask) ||
+ start_idx >= ARRAY_SIZE(dpu_irq_map))
continue;
/*
@@ -955,8 +955,11 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].clr_off, 0xffffffff);
+ }
/* ensure register writes go through */
wmb();
@@ -971,8 +974,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
if (!intr)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].en_off, 0x00000000);
+ }
/* ensure register writes go through */
wmb();
@@ -991,6 +997,9 @@ static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (!test_bit(i, &intr->irq_mask))
+ continue;
+
/* Read interrupt status */
intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
dpu_intr_set[i].status_off);
@@ -1115,6 +1124,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
}
+ intr->irq_mask = m->mdss_irqs;
spin_lock_init(&intr->irq_lock);
return intr;
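
With the new irq_mask, every loop over the interrupt register groups first tests the group's bit, so a target such as SC7180 (mdss_irqs = 0x3f) never reads or writes registers for groups it does not implement. A standalone model with the register write replaced by a print:

#include <stdio.h>

#define NGROUPS 10	/* sdm845 exposes ten interrupt register groups */

/* Minimal test_bit() stand-in for a single unsigned long bitmap. */
static int test_bit_model(int nr, unsigned long mask)
{
	return (mask >> nr) & 1ul;
}

static void clear_irqs(unsigned long irq_mask)
{
	int i;

	for (i = 0; i < NGROUPS; i++) {
		if (!test_bit_model(i, irq_mask))
			continue;	/* group absent on this target */
		/* real code writes 0xffffffff to the group's clear register */
		printf("clear irq group %d\n", i);
	}
}

int main(void)
{
	puts("sdm845 (0x3ff):");
	clear_irqs(0x3ff);	/* all ten groups */
	puts("sc7180 (0x3f):");
	clear_irqs(0x3f);	/* only the first six */
	return 0;
}
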
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 4edcf402dc46..fc9c98617281 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -187,6 +187,7 @@ struct dpu_hw_intr {
u32 *save_irq_status;
u32 irq_idx_tbl_size;
spinlock_t irq_lock;
+ unsigned long irq_mask;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index dcd87cda13fe..efe9a5719c6b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -56,8 +56,10 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
-static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
- struct dpu_mdss_cfg *m,
+#define INTF_MUX 0x25C
+
+static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -218,6 +220,30 @@ static void dpu_hw_intf_setup_prg_fetch(
DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
}
+static void dpu_hw_intf_bind_pingpong_blk(
+ struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 mux_cfg;
+
+ if (!intf)
+ return;
+
+ c = &intf->hw;
+
+ mux_cfg = DPU_REG_READ(c, INTF_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
+}
+
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
struct intf_status *s)
@@ -254,16 +280,18 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG))
+ ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
}
static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intf *c;
- struct dpu_intf_cfg *cfg;
+ const struct dpu_intf_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
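
bind_pingpong_blk above is a read-modify-write of a small mux field: the low nibble of INTF_MUX selects the feeding pingpong, and 0xf parks the interface. A standalone model of the bit manipulation, with the register mocked by a variable:

#include <stdio.h>

static unsigned int intf_mux;	/* mock of the INTF_MUX register */

static void bind_pingpong_blk_model(int enable, int pp_idx)
{
	unsigned int mux_cfg = intf_mux;	/* register read */

	mux_cfg &= ~0xfu;			/* clear the 4-bit select field */
	if (enable)
		mux_cfg |= pp_idx & 0x7;	/* connect pingpong pp_idx */
	else
		mux_cfg |= 0xf;			/* park the interface */

	intf_mux = mux_cfg;			/* register write */
}

int main(void)
{
	bind_pingpong_blk_model(1, 1);
	printf("bound to pp 1: mux=%#x\n", intf_mux);
	bind_pingpong_blk_model(0, 0);
	printf("unbound:       mux=%#x\n", intf_mux);
	return 0;
}
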
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index b03acc225c9b..85468981632d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -52,6 +52,8 @@ struct intf_status {
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
* @ get_line_count: reads current vertical line counter
+ * @bind_pingpong_blk: enable/disable the connection to the pingpong block
+ * that feeds pixels to this interface
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
@@ -68,6 +70,10 @@ struct dpu_hw_intf_ops {
struct intf_status *status);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp);
};
struct dpu_hw_intf {
@@ -92,7 +98,7 @@ struct dpu_hw_intf {
*/
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_intf_destroy(): Destroys INTF driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 5bc39baa746a..37becd43bd54 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -24,8 +24,8 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
-static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
- struct dpu_mdss_cfg *m,
+static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -147,12 +147,13 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
- if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)
+ || IS_SC7180_TARGET(m->hwversion))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
@@ -164,10 +165,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mixer *c;
- struct dpu_lm_cfg *cfg;
+ const struct dpu_lm_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 147ace31cfc2..4a6b2de19ef6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -91,7 +91,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
*/
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_lm_destroy(): Destroys layer mixer driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 5dbaba9fd180..d110a40f0e73 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,8 +28,8 @@
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
-static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
- struct dpu_mdss_cfg *m,
+static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
@@ -195,10 +195,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_pingpong *c;
- struct dpu_pingpong_cfg *cfg;
+ const struct dpu_pingpong_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 58bdb9279aa8..3d6f46b1db30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -106,7 +106,7 @@ struct dpu_hw_pingpong {
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
- struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_cfg *m);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 4f8b813aab81..82c5dbfdabc7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -132,6 +132,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
+
static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
@@ -657,7 +658,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
- if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
}
@@ -666,7 +668,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
-static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
struct dpu_hw_blk_reg_map *b)
@@ -696,7 +698,7 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
bool is_virtual_pipe)
{
struct dpu_hw_pipe *hw_pipe;
- struct dpu_sspp_cfg *cfg;
+ const struct dpu_sspp_cfg *cfg;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index a3680b482b41..85b018a9b03c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -27,7 +27,8 @@ struct dpu_hw_pipe;
*/
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
- (1UL << DPU_SSPP_SCALER_QSEED3))
+ (1UL << DPU_SSPP_SCALER_QSEED3) | \
+ (1UL << DPU_SSPP_SCALER_QSEED4))
/**
* Component indices
@@ -373,7 +374,7 @@ struct dpu_hw_pipe {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
struct dpu_mdss_cfg *catalog;
- struct dpu_mdp_cfg *mdp;
+ const struct dpu_mdp_cfg *mdp;
/* Pipe */
enum dpu_sspp idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 27fbeb504362..078afc5f5882 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -93,19 +93,12 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
DEV_DBG("%pS->%s: enable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
- if (clk_arry[i].clk) {
- rc = clk_prepare_enable(clk_arry[i].clk);
- if (rc)
- DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
- __builtin_return_address(0),
- __func__,
- clk_arry[i].clk_name, rc);
- } else {
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
- rc = -EPERM;
- }
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
if (rc && i) {
msm_dss_enable_clk(&clk_arry[i - 1],
@@ -119,12 +112,7 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
- if (clk_arry[i].clk)
- clk_disable_unprepare(clk_arry[i].clk);
- else
- DEV_ERR("%pS->%s: '%s' is not available\n",
- __builtin_return_address(0), __func__,
- clk_arry[i].clk_name);
+ clk_disable_unprepare(clk_arry[i].clk);
}
}
@@ -187,6 +175,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
continue;
mp->clk_config[i].rate = rate;
mp->clk_config[i].type = DSS_CLK_PCLK;
+ mp->clk_config[i].max_rate = rate;
}
mp->num_clk = num_clk;
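
The deleted branches relied on a convention the common clock framework already guarantees: clk_prepare_enable(NULL) succeeds and clk_disable_unprepare(NULL) is a no-op, so the per-entry availability check added nothing. A standalone model of that convention; the enable function here is a stand-in, not the kernel's:

#include <stdio.h>

struct clk { const char *name; };

/* Stand-in mirroring the kernel convention: a NULL clk is a valid no-op. */
static int clk_prepare_enable_model(struct clk *clk)
{
	if (!clk)
		return 0;	/* optional clock not wired up: success */
	printf("enable %s\n", clk->name);
	return 0;
}

int main(void)
{
	struct clk iface = { "iface" };
	struct clk *clk_arry[] = { &iface, NULL };	/* second clock optional */
	int i, rc = 0;

	for (i = 0; i < 2 && !rc; i++)
		rc = clk_prepare_enable_model(clk_arry[i]);
	return rc;
}
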
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 6c92f0fbeac9..cb08fafb1dc1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1059,6 +1059,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
+ { .compatible = "qcom,sc7180-dpu", },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 58d5acbcfc5c..3b9c33e694bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -53,8 +53,13 @@ enum {
R_MAX
};
+/*
+ * Default Preload Values
+ */
#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
#define DEFAULT_REFRESH_RATE 60
@@ -477,8 +482,16 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->src_width[i] /= chroma_subsmpl_h;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
- scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
- scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+
+ if (pdpu->pipe_hw->cap->features &
+ BIT(DPU_SSPP_SCALER_QSEED4)) {
+ scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
+ } else {
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ }
+
pstate->pixel_ext.num_ext_pxls_top[i] =
scale_cfg->src_height[i];
pstate->pixel_ext.num_ext_pxls_left[i] =
@@ -738,7 +751,7 @@ done:
} else {
pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
- };
+ }
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -858,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
pdpu->pipe_sblk->maxupscale << 16,
true, true);
if (ret) {
- DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
if (!state->visible)
@@ -884,13 +897,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(!(pdpu->features & DPU_SSPP_SCALER) ||
!(pdpu->features & (BIT(DPU_SSPP_CSC)
| BIT(DPU_SSPP_CSC_10BIT))))) {
- DPU_ERROR_PLANE(pdpu,
+ DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
- DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -E2BIG;
@@ -899,19 +912,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
(src.x1 & 0x1 || src.y1 & 0x1 ||
drm_rect_width(&src) & 0x1 ||
drm_rect_height(&src) & 0x1)) {
- DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
- DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
- DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
return -E2BIG;
}
@@ -1337,7 +1350,8 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
sblk->scaler_blk.base + cfg->base,
sblk->scaler_blk.len,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index ddc8412731af..23f5b1433b35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -141,11 +141,11 @@ int dpu_rm_destroy(struct dpu_rm *rm)
static int _dpu_rm_hw_blk_create(
struct dpu_rm *rm,
- struct dpu_mdss_cfg *cat,
+ const struct dpu_mdss_cfg *cat,
void __iomem *mmio,
enum dpu_hw_blk_type type,
uint32_t id,
- void *hw_catalog_info)
+ const void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
void *hw;
@@ -215,7 +215,7 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
- struct dpu_lm_cfg *lm = &cat->mixer[i];
+ const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 991f4c8f8a12..93ab36bd8df3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -299,7 +299,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
entry = debugfs_create_dir("vbif", debugfs_root);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
@@ -318,7 +318,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&vbif->default_ot_wr_limit);
for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
- struct dpu_vbif_dynamic_ot_cfg *cfg =
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_rd_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
@@ -332,7 +332,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
}
for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
- struct dpu_vbif_dynamic_ot_cfg *cfg =
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_wr_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 772f0753ed38..aaf2f26f8505 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
if (mdp4_dsi_encoder->enabled)
return;
- mdp4_crtc_set_config(encoder->crtc,
+ mdp4_crtc_set_config(encoder->crtc,
MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
MDP4_DMA_CONFIG_DEFLKR_EN |
MDP4_DMA_CONFIG_DITHER_EN |
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 1f48f64539a2..e3c4c250238b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -902,7 +902,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
major, minor);
ret = -ENXIO;
goto fail;
- };
+ }
/* only after mdp5_cfg global pointer's init can we access the hw */
for (i = 0; i < num_handlers; i++) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index eff1a4c61258..4de771d6f0be 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -178,6 +178,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 86ad3fdf207d..813d69deb5e8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -153,6 +153,10 @@ static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
+static const char * const dsi_sc7180_bus_clk_names[] = {
+ "iface", "bus",
+};
+
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
@@ -167,7 +171,22 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct msm_dsi_config sc7180_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 21800, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sc7180_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
+ .io_start = { 0xae94000 },
+ .num_dsi = 1,
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
.link_clk_disable = dsi_link_clk_disable_v2,
.clk_init_ver = dsi_clk_init_v2,
@@ -179,6 +198,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = NULL,
@@ -190,6 +210,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
};
static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
.link_clk_enable = dsi_link_clk_enable_6g,
.link_clk_disable = dsi_link_clk_disable_6g,
.clk_init_ver = dsi_clk_init_6g_v2,
@@ -223,6 +244,9 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
+ &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 50a37ceb6a25..217e24a65178 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -20,6 +20,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
@@ -35,6 +36,7 @@ struct msm_dsi_config {
};
struct msm_dsi_host_cfg_ops {
+ int (*link_clk_set_rate)(struct msm_dsi_host *msm_host);
int (*link_clk_enable)(struct msm_dsi_host *msm_host);
void (*link_clk_disable)(struct msm_dsi_host *msm_host);
int (*clk_init_ver)(struct msm_dsi_host *msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 458cec82ae13..11ae5b8444c3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -505,7 +505,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -515,13 +515,13 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
if (msm_host->byte_intf_clk) {
@@ -530,10 +530,18 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
- goto error;
+ return ret;
}
}
+ return 0;
+}
+
+
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -573,7 +581,7 @@ error:
return ret;
}
-int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -584,27 +592,34 @@ int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
- goto error;
+ return ret;
}
+ return 0;
+}
+
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
ret = clk_prepare_enable(msm_host->byte_clk);
if (ret) {
pr_err("%s: Failed to enable dsi byte clk\n", __func__);
@@ -818,7 +833,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- u32 data = 0;
+ u32 data = 0, lane_ctrl = 0;
if (!enable) {
dsi_write(msm_host, REG_DSI_CTRL, 0);
@@ -906,9 +921,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
- if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
+ lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
dsi_write(msm_host, REG_DSI_LANE_CTRL,
- DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ }
data |= DSI_CTRL_ENABLE;
@@ -1996,6 +2013,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock needs to be enabled to receive the dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
+ cfg_hnd->ops->link_clk_set_rate(msm_host);
cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -2344,7 +2362,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = cfg_hnd->ops->link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
+ if (!ret)
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
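The two hunks above split each link_clk_enable path in two: a set-rate phase that only calls clk_set_rate() (and can fail with a plain return, since nothing has been enabled yet) and an enable phase that keeps the clk_prepare_enable() chain with its existing error unwinding. A minimal sketch of the resulting call-site contract, with an illustrative wrapper name:

    /* Sketch only: program all link-clock rates first, then enable.
     * A rate failure never leaves clocks half-enabled, and rates can
     * be reprogrammed without bouncing the clocks. */
    static int link_clocks_on(struct msm_dsi_host *msm_host,
                              const struct msm_dsi_host_cfg_ops *ops)
    {
            int ret;

            ret = ops->link_clk_set_rate(msm_host);
            if (!ret)
                    ret = ops->link_clk_enable(msm_host);
            return ret;
    }

This mirrors the msm_dsi_host_power_on() hunk above, which only enables the link clocks once the set-rate call has succeeded.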
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0fc29f1be8cc..104115d112eb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -432,20 +432,8 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
- if (panel) {
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id,
- ret);
- goto panel_en_fail;
- }
- }
-
return;
-panel_en_fail:
- if (is_dual_dsi && msm_dsi1)
- msm_dsi_host_disable(msm_dsi1->host);
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
@@ -464,12 +452,51 @@ phy_en_fail:
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
- DBG("");
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is the slave DSI in a dual-DSI setup */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (panel) {
+ ret = drm_panel_enable(panel);
+ if (ret) {
+ pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+ ret);
+ }
+ }
}
static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
{
- DBG("");
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_panel *panel = msm_dsi->panel;
+ bool is_dual_dsi = IS_DUAL_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is the slave DSI in a dual-DSI setup */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (panel) {
+ ret = drm_panel_disable(panel);
+ if (ret)
+ pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+ ret);
+ }
}
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
@@ -495,13 +522,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
- if (panel) {
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
- ret);
- }
-
ret = msm_dsi_host_disable(host);
if (ret)
pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 8f6100db90ed..1c894548dd72 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -751,9 +751,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
hw = clk_hw_register_mux(dev, clk_name,
- (const char *[]){
+ ((const char *[]){
parent, parent2, parent3, parent4
- }, 4, 0, pll_10nm->phy_cmn_mmio +
+ }), 4, 0, pll_10nm->phy_cmn_mmio +
REG_DSI_10nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
if (IS_ERR(hw)) {
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 8c99e01ae332..6dffd7f4a99b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -554,9 +554,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
clks[num++] = clk_register_mux(dev, clk_name,
- (const char *[]){
+ ((const char *[]){
parent1, parent2
- }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+ }), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
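Both PLL hunks wrap the compound-literal parent array in an extra pair of parentheses. clk_register_mux()/clk_hw_register_mux() are function-like macros in this kernel, and the bare commas inside { parent1, parent2 } would otherwise be parsed as macro-argument separators. A self-contained illustration with a toy macro (not the kernel one):

    #include <stdio.h>

    /* Toy stand-in for a function-like macro such as clk_register_mux(). */
    #define REGISTER_MUX(name, parents, num) register_mux(name, parents, num)

    static void register_mux(const char *name, const char **parents, int num)
    {
            for (int i = 0; i < num; i++)
                    printf("%s <- %s\n", name, parents[i]);
    }

    int main(void)
    {
            /* Without the outer parentheses, the preprocessor would split
             * the compound literal at its commas and complain about the
             * macro receiving the wrong number of arguments. */
            REGISTER_MUX("byte_mux", ((const char *[]){ "vco", "div2" }), 2);
            return 0;
    }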
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 59702684d576..58707a1f3878 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -101,7 +101,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpiod_set_value_cansleep(gpio.gpiod, value);
}
- };
+ }
DBG("gpio off");
}
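The one-character hunk above removes a stray semicolon after a closing brace. It compiles (as an empty null statement), but it is exactly the kind of slip that silently breaks an if/else chain and that clang flags with -Wextra-semi-stmt. For example, this fails to compile precisely because of the extra semicolon:

    if (on) {
            /* ... */
    };              /* <- null statement */
    else            /* error: 'else' without a previous 'if' */
            ;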
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c5e044136fe5..2a82c23a6e4d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
size = resource_size(res);
- ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
@@ -1194,7 +1202,8 @@ static int add_display_components(struct device *dev,
* the interfaces to our components list.
*/
if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
- of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
@@ -1319,6 +1328,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
+ { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
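Three independent msm_drv.c changes land here: devm_ioremap_nocache() becomes devm_ioremap() (ioremap is already non-cached on the architectures msm runs on, and the _nocache variant was removed upstream), the sc7180 MDSS compatible joins the sdm845 entry, and the driver starts advertising a DMA segment-size limit. The last one follows a common probe-time pattern; this restates the hunk with the reasoning spelled out in comments (kernel context assumed):

    /* dma_set_max_seg_size() stores the limit in dev->dma_parms, so the
     * struct must exist first; without a limit, scatterlist coalescing
     * can build segments larger than the mapping code expects and
     * CONFIG_DMA_API_DEBUG will warn. */
    if (!dev->dma_parms) {
            dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
                                          GFP_KERNEL);
            if (!dev->dma_parms)
                    return -ENOMEM;
    }
    dma_set_max_seg_size(dev, DMA_BIT_MASK(32));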
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 108625cd553d..194d900a460e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -457,8 +457,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
remaining_jiffies = 0;
} else {
ktime_t rem = ktime_sub(*timeout, now);
- struct timespec ts = ktime_to_timespec(rem);
- remaining_jiffies = timespec_to_jiffies(&ts);
+ remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
return remaining_jiffies;
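The timeout_to_jiffies() hunk is y2038 cleanup: the deprecated struct timespec round-trip (ktime_to_timespec() plus timespec_to_jiffies()) collapses into a single 64-bit division by the nanoseconds-per-jiffy. A userspace model of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL
    #define HZ 250                  /* illustrative config value */

    /* One division turns a nanosecond delta straight into jiffies. */
    static int64_t ns_to_jiffies(int64_t rem_ns)
    {
            return rem_ns / (NSEC_PER_SEC / HZ);
    }

    int main(void)
    {
            /* ~16.7 ms at 250 Hz is a little over 4 jiffies. */
            printf("%lld\n", (long long)ns_to_jiffies(16666667)); /* 4 */
            return 0;
    }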
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ab8f0f9c9dc8..be5bc2e8425c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -111,8 +111,15 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ /* The gfx-mem interconnect path that's used by all GPU types. */
struct icc_path *icc_path;
+ /*
+ * Second interconnect path for some A3xx and all A4xx GPUs to the
+ * On Chip MEMory (OCMEM).
+ */
+ struct icc_path *ocmem_icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 9c990266e876..d6e4ae1ef705 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -2,6 +2,7 @@
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
depends on DRM && PCI && MMU
+ select IOMMU_API
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
@@ -16,6 +17,7 @@ config DRM_NOUVEAU
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
+ select SND_HDA_COMPONENT if SND_HDA_CORE
help
Choose this option for open-source NVIDIA support.
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 362495535e69..9d4a2d97507e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -53,8 +53,8 @@ struct nv_sim_state {
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
- int pagemiss, cas, width, bpp;
- int nvclks, mclks, pclks, crtpagemiss;
+ int pagemiss, cas, bpp;
+ int nvclks, mclks, crtpagemiss;
int found, mclk_extra, mclk_loop, cbs, m1, p1;
int mclk_freq, pclk_freq, nvclk_freq;
int us_m, us_n, us_p, crtc_drain_rate;
@@ -65,11 +65,9 @@ nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
cas = arb->mem_latency;
- width = arb->memory_width >> 6;
bpp = arb->bpp;
cbs = 128;
- pclks = 2;
nvclks = 10;
mclks = 13 + cas;
mclk_extra = 3;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 03466f04c741..3a9489ed6544 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -644,16 +644,13 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
int i;
if (nouveau_tv_norm) {
- for (i = 0; i < num_tv_norms; i++) {
- if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
- tv_enc->tv_norm = i;
- break;
- }
- }
-
- if (i == num_tv_norms)
+ i = match_string(nv17_tv_norm_names, num_tv_norms,
+ nouveau_tv_norm);
+ if (i < 0)
NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
nouveau_tv_norm);
+ else
+ tv_enc->tv_norm = i;
}
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
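The open-coded search over nv17_tv_norm_names is replaced with the kernel's match_string() helper, which returns the index of the matching entry or -EINVAL. A userspace model with the same contract:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal model of lib/string.c's match_string(). */
    static int match_string(const char * const *array, size_t n,
                            const char *string)
    {
            for (size_t i = 0; i < n; i++) {
                    if (!array[i])
                            break;
                    if (!strcmp(array[i], string))
                            return (int)i;
            }
            return -EINVAL;
    }

    int main(void)
    {
            static const char * const norms[] = { "PAL", "NTSC", "SECAM" };
            int i = match_string(norms, 3, "NTSC");

            if (i < 0)
                    printf("invalid norm\n");
            else
                    printf("norm index %d\n", i);   /* prints 1 */
            return 0;
    }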
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 5f2de77e0f32..224a34c340fe 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -75,12 +75,16 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
-base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- asyw->xlut.i.mode = 7;
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyw->xlut.i.mode = size == 1024 ? 4 : 7;
asyw->xlut.i.enable = 2;
asyw->xlut.i.load = head907d_olut_load;
+ return true;
}
static inline u32
@@ -160,6 +164,7 @@ base907c = {
.csc_set = base907c_csc_set,
.csc_clr = base907c_csc_clr,
.olut_core = true,
+ .ilut_size = 1024,
.xlut_set = base907c_xlut_set,
.xlut_clr = base907c_xlut_clr,
.image_set = base907c_image_set,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index df8336b593f7..ff94f3f6f264 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -6,6 +6,7 @@
struct nv50_core {
const struct nv50_core_func *func;
struct nv50_dmac chan;
+ bool assign_windows;
};
int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
@@ -18,6 +19,10 @@ struct nv50_core_func {
struct nvif_device *);
void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
+ struct {
+ void (*owner)(struct nv50_core *);
+ } wndw;
+
const struct nv50_head_func *head;
const struct nv50_outp_func {
void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
@@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void corec37d_update(struct nv50_core *, u32 *, bool);
+void corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 40d9b654ab8c..3b36dc8d36b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -25,6 +25,20 @@
#include <nouveau_bo.h>
void
+corec37d_wndw_owner(struct nv50_core *core)
+{
+ const u32 windows = 8; /*XXX*/
+ u32 *push, i;
+ if ((push = evo_wait(&core->chan, 2 * windows))) {
+ for (i = 0; i < windows; i++) {
+ evo_mthd(push, 0x1000 + (i * 0x080), 1);
+ evo_data(push, i >> 1);
+ }
+ evo_kick(push, &core->chan);
+ }
+}
+
+void
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
u32 *push;
@@ -76,20 +90,18 @@ corec37d_init(struct nv50_core *core)
{
const u32 windows = 8; /*XXX*/
u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+ if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
evo_mthd(push, 0x0208, 1);
evo_data(push, core->chan.sync.handle);
for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1000 + (i * 0x080), 3);
- evo_data(push, i >> 1);
+ evo_mthd(push, 0x1004 + (i * 0x080), 2);
evo_data(push, 0x0000001f);
evo_data(push, 0x00000000);
evo_mthd(push, 0x1010 + (i * 0x080), 1);
evo_data(push, 0x00127fff);
}
- evo_mthd(push, 0x0200, 1);
- evo_data(push, 0x00000001);
evo_kick(push, &core->chan);
+ core->assign_windows = true;
}
}
@@ -99,6 +111,7 @@ corec37d = {
.ntfy_init = corec37d_ntfy_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
+ .wndw.owner = corec37d_wndw_owner,
.head = &headc37d,
.sor = &sorc37d,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index b606d68cda10..147adcd60937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -27,20 +27,18 @@ corec57d_init(struct nv50_core *core)
{
const u32 windows = 8; /*XXX*/
u32 *push, i;
- if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+ if ((push = evo_wait(&core->chan, 2 + 5 * windows))) {
evo_mthd(push, 0x0208, 1);
evo_data(push, core->chan.sync.handle);
for (i = 0; i < windows; i++) {
- evo_mthd(push, 0x1000 + (i * 0x080), 3);
- evo_data(push, i >> 1);
+ evo_mthd(push, 0x1004 + (i * 0x080), 2);
evo_data(push, 0x0000000f);
evo_data(push, 0x00000000);
evo_mthd(push, 0x1010 + (i * 0x080), 1);
evo_data(push, 0x00117fff);
}
- evo_mthd(push, 0x0200, 1);
- evo_data(push, 0x00000001);
evo_kick(push, &core->chan);
+ core->assign_windows = true;
}
}
@@ -50,6 +48,7 @@ corec57d = {
.ntfy_init = corec37d_ntfy_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
+ .wndw.owner = corec37d_wndw_owner,
.head = &headc57d,
.sor = &sorc37d,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 63425e246018..a3dc2ba19fb2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
+#include <linux/component.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
@@ -476,12 +477,113 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
return 0;
}
+/*
+ * audio component binding for ELD notification
+ */
+static void
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+{
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ port, -1);
+}
+
+static int
+nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+ bool *enabled, unsigned char *buf, int max_bytes)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder;
+ struct nouveau_connector *nv_connector;
+ struct nouveau_crtc *nv_crtc;
+ int ret = 0;
+
+ *enabled = false;
+ drm_for_each_encoder(encoder, drm->dev) {
+ nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_crtc = nouveau_crtc(encoder->crtc);
+ if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+ continue;
+ *enabled = drm_detect_monitor_audio(nv_connector->edid);
+ if (*enabled) {
+ ret = drm_eld_size(nv_connector->base.eld);
+ memcpy(buf, nv_connector->base.eld,
+ min(max_bytes, ret));
+ }
+ break;
+ }
+ return ret;
+}
+
+static const struct drm_audio_component_ops nv50_audio_component_ops = {
+ .get_eld = nv50_audio_component_get_eld,
+};
+
+static int
+nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
+ drm_modeset_lock_all(drm_dev);
+ acomp->ops = &nv50_audio_component_ops;
+ acomp->dev = kdev;
+ drm->audio.component = acomp;
+ drm_modeset_unlock_all(drm_dev);
+ return 0;
+}
+
+static void
+nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
+ void *data)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(kdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_audio_component *acomp = data;
+
+ drm_modeset_lock_all(drm_dev);
+ drm->audio.component = NULL;
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static const struct component_ops nv50_audio_component_bind_ops = {
+ .bind = nv50_audio_component_bind,
+ .unbind = nv50_audio_component_unbind,
+};
+
+static void
+nv50_audio_component_init(struct nouveau_drm *drm)
+{
+ if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ drm->audio.component_registered = true;
+}
+
+static void
+nv50_audio_component_fini(struct nouveau_drm *drm)
+{
+ if (drm->audio.component_registered) {
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ }
+}
+
/******************************************************************************
* Audio
*****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
@@ -496,11 +598,14 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+
+ nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
+ struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
@@ -527,6 +632,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
+
+ nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
/******************************************************************************
@@ -660,7 +767,6 @@ struct nv50_mstm {
struct nouveau_encoder *outp;
struct drm_dp_mst_topology_mgr mgr;
- struct nv50_msto *msto[4];
bool modified;
bool disabled;
@@ -726,7 +832,6 @@ nv50_msto_cleanup(struct nv50_msto *msto)
drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
msto->mstc = NULL;
- msto->head = NULL;
msto->disabled = false;
}
@@ -806,11 +911,11 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
* topology
*/
asyh->or.bpc = min(connector->display_info.bpc, 8U);
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
}
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
- asyh->dp.pbn);
+ asyh->dp.pbn, 0);
if (slots < 0)
return slots;
@@ -872,7 +977,6 @@ nv50_msto_enable(struct drm_encoder *encoder)
mstm->outp->update(mstm->outp, head->base.index, armh, proto,
nv50_dp_bpc_to_depth(armh->or.bpc));
- msto->head = head;
msto->mstc = mstc;
mstm->modified = true;
}
@@ -913,45 +1017,40 @@ nv50_msto = {
.destroy = nv50_msto_destroy,
};
-static int
-nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
- struct nv50_msto **pmsto)
+static struct nv50_msto *
+nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
struct nv50_msto *msto;
int ret;
- if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
- return -ENOMEM;
+ msto = kzalloc(sizeof(*msto), GFP_KERNEL);
+ if (!msto)
+ return ERR_PTR(-ENOMEM);
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
- DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
+ DRM_MODE_ENCODER_DPMST, "mst-%d", id);
if (ret) {
- kfree(*pmsto);
- *pmsto = NULL;
- return ret;
+ kfree(msto);
+ return ERR_PTR(ret);
}
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
- msto->encoder.possible_crtcs = heads;
- return 0;
+ msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
+ msto->head = head;
+ return msto;
}
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
- struct nv50_head *head = nv50_head(connector_state->crtc);
struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct drm_crtc *crtc = connector_state->crtc;
- return &mstc->mstm->msto[head->base.index]->encoder;
-}
-
-static struct drm_encoder *
-nv50_mstc_best_encoder(struct drm_connector *connector)
-{
- struct nv50_mstc *mstc = nv50_mstc(connector);
+ if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ return NULL;
- return &mstc->mstm->msto[0]->encoder;
+ return &nv50_head(crtc)->msto->encoder;
}
static enum drm_mode_status
@@ -1038,7 +1137,6 @@ static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
.atomic_check = nv50_mstc_atomic_check,
.detect_ctx = nv50_mstc_detect,
@@ -1071,8 +1169,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
const char *path, struct nv50_mstc **pmstc)
{
struct drm_device *dev = mstm->outp->base.base.dev;
+ struct drm_crtc *crtc;
struct nv50_mstc *mstc;
- int ret, i;
+ int ret;
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
return -ENOMEM;
@@ -1092,8 +1191,13 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
- for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
- drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+ drm_for_each_crtc(crtc, dev) {
+ if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
+ continue;
+
+ drm_connector_attach_encoder(&mstc->connector,
+ &nv50_head(crtc)->msto->encoder);
+ }
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
@@ -1367,7 +1471,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
- int ret, i;
+ int ret;
u8 dpcd;
/* This is a workaround for some monitors not functioning
@@ -1390,13 +1494,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
if (ret)
return ret;
- for (i = 0; i < max_payloads; i++) {
- ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
- i, &mstm->msto[i]);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -1569,17 +1666,24 @@ nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
+static bool nv50_has_mst(struct nouveau_drm *drm)
+{
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ u32 data;
+ u8 ver, hdr, cnt, len;
+
+ data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+ return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+}
+
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u8 ver, hdr, cnt, len;
- u32 data;
int type, ret;
switch (dcbe->type) {
@@ -1624,10 +1728,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
if (nv_connector->type != DCB_CONNECTOR_eDP &&
- (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
- ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
- ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
- nv_connector->base.base.id,
+ nv50_has_mst(drm)) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
+ 16, nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
@@ -1673,7 +1776,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u8 owner = 1 << nv_crtc->index;
@@ -1681,7 +1783,6 @@ nv50_pior_enable(struct drm_encoder *encoder)
nv50_outp_acquire(nv_encoder);
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
case 8: asyh->or.depth = 0x5; break;
@@ -1832,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_atom *atom = nv50_atom(state);
+ struct nv50_core *core = disp->core;
struct nv50_outp_atom *outp, *outt;
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
int i;
@@ -1950,6 +2052,21 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
}
+ /* Update window->head assignment.
+ *
+ * This has to happen in an update that's not interlocked with
+ * any window channels to avoid hitting HW error checks.
+ *
+ * TODO: Proper handling of window ownership (Turing apparently
+ * supports non-fixed mappings).
+ */
+ if (core->assign_windows) {
+ core->func->wndw.owner(core);
+ core->func->update(core, interlock, false);
+ core->assign_windows = false;
+ interlock[NV50_DISP_INTERLOCK_CORE] = 0;
+ }
+
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
@@ -2302,6 +2419,8 @@ nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
+ nv50_audio_component_fini(nouveau_drm(dev));
+
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
@@ -2323,6 +2442,7 @@ nv50_display_create(struct drm_device *dev)
struct nv50_disp *disp;
struct dcb_output *dcbe;
int crtcs, ret, i;
+ bool has_mst = nv50_has_mst(drm);
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
@@ -2371,11 +2491,37 @@ nv50_display_create(struct drm_device *dev)
crtcs = 0x3;
for (i = 0; i < fls(crtcs); i++) {
+ struct nv50_head *head;
+
if (!(crtcs & (1 << i)))
continue;
- ret = nv50_head_create(dev, i);
- if (ret)
+
+ head = nv50_head_create(dev, i);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
goto out;
+ }
+
+ if (has_mst) {
+ head->msto = nv50_msto_new(dev, head, i);
+ if (IS_ERR(head->msto)) {
+ ret = PTR_ERR(head->msto);
+ head->msto = NULL;
+ goto out;
+ }
+
+ /*
+ * FIXME: This is a hack to work around the following
+ * issues:
+ *
+ * https://gitlab.gnome.org/GNOME/mutter/issues/759
+ * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
+ *
+ * Once these issues are closed, this should be
+ * removed
+ */
+ head->msto->encoder.possible_crtcs = crtcs;
+ }
}
/* create encoder/connector objects based on VBIOS DCB table */
@@ -2423,6 +2569,8 @@ nv50_display_create(struct drm_device *dev)
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
dev->vblank_disable_immediate = true;
+ nv50_audio_component_init(drm);
+
out:
if (ret)
nv50_display_destroy(dev);
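The audio component glue added to disp.c mirrors what i915 and amdgpu already provide: the DRM driver registers a struct drm_audio_component through the component framework, snd-hda binds to it, the audio driver pulls ELDs with .get_eld(), and the DRM side calls pin_eld_notify() whenever audio on a head is enabled or disabled (which is why nv50_audio_enable()/_disable() gained the notify calls). A hedged sketch of the consumer side, which lives in the HDA driver rather than in this diff (parse_eld() is hypothetical):

    /* Sketch: the bound HDA driver querying a port's ELD through the
     * shared component. */
    static void hda_refresh_eld(struct drm_audio_component *acomp, int port)
    {
            unsigned char eld[128];
            bool enabled;
            int size;

            size = acomp->ops->get_eld(acomp->dev, port, -1 /* pipe */,
                                       &enabled, eld, sizeof(eld));
            if (enabled && size > 0)
                    parse_eld(eld, size);
    }

Also in this file: drm_dp_calc_pbn_mode() and drm_dp_atomic_find_vcpi_slots() grew DSC-related parameters (false/0 here, i.e. no DSC), and each MST stream encoder is now created per head at display-creation time instead of per topology manager.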
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index c0a79531b087..d54fe00ac3a3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -4,6 +4,8 @@
#include "nouveau_display.h"
+struct nv50_msto;
+
struct nv50_disp {
struct nvif_disp *disp;
struct nv50_core *core;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 0b4c7a6fb39c..8f6455697ba7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -214,6 +214,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
{
struct nv50_disp *disp = nv50_disp(head->base.base.dev);
struct drm_property_blob *olut = asyh->state.gamma_lut;
+ int size;
/* Determine whether core output LUT should be enabled. */
if (olut) {
@@ -230,14 +231,23 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
}
}
- if (!olut && !head->func->olut_identity) {
- asyh->olut.handle = 0;
- return 0;
+ if (!olut) {
+ if (!head->func->olut_identity) {
+ asyh->olut.handle = 0;
+ return 0;
+ }
+ size = 0;
+ } else {
+ size = drm_color_lut_size(olut);
}
+ if (!head->func->olut(head, asyh, size)) {
+ DRM_DEBUG_KMS("Invalid olut\n");
+ return -EINVAL;
+ }
asyh->olut.handle = disp->core->chan.vram.handle;
asyh->olut.buffer = !asyh->olut.buffer;
- head->func->olut(head, asyh);
+
return 0;
}
@@ -478,7 +488,7 @@ nv50_head_func = {
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
-int
+struct nv50_head *
nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -490,7 +500,7 @@ nv50_head_create(struct drm_device *dev, int index)
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
head->base.index = index;
@@ -508,27 +518,26 @@ nv50_head_create(struct drm_device *dev, int index)
ret = nv50_curs_new(drm, head->base.index, &curs);
if (ret) {
kfree(head);
- return ret;
+ return ERR_PTR(ret);
}
crtc = &head->base.base;
drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
&nv50_head_func, "head-%d", head->base.index);
drm_crtc_helper_add(crtc, &nv50_head_help);
+ /* Keep the legacy gamma size at 256 to avoid compatibility issues */
drm_mode_crtc_set_gamma_size(crtc, 256);
- if (disp->disp->object.oclass >= GF110_DISP)
- drm_crtc_enable_color_mgmt(crtc, 256, true, 256);
- else
- drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+ drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size,
+ disp->disp->object.oclass >= GF110_DISP,
+ head->func->olut_size);
if (head->func->olut_set) {
ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
- if (ret)
- goto out;
+ if (ret) {
+ nv50_head_destroy(crtc);
+ return ERR_PTR(ret);
+ }
}
-out:
- if (ret)
- nv50_head_destroy(crtc);
- return ret;
+ return head;
}
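nv50_head_create() changes calling convention from "int status" to returning the head itself, with errors encoded in the pointer. A self-contained userspace model of the kernel's ERR_PTR()/IS_ERR() convention used here:

    #include <stdio.h>
    #include <stdlib.h>

    /* Model: small negative errnos are encoded in the top page of the
     * address space, which no valid pointer can occupy. */
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-4095;
    }

    struct head { int index; };

    static struct head *head_create(int index)
    {
            struct head *h = malloc(sizeof(*h));

            if (!h)
                    return ERR_PTR(-12 /* -ENOMEM */);
            h->index = index;
            return h;
    }

    int main(void)
    {
            struct head *h = head_create(0);

            if (IS_ERR(h))
                    return (int)-PTR_ERR(h);
            printf("created head %d\n", h->index);
            free(h);
            return 0;
    }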
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index d1c002f534d4..c32b27cdaefc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -11,17 +11,19 @@ struct nv50_head {
const struct nv50_head_func *func;
struct nouveau_crtc base;
struct nv50_lut olut;
+ struct nv50_msto *msto;
};
-int nv50_head_create(struct drm_device *, int index);
+struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
struct nv50_head_func {
void (*view)(struct nv50_head *, struct nv50_head_atom *);
void (*mode)(struct nv50_head *, struct nv50_head_atom *);
- void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+ bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
bool olut_identity;
+ int olut_size;
void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
void (*olut_clr)(struct nv50_head *);
void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -43,7 +45,7 @@ struct nv50_head_func {
extern const struct nv50_head_func head507d;
void head507d_view(struct nv50_head *, struct nv50_head_atom *);
void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
void head507d_core_clr(struct nv50_head *);
int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
@@ -60,7 +62,7 @@ extern const struct nv50_head_func head827d;
extern const struct nv50_head_func head907d;
void head907d_view(struct nv50_head *, struct nv50_head_atom *);
void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head907d_olut(struct nv50_head *, struct nv50_head_atom *);
+bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut_clr(struct nv50_head *);
void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 7561be5ca707..66ccf36b56a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -271,15 +271,19 @@ head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 256)
+ return false;
+
if (asyh->base.cpp == 1)
asyh->olut.mode = 0;
else
asyh->olut.mode = 1;
asyh->olut.load = head507d_olut_load;
+ return true;
}
void
@@ -328,6 +332,7 @@ head507d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
+ .olut_size = 256,
.olut_set = head507d_olut_set,
.olut_clr = head507d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index af5e7bd5978b..11877119eea4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -108,6 +108,7 @@ head827d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
+ .olut_size = 256,
.olut_set = head827d_olut_set,
.olut_clr = head827d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index c2d09dd97b1f..3002ec23d7a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -230,11 +230,15 @@ head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
- asyh->olut.mode = 7;
+ if (size != 256 && size != 1024)
+ return false;
+
+ asyh->olut.mode = size == 1024 ? 4 : 7;
asyh->olut.load = head907d_olut_load;
+ return true;
}
void
@@ -285,6 +289,7 @@ head907d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 303df8459ca8..76958cedd51f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -83,6 +83,7 @@ head917d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
+ .olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index ef6a99d95a9c..00011ce109a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -148,14 +148,18 @@ headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
-headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+static bool
+headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 256 && size != 1024)
+ return false;
+
asyh->olut.mode = 2;
- asyh->olut.size = 0;
+ asyh->olut.size = size == 1024 ? 2 : 0;
asyh->olut.range = 0;
asyh->olut.output_mode = 1;
asyh->olut.load = head907d_olut_load;
+ return true;
}
static void
@@ -201,6 +205,7 @@ headc37d = {
.view = headc37d_view,
.mode = headc37d_mode,
.olut = headc37d_olut,
+ .olut_size = 1024,
.olut_set = headc37d_olut_set,
.olut_clr = headc37d_olut_clr,
.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 32a7f9e85fb0..938d910a1b1e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -151,17 +151,20 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-void
-headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+bool
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
+ if (size != 0 && size != 256 && size != 1024)
+ return false;
+
asyh->olut.mode = 2; /* DIRECT10 */
asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
- if (asyh->state.gamma_lut &&
- asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+ if (size == 256)
asyh->olut.load = headc57d_olut_load_8;
else
asyh->olut.load = headc57d_olut_load;
+ return true;
}
static void
@@ -194,6 +197,7 @@ headc57d = {
.mode = headc57d_mode,
.olut = headc57d_olut,
.olut_identity = true,
+ .olut_size = 1024,
.olut_set = headc57d_olut_set,
.olut_clr = headc57d_olut_clr,
.curs_layout = head917d_curs_layout,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index 994def4fd51a..4e95ca5604ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -49,7 +49,7 @@ nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
kvfree(in);
}
} else {
- load(in, blob->length / sizeof(*in), mem);
+ load(in, drm_color_lut_size(blob), mem);
}
return addr;
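Two recurring themes of the LUT rework surface in lut.c: raw blob->length / sizeof(struct drm_color_lut) computations become drm_color_lut_size(), and each head/window class now reports which LUT sizes its hardware modes accept (256-entry legacy or 1024-entry 10-bit) through olut()/ilut() callbacks that validate and return bool. A userspace model of both pieces:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct drm_color_lut { uint16_t red, green, blue, reserved; };

    /* Model of drm_color_lut_size(): blob length in bytes -> entries. */
    static int color_lut_size(size_t blob_len)
    {
            return (int)(blob_len / sizeof(struct drm_color_lut));
    }

    /* Model of the new validation contract (907d-style classes). */
    static bool olut_check(int size)
    {
            return size == 256 || size == 1024;
    }

    int main(void)
    {
            int size = color_lut_size(1024 * sizeof(struct drm_color_lut));

            printf("%d entries -> %s\n", size,
                   olut_check(size) ? "ok" : "-EINVAL");
            return 0;
    }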
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 5193b6257061..890315291b01 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -318,7 +318,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
return wndw->func->acquire(wndw, asyw, asyh);
}
-static void
+static int
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
struct nv50_wndw_atom *armw,
struct nv50_wndw_atom *asyw,
@@ -340,7 +340,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
*/
if (!(ilut = asyh->state.gamma_lut)) {
asyw->visible = false;
- return;
+ return 0;
}
if (wndw->func->ilut)
@@ -359,7 +359,10 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
- wndw->func->ilut(wndw, asyw);
+ if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) {
+ DRM_DEBUG_KMS("Invalid ilut\n");
+ return -EINVAL;
+ }
asyw->xlut.handle = wndw->wndw.vram.handle;
asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
asyw->set.xlut = true;
@@ -384,6 +387,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
/* Can't do an immediate flip while changing the LUT. */
asyh->state.async_flip = false;
+ return 0;
}
static int
@@ -424,8 +428,11 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
(!armw->visible ||
asyh->state.color_mgmt_changed ||
asyw->state.fb->format->format !=
- armw->state.fb->format->format))
- nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ armw->state.fb->format->format)) {
+ ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+ if (ret)
+ return ret;
+ }
/* Calculate new window state. */
if (asyw->visible) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index c63bd3bdaf06..caf397475918 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -64,12 +64,13 @@ struct nv50_wndw_func {
void (*ntfy_clr)(struct nv50_wndw *);
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
- void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int);
void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
const struct drm_color_ctm *);
void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*csc_clr)(struct nv50_wndw *);
bool ilut_identity;
+ int ilut_size;
bool olut_core;
void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*xlut_clr)(struct nv50_wndw *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 0f9402162bde..b92dc3461bbd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -71,14 +71,18 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
-wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
+ if (size != 256 && size != 1024)
+ return false;
+
asyw->xlut.i.mode = 2;
- asyw->xlut.i.size = 0;
+ asyw->xlut.i.size = size == 1024 ? 2 : 0;
asyw->xlut.i.range = 0;
asyw->xlut.i.output_mode = 1;
asyw->xlut.i.load = head907d_olut_load;
+ return true;
}
void
@@ -261,6 +265,7 @@ wndwc37e = {
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc37e_ilut,
+ .ilut_size = 1024,
.xlut_set = wndwc37e_ilut_set,
.xlut_clr = wndwc37e_ilut_clr,
.csc = base907c_csc,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index a311c79e5295..35c9c52fab26 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -156,19 +156,21 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static void
-wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+static bool
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
- u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+ if (size = size ? size : 1024, size != 256 && size != 1024)
+ return false;
+
if (size == 256) {
asyw->xlut.i.mode = 1; /* DIRECT8. */
} else {
asyw->xlut.i.mode = 2; /* DIRECT10. */
- size = 1024;
}
asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
asyw->xlut.i.load = wndwc57e_ilut_load;
+ return true;
}
static const struct nv50_wndw_func
@@ -183,6 +185,7 @@ wndwc57e = {
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc57e_ilut,
.ilut_identity = true,
+ .ilut_size = 1024,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
.csc = base907c_csc,
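The condition in wndwc57e_ilut() packs a default and a check into one expression with the comma operator: a size of 0 (the identity-LUT case, possible because this class sets ilut_identity) is promoted to 1024 before validation. Written out long-hand:

    #include <stdbool.h>
    #include <stdio.h>

    static bool ilut_c57e_check(int size)
    {
            if (size == 0)          /* identity LUT: no blob bound */
                    size = 1024;
            return size == 256 || size == 1024;
    }

    int main(void)
    {
            printf("%d %d %d\n", ilut_c57e_check(0),
                   ilut_c57e_check(256), ilut_c57e_check(512)); /* 1 1 0 */
            return 0;
    }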
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/acr.h b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
new file mode 100644
index 000000000000..e65d6a8db104
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/acr.h
@@ -0,0 +1,152 @@
+#ifndef __NVFW_ACR_H__
+#define __NVFW_ACR_H__
+
+struct wpr_header {
+#define WPR_HEADER_V0_FALCON_ID_INVALID 0xffffffff
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+#define WPR_HEADER_V0_STATUS_NONE 0
+#define WPR_HEADER_V0_STATUS_COPY 1
+#define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED 2
+#define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED 3
+#define WPR_HEADER_V0_STATUS_VALIDATION_DONE 4
+#define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED 5
+#define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY 6
+ u32 status;
+};
+
+void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *);
+
+struct wpr_header_v1 {
+#define WPR_HEADER_V1_FALCON_ID_INVALID 0xffffffff
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 bin_version;
+#define WPR_HEADER_V1_STATUS_NONE 0
+#define WPR_HEADER_V1_STATUS_COPY 1
+#define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED 2
+#define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED 3
+#define WPR_HEADER_V1_STATUS_VALIDATION_DONE 4
+#define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED 5
+#define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY 6
+#define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED 7
+ u32 status;
+};
+
+void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *);
+
+struct lsf_signature {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+};
+
+struct lsf_signature_v1 {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+ u32 supports_versioning;
+ u32 version;
+ u32 depmap_count;
+ u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4];
+ u8 kdf[16];
+};
+
+struct lsb_header_tail {
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+};
+
+struct lsb_header {
+ struct lsf_signature signature;
+ struct lsb_header_tail tail;
+};
+
+void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *);
+
+struct lsb_header_v1 {
+ struct lsf_signature_v1 signature;
+ struct lsb_header_tail tail;
+};
+
+void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *);
+
+struct flcn_acr_desc {
+ union {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ } ucode_reserved_space;
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_mem_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *);
+
+struct flcn_acr_desc_v1 {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_memory_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ u32 shadow_mem_start_addr;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/flcn.h b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
new file mode 100644
index 000000000000..e090f347d220
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FLCN_H__
+#define __NVFW_FLCN_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct loader_config {
+ u32 dma_idx;
+ u32 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+ u32 overlay_dma_base1;
+};
+
+void
+loader_config_dump(struct nvkm_subdev *, const struct loader_config *);
+
+struct loader_config_v1 {
+ u32 reserved;
+ u32 dma_idx;
+ u64 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+ u64 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+} __packed;
+
+void
+loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *);
+
+struct flcn_bl_dmem_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u32 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+};
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *);
+
+struct flcn_bl_dmem_desc_v1 {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+} __packed;
+
+void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *,
+ const struct flcn_bl_dmem_desc_v1 *);
+
+struct flcn_bl_dmem_desc_v2 {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u64 data_dma_base;
+ u32 data_size;
+ u32 argc;
+ u32 argv;
+} __packed;
+
+void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *,
+ const struct flcn_bl_dmem_desc_v2 *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/fw.h b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
new file mode 100644
index 000000000000..a7cf1188c9d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/fw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_FW_H__
+#define __NVFW_FW_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_bin_hdr {
+ u32 bin_magic;
+ u32 bin_ver;
+ u32 bin_size;
+ u32 header_offset;
+ u32 data_offset;
+ u32 data_size;
+};
+
+const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *);
+
+struct nvfw_bl_desc {
+ u32 start_tag;
+ u32 dmem_load_off;
+ u32 code_off;
+ u32 code_size;
+ u32 data_off;
+ u32 data_size;
+};
+
+const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
new file mode 100644
index 000000000000..64d0d32200c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_HS_H__
+#define __NVFW_HS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_hs_header {
+ u32 sig_dbg_offset;
+ u32 sig_dbg_size;
+ u32 sig_prod_offset;
+ u32 sig_prod_size;
+ u32 patch_loc;
+ u32 patch_sig;
+ u32 hdr_offset;
+ u32 hdr_size;
+};
+
+const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *);
+
+struct nvfw_hs_load_header {
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 num_apps;
+ u32 apps[0];
+};
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/ls.h b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
new file mode 100644
index 000000000000..f63692a2a16c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/ls.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVFW_LS_H__
+#define __NVFW_LS_H__
+#include <core/os.h>
+struct nvkm_subdev;
+
+struct nvfw_ls_desc_head {
+ u32 descriptor_size;
+ u32 image_size;
+ u32 tools_version;
+ u32 app_version;
+ char date[64];
+ u32 bootloader_start_offset;
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+ u32 bootloader_entry_point;
+ u32 app_start_offset;
+ u32 app_size;
+ u32 app_imem_offset;
+ u32 app_imem_entry;
+ u32 app_dmem_offset;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+};
+
+struct nvfw_ls_desc {
+ struct nvfw_ls_desc_head head;
+ u32 nb_overlays;
+ struct {
+ u32 start;
+ u32 size;
+ } load_ovl[64];
+ u32 compressed;
+};
+
+const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *);
+
+struct nvfw_ls_desc_v1 {
+ struct nvfw_ls_desc_head head;
+ u32 nb_imem_overlays;
+ u32 nb_dmem_overlays;
+ struct {
+ u32 start;
+ u32 size;
+ } load_ovl[64];
+ u32 compressed;
+};
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *, const void *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
new file mode 100644
index 000000000000..452ed7d03827
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h
@@ -0,0 +1,98 @@
+#ifndef __NVFW_PMU_H__
+#define __NVFW_PMU_H__
+
+struct nv_pmu_args {
+ u32 reserved;
+ u32 freq_hz;
+ u32 trace_size;
+ u32 trace_dma_base;
+ u16 trace_dma_base1;
+ u8 trace_dma_offset;
+ u32 trace_dma_idx;
+ bool secure_mode;
+ bool raise_priv_sec;
+ struct {
+ u32 dma_base;
+ u16 dma_base1;
+ u8 dma_offset;
+ u16 fb_size;
+ u8 dma_idx;
+ } gc6_ctx;
+ u8 pad;
+};
+
+#define NV_PMU_UNIT_INIT 0x07
+#define NV_PMU_UNIT_ACR 0x0a
+
+struct nv_pmu_init_msg {
+ struct nv_falcon_msg hdr;
+#define NV_PMU_INIT_MSG_INIT 0x00
+ u8 msg_type;
+
+ u8 pad;
+ u16 os_debug_entry_point;
+
+ struct {
+ u16 size;
+ u16 offset;
+ u8 index;
+ u8 pad;
+ } queue_info[5];
+
+ u16 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+};
+
+struct nv_pmu_acr_cmd {
+ struct nv_falcon_cmd hdr;
+#define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00
+#define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01
+#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03
+ u8 cmd_type;
+};
+
+struct nv_pmu_acr_msg {
+ struct nv_falcon_cmd hdr;
+ u8 msg_type;
+};
+
+struct nv_pmu_acr_init_wpr_region_cmd {
+ struct nv_pmu_acr_cmd cmd;
+ u32 region_id;
+ u32 wpr_offset;
+};
+
+struct nv_pmu_acr_init_wpr_region_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 error_code;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_cmd {
+ struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_falcon_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 falcon_id;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_cmd {
+ struct nv_pmu_acr_cmd cmd;
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES 0x00000000
+#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_mask;
+ u32 use_va_mask;
+ u32 wpr_lo;
+ u32 wpr_hi;
+};
+
+struct nv_pmu_acr_bootstrap_multiple_falcons_msg {
+ struct nv_pmu_acr_msg msg;
+ u32 falcon_mask;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
new file mode 100644
index 000000000000..03496558b775
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h
@@ -0,0 +1,60 @@
+#ifndef __NVFW_SEC2_H__
+#define __NVFW_SEC2_H__
+
+struct nv_sec2_args {
+ u32 freq_hz;
+ u32 falc_trace_size;
+ u32 falc_trace_dma_base;
+ u32 falc_trace_dma_idx;
+ bool secure_mode;
+};
+
+#define NV_SEC2_UNIT_INIT 0x01
+#define NV_SEC2_UNIT_ACR 0x08
+
+struct nv_sec2_init_msg {
+ struct nv_falcon_msg hdr;
+#define NV_SEC2_INIT_MSG_INIT 0x00
+ u8 msg_type;
+
+ u8 num_queues;
+ u16 os_debug_entry_point;
+
+ struct {
+ u32 offset;
+ u16 size;
+ u8 index;
+#define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ 0x00
+#define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ 0x01
+ u8 id;
+ } queue_info[2];
+
+ u32 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+};
+
+struct nv_sec2_acr_cmd {
+ struct nv_falcon_cmd hdr;
+#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00
+ u8 cmd_type;
+};
+
+struct nv_sec2_acr_msg {
+ struct nv_falcon_cmd hdr;
+ u8 msg_type;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_cmd {
+ struct nv_sec2_acr_cmd cmd;
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000
+#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001
+ u32 flags;
+ u32 falcon_id;
+};
+
+struct nv_sec2_acr_bootstrap_falcon_msg {
+ struct nv_sec2_acr_msg msg;
+ u32 error_code;
+ u32 falcon_id;
+};
+#endif
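nvfw/pmu.h and nvfw/sec2.h describe the same RM-style queue protocol spoken by two different falcons: a generic falcon header (struct nv_falcon_cmd / nv_falcon_msg), a per-unit command byte, then the payload. A hedged sketch of building one of the commands defined above — the header-field names (unit_id, size) and the submission helper are assumptions, since neither is part of this diff:

    /* Sketch: SEC2 ACR bootstrap-falcon command, fields per nvfw/sec2.h. */
    struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
            .cmd.hdr.unit_id = NV_SEC2_UNIT_ACR,        /* assumed field */
            .cmd.hdr.size    = sizeof(cmd),             /* assumed field */
            .cmd.cmd_type    = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
            .flags           = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
            .falcon_id       = falcon_id,
    };
    sec2_queue_send(queue, &cmd.cmd.hdr);               /* hypothetical */

The matching nv_sec2_acr_bootstrap_falcon_msg carries error_code and falcon_id back once the ACR unit has processed the request.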
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index f704ae600e94..30659747ffe8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -166,6 +166,8 @@
#define VOLTA_A /* cl9097.h */ 0x0000c397
+#define TURING_A /* cl9097.h */ 0x0000c597
+
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
@@ -207,6 +209,7 @@
#define PASCAL_COMPUTE_A 0x0000c0c0
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
+#define TURING_COMPUTE_A 0x0000c5c0
#define NV74_CIPHER 0x000074c1
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
index 8450127420f5..c21d09f04f1d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0008.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 {
struct nvif_mmu_kind_v0 {
__u8 version;
- __u8 pad01[1];
+ __u8 kind_inv;
__u16 count;
__u8 data[];
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index 747ecf67e403..cec1e88a0a05 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -7,6 +7,7 @@ struct nvif_mmu {
u8 dmabits;
u8 heap_nr;
u8 type_nr;
+ u8 kind_inv;
u16 kind_nr;
s32 mem;
@@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *);
static inline bool
nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
{
- const u8 invalid = mmu->kind_nr - 1;
if (kind) {
- if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+ if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv)
return false;
}
return true;
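
The rewrite above stops deriving the invalid-kind sentinel from the kind table's size and instead uses the value the device reports. As a minimal caller sketch, assuming an initialised nvif_mmu (the wrapper function is invented for illustration):

static int
example_validate_kind(struct nvif_mmu *mmu, u8 kind)
{
        /* Kind 0 is always permitted; a non-zero kind must be in range
         * and must not map to the device-reported invalid kind. */
        if (!nvif_mmu_kind_valid(mmu, kind))
                return -EINVAL;
        return 0;
}
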
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 6d55cd0476aa..5c007ce62fc3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -23,13 +23,13 @@ enum nvkm_devidx {
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_FAULT,
+ NVKM_SUBDEV_ACR,
NVKM_SUBDEV_PMU,
NVKM_SUBDEV_VOLT,
NVKM_SUBDEV_ICCSENSE,
NVKM_SUBDEV_THERM,
NVKM_SUBDEV_CLK,
NVKM_SUBDEV_GSP,
- NVKM_SUBDEV_SECBOOT,
NVKM_ENGINE_BSP,
@@ -129,6 +129,7 @@ struct nvkm_device {
struct notifier_block nb;
} acpi;
+ struct nvkm_acr *acr;
struct nvkm_bar *bar;
struct nvkm_bios *bios;
struct nvkm_bus *bus;
@@ -149,7 +150,6 @@ struct nvkm_device {
struct nvkm_subdev *mxm;
struct nvkm_pci *pci;
struct nvkm_pmu *pmu;
- struct nvkm_secboot *secboot;
struct nvkm_therm *therm;
struct nvkm_timer *timer;
struct nvkm_top *top;
@@ -169,7 +169,7 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
- struct nvkm_engine *nvenc[3];
+ struct nvkm_nvenc *nvenc[3];
struct nvkm_nvdec *nvdec[3];
struct nvkm_pm *pm;
struct nvkm_engine *sec;
@@ -202,6 +202,7 @@ struct nvkm_device_quirk {
struct nvkm_device_chip {
const char *name;
+ int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **);
int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
@@ -222,7 +223,6 @@ struct nvkm_device_chip {
int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
- int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);
@@ -242,7 +242,7 @@ struct nvkm_device_chip {
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
new file mode 100644
index 000000000000..daa8e4bfb6bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -0,0 +1,77 @@
+#ifndef __NVKM_FALCON_H__
+#define __NVKM_FALCON_H__
+#include <engine/falcon.h>
+
+int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner,
+ const char *name, u32 addr, struct nvkm_falcon *);
+void nvkm_falcon_dtor(struct nvkm_falcon *);
+
+void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
+ void *, u32, u32, u16, u8, bool);
+void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
+void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
+void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32);
+int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32);
+void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr);
+void nvkm_falcon_v1_start(struct nvkm_falcon *);
+int nvkm_falcon_v1_enable(struct nvkm_falcon *);
+void nvkm_falcon_v1_disable(struct nvkm_falcon *);
+
+void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
+int gp102_sec2_flcn_enable(struct nvkm_falcon *);
+
+#define FLCN_PRINTK(t,f,fmt,a...) do { \
+ if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \
+ nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
+ else \
+ nvkm_##t((f)->owner, fmt"\n", ##a); \
+} while(0)
+#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
+#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
+
+/**
+ * struct nv_falcon_msg - header for all messages
+ *
+ * @unit_id: id of firmware process that sent the message
+ * @size: total size of message
+ * @ctrl_flags: control flags
+ * @seq_id: used to match a message with its corresponding command
+ */
+struct nv_falcon_msg {
+ u8 unit_id;
+ u8 size;
+ u8 ctrl_flags;
+ u8 seq_id;
+};
+
+#define nv_falcon_cmd nv_falcon_msg
+#define NV_FALCON_CMD_UNIT_ID_REWIND 0x00
+
+struct nvkm_falcon_qmgr;
+int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **);
+void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **);
+
+typedef int
+(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *);
+
+struct nvkm_falcon_cmdq;
+int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name,
+ struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **);
+void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *,
+ u32 index, u32 offset, u32 size);
+void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *);
+int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *,
+ nvkm_falcon_qmgr_callback, void *priv,
+ unsigned long timeout_jiffies);
+
+struct nvkm_falcon_msgq;
+int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name,
+ struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **);
+void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *,
+ u32 index, u32 offset, u32 size);
+int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size);
+void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *);
+#endif
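
To orient readers of the new header: a client creates a queue manager over its falcon, wraps command/message queues around it, and submits nv_falcon_cmd packets whose replies are matched back by seq_id. Below is a minimal sketch built only from the declarations above; the unit id, queue geometry, and timeout are illustrative, and a real driver would take the queue location from the falcon's INIT message rather than hard-coding it:

static int
example_reply_cb(void *priv, struct nv_falcon_msg *hdr)
{
        /* Invoked by the queue manager when the reply whose seq_id
         * matches our command arrives. */
        return 0;
}

static int
example_submit(struct nvkm_falcon *falcon)
{
        struct nvkm_falcon_qmgr *qmgr;
        struct nvkm_falcon_cmdq *cmdq;
        struct nv_falcon_cmd cmd = {
                .unit_id = 0x07,        /* hypothetical unit */
                .size = sizeof(cmd),
        };
        int ret;

        ret = nvkm_falcon_qmgr_new(falcon, &qmgr);
        if (ret)
                return ret;

        ret = nvkm_falcon_cmdq_new(qmgr, "cmdq", &cmdq);
        if (ret == 0) {
                /* Index/offset/size normally come from the INIT msg. */
                nvkm_falcon_cmdq_init(cmdq, 0, 0x1000, 0x100);
                ret = nvkm_falcon_cmdq_send(cmdq, &cmd, example_reply_cb,
                                            NULL, msecs_to_jiffies(1000));
                nvkm_falcon_cmdq_fini(cmdq);
                nvkm_falcon_cmdq_del(&cmdq);
        }

        nvkm_falcon_qmgr_del(&qmgr);
        return ret;
}
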
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index 383370c32428..d14b7fb07368 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,55 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
+#include <core/option.h>
#include <core/subdev.h>
-int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
- int min_version, int max_version,
- const struct firmware **);
-int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver,
const struct firmware **);
void nvkm_firmware_put(const struct firmware *);
+
+int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path,
+ const char *name, int ver, struct nvkm_blob *);
+int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path,
+ const char *name, int ver,
+ const struct firmware **);
+
+#define nvkm_firmware_load(s,l,o,p...) ({ \
+ struct nvkm_subdev *_s = (s); \
+ const char *_opts = (o); \
+ char _option[32]; \
+ typeof(l[0]) *_list = (l), *_next, *_fwif = NULL; \
+ int _ver, _fwv, _ret = 0; \
+ \
+ snprintf(_option, sizeof(_option), "Nv%sFw", _opts); \
+ _ver = nvkm_longopt(_s->device->cfgopt, _option, -2); \
+ if (_ver >= -1) { \
+ for (_next = _list; !_fwif && _next->load; _next++) { \
+ if (_next->version == _ver) \
+ _fwif = _next; \
+ } \
+ _ret = _fwif ? 0 : -EINVAL; \
+ } \
+ \
+ if (_ret == 0) { \
+ snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts); \
+ _fwv = _fwif ? _fwif->version : -1; \
+ _ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv); \
+ for (_next = _fwif ? _fwif : _list; _next->load; _next++) { \
+ _fwv = (_ver >= 0) ? _ver : _next->version; \
+ _ret = _next->load(p, _fwv, _next); \
+ if (_ret == 0 || _ver >= 0) { \
+ _fwif = _next; \
+ break; \
+ } \
+ } \
+ } \
+ \
+ if (_ret) { \
+ nvkm_error(_s, "failed to load firmware\n"); \
+ _fwif = ERR_PTR(_ret); \
+ } \
+ \
+ _fwif; \
+})
#endif
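
The macro above is easier to follow with a concrete consumer in mind: callers pass an array of per-version descriptors, typically newest first, each pairing a version number with a load callback; the Nv<Name>Fw option can pin a specific descriptor and Nv<Name>FwVer a specific firmware file revision, and the whole expression evaluates to the descriptor that loaded successfully, or an ERR_PTR() on failure. A sketch under those assumptions, with every example_* name invented for illustration (by nouveau convention, a -1 entry denotes operation without external firmware):

struct example_device {
        struct nvkm_subdev subdev;
};

struct example_fwif {
        int version;
        int (*load)(struct example_device *, int ver,
                    const struct example_fwif *);
};

static int
example_load(struct example_device *dev, int ver,
             const struct example_fwif *fwif)
{
        /* Fetch/parse "<name>-<ver>.bin" here; returning 0 selects this
         * descriptor, non-zero falls through to the next entry. */
        return 0;
}

static const struct example_fwif
example_fwif[] = {
        {  1, example_load },
        {  0, example_load },
        { -1, example_load },
        {}
};

static int
example_ctor(struct example_device *dev)
{
        const struct example_fwif *fwif =
                nvkm_firmware_load(&dev->subdev, example_fwif, "Example", dev);
        if (IS_ERR(fwif))
                return PTR_ERR(fwif);
        return 0;
}
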
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index b23bf6109f2d..74d3f1a809d7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -84,6 +84,22 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \
} while(0)
+#define nvkm_robj(o,a,p,s) do { \
+ u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \
+ while (_size--) { \
+ *(_data++) = nvkm_ro32((o), _addr); \
+ _addr += 4; \
+ } \
+} while(0)
+
+#define nvkm_wobj(o,a,p,s) do { \
+ u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \
+ while (_size--) { \
+ nvkm_wo32((o), _addr, *(_data++)); \
+ _addr += 4; \
+ } \
+} while(0)
+
#define nvkm_fill(t,s,o,a,d,c) do { \
u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \
u##t __iomem *_m = nvkm_kmap(o); \
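
For reference, the new nvkm_robj()/nvkm_wobj() helpers copy whole structures to and from an nvkm_memory object one 32-bit word at a time, so sizes are expected to be 4-byte multiples. A minimal round-trip sketch, with the descriptor type invented for illustration:

struct example_desc {
        u32 magic;
        u32 size;
};

static void
example_roundtrip(struct nvkm_memory *memory)
{
        struct example_desc desc = { .magic = 0xcafe0001, .size = 16 };

        nvkm_kmap(memory);
        nvkm_wobj(memory, 0x000, &desc, sizeof(desc));  /* write out */
        nvkm_robj(memory, 0x000, &desc, sizeof(desc));  /* read back */
        nvkm_done(memory);
}
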
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 029a416197db..d7ba3205207f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -21,4 +21,17 @@
iowrite32_native(lower_32_bits(_v), &_p[0]); \
iowrite32_native(upper_32_bits(_v), &_p[1]); \
} while(0)
+
+struct nvkm_blob {
+ void *data;
+ u32 size;
+};
+
+static inline void
+nvkm_blob_dtor(struct nvkm_blob *blob)
+{
+ kfree(blob->data);
+ blob->data = NULL;
+ blob->size = 0;
+}
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 23b582d696c6..27c1f868552c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_FALCON_H__
-#define __NVKM_FALCON_H__
+#ifndef __NVKM_FLCNEN_H__
+#define __NVKM_FLCNEN_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
@@ -23,12 +23,13 @@ struct nvkm_falcon {
struct mutex mutex;
struct mutex dmem_mutex;
+ bool oneinit;
+
const struct nvkm_subdev *user;
u8 version;
u8 secret;
bool debug;
- bool has_emem;
struct nvkm_memory *core;
bool external;
@@ -76,9 +77,14 @@ struct nvkm_falcon_func {
} data;
void (*init)(struct nvkm_falcon *);
void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+
+ u32 debug;
+ u32 fbif;
+
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
+ u32 emem_addr;
void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
@@ -86,6 +92,13 @@ struct nvkm_falcon_func {
void (*start)(struct nvkm_falcon *);
int (*enable)(struct nvkm_falcon *falcon);
void (*disable)(struct nvkm_falcon *falcon);
+ int (*reset)(struct nvkm_falcon *);
+
+ struct {
+ u32 head;
+ u32 tail;
+ u32 stride;
+ } cmdq, msgq;
struct nvkm_sclass sclass[];
};
@@ -122,5 +135,4 @@ int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
int nvkm_falcon_enable(struct nvkm_falcon *);
void nvkm_falcon_disable(struct nvkm_falcon *);
int nvkm_falcon_reset(struct nvkm_falcon *);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 2cde36f3c064..1530c81f86a2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -50,6 +50,8 @@ int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 7c7d7f0abfcc..1b3183e31606 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -3,13 +3,13 @@
#define __NVKM_NVDEC_H__
#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
#include <core/engine.h>
+#include <core/falcon.h>
struct nvkm_nvdec {
+ const struct nvkm_nvdec_func *func;
struct nvkm_engine engine;
- u32 addr;
-
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
};
-int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 21624046d0a1..33e6ba8adc8d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,5 +1,15 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_NVENC_H__
#define __NVKM_NVENC_H__
+#define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine)
#include <core/engine.h>
+#include <core/falcon.h>
+
+struct nvkm_nvenc {
+ const struct nvkm_nvenc_func *func;
+ struct nvkm_engine engine;
+ struct nvkm_falcon falcon;
+};
+
+int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 33078f86c779..34dc765648d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,17 +1,24 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
+#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
#include <core/engine.h>
+#include <core/falcon.h>
struct nvkm_sec2 {
+ const struct nvkm_sec2_func *func;
struct nvkm_engine engine;
- u32 addr;
+ struct nvkm_falcon falcon;
+
+ struct nvkm_falcon_qmgr *qmgr;
+ struct nvkm_falcon_cmdq *cmdq;
+ struct nvkm_falcon_msgq *msgq;
- struct nvkm_falcon *falcon;
- struct nvkm_msgqueue *queue;
struct work_struct work;
+ bool initmsg_received;
};
int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
new file mode 100644
index 000000000000..5d9c3a966de6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_ACR_H__
+#define __NVKM_ACR_H__
+#define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev)
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+enum nvkm_acr_lsf_id {
+ NVKM_ACR_LSF_PMU = 0,
+ NVKM_ACR_LSF_GSPLITE = 1,
+ NVKM_ACR_LSF_FECS = 2,
+ NVKM_ACR_LSF_GPCCS = 3,
+ NVKM_ACR_LSF_NVDEC = 4,
+ NVKM_ACR_LSF_SEC2 = 7,
+ NVKM_ACR_LSF_MINION = 10,
+ NVKM_ACR_LSF_NUM
+};
+
+static inline const char *
+nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id)
+{
+ switch (id) {
+ case NVKM_ACR_LSF_PMU : return "pmu";
+ case NVKM_ACR_LSF_GSPLITE: return "gsplite";
+ case NVKM_ACR_LSF_FECS : return "fecs";
+ case NVKM_ACR_LSF_GPCCS : return "gpccs";
+ case NVKM_ACR_LSF_NVDEC : return "nvdec";
+ case NVKM_ACR_LSF_SEC2 : return "sec2";
+ case NVKM_ACR_LSF_MINION : return "minion";
+ default:
+ return "unknown";
+ }
+}
+
+struct nvkm_acr {
+ const struct nvkm_acr_func *func;
+ struct nvkm_subdev subdev;
+
+ struct list_head hsfw, hsf;
+ struct list_head lsfw, lsf;
+
+ struct nvkm_memory *wpr;
+ u64 wpr_start;
+ u64 wpr_end;
+ u64 shadow_start;
+
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+
+ bool done;
+
+ const struct firmware *wpr_fw;
+ bool wpr_comp;
+ u64 wpr_prev;
+};
+
+bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
+int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
+
+int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+
+struct nvkm_acr_lsfw {
+ const struct nvkm_acr_lsf_func *func;
+ struct nvkm_falcon *falcon;
+ enum nvkm_acr_lsf_id id;
+
+ struct list_head head;
+
+ struct nvkm_blob img;
+
+ const struct firmware *sig;
+
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+
+ u32 app_size;
+ u32 app_start_offset;
+ u32 app_imem_entry;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+
+ u32 ucode_size;
+ u32 data_size;
+
+ struct {
+ u32 lsb;
+ u32 img;
+ u32 bld;
+ } offset;
+ u32 bl_data_size;
+};
+
+struct nvkm_acr_lsf_func {
+/* These (currently) map directly to LSB header flags. */
+#define NVKM_ACR_LSF_LOAD_CODE_AT_0 0x00000001
+#define NVKM_ACR_LSF_DMACTL_REQ_CTX 0x00000004
+#define NVKM_ACR_LSF_FORCE_PRIV_LOAD 0x00000008
+ u32 flags;
+ u32 bld_size;
+ void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *);
+ void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust);
+ int (*boot)(struct nvkm_falcon *);
+ int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+ int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask);
+};
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id, const char *path,
+ int ver, const struct nvkm_acr_lsf_func *);
+#endif
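
A brief sketch of the consumer side of this interface: engines ask whether ACR manages their falcon, and request a (re-)bootstrap by LSF-id mask. The function below is hypothetical but mirrors how a GR-type user would be expected to call it:

static int
example_reset_gr_falcons(struct nvkm_device *device)
{
        /* FECS/GPCCS are rebooted as a pair when the GR engine is
         * reinitialised on ACR-managed boards. */
        if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS))
                return 0;

        return nvkm_acr_bootstrap_falcons(device,
                                          BIT(NVKM_ACR_LSF_FECS) |
                                          BIT(NVKM_ACR_LSF_GPCCS));
}
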
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 97322f95b3ee..a513c16ab105 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -31,6 +31,7 @@ struct nvkm_fault_data {
};
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 239ad222b95a..34b56b10218a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -33,6 +33,8 @@ struct nvkm_fb {
const struct nvkm_fb_func *func;
struct nvkm_subdev subdev;
+ struct nvkm_blob vpr_scrubber;
+
struct nvkm_ram *ram;
struct nvkm_mm tags;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 4c672a5c4cd5..06db67610a50 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -2,12 +2,11 @@
#define __NVKM_GSP_H__
#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
#include <core/subdev.h>
+#include <core/falcon.h>
struct nvkm_gsp {
struct nvkm_subdev subdev;
- u32 addr;
-
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
};
int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 644d527c3b96..d76f60d7d29a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -40,4 +40,5 @@ int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 4752006880f3..da553089d2d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -2,13 +2,20 @@
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
-#include <engine/falcon.h>
+#include <core/falcon.h>
struct nvkm_pmu {
const struct nvkm_pmu_func *func;
struct nvkm_subdev subdev;
- struct nvkm_falcon *falcon;
- struct nvkm_msgqueue *queue;
+ struct nvkm_falcon falcon;
+
+ struct nvkm_falcon_qmgr *qmgr;
+ struct nvkm_falcon_cmdq *hpq;
+ struct nvkm_falcon_cmdq *lpq;
+ struct nvkm_falcon_msgq *msgq;
+ bool initmsg_received;
+
+ struct completion wpr_ready;
struct {
u32 base;
@@ -43,6 +50,7 @@ int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 81668104595f..2b4b21b02e40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1155,7 +1155,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
- static const struct {
+ static const struct _method_table {
const char *name;
int engine;
s32 oclass;
@@ -1185,7 +1185,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
{},
{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
- }, *mthd = _methods;
+ };
+ const struct _method_table *mthd = _methods;
const char *name = "CPU";
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 282fd90b65e1..d9381a053169 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -55,6 +55,8 @@ nouveau_channel_killed(struct nvif_notify *ntfy)
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
atomic_set(&chan->killed, 1);
+ if (chan->fence)
+ nouveau_fence_context_kill(chan->fence, -ENODEV);
return NVIF_NOTIFY_DROP;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index fa1439941596..0ad5d87b5a8e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
unsigned long c, i;
int ret = -ENOMEM;
- args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
+ args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
- args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
+ args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
if (!args.dst)
goto out_free_src;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 0003343014ce..6b1629c14dd7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -715,7 +715,6 @@ fail_nvkm:
void
nouveau_drm_device_remove(struct drm_device *dev)
{
- struct pci_dev *pdev = dev->pdev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_client *client;
struct nvkm_device *device;
@@ -727,7 +726,6 @@ nouveau_drm_device_remove(struct drm_device *dev)
device = nvkm_device_find(client->device);
nouveau_drm_device_fini(dev);
- pci_disable_device(pdev);
drm_dev_put(dev);
nvkm_device_del(&device);
}
@@ -738,6 +736,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
+ pci_disable_device(pdev);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index da8c46e09943..c2c332fbde97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -58,6 +58,8 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/drm_audio_component.h>
+
#include "uapi/drm/nouveau_drm.h"
struct nouveau_channel;
@@ -211,6 +213,11 @@ struct nouveau_drm {
struct nouveau_svm *svm;
struct nouveau_dmem *dmem;
+
+ struct {
+ struct drm_audio_component *component;
+ bool component_registered;
+ } audio;
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 9118df035b28..666f2090d92b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -87,7 +87,7 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
}
void
-nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
struct nouveau_fence *fence;
@@ -95,11 +95,19 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ if (error)
+ dma_fence_set_error(&fence->base, error);
+
if (nouveau_fence_signal(fence))
nvif_notify_put(&fctx->notify);
}
spin_unlock_irq(&fctx->lock);
+}
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+ nouveau_fence_context_kill(fctx, 0);
nvif_notify_fini(&fctx->notify);
fctx->dead = 1;
@@ -156,7 +164,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(fence->channel, fctx))
+ if (nouveau_fence_update(chan, fctx))
ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
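
The split above leaves two entry points: _kill flushes every pending fence with an error status, while _del remains the teardown path (internally a _kill with error 0 followed by notifier teardown). A sketch of the intended call-site pattern, mirroring the nouveau_channel_killed hunk earlier in this patch (the wrapper name is hypothetical):

static void
example_on_channel_death(struct nouveau_channel *chan)
{
        /* Fail every pending fence so waiters see -ENODEV instead of
         * blocking until their timeout expires. */
        if (chan->fence)
                nouveau_fence_context_kill(chan->fence, -ENODEV);
}
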
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c9e24baaaa4f..4887caa69c65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -63,6 +63,7 @@ struct nouveau_fence_priv {
void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
void nouveau_fence_context_del(struct nouveau_fence_chan *);
void nouveau_fence_context_free(struct nouveau_fence_chan *);
+void nouveau_fence_context_kill(struct nouveau_fence_chan *, int error);
int nv04_fence_create(struct nouveau_drm *);
int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 05ec8edd6a8b..f5ece1f94973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -688,7 +688,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct validate_op op;
struct nouveau_fence *fence = NULL;
int i, j, ret = 0;
- bool do_reloc = false;
+ bool do_reloc = false, sync = false;
if (unlikely(!abi16))
return -ENOMEM;
@@ -702,6 +702,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
+ if (unlikely(atomic_read(&chan->killed)))
+ return nouveau_abi16_put(abi16, -ENODEV);
+
+ sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
req->vram_available = drm->gem.vram_available;
req->gart_available = drm->gem.gart_available;
@@ -850,6 +854,13 @@ revalidate:
goto out;
}
+ if (sync) {
+ if (!(ret = nouveau_fence_wait(fence, false, false))) {
+ if ((ret = dma_fence_get_status(&fence->base)) == 1)
+ ret = 0;
+ }
+ }
+
out:
validate_fini(&op, chan, fence, bo);
nouveau_fence_unref(&fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index d445c6f3fece..1c3104d20571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -741,7 +741,7 @@ nouveau_hwmon_init(struct drm_device *dev)
special_groups[i++] = &pwm_fan_sensor_group;
}
- special_groups[i] = 0;
+ special_groups[i] = NULL;
hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev,
&nouveau_chip_info,
special_groups);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 77a0c6ad3cef..7ca0a2498532 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_mem *mem;
int ret;
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
if (ret)
return ret;
@@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
- struct nouveau_mem *mem;
int ret;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 77061182a1cf..b28c7dc13ad6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -69,8 +69,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
}
list_del(&vma->head);
kfree(*pvma);
- *pvma = NULL;
}
+ *pvma = NULL;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 5641bda2046d..47efc408efa6 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
kind, argc);
if (ret == 0)
memcpy(mmu->kind, kind->data, kind->count);
+ mmu->kind_inv = kind->kind_inv;
kfree(kind);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index b53de9ba8c73..db3ade125fa9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/nvfw/Kbuild
include $(src)/nvkm/falcon/Kbuild
include $(src)/nvkm/subdev/Kbuild
include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 092acdec2c39..8b25367917ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -22,6 +22,40 @@
#include <core/device.h>
#include <core/firmware.h>
+int
+nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base,
+ const char *name, int ver, const struct firmware **pfw)
+{
+ char path[64];
+ int ret;
+
+ snprintf(path, sizeof(path), "%s%s", base, name);
+ ret = nvkm_firmware_get(subdev, path, ver, pfw);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int
+nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base,
+ const char *name, int ver, struct nvkm_blob *blob)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw);
+ if (ret == 0) {
+ blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ blob->size = fw->size;
+ nvkm_firmware_put(fw);
+ if (!blob->data)
+ return -ENOMEM;
+ }
+
+ return ret;
+}
+
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
 * @subdev: subdevice that will use that firmware
@@ -32,9 +66,8 @@
* Firmware files released by NVIDIA will always follow this format.
*/
int
-nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
- int min_version, int max_version,
- const struct firmware **fw)
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
+ const struct firmware **fw)
{
struct nvkm_device *device = subdev->device;
char f[64];
@@ -50,31 +83,21 @@ nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
cname[i] = tolower(cname[i]);
}
- for (i = max_version; i >= min_version; i--) {
- if (i != 0)
- snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
- else
- snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-
- if (!firmware_request_nowarn(fw, f, device->dev)) {
- nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
- return i;
- }
+ if (ver != 0)
+ snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
+ else
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+ if (!firmware_request_nowarn(fw, f, device->dev)) {
+ nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n",
+ f, (*fw)->size);
+ return 0;
}
- nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+ nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
return -ENOENT;
}
-int
-nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
- const struct firmware **fw)
-{
- return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
-}
-
/**
* nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
*/
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index e85a08ecd9da..4cc186262d34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -91,8 +91,8 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
}
refcount_set(&tags->refcount, 1);
+ *ptags = memory->tags = tags;
mutex_unlock(&fb->subdev.mutex);
- *ptags = tags;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 245990de1e90..79a8f9d305c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -30,6 +30,7 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
const char *
nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+ [NVKM_SUBDEV_ACR ] = "acr",
[NVKM_SUBDEV_BAR ] = "bar",
[NVKM_SUBDEV_VBIOS ] = "bios",
[NVKM_SUBDEV_BUS ] = "bus",
@@ -50,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_SUBDEV_MXM ] = "mxm",
[NVKM_SUBDEV_PCI ] = "pci",
[NVKM_SUBDEV_PMU ] = "pmu",
- [NVKM_SUBDEV_SECBOOT ] = "secboot",
[NVKM_SUBDEV_THERM ] = "therm",
[NVKM_SUBDEV_TIMER ] = "tmr",
[NVKM_SUBDEV_TOP ] = "top",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c3c7159f3411..c7d700916eae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1987,6 +1987,8 @@ nv117_chipset = {
.dma = gf119_dma_new,
.fifo = gm107_fifo_new,
.gr = gm107_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
@@ -2027,6 +2029,7 @@ nv118_chipset = {
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2045,7 +2048,6 @@ nv120_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2056,12 +2058,16 @@ nv120_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2080,7 +2086,6 @@ nv124_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2091,12 +2096,16 @@ nv124_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2115,7 +2124,6 @@ nv126_chipset = {
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
.therm = gm200_therm_new,
- .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2126,12 +2134,15 @@ nv126_chipset = {
.dma = gf119_dma_new,
.fifo = gm200_fifo_new,
.gr = gm200_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
+ .acr = gm20b_acr_new,
.bar = gm20b_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
@@ -2143,7 +2154,6 @@ nv12b_chipset = {
.mc = gk20a_mc_new,
.mmu = gm20b_mmu_new,
.pmu = gm20b_pmu_new,
- .secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
.ce[2] = gm200_ce_new,
@@ -2157,6 +2167,7 @@ nv12b_chipset = {
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
+ .acr = gm200_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2172,7 +2183,6 @@ nv130_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gm200_secboot_new,
.pci = gp100_pci_new,
.pmu = gp100_pmu_new,
.timer = gk20a_timer_new,
@@ -2187,12 +2197,17 @@ nv130_chipset = {
.disp = gp100_disp_new,
.fifo = gp100_fifo_new,
.gr = gp100_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
+ .nvenc[2] = gm107_nvenc_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2208,7 +2223,6 @@ nv132_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2221,7 +2235,9 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2229,6 +2245,7 @@ nv132_chipset = {
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2244,7 +2261,6 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2257,7 +2273,9 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2265,6 +2283,7 @@ nv134_chipset = {
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2280,7 +2299,6 @@ nv136_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2293,7 +2311,8 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2301,6 +2320,7 @@ nv136_chipset = {
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
+ .acr = gp102_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2316,7 +2336,6 @@ nv137_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2329,7 +2348,9 @@ nv137_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2337,6 +2358,7 @@ nv137_chipset = {
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
+ .acr = gp108_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2352,7 +2374,6 @@ nv138_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
- .secboot = gp108_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2364,30 +2385,30 @@ nv138_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
- .gr = gp107_gr_new,
- .nvdec[0] = gp102_nvdec_new,
- .sec2 = gp102_sec2_new,
+ .gr = gp108_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .sec2 = gp108_sec2_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
+ .acr = gp10b_acr_new,
.bar = gm20b_bar_new,
.bus = gf100_bus_new,
- .fault = gp100_fault_new,
+ .fault = gp10b_fault_new,
.fb = gp10b_fb_new,
.fuse = gm107_fuse_new,
.ibus = gp10b_ibus_new,
.imem = gk20a_instmem_new,
- .ltc = gp102_ltc_new,
+ .ltc = gp10b_ltc_new,
.mc = gp10b_mc_new,
.mmu = gp10b_mmu_new,
- .secboot = gp10b_secboot_new,
- .pmu = gm20b_pmu_new,
+ .pmu = gp10b_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[2] = gp102_ce_new,
+ .ce[0] = gp100_ce_new,
.dma = gf119_dma_new,
.fifo = gp10b_fifo_new,
.gr = gp10b_gr_new,
@@ -2397,6 +2418,7 @@ nv13b_chipset = {
static const struct nvkm_device_chip
nv140_chipset = {
.name = "GV100",
+ .acr = gp108_acr_new,
.bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2414,7 +2436,6 @@ nv140_chipset = {
.mmu = gv100_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
- .secboot = gp108_secboot_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2431,13 +2452,17 @@ nv140_chipset = {
.dma = gv100_dma_new,
.fifo = gv100_fifo_new,
.gr = gv100_gr_new,
- .nvdec[0] = gp102_nvdec_new,
- .sec2 = gp102_sec2_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
+ .nvenc[1] = gm107_nvenc_new,
+ .nvenc[2] = gm107_nvenc_new,
+ .sec2 = gp108_sec2_new,
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2466,13 +2491,16 @@ nv162_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2501,13 +2529,17 @@ nv164_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvdec[1] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2536,7 +2568,11 @@ nv166_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .gr = tu102_gr_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvdec[1] = gm107_nvdec_new,
+ .nvdec[2] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2571,7 +2607,8 @@ nv167_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2606,7 +2643,8 @@ nv168_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
- .nvdec[0] = gp102_nvdec_new,
+ .nvdec[0] = gm107_nvdec_new,
+ .nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
};
@@ -2638,6 +2676,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
switch (index) {
#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+ _(ACR , device->acr , &device->acr->subdev);
_(BAR , device->bar , &device->bar->subdev);
_(VBIOS , device->bios , &device->bios->subdev);
_(BUS , device->bus , &device->bus->subdev);
@@ -2658,7 +2697,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
_(MXM , device->mxm , device->mxm);
_(PCI , device->pci , &device->pci->subdev);
_(PMU , device->pmu , &device->pmu->subdev);
- _(SECBOOT , device->secboot , &device->secboot->subdev);
_(THERM , device->therm , &device->therm->subdev);
_(TIMER , device->timer , &device->timer->subdev);
_(TOP , device->top , &device->top->subdev);
@@ -2703,9 +2741,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(MSPDEC , device->mspdec , device->mspdec);
_(MSPPP , device->msppp , device->msppp);
_(MSVLD , device->msvld , device->msvld);
- _(NVENC0 , device->nvenc[0], device->nvenc[0]);
- _(NVENC1 , device->nvenc[1], device->nvenc[1]);
- _(NVENC2 , device->nvenc[2], device->nvenc[2]);
+ _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
+ _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
+ _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
_(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
_(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
_(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
@@ -3144,6 +3182,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
} \
break
switch (i) {
+ _(NVKM_SUBDEV_ACR , acr);
_(NVKM_SUBDEV_BAR , bar);
_(NVKM_SUBDEV_VBIOS , bios);
_(NVKM_SUBDEV_BUS , bus);
@@ -3164,7 +3203,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_SUBDEV_MXM , mxm);
_(NVKM_SUBDEV_PCI , pci);
_(NVKM_SUBDEV_PMU , pmu);
- _(NVKM_SUBDEV_SECBOOT , secboot);
_(NVKM_SUBDEV_THERM , therm);
_(NVKM_SUBDEV_TIMER , timer);
_(NVKM_SUBDEV_TOP , top);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index d8be2f77ac66..54eab5e04230 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -3,6 +3,7 @@
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
+#include <subdev/acr.h>
#include <subdev/bar.h>
#include <subdev/bios.h>
#include <subdev/bus.h>
@@ -27,7 +28,6 @@
#include <subdev/timer.h>
#include <subdev/top.h>
#include <subdev/volt.h>
-#include <subdev/secboot.h>
#include <engine/bsp.h>
#include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 0e372a190d3f..d0d52c1d4aee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -52,18 +52,18 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
clk_set_rate(tdev->clk_pwr, 204000000);
udelay(10);
- reset_control_assert(tdev->rst);
- udelay(10);
-
if (!tdev->pdev->dev.pm_domain) {
+ reset_control_assert(tdev->rst);
+ udelay(10);
+
ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
if (ret)
goto err_clamp;
udelay(10);
- }
- reset_control_deassert(tdev->rst);
- udelay(10);
+ reset_control_deassert(tdev->rst);
+ udelay(10);
+ }
return 0;
@@ -279,6 +279,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
+ unsigned long rate;
int ret;
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
@@ -307,6 +308,17 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto free;
}
+ rate = clk_get_rate(tdev->clk);
+ if (rate == 0) {
+ ret = clk_set_rate(tdev->clk, ULONG_MAX);
+ if (ret < 0)
+ goto free;
+
+ rate = clk_get_rate(tdev->clk);
+
+ dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
+ }
+
if (func->require_ref_clk)
tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(tdev->clk_ref)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index bcf32d92ee5a..50e3539f33d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (debug > subdev->debug)
return;
+ if (!mthd)
+ return;
for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
u32 base = chan->head * mthd->addr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 818d21bd28d3..3800aeb507d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,7 +365,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) {
/* Try to respect sink limits too when selecting
* lowest link configuration.
*/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 892be6c9b76c..c1032527f791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -101,15 +101,26 @@ gv100_disp_exception(struct nv50_disp *disp, int chid)
u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
u32 type = (stat & 0x00007000) >> 12;
u32 mthd = (stat & 0x00000fff) << 2;
- u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
- u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
const struct nvkm_enum *reason =
nvkm_enum_find(nv50_disp_intr_error_type, type);
- nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
- "data %08x code %08x\n",
- chid, stat, type, reason ? reason->name : "",
- mthd, data, code);
+ /*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
+ * don't support those currently.
+ *
+ * CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
+ */
+ if (chid <= 32) {
+ u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
+ u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+ "mthd %04x data %08x code %08x\n",
+ chid, stat, type, reason ? reason->name : "",
+ mthd, data, code);
+ } else {
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
+ "mthd %04x\n",
+ chid, stat, type, reason ? reason->name : "", mthd);
+ }
if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
switch (mthd) {
@@ -144,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
if (stat & 0x00000008)
stat &= ~0x00000008;
+ if (stat & 0x00000080) {
+ u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
+ nvkm_warn(subdev, "error %08x\n", error);
+ stat &= ~0x00000080;
+ }
+
if (stat & 0x00000100) {
unsigned long wndws = nvkm_rd32(device, 0x611858);
unsigned long other = nvkm_rd32(device, 0x61185c);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 73724a8cb861..558c86fd8e82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -36,8 +36,10 @@ nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/gp102.o
nvkm-y += nvkm/engine/gr/gp104.o
nvkm-y += nvkm/engine/gr/gp107.o
+nvkm-y += nvkm/engine/gr/gp108.o
nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
+nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -60,3 +62,4 @@ nvkm-y += nvkm/engine/gr/ctxgp102.o
nvkm-y += nvkm/engine/gr/ctxgp104.o
nvkm-y += nvkm/engine/gr/ctxgp107.o
nvkm-y += nvkm/engine/gr/ctxgv100.o
+nvkm-y += nvkm/engine/gr/ctxtu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 85f2d1e950e8..297915719bf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1324,10 +1324,8 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
void
gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
{
- struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *func = gr->func->grctx;
- int gpc, sm, i, j;
- u32 data;
+ int sm;
for (sm = 0; sm < gr->sm_nr; sm++) {
func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);
@@ -1335,12 +1333,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
func->tpc_nr(gr, gr->sm[sm].gpc);
}
- for (gpc = 0, i = 0; i < 4; i++) {
- for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
- data |= gr->tpc_nr[gpc] << (j * 4);
- nvkm_wr32(device, 0x406028 + (i * 4), data);
- nvkm_wr32(device, 0x405870 + (i * 4), data);
- }
+ gf100_gr_init_num_tpc_per_gpc(gr, false, true);
+ if (!func->skip_pd_num_tpc_per_gpc)
+ gf100_gr_init_num_tpc_per_gpc(gr, true, false);
if (func->r4060a8)
func->r4060a8(gr);
@@ -1374,7 +1369,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mc_unk260(device, 0);
- if (!gr->fuc_sw_ctx) {
+ if (!gr->sw_ctx) {
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc_0);
gf100_gr_mmio(gr, grctx->zcull);
@@ -1382,7 +1377,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
} else {
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
}
gf100_gr_wait_idle(gr);
@@ -1401,8 +1396,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
if (grctx->r400088) grctx->r400088(gr, false);
- if (gr->fuc_bundle)
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ if (gr->bundle)
+ gf100_gr_icmd(gr, gr->bundle);
else
gf100_gr_icmd(gr, grctx->icmd);
if (grctx->sw_veid_bundle_init)
@@ -1411,8 +1406,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
- if (gr->fuc_method)
- gf100_gr_mthd(gr, gr->fuc_method);
+ if (gr->method)
+ gf100_gr_mthd(gr, gr->method);
else
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc_unk260(device, 1);
@@ -1431,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
grctx->r419a3c(gr);
if (grctx->r408840)
grctx->r408840(gr);
+ if (grctx->r419c0c)
+ grctx->r419c0c(gr);
}
#define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 478b4723d0f9..32bbddc0993e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -57,6 +57,7 @@ struct gf100_grctx_func {
/* floorsweeping */
void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
void (*tpc_nr)(struct gf100_gr *, int gpc);
+ bool skip_pd_num_tpc_per_gpc;
void (*r4060a8)(struct gf100_gr *);
void (*rop_mapping)(struct gf100_gr *);
void (*alpha_beta_tables)(struct gf100_gr *);
@@ -76,6 +77,7 @@ struct gf100_grctx_func {
void (*r418e94)(struct gf100_gr *);
void (*r419a3c)(struct gf100_gr *);
void (*r408840)(struct gf100_gr *);
+ void (*r419c0c)(struct gf100_gr *);
};
extern const struct gf100_grctx_func gf100_grctx;
@@ -153,6 +155,14 @@ extern const struct gf100_grctx_func gp107_grctx;
extern const struct gf100_grctx_func gv100_grctx;
+extern const struct gf100_grctx_func tu102_grctx;
+void gv100_grctx_unkn88c(struct gf100_gr *, bool);
+void gv100_grctx_generate_unkn(struct gf100_gr *);
+extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[];
+void gv100_grctx_generate_attrib(struct gf100_grctx *);
+void gv100_grctx_generate_rop_mapping(struct gf100_gr *);
+void gv100_grctx_generate_r400088(struct gf100_gr *, bool);
+
/* context init value lists */
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 896d473dcc0f..c0d36bc601f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -32,7 +32,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
gf100_gr_wait_idle(gr);
@@ -56,10 +56,10 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
- gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_mthd(gr, gr->method);
gf100_gr_wait_idle(gr);
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ gf100_gr_icmd(gr, gr->bundle);
grctx->pagepool(info);
grctx->bundle(info);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index a1d9e114ebeb..6b92f8aa18a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -29,7 +29,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i, tmp;
- gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+ gf100_gr_mmio(gr, gr->sw_ctx);
gf100_gr_wait_idle(gr);
@@ -59,10 +59,10 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
- gf100_gr_mthd(gr, gr->fuc_method);
+ gf100_gr_mthd(gr, gr->method);
gf100_gr_wait_idle(gr);
- gf100_gr_icmd(gr, gr->fuc_bundle);
+ gf100_gr_icmd(gr, gr->bundle);
grctx->pagepool(info);
grctx->bundle(info);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
index 0990765ef191..39553d55d3f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
@@ -25,7 +25,7 @@
* PGRAPH context implementation
******************************************************************************/
-static const struct gf100_gr_init
+const struct gf100_gr_init
gv100_grctx_init_sw_veid_bundle_init_0[] = {
{ 0x00001000, 64, 0x00100000, 0x00000008 },
{ 0x00000941, 64, 0x00100000, 0x00000000 },
@@ -58,7 +58,7 @@ gv100_grctx_pack_sw_veid_bundle_init[] = {
{}
};
-static void
+void
gv100_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
@@ -67,14 +67,14 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 gfxp = grctx->gfxp_nr;
const int s = 12;
- const int max_batches = 0xffff;
u32 size = grctx->alpha_nr_max * gr->tpc_total;
u32 ao = 0;
u32 bo = ao + size;
int gpc, ppc, b, n = 0;
- size += grctx->gfxp_nr * gr->tpc_total;
- size = ((size * 0x20) + 128) & ~127;
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+ size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
+ size = ((size * 0x20) + 127) & ~127;
b = mmio_vram(info, size, (1 << s), false);
mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -84,13 +84,12 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
mmio_wr32(info, 0x405830, attrib);
mmio_wr32(info, 0x40585c, alpha);
- mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
- const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
- const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_max;
+ const u32 gs = gfxp * gr->ppc_tpc_max;
const u32 u = 0x418ea0 + (n * 0x04);
const u32 o = PPC_UNIT(gpc, ppc, 0);
if (!(gr->ppc_mask[gpc] & (1 << ppc)))
@@ -110,7 +109,7 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info)
mmio_wr32(info, 0x41befc, 0x00000100);
}
-static void
+void
gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -147,7 +146,7 @@ gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
gr->screen_tile_row_offset);
}
-static void
+void
gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -163,7 +162,7 @@ gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
}
-static void
+void
gv100_grctx_generate_unkn(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -174,7 +173,7 @@ gv100_grctx_generate_unkn(struct gf100_gr *gr)
nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
-static void
+void
gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
new file mode 100644
index 000000000000..2299ca07d04a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+tu102_grctx_generate_r419c0c(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000);
+ nvkm_mask(device, 0x400080, 0x00000000, 0x00000000);
+}
+
+static void
+tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
+}
+
+static const struct gf100_gr_init
+tu102_grctx_init_unknown_bundle_init_0[] = {
+ { 0x00001000, 1, 0x00000001, 0x00000004 },
+ { 0x00002020, 64, 0x00000001, 0x00000000 },
+ { 0x0001e100, 1, 0x00000001, 0x00000001 },
+ {}
+};
+
+static const struct gf100_gr_pack
+tu102_grctx_pack_sw_veid_bundle_init[] = {
+ { gv100_grctx_init_sw_veid_bundle_init_0 },
+ { tu102_grctx_init_unknown_bundle_init_0 },
+ {}
+};
+
+static void
+tu102_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ const u64 size = 0x80000; /*XXX: educated guess */
+ const int s = 8;
+ const int b = mmio_vram(info, size, (1 << s), true);
+
+ gv100_grctx_generate_attrib(info);
+
+ mmio_refn(info, 0x408070, 0x00000000, s, b);
+ mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */
+ mmio_refn(info, 0x419034, 0x00000000, s, b);
+ mmio_wr32(info, 0x408078, 0x00000000);
+}
+
+const struct gf100_grctx_func
+tu102_grctx = {
+ .unkn88c = gv100_grctx_unkn88c,
+ .main = gf100_grctx_generate_main,
+ .unkn = gv100_grctx_generate_unkn,
+ .sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0xa80,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = tu102_grctx_generate_attrib,
+ .attrib_nr_max = 0x800,
+ .attrib_nr = 0x700,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+ .gfxp_nr = 0xfa8,
+ .sm_id = tu102_grctx_generate_sm_id,
+ .skip_pd_num_tpc_per_gpc = true,
+ .rop_mapping = gv100_grctx_generate_rop_mapping,
+ .r406500 = gm200_grctx_generate_r406500,
+ .r400088 = gv100_grctx_generate_r400088,
+ .r419c0c = tu102_grctx_generate_r419c0c,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index c24f35ad56a6..ae2d5b6891cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gk208_grhub_code[] = {
0x020014fe,
0x12004002,
0xbd0002f6,
- 0x05c94104,
+ 0x05ca4104,
0xbd0010fe,
0x07004024,
0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gk208_grhub_code[] = {
0x01039204,
0x03090080,
0xbd0003f6,
- 0x87044204,
- 0xf6040040,
- 0x04bd0002,
- 0x00400402,
- 0x0002f603,
- 0x31f404bd,
- 0x96048e10,
- 0x00657e40,
- 0xc7feb200,
- 0x01b590f1,
- 0x1ff4f003,
- 0x01020fb5,
- 0x041fbb01,
- 0x800112b6,
- 0xf6010300,
- 0x04bd0001,
- 0x01040080,
+ 0x87048204,
+ 0x04004000,
+ 0xbd0002f6,
+ 0x40040204,
+ 0x02f60300,
+ 0xf404bd00,
+ 0x048e1031,
+ 0x657e4096,
+ 0xfeb20000,
+ 0xb590f1c7,
+ 0xf4f00301,
+ 0x020fb51f,
+ 0x1fbb0101,
+ 0x0112b604,
+ 0x01030080,
0xbd0001f6,
- 0x01004104,
- 0xac7e020f,
- 0xbb7e0006,
- 0x100f0006,
- 0x0006fd7e,
- 0x98000e98,
- 0x207e010f,
- 0x14950001,
- 0xc0008008,
- 0x0004f601,
- 0x008004bd,
- 0x04f601c1,
- 0xb704bd00,
- 0xbb130030,
- 0xf5b6001f,
- 0xd3008002,
- 0x000ff601,
- 0x15b604bd,
- 0x0110b608,
- 0xb20814b6,
- 0x02687e1f,
- 0x001fbb00,
- 0x84020398,
-/* 0x041f: init_gpc */
- 0xb8502000,
- 0x0008044e,
- 0x8f7e1fb2,
+ 0x04008004,
+ 0x0001f601,
+ 0x004104bd,
+ 0x7e020f01,
+ 0x7e0006ad,
+ 0x0f0006bc,
+ 0x06fe7e10,
+ 0x000e9800,
+ 0x7e010f98,
+ 0x95000120,
+ 0x00800814,
+ 0x04f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0004,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x008002f5,
+ 0x0ff601d3,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0x687e1fb2,
+ 0x1fbb0002,
+ 0x02039800,
+ 0x50200084,
+/* 0x0420: init_gpc */
+ 0x08044eb8,
+ 0x7e1fb200,
+ 0xb800008f,
+ 0x00010c4e,
+ 0x8f7ef4bd,
0x4eb80000,
- 0xbd00010c,
- 0x008f7ef4,
- 0x044eb800,
- 0x8f7e0001,
+ 0x7e000104,
+ 0xb800008f,
+ 0x0001004e,
+ 0x8f7e020f,
0x4eb80000,
- 0x0f000100,
- 0x008f7e02,
- 0x004eb800,
-/* 0x044e: init_gpc_wait */
+/* 0x044f: init_gpc_wait */
+ 0x7e000800,
+ 0xc8000065,
+ 0x0bf41fff,
+ 0x044eb8f9,
0x657e0008,
- 0xffc80000,
- 0xf90bf41f,
- 0x08044eb8,
- 0x00657e00,
- 0x001fbb00,
- 0x800040b7,
- 0xf40132b6,
- 0x000fb41b,
- 0x0006fd7e,
- 0xac7e000f,
- 0x00800006,
- 0x01f60201,
- 0xbd04bd00,
- 0x1f19f014,
- 0x02300080,
- 0xbd0001f6,
-/* 0x0491: wait */
- 0x0028f404,
-/* 0x0497: main */
- 0x0d0031f4,
- 0x00377e10,
- 0xf401f400,
- 0x4001e4b1,
- 0x00c71bf5,
- 0x99f094bd,
- 0x37008004,
- 0x0009f602,
- 0x008104bd,
- 0x11cf02c0,
- 0xc1008200,
- 0x0022cf02,
- 0xf41f13c8,
- 0x23c8770b,
- 0x550bf41f,
- 0x12b220f9,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0231f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x20fc04bd,
- 0x99f094bd,
- 0x37008006,
- 0x0009f602,
- 0x31f404bd,
- 0x08807e01,
+ 0x1fbb0000,
+ 0x0040b700,
+ 0x0132b680,
+ 0x0fb41bf4,
+ 0x06fe7e00,
+ 0x7e000f00,
+ 0x800006ad,
+ 0xf6020100,
+ 0x04bd0001,
+ 0x19f014bd,
+ 0x3000801f,
+ 0x0001f602,
+/* 0x0492: wait */
+ 0x28f404bd,
+ 0x0031f400,
+/* 0x0498: main */
+ 0x377e100d,
+ 0x01f40000,
+ 0x01e4b1f4,
+ 0xc71bf540,
0xf094bd00,
- 0x00800699,
+ 0x00800499,
+ 0x09f60237,
+ 0x8104bd00,
+ 0xcf02c000,
+ 0x00820011,
+ 0x22cf02c1,
+ 0x1f13c800,
+ 0xc8770bf4,
+ 0x0bf41f23,
+ 0xb220f955,
+ 0xf094bd12,
+ 0x00800799,
+ 0x09f60237,
+ 0xf404bd00,
+ 0x31f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
+ 0xfc04bd00,
+ 0xf094bd20,
+ 0x00800699,
+ 0x09f60237,
0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
- 0x20f92f0e,
- 0x32f412b2,
- 0x0232f401,
- 0x0008807e,
- 0x008020fc,
- 0x02f602c0,
+ 0x817e0131,
+ 0x94bd0008,
+ 0x800699f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+ 0xf92f0ef4,
+ 0xf412b220,
+ 0x32f40132,
+ 0x08817e02,
+ 0x8020fc00,
+ 0xf602c000,
+ 0x04bd0002,
+/* 0x053f: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0x7e0232f4,
+/* 0x054f: chsw_done */
+ 0x02000881,
+ 0xc3008001,
+ 0x0002f602,
+ 0x94bd04bd,
+ 0x800499f0,
+ 0xf6021700,
+ 0x04bd0009,
+ 0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b20c1b,
+ 0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+ 0xb0400ef4,
+ 0x1bf402e4,
+ 0xf094bd2c,
+ 0x00800799,
+ 0x09f60237,
0xf404bd00,
-/* 0x053e: chsw_no_prev */
- 0x23c8130e,
- 0x0d0bf41f,
- 0xf40131f4,
- 0x807e0232,
-/* 0x054e: chsw_done */
- 0x01020008,
- 0x02c30080,
- 0xbd0002f6,
- 0xf094bd04,
- 0x00800499,
+ 0x32f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
- 0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
- 0xb0ff300e,
- 0x1bf401e4,
- 0x7ef2b20c,
- 0xf4000820,
-/* 0x057a: main_not_ctx_chan */
- 0xe4b0400e,
- 0x2c1bf402,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0232f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
- 0x10ef9411,
- 0x7e01f5f0,
- 0xf50002f8,
-/* 0x05b7: main_done */
- 0xbdfee40e,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
- 0xd20ef504,
-/* 0x05c9: ih */
- 0xf900f9fe,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0xcf02004a,
- 0xabc400aa,
- 0x230bf404,
- 0x004e100d,
- 0x00eecf1a,
- 0xcf19004f,
- 0x047e00ff,
- 0xb0b70000,
- 0x010e0400,
- 0xf61d0040,
- 0x04bd000e,
-/* 0x060c: ih_no_fifo */
- 0x0100abe4,
- 0x0d0c0bf4,
- 0x40014e10,
- 0x0000047e,
-/* 0x061c: ih_no_ctxsw */
- 0x0400abe4,
- 0x8e560bf4,
- 0x7e400708,
+ 0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+ 0xef94110e,
+ 0x01f5f010,
+ 0x0002f87e,
+ 0xfee40ef5,
+/* 0x05b8: main_done */
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+ 0x0ef504bd,
+/* 0x05ca: ih */
+ 0x00f9fed2,
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e100d23,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0xb7000004,
+ 0x0e0400b0,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+ 0x00abe404,
+ 0x0c0bf401,
+ 0x014e100d,
+ 0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+ 0x00abe400,
+ 0x560bf404,
+ 0x4007088e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60204,
+ 0x8e04bd00,
+ 0x7e400704,
0xb2000065,
- 0x040080ff,
+ 0x030080ff,
0x000ff602,
- 0x048e04bd,
- 0x657e4007,
- 0xffb20000,
- 0x02030080,
- 0xbd000ff6,
- 0x50fec704,
- 0x8f02ee94,
- 0xbb400700,
- 0x657e00ef,
- 0x00800000,
- 0x0ff60202,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x4007008f,
+ 0x7e00efbb,
+ 0x80000065,
+ 0xf6020200,
+ 0x04bd000f,
+ 0xf87e030f,
+ 0x004b0002,
+ 0x8ebfb201,
+ 0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+ 0x4b00008f,
+ 0xb0bd0504,
+ 0xf4b4abff,
+ 0x00800c0b,
+ 0x0bf60307,
+/* 0x068b: ih_no_other */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x06ad: ctx_4170s */
+ 0x10f5f001,
+ 0x708effb2,
+ 0x8f7e4041,
+ 0x00f80000,
+/* 0x06bc: ctx_4170w */
+ 0x4041708e,
+ 0x0000657e,
+ 0xf4f0ffb2,
+ 0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+ 0x004e00f8,
+ 0x40e5f002,
+ 0xf020e5f0,
+ 0x008010e5,
+ 0x0ef60185,
0x0f04bd00,
- 0x02f87e03,
- 0x01004b00,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd05,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x068a: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0xf400fc80,
- 0x01f80032,
-/* 0x06ac: ctx_4170s */
- 0xb210f5f0,
- 0x41708eff,
+/* 0x06e5: ctx_redswitch_delay */
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xf10400e5,
+ 0x800100e5,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x06fe: ctx_86c */
+ 0x008000f8,
+ 0x0ff60223,
+ 0xb204bd00,
+ 0x8a148eff,
0x008f7e40,
-/* 0x06bb: ctx_4170w */
- 0x8e00f800,
- 0x7e404170,
- 0xb2000065,
- 0x10f4f0ff,
- 0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
- 0x02004e00,
- 0xf040e5f0,
- 0xe5f020e5,
- 0x85008010,
- 0x000ef601,
- 0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xe5f1fd1b,
- 0xe5f10400,
- 0x00800100,
- 0x0ef60185,
- 0xf804bd00,
-/* 0x06fd: ctx_86c */
- 0x23008000,
+ 0x8effb200,
+ 0x7e41a88c,
+ 0xf800008f,
+/* 0x071d: ctx_mem */
+ 0x84008000,
0x000ff602,
- 0xffb204bd,
- 0x408a148e,
- 0x00008f7e,
- 0x8c8effb2,
- 0x8f7e41a8,
- 0x00f80000,
-/* 0x071c: ctx_mem */
- 0x02840080,
- 0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
- 0x84008f04,
- 0x00ffcf02,
- 0xf405fffd,
- 0x00f8f61b,
-/* 0x0734: ctx_load */
- 0x99f094bd,
- 0x37008005,
- 0x0009f602,
- 0x0c0a04bd,
- 0x0000b87e,
- 0x0080f4bd,
- 0x0ff60289,
- 0x8004bd00,
- 0xf602c100,
- 0x04bd0002,
- 0x02830080,
+/* 0x0726: ctx_mem_wait */
+ 0x008f04bd,
+ 0xffcf0284,
+ 0x05fffd00,
+ 0xf8f61bf4,
+/* 0x0735: ctx_load */
+ 0xf094bd00,
+ 0x00800599,
+ 0x09f60237,
+ 0x0a04bd00,
+ 0x00b87e0c,
+ 0x80f4bd00,
+ 0xf6028900,
+ 0x04bd000f,
+ 0x02c10080,
0xbd0002f6,
- 0x7e070f04,
- 0x8000071c,
- 0xf602c000,
- 0x04bd0002,
- 0xf0000bfe,
- 0x24b61f2a,
- 0x0220b604,
- 0x99f094bd,
- 0x37008008,
- 0x0009f602,
- 0x008004bd,
- 0x02f60281,
- 0xd204bd00,
- 0x80000000,
- 0x800225f0,
- 0xf6028800,
- 0x04bd0002,
- 0x00421001,
- 0x0223f002,
- 0xf80512fa,
- 0xf094bd03,
+ 0x83008004,
+ 0x0002f602,
+ 0x070f04bd,
+ 0x00071d7e,
+ 0x02c00080,
+ 0xbd0002f6,
+ 0x000bfe04,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0xf094bd02,
0x00800899,
- 0x09f60217,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01b50512,
- 0xf094bd16,
- 0x00800999,
0x09f60237,
0x8004bd00,
0xf6028100,
- 0x04bd0001,
- 0x00800102,
- 0x02f60288,
- 0x4104bd00,
- 0x13f00100,
- 0x0501fa06,
+ 0x04bd0002,
+ 0x000000d2,
+ 0x0225f080,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x42100104,
+ 0x23f00200,
+ 0x0512fa02,
0x94bd03f8,
- 0x800999f0,
+ 0x800899f0,
0xf6021700,
0x04bd0009,
- 0x99f094bd,
- 0x17008005,
- 0x0009f602,
- 0x00f804bd,
-/* 0x0820: ctx_chan */
- 0x0007347e,
- 0xb87e0c0a,
- 0x050f0000,
- 0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
- 0x039800f8,
- 0x81008041,
- 0x0003f602,
- 0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
- 0xf4ff34c4,
- 0x00450e1b,
- 0x0653f002,
- 0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
- 0x804e9803,
- 0x7e814f98,
- 0xb600008f,
- 0x12b60830,
- 0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
- 0x80160398,
- 0xf6028100,
- 0x04bd0003,
- 0x414000b5,
- 0x13f00100,
- 0x0601fa06,
- 0x00f803f8,
-/* 0x0880: ctx_xfer */
- 0x0080040e,
- 0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
- 0x8e04bd00,
- 0xcf030000,
- 0xe4f100ee,
- 0x1bf42000,
- 0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
- 0x0f0c02f4,
- 0x06fd7e10,
- 0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
- 0xac7e020f,
- 0xbb7e0006,
- 0xcd7e0006,
- 0xf4bd0006,
- 0x0006ac7e,
- 0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
- 0xbd160198,
- 0x05008024,
- 0x0002f601,
- 0x1fb204bd,
- 0x41a5008e,
- 0x00008f7e,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0x048effb2,
- 0x8f7e41a5,
- 0x167e0000,
- 0x24bd0002,
- 0x0247fc80,
- 0xbd0002f6,
- 0x012cf004,
- 0x800320b6,
- 0xf6024afc,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0xb50512fd,
+ 0x94bd1601,
+ 0x800999f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0001f6,
+ 0x80010204,
+ 0xf6028800,
0x04bd0002,
- 0xf001acf0,
- 0x000b06a5,
- 0x98000c98,
- 0x000e010d,
- 0x00013d7e,
- 0xec7e080a,
- 0x0a7e0000,
- 0x01f40002,
- 0x7e0c0a12,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xbd03f805,
+ 0x0999f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf094bd04,
+ 0x00800599,
+ 0x09f60217,
+ 0xf804bd00,
+/* 0x0821: ctx_chan */
+ 0x07357e00,
+ 0x7e0c0a00,
0x0f0000b8,
- 0x071c7e05,
- 0x2d02f400,
-/* 0x093c: ctx_xfer_post */
- 0xac7e020f,
- 0xf4bd0006,
- 0x0006fd7e,
- 0x0002277e,
- 0x0006bb7e,
- 0xac7ef4bd,
+ 0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+ 0x9800f800,
+ 0x00804103,
+ 0x03f60281,
+ 0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+ 0xff34c434,
+ 0x450e1bf4,
+ 0x53f00200,
+ 0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0x814f9880,
+ 0x00008f7e,
+ 0xb60830b6,
+ 0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+ 0x160398df,
+ 0x02810080,
+ 0xbd0003f6,
+ 0x4000b504,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xf803f806,
+/* 0x0881: ctx_xfer */
+ 0x80040e00,
+ 0xf6030200,
+ 0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+ 0x0300008e,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f51b,
+ 0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+ 0xfe7e100f,
0x11f40006,
- 0x40019810,
- 0xf40511fd,
- 0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
- 0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+ 0x7e020f1b,
+ 0x7e0006ad,
+ 0x7e0006bc,
+ 0xbd0006ce,
+ 0x06ad7ef4,
+ 0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+ 0x16019800,
+ 0x008024bd,
+ 0x02f60105,
+ 0xb204bd00,
+ 0xa5008e1f,
+ 0x008f7e41,
+ 0x01fcf000,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8effb205,
+ 0x7e41a504,
+ 0x7e00008f,
+ 0xbd000216,
+ 0x47fc8024,
+ 0x0002f602,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x024afc80,
+ 0xbd0002f6,
+ 0x01acf004,
+ 0x0b06a5f0,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x7e080a00,
+ 0x7e0000ec,
+ 0xf400020a,
+ 0x0c0a1201,
+ 0x0000b87e,
+ 0x1d7e050f,
+ 0x02f40007,
+/* 0x093d: ctx_xfer_post */
+ 0x7e020f2d,
+ 0xbd0006ad,
+ 0x06fe7ef4,
+ 0x02277e00,
+ 0x06bc7e00,
+ 0x7ef4bd00,
+ 0xf40006ad,
+ 0x01981011,
+ 0x0511fd40,
+ 0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+ 0xf8000833,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 649a442b4390..449dae753203 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -441,7 +441,7 @@ static uint32_t gm107_grhub_code[] = {
0x020014fe,
0x12004002,
0xbd0002f6,
- 0x05c94104,
+ 0x05ca4104,
0xbd0010fe,
0x07004024,
0xbd0002f6,
@@ -460,423 +460,423 @@ static uint32_t gm107_grhub_code[] = {
0x01039204,
0x03090080,
0xbd0003f6,
- 0x87044204,
- 0xf6040040,
- 0x04bd0002,
- 0x00400402,
- 0x0002f603,
- 0x31f404bd,
- 0x96048e10,
- 0x00657e40,
- 0xc7feb200,
- 0x01b590f1,
- 0x1ff4f003,
- 0x01020fb5,
- 0x041fbb01,
- 0x800112b6,
- 0xf6010300,
- 0x04bd0001,
- 0x01040080,
+ 0x87048204,
+ 0x04004000,
+ 0xbd0002f6,
+ 0x40040204,
+ 0x02f60300,
+ 0xf404bd00,
+ 0x048e1031,
+ 0x657e4096,
+ 0xfeb20000,
+ 0xb590f1c7,
+ 0xf4f00301,
+ 0x020fb51f,
+ 0x1fbb0101,
+ 0x0112b604,
+ 0x01030080,
0xbd0001f6,
- 0x01004104,
- 0xac7e020f,
- 0xbb7e0006,
- 0x100f0006,
- 0x0006fd7e,
- 0x98000e98,
- 0x207e010f,
- 0x14950001,
- 0xc0008008,
- 0x0004f601,
- 0x008004bd,
- 0x04f601c1,
- 0xb704bd00,
- 0xbb130030,
- 0xf5b6001f,
- 0xd3008002,
- 0x000ff601,
- 0x15b604bd,
- 0x0110b608,
- 0xb20814b6,
- 0x02687e1f,
- 0x001fbb00,
- 0x84020398,
-/* 0x041f: init_gpc */
- 0xb8502000,
- 0x0008044e,
- 0x8f7e1fb2,
+ 0x04008004,
+ 0x0001f601,
+ 0x004104bd,
+ 0x7e020f01,
+ 0x7e0006ad,
+ 0x0f0006bc,
+ 0x06fe7e10,
+ 0x000e9800,
+ 0x7e010f98,
+ 0x95000120,
+ 0x00800814,
+ 0x04f601c0,
+ 0x8004bd00,
+ 0xf601c100,
+ 0x04bd0004,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x008002f5,
+ 0x0ff601d3,
+ 0xb604bd00,
+ 0x10b60815,
+ 0x0814b601,
+ 0x687e1fb2,
+ 0x1fbb0002,
+ 0x02039800,
+ 0x50200084,
+/* 0x0420: init_gpc */
+ 0x08044eb8,
+ 0x7e1fb200,
+ 0xb800008f,
+ 0x00010c4e,
+ 0x8f7ef4bd,
0x4eb80000,
- 0xbd00010c,
- 0x008f7ef4,
- 0x044eb800,
- 0x8f7e0001,
+ 0x7e000104,
+ 0xb800008f,
+ 0x0001004e,
+ 0x8f7e020f,
0x4eb80000,
- 0x0f000100,
- 0x008f7e02,
- 0x004eb800,
-/* 0x044e: init_gpc_wait */
+/* 0x044f: init_gpc_wait */
+ 0x7e000800,
+ 0xc8000065,
+ 0x0bf41fff,
+ 0x044eb8f9,
0x657e0008,
- 0xffc80000,
- 0xf90bf41f,
- 0x08044eb8,
- 0x00657e00,
- 0x001fbb00,
- 0x800040b7,
- 0xf40132b6,
- 0x000fb41b,
- 0x0006fd7e,
- 0xac7e000f,
- 0x00800006,
- 0x01f60201,
- 0xbd04bd00,
- 0x1f19f014,
- 0x02300080,
- 0xbd0001f6,
-/* 0x0491: wait */
- 0x0028f404,
-/* 0x0497: main */
- 0x0d0031f4,
- 0x00377e10,
- 0xf401f400,
- 0x4001e4b1,
- 0x00c71bf5,
- 0x99f094bd,
- 0x37008004,
- 0x0009f602,
- 0x008104bd,
- 0x11cf02c0,
- 0xc1008200,
- 0x0022cf02,
- 0xf41f13c8,
- 0x23c8770b,
- 0x550bf41f,
- 0x12b220f9,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0231f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x20fc04bd,
- 0x99f094bd,
- 0x37008006,
- 0x0009f602,
- 0x31f404bd,
- 0x08807e01,
+ 0x1fbb0000,
+ 0x0040b700,
+ 0x0132b680,
+ 0x0fb41bf4,
+ 0x06fe7e00,
+ 0x7e000f00,
+ 0x800006ad,
+ 0xf6020100,
+ 0x04bd0001,
+ 0x19f014bd,
+ 0x3000801f,
+ 0x0001f602,
+/* 0x0492: wait */
+ 0x28f404bd,
+ 0x0031f400,
+/* 0x0498: main */
+ 0x377e100d,
+ 0x01f40000,
+ 0x01e4b1f4,
+ 0xc71bf540,
0xf094bd00,
- 0x00800699,
+ 0x00800499,
+ 0x09f60237,
+ 0x8104bd00,
+ 0xcf02c000,
+ 0x00820011,
+ 0x22cf02c1,
+ 0x1f13c800,
+ 0xc8770bf4,
+ 0x0bf41f23,
+ 0xb220f955,
+ 0xf094bd12,
+ 0x00800799,
+ 0x09f60237,
+ 0xf404bd00,
+ 0x31f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
+ 0xfc04bd00,
+ 0xf094bd20,
+ 0x00800699,
+ 0x09f60237,
0xf404bd00,
-/* 0x0522: chsw_prev_no_next */
- 0x20f92f0e,
- 0x32f412b2,
- 0x0232f401,
- 0x0008807e,
- 0x008020fc,
- 0x02f602c0,
+ 0x817e0131,
+ 0x94bd0008,
+ 0x800699f0,
+ 0xf6021700,
+ 0x04bd0009,
+/* 0x0523: chsw_prev_no_next */
+ 0xf92f0ef4,
+ 0xf412b220,
+ 0x32f40132,
+ 0x08817e02,
+ 0x8020fc00,
+ 0xf602c000,
+ 0x04bd0002,
+/* 0x053f: chsw_no_prev */
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0x7e0232f4,
+/* 0x054f: chsw_done */
+ 0x02000881,
+ 0xc3008001,
+ 0x0002f602,
+ 0x94bd04bd,
+ 0x800499f0,
+ 0xf6021700,
+ 0x04bd0009,
+ 0xff300ef5,
+/* 0x056c: main_not_ctx_switch */
+ 0xf401e4b0,
+ 0xf2b20c1b,
+ 0x0008217e,
+/* 0x057b: main_not_ctx_chan */
+ 0xb0400ef4,
+ 0x1bf402e4,
+ 0xf094bd2c,
+ 0x00800799,
+ 0x09f60237,
0xf404bd00,
-/* 0x053e: chsw_no_prev */
- 0x23c8130e,
- 0x0d0bf41f,
- 0xf40131f4,
- 0x807e0232,
-/* 0x054e: chsw_done */
- 0x01020008,
- 0x02c30080,
- 0xbd0002f6,
- 0xf094bd04,
- 0x00800499,
+ 0x32f40132,
+ 0x08817e02,
+ 0xf094bd00,
+ 0x00800799,
0x09f60217,
- 0xf504bd00,
-/* 0x056b: main_not_ctx_switch */
- 0xb0ff300e,
- 0x1bf401e4,
- 0x7ef2b20c,
- 0xf4000820,
-/* 0x057a: main_not_ctx_chan */
- 0xe4b0400e,
- 0x2c1bf402,
- 0x99f094bd,
- 0x37008007,
- 0x0009f602,
- 0x32f404bd,
- 0x0232f401,
- 0x0008807e,
- 0x99f094bd,
- 0x17008007,
- 0x0009f602,
- 0x0ef404bd,
-/* 0x05a9: main_not_ctx_save */
- 0x10ef9411,
- 0x7e01f5f0,
- 0xf50002f8,
-/* 0x05b7: main_done */
- 0xbdfee40e,
- 0x1f29f024,
- 0x02300080,
- 0xbd0002f6,
- 0xd20ef504,
-/* 0x05c9: ih */
- 0xf900f9fe,
- 0x0188fe80,
- 0x90f980f9,
- 0xb0f9a0f9,
- 0xe0f9d0f9,
- 0x04bdf0f9,
- 0xcf02004a,
- 0xabc400aa,
- 0x230bf404,
- 0x004e100d,
- 0x00eecf1a,
- 0xcf19004f,
- 0x047e00ff,
- 0xb0b70000,
- 0x010e0400,
- 0xf61d0040,
- 0x04bd000e,
-/* 0x060c: ih_no_fifo */
- 0x0100abe4,
- 0x0d0c0bf4,
- 0x40014e10,
- 0x0000047e,
-/* 0x061c: ih_no_ctxsw */
- 0x0400abe4,
- 0x8e560bf4,
- 0x7e400708,
+ 0xf404bd00,
+/* 0x05aa: main_not_ctx_save */
+ 0xef94110e,
+ 0x01f5f010,
+ 0x0002f87e,
+ 0xfee40ef5,
+/* 0x05b8: main_done */
+ 0x29f024bd,
+ 0x3000801f,
+ 0x0002f602,
+ 0x0ef504bd,
+/* 0x05ca: ih */
+ 0x00f9fed2,
+ 0x88fe80f9,
+ 0xf980f901,
+ 0xf9a0f990,
+ 0xf9d0f9b0,
+ 0xbdf0f9e0,
+ 0x02004a04,
+ 0xc400aacf,
+ 0x0bf404ab,
+ 0x4e100d23,
+ 0xeecf1a00,
+ 0x19004f00,
+ 0x7e00ffcf,
+ 0xb7000004,
+ 0x0e0400b0,
+ 0x1d004001,
+ 0xbd000ef6,
+/* 0x060d: ih_no_fifo */
+ 0x00abe404,
+ 0x0c0bf401,
+ 0x014e100d,
+ 0x00047e40,
+/* 0x061d: ih_no_ctxsw */
+ 0x00abe400,
+ 0x560bf404,
+ 0x4007088e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60204,
+ 0x8e04bd00,
+ 0x7e400704,
0xb2000065,
- 0x040080ff,
+ 0x030080ff,
0x000ff602,
- 0x048e04bd,
- 0x657e4007,
- 0xffb20000,
- 0x02030080,
- 0xbd000ff6,
- 0x50fec704,
- 0x8f02ee94,
- 0xbb400700,
- 0x657e00ef,
- 0x00800000,
- 0x0ff60202,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x4007008f,
+ 0x7e00efbb,
+ 0x80000065,
+ 0xf6020200,
+ 0x04bd000f,
+ 0xf87e030f,
+ 0x004b0002,
+ 0x8ebfb201,
+ 0x7e400144,
+/* 0x0677: ih_no_fwmthd */
+ 0x4b00008f,
+ 0xb0bd0504,
+ 0xf4b4abff,
+ 0x00800c0b,
+ 0x0bf60307,
+/* 0x068b: ih_no_other */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x06ad: ctx_4170s */
+ 0x10f5f001,
+ 0x708effb2,
+ 0x8f7e4041,
+ 0x00f80000,
+/* 0x06bc: ctx_4170w */
+ 0x4041708e,
+ 0x0000657e,
+ 0xf4f0ffb2,
+ 0xf31bf410,
+/* 0x06ce: ctx_redswitch */
+ 0x004e00f8,
+ 0x40e5f002,
+ 0xf020e5f0,
+ 0x008010e5,
+ 0x0ef60185,
0x0f04bd00,
- 0x02f87e03,
- 0x01004b00,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x0676: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd05,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x068a: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0xf400fc80,
- 0x01f80032,
-/* 0x06ac: ctx_4170s */
- 0xb210f5f0,
- 0x41708eff,
+/* 0x06e5: ctx_redswitch_delay */
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xf10400e5,
+ 0x800100e5,
+ 0xf6018500,
+ 0x04bd000e,
+/* 0x06fe: ctx_86c */
+ 0x008000f8,
+ 0x0ff60223,
+ 0xb204bd00,
+ 0x8a148eff,
0x008f7e40,
-/* 0x06bb: ctx_4170w */
- 0x8e00f800,
- 0x7e404170,
- 0xb2000065,
- 0x10f4f0ff,
- 0xf8f31bf4,
-/* 0x06cd: ctx_redswitch */
- 0x02004e00,
- 0xf040e5f0,
- 0xe5f020e5,
- 0x85008010,
- 0x000ef601,
- 0x080f04bd,
-/* 0x06e4: ctx_redswitch_delay */
- 0xf401f2b6,
- 0xe5f1fd1b,
- 0xe5f10400,
- 0x00800100,
- 0x0ef60185,
- 0xf804bd00,
-/* 0x06fd: ctx_86c */
- 0x23008000,
+ 0x8effb200,
+ 0x7e41a88c,
+ 0xf800008f,
+/* 0x071d: ctx_mem */
+ 0x84008000,
0x000ff602,
- 0xffb204bd,
- 0x408a148e,
- 0x00008f7e,
- 0x8c8effb2,
- 0x8f7e41a8,
- 0x00f80000,
-/* 0x071c: ctx_mem */
- 0x02840080,
- 0xbd000ff6,
-/* 0x0725: ctx_mem_wait */
- 0x84008f04,
- 0x00ffcf02,
- 0xf405fffd,
- 0x00f8f61b,
-/* 0x0734: ctx_load */
- 0x99f094bd,
- 0x37008005,
- 0x0009f602,
- 0x0c0a04bd,
- 0x0000b87e,
- 0x0080f4bd,
- 0x0ff60289,
- 0x8004bd00,
- 0xf602c100,
- 0x04bd0002,
- 0x02830080,
+/* 0x0726: ctx_mem_wait */
+ 0x008f04bd,
+ 0xffcf0284,
+ 0x05fffd00,
+ 0xf8f61bf4,
+/* 0x0735: ctx_load */
+ 0xf094bd00,
+ 0x00800599,
+ 0x09f60237,
+ 0x0a04bd00,
+ 0x00b87e0c,
+ 0x80f4bd00,
+ 0xf6028900,
+ 0x04bd000f,
+ 0x02c10080,
0xbd0002f6,
- 0x7e070f04,
- 0x8000071c,
- 0xf602c000,
- 0x04bd0002,
- 0xf0000bfe,
- 0x24b61f2a,
- 0x0220b604,
- 0x99f094bd,
- 0x37008008,
- 0x0009f602,
- 0x008004bd,
- 0x02f60281,
- 0xd204bd00,
- 0x80000000,
- 0x800225f0,
- 0xf6028800,
- 0x04bd0002,
- 0x00421001,
- 0x0223f002,
- 0xf80512fa,
- 0xf094bd03,
+ 0x83008004,
+ 0x0002f602,
+ 0x070f04bd,
+ 0x00071d7e,
+ 0x02c00080,
+ 0xbd0002f6,
+ 0x000bfe04,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0xf094bd02,
0x00800899,
- 0x09f60217,
- 0x9804bd00,
- 0x14b68101,
- 0x80029818,
- 0xfd0825b6,
- 0x01b50512,
- 0xf094bd16,
- 0x00800999,
0x09f60237,
0x8004bd00,
0xf6028100,
- 0x04bd0001,
- 0x00800102,
- 0x02f60288,
- 0x4104bd00,
- 0x13f00100,
- 0x0501fa06,
+ 0x04bd0002,
+ 0x000000d2,
+ 0x0225f080,
+ 0x02880080,
+ 0xbd0002f6,
+ 0x42100104,
+ 0x23f00200,
+ 0x0512fa02,
0x94bd03f8,
- 0x800999f0,
+ 0x800899f0,
0xf6021700,
0x04bd0009,
- 0x99f094bd,
- 0x17008005,
- 0x0009f602,
- 0x00f804bd,
-/* 0x0820: ctx_chan */
- 0x0007347e,
- 0xb87e0c0a,
- 0x050f0000,
- 0x00071c7e,
-/* 0x0832: ctx_mmio_exec */
- 0x039800f8,
- 0x81008041,
- 0x0003f602,
- 0x34bd04bd,
-/* 0x0840: ctx_mmio_loop */
- 0xf4ff34c4,
- 0x00450e1b,
- 0x0653f002,
- 0xf80535fa,
-/* 0x0851: ctx_mmio_pull */
- 0x804e9803,
- 0x7e814f98,
- 0xb600008f,
- 0x12b60830,
- 0xdf1bf401,
-/* 0x0864: ctx_mmio_done */
- 0x80160398,
- 0xf6028100,
- 0x04bd0003,
- 0x414000b5,
- 0x13f00100,
- 0x0601fa06,
- 0x00f803f8,
-/* 0x0880: ctx_xfer */
- 0x0080040e,
- 0x0ef60302,
-/* 0x088b: ctx_xfer_idle */
- 0x8e04bd00,
- 0xcf030000,
- 0xe4f100ee,
- 0x1bf42000,
- 0x0611f4f5,
-/* 0x089f: ctx_xfer_pre */
- 0x0f0c02f4,
- 0x06fd7e10,
- 0x1b11f400,
-/* 0x08a8: ctx_xfer_pre_load */
- 0xac7e020f,
- 0xbb7e0006,
- 0xcd7e0006,
- 0xf4bd0006,
- 0x0006ac7e,
- 0x0007347e,
-/* 0x08c0: ctx_xfer_exec */
- 0xbd160198,
- 0x05008024,
- 0x0002f601,
- 0x1fb204bd,
- 0x41a5008e,
- 0x00008f7e,
- 0xf001fcf0,
- 0x24b6022c,
- 0x05f2fd01,
- 0x048effb2,
- 0x8f7e41a5,
- 0x167e0000,
- 0x24bd0002,
- 0x0247fc80,
- 0xbd0002f6,
- 0x012cf004,
- 0x800320b6,
- 0xf6024afc,
+ 0xb6810198,
+ 0x02981814,
+ 0x0825b680,
+ 0xb50512fd,
+ 0x94bd1601,
+ 0x800999f0,
+ 0xf6023700,
+ 0x04bd0009,
+ 0x02810080,
+ 0xbd0001f6,
+ 0x80010204,
+ 0xf6028800,
0x04bd0002,
- 0xf001acf0,
- 0x000b06a5,
- 0x98000c98,
- 0x000e010d,
- 0x00013d7e,
- 0xec7e080a,
- 0x0a7e0000,
- 0x01f40002,
- 0x7e0c0a12,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xbd03f805,
+ 0x0999f094,
+ 0x02170080,
+ 0xbd0009f6,
+ 0xf094bd04,
+ 0x00800599,
+ 0x09f60217,
+ 0xf804bd00,
+/* 0x0821: ctx_chan */
+ 0x07357e00,
+ 0x7e0c0a00,
0x0f0000b8,
- 0x071c7e05,
- 0x2d02f400,
-/* 0x093c: ctx_xfer_post */
- 0xac7e020f,
- 0xf4bd0006,
- 0x0006fd7e,
- 0x0002277e,
- 0x0006bb7e,
- 0xac7ef4bd,
+ 0x071d7e05,
+/* 0x0833: ctx_mmio_exec */
+ 0x9800f800,
+ 0x00804103,
+ 0x03f60281,
+ 0xbd04bd00,
+/* 0x0841: ctx_mmio_loop */
+ 0xff34c434,
+ 0x450e1bf4,
+ 0x53f00200,
+ 0x0535fa06,
+/* 0x0852: ctx_mmio_pull */
+ 0x4e9803f8,
+ 0x814f9880,
+ 0x00008f7e,
+ 0xb60830b6,
+ 0x1bf40112,
+/* 0x0865: ctx_mmio_done */
+ 0x160398df,
+ 0x02810080,
+ 0xbd0003f6,
+ 0x4000b504,
+ 0xf0010041,
+ 0x01fa0613,
+ 0xf803f806,
+/* 0x0881: ctx_xfer */
+ 0x80040e00,
+ 0xf6030200,
+ 0x04bd000e,
+/* 0x088c: ctx_xfer_idle */
+ 0x0300008e,
+ 0xf100eecf,
+ 0xf42000e4,
+ 0x11f4f51b,
+ 0x0c02f406,
+/* 0x08a0: ctx_xfer_pre */
+ 0xfe7e100f,
0x11f40006,
- 0x40019810,
- 0xf40511fd,
- 0x327e070b,
-/* 0x0966: ctx_xfer_no_post_mmio */
-/* 0x0966: ctx_xfer_done */
- 0x00f80008,
+/* 0x08a9: ctx_xfer_pre_load */
+ 0x7e020f1b,
+ 0x7e0006ad,
+ 0x7e0006bc,
+ 0xbd0006ce,
+ 0x06ad7ef4,
+ 0x07357e00,
+/* 0x08c1: ctx_xfer_exec */
+ 0x16019800,
+ 0x008024bd,
+ 0x02f60105,
+ 0xb204bd00,
+ 0xa5008e1f,
+ 0x008f7e41,
+ 0x01fcf000,
+ 0xb6022cf0,
+ 0xf2fd0124,
+ 0x8effb205,
+ 0x7e41a504,
+ 0x7e00008f,
+ 0xbd000216,
+ 0x47fc8024,
+ 0x0002f602,
+ 0x2cf004bd,
+ 0x0320b601,
+ 0x024afc80,
+ 0xbd0002f6,
+ 0x01acf004,
+ 0x0b06a5f0,
+ 0x000c9800,
+ 0x0e010d98,
+ 0x013d7e00,
+ 0x7e080a00,
+ 0x7e0000ec,
+ 0xf400020a,
+ 0x0c0a1201,
+ 0x0000b87e,
+ 0x1d7e050f,
+ 0x02f40007,
+/* 0x093d: ctx_xfer_post */
+ 0x7e020f2d,
+ 0xbd0006ad,
+ 0x06fe7ef4,
+ 0x02277e00,
+ 0x06bc7e00,
+ 0x7ef4bd00,
+ 0xf40006ad,
+ 0x01981011,
+ 0x0511fd40,
+ 0x7e070bf4,
+/* 0x0967: ctx_xfer_no_post_mmio */
+/* 0x0967: ctx_xfer_done */
+ 0xf8000833,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c578deb5867a..dd8f85b8b3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,9 +26,9 @@
#include "fuc/os.h"
#include <core/client.h>
-#include <core/option.h>
#include <core/firmware.h>
-#include <subdev/secboot.h>
+#include <core/option.h>
+#include <subdev/acr.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/pmu.h>
@@ -1636,7 +1636,7 @@ gf100_gr_intr(struct nvkm_gr *base)
static void
gf100_gr_init_fw(struct nvkm_falcon *falcon,
- struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
+ struct nvkm_blob *code, struct nvkm_blob *data)
{
nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
@@ -1690,26 +1690,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
{
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_secboot *sb = device->secboot;
- u32 secboot_mask = 0;
+ u32 lsf_mask = 0;
int ret;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
- else
- gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d);
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
- else
- gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad);
+ if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) {
+ gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst,
+ &gr->fecs.data);
+ } else {
+ lsf_mask |= BIT(NVKM_ACR_LSF_FECS);
+ }
- if (secboot_mask != 0) {
- int ret = nvkm_secboot_reset(sb, secboot_mask);
+ if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) {
+ gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst,
+ &gr->gpccs.data);
+ } else {
+ lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS);
+ }
+
+ if (lsf_mask) {
+ ret = nvkm_acr_bootstrap_falcons(device, lsf_mask);
if (ret)
return ret;
}
@@ -1721,8 +1725,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a10c, 0x00000000);
nvkm_wr32(device, 0x40910c, 0x00000000);
- nvkm_falcon_start(gr->gpccs.falcon);
- nvkm_falcon_start(gr->fecs.falcon);
+ nvkm_falcon_start(&gr->gpccs.falcon);
+ nvkm_falcon_start(&gr->fecs.falcon);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1784,18 +1788,18 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
/* load HUB microcode */
nvkm_mc_unk260(device, 0);
- nvkm_falcon_load_dmem(gr->fecs.falcon,
+ nvkm_falcon_load_dmem(&gr->fecs.falcon,
gr->func->fecs.ucode->data.data, 0x0,
gr->func->fecs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->fecs.falcon,
+ nvkm_falcon_load_imem(&gr->fecs.falcon,
gr->func->fecs.ucode->code.data, 0x0,
gr->func->fecs.ucode->code.size, 0, 0, false);
/* load GPC microcode */
- nvkm_falcon_load_dmem(gr->gpccs.falcon,
+ nvkm_falcon_load_dmem(&gr->gpccs.falcon,
gr->func->gpccs.ucode->data.data, 0x0,
gr->func->gpccs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->gpccs.falcon,
+ nvkm_falcon_load_imem(&gr->gpccs.falcon,
gr->func->gpccs.ucode->code.data, 0x0,
gr->func->gpccs.ucode->code.size, 0, 0, false);
nvkm_mc_unk260(device, 1);
@@ -1941,17 +1945,6 @@ gf100_gr_oneinit(struct nvkm_gr *base)
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
int i, j;
- int ret;
-
- ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon);
- if (ret)
- return ret;
-
- mutex_init(&gr->fecs.mutex);
-
- ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon);
- if (ret)
- return ret;
nvkm_pmu_pgob(device->pmu, false);
@@ -1992,11 +1985,11 @@ gf100_gr_init_(struct nvkm_gr *base)
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
- ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
+ ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
if (ret)
return ret;
- ret = nvkm_falcon_get(gr->gpccs.falcon, subdev);
+ ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev);
if (ret)
return ret;
@@ -2004,49 +1997,34 @@ gf100_gr_init_(struct nvkm_gr *base)
}
static int
-gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
+gf100_gr_fini(struct nvkm_gr *base, bool suspend)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- nvkm_falcon_put(gr->gpccs.falcon, subdev);
- nvkm_falcon_put(gr->fecs.falcon, subdev);
+ nvkm_falcon_put(&gr->gpccs.falcon, subdev);
+ nvkm_falcon_put(&gr->fecs.falcon, subdev);
return 0;
}
-void
-gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
-{
- kfree(fuc->data);
- fuc->data = NULL;
-}
-
-static void
-gf100_gr_dtor_init(struct gf100_gr_pack *pack)
-{
- vfree(pack);
-}
-
void *
gf100_gr_dtor(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
- if (gr->func->dtor)
- gr->func->dtor(gr);
kfree(gr->data);
- nvkm_falcon_del(&gr->gpccs.falcon);
- nvkm_falcon_del(&gr->fecs.falcon);
+ nvkm_falcon_dtor(&gr->gpccs.falcon);
+ nvkm_falcon_dtor(&gr->fecs.falcon);
- gf100_gr_dtor_fw(&gr->fuc409c);
- gf100_gr_dtor_fw(&gr->fuc409d);
- gf100_gr_dtor_fw(&gr->fuc41ac);
- gf100_gr_dtor_fw(&gr->fuc41ad);
+ nvkm_blob_dtor(&gr->fecs.inst);
+ nvkm_blob_dtor(&gr->fecs.data);
+ nvkm_blob_dtor(&gr->gpccs.inst);
+ nvkm_blob_dtor(&gr->gpccs.data);
- gf100_gr_dtor_init(gr->fuc_bundle);
- gf100_gr_dtor_init(gr->fuc_method);
- gf100_gr_dtor_init(gr->fuc_sw_ctx);
- gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+ vfree(gr->bundle);
+ vfree(gr->method);
+ vfree(gr->sw_ctx);
+ vfree(gr->sw_nonctx);
return gr;
}
@@ -2056,7 +2034,7 @@ gf100_gr_ = {
.dtor = gf100_gr_dtor,
.oneinit = gf100_gr_oneinit,
.init = gf100_gr_init_,
- .fini = gf100_gr_fini_,
+ .fini = gf100_gr_fini,
.intr = gf100_gr_intr,
.units = gf100_gr_units,
.chan_new = gf100_gr_chan_new,
@@ -2067,87 +2045,24 @@ gf100_gr_ = {
.ctxsw.inst = gf100_gr_ctxsw_inst,
};
-int
-gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc, int ret)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- const struct firmware *fw;
- char f[32];
-
- /* see if this firmware has a legacy path */
- if (!strcmp(fwname, "fecs_inst"))
- fwname = "fuc409c";
- else if (!strcmp(fwname, "fecs_data"))
- fwname = "fuc409d";
- else if (!strcmp(fwname, "gpccs_inst"))
- fwname = "fuc41ac";
- else if (!strcmp(fwname, "gpccs_data"))
- fwname = "fuc41ad";
- else {
- /* nope, let's just return the error we got */
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
-
- /* yes, try to load from the legacy path */
- nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
- ret = request_firmware(&fw, f, device->dev);
- if (ret) {
- snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, device->dev);
- if (ret) {
- nvkm_error(subdev, "failed to load %s\n", fwname);
- return ret;
- }
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc)
-{
- const struct firmware *fw;
- int ret;
-
- ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
- if (ret) {
- ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
- if (ret)
- return -ENODEV;
- return 0;
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-int
-gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct gf100_gr *gr)
-{
- gr->func = func;
- gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
- func->fecs.ucode == NULL);
-
- return nvkm_gr_ctor(&gf100_gr_, device, index,
- gr->firmware || func->fecs.ucode != NULL,
- &gr->base);
-}
+static const struct nvkm_falcon_func
+gf100_gr_flcn = {
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
int
-gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif,
+ struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
struct gf100_gr *gr;
int ret;
@@ -2156,22 +2071,49 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- ret = gf100_gr_ctor(func, device, index, gr);
+ ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
if (ret)
return ret;
- if (gr->firmware) {
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
- }
+ fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ gr->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+ "fecs", 0x409000, &gr->fecs.falcon);
+ if (ret)
+ return ret;
+
+ mutex_init(&gr->fecs.mutex);
+
+ ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev,
+ "gpccs", 0x41a000, &gr->gpccs.falcon);
+ if (ret)
+ return ret;
return 0;
}
void
+gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int gpc, i, j;
+ u32 data;
+
+ for (gpc = 0, i = 0; i < 4; i++) {
+ for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
+ data |= gr->tpc_nr[gpc] << (j * 4);
+ if (pd)
+ nvkm_wr32(device, 0x406028 + (i * 4), data);
+ if (ds)
+ nvkm_wr32(device, 0x405870 + (i * 4), data);
+ }
+}
+
+void
gf100_gr_init_400054(struct gf100_gr *gr)
{
nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
@@ -2295,8 +2237,8 @@ gf100_gr_init(struct gf100_gr *gr)
gr->func->init_gpc_mmu(gr);
- if (gr->fuc_sw_nonctx)
- gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+ if (gr->sw_nonctx)
+ gf100_gr_mmio(gr, gr->sw_nonctx);
else
gf100_gr_mmio(gr, gr->func->mmio);
@@ -2320,6 +2262,8 @@ gf100_gr_init(struct gf100_gr *gr)
gr->func->init_bios_2(gr);
if (gr->func->init_swdx_pes_mask)
gr->func->init_swdx_pes_mask(gr);
+ if (gr->func->init_fs)
+ gr->func->init_fs(gr);
nvkm_wr32(device, 0x400500, 0x00010001);
@@ -2338,8 +2282,8 @@ gf100_gr_init(struct gf100_gr *gr)
if (gr->func->init_40601c)
gr->func->init_40601c(gr);
- nvkm_wr32(device, 0x404490, 0xc0000000);
nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
if (gr->func->init_sked_hww_esr)
gr->func->init_sked_hww_esr(gr);
@@ -2454,7 +2398,66 @@ gf100_gr = {
};
int
+gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ gr->firmware = false;
+ return 0;
+}
+
+static int
+gf100_gr_load_fw(struct gf100_gr *gr, const char *name,
+ struct nvkm_blob *blob)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", name);
+ ret = request_firmware(&fw, f, device->dev);
+ if (ret) {
+ nvkm_error(subdev, "failed to load %s\n", name);
+ return ret;
+ }
+ }
+
+ blob->size = fw->size;
+ blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (blob->data != NULL) ? 0 : -ENOMEM;
+}
+
+int
+gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false))
+ return -EINVAL;
+
+ if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) ||
+ gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) ||
+ gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) ||
+ gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data))
+ return -ENOENT;
+
+ gr->firmware = true;
+ return 0;
+}
+
+static const struct gf100_gr_fwif
+gf100_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf100_gr },
+ { -1, gf100_gr_nofw, &gf100_gr },
+ {}
+};
+
+int
gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf100_gr, device, index, pgr);
+ return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index fafdd0bbea9b..88bcb57c2e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -31,6 +31,8 @@
#include <subdev/mmu.h>
#include <engine/falcon.h>
+struct nvkm_acr_lsfw;
+
#define GPC_MAX 32
#define TPC_MAX_PER_GPC 8
#define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC)
@@ -55,11 +57,6 @@ struct gf100_gr_mmio {
int buffer;
};
-struct gf100_gr_fuc {
- u32 *data;
- u32 size;
-};
-
struct gf100_gr_zbc_color {
u32 format;
u32 ds[4];
@@ -83,29 +80,30 @@ struct gf100_gr {
struct nvkm_gr base;
struct {
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
+ struct nvkm_blob inst;
+ struct nvkm_blob data;
+
struct mutex mutex;
u32 disable;
} fecs;
struct {
- struct nvkm_falcon *falcon;
+ struct nvkm_falcon falcon;
+ struct nvkm_blob inst;
+ struct nvkm_blob data;
} gpccs;
- struct gf100_gr_fuc fuc409c;
- struct gf100_gr_fuc fuc409d;
- struct gf100_gr_fuc fuc41ac;
- struct gf100_gr_fuc fuc41ad;
bool firmware;
/*
* Used if the register packs are loaded from NVIDIA fw instead of
* using hardcoded arrays. To be allocated with vzalloc().
*/
- struct gf100_gr_pack *fuc_sw_nonctx;
- struct gf100_gr_pack *fuc_sw_ctx;
- struct gf100_gr_pack *fuc_bundle;
- struct gf100_gr_pack *fuc_method;
+ struct gf100_gr_pack *sw_nonctx;
+ struct gf100_gr_pack *sw_ctx;
+ struct gf100_gr_pack *bundle;
+ struct gf100_gr_pack *method;
struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
@@ -140,12 +138,6 @@ struct gf100_gr {
u32 size_pm;
};
-int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct gf100_gr *);
-int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct nvkm_gr **);
-void *gf100_gr_dtor(struct nvkm_gr *);
-
int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst);
struct gf100_gr_func_zbc {
@@ -157,7 +149,6 @@ struct gf100_gr_func_zbc {
};
struct gf100_gr_func {
- void (*dtor)(struct gf100_gr *);
void (*oneinit_tiles)(struct gf100_gr *);
void (*oneinit_sm_id)(struct gf100_gr *);
int (*init)(struct gf100_gr *);
@@ -171,6 +162,7 @@ struct gf100_gr_func {
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_bios_2)(struct gf100_gr *);
void (*init_swdx_pes_mask)(struct gf100_gr *);
+ void (*init_fs)(struct gf100_gr *);
void (*init_fecs_exceptions)(struct gf100_gr *);
void (*init_ds_hww_esr_2)(struct gf100_gr *);
void (*init_40601c)(struct gf100_gr *);
@@ -217,6 +209,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *);
void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
void gf100_gr_init_400054(struct gf100_gr *);
+void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool);
extern const struct gf100_gr_func_zbc gf100_gr_zbc;
void gf117_gr_init_zcull(struct gf100_gr *);
@@ -245,10 +238,18 @@ void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
+extern const struct gf100_gr_func_zbc gp100_gr_zbc;
void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
extern const struct gf100_gr_func_zbc gp102_gr_zbc;
+extern const struct gf100_gr_func gp107_gr;
+
+void gv100_gr_init_419bd8(struct gf100_gr *);
+void gv100_gr_init_504430(struct gf100_gr *, int, int);
+void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+void gv100_gr_trap_mp(struct gf100_gr *, int, int);
+
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
#include <core/object.h>
@@ -269,9 +270,6 @@ struct gf100_gr_chan {
void gf100_gr_ctxctl_debug(struct gf100_gr *);
-void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
-int gf100_gr_ctor_fw(struct gf100_gr *, const char *,
- struct gf100_gr_fuc *);
u64 gf100_gr_units(struct nvkm_gr *);
void gf100_gr_zbc_init(struct gf100_gr *);
@@ -294,8 +292,8 @@ struct gf100_gr_pack {
for (init = pack->init; init && init->count; init++)
struct gf100_gr_ucode {
- struct gf100_gr_fuc code;
- struct gf100_gr_fuc data;
+ struct nvkm_blob code;
+ struct nvkm_blob data;
};
extern struct gf100_gr_ucode gf100_gr_fecs_ucode;
@@ -310,17 +308,6 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
int gf100_gr_init_ctxctl(struct gf100_gr *);
-/* external bundles loading functions */
-int gk20a_gr_av_to_init(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-int gk20a_gr_av_to_method(struct gf100_gr *, const char *,
- struct gf100_gr_pack **);
-
-int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int,
- struct nvkm_gr **);
-
/* register init value lists */
extern const struct gf100_gr_init gf100_gr_init_main_0[];
@@ -403,4 +390,31 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
void gm200_gr_init_gpc_mmu(struct gf100_gr *);
+
+struct gf100_gr_fwif {
+ int version;
+ int (*load)(struct gf100_gr *, int ver, const struct gf100_gr_fwif *);
+ const struct gf100_gr_func *func;
+ const struct nvkm_acr_lsf_func *fecs;
+ const struct nvkm_acr_lsf_func *gpccs;
+};
+
+int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+
+int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver);
+
+int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *);
+extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr;
+
+extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr;
+void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
+
+extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
+extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
+
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+ struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 42c2fd9fc04e..0536fe8b2b92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -144,8 +144,15 @@ gf104_gr = {
}
};
+static const struct gf100_gr_fwif
+gf104_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf104_gr },
+ { -1, gf100_gr_nofw, &gf104_gr },
+ {}
+};
+
int
gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf104_gr, device, index, pgr);
+ return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 4731a460adc7..14284b06112f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -143,8 +143,15 @@ gf108_gr = {
}
};
+const struct gf100_gr_fwif
+gf108_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf108_gr },
+ { -1, gf100_gr_nofw, &gf108_gr },
+ {}
+};
+
int
gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf108_gr, device, index, pgr);
+ return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index cdf759c8cd7f..280752551a3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -119,8 +119,15 @@ gf110_gr = {
}
};
+static const struct gf100_gr_fwif
+gf110_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf110_gr },
+ { -1, gf100_gr_nofw, &gf110_gr },
+ {}
+};
+
int
gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf110_gr, device, index, pgr);
+ return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index a4158f84c649..235c3fbe4b95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -184,8 +184,15 @@ gf117_gr = {
}
};
+static const struct gf100_gr_fwif
+gf117_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf117_gr },
+ { -1, gf100_gr_nofw, &gf117_gr },
+ {}
+};
+
int
gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf117_gr, device, index, pgr);
+ return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 4197844870b3..7eac385ece97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -210,8 +210,15 @@ gf119_gr = {
}
};
+static const struct gf100_gr_fwif
+gf119_gr_fwif[] = {
+ { -1, gf100_gr_load, &gf119_gr },
+ { -1, gf100_gr_nofw, &gf119_gr },
+ {}
+};
+
int
gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gf119_gr, device, index, pgr);
+ return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 477fee3e3715..89f51d76082b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -489,8 +489,15 @@ gk104_gr = {
}
};
+static const struct gf100_gr_fwif
+gk104_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk104_gr },
+ { -1, gf100_gr_nofw, &gk104_gr },
+ {}
+};
+
int
gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk104_gr, device, index, pgr);
+ return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 7cd628c84e07..735f05e54d62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -385,8 +385,15 @@ gk110_gr = {
}
};
+static const struct gf100_gr_fwif
+gk110_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk110_gr },
+ { -1, gf100_gr_nofw, &gk110_gr },
+ {}
+};
+
int
gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk110_gr, device, index, pgr);
+ return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index a38faa215635..adc971be8f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -136,8 +136,15 @@ gk110b_gr = {
}
};
+static const struct gf100_gr_fwif
+gk110b_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk110b_gr },
+ { -1, gf100_gr_nofw, &gk110b_gr },
+ {}
+};
+
int
gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+ return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 58456660e603..aa0eff6795ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -194,8 +194,15 @@ gk208_gr = {
}
};
+static const struct gf100_gr_fwif
+gk208_gr_fwif[] = {
+ { -1, gf100_gr_load, &gk208_gr },
+ { -1, gf100_gr_nofw, &gk208_gr },
+ {}
+};
+
int
gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gk208_gr, device, index, pgr);
+ return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 500cb08dd608..4209b24a46d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -22,6 +22,7 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
#include <subdev/timer.h>
#include <nvif/class.h>
@@ -33,21 +34,22 @@ struct gk20a_fw_av
};
int
-gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
int nent;
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ nent = (blob.size / sizeof(struct gk20a_fw_av));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
if (!pack) {
ret = -ENOMEM;
@@ -59,7 +61,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -70,7 +72,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
@@ -82,21 +84,22 @@ struct gk20a_fw_aiv
};
int
-gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
int nent;
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
+ nent = (blob.size / sizeof(struct gk20a_fw_aiv));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
if (!pack) {
ret = -ENOMEM;
@@ -108,7 +111,7 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];
+ struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -119,15 +122,16 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
int
-gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
- struct gf100_gr_pack **ppack)
+gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
+ int ver, struct gf100_gr_pack **ppack)
{
- struct gf100_gr_fuc fuc;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_blob blob;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
/* We don't expect to initialize more than 16 classes here... */
@@ -137,29 +141,30 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
int ret;
int i;
- ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
if (ret)
return ret;
- nent = (fuc.size / sizeof(struct gk20a_fw_av));
+ nent = (blob.size / sizeof(struct gk20a_fw_av));
- pack = vzalloc((sizeof(*pack) * max_classes) +
- (sizeof(*init) * (nent + 1)));
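+	/* Allow a terminating init entry after each class's method list,
+	 * plus a terminating pack entry. */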
+ pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
+ (sizeof(*init) * (nent + max_classes + 1)));
if (!pack) {
ret = -ENOMEM;
goto end;
}
- init = (void *)(pack + max_classes);
+ init = (void *)(pack + max_classes + 1);
- for (i = 0; i < nent; i++) {
- struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
+ for (i = 0; i < nent; i++, init++) {
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
u32 class = av->addr & 0xffff;
u32 addr = (av->addr & 0xffff0000) >> 14;
if (prevclass != class) {
- pack[classidx].init = ent;
+ if (prevclass) /* Add terminator to the method list. */
+ init++;
+ pack[classidx].init = init;
pack[classidx].type = class;
prevclass = class;
if (++classidx >= max_classes) {
@@ -169,16 +174,16 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
}
}
- ent->addr = addr;
- ent->data = av->data;
- ent->count = 1;
- ent->pitch = 1;
+ init->addr = addr;
+ init->data = av->data;
+ init->count = 1;
+ init->pitch = 1;
}
*ppack = pack;
end:
- gf100_gr_dtor_fw(&fuc);
+ nvkm_blob_dtor(&blob);
return ret;
}
@@ -224,7 +229,7 @@ gk20a_gr_init(struct gf100_gr *gr)
/* Clear SCC RAM */
nvkm_wr32(device, 0x40802c, 0x1);
- gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+ gf100_gr_mmio(gr, gr->sw_nonctx);
ret = gk20a_gr_wait_mem_scrubbing(gr);
if (ret)
@@ -303,40 +308,45 @@ gk20a_gr = {
};
int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
{
- struct gf100_gr *gr;
- int ret;
+ if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) ||
+ gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) ||
+ gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) ||
+ gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method))
+ return -ENOENT;
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
- return -ENOMEM;
- *pgr = &gr->base;
-
- ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
- if (ret)
- return ret;
+ return 0;
+}
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
+static int
+gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
- if (ret)
- return ret;
+ if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver,
+ &gr->fecs.inst) ||
+ nvkm_firmware_load_blob(subdev, "", "fecs_data", ver,
+ &gr->fecs.data) ||
+ nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver,
+ &gr->gpccs.inst) ||
+ nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver,
+ &gr->gpccs.data))
+ return -ENOENT;
- ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
- if (ret)
- return ret;
+ gr->firmware = true;
- ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
- if (ret)
- return ret;
+ return gk20a_gr_load_sw(gr, "", ver);
+}
- ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
- if (ret)
- return ret;
+static const struct gf100_gr_fwif
+gk20a_gr_fwif[] = {
+ { -1, gk20a_gr_load, &gk20a_gr },
+ {}
+};
- return 0;
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 92e31d397207..09bb78ba9d00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -429,8 +429,15 @@ gm107_gr = {
}
};
+static const struct gf100_gr_fwif
+gm107_gr_fwif[] = {
+ { -1, gf100_gr_load, &gm107_gr },
+ { -1, gf100_gr_nofw, &gm107_gr },
+ {}
+};
+
int
gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(&gm107_gr, device, index, pgr);
+ return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index eff30662b984..3d67cfb08395 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -24,14 +24,64 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
+#include <subdev/acr.h>
#include <subdev/secboot.h>
+#include <nvfw/flcn.h>
+
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
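+/* flcn_bl_dmem_desc_v1 stores full 64-bit DMA addresses, so patching the
+ * descriptor for a relocated image is a plain add of the adjustment. */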
+static void
+gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v1 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = base + lsfw->app_resident_code_offset;
+ const u64 data = base + lsfw->app_resident_data_offset;
+ const struct flcn_bl_dmem_desc_v1 hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = code,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = data,
+ .data_size = lsfw->app_resident_data_size,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm200_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+ .bld_write = gm200_gr_acr_bld_write,
+ .bld_patch = gm200_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gm200_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
+ .bld_write = gm200_gr_acr_bld_write,
+ .bld_patch = gm200_gr_acr_bld_patch,
+};
+
int
gm200_gr_rops(struct gf100_gr *gr)
{
@@ -124,44 +174,6 @@ gm200_gr_oneinit_tiles(struct gf100_gr *gr)
}
}
-int
-gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
-{
- struct gf100_gr *gr;
- int ret;
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
- return -ENOMEM;
- *pgr = &gr->base;
-
- ret = gf100_gr_ctor(func, device, index, gr);
- if (ret)
- return ret;
-
- /* Load firmwares for non-secure falcons */
- if (!nvkm_secboot_is_managed(device->secboot,
- NVKM_SECBOOT_FALCON_FECS)) {
- if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
- (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
- return ret;
- }
- if (!nvkm_secboot_is_managed(device->secboot,
- NVKM_SECBOOT_FALCON_GPCCS)) {
- if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
- (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
- return ret;
- }
-
- if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
- (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
- (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
- (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
- return ret;
-
- return 0;
-}
-
static const struct gf100_gr_func
gm200_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -198,7 +210,77 @@ gm200_gr = {
};
int
+gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ int ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+ &gr->fecs.falcon,
+ NVKM_ACR_LSF_FECS,
+ "gr/fecs_", ver, fwif->fecs);
+ if (ret)
+ return ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
+ &gr->gpccs.falcon,
+ NVKM_ACR_LSF_GPCCS,
+ "gr/gpccs_", ver,
+ fwif->gpccs);
+ if (ret)
+ return ret;
+
+ gr->firmware = true;
+
+ return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gm200_gr_fwif[] = {
+ { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
+int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gm200_gr, device, index, pgr);
+ return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index a667770ce3cb..09d8c5d5b000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -22,10 +22,61 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <core/firmware.h>
+#include <subdev/acr.h>
#include <subdev/timer.h>
+#include <nvfw/flcn.h>
+
#include <nvif/class.h>
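+/* This BL descriptor stores DMA addresses shifted right by 8 bits and split
+ * across two 32-bit fields; reassemble each address, adjust it, and split
+ * it again. */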
+void
+gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc hdr;
+ u64 addr;
+
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+ flcn_bl_dmem_desc_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+ const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+ const struct flcn_bl_dmem_desc hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = lower_32_bits(code),
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lower_32_bits(data),
+ .data_size = lsfw->app_resident_data_size,
+ .code_dma_base1 = upper_32_bits(code),
+ .data_dma_base1 = upper_32_bits(data),
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gm20b_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc),
+ .bld_write = gm20b_gr_acr_bld_write,
+ .bld_patch = gm20b_gr_acr_bld_patch,
+};
+
static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
{
@@ -33,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
u32 val;
/* Bypass MMU check for non-secure boot */
- if (!device->secboot) {
+ if (!device->acr) {
nvkm_wr32(device, 0x100ce4, 0xffffffff);
if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
@@ -85,8 +136,51 @@ gm20b_gr = {
}
};
+static int
+gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ int ret;
+
+ ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon,
+ NVKM_ACR_LSF_FECS,
+ "gr/fecs_", ver, fwif->fecs);
+ if (ret)
+ return ret;
+
+ if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver,
+ &gr->gpccs.inst) ||
+ nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver,
+ &gr->gpccs.data))
+ return -ENOENT;
+
+ gr->firmware = true;
+
+ return gk20a_gr_load_sw(gr, "gr/", ver);
+}
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gm20b_gr_fwif[] = {
+ { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr },
+ {}
+};
+
int
gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gm20b_gr, device, index, pgr);
+ return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 9d0521ce309a..33c8634ae567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -62,7 +62,7 @@ gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
gr->zbc_depth[zbc].format << ((znum % 4) * 7));
}
-static const struct gf100_gr_func_zbc
+const struct gf100_gr_func_zbc
gp100_gr_zbc = {
.clear_color = gp100_gr_zbc_clear_color,
.clear_depth = gp100_gr_zbc_clear_depth,
@@ -135,8 +135,27 @@ gp100_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp100_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp100_gr, device, index, pgr);
+ return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 37f7d739bf80..7baf67f743f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -131,8 +131,27 @@ gp102_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp102_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp102_gr, device, index, pgr);
+ return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 4573c914c021..d9b8ef875f8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -59,8 +59,40 @@ gp104_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp104_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp104_gr, device, index, pgr);
+ return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 812aba91653f..2b1ad5522184 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-static const struct gf100_gr_func
+const struct gf100_gr_func
gp107_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
.oneinit_sm_id = gm200_gr_oneinit_sm_id,
@@ -61,8 +61,27 @@ gp107_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp107_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
+ {}
+};
+
int
gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp107_gr, device, index, pgr);
+ return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
new file mode 100644
index 000000000000..113e4c1ba9e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+
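+/* Like gm200's v1 descriptor, flcn_bl_dmem_desc_v2 carries full 64-bit DMA
+ * addresses, so patching is a plain add of the adjustment. */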
+static void
+gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v2 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = base + lsfw->app_resident_code_offset;
+ const u64 data = base + lsfw->app_resident_data_offset;
+ const struct flcn_bl_dmem_desc_v2 hdr = {
+ .ctx_dma = FALCON_DMAIDX_UCODE,
+ .code_dma_base = code,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = data,
+ .data_size = lsfw->app_resident_data_size,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp108_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp108_gr_acr_bld_write,
+ .bld_patch = gp108_gr_acr_bld_patch,
+};
+
+const struct nvkm_acr_lsf_func
+gp108_gr_fecs_acr = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp108_gr_acr_bld_write,
+ .bld_patch = gp108_gr_acr_bld_patch,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gp108_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
+int
+gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 303dceddd4a8..eaf913eb5aa3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -23,8 +23,20 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <subdev/acr.h>
+
#include <nvif/class.h>
+#include <nvfw/flcn.h>
+
+static const struct nvkm_acr_lsf_func
+gp10b_gr_gpccs_acr = {
+ .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
+ .bld_size = sizeof(struct flcn_bl_dmem_desc),
+ .bld_write = gm20b_gr_acr_bld_write,
+ .bld_patch = gm20b_gr_acr_bld_patch,
+};
+
static const struct gf100_gr_func
gp10b_gr = {
.oneinit_tiles = gm200_gr_oneinit_tiles,
@@ -48,8 +60,8 @@ gp10b_gr = {
.gpc_nr = 1,
.tpc_nr = 2,
.ppc_nr = 1,
- .grctx = &gp102_grctx,
- .zbc = &gp102_gr_zbc,
+ .grctx = &gp100_grctx,
+ .zbc = &gp100_gr_zbc,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
@@ -59,8 +71,29 @@ gp10b_gr = {
}
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
+#endif
+
+static const struct gf100_gr_fwif
+gp10b_gr_fwif[] = {
+ { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr },
+ {}
+};
+
int
gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gp10b_gr, device, index, pgr);
+ return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 3b3327789ae7..70639d88b8e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -45,7 +45,7 @@ gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
}
-static void
+void
gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
gv100_gr_trap_sm(gr, gpc, tpc, 0);
@@ -59,7 +59,7 @@ gv100_gr_init_4188a4(struct gf100_gr *gr)
nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
}
-static void
+void
gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -71,14 +71,14 @@ gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
}
}
-static void
+void
gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
}
-static void
+void
gv100_gr_init_419bd8(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -120,8 +120,27 @@ gv100_gr = {
}
};
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+gv100_gr_fwif[] = {
+ { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
int
gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gm200_gr_new_(&gv100_gr, device, index, pgr);
+ return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
new file mode 100644
index 000000000000..454668b1cf54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+tu102_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+ nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002);
+}
+
+static void
+tu102_gr_init_fs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int sm;
+
+ gp100_grctx_generate_smid_config(gr);
+ gk104_grctx_generate_gpc_tpc_nr(gr);
+
+ for (sm = 0; sm < gr->sm_nr; sm++) {
+ nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 +
+ gr->sm[sm].tpc * 4), sm);
+ }
+
+ gm200_grctx_generate_dist_skip_table(gr);
+ gf100_gr_init_num_tpc_per_gpc(gr, true, true);
+}
+
+static void
+tu102_gr_init_zcull(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ const u8 tile_nr = ALIGN(gr->tpc_total, 64);
+ u8 bank[GPC_MAX] = {}, gpc, i, j;
+ u32 data;
+
+ for (i = 0; i < tile_nr; i += 8) {
+ for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+ data |= bank[gr->tile[i + j]] << (j * 4);
+ bank[gr->tile[i + j]]++;
+ }
+ nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
+ }
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+}
+
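+/* Mirror the MMU configuration PFB has been programmed with into the
+ * per-GPC MMU. */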
+static void
+tu102_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff);
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+
+ nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+ nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+}
+
+static const struct gf100_gr_func
+tu102_gr = {
+ .oneinit_tiles = gm200_gr_oneinit_tiles,
+ .oneinit_sm_id = gm200_gr_oneinit_sm_id,
+ .init = gf100_gr_init,
+ .init_419bd8 = gv100_gr_init_419bd8,
+ .init_gpc_mmu = tu102_gr_init_gpc_mmu,
+ .init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+ .init_zcull = tu102_gr_init_zcull,
+ .init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_fs = tu102_gr_init_fs,
+ .init_fecs_exceptions = tu102_gr_init_fecs_exceptions,
+ .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+ .init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_504430 = gv100_gr_init_504430,
+ .init_shader_exceptions = gv100_gr_init_shader_exceptions,
+ .trap_mp = gv100_gr_trap_mp,
+ .rops = gm200_gr_rops,
+ .gpc_nr = 6,
+ .tpc_nr = 5,
+ .ppc_nr = 3,
+ .grctx = &tu102_grctx,
+ .zbc = &gp102_gr_zbc,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, TURING_A, &gf100_fermi },
+ { -1, -1, TURING_COMPUTE_A },
+ {}
+ }
+};
+
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+
+static const struct gf100_gr_fwif
+tu102_gr_fwif[] = {
+ { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
+ {}
+};
+
+int
+tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index cdf631822282..9a0fd9812750 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/nvdec/base.o
-nvkm-y += nvkm/engine/nvdec/gp102.o
+nvkm-y += nvkm/engine/nvdec/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 4a63581bdd5e..9b23c1b70ebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -20,48 +20,42 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
-
-#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static int
-nvkm_nvdec_oneinit(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- struct nvkm_subdev *subdev = &nvdec->engine.subdev;
-
- nvdec->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (!nvdec->addr)
- return -EINVAL;
-
- /*XXX: fix naming of this when adding support for multiple-NVDEC */
- return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr,
- &nvdec->falcon);
-}
+#include <core/firmware.h>
static void *
nvkm_nvdec_dtor(struct nvkm_engine *engine)
{
struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- nvkm_falcon_del(&nvdec->falcon);
+ nvkm_falcon_dtor(&nvdec->falcon);
return nvdec;
}
static const struct nvkm_engine_func
nvkm_nvdec = {
.dtor = nvkm_nvdec_dtor,
- .oneinit = nvkm_nvdec_oneinit,
};
int
-nvkm_nvdec_new_(struct nvkm_device *device, int index,
- struct nvkm_nvdec **pnvdec)
+nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_nvdec **pnvdec)
{
struct nvkm_nvdec *nvdec;
+ int ret;
if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
- &nvdec->engine);
+ ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+ &nvdec->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ nvdec->func = fwif->func;
+
+ return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
+ nvkm_subdev_name[index], 0, &nvdec->falcon);
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index bf3e532665fb..0ab27ab4d8ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -19,25 +19,45 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include "priv.h"
-#ifndef __NVKM_CORE_MSGQUEUE_H
-#define __NVKM_CORE_MSGQUEUE_H
-#include <subdev/secboot.h>
-struct nvkm_msgqueue;
+static const struct nvkm_falcon_func
+gm107_nvdec_flcn = {
+ .debug = 0xd00,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
-/* Hopefully we will never have firmware arguments larger than that... */
-#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
+static const struct nvkm_nvdec_func
+gm107_nvdec = {
+ .flcn = &gm107_nvdec_flcn,
+};
-int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-void nvkm_msgqueue_del(struct nvkm_msgqueue **);
-void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
-int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
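+/* NVDEC needs no external firmware just to be instantiated, hence this
+ * no-op "nofw" loader. */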
+static int
+gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
+ const struct nvkm_nvdec_fwif *fwif)
+{
+ return 0;
+}
-/* useful if we run a NVIDIA-signed firmware */
-void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
+static const struct nvkm_nvdec_fwif
+gm107_nvdec_fwif[] = {
+ { -1, gm107_nvdec_nofw, &gm107_nvdec },
+ {}
+};
-/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
-int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
-
-#endif
+int
+gm107_nvdec_new(struct nvkm_device *device, int index,
+ struct nvkm_nvdec **pnvdec)
+{
+ return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 57bfa3aa1835..e14da8b000d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -3,5 +3,17 @@
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
-int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
+struct nvkm_nvdec_func {
+ const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvdec_fwif {
+ int version;
+ int (*load)(struct nvkm_nvdec *, int ver,
+ const struct nvkm_nvdec_fwif *);
+ const struct nvkm_nvdec_func *func;
+};
+
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
+ struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index f316de8d45a8..75bf4436bf3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: MIT
-#nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/base.o
+nvkm-y += nvkm/engine/nvenc/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
new file mode 100644
index 000000000000..484100e15668
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include "priv.h"
+#include <core/firmware.h>
+
+static void *
+nvkm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+ nvkm_falcon_dtor(&nvenc->falcon);
+ return nvenc;
+}
+
+static const struct nvkm_engine_func
+nvkm_nvenc = {
+ .dtor = nvkm_nvenc_dtor,
+};
+
+int
+nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_nvenc **pnvenc)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+ &nvenc->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc);
+ if (IS_ERR(fwif))
+ return -ENODEV;
+
+ nvenc->func = fwif->func;
+
+ return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
+ nvkm_subdev_name[index], 0, &nvenc->falcon);
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
new file mode 100644
index 000000000000..d249c8ffb2d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gm107_nvenc_flcn = {
+ .fbif = 0x800,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
+
+static const struct nvkm_nvenc_func
+gm107_nvenc = {
+ .flcn = &gm107_nvenc_flcn,
+};
+
+static int
+gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
+ const struct nvkm_nvenc_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_nvenc_fwif
+gm107_nvenc_fwif[] = {
+ { -1, gm107_nvenc_nofw, &gm107_nvenc },
+ {}
+};
+
+int
+gm107_nvenc_new(struct nvkm_device *device, int index,
+ struct nvkm_nvenc **pnvenc)
+{
+ return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
new file mode 100644
index 000000000000..100fa5ebbeef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVENC_PRIV_H__
+#define __NVKM_NVENC_PRIV_H__
+#include <engine/nvenc.h>
+
+struct nvkm_nvenc_func {
+ const struct nvkm_falcon_func *flcn;
+};
+
+struct nvkm_nvenc_fwif {
+ int version;
+ int (*load)(struct nvkm_nvenc *, int ver,
+ const struct nvkm_nvenc_fwif *);
+ const struct nvkm_nvenc_func *func;
+};
+
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+ int, struct nvkm_nvenc **pnvenc);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 97c4696171f0..63cd2be3de08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/engine/sec2/base.o
nvkm-y += nvkm/engine/sec2/gp102.o
+nvkm-y += nvkm/engine/sec2/gp108.o
nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 1b49e5b6717f..41318aa0d481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -21,97 +21,98 @@
*/
#include "priv.h"
-#include <core/msgqueue.h>
+#include <core/firmware.h>
#include <subdev/top.h>
-#include <engine/falcon.h>
-
-static void *
-nvkm_sec2_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- nvkm_msgqueue_del(&sec2->queue);
- nvkm_falcon_del(&sec2->falcon);
- return sec2;
-}
static void
-nvkm_sec2_intr(struct nvkm_engine *engine)
+nvkm_sec2_recv(struct work_struct *work)
{
- struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- struct nvkm_subdev *subdev = &engine->subdev;
- struct nvkm_device *device = subdev->device;
- u32 disp = nvkm_rd32(device, sec2->addr + 0x01c);
- u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16);
-
- if (intr & 0x00000040) {
- schedule_work(&sec2->work);
- nvkm_wr32(device, sec2->addr + 0x004, 0x00000040);
- intr &= ~0x00000040;
- }
+ struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
- if (intr) {
- nvkm_error(subdev, "unhandled intr %08x\n", intr);
- nvkm_wr32(device, sec2->addr + 0x004, intr);
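+	/* The falcon's first message describes its command/message queues;
+	 * it must be parsed before the message queue can be serviced. */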
+ if (!sec2->initmsg_received) {
+ int ret = sec2->func->initmsg(sec2);
+ if (ret) {
+ nvkm_error(&sec2->engine.subdev,
+ "error parsing init message: %d\n", ret);
+ return;
+ }
+ sec2->initmsg_received = true;
}
+
+ nvkm_falcon_msgq_recv(sec2->msgq);
}
static void
-nvkm_sec2_recv(struct work_struct *work)
+nvkm_sec2_intr(struct nvkm_engine *engine)
{
- struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
-
- if (!sec2->queue) {
- nvkm_warn(&sec2->engine.subdev,
- "recv function called while no firmware set!\n");
- return;
- }
-
- nvkm_msgqueue_recv(sec2->queue);
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ sec2->func->intr(sec2);
}
-
static int
-nvkm_sec2_oneinit(struct nvkm_engine *engine)
+nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- struct nvkm_subdev *subdev = &sec2->engine.subdev;
- if (!sec2->addr) {
- sec2->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (WARN_ON(!sec2->addr))
- return -EINVAL;
+ flush_work(&sec2->work);
+
+ if (suspend) {
+ nvkm_falcon_cmdq_fini(sec2->cmdq);
+ sec2->initmsg_received = false;
}
- return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon);
+ return 0;
}
-static int
-nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
+static void *
+nvkm_sec2_dtor(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- flush_work(&sec2->work);
- return 0;
+ nvkm_falcon_msgq_del(&sec2->msgq);
+ nvkm_falcon_cmdq_del(&sec2->cmdq);
+ nvkm_falcon_qmgr_del(&sec2->qmgr);
+ nvkm_falcon_dtor(&sec2->falcon);
+ return sec2;
}
static const struct nvkm_engine_func
nvkm_sec2 = {
.dtor = nvkm_sec2_dtor,
- .oneinit = nvkm_sec2_oneinit,
.fini = nvkm_sec2_fini,
.intr = nvkm_sec2_intr,
};
int
-nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr,
- struct nvkm_sec2 **psec2)
+nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
+ int index, u32 addr, struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
+ int ret;
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
- sec2->addr = addr;
- INIT_WORK(&sec2->work, nvkm_sec2_recv);
- return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ if (ret)
+ return ret;
+
+ fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ sec2->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
+ nvkm_subdev_name[index], addr, &sec2->falcon);
+ if (ret)
+ return ret;
+
+ if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) ||
+ (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||
+ (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq)))
+ return ret;
+
+ INIT_WORK(&sec2->work, nvkm_sec2_recv);
+ return 0;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 858cf27fa010..368f2a0042ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -19,12 +19,316 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+#include <core/memory.h>
+#include <subdev/acr.h>
+#include <subdev/timer.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/sec2.h>
+
+static int
+gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_sec2_acr_bootstrap_falcon_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ struct nvkm_subdev *subdev = priv;
+ const char *name = nvkm_acr_lsf_id(msg->falcon_id);
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
+ "falcon %d [%s]: %08x\n",
+ msg->falcon_id, name, msg->error_code);
+ return -EINVAL;
+ }
+
+ nvkm_debug(subdev, "%s booted\n", name);
+ return 0;
+}
+
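+/* Ask the ACR task running on SEC2 to (re)boot another LS falcon.  This is
+ * a synchronous RPC: the command is queued to the firmware and the reply,
+ * checked in the callback above, is awaited for up to one second.
+ */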
+static int
+gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
+ struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
+ .cmd.hdr.unit_id = sec2->func->unit_acr,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
+ .flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+ .falcon_id = id,
+ };
+
+ return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
+ gp102_sec2_acr_bootstrap_falcon_callback,
+ &sec2->engine.subdev,
+ msecs_to_jiffies(1000));
+}
+
+static int
+gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
+{
+ struct nv_sec2_args args = {};
+ nvkm_falcon_load_dmem(falcon, &args,
+ falcon->func->emem_addr, sizeof(args), 0);
+ nvkm_falcon_start(falcon);
+ return 0;
+}
+
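+/* The loader_config_v1 bootloader descriptor lives inside the WPR image;
+ * when the ACR relocates the WPR region, the code/data/overlay DMA bases
+ * recorded in it are rebased by the signed adjustment below.
+ */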
+static void
+gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct loader_config_v1 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ loader_config_v1_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const struct loader_config_v1 hdr = {
+ .dma_idx = FALCON_SEC2_DMAIDX_UCODE,
+ .code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .code_size_total = lsfw->app_size,
+ .code_size_to_load = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset,
+ .data_size = lsfw->app_resident_data_size,
+ .overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .argc = 1,
+ .argv = lsfw->falcon->func->emem_addr,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gp102_sec2_acr_0 = {
+ .bld_size = sizeof(struct loader_config_v1),
+ .bld_write = gp102_sec2_acr_bld_write,
+ .bld_patch = gp102_sec2_acr_bld_patch,
+ .boot = gp102_sec2_acr_boot,
+ .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
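+/* Parse the init message the SEC2 RTOS sends once at boot, describing where
+ * its command and message queues live in DMEM.  No commands can be
+ * submitted until this has been processed.
+ */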
+int
+gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
+{
+ struct nv_sec2_init_msg msg;
+ int ret, i;
+
+ ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
+ msg.msg_type != NV_SEC2_INIT_MSG_INIT)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
+ if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
+ nvkm_falcon_msgq_init(sec2->msgq,
+ msg.queue_info[i].index,
+ msg.queue_info[i].offset,
+ msg.queue_info[i].size);
+ } else {
+ nvkm_falcon_cmdq_init(sec2->cmdq,
+ msg.queue_info[i].index,
+ msg.queue_info[i].offset,
+ msg.queue_info[i].size);
+ }
+ }
+
+ return 0;
+}
+
+void
+gp102_sec2_intr(struct nvkm_sec2 *sec2)
+{
+ struct nvkm_subdev *subdev = &sec2->engine.subdev;
+ struct nvkm_falcon *falcon = &sec2->falcon;
+ u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
+ u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);
+
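+ /* Bit 6 appears to be the message-queue interrupt; handling is deferred
+ * to the workqueue, as draining the queue takes mutexes and may sleep.
+ */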
+ if (intr & 0x00000040) {
+ schedule_work(&sec2->work);
+ nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "unhandled intr %08x\n", intr);
+ nvkm_falcon_wr32(falcon, 0x004, intr);
+ }
+}
+
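+/* Presumably pulses an engine-level reset (bit 0 of 0x3c0) before running
+ * the generic falcon enable sequence.
+ */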
+int
+gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
+ udelay(10);
+ nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
+ return nvkm_falcon_v1_enable(falcon);
+}
+
+void
+gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
+ struct nvkm_memory *ctx)
+{
+ struct nvkm_device *device = falcon->owner->device;
+
+ nvkm_falcon_v1_bind_context(falcon, ctx);
+ if (!ctx)
+ return;
+
+ /* Not sure if this is a WAR for a HW issue, or some additional
+ * programming sequence that's needed to properly complete the
+ * context switch we trigger above.
+ *
+ * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
+ * particularly when resuming from suspend.
+ *
+ * Also removes the need for an odd workaround where we needed
+ * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
+ * the SEC2 RTOS would begin executing.
+ */
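+ /* Poll for up to 10ms for what appears to be a bind-completion signal,
+ * acknowledge it, then wait for the status field in 0x0dc to clear.
+ */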
+ nvkm_msec(device, 10,
+ u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((irqstat & 0x00000008) &&
+ (flcn0dc & 0x00007000) == 0x00005000)
+ break;
+ );
+
+ nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
+ nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
+
+ nvkm_msec(device, 10,
+ u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
+ if ((flcn0dc & 0x00007000) == 0x00000000)
+ break;
+ );
+}
+
+static const struct nvkm_falcon_func
+gp102_sec2_flcn = {
+ .debug = 0x408,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .emem_addr = 0x01000000,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = gp102_sec2_flcn_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0xa00, 0xa04, 8 },
+ .msgq = { 0xa30, 0xa34, 8 },
+};
+
+const struct nvkm_sec2_func
+gp102_sec2 = {
+ .flcn = &gp102_sec2_flcn,
+ .unit_acr = NV_SEC2_UNIT_ACR,
+ .intr = gp102_sec2_intr,
+ .initmsg = gp102_sec2_initmsg,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+
+static void
+gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct flcn_bl_dmem_desc_v2 hdr;
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ hdr.code_dma_base = hdr.code_dma_base + adjust;
+ hdr.data_dma_base = hdr.data_dma_base + adjust;
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
+}
+
+static void
+gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const struct flcn_bl_dmem_desc_v2 hdr = {
+ .ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
+ .code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
+ .non_sec_code_off = lsfw->app_resident_code_offset,
+ .non_sec_code_size = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset,
+ .data_size = lsfw->app_resident_data_size,
+ .argc = 1,
+ .argv = lsfw->falcon->func->emem_addr,
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+const struct nvkm_acr_lsf_func
+gp102_sec2_acr_1 = {
+ .bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
+ .bld_write = gp102_sec2_acr_bld_write_1,
+ .bld_patch = gp102_sec2_acr_bld_patch_1,
+ .boot = gp102_sec2_acr_boot,
+ .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
+};
+
+int
+gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
+ &sec2->falcon,
+ NVKM_ACR_LSF_SEC2, "sec2/",
+ ver, fwif->acr);
+}
+
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
+
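+/* Firmware variants are tried in listed order, so the "-1"-suffixed
+ * (version 1) images are preferred, with the original release as fallback.
+ */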
+static const struct nvkm_sec2_fwif
+gp102_sec2_fwif[] = {
+ { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
+ {}
+};
+
int
-gp102_sec2_new(struct nvkm_device *device, int index,
- struct nvkm_sec2 **psec2)
+gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(device, index, 0, psec2);
+ return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index 8bdfb3e5cd1c..232a9d7c51e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -14,23 +14,26 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "priv.h"
+#include <subdev/acr.h>
-#ifndef __NVKM_SECBOOT_ACR_R367_H__
-#define __NVKM_SECBOOT_ACR_R367_H__
+MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
-#include "acr_r352.h"
+static const struct nvkm_sec2_fwif
+gp108_sec2_fwif[] = {
+ { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
+ {}
+};
-void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
-
-struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
-int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-#endif
+int
+gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+{
+ return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index b331b00517e6..bb88117e018a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -3,7 +3,27 @@
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
-#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
+struct nvkm_sec2_func {
+ const struct nvkm_falcon_func *flcn;
+ u8 unit_acr;
+ void (*intr)(struct nvkm_sec2 *);
+ int (*initmsg)(struct nvkm_sec2 *);
+};
-int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **);
+void gp102_sec2_intr(struct nvkm_sec2 *);
+int gp102_sec2_initmsg(struct nvkm_sec2 *);
+
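+/* One table entry per supported firmware revision; nvkm_firmware_load()
+ * walks the table until an entry's load() succeeds.
+ */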
+struct nvkm_sec2_fwif {
+ int version;
+ int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *);
+ const struct nvkm_sec2_func *func;
+ const struct nvkm_acr_lsf_func *acr;
+};
+
+int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
+extern const struct nvkm_sec2_func gp102_sec2;
+extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
+
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+ int, u32 addr, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index d655576164b1..b6ebd95c9ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -19,15 +19,54 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+#include <subdev/acr.h>
+
+static const struct nvkm_falcon_func
+tu102_sec2_flcn = {
+ .debug = 0x408,
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .emem_addr = 0x01000000,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0xc00, 0xc04, 8 },
+ .msgq = { 0xc80, 0xc84, 8 },
+};
+
+static const struct nvkm_sec2_func
+tu102_sec2 = {
+ .flcn = &tu102_sec2_flcn,
+ .unit_acr = 0x07,
+ .intr = gp102_sec2_intr,
+ .initmsg = gp102_sec2_initmsg,
+};
+
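+/* Last-resort fallback (version -1): load no firmware at all, allowing the
+ * device to probe without SEC2-managed ACR.
+ */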
+static int
+tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
+ const struct nvkm_sec2_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_sec2_fwif
+tu102_sec2_fwif[] = {
+ { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
+ { -1, tu102_sec2_nofw, &tu102_sec2 }
+};
int
-tu102_sec2_new(struct nvkm_device *device, int index,
- struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(device, index, 0x840000, psec2);
+ return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index b5665ada850a..d79d783904ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/cmdq.o
+nvkm-y += nvkm/falcon/msgq.o
+nvkm-y += nvkm/falcon/qmgr.o
nvkm-y += nvkm/falcon/v1.o
-nvkm-y += nvkm/falcon/msgqueue.o
-nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
-nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 366c87de6e72..c6a3448180d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <subdev/mc.h>
+#include <subdev/top.h>
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
@@ -134,6 +135,37 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
return falcon->func->clear_interrupt(falcon, mask);
}
+static int
+nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
+{
+ const struct nvkm_falcon_func *func = falcon->func;
+ const struct nvkm_subdev *subdev = falcon->owner;
+ u32 reg;
+
+ if (!falcon->addr) {
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+ if (WARN_ON(!falcon->addr))
+ return -ENODEV;
+ }
+
+ reg = nvkm_falcon_rd32(falcon, 0x12c);
+ falcon->version = reg & 0xf;
+ falcon->secret = (reg >> 4) & 0x3;
+ falcon->code.ports = (reg >> 8) & 0xf;
+ falcon->data.ports = (reg >> 12) & 0xf;
+
+ reg = nvkm_falcon_rd32(falcon, 0x108);
+ falcon->code.limit = (reg & 0x1ff) << 8;
+ falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+ if (func->debug) {
+ u32 val = nvkm_falcon_rd32(falcon, func->debug);
+ falcon->debug = (val >> 20) & 0x1;
+ }
+
+ return 0;
+}
+
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
@@ -151,6 +183,8 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
+ int ret = 0;
+
mutex_lock(&falcon->mutex);
if (falcon->user) {
nvkm_error(user, "%s falcon already acquired by %s!\n",
@@ -160,70 +194,37 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
}
nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+ if (!falcon->oneinit)
+ ret = nvkm_falcon_oneinit(falcon);
falcon->user = user;
mutex_unlock(&falcon->mutex);
- return 0;
+ return ret;
}
void
+nvkm_falcon_dtor(struct nvkm_falcon *falcon)
+{
+}
+
+int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
struct nvkm_subdev *subdev, const char *name, u32 addr,
struct nvkm_falcon *falcon)
{
- u32 debug_reg;
- u32 reg;
-
falcon->func = func;
falcon->owner = subdev;
falcon->name = name;
falcon->addr = addr;
mutex_init(&falcon->mutex);
mutex_init(&falcon->dmem_mutex);
-
- reg = nvkm_falcon_rd32(falcon, 0x12c);
- falcon->version = reg & 0xf;
- falcon->secret = (reg >> 4) & 0x3;
- falcon->code.ports = (reg >> 8) & 0xf;
- falcon->data.ports = (reg >> 12) & 0xf;
-
- reg = nvkm_falcon_rd32(falcon, 0x108);
- falcon->code.limit = (reg & 0x1ff) << 8;
- falcon->data.limit = (reg & 0x3fe00) >> 1;
-
- switch (subdev->index) {
- case NVKM_ENGINE_GR:
- debug_reg = 0x0;
- break;
- case NVKM_SUBDEV_PMU:
- debug_reg = 0xc08;
- break;
- case NVKM_ENGINE_NVDEC0:
- debug_reg = 0xd00;
- break;
- case NVKM_ENGINE_SEC2:
- debug_reg = 0x408;
- falcon->has_emem = true;
- break;
- case NVKM_SUBDEV_GSP:
- debug_reg = 0x0; /*XXX*/
- break;
- default:
- nvkm_warn(subdev, "unsupported falcon %s!\n",
- nvkm_subdev_name[subdev->index]);
- debug_reg = 0;
- break;
- }
-
- if (debug_reg) {
- u32 val = nvkm_falcon_rd32(falcon, debug_reg);
- falcon->debug = (val >> 20) & 0x1;
- }
+ return 0;
}
void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
if (*pfalcon) {
+ nvkm_falcon_dtor(*pfalcon);
kfree(*pfalcon);
*pfalcon = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
new file mode 100644
index 000000000000..40e3f3fc83ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
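+/* Free-space check for the DMEM command ring.  When the write pointer is at
+ * or ahead of the read pointer, usable space runs from head to the end of
+ * the queue, minus room for a rewind header; if the command doesn't fit,
+ * the caller must rewind to the queue start.  One byte is kept free so a
+ * full queue can be told apart from an empty one.
+ */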
+static bool
+nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
+{
+ u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
+ u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
+ u32 free;
+
+ size = ALIGN(size, QUEUE_ALIGNMENT);
+
+ if (head >= tail) {
+ free = cmdq->offset + cmdq->size - head;
+ free -= HDR_SIZE;
+
+ if (size > free) {
+ *rewind = true;
+ head = cmdq->offset;
+ }
+ }
+
+ if (head < tail)
+ free = tail - head - 1;
+
+ return size <= free;
+}
+
+static void
+nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
+ cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
+}
+
+static void
+nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
+{
+ struct nv_falcon_cmd cmd;
+
+ cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
+ cmd.size = sizeof(cmd);
+ nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
+
+ cmdq->position = cmdq->offset;
+}
+
+static int
+nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
+{
+ struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
+ bool rewind = false;
+
+ mutex_lock(&cmdq->mutex);
+
+ if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
+ FLCNQ_DBG(cmdq, "queue full");
+ mutex_unlock(&cmdq->mutex);
+ return -EAGAIN;
+ }
+
+ cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
+
+ if (rewind)
+ nvkm_falcon_cmdq_rewind(cmdq);
+
+ return 0;
+}
+
+static void
+nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
+{
+ nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
+ mutex_unlock(&cmdq->mutex);
+}
+
+static int
+nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
+{
+ static unsigned timeout = 2000;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+ int ret = -EAGAIN;
+
+ while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+ ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
+ if (ret) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue space");
+ return ret;
+ }
+
+ nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
+ nvkm_falcon_cmdq_close(cmdq);
+ return ret;
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
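+/* Submit a command and, unless timeout is zero, wait for the firmware's
+ * reply.  Each command carries a sequence ID; the reply message echoes it,
+ * and the registered callback runs from the message-queue worker.  A zero
+ * timeout makes the send fire-and-forget.
+ */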
+int
+nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
+ nvkm_falcon_qmgr_callback cb, void *priv,
+ unsigned long timeout)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+ int ret;
+
+ if (!wait_for_completion_timeout(&cmdq->ready,
+ msecs_to_jiffies(1000))) {
+ FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
+ return -ETIMEDOUT;
+ }
+
+ seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
+ if (IS_ERR(seq))
+ return PTR_ERR(seq);
+
+ cmd->seq_id = seq->id;
+ cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+ seq->state = SEQ_STATE_USED;
+ seq->async = !timeout;
+ seq->callback = cb;
+ seq->priv = priv;
+
+ ret = nvkm_falcon_cmdq_write(cmdq, cmd);
+ if (ret) {
+ seq->state = SEQ_STATE_PENDING;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ return ret;
+ }
+
+ if (!seq->async) {
+ if (!wait_for_completion_timeout(&seq->done, timeout)) {
+ FLCNQ_ERR(cmdq, "timeout waiting for reply");
+ return -ETIMEDOUT;
+ }
+ ret = seq->result;
+ nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
+ }
+
+ return ret;
+}
+
+void
+nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
+{
+ reinit_completion(&cmdq->ready);
+}
+
+void
+nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
+
+ cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
+ cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
+ cmdq->offset = offset;
+ cmdq->size = size;
+ complete_all(&cmdq->ready);
+
+ FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, cmdq->offset, cmdq->size);
+}
+
+void
+nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+ if (cmdq) {
+ kfree(*pcmdq);
+ *pcmdq = NULL;
+ }
+}
+
+int
+nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_cmdq **pcmdq)
+{
+ struct nvkm_falcon_cmdq *cmdq = *pcmdq;
+
+ if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ cmdq->qmgr = qmgr;
+ cmdq->name = name;
+ mutex_init(&cmdq->mutex);
+ init_completion(&cmdq->ready);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
new file mode 100644
index 000000000000..cbfe09a561a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+static void
+nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
+{
+ mutex_lock(&msgq->mutex);
+ msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+}
+
+static void
+nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
+
+ mutex_unlock(&msgq->mutex);
+}
+
+static bool
+nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
+{
+ u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
+ u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
+ return head == tail;
+}
+
+static int
+nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ u32 head, tail, available;
+
+ head = nvkm_falcon_rd32(falcon, msgq->head_reg);
+ /* has the buffer looped? */
+ if (head < msgq->position)
+ msgq->position = msgq->offset;
+
+ tail = msgq->position;
+
+ available = head - tail;
+ if (size > available) {
+ FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
+ size, available);
+ return -EINVAL;
+ }
+
+ nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
+ msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
+ return 0;
+}
+
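+/* Returns 1 when a message was consumed, 0 when the queue is empty, and a
+ * negative error code on failure; hence the "> 0" loop in
+ * nvkm_falcon_msgq_recv().
+ */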
+static int
+nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+ int ret = 0;
+
+ nvkm_falcon_msgq_open(msgq);
+
+ if (nvkm_falcon_msgq_empty(msgq))
+ goto close;
+
+ ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message header");
+ goto close;
+ }
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
+ ret = -ENOSPC;
+ goto close;
+ }
+
+ if (hdr->size > HDR_SIZE) {
+ u32 read_size = hdr->size - HDR_SIZE;
+
+ ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
+ if (ret) {
+ FLCNQ_ERR(msgq, "failed to read message data");
+ goto close;
+ }
+ }
+
+ ret = 1;
+close:
+ nvkm_falcon_msgq_close(msgq, (ret >= 0));
+ return ret;
+}
+
+static int
+nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
+{
+ struct nvkm_falcon_qmgr_seq *seq;
+
+ seq = &msgq->qmgr->seq.id[hdr->seq_id];
+ if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+ FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
+ return -EINVAL;
+ }
+
+ if (seq->state == SEQ_STATE_USED) {
+ if (seq->callback)
+ seq->result = seq->callback(seq->priv, hdr);
+ }
+
+ if (seq->async) {
+ nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
+ return 0;
+ }
+
+ complete_all(&seq->done);
+ return 0;
+}
+
+void
+nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
+{
+ /*
+ * We are invoked from a worker thread, so normally we have plenty of
+ * stack space to work with.
+ */
+ u8 msg_buffer[MSG_BUF_SIZE];
+ struct nv_falcon_msg *hdr = (void *)msg_buffer;
+
+ while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
+ nvkm_falcon_msgq_exec(msgq, hdr);
+}
+
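+/* Read the init message before the queues have been configured: head/tail
+ * registers are taken straight from the falcon function table (effectively
+ * queue index 0), and the current tail pointer doubles as the queue offset.
+ */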
+int
+nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
+ void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = msgq->qmgr->falcon;
+ struct nv_falcon_msg *hdr = data;
+ int ret;
+
+ msgq->head_reg = falcon->func->msgq.head;
+ msgq->tail_reg = falcon->func->msgq.tail;
+ msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
+
+ nvkm_falcon_msgq_open(msgq);
+ ret = nvkm_falcon_msgq_pop(msgq, data, size);
+ if (ret == 0 && hdr->size != size) {
+ FLCN_ERR(falcon, "unexpected init message size %d vs %d",
+ hdr->size, size);
+ ret = -EINVAL;
+ }
+ nvkm_falcon_msgq_close(msgq, ret == 0);
+ return ret;
+}
+
+void
+nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
+ u32 index, u32 offset, u32 size)
+{
+ const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
+
+ msgq->head_reg = func->msgq.head + index * func->msgq.stride;
+ msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
+ msgq->offset = offset;
+
+ FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
+ index, msgq->offset, size);
+}
+
+void
+nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+ if (msgq) {
+ kfree(*pmsgq);
+ *pmsgq = NULL;
+ }
+}
+
+int
+nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
+ struct nvkm_falcon_msgq **pmsgq)
+{
+ struct nvkm_falcon_msgq *msgq = *pmsgq;
+
+ if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
+ return -ENOMEM;
+
+ msgq->qmgr = qmgr;
+ msgq->name = name;
+ mutex_init(&msgq->mutex);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
deleted file mode 100644
index a8bee1e046aa..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-
-#include <subdev/secboot.h>
-
-
-#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
-#define QUEUE_ALIGNMENT 4
-/* max size of the messages we can receive */
-#define MSG_BUF_SIZE 128
-
-static int
-msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- mutex_lock(&queue->mutex);
-
- queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- return 0;
-}
-
-static void
-msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- bool commit)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- if (commit)
- nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
-
- mutex_unlock(&queue->mutex);
-}
-
-static bool
-msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- u32 head, tail;
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- return head == tail;
-}
-
-static int
-msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- void *data, u32 size)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 head, tail, available;
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- /* has the buffer looped? */
- if (head < queue->position)
- queue->position = queue->offset;
-
- tail = queue->position;
-
- available = head - tail;
-
- if (available == 0) {
- nvkm_warn(subdev, "no message data available\n");
- return 0;
- }
-
- if (size > available) {
- nvkm_warn(subdev, "message data smaller than read request\n");
- size = available;
- }
-
- nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
- return size;
-}
-
-static int
-msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- struct nvkm_msgqueue_hdr *hdr)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- int err;
-
- err = msg_queue_open(priv, queue);
- if (err) {
- nvkm_error(subdev, "fail to open queue %d\n", queue->index);
- return err;
- }
-
- if (msg_queue_empty(priv, queue)) {
- err = 0;
- goto close;
- }
-
- err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
- if (err >= 0 && err != HDR_SIZE)
- err = -EINVAL;
- if (err < 0) {
- nvkm_error(subdev, "failed to read message header: %d\n", err);
- goto close;
- }
-
- if (hdr->size > MSG_BUF_SIZE) {
- nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
- err = -ENOSPC;
- goto close;
- }
-
- if (hdr->size > HDR_SIZE) {
- u32 read_size = hdr->size - HDR_SIZE;
-
- err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
- if (err >= 0 && err != read_size)
- err = -EINVAL;
- if (err < 0) {
- nvkm_error(subdev, "failed to read message: %d\n", err);
- goto close;
- }
- }
-
-close:
- msg_queue_close(priv, queue, (err >= 0));
-
- return err;
-}
-
-static bool
-cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- u32 size, bool *rewind)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- u32 head, tail, free;
-
- size = ALIGN(size, QUEUE_ALIGNMENT);
-
- head = nvkm_falcon_rd32(falcon, queue->head_reg);
- tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
-
- if (head >= tail) {
- free = queue->offset + queue->size - head;
- free -= HDR_SIZE;
-
- if (size > free) {
- *rewind = true;
- head = queue->offset;
- }
- }
-
- if (head < tail)
- free = tail - head - 1;
-
- return size <= free;
-}
-
-static int
-cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- void *data, u32 size)
-{
- nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
- queue->position += ALIGN(size, QUEUE_ALIGNMENT);
-
- return 0;
-}
-
-/* REWIND unit is always 0x00 */
-#define MSGQUEUE_UNIT_REWIND 0x00
-
-static void
-cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_hdr cmd;
- int err;
-
- cmd.unit_id = MSGQUEUE_UNIT_REWIND;
- cmd.size = sizeof(cmd);
- err = cmd_queue_push(priv, queue, &cmd, cmd.size);
- if (err)
- nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
- else
- nvkm_error(subdev, "queue %d rewinded\n", queue->index);
-
- queue->position = queue->offset;
-}
-
-static int
-cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- u32 size)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- bool rewind = false;
-
- mutex_lock(&queue->mutex);
-
- if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
- nvkm_error(subdev, "queue full\n");
- mutex_unlock(&queue->mutex);
- return -EAGAIN;
- }
-
- queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
-
- if (rewind)
- cmd_queue_rewind(priv, queue);
-
- return 0;
-}
-
-static void
-cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
- bool commit)
-{
- struct nvkm_falcon *falcon = priv->falcon;
-
- if (commit)
- nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
-
- mutex_unlock(&queue->mutex);
-}
-
-static int
-cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
- struct nvkm_msgqueue_queue *queue)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- static unsigned timeout = 2000;
- unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
- int ret = -EAGAIN;
- bool commit = true;
-
- while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
- ret = cmd_queue_open(priv, queue, cmd->size);
- if (ret) {
- nvkm_error(subdev, "pmu_queue_open_write failed\n");
- return ret;
- }
-
- ret = cmd_queue_push(priv, queue, cmd, cmd->size);
- if (ret) {
- nvkm_error(subdev, "pmu_queue_push failed\n");
- commit = false;
- }
-
- cmd_queue_close(priv, queue, commit);
-
- return ret;
-}
-
-static struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
- u32 index;
-
- mutex_lock(&priv->seq_lock);
-
- index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
-
- if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
- nvkm_error(subdev, "no free sequence available\n");
- mutex_unlock(&priv->seq_lock);
- return ERR_PTR(-EAGAIN);
- }
-
- set_bit(index, priv->seq_tbl);
-
- mutex_unlock(&priv->seq_lock);
-
- seq = &priv->seq[index];
- seq->state = SEQ_STATE_PENDING;
-
- return seq;
-}
-
-static void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
-{
- /* no need to acquire seq_lock since clear_bit is atomic */
- seq->state = SEQ_STATE_FREE;
- seq->callback = NULL;
- seq->completion = NULL;
- clear_bit(seq->id, priv->seq_tbl);
-}
-
-/* specifies that we want to know the command status in the answer message */
-#define CMD_FLAGS_STATUS BIT(0)
-/* specifies that we want an interrupt when the answer message is queued */
-#define CMD_FLAGS_INTR BIT(1)
-
-int
-nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
- struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
- struct completion *completion, bool wait_init)
-{
- struct nvkm_msgqueue_seq *seq;
- struct nvkm_msgqueue_queue *queue;
- int ret;
-
- if (wait_init && !wait_for_completion_timeout(&priv->init_done,
- msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- queue = priv->func->cmd_queue(priv, prio);
- if (IS_ERR(queue))
- return PTR_ERR(queue);
-
- seq = msgqueue_seq_acquire(priv);
- if (IS_ERR(seq))
- return PTR_ERR(seq);
-
- cmd->seq_id = seq->id;
- cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
-
- seq->callback = cb;
- seq->state = SEQ_STATE_USED;
- seq->completion = completion;
-
- ret = cmd_write(priv, cmd, queue);
- if (ret) {
- seq->state = SEQ_STATE_PENDING;
- msgqueue_seq_release(priv, seq);
- }
-
- return ret;
-}
-
-static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
-{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
-
- seq = &priv->seq[hdr->seq_id];
- if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
- nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
- return -EINVAL;
- }
-
- if (seq->state == SEQ_STATE_USED) {
- if (seq->callback)
- seq->callback(priv, hdr);
- }
-
- if (seq->completion)
- complete(seq->completion);
-
- msgqueue_seq_release(priv, seq);
-
- return 0;
-}
-
-static int
-msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct nvkm_falcon *falcon = priv->falcon;
- const struct nvkm_subdev *subdev = falcon->owner;
- u32 tail;
- u32 tail_reg;
- int ret;
-
- /*
- * Of course the message queue registers vary depending on the falcon
- * used...
- */
- switch (falcon->owner->index) {
- case NVKM_SUBDEV_PMU:
- tail_reg = 0x4cc;
- break;
- case NVKM_ENGINE_SEC2:
- tail_reg = 0xa34;
- break;
- default:
- nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
- nvkm_subdev_name[falcon->owner->index]);
- return -EINVAL;
- }
-
- /*
- * Read the message - queues are not initialized yet so we cannot rely
- * on msg_queue_read()
- */
- tail = nvkm_falcon_rd32(falcon, tail_reg);
- nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
-
- if (hdr->size > MSG_BUF_SIZE) {
- nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
- return -ENOSPC;
- }
-
- nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
- (hdr + 1));
-
- tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
- nvkm_falcon_wr32(falcon, tail_reg, tail);
-
- ret = priv->func->init_func->init_callback(priv, hdr);
- if (ret)
- return ret;
-
- return 0;
-}
-
-void
-nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_queue *queue)
-{
- /*
- * We are invoked from a worker thread, so normally we have plenty of
- * stack space to work with.
- */
- u8 msg_buffer[MSG_BUF_SIZE];
- struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
- int ret;
-
- /* the first message we receive must be the init message */
- if ((!priv->init_msg_received)) {
- ret = msgqueue_handle_init_msg(priv, hdr);
- if (!ret)
- priv->init_msg_received = true;
- } else {
- while (msg_queue_read(priv, queue, hdr) > 0)
- msgqueue_msg_handle(priv, hdr);
- }
-}
-
-void
-nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- if (!queue || !queue->func || !queue->func->init_func)
- return;
-
- queue->func->init_func->gen_cmdline(queue, buf);
-}
-
-int
-nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
- unsigned long falcon_mask)
-{
- unsigned long falcon;
-
- if (!queue || !queue->func->acr_func)
- return -ENODEV;
-
- /* Does the firmware support booting multiple falcons? */
- if (queue->func->acr_func->boot_multiple_falcons)
- return queue->func->acr_func->boot_multiple_falcons(queue,
- falcon_mask);
-
- /* Else boot all requested falcons individually */
- if (!queue->func->acr_func->boot_falcon)
- return -ENODEV;
-
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- int ret = queue->func->acr_func->boot_falcon(queue, falcon);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int
-nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
- const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
-{
- const struct nvkm_subdev *subdev = falcon->owner;
- int ret = -EINVAL;
-
- switch (version) {
- case 0x0137c63d:
- ret = msgqueue_0137c63d_new(falcon, sb, queue);
- break;
- case 0x0137bca5:
- ret = msgqueue_0137bca5_new(falcon, sb, queue);
- break;
- case 0x0148cdec:
- case 0x015ccf3e:
- case 0x0167d263:
- ret = msgqueue_0148cdec_new(falcon, sb, queue);
- break;
- default:
- nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
- version);
- break;
- }
-
- if (ret == 0) {
- nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
- (*queue)->fw_version = version;
- }
-
- return ret;
-}
-
-void
-nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
-{
- if (*queue) {
- (*queue)->func->dtor(*queue);
- *queue = NULL;
- }
-}
-
-void
-nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
-{
- if (!queue->func || !queue->func->recv) {
- const struct nvkm_subdev *subdev = queue->falcon->owner;
-
- nvkm_warn(subdev, "missing msgqueue recv function\n");
- return;
- }
-
- queue->func->recv(queue);
-}
-
-int
-nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
-{
- /* firmware not set yet... */
- if (!queue)
- return 0;
-
- queue->init_msg_received = false;
- reinit_completion(&queue->init_done);
-
- return 0;
-}
-
-void
-nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
- struct nvkm_falcon *falcon,
- struct nvkm_msgqueue *queue)
-{
- int i;
-
- queue->func = func;
- queue->falcon = falcon;
- mutex_init(&queue->seq_lock);
- for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
- queue->seq[i].id = i;
-
- init_completion(&queue->init_done);
-
-
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
deleted file mode 100644
index 13b54f8d8e04..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
-#define __NVKM_CORE_FALCON_MSGQUEUE_H
-
-#include <core/msgqueue.h>
-
-/*
- * The struct nvkm_msgqueue (named so for lack of better candidate) manages
- * a firmware (typically, NVIDIA signed firmware) running under a given falcon.
- *
- * Such firmwares expect to receive commands (through one or several command
- * queues) and will reply to such command by sending messages (using one
- * message queue).
- *
- * Each firmware can support one or several units - ACR for managing secure
- * falcons, PMU for power management, etc. A unit can be seen as a class to
- * which command can be sent.
- *
- * One usage example would be to send a command to the SEC falcon to ask it to
- * reset a secure falcon. The SEC falcon will receive the command, process it,
- * and send a message to signal success or failure. Only when the corresponding
- * message is received can the requester assume the request has been processed.
- *
- * Since we expect many variations between the firmwares NVIDIA will release
- * across GPU generations, this library is built in a very modular way. Message
- * formats and queues details (such as number of usage) are left to
- * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
- * take care of posting commands and processing messages in a fashion that is
- * universal.
- *
- */
-
-enum msgqueue_msg_priority {
- MSGQUEUE_MSG_PRIORITY_HIGH,
- MSGQUEUE_MSG_PRIORITY_LOW,
-};
-
-/**
- * struct nvkm_msgqueue_hdr - header for all commands/messages
- * @unit_id: id of firmware using receiving the command/sending the message
- * @size: total size of command/message
- * @ctrl_flags: type of command/message
- * @seq_id: used to match a message from its corresponding command
- */
-struct nvkm_msgqueue_hdr {
- u8 unit_id;
- u8 size;
- u8 ctrl_flags;
- u8 seq_id;
-};
-
-/**
- * struct nvkm_msgqueue_msg - base message.
- *
- * This is just a header and a message (or command) type. Useful when
- * building command-specific structures.
- */
-struct nvkm_msgqueue_msg {
- struct nvkm_msgqueue_hdr hdr;
- u8 msg_type;
-};
-
-struct nvkm_msgqueue;
-typedef void
-(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-
-/**
- * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
- *
- * @gen_cmdline: build the commandline into a pre-allocated buffer
- * @init_callback: called to process the init message
- */
-struct nvkm_msgqueue_init_func {
- void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
- int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
-};
-
-/**
- * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
- *
- * @boot_falcon: build and send the command to reset a given falcon
- * @boot_multiple_falcons: build and send the command to reset several falcons
- */
-struct nvkm_msgqueue_acr_func {
- int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
- int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
-};
-
-struct nvkm_msgqueue_func {
- const struct nvkm_msgqueue_init_func *init_func;
- const struct nvkm_msgqueue_acr_func *acr_func;
- void (*dtor)(struct nvkm_msgqueue *);
- struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
- enum msgqueue_msg_priority);
- void (*recv)(struct nvkm_msgqueue *queue);
-};
-
-/**
- * struct nvkm_msgqueue_queue - information about a command or message queue
- *
- * The number of queues is firmware-dependent. All queues must have their
- * information filled by the init message handler.
- *
- * @mutex_lock: to be acquired when the queue is being used
- * @index: physical queue index
- * @offset: DMEM offset where this queue begins
- * @size: size allocated to this queue in DMEM (in bytes)
- * @position: current write position
- * @head_reg: address of the HEAD register for this queue
- * @tail_reg: address of the TAIL register for this queue
- */
-struct nvkm_msgqueue_queue {
- struct mutex mutex;
- u32 index;
- u32 offset;
- u32 size;
- u32 position;
-
- u32 head_reg;
- u32 tail_reg;
-};
-
-/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
- *
- * Every time a command is sent, a sequence is assigned to it so the
- * corresponding message can be matched. Upon receiving the message, a callback
- * can be called and/or a completion signaled.
- *
- * @id: sequence ID
- * @state: current state
- * @callback: callback to call upon receiving matching message
- * @completion: completion to signal after callback is called
- */
-struct nvkm_msgqueue_seq {
- u16 id;
- enum {
- SEQ_STATE_FREE = 0,
- SEQ_STATE_PENDING,
- SEQ_STATE_USED,
- SEQ_STATE_CANCELLED
- } state;
- nvkm_msgqueue_callback callback;
- struct completion *completion;
-};
-
-/*
- * We can have an arbitrary number of sequences, but realistically we will
- * probably not use that much simultaneously.
- */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
-
-/**
- * struct nvkm_msgqueue - manage a command/message based FW on a falcon
- *
- * @falcon: falcon to be managed
- * @func: implementation of the firmware to use
- * @init_msg_received: whether the init message has already been received
- * @init_done: whether all init is complete and commands can be processed
- * @seq_lock: protects seq and seq_tbl
- * @seq: sequences to match commands and messages
- * @seq_tbl: bitmap of sequences currently in use
- */
-struct nvkm_msgqueue {
- struct nvkm_falcon *falcon;
- const struct nvkm_msgqueue_func *func;
- u32 fw_version;
- bool init_msg_received;
- struct completion init_done;
-
- struct mutex seq_lock;
- struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
- unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
-};
-
-void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
- struct nvkm_msgqueue *);
-int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
- struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
- struct completion *, bool);
-void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
- struct nvkm_msgqueue_queue *);
-
-int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
- struct nvkm_msgqueue **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
deleted file mode 100644
index fec0273158f6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/* Queues identifiers */
-enum {
- /* High Priority Command Queue for Host -> PMU communication */
- MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
- /* Low Priority Command Queue for Host -> PMU communication */
- MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
- /* Message queue for PMU -> Host communication */
- MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
- MSGQUEUE_0137C63D_NUM_QUEUES = 5,
-};
-
-struct msgqueue_0137c63d {
- struct nvkm_msgqueue base;
-
- struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
-};
-#define msgqueue_0137c63d(q) \
- container_of(q, struct msgqueue_0137c63d, base)
-
-struct msgqueue_0137bca5 {
- struct msgqueue_0137c63d base;
-
- u64 wpr_addr;
-};
-#define msgqueue_0137bca5(q) \
- container_of(container_of(q, struct msgqueue_0137c63d, base), \
- struct msgqueue_0137bca5, base);
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
- enum msgqueue_msg_priority priority)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
- const struct nvkm_subdev *subdev = priv->base.falcon->owner;
-
- switch (priority) {
- case MSGQUEUE_MSG_PRIORITY_HIGH:
- return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
- case MSGQUEUE_MSG_PRIORITY_LOW:
- return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
- default:
- nvkm_error(subdev, "invalid command queue!\n");
- return ERR_PTR(-EINVAL);
- }
-}
-
-static void
-msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
- struct nvkm_msgqueue_queue *q_queue =
- &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
-
- nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-/* Init unit */
-#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
-
-enum {
- INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- struct {
- u32 reserved;
- u32 freq_hz;
- u32 trace_size;
- u32 trace_dma_base;
- u16 trace_dma_base1;
- u8 trace_dma_offset;
- u32 trace_dma_idx;
- bool secure_mode;
- bool raise_priv_sec;
- struct {
- u32 dma_base;
- u16 dma_base1;
- u8 dma_offset;
- u16 fb_size;
- u8 dma_idx;
- } gc6_ctx;
- u8 pad;
- } *args = buf;
-
- args->secure_mode = 1;
-}
-
-/* forward declaration */
-static int acr_init_wpr(struct nvkm_msgqueue *queue);
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
- struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
- struct {
- struct nvkm_msgqueue_msg base;
-
- u8 pad;
- u16 os_debug_entry_point;
-
- struct {
- u16 size;
- u16 offset;
- u8 index;
- u8 pad;
- } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
-
- u16 sw_managed_area_offset;
- u16 sw_managed_area_size;
- } *init = (void *)hdr;
- const struct nvkm_subdev *subdev = _queue->falcon->owner;
- int i;
-
- if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
- nvkm_error(subdev, "expected message from init unit\n");
- return -EINVAL;
- }
-
- if (init->base.msg_type != INIT_MSG_INIT) {
- nvkm_error(subdev, "expected PMU init msg\n");
- return -EINVAL;
- }
-
- for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
- struct nvkm_msgqueue_queue *queue = &priv->queue[i];
-
- mutex_init(&queue->mutex);
-
- queue->index = init->queue_info[i].index;
- queue->offset = init->queue_info[i].offset;
- queue->size = init->queue_info[i].size;
-
- if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
- queue->head_reg = 0x4a0 + (queue->index * 4);
- queue->tail_reg = 0x4b0 + (queue->index * 4);
- } else {
- queue->head_reg = 0x4c8;
- queue->tail_reg = 0x4cc;
- }
-
- nvkm_debug(subdev,
- "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
- i, queue->index, queue->offset, queue->size);
- }
-
- /* Complete initialization by initializing WPR region */
- return acr_init_wpr(&priv->base);
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0137c63d_init_func = {
- .gen_cmdline = init_gen_cmdline,
- .init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
-
-enum {
- ACR_CMD_INIT_WPR_REGION = 0x00,
- ACR_CMD_BOOTSTRAP_FALCON = 0x01,
- ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
-};
-
-static void
-acr_init_wpr_callback(struct nvkm_msgqueue *queue,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct {
- struct nvkm_msgqueue_msg base;
- u32 error_code;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = queue->falcon->owner;
-
- if (msg->error_code) {
- nvkm_error(subdev, "ACR WPR init failure: %d\n",
- msg->error_code);
- return;
- }
-
- nvkm_debug(subdev, "ACR WPR init complete\n");
- complete_all(&queue->init_done);
-}
-
-static int
-acr_init_wpr(struct nvkm_msgqueue *queue)
-{
- /*
- * region_id: region ID in WPR region
- * wpr_offset: offset in WPR region
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 region_id;
- u32 wpr_offset;
- } cmd;
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
- cmd.region_id = 0x01;
- cmd.wpr_offset = 0x00;
-
- nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_init_wpr_callback, NULL, false);
-
- return 0;
-}
-
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 falcon_id;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 falcon_id = msg->falcon_id;
-
- if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
- return;
- }
- nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_id;
- } cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_id = falcon;
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_falcon_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static void
-acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 falcon_mask;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- unsigned long falcon_mask = msg->falcon_mask;
- u32 falcon_id, falcon_treated = 0;
-
- for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- nvkm_debug(subdev, "%s booted\n",
- nvkm_secboot_falcon_name[falcon_id]);
- falcon_treated |= BIT(falcon_id);
- }
-
- if (falcon_treated != msg->falcon_mask) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon mask 0x%x\n",
- msg->falcon_mask);
- return;
- }
-}
-
-static int
-acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_mask;
- u32 use_va_mask;
- u32 wpr_lo;
- u32 wpr_hi;
- } cmd;
- struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_mask = falcon_mask;
- cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
- cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_multiple_falcons_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137c63d_acr_func = {
- .boot_falcon = acr_boot_falcon,
-};
-
-static const struct nvkm_msgqueue_acr_func
-msgqueue_0137bca5_acr_func = {
- .boot_falcon = acr_boot_falcon,
- .boot_multiple_falcons = acr_boot_multiple_falcons,
-};
-
-static void
-msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
-{
- kfree(msgqueue_0137c63d(queue));
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137c63d_func = {
- .init_func = &msgqueue_0137c63d_init_func,
- .acr_func = &msgqueue_0137c63d_acr_func,
- .cmd_queue = msgqueue_0137c63d_cmd_queue,
- .recv = msgqueue_0137c63d_process_msgs,
- .dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0137c63d *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base;
-
- nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_func
-msgqueue_0137bca5_func = {
- .init_func = &msgqueue_0137c63d_init_func,
- .acr_func = &msgqueue_0137bca5_acr_func,
- .cmd_queue = msgqueue_0137c63d_cmd_queue,
- .recv = msgqueue_0137c63d_process_msgs,
- .dtor = msgqueue_0137c63d_dtor,
-};
-
-int
-msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0137bca5 *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base.base;
-
- /*
- * FIXME this must be set to the address of a *GPU* mapping within the
- * ACR address space!
- */
- /* ret->wpr_addr = sb->wpr_addr; */
-
- nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
deleted file mode 100644
index 9424803b9ef4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "msgqueue.h"
-#include <engine/falcon.h>
-#include <subdev/secboot.h>
-
-/*
- * This firmware runs on the SEC falcon. It only has one command and one
- * message queue, and uses a different command line and init message.
- */
-
-enum {
- MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
- MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
- MSGQUEUE_0148CDEC_NUM_QUEUES,
-};
-
-struct msgqueue_0148cdec {
- struct nvkm_msgqueue base;
-
- struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
-};
-#define msgqueue_0148cdec(q) \
- container_of(q, struct msgqueue_0148cdec, base)
-
-static struct nvkm_msgqueue_queue *
-msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
- enum msgqueue_msg_priority priority)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
-
- return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
-}
-
-static void
-msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
- struct nvkm_msgqueue_queue *q_queue =
- &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
-
- nvkm_msgqueue_process_msgs(&priv->base, q_queue);
-}
-
-
-/* Init unit */
-#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
-
-enum {
- INIT_MSG_INIT = 0x0,
-};
-
-static void
-init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
-{
- struct {
- u32 freq_hz;
- u32 falc_trace_size;
- u32 falc_trace_dma_base;
- u32 falc_trace_dma_idx;
- bool secure_mode;
- } *args = buf;
-
- args->secure_mode = false;
-}
-
-static int
-init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
-{
- struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
- struct {
- struct nvkm_msgqueue_msg base;
-
- u8 num_queues;
- u16 os_debug_entry_point;
-
- struct {
- u32 offset;
- u16 size;
- u8 index;
- u8 id;
- } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
-
- u16 sw_managed_area_offset;
- u16 sw_managed_area_size;
- } *init = (void *)hdr;
- const struct nvkm_subdev *subdev = _queue->falcon->owner;
- int i;
-
- if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
- nvkm_error(subdev, "expected message from init unit\n");
- return -EINVAL;
- }
-
- if (init->base.msg_type != INIT_MSG_INIT) {
- nvkm_error(subdev, "expected SEC init msg\n");
- return -EINVAL;
- }
-
- for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
- u8 id = init->queue_info[i].id;
- struct nvkm_msgqueue_queue *queue = &priv->queue[id];
-
- mutex_init(&queue->mutex);
-
- queue->index = init->queue_info[i].index;
- queue->offset = init->queue_info[i].offset;
- queue->size = init->queue_info[i].size;
-
- if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
- queue->head_reg = 0xa30 + (queue->index * 8);
- queue->tail_reg = 0xa34 + (queue->index * 8);
- } else {
- queue->head_reg = 0xa00 + (queue->index * 8);
- queue->tail_reg = 0xa04 + (queue->index * 8);
- }
-
- nvkm_debug(subdev,
- "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
- id, queue->index, queue->offset, queue->size);
- }
-
- complete_all(&_queue->init_done);
-
- return 0;
-}
-
-static const struct nvkm_msgqueue_init_func
-msgqueue_0148cdec_init_func = {
- .gen_cmdline = init_gen_cmdline,
- .init_callback = init_callback,
-};
-
-
-
-/* ACR unit */
-#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON = 0x00,
-};
-
-static void
-acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
- struct nvkm_msgqueue_hdr *hdr)
-{
- struct acr_bootstrap_falcon_msg {
- struct nvkm_msgqueue_msg base;
-
- u32 error_code;
- u32 falcon_id;
- } *msg = (void *)hdr;
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- u32 falcon_id = msg->falcon_id;
-
- if (msg->error_code) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "expected error code 0x%x\n",
- msg->error_code);
- return;
- }
-
- if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
- nvkm_error(subdev, "in bootstrap falcon callback:\n");
- nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
- return;
- }
-
- nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
- ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
- DECLARE_COMPLETION_ONSTACK(completed);
- /*
- * flags - Flag specifying RESET or no RESET.
- * falcon id - Falcon id specifying falcon to bootstrap.
- */
- struct {
- struct nvkm_msgqueue_hdr hdr;
- u8 cmd_type;
- u32 flags;
- u32 falcon_id;
- } cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
- cmd.hdr.size = sizeof(cmd);
- cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
- cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
- cmd.falcon_id = falcon;
- nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
- acr_boot_falcon_callback, &completed, true);
-
- if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-const struct nvkm_msgqueue_acr_func
-msgqueue_0148cdec_acr_func = {
- .boot_falcon = acr_boot_falcon,
-};
-
-static void
-msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
-{
- kfree(msgqueue_0148cdec(queue));
-}
-
-const struct nvkm_msgqueue_func
-msgqueue_0148cdec_func = {
- .init_func = &msgqueue_0148cdec_init_func,
- .acr_func = &msgqueue_0148cdec_acr_func,
- .cmd_queue = msgqueue_0148cdec_cmd_queue,
- .recv = msgqueue_0148cdec_process_msgs,
- .dtor = msgqueue_0148cdec_dtor,
-};
-
-int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
- struct nvkm_msgqueue **queue)
-{
- struct msgqueue_0148cdec *ret;
-
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- *queue = &ret->base;
-
- nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 900fe1d37b4d..466188752eb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,9 +1,5 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_FALCON_PRIV_H__
#define __NVKM_FALCON_PRIV_H__
-#include <engine/falcon.h>
-
-void
-nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
- const char *, u32, struct nvkm_falcon *);
+#include <core/falcon.h>
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 000000000000..a453de341a75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+ const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+ struct nvkm_falcon_qmgr_seq *seq;
+ u32 index;
+
+ mutex_lock(&qmgr->seq.mutex);
+ index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+ if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+ nvkm_error(subdev, "no free sequence available\n");
+ mutex_unlock(&qmgr->seq.mutex);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ set_bit(index, qmgr->seq.tbl);
+ mutex_unlock(&qmgr->seq.mutex);
+
+ seq = &qmgr->seq.id[index];
+ seq->state = SEQ_STATE_PENDING;
+ return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+ struct nvkm_falcon_qmgr_seq *seq)
+{
+ /* no need to acquire seq.mutex since clear_bit is atomic */
+ seq->state = SEQ_STATE_FREE;
+ seq->callback = NULL;
+ reinit_completion(&seq->done);
+ clear_bit(seq->id, qmgr->seq.tbl);
+}
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+ if (qmgr) {
+ kfree(*pqmgr);
+ *pqmgr = NULL;
+ }
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+ struct nvkm_falcon_qmgr **pqmgr)
+{
+ struct nvkm_falcon_qmgr *qmgr;
+ int i;
+
+ if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ qmgr->falcon = falcon;
+ mutex_init(&qmgr->seq.mutex);
+ for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+ qmgr->seq.id[i].id = i;
+ init_completion(&qmgr->seq.id[i].done);
+ }
+
+ return 0;
+}
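
The two helpers above make sequence allocation race-free: acquisition takes seq.mutex so the find-first-zero-bit/set_bit pair is atomic against concurrent senders, while release only needs the atomicity of clear_bit(). A minimal sketch of the intended lifecycle, with the queue-write step elided and the handler name hypothetical:

	struct nvkm_falcon_qmgr_seq *seq;
	int ret;

	seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	seq->callback = my_handler;	/* hypothetical; invoked on the matching message */
	/* ...embed seq->id in the command header and write it to the queue... */

	/* a synchronous caller then sleeps on the paired completion */
	if (!wait_for_completion_timeout(&seq->done, msecs_to_jiffies(1000)))
		ret = -ETIMEDOUT;
	else
		ret = seq->result;

	nvkm_falcon_qmgr_seq_release(qmgr, seq);
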
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 000000000000..a45cd705e4f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @callback: callback to call upon receiving matching message
+ * @completion: completion to signal after callback is called
+ */
+struct nvkm_falcon_qmgr_seq {
+ u16 id;
+ enum {
+ SEQ_STATE_FREE = 0,
+ SEQ_STATE_PENDING,
+ SEQ_STATE_USED,
+ SEQ_STATE_CANCELLED
+ } state;
+ bool async;
+ nvkm_falcon_qmgr_callback callback;
+ void *priv;
+ struct completion done;
+ int result;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that many simultaneously.
+ */
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
+
+struct nvkm_falcon_qmgr {
+ struct nvkm_falcon *falcon;
+
+ struct {
+ struct mutex mutex;
+ struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+ unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+ } seq;
+};
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+ struct nvkm_falcon_qmgr_seq *);
+
+struct nvkm_falcon_cmdq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+ struct completion ready;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+ u32 size;
+
+ u32 position;
+};
+
+struct nvkm_falcon_msgq {
+ struct nvkm_falcon_qmgr *qmgr;
+ const char *name;
+ struct mutex mutex;
+
+ u32 head_reg;
+ u32 tail_reg;
+ u32 offset;
+
+ u32 position;
+};
+
+#define FLCNQ_PRINTK(t,q,f,a...) \
+ FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
+#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a)
+#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a)
+#endif
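
HDR_SIZE, QUEUE_ALIGNMENT and MSG_BUF_SIZE above encode the on-ring layout: every entry begins with a struct nv_falcon_msg header whose size field counts the header plus its payload, entries are padded to 4-byte alignment, and MSG_BUF_SIZE bounds the scratch buffer needed to pull any one message out. Assuming that layout, the ring space an entry occupies is simply:

	/* hdr->size already includes the header, so only padding is added */
	u32 entry_size = ALIGN(hdr->size, QUEUE_ALIGNMENT);

	/* and a reader can always stage an entry into a fixed-size buffer */
	u8 buf[MSG_BUF_SIZE];
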
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 6d978feebbd7..1ff9b9c2e651 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -25,7 +25,7 @@
#include <core/memory.h>
#include <subdev/timer.h>
-static void
+void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u16 tag, u8 port, bool secure)
{
@@ -89,18 +89,17 @@ nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
}
}
-static const u32 EMEM_START_ADDR = 0x1000000;
-
-static void
+void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
- u32 size, u8 port)
+ u32 size, u8 port)
{
+ const struct nvkm_falcon_func *func = falcon->func;
u8 rem = size % 4;
int i;
- if (start >= EMEM_START_ADDR && falcon->has_emem)
+ if (func->emem_addr && start >= func->emem_addr)
return nvkm_falcon_v1_load_emem(falcon, data,
- start - EMEM_START_ADDR, size,
+ start - func->emem_addr, size,
port);
size -= rem;
@@ -148,15 +147,16 @@ nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
}
-static void
+void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
u8 port, void *data)
{
+ const struct nvkm_falcon_func *func = falcon->func;
u8 rem = size % 4;
int i;
- if (start >= EMEM_START_ADDR && falcon->has_emem)
- return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
+ if (func->emem_addr && start >= func->emem_addr)
+ return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
size, port, data);
size -= rem;
@@ -179,12 +179,11 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
}
-static void
+void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
- struct nvkm_device *device = falcon->owner->device;
+ const u32 fbif = falcon->func->fbif;
u32 inst_loc;
- u32 fbif;
/* disable instance block binding */
if (ctx == NULL) {
@@ -192,20 +191,6 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
return;
}
- switch (falcon->owner->index) {
- case NVKM_ENGINE_NVENC0:
- case NVKM_ENGINE_NVENC1:
- case NVKM_ENGINE_NVENC2:
- fbif = 0x800;
- break;
- case NVKM_SUBDEV_PMU:
- fbif = 0xe00;
- break;
- default:
- fbif = 0x600;
- break;
- }
-
nvkm_falcon_wr32(falcon, 0x10c, 0x1);
/* setup apertures - virtual */
@@ -234,50 +219,15 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
-
- /* Not sure if this is a WAR for a HW issue, or some additional
- * programming sequence that's needed to properly complete the
- * context switch we trigger above.
- *
- * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
- * particularly when resuming from suspend.
- *
- * Also removes the need for an odd workaround where we needed
- * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
- * the SEC2 RTOS would begin executing.
- */
- switch (falcon->owner->index) {
- case NVKM_SUBDEV_GSP:
- case NVKM_ENGINE_SEC2:
- nvkm_msec(device, 10,
- u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
- u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
- if ((irqstat & 0x00000008) &&
- (flcn0dc & 0x00007000) == 0x00005000)
- break;
- );
-
- nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
- nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
-
- nvkm_msec(device, 10,
- u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
- if ((flcn0dc & 0x00007000) == 0x00000000)
- break;
- );
- break;
- default:
- break;
- }
}
-static void
+void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
nvkm_falcon_wr32(falcon, 0x104, start_addr);
}
-static void
+void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
u32 reg = nvkm_falcon_rd32(falcon, 0x100);
@@ -288,7 +238,7 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
nvkm_falcon_wr32(falcon, 0x100, 0x2);
}
-static int
+int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
struct nvkm_device *device = falcon->owner->device;
@@ -301,7 +251,7 @@ nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
return 0;
}
-static int
+int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
struct nvkm_device *device = falcon->owner->device;
@@ -330,7 +280,7 @@ falcon_v1_wait_idle(struct nvkm_falcon *falcon)
return 0;
}
-static int
+int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
@@ -352,7 +302,7 @@ nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
return 0;
}
-static void
+void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
/* disable IRQs and wait for any previous code to complete */
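
The hunks above move two pieces of per-engine knowledge out of the shared v1 code and into struct nvkm_falcon_func: the FBIF register base (previously a switch on the owner: 0x800 for NVENC, 0xe00 for PMU, 0x600 otherwise) and the EMEM window base (previously the EMEM_START_ADDR constant, 0x1000000). A sketch of what an engine's function table might supply under the new scheme; the values come from the deleted code, and the op entries mirror the v1 functions made non-static above, though the exact set any one engine wires up is engine-specific:

static const struct nvkm_falcon_func
example_pmu_flcn = {				/* illustrative, not a real table */
	.fbif = 0xe00,				/* PMU FBIF base from the old switch */
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
	.start = nvkm_falcon_v1_start,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
};

A SEC2-style falcon with an EMEM window would additionally set .emem_addr = 0x1000000, restoring the old behaviour of the has_emem path.
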
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
new file mode 100644
index 000000000000..41d75f98e603
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/nvfw/fw.o
+nvkm-y += nvkm/nvfw/hs.o
+nvkm-y += nvkm/nvfw/ls.o
+
+nvkm-y += nvkm/nvfw/acr.o
+nvkm-y += nvkm/nvfw/flcn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
new file mode 100644
index 000000000000..0d063b8317f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/acr.h>
+
+void
+wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr)
+{
+ nvkm_debug(subdev, "wprHeader\n");
+ nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+ nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+ nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+ nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+ nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
+{
+ nvkm_debug(subdev, "wprHeader\n");
+ nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+ nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+ nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+ nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+ nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version);
+ nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+lsb_header_tail_dump(struct nvkm_subdev *subdev,
+ struct lsb_header_tail *hdr)
+{
+ nvkm_debug(subdev, "lsbHeader\n");
+ nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
+ nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size);
+ nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off);
+ nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off);
+ nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size);
+ nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off);
+ nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size);
+ nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off);
+ nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size);
+ nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags);
+}
+
+void
+lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr)
+{
+ lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr)
+{
+ lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr)
+{
+ int i;
+
+ nvkm_debug(subdev, "acrDesc\n");
+ nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+ nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+ nvkm_debug(subdev, "\tmmuMemRange : 0x%x\n",
+ hdr->mmu_mem_range);
+ nvkm_debug(subdev, "\tnoRegions : %d\n",
+ hdr->regions.no_regions);
+
+ for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+ nvkm_debug(subdev, "\tregion[%d] :\n", i);
+ nvkm_debug(subdev, "\t startAddr : 0x%x\n",
+ hdr->regions.region_props[i].start_addr);
+ nvkm_debug(subdev, "\t endAddr : 0x%x\n",
+ hdr->regions.region_props[i].end_addr);
+ nvkm_debug(subdev, "\t regionId : %d\n",
+ hdr->regions.region_props[i].region_id);
+ nvkm_debug(subdev, "\t readMask : 0x%x\n",
+ hdr->regions.region_props[i].read_mask);
+ nvkm_debug(subdev, "\t writeMask : 0x%x\n",
+ hdr->regions.region_props[i].write_mask);
+ nvkm_debug(subdev, "\t clientMask : 0x%x\n",
+ hdr->regions.region_props[i].client_mask);
+ }
+
+ nvkm_debug(subdev, "\tucodeBlobSize: %d\n",
+ hdr->ucode_blob_size);
+ nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n",
+ hdr->ucode_blob_base);
+ nvkm_debug(subdev, "\tvprEnabled : %d\n",
+ hdr->vpr_desc.vpr_enabled);
+ nvkm_debug(subdev, "\tvprStart : 0x%x\n",
+ hdr->vpr_desc.vpr_start);
+ nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
+ hdr->vpr_desc.vpr_end);
+ nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+ hdr->vpr_desc.hdcp_policies);
+}
+
+void
+flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr)
+{
+ int i;
+
+ nvkm_debug(subdev, "acrDesc\n");
+ nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+ nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+ nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n",
+ hdr->mmu_memory_range);
+ nvkm_debug(subdev, "\tnoRegions : %d\n",
+ hdr->regions.no_regions);
+
+ for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
+ nvkm_debug(subdev, "\tregion[%d] :\n", i);
+ nvkm_debug(subdev, "\t startAddr : 0x%x\n",
+ hdr->regions.region_props[i].start_addr);
+ nvkm_debug(subdev, "\t endAddr : 0x%x\n",
+ hdr->regions.region_props[i].end_addr);
+ nvkm_debug(subdev, "\t regionId : %d\n",
+ hdr->regions.region_props[i].region_id);
+ nvkm_debug(subdev, "\t readMask : 0x%x\n",
+ hdr->regions.region_props[i].read_mask);
+ nvkm_debug(subdev, "\t writeMask : 0x%x\n",
+ hdr->regions.region_props[i].write_mask);
+ nvkm_debug(subdev, "\t clientMask : 0x%x\n",
+ hdr->regions.region_props[i].client_mask);
+ nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n",
+ hdr->regions.region_props[i].shadow_mem_start_addr);
+ }
+
+ nvkm_debug(subdev, "\tucodeBlobSize : %d\n",
+ hdr->ucode_blob_size);
+ nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n",
+ hdr->ucode_blob_base);
+ nvkm_debug(subdev, "\tvprEnabled : %d\n",
+ hdr->vpr_desc.vpr_enabled);
+ nvkm_debug(subdev, "\tvprStart : 0x%x\n",
+ hdr->vpr_desc.vpr_start);
+ nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
+ hdr->vpr_desc.vpr_end);
+ nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
+ hdr->vpr_desc.hdcp_policies);
+}
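
These dumpers are naturally used while walking a raw WPR image: the wpr_header entries sit packed back to back at the start of the blob, terminated by an entry with an invalid falcon ID. A sketch of such a walk over an in-memory copy, assuming the conventional 0xffffffff end-of-list sentinel:

	const u8 *img = fw->data;		/* raw WPR image bytes */
	const struct wpr_header *wpr;
	u32 offset = 0;

	do {
		wpr = (const void *)(img + offset);
		wpr_header_dump(subdev, wpr);
		offset += sizeof(*wpr);
	} while (wpr->falcon_id != 0xffffffff /* assumed invalid-ID sentinel */);
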
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
new file mode 100644
index 000000000000..00ec764e1aab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/flcn.h>
+
+void
+loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+	nvkm_debug(subdev, "\tcodeDmaBase   : 0x%x\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+ nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1);
+}
+
+void
+loader_config_v1_dump(struct nvkm_subdev *subdev,
+ const struct loader_config_v1 *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved);
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+	nvkm_debug(subdev, "\tcodeDmaBase   : 0x%llx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
+ nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+}
+
+void
+flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc_v1 *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
+ nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+}
+
+void
+flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc_v2 *hdr)
+{
+ flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
new file mode 100644
index 000000000000..746803bd5318
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/fw.h>
+
+const struct nvfw_bin_hdr *
+nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_bin_hdr *hdr = data;
+ nvkm_debug(subdev, "binHdr:\n");
+ nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic);
+ nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver);
+ nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size);
+ nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset);
+ nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ return hdr;
+}
+
+const struct nvfw_bl_desc *
+nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_bl_desc *hdr = data;
+ nvkm_debug(subdev, "blDesc\n");
+ nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag);
+ nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off);
+ nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off);
+ nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size);
+ nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
new file mode 100644
index 000000000000..04ed77cb2eba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/hs.h>
+
+const struct nvfw_hs_header *
+nvfw_hs_header(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_hs_header *hdr = data;
+ nvkm_debug(subdev, "hsHeader:\n");
+ nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset);
+ nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size);
+ nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset);
+ nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size);
+ nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc);
+ nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig);
+ nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset);
+ nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size);
+ return hdr;
+}
+
+const struct nvfw_hs_load_header *
+nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_hs_load_header *hdr = data;
+ int i;
+
+ nvkm_debug(subdev, "hsLoadHeader:\n");
+ nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n",
+ hdr->non_sec_code_off);
+ nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n",
+ hdr->non_sec_code_size);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps);
+ for (i = 0; i < hdr->num_apps; i++) {
+ nvkm_debug(subdev,
+ "\tApp[%d] : offset 0x%x size 0x%x\n", i,
+ hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]);
+ }
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
new file mode 100644
index 000000000000..b847f281ce97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/ls.h>
+
+static void
+nvfw_ls_desc_head(struct nvkm_subdev *subdev,
+ const struct nvfw_ls_desc_head *hdr)
+{
+ char *date;
+
+ nvkm_debug(subdev, "lsUcodeImgDesc:\n");
+ nvkm_debug(subdev, "\tdescriptorSize : %d\n",
+ hdr->descriptor_size);
+ nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size);
+ nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n",
+ hdr->tools_version);
+ nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version);
+
+ date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
+ nvkm_debug(subdev, "\tdate : %s\n", date);
+ kfree(date);
+
+ nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n",
+ hdr->bootloader_start_offset);
+ nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n",
+ hdr->bootloader_size);
+ nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n",
+ hdr->bootloader_imem_offset);
+ nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n",
+ hdr->bootloader_entry_point);
+
+ nvkm_debug(subdev, "\tappStartOffset : 0x%x\n",
+ hdr->app_start_offset);
+ nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size);
+ nvkm_debug(subdev, "\tappImemOffset : 0x%x\n",
+ hdr->app_imem_offset);
+ nvkm_debug(subdev, "\tappImemEntry : 0x%x\n",
+ hdr->app_imem_entry);
+ nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n",
+ hdr->app_dmem_offset);
+ nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n",
+ hdr->app_resident_code_offset);
+ nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n",
+ hdr->app_resident_code_size);
+ nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n",
+ hdr->app_resident_data_offset);
+ nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n",
+ hdr->app_resident_data_size);
+}
+
+const struct nvfw_ls_desc *
+nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc_v1 *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbImemOverlays : %d\n",
+ hdr->nb_imem_overlays);
+ nvkm_debug(subdev, "\tnbDmemOverlays : %d\n",
+		   hdr->nb_dmem_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4e136f3d7c28..fb4fff1222af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: MIT
+include $(src)/nvkm/subdev/acr/Kbuild
include $(src)/nvkm/subdev/bar/Kbuild
include $(src)/nvkm/subdev/bios/Kbuild
include $(src)/nvkm/subdev/bus/Kbuild
@@ -19,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
-include $(src)/nvkm/subdev/secboot/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
new file mode 100644
index 000000000000..5b9f64a8957f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/acr/base.o
+nvkm-y += nvkm/subdev/acr/hsfw.o
+nvkm-y += nvkm/subdev/acr/lsfw.o
+nvkm-y += nvkm/subdev/acr/gm200.o
+nvkm-y += nvkm/subdev/acr/gm20b.o
+nvkm-y += nvkm/subdev/acr/gp102.o
+nvkm-y += nvkm/subdev/acr/gp108.o
+nvkm-y += nvkm/subdev/acr/gp10b.o
+nvkm-y += nvkm/subdev/acr/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
new file mode 100644
index 000000000000..8eb2a930a9b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+static struct nvkm_acr_hsf *
+nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name)
+{
+ struct nvkm_acr_hsf *hsf;
+ list_for_each_entry(hsf, &acr->hsf, head) {
+ if (!strcmp(hsf->name, name))
+ return hsf;
+ }
+ return NULL;
+}
+
+int
+nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_hsf *hsf;
+ int ret;
+
+ hsf = nvkm_acr_hsf_find(acr, name);
+ if (!hsf)
+ return -EINVAL;
+
+ nvkm_debug(subdev, "executing %s binary\n", hsf->name);
+ ret = nvkm_falcon_get(hsf->falcon, subdev);
+ if (ret)
+ return ret;
+
+ ret = hsf->func->boot(acr, hsf);
+ nvkm_falcon_put(hsf->falcon, subdev);
+ if (ret) {
+ nvkm_error(subdev, "%s binary failed\n", hsf->name);
+ return ret;
+ }
+
+ nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name);
+ return 0;
+}
+
+static void
+nvkm_acr_unload(struct nvkm_acr *acr)
+{
+ if (acr->done) {
+ nvkm_acr_hsf_boot(acr, "unload");
+ acr->done = false;
+ }
+}
+
+static int
+nvkm_acr_load(struct nvkm_acr *acr)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_lsf *lsf;
+ u64 start, limit;
+ int ret;
+
+ if (list_empty(&acr->lsf)) {
+ nvkm_debug(subdev, "No LSF(s) present.\n");
+ return 0;
+ }
+
+ ret = acr->func->init(acr);
+ if (ret)
+ return ret;
+
+ acr->func->wpr_check(acr, &start, &limit);
+
+ if (start != acr->wpr_start || limit != acr->wpr_end) {
+ nvkm_error(subdev, "WPR not configured as expected: "
+ "%016llx-%016llx vs %016llx-%016llx\n",
+ acr->wpr_start, acr->wpr_end, start, limit);
+ return -EIO;
+ }
+
+ acr->done = true;
+
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->func->boot) {
+ ret = lsf->func->boot(lsf->falcon);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_acr_reload(struct nvkm_acr *acr)
+{
+ nvkm_acr_unload(acr);
+ return nvkm_acr_load(acr);
+}
+
+static struct nvkm_acr_lsf *
+nvkm_acr_falcon(struct nvkm_device *device)
+{
+ struct nvkm_acr *acr = device->acr;
+ struct nvkm_acr_lsf *lsf;
+
+ if (acr) {
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->func->bootstrap_falcon)
+ return lsf;
+ }
+ }
+
+ return NULL;
+}
+
+int
+nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
+{
+ struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device);
+ struct nvkm_acr *acr = device->acr;
+ unsigned long id;
+
+ if (!acrflcn) {
+ int ret = nvkm_acr_reload(acr);
+ if (ret)
+ return ret;
+
+ return acr->done ? 0 : -EINVAL;
+ }
+
+ if (acrflcn->func->bootstrap_multiple_falcons) {
+ return acrflcn->func->
+ bootstrap_multiple_falcons(acrflcn->falcon, mask);
+ }
+
+ for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
+ int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
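
nvkm_acr_bootstrap_falcons() takes a bitmask of nvkm_acr_lsf_id values so that dependent falcons can be rebooted together when the managing falcon supports it, and one at a time otherwise. A sketch of the sort of call an engine makes after reset, assuming the FECS/GPCCS IDs from the LSF enum:

	unsigned long mask = BIT(NVKM_ACR_LSF_FECS) | BIT(NVKM_ACR_LSF_GPCCS);
	int ret = nvkm_acr_bootstrap_falcons(device, mask);
	if (ret)
		return ret;
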
+bool
+nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr *acr = device->acr;
+ struct nvkm_acr_lsf *lsf;
+
+ if (acr) {
+ list_for_each_entry(lsf, &acr->lsf, head) {
+ if (lsf->id == id)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int
+nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ nvkm_acr_unload(nvkm_acr(subdev));
+ return 0;
+}
+
+static int
+nvkm_acr_init(struct nvkm_subdev *subdev)
+{
+ if (!nvkm_acr_falcon(subdev->device))
+ return 0;
+
+ return nvkm_acr_load(nvkm_acr(subdev));
+}
+
+static void
+nvkm_acr_cleanup(struct nvkm_acr *acr)
+{
+ nvkm_acr_lsfw_del_all(acr);
+ nvkm_acr_hsfw_del_all(acr);
+ nvkm_firmware_put(acr->wpr_fw);
+ acr->wpr_fw = NULL;
+}
+
+static int
+nvkm_acr_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_acr *acr = nvkm_acr(subdev);
+ struct nvkm_acr_hsfw *hsfw;
+ struct nvkm_acr_lsfw *lsfw, *lsft;
+ struct nvkm_acr_lsf *lsf;
+ u32 wpr_size = 0;
+ int ret, i;
+
+ if (list_empty(&acr->hsfw)) {
+ nvkm_debug(subdev, "No HSFW(s)\n");
+ nvkm_acr_cleanup(acr);
+ return 0;
+ }
+
+ /* Determine layout/size of WPR image up-front, as we need to know
+ * it to allocate memory before we begin constructing it.
+ */
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+ /* Cull unknown falcons that are present in WPR image. */
+ if (acr->wpr_fw) {
+ if (!lsfw->func) {
+ nvkm_acr_lsfw_del(lsfw);
+ continue;
+ }
+
+ wpr_size = acr->wpr_fw->size;
+ }
+
+ /* Ensure we've fetched falcon configuration. */
+ ret = nvkm_falcon_get(lsfw->falcon, subdev);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_put(lsfw->falcon, subdev);
+
+ if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
+ return -ENOMEM;
+ lsf->func = lsfw->func;
+ lsf->falcon = lsfw->falcon;
+ lsf->id = lsfw->id;
+ list_add_tail(&lsf->head, &acr->lsf);
+ }
+
+ if (!acr->wpr_fw || acr->wpr_comp)
+ wpr_size = acr->func->wpr_layout(acr);
+
+	/* Allocate/locate WPR and fill the ucode blob pointer.
+	 *
+	 * dGPU: allocate WPR and a shadow blob.
+	 * Tegra: locate WPR via registers, ensure its size is sufficient,
+	 *        then allocate the ucode blob.
+	 */
+ ret = acr->func->wpr_alloc(acr, wpr_size);
+ if (ret)
+ return ret;
+
+ nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
+ acr->wpr_start, acr->wpr_end, acr->shadow_start);
+
+ /* Write WPR to ucode blob. */
+ nvkm_kmap(acr->wpr);
+ if (acr->wpr_fw && !acr->wpr_comp)
+ nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);
+
+ if (!acr->wpr_fw || acr->wpr_comp)
+ acr->func->wpr_build(acr, nvkm_acr_falcon(device));
+ acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);
+
+ if (acr->wpr_fw && acr->wpr_comp) {
+ nvkm_kmap(acr->wpr);
+ for (i = 0; i < acr->wpr_fw->size; i += 4) {
+ u32 us = nvkm_ro32(acr->wpr, i);
+ u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
+ if (fw != us) {
+ nvkm_warn(subdev, "%08x: %08x %08x\n",
+ i, us, fw);
+ }
+ }
+ return -EINVAL;
+ }
+ nvkm_done(acr->wpr);
+
+ /* Allocate instance block for ACR-related stuff. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+ &acr->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
+ if (ret)
+ return ret;
+
+ acr->vmm->debug = acr->subdev.debug;
+
+ ret = nvkm_vmm_join(acr->vmm, acr->inst);
+ if (ret)
+ return ret;
+
+ /* Load HS firmware blobs into ACR VMM. */
+ list_for_each_entry(hsfw, &acr->hsfw, head) {
+ nvkm_debug(subdev, "loading %s fw\n", hsfw->name);
+ ret = hsfw->func->load(acr, hsfw);
+ if (ret)
+ return ret;
+ }
+
+ /* Kill temporary data. */
+ nvkm_acr_cleanup(acr);
+ return 0;
+}
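
The NvAcrWprCompare path above is a development aid: after the driver builds its own WPR image in place, it diffs the result word-by-word against the NVIDIA-provided reference and then deliberately fails with -EINVAL, so it is only useful for validating the generator. A trimmed-down sketch of that word compare, with invented stand-ins for the two images:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical generated and reference image contents. */
	uint32_t ours[4] = { 0x1, 0x2, 0x3, 0x4 };
	uint32_t ref[4]  = { 0x1, 0x2, 0x9, 0x4 };
	unsigned i;

	/* Report mismatches by byte offset, as the warning above does. */
	for (i = 0; i < 4; i++) {
		if (ours[i] != ref[i])
			printf("%08x: %08x %08x\n", i * 4, ours[i], ref[i]);
	}
	return 0;
}
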
+
+static void *
+nvkm_acr_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_acr *acr = nvkm_acr(subdev);
+ struct nvkm_acr_hsf *hsf, *hst;
+ struct nvkm_acr_lsf *lsf, *lst;
+
+ list_for_each_entry_safe(hsf, hst, &acr->hsf, head) {
+ nvkm_vmm_put(acr->vmm, &hsf->vma);
+ nvkm_memory_unref(&hsf->ucode);
+ kfree(hsf->imem);
+ list_del(&hsf->head);
+ kfree(hsf);
+ }
+
+ nvkm_vmm_part(acr->vmm, acr->inst);
+ nvkm_vmm_unref(&acr->vmm);
+ nvkm_memory_unref(&acr->inst);
+
+ nvkm_memory_unref(&acr->wpr);
+
+ list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
+ list_del(&lsf->head);
+ kfree(lsf);
+ }
+
+ nvkm_acr_cleanup(acr);
+ return acr;
+}
+
+static const struct nvkm_subdev_func
+nvkm_acr = {
+ .dtor = nvkm_acr_dtor,
+ .oneinit = nvkm_acr_oneinit,
+ .init = nvkm_acr_init,
+ .fini = nvkm_acr_fini,
+};
+
+static int
+nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
+ if (ret < 0)
+ return ret;
+
+ /* Pre-add LSFs in the order they appear in the FW WPR image so that
+ * we're able to do a binary comparison with our own generator.
+ */
+ ret = acr->func->wpr_parse(acr);
+ if (ret)
+ return ret;
+
+ acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
+ acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
+ return 0;
+}
+
+int
+nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_acr **pacr)
+{
+ struct nvkm_acr *acr;
+ long wprfw;
+
+ if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+ INIT_LIST_HEAD(&acr->hsfw);
+ INIT_LIST_HEAD(&acr->lsfw);
+ INIT_LIST_HEAD(&acr->hsf);
+ INIT_LIST_HEAD(&acr->lsf);
+
+ fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ acr->func = fwif->func;
+
+ wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
+ if (wprfw >= 0) {
+ int ret = nvkm_acr_ctor_wpr(acr, wprfw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
new file mode 100644
index 000000000000..9a6394085cf0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+#include <subdev/timer.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm200_acr_init(struct nvkm_acr *acr)
+{
+ return nvkm_acr_hsf_boot(acr, "load");
+}
+
+void
+gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
+{
+ struct nvkm_device *device = acr->subdev.device;
+
+ nvkm_wr32(device, 0x100cd4, 2);
+ *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ nvkm_wr32(device, 0x100cd4, 3);
+ *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ *limit = *limit + 0x20000;
+}
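
From the code above, 0x100cd4 behaves as an index/data register: writing 2 then 3 selects the WPR1 start and limit fields, the raw value is masked and shifted left by 8 to produce a byte address, and a fixed 0x20000 is added to the limit. The register semantics are inferred from this function alone; the sketch below just replays the decode with invented readback values:

#include <stdint.h>
#include <stdio.h>

/* Decode as above: drop bits 7:0, shift left by 8. */
static uint64_t wpr_addr(uint32_t reg)
{
	return (uint64_t)(reg & 0xffffff00) << 8;
}

int main(void)
{
	uint32_t start_raw = 0x00fe0000;	/* invented readbacks */
	uint32_t limit_raw = 0x00fe0100;

	uint64_t start = wpr_addr(start_raw);
	uint64_t limit = wpr_addr(limit_raw) + 0x20000;

	printf("WPR1: 0x%llx-0x%llx\n",
	       (unsigned long long)start, (unsigned long long)limit);
	return 0;
}
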
+
+void
+gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct wpr_header hdr;
+ struct lsb_header lsb;
+ struct nvkm_acr_lsf *lsfw;
+ u32 offset = 0;
+
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_dump(subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_dump(subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
+}
+
+void
+gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
+ struct lsb_header_tail *hdr)
+{
+ hdr->ucode_off = lsfw->offset.img;
+ hdr->ucode_size = lsfw->ucode_size;
+ hdr->data_size = lsfw->data_size;
+ hdr->bl_code_size = lsfw->bootloader_size;
+ hdr->bl_imem_off = lsfw->bootloader_imem_offset;
+ hdr->bl_data_off = lsfw->offset.bld;
+ hdr->bl_data_size = lsfw->bl_data_size;
+ hdr->app_code_off = lsfw->app_start_offset +
+ lsfw->app_resident_code_offset;
+ hdr->app_code_size = lsfw->app_resident_code_size;
+ hdr->app_data_off = lsfw->app_start_offset +
+ lsfw->app_resident_data_offset;
+ hdr->app_data_size = lsfw->app_resident_data_size;
+ hdr->flags = lsfw->func->flags;
+}
+
+static int
+gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct wpr_header hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_PMU,
+ .lazy_bootstrap = rtos && lsfw->id != rtos->id,
+ .status = WPR_HEADER_V0_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gm200_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
+ return 0;
+}
+
+static int
+gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+ ALIGN(wpr_size, 0x40000), 0x40000, true,
+ &acr->wpr);
+ if (ret)
+ return ret;
+
+ acr->wpr_start = nvkm_memory_addr(acr->wpr);
+ acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
+ return 0;
+}
+
+u32
+gm200_acr_wpr_layout(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 wpr = 0;
+
+ wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.lsb = wpr;
+ wpr += sizeof(struct lsb_header);
+
+ wpr = ALIGN(wpr, 4096);
+ lsfw->offset.img = wpr;
+ wpr += lsfw->img.size;
+
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.bld = wpr;
+ lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+ wpr += lsfw->bl_data_size;
+ }
+
+ return wpr;
+}
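
The layout pass packs the WPR image as a fixed table of WPR headers followed, per falcon, by a 256-byte-aligned LSB header, a 4096-byte-aligned ucode image, and 256-byte-aligned bootloader data. A standalone model of those alignment rules; the image sizes and both struct sizes below are invented placeholders:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t img_size[2] = { 0x5321, 0x1200 };	/* hypothetical */
	uint32_t wpr = 11 * 24;	/* placeholder MAX_LSF * header size */
	int i;

	for (i = 0; i < 2; i++) {
		uint32_t lsb, img, bld;

		wpr = ALIGN_UP(wpr, 256);
		lsb = wpr;
		wpr += 0x94;	/* placeholder LSB header size */

		wpr = ALIGN_UP(wpr, 4096);
		img = wpr;
		wpr += img_size[i];

		wpr = ALIGN_UP(wpr, 256);
		bld = wpr;
		wpr += 256;	/* bld_size rounded up to 256 */

		printf("lsf%d: lsb=%#x img=%#x bld=%#x\n", i,
		       (unsigned)lsb, (unsigned)img, (unsigned)bld);
	}
	printf("total WPR size: %#x\n", (unsigned)wpr);
	return 0;
}
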
+
+int
+gm200_acr_wpr_parse(struct nvkm_acr *acr)
+{
+ const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+
+ while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
+ wpr_header_dump(&acr->subdev, hdr);
+ if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc_v1 hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = hsf->vma->addr + hsf->data_addr,
+ .data_size = hsf->data_size,
+ };
+
+ flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+int
+gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
+ u32 intr_clear, u32 mbox0_ok)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon *falcon = hsf->falcon;
+ u32 mbox0, mbox1;
+ int ret;
+
+ /* Reset falcon. */
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, acr->inst);
+
+ /* Load bootloader into IMEM. */
+ nvkm_falcon_load_imem(falcon, hsf->imem,
+ falcon->code.limit - hsf->imem_size,
+ hsf->imem_size,
+ hsf->imem_tag,
+ 0, false);
+
+ /* Load bootloader data into DMEM. */
+ hsf->func->bld(acr, hsf);
+
+ /* Boot the falcon. */
+ nvkm_mc_intr_mask(device, falcon->owner->index, false);
+
+ nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
+ nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
+ nvkm_falcon_start(falcon);
+ ret = nvkm_falcon_wait_for_halt(falcon, 100);
+ if (ret)
+ return ret;
+
+ /* Check for successful completion. */
+ mbox0 = nvkm_falcon_rd32(falcon, 0x040);
+ mbox1 = nvkm_falcon_rd32(falcon, 0x044);
+ nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1);
+ if (mbox0 && mbox0 != mbox0_ok)
+ return -EIO;
+
+ nvkm_falcon_clear_interrupt(falcon, intr_clear);
+ nvkm_mc_intr_mask(device, falcon->owner->index, true);
+ return ret;
+}
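
The boot sequence seeds mailbox0 with the 0xdeada5a5 sentinel before starting the falcon; the HS ucode is expected to replace it with a status code before halting. Anything other than zero, or the caller's mbox0_ok value, is treated as failure, and a surviving sentinel means the ucode never ran to completion. A small sketch of that check:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the mbox0 test above; -5 stands in for -EIO. */
static int check_mbox0(uint32_t mbox0, uint32_t mbox0_ok)
{
	if (mbox0 && mbox0 != mbox0_ok)
		return -5;
	return 0;
}

int main(void)
{
	printf("%d\n", check_mbox0(0x00000000, 0x1d)); /* 0: success */
	printf("%d\n", check_mbox0(0x0000001d, 0x1d)); /* 0: expected status */
	printf("%d\n", check_mbox0(0xdeada5a5, 0x1d)); /* -5: never completed */
	return 0;
}
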
+
+int
+gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw,
+ struct nvkm_falcon *falcon)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct nvkm_acr_hsf *hsf;
+ int ret;
+
+ /* Patch the appropriate signature (production/debug) into the FW
+ * image, as determined by the mode the falcon is in.
+ */
+ ret = nvkm_falcon_get(falcon, subdev);
+ if (ret)
+ return ret;
+
+ if (hsfw->sig.patch_loc) {
+ if (!falcon->debug) {
+ nvkm_debug(subdev, "patching production signature\n");
+ memcpy(hsfw->image + hsfw->sig.patch_loc,
+ hsfw->sig.prod.data,
+ hsfw->sig.prod.size);
+ } else {
+ nvkm_debug(subdev, "patching debug signature\n");
+ memcpy(hsfw->image + hsfw->sig.patch_loc,
+ hsfw->sig.dbg.data,
+ hsfw->sig.dbg.size);
+ }
+ }
+
+ nvkm_falcon_put(falcon, subdev);
+
+ if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL)))
+ return -ENOMEM;
+ hsf->func = hsfw->func;
+ hsf->name = hsfw->name;
+ list_add_tail(&hsf->head, &acr->hsf);
+
+ hsf->imem_size = hsfw->imem_size;
+ hsf->imem_tag = hsfw->imem_tag;
+ hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL);
+ if (!hsf->imem)
+ return -ENOMEM;
+
+ hsf->non_sec_addr = hsfw->non_sec_addr;
+ hsf->non_sec_size = hsfw->non_sec_size;
+ hsf->sec_addr = hsfw->sec_addr;
+ hsf->sec_size = hsfw->sec_size;
+ hsf->data_addr = hsfw->data_addr;
+ hsf->data_size = hsfw->data_size;
+
+ /* Make the FW image accessible to the HS bootloader. */
+ ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+ hsfw->image_size, 0x1000, false, &hsf->ucode);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(hsf->ucode);
+ nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size);
+ nvkm_done(hsf->ucode);
+
+ ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode),
+ &hsf->vma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0);
+ if (ret)
+ return ret;
+
+ hsf->falcon = falcon;
+ return 0;
+}
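
Signature patching in isolation: the HS image ships with both a production and a debug signature, and the one matching the falcon's fuse state is copied over the blob at sig.patch_loc. All sizes and offsets below are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t image[32] = { 0 };
	const uint8_t sig_prod[4] = { 0xaa, 0xbb, 0xcc, 0xdd };
	const uint8_t sig_dbg[4]  = { 0x11, 0x22, 0x33, 0x44 };
	unsigned patch_loc = 16;	/* hypothetical */
	int debug_fused = 0;		/* pretend production silicon */

	/* Copy whichever signature matches the fuse state over the image. */
	memcpy(image + patch_loc, debug_fused ? sig_dbg : sig_prod,
	       sizeof(sig_prod));

	printf("image[%u] = %#x\n", patch_loc, image[patch_loc]);
	return 0;
}
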
+
+int
+gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d);
+}
+
+int
+gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm200_acr_unload_0 = {
+ .load = gm200_acr_unload_load,
+ .boot = gm200_acr_unload_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+ {}
+};
+
+int
+gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0);
+}
+
+static int
+gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ flcn_acr_desc_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gm200_acr_load_0 = {
+ .load = gm200_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gm200_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gm200_acr = {
+ .load = gm200_acr_load_fwif,
+ .unload = gm200_acr_unload_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm200_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static int
+gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm200_acr_fwif[] = {
+ { 0, gm200_acr_load, &gm200_acr },
+ {}
+};
+
+int
+gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
new file mode 100644
index 000000000000..034a6ede70c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+
+ acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end);
+
+ if ((acr->wpr_end - acr->wpr_start) < wpr_size) {
+ nvkm_error(subdev, "WPR image too big for WPR!\n");
+ return -ENOSPC;
+ }
+
+ return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
+ wpr_size, 0, true, &acr->wpr);
+}
+
+static void
+gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr >> 8,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8,
+ .data_size = hsf->data_size,
+ };
+
+ flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
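
Note the >> 8 on the DMA bases: this v0 flcn_bl_dmem_desc appears to take addresses in 256-byte units, where the v1 descriptor filled by gm200_acr_hsfw_bld is given byte addresses. A tiny sketch of the conversion, with invented addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vma_addr = 0x200000;	/* hypothetical VA of the blob */
	uint64_t data_addr = 0x1400;	/* hypothetical data offset */

	/* Byte addresses become 256-byte units, as in the descriptor. */
	uint32_t code_dma_base = (uint32_t)(vma_addr >> 8);
	uint32_t data_dma_base = (uint32_t)((vma_addr + data_addr) >> 8);

	printf("code=%#x data=%#x (256-byte units)\n",
	       (unsigned)code_dma_base, (unsigned)data_dma_base);
	return 0;
}
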
+
+static int
+gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->ucode_blob_base = nvkm_memory_addr(acr->wpr);
+ desc->ucode_blob_size = nvkm_memory_size(acr->wpr);
+ flcn_acr_desc_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
+}
+
+const struct nvkm_acr_hsf_func
+gm20b_acr_load_0 = {
+ .load = gm20b_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm20b_acr_load_bld,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gm20b_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gm20b_acr = {
+ .load = gm20b_acr_load_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm20b_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+int
+gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gm20b_acr_fwif[] = {
+ { 0, gm20b_acr_load, &gm20b_acr },
+ {}
+};
+
+int
+gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
new file mode 100644
index 000000000000..49e11c46d525
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+void
+gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct wpr_header_v1 hdr;
+ struct lsb_header_v1 lsb;
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_v1_dump(&acr->subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_v1_dump(&acr->subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
+}
+
+int
+gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header_v1 hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+ struct wpr_header_v1 hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_SEC2,
+ .lazy_bootstrap = rtos && lsfw->id != rtos->id,
+ .bin_version = sig->version,
+ .status = WPR_HEADER_V1_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+ return 0;
+}
+
+int
+gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
+{
+ int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
+ ALIGN(wpr_size, 0x40000) << 1, 0x40000, true,
+ &acr->wpr);
+ if (ret)
+ return ret;
+
+ acr->shadow_start = nvkm_memory_addr(acr->wpr);
+ acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1);
+ acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1);
+ return 0;
+}
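
gp102 doubles the 0x40000-aligned WPR size and splits the buffer: the first half is the shadow copy, the second half the live WPR. A model of that split with invented numbers:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t wpr_size = 0x54321;	/* hypothetical layout result */
	uint64_t base = 0x100000;	/* hypothetical allocation address */
	uint64_t alloc = ALIGN_UP(wpr_size, 0x40000) << 1;

	uint64_t shadow_start = base;
	uint64_t wpr_start = shadow_start + (alloc >> 1);
	uint64_t wpr_end = wpr_start + (alloc >> 1);

	printf("shadow=0x%llx wpr=0x%llx-0x%llx\n",
	       (unsigned long long)shadow_start,
	       (unsigned long long)wpr_start,
	       (unsigned long long)wpr_end);
	return 0;
}
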
+
+u32
+gp102_acr_wpr_layout(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 wpr = 0;
+
+ wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1);
+ wpr = ALIGN(wpr, 256);
+
+ wpr += 0x100; /* Shared sub-WPR headers. */
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.lsb = wpr;
+ wpr += sizeof(struct lsb_header_v1);
+
+ wpr = ALIGN(wpr, 4096);
+ lsfw->offset.img = wpr;
+ wpr += lsfw->img.size;
+
+ wpr = ALIGN(wpr, 256);
+ lsfw->offset.bld = wpr;
+ lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
+ wpr += lsfw->bl_data_size;
+ }
+
+ return wpr;
+}
+
+int
+gp102_acr_wpr_parse(struct nvkm_acr *acr)
+{
+ const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+
+ while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
+ wpr_header_v1_dump(&acr->subdev, hdr);
+ if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
+ {}
+};
+
+int
+gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr];
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ desc->regions.region_props[0].shadow_mem_start_addr =
+ acr->shadow_start >> 8;
+ flcn_acr_desc_v1_dump(&acr->subdev, desc);
+
+ return gm200_acr_hsfw_load(acr, hsfw,
+ &acr->subdev.device->sec2->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+gp102_acr_load_0 = {
+ .load = gp102_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gm200_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp102_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp102_acr = {
+ .load = gp102_acr_load_fwif,
+ .unload = gp102_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_build = gp102_acr_wpr_build,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+int
+gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
+ acr, "acr/bl", "acr/ucode_load", "load");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/unload_bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+gp102_acr_fwif[] = {
+ { 0, gp102_acr_load, &gp102_acr },
+ {}
+};
+
+int
+gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
new file mode 100644
index 000000000000..f10dc9112678
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+#include <nvfw/flcn.h>
+
+void
+gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ struct flcn_bl_dmem_desc_v2 hsdesc = {
+ .ctx_dma = FALCON_DMAIDX_VIRT,
+ .code_dma_base = hsf->vma->addr,
+ .non_sec_code_off = hsf->non_sec_addr,
+ .non_sec_code_size = hsf->non_sec_size,
+ .sec_code_off = hsf->sec_addr,
+ .sec_code_size = hsf->sec_size,
+ .code_entry_point = 0,
+ .data_dma_base = hsf->vma->addr + hsf->data_addr,
+ .data_size = hsf->data_size,
+ .argc = 0,
+ .argv = 0,
+ };
+
+ flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc);
+
+ nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
+}
+
+const struct nvkm_acr_hsf_func
+gp108_acr_unload_0 = {
+ .load = gm200_acr_unload_load,
+ .boot = gm200_acr_unload_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+ {}
+};
+
+static const struct nvkm_acr_hsf_func
+gp108_acr_load_0 = {
+ .load = gp102_acr_load_load,
+ .boot = gm200_acr_load_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
+
+static const struct nvkm_acr_hsf_fwif
+gp108_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp108_acr = {
+ .load = gp108_acr_load_fwif,
+ .unload = gp108_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_build = gp102_acr_wpr_build,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp108_acr_fwif[] = {
+ { 0, gp102_acr_load, &gp108_acr },
+ {}
+};
+
+int
+gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
new file mode 100644
index 000000000000..39de64292a41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
+#endif
+
+static const struct nvkm_acr_hsf_fwif
+gp10b_acr_load_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
+ {}
+};
+
+static const struct nvkm_acr_func
+gp10b_acr = {
+ .load = gp10b_acr_load_fwif,
+ .wpr_parse = gm200_acr_wpr_parse,
+ .wpr_layout = gm200_acr_wpr_layout,
+ .wpr_alloc = gm20b_acr_wpr_alloc,
+ .wpr_build = gm200_acr_wpr_build,
+ .wpr_patch = gm200_acr_wpr_patch,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = gm200_acr_init,
+};
+
+static const struct nvkm_acr_fwif
+gp10b_acr_fwif[] = {
+ { 0, gm20b_acr_load, &gp10b_acr },
+ {}
+};
+
+int
+gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
new file mode 100644
index 000000000000..aecce2dac558
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+static void
+nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw)
+{
+ list_del(&hsfw->head);
+ kfree(hsfw->imem);
+ kfree(hsfw->image);
+ kfree(hsfw->sig.prod.data);
+ kfree(hsfw->sig.dbg.data);
+ kfree(hsfw);
+}
+
+void
+nvkm_acr_hsfw_del_all(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_hsfw *hsfw, *hsft;
+ list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
+ nvkm_acr_hsfw_del(hsfw);
+ }
+}
+
+static int
+nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
+ struct nvkm_acr_hsfw *hsfw)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct firmware *fw;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_hs_header *fwhdr;
+ const struct nvfw_hs_load_header *lhdr;
+ u32 loc, sig;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, name, ver, &fw);
+ if (ret < 0)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, fw->data);
+ fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset);
+
+ /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
+ * standard format, and don't have the indirection seen in the 0x10de
+ * case.
+ */
+ switch (hdr->bin_magic) {
+ case 0x000010de:
+ loc = *(u32 *)(fw->data + fwhdr->patch_loc);
+ sig = *(u32 *)(fw->data + fwhdr->patch_sig);
+ break;
+ case 0x3b1d14f0:
+ loc = fwhdr->patch_loc;
+ sig = fwhdr->patch_sig;
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+
+ lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset);
+
+ if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size);
+ hsfw->image_size = hdr->data_size;
+ hsfw->non_sec_addr = lhdr->non_sec_code_off;
+ hsfw->non_sec_size = lhdr->non_sec_code_size;
+ hsfw->sec_addr = lhdr->apps[0];
+ hsfw->sec_size = lhdr->apps[lhdr->num_apps];
+ hsfw->data_addr = lhdr->data_dma_base;
+ hsfw->data_size = lhdr->data_size;
+
+ hsfw->sig.prod.size = fwhdr->sig_prod_size;
+ hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+ if (!hsfw->sig.prod.data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
+ hsfw->sig.prod.size);
+
+ hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
+ hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+ if (!hsfw->sig.dbg.data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
+ hsfw->sig.dbg.size);
+
+ hsfw->sig.patch_loc = loc;
+done:
+ nvkm_firmware_put(fw);
+ return ret;
+}
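
Two container layouts are handled above: with the 0x10de magic, patch_loc/patch_sig are offsets to u32 values stored inside the image, while the older 0x3b1d14f0 layout stores the values directly in the header. A sketch of that indirection (buffer contents invented; assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* -22 stands in for -EINVAL. */
static int resolve(uint32_t magic, const uint8_t *fw,
		   uint32_t field, uint32_t *loc)
{
	switch (magic) {
	case 0x000010de:	/* field is an offset into the image */
		memcpy(loc, fw + field, sizeof(*loc));
		return 0;
	case 0x3b1d14f0:	/* field is the value itself */
		*loc = field;
		return 0;
	default:
		return -22;
	}
}

int main(void)
{
	uint8_t fw[16] = { 0 };
	uint32_t loc = 0;

	fw[8] = 0x40;	/* pretend patch_loc = 0x40 is stored at offset 8 */
	resolve(0x000010de, fw, 8, &loc);
	printf("indirect: %#x\n", (unsigned)loc);

	resolve(0x3b1d14f0, fw, 0x40, &loc);
	printf("direct:   %#x\n", (unsigned)loc);
	return 0;
}
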
+
+static int
+nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
+ struct nvkm_acr_hsfw *hsfw)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *desc;
+ const struct firmware *fw;
+ u8 *data;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev, name, ver, &fw);
+ if (ret)
+ return ret;
+
+ hdr = nvfw_bin_hdr(subdev, fw->data);
+ desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset);
+ data = (void *)fw->data + hdr->data_offset;
+
+ hsfw->imem_size = desc->code_size;
+ hsfw->imem_tag = desc->start_tag;
+ hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
+ memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
+
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *fwif)
+{
+ struct nvkm_acr_hsfw *hsfw;
+ int ret;
+
+ if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL)))
+ return -ENOMEM;
+
+ hsfw->func = fwif->func;
+ hsfw->name = name;
+ list_add_tail(&hsfw->head, &acr->hsfw);
+
+ ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw);
+ if (ret)
+ goto done;
+
+ ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw);
+done:
+ if (ret)
+ nvkm_acr_hsfw_del(hsfw);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
new file mode 100644
index 000000000000..07d1830126ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <nvfw/fw.h>
+#include <nvfw/ls.h>
+
+void
+nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
+{
+ nvkm_blob_dtor(&lsfw->img);
+ nvkm_firmware_put(lsfw->sig);
+ list_del(&lsfw->head);
+ kfree(lsfw);
+}
+
+void
+nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
+{
+ struct nvkm_acr_lsfw *lsfw, *lsft;
+ list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+ nvkm_acr_lsfw_del(lsfw);
+ }
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id == id)
+ return lsfw;
+ }
+ return NULL;
+}
+
+struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
+ struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_acr_lsfw *lsfw;
+
+ if (!acr)
+ return ERR_PTR(-ENOSYS);
+
+ lsfw = nvkm_acr_lsfw_get(acr, id);
+ if (lsfw && lsfw->func) {
+ nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
+ return ERR_PTR(-EEXIST);
+ }
+
+ if (!lsfw) {
+ if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
+ return ERR_PTR(-ENOMEM);
+
+ lsfw->id = id;
+ list_add_tail(&lsfw->head, &acr->lsfw);
+ }
+
+ lsfw->func = func;
+ lsfw->falcon = falcon;
+ return lsfw;
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func,
+ const struct firmware **pdesc)
+{
+ struct nvkm_acr *acr = subdev->device->acr;
+ struct nvkm_acr_lsfw *lsfw;
+ int ret;
+
+ if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+ return lsfw;
+
+ ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc);
+done:
+ if (ret) {
+ nvkm_acr_lsfw_del(lsfw);
+ return ERR_PTR(ret);
+ }
+
+ return lsfw;
+}
+
+static void
+nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
+ lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;
+
+ lsfw->app_size = ALIGN(desc->app_size, 256);
+ lsfw->app_start_offset = desc->app_start_offset;
+ lsfw->app_imem_entry = desc->app_imem_entry;
+ lsfw->app_resident_code_offset = desc->app_resident_code_offset;
+ lsfw->app_resident_code_size = desc->app_resident_code_size;
+ lsfw->app_resident_data_offset = desc->app_resident_data_offset;
+ lsfw->app_resident_data_size = desc->app_resident_data_size;
+
+ lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+ lsfw->bootloader_size;
+ lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+ lsfw->ucode_size;
+}
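
The size split can be replayed numerically: ucode_size covers the bootloader plus the 256-byte-aligned resident-code portion of the app, and data_size is whatever of the aligned app remains. With invented descriptor values:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t bootloader_size = ALIGN_UP(0x100, 256);	/* hypothetical */
	uint32_t app_size = ALIGN_UP(0x5000, 256);
	uint32_t app_resident_data_offset = 0x3300;

	/* Same arithmetic as nvkm_acr_lsfw_from_desc() above. */
	uint32_t ucode_size = ALIGN_UP(app_resident_data_offset, 256) +
			      bootloader_size;
	uint32_t data_size = app_size + bootloader_size - ucode_size;

	printf("ucode=%#x data=%#x\n",
	       (unsigned)ucode_size, (unsigned)data_size);
	return 0;
}
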
+
+int
+nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ const struct firmware *fw;
+ struct nvkm_acr_lsfw *lsfw;
+
+ lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+ func, &fw);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
+
+ nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw);
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ const struct firmware *fw;
+ struct nvkm_acr_lsfw *lsfw;
+
+ lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
+ func, &fw);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
+
+ nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw);
+ nvkm_firmware_put(fw);
+ return 0;
+}
+
+int
+nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev,
+ struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id,
+ const char *path, int ver,
+ const struct nvkm_acr_lsf_func *func)
+{
+ struct nvkm_acr *acr = subdev->device->acr;
+ struct nvkm_acr_lsfw *lsfw;
+ const struct firmware *bl = NULL, *inst = NULL, *data = NULL;
+ const struct nvfw_bin_hdr *hdr;
+ const struct nvfw_bl_desc *desc;
+ u32 *bldata;
+ int ret;
+
+ if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+ return PTR_ERR(lsfw);
+
+ ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
+ if (ret)
+ goto done;
+
+ hdr = nvfw_bin_hdr(subdev, bl->data);
+ desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+ bldata = (void *)(bl->data + hdr->data_offset);
+
+ ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data);
+ if (ret)
+ goto done;
+
+ ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+ if (ret)
+ goto done;
+
+ lsfw->bootloader_size = ALIGN(desc->code_size, 256);
+ lsfw->bootloader_imem_offset = desc->start_tag << 8;
+
+ lsfw->app_start_offset = lsfw->bootloader_size;
+ lsfw->app_imem_entry = 0;
+ lsfw->app_resident_code_offset = 0;
+ lsfw->app_resident_code_size = ALIGN(inst->size, 256);
+ lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
+ lsfw->app_resident_data_size = ALIGN(data->size, 256);
+ lsfw->app_size = lsfw->app_resident_code_size +
+ lsfw->app_resident_data_size;
+
+ lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
+ if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
+ memcpy(lsfw->img.data + lsfw->app_start_offset +
+ lsfw->app_resident_code_offset, inst->data, inst->size);
+ memcpy(lsfw->img.data + lsfw->app_start_offset +
+ lsfw->app_resident_data_offset, data->data, data->size);
+
+ lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
+ lsfw->bootloader_size;
+ lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
+ lsfw->ucode_size;
+
+done:
+ if (ret)
+ nvkm_acr_lsfw_del(lsfw);
+ nvkm_firmware_put(data);
+ nvkm_firmware_put(inst);
+ nvkm_firmware_put(bl);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
new file mode 100644
index 000000000000..d8ba72806d39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -0,0 +1,151 @@
+#ifndef __NVKM_ACR_PRIV_H__
+#define __NVKM_ACR_PRIV_H__
+#include <subdev/acr.h>
+struct lsb_header_tail;
+
+struct nvkm_acr_fwif {
+ int version;
+ int (*load)(struct nvkm_acr *, int version,
+ const struct nvkm_acr_fwif *);
+ const struct nvkm_acr_func *func;
+};
+
+int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
+
+struct nvkm_acr_lsf;
+struct nvkm_acr_func {
+ const struct nvkm_acr_hsf_fwif *load;
+ const struct nvkm_acr_hsf_fwif *ahesasc;
+ const struct nvkm_acr_hsf_fwif *asb;
+ const struct nvkm_acr_hsf_fwif *unload;
+ int (*wpr_parse)(struct nvkm_acr *);
+ u32 (*wpr_layout)(struct nvkm_acr *);
+ int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size);
+ int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos);
+ void (*wpr_patch)(struct nvkm_acr *, s64 adjust);
+ void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
+ int (*init)(struct nvkm_acr *);
+ void (*fini)(struct nvkm_acr *);
+};
+
+int gm200_acr_wpr_parse(struct nvkm_acr *);
+u32 gm200_acr_wpr_layout(struct nvkm_acr *);
+int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+void gm200_acr_wpr_patch(struct nvkm_acr *, s64);
+void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *);
+void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *,
+ struct lsb_header_tail *);
+int gm200_acr_init(struct nvkm_acr *);
+
+int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+
+int gp102_acr_wpr_parse(struct nvkm_acr *);
+u32 gp102_acr_wpr_layout(struct nvkm_acr *);
+int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
+int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
+int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *);
+void gp102_acr_wpr_patch(struct nvkm_acr *, s64);
+
+struct nvkm_acr_hsfw {
+ const struct nvkm_acr_hsf_func *func;
+ const char *name;
+ struct list_head head;
+
+ u32 imem_size;
+ u32 imem_tag;
+ u32 *imem;
+
+ u8 *image;
+ u32 image_size;
+ u32 non_sec_addr;
+ u32 non_sec_size;
+ u32 sec_addr;
+ u32 sec_size;
+ u32 data_addr;
+ u32 data_size;
+
+ struct {
+ struct {
+ void *data;
+ u32 size;
+ } prod, dbg;
+ u32 patch_loc;
+ } sig;
+};
+
+struct nvkm_acr_hsf_fwif {
+ int version;
+ int (*load)(struct nvkm_acr *, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *);
+ const struct nvkm_acr_hsf_func *func;
+};
+
+int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *,
+ const char *, int, const struct nvkm_acr_hsf_fwif *);
+void nvkm_acr_hsfw_del_all(struct nvkm_acr *);
+
+struct nvkm_acr_hsf {
+ const struct nvkm_acr_hsf_func *func;
+ const char *name;
+ struct list_head head;
+
+ u32 imem_size;
+ u32 imem_tag;
+ u32 *imem;
+
+ u32 non_sec_addr;
+ u32 non_sec_size;
+ u32 sec_addr;
+ u32 sec_size;
+ u32 data_addr;
+ u32 data_size;
+
+ struct nvkm_memory *ucode;
+ struct nvkm_vma *vma;
+ struct nvkm_falcon *falcon;
+};
+
+struct nvkm_acr_hsf_func {
+ int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+ int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+ void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *);
+};
+
+int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *,
+ struct nvkm_falcon *);
+int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *,
+ u32 clear_intr, u32 mbox0_ok);
+
+int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm200_acr_unload_0;
+int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
+void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+extern const struct nvkm_acr_hsf_func gm20b_acr_load_0;
+
+int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
+
+extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
+void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
+
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
+ struct nvkm_acr **);
+int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
+
+struct nvkm_acr_lsf {
+ const struct nvkm_acr_lsf_func *func;
+ struct nvkm_falcon *falcon;
+ enum nvkm_acr_lsf_id id;
+ struct list_head head;
+};
+
+struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *,
+ struct nvkm_acr *, struct nvkm_falcon *,
+ enum nvkm_acr_lsf_id);
+void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *);
+void nvkm_acr_lsfw_del_all(struct nvkm_acr *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
new file mode 100644
index 000000000000..7f4b89d82d32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/pmu.h>
+#include <engine/sec2.h>
+
+#include <nvfw/acr.h>
+
+static int
+tu102_acr_init(struct nvkm_acr *acr)
+{
+ int ret = nvkm_acr_hsf_boot(acr, "AHESASC");
+ if (ret)
+ return ret;
+
+ return nvkm_acr_hsf_boot(acr, "ASB");
+}
+
+static int
+tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+	/*XXX: shared sub-WPR headers aren't supported yet; write just
+	 * the list terminator for now.
+	 */
+ nvkm_wo32(acr->wpr, 0x200, 0xffffffff);
+
+ /* Fill per-LSF structures. */
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
+ struct wpr_header_v1 hdr = {
+ .falcon_id = lsfw->id,
+ .lsb_offset = lsfw->offset.lsb,
+ .bootstrap_owner = NVKM_ACR_LSF_GSPLITE,
+ .lazy_bootstrap = 1,
+ .bin_version = sig->version,
+ .status = WPR_HEADER_V1_STATUS_COPY,
+ };
+
+ /* Write WPR header. */
+ nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
+ offset += sizeof(hdr);
+
+ /* Write LSB header. */
+ ret = gp102_acr_wpr_build_lsb(acr, lsfw);
+ if (ret)
+ return ret;
+
+ /* Write ucode image. */
+ nvkm_wobj(acr->wpr, lsfw->offset.img,
+ lsfw->img.data,
+ lsfw->img.size);
+
+ /* Write bootloader data. */
+ lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
+ }
+
+ /* Finalise WPR. */
+ nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
+ return 0;
+}
+
+static int
+tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
+{
+ return gm200_acr_hsfw_boot(acr, hsf, 0, 0);
+}
+
+static int
+tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw,
+ const char *name, int version,
+ const struct nvkm_acr_hsf_fwif *fwif)
+{
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_unload_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static int
+tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
+{
+ return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon);
+}
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_asb_0 = {
+ .load = tu102_acr_asb_load,
+ .boot = tu102_acr_hsfw_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_asb_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static const struct nvkm_acr_hsf_func
+tu102_acr_ahesasc_0 = {
+ .load = gp102_acr_load_load,
+ .boot = tu102_acr_hsfw_boot,
+ .bld = gp108_acr_hsfw_bld,
+};
+
+MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+
+static const struct nvkm_acr_hsf_fwif
+tu102_acr_ahesasc_fwif[] = {
+ { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
+ { -1, tu102_acr_hsfw_nofw },
+ {}
+};
+
+static const struct nvkm_acr_func
+tu102_acr = {
+ .ahesasc = tu102_acr_ahesasc_fwif,
+ .asb = tu102_acr_asb_fwif,
+ .unload = tu102_acr_unload_fwif,
+ .wpr_parse = gp102_acr_wpr_parse,
+ .wpr_layout = gp102_acr_wpr_layout,
+ .wpr_alloc = gp102_acr_wpr_alloc,
+ .wpr_patch = gp102_acr_wpr_patch,
+ .wpr_build = tu102_acr_wpr_build,
+ .wpr_check = gm200_acr_wpr_check,
+ .init = tu102_acr_init,
+};
+
+static int
+tu102_acr_load(struct nvkm_acr *acr, int version,
+ const struct nvkm_acr_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ const struct nvkm_acr_hsf_fwif *hsfwif;
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
+ acr, "acr/bl", "acr/ucode_ahesasc",
+ "AHESASC");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
+ acr, "acr/bl", "acr/ucode_asb", "ASB");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
+ acr, "acr/unload_bl", "acr/ucode_unload",
+ "unload");
+ if (IS_ERR(hsfwif))
+ return PTR_ERR(hsfwif);
+
+ return 0;
+}
+
+static const struct nvkm_acr_fwif
+tu102_acr_fwif[] = {
+ { 0, tu102_acr_load, &tu102_acr },
+ {}
+};
+
+int
+tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+{
+ return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+}
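
A note on the tables above: nouveau's firmware-interface (fwif) arrays are scanned in order by nvkm_firmware_load(), each entry pairing a firmware version with a load callback and per-version function pointers. A version of -1 is the "no external firmware" fallback (tu102_acr_hsfw_nofw simply returns 0), and the empty {} initializer terminates the walk. Below is a minimal userspace sketch of that selection scheme; every name in it is hypothetical, invented only to illustrate the convention.

#include <stdio.h>

/* Hypothetical mirror of the { version, load, func } convention. */
struct fwif {
	int version;                  /* -1 = works without firmware */
	int (*load)(int version);     /* 0 on success */
	const char *func;             /* stand-in for per-version ops */
};

static int try_load(int version)      /* pretend versioned fw is absent */
{
	return version >= 0 ? -1 : 0; /* only the fallback succeeds */
}

static const struct fwif table[] = {
	{  0, try_load, "versioned ops" },
	{ -1, try_load, "no-fw ops" },    /* fallback, like *_nofw entries */
	{}                                /* terminator */
};

int main(void)
{
	for (const struct fwif *f = table; f->load; f++) {
		if (f->load(f->version) == 0) {
			printf("selected: %s\n", f->func);
			break;
		}
	}
	return 0;
}
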
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 53b9d638f2c8..d65ec719f153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -2,5 +2,6 @@
nvkm-y += nvkm/subdev/fault/base.o
nvkm-y += nvkm/subdev/fault/user.o
nvkm-y += nvkm/subdev/fault/gp100.o
+nvkm-y += nvkm/subdev/fault/gp10b.o
nvkm-y += nvkm/subdev/fault/gv100.o
nvkm-y += nvkm/subdev/fault/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index ca251560d3e0..f6dca97140d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
return ret;
/* Pin fault buffer in BAR2. */
- buffer->addr = nvkm_memory_bar2(buffer->mem);
+ buffer->addr = fault->func->buffer.pin(buffer);
if (buffer->addr == ~0ULL)
return -EFAULT;
@@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
struct nvkm_fault *fault = nvkm_fault(subdev);
int i;
+ nvkm_notify_fini(&fault->nrpfb);
nvkm_event_fini(&fault->event);
for (i = 0; i < fault->buffer_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 4f3c4e091117..f6b189cc4330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,25 +21,26 @@
*/
#include "priv.h"
+#include <core/memory.h>
#include <subdev/mc.h>
#include <nvif/class.h>
-static void
+void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
}
-static void
+void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
}
-static void
+void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
@@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}
-static void
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+ return nvkm_memory_bar2(buffer->mem);
+}
+
+void
gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
@@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
buffer->put = 0x002a80;
}
-static void
+void
gp100_fault_intr(struct nvkm_fault *fault)
{
nvkm_event_send(&fault->event, 1, 0, NULL, 0);
@@ -68,6 +74,7 @@ gp100_fault = {
.buffer.nr = 1,
.buffer.entry_size = 32,
.buffer.info = gp100_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = gp100_fault_buffer_init,
.buffer.fini = gp100_fault_buffer_fini,
.buffer.intr = gp100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
new file mode 100644
index 000000000000..9e66d1f7654d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <core/memory.h>
+
+#include <nvif/class.h>
+
+u64
+gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
+{
+ return nvkm_memory_addr(buffer->mem);
+}
+
+static const struct nvkm_fault_func
+gp10b_fault = {
+ .intr = gp100_fault_intr,
+ .buffer.nr = 1,
+ .buffer.entry_size = 32,
+ .buffer.info = gp100_fault_buffer_info,
+ .buffer.pin = gp10b_fault_buffer_pin,
+ .buffer.init = gp100_fault_buffer_init,
+ .buffer.fini = gp100_fault_buffer_fini,
+ .buffer.intr = gp100_fault_buffer_intr,
+ .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
+};
+
+int
+gp10b_fault_new(struct nvkm_device *device, int index,
+ struct nvkm_fault **pfault)
+{
+ return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 6747f09c2dc3..2707be4ffabc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -214,6 +214,7 @@ gv100_fault = {
.buffer.nr = 2,
.buffer.entry_size = 32,
.buffer.info = gv100_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = gv100_fault_buffer_init,
.buffer.fini = gv100_fault_buffer_fini,
.buffer.intr = gv100_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 975e66ac6344..f6f1dd7eee1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -30,6 +30,7 @@ struct nvkm_fault_func {
int nr;
u32 entry_size;
void (*info)(struct nvkm_fault_buffer *);
+ u64 (*pin)(struct nvkm_fault_buffer *);
void (*init)(struct nvkm_fault_buffer *);
void (*fini)(struct nvkm_fault_buffer *);
void (*intr)(struct nvkm_fault_buffer *, bool enable);
@@ -40,6 +41,15 @@ struct nvkm_fault_func {
} user;
};
+void gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable);
+void gp100_fault_buffer_fini(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_init(struct nvkm_fault_buffer *);
+u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *);
+void gp100_fault_buffer_info(struct nvkm_fault_buffer *);
+void gp100_fault_intr(struct nvkm_fault *);
+
+u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *);
+
int gv100_fault_oneinit(struct nvkm_fault *);
int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *,
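
The buffer.pin hook introduced here exists because discrete GPUs expose the fault buffer through a BAR2 mapping, while gp10b, an integrated Tegra GPU, has no BAR2 and must hand the hardware the buffer's physical address instead; ~0ULL remains the "pin failed" sentinel checked in base.c. A sketch of the dispatch these hunks set up, based only on the code above:

	/* base.c no longer hard-codes BAR2, it asks the per-chip hook. */
	buffer->addr = fault->func->buffer.pin(buffer);
		/* gp100/gv100/tu102: nvkm_memory_bar2(buffer->mem), BAR2 address */
		/* gp10b (Tegra):     nvkm_memory_addr(buffer->mem), physical address */
	if (buffer->addr == ~0ULL)
		return -EFAULT;
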
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index fa1dfe5692b0..45a6a68b9f48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -154,6 +154,7 @@ tu102_fault = {
.buffer.nr = 2,
.buffer.entry_size = 32,
.buffer.info = tu102_fault_buffer_info,
+ .buffer.pin = gp100_fault_buffer_pin,
.buffer.init = tu102_fault_buffer_init,
.buffer.fini = tu102_fault_buffer_fini,
.buffer.intr = tu102_fault_buffer_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index b2bb5a3ccb02..5940e0dea2f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -126,6 +126,34 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
}
static int
+nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ int ret;
+
+ nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
+
+ if (!fb->vpr_scrubber.size) {
+ nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
+ return 0;
+ }
+
+ ret = fb->func->vpr.scrub(fb);
+ if (ret) {
+ nvkm_error(subdev, "VPR scrubber binary failed\n");
+ return ret;
+ }
+
+ if (fb->func->vpr.scrub_required(fb)) {
+ nvkm_error(subdev, "VPR still locked after scrub!\n");
+ return -EIO;
+ }
+
+ nvkm_debug(subdev, "VPR scrubber binary successful\n");
+ return 0;
+}
+
+static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
@@ -154,6 +182,14 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
+
+ if (fb->func->vpr.scrub_required &&
+ fb->func->vpr.scrub_required(fb)) {
+ ret = nvkm_fb_init_scrub_vpr(fb);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -172,6 +208,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
nvkm_mm_fini(&fb->tags);
nvkm_ram_del(&fb->ram);
+ nvkm_blob_dtor(&fb->vpr_scrubber);
+
if (fb->func->dtor)
return fb->func->dtor(fb);
return fb;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b4d74e815674..fc8c93aa3da5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -24,7 +24,81 @@
#include "gf100.h"
#include "ram.h"
+#include <core/firmware.h>
#include <core/memory.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+#include <engine/nvdec.h>
+
+int
+gp102_fb_vpr_scrub(struct nvkm_fb *fb)
+{
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon *falcon = &device->nvdec[0]->falcon;
+ struct nvkm_blob *blob = &fb->vpr_scrubber;
+ const struct nvfw_bin_hdr *hsbin_hdr;
+ const struct nvfw_hs_header *fw_hdr;
+ const struct nvfw_hs_load_header *lhdr;
+ void *scrub_data;
+ u32 patch_loc, patch_sig;
+ int ret;
+
+ nvkm_falcon_get(falcon, subdev);
+
+ hsbin_hdr = nvfw_bin_hdr(subdev, blob->data);
+ fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset);
+ lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset);
+ scrub_data = blob->data + hsbin_hdr->data_offset;
+
+ patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc);
+ patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig);
+ if (falcon->debug) {
+ memcpy(scrub_data + patch_loc,
+ blob->data + fw_hdr->sig_dbg_offset + patch_sig,
+ fw_hdr->sig_dbg_size);
+ } else {
+ memcpy(scrub_data + patch_loc,
+ blob->data + fw_hdr->sig_prod_offset + patch_sig,
+ fw_hdr->sig_prod_size);
+ }
+
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, NULL);
+
+ nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
+ lhdr->non_sec_code_size,
+ lhdr->non_sec_code_off >> 8, 0, false);
+ nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
+ ALIGN(lhdr->apps[0], 0x100),
+ lhdr->apps[1],
+ lhdr->apps[0] >> 8, 0, true);
+ nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
+ lhdr->data_size, 0);
+
+ nvkm_falcon_set_start_addr(falcon, 0x0);
+ nvkm_falcon_start(falcon);
+
+ ret = nvkm_falcon_wait_for_halt(falcon, 500);
+ if (ret < 0) {
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ /* put nvdec in clean state - without reset it will remain in HS mode */
+ nvkm_falcon_reset(falcon);
+end:
+ nvkm_falcon_put(falcon, subdev);
+ return ret;
+}
+
+bool
+gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ nvkm_wr32(device, 0x100cd0, 0x2);
+ return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
+}
static const struct nvkm_fb_func
gp102_fb = {
@@ -33,11 +107,32 @@ gp102_fb = {
.init = gp100_fb_init,
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
+ .vpr.scrub_required = gp102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp100_ram_new,
};
int
+gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+ int index, struct nvkm_fb **pfb)
+{
+ int ret = gf100_fb_new_(func, device, index, pfb);
+ if (ret)
+ return ret;
+
+ nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
+ &(*pfb)->vpr_scrubber);
+ return 0;
+}
+
+int
gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp102_fb, device, index, pfb);
+ return gp102_fb_new_(&gp102_fb, device, index, pfb);
}
+
+MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 3c5e02e9794a..389bad312bf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -35,6 +35,8 @@ gv100_fb = {
.init = gp100_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
+ .vpr.scrub_required = gp102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp100_ram_new,
.default_bigpage = 16,
};
@@ -42,5 +44,10 @@ gv100_fb = {
int
gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gv100_fb, device, index, pfb);
+ return gp102_fb_new_(&gv100_fb, device, index, pfb);
}
+
+MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index c4e9f55af283..5be9c563350d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -17,6 +17,11 @@ struct nvkm_fb_func {
void (*intr)(struct nvkm_fb *);
struct {
+ bool (*scrub_required)(struct nvkm_fb *);
+ int (*scrub)(struct nvkm_fb *);
+ } vpr;
+
+ struct {
int regions;
void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
@@ -72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *);
void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
+
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+ struct nvkm_fb **);
+bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
+int gp102_fb_vpr_scrub(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index ac87a3b6b7c9..ba43fe158b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func,
static const struct nvkm_ram_func
gf100_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
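
The ULL suffixes added to .upper throughout these ram files are belt-and-braces: C already promotes an unsuffixed integer constant that overflows int through the wider types, so 0x0200000000 is 64-bit even on 32-bit builds, but the explicit suffix documents the intent and avoids compiler-dependent warnings about large constants. A quick standalone check of that promotion rule:

#include <assert.h>

int main(void)
{
	/* Both spellings end up 64-bit wide (on any platform with a
	 * 64-bit long long); the suffix only makes the type explicit. */
	static_assert(sizeof(0x0200000000) == 8, "promotes past int");
	static_assert(sizeof(0x0200000000ULL) == 8, "explicitly 64-bit");
	return 0;
}
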
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 70a06e3cd55a..d97fa43efb91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
static const struct nvkm_ram_func
gf108_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 456aed1f2a02..d350d92852d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
static const struct nvkm_ram_func
gk104_ram = {
- .upper = 0x0200000000,
+ .upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 27c68e3f9772..be91da854dca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
static const struct nvkm_ram_func
gm107_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index 6b0cac1fe7b4..8f91ea91ee25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
static const struct nvkm_ram_func
gm200_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index adb62a6beb63..378f6fb70990 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -79,7 +79,7 @@ gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
static const struct nvkm_ram_func
gp100_ram = {
- .upper = 0x1000000000,
+ .upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index e7c4f068936e..67cc3b320169 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/base.o
nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
new file mode 100644
index 000000000000..5a32df0f9992
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <subdev/acr.h>
+#include <subdev/top.h>
+
+static void *
+nvkm_gsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+ nvkm_falcon_dtor(&gsp->falcon);
+ return gsp;
+}
+
+static const struct nvkm_subdev_func
+nvkm_gsp = {
+ .dtor = nvkm_gsp_dtor,
+};
+
+int
+nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
+ int index, struct nvkm_gsp **pgsp)
+{
+ struct nvkm_gsp *gsp;
+
+ if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+
+ fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
+ nvkm_subdev_name[gsp->subdev.index], 0,
+ &gsp->falcon);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index dccfaf1162e2..2114f9b00a28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -19,44 +19,37 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/gsp.h>
-#include <subdev/top.h>
-#include <engine/falcon.h>
+#include "priv.h"
+
+static const struct nvkm_falcon_func
+gv100_gsp_flcn = {
+ .fbif = 0x600,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = gp102_sec2_flcn_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = gp102_sec2_flcn_enable,
+ .disable = nvkm_falcon_v1_disable,
+};
static int
-gv100_gsp_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_gsp *gsp = nvkm_gsp(subdev);
-
- gsp->addr = nvkm_top_addr(subdev->device, subdev->index);
- if (!gsp->addr)
- return -EINVAL;
-
- return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon);
-}
-
-static void *
-gv100_gsp_dtor(struct nvkm_subdev *subdev)
+gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
- struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- nvkm_falcon_del(&gsp->falcon);
- return gsp;
+ return 0;
}
-static const struct nvkm_subdev_func
-gv100_gsp = {
- .dtor = gv100_gsp_dtor,
- .oneinit = gv100_gsp_oneinit,
+struct nvkm_gsp_fwif
+gv100_gsp[] = {
+ { -1, gv100_gsp_nofw, &gv100_gsp_flcn },
+ {}
};
int
gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
{
- struct nvkm_gsp *gsp;
-
- if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev);
- return 0;
+ return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
new file mode 100644
index 000000000000..92820fb997c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_GSP_PRIV_H__
+#define __NVKM_GSP_PRIV_H__
+#include <subdev/gsp.h>
+enum nvkm_acr_lsf_id;
+
+struct nvkm_gsp_fwif {
+ int version;
+ int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
+ const struct nvkm_falcon_func *flcn;
+};
+
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+ struct nvkm_gsp **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 2b6d36ea7067..728d75010847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
nvkm-y += nvkm/subdev/ltc/gp100.o
nvkm-y += nvkm/subdev/ltc/gp102.o
+nvkm-y += nvkm/subdev/ltc/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
new file mode 100644
index 000000000000..c0063c7caa50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 NVIDIA Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Thierry Reding
+ */
+
+#include "priv.h"
+
+static void
+gp10b_ltc_init(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ struct iommu_fwspec *spec;
+
+ nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+ nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+ nvkm_wr32(device, 0x100800, ltc->ltc_nr);
+
+ spec = dev_iommu_fwspec_get(device->dev);
+ if (spec) {
+ u32 sid = spec->ids[0] & 0xffff;
+
+ /* stream ID */
+ nvkm_wr32(device, 0x160000, sid << 2);
+ }
+}
+
+static const struct nvkm_ltc_func
+gp10b_ltc = {
+ .oneinit = gp100_ltc_oneinit,
+ .init = gp10b_ltc_init,
+ .intr = gp100_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+ .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
+ .invalidate = gf100_ltc_invalidate,
+ .flush = gf100_ltc_flush,
+};
+
+int
+gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 2fcf18e46ce3..eca5a711b1b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
int gp100_ltc_oneinit(struct nvkm_ltc *);
void gp100_ltc_init(struct nvkm_ltc *);
void gp100_ltc_intr(struct nvkm_ltc *);
+
+void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2d075246dc46..2cd5ec81c0d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -30,7 +30,7 @@
* The value 0xff represents an invalid storage type.
*/
const u8 *
-gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
@@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index dbf644ebac97..83990c83f9f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
const u8 *
-gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
@@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0xff;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index db3dfbbb2aa0..c0083ddda65a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
const u8 *
-nv50_mmu_kind(struct nvkm_mmu *base, int *count)
+nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
{
/* 0x01: no bank swizzle
* 0x02: bank swizzled
@@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count)
0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
};
*count = ARRAY_SIZE(kind);
+ *invalid = 0x7f;
return kind;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 07f2fcd18f3d..479b02344271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -35,17 +35,17 @@ struct nvkm_mmu_func {
u32 pd_offset;
} vmm;
- const u8 *(*kind)(struct nvkm_mmu *, int *count);
+ const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
bool kind_sys;
};
extern const struct nvkm_mmu_func nv04_mmu;
-const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
-const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
-const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
struct nvkm_mmu_pt {
union {
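
The extra u8 *invalid out-parameter exists because the "invalid storage type" marker differs per generation: 0x7f on nv50, 0xff on gf100/gm200, and 0x07 in tu102's compacted 16-entry table, so callers can no longer hard-code the sentinel. A hypothetical implementation showing the new contract (xx00_mmu_kind and its table are invented purely for illustration):

/* Hypothetical per-generation implementation of the extended hook. */
const u8 *
xx00_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
	static const u8 kind[8] = {
		0x00, 0x01, 0x02, 0xff, 0x04, 0xff, 0x06, 0x07,
	};
	*count = ARRAY_SIZE(kind);
	*invalid = 0xff;	/* this generation's "invalid" encoding */
	return kind;
}

/* Callers then test against the reported sentinel, not a constant:
 *	kindm = mmu->func->kind(mmu, &kindn, &kind_inv);
 *	if (kind >= kindn || kindm[kind] == kind_inv)
 *		return -EINVAL;
 */
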
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index c0db0ce10cba..b21e82eb0916 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -1,5 +1,6 @@
/*
* Copyright 2018 Red Hat Inc.
+ * Copyright 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,13 +27,26 @@
#include <nvif/class.h>
+const u8 *
+tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
+{
+ static const u8
+ kind[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
+ 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
+ };
+ *count = ARRAY_SIZE(kind);
+ *invalid = 0x07;
+ return kind;
+}
+
static const struct nvkm_mmu_func
tu102_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
- .kind = gm200_mmu_kind,
+ .kind = tu102_mmu_kind,
.kind_sys = true,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 353f10f92b77..0e4b8941da37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
} *args = argv;
const u8 *kind = NULL;
int ret = -ENOSYS, count = 0;
+ u8 kind_inv = 0;
if (mmu->func->kind)
- kind = mmu->func->kind(mmu, &count);
+ kind = mmu->func->kind(mmu, &count, &kind_inv);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
if (argc != args->v0.count * sizeof(*args->v0.data))
return -EINVAL;
if (args->v0.count > count)
return -EINVAL;
+ args->v0.kind_inv = kind_inv;
memcpy(args->v0.data, kind, args->v0.count);
} else
return ret;
@@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_ummu *ummu;
int ret = -ENOSYS, kinds = 0;
+ u8 unused = 0;
if (mmu->func->kind)
- mmu->func->kind(mmu, &kinds);
+ mmu->func->kind(mmu, &kinds, &unused);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
args->v0.dmabits = mmu->dma_bits;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index ab6424faf84c..6a2d9eb8e1ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
- u8 kind, priv, ro, vol;
+ u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
if (WARN_ON(aper < 0))
return aper;
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0xff) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index b4f519768d5e..d86287565542 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
- u8 kind, priv, ro, vol;
+ u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -347,8 +347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
if (WARN_ON(aper < 0))
return aper;
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0xff) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index c98afe3134ee..2d89e27e8e9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_ram *ram = device->fb->ram;
struct nvkm_memory *memory = map->memory;
- u8 aper, kind, comp, priv, ro;
+ u8 aper, kind, kind_inv, comp, priv, ro;
int kindn, ret = -ENOSYS;
const u8 *kindm;
@@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
- if (kind >= kindn || kindm[kind] == 0x7f) {
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index e37b6e45eaa2..a76c2a7bd696 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/pmu/gm107.o
nvkm-y += nvkm/subdev/pmu/gm20b.o
nvkm-y += nvkm/subdev/pmu/gp100.o
nvkm-y += nvkm/subdev/pmu/gp102.o
+nvkm-y += nvkm/subdev/pmu/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index ea2e11771bca..a0fe607c9c07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,7 +23,7 @@
*/
#include "priv.h"
-#include <core/msgqueue.h>
+#include <core/firmware.h>
#include <subdev/timer.h>
bool
@@ -85,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
pmu->func->fini(pmu);
flush_work(&pmu->recv.work);
+
+ reinit_completion(&pmu->wpr_ready);
+
+ nvkm_falcon_cmdq_fini(pmu->lpq);
+ nvkm_falcon_cmdq_fini(pmu->hpq);
+ pmu->initmsg_received = false;
return 0;
}
@@ -133,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
return ret;
}
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
-}
-
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
- nvkm_msgqueue_del(&pmu->queue);
- nvkm_falcon_del(&pmu->falcon);
+ nvkm_falcon_msgq_del(&pmu->msgq);
+ nvkm_falcon_cmdq_del(&pmu->lpq);
+ nvkm_falcon_cmdq_del(&pmu->hpq);
+ nvkm_falcon_qmgr_del(&pmu->qmgr);
+ nvkm_falcon_dtor(&pmu->falcon);
return nvkm_pmu(subdev);
}
@@ -153,29 +155,50 @@ static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
.preinit = nvkm_pmu_preinit,
- .oneinit = nvkm_pmu_oneinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
};
int
-nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int index, struct nvkm_pmu *pmu)
{
+ int ret;
+
nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
- pmu->func = func;
+
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
+
+ fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
+ if (IS_ERR(fwif))
+ return PTR_ERR(fwif);
+
+ pmu->func = fwif->func;
+
+ ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+ nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+ &pmu->falcon);
+ if (ret)
+ return ret;
+
+ if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
+ (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
+ (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
+ (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
+ return ret;
+
+ init_completion(&pmu->wpr_ready);
return 0;
}
int
-nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int index, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_pmu_ctor(func, device, index, *ppmu);
+ return nvkm_pmu_ctor(fwif, device, index, *ppmu);
}
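
One stylistic point in the rewritten constructor: the qmgr/cmdq/msgq allocations use assignment inside a short-circuit || chain, which stops at the first failure while keeping the error path to a single return; it is equivalent to four sequential "if (ret) return ret;" checks. A compact standalone illustration, with hypothetical step functions:

#include <stdio.h>

static int step_a(void) { return 0; }    /* hypothetical allocation */
static int step_b(void) { return -12; }  /* fails like -ENOMEM */
static int step_c(void) { return 0; }    /* never reached */

int main(void)
{
	int ret;

	/* Same shape as the chain in nvkm_pmu_ctor(). */
	if ((ret = step_a()) ||
	    (ret = step_b()) ||
	    (ret = step_c()))
		printf("failed with %d\n", ret); /* prints -12 */
	return 0;
}
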
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 0b458656e870..3ecb3d9cbcf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gf100_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
@@ -56,7 +57,19 @@ gf100_pmu = {
};
int
+gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
+{
+ return 0;
+}
+
+static const struct nvkm_pmu_fwif
+gf100_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gf100_pmu },
+ {}
+};
+
+int
gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 3dfa79d4fb13..8dd0271aaaee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -26,6 +26,7 @@
static const struct nvkm_pmu_func
gf119_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gf119_pmu_code,
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
@@ -39,8 +40,14 @@ gf119_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gf119_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gf119_pmu },
+ {}
+};
+
int
gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8f7ec10fd2a4..8b70cc17a634 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
static const struct nvkm_pmu_func
gk104_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk104_pmu_code,
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
@@ -119,8 +120,14 @@ gk104_pmu = {
.pgob = gk104_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk104_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk104_pmu },
+ {}
+};
+
int
gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 345741d55a56..0081f2141b10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
static const struct nvkm_pmu_func
gk110_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk110_pmu_code,
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
@@ -98,8 +99,14 @@ gk110_pmu = {
.pgob = gk110_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk110_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk110_pmu },
+ {}
+};
+
int
gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index e4acf7876ea1..b227c701a5e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -26,6 +26,7 @@
static const struct nvkm_pmu_func
gk208_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gk208_pmu_code,
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
@@ -40,8 +41,14 @@ gk208_pmu = {
.pgob = gk110_pmu_pgob,
};
+static const struct nvkm_pmu_fwif
+gk208_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk208_pmu },
+ {}
+};
+
int
gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 05e81855c367..26c1adf8f44c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -95,7 +95,7 @@ static void
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
struct gk20a_pmu_dvfs_dev_status *status)
{
- struct nvkm_falcon *falcon = pmu->base.falcon;
+ struct nvkm_falcon *falcon = &pmu->base.falcon;
status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
@@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
- struct nvkm_falcon *falcon = pmu->base.falcon;
+ struct nvkm_falcon *falcon = &pmu->base.falcon;
nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
@@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu *pmu)
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
- nvkm_falcon_put(pmu->falcon, &pmu->subdev);
+ nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
}
static int
@@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu)
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = pmu->subdev.device;
- struct nvkm_falcon *falcon = pmu->falcon;
+ struct nvkm_falcon *falcon = &pmu->falcon;
int ret;
ret = nvkm_falcon_get(falcon, subdev);
@@ -196,25 +196,34 @@ gk20a_dvfs_data= {
static const struct nvkm_pmu_func
gk20a_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
.reset = gf100_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gk20a_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gk20a_pmu },
+ {}
+};
+
int
gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
struct gk20a_pmu *pmu;
+ int ret;
if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
*ppmu = &pmu->base;
- nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
+ ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+ if (ret)
+ return ret;
pmu->data = &gk20a_dvfs_data;
nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 459df1ef9e70..5afb55e58b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -28,6 +28,7 @@
static const struct nvkm_pmu_func
gm107_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gm107_pmu_code,
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
@@ -41,8 +42,14 @@ gm107_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gm107_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gm107_pmu },
+ {}
+};
+
int
gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 31c843145c7a..82571032a07d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -19,38 +19,224 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
#include "priv.h"
-static void
+#include <core/memory.h>
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_bootstrap_falcon_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ return msg->falcon_id;
+}
+
+int
+gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
+ enum nvkm_acr_lsf_id id)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+ struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
+ .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
+ .falcon_id = id,
+ };
+ int ret;
+
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gm20b_pmu_acr_bootstrap_falcon_cb,
+ &pmu->subdev, msecs_to_jiffies(1000));
+ if (ret >= 0) {
+ if (ret != cmd.falcon_id)
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int
+gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
+{
+ struct nv_pmu_args args = { .secure_mode = true };
+ const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
+ nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+ nvkm_falcon_start(falcon);
+ return 0;
+}
+
+void
+gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
+{
+ struct loader_config hdr;
+ u64 addr;
+
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
+ hdr.overlay_dma_base = lower_32_bits((addr + adjust) >> 8);
+ hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) >> 8);
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+
+ loader_config_dump(&acr->subdev, &hdr);
+}
+
+void
+gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
+ struct nvkm_acr_lsfw *lsfw)
+{
+ const u64 base = lsfw->offset.img + lsfw->app_start_offset;
+ const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
+ const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
+ const struct loader_config hdr = {
+ .dma_idx = FALCON_DMAIDX_UCODE,
+ .code_dma_base = lower_32_bits(code),
+ .code_size_total = lsfw->app_size,
+ .code_size_to_load = lsfw->app_resident_code_size,
+ .code_entry_point = lsfw->app_imem_entry,
+ .data_dma_base = lower_32_bits(data),
+ .data_size = lsfw->app_resident_data_size,
+ .overlay_dma_base = lower_32_bits(code),
+ .argc = 1,
+ .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
+ .code_dma_base1 = upper_32_bits(code),
+ .data_dma_base1 = upper_32_bits(data),
+ .overlay_dma_base1 = upper_32_bits(code),
+ };
+
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
+}
+
+static const struct nvkm_acr_lsf_func
+gm20b_pmu_acr = {
+ .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+ .bld_size = sizeof(struct loader_config),
+ .bld_write = gm20b_pmu_acr_bld_write,
+ .bld_patch = gm20b_pmu_acr_bld_patch,
+ .boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+};
+
+static int
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_init_wpr_region_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ struct nvkm_pmu *pmu = priv;
+ struct nvkm_subdev *subdev = &pmu->subdev;
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR WPR init failure: %d\n",
+ msg->error_code);
+ return -EINVAL;
+ }
+
+ nvkm_debug(subdev, "ACR WPR init complete\n");
+ complete_all(&pmu->wpr_ready);
+ return 0;
+}
+
+static int
+gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
+{
+ struct nv_pmu_acr_init_wpr_region_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
+ .region_id = 1,
+ .wpr_offset = 0,
+ };
+
+ return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gm20b_pmu_acr_init_wpr_callback, pmu, 0);
+}
+
+int
+gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
+{
+ struct nv_pmu_init_msg msg;
+ int ret;
+
+ ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
+ msg.msg_type != NV_PMU_INIT_MSG_INIT)
+ return -EINVAL;
+
+ nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
+ msg.queue_info[0].offset,
+ msg.queue_info[0].size);
+ nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
+ msg.queue_info[1].offset,
+ msg.queue_info[1].size);
+ nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
+ msg.queue_info[4].offset,
+ msg.queue_info[4].size);
+ return gm20b_pmu_acr_init_wpr(pmu);
+}
+
+void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
- if (!pmu->queue) {
- nvkm_warn(&pmu->subdev,
- "recv function called while no firmware set!\n");
- return;
+ if (!pmu->initmsg_received) {
+ int ret = pmu->func->initmsg(pmu);
+ if (ret) {
+ nvkm_error(&pmu->subdev,
+ "error parsing init message: %d\n", ret);
+ return;
+ }
+
+ pmu->initmsg_received = true;
}
- nvkm_msgqueue_recv(pmu->queue);
+ nvkm_falcon_msgq_recv(pmu->msgq);
}
static const struct nvkm_pmu_func
gm20b_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
+ .initmsg = gm20b_pmu_initmsg,
};
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
+
int
-gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
- int ret;
+ return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
+ NVKM_ACR_LSF_PMU, "pmu/",
+ ver, fwif->acr);
+}
- ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
- if (ret)
- return ret;
+static const struct nvkm_pmu_fwif
+gm20b_pmu_fwif[] = {
+ { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
+ {}
+};
- return 0;
+int
+gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}
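
loader_config stores each 256-byte-aligned DMA address pre-shifted right by 8 and split across two fields: bits 8..39 of the byte address in *_dma_base and the bits above that in *_dma_base1. gm20b_pmu_acr_bld_patch() therefore reassembles with (base1 << 40 | base << 8), applies the WPR relocation, and shifts right by 8 again before re-splitting, identically for the code, data and overlay pairs. A runnable round-trip check of that packing, assuming nothing beyond the code above:

#include <stdint.h>
#include <assert.h>

/* Standalone stand-ins for the <linux/kernel.h> helpers. */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

int main(void)
{
	uint64_t addr = 0x0123456789ab00ULL; /* 256-byte-aligned byte address */
	int64_t adjust = 0x100000;           /* relocation delta */

	/* Split as loader_config stores it: both fields hold addr >> 8. */
	uint32_t base  = lower_32_bits(addr >> 8);
	uint32_t base1 = upper_32_bits(addr >> 8);

	/* Reassemble exactly as gm20b_pmu_acr_bld_patch() does. */
	uint64_t got = ((uint64_t)base1 << 40) | ((uint64_t)base << 8);
	assert(got == addr);

	/* Relocate, then re-split with >> 8, as for all three field pairs. */
	base  = lower_32_bits((got + adjust) >> 8);
	base1 = upper_32_bits((got + adjust) >> 8);
	assert(((((uint64_t)base1 << 40) | ((uint64_t)base << 8))) == addr + adjust);
	return 0;
}
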
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
index e210cd6af816..09e05db21ff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -25,12 +25,19 @@
static const struct nvkm_pmu_func
gp100_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gp100_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gp100_pmu },
+ {}
+};
+
int
gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 98c7a2a8afc4..262b8a3dd507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gp102_pmu = {
+ .flcn = &gt215_pmu_flcn,
.enabled = gp102_pmu_enabled,
.reset = gp102_pmu_reset,
};
+static const struct nvkm_pmu_fwif
+gp102_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gp102_pmu },
+ {}
+};
+
int
gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
new file mode 100644
index 000000000000..5b81c7320479
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/acr.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/pmu.h>
+
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
+ struct nv_falcon_msg *hdr)
+{
+ struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
+ container_of(hdr, typeof(*msg), msg.hdr);
+ return msg->falcon_mask;
+}
+static int
+gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask)
+{
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+ struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = {
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
+ .cmd.hdr.size = sizeof(cmd),
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS,
+ .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES,
+ .falcon_mask = mask,
+ .wpr_lo = 0, /*XXX*/
+ .wpr_hi = 0, /*XXX*/
+ };
+ int ret;
+
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
+ gp10b_pmu_acr_bootstrap_multiple_falcons_cb,
+ &pmu->subdev, msecs_to_jiffies(1000));
+ if (ret >= 0) {
+ if (ret != cmd.falcon_mask)
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const struct nvkm_acr_lsf_func
+gp10b_pmu_acr = {
+ .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
+ .bld_size = sizeof(struct loader_config),
+ .bld_write = gm20b_pmu_acr_bld_write,
+ .bld_patch = gm20b_pmu_acr_bld_patch,
+ .boot = gm20b_pmu_acr_boot,
+ .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
+ .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+};
+
+static const struct nvkm_pmu_func
+gp10b_pmu = {
+ .flcn = &gt215_pmu_flcn,
+ .enabled = gf100_pmu_enabled,
+ .intr = gt215_pmu_intr,
+ .recv = gm20b_pmu_recv,
+ .initmsg = gm20b_pmu_initmsg,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
+
+static const struct nvkm_pmu_fwif
+gp10b_pmu_fwif[] = {
+ { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr },
+ {}
+};
+
+int
+gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+}
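
In gp10b_pmu_acr_bootstrap_multiple_falcons() above, nvkm_falcon_cmdq_send() returns either a negative errno or the value produced by the completion callback, i.e. the falcon mask echoed back by the PMU. The success test therefore reduces to the following, shown here as a hedged fragment (the helper name is hypothetical):

#include <errno.h>

/* Hypothetical helper mirroring the reply check above: a negative value is
 * a transport or timeout error, and an echoed mask that differs from the
 * requested one means the PMU did not bootstrap every falcon asked for. */
static int
check_bootstrap_reply(int requested_mask, int reply)
{
	if (reply < 0)
		return reply;
	return reply == requested_mask ? 0 : -EIO;
}
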
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index e04216daea58..88b909913ff9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu)
return 0;
}
+const struct nvkm_falcon_func
+gt215_pmu_flcn = {
+ .debug = 0xc08,
+ .fbif = 0xe00,
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+ .start = nvkm_falcon_v1_start,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .cmdq = { 0x4a0, 0x4b0, 4 },
+ .msgq = { 0x4c8, 0x4cc, 0 },
+};
+
static const struct nvkm_pmu_func
gt215_pmu = {
+ .flcn = &gt215_pmu_flcn,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
@@ -256,8 +275,14 @@ gt215_pmu = {
.recv = gt215_pmu_recv,
};
+static const struct nvkm_pmu_fwif
+gt215_pmu_fwif[] = {
+ { -1, gf100_pmu_nofw, &gt215_pmu },
+ {}
+};
+
int
gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+ return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 26d73f9cd6d3..f470859244de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -4,13 +4,12 @@
#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
#include <subdev/pmu.h>
#include <subdev/pmu/fuc/os.h>
-
-int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *,
- int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
- int index, struct nvkm_pmu **);
+enum nvkm_acr_lsf_id;
+struct nvkm_acr_lsfw;
struct nvkm_pmu_func {
+ const struct nvkm_falcon_func *flcn;
+
struct {
u32 *data;
u32 size;
@@ -29,9 +28,11 @@ struct nvkm_pmu_func {
int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process,
u32 message, u32 data0, u32 data1);
void (*recv)(struct nvkm_pmu *);
+ int (*initmsg)(struct nvkm_pmu *);
void (*pgob)(struct nvkm_pmu *, bool);
};
+extern const struct nvkm_falcon_func gt215_pmu_flcn;
int gt215_pmu_init(struct nvkm_pmu *);
void gt215_pmu_fini(struct nvkm_pmu *);
void gt215_pmu_intr(struct nvkm_pmu *);
@@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
+
+void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
+void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
+int gm20b_pmu_acr_boot(struct nvkm_falcon *);
+int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id);
+void gm20b_pmu_recv(struct nvkm_pmu *);
+int gm20b_pmu_initmsg(struct nvkm_pmu *);
+
+struct nvkm_pmu_fwif {
+ int version;
+ int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *);
+ const struct nvkm_pmu_func *func;
+ const struct nvkm_acr_lsf_func *acr;
+};
+
+int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
+
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+ int index, struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
+ int index, struct nvkm_pmu **);
#endif
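
Taken together, the declarations above make adding a PMU variant mostly declarative: provide an nvkm_pmu_func, list the acceptable firmware interfaces, and delegate to nvkm_pmu_new_(). A hedged sketch of such wiring (the "gmXXX" chip name is hypothetical, the entries reuse helpers declared above, and gm20b_pmu_acr would need an extern declaration since it is defined in gm20b.c):

static const struct nvkm_pmu_func
gmXXX_pmu = {
	.flcn = &gt215_pmu_flcn,
	.enabled = gf100_pmu_enabled,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
};

static const struct nvkm_pmu_fwif
gmXXX_pmu_fwif[] = {
	{ 0, gm20b_pmu_load, &gmXXX_pmu, &gm20b_pmu_acr }, /* signed firmware */
	{ -1, gf100_pmu_nofw, &gmXXX_pmu },                /* no-FW fallback  */
	{}
};

int
gmXXX_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gmXXX_pmu_fwif, device, index, ppmu);
}

Whether a no-firmware fallback makes sense alongside a signed entry is chip-specific; it is shown only to illustrate the version field.
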
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
deleted file mode 100644
index f3dee2693c79..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/secboot/base.o
-nvkm-y += nvkm/subdev/secboot/hs_ucode.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
-nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
-nvkm-y += nvkm/subdev/secboot/acr.o
-nvkm-y += nvkm/subdev/secboot/acr_r352.o
-nvkm-y += nvkm/subdev/secboot/acr_r361.o
-nvkm-y += nvkm/subdev/secboot/acr_r364.o
-nvkm-y += nvkm/subdev/secboot/acr_r367.o
-nvkm-y += nvkm/subdev/secboot/acr_r370.o
-nvkm-y += nvkm/subdev/secboot/acr_r375.o
-nvkm-y += nvkm/subdev/secboot/gm200.o
-nvkm-y += nvkm/subdev/secboot/gm20b.o
-nvkm-y += nvkm/subdev/secboot/gp102.o
-nvkm-y += nvkm/subdev/secboot/gp108.o
-nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
deleted file mode 100644
index dc80985cf093..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-
-#include <core/firmware.h>
-
-/**
- * nvkm_acr_load_firmware() - duplicate a firmware file in memory and check
- * that it has the required minimum size
- */
-void *
-nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
- size_t min_size)
-{
- const struct firmware *fw;
- void *blob;
- int ret;
-
- ret = nvkm_firmware_get(subdev, name, &fw);
- if (ret)
- return ERR_PTR(ret);
- if (fw->size < min_size) {
- nvkm_error(subdev, "%s is smaller than expected size %zu\n",
- name, min_size);
- nvkm_firmware_put(fw);
- return ERR_PTR(-EINVAL);
- }
- blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- if (!blob)
- return ERR_PTR(-ENOMEM);
-
- return blob;
-}
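
The deleted helper returns either a kmemdup()'d blob or an ERR_PTR-encoded errno, never NULL, so callers test with IS_ERR()/PTR_ERR() rather than a NULL check. A minimal caller fragment (not standalone; the firmware name mirrors the "acr/bl" usage later in this series):

void *blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0); /* min_size 0: no size check */
if (IS_ERR(blob))
	return PTR_ERR(blob);
/* ... consume blob ... */
kfree(blob);
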
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
deleted file mode 100644
index 73a2ac81ac69..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_H__
-#define __NVKM_SECBOOT_ACR_H__
-
-#include "priv.h"
-
-struct nvkm_acr;
-
-/**
- * struct nvkm_acr_func - properties and functions specific to an ACR
- *
- * @load: make the ACR ready to run on the given secboot device
- * @reset: reset the specified falcon
- * @start: start the specified falcon (assumed to have been reset)
- */
-struct nvkm_acr_func {
- void (*dtor)(struct nvkm_acr *);
- int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
- int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
- int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
- struct nvkm_gpuobj *, u64);
- int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
-};
-
-/**
- * struct nvkm_acr - instance of an ACR
- *
- * @boot_falcon: ID of the falcon that will perform secure boot
- * @managed_falcons: bitfield of falcons managed by this ACR
- * @optional_falcons: bitfield of falcons we can live without
- */
-struct nvkm_acr {
- const struct nvkm_acr_func *func;
- const struct nvkm_subdev *subdev;
-
- enum nvkm_secboot_falcon boot_falcon;
- unsigned long managed_falcons;
- unsigned long optional_falcons;
-};
-
-void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
-
-struct nvkm_acr *acr_r352_new(unsigned long);
-struct nvkm_acr *acr_r361_new(unsigned long);
-struct nvkm_acr *acr_r364_new(unsigned long);
-struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
-struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
deleted file mode 100644
index 7af971db91bc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r352.h"
-#include "hs_ucode.h"
-
-#include <core/gpuobj.h>
-#include <core/firmware.h>
-#include <engine/falcon.h>
-#include <subdev/pmu.h>
-#include <core/msgqueue.h>
-#include <engine/sec2.h>
-
-/**
- * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- * located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size: the size of the non-secure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- * located. The offset must be a multiple of 256 to help perf
- * @sec_code_size: the size of the secure code part.
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size: size of the data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r352_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- u32 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
- u32 code_dma_base1;
- u32 data_dma_base1;
-};
-
-/**
- * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
- */
-static void
-acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r352_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = (base + pdesc->app_resident_code_offset) >> 8;
- addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = lower_32_bits(addr_code);
- desc->code_dma_base1 = upper_32_bits(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = lower_32_bits(addr_data);
- desc->data_dma_base1 = upper_32_bits(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-
-/**
- * struct hsflcn_acr_desc - data section of the HS firmware
- *
- * This header is to be copied at the beginning of DMEM by the HS bootloader.
- *
- * @signature: signature of ACR ucode
- * @wpr_region_id: region ID holding the WPR header and its details
- * @wpr_offset: offset from the WPR region holding the wpr header
- * @regions: region descriptors
- * @ucode_blob_size: size of the LS blob
- * @ucode_blob_base: FB location of the LS blob
- */
-struct hsflcn_acr_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_mem_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-
-/*
- * Low-secure blob creation
- */
-
-/**
- * struct acr_r352_lsf_lsb_header - LS firmware header
- * @signature: signature to verify the firmware against
- * @ucode_off: offset of the ucode blob in the WPR region. The ucode
- * blob contains the bootloader, code and data of the
- * LS falcon
- * @ucode_size: size of the ucode blob, including bootloader
- * @data_size: size of the ucode blob data
- * @bl_code_size: size of the bootloader code
- * @bl_imem_off: offset in imem of the bootloader
- * @bl_data_off: offset of the bootloader data in WPR region
- * @bl_data_size: size of the bootloader data
- * @app_code_off: offset of the app code relative to ucode_off
- * @app_code_size: size of the app code
- * @app_data_off: offset of the app data relative to ucode_off
- * @app_data_size: size of the app data
- * @flags: flags for the secure bootloader
- *
- * This structure is written into the WPR region for each managed falcon. Each
- * instance is referenced by the lsb_offset member of the corresponding
- * lsf_wpr_header.
- */
-struct acr_r352_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
- * @dbg_keys: signature to use in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dbg_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-};
-
-/**
- * struct acr_r352_lsf_wpr_header - LS blob WPR Header
- * @falcon_id: LS falcon ID
- * @lsb_offset: offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
- * @lazy_bootstrap: skip bootstrapping by ACR
- * @status: bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an instance which falcon_id
- * is LSF_FALCON_ID_INVALID.
- */
-struct acr_r352_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-};
-
-/**
- * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
- */
-struct ls_ucode_img_r352 {
- struct ls_ucode_img base;
-
- const struct acr_r352_lsf_func *func;
-
- struct acr_r352_lsf_wpr_header wpr_header;
- struct acr_r352_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
-
-/**
- * acr_r352_ls_ucode_img_load() - create an ls_ucode_img and load it
- */
-struct ls_ucode_img *
-acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
- const struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon_id)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
- struct ls_ucode_img_r352 *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- img->base.falcon_id = falcon_id;
-
- ret = func->load(sb, func->version_max, &img->base);
- if (ret < 0) {
- kfree(img->base.ucode_data);
- kfree(img->base.sig);
- kfree(img);
- return ERR_PTR(ret);
- }
-
- img->func = func->version[ret];
-
- /* Check that the signature size matches our expectations... */
- if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
- nvkm_error(subdev, "invalid signature size for %s falcon!\n",
- nvkm_secboot_falcon_name[falcon_id]);
- return ERR_PTR(-EINVAL);
- }
-
- /* Copy signature to the right place */
- memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
- /* not needed? the signature should already have the right value */
- img->lsb_header.signature.falcon_id = falcon_id;
-
- return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-/**
- * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
- * @acr: ACR to use
- * @img: image to generate for
- * @offset: offset in the WPR region where this image starts
- *
- * Allocate space in the WPR area from offset and write the WPR and LSB headers
- * accordingly.
- *
- * Return: offset at the end of this image.
- */
-static u32
-acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
- struct ls_ucode_img_r352 *img, u32 offset)
-{
- struct ls_ucode_img *_img = &img->base;
- struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
- struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_lsf_func *func = img->func;
-
- /* Fill WPR header */
- whdr->falcon_id = _img->falcon_id;
- whdr->bootstrap_owner = acr->base.boot_falcon;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
- /* Skip bootstrapping falcons started by someone else than ACR */
- if (acr->lazy_bootstrap & BIT(_img->falcon_id))
- whdr->lazy_bootstrap = 1;
-
- /* Align, save off, and include an LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(*lhdr);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- _img->ucode_off = lhdr->ucode_off = offset;
- offset += _img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
- * Though the BL is located at 0th offset of the image, the VA
- * is different to make sure that it doesn't collide with the
- * actual OS VA range.
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = func->lhdr_flags;
- if (_img->falcon_id == acr->base.boot_falcon)
- lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
- /* Align and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
-
-/**
- * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
- */
-int
-acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
- struct ls_ucode_img_r352 *img;
- struct list_head *l;
- u32 count = 0;
- u32 offset;
-
- /* Count the number of images to manage */
- list_for_each(l, imgs)
- count++;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(img->wpr_header) * (count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, imgs, base.node) {
- offset = acr_r352_ls_img_fill_headers(acr, img, offset);
- }
-
- return offset;
-}
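
To make the arithmetic in the two functions above concrete, a worked example with assumed sizes (a 20-byte WPR header, a 124-byte LSB header, no struct padding; all figures illustrative):

/* Two managed falcons; the header array is packed, images are aligned. */
u32 offset = 20 * (2 + 1);     /*   60: WPR headers + terminator entry    */
offset = ALIGN(offset, 256);   /*  256: LSB header of the first image     */
offset += 124;                 /*  380: past that LSB header              */
offset = ALIGN(offset, 4096);  /* 4096: ucode of the first image          */
offset += ucode_size;          /* then BL data, ALIGNed to 256, and so on
                                * for the second image                    */
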
-
-/**
- * acr_r352_ls_write_wpr - write the WPR blob contents
- */
-int
-acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
- struct ls_ucode_img *_img;
- u32 pos = 0;
- u32 max_desc_size = 0;
- u8 *gdesc;
-
- /* Figure out how large we need gdesc to be. */
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
- }
-
- gdesc = kmalloc(max_desc_size, GFP_KERNEL);
- if (!gdesc)
- return -ENOMEM;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- memset(gdesc, 0, ls_func->bl_desc_size);
- ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
- gdesc, ls_func->bl_desc_size);
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- _img->ucode_data, _img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- kfree(gdesc);
-
- return 0;
-}
-
-/* Both size and address of WPR need to be 256K-aligned */
-#define WPR_ALIGNMENT 0x40000
-/**
- * acr_r352_prepare_ls_blob() - prepare the LS blob
- *
- * For each securely managed falcon, load the FW, signatures and bootloaders and
- * prepare a ucode blob. Then, compute the offsets in the WPR region for each
- * blob, and finally write the headers and ucode blobs into a GPU object that
- * will be copied into the WPR region by the HS firmware.
- */
-static int
-acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- struct list_head imgs;
- struct ls_ucode_img *img, *t;
- unsigned long managed_falcons = acr->base.managed_falcons;
- u64 wpr_addr = sb->wpr_addr;
- u32 wpr_size = sb->wpr_size;
- int managed_count = 0;
- u32 image_wpr_size, ls_blob_size;
- int falcon_id;
- int ret;
-
- INIT_LIST_HEAD(&imgs);
-
- /* Load all LS blobs */
- for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- struct ls_ucode_img *img;
-
- img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
- if (IS_ERR(img)) {
- if (acr->base.optional_falcons & BIT(falcon_id)) {
- managed_falcons &= ~BIT(falcon_id);
- nvkm_info(subdev, "skipping %s falcon...\n",
- nvkm_secboot_falcon_name[falcon_id]);
- continue;
- }
- ret = PTR_ERR(img);
- goto cleanup;
- }
-
- list_add_tail(&img->node, &imgs);
- managed_count++;
- }
-
- /* Commit the actual list of falcons we will manage from now on */
- acr->base.managed_falcons = managed_falcons;
-
- /*
- * If the boot falcon has a firmware, let it manage the bootstrap of other
- * falcons.
- */
- if (acr->func->ls_func[acr->base.boot_falcon] &&
- (managed_falcons & BIT(acr->base.boot_falcon))) {
- for_each_set_bit(falcon_id, &managed_falcons,
- NVKM_SECBOOT_FALCON_END) {
- if (falcon_id == acr->base.boot_falcon)
- continue;
-
- acr->lazy_bootstrap |= BIT(falcon_id);
- }
- }
-
- /*
- * Fill the WPR and LSF headers with the right offsets and compute
- * required WPR size
- */
- image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
- image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
-
- ls_blob_size = image_wpr_size;
-
- /*
- * If we need a shadow area, allocate twice the size and use the
- * upper half as WPR
- */
- if (wpr_size == 0 && acr->func->shadow_blob)
- ls_blob_size *= 2;
-
- /* Allocate GPU object that will contain the WPR region */
- ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
- false, NULL, &acr->ls_blob);
- if (ret)
- goto cleanup;
-
- nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
- managed_count, image_wpr_size);
-
- /* If WPR address and size are not fixed, set them to fit the LS blob */
- if (wpr_size == 0) {
- wpr_addr = acr->ls_blob->addr;
- if (acr->func->shadow_blob)
- wpr_addr += acr->ls_blob->size / 2;
-
- wpr_size = image_wpr_size;
- /*
- * But if the WPR region is set by the bootloader, it is illegal for
- * the firmware blob to be larger than this region.
- */
- } else if (image_wpr_size > wpr_size) {
- nvkm_error(subdev, "WPR region too small for FW blob!\n");
- nvkm_error(subdev, "required: %dB\n", image_wpr_size);
- nvkm_error(subdev, "available: %dB\n", wpr_size);
- ret = -ENOSPC;
- goto cleanup;
- }
-
- /* Write LS blob */
- ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
- if (ret)
- nvkm_gpuobj_del(&acr->ls_blob);
-
-cleanup:
- list_for_each_entry_safe(img, t, &imgs, node) {
- kfree(img->ucode_data);
- kfree(img->sig);
- kfree(img);
- }
-
- return ret;
-}
-
-
-
-
-void
-acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct hsflcn_acr_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = wpr_start + ls_blob->size;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-static void
-acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
- u64 addr_code, addr_data;
-
- addr_code = offset >> 8;
- addr_data = (offset + hdr->data_dma_base) >> 8;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->code_dma_base = lower_32_bits(addr_code);
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->data_dma_base = lower_32_bits(addr_data);
- bl_desc->data_size = hdr->data_size;
-}
-
-/**
- * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
- *
- * @sb: secure boot instance to prepare for
- * @fw: name of the HS firmware to load
- * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
- * @load_header: pointer to the load header to fill in for this firmware
- * @patch: whether we should patch the HS descriptor (only for HS loaders)
- */
-static int
-acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
- const char *fw, struct nvkm_gpuobj **blob,
- struct hsf_load_header *load_header, bool patch)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- void *acr_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- struct hsf_load_header *load_hdr;
- void *acr_data;
- int ret;
-
- acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
- if (IS_ERR(acr_image))
- return PTR_ERR(acr_image);
-
- hsbin_hdr = acr_image;
- fw_hdr = acr_image + hsbin_hdr->header_offset;
- load_hdr = acr_image + fw_hdr->hdr_offset;
- acr_data = acr_image + hsbin_hdr->data_offset;
-
- /* Patch descriptor with WPR information? */
- if (patch) {
- struct hsflcn_acr_desc *desc;
-
- desc = acr_data + load_hdr->data_dma_base;
- acr->func->fixup_hs_desc(acr, sb, desc);
- }
-
- if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
- nvkm_error(subdev, "more apps (%d) than supported (%d)!",
- load_hdr->num_apps, ACR_R352_MAX_APPS);
- ret = -EINVAL;
- goto cleanup;
- }
- memcpy(load_header, load_hdr, sizeof(*load_header) +
- (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
-
- /* Create ACR blob and copy HS data to it */
- ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
- 0x1000, false, NULL, blob);
- if (ret)
- goto cleanup;
-
- nvkm_kmap(*blob);
- nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
- nvkm_done(*blob);
-
-cleanup:
- kfree(acr_image);
-
- return ret;
-}
-
-/**
- * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
- *
- * This includes the LS blob, HS ucode loading blob, and HS bootloader.
- *
- * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
- */
-int
-acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- int ret;
-
- /* Firmware already loaded? */
- if (acr->firmware_ok)
- return 0;
-
- /* Load and prepare the managed falcon's firmwares */
- ret = acr_r352_prepare_ls_blob(acr, sb);
- if (ret)
- return ret;
-
- /* Load the HS firmware that will load the LS firmwares */
- if (!acr->load_blob) {
- ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
- &acr->load_blob,
- &acr->load_bl_header, true);
- if (ret)
- return ret;
- }
-
- /* If the ACR region is dynamically programmed, we need an unload FW */
- if (sb->wpr_size == 0) {
- ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
- &acr->unload_blob,
- &acr->unload_bl_header, false);
- if (ret)
- return ret;
- }
-
- /* Load the HS firmware bootloader */
- if (!acr->hsbl_blob) {
- acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
- if (IS_ERR(acr->hsbl_blob)) {
- ret = PTR_ERR(acr->hsbl_blob);
- acr->hsbl_blob = NULL;
- return ret;
- }
-
- if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
- acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
- "acr/unload_bl", 0);
- if (IS_ERR(acr->hsbl_unload_blob)) {
- ret = PTR_ERR(acr->hsbl_unload_blob);
- acr->hsbl_unload_blob = NULL;
- return ret;
- }
- } else {
- acr->hsbl_unload_blob = acr->hsbl_blob;
- }
- }
-
- acr->firmware_ok = true;
- nvkm_debug(&sb->subdev, "LS blob successfully created\n");
-
- return 0;
-}
-
-/**
- * acr_r352_load() - prepare HS falcon to run the specified blob, mapped.
- *
- * Returns the start address to use, or a negative error value.
- */
-static int
-acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
- struct nvkm_gpuobj *blob, u64 offset)
-{
- struct acr_r352 *acr = acr_r352(_acr);
- const u32 bl_desc_size = acr->func->hs_bl_desc_size;
- const struct hsf_load_header *load_hdr;
- struct fw_bin_header *bl_hdr;
- struct fw_bl_desc *hsbl_desc;
- void *bl, *blob_data, *hsbl_code, *hsbl_data;
- u32 code_size;
- u8 *bl_desc;
-
- bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
- if (!bl_desc)
- return -ENOMEM;
-
- /* Find the bootloader descriptor for our blob and copy it */
- if (blob == acr->load_blob) {
- load_hdr = &acr->load_bl_header;
- bl = acr->hsbl_blob;
- } else if (blob == acr->unload_blob) {
- load_hdr = &acr->unload_bl_header;
- bl = acr->hsbl_unload_blob;
- } else {
- nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
- kfree(bl_desc);
- return -EINVAL;
- }
-
- bl_hdr = bl;
- hsbl_desc = bl + bl_hdr->header_offset;
- blob_data = bl + bl_hdr->data_offset;
- hsbl_code = blob_data + hsbl_desc->code_off;
- hsbl_data = blob_data + hsbl_desc->data_off;
- code_size = ALIGN(hsbl_desc->code_size, 256);
-
- /*
- * Copy HS bootloader data
- */
- nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
-
- /* Copy HS bootloader code to end of IMEM */
- nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
- code_size, hsbl_desc->start_tag, 0, false);
-
- /* Generate the BL header */
- acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
-
- /*
- * Copy HS BL header where the HS descriptor expects it to be
- */
- nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
- bl_desc_size, 0);
-
- kfree(bl_desc);
- return hsbl_desc->start_tag << 8;
-}
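
The return value above converts the bootloader's IMEM start tag into a falcon boot vector; falcon IMEM is tagged in 256-byte pages, so the address is the tag shifted left by 8 (architectural background, not something introduced by this patch):

u32 boot_vector = hsbl_desc->start_tag << 8; /* tag N => byte address N * 256 */
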
-
-static int
-acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- int i;
-
- /* Run the unload blob to unprotect the WPR region */
- if (acr->unload_blob && sb->wpr_set) {
- int ret;
-
- nvkm_debug(subdev, "running HS unload blob\n");
- ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
- if (ret < 0)
- return ret;
- /*
- * The unload blob is expected to return this error code; it is
- * not an error, and it matches RM's behavior as well
- */
- if (ret && ret != 0x1d) {
- nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
- return -EINVAL;
- }
- nvkm_debug(subdev, "HS unload blob completed\n");
- }
-
- for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
- acr->falcon_state[i] = NON_SECURE;
-
- sb->wpr_set = false;
-
- return 0;
-}
-
-/**
- * acr_r352_wpr_is_set() - check whether the WPR region has indeed been set
- * by the ACR firmware and matches where it should be
- */
-static bool
-acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- const struct nvkm_device *device = subdev->device;
- u64 wpr_lo, wpr_hi;
- u64 wpr_range_lo, wpr_range_hi;
-
- nvkm_wr32(device, 0x100cd4, 0x2);
- wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
- wpr_lo <<= 8;
- nvkm_wr32(device, 0x100cd4, 0x3);
- wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
- wpr_hi <<= 8;
-
- if (sb->wpr_size != 0) {
- wpr_range_lo = sb->wpr_addr;
- wpr_range_hi = wpr_range_lo + sb->wpr_size;
- } else {
- wpr_range_lo = acr->ls_blob->addr;
- wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
- }
-
- return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
- wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
-}
-
-static int
-acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- unsigned long managed_falcons = acr->base.managed_falcons;
- int falcon_id;
- int ret;
-
- if (sb->wpr_set)
- return 0;
-
- /* Make sure all blobs are ready */
- ret = acr_r352_load_blobs(acr, sb);
- if (ret)
- return ret;
-
- nvkm_debug(subdev, "running HS load blob\n");
- ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
- /* clear halt interrupt */
- nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
- sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
- if (ret < 0) {
- return ret;
- } else if (ret > 0) {
- nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
- return -EINVAL;
- }
- nvkm_debug(subdev, "HS load blob completed\n");
- /* WPR must be set at this point */
- if (!sb->wpr_set) {
- nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
- return -EINVAL;
- }
-
- /* Run LS firmwares post_run hooks */
- for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[falcon_id];
-
- if (func->post_run) {
- ret = func->post_run(&acr->base, sb);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/**
- * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
- *
- * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
- * disabled. This has the effect of making all managed falcons ready-to-run.
- */
-static int
-acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
- unsigned long falcon_mask)
-{
- int falcon;
- int ret;
-
- /*
- * Perform secure boot each time we are called on FECS. Since only FECS
- * and GPCCS are managed and started together, this ought to be safe.
- */
- if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
- goto end;
-
- ret = acr_r352_shutdown(acr, sb);
- if (ret)
- return ret;
-
- ret = acr_r352_bootstrap(acr, sb);
- if (ret)
- return ret;
-
-end:
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
- acr->falcon_state[falcon] = RESET;
- }
- return 0;
-}
-
-/*
- * acr_r352_reset() - execute secure boot from the prepared state
- *
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
- */
-static int
-acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- unsigned long falcon_mask)
-{
- struct acr_r352 *acr = acr_r352(_acr);
- struct nvkm_msgqueue *queue;
- int falcon;
- bool wpr_already_set = sb->wpr_set;
- int ret;
-
- /* Make sure secure boot is performed */
- ret = acr_r352_bootstrap(acr, sb);
- if (ret)
- return ret;
-
- /* No PMU interface? */
- if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
- /* Redo secure boot entirely if it was already done */
- if (wpr_already_set)
- return acr_r352_reset_nopmu(acr, sb, falcon_mask);
- /* Else return the result of the initial invocation */
- else
- return ret;
- }
-
- switch (_acr->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- queue = sb->subdev.device->pmu->queue;
- break;
- case NVKM_SECBOOT_FALCON_SEC2:
- queue = sb->subdev.device->sec2->queue;
- break;
- default:
- return -EINVAL;
- }
-
- /* Otherwise just ask the LS firmware to reset the falcon */
- for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "resetting %s falcon\n",
- nvkm_secboot_falcon_name[falcon]);
- ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
- if (ret) {
- nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
- return ret;
- }
- nvkm_debug(&sb->subdev, "falcon reset done\n");
-
- return 0;
-}
-
-static int
-acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
-{
- struct acr_r352 *acr = acr_r352(_acr);
-
- return acr_r352_shutdown(acr, sb);
-}
-
-static void
-acr_r352_dtor(struct nvkm_acr *_acr)
-{
- struct acr_r352 *acr = acr_r352(_acr);
-
- nvkm_gpuobj_del(&acr->unload_blob);
-
- if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
- kfree(acr->hsbl_unload_blob);
- kfree(acr->hsbl_blob);
- nvkm_gpuobj_del(&acr->load_blob);
- nvkm_gpuobj_del(&acr->ls_blob);
-
- kfree(acr);
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r352_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r352_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r352_ls_gpccs_func_0,
- }
-};
-
-
-
-/**
- * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
- * @dma_idx: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * @code_size_total: total size of the code part in the ucode
- * @code_size_to_load: size of the code part to load in PMU IMEM.
- * @code_entry_point: entry point in the code.
- * @data_dma_base: Physical FB address where data part of ucode is located
- * @data_size: Total size of the data portion.
- * @overlay_dma_base: Physical FB address for resident code present in ucode
- * @argc: Total number of args
- * @argv: offset where args are copied into PMU's DMEM.
- *
- * Structure used by the PMU bootloader to load the rest of the code
- */
-struct acr_r352_pmu_bl_desc {
- u32 dma_idx;
- u32 code_dma_base;
- u32 code_size_total;
- u32 code_size_to_load;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
- u32 overlay_dma_base;
- u32 argc;
- u32 argv;
- u16 code_dma_base1;
- u16 data_dma_base1;
- u16 overlay_dma_base1;
-};
-
-/**
- * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
- *
- */
-static void
-acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r352_pmu_bl_desc *desc = _desc;
- u64 base;
- u64 addr_code;
- u64 addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = (base + pdesc->app_resident_code_offset) >> 8;
- addr_data = (base + pdesc->app_resident_data_offset) >> 8;
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = lower_32_bits(addr_code);
- desc->code_dma_base1 = upper_32_bits(addr_code);
- desc->code_size_total = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = lower_32_bits(addr_data);
- desc->data_dma_base1 = upper_32_bits(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = lower_32_bits(addr_code);
- desc->overlay_dma_base1 = upper_32_bits(addr_code);
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r352_ls_pmu_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r352_func = {
- .fixup_hs_desc = acr_r352_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
- },
-};
-
-static const struct nvkm_acr_func
-acr_r352_base_func = {
- .dtor = acr_r352_dtor,
- .fini = acr_r352_fini,
- .load = acr_r352_load,
- .reset = acr_r352_reset,
-};
-
-struct nvkm_acr *
-acr_r352_new_(const struct acr_r352_func *func,
- enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- struct acr_r352 *acr;
- int i;
-
- /* Check that all requested falcons are supported */
- for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
- if (!func->ls_func[i])
- return ERR_PTR(-ENOTSUPP);
- }
-
- acr = kzalloc(sizeof(*acr), GFP_KERNEL);
- if (!acr)
- return ERR_PTR(-ENOMEM);
-
- acr->base.boot_falcon = boot_falcon;
- acr->base.managed_falcons = managed_falcons;
- acr->base.func = &acr_r352_base_func;
- acr->func = func;
-
- return &acr->base;
-}
-
-struct nvkm_acr *
-acr_r352_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
deleted file mode 100644
index e516cab849dd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef __NVKM_SECBOOT_ACR_R352_H__
-#define __NVKM_SECBOOT_ACR_R352_H__
-
-#include "acr.h"
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-
-struct ls_ucode_img;
-
-#define ACR_R352_MAX_APPS 8
-
-#define LSF_FLAG_LOAD_CODE_AT_0 1
-#define LSF_FLAG_DMACTL_REQ_CTX 4
-#define LSF_FLAG_FORCE_PRIV_LOAD 8
-
-static inline u32
-hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
-{
- return hdr->apps[app];
-}
-
-static inline u32
-hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
-{
- return hdr->apps[hdr->num_apps + app];
-}
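
The two inline helpers above pin down the layout of the variable-length apps[] array in hsf_load_header: the first num_apps entries hold application offsets and the next num_apps entries hold the matching sizes. A commented fragment, with num_apps == 2 as an illustrative value:

/* apps[] layout for num_apps == 2:
 *   apps[0] = offset of app 0    apps[2] = size of app 0
 *   apps[1] = offset of app 1    apps[3] = size of app 1
 */
u32 off0 = hsf_load_header_app_off(hdr, 0);  /* reads apps[0] */
u32 sz0  = hsf_load_header_app_size(hdr, 0); /* reads apps[2] */
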
-
-/**
- * struct acr_r352_lsf_func - manages a specific LS firmware version
- *
- * @generate_bl_desc: function called on a block of bl_desc_size to generate the
- * proper bootloader descriptor for this LS firmware
- * @bl_desc_size: size of the bootloader descriptor
- * @lhdr_flags: LS flags
- */
-struct acr_r352_lsf_func {
- void (*generate_bl_desc)(const struct nvkm_acr *,
- const struct ls_ucode_img *, u64, void *);
- u32 bl_desc_size;
- u32 lhdr_flags;
-};
-
-/**
- * struct acr_r352_ls_func - manages a single LS falcon
- *
- * @load: load the external firmware into a ls_ucode_img
- * @post_run: hook called right after the ACR is executed
- */
-struct acr_r352_ls_func {
- int (*load)(const struct nvkm_secboot *, int maxver,
- struct ls_ucode_img *);
- int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
- int version_max;
- const struct acr_r352_lsf_func *version[];
-};
-
-struct acr_r352;
-
-/**
- * struct acr_r352_func - manages nuances between ACR versions
- *
- * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
- * the proper HS bootloader descriptor
- * @hs_bl_desc_size: size of the HS bootloader descriptor
- */
-struct acr_r352_func {
- void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
- u64);
- void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
- u32 hs_bl_desc_size;
- bool shadow_blob;
-
- struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
- int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
- int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-
- const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
-};
-
-/**
- * struct acr_r352 - ACR data for driver release 352 (and beyond)
- */
-struct acr_r352 {
- struct nvkm_acr base;
- const struct acr_r352_func *func;
-
- /*
- * HS FW - lock WPR region (dGPU only) and load LS FWs;
- * on Tegra the HS FW copies the LS blob into the fixed WPR instead
- */
- struct nvkm_gpuobj *load_blob;
- struct {
- struct hsf_load_header load_bl_header;
- u32 __load_apps[ACR_R352_MAX_APPS * 2];
- };
-
- /* HS FW - unlock WPR region (dGPU only) */
- struct nvkm_gpuobj *unload_blob;
- struct {
- struct hsf_load_header unload_bl_header;
- u32 __unload_apps[ACR_R352_MAX_APPS * 2];
- };
-
- /* HS bootloader */
- void *hsbl_blob;
-
- /* HS bootloader for unload blob, if using a different falcon */
- void *hsbl_unload_blob;
-
- /* LS FWs, to be loaded by the HS ACR */
- struct nvkm_gpuobj *ls_blob;
-
- /* Firmware already loaded? */
- bool firmware_ok;
-
- /* Falcons to lazy-bootstrap */
- u32 lazy_bootstrap;
-
- /* To keep track of the state of all managed falcons */
- enum {
- /* In non-secure state, no firmware loaded, no privileges */
- NON_SECURE = 0,
- /* In low-secure mode and ready to be started */
- RESET,
- /* In low-secure mode and running */
- RUNNING,
- } falcon_state[NVKM_SECBOOT_FALCON_END];
-};
-#define acr_r352(acr) container_of(acr, struct acr_r352, base)
-
-struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
- enum nvkm_secboot_falcon, unsigned long);
-
-struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
- const struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
-int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
-int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u64);
-
-void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
deleted file mode 100644
index f6b2d20d7fc3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <engine/falcon.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-static void
-acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r361_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-void
-acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r361_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r361_ls_gpccs_func_0,
- }
-};
-
-struct acr_r361_pmu_bl_desc {
- u32 reserved;
- u32 dma_idx;
- struct flcn_u64 code_dma_base;
- u32 total_code_size;
- u32 code_size_to_load;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- struct flcn_u64 overlay_dma_base;
- u32 argc;
- u32 argv;
-};
-
-static void
-acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r361_pmu_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
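-	/* The falcon's command line is stored at the very end of its DMEM */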
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->total_code_size = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = u64_to_flcn64(addr_code);
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r361_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r361_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r361_ls_pmu_func_0,
- }
-};
-
-static void
-acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
- struct acr_r361_pmu_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- /* For some reason we should not add app_resident_code_offset here */
- addr_code = base;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = sec->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->total_code_size = pdesc->app_size;
- desc->code_size_to_load = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->overlay_dma_base = u64_to_flcn64(addr_code);
- desc->argc = 1;
- /* args are stored at the beginning of EMEM */
- desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r361_ls_sec2_func_0 = {
- .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 0,
- .version = {
- &acr_r361_ls_sec2_func_0,
- }
-};
-
-
-const struct acr_r352_func
-acr_r361_func = {
- .fixup_hs_desc = acr_r352_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
- },
-};
-
-struct nvkm_acr *
-acr_r361_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
deleted file mode 100644
index 38dec93779c8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R361_H__
-#define __NVKM_SECBOOT_ACR_R361_H__
-
-#include "acr_r352.h"
-
-/**
- * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off:	offset from code_dma_base where the non-secure code is
- *			located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size:	the size of the non-secure code part.
- * @sec_code_off:	offset from code_dma_base where the secure code is
- *			located. The offset must be a multiple of 256 to help perf
- * @sec_code_size:	the size of the secure code part.
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size:		size of data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by the host and copied into DMEM at the offset provided in
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r361_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
-};
-
-void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-
-extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
deleted file mode 100644
index 30cf04109991..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r361.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
- * parameter.
- */
-
-struct acr_r364_hsflcn_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_memory_range;
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- u32 shadow_mem_start_addr;
- } region_props[2];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-static void
-acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct acr_r364_hsflcn_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = ls_blob->addr + ls_blob->size;
-
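-		/* With a shadow blob, WPR covers only the second half of ls_blob */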
- if (acr->func->shadow_blob)
- wpr_start += ls_blob->size / 2;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- if (acr->func->shadow_blob)
- desc->regions.region_props[0].shadow_mem_start_addr =
- ls_blob->addr >> 8;
- else
- desc->regions.region_props[0].shadow_mem_start_addr = 0;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-const struct acr_r352_func
-acr_r364_func = {
- .fixup_hs_desc = acr_r364_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
- .ls_fill_headers = acr_r352_ls_fill_headers,
- .ls_write_wpr = acr_r352_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- },
-};
-
-
-struct nvkm_acr *
-acr_r364_new(unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
- managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
deleted file mode 100644
index 472ced29da7e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r367.h"
-#include "acr_r361.h"
-#include "acr_r370.h"
-
-#include <core/gpuobj.h>
-
-/*
- * r367 ACR: the new LS signature format requires a rewrite of the LS firmware
- * and blob creation functions. Also, the hsflcn_desc layout has changed slightly.
- */
-
-#define LSF_LSB_DEPMAP_SIZE 11
-
-/**
- * struct acr_r367_lsf_lsb_header - LS firmware header
- *
- * See also struct acr_r352_lsf_lsb_header for documentation.
- */
-struct acr_r367_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
-	 * @dbg_keys:		signature to use in debug mode
-	 * @b_prd_present:	whether the production key is present
-	 * @b_dbg_present:	whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- u32 supports_versioning;
- u32 version;
- u32 depmap_count;
- u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
- u8 kdf[16];
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-};
-
-/**
- * struct acr_r367_lsf_wpr_header - LS blob WPR Header
- *
- * See also struct acr_r352_lsf_wpr_header for documentation.
- */
-struct acr_r367_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 bin_version;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
-};
-
-/**
- * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
- */
-struct ls_ucode_img_r367 {
- struct ls_ucode_img base;
-
- const struct acr_r352_lsf_func *func;
-
- struct acr_r367_lsf_wpr_header wpr_header;
- struct acr_r367_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
-
-struct ls_ucode_img *
-acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
- const struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon_id)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
- struct ls_ucode_img_r367 *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- img->base.falcon_id = falcon_id;
-
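-	/* On success, load() returns the index of the signature version to use */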
- ret = func->load(sb, func->version_max, &img->base);
- if (ret < 0) {
- kfree(img->base.ucode_data);
- kfree(img->base.sig);
- kfree(img);
- return ERR_PTR(ret);
- }
-
- img->func = func->version[ret];
-
- /* Check that the signature size matches our expectations... */
- if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
- nvkm_error(subdev, "invalid signature size for %s falcon!\n",
- nvkm_secboot_falcon_name[falcon_id]);
- return ERR_PTR(-EINVAL);
- }
-
- /* Copy signature to the right place */
- memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
- /* not needed? the signature should already have the right value */
- img->lsb_header.signature.falcon_id = falcon_id;
-
- return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-static u32
-acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
- struct ls_ucode_img_r367 *img, u32 offset)
-{
- struct ls_ucode_img *_img = &img->base;
- struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
- struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_lsf_func *func = img->func;
-
- /* Fill WPR header */
- whdr->falcon_id = _img->falcon_id;
- whdr->bootstrap_owner = acr->base.boot_falcon;
- whdr->bin_version = lhdr->signature.version;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
-	/* Skip bootstrapping falcons started by someone other than the ACR */
- if (acr->lazy_bootstrap & BIT(_img->falcon_id))
- whdr->lazy_bootstrap = 1;
-
-	/* Align, save off, and include the LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(*lhdr);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- _img->ucode_off = lhdr->ucode_off = offset;
- offset += _img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
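-	/* ucode_size spans the BL plus resident code; data_size is the rest */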
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
- * Though the BL is located at 0th offset of the image, the VA
- * is different to make sure that it doesn't collide the actual
- * OS VA range
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = func->lhdr_flags;
- if (_img->falcon_id == acr->base.boot_falcon)
- lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
- /* Align and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
-
-int
-acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
- struct ls_ucode_img_r367 *img;
- struct list_head *l;
- u32 count = 0;
- u32 offset;
-
- /* Count the number of images to manage */
- list_for_each(l, imgs)
- count++;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(img->wpr_header) * (count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, imgs, base.node) {
- offset = acr_r367_ls_img_fill_headers(acr, img, offset);
- }
-
- return offset;
-}
-
-int
-acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
-{
- struct ls_ucode_img *_img;
- u32 pos = 0;
- u32 max_desc_size = 0;
- u8 *gdesc;
-
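-	/* Find the largest BL descriptor so one scratch buffer fits them all */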
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
- }
-
- gdesc = kmalloc(max_desc_size, GFP_KERNEL);
- if (!gdesc)
- return -ENOMEM;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(_img, imgs, node) {
- struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_lsf_func *ls_func = img->func;
-
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- memset(gdesc, 0, ls_func->bl_desc_size);
- ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
- gdesc, ls_func->bl_desc_size);
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- _img->ucode_data, _img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
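-	/* Terminate the WPR header array so the ACR knows where it ends */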
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- kfree(gdesc);
-
- return 0;
-}
-
-struct acr_r367_hsflcn_desc {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_memory_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- u32 shadow_mem_start_addr;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-void
-acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- void *_desc)
-{
- struct acr_r367_hsflcn_desc *desc = _desc;
- struct nvkm_gpuobj *ls_blob = acr->ls_blob;
-
- /* WPR region information if WPR is not fixed */
- if (sb->wpr_size == 0) {
- u64 wpr_start = ls_blob->addr;
- u64 wpr_end = ls_blob->addr + ls_blob->size;
-
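-		/* With a shadow blob, WPR covers only the second half of ls_blob */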
- if (acr->func->shadow_blob)
- wpr_start += ls_blob->size / 2;
-
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 2;
- desc->regions.region_props[0].start_addr = wpr_start >> 8;
- desc->regions.region_props[0].end_addr = wpr_end >> 8;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].read_mask = 0xf;
- desc->regions.region_props[0].write_mask = 0xc;
- desc->regions.region_props[0].client_mask = 0x2;
- if (acr->func->shadow_blob)
- desc->regions.region_props[0].shadow_mem_start_addr =
- ls_blob->addr >> 8;
- else
- desc->regions.region_props[0].shadow_mem_start_addr = 0;
- } else {
- desc->ucode_blob_base = ls_blob->addr;
- desc->ucode_blob_size = ls_blob->size;
- }
-}
-
-static const struct acr_r352_ls_func
-acr_r367_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 1,
- .version = {
- &acr_r361_ls_sec2_func_0,
- &acr_r370_ls_sec2_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r367_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
- },
-};
-
-struct nvkm_acr *
-acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
deleted file mode 100644
index e821d0fd6217..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <engine/falcon.h>
-#include <engine/sec2.h>
-
-static void
-acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r370_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_fecs_func_0 = {
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .version_max = 0,
- .version = {
- &acr_r370_ls_fecs_func_0,
- }
-};
-
-static const struct acr_r352_lsf_func
-acr_r370_ls_gpccs_func_0 = {
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .version_max = 0,
- .version = {
- &acr_r370_ls_gpccs_func_0,
- }
-};
-
-static void
-acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
- struct acr_r370_flcn_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- /* For some reason we should not add app_resident_code_offset here */
- addr_code = base;
- addr_data = base + pdesc->app_resident_data_offset;
- addr_args = sec->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->argc = 1;
- /* args are stored at the beginning of EMEM */
- desc->argv = 0x01000000;
-}
-
-const struct acr_r352_lsf_func
-acr_r370_ls_sec2_func_0 = {
- .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r370_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
- .post_run = acr_ls_sec2_post_run,
- .version_max = 0,
- .version = {
- &acr_r370_ls_sec2_func_0,
- }
-};
-
-void
-acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
-
-const struct acr_r352_func
-acr_r370_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
- },
-};
-
-struct nvkm_acr *
-acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
deleted file mode 100644
index 2efed6f995ad..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R370_H__
-#define __NVKM_SECBOOT_ACR_R370_H__
-
-#include "priv.h"
-struct hsf_load_header;
-
-/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
-struct acr_r370_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- u32 argc;
- u32 argv;
-};
-
-void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
-extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
deleted file mode 100644
index 8f0647766038..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr_r370.h"
-#include "acr_r367.h"
-
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-
-static void
-acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r370_flcn_bl_desc *desc = _desc;
- u64 base, addr_code, addr_data;
- u32 addr_args;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
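-	/* The falcon's command line is stored at the very end of its DMEM */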
- addr_args = pmu->falcon->data.limit;
- addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
- desc->argc = 1;
- desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r375_ls_pmu_func_0 = {
- .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r375_ls_pmu_func = {
- .load = acr_ls_ucode_load_pmu,
- .post_run = acr_ls_pmu_post_run,
- .version_max = 0,
- .version = {
- &acr_r375_ls_pmu_func_0,
- }
-};
-
-const struct acr_r352_func
-acr_r375_func = {
- .fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- .shadow_blob = true,
- .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
- .ls_fill_headers = acr_r367_ls_fill_headers,
- .ls_write_wpr = acr_r367_ls_write_wpr,
- .ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
- [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
- },
-};
-
-struct nvkm_acr *
-acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
- unsigned long managed_falcons)
-{
- return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
deleted file mode 100644
index ee29c6c11afd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Secure boot is the process by which NVIDIA-signed firmware is loaded into
- * some of the falcons of a GPU. For production devices this is the only way
- * for the firmware to access useful (but sensitive) registers.
- *
- * A Falcon microprocessor supporting advanced security modes can run in one of
- * three modes:
- *
- * - Non-secure (NS). In this mode, functionality is similar to Falcon
- * architectures before security modes were introduced (pre-Maxwell), but
- * capability is restricted. In particular, certain registers may be
- * inaccessible for reads and/or writes, and physical memory access may be
- * disabled (on certain Falcon instances). This is the only possible mode that
- * can be used if you don't have microcode cryptographically signed by NVIDIA.
- *
- * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
- * not possible to read or write any Falcon internal state or Falcon registers
- * from outside the Falcon (for example, from the host system). The only way
- * to enable this mode is by loading microcode that has been signed by NVIDIA.
- * (The loading process involves tagging the IMEM block as secure, writing the
- * signature into a Falcon register, and starting execution. The hardware will
- * validate the signature, and if valid, grant HS privileges.)
- *
- * - Light Secure (LS). In this mode, the microprocessor has more privileges
- * than NS but fewer than HS. Some of the microprocessor state is visible to
- * host software to ease debugging. The only way to enable this mode is by HS
- * microcode enabling LS mode. Some privileges available to HS mode are not
- * available here. LS mode was introduced in GM20x.
- *
- * Secure boot consists of temporarily switching an HS-capable falcon (typically
- * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
- * load them, and switch managed falcons into LS mode. Once secure boot
- * completes, no falcon remains in HS mode.
- *
- * Secure boot requires a write-protected memory region (WPR) which can only be
- * written by the secure falcon. On dGPU, the driver sets up the WPR region in
- * video memory. On Tegra, it is set up by the bootloader and its location and
- * size are written into memory controller registers.
- *
- * The secure boot process takes place as follows:
- *
- * 1) An LS blob is constructed that contains all the LS firmwares we want to
- * load, along with their signatures and bootloaders.
- *
- * 2) An HS blob (also called ACR) is created that contains the signed HS
- * firmware in charge of loading the LS firmwares into their respective
- * falcons.
- *
- * 3) The HS blob is loaded (via its own bootloader) and executed on the
- * HS-capable falcon. It authenticates itself, switches the secure falcon to
- * HS mode and sets up the WPR region around the LS blob (dGPU) or copies the
- * LS blob into the WPR region (Tegra).
- *
- * 4) The LS blob is now secure from all external tampering. The HS falcon
- * checks the signatures of the LS firmwares and, if valid, switches the
- * managed falcons to LS mode and makes them ready to run the LS firmware.
- *
- * 5) The managed falcons remain in LS mode and can be started.
- *
- */
-
-#include "priv.h"
-#include "acr.h"
-
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-
-const char *
-nvkm_secboot_falcon_name[] = {
- [NVKM_SECBOOT_FALCON_PMU] = "PMU",
- [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
- [NVKM_SECBOOT_FALCON_FECS] = "FECS",
- [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
- [NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
- [NVKM_SECBOOT_FALCON_END] = "<invalid>",
-};
-/**
- * nvkm_secboot_reset() - reset the specified falcon(s)
- */
-int
-nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
-{
- /* Unmanaged falcon? */
- if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
- nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
- return -EINVAL;
- }
-
- return sb->acr->func->reset(sb->acr, sb, falcon_mask);
-}
-
-/**
- * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
- */
-bool
-nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
-{
- if (!sb)
- return false;
-
- return sb->acr->managed_falcons & BIT(fid);
-}
-
-static int
-nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- int ret = 0;
-
- switch (sb->acr->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
- break;
- case NVKM_SECBOOT_FALCON_SEC2:
- /* we must keep SEC2 alive forever since ACR will run on it */
- nvkm_engine_ref(&subdev->device->sec2->engine);
- sb->boot_falcon = subdev->device->sec2->falcon;
- sb->halt_falcon = subdev->device->pmu->falcon;
- break;
- default:
- nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
- nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
- return -EINVAL;
- }
- nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
-
- /* Call chip-specific init function */
- if (sb->func->oneinit)
- ret = sb->func->oneinit(sb);
- if (ret) {
- nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static int
-nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- int ret = 0;
-
- if (sb->func->fini)
- ret = sb->func->fini(sb, suspend);
-
- return ret;
-}
-
-static void *
-nvkm_secboot_dtor(struct nvkm_subdev *subdev)
-{
- struct nvkm_secboot *sb = nvkm_secboot(subdev);
- void *ret = NULL;
-
- if (sb->func->dtor)
- ret = sb->func->dtor(sb);
-
- return ret;
-}
-
-static const struct nvkm_subdev_func
-nvkm_secboot = {
- .oneinit = nvkm_secboot_oneinit,
- .fini = nvkm_secboot_fini,
- .dtor = nvkm_secboot_dtor,
-};
-
-int
-nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
- struct nvkm_device *device, int index,
- struct nvkm_secboot *sb)
-{
- unsigned long fid;
-
- nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
- sb->func = func;
- sb->acr = acr;
- acr->subdev = &sb->subdev;
-
- nvkm_debug(&sb->subdev, "securely managed falcons:\n");
- for_each_set_bit(fid, &sb->acr->managed_falcons,
- NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "- %s\n",
- nvkm_secboot_falcon_name[fid]);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
deleted file mode 100644
index 5e91b3f90065..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "acr.h"
-#include "gm200.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <engine/falcon.h>
-#include <subdev/mc.h>
-
-/**
- * gm200_secboot_run_blob() - run the given high-secure blob
- */
-int
-gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
- struct nvkm_falcon *falcon)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- struct nvkm_subdev *subdev = &gsb->base.subdev;
- struct nvkm_vma *vma = NULL;
- u32 start_address;
- int ret;
-
- ret = nvkm_falcon_get(falcon, subdev);
- if (ret)
- return ret;
-
- /* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
- if (ret) {
- nvkm_falcon_put(falcon, subdev);
- return ret;
- }
-
- ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
- if (ret)
- goto end;
-
- /* Reset and set the falcon up */
- ret = nvkm_falcon_reset(falcon);
- if (ret)
- goto end;
- nvkm_falcon_bind_context(falcon, gsb->inst);
-
- /* Load the HS bootloader into the falcon's IMEM/DMEM */
- ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
- if (ret < 0)
- goto end;
-
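-	/* On success, load() returns the start address for the HS bootloader */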
- start_address = ret;
-
- /* Disable interrupts as we will poll for the HALT bit */
- nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
-
- /* Set default error value in mailbox register */
- nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
-
- /* Start the HS bootloader */
- nvkm_falcon_set_start_addr(falcon, start_address);
- nvkm_falcon_start(falcon);
- ret = nvkm_falcon_wait_for_halt(falcon, 100);
- if (ret)
- goto end;
-
- /*
- * The mailbox register contains the (positive) error code - return this
- * to the caller
- */
- ret = nvkm_falcon_rd32(falcon, 0x040);
-
-end:
- /* Reenable interrupts */
- nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
-
- /* We don't need the ACR firmware anymore */
- nvkm_vmm_put(gsb->vmm, &vma);
- nvkm_falcon_put(falcon, subdev);
-
- return ret;
-}
-
-int
-gm200_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- struct nvkm_device *device = sb->subdev.device;
- int ret;
-
- /* Allocate instance block and VM */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
- &gsb->inst);
- if (ret)
- return ret;
-
- ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
- &gsb->vmm);
- if (ret)
- return ret;
-
- atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
- gsb->vmm->debug = gsb->base.subdev.debug;
-
- ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
- if (ret)
- return ret;
-
- if (sb->acr->func->oneinit) {
- ret = sb->acr->func->oneinit(sb->acr, sb);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int
-gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
-{
- int ret = 0;
-
- if (sb->acr->func->fini)
- ret = sb->acr->func->fini(sb->acr, sb, suspend);
-
- return ret;
-}
-
-void *
-gm200_secboot_dtor(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
-
- sb->acr->func->dtor(sb->acr);
-
- nvkm_vmm_part(gsb->vmm, gsb->inst);
- nvkm_vmm_unref(&gsb->vmm);
- nvkm_memory_unref(&gsb->inst);
-
- return gsb;
-}
-
-
-static const struct nvkm_secboot_func
-gm200_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm200_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm200_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
-		*psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-
-MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
-
-MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
deleted file mode 100644
index 62c5e162099a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_GM200_H__
-#define __NVKM_SECBOOT_GM200_H__
-
-#include "priv.h"
-
-struct gm200_secboot {
- struct nvkm_secboot base;
-
- /* Instance block & address space used for HS FW execution */
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
-};
-#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
-
-int gm200_secboot_oneinit(struct nvkm_secboot *);
-int gm200_secboot_fini(struct nvkm_secboot *, bool);
-void *gm200_secboot_dtor(struct nvkm_secboot *);
-int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
- struct nvkm_falcon *);
-
-/* Tegra-only */
-int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
deleted file mode 100644
index df8b919dcf09..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA210_MC_BASE 0x70019000
-
-#ifdef CONFIG_ARCH_TEGRA
-#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
-#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
-#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
-#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
-#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
-/**
- * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
- *
- * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
- * is reserved from system memory by the bootloader and irreversibly locked.
- * This function reads the address and size of the pre-configured WPR region.
- */
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
- struct nvkm_secboot *sb = &gsb->base;
- void __iomem *mc;
- u32 cfg;
-
- mc = ioremap(mc_base, 0xd00);
- if (!mc) {
- nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
- return -ENOMEM;
- }
- sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
- ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
- sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
- << 17;
- cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
- iounmap(mc);
-
- /* Check that WPR settings are valid */
- if (sb->wpr_size == 0) {
- nvkm_error(&sb->subdev, "WPR region is empty\n");
- return -EINVAL;
- }
-
- if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
- nvkm_error(&sb->subdev, "WPR region not locked\n");
- return -EINVAL;
- }
-
- return 0;
-}
-#else
-int
-gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
-{
- nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
- return -EINVAL;
-}
-#endif
-
-static int
-gm20b_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
-
- ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
- if (ret)
- return ret;
-
- return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gm20b_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm20b_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gm20b_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_PMU));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
- /* Support the initial GM20B firmware release without PMU */
- acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
-MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
-#endif
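
The << 17 in gm20b_secboot_tegra_read_wpr() above is easy to misread: the
MC_SECURITY_CARVEOUT2_SIZE_128K register holds the carveout size in 128 KiB
units, and 128 KiB is 1 << 17 bytes, so the shift converts units to bytes. A
minimal sketch of the decoding, using a made-up register value:

	#include <stdint.h>

	/* 128 KiB granularity, hence the << 17 in the driver above. */
	static uint64_t carveout_size_bytes(uint32_t units_of_128k)
	{
		return (uint64_t)units_of_128k << 17;
	}

	/* e.g. a (hypothetical) raw value of 0x800 decodes to 256 MiB. */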
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
deleted file mode 100644
index 4695f1c8e33f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#include "ls_ucode.h"
-#include "hs_ucode.h"
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <engine/falcon.h>
-#include <engine/nvdec.h>
-
-static bool
-gp102_secboot_scrub_required(struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- u32 reg;
-
- nvkm_wr32(device, 0x100cd0, 0x2);
- reg = nvkm_rd32(device, 0x100cd0);
-
- return (reg & BIT(4));
-}
-
-static int
-gp102_run_secure_scrub(struct nvkm_secboot *sb)
-{
- struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_engine *engine;
- struct nvkm_falcon *falcon;
- void *scrub_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- struct hsf_load_header *lhdr;
- void *scrub_data;
- int ret;
-
- nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
-
- engine = nvkm_engine_ref(&device->nvdec[0]->engine);
- if (IS_ERR(engine))
- return PTR_ERR(engine);
- falcon = device->nvdec[0]->falcon;
-
- nvkm_falcon_get(falcon, &sb->subdev);
-
- scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
- if (IS_ERR(scrub_image))
- return PTR_ERR(scrub_image);
-
- nvkm_falcon_reset(falcon);
- nvkm_falcon_bind_context(falcon, NULL);
-
- hsbin_hdr = scrub_image;
- fw_hdr = scrub_image + hsbin_hdr->header_offset;
- lhdr = scrub_image + fw_hdr->hdr_offset;
- scrub_data = scrub_image + hsbin_hdr->data_offset;
-
- nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
- lhdr->non_sec_code_size,
- lhdr->non_sec_code_off >> 8, 0, false);
- nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
- ALIGN(lhdr->apps[0], 0x100),
- lhdr->apps[1],
- lhdr->apps[0] >> 8, 0, true);
- nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
- lhdr->data_size, 0);
-
- kfree(scrub_image);
-
- nvkm_falcon_set_start_addr(falcon, 0x0);
- nvkm_falcon_start(falcon);
-
- ret = nvkm_falcon_wait_for_halt(falcon, 500);
- if (ret < 0) {
- nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
- ret = -ETIMEDOUT;
- goto end;
- }
-
- /* put nvdec in clean state - without reset it will remain in HS mode */
- nvkm_falcon_reset(falcon);
-
- if (gp102_secboot_scrub_required(sb)) {
- nvkm_error(subdev, "VPR scrubber binary failed!\n");
- ret = -EINVAL;
- goto end;
- }
-
- nvkm_debug(subdev, "VPR scrub successfully completed\n");
-
-end:
- nvkm_falcon_put(falcon, &sb->subdev);
- nvkm_engine_unref(&engine);
- return ret;
-}
-
-static int
-gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
- struct nvkm_falcon *falcon)
-{
- int ret;
-
- /* make sure the VPR region is unlocked */
- if (gp102_secboot_scrub_required(sb)) {
- ret = gp102_run_secure_scrub(sb);
- if (ret)
- return ret;
- }
-
- return gm200_secboot_run_blob(sb, blob, falcon);
-}
-
-const struct nvkm_secboot_func
-gp102_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gm200_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gp102_secboot_run_blob,
-};
-
-int
-gp102_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
- BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_SEC2));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
-MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
deleted file mode 100644
index 737a8d50a1f2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2017 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gm200.h"
-#include "acr.h"
-
-int
-gp108_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
- BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_SEC2));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
- acr->func->dtor(acr);
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
-}
-
-MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
-
-MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
-MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
deleted file mode 100644
index 28ca29d0eeee..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "acr.h"
-#include "gm200.h"
-
-#define TEGRA186_MC_BASE 0x02c10000
-
-static int
-gp10b_secboot_oneinit(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
-
- ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
- if (ret)
- return ret;
-
- return gm200_secboot_oneinit(sb);
-}
-
-static const struct nvkm_secboot_func
-gp10b_secboot = {
- .dtor = gm200_secboot_dtor,
- .oneinit = gp10b_secboot_oneinit,
- .fini = gm200_secboot_fini,
- .run_blob = gm200_secboot_run_blob,
-};
-
-int
-gp10b_secboot_new(struct nvkm_device *device, int index,
- struct nvkm_secboot **psb)
-{
- int ret;
- struct gm200_secboot *gsb;
- struct nvkm_acr *acr;
-
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS) |
- BIT(NVKM_SECBOOT_FALCON_PMU));
- if (IS_ERR(acr))
- return PTR_ERR(acr);
-
- gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
- if (!gsb) {
- psb = NULL;
- return -ENOMEM;
- }
- *psb = &gsb->base;
-
- ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
-MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
-MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
deleted file mode 100644
index 6b33182ddc2f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "hs_ucode.h"
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <engine/falcon.h>
-
-/**
- * hs_ucode_patch_signature() - patch HS blob with correct signature for
- * the specified falcon.
- */
-static void
-hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
- bool new_format)
-{
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- void *hs_data = acr_image + hsbin_hdr->data_offset;
- void *sig;
- u32 sig_size;
- u32 patch_loc, patch_sig;
-
- /*
- * I had the brilliant idea to "improve" the binary format by
- * removing this useless indirection. However, to make NVIDIA files
- * directly compatible, let's support both formats.
- */
- if (new_format) {
- patch_loc = fw_hdr->patch_loc;
- patch_sig = fw_hdr->patch_sig;
- } else {
- patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
- patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
- }
-
- /* Falcon in debug or production mode? */
- if (falcon->debug) {
- sig = acr_image + fw_hdr->sig_dbg_offset;
- sig_size = fw_hdr->sig_dbg_size;
- } else {
- sig = acr_image + fw_hdr->sig_prod_offset;
- sig_size = fw_hdr->sig_prod_size;
- }
-
- /* Patch signature */
- memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
-}
-
-void *
-hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
- const char *fw)
-{
- void *acr_image;
- bool new_format;
-
- acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
- if (IS_ERR(acr_image))
- return acr_image;
-
- /* detect the format to define how the signature should be patched */
- switch (((u32 *)acr_image)[0]) {
- case 0x3b1d14f0:
- new_format = true;
- break;
- case 0x000010de:
- new_format = false;
- break;
- default:
- nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
- return ERR_PTR(-EINVAL);
- }
-
- hs_ucode_patch_signature(falcon, acr_image, new_format);
-
- return acr_image;
-}
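
The two formats handled by hs_ucode_patch_signature() above differ in a
single indirection: in the new format, patch_loc and patch_sig in the header
are the values themselves, while in the old format they are offsets to 32-bit
words in the image that hold those values. A small sketch of the distinction,
with read_u32 and patch_loc_of as made-up helper names:

	#include <stdint.h>
	#include <string.h>

	static uint32_t read_u32(const void *image, uint32_t off)
	{
		uint32_t v;

		memcpy(&v, (const uint8_t *)image + off, sizeof(v));
		return v;
	}

	/* new format: the header value is the location itself;
	 * old format: the header value is where the location is stored. */
	static uint32_t patch_loc_of(const void *image, uint32_t hdr_val,
				     int new_format)
	{
		return new_format ? hdr_val : read_u32(image, hdr_val);
	}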
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
deleted file mode 100644
index d8cfc6f7752a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_HS_UCODE_H__
-#define __NVKM_SECBOOT_HS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-
-struct nvkm_falcon;
-
-/**
- * struct hsf_fw_header - HS firmware descriptor
- * @sig_dbg_offset: offset of the debug signature
- * @sig_dbg_size: size of the debug signature
- * @sig_prod_offset: offset of the production signature
- * @sig_prod_size: size of the production signature
- * @patch_loc: offset of the offset (sic) of where the signature is
- * @patch_sig: offset of the offset (sic) to add to sig_*_offset
- * @hdr_offset: offset of the load header (see struct hs_load_header)
- * @hdr_size: size of above header
- *
- * This structure is embedded in the HS firmware image at
- * hs_bin_hdr.header_offset.
- */
-struct hsf_fw_header {
- u32 sig_dbg_offset;
- u32 sig_dbg_size;
- u32 sig_prod_offset;
- u32 sig_prod_size;
- u32 patch_loc;
- u32 patch_sig;
- u32 hdr_offset;
- u32 hdr_size;
-};
-
-/**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 data_dma_base;
- u32 data_size;
- u32 num_apps;
- /*
- * Organized as follows:
- * - app0_code_off
- * - app1_code_off
- * - ...
- * - appn_code_off
- * - app0_code_size
- * - app1_code_size
- * - ...
- */
- u32 apps[0];
-};
-
-void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
- const char *);
-
-#endif
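
The apps[] layout documented in struct hsf_load_header above (all code
offsets first, then all code sizes) is what gp102_run_secure_scrub() relies
on when it reads lhdr->apps[0] and lhdr->apps[1] for a single-app image. A
pair of hypothetical accessors that make the indexing explicit:

	/* apps[] = num_apps code offsets followed by num_apps code sizes. */
	static inline u32 hsf_app_code_off(const struct hsf_load_header *h,
					   u32 i)
	{
		return h->apps[i];
	}

	static inline u32 hsf_app_code_size(const struct hsf_load_header *h,
					    u32 i)
	{
		return h->apps[h->num_apps + i];
	}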
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
deleted file mode 100644
index d43f906da3a7..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_LS_UCODE_H__
-#define __NVKM_SECBOOT_LS_UCODE_H__
-
-#include <core/os.h>
-#include <core/subdev.h>
-#include <subdev/secboot.h>
-
-struct nvkm_acr;
-
-/**
- * struct ls_ucode_img_desc - descriptor of firmware image
- * @descriptor_size: size of this descriptor
- * @image_size: size of the whole image
- * @bootloader_start_offset: start offset of the bootloader in ucode image
- * @bootloader_size: size of the bootloader
- * @bootloader_imem_offset: start offset of the bootloader in IMEM
- * @bootloader_entry_point: entry point of the bootloader in IMEM
- * @app_start_offset: start offset of the LS firmware
- * @app_size: size of the LS firmware's code and data
- * @app_imem_offset: offset of the app in IMEM
- * @app_imem_entry: entry point of the app in IMEM
- * @app_dmem_offset: offset of the data in DMEM
- * @app_resident_code_offset: offset of app code from app_start_offset
- * @app_resident_code_size: size of the code
- * @app_resident_data_offset: offset of data from app_start_offset
- * @app_resident_data_size: size of data
- *
- * A firmware image contains the code, data, and bootloader of a given LS
- * falcon in a single blob. This structure describes where everything is.
- *
- * This can be generated from a (bootloader, code, data) set if they have
- * been loaded separately, or come directly from a file.
- */
-struct ls_ucode_img_desc {
- u32 descriptor_size;
- u32 image_size;
- u32 tools_version;
- u32 app_version;
- char date[64];
- u32 bootloader_start_offset;
- u32 bootloader_size;
- u32 bootloader_imem_offset;
- u32 bootloader_entry_point;
- u32 app_start_offset;
- u32 app_size;
- u32 app_imem_offset;
- u32 app_imem_entry;
- u32 app_dmem_offset;
- u32 app_resident_code_offset;
- u32 app_resident_code_size;
- u32 app_resident_data_offset;
- u32 app_resident_data_size;
- u32 nb_overlays;
- struct {u32 start; u32 size; } load_ovl[64];
- u32 compressed;
-};
-
-/**
- * struct ls_ucode_img - temporary storage for loaded LS firmwares
- * @node: to link within lsf_ucode_mgr
- * @falcon_id: ID of the falcon this LS firmware is for
- * @ucode_desc: loaded or generated map of ucode_data
- * @ucode_data: firmware payload (code and data)
- * @ucode_size: size in bytes of data in ucode_data
- * @ucode_off: offset of the ucode in ucode_data
- * @sig: signature for this firmware
- * @sig_size: size of the signature in bytes
- *
- * Preparing the WPR LS blob requires information about all the LS firmwares
- * (size, etc) to be known. This structure contains all the data of one LS
- * firmware.
- */
-struct ls_ucode_img {
- struct list_head node;
- enum nvkm_secboot_falcon falcon_id;
-
- struct ls_ucode_img_desc ucode_desc;
- u8 *ucode_data;
- u32 ucode_size;
- u32 ucode_off;
-
- u8 *sig;
- u32 sig_size;
-};
-
-/**
- * struct fw_bin_header - header of firmware files
- * @bin_magic: always 0x3b1d14f0
- * @bin_ver: version of the bin format
- * @bin_size: entire image size including this header
- * @header_offset: offset of the firmware/bootloader header in the file
- * @data_offset: offset of the firmware/bootloader payload in the file
- * @data_size: size of the payload
- *
- * This header is located at the beginning of the HS firmware and HS bootloader
- * files, to describe where the headers and data can be found.
- */
-struct fw_bin_header {
- u32 bin_magic;
- u32 bin_ver;
- u32 bin_size;
- u32 header_offset;
- u32 data_offset;
- u32 data_size;
-};
-
-/**
- * struct fw_bl_desc - firmware bootloader descriptor
- * @start_tag: starting tag of bootloader
- * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc
- * @code_off: offset of code section
- * @code_size: size of code section
- * @data_off: offset of data section
- * @data_size: size of data section
- *
- * This structure is embedded in bootloader firmware files to describe the
- * IMEM and DMEM layout expected by the bootloader.
- */
-struct fw_bl_desc {
- u32 start_tag;
- u32 dmem_load_off;
- u32 code_off;
- u32 code_size;
- u32 data_off;
- u32 data_size;
-};
-
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
- struct ls_ucode_img *);
-int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
deleted file mode 100644
index 821d3b2bdb1f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-
-#define BL_DESC_BLK_SIZE 256
-/**
- * Build a ucode image and descriptor from provided bootloader, code and data.
- *
- * @bl: bootloader image, including 16-bytes descriptor
- * @code: LS firmware code segment
- * @data: LS firmware data segment
- * @desc: ucode descriptor to be written
- *
- * Return: allocated ucode image with corresponding descriptor information. desc
- * is also updated to contain the right offsets within the returned image.
- */
-static void *
-ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
- const struct firmware *data, struct ls_ucode_img_desc *desc)
-{
- struct fw_bin_header *bin_hdr = (void *)bl->data;
- struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
- void *bl_data = (void *)bl->data + bin_hdr->data_offset;
- u32 pos = 0;
- void *image;
-
- desc->bootloader_start_offset = pos;
- desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
- desc->bootloader_imem_offset = bl_desc->start_tag * 256;
- desc->bootloader_entry_point = bl_desc->start_tag * 256;
-
- pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
- desc->app_start_offset = pos;
- desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
- ALIGN(data->size, BL_DESC_BLK_SIZE);
- desc->app_imem_offset = 0;
- desc->app_imem_entry = 0;
- desc->app_dmem_offset = 0;
- desc->app_resident_code_offset = 0;
- desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
-
- pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
- desc->app_resident_data_offset = pos - desc->app_start_offset;
- desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
-
- desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
- desc->app_size;
-
- image = kzalloc(desc->image_size, GFP_KERNEL);
- if (!image)
- return ERR_PTR(-ENOMEM);
-
- memcpy(image + desc->bootloader_start_offset, bl_data,
- bl_desc->code_size);
- memcpy(image + desc->app_start_offset, code->data, code->size);
- memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
- data->data, data->size);
-
- return image;
-}
-
-/**
- * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
- *
- * Load the LS microcode, bootloader and signature and pack them into a single
- * blob. Also generate the corresponding ucode descriptor.
- */
-static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
- struct ls_ucode_img *img, const char *falcon_name)
-{
- const struct firmware *bl, *code, *data, *sig;
- char f[64];
- int ret;
-
- snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &bl);
- if (ret)
- goto error;
-
- snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &code);
- if (ret)
- goto free_bl;
-
- snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &data);
- if (ret)
- goto free_inst;
-
- snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
- ret = nvkm_firmware_get(subdev, f, &sig);
- if (ret)
- goto free_data;
-
- img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
- if (!img->sig) {
- ret = -ENOMEM;
- goto free_sig;
- }
- img->sig_size = sig->size;
-
- img->ucode_data = ls_ucode_img_build(bl, code, data,
- &img->ucode_desc);
- if (IS_ERR(img->ucode_data)) {
- kfree(img->sig);
- ret = PTR_ERR(img->ucode_data);
- goto free_sig;
- }
- img->ucode_size = img->ucode_desc.image_size;
-
-free_sig:
- nvkm_firmware_put(sig);
-free_data:
- nvkm_firmware_put(data);
-free_inst:
- nvkm_firmware_put(code);
-free_bl:
- nvkm_firmware_put(bl);
-error:
- return ret;
-}
-
-int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
-}
-
-int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
-}
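
ls_ucode_img_build() above lays the blob out as bootloader, then code, then
data, with the code and data segments padded to BL_DESC_BLK_SIZE (256 bytes).
A standalone sketch of the arithmetic for hypothetical input sizes (0x1a0
bytes of bootloader code, 0x5000 of code, 0x300 of data):

	#include <stdio.h>

	#define BLK		256
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned bl = 0x1a0, code = 0x5000, data = 0x300;
		unsigned app_start = ALIGN_UP(bl, BLK);		/* 0x200 */
		unsigned data_off = ALIGN_UP(code, BLK);	/* 0x5000 */
		unsigned app_size = data_off + ALIGN_UP(data, BLK);

		/* prints image_size=0x5500 data@0x5200 */
		printf("image_size=0x%x data@0x%x\n",
		       ALIGN_UP(bl, BLK) + app_size, app_start + data_off);
		return 0;
	}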
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
deleted file mode 100644
index a84a999445bb..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-#include <core/msgqueue.h>
-#include <subdev/pmu.h>
-#include <engine/sec2.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-
-/**
- * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
- *
- * Load the LS microcode, desc and signature and pack them into a single
- * blob.
- */
-static int
-acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
- int maxver, struct ls_ucode_img *img)
-{
- const struct firmware *image, *desc, *sig;
- char f[64];
- int ver, ret;
-
- snprintf(f, sizeof(f), "%s/image", name);
- ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
- if (ver < 0)
- return ver;
- img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
- nvkm_firmware_put(image);
- if (!img->ucode_data)
- return -ENOMEM;
-
- snprintf(f, sizeof(f), "%s/desc", name);
- ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
- if (ret < 0)
- return ret;
- memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
- img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
- nvkm_firmware_put(desc);
-
- snprintf(f, sizeof(f), "%s/sig", name);
- ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
- if (ret < 0)
- return ret;
- img->sig_size = sig->size;
- img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
- nvkm_firmware_put(sig);
- if (!img->sig)
- return -ENOMEM;
-
- return ver;
-}
-
-static int
-acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
- struct nvkm_falcon *falcon, u32 addr_args)
-{
- struct nvkm_device *device = falcon->owner->device;
- u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
-
- memset(buf, 0, sizeof(buf));
- nvkm_msgqueue_write_cmdline(queue, buf);
- nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
- /* rearm the queue so it will wait for the init message */
- nvkm_msgqueue_reinit(queue);
-
- /* Enable interrupts */
- nvkm_falcon_wr32(falcon, 0x10, 0xff);
- nvkm_mc_intr_mask(device, falcon->owner->index, true);
-
- /* Start LS firmware on boot falcon */
- nvkm_falcon_start(falcon);
-
- return 0;
-}
-
-int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- struct nvkm_pmu *pmu = sb->subdev.device->pmu;
- int ret;
-
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
- if (ret)
- return ret;
-
- /* Allocate the PMU queue corresponding to the FW version */
- ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
- sb, &pmu->queue);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int
-acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
- struct nvkm_device *device = sb->subdev.device;
- struct nvkm_pmu *pmu = device->pmu;
- u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
- int ret;
-
- ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
- if (ret)
- return ret;
-
- nvkm_debug(&sb->subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
-
- return 0;
-}
-
-int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
- struct ls_ucode_img *img)
-{
- struct nvkm_sec2 *sec = sb->subdev.device->sec2;
- int ver, ret;
-
- ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
- if (ver < 0)
- return ver;
-
- /* Allocate the SEC2 queue corresponding to the FW version */
- ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
- sb, &sec->queue);
- if (ret)
- return ret;
-
- return ver;
-}
-
-int
-acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
- const struct nvkm_subdev *subdev = &sb->subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_sec2 *sec = device->sec2;
- /* on SEC, arguments are always at the beginning of EMEM */
- const u32 addr_args = 0x01000000;
- int ret;
-
- ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
- if (ret)
- return ret;
-
- nvkm_debug(&sb->subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->boot_falcon]);
-
- return 0;
-}
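
A small asymmetry between the two post_run paths above: for the PMU the
command line buffer lands at the tail of the falcon's DMEM (data.limit minus
NVKM_MSGQUEUE_CMDLINE_SIZE), while for SEC2 it goes to the fixed EMEM
aperture at 0x01000000. A sketch of the two address computations, with the
numeric values picked purely for illustration:

	/* Illustrative values only; the real ones come from the falcon. */
	#define DMEM_LIMIT	0x10000		/* pmu->falcon->data.limit */
	#define CMDLINE_SIZE	0x100		/* NVKM_MSGQUEUE_CMDLINE_SIZE */
	#define SEC2_EMEM_BASE	0x01000000

	static unsigned int pmu_cmdline_addr(void)
	{
		return DMEM_LIMIT - CMDLINE_SIZE;	/* tail of DMEM */
	}

	static unsigned int sec2_cmdline_addr(void)
	{
		return SEC2_EMEM_BASE;			/* start of EMEM */
	}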
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
deleted file mode 100644
index 959a7b2dbdc9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_PRIV_H__
-#define __NVKM_SECBOOT_PRIV_H__
-
-#include <subdev/secboot.h>
-#include <subdev/mmu.h>
-struct nvkm_gpuobj;
-
-struct nvkm_secboot_func {
- int (*oneinit)(struct nvkm_secboot *);
- int (*fini)(struct nvkm_secboot *, bool suspend);
- void *(*dtor)(struct nvkm_secboot *);
- int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
- struct nvkm_falcon *);
-};
-
-int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
- struct nvkm_device *, int, struct nvkm_secboot *);
-int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
-int nvkm_secboot_falcon_run(struct nvkm_secboot *);
-
-extern const struct nvkm_secboot_func gp102_secboot;
-
-struct flcn_u64 {
- u32 lo;
- u32 hi;
-};
-
-static inline u64 flcn64_to_u64(const struct flcn_u64 f)
-{
- return ((u64)f.hi) << 32 | f.lo;
-}
-
-static inline struct flcn_u64 u64_to_flcn64(u64 u)
-{
- struct flcn_u64 ret;
-
- ret.hi = upper_32_bits(u);
- ret.lo = lower_32_bits(u);
-
- return ret;
-}
-
-#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 48e3c4165247..b7a618db3ee2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- args->offset = bo->node.start << PAGE_SHIFT;
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
@@ -130,9 +144,33 @@ panfrost_lookup_bos(struct drm_device *dev,
if (!job->implicit_fences)
return -ENOMEM;
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ atomic_inc(&bo->gpu_usecount);
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}
/**
@@ -320,7 +358,9 @@ out:
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
@@ -331,18 +371,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
}
bo = to_panfrost_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
@@ -350,22 +398,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}
+ bo = to_panfrost_bo(gem_obj);
+
mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
- return 0;
+ return ret;
}
int panfrost_unstable_ioctl_check(void)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index fd766b1395fb..17b654e1eb94 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_gem_shmem_free_object(obj);
}
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
+
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;
/*
* Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
- bo->mmu = &priv->mmu;
+ mapping->mmu = &priv->mmu;
spin_lock(&priv->mm_lock);
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&priv->mm_lock);
if (ret)
- return ret;
+ goto err;
if (!bo->is_heap) {
- ret = panfrost_mmu_map(bo);
- if (ret) {
- spin_lock(&priv->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
- }
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
}
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
return ret;
}
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
- spin_lock(&priv->mm_lock);
- if (drm_mm_node_allocated(&bo->node))
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
+ panfrost_gem_mapping_put(mapping);
}
static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
if (!obj)
return NULL;
+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
return &obj->base.base;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 4b17e7308764..b3517ff9630c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -13,23 +13,52 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct sg_table *sgts;
- struct panfrost_mmu *mmu;
- struct drm_mm_node node;
- bool is_mapped :1;
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
+ /*
+ * Count the number of jobs referencing this BO so we don't let the
+ * shrinker reclaim this object prematurely.
+ */
+ atomic_t gpu_usecount;
+
bool noexec :1;
bool is_heap :1;
};
+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
+};
+
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
- return container_of(node, struct panfrost_gem_object, node);
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
}
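drm_mm only ever hands back the embedded drm_mm_node, so the helper above recovers the enclosing mapping by subtracting the member offset. As a rough sketch of the pointer math (the real kernel macro adds type checking on top of this):

    /* Not the literal kernel macro, just the underlying arithmetic: */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* so drm_mm_node_to_panfrost_mapping(node) is effectively:
     *   (struct panfrost_gem_mapping *)
     *       ((char *)node - offsetof(struct panfrost_gem_mapping, mmnode))
     */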
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +78,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 458f0fa68111..288e46c40673 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -39,11 +39,15 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
+ if (atomic_read(&bo->gpu_usecount))
+ return false;
if (!mutex_trylock(&shmem->pages_lock))
return false;
- panfrost_mmu_unmap(to_panfrost_bo(obj));
+ panfrost_gem_teardown_mappings(bo);
drm_gem_shmem_purge_locked(obj);
mutex_unlock(&shmem->pages_lock);
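The new gpu_usecount is a lockless guard between job submission and the shrinker: each queued job increments it for every BO it references, panfrost_job_cleanup() decrements it, and panfrost_gem_purge() refuses to reclaim while it is non-zero. In outline (fragments, not a compilable unit):

    /* On job submission, pin each BO the job references: */
    atomic_inc(&bo->gpu_usecount);

    /* ... GPU executes the job ... */

    /* On job cleanup (see panfrost_job_cleanup below), release the pin: */
    atomic_dec(&bo->gpu_usecount);

    /* The shrinker bails out while any job is still in flight: */
    if (atomic_read(&bo->gpu_usecount))
            return false;   /* not purgeable right now, try another object */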
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index f68ad9df701a..8fd8726313ae 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -268,9 +268,25 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
+ if (job->mappings) {
+ for (i = 0; i < job->bo_count; i++) {
+ if (!job->mappings[i])
+ break;
+
+ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
+ panfrost_gem_mapping_put(job->mappings[i]);
+ }
+ kvfree(job->mappings);
+ }
+
if (job->bos) {
- for (i = 0; i < job->bo_count; i++)
+ struct panfrost_gem_object *bo;
+
+ for (i = 0; i < job->bo_count; i++) {
+ bo = to_panfrost_bo(job->bos[i]);
drm_gem_object_put_unlocked(job->bos[i]);
+ }
+
kvfree(job->bos);
}
@@ -542,12 +558,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int ret, i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+ sched = &js->queue[i].sched;
+ ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
if (WARN_ON(ret))
return ret;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 62454128a792..bbd3ba97ff67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -32,6 +32,7 @@ struct panfrost_job {
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index d3694579a56a..23314f41717b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
return 0;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
- if (WARN_ON(bo->is_mapped))
+ if (WARN_ON(mapping->active))
return 0;
if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
- bo->is_mapped = true;
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;
return 0;
}
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
- if (WARN_ON(!bo->is_mapped))
+ if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
unmapped_len += pgsize;
}
- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
free_io_pgtable_ops(mmu->pgtbl_ops);
}
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct panfrost_gem_object *bo = NULL;
+ struct panfrost_gem_mapping *mapping = NULL;
struct panfrost_file_priv *priv;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
drm_mm_for_each_node(node, &priv->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
- bo = drm_mm_node_to_panfrost_bo(node);
- drm_gem_object_get(&bo->base.base);
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
break;
}
}
@@ -427,7 +433,7 @@ found_mmu:
spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return bo;
+ return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
u64 addr)
{
int ret, i;
+ struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
- bo = addr_to_drm_mm_node(pfdev, as, addr);
- if (!bo)
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
return -ENOENT;
+ bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- bo->node.start << PAGE_SHIFT);
+ bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
- WARN_ON(bo->mmu->as != as);
+ WARN_ON(bomapping->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= bo->node.start;
+ page_offset -= bomapping->mmnode.start;
mutex_lock(&bo->base.pages_lock);
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
}
- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
- bo->is_mapped = true;
+ bomapping->active = true;
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
- drm_gem_object_put_unlocked(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return 0;
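The fault path converts between the two units in play here: drm_mm allocates in pages, while the GPU faults and the page tables are addressed in bytes. The conversions used throughout this patch, collected in one place (a summary, not new code):

    u64 offset = addr >> PAGE_SHIFT;            /* fault address -> page index */
    u64 iova   = mmnode.start << PAGE_SHIFT;    /* page index -> GPU VA */
    size_t len = mmnode.size << PAGE_SHIFT;     /* pages -> bytes */

    /* heap faults are rounded down to a 2MB granule before mapping: */
    addr &= ~((u64)SZ_2M - 1);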
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 7c5b6775ae23..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,12 +4,12 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2c04e858c50a..684820448be3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -25,7 +25,7 @@
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (IS_ERR(bo))
return PTR_ERR(bo);
- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_close_bo;
+ goto err_put_mapping;
}
/*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
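One subtlety in the perfcnt hunks above: reference ownership moves from the local `bo` pointer to the mapping. A condensed view (error unwinding elided):

    perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
                                                user);
    if (!perfcnt->mapping)
            return -EINVAL;         /* simplified; the real code unwinds */

    /* The mapping took its own BO reference in panfrost_gem_open(), so
     * the reference from the create call can be dropped here; the buffer
     * now lives exactly as long as perfcnt->mapping does. */
    drm_gem_object_put_unlocked(&bo->base);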
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 92ccd7aed0d4..c693b2ca0329 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/gpu/drm/amd/include
-hostprogs-y := mkregtable
+hostprogs := mkregtable
clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 447d74b78f19..91811757104c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -244,9 +244,8 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (ASIC_IS_DCE8(rdev)) {
+ if (ASIC_IS_DCE8(rdev))
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
- }
}
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 911735f8d5de..15b00a347560 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -813,9 +813,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
dp_info.use_dpencoder = true;
index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
- if (crev > 1) {
+ if (crev > 1)
dp_info.use_dpencoder = false;
- }
}
dp_info.enc_id = 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2a7be5d5e7e6..cc5ee1b3af84 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1885,11 +1885,10 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
if (ASIC_IS_AVIVO(rdev))
args.v1.ucCRTC = radeon_crtc->crtc_id;
else {
- if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1)
args.v1.ucCRTC = radeon_crtc->crtc_id;
- } else {
+ else
args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
- }
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
@@ -2234,9 +2233,9 @@ assigned:
DRM_ERROR("Got encoder index incorrect - returning 0\n");
return 0;
}
- if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
+ if (rdev->mode_info.active_encoders & (1 << enc_idx))
DRM_ERROR("chosen encoder in use %d\n", enc_idx);
- }
+
rdev->mode_info.active_encoders |= (1 << enc_idx);
return enc_idx;
}
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index a570ce40af19..ab4d21072191 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -68,11 +68,6 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
- }
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4fa488cedd55..5c42877fd6fb 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8137,7 +8137,7 @@ static void cik_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -8209,7 +8209,7 @@ static void cik_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 35b9dc6ce46a..68403e77756d 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -333,7 +333,7 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
u32 me_cntl, reg_offset;
int i;
- if (enable == false) {
+ if (!enable) {
cik_sdma_gfx_stop(rdev);
cik_sdma_rlc_stop(rdev);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 683c79001bbb..14d90dc376e7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4945,7 +4945,7 @@ static void evergreen_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index a99442b2019b..02feb0801fd3 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2017,7 +2017,7 @@ static void cayman_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -2085,7 +2085,7 @@ static void cayman_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 29c966f3407e..24c8db673931 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1823,9 +1823,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXFORMAT_2:
i = (reg - RADEON_PP_TXFORMAT_0) / 24;
if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
- track->textures[i].use_pitch = 1;
+ track->textures[i].use_pitch = true;
} else {
- track->textures[i].use_pitch = 0;
+ track->textures[i].use_pitch = false;
track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
@@ -2387,12 +2387,12 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
else
track->num_texture = 6;
track->maxy = 2048;
- track->separate_cube = 1;
+ track->separate_cube = true;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
- track->separate_cube = 0;
+ track->separate_cube = false;
track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -2815,7 +2815,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
uint32_t temp;
temp = RREG32(RADEON_CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~RADEON_CFG_VGA_RAM_EN;
temp |= RADEON_CFG_VGA_IO_DIS;
} else {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d2e51a9433f5..d9a33ca768f3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3053,7 +3053,7 @@ static void r600_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -3191,7 +3191,7 @@ void r600_vga_set_state(struct radeon_device *rdev, bool state)
uint32_t temp;
temp = RREG32(CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 072e6daedf7a..848ef68d9086 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -570,7 +570,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
+ uint8_t con_obj_id, con_obj_num;
con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
@@ -578,9 +578,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj_num =
(le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
>> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* TODO CV support */
if (le16_to_cpu(path->usDeviceTag) ==
@@ -648,15 +645,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c84d965c283e..c42f73fad3e3 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -664,17 +664,17 @@ bool radeon_get_bios(struct radeon_device *rdev)
uint16_t tmp;
r = radeon_atrm_get_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_acpi_vfct_bios(rdev);
- if (r == false)
+ if (!r)
r = igp_read_bios_from_vram(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_disabled_bios(rdev);
- if (r == false)
+ if (!r)
r = radeon_read_platform_bios(rdev);
- if (r == false || rdev->bios == NULL) {
+ if (!r || rdev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
rdev->bios = NULL;
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 0851e6817e57..fe12d9d91d7a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -440,7 +440,7 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
if (radeon_conflict->use_digital)
continue;
- if (priority == true) {
+ if (priority) {
DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
conflict->name);
DRM_DEBUG_KMS("in favor of %s\n",
@@ -700,9 +700,9 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
else
ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
}
- if (val == 1 || ret == false) {
+ if (val == 1 || !ret)
radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
- }
+
radeon_property_change_mode(&radeon_encoder->base);
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 91dda2125661..35db79a168bf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -131,6 +131,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+ msleep(10);
+
WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
(NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
@@ -680,7 +682,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc;
- int i;
radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (radeon_crtc == NULL)
@@ -709,12 +710,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->mode_set.num_connectors = 0;
#endif
- for (i = 0; i < 256; i++) {
- radeon_crtc->lut_r[i] = i << 2;
- radeon_crtc->lut_g[i] = i << 2;
- radeon_crtc->lut_b[i] = i << 2;
- }
-
if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
radeon_atombios_init_crtc(dev, radeon_crtc);
else
@@ -855,11 +850,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (rdev->bios) {
if (rdev->is_atom_bios) {
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- if (ret == false)
+ if (!ret)
ret = radeon_get_atom_connector_info_from_object_table(dev);
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
- if (ret == false)
+ if (!ret)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index ee28f5b3785e..28eef9282874 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -518,7 +518,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
mst_enc = radeon_encoder->enc_priv;
- mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+ mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a33b19566b2d..44d060f75318 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1712,7 +1712,7 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
else
ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
+ if (!ret)
radeon_legacy_get_tmds_info_from_table(encoder, tmds);
return tmds;
@@ -1735,7 +1735,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
+ if (!ret)
radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
return tmds;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3a61530c1398..629567da29f1 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -327,7 +327,6 @@ enum radeon_flip_status {
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
- u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
bool cursor_out_of_bounds;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b37121f2631d..8c5d6fda0d75 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1789,7 +1789,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
u32 stat_crtc = 0;
bool in_vbl = radeon_pm_in_vbl(rdev);
- if (in_vbl == false)
+ if (!in_vbl)
DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
finish ? "exit" : "entry");
return in_vbl;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 40282bf0adbe..badf1b6d1549 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -438,7 +438,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.size);
else
mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
+ ioremap(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!mem->bus.addr)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 59db54ace428..5e8006444704 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -388,9 +388,9 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
ib.ptr[i] = cpu_to_le32(0x0);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
- if (r) {
+ if (r)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- }
+
if (fence)
*fence = radeon_fence_ref(ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e0ad547786e8..f60fae0aed11 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -296,9 +296,9 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ if (bo_va->vm == vm)
return bo_va;
- }
+
}
return NULL;
}
@@ -323,9 +323,9 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (bo_va == NULL) {
+ if (bo_va == NULL)
return NULL;
- }
+
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->it.start = 0;
@@ -947,9 +947,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
if (mem) {
addr = (u64)mem->start << PAGE_SHIFT;
- if (mem->mem_type != TTM_PL_SYSTEM) {
+ if (mem->mem_type != TTM_PL_SYSTEM)
bo_va->flags |= RADEON_VM_PAGE_VALID;
- }
+
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
@@ -1233,9 +1233,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root))
dev_err(rdev->dev, "still active bo inside vm\n");
- }
+
rbtree_postorder_for_each_entry_safe(bo_va, tmp,
&vm->va.rb_root, it.rb) {
interval_tree_remove(&bo_va->it, &vm->va);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3fc461defeeb..21f653ae1e1b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1703,7 +1703,7 @@ static void rv770_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 8788a0564582..93dcab548a83 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6472,7 +6472,7 @@ static void si_uvd_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable uvd here.
*/
- rdev->has_uvd = 0;
+ rdev->has_uvd = false;
return;
}
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
@@ -6539,7 +6539,7 @@ static void si_vce_init(struct radeon_device *rdev)
* there. So it is pointless to try to go through that code
* hence why we disable vce here.
*/
- rdev->has_vce = 0;
+ rdev->has_vce = false;
return;
}
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 83c4586665b4..81ac9b658a70 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -95,7 +95,7 @@ struct cdn_dp_device {
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
u8 max_lanes;
- u8 max_rate;
+ unsigned int max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 461a7a8129f4..63bccd201b97 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -38,46 +38,40 @@
* submit to HW ring.
*
* @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
* entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm sched in sched_list
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
- * Note: the rq_list should have atleast one element to schedule
+ * Note: the sched_list should have at least one element to schedule
* the entity
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
atomic_t *guilty)
{
- int i;
-
- if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+ if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = NULL;
entity->guilty = guilty;
- entity->num_rq_list = num_rq_list;
- entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
- GFP_KERNEL);
- if (!entity->rq_list)
- return -ENOMEM;
-
- init_completion(&entity->entity_idle);
-
- for (i = 0; i < num_rq_list; ++i)
- entity->rq_list[i] = rq_list[i];
+ entity->num_sched_list = num_sched_list;
+ entity->priority = priority;
+ entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+ entity->last_scheduled = NULL;
- if (num_rq_list)
- entity->rq = rq_list[0];
+ if (num_sched_list)
+ entity->rq = &sched_list[0]->sched_rq[entity->priority];
- entity->last_scheduled = NULL;
+ init_completion(&entity->entity_idle);
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
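Callers now hand the entity an array of schedulers plus a priority, and the entity derives its run queue internally (entity->rq = &sched_list[0]->sched_rq[priority]). The panfrost_job_open() hunk earlier in this merge shows the conversion; the shape of a typical call:

    struct drm_gpu_scheduler *sched = &js->queue[i].sched;
    int ret;

    /* One scheduler in the list, fixed normal priority, no guilty pointer. */
    ret = drm_sched_entity_init(&priv->sched_entity[i],
                                DRM_SCHED_PRIORITY_NORMAL,
                                &sched, 1, NULL);
    if (ret)
            return ret;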
@@ -136,21 +130,21 @@ static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
struct drm_sched_rq *rq = NULL;
- unsigned int min_jobs = UINT_MAX, num_jobs;
+ unsigned int min_score = UINT_MAX, num_score;
int i;
- for (i = 0; i < entity->num_rq_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+ for (i = 0; i < entity->num_sched_list; ++i) {
+ struct drm_gpu_scheduler *sched = entity->sched_list[i];
- if (!entity->rq_list[i]->sched->ready) {
+ if (!entity->sched_list[i]->ready) {
DRM_WARN("sched%s is not ready, skipping", sched->name);
continue;
}
- num_jobs = atomic_read(&sched->num_jobs);
- if (num_jobs < min_jobs) {
- min_jobs = num_jobs;
- rq = entity->rq_list[i];
+ num_score = atomic_read(&sched->score);
+ if (num_score < min_score) {
+ min_score = num_score;
+ rq = &entity->sched_list[i]->sched_rq[entity->priority];
}
}
@@ -308,7 +302,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = NULL;
- kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
@@ -354,15 +347,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
}
/**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
- enum drm_sched_priority priority)
-{
- *rq = &(*rq)->sched->sched_rq[priority];
-}
-
-/**
* drm_sched_entity_set_priority - Sets priority of the entity
*
* @entity: scheduler entity
@@ -373,19 +357,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- unsigned int i;
-
spin_lock(&entity->rq_lock);
-
- for (i = 0; i < entity->num_rq_list; ++i)
- drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
- if (entity->rq) {
- drm_sched_rq_remove_entity(entity->rq, entity);
- drm_sched_entity_set_rq_priority(&entity->rq, priority);
- drm_sched_rq_add_entity(entity->rq, entity);
- }
-
+ entity->priority = priority;
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -490,20 +463,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
struct dma_fence *fence;
struct drm_sched_rq *rq;
- if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+ if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
return;
fence = READ_ONCE(entity->last_scheduled);
if (fence && !dma_fence_is_signaled(fence))
return;
+ spin_lock(&entity->rq_lock);
rq = drm_sched_entity_get_free_sched(entity);
- if (rq == entity->rq)
- return;
+ if (rq != entity->rq) {
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ entity->rq = rq;
+ }
- spin_lock(&entity->rq_lock);
- drm_sched_rq_remove_entity(entity->rq, entity);
- entity->rq = rq;
spin_unlock(&entity->rq_lock);
}
@@ -525,7 +498,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->num_jobs);
+ atomic_inc(&entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3fad5876a13f..71ce6215956f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
+ atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -655,7 +657,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->num_jobs);
+ atomic_dec(&sched->score);
trace_drm_sched_process_job(s_fence);
@@ -830,7 +832,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->num_jobs, 0);
+ atomic_set(&sched->score, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
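`score` generalizes the old num_jobs counter: it is bumped both per queued job and per runnable entity (the rq add/remove hooks above), so the load balancing in drm_sched_entity_get_free_sched() now also weighs schedulers whose entities are idle but populated. The selection, distilled from the hunk above:

    struct drm_sched_rq *rq = NULL;
    unsigned int best = UINT_MAX;
    int i;

    for (i = 0; i < entity->num_sched_list; i++) {
            struct drm_gpu_scheduler *sched = entity->sched_list[i];
            unsigned int score = atomic_read(&sched->score);

            if (!sched->ready)
                    continue;       /* skip schedulers that are not up */

            if (score < best) {
                    best = score;
                    rq = &sched->sched_rq[entity->priority];
            }
    }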
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
index af2b2de65316..bd990d178765 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -18,15 +18,19 @@ int igt_dp_mst_calc_pbn_mode(void *ignored)
int rate;
int bpp;
int expected;
+ bool dsc;
} test_params[] = {
- { 154000, 30, 689 },
- { 234000, 30, 1047 },
- { 297000, 24, 1063 },
+ { 154000, 30, 689, false },
+ { 234000, 30, 1047, false },
+ { 297000, 24, 1063, false },
+ { 332880, 24, 50, true },
+ { 324540, 24, 49, true },
};
for (i = 0; i < ARRAY_SIZE(test_params); i++) {
pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
- test_params[i].bpp);
+ test_params[i].bpp,
+ test_params[i].dsc);
FAIL(pbn != test_params[i].expected,
"Expected PBN %d for clock %d bpp %d, got %d\n",
test_params[i].expected, test_params[i].rate,
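The expected PBN values follow from the DisplayPort MST math: payload bandwidth is the pixel data rate scaled by the 64/54 time-slot overhead and a 1.006 margin, rounded up; with DSC the bpp argument arrives in 1/16-bpp units. A hedged userspace re-derivation that reproduces the five vectors above (the in-kernel helper may order the integer operations differently):

    #include <stdint.h>

    static int calc_pbn(int clock_khz, int bpp, int dsc)
    {
            const uint64_t div = 8ULL * 54 * 1000 * 1000;
            uint64_t num;

            if (dsc)
                    bpp /= 16;      /* 1/16-bpp units; truncation matches the vectors */

            num = (uint64_t)clock_khz * bpp * 64 * 1006;
            return (int)((num + div - 1) / div);    /* round up */
    }

    /* calc_pbn(154000, 30, 0) == 689, calc_pbn(332880, 24, 1) == 50, ... */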
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index df2ee86cd4c1..b2778ec1cdd7 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_ERROR("Invalid dvo resource\n");
return -ENOMEM;
}
- dvo->regs = devm_ioremap_nocache(dev, res->start,
+ dvo->regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!dvo->regs)
return -ENOMEM;
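These sti changes are part of the tree-wide removal of ioremap_nocache(): ioremap() has been uncached on all architectures for some time, so the _nocache variants were pure aliases and the conversion is mechanical. The resulting probe idiom (a sketch; error codes vary by driver):

    struct resource *res;
    void __iomem *regs;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
            return -EINVAL;

    /* devm_: the mapping is torn down automatically on driver unbind. */
    regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (!regs)
            return -ENOMEM;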
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 8f7bf33815fd..2bb32009d117 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_ERROR("Invalid hda resource\n");
return -ENOMEM;
}
- hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hda->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hda->regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
if (res) {
- hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
resource_size(res));
if (!hda->video_dacs_ctrl)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 814560ead4e1..64ed102033c8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto release_adapter;
}
- hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hdmi->regs) {
ret = -ENOMEM;
goto release_adapter;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5767e93dd1cd..c36a8da373cb 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev)
DRM_ERROR("Invalid glue resource\n");
return -ENOMEM;
}
- tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!tvout->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 0b17ac8a3faa..5e5f82b6a5d9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev)
DRM_ERROR("Get memory resource failed\n");
return -ENOMEM;
}
- vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vtg->regs) {
DRM_ERROR("failed to remap I/O memory\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 5ae67d526b1d..328272ff77d8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
}
drm_mode_config_init(drm);
- drm->mode_config.allow_fb_modifiers = true;
ret = component_bind_all(drm->dev, drm);
if (ret) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index a7c4654445c7..68d4644ac2dc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
cec_unregister_adapter(hdmi->cec_adap);
- drm_connector_cleanup(&hdmi->connector);
- drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 42651d737c55..c81cdce6ed55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 1;
+ tcon->dclk_min_div = tcon->quirks->dclk_min_div;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun5i_a13_tcon_set_mux,
};
@@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
.has_channel_1 = true,
.has_lvds_alt = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
.set_mux = sun6i_tcon_set_mux,
};
@@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
@@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
- .has_channel_0 = true,
- .needs_edp_reset = true,
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f9f1fe80b206..a62ec826ae71 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -224,6 +224,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
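dclk_min_div rides the existing quirk mechanism: each SoC compatible maps to one static sun4i_tcon_quirks instance, fetched once at probe. A sketch of the plumbing (the table entry shown is illustrative; the full table lives in sun4i_tcon.c):

    static const struct of_device_id sun4i_tcon_of_table[] = {
            { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
            { /* sentinel */ }
    };

    /* in probe: */
    tcon->quirks = of_device_get_match_data(dev);

    /* and in mode setting (see sun4i_tcon0_mode_set_rgb above): */
    tcon->dclk_min_div = tcon->quirks->dclk_min_div;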
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 714af052fbef..7c70fd31a4c2 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1727,6 +1727,7 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ int err;
if (!tegra_dc_idle(dc)) {
tegra_dc_stop(dc);
@@ -1773,7 +1774,9 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
- pm_runtime_put_sync(dc->dev);
+ err = host1x_client_suspend(&dc->client);
+ if (err < 0)
+ dev_err(dc->dev, "failed to suspend: %d\n", err);
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -1783,8 +1786,13 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ int err;
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* initialize display controller */
if (dc->syncpt) {
@@ -1996,7 +2004,7 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
static int tegra_dc_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
@@ -2012,6 +2020,15 @@ static int tegra_dc_init(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /*
+ * Set the display hub as the host1x client parent for the display
+ * controller. This is needed for the runtime reference counting that
+ * ensures the display hub is always powered when any of the display
+ * controllers are.
+ */
+ if (dc->soc->has_nvdisplay)
+ client->parent = &tegra->hub->client;
+
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
@@ -2077,9 +2094,9 @@ static int tegra_dc_init(struct host1x_client *client)
/*
* Inherit the DMA parameters (such as maximum segment size) from the
- * parent device.
+ * parent host1x device.
*/
- client->dev->dma_parms = client->parent->dma_parms;
+ client->dev->dma_parms = client->host->dma_parms;
return 0;
@@ -2121,9 +2138,74 @@ static int tegra_dc_exit(struct host1x_client *client)
return 0;
}
+static int tegra_dc_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate)
+ tegra_powergate_power_off(dc->powergate);
+
+ clk_disable_unprepare(dc->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dc_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ goto put_rpm;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(dc->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops dc_client_ops = {
.init = tegra_dc_init,
.exit = tegra_dc_exit,
+ .suspend = tegra_dc_runtime_suspend,
+ .resume = tegra_dc_runtime_resume,
};
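With suspend/resume moved into host1x_client_ops, power management follows the client hierarchy: host1x_client_resume() resumes the parent chain (here the display hub, set as parent in tegra_dc_init() above) before the client itself, and host1x_client_suspend() releases in the reverse order. Consumers bracket hardware access accordingly (fragments of the pattern used in the CRTC hooks above):

    err = host1x_client_resume(&dc->client);    /* parents power up first */
    if (err < 0)
            return err;

    /* ... access the hardware ... */

    err = host1x_client_suspend(&dc->client);   /* parents released last */
    if (err < 0)
            dev_err(dc->dev, "failed to suspend: %d\n", err);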
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
@@ -2535,65 +2617,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_dc_suspend(struct device *dev)
-{
- struct tegra_dc *dc = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- if (dc->soc->has_powergate)
- tegra_powergate_power_off(dc->powergate);
-
- clk_disable_unprepare(dc->clk);
-
- return 0;
-}
-
-static int tegra_dc_resume(struct device *dev)
-{
- struct tegra_dc *dc = dev_get_drvdata(dev);
- int err;
-
- if (dc->soc->has_powergate) {
- err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
- dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to power partition: %d\n", err);
- return err;
- }
- } else {
- err = clk_prepare_enable(dc->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- return err;
- }
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dc_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
-};
-
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
- .pm = &tegra_dc_pm_ops,
},
.probe = tegra_dc_probe,
.remove = tegra_dc_remove,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 622cdf1ad246..7dfb50f65067 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -588,7 +588,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
/* make sure pads are powered down when not in use */
tegra_dpaux_pad_power_down(dpaux);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
drm_dp_aux_unregister(&dpaux->aux);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index f455ce71e85d..bd268028fb3d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -905,7 +905,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
int host1x_client_iommu_attach(struct host1x_client *client)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
@@ -941,7 +941,7 @@ int host1x_client_iommu_attach(struct host1x_client *client)
void host1x_client_iommu_detach(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_domain *domain;
@@ -1037,23 +1037,9 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
free_pages((unsigned long)virt, get_order(size));
}
-static int host1x_drm_probe(struct host1x_device *dev)
+static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
- struct drm_driver *driver = &tegra_drm_driver;
struct iommu_domain *domain;
- struct tegra_drm *tegra;
- struct drm_device *drm;
- int err;
-
- drm = drm_dev_alloc(driver, &dev->dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
- if (!tegra) {
- err = -ENOMEM;
- goto put;
- }
/*
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
@@ -1082,9 +1068,38 @@ static int host1x_drm_probe(struct host1x_device *dev)
* up the device tree appropriately. This is considered a problem
* of integration, so care must be taken for the DT to be consistent.
*/
- domain = iommu_get_domain_for_dev(drm->dev->parent);
+ domain = iommu_get_domain_for_dev(dev->dev.parent);
+
+ /*
+ * Tegra20 and Tegra30 don't support addressing memory beyond the
+ * 32-bit boundary, so the regular GATHER opcodes will always be
+ * sufficient and whether or not the host1x is attached to an IOMMU
+ * doesn't matter.
+ */
+ if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+ return true;
+
+ return domain != NULL;
+}
+
+static int host1x_drm_probe(struct host1x_device *dev)
+{
+ struct drm_driver *driver = &tegra_drm_driver;
+ struct tegra_drm *tegra;
+ struct drm_device *drm;
+ int err;
+
+ drm = drm_dev_alloc(driver, &dev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
- if (domain && iommu_present(&platform_bus_type)) {
+ if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
err = -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index d941553f7a3d..ed99b67deb29 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -144,6 +144,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
void tegra_output_exit(struct tegra_output *output);
void tegra_output_find_possible_crtcs(struct tegra_output *output,
struct drm_device *drm);
+int tegra_output_suspend(struct tegra_output *output);
+int tegra_output_resume(struct tegra_output *output);
int tegra_output_connector_get_modes(struct drm_connector *connector);
enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index a5d47e301c5f..88b9d64c77bf 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -840,7 +840,9 @@ static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
err);
- pm_runtime_put(dsi->dev);
+ err = host1x_client_suspend(&dsi->client);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to suspend: %d\n", err);
}
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
@@ -882,11 +884,15 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
tegra_dsi_unprepare(dsi);
}
-static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+static int tegra_dsi_prepare(struct tegra_dsi *dsi)
{
int err;
- pm_runtime_get_sync(dsi->dev);
+ err = host1x_client_resume(&dsi->client);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to resume: %d\n", err);
+ return err;
+ }
err = tegra_mipi_enable(dsi->mipi);
if (err < 0)
@@ -899,6 +905,8 @@ static void tegra_dsi_prepare(struct tegra_dsi *dsi)
if (dsi->slave)
tegra_dsi_prepare(dsi->slave);
+
+ return 0;
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -909,8 +917,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
+ int err;
- tegra_dsi_prepare(dsi);
+ err = tegra_dsi_prepare(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to prepare: %d\n", err);
+ return;
+ }
state = tegra_dsi_get_state(dsi);
@@ -1030,7 +1043,7 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
static int tegra_dsi_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
@@ -1075,9 +1088,89 @@ static int tegra_dsi_exit(struct host1x_client *client)
return 0;
}
+static int tegra_dsi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(dsi->clk_lp);
+ clk_disable_unprepare(dsi->clk);
+
+ regulator_disable(dsi->vdd);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dsi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
+
+ return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops dsi_client_ops = {
.init = tegra_dsi_init,
.exit = tegra_dsi_exit,
+ .suspend = tegra_dsi_runtime_suspend,
+ .resume = tegra_dsi_runtime_resume,
};
static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
@@ -1596,79 +1689,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_dsi_suspend(struct device *dev)
-{
- struct tegra_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- if (dsi->rst) {
- err = reset_control_assert(dsi->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(dsi->clk_lp);
- clk_disable_unprepare(dsi->clk);
-
- regulator_disable(dsi->vdd);
-
- return 0;
-}
-
-static int tegra_dsi_resume(struct device *dev)
-{
- struct tegra_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->vdd);
- if (err < 0) {
- dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(dsi->clk);
- if (err < 0) {
- dev_err(dev, "cannot enable DSI clock: %d\n", err);
- goto disable_vdd;
- }
-
- err = clk_prepare_enable(dsi->clk_lp);
- if (err < 0) {
- dev_err(dev, "cannot enable low-power clock: %d\n", err);
- goto disable_clk;
- }
-
- usleep_range(1000, 2000);
-
- if (dsi->rst) {
- err = reset_control_deassert(dsi->rst);
- if (err < 0) {
- dev_err(dev, "cannot assert reset: %d\n", err);
- goto disable_clk_lp;
- }
- }
-
- return 0;
-
-disable_clk_lp:
- clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
- clk_disable_unprepare(dsi->clk);
-disable_vdd:
- regulator_disable(dsi->vdd);
- return err;
-}
-#endif
-
-static const struct dev_pm_ops tegra_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
-};
-
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
@@ -1682,7 +1702,6 @@ struct platform_driver tegra_dsi_driver = {
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
- .pm = &tegra_dsi_pm_ops,
},
.probe = tegra_dsi_probe,
.remove = tegra_dsi_remove,
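
The DSI runtime PM callbacks move from dev_pm_ops into the host1x client ops above, and the resume path keeps the usual staged-unwind idiom: each successfully acquired resource gets its own error label, so a failure at step N releases steps N-1 back down to 1 in reverse order. A compilable sketch of the same shape with stand-in resources (all names hypothetical):

#include <stdio.h>

/* Stand-ins for regulator/clock/reset operations; all hypothetical. */
static int enable_supply(void)   { puts("supply on");  return 0; }
static void disable_supply(void) { puts("supply off"); }
static int enable_clock(void)    { puts("clock on");   return 0; }
static void disable_clock(void)  { puts("clock off");  }
static int deassert_reset(void)  { return -1; /* simulated failure */ }

static int resume(void)
{
	int err;

	err = enable_supply();
	if (err < 0)
		return err;

	err = enable_clock();
	if (err < 0)
		goto err_supply;

	err = deassert_reset();
	if (err < 0)
		goto err_clock;

	return 0;

	/* Unwind in the exact reverse order of the enables above. */
err_clock:
	disable_clock();
err_supply:
	disable_supply();
	return err;
}

int main(void)
{
	return resume() < 0 ? 1 : 0;
}
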
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 1237df157e05..623768100c6a 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -60,8 +60,16 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
/*
* If we've manually mapped the buffer object through the IOMMU, make
* sure to return the IOVA address of our mapping.
+ *
+ * Similarly, for buffers that have been allocated by the DMA API the
+ * physical address can be used for devices that are not attached to
+ * an IOMMU. For these devices, callers must pass a valid pointer via
+ * the @phys argument.
+ *
+ * Imported buffers were also already mapped at import time, so the
+ * existing mapping can be reused.
*/
- if (phys && obj->mm) {
+ if (phys) {
*phys = obj->iova;
return NULL;
}
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1fc4e56c7cc5..48363f744bb9 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -34,7 +34,7 @@ static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
static int gr2d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr2d *gr2d = to_gr2d(drm);
int err;
@@ -76,7 +76,7 @@ put:
static int gr2d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct gr2d *gr2d = to_gr2d(drm);
int err;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 24fae0f64032..c0a528be0369 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -43,7 +43,7 @@ static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
static int gr3d_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
struct gr3d *gr3d = to_gr3d(drm);
int err;
@@ -85,7 +85,7 @@ put:
static int gr3d_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct gr3d *gr3d = to_gr3d(drm);
int err;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 21a629adcb51..6f117628f257 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1146,6 +1146,7 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
+ int err;
/*
* The following accesses registers of the display controller, so make
@@ -1171,7 +1172,9 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
- pm_runtime_put(hdmi->dev);
+ err = host1x_client_suspend(&hdmi->client);
+ if (err < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", err);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -1186,7 +1189,11 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;
- pm_runtime_get_sync(hdmi->dev);
+ err = host1x_client_resume(&hdmi->client);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to resume: %d\n", err);
+ return;
+ }
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
@@ -1424,8 +1431,8 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
static int tegra_hdmi_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct drm_device *drm = dev_get_drvdata(client->host);
int err;
hdmi->output.dev = client->dev;
@@ -1490,9 +1497,66 @@ static int tegra_hdmi_exit(struct host1x_client *client)
return 0;
}
+static int tegra_hdmi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(hdmi->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(hdmi->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops hdmi_client_ops = {
.init = tegra_hdmi_init,
.exit = tegra_hdmi_exit,
+ .suspend = tegra_hdmi_runtime_suspend,
+ .resume = tegra_hdmi_runtime_resume,
};
static const struct tegra_hdmi_config tegra20_hdmi_config = {
@@ -1700,58 +1764,10 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_hdmi_suspend(struct device *dev)
-{
- struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
- int err;
-
- err = reset_control_assert(hdmi->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(hdmi->clk);
-
- return 0;
-}
-
-static int tegra_hdmi_resume(struct device *dev)
-{
- struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
- int err;
-
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- usleep_range(1000, 2000);
-
- err = reset_control_deassert(hdmi->rst);
- if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- clk_disable_unprepare(hdmi->clk);
- return err;
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops tegra_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
-};
-
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
.of_match_table = tegra_hdmi_of_match,
- .pm = &tegra_hdmi_pm_ops,
},
.probe = tegra_hdmi_probe,
.remove = tegra_hdmi_remove,
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 47d985ac7cd7..8183e617bf6b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -95,17 +95,25 @@ static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
+ int err = 0;
+
mutex_lock(&wgrp->lock);
if (wgrp->usecount == 0) {
- pm_runtime_get_sync(wgrp->parent);
+ err = host1x_client_resume(wgrp->parent);
+ if (err < 0) {
+ dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
+ goto unlock;
+ }
+
reset_control_deassert(wgrp->rst);
}
wgrp->usecount++;
- mutex_unlock(&wgrp->lock);
- return 0;
+unlock:
+ mutex_unlock(&wgrp->lock);
+ return err;
}
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
@@ -121,7 +129,7 @@ static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
wgrp->index);
}
- pm_runtime_put(wgrp->parent);
+ host1x_client_suspend(wgrp->parent);
}
wgrp->usecount--;
@@ -379,6 +387,7 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc;
u32 value;
+ int err;
/* rien ne va plus: no further changes allowed */
if (!old_state || !old_state->crtc)
@@ -386,6 +395,12 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
dc = to_tegra_dc(old_state->crtc);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fallback to the
@@ -394,15 +409,13 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
if (WARN_ON(p->dc == NULL))
p->dc = dc;
- pm_runtime_get_sync(dc->dev);
-
value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
tegra_dc_remove_shared_plane(dc, p);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
@@ -415,6 +428,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
struct tegra_plane *p = to_tegra_plane(plane);
dma_addr_t base;
u32 value;
+ int err;
/* rien ne va plus: no further changes allowed */
if (!plane->state->crtc || !plane->state->fb)
@@ -425,7 +439,11 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
return;
}
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
tegra_dc_assign_shared_plane(dc, p);
@@ -515,7 +533,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value &= ~CONTROL_CSC_ENABLE;
tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
@@ -551,7 +569,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
plane->base.index = index;
plane->wgrp = &hub->wgrps[wgrp];
- plane->wgrp->parent = dc->dev;
+ plane->wgrp->parent = &dc->client;
p = &plane->base.base;
@@ -656,8 +674,13 @@ int tegra_display_hub_atomic_check(struct drm_device *drm,
static void tegra_display_hub_update(struct tegra_dc *dc)
{
u32 value;
+ int err;
- pm_runtime_get_sync(dc->dev);
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
value &= ~LATENCY_EVENT;
@@ -672,7 +695,7 @@ static void tegra_display_hub_update(struct tegra_dc *dc)
tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
- pm_runtime_put(dc->dev);
+ host1x_client_suspend(&dc->client);
}
void tegra_display_hub_atomic_commit(struct drm_device *drm,
@@ -705,7 +728,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm,
static int tegra_display_hub_init(struct host1x_client *client)
{
struct tegra_display_hub *hub = to_tegra_display_hub(client);
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub_state *state;
@@ -723,7 +746,7 @@ static int tegra_display_hub_init(struct host1x_client *client)
static int tegra_display_hub_exit(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
struct tegra_drm *tegra = drm->dev_private;
drm_atomic_private_obj_fini(&tegra->hub->base);
@@ -732,9 +755,85 @@ static int tegra_display_hub_exit(struct host1x_client *client)
return 0;
}
+static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i = hub->num_heads;
+ int err;
+
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+ clk_disable_unprepare(hub->clk_dsc);
+ clk_disable_unprepare(hub->clk_disp);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hub->clk_disp);
+ if (err < 0)
+ goto put_rpm;
+
+ err = clk_prepare_enable(hub->clk_dsc);
+ if (err < 0)
+ goto disable_disp;
+
+ err = clk_prepare_enable(hub->clk_hub);
+ if (err < 0)
+ goto disable_dsc;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
+ err = reset_control_deassert(hub->rst);
+ if (err < 0)
+ goto disable_heads;
+
+ return 0;
+
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+disable_dsc:
+ clk_disable_unprepare(hub->clk_dsc);
+disable_disp:
+ clk_disable_unprepare(hub->clk_disp);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops tegra_display_hub_ops = {
.init = tegra_display_hub_init,
.exit = tegra_display_hub_exit,
+ .suspend = tegra_display_hub_runtime_suspend,
+ .resume = tegra_display_hub_runtime_resume,
};
static int tegra_display_hub_probe(struct platform_device *pdev)
@@ -851,6 +950,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
static int tegra_display_hub_remove(struct platform_device *pdev)
{
struct tegra_display_hub *hub = platform_get_drvdata(pdev);
+ unsigned int i;
int err;
err = host1x_client_unregister(&hub->client);
@@ -859,78 +959,17 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
err);
}
- pm_runtime_disable(&pdev->dev);
-
- return err;
-}
-
-static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
-{
- struct tegra_display_hub *hub = dev_get_drvdata(dev);
- unsigned int i = hub->num_heads;
- int err;
-
- err = reset_control_assert(hub->rst);
- if (err < 0)
- return err;
-
- while (i--)
- clk_disable_unprepare(hub->clk_heads[i]);
-
- clk_disable_unprepare(hub->clk_hub);
- clk_disable_unprepare(hub->clk_dsc);
- clk_disable_unprepare(hub->clk_disp);
-
- return 0;
-}
-
-static int __maybe_unused tegra_display_hub_resume(struct device *dev)
-{
- struct tegra_display_hub *hub = dev_get_drvdata(dev);
- unsigned int i;
- int err;
-
- err = clk_prepare_enable(hub->clk_disp);
- if (err < 0)
- return err;
-
- err = clk_prepare_enable(hub->clk_dsc);
- if (err < 0)
- goto disable_disp;
-
- err = clk_prepare_enable(hub->clk_hub);
- if (err < 0)
- goto disable_dsc;
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- for (i = 0; i < hub->num_heads; i++) {
- err = clk_prepare_enable(hub->clk_heads[i]);
- if (err < 0)
- goto disable_heads;
+ mutex_destroy(&wgrp->lock);
}
- err = reset_control_deassert(hub->rst);
- if (err < 0)
- goto disable_heads;
-
- return 0;
-
-disable_heads:
- while (i--)
- clk_disable_unprepare(hub->clk_heads[i]);
+ pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(hub->clk_hub);
-disable_dsc:
- clk_disable_unprepare(hub->clk_dsc);
-disable_disp:
- clk_disable_unprepare(hub->clk_disp);
return err;
}
-static const struct dev_pm_ops tegra_display_hub_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_display_hub_suspend,
- tegra_display_hub_resume, NULL)
-};
-
static const struct tegra_display_hub_soc tegra186_display_hub = {
.num_wgrps = 6,
.supports_dsc = true,
@@ -958,7 +997,6 @@ struct platform_driver tegra_display_hub_driver = {
.driver = {
.name = "tegra-display-hub",
.of_match_table = tegra_display_hub_of_match,
- .pm = &tegra_display_hub_pm_ops,
},
.probe = tegra_display_hub_probe,
.remove = tegra_display_hub_remove,
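
tegra_windowgroup_enable() above only powers the parent client up on the 0 -> 1 usecount transition, and the new error path bails out with the count untouched when host1x_client_resume() fails. A self-contained model of that transition logic (the mutex and power helpers are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int usecount;

/* Stand-in for host1x_client_resume(); returns 0 on success. */
static int power_up(void)    { puts("powered up");   return 0; }
static void power_down(void) { puts("powered down"); }

static int wgrp_enable(void)
{
	int err = 0;

	pthread_mutex_lock(&lock);

	/* Only the first user actually powers the hardware up. */
	if (usecount == 0) {
		err = power_up();
		if (err < 0)
			goto unlock; /* leave usecount at 0 on failure */
	}

	usecount++;
unlock:
	pthread_mutex_unlock(&lock);
	return err;
}

static void wgrp_disable(void)
{
	pthread_mutex_lock(&lock);

	/* Only the last user powers the hardware back down. */
	if (--usecount == 0)
		power_down();

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wgrp_enable();
	wgrp_enable();  /* nested user: no hardware access */
	wgrp_disable();
	wgrp_disable(); /* last user: hardware powers down */
	return 0;
}
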
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 767a60d9313c..3efa1be07ff8 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -17,7 +17,7 @@ struct tegra_windowgroup {
struct mutex lock;
unsigned int index;
- struct device *parent;
+ struct host1x_client *parent;
struct reset_control *rst;
};
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 80ddde4adbae..a264259b97a2 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -250,3 +250,19 @@ void tegra_output_find_possible_crtcs(struct tegra_output *output,
output->encoder.possible_crtcs = mask;
}
+
+int tegra_output_suspend(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ disable_irq(output->hpd_irq);
+
+ return 0;
+}
+
+int tegra_output_resume(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ enable_irq(output->hpd_irq);
+
+ return 0;
+}
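
tegra_output_suspend() and tegra_output_resume() above simply gate the hotplug-detect interrupt. disable_irq()/enable_irq() already nest through a per-IRQ depth count, so paired calls stay balanced even if several layers suspend the output. A toy model of that depth counting (illustrative only, not the genirq implementation):

#include <stdio.h>

static unsigned int irq_depth;

static void model_disable_irq(void)
{
	/* Only the first disable actually masks the line. */
	if (irq_depth++ == 0)
		puts("irq masked");
}

static void model_enable_irq(void)
{
	/* Only the matching final enable unmasks it again. */
	if (--irq_depth == 0)
		puts("irq unmasked");
}

int main(void)
{
	model_disable_irq();
	model_disable_irq(); /* nested: no effect on the hardware */
	model_enable_irq();
	model_enable_irq();  /* depth back to zero: line unmasked */
	return 0;
}
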
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index cadcdd9ea427..9ccfb56e9b01 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -3,6 +3,8 @@
* Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/iommu.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
@@ -107,21 +109,27 @@ const struct drm_plane_funcs tegra_plane_funcs = {
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
unsigned int i;
int err;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ dma_addr_t phys_addr, *phys;
+ struct sg_table *sgt;
- if (!dc->client.group) {
- struct sg_table *sgt;
+ if (!domain || dc->client.group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
- sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto unpin;
- }
+ sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+ if (sgt) {
err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
DMA_TO_DEVICE);
if (err == 0) {
@@ -143,7 +151,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
state->iova[i] = sg_dma_address(sgt->sgl);
state->sgt[i] = sgt;
} else {
- state->iova[i] = bo->iova;
+ state->iova[i] = phys_addr;
}
}
@@ -156,9 +164,11 @@ unpin:
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
struct sg_table *sgt = state->sgt[i];
- dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
- host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ if (sgt)
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
state->iova[i] = DMA_MAPPING_ERROR;
state->sgt[i] = NULL;
}
@@ -172,17 +182,13 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
for (i = 0; i < state->base.fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
- if (!dc->client.group) {
- struct sg_table *sgt = state->sgt[i];
-
- if (sgt) {
- dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE);
- host1x_bo_unpin(dc->dev, &bo->base, sgt);
- }
- }
+ if (sgt)
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
state->iova[i] = DMA_MAPPING_ERROR;
state->sgt[i] = NULL;
}
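
tegra_dc_pin() now asks host1x_bo_pin() for a raw physical address whenever the display controller either is not behind an IOMMU or shares the host1x IOMMU group; only otherwise does it map the buffer through the DMA API. The unpin paths mirror this by treating a NULL sg_table as "nothing to unmap". A compilable sketch of the selection and the symmetric cleanup (types and helpers are stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long dma_addr_t;

struct mapping { dma_addr_t iova; };

/* Stand-in for host1x_bo_pin(): fills *phys when asked, else maps. */
static struct mapping *bo_pin(dma_addr_t *phys)
{
	static struct mapping map = { .iova = 0x80000000ULL };

	if (phys) {
		*phys = 0x40000000ULL; /* raw physical address */
		return NULL;           /* no mapping to tear down */
	}

	return &map;
}

static void bo_unpin(struct mapping *map)
{
	if (map)
		printf("unmapped iova %#llx\n", map->iova);
}

static void pin_plane(bool has_domain, bool shared_group)
{
	dma_addr_t phys_addr = 0, *phys;
	struct mapping *map;

	/* Use the physical address unless a private IOMMU mapping is needed. */
	if (!has_domain || shared_group)
		phys = &phys_addr;
	else
		phys = NULL;

	map = bo_pin(phys);
	if (map)
		printf("using iova %#llx\n", map->iova);
	else
		printf("using phys %#llx\n", phys_addr);

	bo_unpin(map); /* safe either way: NULL means nothing was mapped */
}

int main(void)
{
	pin_plane(false, false);
	pin_plane(true, false);
	return 0;
}
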
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 1b8087d2dafe..81226a4953c1 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2255,7 +2255,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
- pm_runtime_put(sor->dev);
+ host1x_client_suspend(&sor->client);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -2276,7 +2276,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
mode = &encoder->crtc->state->adjusted_mode;
pclk = mode->clock * 1000;
- pm_runtime_get_sync(sor->dev);
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -2722,7 +2726,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_unprepare(output->panel);
- pm_runtime_put(sor->dev);
+ host1x_client_suspend(&sor->client);
}
static void tegra_sor_dp_enable(struct drm_encoder *encoder)
@@ -2742,7 +2746,11 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
mode = &encoder->crtc->state->adjusted_mode;
info = &output->connector.display_info;
- pm_runtime_get_sync(sor->dev);
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
@@ -3053,7 +3061,7 @@ static const struct tegra_sor_ops tegra_sor_dp_ops = {
static int tegra_sor_init(struct host1x_client *client)
{
- struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct drm_device *drm = dev_get_drvdata(client->host);
const struct drm_encoder_helper_funcs *helpers = NULL;
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
@@ -3190,9 +3198,80 @@ static int tegra_sor_exit(struct host1x_client *client)
return 0;
}
+static int tegra_sor_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ reset_control_release(sor->rst);
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sor->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_sor_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto release_reset;
+ }
+ }
+
+ return 0;
+
+release_reset:
+ reset_control_release(sor->rst);
+disable_clk:
+ clk_disable_unprepare(sor->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
static const struct host1x_client_ops sor_client_ops = {
.init = tegra_sor_init,
.exit = tegra_sor_exit,
+ .suspend = tegra_sor_runtime_suspend,
+ .resume = tegra_sor_runtime_resume,
};
static const u8 tegra124_sor_xbar_cfg[5] = {
@@ -3836,6 +3915,17 @@ static int tegra_sor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
+ INIT_LIST_HEAD(&sor->client.list);
+ sor->client.ops = &sor_client_ops;
+ sor->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto rpm_disable;
+ }
+
/*
* On Tegra210 and earlier, provide our own implementation for the
* pad output clock.
@@ -3843,43 +3933,36 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->clk_pad) {
char *name;
- err = pm_runtime_get_sync(&pdev->dev);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
- err);
- goto remove;
- }
-
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
+ name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout",
+ sor->index);
if (!name) {
err = -ENOMEM;
- goto remove;
+ goto unregister;
+ }
+
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ goto unregister;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
- pm_runtime_put(&pdev->dev);
+ host1x_client_suspend(&sor->client);
}
if (IS_ERR(sor->clk_pad)) {
err = PTR_ERR(sor->clk_pad);
- dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
- err);
- goto remove;
- }
-
- INIT_LIST_HEAD(&sor->client.list);
- sor->client.ops = &sor_client_ops;
- sor->client.dev = &pdev->dev;
-
- err = host1x_client_register(&sor->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
err);
- goto remove;
+ goto unregister;
}
return 0;
+unregister:
+ host1x_client_unregister(&sor->client);
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
remove:
if (sor->ops && sor->ops->remove)
sor->ops->remove(sor);
@@ -3893,8 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev)
struct tegra_sor *sor = platform_get_drvdata(pdev);
int err;
- pm_runtime_disable(&pdev->dev);
-
err = host1x_client_unregister(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -3902,6 +3983,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
return err;
}
+ pm_runtime_disable(&pdev->dev);
+
if (sor->ops && sor->ops->remove) {
err = sor->ops->remove(sor);
if (err < 0)
@@ -3913,54 +3996,21 @@ static int tegra_sor_remove(struct platform_device *pdev)
return 0;
}
-static int tegra_sor_runtime_suspend(struct device *dev)
-{
- struct tegra_sor *sor = dev_get_drvdata(dev);
- int err;
-
- if (sor->rst) {
- err = reset_control_assert(sor->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
-
- reset_control_release(sor->rst);
- }
-
- usleep_range(1000, 2000);
-
- clk_disable_unprepare(sor->clk);
-
- return 0;
-}
-
-static int tegra_sor_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_sor_suspend(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(sor->clk);
+ err = tegra_output_suspend(&sor->output);
if (err < 0) {
- dev_err(dev, "failed to enable clock: %d\n", err);
+ dev_err(dev, "failed to suspend output: %d\n", err);
return err;
}
- usleep_range(1000, 2000);
-
- if (sor->rst) {
- err = reset_control_acquire(sor->rst);
- if (err < 0) {
- dev_err(dev, "failed to acquire reset: %d\n", err);
- clk_disable_unprepare(sor->clk);
- return err;
- }
-
- err = reset_control_deassert(sor->rst);
+ if (sor->hdmi_supply) {
+ err = regulator_disable(sor->hdmi_supply);
if (err < 0) {
- dev_err(dev, "failed to deassert reset: %d\n", err);
- reset_control_release(sor->rst);
- clk_disable_unprepare(sor->clk);
+ tegra_output_resume(&sor->output);
return err;
}
}
@@ -3968,37 +4018,31 @@ static int tegra_sor_runtime_resume(struct device *dev)
return 0;
}
-static int tegra_sor_suspend(struct device *dev)
+static int __maybe_unused tegra_sor_resume(struct device *dev)
{
struct tegra_sor *sor = dev_get_drvdata(dev);
int err;
if (sor->hdmi_supply) {
- err = regulator_disable(sor->hdmi_supply);
+ err = regulator_enable(sor->hdmi_supply);
if (err < 0)
return err;
}
- return 0;
-}
+ err = tegra_output_resume(&sor->output);
+ if (err < 0) {
+ dev_err(dev, "failed to resume output: %d\n", err);
-static int tegra_sor_resume(struct device *dev)
-{
- struct tegra_sor *sor = dev_get_drvdata(dev);
- int err;
+ if (sor->hdmi_supply)
+ regulator_disable(sor->hdmi_supply);
- if (sor->hdmi_supply) {
- err = regulator_enable(sor->hdmi_supply);
- if (err < 0)
- return err;
+ return err;
}
return 0;
}
static const struct dev_pm_ops tegra_sor_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume,
- NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
};
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 3526c2892ddb..ade56b860cf9 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -161,7 +161,7 @@ static int vic_boot(struct vic *vic)
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
@@ -190,9 +190,9 @@ static int vic_init(struct host1x_client *client)
/*
* Inherit the DMA parameters (such as maximum segment size) from the
- * parent device.
+ * parent host1x device.
*/
- client->dev->dma_parms = client->parent->dma_parms;
+ client->dev->dma_parms = client->host->dma_parms;
return 0;
@@ -209,7 +209,7 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct drm_device *dev = dev_get_drvdata(client->host);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c18a28df6e2c..0791a0200cc3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -249,7 +249,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ priv->mmio = ioremap(res->start, resource_size(res));
if (!priv->mmio) {
dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c8e359ded1df..f1f670642c97 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
@@ -563,7 +563,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index eebb4c06c04d..389128b8c4dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -179,7 +179,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgoff_t num_prefault)
{
struct vm_area_struct *vma = vmf->vma;
- struct vm_area_struct cvma = *vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
unsigned long page_offset;
@@ -250,7 +249,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
goto out_io_unlock;
}
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+ prot = ttm_io_prot(bo->mem.placement, prot);
if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
@@ -266,7 +265,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
}
} else {
/* Iomem should not be marked encrypted */
- cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
+ prot = pgprot_decrypted(prot);
}
/*
@@ -289,11 +288,20 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pfn = page_to_pfn(page);
}
+ /*
+ * Note that the value of @prot at this point may differ from
+ * the value of @vma->vm_page_prot in the caching- and
+ * encryption bits. This is because the exact location of the
+ * data may not be known at mmap() time and may also change
+ * at arbitrary times while the data is mmap'ed.
+ * See vmf_insert_mixed_prot() for a discussion.
+ */
if (vma->vm_flags & VM_MIXEDMAP)
- ret = vmf_insert_mixed(&cvma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_mixed_prot(vma, address,
+ __pfn_to_pfn_t(pfn, PFN_DEV),
+ prot);
else
- ret = vmf_insert_pfn(&cvma, address, pfn);
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
/* Never error on prefaulted PTEs */
if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -325,7 +333,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
if (ret)
return ret;
- prot = vm_get_page_prot(vma->vm_flags);
+ prot = vma->vm_page_prot;
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
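
ttm_bo_vm_fault_reserved() previously copied the whole vm_area_struct onto the stack just to override vm_page_prot; the rewrite threads the protection value through as an explicit parameter and hands it to vmf_insert_mixed_prot()/vmf_insert_pfn_prot(). The same refactoring in miniature (structures and names invented for illustration):

#include <stdio.h>

struct area {
	unsigned long flags;
	unsigned long page_prot;
};

/* Before: mutate a stack copy of the whole structure. */
static void insert_via_copy(const struct area *area, unsigned long prot)
{
	struct area copy = *area;

	copy.page_prot = prot;
	printf("insert with prot %#lx (via struct copy)\n", copy.page_prot);
}

/* After: pass the one varying value explicitly; no copy needed. */
static void insert_with_prot(const struct area *area, unsigned long prot)
{
	(void)area->flags; /* the rest of the area is used unmodified */
	printf("insert with prot %#lx (explicit)\n", prot);
}

int main(void)
{
	struct area area = { .flags = 1, .page_prot = 0x25 };

	insert_via_copy(&area, 0x27);
	insert_with_prot(&area, 0x27);
	return 0;
}
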
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1a07462b4528..eaa8e9682373 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -140,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
- struct drm_sched_rq *rq;
+ struct drm_gpu_scheduler *sched;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -150,8 +150,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+ sched = &v3d->queue[i].sched;
+ drm_sched_entity_init(&v3d_priv->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
}
file->driver_priv = v3d_priv;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5bd60ded3d81..909eba43664a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_put_unlocked(&obj->base);
- if (ret)
+ if (ret) {
+ drm_gem_object_put_unlocked(&obj->base);
return ERR_PTR(ret);
+ }
return &obj->base;
}
@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- DRM_DEBUG("Created object of size %lld\n", size);
+ drm_gem_object_put_unlocked(gem_object);
+
+ DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
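
vgem_gem_create() used to drop its local reference unconditionally, so vgem_gem_dumb_create() returned a pointer it no longer owned; the fix keeps the creation reference on success and lets dumb_create drop it once the handle exists. A tiny refcount model of the corrected ownership flow (names invented):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static struct obj *obj_create(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		o->refcount = 1; /* creation reference, owned by the caller */
	return o;
}

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		puts("object freed");
		free(o);
	}
}

/* The handle table takes its own reference, like drm_gem_handle_create(). */
static int handle_create(struct obj *o) { obj_get(o); return 0; }

static struct obj *gem_create(void)
{
	struct obj *o = obj_create();

	if (!o)
		return NULL;

	if (handle_create(o)) {
		obj_put(o); /* drop the creation reference only on failure */
		return NULL;
	}

	return o; /* caller still owns the creation reference */
}

int main(void)
{
	struct obj *o = gem_create();

	if (!o)
		return 1;

	obj_put(o); /* dumb_create drops its reference after the handle exists */
	obj_put(o); /* closing the handle drops the last reference */
	return 0;
}
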
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index d13a3897506e..551fa31629af 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,8 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
kfree(vsg->desc_pages);
/* fall through */
case dr_via_pages_locked:
- put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE));
+ unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
/* fall through */
case dr_via_pages_alloc:
vfree(vsg->pages);
@@ -239,7 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
vsg->num_pages,
vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 4ac55fc2bf97..44d858ce4ce7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -209,8 +209,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ kfree(cres);
goto out_invalid_key;
+ }
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
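
The hunk above fixes a small leak: when drm_ht_insert_item() fails, the freshly allocated cres was previously left unreachable on the error path. The general shape of the fix, as a compilable sketch (the table helper is a stand-in):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for drm_ht_insert_item(); fails to simulate a duplicate key. */
static int table_insert(void *item) { (void)item; return -22; /* -EINVAL */ }

static int res_add(void)
{
	void *res = malloc(64);
	int ret;

	if (!res)
		return -12; /* -ENOMEM */

	ret = table_insert(res);
	if (ret != 0) {
		/*
		 * The allocation is not reachable from anywhere else yet,
		 * so the error path must free it or it leaks.
		 */
		free(res);
		return ret;
	}

	return 0;
}

int main(void)
{
	printf("res_add() = %d\n", res_add());
	return 0;
}
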
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 673d6920fc29..cf3dc56d7cf4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -28,10 +28,10 @@
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
@@ -150,6 +150,9 @@
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
union drm_vmw_gb_surface_reference_ext_arg)
+#define DRM_IOCTL_VMW_MSG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
+ struct drm_vmw_msg_arg)
/**
* The core DRM version of this macro doesn't account for
@@ -165,9 +168,9 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
@@ -182,16 +185,16 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER),
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
+ DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_RENDER_ALLOW),
@@ -201,9 +204,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* these allow direct access to the framebuffers, so mark them as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -221,28 +224,31 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_bo_synccpu_ioctl,
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
vmw_gb_surface_define_ext_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
vmw_gb_surface_reference_ext_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_MSG,
+ vmw_msg_ioctl,
+ DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
@@ -1211,8 +1217,10 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unregister(dev);
+ vmw_driver_unload(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
- drm_put_dev(dev);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1391,8 +1399,6 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
- .load = vmw_driver_load,
- .unload = vmw_driver_unload,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
@@ -1428,7 +1434,39 @@ static struct pci_driver vmw_pci_driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_pci_dev(pdev, ent, &driver);
+ struct drm_device *dev;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev = drm_dev_alloc(&driver, &pdev->dev);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_pci_disable_device;
+ }
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = vmw_driver_load(dev, ent->driver_data);
+ if (ret)
+ goto err_drm_dev_put;
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_vmw_driver_unload;
+
+ return 0;
+
+err_vmw_driver_unload:
+ vmw_driver_unload(dev);
+err_drm_dev_put:
+ drm_dev_put(dev);
+err_pci_disable_device:
+ pci_disable_device(pdev);
+ return ret;
}
static int __init vmwgfx_init(void)
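
vmw_probe() now spells out the init sequence (pci_enable_device, drm_dev_alloc, vmw_driver_load, drm_dev_register) with error labels that unwind exactly the steps that succeeded, newest first. One way to keep such setup/teardown pairs mechanically symmetric is a step table; a sketch under invented names, not how vmwgfx actually structures it:

#include <stdio.h>

struct step {
	const char *name;
	int (*init)(void);
	void (*fini)(void);
};

static int ok(void)   { return 0; }
static int fail(void) { return -5; /* -EIO, simulated failure */ }
static void undo(void) { puts("step undone"); }

static const struct step steps[] = {
	{ "pci_enable_device", ok,   undo },
	{ "drm_dev_alloc",     ok,   undo },
	{ "driver_load",       fail, undo }, /* fails for demonstration */
	{ "drm_dev_register",  ok,   undo },
};

static int probe(void)
{
	int i, err;

	for (i = 0; i < (int)(sizeof(steps) / sizeof(steps[0])); i++) {
		err = steps[i].init();
		if (err < 0) {
			printf("%s failed: %d\n", steps[i].name, err);
			/* unwind everything that succeeded, newest first */
			while (i-- > 0)
				steps[i].fini();
			return err;
		}
	}

	return 0;
}

int main(void)
{
	return probe() < 0 ? 1 : 0;
}
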
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 845b3b8c29ca..4ce39c94c0eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -56,9 +56,9 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20190328"
+#define VMWGFX_DRIVER_DATE "20200114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 16
+#define VMWGFX_DRIVER_MINOR 17
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -1403,6 +1403,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* VMW logging */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 934ad7c0c342..73489a45decb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2377,9 +2377,12 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
- cmd->body.renderTargetViewId));
+ ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
}
/**
@@ -2396,9 +2399,12 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId);
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
- cmd->body.depthStencilViewId));
+ return PTR_ERR_OR_ZERO(ret);
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
@@ -2741,9 +2747,12 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
- return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId));
+ return PTR_ERR_OR_ZERO(ret);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index b6c5e4c2ac3c..e9f448a5ebb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/mem_encrypt.h>
#include <asm/hypervisor.h>
@@ -56,6 +57,8 @@
#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
+#define MAX_USER_MSG_LENGTH PAGE_SIZE
+
static u32 vmw_msg_enabled = 1;
enum rpc_msg_type {
@@ -148,7 +151,8 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
unsigned long si, di, eax, ebx, ecx, edx;
unsigned long msg_len = strlen(msg);
- if (hb) {
+ /* HB port can't access encrypted memory. */
+ if (hb && !mem_encrypt_active()) {
unsigned long bp = channel->cookie_high;
si = (uintptr_t) msg;
@@ -202,7 +206,8 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
{
unsigned long si, di, eax, ebx, ecx, edx;
- if (hb) {
+ /* HB port can't access encrypted memory */
+ if (hb && !mem_encrypt_active()) {
unsigned long bp = channel->cookie_low;
si = channel->cookie_high;
@@ -514,3 +519,84 @@ out_open:
return -EINVAL;
}
+
+
+/**
+ * vmw_msg_ioctl: Sends and receives a message to/from the host
+ *
+ * Sends a message from user-space to the host.
+ * Can also receive a result from the host and return it to user-space.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ */
+
+int vmw_msg_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_msg_arg *arg =
+ (struct drm_vmw_msg_arg *) data;
+ struct rpc_channel channel;
+ char *msg;
+ int length;
+
+ msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
+ if (!msg) {
+ DRM_ERROR("Cannot allocate memory for log message.\n");
+ return -ENOMEM;
+ }
+
+ length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
+ MAX_USER_MSG_LENGTH);
+ if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
+ DRM_ERROR("Userspace message access failure.\n");
+ kfree(msg);
+ return -EINVAL;
+ }
+
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
+ DRM_ERROR("Failed to open channel.\n");
+ goto out_open;
+ }
+
+ if (vmw_send_msg(&channel, msg)) {
+ DRM_ERROR("Failed to send message to host.\n");
+ goto out_msg;
+ }
+
+ if (!arg->send_only) {
+ char *reply = NULL;
+ size_t reply_len = 0;
+
+ if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
+ DRM_ERROR("Failed to receive message from host.\n");
+ goto out_msg;
+ }
+ if (reply && reply_len > 0) {
+ if (copy_to_user((void __user *)((unsigned long)arg->receive),
+ reply, reply_len)) {
+ DRM_ERROR("Failed to copy message to userspace.\n");
+ kfree(reply);
+ goto out_msg;
+ }
+ arg->receive_len = (__u32)reply_len;
+ }
+ kfree(reply);
+ }
+
+ vmw_close_channel(&channel);
+ kfree(msg);
+
+ return 0;
+
+out_msg:
+ vmw_close_channel(&channel);
+out_open:
+ kfree(msg);
+
+ return -EINVAL;
+}
+
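
vmw_msg_ioctl() bounds the user string with strncpy_from_user() and rejects both copy faults (negative return) and strings that fill the whole buffer (length >= MAX_USER_MSG_LENGTH), so msg is always NUL-terminated before it reaches the RPC channel. A userspace model of that validation, with strncpy_from_user() approximated per its documented contract (helper names are invented):

#include <stdio.h>

#define MAX_USER_MSG_LENGTH 4096

/*
 * Approximation of strncpy_from_user(): copies up to max bytes, returns
 * the string length (excluding the NUL) on success, or max if the source
 * did not terminate within the buffer.
 */
static long bounded_copy(char *dst, const char *src, long max)
{
	long i;

	for (i = 0; i < max; i++) {
		dst[i] = src[i];
		if (src[i] == '\0')
			return i;
	}

	return max; /* no terminator found within max bytes */
}

static int validate_msg(const char *user_msg)
{
	char msg[MAX_USER_MSG_LENGTH];
	long length;

	length = bounded_copy(msg, user_msg, MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH)
		return -22; /* -EINVAL: fault or unterminated string */

	printf("accepted %ld-byte message\n", length);
	return 0;
}

int main(void)
{
	return validate_msg("log this") ? 1 : 0;
}
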
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 32b9131b2bae..3ce630aa4fde 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -934,16 +934,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
- bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv)))
- require_exist = true;
-
handle = u_handle;
}
@@ -960,9 +956,18 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
}
if (handle_type != DRM_VMW_HANDLE_PRIME) {
+ bool require_exist = false;
+
user_srf = container_of(base, struct vmw_user_surface,
prime.base);
+ /* Error out if we are unauthenticated primary */
+ if (drm_is_primary_client(file_priv) &&
+ !file_priv->authenticated) {
+ ret = -EACCES;
+ goto out_bad_resource;
+ }
+
/*
* Make sure the surface creator has the same
* authenticating master, or is already registered with us.
@@ -971,6 +976,9 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
user_srf->master != file_priv->master)
require_exist = true;
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
require_exist);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index ce288756531b..aa7e50f63b94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -45,6 +45,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &vmw_vm_ops;
+ /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
+ vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+
return 0;
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 2c8559ff3481..6a995db51d6d 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -120,7 +120,7 @@ static void host1x_subdev_register(struct host1x_device *device,
mutex_lock(&device->clients_lock);
list_move_tail(&client->list, &device->clients);
list_move_tail(&subdev->list, &device->active);
- client->parent = &device->dev;
+ client->host = &device->dev;
subdev->client = client;
mutex_unlock(&device->clients_lock);
mutex_unlock(&device->subdevs_lock);
@@ -156,7 +156,7 @@ static void __host1x_subdev_unregister(struct host1x_device *device,
*/
mutex_lock(&device->clients_lock);
subdev->client = NULL;
- client->parent = NULL;
+ client->host = NULL;
list_move_tail(&subdev->list, &device->subdevs);
/*
* XXX: Perhaps don't do this here, but rather explicitly remove it
@@ -710,6 +710,10 @@ int host1x_client_register(struct host1x_client *client)
struct host1x *host1x;
int err;
+ INIT_LIST_HEAD(&client->list);
+ mutex_init(&client->lock);
+ client->usecount = 0;
+
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
@@ -768,3 +772,74 @@ int host1x_client_unregister(struct host1x_client *client)
return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
+
+int host1x_client_suspend(struct host1x_client *client)
+{
+ int err = 0;
+
+ mutex_lock(&client->lock);
+
+ if (client->usecount == 1) {
+ if (client->ops && client->ops->suspend) {
+ err = client->ops->suspend(client);
+ if (err < 0)
+ goto unlock;
+ }
+ }
+
+ client->usecount--;
+ dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+ if (client->parent) {
+ err = host1x_client_suspend(client->parent);
+ if (err < 0)
+ goto resume;
+ }
+
+ goto unlock;
+
+resume:
+ if (client->usecount == 0)
+ if (client->ops && client->ops->resume)
+ client->ops->resume(client);
+
+ client->usecount++;
+unlock:
+ mutex_unlock(&client->lock);
+ return err;
+}
+EXPORT_SYMBOL(host1x_client_suspend);
+
+int host1x_client_resume(struct host1x_client *client)
+{
+ int err = 0;
+
+ mutex_lock(&client->lock);
+
+ if (client->parent) {
+ err = host1x_client_resume(client->parent);
+ if (err < 0)
+ goto unlock;
+ }
+
+ if (client->usecount == 0) {
+ if (client->ops && client->ops->resume) {
+ err = client->ops->resume(client);
+ if (err < 0)
+ goto suspend;
+ }
+ }
+
+ client->usecount++;
+ dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+ goto unlock;
+
+suspend:
+ if (client->parent)
+ host1x_client_suspend(client->parent);
+unlock:
+ mutex_unlock(&client->lock);
+ return err;
+}
+EXPORT_SYMBOL(host1x_client_resume);
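
The two helpers above form a use-count scheme: host1x_client_resume()
powers the parent chain first and only invokes ->resume() on the 0 -> 1
transition; host1x_client_suspend() is the mirror image, dropping the
client before the parent. A hypothetical consumer would call them from
its runtime-PM hooks; the names and drvdata layout below are assumptions
for illustration only:

	static int my_client_runtime_resume(struct device *dev)
	{
		struct my_client *mc = dev_get_drvdata(dev);

		/* parent chain is resumed before this client's ->resume() */
		return host1x_client_resume(&mc->client);
	}

	static int my_client_runtime_suspend(struct device *dev)
	{
		struct my_client *mc = dev_get_drvdata(dev);

		/* this client is suspended first, then the parent chain */
		return host1x_client_suspend(&mc->client);
	}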
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index a738ea55e407..388bcc2889aa 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -339,10 +339,8 @@ static int host1x_probe(struct platform_device *pdev)
}
syncpt_irq = platform_get_irq(pdev, 0);
- if (syncpt_irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+ if (syncpt_irq < 0)
return syncpt_irq;
- }
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 60b2fedd0061..a10643aa89aa 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
+#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -101,9 +102,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
struct host1x_client *client = job->client;
struct device *dev = client->dev;
+ struct iommu_domain *domain;
unsigned int i;
int err;
+ domain = iommu_get_domain_for_dev(dev);
job->num_unpins = 0;
for (i = 0; i < job->num_relocs; i++) {
@@ -117,7 +120,19 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- if (client->group)
+ /*
+ * If the client device is not attached to an IOMMU, the
+ * physical address of the buffer object can be used.
+ *
+ * Similarly, when an IOMMU domain is shared between all
+ * host1x clients, the IOVA is already available, so no
+ * need to map the buffer object again.
+ *
+ * XXX Note that this isn't always safe to do because it
+ * relies on an assumption that no cache maintenance is
+ * needed on the buffer objects.
+ */
+ if (!domain || client->group)
phys = &phys_addr;
else
phys = NULL;
@@ -176,6 +191,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
dma_addr_t phys_addr;
unsigned long shift;
struct iova *alloc;
+ dma_addr_t *phys;
unsigned int j;
g->bo = host1x_bo_get(g->bo);
@@ -184,7 +200,17 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+ /*
+ * If the host1x is not attached to an IOMMU, there is no need
+ * to map the buffer object for the host1x, since the physical
+ * address can simply be used.
+ */
+ if (!iommu_get_domain_for_dev(host->dev))
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(host->dev, g->bo, phys);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
goto unpin;
@@ -214,7 +240,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
job->unpins[job->num_unpins].size = gather_size;
phys_addr = iova_dma_addr(&host->iova, alloc);
- } else {
+ } else if (sgt) {
err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
DMA_TO_DEVICE);
if (!err) {
@@ -222,6 +248,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
+ job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].dev = host->dev;
phys_addr = sg_dma_address(sgt->sgl);
}
@@ -229,7 +256,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
job->addr_phys[job->num_unpins] = phys_addr;
job->gather_addr_phys[i] = phys_addr;
- job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
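
Summarizing the two pinning changes in this file: a raw physical address
is used whenever no IOMMU translation applies, and dma_map_sg() is only
reached for a private domain. A compact sketch of the decision, under
the same assumptions as the code above:

	static bool needs_dma_map(struct device *dev,
				  struct host1x_client *client)
	{
		if (!iommu_get_domain_for_dev(dev))
			return false;	/* no IOMMU: physical address works */
		if (client && client->group)
			return false;	/* shared domain: IOVA already valid */
		return true;		/* private domain: map via DMA API */
	}

This is also why the gather path now tests "else if (sgt)": when
host1x_bo_pin() filled in a physical address instead of returning an sg
table, there is nothing left to map.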
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index dd1cd0142941..fce7892d5137 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -421,7 +421,7 @@ int host1x_syncpt_init(struct host1x *host)
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
- struct host1x *host = dev_get_drvdata(client->parent->parent);
+ struct host1x *host = dev_get_drvdata(client->host->parent);
return host1x_syncpt_alloc(host, client, flags);
}
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 8063b1d567b1..e6e4c841fb06 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
- (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+ (usage->hid & HID_USAGE) != 0x00 &&
+ (usage->hid & HID_USAGE) != 0xff && !usage->type) {
hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
usage->hid & HID_USAGE);
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e0b241bd3070..851fe54ea59e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ /* Total size check: Allow for possible report index byte */
+ if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ hid_err(parser->device, "report is too long\n");
+ return -1;
+ }
+
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
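
report->size is accounted in bits while HID_MAX_BUFFER_SIZE is in bytes,
hence the << 3 conversion, with one byte held back for a possible report
index. Assuming the usual HID_MAX_BUFFER_SIZE of 16384 from hid.h, the
bound works out to (16384 - 1) * 8 = 131064 bits; a sketch of the check
as a predicate:

	static bool report_fits(unsigned int size_bits)
	{
		/* one byte reserved for the report index/ID prefix */
		return size_bits <= (HID_MAX_BUFFER_SIZE - 1) << 3;
	}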
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7e1689ef35f5..3a400ce603c4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -631,6 +631,7 @@
#define USB_VENDOR_ID_ITE 0x048d
#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
+#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
@@ -730,6 +731,7 @@
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
#define I2C_DEVICE_ID_LG_8001 0x8001
+#define I2C_DEVICE_ID_LG_7010 0x7010
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -1102,6 +1104,7 @@
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 63855f275a38..dea9cc65bf80 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
- if (device->driver->input_mapped && device->driver->input_mapped(device,
- hidinput, field, usage, &bit, &max) < 0)
- goto ignore;
+ if (device->driver->input_mapped &&
+ device->driver->input_mapped(device, hidinput, field, usage,
+ &bit, &max) < 0) {
+ /*
+ * The driver indicated that no further generic handling
+ * of the usage is desired.
+ */
+ return;
+ }
set_bit(usage->type, input->evbit);
@@ -1215,9 +1221,11 @@ mapped:
set_bit(MSC_SCAN, input->mscbit);
}
-ignore:
return;
+ignore:
+ usage->type = 0;
+ usage->code = 0;
}
static void hidinput_handle_scroll(struct hid_usage *usage,
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index a45f2352618d..c436e12feb23 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index cd9193078525..70e1cb928bf0 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_REPORT_LONG_LENGTH 20
#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0)
+#define HIDPP_REPORT_LONG_SUPPORTED BIT(1)
+#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2)
+
#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
#define HIDPP_SUB_ID_ROLLER 0x05
#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
@@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1)
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
+#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
@@ -135,12 +140,15 @@ struct hidpp_report {
struct hidpp_battery {
u8 feature_index;
u8 solar_feature_index;
+ u8 voltage_feature_index;
struct power_supply_desc desc;
struct power_supply *ps;
char name[64];
int status;
int capacity;
int level;
+ int voltage;
+ int charge_type;
bool online;
};
@@ -183,9 +191,12 @@ struct hidpp_device {
unsigned long quirks;
unsigned long capabilities;
+ u8 supported_reports;
struct hidpp_battery battery;
struct hidpp_scroll_counter vertical_wheel_counter;
+
+ u8 wireless_feature_index;
};
/* HID++ 1.0 error codes */
@@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
struct hidpp_report *message;
int ret, max_count;
+ /* Send as long report if short reports are not supported. */
+ if (report_id == REPORT_ID_HIDPP_SHORT &&
+ !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED))
+ report_id = REPORT_ID_HIDPP_LONG;
+
switch (report_id) {
case REPORT_ID_HIDPP_SHORT:
max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
@@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question,
(answer->fap.params[0] == question->fap.funcindex_clientid);
}
-static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
+ struct hidpp_report *report)
{
- return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
- (report->rap.sub_id == 0x41);
+ return (hidpp->wireless_feature_index &&
+ (report->fap.feature_index == hidpp->wireless_feature_index)) ||
+ ((report->report_id == REPORT_ID_HIDPP_SHORT) &&
+ (report->rap.sub_id == 0x41));
}
/**
@@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp,
return 0;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1001: Battery voltage */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001
+
+#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00
+
+#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00
+
+static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ int *level, int *charge_type)
+{
+ int status;
+
+ long charge_sts = (long)data[2];
+
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ switch (data[2] & 0xe0) {
+ case 0x00:
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x20:
+ status = POWER_SUPPLY_STATUS_FULL;
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ break;
+ case 0x40:
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0xe0:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ if (test_bit(3, &charge_sts)) {
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+ if (test_bit(4, &charge_sts)) {
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ }
+
+ if (test_bit(5, &charge_sts)) {
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ }
+
+ *voltage = get_unaligned_be16(data);
+
+ return status;
+}
+
+static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
+ u8 feature_index,
+ int *status, int *voltage,
+ int *level, int *charge_type)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE,
+ NULL, 0, &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE;
+
+ *status = hidpp20_battery_map_status_voltage(params, voltage,
+ level, charge_type);
+
+ return 0;
+}
+
+static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+ int status, voltage, level, charge_type;
+
+ if (hidpp->battery.voltage_feature_index == 0xff) {
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
+ &hidpp->battery.voltage_feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = hidpp20_battery_get_battery_voltage(hidpp,
+ hidpp->battery.voltage_feature_index,
+ &status, &voltage, &level, &charge_type);
+
+ if (ret)
+ return ret;
+
+ hidpp->battery.status = status;
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ return 0;
+}
+
+static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ int status, voltage, level, charge_type;
+
+ if (report->fap.feature_index != hidpp->battery.voltage_feature_index ||
+ report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST)
+ return 0;
+
+ status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage,
+ &level, &charge_type);
+
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ if (hidpp->battery.ps)
+ power_supply_changed(hidpp->battery.ps);
+ }
+ return 0;
+}
+
static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
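
For the mapping above: data[0..1] carry the voltage in millivolts,
big-endian, and data[2] packs the charging status in its top three bits
(0x00 charging, 0x20 full, 0x40 discharging, 0xe0 not charging) plus
fast/trickle/critical flags in bits 3-5. A worked example with an
assumed payload, not taken from a real trace:

	u8 data[3] = { 0x0f, 0xa0, 0x28 };
	int voltage, level, charge_type;
	int status = hidpp20_battery_map_status_voltage(data, &voltage,
							&level, &charge_type);
	/*
	 * voltage == 0x0fa0 == 4000 (mV);
	 * 0x28 & 0xe0 == 0x20, so status == POWER_SUPPLY_STATUS_FULL and
	 * level == POWER_SUPPLY_CAPACITY_LEVEL_FULL;
	 * bit 3 set, so charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST.
	 */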
@@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */
+ 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */
};
static int hidpp_battery_get_property(struct power_supply *psy,
@@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
val->strval = hidpp->hid_dev->uniq;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* hardware reports voltage in mV; sysfs expects uV */
+ val->intval = hidpp->battery.voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = hidpp->battery.charge_type;
+ break;
default:
ret = -EINVAL;
break;
@@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
+/* 0x1d4b: Wireless device status */
+/* -------------------------------------------------------------------------- */
+#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+ &hidpp->wireless_feature_index,
+ &feature_type);
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
/* 0x2120: Hi-resolution scrolling */
/* -------------------------------------------------------------------------- */
@@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
}
}
- if (unlikely(hidpp_report_is_connect_event(report))) {
+ if (unlikely(hidpp_report_is_connect_event(hidpp, report))) {
atomic_set(&hidpp->connected,
!(report->rap.params[0] & (1 << 6)));
if (schedule_work(&hidpp->work) == 0)
@@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
ret = hidpp_solar_battery_event(hidpp, data, size);
if (ret != 0)
return ret;
+ ret = hidpp20_battery_voltage_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
}
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
hidpp->battery.feature_index = 0xff;
hidpp->battery.solar_feature_index = 0xff;
+ hidpp->battery.voltage_feature_index = 0xff;
if (hidpp->protocol_major >= 2) {
if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
ret = hidpp_solar_request_battery_event(hidpp);
- else
- ret = hidpp20_query_battery_info(hidpp);
+ else {
+ ret = hidpp20_query_battery_voltage_info(hidpp);
+ if (ret)
+ ret = hidpp20_query_battery_info(hidpp);
+ }
if (ret)
return ret;
@@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
if (!battery_props)
return -ENOMEM;
- num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
+ num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
battery_props[num_battery_props++] =
@@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ battery_props[num_battery_props++] =
+ POWER_SUPPLY_PROP_VOLTAGE_NOW;
+
battery = &hidpp->battery;
n = atomic_inc_return(&battery_no) - 1;
@@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
else
hidpp10_query_battery_status(hidpp);
} else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
- hidpp20_query_battery_info(hidpp);
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ hidpp20_query_battery_voltage_info(hidpp);
+ else
+ hidpp20_query_battery_info(hidpp);
}
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
@@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
return report->field[0]->report_count + 1;
}
-static bool hidpp_validate_device(struct hid_device *hdev)
+static u8 hidpp_validate_device(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- int id, report_length, supported_reports = 0;
+ int id, report_length;
+ u8 supported_reports = 0;
id = REPORT_ID_HIDPP_SHORT;
report_length = hidpp_get_report_length(hdev, id);
@@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
if (report_length < HIDPP_REPORT_SHORT_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED;
}
id = REPORT_ID_HIDPP_LONG;
@@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
if (report_length < HIDPP_REPORT_LONG_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_LONG_SUPPORTED;
}
id = REPORT_ID_HIDPP_VERY_LONG;
@@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED;
hidpp->very_long_report_length = report_length;
}
@@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
/*
* Make sure the device is HID++ capable, otherwise treat as generic HID
*/
- if (!hidpp_validate_device(hdev)) {
+ hidpp->supported_reports = hidpp_validate_device(hdev);
+
+ if (!hidpp->supported_reports) {
hid_set_drvdata(hdev, NULL);
devm_kfree(&hdev->dev, hidpp);
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
@@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0) {
dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
__func__, ret);
- hid_hw_stop(hdev);
goto hid_hw_open_fail;
}
@@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hidpp_overwrite_name(hdev);
}
+ if (connected && hidpp->protocol_major >= 2) {
+ ret = hidpp_set_wireless_feature_index(hidpp);
+ if (ret == -ENOENT)
+ hidpp->wireless_feature_index = 0;
+ else if (ret)
+ goto hid_hw_init_fail;
+ }
+
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
@@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech MX Master 2S */
LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 3 */
+ LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech Performance MX */
LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
@@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* MX Master mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Master 3 mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{}
};
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3cfeb1629f79..362805ddf377 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
tool = MT_TOOL_DIAL;
else if (unlikely(!confidence_state)) {
tool = MT_TOOL_PALM;
- if (!active &&
+ if (!active && mt &&
input_mt_is_active(&mt->slots[slotnum])) {
/*
* The non-confidence was reported for
@@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_LG,
HID_USB_DEVICE(USB_VENDOR_ID_LG,
USB_DEVICE_ID_LG_MELFAS_MT) },
+ { .driver_data = MT_CLS_LG,
+ HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index d1b39c29e353..0e7b2d998395 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
{ 0 }
};
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8dae0f9b819e..6286204d4c56 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev,
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
hid_info(hdev, "Steam wireless receiver connected");
+ /* If using a wireless adaptor, ask for connection status */
+ steam->connected = false;
steam_request_conn_status(steam);
} else {
+ /* A wired connection is always present */
+ steam->connected = true;
ret = steam_register(steam);
if (ret) {
hid_err(hdev,
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c3fc0ceb8096..2eee5e31c2b7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -249,13 +249,14 @@ out:
static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */
poll_wait(file, &list->hidraw->wait, wait);
if (list->head != list->tail)
- return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!list->hidraw->exist)
- return EPOLLERR | EPOLLHUP;
- return 0;
+ mask |= EPOLLERR | EPOLLHUP;
+ return mask;
}
static int hidraw_open(struct inode *inode, struct file *file)
@@ -450,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+ int len = strlen(hid->uniq) + 1;
+ if (len > _IOC_SIZE(cmd))
+ len = _IOC_SIZE(cmd);
+ ret = copy_to_user(user_arg, hid->uniq, len) ?
+ -EFAULT : len;
+ break;
+ }
}
ret = -ENOTTY;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index a358e61fbc82..009000c5d55c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,8 @@
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
+
/* flags */
#define I2C_HID_STARTED 0
@@ -175,6 +177,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_BOGUS_IRQ },
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
I2C_HID_QUIRK_RESET_ON_RESUME },
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ I2C_HID_QUIRK_BAD_INPUT_SIZE },
{ 0, 0 }
};
@@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
}
if ((ret_size > size) || (ret_size < 2)) {
- dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
- __func__, size, ret_size);
- return;
+ if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) {
+ ihid->inbuf[0] = size & 0xff;
+ ihid->inbuf[1] = size >> 8;
+ ret_size = size;
+ } else {
+ dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ __func__, size, ret_size);
+ return;
+ }
}
i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
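
The quirk path above rewrites the report's two-byte little-endian length
header with the size the host actually requested instead of dropping the
buffer. The two byte stores are equivalent to the usual helper, as a
sketch (assuming <asm/unaligned.h> is pulled in here):

	put_unaligned_le16(size, ihid->inbuf);	/* [0] = low, [1] = high */
	ret_size = size;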
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 6c1e6110867f..1fb294ca463e 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,7 +24,9 @@
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
#define CML_LP_DEVICE_ID 0x02FC
+#define CMP_H_DEVICE_ID 0x06FC
#define EHL_Ax_DEVICE_ID 0x4BB3
+#define TGL_LP_DEVICE_ID 0xA0FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 784dcc8c7022..f491d8b4e24c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index fa0cc0899827..8fe3efcb8327 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -766,13 +766,14 @@ unlock:
static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */
poll_wait(file, &uhid->waitq, wait);
if (uhid->head != uhid->tail)
- return EPOLLIN | EPOLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
- return 0;
+ return mask;
}
static const struct file_operations uhid_fops = {
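
This hunk and the hidraw one above apply the same poll contract: a
character device that can always accept writes keeps EPOLLOUT set and
ORs in the readable/error bits instead of returning them exclusively,
so a writer polling for POLLOUT is never stalled by queued input. The
general shape, with the waitqueue and state checks left as placeholders:

	static __poll_t chardev_poll(struct file *file, poll_table *wait)
	{
		__poll_t mask = EPOLLOUT | EPOLLWRNORM;	/* always writable */

		poll_wait(file, &dev_waitq, wait);
		if (data_queued)	/* device-specific readiness check */
			mask |= EPOLLIN | EPOLLRDNORM;
		if (device_gone)	/* disconnect reported alongside */
			mask |= EPOLLERR | EPOLLHUP;
		return mask;
	}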
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e421cdf2d1a4..a970b809d778 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file)
return 0;
}
+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+ struct hiddev_list *list;
+ int error;
+
+ lockdep_assert_held(&hiddev->existancelock);
+
+ list = vzalloc(sizeof(*list));
+ if (!list)
+ return -ENOMEM;
+
+ mutex_init(&list->thread_lock);
+ list->hiddev = hiddev;
+
+ if (!hiddev->open++) {
+ error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+ if (error < 0)
+ goto err_drop_count;
+
+ error = hid_hw_open(hiddev->hid);
+ if (error < 0)
+ goto err_normal_power;
+ }
+
+ spin_lock_irq(&hiddev->list_lock);
+ list_add_tail(&list->node, &hiddev->list);
+ spin_unlock_irq(&hiddev->list_lock);
+
+ file->private_data = list;
+
+ return 0;
+
+err_normal_power:
+ hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+ hiddev->open--;
+ vfree(list);
+ return error;
+}
+
/*
* open file op
*/
static int hiddev_open(struct inode *inode, struct file *file)
{
- struct hiddev_list *list;
struct usb_interface *intf;
struct hid_device *hid;
struct hiddev *hiddev;
@@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
+
hid = usb_get_intfdata(intf);
hiddev = hid->hiddev;
- if (!(list = vzalloc(sizeof(struct hiddev_list))))
- return -ENOMEM;
- mutex_init(&list->thread_lock);
- list->hiddev = hiddev;
- file->private_data = list;
-
- /*
- * no need for locking because the USB major number
- * is shared which usbcore guards against disconnect
- */
- if (list->hiddev->exist) {
- if (!list->hiddev->open++) {
- res = hid_hw_open(hiddev->hid);
- if (res < 0)
- goto bail;
- }
- } else {
- res = -ENODEV;
- goto bail;
- }
-
- spin_lock_irq(&list->hiddev->list_lock);
- list_add_tail(&list->node, &hiddev->list);
- spin_unlock_irq(&list->hiddev->list_lock);
-
mutex_lock(&hiddev->existancelock);
- /*
- * recheck exist with existance lock held to
- * avoid opening a disconnected device
- */
- if (!list->hiddev->exist) {
- res = -ENODEV;
- goto bail_unlock;
- }
- if (!list->hiddev->open++)
- if (list->hiddev->exist) {
- struct hid_device *hid = hiddev->hid;
- res = hid_hw_power(hid, PM_HINT_FULLON);
- if (res < 0)
- goto bail_unlock;
- res = hid_hw_open(hid);
- if (res < 0)
- goto bail_normal_power;
- }
- mutex_unlock(&hiddev->existancelock);
- return 0;
-bail_normal_power:
- hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+ res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
mutex_unlock(&hiddev->existancelock);
- spin_lock_irq(&list->hiddev->list_lock);
- list_del(&list->node);
- spin_unlock_irq(&list->hiddev->list_lock);
-bail:
- file->private_data = NULL;
- vfree(list);
return res;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index ccb74529bc78..d99a9d407671 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
(hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */
hdev->product == 0x392 || /* Intuos Pro 2 */
- hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
+ hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */
+ hdev->product == 0x3AA)) { /* MobileStudio Pro */
value = (field->logical_maximum - value);
if (hdev->product == 0x357 || hdev->product == 0x358 ||
hdev->product == 0x392)
value = wacom_offset_rotation(input, usage, value, 3, 16);
else if (hdev->product == 0x34d || hdev->product == 0x34e ||
- hdev->product == 0x398 || hdev->product == 0x399)
+ hdev->product == 0x398 || hdev->product == 0x399 ||
+ hdev->product == 0x3AA)
value = wacom_offset_rotation(input, usage, value, 1, 2);
}
else {
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 8eb167540b4f..0370364169c4 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1351,6 +1351,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
{ CHANNELMSG_19, 0, NULL },
{ CHANNELMSG_20, 0, NULL },
{ CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
+ { CHANNELMSG_22, 0, NULL },
+ { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL },
};
/*
@@ -1362,25 +1364,16 @@ void vmbus_onmessage(void *context)
{
struct hv_message *msg = context;
struct vmbus_channel_message_header *hdr;
- int size;
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
- size = msg->header.payload_size;
trace_vmbus_on_message(hdr);
- if (hdr->msgtype >= CHANNELMSG_COUNT) {
- pr_err("Received invalid channel message type %d size %d\n",
- hdr->msgtype, size);
- print_hex_dump_bytes("", DUMP_PREFIX_NONE,
- (unsigned char *)msg->u.payload, size);
- return;
- }
-
- if (channel_message_table[hdr->msgtype].message_handler)
- channel_message_table[hdr->msgtype].message_handler(hdr);
- else
- pr_err("Unhandled channel message type %d\n", hdr->msgtype);
+ /*
+ * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go out
+ * of bounds and that the message_handler pointer cannot be NULL.
+ */
+ channel_message_table[hdr->msgtype].message_handler(hdr);
}
/*
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b155d0052981..a02ce43d778d 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1217,10 +1217,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
unsigned int i, j;
struct page *pg;
- if (num_pages < alloc_unit)
- return 0;
-
- for (i = 0; (i * alloc_unit) < num_pages; i++) {
+ for (i = 0; i < num_pages / alloc_unit; i++) {
if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
HV_HYP_PAGE_SIZE)
return i * alloc_unit;
@@ -1258,7 +1255,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
}
- return num_pages;
+ return i * alloc_unit;
}
static void balloon_up(struct work_struct *dummy)
@@ -1273,9 +1270,6 @@ static void balloon_up(struct work_struct *dummy)
long avail_pages;
unsigned long floor;
- /* The host balloons pages in 2M granularity. */
- WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
-
/*
* We will attempt 2M allocations. However, if we fail to
* allocate 2M chunks, we will go back to PAGE_SIZE allocations.
@@ -1285,14 +1279,13 @@ static void balloon_up(struct work_struct *dummy)
avail_pages = si_mem_available();
floor = compute_balloon_floor();
- /* Refuse to balloon below the floor, keep the 2M granularity. */
+ /* Refuse to balloon below the floor. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
pr_warn("Balloon request will be partially fulfilled. %s\n",
avail_pages < num_pages ? "Not enough memory." :
"Balloon floor reached.");
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
- num_pages -= num_pages % PAGES_IN_2M;
}
while (!done) {
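
The new loop bound also fixes the accounting. Take num_pages = 1000 with
alloc_unit = 512 (one 2 MB chunk in 4 KB pages): the old condition
(i * alloc_unit) < num_pages allowed two iterations, allocating 1024
pages (more than requested) and then returned num_pages regardless of
how many pages were actually ballooned. The new form runs at most
1000 / 512 = 1 iteration and returns the true count i * alloc_unit = 512.
With partial chunks handled correctly, the num_pages < alloc_unit
early-out and the 2 MB rounding in balloon_up() become unnecessary,
which is why they are deleted.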
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 08fa4a5de644..bb9ba3f7c794 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -346,9 +346,61 @@ int hv_fcopy_init(struct hv_util_service *srv)
return 0;
}
+static void hv_fcopy_cancel_work(void)
+{
+ cancel_delayed_work_sync(&fcopy_timeout_work);
+ cancel_work_sync(&fcopy_send_work);
+}
+
+int hv_fcopy_pre_suspend(void)
+{
+ struct vmbus_channel *channel = fcopy_transaction.recv_channel;
+ struct hv_fcopy_hdr *fcopy_msg;
+
+ /*
+ * Fake a CANCEL_FCOPY message for the user space daemon in case the
+ * daemon is in the middle of copying some file. It doesn't matter if
+ * there is already a message pending to be delivered to the user
+ * space since we force fcopy_transaction.state to be HVUTIL_READY, so
+ * the user space daemon's write() will fail with EINVAL (see
+ * fcopy_on_msg()), and the daemon will reset the device by closing
+ * and re-opening it.
+ */
+ fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
+ if (!fcopy_msg)
+ return -ENOMEM;
+
+ tasklet_disable(&channel->callback_event);
+
+ fcopy_msg->operation = CANCEL_FCOPY;
+
+ hv_fcopy_cancel_work();
+
+ /* We don't care about the return value. */
+ hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);
+
+ kfree(fcopy_msg);
+
+ fcopy_transaction.state = HVUTIL_READY;
+
+ /* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
+ return 0;
+}
+
+int hv_fcopy_pre_resume(void)
+{
+ struct vmbus_channel *channel = fcopy_transaction.recv_channel;
+
+ tasklet_enable(&channel->callback_event);
+
+ return 0;
+}
+
void hv_fcopy_deinit(void)
{
fcopy_transaction.state = HVUTIL_DEVICE_DYING;
- cancel_delayed_work_sync(&fcopy_timeout_work);
+
+ hv_fcopy_cancel_work();
+
hvutil_transport_destroy(hvt);
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ae7c028dc5a8..e74b144b8f3d 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -758,11 +758,50 @@ hv_kvp_init(struct hv_util_service *srv)
return 0;
}
-void hv_kvp_deinit(void)
+static void hv_kvp_cancel_work(void)
{
- kvp_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&kvp_host_handshake_work);
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
+}
+
+int hv_kvp_pre_suspend(void)
+{
+ struct vmbus_channel *channel = kvp_transaction.recv_channel;
+
+ tasklet_disable(&channel->callback_event);
+
+ /*
+ * If there is a pending transaction, it's unnecessary to tell the host
+ * that the transaction will fail, because that is implied when
+ * util_suspend() calls vmbus_close() later.
+ */
+ hv_kvp_cancel_work();
+
+ /*
+ * Force the state to READY to handle the ICMSGTYPE_NEGOTIATE message
+ * later. The user space daemon may go out of order and its write()
+ * may fail with EINVAL: this doesn't matter since the daemon will
+ * reset the device by closing and re-opening it.
+ */
+ kvp_transaction.state = HVUTIL_READY;
+ return 0;
+}
+
+int hv_kvp_pre_resume(void)
+{
+ struct vmbus_channel *channel = kvp_transaction.recv_channel;
+
+ tasklet_enable(&channel->callback_event);
+
+ return 0;
+}
+
+void hv_kvp_deinit(void)
+{
+ kvp_transaction.state = HVUTIL_DEVICE_DYING;
+
+ hv_kvp_cancel_work();
+
hvutil_transport_destroy(hvt);
}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 03b6454268b3..1c75b38f0d6d 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -379,10 +379,61 @@ hv_vss_init(struct hv_util_service *srv)
return 0;
}
-void hv_vss_deinit(void)
+static void hv_vss_cancel_work(void)
{
- vss_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_handle_request_work);
+}
+
+int hv_vss_pre_suspend(void)
+{
+ struct vmbus_channel *channel = vss_transaction.recv_channel;
+ struct hv_vss_msg *vss_msg;
+
+ /*
+ * Fake a THAW message for the user space daemon in case the daemon
+ * has frozen the file systems. It doesn't matter if there is already
+ * a message pending to be delivered to the user space since we force
+ * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
+ * write() will fail with EINVAL (see vss_on_msg()), and the daemon
+ * will reset the device by closing and re-opening it.
+ */
+ vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
+ if (!vss_msg)
+ return -ENOMEM;
+
+ tasklet_disable(&channel->callback_event);
+
+ vss_msg->vss_hdr.operation = VSS_OP_THAW;
+
+ /* Cancel any possible pending work. */
+ hv_vss_cancel_work();
+
+ /* We don't care about the return value. */
+ hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
+
+ kfree(vss_msg);
+
+ vss_transaction.state = HVUTIL_READY;
+
+ /* tasklet_enable() will be called in hv_vss_pre_resume(). */
+ return 0;
+}
+
+int hv_vss_pre_resume(void)
+{
+ struct vmbus_channel *channel = vss_transaction.recv_channel;
+
+ tasklet_enable(&channel->callback_event);
+
+ return 0;
+}
+
+void hv_vss_deinit(void)
+{
+ vss_transaction.state = HVUTIL_DEVICE_DYING;
+
+ hv_vss_cancel_work();
+
hvutil_transport_destroy(hvt);
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 766bd8457346..92ee0fe4c919 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -24,6 +24,10 @@
#define SD_MAJOR 3
#define SD_MINOR 0
+#define SD_MINOR_1 1
+#define SD_MINOR_2 2
+#define SD_VERSION_3_1 (SD_MAJOR << 16 | SD_MINOR_1)
+#define SD_VERSION_3_2 (SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
#define SD_MAJOR_1 1
@@ -50,8 +54,10 @@ static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;
-#define SD_VER_COUNT 2
+#define SD_VER_COUNT 4
static const int sd_versions[] = {
+ SD_VERSION_3_2,
+ SD_VERSION_3_1,
SD_VERSION,
SD_VERSION_1
};
@@ -75,18 +81,56 @@ static const int fw_versions[] = {
UTIL_WS2K8_FW_VERSION
};
+/*
+ * Send the "hibernate" udev event in a thread context.
+ */
+struct hibernate_work_context {
+ struct work_struct work;
+ struct hv_device *dev;
+};
+
+static struct hibernate_work_context hibernate_context;
+static bool hibernation_supported;
+
+static void send_hibernate_uevent(struct work_struct *work)
+{
+ char *uevent_env[2] = { "EVENT=hibernate", NULL };
+ struct hibernate_work_context *ctx;
+
+ ctx = container_of(work, struct hibernate_work_context, work);
+
+ kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
+
+ pr_info("Sent hibernation uevent\n");
+}
+
+static int hv_shutdown_init(struct hv_util_service *srv)
+{
+ struct vmbus_channel *channel = srv->channel;
+
+ INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
+ hibernate_context.dev = channel->device_obj;
+
+ hibernation_supported = hv_is_hibernation_supported();
+
+ return 0;
+}
+
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
.util_cb = shutdown_onchannelcallback,
+ .util_init = hv_shutdown_init,
};
static int hv_timesync_init(struct hv_util_service *srv);
+static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);
static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
.util_cb = timesync_onchannelcallback,
.util_init = hv_timesync_init,
+ .util_pre_suspend = hv_timesync_pre_suspend,
.util_deinit = hv_timesync_deinit,
};
@@ -98,18 +142,24 @@ static struct hv_util_service util_heartbeat = {
static struct hv_util_service util_kvp = {
.util_cb = hv_kvp_onchannelcallback,
.util_init = hv_kvp_init,
+ .util_pre_suspend = hv_kvp_pre_suspend,
+ .util_pre_resume = hv_kvp_pre_resume,
.util_deinit = hv_kvp_deinit,
};
static struct hv_util_service util_vss = {
.util_cb = hv_vss_onchannelcallback,
.util_init = hv_vss_init,
+ .util_pre_suspend = hv_vss_pre_suspend,
+ .util_pre_resume = hv_vss_pre_resume,
.util_deinit = hv_vss_deinit,
};
static struct hv_util_service util_fcopy = {
.util_cb = hv_fcopy_onchannelcallback,
.util_init = hv_fcopy_init,
+ .util_pre_suspend = hv_fcopy_pre_suspend,
+ .util_pre_resume = hv_fcopy_pre_resume,
.util_deinit = hv_fcopy_deinit,
};
@@ -118,17 +168,27 @@ static void perform_shutdown(struct work_struct *dummy)
orderly_poweroff(true);
}
+static void perform_restart(struct work_struct *dummy)
+{
+ orderly_reboot();
+}
+
/*
* Perform the shutdown operation in a thread context.
*/
static DECLARE_WORK(shutdown_work, perform_shutdown);
+/*
+ * Perform the restart operation in a thread context.
+ */
+static DECLARE_WORK(restart_work, perform_restart);
+
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
+ struct work_struct *work = NULL;
u32 recvlen;
u64 requestid;
- bool execute_shutdown = false;
u8 *shut_txf_buf = util_shutdown.recv_buffer;
struct shutdown_msg_data *shutdown_msg;
@@ -157,19 +217,37 @@ static void shutdown_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
+ /*
+ * shutdown_msg->flags can be 0 (shut down), 2 (reboot), or
+ * 4 (hibernate). Bit 0 may additionally be set, meaning the
+ * request must be performed by force. Linux always tries to
+ * perform the request by force.
+ */
switch (shutdown_msg->flags) {
case 0:
case 1:
icmsghdrp->status = HV_S_OK;
- execute_shutdown = true;
-
+ work = &shutdown_work;
pr_info("Shutdown request received -"
" graceful shutdown initiated\n");
break;
+ case 2:
+ case 3:
+ icmsghdrp->status = HV_S_OK;
+ work = &restart_work;
+ pr_info("Restart request received -"
+ " graceful restart initiated\n");
+ break;
+ case 4:
+ case 5:
+ pr_info("Hibernation request received\n");
+ icmsghdrp->status = hibernation_supported ?
+ HV_S_OK : HV_E_FAIL;
+ if (hibernation_supported)
+ work = &hibernate_context.work;
+ break;
default:
icmsghdrp->status = HV_E_FAIL;
- execute_shutdown = false;
-
pr_info("Shutdown request received -"
" Invalid request\n");
break;
@@ -184,8 +262,8 @@ static void shutdown_onchannelcallback(void *context)
VM_PKT_DATA_INBAND, 0);
}
- if (execute_shutdown == true)
- schedule_work(&shutdown_work);
+ if (work)
+ schedule_work(work);
}
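
Given the flag encoding in the comment above (bit 0 = "forced", which
Linux always honors), the case pairs are just the three operations with
and without the force bit. A compact equivalent, with the
icmsghdrp->status reporting omitted for brevity:

	switch (shutdown_msg->flags & ~1u) {	/* mask the force bit */
	case 0:
		work = &shutdown_work;		/* power off */
		break;
	case 2:
		work = &restart_work;		/* reboot */
		break;
	case 4:
		if (hibernation_supported)
			work = &hibernate_context.work;
		break;
	}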
/*
@@ -211,7 +289,7 @@ static struct timespec64 hv_get_adj_host_time(void)
unsigned long flags;
spin_lock_irqsave(&host_ts.lock, flags);
- reftime = hyperv_cs->read(hyperv_cs);
+ reftime = hv_read_reference_counter();
newtime = host_ts.host_time + (reftime - host_ts.ref_time);
ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
spin_unlock_irqrestore(&host_ts.lock, flags);
@@ -250,7 +328,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
*/
spin_lock_irqsave(&host_ts.lock, flags);
- cur_reftime = hyperv_cs->read(hyperv_cs);
+ cur_reftime = hv_read_reference_counter();
host_ts.host_time = hosttime;
host_ts.ref_time = cur_reftime;
@@ -315,7 +393,7 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
adj_guesttime(timedatap->parenttime,
- hyperv_cs->read(hyperv_cs),
+ hv_read_reference_counter(),
timedatap->flags);
}
}
@@ -441,6 +519,44 @@ static int util_remove(struct hv_device *dev)
return 0;
}
+/*
+ * When we're in util_suspend(), all the userspace processes have been frozen
+ * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
+ * after the whole resume procedure, including util_resume(), finishes.
+ */
+static int util_suspend(struct hv_device *dev)
+{
+ struct hv_util_service *srv = hv_get_drvdata(dev);
+ int ret = 0;
+
+ if (srv->util_pre_suspend) {
+ ret = srv->util_pre_suspend();
+ if (ret)
+ return ret;
+ }
+
+ vmbus_close(dev->channel);
+
+ return 0;
+}
+
+static int util_resume(struct hv_device *dev)
+{
+ struct hv_util_service *srv = hv_get_drvdata(dev);
+ int ret = 0;
+
+ if (srv->util_pre_resume) {
+ ret = srv->util_pre_resume();
+ if (ret)
+ return ret;
+ }
+
+ ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
+ 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ dev->channel);
+ return ret;
+}
+
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
{ HV_SHUTDOWN_GUID,
@@ -477,6 +593,8 @@ static struct hv_driver util_drv = {
.id_table = id_table,
.probe = util_probe,
.remove = util_remove,
+ .suspend = util_suspend,
+ .resume = util_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
@@ -524,7 +642,7 @@ static struct ptp_clock *hv_ptp_clock;
static int hv_timesync_init(struct hv_util_service *srv)
{
/* TimeSync requires Hyper-V clocksource. */
- if (!hyperv_cs)
+ if (!hv_read_reference_counter)
return -ENODEV;
spin_lock_init(&host_ts.lock);
@@ -546,11 +664,23 @@ static int hv_timesync_init(struct hv_util_service *srv)
return 0;
}
+static void hv_timesync_cancel_work(void)
+{
+ cancel_work_sync(&adj_time_work);
+}
+
+static int hv_timesync_pre_suspend(void)
+{
+ hv_timesync_cancel_work();
+ return 0;
+}
+
static void hv_timesync_deinit(void)
{
if (hv_ptp_clock)
ptp_clock_unregister(hv_ptp_clock);
- cancel_work_sync(&adj_time_work);
+
+ hv_timesync_cancel_work();
}
static int __init init_hyperv_utils(void)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 20edcfd3b96c..f5fa3b3c9baf 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -352,14 +352,20 @@ void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
+int hv_kvp_pre_suspend(void);
+int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);
int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
+int hv_vss_pre_suspend(void);
+int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);
int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
+int hv_fcopy_pre_suspend(void);
+int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4ef5a66df680..029378c27421 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1033,6 +1033,10 @@ void vmbus_on_msg_dpc(unsigned long data)
}
entry = &channel_message_table[hdr->msgtype];
+
+ if (!entry->message_handler)
+ goto msg_handled;
+
if (entry->handler_type == VMHT_BLOCKING) {
ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
if (ctx == NULL)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 23dfe848979a..47ac20aee06f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -164,6 +164,16 @@ config SENSORS_ADM1031
This driver can also be built as a module. If so, the module
will be called adm1031.
+config SENSORS_ADM1177
+ tristate "Analog Devices ADM1177 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for Analog Devices ADM1177
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called adm1177.
+
config SENSORS_ADM9240
tristate "Analog Devices ADM9240 and compatibles"
depends on I2C
@@ -385,6 +395,16 @@ config SENSORS_ATXP1
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_DRIVETEMP
+ tristate "Hard disk drives with temperature sensors"
+ depends on SCSI && ATA
+ help
+ If you say yes here you get support for the temperature sensor on
+ hard disk drives.
+
+ This driver can also be built as a module. If so, the module
+ will be called drivetemp.
+
config SENSORS_DS620
tristate "Dallas Semiconductor DS620"
depends on I2C
@@ -889,7 +909,7 @@ config SENSORS_MAX197
will be called max197.
config SENSORS_MAX31722
-tristate "MAX31722 temperature sensor"
+ tristate "MAX31722 temperature sensor"
depends on SPI
help
Support for the Maxim Integrated MAX31722/MAX31723 digital
@@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX31730
+ tristate "MAX31730 temperature sensor"
+ depends on I2C
+ help
+ Support for the Maxim Integrated MAX31730 3-Channel Remote
+ Temperature Sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31730.
+
config SENSORS_MAX6621
tristate "Maxim MAX6621 sensor chip"
depends on I2C
@@ -1905,7 +1935,7 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+ tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
depends on !PPC
select HWMON_VID
help
@@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF
the Core 2 Duo. And also the W83627UHG, which is a stripped down
version of the W83627DHG (as far as hardware monitoring goes.)
- This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
- (also known as W83667HG-I), and NCT6776F.
+ This driver also supports Nuvoton W83667HG and W83667HG-B.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 6db5db9cdc29..613f50987965 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
+obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
+obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
@@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
new file mode 100644
index 000000000000..d314223a404a
--- /dev/null
+++ b/drivers/hwmon/adm1177.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin
+ *
+ * Copyright 2015-2019 Analog Devices Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+/* Command Byte Operations */
+#define ADM1177_CMD_V_CONT BIT(0)
+#define ADM1177_CMD_I_CONT BIT(2)
+#define ADM1177_CMD_VRANGE BIT(4)
+
+/* Extended Register */
+#define ADM1177_REG_ALERT_TH 2
+
+#define ADM1177_BITS 12
+
+/**
+ * struct adm1177_state - driver instance specific data
+ * @client: pointer to i2c client
+ * @reg: regulator info for the power supply of the device
+ * @r_sense_uohm: current sense resistor value
+ * @alert_threshold_ua: current limit for shutdown
+ * @vrange_high: internal voltage divider
+ */
+struct adm1177_state {
+ struct i2c_client *client;
+ struct regulator *reg;
+ u32 r_sense_uohm;
+ u32 alert_threshold_ua;
+ bool vrange_high;
+};
+
+static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data)
+{
+ return i2c_master_recv(st->client, data, num);
+}
+
+static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd)
+{
+ return i2c_smbus_write_byte(st->client, cmd);
+}
+
+static int adm1177_write_alert_thr(struct adm1177_state *st,
+ u32 alert_threshold_ua)
+{
+ u64 val;
+ int ret;
+
+ val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm;
+ val = div_u64(val, 105840000U);
+ val = div_u64(val, 1000U);
+ if (val > 0xFF)
+ val = 0xFF;
+
+ ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH,
+ val);
+ if (ret)
+ return ret;
+
+ st->alert_threshold_ua = alert_threshold_ua;
+ return 0;
+}
+
+static int adm1177_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+ u8 data[3];
+ long dummy;
+ int ret;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[1] << 4) | (data[2] & 0xF);
+ /*
+ * convert to milliamperes:
+ * ((105.84 mV / 4096) * raw) / sense resistor
+ */
+ *val = div_u64((105840000ull * dummy),
+ 4096 * st->r_sense_uohm);
+ return 0;
+ case hwmon_curr_max_alarm:
+ *val = st->alert_threshold_ua;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_in:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[0] << 4) | (data[2] >> 4);
+ /*
+ * convert to millivolts based on resistor division
+ * (V_fullscale / 4096) * raw
+ */
+ if (st->vrange_high)
+ dummy *= 26350;
+ else
+ dummy *= 6650;
+
+ *val = DIV_ROUND_CLOSEST(dummy, 4096);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adm1177_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_max_alarm:
+ adm1177_write_alert_thr(st, val);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adm1177_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adm1177_state *st = data;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ if (st->r_sense_uohm)
+ return 0444;
+ return 0;
+ case hwmon_curr_max_alarm:
+ if (st->r_sense_uohm)
+ return 0644;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *adm1177_info[] = {
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops adm1177_hwmon_ops = {
+ .is_visible = adm1177_is_visible,
+ .read = adm1177_read,
+ .write = adm1177_write,
+};
+
+static const struct hwmon_chip_info adm1177_chip_info = {
+ .ops = &adm1177_hwmon_ops,
+ .info = adm1177_info,
+};
+
+static void adm1177_remove(void *data)
+{
+ struct adm1177_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int adm1177_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct adm1177_state *st;
+ u32 alert_threshold_ua;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->client = client;
+
+ st->reg = devm_regulator_get_optional(&client->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ if (PTR_ERR(st->reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ st->reg = NULL;
+ } else {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&client->dev, adm1177_remove,
+ st);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &st->r_sense_uohm))
+ st->r_sense_uohm = 0;
+ if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp",
+ &alert_threshold_ua)) {
+ if (st->r_sense_uohm)
+ /*
+			 * set the maximum default value from the datasheet,
+			 * based on the shunt resistor
+ */
+ alert_threshold_ua = div_u64(105840000000,
+ st->r_sense_uohm);
+ else
+ alert_threshold_ua = 0;
+ }
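+	/*
+	 * Example, assuming an illustrative 50000 uOhm shunt: the default
+	 * threshold is 105840000000 / 50000 = 2116800 uA, which maps back
+	 * to the full-scale register value 0xFF.
+	 */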
+ st->vrange_high = device_property_read_bool(dev,
+ "adi,vrange-high-enable");
+ if (alert_threshold_ua && st->r_sense_uohm)
+ adm1177_write_alert_thr(st, alert_threshold_ua);
+
+ ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT |
+ ADM1177_CMD_I_CONT |
+ (st->vrange_high ? 0 : ADM1177_CMD_VRANGE));
+ if (ret)
+ return ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, client->name, st,
+ &adm1177_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id adm1177_id[] = {
+ {"adm1177", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adm1177_id);
+
+static const struct of_device_id adm1177_dt_ids[] = {
+ { .compatible = "adi,adm1177" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
+
+static struct i2c_driver adm1177_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adm1177",
+ .of_match_table = adm1177_dt_ids,
+ },
+ .probe = adm1177_probe,
+ .id_table = adm1177_id,
+};
+module_i2c_driver(adm1177_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6c64d50c9aae..01c2eeb02aa9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
long reg;
if (bypass_attn & (1 << channel))
- reg = (volt * 1024) / 2250;
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+ (r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
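+/*
+ * Example of the rounding change above: volt = 1100 on a bypassed channel
+ * gives DIV_ROUND_CLOSEST(1100 * 1024, 2250) = 501, where the previous
+ * truncating division returned 500.
+ */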
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 17583bf8c2dc..d4c83009d625 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -595,19 +595,18 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
return single_open(file, i8k_proc_show, NULL);
}
-static const struct file_operations i8k_fops = {
- .owner = THIS_MODULE,
- .open = i8k_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .unlocked_ioctl = i8k_ioctl,
+static const struct proc_ops i8k_proc_ops = {
+ .proc_open = i8k_open_fs,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_ioctl = i8k_ioctl,
};
static void __init i8k_init_procfs(void)
{
/* Register the proc entry */
- proc_create("i8k", 0, NULL, &i8k_fops);
+ proc_create("i8k", 0, NULL, &i8k_proc_ops);
}
static void __exit i8k_exit_procfs(void)
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
new file mode 100644
index 000000000000..370d0c74eb01
--- /dev/null
+++ b/drivers/hwmon/drivetemp.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hwmon client for disk and solid state drives with temperature sensors
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ *
+ * With input from:
+ * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors.
+ * (C) 2018 Linus Walleij
+ *
+ * hwmon: Driver for SCSI/ATA temperature sensors
+ * by Constantin Baranov <const@mimas.ru>, submitted September 2009
+ *
+ * This driver supports reporting the temperature of SATA drives. It can be
+ * easily extended to report the temperature of SCSI drives.
+ *
+ * The primary means to read drive temperatures and temperature limits
+ * for ATA drives is the SCT Command Transport feature set as specified in
+ * ATA8-ACS.
+ * It can be used to read the current drive temperature, temperature limits,
+ * and historic minimum and maximum temperatures. The SCT Command Transport
+ * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set
+ * (ATA8-ACS)".
+ *
+ * If the SCT Command Transport feature set is not available, drive temperatures
+ * may be readable through SMART attributes. Since SMART attributes are not well
+ * defined, this method is only used as a fallback mechanism.
+ *
+ * There are three SMART attributes which may report drive temperatures.
+ * Those are defined as follows (from
+ * http://www.cropel.com/library/smart-attribute-list.aspx).
+ *
+ * 190 Temperature Temperature, monitored by a sensor somewhere inside
+ *			the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 194 Temperature Temperature, monitored by a sensor somewhere inside
+ *			the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 231 Temperature Temperature, monitored by a sensor somewhere inside
+ *			the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * Wikipedia defines attributes a bit differently.
+ *
+ * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer
+ * Difference or to set a minimum threshold which corresponds to a
+ * Airflow maximum temperature. This also follows the convention of
+ * Temperature 100 being a best-case value and lower values being
+ * undesirable. However, some older drives may instead
+ * report raw Temperature (identical to 0xC2) or
+ * Temperature minus 50 here.
+ * 194 Temperature or Indicates the device temperature, if the appropriate
+ * Temperature sensor is fitted. Lowest byte of the raw value contains
+ * Celsius the exact temperature value (Celsius degrees).
+ * 231 Life Left Indicates the approximate SSD life left, in terms of
+ * (SSDs) or program/erase cycles or available reserved blocks.
+ * Temperature A normalized value of 100 represents a new drive, with
+ * a threshold value at 10 indicating a need for
+ * replacement. A value of 0 may mean that the drive is
+ * operating in read-only mode to allow data recovery.
+ * Previously (pre-2010) occasionally used for Drive
+ * Temperature (more typically reported at 0xC2).
+ *
+ * The common denominator is that the first raw byte reports the temperature
+ * in degrees C on almost all drives. Some drives may report a fractional
+ * temperature in the second raw byte.
+ *
+ * Known exceptions (from libatasmart):
+ * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths
+ *   of a degree C in the first two raw bytes.
+ * - A few Maxtor drives report an unknown or bad value in attribute 194.
+ * - Certain Apple SSD drives report an unknown value in attribute 190.
+ * Only certain firmware versions are affected.
+ *
+ * Those exceptions affect older ATA drives and are currently ignored.
+ * Also, the second raw byte (possibly reporting the fractional temperature)
+ * is currently ignored.
+ *
+ * Many drives also report temperature limits in additional SMART data raw
+ * bytes. The format of those is not well defined and varies widely.
+ * The driver does not currently attempt to report those limits.
+ *
+ * According to data in smartmontools, attribute 231 is rarely used to report
+ * drive temperatures. At the same time, several drives report SSD life left
+ * in attribute 231, but do not support temperature sensors. For this reason,
+ * attribute 231 is currently ignored.
+ *
+ * Following the above definitions, temperatures are reported as follows.
+ * - If SCT Command Transport is supported, it is used to read the
+ *   temperature and, if available, temperature limits.
+ * - Otherwise, if SMART attribute 194 is supported, it is used to read
+ * the temperature.
+ * - Otherwise, if SMART attribute 190 is supported, it is used to read
+ * the temperature.
+ */
+
+#include <linux/ata.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_proto.h>
+
+struct drivetemp_data {
+ struct list_head list; /* list of instantiated devices */
+ struct mutex lock; /* protect data buffer accesses */
+ struct scsi_device *sdev; /* SCSI device */
+ struct device *dev; /* instantiating device */
+ struct device *hwdev; /* hardware monitoring device */
+ u8 smartdata[ATA_SECT_SIZE]; /* local buffer */
+ int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val);
+ bool have_temp_lowest; /* lowest temp in SCT status */
+ bool have_temp_highest; /* highest temp in SCT status */
+ bool have_temp_min; /* have min temp */
+ bool have_temp_max; /* have max temp */
+ bool have_temp_lcrit; /* have lower critical limit */
+ bool have_temp_crit; /* have critical limit */
+ int temp_min; /* min temp */
+ int temp_max; /* max temp */
+ int temp_lcrit; /* lower critical limit */
+ int temp_crit; /* critical limit */
+};
+
+static LIST_HEAD(drivetemp_devlist);
+
+#define ATA_MAX_SMART_ATTRS 30
+#define SMART_TEMP_PROP_190 190
+#define SMART_TEMP_PROP_194 194
+
+#define SCT_STATUS_REQ_ADDR 0xe0
+#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */
+#define SCT_STATUS_VERSION_HIGH 1
+#define SCT_STATUS_TEMP 200
+#define SCT_STATUS_TEMP_LOWEST 201
+#define SCT_STATUS_TEMP_HIGHEST 202
+#define SCT_READ_LOG_ADDR 0xe1
+#define SMART_READ_LOG 0xd5
+#define SMART_WRITE_LOG 0xd6
+
+#define INVALID_TEMP 0x80
+
+#define temp_is_valid(temp) ((temp) != INVALID_TEMP)
+#define temp_from_sct(temp) (((s8)(temp)) * 1000)
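+
+/*
+ * Example: an SCT status temperature byte of 0x1c (28) maps to 28000
+ * millidegrees C, and 0xe7 (-25) maps to -25000.
+ */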
+
+static inline bool ata_id_smart_supported(u16 *id)
+{
+ return id[ATA_ID_COMMAND_SET_1] & BIT(0);
+}
+
+static inline bool ata_id_smart_enabled(u16 *id)
+{
+ return id[ATA_ID_CFS_ENABLE_1] & BIT(0);
+}
+
+static int drivetemp_scsi_command(struct drivetemp_data *st,
+ u8 ata_command, u8 feature,
+ u8 lba_low, u8 lba_mid, u8 lba_high)
+{
+ u8 scsi_cmd[MAX_COMMAND_SIZE];
+ int data_dir;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = ATA_16;
+ if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) {
+ scsi_cmd[1] = (5 << 1); /* PIO Data-out */
+ /*
+ * No off.line or cc, write to dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x06;
+ data_dir = DMA_TO_DEVICE;
+ } else {
+ scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+ /*
+ * No off.line or cc, read from dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x0e;
+ data_dir = DMA_FROM_DEVICE;
+ }
+ scsi_cmd[4] = feature;
+ scsi_cmd[6] = 1; /* 1 sector */
+ scsi_cmd[8] = lba_low;
+ scsi_cmd[10] = lba_mid;
+ scsi_cmd[12] = lba_high;
+ scsi_cmd[14] = ata_command;
+
+ return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
+ st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
+ NULL);
+}
+
+static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+ u8 select)
+{
+ return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select,
+ ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS);
+}
+
+static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr,
+ long *temp)
+{
+ u8 *buf = st->smartdata;
+ bool have_temp = false;
+ u8 temp_raw;
+ u8 csum;
+ int err;
+ int i;
+
+ err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0);
+ if (err)
+ return err;
+
+ /* Checksum the read value table */
+ csum = 0;
+ for (i = 0; i < ATA_SECT_SIZE; i++)
+ csum += buf[i];
+ if (csum) {
+ dev_dbg(&st->sdev->sdev_gendev,
+ "checksum error reading SMART values\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
+ u8 *attr = buf + i * 12;
+ int id = attr[2];
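+		/*
+		 * Conventional SMART table layout: each entry is 12 bytes,
+		 * with the table offset by 2, so attr[2] is the attribute ID
+		 * and attr[7] the first raw data byte, which on most drives
+		 * holds the temperature in degrees C.
+		 */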
+
+ if (!id)
+ continue;
+
+ if (id == SMART_TEMP_PROP_190) {
+ temp_raw = attr[7];
+ have_temp = true;
+ }
+ if (id == SMART_TEMP_PROP_194) {
+ temp_raw = attr[7];
+ have_temp = true;
+ break;
+ }
+ }
+
+ if (have_temp) {
+ *temp = temp_raw * 1000;
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+{
+ u8 *buf = st->smartdata;
+ int err;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ return err;
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+ break;
+ case hwmon_temp_lowest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+ break;
+ case hwmon_temp_highest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int drivetemp_identify_sata(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+ u8 *buf = st->smartdata;
+ struct scsi_vpd *vpd;
+ bool is_ata, is_sata;
+ bool have_sct_data_table;
+ bool have_sct_temp;
+ bool have_smart;
+ bool have_sct;
+ u16 *ata_id;
+ u16 version;
+ long temp;
+ int err;
+
+ /* SCSI-ATA Translation present? */
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+
+ /*
+ * Verify that ATA IDENTIFY DEVICE data is included in ATA Information
+ * VPD and that the drive implements the SATA protocol.
+ */
+ if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA ||
+ vpd->data[36] != 0x34) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ ata_id = (u16 *)&vpd->data[60];
+ is_ata = ata_id_is_ata(ata_id);
+ is_sata = ata_id_is_sata(ata_id);
+ have_sct = ata_id_sct_supported(ata_id);
+ have_sct_data_table = ata_id_sct_data_tables(ata_id);
+ have_smart = ata_id_smart_supported(ata_id) &&
+ ata_id_smart_enabled(ata_id);
+
+ rcu_read_unlock();
+
+ /* bail out if this is not a SATA device */
+ if (!is_ata || !is_sata)
+ return -ENODEV;
+ if (!have_sct)
+ goto skip_sct;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct;
+
+ version = (buf[SCT_STATUS_VERSION_HIGH] << 8) |
+ buf[SCT_STATUS_VERSION_LOW];
+ if (version != 2 && version != 3)
+ goto skip_sct;
+
+ have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]);
+ if (!have_sct_temp)
+ goto skip_sct;
+
+ st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]);
+ st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+ if (!have_sct_data_table)
+ goto skip_sct;
+
+ /* Request and read temperature history table */
+ memset(buf, '\0', sizeof(st->smartdata));
+ buf[0] = 5; /* data table command */
+ buf[2] = 1; /* read table */
+ buf[4] = 2; /* temperature history table */
+
+ err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ /*
+ * Temperature limits per AT Attachment 8 -
+ * ATA/ATAPI Command Set (ATA8-ACS)
+ */
+ st->have_temp_max = temp_is_valid(buf[6]);
+ st->have_temp_crit = temp_is_valid(buf[7]);
+ st->have_temp_min = temp_is_valid(buf[8]);
+ st->have_temp_lcrit = temp_is_valid(buf[9]);
+
+ st->temp_max = temp_from_sct(buf[6]);
+ st->temp_crit = temp_from_sct(buf[7]);
+ st->temp_min = temp_from_sct(buf[8]);
+ st->temp_lcrit = temp_from_sct(buf[9]);
+
+skip_sct_data:
+ if (have_sct_temp) {
+ st->get_temp = drivetemp_get_scttemp;
+ return 0;
+ }
+skip_sct:
+ if (!have_smart)
+ return -ENODEV;
+ st->get_temp = drivetemp_get_smarttemp;
+ return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp);
+}
+
+static int drivetemp_identify(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+
+ /* Bail out immediately if there is no inquiry data */
+ if (!sdev->inquiry || sdev->inquiry_len < 16)
+ return -ENODEV;
+
+ /* Disk device? */
+ if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC)
+ return -ENODEV;
+
+ return drivetemp_identify_sata(st);
+}
+
+static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct drivetemp_data *st = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_lowest:
+ case hwmon_temp_highest:
+ mutex_lock(&st->lock);
+ err = st->get_temp(st, attr, val);
+ mutex_unlock(&st->lock);
+ break;
+ case hwmon_temp_lcrit:
+ *val = st->temp_lcrit;
+ break;
+ case hwmon_temp_min:
+ *val = st->temp_min;
+ break;
+ case hwmon_temp_max:
+ *val = st->temp_max;
+ break;
+ case hwmon_temp_crit:
+ *val = st->temp_crit;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static umode_t drivetemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct drivetemp_data *st = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_lowest:
+ if (st->have_temp_lowest)
+ return 0444;
+ break;
+ case hwmon_temp_highest:
+ if (st->have_temp_highest)
+ return 0444;
+ break;
+ case hwmon_temp_min:
+ if (st->have_temp_min)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ if (st->have_temp_max)
+ return 0444;
+ break;
+ case hwmon_temp_lcrit:
+ if (st->have_temp_lcrit)
+ return 0444;
+ break;
+ case hwmon_temp_crit:
+ if (st->have_temp_crit)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *drivetemp_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_LCRIT | HWMON_T_CRIT),
+ NULL
+};
+
+static const struct hwmon_ops drivetemp_ops = {
+ .is_visible = drivetemp_is_visible,
+ .read = drivetemp_read,
+};
+
+static const struct hwmon_chip_info drivetemp_chip_info = {
+ .ops = &drivetemp_ops,
+ .info = drivetemp_info,
+};
+
+/*
+ * The device argument points to sdev->sdev_dev. Its parent is
+ * sdev->sdev_gendev, which we can use to get the scsi_device pointer.
+ */
+static int drivetemp_add(struct device *dev, struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev->parent);
+ struct drivetemp_data *st;
+ int err;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->sdev = sdev;
+ st->dev = dev;
+ mutex_init(&st->lock);
+
+ if (drivetemp_identify(st)) {
+ err = -ENODEV;
+ goto abort;
+ }
+
+ st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp",
+ st, &drivetemp_chip_info,
+ NULL);
+ if (IS_ERR(st->hwdev)) {
+ err = PTR_ERR(st->hwdev);
+ goto abort;
+ }
+
+ list_add(&st->list, &drivetemp_devlist);
+ return 0;
+
+abort:
+ kfree(st);
+ return err;
+}
+
+static void drivetemp_remove(struct device *dev, struct class_interface *intf)
+{
+ struct drivetemp_data *st, *tmp;
+
+ list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) {
+ if (st->dev == dev) {
+ list_del(&st->list);
+ hwmon_device_unregister(st->hwdev);
+ kfree(st);
+ break;
+ }
+ }
+}
+
+static struct class_interface drivetemp_interface = {
+ .add_dev = drivetemp_add,
+ .remove_dev = drivetemp_remove,
+};
+
+static int __init drivetemp_init(void)
+{
+ return scsi_register_interface(&drivetemp_interface);
+}
+
+static void __exit drivetemp_exit(void)
+{
+ scsi_unregister_interface(&drivetemp_interface);
+}
+
+module_init(drivetemp_init);
+module_exit(drivetemp_exit);
+
+MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
+MODULE_DESCRIPTION("Hard drive temperature monitor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 1f3b30b085b9..6a30fb453f7a 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
/*
* Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
* also provides the sensor index.
*/
struct hwmon_thermal_data {
- struct hwmon_device *hwdev; /* Reference to hwmon device */
+ struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
NULL
};
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+ kfree(hattr);
+ }
+ kfree(attrs);
+}
+
static void hwmon_dev_release(struct device *dev)
{
- kfree(to_hwmon_device(dev));
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ if (hwdev->group.attrs)
+ hwmon_free_attrs(hwdev->group.attrs);
+ kfree(hwdev->groups);
+ kfree(hwdev);
}
static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
- struct hwmon_device *hwdev = tdata->hwdev;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
if (!tdata)
return -ENOMEM;
- tdata->hwdev = hwdev;
+ tdata->dev = dev;
tdata->index = index;
- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
return 0;
}
#else
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
@@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
- if (type == hwmon_in)
+ if (type == hwmon_in || type == hwmon_intrusion)
return 0;
return 1;
}
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
(type == hwmon_fan && attr == hwmon_fan_label);
}
-static struct attribute *hwmon_genattr(struct device *dev,
- const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
if ((mode & 0222) && !ops->write)
return ERR_PTR(-EINVAL);
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
if (!hattr)
return ERR_PTR(-ENOMEM);
@@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = {
};
static const char * const hwmon_temp_attr_templates[] = {
+ [hwmon_temp_enable] = "temp%d_enable",
[hwmon_temp_input] = "temp%d_input",
[hwmon_temp_type] = "temp%d_type",
[hwmon_temp_lcrit] = "temp%d_lcrit",
@@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = {
};
static const char * const hwmon_in_attr_templates[] = {
+ [hwmon_in_enable] = "in%d_enable",
[hwmon_in_input] = "in%d_input",
[hwmon_in_min] = "in%d_min",
[hwmon_in_max] = "in%d_max",
@@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = {
[hwmon_in_max_alarm] = "in%d_max_alarm",
[hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
[hwmon_in_crit_alarm] = "in%d_crit_alarm",
- [hwmon_in_enable] = "in%d_enable",
};
static const char * const hwmon_curr_attr_templates[] = {
+ [hwmon_curr_enable] = "curr%d_enable",
[hwmon_curr_input] = "curr%d_input",
[hwmon_curr_min] = "curr%d_min",
[hwmon_curr_max] = "curr%d_max",
@@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = {
};
static const char * const hwmon_power_attr_templates[] = {
+ [hwmon_power_enable] = "power%d_enable",
[hwmon_power_average] = "power%d_average",
[hwmon_power_average_interval] = "power%d_average_interval",
[hwmon_power_average_interval_max] = "power%d_interval_max",
@@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = {
};
static const char * const hwmon_energy_attr_templates[] = {
+ [hwmon_energy_enable] = "energy%d_enable",
[hwmon_energy_input] = "energy%d_input",
[hwmon_energy_label] = "energy%d_label",
};
static const char * const hwmon_humidity_attr_templates[] = {
+ [hwmon_humidity_enable] = "humidity%d_enable",
[hwmon_humidity_input] = "humidity%d_input",
[hwmon_humidity_label] = "humidity%d_label",
[hwmon_humidity_min] = "humidity%d_min",
@@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = {
};
static const char * const hwmon_fan_attr_templates[] = {
+ [hwmon_fan_enable] = "fan%d_enable",
[hwmon_fan_input] = "fan%d_input",
[hwmon_fan_label] = "fan%d_label",
[hwmon_fan_min] = "fan%d_min",
@@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = {
[hwmon_pwm_freq] = "pwm%d_freq",
};
+static const char * const hwmon_intrusion_attr_templates[] = {
+ [hwmon_intrusion_alarm] = "intrusion%d_alarm",
+ [hwmon_intrusion_beep] = "intrusion%d_beep",
+};
+
static const char * const *__templates[] = {
[hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
@@ -468,6 +495,7 @@ static const char * const *__templates[] = {
[hwmon_humidity] = hwmon_humidity_attr_templates,
[hwmon_fan] = hwmon_fan_attr_templates,
[hwmon_pwm] = hwmon_pwm_attr_templates,
+ [hwmon_intrusion] = hwmon_intrusion_attr_templates,
};
static const int __templates_size[] = {
@@ -480,6 +508,7 @@ static const int __templates_size[] = {
[hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
[hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
[hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+ [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
};
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
@@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}
-static int hwmon_genattrs(struct device *dev,
- const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
@@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev,
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev,
}
static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
- const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
@@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
if (nattrs == 0)
return ERR_PTR(-EINVAL);
- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);
for (i = 0; chip->info[i]; i++) {
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
- if (ret < 0)
+ if (ret < 0) {
+ hwmon_free_attrs(attrs);
return ERR_PTR(ret);
+ }
aindex += ret;
}
@@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
for (i = 0; groups[i]; i++)
ngroups++;
- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}
- attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
@@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwmon_temp_input, j))
continue;
if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(dev,
- hwdev, j);
+ err = hwmon_thermal_add_sensor(hdev, j);
if (err) {
device_unregister(hdev);
/*
@@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
free_hwmon:
- kfree(hwdev);
+ hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index b09c39abd3a8..eeac4b04df27 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -528,7 +528,7 @@ static int i5k_amb_probe(struct platform_device *pdev)
goto err;
}
- data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len);
+ data->amb_mmio = ioremap(data->amb_base, data->amb_len);
if (!data->amb_mmio) {
res = -EBUSY;
goto err_map_failed;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 5c1dddde193c..e39354ffe973 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,13 +1,29 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
+ * processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Implementation notes:
+ * - CCD register address information as well as the calculation to
+ * convert raw register values is from https://github.com/ocerman/zenpower.
+ * The information is not confirmed from chip datasheets, but experiments
+ * suggest that it provides reasonable temperature values.
+ * - Register addresses to read chip voltage and current are also from
+ * https://github.com/ocerman/zenpower, and not confirmed from chip
+ * datasheets. Current calibration is board specific and not typically
+ * shared by board vendors. For this reason, current values are
+ *   normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
+ * current. Reported values can be adjusted using the sensors configuration
+ * file.
*/
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#endif
/* CPUID function 0x80000001, ebx */
-#define CPUID_PKGTYPE_MASK 0xf0000000
+#define CPUID_PKGTYPE_MASK GENMASK(31, 28)
#define CPUID_PKGTYPE_F 0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH 0x094
-#define DDR3_MODE 0x00000100
+#define DDR3_MODE BIT(8)
/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL 0x64
-#define HTC_ENABLE 0x00000001
+#define HTC_ENABLE BIT(0)
#define REG_REPORTED_TEMPERATURE 0xa4
#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
-#define NB_CAP_HTC 0x00000400
+#define NB_CAP_HTC BIT(10)
/*
* For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
@@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4))
+#define F17H_M70H_CCD_TEMP_VALID BIT(11)
+#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0)
+
+#define F17H_M01H_SVI 0x0005A000
+#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10)
+
+#define CUR_TEMP_SHIFT 21
+#define CUR_TEMP_RANGE_SEL_MASK BIT(19)
+
+#define CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define CFACTOR_ISOC 250000 /* 0.25A / LSB */
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -67,6 +97,10 @@ struct k10temp_data {
int temp_offset;
u32 temp_adjust_mask;
bool show_tdie;
+ u32 show_tccd;
+ u32 svi_addr[2];
+ bool show_current;
+ int cfactor[2];
};
struct tctl_offset {
@@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
+static bool is_threadripper(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "Threadripper");
+}
+
+static bool is_epyc(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "EPYC");
+}
+
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
-static unsigned int get_raw_temp(struct k10temp_data *data)
+static long get_raw_temp(struct k10temp_data *data)
{
- unsigned int temp;
u32 regval;
+ long temp;
data->read_tempreg(data->pdev, &regval);
- temp = (regval >> 21) * 125;
+ temp = (regval >> CUR_TEMP_SHIFT) * 125;
if (regval & data->temp_adjust_mask)
temp -= 49000;
return temp;
}
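+
+/*
+ * Example: regval = 0x35c00000 gives (regval >> 21) * 125 = 430 * 125 =
+ * 53750 millidegrees C; 49000 is subtracted when the range-select bit
+ * (temp_adjust_mask) is set.
+ */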
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
+static const char *k10temp_temp_label[] = {
+ "Tdie",
+ "Tctl",
+ "Tccd1",
+ "Tccd2",
+ "Tccd3",
+ "Tccd4",
+ "Tccd5",
+ "Tccd6",
+ "Tccd7",
+ "Tccd8",
+};
- if (temp > data->temp_offset)
- temp -= data->temp_offset;
- else
- temp = 0;
+static const char *k10temp_in_label[] = {
+ "Vcore",
+ "Vsoc",
+};
- return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_curr_label[] = {
+ "Icore",
+ "Isoc",
+};
-static ssize_t temp2_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
-
- return sprintf(buf, "%u\n", temp);
+ switch (type) {
+ case hwmon_temp:
+ *str = k10temp_temp_label[channel];
+ break;
+ case hwmon_in:
+ *str = k10temp_in_label[channel];
+ break;
+ case hwmon_curr:
+ *str = k10temp_curr_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_label_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
- return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
+ switch (attr) {
+ case hwmon_curr_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], &regval);
+ *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
+ (regval & 0xff),
+ 1000);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp1_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
{
- return sprintf(buf, "%d\n", 70 * 1000);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
+
+ switch (attr) {
+ case hwmon_in_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], &regval);
+ regval = (regval >> 16) & 0xff;
+ *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct k10temp_data *data = dev_get_drvdata(dev);
- int show_hyst = attr->index;
u32 regval;
- int value;
- data->read_htcreg(data->pdev, &regval);
- value = ((regval >> 16) & 0x7f) * 500 + 52000;
- if (show_hyst)
- value -= ((regval >> 24) & 0xf) * 500;
- return sprintf(buf, "%d\n", value);
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie */
+ *val = get_raw_temp(data) - data->temp_offset;
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 1: /* Tctl */
+ *val = get_raw_temp(data);
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ F17H_M70H_CCD_TEMP(channel - 2), &regval);
+ *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp_max:
+ *val = 70 * 1000;
+ break;
+ case hwmon_temp_crit:
+ data->read_htcreg(data->pdev, &regval);
+ *val = ((regval >> 16) & 0x7f) * 500 + 52000;
+ break;
+ case hwmon_temp_crit_hyst:
+ data->read_htcreg(data->pdev, &regval);
+ *val = (((regval >> 16) & 0x7f)
+ - ((regval >> 24) & 0xf)) * 500 + 52000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return k10temp_read_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return k10temp_read_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return k10temp_read_curr(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static umode_t k10temp_is_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct k10temp_data *data = dev_get_drvdata(dev);
+ const struct k10temp_data *data = _data;
struct pci_dev *pdev = data->pdev;
u32 reg;
- switch (index) {
- case 0 ... 1: /* temp1_input, temp1_max */
- default:
- break;
- case 2 ... 3: /* temp1_crit, temp1_crit_hyst */
- if (!data->read_htcreg)
- return 0;
-
- pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
- &reg);
- if (!(reg & NB_CAP_HTC))
- return 0;
-
- data->read_htcreg(data->pdev, &reg);
- if (!(reg & HTC_ENABLE))
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie, or Tctl if we don't show it */
+ break;
+ case 1: /* Tctl */
+ if (!data->show_tdie)
+ return 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case hwmon_temp_max:
+ if (channel || data->show_tdie)
+ return 0;
+ break;
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel || !data->read_htcreg)
+ return 0;
+
+ pci_read_config_dword(pdev,
+ REG_NORTHBRIDGE_CAPABILITIES,
+ &reg);
+ if (!(reg & NB_CAP_HTC))
+ return 0;
+
+ data->read_htcreg(data->pdev, &reg);
+ if (!(reg & HTC_ENABLE))
+ return 0;
+ break;
+ case hwmon_temp_label:
+ /* No labels if we don't show the die temperature */
+ if (!data->show_tdie)
+ return 0;
+ switch (channel) {
+ case 0: /* Tdie */
+ case 1: /* Tctl */
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
return 0;
+ }
break;
- case 4 ... 6: /* temp1_label, temp2_input, temp2_label */
- if (!data->show_tdie)
+ case hwmon_in:
+ case hwmon_curr:
+ if (!data->show_current)
return 0;
break;
+ default:
+ return 0;
}
- return attr->mode;
+ return 0444;
}
-static struct attribute *k10temp_attrs[] = {
- &dev_attr_temp1_input.attr,
- &dev_attr_temp1_max.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &dev_attr_temp2_input.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group k10temp_group = {
- .attrs = k10temp_attrs,
- .is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
static bool has_erratum_319(struct pci_dev *pdev)
{
u32 pkg_type, reg_dram_cfg;
@@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev)
(boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
-static int k10temp_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+#ifdef CONFIG_DEBUG_FS
+
+static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
+ u32 addr, int count)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (!(i & 3))
+ seq_printf(s, "0x%06x: ", addr + i * 4);
+ amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
+ seq_printf(s, "%08x ", reg);
+ if ((i & 3) == 3)
+ seq_puts(s, "\n");
+ }
+}
+
+static int svi_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(svi);
+
+static int thm_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(thm);
+
+static void k10temp_debugfs_cleanup(void *ddir)
+{
+ debugfs_remove_recursive(ddir);
+}
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+ struct dentry *debugfs;
+ char name[32];
+
+ /* Only show debugfs data for Family 17h/18h CPUs */
+ if (!data->show_tdie)
+ return;
+
+ scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
+
+ debugfs = debugfs_create_dir(name, NULL);
+ if (debugfs) {
+ debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
+ debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
+ devm_add_action_or_reset(&data->pdev->dev,
+ k10temp_debugfs_cleanup, debugfs);
+ }
+}
+
+#else
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+}
+
+#endif
+
+static const struct hwmon_channel_info *k10temp_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+ .is_visible = k10temp_is_visible,
+ .read = k10temp_read,
+ .read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+ .ops = &k10temp_hwmon_ops,
+ .info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ struct k10temp_data *data, int limit)
+{
+ u32 regval;
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ F17H_M70H_CCD_TEMP(i), &regval);
+ if (regval & F17H_M70H_CCD_TEMP_VALID)
+ data->show_tccd |= BIT(i);
+ }
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
@@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev,
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
- data->temp_adjust_mask = 0x80000;
+ data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_f17;
data->show_tdie = true;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x1: /* Zen */
+ case 0x8: /* Zen+ */
+ case 0x11: /* Zen APU */
+ case 0x18: /* Zen+ APU */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ k10temp_get_ccd_support(pdev, data, 4);
+ break;
+ case 0x31: /* Zen2 Threadripper */
+ case 0x71: /* Zen2 */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
+ }
} else {
data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
@@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev,
}
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
- k10temp_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+ &k10temp_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ k10temp_init_debugfs(data);
+
+ return 0;
}
static const struct pci_device_id k10temp_id_table[] = {
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
new file mode 100644
index 000000000000..eb22a34dc36b
--- /dev/null
+++ b/drivers/hwmon/max31730.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX31730 3-Channel Remote Temperature Sensor
+ *
+ * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c,
+ 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+
+/* The MAX31730 registers */
+#define MAX31730_REG_TEMP 0x00
+#define MAX31730_REG_CONF 0x13
+#define MAX31730_STOP BIT(7)
+#define MAX31730_EXTRANGE BIT(1)
+#define MAX31730_REG_TEMP_OFFSET 0x16
+#define MAX31730_TEMP_OFFSET_BASELINE 0x77
+#define MAX31730_REG_OFFSET_ENABLE 0x17
+#define MAX31730_REG_TEMP_MAX 0x20
+#define MAX31730_REG_TEMP_MIN 0x30
+#define MAX31730_REG_STATUS_HIGH 0x32
+#define MAX31730_REG_STATUS_LOW 0x33
+#define MAX31730_REG_CHANNEL_ENABLE 0x35
+#define MAX31730_REG_TEMP_FAULT 0x36
+
+#define MAX31730_REG_MFG_ID 0x50
+#define MAX31730_MFG_ID 0x4d
+#define MAX31730_REG_MFG_REV 0x51
+#define MAX31730_MFG_REV 0x01
+
+#define MAX31730_TEMP_MIN (-128000)
+#define MAX31730_TEMP_MAX 127937
+
+/* Each client has this additional data */
+struct max31730_data {
+ struct i2c_client *client;
+ u8 orig_conf;
+ u8 current_conf;
+ u8 offset_enable;
+ u8 channel_enable;
+};
+
+/*-----------------------------------------------------------------------*/
+
+static inline long max31730_reg_to_mc(s16 temp)
+{
+ return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16);
+}
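+
+/*
+ * Example: register value 0x1a80 (6784) gives (6784 >> 4) * 1000 / 16 =
+ * 26500 millidegrees C.
+ */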
+
+static int max31730_write_config(struct max31730_data *data, u8 set_mask,
+ u8 clr_mask)
+{
+ u8 value;
+
+ clr_mask |= MAX31730_EXTRANGE;
+ value = data->current_conf & ~clr_mask;
+ value |= set_mask;
+
+ if (data->current_conf != value) {
+ s32 err;
+
+ err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF,
+ value);
+ if (err)
+ return err;
+ data->current_conf = value;
+ }
+ return 0;
+}
+
+static int max31730_set_enable(struct i2c_client *client, int reg,
+ u8 *confdata, int channel, bool enable)
+{
+ u8 regval = *confdata;
+ int err;
+
+ if (enable)
+ regval |= BIT(channel);
+ else
+ regval &= ~BIT(channel);
+
+ if (regval != *confdata) {
+ err = i2c_smbus_write_byte_data(client, reg, regval);
+ if (err)
+ return err;
+ *confdata = regval;
+ }
+ return 0;
+}
+
+static int max31730_set_offset_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE,
+ &data->offset_enable, channel, enable);
+}
+
+static int max31730_set_channel_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE,
+ &data->channel_enable, channel, enable);
+}
+
+static int max31730_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int regval, reg, offset;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!(data->channel_enable & BIT(channel)))
+ return -ENODATA;
+ reg = MAX31730_REG_TEMP + (channel * 2);
+ break;
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + (channel * 2);
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ *val = !!(data->channel_enable & BIT(channel));
+ return 0;
+ case hwmon_temp_offset:
+ if (!channel)
+ return -EINVAL;
+ if (!(data->offset_enable & BIT(channel))) {
+ *val = 0;
+ return 0;
+ }
+ offset = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET);
+ if (offset < 0)
+ return offset;
+ *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125;
+ return 0;
+ case hwmon_temp_fault:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_FAULT);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_min_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_LOW);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_max_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_HIGH);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ regval = i2c_smbus_read_word_swapped(data->client, reg);
+ if (regval < 0)
+ return regval;
+
+ *val = max31730_reg_to_mc(regval);
+
+ return 0;
+}
+
+static int max31730_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int reg, err;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + channel * 2;
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ return max31730_set_channel_enable(data, channel, val);
+ case hwmon_temp_offset:
+ val = clamp_val(val, -14875, 17000) + 14875;
+ val = DIV_ROUND_CLOSEST(val, 125);
+ err = max31730_set_offset_enable(data, channel,
+ val != MAX31730_TEMP_OFFSET_BASELINE);
+ if (err)
+ return err;
+ return i2c_smbus_write_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET, val);
+ default:
+ return -EINVAL;
+ }
+
+ val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX);
+ val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4;
+
+ return i2c_smbus_write_word_swapped(data->client, reg, (u16)val);
+}
+
+static umode_t max31730_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_min:
+ return channel ? 0444 : 0644;
+ case hwmon_temp_offset:
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *max31730_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT
+ ),
+ NULL
+};
+
+static const struct hwmon_ops max31730_hwmon_ops = {
+ .is_visible = max31730_is_visible,
+ .read = max31730_read,
+ .write = max31730_write,
+};
+
+static const struct hwmon_chip_info max31730_chip_info = {
+ .ops = &max31730_hwmon_ops,
+ .info = max31730_info,
+};
+
+static void max31730_remove(void *data)
+{
+ struct max31730_data *max31730 = data;
+ struct i2c_client *client = max31730->client;
+
+ i2c_smbus_write_byte_data(client, MAX31730_REG_CONF,
+ max31730->orig_conf);
+}
+
+static int
+max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct max31730_data *data;
+ int status, err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EIO;
+
+ data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+
+ /* Cache original configuration and enable status */
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE);
+ if (status < 0)
+ return status;
+ data->channel_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE);
+ if (status < 0)
+ return status;
+ data->offset_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF);
+ if (status < 0)
+ return status;
+ data->orig_conf = status;
+ data->current_conf = status;
+
+ err = max31730_write_config(data,
+ data->channel_enable ? 0 : MAX31730_STOP,
+ data->channel_enable ? MAX31730_STOP : 0);
+ if (err)
+ return err;
+
+ dev_set_drvdata(dev, data);
+
+ err = devm_add_action_or_reset(dev, max31730_remove, data);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max31730_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31730_ids[] = {
+ { "max31730", 0, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31730_ids);
+
+static const struct of_device_id __maybe_unused max31730_of_match[] = {
+ {
+ .compatible = "maxim,max31730",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max31730_of_match);
+
+static bool max31730_check_reg_temp(struct i2c_client *client,
+ int reg)
+{
+ int regval;
+
+ regval = i2c_smbus_read_byte_data(client, reg + 1);
+ return regval < 0 || (regval & 0x0f);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max31730_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID);
+ if (regval != MAX31730_MFG_ID)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV);
+ if (regval != MAX31730_MFG_REV)
+ return -ENODEV;
+
+	/* lower 4 bits of temperature and limit registers must be 0 */
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN))
+ return -ENODEV;
+
+ for (i = 0; i < 4; i++) {
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2))
+ return -ENODEV;
+ if (max31730_check_reg_temp(client,
+ MAX31730_REG_TEMP_MAX + i * 2))
+ return -ENODEV;
+ }
+
+ strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int __maybe_unused max31730_suspend(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, MAX31730_STOP, 0);
+}
+
+static int __maybe_unused max31730_resume(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, 0, MAX31730_STOP);
+}
+
+static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+
+static struct i2c_driver max31730_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max31730",
+ .of_match_table = of_match_ptr(max31730_of_match),
+ .pm = &max31730_pm_ops,
+ },
+ .probe = max31730_probe,
+ .id_table = max31730_ids,
+ .detect = max31730_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(max31730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX31730 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index f3dd2a17bd42..2e97e56c72c7 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -23,8 +23,8 @@
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
};
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
struct nct7802_data {
struct regmap *regmap;
struct mutex access_lock; /* for multi-byte read and write operations */
+ u8 in_status;
+ struct mutex in_alarm_lock;
};
static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
return err ? : count;
}
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ int volt, min, max, ret;
+ unsigned int val;
+
+ mutex_lock(&data->in_alarm_lock);
+
+ /*
+ * The SMI Voltage status register is the only register giving a status
+ * for voltages. A bit is set for each input crossing a threshold, in
+ * both directions, but whether the input is inside or outside its limits
+ * is not reported. The register is also cleared on read.
+ * Note: this is not explicitly spelled out in the datasheet, but was
+ * determined by experiment.
+ * To deal with this we use a status cache with one validity bit and
+ * one status bit for each input. Validity is cleared at startup and
+ * each time the register reports a change, and the status is processed
+ * by software based on current input value and limits.
+ */
+ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+ if (ret < 0)
+ goto abort;
+
+ /* invalidate cached status for all inputs crossing a threshold */
+ data->in_status &= ~((val & 0x0f) << 4);
+
+ /* if cached status for requested input is invalid, update it */
+ if (!(data->in_status & (0x10 << sattr->index))) {
+ ret = nct7802_read_voltage(data, sattr->nr, 0);
+ if (ret < 0)
+ goto abort;
+ volt = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 1);
+ if (ret < 0)
+ goto abort;
+ min = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 2);
+ if (ret < 0)
+ goto abort;
+ max = ret;
+
+ if (volt < min || volt > max)
+ data->in_status |= (1 << sattr->index);
+ else
+ data->in_status &= ~(1 << sattr->index);
+
+ data->in_status |= 0x10 << sattr->index;
+ }
+
+ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+ mutex_unlock(&data->in_alarm_lock);
+ return ret;
+}
+
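+ /*
+ * Illustration of the cache layout used above (editor's note, derived from
+ * the code, not from the datasheet): in_status keeps the alarm state in
+ * bits 0-3 and a validity flag in bits 4-7, so for input 2,
+ *   in_status & BIT(2) - input 2 is outside its limits
+ *   in_status & BIT(6) - the cached state for input 2 is valid
+ */
+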
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
return PTR_ERR(data->regmap);
mutex_init(&data->access_lock);
+ mutex_init(&data->in_alarm_lock);
ret = nct7802_init_chip(data);
if (ret < 0)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 59859979571d..a9ea06204767 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -20,8 +20,8 @@ config SENSORS_PMBUS
help
If you say yes here you get hardware monitoring support for generic
PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
- TPS544B25, TPS544C20, TPS544C25, and UDT020.
+ MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
+ TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -145,6 +145,15 @@ config SENSORS_MAX16064
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX20730
+ tristate "Maxim MAX20730, MAX20734, MAX20743"
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX20730, MAX20734, and MAX20743.
+
+ This driver can also be built as a module. If so, the module will
+ be called max20730.
+
config SENSORS_MAX20751
tristate "Maxim MAX20751"
help
@@ -200,20 +209,20 @@ config SENSORS_TPS40422
be called tps40422.
config SENSORS_TPS53679
- tristate "TI TPS53679"
+ tristate "TI TPS53679, TPS53688"
help
If you say yes here you get hardware monitoring support for TI
- TPS53679.
+ TPS53679 and TPS53688.
This driver can also be built as a module. If so, the module will
be called tps53679.
config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
+ tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
- Health Controllers.
+ UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer
+ and System Health Controllers.
This driver can also be built as a module. If so, the module will
be called ucd9000.
@@ -228,6 +237,15 @@ config SENSORS_UCD9200
This driver can also be built as a module. If so, the module will
be called ucd9200.
+config SENSORS_XDPE122
+ tristate "Infineon XDPE122 family"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ XDPE12254 and XDPE12284 devices.
+
+ This driver can also be built as a module. If so, the module will
+ be called xdpe12284.
+
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 3f8c1014938b..5feb45806123 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d359b76bcb36..3795fe55b84f 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -20,12 +20,15 @@
#define CFFPS_FRU_CMD 0x9A
#define CFFPS_PN_CMD 0x9B
+#define CFFPS_HEADER_CMD 0x9C
#define CFFPS_SN_CMD 0x9E
+#define CFFPS_MAX_POWER_OUT_CMD 0xA7
#define CFFPS_CCIN_CMD 0xBD
#define CFFPS_FW_CMD 0xFA
#define CFFPS1_FW_NUM_BYTES 4
#define CFFPS2_FW_NUM_WORDS 3
#define CFFPS_SYS_CONFIG_CMD 0xDA
+#define CFFPS_12VCS_VOUT_CMD 0xDE
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
@@ -44,22 +47,21 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
-/*
- * LED off state actually relinquishes LED control to PSU firmware, so it can
- * turn on the LED for faults.
- */
-#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
CFFPS_DEBUGFS_INPUT_HISTORY = 0,
CFFPS_DEBUGFS_FRU,
CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_HEADER,
CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_MAX_POWER_OUT,
CFFPS_DEBUGFS_CCIN,
CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_ON_OFF_CONFIG,
CFFPS_DEBUGFS_NUM_ENTRIES
};
@@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
psu->input_history.byte_count);
}
-static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
u8 cmd;
int i, rc;
int *idxp = file->private_data;
int idx = *idxp;
struct ibm_cffps *psu = to_psu(idxp, idx);
- char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
pmbus_set_page(psu->client, 0);
@@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
case CFFPS_DEBUGFS_PN:
cmd = CFFPS_PN_CMD;
break;
+ case CFFPS_DEBUGFS_HEADER:
+ cmd = CFFPS_HEADER_CMD;
+ break;
case CFFPS_DEBUGFS_SN:
cmd = CFFPS_SN_CMD;
break;
+ case CFFPS_DEBUGFS_MAX_POWER_OUT:
+ rc = i2c_smbus_read_word_swapped(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc);
+ goto done;
case CFFPS_DEBUGFS_CCIN:
rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
if (rc < 0)
@@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
return -EOPNOTSUPP;
}
goto done;
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ rc = i2c_smbus_read_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 3, "%02x", rc);
+ goto done;
default:
return -EINVAL;
}
@@ -214,9 +235,42 @@ done:
return simple_read_from_buffer(buf, count, ppos, data, rc);
}
+static ssize_t ibm_cffps_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ u8 data;
+ ssize_t rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ pmbus_set_page(psu->client, 0);
+
+ rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
+ if (rc <= 0)
+ return rc;
+
+ rc = i2c_smbus_write_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG, data);
+ if (rc)
+ return rc;
+
+ rc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
static const struct file_operations ibm_cffps_fops = {
.llseek = noop_llseek,
- .read = ibm_cffps_debugfs_op,
+ .read = ibm_cffps_debugfs_read,
+ .write = ibm_cffps_debugfs_write,
.open = simple_open,
};
@@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
if (mfr & CFFPS_MFR_PS_KILL)
rc |= PB_STATUS_OFF;
break;
+ case PMBUS_VIRT_READ_VMON:
+ rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+ break;
default:
rc = -ENODATA;
break;
@@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
rc = devm_led_classdev_register(dev, &psu->led);
if (rc)
dev_warn(dev, "failed to register led class: %d\n", rc);
+ else
+ i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_OFF);
}
static struct pmbus_driver_info ibm_cffps_info[] = {
@@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
- PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON,
.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
@@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client,
debugfs_create_file("part_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_PN],
&ibm_cffps_fops);
+ debugfs_create_file("header", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER],
+ &ibm_cffps_fops);
debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_SN],
&ibm_cffps_fops);
+ debugfs_create_file("max_power_out", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT],
+ &ibm_cffps_fops);
debugfs_create_file("ccin", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
&ibm_cffps_fops);
debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_FW],
&ibm_cffps_fops);
+ debugfs_create_file("on_off_config", 0644, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG],
+ &ibm_cffps_fops);
return 0;
}
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index f01f4887fb2e..a91ed01abb68 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
-#define LTC_NOT_BUSY BIT(5)
-#define LTC_NOT_PENDING BIT(4)
+#define LTC_NOT_BUSY BIT(6)
+#define LTC_NOT_PENDING BIT(5)
/*
* LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
new file mode 100644
index 000000000000..294e2212f61e
--- /dev/null
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down
+ * Switching Regulators
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include <linux/util_macros.h>
+#include "pmbus.h"
+
+enum chips {
+ max20730,
+ max20734,
+ max20743
+};
+
+struct max20730_data {
+ enum chips id;
+ struct pmbus_driver_info info;
+ struct mutex lock; /* Used to protect against parallel writes */
+ u16 mfr_devset1;
+};
+
+#define to_max20730_data(x) container_of(x, struct max20730_data, info)
+
+#define MAX20730_MFR_DEVSET1 0xd2
+
+/*
+ * Convert a discrete value to the direct data format. Strictly speaking, all
+ * passed values are constants, so we could do that calculation manually. On
+ * the downside, that would make the driver more difficult to maintain, so
+ * let's use this approach.
+ */
+static u16 val_to_direct(int v, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3; /* take milli-units into account */
+ int b = info->b[class] * 1000;
+ long d;
+
+ d = v * info->m[class] + b;
+ /*
+ * R < 0 is true for all callers, so we don't need to bother
+ * about the R > 0 case.
+ */
+ while (R < 0) {
+ d = DIV_ROUND_CLOSEST(d, 10);
+ R++;
+ }
+ return (u16)d;
+}
+
+static long direct_to_val(u16 w, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3;
+ int b = info->b[class] * 1000;
+ int m = info->m[class];
+ long d = (s16)w;
+
+ if (m == 0)
+ return 0;
+
+ while (R < 0) {
+ d *= 10;
+ R++;
+ }
+ d = (d - b) / m;
+ return d;
+}
+
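+/*
+ * Worked example (editor's illustration, derived from the coefficients
+ * below): with the max20730 temperature parameters m = 21, b = 5887,
+ * R = -1, converting 150000 milli-degrees C gives
+ *   d = (150000 * 21 + 5887 * 1000) / 10^(3 - (-1)) = 9037000 / 10000 ~= 904
+ * and direct_to_val(904, ...) maps back to roughly 150000.
+ */
+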
+static u32 max_current[][5] = {
+ [max20730] = { 13000, 16600, 20100, 23600 },
+ [max20734] = { 21000, 27000, 32000, 38000 },
+ [max20743] = { 18900, 24100, 29200, 34100 },
+};
+
+static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct max20730_data *data = to_max20730_data(info);
+ int ret = 0;
+ u32 max_c;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ switch ((data->mfr_devset1 >> 11) & 0x3) {
+ case 0x0:
+ ret = val_to_direct(150000, PSC_TEMPERATURE, info);
+ break;
+ case 0x1:
+ ret = val_to_direct(130000, PSC_TEMPERATURE, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
+ ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max20730_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ struct pmbus_driver_info *info;
+ struct max20730_data *data;
+ u16 devset1;
+ int ret = 0;
+ int idx;
+
+ info = (struct pmbus_driver_info *)pmbus_get_driver_info(client);
+ data = to_max20730_data(info);
+
+ mutex_lock(&data->lock);
+ devset1 = data->mfr_devset1;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ devset1 &= ~(BIT(11) | BIT(12));
+ if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000)
+ devset1 |= BIT(11);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ devset1 &= ~(BIT(5) | BIT(6));
+
+ idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info),
+ max_current[data->id], 4);
+ devset1 |= (idx << 5);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ if (!ret && devset1 != data->mfr_devset1) {
+ ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1,
+ devset1);
+ if (!ret) {
+ data->mfr_devset1 = devset1;
+ pmbus_clear_cache(client);
+ }
+ }
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static const struct pmbus_driver_info max20730_info[] = {
+ [max20730] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source: Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3609,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ /*
+ * Values in the datasheet are adjusted for temperature and
+ * for the relationship between Vin and Vout.
+ * Unfortunately, the data sheet suggests that Vout measurement
+ * may be scaled with a resistor array. This is indeed the case
+ * at least on the evaluation boards. As a result, any in-driver
+ * adjustments would either be wrong or require elaborate means
+ * to configure the scaling. Instead of doing that, just report
+ * raw values and let userspace handle adjustments.
+ */
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 153,
+ .b[PSC_CURRENT_OUT] = 4976,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20734] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source: Maxim AN6209 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3592,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 111,
+ .b[PSC_CURRENT_OUT] = 3461,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20743] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source: Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3597,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 95,
+ .b[PSC_CURRENT_OUT] = 5014,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+};
+
+static int max20730_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct max20730_data *data;
+ enum chips chip_id;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 5 || strncmp(buf, "MAXIM", 5)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ /*
+ * The chips support reading PMBUS_MFR_MODEL. On both MAX20730
+ * and MAX20734, reading it returns M20743. Presumably that is
+ * the reason why the command is not documented. Unfortunately,
+ * that means that there is no reliable means to detect the chip.
+ * However, we can at least detect the chip series. Compare
+ * the returned value against 'M20743' and bail out if there is
+ * a mismatch. If that doesn't work for all chips, we may have
+ * to remove this check.
+ */
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ if (ret != 6 || strncmp(buf, "M20743", 6)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Revision\n");
+ return ret;
+ }
+ if (ret != 1 || buf[0] != 'F') {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ if (client->dev.of_node)
+ chip_id = (enum chips)of_device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->id = chip_id;
+ mutex_init(&data->lock);
+ memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+
+ ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
+ if (ret < 0)
+ return ret;
+ data->mfr_devset1 = ret;
+
+ return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max20730_id[] = {
+ { "max20730", max20730 },
+ { "max20734", max20734 },
+ { "max20743", max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20730_id);
+
+static const struct of_device_id max20730_of_match[] = {
+ { .compatible = "maxim,max20730", .data = (void *)max20730 },
+ { .compatible = "maxim,max20734", .data = (void *)max20734 },
+ { .compatible = "maxim,max20743", .data = (void *)max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max20730_of_match);
+
+static struct i2c_driver max20730_driver = {
+ .driver = {
+ .name = "max20730",
+ .of_match_table = max20730_of_match,
+ },
+ .probe = max20730_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max20730_id,
+};
+
+module_i2c_driver(max20730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index ee5f0cdbde06..da3c38cb9a5c 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = vid,
- .vrm_version = vr12,
+ .vrm_version[0] = vr12,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_OUT] = linear,
.format[PSC_POWER] = linear,
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index c0bc43d01018..51e8312b6c2d 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client,
}
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- int vout_mode;
+ int vout_mode, i;
vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
@@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client,
break;
case 1:
info->format[PSC_VOLTAGE_OUT] = vid;
- info->vrm_version = vr11;
+ for (i = 0; i < info->pages; i++)
+ info->vrm_version[i] = vr11;
break;
case 2:
info->format[PSC_VOLTAGE_OUT] = direct;
@@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = {
{"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"max20796", (kernel_ulong_t)&pmbus_info_one},
{"mdt040", (kernel_ulong_t)&pmbus_info_one},
{"ncp4200", (kernel_ulong_t)&pmbus_info_one},
{"ncp4208", (kernel_ulong_t)&pmbus_info_one},
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d198af3a92b6..13b34bd67f23 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -22,6 +22,8 @@ enum pmbus_regs {
PMBUS_CLEAR_FAULTS = 0x03,
PMBUS_PHASE = 0x04,
+ PMBUS_WRITE_PROTECT = 0x10,
+
PMBUS_CAPABILITY = 0x19,
PMBUS_QUERY = 0x1A,
@@ -226,6 +228,15 @@ enum pmbus_regs {
#define PB_OPERATION_CONTROL_ON BIT(7)
/*
+ * WRITE_PROTECT
+ */
+#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */
+#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */
+#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
+
+#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)
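+
+/*
+ * For instance (editor's example): a chip reporting 0x20 (PB_WP_VOUT) in
+ * WRITE_PROTECT still accepts writes to WRITE_PROTECT, OPERATION, PAGE,
+ * ON_OFF_CONFIG and VOUT_COMMAND, but protects everything else.
+ */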
+
+/*
* CAPABILITY
*/
#define PB_CAPABILITY_SMBALERT BIT(4)
@@ -377,12 +388,12 @@ enum pmbus_sensor_classes {
#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12, vr13 };
+enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
struct pmbus_driver_info {
int pages; /* Total number of pages */
enum pmbus_data_format format[PSC_NUM_CLASSES];
- enum vrm_version vrm_version;
+ enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8470097907bc..d9c17feb7b4a 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
long val = sensor->data;
long rv = 0;
- switch (data->info->vrm_version) {
+ switch (data->info->vrm_version[sensor->page]) {
case vr11:
if (val >= 0x02 && val <= 0xb2)
rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
@@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
if (val >= 0x01)
rv = 500 + (val - 1) * 10;
break;
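+	/*
+	 * Editor's examples, derived from the formulas below: imvp9 steps
+	 * 10 mV up from 200 mV (val 0x01 -> 200 mV); amd625mv steps 6.25 mV
+	 * down from 1.55 V (val 0x00 -> 1550 mV, val 0x10 -> 1450 mV).
+	 */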
+ case imvp9:
+ if (val >= 0x01)
+ rv = 200 + (val - 1) * 10;
+ break;
+ case amd625mv:
+ if (val >= 0x0 && val <= 0xd8)
+ rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100);
+ break;
}
return rv;
}
@@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
snprintf(sensor->name, sizeof(sensor->name), "%s%d",
name, seq);
+ if (data->flags & PMBUS_WRITE_PROTECTED)
+ readonly = true;
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
@@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
client->flags |= I2C_CLIENT_PEC;
+ /*
+ * Check if the chip is write protected. If it is, we cannot clear
+ * faults, and we should not try it. Also, in that case, writes into
+ * limit registers need to be disabled.
+ */
+ ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ if (ret > 0 && (ret & PB_WP_ANY))
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+
if (data->info->pages)
pmbus_clear_faults(client);
else
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index ebe3f023f840..517584cff3de 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -19,26 +19,30 @@
static int pxe1610_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- u8 vout_mode;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_mode = ret & GENMASK(4, 0);
-
- switch (vout_mode) {
- case 1:
- info->vrm_version = vr12;
- break;
- case 2:
- info->vrm_version = vr13;
- break;
- default:
- return -ENODEV;
+ int i;
+
+ for (i = 0; i < PXE1610_NUM_PAGES; i++) {
+ if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) {
+ u8 vout_mode;
+ int ret;
+
+ /* Read the register with VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_mode = ret & GENMASK(4, 0);
+
+ switch (vout_mode) {
+ case 1:
+ info->vrm_version[i] = vr12;
+ break;
+ case 2:
+ info->vrm_version[i] = vr13;
+ break;
+ default:
+ return -ENODEV;
+ }
}
}
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 86bb3aca09ed..9c22e9013dd7 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
u8 vout_params;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_params = ret & GENMASK(4, 0);
-
- switch (vout_params) {
- case TPS53679_PROT_VR13_10MV:
- case TPS53679_PROT_VR12_5_10MV:
- info->vrm_version = vr13;
- break;
- case TPS53679_PROT_VR13_5MV:
- case TPS53679_PROT_VR12_5MV:
- case TPS53679_PROT_IMVP8_5MV:
- info->vrm_version = vr12;
- break;
- default:
- return -EINVAL;
+ int i, ret;
+
+ for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+ /* Read the register with VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case TPS53679_PROT_VR13_10MV:
+ case TPS53679_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case TPS53679_PROT_VR13_5MV:
+ case TPS53679_PROT_VR12_5MV:
+ case TPS53679_PROT_IMVP8_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ default:
+ return -EINVAL;
+ }
}
return 0;
@@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client,
static const struct i2c_device_id tps53679_id[] = {
{"tps53679", 0},
+ {"tps53688", 0},
{}
};
@@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id);
static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
+ {.compatible = "ti,tps53688"},
{}
};
MODULE_DEVICE_TABLE(of, tps53679_of_match);
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index a9229c6b0e84..23ea3415f166 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -18,7 +18,8 @@
#include <linux/gpio/driver.h>
#include "pmbus.h"
-enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+ ucd90910 };
#define UCD9000_MONITOR_CONFIG 0xd5
#define UCD9000_NUM_PAGES 0xd6
@@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_GPIO_OUTPUT 1
#define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07)
-#define UCD9000_MON_PAGE(x) ((x) & 0x0f)
+#define UCD9000_MON_PAGE(x) ((x) & 0x1f)
#define UCD9000_MON_VOLTAGE 1
#define UCD9000_MON_TEMPERATURE 2
@@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
#define UCD9000_GPIO_NAME_LEN 16
#define UCD9090_NUM_GPIOS 23
#define UCD901XX_NUM_GPIOS 26
+#define UCD90320_NUM_GPIOS 84
#define UCD90910_NUM_GPIOS 26
#define UCD9000_DEBUGFS_NAME_LEN 24
#define UCD9000_GPI_COUNT 8
+#define UCD90320_GPI_COUNT 32
struct ucd9000_data {
u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
@@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = {
{"ucd90120", ucd90120},
{"ucd90124", ucd90124},
{"ucd90160", ucd90160},
+ {"ucd90320", ucd90320},
{"ucd9090", ucd9090},
{"ucd90910", ucd90910},
{}
@@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
.data = (void *)ucd90160
},
{
+ .compatible = "ti,ucd90320",
+ .data = (void *)ucd90320
+ },
+ {
.compatible = "ti,ucd9090",
.data = (void *)ucd9090
},
@@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
case ucd90160:
data->gpio.ngpio = UCD901XX_NUM_GPIOS;
break;
+ case ucd90320:
+ data->gpio.ngpio = UCD90320_NUM_GPIOS;
+ break;
case ucd90910:
data->gpio.ngpio = UCD90910_NUM_GPIOS;
break;
@@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val)
struct ucd9000_debugfs_entry *entry = data;
struct i2c_client *client = entry->client;
u8 buffer[I2C_SMBUS_BLOCK_MAX];
- int ret;
+ int ret, i;
ret = ucd9000_get_mfr_status(client, buffer);
if (ret < 0)
return ret;
/*
- * Attribute only created for devices with gpi fault bits at bits
- * 16-23, which is the second byte of the response.
+ * GPI fault bits are in sets of 8, two bytes from end of response.
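+ * For example (editor's illustration), entry->index 10 is read from
+ * buffer[ret - 4], bit 2.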
*/
- *val = !!(buffer[1] & BIT(entry->index));
+ i = ret - 3 - entry->index / 8;
+ if (i >= 0)
+ *val = !!(buffer[i] & BIT(entry->index % 8));
return 0;
}
@@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
{
struct dentry *debugfs;
struct ucd9000_debugfs_entry *entries;
- int i;
+ int i, gpi_count;
char name[UCD9000_DEBUGFS_NAME_LEN];
debugfs = pmbus_get_debugfs_dir(client);
@@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
/*
* Of the chips this driver supports, only the UCD9090, UCD90160,
- * and UCD90910 report GPI faults in their MFR_STATUS register, so only
- * create the GPI fault debugfs attributes for those chips.
+ * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS
+ * register, so only create the GPI fault debugfs attributes for those
+ * chips.
*/
if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 ||
- mid->driver_data == ucd90910) {
+ mid->driver_data == ucd90320 || mid->driver_data == ucd90910) {
+ gpi_count = mid->driver_data == ucd90320 ? UCD90320_GPI_COUNT
+ : UCD9000_GPI_COUNT;
entries = devm_kcalloc(&client->dev,
- UCD9000_GPI_COUNT, sizeof(*entries),
+ gpi_count, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+ for (i = 0; i < gpi_count; i++) {
entries[i].client = client;
entries[i].index = i;
scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
new file mode 100644
index 000000000000..ecd9b65627ec
--- /dev/null
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */
+#define XDPE122_PAGE_NUM 2
+
+static int xdpe122_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ u8 vout_params;
+ int i, ret;
+
+ for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+ /* Read the register with VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case XDPE122_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case XDPE122_PROT_VR12_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ case XDPE122_PROT_IMVP9_10MV:
+ info->vrm_version[i] = imvp9;
+ break;
+ case XDPE122_AMD_625MV:
+ info->vrm_version[i] = amd625mv;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static struct pmbus_driver_info xdpe122_info = {
+ .pages = XDPE122_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+
+ info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+ {"xdpe12254", 0},
+ {"xdpe12284", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+ {.compatible = "infineon,xdpe12254"},
+ {.compatible = "infineon,xdpe12284"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+ .driver = {
+ .name = "xdpe12284",
+ .of_match_table = of_match_ptr(xdpe122_of_match),
+ },
+ .probe = xdpe122_probe,
+ .remove = pmbus_do_remove,
+ .id_table = xdpe122_id,
+};
+
+module_i2c_driver(xdpe122_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 42ffd2e5182d..30b7b3ea8836 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_fan_suspend(struct device *dev)
+static int pwm_fan_disable(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
struct pwm_args args;
@@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev)
return 0;
}
+static void pwm_fan_shutdown(struct platform_device *pdev)
+{
+ pwm_fan_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fan_suspend(struct device *dev)
+{
+ return pwm_fan_disable(dev);
+}
+
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
@@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
+ .shutdown = pwm_fan_shutdown,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 8a7732c0bef3..286d3cfda7de 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -259,7 +259,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_SENSOR },
+ { SCMI_PROTOCOL_SENSOR, "hwmon" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index eb171d15ac48..7ffadc2da57b 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -28,8 +28,6 @@
* w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3
* w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
* w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
- * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
- * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,7 +48,7 @@
enum kinds {
w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
- w83667hg, w83667hg_b, nct6775, nct6776,
+ w83667hg, w83667hg_b,
};
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
@@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = {
"w83627uhg",
"w83667hg",
"w83667hg",
- "nct6775",
- "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static unsigned short fan_debounce;
-module_param(fan_debounce, ushort, 0);
-MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
-
#define DRVNAME "w83627ehf"
/*
@@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_W83627UHG_ID 0xa230
#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
-#define SIO_NCT6775_ID 0xb470
-#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
-/* NCT6775F has its own fan divider registers */
-#define NCT6775_REG_FANDIV1 0x506
-#define NCT6775_REG_FANDIV2 0x507
-#define NCT6775_REG_FAN_DEBOUNCE 0xf0
-
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
@@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
-static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
-static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
-static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
-static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
-static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
-static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
-
-static const u16 NCT6775_REG_TEMP[]
- = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
-static const u16 NCT6775_REG_TEMP_CONFIG[]
- = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
-static const u16 NCT6775_REG_TEMP_HYST[]
- = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
-static const u16 NCT6775_REG_TEMP_OVER[]
- = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
-static const u16 NCT6775_REG_TEMP_SOURCE[]
- = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
-
static const char *const w83667hg_b_temp_label[] = {
"SYSTIN",
"CPUTIN",
@@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = {
"PECI Agent 4"
};
-static const char *const nct6775_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "AMD SB-TSI",
- "PECI Agent 0",
- "PECI Agent 1",
- "PECI Agent 2",
- "PECI Agent 3",
- "PECI Agent 4",
- "PECI Agent 5",
- "PECI Agent 6",
- "PECI Agent 7",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP"
-};
-
-static const char *const nct6776_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "SMBUSMASTER 0",
- "SMBUSMASTER 1",
- "SMBUSMASTER 2",
- "SMBUSMASTER 3",
- "SMBUSMASTER 4",
- "SMBUSMASTER 5",
- "SMBUSMASTER 6",
- "SMBUSMASTER 7",
- "PECI Agent 0",
- "PECI Agent 1",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP",
- "BYTE_TEMP"
-};
-
-#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP)
static int is_word_sized(u16 reg)
{
@@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
return 1350000U / (reg << divreg);
}
-static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
-{
- if ((reg & 0xff1f) == 0xff1f)
- return 0;
-
- reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
-
- if (reg == 0)
- return 0;
-
- return 1350000U / reg;
-}
-
-static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
-{
- if (reg == 0 || reg == 0xffff)
- return 0;
-
- /*
- * Even though the registers are 16 bit wide, the fan divisor
- * still applies.
- */
- return 1350000U / (reg << divreg);
-}
-
static inline unsigned int
div_from_reg(u8 reg)
{
@@ -418,7 +306,6 @@ struct w83627ehf_data {
int addr; /* IO base of hw monitor block */
const char *name;
- struct device *hwmon_dev;
struct mutex lock;
u16 reg_temp[NUM_REG_TEMP];
@@ -428,20 +315,10 @@ struct w83627ehf_data {
u8 temp_src[NUM_REG_TEMP];
const char * const *temp_label;
- const u16 *REG_PWM;
- const u16 *REG_TARGET;
- const u16 *REG_FAN;
- const u16 *REG_FAN_MIN;
- const u16 *REG_FAN_START_OUTPUT;
- const u16 *REG_FAN_STOP_OUTPUT;
- const u16 *REG_FAN_STOP_TIME;
const u16 *REG_FAN_MAX_OUTPUT;
const u16 *REG_FAN_STEP_OUTPUT;
const u16 *scale_in;
- unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
- unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
-
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -457,7 +334,6 @@ struct w83627ehf_data {
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
u8 has_fan_min; /* some fans don't have min register */
- bool has_fan_div;
u8 temp_type[3];
s8 temp_offset[3];
s16 temp[9];
@@ -494,6 +370,7 @@ struct w83627ehf_data {
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+ u8 have_vid:1;
#ifdef CONFIG_PM
/* Remember extra register values over suspend/resume */
@@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg,
}
/* This function assumes that the caller holds data->update_lock */
-static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
-{
- u8 reg;
-
- switch (nr) {
- case 0:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
- | (data->fan_div[0] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 1:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
- | ((data->fan_div[1] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 2:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
- | (data->fan_div[2] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- case 3:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
- | ((data->fan_div[3] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- }
-}
-
-/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
u8 reg;
@@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
}
}
-static void w83627ehf_write_fan_div_common(struct device *dev,
- struct w83627ehf_data *data, int nr)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_write_fan_div(data, nr);
- else
- w83627ehf_write_fan_div(data, nr);
-}
-
-static void nct6775_update_fan_div(struct w83627ehf_data *data)
-{
- u8 i;
-
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fan_div[0] = i & 0x7;
- data->fan_div[1] = (i & 0x70) >> 4;
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- data->fan_div[2] = i & 0x7;
- if (data->has_fan & (1<<3))
- data->fan_div[3] = (i & 0x70) >> 4;
-}
-
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
@@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
}
}
-static void w83627ehf_update_fan_div_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_update_fan_div(data);
- else
- w83627ehf_update_fan_div(data);
-}
-
-static void nct6775_update_pwm(struct w83627ehf_data *data)
-{
- int i;
- int pwmcfg, fanmodecfg;
-
- for (i = 0; i < data->pwm_num; i++) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- fanmodecfg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[i]);
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
- data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
- data->tolerance[i] = fanmodecfg & 0x0f;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
- }
-}
-
static void w83627ehf_update_pwm(struct w83627ehf_data *data)
{
int i;
@@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
& 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]);
data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
}
}
-static void w83627ehf_update_pwm_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
- nct6775_update_pwm(data);
- else
- w83627ehf_update_pwm(data);
-}
-
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
int i;
mutex_lock(&data->update_lock);
@@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
@@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (!(data->has_fan & (1 << i)))
continue;
- reg = w83627ehf_read_value(data, data->REG_FAN[i]);
- data->rpm[i] = data->fan_from_reg(reg,
- data->fan_div[i]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]);
+ data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]);
if (data->has_fan_min & (1 << i))
data->fan_min[i] = w83627ehf_read_value(data,
- data->REG_FAN_MIN[i]);
+ W83627EHF_REG_FAN_MIN[i]);
/*
* If we failed to measure the fan speed and clock
* divider can be increased, let's try that for next
* time
*/
- if (data->has_fan_div
- && (reg >= 0xff || (sio_data->kind == nct6775
- && reg == 0x00))
- && data->fan_div[i] < 0x07) {
+ if (reg >= 0xff && data->fan_div[i] < 0x07) {
dev_dbg(dev,
"Increasing fan%d clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div_common(dev, data, i);
+ w83627ehf_write_fan_div(data, i);
/* Preserve min limit if possible */
if ((data->has_fan_min & (1 << i))
&& data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- data->REG_FAN_MIN[i],
+ W83627EHF_REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
@@ -857,13 +631,13 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->fan_start_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_START_OUTPUT[i]);
+ W83627EHF_REG_FAN_START_OUTPUT[i]);
data->fan_stop_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_OUTPUT[i]);
+ W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_TIME[i]);
+ W83627EHF_REG_FAN_STOP_TIME[i]);
if (data->REG_FAN_MAX_OUTPUT &&
data->REG_FAN_MAX_OUTPUT[i] != 0xff)
@@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->target_temp[i] =
w83627ehf_read_value(data,
- data->REG_TARGET[i]) &
+ W83627EHF_REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
}
@@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
return data;
}
-/*
- * Sysfs callback functions
- */
-#define show_in_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
- data->scale_in)); \
-}
-show_in_reg(in)
-show_in_reg(in_min)
-show_in_reg(in_max)
-
#define store_in_reg(REG, reg) \
-static ssize_t \
-store_in_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- unsigned long val; \
- int err; \
- err = kstrtoul(buf, 10, &val); \
- if (err < 0) \
- return err; \
+ if (val < 0) \
+ return -EINVAL; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
- w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
- data->in_##reg[nr]); \
+ data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \
+ w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \
+ data->in_##reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static int
+store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01);
-}
-
-static struct sensor_device_attribute sda_in_input[] = {
- SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
- SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
- SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
- SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
- SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
- SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
- SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
- SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
- SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
- SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
-};
-
-static struct sensor_device_attribute sda_in_alarm[] = {
- SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
- SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
- SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
- SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
- SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
- SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21),
- SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20),
- SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16),
- SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17),
- SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19),
-};
-
-static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
-};
-
-static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
-};
-
-static ssize_t
-show_fan(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", data->rpm[nr]);
-}
-
-static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n",
- data->fan_from_reg_min(data->fan_min[nr],
- data->fan_div[nr]));
-}
-
-static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
-}
-
-static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
unsigned int reg;
u8 new_div;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
+ if (val < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
- if (!data->has_fan_div) {
- /*
- * Only NCT6776F for now, so we know that this is a 13 bit
- * register
- */
- if (!val) {
- val = 0xff1f;
- } else {
- if (val > 1350000U)
- val = 1350000U;
- val = 1350000U / val;
- val = (val & 0x1f) | ((val << 3) & 0xff00);
- }
- data->fan_min[nr] = val;
- goto done; /* Leave fan divider alone */
- }
if (!val) {
/* No min limit, alarm disabled */
- data->fan_min[nr] = 255;
- new_div = data->fan_div[nr]; /* No change */
- dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1);
+ data->fan_min[channel] = 255;
+ new_div = data->fan_div[channel]; /* No change */
+ dev_info(dev, "fan%u low limit and alarm disabled\n",
+ channel + 1);
} else if ((reg = 1350000U / val) >= 128 * 255) {
/*
* Speed below this value cannot possibly be represented,
* even with the highest divider (128)
*/
- data->fan_min[nr] = 254;
+ data->fan_min[channel] = 254;
new_div = 7; /* 128 == (1 << 7) */
dev_warn(dev,
"fan%u low limit %lu below minimum %u, set to minimum\n",
- nr + 1, val, data->fan_from_reg_min(254, 7));
+ channel + 1, val, fan_from_reg8(254, 7));
} else if (!reg) {
/*
* Speed above this value cannot possibly be represented,
* even with the lowest divider (1)
*/
- data->fan_min[nr] = 1;
+ data->fan_min[channel] = 1;
new_div = 0; /* 1 == (1 << 0) */
dev_warn(dev,
"fan%u low limit %lu above maximum %u, set to maximum\n",
- nr + 1, val, data->fan_from_reg_min(1, 0));
+ channel + 1, val, fan_from_reg8(1, 0));
} else {
/*
* Automatically pick the best divider, i.e. the one such
@@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
reg >>= 1;
new_div++;
}
- data->fan_min[nr] = reg;
+ data->fan_min[channel] = reg;
}
/*
* Write both the fan clock divider (if it changed) and the new
* fan min (unconditionally)
*/
- if (new_div != data->fan_div[nr]) {
+ if (new_div != data->fan_div[channel]) {
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
- nr + 1, div_from_reg(data->fan_div[nr]),
+ channel + 1, div_from_reg(data->fan_div[channel]),
div_from_reg(new_div));
- data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div_common(dev, data, nr);
+ data->fan_div[channel] = new_div;
+ w83627ehf_write_fan_div(data, channel);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
-done:
- w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
- data->fan_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static struct sensor_device_attribute sda_fan_input[] = {
- SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
- SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
- SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
- SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
- SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
-
-static struct sensor_device_attribute sda_fan_alarm[] = {
- SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
- SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
- SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
- SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10),
- SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23),
-};
-
-static struct sensor_device_attribute sda_fan_min[] = {
- SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 0),
- SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 1),
- SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 2),
- SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 3),
- SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 4),
-};
-
-static struct sensor_device_attribute sda_fan_div[] = {
- SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
- SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
- SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
- SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
- SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
-};
-
-static ssize_t
-show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
-}
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel],
+ data->fan_min[channel]);
+ mutex_unlock(&data->update_lock);
-#define show_temp_reg(addr, reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
+ return 0;
}
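/*
 * Worked example for the divider search above (illustrative; the loop
 * bound sits in the elided hunk): the stored limit is an 8-bit count,
 * reg = 1350000 / (val << new_div). Requesting a 600 RPM minimum gives
 * 1350000 / 600 = 2250, which is halved until it fits the register:
 * 2250 -> 1125 -> 562 -> 281 -> 140, so new_div = 4 (divider 16), and
 * reading back fan_from_reg8(140, 4) = 1350000 / (140 << 4) ~= 602 RPM.
 */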
-show_temp_reg(reg_temp, temp);
-show_temp_reg(reg_temp_over, temp_max);
-show_temp_reg(reg_temp_hyst, temp_max_hyst);
#define store_temp_reg(addr, reg) \
-static ssize_t \
-store_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- int err; \
- long val; \
- err = kstrtol(buf, 10, &val); \
- if (err < 0) \
- return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
+ data->reg[channel] = LM75_TEMP_TO_REG(val); \
+ w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_temp_reg(reg_temp_over, temp_max);
store_temp_reg(reg_temp_hyst, temp_max_hyst);
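/*
 * Conversion note (assuming the shared drivers/hwmon/lm75.h helpers):
 * LM75_TEMP_TO_REG() clamps to the LM75 range and rounds to the chip's
 * half-degree register resolution, so a write of 52400 millidegrees is
 * stored as 52.5 degC and LM75_TEMP_FROM_REG() reports 52500 back.
 */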
-static ssize_t
-show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-
- return sprintf(buf, "%d\n",
- data->temp_offset[sensor_attr->index] * 1000);
-}
-
-static ssize_t
-store_temp_offset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
mutex_lock(&data->update_lock);
- data->temp_offset[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val);
+ data->temp_offset[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val);
mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
-}
-
-static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
- SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
- SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
- SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
- SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
- SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
- SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_label[] = {
- SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
- SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
- SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
- SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
- SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
- SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
- SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
- SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
- SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 1),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 2),
- SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 3),
- SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 4),
- SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 5),
- SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 6),
- SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 7),
- SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 1),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 2),
- SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 3),
- SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 4),
- SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 5),
- SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 6),
- SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 7),
- SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 8),
-};
-
-static struct sensor_device_attribute sda_temp_alarm[] = {
- SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
- SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
- SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
-};
-
-static struct sensor_device_attribute sda_temp_type[] = {
- SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
- SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
- SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-};
-
-static struct sensor_device_attribute sda_temp_offset[] = {
- SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 0),
- SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 1),
- SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 2),
-};
-
-#define show_pwm_reg(reg) \
-static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", data->reg[nr]); \
+ return 0;
}
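/*
 * Quick arithmetic check for the clamp above: the offset register is a
 * signed 8-bit count of whole degrees while sysfs speaks millidegrees,
 * so writing 1499 stores DIV_ROUND_CLOSEST(1499, 1000) = 1 (read back
 * as 1000), and writing -200000 saturates at -128 (-128000).
 */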
-show_pwm_reg(pwm_mode)
-show_pwm_reg(pwm_enable)
-show_pwm_reg(pwm)
-
-static ssize_t
-store_pwm_mode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
u16 reg;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (val > 1)
- return -EINVAL;
-
- /* On NCT6776F, DC mode is only supported for pwm1 */
- if (sio_data->kind == nct6776 && nr && val != 1)
+ if (val < 0 || val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- data->pwm_mode[nr] = val;
- reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]);
+ data->pwm_mode[channel] = val;
+ reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);
if (!val)
- reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
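/*
 * The mode bit above is inverted with respect to the sysfs value
 * (sysfs: 1 = PWM, 0 = DC; the register bit selects DC), hence the
 * clear-then-conditionally-set sequence:
 *
 *	reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);	// PWM
 *	if (!val)
 *		reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];	// DC
 */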
-static ssize_t
-store_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
- data->pwm[nr] = val;
- w83627ehf_write_value(data, data->REG_PWM[nr], val);
+ data->pwm[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
u16 reg;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
- return -EINVAL;
- /* SmartFan III mode is not supported on NCT6776F */
- if (sio_data->kind == nct6776 && val == 4)
+ if (!val || val < 0 ||
+ (val > 4 && val != data->pwm_enable_orig[channel]))
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm_enable[nr] = val;
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- reg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[nr]);
- reg &= 0x0f;
- reg |= (val - 1) << 4;
- w83627ehf_write_value(data,
- NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
- }
+ data->pwm_enable[channel] = val;
+ reg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[channel]);
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel],
+ reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-
#define show_tol_temp(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(data, data->REG_TARGET[nr], val);
+ w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
@@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /* Limit tolerance further for NCT6776F */
- if (sio_data->kind == nct6776 && val > 7)
- val = 7;
- reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
- }
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
-static struct sensor_device_attribute sda_pwm[] = {
- SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
- SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
- SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2),
- SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_mode[] = {
- SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 0),
- SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 1),
- SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 2),
- SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_enable[] = {
- SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 0),
- SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 1),
- SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 2),
- SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 3),
-};
-
-static struct sensor_device_attribute sda_target_temp[] = {
- SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 0),
- SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 1),
- SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 2),
- SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 3),
-};
-
-static struct sensor_device_attribute sda_tolerance[] = {
- SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 0),
- SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 1),
- SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 2),
- SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 3),
-};
+static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp,
+ store_target_temp, 0);
+static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp,
+ store_target_temp, 1);
+static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp,
+ store_target_temp, 2);
+static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp,
+ store_target_temp, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance,
+ store_tolerance, 0);
+static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance,
+ store_tolerance, 1);
+static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance,
+ store_tolerance, 2);
+static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance,
+ store_tolerance, 3);
/* Smart Fan registers */
@@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = {
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
val = clamp_val(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
}
-fan_functions(fan_start_output, FAN_START_OUTPUT)
-fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
-fan_functions(fan_max_output, FAN_MAX_OUTPUT)
-fan_functions(fan_step_output, FAN_STEP_OUTPUT)
+fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT)
+fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT)
#define fan_time_functions(reg, REG) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
@@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
} \
-fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-
-static ssize_t name_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
- SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 3),
- SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 3),
- SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 3),
- SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 3),
- SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 3),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
- SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 2),
- SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 2),
- SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 2),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays[] = {
- SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 0),
- SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 1),
- SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 0),
- SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 1),
- SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 0),
- SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 1),
-};
+fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME)
+
+static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 3);
+static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 3);
+
+static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 2);
+static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 2);
+
+static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 1);
+static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 1);
+static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 1);
/*
* pwm1 and pwm3 don't support max and step settings on all chips.
* Need to check support while generating/removing attribute files.
*/
-static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
- SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 0),
- SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 0),
- SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 1),
- SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 1),
- SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 2),
- SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 2),
-};
+static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 0);
+static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 1);
+static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 1);
+static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
struct w83627ehf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
-static DEVICE_ATTR_RO(cpu0_vid);
+DEVICE_ATTR_RO(cpu0_vid);
/* Case open detection */
-
-static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
-
- return sprintf(buf, "%d\n",
- !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
-}
-
-static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- u16 reg, mask;
+ const u16 mask = 0x80;
+ u16 reg;
- if (kstrtoul(buf, 10, &val) || val != 0)
+ if (val != 0 || channel != 0)
return -EINVAL;
- mask = to_sensor_dev_attr_2(attr)->nr;
-
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
@@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
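/*
 * Note on the sequence above: the intrusion latch is cleared by writing
 * CASEOPEN_CLR back with bit 7 set (the read-modify-write preserves the
 * other bits), and data->valid = 0 forces the next sysfs access to
 * re-read the alarm state instead of serving the cached value.
 */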
-static struct sensor_device_attribute_2 sda_caseopen[] = {
- SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x80, 0x10),
- SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x40, 0x40),
-};
-
-/*
- * Driver and device management
- */
-
-static void w83627ehf_device_remove_files(struct device *dev)
+static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- /*
- * some entries in the following arrays may not have been used in
- * device_create_file(), but device_remove_file() will ignore them
- */
- int i;
+ struct device *dev = container_of(kobj, struct device, kobj);
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct device_attribute *devattr;
+ struct sensor_device_attribute *sda;
+
+ devattr = container_of(a, struct device_attribute, attr);
+
+ /* Not a sensor attribute */
+ if (devattr->show == cpu0_vid_show && data->have_vid)
+ return a->mode;
+
+ sda = (struct sensor_device_attribute *)devattr;
+
+ if (sda->index < 2 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index < 3 &&
+ (devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output) &&
+ data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff)
+ return a->mode;
+
+ /* If fan3 and fan4 are enabled, create the files for them */
+ if (sda->index == 2 &&
+ (data->has_fan & (1 << 2)) && data->pwm_num >= 3 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index == 3 &&
+ (data->has_fan & (1 << 3)) && data->pwm_num >= 4 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output ||
+ devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output))
+ return a->mode;
+
+ if ((devattr->show == show_target_temp ||
+ devattr->show == show_tolerance) &&
+ (data->has_fan & (1 << sda->index)) &&
+ sda->index < data->pwm_num)
+ return a->mode;
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
- device_remove_file(dev, &attr->dev_attr);
- }
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- device_remove_file(dev, &sda_in_input[i].dev_attr);
- device_remove_file(dev, &sda_in_alarm[i].dev_attr);
- device_remove_file(dev, &sda_in_min[i].dev_attr);
- device_remove_file(dev, &sda_in_max[i].dev_attr);
- }
- for (i = 0; i < 5; i++) {
- device_remove_file(dev, &sda_fan_input[i].dev_attr);
- device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
- device_remove_file(dev, &sda_fan_div[i].dev_attr);
- device_remove_file(dev, &sda_fan_min[i].dev_attr);
- }
- for (i = 0; i < data->pwm_num; i++) {
- device_remove_file(dev, &sda_pwm[i].dev_attr);
- device_remove_file(dev, &sda_pwm_mode[i].dev_attr);
- device_remove_file(dev, &sda_pwm_enable[i].dev_attr);
- device_remove_file(dev, &sda_target_temp[i].dev_attr);
- device_remove_file(dev, &sda_tolerance[i].dev_attr);
- }
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- device_remove_file(dev, &sda_temp_input[i].dev_attr);
- device_remove_file(dev, &sda_temp_label[i].dev_attr);
- if (i == 2 && data->temp3_val_only)
- continue;
- device_remove_file(dev, &sda_temp_max[i].dev_attr);
- device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
- if (i > 2)
- continue;
- device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
- device_remove_file(dev, &sda_temp_type[i].dev_attr);
- device_remove_file(dev, &sda_temp_offset[i].dev_attr);
- }
+ return 0;
+}
+
+/* These groups handle non-standard attributes used in this device */
+static struct attribute *w83627ehf_attrs[] = {
+
+ &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm1_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_target.dev_attr.attr,
+ &sensor_dev_attr_pwm1_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm2_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_target.dev_attr.attr,
+ &sensor_dev_attr_pwm2_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm3_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_target.dev_attr.attr,
+ &sensor_dev_attr_pwm3_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm4_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_target.dev_attr.attr,
+ &sensor_dev_attr_pwm4_tolerance.dev_attr.attr,
+
+ &dev_attr_cpu0_vid.attr,
+ NULL
+};
- device_remove_file(dev, &sda_caseopen[0].dev_attr);
- device_remove_file(dev, &sda_caseopen[1].dev_attr);
+static const struct attribute_group w83627ehf_group = {
+ .attrs = w83627ehf_attrs,
+ .is_visible = w83627ehf_attrs_visible,
+};
- device_remove_file(dev, &dev_attr_name);
- device_remove_file(dev, &dev_attr_cpu0_vid);
-}
+static const struct attribute_group *w83627ehf_groups[] = {
+ &w83627ehf_group,
+ NULL
+};
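/*
 * How the groups are wired up (a sketch; it assumes probe() hands them
 * to the hwmon core alongside the chip_info defined further below, as
 * the with_info API permits): the core instantiates every attribute in
 * w83627ehf_attrs once at registration time and calls
 * w83627ehf_attrs_visible() per file -- returning 0 hides it, returning
 * a->mode keeps the declared 0644 permissions:
 *
 *	hwmon_dev = devm_hwmon_device_register_with_info(dev,
 *			"w83627ehf", data, &w83627ehf_chip_info,
 *			w83627ehf_groups);
 *	if (IS_ERR(hwmon_dev))
 *		return PTR_ERR(hwmon_dev);
 */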
+
+/*
+ * Driver and device management
+ */
/* Get the monitoring functions started */
static inline void w83627ehf_init_device(struct w83627ehf_data *data,
@@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
}
}
-static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
- int r1, int r2)
-{
- swap(data->temp_src[r1], data->temp_src[r2]);
- swap(data->reg_temp[r1], data->reg_temp[r2]);
- swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
- swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
- swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
-}
-
static void
w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
{
@@ -1954,7 +1293,7 @@ static void
w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
struct w83627ehf_data *data)
{
- int fan3pin, fan4pin, fan4min, fan5pin, regval;
+ int fan3pin, fan4pin, fan5pin, regval;
/* The W83627UHG is simple, only two fan inputs, no config */
if (sio_data->kind == w83627uhg) {
@@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == nct6775) {
- /* On NCT6775, fan4 shares pins with the fdc interface */
- fan3pin = 1;
- fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
- fan4min = 0;
- fan5pin = 0;
- } else if (sio_data->kind == nct6776) {
- bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
-
- if (regval & 0x80)
- fan3pin = gpok;
- else
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-
- if (regval & 0x40)
- fan4pin = gpok;
- else
- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-
- if (regval & 0x20)
- fan5pin = gpok;
- else
- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
-
- fan4min = fan4pin;
- } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
- fan4min = fan4pin;
} else {
fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
- fan4min = fan4pin;
}
data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
data->has_fan |= (fan3pin << 2);
data->has_fan_min |= (fan3pin << 2);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /*
- * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
- * register
- */
- data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
- data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
- } else {
- /*
- * It looks like fan4 and fan5 pins can be alternatively used
- * as fan on/off switches, but fan5 control is write only :/
- * We assume that if the serial interface is disabled, designers
- * connected fan5 as input unless they are emitting logic 1, which
- * is not the default.
- */
- regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((regval & (1 << 2)) && fan4pin) {
- data->has_fan |= (1 << 3);
- data->has_fan_min |= (1 << 3);
+ /*
+ * It looks like fan4 and fan5 pins can be alternatively used
+ * as fan on/off switches, but fan5 control is write only :/
+ * We assume that if the serial interface is disabled, designers
+ * connected fan5 as input unless they are emitting log 1, which
+ * is not the default.
+ */
+ regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((regval & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(regval & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+}
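/*
 * Bitmask example for the detection above (values illustrative): with
 * fan3pin = fan4pin = 1, fan5pin = 0 and FANDIV1 bit 2 set, the driver
 * ends with has_fan = has_fan_min = 0x0f, i.e. bits 0..3 set for
 * fan1..fan4; bit n set means fan input n+1 and its min limit exist.
 */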
+
+static umode_t
+w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct w83627ehf_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_temp:
+ /* channel 0.., name 1.. */
+ if (!(data->have_temp & (1 << channel)))
+ return 0;
+ if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+ return 0444;
+ if (channel == 2 && data->temp3_val_only)
+ return 0;
+ if (attr == hwmon_temp_max) {
+ if (data->reg_temp_over[channel])
+ return 0644;
+ else
+ return 0;
+ }
+ if (attr == hwmon_temp_max_hyst) {
+ if (data->reg_temp_hyst[channel])
+ return 0644;
+ else
+ return 0;
+ }
+ if (channel > 2)
+ return 0;
+ if (attr == hwmon_temp_alarm || attr == hwmon_temp_type)
+ return 0444;
+ if (attr == hwmon_temp_offset) {
+ if (data->have_temp_offset & (1 << channel))
+ return 0644;
+ else
+ return 0;
+ }
+ break;
+
+ case hwmon_fan:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)))
+ return 0;
+ if (attr == hwmon_fan_input || attr == hwmon_fan_alarm)
+ return 0444;
+ if (attr == hwmon_fan_div) {
+ return 0444;
}
- if (!(regval & (1 << 1)) && fan5pin) {
- data->has_fan |= (1 << 4);
- data->has_fan_min |= (1 << 4);
+ if (attr == hwmon_fan_min) {
+ if (data->has_fan_min & (1 << channel))
+ return 0644;
+ else
+ return 0;
}
+ break;
+
+ case hwmon_in:
+ /* channel 0.., name 0.. */
+ if (channel >= data->in_num)
+ return 0;
+ if (channel == 6 && data->in6_skip)
+ return 0;
+ if (attr == hwmon_in_alarm || attr == hwmon_in_input)
+ return 0444;
+ if (attr == hwmon_in_min || attr == hwmon_in_max)
+ return 0644;
+ break;
+
+ case hwmon_pwm:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)) ||
+ channel >= data->pwm_num)
+ return 0;
+ if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable ||
+ attr == hwmon_pwm_input)
+ return 0644;
+ break;
+
+ case hwmon_intrusion:
+ return 0644;
+
+ default: /* Shouldn't happen */
+ return 0;
}
+
+ return 0; /* Shouldn't happen */
}
+static int
+w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = LM75_TEMP_FROM_REG(data->temp[channel]);
+ return 0;
+ case hwmon_temp_max:
+ *val = LM75_TEMP_FROM_REG(data->temp_max[channel]);
+ return 0;
+ case hwmon_temp_max_hyst:
+ *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]);
+ return 0;
+ case hwmon_temp_offset:
+ *val = data->temp_offset[channel] * 1000;
+ return 0;
+ case hwmon_temp_type:
+ *val = (int)data->temp_type[channel];
+ return 0;
+ case hwmon_temp_alarm:
+ if (channel < 3) {
+ int bit[] = { 4, 5, 13 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ *val = in_from_reg(data->in[channel], channel, data->scale_in);
+ return 0;
+ case hwmon_in_min:
+ *val = in_from_reg(data->in_min[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_max:
+ *val = in_from_reg(data->in_max[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_alarm:
+ if (channel < 10) {
+ int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
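/*
 * The bit[] tables preserve the chip's scattered ALARM register layout:
 * e.g. in4_alarm maps to bit 8 and in5_alarm to bit 21, matching the
 * indices of the SENSOR_ATTR(in*_alarm, ...) entries removed earlier in
 * this patch.
 */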
+
+static int
+w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = data->rpm[channel];
+ return 0;
+ case hwmon_fan_min:
+ *val = fan_from_reg8(data->fan_min[channel],
+ data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_div:
+ *val = div_from_reg(data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_alarm:
+ if (channel < 5) {
+ int bit[] = { 6, 7, 11, 10, 23 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = data->pwm[channel];
+ return 0;
+ case hwmon_pwm_enable:
+ *val = data->pwm_enable[channel];
+ return 0;
+ case hwmon_pwm_mode:
+ *val = data->pwm_mode[channel];
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ if (attr != hwmon_intrusion_alarm || channel != 0)
+ return -EOPNOTSUPP; /* shouldn't happen */
+
+ *val = !!(data->caseopen & 0x10);
+ return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+ switch (type) {
+ case hwmon_fan:
+ return w83627ehf_do_read_fan(data, attr, channel, val);
+
+ case hwmon_in:
+ return w83627ehf_do_read_in(data, attr, channel, val);
+
+ case hwmon_pwm:
+ return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+ case hwmon_temp:
+ return w83627ehf_do_read_temp(data, attr, channel, val);
+
+ case hwmon_intrusion:
+ return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ if (attr == hwmon_temp_label) {
+ *str = data->temp_label[data->temp_src[channel]];
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ /* Nothing else should be read as a string */
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ if (type == hwmon_in && attr == hwmon_in_min)
+ return store_in_min(dev, data, channel, val);
+ if (type == hwmon_in && attr == hwmon_in_max)
+ return store_in_max(dev, data, channel, val);
+
+ if (type == hwmon_fan && attr == hwmon_fan_min)
+ return store_fan_min(dev, data, channel, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_max)
+ return store_temp_max(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+ return store_temp_max_hyst(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return store_temp_offset(dev, data, channel, val);
+
+ if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+ return store_pwm_mode(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+ return store_pwm_enable(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_input)
+ return store_pwm(dev, data, channel, val);
+
+ if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+ return clear_caseopen(dev, data, channel, val);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops w83627ehf_ops = {
+ .is_visible = w83627ehf_is_visible,
+ .read = w83627ehf_read,
+ .read_string = w83627ehf_read_string,
+ .write = w83627ehf_write,
+};
+
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE),
+ HWMON_CHANNEL_INFO(intrusion,
+ HWMON_INTRUSION_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info w83627ehf_chip_info = {
+ .ops = &w83627ehf_ops,
+ .info = w83627ehf_info,
+};
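/*
 * End-to-end sketch of the new read path, assuming standard hwmon core
 * behaviour: a read of fan2_input is mapped by the core to
 * (hwmon_fan, hwmon_fan_input, channel 1), gated through
 * w83627ehf_is_visible(), and lands in w83627ehf_read(), which
 * refreshes the register cache via w83627ehf_update_device(dev->parent)
 * and dispatches to w83627ehf_do_read_fan():
 *
 *	long rpm;
 *	if (!w83627ehf_read(hwmon_dev, hwmon_fan, hwmon_fan_input,
 *			    1, &rpm))
 *		pr_debug("fan2: %ld RPM\n", rpm);
 */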
+
static int w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
struct resource *res;
u8 en_vrm10;
int i, err = 0;
+ struct device *hwmon_dev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
@@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+ /* 667HG has 3 pwms, and 627UHG has only 2 */
switch (sio_data->kind) {
default:
data->pwm_num = 4;
break;
case w83667hg:
case w83667hg_b:
- case nct6775:
- case nct6776:
data->pwm_num = 3;
break;
case w83627uhg:
@@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
data->have_temp = 0x07;
/* Deal with temperature register setup first. */
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- int mask = 0;
-
- /*
- * Display temperature sensor output only if it monitors
- * a source other than one already reported. Always display
- * first three temperature registers, though.
- */
- for (i = 0; i < NUM_REG_TEMP; i++) {
- u8 src;
-
- data->reg_temp[i] = NCT6775_REG_TEMP[i];
- data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
- data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
- data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
-
- src = w83627ehf_read_value(data,
- NCT6775_REG_TEMP_SOURCE[i]);
- src &= 0x1f;
- if (src && !(mask & (1 << src))) {
- data->have_temp |= 1 << i;
- mask |= 1 << src;
- }
-
- data->temp_src[i] = src;
-
- /*
- * Now do some register swapping if index 0..2 don't
- * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
- * Idea is to have the first three attributes
- * report SYSTIN, CPUIN, and AUXIN if possible
- * without overriding the basic system configuration.
- */
- if (i > 0 && data->temp_src[0] != 1
- && data->temp_src[i] == 1)
- w82627ehf_swap_tempreg(data, 0, i);
- if (i > 1 && data->temp_src[1] != 2
- && data->temp_src[i] == 2)
- w82627ehf_swap_tempreg(data, 1, i);
- if (i > 2 && data->temp_src[2] != 3
- && data->temp_src[i] == 3)
- w82627ehf_swap_tempreg(data, 2, i);
- }
- if (sio_data->kind == nct6776) {
- /*
- * On NCT6776, AUXTIN and VIN3 pins are shared.
- * Only way to detect it is to check if AUXTIN is used
- * as a temperature source, and if that source is
- * enabled.
- *
- * If that is the case, disable in6, which reports VIN3.
- * Otherwise disable temp3.
- */
- if (data->temp_src[2] == 3) {
- u8 reg;
-
- if (data->reg_temp_config[2])
- reg = w83627ehf_read_value(data,
- data->reg_temp_config[2]);
- else
- reg = 0; /* Assume AUXTIN is used */
-
- if (reg & 0x01)
- data->have_temp &= ~(1 << 2);
- else
- data->in6_skip = 1;
- }
- data->temp_label = nct6776_temp_label;
- } else {
- data->temp_label = nct6775_temp_label;
- }
- data->have_temp_offset = data->have_temp & 0x07;
- for (i = 0; i < 3; i++) {
- if (data->temp_src[i] > 3)
- data->have_temp_offset &= ~(1 << i);
- }
- } else if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg_b) {
u8 reg;
w83627ehf_set_temp_reg_ehf(data, 4);
@@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
data->have_temp_offset = data->have_temp & 0x07;
}
- if (sio_data->kind == nct6775) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg16;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
- data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
- } else if (sio_data->kind == nct6776) {
- data->has_fan_div = false;
- data->fan_from_reg = fan_from_reg13;
- data->fan_from_reg_min = fan_from_reg13;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- } else if (sio_data->kind == w83667hg_b) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
+ if (sio_data->kind == w83667hg_b) {
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
@@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
goto exit_release;
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
- sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
/*
* W83667HG has different pins for VID input and output, so
* we can get the VID input values directly at logical device D
@@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
*/
superio_select(sio_data->sioreg, W83667HG_LD_VID);
data->vid = superio_inb(sio_data->sioreg, 0xe3);
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else if (sio_data->kind != w83627uhg) {
superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
@@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev)
SIO_REG_VID_DATA);
if (sio_data->kind == w83627ehf) /* 6 VID pins only */
data->vid &= 0x3f;
-
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else {
dev_info(dev,
"VID pins in output mode, CPU VID not available\n");
}
}
- if (fan_debounce &&
- (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
- u8 tmp;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
- if (sio_data->kind == nct6776)
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x3e | tmp);
- else
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x1e | tmp);
- pr_info("Enabled fan debounce for chip %s\n", data->name);
- }
-
w83627ehf_check_fan_inputs(sio_data, data);
superio_exit(sio_data->sioreg);
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++)
data->pwm_enable_orig[i] = data->pwm_enable[i];
- /* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
- err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
- err = device_create_file(dev, &attr->dev_attr);
- if (err)
- goto exit_remove;
- }
- }
- /* if fan3 and fan4 are enabled create the sf3 files for them */
- if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan3[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_min[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_max[i].dev_attr)))
- goto exit_remove;
- }
-
- for (i = 0; i < 5; i++) {
- if (data->has_fan & (1 << i)) {
- if ((err = device_create_file(dev,
- &sda_fan_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr)))
- goto exit_remove;
- if (sio_data->kind != nct6776) {
- err = device_create_file(dev,
- &sda_fan_div[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->has_fan_min & (1 << i)) {
- err = device_create_file(dev,
- &sda_fan_min[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i < data->pwm_num &&
- ((err = device_create_file(dev,
- &sda_pwm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_mode[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_enable[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_target_temp[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_tolerance[i].dev_attr))))
- goto exit_remove;
- }
- }
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ data->name,
+ data,
+ &w83627ehf_chip_info,
+ w83627ehf_groups);
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- err = device_create_file(dev, &sda_temp_input[i].dev_attr);
- if (err)
- goto exit_remove;
- if (data->temp_label) {
- err = device_create_file(dev,
- &sda_temp_label[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i == 2 && data->temp3_val_only)
- continue;
- if (data->reg_temp_over[i]) {
- err = device_create_file(dev,
- &sda_temp_max[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp_hyst[i]) {
- err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i > 2)
- continue;
- if ((err = device_create_file(dev,
- &sda_temp_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_type[i].dev_attr)))
- goto exit_remove;
- if (data->have_temp_offset & (1 << i)) {
- err = device_create_file(dev,
- &sda_temp_offset[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- }
-
- err = device_create_file(dev, &sda_caseopen[0].dev_attr);
- if (err)
- goto exit_remove;
-
- if (sio_data->kind == nct6776) {
- err = device_create_file(dev, &sda_caseopen[1].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- err = device_create_file(dev, &dev_attr_name);
- if (err)
- goto exit_remove;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
+ return PTR_ERR_OR_ZERO(hwmon_dev);
- return 0;
-
-exit_remove:
- w83627ehf_device_remove_files(dev);
exit_release:
release_region(res->start, IOREGION_LENGTH);
exit:
@@ -2588,8 +1959,6 @@ static int w83627ehf_remove(struct platform_device *pdev)
{
struct w83627ehf_data *data = platform_get_drvdata(pdev);
- hwmon_device_unregister(data->hwmon_dev);
- w83627ehf_device_remove_files(&pdev->dev);
release_region(data->addr, IOREGION_LENGTH);
return 0;
@@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev)
static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
mutex_lock(&data->update_lock);
data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
- if (sio_data->kind == nct6775) {
- data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- }
mutex_unlock(&data->update_lock);
return 0;
@@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev)
static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev)
if (!(data->has_fan_min & (1 << i)))
continue;
- w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i],
data->fan_min[i]);
}
@@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev)
/* Restore other settings */
w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
- if (sio_data->kind == nct6775) {
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
- }
/* Force re-reading all values */
data->valid = 0;
@@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
static const char sio_name_W83667HG[] __initconst = "W83667HG";
static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
- static const char sio_name_NCT6775[] __initconst = "NCT6775F";
- static const char sio_name_NCT6776[] __initconst = "NCT6776F";
u16 val;
const char *sio_name;
@@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
- case SIO_NCT6775_ID:
- sio_data->kind = nct6775;
- sio_name = sio_name_NCT6775;
- break;
- case SIO_NCT6776_ID:
- sio_data->kind = nct6776;
- sio_name = sio_name_NCT6776;
- break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 14e1a532ebb5..3b05560456ea 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -76,7 +76,6 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
- struct resource *res;
void __iomem *io_base;
int num_locks, i, ret;
/* Only a single hwspinlock block device is supported */
@@ -85,13 +84,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
if (!node)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- io_base = ioremap(res->start, resource_size(res));
- if (!io_base)
- return -ENOMEM;
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
/*
* make sure the module is enabled and clocked before reading
@@ -101,7 +96,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
- goto iounmap_base;
+ goto runtime_err;
}
/* Determine number of locks */
@@ -114,20 +109,21 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
*/
ret = pm_runtime_put(&pdev->dev);
if (ret < 0)
- goto iounmap_base;
+ goto runtime_err;
/* one of the four lsb's must be set, and nothing else */
if (hweight_long(i & 0xf) != 1 || i > 8) {
ret = -EINVAL;
- goto iounmap_base;
+ goto runtime_err;
}
num_locks = i * 32; /* actual number of locks in this device */
- bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
+ bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
+ GFP_KERNEL);
if (!bank) {
ret = -ENOMEM;
- goto iounmap_base;
+ goto runtime_err;
}
platform_set_drvdata(pdev, bank);
@@ -138,25 +134,21 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
base_id, num_locks);
if (ret)
- goto reg_fail;
+ goto runtime_err;
dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
num_locks);
return 0;
-reg_fail:
- kfree(bank);
-iounmap_base:
+runtime_err:
pm_runtime_disable(&pdev->dev);
- iounmap(io_base);
return ret;
}
static int omap_hwspinlock_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
- void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
int ret;
ret = hwspin_lock_unregister(bank);
@@ -166,8 +158,6 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- iounmap(io_base);
- kfree(bank);
return 0;
}
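
The omap_hwspinlock rework is a straight conversion to device-managed resources. A minimal probe sketch of the same pattern, under assumed demo_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_dev {
	void __iomem *base;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_dev *demo;
	void __iomem *base;

	/* One call replaces platform_get_resource() + ioremap(); the
	 * mapping is released automatically on driver detach. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* devm_kzalloc() likewise needs no kfree() in any error path. */
	demo = devm_kzalloc(&pdev->dev, sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;

	demo->base = base;
	platform_set_drvdata(pdev, demo);
	return 0;
}
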
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 6da7447d277d..f0da544b14d2 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include "hwspinlock_internal.h"
@@ -122,35 +121,12 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev)
regmap, field);
}
- pm_runtime_enable(&pdev->dev);
-
- ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
- 0, QCOM_MUTEX_NUM_LOCKS);
- if (ret)
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
-static int qcom_hwspinlock_remove(struct platform_device *pdev)
-{
- struct hwspinlock_device *bank = platform_get_drvdata(pdev);
- int ret;
-
- ret = hwspin_lock_unregister(bank);
- if (ret) {
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return ret;
- }
-
- pm_runtime_disable(&pdev->dev);
-
- return 0;
+ return devm_hwspin_lock_register(&pdev->dev, bank, &qcom_hwspinlock_ops,
+ 0, QCOM_MUTEX_NUM_LOCKS);
}
static struct platform_driver qcom_hwspinlock_driver = {
.probe = qcom_hwspinlock_probe,
- .remove = qcom_hwspinlock_remove,
.driver = {
.name = "qcom_hwspinlock",
.of_match_table = qcom_hwspinlock_of_match,
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
index 1f625cd68c50..823d3c4f621e 100644
--- a/drivers/hwspinlock/sirf_hwspinlock.c
+++ b/drivers/hwspinlock/sirf_hwspinlock.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
-#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
@@ -56,7 +55,7 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
{
struct sirf_hwspinlock *hwspin;
struct hwspinlock *hwlock;
- int idx, ret;
+ int idx;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -69,9 +68,9 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
return -ENOMEM;
/* retrieve io base */
- hwspin->io_base = of_iomap(pdev->dev.of_node, 0);
- if (!hwspin->io_base)
- return -ENOMEM;
+ hwspin->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hwspin->io_base))
+ return PTR_ERR(hwspin->io_base);
for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
hwlock = &hwspin->bank.lock[idx];
@@ -80,39 +79,9 @@ static int sirf_hwspinlock_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hwspin);
- pm_runtime_enable(&pdev->dev);
-
- ret = hwspin_lock_register(&hwspin->bank, &pdev->dev,
- &sirf_hwspinlock_ops, 0,
- HW_SPINLOCK_NUMBER);
- if (ret)
- goto reg_failed;
-
- return 0;
-
-reg_failed:
- pm_runtime_disable(&pdev->dev);
- iounmap(hwspin->io_base);
-
- return ret;
-}
-
-static int sirf_hwspinlock_remove(struct platform_device *pdev)
-{
- struct sirf_hwspinlock *hwspin = platform_get_drvdata(pdev);
- int ret;
-
- ret = hwspin_lock_unregister(&hwspin->bank);
- if (ret) {
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return ret;
- }
-
- pm_runtime_disable(&pdev->dev);
-
- iounmap(hwspin->io_base);
-
- return 0;
+ return devm_hwspin_lock_register(&pdev->dev, &hwspin->bank,
+ &sirf_hwspinlock_ops, 0,
+ HW_SPINLOCK_NUMBER);
}
static const struct of_device_id sirf_hwpinlock_ids[] = {
@@ -123,7 +92,6 @@ MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
static struct platform_driver sirf_hwspinlock_driver = {
.probe = sirf_hwspinlock_probe,
- .remove = sirf_hwspinlock_remove,
.driver = {
.name = "atlas7_hwspinlock",
.of_match_table = of_match_ptr(sirf_hwpinlock_ids),
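
Both the qcom and sirf conversions end with devm_hwspin_lock_register(), which lets the devres core unregister the bank on unbind and makes the .remove callbacks removable. A sketch under assumed demo_* names and a placeholder lock count:

#include <linux/hwspinlock.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "hwspinlock_internal.h"	/* struct hwspinlock_device, ops */

#define DEMO_NUM_LOCKS	32	/* illustrative lock count */

static int demo_trylock(struct hwspinlock *lock)
{
	return 1;	/* a real driver touches hardware here */
}

static void demo_unlock(struct hwspinlock *lock)
{
}

static const struct hwspinlock_ops demo_hwspinlock_ops = {
	.trylock = demo_trylock,
	.unlock = demo_unlock,
};

static int demo_hwspinlock_probe(struct platform_device *pdev)
{
	struct hwspinlock_device *bank;

	bank = devm_kzalloc(&pdev->dev,
			    struct_size(bank, lock, DEMO_NUM_LOCKS),
			    GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	platform_set_drvdata(pdev, bank);

	/* The devres core unregisters the bank on unbind, so no
	 * .remove callback is needed for this. */
	return devm_hwspin_lock_register(&pdev->dev, bank,
					 &demo_hwspinlock_ops, 0,
					 DEMO_NUM_LOCKS);
}
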
diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c
index c8eacf4f9692..3ad0ce0da4d9 100644
--- a/drivers/hwspinlock/stm32_hwspinlock.c
+++ b/drivers/hwspinlock/stm32_hwspinlock.c
@@ -58,12 +58,10 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev)
{
struct stm32_hwspinlock *hw;
void __iomem *io_base;
- struct resource *res;
size_t array_size;
int i, ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(&pdev->dev, res);
+ io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index dc3f507e7562..a90d757f7043 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
-#ifdef CONFIG_CPU_PM
static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
@@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = {
static int etm4_cpu_pm_register(void)
{
- return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+
+ return 0;
}
static void etm4_cpu_pm_unregister(void)
{
- cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
}
-#else
-static int etm4_cpu_pm_register(void) { return 0; }
-static void etm4_cpu_pm_unregister(void) { }
-#endif
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
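
The etm4x hunk above swaps an #ifdef CONFIG_CPU_PM block for IS_ENABLED() checks, keeping the code visible to the compiler in every configuration. A minimal illustration of the idiom (demo_* names assumed):

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int demo_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			      void *v)
{
	return NOTIFY_OK;
}

static struct notifier_block demo_cpu_pm_nb = {
	.notifier_call = demo_cpu_pm_notify,
};

static int demo_cpu_pm_register(void)
{
	/* IS_ENABLED() is a compile-time constant, so the dead branch
	 * is discarded, but the code is still parsed in every config;
	 * unlike an #ifdef block, it cannot silently bit-rot. */
	if (IS_ENABLED(CONFIG_CPU_PM))
		return cpu_pm_register_notifier(&demo_cpu_pm_nb);

	return 0;
}
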
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 0dfd97bbde9e..ca232ec565e8 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -834,9 +834,6 @@ static irqreturn_t intel_th_irq(int irq, void *data)
ret |= d->irq(th->thdev[i]);
}
- if (ret == IRQ_NONE)
- pr_warn_ratelimited("nobody cared for irq\n");
-
return ret;
}
@@ -887,6 +884,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
if (th->irq == -1)
th->irq = devres[r].start;
+ th->num_irqs++;
break;
default:
dev_warn(dev, "Unknown resource type %lx\n",
@@ -940,6 +938,9 @@ void intel_th_free(struct intel_th *th)
th->num_thdevs = 0;
+ for (i = 0; i < th->num_irqs; i++)
+ devm_free_irq(th->dev, th->irq + i, th);
+
pm_runtime_get_sync(th->dev);
pm_runtime_forbid(th->dev);
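
intel_th_free() now releases each devm-requested IRQ explicitly, since devres would otherwise hold them until the parent device detaches. A sketch of that request/early-release pairing, with demo_* as placeholders:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *ctx)
{
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct device *dev, int irq, void *ctx)
{
	return devm_request_irq(dev, irq, demo_isr, 0, "demo", ctx);
}

static void demo_early_teardown(struct device *dev, int irq, void *ctx)
{
	/* devm would only free the IRQ when *dev* detaches; release it
	 * here so the handler cannot run against state freed next.
	 * devm_free_irq() also drops the devres record, so there is no
	 * double free at detach time. */
	devm_free_irq(dev, irq, ctx);
}
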
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 0df480072b6c..6f4f5486fe6d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -261,6 +261,7 @@ enum th_mmio_idx {
* @num_thdevs: number of devices in the @thdev array
* @num_resources: number of resources in the @resource array
* @irq: irq number
+ * @num_irqs: number of IRQs in use
* @id: this Intel TH controller's device ID in the system
* @major: device node major for output devices
*/
@@ -277,6 +278,7 @@ struct intel_th {
unsigned int num_thdevs;
unsigned int num_resources;
int irq;
+ int num_irqs;
int id;
int major;
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 6d240dfae9d9..8e48c7458aa3 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1676,10 +1676,13 @@ static int intel_th_msc_init(struct msc *msc)
return 0;
}
-static void msc_win_switch(struct msc *msc)
+static int msc_win_switch(struct msc *msc)
{
struct msc_window *first;
+ if (list_empty(&msc->win_list))
+ return -EINVAL;
+
first = list_first_entry(&msc->win_list, struct msc_window, entry);
if (msc_is_last_win(msc->cur_win))
@@ -1691,6 +1694,8 @@ static void msc_win_switch(struct msc *msc)
msc->base_addr = msc_win_base_dma(msc->cur_win);
intel_th_trace_switch(msc->thdev);
+
+ return 0;
}
/**
@@ -2025,16 +2030,15 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
if (val != 1)
return -EINVAL;
+ ret = -EINVAL;
mutex_lock(&msc->buf_mutex);
/*
* Window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full
	 * control over window switching.
*/
- if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
- ret = -ENOTSUPP;
- else
- msc_win_switch(msc);
+ if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
+ ret = msc_win_switch(msc);
mutex_unlock(&msc->buf_mutex);
return ret ? ret : size;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index ebf3e30e989a..e9d90b53bbc4 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -205,6 +205,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Comet Lake PCH-V */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Ice Lake NNPI */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -229,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Elkhart Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
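
The Comet Lake PCH-V and Elkhart Lake additions follow the usual sentinel-terminated PCI ID table shape. A generic sketch (the 0x1234 device ID and demo_* names are placeholders):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>

static const u32 demo_data;	/* stand-in per-device driver data */

static const struct pci_device_id demo_pci_id_table[] = {
	{
		/* hypothetical device */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234),
		.driver_data = (kernel_ulong_t)&demo_data,
	},
	{ 0 },	/* all-zero sentinel terminates the table */
};
MODULE_DEVICE_TABLE(pci, demo_pci_id_table);
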
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6a0aa76859f3..2ddca08f8a76 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -367,7 +367,8 @@ comment "I2C system bus drivers (mostly embedded / system-on-chip)"
config I2C_ALTERA
tristate "Altera Soft IP I2C"
- depends on (ARCH_SOCFPGA || NIOS2) && OF
+ depends on ARCH_SOCFPGA || NIOS2 || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for the
Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures.
@@ -387,7 +388,7 @@ config I2C_ASPEED
config I2C_AT91
tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
help
This supports the use of the I2C interface on Atmel AT91
processors.
@@ -440,7 +441,8 @@ config I2C_AXXIA
config I2C_BCM2835
tristate "Broadcom BCM2835 I2C controller"
- depends on ARCH_BCM2835 || ARCH_BRCMSTB
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
BCM2835 I2C controller.
@@ -463,8 +465,8 @@ config I2C_BCM_IPROC
config I2C_BCM_KONA
tristate "BCM Kona I2C adapter"
- depends on ARCH_BCM_MOBILE
- default y
+ depends on ARCH_BCM_MOBILE || COMPILE_TEST
+ default y if ARCH_BCM_MOBILE
help
If you say yes to this option, support will be included for the
I2C interface on the Broadcom Kona family of processors.
@@ -511,7 +513,7 @@ config I2C_CPM
config I2C_DAVINCI
tristate "DaVinci I2C driver"
- depends on ARCH_DAVINCI || ARCH_KEYSTONE
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST
help
Support for TI DaVinci I2C controller driver.
@@ -572,7 +574,7 @@ config I2C_DESIGNWARE_BAYTRAIL
config I2C_DIGICOLOR
tristate "Conexant Digicolor I2C driver"
- depends on ARCH_DIGICOLOR
+ depends on ARCH_DIGICOLOR || COMPILE_TEST
help
Support for Conexant Digicolor SoCs (CX92755) I2C controller driver.
@@ -610,11 +612,12 @@ config I2C_EMEV2
I2C interface on the Renesas Electronics EM/EV family of processors.
config I2C_EXYNOS5
- tristate "Exynos5 high-speed I2C driver"
- depends on ARCH_EXYNOS && OF
- default y
+ tristate "Exynos high-speed I2C driver"
+ depends on OF
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default y if ARCH_EXYNOS
help
- High-speed I2C controller on Exynos5 based Samsung SoCs.
+ High-speed I2C controller on Exynos5 and newer Samsung SoCs.
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
@@ -634,7 +637,7 @@ config I2C_GPIO_FAULT_INJECTOR
config I2C_HIGHLANDER
tristate "Highlander FPGA SMBus interface"
- depends on SH_HIGHLANDER
+ depends on SH_HIGHLANDER || COMPILE_TEST
help
If you say yes to this option, support will be included for
the SMBus interface located in the FPGA on various Highlander
@@ -686,7 +689,7 @@ config I2C_IMX_LPI2C
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
- depends on ARCH_IOP32X || ARCH_IXP4XX
+ depends on ARCH_IOP32X || ARCH_IXP4XX || COMPILE_TEST
help
Say Y here if you want to use the IIC bus controller on
the Intel IOPx3xx I/O Processors or IXP4xx Network Processors.
@@ -726,6 +729,7 @@ config I2C_LPC2K
config I2C_MESON
tristate "Amlogic Meson I2C controller"
depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
help
If you say yes to this option, support will be included for the
I2C interface on the Amlogic Meson family of SoCs.
@@ -759,7 +763,7 @@ config I2C_MT7621
config I2C_MV64XXX
tristate "Marvell mv64xxx I2C Controller"
- depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU
+ depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -770,7 +774,7 @@ config I2C_MV64XXX
config I2C_MXS
tristate "Freescale i.MX28 I2C interface"
- depends on SOC_IMX28
+ depends on SOC_IMX28 || COMPILE_TEST
select STMP_DEVICE
help
Say Y here if you want to use the I2C bus controller on
@@ -799,7 +803,7 @@ config I2C_OCORES
config I2C_OMAP
tristate "OMAP I2C adapter"
- depends on ARCH_OMAP || ARCH_K3
+ depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST
default y if MACH_OMAP_H3 || MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
@@ -833,7 +837,7 @@ config I2C_PCA_PLATFORM
config I2C_PMCMSP
tristate "PMC MSP I2C TWI Controller"
- depends on PMC_MSP
+ depends on PMC_MSP || COMPILE_TEST
help
This driver supports the PMC TWI controller on MSP devices.
@@ -842,7 +846,7 @@ config I2C_PMCMSP
config I2C_PNX
tristate "I2C bus support for Philips PNX and NXP LPC targets"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
help
This driver supports the Philips IP3204 I2C IP block master and/or
slave controller
@@ -863,7 +867,7 @@ config I2C_PUV3
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
- depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF)
+ depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF) || COMPILE_TEST
help
If you have devices in the PXA I2C bus, say yes to this option.
This driver can also be built as a module. If so, the module
@@ -932,11 +936,11 @@ config HAVE_S3C2410_I2C
respective Kconfig file.
config I2C_S3C2410
- tristate "S3C2410 I2C Driver"
- depends on HAVE_S3C2410_I2C
+ tristate "S3C/Exynos I2C Driver"
+ depends on HAVE_S3C2410_I2C || COMPILE_TEST
help
Say Y here to include support for I2C controller in the
- Samsung SoCs.
+ Samsung SoCs (S3C, S5Pv210, Exynos).
config I2C_SH7760
tristate "Renesas SH7760 I2C Controller"
@@ -971,7 +975,7 @@ config I2C_SIMTEC
config I2C_SIRF
tristate "CSR SiRFprimaII I2C interface"
- depends on ARCH_SIRF
+ depends on ARCH_SIRF || COMPILE_TEST
help
If you say yes to this option, support will be included for the
CSR SiRFprimaII I2C interface.
@@ -981,14 +985,14 @@ config I2C_SIRF
config I2C_SPRD
tristate "Spreadtrum I2C interface"
- depends on I2C=y && ARCH_SPRD
+ depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
help
If you say yes to this option, support will be included for the
Spreadtrum I2C interface.
config I2C_ST
tristate "STMicroelectronics SSC I2C support"
- depends on ARCH_STI
+ depends on ARCH_STI || COMPILE_TEST
help
Enable this option to add support for STMicroelectronics SoCs
hardware SSC (Synchronous Serial Controller) as an I2C controller.
@@ -1019,7 +1023,7 @@ config I2C_STM32F7
config I2C_STU300
tristate "ST Microelectronics DDC I2C interface"
- depends on MACH_U300
+ depends on MACH_U300 || COMPILE_TEST
default y if MACH_U300
help
If you say yes to this option, support will be included for the
@@ -1055,15 +1059,16 @@ config I2C_SYNQUACER
config I2C_TEGRA
tristate "NVIDIA Tegra internal I2C controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || (COMPILE_TEST && (ARC || ARM || ARM64 || M68K || RISCV || SUPERH || SPARC))
+ # COMPILE_TEST needs architectures with readsX()/writesX() primitives
help
If you say yes to this option, support will be included for the
I2C controller embedded in NVIDIA Tegra SOCs
config I2C_TEGRA_BPMP
tristate "NVIDIA Tegra BPMP I2C controller"
- depends on TEGRA_BPMP
- default y
+ depends on TEGRA_BPMP || COMPILE_TEST
+ default y if TEGRA_BPMP
help
If you say yes to this option, support will be included for the I2C
controller embedded in NVIDIA Tegra SoCs accessed via the BPMP.
@@ -1101,7 +1106,7 @@ config I2C_VERSATILE
config I2C_WMT
tristate "Wondermedia WM8xxx SoC I2C bus support"
- depends on ARCH_VT8500
+ depends on ARCH_VT8500 || COMPILE_TEST
help
Say yes if you want to support the I2C bus on Wondermedia 8xxx-series
SoCs.
@@ -1142,7 +1147,7 @@ config I2C_XILINX
config I2C_XLR
tristate "Netlogic XLR and Sigma Designs I2C support"
- depends on CPU_XLR || ARCH_TANGO
+ depends on CPU_XLR || ARCH_TANGO || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -1202,46 +1207,12 @@ config I2C_PARPORT
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
Basically any adapter using the parallel port as an I2C bus with
- no extra chipset is supported by this driver, or could be.
-
- This driver is a replacement for (and was inspired by) an older
- driver named i2c-philips-par. The new driver supports more devices,
- and makes it easier to add support for new devices.
-
- An adapter type parameter is now mandatory. Please read the file
- Documentation/i2c/busses/i2c-parport.rst for details.
-
- Another driver exists, named i2c-parport-light, which doesn't depend
- on the parport driver. This is meant for embedded systems. Don't say
- Y here if you intend to say Y or M there.
+ no extra chipset is supported by this driver, or could be. Please
+ read the file Documentation/i2c/busses/i2c-parport.rst for details.
This support is also available as a module. If so, the module
will be called i2c-parport.
-config I2C_PARPORT_LIGHT
- tristate "Parallel port adapter (light)"
- select I2C_ALGOBIT
- select I2C_SMBUS
- help
- This supports parallel port I2C adapters such as the ones made by
- Philips or Velleman, Analog Devices evaluation boards, and more.
- Basically any adapter using the parallel port as an I2C bus with
- no extra chipset is supported by this driver, or could be.
-
- This driver is a light version of i2c-parport. It doesn't depend
- on the parport driver, and uses direct I/O access instead. This
- might be preferred on embedded systems where wasting memory for
- the clean but heavy parport handling is not an option. The
- drawback is a reduced portability and the impossibility to
- daisy-chain other parallel port devices.
-
- Don't say Y here if you said Y or M to i2c-parport. Saying M to
- both is possible but both modules should not be loaded at the same
- time.
-
- This support is also available as a module. If so, the module
- will be called i2c-parport-light.
-
config I2C_ROBOTFUZZ_OSIF
tristate "RobotFuzz Open Source InterFace USB adapter"
depends on USB
@@ -1328,7 +1299,7 @@ config I2C_ICY
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
- depends on X86_64
+ depends on X86_64 || COMPILE_TEST
help
This exposes the Mellanox platform I2C busses to the linux I2C layer
for X86 based systems.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3ab8aebc39c9..25d60889713c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -128,7 +128,6 @@ obj-$(CONFIG_I2C_ZX2967) += i2c-zx2967.o
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
-obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index e13af4874976..3da1a8acecb5 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -66,55 +66,26 @@ static struct at91_twi_pdata at91rm9200_config = {
.clk_max_div = 5,
.clk_offset = 3,
.has_unre_flag = true,
- .has_alt_cmd = false,
- .has_hold_field = false,
- .has_dig_filtr = false,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9261_config = {
.clk_max_div = 5,
.clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
- .has_dig_filtr = false,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9260_config = {
.clk_max_div = 7,
.clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
- .has_dig_filtr = false,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
.clk_max_div = 7,
.clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
- .has_dig_filtr = false,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
.clk_max_div = 7,
.clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
- .has_dig_filtr = false,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -142,23 +113,13 @@ static const struct platform_device_id at91_twi_devtypes[] = {
static struct at91_twi_pdata at91sam9x5_config = {
.clk_max_div = 7,
.clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
- .has_dig_filtr = false,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static struct at91_twi_pdata sama5d4_config = {
.clk_max_div = 7,
.clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
.has_hold_field = true,
.has_dig_filtr = true,
- .has_adv_dig_filtr = false,
- .has_ana_filtr = false,
};
static struct at91_twi_pdata sama5d2_config = {
@@ -174,7 +135,7 @@ static struct at91_twi_pdata sama5d2_config = {
static struct at91_twi_pdata sam9x60_config = {
.clk_max_div = 7,
- .clk_offset = 4,
+ .clk_offset = 3,
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index e01b2b57e724..5ab901ad615d 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -58,6 +58,7 @@ struct bcm2835_i2c_dev {
struct i2c_adapter adapter;
struct completion completion;
struct i2c_msg *curr_msg;
+ struct clk *bus_clk;
int num_msgs;
u32 msg_err;
u8 *msg_buf;
@@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
struct resource *mem, *irq;
int ret;
struct i2c_adapter *adap;
- struct clk *bus_clk;
struct clk *mclk;
u32 bus_clk_rate;
@@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
return PTR_ERR(mclk);
}
- bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
+ i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
- if (IS_ERR(bus_clk)) {
+ if (IS_ERR(i2c_dev->bus_clk)) {
dev_err(&pdev->dev, "Could not register clock\n");
- return PTR_ERR(bus_clk);
+ return PTR_ERR(i2c_dev->bus_clk);
}
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
@@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
bus_clk_rate = 100000;
}
- ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate);
+ ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
if (ret < 0) {
dev_err(&pdev->dev, "Could not set clock frequency\n");
return ret;
}
- ret = clk_prepare_enable(bus_clk);
+ ret = clk_prepare_enable(i2c_dev->bus_clk);
if (ret) {
dev_err(&pdev->dev, "Couldn't prepare clock");
return ret;
@@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
static int bcm2835_i2c_remove(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div");
- clk_rate_exclusive_put(bus_clk);
- clk_disable_unprepare(bus_clk);
+ clk_rate_exclusive_put(i2c_dev->bus_clk);
+ clk_disable_unprepare(i2c_dev->bus_clk);
free_irq(i2c_dev->irq, i2c_dev);
i2c_del_adapter(&i2c_dev->adapter);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 9d71ce15db05..1105aee6634a 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -208,6 +208,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+ id->err_status = 0;
/* Handling nack and arbitration lost interrupt */
if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -241,10 +242,17 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
- *(id->p_recv_buf)++ =
- cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
- id->recv_count--;
- id->curr_recv_count--;
+ if (id->recv_count > 0) {
+ *(id->p_recv_buf)++ =
+ cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ id->recv_count--;
+ id->curr_recv_count--;
+ } else {
+ dev_err(id->adap.dev.parent,
+ "xfer_size reg rollover. xfer aborted!\n");
+ id->err_status |= CDNS_I2C_IXR_TO;
+ break;
+ }
if (cdns_is_holdquirk(id, hold_quirk))
break;
@@ -342,7 +350,7 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
}
/* Update the status for errors */
- id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
+ id->err_status |= isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
if (id->err_status)
status = IRQ_HANDLED;
@@ -500,7 +508,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
/* Update the transfercount register to zero */
cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET);
- /* Clear the interupt status register */
+ /* Clear the interrupt status register */
regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET);
/* Clear the status register */
@@ -921,17 +929,18 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(id->clk)) {
- dev_err(&pdev->dev, "input clock not found.\n");
+ if (PTR_ERR(id->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(id->clk);
}
ret = clk_prepare_enable(id->clk);
if (ret)
dev_err(&pdev->dev, "Unable to enable clock.\n");
- pm_runtime_enable(id->dev);
pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(id->dev);
pm_runtime_set_active(id->dev);
+ pm_runtime_enable(id->dev);
id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb;
if (clk_notifier_register(id->clk, &id->clk_rate_change_nb))
@@ -980,8 +989,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
err_clk_dis:
clk_disable_unprepare(id->clk);
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return ret;
}
@@ -997,10 +1006,13 @@ static int cdns_i2c_remove(struct platform_device *pdev)
{
struct cdns_i2c *id = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
i2c_del_adapter(&id->adap);
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
- pm_runtime_disable(&pdev->dev);
return 0;
}
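
The cadence hunks are about pm_runtime call ordering: mark the device active before enabling runtime PM in probe, and disable runtime PM before declaring the device suspended in remove. A condensed sketch of that ordering:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	/* ... clocks, resources, adapter setup ... */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);	/* record the state first */
	pm_runtime_enable(&pdev->dev);		/* only then hand over control */
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);		/* stop runtime PM first */
	pm_runtime_set_suspended(&pdev->dev);	/* then record the final state */
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	return 0;
}
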
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index b8fde61bb5d8..35e55feda763 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -388,9 +388,9 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
*/
if (acpi_dev_present("INT33FE", NULL, -1)) {
board_info.irq = adap->client_irq;
- adap->client = i2c_new_device(&adap->adapter, &board_info);
- if (!adap->client) {
- ret = -ENOMEM;
+ adap->client = i2c_new_client_device(&adap->adapter, &board_info);
+ if (IS_ERR(adap->client)) {
+ ret = PTR_ERR(adap->client);
goto del_adapter;
}
}
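
i2c_new_client_device() returns an ERR_PTR rather than NULL on failure, so the call site above can propagate the real error instead of a blanket -ENOMEM. The converted pattern, sketched with assumed names:

#include <linux/err.h>
#include <linux/i2c.h>

static int demo_attach_client(struct i2c_adapter *adap,
			      struct i2c_board_info const *info)
{
	struct i2c_client *client;

	client = i2c_new_client_device(adap, info);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* the real cause, not -ENOMEM */

	return 0;
}
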
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 958161c71985..790ea3fda693 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -273,6 +273,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->adap.dev.parent = &pdev->dev;
bus->adap.dev.of_node = pdev->dev.of_node;
bus->adap.retries = I2C_MAX_RETRIES;
+ ACPI_COMPANION_SET(&bus->adap.dev, ACPI_COMPANION(&pdev->dev));
err = i2c_add_adapter(&bus->adap);
if (err)
@@ -298,7 +299,7 @@ static const struct of_device_id cros_ec_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = {
- { "GOOG001A", 0 },
+ { "GOOG0012", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 16dd338877d0..3b7d58c2fe85 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -130,6 +130,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "APMC0D0F", 0 },
{ "HISI02A1", 0 },
{ "HISI02A2", 0 },
+ { "HISI02A3", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index ff340d7ae2e5..803dad70e2a7 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -322,7 +322,7 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
tmp |= (SMMR_MODE0 | SMMR_MODE1);
break;
default:
- dev_err(dev->dev, "unsupported xfer size %d\n", dev->buf_len);
+ dev_err(dev->dev, "unsupported xfer size %zu\n", dev->buf_len);
return -EINVAL;
}
@@ -369,7 +369,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
if (unlikely(!dev))
return -ENOMEM;
- dev->base = ioremap_nocache(res->start, resource_size(res));
+ dev->base = ioremap(res->start, resource_size(res));
if (unlikely(!dev->base)) {
ret = -ENXIO;
goto err;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f5e69fe56532..ca4f096fef74 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
* Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes
* Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes
* Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
+ * Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -244,6 +245,7 @@
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3
struct i801_mux_config {
char *gpio_chip;
@@ -1074,6 +1076,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
@@ -1142,7 +1145,7 @@ static void dmi_check_onboard_device(u8 type, const char *name,
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dmi_devices[i].i2c_addr;
strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
- i2c_new_device(adap, &info);
+ i2c_new_client_device(adap, &info);
break;
}
}
@@ -1296,7 +1299,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dell_lis3lv02d_devices[i].i2c_addr;
strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
- i2c_new_device(&priv->adapter, &info);
+ i2c_new_client_device(&priv->adapter, &info);
}
/* Register optional slaves */
@@ -1312,7 +1315,7 @@ static void i801_probe_optional_slaves(struct i801_priv *priv)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = apanel_addr;
strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
- i2c_new_device(&priv->adapter, &info);
+ i2c_new_client_device(&priv->adapter, &info);
}
if (dmi_name_in_vendors("FUJITSU"))
@@ -1742,6 +1745,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 38556381f4ca..2f8b8050a223 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev)
adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev,
"scl",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_scl))
- return PTR_ERR(adapter_data->gpio_scl);
+ if (IS_ERR(adapter_data->gpio_scl)) {
+ ret = PTR_ERR(adapter_data->gpio_scl);
+ goto free_both;
+ }
adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev,
"sda",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_sda))
- return PTR_ERR(adapter_data->gpio_sda);
+ if (IS_ERR(adapter_data->gpio_sda)) {
+ ret = PTR_ERR(adapter_data->gpio_sda);
+ goto free_both;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 25dcd73acd63..16a67a64284a 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2006 - 2009 Ingenic Semiconductor Inc.
* Copyright (C) 2015 Imagination Technologies
+ * Copyright (C) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/bitops.h>
@@ -17,6 +18,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -55,6 +57,7 @@
#define JZ4780_I2C_ACKGC 0x98
#define JZ4780_I2C_ENSTA 0x9C
#define JZ4780_I2C_SDAHD 0xD0
+#define X1000_I2C_SDAHD 0x7C
#define JZ4780_I2C_CTRL_STPHLD BIT(7)
#define JZ4780_I2C_CTRL_SLVDIS BIT(6)
@@ -73,6 +76,8 @@
#define JZ4780_I2C_STA_TFNF BIT(1)
#define JZ4780_I2C_STA_ACT BIT(0)
+#define X1000_I2C_DC_STOP BIT(9)
+
static const char * const jz4780_i2c_abrt_src[] = {
"ABRT_7B_ADDR_NOACK",
"ABRT_10ADDR1_NOACK",
@@ -130,18 +135,33 @@ static const char * const jz4780_i2c_abrt_src[] = {
#define JZ4780_I2CFLCNT_ADJUST(n) (((n) - 1) < 8 ? 8 : ((n) - 1))
#define JZ4780_I2C_FIFO_LEN 16
-#define TX_LEVEL 3
-#define RX_LEVEL (JZ4780_I2C_FIFO_LEN - TX_LEVEL - 1)
+
+#define X1000_I2C_FIFO_LEN 64
#define JZ4780_I2C_TIMEOUT 300
#define BUFSIZE 200
+enum ingenic_i2c_version {
+ ID_JZ4780,
+ ID_X1000,
+};
+
+/* ingenic_i2c_config: SoC specific config data. */
+struct ingenic_i2c_config {
+ enum ingenic_i2c_version version;
+
+ int fifosize;
+ int tx_level;
+ int rx_level;
+};
+
struct jz4780_i2c {
void __iomem *iomem;
int irq;
struct clk *clk;
struct i2c_adapter adap;
+ const struct ingenic_i2c_config *cdata;
/* lock to protect rbuf and wbuf between xfer_rd/wr and irq handler */
spinlock_t lock;
@@ -340,11 +360,18 @@ static int jz4780_i2c_set_speed(struct jz4780_i2c *i2c)
if (hold_time >= 0) {
/*i2c hold time enable */
- hold_time |= JZ4780_I2C_SDAHD_HDENB;
- jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time);
+ if (i2c->cdata->version >= ID_X1000) {
+ jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, hold_time);
+ } else {
+ hold_time |= JZ4780_I2C_SDAHD_HDENB;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, hold_time);
+ }
} else {
/* disable hold time */
- jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0);
+ if (i2c->cdata->version >= ID_X1000)
+ jz4780_i2c_writew(i2c, X1000_I2C_SDAHD, 0);
+ else
+ jz4780_i2c_writew(i2c, JZ4780_I2C_SDAHD, 0);
}
return 0;
@@ -359,9 +386,11 @@ static int jz4780_i2c_cleanup(struct jz4780_i2c *i2c)
spin_lock_irqsave(&i2c->lock, flags);
/* can send stop now if need */
- tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
- tmp &= ~JZ4780_I2C_CTRL_STPHLD;
- jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ if (i2c->cdata->version < ID_X1000) {
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ }
/* disable all interrupts first */
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0);
@@ -399,11 +428,19 @@ static int jz4780_i2c_prepare(struct jz4780_i2c *i2c)
return jz4780_i2c_enable(i2c);
}
-static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c, int cmd_count)
+static void jz4780_i2c_send_rcmd(struct jz4780_i2c *i2c,
+ int cmd_count,
+ int cmd_left)
{
int i;
- for (i = 0; i < cmd_count; i++)
+ for (i = 0; i < cmd_count - 1; i++)
+ jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ);
+
+ if ((cmd_left == 0) && (i2c->cdata->version >= ID_X1000))
+ jz4780_i2c_writew(i2c, JZ4780_I2C_DC,
+ JZ4780_I2C_DC_READ | X1000_I2C_DC_STOP);
+ else
jz4780_i2c_writew(i2c, JZ4780_I2C_DC, JZ4780_I2C_DC_READ);
}
@@ -458,37 +495,44 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
rd_left = i2c->rd_total_len - i2c->rd_data_xfered;
- if (rd_left <= JZ4780_I2C_FIFO_LEN)
+ if (rd_left <= i2c->cdata->fifosize)
jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, rd_left - 1);
}
if (intst & JZ4780_I2C_INTST_TXEMP) {
if (i2c->is_write == 0) {
int cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered;
- int max_send = (JZ4780_I2C_FIFO_LEN - 1)
+ int max_send = (i2c->cdata->fifosize - 1)
- (i2c->rd_cmd_xfered
- i2c->rd_data_xfered);
int cmd_to_send = min(cmd_left, max_send);
if (i2c->rd_cmd_xfered != 0)
cmd_to_send = min(cmd_to_send,
- JZ4780_I2C_FIFO_LEN
- - TX_LEVEL - 1);
+ i2c->cdata->fifosize
+ - i2c->cdata->tx_level - 1);
if (cmd_to_send) {
- jz4780_i2c_send_rcmd(i2c, cmd_to_send);
i2c->rd_cmd_xfered += cmd_to_send;
+ cmd_left = i2c->rd_total_len -
+ i2c->rd_cmd_xfered;
+ jz4780_i2c_send_rcmd(i2c,
+ cmd_to_send, cmd_left);
+
}
- cmd_left = i2c->rd_total_len - i2c->rd_cmd_xfered;
if (cmd_left == 0) {
intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM);
intmsk &= ~JZ4780_I2C_INTM_MTXEMP;
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, intmsk);
- tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
- tmp &= ~JZ4780_I2C_CTRL_STPHLD;
- jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ if (i2c->cdata->version < ID_X1000) {
+ tmp = jz4780_i2c_readw(i2c,
+ JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c,
+ JZ4780_I2C_CTRL, tmp);
+ }
}
} else {
unsigned short data;
@@ -497,23 +541,26 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
while ((i2c_sta & JZ4780_I2C_STA_TFNF) &&
- (i2c->wt_len > 0)) {
+ (i2c->wt_len > 0)) {
i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
data = *i2c->wbuf;
data &= ~JZ4780_I2C_DC_READ;
- jz4780_i2c_writew(i2c, JZ4780_I2C_DC,
- data);
+ if ((!i2c->stop_hold) && (i2c->cdata->version >=
+ ID_X1000))
+ data |= X1000_I2C_DC_STOP;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
i2c->wbuf++;
i2c->wt_len--;
}
if (i2c->wt_len == 0) {
- if (!i2c->stop_hold) {
+ if ((!i2c->stop_hold) && (i2c->cdata->version <
+ ID_X1000)) {
tmp = jz4780_i2c_readw(i2c,
- JZ4780_I2C_CTRL);
+ JZ4780_I2C_CTRL);
tmp &= ~JZ4780_I2C_CTRL_STPHLD;
- jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL,
- tmp);
+ jz4780_i2c_writew(i2c,
+ JZ4780_I2C_CTRL, tmp);
}
jz4780_i2c_trans_done(i2c);
@@ -567,20 +614,22 @@ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
i2c->rd_data_xfered = 0;
i2c->rd_cmd_xfered = 0;
- if (len <= JZ4780_I2C_FIFO_LEN)
+ if (len <= i2c->cdata->fifosize)
jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, len - 1);
else
- jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, RX_LEVEL);
+ jz4780_i2c_writew(i2c, JZ4780_I2C_RXTL, i2c->cdata->rx_level);
- jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL);
+ jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level);
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM,
JZ4780_I2C_INTM_MRXFL | JZ4780_I2C_INTM_MTXEMP
| JZ4780_I2C_INTM_MTXABT | JZ4780_I2C_INTM_MRXOF);
- tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
- tmp |= JZ4780_I2C_CTRL_STPHLD;
- jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ if (i2c->cdata->version < ID_X1000) {
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp |= JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ }
spin_unlock_irqrestore(&i2c->lock, flags);
@@ -626,14 +675,16 @@ static inline int jz4780_i2c_xfer_write(struct jz4780_i2c *i2c,
i2c->wbuf = buf;
i2c->wt_len = len;
- jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, TX_LEVEL);
+ jz4780_i2c_writew(i2c, JZ4780_I2C_TXTL, i2c->cdata->tx_level);
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, JZ4780_I2C_INTM_MTXEMP
| JZ4780_I2C_INTM_MTXABT);
- tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
- tmp |= JZ4780_I2C_CTRL_STPHLD;
- jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ if (i2c->cdata->version < ID_X1000) {
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp |= JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ }
spin_unlock_irqrestore(&i2c->lock, flags);
@@ -716,8 +767,25 @@ static const struct i2c_algorithm jz4780_i2c_algorithm = {
.functionality = jz4780_i2c_functionality,
};
+static const struct ingenic_i2c_config jz4780_i2c_config = {
+ .version = ID_JZ4780,
+
+ .fifosize = JZ4780_I2C_FIFO_LEN,
+ .tx_level = JZ4780_I2C_FIFO_LEN / 2,
+ .rx_level = JZ4780_I2C_FIFO_LEN / 2 - 1,
+};
+
+static const struct ingenic_i2c_config x1000_i2c_config = {
+ .version = ID_X1000,
+
+ .fifosize = X1000_I2C_FIFO_LEN,
+ .tx_level = X1000_I2C_FIFO_LEN / 2,
+ .rx_level = X1000_I2C_FIFO_LEN / 2 - 1,
+};
+
static const struct of_device_id jz4780_i2c_of_matches[] = {
- { .compatible = "ingenic,jz4780-i2c", },
+ { .compatible = "ingenic,jz4780-i2c", .data = &jz4780_i2c_config },
+ { .compatible = "ingenic,x1000-i2c", .data = &x1000_i2c_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
@@ -734,6 +802,12 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
+ i2c->cdata = device_get_match_data(&pdev->dev);
+ if (!i2c->cdata) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &jz4780_i2c_algorithm;
i2c->adap.algo_data = i2c;
@@ -777,9 +851,11 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
- tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
- tmp &= ~JZ4780_I2C_CTRL_STPHLD;
- jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ if (i2c->cdata->version < ID_X1000) {
+ tmp = jz4780_i2c_readw(i2c, JZ4780_I2C_CTRL);
+ tmp &= ~JZ4780_I2C_CTRL_STPHLD;
+ jz4780_i2c_writew(i2c, JZ4780_I2C_CTRL, tmp);
+ }
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
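
The jz4780 driver now keys SoC differences off per-compatible config data rather than compile-time constants. A minimal sketch of the of_device_id .data plus device_get_match_data() pattern, with invented names:

#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_config {
	int fifosize;
};

static const struct demo_config demo_v1_config = { .fifosize = 16 };
static const struct demo_config demo_v2_config = { .fifosize = 64 };

static const struct of_device_id demo_of_matches[] = {
	{ .compatible = "vendor,demo-v1", .data = &demo_v1_config },
	{ .compatible = "vendor,demo-v2", .data = &demo_v2_config },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_config *cfg;

	/* Returns the .data of whichever compatible matched. */
	cfg = device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	dev_info(&pdev->dev, "FIFO depth %d\n", cfg->fifosize);
	return 0;
}
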
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 1e2647f9a2a7..06b3bed78421 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -213,6 +214,30 @@ static void meson_i2c_prepare_xfer(struct meson_i2c *i2c)
writel(i2c->tokens[1], i2c->regs + REG_TOK_LIST1);
}
+static void meson_i2c_transfer_complete(struct meson_i2c *i2c, u32 ctrl)
+{
+ if (ctrl & REG_CTRL_ERROR) {
+ /*
+ * The bit is set when the IGNORE_NAK bit is cleared
+ * and the device didn't respond. In this case, the
+ * I2C controller automatically generates a STOP
+ * condition.
+ */
+ dev_dbg(i2c->dev, "error bit set\n");
+ i2c->error = -ENXIO;
+ i2c->state = STATE_IDLE;
+ } else {
+ if (i2c->state == STATE_READ && i2c->count)
+ meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos,
+ i2c->count);
+
+ i2c->pos += i2c->count;
+
+ if (i2c->pos >= i2c->msg->len)
+ i2c->state = STATE_IDLE;
+ }
+}
+
static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
{
struct meson_i2c *i2c = dev_id;
@@ -232,27 +257,9 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
return IRQ_NONE;
}
- if (ctrl & REG_CTRL_ERROR) {
- /*
- * The bit is set when the IGNORE_NAK bit is cleared
- * and the device didn't respond. In this case, the
- * I2C controller automatically generates a STOP
- * condition.
- */
- dev_dbg(i2c->dev, "error bit set\n");
- i2c->error = -ENXIO;
- i2c->state = STATE_IDLE;
- complete(&i2c->done);
- goto out;
- }
-
- if (i2c->state == STATE_READ && i2c->count)
- meson_i2c_get_data(i2c, i2c->msg->buf + i2c->pos, i2c->count);
+ meson_i2c_transfer_complete(i2c, ctrl);
- i2c->pos += i2c->count;
-
- if (i2c->pos >= i2c->msg->len) {
- i2c->state = STATE_IDLE;
+ if (i2c->state == STATE_IDLE) {
complete(&i2c->done);
goto out;
}
@@ -279,10 +286,11 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg)
}
static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
- int last)
+ int last, bool atomic)
{
unsigned long time_left, flags;
int ret = 0;
+ u32 ctrl;
i2c->msg = msg;
i2c->last = last;
@@ -300,13 +308,24 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
i2c->state = (msg->flags & I2C_M_RD) ? STATE_READ : STATE_WRITE;
meson_i2c_prepare_xfer(i2c);
- reinit_completion(&i2c->done);
+
+ if (!atomic)
+ reinit_completion(&i2c->done);
/* Start the transfer */
meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, REG_CTRL_START);
- time_left = msecs_to_jiffies(I2C_TIMEOUT_MS);
- time_left = wait_for_completion_timeout(&i2c->done, time_left);
+ if (atomic) {
+ ret = readl_poll_timeout_atomic(i2c->regs + REG_CTRL, ctrl,
+ !(ctrl & REG_CTRL_STATUS),
+ 10, I2C_TIMEOUT_MS * 1000);
+ } else {
+ time_left = msecs_to_jiffies(I2C_TIMEOUT_MS);
+ time_left = wait_for_completion_timeout(&i2c->done, time_left);
+
+ if (!time_left)
+ ret = -ETIMEDOUT;
+ }
/*
* Protect access to i2c struct and registers from interrupt
@@ -315,13 +334,14 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
*/
spin_lock_irqsave(&i2c->lock, flags);
+ if (atomic && !ret)
+ meson_i2c_transfer_complete(i2c, ctrl);
+
/* Abort any active operation */
meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
- if (!time_left) {
+ if (ret)
i2c->state = STATE_IDLE;
- ret = -ETIMEDOUT;
- }
if (i2c->error)
ret = i2c->error;
@@ -331,8 +351,8 @@ static int meson_i2c_xfer_msg(struct meson_i2c *i2c, struct i2c_msg *msg,
return ret;
}
-static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
+static int meson_i2c_xfer_messages(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num, bool atomic)
{
struct meson_i2c *i2c = adap->algo_data;
int i, ret = 0;
@@ -340,7 +360,7 @@ static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
clk_enable(i2c->clk);
for (i = 0; i < num; i++) {
- ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1);
+ ret = meson_i2c_xfer_msg(i2c, msgs + i, i == num - 1, atomic);
if (ret)
break;
}
@@ -350,14 +370,27 @@ static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
return ret ?: i;
}
+static int meson_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ return meson_i2c_xfer_messages(adap, msgs, num, false);
+}
+
+static int meson_i2c_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ return meson_i2c_xfer_messages(adap, msgs, num, true);
+}
+
static u32 meson_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm meson_i2c_algorithm = {
- .master_xfer = meson_i2c_xfer,
- .functionality = meson_i2c_func,
+ .master_xfer = meson_i2c_xfer,
+ .master_xfer_atomic = meson_i2c_xfer_atomic,
+ .functionality = meson_i2c_func,
};
static int meson_i2c_probe(struct platform_device *pdev)
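
[Aside: master_xfer_atomic is the i2c_algorithm hook the core falls back to when a transfer must run with interrupts disabled, for instance a PMIC write during shutdown, so the driver must poll instead of sleeping on a completion. A condensed sketch of the split introduced above, with illustrative register names rather than the actual meson register map:]

#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>

#define MY_REG_CTRL	0x00
#define MY_CTRL_BUSY	BIT(0)
#define MY_TIMEOUT_US	(500 * 1000)

struct my_i2c {
	void __iomem *regs;
};

static int my_xfer_common(struct i2c_adapter *adap, struct i2c_msg *msgs,
			  int num, bool atomic)
{
	struct my_i2c *i2c = adap->algo_data;
	u32 ctrl;

	/* ... program and start the transfer here ... */

	if (atomic)	/* IRQs may be off: busy-poll the status bit */
		return readl_poll_timeout_atomic(i2c->regs + MY_REG_CTRL,
						 ctrl,
						 !(ctrl & MY_CTRL_BUSY),
						 10, MY_TIMEOUT_US);

	/* normal path: sleep on the IRQ-driven completion instead */
	return 0;
}

static int my_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return my_xfer_common(adap, msgs, num, false) ?: num;
}

static int my_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs,
			  int num)
{
	return my_xfer_common(adap, msgs, num, true) ?: num;
}

static u32 my_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm my_algo = {
	.master_xfer		= my_xfer,
	.master_xfer_atomic	= my_xfer_atomic,
	.functionality		= my_func,
};
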
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index a5a95ea5b81a..febb7c7ea72b 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -901,14 +901,13 @@ mv64xxx_i2c_probe(struct platform_device *pd)
/* Not all platforms have clocks */
drv_data->clk = devm_clk_get(&pd->dev, NULL);
- if (IS_ERR(drv_data->clk) && PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+ if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(drv_data->clk))
clk_prepare_enable(drv_data->clk);
drv_data->reg_clk = devm_clk_get(&pd->dev, "reg");
- if (IS_ERR(drv_data->reg_clk) &&
- PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
+ if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(drv_data->reg_clk))
clk_prepare_enable(drv_data->reg_clk);
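
[Aside: the simplification above works because PTR_ERR() is just a cast of the pointer value, so comparing it against one specific errno can only be true for that exact error pointer; the separate IS_ERR() test adds nothing. A sketch of the resulting optional-clock idiom, under illustrative names:]

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_enable_optional_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (PTR_ERR(*clk) == -EPROBE_DEFER)	/* only ERR_PTR(-EPROBE_DEFER) matches */
		return -EPROBE_DEFER;
	if (!IS_ERR(*clk))	/* any other error: treat the clock as absent */
		clk_prepare_enable(*clk);
	return 0;
}
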
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 5a1235fd86bb..62e18b4db0ed 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -280,9 +280,9 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
i2cd->gpu_ccgx_ucsi->addr = 0x8;
i2cd->gpu_ccgx_ucsi->irq = irq;
i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
- i2cd->ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
- if (!i2cd->ccgx_client)
- return -ENODEV;
+ i2cd->ccgx_client = i2c_new_client_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+ if (IS_ERR(i2cd->ccgx_client))
+ return PTR_ERR(i2cd->ccgx_client);
return 0;
}
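
[Aside: this hunk is one instance of a conversion visible throughout the rest of this patch: i2c_new_device() returned NULL on failure, while its replacement i2c_new_client_device() returns an ERR_PTR, letting callers propagate the real errno. Minimal sketch with an illustrative device name and address:]

#include <linux/err.h>
#include <linux/i2c.h>

static int my_add_client(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("my-chip", 0x50),	/* illustrative */
	};
	struct i2c_client *client;

	client = i2c_new_client_device(adap, &info);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* e.g. -EBUSY if the address is taken */
	return 0;
}
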
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ca8b3ecfa93d..f5fc75b65a19 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -731,7 +731,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
/* add in known devices to the bus */
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
- i2c_new_device(&i2c->adap, pdata->devices + i);
+ i2c_new_client_device(&i2c->adap, pdata->devices + i);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
deleted file mode 100644
index 00f6aaf22cfc..000000000000
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* ------------------------------------------------------------------------ *
- * i2c-parport-light.c I2C bus over parallel port *
- * ------------------------------------------------------------------------ *
- Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
-
- Based on older i2c-velleman.c driver
- Copyright (C) 1995-2000 Simon G. Vogl
- With some changes from:
- Frodo Looijaard <frodol@dds.nl>
- Kyösti Mälkki <kmalkki@cc.hut.fi>
-
- * ------------------------------------------------------------------------ */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/i2c-smbus.h>
-#include <linux/io.h>
-#include "i2c-parport.h"
-
-#define DEFAULT_BASE 0x378
-#define DRVNAME "i2c-parport-light"
-
-static struct platform_device *pdev;
-
-static u16 base;
-module_param_hw(base, ushort, ioport, 0);
-MODULE_PARM_DESC(base, "Base I/O address");
-
-static int irq;
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "IRQ (optional)");
-
-/* ----- Low-level parallel port access ----------------------------------- */
-
-static inline void port_write(unsigned char p, unsigned char d)
-{
- outb(d, base+p);
-}
-
-static inline unsigned char port_read(unsigned char p)
-{
- return inb(base+p);
-}
-
-/* ----- Unified line operation functions --------------------------------- */
-
-static inline void line_set(int state, const struct lineop *op)
-{
- u8 oldval = port_read(op->port);
-
- /* Touch only the bit(s) needed */
- if ((op->inverted && !state) || (!op->inverted && state))
- port_write(op->port, oldval | op->val);
- else
- port_write(op->port, oldval & ~op->val);
-}
-
-static inline int line_get(const struct lineop *op)
-{
- u8 oldval = port_read(op->port);
-
- return ((op->inverted && (oldval & op->val) != op->val)
- || (!op->inverted && (oldval & op->val) == op->val));
-}
-
-/* ----- I2C algorithm call-back functions and structures ----------------- */
-
-static void parport_setscl(void *data, int state)
-{
- line_set(state, &adapter_parm[type].setscl);
-}
-
-static void parport_setsda(void *data, int state)
-{
- line_set(state, &adapter_parm[type].setsda);
-}
-
-static int parport_getscl(void *data)
-{
- return line_get(&adapter_parm[type].getscl);
-}
-
-static int parport_getsda(void *data)
-{
- return line_get(&adapter_parm[type].getsda);
-}
-
-/* Encapsulate the functions above in the correct structure
- Note that getscl will be set to NULL by the attaching code for adapters
- that cannot read SCL back */
-static struct i2c_algo_bit_data parport_algo_data = {
- .setsda = parport_setsda,
- .setscl = parport_setscl,
- .getsda = parport_getsda,
- .getscl = parport_getscl,
- .udelay = 50,
- .timeout = HZ,
-};
-
-/* ----- Driver registration ---------------------------------------------- */
-
-static struct i2c_adapter parport_adapter = {
- .owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON,
- .algo_data = &parport_algo_data,
- .name = "Parallel port adapter (light)",
-};
-
-/* SMBus alert support */
-static struct i2c_smbus_alert_setup alert_data = {
-};
-static struct i2c_client *ara;
-static struct lineop parport_ctrl_irq = {
- .val = (1 << 4),
- .port = PORT_CTRL,
-};
-
-static int i2c_parport_probe(struct platform_device *pdev)
-{
- int err;
-
- /* Reset hardware to a sane state (SCL and SDA high) */
- parport_setsda(NULL, 1);
- parport_setscl(NULL, 1);
- /* Other init if needed (power on...) */
- if (adapter_parm[type].init.val) {
- line_set(1, &adapter_parm[type].init);
- /* Give powered devices some time to settle */
- msleep(100);
- }
-
- parport_adapter.dev.parent = &pdev->dev;
- err = i2c_bit_add_bus(&parport_adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to register with I2C\n");
- return err;
- }
-
- /* Setup SMBus alert if supported */
- if (adapter_parm[type].smbus_alert && irq) {
- alert_data.irq = irq;
- ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
- if (ara)
- line_set(1, &parport_ctrl_irq);
- else
- dev_warn(&pdev->dev, "Failed to register ARA client\n");
- }
-
- return 0;
-}
-
-static int i2c_parport_remove(struct platform_device *pdev)
-{
- if (ara) {
- line_set(0, &parport_ctrl_irq);
- i2c_unregister_device(ara);
- ara = NULL;
- }
- i2c_del_adapter(&parport_adapter);
-
- /* Un-init if needed (power off...) */
- if (adapter_parm[type].init.val)
- line_set(0, &adapter_parm[type].init);
-
- return 0;
-}
-
-static struct platform_driver i2c_parport_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = i2c_parport_probe,
- .remove = i2c_parport_remove,
-};
-
-static int __init i2c_parport_device_add(u16 address)
-{
- int err;
-
- pdev = platform_device_alloc(DRVNAME, -1);
- if (!pdev) {
- err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
- goto exit;
- }
-
- err = platform_device_add(pdev);
- if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
- goto exit_device_put;
- }
-
- return 0;
-
-exit_device_put:
- platform_device_put(pdev);
-exit:
- return err;
-}
-
-static int __init i2c_parport_init(void)
-{
- int err;
-
- if (type < 0) {
- printk(KERN_ERR DRVNAME ": adapter type unspecified\n");
- return -ENODEV;
- }
-
- if (type >= ARRAY_SIZE(adapter_parm)) {
- printk(KERN_ERR DRVNAME ": invalid type (%d)\n", type);
- return -ENODEV;
- }
-
- if (base == 0) {
- pr_info(DRVNAME ": using default base 0x%x\n", DEFAULT_BASE);
- base = DEFAULT_BASE;
- }
-
- if (!request_region(base, 3, DRVNAME))
- return -EBUSY;
-
- if (irq != 0)
- pr_info(DRVNAME ": using irq %d\n", irq);
-
- if (!adapter_parm[type].getscl.val)
- parport_algo_data.getscl = NULL;
-
- /* Sets global pdev as a side effect */
- err = i2c_parport_device_add(base);
- if (err)
- goto exit_release;
-
- err = platform_driver_register(&i2c_parport_driver);
- if (err)
- goto exit_device;
-
- return 0;
-
-exit_device:
- platform_device_unregister(pdev);
-exit_release:
- release_region(base, 3);
- return err;
-}
-
-static void __exit i2c_parport_exit(void)
-{
- platform_driver_unregister(&i2c_parport_driver);
- platform_device_unregister(pdev);
- release_region(base, 3);
-}
-
-MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
-MODULE_DESCRIPTION("I2C bus over parallel port (light)");
-MODULE_LICENSE("GPL");
-
-module_init(i2c_parport_init);
-module_exit(i2c_parport_exit);
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index e8ed882de402..81eb441b2387 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -25,7 +25,90 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include "i2c-parport.h"
+
+#define PORT_DATA 0
+#define PORT_STAT 1
+#define PORT_CTRL 2
+
+struct lineop {
+ u8 val;
+ u8 port;
+ u8 inverted;
+};
+
+struct adapter_parm {
+ struct lineop setsda;
+ struct lineop setscl;
+ struct lineop getsda;
+ struct lineop getscl;
+ struct lineop init;
+ unsigned int smbus_alert:1;
+};
+
+static const struct adapter_parm adapter_parm[] = {
+ /* type 0: Philips adapter */
+ {
+ .setsda = { 0x80, PORT_DATA, 1 },
+ .setscl = { 0x08, PORT_CTRL, 0 },
+ .getsda = { 0x80, PORT_STAT, 0 },
+ .getscl = { 0x08, PORT_STAT, 0 },
+ },
+ /* type 1: home brew teletext adapter */
+ {
+ .setsda = { 0x02, PORT_DATA, 0 },
+ .setscl = { 0x01, PORT_DATA, 0 },
+ .getsda = { 0x80, PORT_STAT, 1 },
+ },
+ /* type 2: Velleman K8000 adapter */
+ {
+ .setsda = { 0x02, PORT_CTRL, 1 },
+ .setscl = { 0x08, PORT_CTRL, 1 },
+ .getsda = { 0x10, PORT_STAT, 0 },
+ },
+ /* type 3: ELV adapter */
+ {
+ .setsda = { 0x02, PORT_DATA, 1 },
+ .setscl = { 0x01, PORT_DATA, 1 },
+ .getsda = { 0x40, PORT_STAT, 1 },
+ .getscl = { 0x08, PORT_STAT, 1 },
+ },
+ /* type 4: ADM1032 evaluation board */
+ {
+ .setsda = { 0x02, PORT_DATA, 1 },
+ .setscl = { 0x01, PORT_DATA, 1 },
+ .getsda = { 0x10, PORT_STAT, 1 },
+ .init = { 0xf0, PORT_DATA, 0 },
+ .smbus_alert = 1,
+ },
+ /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
+ {
+ .setsda = { 0x02, PORT_DATA, 1 },
+ .setscl = { 0x01, PORT_DATA, 1 },
+ .getsda = { 0x10, PORT_STAT, 1 },
+ },
+ /* type 6: Barco LPT->DVI (K5800236) adapter */
+ {
+ .setsda = { 0x02, PORT_DATA, 1 },
+ .setscl = { 0x01, PORT_DATA, 1 },
+ .getsda = { 0x20, PORT_STAT, 0 },
+ .getscl = { 0x40, PORT_STAT, 0 },
+ .init = { 0xfc, PORT_DATA, 0 },
+ },
+ /* type 7: One For All JP1 parallel port adapter */
+ {
+ .setsda = { 0x01, PORT_DATA, 0 },
+ .setscl = { 0x02, PORT_DATA, 0 },
+ .getsda = { 0x80, PORT_STAT, 1 },
+ .init = { 0x04, PORT_DATA, 1 },
+ },
+ /* type 8: VCT-jig */
+ {
+ .setsda = { 0x04, PORT_DATA, 1 },
+ .setscl = { 0x01, PORT_DATA, 1 },
+ .getsda = { 0x40, PORT_STAT, 0 },
+ .getscl = { 0x80, PORT_STAT, 1 },
+ },
+};
/* ----- Device list ------------------------------------------------------ */
@@ -40,9 +123,30 @@ struct i2c_par {
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(adapter_list_lock);
+
#define MAX_DEVICE 4
static int parport[MAX_DEVICE] = {0, -1, -1, -1};
+module_param_array(parport, int, NULL, 0);
+MODULE_PARM_DESC(parport,
+ "List of parallel ports to bind to, by index.\n"
+ " At most " __stringify(MAX_DEVICE) " devices are supported.\n"
+ " Default is one device connected to parport0.\n"
+);
+static int type = -1;
+module_param(type, int, 0);
+MODULE_PARM_DESC(type,
+ "Type of adapter:\n"
+ " 0 = Philips adapter\n"
+ " 1 = home brew teletext adapter\n"
+ " 2 = Velleman K8000 adapter\n"
+ " 3 = ELV adapter\n"
+ " 4 = ADM1032 evaluation board\n"
+ " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
+ " 6 = Barco LPT->DVI (K5800236) adapter\n"
+ " 7 = One For All JP1 parallel port adapter\n"
+ " 8 = VCT-jig\n"
+);
/* ----- Low-level parallel port access ----------------------------------- */
@@ -311,12 +415,5 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("I2C bus over parallel port");
MODULE_LICENSE("GPL");
-module_param_array(parport, int, NULL, 0);
-MODULE_PARM_DESC(parport,
- "List of parallel ports to bind to, by index.\n"
- " Atmost " __stringify(MAX_DEVICE) " devices are supported.\n"
- " Default is one device connected to parport0.\n"
-);
-
module_init(i2c_parport_init);
module_exit(i2c_parport_exit);
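
[Aside: with i2c-parport-light deleted, i2c-parport.c is the only remaining user of i2c-parport.h, so the adapter table and the module parameters move into the .c file; static definitions in a shared header would otherwise be duplicated in every includer. For reference, a stripped-down sketch of the module-parameter machinery involved, with illustrative my_* names:]

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>

#define MY_MAX_DEVICE 4

static int my_parport[MY_MAX_DEVICE] = { 0, -1, -1, -1 };
module_param_array(my_parport, int, NULL, 0);
MODULE_PARM_DESC(my_parport,
	"List of parallel ports to bind to, by index.\n"
	" At most " __stringify(MY_MAX_DEVICE) " devices are supported.\n");

static int my_type = -1;
module_param(my_type, int, 0);
MODULE_PARM_DESC(my_type, "Adapter type, see the table in the driver");
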
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
deleted file mode 100644
index 3b32d92b1264..000000000000
--- a/drivers/i2c/busses/i2c-parport.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* ------------------------------------------------------------------------ *
- * i2c-parport.h I2C bus over parallel port *
- * ------------------------------------------------------------------------ *
- Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
-
- * ------------------------------------------------------------------------ */
-
-#define PORT_DATA 0
-#define PORT_STAT 1
-#define PORT_CTRL 2
-
-struct lineop {
- u8 val;
- u8 port;
- u8 inverted;
-};
-
-struct adapter_parm {
- struct lineop setsda;
- struct lineop setscl;
- struct lineop getsda;
- struct lineop getscl;
- struct lineop init;
- unsigned int smbus_alert:1;
-};
-
-static const struct adapter_parm adapter_parm[] = {
- /* type 0: Philips adapter */
- {
- .setsda = { 0x80, PORT_DATA, 1 },
- .setscl = { 0x08, PORT_CTRL, 0 },
- .getsda = { 0x80, PORT_STAT, 0 },
- .getscl = { 0x08, PORT_STAT, 0 },
- },
- /* type 1: home brew teletext adapter */
- {
- .setsda = { 0x02, PORT_DATA, 0 },
- .setscl = { 0x01, PORT_DATA, 0 },
- .getsda = { 0x80, PORT_STAT, 1 },
- },
- /* type 2: Velleman K8000 adapter */
- {
- .setsda = { 0x02, PORT_CTRL, 1 },
- .setscl = { 0x08, PORT_CTRL, 1 },
- .getsda = { 0x10, PORT_STAT, 0 },
- },
- /* type 3: ELV adapter */
- {
- .setsda = { 0x02, PORT_DATA, 1 },
- .setscl = { 0x01, PORT_DATA, 1 },
- .getsda = { 0x40, PORT_STAT, 1 },
- .getscl = { 0x08, PORT_STAT, 1 },
- },
- /* type 4: ADM1032 evaluation board */
- {
- .setsda = { 0x02, PORT_DATA, 1 },
- .setscl = { 0x01, PORT_DATA, 1 },
- .getsda = { 0x10, PORT_STAT, 1 },
- .init = { 0xf0, PORT_DATA, 0 },
- .smbus_alert = 1,
- },
- /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
- {
- .setsda = { 0x02, PORT_DATA, 1 },
- .setscl = { 0x01, PORT_DATA, 1 },
- .getsda = { 0x10, PORT_STAT, 1 },
- },
- /* type 6: Barco LPT->DVI (K5800236) adapter */
- {
- .setsda = { 0x02, PORT_DATA, 1 },
- .setscl = { 0x01, PORT_DATA, 1 },
- .getsda = { 0x20, PORT_STAT, 0 },
- .getscl = { 0x40, PORT_STAT, 0 },
- .init = { 0xfc, PORT_DATA, 0 },
- },
- /* type 7: One For All JP1 parallel port adapter */
- {
- .setsda = { 0x01, PORT_DATA, 0 },
- .setscl = { 0x02, PORT_DATA, 0 },
- .getsda = { 0x80, PORT_STAT, 1 },
- .init = { 0x04, PORT_DATA, 1 },
- },
- /* type 8: VCT-jig */
- {
- .setsda = { 0x04, PORT_DATA, 1 },
- .setscl = { 0x01, PORT_DATA, 1 },
- .getsda = { 0x40, PORT_STAT, 0 },
- .getscl = { 0x80, PORT_STAT, 1 },
- },
-};
-
-static int type = -1;
-module_param(type, int, 0);
-MODULE_PARM_DESC(type,
- "Type of adapter:\n"
- " 0 = Philips adapter\n"
- " 1 = home brew teletext adapter\n"
- " 2 = Velleman K8000 adapter\n"
- " 3 = ELV adapter\n"
- " 4 = ADM1032 evaluation board\n"
- " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
- " 6 = Barco LPT->DVI (K5800236) adapter\n"
- " 7 = One For All JP1 parallel port adapter\n"
- " 8 = VCT-jig\n"
-);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 0829cb696d9d..5d89c7c1b3a8 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -274,18 +274,18 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
if (!request_mem_region(res->start, resource_size(res),
pldev->name)) {
dev_err(&pldev->dev,
- "Unable to get memory/io address region 0x%08x\n",
- res->start);
+ "Unable to get memory/io address region %pap\n",
+ &res->start);
rc = -EBUSY;
goto ret_err;
}
/* remap the memory */
- pmcmsptwi_data.iobase = ioremap_nocache(res->start,
+ pmcmsptwi_data.iobase = ioremap(res->start,
resource_size(res));
if (!pmcmsptwi_data.iobase) {
dev_err(&pldev->dev,
- "Unable to ioremap address 0x%08x\n", res->start);
+ "Unable to ioremap address %pap\n", &res->start);
rc = -EIO;
goto ret_unreserve;
}
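
[Aside: the %pap conversions in this hunk (and in i2c-pnx below) exist because res->start is a resource_size_t, whose width depends on the architecture; "0x%08x" truncates it under 64-bit physical addressing. %pap takes a pointer to the value and prints it at the right width everywhere. Sketch:]

#include <linux/ioport.h>
#include <linux/printk.h>

static void my_print_region(struct resource *res)
{
	resource_size_t size = resource_size(res);

	/* broken on 64-bit phys_addr_t: pr_info("0x%08x", res->start); */
	pr_info("region %pap, size %pap\n", &res->start, &size);
}
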
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 6e0e546ef83f..686c06f31625 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -734,8 +734,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
if (ret < 0)
goto out_clock;
- dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
- alg_data->adapter.name, res->start, alg_data->irq);
+ dev_dbg(&pdev->dev, "%s: Master at %pap, irq %d.\n",
+ alg_data->adapter.name, &res->start, alg_data->irq);
return 0;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 504f5bf0e625..973e5339033c 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -240,8 +240,8 @@ static void i2c_powermac_create_one(struct i2c_adapter *adap,
strncpy(info.type, type, sizeof(info.type));
info.addr = addr;
- newdev = i2c_new_device(adap, &info);
- if (!newdev)
+ newdev = i2c_new_client_device(adap, &info);
+ if (IS_ERR(newdev))
dev_err(&adap->dev,
"i2c-powermac: Failure to register missing %s\n",
type);
@@ -359,8 +359,8 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
info.irq = irq_of_parse_and_map(node, 0);
info.of_node = of_node_get(node);
- newdev = i2c_new_device(adap, &info);
- if (!newdev) {
+ newdev = i2c_new_client_device(adap, &info);
+ if (IS_ERR(newdev)) {
dev_err(&adap->dev, "i2c-powermac: Failure to register"
" %pOF\n", node);
of_node_put(node);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b2634afe066d..5c3e8ac6ad92 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -169,6 +169,24 @@
#define STM32F7_AUTOSUSPEND_DELAY (HZ / 100)
/**
+ * struct stm32f7_i2c_regs - i2c f7 registers backup
+ * @cr1: Control register 1
+ * @cr2: Control register 2
+ * @oar1: Own address 1 register
+ * @oar2: Own address 2 register
+ * @pecr: PEC register
+ * @tmgr: Timing register
+ */
+struct stm32f7_i2c_regs {
+ u32 cr1;
+ u32 cr2;
+ u32 oar1;
+ u32 oar2;
+ u32 pecr;
+ u32 tmgr;
+};
+
+/**
* struct stm32f7_i2c_spec - private i2c specification timing
* @rate: I2C bus speed (Hz)
* @rate_min: 80% of I2C bus speed (Hz)
@@ -276,6 +294,7 @@ struct stm32f7_i2c_msg {
* @timing: I2C computed timings
* @slave: list of slave devices registered on the I2C bus
* @slave_running: slave device currently used
+ * @backup_regs: backup of i2c controller registers (for suspend/resume)
* @slave_dir: transfer direction for the current slave device
* @master_mode: boolean to know in which mode the I2C is running (master or
* slave)
@@ -298,6 +317,7 @@ struct stm32f7_i2c_dev {
struct stm32f7_i2c_timings timing;
struct i2c_client *slave[STM32F7_I2C_MAX_SLAVE];
struct i2c_client *slave_running;
+ struct stm32f7_i2c_regs backup_regs;
u32 slave_dir;
bool master_mode;
struct stm32_i2c_dma *dma;
@@ -2027,8 +2047,7 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int stm32f7_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -2038,7 +2057,7 @@ static int stm32f7_i2c_runtime_suspend(struct device *dev)
return 0;
}
-static int stm32f7_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
@@ -2053,11 +2072,101 @@ static int stm32f7_i2c_runtime_resume(struct device *dev)
return 0;
}
-#endif
+
+static int __maybe_unused
+stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+{
+ int ret;
+ struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+
+ ret = pm_runtime_get_sync(i2c_dev->dev);
+ if (ret < 0)
+ return ret;
+
+ backup_regs->cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1);
+ backup_regs->cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2);
+ backup_regs->oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
+ backup_regs->oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2);
+ backup_regs->pecr = readl_relaxed(i2c_dev->base + STM32F7_I2C_PECR);
+ backup_regs->tmgr = readl_relaxed(i2c_dev->base + STM32F7_I2C_TIMINGR);
+
+ pm_runtime_put_sync(i2c_dev->dev);
+
+ return ret;
+}
+
+static int __maybe_unused
+stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+{
+ u32 cr1;
+ int ret;
+ struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+
+ ret = pm_runtime_get_sync(i2c_dev->dev);
+ if (ret < 0)
+ return ret;
+
+ cr1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR1);
+ if (cr1 & STM32F7_I2C_CR1_PE)
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_PE);
+
+ writel_relaxed(backup_regs->tmgr, i2c_dev->base + STM32F7_I2C_TIMINGR);
+ writel_relaxed(backup_regs->cr1 & ~STM32F7_I2C_CR1_PE,
+ i2c_dev->base + STM32F7_I2C_CR1);
+ if (backup_regs->cr1 & STM32F7_I2C_CR1_PE)
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_PE);
+ writel_relaxed(backup_regs->cr2, i2c_dev->base + STM32F7_I2C_CR2);
+ writel_relaxed(backup_regs->oar1, i2c_dev->base + STM32F7_I2C_OAR1);
+ writel_relaxed(backup_regs->oar2, i2c_dev->base + STM32F7_I2C_OAR2);
+ writel_relaxed(backup_regs->pecr, i2c_dev->base + STM32F7_I2C_PECR);
+
+ pm_runtime_put_sync(i2c_dev->dev);
+
+ return ret;
+}
+
+static int __maybe_unused stm32f7_i2c_suspend(struct device *dev)
+{
+ struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int ret;
+
+ i2c_mark_adapter_suspended(&i2c_dev->adap);
+ ret = stm32f7_i2c_regs_backup(i2c_dev);
+ if (ret < 0) {
+ i2c_mark_adapter_resumed(&i2c_dev->adap);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32f7_i2c_resume(struct device *dev)
+{
+ struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+ pinctrl_pm_select_default_state(dev);
+
+ ret = stm32f7_i2c_regs_restore(i2c_dev);
+ if (ret < 0)
+ return ret;
+ i2c_mark_adapter_resumed(&i2c_dev->adap);
+
+ return 0;
+}
static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
stm32f7_i2c_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32f7_i2c_suspend, stm32f7_i2c_resume)
};
static const struct of_device_id stm32f7_i2c_match[] = {
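
[Aside: the stm32f7 hunks above replace #ifdef CONFIG_PM guards with __maybe_unused annotations and add system-sleep support that brackets a register backup/restore with pm_runtime_force_suspend()/pm_runtime_force_resume(), reusing the runtime-PM clock handling. A condensed sketch of that shape, with illustrative my_* names:]

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused my_runtime_suspend(struct device *dev)
{
	/* gate clocks */
	return 0;
}

static int __maybe_unused my_runtime_resume(struct device *dev)
{
	/* ungate clocks */
	return 0;
}

static int __maybe_unused my_suspend(struct device *dev)
{
	/* save controller registers here, then reuse the runtime path */
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused my_resume(struct device *dev)
{
	int ret = pm_runtime_force_resume(dev);

	if (ret < 0)
		return ret;
	/* restore controller registers here */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};
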
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 8c3e2d409d63..42e0a53e7fa4 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -444,7 +444,7 @@ static int stu300_wait_while_busy(struct stu300_dev *dev)
"Attempt: %d\n", i+1);
dev_err(&dev->pdev->dev, "base address = "
- "0x%08x, reinit hardware\n", (u32) dev->virtbase);
+ "0x%p, reinit hardware\n", dev->virtbase);
(void) stu300_init_hw(dev);
}
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 39762f0611b1..86026798b4f7 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -553,7 +553,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
&i2c->pclkrate);
i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(i2c->pclk) && PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
+ if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR_OR_NULL(i2c->pclk)) {
dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 0bff3f3a8779..b4050f5b6746 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -49,10 +49,10 @@ static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter)
if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) {
dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n",
tsl2550_info.type, tsl2550_info.addr);
- return i2c_new_device(adapter, &tsl2550_info);
+ return i2c_new_client_device(adapter, &tsl2550_info);
}
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index a98bf31d0e5c..cbc2ad49043e 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -16,7 +16,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
@@ -129,11 +131,12 @@
#define I2C_PACKET_HEADER_SIZE 12
/*
- * Upto I2C_PIO_MODE_MAX_LEN bytes, controller will use PIO mode,
- * above this, controller will use DMA to fill FIFO.
- * MAX PIO len is 20 bytes excluding packet header.
+ * The I2C controller uses PIO mode for transfers up to 32 bytes in order to
+ * avoid DMA overhead; above that, the external APB DMA controller is used.
+ * Note that the actual maximum PIO length is 20 bytes, because the 32 bytes
+ * include I2C_PACKET_HEADER_SIZE.
*/
-#define I2C_PIO_MODE_MAX_LEN 32
+#define I2C_PIO_MODE_PREFERRED_LEN 32
/*
* msg_end_type: The bus control which need to be send at end of transfer.
@@ -230,7 +233,6 @@ struct tegra_i2c_hw_feature {
* @base_phys: physical base address of the I2C controller
* @cont_id: I2C controller ID, used for packet header
* @irq: IRQ number of transfer complete interrupt
- * @irq_disabled: used to track whether or not the interrupt is enabled
* @is_dvc: identifies the DVC I2C controller, has a different register layout
* @msg_complete: transfer completion notifier
* @msg_err: error code for completed message
@@ -240,7 +242,6 @@ struct tegra_i2c_hw_feature {
* @bus_clk_rate: current I2C bus clock rate
* @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
* @is_multimaster_mode: track if I2C controller is in multi-master mode
- * @xfer_lock: lock to serialize transfer submission and processing
* @tx_dma_chan: DMA transmit channel
* @rx_dma_chan: DMA receive channel
* @dma_phys: handle to DMA resources
@@ -248,6 +249,7 @@ struct tegra_i2c_hw_feature {
* @dma_buf_size: DMA buffer size
* @is_curr_dma_xfer: indicates active DMA transfer
* @dma_complete: DMA completion notifier
+ * @is_curr_atomic_xfer: indicates active atomic transfer
*/
struct tegra_i2c_dev {
struct device *dev;
@@ -260,7 +262,6 @@ struct tegra_i2c_dev {
phys_addr_t base_phys;
int cont_id;
int irq;
- bool irq_disabled;
int is_dvc;
struct completion msg_complete;
int msg_err;
@@ -270,8 +271,6 @@ struct tegra_i2c_dev {
u32 bus_clk_rate;
u16 clk_divisor_non_hs_mode;
bool is_multimaster_mode;
- /* xfer_lock: lock to serialize transfer submission and processing */
- spinlock_t xfer_lock;
struct dma_chan *tx_dma_chan;
struct dma_chan *rx_dma_chan;
dma_addr_t dma_phys;
@@ -279,17 +278,18 @@ struct tegra_i2c_dev {
unsigned int dma_buf_size;
bool is_curr_dma_xfer;
struct completion dma_complete;
+ bool is_curr_atomic_xfer;
};
static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
unsigned long reg)
{
- writel(val, i2c_dev->base + reg);
+ writel_relaxed(val, i2c_dev->base + reg);
}
static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
{
- return readl(i2c_dev->base + reg);
+ return readl_relaxed(i2c_dev->base + reg);
}
/*
@@ -307,16 +307,16 @@ static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
unsigned long reg)
{
- writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ writel_relaxed(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
/* Read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
- readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
{
- return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+ return readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
}
static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
@@ -687,13 +687,15 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
addr = i2c_dev->base + reg_offset;
i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
- if (in_interrupt())
- err = readl_poll_timeout_atomic(addr, val, val == 0,
- 1000,
- I2C_CONFIG_LOAD_TIMEOUT);
+
+ if (i2c_dev->is_curr_atomic_xfer)
+ err = readl_relaxed_poll_timeout_atomic(
+ addr, val, val == 0, 1000,
+ I2C_CONFIG_LOAD_TIMEOUT);
else
- err = readl_poll_timeout(addr, val, val == 0, 1000,
- I2C_CONFIG_LOAD_TIMEOUT);
+ err = readl_relaxed_poll_timeout(
+ addr, val, val == 0, 1000,
+ I2C_CONFIG_LOAD_TIMEOUT);
if (err) {
dev_warn(i2c_dev->dev,
@@ -790,11 +792,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
if (err)
return err;
- if (i2c_dev->irq_disabled) {
- i2c_dev->irq_disabled = false;
- enable_irq(i2c_dev->irq);
- }
-
return 0;
}
@@ -825,18 +822,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
- spin_lock(&i2c_dev->xfer_lock);
if (status == 0) {
dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
i2c_readl(i2c_dev, I2C_STATUS),
i2c_readl(i2c_dev, I2C_CNFG));
i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
-
- if (!i2c_dev->irq_disabled) {
- disable_irq_nosync(i2c_dev->irq);
- i2c_dev->irq_disabled = true;
- }
goto err;
}
@@ -925,7 +916,6 @@ err:
complete(&i2c_dev->msg_complete);
done:
- spin_unlock(&i2c_dev->xfer_lock);
return IRQ_HANDLED;
}
@@ -999,6 +989,64 @@ out:
i2c_writel(i2c_dev, val, reg);
}
+static unsigned long
+tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+ struct completion *complete,
+ unsigned int timeout_ms)
+{
+ ktime_t ktime = ktime_get();
+ ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms);
+
+ do {
+ u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+ if (status) {
+ tegra_i2c_isr(i2c_dev->irq, i2c_dev);
+
+ if (completion_done(complete)) {
+ s64 delta = ktime_ms_delta(ktimeout, ktime);
+
+ return msecs_to_jiffies(delta) ?: 1;
+ }
+ }
+
+ ktime = ktime_get();
+
+ } while (ktime_before(ktime, ktimeout));
+
+ return 0;
+}
+
+static unsigned long
+tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+ struct completion *complete,
+ unsigned int timeout_ms)
+{
+ unsigned long ret;
+
+ if (i2c_dev->is_curr_atomic_xfer) {
+ ret = tegra_i2c_poll_completion_timeout(i2c_dev, complete,
+ timeout_ms);
+ } else {
+ enable_irq(i2c_dev->irq);
+ ret = wait_for_completion_timeout(complete,
+ msecs_to_jiffies(timeout_ms));
+ disable_irq(i2c_dev->irq);
+
+ /*
+ * There is a chance that completion may happen after IRQ
+ * synchronization, which is done by disable_irq().
+ */
+ if (ret == 0 && completion_done(complete)) {
+ dev_warn(i2c_dev->dev,
+ "completion done after timeout\n");
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
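
[Aside: the helper above, together with IRQ_NOAUTOEN at request time later in this patch, keeps the interrupt line masked except while a wait is actually in progress; that is what makes the removed xfer_lock spinlock and irq_disabled bookkeeping unnecessary. A sketch of the request-side half, under illustrative names:]

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int my_request_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, void *data)
{
	/* leave the line masked; enable_irq() only around the wait */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	return devm_request_irq(dev, irq, handler, 0, dev_name(dev), data);
}
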
static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
{
struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -1020,8 +1068,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG);
tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
- time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
- msecs_to_jiffies(50));
+ time_left = tegra_i2c_wait_completion_timeout(
+ i2c_dev, &i2c_dev->msg_complete, 50);
if (time_left == 0) {
dev_err(i2c_dev->dev, "timed out for bus clear\n");
return -ETIMEDOUT;
@@ -1044,7 +1092,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
u32 packet_header;
u32 int_mask;
unsigned long time_left;
- unsigned long flags;
size_t xfer_size;
u32 *buffer = NULL;
int err = 0;
@@ -1065,8 +1112,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
xfer_size = msg->len + I2C_PACKET_HEADER_SIZE;
xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
- i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_MAX_LEN) &&
- i2c_dev->dma_buf;
+ i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_PREFERRED_LEN) &&
+ i2c_dev->dma_buf &&
+ !i2c_dev->is_curr_atomic_xfer;
tegra_i2c_config_fifo_trig(i2c_dev, xfer_size);
dma = i2c_dev->is_curr_dma_xfer;
/*
@@ -1075,7 +1123,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
*/
xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC,
i2c_dev->bus_clk_rate);
- spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
tegra_i2c_unmask_irq(i2c_dev, int_mask);
@@ -1090,7 +1137,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
dev_err(i2c_dev->dev,
"starting RX DMA failed, err %d\n",
err);
- goto unlock;
+ return err;
}
} else {
@@ -1149,7 +1196,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
dev_err(i2c_dev->dev,
"starting TX DMA failed, err %d\n",
err);
- goto unlock;
+ return err;
}
} else {
tegra_i2c_fill_tx_fifo(i2c_dev);
@@ -1169,20 +1216,16 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
i2c_readl(i2c_dev, I2C_INT_MASK));
-unlock:
- spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
-
if (dma) {
- if (err)
- return err;
+ time_left = tegra_i2c_wait_completion_timeout(
+ i2c_dev, &i2c_dev->dma_complete, xfer_time);
+
+ dmaengine_terminate_sync(i2c_dev->msg_read ?
+ i2c_dev->rx_dma_chan :
+ i2c_dev->tx_dma_chan);
- time_left = wait_for_completion_timeout(&i2c_dev->dma_complete,
- msecs_to_jiffies(xfer_time));
- if (time_left == 0) {
+ if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
dev_err(i2c_dev->dev, "DMA transfer timeout\n");
- dmaengine_terminate_sync(i2c_dev->msg_read ?
- i2c_dev->rx_dma_chan :
- i2c_dev->tx_dma_chan);
tegra_i2c_init(i2c_dev, true);
return -ETIMEDOUT;
}
@@ -1195,20 +1238,15 @@ unlock:
memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf,
msg->len);
}
-
- if (i2c_dev->msg_err != I2C_ERR_NONE)
- dmaengine_synchronize(i2c_dev->msg_read ?
- i2c_dev->rx_dma_chan :
- i2c_dev->tx_dma_chan);
}
- time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
- msecs_to_jiffies(xfer_time));
+ time_left = tegra_i2c_wait_completion_timeout(
+ i2c_dev, &i2c_dev->msg_complete, xfer_time);
+
tegra_i2c_mask_irq(i2c_dev, int_mask);
if (time_left == 0) {
dev_err(i2c_dev->dev, "i2c transfer timed out\n");
-
tegra_i2c_init(i2c_dev, true);
return -ETIMEDOUT;
}
@@ -1270,6 +1308,19 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
return ret ?: i;
}
+static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int ret;
+
+ i2c_dev->is_curr_atomic_xfer = true;
+ ret = tegra_i2c_xfer(adap, msgs, num);
+ i2c_dev->is_curr_atomic_xfer = false;
+
+ return ret;
+}
+
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -1297,8 +1348,9 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
}
static const struct i2c_algorithm tegra_i2c_algo = {
- .master_xfer = tegra_i2c_xfer,
- .functionality = tegra_i2c_func,
+ .master_xfer = tegra_i2c_xfer,
+ .master_xfer_atomic = tegra_i2c_xfer_atomic,
+ .functionality = tegra_i2c_func,
};
/* payload size is only 12 bit */
@@ -1568,7 +1620,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
I2C_PACKET_HEADER_SIZE;
init_completion(&i2c_dev->msg_complete);
init_completion(&i2c_dev->dma_complete);
- spin_lock_init(&i2c_dev->xfer_lock);
if (!i2c_dev->hw->has_single_clk_source) {
fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
@@ -1607,15 +1658,20 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto unprepare_fast_clk;
}
+ pm_runtime_irq_safe(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
+ if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_i2c_runtime_resume(&pdev->dev);
- else
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
+ }
+ } else {
ret = pm_runtime_get_sync(i2c_dev->dev);
-
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto unprepare_div_clk;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto disable_rpm;
+ }
}
if (i2c_dev->is_multimaster_mode) {
@@ -1623,7 +1679,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
ret);
- goto disable_rpm;
+ goto put_rpm;
}
}
@@ -1640,6 +1696,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto release_dma;
}
+ irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
+
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
if (ret) {
@@ -1671,11 +1729,16 @@ disable_div_clk:
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
+put_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_put_sync(&pdev->dev);
+ else
tegra_i2c_runtime_suspend(&pdev->dev);
+disable_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_disable(&pdev->dev);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
@@ -1710,9 +1773,14 @@ static int tegra_i2c_remove(struct platform_device *pdev)
static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int err;
i2c_mark_adapter_suspended(&i2c_dev->adapter);
+ err = pm_runtime_force_suspend(dev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -1733,6 +1801,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
if (err)
return err;
+ err = pm_runtime_force_resume(dev);
+ if (err < 0)
+ return err;
+
i2c_mark_adapter_resumed(&i2c_dev->adapter);
return 0;
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 43e3603489ee..7279ca0eaa2d 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -84,7 +84,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
pmsg->buf, pmsg->len) != pmsg->len) {
dev_err(&adapter->dev,
"failure reading data\n");
- ret = -EREMOTEIO;
+ ret = -EIO;
goto out;
}
} else {
@@ -94,7 +94,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
pmsg->buf, pmsg->len) != pmsg->len) {
dev_err(&adapter->dev,
"failure writing data\n");
- ret = -EREMOTEIO;
+ ret = -EIO;
goto out;
}
}
@@ -102,13 +102,13 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
/* read status */
if (usb_read(adapter, CMD_GET_STATUS, 0, 0, pstatus, 1) != 1) {
dev_err(&adapter->dev, "failure reading status\n");
- ret = -EREMOTEIO;
+ ret = -EIO;
goto out;
}
dev_dbg(&adapter->dev, " status = %d\n", *pstatus);
if (*pstatus == STATUS_ADDRESS_NAK) {
- ret = -EREMOTEIO;
+ ret = -ENXIO;
goto out;
}
}
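
[Aside: the errno changes in this file follow the conventions in Documentation/i2c/fault-codes: -ENXIO when the addressed device does not ACK, plain -EIO for lower-level transport failures, rather than the older catch-all -EREMOTEIO. A trivial mapping sketch:]

#include <linux/errno.h>
#include <linux/types.h>

static int my_xfer_status_to_errno(bool transport_failed, bool address_nak)
{
	if (transport_failed)
		return -EIO;	/* e.g. the USB control transfer itself failed */
	if (address_nak)
		return -ENXIO;	/* device did not acknowledge its address */
	return 0;
}
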
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index d8d49f1814c7..90c1c362394d 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -156,6 +156,8 @@ struct xiic_i2c {
#define XIIC_RESET_MASK 0xAUL
#define XIIC_PM_TIMEOUT 1000 /* ms */
+/* timeout waiting for the controller to respond */
+#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000))
/*
* The following constant is used for the device global interrupt enable
* register, to enable all interrupts for the device, this is the only bit
@@ -166,7 +168,7 @@ struct xiic_i2c {
#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
-static void xiic_start_xfer(struct xiic_i2c *i2c);
+static int xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
/*
@@ -247,17 +249,29 @@ static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
xiic_irq_en(i2c, mask);
}
-static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
+static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
u8 sr;
+ unsigned long timeout;
+
+ timeout = jiffies + XIIC_I2C_TIMEOUT;
for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
!(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
- sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
+ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) {
xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+ if (time_after(jiffies, timeout)) {
+ dev_err(i2c->dev, "Failed to clear rx fifo\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
}
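
[Aside: the bounded drain loop above is the classic jiffies-deadline idiom: compute the deadline once, then test it with time_after(), which stays correct across jiffies wrap-around. A generic sketch with illustrative register accessors:]

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static int my_drain_rx_fifo(void __iomem *status, void __iomem *data,
			    u8 empty_mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (!(readb(status) & empty_mask)) {
		readb(data);		/* discard one FIFO entry */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	}
	return 0;
}
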
-static void xiic_reinit(struct xiic_i2c *i2c)
+static int xiic_reinit(struct xiic_i2c *i2c)
{
+ int ret;
+
xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
/* Set receive Fifo depth to maximum (zero based). */
@@ -270,12 +284,16 @@ static void xiic_reinit(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
/* make sure RX fifo is empty */
- xiic_clear_rx_fifo(i2c);
+ ret = xiic_clear_rx_fifo(i2c);
+ if (ret)
+ return ret;
/* Enable interrupts */
xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
+
+ return 0;
}
static void xiic_deinit(struct xiic_i2c *i2c)
@@ -655,12 +673,18 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
}
-static void xiic_start_xfer(struct xiic_i2c *i2c)
+static int xiic_start_xfer(struct xiic_i2c *i2c)
{
+ int ret;
mutex_lock(&i2c->lock);
- xiic_reinit(i2c);
- __xiic_start_xfer(i2c);
+
+ ret = xiic_reinit(i2c);
+ if (!ret)
+ __xiic_start_xfer(i2c);
+
mutex_unlock(&i2c->lock);
+
+ return ret;
}
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -682,7 +706,11 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
i2c->tx_msg = msgs;
i2c->nmsgs = num;
- xiic_start_xfer(i2c);
+ err = xiic_start_xfer(i2c);
+ if (err < 0) {
+ dev_err(adap->dev.parent, "xiic_start_xfer failed\n");
+ goto out;
+ }
if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ)) {
@@ -760,7 +788,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
i2c->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "input clock not found.\n");
+ if (PTR_ERR(i2c->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(i2c->clk);
}
ret = clk_prepare_enable(i2c->clk);
@@ -769,10 +798,10 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return ret;
}
i2c->dev = &pdev->dev;
- pm_runtime_enable(i2c->dev);
pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
pm_runtime_use_autosuspend(i2c->dev);
pm_runtime_set_active(i2c->dev);
+ pm_runtime_enable(i2c->dev);
ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
xiic_process, IRQF_ONESHOT,
pdev->name, i2c);
@@ -794,7 +823,11 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
i2c->endianness = BIG;
- xiic_reinit(i2c);
+ ret = xiic_reinit(i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ goto err_clk_dis;
+ }
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
@@ -806,7 +839,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (pdata) {
/* add in known devices to the bus */
for (i = 0; i < pdata->num_devices; i++)
- i2c_new_device(&i2c->adap, pdata->devices + i);
+ i2c_new_client_device(&i2c->adap, pdata->devices + i);
}
return 0;
@@ -826,14 +859,16 @@ static int xiic_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- ret = clk_prepare_enable(i2c->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock.\n");
+ ret = pm_runtime_get_sync(i2c->dev);
+ if (ret < 0)
return ret;
- }
+
xiic_deinit(i2c);
+ pm_runtime_put_sync(i2c->dev);
clk_disable_unprepare(i2c->clk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return 0;
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 62a1c92ab803..8f3dbc97a057 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -225,7 +225,7 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter,
adev->power.flags.ignore_parent = true;
acpi_device_set_enumerated(adev);
- if (!i2c_new_device(adapter, info)) {
+ if (IS_ERR(i2c_new_client_device(adapter, info))) {
adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
@@ -451,7 +451,8 @@ struct notifier_block i2c_acpi_notifier = {
* resources, in that case this function can be used to create an i2c-client
* for other I2cSerialBus resources in the Current Resource Settings table.
*
- * Also see i2c_new_device, which this function calls to create the i2c-client.
+ * Also see i2c_new_client_device, which this function calls to create the
+ * i2c-client.
*
* Returns a pointer to the new i2c-client, or error pointer in case of failure.
* Specifically, -EPROBE_DEFER is returned if the adapter is not found.
@@ -461,7 +462,6 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
{
struct i2c_acpi_lookup lookup;
struct i2c_adapter *adapter;
- struct i2c_client *client;
struct acpi_device *adev;
LIST_HEAD(resource_list);
int ret;
@@ -489,11 +489,7 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
if (!adapter)
return ERR_PTR(-EPROBE_DEFER);
- client = i2c_new_device(adapter, info);
- if (!client)
- return ERR_PTR(-ENODEV);
-
- return client;
+ return i2c_new_client_device(adapter, info);
}
EXPORT_SYMBOL_GPL(i2c_acpi_new_device);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9f8dcd3f8385..cefad0881942 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
* If we can set SDA, we will always create a STOP to ensure additional
* pulses will do no harm. This is achieved by letting SDA follow SCL
* half a cycle later. Check the 'incomplete_write_byte' fault injector
- * for details.
+ * for details. Note that we must honour tsu:sto, 4us, but let's use 5us
+ * here for simplicity.
*/
bri->set_scl(adap, scl);
- ndelay(RECOVERY_NDELAY / 2);
+ ndelay(RECOVERY_NDELAY);
if (bri->set_sda)
bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
@@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
scl = !scl;
bri->set_scl(adap, scl);
/* Creating STOP again, see above */
- ndelay(RECOVERY_NDELAY / 2);
+ if (scl) {
+ /* Honour minimum tsu:sto */
+ ndelay(RECOVERY_NDELAY);
+ } else {
+ /* Honour minimum tf and thd:dat */
+ ndelay(RECOVERY_NDELAY / 2);
+ }
if (bri->set_sda)
bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
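
[Aside: the arithmetic behind these two hunks, assuming this file's RECOVERY_NDELAY of 5000 ns (half of a 100 kHz clock period): delaying a full RECOVERY_NDELAY before raising SDA after SCL yields 5 us, which satisfies the standard-mode STOP setup time tsu:sto(min) = 4 us, while the remaining half-period delays still cover the much shorter tf and thd:dat minima. Reduced to an illustrative helper:]

#include <linux/delay.h>

#define MY_RECOVERY_NDELAY	5000	/* ns, half of a 100 kHz SCL period */

static void my_generate_stop(void (*set_scl)(int), void (*set_sda)(int))
{
	set_scl(1);
	ndelay(MY_RECOVERY_NDELAY);	/* full delay: tsu:sto >= 4 us */
	set_sda(1);			/* SDA rising while SCL high = STOP */
	ndelay(MY_RECOVERY_NDELAY / 2);
}
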
@@ -449,15 +456,15 @@ static void i2c_client_dev_release(struct device *dev)
}
static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR_RO(name);
static ssize_t
-show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
+modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int len;
@@ -472,7 +479,7 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
}
-static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+static DEVICE_ATTR_RO(modalias);
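
[Aside: DEVICE_ATTR_RO(x) and DEVICE_ATTR_WO(x) expand to read-only (0444) and write-only (0200) attributes whose callbacks must be named x_show and x_store respectively; that naming requirement is why show_name and friends are renamed in these hunks. Minimal sketch:]

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t my_attr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* illustrative payload */
}
static DEVICE_ATTR_RO(my_attr);			/* binds to my_attr_show */
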
static struct attribute *i2c_dev_attrs[] = {
&dev_attr_name.attr,
@@ -824,8 +831,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
/**
- * i2c_unregister_device - reverse effect of i2c_new_device()
- * @client: value returned from i2c_new_device()
+ * i2c_unregister_device - reverse effect of i2c_new_*_device()
+ * @client: value returned from i2c_new_*_device()
* Context: can sleep
*/
void i2c_unregister_device(struct i2c_client *client)
@@ -1016,8 +1023,8 @@ EXPORT_SYMBOL_GPL(i2c_adapter_depth);
* the user to provide incorrect parameters.
*/
static ssize_t
-i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+new_device_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
struct i2c_board_info info;
@@ -1072,7 +1079,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
+static DEVICE_ATTR_WO(new_device);
/*
* And of course let the users delete the devices they instantiated, if
@@ -1084,8 +1091,8 @@ static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
* the user to delete the wrong device.
*/
static ssize_t
-i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+delete_device_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
struct i2c_client *client, *next;
@@ -1128,7 +1135,7 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
return res;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
- i2c_sysfs_delete_device);
+ delete_device_store);
static struct attribute *i2c_adapter_attrs[] = {
&dev_attr_name.attr,
@@ -1171,9 +1178,8 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
down_read(&__i2c_board_lock);
list_for_each_entry(devinfo, &__i2c_board_list, list) {
- if (devinfo->busnum == adapter->nr
- && !i2c_new_device(adapter,
- &devinfo->board_info))
+ if (devinfo->busnum == adapter->nr &&
+ IS_ERR(i2c_new_client_device(adapter, &devinfo->board_info)))
dev_err(&adapter->dev,
"Can't create device at 0x%02x\n",
devinfo->board_info.addr);
@@ -2160,8 +2166,8 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
- client = i2c_new_device(adapter, &info);
- if (client)
+ client = i2c_new_client_device(adapter, &info);
+ if (!IS_ERR(client))
list_add_tail(&client->detected, &driver->clients);
else
dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index e4d296b40baa..6787c1f71483 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -75,11 +75,10 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
if (ret)
return ERR_PTR(ret);
- client = i2c_new_device(adap, &info);
- if (!client) {
+ client = i2c_new_client_device(adap, &info);
+ if (IS_ERR(client))
dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node);
- return ERR_PTR(-EINVAL);
- }
+
return client;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 50e1fb4aedf5..6daec8d3d331 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -16,6 +16,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
@@ -42,20 +43,20 @@
#define PCA9541_CONTROL 0x01
#define PCA9541_ISTAT 0x02
-#define PCA9541_CTL_MYBUS (1 << 0)
-#define PCA9541_CTL_NMYBUS (1 << 1)
-#define PCA9541_CTL_BUSON (1 << 2)
-#define PCA9541_CTL_NBUSON (1 << 3)
-#define PCA9541_CTL_BUSINIT (1 << 4)
-#define PCA9541_CTL_TESTON (1 << 6)
-#define PCA9541_CTL_NTESTON (1 << 7)
-
-#define PCA9541_ISTAT_INTIN (1 << 0)
-#define PCA9541_ISTAT_BUSINIT (1 << 1)
-#define PCA9541_ISTAT_BUSOK (1 << 2)
-#define PCA9541_ISTAT_BUSLOST (1 << 3)
-#define PCA9541_ISTAT_MYTEST (1 << 6)
-#define PCA9541_ISTAT_NMYTEST (1 << 7)
+#define PCA9541_CTL_MYBUS BIT(0)
+#define PCA9541_CTL_NMYBUS BIT(1)
+#define PCA9541_CTL_BUSON BIT(2)
+#define PCA9541_CTL_NBUSON BIT(3)
+#define PCA9541_CTL_BUSINIT BIT(4)
+#define PCA9541_CTL_TESTON BIT(6)
+#define PCA9541_CTL_NTESTON BIT(7)
+
+#define PCA9541_ISTAT_INTIN BIT(0)
+#define PCA9541_ISTAT_BUSINIT BIT(1)
+#define PCA9541_ISTAT_BUSOK BIT(2)
+#define PCA9541_ISTAT_BUSLOST BIT(3)
+#define PCA9541_ISTAT_MYTEST BIT(6)
+#define PCA9541_ISTAT_NMYTEST BIT(7)
#define BUSON (PCA9541_CTL_BUSON | PCA9541_CTL_NBUSON)
#define MYBUS (PCA9541_CTL_MYBUS | PCA9541_CTL_NMYBUS)
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 923aa3a5a3dc..a0d926ae3f86 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -86,7 +86,7 @@ struct pca954x {
u8 last_chan; /* last register value */
/* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */
- s8 idle_state;
+ s32 idle_state;
struct i2c_client *client;
@@ -229,20 +229,23 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
I2C_SMBUS_BYTE, &dummy);
}
+static u8 pca954x_regval(struct pca954x *data, u8 chan)
+{
+ /* We make switches look like muxes, not sure how to be smarter. */
+ if (data->chip->muxtype == pca954x_ismux)
+ return chan | data->chip->enable;
+ else
+ return 1 << chan;
+}
+
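
[Aside: the helper just factored out above encodes the two PCA954x register layouts: "mux" parts take a binary channel number OR'ed with an enable bit, while "switch" parts take a one-hot channel mask. Standalone sketch:]

#include <linux/bits.h>
#include <linux/types.h>

static u8 my_regval(bool is_mux, u8 enable_bit, u8 chan)
{
	return is_mux ? (chan | enable_bit) : BIT(chan);
}

/*
 * e.g. with an enable bit of 0x04 (illustrative):
 *   my_regval(true,  0x04, 2) == 0x06   mux: enable | channel 2
 *   my_regval(false, 0x00, 2) == 0x04   switch: bit 2 set
 */
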
static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
- const struct chip_desc *chip = data->chip;
u8 regval;
int ret = 0;
- /* we make switches look like muxes, not sure how to be smarter */
- if (chip->muxtype == pca954x_ismux)
- regval = chan | chip->enable;
- else
- regval = 1 << chan;
-
+ regval = pca954x_regval(data, chan);
/* Only select the channel if its different from the last channel */
if (data->last_chan != regval) {
ret = pca954x_reg_write(muxc->parent, client, regval);
@@ -256,7 +259,7 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
- s8 idle_state;
+ s32 idle_state;
idle_state = READ_ONCE(data->idle_state);
if (idle_state >= 0)
@@ -402,6 +405,22 @@ static void pca954x_cleanup(struct i2c_mux_core *muxc)
i2c_mux_del_adapters(muxc);
}
+static int pca954x_init(struct i2c_client *client, struct pca954x *data)
+{
+ int ret;
+
+ if (data->idle_state >= 0)
+ data->last_chan = pca954x_regval(data, data->idle_state);
+ else
+ data->last_chan = 0; /* Disconnect multiplexer */
+
+ ret = i2c_smbus_write_byte(client, data->last_chan);
+ if (ret < 0)
+ data->last_chan = 0;
+
+ return ret;
+}
+
/*
* I2C init/probing/exit functions
*/
@@ -411,7 +430,6 @@ static int pca954x_probe(struct i2c_client *client,
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
- bool idle_disconnect_dt;
struct gpio_desc *gpio;
struct i2c_mux_core *muxc;
struct pca954x *data;
@@ -462,23 +480,24 @@ static int pca954x_probe(struct i2c_client *client,
}
}
- /* Write the mux register at addr to verify
+ data->idle_state = MUX_IDLE_AS_IS;
+ if (of_property_read_u32(np, "idle-state", &data->idle_state)) {
+ if (np && of_property_read_bool(np, "i2c-mux-idle-disconnect"))
+ data->idle_state = MUX_IDLE_DISCONNECT;
+ }
+
+ /*
+ * Write the mux register at addr to verify
* that the mux is in fact present. This also
- * initializes the mux to disconnected state.
+	 * initializes the mux to a channel or disconnected state.
*/
- if (i2c_smbus_write_byte(client, 0) < 0) {
+ ret = pca954x_init(client, data);
+ if (ret < 0) {
dev_warn(dev, "probe failed\n");
return -ENODEV;
}
- data->last_chan = 0; /* force the first selection */
- data->idle_state = MUX_IDLE_AS_IS;
-
- idle_disconnect_dt = np &&
- of_property_read_bool(np, "i2c-mux-idle-disconnect");
- if (idle_disconnect_dt)
- data->idle_state = MUX_IDLE_DISCONNECT;
-
ret = pca954x_irq_setup(muxc);
if (ret)
goto fail_cleanup;
@@ -530,9 +549,13 @@ static int pca954x_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
struct pca954x *data = i2c_mux_priv(muxc);
+ int ret;
- data->last_chan = 0;
- return i2c_smbus_write_byte(client, 0);
+ ret = pca954x_init(client, data);
+ if (ret < 0)
+ dev_err(&client->dev, "failed to verify mux presence\n");
+
+ return ret;
}
#endif
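
The new probe flow reads the optional "idle-state" u32 property first and falls back to the legacy "i2c-mux-idle-disconnect" boolean only when it is absent; this works because of_property_read_u32() leaves its output untouched on failure. A standalone sketch of the fallback chain (names from the patch, cast added for illustration):

	s32 idle_state = MUX_IDLE_AS_IS;	/* default: leave the mux as it is */

	if (of_property_read_u32(np, "idle-state", (u32 *)&idle_state)) {
		/* Property missing: idle_state keeps its default, so try
		 * the older boolean binding instead. */
		if (np && of_property_read_bool(np, "i2c-mux-idle-disconnect"))
			idle_state = MUX_IDLE_DISCONNECT;
	}
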
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 043691656245..7f8f896fa0c3 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
- unsigned long max_i2c_scl_rate)
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
{
struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b0ff0e12d84c..bd26c3b9634e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+
+ if (data->index > pos && pos > 0) {
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ }
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
master->regs +
@@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
static int dw_i3c_probe(struct platform_device *pdev)
{
struct dw_i3c_master *master;
- struct resource *res;
int ret, irq;
master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
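
devm_platform_ioremap_resource() folds the platform_get_resource() plus devm_ioremap_resource() pair into one call; its implementation is essentially the removed two-step sequence:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}
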
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 10db0bf0655a..54712793709e 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
@@ -60,6 +61,7 @@
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
+#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
@@ -70,6 +72,7 @@
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+#define THD_DELAY_MAX 3
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
@@ -388,6 +391,10 @@ struct cdns_i3c_xfer {
struct cdns_i3c_cmd cmds[0];
};
+struct cdns_i3c_data {
+ u8 thd_delay_ns;
+};
+
struct cdns_i3c_master {
struct work_struct hj_work;
struct i3c_master_controller base;
@@ -408,6 +415,7 @@ struct cdns_i3c_master {
struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
+ const struct cdns_i3c_data *devdata;
};
static inline struct cdns_i3c_master *
@@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
return 0;
}
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+ unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+ u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+ (NSEC_PER_SEC / sysclk_rate));
+
+	/* Values greater than 3 are not valid. */
+ if (thd_delay > THD_DELAY_MAX)
+ thd_delay = THD_DELAY_MAX;
+
+	/* The THD delay register field is encoded as THD_DELAY_MAX - thd_delay. */
+ return (THD_DELAY_MAX - thd_delay);
+}
+
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
@@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
* We will issue ENTDAA afterwards from the threaded IRQ handler.
*/
ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+ /*
+ * Configure data hold delay based on device-specific data.
+ *
+	 * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing
+	 * on master output. This setting makes it possible to meet that timing
+	 * on the master's SoC outputs, regardless of PCB balancing.
+ */
+ ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
writel(ctrl, master->regs + CTRL);
cdns_i3c_master_enable(master);
@@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work)
i3c_master_do_daa(&master->base);
}
+static struct cdns_i3c_data cdns_i3c_devdata = {
+ .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+ { /* sentinel */ },
+};
+
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
- struct resource *res;
int ret, irq;
u32 val;
@@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->devdata = of_device_get_match_data(&pdev->dev);
+ if (!master->devdata)
+ return -EINVAL;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id cdns_i3c_master_of_ids[] = {
- { .compatible = "cdns,i3c-master" },
- { /* sentinel */ },
-};
-
static struct platform_driver cdns_i3c_master = {
.probe = cdns_i3c_master_probe,
.remove = cdns_i3c_master_remove,
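
The tHD_PP encoding is easiest to see with concrete numbers; a worked example assuming a 100 MHz sysclk (clock rate chosen purely for illustration):

	/*
	 * thd_delay_ns = 10 (from cdns_i3c_devdata), sysclk = 100 MHz:
	 *   clock period = NSEC_PER_SEC / 100000000 = 10 ns
	 *   thd_delay    = DIV_ROUND_UP(10, 10)     = 1 cycle
	 *   register     = THD_DELAY_MAX - 1        = 2
	 * At 200 MHz the period is 5 ns, so DIV_ROUND_UP(10, 5) = 2 cycles
	 * and the encoded value drops to 1: more cycles, smaller field.
	 */
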
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index cac02db4098d..d4f4409cfb8b 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -3,8 +3,6 @@
# link order is important here
#
-ccflags-y := -Idrivers/ide
-
ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
ide-io-std.o ide-eh.o
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index a1898e11b04e..943bf944bf72 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -66,6 +66,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
struct ide_timing t;
u8 arttim = 0;
+ if (drive->dn >= ARRAY_SIZE(drwtim_regs))
+ return;
+
ide_timing_compute(drive, mode, &t, T, 0);
/*
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index 0dae65ac7d6d..743bc3693ac8 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -310,7 +310,7 @@ static void __init ht6560b_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
/* Setting default configurations for drives. */
- int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
+ unsigned long t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
if (hwif->channel)
t |= (HT_SECONDARY_IF << 8);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 9d117936bee1..dcf8b51b47fd 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -25,6 +25,7 @@
#define IDECD_VERSION "5.00"
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -1710,6 +1711,41 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+static int idecd_locked_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
+ void __user *argp = compat_ptr(arg);
+ int err;
+
+ switch (cmd) {
+ case CDROMSETSPINDOWN:
+ return idecd_set_spindown(&info->devinfo, (unsigned long)argp);
+ case CDROMGETSPINDOWN:
+ return idecd_get_spindown(&info->devinfo, (unsigned long)argp);
+ default:
+ break;
+ }
+
+ err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
+ if (err == -EINVAL)
+ err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd,
+ (unsigned long)argp);
+
+ return err;
+}
+
+static int idecd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ide_cd_mutex);
+ ret = idecd_locked_compat_ioctl(bdev, mode, cmd, arg);
+ mutex_unlock(&ide_cd_mutex);
+
+ return ret;
+}
static unsigned int idecd_check_events(struct gendisk *disk,
unsigned int clearing)
@@ -1732,6 +1768,8 @@ static const struct block_device_operations idecd_ops = {
.open = idecd_open,
.release = idecd_release,
.ioctl = idecd_ioctl,
+ .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idecd_compat_ioctl : NULL,
.check_events = idecd_check_events,
.revalidate_disk = idecd_revalidate_disk
};
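
The IS_ENABLED() ternary is equivalent to the usual preprocessor guard but keeps idecd_compat_ioctl() compile-tested on !CONFIG_COMPAT builds; this relies on compat_ptr() being available unconditionally, which it is as of this kernel. Equivalent #ifdef form for comparison:

	#ifdef CONFIG_COMPAT
		.compat_ioctl	= idecd_compat_ioctl,
	#else
		.compat_ioctl	= NULL,		/* unset fields are NULL anyway */
	#endif
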
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 197912af5c2f..1d3407d7e095 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -794,4 +794,5 @@ const struct ide_disk_ops ide_ata_disk_ops = {
.set_doorlock = ide_disk_set_doorlock,
.do_request = ide_do_rw_disk,
.ioctl = ide_disk_ioctl,
+ .compat_ioctl = ide_disk_ioctl,
};
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 1ea2f9e82bf8..1fe1f9d37a51 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
@@ -546,4 +547,7 @@ const struct ide_disk_ops ide_atapi_disk_ops = {
.set_doorlock = ide_set_media_lock,
.do_request = ide_floppy_do_request,
.ioctl = ide_floppy_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ide_floppy_compat_ioctl,
+#endif
};
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index 13c9b4b6d75e..8505a5f58f4e 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -26,6 +26,8 @@ void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
/* ide-floppy_ioctl.c */
int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
unsigned int, unsigned long);
+int ide_floppy_compat_ioctl(ide_drive_t *, struct block_device *, fmode_t,
+ unsigned int, unsigned long);
#ifdef CONFIG_IDE_PROC_FS
/* ide-floppy_proc.c */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index 40a2ebe34e1d..39a790ac6cc3 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/ide.h>
+#include <linux/compat.h>
#include <linux/cdrom.h>
#include <linux/mutex.h>
@@ -302,3 +303,37 @@ out:
mutex_unlock(&ide_floppy_ioctl_mutex);
return err;
}
+
+#ifdef CONFIG_COMPAT
+int ide_floppy_compat_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ struct ide_atapi_pc pc;
+ void __user *argp = compat_ptr(arg);
+ int err;
+
+ mutex_lock(&ide_floppy_ioctl_mutex);
+ if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
+ err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
+ goto out;
+ }
+
+ err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
+ if (err != -ENOTTY)
+ goto out;
+
+ /*
+ * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
+ * and CDROM_SEND_PACKET (legacy) ioctls
+ */
+ if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+
+ if (err == -ENOTTY)
+ err = generic_ide_ioctl(drive, bdev, cmd, arg);
+
+out:
+ mutex_unlock(&ide_floppy_ioctl_mutex);
+ return err;
+}
+#endif
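
compat_ptr() is what makes these handlers safe: a 32-bit process passes pointers as 32-bit integers, which the 64-bit kernel must widen explicitly before treating them as user pointers. The generic definition is roughly the following (s390 additionally masks the top bit):

	static inline void __user *compat_ptr(compat_uptr_t uptr)
	{
		return (void __user *)(unsigned long)uptr;
	}
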
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index dba9ad5c97b3..1bb99b556393 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -341,11 +341,28 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static int ide_gd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ if (!drive->disk_ops->compat_ioctl)
+ return -ENOIOCTLCMD;
+
+ return drive->disk_ops->compat_ioctl(drive, bdev, mode, cmd, arg);
+}
+#endif
+
static const struct block_device_operations ide_gd_ops = {
.owner = THIS_MODULE,
.open = ide_gd_unlocked_open,
.release = ide_gd_release,
.ioctl = ide_gd_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= ide_gd_compat_ioctl,
+#endif
.getgeo = ide_gd_getgeo,
.check_events = ide_gd_check_events,
.unlock_native_capacity = ide_gd_unlock_native_capacity,
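
Returning -ENOIOCTLCMD (rather than -ENOTTY) when a media driver lacks a compat handler follows the in-kernel convention; a short note on what that code means to the layers above (convention sketch, not driver code):

	/*
	 * -ENOIOCTLCMD: "no handler at this level" -- callers may try
	 * further fallbacks, and the syscall layer converts whatever
	 * remains into -ENOTTY, so 32-bit userspace still sees the
	 * conventional errno.
	 */
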
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d48c17003874..09491098047b 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -3,11 +3,20 @@
* IDE ioctls handling.
*/
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/slab.h>
+static int put_user_long(long val, unsigned long arg)
+{
+ if (in_compat_syscall())
+ return put_user(val, (compat_long_t __user *)compat_ptr(arg));
+
+ return put_user(val, (long __user *)arg);
+}
+
static const struct ide_ioctl_devset ide_ioctl_settings[] = {
{ HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit },
{ HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, &ide_devset_keepsettings },
@@ -37,7 +46,7 @@ read_val:
mutex_lock(&ide_setting_mtx);
err = ds->get(drive);
mutex_unlock(&ide_setting_mtx);
- return err >= 0 ? put_user(err, (long __user *)arg) : err;
+ return err >= 0 ? put_user_long(err, arg) : err;
set_val:
if (bdev != bdev->bd_contains)
@@ -56,7 +65,7 @@ set_val:
EXPORT_SYMBOL_GPL(ide_setting_ioctl);
static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
- unsigned long arg)
+ void __user *argp)
{
u16 *id = NULL;
int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142;
@@ -77,7 +86,7 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
memcpy(id, drive->id, size);
ata_id_to_hd_driveid(id);
- if (copy_to_user((void __user *)arg, id, size))
+ if (copy_to_user(argp, id, size))
rc = -EFAULT;
kfree(id);
@@ -87,10 +96,10 @@ out:
static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg)
{
- return put_user((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
+ return put_user_long((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
<< IDE_NICE_DSC_OVERLAP) |
(!!(drive->dev_flags & IDE_DFLAG_NICE1)
- << IDE_NICE_1), (long __user *)arg);
+ << IDE_NICE_1), arg);
}
static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
@@ -115,7 +124,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
return 0;
}
-static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
+static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
{
u8 *buf = NULL;
int bufsize = 0, err = 0;
@@ -123,7 +132,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
struct ide_cmd cmd;
struct ide_taskfile *tf = &cmd.tf;
- if (NULL == (void *) arg) {
+ if (NULL == argp) {
struct request *rq;
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
@@ -135,7 +144,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
return err;
}
- if (copy_from_user(args, (void __user *)arg, 4))
+ if (copy_from_user(args, argp, 4))
return -EFAULT;
memset(&cmd, 0, sizeof(cmd));
@@ -181,19 +190,18 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
args[1] = tf->error;
args[2] = tf->nsect;
abort:
- if (copy_to_user((void __user *)arg, &args, 4))
+ if (copy_to_user(argp, &args, 4))
err = -EFAULT;
if (buf) {
- if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
+ if (copy_to_user((argp + 4), buf, bufsize))
err = -EFAULT;
kfree(buf);
}
return err;
}
-static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg)
+static int ide_task_ioctl(ide_drive_t *drive, void __user *p)
{
- void __user *p = (void __user *)arg;
int err = 0;
u8 args[7];
struct ide_cmd cmd;
@@ -237,6 +245,10 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
int err;
+ void __user *argp = (void __user *)arg;
+
+ if (in_compat_syscall())
+ argp = compat_ptr(arg);
err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
if (err != -EOPNOTSUPP)
@@ -247,7 +259,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_GET_IDENTITY:
if (bdev != bdev->bd_contains)
return -EINVAL;
- return ide_get_identity_ioctl(drive, cmd, arg);
+ return ide_get_identity_ioctl(drive, cmd, argp);
case HDIO_GET_NICE:
return ide_get_nice_ioctl(drive, arg);
case HDIO_SET_NICE:
@@ -258,6 +270,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_DRIVE_TASKFILE:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
+ /* missing compat handler for HDIO_DRIVE_TASKFILE */
+ if (in_compat_syscall())
+ return -ENOTTY;
if (drive->media == ide_disk)
return ide_taskfile_ioctl(drive, arg);
return -ENOMSG;
@@ -265,11 +280,11 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_DRIVE_CMD:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
- return ide_cmd_ioctl(drive, arg);
+ return ide_cmd_ioctl(drive, argp);
case HDIO_DRIVE_TASK:
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
- return ide_task_ioctl(drive, arg);
+ return ide_task_ioctl(drive, argp);
case HDIO_DRIVE_RESET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -277,7 +292,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (put_user(BUSSTATE_ON, (long __user *)arg))
+ if (put_user_long(BUSSTATE_ON, arg))
return -EFAULT;
return 0;
case HDIO_SET_BUSSTATE:
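
put_user_long() exists because 'long' is 8 bytes for native callers but 4 bytes for 32-bit (compat) callers on a 64-bit kernel; storing through the wrong type would write past the caller's variable. An annotated restatement (illustration only):

	/*
	 * HDIO_GET_32BIT and friends hand back a value through a user 'long':
	 *
	 *	native caller:  put_user(val, (long __user *)arg);        // 8 bytes
	 *	compat caller:  put_user(val, (compat_long_t __user *)
	 *	                              compat_ptr(arg));           // 4 bytes
	 *
	 * in_compat_syscall() picks the right store at run time.
	 */
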
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index d1445d74e9c3..f2be127ee96e 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -530,7 +530,6 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
*/
if (stat == 0xff)
return -ENODEV;
- touch_softlockup_watchdog();
touch_nmi_watchdog();
}
return -EBUSY;
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 0363d73b0be0..15c17f3781ee 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -206,7 +206,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
ide_devset_rw(current_speed, xfer_rate);
ide_devset_rw_field(init_speed, init_speed);
ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
-ide_devset_rw_field(number, dn);
+ide_devset_ro_field(number, dn);
static const struct ide_proc_devset ide_generic_settings[] = {
IDE_PROC_DEVSET(current_speed, 0, 70),
@@ -381,13 +381,12 @@ parse_error:
return -EINVAL;
}
-static const struct file_operations ide_settings_proc_fops = {
- .owner = THIS_MODULE,
- .open = ide_settings_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = ide_settings_proc_write,
+static const struct proc_ops ide_settings_proc_ops = {
+ .proc_open = ide_settings_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = ide_settings_proc_write,
};
int ide_capacity_proc_show(struct seq_file *m, void *v)
@@ -546,7 +545,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
if (drive->proc) {
ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
- drive->proc, &ide_settings_proc_fops,
+ drive->proc, &ide_settings_proc_ops,
drive);
}
sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
@@ -615,7 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(ide_drivers);
+DEFINE_PROC_SHOW_ATTRIBUTE(ide_drivers);
void proc_ide_create(void)
{
@@ -624,7 +623,7 @@ void proc_ide_create(void)
if (!proc_ide_root)
return;
- proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
+ proc_create("drivers", 0, proc_ide_root, &ide_drivers_proc_ops);
}
void proc_ide_destroy(void)
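
DEFINE_PROC_SHOW_ATTRIBUTE() is the proc_ops counterpart of DEFINE_SHOW_ATTRIBUTE(); it is why the proc_create() call above now references ide_drivers_proc_ops. Rough expansion for this driver (simplified from <linux/proc_fs.h>):

	static int ide_drivers_open(struct inode *inode, struct file *file)
	{
		return single_open(file, ide_drivers_show, inode->i_private);
	}

	static const struct proc_ops ide_drivers_proc_ops = {
		.proc_open	= ide_drivers_open,
		.proc_read	= seq_read,
		.proc_lseek	= seq_lseek,
		.proc_release	= single_release,
	};
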
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3e7482695f77..6f26634b22bb 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1945,11 +1945,22 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
return err;
}
+static int idetape_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ if (cmd == 0x0340 || cmd == 0x350)
+ arg = (unsigned long)compat_ptr(arg);
+
+ return idetape_ioctl(bdev, mode, cmd, arg);
+}
+
static const struct block_device_operations idetape_block_ops = {
.owner = THIS_MODULE,
.open = idetape_open,
.release = idetape_release,
.ioctl = idetape_ioctl,
+ .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idetape_compat_ioctl : NULL,
};
static int ide_tape_probe(ide_drive_t *drive)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index b5647e34e74e..ea0b064b5f56 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1019,7 +1019,6 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
struct device_node *np = pmif->node;
const int *bidp;
struct ide_host *host;
- ide_hwif_t *hwif;
struct ide_hw *hws[] = { hw };
struct ide_port_info d = pmac_port_info;
int rc;
@@ -1075,7 +1074,7 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
rc = -ENOMEM;
goto bail;
}
- hwif = pmif->hwif = host->ports[0];
+ pmif->hwif = host->ports[0];
if (on_media_bay(pmif)) {
/* Fixup bus ID for media bay */
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 6ce318ebd0cc..ab79b6289464 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -299,7 +299,7 @@ static void __init qd6500_init_dev(ide_drive_t *drive)
static void __init qd6580_init_dev(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- u16 t1, t2;
+ unsigned long t1, t2;
u8 base = (hwif->config_data & 0xff00) >> 8;
u8 config = QD_CONFIG(hwif);
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index ac6fc3fffa0d..458e72e034b0 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -115,6 +115,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
struct pci_dev *dev = to_pci_dev(hwif->dev);
const u8 pio = drive->pio_mode - XFER_PIO_0;
+ if (drive->dn >= ARRAY_SIZE(drive_pci))
+ return;
+
pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
if (svwks_csb_check(dev)) {
@@ -141,6 +144,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
+ if (drive->dn >= ARRAY_SIZE(drive_pci2))
+ return;
+
pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
pci_read_config_byte(dev, 0x54, &ultra_enable);
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index 57eea5a9047f..c4b20f350b84 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -648,8 +648,7 @@ static void sil_quirkproc(ide_drive_t *drive)
static void init_iops_siimage(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
+ struct ide_host *host = dev_get_drvdata(hwif->dev);
hwif->hwif_data = NULL;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index d5e871fe840d..b1bbf807bb3d 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -549,7 +549,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), "tx4938ide"))
+ resource_size(res), MODNAME))
return -EBUSY;
mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
resource_size(res));
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 977cb00398b0..63a3aca506fc 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -175,8 +175,7 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
{
ide_drive_t *peer = ide_get_pair_dev(drive);
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
+ struct ide_host *host = dev_get_drvdata(hwif->dev);
struct via82cxxx_dev *vdev = host->host_priv;
struct ide_timing t, p;
unsigned int T, UT;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 75fd2a7b0842..d55606608ac8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -41,6 +41,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
@@ -62,6 +63,7 @@ static struct cpuidle_driver intel_idle_driver = {
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
+static unsigned int disabled_states_mask;
static unsigned int mwait_substates;
@@ -79,6 +81,7 @@ struct idle_cpu {
unsigned long auto_demotion_disable_flags;
bool byt_auto_demotion_disable_flag;
bool disable_promotion_to_c1e;
+ bool use_acpi;
};
static const struct idle_cpu *icpu;
@@ -90,6 +93,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Enable this state by default even if the ACPI _CST does not list it.
+ */
+#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
+
+/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
@@ -124,7 +132,7 @@ static struct cpuidle_state nehalem_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -161,7 +169,7 @@ static struct cpuidle_state snb_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -296,7 +304,7 @@ static struct cpuidle_state ivb_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -341,7 +349,7 @@ static struct cpuidle_state ivt_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
.enter = &intel_idle,
@@ -378,7 +386,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
.enter = &intel_idle,
@@ -415,7 +423,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
.enter = &intel_idle,
@@ -452,7 +460,7 @@ static struct cpuidle_state hsw_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -520,7 +528,7 @@ static struct cpuidle_state bdw_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -589,7 +597,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -658,7 +666,7 @@ static struct cpuidle_state skx_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -808,7 +816,7 @@ static struct cpuidle_state bxt_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -869,7 +877,7 @@ static struct cpuidle_state dnv_cstates[] = {
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
@@ -944,37 +952,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
mwait_idle_with_hints(eax, ecx);
}
-static void __setup_broadcast_timer(bool on)
-{
- if (on)
- tick_broadcast_enable();
- else
- tick_broadcast_disable();
-}
-
-static void auto_demotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-}
-static void c1e_promotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
- msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
-}
-
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_nhx = {
+ .state_table = nehalem_cstates,
+ .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
@@ -993,6 +983,12 @@ static const struct idle_cpu idle_cpu_snb = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_snx = {
+ .state_table = snb_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_byt = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
@@ -1013,6 +1009,7 @@ static const struct idle_cpu idle_cpu_ivb = {
static const struct idle_cpu idle_cpu_ivt = {
.state_table = ivt_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_hsw = {
@@ -1020,11 +1017,23 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_hsx = {
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_bdw = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_bdx = {
+ .state_table = bdw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_skl = {
.state_table = skl_cstates,
.disable_promotion_to_c1e = true,
@@ -1033,15 +1042,18 @@ static const struct idle_cpu idle_cpu_skl = {
static const struct idle_cpu idle_cpu_skx = {
.state_table = skx_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_knl = {
.state_table = knl_cstates,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_bxt = {
@@ -1052,20 +1064,21 @@ static const struct idle_cpu idle_cpu_bxt = {
static const struct idle_cpu idle_cpu_dnv = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx),
INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
- INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem),
- INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx),
+ INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
- INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
- INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx),
INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
@@ -1073,14 +1086,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx),
INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx),
+ INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx),
INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
@@ -1095,76 +1108,178 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
{}
};
-/*
- * intel_idle_probe()
+#define INTEL_CPU_FAM6_MWAIT \
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
+
+static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+ INTEL_CPU_FAM6_MWAIT,
+ {}
+};
+
+static bool __init intel_idle_max_cstate_reached(int cstate)
+{
+ if (cstate + 1 > max_cstate) {
+ pr_info("max_cstate %d reached\n", max_cstate);
+ return true;
+ }
+ return false;
+}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+#include <acpi/processor.h>
+
+static bool no_acpi __read_mostly;
+module_param(no_acpi, bool, 0444);
+MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
+
+static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
+module_param_named(use_acpi, force_use_acpi, bool, 0444);
+MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");
+
+static struct acpi_processor_power acpi_state_table __initdata;
+
+/**
+ * intel_idle_cst_usable - Check if the _CST information can be used.
+ *
+ * Check if all of the C-states listed by _CST in the max_cstate range are
+ * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
*/
-static int __init intel_idle_probe(void)
+static bool __init intel_idle_cst_usable(void)
{
- unsigned int eax, ebx, ecx;
- const struct x86_cpu_id *id;
+ int cstate, limit;
- if (max_cstate == 0) {
- pr_debug("disabled\n");
- return -EPERM;
- }
+ limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
+ acpi_state_table.count);
- id = x86_match_cpu(intel_idle_ids);
- if (!id) {
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6)
- pr_debug("does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return -ENODEV;
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
+
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ return false;
}
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
- pr_debug("Please enable MWAIT in BIOS SETUP\n");
- return -ENODEV;
+ return true;
+}
+
+static bool __init intel_idle_acpi_cst_extract(void)
+{
+ unsigned int cpu;
+
+ if (no_acpi) {
+ pr_debug("Not allowed to use ACPI _CST\n");
+ return false;
}
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
+ for_each_possible_cpu(cpu) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ if (!pr)
+ continue;
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
- !mwait_substates)
- return -ENODEV;
+ if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
+ continue;
- pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+ acpi_state_table.count++;
- icpu = (const struct idle_cpu *)id->driver_data;
- cpuidle_state_table = icpu->state_table;
+ if (!intel_idle_cst_usable())
+ continue;
- pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
- boot_cpu_data.x86_model);
+ if (!acpi_processor_claim_cst_control()) {
+ acpi_state_table.count = 0;
+ return false;
+ }
- return 0;
+ return true;
+ }
+
+ pr_debug("ACPI _CST not found or not usable\n");
+ return false;
}
-/*
- * intel_idle_cpuidle_devices_uninit()
- * Unregisters the cpuidle devices.
- */
-static void intel_idle_cpuidle_devices_uninit(void)
+static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
- int i;
- struct cpuidle_device *dev;
+ int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
+
+ if (intel_idle_max_cstate_reached(cstate))
+ break;
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
+ cx = &acpi_state_table.states[cstate];
+
+ state = &drv->states[drv->state_count++];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
+ strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = cx->latency;
+ /*
+ * For C1-type C-states use the same number for both the exit
+ * latency and target residency, because that is the case for
+ * C1 in the majority of the static C-states tables above.
+ * For the other types of C-states, however, set the target
+ * residency to 3 times the exit latency which should lead to
+ * a reasonable balance between energy-efficiency and
+ * performance in the majority of interesting cases.
+ */
+ state->target_residency = cx->latency;
+ if (cx->type > ACPI_STATE_C1)
+ state->target_residency *= 3;
+
+ state->flags = MWAIT2flg(cx->address);
+ if (cx->type > ACPI_STATE_C2)
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+
+ if (disabled_states_mask & BIT(cstate))
+ state->flags |= CPUIDLE_FLAG_OFF;
+
+ state->enter = intel_idle;
+ state->enter_s2idle = intel_idle_s2idle;
}
}
+static bool __init intel_idle_off_by_default(u32 mwait_hint)
+{
+ int cstate, limit;
+
+ /*
+ * If there are no _CST C-states, do not disable any C-states by
+ * default.
+ */
+ if (!acpi_state_table.count)
+ return false;
+
+ limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ if (acpi_state_table.states[cstate].address == mwait_hint)
+ return false;
+ }
+ return true;
+}
+#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+#define force_use_acpi (false)
+
+static inline bool intel_idle_acpi_cst_extract(void) { return false; }
+static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
+static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+
/*
* ivt_idle_state_table_update(void)
*
* Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-static void ivt_idle_state_table_update(void)
+static void __init ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
int cpu, package_num, num_sockets = 1;
@@ -1187,15 +1302,17 @@ static void ivt_idle_state_table_update(void)
/* else, 1 and 2 socket systems use default ivt_cstates */
}
-/*
- * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+/**
+ * irtl_2_usec - IRTL to microseconds conversion.
+ * @irtl: IRTL MSR value.
+ *
+ * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
*/
-
-static unsigned int irtl_ns_units[] = {
- 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
-
-static unsigned long long irtl_2_usec(unsigned long long irtl)
+static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
+ static const unsigned int irtl_ns_units[] __initconst = {
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+ };
unsigned long long ns;
if (!irtl)
@@ -1203,15 +1320,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
ns = irtl_ns_units[(irtl >> 10) & 0x7];
- return div64_u64((irtl & 0x3FF) * ns, 1000);
+ return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}
+
/*
* bxt_idle_state_table_update(void)
*
* On BXT, we trust the IRTL to show the definitive maximum latency
* We use the same value for target_residency.
*/
-static void bxt_idle_state_table_update(void)
+static void __init bxt_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int usec;
@@ -1258,7 +1376,7 @@ static void bxt_idle_state_table_update(void)
* On SKL-H (model 0x5e) disable C8 and C9 if:
* C10 is enabled and SGX disabled
*/
-static void sklh_idle_state_table_update(void)
+static void __init sklh_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int eax, ebx, ecx, edx;
@@ -1284,7 +1402,7 @@ static void sklh_idle_state_table_update(void)
/* if SGX is present */
if (ebx & (1 << 2)) {
- rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+ rdmsrl(MSR_IA32_FEAT_CTL, msr);
/* if SGX is enabled */
if (msr & (1 << 18))
@@ -1294,16 +1412,28 @@ static void sklh_idle_state_table_update(void)
skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
-/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
- */
-static void intel_idle_state_table_update(void)
+static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
- switch (boot_cpu_data.x86_model) {
+ unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+ unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
+ MWAIT_SUBSTATE_MASK;
+
+ /* Ignore the C-state if there are NO sub-states in CPUID for it. */
+ if (num_substates == 0)
+ return false;
+
+ if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle states deeper than C2");
+
+ return true;
+}
+
+static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+{
+ int cstate;
+ switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
@@ -1315,62 +1445,38 @@ static void intel_idle_state_table_update(void)
sklh_idle_state_table_update();
break;
}
-}
-
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static void __init intel_idle_cpuidle_driver_init(void)
-{
- int cstate;
- struct cpuidle_driver *drv = &intel_idle_driver;
-
- intel_idle_state_table_update();
-
- cpuidle_poll_state_init(drv);
- drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate;
+ unsigned int mwait_hint;
- if ((cpuidle_state_table[cstate].enter == NULL) &&
- (cpuidle_state_table[cstate].enter_s2idle == NULL))
+ if (intel_idle_max_cstate_reached(cstate))
break;
- if (cstate + 1 > max_cstate) {
- pr_info("max_cstate %d reached\n", max_cstate);
+ if (!cpuidle_state_table[cstate].enter &&
+ !cpuidle_state_table[cstate].enter_s2idle)
break;
- }
-
- mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-
- /* number of sub-states for this state in CPUID.MWAIT */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
- /* if NO sub-states for this state in CPUID, skip it */
- if (num_substates == 0)
- continue;
-
- /* if state marked as disabled, skip it */
+ /* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ if (!intel_idle_verify_cstate(mwait_hint))
+ continue;
- if (((mwait_cstate + 1) > 2) &&
- !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- mark_tsc_unstable("TSC halts in idle"
- " states deeper than C2");
+ /* Structure copy. */
+ drv->states[drv->state_count] = cpuidle_state_table[cstate];
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[cstate];
+ if ((disabled_states_mask & BIT(drv->state_count)) ||
+ ((icpu->use_acpi || force_use_acpi) &&
+ intel_idle_off_by_default(mwait_hint) &&
+ !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
+ drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
- drv->state_count += 1;
+ drv->state_count++;
}
if (icpu->byt_auto_demotion_disable_flag) {
@@ -1379,6 +1485,42 @@ static void __init intel_idle_cpuidle_driver_init(void)
}
}
+/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ cpuidle_poll_state_init(drv);
+
+ if (disabled_states_mask & BIT(0))
+ drv->states[0].flags |= CPUIDLE_FLAG_OFF;
+
+ drv->state_count = 1;
+
+ if (icpu)
+ intel_idle_init_cstates_icpu(drv);
+ else
+ intel_idle_init_cstates_acpi(drv);
+}
+
+static void auto_demotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+}
+
+static void c1e_promotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
/*
* intel_idle_cpu_init()
@@ -1397,6 +1539,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
return -EIO;
}
+ if (!icpu)
+ return 0;
+
if (icpu->auto_demotion_disable_flags)
auto_demotion_disable();
@@ -1411,7 +1556,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
struct cpuidle_device *dev;
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- __setup_broadcast_timer(true);
+ tick_broadcast_enable();
/*
* Some systems can hotplug a cpu at runtime after
@@ -1425,23 +1570,74 @@ static int intel_idle_cpu_online(unsigned int cpu)
return 0;
}
+/**
+ * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
+ */
+static void __init intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
+}
+
static int __init intel_idle_init(void)
{
+ const struct x86_cpu_id *id;
+ unsigned int eax, ebx, ecx;
int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
- retval = intel_idle_probe();
- if (retval)
- return retval;
+ if (max_cstate == 0) {
+ pr_debug("disabled\n");
+ return -EPERM;
+ }
+
+ id = x86_match_cpu(intel_idle_ids);
+ if (id) {
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+ } else {
+ id = x86_match_cpu(intel_mwait_ids);
+ if (!id)
+ return -ENODEV;
+ }
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
+ return -ENODEV;
+
+ pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+
+ icpu = (const struct idle_cpu *)id->driver_data;
+ if (icpu) {
+ cpuidle_state_table = icpu->state_table;
+ if (icpu->use_acpi || force_use_acpi)
+ intel_idle_acpi_cst_extract();
+ } else if (!intel_idle_acpi_cst_extract()) {
+ return -ENODEV;
+ }
+
+ pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+ boot_cpu_data.x86_model);
intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
+ if (!intel_idle_cpuidle_devices)
return -ENOMEM;
- intel_idle_cpuidle_driver_init();
+ intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
@@ -1480,3 +1676,11 @@ device_initcall(intel_idle_init);
* is the easiest way (currently) to continue doing that.
*/
module_param(max_cstate, int, 0444);
+/*
+ * The positions of the bits that are set in this number are the indices of the
+ * idle states to be disabled by default (as reflected by the names of the
+ * corresponding idle state directories in sysfs, "state0", "state1" ...
+ * "state<i>" ..., where <i> is the index of the given state).
+ */
+module_param_named(states_off, disabled_states_mask, uint, 0444);
+MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
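
The default-enable decision made in intel_idle_init_cstates_icpu() combines three inputs; a condensed restatement plus a hypothetical boot example:

	/*
	 * A state starts out disabled (CPUIDLE_FLAG_OFF) when:
	 *  - its bit is set in the states_off mask, or
	 *  - ACPI _CST is consulted, does not list the state's MWAIT hint,
	 *    and the state lacks CPUIDLE_FLAG_ALWAYS_ENABLE (which is why
	 *    C1E stays usable even when _CST omits it).
	 *
	 * Example (hypothetical): intel_idle.states_off=0x6 sets BIT(1) and
	 * BIT(2), so state1 and state2 boot disabled; they can still be
	 * re-enabled via /sys/devices/system/cpu/cpu*/cpuidle/state<i>/disable.
	 */
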
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index d4ef35aeb579..5d91a6dda894 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@ config ADXL372_I2C
module will be called adxl372_i2c.
config BMA180
- tristate "Bosch BMA180/BMA250 3-Axis Accelerometer Driver"
+ tristate "Bosch BMA180/BMA25x 3-Axis Accelerometer Driver"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build a driver for the Bosch BMA180 or
- BMA250 triaxial acceleration sensor.
+ BMA25x triaxial acceleration sensor.
To compile this driver as a module, choose M here: the
module will be called bma180.
@@ -112,6 +112,22 @@ config BMA220
To compile this driver as a module, choose M here: the
module will be called bma220_spi.
+config BMA400
+ tristate "Bosch BMA400 3-Axis Accelerometer Driver"
+ select REGMAP
+ select BMA400_I2C if I2C
+ help
+ Say Y here if you want to build a driver for the Bosch BMA400
+ triaxial acceleration sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma400_core and you will also get
+ bma400_i2c if I2C is enabled.
+
+config BMA400_I2C
+ tristate
+ depends on BMA400
+
config BMC150_ACCEL
tristate "Bosch BMC150 Accelerometer Driver"
select IIO_BUFFER
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 56bd0215e0d4..3a051cf37f40 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_ADXL372_I2C) += adxl372_i2c.o
obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o
obj-$(CONFIG_BMA180) += bma180.o
obj-$(CONFIG_BMA220) += bma220_spi.o
+obj-$(CONFIG_BMA400) += bma400_core.o
+obj-$(CONFIG_BMA400_I2C) += bma400_i2c.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index c4810c73b2a2..0f0f27a8184e 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -233,6 +233,12 @@ static const char * const adis16201_status_error_msgs[] = {
[ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
};
+static const struct adis_timeout adis16201_timeouts = {
+ .reset_ms = ADIS16201_STARTUP_DELAY_MS,
+ .sw_reset_ms = ADIS16201_STARTUP_DELAY_MS,
+ .self_test_ms = ADIS16201_STARTUP_DELAY_MS,
+};
+
static const struct adis_data adis16201_data = {
.read_delay = 20,
.msc_ctrl_reg = ADIS16201_MSC_CTRL_REG,
@@ -241,7 +247,7 @@ static const struct adis_data adis16201_data = {
.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16201_STARTUP_DELAY_MS,
+ .timeouts = &adis16201_timeouts,
.status_error_msgs = adis16201_status_error_msgs,
.status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index 98d77af8a2b0..c6dbd2424e10 100644
--- a/drivers/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -243,6 +243,12 @@ static const char * const adis16209_status_error_msgs[] = {
[ADIS16209_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
};
+static const struct adis_timeout adis16209_timeouts = {
+ .reset_ms = ADIS16209_STARTUP_DELAY_MS,
+ .self_test_ms = ADIS16209_STARTUP_DELAY_MS,
+ .sw_reset_ms = ADIS16209_STARTUP_DELAY_MS,
+};
+
static const struct adis_data adis16209_data = {
.read_delay = 30,
.msc_ctrl_reg = ADIS16209_MSC_CTRL_REG,
@@ -251,7 +257,7 @@ static const struct adis_data adis16209_data = {
.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16209_STARTUP_DELAY_MS,
+ .timeouts = &adis16209_timeouts,
.status_error_msgs = adis16209_status_error_msgs,
.status_error_mask = BIT(ADIS16209_STAT_SELFTEST_FAIL_BIT) |
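
Both conversions above replace the single startup_delay with a struct adis_timeout carrying one delay per operation. The layout implied by the initializers is sketched below (field names from the patch; the struct itself lives in the adis library):

	struct adis_timeout {
		u16 reset_ms;		/* wait after a hardware reset */
		u16 sw_reset_ms;	/* wait after a software reset command */
		u16 self_test_ms;	/* wait after triggering the self-test */
	};
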
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 1574e4604a4f..fcd91d5f05fd 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -9,6 +9,7 @@
* SPI is not supported by driver
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
+ * BMA254: 7-bit I2C slave address 0x18 or 0x19
*/
#include <linux/module.h>
@@ -18,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/iio/iio.h>
@@ -33,17 +35,20 @@
enum chip_ids {
BMA180,
BMA250,
+ BMA254,
};
struct bma180_data;
struct bma180_part_info {
+ u8 chip_id;
const struct iio_chan_spec *channels;
unsigned int num_channels;
const int *scale_table;
unsigned int num_scales;
const int *bw_table;
unsigned int num_bw;
+ int center_temp;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
@@ -51,6 +56,7 @@ struct bma180_part_info {
u8 scale_reg, scale_mask;
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
+ u8 int_map_reg, int_enable_dataready_int1_mask;
u8 softreset_reg;
int (*chip_config)(struct bma180_data *data);
@@ -89,6 +95,8 @@ struct bma180_part_info {
#define BMA180_RESET_VAL 0xb6
#define BMA180_ID_REG_VAL 0x03
+#define BMA250_ID_REG_VAL 0x03
+#define BMA254_ID_REG_VAL 0xfa /* 250 decimal */
/* Chip power modes */
#define BMA180_LOW_POWER 0x03
@@ -109,7 +117,26 @@ struct bma180_part_info {
#define BMA250_INT1_DATA_MASK BIT(0)
#define BMA250_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
+#define BMA254_RANGE_REG 0x0f
+#define BMA254_BW_REG 0x10
+#define BMA254_POWER_REG 0x11
+#define BMA254_RESET_REG 0x14
+#define BMA254_INT_ENABLE_REG 0x17
+#define BMA254_INT_MAP_REG 0x1a
+#define BMA254_INT_RESET_REG 0x21
+
+#define BMA254_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
+#define BMA254_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
+#define BMA254_SUSPEND_MASK BIT(7) /* chip will sleep */
+#define BMA254_LOWPOWER_MASK BIT(6)
+#define BMA254_DATA_INTEN_MASK BIT(4)
+#define BMA254_INT2_DATA_MASK BIT(7)
+#define BMA254_INT1_DATA_MASK BIT(0)
+#define BMA254_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
+
struct bma180_data {
+ struct regulator *vdd_supply;
+ struct regulator *vddio_supply;
struct i2c_client *client;
struct iio_trigger *trig;
const struct bma180_part_info *part_info;
@@ -132,8 +159,8 @@ enum bma180_chan {
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
-static int bma250_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
-static int bma250_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
+static int bma25x_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
+static int bma25x_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
0, 0, 306458 };
static int bma180_get_data_reg(struct bma180_data *data, enum bma180_chan chan)
@@ -307,8 +334,11 @@ static int bma180_chip_init(struct bma180_data *data)
if (ret < 0)
return ret;
- if (ret != BMA180_ID_REG_VAL)
+ if (ret != data->part_info->chip_id) {
+ dev_err(&data->client->dev, "wrong chip ID %d expected %d\n",
+ ret, data->part_info->chip_id);
return -ENODEV;
+ }
ret = bma180_soft_reset(data);
if (ret)
@@ -355,7 +385,7 @@ err:
return ret;
}
-static int bma250_chip_config(struct bma180_data *data)
+static int bma25x_chip_config(struct bma180_data *data)
{
int ret = bma180_chip_init(data);
@@ -367,8 +397,12 @@ static int bma250_chip_config(struct bma180_data *data)
ret = bma180_set_scale(data, 38344); /* 2 G */
if (ret)
goto err;
- ret = bma180_set_bits(data, BMA250_INT_MAP_REG,
- BMA250_INT1_DATA_MASK, 1);
+ /*
+ * This enables the data-ready interrupt on the INT1 pin.
+ * FIXME: support using the INT2 pin
+ */
+ ret = bma180_set_bits(data, data->part_info->int_map_reg,
+ data->part_info->int_enable_dataready_int1_mask, 1);
if (ret)
goto err;
@@ -394,7 +428,7 @@ err:
dev_err(&data->client->dev, "failed to disable the chip\n");
}
-static void bma250_chip_disable(struct bma180_data *data)
+static void bma25x_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
goto err;
@@ -497,7 +531,7 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
- *val = 48; /* 0 LSB @ 24 degree C */
+ *val = data->part_info->center_temp;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -627,34 +661,96 @@ static const struct iio_chan_spec bma250_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(4),
};
+static const struct iio_chan_spec bma254_channels[] = {
+ BMA180_ACC_CHANNEL(X, 12),
+ BMA180_ACC_CHANNEL(Y, 12),
+ BMA180_ACC_CHANNEL(Z, 12),
+ BMA180_TEMP_CHANNEL,
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
static const struct bma180_part_info bma180_part_info[] = {
[BMA180] = {
- bma180_channels, ARRAY_SIZE(bma180_channels),
- bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
- bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
- BMA180_CTRL_REG0, BMA180_RESET_INT,
- BMA180_CTRL_REG0, BMA180_SLEEP,
- BMA180_BW_TCS, BMA180_BW,
- BMA180_OFFSET_LSB1, BMA180_RANGE,
- BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
- BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
- BMA180_RESET,
- bma180_chip_config,
- bma180_chip_disable,
+ .chip_id = BMA180_ID_REG_VAL,
+ .channels = bma180_channels,
+ .num_channels = ARRAY_SIZE(bma180_channels),
+ .scale_table = bma180_scale_table,
+ .num_scales = ARRAY_SIZE(bma180_scale_table),
+ .bw_table = bma180_bw_table,
+ .num_bw = ARRAY_SIZE(bma180_bw_table),
+ .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .int_reset_reg = BMA180_CTRL_REG0,
+ .int_reset_mask = BMA180_RESET_INT,
+ .sleep_reg = BMA180_CTRL_REG0,
+ .sleep_mask = BMA180_SLEEP,
+ .bw_reg = BMA180_BW_TCS,
+ .bw_mask = BMA180_BW,
+ .scale_reg = BMA180_OFFSET_LSB1,
+ .scale_mask = BMA180_RANGE,
+ .power_reg = BMA180_TCO_Z,
+ .power_mask = BMA180_MODE_CONFIG,
+ .lowpower_val = BMA180_LOW_POWER,
+ .int_enable_reg = BMA180_CTRL_REG3,
+ .int_enable_mask = BMA180_NEW_DATA_INT,
+ .softreset_reg = BMA180_RESET,
+ .chip_config = bma180_chip_config,
+ .chip_disable = bma180_chip_disable,
},
[BMA250] = {
- bma250_channels, ARRAY_SIZE(bma250_channels),
- bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
- bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
- BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
- BMA250_POWER_REG, BMA250_SUSPEND_MASK,
- BMA250_BW_REG, BMA250_BW_MASK,
- BMA250_RANGE_REG, BMA250_RANGE_MASK,
- BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
- BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
- BMA250_RESET_REG,
- bma250_chip_config,
- bma250_chip_disable,
+ .chip_id = BMA250_ID_REG_VAL,
+ .channels = bma250_channels,
+ .num_channels = ARRAY_SIZE(bma250_channels),
+ .scale_table = bma25x_scale_table,
+ .num_scales = ARRAY_SIZE(bma25x_scale_table),
+ .bw_table = bma25x_bw_table,
+ .num_bw = ARRAY_SIZE(bma25x_bw_table),
+ .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .int_reset_reg = BMA250_INT_RESET_REG,
+ .int_reset_mask = BMA250_INT_RESET_MASK,
+ .sleep_reg = BMA250_POWER_REG,
+ .sleep_mask = BMA250_SUSPEND_MASK,
+ .bw_reg = BMA250_BW_REG,
+ .bw_mask = BMA250_BW_MASK,
+ .scale_reg = BMA250_RANGE_REG,
+ .scale_mask = BMA250_RANGE_MASK,
+ .power_reg = BMA250_POWER_REG,
+ .power_mask = BMA250_LOWPOWER_MASK,
+ .lowpower_val = 1,
+ .int_enable_reg = BMA250_INT_ENABLE_REG,
+ .int_enable_mask = BMA250_DATA_INTEN_MASK,
+ .int_map_reg = BMA250_INT_MAP_REG,
+ .int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
+ .softreset_reg = BMA250_RESET_REG,
+ .chip_config = bma25x_chip_config,
+ .chip_disable = bma25x_chip_disable,
+ },
+ [BMA254] = {
+ .chip_id = BMA254_ID_REG_VAL,
+ .channels = bma254_channels,
+ .num_channels = ARRAY_SIZE(bma254_channels),
+ .scale_table = bma25x_scale_table,
+ .num_scales = ARRAY_SIZE(bma25x_scale_table),
+ .bw_table = bma25x_bw_table,
+ .num_bw = ARRAY_SIZE(bma25x_bw_table),
+ .center_temp = 46, /* 0 LSB @ 23 degree C */
+ .int_reset_reg = BMA254_INT_RESET_REG,
+ .int_reset_mask = BMA254_INT_RESET_MASK,
+ .sleep_reg = BMA254_POWER_REG,
+ .sleep_mask = BMA254_SUSPEND_MASK,
+ .bw_reg = BMA254_BW_REG,
+ .bw_mask = BMA254_BW_MASK,
+ .scale_reg = BMA254_RANGE_REG,
+ .scale_mask = BMA254_RANGE_MASK,
+ .power_reg = BMA254_POWER_REG,
+ .power_mask = BMA254_LOWPOWER_MASK,
+ .lowpower_val = 1,
+ .int_enable_reg = BMA254_INT_ENABLE_REG,
+ .int_enable_mask = BMA254_DATA_INTEN_MASK,
+ .int_map_reg = BMA254_INT_MAP_REG,
+ .int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
+ .softreset_reg = BMA254_RESET_REG,
+ .chip_config = bma25x_chip_config,
+ .chip_disable = bma25x_chip_disable,
},
};
@@ -712,12 +808,13 @@ static const struct iio_trigger_ops bma180_trigger_ops = {
static int bma180_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct bma180_data *data;
struct iio_dev *indio_dev;
enum chip_ids chip;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -725,22 +822,56 @@ static int bma180_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
if (client->dev.of_node)
- chip = (enum chip_ids)of_device_get_match_data(&client->dev);
+ chip = (enum chip_ids)of_device_get_match_data(dev);
else
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
- ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
&data->orientation);
if (ret)
return ret;
+ data->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->vdd_supply)) {
+ if (PTR_ERR(data->vdd_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vdd regulator %d\n",
+ (int)PTR_ERR(data->vdd_supply));
+ return PTR_ERR(data->vdd_supply);
+ }
+ data->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(data->vddio_supply)) {
+ if (PTR_ERR(data->vddio_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vddio regulator %d\n",
+ (int)PTR_ERR(data->vddio_supply));
+ return PTR_ERR(data->vddio_supply);
+ }
+ /* Typical voltage is 2.4 V; these are the min and max */
+ ret = regulator_set_voltage(data->vdd_supply, 1620000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(data->vddio_supply, 1200000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_enable(data->vdd_supply);
+ if (ret) {
+ dev_err(dev, "Failed to enable vdd regulator: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(data->vddio_supply);
+ if (ret) {
+ dev_err(dev, "Failed to enable vddio regulator: %d\n", ret);
+ goto err_disable_vdd;
+ }
+ /* Wait to make sure we started up properly (3 ms at least) */
+ usleep_range(3000, 5000);
+
ret = data->part_info->chip_config(data);
if (ret < 0)
goto err_chip_disable;
mutex_init(&data->mutex);
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = data->part_info->channels;
indio_dev->num_channels = data->part_info->num_channels;
indio_dev->name = id->name;
@@ -755,15 +886,15 @@ static int bma180_probe(struct i2c_client *client,
goto err_chip_disable;
}
- ret = devm_request_irq(&client->dev, client->irq,
+ ret = devm_request_irq(dev, client->irq,
iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING,
"bma180_event", data->trig);
if (ret) {
- dev_err(&client->dev, "unable to request IRQ\n");
+ dev_err(dev, "unable to request IRQ\n");
goto err_trigger_free;
}
- data->trig->dev.parent = &client->dev;
+ data->trig->dev.parent = dev;
data->trig->ops = &bma180_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
indio_dev->trig = iio_trigger_get(data->trig);
@@ -776,13 +907,13 @@ static int bma180_probe(struct i2c_client *client,
ret = iio_triggered_buffer_setup(indio_dev, NULL,
bma180_trigger_handler, NULL);
if (ret < 0) {
- dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+ dev_err(dev, "unable to setup iio triggered buffer\n");
goto err_trigger_unregister;
}
ret = iio_device_register(indio_dev);
if (ret < 0) {
- dev_err(&client->dev, "unable to register iio device\n");
+ dev_err(dev, "unable to register iio device\n");
goto err_buffer_cleanup;
}
@@ -797,6 +928,9 @@ err_trigger_free:
iio_trigger_free(data->trig);
err_chip_disable:
data->part_info->chip_disable(data);
+ regulator_disable(data->vddio_supply);
+err_disable_vdd:
+ regulator_disable(data->vdd_supply);
return ret;
}
@@ -816,6 +950,8 @@ static int bma180_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
data->part_info->chip_disable(data);
mutex_unlock(&data->mutex);
+ regulator_disable(data->vddio_supply);
+ regulator_disable(data->vdd_supply);
return 0;
}
@@ -856,6 +992,7 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
static const struct i2c_device_id bma180_ids[] = {
{ "bma180", BMA180 },
{ "bma250", BMA250 },
+ { "bma254", BMA254 },
{ }
};
@@ -870,6 +1007,10 @@ static const struct of_device_id bma180_of_match[] = {
.compatible = "bosch,bma250",
.data = (void *)BMA250
},
+ {
+ .compatible = "bosch,bma254",
+ .data = (void *)BMA254
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bma180_of_match);
@@ -889,5 +1030,5 @@ module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA180/BMA250 triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA180/BMA25x triaxial acceleration sensor");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
new file mode 100644
index 000000000000..5ad10db9819f
--- /dev/null
+++ b/drivers/iio/accel/bma400.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register constants and other forward declarations needed by the bma400
+ * sources.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ */
+
+#ifndef _BMA400_H_
+#define _BMA400_H_
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+
+/*
+ * Read-Only Registers
+ */
+
+/* Status and ID registers */
+#define BMA400_CHIP_ID_REG 0x00
+#define BMA400_ERR_REG 0x02
+#define BMA400_STATUS_REG 0x03
+
+/* Acceleration registers */
+#define BMA400_X_AXIS_LSB_REG 0x04
+#define BMA400_X_AXIS_MSB_REG 0x05
+#define BMA400_Y_AXIS_LSB_REG 0x06
+#define BMA400_Y_AXIS_MSB_REG 0x07
+#define BMA400_Z_AXIS_LSB_REG 0x08
+#define BMA400_Z_AXIS_MSB_REG 0x09
+
+/* Sensor time registers */
+#define BMA400_SENSOR_TIME0 0x0a
+#define BMA400_SENSOR_TIME1 0x0b
+#define BMA400_SENSOR_TIME2 0x0c
+
+/* Event and interrupt registers */
+#define BMA400_EVENT_REG 0x0d
+#define BMA400_INT_STAT0_REG 0x0e
+#define BMA400_INT_STAT1_REG 0x0f
+#define BMA400_INT_STAT2_REG 0x10
+
+/* Temperature register */
+#define BMA400_TEMP_DATA_REG 0x11
+
+/* FIFO length and data registers */
+#define BMA400_FIFO_LENGTH0_REG 0x12
+#define BMA400_FIFO_LENGTH1_REG 0x13
+#define BMA400_FIFO_DATA_REG 0x14
+
+/* Step count registers */
+#define BMA400_STEP_CNT0_REG 0x15
+#define BMA400_STEP_CNT1_REG 0x16
+#define BMA400_STEP_CNT3_REG 0x17
+#define BMA400_STEP_STAT_REG 0x18
+
+/*
+ * Read-write configuration registers
+ */
+#define BMA400_ACC_CONFIG0_REG 0x19
+#define BMA400_ACC_CONFIG1_REG 0x1a
+#define BMA400_ACC_CONFIG2_REG 0x1b
+#define BMA400_CMD_REG 0x7e
+
+/* Chip ID of BMA400 devices found in the chip ID register. */
+#define BMA400_ID_REG_VAL 0x90
+
+#define BMA400_LP_OSR_SHIFT 5
+#define BMA400_NP_OSR_SHIFT 4
+#define BMA400_SCALE_SHIFT 6
+
+#define BMA400_TWO_BITS_MASK GENMASK(1, 0)
+#define BMA400_LP_OSR_MASK GENMASK(6, 5)
+#define BMA400_NP_OSR_MASK GENMASK(5, 4)
+#define BMA400_ACC_ODR_MASK GENMASK(3, 0)
+#define BMA400_ACC_SCALE_MASK GENMASK(7, 6)
+
+#define BMA400_ACC_ODR_MIN_RAW 0x05
+#define BMA400_ACC_ODR_LP_RAW 0x06
+#define BMA400_ACC_ODR_MAX_RAW 0x0b
+
+#define BMA400_ACC_ODR_MAX_HZ 800
+#define BMA400_ACC_ODR_MIN_WHOLE_HZ 25
+#define BMA400_ACC_ODR_MIN_HZ 12
+
+#define BMA400_SCALE_MIN 38357
+#define BMA400_SCALE_MAX 306864
+
+#define BMA400_NUM_REGULATORS 2
+#define BMA400_VDD_REGULATOR 0
+#define BMA400_VDDIO_REGULATOR 1
+
+extern const struct regmap_config bma400_regmap_config;
+
+int bma400_probe(struct device *dev, struct regmap *regmap, const char *name);
+
+int bma400_remove(struct device *dev);
+
+#endif
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
new file mode 100644
index 000000000000..cc77f89c048b
--- /dev/null
+++ b/drivers/iio/accel/bma400_core.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Core IIO driver for Bosch BMA400 triaxial acceleration sensor.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ *
+ * TODO:
+ * - Support for power management
+ * - Support events and interrupts
+ * - Create channel for step count
+ * - Create channel for sensor time
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "bma400.h"
+
+/*
+ * The G-range selection may be one of 2g, 4g, 8g, or 16g. The scale may
+ * be selected with the acc_range bits of the ACC_CONFIG1 register.
+ * NB: This buffer is populated in the device init.
+ */
+static int bma400_scales[8];
+
+/*
+ * See the ACC_CONFIG1 section of the datasheet.
+ * NB: This buffer is populated in the device init.
+ */
+static int bma400_sample_freqs[14];
+
+static const int bma400_osr_range[] = { 0, 1, 3 };
+
+/* See the ACC_CONFIG0 section of the datasheet */
+enum bma400_power_mode {
+ POWER_MODE_SLEEP = 0x00,
+ POWER_MODE_LOW = 0x01,
+ POWER_MODE_NORMAL = 0x02,
+ POWER_MODE_INVALID = 0x03,
+};
+
+struct bma400_sample_freq {
+ int hz;
+ int uhz;
+};
+
+struct bma400_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_bulk_data regulators[BMA400_NUM_REGULATORS];
+ struct mutex mutex; /* data register lock */
+ struct iio_mount_matrix orientation;
+ enum bma400_power_mode power_mode;
+ struct bma400_sample_freq sample_freq;
+ int oversampling_ratio;
+ int scale;
+};
+
+static bool bma400_is_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMA400_CHIP_ID_REG:
+ case BMA400_ERR_REG:
+ case BMA400_STATUS_REG:
+ case BMA400_X_AXIS_LSB_REG:
+ case BMA400_X_AXIS_MSB_REG:
+ case BMA400_Y_AXIS_LSB_REG:
+ case BMA400_Y_AXIS_MSB_REG:
+ case BMA400_Z_AXIS_LSB_REG:
+ case BMA400_Z_AXIS_MSB_REG:
+ case BMA400_SENSOR_TIME0:
+ case BMA400_SENSOR_TIME1:
+ case BMA400_SENSOR_TIME2:
+ case BMA400_EVENT_REG:
+ case BMA400_INT_STAT0_REG:
+ case BMA400_INT_STAT1_REG:
+ case BMA400_INT_STAT2_REG:
+ case BMA400_TEMP_DATA_REG:
+ case BMA400_FIFO_LENGTH0_REG:
+ case BMA400_FIFO_LENGTH1_REG:
+ case BMA400_FIFO_DATA_REG:
+ case BMA400_STEP_CNT0_REG:
+ case BMA400_STEP_CNT1_REG:
+ case BMA400_STEP_CNT3_REG:
+ case BMA400_STEP_STAT_REG:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool bma400_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMA400_ERR_REG:
+ case BMA400_STATUS_REG:
+ case BMA400_X_AXIS_LSB_REG:
+ case BMA400_X_AXIS_MSB_REG:
+ case BMA400_Y_AXIS_LSB_REG:
+ case BMA400_Y_AXIS_MSB_REG:
+ case BMA400_Z_AXIS_LSB_REG:
+ case BMA400_Z_AXIS_MSB_REG:
+ case BMA400_SENSOR_TIME0:
+ case BMA400_SENSOR_TIME1:
+ case BMA400_SENSOR_TIME2:
+ case BMA400_EVENT_REG:
+ case BMA400_INT_STAT0_REG:
+ case BMA400_INT_STAT1_REG:
+ case BMA400_INT_STAT2_REG:
+ case BMA400_TEMP_DATA_REG:
+ case BMA400_FIFO_LENGTH0_REG:
+ case BMA400_FIFO_LENGTH1_REG:
+ case BMA400_FIFO_DATA_REG:
+ case BMA400_STEP_CNT0_REG:
+ case BMA400_STEP_CNT1_REG:
+ case BMA400_STEP_CNT3_REG:
+ case BMA400_STEP_STAT_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bma400_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BMA400_CMD_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = bma400_is_writable_reg,
+ .volatile_reg = bma400_is_volatile_reg,
+};
+EXPORT_SYMBOL(bma400_regmap_config);
+
+static const struct iio_mount_matrix *
+bma400_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bma400_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma400_accel_get_mount_matrix),
+ { }
+};
+
+#define BMA400_ACC_CHANNEL(_axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .ext_info = bma400_ext_info, \
+}
+
+static const struct iio_chan_spec bma400_channels[] = {
+ BMA400_ACC_CHANNEL(X),
+ BMA400_ACC_CHANNEL(Y),
+ BMA400_ACC_CHANNEL(Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+};
+
+static int bma400_get_temp_reg(struct bma400_data *data, int *val, int *val2)
+{
+ unsigned int raw_temp;
+ int host_temp;
+ int ret;
+
+ if (data->power_mode == POWER_MODE_SLEEP)
+ return -EBUSY;
+
+ ret = regmap_read(data->regmap, BMA400_TEMP_DATA_REG, &raw_temp);
+ if (ret)
+ return ret;
+
+ host_temp = sign_extend32(raw_temp, 7);
+ /*
+ * The formula for the TEMP_DATA register in the datasheet
+ * is: x * 0.5 + 23
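+ * e.g. raw_temp = 0x05: 5 * 0.5 + 23 = 25.5 C -> *val = 25, *val2 = 500000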
+ */
+ *val = (host_temp >> 1) + 23;
+ *val2 = (host_temp & 0x1) * 500000;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int bma400_get_accel_reg(struct bma400_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ __le16 raw_accel;
+ int lsb_reg;
+ int ret;
+
+ if (data->power_mode == POWER_MODE_SLEEP)
+ return -EBUSY;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ lsb_reg = BMA400_X_AXIS_LSB_REG;
+ break;
+ case IIO_MOD_Y:
+ lsb_reg = BMA400_Y_AXIS_LSB_REG;
+ break;
+ case IIO_MOD_Z:
+ lsb_reg = BMA400_Z_AXIS_LSB_REG;
+ break;
+ default:
+ dev_err(data->dev, "invalid axis channel modifier\n");
+ return -EINVAL;
+ }
+
+ /* bulk read two registers, with the base being the LSB register */
+ ret = regmap_bulk_read(data->regmap, lsb_reg, &raw_accel,
+ sizeof(raw_accel));
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(raw_accel), 11);
+ return IIO_VAL_INT;
+}
+
+static void bma400_output_data_rate_from_raw(int raw, unsigned int *val,
+ unsigned int *val2)
+{
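+ /*
+ * Each raw step halves the rate: raw 0x0b is 800 Hz, raw 0x0a is
+ * 400 Hz, down to raw 0x05 at 12.5 Hz, the only non-integer rate
+ * (hence the 500000 micro-Hz fixup below).
+ */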
+ *val = BMA400_ACC_ODR_MAX_HZ >> (BMA400_ACC_ODR_MAX_RAW - raw);
+ if (raw > BMA400_ACC_ODR_MIN_RAW)
+ *val2 = 0;
+ else
+ *val2 = 500000;
+}
+
+static int bma400_get_accel_output_data_rate(struct bma400_data *data)
+{
+ unsigned int val;
+ unsigned int odr;
+ int ret;
+
+ switch (data->power_mode) {
+ case POWER_MODE_LOW:
+ /*
+ * Runs at a fixed rate in low-power mode. See section 4.3
+ * in the datasheet.
+ */
+ bma400_output_data_rate_from_raw(BMA400_ACC_ODR_LP_RAW,
+ &data->sample_freq.hz,
+ &data->sample_freq.uhz);
+ return 0;
+ case POWER_MODE_NORMAL:
+ /*
+ * In normal mode the ODR can be found in the ACC_CONFIG1
+ * register.
+ */
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret)
+ goto error;
+
+ odr = val & BMA400_ACC_ODR_MASK;
+ if (odr < BMA400_ACC_ODR_MIN_RAW ||
+ odr > BMA400_ACC_ODR_MAX_RAW) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ bma400_output_data_rate_from_raw(odr, &data->sample_freq.hz,
+ &data->sample_freq.uhz);
+ return 0;
+ case POWER_MODE_SLEEP:
+ data->sample_freq.hz = 0;
+ data->sample_freq.uhz = 0;
+ return 0;
+ default:
+ ret = 0;
+ goto error;
+ }
+error:
+ data->sample_freq.hz = -1;
+ data->sample_freq.uhz = -1;
+ return ret;
+}
+
+static int bma400_set_accel_output_data_rate(struct bma400_data *data,
+ int hz, int uhz)
+{
+ unsigned int idx;
+ unsigned int odr;
+ unsigned int val;
+ int ret;
+
+ if (hz >= BMA400_ACC_ODR_MIN_WHOLE_HZ) {
+ if (uhz || hz > BMA400_ACC_ODR_MAX_HZ)
+ return -EINVAL;
+
+ /* Note this works because MIN_WHOLE_HZ is odd */
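+ /* e.g. hz = 200: __ffs(200) = 3, 200 >> 3 = 25, so idx = 3 + 5 + 1 = 9 */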
+ idx = __ffs(hz);
+
+ if (hz >> idx != BMA400_ACC_ODR_MIN_WHOLE_HZ)
+ return -EINVAL;
+
+ idx += BMA400_ACC_ODR_MIN_RAW + 1;
+ } else if (hz == BMA400_ACC_ODR_MIN_HZ && uhz == 500000) {
+ idx = BMA400_ACC_ODR_MIN_RAW;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret)
+ return ret;
+
+ /* preserve the range and normal mode osr */
+ odr = (~BMA400_ACC_ODR_MASK & val) | idx;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG, odr);
+ if (ret)
+ return ret;
+
+ bma400_output_data_rate_from_raw(idx, &data->sample_freq.hz,
+ &data->sample_freq.uhz);
+ return 0;
+}
+
+static int bma400_get_accel_oversampling_ratio(struct bma400_data *data)
+{
+ unsigned int val;
+ unsigned int osr;
+ int ret;
+
+ /*
+ * The oversampling ratio is stored in a different register
+ * based on the power-mode. In normal mode the OSR is stored
+ * in ACC_CONFIG1. In low-power mode it is stored in
+ * ACC_CONFIG0.
+ */
+ switch (data->power_mode) {
+ case POWER_MODE_LOW:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG, &val);
+ if (ret) {
+ data->oversampling_ratio = -1;
+ return ret;
+ }
+
+ osr = (val & BMA400_LP_OSR_MASK) >> BMA400_LP_OSR_SHIFT;
+
+ data->oversampling_ratio = osr;
+ return 0;
+ case POWER_MODE_NORMAL:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret) {
+ data->oversampling_ratio = -1;
+ return ret;
+ }
+
+ osr = (val & BMA400_NP_OSR_MASK) >> BMA400_NP_OSR_SHIFT;
+
+ data->oversampling_ratio = osr;
+ return 0;
+ case POWER_MODE_SLEEP:
+ data->oversampling_ratio = 0;
+ return 0;
+ default:
+ data->oversampling_ratio = -1;
+ return -EINVAL;
+ }
+}
+
+static int bma400_set_accel_oversampling_ratio(struct bma400_data *data,
+ int val)
+{
+ unsigned int acc_config;
+ int ret;
+
+ if (val & ~BMA400_TWO_BITS_MASK)
+ return -EINVAL;
+
+ /*
+ * The oversampling ratio is stored in a different register
+ * based on the power-mode.
+ */
+ switch (data->power_mode) {
+ case POWER_MODE_LOW:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG,
+ &acc_config);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
+ (acc_config & ~BMA400_LP_OSR_MASK) |
+ (val << BMA400_LP_OSR_SHIFT));
+ if (ret) {
+ dev_err(data->dev, "Failed to write out OSR\n");
+ return ret;
+ }
+
+ data->oversampling_ratio = val;
+ return 0;
+ case POWER_MODE_NORMAL:
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG,
+ &acc_config);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
+ (acc_config & ~BMA400_NP_OSR_MASK) |
+ (val << BMA400_NP_OSR_SHIFT));
+ if (ret) {
+ dev_err(data->dev, "Failed to write out OSR\n");
+ return ret;
+ }
+
+ data->oversampling_ratio = val;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int bma400_accel_scale_to_raw(struct bma400_data *data,
+ unsigned int val)
+{
+ int raw;
+
+ if (val == 0)
+ return -EINVAL;
+
+ /* Note this works because BMA400_SCALE_MIN is odd */
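+ /* e.g. val = 76714: __ffs = 1, 76714 >> 1 = 38357 = BMA400_SCALE_MIN, raw = 1 */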
+ raw = __ffs(val);
+
+ if (val >> raw != BMA400_SCALE_MIN)
+ return -EINVAL;
+
+ return raw;
+}
+
+static int bma400_get_accel_scale(struct bma400_data *data)
+{
+ unsigned int raw_scale;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &val);
+ if (ret)
+ return ret;
+
+ raw_scale = (val & BMA400_ACC_SCALE_MASK) >> BMA400_SCALE_SHIFT;
+ if (raw_scale > BMA400_TWO_BITS_MASK)
+ return -EINVAL;
+
+ data->scale = BMA400_SCALE_MIN << raw_scale;
+
+ return 0;
+}
+
+static int bma400_set_accel_scale(struct bma400_data *data, unsigned int val)
+{
+ unsigned int acc_config;
+ int raw;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG1_REG, &acc_config);
+ if (ret)
+ return ret;
+
+ raw = bma400_accel_scale_to_raw(data, val);
+ if (raw < 0)
+ return raw;
+
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
+ (acc_config & ~BMA400_ACC_SCALE_MASK) |
+ (raw << BMA400_SCALE_SHIFT));
+ if (ret)
+ return ret;
+
+ data->scale = val;
+ return 0;
+}
+
+static int bma400_get_power_mode(struct bma400_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_STATUS_REG, &val);
+ if (ret) {
+ dev_err(data->dev, "Failed to read status register\n");
+ return ret;
+ }
+
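+ /* The power-mode status is reported in bits [2:1] of STATUS */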
+ data->power_mode = (val >> 1) & BMA400_TWO_BITS_MASK;
+ return 0;
+}
+
+static int bma400_set_power_mode(struct bma400_data *data,
+ enum bma400_power_mode mode)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMA400_ACC_CONFIG0_REG, &val);
+ if (ret)
+ return ret;
+
+ if (data->power_mode == mode)
+ return 0;
+
+ if (mode == POWER_MODE_INVALID)
+ return -EINVAL;
+
+ /* Preserve the low-power oversample ratio etc */
+ ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
+ mode | (val & ~BMA400_TWO_BITS_MASK));
+ if (ret) {
+ dev_err(data->dev, "Failed to write to power-mode\n");
+ return ret;
+ }
+
+ data->power_mode = mode;
+
+ /*
+ * Update our cached osr and odr based on the new
+ * power-mode.
+ */
+ bma400_get_accel_output_data_rate(data);
+ bma400_get_accel_oversampling_ratio(data);
+ return 0;
+}
+
+static void bma400_init_tables(void)
+{
+ int raw;
+ int i;
+
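+ /*
+ * Both tables are interleaved {integer, micro} pairs, matching the
+ * IIO_VAL_INT_PLUS_MICRO lists returned by bma400_read_avail().
+ */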
+ for (i = 0; i + 1 < ARRAY_SIZE(bma400_sample_freqs); i += 2) {
+ raw = (i / 2) + 5;
+ bma400_output_data_rate_from_raw(raw, &bma400_sample_freqs[i],
+ &bma400_sample_freqs[i + 1]);
+ }
+
+ for (i = 0; i + 1 < ARRAY_SIZE(bma400_scales); i += 2) {
+ raw = i / 2;
+ bma400_scales[i] = 0;
+ bma400_scales[i + 1] = BMA400_SCALE_MIN << raw;
+ }
+}
+
+static int bma400_init(struct bma400_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ /* Try to read chip_id register. It must return 0x90. */
+ ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
+ if (ret) {
+ dev_err(data->dev, "Failed to read chip id register\n");
+ goto out;
+ }
+
+ if (val != BMA400_ID_REG_VAL) {
+ dev_err(data->dev, "Chip ID mismatch\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
+ data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
+ ret = devm_regulator_bulk_get(data->dev,
+ ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(data->dev,
+ "Failed to get regulators: %d\n",
+ ret);
+
+ goto out;
+ }
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret) {
+ dev_err(data->dev, "Failed to enable regulators: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = bma400_get_power_mode(data);
+ if (ret) {
+ dev_err(data->dev, "Failed to get the initial power-mode\n");
+ goto err_reg_disable;
+ }
+
+ if (data->power_mode != POWER_MODE_NORMAL) {
+ ret = bma400_set_power_mode(data, POWER_MODE_NORMAL);
+ if (ret) {
+ dev_err(data->dev, "Failed to wake up the device\n");
+ goto err_reg_disable;
+ }
+ /*
+ * TODO: The datasheet waits 1500us here in the example, but
+ * lists 2/ODR as the wakeup time.
+ */
+ usleep_range(1500, 2000);
+ }
+
+ bma400_init_tables();
+
+ ret = bma400_get_accel_output_data_rate(data);
+ if (ret)
+ goto err_reg_disable;
+
+ ret = bma400_get_accel_oversampling_ratio(data);
+ if (ret)
+ goto err_reg_disable;
+
+ ret = bma400_get_accel_scale(data);
+ if (ret)
+ goto err_reg_disable;
+
+ /*
+ * Once the interrupt engine is supported we might use the
+ * data_src_reg, but for now ensure this is set to the
+ * variable ODR filter selectable by the sample frequency
+ * channel.
+ */
+ return regmap_write(data->regmap, BMA400_ACC_CONFIG2_REG, 0x00);
+
+err_reg_disable:
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+out:
+ return ret;
+}
+
+static int bma400_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&data->mutex);
+ ret = bma400_get_temp_reg(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->mutex);
+ ret = bma400_get_accel_reg(data, chan, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ if (data->sample_freq.hz < 0)
+ return -EINVAL;
+
+ *val = data->sample_freq.hz;
+ *val2 = data->sample_freq.uhz;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /*
+ * Runs at a fixed sampling frequency. See Section 4.4
+ * of the datasheet.
+ */
+ *val = 6;
+ *val2 = 250000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = data->scale;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ /*
+ * TODO: We could avoid this logic and returning -EINVAL here if
+ * we set both the low-power and normal mode OSR registers when
+ * we configure the device.
+ */
+ if (data->oversampling_ratio < 0)
+ return -EINVAL;
+
+ *val = data->oversampling_ratio;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = bma400_scales;
+ *length = ARRAY_SIZE(bma400_scales);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = bma400_osr_range;
+ *length = ARRAY_SIZE(bma400_osr_range);
+ return IIO_AVAIL_RANGE;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = bma400_sample_freqs;
+ *length = ARRAY_SIZE(bma400_sample_freqs);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * The sample frequency is read-only for the temperature
+ * register and a fixed value in low-power mode.
+ */
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_accel_output_data_rate(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ if (val != 0 ||
+ val2 < BMA400_SCALE_MIN || val2 > BMA400_SCALE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_accel_scale(data, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ mutex_lock(&data->mutex);
+ ret = bma400_set_accel_oversampling_ratio(data, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma400_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bma400_info = {
+ .read_raw = bma400_read_raw,
+ .read_avail = bma400_read_avail,
+ .write_raw = bma400_write_raw,
+ .write_raw_get_fmt = bma400_write_raw_get_fmt,
+};
+
+int bma400_probe(struct device *dev, struct regmap *regmap, const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct bma400_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+ data->dev = dev;
+
+ ret = bma400_init(data);
+ if (ret)
+ return ret;
+
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &data->orientation);
+ if (ret)
+ return ret;
+
+ mutex_init(&data->mutex);
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &bma400_info;
+ indio_dev->channels = bma400_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bma400_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ dev_set_drvdata(dev, indio_dev);
+
+ return iio_device_register(indio_dev);
+}
+EXPORT_SYMBOL(bma400_probe);
+
+int bma400_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = bma400_set_power_mode(data, POWER_MODE_SLEEP);
+ mutex_unlock(&data->mutex);
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+
+ iio_device_unregister(indio_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(bma400_remove);
+
+MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bma400_i2c.c b/drivers/iio/accel/bma400_i2c.c
new file mode 100644
index 000000000000..9dcb7cc9996e
--- /dev/null
+++ b/drivers/iio/accel/bma400_i2c.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I2C IIO driver for Bosch BMA400 triaxial acceleration sensor.
+ *
+ * Copyright 2019 Dan Robertson <dan@dlrobertson.com>
+ *
+ * I2C address is either 0x14 or 0x15 depending on SDO
+ */
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bma400.h"
+
+static int bma400_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &bma400_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "failed to create regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bma400_probe(&client->dev, regmap, id->name);
+}
+
+static int bma400_i2c_remove(struct i2c_client *client)
+{
+ return bma400_remove(&client->dev);
+}
+
+static const struct i2c_device_id bma400_i2c_ids[] = {
+ { "bma400", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bma400_i2c_ids);
+
+static const struct of_device_id bma400_of_i2c_match[] = {
+ { .compatible = "bosch,bma400" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bma400_of_i2c_match);
+
+static struct i2c_driver bma400_i2c_driver = {
+ .driver = {
+ .name = "bma400",
+ .of_match_table = bma400_of_i2c_match,
+ },
+ .probe = bma400_i2c_probe,
+ .remove = bma400_i2c_remove,
+ .id_table = bma400_i2c_ids,
+};
+
+module_i2c_driver(bma400_i2c_driver);
+
+MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor (I2C)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 65f85faf6f31..68e847c6255e 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -18,7 +18,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index fee535d6e45b..c9924a65c32a 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -130,6 +130,7 @@ struct kxcjk1013_data {
struct i2c_client *client;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
+ struct iio_mount_matrix orientation;
struct mutex mutex;
s16 buffer[8];
u8 odr_bits;
@@ -983,6 +984,20 @@ static const struct iio_event_spec kxcjk1013_event = {
BIT(IIO_EV_INFO_PERIOD)
};
+static const struct iio_mount_matrix *
+kxcjk1013_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info kxcjk1013_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kxcjk1013_get_mount_matrix),
+ { }
+};
+
#define KXCJK1013_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -999,6 +1014,7 @@ static const struct iio_event_spec kxcjk1013_event = {
.endianness = IIO_LE, \
}, \
.event_spec = &kxcjk1013_event, \
+ .ext_info = kxcjk1013_ext_info, \
.num_event_specs = 1 \
}
@@ -1267,11 +1283,18 @@ static int kxcjk1013_probe(struct i2c_client *client,
data->client = client;
pdata = dev_get_platdata(&client->dev);
- if (pdata)
+ if (pdata) {
data->active_high_intr = pdata->active_high_intr;
- else
+ data->orientation = pdata->orientation;
+ } else {
data->active_high_intr = true; /* default polarity */
+ ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+ }
+
if (id) {
data->chipset = (enum kx_chipset)(id->driver_data);
name = id->name;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index af09943f38c9..5b13e293cade 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -64,7 +64,7 @@ enum st_accel_type {
* struct st_sensors_platform_data - default accel platform data
* @drdy_int_pin: default accel DRDY is available on INT1 pin.
*/
-static const struct st_sensors_platform_data default_accel_pdata = {
+static __maybe_unused const struct st_sensors_platform_data default_accel_pdata = {
.drdy_int_pin = 1,
};
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 50fa0fc32baa..633955d764cc 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -18,7 +18,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_accel.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_accel_of_match[] = {
{
/* An older compatible */
@@ -108,9 +107,6 @@ static const struct of_device_id st_accel_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#else
-#define st_accel_of_match NULL
-#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
@@ -119,8 +115,6 @@ static const struct acpi_device_id st_accel_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
-#else
-#define st_accel_acpi_match NULL
#endif
static const struct i2c_device_id st_accel_id_table[] = {
@@ -195,7 +189,7 @@ static int st_accel_i2c_remove(struct i2c_client *client)
static struct i2c_driver st_accel_driver = {
.driver = {
.name = "st-accel-i2c",
- .of_match_table = of_match_ptr(st_accel_of_match),
+ .of_match_table = st_accel_of_match,
.acpi_match_table = ACPI_PTR(st_accel_acpi_match),
},
.probe_new = st_accel_i2c_probe,
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 8af7027d5598..568ff1bae0ee 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_accel.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-accel to maintain
@@ -96,9 +95,6 @@ static const struct of_device_id st_accel_of_match[] = {
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#else
-#define st_accel_of_match NULL
-#endif
static int st_accel_spi_probe(struct spi_device *spi)
{
@@ -107,8 +103,7 @@ static int st_accel_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_accel_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_accel_get_settings(spi->modalias);
if (!settings) {
@@ -166,7 +161,7 @@ MODULE_DEVICE_TABLE(spi, st_accel_id_table);
static struct spi_driver st_accel_driver = {
.driver = {
.name = "st-accel-spi",
- .of_match_table = of_match_ptr(st_accel_of_match),
+ .of_match_table = st_accel_of_match,
},
.probe = st_accel_spi_probe,
.remove = st_accel_spi_remove,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 5d8540b7b427..82e33082958c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -21,6 +21,13 @@ config AD_SIGMA_DELTA
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+config AD7091R5
+ tristate "Analog Devices AD7091R5 ADC Driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Analog Devices AD7091R-5 ADC.
+
config AD7124
tristate "Analog Devices AD7124 and similar sigma-delta ADCs driver"
depends on SPI_MASTER
@@ -523,6 +530,16 @@ config LTC2485
To compile this driver as a module, choose M here: the module will be
called ltc2485.
+config LTC2496
+ tristate "Linear Technology LTC2496 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Linear Technology LTC2496
+ 16-Bit 8-/16-Channel Delta Sigma ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ltc2496.
+
config LTC2497
tristate "Linear Technology LTC2497 ADC driver"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index a1f1fbec0f87..919228900df9 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
+obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7291) += ad7291.o
@@ -50,7 +51,8 @@ obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
obj-$(CONFIG_LTC2471) += ltc2471.o
obj-$(CONFIG_LTC2485) += ltc2485.o
-obj-$(CONFIG_LTC2497) += ltc2497.o
+obj-$(CONFIG_LTC2496) += ltc2496.o ltc2497-core.o
+obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
new file mode 100644
index 000000000000..33c40357bd5e
--- /dev/null
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD7091RX Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "ad7091r-base.h"
+
+#define AD7091R_REG_RESULT 0
+#define AD7091R_REG_CHANNEL 1
+#define AD7091R_REG_CONF 2
+#define AD7091R_REG_ALERT 3
+#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
+#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
+#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
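+/* e.g. for channel 1 the limit registers are 0x07, 0x08 and 0x09 */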
+
+/* AD7091R_REG_RESULT */
+#define AD7091R_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x3)
+#define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff)
+
+/* AD7091R_REG_CONF */
+#define AD7091R_REG_CONF_AUTO BIT(8)
+#define AD7091R_REG_CONF_CMD BIT(10)
+
+#define AD7091R_REG_CONF_MODE_MASK \
+ (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
+
+enum ad7091r_mode {
+ AD7091R_MODE_SAMPLE,
+ AD7091R_MODE_COMMAND,
+ AD7091R_MODE_AUTOCYCLE,
+};
+
+struct ad7091r_state {
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *vref;
+ const struct ad7091r_chip_info *chip_info;
+ enum ad7091r_mode mode;
+ struct mutex lock; /* lock to prevent concurrent reads */
+};
+
+static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+ int ret, conf;
+
+ switch (mode) {
+ case AD7091R_MODE_SAMPLE:
+ conf = 0;
+ break;
+ case AD7091R_MODE_COMMAND:
+ conf = AD7091R_REG_CONF_CMD;
+ break;
+ case AD7091R_MODE_AUTOCYCLE:
+ conf = AD7091R_REG_CONF_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
+ AD7091R_REG_CONF_MODE_MASK, conf);
+ if (ret)
+ return ret;
+
+ st->mode = mode;
+
+ return 0;
+}
+
+static int ad7091r_set_channel(struct ad7091r_state *st, unsigned int channel)
+{
+ unsigned int dummy;
+ int ret;
+
+ /* AD7091R_REG_CHANNEL specifies which channels are to be converted */
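+ /* e.g. channel 2: BIT(2) | (BIT(2) << 8) = 0x0404 */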
+ ret = regmap_write(st->map, AD7091R_REG_CHANNEL,
+ BIT(channel) | (BIT(channel) << 8));
+ if (ret)
+ return ret;
+
+ /*
+ * There is a latency of one conversion before the channel conversion
+ * sequence is updated
+ */
+ return regmap_read(st->map, AD7091R_REG_RESULT, &dummy);
+}
+
+static int ad7091r_read_one(struct iio_dev *iio_dev,
+ unsigned int channel, unsigned int *read_val)
+{
+ struct ad7091r_state *st = iio_priv(iio_dev);
+ unsigned int val;
+ int ret;
+
+ ret = ad7091r_set_channel(st, channel);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->map, AD7091R_REG_RESULT, &val);
+ if (ret)
+ return ret;
+
+ if (AD7091R_REG_RESULT_CH_ID(val) != channel)
+ return -EIO;
+
+ *read_val = AD7091R_REG_RESULT_CONV_RESULT(val);
+
+ return 0;
+}
+
+static int ad7091r_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad7091r_state *st = iio_priv(iio_dev);
+ unsigned int read_val;
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ if (st->mode != AD7091R_MODE_COMMAND) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = ad7091r_read_one(iio_dev, chan->channel, &read_val);
+ if (ret)
+ goto unlock;
+
+ *val = read_val;
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
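+ /* scale = vref (mV) / 2^realbits; e.g. 2500 / 4096 ~= 0.61 mV per LSB */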
+ if (st->vref) {
+ ret = regulator_get_voltage(st->vref);
+ if (ret < 0)
+ goto unlock;
+
+ *val = ret / 1000;
+ } else {
+ *val = st->chip_info->vref_mV;
+ }
+
+ *val2 = chan->scan_type.realbits;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static const struct iio_info ad7091r_info = {
+ .read_raw = ad7091r_read_raw,
+};
+
+static irqreturn_t ad7091r_event_handler(int irq, void *private)
+{
+ struct ad7091r_state *st = (struct ad7091r_state *) private;
+ struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
+ unsigned int i, read_val;
+ int ret;
+ s64 timestamp = iio_get_time_ns(iio_dev);
+
+ ret = regmap_read(st->map, AD7091R_REG_ALERT, &read_val);
+ if (ret)
+ return IRQ_HANDLED;
+
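+ /*
+ * ALERT packs two flag bits per channel: bit 2i is pushed as a
+ * rising threshold event, bit 2i + 1 as a falling one.
+ */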
+ for (i = 0; i < st->chip_info->num_channels; i++) {
+ if (read_val & BIT(i * 2))
+ iio_push_event(iio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING), timestamp);
+ if (read_val & BIT(i * 2 + 1))
+ iio_push_event(iio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING), timestamp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ad7091r_remove(void *data)
+{
+ struct ad7091r_state *st = data;
+
+ regulator_disable(st->vref);
+}
+
+int ad7091r_probe(struct device *dev, const char *name,
+ const struct ad7091r_chip_info *chip_info,
+ struct regmap *map, int irq)
+{
+ struct iio_dev *iio_dev;
+ struct ad7091r_state *st;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(iio_dev);
+ st->dev = dev;
+ st->chip_info = chip_info;
+ st->map = map;
+
+ iio_dev->dev.parent = dev;
+ iio_dev->name = name;
+ iio_dev->info = &ad7091r_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ iio_dev->num_channels = chip_info->num_channels;
+ iio_dev->channels = chip_info->channels;
+
+ if (irq) {
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ad7091r_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
+ if (ret)
+ return ret;
+ }
+
+ st->vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(st->vref)) {
+ if (PTR_ERR(st->vref) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ st->vref = NULL;
+ } else {
+ ret = regulator_enable(st->vref);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, ad7091r_remove, st);
+ if (ret)
+ return ret;
+ }
+
+ /* Use command mode by default to convert only the desired channels */
+ ret = ad7091r_set_mode(st, AD7091R_MODE_COMMAND);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, iio_dev);
+}
+EXPORT_SYMBOL_GPL(ad7091r_probe);
+
+static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AD7091R_REG_RESULT:
+ case AD7091R_REG_ALERT:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AD7091R_REG_RESULT:
+ case AD7091R_REG_ALERT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config ad7091r_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .writeable_reg = ad7091r_writeable_reg,
+ .volatile_reg = ad7091r_volatile_reg,
+};
+EXPORT_SYMBOL_GPL(ad7091r_regmap_config);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091Rx multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
new file mode 100644
index 000000000000..509748aef9b1
--- /dev/null
+++ b/drivers/iio/adc/ad7091r-base.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AD7091RX Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+#define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+
+struct device;
+struct ad7091r_state;
+
+struct ad7091r_chip_info {
+ unsigned int num_channels;
+ const struct iio_chan_spec *channels;
+ unsigned int vref_mV;
+};
+
+extern const struct regmap_config ad7091r_regmap_config;
+
+int ad7091r_probe(struct device *dev, const char *name,
+ const struct ad7091r_chip_info *chip_info,
+ struct regmap *map, int irq);
+
+#endif /* __DRIVERS_IIO_ADC_AD7091R_BASE_H__ */
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
new file mode 100644
index 000000000000..9665679c3ea6
--- /dev/null
+++ b/drivers/iio/adc/ad7091r5.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD7091R5 Analog to Digital converter driver
+ *
+ * Copyright 2014-2019 Analog Devices Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ad7091r-base.h"
+
+static const struct iio_event_spec ad7091r5_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = idx, \
+ .event_spec = ev, \
+ .num_event_specs = num_ev, \
+ .scan_type.storagebits = 16, \
+ .scan_type.realbits = bits, \
+}
+static const struct iio_chan_spec ad7091r5_channels_irq[] = {
+ AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+ AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+ AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+ AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+};
+
+static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
+ AD7091R_CHANNEL(0, 12, NULL, 0),
+ AD7091R_CHANNEL(1, 12, NULL, 0),
+ AD7091R_CHANNEL(2, 12, NULL, 0),
+ AD7091R_CHANNEL(3, 12, NULL, 0),
+};
+
+static const struct ad7091r_chip_info ad7091r5_chip_info_irq = {
+ .channels = ad7091r5_channels_irq,
+ .num_channels = ARRAY_SIZE(ad7091r5_channels_irq),
+ .vref_mV = 2500,
+};
+
+static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = {
+ .channels = ad7091r5_channels_noirq,
+ .num_channels = ARRAY_SIZE(ad7091r5_channels_noirq),
+ .vref_mV = 2500,
+};
+
+static int ad7091r5_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ const struct ad7091r_chip_info *chip_info;
+ struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config);
+
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ if (i2c->irq)
+ chip_info = &ad7091r5_chip_info_irq;
+ else
+ chip_info = &ad7091r5_chip_info_noirq;
+
+ return ad7091r_probe(&i2c->dev, id->name, chip_info, map, i2c->irq);
+}
+
+static const struct of_device_id ad7091r5_dt_ids[] = {
+ { .compatible = "adi,ad7091r5" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
+
+static const struct i2c_device_id ad7091r5_i2c_ids[] = {
+ {"ad7091r5", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
+
+static struct i2c_driver ad7091r5_driver = {
+ .driver = {
+ .name = "ad7091r5",
+ .of_match_table = ad7091r5_dt_ids,
+ },
+ .probe = ad7091r5_i2c_probe,
+ .id_table = ad7091r5_i2c_ids,
+};
+module_i2c_driver(ad7091r5_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091R5 multi-channel ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 3f03abf100b5..d9915dc71d1e 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -224,6 +225,7 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.addr_shift = 0,
.read_mask = BIT(6),
.data_reg = AD7124_DATA,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7124_set_channel_odr(struct ad7124_state *st,
@@ -494,13 +496,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
st->channel_config[channel].buf_negative =
of_property_read_bool(child, "adi,buffered-negative");
- *chan = ad7124_channel_template;
- chan->address = channel;
- chan->scan_index = channel;
- chan->channel = ain[0];
- chan->channel2 = ain[1];
-
- chan++;
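+ /* Index by the channel number; DT child nodes need not be in order */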
+ chan[channel] = ad7124_channel_template;
+ chan[channel].address = channel;
+ chan[channel].scan_index = channel;
+ chan[channel].channel = ain[0];
+ chan[channel].channel2 = ain[1];
}
return 0;
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c31b8eabb894..c8524f098883 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -11,7 +11,7 @@
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -34,7 +34,7 @@ struct ad7266_state {
enum ad7266_range range;
enum ad7266_mode mode;
bool fixed_addr;
- struct gpio gpios[3];
+ struct gpio_desc *gpios[3];
/*
* DMA (thus cache coherency maintenance) requires the
@@ -117,7 +117,7 @@ static void ad7266_select_input(struct ad7266_state *st, unsigned int nr)
}
for (i = 0; i < 3; ++i)
- gpio_set_value(st->gpios[i].gpio, (bool)(nr & BIT(i)));
+ gpiod_set_value(st->gpios[i], (bool)(nr & BIT(i)));
}
static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
@@ -376,7 +376,7 @@ static void ad7266_init_channels(struct iio_dev *indio_dev)
}
static const char * const ad7266_gpio_labels[] = {
- "AD0", "AD1", "AD2",
+ "ad0", "ad1", "ad2",
};
static int ad7266_probe(struct spi_device *spi)
@@ -419,14 +419,14 @@ static int ad7266_probe(struct spi_device *spi)
if (!st->fixed_addr) {
for (i = 0; i < ARRAY_SIZE(st->gpios); ++i) {
- st->gpios[i].gpio = pdata->addr_gpios[i];
- st->gpios[i].flags = GPIOF_OUT_INIT_LOW;
- st->gpios[i].label = ad7266_gpio_labels[i];
+ st->gpios[i] = devm_gpiod_get(&spi->dev,
+ ad7266_gpio_labels[i],
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpios[i])) {
+ ret = PTR_ERR(st->gpios[i]);
+ goto error_disable_reg;
+ }
}
- ret = gpio_request_array(st->gpios,
- ARRAY_SIZE(st->gpios));
- if (ret)
- goto error_disable_reg;
}
} else {
st->fixed_addr = true;
@@ -465,7 +465,7 @@ static int ad7266_probe(struct spi_device *spi)
ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
&ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
if (ret)
- goto error_free_gpios;
+ goto error_disable_reg;
ret = iio_device_register(indio_dev);
if (ret)
@@ -475,9 +475,6 @@ static int ad7266_probe(struct spi_device *spi)
error_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
-error_free_gpios:
- if (!st->fixed_addr)
- gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
@@ -492,8 +489,6 @@ static int ad7266_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- if (!st->fixed_addr)
- gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
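
The ad7266 conversion swaps legacy integer GPIOs for GPIO descriptors. devm_gpiod_get() ties each descriptor's lifetime to the device, which is why the gpio_free_array() calls vanish from both the error path and remove(); the connection IDs also become lowercase ("ad0"), matching gpiod naming convention. A minimal sketch of the descriptor pattern, using the names from the hunks above:

    #include <linux/gpio/consumer.h>

    struct gpio_desc *ad0;

    ad0 = devm_gpiod_get(&spi->dev, "ad0", GPIOD_OUT_LOW);
    if (IS_ERR(ad0))
        return PTR_ERR(ad0);    /* released automatically on unbind */

    gpiod_set_value(ad0, 1);    /* logical level; polarity handled by gpiolib */
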
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 217a5a5c3c6d..291c1a898129 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -203,6 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
.set_mode = ad7780_set_mode,
.postprocess_sample = ad7780_postprocess_sample,
.has_registers = false,
+ .irq_flags = IRQF_TRIGGER_LOW,
};
#define AD7780_CHANNEL(bits, wordsize) \
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 54025ea10239..abb239392631 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -205,6 +205,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.has_registers = true,
.addr_shift = 4,
.read_mask = BIT(3),
+ .irq_flags = IRQF_TRIGGER_LOW,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index bbc41ecf0d2f..b747db97f78a 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -206,6 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
.has_registers = true,
.addr_shift = 3,
.read_mask = BIT(6),
+ .irq_flags = IRQF_TRIGGER_LOW,
};
static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 6223043e432b..c6a3428e950a 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -43,11 +43,17 @@ enum ad7887_channels {
/**
* struct ad7887_chip_info - chip specific information
* @int_vref_mv: the internal reference voltage
- * @channel: channel specification
+ * @channels: channels specification
+ * @num_channels: number of channels
+ * @dual_channels: channels specification in dual mode
+ * @num_dual_channels: number of channels in dual mode
*/
struct ad7887_chip_info {
u16 int_vref_mv;
- struct iio_chan_spec channel[3];
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const struct iio_chan_spec *dual_channels;
+ unsigned int num_dual_channels;
};
struct ad7887_state {
@@ -183,45 +189,43 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+#define AD7887_CHANNEL(x) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (x), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (x), \
+ .scan_index = (x), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec ad7887_channels[] = {
+ AD7887_CHANNEL(0),
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct iio_chan_spec ad7887_dual_channels[] = {
+ AD7887_CHANNEL(0),
+ AD7887_CHANNEL(1),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
/*
* More devices may be added in the future
*/
[ID_AD7887] = {
- .channel[0] = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .address = 1,
- .scan_index = 1,
- .scan_type = {
- .sign = 'u',
- .realbits = 12,
- .storagebits = 16,
- .shift = 0,
- .endianness = IIO_BE,
- },
- },
- .channel[1] = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .address = 0,
- .scan_index = 0,
- .scan_type = {
- .sign = 'u',
- .realbits = 12,
- .storagebits = 16,
- .shift = 0,
- .endianness = IIO_BE,
- },
- },
- .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+ .channels = ad7887_channels,
+ .num_channels = ARRAY_SIZE(ad7887_channels),
+ .dual_channels = ad7887_dual_channels,
+ .num_dual_channels = ARRAY_SIZE(ad7887_dual_channels),
.int_vref_mv = 2500,
},
};
@@ -306,11 +310,11 @@ static int ad7887_probe(struct spi_device *spi)
spi_message_init(&st->msg[AD7887_CH1]);
spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = 3;
+ indio_dev->channels = st->chip_info->dual_channels;
+ indio_dev->num_channels = st->chip_info->num_dual_channels;
} else {
- indio_dev->channels = &st->chip_info->channel[1];
- indio_dev->num_channels = 2;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
}
ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 3212eb4c0f25..1d124c87c6ac 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * AD7904/AD7914/AD7923/AD7924 SPI ADC driver
+ * AD7904/AD7914/AD7923/AD7924/AD7908/AD7918/AD7928 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc (from AD7923 Driver)
* Copyright 2012 CS Systemes d'Information
@@ -29,15 +29,10 @@
#define AD7923_PM_MODE_AS (1) /* auto shutdown */
#define AD7923_PM_MODE_FS (2) /* full shutdown */
#define AD7923_PM_MODE_OPS (3) /* normal operation */
-#define AD7923_CHANNEL_0 (0) /* analog input 0 */
-#define AD7923_CHANNEL_1 (1) /* analog input 1 */
-#define AD7923_CHANNEL_2 (2) /* analog input 2 */
-#define AD7923_CHANNEL_3 (3) /* analog input 3 */
#define AD7923_SEQUENCE_OFF (0) /* no sequence function */
#define AD7923_SEQUENCE_PROTECT (2) /* no interrupt write cycle */
#define AD7923_SEQUENCE_ON (3) /* continuous sequence */
-#define AD7923_MAX_CHAN 4
#define AD7923_PM_MODE_WRITE(mode) ((mode) << 4) /* write mode */
#define AD7923_CHANNEL_WRITE(channel) ((channel) << 6) /* write channel */
@@ -78,6 +73,9 @@ enum ad7923_id {
AD7904,
AD7914,
AD7924,
+ AD7908,
+ AD7918,
+ AD7928
};
#define AD7923_V_CHAN(index, bits) \
@@ -106,9 +104,25 @@ const struct iio_chan_spec name ## _channels[] = { \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
+#define DECLARE_AD7908_CHANNELS(name, bits) \
+const struct iio_chan_spec name ## _channels[] = { \
+ AD7923_V_CHAN(0, bits), \
+ AD7923_V_CHAN(1, bits), \
+ AD7923_V_CHAN(2, bits), \
+ AD7923_V_CHAN(3, bits), \
+ AD7923_V_CHAN(4, bits), \
+ AD7923_V_CHAN(5, bits), \
+ AD7923_V_CHAN(6, bits), \
+ AD7923_V_CHAN(7, bits), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
static DECLARE_AD7923_CHANNELS(ad7904, 8);
static DECLARE_AD7923_CHANNELS(ad7914, 10);
static DECLARE_AD7923_CHANNELS(ad7924, 12);
+static DECLARE_AD7908_CHANNELS(ad7908, 8);
+static DECLARE_AD7908_CHANNELS(ad7918, 10);
+static DECLARE_AD7908_CHANNELS(ad7928, 12);
static const struct ad7923_chip_info ad7923_chip_info[] = {
[AD7904] = {
@@ -123,6 +137,18 @@ static const struct ad7923_chip_info ad7923_chip_info[] = {
.channels = ad7924_channels,
.num_channels = ARRAY_SIZE(ad7924_channels),
},
+ [AD7908] = {
+ .channels = ad7908_channels,
+ .num_channels = ARRAY_SIZE(ad7908_channels),
+ },
+ [AD7918] = {
+ .channels = ad7918_channels,
+ .num_channels = ARRAY_SIZE(ad7918_channels),
+ },
+ [AD7928] = {
+ .channels = ad7928_channels,
+ .num_channels = ARRAY_SIZE(ad7928_channels),
+ },
};
/**
@@ -135,7 +161,11 @@ static int ad7923_update_scan_mode(struct iio_dev *indio_dev,
int i, cmd, len;
len = 0;
- for_each_set_bit(i, active_scan_mask, AD7923_MAX_CHAN) {
+ /*
+ * For this driver the last channel is always the software timestamp so
+ * skip that one.
+ */
+ for_each_set_bit(i, active_scan_mask, indio_dev->num_channels - 1) {
cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(i) |
AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) |
st->settings;
@@ -188,7 +218,7 @@ done:
return IRQ_HANDLED;
}
-static int ad7923_scan_direct(struct ad7923_state *st, unsigned ch)
+static int ad7923_scan_direct(struct ad7923_state *st, unsigned int ch)
{
int ret, cmd;
@@ -348,13 +378,29 @@ static const struct spi_device_id ad7923_id[] = {
{"ad7914", AD7914},
{"ad7923", AD7924},
{"ad7924", AD7924},
+ {"ad7908", AD7908},
+ {"ad7918", AD7918},
+ {"ad7928", AD7928},
{}
};
MODULE_DEVICE_TABLE(spi, ad7923_id);
+static const struct of_device_id ad7923_of_match[] = {
+ { .compatible = "adi,ad7904", },
+ { .compatible = "adi,ad7914", },
+ { .compatible = "adi,ad7923", },
+ { .compatible = "adi,ad7924", },
+ { .compatible = "adi,ad7908", },
+ { .compatible = "adi,ad7918", },
+ { .compatible = "adi,ad7928", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7923_of_match);
+
static struct spi_driver ad7923_driver = {
.driver = {
.name = "ad7923",
+ .of_match_table = ad7923_of_match,
},
.probe = ad7923_probe,
.remove = ad7923_remove,
@@ -364,5 +410,5 @@ module_spi_driver(ad7923_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_AUTHOR("Patrick Vasseur <patrick.vasseur@c-s.fr>");
-MODULE_DESCRIPTION("Analog Devices AD7904/AD7914/AD7923/AD7924 ADC");
+MODULE_DESCRIPTION("Analog Devices AD7923 and similar ADC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index f658012baad8..ef013af1aec0 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -167,6 +167,21 @@ static int ad799x_read_config(struct ad799x_state *st)
}
}
+static int ad799x_update_config(struct ad799x_state *st, u16 config)
+{
+ int ret;
+
+ ret = ad799x_write_config(st, config);
+ if (ret < 0)
+ return ret;
+ ret = ad799x_read_config(st);
+ if (ret < 0)
+ return ret;
+ st->config = ret;
+
+ return 0;
+}
+
/**
* ad799x_trigger_handler() - bottom half of trigger-launched polling to ring buffer
*
@@ -808,13 +823,9 @@ static int ad799x_probe(struct i2c_client *client,
indio_dev->channels = st->chip_config->channel;
indio_dev->num_channels = chip_info->num_channels;
- ret = ad799x_write_config(st, st->chip_config->default_config);
- if (ret < 0)
- goto error_disable_vref;
- ret = ad799x_read_config(st);
- if (ret < 0)
+ ret = ad799x_update_config(st, st->chip_config->default_config);
+ if (ret)
goto error_disable_vref;
- st->config = ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
&ad799x_trigger_handler, NULL);
@@ -864,6 +875,48 @@ static int ad799x_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused ad799x_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ad799x_state *st = iio_priv(indio_dev);
+
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static int __maybe_unused ad799x_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ad799x_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(dev, "Unable to enable vcc regulator\n");
+ return ret;
+ }
+ ret = regulator_enable(st->vref);
+ if (ret) {
+ regulator_disable(st->reg);
+ dev_err(dev, "Unable to enable vref regulator\n");
+ return ret;
+ }
+
+ /* resync config */
+ ret = ad799x_update_config(st, st->config);
+ if (ret) {
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ad799x_pm_ops, ad799x_suspend, ad799x_resume);
+
static const struct i2c_device_id ad799x_id[] = {
{ "ad7991", ad7991 },
{ "ad7995", ad7995 },
@@ -881,6 +934,7 @@ MODULE_DEVICE_TABLE(i2c, ad799x_id);
static struct i2c_driver ad799x_driver = {
.driver = {
.name = "ad799x",
+ .pm = &ad799x_pm_ops,
},
.probe = ad799x_probe,
.remove = ad799x_remove,
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8ba90486c787..8115b6de1d6c 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -500,7 +500,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
ret = request_irq(sigma_delta->spi->irq,
ad_sd_data_rdy_trig_poll,
- IRQF_TRIGGER_LOW,
+ sigma_delta->info->irq_flags,
indio_dev->name,
sigma_delta);
if (ret)
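
This is the core-side counterpart of the .irq_flags additions in the ad7124/ad7780/ad7791/ad7793 hunks: the sigma-delta core stops hard-coding IRQF_TRIGGER_LOW and takes the RDY line's polarity from each chip's ad_sigma_delta_info. A sketch of the driver-side declaration (callbacks elided, struct name hypothetical):

    #include <linux/interrupt.h>

    static const struct ad_sigma_delta_info example_sd_info = {
        /* ... chip-specific callbacks ... */
        .irq_flags = IRQF_TRIGGER_FALLING,  /* data ready on falling edge */
    };
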
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index e1850f3d5cf3..a5c7771227d5 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1444,10 +1444,10 @@ static void at91_adc_dma_init(struct platform_device *pdev)
if (st->dma_st.dma_chan)
return;
- st->dma_st.dma_chan = dma_request_slave_channel(&pdev->dev, "rx");
-
- if (!st->dma_st.dma_chan) {
+ st->dma_st.dma_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(st->dma_st.dma_chan)) {
dev_info(&pdev->dev, "can't get DMA channel\n");
+ st->dma_st.dma_chan = NULL;
goto dma_exit;
}
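
dma_request_slave_channel() returned NULL on every failure, swallowing -EPROBE_DEFER; dma_request_chan() returns an ERR_PTR so callers can tell a genuinely absent channel from a deferral. Here at91 keeps its best-effort behaviour (log and fall back), while the stm32 hunks further down split the cases; a sketch of that fuller pattern (names hypothetical):

    chan = dma_request_chan(dev, "rx");
    if (IS_ERR(chan)) {
        ret = PTR_ERR(chan);
        if (ret == -ENODEV) {
            chan = NULL;    /* DMA optional: fall back to IRQ mode */
        } else {
            if (ret != -EPROBE_DEFER)
                dev_err(dev, "DMA channel request failed: %d\n", ret);
            return ret;
        }
    }
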
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
new file mode 100644
index 000000000000..88a30156a849
--- /dev/null
+++ b/drivers/iio/adc/ltc2496.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ltc2496.c - Driver for Analog Devices/Linear Technology LTC2496 ADC
+ *
+ * Based on ltc2497.c which has
+ * Copyright (C) 2017 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/2496fc.pdf
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "ltc2497.h"
+
+struct ltc2496_driverdata {
+ /* this must be the first member */
+ struct ltc2497core_driverdata common_ddata;
+ struct spi_device *spi;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ unsigned char rxbuf[3] ____cacheline_aligned;
+ unsigned char txbuf[3];
+};
+
+static int ltc2496_result_and_measure(struct ltc2497core_driverdata *ddata,
+ u8 address, int *val)
+{
+ struct ltc2496_driverdata *st =
+ container_of(ddata, struct ltc2496_driverdata, common_ddata);
+ struct spi_transfer t = {
+ .tx_buf = st->txbuf,
+ .rx_buf = st->rxbuf,
+ .len = sizeof(st->txbuf),
+ };
+ int ret;
+
+ st->txbuf[0] = LTC2497_ENABLE | address;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret < 0) {
+ dev_err(&st->spi->dev, "spi_sync_transfer failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (val)
+ *val = ((st->rxbuf[0] & 0x3f) << 12 |
+ st->rxbuf[1] << 4 | st->rxbuf[2] >> 4) -
+ (1 << 17);
+
+ return 0;
+}
+
+static int ltc2496_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ltc2496_driverdata *st;
+ struct device *dev = &spi->dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+ st->common_ddata.result_and_measure = ltc2496_result_and_measure;
+
+ return ltc2497core_probe(dev, indio_dev);
+}
+
+static int ltc2496_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ ltc2497core_remove(indio_dev);
+
+ return 0;
+}
+
+static const struct of_device_id ltc2496_of_match[] = {
+ { .compatible = "lltc,ltc2496", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltc2496_of_match);
+
+static struct spi_driver ltc2496_driver = {
+ .driver = {
+ .name = "ltc2496",
+ .of_match_table = of_match_ptr(ltc2496_of_match),
+ },
+ .probe = ltc2496_probe,
+ .remove = ltc2496_remove,
+};
+module_spi_driver(ltc2496_driver);
+
+MODULE_AUTHOR("Uwe Kleine-König <u.kleine-könig@pengutronix.de>");
+MODULE_DESCRIPTION("Linear Technology LTC2496 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c
new file mode 100644
index 000000000000..f5f7039caacc
--- /dev/null
+++ b/drivers/iio/adc/ltc2497-core.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ltc2497-core.c - Common code for Analog Devices/Linear Technology
+ * LTC2496 and LTC2497 ADCs
+ *
+ * Copyright (C) 2017 Analog Devices Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include "ltc2497.h"
+
+#define LTC2497_SGL BIT(4)
+#define LTC2497_DIFF 0
+#define LTC2497_SIGN BIT(3)
+
+static int ltc2497core_wait_conv(struct ltc2497core_driverdata *ddata)
+{
+ s64 time_elapsed;
+
+ time_elapsed = ktime_ms_delta(ktime_get(), ddata->time_prev);
+
+ if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
+ /* delay if conversion time not passed
+ * since last read or write
+ */
+ if (msleep_interruptible(
+ LTC2497_CONVERSION_TIME_MS - time_elapsed))
+ return -ERESTARTSYS;
+
+ return 0;
+ }
+
+ if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
+ /* We're in automatic mode -
+ * so the last reading is still not outdated
+ */
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ltc2497core_read(struct ltc2497core_driverdata *ddata, u8 address, int *val)
+{
+ int ret;
+
+ ret = ltc2497core_wait_conv(ddata);
+ if (ret < 0)
+ return ret;
+
+ if (ret || ddata->addr_prev != address) {
+ ret = ddata->result_and_measure(ddata, address, NULL);
+ if (ret < 0)
+ return ret;
+ ddata->addr_prev = address;
+
+ if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
+ return -ERESTARTSYS;
+ }
+
+ ret = ddata->result_and_measure(ddata, address, val);
+ if (ret < 0)
+ return ret;
+
+ ddata->time_prev = ktime_get();
+
+ return ret;
+}
+
+static int ltc2497core_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = ltc2497core_read(ddata, chan->address, val);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(ddata->ref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = 17;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = (_ds_name), \
+}
+
+#define LTC2497_CHAN_DIFF(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
+ .channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
+ .address = (_addr | _chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .differential = 1, \
+}
+
+static const struct iio_chan_spec ltc2497core_channel[] = {
+ LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
+ LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
+ LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
+ LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
+ LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
+ LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
+ LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
+ LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
+ LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
+ LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
+ LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
+ LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
+ LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
+ LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
+ LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
+ LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
+ LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
+};
+
+static const struct iio_info ltc2497core_info = {
+ .read_raw = ltc2497core_read_raw,
+};
+
+int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev)
+{
+ struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+ int ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = dev_name(dev);
+ indio_dev->info = &ltc2497core_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ltc2497core_channel;
+ indio_dev->num_channels = ARRAY_SIZE(ltc2497core_channel);
+
+ ret = ddata->result_and_measure(ddata, LTC2497_CONFIG_DEFAULT, NULL);
+ if (ret < 0)
+ return ret;
+
+ ddata->ref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(ddata->ref)) {
+ if (PTR_ERR(ddata->ref) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vref regulator: %pe\n",
+ ddata->ref);
+
+ return PTR_ERR(ddata->ref);
+ }
+
+ ret = regulator_enable(ddata->ref);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable vref regulator: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (dev->platform_data) {
+ struct iio_map *plat_data;
+
+ plat_data = (struct iio_map *)dev->platform_data;
+
+ ret = iio_map_array_register(indio_dev, plat_data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
+ goto err_regulator_disable;
+ }
+ }
+
+ ddata->addr_prev = LTC2497_CONFIG_DEFAULT;
+ ddata->time_prev = ktime_get();
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err_array_unregister;
+
+ return 0;
+
+err_array_unregister:
+ iio_map_array_unregister(indio_dev);
+
+err_regulator_disable:
+ regulator_disable(ddata->ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(ltc2497core_probe, LTC2497);
+
+void ltc2497core_remove(struct iio_dev *indio_dev)
+{
+ struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_map_array_unregister(indio_dev);
+
+ regulator_disable(ddata->ref);
+}
+EXPORT_SYMBOL_NS(ltc2497core_remove, LTC2497);
+
+MODULE_DESCRIPTION("common code for LTC2496/LTC2497 drivers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 470406032720..5db63d7c6bc5 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -7,27 +7,18 @@
* Datasheet: http://cds.linear.com/docs/en/datasheet/2497fd.pdf
*/
-#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
-#include <linux/iio/sysfs.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-#define LTC2497_ENABLE 0xA0
-#define LTC2497_SGL BIT(4)
-#define LTC2497_DIFF 0
-#define LTC2497_SIGN BIT(3)
-#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
-#define LTC2497_CONVERSION_TIME_MS 150ULL
+#include "ltc2497.h"
-struct ltc2497_st {
+struct ltc2497_driverdata {
+ /* this must be the first member */
+ struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
- struct regulator *ref;
- ktime_t time_prev;
- u8 addr_prev;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -35,232 +26,59 @@ struct ltc2497_st {
__be32 buf ____cacheline_aligned;
};
-static int ltc2497_wait_conv(struct ltc2497_st *st)
+static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
+ u8 address, int *val)
{
- s64 time_elapsed;
-
- time_elapsed = ktime_ms_delta(ktime_get(), st->time_prev);
-
- if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
- /* delay if conversion time not passed
- * since last read or write
- */
- if (msleep_interruptible(
- LTC2497_CONVERSION_TIME_MS - time_elapsed))
- return -ERESTARTSYS;
-
- return 0;
- }
-
- if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
- /* We're in automatic mode -
- * so the last reading is stil not outdated
- */
- return 0;
- }
-
- return 1;
-}
-
-static int ltc2497_read(struct ltc2497_st *st, u8 address, int *val)
-{
- struct i2c_client *client = st->client;
- int ret;
-
- ret = ltc2497_wait_conv(st);
- if (ret < 0)
- return ret;
-
- if (ret || st->addr_prev != address) {
- ret = i2c_smbus_write_byte(st->client,
- LTC2497_ENABLE | address);
- if (ret < 0)
- return ret;
- st->addr_prev = address;
- if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
- return -ERESTARTSYS;
- }
- ret = i2c_master_recv(client, (char *)&st->buf, 3);
- if (ret < 0) {
- dev_err(&client->dev, "i2c_master_recv failed\n");
- return ret;
- }
- st->time_prev = ktime_get();
-
- /* convert and shift the result,
- * and finally convert from offset binary to signed integer
- */
- *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
-
- return ret;
-}
-
-static int ltc2497_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct ltc2497_st *st = iio_priv(indio_dev);
+ struct ltc2497_driverdata *st =
+ container_of(ddata, struct ltc2497_driverdata, common_ddata);
int ret;
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- ret = ltc2497_read(st, chan->address, val);
- mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
- return ret;
-
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_SCALE:
- ret = regulator_get_voltage(st->ref);
- if (ret < 0)
+ if (val) {
+ ret = i2c_master_recv(st->client, (char *)&st->buf, 3);
+ if (ret < 0) {
+ dev_err(&st->client->dev, "i2c_master_recv failed\n");
return ret;
+ }
- *val = ret / 1000;
- *val2 = 17;
-
- return IIO_VAL_FRACTIONAL_LOG2;
-
- default:
- return -EINVAL;
+ *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
}
-}
-#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = (_chan), \
- .address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .datasheet_name = (_ds_name), \
-}
-
-#define LTC2497_CHAN_DIFF(_chan, _addr) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
- .channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
- .address = (_addr | _chan), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .differential = 1, \
+ ret = i2c_smbus_write_byte(st->client,
+ LTC2497_ENABLE | address);
+ if (ret)
+ dev_err(&st->client->dev, "i2c transfer failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
}
-static const struct iio_chan_spec ltc2497_channel[] = {
- LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
- LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
- LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
- LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
- LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
- LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
- LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
- LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
- LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
- LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
- LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
- LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
- LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
- LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
- LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
- LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
- LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
- LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
- LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
-};
-
-static const struct iio_info ltc2497_info = {
- .read_raw = ltc2497_read_raw,
-};
-
static int ltc2497_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
- struct ltc2497_st *st;
- struct iio_map *plat_data;
- int ret;
+ struct ltc2497_driverdata *st;
+ struct device *dev = &client->dev;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE))
return -EOPNOTSUPP;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
st->client = client;
+ st->common_ddata.result_and_measure = ltc2497_result_and_measure;
- indio_dev->dev.parent = &client->dev;
- indio_dev->name = id->name;
- indio_dev->info = &ltc2497_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ltc2497_channel;
- indio_dev->num_channels = ARRAY_SIZE(ltc2497_channel);
-
- st->ref = devm_regulator_get(&client->dev, "vref");
- if (IS_ERR(st->ref))
- return PTR_ERR(st->ref);
-
- ret = regulator_enable(st->ref);
- if (ret < 0)
- return ret;
-
- if (client->dev.platform_data) {
- plat_data = ((struct iio_map *)client->dev.platform_data);
- ret = iio_map_array_register(indio_dev, plat_data);
- if (ret) {
- dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
- goto err_regulator_disable;
- }
- }
-
- ret = i2c_smbus_write_byte(st->client, LTC2497_CONFIG_DEFAULT);
- if (ret < 0)
- goto err_array_unregister;
-
- st->addr_prev = LTC2497_CONFIG_DEFAULT;
- st->time_prev = ktime_get();
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto err_array_unregister;
-
- return 0;
-
-err_array_unregister:
- iio_map_array_unregister(indio_dev);
-
-err_regulator_disable:
- regulator_disable(st->ref);
-
- return ret;
+ return ltc2497core_probe(dev, indio_dev);
}
static int ltc2497_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ltc2497_st *st = iio_priv(indio_dev);
- iio_map_array_unregister(indio_dev);
- iio_device_unregister(indio_dev);
- regulator_disable(st->ref);
+ ltc2497core_remove(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ltc2497.h b/drivers/iio/adc/ltc2497.h
new file mode 100644
index 000000000000..d0b42dd6b8ad
--- /dev/null
+++ b/drivers/iio/adc/ltc2497.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define LTC2497_ENABLE 0xA0
+#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
+#define LTC2497_CONVERSION_TIME_MS 150ULL
+
+struct ltc2497core_driverdata {
+ struct regulator *ref;
+ ktime_t time_prev;
+ u8 addr_prev;
+ int (*result_and_measure)(struct ltc2497core_driverdata *ddata,
+ u8 address, int *val);
+};
+
+int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev);
+void ltc2497core_remove(struct iio_dev *indio_dev);
+
+MODULE_IMPORT_NS(LTC2497);
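
The result_and_measure() callback carries the whole bus abstraction: called with val == NULL it only starts a conversion on the given address; called with a non-NULL val it reads back the previous result and starts the next conversion, as both the SPI (ltc2496) and I2C (ltc2497) implementations above do. A sketch of the contract as ltc2497core_read() exercises it:

    /* start a conversion on addr, discarding any stale result */
    ret = ddata->result_and_measure(ddata, addr, NULL);

    /* ... wait LTC2497_CONVERSION_TIME_MS ... */

    /* fetch the finished result and kick off the next conversion */
    ret = ddata->result_and_measure(ddata, addr, &val);
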
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index e480529b3f04..04d5ff7d2c8e 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -115,22 +115,17 @@ enum max9611_conf_ids {
* where data shall be read from
*/
static const unsigned int max9611_mux_conf[][2] = {
- /* CONF_SENSE_1x */
- { MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
- /* CONF_SENSE_4x */
- { MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
- /* CONF_SENSE_8x */
- { MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
- /* CONF_IN_VOLT */
- { MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
- /* CONF_TEMP */
- { MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
+ [CONF_SENSE_1x] = { MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
+ [CONF_SENSE_4x] = { MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
+ [CONF_SENSE_8x] = { MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
+ [CONF_IN_VOLT] = { MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
+ [CONF_TEMP] = { MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
};
enum max9611_csa_gain {
- CSA_GAIN_1x,
- CSA_GAIN_4x,
- CSA_GAIN_8x,
+ CSA_GAIN_1x = CONF_SENSE_1x,
+ CSA_GAIN_4x = CONF_SENSE_4x,
+ CSA_GAIN_8x = CONF_SENSE_8x,
};
enum max9611_csa_gain_params {
@@ -148,18 +143,9 @@ enum max9611_csa_gain_params {
* value; use this structure to retrieve the correct LSB and offset values.
*/
static const unsigned int max9611_gain_conf[][2] = {
- { /* [0] CSA_GAIN_1x */
- MAX9611_CSA_1X_LSB_nV,
- MAX9611_CSA_1X_OFFS_RAW,
- },
- { /* [1] CSA_GAIN_4x */
- MAX9611_CSA_4X_LSB_nV,
- MAX9611_CSA_4X_OFFS_RAW,
- },
- { /* [2] CSA_GAIN_8x */
- MAX9611_CSA_8X_LSB_nV,
- MAX9611_CSA_8X_OFFS_RAW,
- },
+ [CSA_GAIN_1x] = { MAX9611_CSA_1X_LSB_nV, MAX9611_CSA_1X_OFFS_RAW, },
+ [CSA_GAIN_4x] = { MAX9611_CSA_4X_LSB_nV, MAX9611_CSA_4X_OFFS_RAW, },
+ [CSA_GAIN_8x] = { MAX9611_CSA_8X_LSB_nV, MAX9611_CSA_8X_OFFS_RAW, },
};
enum max9611_chan_addrs {
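
Both max9611 tables switch to designated initializers keyed by the enums, and the gain enumerators are pinned to the matching CONF_SENSE_* values, so max9611_gain_conf[CSA_GAIN_4x] and max9611_mux_conf[CONF_SENSE_4x] stay in step even if entries are reordered. A sketch of the idiom with hypothetical names:

    enum mode { MODE_A, MODE_B };

    static const unsigned int mode_conf[][2] = {
        [MODE_A] = { 0x01, 0x10 },  /* index tied to the enumerator, */
        [MODE_B] = { 0x02, 0x20 },  /* not to declaration order      */
    };
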
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index dcd7fb5b9fb2..2bb78d1c4daa 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -6,6 +6,7 @@
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/units.h>
#include "qcom-vadc-common.h"
@@ -236,8 +237,7 @@ static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph,
voltage = 0;
}
- voltage -= KELVINMIL_CELSIUSMIL;
- *result_mdec = voltage;
+ *result_mdec = milli_kelvin_to_millicelsius(voltage);
return 0;
}
@@ -325,7 +325,7 @@ static int qcom_vadc_scale_hw_calib_die_temp(
{
*result_mdec = qcom_vadc_scale_code_voltage_factor(adc_code,
prescale, data, 2);
- *result_mdec -= KELVINMIL_CELSIUSMIL;
+ *result_mdec = milli_kelvin_to_millicelsius(*result_mdec);
return 0;
}
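
The open-coded KELVINMIL_CELSIUSMIL constant (dropped from the header below) is replaced by the milli_kelvin_to_millicelsius() helper from <linux/units.h>; the arithmetic is unchanged, only named. A sketch with a hypothetical value:

    #include <linux/units.h>

    int mk = 298150;                            /* 298.150 K */
    int mc = milli_kelvin_to_millicelsius(mk);  /* 25000 millidegrees Celsius */
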
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index bbb1fa02b382..e074902a24cc 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -38,7 +38,6 @@
#define VADC_AVG_SAMPLES_MAX 512
#define ADC5_AVG_SAMPLES_MAX 16
-#define KELVINMIL_CELSIUSMIL 273150
#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
#define PMIC5_SMB_TEMP_CONSTANT 419400
#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 6537f4f776c5..2df88d2b880a 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -280,21 +280,21 @@ out:
static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
.csr = STM32F4_ADC_CSR,
.ccr = STM32F4_ADC_CCR,
- .eoc1_msk = STM32F4_EOC1,
- .eoc2_msk = STM32F4_EOC2,
- .eoc3_msk = STM32F4_EOC3,
+ .eoc1_msk = STM32F4_EOC1 | STM32F4_OVR1,
+ .eoc2_msk = STM32F4_EOC2 | STM32F4_OVR2,
+ .eoc3_msk = STM32F4_EOC3 | STM32F4_OVR3,
.ier = STM32F4_ADC_CR1,
- .eocie_msk = STM32F4_EOCIE,
+ .eocie_msk = STM32F4_EOCIE | STM32F4_OVRIE,
};
/* STM32H7 common registers definitions */
static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
.csr = STM32H7_ADC_CSR,
.ccr = STM32H7_ADC_CCR,
- .eoc1_msk = STM32H7_EOC_MST,
- .eoc2_msk = STM32H7_EOC_SLV,
+ .eoc1_msk = STM32H7_EOC_MST | STM32H7_OVR_MST,
+ .eoc2_msk = STM32H7_EOC_SLV | STM32H7_OVR_SLV,
.ier = STM32H7_ADC_IER,
- .eocie_msk = STM32H7_EOCIE,
+ .eocie_msk = STM32H7_EOCIE | STM32H7_OVRIE,
};
static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
@@ -688,7 +688,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(priv->vref)) {
ret = PTR_ERR(priv->vref);
- dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "vref get failed, %d\n", ret);
return ret;
}
@@ -696,7 +697,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (IS_ERR(priv->aclk)) {
ret = PTR_ERR(priv->aclk);
if (ret != -ENOENT) {
- dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get 'adc' clock\n");
return ret;
}
priv->aclk = NULL;
@@ -706,7 +708,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (IS_ERR(priv->bclk)) {
ret = PTR_ERR(priv->bclk);
if (ret != -ENOENT) {
- dev_err(&pdev->dev, "Can't get 'bus' clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get 'bus' clock\n");
return ret;
}
priv->bclk = NULL;
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 2579d514c2a3..2322809bfd2f 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -51,10 +51,12 @@
#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_OVR BIT(5)
#define STM32F4_STRT BIT(4)
#define STM32F4_EOC BIT(1)
/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_OVRIE BIT(26)
#define STM32F4_RES_SHIFT 24
#define STM32F4_RES_MASK GENMASK(25, 24)
#define STM32F4_SCAN BIT(8)
@@ -72,8 +74,11 @@
#define STM32F4_ADON BIT(0)
/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_OVR3 BIT(21)
#define STM32F4_EOC3 BIT(17)
+#define STM32F4_OVR2 BIT(13)
#define STM32F4_EOC2 BIT(9)
+#define STM32F4_OVR1 BIT(5)
#define STM32F4_EOC1 BIT(1)
/* STM32F4_ADC_CCR - bit fields */
@@ -103,10 +108,12 @@
/* STM32H7_ADC_ISR - bit fields */
#define STM32MP1_VREGREADY BIT(12)
+#define STM32H7_OVR BIT(4)
#define STM32H7_EOC BIT(2)
#define STM32H7_ADRDY BIT(0)
/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_OVRIE STM32H7_OVR
#define STM32H7_EOCIE STM32H7_EOC
/* STM32H7_ADC_CR - bit fields */
@@ -155,7 +162,9 @@ enum stm32h7_adc_dmngt {
#define STM32H7_LINCALFACT_MASK GENMASK(29, 0)
/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_OVR_SLV BIT(20)
#define STM32H7_EOC_SLV BIT(18)
+#define STM32H7_OVR_MST BIT(4)
#define STM32H7_EOC_MST BIT(2)
/* STM32H7_ADC_CCR - bit fields */
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 3b291d72701c..80c3f963527b 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -117,7 +117,9 @@ struct stm32_adc_regs {
* struct stm32_adc_regspec - stm32 registers definition
* @dr: data register offset
* @ier_eoc: interrupt enable register & eocie bitfield
+ * @ier_ovr: interrupt enable register & overrun bitfield
* @isr_eoc: interrupt status register & eoc bitfield
+ * @isr_ovr: interrupt status register & overrun bitfield
* @sqr: reference to sequence registers array
* @exten: trigger control register & bitfield
* @extsel: trigger selection register & bitfield
@@ -128,7 +130,9 @@ struct stm32_adc_regs {
struct stm32_adc_regspec {
const u32 dr;
const struct stm32_adc_regs ier_eoc;
+ const struct stm32_adc_regs ier_ovr;
const struct stm32_adc_regs isr_eoc;
+ const struct stm32_adc_regs isr_ovr;
const struct stm32_adc_regs *sqr;
const struct stm32_adc_regs exten;
const struct stm32_adc_regs extsel;
@@ -337,7 +341,9 @@ static const unsigned int stm32f4_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
static const struct stm32_adc_regspec stm32f4_adc_regspec = {
.dr = STM32F4_ADC_DR,
.ier_eoc = { STM32F4_ADC_CR1, STM32F4_EOCIE },
+ .ier_ovr = { STM32F4_ADC_CR1, STM32F4_OVRIE },
.isr_eoc = { STM32F4_ADC_SR, STM32F4_EOC },
+ .isr_ovr = { STM32F4_ADC_SR, STM32F4_OVR },
.sqr = stm32f4_sq,
.exten = { STM32F4_ADC_CR2, STM32F4_EXTEN_MASK, STM32F4_EXTEN_SHIFT },
.extsel = { STM32F4_ADC_CR2, STM32F4_EXTSEL_MASK,
@@ -429,7 +435,9 @@ static const unsigned int stm32h7_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
static const struct stm32_adc_regspec stm32h7_adc_regspec = {
.dr = STM32H7_ADC_DR,
.ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
+ .ier_ovr = { STM32H7_ADC_IER, STM32H7_OVRIE },
.isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC },
+ .isr_ovr = { STM32H7_ADC_ISR, STM32H7_OVR },
.sqr = stm32h7_sq,
.exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT },
.extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
@@ -506,6 +514,18 @@ static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
adc->cfg->regs->ier_eoc.mask);
}
+static void stm32_adc_ovr_irq_enable(struct stm32_adc *adc)
+{
+ stm32_adc_set_bits(adc, adc->cfg->regs->ier_ovr.reg,
+ adc->cfg->regs->ier_ovr.mask);
+}
+
+static void stm32_adc_ovr_irq_disable(struct stm32_adc *adc)
+{
+ stm32_adc_clr_bits(adc, adc->cfg->regs->ier_ovr.reg,
+ adc->cfg->regs->ier_ovr.mask);
+}
+
static void stm32_adc_set_res(struct stm32_adc *adc)
{
const struct stm32_adc_regs *res = &adc->cfg->regs->res;
@@ -1205,6 +1225,19 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
}
}
+static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
+{
+ struct stm32_adc *adc = data;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ const struct stm32_adc_regspec *regs = adc->cfg->regs;
+ u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+
+ if (status & regs->isr_ovr.mask)
+ dev_err(&indio_dev->dev, "Overrun, stopping: restart needed\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t stm32_adc_isr(int irq, void *data)
{
struct stm32_adc *adc = data;
@@ -1212,6 +1245,19 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
+ if (status & regs->isr_ovr.mask) {
+ /*
+ * Overrun occurred on regular conversions: data for wrong
+ * channel may be read. Unconditionally disable interrupts
+ * to stop processing data and print error message.
+ * Restarting the capture can be done by disabling, then
+ * re-enabling it (e.g. write 0, then 1 to buffer/enable).
+ */
+ stm32_adc_ovr_irq_disable(adc);
+ stm32_adc_conv_irq_disable(adc);
+ return IRQ_WAKE_THREAD;
+ }
+
if (status & regs->isr_eoc.mask) {
/* Reading DR also clears EOC status flag */
adc->buffer[adc->bufi] = stm32_adc_readw(adc, regs->dr);
@@ -1441,6 +1487,8 @@ static int __stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
/* Reset adc buffer index */
adc->bufi = 0;
+ stm32_adc_ovr_irq_enable(adc);
+
if (!adc->dma_chan)
stm32_adc_conv_irq_enable(adc);
@@ -1481,6 +1529,8 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
if (!adc->dma_chan)
stm32_adc_conv_irq_disable(adc);
+ stm32_adc_ovr_irq_disable(adc);
+
if (adc->dma_chan)
dmaengine_terminate_sync(adc->dma_chan);
@@ -1746,9 +1796,21 @@ static int stm32_adc_dma_request(struct iio_dev *indio_dev)
struct dma_slave_config config;
int ret;
- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
- if (!adc->dma_chan)
+ adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ ret = PTR_ERR(adc->dma_chan);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&indio_dev->dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+ }
+
+ /* DMA is optional: fall back to IRQ mode */
+ adc->dma_chan = NULL;
return 0;
+ }
adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
STM32_DMA_BUFFER_SIZE,
@@ -1818,8 +1880,9 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (adc->irq < 0)
return adc->irq;
- ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
- 0, pdev->name, adc);
+ ret = devm_request_threaded_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+ stm32_adc_threaded_isr,
+ 0, pdev->name, adc);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ\n");
return ret;
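
The overrun handling uses the split-handler IRQ pattern: the hard handler only touches registers and returns IRQ_WAKE_THREAD, deferring the error print to the threaded handler registered via devm_request_threaded_irq(). A sketch of the shape (condition and names hypothetical):

    static irqreturn_t example_hard_isr(int irq, void *data)
    {
        if (overrun_flagged(data))      /* hypothetical register check */
            return IRQ_WAKE_THREAD;     /* defer to the thread function */
        return IRQ_HANDLED;
    }

    ret = devm_request_threaded_irq(dev, irq, example_hard_isr,
                                    example_threaded_isr, 0, name, data);
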
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index e493242c266e..2aad2cda6943 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1204,6 +1204,8 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
stm32_dfsdm_stop_conv(adc);
+ stm32_dfsdm_process_data(adc, res);
+
stop_dfsdm:
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
@@ -1219,14 +1221,32 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
unsigned int spi_freq;
int ret = -EINVAL;
+ switch (ch->src) {
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
+ spi_freq = adc->dfsdm->spi_master_freq;
+ break;
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
+ case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
+ spi_freq = adc->dfsdm->spi_master_freq / 2;
+ break;
+ default:
+ spi_freq = adc->spi_freq;
+ }
+
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
+
ret = stm32_dfsdm_compute_all_osrs(indio_dev, val);
- if (!ret)
+ if (!ret) {
+ dev_dbg(&indio_dev->dev,
+ "Sampling rate changed from (%u) to (%u)\n",
+ adc->sample_freq, spi_freq / val);
adc->oversamp = val;
+ adc->sample_freq = spi_freq / val;
+ }
iio_device_release_direct_mode(indio_dev);
return ret;
@@ -1238,18 +1258,6 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- switch (ch->src) {
- case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
- spi_freq = adc->dfsdm->spi_master_freq;
- break;
- case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
- case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
- spi_freq = adc->dfsdm->spi_master_freq / 2;
- break;
- default:
- spi_freq = adc->spi_freq;
- }
-
ret = dfsdm_adc_set_samp_freq(indio_dev, val, spi_freq);
iio_device_release_direct_mode(indio_dev);
return ret;
@@ -1383,9 +1391,13 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
- if (!adc->dma_chan)
- return -EINVAL;
+ adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ int ret = PTR_ERR(adc->dma_chan);
+
+ adc->dma_chan = NULL;
+ return ret;
+ }
adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
DFSDM_DMA_BUFFER_SIZE,
@@ -1509,7 +1521,16 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
init_completion(&adc->completion);
/* Optionally request DMA */
- if (stm32_dfsdm_dma_request(indio_dev)) {
+ ret = stm32_dfsdm_dma_request(indio_dev);
+ if (ret) {
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&indio_dev->dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+ }
+
dev_dbg(&indio_dev->dev, "No DMA support\n");
return 0;
}
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index a550b132cfb7..5ea4f45d6bad 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -12,17 +12,15 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/platform_data/ads1015.h>
-
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/iio/sysfs.h>
@@ -33,6 +31,8 @@
#define ADS1015_DRV_NAME "ads1015"
+#define ADS1015_CHANNELS 8
+
#define ADS1015_CONV_REG 0x00
#define ADS1015_CFG_REG 0x01
#define ADS1015_LO_THRESH_REG 0x02
@@ -77,6 +77,7 @@
#define ADS1015_DEFAULT_CHAN 0
enum chip_ids {
+ ADSXXXX = 0,
ADS1015,
ADS1115,
};
@@ -219,6 +220,12 @@ static const struct iio_event_spec ads1015_events[] = {
.datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
+struct ads1015_channel_data {
+ bool enabled;
+ unsigned int pga;
+ unsigned int data_rate;
+};
+
struct ads1015_thresh_data {
unsigned int comp_queue;
int high_thresh;
@@ -837,65 +844,58 @@ static const struct iio_info ads1115_info = {
.attrs = &ads1115_attribute_group,
};
-#ifdef CONFIG_OF
-static int ads1015_get_channels_config_of(struct i2c_client *client)
+static int ads1015_client_get_channels_config(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
- struct device_node *node;
+ struct device *dev = &client->dev;
+ struct fwnode_handle *node;
+ int i = -1;
- if (!client->dev.of_node ||
- !of_get_next_child(client->dev.of_node, NULL))
- return -EINVAL;
-
- for_each_child_of_node(client->dev.of_node, node) {
+ device_for_each_child_node(dev, node) {
u32 pval;
unsigned int channel;
unsigned int pga = ADS1015_DEFAULT_PGA;
unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
- if (of_property_read_u32(node, "reg", &pval)) {
- dev_err(&client->dev, "invalid reg on %pOF\n",
- node);
+ if (fwnode_property_read_u32(node, "reg", &pval)) {
+ dev_err(dev, "invalid reg on %pfw\n", node);
continue;
}
channel = pval;
if (channel >= ADS1015_CHANNELS) {
- dev_err(&client->dev,
- "invalid channel index %d on %pOF\n",
+ dev_err(dev, "invalid channel index %d on %pfw\n",
channel, node);
continue;
}
- if (!of_property_read_u32(node, "ti,gain", &pval)) {
+ if (!fwnode_property_read_u32(node, "ti,gain", &pval)) {
pga = pval;
if (pga > 6) {
- dev_err(&client->dev, "invalid gain on %pOF\n",
- node);
- of_node_put(node);
+ dev_err(dev, "invalid gain on %pfw\n", node);
+ fwnode_handle_put(node);
return -EINVAL;
}
}
- if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+ if (!fwnode_property_read_u32(node, "ti,datarate", &pval)) {
data_rate = pval;
if (data_rate > 7) {
- dev_err(&client->dev,
- "invalid data_rate on %pOF\n",
- node);
- of_node_put(node);
+ dev_err(dev, "invalid data_rate on %pfw\n", node);
+ fwnode_handle_put(node);
return -EINVAL;
}
}
data->channel_data[channel].pga = pga;
data->channel_data[channel].data_rate = data_rate;
+
+ i++;
}
- return 0;
+ return i < 0 ? -EINVAL : 0;
}
-#endif
static void ads1015_get_channels_config(struct i2c_client *client)
{
@@ -903,19 +903,10 @@ static void ads1015_get_channels_config(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
- struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
- /* prefer platform data */
- if (pdata) {
- memcpy(data->channel_data, pdata->channel_data,
- sizeof(data->channel_data));
+ if (!ads1015_client_get_channels_config(client))
return;
- }
-#ifdef CONFIG_OF
- if (!ads1015_get_channels_config_of(client))
- return;
-#endif
/* fallback on default configuration */
for (k = 0; k < ADS1015_CHANNELS; ++k) {
data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
@@ -953,9 +944,8 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (client->dev.of_node)
- chip = (enum chip_ids)of_device_get_match_data(&client->dev);
- else
+ chip = (enum chip_ids)device_get_match_data(&client->dev);
+ if (chip == ADSXXXX)
chip = id->driver_data;
switch (chip) {
case ADS1015:
@@ -970,6 +960,9 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->info = &ads1115_info;
data->data_rate = (unsigned int *) &ads1115_data_rate;
break;
+ default:
+ dev_err(&client->dev, "Unknown chip %d\n", chip);
+ return -EINVAL;
}
data->event_channel = ADS1015_CHANNELS;
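
Rewriting the parser on the fwnode property API lets one code path serve both devicetree and ACPI descriptions, which is why the CONFIG_OF guards disappear and the platform-data branch can go. A sketch of the child-node walk used above:

    #include <linux/property.h>

    struct fwnode_handle *child;
    u32 reg;

    device_for_each_child_node(dev, child) {
        if (fwnode_property_read_u32(child, "reg", &reg))
            continue;       /* same call works for DT and ACPI */
        /* ... per-channel setup keyed by reg ... */
    }
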
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 2e66e4d586ff..f9edc1207f75 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -602,7 +602,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(st->reg)) {
- dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
+ dev_err(&spi->dev, "Failed to get regulator \"vref\"\n");
ret = PTR_ERR(st->reg);
goto error_destroy_mutex;
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 90cf6e586b10..a74bd9c0587c 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -476,7 +476,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
* @n: Number of bytes to read
* @user_buffer: Userspace buffer to copy the data to
*
- * Should be used as the read_first_n callback for iio_buffer_access_ops
+ * Should be used as the read callback for iio_buffer_access_ops
* struct for DMA buffers.
*/
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index bea4a75e92f1..b129693af0fd 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -10,8 +10,10 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
@@ -107,7 +109,7 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
}
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
- .read_first_n = iio_dma_buffer_read,
+ .read = iio_dma_buffer_read,
.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
.set_length = iio_dma_buffer_set_length,
.request_update = iio_dma_buffer_request_update,
@@ -125,6 +127,24 @@ static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
.abort = iio_dmaengine_buffer_abort,
};
+static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct dmaengine_buffer *dmaengine_buffer =
+ iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
+
+ return sprintf(buf, "%u\n", dmaengine_buffer->align);
+}
+
+static IIO_DEVICE_ATTR(length_align_bytes, 0444,
+ iio_dmaengine_buffer_get_length_align, NULL, 0);
+
+static const struct attribute *iio_dmaengine_buffer_attrs[] = {
+ &iio_dev_attr_length_align_bytes.dev_attr.attr,
+ NULL,
+};
+
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
* @dev: Parent device for the buffer
@@ -150,7 +170,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
if (!dmaengine_buffer)
return ERR_PTR(-ENOMEM);
- chan = dma_request_slave_channel_reason(dev, channel);
+ chan = dma_request_chan(dev, channel);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
goto err_free;
@@ -178,6 +198,8 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
&iio_dmaengine_default_ops);
+ iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
+ iio_dmaengine_buffer_attrs);
dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
@@ -206,3 +228,7 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA buffer for the IIO framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index e78fc0834e6b..3150f8ab984b 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -98,8 +98,7 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
return 0;
}
-static int iio_read_first_n_kfifo(struct iio_buffer *r,
- size_t n, char __user *buf)
+static int iio_read_kfifo(struct iio_buffer *r, size_t n, char __user *buf)
{
int ret, copied;
struct iio_kfifo *kf = iio_to_kfifo(r);
@@ -141,7 +140,7 @@ static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
static const struct iio_buffer_access_funcs kfifo_access_funcs = {
.store_to = &iio_store_to_kfifo,
- .read_first_n = &iio_read_first_n_kfifo,
+ .read = &iio_read_kfifo,
.data_available = iio_kfifo_buf_data_available,
.request_update = &iio_request_update_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index fa4586037bb8..0b91de4df8f4 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -65,6 +65,7 @@ config IAQCORE
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Plantower PMS7003 particulate
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index f97270bc4034..33d3a595dda9 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,7 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
-obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
+obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o
obj-$(CONFIG_BME680) += bme680_core.o
obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 6c175eb1c7a7..2f0a6fed2589 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
+ * atlas-sensor.c - Support for Atlas Scientific OEM SM sensors
*
- * Copyright (C) 2015-2018 Matt Ranostay
+ * Copyright (C) 2015-2019 Konsulko Group
* Author: Matt Ranostay <matt.ranostay@konsulko.com>
*/
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
@@ -25,8 +24,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/pm_runtime.h>
-#define ATLAS_REGMAP_NAME "atlas_ph_regmap"
-#define ATLAS_DRV_NAME "atlas_ph"
+#define ATLAS_REGMAP_NAME "atlas_regmap"
+#define ATLAS_DRV_NAME "atlas"
#define ATLAS_REG_DEV_TYPE 0x00
#define ATLAS_REG_DEV_VERSION 0x01
@@ -87,6 +86,16 @@ static const struct regmap_config atlas_regmap_config = {
.val_bits = 8,
};
+static int atlas_buffer_num_channels(const struct iio_chan_spec *spec)
+{
+ int idx = 0;
+
+ for (; spec->type != IIO_TIMESTAMP; spec++)
+ idx++;
+
+ return idx;
+};
+
static const struct iio_chan_spec atlas_ph_channels[] = {
{
.type = IIO_PH,
@@ -355,11 +364,12 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct atlas_data *data = iio_priv(indio_dev);
+ int channels = atlas_buffer_num_channels(data->chip->channels);
int ret;
ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
(u8 *) &data->buffer,
- sizeof(__be32) * (data->chip->num_channels - 2));
+ sizeof(__be32) * channels);
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
@@ -681,5 +691,5 @@ static struct i2c_driver atlas_driver = {
module_i2c_driver(atlas_driver);
MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
-MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
+MODULE_DESCRIPTION("Atlas Scientific SM sensors");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 7dce04473467..576e45faafaf 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -16,7 +16,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 81a7f692de2f..d3a3626c7cd8 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -13,7 +13,6 @@
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/iio/common/ssp_sensors/ssp.h b/drivers/iio/common/ssp_sensors/ssp.h
index 0a381bb1ae6f..abb832795619 100644
--- a/drivers/iio/common/ssp_sensors/ssp.h
+++ b/drivers/iio/common/ssp_sensors/ssp.h
@@ -7,7 +7,7 @@
#define __SSP_SENSORHUB_H__
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/iio/common/ssp_sensors.h>
#include <linux/iio/iio.h>
#include <linux/spi/spi.h>
@@ -168,9 +168,9 @@ struct ssp_sensorhub_info {
* @fw_dl_state: firmware download state
* @comm_lock: lock protecting the handshake
* @pending_lock: lock protecting pending list and completion
- * @mcu_reset_gpio: mcu reset line
- * @ap_mcu_gpio: ap to mcu gpio line
- * @mcu_ap_gpio: mcu to ap gpio line
+ * @mcu_reset_gpiod: mcu reset line
+ * @ap_mcu_gpiod: ap to mcu gpio line
+ * @mcu_ap_gpiod: mcu to ap gpio line
* @pending_list: pending list for messages queued to be sent/read
* @sensor_devs: registered IIO devices table
* @enable_refcount: enable reference count for wdt (watchdog timer)
@@ -212,9 +212,9 @@ struct ssp_data {
struct mutex comm_lock;
struct mutex pending_lock;
- int mcu_reset_gpio;
- int ap_mcu_gpio;
- int mcu_ap_gpio;
+ struct gpio_desc *mcu_reset_gpiod;
+ struct gpio_desc *ap_mcu_gpiod;
+ struct gpio_desc *mcu_ap_gpiod;
struct list_head pending_list;
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 9c70553994c6..a94dbcf491ce 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -9,7 +9,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include "ssp.h"
@@ -61,9 +60,9 @@ static const struct mfd_cell sensorhub_sensor_devs[] = {
static void ssp_toggle_mcu_reset_gpio(struct ssp_data *data)
{
- gpio_set_value(data->mcu_reset_gpio, 0);
+ gpiod_set_value(data->mcu_reset_gpiod, 0);
usleep_range(1000, 1200);
- gpio_set_value(data->mcu_reset_gpio, 1);
+ gpiod_set_value(data->mcu_reset_gpiod, 1);
msleep(50);
}
@@ -441,7 +440,6 @@ MODULE_DEVICE_TABLE(of, ssp_of_match);
static struct ssp_data *ssp_parse_dt(struct device *dev)
{
- int ret;
struct ssp_data *data;
struct device_node *node = dev->of_node;
const struct of_device_id *match;
@@ -450,26 +448,17 @@ static struct ssp_data *ssp_parse_dt(struct device *dev)
if (!data)
return NULL;
- data->mcu_ap_gpio = of_get_named_gpio(node, "mcu-ap-gpios", 0);
- if (data->mcu_ap_gpio < 0)
- return NULL;
-
- data->ap_mcu_gpio = of_get_named_gpio(node, "ap-mcu-gpios", 0);
- if (data->ap_mcu_gpio < 0)
- return NULL;
-
- data->mcu_reset_gpio = of_get_named_gpio(node, "mcu-reset-gpios", 0);
- if (data->mcu_reset_gpio < 0)
+ data->mcu_ap_gpiod = devm_gpiod_get(dev, "mcu-ap", GPIOD_IN);
+ if (IS_ERR(data->mcu_ap_gpiod))
return NULL;
- ret = devm_gpio_request_one(dev, data->ap_mcu_gpio, GPIOF_OUT_INIT_HIGH,
- "ap-mcu-gpios");
- if (ret)
+ data->ap_mcu_gpiod = devm_gpiod_get(dev, "ap-mcu", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->ap_mcu_gpiod))
return NULL;
- ret = devm_gpio_request_one(dev, data->mcu_reset_gpio,
- GPIOF_OUT_INIT_HIGH, "mcu-reset-gpios");
- if (ret)
+ data->mcu_reset_gpiod = devm_gpiod_get(dev, "mcu-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->mcu_reset_gpiod))
return NULL;
match = of_match_node(ssp_of_match, node);
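The descriptor API collapses each of_get_named_gpio() + devm_gpio_request_one() pair above into a single call that also sets the initial direction and level; the same idiom carries through ssp_spi.c below. A minimal sketch, assuming the same "mcu-reset-gpios" binding (the helper itself is illustrative):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/*
 * devm_gpiod_get() looks up "mcu-reset-gpios", requests the line and
 * drives it high in one call.
 */
static int example_get_reset(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *gpiod;

        gpiod = devm_gpiod_get(dev, "mcu-reset", GPIOD_OUT_HIGH);
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);

        *out = gpiod;
        return 0;
}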
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index 7db3d5886e3e..4864c38b8d1c 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -155,9 +155,9 @@ static int ssp_check_lines(struct ssp_data *data, bool state)
{
int delay_cnt = 0;
- gpio_set_value_cansleep(data->ap_mcu_gpio, state);
+ gpiod_set_value_cansleep(data->ap_mcu_gpiod, state);
- while (gpio_get_value_cansleep(data->mcu_ap_gpio) != state) {
+ while (gpiod_get_value_cansleep(data->mcu_ap_gpiod) != state) {
usleep_range(3000, 3500);
if (data->shut_down || delay_cnt++ > 500) {
@@ -165,7 +165,7 @@ static int ssp_check_lines(struct ssp_data *data, bool state)
__func__, state);
if (!state)
- gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+ gpiod_set_value_cansleep(data->ap_mcu_gpiod, 1);
return -ETIMEDOUT;
}
@@ -197,7 +197,7 @@ static int ssp_do_transfer(struct ssp_data *data, struct ssp_msg *msg,
status = spi_write(data->spi, msg->buffer, SSP_HEADER_SIZE);
if (status < 0) {
- gpio_set_value_cansleep(data->ap_mcu_gpio, 1);
+ gpiod_set_value_cansleep(data->ap_mcu_gpiod, 1);
dev_err(SSP_DEV, "%s spi_write fail\n", __func__);
goto _error_locked;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 4a3064fb6cd9..e051edbc43c1 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -12,9 +12,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
@@ -319,63 +318,49 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
return 0;
}
-#ifdef CONFIG_OF
-static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
+static struct st_sensors_platform_data *st_sensors_dev_probe(struct device *dev,
struct st_sensors_platform_data *defdata)
{
struct st_sensors_platform_data *pdata;
- struct device_node *np = dev->of_node;
u32 val;
- if (!np)
+ if (!dev_fwnode(dev))
return NULL;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!of_property_read_u32(np, "st,drdy-int-pin", &val) && (val <= 2))
+ if (!device_property_read_u32(dev, "st,drdy-int-pin", &val) && (val <= 2))
pdata->drdy_int_pin = (u8) val;
else
pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0;
- pdata->open_drain = of_property_read_bool(np, "drive-open-drain");
+ pdata->open_drain = device_property_read_bool(dev, "drive-open-drain");
return pdata;
}
/**
- * st_sensors_of_name_probe() - device tree probe for ST sensor name
+ * st_sensors_dev_name_probe() - device probe for ST sensor name
* @dev: driver model representation of the device.
- * @match: the OF match table for the device, containing compatible strings
- * but also a .data field with the corresponding internal kernel name
- * used by this sensor.
* @name: device name buffer reference.
* @len: device name buffer length.
*
- * In effect this function matches a compatible string to an internal kernel
+ * In effect this function matches an ID to an internal kernel
* name for a certain sensor device, so that the rest of the autodetection can
* rely on that name from this point on. I2C/SPI devices will be renamed
* to match the internal kernel convention.
*/
-void st_sensors_of_name_probe(struct device *dev,
- const struct of_device_id *match,
- char *name, int len)
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len)
{
- const struct of_device_id *of_id;
+ const void *match;
- of_id = of_match_device(match, dev);
- if (!of_id || !of_id->data)
+ match = device_get_match_data(dev);
+ if (!match)
return;
- /* The name from the OF match takes precedence if present */
- strlcpy(name, of_id->data, len);
+ /* The name from the match takes precedence if present */
+ strlcpy(name, match, len);
}
-EXPORT_SYMBOL(st_sensors_of_name_probe);
-#else
-static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
- struct st_sensors_platform_data *defdata)
-{
- return NULL;
-}
-#endif
+EXPORT_SYMBOL(st_sensors_dev_name_probe);
int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
@@ -385,7 +370,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
int err = 0;
/* If OF/DT pdata exists, it will take precedence of anything else */
- of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
+ of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (of_pdata)
pdata = of_pdata;
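device_property_*() and device_get_match_data() resolve against devicetree or ACPI alike, which is why the CONFIG_OF guard and the OF-only fallback stub can go. A short sketch of the firmware-agnostic calls (the property name matches the binding; the rest is illustrative):

#include <linux/device.h>
#include <linux/property.h>

static void example_fw_probe(struct device *dev)
{
        const char *name = device_get_match_data(dev);
        u32 pin;

        if (name && !device_property_read_u32(dev, "st,drdy-int-pin", &pin))
                dev_info(dev, "%s: DRDY routed to INT%u\n", name, pin);
}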
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index aa89d54a7c59..286830fb5d35 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -11,8 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
-#include <linux/of_device.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/iio/common/st_sensors_i2c.h>
@@ -68,25 +66,6 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL(st_sensors_i2c_configure);
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *acpi_id;
- kernel_ulong_t driver_data = 0;
-
- if (ACPI_HANDLE(dev)) {
- acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!acpi_id) {
- dev_err(dev, "No driver data\n");
- return -EINVAL;
- }
- driver_data = acpi_id->driver_data;
- }
- return driver_data;
-}
-EXPORT_SYMBOL(st_sensors_match_acpi_device);
-#endif
-
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics ST-sensors i2c driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 2262f81b07c2..1275fb0eda31 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/iio/common/st_sensors_spi.h>
@@ -37,14 +38,15 @@ static const struct regmap_config st_sensors_spi_regmap_multiread_bit_config = {
*/
static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
{
- struct device_node *np = spi->dev.of_node;
struct st_sensors_platform_data *pdata;
+ struct device *dev = &spi->dev;
- pdata = (struct st_sensors_platform_data *)spi->dev.platform_data;
- if ((np && of_property_read_bool(np, "spi-3wire")) ||
- (pdata && pdata->spi_3wire)) {
+ if (device_property_read_bool(dev, "spi-3wire"))
+ return true;
+
+ pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ if (pdata && pdata->spi_3wire)
return true;
- }
return false;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 4a2efa00f7f2..e817537cdfb5 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -19,6 +19,9 @@
/**
* st_sensors_new_samples_available() - check if more samples came in
+ * @indio_dev: IIO device reference.
+ * @sdata: Sensor data.
+ *
* returns:
* 0 - no new samples available
* 1 - new samples available
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 2d897e64c6a9..e2110113e884 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -15,7 +15,6 @@
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
-#include <linux/gpio.h>
#include <linux/property.h>
#include <dt-bindings/iio/adi,ad5592r.h>
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 14bbac6bee98..15af8a1cce3e 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
-#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -202,7 +201,6 @@ static int ad7303_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct ad7303_state *st;
- bool ext_ref;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -224,24 +222,15 @@ static int ad7303_probe(struct spi_device *spi)
if (ret)
return ret;
- if (spi->dev.of_node) {
- ext_ref = of_property_read_bool(spi->dev.of_node,
- "REF-supply");
- } else {
- struct ad7303_platform_data *pdata = spi->dev.platform_data;
- if (pdata && pdata->use_external_ref)
- ext_ref = true;
- else
- ext_ref = false;
- }
-
- if (ext_ref) {
- st->vref_reg = devm_regulator_get(&spi->dev, "REF");
- if (IS_ERR(st->vref_reg)) {
- ret = PTR_ERR(st->vref_reg);
+ st->vref_reg = devm_regulator_get_optional(&spi->dev, "REF");
+ if (IS_ERR(st->vref_reg)) {
+ ret = PTR_ERR(st->vref_reg);
+ if (ret != -ENODEV)
goto err_disable_vdd_reg;
- }
+ st->vref_reg = NULL;
+ }
+ if (st->vref_reg) {
ret = regulator_enable(st->vref_reg);
if (ret)
goto err_disable_vdd_reg;
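devm_regulator_get_optional() returns -ENODEV rather than a dummy regulator when the supply is not described, which replaces the platform-data/DT "REF-supply" juggling above. The idiom, sketched with the same supply name (helper is illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/*
 * -ENODEV means the supply simply is not wired up, which is fine;
 * anything else (e.g. -EPROBE_DEFER) must be propagated.
 */
static int example_get_vref(struct device *dev, struct regulator **vref)
{
        *vref = devm_regulator_get_optional(dev, "REF");
        if (IS_ERR(*vref)) {
                if (PTR_ERR(*vref) != -ENODEV)
                        return PTR_ERR(*vref);
                *vref = NULL;   /* fall back to the internal reference */
        }
        return 0;
}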
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 9e6b4cd0a5cc..7e5809ba0dee 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -20,13 +20,11 @@
/**
* struct stm32_dac_priv - stm32 DAC core private data
* @pclk: peripheral clock common for all DACs
- * @rst: peripheral reset control
* @vref: regulator reference
* @common: Common data for all DAC instances
*/
struct stm32_dac_priv {
struct clk *pclk;
- struct reset_control *rst;
struct regulator *vref;
struct stm32_dac_common common;
};
@@ -94,6 +92,7 @@ static int stm32_dac_probe(struct platform_device *pdev)
struct regmap *regmap;
struct resource *res;
void __iomem *mmio;
+ struct reset_control *rst;
int ret;
if (!dev->of_node)
@@ -148,11 +147,19 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv->common.vref_mv = ret / 1000;
dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
- priv->rst = devm_reset_control_get_exclusive(dev, NULL);
- if (!IS_ERR(priv->rst)) {
- reset_control_assert(priv->rst);
+ rst = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (rst) {
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "reset get failed, %d\n", ret);
+
+ goto err_hw_stop;
+ }
+
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(priv->rst);
+ reset_control_deassert(rst);
}
if (cfg && cfg->has_hfsel) {
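devm_reset_control_get_optional_exclusive() returns NULL (not an error) when the node has no resets property, so only real failures such as -EPROBE_DEFER abort the probe. A sketch of the pattern under the same assumptions:

#include <linux/delay.h>
#include <linux/reset.h>

/*
 * A NULL control means "no reset line described"; the reset_control_*()
 * helpers accept NULL as a no-op, so the assert/deassert pair can even
 * run unconditionally.
 */
static int example_pulse_reset(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_optional_exclusive(dev, NULL);
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        reset_control_assert(rst);
        udelay(2);
        reset_control_deassert(rst);

        return 0;
}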
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index ae0ca09ae062..1c2dc9b00f31 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -14,11 +14,10 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/gcd.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/div64.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -34,6 +33,7 @@ enum {
struct adf4350_state {
struct spi_device *spi;
struct regulator *reg;
+ struct gpio_desc *lock_detect_gpiod;
struct adf4350_platform_data *pdata;
struct clk *clk;
unsigned long clkin;
@@ -61,7 +61,6 @@ static struct adf4350_platform_data default_pdata = {
.r3_user_settings = ADF4350_REG3_12BIT_CLKDIV_MODE(0),
.r4_user_settings = ADF4350_REG4_OUTPUT_PWR(3) |
ADF4350_REG4_MUTE_TILL_LOCK_EN,
- .gpio_lock_detect = -1,
};
static int adf4350_sync_config(struct adf4350_state *st)
@@ -317,8 +316,8 @@ static ssize_t adf4350_read(struct iio_dev *indio_dev,
(u64)st->fpfd;
do_div(val, st->r1_mod * (1 << st->r4_rf_div_sel));
/* PLL unlocked? return error */
- if (gpio_is_valid(st->pdata->gpio_lock_detect))
- if (!gpio_get_value(st->pdata->gpio_lock_detect)) {
+ if (st->lock_detect_gpiod)
+ if (!gpiod_get_value(st->lock_detect_gpiod)) {
dev_dbg(&st->spi->dev, "PLL un-locked\n");
ret = -EBUSY;
}
@@ -381,7 +380,6 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct adf4350_platform_data *pdata;
unsigned int tmp;
- int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -401,12 +399,6 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
of_property_read_u32(np, "adi,reference-div-factor", &tmp);
pdata->ref_div_factor = tmp;
- ret = of_get_gpio(np, 0);
- if (ret < 0)
- pdata->gpio_lock_detect = -1;
- else
- pdata->gpio_lock_detect = ret;
-
pdata->ref_doubler_en = of_property_read_bool(np,
"adi,reference-doubler-enable");
pdata->ref_div2_en = of_property_read_bool(np,
@@ -561,16 +553,10 @@ static int adf4350_probe(struct spi_device *spi)
memset(st->regs_hw, 0xFF, sizeof(st->regs_hw));
- if (gpio_is_valid(pdata->gpio_lock_detect)) {
- ret = devm_gpio_request(&spi->dev, pdata->gpio_lock_detect,
- indio_dev->name);
- if (ret) {
- dev_err(&spi->dev, "fail to request lock detect GPIO-%d",
- pdata->gpio_lock_detect);
- goto error_disable_reg;
- }
- gpio_direction_input(pdata->gpio_lock_detect);
- }
+ st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ GPIOD_IN);
+ if (IS_ERR(st->lock_detect_gpiod))
+ return PTR_ERR(st->lock_detect_gpiod);
if (pdata->power_up_frequency) {
ret = adf4350_set_freq(st, pdata->power_up_frequency);
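With con_id == NULL, devm_gpiod_get_optional() matches the bare "gpios" property, mirroring the old of_get_gpio(np, 0); a missing property yields NULL rather than an error, so the lock-detect checks reduce to a plain pointer test. A one-call sketch (illustrative helper):

#include <linux/gpio/consumer.h>

static struct gpio_desc *example_get_lock_detect(struct device *dev)
{
        return devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
}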
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 95e6f96d4529..7eaf77707b0b 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -75,26 +75,26 @@ config BMG160_SPI
select REGMAP_SPI
config FXAS21002C
- tristate "NXP FXAS21002C Gyro Sensor"
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- select FXAS21002C_I2C if (I2C)
- select FXAS21002C_SPI if (SPI)
- depends on (I2C || SPI_MASTER)
- help
- Say yes here to build support for NXP FXAS21002C Tri-axis Gyro
- Sensor driver connected via I2C or SPI.
-
- This driver can also be built as a module. If so, the module
- will be called fxas21002c_i2c or fxas21002c_spi.
+ tristate "NXP FXAS21002C Gyro Sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select FXAS21002C_I2C if (I2C)
+ select FXAS21002C_SPI if (SPI)
+ depends on (I2C || SPI_MASTER)
+ help
+ Say yes here to build support for NXP FXAS21002C Tri-axis Gyro
+ Sensor driver connected via I2C or SPI.
+
+ This driver can also be built as a module. If so, the module
+ will be called fxas21002c_i2c or fxas21002c_spi.
config FXAS21002C_I2C
- tristate
- select REGMAP_I2C
+ tristate
+ select REGMAP_I2C
config FXAS21002C_SPI
- tristate
- select REGMAP_SPI
+ tristate
+ select REGMAP_SPI
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index d637d52d051a..d5e03a406d4a 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -59,6 +59,7 @@
struct adis16136_chip_info {
unsigned int precision;
unsigned int fullscale;
+ const struct adis_timeout *timeouts;
};
struct adis16136 {
@@ -185,12 +186,12 @@ static int adis16136_set_freq(struct adis16136 *adis16136, unsigned int freq)
return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, t);
}
-static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
+static int __adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
{
uint16_t t;
int ret;
- ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
+ ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
if (ret)
return ret;
@@ -224,10 +225,13 @@ static ssize_t adis16136_read_frequency(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
int ret;
- ret = adis16136_get_freq(adis16136, &freq);
+ mutex_lock(slock);
+ ret = __adis16136_get_freq(adis16136, &freq);
+ mutex_unlock(slock);
if (ret)
return ret;
@@ -252,42 +256,50 @@ static const unsigned adis16136_3db_divisors[] = {
static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
int i, ret;
- ret = adis16136_get_freq(adis16136, &freq);
+ mutex_lock(slock);
+ ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
- return ret;
+ goto out_unlock;
for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
if (freq / adis16136_3db_divisors[i] >= val)
break;
}
- return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+ ret = __adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+out_unlock:
+ mutex_unlock(slock);
+
+ return ret;
}
static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct mutex *slock = &adis16136->adis.state_lock;
unsigned int freq;
uint16_t val16;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
- ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
+ ret = __adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT,
+ &val16);
if (ret)
goto err_unlock;
- ret = adis16136_get_freq(adis16136, &freq);
+ ret = __adis16136_get_freq(adis16136, &freq);
if (ret)
goto err_unlock;
*val = freq / adis16136_3db_divisors[val16 & 0x07];
err_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret ? ret : IIO_VAL_INT;
}
@@ -460,7 +472,6 @@ static const struct adis_data adis16136_data = {
.msc_ctrl_reg = ADIS16136_REG_MSC_CTRL,
.self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST,
- .startup_delay = 80,
.read_delay = 10,
.write_delay = 10,
@@ -479,30 +490,63 @@ enum adis16136_id {
ID_ADIS16137,
};
+static const struct adis_timeout adis16133_timeouts = {
+ .reset_ms = 75,
+ .sw_reset_ms = 75,
+ .self_test_ms = 50,
+};
+
+static const struct adis_timeout adis16136_timeouts = {
+ .reset_ms = 128,
+ .sw_reset_ms = 75,
+ .self_test_ms = 245,
+};
+
static const struct adis16136_chip_info adis16136_chip_info[] = {
[ID_ADIS16133] = {
.precision = IIO_DEGREE_TO_RAD(1200),
.fullscale = 24000,
+ .timeouts = &adis16133_timeouts,
},
[ID_ADIS16135] = {
.precision = IIO_DEGREE_TO_RAD(300),
.fullscale = 24000,
+ .timeouts = &adis16133_timeouts,
},
[ID_ADIS16136] = {
.precision = IIO_DEGREE_TO_RAD(450),
.fullscale = 24623,
+ .timeouts = &adis16136_timeouts,
},
[ID_ADIS16137] = {
.precision = IIO_DEGREE_TO_RAD(1000),
.fullscale = 24609,
+ .timeouts = &adis16136_timeouts,
},
};
+static struct adis_data *adis16136_adis_data_alloc(struct adis16136 *st,
+ struct device *dev)
+{
+ struct adis_data *data;
+
+ data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(data, &adis16136_data, sizeof(*data));
+
+ data->timeouts = st->chip_info->timeouts;
+
+ return data;
+}
+
static int adis16136_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct adis16136 *adis16136;
struct iio_dev *indio_dev;
+ const struct adis_data *adis16136_data;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16136));
@@ -521,7 +565,11 @@ static int adis16136_probe(struct spi_device *spi)
indio_dev->info = &adis16136_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis_init(&adis16136->adis, indio_dev, spi, &adis16136_data);
+ adis16136_data = adis16136_adis_data_alloc(adis16136, &spi->dev);
+ if (IS_ERR(adis16136_data))
+ return PTR_ERR(adis16136_data);
+
+ ret = adis_init(&adis16136->adis, indio_dev, spi, adis16136_data);
if (ret)
return ret;
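adis_init() now refuses an adis_data without timeouts (see the adis.c hunk below), so each chip table grows a timeout entry and probe clones the shared template with the chip-specific pointer filled in. A hypothetical entry for one more variant; the values below are placeholders, not datasheet numbers:

#include <linux/iio/imu/adis.h>

static const struct adis_timeout example_timeouts = {
        .reset_ms = 100,        /* recovery after a hardware reset */
        .sw_reset_ms = 75,      /* recovery after GLOB_CMD software reset */
        .self_test_ms = 50,     /* time for the self-test to complete */
};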
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 207a0ce13439..be09b3e5910c 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -293,7 +293,7 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
addr = adis16260_addresses[chan->scan_index][1];
return adis_write_reg_16(adis, addr, val);
case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&adis->state_lock);
if (spi_get_device_id(adis->spi)->driver_data)
t = 256 / val;
else
@@ -308,9 +308,9 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
else
adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
- ret = adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
+ ret = __adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&adis->state_lock);
return ret;
}
return -EINVAL;
@@ -332,6 +332,12 @@ static const char * const adis1620_status_error_msgs[] = {
[ADIS16260_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 4.75",
};
+static const struct adis_timeout adis16260_timeouts = {
+ .reset_ms = ADIS16260_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16260_STARTUP_DELAY,
+ .self_test_ms = ADIS16260_STARTUP_DELAY,
+};
+
static const struct adis_data adis16260_data = {
.write_delay = 30,
.read_delay = 30,
@@ -340,7 +346,7 @@ static const struct adis_data adis16260_data = {
.diag_stat_reg = ADIS16260_DIAG_STAT,
.self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST,
- .startup_delay = ADIS16260_STARTUP_DELAY,
+ .timeouts = &adis16260_timeouts,
.status_error_msgs = adis1620_status_error_msgs,
.status_error_mask = BIT(ADIS16260_DIAG_STAT_FLASH_CHK_BIT) |
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 981ae2291505..b3afa556f973 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -15,7 +15,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 592f6b34e987..fd9171cc3aba 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -28,7 +28,7 @@
* struct st_sensors_platform_data - gyro platform data
* @drdy_int_pin: DRDY on gyros is available only on INT2 pin.
*/
-static const struct st_sensors_platform_data gyro_pdata = {
+static __maybe_unused const struct st_sensors_platform_data gyro_pdata = {
.drdy_int_pin = 2,
};
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 57be68b291fa..26c50b24bc08 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -138,7 +138,6 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
[2] = LSM330DLC_GYRO_DEV_NAME,
[3] = L3G4IS_GYRO_DEV_NAME,
[4] = LSM330_GYRO_DEV_NAME,
- [5] = LSM9DS0_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
@@ -209,6 +208,80 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.bootime = 2,
},
{
+ .wai = 0xd4,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LSM9DS0_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = GENMASK(7, 6),
+ .odr_avl = {
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = BIT(3),
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = GENMASK(5, 4),
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_245DPS,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x23,
+ .mask = BIT(7),
+ },
+ .drdy_irq = {
+ .int2 = {
+ .addr = 0x22,
+ .mask = BIT(3),
+ },
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = GENMASK(2, 0),
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+ {
.wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 05a1a0874bd5..8190966e6ff0 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_gyro.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_gyro_of_match[] = {
{
.compatible = "st,l3g4200d-gyro",
@@ -58,9 +57,6 @@ static const struct of_device_id st_gyro_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
-#else
-#define st_gyro_of_match NULL
-#endif
static int st_gyro_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -70,8 +66,7 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&client->dev, st_gyro_of_match,
- client->name, sizeof(client->name));
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_gyro_get_settings(client->name);
if (!settings) {
@@ -122,7 +117,7 @@ MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
static struct i2c_driver st_gyro_driver = {
.driver = {
.name = "st-gyro-i2c",
- .of_match_table = of_match_ptr(st_gyro_of_match),
+ .of_match_table = st_gyro_of_match,
},
.probe = st_gyro_i2c_probe,
.remove = st_gyro_i2c_remove,
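With st_sensors_dev_name_probe() relying on device_get_match_data(), the OF table must always be built, so the #ifdef CONFIG_OF / of_match_ptr() dance goes away; the SPI variant below gets the same treatment. A trimmed sketch (one entry, probe/remove omitted):

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id example_of_match[] = {
        { .compatible = "st,l3g4200d-gyro", .data = "l3g4200d" },
        { },
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct i2c_driver example_driver = {
        .driver = {
                .name = "st-gyro-i2c",
                .of_match_table = example_of_match, /* no of_match_ptr() */
        },
};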
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index b5c624251231..efb862763ca3 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_gyro.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-gyro to maintain
@@ -63,9 +62,6 @@ static const struct of_device_id st_gyro_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
-#else
-#define st_gyro_of_match NULL
-#endif
static int st_gyro_spi_probe(struct spi_device *spi)
{
@@ -74,8 +70,7 @@ static int st_gyro_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_gyro_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_gyro_get_settings(spi->modalias);
if (!settings) {
@@ -126,7 +121,7 @@ MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
static struct spi_driver st_gyro_driver = {
.driver = {
.name = "st-gyro-spi",
- .of_match_table = of_match_ptr(st_gyro_of_match),
+ .of_match_table = st_gyro_of_match,
},
.probe = st_gyro_spi_probe,
.remove = st_gyro_spi_remove,
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index b459600e1a33..d05c6fdb758b 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -174,7 +174,6 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
struct iio_dev *iio = data;
struct dht11 *dht11 = iio_priv(iio);
- /* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
dht11->edges[dht11->num_edges].ts = ktime_get_boottime_ns();
dht11->edges[dht11->num_edges++].value =
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 4922444771c6..9003671f14fb 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -24,13 +24,6 @@
#define HTS221_REG_CNTRL1_ADDR 0x20
#define HTS221_REG_CNTRL2_ADDR 0x21
-#define HTS221_REG_AVG_ADDR 0x10
-#define HTS221_REG_H_OUT_L 0x28
-#define HTS221_REG_T_OUT_L 0x2a
-
-#define HTS221_HUMIDITY_AVG_MASK 0x07
-#define HTS221_TEMP_AVG_MASK 0x38
-
#define HTS221_ODR_MASK 0x03
#define HTS221_BDU_MASK BIT(2)
#define HTS221_ENABLE_MASK BIT(7)
@@ -66,8 +59,8 @@ static const struct hts221_odr hts221_odr_table[] = {
static const struct hts221_avg hts221_avg_list[] = {
{
- .addr = HTS221_REG_AVG_ADDR,
- .mask = HTS221_HUMIDITY_AVG_MASK,
+ .addr = 0x10,
+ .mask = 0x07,
.avg_avl = {
4, /* 0.4 %RH */
8, /* 0.3 %RH */
@@ -80,8 +73,8 @@ static const struct hts221_avg hts221_avg_list[] = {
},
},
{
- .addr = HTS221_REG_AVG_ADDR,
- .mask = HTS221_TEMP_AVG_MASK,
+ .addr = 0x10,
+ .mask = 0x38,
.avg_avl = {
2, /* 0.08 degC */
4, /* 0.05 degC */
@@ -98,7 +91,7 @@ static const struct hts221_avg hts221_avg_list[] = {
static const struct iio_chan_spec hts221_channels[] = {
{
.type = IIO_HUMIDITYRELATIVE,
- .address = HTS221_REG_H_OUT_L,
+ .address = 0x28,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
@@ -114,7 +107,7 @@ static const struct iio_chan_spec hts221_channels[] = {
},
{
.type = IIO_TEMP,
- .address = HTS221_REG_T_OUT_L,
+ .address = 0x2a,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 159ea3f8c02b..fd9a5f1d5e51 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -42,14 +42,14 @@ struct poll_table_struct;
__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
-ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
- size_t n, loff_t *f_ps);
+ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps);
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev);
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev);
#define iio_buffer_poll_addr (&iio_buffer_poll)
-#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+#define iio_buffer_read_outer_addr (&iio_buffer_read_outer)
void iio_disable_all_buffers(struct iio_dev *indio_dev);
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
@@ -57,7 +57,7 @@ void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
#else
#define iio_buffer_poll_addr NULL
-#define iio_buffer_read_first_n_outer_addr NULL
+#define iio_buffer_read_outer_addr NULL
static inline int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index e14c8536fd09..022bb54fb748 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -26,7 +26,14 @@
#define ADIS_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
#define ADIS_GLOB_CMD_SW_RESET BIT(7)
-int adis_write_reg(struct adis *adis, unsigned int reg,
+/**
+ * __adis_write_reg() - write N bytes to register (unlocked version)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: The value to write to device (up to 4 bytes)
+ * @size: The size of the @value (in bytes)
+ */
+int __adis_write_reg(struct adis *adis, unsigned int reg,
unsigned int value, unsigned int size)
{
unsigned int page = reg / ADIS_PAGE_SIZE;
@@ -38,7 +45,8 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -46,7 +54,8 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -54,24 +63,25 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
.bits_per_word = 8,
.len = 2,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 8,
.bits_per_word = 8,
.len = 2,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
},
};
- mutex_lock(&adis->txrx_lock);
-
spi_message_init(&msg);
if (adis->current_page != page) {
@@ -96,8 +106,7 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
adis->tx[3] = value & 0xff;
break;
default:
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
xfers[size].cs_change = 0;
@@ -113,20 +122,18 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
adis->current_page = page;
}
-out_unlock:
- mutex_unlock(&adis->txrx_lock);
-
return ret;
}
-EXPORT_SYMBOL_GPL(adis_write_reg);
+EXPORT_SYMBOL_GPL(__adis_write_reg);
/**
- * adis_read_reg() - read 2 bytes from a 16-bit register
+ * __adis_read_reg() - read N bytes from register (unlocked version)
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ * @size: The size of the @val buffer
*/
-int adis_read_reg(struct adis *adis, unsigned int reg,
+int __adis_read_reg(struct adis *adis, unsigned int reg,
unsigned int *val, unsigned int size)
{
unsigned int page = reg / ADIS_PAGE_SIZE;
@@ -138,7 +145,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->write_delay,
+ .delay.value = adis->data->write_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -146,7 +154,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->read_delay,
+ .delay.value = adis->data->read_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
@@ -155,18 +164,19 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
- .delay_usecs = adis->data->read_delay,
+ .delay.value = adis->data->read_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
.cs_change_delay.value = adis->data->cs_change_delay,
.cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
.bits_per_word = 8,
.len = 2,
- .delay_usecs = adis->data->read_delay,
+ .delay.value = adis->data->read_delay,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
},
};
- mutex_lock(&adis->txrx_lock);
spi_message_init(&msg);
if (adis->current_page != page) {
@@ -188,15 +198,14 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
spi_message_add_tail(&xfers[3], &msg);
break;
default:
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
ret = spi_sync(adis->spi, &msg);
if (ret) {
dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n",
reg, ret);
- goto out_unlock;
+ return ret;
} else {
adis->current_page = page;
}
@@ -210,12 +219,9 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
break;
}
-out_unlock:
- mutex_unlock(&adis->txrx_lock);
-
return ret;
}
-EXPORT_SYMBOL_GPL(adis_read_reg);
+EXPORT_SYMBOL_GPL(__adis_read_reg);
#ifdef CONFIG_DEBUG_FS
@@ -253,12 +259,16 @@ int adis_enable_irq(struct adis *adis, bool enable)
int ret = 0;
uint16_t msc;
- if (adis->data->enable_irq)
- return adis->data->enable_irq(adis, enable);
+ mutex_lock(&adis->state_lock);
+
+ if (adis->data->enable_irq) {
+ ret = adis->data->enable_irq(adis, enable);
+ goto out_unlock;
+ }
- ret = adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+ ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
if (ret)
- goto error_ret;
+ goto out_unlock;
msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
@@ -267,26 +277,27 @@ int adis_enable_irq(struct adis *adis, bool enable)
else
msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
- ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+ ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
-error_ret:
+out_unlock:
+ mutex_unlock(&adis->state_lock);
return ret;
}
EXPORT_SYMBOL(adis_enable_irq);
/**
- * adis_check_status() - Check the device for error conditions
+ * __adis_check_status() - Check the device for error conditions (unlocked)
* @adis: The adis device
*
* Returns 0 on success, a negative error code otherwise
*/
-int adis_check_status(struct adis *adis)
+int __adis_check_status(struct adis *adis)
{
uint16_t status;
int ret;
int i;
- ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+ ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
if (ret)
return ret;
@@ -304,32 +315,38 @@ int adis_check_status(struct adis *adis)
return -EIO;
}
-EXPORT_SYMBOL_GPL(adis_check_status);
+EXPORT_SYMBOL_GPL(__adis_check_status);
/**
- * adis_reset() - Reset the device
+ * __adis_reset() - Reset the device (unlocked version)
* @adis: The adis device
*
* Returns 0 on success, a negative error code otherwise
*/
-int adis_reset(struct adis *adis)
+int __adis_reset(struct adis *adis)
{
int ret;
+ const struct adis_timeout *timeouts = adis->data->timeouts;
- ret = adis_write_reg_8(adis, adis->data->glob_cmd_reg,
+ ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg,
ADIS_GLOB_CMD_SW_RESET);
- if (ret)
+ if (ret) {
dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret);
+ return ret;
+ }
- return ret;
+ msleep(timeouts->sw_reset_ms);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(adis_reset);
+EXPORT_SYMBOL_GPL(__adis_reset);
static int adis_self_test(struct adis *adis)
{
int ret;
+ const struct adis_timeout *timeouts = adis->data->timeouts;
- ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
+ ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
adis->data->self_test_mask);
if (ret) {
dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n",
@@ -337,12 +354,12 @@ static int adis_self_test(struct adis *adis)
return ret;
}
- msleep(adis->data->startup_delay);
+ msleep(timeouts->self_test_ms);
- ret = adis_check_status(adis);
+ ret = __adis_check_status(adis);
if (adis->data->self_test_no_autoclear)
- adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
+ __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
return ret;
}
@@ -360,19 +377,22 @@ int adis_initial_startup(struct adis *adis)
{
int ret;
+ mutex_lock(&adis->state_lock);
+
ret = adis_self_test(adis);
if (ret) {
dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n");
- adis_reset(adis);
- msleep(adis->data->startup_delay);
+ __adis_reset(adis);
ret = adis_self_test(adis);
if (ret) {
dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n");
- return ret;
+ goto out_unlock;
}
}
- return 0;
+out_unlock:
+ mutex_unlock(&adis->state_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(adis_initial_startup);
@@ -398,15 +418,15 @@ int adis_single_conversion(struct iio_dev *indio_dev,
unsigned int uval;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&adis->state_lock);
- ret = adis_read_reg(adis, chan->address, &uval,
+ ret = __adis_read_reg(adis, chan->address, &uval,
chan->scan_type.storagebits / 8);
if (ret)
goto err_unlock;
if (uval & error_mask) {
- ret = adis_check_status(adis);
+ ret = __adis_check_status(adis);
if (ret)
goto err_unlock;
}
@@ -418,7 +438,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
err_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&adis->state_lock);
return ret;
}
EXPORT_SYMBOL_GPL(adis_single_conversion);
@@ -438,7 +458,12 @@ EXPORT_SYMBOL_GPL(adis_single_conversion);
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
struct spi_device *spi, const struct adis_data *data)
{
- mutex_init(&adis->txrx_lock);
+ if (!data || !data->timeouts) {
+ dev_err(&spi->dev, "No config data or timeouts not defined!\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&adis->state_lock);
adis->spi = spi;
adis->data = data;
iio_device_set_drvdata(indio_dev, adis);
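The txrx_lock is replaced by a state_lock held by the public adis_{read,write}_reg*() wrappers, while the new __adis_*() variants assume the caller already holds it; that keeps multi-register sequences atomic without nested locking and lets drivers stop borrowing indio_dev->mlock. The spi_transfer conversion from delay_usecs to delay.value/delay.unit is mechanical. A sketch of the composite-access pattern the drivers above now follow (illustrative helper, not part of the library):

#include <linux/iio/imu/adis.h>
#include <linux/mutex.h>

/*
 * Hold adis->state_lock across the whole read-modify-write and call
 * only the unlocked __adis_*() helpers inside the critical section.
 */
static int example_update_bits(struct adis *adis, unsigned int reg,
                               u16 mask, u16 val)
{
        u16 tmp;
        int ret;

        mutex_lock(&adis->state_lock);

        ret = __adis_read_reg_16(adis, reg, &tmp);
        if (ret)
                goto out_unlock;

        tmp = (tmp & ~mask) | (val & mask);
        ret = __adis_write_reg_16(adis, reg, tmp);

out_unlock:
        mutex_unlock(&adis->state_lock);
        return ret;
}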
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 44e46dc96e00..cfb1c19eb930 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -156,12 +156,14 @@ struct adis16400_state;
struct adis16400_chip_info {
const struct iio_chan_spec *channels;
+ const struct adis_timeout *timeouts;
const int num_channels;
const long flags;
unsigned int gyro_scale_micro;
unsigned int accel_scale_micro;
int temp_scale_nano;
int temp_offset;
+ /* set_freq() & get_freq() need to avoid using ADIS lib's state lock */
int (*set_freq)(struct adis16400_state *st, unsigned int freq);
int (*get_freq)(struct adis16400_state *st);
};
@@ -326,7 +328,7 @@ static int adis16334_get_freq(struct adis16400_state *st)
int ret;
uint16_t t;
- ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
if (ret)
return ret;
@@ -350,7 +352,7 @@ static int adis16334_set_freq(struct adis16400_state *st, unsigned int freq)
t <<= ADIS16334_RATE_DIV_SHIFT;
t |= ADIS16334_RATE_INT_CLK;
- return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
+ return __adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
}
static int adis16400_get_freq(struct adis16400_state *st)
@@ -358,7 +360,7 @@ static int adis16400_get_freq(struct adis16400_state *st)
int sps, ret;
uint16_t t;
- ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
if (ret)
return ret;
@@ -390,7 +392,7 @@ static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
else
st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
- return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
+ return __adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
}
static const unsigned int adis16400_3db_divisors[] = {
@@ -404,7 +406,7 @@ static const unsigned int adis16400_3db_divisors[] = {
[7] = 200, /* Not a valid setting */
};
-static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+static int __adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
{
struct adis16400_state *st = iio_priv(indio_dev);
uint16_t val16;
@@ -415,11 +417,11 @@ static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
break;
}
- ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
+ ret = __adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
if (ret)
return ret;
- ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
+ ret = __adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
(val16 & ~0x07) | i);
return ret;
}
@@ -507,32 +509,31 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long info)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct mutex *slock = &st->adis.state_lock;
int ret, sps;
switch (info) {
case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&indio_dev->mlock);
ret = adis_write_reg_16(&st->adis,
adis16400_addresses[chan->scan_index], val);
- mutex_unlock(&indio_dev->mlock);
return ret;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
/*
* Need to cache values so we can update if the frequency
* changes.
*/
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
st->filt_int = val;
/* Work out update to current value */
sps = st->variant->get_freq(st);
if (sps < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return sps;
}
- ret = adis16400_set_filter(indio_dev, sps,
+ ret = __adis16400_set_filter(indio_dev, sps,
val * 1000 + val2 / 1000);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
sps = val * 1000 + val2 / 1000;
@@ -540,9 +541,9 @@ static int adis16400_write_raw(struct iio_dev *indio_dev,
if (sps <= 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
ret = st->variant->set_freq(st, sps);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret;
default:
return -EINVAL;
@@ -553,6 +554,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct mutex *slock = &st->adis.state_lock;
int16_t val16;
int ret;
@@ -596,10 +598,8 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&indio_dev->mlock);
ret = adis_read_reg_16(&st->adis,
adis16400_addresses[chan->scan_index], &val16);
- mutex_unlock(&indio_dev->mlock);
if (ret)
return ret;
val16 = sign_extend32(val16, 11);
@@ -610,27 +610,27 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = st->variant->temp_offset;
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(slock);
/* Need both the number of taps and the sampling frequency */
- ret = adis_read_reg_16(&st->adis,
+ ret = __adis_read_reg_16(&st->adis,
ADIS16400_SENS_AVG,
&val16);
if (ret) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
return ret;
}
ret = st->variant->get_freq(st);
- if (ret >= 0) {
- ret /= adis16400_3db_divisors[val16 & 0x07];
- *val = ret / 1000;
- *val2 = (ret % 1000) * 1000;
- }
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(slock);
if (ret)
return ret;
+ ret /= adis16400_3db_divisors[val16 & 0x07];
+ *val = ret / 1000;
+ *val2 = (ret % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(slock);
ret = st->variant->get_freq(st);
+ mutex_unlock(slock);
if (ret)
return ret;
*val = ret / 1000;
@@ -930,6 +930,36 @@ static const struct iio_chan_spec adis16334_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
+static const struct adis_timeout adis16300_timeouts = {
+ .reset_ms = ADIS16400_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16400_STARTUP_DELAY,
+ .self_test_ms = ADIS16400_STARTUP_DELAY,
+};
+
+static const struct adis_timeout adis16362_timeouts = {
+ .reset_ms = 130,
+ .sw_reset_ms = 130,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16400_timeouts = {
+ .reset_ms = 170,
+ .sw_reset_ms = 170,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16445_timeouts = {
+ .reset_ms = 55,
+ .sw_reset_ms = 55,
+ .self_test_ms = 16,
+};
+
+static const struct adis_timeout adis16448_timeouts = {
+ .reset_ms = 90,
+ .sw_reset_ms = 90,
+ .self_test_ms = 45,
+};
+
static struct adis16400_chip_info adis16400_chips[] = {
[ADIS16300] = {
.channels = adis16300_channels,
@@ -942,6 +972,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16334] = {
.channels = adis16334_channels,
@@ -965,6 +996,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16360] = {
.channels = adis16350_channels,
@@ -977,6 +1009,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16362] = {
.channels = adis16350_channels,
@@ -989,6 +1022,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16362_timeouts,
},
[ADIS16364] = {
.channels = adis16350_channels,
@@ -1001,6 +1035,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16362_timeouts,
},
[ADIS16367] = {
.channels = adis16350_channels,
@@ -1013,6 +1048,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16300_timeouts,
},
[ADIS16400] = {
.channels = adis16400_channels,
@@ -1024,6 +1060,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
+ .timeouts = &adis16400_timeouts,
},
[ADIS16445] = {
.channels = adis16445_channels,
@@ -1037,6 +1074,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
+ .timeouts = &adis16445_timeouts,
},
[ADIS16448] = {
.channels = adis16448_channels,
@@ -1050,6 +1088,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
+ .timeouts = &adis16448_timeouts,
}
};
@@ -1087,7 +1126,6 @@ static const struct adis_data adis16400_data = {
.write_delay = 50,
.self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
- .startup_delay = ADIS16400_STARTUP_DELAY,
.status_error_msgs = adis16400_status_error_msgs,
.status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
@@ -1121,11 +1159,28 @@ static void adis16400_setup_chan_mask(struct adis16400_state *st)
}
}
+static struct adis_data *adis16400_adis_data_alloc(struct adis16400_state *st,
+ struct device *dev)
+{
+ struct adis_data *data;
+
+ data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(data, &adis16400_data, sizeof(*data));
+
+ data->timeouts = st->variant->timeouts;
+
+ return data;
+}
+
static int adis16400_probe(struct spi_device *spi)
{
struct adis16400_state *st;
struct iio_dev *indio_dev;
int ret;
+ const struct adis_data *adis16400_data;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
@@ -1152,7 +1207,11 @@ static int adis16400_probe(struct spi_device *spi)
st->adis.burst->extra_len = sizeof(u16);
}
- ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+ adis16400_data = adis16400_adis_data_alloc(st, &spi->dev);
+ if (IS_ERR(adis16400_data))
+ return PTR_ERR(adis16400_data);
+
+ ret = adis_init(&st->adis, indio_dev, spi, adis16400_data);
if (ret)
return ret;
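
The hunk above replaces the shared, read-only adis16400_data with a per-device copy so each chip variant can point the timeouts field at its own table. A minimal sketch of the pattern, with illustrative names (template_data and clone_adis_data are not part of the driver):

    static const struct adis_data template_data;	/* shared, read-only template */

    /* Clone the template into device-managed memory and apply the
     * per-variant timeout table; the copy is freed automatically on
     * driver detach thanks to devm_. */
    static struct adis_data *clone_adis_data(struct device *dev,
    					     const struct adis_timeout *timeouts)
    {
    	struct adis_data *data;

    	data = devm_kmalloc(dev, sizeof(*data), GFP_KERNEL);
    	if (!data)
    		return ERR_PTR(-ENOMEM);

    	memcpy(data, &template_data, sizeof(*data));
    	data->timeouts = timeouts;	/* per-variant override */

    	return data;
    }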
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index b55812521537..9539cfe4a259 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -383,6 +383,12 @@ static const char * const adis16460_status_error_msgs[] = {
[ADIS16460_DIAG_STAT_FLASH_UPT] = "Flash update failure",
};
+static const struct adis_timeout adis16460_timeouts = {
+ .reset_ms = 225,
+ .sw_reset_ms = 225,
+ .self_test_ms = 10,
+};
+
static const struct adis_data adis16460_data = {
.diag_stat_reg = ADIS16460_REG_DIAG_STAT,
.glob_cmd_reg = ADIS16460_REG_GLOB_CMD,
@@ -398,6 +404,7 @@ static const struct adis_data adis16460_data = {
BIT(ADIS16460_DIAG_STAT_SPI_COMM) |
BIT(ADIS16460_DIAG_STAT_FLASH_UPT),
.enable_irq = adis16460_enable_irq,
+ .timeouts = &adis16460_timeouts,
};
static int adis16460_probe(struct spi_device *spi)
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 748f8bbf184d..dac87f1001fd 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -138,6 +138,7 @@ struct adis16480_chip_info {
unsigned int max_dec_rate;
const unsigned int *filter_freqs;
bool has_pps_clk_mode;
+ const struct adis_timeout *timeouts;
};
enum adis16480_int_pin {
@@ -555,6 +556,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int freq)
{
struct adis16480 *st = iio_priv(indio_dev);
+ struct mutex *slock = &st->adis.state_lock;
unsigned int enable_mask, offset, reg;
unsigned int diff, best_diff;
unsigned int i, best_freq;
@@ -565,9 +567,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
offset = ad16480_filter_data[chan->scan_index][1];
enable_mask = BIT(offset + 2);
- ret = adis_read_reg_16(&st->adis, reg, &val);
+ mutex_lock(slock);
+
+ ret = __adis_read_reg_16(&st->adis, reg, &val);
if (ret)
- return ret;
+ goto out_unlock;
if (freq == 0) {
val &= ~enable_mask;
@@ -589,7 +593,11 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
val |= enable_mask;
}
- return adis_write_reg_16(&st->adis, reg, val);
+ ret = __adis_write_reg_16(&st->adis, reg, val);
+out_unlock:
+ mutex_unlock(slock);
+
+ return ret;
}
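
Taking state_lock once and switching to the unlocked __adis_read_reg_16()/__adis_write_reg_16() accessors makes the read-modify-write of the filter register atomic; the locked adis_read_reg_16() variants would release the lock between the two accesses and let a concurrent writer slip in. A condensed sketch of the pattern (update_reg_bits is an illustrative name):

    static int update_reg_bits(struct adis *adis, unsigned int reg,
    			       u16 clear, u16 set)
    {
    	u16 val;
    	int ret;

    	mutex_lock(&adis->state_lock);
    	ret = __adis_read_reg_16(adis, reg, &val);
    	if (ret)
    		goto out_unlock;

    	val = (val & ~clear) | set;
    	ret = __adis_write_reg_16(adis, reg, val);
    out_unlock:
    	mutex_unlock(&adis->state_lock);
    	return ret;
    }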
static int adis16480_read_raw(struct iio_dev *indio_dev,
@@ -779,6 +787,7 @@ enum adis16480_variant {
ADIS16480,
ADIS16485,
ADIS16488,
+ ADIS16490,
ADIS16495_1,
ADIS16495_2,
ADIS16495_3,
@@ -787,6 +796,30 @@ enum adis16480_variant {
ADIS16497_3,
};
+static const struct adis_timeout adis16485_timeouts = {
+ .reset_ms = 560,
+ .sw_reset_ms = 120,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16480_timeouts = {
+ .reset_ms = 560,
+ .sw_reset_ms = 560,
+ .self_test_ms = 12,
+};
+
+static const struct adis_timeout adis16495_timeouts = {
+ .reset_ms = 170,
+ .sw_reset_ms = 130,
+ .self_test_ms = 40,
+};
+
+static const struct adis_timeout adis16495_1_timeouts = {
+ .reset_ms = 250,
+ .sw_reset_ms = 210,
+ .self_test_ms = 20,
+};
+
static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16375] = {
.channels = adis16485_channels,
@@ -805,6 +838,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16485_timeouts,
},
[ADIS16480] = {
.channels = adis16480_channels,
@@ -817,6 +851,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16480_timeouts,
},
[ADIS16485] = {
.channels = adis16485_channels,
@@ -829,6 +864,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16485_timeouts,
},
[ADIS16488] = {
.channels = adis16480_channels,
@@ -841,6 +877,21 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.int_clk = 2460000,
.max_dec_rate = 2048,
.filter_freqs = adis16480_def_filter_freqs,
+ .timeouts = &adis16485_timeouts,
+ },
+ [ADIS16490] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(100),
+ .accel_max_val = IIO_M_S_2_TO_G(16000 << 16),
+ .accel_max_scale = 8,
+ .temp_scale = 14285, /* 14.285 milli degree Celsius */
+ .int_clk = 4250000,
+ .max_dec_rate = 4250,
+ .filter_freqs = adis16495_def_filter_freqs,
+ .has_pps_clk_mode = true,
+ .timeouts = &adis16495_timeouts,
},
[ADIS16495_1] = {
.channels = adis16485_channels,
@@ -854,6 +905,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16495_2] = {
.channels = adis16485_channels,
@@ -867,6 +919,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16495_3] = {
.channels = adis16485_channels,
@@ -880,6 +933,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16497_1] = {
.channels = adis16485_channels,
@@ -893,6 +947,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16497_2] = {
.channels = adis16485_channels,
@@ -906,6 +961,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
[ADIS16497_3] = {
.channels = adis16485_channels,
@@ -919,6 +975,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.max_dec_rate = 4250,
.filter_freqs = adis16495_def_filter_freqs,
.has_pps_clk_mode = true,
+ .timeouts = &adis16495_1_timeouts,
},
};
@@ -947,14 +1004,14 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
uint16_t val;
int ret;
- ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
+ ret = __adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
if (ret)
return ret;
val &= ~ADIS16480_DRDY_EN_MSK;
val |= ADIS16480_DRDY_EN(enable);
- return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
+ return __adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, val);
}
static int adis16480_initial_setup(struct iio_dev *indio_dev)
@@ -1188,9 +1245,26 @@ static int adis16480_get_ext_clocks(struct adis16480 *st)
return 0;
}
+static struct adis_data *adis16480_adis_data_alloc(struct adis16480 *st,
+ struct device *dev)
+{
+ struct adis_data *data;
+
+ data = devm_kmalloc(dev, sizeof(struct adis_data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(data, &adis16480_data, sizeof(*data));
+
+ data->timeouts = st->chip_info->timeouts;
+
+ return data;
+}
+
static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct adis_data *adis16480_data;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
@@ -1211,7 +1285,11 @@ static int adis16480_probe(struct spi_device *spi)
indio_dev->info = &adis16480_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis_init(&st->adis, indio_dev, spi, &adis16480_data);
+ adis16480_data = adis16480_adis_data_alloc(st, &spi->dev);
+ if (IS_ERR(adis16480_data))
+ return PTR_ERR(adis16480_data);
+
+ ret = adis_init(&st->adis, indio_dev, spi, adis16480_data);
if (ret)
return ret;
@@ -1278,6 +1356,7 @@ static const struct spi_device_id adis16480_ids[] = {
{ "adis16480", ADIS16480 },
{ "adis16485", ADIS16485 },
{ "adis16488", ADIS16488 },
+ { "adis16490", ADIS16490 },
{ "adis16495-1", ADIS16495_1 },
{ "adis16495-2", ADIS16495_2 },
{ "adis16495-3", ADIS16495_3 },
@@ -1293,6 +1372,7 @@ static const struct of_device_id adis16480_of_match[] = {
{ .compatible = "adi,adis16480" },
{ .compatible = "adi,adis16485" },
{ .compatible = "adi,adis16488" },
+ { .compatible = "adi,adis16490" },
{ .compatible = "adi,adis16495-1" },
{ .compatible = "adi,adis16495-2" },
{ .compatible = "adi,adis16495-3" },
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 4998a89d083d..3f4dd5c00b03 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -129,7 +129,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
return -ENOMEM;
if (adis->data->has_paging) {
- mutex_lock(&adis->txrx_lock);
+ mutex_lock(&adis->state_lock);
if (adis->current_page != 0) {
adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
adis->tx[1] = 0;
@@ -144,7 +144,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
if (adis->data->has_paging) {
adis->current_page = 0;
- mutex_unlock(&adis->txrx_lock);
+ mutex_unlock(&adis->state_lock);
}
iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index e4c4c12236a7..017bc0fcc365 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -10,11 +10,12 @@ config INV_MPU6050_IIO
config INV_MPU6050_I2C
tristate "Invensense MPU6050 devices (I2C)"
- depends on I2C_MUX
+ depends on I2C
+ select I2C_MUX
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6000/6050/6500/6515,
+ This driver supports the Invensense MPU6050/6500/6515,
MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
over I2C.
This driver can be built as a module. The module will be called
@@ -26,8 +27,8 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6000/6050/6500/6515,
- MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
+ This driver supports the Invensense MPU6000/6500/6515,
+ MPU9250/9255 and ICM20608/20602 motion tracking devices
over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0686e41bb8a1..5096fc49012d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -104,6 +104,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
+ .temp_fifo_enable = false,
.magn_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
.user_ctrl = 0,
@@ -856,19 +857,27 @@ static const struct iio_chan_spec_ext_info inv_ext_info[] = {
.ext_info = inv_ext_info, \
}
+#define INV_MPU6050_TEMP_CHAN(_index) \
+ { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_OFFSET) \
+ | BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
static const struct iio_chan_spec inv_mpu_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
- /*
- * Note that temperature should only be via polled reading only,
- * not the final scan elements output.
- */
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
- | BIT(IIO_CHAN_INFO_OFFSET)
- | BIT(IIO_CHAN_INFO_SCALE),
- .scan_index = -1,
- },
+
+ INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
@@ -878,22 +887,29 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
};
+#define INV_MPU6050_SCAN_MASK_3AXIS_ACCEL \
+ (BIT(INV_MPU6050_SCAN_ACCL_X) \
+ | BIT(INV_MPU6050_SCAN_ACCL_Y) \
+ | BIT(INV_MPU6050_SCAN_ACCL_Z))
+
+#define INV_MPU6050_SCAN_MASK_3AXIS_GYRO \
+ (BIT(INV_MPU6050_SCAN_GYRO_X) \
+ | BIT(INV_MPU6050_SCAN_GYRO_Y) \
+ | BIT(INV_MPU6050_SCAN_GYRO_Z))
+
+#define INV_MPU6050_SCAN_MASK_TEMP (BIT(INV_MPU6050_SCAN_TEMP))
+
static const unsigned long inv_mpu_scan_masks[] = {
/* 3-axis accel */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis gyro */
- BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + gyro */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU6050_SCAN_MASK_TEMP,
0,
};
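
Listing every base combination both with and without INV_MPU6050_SCAN_MASK_TEMP matters because the IIO core picks the first entry of available_scan_masks that is a superset of what userspace requested; without the temperature variants, enabling the temperature channel could only be satisfied by a larger mask. A simplified sketch of the matching rule (the real logic lives in the IIO core; single-word masks assumed here):

    static unsigned long match_scan_mask(const unsigned long *av_masks,
    					 unsigned long requested)
    {
    	for (; *av_masks; av_masks++) {
    		if ((requested & ~*av_masks) == 0)
    			return *av_masks;	/* first superset wins */
    	}
    	return 0;	/* no usable mask */
    }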
@@ -915,19 +931,30 @@ static const unsigned long inv_mpu_scan_masks[] = {
.ext_info = inv_ext_info, \
}
+static const struct iio_chan_spec inv_mpu9150_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
+
+ INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+
+ /* Magnetometer resolution is 13 bits */
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_X, 13, INV_MPU9X50_SCAN_MAGN_X),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Y, 13, INV_MPU9X50_SCAN_MAGN_Y),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 13, INV_MPU9X50_SCAN_MAGN_Z),
+};
+
static const struct iio_chan_spec inv_mpu9250_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
- /*
- * Note that temperature should only be via polled reading only,
- * not the final scan elements output.
- */
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
- | BIT(IIO_CHAN_INFO_OFFSET)
- | BIT(IIO_CHAN_INFO_SCALE),
- .scan_index = -1,
- },
+
+ INV_MPU6050_TEMP_CHAN(INV_MPU6050_SCAN_TEMP),
+
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
@@ -942,98 +969,50 @@ static const struct iio_chan_spec inv_mpu9250_channels[] = {
INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 16, INV_MPU9X50_SCAN_MAGN_Z),
};
+#define INV_MPU9X50_SCAN_MASK_3AXIS_MAGN \
+ (BIT(INV_MPU9X50_SCAN_MAGN_X) \
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y) \
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z))
+
static const unsigned long inv_mpu9x50_scan_masks[] = {
/* 3-axis accel */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis gyro */
- BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis magn */
- BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU9X50_SCAN_MASK_3AXIS_MAGN | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + gyro */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + magn */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+ | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis gyro + magn */
- BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z)
- | BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+ | INV_MPU6050_SCAN_MASK_TEMP,
/* 9-axis accel + gyro + magn */
- BIT(INV_MPU6050_SCAN_ACCL_X)
- | BIT(INV_MPU6050_SCAN_ACCL_Y)
- | BIT(INV_MPU6050_SCAN_ACCL_Z)
- | BIT(INV_MPU6050_SCAN_GYRO_X)
- | BIT(INV_MPU6050_SCAN_GYRO_Y)
- | BIT(INV_MPU6050_SCAN_GYRO_Z)
- | BIT(INV_MPU9X50_SCAN_MAGN_X)
- | BIT(INV_MPU9X50_SCAN_MAGN_Y)
- | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN,
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU9X50_SCAN_MASK_3AXIS_MAGN
+ | INV_MPU6050_SCAN_MASK_TEMP,
0,
};
-static const struct iio_chan_spec inv_icm20602_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
- {
- .type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
- | BIT(IIO_CHAN_INFO_OFFSET)
- | BIT(IIO_CHAN_INFO_SCALE),
- .scan_index = INV_ICM20602_SCAN_TEMP,
- .scan_type = {
- .sign = 's',
- .realbits = 16,
- .storagebits = 16,
- .shift = 0,
- .endianness = IIO_BE,
- },
- },
-
- INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_ICM20602_SCAN_GYRO_X),
- INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_ICM20602_SCAN_GYRO_Y),
- INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_ICM20602_SCAN_GYRO_Z),
-
- INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_ICM20602_SCAN_ACCL_Y),
- INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_ICM20602_SCAN_ACCL_X),
- INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
-};
-
static const unsigned long inv_icm20602_scan_masks[] = {
/* 3-axis accel + temp (mandatory) */
- BIT(INV_ICM20602_SCAN_ACCL_X)
- | BIT(INV_ICM20602_SCAN_ACCL_Y)
- | BIT(INV_ICM20602_SCAN_ACCL_Z)
- | BIT(INV_ICM20602_SCAN_TEMP),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_TEMP,
/* 3-axis gyro + temp (mandatory) */
- BIT(INV_ICM20602_SCAN_GYRO_X)
- | BIT(INV_ICM20602_SCAN_GYRO_Y)
- | BIT(INV_ICM20602_SCAN_GYRO_Z)
- | BIT(INV_ICM20602_SCAN_TEMP),
+ INV_MPU6050_SCAN_MASK_3AXIS_GYRO | INV_MPU6050_SCAN_MASK_TEMP,
/* 6-axis accel + gyro + temp (mandatory) */
- BIT(INV_ICM20602_SCAN_ACCL_X)
- | BIT(INV_ICM20602_SCAN_ACCL_Y)
- | BIT(INV_ICM20602_SCAN_ACCL_Z)
- | BIT(INV_ICM20602_SCAN_GYRO_X)
- | BIT(INV_ICM20602_SCAN_GYRO_Y)
- | BIT(INV_ICM20602_SCAN_GYRO_Z)
- | BIT(INV_ICM20602_SCAN_TEMP),
+ INV_MPU6050_SCAN_MASK_3AXIS_ACCEL | INV_MPU6050_SCAN_MASK_3AXIS_GYRO
+ | INV_MPU6050_SCAN_MASK_TEMP,
0,
};
@@ -1241,7 +1220,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
irq_type = irqd_get_trigger_type(desc);
if (!irq_type)
irq_type = IRQF_TRIGGER_RISING;
- if (irq_type == IRQF_TRIGGER_RISING)
+ if (irq_type & IRQF_TRIGGER_RISING) // rising or both-edge
st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
else if (irq_type == IRQF_TRIGGER_FALLING)
st->irq_mask = INV_MPU6050_ACTIVE_LOW;
@@ -1324,25 +1303,20 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
inv_mpu_bus_setup(indio_dev);
switch (chip_type) {
+ case INV_MPU9150:
+ indio_dev->channels = inv_mpu9150_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9150_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
+ break;
case INV_MPU9250:
case INV_MPU9255:
- /*
- * Use magnetometer inside the chip only if there is no i2c
- * auxiliary device in use.
- */
- if (!st->magn_disabled) {
- indio_dev->channels = inv_mpu9250_channels;
- indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
- indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
- } else {
- indio_dev->channels = inv_mpu_channels;
- indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
- indio_dev->available_scan_masks = inv_mpu_scan_masks;
- }
+ indio_dev->channels = inv_mpu9250_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
break;
case INV_ICM20602:
- indio_dev->channels = inv_icm20602_channels;
- indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
indio_dev->available_scan_masks = inv_icm20602_scan_masks;
break;
default:
@@ -1351,6 +1325,15 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
indio_dev->available_scan_masks = inv_mpu_scan_masks;
break;
}
+ /*
+ * Use magnetometer inside the chip only if there is no i2c
+ * auxiliary device in use. Otherwise, fall back to 6-axis only.
+ */
+ if (st->magn_disabled) {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
indio_dev->info = &mpu_info;
indio_dev->modes = INDIO_BUFFER_TRIGGERED;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 389cc8505e0e..f47a28b4be23 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -77,6 +77,7 @@ static bool inv_mpu_i2c_aux_bus(struct device *dev)
case INV_ICM20602:
/* no i2c auxiliary bus on the chip */
return false;
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
if (st->magn_disabled)
@@ -102,6 +103,7 @@ static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
struct device_node *mux_node;
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
mux_node = of_get_child_by_name(dev->of_node, "i2c-gate");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index b096e010d4ee..6158fca7f70e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -86,6 +86,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
+ * @temp_fifo_enable: enable temp data output
* @magn_fifo_enable: enable magn data output
* @divider: chip sample rate divider (sample rate divider - 1)
*/
@@ -95,6 +96,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
+ unsigned int temp_fifo_enable:1;
unsigned int magn_fifo_enable:1;
u8 divider;
u8 user_ctrl;
@@ -184,6 +186,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_SLAVE_2 0x04
#define INV_MPU6050_BIT_ACCEL_OUT 0x08
#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_BIT_TEMP_OUT 0x80
#define INV_MPU6050_REG_I2C_MST_CTRL 0x24
#define INV_MPU6050_BITS_I2C_MST_CLK_400KHZ 0x0D
@@ -268,8 +271,8 @@ struct inv_mpu6050_state {
/* MPU9X50 9-axis magnetometer */
#define INV_MPU9X50_BYTES_MAGN 7
-/* ICM20602 FIFO samples include temperature readings */
-#define INV_ICM20602_BYTES_PER_TEMP_SENSOR 2
+/* FIFO temperature sample size */
+#define INV_MPU6050_BYTES_PER_TEMP_SENSOR 2
/* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
@@ -298,7 +301,7 @@ struct inv_mpu6050_state {
#define INV_ICM20608_TEMP_OFFSET 8170
#define INV_ICM20608_TEMP_SCALE 3059976
-/* 6 + 6 + 7 (for MPU9x50) = 19 round up to 24 and plus 8 */
+/* 6 + 6 + 2 + 7 (for MPU9x50) = 21, rounded up to 24, plus 8 */
#define INV_MPU6050_OUTPUT_DATA_SIZE 32
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
@@ -344,6 +347,7 @@ enum inv_mpu6050_scan {
INV_MPU6050_SCAN_ACCL_X,
INV_MPU6050_SCAN_ACCL_Y,
INV_MPU6050_SCAN_ACCL_Z,
+ INV_MPU6050_SCAN_TEMP,
INV_MPU6050_SCAN_GYRO_X,
INV_MPU6050_SCAN_GYRO_Y,
INV_MPU6050_SCAN_GYRO_Z,
@@ -355,18 +359,6 @@ enum inv_mpu6050_scan {
INV_MPU9X50_SCAN_TIMESTAMP,
};
-/* scan element definition for ICM20602, which includes temperature */
-enum inv_icm20602_scan {
- INV_ICM20602_SCAN_ACCL_X,
- INV_ICM20602_SCAN_ACCL_Y,
- INV_ICM20602_SCAN_ACCL_Z,
- INV_ICM20602_SCAN_TEMP,
- INV_ICM20602_SCAN_GYRO_X,
- INV_ICM20602_SCAN_GYRO_Y,
- INV_ICM20602_SCAN_GYRO_Z,
- INV_ICM20602_SCAN_TIMESTAMP,
-};
-
enum inv_mpu6050_filter_e {
INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
INV_MPU6050_FILTER_188HZ,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
index 02735af152c8..4f192352521e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -12,7 +12,9 @@
#include "inv_mpu_magn.h"
/*
- * MPU9250 magnetometer is an AKM AK8963 chip on I2C aux bus
+ * MPU9xxx magnetometer are AKM chips on I2C aux bus
+ * MPU9150 is AK8975
+ * MPU9250 is AK8963
*/
#define INV_MPU_MAGN_I2C_ADDR 0x0C
@@ -33,10 +35,10 @@
#define INV_MPU_MAGN_BITS_MODE_PWDN 0x00
#define INV_MPU_MAGN_BITS_MODE_SINGLE 0x01
#define INV_MPU_MAGN_BITS_MODE_FUSE 0x0F
-#define INV_MPU_MAGN_BIT_OUTPUT_BIT 0x10
+#define INV_MPU9250_MAGN_BIT_OUTPUT_BIT 0x10
-#define INV_MPU_MAGN_REG_CNTL2 0x0B
-#define INV_MPU_MAGN_BIT_SRST 0x01
+#define INV_MPU9250_MAGN_REG_CNTL2 0x0B
+#define INV_MPU9250_MAGN_BIT_SRST 0x01
#define INV_MPU_MAGN_REG_ASAX 0x10
#define INV_MPU_MAGN_REG_ASAY 0x11
@@ -48,6 +50,7 @@
static bool inv_magn_supported(const struct inv_mpu6050_state *st)
{
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
return true;
@@ -61,6 +64,7 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
{
uint8_t val;
uint8_t asa[3];
+ int32_t sensitivity;
int ret;
/* check whoami */
@@ -71,12 +75,19 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
if (val != INV_MPU_MAGN_BITS_WIA)
return -ENODEV;
- /* reset chip */
- ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
- INV_MPU_MAGN_REG_CNTL2,
- INV_MPU_MAGN_BIT_SRST);
- if (ret)
- return ret;
+ /* software reset for MPU925x only */
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU9250_MAGN_REG_CNTL2,
+ INV_MPU9250_MAGN_BIT_SRST);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
/* read fuse ROM data */
ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
@@ -98,22 +109,36 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
return ret;
/*
+ * Sensor sensitivity
+ * 1 uT = 0.01 G and the value is scaled by 1e6 (micro)
+ * sensitivity = x uT * 0.01 * 1e6
+ */
+ switch (st->chip_type) {
+ case INV_MPU9150:
+ /* sensor sensitivity is 0.3 uT */
+ sensitivity = 3000;
+ break;
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /* sensor sensitivity in 16 bits mode: 0.15 uT */
+ sensitivity = 1500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
* Sensitivity adjustment and scale to Gauss
*
* Hadj = H * (((ASA - 128) * 0.5 / 128) + 1)
* Factor simplification:
* Hadj = H * ((ASA + 128) / 256)
*
- * Sensor sentivity
- * 0.15 uT in 16 bits mode
- * 1 uT = 0.01 G and value is in micron (1e6)
- * sensitvity = 0.15 uT * 0.01 * 1e6
- *
- * raw_to_gauss = Hadj * 1500
+ * raw_to_gauss = Hadj * sensitivity
*/
- st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * 1500) / 256;
- st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * 1500) / 256;
- st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * sensitivity) / 256;
+ st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * sensitivity) / 256;
+ st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * sensitivity) / 256;
return 0;
}
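
For a concrete feel of the math: with a fuse-ROM adjustment byte of 176, the folded expression gives ((176 + 128) * 1500) / 256 = 1781 on an MPU9250 (0.15 uT/LSB) and ((176 + 128) * 3000) / 256 = 3562 on an MPU9150 (0.3 uT/LSB). A worked sketch (the helper name and the sample ASA value are illustrative):

    static int32_t magn_raw_to_gauss(uint8_t asa, int32_t sensitivity)
    {
    	/* Hadj = H * ((ASA + 128) / 256), folded with the uT -> G scale */
    	return (((int32_t)asa + 128) * sensitivity) / 256;
    }

    /* magn_raw_to_gauss(176, 1500) == 1781 (MPU9250/9255, 16-bit mode)
     * magn_raw_to_gauss(176, 3000) == 3562 (MPU9150) */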
@@ -129,6 +154,7 @@ static int inv_magn_init(struct inv_mpu6050_state *st)
*/
int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
{
+ uint8_t val;
int ret;
/* quit if chip is not supported */
@@ -179,10 +205,17 @@ int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
if (ret)
return ret;
- /* add 16 bits mode */
- ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1),
- INV_MPU_MAGN_BITS_MODE_SINGLE |
- INV_MPU_MAGN_BIT_OUTPUT_BIT);
+ /* add 16 bits mode for MPU925x */
+ val = INV_MPU_MAGN_BITS_MODE_SINGLE;
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ val |= INV_MPU9250_MAGN_BIT_OUTPUT_BIT;
+ break;
+ default:
+ break;
+ }
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1), val);
if (ret)
return ret;
@@ -237,6 +270,7 @@ int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
/* fill magnetometer orientation */
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
/* x <- y */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 10d16ec5104b..f9fdf4302a91 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -142,6 +142,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
+ if (st->chip_config.temp_fifo_enable)
+ d |= INV_MPU6050_BIT_TEMP_OUT;
if (st->chip_config.magn_fifo_enable)
d |= INV_MPU6050_BIT_SLAVE_0;
result = regmap_write(st->map, st->reg->fifo_en, d);
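
With the new bit in place, the FIFO enable mask is a plain OR of per-sensor bits. A worked value, using the register constants from the header above:

    u8 d = 0;

    d |= INV_MPU6050_BITS_GYRO_OUT;		/* 0x70 */
    d |= INV_MPU6050_BIT_TEMP_OUT;		/* 0x80 */
    /* d == 0xf0; adding INV_MPU6050_BIT_ACCEL_OUT (0x08) gives 0xf8 */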
@@ -183,11 +185,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
- if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
- dev_warn(regmap_get_device(st->map),
- "spurious interrupt with status 0x%x\n", int_status);
+ if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT))
goto end_session;
- }
if (!(st->chip_config.accl_fifo_enable |
st->chip_config.gyro_fifo_enable |
@@ -200,8 +199,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (st->chip_config.gyro_fifo_enable)
bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
- if (st->chip_type == INV_ICM20602)
- bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR;
+ if (st->chip_config.temp_fifo_enable)
+ bytes_per_datum += INV_MPU6050_BYTES_PER_TEMP_SENSOR;
if (st->chip_config.magn_fifo_enable)
bytes_per_datum += INV_MPU9X50_BYTES_MAGN;
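
The per-datum size now follows the enabled scan elements instead of special-casing the ICM20602. A sketch of the resulting arithmetic (mpu_bytes_per_datum is an illustrative helper; the byte counts come from the driver's constants):

    static size_t mpu_bytes_per_datum(bool accl, bool gyro, bool temp, bool magn)
    {
    	size_t n = 0;

    	if (accl)
    		n += 6;	/* INV_MPU6050_BYTES_PER_3AXIS_SENSOR */
    	if (gyro)
    		n += 6;
    	if (temp)
    		n += 2;	/* INV_MPU6050_BYTES_PER_TEMP_SENSOR */
    	if (magn)
    		n += 7;	/* INV_MPU9X50_BYTES_MAGN */

    	return n;	/* 21 with everything on, hence the 24 + 8 sizing */
    }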
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 142692fc0758..ec102d5a5c77 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -74,7 +74,6 @@ static int inv_mpu_probe(struct spi_device *spi)
static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
- {"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index d7d951927a44..5199fe790c30 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -24,6 +24,9 @@ static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
indio_dev->active_scan_mask) ||
test_bit(INV_MPU6050_SCAN_ACCL_Z,
indio_dev->active_scan_mask);
+
+ st->chip_config.temp_fifo_enable =
+ test_bit(INV_MPU6050_SCAN_TEMP, indio_dev->active_scan_mask);
}
static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
@@ -50,6 +53,7 @@ static void inv_scan_query(struct iio_dev *indio_dev)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
switch (st->chip_type) {
+ case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
return inv_scan_query_mpu9x50(indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index dc55d7dff3eb..9c3486a8134f 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -76,6 +76,7 @@ enum st_lsm6dsx_hw_id {
.endianness = IIO_LE, \
}, \
.event_spec = &st_lsm6dsx_event, \
+ .ext_info = st_lsm6dsx_accel_ext_info, \
.num_event_specs = 1, \
}
@@ -176,21 +177,38 @@ struct st_lsm6dsx_hw_ts_settings {
* @pullup_en: i2c controller pull-up register info (addr + mask).
* @aux_sens: aux sensor register info (addr + mask).
* @wr_once: write_once register info (addr + mask).
+ * @emb_func: embedded function register info (addr + mask).
+ * @num_ext_dev: max number of slave devices.
* @shub_out: sensor hub first output register info.
* @slv0_addr: slave0 address in secondary page.
* @dw_slv0_addr: slave0 write register address in secondary page.
* @batch_en: Enable/disable FIFO batching.
+ * @pause: controller pause value.
*/
struct st_lsm6dsx_shub_settings {
struct st_lsm6dsx_reg page_mux;
- struct st_lsm6dsx_reg master_en;
- struct st_lsm6dsx_reg pullup_en;
+ struct {
+ bool sec_page;
+ u8 addr;
+ u8 mask;
+ } master_en;
+ struct {
+ bool sec_page;
+ u8 addr;
+ u8 mask;
+ } pullup_en;
struct st_lsm6dsx_reg aux_sens;
struct st_lsm6dsx_reg wr_once;
- u8 shub_out;
+ struct st_lsm6dsx_reg emb_func;
+ u8 num_ext_dev;
+ struct {
+ bool sec_page;
+ u8 addr;
+ } shub_out;
u8 slv0_addr;
u8 dw_slv0_addr;
u8 batch_en;
+ u8 pause;
};
struct st_lsm6dsx_event_settings {
@@ -361,6 +379,7 @@ struct st_lsm6dsx_sensor {
* @enable_event: enabled event bitmask.
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
+ * @orientation: sensor chip orientation relative to main hardware.
*/
struct st_lsm6dsx_hw {
struct device *dev;
@@ -387,16 +406,21 @@ struct st_lsm6dsx_hw {
struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
const struct st_lsm6dsx_settings *settings;
+
+ struct iio_mount_matrix orientation;
};
-static const struct iio_event_spec st_lsm6dsx_event = {
+static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_EITHER,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE)
};
-static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
+static __maybe_unused const unsigned long st_lsm6dsx_available_scan_masks[] = {
+ 0x7, 0x0,
+};
+
extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
@@ -457,4 +481,19 @@ st_lsm6dsx_write_locked(struct st_lsm6dsx_hw *hw, unsigned int addr,
return err;
}
+static inline const struct iio_mount_matrix *
+st_lsm6dsx_get_mount_matrix(const struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ return &hw->orientation;
+}
+
+static const struct iio_chan_spec_ext_info st_lsm6dsx_accel_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_lsm6dsx_get_mount_matrix),
+ { }
+};
+
#endif /* ST_LSM6DSX_H */
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index cb536b81a1c2..bb899345f2bb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -336,12 +336,13 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 addr,
*/
int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
{
+ struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL;
+ int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
- int err, acc_sip, gyro_sip, ts_sip, read_len, offset;
- struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+ u8 ext_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
bool reset_ts = false;
__le16 fifo_status;
s64 ts = 0;
@@ -364,6 +365,8 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
acc_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
gyro_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_GYRO]);
+ if (hw->iio_devs[ST_LSM6DSX_ID_EXT0])
+ ext_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_EXT0]);
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw, ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
@@ -391,12 +394,13 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
* following pattern is repeated every 9 samples:
* - Gx, Gy, Gz, Ax, Ay, Az, Ts, Gx, Gy, Gz, Ts, Gx, ..
*/
+ ext_sip = ext_sensor ? ext_sensor->sip : 0;
gyro_sip = gyro_sensor->sip;
acc_sip = acc_sensor->sip;
ts_sip = hw->ts_sip;
offset = 0;
- while (acc_sip > 0 || gyro_sip > 0) {
+ while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
if (gyro_sip > 0) {
memcpy(gyro_buff, &hw->buff[offset],
ST_LSM6DSX_SAMPLE_SIZE);
@@ -407,6 +411,11 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
ST_LSM6DSX_SAMPLE_SIZE);
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
+ if (ext_sip > 0) {
+ memcpy(ext_buff, &hw->buff[offset],
+ ST_LSM6DSX_SAMPLE_SIZE);
+ offset += ST_LSM6DSX_SAMPLE_SIZE;
+ }
if (ts_sip-- > 0) {
u8 data[ST_LSM6DSX_SAMPLE_SIZE];
@@ -440,6 +449,10 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_ACC],
acc_buff, acc_sensor->ts_ref + ts);
+ if (ext_sip-- > 0)
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_EXT0],
+ ext_buff, ext_sensor->ts_ref + ts);
}
}
@@ -638,12 +651,12 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
err = st_lsm6dsx_sensor_set_enable(sensor, enable);
if (err < 0)
goto out;
-
- err = st_lsm6dsx_set_fifo_odr(sensor, enable);
- if (err < 0)
- goto out;
}
+ err = st_lsm6dsx_set_fifo_odr(sensor, enable);
+ if (err < 0)
+ goto out;
+
err = st_lsm6dsx_update_decimators(hw);
if (err < 0)
goto out;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index a7d40c02ce6b..84d219ae6aee 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -54,6 +54,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
@@ -655,6 +656,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x08,
.mask = GENMASK(5, 3),
},
+ [ST_LSM6DSX_ID_EXT0] = {
+ .addr = 0x09,
+ .mask = GENMASK(2, 0),
+ },
},
.fifo_ops = {
.update_fifo = st_lsm6dsx_update_fifo,
@@ -687,6 +692,39 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .shub_settings = {
+ .page_mux = {
+ .addr = 0x01,
+ .mask = BIT(7),
+ },
+ .master_en = {
+ .addr = 0x1a,
+ .mask = BIT(0),
+ },
+ .pullup_en = {
+ .addr = 0x1a,
+ .mask = BIT(3),
+ },
+ .aux_sens = {
+ .addr = 0x04,
+ .mask = GENMASK(5, 4),
+ },
+ .wr_once = {
+ .addr = 0x07,
+ .mask = BIT(5),
+ },
+ .emb_func = {
+ .addr = 0x19,
+ .mask = BIT(2),
+ },
+ .num_ext_dev = 1,
+ .shub_out = {
+ .addr = 0x2e,
+ },
+ .slv0_addr = 0x02,
+ .dw_slv0_addr = 0x0e,
+ .pause = 0x7,
+ },
.event_settings = {
.enable_reg = {
.addr = 0x58,
@@ -867,10 +905,12 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = BIT(6),
},
.master_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(2),
},
.pullup_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(3),
},
@@ -882,7 +922,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x14,
.mask = BIT(6),
},
- .shub_out = 0x02,
+ .num_ext_dev = 3,
+ .shub_out = {
+ .sec_page = true,
+ .addr = 0x02,
+ },
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
@@ -1241,10 +1285,12 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = BIT(6),
},
.master_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(2),
},
.pullup_en = {
+ .sec_page = true,
.addr = 0x14,
.mask = BIT(3),
},
@@ -1256,7 +1302,11 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x14,
.mask = BIT(6),
},
- .shub_out = 0x02,
+ .num_ext_dev = 3,
+ .shub_out = {
+ .sec_page = true,
+ .addr = 0x02,
+ },
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
@@ -1301,7 +1351,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id,
for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
- if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
+ if (st_lsm6dsx_sensor_settings[i].id[j].name &&
+ id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
break;
}
if (j < ST_LSM6DSX_MAX_ID)
@@ -1505,8 +1556,11 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- if (!hw->enable_event)
- st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (!hw->enable_event) {
+ err = st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (err < 0)
+ return err;
+ }
*val = (s16)le16_to_cpu(data);
@@ -1608,11 +1662,11 @@ static int st_lsm6dsx_event_setup(struct st_lsm6dsx_hw *hw, int state)
}
static int st_lsm6dsx_read_event(struct iio_dev *iio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
struct st_lsm6dsx_hw *hw = sensor->hw;
@@ -1826,14 +1880,14 @@ static const struct iio_info st_lsm6dsx_gyro_info = {
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
};
-static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
+static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
{
- struct device_node *np = hw->dev->of_node;
+ struct device *dev = hw->dev;
- if (!np)
+ if (!dev_fwnode(dev))
return -EINVAL;
- return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
+ return device_property_read_u32(dev, "st,drdy-int-pin", drdy_pin);
}
static int
@@ -1842,7 +1896,7 @@ st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
{
int err = 0, drdy_pin;
- if (st_lsm6dsx_of_get_drdy_pin(hw, &drdy_pin) < 0) {
+ if (st_lsm6dsx_get_drdy_pin(hw, &drdy_pin) < 0) {
struct st_sensors_platform_data *pdata;
struct device *dev = hw->dev;
@@ -1871,26 +1925,29 @@ st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
- struct device_node *np = hw->dev->of_node;
struct st_sensors_platform_data *pdata;
+ struct device *dev = hw->dev;
unsigned int data;
int err = 0;
hub_settings = &hw->settings->shub_settings;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "st,pullups")) ||
+ pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ if ((dev_fwnode(dev) && device_property_read_bool(dev, "st,pullups")) ||
(pdata && pdata->pullups)) {
- err = st_lsm6dsx_set_page(hw, true);
- if (err < 0)
- return err;
+ if (hub_settings->pullup_en.sec_page) {
+ err = st_lsm6dsx_set_page(hw, true);
+ if (err < 0)
+ return err;
+ }
data = ST_LSM6DSX_SHIFT_VAL(1, hub_settings->pullup_en.mask);
err = regmap_update_bits(hw->regmap,
hub_settings->pullup_en.addr,
hub_settings->pullup_en.mask, data);
- st_lsm6dsx_set_page(hw, false);
+ if (hub_settings->pullup_en.sec_page)
+ st_lsm6dsx_set_page(hw, false);
if (err < 0)
return err;
@@ -1908,6 +1965,16 @@ static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
hub_settings->aux_sens.mask, data);
st_lsm6dsx_set_page(hw, false);
+
+ if (err < 0)
+ return err;
+ }
+
+ if (hub_settings->emb_func.addr) {
+ data = ST_LSM6DSX_SHIFT_VAL(1, hub_settings->emb_func.mask);
+ err = regmap_update_bits(hw->regmap,
+ hub_settings->emb_func.addr,
+ hub_settings->emb_func.mask, data);
}
return err;
@@ -2157,9 +2224,9 @@ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
{
- struct device_node *np = hw->dev->of_node;
struct st_sensors_platform_data *pdata;
const struct st_lsm6dsx_reg *reg;
+ struct device *dev = hw->dev;
unsigned long irq_type;
bool irq_active_low;
int err;
@@ -2187,8 +2254,8 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ if ((dev_fwnode(dev) && device_property_read_bool(dev, "drive-open-drain")) ||
(pdata && pdata->open_drain)) {
reg = &hw->settings->irq_config.od;
err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
@@ -2218,7 +2285,6 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
{
struct st_sensors_platform_data *pdata = dev->platform_data;
const struct st_lsm6dsx_shub_settings *hub_settings;
- struct device_node *np = dev->of_node;
struct st_lsm6dsx_hw *hw;
const char *name = NULL;
int i, err;
@@ -2272,6 +2338,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
+ err = iio_read_mount_matrix(hw->dev, "mount-matrix", &hw->orientation);
+ if (err)
+ return err;
+
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
if (!hw->iio_devs[i])
continue;
@@ -2281,7 +2351,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- if ((np && of_property_read_bool(np, "wakeup-source")) ||
+ if ((dev_fwnode(dev) && device_property_read_bool(dev, "wakeup-source")) ||
(pdata && pdata->wakeup_source))
device_init_wakeup(dev, true);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index cd47ec1fedcb..0fb32131afce 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "st_lsm6dsx.h"
@@ -122,7 +121,7 @@ static struct i2c_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_i2c",
.pm = &st_lsm6dsx_pm_ops,
- .of_match_table = of_match_ptr(st_lsm6dsx_i2c_of_match),
+ .of_match_table = st_lsm6dsx_i2c_of_match,
},
.probe = st_lsm6dsx_i2c_probe,
.id_table = st_lsm6dsx_i2c_id_table,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index fa5d1001a46c..eea555617d4a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -30,7 +30,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_MAX_SLV_NUM 3
#define ST_LSM6DSX_SLV_ADDR(n, base) ((base) + (n) * 3)
#define ST_LSM6DSX_SLV_SUB_ADDR(n, base) ((base) + 1 + (n) * 3)
#define ST_LSM6DSX_SLV_CONFIG(n, base) ((base) + 2 + (n) * 3)
@@ -102,24 +101,31 @@ static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
}
/**
- * st_lsm6dsx_shub_read_reg - read i2c controller register
+ * st_lsm6dsx_shub_read_output - read i2c controller register
*
* Read st_lsm6dsx i2c controller register
*/
-static int st_lsm6dsx_shub_read_reg(struct st_lsm6dsx_hw *hw, u8 addr,
- u8 *data, int len)
+static int
+st_lsm6dsx_shub_read_output(struct st_lsm6dsx_hw *hw, u8 *data,
+ int len)
{
+ const struct st_lsm6dsx_shub_settings *hub_settings;
int err;
mutex_lock(&hw->page_lock);
- err = st_lsm6dsx_set_page(hw, true);
- if (err < 0)
- goto out;
+ hub_settings = &hw->settings->shub_settings;
+ if (hub_settings->shub_out.sec_page) {
+ err = st_lsm6dsx_set_page(hw, true);
+ if (err < 0)
+ goto out;
+ }
- err = regmap_bulk_read(hw->regmap, addr, data, len);
+ err = regmap_bulk_read(hw->regmap, hub_settings->shub_out.addr,
+ data, len);
- st_lsm6dsx_set_page(hw, false);
+ if (hub_settings->shub_out.sec_page)
+ st_lsm6dsx_set_page(hw, false);
out:
mutex_unlock(&hw->page_lock);
@@ -186,15 +192,18 @@ static int st_lsm6dsx_shub_master_enable(struct st_lsm6dsx_sensor *sensor,
mutex_lock(&hw->page_lock);
hub_settings = &hw->settings->shub_settings;
- err = st_lsm6dsx_set_page(hw, true);
- if (err < 0)
- goto out;
+ if (hub_settings->master_en.sec_page) {
+ err = st_lsm6dsx_set_page(hw, true);
+ if (err < 0)
+ goto out;
+ }
data = ST_LSM6DSX_SHIFT_VAL(enable, hub_settings->master_en.mask);
err = regmap_update_bits(hw->regmap, hub_settings->master_en.addr,
hub_settings->master_en.mask, data);
- st_lsm6dsx_set_page(hw, false);
+ if (hub_settings->master_en.sec_page)
+ st_lsm6dsx_set_page(hw, false);
out:
mutex_unlock(&hw->page_lock);
@@ -212,16 +221,21 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
u8 *data, int len)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
+ u8 config[3], slv_addr, slv_config = 0;
struct st_lsm6dsx_hw *hw = sensor->hw;
- u8 config[3], slv_addr;
+ const struct st_lsm6dsx_reg *aux_sens;
int err;
hub_settings = &hw->settings->shub_settings;
slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
+ aux_sens = &hw->settings->shub_settings.aux_sens;
+ /* do not overwrite aux_sens */
+ if (slv_addr + 2 == aux_sens->addr)
+ slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);
config[0] = (sensor->ext_info.addr << 1) | 1;
config[1] = addr;
- config[2] = len & ST_LS6DSX_READ_OP_MASK;
+ config[2] = (len & ST_LS6DSX_READ_OP_MASK) | slv_config;
err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
@@ -234,12 +248,14 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
st_lsm6dsx_shub_wait_complete(hw);
- err = st_lsm6dsx_shub_read_reg(hw, hub_settings->shub_out, data,
- len & ST_LS6DSX_READ_OP_MASK);
+ err = st_lsm6dsx_shub_read_output(hw, data,
+ len & ST_LS6DSX_READ_OP_MASK);
st_lsm6dsx_shub_master_enable(sensor, false);
- memset(config, 0, sizeof(config));
+ config[0] = hub_settings->pause;
+ config[1] = 0;
+ config[2] = slv_config;
return st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
}
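
The slv_config handling above exists because on some parts the SLV0 configuration byte shares a register with aux_sens (the case slv_addr + 2 == aux_sens->addr): writing a bare read length would wipe the "number of external sensors" field. A sketch isolating that computation, simplified from the two call sites:

    u8 slv_config = 0;

    /* keep the aux_sens field at 3 when the SLV0 config byte and
     * aux_sens share a register, so the length write cannot clobber it */
    if (slv_addr + 2 == aux_sens->addr)
    	slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);

    config[2] = (len & ST_LS6DSX_READ_OP_MASK) | slv_config;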
@@ -296,7 +312,8 @@ st_lsm6dsx_shub_write(struct st_lsm6dsx_sensor *sensor, u8 addr,
st_lsm6dsx_shub_master_enable(sensor, false);
}
- memset(config, 0, sizeof(config));
+ config[0] = hub_settings->pause;
+ config[1] = 0;
return st_lsm6dsx_shub_write_reg(hw, slv_addr, config, sizeof(config));
}
@@ -688,14 +705,19 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
const struct st_lsm6dsx_ext_dev_settings *settings)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
+ u8 config[3], data, slv_addr, slv_config = 0;
+ const struct st_lsm6dsx_reg *aux_sens;
struct st_lsm6dsx_sensor *sensor;
- u8 config[3], data, slv_addr;
bool found = false;
int i, err;
+ sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
hub_settings = &hw->settings->shub_settings;
+ aux_sens = &hw->settings->shub_settings.aux_sens;
slv_addr = ST_LSM6DSX_SLV_ADDR(0, hub_settings->slv0_addr);
- sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ /* do not overwrite aux_sens */
+ if (slv_addr + 2 == aux_sens->addr)
+ slv_config = ST_LSM6DSX_SHIFT_VAL(3, aux_sens->mask);
for (i = 0; i < ARRAY_SIZE(settings->i2c_addr); i++) {
if (!settings->i2c_addr[i])
@@ -704,7 +726,7 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
/* read wai slave register */
config[0] = (settings->i2c_addr[i] << 1) | 0x1;
config[1] = settings->wai.addr;
- config[2] = 0x1;
+ config[2] = 0x1 | slv_config;
err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
@@ -717,9 +739,7 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
st_lsm6dsx_shub_wait_complete(hw);
- err = st_lsm6dsx_shub_read_reg(hw,
- hub_settings->shub_out,
- &data, sizeof(data));
+ err = st_lsm6dsx_shub_read_output(hw, &data, sizeof(data));
st_lsm6dsx_shub_master_enable(sensor, false);
@@ -735,7 +755,9 @@ st_lsm6dsx_shub_check_wai(struct st_lsm6dsx_hw *hw, u8 *i2c_addr,
}
/* reset SLV0 channel */
- memset(config, 0, sizeof(config));
+ config[0] = hub_settings->pause;
+ config[1] = 0;
+ config[2] = slv_config;
err = st_lsm6dsx_shub_write_reg(hw, slv_addr, config,
sizeof(config));
if (err < 0)
@@ -770,7 +792,7 @@ int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name)
if (err < 0)
return err;
- if (++num_ext_dev >= ST_LSM6DSX_MAX_SLV_NUM)
+ if (++num_ext_dev >= hw->settings->shub_settings.num_ext_dev)
break;
id++;
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 67ff36eac247..eb1086e4a951 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "st_lsm6dsx.h"
@@ -122,7 +121,7 @@ static struct spi_driver st_lsm6dsx_driver = {
.driver = {
.name = "st_lsm6dsx_spi",
.pm = &st_lsm6dsx_pm_ops,
- .of_match_table = of_match_ptr(st_lsm6dsx_spi_of_match),
+ .of_match_table = st_lsm6dsx_spi_of_match,
},
.probe = st_lsm6dsx_spi_probe,
.id_table = st_lsm6dsx_spi_id_table,
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c193d64e5217..4ada5592aa2b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -87,7 +87,7 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
}
/**
- * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ * iio_buffer_read_outer() - chrdev read for buffer access
* @filp: File structure pointer for the char device
* @buf: Destination buffer for iio buffer read
* @n: First n bytes to read
@@ -99,8 +99,8 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
* Return: negative values corresponding to error codes or ret != 0
* for ending the reading activity
**/
-ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
- size_t n, loff_t *f_ps)
+ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
@@ -112,7 +112,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
if (!indio_dev->info)
return -ENODEV;
- if (!rb || !rb->access->read_first_n)
+ if (!rb || !rb->access->read)
return -EINVAL;
datum_size = rb->bytes_per_datum;
@@ -147,7 +147,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
continue;
}
- ret = rb->access->read_first_n(rb, n, buf);
+ ret = rb->access->read(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
} while (ret == 0);
@@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
unsigned bytes = 0;
- int length, i;
+ int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
@@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
+
+ bytes = ALIGN(bytes, largest);
return bytes;
}
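
The trailing ALIGN to the largest element fixes alignment of repeated samples, not just the first datum. A hypothetical worked case, an s64 element followed by an s16 with no timestamp:

    /*
     * s64: bytes = ALIGN(0, 8) + 8 = 8
     * s16: bytes = ALIGN(8, 2) + 2 = 10
     *
     * Old return value: 10 -> sample N's s64 starts at offset 10 * N,
     * unaligned for every odd N in the ring buffer.
     * New return value: ALIGN(10, 8) = 16 -> the 8-byte element stays
     * naturally aligned in every repetition.
     */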
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a46cdf2d8833..65ff0d067018 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -161,6 +161,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
+ [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
};
/**
@@ -596,6 +597,8 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
}
return l;
}
+ case IIO_VAL_CHAR:
+ return snprintf(buf, len, "%c", (char)vals[0]);
default:
return 0;
}
@@ -837,7 +840,8 @@ static ssize_t iio_write_channel_info(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, fract_mult = 100000;
- int integer, fract;
+ int integer, fract = 0;
+ bool is_char = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
@@ -855,13 +859,24 @@ static ssize_t iio_write_channel_info(struct device *dev,
case IIO_VAL_INT_PLUS_NANO:
fract_mult = 100000000;
break;
+ case IIO_VAL_CHAR:
+ is_char = true;
+ break;
default:
return -EINVAL;
}
- ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
- if (ret)
- return ret;
+ if (is_char) {
+ char ch;
+
+ if (sscanf(buf, "%c", &ch) != 1)
+ return -EINVAL;
+ integer = ch;
+ } else {
+ ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
+ if (ret)
+ return ret;
+ }
ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
integer, fract, this_attr->address);
@@ -1617,7 +1632,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
static const struct file_operations iio_buffer_fileops = {
- .read = iio_buffer_read_first_n_outer_addr,
+ .read = iio_buffer_read_outer_addr,
.release = iio_chrdev_release,
.open = iio_chrdev_open,
.poll = iio_buffer_poll_addr,
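
The industrialio-core.c hunks above teach the sysfs write path about IIO_VAL_CHAR: a character channel consumes exactly one character, while numeric channels still go through iio_str_to_fixpoint(). Below is a hedged userspace sketch of that split; str_to_fixpoint() is a simplified stand-in for the kernel helper and skips its edge-case handling:

#include <ctype.h>
#include <stdio.h>

/* simplified stand-in for iio_str_to_fixpoint(); fract_mult is the
 * weight of the first fractional digit, e.g. 100000 for INT_PLUS_MICRO */
static int str_to_fixpoint(const char *s, int fract_mult,
			   int *integer, int *fract)
{
	int neg = 0;

	*integer = 0;
	*fract = 0;
	if (*s == '-') {
		neg = 1;
		s++;
	}
	for (; isdigit((unsigned char)*s); s++)
		*integer = *integer * 10 + (*s - '0');
	if (*s == '.') {
		for (s++; isdigit((unsigned char)*s) && fract_mult; s++) {
			*fract += (*s - '0') * fract_mult;
			fract_mult /= 10;
		}
	}
	if (neg) {
		*integer = -*integer;
		*fract = -*fract;
	}
	return 0;
}

int main(void)
{
	const char *buf = "12.34";	/* what userspace echoed into sysfs */
	int is_char = 0;		/* set when write_raw_get_fmt() reports IIO_VAL_CHAR */
	int integer, fract = 0;

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return 1;
		integer = ch;
	} else {
		str_to_fixpoint(buf, 100000, &integer, &fract);
	}
	printf("integer=%d fract=%d\n", integer, fract);	/* 12, 340000 */
	return 0;
}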
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index c5dfb9a6b5a1..52f86bc777dd 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -15,7 +15,6 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -24,7 +23,6 @@
#include <linux/iio/events.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/sysfs.h>
-#include <linux/of_gpio.h>
#define APDS9960_REGMAP_NAME "apds9960_regmap"
#define APDS9960_DRV_NAME "apds9960"
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index d85a391e50c5..7a838e2956f4 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -14,7 +14,6 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 6733b52c22df..bc196c212881 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -742,7 +742,7 @@ static int lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
if (val < LM3533_ALS_RESISTOR_MIN || val > LM3533_ALS_RESISTOR_MAX) {
dev_err(&als->pdev->dev, "invalid resistor value\n");
return -EINVAL;
- };
+ }
ret = lm3533_write(als->lm3533, LM3533_REG_ALS_RESISTOR_SELECT, val);
if (ret) {
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 982bba0c54e7..0476c2bc8138 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index dacbac6a6662..4889bbeb0c73 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 16dacea9eadf..b0e241aaefb4 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data)
if (ret < 0)
return ret;
- data->al_scale = 24000;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
switch (id) {
@@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data)
/* show 54ms in total. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ data->al_scale = 24000;
break;
case VCNL4040_PROD_ID:
/* Integration time is 80ms, add 10ms. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+ data->al_scale = 120000;
break;
}
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 893bec5a0312..3c881541ae72 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -16,8 +16,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/acpi.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -29,8 +28,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/magnetometer/ak8975.h>
-
/*
* Register definitions, as well as various shifts and masks to get at the
* individual fields of the registers.
@@ -206,11 +203,11 @@ static long ak09912_raw_to_gauss(u16 data)
/* Compatible Asahi Kasei Compass parts */
enum asahi_compass_chipset {
+ AKXXXX = 0,
AK8975,
AK8963,
AK09911,
AK09912,
- AK_MAX_TYPE
};
enum ak_ctrl_reg_addr {
@@ -248,7 +245,7 @@ struct ak_def {
u8 data_regs[3];
};
-static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
+static const struct ak_def ak_def_array[] = {
{
.type = AK8975,
.raw_to_gauss = ak8975_raw_to_gauss,
@@ -360,7 +357,7 @@ struct ak8975_data {
struct mutex lock;
u8 asa[3];
long raw_to_gauss[3];
- int eoc_gpio;
+ struct gpio_desc *eoc_gpiod;
int eoc_irq;
wait_queue_head_t data_ready_queue;
unsigned long flags;
@@ -498,15 +495,13 @@ static int ak8975_setup_irq(struct ak8975_data *data)
if (client->irq)
irq = client->irq;
else
- irq = gpio_to_irq(data->eoc_gpio);
+ irq = gpiod_to_irq(data->eoc_gpiod);
rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(&client->dev), data);
if (rc < 0) {
- dev_err(&client->dev,
- "irq %d request failed, (gpio %d): %d\n",
- irq, data->eoc_gpio, rc);
+ dev_err(&client->dev, "irq %d request failed: %d\n", irq, rc);
return rc;
}
@@ -549,7 +544,7 @@ static int ak8975_setup(struct i2c_client *client)
return ret;
}
- if (data->eoc_gpio > 0 || client->irq > 0) {
+ if (data->eoc_gpiod || client->irq > 0) {
ret = ak8975_setup_irq(data);
if (ret < 0) {
dev_err(&client->dev,
@@ -574,7 +569,7 @@ static int wait_conversion_complete_gpio(struct ak8975_data *data)
/* Wait for the conversion to complete. */
while (timeout_ms) {
msleep(AK8975_CONVERSION_DONE_POLL_TIME);
- if (gpio_get_value(data->eoc_gpio))
+ if (gpiod_get_value(data->eoc_gpiod))
break;
timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
}
@@ -646,7 +641,7 @@ static int ak8975_start_read_axis(struct ak8975_data *data,
/* Wait for the conversion to complete. */
if (data->eoc_irq)
ret = wait_conversion_complete_interrupt(data);
- else if (gpio_is_valid(data->eoc_gpio))
+ else if (data->eoc_gpiod)
ret = wait_conversion_complete_gpio(data);
else
ret = wait_conversion_complete_polled(data);
@@ -786,19 +781,6 @@ static const struct acpi_device_id ak_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
#endif
-static const char *ak8975_match_acpi_device(struct device *dev,
- enum asahi_compass_chipset *chipset)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
- *chipset = (int)id->driver_data;
-
- return dev_name(dev);
-}
-
static void ak8975_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8975_data *data = iio_priv(indio_dev);
@@ -856,36 +838,23 @@ static int ak8975_probe(struct i2c_client *client,
{
struct ak8975_data *data;
struct iio_dev *indio_dev;
- int eoc_gpio;
+ struct gpio_desc *eoc_gpiod;
+ const void *match;
+ unsigned int i;
int err;
+ enum asahi_compass_chipset chipset;
const char *name = NULL;
- enum asahi_compass_chipset chipset = AK_MAX_TYPE;
- const struct ak8975_platform_data *pdata =
- dev_get_platdata(&client->dev);
-
- /* Grab and set up the supplied GPIO. */
- if (pdata)
- eoc_gpio = pdata->eoc_gpio;
- else if (client->dev.of_node)
- eoc_gpio = of_get_gpio(client->dev.of_node, 0);
- else
- eoc_gpio = -1;
- if (eoc_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- /* We may not have a GPIO based IRQ to scan, that is fine, we will
- poll if so */
- if (gpio_is_valid(eoc_gpio)) {
- err = devm_gpio_request_one(&client->dev, eoc_gpio,
- GPIOF_IN, "ak_8975");
- if (err < 0) {
- dev_err(&client->dev,
- "failed to request GPIO %d, error %d\n",
- eoc_gpio, err);
- return err;
- }
- }
+ /*
+ * Grab and set up the supplied GPIO.
+ * We may not have a GPIO-based IRQ to scan; that is fine, we will
+ * poll if so.
+ */
+ eoc_gpiod = devm_gpiod_get_optional(&client->dev, NULL, GPIOD_IN);
+ if (IS_ERR(eoc_gpiod))
+ return PTR_ERR(eoc_gpiod);
+ if (eoc_gpiod)
+ gpiod_set_consumer_name(eoc_gpiod, "ak_8975");
/* Register with IIO */
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
@@ -896,35 +865,35 @@ static int ak8975_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->eoc_gpio = eoc_gpio;
+ data->eoc_gpiod = eoc_gpiod;
data->eoc_irq = 0;
- if (!pdata) {
- err = iio_read_mount_matrix(&client->dev, "mount-matrix",
- &data->orientation);
- if (err)
- return err;
- } else
- data->orientation = pdata->orientation;
+ err = iio_read_mount_matrix(&client->dev, "mount-matrix", &data->orientation);
+ if (err)
+ return err;
/* id will be NULL when enumerated via ACPI */
- if (id) {
+ match = device_get_match_data(&client->dev);
+ if (match) {
+ chipset = (enum asahi_compass_chipset)(match);
+ name = dev_name(&client->dev);
+ } else if (id) {
chipset = (enum asahi_compass_chipset)(id->driver_data);
name = id->name;
- } else if (ACPI_HANDLE(&client->dev)) {
- name = ak8975_match_acpi_device(&client->dev, &chipset);
- if (!name)
- return -ENODEV;
} else
return -ENOSYS;
- if (chipset >= AK_MAX_TYPE) {
+ for (i = 0; i < ARRAY_SIZE(ak_def_array); i++)
+ if (ak_def_array[i].type == chipset)
+ break;
+
+ if (i == ARRAY_SIZE(ak_def_array)) {
dev_err(&client->dev, "AKM device type unsupported: %d\n",
chipset);
return -ENODEV;
}
- data->def = &ak_def_array[chipset];
+ data->def = &ak_def_array[i];
/* Fetch the regulators */
data->vdd = devm_regulator_get(&client->dev, "vdd");
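
With AK_MAX_TYPE gone, the ak8975 enum values above no longer double as array indices, so probe now searches ak_def_array[] for an entry whose .type matches the chipset. A small sketch of that lookup pattern; the two-entry table and sensitivity values are invented for illustration:

#include <stdio.h>

enum chip { AKXXXX, AK8975, AK8963 };

struct def {
	enum chip type;
	long sensitivity;
};

static const struct def defs[] = {
	{ .type = AK8975, .sensitivity = 9830 },
	{ .type = AK8963, .sensitivity = 19661 },
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const struct def *find_def(enum chip chipset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(defs); i++)
		if (defs[i].type == chipset)
			return &defs[i];
	return NULL;	/* unsupported chip: probe returns -ENODEV */
}

int main(void)
{
	const struct def *d = find_def(AK8963);

	printf("%s\n", d ? "found" : "unsupported");
	return 0;
}

The linear scan decouples the enum from the table layout, so entries can be reordered or dropped without renumbering the chipset ids baked into the match tables.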
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index fdba480a12be..c6bb4ce77594 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_magn.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_magn_of_match[] = {
{
.compatible = "st,lsm303dlh-magn",
@@ -50,9 +49,6 @@ static const struct of_device_id st_magn_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
-#else
-#define st_magn_of_match NULL
-#endif
static int st_magn_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -62,8 +58,7 @@ static int st_magn_i2c_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&client->dev, st_magn_of_match,
- client->name, sizeof(client->name));
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_magn_get_settings(client->name);
if (!settings) {
@@ -113,7 +108,7 @@ MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
static struct i2c_driver st_magn_driver = {
.driver = {
.name = "st-magn-i2c",
- .of_match_table = of_match_ptr(st_magn_of_match),
+ .of_match_table = st_magn_of_match,
},
.probe = st_magn_i2c_probe,
.remove = st_magn_i2c_remove,
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index fbf909bde841..3d08d74c367d 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_magn.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-magn to maintain
@@ -45,9 +44,6 @@ static const struct of_device_id st_magn_of_match[] = {
{}
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
-#else
-#define st_magn_of_match NULL
-#endif
static int st_magn_spi_probe(struct spi_device *spi)
{
@@ -56,8 +52,7 @@ static int st_magn_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_magn_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_magn_get_settings(spi->modalias);
if (!settings) {
@@ -104,7 +99,7 @@ MODULE_DEVICE_TABLE(spi, st_magn_id_table);
static struct spi_driver st_magn_driver = {
.driver = {
.name = "st-magn-spi",
- .of_match_table = of_match_ptr(st_magn_of_match),
+ .of_match_table = st_magn_of_match,
},
.probe = st_magn_spi_probe,
.remove = st_magn_spi_remove,
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index ba420e484787..9c2d9bf8f100 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -53,6 +53,18 @@ config IIO_CROS_EC_BARO
To compile this driver as a module, choose M here: the module
will be called cros_ec_baro.
+config DLHL60D
+ tristate "All Sensors DLHL60D and DLHL60G low voltage digital pressure sensors"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the All Sensors DLH series
+ pressure sensors driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dlhl60d.
+
config DPS310
tristate "Infineon DPS310 pressure and temperature sensor"
depends on I2C
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d8f5ace1f25d..5a79192d8cb5 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o
+obj-$(CONFIG_DLHL60D) += dlhl60d.o
obj-$(CONFIG_DPS310) += dps310.o
obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 3109c8e2cc11..8b03ea15c0d0 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/acpi.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include "bmp280.h"
@@ -38,16 +36,6 @@ static int bmp280_i2c_probe(struct i2c_client *client,
client->irq);
}
-static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
- {"BMP0280", BMP280_CHIP_ID },
- {"BMP0180", BMP180_CHIP_ID },
- {"BMP0085", BMP180_CHIP_ID },
- {"BME0280", BME280_CHIP_ID },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match);
-
-#ifdef CONFIG_OF
static const struct of_device_id bmp280_of_i2c_match[] = {
{ .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
{ .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
@@ -56,9 +44,6 @@ static const struct of_device_id bmp280_of_i2c_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
-#else
-#define bmp280_of_i2c_match NULL
-#endif
static const struct i2c_device_id bmp280_i2c_id[] = {
{"bmp280", BMP280_CHIP_ID },
@@ -72,8 +57,7 @@ MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
static struct i2c_driver bmp280_i2c_driver = {
.driver = {
.name = "bmp280",
- .acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match),
- .of_match_table = of_match_ptr(bmp280_of_i2c_match),
+ .of_match_table = bmp280_of_i2c_match,
.pm = &bmp280_dev_pm_ops,
},
.probe = bmp280_i2c_probe,
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 52f53f3123b1..b521bebd551c 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -14,7 +14,6 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
new file mode 100644
index 000000000000..b8c99e7bd6cf
--- /dev/null
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * All Sensors DLH series low voltage digital pressure sensors
+ *
+ * Copyright (c) 2019 AVL DiTEST GmbH
+ * Tomislav Denis <tomislav.denis@avl.com>
+ *
+ * Datasheet: http://www.allsensors.com/cad/DS-0355_Rev_B.PDF
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <asm/unaligned.h>
+
+/* Commands */
+#define DLH_START_SINGLE 0xAA
+
+/* Status bits */
+#define DLH_STATUS_OK 0x40
+
+/* DLH data format */
+#define DLH_NUM_READ_BYTES 7
+#define DLH_NUM_DATA_BYTES 3
+#define DLH_NUM_PR_BITS 24
+#define DLH_NUM_TEMP_BITS 24
+
+/* DLH timings */
+#define DLH_SINGLE_DUT_MS 5
+
+enum dhl_ids {
+ dlhl60d,
+ dlhl60g,
+};
+
+struct dlh_info {
+ u8 osdig; /* digital offset factor */
+ unsigned int fss; /* full scale span (inch H2O) */
+};
+
+struct dlh_state {
+ struct i2c_client *client;
+ struct dlh_info info;
+ bool use_interrupt;
+ struct completion completion;
+ u8 rx_buf[DLH_NUM_READ_BYTES] ____cacheline_aligned;
+};
+
+static struct dlh_info dlh_info_tbl[] = {
+ [dlhl60d] = {
+ .osdig = 2,
+ .fss = 120,
+ },
+ [dlhl60g] = {
+ .osdig = 10,
+ .fss = 60,
+ },
+};
+
+static int dlh_cmd_start_single(struct dlh_state *st)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte(st->client, DLH_START_SINGLE);
+ if (ret)
+ dev_err(&st->client->dev,
+ "%s: I2C write byte failed\n", __func__);
+
+ return ret;
+}
+
+static int dlh_cmd_read_data(struct dlh_state *st)
+{
+ int ret;
+
+ ret = i2c_master_recv(st->client, st->rx_buf, DLH_NUM_READ_BYTES);
+ if (ret < 0) {
+ dev_err(&st->client->dev,
+ "%s: I2C read block failed\n", __func__);
+ return ret;
+ }
+
+ if (st->rx_buf[0] != DLH_STATUS_OK) {
+ dev_err(&st->client->dev,
+ "%s: invalid status 0x%02x\n", __func__, st->rx_buf[0]);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int dlh_start_capture_and_read(struct dlh_state *st)
+{
+ int ret;
+
+ if (st->use_interrupt)
+ reinit_completion(&st->completion);
+
+ ret = dlh_cmd_start_single(st);
+ if (ret)
+ return ret;
+
+ if (st->use_interrupt) {
+ ret = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(DLH_SINGLE_DUT_MS));
+ if (!ret) {
+ dev_err(&st->client->dev,
+ "%s: conversion timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+ } else {
+ mdelay(DLH_SINGLE_DUT_MS);
+ }
+
+ return dlh_cmd_read_data(st);
+}
+
+static int dlh_read_direct(struct dlh_state *st,
+ unsigned int *pressure, unsigned int *temperature)
+{
+ int ret;
+
+ ret = dlh_start_capture_and_read(st);
+ if (ret)
+ return ret;
+
+ *pressure = get_unaligned_be32(&st->rx_buf[1]) >> 8;
+ *temperature = get_unaligned_be32(&st->rx_buf[3]) &
+ GENMASK(DLH_NUM_TEMP_BITS - 1, 0);
+
+ return 0;
+}
+
+static int dlh_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *value,
+ int *value2, long mask)
+{
+ struct dlh_state *st = iio_priv(indio_dev);
+ unsigned int pressure, temperature;
+ int ret;
+ s64 tmp;
+ s32 rem;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = dlh_read_direct(st, &pressure, &temperature);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ *value = pressure;
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ *value = temperature;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ tmp = div_s64(125LL * st->info.fss * 24909 * 100,
+ 1 << DLH_NUM_PR_BITS);
+ tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+ *value = tmp;
+ *value2 = rem;
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case IIO_TEMP:
+ *value = 125 * 1000;
+ *value2 = DLH_NUM_TEMP_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ *value = -125 * st->info.fss * 24909;
+ *value2 = 100 * st->info.osdig * 100000;
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_TEMP:
+ *value = -40 * 1000;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dlh_info = {
+ .read_raw = dlh_read_raw,
+};
+
+static const struct iio_chan_spec dlh_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = DLH_NUM_PR_BITS,
+ .storagebits = 32,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = DLH_NUM_TEMP_BITS,
+ .storagebits = 32,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ }
+};
+
+static irqreturn_t dlh_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct dlh_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int chn, i = 0;
+ __be32 tmp_buf[2];
+
+ ret = dlh_start_capture_and_read(st);
+ if (ret)
+ goto out;
+
+ for_each_set_bit(chn, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ memcpy(tmp_buf + i,
+ &st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
+ DLH_NUM_DATA_BYTES);
+ i++;
+ }
+
+ iio_push_to_buffers(indio_dev, tmp_buf);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dlh_interrupt(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct dlh_state *st = iio_priv(indio_dev);
+
+ complete(&st->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int dlh_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct dlh_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE)) {
+ dev_err(&client->dev,
+ "adapter doesn't support required i2c functionality\n");
+ return -EOPNOTSUPP;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (!indio_dev) {
+ dev_err(&client->dev, "failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ st = iio_priv(indio_dev);
+ st->info = dlh_info_tbl[id->driver_data];
+ st->client = client;
+ st->use_interrupt = false;
+
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->info = &dlh_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dlh_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dlh_channels);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ dlh_interrupt, NULL,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ id->name, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to allocate threaded irq");
+ return ret;
+ }
+
+ st->use_interrupt = true;
+ init_completion(&st->completion);
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL, &dlh_trigger_handler, NULL);
+ if (ret) {
+ dev_err(&client->dev, "failed to setup iio buffer\n");
+ return ret;
+ }
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret)
+ dev_err(&client->dev, "failed to register iio device\n");
+
+ return ret;
+}
+
+static const struct of_device_id dlh_of_match[] = {
+ { .compatible = "asc,dlhl60d" },
+ { .compatible = "asc,dlhl60g" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dlh_of_match);
+
+static const struct i2c_device_id dlh_id[] = {
+ { "dlhl60d", dlhl60d },
+ { "dlhl60g", dlhl60g },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dlh_id);
+
+static struct i2c_driver dlh_driver = {
+ .driver = {
+ .name = "dlhl60d",
+ .of_match_table = dlh_of_match,
+ },
+ .probe = dlh_probe,
+ .id_table = dlh_id,
+};
+module_i2c_driver(dlh_driver);
+
+MODULE_AUTHOR("Tomislav Denis <tomislav.denis@avl.com>");
+MODULE_DESCRIPTION("Driver for All Sensors DLH series pressure sensors");
+MODULE_LICENSE("GPL v2");
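
dlh_read_direct() above unpacks the 7-byte DLH frame: a status byte (0x40 is DLH_STATUS_OK), then 24-bit big-endian pressure in bytes 1-3 and temperature in bytes 4-6, which is what the get_unaligned_be32() shift and mask achieve. A standalone sketch of the decode; the frame contents are invented sample data:

#include <stdio.h>

int main(void)
{
	/* status, pressure[23:0], temperature[23:0], big-endian */
	const unsigned char rx[7] = { 0x40, 0x80, 0x00, 0x00, 0x66, 0x66, 0x66 };
	unsigned int pressure, temperature;

	if (rx[0] != 0x40) {	/* DLH_STATUS_OK */
		fprintf(stderr, "sensor busy or faulted\n");
		return 1;
	}
	pressure = (rx[1] << 16) | (rx[2] << 8) | rx[3];
	temperature = (rx[4] << 16) | (rx[5] << 8) | rx[6];
	printf("pressure raw = %u, temperature raw = %u\n",
	       pressure, temperature);	/* 8388608 (mid-scale) and 6710886 */
	return 0;
}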
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index c2e47a6c3118..5c746ff6087e 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -37,7 +37,7 @@ enum st_press_type {
* struct st_sensors_platform_data - default press platform data
* @drdy_int_pin: default press DRDY is available on INT1 pin.
*/
-static const struct st_sensors_platform_data default_press_pdata = {
+static __maybe_unused const struct st_sensors_platform_data default_press_pdata = {
.drdy_int_pin = 1,
};
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 71d2ed6b4948..09c6903f99b8 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -18,7 +17,6 @@
#include <linux/iio/common/st_sensors_i2c.h>
#include "st_pressure.h"
-#ifdef CONFIG_OF
static const struct of_device_id st_press_of_match[] = {
{
.compatible = "st,lps001wp-press",
@@ -51,9 +49,6 @@ static const struct of_device_id st_press_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
-#else
-#define st_press_of_match NULL
-#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id st_press_acpi_match[] = {
@@ -61,8 +56,6 @@ static const struct acpi_device_id st_press_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, st_press_acpi_match);
-#else
-#define st_press_acpi_match NULL
#endif
static const struct i2c_device_id st_press_id_table[] = {
@@ -85,18 +78,7 @@ static int st_press_i2c_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
- if (client->dev.of_node) {
- st_sensors_of_name_probe(&client->dev, st_press_of_match,
- client->name, sizeof(client->name));
- } else if (ACPI_HANDLE(&client->dev)) {
- ret = st_sensors_match_acpi_device(&client->dev);
- if ((ret < 0) || (ret >= ST_PRESS_MAX))
- return -ENODEV;
-
- strlcpy(client->name, st_press_id_table[ret].name,
- sizeof(client->name));
- } else if (!id)
- return -ENODEV;
+ st_sensors_dev_name_probe(&client->dev, client->name, sizeof(client->name));
settings = st_press_get_settings(client->name);
if (!settings) {
@@ -133,7 +115,7 @@ static int st_press_i2c_remove(struct i2c_client *client)
static struct i2c_driver st_press_driver = {
.driver = {
.name = "st-press-i2c",
- .of_match_table = of_match_ptr(st_press_of_match),
+ .of_match_table = st_press_of_match,
.acpi_match_table = ACPI_PTR(st_press_acpi_match),
},
.probe = st_press_i2c_probe,
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 7c8b70221e70..b5ee3ec2764f 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -17,7 +17,6 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_pressure.h"
-#ifdef CONFIG_OF
/*
* For new single-chip sensors use <device_name> as compatible string.
* For old single-chip devices keep <device_name>-press to maintain
@@ -55,9 +54,6 @@ static const struct of_device_id st_press_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
-#else
-#define st_press_of_match NULL
-#endif
static int st_press_spi_probe(struct spi_device *spi)
{
@@ -66,8 +62,7 @@ static int st_press_spi_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int err;
- st_sensors_of_name_probe(&spi->dev, st_press_of_match,
- spi->modalias, sizeof(spi->modalias));
+ st_sensors_dev_name_probe(&spi->dev, spi->modalias, sizeof(spi->modalias));
settings = st_press_get_settings(spi->modalias);
if (!settings) {
@@ -116,7 +111,7 @@ MODULE_DEVICE_TABLE(spi, st_press_id_table);
static struct spi_driver st_press_driver = {
.driver = {
.name = "st-press-spi",
- .of_match_table = of_match_ptr(st_press_of_match),
+ .of_match_table = st_press_of_match,
},
.probe = st_press_spi_probe,
.remove = st_press_spi_remove,
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index d53601447da4..37606d400805 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -58,6 +58,21 @@ config MB1232
To compile this driver as a module, choose M here: the
module will be called mb1232.
+config PING
+ tristate "Parallax GPIO bitbanged ranger sensors"
+ depends on GPIOLIB
+ help
+ Say Y here to build a driver for GPIO bitbanged ranger sensors
+ with just one GPIO for the trigger and echo. This driver can be
+ used to measure the distance of objects.
+
+ Currently supported are:
+ - Parallax PING))) (ultrasonic)
+ - Parallax LaserPING (time-of-flight)
+
+ To compile this driver as a module, choose M here: the
+ module will be called ping.
+
config RFD77402
tristate "RFD77402 ToF sensor"
depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 0bb5f9de13d6..c591b019304e 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
obj-$(CONFIG_MB1232) += mb1232.o
+obj-$(CONFIG_PING) += ping.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index b591c63bd6c4..bac9a433dd19 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -22,8 +21,6 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/of_gpio.h>
-
#define AS3935_AFE_GAIN 0x00
#define AS3935_AFE_MASK 0x3F
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
new file mode 100644
index 000000000000..34aff108dff5
--- /dev/null
+++ b/drivers/iio/proximity/ping.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PING: ultrasonic sensor for distance measuring using only one GPIO
+ *
+ * Copyright (c) 2019 Andreas Klinger <ak@it-klinger.de>
+ *
+ * For details about the devices see:
+ * http://parallax.com/sites/default/files/downloads/28041-LaserPING-2m-Rangefinder-Guide.pdf
+ * http://parallax.com/sites/default/files/downloads/28015-PING-Documentation-v1.6.pdf
+ *
+ * the measurement cycle as a timing diagram looks like:
+ *
+ * GPIO      ___              ________________________
+ * ping:  __/   \____________/                        \________________
+ *          ^   ^            ^                        ^
+ *          |<->|            interrupt                interrupt
+ *        udelay(5)          (ts_rising)              (ts_falling)
+ *                           |<---------------------->|
+ *                           .  pulse time measured   .
+ *                           .  --> one round trip of ultra sonic waves
+ * ultra                     .                        .
+ * sonic            _   _   _.                        .
+ * burst:  _________/ \_/ \_/ \_________________________________________
+ *                                                    .
+ * ultra                                              .
+ * sonic                                     _   _   _.
+ * echo:  ____________________________________/ \_/ \_/ \_______________
+ */
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+struct ping_cfg {
+ unsigned long trigger_pulse_us; /* length of trigger pulse */
+ int laserping_error; /* support error code in */
+ /* pulse width of laser */
+ /* ping sensors */
+ s64 timeout_ns; /* timeout in ns */
+};
+
+struct ping_data {
+ struct device *dev;
+ struct gpio_desc *gpiod_ping;
+ struct mutex lock;
+ int irqnr;
+ ktime_t ts_rising;
+ ktime_t ts_falling;
+ struct completion rising;
+ struct completion falling;
+ const struct ping_cfg *cfg;
+};
+
+static const struct ping_cfg pa_ping_cfg = {
+ .trigger_pulse_us = 5,
+ .laserping_error = 0,
+ .timeout_ns = 18500000, /* 3 meters */
+};
+
+static const struct ping_cfg pa_laser_ping_cfg = {
+ .trigger_pulse_us = 5,
+ .laserping_error = 1,
+ .timeout_ns = 15500000, /* 2 meters plus error codes */
+};
+
+static irqreturn_t ping_handle_irq(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct ping_data *data = iio_priv(indio_dev);
+ ktime_t now = ktime_get();
+
+ if (gpiod_get_value(data->gpiod_ping)) {
+ data->ts_rising = now;
+ complete(&data->rising);
+ } else {
+ data->ts_falling = now;
+ complete(&data->falling);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ping_read(struct ping_data *data)
+{
+ int ret;
+ ktime_t ktime_dt;
+ s64 dt_ns;
+ u32 time_ns, distance_mm;
+ struct platform_device *pdev = to_platform_device(data->dev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(data);
+
+ /*
+ * just one read-echo-cycle can take place at a time
+ * ==> lock against concurrent reading calls
+ */
+ mutex_lock(&data->lock);
+
+ reinit_completion(&data->rising);
+ reinit_completion(&data->falling);
+
+ gpiod_set_value(data->gpiod_ping, 1);
+ udelay(data->cfg->trigger_pulse_us);
+ gpiod_set_value(data->gpiod_ping, 0);
+
+ ret = gpiod_direction_input(data->gpiod_ping);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ data->irqnr = gpiod_to_irq(data->gpiod_ping);
+ if (data->irqnr < 0) {
+ dev_err(data->dev, "gpiod_to_irq: %d\n", data->irqnr);
+ mutex_unlock(&data->lock);
+ return data->irqnr;
+ }
+
+ ret = request_irq(data->irqnr, ping_handle_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ pdev->name, indio_dev);
+ if (ret < 0) {
+ dev_err(data->dev, "request_irq: %d\n", ret);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ /* it should not take more than 20 ms until echo is rising */
+ ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
+ if (ret < 0)
+ goto err_reset_direction;
+ else if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto err_reset_direction;
+ }
+
+ /* it cannot take more than 50 ms until echo is falling */
+ ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
+ if (ret < 0)
+ goto err_reset_direction;
+ else if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto err_reset_direction;
+ }
+
+ ktime_dt = ktime_sub(data->ts_falling, data->ts_rising);
+
+ free_irq(data->irqnr, indio_dev);
+
+ ret = gpiod_direction_output(data->gpiod_ping, GPIOD_OUT_LOW);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ mutex_unlock(&data->lock);
+
+ dt_ns = ktime_to_ns(ktime_dt);
+ if (dt_ns > data->cfg->timeout_ns) {
+ dev_dbg(data->dev, "distance out of range: dt=%lldns\n",
+ dt_ns);
+ return -EIO;
+ }
+
+ time_ns = dt_ns;
+
+ /*
+ * read the error code of the laser ping sensor and give users a chance
+ * to figure out the error by using dynamic debugging
+ */
+ if (data->cfg->laserping_error) {
+ if ((time_ns > 12500000) && (time_ns <= 13500000)) {
+ dev_dbg(data->dev, "target too close or to far\n");
+ return -EIO;
+ }
+ if ((time_ns > 13500000) && (time_ns <= 14500000)) {
+ dev_dbg(data->dev, "internal sensor error\n");
+ return -EIO;
+ }
+ if ((time_ns > 14500000) && (time_ns <= 15500000)) {
+ dev_dbg(data->dev, "internal sensor timeout\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * the speed as a function of temperature is approximately:
+ *
+ *   speed = 331.5 + 0.6 * Temp
+ *   with Temp in °C
+ *   and speed in m/s
+ *
+ * use 343.5 m/s as the ultrasonic speed at 20 °C here in the absence
+ * of a temperature measurement
+ *
+ * therefore:
+ *              time     343.5      time * 232
+ *   distance = ------ * ------- = ------------
+ *              10^6       2         1350800
+ *   with time in ns
+ *   and distance in mm (one way)
+ *
+ * because we limit to 3 meters, the multiplication with 232 just
+ * fits into 32 bits
+ */
+ distance_mm = time_ns * 232 / 1350800;
+
+ return distance_mm;
+
+err_reset_direction:
+ free_irq(data->irqnr, indio_dev);
+ mutex_unlock(&data->lock);
+
+ if (gpiod_direction_output(data->gpiod_ping, GPIOD_OUT_LOW))
+ dev_dbg(data->dev, "error in gpiod_direction_output\n");
+ return ret;
+}
+
+static int ping_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long info)
+{
+ struct ping_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (channel->type != IIO_DISTANCE)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ping_read(data);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * maximum resolution in datasheet is 1 mm
+ * 1 LSB is 1 mm
+ */
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ping_iio_info = {
+ .read_raw = ping_read_raw,
+};
+
+static const struct iio_chan_spec ping_chan_spec[] = {
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static const struct of_device_id of_ping_match[] = {
+ { .compatible = "parallax,ping", .data = &pa_ping_cfg},
+ { .compatible = "parallax,laserping", .data = &pa_ping_cfg},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_ping_match);
+
+static int ping_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ping_data *data;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(struct ping_data));
+ if (!indio_dev) {
+ dev_err(dev, "failed to allocate IIO device\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->cfg = of_device_get_match_data(dev);
+
+ mutex_init(&data->lock);
+ init_completion(&data->rising);
+ init_completion(&data->falling);
+
+ data->gpiod_ping = devm_gpiod_get(dev, "ping", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpiod_ping)) {
+ dev_err(dev, "failed to get ping-gpios: err=%ld\n",
+ PTR_ERR(data->gpiod_ping));
+ return PTR_ERR(data->gpiod_ping);
+ }
+
+ if (gpiod_cansleep(data->gpiod_ping)) {
+ dev_err(data->dev, "cansleep-GPIOs not supported\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->name = "ping";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &ping_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ping_chan_spec;
+ indio_dev->num_channels = ARRAY_SIZE(ping_chan_spec);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct platform_driver ping_driver = {
+ .probe = ping_probe,
+ .driver = {
+ .name = "ping-gpio",
+ .of_match_table = of_ping_match,
+ },
+};
+
+module_platform_driver(ping_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("PING sensors for distance measuring using one GPIOs");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ping");
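
As a sanity check on the conversion at the end of ping_read() above: 232 / 1350800 ≈ 343.5 / (2 * 10^6), and with the 3 m (18.5 ms) timeout the product time_ns * 232 peaks just below 2^32, which is why the comment says it just fits. A tiny sketch with a made-up pulse width:

#include <stdio.h>

int main(void)
{
	unsigned int time_ns = 5831000;	/* ~2 m round trip at 343.5 m/s */
	unsigned int distance_mm = time_ns * 232 / 1350800;

	printf("%u mm\n", distance_mm);	/* ~1001 mm one way */
	return 0;
}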
diff --git a/drivers/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index 17b89623418c..a391f46ee06b 100644
--- a/drivers/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -10,7 +10,6 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index 73ed550e3fc9..b4cb21ab2e85 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -6,12 +6,14 @@
* Copyright (C) 2018-2019 Rockwell Collins
*/
+#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/util_macros.h>
#include <dt-bindings/iio/temperature/thermocouple.h>
/*
* The MSB of the register value determines whether the following byte will
@@ -23,6 +25,9 @@
#define MAX31856_CR0_1SHOT BIT(6)
#define MAX31856_CR0_OCFAULT BIT(4)
#define MAX31856_CR0_OCFAULT_MASK GENMASK(5, 4)
+#define MAX31856_CR0_FILTER_50HZ BIT(0)
+#define MAX31856_AVERAGING_MASK GENMASK(6, 4)
+#define MAX31856_AVERAGING_SHIFT 4
#define MAX31856_TC_TYPE_MASK GENMASK(3, 0)
#define MAX31856_FAULT_OVUV BIT(1)
#define MAX31856_FAULT_OPEN BIT(0)
@@ -49,7 +54,10 @@ static const struct iio_chan_spec max31856_channels[] = {
{ /* Thermocouple Temperature */
.type = IIO_TEMP,
.info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)
},
{ /* Cold Junction Temperature */
.type = IIO_TEMP,
@@ -57,12 +65,20 @@ static const struct iio_chan_spec max31856_channels[] = {
.modified = 1,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)
},
};
struct max31856_data {
struct spi_device *spi;
u32 thermocouple_type;
+ bool filter_50hz;
+ int averaging;
+};
+
+static const char max31856_tc_types[] = {
+ 'B', 'E', 'J', 'K', 'N', 'R', 'S', 'T'
};
static int max31856_read(struct max31856_data *data, u8 reg,
@@ -107,6 +123,10 @@ static int max31856_init(struct max31856_data *data)
reg_cr1_val &= ~MAX31856_TC_TYPE_MASK;
reg_cr1_val |= data->thermocouple_type;
+
+ reg_cr1_val &= ~MAX31856_AVERAGING_MASK;
+ reg_cr1_val |= data->averaging << MAX31856_AVERAGING_SHIFT;
+
ret = max31856_write(data, MAX31856_CR1_REG, reg_cr1_val);
if (ret)
return ret;
@@ -123,6 +143,11 @@ static int max31856_init(struct max31856_data *data)
reg_cr0_val &= ~MAX31856_CR0_1SHOT;
reg_cr0_val |= MAX31856_CR0_AUTOCONVERT;
+ if (data->filter_50hz)
+ reg_cr0_val |= MAX31856_CR0_FILTER_50HZ;
+ else
+ reg_cr0_val &= ~MAX31856_CR0_FILTER_50HZ;
+
return max31856_write(data, MAX31856_CR0_REG, reg_cr0_val);
}
@@ -210,6 +235,12 @@ static int max31856_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
}
break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = 1 << data->averaging;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ *val = max31856_tc_types[data->thermocouple_type];
+ return IIO_VAL_CHAR;
default:
ret = -EINVAL;
break;
@@ -218,6 +249,62 @@ static int max31856_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int max31856_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ return IIO_VAL_CHAR;
+ default:
+ return IIO_VAL_INT;
+ }
+}
+
+static int max31856_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max31856_data *data = iio_priv(indio_dev);
+ int msb;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (val > 16 || val < 1)
+ return -EINVAL;
+ msb = fls(val) - 1;
+ /* Round up to next 2pow if needed */
+ if (BIT(msb) < val)
+ msb++;
+
+ data->averaging = msb;
+ max31856_init(data);
+ break;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ {
+ int tc_type = -1;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max31856_tc_types); i++) {
+ if (max31856_tc_types[i] == toupper(val)) {
+ tc_type = i;
+ break;
+ }
+ }
+ if (tc_type < 0)
+ return -EINVAL;
+
+ data->thermocouple_type = tc_type;
+ max31856_init(data);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static ssize_t show_fault(struct device *dev, u8 faultbit, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -249,12 +336,54 @@ static ssize_t show_fault_oc(struct device *dev,
return show_fault(dev, MAX31856_FAULT_OPEN, buf);
}
+static ssize_t show_filter(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31856_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", data->filter_50hz ? 50 : 60);
+}
+
+static ssize_t set_filter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max31856_data *data = iio_priv(indio_dev);
+ unsigned int freq;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &freq);
+ if (ret)
+ return ret;
+
+ switch (freq) {
+ case 50:
+ data->filter_50hz = true;
+ break;
+ case 60:
+ data->filter_50hz = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ max31856_init(data);
+ return len;
+}
+
static IIO_DEVICE_ATTR(fault_ovuv, 0444, show_fault_ovuv, NULL, 0);
static IIO_DEVICE_ATTR(fault_oc, 0444, show_fault_oc, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_filter_notch_center_frequency, 0644,
+ show_filter, set_filter, 0);
static struct attribute *max31856_attributes[] = {
&iio_dev_attr_fault_ovuv.dev_attr.attr,
&iio_dev_attr_fault_oc.dev_attr.attr,
+ &iio_dev_attr_in_temp_filter_notch_center_frequency.dev_attr.attr,
NULL,
};
@@ -264,6 +393,8 @@ static const struct attribute_group max31856_group = {
static const struct iio_info max31856_info = {
.read_raw = max31856_read_raw,
+ .write_raw = max31856_write_raw,
+ .write_raw_get_fmt = max31856_write_raw_get_fmt,
.attrs = &max31856_group,
};
@@ -280,6 +411,7 @@ static int max31856_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
data->spi = spi;
+ data->filter_50hz = false;
spi_set_drvdata(spi, indio_dev);
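
max31856_write_raw() above accepts an oversampling ratio of 1..16 and stores the exponent of the next power of two in the 3-bit averaging field. A sketch of that rounding; fls() is open-coded here because it is a kernel helper:

#include <stdio.h>

static int fls32(unsigned int v)
{
	int n = 0;

	while (v) {
		v >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	int val;

	for (val = 1; val <= 16; val++) {
		int msb = fls32(val) - 1;

		if ((1 << msb) < val)	/* round up to the next power of two */
			msb++;
		printf("ratio %2d -> exponent %d (%2d samples)\n",
		       val, msb, 1 << msb);
	}
	return 0;
}

A request of 5, for instance, rounds up to 8 samples (exponent 3), matching the hardware's power-of-two averaging steps.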
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index d1360605209c..8d21116c7a22 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
@@ -24,13 +25,25 @@
enum {
MAX6675,
MAX31855,
+ MAX31855K,
+ MAX31855J,
+ MAX31855N,
+ MAX31855S,
+ MAX31855T,
+ MAX31855E,
+ MAX31855R,
+};
+
+static const char maxim_tc_types[] = {
+ 'K', '?', 'K', 'J', 'N', 'S', 'T', 'E', 'R'
};
static const struct iio_chan_spec max6675_channels[] = {
{ /* thermocouple temperature */
.type = IIO_TEMP,
.info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -48,7 +61,8 @@ static const struct iio_chan_spec max31855_channels[] = {
.type = IIO_TEMP,
.address = 2,
.info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -110,6 +124,7 @@ struct maxim_thermocouple_data {
const struct maxim_thermocouple_chip *chip;
u8 buffer[16] ____cacheline_aligned;
+ char tc_type;
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
@@ -196,6 +211,10 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
}
break;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ *val = data->tc_type;
+ ret = IIO_VAL_CHAR;
+ break;
}
return ret;
@@ -210,8 +229,9 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct maxim_thermocouple_data *data;
+ const int chip_type = (id->driver_data == MAX6675) ? MAX6675 : MAX31855;
const struct maxim_thermocouple_chip *chip =
- &maxim_thermocouple_chips[id->driver_data];
+ &maxim_thermocouple_chips[chip_type];
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
@@ -229,6 +249,7 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
data->spi = spi;
data->chip = chip;
+ data->tc_type = maxim_tc_types[id->driver_data];
ret = devm_iio_triggered_buffer_setup(&spi->dev,
indio_dev, NULL,
@@ -236,12 +257,22 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
if (ret)
return ret;
+ if (id->driver_data == MAX31855)
+ dev_warn(&spi->dev, "generic max31855 ID is deprecated\nplease use more specific part type");
+
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id maxim_thermocouple_id[] = {
{"max6675", MAX6675},
{"max31855", MAX31855},
+ {"max31855k", MAX31855K},
+ {"max31855j", MAX31855J},
+ {"max31855n", MAX31855N},
+ {"max31855s", MAX31855S},
+ {"max31855t", MAX31855T},
+ {"max31855e", MAX31855E},
+ {"max31855r", MAX31855R},
{},
};
MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
@@ -249,6 +280,13 @@ MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
static const struct of_device_id maxim_thermocouple_of_match[] = {
{ .compatible = "maxim,max6675" },
{ .compatible = "maxim,max31855" },
+ { .compatible = "maxim,max31855k" },
+ { .compatible = "maxim,max31855j" },
+ { .compatible = "maxim,max31855n" },
+ { .compatible = "maxim,max31855s" },
+ { .compatible = "maxim,max31855t" },
+ { .compatible = "maxim,max31855e" },
+ { .compatible = "maxim,max31855r" },
{ },
};
MODULE_DEVICE_TABLE(of, maxim_thermocouple_of_match);
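
The maxim_thermocouple changes above collapse every max31855<x> id onto the shared MAX31855 register layout while keeping a per-variant thermocouple letter for IIO_CHAN_INFO_THERMOCOUPLE_TYPE. A sketch of how one driver_data value selects both, reusing the enum and table from the patch:

#include <stdio.h>

enum { MAX6675, MAX31855, MAX31855K, MAX31855J, MAX31855N,
       MAX31855S, MAX31855T, MAX31855E, MAX31855R };

static const char maxim_tc_types[] = {
	'K', '?', 'K', 'J', 'N', 'S', 'T', 'E', 'R'
};

int main(void)
{
	int driver_data = MAX31855J;	/* from the matched spi_device_id */
	int chip_type = (driver_data == MAX6675) ? MAX6675 : MAX31855;

	printf("chip table %d, thermocouple type %c\n",
	       chip_type, maxim_tc_types[driver_data]);
	return 0;
}

The generic "max31855" id maps to '?' because the wired thermocouple type cannot be inferred from it, hence the deprecation warning in probe.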
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index a5dfe65cd9b9..2e0d32aa8436 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -297,9 +297,6 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
strlen(master_mode_table[i]))) {
regmap_update_bits(priv->regmap, TIM_CR2, mask,
i << shift);
- /* Make sure that registers are updated */
- regmap_update_bits(priv->regmap, TIM_EGR,
- TIM_EGR_UG, TIM_EGR_UG);
return len;
}
}
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 9a8871e21545..d1b14887960e 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,8 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- nldev.o restrack.o counters.o ib_core_uverbs.o
+ nldev.o restrack.o counters.o ib_core_uverbs.o \
+ trace.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -20,7 +21,8 @@ ib_cm-y := cm.o
iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
-rdma_cm-y := cma.o
+CFLAGS_cma_trace.o += -I$(src)
+rdma_cm-y := cma.o cma_trace.o
rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o
@@ -33,6 +35,7 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_cq.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
uverbs_std_types_mr.o uverbs_std_types_counters.o \
- uverbs_uapi.o uverbs_std_types_device.o
+ uverbs_uapi.o uverbs_std_types_device.o \
+ uverbs_std_types_async_fd.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 606fa6d86685..1753a9801b70 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
if (ib_nl_is_good_ip_resp(nlh))
ib_nl_process_good_ip_rsep(nlh);
- return skb->len;
+ return 0;
}
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index d535995711c3..17bfedd24cc3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -51,9 +51,8 @@ struct ib_pkey_cache {
struct ib_update_work {
struct work_struct work;
- struct ib_device *device;
- u8 port_num;
- bool enforce_security;
+ struct ib_event event;
+ bool enforce_security;
};
union ib_gid zgid;
@@ -130,7 +129,7 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
event.element.port_num = port;
event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&event);
+ ib_dispatch_event_clients(&event);
}
static const char * const gid_type_str[] = {
@@ -1034,7 +1033,7 @@ int ib_get_cached_pkey(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
cache = device->port_data[port_num].cache.pkey;
@@ -1043,7 +1042,7 @@ int ib_get_cached_pkey(struct ib_device *device,
else
*pkey = cache->table[index];
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
@@ -1058,9 +1057,9 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return 0;
}
@@ -1080,7 +1079,7 @@ int ib_find_cached_pkey(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
cache = device->port_data[port_num].cache.pkey;
@@ -1101,7 +1100,7 @@ int ib_find_cached_pkey(struct ib_device *device,
ret = 0;
}
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
@@ -1120,7 +1119,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
cache = device->port_data[port_num].cache.pkey;
@@ -1133,7 +1132,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
break;
}
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
@@ -1149,9 +1148,9 @@ int ib_get_cached_lmc(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
*lmc = device->port_data[port_num].cache.lmc;
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
@@ -1167,9 +1166,9 @@ int ib_get_cached_port_state(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- read_lock_irqsave(&device->cache.lock, flags);
+ read_lock_irqsave(&device->cache_lock, flags);
*port_state = device->port_data[port_num].cache.port_state;
- read_unlock_irqrestore(&device->cache.lock, flags);
+ read_unlock_irqrestore(&device->cache_lock, flags);
return ret;
}
@@ -1381,9 +1380,8 @@ err:
return ret;
}
-static void ib_cache_update(struct ib_device *device,
- u8 port,
- bool enforce_security)
+static int
+ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
{
struct ib_port_attr *tprops = NULL;
struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
@@ -1391,11 +1389,11 @@ static void ib_cache_update(struct ib_device *device,
int ret;
if (!rdma_is_port_valid(device, port))
- return;
+ return -EINVAL;
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
- return;
+ return -ENOMEM;
ret = ib_query_port(device, port, tprops);
if (ret) {
@@ -1413,8 +1411,10 @@ static void ib_cache_update(struct ib_device *device,
pkey_cache = kmalloc(struct_size(pkey_cache, table,
tprops->pkey_tbl_len),
GFP_KERNEL);
- if (!pkey_cache)
+ if (!pkey_cache) {
+ ret = -ENOMEM;
goto err;
+ }
pkey_cache->table_len = tprops->pkey_tbl_len;
@@ -1428,7 +1428,7 @@ static void ib_cache_update(struct ib_device *device,
}
}
- write_lock_irq(&device->cache.lock);
+ write_lock_irq(&device->cache_lock);
old_pkey_cache = device->port_data[port].cache.pkey;
@@ -1437,7 +1437,7 @@ static void ib_cache_update(struct ib_device *device,
device->port_data[port].cache.port_state = tprops->state;
device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
- write_unlock_irq(&device->cache.lock);
+ write_unlock_irq(&device->cache_lock);
if (enforce_security)
ib_security_cache_change(device,
@@ -1446,57 +1446,91 @@ static void ib_cache_update(struct ib_device *device,
kfree(old_pkey_cache);
kfree(tprops);
- return;
+ return 0;
err:
kfree(pkey_cache);
kfree(tprops);
+ return ret;
+}
+
+static void ib_cache_event_task(struct work_struct *_work)
+{
+ struct ib_update_work *work =
+ container_of(_work, struct ib_update_work, work);
+ int ret;
+
+ /* Before distributing the cache update event, first sync
+ * the cache.
+ */
+ ret = ib_cache_update(work->event.device, work->event.element.port_num,
+ work->enforce_security);
+
+ /* The GID event is already notified for individual GID entries by
+ * dispatch_gid_change_event(). Hence, notify for the rest of the
+ * events.
+ */
+ if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
+ ib_dispatch_event_clients(&work->event);
+
+ kfree(work);
}
-static void ib_cache_task(struct work_struct *_work)
+static void ib_generic_event_task(struct work_struct *_work)
{
struct ib_update_work *work =
container_of(_work, struct ib_update_work, work);
- ib_cache_update(work->device,
- work->port_num,
- work->enforce_security);
+ ib_dispatch_event_clients(&work->event);
kfree(work);
}
-static void ib_cache_event(struct ib_event_handler *handler,
- struct ib_event *event)
+static bool is_cache_update_event(const struct ib_event *event)
+{
+ return (event->event == IB_EVENT_PORT_ERR ||
+ event->event == IB_EVENT_PORT_ACTIVE ||
+ event->event == IB_EVENT_LID_CHANGE ||
+ event->event == IB_EVENT_PKEY_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_GID_CHANGE);
+}
+
+/**
+ * ib_dispatch_event - Dispatch an asynchronous event
+ * @event: Event to dispatch
+ *
+ * Low-level drivers must call ib_dispatch_event() to dispatch the
+ * event to all registered event handlers when an asynchronous event
+ * occurs.
+ */
+void ib_dispatch_event(const struct ib_event *event)
{
struct ib_update_work *work;
- if (event->event == IB_EVENT_PORT_ERR ||
- event->event == IB_EVENT_PORT_ACTIVE ||
- event->event == IB_EVENT_LID_CHANGE ||
- event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_CLIENT_REREGISTER ||
- event->event == IB_EVENT_GID_CHANGE) {
- work = kmalloc(sizeof *work, GFP_ATOMIC);
- if (work) {
- INIT_WORK(&work->work, ib_cache_task);
- work->device = event->device;
- work->port_num = event->element.port_num;
- if (event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_GID_CHANGE)
- work->enforce_security = true;
- else
- work->enforce_security = false;
-
- queue_work(ib_wq, &work->work);
- }
- }
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ if (is_cache_update_event(event))
+ INIT_WORK(&work->work, ib_cache_event_task);
+ else
+ INIT_WORK(&work->work, ib_generic_event_task);
+
+ work->event = *event;
+ if (event->event == IB_EVENT_PKEY_CHANGE ||
+ event->event == IB_EVENT_GID_CHANGE)
+ work->enforce_security = true;
+
+ queue_work(ib_wq, &work->work);
}
+EXPORT_SYMBOL(ib_dispatch_event);
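
/* A minimal usage sketch (illustrative, not part of the patch; ibdev and
 * port are placeholders): a low-level driver reports an asynchronous port
 * event like this, and the registered handlers then run from the ib_wq
 * work item queued above:
 *
 *	struct ib_event ev = {
 *		.device = ibdev,
 *		.element.port_num = port,
 *		.event = IB_EVENT_PORT_ACTIVE,
 *	};
 *	ib_dispatch_event(&ev);
 */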
int ib_cache_setup_one(struct ib_device *device)
{
unsigned int p;
int err;
- rwlock_init(&device->cache.lock);
+ rwlock_init(&device->cache_lock);
err = gid_table_setup_one(device);
if (err)
@@ -1505,9 +1539,6 @@ int ib_cache_setup_one(struct ib_device *device)
rdma_for_each_port (device, p)
ib_cache_update(device, p, true);
- INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
- device, ib_cache_event);
- ib_register_event_handler(&device->cache.event_handler);
return 0;
}
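
/* The cache no longer registers its own ib_event_handler:
 * ib_dispatch_event() now queues ib_cache_event_task() for cache-update
 * events, so the cache is synced before any client handler observes the
 * event.
 */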
@@ -1529,14 +1560,12 @@ void ib_cache_release_one(struct ib_device *device)
void ib_cache_cleanup_one(struct ib_device *device)
{
- /* The cleanup function unregisters the event handler,
- * waits for all in-progress workqueue elements and cleans
- * up the GID cache. This function should be called after
- * the device was removed from the devices list and all
- * clients were removed, so the cache exists but is
+ /* The cleanup function waits for all in-progress workqueue
+ * elements and cleans up the GID cache. This function should be
+ * called after the device was removed from the devices list and
+ * all clients were removed, so the cache exists but is
* non-functional and shouldn't be updated anymore.
*/
- ib_unregister_event_handler(&device->cache.event_handler);
flush_workqueue(ib_wq);
gid_table_cleanup_one(device);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 455b3659d84b..68cc1b2d6824 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -241,6 +241,7 @@ struct cm_id_private {
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
* Protected by the cm.lock spinlock. */
int listen_sharecount;
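+ /* Supports RCU-deferred freeing (kfree_rcu()) so that
+ * cm_acquire_id() can look the id up without holding cm.lock.
+ */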
+ struct rcu_head rcu;
struct ib_mad_send_buf *msg;
struct cm_timewait_info *timewait_info;
@@ -593,28 +594,16 @@ static void cm_free_id(__be32 local_id)
xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
}
-static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
+static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
+ rcu_read_lock();
cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- refcount_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
-
- return cm_id_priv;
-}
-
-static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
-{
- struct cm_id_private *cm_id_priv;
-
- spin_lock_irq(&cm.lock);
- cm_id_priv = cm_get_id(local_id, remote_id);
- spin_unlock_irq(&cm.lock);
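+ /* The id may already be undergoing destruction; it stays valid
+ * under rcu_read_lock() until the kfree_rcu() grace period ends,
+ * and a failed refcount_inc_not_zero() means it must not be
+ * returned.
+ */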
+ if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
+ !refcount_inc_not_zero(&cm_id_priv->refcount))
+ cm_id_priv = NULL;
+ rcu_read_unlock();
return cm_id_priv;
}
@@ -1089,7 +1078,7 @@ retest:
rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
kfree(cm_id_priv->private_data);
- kfree(cm_id_priv);
+ kfree_rcu(cm_id_priv, rcu);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
@@ -1262,54 +1251,72 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv));
- req_msg->local_comm_id = cm_id_priv->id.local_id;
- req_msg->service_id = param->service_id;
- req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
- cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
- cm_req_set_init_depth(req_msg, param->initiator_depth);
- cm_req_set_remote_resp_timeout(req_msg,
- param->remote_cm_response_timeout);
+ IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
+ IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
+ be64_to_cpu(cm_id_priv->id.device->node_guid));
+ IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
+ IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
+ IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
+ param->remote_cm_response_timeout);
cm_req_set_qp_type(req_msg, param->qp_type);
- cm_req_set_flow_ctrl(req_msg, param->flow_control);
- cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
- cm_req_set_local_resp_timeout(req_msg,
- param->local_cm_response_timeout);
- req_msg->pkey = param->primary_path->pkey;
- cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
- cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
+ IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
+ IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
+ IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
+ param->local_cm_response_timeout);
+ IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
+ be16_to_cpu(param->primary_path->pkey));
+ IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
+ param->primary_path->mtu);
+ IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
if (param->qp_type != IB_QPT_XRC_INI) {
- cm_req_set_resp_res(req_msg, param->responder_resources);
- cm_req_set_retry_count(req_msg, param->retry_count);
- cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
- cm_req_set_srq(req_msg, param->srq);
+ IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
+ param->responder_resources);
+ IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
+ IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
+ param->rnr_retry_count);
+ IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
}
- req_msg->primary_local_gid = pri_path->sgid;
- req_msg->primary_remote_gid = pri_path->dgid;
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
+ pri_path->sgid;
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
+ pri_path->dgid;
if (pri_ext) {
- req_msg->primary_local_gid.global.interface_id
- = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
- req_msg->primary_remote_gid.global.interface_id
- = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
}
if (pri_path->hop_limit <= 1) {
- req_msg->primary_local_lid = pri_ext ? 0 :
- htons(ntohl(sa_path_get_slid(pri_path)));
- req_msg->primary_remote_lid = pri_ext ? 0 :
- htons(ntohl(sa_path_get_dlid(pri_path)));
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(pri_ext ? 0 :
+ htons(ntohl(sa_path_get_slid(
+ pri_path)))));
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(pri_ext ? 0 :
+ htons(ntohl(sa_path_get_dlid(
+ pri_path)))));
} else {
/* Work-around until there's a way to obtain remote LID info */
- req_msg->primary_local_lid = IB_LID_PERMISSIVE;
- req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
}
- cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
- cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
- req_msg->primary_traffic_class = pri_path->traffic_class;
- req_msg->primary_hop_limit = pri_path->hop_limit;
- cm_req_set_primary_sl(req_msg, pri_path->sl);
- cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
- cm_req_set_primary_local_ack_timeout(req_msg,
+ IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
+ be32_to_cpu(pri_path->flow_label));
+ IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
+ IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
+ IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
+ IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
+ IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
+ (pri_path->hop_limit <= 1));
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
pri_path->packet_life_time));
@@ -1320,38 +1327,55 @@ static void cm_format_req(struct cm_req_msg *req_msg,
alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
alt_path->opa.slid);
- req_msg->alt_local_gid = alt_path->sgid;
- req_msg->alt_remote_gid = alt_path->dgid;
+ *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
+ alt_path->sgid;
+ *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
+ alt_path->dgid;
if (alt_ext) {
- req_msg->alt_local_gid.global.interface_id
- = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
- req_msg->alt_remote_gid.global.interface_id
- = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
+ IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
+ req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
+ IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
+ req_msg)
+ ->global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
}
if (alt_path->hop_limit <= 1) {
- req_msg->alt_local_lid = alt_ext ? 0 :
- htons(ntohl(sa_path_get_slid(alt_path)));
- req_msg->alt_remote_lid = alt_ext ? 0 :
- htons(ntohl(sa_path_get_dlid(alt_path)));
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(
+ alt_ext ? 0 :
+ htons(ntohl(sa_path_get_slid(
+ alt_path)))));
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(
+ alt_ext ? 0 :
+ htons(ntohl(sa_path_get_dlid(
+ alt_path)))));
} else {
- req_msg->alt_local_lid = IB_LID_PERMISSIVE;
- req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
}
- cm_req_set_alt_flow_label(req_msg,
- alt_path->flow_label);
- cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
- req_msg->alt_traffic_class = alt_path->traffic_class;
- req_msg->alt_hop_limit = alt_path->hop_limit;
- cm_req_set_alt_sl(req_msg, alt_path->sl);
- cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
- cm_req_set_alt_local_ack_timeout(req_msg,
+ IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
+ be32_to_cpu(alt_path->flow_label));
+ IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
+ IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
+ alt_path->traffic_class);
+ IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
+ alt_path->hop_limit);
+ IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
+ IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
+ (alt_path->hop_limit <= 1));
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
- memcpy(req_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
+ param->private_data_len);
}
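
/* Throughout this file, the wire-format cm_*_msg fields are now read and
 * written through the IBA_GET()/IBA_SET()/IBA_SET_MEM() accessors, which
 * operate on CPU-endian values for IBA-spec-defined bit ranges of the MAD.
 * That is why call sites convert with cpu_to_be*()/be*_to_cpu() wherever
 * the surrounding code still stores big-endian types.
 */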
static int cm_validate_req_param(struct ib_cm_req_param *param)
@@ -1443,8 +1467,8 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
- cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
- cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
+ cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1482,14 +1506,16 @@ static int cm_issue_rej(struct cm_port *port,
rej_msg = (struct cm_rej_msg *) msg->mad;
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
- rej_msg->remote_comm_id = rcv_msg->local_comm_id;
- rej_msg->local_comm_id = rcv_msg->remote_comm_id;
- cm_rej_set_msg_rejected(rej_msg, msg_rejected);
- rej_msg->reason = cpu_to_be16(reason);
+ IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
+ IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
+ IBA_SET(CM_REJ_REASON, rej_msg, reason);
if (ari && ari_length) {
- cm_rej_set_reject_info_len(rej_msg, ari_length);
- memcpy(rej_msg->ari, ari, ari_length);
+ IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
+ IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
}
ret = ib_post_send_mad(msg, NULL);
@@ -1501,8 +1527,10 @@ static int cm_issue_rej(struct cm_port *port,
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
- return ((req_msg->alt_local_lid) ||
- (ib_is_opa_gid(&req_msg->alt_local_gid)));
+ return ((cpu_to_be16(
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
+ (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
+ req_msg))));
}
static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
@@ -1522,14 +1550,18 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
sa_path_set_dlid(primary_path,
- ntohs(req_msg->primary_local_lid));
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
+ req_msg));
sa_path_set_slid(primary_path,
- ntohs(req_msg->primary_remote_lid));
+ IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
+ req_msg));
} else {
- lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
sa_path_set_dlid(primary_path, lid);
- lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
sa_path_set_slid(primary_path, lid);
}
@@ -1537,13 +1569,19 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
return;
if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
- sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
+ sa_path_set_dlid(alt_path,
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
+ req_msg));
+ sa_path_set_slid(alt_path,
+ IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
+ req_msg));
} else {
- lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
sa_path_set_dlid(alt_path, lid);
- lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
sa_path_set_slid(alt_path, lid);
}
}
@@ -1552,44 +1590,58 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
struct sa_path_rec *alt_path)
{
- primary_path->dgid = req_msg->primary_local_gid;
- primary_path->sgid = req_msg->primary_remote_gid;
- primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
- primary_path->hop_limit = req_msg->primary_hop_limit;
- primary_path->traffic_class = req_msg->primary_traffic_class;
+ primary_path->dgid =
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
+ primary_path->sgid =
+ *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
+ primary_path->flow_label =
+ cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
+ primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
+ primary_path->traffic_class =
+ IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
primary_path->reversible = 1;
- primary_path->pkey = req_msg->pkey;
- primary_path->sl = cm_req_get_primary_sl(req_msg);
+ primary_path->pkey =
+ cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
primary_path->mtu_selector = IB_SA_EQ;
- primary_path->mtu = cm_req_get_path_mtu(req_msg);
+ primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
primary_path->rate_selector = IB_SA_EQ;
- primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
+ primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
primary_path->packet_life_time_selector = IB_SA_EQ;
primary_path->packet_life_time =
- cm_req_get_primary_local_ack_timeout(req_msg);
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
- primary_path->service_id = req_msg->service_id;
+ primary_path->service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
if (sa_path_is_roce(primary_path))
primary_path->roce.route_resolved = false;
if (cm_req_has_alt_path(req_msg)) {
- alt_path->dgid = req_msg->alt_local_gid;
- alt_path->sgid = req_msg->alt_remote_gid;
- alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
- alt_path->hop_limit = req_msg->alt_hop_limit;
- alt_path->traffic_class = req_msg->alt_traffic_class;
+ alt_path->dgid = *IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
+ alt_path->sgid = *IBA_GET_MEM_PTR(
+ CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
+ alt_path->flow_label = cpu_to_be32(
+ IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
+ alt_path->hop_limit =
+ IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
+ alt_path->traffic_class =
+ IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
alt_path->reversible = 1;
- alt_path->pkey = req_msg->pkey;
- alt_path->sl = cm_req_get_alt_sl(req_msg);
+ alt_path->pkey =
+ cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
alt_path->mtu_selector = IB_SA_EQ;
- alt_path->mtu = cm_req_get_path_mtu(req_msg);
+ alt_path->mtu =
+ IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
alt_path->rate_selector = IB_SA_EQ;
- alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
+ alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
alt_path->packet_life_time_selector = IB_SA_EQ;
alt_path->packet_life_time =
- cm_req_get_alt_local_ack_timeout(req_msg);
+ IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
- alt_path->service_id = req_msg->service_id;
+ alt_path->service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
if (sa_path_is_roce(alt_path))
alt_path->roce.route_resolved = false;
@@ -1664,23 +1716,25 @@ static void cm_format_req_event(struct cm_work *work,
} else {
param->alternate_path = NULL;
}
- param->remote_ca_guid = req_msg->local_ca_guid;
- param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
- param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
+ param->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
+ param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
+ param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
param->qp_type = cm_req_get_qp_type(req_msg);
- param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
- param->responder_resources = cm_req_get_init_depth(req_msg);
- param->initiator_depth = cm_req_get_resp_res(req_msg);
+ param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
+ param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+ param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
param->local_cm_response_timeout =
- cm_req_get_remote_resp_timeout(req_msg);
- param->flow_control = cm_req_get_flow_ctrl(req_msg);
+ IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
+ param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
param->remote_cm_response_timeout =
- cm_req_get_local_resp_timeout(req_msg);
- param->retry_count = cm_req_get_retry_count(req_msg);
- param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
- param->srq = cm_req_get_srq(req_msg);
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
+ param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+ param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
+ param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
- work->cm_event.private_data = &req_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
@@ -1714,13 +1768,16 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
- cm_mra_set_msg_mraed(mra_msg, msg_mraed);
- mra_msg->local_comm_id = cm_id_priv->id.local_id;
- mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_mra_set_service_timeout(mra_msg, service_timeout);
+ IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
+ IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
if (private_data && private_data_len)
- memcpy(mra_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
+ private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
@@ -1732,36 +1789,42 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
- rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
+ IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
switch(cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
- rej_msg->local_comm_id = 0;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_MRA_REQ_SENT:
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
break;
default:
- rej_msg->local_comm_id = cm_id_priv->id.local_id;
- cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
+ IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
+ CM_MSG_RESPONSE_OTHER);
break;
}
- rej_msg->reason = cpu_to_be16(reason);
+ IBA_SET(CM_REJ_REASON, rej_msg, reason);
if (ari && ari_length) {
- cm_rej_set_reject_info_len(rej_msg, ari_length);
- memcpy(rej_msg->ari, ari, ari_length);
+ IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
+ IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
}
if (private_data && private_data_len)
- memcpy(rej_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
+ private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
@@ -1821,7 +1884,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
if (cur_cm_id_priv) {
@@ -1835,7 +1898,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
@@ -1851,8 +1914,9 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
}
/* Find matching listen request. */
- listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
- req_msg->service_id);
+ listen_cm_id_priv = cm_find_listen(
+ cm_id_priv->id.device,
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
if (!listen_cm_id_priv) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irq(&cm.lock);
@@ -1877,24 +1941,32 @@ out:
*/
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
- if (!cm_req_get_primary_subnet_local(req_msg)) {
- if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
- req_msg->primary_local_lid = ib_lid_be16(wc->slid);
- cm_req_set_primary_sl(req_msg, wc->sl);
+ if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
+ if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE) {
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(ib_lid_be16(wc->slid)));
+ IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
}
- if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
- req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE)
+ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
+ wc->dlid_path_bits);
}
- if (!cm_req_get_alt_subnet_local(req_msg)) {
- if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
- req_msg->alt_local_lid = ib_lid_be16(wc->slid);
- cm_req_set_alt_sl(req_msg, wc->sl);
+ if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
+ if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE) {
+ IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(ib_lid_be16(wc->slid)));
+ IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
}
- if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
- req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
+ req_msg)) == IB_LID_PERMISSIVE)
+ IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
+ wc->dlid_path_bits);
}
}
@@ -1914,7 +1986,8 @@ static int cm_req_handler(struct cm_work *work)
return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- cm_id_priv->id.remote_id = req_msg->local_comm_id;
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
@@ -1926,9 +1999,12 @@ static int cm_req_handler(struct cm_work *work)
ret = PTR_ERR(cm_id_priv->timewait_info);
goto destroy;
}
- cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
- cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
- cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
+ cm_id_priv->timewait_info->work.remote_id =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+ cm_id_priv->timewait_info->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
+ cm_id_priv->timewait_info->remote_qpn =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
@@ -1940,7 +2016,8 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_id_priv->id.service_id = req_msg->service_id;
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
@@ -1957,10 +2034,11 @@ static int cm_req_handler(struct cm_work *work)
work->path[0].rec_type =
sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
} else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
+ cm_path_set_rec_type(
+ work->port->cm_dev->ib_device, work->port->port_num,
+ &work->path[0],
+ IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
+ req_msg));
}
if (cm_req_has_alt_path(req_msg))
work->path[1].rec_type = work->path[0].rec_type;
@@ -2000,16 +2078,19 @@ static int cm_req_handler(struct cm_work *work)
}
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
- cm_req_get_local_resp_timeout(req_msg));
- cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
- cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
- cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
- cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
- cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
- cm_id_priv->pkey = req_msg->pkey;
- cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
- cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
+ cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
+ cm_id_priv->remote_qpn =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->initiator_depth =
+ IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
+ cm_id_priv->responder_resources =
+ IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+ cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
+ cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
+ cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
@@ -2032,29 +2113,35 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct ib_cm_rep_param *param)
{
cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
- rep_msg->local_comm_id = cm_id_priv->id.local_id;
- rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
- rep_msg->resp_resources = param->responder_resources;
- cm_rep_set_target_ack_delay(rep_msg,
- cm_id_priv->av.port->cm_dev->ack_delay);
- cm_rep_set_failover(rep_msg, param->failover_accepted);
- cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
- rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
+ IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
+ IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
+ param->responder_resources);
+ IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
+ cm_id_priv->av.port->cm_dev->ack_delay);
+ IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
+ IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
+ IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
+ be64_to_cpu(cm_id_priv->id.device->node_guid));
if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
- rep_msg->initiator_depth = param->initiator_depth;
- cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
- cm_rep_set_srq(rep_msg, param->srq);
- cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+ IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
+ param->initiator_depth);
+ IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
+ param->flow_control);
+ IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
+ IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
} else {
- cm_rep_set_srq(rep_msg, 1);
- cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+ IBA_SET(CM_REP_SRQ, rep_msg, 1);
+ IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
}
if (param->private_data && param->private_data_len)
- memcpy(rep_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
+ param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
@@ -2100,7 +2187,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
cm_id_priv->msg = msg;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
- cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
+ cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -2114,11 +2201,14 @@ static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
- rtu_msg->local_comm_id = cm_id_priv->id.local_id;
- rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
+ IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
if (private_data && private_data_len)
- memcpy(rtu_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
+ private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
@@ -2181,18 +2271,20 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rep_rcvd;
- param->remote_ca_guid = rep_msg->local_ca_guid;
- param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
+ param->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
+ param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
- param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
- param->responder_resources = rep_msg->initiator_depth;
- param->initiator_depth = rep_msg->resp_resources;
- param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
- param->failover_accepted = cm_rep_get_failover(rep_msg);
- param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
- param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
- param->srq = cm_rep_get_srq(rep_msg);
- work->cm_event.private_data = &rep_msg->private_data;
+ param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
+ param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
+ param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
+ param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
+ param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
+ param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
+ param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
+ param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
static void cm_dup_rep_handler(struct cm_work *work)
@@ -2203,8 +2295,9 @@ static void cm_dup_rep_handler(struct cm_work *work)
int ret;
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
- rep_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
if (!cm_id_priv)
return;
@@ -2248,11 +2341,12 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
- be32_to_cpu(rep_msg->remote_comm_id));
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
return -EINVAL;
}
@@ -2266,15 +2360,18 @@ static int cm_rep_handler(struct cm_work *work)
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
- pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
- __func__, cm_id_priv->id.state,
- be32_to_cpu(rep_msg->local_comm_id),
- be32_to_cpu(rep_msg->remote_comm_id));
+ pr_debug(
+ "%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
+ __func__, cm_id_priv->id.state,
+ IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
goto error;
}
- cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
- cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
+ cm_id_priv->timewait_info->work.remote_id =
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
+ cm_id_priv->timewait_info->remote_ca_guid =
+ cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
spin_lock(&cm.lock);
@@ -2284,7 +2381,7 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
pr_debug("%s: Failed to insert remote id %d\n", __func__,
- be32_to_cpu(rep_msg->remote_comm_id));
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
goto error;
}
/* Check for a stale connection. */
@@ -2293,7 +2390,7 @@ static int cm_rep_handler(struct cm_work *work)
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
- cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+ cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock(&cm.lock);
@@ -2302,9 +2399,10 @@ static int cm_rep_handler(struct cm_work *work)
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
- pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
- __func__, be32_to_cpu(rep_msg->local_comm_id),
- be32_to_cpu(rep_msg->remote_comm_id));
+ pr_debug(
+ "%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
+ __func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
@@ -2317,13 +2415,17 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
cm_id_priv->id.state = IB_CM_REP_RCVD;
- cm_id_priv->id.remote_id = rep_msg->local_comm_id;
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
- cm_id_priv->initiator_depth = rep_msg->resp_resources;
- cm_id_priv->responder_resources = rep_msg->initiator_depth;
- cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
- cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
- cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
+ cm_id_priv->initiator_depth =
+ IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
+ cm_id_priv->responder_resources =
+ IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
+ cm_id_priv->target_ack_delay =
+ IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
cm_id_priv->av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->av.timeout - 1);
@@ -2389,12 +2491,14 @@ static int cm_rtu_handler(struct cm_work *work)
int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
- rtu_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
+ cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.private_data = &rtu_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
@@ -2429,12 +2533,16 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
{
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
cm_form_tid(cm_id_priv));
- dreq_msg->local_comm_id = cm_id_priv->id.local_id;
- dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
+ IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
+ be32_to_cpu(cm_id_priv->remote_qpn));
if (private_data && private_data_len)
- memcpy(dreq_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
+ private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
@@ -2494,11 +2602,14 @@ static void cm_format_drep(struct cm_drep_msg *drep_msg,
u8 private_data_len)
{
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
- drep_msg->local_comm_id = cm_id_priv->id.local_id;
- drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
+ IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
if (private_data && private_data_len)
- memcpy(drep_msg->private_data, private_data, private_data_len);
+ IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
+ private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
@@ -2566,8 +2677,10 @@ static int cm_issue_drep(struct cm_port *port,
drep_msg = (struct cm_drep_msg *) msg->mad;
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
- drep_msg->remote_comm_id = dreq_msg->local_comm_id;
- drep_msg->local_comm_id = dreq_msg->remote_comm_id;
+ IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
+ IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
@@ -2584,22 +2697,26 @@ static int cm_dreq_handler(struct cm_work *work)
int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
- dreq_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
+ cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
if (!cm_id_priv) {
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
- pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
- __func__, be32_to_cpu(dreq_msg->local_comm_id),
- be32_to_cpu(dreq_msg->remote_comm_id));
+ pr_debug(
+ "%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
+ __func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
return -EINVAL;
}
- work->cm_event.private_data = &dreq_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
spin_lock_irq(&cm_id_priv->lock);
- if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
+ if (cm_id_priv->local_qpn !=
+ cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
goto unlock;
switch (cm_id_priv->id.state) {
@@ -2665,12 +2782,14 @@ static int cm_drep_handler(struct cm_work *work)
int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
- drep_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
+ cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.private_data = &drep_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
@@ -2766,10 +2885,11 @@ static void cm_format_rej_event(struct cm_work *work)
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rej_rcvd;
- param->ari = rej_msg->ari;
- param->ari_length = cm_rej_get_reject_info_len(rej_msg);
- param->reason = __be16_to_cpu(rej_msg->reason);
- work->cm_event.private_data = &rej_msg->private_data;
+ param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
+ param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
+ param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
@@ -2778,29 +2898,29 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
struct cm_id_private *cm_id_priv;
__be32 remote_id;
- remote_id = rej_msg->local_comm_id;
+ remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
- if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
+ if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
spin_lock_irq(&cm.lock);
- timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
- remote_id);
+ timewait_info = cm_find_remote_id(
+ *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
+ remote_id);
if (!timewait_info) {
spin_unlock_irq(&cm.lock);
return NULL;
}
- cm_id_priv = xa_load(&cm.local_id_table,
- cm_local_id(timewait_info->work.local_id));
- if (cm_id_priv) {
- if (cm_id_priv->id.remote_id == remote_id)
- refcount_inc(&cm_id_priv->refcount);
- else
- cm_id_priv = NULL;
- }
+ cm_id_priv =
+ cm_acquire_id(timewait_info->work.local_id, remote_id);
spin_unlock_irq(&cm.lock);
- } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
- cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
+ } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
+ CM_MSG_RESPONSE_REQ)
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
+ 0);
else
- cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
+ remote_id);
return cm_id_priv;
}
@@ -2828,7 +2948,7 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
- if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
+ if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
cm_enter_timewait(cm_id_priv);
else
cm_reset_to_idle(cm_id_priv);
@@ -2958,13 +3078,16 @@ EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
- switch (cm_mra_get_msg_mraed(mra_msg)) {
+ switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
case CM_MSG_RESPONSE_REQ:
- return cm_acquire_id(mra_msg->remote_comm_id, 0);
+ return cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
+ 0);
case CM_MSG_RESPONSE_REP:
case CM_MSG_RESPONSE_OTHER:
- return cm_acquire_id(mra_msg->remote_comm_id,
- mra_msg->local_comm_id);
+ return cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
+ cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
default:
return NULL;
}
@@ -2981,30 +3104,34 @@ static int cm_mra_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.private_data = &mra_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
work->cm_event.param.mra_rcvd.service_timeout =
- cm_mra_get_service_timeout(mra_msg);
- timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
+ IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
+ timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
cm_convert_to_ms(cm_id_priv->av.timeout);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+ CM_MSG_RESPONSE_REQ ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
break;
case IB_CM_REP_SENT:
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+ CM_MSG_RESPONSE_REP ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
break;
case IB_CM_ESTABLISHED:
- if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
+ if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
+ CM_MSG_RESPONSE_OTHER ||
cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout)) {
@@ -3046,117 +3173,23 @@ out:
return -EINVAL;
}
-static void cm_format_lap(struct cm_lap_msg *lap_msg,
- struct cm_id_private *cm_id_priv,
- struct sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len)
-{
- bool alt_ext = false;
-
- if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
- alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
- alternate_path->opa.slid);
- cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
- cm_form_tid(cm_id_priv));
- lap_msg->local_comm_id = cm_id_priv->id.local_id;
- lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
- cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
- /* todo: need remote CM response timeout */
- cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
- lap_msg->alt_local_lid =
- htons(ntohl(sa_path_get_slid(alternate_path)));
- lap_msg->alt_remote_lid =
- htons(ntohl(sa_path_get_dlid(alternate_path)));
- lap_msg->alt_local_gid = alternate_path->sgid;
- lap_msg->alt_remote_gid = alternate_path->dgid;
- if (alt_ext) {
- lap_msg->alt_local_gid.global.interface_id
- = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
- lap_msg->alt_remote_gid.global.interface_id
- = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
- }
- cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
- cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
- lap_msg->alt_hop_limit = alternate_path->hop_limit;
- cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
- cm_lap_set_sl(lap_msg, alternate_path->sl);
- cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
- cm_lap_set_local_ack_timeout(lap_msg,
- cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
- alternate_path->packet_life_time));
-
- if (private_data && private_data_len)
- memcpy(lap_msg->private_data, private_data, private_data_len);
-}
-
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
- struct sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len)
-{
- struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
- unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED ||
- (cm_id->lap_state != IB_CM_LAP_UNINIT &&
- cm_id->lap_state != IB_CM_LAP_IDLE)) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
- cm_id_priv);
- if (ret)
- goto out;
- cm_id_priv->alt_av.timeout =
- cm_ack_timeout(cm_id_priv->target_ack_delay,
- cm_id_priv->alt_av.timeout - 1);
-
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
-
- cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
- alternate_path, private_data, private_data_len);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
-
- ret = ib_post_send_mad(msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
- }
-
- cm_id->lap_state = IB_CM_LAP_SENT;
- cm_id_priv->msg = msg;
-
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(ib_send_cm_lap);
-
static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
struct sa_path_rec *path)
{
u32 lid;
if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
- sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
+ sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
+ lap_msg));
+ sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
+ lap_msg));
} else {
- lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
sa_path_set_dlid(path, lid);
- lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
+ lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
+ CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
sa_path_set_slid(path, lid);
}
}
@@ -3165,20 +3198,23 @@ static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
struct sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
- path->dgid = lap_msg->alt_local_gid;
- path->sgid = lap_msg->alt_remote_gid;
- path->flow_label = cm_lap_get_flow_label(lap_msg);
- path->hop_limit = lap_msg->alt_hop_limit;
- path->traffic_class = cm_lap_get_traffic_class(lap_msg);
+ path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
+ path->sgid =
+ *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
+ path->flow_label =
+ cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
+ path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
+ path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
path->reversible = 1;
path->pkey = cm_id_priv->pkey;
- path->sl = cm_lap_get_sl(lap_msg);
+ path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
path->mtu_selector = IB_SA_EQ;
path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
- path->rate = cm_lap_get_packet_rate(lap_msg);
+ path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
- path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
+ path->packet_life_time =
+ IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
path->packet_life_time -= (path->packet_life_time > 0);
cm_format_path_lid_from_lap(lap_msg, path);
}
@@ -3200,20 +3236,22 @@ static int cm_lap_handler(struct cm_work *work)
/* todo: verify LAP request and send reject APR if invalid. */
lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
- lap_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
+ cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
if (!cm_id_priv)
return -EINVAL;
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &lap_msg->alt_local_gid);
+ work->port->port_num, &work->path[0],
+ IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
+ lap_msg));
param->alternate_path = &work->path[0];
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
- work->cm_event.private_data = &lap_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
@@ -3278,72 +3316,6 @@ deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
-static void cm_format_apr(struct cm_apr_msg *apr_msg,
- struct cm_id_private *cm_id_priv,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len)
-{
- cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
- apr_msg->local_comm_id = cm_id_priv->id.local_id;
- apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
- apr_msg->ap_status = (u8) status;
-
- if (info && info_length) {
- apr_msg->info_length = info_length;
- memcpy(apr_msg->info, info, info_length);
- }
-
- if (private_data && private_data_len)
- memcpy(apr_msg->private_data, private_data, private_data_len);
-}
-
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len)
-{
- struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
- unsigned long flags;
- int ret;
-
- if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
- (info && info_length > IB_CM_APR_INFO_LENGTH))
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED ||
- (cm_id->lap_state != IB_CM_LAP_RCVD &&
- cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (ret)
- goto out;
-
- cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
- info, info_length, private_data, private_data_len);
- ret = ib_post_send_mad(msg, NULL);
- if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_free_msg(msg);
- return ret;
- }
-
- cm_id->lap_state = IB_CM_LAP_IDLE;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(ib_send_cm_apr);
-
static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
@@ -3358,15 +3330,20 @@ static int cm_apr_handler(struct cm_work *work)
return -EINVAL;
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
- apr_msg->local_comm_id);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
+ cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
- work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
- work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
- work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
- work->cm_event.private_data = &apr_msg->private_data;
+ work->cm_event.param.apr_rcvd.ap_status =
+ IBA_GET(CM_APR_AR_STATUS, apr_msg);
+ work->cm_event.param.apr_rcvd.apr_info =
+ IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
+ work->cm_event.param.apr_rcvd.info_len =
+ IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
@@ -3438,13 +3415,16 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
{
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
cm_form_tid(cm_id_priv));
- sidr_req_msg->request_id = cm_id_priv->id.local_id;
- sidr_req_msg->pkey = param->path->pkey;
- sidr_req_msg->service_id = param->service_id;
+ IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
+ be32_to_cpu(cm_id_priv->id.local_id));
+ IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
+ be16_to_cpu(param->path->pkey));
+ IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
+ be64_to_cpu(param->service_id));
if (param->private_data && param->private_data_len)
- memcpy(sidr_req_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
+ param->private_data, param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
@@ -3508,13 +3488,15 @@ static void cm_format_sidr_req_event(struct cm_work *work,
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_req_rcvd;
- param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
+ param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
param->listen_id = listen_id;
- param->service_id = sidr_req_msg->service_id;
+ param->service_id =
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
param->bth_pkey = cm_get_bth_pkey(work);
param->port = work->port->port_num;
param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
- work->cm_event.private_data = &sidr_req_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}
static int cm_sidr_req_handler(struct cm_work *work)
@@ -3542,7 +3524,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
if (ret)
goto out;
- cm_id_priv->id.remote_id = sidr_req_msg->request_id;
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
@@ -3555,8 +3538,9 @@ static int cm_sidr_req_handler(struct cm_work *work)
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
- cur_cm_id_priv = cm_find_listen(cm_id->device,
- sidr_req_msg->service_id);
+ cur_cm_id_priv = cm_find_listen(
+ cm_id->device,
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)));
if (!cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
@@ -3568,7 +3552,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = cur_cm_id_priv->id.context;
- cm_id_priv->id.service_id = sidr_req_msg->service_id;
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
@@ -3586,18 +3571,21 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
{
cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
cm_id_priv->tid);
- sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
- sidr_rep_msg->status = param->status;
- cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
- sidr_rep_msg->service_id = cm_id_priv->id.service_id;
- sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
+ IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
+ be32_to_cpu(cm_id_priv->id.remote_id));
+ IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
+ IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
+ IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
+ be64_to_cpu(cm_id_priv->id.service_id));
+ IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
if (param->info && param->info_length)
- memcpy(sidr_rep_msg->info, param->info, param->info_length);
+ IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
+ param->info, param->info_length);
if (param->private_data && param->private_data_len)
- memcpy(sidr_rep_msg->private_data, param->private_data,
- param->private_data_len);
+ IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
+ param->private_data, param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
@@ -3657,13 +3645,16 @@ static void cm_format_sidr_rep_event(struct cm_work *work,
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_rep_rcvd;
- param->status = sidr_rep_msg->status;
- param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
- param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
- param->info = &sidr_rep_msg->info;
- param->info_len = sidr_rep_msg->info_length;
+ param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
+ param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
+ param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
+ param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
+ sidr_rep_msg);
+ param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
+ sidr_rep_msg);
param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
- work->cm_event.private_data = &sidr_rep_msg->private_data;
+ work->cm_event.private_data =
+ IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}
static int cm_sidr_rep_handler(struct cm_work *work)
@@ -3673,7 +3664,8 @@ static int cm_sidr_rep_handler(struct cm_work *work)
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
+ cm_id_priv = cm_acquire_id(
+ cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 92d7260ac913..0cc40656b5c5 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -8,6 +8,7 @@
#ifndef CM_MSGS_H
#define CM_MSGS_H
+#include <rdma/ibta_vol1_c12.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>
@@ -18,120 +19,14 @@
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
-struct cm_req_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 rsvd4;
- __be64 service_id;
- __be64 local_ca_guid;
- __be32 rsvd24;
- __be32 local_qkey;
- /* local QPN:24, responder resources:8 */
- __be32 offset32;
- /* local EECN:24, initiator depth:8 */
- __be32 offset36;
- /*
- * remote EECN:24, remote CM response timeout:5,
- * transport service type:2, end-to-end flow control:1
- */
- __be32 offset40;
- /* starting PSN:24, local CM response timeout:5, retry count:3 */
- __be32 offset44;
- __be16 pkey;
- /* path MTU:4, RDC exists:1, RNR retry count:3. */
- u8 offset50;
- /* max CM Retries:4, SRQ:1, extended transport type:3 */
- u8 offset51;
-
- __be16 primary_local_lid;
- __be16 primary_remote_lid;
- union ib_gid primary_local_gid;
- union ib_gid primary_remote_gid;
- /* flow label:20, rsvd:6, packet rate:6 */
- __be32 primary_offset88;
- u8 primary_traffic_class;
- u8 primary_hop_limit;
- /* SL:4, subnet local:1, rsvd:3 */
- u8 primary_offset94;
- /* local ACK timeout:5, rsvd:3 */
- u8 primary_offset95;
-
- __be16 alt_local_lid;
- __be16 alt_remote_lid;
- union ib_gid alt_local_gid;
- union ib_gid alt_remote_gid;
- /* flow label:20, rsvd:6, packet rate:6 */
- __be32 alt_offset132;
- u8 alt_traffic_class;
- u8 alt_hop_limit;
- /* SL:4, subnet local:1, rsvd:3 */
- u8 alt_offset138;
- /* local ACK timeout:5, rsvd:3 */
- u8 alt_offset139;
-
- u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-
-} __packed;
-
-static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
-}
-
-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
-{
- req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(req_msg->offset32) &
- 0x000000FF));
-}
-
-static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
-{
- return (u8) be32_to_cpu(req_msg->offset32);
-}
-
-static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
-{
- req_msg->offset32 = cpu_to_be32(resp_res |
- (be32_to_cpu(req_msg->offset32) &
- 0xFFFFFF00));
-}
-
-static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
-{
- return (u8) be32_to_cpu(req_msg->offset36);
-}
-
-static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
- u8 init_depth)
-{
- req_msg->offset36 = cpu_to_be32(init_depth |
- (be32_to_cpu(req_msg->offset36) &
- 0xFFFFFF00));
-}
-
-static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
-}
-
-static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
- u8 resp_timeout)
-{
- req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
- (be32_to_cpu(req_msg->offset40) &
- 0xFFFFFF07));
-}
-
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
- u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
+ u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg);
switch(transport_type) {
case 0: return IB_QPT_RC;
case 1: return IB_QPT_UC;
case 3:
- switch (req_msg->offset51 & 0x7) {
+ switch (IBA_GET(CM_REQ_EXTENDED_TRANSPORT_TYPE, req_msg)) {
case 1: return IB_QPT_XRC_TGT;
default: return 0;
}
@@ -144,240 +39,17 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
{
switch(qp_type) {
case IB_QPT_UC:
- req_msg->offset40 = cpu_to_be32((be32_to_cpu(
- req_msg->offset40) &
- 0xFFFFFFF9) | 0x2);
+ IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1);
break;
case IB_QPT_XRC_INI:
- req_msg->offset40 = cpu_to_be32((be32_to_cpu(
- req_msg->offset40) &
- 0xFFFFFFF9) | 0x6);
- req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+ IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 3);
+ IBA_SET(CM_REQ_EXTENDED_TRANSPORT_TYPE, req_msg, 1);
break;
default:
- req_msg->offset40 = cpu_to_be32(be32_to_cpu(
- req_msg->offset40) &
- 0xFFFFFFF9);
+ IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 0);
}
}
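
Note the asymmetry cm_req_get_qp_type()/cm_req_set_qp_type() preserve: the active side sets IB_QPT_XRC_INI, which goes on the wire as TransportServiceType 3 plus ExtendedTransportType 1, and the passive side decodes exactly that encoding as IB_QPT_XRC_TGT, since the peer of an XRC initiator is an XRC target. A hedged round-trip sketch with plain integers standing in for the packed MAD fields:

#include <assert.h>

/* Sketch only: the two REQ fields as plain integers, not packed MAD bits. */
enum qpt { QPT_RC, QPT_UC, QPT_XRC_INI, QPT_XRC_TGT, QPT_INVAL };
struct req { unsigned transport_service_type, extended_transport_type; };

static void req_set_qp_type(struct req *r, enum qpt t)
{
	switch (t) {
	case QPT_UC:
		r->transport_service_type = 1;
		break;
	case QPT_XRC_INI:
		r->transport_service_type = 3;	/* "extended" transport... */
		r->extended_transport_type = 1;	/* ...and the extension is XRC */
		break;
	default:
		r->transport_service_type = 0;	/* RC */
	}
}

static enum qpt req_get_qp_type(const struct req *r)
{
	switch (r->transport_service_type) {
	case 0: return QPT_RC;
	case 1: return QPT_UC;
	case 3: return r->extended_transport_type == 1 ? QPT_XRC_TGT : QPT_INVAL;
	default: return QPT_INVAL;
	}
}

int main(void)
{
	struct req r = { 0, 0 };

	req_set_qp_type(&r, QPT_XRC_INI);
	assert(req_get_qp_type(&r) == QPT_XRC_TGT);	/* initiator's REQ reads as target */
	return 0;
}
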
-static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
-{
- return be32_to_cpu(req_msg->offset40) & 0x1;
-}
-
-static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
- u8 flow_ctrl)
-{
- req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
- (be32_to_cpu(req_msg->offset40) &
- 0xFFFFFFFE));
-}
-
-static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
-}
-
-static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
- __be32 starting_psn)
-{
- req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
- (be32_to_cpu(req_msg->offset44) & 0x000000FF));
-}
-
-static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
-}
-
-static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
- u8 resp_timeout)
-{
- req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
- (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
-}
-
-static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
-{
- return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
-}
-
-static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
- u8 retry_count)
-{
- req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
- (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
-}
-
-static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
-{
- return req_msg->offset50 >> 4;
-}
-
-static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
-{
- req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
-}
-
-static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
-{
- return req_msg->offset50 & 0x7;
-}
-
-static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
- u8 rnr_retry_count)
-{
- req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
- (rnr_retry_count & 0x7));
-}
-
-static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
-{
- return req_msg->offset51 >> 4;
-}
-
-static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
- u8 retries)
-{
- req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
-}
-
-static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
-{
- return (req_msg->offset51 & 0x8) >> 3;
-}
-
-static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
-{
- req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
- ((srq & 0x1) << 3));
-}
-
-static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
-}
-
-static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
- __be32 flow_label)
-{
- req_msg->primary_offset88 = cpu_to_be32(
- (be32_to_cpu(req_msg->primary_offset88) &
- 0x00000FFF) |
- (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
-{
- return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
-}
-
-static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
- u8 rate)
-{
- req_msg->primary_offset88 = cpu_to_be32(
- (be32_to_cpu(req_msg->primary_offset88) &
- 0xFFFFFFC0) | (rate & 0x3F));
-}
-
-static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->primary_offset94 >> 4);
-}
-
-static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
-{
- req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
- (sl << 4));
-}
-
-static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
-{
- return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
-}
-
-static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
- u8 subnet_local)
-{
- req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
- ((subnet_local & 0x1) << 3));
-}
-
-static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->primary_offset95 >> 3);
-}
-
-static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
- u8 local_ack_timeout)
-{
- req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
- (local_ack_timeout << 3));
-}
-
-static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
-{
- return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
-}
-
-static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
- __be32 flow_label)
-{
- req_msg->alt_offset132 = cpu_to_be32(
- (be32_to_cpu(req_msg->alt_offset132) &
- 0x00000FFF) |
- (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
-{
- return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
-}
-
-static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
- u8 rate)
-{
- req_msg->alt_offset132 = cpu_to_be32(
- (be32_to_cpu(req_msg->alt_offset132) &
- 0xFFFFFFC0) | (rate & 0x3F));
-}
-
-static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->alt_offset138 >> 4);
-}
-
-static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
-{
- req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
- (sl << 4));
-}
-
-static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
-{
- return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
-}
-
-static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
- u8 subnet_local)
-{
- req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
- ((subnet_local & 0x1) << 3));
-}
-
-static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
-{
- return (u8) (req_msg->alt_offset139 >> 3);
-}
-
-static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
- u8 local_ack_timeout)
-{
- req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
- (local_ack_timeout << 3));
-}
-
/* Message REJected or MRAed */
enum cm_msg_response {
CM_MSG_RESPONSE_REQ = 0x0,
@@ -385,419 +57,12 @@ enum cm_msg_response {
 CM_MSG_RESPONSE_REP = 0x1,
 CM_MSG_RESPONSE_OTHER = 0x2
};
-struct cm_mra_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- /* message MRAed:2, rsvd:6 */
- u8 offset8;
- /* service timeout:5, rsvd:3 */
- u8 offset9;
-
- u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
-{
- return (u8) (mra_msg->offset8 >> 6);
-}
-
-static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
-{
- mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
-}
-
-static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
-{
- return (u8) (mra_msg->offset9 >> 3);
-}
-
-static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
- u8 service_timeout)
-{
- mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
- (service_timeout << 3));
-}
-
-struct cm_rej_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- /* message REJected:2, rsvd:6 */
- u8 offset8;
- /* reject info length:7, rsvd:1. */
- u8 offset9;
- __be16 reason;
- u8 ari[IB_CM_REJ_ARI_LENGTH];
-
- u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
-{
- return (u8) (rej_msg->offset8 >> 6);
-}
-
-static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
-{
- rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
-}
-
-static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
-{
- return (u8) (rej_msg->offset9 >> 1);
-}
-
-static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
- u8 len)
-{
- rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
-}
-
-struct cm_rep_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- __be32 local_qkey;
- /* local QPN:24, rsvd:8 */
- __be32 offset12;
- /* local EECN:24, rsvd:8 */
- __be32 offset16;
- /* starting PSN:24 rsvd:8 */
- __be32 offset20;
- u8 resp_resources;
- u8 initiator_depth;
- /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
- u8 offset26;
- /* RNR retry count:3, SRQ:1, rsvd:5 */
- u8 offset27;
- __be64 local_ca_guid;
-
- u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
-}
-
-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
-{
- rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
-}
-
-static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
-}
-
-static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
-{
- rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
- (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
-}
-
static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
return (qp_type == IB_QPT_XRC_INI) ?
- cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
-}
-
-static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
-}
-
-static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
- __be32 starting_psn)
-{
- rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
- (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
-}
-
-static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
-{
- return (u8) (rep_msg->offset26 >> 3);
-}
-
-static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
- u8 target_ack_delay)
-{
- rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
- (target_ack_delay << 3));
-}
-
-static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
-{
- return (u8) ((rep_msg->offset26 & 0x06) >> 1);
-}
-
-static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
-{
- rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
- ((failover & 0x3) << 1));
-}
-
-static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
-{
- return (u8) (rep_msg->offset26 & 0x01);
-}
-
-static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
- u8 flow_ctrl)
-{
- rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
- (flow_ctrl & 0x1));
-}
-
-static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
-{
- return (u8) (rep_msg->offset27 >> 5);
-}
-
-static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
- u8 rnr_retry_count)
-{
- rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
- (rnr_retry_count << 5));
-}
-
-static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
-{
- return (u8) ((rep_msg->offset27 >> 4) & 0x1);
-}
-
-static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
-{
- rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
- ((srq & 0x1) << 4));
-}
-
-struct cm_rtu_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-struct cm_dreq_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
- /* remote QPN/EECN:24, rsvd:8 */
- __be32 offset8;
-
- u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
-{
- return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
-}
-
-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
-{
- dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
-}
-
-struct cm_drep_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
-
-} __packed;
-
-struct cm_lap_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- __be32 rsvd8;
- /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
- __be32 offset12;
- __be32 rsvd16;
-
- __be16 alt_local_lid;
- __be16 alt_remote_lid;
- union ib_gid alt_local_gid;
- union ib_gid alt_remote_gid;
- /* flow label:20, rsvd:4, traffic class:8 */
- __be32 offset56;
- u8 alt_hop_limit;
- /* rsvd:2, packet rate:6 */
- u8 offset61;
- /* SL:4, subnet local:1, rsvd:3 */
- u8 offset62;
- /* local ACK timeout:5, rsvd:3 */
- u8 offset63;
-
- u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
-} __packed;
-
-static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
-{
- return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
-}
-
-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
-{
- lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(lap_msg->offset12) &
- 0x000000FF));
-}
-
-static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
-{
- return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
-}
-
-static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
- u8 resp_timeout)
-{
- lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
- (be32_to_cpu(lap_msg->offset12) &
- 0xFFFFFF07));
-}
-
-static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
-{
- return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
-}
-
-static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
- __be32 flow_label)
-{
- lap_msg->offset56 = cpu_to_be32(
- (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
- (be32_to_cpu(flow_label) << 12));
-}
-
-static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
-{
- return (u8) be32_to_cpu(lap_msg->offset56);
-}
-
-static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
- u8 traffic_class)
-{
- lap_msg->offset56 = cpu_to_be32(traffic_class |
- (be32_to_cpu(lap_msg->offset56) &
- 0xFFFFFF00));
-}
-
-static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
-{
- return lap_msg->offset61 & 0x3F;
-}
-
-static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
- u8 packet_rate)
-{
- lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
-}
-
-static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
-{
- return lap_msg->offset62 >> 4;
-}
-
-static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
-{
- lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
-}
-
-static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
-{
- return (lap_msg->offset62 >> 3) & 0x1;
-}
-
-static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
- u8 subnet_local)
-{
- lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
- (lap_msg->offset61 & 0xF7);
-}
-static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
-{
- return lap_msg->offset63 >> 3;
-}
-
-static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
- u8 local_ack_timeout)
-{
- lap_msg->offset63 = (local_ack_timeout << 3) |
- (lap_msg->offset63 & 0x07);
-}
-
-struct cm_apr_msg {
- struct ib_mad_hdr hdr;
-
- __be32 local_comm_id;
- __be32 remote_comm_id;
-
- u8 info_length;
- u8 ap_status;
- __be16 rsvd;
- u8 info[IB_CM_APR_INFO_LENGTH];
-
- u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
-} __packed;
-
-struct cm_sidr_req_msg {
- struct ib_mad_hdr hdr;
-
- __be32 request_id;
- __be16 pkey;
- __be16 rsvd;
- __be64 service_id;
-
- u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __packed;
-
-struct cm_sidr_rep_msg {
- struct ib_mad_hdr hdr;
-
- __be32 request_id;
- u8 status;
- u8 info_length;
- __be16 rsvd;
- /* QPN:24, rsvd:8 */
- __be32 offset8;
- __be64 service_id;
- __be32 qkey;
- u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
-
- u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
-} __packed;
-
-static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
-{
- return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
-}
-
-static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
- __be32 qpn)
-{
- sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
- (be32_to_cpu(sidr_rep_msg->offset8) &
- 0x000000FF));
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_EE_CONTEXT_NUMBER,
+ rep_msg)) :
+ cpu_to_be32(IBA_GET(CM_REP_LOCAL_QPN, rep_msg));
}
#endif /* CM_MSGS_H */
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 43a6f07e0afe..72f032160c4b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -36,6 +36,7 @@
#include "core_priv.h"
#include "cma_priv.h"
+#include "cma_trace.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
@@ -877,6 +878,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
+ trace_cm_id_create(id_priv);
return &id_priv->id;
}
EXPORT_SYMBOL(__rdma_create_id);
@@ -928,27 +930,34 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (id->device != pd->device)
- return -EINVAL;
+ if (id->device != pd->device) {
+ ret = -EINVAL;
+ goto out_err;
+ }
qp_init_attr->port_num = id->port_num;
qp = ib_create_qp(pd, qp_init_attr);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out_err;
+ }
if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp);
else
ret = cma_init_conn_qp(id_priv, qp);
if (ret)
- goto err;
+ goto out_destroy;
id->qp = qp;
id_priv->qp_num = qp->qp_num;
id_priv->srq = (qp->srq != NULL);
+ trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
return 0;
-err:
+out_destroy:
ib_destroy_qp(qp);
+out_err:
+ trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
@@ -958,6 +967,7 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
struct rdma_id_private *id_priv;
id_priv = container_of(id, struct rdma_id_private, id);
+ trace_cm_qp_destroy(id_priv);
mutex_lock(&id_priv->qp_mutex);
ib_destroy_qp(id_priv->id.qp);
id_priv->id.qp = NULL;
@@ -1811,6 +1821,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
enum rdma_cm_state state;
id_priv = container_of(id, struct rdma_id_private, id);
+ trace_cm_id_destroy(id_priv);
state = cma_exch(id_priv, RDMA_CM_DESTROYING);
cma_cancel_operation(id_priv, state);
@@ -1863,6 +1874,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
if (ret)
goto reject;
+ trace_cm_send_rtu(id_priv);
ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
if (ret)
goto reject;
@@ -1871,6 +1883,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
reject:
pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
cma_modify_qp_err(id_priv);
+ trace_cm_send_rej(id_priv);
ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
return ret;
@@ -1890,6 +1903,17 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = rep_data->remote_qpn;
}
+static int cma_cm_event_handler(struct rdma_id_private *id_priv,
+ struct rdma_cm_event *event)
+{
+ int ret;
+
+ trace_cm_event_handler(id_priv, event);
+ ret = id_priv->id.event_handler(&id_priv->id, event);
+ trace_cm_event_done(id_priv, event, ret);
+ return ret;
+}
+
static int cma_ib_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *ib_event)
{
@@ -1912,8 +1936,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
break;
case IB_CM_REP_RECEIVED:
if (cma_comp(id_priv, RDMA_CM_CONNECT) &&
- (id_priv->id.qp_type != IB_QPT_UD))
+ (id_priv->id.qp_type != IB_QPT_UD)) {
+ trace_cm_send_mra(id_priv);
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ }
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
@@ -1958,7 +1984,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
goto out;
}
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
@@ -2119,6 +2145,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
+ trace_cm_req_handler(listen_id, ib_event->event);
if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
ret = -EINVAL;
goto net_dev_put;
@@ -2161,7 +2188,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
* until we're done accessing it.
*/
atomic_inc(&conn_id->refcount);
- ret = conn_id->id.event_handler(&conn_id->id, &event);
+ ret = cma_cm_event_handler(conn_id, &event);
if (ret)
goto err3;
/*
@@ -2170,8 +2197,10 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
*/
mutex_lock(&lock);
if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
- (conn_id->id.qp_type != IB_QPT_UD))
+ (conn_id->id.qp_type != IB_QPT_UD)) {
+ trace_cm_send_mra(cm_id->context);
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ }
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
@@ -2286,7 +2315,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
event.status = iw_event->status;
event.param.conn.private_data = iw_event->private_data;
event.param.conn.private_data_len = iw_event->private_data_len;
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL;
@@ -2363,7 +2392,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
* until we're done accessing it.
*/
atomic_inc(&conn_id->refcount);
- ret = conn_id->id.event_handler(&conn_id->id, &event);
+ ret = cma_cm_event_handler(conn_id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
@@ -2435,6 +2464,7 @@ static int cma_listen_handler(struct rdma_cm_id *id,
id->context = id_priv->id.context;
id->event_handler = id_priv->id.event_handler;
+ trace_cm_event_handler(id_priv, event);
return id_priv->id.event_handler(id, event);
}
@@ -2611,7 +2641,7 @@ static void cma_work_handler(struct work_struct *_work)
if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
goto out;
- if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
+ if (cma_cm_event_handler(id_priv, &work->event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
destroy = 1;
}
@@ -2634,7 +2664,7 @@ static void cma_ndev_work_handler(struct work_struct *_work)
id_priv->state == RDMA_CM_DEVICE_REMOVAL)
goto out;
- if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
+ if (cma_cm_event_handler(id_priv, &work->event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
destroy = 1;
}
@@ -3089,7 +3119,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
} else
event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- if (id_priv->id.event_handler(&id_priv->id, &event)) {
+ if (cma_cm_event_handler(id_priv, &event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
@@ -3118,6 +3148,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
+ atomic_inc(&id_priv->refcount);
cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -3144,6 +3175,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
+ atomic_inc(&id_priv->refcount);
cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -3736,7 +3768,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
goto out;
}
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
@@ -3800,6 +3832,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
req.max_cm_retries = CMA_MAX_CM_RETRIES;
+ trace_cm_send_sidr_req(id_priv);
ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
if (ret) {
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -3873,6 +3906,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.max_cm_retries = CMA_MAX_CM_RETRIES;
req.srq = id_priv->srq ? 1 : 0;
+ trace_cm_send_req(id_priv);
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
if (ret && !IS_ERR(id)) {
@@ -3986,6 +4020,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
+ trace_cm_send_rep(id_priv);
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
return ret;
@@ -4035,6 +4070,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
rep.private_data = private_data;
rep.private_data_len = private_data_len;
+ trace_cm_send_sidr_rep(id_priv);
return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
@@ -4120,13 +4156,15 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
return -EINVAL;
if (rdma_cap_ib_cm(id->device, id->port_num)) {
- if (id->qp_type == IB_QPT_UD)
+ if (id->qp_type == IB_QPT_UD) {
ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
private_data, private_data_len);
- else
+ } else {
+ trace_cm_send_rej(id_priv);
ret = ib_send_cm_rej(id_priv->cm_id.ib,
IB_CM_REJ_CONSUMER_DEFINED, NULL,
0, private_data, private_data_len);
+ }
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
private_data, private_data_len);
@@ -4151,8 +4189,13 @@ int rdma_disconnect(struct rdma_cm_id *id)
if (ret)
goto out;
/* Initiate or respond to a disconnect. */
- if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
- ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
+ trace_cm_disconnect(id_priv);
+ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
+ if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
+ trace_cm_sent_drep(id_priv);
+ } else {
+ trace_cm_sent_dreq(id_priv);
+ }
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
} else
@@ -4218,7 +4261,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
} else
event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
@@ -4623,6 +4666,7 @@ static void cma_add_one(struct ib_device *device)
cma_listen_on_dev(id_priv, cma_dev);
mutex_unlock(&lock);
+ trace_cm_add_one(device);
return;
free_gid_type:
@@ -4653,7 +4697,7 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
goto out;
event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
- ret = id_priv->id.event_handler(&id_priv->id, &event);
+ ret = cma_cm_event_handler(id_priv, &event);
out:
mutex_unlock(&id_priv->handler_mutex);
return ret;
@@ -4691,6 +4735,8 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
{
struct cma_device *cma_dev = client_data;
+ trace_cm_remove_one(device);
+
if (!cma_dev)
return;
diff --git a/drivers/infiniband/core/cma_trace.c b/drivers/infiniband/core/cma_trace.c
new file mode 100644
index 000000000000..b314a281e10e
--- /dev/null
+++ b/drivers/infiniband/core/cma_trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for the RDMA Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define CREATE_TRACE_POINTS
+
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include "cma_priv.h"
+
+#include "cma_trace.h"
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
new file mode 100644
index 000000000000..81e36bf13159
--- /dev/null
+++ b/drivers/infiniband/core/cma_trace.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for the RDMA Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_cma
+
+#if !defined(_TRACE_RDMA_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_RDMA_CMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/rdma.h>
+
+/*
+ * enum ib_cm_event_type, from include/rdma/ib_cm.h
+ */
+#define IB_CM_EVENT_LIST \
+ ib_cm_event(REQ_ERROR) \
+ ib_cm_event(REQ_RECEIVED) \
+ ib_cm_event(REP_ERROR) \
+ ib_cm_event(REP_RECEIVED) \
+ ib_cm_event(RTU_RECEIVED) \
+ ib_cm_event(USER_ESTABLISHED) \
+ ib_cm_event(DREQ_ERROR) \
+ ib_cm_event(DREQ_RECEIVED) \
+ ib_cm_event(DREP_RECEIVED) \
+ ib_cm_event(TIMEWAIT_EXIT) \
+ ib_cm_event(MRA_RECEIVED) \
+ ib_cm_event(REJ_RECEIVED) \
+ ib_cm_event(LAP_ERROR) \
+ ib_cm_event(LAP_RECEIVED) \
+ ib_cm_event(APR_RECEIVED) \
+ ib_cm_event(SIDR_REQ_ERROR) \
+ ib_cm_event(SIDR_REQ_RECEIVED) \
+ ib_cm_event_end(SIDR_REP_RECEIVED)
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_event_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_EVENT_LIST
+
+#undef ib_cm_event
+#undef ib_cm_event_end
+
+#define ib_cm_event(x) { IB_CM_##x, #x },
+#define ib_cm_event_end(x) { IB_CM_##x, #x }
+
+#define rdma_show_ib_cm_event(x) \
+ __print_symbolic(x, IB_CM_EVENT_LIST)
+
+
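
The IB_CM_EVENT_LIST macro above is the classic X-macro trick: the event list is written once and expanded twice with different definitions of ib_cm_event(), first into TRACE_DEFINE_ENUM() calls so trace tooling learns the numeric values, then into { value, "name" } pairs for __print_symbolic(). A small stand-alone illustration of the same technique, with hypothetical names:

#include <stdio.h>

#define COLOR_LIST \
	color(RED)   \
	color(GREEN) \
	color_end(BLUE)

/* Expansion 1: an enum. */
#define color(x)     COLOR_##x,
#define color_end(x) COLOR_##x
enum color { COLOR_LIST };
#undef color
#undef color_end

/* Expansion 2: a value -> string table, from the very same list. */
#define color(x)     { COLOR_##x, #x },
#define color_end(x) { COLOR_##x, #x }
static const struct { int v; const char *n; } color_names[] = { COLOR_LIST };
#undef color
#undef color_end

int main(void)
{
	/* Both expansions stay in sync because the list is written once. */
	printf("%d = %s\n", color_names[COLOR_GREEN].v, color_names[COLOR_GREEN].n);
	return 0;
}
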
+DECLARE_EVENT_CLASS(cma_fsm_class,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv
+ ),
+
+ TP_ARGS(id_priv),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos
+ )
+);
+
+#define DEFINE_CMA_FSM_EVENT(name) \
+ DEFINE_EVENT(cma_fsm_class, cm_##name, \
+ TP_PROTO( \
+ const struct rdma_id_private *id_priv \
+ ), \
+ TP_ARGS(id_priv))
+
+DEFINE_CMA_FSM_EVENT(send_rtu);
+DEFINE_CMA_FSM_EVENT(send_rej);
+DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(send_sidr_req);
+DEFINE_CMA_FSM_EVENT(send_sidr_rep);
+DEFINE_CMA_FSM_EVENT(disconnect);
+DEFINE_CMA_FSM_EVENT(sent_drep);
+DEFINE_CMA_FSM_EVENT(sent_dreq);
+DEFINE_CMA_FSM_EVENT(id_destroy);
+
+TRACE_EVENT(cm_id_create,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv
+ ),
+
+ TP_ARGS(id_priv),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ ),
+
+ TP_printk("cm.id=%u",
+ __entry->cm_id
+ )
+);
+
+DECLARE_EVENT_CLASS(cma_qp_class,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv
+ ),
+
+ TP_ARGS(id_priv),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(u32, qp_num)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->qp_num = id_priv->qp_num;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u qp_num=%u",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ __entry->qp_num
+ )
+);
+
+#define DEFINE_CMA_QP_EVENT(name) \
+ DEFINE_EVENT(cma_qp_class, cm_##name, \
+ TP_PROTO( \
+ const struct rdma_id_private *id_priv \
+ ), \
+ TP_ARGS(id_priv))
+
+DEFINE_CMA_QP_EVENT(send_req);
+DEFINE_CMA_QP_EVENT(send_rep);
+DEFINE_CMA_QP_EVENT(qp_destroy);
+
+/*
+ * enum ib_qp_type, from include/rdma/ib_verbs.h
+ */
+#define IB_QP_TYPE_LIST \
+ ib_qp_type(SMI) \
+ ib_qp_type(GSI) \
+ ib_qp_type(RC) \
+ ib_qp_type(UC) \
+ ib_qp_type(UD) \
+ ib_qp_type(RAW_IPV6) \
+ ib_qp_type(RAW_ETHERTYPE) \
+ ib_qp_type(RAW_PACKET) \
+ ib_qp_type(XRC_INI) \
+ ib_qp_type_end(XRC_TGT)
+
+#undef ib_qp_type
+#undef ib_qp_type_end
+
+#define ib_qp_type(x) TRACE_DEFINE_ENUM(IB_QPT_##x);
+#define ib_qp_type_end(x) TRACE_DEFINE_ENUM(IB_QPT_##x);
+
+IB_QP_TYPE_LIST
+
+#undef ib_qp_type
+#undef ib_qp_type_end
+
+#define ib_qp_type(x) { IB_QPT_##x, #x },
+#define ib_qp_type_end(x) { IB_QPT_##x, #x }
+
+#define rdma_show_qp_type(x) \
+ __print_symbolic(x, IB_QP_TYPE_LIST)
+
+
+TRACE_EVENT(cm_qp_create,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct ib_pd *pd,
+ const struct ib_qp_init_attr *qp_init_attr,
+ int rc
+ ),
+
+ TP_ARGS(id_priv, pd, qp_init_attr, rc),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, pd_id)
+ __field(u32, tos)
+ __field(u32, qp_num)
+ __field(u32, send_wr)
+ __field(u32, recv_wr)
+ __field(int, rc)
+ __field(unsigned long, qp_type)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->pd_id = pd->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->send_wr = qp_init_attr->cap.max_send_wr;
+ __entry->recv_wr = qp_init_attr->cap.max_recv_wr;
+ __entry->rc = rc;
+ if (!rc) {
+ __entry->qp_num = id_priv->qp_num;
+ __entry->qp_type = id_priv->id.qp_type;
+ } else {
+ __entry->qp_num = 0;
+ __entry->qp_type = 0;
+ }
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u pd.id=%u qp_type=%s"
+ " send_wr=%u recv_wr=%u qp_num=%u rc=%d",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+ __entry->tos, __entry->pd_id,
+ rdma_show_qp_type(__entry->qp_type), __entry->send_wr,
+ __entry->recv_wr, __entry->qp_num, __entry->rc
+ )
+);
+
+TRACE_EVENT(cm_req_handler,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ int event
+ ),
+
+ TP_ARGS(id_priv, event),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(unsigned long, event)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->event = event;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu)",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ rdma_show_ib_cm_event(__entry->event), __entry->event
+ )
+);
+
+TRACE_EVENT(cm_event_handler,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct rdma_cm_event *event
+ ),
+
+ TP_ARGS(id_priv, event),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(unsigned long, event)
+ __field(int, status)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->event = event->event;
+ __entry->status = event->status;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu/%d)",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ rdma_show_cm_event(__entry->event), __entry->event,
+ __entry->status
+ )
+);
+
+TRACE_EVENT(cm_event_done,
+ TP_PROTO(
+ const struct rdma_id_private *id_priv,
+ const struct rdma_cm_event *event,
+ int result
+ ),
+
+ TP_ARGS(id_priv, event, result),
+
+ TP_STRUCT__entry(
+ __field(u32, cm_id)
+ __field(u32, tos)
+ __field(unsigned long, event)
+ __field(int, result)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = id_priv->res.id;
+ __entry->tos = id_priv->tos;
+ __entry->event = event->event;
+ __entry->result = result;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s consumer returns %d",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos,
+ rdma_show_cm_event(__entry->event), __entry->result
+ )
+);
+
+DECLARE_EVENT_CLASS(cma_client_class,
+ TP_PROTO(
+ const struct ib_device *device
+ ),
+
+ TP_ARGS(device),
+
+ TP_STRUCT__entry(
+ __string(name, device->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, device->name);
+ ),
+
+ TP_printk("device name=%s",
+ __get_str(name)
+ )
+);
+
+#define DEFINE_CMA_CLIENT_EVENT(name) \
+ DEFINE_EVENT(cma_client_class, cm_##name, \
+ TP_PROTO( \
+ const struct ib_device *device \
+ ), \
+ TP_ARGS(device))
+
+DEFINE_CMA_CLIENT_EVENT(add_one);
+DEFINE_CMA_CLIENT_EVENT(remove_one);
+
+#endif /* _TRACE_RDMA_CMA_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE cma_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3645e092e1c7..b1457b3464d3 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -149,6 +149,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
+void ib_dispatch_event_clients(struct ib_event *event);
#ifdef CONFIG_CGROUP_RDMA
void ib_device_register_rdmacg(struct ib_device *device);
@@ -320,7 +321,7 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
struct ib_pd *pd,
struct ib_qp_init_attr *attr,
struct ib_udata *udata,
- struct ib_uobject *uobj)
+ struct ib_uqp_object *uobj)
{
enum ib_qp_type qp_type = attr->qp_type;
struct ib_qp *qp;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bbfded6d5d3d..4f25b2400694 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -7,6 +7,8 @@
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
+#include <trace/events/rdma_core.h>
+
/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
#define IB_POLL_BATCH_DIRECT 8
@@ -41,6 +43,7 @@ static void ib_cq_rdma_dim_work(struct work_struct *w)
dim->state = DIM_START_MEASURE;
+ trace_cq_modify(cq, comps, usec);
cq->device->ops.modify_cq(cq, comps, usec);
}
@@ -65,18 +68,29 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}
+static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
+{
+ int rc;
+
+ rc = ib_poll_cq(cq, num_entries, wc);
+ trace_cq_poll(cq, num_entries, rc);
+ return rc;
+}
+
static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
int batch)
{
int i, n, completed = 0;
+ trace_cq_process(cq);
+
/*
* budget might be (-1) if the caller does not
* want to bound this call, thus we need unsigned
* minimum here.
*/
- while ((n = ib_poll_cq(cq, min_t(u32, batch,
- budget - completed), wcs)) > 0) {
+ while ((n = __poll_cq(cq, min_t(u32, batch,
+ budget - completed), wcs)) > 0) {
for (i = 0; i < n; i++) {
struct ib_wc *wc = &wcs[i];
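
The comment above leans on unsigned wrap-around: when budget is -1, min_t(u32, ...) casts budget - completed to u32, producing a value near UINT_MAX that always loses to the batch size, so the loop is effectively unbounded. A quick stand-alone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* User-space stand-in for the kernel's min_t(u32, a, b). */
static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	int budget = -1, completed = 5, batch = 16;

	/* (u32)(-1 - 5) == 0xFFFFFFFA, so the batch size wins. */
	assert(min_u32(batch, (uint32_t)(budget - completed)) == 16);

	/* With a real bound, the remaining budget wins once it gets small. */
	budget = 10;
	assert(min_u32(batch, (uint32_t)(budget - completed)) == 5);
	return 0;
}
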
@@ -131,8 +145,10 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
if (completed < budget) {
irq_poll_complete(&cq->iop);
- if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
+ if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
+ trace_cq_reschedule(cq);
irq_poll_sched(&cq->iop);
+ }
}
if (dim)
@@ -143,6 +159,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
+ trace_cq_schedule(cq);
irq_poll_sched(&cq->iop);
}
@@ -162,6 +179,7 @@ static void ib_cq_poll_work(struct work_struct *work)
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
+ trace_cq_schedule(cq);
queue_work(cq->comp_wq, &cq->work);
}
@@ -239,6 +257,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
goto out_destroy_cq;
}
+ trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
return cq;
out_destroy_cq:
@@ -248,6 +267,7 @@ out_free_wc:
kfree(cq->wc);
out_free_cq:
kfree(cq);
+ trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq_user);
@@ -304,6 +324,7 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}
+ trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
if (cq->dim)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 84dd74fe13b8..f6c255202d7f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -587,7 +587,8 @@ struct ib_device *_ib_alloc_device(size_t size)
rdma_init_coredev(&device->coredev, device, &init_net);
INIT_LIST_HEAD(&device->event_handler_list);
- spin_lock_init(&device->event_handler_lock);
+ spin_lock_init(&device->qp_open_list_lock);
+ init_rwsem(&device->event_handler_rwsem);
mutex_init(&device->unregistration_lock);
/*
* client_data needs to be alloc because we don't want our mark to be
@@ -1931,17 +1932,15 @@ EXPORT_SYMBOL(ib_set_client_data);
*
* ib_register_event_handler() registers an event handler that will be
* called back when asynchronous IB events occur (as defined in
- * chapter 11 of the InfiniBand Architecture Specification). This
- * callback may occur in interrupt context.
+ * chapter 11 of the InfiniBand Architecture Specification). This
+ * callback occurs in workqueue context.
*/
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
- unsigned long flags;
-
- spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+ down_write(&event_handler->device->event_handler_rwsem);
list_add_tail(&event_handler->list,
&event_handler->device->event_handler_list);
- spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+ up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_register_event_handler);
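
This hunk and the next trade the IRQ-safe spinlock for a read/write semaphore: registration and unregistration take it exclusively, while ib_dispatch_event_clients() below only takes it shared, which is permissible now that, per the updated kerneldoc, dispatch runs from a workqueue rather than interrupt context (rwsems may sleep). A hedged user-space sketch of the same pattern, with a pthreads rwlock standing in for down_read()/down_write():

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the device's handler list and event_handler_rwsem. */
static pthread_rwlock_t handler_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static void (*handlers[8])(int event);
static int nr_handlers;

static void register_handler(void (*fn)(int))
{
	pthread_rwlock_wrlock(&handler_rwsem);	/* like down_write() */
	handlers[nr_handlers++] = fn;
	pthread_rwlock_unlock(&handler_rwsem);
}

static void dispatch(int event)
{
	pthread_rwlock_rdlock(&handler_rwsem);	/* like down_read(): many
						 * dispatchers may run at once */
	for (int i = 0; i < nr_handlers; i++)
		handlers[i](event);
	pthread_rwlock_unlock(&handler_rwsem);
}

static void print_event(int event) { printf("event %d\n", event); }

int main(void)
{
	register_handler(print_event);
	dispatch(42);
	return 0;
}
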
@@ -1954,35 +1953,23 @@ EXPORT_SYMBOL(ib_register_event_handler);
*/
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
- unsigned long flags;
-
- spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+ down_write(&event_handler->device->event_handler_rwsem);
list_del(&event_handler->list);
- spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+ up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_unregister_event_handler);
-/**
- * ib_dispatch_event - Dispatch an asynchronous event
- * @event:Event to dispatch
- *
- * Low-level drivers must call ib_dispatch_event() to dispatch the
- * event to all registered event handlers when an asynchronous event
- * occurs.
- */
-void ib_dispatch_event(struct ib_event *event)
+void ib_dispatch_event_clients(struct ib_event *event)
{
- unsigned long flags;
struct ib_event_handler *handler;
- spin_lock_irqsave(&event->device->event_handler_lock, flags);
+ down_read(&event->device->event_handler_rwsem);
list_for_each_entry(handler, &event->device->event_handler_list, list)
handler->handler(handler, event);
- spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
+ up_read(&event->device->event_handler_rwsem);
}
-EXPORT_SYMBOL(ib_dispatch_event);
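
The registration API is unchanged by the rwsem conversion; what changes is
that handlers now run from workqueue context under a read-side rwsem and may
therefore sleep. A minimal sketch of a client, assuming a hypothetical
my_client container (not from this patch):

    #include <rdma/ib_verbs.h>

    struct my_client {
            struct ib_event_handler evh;
    };

    /* Runs in workqueue context after this change, so sleeping is allowed. */
    static void my_event_handler(struct ib_event_handler *handler,
                                 struct ib_event *event)
    {
            struct my_client *c = container_of(handler, struct my_client, evh);

            dev_info(&event->device->dev, "async event %d\n", event->event);
            (void)c;
    }

    static void my_client_start(struct my_client *c, struct ib_device *dev)
    {
            INIT_IB_EVENT_HANDLER(&c->evh, dev, my_event_handler);
            ib_register_event_handler(&c->evh);
    }
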
static int iw_query_port(struct ib_device *device,
u8 port_num,
@@ -1990,7 +1977,6 @@ static int iw_query_port(struct ib_device *device,
{
struct in_device *inetdev;
struct net_device *netdev;
- int err;
memset(port_attr, 0, sizeof(*port_attr));
@@ -2021,11 +2007,7 @@ static int iw_query_port(struct ib_device *device,
}
dev_put(netdev);
- err = device->ops.query_port(device, port_num, port_attr);
- if (err)
- return err;
-
- return 0;
+ return device->ops.query_port(device, port_num, port_attr);
}
static int __ib_query_port(struct ib_device *device,
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
index b7cb59844ece..b51bd7087a88 100644
--- a/drivers/infiniband/core/ib_core_uverbs.c
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -232,7 +232,9 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
if (!entry)
return;
+ xa_lock(&entry->ucontext->mmap_xa);
entry->driver_removed = true;
+ xa_unlock(&entry->ucontext->mmap_xa);
kref_put(&entry->ref, rdma_user_mmap_entry_free);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
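
Setting driver_removed under xa_lock pairs with lookups that test the flag
while holding the same lock, so a reader can never take a new reference on an
entry that is concurrently being removed. A sketch of the reader side this
ordering protects (simplified from the mmap-entry lookup path, not verbatim):

    static struct rdma_user_mmap_entry *
    demo_lookup(struct ib_ucontext *ucontext, unsigned long pgoff)
    {
            struct rdma_user_mmap_entry *entry;

            xa_lock(&ucontext->mmap_xa);
            entry = xa_load(&ucontext->mmap_xa, pgoff);
            if (entry && !entry->driver_removed &&
                kref_get_unless_zero(&entry->ref)) {
                    xa_unlock(&ucontext->mmap_xa);
                    return entry;   /* caller owns a reference */
            }
            xa_unlock(&ucontext->mmap_xa);
            return NULL;
    }
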
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index cbf6041a5d4a..37b433aa7306 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -41,6 +41,7 @@
#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"
+#include "uverbs.h"
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
struct rdma_restrack_entry*, uint32_t);
@@ -599,7 +600,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
goto err;
if (!rdma_is_kernel_res(res) &&
nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
- cq->uobject->context->res.id))
+ cq->uobject->uevent.uobject.context->res.id))
goto err;
if (fill_res_name_pid(msg, res))
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6c72773faf29..5128cb16bb48 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -42,26 +42,21 @@
#include "core_priv.h"
#include "rdma_core.h"
-void uverbs_uobject_get(struct ib_uobject *uobject)
-{
- kref_get(&uobject->ref);
-}
-
static void uverbs_uobject_free(struct kref *ref)
{
- struct ib_uobject *uobj =
- container_of(ref, struct ib_uobject, ref);
-
- if (uobj->uapi_object->type_class->needs_kfree_rcu)
- kfree_rcu(uobj, rcu);
- else
- kfree(uobj);
+ kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
}
+/*
+ * In order to indicate we no longer need this uobject, uverbs_uobject_put
+ * is called. When the reference count drops to zero, the uobject is freed.
+ * For example, this is used when attaching a completion channel to a CQ.
+ */
void uverbs_uobject_put(struct ib_uobject *uobject)
{
kref_put(&uobject->ref, uverbs_uobject_free);
}
+EXPORT_SYMBOL(uverbs_uobject_put);
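
Only the put side is exported now; the matching get stays internal to
rdma_core.c. For reference, the underlying kref pattern in a self-contained
sketch (illustrative only, not the uobject code itself):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct kref ref;
    };

    static void demo_obj_free(struct kref *ref)
    {
            kfree(container_of(ref, struct demo_obj, ref));
    }

    /* The final put runs the release callback, just as the final
     * uverbs_uobject_put() frees the uobject via kfree_rcu(). */
    static void demo_obj_put(struct demo_obj *obj)
    {
            kref_put(&obj->ref, demo_obj_free);
    }
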
static int uverbs_try_lock_object(struct ib_uobject *uobj,
enum rdma_lookup_mode mode)
@@ -135,7 +130,11 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
lockdep_assert_held(&ufile->hw_destroy_rwsem);
assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
- if (uobj->object) {
+ if (reason == RDMA_REMOVE_ABORT) {
+ WARN_ON(!list_empty(&uobj->list));
+ WARN_ON(!uobj->context);
+ uobj->uapi_object->type_class->alloc_abort(uobj);
+ } else if (uobj->object) {
ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
attrs);
if (ret) {
@@ -151,12 +150,6 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
uobj->object = NULL;
}
- if (reason == RDMA_REMOVE_ABORT) {
- WARN_ON(!list_empty(&uobj->list));
- WARN_ON(!uobj->context);
- uobj->uapi_object->type_class->alloc_abort(uobj);
- }
-
uobj->context = NULL;
/*
@@ -263,15 +256,20 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
}
/* alloc_uobj must be undone by uverbs_destroy_uobject() */
-static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
+static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
const struct uverbs_api_object *obj)
{
+ struct ib_uverbs_file *ufile = attrs->ufile;
struct ib_uobject *uobj;
- struct ib_ucontext *ucontext;
- ucontext = ib_uverbs_get_ucontext_file(ufile);
- if (IS_ERR(ucontext))
- return ERR_CAST(ucontext);
+ if (!attrs->context) {
+ struct ib_ucontext *ucontext =
+ ib_uverbs_get_ucontext_file(ufile);
+
+ if (IS_ERR(ucontext))
+ return ERR_CAST(ucontext);
+ attrs->context = ucontext;
+ }
uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
if (!uobj)
@@ -281,7 +279,7 @@ static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
* The object is added to the list in the commit stage.
*/
uobj->ufile = ufile;
- uobj->context = ucontext;
+ uobj->context = attrs->context;
INIT_LIST_HEAD(&uobj->list);
uobj->uapi_object = obj;
/*
@@ -358,9 +356,9 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
uobject = f->private_data;
/*
- * fget(id) ensures we are not currently running uverbs_close_fd,
- * and the caller is expected to ensure that uverbs_close_fd is never
- * done while a call top lookup is possible.
+ * fget(id) ensures we are not currently running
+ * uverbs_uobject_fd_release(), and the caller is expected to ensure
+ * that release is never done while a call to lookup is possible.
*/
if (f->f_op != fd_type->fops) {
fput(f);
@@ -424,12 +422,12 @@ free:
static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile)
+ struct uverbs_attr_bundle *attrs)
{
int ret;
struct ib_uobject *uobj;
- uobj = alloc_uobj(ufile, obj);
+ uobj = alloc_uobj(attrs, obj);
if (IS_ERR(uobj))
return uobj;
@@ -445,7 +443,7 @@ alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
return uobj;
remove:
- xa_erase(&ufile->idr, uobj->id);
+ xa_erase(&attrs->ufile->idr, uobj->id);
uobj_put:
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
@@ -453,31 +451,48 @@ uobj_put:
static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile)
+ struct uverbs_attr_bundle *attrs)
{
+ const struct uverbs_obj_fd_type *fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
int new_fd;
struct ib_uobject *uobj;
+ struct file *filp;
+
+ if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+ return ERR_PTR(-EINVAL);
new_fd = get_unused_fd_flags(O_CLOEXEC);
if (new_fd < 0)
return ERR_PTR(new_fd);
- uobj = alloc_uobj(ufile, obj);
- if (IS_ERR(uobj)) {
- put_unused_fd(new_fd);
- return uobj;
+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ goto err_fd;
+
+ /* Note that uverbs_uobject_fd_release() is called during abort */
+ filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
+ fd_type->flags);
+ if (IS_ERR(filp)) {
+ uobj = ERR_CAST(filp);
+ goto err_uobj;
}
+ uobj->object = filp;
uobj->id = new_fd;
- uobj->ufile = ufile;
+ return uobj;
+err_uobj:
+ uverbs_uobject_put(uobj);
+err_fd:
+ put_unused_fd(new_fd);
return uobj;
}
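
The reordered flow reserves the fd and creates the struct file before anything
is visible to userspace, so an abort can still unwind with fput() plus
put_unused_fd(). The generic shape of that pattern, as a minimal sketch
(the demo_* names are hypothetical):

    #include <linux/anon_inodes.h>
    #include <linux/fcntl.h>
    #include <linux/file.h>

    static int demo_anon_fd(const struct file_operations *fops, void *priv)
    {
            struct file *filp;
            int fd;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0)
                    return fd;

            filp = anon_inode_getfile("[demo]", fops, priv, O_RDWR);
            if (IS_ERR(filp)) {
                    put_unused_fd(fd);      /* fd was never installed */
                    return PTR_ERR(filp);
            }

            fd_install(fd, filp);           /* point of no return */
            return fd;
    }
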
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile,
struct uverbs_attr_bundle *attrs)
{
+ struct ib_uverbs_file *ufile = attrs->ufile;
struct ib_uobject *ret;
if (IS_ERR(obj))
@@ -491,13 +506,11 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
if (!down_read_trylock(&ufile->hw_destroy_rwsem))
return ERR_PTR(-EIO);
- ret = obj->type_class->alloc_begin(obj, ufile);
+ ret = obj->type_class->alloc_begin(obj, attrs);
if (IS_ERR(ret)) {
up_read(&ufile->hw_destroy_rwsem);
return ret;
}
- if (attrs)
- attrs->context = ret->context;
return ret;
}
@@ -544,6 +557,9 @@ static void remove_handle_idr_uobject(struct ib_uobject *uobj)
static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
+ struct file *filp = uobj->object;
+
+ fput(filp);
put_unused_fd(uobj->id);
}
@@ -553,7 +569,7 @@ static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
{
const struct uverbs_obj_fd_type *fd_type = container_of(
uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
- int ret = fd_type->context_closed(uobj, why);
+ int ret = fd_type->destroy_object(uobj, why);
if (ib_is_destroy_retryable(ret, why, uobj))
return ret;
@@ -565,7 +581,7 @@ static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}
-static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
+static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
struct ib_uverbs_file *ufile = uobj->ufile;
void *old;
@@ -579,33 +595,14 @@ static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
*/
old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
WARN_ON(old != NULL);
-
- return 0;
}
-static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
+static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
- const struct uverbs_obj_fd_type *fd_type = container_of(
- uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
int fd = uobj->id;
- struct file *filp;
-
- /*
- * The kref for uobj is moved into filp->private data and put in
- * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
- * must be guaranteed to be called from the provided fops release
- * callback.
- */
- filp = anon_inode_getfile(fd_type->name,
- fd_type->fops,
- uobj,
- fd_type->flags);
- if (IS_ERR(filp))
- return PTR_ERR(filp);
-
- uobj->object = filp;
+ struct file *filp = uobj->object;
- /* Matching put will be done in uverbs_close_fd() */
+ /* Matching put will be done in uverbs_uobject_fd_release() */
kref_get(&uobj->ufile->ref);
/* This shouldn't be used anymore. Use the file object instead */
@@ -613,11 +610,10 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
/*
* NOTE: Once we install the file we lose ownership of our kref on
- * uobj. It will be put by uverbs_close_fd()
+ * uobj. It will be put by uverbs_uobject_fd_release()
*/
+ filp->private_data = uobj;
fd_install(fd, filp);
-
- return 0;
}
/*
@@ -625,19 +621,13 @@ static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
* caller can no longer assume uobj is valid. If this function fails it
* destroys the uobject, including the attached HW object.
*/
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs)
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_file *ufile = attrs->ufile;
- int ret;
/* alloc_commit consumes the uobj kref */
- ret = uobj->uapi_object->type_class->alloc_commit(uobj);
- if (ret) {
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
- up_read(&ufile->hw_destroy_rwsem);
- return ret;
- }
+ uobj->uapi_object->type_class->alloc_commit(uobj);
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
@@ -650,8 +640,6 @@ int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
-
- return 0;
}
/*
@@ -663,7 +651,6 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = uobj->ufile;
- uobj->object = NULL;
uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
/* Matches the down_read in rdma_alloc_begin_uobject */
@@ -681,7 +668,10 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj,
struct file *filp = uobj->object;
WARN_ON(mode != UVERBS_LOOKUP_READ);
- /* This indirectly calls uverbs_close_fd and free the object */
+ /*
+ * This indirectly calls uverbs_uobject_fd_release() and frees the
+ * object
+ */
fput(filp);
}
@@ -744,33 +734,32 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
.lookup_put = lookup_put_idr_uobject,
.destroy_hw = destroy_hw_idr_uobject,
.remove_handle = remove_handle_idr_uobject,
- /*
- * When we destroy an object, we first just lock it for WRITE and
- * actually DESTROY it in the finalize stage. So, the problematic
- * scenario is when we just started the finalize stage of the
- * destruction (nothing was executed yet). Now, the other thread
- * fetched the object for READ access, but it didn't lock it yet.
- * The DESTROY thread continues and starts destroying the object.
- * When the other thread continue - without the RCU, it would
- * access freed memory. However, the rcu_read_lock delays the free
- * until the rcu_read_lock of the READ operation quits. Since the
- * exclusive lock of the object is still taken by the DESTROY flow, the
- * READ operation will get -EBUSY and it'll just bail out.
- */
- .needs_kfree_rcu = true,
};
EXPORT_SYMBOL(uverbs_idr_class);
-void uverbs_close_fd(struct file *f)
+/*
+ * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
+ * file_operations release method.
+ */
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
{
- struct ib_uobject *uobj = f->private_data;
- struct ib_uverbs_file *ufile = uobj->ufile;
- struct uverbs_attr_bundle attrs = {
- .context = uobj->context,
- .ufile = ufile,
- };
+ struct ib_uverbs_file *ufile;
+ struct ib_uobject *uobj;
+
+ /*
+ * This can only happen if the fput came from alloc_abort_fd_uobject()
+ */
+ if (!filp->private_data)
+ return 0;
+ uobj = filp->private_data;
+ ufile = uobj->ufile;
if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
+ struct uverbs_attr_bundle attrs = {
+ .context = uobj->context,
+ .ufile = ufile,
+ };
+
/*
* lookup_get_fd_uobject holds the kref on the struct file any
* time a FD uobj is locked, which prevents this release
@@ -782,13 +771,14 @@ void uverbs_close_fd(struct file *f)
up_read(&ufile->hw_destroy_rwsem);
}
- /* Matches the get in alloc_begin_fd_uobject */
+ /* Matches the get in alloc_commit_fd_uobject() */
kref_put(&ufile->ref, ib_uverbs_release_file);
/* Pairs with filp->private_data in alloc_begin_fd_uobject */
uverbs_uobject_put(uobj);
+ return 0;
}
-EXPORT_SYMBOL(uverbs_close_fd);
+EXPORT_SYMBOL(uverbs_uobject_fd_release);
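
Every fd-type uobject must now name this helper as its file_operations
release method; alloc_begin_fd_uobject() above WARNs and fails otherwise.
A sketch of the wiring, with a hypothetical read handler:

    static ssize_t demo_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *pos)
    {
            return 0;
    }

    static const struct file_operations demo_uobj_fops = {
            .owner   = THIS_MODULE,
            .read    = demo_read,
            .release = uverbs_uobject_fd_release, /* mandatory for FD types */
    };
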
/*
* Drop the ucontext off the ufile and completely disconnect it from the
@@ -855,9 +845,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
}
/*
- * Destroy the uncontext and every uobject associated with it. If called with
- * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has
- * been completed and ufile->ucontext is NULL.
+ * Destroy the ucontext and every uobject associated with it.
*
* This is internally locked and can be called in parallel from multiple
* contexts.
@@ -865,22 +853,6 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
enum rdma_remove_reason reason)
{
- if (reason == RDMA_REMOVE_CLOSE) {
- /*
- * During destruction we might trigger something that
- * synchronously calls release on any file descriptor. For
- * this reason all paths that come from file_operations
- * release must use try_lock. They can progress knowing that
- * there is an ongoing uverbs_destroy_ufile_hw that will clean
- * up the driver resources.
- */
- if (!mutex_trylock(&ufile->ucontext_lock))
- return;
-
- } else {
- mutex_lock(&ufile->ucontext_lock);
- }
-
down_write(&ufile->hw_destroy_rwsem);
/*
@@ -909,7 +881,6 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
done:
up_write(&ufile->hw_destroy_rwsem);
- mutex_unlock(&ufile->ucontext_lock);
}
const struct uverbs_obj_type_class uverbs_fd_class = {
@@ -920,7 +891,6 @@ const struct uverbs_obj_type_class uverbs_fd_class = {
.lookup_put = lookup_put_fd_uobject,
.destroy_hw = destroy_hw_fd_uobject,
.remove_handle = remove_handle_fd_uobject,
- .needs_kfree_rcu = false,
};
EXPORT_SYMBOL(uverbs_fd_class);
@@ -943,19 +913,17 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
return rdma_lookup_get_uobject(obj, attrs->ufile, id,
UVERBS_LOOKUP_WRITE, attrs);
case UVERBS_ACCESS_NEW:
- return rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
+ return rdma_alloc_begin_uobject(obj, attrs);
default:
WARN_ON(true);
return ERR_PTR(-EOPNOTSUPP);
}
}
-int uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs)
+void uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access, bool commit,
+ struct uverbs_attr_bundle *attrs)
{
- int ret = 0;
-
/*
* refcounts should be handled at the object level and not at the
* uobject level. Refcounts of the objects themselves are done in
@@ -975,14 +943,11 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
break;
case UVERBS_ACCESS_NEW:
if (commit)
- ret = rdma_alloc_commit_uobject(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
else
rdma_alloc_abort_uobject(uobj, attrs);
break;
default:
WARN_ON(true);
- ret = -EOPNOTSUPP;
}
-
- return ret;
}
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index e63fbda25e1d..33978e0f1262 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -51,29 +51,6 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs);
/*
- * uverbs_uobject_get is called in order to increase the reference count on
- * an uobject. This is useful when a handler wants to keep the uobject's memory
- * alive, regardless if this uobject is still alive in the context's objects
- * repository. Objects are put via uverbs_uobject_put.
- */
-void uverbs_uobject_get(struct ib_uobject *uobject);
-
-/*
- * In order to indicate we no longer needs this uobject, uverbs_uobject_put
- * is called. When the reference count is decreased, the uobject is freed.
- * For example, this is used when attaching a completion channel to a CQ.
- */
-void uverbs_uobject_put(struct ib_uobject *uobject);
-
-/* Indicate this fd is no longer used by this consumer, but its memory isn't
- * necessarily released yet. When the last reference is put, we release the
- * memory. After this call is executed, calling uverbs_uobject_get isn't
- * allowed.
- * This must be called from the release file_operations of the file!
- */
-void uverbs_close_fd(struct file *f);
-
-/*
* Get an ib_uobject that corresponds to the given id from ufile, assuming
* the object is from the given type. Lock it to the required access when
* applicable.
@@ -86,24 +63,9 @@ struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
s64 id, struct uverbs_attr_bundle *attrs);
-/*
- * Note that certain finalize stages could return a status:
- * (a) alloc_commit could return a failure if the object is committed at the
- * same time when the context is destroyed.
- * (b) remove_commit could fail if the object wasn't destroyed successfully.
- * Since multiple objects could be finalized in one transaction, it is very NOT
- * recommended to have several finalize actions which have side effects.
- * For example, it's NOT recommended to have a certain action which has both
- * a commit action and a destroy action or two destroy objects in the same
- * action. The rule of thumb is to have one destroy or commit action with
- * multiple lookups.
- * The first non zero return value of finalize_object is returned from this
- * function. For example, this could happen when we couldn't destroy an
- * object.
- */
-int uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs);
+void uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access, bool commit,
+ struct uverbs_attr_bundle *attrs);
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
@@ -189,6 +151,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs);
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
+extern const struct uapi_definition uverbs_def_obj_async_fd[];
extern const struct uapi_definition uverbs_def_obj_counters[];
extern const struct uapi_definition uverbs_def_obj_cq[];
extern const struct uapi_definition uverbs_def_obj_device[];
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8917125ea16d..30d4c126a2db 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
}
settimeout_out:
- return skb->len;
+ return 0;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
@@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
}
resp_out:
- return skb->len;
+ return 0;
}
static void free_sm_ah(struct kref *kref)
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 6eb6d2717ca5..2b4d80393bd0 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
if (!new_pps)
return NULL;
- if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
- if (!qp_pps) {
- new_pps->main.port_num = qp_attr->port_num;
- new_pps->main.pkey_index = qp_attr->pkey_index;
- } else {
- new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
- qp_attr->port_num :
- qp_pps->main.port_num;
-
- new_pps->main.pkey_index =
- (qp_attr_mask & IB_QP_PKEY_INDEX) ?
- qp_attr->pkey_index :
- qp_pps->main.pkey_index;
- }
+ if (qp_attr_mask & IB_QP_PORT)
+ new_pps->main.port_num = qp_attr->port_num;
+ else if (qp_pps)
+ new_pps->main.port_num = qp_pps->main.port_num;
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ new_pps->main.pkey_index = qp_attr->pkey_index;
+ else if (qp_pps)
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
new_pps->main.state = IB_PORT_PKEY_VALID;
- } else if (qp_pps) {
+
+ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
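
One subtlety in the mask test above: with single-bit flags, (A | B) builds a
mask covering both bits, while the logical (A || B) collapses to the integer
1 and would silently test only bit 0. A short illustration with hypothetical
flag values standing in for the real enum constants:

    /* Hypothetical single-bit flags: */
    #define DEMO_PKEY  0x10
    #define DEMO_PORT  0x20

    /* (DEMO_PKEY | DEMO_PORT) == 0x30: the mask covers both bits.
     * (DEMO_PKEY || DEMO_PORT) == 1: a logical OR of two nonzero
     * constants, so the test would only ever look at bit 0. */
    bool neither_given = !(qp_attr_mask & (DEMO_PKEY | DEMO_PORT));
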
diff --git a/drivers/infiniband/core/trace.c b/drivers/infiniband/core/trace.c
new file mode 100644
index 000000000000..6c3514beac4d
--- /dev/null
+++ b/drivers/infiniband/core/trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define CREATE_TRACE_POINTS
+
+#include <rdma/ib_verbs.h>
+
+#include <trace/events/rdma_core.h>
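
CREATE_TRACE_POINTS must be defined in exactly one translation unit before
the trace header is included, which is all this new file does. The header
side follows the standard tracepoint pattern, roughly like this abridged
sketch (field choices are illustrative, not the real rdma_core.h contents):

    /* include/trace/events/rdma_core.h, sketched: */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM rdma_core

    #if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_RDMA_CORE_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(cq_schedule,
            TP_PROTO(struct ib_cq *cq),
            TP_ARGS(cq),
            TP_STRUCT__entry(__field(u32, cq_id)),
            TP_fast_assign(__entry->cq_id = cq->res.id;),
            TP_printk("cq.id=%u", __entry->cq_id)
    );

    #endif /* _TRACE_RDMA_CORE_H */
    #include <trace/define_trace.h>
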
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7a3b99597ead..06b6125b5ae1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -54,7 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
page = sg_page_iter_page(&sg_iter);
- put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
+ unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
}
sg_free_table(&umem->sg_head);
@@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
* for any address.
*/
mask |= (sg_dma_address(sg) + pgoff) ^ va;
- if (i && i != (umem->nmap - 1))
- /* restrict by length as well for interior SGEs */
- mask |= sg_dma_len(sg);
va += sg_dma_len(sg) - pgoff;
+ /* Except for the last entry, the ending iova alignment sets
+ * the maximum possible page size as the low bits of the iova
+ * must be zero when starting the next chunk.
+ */
+ if (i != (umem->nmap - 1))
+ mask |= va;
pgoff = 0;
}
best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
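
In other words, mask accumulates every bit position at which a chunk start or
an interior chunk boundary misaligns with the iova, and rdma_find_pg_bit()
then picks the largest supported page shift consistent with the lowest set
bit. A worked example with hypothetical addresses:

    /* Two-chunk SGL starting at iova 0x201000, interior boundary at
     * iova 0x210000: */
    unsigned long mask = 0;

    mask |= 0x80001000UL ^ 0x00201000UL; /* chunk 0 start -> 0x80200000 */
    mask |= 0x00210000UL;                /* interior ending iova        */
    mask |= 0x90000000UL ^ 0x00210000UL; /* chunk 1 start -> 0x90210000 */

    /* mask == 0x90210000; its lowest set bit is bit 16, so the best
     * page size is capped at 64 KiB even if pgsz_bitmap offers 2 MiB. */
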
@@ -181,15 +184,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
/**
* ib_umem_get - Pin and DMA map userspace memory.
*
- * @udata: userspace context to pin memory for
+ * @device: IB device to connect the UMEM to
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
*/
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
size_t size, int access)
{
- struct ib_ucontext *context;
struct ib_umem *umem;
struct page **page_list;
unsigned long lock_limit;
@@ -201,14 +203,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
- if (!udata)
- return ERR_PTR(-EIO);
-
- context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
- if (!context)
- return ERR_PTR(-EIO);
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -226,7 +220,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
umem = kzalloc(sizeof(*umem), GFP_KERNEL);
if (!umem)
return ERR_PTR(-ENOMEM);
- umem->ibdev = context->device;
+ umem->ibdev = device;
umem->length = size;
umem->address = addr;
umem->writable = ib_access_writable(access);
@@ -266,33 +260,28 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg = umem->sg_head.sgl;
while (npages) {
- down_read(&mm->mmap_sem);
- ret = get_user_pages(cur_base,
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof (struct page *)),
- gup_flags | FOLL_LONGTERM,
- page_list, NULL);
- if (ret < 0) {
- up_read(&mm->mmap_sem);
+ ret = pin_user_pages_fast(cur_base,
+ min_t(unsigned long, npages,
+ PAGE_SIZE /
+ sizeof(struct page *)),
+ gup_flags | FOLL_LONGTERM, page_list);
+ if (ret < 0)
goto umem_release;
- }
cur_base += ret * PAGE_SIZE;
npages -= ret;
sg = ib_umem_add_sg_table(sg, page_list, ret,
- dma_get_max_seg_size(context->device->dma_device),
+ dma_get_max_seg_size(device->dma_device),
&umem->sg_nents);
-
- up_read(&mm->mmap_sem);
}
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg(context->device,
- umem->sg_head.sgl,
- umem->sg_nents,
- DMA_BIDIRECTIONAL);
+ umem->nmap = ib_dma_map_sg(device,
+ umem->sg_head.sgl,
+ umem->sg_nents,
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
@@ -303,7 +292,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
goto out;
umem_release:
- __ib_umem_release(context->device, umem, 0);
+ __ib_umem_release(device, umem, 0);
vma:
atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
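
pin_user_pages_fast() handles mmap_sem internally and applies the FOLL_PIN
accounting that unpin_user_pages_dirty_lock() expects, which is why the
explicit down_read()/up_read() pair disappears above. The pairing in
isolation, as a minimal sketch:

    #include <linux/mm.h>

    /* Pin a user buffer for DMA, use it, then release it dirty. */
    static int demo_pin_cycle(unsigned long uaddr, int npages,
                              struct page **pages)
    {
            int ret;

            ret = pin_user_pages_fast(uaddr, npages,
                                      FOLL_WRITE | FOLL_LONGTERM, pages);
            if (ret <= 0)
                    return ret ? ret : -EFAULT;

            /* ... ib_dma_map_sg() and device access would happen here ... */

            unpin_user_pages_dirty_lock(pages, ret, true);
            return 0;
    }
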
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e42d44e501fd..b8c657b28380 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -110,15 +110,12 @@ out_page_list:
* They exist only to hold the per_mm reference to help the driver create
* children umems.
*
- * @udata: udata from the syscall being used to create the umem
+ * @device: IB device to create the UMEM for
* @access: ib_reg_mr access flags
*/
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
int access)
{
- struct ib_ucontext *context =
- container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
int ret;
@@ -126,14 +123,11 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
- if (!context)
- return ERR_PTR(-EIO);
-
umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
if (!umem_odp)
return ERR_PTR(-ENOMEM);
umem = &umem_odp->umem;
- umem->ibdev = context->device;
+ umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
umem_odp->is_implicit_odp = 1;
@@ -201,7 +195,7 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
/**
* ib_umem_odp_get - Create a umem_odp for a userspace va
*
- * @udata: userspace context to pin memory for
+ * @device: IB device to get the UMEM for
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
@@ -210,23 +204,14 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
* pinning, instead, stores the mm for future page fault handling in
* conjunction with MMU notifiers.
*/
-struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access,
+struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
+ unsigned long addr, size_t size, int access,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
- struct ib_ucontext *context;
struct mm_struct *mm;
int ret;
- if (!udata)
- return ERR_PTR(-EIO);
-
- context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
- ->context;
- if (!context)
- return ERR_PTR(-EIO);
-
if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
return ERR_PTR(-EINVAL);
@@ -234,7 +219,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
if (!umem_odp)
return ERR_PTR(-ENOMEM);
- umem_odp->umem.ibdev = context->device;
+ umem_odp->umem.ibdev = device;
umem_odp->umem.length = size;
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
@@ -242,21 +227,10 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
umem_odp->notifier.ops = ops;
umem_odp->page_shift = PAGE_SHIFT;
- if (access & IB_ACCESS_HUGETLB) {
- struct vm_area_struct *vma;
- struct hstate *h;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, ib_umem_start(umem_odp));
- if (!vma || !is_vm_hugetlb_page(vma)) {
- up_read(&mm->mmap_sem);
- ret = -EINVAL;
- goto err_free;
- }
- h = hstate_vma(vma);
- umem_odp->page_shift = huge_page_shift(h);
- up_read(&mm->mmap_sem);
- }
+#ifdef CONFIG_HUGETLB_PAGE
+ if (access & IB_ACCESS_HUGETLB)
+ umem_odp->page_shift = HPAGE_SHIFT;
+#endif
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
ret = ib_init_umem_odp(umem_odp, ops);
@@ -266,7 +240,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
err_put_pid:
put_pid(umem_odp->tgid);
-err_free:
kfree(umem_odp);
return ERR_PTR(ret);
}
@@ -308,9 +281,8 @@ EXPORT_SYMBOL(ib_umem_odp_release);
* The function returns -EFAULT if the DMA mapping operation fails. It returns
* -EAGAIN if a concurrent invalidation prevents us from updating the page.
*
- * The page is released via put_user_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
+ * The page is released via put_page even if the operation failed. For on-demand
+ * pinning, the page is released whenever it isn't stored in the umem.
*/
static int ib_umem_odp_map_dma_single_page(
struct ib_umem_odp *umem_odp,
@@ -363,7 +335,7 @@ static int ib_umem_odp_map_dma_single_page(
}
out:
- put_user_page(page);
+ put_page(page);
return ret;
}
@@ -440,7 +412,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
while (bcnt > 0) {
const size_t gup_num_pages = min_t(size_t,
- (bcnt + BIT(page_shift) - 1) >> page_shift,
+ ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
PAGE_SIZE / sizeof(struct page *));
down_read(&owning_mm->mmap_sem);
@@ -473,7 +445,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
ret = -EFAULT;
break;
}
- put_user_page(local_page_list[j]);
+ put_page(local_page_list[j]);
continue;
}
@@ -500,8 +472,8 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
* ib_umem_odp_map_dma_single_page().
*/
if (npages - (j + 1) > 0)
- put_user_pages(&local_page_list[j+1],
- npages - (j + 1));
+ release_pages(&local_page_list[j+1],
+ npages - (j + 1));
break;
}
}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index d1407fa378e8..1235ffb2389b 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
struct ib_umad_file *file;
int id;
+ cdev_device_del(&port->sm_cdev, &port->sm_dev);
+ cdev_device_del(&port->cdev, &port->dev);
+
mutex_lock(&port->file_mutex);
/* Mark ib_dev NULL and block ioctl or other file ops from progressing
@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
- cdev_device_del(&port->sm_cdev, &port->sm_dev);
- cdev_device_del(&port->cdev, &port->dev);
ida_free(&umad_ida, port->dev_num);
/* balances device_initialize() */
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 63f7f7db5902..7df71983212d 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -111,7 +111,6 @@ struct ib_uverbs_device {
struct srcu_struct disassociate_srcu;
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
- struct list_head uverbs_events_file_list;
struct uverbs_api *uapi;
};
@@ -124,10 +123,9 @@ struct ib_uverbs_event_queue {
};
struct ib_uverbs_async_event_file {
+ struct ib_uobject uobj;
struct ib_uverbs_event_queue ev_queue;
- struct ib_uverbs_file *uverbs_file;
- struct kref ref;
- struct list_head list;
+ struct ib_event_handler event_handler;
};
struct ib_uverbs_completion_event_file {
@@ -144,8 +142,7 @@ struct ib_uverbs_file {
* ucontext_lock held
*/
struct ib_ucontext *ucontext;
- struct ib_event_handler event_handler;
- struct ib_uverbs_async_event_file *async_file;
+ struct ib_uverbs_async_event_file *async_file;
struct list_head list;
/*
@@ -183,6 +180,7 @@ struct ib_uverbs_mcast_entry {
struct ib_uevent_object {
struct ib_uobject uobject;
+ /* List member for ib_uverbs_async_event_file list */
struct list_head event_list;
u32 events_reported;
};
@@ -210,25 +208,24 @@ struct ib_uwq_object {
};
struct ib_ucq_object {
- struct ib_uobject uobject;
+ struct ib_uevent_object uevent;
struct list_head comp_list;
- struct list_head async_list;
u32 comp_events_reported;
- u32 async_events_reported;
};
extern const struct file_operations uverbs_event_fops;
+extern const struct file_operations uverbs_async_event_fops;
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
-struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
- struct ib_device *ib_dev);
-void ib_uverbs_free_async_event_file(struct ib_uverbs_file *uverbs_file);
+void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
+void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
- struct ib_uverbs_completion_event_file *ev_file,
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
+
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
struct ib_ucq_object *uobj);
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
- struct ib_uevent_object *uobj);
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
void ib_uverbs_release_file(struct kref *ref);
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
@@ -236,8 +233,6 @@ void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
- struct ib_event *event);
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
@@ -276,23 +271,6 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
size_t kern_filter_sz,
union ib_flow_spec *ib_spec);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DEVICE);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_PD);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_MR);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_CQ);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_QP);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_AH);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_MW);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_SRQ);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_WQ);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM);
-extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
-
/*
* ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
* PortInfo CapabilityMask, but was extended with unique bits.
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 06ed32c8662f..025933752e1d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -203,82 +203,55 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
#define ib_uverbs_lookup_comp_file(_fd, _ufile) \
_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
-static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_file *file = attrs->ufile;
- struct ib_uverbs_get_context cmd;
- struct ib_uverbs_get_context_resp resp;
- struct ib_ucontext *ucontext;
- struct file *filp;
- struct ib_rdmacg_object cg_obj;
+ struct ib_uverbs_file *ufile = attrs->ufile;
+ struct ib_ucontext *ucontext;
struct ib_device *ib_dev;
- int ret;
-
- ret = uverbs_request(attrs, &cmd, sizeof(cmd));
- if (ret)
- return ret;
-
- mutex_lock(&file->ucontext_lock);
- ib_dev = srcu_dereference(file->device->ib_dev,
- &file->device->disassociate_srcu);
- if (!ib_dev) {
- ret = -EIO;
- goto err;
- }
- if (file->ucontext) {
- ret = -EINVAL;
- goto err;
- }
-
- ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
- if (ret)
- goto err;
+ ib_dev = srcu_dereference(ufile->device->ib_dev,
+ &ufile->device->disassociate_srcu);
+ if (!ib_dev)
+ return -EIO;
ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
- if (!ucontext) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
- attrs->context = ucontext;
+ if (!ucontext)
+ return -ENOMEM;
ucontext->res.type = RDMA_RESTRACK_CTX;
ucontext->device = ib_dev;
- ucontext->cg_obj = cg_obj;
- /* ufile is required when some objects are released */
- ucontext->ufile = file;
-
- ucontext->closing = false;
- ucontext->cleanup_retryable = false;
-
+ ucontext->ufile = ufile;
xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+ attrs->context = ucontext;
+ return 0;
+}
- ret = get_unused_fd_flags(O_CLOEXEC);
- if (ret < 0)
- goto err_free;
- resp.async_fd = ret;
+int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_ucontext *ucontext = attrs->context;
+ struct ib_uverbs_file *file = attrs->ufile;
+ int ret;
- filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
- if (IS_ERR(filp)) {
- ret = PTR_ERR(filp);
- goto err_fd;
+ if (!down_read_trylock(&file->hw_destroy_rwsem))
+ return -EIO;
+ mutex_lock(&file->ucontext_lock);
+ if (file->ucontext) {
+ ret = -EINVAL;
+ goto err;
}
- resp.num_comp_vectors = file->device->num_comp_vectors;
-
- ret = uverbs_response(attrs, &resp, sizeof(resp));
+ ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
+ RDMACG_RESOURCE_HCA_HANDLE);
if (ret)
- goto err_file;
+ goto err;
- ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
+ ret = ucontext->device->ops.alloc_ucontext(ucontext,
+ &attrs->driver_udata);
if (ret)
- goto err_file;
+ goto err_uncharge;
rdma_restrack_uadd(&ucontext->res);
- fd_install(resp.async_fd, filp);
-
/*
* Make sure that ib_uverbs_get_ucontext() sees the pointer update
* only after all writes to setup the ucontext have completed
@@ -286,24 +259,62 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
smp_store_release(&file->ucontext, ucontext);
mutex_unlock(&file->ucontext_lock);
-
+ up_read(&file->hw_destroy_rwsem);
return 0;
-err_file:
- ib_uverbs_free_async_event_file(file);
- fput(filp);
+err_uncharge:
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ RDMACG_RESOURCE_HCA_HANDLE);
+err:
+ mutex_unlock(&file->ucontext_lock);
+ up_read(&file->hw_destroy_rwsem);
+ return ret;
+}
+
+static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_get_context_resp resp;
+ struct ib_uverbs_get_context cmd;
+ struct ib_device *ib_dev;
+ struct ib_uobject *uobj;
+ int ret;
-err_fd:
- put_unused_fd(resp.async_fd);
+ ret = uverbs_request(attrs, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
-err_free:
- kfree(ucontext);
+ ret = ib_alloc_ucontext(attrs);
+ if (ret)
+ return ret;
-err_alloc:
- ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
+ uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
+ if (IS_ERR(uobj)) {
+ ret = PTR_ERR(uobj);
+ goto err_ucontext;
+ }
-err:
- mutex_unlock(&file->ucontext_lock);
+ resp = (struct ib_uverbs_get_context_resp){
+ .num_comp_vectors = attrs->ufile->device->num_comp_vectors,
+ .async_fd = uobj->id,
+ };
+ ret = uverbs_response(attrs, &resp, sizeof(resp));
+ if (ret)
+ goto err_uobj;
+
+ ret = ib_init_ucontext(attrs);
+ if (ret)
+ goto err_uobj;
+
+ ib_uverbs_init_async_event_file(
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
+
+err_uobj:
+ rdma_alloc_abort_uobject(uobj, attrs);
+err_ucontext:
+ kfree(attrs->context);
+ attrs->context = NULL;
return ret;
}
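
The smp_store_release() that publishes file->ucontext pairs with an acquire
load on every reader, guaranteeing the fully initialized ucontext is visible
before the pointer itself is. The shape of that pairing, sketched (the reader
is simplified from the lookup helper, not quoted verbatim):

    /* Writer, in ib_init_ucontext() above:
     *         smp_store_release(&file->ucontext, ucontext);
     */

    /* Reader, sketched: */
    static struct ib_ucontext *demo_get_ucontext(struct ib_uverbs_file *file)
    {
            struct ib_ucontext *uc = smp_load_acquire(&file->ucontext);

            if (!uc)
                    return ERR_PTR(-EINVAL);
            /* every store that initialized uc is now visible */
            return uc;
    }
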
@@ -446,7 +457,8 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
if (ret)
goto err_copy;
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
err_copy:
ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
@@ -642,7 +654,8 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
mutex_unlock(&ibudev->xrcd_tree_mutex);
- return uobj_alloc_commit(&obj->uobject, attrs);
+ rdma_alloc_commit_uobject(&obj->uobject, attrs);
+ return 0;
err_copy:
if (inode) {
@@ -774,7 +787,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
uobj_put_obj_read(pd);
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
err_copy:
ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
@@ -928,7 +942,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
goto err_copy;
uobj_put_obj_read(pd);
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
err_copy:
uverbs_dealloc_mw(mw);
@@ -980,7 +995,8 @@ static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
return ret;
}
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
}
static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
@@ -1010,11 +1026,9 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
}
}
- obj->uobject.user_handle = cmd->user_handle;
- obj->comp_events_reported = 0;
- obj->async_events_reported = 0;
+ obj->uevent.uobject.user_handle = cmd->user_handle;
INIT_LIST_HEAD(&obj->comp_list);
- INIT_LIST_HEAD(&obj->async_list);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
attr.cqe = cmd->cqe;
attr.comp_vector = cmd->comp_vector;
@@ -1026,7 +1040,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
goto err_file;
}
cq->device = ib_dev;
- cq->uobject = &obj->uobject;
+ cq->uobject = obj;
cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
@@ -1036,9 +1050,9 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
if (ret)
goto err_free;
- obj->uobject.object = cq;
+ obj->uevent.uobject.object = cq;
memset(&resp, 0, sizeof resp);
- resp.base.cq_handle = obj->uobject.id;
+ resp.base.cq_handle = obj->uevent.uobject.id;
resp.base.cqe = cq->cqe;
resp.response_length = uverbs_response_length(attrs, sizeof(resp));
@@ -1049,9 +1063,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
if (ret)
goto err_cb;
- ret = uobj_alloc_commit(&obj->uobject, attrs);
- if (ret)
- return ERR_PTR(ret);
+ rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
return obj;
err_cb:
@@ -1061,10 +1073,10 @@ err_free:
kfree(cq);
err_file:
if (ev_file)
- ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);
+ ib_uverbs_release_ucq(ev_file, obj);
err:
- uobj_alloc_abort(&obj->uobject, attrs);
+ uobj_alloc_abort(&obj->uevent.uobject, attrs);
return ERR_PTR(ret);
}
@@ -1133,7 +1145,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
- uobj_put_obj_read(cq);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
@@ -1216,7 +1229,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
out_put:
- uobj_put_obj_read(cq);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
@@ -1237,8 +1251,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
- uobj_put_obj_read(cq);
-
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return 0;
}
@@ -1258,10 +1272,10 @@ static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- obj = container_of(uobj, struct ib_ucq_object, uobject);
+ obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
memset(&resp, 0, sizeof(resp));
resp.comp_events_reported = obj->comp_events_reported;
- resp.async_events_reported = obj->async_events_reported;
+ resp.async_events_reported = obj->uevent.events_reported;
uobj_put_destroy(uobj);
@@ -1375,7 +1389,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
}
attr.event_handler = ib_uverbs_qp_event_handler;
- attr.qp_context = attrs->ufile;
attr.send_cq = scq;
attr.recv_cq = rcq;
attr.srq = srq;
@@ -1391,7 +1404,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
attr.cap.max_recv_sge = cmd->max_recv_sge;
attr.cap.max_inline_data = cmd->max_inline_data;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
INIT_LIST_HEAD(&obj->mcast_list);
@@ -1421,7 +1433,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
qp = ib_create_qp(pd, &attr);
else
qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
- &obj->uevent.uobject);
+ obj);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1439,7 +1451,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
qp->srq = attr.srq;
qp->rwq_ind_tbl = ind_tbl;
qp->event_handler = attr.event_handler;
- qp->qp_context = attr.qp_context;
qp->qp_type = attr.qp_type;
atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
@@ -1454,7 +1465,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
atomic_inc(&ind_tbl->usecnt);
} else {
/* It is done in _ib_create_qp for other QP types */
- qp->uobject = &obj->uevent.uobject;
+ qp->uobject = obj;
}
obj->uevent.uobject.object = qp;
@@ -1483,15 +1494,19 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (pd)
uobj_put_obj_read(pd);
if (scq)
- uobj_put_obj_read(scq);
+ rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (rcq && rcq != scq)
- uobj_put_obj_read(rcq);
+ rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (srq)
- uobj_put_obj_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ind_tbl)
uobj_put_obj_read(ind_tbl);
- return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+ rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+ return 0;
err_cb:
ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
@@ -1501,11 +1516,14 @@ err_put:
if (pd)
uobj_put_obj_read(pd);
if (scq)
- uobj_put_obj_read(scq);
+ rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (rcq && rcq != scq)
- uobj_put_obj_read(rcq);
+ rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (srq)
- uobj_put_obj_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ind_tbl)
uobj_put_obj_read(ind_tbl);
@@ -1567,7 +1585,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
struct ib_xrcd *xrcd;
struct ib_uobject *uninitialized_var(xrcd_uobj);
struct ib_qp *qp;
- struct ib_qp_open_attr attr;
+ struct ib_qp_open_attr attr = {};
int ret;
struct ib_device *ib_dev;
@@ -1593,11 +1611,9 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
}
attr.event_handler = ib_uverbs_qp_event_handler;
- attr.qp_context = attrs->ufile;
attr.qp_num = cmd.qpn;
attr.qp_type = cmd.qp_type;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
INIT_LIST_HEAD(&obj->mcast_list);
@@ -1620,10 +1636,11 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
atomic_inc(&obj->uxrcd->refcnt);
- qp->uobject = &obj->uevent.uobject;
+ qp->uobject = obj;
uobj_put_read(xrcd_uobj);
- return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+ rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+ return 0;
err_destroy:
ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
@@ -1684,7 +1701,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret)
goto out;
@@ -1921,7 +1939,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
&attrs->driver_udata);
release_qp:
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
out:
kfree(attr);
@@ -2185,7 +2204,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ret = ret2;
out_put:
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
while (wr) {
if (is_ud && ud_wr(wr)->ah)
@@ -2327,7 +2347,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
resp.bad_wr = 0;
ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret) {
for (next = wr; next; next = next->next) {
++resp.bad_wr;
@@ -2377,7 +2398,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
resp.bad_wr = 0;
ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
- uobj_put_obj_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret)
for (next = wr; next; next = next->next) {
@@ -2465,7 +2487,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
goto err_copy;
uobj_put_obj_read(pd);
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
err_copy:
rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
@@ -2507,7 +2530,7 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
if (!qp)
return -EINVAL;
- obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+ obj = qp->uobject;
mutex_lock(&obj->mcast_lock);
list_for_each_entry(mcast, &obj->mcast_list, list)
@@ -2534,7 +2557,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
out_put:
mutex_unlock(&obj->mcast_lock);
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
@@ -2556,7 +2580,7 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
if (!qp)
return -EINVAL;
- obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+ obj = qp->uobject;
mutex_lock(&obj->mcast_lock);
list_for_each_entry(mcast, &obj->mcast_list, list)
@@ -2577,7 +2601,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
out_put:
mutex_unlock(&obj->mcast_lock);
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
@@ -2720,12 +2745,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
return 0;
}
-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
-{
- /* Returns user space filter size, includes padding */
- return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
-}
-
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
u16 ib_real_filter_sz)
{
@@ -2869,11 +2888,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
- ssize_t kern_filter_sz;
+ size_t kern_filter_sz;
void *kern_spec_mask;
void *kern_spec_val;
- kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+ if (check_sub_overflow((size_t)kern_spec->hdr.size,
+ sizeof(struct ib_uverbs_flow_spec_hdr),
+ &kern_filter_sz))
+ return -EINVAL;
+
+ kern_filter_sz /= 2;
kern_spec_val = (void *)kern_spec +
sizeof(struct ib_uverbs_flow_spec_hdr);
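
check_sub_overflow() performs the subtraction in the result type and returns
true on wrap, so an undersized, attacker-controlled hdr.size can no longer
yield a huge unsigned filter length. Minimal usage sketch (struct demo_hdr is
hypothetical):

    #include <linux/overflow.h>

    struct demo_hdr {            /* hypothetical wire header */
            u32 size;
            u32 flags;
    };

    static int demo_payload_len(u32 hdr_size, size_t *payload)
    {
            /* true when hdr_size < sizeof(struct demo_hdr), i.e. the
             * subtraction would wrap in size_t */
            if (check_sub_overflow((size_t)hdr_size,
                                   sizeof(struct demo_hdr), payload))
                    return -EINVAL;
            return 0;
    }
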
@@ -2943,7 +2967,6 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq_init_attr.wq_type = cmd.wq_type;
wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
wq_init_attr.create_flags = cmd.create_flags;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
@@ -2952,7 +2975,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
goto err_put_cq;
}
- wq->uobject = &obj->uevent.uobject;
+ wq->uobject = obj;
obj->uevent.uobject.object = wq;
wq->wq_type = wq_init_attr.wq_type;
wq->cq = cq;
@@ -2962,7 +2985,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
atomic_set(&wq->usecnt, 0);
atomic_inc(&pd->usecnt);
atomic_inc(&cq->usecnt);
- wq->uobject = &obj->uevent.uobject;
+ wq->uobject = obj;
obj->uevent.uobject.object = wq;
memset(&resp, 0, sizeof(resp));
@@ -2976,13 +2999,16 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
goto err_copy;
uobj_put_obj_read(pd);
- uobj_put_obj_read(cq);
- return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
+ rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+ return 0;
err_copy:
ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
- uobj_put_obj_read(cq);
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
err_put_pd:
uobj_put_obj_read(pd);
err_uobj:
@@ -3048,7 +3074,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
}
ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
&attrs->driver_udata);
- uobj_put_obj_read(wq);
+ rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
@@ -3149,9 +3176,11 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
kfree(wqs_handles);
for (j = 0; j < num_read_wqs; j++)
- uobj_put_obj_read(wqs[j]);
+ rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
err_copy:
ib_destroy_rwq_ind_table(rwq_ind_tbl);
@@ -3159,7 +3188,8 @@ err_uobj:
uobj_alloc_abort(uobj, attrs);
put_wqs:
for (j = 0; j < num_read_wqs; j++)
- uobj_put_obj_read(wqs[j]);
+ rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
err_free:
kfree(wqs_handles);
kfree(wqs);
@@ -3325,11 +3355,13 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
if (err)
goto err_copy;
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
kfree(flow_attr);
if (cmd.flow_attr.num_of_specs)
kfree(kern_flow_attr);
- return uobj_alloc_commit(uobj, attrs);
+ rdma_alloc_commit_uobject(uobj, attrs);
+ return 0;
err_copy:
if (!qp->device->ops.destroy_flow(flow_id))
atomic_dec(&qp->usecnt);
@@ -3338,7 +3370,8 @@ err_free:
err_free_flow_attr:
kfree(flow_attr);
err_put:
- uobj_put_obj_read(qp);
+ rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
err_uobj:
uobj_alloc_abort(uobj, attrs);
err_free_attr:
@@ -3423,7 +3456,6 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
attr.attr.max_sge = cmd->max_sge;
attr.attr.srq_limit = cmd->srq_limit;
- obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
@@ -3435,7 +3467,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
srq->device = pd->device;
srq->pd = pd;
srq->srq_type = cmd->srq_type;
- srq->uobject = &obj->uevent.uobject;
+ srq->uobject = obj;
srq->event_handler = attr.event_handler;
srq->srq_context = attr.srq_context;
@@ -3474,10 +3506,12 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
uobj_put_read(xrcd_uobj);
if (ib_srq_has_cq(cmd->srq_type))
- uobj_put_obj_read(attr.ext.cq);
+ rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
uobj_put_obj_read(pd);
- return uobj_alloc_commit(&obj->uevent.uobject, attrs);
+ rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
+ return 0;
err_copy:
ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
@@ -3490,7 +3524,8 @@ err_put:
err_put_cq:
if (ib_srq_has_cq(cmd->srq_type))
- uobj_put_obj_read(attr.ext.cq);
+ rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
err_put_xrcd:
if (cmd->srq_type == IB_SRQT_XRC) {
@@ -3558,7 +3593,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
&attrs->driver_udata);
- uobj_put_obj_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
@@ -3581,7 +3617,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
ret = ib_query_srq(srq, &attr);
- uobj_put_obj_read(srq);
+ rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
if (ret)
return ret;
@@ -3706,8 +3743,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
- uobj_put_obj_read(cq);
-
+ rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
+ UVERBS_LOOKUP_READ);
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 269938f59d3f..538affbc517e 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -220,24 +220,17 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
return ret;
}
-static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
- struct uverbs_objs_arr_attr *attr,
- bool commit, struct uverbs_attr_bundle *attrs)
+static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ bool commit,
+ struct uverbs_attr_bundle *attrs)
{
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
- int current_ret;
- int ret = 0;
size_t i;
- for (i = 0; i != attr->len; i++) {
- current_ret = uverbs_finalize_object(attr->uobjects[i],
- spec->u2.objs_arr.access,
- commit, attrs);
- if (!ret)
- ret = current_ret;
- }
-
- return ret;
+ for (i = 0; i != attr->len; i++)
+ uverbs_finalize_object(attr->uobjects[i],
+ spec->u2.objs_arr.access, commit, attrs);
}
static int uverbs_process_attr(struct bundle_priv *pbundle,
@@ -495,26 +488,22 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
return ret;
}
-static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
+static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
struct bundle_alloc_head *memblock;
unsigned int i;
- int ret = 0;
/* fast path for simple uobjects */
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
- int current_ret;
- current_ret = uverbs_finalize_object(
+ uverbs_finalize_object(
attr->obj_attr.uobject,
attr->obj_attr.attr_elm->spec.u.obj.access, commit,
&pbundle->bundle);
- if (!ret)
- ret = current_ret;
}
i = -1;
@@ -523,7 +512,6 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
const struct uverbs_api_attr *attr_uapi;
void __rcu **slot;
- int current_ret;
slot = uapi_get_attr_for_method(
pbundle,
@@ -534,11 +522,8 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
attr_uapi = rcu_dereference_protected(*slot, true);
if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
- current_ret = uverbs_free_idrs_array(
- attr_uapi, &attr->objs_arr_attr, commit,
- &pbundle->bundle);
- if (!ret)
- ret = current_ret;
+ uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
+ commit, &pbundle->bundle);
}
}
@@ -548,8 +533,6 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
memblock = memblock->next;
kvfree(tmp);
}
-
- return ret;
}
static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
@@ -562,7 +545,6 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
struct bundle_priv *pbundle;
struct bundle_priv onstack;
void __rcu **slot;
- int destroy_ret;
int ret;
if (unlikely(hdr->driver_id != uapi->driver_id))
@@ -610,10 +592,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
- destroy_ret = bundle_destroy(pbundle, ret == 0);
- if (unlikely(destroy_ret && !ret))
- return destroy_ret;
-
+ bundle_destroy(pbundle, ret == 0);
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 970d8e31dd65..2d4083bf4a04 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -125,17 +125,8 @@ static void ib_uverbs_release_dev(struct device *device)
kfree(dev);
}
-static void ib_uverbs_release_async_event_file(struct kref *ref)
-{
- struct ib_uverbs_async_event_file *file =
- container_of(ref, struct ib_uverbs_async_event_file, ref);
-
- kfree(file);
-}
-
-void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
- struct ib_uverbs_completion_event_file *ev_file,
- struct ib_ucq_object *uobj)
+void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
+ struct ib_ucq_object *uobj)
{
struct ib_uverbs_event *evt, *tmp;
@@ -150,25 +141,24 @@ void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
uverbs_uobject_put(&ev_file->uobj);
}
- spin_lock_irq(&file->async_file->ev_queue.lock);
- list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
- list_del(&evt->list);
- kfree(evt);
- }
- spin_unlock_irq(&file->async_file->ev_queue.lock);
+ ib_uverbs_release_uevent(&uobj->uevent);
}
-void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
- struct ib_uevent_object *uobj)
+void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
+ struct ib_uverbs_async_event_file *async_file =
+ READ_ONCE(uobj->uobject.ufile->async_file);
struct ib_uverbs_event *evt, *tmp;
- spin_lock_irq(&file->async_file->ev_queue.lock);
+ if (!async_file)
+ return;
+
+ spin_lock_irq(&async_file->ev_queue.lock);
list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
list_del(&evt->list);
kfree(evt);
}
- spin_unlock_irq(&file->async_file->ev_queue.lock);
+ spin_unlock_irq(&async_file->ev_queue.lock);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
@@ -208,8 +198,7 @@ void ib_uverbs_release_file(struct kref *ref)
ib_uverbs_comp_dev(file->device);
if (file->async_file)
- kref_put(&file->async_file->ref,
- ib_uverbs_release_async_event_file);
+ uverbs_uobject_put(&file->async_file->uobj);
put_device(&file->device->dev);
if (file->disassociate_page)
@@ -220,7 +209,6 @@ void ib_uverbs_release_file(struct kref *ref)
}
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
- struct ib_uverbs_file *uverbs_file,
struct file *filp, char __user *buf,
size_t count, loff_t *pos,
size_t eventsz)
@@ -238,19 +226,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
if (wait_event_interruptible(ev_queue->poll_wait,
(!list_empty(&ev_queue->event_list) ||
- /* The barriers built into wait_event_interruptible()
- * and wake_up() guarentee this will see the null set
- * without using RCU
- */
- !uverbs_file->device->ib_dev)))
+ ev_queue->is_closed)))
return -ERESTARTSYS;
+ spin_lock_irq(&ev_queue->lock);
+
/* If device was disassociated and no event exists set an error */
- if (list_empty(&ev_queue->event_list) &&
- !uverbs_file->device->ib_dev)
+ if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
+ spin_unlock_irq(&ev_queue->lock);
return -EIO;
-
- spin_lock_irq(&ev_queue->lock);
+ }
}
event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
@@ -285,8 +270,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
{
struct ib_uverbs_async_event_file *file = filp->private_data;
- return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
- buf, count, pos,
+ return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
sizeof(struct ib_uverbs_async_event_desc));
}
@@ -296,9 +280,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
struct ib_uverbs_completion_event_file *comp_ev_file =
filp->private_data;
- return ib_uverbs_event_read(&comp_ev_file->ev_queue,
- comp_ev_file->uobj.ufile, filp,
- buf, count, pos,
+ return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
+ pos,
sizeof(struct ib_uverbs_comp_event_desc));
}
@@ -321,7 +304,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
- return ib_uverbs_event_poll(filp->private_data, filp, wait);
+ struct ib_uverbs_async_event_file *file = filp->private_data;
+
+ return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}
static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
@@ -335,9 +320,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
- struct ib_uverbs_event_queue *ev_queue = filp->private_data;
+ struct ib_uverbs_async_event_file *file = filp->private_data;
- return fasync_helper(fd, filp, on, &ev_queue->async_queue);
+ return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}
static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
@@ -348,70 +333,20 @@ static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
-static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
-{
- struct ib_uverbs_async_event_file *file = filp->private_data;
- struct ib_uverbs_file *uverbs_file = file->uverbs_file;
- struct ib_uverbs_event *entry, *tmp;
- int closed_already = 0;
-
- mutex_lock(&uverbs_file->device->lists_mutex);
- spin_lock_irq(&file->ev_queue.lock);
- closed_already = file->ev_queue.is_closed;
- file->ev_queue.is_closed = 1;
- list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
- if (entry->counter)
- list_del(&entry->obj_list);
- kfree(entry);
- }
- spin_unlock_irq(&file->ev_queue.lock);
- if (!closed_already) {
- list_del(&file->list);
- ib_unregister_event_handler(&uverbs_file->event_handler);
- }
- mutex_unlock(&uverbs_file->device->lists_mutex);
-
- kref_put(&uverbs_file->ref, ib_uverbs_release_file);
- kref_put(&file->ref, ib_uverbs_release_async_event_file);
-
- return 0;
-}
-
-static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
-{
- struct ib_uobject *uobj = filp->private_data;
- struct ib_uverbs_completion_event_file *file = container_of(
- uobj, struct ib_uverbs_completion_event_file, uobj);
- struct ib_uverbs_event *entry, *tmp;
-
- spin_lock_irq(&file->ev_queue.lock);
- list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
- if (entry->counter)
- list_del(&entry->obj_list);
- kfree(entry);
- }
- file->ev_queue.is_closed = 1;
- spin_unlock_irq(&file->ev_queue.lock);
-
- uverbs_close_fd(filp);
-
- return 0;
-}
-
const struct file_operations uverbs_event_fops = {
.owner = THIS_MODULE,
.read = ib_uverbs_comp_event_read,
.poll = ib_uverbs_comp_event_poll,
- .release = ib_uverbs_comp_event_close,
+ .release = uverbs_uobject_fd_release,
.fasync = ib_uverbs_comp_event_fasync,
.llseek = no_llseek,
};
-static const struct file_operations uverbs_async_event_fops = {
+const struct file_operations uverbs_async_event_fops = {
.owner = THIS_MODULE,
.read = ib_uverbs_async_event_read,
.poll = ib_uverbs_async_event_poll,
- .release = ib_uverbs_async_event_close,
+ .release = uverbs_uobject_fd_release,
.fasync = ib_uverbs_async_event_fasync,
.llseek = no_llseek,
};
@@ -438,9 +373,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
return;
}
- uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+ uobj = cq->uobject;
- entry->desc.comp.cq_handle = cq->uobject->user_handle;
+ entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
entry->counter = &uobj->comp_events_reported;
list_add_tail(&entry->list, &ev_queue->event_list);
@@ -451,102 +386,82 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
-static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
- __u64 element, __u64 event,
- struct list_head *obj_list,
- u32 *counter)
+static void
+ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+ __u64 element, __u64 event, struct list_head *obj_list,
+ u32 *counter)
{
struct ib_uverbs_event *entry;
unsigned long flags;
- spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
- if (file->async_file->ev_queue.is_closed) {
- spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
+ if (!async_file)
+ return;
+
+ spin_lock_irqsave(&async_file->ev_queue.lock, flags);
+ if (async_file->ev_queue.is_closed) {
+ spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
return;
}
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
- spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
+ spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
return;
}
- entry->desc.async.element = element;
+ entry->desc.async.element = element;
entry->desc.async.event_type = event;
- entry->desc.async.reserved = 0;
- entry->counter = counter;
+ entry->desc.async.reserved = 0;
+ entry->counter = counter;
- list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
+ list_add_tail(&entry->list, &async_file->ev_queue.event_list);
if (obj_list)
list_add_tail(&entry->obj_list, obj_list);
- spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
+ spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
- wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
- kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
+ wake_up_interruptible(&async_file->ev_queue.poll_wait);
+ kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
-void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
+static void uverbs_uobj_event(struct ib_uevent_object *eobj,
+ struct ib_event *event)
{
- struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
- struct ib_ucq_object, uobject);
+ ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
+ eobj->uobject.user_handle, event->event,
+ &eobj->event_list, &eobj->events_reported);
+}
- ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
- event->event, &uobj->async_list,
- &uobj->async_events_reported);
+void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
+{
+ uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
- struct ib_uevent_object *uobj;
-
/* for XRC target qp's, check that qp is live */
if (!event->element.qp->uobject)
return;
- uobj = container_of(event->element.qp->uobject,
- struct ib_uevent_object, uobject);
-
- ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
- event->event, &uobj->event_list,
- &uobj->events_reported);
+ uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
- struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
- struct ib_uevent_object, uobject);
-
- ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
- event->event, &uobj->event_list,
- &uobj->events_reported);
+ uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
}
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
- struct ib_uevent_object *uobj;
-
- uobj = container_of(event->element.srq->uobject,
- struct ib_uevent_object, uobject);
-
- ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
- event->event, &uobj->event_list,
- &uobj->events_reported);
-}
-
-void ib_uverbs_event_handler(struct ib_event_handler *handler,
- struct ib_event *event)
-{
- struct ib_uverbs_file *file =
- container_of(handler, struct ib_uverbs_file, event_handler);
-
- ib_uverbs_async_handler(file, event->element.port_num, event->event,
- NULL, NULL);
+ uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
}
-void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
+static void ib_uverbs_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
{
- kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
- file->async_file = NULL;
+ ib_uverbs_async_handler(
+ container_of(handler, struct ib_uverbs_async_event_file,
+ event_handler),
+ event->element.port_num, event->event, NULL, NULL);
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
@@ -558,45 +473,26 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
ev_queue->async_queue = NULL;
}
-struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
- struct ib_device *ib_dev)
+void ib_uverbs_init_async_event_file(
+ struct ib_uverbs_async_event_file *async_file)
{
- struct ib_uverbs_async_event_file *ev_file;
- struct file *filp;
-
- ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
- if (!ev_file)
- return ERR_PTR(-ENOMEM);
-
- ib_uverbs_init_event_queue(&ev_file->ev_queue);
- ev_file->uverbs_file = uverbs_file;
- kref_get(&ev_file->uverbs_file->ref);
- kref_init(&ev_file->ref);
- filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
- ev_file, O_RDONLY);
- if (IS_ERR(filp))
- goto err_put_refs;
-
- mutex_lock(&uverbs_file->device->lists_mutex);
- list_add_tail(&ev_file->list,
- &uverbs_file->device->uverbs_events_file_list);
- mutex_unlock(&uverbs_file->device->lists_mutex);
-
- WARN_ON(uverbs_file->async_file);
- uverbs_file->async_file = ev_file;
- kref_get(&uverbs_file->async_file->ref);
- INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
- ib_dev,
- ib_uverbs_event_handler);
- ib_register_event_handler(&uverbs_file->event_handler);
- /* At that point async file stuff was fully set */
+ struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
+ struct ib_device *ib_dev = async_file->uobj.context->device;
+
+ ib_uverbs_init_event_queue(&async_file->ev_queue);
- return filp;
+ /* The first async_event_file becomes the default one for the file. */
+ mutex_lock(&uverbs_file->ucontext_lock);
+ if (!uverbs_file->async_file) {
+ /* Pairs with the put in ib_uverbs_release_file */
+ uverbs_uobject_get(&async_file->uobj);
+ smp_store_release(&uverbs_file->async_file, async_file);
+ }
+ mutex_unlock(&uverbs_file->ucontext_lock);
-err_put_refs:
- kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
- kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
- return filp;
+ INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
+ ib_uverbs_event_handler);
+ ib_register_event_handler(&async_file->event_handler);
}
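[Editor's note: the smp_store_release() above pairs with the READ_ONCE() readers in ib_uverbs_release_uevent() and ib_uverbs_async_handler(). A hedged sketch of the publish/consume idiom, with names taken from the patch but shown flat:]

/* Writer, under ucontext_lock: publish only a fully initialized object. */
uverbs_uobject_get(&async_file->uobj);	/* ref dropped in release_file */
smp_store_release(&ufile->async_file, async_file);

/* Reader, lock-free: snapshot once and tolerate NULL. */
struct ib_uverbs_async_event_file *af = READ_ONCE(ufile->async_file);
if (!af)
	return;		/* no async FD was ever created for this ufile */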
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
@@ -1225,7 +1121,6 @@ static void ib_uverbs_add_one(struct ib_device *device)
mutex_init(&uverbs_dev->xrcd_tree_mutex);
mutex_init(&uverbs_dev->lists_mutex);
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
- INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
@@ -1270,14 +1165,9 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_device *ib_dev)
{
struct ib_uverbs_file *file;
- struct ib_uverbs_async_event_file *event_file;
- struct ib_event event;
/* Pending running commands to terminate */
uverbs_disassociate_api_pre(uverbs_dev);
- event.event = IB_EVENT_DEVICE_FATAL;
- event.element.port_num = 0;
- event.device = ib_dev;
mutex_lock(&uverbs_dev->lists_mutex);
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
@@ -1293,31 +1183,14 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
*/
mutex_unlock(&uverbs_dev->lists_mutex);
- ib_uverbs_event_handler(&file->event_handler, &event);
+ ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
+ IB_EVENT_DEVICE_FATAL, NULL, NULL);
+
uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
kref_put(&file->ref, ib_uverbs_release_file);
mutex_lock(&uverbs_dev->lists_mutex);
}
-
- while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
- event_file = list_first_entry(&uverbs_dev->
- uverbs_events_file_list,
- struct ib_uverbs_async_event_file,
- list);
- spin_lock_irq(&event_file->ev_queue.lock);
- event_file->ev_queue.is_closed = 1;
- spin_unlock_irq(&event_file->ev_queue.lock);
-
- list_del(&event_file->list);
- ib_unregister_event_handler(
- &event_file->uverbs_file->event_handler);
- event_file->uverbs_file->event_handler.device =
- NULL;
-
- wake_up_interruptible(&event_file->ev_queue.poll_wait);
- kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
- }
mutex_unlock(&uverbs_dev->lists_mutex);
uverbs_disassociate_api(uverbs_dev->uapi);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 35b2e2c640cc..3abfc63225cb 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -105,7 +105,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
if (uqp->uxrcd)
atomic_dec(&uqp->uxrcd->refcnt);
- ib_uverbs_release_uevent(attrs->ufile, &uqp->uevent);
+ ib_uverbs_release_uevent(&uqp->uevent);
return ret;
}
@@ -138,7 +138,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
- ib_uverbs_release_uevent(attrs->ufile, &uwq->uevent);
+ ib_uverbs_release_uevent(&uwq->uevent);
return ret;
}
@@ -163,7 +163,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
atomic_dec(&us->uxrcd->refcnt);
}
- ib_uverbs_release_uevent(attrs->ufile, uevent);
+ ib_uverbs_release_uevent(uevent);
return ret;
}
@@ -202,24 +202,41 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
return 0;
}
-static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
{
- struct ib_uverbs_completion_event_file *comp_event_file =
- container_of(uobj, struct ib_uverbs_completion_event_file,
- uobj);
- struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
+ struct ib_uverbs_event *entry, *tmp;
spin_lock_irq(&event_queue->lock);
+ /*
+ * The user must ensure that no new items are added to the event_list
+ * once is_closed is set.
+ */
event_queue->is_closed = 1;
spin_unlock_irq(&event_queue->lock);
+ wake_up_interruptible(&event_queue->poll_wait);
+ kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
- if (why == RDMA_REMOVE_DRIVER_REMOVE) {
- wake_up_interruptible(&event_queue->poll_wait);
- kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
+ spin_lock_irq(&event_queue->lock);
+ list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
+ if (entry->counter)
+ list_del(&entry->obj_list);
+ list_del(&entry->list);
+ kfree(entry);
}
+ spin_unlock_irq(&event_queue->lock);
+}
+
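[Editor's note: ib_uverbs_free_event_queue() above relies on list_for_each_entry_safe() so entries can be unlinked and freed mid-walk. A self-contained sketch of the same drain idiom, using a hypothetical struct item:]

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
};

/* Safe teardown: the _safe iterator caches the next node before the
 * current one is freed, so list_del() + kfree() cannot corrupt the walk. */
static void drain(struct list_head *head, spinlock_t *lock)
{
	struct item *entry, *tmp;

	spin_lock_irq(lock);
	list_for_each_entry_safe(entry, tmp, head, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_irq(lock);
}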
+static int
+uverbs_completion_event_file_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct ib_uverbs_completion_event_file *file =
+ container_of(uobj, struct ib_uverbs_completion_event_file,
+ uobj);
+
+ ib_uverbs_free_event_queue(&file->ev_queue);
return 0;
-};
+}
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
{
@@ -230,7 +247,7 @@ EXPORT_SYMBOL(uverbs_destroy_def_handler);
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_COMP_CHANNEL,
UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
- uverbs_hot_unplug_completion_event_file,
+ uverbs_completion_event_file_destroy_uobj,
&uverbs_event_fops,
"[infinibandevent]",
O_RDONLY));
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
new file mode 100644
index 000000000000..82ec0806b34b
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_async_fd.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int UVERBS_HANDLER(UVERBS_METHOD_ASYNC_EVENT_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_METHOD_ASYNC_EVENT_ALLOC);
+
+ ib_uverbs_init_async_event_file(
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj));
+ return 0;
+}
+
+static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct ib_uverbs_async_event_file *event_file =
+ container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+ ib_unregister_event_handler(&event_file->event_handler);
+ ib_uverbs_free_event_queue(&event_file->ev_queue);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+ UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_async_event_file),
+ uverbs_async_event_destroy_uobj,
+ &uverbs_async_event_fops,
+ "[infinibandevent]",
+ O_RDONLY),
+ &UVERBS_METHOD(UVERBS_METHOD_ASYNC_EVENT_ALLOC));
+
+const struct uapi_definition uverbs_def_obj_async_fd[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_ASYNC_EVENT),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index e39fe6a8aac4..da4110a0eea2 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -41,7 +41,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
struct ib_cq *cq = uobject->object;
struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
struct ib_ucq_object *ucq =
- container_of(uobject, struct ib_ucq_object, uobject);
+ container_of(uobject, struct ib_ucq_object, uevent.uobject);
int ret;
ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
@@ -49,7 +49,6 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
return ret;
ib_uverbs_release_ucq(
- attrs->ufile,
ev_queue ? container_of(ev_queue,
struct ib_uverbs_completion_event_file,
ev_queue) :
@@ -63,7 +62,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
{
struct ib_ucq_object *obj = container_of(
uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE),
- typeof(*obj), uobject);
+ typeof(*obj), uevent.uobject);
struct ib_device *ib_dev = attrs->context->device;
int ret;
u64 user_handle;
@@ -106,10 +105,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
goto err_event_file;
}
- obj->comp_events_reported = 0;
- obj->async_events_reported = 0;
INIT_LIST_HEAD(&obj->comp_list);
- INIT_LIST_HEAD(&obj->async_list);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
if (!cq) {
@@ -118,7 +115,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
}
cq->device = ib_dev;
- cq->uobject = &obj->uobject;
+ cq->uobject = obj;
cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
@@ -129,8 +126,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
if (ret)
goto err_free;
- obj->uobject.object = cq;
- obj->uobject.user_handle = user_handle;
+ obj->uevent.uobject.object = cq;
+ obj->uevent.uobject.user_handle = user_handle;
rdma_restrack_uadd(&cq->res);
ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
@@ -182,10 +179,10 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
struct ib_ucq_object *obj =
- container_of(uobj, struct ib_ucq_object, uobject);
+ container_of(uobj, struct ib_ucq_object, uevent.uobject);
struct ib_uverbs_destroy_cq_resp resp = {
.comp_events_reported = obj->comp_events_reported,
- .async_events_reported = obj->async_events_reported
+ .async_events_reported = obj->uevent.events_reported
};
return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_CQ_RESP, &resp,
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 2a3f2f01028d..ae4a59d6f9b1 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -200,6 +200,43 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
&resp, sizeof(resp));
}
+static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u32 num_comp = attrs->ufile->device->num_comp_vectors;
+ u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS;
+ int ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ &num_comp, sizeof(num_comp));
+ if (IS_UVERBS_COPY_ERR(ret))
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+ &core_support, sizeof(core_support));
+ if (IS_UVERBS_COPY_ERR(ret))
+ return ret;
+
+ ret = ib_alloc_ucontext(attrs);
+ if (ret)
+ return ret;
+ ret = ib_init_ucontext(attrs);
+ if (ret) {
+ kfree(attrs->context);
+ attrs->context = NULL;
+ return ret;
+ }
+ return 0;
+}
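[Editor's note: the two uverbs_copy_to() calls above follow the optional-attribute convention. A sketch of that pattern, assuming the usual uverbs_ioctl.h semantics where a missing optional attribute yields -ENOENT and IS_UVERBS_COPY_ERR() flags only real copy faults; SOME_OPTIONAL_ATTR_ID is hypothetical:]

u32 val = 42;
int ret;

ret = uverbs_copy_to(attrs, SOME_OPTIONAL_ATTR_ID, &val, sizeof(val));
if (IS_UVERBS_COPY_ERR(ret))	/* -EFAULT etc.; an absent attr is fine */
	return ret;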
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_GET_CONTEXT,
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_TYPE(u32), UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+ UVERBS_ATTR_TYPE(u64), UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_INFO_HANDLES,
/* Also includes any device specific object ids */
@@ -220,6 +257,7 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY));
DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
+ &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
&UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
&UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
&UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT));
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 00c547887132..3f121ac31e0a 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -195,9 +195,9 @@ static int uapi_merge_obj_tree(struct uverbs_api *uapi,
* disassociation, and the FD types require the driver to use
* struct file_operations.owner to prevent the driver module
* code from unloading while the file is open. This provides
- * enough safety that uverbs_close_fd() will continue to work.
- * Drivers using FD are responsible to handle disassociation of
- * the device on their own.
+ * enough safety that uverbs_uobject_fd_release() will
+ * continue to work. Drivers using FD are responsible to
+ * handle disassociation of the device on their own.
*/
if (WARN_ON(is_driver &&
obj->type_attrs->type_class != &uverbs_idr_class &&
@@ -626,6 +626,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
}
static const struct uapi_definition uverbs_core_api[] = {
+ UAPI_DEF_CHAIN(uverbs_def_obj_async_fd),
UAPI_DEF_CHAIN(uverbs_def_obj_counters),
UAPI_DEF_CHAIN(uverbs_def_obj_cq),
UAPI_DEF_CHAIN(uverbs_def_obj_device),
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index dd765e176cdd..3ebae3b65c28 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -52,6 +52,8 @@
#include <rdma/rw.h>
#include "core_priv.h"
+#include <trace/events/rdma_core.h>
+
static int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr);
@@ -1053,11 +1056,11 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
struct ib_qp *qp = context;
unsigned long flags;
- spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+ spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
list_for_each_entry(event->element.qp, &qp->open_list, open_list)
if (event->element.qp->event_handler)
event->element.qp->event_handler(event, event->element.qp->qp_context);
- spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
+ spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
}
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
@@ -1094,9 +1097,9 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
qp->qp_num = real_qp->qp_num;
qp->qp_type = real_qp->qp_type;
- spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+ spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
list_add(&qp->open_list, &real_qp->open_list);
- spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+ spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
return qp;
}
@@ -1824,9 +1827,9 @@ int ib_close_qp(struct ib_qp *qp)
if (real_qp == qp)
return -EINVAL;
- spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+ spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
list_del(&qp->open_list);
- spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+ spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
atomic_dec(&real_qp->usecnt);
if (qp->qp_sec)
@@ -1990,6 +1993,47 @@ EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags)
+{
+ struct ib_mr *mr;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ if (!(pd->device->attrs.device_cap_flags &
+ IB_DEVICE_ON_DEMAND_PAGING)) {
+ pr_debug("ODP support not available\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
+ access_flags, NULL);
+
+ if (IS_ERR(mr))
+ return mr;
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->res.type = RDMA_RESTRACK_MR;
+ rdma_restrack_kadd(&mr->res);
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_reg_user_mr);
+
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge)
+{
+ if (!pd->device->ops.advise_mr)
+ return -EOPNOTSUPP;
+
+ return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
+ NULL);
+}
+EXPORT_SYMBOL(ib_advise_mr);
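[Editor's note: both new exports follow the core convention for provider ops. A hedged usage sketch for an in-kernel caller; pd, user_addr, len, sg_list and num_sge are illustrative:]

struct ib_mr *mr;
int ret;

/* Devices without ODP reject IB_ACCESS_ON_DEMAND up front. */
mr = ib_reg_user_mr(pd, user_addr, len, user_addr,
		    IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(mr))
	return PTR_ERR(mr);

/* advise_mr is optional; providers without it yield -EOPNOTSUPP. */
ret = ib_advise_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH,
		   IB_UVERBS_ADVISE_MR_FLAG_FLUSH, sg_list, num_sge);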
+
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
struct ib_pd *pd = mr->pd;
@@ -1997,6 +2041,7 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
int ret;
+ trace_mr_dereg(mr);
rdma_restrack_del(&mr->res);
ret = mr->device->ops.dereg_mr(mr, udata);
if (!ret) {
@@ -2028,11 +2073,16 @@ struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
{
struct ib_mr *mr;
- if (!pd->device->ops.alloc_mr)
- return ERR_PTR(-EOPNOTSUPP);
+ if (!pd->device->ops.alloc_mr) {
+ mr = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
- if (WARN_ON_ONCE(mr_type == IB_MR_TYPE_INTEGRITY))
- return ERR_PTR(-EINVAL);
+ if (mr_type == IB_MR_TYPE_INTEGRITY) {
+ WARN_ON_ONCE(1);
+ mr = ERR_PTR(-EINVAL);
+ goto out;
+ }
mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
if (!IS_ERR(mr)) {
@@ -2048,6 +2098,8 @@ struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
mr->sig_attrs = NULL;
}
+out:
+ trace_mr_alloc(pd, mr_type, max_num_sg, mr);
return mr;
}
EXPORT_SYMBOL(ib_alloc_mr_user);
@@ -2072,21 +2124,27 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
struct ib_sig_attrs *sig_attrs;
if (!pd->device->ops.alloc_mr_integrity ||
- !pd->device->ops.map_mr_sg_pi)
- return ERR_PTR(-EOPNOTSUPP);
+ !pd->device->ops.map_mr_sg_pi) {
+ mr = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
- if (!max_num_meta_sg)
- return ERR_PTR(-EINVAL);
+ if (!max_num_meta_sg) {
+ mr = ERR_PTR(-EINVAL);
+ goto out;
+ }
sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
- if (!sig_attrs)
- return ERR_PTR(-ENOMEM);
+ if (!sig_attrs) {
+ mr = ERR_PTR(-ENOMEM);
+ goto out;
+ }
mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
max_num_meta_sg);
if (IS_ERR(mr)) {
kfree(sig_attrs);
- return mr;
+ goto out;
}
mr->device = pd->device;
@@ -2100,6 +2158,8 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
mr->type = IB_MR_TYPE_INTEGRITY;
mr->sig_attrs = sig_attrs;
+out:
+ trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
return mr;
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);
@@ -2744,6 +2804,7 @@ void ib_drain_sq(struct ib_qp *qp)
qp->device->ops.drain_sq(qp);
else
__ib_drain_sq(qp);
+ trace_cq_drain_complete(qp->send_cq);
}
EXPORT_SYMBOL(ib_drain_sq);
@@ -2772,6 +2833,7 @@ void ib_drain_rq(struct ib_qp *qp)
qp->device->ops.drain_rq(qp);
else
__ib_drain_rq(qp);
+ trace_cq_drain_complete(qp->recv_cq);
}
EXPORT_SYMBOL(ib_drain_rq);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 9b6ca15a183c..52b6a4d85460 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -837,7 +837,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
+ umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -850,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qprva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
@@ -1304,7 +1305,8 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
+ umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2545,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto fail;
}
- cq->umem = ib_umem_get(udata, req.cq_va,
+ cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
entries * sizeof(struct cq_base),
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
@@ -3305,8 +3307,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc)
+ if (rc) {
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ return rc;
+ }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3512,7 +3516,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e7e8a0f49464..793c97251588 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -677,7 +677,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);
- ibdev->num_comp_vectors = 1;
+ ibdev->num_comp_vectors = rdev->num_msix - 1;
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 958c1ff9c515..020f70e6865e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -442,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
goto fail;
}
/* Unconditionally map 8 bytes to support 57500 series */
- nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
+ nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
if (!nq->bar_reg_iomem) {
rc = -ENOMEM;
goto fail;
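[Editor's note: these hunks are part of the tree-wide ioremap_nocache() removal; plain ioremap() already returns an uncached mapping on all architectures, so the conversion is a straight rename. A minimal before/after sketch with illustrative bar_base/reg_off:]

void __iomem *regs;

regs = ioremap(bar_base + reg_off, 8);	/* was: ioremap_nocache(...) */
if (!regs)
	return -ENOMEM;
writeq(0, regs);			/* MMIO access unchanged */
iounmap(regs);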
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
/* Add qp to flush list of the CQ */
bnxt_qplib_add_flush_qp(qp);
} else {
+ /* Before we complete, do WA 9060 */
+ if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
- /* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
- }
cqe->status = CQ_REQ_STATUS_OK;
cqe++;
(*budget)--;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5cdfa84faf85..1291b12287a5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -717,7 +717,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
if (!res_base)
return -ENOMEM;
- rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
+ rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
@@ -739,7 +739,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
"CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
/* Unconditionally map 8 bytes to support 57500 series */
- rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
+ rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
8);
if (!rcfw->creq_bar_reg_iomem) {
dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index bdbde8e22420..60ea1b924b67 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -704,7 +704,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
return -ENOMEM;
}
- dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
+ dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ee1182f9b627..d69dece3b1d5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
+ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
+ * when entering the TERM state the RNIC MUST initiate a CLOSE.
+ */
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
} else
pr_warn("TERM received tid %u no ep/qp\n", tid);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index fe3a7e8561df..962dc97a8ff2 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc);
+ mhp->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bbcac539777a..89ac2f9ae6dd 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
- c4iw_get_ep(&ep->com);
- disconnect = 1;
if (!internal) {
+ c4iw_get_ep(&ep->com);
terminate = 1;
+ disconnect = 1;
} else {
terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index e96bcb16bd2b..74b787a90660 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -160,10 +160,16 @@ struct efa_admin_create_qp_resp {
/* Common Admin Queue completion descriptor */
struct efa_admin_acq_common_desc acq_common_desc;
- /* Opaque handle to be used for consequent operations on the QP */
+ /*
+ * Opaque handle to be used for subsequent admin operations on the
+ * QP
+ */
u32 qp_handle;
- /* QP number in the given EFA virtual device */
+ /*
+ * QP number in the given EFA virtual device. Least-significant bits
+ * (as needed according to max_qp) carry unique QP ID
+ */
u16 qp_num;
/* MBZ */
@@ -286,6 +292,7 @@ struct efa_admin_create_ah_cmd {
/* PD number */
u16 pd;
+ /* MBZ */
u16 reserved;
};
@@ -296,6 +303,7 @@ struct efa_admin_create_ah_resp {
/* Target interface address handle (opaque) */
u16 ah;
+ /* MBZ */
u16 reserved;
};
@@ -372,6 +380,7 @@ struct efa_admin_reg_mr_cmd {
*/
u8 permissions;
+ /* MBZ */
u16 reserved16_w5;
/* number of pages in PBL (redundant, could be calculated) */
@@ -419,20 +428,20 @@ struct efa_admin_create_cq_cmd {
struct efa_admin_aq_common_desc aq_common_desc;
/*
- * 4:0 : reserved5
+ * 4:0 : reserved5 - MBZ
* 5 : interrupt_mode_enabled - if set, cq operates
* in interrupt mode (i.e. CQ events and MSI-X are
* generated), otherwise - polling
* 6 : virt - If set, ring base address is virtual
* (IOVA returned by MR registration)
- * 7 : reserved6
+ * 7 : reserved6 - MBZ
*/
u8 cq_caps_1;
/*
* 4:0 : cq_entry_size_words - size of CQ entry in
* 32-bit words, valid values: 4, 8.
- * 7:5 : reserved7
+ * 7:5 : reserved7 - MBZ
*/
u8 cq_caps_2;
@@ -478,6 +487,7 @@ struct efa_admin_destroy_cq_cmd {
u16 cq_idx;
+ /* MBZ */
u16 reserved1;
};
@@ -530,7 +540,7 @@ struct efa_admin_get_set_feature_common_desc {
/*
* 1:0 : select - 0x1 - current value; 0x3 - default
* value
- * 7:3 : reserved3
+ * 7:3 : reserved3 - MBZ
*/
u8 flags;
@@ -557,10 +567,10 @@ struct efa_admin_feature_device_attr_desc {
/* Bar used for SQ and RQ doorbells */
u16 db_bar;
- /* Indicates how many bits are used physical address access */
+ /* Indicates how many bits are used for physical address access */
u8 phys_addr_width;
- /* Indicates how many bits are used virtual address access */
+ /* Indicates how many bits are used for virtual address access */
u8 virt_addr_width;
/*
@@ -578,27 +588,28 @@ struct efa_admin_feature_queue_attr_desc {
/* The maximum number of queue pairs supported */
u32 max_qp;
+ /* Maximum number of WQEs per Send Queue */
u32 max_sq_depth;
- /* max send wr used in inline-buf */
+ /* Maximum size of data that can be sent inline in a Send WQE */
u32 inline_buf_size;
+ /* Maximum number of buffer descriptors per Recv Queue */
u32 max_rq_depth;
/* The maximum number of completion queues supported per VF */
u32 max_cq;
+ /* Maximum number of CQEs per Completion Queue */
u32 max_cq_depth;
/* Number of sub-CQs to be created for each CQ */
u16 sub_cqs_per_cq;
+ /* MBZ */
u16 reserved;
- /*
- * Maximum number of SGEs (buffs) allowed for a single send work
- * queue element (WQE)
- */
+ /* Maximum number of SGEs (buffers) allowed for a single send WQE */
u16 max_wr_send_sges;
/* Maximum number of SGEs allowed for a single recv WQE */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 50c22575aed6..ec5545870554 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -387,8 +387,7 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
return efa_com_destroy_qp(&dev->edev, &params);
}
-static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
- struct efa_qp *qp)
+static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
@@ -398,8 +397,6 @@ static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
- struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
- struct efa_ucontext, ibucontext);
struct efa_dev *dev = to_edev(ibqp->pd->device);
struct efa_qp *qp = to_eqp(ibqp);
int err;
@@ -418,7 +415,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
DMA_TO_DEVICE);
}
- efa_qp_user_mmap_entries_remove(ucontext, qp);
+ efa_qp_user_mmap_entries_remove(qp);
kfree(qp);
return 0;
}
@@ -510,7 +507,7 @@ static int qp_mmap_entries_setup(struct efa_qp *qp,
return 0;
err_remove_mmap:
- efa_qp_user_mmap_entries_remove(ucontext, qp);
+ efa_qp_user_mmap_entries_remove(qp);
return -ENOMEM;
}
@@ -719,7 +716,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
return &qp->ibqp;
err_remove_mmap_entries:
- efa_qp_user_mmap_entries_remove(ucontext, qp);
+ efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
@@ -1358,7 +1355,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
int inline_size;
int err;
- if (udata->inlen &&
+ if (udata && udata->inlen &&
!ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, udata not cleared\n");
@@ -1370,6 +1367,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
IB_ACCESS_LOCAL_WRITE |
(is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
+ access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~supp_access_flags) {
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
@@ -1384,7 +1382,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags);
+ mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
@@ -1608,13 +1606,12 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
err = -EINVAL;
}
- if (err) {
+ if (err)
ibdev_dbg(
&dev->ibdev,
"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
entry->address, rdma_entry->npages * PAGE_SIZE,
entry->mmap_flag, err);
- }
rdma_user_mmap_entry_put(rdma_entry);
return err;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index c142b23bb401..1aeea5d65c01 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
}
+ free_cpumask_var(available_cpus);
+ free_cpumask_var(non_intr_cpus);
return 0;
fail:
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9b1fb84a3d45..e0b1238d31df 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1685,6 +1685,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry,
return dd->verbs_dev.n_piodrain;
}
+static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = context;
+
+ return dd->ctx0_seq_drop;
+}
+
static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data)
{
@@ -4106,6 +4114,7 @@ def_access_ibp_counter(rc_crwaits);
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
+[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
@@ -4249,6 +4258,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
access_sw_cpu_rcv_limit),
+[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
+ access_sw_ctx0_seq_drop),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
@@ -6862,7 +6873,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
}
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
- rcvmask |= rcd->rcvhdrtail_kvaddr ?
+ rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(dd, rcvmask, rcd);
hfi1_rcd_put(rcd);
@@ -8394,20 +8405,62 @@ void force_recv_intr(struct hfi1_ctxtdata *rcd)
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
u32 tail;
- int present;
- if (!rcd->rcvhdrtail_kvaddr)
- present = (rcd->seq_cnt ==
- rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
- else /* is RDMA rtail */
- present = (rcd->head != get_rcvhdrtail(rcd));
-
- if (present)
+ if (hfi1_packet_present(rcd))
return 1;
/* fall back to a CSR read, correct independent of DMA_RTAIL */
tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
- return rcd->head != tail;
+ return hfi1_rcd_head(rcd) != tail;
+}
+
+/**
+ * Common code for receive contexts interrupt handlers.
+ * Update traces, increment kernel IRQ counter and
+ * setup ASPM when needed.
+ */
+static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+
+ trace_hfi1_receive_interrupt(dd, rcd);
+ this_cpu_inc(*dd->int_counter);
+ aspm_ctx_disable(rcd);
+}
+
+/**
+ * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
+ * when there are packets present in the queue. When calling
+ * with interrupts enabled, please use hfi1_rcd_eoi_intr().
+ *
+ * @rcd: valid receive context
+ */
+static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+{
+ clear_recv_intr(rcd);
+ if (check_packet_present(rcd))
+ force_recv_intr(rcd);
+}
+
+/**
+ * hfi1_rcd_eoi_intr() - End of Interrupt processing action
+ *
+ * @rcd: Ptr to hfi1_ctxtdata of receive context
+ *
+ * Hold IRQs so we can safely clear the interrupt and
+ * recheck for a packet that may have arrived after the previous
+ * check and the interrupt clear. If a packet arrived, force another
+ * interrupt. This routine can be called at the end of receive packet
+ * processing in interrupt service routines, the interrupt service
+ * thread and softirqs.
+ */
+static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __hfi1_rcd_eoi_intr(rcd);
+ local_irq_restore(flags);
}
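[Editor's note: the wrapper above exists because a packet can land between clearing the interrupt and the presence recheck; masking local IRQs keeps that window race-free. The combined sequence the two helpers implement, shown flat as a sketch rather than additional patch content:]

unsigned long flags;

local_irq_save(flags);			/* close the clear/recheck race */
clear_recv_intr(rcd);			/* ack the interrupt */
if (check_packet_present(rcd))
	force_recv_intr(rcd);		/* a packet raced in: re-arm */
local_irq_restore(flags);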
/*
@@ -8421,13 +8474,9 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
irqreturn_t receive_context_interrupt(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
- struct hfi1_devdata *dd = rcd->dd;
int disposition;
- int present;
- trace_hfi1_receive_interrupt(dd, rcd);
- this_cpu_inc(*dd->int_counter);
- aspm_ctx_disable(rcd);
+ receive_interrupt_common(rcd);
/* receive interrupt remains blocked while processing packets */
disposition = rcd->do_interrupt(rcd, 0);
@@ -8440,17 +8489,7 @@ irqreturn_t receive_context_interrupt(int irq, void *data)
if (disposition == RCV_PKT_LIMIT)
return IRQ_WAKE_THREAD;
- /*
- * The packet processor detected no more packets. Clear the receive
- * interrupt and recheck for a packet packet that may have arrived
- * after the previous check and interrupt clear. If a packet arrived,
- * force another interrupt.
- */
- clear_recv_intr(rcd);
- present = check_packet_present(rcd);
- if (present)
- force_recv_intr(rcd);
-
+ __hfi1_rcd_eoi_intr(rcd);
return IRQ_HANDLED;
}
@@ -8461,24 +8500,11 @@ irqreturn_t receive_context_interrupt(int irq, void *data)
irqreturn_t receive_context_thread(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
- int present;
/* receive interrupt is still blocked from the IRQ handler */
(void)rcd->do_interrupt(rcd, 1);
- /*
- * The packet processor will only return if it detected no more
- * packets. Hold IRQs here so we can safely clear the interrupt and
- * recheck for a packet that may have arrived after the previous
- * check and the interrupt clear. If a packet arrived, force another
- * interrupt.
- */
- local_irq_disable();
- clear_recv_intr(rcd);
- present = check_packet_present(rcd);
- if (present)
- force_recv_intr(rcd);
- local_irq_enable();
+ hfi1_rcd_eoi_intr(rcd);
return IRQ_HANDLED;
}
@@ -10049,7 +10075,7 @@ u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
* the first kernel context would have been allocated by now so
* we are guaranteed a valid value.
*/
- return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
+ return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}
/*
@@ -10094,7 +10120,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
sc_mtu_to_threshold(dd->vld[i].sc,
dd->vld[i].mtu,
- dd->rcd[0]->rcvhdrqentsize));
+ get_hdrqentsize(dd->rcd[0])));
for (j = 0; j < INIT_SC_PER_VL; j++)
sc_set_cr_threshold(
pio_select_send_context_vl(dd, j, i),
@@ -11821,7 +11847,7 @@ u32 hdrqempty(struct hfi1_ctxtdata *rcd)
head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
- if (rcd->rcvhdrtail_kvaddr)
+ if (hfi1_rcvhdrtail_kvaddr(rcd))
tail = get_rcvhdrtail(rcd);
else
tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
@@ -11865,6 +11891,84 @@ static u32 encoded_size(u32 size)
return 0x1; /* if invalid, go with the minimum size */
}
+/**
+ * encode_rcv_header_entry_size - return chip-specific encoding for size
+ * @size: size in dwords
+ *
+ * Convert a receive header entry size to the encoding used in the CSR.
+ *
+ * Return zero if the given size is invalid; otherwise return the encoding.
+ */
+u8 encode_rcv_header_entry_size(u8 size)
+{
+ /* there are only 3 valid receive header entry sizes */
+ if (size == 2)
+ return 1;
+ if (size == 16)
+ return 2;
+ if (size == 32)
+ return 4;
+ return 0; /* invalid */
+}
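+
+/*
+ * For example, encode_rcv_header_entry_size(32) returns 4, while an
+ * unsupported size such as 8 returns 0 and must be treated as invalid
+ * by the caller.
+ */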
+
+/**
+ * hfi1_validate_rcvhdrcnt - validate hdrcnt
+ * @dd: the device data
+ * @thecnt: the header count
+ */
+int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
+{
+ if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+ dd_dev_err(dd, "Receive header queue count too small\n");
+ return -EINVAL;
+ }
+
+ if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+ dd_dev_err(dd,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
+ return -EINVAL;
+ }
+
+ if (thecnt % HDRQ_INCREMENT) {
+ dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
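+
+/*
+ * Note that the minimum check is exclusive: a count equal to
+ * HFI1_MIN_HDRQ_EGRBUF_CNT (32) is rejected, as is any count above
+ * HFI1_MAX_HDRQ_EGRBUF_CNT or one not divisible by HDRQ_INCREMENT.
+ */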
+
+/**
+ * set_hdrq_regs - set header queue registers for context
+ * @dd: the device data
+ * @ctxt: the context
+ * @entsize: the dword entry size
+ * @hdrcnt: the number of header entries
+ */
+void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
+{
+ u64 reg;
+
+ reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
+ RCV_HDR_CNT_CNT_SHIFT;
+ write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
+ reg = ((u64)encode_rcv_header_entry_size(entsize) &
+ RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
+ RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
+ write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
+ reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
+ RCV_HDR_SIZE_HDR_SIZE_SHIFT;
+ write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
+
+ /*
+ * Program dummy tail address for every receive context
+ * before enabling any receive context
+ */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_dma);
+}
+
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
struct hfi1_ctxtdata *rcd)
{
@@ -11886,13 +11990,13 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
rcd->rcvhdrq_dma);
- if (rcd->rcvhdrtail_kvaddr)
+ if (hfi1_rcvhdrtail_kvaddr(rcd))
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
rcd->rcvhdrqtailaddr_dma);
- rcd->seq_cnt = 1;
+ hfi1_set_seq_cnt(rcd, 1);
/* reset the cached receive header queue head value */
- rcd->head = 0;
+ hfi1_set_rcd_head(rcd, 0);
/*
* Zero the receive header queue so we don't get false
@@ -11972,7 +12076,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
IS_RCVAVAIL_START + rcd->ctxt, false);
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
}
- if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
+ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 4ca5ac8d7e9e..725509261016 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -358,6 +358,8 @@
#define MAX_EAGER_BUFFER (256 * 1024)
#define MAX_EAGER_BUFFER_TOTAL (64 * (1 << 20)) /* max per ctxt 64MB */
#define MAX_EXPECTED_BUFFER (2048 * 1024)
+#define HFI1_MIN_HDRQ_EGRBUF_CNT 32
+#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
/*
* Receive expected base and count and eager base and count increment -
@@ -699,6 +701,10 @@ static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd)
return read_csr(dd, RCV_ARRAY_CNT);
}
+u8 encode_rcv_header_entry_size(u8 size);
+int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt);
+void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt);
+
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
u32 dw_len);
@@ -859,6 +865,7 @@ static inline int idx_from_vl(int vl)
enum {
C_RCV_OVF = 0,
C_RX_LEN_ERR,
+ C_RX_SHORT_ERR,
C_RX_ICRC_ERR,
C_RX_EBP,
C_RX_TID_FULL,
@@ -926,6 +933,7 @@ enum {
C_DC_PG_STS_TX_MBE_CNT,
C_SW_CPU_INTR,
C_SW_CPU_RCV_LIM,
+ C_SW_CTX0_SEQ_DROP,
C_SW_VTX_WAIT,
C_SW_PIO_WAIT,
C_SW_PIO_DRAIN,
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index ab3589d17aee..fb3ec9bff7a2 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -381,6 +381,7 @@
#define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
#define RCV_LENGTH_ERR_CNT 0
+#define RCV_SHORT_ERR_CNT 2
#define RCV_ICRC_ERR_CNT 6
#define RCV_EBP_CNT 9
#define RCV_BUF_OVFL_CNT 10
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index d47da7b0438f..40a1ff0c8a8e 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -323,6 +323,9 @@ struct diag_pkt {
/* RHF receive type error - bypass packet errors */
#define RHF_RTE_BYPASS_NO_ERR 0x0
+/* MAX RcvSEQ */
+#define RHF_MAX_SEQ 13
+
/* IB - LRH header constants */
#define HFI1_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
#define HFI1_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index d268bf9c42ee..4633a0ce1a8c 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -379,7 +379,7 @@ static void *_rcds_seq_next(struct seq_file *s, void *v, loff_t *pos)
struct hfi1_devdata *dd = dd_from_dev(ibd);
++*pos;
- if (!dd->rcd || *pos >= dd->n_krcv_queues)
+ if (!dd->rcd || *pos >= dd->num_rcv_contexts)
return NULL;
return pos;
}
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 01aa1f132f55..049d15befe58 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -411,14 +411,14 @@ drop:
static inline void init_packet(struct hfi1_ctxtdata *rcd,
struct hfi1_packet *packet)
{
- packet->rsize = rcd->rcvhdrqentsize; /* words */
- packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
+ packet->rsize = get_hdrqentsize(rcd); /* words */
+ packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
packet->rcd = rcd;
packet->updegr = 0;
packet->etail = -1;
packet->rhf_addr = get_rhf_addr(rcd);
packet->rhf = rhf_to_cpu(packet->rhf_addr);
- packet->rhqoff = rcd->head;
+ packet->rhqoff = hfi1_rcd_head(rcd);
packet->numpkt = 0;
}
@@ -551,22 +551,22 @@ static inline void init_ps_mdata(struct ps_mdata *mdata,
mdata->maxcnt = packet->maxcnt;
mdata->ps_head = packet->rhqoff;
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+ if (get_dma_rtail_setting(rcd)) {
mdata->ps_tail = get_rcvhdrtail(rcd);
if (rcd->ctxt == HFI1_CTRL_CTXT)
- mdata->ps_seq = rcd->seq_cnt;
+ mdata->ps_seq = hfi1_seq_cnt(rcd);
else
mdata->ps_seq = 0; /* not used with DMA_RTAIL */
} else {
mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
- mdata->ps_seq = rcd->seq_cnt;
+ mdata->ps_seq = hfi1_seq_cnt(rcd);
}
}
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
struct hfi1_ctxtdata *rcd)
{
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+ if (get_dma_rtail_setting(rcd))
return mdata->ps_head == mdata->ps_tail;
return mdata->ps_seq != rhf_rcv_seq(rhf);
}
@@ -592,11 +592,9 @@ static inline void update_ps_mdata(struct ps_mdata *mdata,
mdata->ps_head = 0;
/* Control context must do seq counting */
- if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
- (rcd->ctxt == HFI1_CTRL_CTXT)) {
- if (++mdata->ps_seq > 13)
- mdata->ps_seq = 1;
- }
+ if (!get_dma_rtail_setting(rcd) ||
+ rcd->ctxt == HFI1_CTRL_CTXT)
+ mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}
/*
@@ -734,6 +732,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret;
+ packet->rcd->dd->ctx0_seq_drop++;
/* Set up for the next packet */
packet->rhqoff += packet->rsize;
if (packet->rhqoff >= packet->maxcnt)
@@ -769,7 +768,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
* The +2 is the size of the RHF.
*/
prefetch_range(packet->ebuf,
- packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ packet->tlen - ((get_hdrqentsize(packet->rcd) -
(rhf_hdrq_offset(packet->rhf)
+ 2)) * 4));
}
@@ -823,7 +822,7 @@ static inline void finish_packet(struct hfi1_packet *packet)
* The only thing we need to do is a final update and call for an
* interrupt
*/
- update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
+ update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
packet->etail, rcv_intr_dynamic, packet->numpkt);
}
@@ -832,13 +831,11 @@ static inline void finish_packet(struct hfi1_packet *packet)
*/
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
- u32 seq;
int last = RCV_PKT_OK;
struct hfi1_packet packet;
init_packet(rcd, &packet);
- seq = rhf_rcv_seq(packet.rhf);
- if (seq != rcd->seq_cnt) {
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
last = RCV_PKT_DONE;
goto bail;
}
@@ -847,15 +844,12 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
while (last == RCV_PKT_OK) {
last = process_rcv_packet(&packet, thread);
- seq = rhf_rcv_seq(packet.rhf);
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
last = RCV_PKT_DONE;
process_rcv_update(last, &packet);
}
process_rcv_qp_work(&packet);
- rcd->head = packet.rhqoff;
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
finish_packet(&packet);
return last;
@@ -884,15 +878,14 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
process_rcv_update(last, &packet);
}
process_rcv_qp_work(&packet);
- rcd->head = packet.rhqoff;
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
finish_packet(&packet);
return last;
}
-static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
+static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
- struct hfi1_ctxtdata *rcd;
u16 i;
/*
@@ -900,50 +893,17 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
* interrupt handler only for that context. Otherwise, switch
* interrupt handler for all statically allocated kernel contexts.
*/
- if (ctxt >= dd->first_dyn_alloc_ctxt) {
- rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
- if (rcd) {
- rcd->do_interrupt =
- &handle_receive_interrupt_nodma_rtail;
- hfi1_rcd_put(rcd);
- }
- return;
- }
-
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
- rcd = hfi1_rcd_get_by_index(dd, i);
- if (rcd)
- rcd->do_interrupt =
- &handle_receive_interrupt_nodma_rtail;
+ if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
+ hfi1_rcd_get(rcd);
+ hfi1_set_fast(rcd);
hfi1_rcd_put(rcd);
- }
-}
-
-static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
-{
- struct hfi1_ctxtdata *rcd;
- u16 i;
-
- /*
- * For dynamically allocated kernel contexts (like vnic) switch
- * interrupt handler only for that context. Otherwise, switch
- * interrupt handler for all statically allocated kernel contexts.
- */
- if (ctxt >= dd->first_dyn_alloc_ctxt) {
- rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
- if (rcd) {
- rcd->do_interrupt =
- &handle_receive_interrupt_dma_rtail;
- hfi1_rcd_put(rcd);
- }
return;
}
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
rcd = hfi1_rcd_get_by_index(dd, i);
- if (rcd)
- rcd->do_interrupt =
- &handle_receive_interrupt_dma_rtail;
+ if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
+ hfi1_set_fast(rcd);
hfi1_rcd_put(rcd);
}
}
@@ -959,17 +919,14 @@ void set_all_slowpath(struct hfi1_devdata *dd)
if (!rcd)
continue;
if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
- rcd->do_interrupt = &handle_receive_interrupt;
+ rcd->do_interrupt = rcd->slow_handler;
hfi1_rcd_put(rcd);
}
}
-static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet *packet,
- struct hfi1_devdata *dd)
+static bool __set_armed_to_active(struct hfi1_packet *packet)
{
- struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
u8 etype = rhf_rcv_type(packet->rhf);
u8 sc = SC15_PACKET;
@@ -984,19 +941,34 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
sc = hfi1_16B_get_sc(hdr);
}
if (sc != SC15_PACKET) {
- int hwstate = driver_lstate(rcd->ppd);
+ int hwstate = driver_lstate(packet->rcd->ppd);
+ struct work_struct *lsaw =
+ &packet->rcd->ppd->linkstate_active_work;
if (hwstate != IB_PORT_ACTIVE) {
- dd_dev_info(dd,
+ dd_dev_info(packet->rcd->dd,
"Unexpected link state %s\n",
opa_lstate_name(hwstate));
- return 0;
+ return false;
}
- queue_work(rcd->ppd->link_wq, lsaw);
- return 1;
+ queue_work(packet->rcd->ppd->link_wq, lsaw);
+ return true;
}
- return 0;
+ return false;
+}
+
+/**
+ * set_armed_to_active - the fast path for armed to active
+ * @packet: the packet structure
+ *
+ * Return true if packet processing needs to bail.
+ */
+static bool set_armed_to_active(struct hfi1_packet *packet)
+{
+ if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
+ return false;
+ return __set_armed_to_active(packet);
}
/*
@@ -1019,10 +991,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
init_packet(rcd, &packet);
- if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (seq != rcd->seq_cnt) {
+ if (!get_dma_rtail_setting(rcd)) {
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
last = RCV_PKT_DONE;
goto bail;
}
@@ -1039,22 +1009,15 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
* Control context can potentially receive an invalid
* rhf. Drop such packets.
*/
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (seq != rcd->seq_cnt)
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
skip_pkt = 1;
- }
}
prescan_rxq(rcd, &packet);
while (last == RCV_PKT_OK) {
- if (unlikely(dd->do_drop &&
- atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
- DROP_PACKET_ON)) {
- dd->do_drop = 0;
-
+ if (hfi1_need_drop(dd)) {
/* On to the next packet */
packet.rhqoff += packet.rsize;
packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
@@ -1066,26 +1029,14 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
last = skip_rcv_packet(&packet, thread);
skip_pkt = 0;
} else {
- /* Auto activate link on non-SC15 packet receive */
- if (unlikely(rcd->ppd->host_link_state ==
- HLS_UP_ARMED) &&
- set_armed_to_active(rcd, &packet, dd))
+ if (set_armed_to_active(&packet))
goto bail;
last = process_rcv_packet(&packet, thread);
}
- if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
+ if (!get_dma_rtail_setting(rcd)) {
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
last = RCV_PKT_DONE;
- if (needset) {
- dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
- set_nodma_rtail(dd, rcd->ctxt);
- needset = 0;
- }
} else {
if (packet.rhqoff == hdrqtail)
last = RCV_PKT_DONE;
@@ -1094,27 +1045,24 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
* rhf. Drop such packets.
*/
if (rcd->ctxt == HFI1_CTRL_CTXT) {
- u32 seq = rhf_rcv_seq(packet.rhf);
+ bool lseq;
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (!last && (seq != rcd->seq_cnt))
+ lseq = hfi1_seq_incr(rcd,
+ rhf_rcv_seq(packet.rhf));
+ if (!last && lseq)
skip_pkt = 1;
}
-
- if (needset) {
- dd_dev_info(dd,
- "Switching to DMA_RTAIL\n");
- set_dma_rtail(dd, rcd->ctxt);
- needset = 0;
- }
}
+ if (needset) {
+ needset = false;
+ set_all_fastpath(dd, rcd);
+ }
process_rcv_update(last, &packet);
}
process_rcv_qp_work(&packet);
- rcd->head = packet.rhqoff;
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
/*
@@ -1606,23 +1554,22 @@ void handle_eflags(struct hfi1_packet *packet)
* The following functions are called by the interrupt handler. They are type
* specific handlers for each packet type.
*/
-static int process_receive_ib(struct hfi1_packet *packet)
+static void process_receive_ib(struct hfi1_packet *packet)
{
if (hfi1_setup_9B_packet(packet))
- return RHF_RCV_CONTINUE;
+ return;
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
- return RHF_RCV_CONTINUE;
+ return;
trace_hfi1_rcvhdr(packet);
if (unlikely(rhf_err_flags(packet->rhf))) {
handle_eflags(packet);
- return RHF_RCV_CONTINUE;
+ return;
}
hfi1_ib_rcv(packet);
- return RHF_RCV_CONTINUE;
}
static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
@@ -1638,23 +1585,23 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
return false;
}
-static int process_receive_bypass(struct hfi1_packet *packet)
+static void process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
if (hfi1_is_vnic_packet(packet)) {
hfi1_vnic_bypass_rcv(packet);
- return RHF_RCV_CONTINUE;
+ return;
}
if (hfi1_setup_bypass_packet(packet))
- return RHF_RCV_CONTINUE;
+ return;
trace_hfi1_rcvhdr(packet);
if (unlikely(rhf_err_flags(packet->rhf))) {
handle_eflags(packet);
- return RHF_RCV_CONTINUE;
+ return;
}
if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
@@ -1677,17 +1624,16 @@ static int process_receive_bypass(struct hfi1_packet *packet)
(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
}
}
- return RHF_RCV_CONTINUE;
}
-static int process_receive_error(struct hfi1_packet *packet)
+static void process_receive_error(struct hfi1_packet *packet)
{
/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
if (unlikely(
hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
(rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
packet->rhf & RHF_DC_ERR)))
- return RHF_RCV_CONTINUE;
+ return;
hfi1_setup_ib_header(packet);
handle_eflags(packet);
@@ -1695,32 +1641,29 @@ static int process_receive_error(struct hfi1_packet *packet)
if (unlikely(rhf_err_flags(packet->rhf)))
dd_dev_err(packet->rcd->dd,
"Unhandled error packet received. Dropping.\n");
-
- return RHF_RCV_CONTINUE;
}
-static int kdeth_process_expected(struct hfi1_packet *packet)
+static void kdeth_process_expected(struct hfi1_packet *packet)
{
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
- return RHF_RCV_CONTINUE;
+ return;
if (unlikely(rhf_err_flags(packet->rhf))) {
struct hfi1_ctxtdata *rcd = packet->rcd;
if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
- return RHF_RCV_CONTINUE;
+ return;
}
hfi1_kdeth_expected_rcv(packet);
- return RHF_RCV_CONTINUE;
}
-static int kdeth_process_eager(struct hfi1_packet *packet)
+static void kdeth_process_eager(struct hfi1_packet *packet)
{
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
- return RHF_RCV_CONTINUE;
+ return;
trace_hfi1_rcvhdr(packet);
if (unlikely(rhf_err_flags(packet->rhf))) {
@@ -1728,37 +1671,41 @@ static int kdeth_process_eager(struct hfi1_packet *packet)
show_eflags_errs(packet);
if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
- return RHF_RCV_CONTINUE;
+ return;
}
hfi1_kdeth_eager_rcv(packet);
- return RHF_RCV_CONTINUE;
}
-static int process_receive_invalid(struct hfi1_packet *packet)
+static void process_receive_invalid(struct hfi1_packet *packet)
{
dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
rhf_rcv_type(packet->rhf));
- return RHF_RCV_CONTINUE;
}
+#define HFI1_RCVHDR_DUMP_MAX 5
+
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{
struct hfi1_packet packet;
struct ps_mdata mdata;
+ int i;
- seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
- rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize,
- HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+ seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n",
+ rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
+ get_dma_rtail_setting(rcd) ?
"dma_rtail" : "nodma_rtail",
+ read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
+ read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
RCV_HDR_HEAD_HEAD_MASK,
- read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
+ read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
+ rcd->head);
init_packet(rcd, &packet);
init_ps_mdata(&mdata, &packet);
- while (1) {
+ for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
rcd->rhf_offset;
struct ib_header *hdr;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7c5e3fb22413..259115886d35 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
- if (fd) {
- fd->rec_cpu_num = -1; /* no cpu affinity by default */
- fd->mm = current->mm;
- mmgrab(fd->mm);
- fd->dd = dd;
- kobject_get(&fd->dd->kobj);
- fp->private_data = fd;
- } else {
- fp->private_data = NULL;
-
- if (atomic_dec_and_test(&dd->user_refcount))
- complete(&dd->user_comp);
-
- return -ENOMEM;
- }
-
+ if (!fd || init_srcu_struct(&fd->pq_srcu))
+ goto nomem;
+ spin_lock_init(&fd->pq_rcu_lock);
+ spin_lock_init(&fd->tid_lock);
+ spin_lock_init(&fd->invalid_lock);
+ fd->rec_cpu_num = -1; /* no cpu affinity by default */
+ fd->mm = current->mm;
+ mmgrab(fd->mm);
+ fd->dd = dd;
+ kobject_get(&fd->dd->kobj);
+ fp->private_data = fd;
return 0;
+nomem:
+ kfree(fd);
+ fp->private_data = NULL;
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+ return -ENOMEM;
}
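+
+/*
+ * Note on the SRCU scheme set up above: readers such as
+ * hfi1_write_iter() dereference fd->pq inside srcu_read_lock(), while
+ * hfi1_user_sdma_free_queues() NULLs the pointer under pq_rcu_lock
+ * and then calls synchronize_srcu() before freeing the queue.
+ */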
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+ struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
+ int idx;
- if (!cq || !pq)
+ idx = srcu_read_lock(&fd->pq_srcu);
+ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
+ if (!cq || !pq) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
return -EIO;
+ }
- if (!iter_is_iovec(from) || !dim)
+ if (!iter_is_iovec(from) || !dim) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
return -EINVAL;
+ }
trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
return -ENOSPC;
+ }
while (dim) {
int ret;
@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
reqs++;
}
+ srcu_read_unlock(&fd->pq_srcu, idx);
return reqs;
}
@@ -505,12 +516,12 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EINVAL;
goto done;
}
- if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
+ if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) {
ret = -EPERM;
goto done;
}
memlen = PAGE_SIZE;
- memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
+ memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
flags &= ~VM_MAYWRITE;
break;
case SUBCTXT_UREGS:
@@ -707,6 +718,7 @@ done:
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
+ cleanup_srcu_struct(&fdata->pq_srcu);
kfree(fdata);
return 0;
}
@@ -1090,7 +1102,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
* don't have to wait to be sure the DMA update has happened
* (chip resets head/tail to 0 on transition to enable).
*/
- if (uctxt->rcvhdrtail_kvaddr)
+ if (hfi1_rcvhdrtail_kvaddr(uctxt))
clear_rcvhdrtail(uctxt);
/* Setup J_KEY before enabling the context */
@@ -1154,8 +1166,8 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
cinfo.send_ctxt = uctxt->sc->hw_context;
cinfo.egrtids = uctxt->egrbufs.alloced;
- cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
+ cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt);
+ cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2;
cinfo.sdma_ring_size = fd->cq->nentries;
cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
@@ -1543,7 +1555,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
* always resets it's tail register back to 0 on a
* transition from disabled to enabled.
*/
- if (uctxt->rcvhdrtail_kvaddr)
+ if (hfi1_rcvhdrtail_kvaddr(uctxt))
clear_rcvhdrtail(uctxt);
rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
} else {
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index fc10d65fc3e1..cae12f416ca0 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -197,7 +197,9 @@ struct exp_tid_set {
u32 count;
};
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
+struct hfi1_ctxtdata;
+typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
+typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
struct tid_queue {
struct list_head queue_head;
@@ -226,7 +228,11 @@ struct hfi1_ctxtdata {
* be valid. Worst case is we process an extra interrupt and up to 64
* packets with the wrong interrupt handler.
*/
- int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
+ intr_handler do_interrupt;
+ /** fast handler after link auto-activation */
+ intr_handler fast_handler;
+ /** slow handler */
+ intr_handler slow_handler;
/* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
/* clear interrupt mask */
@@ -1153,6 +1159,8 @@ struct hfi1_devdata {
char *boardname; /* human readable board info */
+ u64 ctx0_seq_drop;
+
/* reset value */
u64 z_int_counter;
u64 z_rcv_limit;
@@ -1310,7 +1318,7 @@ struct hfi1_devdata {
struct err_info_constraint err_info_xmit_constraint;
atomic_t drop_packet;
- u8 do_drop;
+ bool do_drop;
u8 err_info_uncorrectable;
u8 err_info_fmconfig;
@@ -1436,10 +1444,13 @@ struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
+ struct srcu_struct pq_srcu;
struct hfi1_devdata *dd;
struct hfi1_ctxtdata *uctxt;
struct hfi1_user_sdma_comp_q *cq;
- struct hfi1_user_sdma_pkt_q *pq;
+ /* update side lock for SRCU */
+ spinlock_t pq_rcu_lock;
+ struct hfi1_user_sdma_pkt_q __rcu *pq;
u16 subctxt;
/* for cpu affinity; -1 if none */
int rec_cpu_num;
@@ -1507,12 +1518,148 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp,
#define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
#define RCV_PKT_DONE 0x2 /* stop, no more packets detected */
+/**
+ * hfi1_rcd_head - return the current rcd head
+ * @rcd: the context
+ */
+static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->head;
+}
+
+/**
+ * hfi1_set_rcd_head - set the rcd head
+ * @rcd: the context
+ * @head: the new head
+ */
+static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
+{
+ rcd->head = head;
+}
+
/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
}
+/* return DMA_RTAIL configuration */
+static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
+{
+ return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
+}
+
+/**
+ * hfi1_seq_incr_wrap - wrapping increment for sequence
+ * @seq: the current sequence number
+ *
+ * Returns: the incremented seq
+ */
+static inline u8 hfi1_seq_incr_wrap(u8 seq)
+{
+ if (++seq > RHF_MAX_SEQ)
+ seq = 1;
+ return seq;
+}
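+
+/*
+ * The RHF sequence counter is 1-based: valid values run 1..RHF_MAX_SEQ,
+ * so hfi1_seq_incr_wrap(RHF_MAX_SEQ) returns 1, never 0.
+ */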
+
+/**
+ * hfi1_seq_cnt - return seq_cnt member
+ * @rcd: the receive context
+ *
+ * Return seq_cnt member
+ */
+static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->seq_cnt;
+}
+
+/**
+ * hfi1_set_seq_cnt - set seq_cnt member
+ * @rcd: the receive context
+ * @cnt: the new sequence count
+ *
+ * Set the seq_cnt member to cnt.
+ */
+static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
+{
+ rcd->seq_cnt = cnt;
+}
+
+/**
+ * last_rcv_seq - check for the last packet
+ * @rcd: the receive context
+ * @seq: the RHF sequence number
+ *
+ * Return true if seq does not match the expected sequence count,
+ * which indicates this was the last packet.
+ */
+static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+ return seq != rcd->seq_cnt;
+}
+
+/**
+ * hfi1_seq_incr - increment context sequence number
+ * @rcd: the receive context
+ * @seq: the sequence number from the RHF
+ *
+ * Returns: true if this was the last packet
+ */
+static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
+{
+ rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
+ return last_rcv_seq(rcd, seq);
+}
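+
+/*
+ * Typical receive-loop usage (see handle_receive_interrupt_nodma_rtail()):
+ *
+ *	if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ *		last = RCV_PKT_DONE;
+ */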
+
+/**
+ * get_hdrqentsize - return hdrq entry size
+ * @rcd: the receive context
+ */
+static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->rcvhdrqentsize;
+}
+
+/**
+ * get_hdrq_cnt - return hdrq count
+ * @rcd: the receive context
+ */
+static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->rcvhdrq_cnt;
+}
+
+/**
+ * hfi1_is_slowpath - check if this context is slow path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
+{
+ return rcd->do_interrupt == rcd->slow_handler;
+}
+
+/**
+ * hfi1_is_fastpath - check if this context is fast path
+ * @rcd: the receive context
+ */
+static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
+{
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ return false;
+
+ return rcd->do_interrupt == rcd->fast_handler;
+}
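+
+/*
+ * The control context always reports false here: it must keep doing
+ * RHF sequence counting, so it stays on the slow handler even after
+ * the other kernel contexts have switched to the fast path.
+ */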
+
+/**
+ * hfi1_set_fast - change to the fast handler
+ * @rcd: the receive context
+ */
+static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
+{
+ if (unlikely(!rcd))
+ return;
+ if (unlikely(!hfi1_is_fastpath(rcd)))
+ rcd->do_interrupt = rcd->fast_handler;
+}
+
int hfi1_reset_device(int);
void receive_interrupt_work(struct work_struct *work);
@@ -2015,9 +2162,21 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
size_t npages, bool dirty);
+/**
+ * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
+ * @rcd: the receive context
+ */
+static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
+{
+ return (__le64 *)rcd->rcvhdrtail_kvaddr;
+}
+
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
- *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
+ u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd);
+
+ if (kv)
+ *kv = 0ULL;
}
static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
@@ -2026,7 +2185,17 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
* volatile because it's a DMA target from the chip, routine is
* inlined, and don't want register caching or reordering.
*/
- return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
+ return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
+}
+
+static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
+{
+ if (likely(!rcd->rcvhdrtail_kvaddr)) {
+ u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)));
+
+ return !last_rcv_seq(rcd, seq);
+ }
+ return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd);
}
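+
+/*
+ * Without a DMA'ed tail (the common case) packet presence is inferred
+ * from the RHF sequence number of the next header queue slot; with
+ * DMA_RTAIL the cached software head is compared to the DMA'ed tail.
+ */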
/*
@@ -2298,6 +2467,25 @@ static inline bool is_integrated(struct hfi1_devdata *dd)
return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
}
+/**
+ * hfi1_need_drop - detect need for drop
+ * @dd: the device
+ *
+ * In some cases, the first packet needs to be dropped.
+ *
+ * Return true if the current packet needs to be dropped and false otherwise.
+ */
+static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
+{
+ if (unlikely(dd->do_drop &&
+ atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
+ DROP_PACKET_ON)) {
+ dd->do_drop = false;
+ return true;
+ }
+ return false;
+}
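+
+/*
+ * do_drop and drop_packet are armed together in hfi1_init() (only on
+ * A-step hardware, see is_ax()), and the atomic_xchg() here makes the
+ * resulting drop a one-shot across all receive contexts.
+ */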
+
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 26b792bb1027..e3acda7a0800 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -78,8 +78,6 @@
*/
#define HFI1_MIN_USER_CTXT_BUFCNT 7
-#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
-#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
@@ -122,8 +120,6 @@ unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
-static inline u64 encode_rcv_header_entry_size(u16 size);
-
DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
static int hfi1_create_kctxt(struct hfi1_devdata *dd,
@@ -154,7 +150,12 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
/* Control context must use DMA_RTAIL */
if (rcd->ctxt == HFI1_CTRL_CTXT)
rcd->flags |= HFI1_CAP_DMA_RTAIL;
- rcd->seq_cnt = 1;
+ rcd->fast_handler = get_dma_rtail_setting(rcd) ?
+ handle_receive_interrupt_dma_rtail :
+ handle_receive_interrupt_nodma_rtail;
+ rcd->slow_handler = handle_receive_interrupt;
+
+ hfi1_set_seq_cnt(rcd, 1);
rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
if (!rcd->sc) {
@@ -511,23 +512,6 @@ void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
}
/*
- * Convert a receive header entry size that to the encoding used in the CSR.
- *
- * Return a zero if the given size is invalid.
- */
-static inline u64 encode_rcv_header_entry_size(u16 size)
-{
- /* there are only 3 valid receive header entry sizes */
- if (size == 2)
- return 1;
- if (size == 16)
- return 2;
- else if (size == 32)
- return 4;
- return 0; /* invalid */
-}
-
-/*
* Select the largest ccti value over all SLs to determine the intra-
* packet gap for the link.
*
@@ -892,10 +876,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (is_ax(dd)) {
atomic_set(&dd->drop_packet, DROP_PACKET_ON);
- dd->do_drop = 1;
+ dd->do_drop = true;
} else {
atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
- dd->do_drop = 0;
+ dd->do_drop = false;
}
/* make sure the link is not "up" */
@@ -1149,9 +1133,9 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
rcd->rcvhdrq, rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
- if (rcd->rcvhdrtail_kvaddr) {
+ if (hfi1_rcvhdrtail_kvaddr(rcd)) {
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *)rcd->rcvhdrtail_kvaddr,
+ (void *)hfi1_rcvhdrtail_kvaddr(rcd),
rcd->rcvhdrqtailaddr_dma);
rcd->rcvhdrtail_kvaddr = NULL;
}
@@ -1611,29 +1595,6 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
-static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
-{
- if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- dd_dev_err(dd, "Receive header queue count too small\n");
- return -EINVAL;
- }
-
- if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- dd_dev_err(dd,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
- return -EINVAL;
- }
-
- if (thecnt % HDRQ_INCREMENT) {
- dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
- thecnt, HDRQ_INCREMENT);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
@@ -1661,7 +1622,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Validate some global module parameters */
- ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
+ ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
if (ret)
goto bail;
@@ -1842,7 +1803,6 @@ static void shutdown_one(struct pci_dev *pdev)
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
unsigned amt;
- u64 reg;
if (!rcd->rcvhdrq) {
gfp_t gfp_flags;
@@ -1874,30 +1834,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
goto bail_free;
}
}
- /*
- * These values are per-context:
- * RcvHdrCnt
- * RcvHdrEntSize
- * RcvHdrSize
- */
- reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
- & RCV_HDR_CNT_CNT_MASK)
- << RCV_HDR_CNT_CNT_SHIFT;
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
- reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
- & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
- << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
- reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
- << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
- /*
- * Program dummy tail address for every receive context
- * before enabling any receive context
- */
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_dma);
+ set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
+ rcd->rcvhdrq_cnt);
return 0;
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
index adb4a1ba921b..5836fe7b2817 100644
--- a/drivers/infiniband/hw/hfi1/iowait.c
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
void iowait_cancel_work(struct iowait *w)
{
cancel_work_sync(&iowait_get_ib_work(w)->iowork);
- cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+ /* Only cancel the TID RDMA iowork if it has been initialized */
+ if (iowait_get_tid_work(w)->iowork.func)
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
}
/**
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index d920b165d696..db82db497b2c 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -115,13 +115,11 @@ int msix_initialize(struct hfi1_devdata *dd)
*/
static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
irq_handler_t handler, irq_handler_t thread,
- u32 idx, enum irq_type type)
+ enum irq_type type, const char *name)
{
unsigned long nr;
int irq;
int ret;
- const char *err_info;
- char name[MAX_NAME_SIZE];
struct hfi1_msix_entry *me;
/* Allocate an MSIx vector */
@@ -135,43 +133,15 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
if (nr == dd->msix_info.max_requested)
return -ENOSPC;
- /* Specific verification and determine the name */
- switch (type) {
- case IRQ_GENERAL:
- /* general interrupt must be MSIx vector 0 */
- if (nr) {
- spin_lock(&dd->msix_info.msix_lock);
- __clear_bit(nr, dd->msix_info.in_use_msix);
- spin_unlock(&dd->msix_info.msix_lock);
- dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
- nr);
- return -EINVAL;
- }
- snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
- err_info = "general";
- break;
- case IRQ_SDMA:
- snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
- dd->unit, idx);
- err_info = "sdma";
- break;
- case IRQ_RCVCTXT:
- snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
- dd->unit, idx);
- err_info = "receive context";
- break;
- case IRQ_OTHER:
- default:
+ if (type < IRQ_SDMA || type >= IRQ_OTHER)
return -EINVAL;
- }
- name[sizeof(name) - 1] = 0;
irq = pci_irq_vector(dd->pcidev, nr);
ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
if (ret) {
dd_dev_err(dd,
- "%s: request for IRQ %d failed, MSIx %d, err %d\n",
- err_info, irq, idx, ret);
+ "%s: request for IRQ %d failed, MSIx %lu, err %d\n",
+ name, irq, nr, ret);
spin_lock(&dd->msix_info.msix_lock);
__clear_bit(nr, dd->msix_info.in_use_msix);
spin_unlock(&dd->msix_info.msix_lock);
@@ -195,17 +165,13 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
return nr;
}
-/**
- * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
- * @rcd: valid rcd context
- *
- */
-int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
+ irq_handler_t handler,
+ irq_handler_t thread,
+ const char *name)
{
- int nr;
-
- nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
- receive_context_thread, rcd->ctxt, IRQ_RCVCTXT);
+ int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
+ IRQ_RCVCTXT, name);
if (nr < 0)
return nr;
@@ -222,6 +188,22 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+ rcd->dd->unit, rcd->ctxt);
+
+ return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
+ receive_context_thread, name);
+}
+
+/**
* msix_request_sdma_irq() - Helper for getting SDMA IRQ resources
* @sde: valid sdma engine
*
@@ -229,9 +211,12 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
int msix_request_sdma_irq(struct sdma_engine *sde)
{
int nr;
+ char name[MAX_NAME_SIZE];
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+ sde->dd->unit, sde->this_idx);
nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
- sde->this_idx, IRQ_SDMA);
+ IRQ_SDMA, name);
if (nr < 0)
return nr;
sde->msix_intr = nr;
@@ -241,6 +226,32 @@ int msix_request_sdma_irq(struct sdma_engine *sde)
}
/**
+ * msix_request_general_irq() - Helper for getting general IRQ
+ * resources
+ * @dd: valid device data
+ */
+int msix_request_general_irq(struct hfi1_devdata *dd)
+{
+ int nr;
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+ nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
+ name);
+ if (nr < 0)
+ return nr;
+
+ /* general interrupt must be MSIx vector 0 */
+ if (nr) {
+ msix_free_irq(dd, (u8)nr);
+ dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
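+
+/*
+ * The general interrupt must land on MSI-X vector 0, so this helper is
+ * called before any SDMA or receive context request (see
+ * msix_request_irqs()); if a different vector comes back, the request
+ * is undone and -EINVAL returned.
+ */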
+
+/**
* enable_sdma_src() - Helper to enable SDMA IRQ srcs
* @dd: valid devdata structure
* @i: index of SDMA engine
@@ -265,10 +276,9 @@ static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
int msix_request_irqs(struct hfi1_devdata *dd)
{
int i;
- int ret;
+ int ret = msix_request_general_irq(dd);
- ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
- if (ret < 0)
+ if (ret)
return ret;
for (i = 0; i < dd->num_sdma; i++) {
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
index a514881632a4..1a02ab7971c8 100644
--- a/drivers/infiniband/hw/hfi1/msix.h
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -54,6 +54,7 @@
int msix_initialize(struct hfi1_devdata *dd);
int msix_request_irqs(struct hfi1_devdata *dd);
void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_general_irq(struct hfi1_devdata *dd);
int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
int msix_request_sdma_irq(struct sdma_engine *sde);
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61362bd6d3ce..1a6268d61977 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
return -EINVAL;
}
- dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+ dd->kregbase1 = ioremap(addr, RCV_ARRAY);
if (!dd->kregbase1) {
dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
return -ENOMEM;
@@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
dd->base2_start = RCV_ARRAY + rcv_array_count * 8;
- dd->kregbase2 = ioremap_nocache(
+ dd->kregbase2 = ioremap(
addr + dd->base2_start,
TXE_PIO_SEND - dd->base2_start);
if (!dd->kregbase2) {
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a3c647675a7..f1734e5e9ac4 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2599,7 +2599,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
* to be sent before sending this one.
*/
e = NULL;
- old_req = 1;
+ old_req = true;
ibp->rvp.n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index e53f542b60af..8a2e0d9351e9 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
*/
fpsn = full_flow_psn(flow, flow->flow_state.spsn);
req->r_ack_psn = psn;
+ /*
+ * If resync_psn points to the last flow PSN for a
+ * segment and the new segment (likely from a new
+ * request) starts with a new generation number, we
+ * need to adjust resync_psn accordingly.
+ */
+ if (flow->flow_state.generation !=
+ (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+ resync_psn = mask_psn(fpsn - 1);
flow->resync_npkts +=
delta_psn(mask_psn(resync_psn + 1), fpsn);
/*
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index e00c8a7d559c..b5fc5c6cd52f 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -80,7 +80,7 @@ TRACE_EVENT(hfi1_uctxtdata,
__entry->credits = uctxt->sc->credits;
__entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
__entry->piobase = uctxt->sc->base_addr;
- __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+ __entry->rcvhdrq_cnt = get_hdrq_cnt(uctxt);
__entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
__entry->eager_cnt = uctxt->egrbufs.alloced;
__entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 3cec960e9674..168079ed122c 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -106,19 +106,8 @@ TRACE_EVENT(hfi1_receive_interrupt,
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = rcd->ctxt;
- if (rcd->do_interrupt ==
- &handle_receive_interrupt) {
- __entry->slow_path = 1;
- __entry->dma_rtail = 0xFF;
- } else if (rcd->do_interrupt ==
- &handle_receive_interrupt_dma_rtail){
- __entry->dma_rtail = 1;
- __entry->slow_path = 0;
- } else if (rcd->do_interrupt ==
- &handle_receive_interrupt_nodma_rtail) {
- __entry->dma_rtail = 0;
- __entry->slow_path = 0;
- }
+ __entry->slow_path = hfi1_is_slowpath(rcd);
+ __entry->dma_rtail = get_dma_rtail_setting(rcd);
),
TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
__get_str(dev),
diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h
index 343fb9894a82..985ffa9cc958 100644
--- a/drivers/infiniband/hw/hfi1/trace_tid.h
+++ b/drivers/infiniband/hw/hfi1/trace_tid.h
@@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */
TP_ARGS(dd, index, type, pa, order),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd)
- __field(unsigned long, pa);
- __field(u32, index);
- __field(u32, type);
- __field(u16, order);
+ __field(unsigned long, pa)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u16, order)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd);
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 09eb0c9ada00..769e5e4710c6 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo,
TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
TP_ARGS(dd, ctxt, subctxt, i),
TP_STRUCT__entry(
- DD_DEV_ENTRY(dd);
+ DD_DEV_ENTRY(dd)
__field(u16, ctxt)
__field(u8, subctxt)
__field(u8, ver_opcode)
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index f05742ac0949..4da03f823474 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -87,9 +87,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
{
int ret = 0;
- spin_lock_init(&fd->tid_lock);
- spin_lock_init(&fd->invalid_lock);
-
fd->entry_to_rb = kcalloc(uctxt->expected_count,
sizeof(struct rb_node *),
GFP_KERNEL);
@@ -142,10 +139,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ mutex_lock(&uctxt->exp_mutex);
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
+ mutex_unlock(&uctxt->exp_mutex);
kfree(fd->invalid_tids);
fd->invalid_tids = NULL;
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 469acb961fbd..3b505006c0a6 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
int ret;
unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
- ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
+ ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
if (ret < 0)
return ret;
@@ -118,7 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
size_t npages, bool dirty)
{
- put_user_pages_dirty_lock(p, npages, dirty);
+ unpin_user_pages_dirty_lock(p, npages, dirty);
if (mm) { /* during close after signal, mm can be NULL */
atomic64_sub(npages, &mm->pinned_vm);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index fd754a16475a..c2f0d9ba93de 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq)
return -ENOMEM;
-
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
goto pq_mmu_fail;
}
- fd->pq = pq;
+ rcu_assign_pointer(fd->pq, pq);
fd->cq = cq;
return 0;
@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
- pq = fd->pq;
+ spin_lock(&fd->pq_rcu_lock);
+ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
+ lockdep_is_held(&fd->pq_rcu_lock));
if (pq) {
+ rcu_assign_pointer(fd->pq, NULL);
+ spin_unlock(&fd->pq_rcu_lock);
+ synchronize_srcu(&fd->pq_srcu);
+ /* at this point there can be no more new requests */
if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler);
iowait_sdma_drain(&pq->busy);
@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
- fd->pq = NULL;
+ } else {
+ spin_unlock(&fd->pq_rcu_lock);
}
if (fd->cq) {
vfree(fd->cq->comps);
@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
{
int ret = 0, i;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+ struct hfi1_user_sdma_pkt_q *pq =
+ srcu_dereference(fd->pq, &fd->pq_srcu);
struct hfi1_user_sdma_comp_q *cq = fd->cq;
struct hfi1_devdata *dd = pq->dd;
unsigned long idx = 0;
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index b49e60e8397d..6b14581b9965 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -78,7 +78,7 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
if (ret)
goto done;
- if (uctxt->rcvhdrtail_kvaddr)
+ if (hfi1_rcvhdrtail_kvaddr(uctxt))
clear_rcvhdrtail(uctxt);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index af1d8823b3f0..5ffe4c996ed3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -163,7 +163,7 @@ static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
u32 npages;
int ret;
- *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -370,6 +370,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
+ INIT_LIST_HEAD(&hr_cq->sq_list);
+ INIT_LIST_HEAD(&hr_cq->rq_list);
if (udata) {
ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 10af6958ab69..bff6abdccfb0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, page_addr,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 5617434cbfb4..a7c4ff975c28 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,8 +45,6 @@
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
-#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
-
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
#define HNS_ROCE_BA_SIZE (32 * 4096)
@@ -107,11 +105,6 @@
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
-#define SERV_TYPE_RC 0
-#define SERV_TYPE_RD 1
-#define SERV_TYPE_UC 2
-#define SERV_TYPE_UD 3
-
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
@@ -131,6 +124,13 @@
#define EQ_DEPTH_COEFF 2
enum {
+ SERV_TYPE_RC,
+ SERV_TYPE_UC,
+ SERV_TYPE_RD,
+ SERV_TYPE_UD,
+};
+
+enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};
@@ -423,7 +423,7 @@ struct hns_roce_mr_table {
struct hns_roce_wq {
u64 *wrid; /* Work request ID */
spinlock_t lock;
- int wqe_cnt; /* WQE num */
+ u32 wqe_cnt; /* WQE num */
int max_gs;
int offset;
int wqe_shift; /* WQE size */
@@ -498,6 +498,10 @@ struct hns_roce_cq {
u32 vector;
atomic_t refcount;
struct completion free;
+ struct list_head sq_list; /* all qps on this send cq */
+ struct list_head rq_list; /* all qps on this recv cq */
+ int is_armed; /* cq is armed */
+ struct list_head node; /* all armed cqs are on a list */
};
struct hns_roce_idx_que {
@@ -647,7 +651,6 @@ struct hns_roce_qp {
u8 sdb_en;
u32 doorbell_qpn;
u32 sq_signal_bits;
- u32 sq_next_wqe;
struct hns_roce_wq sq;
struct ib_umem *umem;
@@ -682,6 +685,9 @@ struct hns_roce_qp {
u32 next_sge;
struct hns_roce_rinl_buf rq_inl_buf;
+ struct list_head node; /* all qps are on a list */
+ struct list_head rq_node; /* all recv qps are on a list */
+ struct list_head sq_node; /* all send qps are on a list */
};
struct hns_roce_ib_iboe {
@@ -794,10 +800,8 @@ struct hns_roce_caps {
int reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
- u32 max_srq_sg;
int num_srqs;
u32 max_wqes;
- u32 max_srqs;
u32 max_srq_wrs;
u32 max_srq_sges;
u32 max_sq_desc_sz;
@@ -811,7 +815,6 @@ struct hns_roce_caps {
u32 min_wqes;
int reserved_cqs;
int reserved_srqs;
- u32 max_srqwqes;
int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
@@ -895,6 +898,12 @@ struct hns_roce_caps {
u32 tpq_buf_pg_sz;
u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags;
+ u16 default_ceq_max_cnt;
+ u16 default_ceq_period;
+ u16 default_aeq_max_cnt;
+ u16 default_aeq_period;
+ u16 default_aeq_arm_st;
+ u16 default_ceq_arm_st;
};
struct hns_roce_work {
@@ -911,6 +920,12 @@ struct hns_roce_dfx_hw {
int *buffer);
};
+enum hns_roce_device_state {
+ HNS_ROCE_DEVICE_STATE_INITED,
+ HNS_ROCE_DEVICE_STATE_RST_DOWN,
+ HNS_ROCE_DEVICE_STATE_UNINIT,
+};
+
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
@@ -993,6 +1008,9 @@ struct hns_roce_dev {
bool dis_db;
unsigned long reset_cnt;
struct hns_roce_ib_iboe iboe;
+ enum hns_roce_device_state state;
+ struct list_head qp_list; /* list of all qps on this dev */
+ spinlock_t qp_list_lock; /* protect qp_list */
struct list_head pgdir_list;
struct mutex pgdir_mutex;
@@ -1133,7 +1151,6 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
@@ -1257,6 +1274,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
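Note: the new hns_roce_device_state enum orders the teardown states so datapath code can gate on a single comparison, INITED < RST_DOWN < UNINIT. A sketch of the check performed by the post_send/post_recv validators added in the hns_roce_hw_v2.c hunks below:

/* Any state at or past RST_DOWN means the hardware can no longer
 * accept doorbells, so fail fast with -EIO.
 */
if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
	return -EIO;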
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 2a2b2112f886..c6e66586e533 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -74,8 +74,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
unsigned long flags = 0;
void *wqe = NULL;
__le32 doorbell[2];
+ u32 wqe_idx = 0;
int nreq = 0;
- u32 ind = 0;
int ret = 0;
u8 *smac;
int loopback;
@@ -88,7 +88,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
}
spin_lock_irqsave(&qp->sq.lock, flags);
- ind = qp->sq_next_wqe;
+
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
ret = -ENOMEM;
@@ -96,6 +96,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
goto out;
}
+ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, qp->sq.max_gs);
@@ -104,9 +106,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
goto out;
}
- wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
- wr->wr_id;
+ wqe = get_send_wqe(qp, wqe_idx);
+ qp->sq.wrid[wqe_idx] = wr->wr_id;
/* Corresponding to the RC and RD type wqe process separately */
if (ibqp->qp_type == IB_QPT_GSI) {
@@ -210,7 +211,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
cpu_to_le32((wr->sg_list[1].addr) >> 32);
ud_sq_wqe->l_key1 =
cpu_to_le32(wr->sg_list[1].lkey);
- ind++;
} else if (ibqp->qp_type == IB_QPT_RC) {
u32 tmp_len = 0;
@@ -308,7 +308,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
ctrl->flag |= cpu_to_le32(wr->num_sge <<
HNS_ROCE_WQE_SGE_NUM_BIT);
}
- ind++;
}
}
@@ -336,7 +335,6 @@ out:
doorbell[1] = sq_db.u32_8;
hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
- qp->sq_next_wqe = ind;
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -348,12 +346,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
- int ret = 0;
- int nreq = 0;
- int ind = 0;
- int i = 0;
- u32 reg_val;
- unsigned long flags = 0;
struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
struct hns_roce_wqe_data_seg *scat = NULL;
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -361,9 +353,14 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_rq_db rq_db;
__le32 doorbell[2] = {0};
+ unsigned long flags = 0;
+ unsigned int wqe_idx;
+ int ret = 0;
+ int nreq = 0;
+ int i = 0;
+ u32 reg_val;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
- ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
@@ -373,6 +370,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
goto out;
}
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
@@ -381,7 +380,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
goto out;
}
- ctrl = get_recv_wqe(hr_qp, ind);
+ ctrl = get_recv_wqe(hr_qp, wqe_idx);
roce_set_field(ctrl->rwqe_byte_12,
RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
@@ -393,9 +392,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
for (i = 0; i < wr->num_sge; i++)
set_data_seg(scat + i, wr->sg_list + i);
- hr_qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
@@ -2701,7 +2698,6 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
- hr_qp->sq_next_wqe = 0;
}
kfree(context);
@@ -3315,7 +3311,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
- hr_qp->sq_next_wqe = 0;
}
out:
kfree(context);
@@ -3614,14 +3609,18 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (ret)
return ret;
- send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
- recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+ send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
+ recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
hns_roce_lock_cqs(send_cq, recv_cq);
if (!udata) {
- __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
- to_hr_srq(hr_qp->ibqp.srq) : NULL);
- if (send_cq != recv_cq)
+ if (recv_cq)
+ __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
+ (hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) :
+ NULL));
+
+ if (send_cq && send_cq != recv_cq)
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
}
hns_roce_unlock_cqs(send_cq, recv_cq);
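Note: the destroy path above now tolerates QPs created without a send or receive CQ. The guard pattern, shown standalone (qpn and srq are placeholders):

send_cq = ibqp->send_cq ? to_hr_cq(ibqp->send_cq) : NULL;
recv_cq = ibqp->recv_cq ? to_hr_cq(ibqp->recv_cq) : NULL;

hns_roce_lock_cqs(send_cq, recv_cq);
if (recv_cq)
	__hns_roce_v1_cq_clean(recv_cq, qpn, srq);
/* avoid cleaning a shared CQ twice */
if (send_cq && send_cq != recv_cq)
	__hns_roce_v1_cq_clean(send_cq, qpn, NULL);
hns_roce_unlock_cqs(send_cq, recv_cq);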
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cb8071a3e0d5..12c4cd8e9378 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -63,20 +63,15 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
/* use ib_access_flags */
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_FRMR_WQE_BYTE_4_RR_S,
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_FRMR_WQE_BYTE_4_RW_S,
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_FRMR_WQE_BYTE_4_LW_S,
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
/* Data structure reuse may lead to confusion */
@@ -110,7 +105,7 @@ static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
- unsigned int *sge_ind)
+ unsigned int *sge_ind, int valid_num_sge)
{
struct hns_roce_v2_wqe_data_seg *dseg;
struct ib_sge *sg;
@@ -123,7 +118,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
- extend_sge_num = wr->num_sge - num_in_wqe;
+ extend_sge_num = valid_num_sge - num_in_wqe;
sg = wr->sg_list + num_in_wqe;
shift = qp->hr_buf.page_shift;
@@ -159,14 +154,16 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
+ int valid_num_sge,
const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ int j = 0;
int i;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
if (le32_to_cpu(rc_sq_wqe->msg_len) >
hr_dev->caps.max_sq_inline) {
*bad_wr = wr;
@@ -190,7 +187,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1);
} else {
- if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
+ if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
@@ -203,19 +200,21 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
+ for (i = 0; i < wr->num_sge &&
+ j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
+ j++;
}
}
- set_extend_sge(qp, wr, sge_ind);
+ set_extend_sge(qp, wr, sge_ind, valid_num_sge);
}
roce_set_field(rc_sq_wqe->byte_16,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
return 0;
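Note: set_rwqe_data_seg() and set_extend_sge() now receive valid_num_sge instead of trusting wr->num_sge, because a work request may legally carry zero-length SGEs that must not be written into the WQE nor counted in its SGE-number field. The count is derived once per WR, as in the hns_roce_v2_post_send() hunk below; extracted as a standalone sketch:

u32 msg_len = 0;
int valid_num_sge = 0;
int i;

/* skip zero-length entries; they occupy no data segment */
for (i = 0; i < wr->num_sge; i++) {
	if (likely(wr->sg_list[i].length)) {
		msg_len += wr->sg_list[i].length;
		valid_num_sge++;
	}
}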
@@ -226,6 +225,30 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
+static int check_send_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+ struct device *dev = hr_dev->dev;
+
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
+ dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+ return -EOPNOTSUPP;
+ } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
+ hr_qp->state == IB_QPS_INIT ||
+ hr_qp->state == IB_QPS_RTR)) {
+ dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state);
+ return -EINVAL;
+ } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
+ dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
@@ -239,38 +262,31 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr;
- unsigned int sge_ind;
unsigned int owner_bit;
+ unsigned int sge_idx;
+ unsigned int wqe_idx;
unsigned long flags;
- unsigned int ind;
+ int valid_num_sge;
void *wqe = NULL;
bool loopback;
int attr_mask;
u32 tmp_len;
- int ret = 0;
u32 hr_op;
u8 *smac;
int nreq;
+ int ret;
int i;
- if (unlikely(ibqp->qp_type != IB_QPT_RC &&
- ibqp->qp_type != IB_QPT_GSI &&
- ibqp->qp_type != IB_QPT_UD)) {
- dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
- *bad_wr = wr;
- return -EOPNOTSUPP;
- }
+ spin_lock_irqsave(&qp->sq.lock, flags);
- if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
- qp->state == IB_QPS_RTR)) {
- dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
+ ret = check_send_valid(hr_dev, qp);
+ if (ret) {
*bad_wr = wr;
- return -EINVAL;
+ nreq = 0;
+ goto out;
}
- spin_lock_irqsave(&qp->sq.lock, flags);
- ind = qp->sq_next_wqe;
- sge_ind = qp->next_sge;
+ sge_idx = qp->next_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -279,6 +295,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
goto out;
}
+ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, qp->sq.max_gs);
@@ -287,14 +305,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
goto out;
}
- wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
- wr->wr_id;
-
+ wqe = get_send_wqe(qp, wqe_idx);
+ qp->sq.wrid[wqe_idx] = wr->wr_id;
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+ valid_num_sge = 0;
tmp_len = 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ tmp_len += wr->sg_list[i].length;
+ valid_num_sge++;
+ }
+ }
+
/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_GSI) {
ud_sq_wqe = wqe;
@@ -330,9 +354,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND);
- for (i = 0; i < wr->num_sge; i++)
- tmp_len += wr->sg_list[i].length;
-
ud_sq_wqe->msg_len =
cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
@@ -368,12 +389,12 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->byte_16,
V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
- wr->num_sge);
+ valid_num_sge);
roce_set_field(ud_sq_wqe->byte_20,
V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_ind & (qp->sge.sge_cnt - 1));
+ sge_idx & (qp->sge.sge_cnt - 1));
roce_set_field(ud_sq_wqe->byte_24,
V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
@@ -423,13 +444,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
GID_LEN_V2);
- set_extend_sge(qp, wr, &sge_ind);
- ind++;
+ set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
} else if (ibqp->qp_type == IB_QPT_RC) {
rc_sq_wqe = wqe;
memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
- for (i = 0; i < wr->num_sge; i++)
- tmp_len += wr->sg_list[i].length;
rc_sq_wqe->msg_len =
cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
@@ -550,15 +568,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(rc_sq_wqe->byte_16,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- wr->num_sge);
+ valid_num_sge);
} else if (wr->opcode != IB_WR_REG_MR) {
ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
- wqe, &sge_ind, bad_wr);
+ wqe, &sge_idx,
+ valid_num_sge, bad_wr);
if (ret)
goto out;
}
-
- ind++;
} else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -588,8 +605,7 @@ out:
hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
- qp->sq_next_wqe = ind;
- qp->next_sge = sge_ind;
+ qp->next_sge = sge_idx;
if (qp->state == IB_QPS_ERR) {
attr_mask = IB_QP_STATE;
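Note: the send-path hunks above drop the qp->sq_next_wqe shadow counter; the producer slot is now recomputed per work request from the ring head, which cannot drift from the doorbell state. The idiom, extracted:

/* wqe_cnt is a power of two, so the AND wraps the ring index */
wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

wqe = get_send_wqe(qp, wqe_idx);
qp->sq.wrid[wqe_idx] = wr->wr_id;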
@@ -610,6 +626,17 @@ out:
return ret;
}
+static int check_recv_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
+ return -EIO;
+ else if (hr_qp->state == IB_QPS_RESET)
+ return -EINVAL;
+
+ return 0;
+}
+
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
@@ -623,18 +650,18 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
unsigned long flags;
void *wqe = NULL;
int attr_mask;
- int ret = 0;
+ u32 wqe_idx;
int nreq;
- int ind;
+ int ret;
int i;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
- ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
- if (hr_qp->state == IB_QPS_RESET) {
- spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+ ret = check_recv_valid(hr_dev, hr_qp);
+ if (ret) {
*bad_wr = wr;
- return -EINVAL;
+ nreq = 0;
+ goto out;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
@@ -645,6 +672,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out;
}
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
@@ -653,7 +682,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out;
}
- wqe = get_recv_wqe(hr_qp, ind);
+ wqe = get_recv_wqe(hr_qp, wqe_idx);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; i++) {
if (!wr->sg_list[i].length)
@@ -669,8 +698,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
/* rq support inline data */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
- sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
- hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+ sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
(u32)wr->num_sge;
for (i = 0; i < wr->num_sge; i++) {
sge_list[i].addr =
@@ -679,9 +708,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
}
}
- hr_qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
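Note: the receive-path validation above also changed shape. Previously the IB_QPS_RESET check unlocked and returned on its own, a second unlock site that is easy to get wrong; failures now funnel through the common exit with nreq = 0, so the doorbell update is skipped and the lock is released exactly once. A condensed sketch of the control flow:

spin_lock_irqsave(&hr_qp->rq.lock, flags);

ret = check_recv_valid(hr_dev, hr_qp);
if (ret) {
	*bad_wr = wr;
	nreq = 0;	/* nothing posted: no doorbell write at out */
	goto out;
}

/* ... post work requests, incrementing nreq ... */

out:
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);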
@@ -1254,7 +1281,6 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
}
out:
- dev_err(hr_dev->dev, "Func clear fail.\n");
hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}
@@ -1378,8 +1404,7 @@ static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
return 0;
}
-static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
- int vf_id)
+static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_vf_switch *swt;
@@ -1388,13 +1413,12 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
swt = (struct hns_roce_vf_switch *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
- roce_set_field(swt->fun_id,
- VF_SWITCH_DATA_FUN_ID_VF_ID_M,
- VF_SWITCH_DATA_FUN_ID_VF_ID_S,
- vf_id);
+ roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
+ VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
+
desc.flag =
cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
@@ -1574,69 +1598,9 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+static void set_default_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_caps *caps = &hr_dev->caps;
- int ret;
-
- ret = hns_roce_cmq_query_hw_info(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
- ret);
- return ret;
- }
-
- ret = hns_roce_query_fw_ver(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
- ret);
- return ret;
- }
-
- ret = hns_roce_config_global_param(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
- ret);
- return ret;
- }
-
- /* Get pf resource owned by every pf */
- ret = hns_roce_query_pf_resource(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
- ret);
- return ret;
- }
-
- if (hr_dev->pci_dev->revision == 0x21) {
- ret = hns_roce_query_pf_timer_resource(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev,
- "Query pf timer resource fail, ret = %d.\n",
- ret);
- return ret;
- }
- }
-
- ret = hns_roce_alloc_vf_resource(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
- ret);
- return ret;
- }
-
- if (hr_dev->pci_dev->revision == 0x21) {
- ret = hns_roce_set_vf_switch_param(hr_dev, 0);
- if (ret) {
- dev_err(hr_dev->dev,
- "Set function switch param fail, ret = %d.\n",
- ret);
- return ret;
- }
- }
-
- hr_dev->vendor_part_id = hr_dev->pci_dev->device;
- hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
@@ -1644,17 +1608,15 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
- caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
- caps->max_srq_sg = HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
@@ -1668,12 +1630,12 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
- caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
+ caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
- caps->idx_entry_sz = 4;
+ caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
@@ -1696,16 +1658,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->mpt_ba_pg_sz = 0;
caps->mpt_buf_pg_sz = 0;
caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
- caps->pbl_ba_pg_sz = 2;
- caps->pbl_buf_pg_sz = 0;
- caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->mtt_ba_pg_sz = 0;
caps->mtt_buf_pg_sz = 0;
caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
- caps->wqe_sq_hop_num = 2;
- caps->wqe_sge_hop_num = 1;
- caps->wqe_rq_hop_num = 2;
- caps->cqe_ba_pg_sz = 6;
+ caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
+ caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
+ caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
+ caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
caps->srqwqe_ba_pg_sz = 0;
@@ -1714,10 +1673,6 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->idx_ba_pg_sz = 0;
caps->idx_buf_pg_sz = 0;
caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
- caps->eqe_ba_pg_sz = 0;
- caps->eqe_buf_pg_sz = 0;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
- caps->tsq_buf_pg_sz = 0;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
@@ -1726,24 +1681,19 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
- if (hr_dev->pci_dev->revision == 0x21)
- caps->flags |= HNS_ROCE_CAP_FLAG_MW |
- HNS_ROCE_CAP_FLAG_FRMR;
-
caps->pkey_table_len[0] = 1;
- caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
- caps->max_srqs = HNS_ROCE_V2_MAX_SRQ;
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
- if (hr_dev->pci_dev->revision == 0x21) {
- caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
- HNS_ROCE_CAP_FLAG_SRQ |
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
+ HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
@@ -1757,12 +1707,345 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->cqc_timer_buf_pg_sz = 0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
- caps->sccc_ba_pg_sz = 0;
- caps->sccc_buf_pg_sz = 0;
- caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+ caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
+ }
+}
+
+static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
+ int *buf_page_size, int *bt_page_size, u32 hem_type)
+{
+ u64 obj_per_chunk;
+ int bt_chunk_size = 1 << PAGE_SHIFT;
+ int buf_chunk_size = 1 << PAGE_SHIFT;
+ int obj_per_chunk_default = buf_chunk_size / obj_size;
+
+ *buf_page_size = 0;
+ *bt_page_size = 0;
+
+ switch (hop_num) {
+ case 3:
+ obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+ (bt_chunk_size / BA_BYTE_LEN) *
+ (bt_chunk_size / BA_BYTE_LEN) *
+ obj_per_chunk_default;
+ break;
+ case 2:
+ obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+ (bt_chunk_size / BA_BYTE_LEN) *
+ obj_per_chunk_default;
+ break;
+ case 1:
+ obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
+ obj_per_chunk_default;
+ break;
+ case HNS_ROCE_HOP_NUM_0:
+ obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
+ break;
+ default:
+ pr_err("Table %d not support hop_num = %d!\n", hem_type,
+ hop_num);
+ return;
+ }
+
+ if (hem_type >= HEM_TYPE_MTT)
+ *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
+ else
+ *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
+}
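Note: calc_pg_sz() above sizes the buffer and base-address-table pages for a multi-hop translation: each hop multiplies coverage by the number of 8-byte base addresses per chunk, and the resulting page-size field is the log2 multiplier needed for the chunks to cover obj_num objects. A self-contained worked example of the hop_num = 2 arithmetic, with assumed numbers (1M objects of 256 bytes, one context BT page, 4 KB chunks; none of these values are taken from the driver):

#include <stdio.h>

#define BA_BYTE_LEN	8
#define CHUNK_SIZE	4096

int main(void)
{
	unsigned long obj_num = 1UL << 20;	/* assumed */
	unsigned long obj_size = 256;		/* assumed */
	unsigned long ctx_bt_num = 1;		/* assumed */

	unsigned long per_chunk = CHUNK_SIZE / obj_size;	/* 16 */
	unsigned long bas = CHUNK_SIZE / BA_BYTE_LEN;		/* 512 */

	/* hop_num == 2: two BT levels fan out above the object chunks */
	unsigned long obj_per_chunk = ctx_bt_num * bas * bas * per_chunk;

	/* round up, then take log2 as calc_pg_sz() does with ilog2() */
	unsigned long chunks = (obj_num + obj_per_chunk - 1) / obj_per_chunk;
	unsigned int shift = 0;

	while ((1UL << (shift + 1)) <= chunks)
		shift++;

	/* 4M objects fit in one chunk set, so the page shift stays 0 */
	printf("obj_per_chunk=%lu chunks=%lu pg_shift=%u\n",
	       obj_per_chunk, chunks, shift);
	return 0;
}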
+
+static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ struct hns_roce_query_pf_caps_a *resp_a;
+ struct hns_roce_query_pf_caps_b *resp_b;
+ struct hns_roce_query_pf_caps_c *resp_c;
+ struct hns_roce_query_pf_caps_d *resp_d;
+ struct hns_roce_query_pf_caps_e *resp_e;
+ int ctx_hop_num;
+ int pbl_hop_num;
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
+ true);
+ if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
+ resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
+ resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
+ resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
+ resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+
+ caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
+ caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
+ caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
+ caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
+ caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
+ caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
+ caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
+ caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
+ caps->num_aeq_vectors = resp_a->num_aeq_vectors;
+ caps->num_other_vectors = resp_a->num_other_vectors;
+ caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
+ caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
+ caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
+ caps->cq_entry_sz = resp_a->cq_entry_sz;
+
+ caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
+ caps->irrl_entry_sz = resp_b->irrl_entry_sz;
+ caps->trrl_entry_sz = resp_b->trrl_entry_sz;
+ caps->cqc_entry_sz = resp_b->cqc_entry_sz;
+ caps->srqc_entry_sz = resp_b->srqc_entry_sz;
+ caps->idx_entry_sz = resp_b->idx_entry_sz;
+ caps->sccc_entry_sz = resp_b->scc_ctx_entry_sz;
+ caps->max_mtu = resp_b->max_mtu;
+ caps->qpc_entry_sz = le16_to_cpu(resp_b->qpc_entry_sz);
+ caps->min_cqes = resp_b->min_cqes;
+ caps->min_wqes = resp_b->min_wqes;
+ caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
+ caps->pkey_table_len[0] = resp_b->pkey_table_len;
+ caps->phy_num_uars = resp_b->phy_num_uars;
+ ctx_hop_num = resp_b->ctx_hop_num;
+ pbl_hop_num = resp_b->pbl_hop_num;
+
+ caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
+ V2_QUERY_PF_CAPS_C_NUM_PDS_M,
+ V2_QUERY_PF_CAPS_C_NUM_PDS_S);
+ caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
+ V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
+ V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
+ caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
+ V2_QUERY_PF_CAPS_C_NUM_CQS_M,
+ V2_QUERY_PF_CAPS_C_NUM_CQS_S);
+ caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
+ V2_QUERY_PF_CAPS_C_MAX_GID_M,
+ V2_QUERY_PF_CAPS_C_MAX_GID_S);
+ caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
+ V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
+ V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
+ caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
+ V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
+ V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
+ caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
+ V2_QUERY_PF_CAPS_C_NUM_QPS_M,
+ V2_QUERY_PF_CAPS_C_NUM_QPS_S);
+ caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
+ V2_QUERY_PF_CAPS_C_MAX_ORD_M,
+ V2_QUERY_PF_CAPS_C_MAX_ORD_S);
+ caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
+ caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
+ caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
+ V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
+ V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
+ caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
+ caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
+ V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
+ V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
+ caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
+ V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
+ V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
+ caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
+ V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
+ V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
+ caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
+ V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
+ V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
+ caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
+ V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
+ V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
+ caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
+ V2_QUERY_PF_CAPS_D_RSV_PDS_M,
+ V2_QUERY_PF_CAPS_D_RSV_PDS_S);
+ caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
+ V2_QUERY_PF_CAPS_D_NUM_UARS_M,
+ V2_QUERY_PF_CAPS_D_NUM_UARS_S);
+ caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
+ V2_QUERY_PF_CAPS_D_RSV_QPS_M,
+ V2_QUERY_PF_CAPS_D_RSV_QPS_S);
+ caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
+ V2_QUERY_PF_CAPS_D_RSV_UARS_M,
+ V2_QUERY_PF_CAPS_D_RSV_UARS_S);
+ caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
+ V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
+ V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
+ caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
+ V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
+ V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
+ caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
+ V2_QUERY_PF_CAPS_E_RSV_CQS_M,
+ V2_QUERY_PF_CAPS_E_RSV_CQS_S);
+ caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
+ V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
+ V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
+ caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
+ V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
+ V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
+ caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
+ caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
+ caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
+ caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
+
+ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
+ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
+ caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
+ caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+ caps->mtt_ba_pg_sz = 0;
+ caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+ caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
+ caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
+
+ caps->qpc_hop_num = ctx_hop_num;
+ caps->srqc_hop_num = ctx_hop_num;
+ caps->cqc_hop_num = ctx_hop_num;
+ caps->mpt_hop_num = ctx_hop_num;
+ caps->mtt_hop_num = pbl_hop_num;
+ caps->cqe_hop_num = pbl_hop_num;
+ caps->srqwqe_hop_num = pbl_hop_num;
+ caps->idx_hop_num = pbl_hop_num;
+ caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
+ V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
+ V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
+ caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
+ V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
+ V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
+ caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
+ V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
+ V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
+
+ calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
+ caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
+ HEM_TYPE_QPC);
+ calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
+ caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
+ HEM_TYPE_MTPT);
+ calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
+ caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
+ HEM_TYPE_CQC);
+ calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
+ caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
+ &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+ caps->sccc_hop_num = ctx_hop_num;
+ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
+
+ calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
+ caps->sccc_hop_num, caps->sccc_bt_num,
+ &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
+ HEM_TYPE_SCCC);
+ calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
+ caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
+ &caps->cqc_timer_buf_pg_sz,
+ &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
+ }
+
+ calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
+ 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
+ calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
+ caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
+ &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
+ calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
+ 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
+
+ return 0;
+}
+
+static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ int ret;
+
+ ret = hns_roce_cmq_query_hw_info(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_fw_ver(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_config_global_param(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* Get pf resource owned by every pf */
+ ret = hns_roce_query_pf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (hr_dev->pci_dev->revision == 0x21) {
+ ret = hns_roce_query_pf_timer_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Query pf timer resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (hr_dev->pci_dev->revision == 0x21) {
+ ret = hns_roce_set_vf_switch_param(hr_dev, 0);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Set function switch param fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
}
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
+
+ caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+ caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+ caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
+ caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
+
+ caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
+ caps->pbl_buf_pg_sz = 0;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
+ caps->tsq_buf_pg_sz = 0;
+
+ ret = hns_roce_query_pf_caps(hr_dev);
+ if (ret)
+ set_default_caps(hr_dev);
+
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -1818,37 +2101,32 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
req_a->base_addr_h =
cpu_to_le32(link_tbl->table.map >> 32);
roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_DEPTH_M,
- CFG_LLM_QUE_DEPTH_S,
+ CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
link_tbl->npages);
roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_PGSZ_M,
- CFG_LLM_QUE_PGSZ_S,
+ CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
link_tbl->pg_sz);
req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
req_a->head_ba_h_nxtptr =
cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
- roce_set_field(req_a->head_ptr,
- CFG_LLM_HEAD_PTR_M,
+ roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
CFG_LLM_HEAD_PTR_S, 0);
} else {
req_b->tail_ba_l =
cpu_to_le32(entry[page_num - 1].blk_ba0);
- roce_set_field(req_b->tail_ba_h,
- CFG_LLM_TAIL_BA_H_M,
+ roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
CFG_LLM_TAIL_BA_H_S,
entry[page_num - 1].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_BA1_M);
- roce_set_field(req_b->tail_ptr,
- CFG_LLM_TAIL_PTR_M,
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
CFG_LLM_TAIL_PTR_S,
(entry[page_num - 2].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
- HNS_ROCE_LINK_TABLE_NXT_PTR_S);
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
}
}
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
+ CFG_LLM_INIT_EN_S, 1);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
@@ -2141,11 +2419,9 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
- roce_set_field(sgid_tb->table_idx_rsv,
- CFG_SGID_TB_TABLE_IDX_M,
+ roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
CFG_SGID_TB_TABLE_IDX_S, gid_index);
- roce_set_field(sgid_tb->vf_sgid_type_rsv,
- CFG_SGID_TB_VF_SGID_TYPE_M,
+ roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
p = (u32 *)&gid->raw[0];
@@ -2416,11 +2692,10 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mw->pdn);
- roce_set_field(mpt_entry->byte_4_pd_hop_st,
- V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
V2_MPT_BYTE_4_PBL_HOP_NUM_S,
- mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : mw->pbl_hop_num);
+ mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
+ mw->pbl_hop_num);
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
@@ -2561,8 +2836,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
- V2_CQC_BYTE_4_SHIFT_S,
- ilog2(hr_cq->cq_depth));
+ V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
@@ -2687,6 +2961,55 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
return 0;
}
+static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
+ int num_entries, struct ib_wc *wc)
+{
+ unsigned int left;
+ int npolled = 0;
+
+ left = wq->head - wq->tail;
+ if (left == 0)
+ return 0;
+
+ left = min_t(unsigned int, (unsigned int)num_entries, left);
+ while (npolled < left) {
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = 0;
+ wc->qp = &hr_qp->ibqp;
+
+ wq->tail++;
+ wc++;
+ npolled++;
+ }
+
+ return npolled;
+}
+
+static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct hns_roce_qp *hr_qp;
+ int npolled = 0;
+
+ list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
+ npolled += sw_comp(hr_qp, &hr_qp->sq,
+ num_entries - npolled, wc + npolled);
+ if (npolled >= num_entries)
+ goto out;
+ }
+
+ list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
+ npolled += sw_comp(hr_qp, &hr_qp->rq,
+ num_entries - npolled, wc + npolled);
+ if (npolled >= num_entries)
+ goto out;
+ }
+
+out:
+ return npolled;
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -2967,6 +3290,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
struct ib_wc *wc)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
struct hns_roce_qp *cur_qp = NULL;
unsigned long flags;
@@ -2974,6 +3298,18 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
spin_lock_irqsave(&hr_cq->lock, flags);
+	/*
+	 * When the device starts to reset, the state is RST_DOWN. At this
+	 * time there may still be valid CQEs in the hardware that have not
+	 * been polled, so switching to software polling mode immediately is
+	 * not allowed. Once the state changes to UNINIT, no CQEs remain in
+	 * the hardware, and the CQ can then be polled in software mode.
+	 */
+ if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
+ npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
+ goto out;
+ }
+
for (npolled = 0; npolled < num_entries; ++npolled) {
if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
break;
@@ -2985,6 +3321,7 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
}
+out:
spin_unlock_irqrestore(&hr_cq->lock, flags);
return npolled;
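Note: with the software-poll fallback above, a consumer that keeps polling a CQ across a device removal is never left waiting for completions that cannot arrive; once the state reaches UNINIT, every outstanding work request on the CQ's attached QPs is reported as flushed. A sketch of what the ULP observes (ibcq is a placeholder):

struct ib_wc wc[8];
int i, n;

n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
for (i = 0; i < n; i++)
	WARN_ON(wc[i].status != IB_WC_WR_FLUSH_ERR);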
@@ -3159,8 +3496,6 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
- enum ib_qp_state cur_state,
- enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
struct hns_roce_qp *hr_qp)
{
@@ -3210,6 +3545,9 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
!!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
}
static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
@@ -3577,6 +3915,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
0);
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_EXT_ATE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_EXT_ATE_S, 0);
} else {
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
!!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
@@ -3592,6 +3936,11 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
!!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
0);
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_EXT_ATE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_EXT_ATE_S, 0);
}
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
@@ -3838,13 +4187,11 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
/* Configure GID index */
port_num = rdma_ah_get_port_num(&attr->ah_attr);
roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M,
- V2_QPC_BYTE_20_SGID_IDX_S,
+ V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
hns_get_gid_index(hr_dev, port_num - 1,
grh->sgid_index));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M,
- V2_QPC_BYTE_20_SGID_IDX_S, 0);
+ V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -4234,8 +4581,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
roce_set_field(context->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S,
- attr->retry_cnt);
+ V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
roce_set_field(qpc_mask->byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S, 0);
@@ -4443,7 +4789,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
- ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
+ ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
goto out;
@@ -4464,7 +4810,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
- hr_qp->sq_next_wqe = 0;
hr_qp->next_sge = 0;
if (hr_qp->rq.wqe_cnt)
*hr_qp->rdb.db_record = 0;
@@ -4649,6 +4994,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
{
struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long flags;
int ret = 0;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
@@ -4659,21 +5005,32 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ibdev_err(ibdev, "modify QP to Reset failed.\n");
}
- send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
- recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+ send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
+ recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
hns_roce_lock_cqs(send_cq, recv_cq);
+ list_del(&hr_qp->node);
+ list_del(&hr_qp->sq_node);
+ list_del(&hr_qp->rq_node);
+
if (!udata) {
- __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
- to_hr_srq(hr_qp->ibqp.srq) : NULL);
- if (send_cq != recv_cq)
+ if (recv_cq)
+ __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
+ (hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) :
+ NULL));
+
+ if (send_cq && send_cq != recv_cq)
__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
+
}
hns_roce_qp_remove(hr_dev, hr_qp);
hns_roce_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
hns_roce_qp_free(hr_dev, hr_qp);
@@ -5155,8 +5512,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
*/
dma_rmb();
- cqn = roce_get_field(ceqe->comp,
- HNS_ROCE_V2_CEQE_COMP_CQN_M,
+ cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
HNS_ROCE_V2_CEQE_COMP_CQN_S);
hns_roce_cq_completion(hr_dev, cqn);
@@ -5409,126 +5765,98 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
eq->eqe_ba = eq->l0_dma;
/* set eqc state */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_EQ_ST_M,
- HNS_ROCE_EQC_EQ_ST_S,
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
HNS_ROCE_V2_EQ_STATE_VALID);
/* set eqe hop num */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_HOP_NUM_M,
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
/* set eqc over_ignore */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_OVER_IGNORE_M,
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
/* set eqc coalesce */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_COALESCE_M,
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
/* set eqc arm_state */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_ARM_ST_M,
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
/* set eqn */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_EQN_M,
- HNS_ROCE_EQC_EQN_S, eq->eqn);
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
+ eq->eqn);
/* set eqe_cnt */
- roce_set_field(eqc->byte_4,
- HNS_ROCE_EQC_EQE_CNT_M,
- HNS_ROCE_EQC_EQE_CNT_S,
- HNS_ROCE_EQ_INIT_EQE_CNT);
+ roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
+ HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
/* set eqe_ba_pg_sz */
- roce_set_field(eqc->byte_8,
- HNS_ROCE_EQC_BA_PG_SZ_M,
+ roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
HNS_ROCE_EQC_BA_PG_SZ_S,
eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
/* set eqe_buf_pg_sz */
- roce_set_field(eqc->byte_8,
- HNS_ROCE_EQC_BUF_PG_SZ_M,
+ roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
HNS_ROCE_EQC_BUF_PG_SZ_S,
eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
/* set eq_producer_idx */
- roce_set_field(eqc->byte_8,
- HNS_ROCE_EQC_PROD_INDX_M,
- HNS_ROCE_EQC_PROD_INDX_S,
- HNS_ROCE_EQ_INIT_PROD_IDX);
+ roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
+ HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
/* set eq_max_cnt */
- roce_set_field(eqc->byte_12,
- HNS_ROCE_EQC_MAX_CNT_M,
+ roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
/* set eq_period */
- roce_set_field(eqc->byte_12,
- HNS_ROCE_EQC_PERIOD_M,
+ roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
/* set eqe_report_timer */
- roce_set_field(eqc->eqe_report_timer,
- HNS_ROCE_EQC_REPORT_TIMER_M,
+ roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
HNS_ROCE_EQC_REPORT_TIMER_S,
HNS_ROCE_EQ_INIT_REPORT_TIMER);
/* set eqe_ba [34:3] */
- roce_set_field(eqc->eqe_ba0,
- HNS_ROCE_EQC_EQE_BA_L_M,
+ roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
/* set eqe_ba [64:35] */
- roce_set_field(eqc->eqe_ba1,
- HNS_ROCE_EQC_EQE_BA_H_M,
+ roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
/* set eq shift */
- roce_set_field(eqc->byte_28,
- HNS_ROCE_EQC_SHIFT_M,
- HNS_ROCE_EQC_SHIFT_S, eq->shift);
+ roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
+ eq->shift);
/* set eq MSI_IDX */
- roce_set_field(eqc->byte_28,
- HNS_ROCE_EQC_MSI_INDX_M,
- HNS_ROCE_EQC_MSI_INDX_S,
- HNS_ROCE_EQ_INIT_MSI_IDX);
+ roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
+ HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
/* set cur_eqe_ba [27:12] */
- roce_set_field(eqc->byte_28,
- HNS_ROCE_EQC_CUR_EQE_BA_L_M,
+ roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
/* set cur_eqe_ba [59:28] */
- roce_set_field(eqc->byte_32,
- HNS_ROCE_EQC_CUR_EQE_BA_M_M,
+ roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
/* set cur_eqe_ba [63:60] */
- roce_set_field(eqc->byte_36,
- HNS_ROCE_EQC_CUR_EQE_BA_H_M,
+ roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
/* set eq consumer idx */
- roce_set_field(eqc->byte_36,
- HNS_ROCE_EQC_CONS_INDX_M,
- HNS_ROCE_EQC_CONS_INDX_S,
- HNS_ROCE_EQ_INIT_CONS_IDX);
+ roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
+ HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
/* set nex_eqe_ba[43:12] */
- roce_set_field(eqc->nxt_eqe_ba0,
- HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+ roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
/* set nex_eqe_ba[63:44] */
- roce_set_field(eqc->nxt_eqe_ba1,
- HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+ roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
@@ -5828,18 +6156,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
/* irq contains: abnormal + AEQ + CEQ */
for (j = 0; j < other_num; j++)
- snprintf((char *)hr_dev->irq_names[j],
- HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
+ snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+ "hns-abn-%d", j);
for (j = other_num; j < (other_num + aeq_num); j++)
- snprintf((char *)hr_dev->irq_names[j],
- HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
- j - other_num);
+ snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+ "hns-aeq-%d", j - other_num);
for (j = (other_num + aeq_num); j < irq_num; j++)
- snprintf((char *)hr_dev->irq_names[j],
- HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
- j - other_num - aeq_num);
+ snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
+ "hns-ceq-%d", j - other_num - aeq_num);
for (j = 0; j < irq_num; j++) {
if (j < other_num)
@@ -6448,6 +6774,10 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
return;
handle->priv = NULL;
+
+ hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
+ hns_roce_handle_device_err(hr_dev);
+
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
@@ -6509,7 +6839,6 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
struct hns_roce_dev *hr_dev;
- struct ib_event event;
if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
@@ -6527,10 +6856,7 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
hr_dev->active = false;
hr_dev->dis_db = true;
- event.event = IB_EVENT_DEVICE_FATAL;
- event.device = &hr_dev->ib_dev;
- event.element.port_num = 1;
- ib_dispatch_event(&event);
+ hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
return 0;
}
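Note: taken together, the hw_v2 hunks above replace the one-shot IB_EVENT_DEVICE_FATAL dispatch with an internal state machine, which is what lets poll_cq defer the software switch until the hardware queues are truly dead. The flow, summarized as a sketch:

/*
 * INITED  --reset_notify_down-->  RST_DOWN
 *     (doorbells disabled; HW may still hold valid CQEs)
 *
 * RST_DOWN --uninit_instance-->   UNINIT
 *     (no CQEs left in HW; hns_roce_handle_device_err() and
 *      hns_roce_v2_sw_poll_cq() flush everything in software)
 */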
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 76a14db7028d..2a117ff6a6be 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -81,10 +81,12 @@
#define HNS_ROCE_V2_QPC_ENTRY_SZ 256
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
+#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+#define HNS_ROCE_V2_IDX_ENTRY_SZ 4
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
@@ -109,7 +111,12 @@
#define HNS_ROCE_PBL_HOP_NUM 2
#define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1
+#define HNS_ROCE_SQWQE_HOP_NUM 2
+#define HNS_ROCE_EXT_SGE_HOP_NUM 1
+#define HNS_ROCE_RQWQE_HOP_NUM 2
+#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
+#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
#define HNS_ROCE_V2_GID_INDEX_NUM 256
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -237,6 +244,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
HNS_ROCE_OPC_POST_MB = 0x8504,
@@ -643,7 +651,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_76_ATE_S 27
#define V2_QPC_BYTE_76_RQIE_S 28
-
+#define V2_QPC_BYTE_76_EXT_ATE_S 29
#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -1569,6 +1577,155 @@ struct hns_roce_cfg_smac_tb {
#define CFG_SMAC_TB_VF_SMAC_H_S 0
#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+struct hns_roce_query_pf_caps_a {
+ u8 number_ports;
+ u8 local_ca_ack_delay;
+ __le16 max_sq_sg;
+ __le16 max_sq_inline;
+ __le16 max_rq_sg;
+ __le32 max_extend_sg;
+ __le16 num_qpc_timer;
+ __le16 num_cqc_timer;
+ __le16 max_srq_sges;
+ u8 num_aeq_vectors;
+ u8 num_other_vectors;
+ u8 max_sq_desc_sz;
+ u8 max_rq_desc_sz;
+ u8 max_srq_desc_sz;
+ u8 cq_entry_sz;
+};
+
+struct hns_roce_query_pf_caps_b {
+ u8 mtpt_entry_sz;
+ u8 irrl_entry_sz;
+ u8 trrl_entry_sz;
+ u8 cqc_entry_sz;
+ u8 srqc_entry_sz;
+ u8 idx_entry_sz;
+ u8 scc_ctx_entry_sz;
+ u8 max_mtu;
+ __le16 qpc_entry_sz;
+ __le16 qpc_timer_entry_sz;
+ __le16 cqc_timer_entry_sz;
+ u8 min_cqes;
+ u8 min_wqes;
+ __le32 page_size_cap;
+ u8 pkey_table_len;
+ u8 phy_num_uars;
+ u8 ctx_hop_num;
+ u8 pbl_hop_num;
+};
+
+struct hns_roce_query_pf_caps_c {
+ __le32 cap_flags_num_pds;
+ __le32 max_gid_num_cqs;
+ __le32 cq_depth;
+ __le32 num_mrws;
+ __le32 ord_num_qps;
+ __le16 sq_depth;
+ __le16 rq_depth;
+};
+
+#define V2_QUERY_PF_CAPS_C_NUM_PDS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_PDS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_S 20
+#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_M GENMASK(31, 20)
+
+#define V2_QUERY_PF_CAPS_C_NUM_CQS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_MAX_GID_S 20
+#define V2_QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20)
+
+#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_S 0
+#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0)
+
+#define V2_QUERY_PF_CAPS_C_NUM_MRWS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_MRWS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_NUM_QPS_S 0
+#define V2_QUERY_PF_CAPS_C_NUM_QPS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_C_MAX_ORD_S 20
+#define V2_QUERY_PF_CAPS_C_MAX_ORD_M GENMASK(27, 20)
+
+struct hns_roce_query_pf_caps_d {
+ __le32 wq_hop_num_max_srqs;
+ __le16 srq_depth;
+ __le16 rsv;
+ __le32 num_ceqs_ceq_depth;
+ __le32 arm_st_aeq_depth;
+ __le32 num_uars_rsv_pds;
+ __le32 rsv_uars_rsv_qps;
+};
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
+
+#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
+#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
+
+#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S 22
+#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M GENMASK(23, 22)
+
+#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24
+#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24)
+
+#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
+#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0)
+
+#define V2_QUERY_PF_CAPS_D_NUM_CEQS_S 22
+#define V2_QUERY_PF_CAPS_D_NUM_CEQS_M GENMASK(31, 22)
+
+#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S 0
+#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M GENMASK(21, 0)
+
+#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S 22
+#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M GENMASK(23, 22)
+
+#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S 24
+#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M GENMASK(25, 24)
+
+#define V2_QUERY_PF_CAPS_D_RSV_PDS_S 0
+#define V2_QUERY_PF_CAPS_D_RSV_PDS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_D_NUM_UARS_S 20
+#define V2_QUERY_PF_CAPS_D_NUM_UARS_M GENMASK(27, 20)
+
+#define V2_QUERY_PF_CAPS_D_RSV_QPS_S 0
+#define V2_QUERY_PF_CAPS_D_RSV_QPS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_D_RSV_UARS_S 20
+#define V2_QUERY_PF_CAPS_D_RSV_UARS_M GENMASK(27, 20)
+
+struct hns_roce_query_pf_caps_e {
+ __le32 chunk_size_shift_rsv_mrws;
+ __le32 rsv_cqs;
+ __le32 rsv_srqs;
+ __le32 rsv_lkey;
+ __le16 ceq_max_cnt;
+ __le16 ceq_period;
+ __le16 aeq_max_cnt;
+ __le16 aeq_period;
+};
+
+#define V2_QUERY_PF_CAPS_E_RSV_MRWS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_MRWS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S 20
+#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M GENMASK(31, 20)
+
+#define V2_QUERY_PF_CAPS_E_RSV_CQS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_CQS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_E_RSV_SRQS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_SRQS_M GENMASK(19, 0)
+
+#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0
+#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0)
+
struct hns_roce_cmq_desc {
__le16 opcode;
__le16 flag;
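
Each _S/_M pair above names a shift and a GENMASK for one bit-field packed into a little-endian word of the firmware response. A minimal decoding sketch, assuming the usual shift-and-mask convention (the driver's own roce_get_field() helper follows the same shape); whether a given field is a raw count or a log2 value is defined by the firmware interface, not by these macros:

	/* Hypothetical standalone decoder for one _S/_M pair. */
	static inline u32 pf_caps_get_field(__le32 word, u32 mask, u32 shift)
	{
		return (le32_to_cpu(word) & mask) >> shift;
	}

	/* e.g. the PD count field of struct hns_roce_query_pf_caps_c: */
	num_pds_field = pf_caps_get_field(resp_c->cap_flags_num_pds,
					  V2_QUERY_PF_CAPS_C_NUM_PDS_M,
					  V2_QUERY_PF_CAPS_C_NUM_PDS_S);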
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 854ef6e74788..d0031d559213 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -90,7 +90,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
- struct ib_gid_attr zattr = { };
+ struct ib_gid_attr zattr = {};
u8 port = attr->port_num - 1;
int ret;
@@ -210,7 +210,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
- props->max_srq = hr_dev->caps.max_srqs;
+ props->max_srq = hr_dev->caps.num_srqs;
props->max_srq_wr = hr_dev->caps.max_srq_wrs;
props->max_srq_sge = hr_dev->caps.max_srq_sges;
}
@@ -259,11 +259,12 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
mtu = iboe_get_mtu(net_dev->mtu);
props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
- props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- props->phys_state = (props->state == IB_PORT_ACTIVE) ?
- IB_PORT_PHYS_STATE_LINK_UP :
- IB_PORT_PHYS_STATE_DISABLED;
+ props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
+ IB_PORT_ACTIVE :
+ IB_PORT_DOWN;
+ props->phys_state = props->state == IB_PORT_ACTIVE ?
+ IB_PORT_PHYS_STATE_LINK_UP :
+ IB_PORT_PHYS_STATE_DISABLED;
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
@@ -481,13 +482,13 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev = &hr_dev->ib_dev;
- ib_dev->node_type = RDMA_NODE_IB_CA;
- ib_dev->dev.parent = dev;
+ ib_dev->node_type = RDMA_NODE_IB_CA;
+ ib_dev->dev.parent = dev;
- ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
- ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
- ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
- ib_dev->uverbs_cmd_mask =
+ ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+ ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+ ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+ ib_dev->uverbs_cmd_mask =
(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
@@ -503,8 +504,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
- ib_dev->uverbs_ex_cmd_mask |=
- (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+ ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
@@ -589,11 +589,13 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table,
- HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_cqe_segs, 1);
+ &hr_dev->mr_table.mtt_cqe_table,
+ HEM_TYPE_CQE,
+ hr_dev->caps.mtt_entry_sz,
+ hr_dev->caps.num_cqe_segs, 1);
if (ret) {
- dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+ dev_err(dev,
+ "Failed to init CQE context memory, aborting.\n");
goto err_unmap_cqe;
}
}
@@ -633,7 +635,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
- "Failed to init trrl_table memory, aborting.\n");
+ "Failed to init trrl_table memory, aborting.\n");
goto err_unmap_irrl;
}
}
@@ -653,7 +655,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_srqs, 1);
if (ret) {
dev_err(dev,
- "Failed to init SRQ context memory, aborting.\n");
+ "Failed to init SRQ context memory, aborting.\n");
goto err_unmap_cq;
}
}
@@ -692,33 +694,31 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
- "Failed to init SCC context memory, aborting.\n");
+ "Failed to init SCC context memory, aborting.\n");
goto err_unmap_idx;
}
}
if (hr_dev->caps.qpc_timer_entry_sz) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->qpc_timer_table,
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
hr_dev->caps.num_qpc_timer, 1);
if (ret) {
dev_err(dev,
- "Failed to init QPC timer memory, aborting.\n");
+ "Failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx;
}
}
if (hr_dev->caps.cqc_timer_entry_sz) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->cqc_timer_table,
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
hr_dev->caps.num_cqc_timer, 1);
if (ret) {
dev_err(dev,
- "Failed to init CQC timer memory, aborting.\n");
+ "Failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer;
}
}
@@ -727,8 +727,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
err_unmap_qpc_timer:
if (hr_dev->caps.qpc_timer_entry_sz)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->qpc_timer_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
err_unmap_ctx:
if (hr_dev->caps.sccc_entry_sz)
@@ -863,6 +862,50 @@ err_uar_table_free:
return ret;
}
+static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+ if (cq->comp_handler) {
+ if (!hr_cq->is_armed) {
+ hr_cq->is_armed = 1;
+ list_add_tail(&hr_cq->node, cq_list);
+ }
+ }
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+}
+
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_qp *hr_qp;
+ struct hns_roce_cq *hr_cq;
+ struct list_head cq_list;
+ unsigned long flags_qp;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&cq_list);
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
+ spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
+ if (hr_qp->sq.tail != hr_qp->sq.head)
+ check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
+ spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
+ if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
+ check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
+ }
+
+ list_for_each_entry(hr_cq, &cq_list, node)
+ hns_roce_cq_completion(hr_dev, hr_cq->cqn);
+
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
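
check_and_get_armed_cq() above only queues CQs that have a comp_handler, i.e. CQs some consumer is actually waiting on, so the forced hns_roce_cq_completion() reaches real waiters. For context, a minimal sketch of the standard kernel-verbs setup that installs such a handler (illustrative, not part of this patch):

	static void my_cq_done(struct ib_cq *cq, void *cq_context)
	{
		/* drain completions with ib_poll_cq(), then re-arm: */
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	}

	/* cq->comp_handler is installed by the create call: */
	cq = ib_create_cq(ibdev, my_cq_done, NULL, my_ctx, &cq_attr);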
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -933,6 +976,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
}
}
+ INIT_LIST_HEAD(&hr_dev->qp_list);
+ spin_lock_init(&hr_dev->qp_list_lock);
+
ret = hns_roce_register_device(hr_dev);
if (ret)
goto error_failed_register_device;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 9ad19170c3f9..b9898e71655a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -1064,8 +1064,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
if (page_addr & ((1 << mtt->page_shift) - 1)) {
dev_err(dev,
- "page_addr 0x%llx is not page_shift %d alignment!\n",
- page_addr, mtt->page_shift);
+ "page_addr is not page_shift %d alignment!\n",
+ mtt->page_shift);
ret = -EINVAL;
goto out;
}
@@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags);
+ mr->umem = ib_umem_get(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
+ mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index a6565b674801..3257ad11be48 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -393,40 +393,38 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
	/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
- hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+ round_up((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
- HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
+ round_up((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), page_size) +
- HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+ round_up((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (ex_sge_num) {
- hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
- (hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
+ hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
hr_qp->rq.offset = hr_qp->sge.offset +
- HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift),
- page_size);
+ round_up((hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift),
+ page_size);
} else {
- hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
- (hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
+ hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
}
}
@@ -593,20 +591,18 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
- page_size);
+ size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
- (u32)hr_qp->sge.sge_cnt);
+ (u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
- size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift, page_size);
+ size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
+ page_size);
}
hr_qp->rq.offset = size;
- size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
- page_size);
+ size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
hr_qp->buff_size = size;
	/* Get the number of send WRs and SGEs */
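
HNS_ROCE_ALIGN_UP was a driver-local macro; round_up() from <linux/kernel.h> is the generic equivalent. Both require a power-of-two alignment, which page_size (always 1 << n here) and PAGE_SIZE satisfy. A quick worked check:

	/* round_up(x, a), power-of-two a, rounds x up to a multiple of a: */
	round_up(5000, 4096);	/* == 8192 */
	round_up(4096, 4096);	/* == 4096, already aligned */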
@@ -681,6 +677,29 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
kfree(hr_qp->rq_inl_buf.wqe_list);
}
+static void add_qp_to_list(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_cq *send_cq, struct ib_cq *recv_cq)
+{
+ struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
+ unsigned long flags;
+
+ hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
+ hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
+
+ list_add_tail(&hr_qp->node, &hr_dev->qp_list);
+ if (hr_send_cq)
+ list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
+ if (hr_recv_cq)
+ list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
+
+ hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
@@ -744,7 +763,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_alloc_rq_inline_buf;
}
- hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
+ hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
@@ -950,6 +969,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
hr_qp->event = hns_roce_ib_qp_event;
+
+ add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq);
+
hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
return 0;
@@ -1232,7 +1254,16 @@ out:
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
- if (send_cq == recv_cq) {
+ if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+ spin_lock_irq(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+ spin_lock_irq(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
@@ -1248,7 +1279,16 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
__releases(&recv_cq->lock)
{
- if (send_cq == recv_cq) {
+ if (unlikely(send_cq == NULL && recv_cq == NULL)) {
+ __release(&recv_cq->lock);
+ __release(&send_cq->lock);
+ } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+ __release(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ } else if (send_cq == recv_cq) {
__release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
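
The NULL branches added above extend, without disturbing, the existing deadlock-avoidance rule: when both CQs exist and differ, they are always locked in ascending CQN order and unlocked in reverse. The generic shape of that rule, as a standalone sketch (helper name hypothetical):

	/* Two CPUs locking (a, b) and (b, a) cannot ABBA-deadlock if both
	 * sort by a stable key before taking the locks: */
	static void lock_cq_pair(struct hns_roce_cq *a, struct hns_roce_cq *b)
	{
		if (a->cqn < b->cqn) {
			spin_lock_irq(&a->lock);
			spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock_irq(&b->lock);
			spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
		}
	}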
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 7113ebfdb4f0..c6d5f06f9cde 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -186,7 +186,8 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
+ srq->umem =
+ ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
@@ -205,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
goto err_user_srq_mtt;
/* config index queue BA */
- srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
+ srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index d44cf33df81a..238614370927 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
const struct in_ifaddr *ifa;
idev = in_dev_get(dev);
+ if (!idev)
+ continue;
in_dev_for_each_ifa_rtnl(ifa, idev) {
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
"IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 86375947bc67..c335de91508f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct i40iw_ucontext *ucontext;
- u64 db_addr_offset;
- u64 push_offset;
+ u64 db_addr_offset, push_offset, pfn;
ucontext = to_ucontext(context);
if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_private_data = ucontext;
} else {
if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
+ pfn = vma->vm_pgoff +
+ (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+ PAGE_SHIFT);
- return 0;
+ return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ vma->vm_page_prot, NULL);
}
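
Replacing the bare io_remap_pfn_range() with rdma_user_mmap_io() moves the mapping under core tracking; in outline:

	/* rdma_user_mmap_io(ucontext, vma, pfn, size, prot, entry):
	 *   - checks that size matches the VMA being mapped,
	 *   - performs the io_remap_pfn_range() itself,
	 *   - associates the mapping with the ucontext so the core can
	 *     zap it if the device is hot-unplugged or disassociated.
	 * The trailing NULL is the optional rdma_user_mmap_entry.
	 */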
/**
@@ -1758,12 +1756,15 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
int ret;
int pg_shift;
+ if (!udata)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (iwdev->closing)
return ERR_PTR(-ENODEV);
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc);
+ region = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index ecd6cadd529a..b591861934b3 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -186,23 +186,6 @@ out:
kfree(ent);
}
-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
-{
- struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
- struct rb_root *sl_id_map = &sriov->sl_id_map;
- struct id_map_entry *ent, *found_ent;
-
- spin_lock(&sriov->id_map_lock);
- ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
- if (!ent)
- goto out;
- found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
- if (found_ent && found_ent == ent)
- rb_erase(&found_ent->node, sl_id_map);
-out:
- spin_unlock(&sriov->id_map_lock);
-}
-
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
@@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
spin_lock(&sriov->id_map_lock);
spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* Make sure that there is no schedule inside the scheduled work. */
- if (!sriov->is_going_down) {
+ if (!sriov->is_going_down && !id->scheduled_delete) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
@@ -341,9 +324,6 @@ cont:
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
schedule_delayed(ibdev, id);
- else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
- id_map_find_del(ibdev, pv_cm_id);
-
return 0;
}
@@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
*slave = id->slave_id;
set_remote_comm_id(mad, id->sl_cm_id);
- if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+ if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
schedule_delayed(ibdev, id);
- else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
- id_map_find_del(ibdev, (int) pv_cm_id);
- }
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 306b21281fa2..f8b936b76dcd 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int shift;
int n;
- *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
+ *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -568,18 +568,13 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
wc->vendor_err = cqe->vendor_err_syndrome;
}
-static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum)
{
- return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV4F |
- MLX4_CQE_STATUS_IPV4OPT |
- MLX4_CQE_STATUS_IPV6 |
- MLX4_CQE_STATUS_IPOK)) ==
- cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPOK)) &&
- (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
- MLX4_CQE_STATUS_TCP)) &&
- checksum == cpu_to_be16(0xffff);
+ return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
+ ((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ (status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP)) &&
+ (checksum == cpu_to_be16(0xffff))));
}
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
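
The rewritten helper accepts the checksum on either of two grounds; spelled out as a decision table derived from the code above:

	/* mlx4_ib_ipoib_csum_ok() is now true when either:
	 *   1. badfcs_enc has MLX4_CQE_STATUS_L4_CSUM set, i.e. the HCA
	 *      reports the L4 checksum as verified directly; or
	 *   2. status shows IPOK plus TCP or UDP and the 16-bit checksum
	 *      field is 0xffff (ones-complement sum checked out).
	 */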
@@ -855,6 +850,7 @@ repoll:
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
+ cqe->badfcs_enc,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
if (is_eth) {
wc->slid = 0;
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 714f9df5bf39..d41f03ccb0e1 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 34055cbab38c..2f5d9b181848 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
+static void free_gid_entry(struct gid_entry *entry)
+{
+ memset(&entry->gid, 0, sizeof(entry->gid));
+ kfree(entry->ctx);
+ entry->ctx = NULL;
+}
+
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
@@ -313,6 +320,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
GFP_ATOMIC);
if (!gids) {
ret = -ENOMEM;
+ *context = NULL;
+ free_gid_entry(&port_gid_table->gids[free]);
} else {
for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
@@ -324,6 +333,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
if (!ret && hw_update) {
ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
+ if (ret) {
+ spin_lock_bh(&iboe->lock);
+ *context = NULL;
+ free_gid_entry(&port_gid_table->gids[free]);
+ spin_unlock_bh(&iboe->lock);
+ }
kfree(gids);
}
@@ -353,10 +368,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx->refcount) {
unsigned int real_index = ctx->real_index;
- memset(&port_gid_table->gids[real_index].gid, 0,
- sizeof(port_gid_table->gids[real_index].gid));
- kfree(port_gid_table->gids[real_index].ctx);
- port_gid_table->gids[real_index].ctx = NULL;
+ free_gid_entry(&port_gid_table->gids[real_index]);
hw_update = 1;
}
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index dfa17bcdcdbc..b0121c90c561 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -367,7 +367,7 @@ end:
return block_shift;
}
-static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
u64 length, int access_flags)
{
/*
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags);
+ return ib_umem_get(device, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
+ mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -504,7 +504,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = mlx4_get_umem_mr(udata, start, length,
+ mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 85f57b76e446..26425dd2d960 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -849,7 +849,7 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
* reused for further WQN allocations.
* The next created WQ will allocate a new range.
*/
- range->dirty = 1;
+ range->dirty = true;
}
mutex_unlock(&context->wqn_ranges_mutex);
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
+ qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,7 +1110,8 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
+ qp->umem =
+ ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -3084,7 +3085,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
}
if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
- is_vlan = 1;
+ is_vlan = true;
}
}
err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 8dcf6e3d9ae2..8f9d5035142d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+ srq->umem =
+ ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dd8d24ee8e1d..367a71bc5f4b 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -708,8 +708,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*cqe_size = ucmd.cqe_size;
cq->buf.umem =
- ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE);
+ ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1108,7 +1108,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
return -EINVAL;
- umem = ib_umem_get(udata, ucmd.buf_addr,
+ umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 9d0a18cf9e5e..46e1ab771f10 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -30,7 +30,7 @@ enum devx_obj_flags {
struct devx_async_data {
struct mlx5_ib_dev *mdev;
struct list_head list;
- struct ib_uobject *fd_uobj;
+ struct devx_async_cmd_event_file *ev_file;
struct mlx5_async_work cb_work;
u16 cmd_out_len;
/* must be last field in this structure */
@@ -72,7 +72,6 @@ struct devx_event_subscription {
struct rcu_head rcu;
u64 cookie;
struct devx_async_event_file *ev_file;
- struct file *filp; /* Upon hot unplug we need a direct access to */
struct eventfd_ctx *eventfd;
};
@@ -1674,21 +1673,20 @@ static void devx_query_callback(int status, struct mlx5_async_work *context)
{
struct devx_async_data *async_data =
container_of(context, struct devx_async_data, cb_work);
- struct ib_uobject *fd_uobj = async_data->fd_uobj;
- struct devx_async_cmd_event_file *ev_file;
- struct devx_async_event_queue *ev_queue;
+ struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
+ struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
unsigned long flags;
- ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
- uobj);
- ev_queue = &ev_file->ev_queue;
-
+ /*
+ * Note that if the struct devx_async_cmd_event_file uobj begins to be
+ * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
+ * routine returns, ensuring that it always remains valid here.
+ */
spin_lock_irqsave(&ev_queue->lock, flags);
list_add_tail(&async_data->list, &ev_queue->event_list);
spin_unlock_irqrestore(&ev_queue->lock, flags);
wake_up_interruptible(&ev_queue->poll_wait);
- fput(fd_uobj->object);
}
#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
@@ -1757,9 +1755,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
async_data->cmd_out_len = cmd_out_len;
async_data->mdev = mdev;
- async_data->fd_uobj = fd_uobj;
+ async_data->ev_file = ev_file;
- get_file(fd_uobj->object);
MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
uverbs_attr_get_len(attrs,
@@ -1769,12 +1766,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
devx_query_callback, &async_data->cb_work);
if (err)
- goto cb_err;
+ goto free_async;
return 0;
-cb_err:
- fput(fd_uobj->object);
free_async:
kvfree(async_data);
sub_bytes:
@@ -2032,6 +2027,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
goto err;
list_add_tail(&event_sub->event_list, &sub_list);
+ uverbs_uobject_get(&ev_file->uobj);
if (use_eventfd) {
event_sub->eventfd =
eventfd_ctx_fdget(redirect_fd);
@@ -2045,7 +2041,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
event_sub->cookie = cookie;
event_sub->ev_file = ev_file;
- event_sub->filp = fd_uobj->object;
/* May be needed upon cleanup the devx object/subscription */
event_sub->xa_key_level1 = key_level1;
event_sub->xa_key_level2 = obj_id;
@@ -2099,7 +2094,7 @@ err:
if (event_sub->eventfd)
eventfd_ctx_put(event_sub->eventfd);
-
+ uverbs_uobject_put(&event_sub->ev_file->uobj);
kfree(event_sub);
}
@@ -2134,7 +2129,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
@@ -2324,7 +2319,8 @@ static int deliver_event(struct devx_event_subscription *event_sub,
if (ev_file->omit_data) {
spin_lock_irqsave(&ev_file->lock, flags);
- if (!list_empty(&event_sub->event_list)) {
+ if (!list_empty(&event_sub->event_list) ||
+ ev_file->is_destroyed) {
spin_unlock_irqrestore(&ev_file->lock, flags);
return 0;
}
@@ -2348,7 +2344,10 @@ static int deliver_event(struct devx_event_subscription *event_sub,
memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
spin_lock_irqsave(&ev_file->lock, flags);
- list_add_tail(&event_data->list, &ev_file->event_list);
+ if (!ev_file->is_destroyed)
+ list_add_tail(&event_data->list, &ev_file->event_list);
+ else
+ kfree(event_data);
spin_unlock_irqrestore(&ev_file->lock, flags);
wake_up_interruptible(&ev_file->poll_wait);
@@ -2361,17 +2360,10 @@ static void dispatch_event_fd(struct list_head *fd_list,
struct devx_event_subscription *item;
list_for_each_entry_rcu(item, fd_list, xa_list) {
- if (!get_file_rcu(item->filp))
- continue;
-
- if (item->eventfd) {
+ if (item->eventfd)
eventfd_signal(item->eventfd, 1);
- fput(item->filp);
- continue;
- }
-
- deliver_event(item, data);
- fput(item->filp);
+ else
+ deliver_event(item, data);
}
}
@@ -2479,11 +2471,11 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
return -ERESTARTSYS;
}
- if (list_empty(&ev_queue->event_list) &&
- ev_queue->is_destroyed)
- return -EIO;
-
spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
}
event = list_entry(ev_queue->event_list.next,
@@ -2509,23 +2501,6 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
return ret;
}
-static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
-{
- struct ib_uobject *uobj = filp->private_data;
- struct devx_async_cmd_event_file *comp_ev_file = container_of(
- uobj, struct devx_async_cmd_event_file, uobj);
- struct devx_async_data *entry, *tmp;
-
- spin_lock_irq(&comp_ev_file->ev_queue.lock);
- list_for_each_entry_safe(entry, tmp,
- &comp_ev_file->ev_queue.event_list, list)
- kvfree(entry);
- spin_unlock_irq(&comp_ev_file->ev_queue.lock);
-
- uverbs_close_fd(filp);
- return 0;
-}
-
static __poll_t devx_async_cmd_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
@@ -2549,7 +2524,7 @@ static const struct file_operations devx_async_cmd_event_fops = {
.owner = THIS_MODULE,
.read = devx_async_cmd_event_read,
.poll = devx_async_cmd_event_poll,
- .release = devx_async_cmd_event_close,
+ .release = uverbs_uobject_fd_release,
.llseek = no_llseek,
};
@@ -2574,10 +2549,6 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
return -EOVERFLOW;
}
- if (ev_file->is_destroyed) {
- spin_unlock_irq(&ev_file->lock);
- return -EIO;
- }
while (list_empty(&ev_file->event_list)) {
spin_unlock_irq(&ev_file->lock);
@@ -2653,81 +2624,96 @@ static __poll_t devx_async_event_poll(struct file *filp,
return pollflags;
}
-static int devx_async_event_close(struct inode *inode, struct file *filp)
+static void devx_free_subscription(struct rcu_head *rcu)
{
- struct devx_async_event_file *ev_file = filp->private_data;
- struct devx_event_subscription *event_sub, *event_sub_tmp;
- struct devx_async_event_data *entry, *tmp;
- struct mlx5_ib_dev *dev = ev_file->dev;
-
- mutex_lock(&dev->devx_event_table.event_xa_lock);
- /* delete the subscriptions which are related to this FD */
- list_for_each_entry_safe(event_sub, event_sub_tmp,
- &ev_file->subscribed_events_list, file_list) {
- devx_cleanup_subscription(dev, event_sub);
- if (event_sub->eventfd)
- eventfd_ctx_put(event_sub->eventfd);
-
- list_del_rcu(&event_sub->file_list);
- /* subscription may not be used by the read API any more */
- kfree_rcu(event_sub, rcu);
- }
-
- mutex_unlock(&dev->devx_event_table.event_xa_lock);
-
- /* free the pending events allocation */
- if (!ev_file->omit_data) {
- spin_lock_irq(&ev_file->lock);
- list_for_each_entry_safe(entry, tmp,
- &ev_file->event_list, list)
- kfree(entry); /* read can't come any more */
- spin_unlock_irq(&ev_file->lock);
- }
+ struct devx_event_subscription *event_sub =
+ container_of(rcu, struct devx_event_subscription, rcu);
- uverbs_close_fd(filp);
- put_device(&dev->ib_dev.dev);
- return 0;
+ if (event_sub->eventfd)
+ eventfd_ctx_put(event_sub->eventfd);
+ uverbs_uobject_put(&event_sub->ev_file->uobj);
+ kfree(event_sub);
}
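
devx_free_subscription() is wired up through call_rcu() rather than kfree_rcu() because the subscription must drop its eventfd and uobject references before the memory goes away. The generic shape of that pattern, as a sketch with hypothetical names:

	struct sub {
		struct rcu_head rcu;
		/* references needing teardown beyond a bare kfree() */
	};

	static void sub_free_rcu(struct rcu_head *rcu)
	{
		struct sub *s = container_of(rcu, struct sub, rcu);
		/* drop auxiliary references here, then free */
		kfree(s);
	}

	/* writer side: unlink under the lock, defer the free past the
	 * RCU grace period so lockless readers cannot trip on it */
	list_del_rcu(&s->node);
	call_rcu(&s->rcu, sub_free_rcu);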
static const struct file_operations devx_async_event_fops = {
.owner = THIS_MODULE,
.read = devx_async_event_read,
.poll = devx_async_event_poll,
- .release = devx_async_event_close,
+ .release = uverbs_uobject_fd_release,
.llseek = no_llseek,
};
-static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
struct devx_async_cmd_event_file *comp_ev_file =
container_of(uobj, struct devx_async_cmd_event_file,
uobj);
struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ struct devx_async_data *entry, *tmp;
spin_lock_irq(&ev_queue->lock);
ev_queue->is_destroyed = 1;
spin_unlock_irq(&ev_queue->lock);
-
- if (why == RDMA_REMOVE_DRIVER_REMOVE)
- wake_up_interruptible(&ev_queue->poll_wait);
+ wake_up_interruptible(&ev_queue->poll_wait);
mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
+
+ spin_lock_irq(&comp_ev_file->ev_queue.lock);
+ list_for_each_entry_safe(entry, tmp,
+ &comp_ev_file->ev_queue.event_list, list) {
+ list_del(&entry->list);
+ kvfree(entry);
+ }
+ spin_unlock_irq(&comp_ev_file->ev_queue.lock);
return 0;
};
-static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
- enum rdma_remove_reason why)
+static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
{
struct devx_async_event_file *ev_file =
container_of(uobj, struct devx_async_event_file,
uobj);
+ struct devx_event_subscription *event_sub, *event_sub_tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
spin_lock_irq(&ev_file->lock);
ev_file->is_destroyed = 1;
- spin_unlock_irq(&ev_file->lock);
+ /* free the pending events allocation */
+ if (ev_file->omit_data) {
+ struct devx_event_subscription *event_sub, *tmp;
+
+ list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
+ event_list)
+ list_del_init(&event_sub->event_list);
+
+ } else {
+ struct devx_async_event_data *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ spin_unlock_irq(&ev_file->lock);
wake_up_interruptible(&ev_file->poll_wait);
+
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
+ /* delete the subscriptions which are related to this FD */
+ list_for_each_entry_safe(event_sub, event_sub_tmp,
+ &ev_file->subscribed_events_list, file_list) {
+ devx_cleanup_subscription(dev, event_sub);
+ list_del_rcu(&event_sub->file_list);
+ /* subscription may not be used by the read API any more */
+ call_rcu(&event_sub->rcu, devx_free_subscription);
+ }
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
+
+ put_device(&dev->ib_dev.dev);
return 0;
};
@@ -2913,7 +2899,7 @@ DECLARE_UVERBS_NAMED_METHOD(
DECLARE_UVERBS_NAMED_OBJECT(
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
- devx_hot_unplug_async_cmd_event_file,
+ devx_async_cmd_event_destroy_uobj,
&devx_async_cmd_event_fops, "[devx_async_cmd]",
O_RDONLY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
@@ -2931,7 +2917,7 @@ DECLARE_UVERBS_NAMED_METHOD(
DECLARE_UVERBS_NAMED_OBJECT(
MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
- devx_hot_unplug_async_event_file,
+ devx_async_event_destroy_uobj,
&devx_async_event_fops, "[devx_async_event]",
O_RDONLY),
&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 12737c509aa2..61475b571531 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index ac4d8d1b9a07..1ae6fd95acaa 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -507,8 +507,7 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
if (ret) {
/* Undo the effect of adding the outstanding wr */
- gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
- gsi->cap.max_send_wr;
+ gsi->outstanding_pi--;
goto err;
}
spin_unlock_irqrestore(&gsi->lock, flags);
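
With the modulo gone, outstanding_pi becomes a free-running counter (matching outstanding_ci), so wraparound comparisons such as pi - ci stay correct; the ring slot is derived only where the index is used. Illustrative shape, not the exact driver code:

	/* free-running producer index, masked only at the point of use: */
	wr = &gsi->outstanding_wrs[gsi->outstanding_pi % gsi->cap.max_send_wr];
	gsi->outstanding_pi++;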
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 4f0edd4832bd..b61165359954 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -164,8 +164,10 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
in->node_guid = guid;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
- if (!err)
+ if (!err) {
vfs_ctx[vf].node_guid = guid;
+ vfs_ctx[vf].node_guid_valid = 1;
+ }
kfree(in);
return err;
}
@@ -185,8 +187,10 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
in->port_guid = guid;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
- if (!err)
+ if (!err) {
vfs_ctx[vf].port_guid = guid;
+ vfs_ctx[vf].port_guid_valid = 1;
+ }
kfree(in);
return err;
}
@@ -208,20 +212,12 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
{
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_hca_vport_context *rep;
- int err;
-
- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
- if (!rep)
- return -ENOMEM;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
- err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
- if (err)
- goto ex;
+ node_guid->guid =
+ vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
+ port_guid->guid =
+ vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;
- port_guid->guid = rep->port_guid;
- node_guid->guid = rep->node_guid;
-ex:
- kfree(rep);
- return err;
+ return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 997cbfe4b90c..e4bcfa81b70a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
-#include <asm/pat.h>
+#include <asm/memtype.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -815,6 +815,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
{
+ size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
@@ -828,12 +829,12 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
u64 max_tso;
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
- if (uhw->outlen && uhw->outlen < resp_len)
+ if (uhw_outlen && uhw_outlen < resp_len)
return -EINVAL;
resp.response_length = resp_len;
- if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
+ if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
memset(props, 0, sizeof(*props));
@@ -897,7 +898,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->raw_packet_caps |=
IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
- if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
if (max_tso) {
resp.tso_caps.max_tso = 1 << max_tso;
@@ -907,7 +908,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
resp.rss_caps.rx_hash_function =
MLX5_RX_HASH_FUNC_TOEPLITZ;
resp.rss_caps.rx_hash_fields_mask =
@@ -927,9 +928,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.rss_caps);
}
} else {
- if (field_avail(typeof(resp), tso_caps, uhw->outlen))
+ if (field_avail(typeof(resp), tso_caps, uhw_outlen))
resp.response_length += sizeof(resp.tso_caps);
- if (field_avail(typeof(resp), rss_caps, uhw->outlen))
+ if (field_avail(typeof(resp), rss_caps, uhw_outlen))
resp.response_length += sizeof(resp.rss_caps);
}
@@ -1014,6 +1015,23 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
+ if (!uhw) {
+ /* ODP for kernel QPs is not implemented for receive
+ * WQEs and SRQ WQEs
+ */
+ props->odp_caps.per_transport_caps.rc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.uc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.ud_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.xrc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ }
}
if (MLX5_CAP_GEN(mdev, cd))
@@ -1054,7 +1072,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MAX_CQ_PERIOD;
}
- if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.cqe_comp_caps);
if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
@@ -1072,7 +1090,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
@@ -1091,7 +1109,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
- uhw->outlen)) {
+ uhw_outlen)) {
if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
resp.mlx5_ib_support_multi_pkt_send_wqes =
MLX5_IB_ALLOW_MPW;
@@ -1104,7 +1122,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), flags, uhw->outlen)) {
+ if (field_avail(typeof(resp), flags, uhw_outlen)) {
resp.response_length += sizeof(resp.flags);
if (MLX5_CAP_GEN(mdev, cqe_compression_128))
@@ -1120,8 +1138,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
- if (field_avail(typeof(resp), sw_parsing_caps,
- uhw->outlen)) {
+ if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.sw_parsing_caps);
if (MLX5_CAP_ETH(mdev, swp)) {
resp.sw_parsing_caps.sw_parsing_offloads |=
@@ -1141,7 +1158,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
@@ -1164,8 +1181,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), tunnel_offloads_caps,
- uhw->outlen)) {
+ if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
resp.response_length += sizeof(resp.tunnel_offloads_caps);
if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
resp.tunnel_offloads_caps |=
@@ -1186,7 +1202,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
- if (uhw->outlen) {
+ if (uhw_outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -2078,6 +2094,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
{
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+ struct mlx5_var_table *var_table = &dev->var_table;
struct mlx5_ib_dm *mdm;
switch (mentry->mmap_flag) {
@@ -2087,6 +2104,12 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
mdm->size);
kfree(mdm);
break;
+ case MLX5_IB_MMAP_TYPE_VAR:
+ mutex_lock(&var_table->bitmap_lock);
+ clear_bit(mentry->page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+ kfree(mentry);
+ break;
default:
WARN_ON(true);
}
@@ -2246,7 +2269,10 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
mentry = to_mmmap(entry);
pfn = (mentry->address >> PAGE_SHIFT);
- prot = pgprot_writecombine(vma->vm_page_prot);
+ if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR)
+ prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ prot = pgprot_writecombine(vma->vm_page_prot);
ret = rdma_user_mmap_io(ucontext, vma, pfn,
entry->npages * PAGE_SIZE,
prot,
@@ -2255,6 +2281,15 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
return ret;
}
+static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
+{
+ u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
+ u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
+
+ return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
+ (index & 0xFF)) << PAGE_SHIFT;
+}
+
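
The helper re-encodes the rdma_entry's start_pgoff (command in the high 16 bits, index in the low 16) into the legacy user-visible offset layout, where the 8-bit command field sits between the two halves of the index. A worked example, assuming PAGE_SHIFT = 12 and MLX5_IB_MMAP_CMD_SHIFT = 8:

	/* start_pgoff = 0x00070123  ->  cmd = 0x7, index = 0x0123
	 * offset = (((0x0123 >> 8) << 16) | (0x7 << 8) | (0x0123 & 0xFF)) << 12
	 *        = (0x010000 | 0x700 | 0x23) << 12
	 *        = 0x010723 << 12 = 0x10723000
	 */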
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
@@ -3276,12 +3311,14 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
int num_entries, int num_groups,
u32 flags)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
- ft = mlx5_create_auto_grouped_flow_table(ns, priority,
- num_entries,
- num_groups,
- 0, flags);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = num_entries;
+ ft_attr.flags = flags;
+ ft_attr.autogroup.max_num_groups = num_groups;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return ERR_CAST(ft);
@@ -4771,7 +4808,6 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
if (!pprops)
@@ -4781,7 +4817,7 @@ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
if (!dprops)
goto out;
- err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
+ err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
if (err) {
mlx5_ib_warn(dev, "query_device failed %d\n", err);
goto out;
@@ -5351,6 +5387,14 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
INIT_Q_COUNTER(req_cqe_flush_error),
};
+static const struct mlx5_ib_counter roce_accl_cnts[] = {
+ INIT_Q_COUNTER(roce_adp_retrans),
+ INIT_Q_COUNTER(roce_adp_retrans_to),
+ INIT_Q_COUNTER(roce_slow_restart),
+ INIT_Q_COUNTER(roce_slow_restart_cnps),
+ INIT_Q_COUNTER(roce_slow_restart_trans),
+};
+
#define INIT_EXT_PPCNT_COUNTER(_name) \
{ .name = #_name, .offset = \
MLX5_BYTE_OFF(ppcnt_reg, \
@@ -5399,6 +5443,9 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
num_counters += ARRAY_SIZE(extended_err_cnts);
+ if (MLX5_CAP_GEN(dev->mdev, roce_accl))
+ num_counters += ARRAY_SIZE(roce_accl_cnts);
+
cnts->num_q_counters = num_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
@@ -5459,6 +5506,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
}
}
+ if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
+ for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
+ names[j] = roce_accl_cnts[i].name;
+ offsets[j] = roce_accl_cnts[i].offset;
+ }
+ }
+
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
names[j] = cong_cnts[i].name;
@@ -6034,6 +6088,145 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
+static int var_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_user_mmap_entry *obj = uobject->object;
+
+ rdma_user_mmap_entry_remove(&obj->rdma_entry);
+ return 0;
+}
+
+static struct mlx5_user_mmap_entry *
+alloc_var_entry(struct mlx5_ib_ucontext *c)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_var_table *var_table;
+ u32 page_idx;
+ int err;
+
+ var_table = &to_mdev(c->ibucontext.device)->var_table;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&var_table->bitmap_lock);
+ page_idx = find_first_zero_bit(var_table->bitmap,
+ var_table->num_var_hw_entries);
+ if (page_idx >= var_table->num_var_hw_entries) {
+ err = -ENOSPC;
+ mutex_unlock(&var_table->bitmap_lock);
+ goto end;
+ }
+
+ set_bit(page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+
+ entry->address = var_table->hw_start_addr +
+ (page_idx * var_table->stride_size);
+ entry->page_idx = page_idx;
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
+
+ err = rdma_user_mmap_entry_insert_range(
+ &c->ibucontext, &entry->rdma_entry, var_table->stride_size,
+ MLX5_IB_MMAP_OFFSET_START << 16,
+ (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mutex_lock(&var_table->bitmap_lock);
+ clear_bit(page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ entry = alloc_var_entry(c);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_VAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_VAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_VAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
+ UVERBS_TYPE_ALLOC_IDR(var_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
+
+static bool var_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
+}
+
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_dm,
UVERBS_OBJECT_DM,
@@ -6056,14 +6249,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
enum mlx5_ib_uapi_flow_action_flags));
static const struct uapi_definition mlx5_ib_defs[] = {
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
-#endif
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
{}
};
@@ -6335,6 +6528,35 @@ static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
.reg_dm_mr = mlx5_ib_reg_dm_mr,
};
+static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_var_table *var_table = &dev->var_table;
+ u8 log_doorbell_bar_size;
+ u8 log_doorbell_stride;
+ u64 bar_size;
+
+ log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+ log_doorbell_bar_size);
+ log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+ log_doorbell_stride);
+ var_table->hw_start_addr = dev->mdev->bar_addr +
+ MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
+ doorbell_bar_offset);
+ bar_size = (1ULL << log_doorbell_bar_size) * 4096;
+ var_table->stride_size = 1ULL << log_doorbell_stride;
+ var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
+ mutex_init(&var_table->bitmap_lock);
+ var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
+ GFP_KERNEL);
+ return (var_table->bitmap) ? 0 : -ENOMEM;
+}
+
+static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
+{
+ bitmap_free(dev->var_table.bitmap);
+}
+
static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -6422,6 +6644,13 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
mutex_init(&dev->lb.mutex);
+ if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+ err = mlx5_ib_init_var_table(dev);
+ if (err)
+ return err;
+ }
+
dev->ib_dev.use_cq_dim = true;
return 0;
@@ -6725,6 +6954,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
{
+ dev->ib_active = false;
+
/* Number of stages to cleanup */
while (stage) {
stage--;
@@ -6770,7 +7001,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_flow_db_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_CAPS,
mlx5_ib_stage_caps_init,
- NULL),
+ mlx5_ib_stage_caps_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
mlx5_ib_stage_non_default_cb,
NULL),
@@ -6827,7 +7058,7 @@ const struct mlx5_ib_profile raw_eth_profile = {
mlx5_ib_stage_flow_db_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_CAPS,
mlx5_ib_stage_caps_init,
- NULL),
+ mlx5_ib_stage_caps_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
mlx5_ib_stage_raw_eth_non_default_cb,
NULL),
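
The VAR allocator added above claims doorbell slots with a bitmap guarded by a mutex: find_first_zero_bit() picks the first free slot, set_bit() claims it, and the slot index times stride_size yields the doorbell address. A minimal userspace sketch of the same pattern; var_alloc()/var_free() and the table size are made-up stand-ins, not the kernel API:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 64	/* stand-in for num_var_hw_entries */

static uint64_t bitmap;	/* one bit per hardware slot */
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Claim the first free slot, or return -1 if the table is full. */
static int var_alloc(void)
{
	int idx = -1;

	pthread_mutex_lock(&bitmap_lock);
	for (int i = 0; i < NUM_ENTRIES; i++) {
		if (!(bitmap & (1ULL << i))) {
			bitmap |= 1ULL << i;
			idx = i;
			break;
		}
	}
	pthread_mutex_unlock(&bitmap_lock);
	return idx;
}

static void var_free(int idx)
{
	pthread_mutex_lock(&bitmap_lock);
	bitmap &= ~(1ULL << idx);
	pthread_mutex_unlock(&bitmap_lock);
}

int main(void)
{
	int a = var_alloc(), b = var_alloc();

	/* slot index * stride = offset into the doorbell BAR */
	printf("claimed slots %d and %d\n", a, b);
	var_free(a);
	var_free(b);
	return 0;
}

The kernel version additionally has to unwind the bit when rdma_user_mmap_entry_insert_range() fails, which is what the err_insert label above handles.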
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 048f4e974a61..b90a3649e7d1 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -101,18 +101,6 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
*count = i;
}
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
-}
-
/*
* Populate the given array with bus addresses from the umem.
*
@@ -139,19 +127,6 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
struct scatterlist *sg;
int entry;
- if (umem->is_odp) {
- WARN_ON(shift != 0);
- WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
-
- for (i = 0; i < num_pages; ++i) {
- dma_addr_t pa =
- to_ib_umem_odp(umem)->dma_list[offset + i];
-
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
- }
- return;
- }
-
i = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b06f32ff5748..d9bffcc93587 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -72,6 +72,11 @@
#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
enum {
+ MLX5_IB_MMAP_OFFSET_START = 9,
+ MLX5_IB_MMAP_OFFSET_END = 255,
+};
+
+enum {
MLX5_IB_MMAP_CMD_SHIFT = 8,
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
@@ -120,6 +125,7 @@ enum {
enum mlx5_ib_mmap_type {
MLX5_IB_MMAP_TYPE_MEMIC = 1,
+ MLX5_IB_MMAP_TYPE_VAR = 2,
};
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
@@ -563,6 +569,7 @@ struct mlx5_user_mmap_entry {
struct rdma_user_mmap_entry rdma_entry;
u8 mmap_flag;
u64 address;
+ u32 page_idx;
};
struct mlx5_ib_dm {
@@ -959,6 +966,15 @@ struct mlx5_devx_event_table {
struct xarray event_xa;
};
+struct mlx5_var_table {
+ /* serialize updating the bitmap */
+ struct mutex bitmap_lock;
+ unsigned long *bitmap;
+ u64 hw_start_addr;
+ u32 stride_size;
+ u64 num_var_hw_entries;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
@@ -1013,6 +1029,7 @@ struct mlx5_ib_dev {
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
+ struct mlx5_var_table var_table;
struct xarray sig_mrs;
};
@@ -1153,12 +1170,12 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
- int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
- int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
- void *buffer, int buflen, size_t *bc);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
@@ -1276,8 +1293,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
- size_t nentries, struct mlx5_ib_mr *mr, int flags);
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1293,9 +1310,8 @@ static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
-static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
- size_t nentries, struct mlx5_ib_mr *mr,
- int flags) {}
+static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags) {}
static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1363,14 +1379,14 @@ int mlx5_ib_fill_res_entry(struct sk_buff *msg,
int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res);
+extern const struct uapi_definition mlx5_ib_devx_defs[];
+extern const struct uapi_definition mlx5_ib_flow_defs[];
+
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
-const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
-extern const struct uapi_definition mlx5_ib_devx_defs[];
-extern const struct uapi_definition mlx5_ib_flow_defs[];
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_flow_context *flow_context,
@@ -1378,7 +1394,6 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
-int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
@@ -1507,7 +1522,7 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
- bool do_modify_atomic)
+ bool do_modify_atomic, int access_flags)
{
if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
return false;
@@ -1517,6 +1532,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
return false;
+ if (access_flags & IB_ACCESS_RELAXED_ORDERING)
+ return false;
+
return true;
}
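
The new MLX5_IB_MMAP_OFFSET_START/END enum pairs with the rdma_user_mmap_entry_insert_range() call in alloc_var_entry(): VAR entries live at page offsets whose upper "command" bits fall in 9..255, leaving commands 0..8 for the legacy MLX5_IB_MMAP_CMD_* encoding. A quick standalone check of that range arithmetic, mirroring the constants shown in the hunks:

#include <stdio.h>

enum { MLX5_IB_MMAP_OFFSET_START = 9, MLX5_IB_MMAP_OFFSET_END = 255 };

int main(void)
{
	unsigned long first = (unsigned long)MLX5_IB_MMAP_OFFSET_START << 16;
	unsigned long last = ((unsigned long)MLX5_IB_MMAP_OFFSET_END << 16)
			     + (1UL << 16) - 1;

	/* the pgoff window reserved for rdma_user_mmap entries */
	printf("entry pgoff range: 0x%lx - 0x%lx (%lu page offsets)\n",
	       first, last, last - first + 1);
	return 0;
}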
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ea8bfc3e2d8d..6fa0a83c19de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -147,7 +147,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
break;
}
mr->order = ent->order;
- mr->allocated_from_cache = 1;
+ mr->allocated_from_cache = true;
mr->dev = dev;
MLX5_SET(mkc, mkc, free, 1);
@@ -661,12 +661,21 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET64(mkc, mkc, start_addr, start_addr);
@@ -737,10 +746,9 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
return MLX5_MAX_UMR_SHIFT;
}
-static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
- u64 start, u64 length, int access_flags,
- struct ib_umem **umem, int *npages, int *page_shift,
- int *ncont, int *order)
+static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
+ int access_flags, struct ib_umem **umem, int *npages,
+ int *page_shift, int *ncont, int *order)
{
struct ib_umem *u;
@@ -749,7 +757,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (access_flags & IB_ACCESS_ON_DEMAND) {
struct ib_umem_odp *odp;
- odp = ib_umem_odp_get(udata, start, length, access_flags,
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
&mlx5_mn_ops);
if (IS_ERR(odp)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
@@ -765,7 +773,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags);
+ u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -868,36 +876,6 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(
return mr;
}
-static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
- void *xlt, int page_shift, size_t size,
- int flags)
-{
- struct mlx5_ib_dev *dev = mr->dev;
- struct ib_umem *umem = mr->umem;
-
- if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
- if (!umr_can_use_indirect_mkey(dev))
- return -EPERM;
- mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
- return npages;
- }
-
- npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
-
- if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
- __mlx5_ib_populate_pas(dev, umem, page_shift,
- idx, npages, xlt,
- MLX5_IB_MTT_PRESENT);
- /* Clear padding after the pages
- * brought from the umem.
- */
- memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
- size - npages * sizeof(struct mlx5_mtt));
- }
-
- return npages;
-}
-
#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
@@ -921,6 +899,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_t pages_mapped = 0;
size_t pages_to_map = 0;
size_t pages_iter = 0;
+ size_t size_to_map = 0;
gfp_t gfp;
bool use_emergency_page = false;
@@ -967,6 +946,15 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
goto free_xlt;
}
+ if (mr->umem->is_odp) {
+ if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+ pages_to_map = min_t(size_t, pages_to_map, max_pages);
+ }
+ }
+
sg.addr = dma;
sg.lkey = dev->umrc.pd->local_dma_lkey;
@@ -989,14 +977,22 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
pages_mapped < pages_to_map && !err;
pages_mapped += pages_iter, idx += pages_iter) {
npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
+ size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
- npages = populate_xlt(mr, idx, npages, xlt,
- page_shift, size, flags);
-
+ if (mr->umem->is_odp) {
+ mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ } else {
+ __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
+ npages, xlt,
+ MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the pages
+ * brought from the umem.
+ */
+ memset(xlt + size_to_map, 0, size - size_to_map);
+ }
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
- sg.length = ALIGN(npages * desc_size,
- MLX5_UMR_MTT_ALIGNMENT);
+ sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
if (pages_mapped + pages_iter >= pages_to_map) {
if (flags & MLX5_IB_UPD_XLT_ENABLE)
@@ -1075,6 +1071,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, free, !populate);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1247,6 +1249,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
length == U64_MAX) {
+ if (virt_addr != start)
+ return ERR_PTR(-EINVAL);
if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
@@ -1257,13 +1261,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr;
}
- err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
+ err = mr_umem_get(dev, start, length, access_flags, &umem,
&npages, &page_shift, &ncont, &order);
if (err < 0)
return ERR_PTR(err);
- use_umr = mlx5_ib_can_use_umr(dev, true);
+ use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1424,14 +1428,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
mr->umem = NULL;
- err = mr_umem_get(dev, udata, addr, len, access_flags,
- &mr->umem, &npages, &page_shift, &ncont,
- &order);
+ err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
+ &npages, &page_shift, &ncont, &order);
if (err)
goto err;
}
- if (!mlx5_ib_can_use_umr(dev, true) ||
+ if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
(flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
/*
* UMR can't be used - MKey needs to be replaced.
@@ -1452,7 +1455,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- mr->allocated_from_cache = 0;
+ mr->allocated_from_cache = false;
} else {
/*
* Send a UMR WQE
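
The relaxed-ordering changes in mr.c and mlx5_ib.h are two halves of one rule: MKeys may be created with relaxed_ordering_read/write set when the device caps allow it, but such MRs must not be updated through UMR, so mlx5_ib_can_use_umr() now takes access_flags and bails out on IB_ACCESS_RELAXED_ORDERING. Reduced to a standalone predicate (the flag value is a placeholder; the logic mirrors the hunk):

#include <stdbool.h>
#include <stdio.h>

#define ACCESS_RELAXED_ORDERING (1 << 20) /* placeholder bit value */

struct caps {
	bool umr_modify_entity_size_disabled;
	bool umr_modify_atomic_disabled;
};

/* mirrors mlx5_ib_can_use_umr(): relaxed ordering forces a full re-reg */
static bool can_use_umr(const struct caps *c, bool do_modify_atomic,
			int access_flags)
{
	if (c->umr_modify_entity_size_disabled)
		return false;
	if (do_modify_atomic && c->umr_modify_atomic_disabled)
		return false;
	if (access_flags & ACCESS_RELAXED_ORDERING)
		return false;
	return true;
}

int main(void)
{
	struct caps c = { .umr_modify_entity_size_disabled = false,
			  .umr_modify_atomic_disabled = false };

	printf("umr ok: %d, with relaxed ordering: %d\n",
	       can_use_umr(&c, true, 0),
	       can_use_umr(&c, true, ACCESS_RELAXED_ORDERING));
	return 0;
}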
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f924250f80c2..4216814ba871 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,8 +93,8 @@ struct mlx5_pagefault {
static u64 mlx5_imr_ksm_entries;
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
- struct mlx5_ib_mr *imr, int flags)
+static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
{
struct mlx5_klm *end = pklm + nentries;
@@ -144,6 +144,44 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
}
}
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+ u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+ if (umem_dma & ODP_READ_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_READ;
+ if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_WRITE;
+
+ return mtt_entry;
+}
+
+static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ dma_addr_t pa;
+ size_t i;
+
+ if (flags & MLX5_IB_UPD_XLT_ZAP)
+ return;
+
+ for (i = 0; i < nentries; i++) {
+ pa = odp->dma_list[idx + i];
+ pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ }
+}
+
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+ populate_klm(xlt, idx, nentries, mr, flags);
+ } else {
+ populate_mtt(xlt, idx, nentries, mr, flags);
+ }
+}
+
static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
@@ -342,7 +380,7 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
memset(caps, 0, sizeof(*caps));
if (!MLX5_CAP_GEN(dev->mdev, pg) ||
- !mlx5_ib_can_use_umr(dev, true))
+ !mlx5_ib_can_use_umr(dev, true, 0))
return;
caps->general_caps = IB_ODP_SUPPORT;
@@ -497,7 +535,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct mlx5_ib_mr *imr;
int err;
- umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
+ umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
@@ -624,11 +662,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
unsigned long current_seq;
u64 access_mask;
- u64 start_idx, page_mask;
+ u64 start_idx;
page_shift = odp->page_shift;
- page_mask = ~(BIT(page_shift) - 1);
- start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
@@ -767,11 +804,19 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ if (unlikely(io_virt < mr->mmkey.iova))
+ return -EFAULT;
+
if (!odp->is_implicit_odp) {
- if (unlikely(io_virt < ib_umem_start(odp) ||
- ib_umem_end(odp) - io_virt < bcnt))
+ u64 user_va;
+
+ if (check_add_overflow(io_virt - mr->mmkey.iova,
+ (u64)odp->umem.address, &user_va))
+ return -EFAULT;
+ if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
return -EFAULT;
- return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
flags);
}
return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
@@ -1237,15 +1282,15 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
wqe = wqe_start;
qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
if (qp && sq) {
- ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_initiator_pfault_handler(
dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
} else if (qp && !sq) {
- ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_responder_pfault_handler_rq(
@@ -1253,8 +1298,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
} else if (!qp) {
struct mlx5_ib_srq *srq = res_to_srq(res);
- ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
- &bytes_copied);
+ ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
if (ret)
goto read_user;
ret = mlx5_ib_mr_responder_pfault_handler_srq(
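
populate_mtt(), moved here from mem.c, translates the permission bits that ODP keeps in the low bits of each DMA address into MTT read/write flags before the entries are pushed to hardware. A self-contained illustration of that translation; the mask values below are placeholders, not the kernel's ODP_*/MLX5_IB_MTT_* constants:

#include <stdint.h>
#include <stdio.h>

/* placeholder layout: low two bits carry R/W, the rest is the address */
#define DMA_ADDR_MASK	(~0x3ULL)
#define READ_ALLOWED	0x1ULL
#define WRITE_ALLOWED	0x2ULL
#define MTT_READ	0x1ULL
#define MTT_WRITE	0x2ULL

static uint64_t dma_to_mtt(uint64_t umem_dma)
{
	uint64_t mtt = umem_dma & DMA_ADDR_MASK;

	if (umem_dma & READ_ALLOWED)
		mtt |= MTT_READ;
	if (umem_dma & WRITE_ALLOWED)
		mtt |= MTT_WRITE;
	return mtt;
}

int main(void)
{
	uint64_t dma = 0x1000 | READ_ALLOWED | WRITE_ALLOWED;

	printf("mtt entry: 0x%llx\n", (unsigned long long)dma_to_mtt(dma));
	return 0;
}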
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7e51870e9e01..957f3a52589b 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -129,14 +129,10 @@ static int is_sqp(enum ib_qp_type qp_type)
*
* Return: zero on success, or an error code.
*/
-static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
- void *buffer,
- u32 buflen,
- int wqe_index,
- int wq_offset,
- int wq_wqe_cnt,
- int wq_wqe_shift,
- int bcnt,
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+ size_t buflen, int wqe_index,
+ int wq_offset, int wq_wqe_cnt,
+ int wq_wqe_shift, int bcnt,
size_t *bytes_copied)
{
size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
@@ -160,11 +156,43 @@ static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
return 0;
}
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied = 0;
+ size_t wqe_length;
+ void *p;
+ int ds;
+
+ wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+ /* read the control segment first */
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ ctrl = p;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* read rest of WQE if it spreads over more than one stride */
+ while (bytes_copied < wqe_length) {
+ size_t copy_length =
+ min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+ if (!copy_length)
+ break;
+
+ memcpy(buffer + bytes_copied, p, copy_length);
+ bytes_copied += copy_length;
+
+ wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ }
+ *bc = bytes_copied;
+ return 0;
+}
+
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
@@ -176,18 +204,10 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
int ret;
int ds;
- if (buflen < sizeof(*ctrl))
- return -EINVAL;
-
/* at first read as much as possible */
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
- buflen,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
&bytes_copied);
if (ret)
return ret;
@@ -210,13 +230,9 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
* so read the remaining bytes starting
* from wqe_index 0
*/
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer + bytes_copied,
- buflen - bytes_copied,
- 0,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+ buflen - bytes_copied, 0, wq->offset,
+ wq->wqe_cnt, wq->wqe_shift,
wqe_length - bytes_copied,
&bytes_copied2);
@@ -226,11 +242,24 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
return 0;
}
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+
+ if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (!umem)
+ return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+ buflen, bc);
+
+ return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct ib_umem *umem = base->ubuffer.umem;
@@ -238,14 +267,9 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
size_t bytes_copied;
int ret;
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- wq->offset,
- wq->wqe_cnt,
- wq->wqe_shift,
- buflen,
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
&bytes_copied);
if (ret)
@@ -254,25 +278,33 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
return 0;
}
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
- int wqe_index,
- void *buffer,
- int buflen,
- size_t *bc)
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t wqe_size = 1 << wq->wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
{
struct ib_umem *umem = srq->umem;
size_t bytes_copied;
int ret;
- ret = mlx5_ib_read_user_wqe_common(umem,
- buffer,
- buflen,
- wqe_index,
- 0,
- srq->msrq.max,
- srq->msrq.wqe_shift,
- buflen,
- &bytes_copied);
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+ srq->msrq.max, srq->msrq.wqe_shift,
+ buflen, &bytes_copied);
if (ret)
return ret;
@@ -280,6 +312,21 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
return 0;
}
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
@@ -749,7 +796,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0);
+ *umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +853,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
+ rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -1871,7 +1918,7 @@ static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
{
enum ib_qp_type qpt = init_attr->qp_type;
int scqe_sz;
- bool allow_scat_cqe = 0;
+ bool allow_scat_cqe = false;
if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
return;
@@ -3394,9 +3441,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct mlx5_ib_qp_base *base;
u32 set_id;
- if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
- return 0;
-
if (counter)
set_id = counter->id;
else
@@ -4823,7 +4867,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
- if (!mlx5_ib_can_use_umr(dev, atomic)) {
+ if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
"Fast update of %s for MR is disabled\n",
(MLX5_CAP_GEN(dev->mdev,
@@ -6529,6 +6573,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
*/
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
int err = 0;
@@ -6538,6 +6583,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
goto out;
}
+ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
if (mqp->state == IB_QPS_RTS) {
err = __mlx5_ib_qp_set_counter(qp, counter);
if (!err)
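
The new mlx5_ib_read_kernel_wqe_sq() above walks a power-of-two ring of MLX5_SEND_WQE_BB-sized strides, masking the index with sz_m1 so a WQE that spans several strides is copied across the wrap. A userspace sketch of that wrap-around copy, with made-up ring and stride sizes:

#include <stdio.h>
#include <string.h>

#define STRIDE   64		/* stand-in for MLX5_SEND_WQE_BB */
#define NSTRIDES 8		/* must be a power of two */
#define SZ_M1    (NSTRIDES - 1)

static char ring[NSTRIDES][STRIDE];

/* Copy wqe_len bytes starting at stride 'idx', wrapping at the end. */
static size_t read_wqe(unsigned int idx, size_t wqe_len,
		       char *buf, size_t buflen)
{
	size_t copied = 0;

	idx &= SZ_M1;
	while (copied < wqe_len) {
		size_t chunk = buflen - copied;

		if (chunk > STRIDE)
			chunk = STRIDE;
		if (!chunk)
			break;
		memcpy(buf + copied, ring[idx], chunk);
		copied += chunk;
		idx = (idx + 1) & SZ_M1;
	}
	return copied;
}

int main(void)
{
	char buf[4 * STRIDE];
	size_t n = read_wqe(6, 3 * STRIDE, buf, sizeof(buf));

	printf("copied %zu bytes across the wrap\n", n);
	return 0;
}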
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 62939df3c692..b1a8a9175040 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
+ srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index edccfd6e178f..78a48aea3faf 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+ ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
FOLL_WRITE | FOLL_LONGTERM, pages);
if (ret < 0)
goto out;
@@ -482,7 +482,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
if (ret < 0) {
- put_user_page(pages[0]);
+ unpin_user_page(pages[0]);
goto out;
}
@@ -490,7 +490,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
mthca_uarc_virt(dev, uar, i));
if (ret) {
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_user_page(sg_page(&db_tab->page[i].mem));
+ unpin_user_page(sg_page(&db_tab->page[i].mem));
goto out;
}
@@ -556,7 +556,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_user_page(sg_page(&db_tab->page[i].mem));
+ unpin_user_page(sg_page(&db_tab->page[i].mem));
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 33002530fee7..ac19d57803b5 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,7 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc);
+ mr->umem = ib_umem_get(pd->device, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 9bc1ca6f6f9e..d47ea675734b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -869,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4cd292966aa9..484b555150e0 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -312,7 +312,18 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
}
ctx->db_mmap_entry = &entry->rdma_entry;
- uresp.dpm_enabled = dev->user_dpm_enabled;
+ if (!dev->user_dpm_enabled)
+ uresp.dpm_flags = 0;
+ else if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
+ else
+ uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
+ QEDR_DPM_TYPE_ROCE_LEGACY;
+
+ uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
+ uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
+ uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
+
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
@@ -772,7 +783,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
+ q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -1415,9 +1426,8 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
if (rc)
return rc;
- srq->prod_umem =
- ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access);
+ srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -2839,7 +2849,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index dd4843379f51..91d64dd71a8a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6630,7 +6630,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
/* vl15 buffers start just after the 4k buffers */
vl15off = dd->physaddr + (dd->piobufbase >> 32) +
dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap_nocache(vl15off,
+ dd->piovl15base = ioremap(vl15off,
NUM_VL15_BUFS * dd->align4k);
if (!dd->piovl15base) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index d4fd8a6cff7b..43c8ee1f46e0 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1759,7 +1759,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
qib_userlen = dd->ureg_align * dd->cfgctxts;
/* Sanity checks passed, now create the new mappings */
- qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+ qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
if (!qib_kregbase)
goto bail;
@@ -1768,7 +1768,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
goto bail_kregbase;
if (qib_userlen) {
- qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+ qib_userbase = ioremap(qib_physaddr + dd->uregbase,
qib_userlen);
if (!qib_userbase)
goto bail_piobase;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 864f2af171f7..3dc6ce033319 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -145,7 +145,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
- dd->kregbase = ioremap_nocache(addr, len);
+ dd->kregbase = ioremap(addr, len);
if (!dd->kregbase)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 6bf764e41891..342e3172ca40 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -40,7 +40,7 @@
static void __qib_release_user_pages(struct page **p, size_t num_pages,
int dirty)
{
- put_user_pages_dirty_lock(p, num_pages, dirty);
+ unpin_user_pages_dirty_lock(p, num_pages, dirty);
}
/**
@@ -108,7 +108,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_read(&current->mm->mmap_sem);
for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(start_page + got * PAGE_SIZE,
+ ret = pin_user_pages(start_page + got * PAGE_SIZE,
num_pages - got,
FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 05190edc2611..a67599b5a550 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -317,7 +317,7 @@ static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
* the caller can ignore this page.
*/
if (put) {
- put_user_page(page);
+ unpin_user_page(page);
} else {
/* coalesce case */
kunmap(page);
@@ -631,7 +631,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
kunmap(pkt->addr[i].page);
if (pkt->addr[i].put_page)
- put_user_page(pkt->addr[i].page);
+ unpin_user_page(pkt->addr[i].page);
else
__free_page(pkt->addr[i].page);
} else if (pkt->addr[i].kvaddr) {
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
else
j = npages;
- ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
+ ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
if (ret != j) {
i = 0;
j = ret;
@@ -706,7 +706,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
/* if error, return all pages not managed by pkt */
free_pages:
while (i < j)
- put_user_page(pages[i++]);
+ unpin_user_page(pages[i++]);
done:
return ret;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 62e6ffa9ad78..bd9f944b68fc 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -75,7 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- put_user_pages_dirty_lock(&page, 1, dirty);
+ unpin_user_pages_dirty_lock(&page, 1, dirty);
usnic_dbg("pa: %pa\n", &pa);
}
kfree(chunk);
@@ -141,7 +141,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = pin_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
gup_flags | FOLL_LONGTERM,
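
Taken together, the mthca, qib, and usnic hunks (and the siw hunk below) are part of the v5.6 tree-wide conversion from get_user_pages*()/put_user_page*() to pin_user_pages*()/unpin_user_page*() for pages pinned as DMA targets. The pairing the new API enforces, shown as an illustrative kernel-style fragment rather than a buildable module:

	struct page *pages[1];
	int ret;

	/* pin for long-term DMA; must be released with unpin_*, not put_page() */
	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
				  FOLL_WRITE | FOLL_LONGTERM, pages);
	if (ret < 0)
		return ret;

	/* ... map the page and hand it to the device ... */

	unpin_user_page(pages[0]);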
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index a26a4fd86bf4..4f6cc0de7ef9 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -135,7 +135,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cq;
}
- cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
+ cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index c61e665ff261..b039f1f00e05 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags);
+ umem = ib_umem_get(pd->device, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index f15809c28f67..9de1281f9a3b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -276,8 +276,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0);
+ qp->rumem =
+ ib_umem_get(pd->device, ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}
- qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
+ qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr,
ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 98c8be71d91d..d330decfb80a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
+ srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index b9a76bf74857..72f6534fbb52 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(pd->device, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3cdf75d0c7a4..7858d499db03 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -61,6 +61,8 @@
#define RVT_RWQ_COUNT_THRESHOLD 16
static void rvt_rc_timeout(struct timer_list *t);
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type);
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -452,40 +454,41 @@ no_qp_table:
}
/**
- * free_all_qps - check for QPs still in use
+ * rvt_free_qp_cb - callback function to reset a qp
+ * @qp: the qp to reset
+ * @v: a 64-bit value
+ *
+ * This function resets the qp and removes it from the
+ * qp hash table.
+ */
+static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
+{
+ unsigned int *qp_inuse = (unsigned int *)v;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ /* Reset the qp and remove it from the qp hash list */
+ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
+
+ /* Increment the qp_inuse count */
+ (*qp_inuse)++;
+}
+
+/**
+ * rvt_free_all_qps - check for QPs still in use
* @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
+ * Return the number of QPs still in use.
*/
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
- unsigned long flags;
- struct rvt_qp *qp;
- unsigned n, qp_inuse = 0;
- spinlock_t *ql; /* work around too long line below */
-
- if (rdi->driver_f.free_all_qps)
- qp_inuse = rdi->driver_f.free_all_qps(rdi);
+ unsigned int qp_inuse = 0;
qp_inuse += rvt_mcast_tree_empty(rdi);
- if (!rdi->qp_dev)
- return qp_inuse;
-
- ql = &rdi->qp_dev->qpt_lock;
- spin_lock_irqsave(ql, flags);
- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
- lockdep_is_held(ql));
- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(ql)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(ql, flags);
- synchronize_rcu();
return qp_inuse;
}
@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
}
/**
- * rvt_reset_qp - initialize the QP state to the reset state
+ * _rvt_reset_qp - initialize the QP state to the reset state
* @qp: the QP to reset
* @type: the QP type
*
* r_lock, s_hlock, and s_lock are required to be held by the caller
*/
-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- enum ib_qp_type type)
+static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
__must_hold(&qp->s_lock)
__must_hold(&qp->s_hlock)
__must_hold(&qp->r_lock)
@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
lockdep_assert_held(&qp->s_lock);
}
+/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: the device info
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
+ * before calling _rvt_reset_qp().
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ _rvt_reset_qp(rdi, qp, type);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+}
+
/**
 * rvt_free_qpn - Free a qpn from the bit map
* @qpt: QP table
* @qpn: queue pair number to free
@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (new_state) {
case IB_QPS_RESET:
if (qp->state != IB_QPS_RESET)
- rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
break;
case IB_QPS_RTR:
@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_hlock);
- spin_lock(&qp->s_lock);
rvt_reset_qp(rdi, qp, ibqp->qp_type);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->s_hlock);
- spin_unlock_irq(&qp->r_lock);
wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 890d7b760d2e..977906cc0d11 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -195,7 +195,14 @@ void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
}
EXPORT_SYMBOL(rvt_get_credit);
-/* rvt_restart_sge - rewind the sge state for a wqe */
+/**
+ * rvt_restart_sge - rewind the sge state for a wqe
+ * @ss: the sge state pointer
+ * @wqe: the wqe to rewind
+ * @len: the data length from the start of the wqe in bytes
+ *
+ * Returns the remaining data length.
+ */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
{
ss->sge = wqe->sg_list[0];
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 116cafc9afcf..4bc88708b355 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
return COMPST_ERROR_RETRY;
@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
if (pkt) {
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 35a2baf2f364..e83c7b518bfa 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access);
+ umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 353c6668249e..f59616b02477 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -34,6 +34,8 @@
#ifndef RXE_PARAM_H
#define RXE_PARAM_H
+#include <uapi/rdma/rdma_user_rxe.h>
+
static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
{
if (mtu < 256)
@@ -64,7 +66,6 @@ enum rxe_device_param {
RXE_PAGE_SIZE_CAP = 0xfffff000,
RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
- RXE_MAX_INLINE_DATA = 400,
RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
| IB_DEVICE_BAD_QKEY_CNTR
| IB_DEVICE_AUTO_PATH_MIG
@@ -77,6 +78,10 @@ enum rxe_device_param {
| IB_DEVICE_MEM_MGT_EXTENSIONS
| IB_DEVICE_ALLOW_USER_UNREG,
RXE_MAX_SGE = 32,
+ RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
+ sizeof(struct ib_sge) * RXE_MAX_SGE,
+ RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE -
+ sizeof(struct rxe_send_wqe),
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
RXE_MAX_LOG_CQE = 15,
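
The rxe_param.h hunk derives the inline-data limit from the WQE layout instead of hard-coding 400 bytes: inline data may occupy the whole SGE array, so RXE_MAX_INLINE_DATA becomes RXE_MAX_WQE_SIZE minus the WQE header. With 32 SGEs of 16 bytes each that works out to 512 bytes; a quick check using a struct that mirrors ib_sge's layout (a stand-in, not the uapi type):

#include <stdint.h>
#include <stdio.h>

/* same layout as struct ib_sge: addr + length + lkey */
struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

#define MAX_SGE 32

int main(void)
{
	/* inline data may reuse the entire SGE area of the WQE */
	size_t max_inline = sizeof(struct sge) * MAX_SGE;

	printf("max inline data: %zu bytes (was 400)\n", max_inline);
	return 0;
}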
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index e2c6d1cedf41..ec21f616ac98 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -237,19 +237,17 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
*/
qp->src_port = RXE_ROCE_V2_SPORT +
(hash_32_generic(qp_num(qp), 14) & 0x3fff);
-
qp->sq.max_wr = init->cap.max_send_wr;
- qp->sq.max_sge = init->cap.max_send_sge;
- qp->sq.max_inline = init->cap.max_inline_data;
- wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
- qp->sq.max_sge * sizeof(struct ib_sge),
- sizeof(struct rxe_send_wqe) +
- qp->sq.max_inline);
+ /* These caps are limited by rxe_qp_chk_cap() done by the caller */
+ wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+ init->cap.max_inline_data);
+ qp->sq.max_sge = init->cap.max_send_sge =
+ wqe_size / sizeof(struct ib_sge);
+ qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
+ wqe_size += sizeof(struct rxe_send_wqe);
- qp->sq.queue = rxe_queue_init(rxe,
- &qp->sq.max_wr,
- wqe_size);
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
if (!qp->sq.queue)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 95834206c80c..92de39c4a7c1 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -408,7 +408,7 @@ struct rxe_dev {
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* guard mmap_offset */
- int mmap_offset;
+ u64 mmap_offset;
atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index b939f489cd46..af5e9f8c0fcd 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -7,6 +7,7 @@
#define _SIW_H
#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
@@ -209,7 +210,6 @@ struct siw_cq {
u32 cq_put;
u32 cq_get;
u32 num_cqe;
- bool kernel_verbs;
struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
u32 id; /* For debugging only */
};
@@ -254,8 +254,8 @@ struct siw_srq {
u32 rq_get;
u32 num_rqe; /* max # of wqe's allowed */
struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
- char armed; /* inform user if limit hit */
- char kernel_verbs; /* '1' if kernel client */
+ bool armed:1; /* inform user if limit hit */
+ bool is_kernel_res:1; /* true if kernel client */
};
struct siw_qp_attrs {
@@ -418,13 +418,11 @@ struct siw_iwarp_tx {
};
struct siw_qp {
+ struct ib_qp base_qp;
struct siw_device *sdev;
- struct ib_qp *ib_qp;
struct kref ref;
- u32 qp_num;
struct list_head devq;
int tx_cpu;
- bool kernel_verbs;
struct siw_qp_attrs attrs;
struct siw_cep *cep;
@@ -472,11 +470,6 @@ struct siw_qp {
struct rcu_head rcu;
};
-struct siw_base_qp {
- struct ib_qp base_qp;
- struct siw_qp *qp;
-};
-
/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
@@ -572,14 +565,9 @@ static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}
-static inline struct siw_base_qp *to_siw_base_qp(struct ib_qp *base_qp)
-{
- return container_of(base_qp, struct siw_base_qp, base_qp);
-}
-
static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
- return to_siw_base_qp(base_qp)->qp;
+ return container_of(base_qp, struct siw_qp, base_qp);
}
static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
@@ -624,7 +612,7 @@ static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
static inline u32 qp_id(struct siw_qp *qp)
{
- return qp->qp_num;
+ return qp->base_qp.qp_num;
}
static inline void siw_qp_get(struct siw_qp *qp)
@@ -735,7 +723,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
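
Dropping struct siw_base_qp and embedding struct ib_qp directly in struct siw_qp turns every conversion into a single container_of(), and qp_num no longer needs to be duplicated in the private struct. The embedding idiom in standalone form, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_qp { unsigned int qp_num; };

struct qp {
	struct base_qp base_qp;	/* embedded, not pointed-to */
	int tx_cpu;
};

static struct qp *to_qp(struct base_qp *base)
{
	return container_of(base, struct qp, base_qp);
}

int main(void)
{
	struct qp q = { .base_qp = { .qp_num = 7 }, .tx_cpu = 1 };
	struct base_qp *b = &q.base_qp;

	printf("qp_num=%u tx_cpu=%d\n", b->qp_num, to_qp(b)->tx_cpu);
	return 0;
}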
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 3bccfef40e7e..c5651a96b196 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -29,7 +29,7 @@
* MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
*/
static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
-static const bool relaxed_ird_negotiation = 1;
+static const bool relaxed_ird_negotiation = true;
static void siw_cm_llp_state_change(struct sock *s);
static void siw_cm_llp_data_ready(struct sock *s);
@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
- if (!cep) {
- WARN_ON(1);
+ if (!cep)
goto out;
- }
+
siw_dbg_cep(cep, "state: %d\n", cep->state);
switch (cep->state) {
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index d8db3bee9da7..d68e37859e73 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -65,7 +65,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
* reaped here, which do not hold a QP reference
* and do not qualify for memory extension verbs.
*/
- if (likely(cq->kernel_verbs)) {
+ if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
if (cqe->flags & SIW_WQE_REM_INVAL) {
wc->ex.invalidate_rkey = cqe->inval_stag;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index c147f0613d95..96ed349c0939 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -244,7 +244,7 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
* siw_qp_id2obj() increments object reference count
*/
siw_qp_put(qp);
- return qp->ib_qp;
+ return &qp->base_qp;
}
return NULL;
}
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e99983f07663..e2061dc0b043 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -63,7 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
bool dirty)
{
- put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
+ unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
}
void siw_umem_release(struct siw_umem *umem, bool dirty)
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
while (nents) {
struct page **plist = &umem->page_chunk[i].plist[got];
- rv = get_user_pages(first_page_va, nents,
+ rv = pin_user_pages(first_page_va, nents,
foll_flags | FOLL_LONGTERM,
plist, NULL);
if (rv < 0)
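The siw_mem.c hunks switch from get_user_pages()/put_user_pages_dirty_lock() to the pin_user_pages() API, which tags long-term DMA pins explicitly so the mm layer can account for them. A hedged kernel-style sketch of the pairing, assuming the v5.6-era signatures used in this hunk:

    #include <linux/mm.h>

    /* Sketch only; caller holds mmap_sem for read, as with get_user_pages(). */
    static long pin_for_dma(unsigned long uaddr, unsigned long npages,
                            struct page **pages)
    {
            long pinned = pin_user_pages(uaddr, npages,
                                         FOLL_WRITE | FOLL_LONGTERM,
                                         pages, NULL);
            if (pinned < 0)
                    return pinned;

            /* ... set up and run DMA against the pinned pages ... */

            unpin_user_pages_dirty_lock(pages, pinned, true);
            return 0;
    }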
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index b4317480cee7..875d36d4b1c6 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1070,8 +1070,8 @@ int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
cqe->imm_data = 0;
cqe->bytes = bytes;
- if (cq->kernel_verbs)
- cqe->base_qp = qp->ib_qp;
+ if (rdma_is_kernel_res(&cq->base_cq.res))
+ cqe->base_qp = &qp->base_qp;
else
cqe->qp_id = qp_id(qp);
@@ -1128,8 +1128,8 @@ int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
cqe->imm_data = 0;
cqe->bytes = bytes;
- if (cq->kernel_verbs) {
- cqe->base_qp = qp->ib_qp;
+ if (rdma_is_kernel_res(&cq->base_cq.res)) {
+ cqe->base_qp = &qp->base_qp;
if (inval_stag) {
cqe_flags |= SIW_WQE_REM_INVAL;
cqe->inval_stag = inval_stag;
@@ -1297,13 +1297,12 @@ void siw_rq_flush(struct siw_qp *qp)
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
{
- int rv = xa_alloc(&sdev->qp_xa, &qp->ib_qp->qp_num, qp, xa_limit_32b,
+ int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
GFP_KERNEL);
if (!rv) {
kref_init(&qp->ref);
qp->sdev = sdev;
- qp->qp_num = qp->ib_qp->qp_num;
siw_dbg_qp(qp, "new QP\n");
}
return rv;
@@ -1312,7 +1311,6 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
void siw_free_qp(struct kref *ref)
{
struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
- struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
struct siw_device *sdev = qp->sdev;
unsigned long flags;
@@ -1335,5 +1333,4 @@ void siw_free_qp(struct kref *ref)
atomic_dec(&sdev->num_qp);
siw_dbg_qp(qp, "free QP\n");
kfree_rcu(qp, rcu);
- kfree(siw_base_qp);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index c0a887240325..9ccce2909ac4 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -68,7 +68,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
return -EFAULT;
}
if (srx->mpa_crc_hd) {
- if (rx_qp(srx)->kernel_verbs) {
+ if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
crypto_shash_update(srx->mpa_crc_hd,
(u8 *)(dest + pg_off), bytes);
kunmap_atomic(dest);
@@ -388,7 +388,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
struct siw_rqe *rqe2 = &srq->recvq[off];
if (!(rqe2->flags & SIW_WQE_VALID)) {
- srq->armed = 0;
+ srq->armed = false;
srq_event = true;
}
}
@@ -1264,7 +1264,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
if (wc_status == SIW_WC_SUCCESS)
wc_status = SIW_WC_GENERAL_ERR;
- } else if (qp->kernel_verbs &&
+ } else if (rdma_is_kernel_res(&qp->base_qp.res) &&
rx_type(wqe) == SIW_OP_READ_LOCAL_INV) {
/*
* Handle any STag invalidation request
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 5d97bba0ce6d..ae92c8080967 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -817,7 +817,7 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
}
} else {
wqe->bytes = wqe->sqe.sge[0].length;
- if (!qp->kernel_verbs) {
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
if (wqe->bytes > SIW_MAX_INLINE) {
rv = -EINVAL;
goto tx_error;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 5fd6d6499b3d..07e30138aaa1 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -303,7 +303,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct ib_udata *udata)
{
struct siw_qp *qp = NULL;
- struct siw_base_qp *siw_base_qp = NULL;
struct ib_device *base_dev = pd->device;
struct siw_device *sdev = to_siw_dev(base_dev);
struct siw_ucontext *uctx =
@@ -357,26 +356,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
- siw_base_qp = kzalloc(sizeof(*siw_base_qp), GFP_KERNEL);
- if (!siw_base_qp) {
- rv = -ENOMEM;
- goto err_out;
- }
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
rv = -ENOMEM;
goto err_out;
}
- siw_base_qp->qp = qp;
- qp->ib_qp = &siw_base_qp->base_qp;
-
init_rwsem(&qp->state_lock);
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
spin_lock_init(&qp->orq_lock);
- qp->kernel_verbs = !udata;
-
rv = siw_qp_add(sdev, qp);
if (rv)
goto err_out;
@@ -389,10 +378,10 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (qp->kernel_verbs)
- qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
- else
+ if (udata)
qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+ else
+ qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
if (qp->sendq == NULL) {
siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
@@ -419,13 +408,14 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
*/
qp->srq = to_siw_srq(attrs->srq);
qp->attrs.rq_size = 0;
- siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
+ siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
+ qp->base_qp.qp_num);
} else if (num_rqe) {
- if (qp->kernel_verbs)
- qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
- else
+ if (udata)
qp->recvq =
vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+ else
+ qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
if (qp->recvq == NULL) {
siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
@@ -492,13 +482,11 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags);
- return qp->ib_qp;
+ return &qp->base_qp;
err_out_xa:
xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
- kfree(siw_base_qp);
-
if (qp) {
if (uctx) {
rdma_user_mmap_entry_remove(qp->sq_entry);
@@ -742,7 +730,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
unsigned long flags;
int rv = 0;
- if (wr && !qp->kernel_verbs) {
+ if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
*bad_wr = wr;
return -EINVAL;
@@ -939,7 +927,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
if (rv <= 0)
goto skip_direct_sending;
- if (qp->kernel_verbs) {
+ if (rdma_is_kernel_res(&qp->base_qp.res)) {
rv = siw_sq_start(qp);
} else {
qp->tx_ctx.in_syscall = 1;
@@ -984,8 +972,8 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
*bad_wr = wr;
return -EOPNOTSUPP; /* what else from errno.h? */
}
- if (!qp->kernel_verbs) {
- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
*bad_wr = wr;
return -EINVAL;
}
@@ -1127,14 +1115,13 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
cq->base_cq.cqe = size;
cq->num_cqe = size;
- if (!udata) {
- cq->kernel_verbs = 1;
- cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
- sizeof(struct siw_cq_ctrl));
- } else {
+ if (udata)
cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
sizeof(struct siw_cq_ctrl));
- }
+ else
+ cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl));
+
if (cq->queue == NULL) {
rv = -ENOMEM;
goto err_out;
@@ -1589,9 +1576,9 @@ int siw_create_srq(struct ib_srq *base_srq,
srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
srq->limit = attrs->srq_limit;
if (srq->limit)
- srq->armed = 1;
+ srq->armed = true;
- srq->kernel_verbs = !udata;
+ srq->is_kernel_res = !udata;
if (udata)
srq->recvq =
@@ -1671,9 +1658,9 @@ int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
rv = -EINVAL;
goto out;
}
- srq->armed = 1;
+ srq->armed = true;
} else {
- srq->armed = 0;
+ srq->armed = false;
}
srq->limit = attrs->srq_limit;
}
@@ -1745,7 +1732,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
unsigned long flags;
int rv = 0;
- if (unlikely(!srq->kernel_verbs)) {
+ if (unlikely(!srq->is_kernel_res)) {
siw_dbg_pd(base_srq->pd,
"[SRQ]: no kernel post_recv for mapped srq\n");
rv = -EINVAL;
@@ -1797,7 +1784,7 @@ out:
void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
struct ib_event event;
- struct ib_qp *base_qp = qp->ib_qp;
+ struct ib_qp *base_qp = &qp->base_qp;
/*
* Do not report asynchronous errors on QP which gets
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e5f438ab716c..4a0d3a9e72e1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1182,7 +1182,7 @@ unref:
return NETDEV_TX_OK;
}
-static void ipoib_timeout(struct net_device *dev)
+static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 0f74dc6d12fa..7a8f24de3631 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -527,7 +527,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
if (unlikely(err))
goto err_reg;
- desc->sig_protected = 1;
+ desc->sig_protected = true;
}
return 0;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 1f4a37a3c2b3..127887c6c03f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1093,7 +1093,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
int ret;
if (desc && desc->sig_protected) {
- desc->sig_protected = 0;
+ desc->sig_protected = false;
ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b273e421e910 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
- isert_info("iscsi_conn %p\n", conn);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
- }
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicited dataout
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
- isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index e4c9bf2ef7e2..4480092c68e0 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -358,7 +358,7 @@ struct opa_veswport_summary_counters {
* @rx_drop_state: received packets in non-forwarding port state
* @rx_logic: other receive errors
*
- * All the above are counters of corresponding erorr conditions.
+ * All the above are counters of corresponding error conditions.
*/
struct opa_veswport_error_counters {
__be16 vp_instance;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b7f7a5f7bd98..cd1181c39ed2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2546,7 +2546,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (lrsp->opcode == SRP_LOGIN_RSP) {
ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
- ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
+ ch->use_imm_data = srp_use_imm_data &&
+ (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
ch->use_imm_data,
target->max_it_iu_size);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 23c782e3d49a..98552749d71c 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2810,8 +2810,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
int resp_len, ret, i;
u8 srp_tm_status;
- BUG_ON(!ch);
-
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEW:
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index d7dd6fcf2db0..cb6e3a5f509c 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client,
*/
client->tail = (client->head - 2) & (client->bufsize - 1);
- client->buffer[client->tail].input_event_sec =
- event->input_event_sec;
- client->buffer[client->tail].input_event_usec =
- event->input_event_usec;
- client->buffer[client->tail].type = EV_SYN;
- client->buffer[client->tail].code = SYN_DROPPED;
- client->buffer[client->tail].value = 0;
+ client->buffer[client->tail] = (struct input_event) {
+ .input_event_sec = event->input_event_sec,
+ .input_event_usec = event->input_event_usec,
+ .type = EV_SYN,
+ .code = SYN_DROPPED,
+ .value = 0,
+ };
client->packet_head = client->tail;
}
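The __pass_event() hunk above replaces five member stores with one compound-literal assignment, so all fields of the SYN_DROPPED slot are written in one statement and none can be forgotten. A standalone sketch of the idiom (field names simplified):

    struct event { long sec, usec; unsigned short type, code; int value; };

    static void write_slot(struct event *slot, long sec, long usec)
    {
            /* One assignment overwrites every field of the slot. */
            *slot = (struct event) {
                    .sec = sec,
                    .usec = usec,
                    .type = 0,      /* EV_SYN in the driver */
                    .code = 3,      /* SYN_DROPPED in the driver */
                    .value = 0,
            };
    }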
@@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
struct evdev_client *client;
int error;
- client = kzalloc(struct_size(client, buffer, bufsize),
- GFP_KERNEL | __GFP_NOWARN);
- if (!client)
- client = vzalloc(struct_size(client, buffer, bufsize));
+ client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
if (!client)
return -ENOMEM;
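evdev_open() above also drops the open-coded kzalloc()-then-vzalloc() fallback in favour of kvzalloc(), which attempts a kmalloc first and transparently falls back to vmalloc for large client buffers. A hedged kernel-style sketch:

    #include <linux/mm.h>

    /* Sketch: one call covers both the small- and large-buffer cases. */
    static void *alloc_client_buffer(size_t size)
    {
            void *buf = kvzalloc(size, GFP_KERNEL);

            /* pair with kvfree(buf), which handles either backing store */
            return buf;
    }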
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 55086279d044..fce43e62dd45 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev,
}
}
- __clear_bit(*old_keycode, dev->keybit);
- __set_bit(ke->keycode, dev->keybit);
-
- for (i = 0; i < dev->keycodemax; i++) {
- if (input_fetch_keycode(dev, i) == *old_keycode) {
- __set_bit(*old_keycode, dev->keybit);
- break; /* Setting the bit twice is useless, so break */
+ if (*old_keycode <= KEY_MAX) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (i = 0; i < dev->keycodemax; i++) {
+ if (input_fetch_keycode(dev, i) == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ /* Setting the bit twice is useless, so break */
+ break;
+ }
}
}
+ __set_bit(ke->keycode, dev->keybit);
return 0;
}
@@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev,
* Simulate keyup event if keycode is not present
* in the keymap anymore
*/
- if (test_bit(EV_KEY, dev->evbit) &&
- !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
- __test_and_clear_bit(old_keycode, dev->key)) {
+ if (old_keycode > KEY_MAX) {
+ dev_warn(dev->dev.parent ?: &dev->dev,
+ "%s: got too big old keycode %#x\n",
+ __func__, old_keycode);
+ } else if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
struct input_value vals[] = {
{ EV_KEY, old_keycode, 0 },
input_value_sync
@@ -1210,13 +1216,12 @@ static int input_proc_devices_open(struct inode *inode, struct file *file)
return seq_open(file, &input_devices_seq_ops);
}
-static const struct file_operations input_devices_fileops = {
- .owner = THIS_MODULE,
- .open = input_proc_devices_open,
- .poll = input_proc_devices_poll,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops input_devices_proc_ops = {
+ .proc_open = input_proc_devices_open,
+ .proc_poll = input_proc_devices_poll,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1274,12 +1279,11 @@ static int input_proc_handlers_open(struct inode *inode, struct file *file)
return seq_open(file, &input_handlers_seq_ops);
}
-static const struct file_operations input_handlers_fileops = {
- .owner = THIS_MODULE,
- .open = input_proc_handlers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops input_handlers_proc_ops = {
+ .proc_open = input_proc_handlers_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
static int __init input_proc_init(void)
@@ -1291,12 +1295,12 @@ static int __init input_proc_init(void)
return -ENOMEM;
entry = proc_create("devices", 0, proc_bus_input_dir,
- &input_devices_fileops);
+ &input_devices_proc_ops);
if (!entry)
goto fail1;
entry = proc_create("handlers", 0, proc_bus_input_dir,
- &input_handlers_fileops);
+ &input_handlers_proc_ops);
if (!entry)
goto fail2;
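The input.c /proc hunks are part of the tree-wide v5.6 conversion from struct file_operations to the dedicated struct proc_ops: the callbacks gain a proc_ prefix and there is no .owner field to fill in. A hedged sketch of the new registration shape, using single_open() for brevity:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "demo\n");
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_show, NULL);
    }

    static const struct proc_ops demo_proc_ops = {
            .proc_open    = demo_open,
            .proc_read    = seq_read,
            .proc_lseek   = seq_lseek,
            .proc_release = single_release,
    };

    /* registered with: proc_create("demo", 0, NULL, &demo_proc_ops); */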
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index bc8c85a52a10..57d435fc5c73 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -30,7 +30,7 @@ struct event_dev {
struct input_dev *input;
int irq;
void __iomem *addr;
- char name[0];
+ char name[];
};
static irqreturn_t events_interrupt(int irq, void *dev_id)
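This hunk, and the matching ones below for gpio_keys, gpio_keys_polled, tca6416 and cyapa_gen5, replace the old GNU zero-length array idiom name[0] with a C99 flexible array member name[]; sizing and allocation are unchanged, but compilers and fortify checks can now reason about the trailing array. A standalone sketch:

    #include <stdlib.h>
    #include <string.h>

    struct event_dev_like {
            int irq;
            char name[];            /* flexible array member, must be last */
    };

    static struct event_dev_like *alloc_dev(const char *name)
    {
            size_t len = strlen(name) + 1;
            struct event_dev_like *d = malloc(sizeof(*d) + len);

            if (d)
                    memcpy(d->name, name, len);
            return d;
    }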
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 1f56d53454b2..53c9ff338dea 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -55,7 +55,7 @@ struct gpio_keys_drvdata {
struct input_dev *input;
struct mutex disable_lock;
unsigned short *keymap;
- struct gpio_button_data data[0];
+ struct gpio_button_data data[];
};
/*
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 6eb0a2f3f9de..c3937d2fc744 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -38,7 +38,7 @@ struct gpio_keys_polled_dev {
const struct gpio_keys_platform_data *pdata;
unsigned long rel_axis_seen[BITS_TO_LONGS(REL_CNT)];
unsigned long abs_axis_seen[BITS_TO_LONGS(ABS_CNT)];
- struct gpio_keys_button_data data[0];
+ struct gpio_keys_button_data data[];
};
static void gpio_keys_button_event(struct input_dev *input,
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
index 53799527dc75..9f809aeb785c 100644
--- a/drivers/input/keyboard/imx_sc_key.c
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work)
return;
}
- state = (bool)msg.state;
+ /*
+ * The response data from SCU firmware is 4 bytes,
+ * but ONLY the first byte is the key state, other
+ * 3 bytes could be some dirty data, so we should
+ * ONLY take the first byte as key state.
+ */
+ state = (bool)(msg.state & 0xff);
if (state ^ priv->keystate) {
priv->keystate = state;
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index f7414091d94e..2fe9dcfe0a6f 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -107,7 +107,7 @@ static int pxa930_rotary_probe(struct platform_device *pdev)
if (!r)
return -ENOMEM;
- r->mmio_base = ioremap_nocache(res->start, resource_size(res));
+ r->mmio_base = ioremap(res->start, resource_size(res));
if (r->mmio_base == NULL) {
dev_err(&pdev->dev, "failed to remap IO memory\n");
err = -ENXIO;
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 27ad73f43451..c155adebf96e 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -195,7 +195,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata));
pdata = &priv->pdata;
- priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
+ priv->iomem_base = ioremap(res->start, resource_size(res));
if (priv->iomem_base == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
error = -ENXIO;
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 2a14769de637..21758767ccf0 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -33,7 +33,7 @@ MODULE_DEVICE_TABLE(i2c, tca6416_id);
struct tca6416_drv_data {
struct input_dev *input;
- struct tca6416_button data[0];
+ struct tca6416_button data[];
};
struct tca6416_keypad_chip {
@@ -48,7 +48,7 @@ struct tca6416_keypad_chip {
int irqnum;
u16 pinmask;
bool use_polling;
- struct tca6416_button buttons[0];
+ struct tca6416_button buttons[];
};
static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 17c1cca74498..c8f87df93a50 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -191,9 +191,10 @@ static ssize_t axp20x_store_attr_shutdown(struct device *dev,
axp20x_pek->info->shutdown_mask, buf, count);
}
-DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup);
-DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
- axp20x_store_attr_shutdown);
+static DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup,
+ axp20x_store_attr_startup);
+static DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
+ axp20x_store_attr_shutdown);
static struct attribute *axp20x_attrs[] = {
&dev_attr_startup.attr,
@@ -279,8 +280,7 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return error;
}
- if (axp20x_pek->axp20x->variant == AXP288_ID)
- enable_irq_wake(axp20x_pek->irq_dbr);
+ device_init_wakeup(&pdev->dev, true);
return 0;
}
@@ -352,6 +352,40 @@ static int axp20x_pek_probe(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused axp20x_pek_suspend(struct device *dev)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+ /*
+ * As nested threaded IRQs are not automatically disabled during
+ * suspend, we must explicitly disable non-wakeup IRQs.
+ */
+ if (device_may_wakeup(dev)) {
+ enable_irq_wake(axp20x_pek->irq_dbf);
+ enable_irq_wake(axp20x_pek->irq_dbr);
+ } else {
+ disable_irq(axp20x_pek->irq_dbf);
+ disable_irq(axp20x_pek->irq_dbr);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused axp20x_pek_resume(struct device *dev)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev)) {
+ disable_irq_wake(axp20x_pek->irq_dbf);
+ disable_irq_wake(axp20x_pek->irq_dbr);
+ } else {
+ enable_irq(axp20x_pek->irq_dbf);
+ enable_irq(axp20x_pek->irq_dbr);
+ }
+
+ return 0;
+}
+
static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
{
struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
@@ -371,6 +405,7 @@ static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops axp20x_pek_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(axp20x_pek_suspend, axp20x_pek_resume)
#ifdef CONFIG_PM_SLEEP
.resume_noirq = axp20x_pek_resume_noirq,
#endif
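The new axp20x-pek suspend/resume pair works around the fact that nested threaded IRQs are not disabled by the core across suspend: wake-capable lines are armed with enable_irq_wake(), everything else is disabled outright so a stray press cannot fire mid-transition. A hedged sketch of the per-IRQ pattern, with the IRQ number passed in for brevity:

    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    static int demo_suspend_irq(struct device *dev, int irq)
    {
            if (device_may_wakeup(dev))
                    enable_irq_wake(irq);   /* keep it armed as a wake source */
            else
                    disable_irq(irq);       /* silence it until resume */
            return 0;
    }

Resume mirrors this exactly with disable_irq_wake()/enable_irq(), as the hunk above shows.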
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 83368f1e7c4e..4650f4a94989 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
int retval = 0;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);
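A timeout of 0 in usb_control_msg() means "wait forever", so a wedged remote could previously hang keyspan_setup() indefinitely; the three hunks above bound each request with USB_CTRL_SET_TIMEOUT (5 seconds). A hedged sketch, with the request and value fields purely illustrative:

    #include <linux/usb.h>

    static int demo_vendor_request(struct usb_device *dev)
    {
            return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                                   0x11,                   /* bRequest */
                                   USB_TYPE_VENDOR | USB_DIR_OUT,
                                   0x5601, 0, NULL, 0,
                                   USB_CTRL_SET_TIMEOUT);  /* bounded wait */
    }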
diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c
index 4d875f2ac13d..ee55f22dbca5 100644
--- a/drivers/input/misc/max77650-onkey.c
+++ b/drivers/input/misc/max77650-onkey.c
@@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
return input_register_device(onkey->input);
}
+static const struct of_device_id max77650_onkey_of_match[] = {
+ { .compatible = "maxim,max77650-onkey" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
static struct platform_driver max77650_onkey_driver = {
.driver = {
.name = "max77650-onkey",
+ .of_match_table = max77650_onkey_of_match,
},
.probe = max77650_onkey_probe,
};
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index ecd762f93732..53ad25eaf1a2 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
if (regs->enable_mask)
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
- on ? regs->enable_mask : 0, val);
+ regs->enable_mask, on ? ~0 : 0);
return rc;
}
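The pm8xxx-vibrator one-liner is a genuine bug fix: regmap_update_bits(map, reg, mask, val) applies val underneath mask, so passing a zero mask on the "off" path made the call a no-op and the vibrator could never be disabled. The mask must stay fixed while the value toggles. A hedged sketch:

    #include <linux/regmap.h>

    /* Sketch: ~0 writes all-ones under the mask, 0 clears those bits. */
    static int demo_set_enable(struct regmap *map, unsigned int reg,
                               unsigned int mask, bool on)
    {
            return regmap_update_bits(map, reg, mask, on ? ~0 : 0);
    }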
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index fd253781be71..f2593133e524 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev,
struct uinput_device *udev = input_get_drvdata(dev);
struct timespec64 ts;
- udev->buff[udev->head].type = type;
- udev->buff[udev->head].code = code;
- udev->buff[udev->head].value = value;
ktime_get_ts64(&ts);
- udev->buff[udev->head].input_event_sec = ts.tv_sec;
- udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+ udev->buff[udev->head] = (struct input_event) {
+ .input_event_sec = ts.tv_sec,
+ .input_event_usec = ts.tv_nsec / NSEC_PER_USEC,
+ .type = type,
+ .code = code,
+ .value = value,
+ };
+
udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;
wake_up_interruptible(&udev->waitq);
@@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */
poll_wait(file, &udev->waitq, wait);
if (udev->head != udev->tail)
- return EPOLLIN | EPOLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
- return EPOLLOUT | EPOLLWRNORM;
+ return mask;
}
static int uinput_release(struct inode *inode, struct file *file)
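uinput_poll() previously reported the device as either readable or writable, never both; since writes to uinput never block, the fix starts from EPOLLOUT | EPOLLWRNORM and ORs in the read bits only when the event ring is non-empty. A hedged sketch of the corrected shape over a head/tail ring:

    #include <linux/poll.h>

    static __poll_t demo_poll(struct file *file, wait_queue_head_t *waitq,
                              poll_table *wait, unsigned int head,
                              unsigned int tail)
    {
            __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* always writable */

            poll_wait(file, waitq, wait);
            if (head != tail)                       /* events queued */
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }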
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 14239fbd72cf..7f012bfa2658 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -250,7 +250,7 @@ struct cyapa_tsg_bin_image_data_record {
struct cyapa_tsg_bin_image {
struct cyapa_tsg_bin_image_head image_head;
- struct cyapa_tsg_bin_image_data_record records[0];
+ struct cyapa_tsg_bin_image_data_record records[];
} __packed;
struct pip_bl_packet_start {
@@ -271,7 +271,7 @@ struct pip_bl_cmd_head {
u8 report_id; /* Bootloader output report id, must be 40h */
u8 rsvd; /* Reserved, must be 0 */
struct pip_bl_packet_start packet_start;
- u8 data[0]; /* Command data variable based on commands */
+ u8 data[]; /* Command data variable based on commands */
} __packed;
/* Initiate bootload command data structure. */
@@ -300,7 +300,7 @@ struct tsg_bl_metadata_row_params {
struct tsg_bl_flash_row_head {
u8 flash_array_id;
__le16 flash_row_id;
- u8 flash_data[0];
+ u8 flash_data[];
} __packed;
struct pip_app_cmd_head {
@@ -314,7 +314,7 @@ struct pip_app_cmd_head {
* Bit 6-0: command code.
*/
u8 cmd_code;
- u8 parameter_data[0]; /* Parameter data variable based on cmd_code */
+ u8 parameter_data[]; /* Parameter data variable based on cmd_code */
} __packed;
/* Application get/set parameter command data structure */
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index 027efdd2b2ad..a472489ccbad 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -190,6 +190,7 @@ static int psmouse_smbus_create_companion(struct device *dev, void *data)
struct psmouse_smbus_dev *smbdev = data;
unsigned short addr_list[] = { smbdev->board.addr, I2C_CLIENT_END };
struct i2c_adapter *adapter;
+ struct i2c_client *client;
adapter = i2c_verify_adapter(dev);
if (!adapter)
@@ -198,12 +199,13 @@ static int psmouse_smbus_create_companion(struct device *dev, void *data)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_HOST_NOTIFY))
return 0;
- smbdev->client = i2c_new_probed_device(adapter, &smbdev->board,
- addr_list, NULL);
- if (!smbdev->client)
+ client = i2c_new_scanned_device(adapter, &smbdev->board,
+ addr_list, NULL);
+ if (IS_ERR(client))
return 0;
/* We have our(?) device, stop iterating i2c bus. */
+ smbdev->client = client;
return 1;
}
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 41acde60b60f..3332b77eef2a 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -167,7 +167,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev)
goto failed;
}
- trkball->mmio_base = ioremap_nocache(res->start, resource_size(res));
+ trkball->mmio_base = ioremap(res->start, resource_size(res));
if (!trkball->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap registers\n");
error = -ENXIO;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 1ae6f8bba9ae..2c666fb34625 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0042", /* Yoga */
"LEN0045",
"LEN0047",
- "LEN0049",
"LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
@@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
+ "LEN0049", /* Yoga 11e */
"LEN004a", /* W541 */
"LEN005b", /* P50 */
"LEN005e", /* T560 */
+ "LEN006c", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
+ "LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"SYN3052", /* HP EliteBook 840 G4 */
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index bbf9ae9f3f0c..6adea8a3e8fb 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -412,6 +412,10 @@ struct f11_2d_sensor_queries {
/* Defs for Ctrl0. */
#define RMI_F11_REPORT_MODE_MASK 0x07
+#define RMI_F11_REPORT_MODE_CONTINUOUS (0 << 0)
+#define RMI_F11_REPORT_MODE_REDUCED (1 << 0)
+#define RMI_F11_REPORT_MODE_FS_CHANGE (2 << 0)
+#define RMI_F11_REPORT_MODE_FP_CHANGE (3 << 0)
#define RMI_F11_ABS_POS_FILT (1 << 3)
#define RMI_F11_REL_POS_FILT (1 << 4)
#define RMI_F11_REL_BALLISTICS (1 << 5)
@@ -1195,6 +1199,16 @@ static int rmi_f11_initialize(struct rmi_function *fn)
ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
sensor->axis_align.delta_y_threshold;
+ /*
+ * If distance threshold values are set, switch to reduced reporting
+ * mode so they actually get used by the controller.
+ */
+ if (ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] ||
+ ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD]) {
+ ctrl->ctrl0_11[0] &= ~RMI_F11_REPORT_MODE_MASK;
+ ctrl->ctrl0_11[0] |= RMI_F11_REPORT_MODE_REDUCED;
+ }
+
if (f11->sens_query.has_dribble) {
switch (sensor->dribble) {
case RMI_REG_STATE_OFF:
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 0bc01cfc2b51..6b23e679606e 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -24,6 +24,12 @@
#define F54_NUM_TX_OFFSET 1
#define F54_NUM_RX_OFFSET 0
+/*
+ * The smbus protocol can read only 32 bytes max at a time.
+ * But this should be fine for i2c/spi as well.
+ */
+#define F54_REPORT_DATA_SIZE 32
+
/* F54 commands */
#define F54_GET_REPORT 1
#define F54_FORCE_CAL 2
@@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work)
int report_size;
u8 command;
int error;
+ int i;
report_size = rmi_f54_get_report_size(f54);
if (report_size == 0) {
@@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work)
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
- fifo[0] = 0;
- fifo[1] = 0;
- error = rmi_write_block(fn->rmi_dev,
- fn->fd.data_base_addr + F54_FIFO_OFFSET,
- fifo, sizeof(fifo));
- if (error) {
- dev_err(&fn->dev, "Failed to set fifo start offset\n");
- goto abort;
- }
+ for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+ int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+ fifo[0] = i & 0xff;
+ fifo[1] = i >> 8;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
- error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
- F54_REPORT_DATA_OFFSET, f54->report_data,
- report_size);
- if (error) {
- dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
- __func__, report_size, error);
- goto abort;
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET,
+ f54->report_data + i, size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, size, error);
+ goto abort;
+ }
}
abort:
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index b313c579914f..2407ea43de59 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
retval = 0;
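Both rmi_smb block helpers split transfers into SMB_MAX_COUNT chunks but advanced only the buffer pointer, so every chunk after the first was issued against the starting register address; the two one-line hunks above advance rmiaddr in step with databuff. A standalone sketch of the corrected loop, with the transport call abstracted away:

    #define SMB_MAX_COUNT 32

    static int demo_read_block(int (*xfer)(unsigned short addr, void *buf,
                                           unsigned short len),
                               unsigned short addr, unsigned char *buf,
                               unsigned short len)
    {
            while (len > SMB_MAX_COUNT) {
                    int rv = xfer(addr, buf, SMB_MAX_COUNT);

                    if (rv)
                            return rv;
                    len  -= SMB_MAX_COUNT;
                    buf  += SMB_MAX_COUNT;
                    addr += SMB_MAX_COUNT;  /* the previously missing step */
            }
            return xfer(addr, buf, len);
    }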
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index f3e18f8ef9ca..373a1646019e 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -165,6 +165,16 @@ config SERIO_MACEPS2
To compile this driver as a module, choose M here: the
module will be called maceps2.
+config SERIO_SGI_IOC3
+ tristate "SGI IOC3 PS/2 controller"
+ depends on SGI_MFD_IOC3
+ help
+ Say Y here if you have an SGI Onyx2, SGI Octane or IOC3 PCI card
+ and you want to attach and use a keyboard, mouse, or both.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ioc3kbd.
+
config SERIO_LIBPS2
tristate "PS/2 driver library"
depends on SERIO_I8042 || SERIO_I8042=n
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 67950a5ccb3f..6d97bad7b844 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HIL_MLC) += hp_sdc_mlc.o hil_mlc.o
obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
obj-$(CONFIG_SERIO_PS2MULT) += ps2mult.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
+obj-$(CONFIG_SERIO_SGI_IOC3) += ioc3kbd.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index f290d5d146c3..594ac4e6f8ea 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -51,7 +51,7 @@ struct apbps2_regs {
struct apbps2_priv {
struct serio *io;
- struct apbps2_regs *regs;
+ struct apbps2_regs __iomem *regs;
};
static int apbps2_idx;
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 96f9b5397367..2f9775de3c5b 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -349,7 +349,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
- ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
+ ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index e486a8a74c40..df4e9f6f4529 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -259,6 +259,8 @@ static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev)
u32 proto_status;
int error;
+ reinit_completion(&kbd_dev->wait_event);
+
request = &kbd_dev->protocol_req;
memset(request, 0, sizeof(struct synth_kbd_protocol_request));
request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST);
@@ -380,6 +382,29 @@ static int hv_kbd_remove(struct hv_device *hv_dev)
return 0;
}
+static int hv_kbd_suspend(struct hv_device *hv_dev)
+{
+ vmbus_close(hv_dev->channel);
+
+ return 0;
+}
+
+static int hv_kbd_resume(struct hv_device *hv_dev)
+{
+ int ret;
+
+ ret = vmbus_open(hv_dev->channel,
+ KBD_VSC_SEND_RING_BUFFER_SIZE,
+ KBD_VSC_RECV_RING_BUFFER_SIZE,
+ NULL, 0,
+ hv_kbd_on_channel_callback,
+ hv_dev);
+ if (ret == 0)
+ ret = hv_kbd_connect_to_vsp(hv_dev);
+
+ return ret;
+}
+
static const struct hv_vmbus_device_id id_table[] = {
/* Keyboard guid */
{ HV_KBD_GUID, },
@@ -393,6 +418,8 @@ static struct hv_driver hv_kbd_drv = {
.id_table = id_table,
.probe = hv_kbd_probe,
.remove = hv_kbd_remove,
+ .suspend = hv_kbd_suspend,
+ .resume = hv_kbd_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
new file mode 100644
index 000000000000..d51bfe912db5
--- /dev/null
+++ b/drivers/input/serio/ioc3kbd.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 PS/2 controller driver for linux
+ *
+ * Copyright (C) 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * Based on code Copyright (C) 2005 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/serio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/sn/ioc3.h>
+
+struct ioc3kbd_data {
+ struct ioc3_serioregs __iomem *regs;
+ struct serio *kbd, *aux;
+ bool kbd_exists, aux_exists;
+ int irq;
+};
+
+static int ioc3kbd_wait(struct ioc3_serioregs __iomem *regs, u32 mask)
+{
+ unsigned long timeout = 0;
+
+ while ((readl(&regs->km_csr) & mask) && (timeout < 250)) {
+ udelay(50);
+ timeout++;
+ }
+ return (timeout >= 250) ? -ETIMEDOUT : 0;
+}
+
+static int ioc3kbd_write(struct serio *dev, u8 val)
+{
+ struct ioc3kbd_data *d = dev->port_data;
+ int ret;
+
+ ret = ioc3kbd_wait(d->regs, KM_CSR_K_WRT_PEND);
+ if (ret)
+ return ret;
+
+ writel(val, &d->regs->k_wd);
+
+ return 0;
+}
+
+static int ioc3kbd_start(struct serio *dev)
+{
+ struct ioc3kbd_data *d = dev->port_data;
+
+ d->kbd_exists = true;
+ return 0;
+}
+
+static void ioc3kbd_stop(struct serio *dev)
+{
+ struct ioc3kbd_data *d = dev->port_data;
+
+ d->kbd_exists = false;
+}
+
+static int ioc3aux_write(struct serio *dev, u8 val)
+{
+ struct ioc3kbd_data *d = dev->port_data;
+ int ret;
+
+ ret = ioc3kbd_wait(d->regs, KM_CSR_M_WRT_PEND);
+ if (ret)
+ return ret;
+
+ writel(val, &d->regs->m_wd);
+
+ return 0;
+}
+
+static int ioc3aux_start(struct serio *dev)
+{
+ struct ioc3kbd_data *d = dev->port_data;
+
+ d->aux_exists = true;
+ return 0;
+}
+
+static void ioc3aux_stop(struct serio *dev)
+{
+ struct ioc3kbd_data *d = dev->port_data;
+
+ d->aux_exists = false;
+}
+
+static void ioc3kbd_process_data(struct serio *dev, u32 data)
+{
+ if (data & KM_RD_VALID_0)
+ serio_interrupt(dev, (data >> KM_RD_DATA_0_SHIFT) & 0xff, 0);
+ if (data & KM_RD_VALID_1)
+ serio_interrupt(dev, (data >> KM_RD_DATA_1_SHIFT) & 0xff, 0);
+ if (data & KM_RD_VALID_2)
+ serio_interrupt(dev, (data >> KM_RD_DATA_2_SHIFT) & 0xff, 0);
+}
+
+static irqreturn_t ioc3kbd_intr(int irq, void *dev_id)
+{
+ struct ioc3kbd_data *d = dev_id;
+ u32 data_k, data_m;
+
+ data_k = readl(&d->regs->k_rd);
+ if (d->kbd_exists)
+ ioc3kbd_process_data(d->kbd, data_k);
+
+ data_m = readl(&d->regs->m_rd);
+ if (d->aux_exists)
+ ioc3kbd_process_data(d->aux, data_m);
+
+ return IRQ_HANDLED;
+}
+
+static int ioc3kbd_probe(struct platform_device *pdev)
+{
+ struct ioc3_serioregs __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct ioc3kbd_data *d;
+ struct serio *sk, *sa;
+ int irq, ret;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ sk = kzalloc(sizeof(*sk), GFP_KERNEL);
+ if (!sk)
+ return -ENOMEM;
+
+ sa = kzalloc(sizeof(*sa), GFP_KERNEL);
+ if (!sa) {
+ kfree(sk);
+ return -ENOMEM;
+ }
+
+ sk->id.type = SERIO_8042;
+ sk->write = ioc3kbd_write;
+ sk->start = ioc3kbd_start;
+ sk->stop = ioc3kbd_stop;
+ snprintf(sk->name, sizeof(sk->name), "IOC3 keyboard %d", pdev->id);
+ snprintf(sk->phys, sizeof(sk->phys), "ioc3/serio%dkbd", pdev->id);
+ sk->port_data = d;
+ sk->dev.parent = dev;
+
+ sa->id.type = SERIO_8042;
+ sa->write = ioc3aux_write;
+ sa->start = ioc3aux_start;
+ sa->stop = ioc3aux_stop;
+ snprintf(sa->name, sizeof(sa->name), "IOC3 auxiliary %d", pdev->id);
+ snprintf(sa->phys, sizeof(sa->phys), "ioc3/serio%daux", pdev->id);
+ sa->port_data = d;
+ sa->dev.parent = dev;
+
+ d->regs = regs;
+ d->kbd = sk;
+ d->aux = sa;
+ d->irq = irq;
+
+ platform_set_drvdata(pdev, d);
+ serio_register_port(d->kbd);
+ serio_register_port(d->aux);
+
+ ret = request_irq(irq, ioc3kbd_intr, IRQF_SHARED, "ioc3-kbd", d);
+ if (ret) {
+ dev_err(dev, "could not request IRQ %d\n", irq);
+ serio_unregister_port(d->kbd);
+ serio_unregister_port(d->aux);
+ return ret;
+ }
+
+ /* enable ports */
+ writel(KM_CSR_K_CLAMP_3 | KM_CSR_M_CLAMP_3, &regs->km_csr);
+
+ return 0;
+}
+
+static int ioc3kbd_remove(struct platform_device *pdev)
+{
+ struct ioc3kbd_data *d = platform_get_drvdata(pdev);
+
+ free_irq(d->irq, d);
+
+ serio_unregister_port(d->kbd);
+ serio_unregister_port(d->aux);
+
+ return 0;
+}
+
+static struct platform_driver ioc3kbd_driver = {
+ .probe = ioc3kbd_probe,
+ .remove = ioc3kbd_remove,
+ .driver = {
+ .name = "ioc3-kbd",
+ },
+};
+module_platform_driver(ioc3kbd_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 serio driver");
+MODULE_LICENSE("GPL");
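The write paths of the new driver above gate on ioc3kbd_wait(), a bounded busy-wait that polls the keyboard/mouse CSR until the write-pending bit clears, giving up after 250 iterations of 50 us. A standalone sketch of that timeout structure, with the register read stubbed out:

    #include <errno.h>

    /* Poll until (read_csr() & mask) clears, or roughly 12.5 ms elapse. */
    static int wait_clear(unsigned int (*read_csr)(void), unsigned int mask)
    {
            unsigned long tries = 0;

            while ((read_csr() & mask) && tries < 250) {
                    /* udelay(50) in the driver; elided in this sketch */
                    tries++;
            }
            return tries >= 250 ? -ETIMEDOUT : 0;
    }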
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 2ca586fb914f..e08b0ef078e8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
aiptek->inputdev = inputdev;
aiptek->intf = intf;
- aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+ aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
aiptek->inDelay = 0;
aiptek->endDelay = 0;
aiptek->previousJitterable = 0;
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
/* Verify that a device really has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
- intf->altsetting[0].desc.bNumEndpoints);
+ intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
- endpoint = &intf->altsetting[0].endpoint[0].desc;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 35031228a6d0..96d65575f75a 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
}
/* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}
- /*
- * The endpoint is always altsetting 0, we know this since we know
- * this device only has one interrupt endpoint
- */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
if (usb_endpoint_xfer_int(endpoint))
dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
- dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+ dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+ usbinterface->cur_altsetting->extralen);
/*
* Find the HID descriptor so we can find out the size of the
@@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
usb_fill_int_urb(gtco->urbinfo,
udev,
usb_rcvintpipe(udev,
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index a1f3a0cb197e..38f087404f7a 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;
/* Sanity check that the device has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 51ddb204ca1b..8fd7fc39c4fd 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -333,7 +333,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
req->xfer[1].len = 2;
/* for 1uF, settle for 800 usec; no cap, 100 usec. */
- req->xfer[1].delay_usecs = ts->vref_delay_usecs;
+ req->xfer[1].delay.value = ts->vref_delay_usecs;
+ req->xfer[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&req->xfer[1], &req->msg);
/* Enable reference voltage */
@@ -1018,7 +1019,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
* have had enough time to stabilize.
*/
if (pdata->settle_delay_usecs) {
- x->delay_usecs = pdata->settle_delay_usecs;
+ x->delay.value = pdata->settle_delay_usecs;
+ x->delay.unit = SPI_DELAY_UNIT_USECS;
x++;
x->tx_buf = &packet->read_y;
@@ -1061,7 +1063,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
/* ... maybe discard first sample ... */
if (pdata->settle_delay_usecs) {
- x->delay_usecs = pdata->settle_delay_usecs;
+ x->delay.value = pdata->settle_delay_usecs;
+ x->delay.unit = SPI_DELAY_UNIT_USECS;
x++;
x->tx_buf = &packet->read_x;
@@ -1094,7 +1097,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
/* ... maybe discard first sample ... */
if (pdata->settle_delay_usecs) {
- x->delay_usecs = pdata->settle_delay_usecs;
+ x->delay.value = pdata->settle_delay_usecs;
+ x->delay.unit = SPI_DELAY_UNIT_USECS;
x++;
x->tx_buf = &packet->read_z1;
@@ -1125,7 +1129,8 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
/* ... maybe discard first sample ... */
if (pdata->settle_delay_usecs) {
- x->delay_usecs = pdata->settle_delay_usecs;
+ x->delay.value = pdata->settle_delay_usecs;
+ x->delay.unit = SPI_DELAY_UNIT_USECS;
x++;
x->tx_buf = &packet->read_z2;
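The ads7846 hunks migrate from the per-transfer delay_usecs field to struct spi_delay, which pairs a value with an explicit unit; the settle delays keep their microsecond meaning via SPI_DELAY_UNIT_USECS. A hedged kernel-style sketch:

    #include <linux/spi/spi.h>

    /* Sketch: attach a post-transfer settle delay in microseconds. */
    static void demo_set_settle(struct spi_transfer *x, u16 usecs)
    {
            x->delay.value = usecs;
            x->delay.unit  = SPI_DELAY_UNIT_USECS;
    }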
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d61731c0037d..d2587724c52a 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -13,22 +13,23 @@
* http://www.glyn.com/Products/Displays
*/
-#include <linux/module.h>
-#include <linux/ratelimit.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
#include <linux/debugfs.h>
-#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <asm/unaligned.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/unaligned.h>
#define WORK_REGISTER_THRESHOLD 0x00
#define WORK_REGISTER_REPORT_RATE 0x08
@@ -1050,6 +1051,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
{
const struct edt_i2c_chip_data *chip_data;
struct edt_ft5x06_ts_data *tsdata;
+ u8 buf[2] = { 0xfc, 0x00 };
struct input_dev *input;
unsigned long irq_flags;
int error;
@@ -1140,6 +1142,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
+ /*
+ * Dummy read access. EP0700MLP1 returns bogus data on the first
+ * register read access and ignores writes.
+ */
+ edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
+
edt_ft5x06_ts_set_regs(tsdata);
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);
@@ -1200,7 +1208,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
- device_init_wakeup(&client->dev, 1);
dev_dbg(&client->dev,
"EDT FT5x06 initialized: IRQ %d, WAKE pin %d, Reset pin %d.\n",
@@ -1220,29 +1227,6 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
return 0;
}
-static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(client->irq);
-
- return 0;
-}
-
-static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(client->irq);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
- edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
-
static const struct edt_i2c_chip_data edt_ft5x06_data = {
.max_support_points = 5,
};
@@ -1281,7 +1265,6 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.name = "edt_ft5x06",
.of_match_table = edt_ft5x06_of_match,
- .pm = &edt_ft5x06_ts_pm_ops,
},
.id_table = edt_ft5x06_ts_id,
.probe = edt_ft5x06_ts_probe,
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index d4ad24ea54c8..491179967b29 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -59,8 +59,10 @@
#define CMD_HEADER_WRITE 0x54
#define CMD_HEADER_READ 0x53
#define CMD_HEADER_6B_READ 0x5B
+#define CMD_HEADER_ROM_READ 0x96
#define CMD_HEADER_RESP 0x52
#define CMD_HEADER_6B_RESP 0x9B
+#define CMD_HEADER_ROM_RESP 0x95
#define CMD_HEADER_HELLO 0x55
#define CMD_HEADER_REK 0x66
@@ -200,6 +202,10 @@ static int elants_i2c_execute_command(struct i2c_client *client,
expected_response = CMD_HEADER_6B_RESP;
break;
+ case CMD_HEADER_ROM_READ:
+ expected_response = CMD_HEADER_ROM_RESP;
+ break;
+
default:
dev_err(&client->dev, "%s: invalid command %*ph\n",
__func__, (int)cmd_size, cmd);
@@ -556,6 +562,8 @@ static int elants_i2c_initialize(struct elants_data *ts)
/* hw version is available even if device in recovery state */
error2 = elants_i2c_query_hw_version(ts);
+ if (!error2)
+ error2 = elants_i2c_query_bc_version(ts);
if (!error)
error = error2;
@@ -564,8 +572,6 @@ static int elants_i2c_initialize(struct elants_data *ts)
if (!error)
error = elants_i2c_query_test_version(ts);
if (!error)
- error = elants_i2c_query_bc_version(ts);
- if (!error)
error = elants_i2c_query_ts_info(ts);
if (error)
@@ -613,39 +619,94 @@ static int elants_i2c_fw_write_page(struct i2c_client *client,
return error;
}
+static int elants_i2c_validate_remark_id(struct elants_data *ts,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = ts->client;
+ int error;
+ const u8 cmd[] = { CMD_HEADER_ROM_READ, 0x80, 0x1F, 0x00, 0x00, 0x21 };
+ u8 resp[6] = { 0 };
+ u16 ts_remark_id = 0;
+ u16 fw_remark_id = 0;
+
+ /* Compare TS Remark ID and FW Remark ID */
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp));
+ if (error) {
+ dev_err(&client->dev, "failed to query Remark ID: %d\n", error);
+ return error;
+ }
+
+ ts_remark_id = get_unaligned_be16(&resp[3]);
+
+ fw_remark_id = get_unaligned_le16(&fw->data[fw->size - 4]);
+
+ if (fw_remark_id != ts_remark_id) {
+ dev_err(&client->dev,
+ "Remark ID Mismatched: ts_remark_id=0x%04x, fw_remark_id=0x%04x.\n",
+ ts_remark_id, fw_remark_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int elants_i2c_do_update_firmware(struct i2c_client *client,
const struct firmware *fw,
bool force)
{
+ struct elants_data *ts = i2c_get_clientdata(client);
const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
- const u8 close_idle[] = {0x54, 0x2c, 0x01, 0x01};
+ const u8 close_idle[] = { 0x54, 0x2c, 0x01, 0x01 };
u8 buf[HEADER_SIZE];
u16 send_id;
int page, n_fw_pages;
int error;
+ bool check_remark_id = ts->iap_version >= 0x60;
/* Recovery mode detection! */
if (force) {
dev_dbg(&client->dev, "Recovery mode procedure\n");
+
+ if (check_remark_id) {
+ error = elants_i2c_validate_remark_id(ts, fw);
+ if (error)
+ return error;
+ }
+
error = elants_i2c_send(client, enter_iap2, sizeof(enter_iap2));
+ if (error) {
+ dev_err(&client->dev, "failed to enter IAP mode: %d\n",
+ error);
+ return error;
+ }
} else {
/* Start IAP Procedure */
dev_dbg(&client->dev, "Normal IAP procedure\n");
+
/* Close idle mode */
error = elants_i2c_send(client, close_idle, sizeof(close_idle));
if (error)
dev_err(&client->dev, "Failed close idle: %d\n", error);
msleep(60);
+
elants_i2c_sw_reset(client);
msleep(20);
- error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
- }
- if (error) {
- dev_err(&client->dev, "failed to enter IAP mode: %d\n", error);
- return error;
+ if (check_remark_id) {
+ error = elants_i2c_validate_remark_id(ts, fw);
+ if (error)
+ return error;
+ }
+
+ error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
+ if (error) {
+ dev_err(&client->dev, "failed to enter IAP mode: %d\n",
+ error);
+ return error;
+ }
}
msleep(20);
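The new elants_i2c_validate_remark_id() refuses to flash firmware built for a different panel: the controller reports its remark ID big-endian in bytes 3..4 of the ROM-read response, while the image stores its copy little-endian in the last four bytes, hence the mixed get_unaligned_be16()/get_unaligned_le16() calls above. A standalone sketch of just the comparison:

    #include <stdint.h>

    static uint16_t be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }
    static uint16_t le16(const uint8_t *p) { return p[0] | (p[1] << 8); }

    /* Returns 0 when the device and image remark IDs agree. */
    static int check_remark_id(const uint8_t resp[6],
                               const uint8_t *fw, unsigned long fw_size)
    {
            return be16(&resp[3]) == le16(&fw[fw_size - 4]) ? 0 : -1;
    }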
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 4a17096e83e1..199cf3daec10 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -167,6 +167,36 @@ static const struct ili2xxx_chip ili211x_chip = {
.resolution = 2048,
};
+static bool ili212x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+ unsigned int *x, unsigned int *y)
+{
+ u16 val;
+
+ val = get_unaligned_be16(touchdata + 3 + (finger * 5) + 0);
+ if (!(val & BIT(15))) /* Touch indication */
+ return false;
+
+ *x = val & 0x3fff;
+ *y = get_unaligned_be16(touchdata + 3 + (finger * 5) + 2);
+
+ return true;
+}
+
+static bool ili212x_check_continue_polling(const u8 *data, bool touch)
+{
+ return touch;
+}
+
+static const struct ili2xxx_chip ili212x_chip = {
+ .read_reg = ili210x_read_reg,
+ .get_touch_data = ili210x_read_touch_data,
+ .parse_touch_data = ili212x_touchdata_to_coords,
+ .continue_polling = ili212x_check_continue_polling,
+ .max_touches = 10,
+ .has_calibrate_reg = true,
+};
+
static int ili251x_read_reg(struct i2c_client *client,
u8 reg, void *buf, size_t len)
{
@@ -321,7 +351,7 @@ static umode_t ili210x_calibrate_visible(struct kobject *kobj,
struct i2c_client *client = to_i2c_client(dev);
struct ili210x *priv = i2c_get_clientdata(client);
- return priv->chip->has_calibrate_reg;
+ return priv->chip->has_calibrate_reg ? attr->mode : 0;
}
static const struct attribute_group ili210x_attr_group = {
@@ -447,6 +477,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
static const struct i2c_device_id ili210x_i2c_id[] = {
{ "ili210x", (long)&ili210x_chip },
{ "ili2117", (long)&ili211x_chip },
+ { "ili2120", (long)&ili212x_chip },
{ "ili251x", (long)&ili251x_chip },
{ }
};
@@ -455,6 +486,7 @@ MODULE_DEVICE_TABLE(i2c, ili210x_i2c_id);
static const struct of_device_id ili210x_dt_ids[] = {
{ .compatible = "ilitek,ili210x", .data = &ili210x_chip },
{ .compatible = "ilitek,ili2117", .data = &ili211x_chip },
+ { .compatible = "ilitek,ili2120", .data = &ili212x_chip },
{ .compatible = "ilitek,ili251x", .data = &ili251x_chip },
{ }
};
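The ILI2120 support added here reuses the ILI210x register accessors and differs only in report parsing: each finger occupies five bytes starting at offset 3, a big-endian word whose bit 15 flags a contact and whose low 14 bits carry X, followed by a big-endian Y word. A standalone sketch mirroring ili212x_touchdata_to_coords() above:

    #include <linux/bits.h>
    #include <linux/types.h>
    #include <asm/unaligned.h>

    static bool decode_finger(const u8 *td, unsigned int finger,
                              unsigned int *x, unsigned int *y)
    {
            u16 v = get_unaligned_be16(td + 3 + finger * 5);

            if (!(v & BIT(15)))     /* no contact in this slot */
                    return false;

            *x = v & 0x3fff;
            *y = get_unaligned_be16(td + 3 + finger * 5 + 2);
            return true;
    }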
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 0af0fe8c40d7..742a7e96c1b5 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
+ struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 1dd47dda71cd..34d31c7ec8ba 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface,
int error;
/* Check if we really have the right interface. */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
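The sur40 fix swaps a hard-coded altsetting[0] for cur_altsetting, since the altsetting array is not guaranteed to be ordered by setting number and entry 0 may not be the active one. A hedged sketch of the safer probe-time check (USB_CLASS_VENDOR_SPEC is the 0xFF class code from <linux/usb/ch9.h>):

    #include <linux/usb.h>

    static int check_vendor_class(struct usb_interface *intf)
    {
            struct usb_host_interface *alt = intf->cur_altsetting;

            if (alt->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
                    return -ENODEV;

            return 0;
    }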
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 28f2ab0824d5..725029ae7a2c 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+CFLAGS_core.o := -I$(src)
icc-core-objs := core.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index c498796adc07..f277e467156f 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -19,45 +19,22 @@
#include <linux/of.h>
#include <linux/overflow.h>
+#include "internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;
-/**
- * struct icc_req - constraints that are attached to each node
- * @req_node: entry in list of requests for the particular @node
- * @node: the interconnect node to which this constraint applies
- * @dev: reference to the device that sets the constraints
- * @tag: path tag (optional)
- * @avg_bw: an integer describing the average bandwidth in kBps
- * @peak_bw: an integer describing the peak bandwidth in kBps
- */
-struct icc_req {
- struct hlist_node req_node;
- struct icc_node *node;
- struct device *dev;
- u32 tag;
- u32 avg_bw;
- u32 peak_bw;
-};
-
-/**
- * struct icc_path - interconnect path structure
- * @num_nodes: number of hops (nodes)
- * @reqs: array of the requests applicable to this path of nodes
- */
-struct icc_path {
- size_t num_nodes;
- struct icc_req reqs[];
-};
-
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
if (!n)
return;
- seq_printf(s, "%-30s %12u %12u\n",
+ seq_printf(s, "%-42s %12u %12u\n",
n->name, n->avg_bw, n->peak_bw);
}
@@ -65,8 +42,8 @@ static int icc_summary_show(struct seq_file *s, void *data)
{
struct icc_provider *provider;
- seq_puts(s, " node avg peak\n");
- seq_puts(s, "--------------------------------------------------------\n");
+ seq_puts(s, " node tag avg peak\n");
+ seq_puts(s, "--------------------------------------------------------------------\n");
mutex_lock(&icc_lock);
@@ -81,8 +58,8 @@ static int icc_summary_show(struct seq_file *s, void *data)
if (!r->dev)
continue;
- seq_printf(s, " %-26s %12u %12u\n",
- dev_name(r->dev), r->avg_bw,
+ seq_printf(s, " %-27s %12u %12u %12u\n",
+ dev_name(r->dev), r->tag, r->avg_bw,
r->peak_bw);
}
}
@@ -94,6 +71,70 @@ static int icc_summary_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);
+static void icc_graph_show_link(struct seq_file *s, int level,
+ struct icc_node *n, struct icc_node *m)
+{
+ seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
+ level == 2 ? "\t\t" : "\t",
+ n->id, n->name, m->id, m->name);
+}
+
+static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
+{
+ seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
+ n->id, n->name, n->id, n->name);
+ seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
+ seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
+ seq_puts(s, "\"]\n");
+}
+
+static int icc_graph_show(struct seq_file *s, void *data)
+{
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int cluster_index = 0;
+ int i;
+
+ seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
+ mutex_lock(&icc_lock);
+
+ /* draw providers as cluster subgraphs */
+ cluster_index = 0;
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
+ if (provider->dev)
+ seq_printf(s, "\t\tlabel = \"%s\"\n",
+ dev_name(provider->dev));
+
+ /* draw nodes */
+ list_for_each_entry(n, &provider->nodes, node_list)
+ icc_graph_show_node(s, n);
+
+ /* draw internal links */
+ list_for_each_entry(n, &provider->nodes, node_list)
+ for (i = 0; i < n->num_links; ++i)
+ if (n->provider == n->links[i]->provider)
+ icc_graph_show_link(s, 2, n,
+ n->links[i]);
+
+ seq_puts(s, "\t}\n");
+ }
+
+ /* draw external links */
+ list_for_each_entry(provider, &icc_providers, provider_list)
+ list_for_each_entry(n, &provider->nodes, node_list)
+ for (i = 0; i < n->num_links; ++i)
+ if (n->provider != n->links[i]->provider)
+ icc_graph_show_link(s, 1, n,
+ n->links[i]);
+
+ mutex_unlock(&icc_lock);
+ seq_puts(s, "}");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(icc_graph);
+
static struct icc_node *node_find(const int id)
{
return idr_find(&icc_idr, id);
@@ -244,6 +285,16 @@ out:
return ret;
}
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_std_aggregate);
+
/**
* of_icc_xlate_onecell() - Translate function using a single index.
* @spec: OF phandle args to map into an interconnect node.
* @data: private data (pointer to struct icc_onecell_data)
@@ -382,9 +433,17 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
mutex_lock(&icc_lock);
path = path_find(dev, src_node, dst_node);
- if (IS_ERR(path))
- dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
mutex_unlock(&icc_lock);
+ if (IS_ERR(path)) {
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ return path;
+ }
+
+ if (name)
+ path->name = kstrdup_const(name, GFP_KERNEL);
+ else
+ path->name = kasprintf(GFP_KERNEL, "%s-%s",
+ src_node->name, dst_node->name);
return path;
}
@@ -436,9 +495,12 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
size_t i;
int ret;
- if (!path || !path->num_nodes)
+ if (!path)
return 0;
+ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ return -EINVAL;
+
mutex_lock(&icc_lock);
old_avg = path->reqs[0].avg_bw;
@@ -453,6 +515,8 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
/* aggregate requests for this node */
aggregate_requests(node);
+
+ trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
}
ret = apply_constraints(path);
@@ -471,6 +535,8 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
mutex_unlock(&icc_lock);
+ trace_icc_set_bw_end(path, ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
@@ -507,9 +573,12 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
goto out;
path = path_find(dev, src, dst);
- if (IS_ERR(path))
+ if (IS_ERR(path)) {
dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ goto out;
+ }
+ path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
out:
mutex_unlock(&icc_lock);
return path;
@@ -545,6 +614,7 @@ void icc_put(struct icc_path *path)
}
mutex_unlock(&icc_lock);
+ kfree_const(path->name);
kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
@@ -743,6 +813,28 @@ void icc_node_del(struct icc_node *node)
EXPORT_SYMBOL_GPL(icc_node_del);
/**
+ * icc_nodes_remove() - remove all previously added nodes from provider
+ * @provider: the interconnect provider we are removing nodes from
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_nodes_remove(struct icc_provider *provider)
+{
+ struct icc_node *n, *tmp;
+
+ if (WARN_ON(IS_ERR_OR_NULL(provider)))
+ return -EINVAL;
+
+ list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_nodes_remove);
+
+/**
* icc_provider_add() - add a new interconnect provider
* @provider: the interconnect provider that will be added into topology
*
@@ -802,6 +894,8 @@ static int __init icc_init(void)
icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
debugfs_create_file("interconnect_summary", 0444,
icc_debugfs_dir, NULL, &icc_summary_fops);
+ debugfs_create_file("interconnect_graph", 0444,
+ icc_debugfs_dir, NULL, &icc_graph_fops);
return 0;
}
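icc_std_aggregate(), exported above, is the default policy the Qualcomm drivers below now share: average bandwidths accumulate across requests while peak bandwidth takes the maximum. With illustrative values:

    u32 agg_avg = 0, agg_peak = 0;

    icc_std_aggregate(node, 0, 100, 200, &agg_avg, &agg_peak); /* 100 / 200 */
    icc_std_aggregate(node, 0,  50, 400, &agg_avg, &agg_peak); /* 150 / 400 */
    icc_std_aggregate(node, 0,  25, 300, &agg_avg, &agg_peak); /* 175 / 400 */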
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
new file mode 100644
index 000000000000..bf18cb7239df
--- /dev/null
+++ b/drivers/interconnect/internal.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework internal structs
+ *
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_INTERNAL_H
+#define __DRIVERS_INTERCONNECT_INTERNAL_H
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @tag: path tag (optional)
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ u32 tag;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @name: a string name of the path (useful for ftrace)
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ const char *name;
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+#endif
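Since struct icc_path now ends in a flexible array of requests, allocations must account for the hop count; a sketch of the allocation shape (struct_size() comes from <linux/overflow.h>, which core.c already includes):

    struct icc_path *path;

    path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
    if (!path)
            return ERR_PTR(-ENOMEM);
    path->num_nodes = num_nodes;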
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 2f9304d1db49..76938ece1658 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_MSM8916
+ tristate "Qualcomm MSM8916 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8916-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8974
tristate "Qualcomm MSM8974 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 9adf9e380545..e8271575e3d8 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-msm8916-objs := msm8916.o
qnoc-msm8974-objs := msm8974.o
qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
new file mode 100644
index 000000000000..e94f3c5228b7
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Linaro Ltd
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/interconnect/qcom,msm8916.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+enum {
+ MSM8916_BIMC_SNOC_MAS = 1,
+ MSM8916_BIMC_SNOC_SLV,
+ MSM8916_MASTER_AMPSS_M0,
+ MSM8916_MASTER_LPASS,
+ MSM8916_MASTER_BLSP_1,
+ MSM8916_MASTER_DEHR,
+ MSM8916_MASTER_GRAPHICS_3D,
+ MSM8916_MASTER_JPEG,
+ MSM8916_MASTER_MDP_PORT0,
+ MSM8916_MASTER_CRYPTO_CORE0,
+ MSM8916_MASTER_SDCC_1,
+ MSM8916_MASTER_SDCC_2,
+ MSM8916_MASTER_QDSS_BAM,
+ MSM8916_MASTER_QDSS_ETR,
+ MSM8916_MASTER_SNOC_CFG,
+ MSM8916_MASTER_SPDM,
+ MSM8916_MASTER_TCU0,
+ MSM8916_MASTER_TCU1,
+ MSM8916_MASTER_USB_HS,
+ MSM8916_MASTER_VFE,
+ MSM8916_MASTER_VIDEO_P0,
+ MSM8916_SNOC_MM_INT_0,
+ MSM8916_SNOC_MM_INT_1,
+ MSM8916_SNOC_MM_INT_2,
+ MSM8916_SNOC_MM_INT_BIMC,
+ MSM8916_PNOC_INT_0,
+ MSM8916_PNOC_INT_1,
+ MSM8916_PNOC_MAS_0,
+ MSM8916_PNOC_MAS_1,
+ MSM8916_PNOC_SLV_0,
+ MSM8916_PNOC_SLV_1,
+ MSM8916_PNOC_SLV_2,
+ MSM8916_PNOC_SLV_3,
+ MSM8916_PNOC_SLV_4,
+ MSM8916_PNOC_SLV_8,
+ MSM8916_PNOC_SLV_9,
+ MSM8916_PNOC_SNOC_MAS,
+ MSM8916_PNOC_SNOC_SLV,
+ MSM8916_SNOC_QDSS_INT,
+ MSM8916_SLAVE_AMPSS_L2,
+ MSM8916_SLAVE_APSS,
+ MSM8916_SLAVE_LPASS,
+ MSM8916_SLAVE_BIMC_CFG,
+ MSM8916_SLAVE_BLSP_1,
+ MSM8916_SLAVE_BOOT_ROM,
+ MSM8916_SLAVE_CAMERA_CFG,
+ MSM8916_SLAVE_CATS_128,
+ MSM8916_SLAVE_OCMEM_64,
+ MSM8916_SLAVE_CLK_CTL,
+ MSM8916_SLAVE_CRYPTO_0_CFG,
+ MSM8916_SLAVE_DEHR_CFG,
+ MSM8916_SLAVE_DISPLAY_CFG,
+ MSM8916_SLAVE_EBI_CH0,
+ MSM8916_SLAVE_GRAPHICS_3D_CFG,
+ MSM8916_SLAVE_IMEM_CFG,
+ MSM8916_SLAVE_IMEM,
+ MSM8916_SLAVE_MPM,
+ MSM8916_SLAVE_MSG_RAM,
+ MSM8916_SLAVE_MSS,
+ MSM8916_SLAVE_PDM,
+ MSM8916_SLAVE_PMIC_ARB,
+ MSM8916_SLAVE_PNOC_CFG,
+ MSM8916_SLAVE_PRNG,
+ MSM8916_SLAVE_QDSS_CFG,
+ MSM8916_SLAVE_QDSS_STM,
+ MSM8916_SLAVE_RBCPR_CFG,
+ MSM8916_SLAVE_SDCC_1,
+ MSM8916_SLAVE_SDCC_2,
+ MSM8916_SLAVE_SECURITY,
+ MSM8916_SLAVE_SNOC_CFG,
+ MSM8916_SLAVE_SPDM,
+ MSM8916_SLAVE_SRVC_SNOC,
+ MSM8916_SLAVE_TCSR,
+ MSM8916_SLAVE_TLMM,
+ MSM8916_SLAVE_USB_HS,
+ MSM8916_SLAVE_VENUS_CFG,
+ MSM8916_SNOC_BIMC_0_MAS,
+ MSM8916_SNOC_BIMC_0_SLV,
+ MSM8916_SNOC_BIMC_1_MAS,
+ MSM8916_SNOC_BIMC_1_SLV,
+ MSM8916_SNOC_INT_0,
+ MSM8916_SNOC_INT_1,
+ MSM8916_SNOC_INT_BIMC,
+ MSM8916_SNOC_PNOC_MAS,
+ MSM8916_SNOC_PNOC_SLV,
+};
+
+#define to_msm8916_provider(_provider) \
+ container_of(_provider, struct msm8916_icc_provider, provider)
+
+static const struct clk_bulk_data msm8916_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct msm8916_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8916_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define MSM8916_MAX_LINKS 8
+
+/**
+ * struct msm8916_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8916_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[MSM8916_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct msm8916_icc_desc {
+ struct msm8916_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct msm8916_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1, MSM8916_BIMC_SNOC_SLV);
+DEFINE_QNODE(bimc_snoc_slv, MSM8916_BIMC_SNOC_SLV, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_1);
+DEFINE_QNODE(mas_apss, MSM8916_MASTER_AMPSS_M0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_audio, MSM8916_MASTER_LPASS, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_blsp_1, MSM8916_MASTER_BLSP_1, 4, -1, -1, MSM8916_PNOC_MAS_1);
+DEFINE_QNODE(mas_dehr, MSM8916_MASTER_DEHR, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_gfx, MSM8916_MASTER_GRAPHICS_3D, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_jpeg, MSM8916_MASTER_JPEG, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp, MSM8916_MASTER_MDP_PORT0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8916_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
+DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
+DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_usb_hs, MSM8916_MASTER_USB_HS, 4, -1, -1, MSM8916_PNOC_MAS_1);
+DEFINE_QNODE(mas_vfe, MSM8916_MASTER_VFE, 16, -1, -1, MSM8916_SNOC_MM_INT_1, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_video, MSM8916_MASTER_VIDEO_P0, 16, -1, -1, MSM8916_SNOC_MM_INT_0, MSM8916_SNOC_MM_INT_2);
+DEFINE_QNODE(mm_int_0, MSM8916_SNOC_MM_INT_0, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
+DEFINE_QNODE(mm_int_1, MSM8916_SNOC_MM_INT_1, 16, -1, -1, MSM8916_SNOC_MM_INT_BIMC);
+DEFINE_QNODE(mm_int_2, MSM8916_SNOC_MM_INT_2, 16, -1, -1, MSM8916_SNOC_INT_0);
+DEFINE_QNODE(mm_int_bimc, MSM8916_SNOC_MM_INT_BIMC, 16, -1, -1, MSM8916_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, MSM8916_PNOC_SLV_0, MSM8916_PNOC_SLV_1, MSM8916_PNOC_SLV_2, MSM8916_PNOC_SLV_3, MSM8916_PNOC_SLV_4, MSM8916_PNOC_SLV_8, MSM8916_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
+DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
+DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
+DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
+DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
+DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_display_cfg, MSM8916_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8916_SLAVE_EBI_CH0, 8, -1, 0, 0);
+DEFINE_QNODE(slv_gfx_cfg, MSM8916_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, MSM8916_SLAVE_IMEM_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem, MSM8916_SLAVE_IMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_mpm, MSM8916_SLAVE_MPM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_msg_ram, MSM8916_SLAVE_MSG_RAM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_mss, MSM8916_SLAVE_MSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, MSM8916_SLAVE_PDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, MSM8916_SLAVE_PMIC_ARB, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pcnoc_cfg, MSM8916_SLAVE_PNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_prng, MSM8916_SLAVE_PRNG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, MSM8916_SLAVE_QDSS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, MSM8916_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8916_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, MSM8916_SLAVE_SDCC_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
+DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
+DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, MSM8916_SLAVE_VENUS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8916_SNOC_BIMC_0_MAS, 8, 3, -1, MSM8916_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
+DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
+DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
+DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
+DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
+
+static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
+ [BIMC_SNOC_SLV] = &bimc_snoc_slv,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_MDP_PORT0] = &mas_mdp,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_VIDEO_P0] = &mas_video,
+ [SNOC_MM_INT_0] = &mm_int_0,
+ [SNOC_MM_INT_1] = &mm_int_1,
+ [SNOC_MM_INT_2] = &mm_int_2,
+ [SNOC_MM_INT_BIMC] = &mm_int_bimc,
+ [PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
+ [SLAVE_APSS] = &slv_apss,
+ [SLAVE_CATS_128] = &slv_cats_0,
+ [SLAVE_OCMEM_64] = &slv_cats_1,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+ [SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
+ [SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_BIMC] = &snoc_int_bimc,
+ [SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
+ [SNOC_QDSS_INT] = &qdss_int,
+};
+
+static struct msm8916_icc_desc msm8916_snoc = {
+ .nodes = msm8916_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
+};
+
+static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
+ [BIMC_SNOC_MAS] = &bimc_snoc_mas,
+ [MASTER_AMPSS_M0] = &mas_apss,
+ [MASTER_GRAPHICS_3D] = &mas_gfx,
+ [MASTER_TCU0] = &mas_tcu0,
+ [MASTER_TCU1] = &mas_tcu1,
+ [SLAVE_AMPSS_L2] = &slv_apps_l2,
+ [SLAVE_EBI_CH0] = &slv_ebi_ch0,
+ [SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
+ [SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
+};
+
+static struct msm8916_icc_desc msm8916_bimc = {
+ .nodes = msm8916_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
+};
+
+static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_DEHR] = &mas_dehr,
+ [MASTER_LPASS] = &mas_audio,
+ [MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
+ [MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
+ [MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
+ [MASTER_SPDM] = &mas_spdm,
+ [MASTER_USB_HS] = &mas_usb_hs,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_MAS_0] = &pcnoc_m_0,
+ [PCNOC_MAS_1] = &pcnoc_m_1,
+ [PCNOC_SLV_0] = &pcnoc_s_0,
+ [PCNOC_SLV_1] = &pcnoc_s_1,
+ [PCNOC_SLV_2] = &pcnoc_s_2,
+ [PCNOC_SLV_3] = &pcnoc_s_3,
+ [PCNOC_SLV_4] = &pcnoc_s_4,
+ [PCNOC_SLV_8] = &pcnoc_s_8,
+ [PCNOC_SLV_9] = &pcnoc_s_9,
+ [PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_BOOT_ROM] = &slv_boot_rom,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_DEHR_CFG] = &slv_dehr_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_LPASS] = &slv_audio,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_MSG_RAM] = &slv_msg_ram,
+ [SLAVE_MSS] = &slv_mss,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SECURITY] = &slv_security,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_TLMM] = &slv_tlmm,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
+};
+
+static struct msm8916_icc_desc msm8916_pcnoc = {
+ .nodes = msm8916_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
+};
+
+static int msm8916_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct msm8916_icc_provider *qp;
+ struct msm8916_icc_node *qn;
+ u64 sum_bw, max_peak_bw, rate;
+ u32 agg_avg = 0, agg_peak = 0;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_msm8916_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* send bandwidth request message to the RPM processor */
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
+static int msm8916_qnoc_probe(struct platform_device *pdev)
+{
+ const struct msm8916_icc_desc *desc;
+ struct msm8916_icc_node **qnodes;
+ struct msm8916_icc_provider *qp;
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, msm8916_bus_clocks,
+ sizeof(msm8916_bus_clocks), GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(msm8916_bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8916_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+}
+
+static int msm8916_qnoc_remove(struct platform_device *pdev)
+{
+ struct msm8916_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id msm8916_noc_of_match[] = {
+ { .compatible = "qcom,msm8916-bimc", .data = &msm8916_bimc },
+ { .compatible = "qcom,msm8916-pcnoc", .data = &msm8916_pcnoc },
+ { .compatible = "qcom,msm8916-snoc", .data = &msm8916_snoc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
+
+static struct platform_driver msm8916_noc_driver = {
+ .probe = msm8916_qnoc_probe,
+ .remove = msm8916_qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8916",
+ .of_match_table = msm8916_noc_of_match,
+ },
+};
+module_platform_driver(msm8916_noc_driver);
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm MSM8916 NoC driver");
+MODULE_LICENSE("GPL v2");
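The DEFINE_QNODE() macro counts its trailing link IDs with an ARRAY_SIZE() over a compound literal, keeping num_links in sync with links automatically. For illustration, the first definition expands to roughly:

    static struct msm8916_icc_node bimc_snoc_mas = {
            .name = "bimc_snoc_mas",
            .id = MSM8916_BIMC_SNOC_MAS,
            .buswidth = 8,
            .mas_rpm_id = -1,
            .slv_rpm_id = -1,
            .num_links = 1, /* ARRAY_SIZE((int[]){ MSM8916_BIMC_SNOC_SLV }) */
            .links = { MSM8916_BIMC_SNOC_SLV },
    };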
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index bf8bd1aee358..3a313e11e73d 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -550,15 +550,6 @@ static struct msm8974_icc_desc msm8974_snoc = {
.num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
};
-static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
- u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
- *agg_avg += avg_bw;
- *agg_peak = max(*agg_peak, peak_bw);
-
- return 0;
-}
-
static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
char *name, int id, u64 val)
{
@@ -603,8 +594,8 @@ static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
qp = to_msm8974_icc_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
- msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
@@ -652,7 +643,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct icc_node *node, *tmp;
+ struct icc_node *node;
size_t num_nodes, i;
int ret;
@@ -694,7 +685,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = msm8974_icc_set;
- provider->aggregate = msm8974_icc_aggregate;
+ provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
@@ -732,10 +723,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
return 0;
err_del_icc:
- list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
- icc_node_del(node);
- icc_node_destroy(node->id);
- }
+ icc_nodes_remove(provider);
icc_provider_del(provider);
err_disable_clks:
@@ -747,16 +735,10 @@ err_disable_clks:
static int msm8974_icc_remove(struct platform_device *pdev)
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
- struct icc_provider *provider = &qp->provider;
- struct icc_node *n, *tmp;
- list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
- icc_node_del(n);
- icc_node_destroy(n->id);
- }
+ icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return icc_provider_del(provider);
+ return icc_provider_del(&qp->provider);
}
static const struct of_device_id msm8974_noc_of_match[] = {
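Across msm8974, qcs404, and sdm845 the open-coded node-removal loops collapse into the new icc_nodes_remove() helper; the common remove-path shape (ordering as in the hunks above and below) becomes:

    icc_nodes_remove(&qp->provider);
    clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
    return icc_provider_del(&qp->provider);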
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 8e0735a87040..d4769a5ea182 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -327,15 +327,6 @@ static struct qcom_icc_desc qcs404_snoc = {
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
-static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
- u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
- *agg_avg += avg_bw;
- *agg_peak = max(*agg_peak, peak_bw);
-
- return 0;
-}
-
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
@@ -354,8 +345,8 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
qp = to_qcom_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
- qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
@@ -414,7 +405,7 @@ static int qnoc_probe(struct platform_device *pdev)
struct icc_provider *provider;
struct qcom_icc_node **qnodes;
struct qcom_icc_provider *qp;
- struct icc_node *node, *tmp;
+ struct icc_node *node;
size_t num_nodes, i;
int ret;
@@ -456,7 +447,7 @@ static int qnoc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = qcom_icc_set;
- provider->aggregate = qcom_icc_aggregate;
+ provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
@@ -494,10 +485,7 @@ static int qnoc_probe(struct platform_device *pdev)
return 0;
err:
- list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
- icc_node_del(node);
- icc_node_destroy(node->id);
- }
+ icc_nodes_remove(provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
icc_provider_del(provider);
@@ -507,16 +495,10 @@ err:
static int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
- struct icc_provider *provider = &qp->provider;
- struct icc_node *n, *tmp;
- list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
- icc_node_del(n);
- icc_node_destroy(n->id);
- }
+ icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return icc_provider_del(provider);
+ return icc_provider_del(&qp->provider);
}
static const struct of_device_id qcs404_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 387267ee9648..f078cf0fce56 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -855,11 +855,7 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
err:
- list_for_each_entry(node, &provider->nodes, node_list) {
- icc_node_del(node);
- icc_node_destroy(node->id);
- }
-
+ icc_nodes_remove(provider);
icc_provider_del(provider);
return ret;
}
@@ -867,15 +863,9 @@ err:
static int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
- struct icc_provider *provider = &qp->provider;
- struct icc_node *n, *tmp;
-
- list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
- icc_node_del(n);
- icc_node_destroy(n->id);
- }
- return icc_provider_del(provider);
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
}
static const struct of_device_id qnoc_of_match[] = {
diff --git a/drivers/interconnect/trace.h b/drivers/interconnect/trace.h
new file mode 100644
index 000000000000..3d668ff566bf
--- /dev/null
+++ b/drivers/interconnect/trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework tracepoints
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM interconnect
+
+#if !defined(_TRACE_INTERCONNECT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTERCONNECT_H
+
+#include <linux/interconnect.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(icc_set_bw,
+
+ TP_PROTO(struct icc_path *p, struct icc_node *n, int i,
+ u32 avg_bw, u32 peak_bw),
+
+ TP_ARGS(p, n, i, avg_bw, peak_bw),
+
+ TP_STRUCT__entry(
+ __string(path_name, p->name)
+ __string(dev, dev_name(p->reqs[i].dev))
+ __string(node_name, n->name)
+ __field(u32, avg_bw)
+ __field(u32, peak_bw)
+ __field(u32, node_avg_bw)
+ __field(u32, node_peak_bw)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path_name, p->name);
+ __assign_str(dev, dev_name(p->reqs[i].dev));
+ __assign_str(node_name, n->name);
+ __entry->avg_bw = avg_bw;
+ __entry->peak_bw = peak_bw;
+ __entry->node_avg_bw = n->avg_bw;
+ __entry->node_peak_bw = n->peak_bw;
+ ),
+
+ TP_printk("path=%s dev=%s node=%s avg_bw=%u peak_bw=%u agg_avg=%u agg_peak=%u",
+ __get_str(path_name),
+ __get_str(dev),
+ __get_str(node_name),
+ __entry->avg_bw,
+ __entry->peak_bw,
+ __entry->node_avg_bw,
+ __entry->node_peak_bw)
+);
+
+TRACE_EVENT(icc_set_bw_end,
+
+ TP_PROTO(struct icc_path *p, int ret),
+
+ TP_ARGS(p, ret),
+
+ TP_STRUCT__entry(
+ __string(path_name, p->name)
+ __string(dev, dev_name(p->reqs[0].dev))
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path_name, p->name);
+ __assign_str(dev, dev_name(p->reqs[0].dev));
+ __entry->ret = ret;
+ ),
+
+ TP_printk("path=%s dev=%s ret=%d",
+ __get_str(path_name),
+ __get_str(dev),
+ __entry->ret)
+);
+
+#endif /* _TRACE_INTERCONNECT_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
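trace.h follows the standard self-including tracepoint pattern: every includer sees the event declarations, and the one translation unit that defines CREATE_TRACE_POINTS before the include (core.c, per the hunk above) also emits the definitions when <trace/define_trace.h> re-includes the header. TRACE_INCLUDE_PATH is "." here, which is why the interconnect Makefile gained -I$(src) for core.o. Usage reduces to:

    /* exactly one .c file instantiates the events */
    #define CREATE_TRACE_POINTS
    #include "trace.h"

    /* callers then invoke the generated hooks */
    trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
    trace_icc_set_bw_end(path, ret);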
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 0b9d78a0f3ac..d2fade984999 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -82,7 +82,7 @@ config IOMMU_DEBUGFS
config IOMMU_DEFAULT_PASSTHROUGH
bool "IOMMU passthrough by default"
depends on IOMMU_API
- help
+ help
Enable passthrough by default, removing the need to pass in
iommu.passthrough=on or iommu=pt through command line. If this
is enabled, you can still disable with iommu.passthrough=off
@@ -91,8 +91,8 @@ config IOMMU_DEFAULT_PASSTHROUGH
If unsure, say N here.
config OF_IOMMU
- def_bool y
- depends on OF && IOMMU_API
+ def_bool y
+ depends on OF && IOMMU_API
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
@@ -214,6 +214,7 @@ config INTEL_IOMMU_SVM
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
+ select IOASID
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
@@ -248,6 +249,18 @@ config INTEL_IOMMU_FLOPPY_WA
workaround will set up a 1:1 mapping for the first
16MiB to make floppy (an ISA device) work.
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+ bool "Enable Intel IOMMU scalable mode by default"
+ depends on INTEL_IOMMU
+ help
+ Selecting this option enables scalable mode by default when the
+ hardware advertises the capability. Scalable mode is defined in
+ VT-d 3.0, and support for it can be checked by reading
+ /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+ is not selected, scalable mode can still be enabled by passing
+ intel_iommu=sm_on on the kernel command line. If unsure, keep
+ the default value.
+
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
@@ -356,7 +369,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
- bool "ARM Ltd. System MMU (SMMU) Support"
+ tristate "ARM Ltd. System MMU (SMMU) Support"
depends on (ARM64 || ARM) && MMU
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
@@ -368,6 +381,18 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
+config ARM_SMMU_LEGACY_DT_BINDINGS
+ bool "Support the legacy \"mmu-masters\" devicetree bindings"
+ depends on ARM_SMMU=y && OF
+ help
+ Support for the badly designed and deprecated "mmu-masters"
+ devicetree bindings. This allows some DMA masters to attach
+ to the SMMU but does not provide any support via the DMA API.
+ If you're lucky, you might be able to get VFIO up and running.
+
+ If you say Y here then you'll make me very sad. Instead, say N
+ and move your firmware to the utopian future that was 2016.
+
config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
bool "Default to disabling bypass on ARM SMMU v1 and v2"
depends on ARM_SMMU
@@ -394,7 +419,7 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
config.
config ARM_SMMU_V3
- bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 97814cc861ea..2104fb8afc06 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
+arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bd25674ee4db..aac132bd1ef0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -230,11 +230,8 @@ static struct pci_dev *setup_aliases(struct device *dev)
*/
ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
if (ivrs_alias != pci_dev_id(pdev) &&
- PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
- pci_add_dma_alias(pdev, ivrs_alias & 0xff);
- pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
- PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
- }
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
+ pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
clone_aliases(pdev);
@@ -2297,7 +2294,6 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
- iommu_detected = 1;
if (amd_iommu_unmap_flush)
pr_info("IO/TLB flush on unmap enabled\n");
@@ -2641,15 +2637,6 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
-static void amd_iommu_put_resv_regions(struct device *dev,
- struct list_head *head)
-{
- struct iommu_resv_region *entry, *next;
-
- list_for_each_entry_safe(entry, next, head, list)
- kfree(entry);
-}
-
static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
@@ -2688,7 +2675,7 @@ const struct iommu_ops amd_iommu_ops = {
.device_group = amd_iommu_device_group,
.domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
- .put_resv_regions = amd_iommu_put_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
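generic_iommu_put_resv_regions() substituted here performs the same safe-iteration free that the deleted AMD helper open-coded, so the switch is behavior-neutral; for reference:

    /* equivalent of the removed amd_iommu_put_resv_regions() */
    static void put_resv_regions(struct device *dev, struct list_head *head)
    {
            struct iommu_resv_region *entry, *next;

            list_for_each_entry_safe(entry, next, head, list)
                    kfree(entry);
    }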
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 568c52317757..2759a8d57b7f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -71,6 +71,8 @@
#define IVHD_FLAG_ISOC_EN_MASK 0x08
#define IVMD_FLAG_EXCL_RANGE 0x08
+#define IVMD_FLAG_IW 0x04
+#define IVMD_FLAG_IR 0x02
#define IVMD_FLAG_UNITY_MAP 0x01
#define ACPI_DEVFLAG_INITPASS 0x01
@@ -147,7 +149,7 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -440,7 +442,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
return NULL;
}
- return (u8 __iomem *)ioremap_nocache(address, end);
+ return (u8 __iomem *)ioremap(address, end);
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -714,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
- iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
iommu_feature_enable(iommu, CONTROL_PPR_EN);
}
@@ -1116,21 +1118,17 @@ static int __init add_early_maps(void)
*/
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
return;
- if (iommu) {
- /*
- * We only can configure exclusion ranges per IOMMU, not
- * per device. But we can enable the exclusion range per
- * device. This is done here
- */
- set_dev_entry_bit(devid, DEV_ENTRY_EX);
- iommu->exclusion_start = m->range_start;
- iommu->exclusion_length = m->range_length;
- }
+ /*
+ * Treat per-device exclusion ranges as r/w unity-mapped regions
+ * since some buggy BIOSes might otherwise overwrite the exclusion
+ * range (the exclusion_start and exclusion_length members). This
+ * happens when multiple exclusion ranges (IVMD entries) are
+ * defined in the ACPI table.
+ */
+ m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
}
/*
@@ -1523,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
- if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
- amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
case 0x11:
case 0x40:
@@ -1534,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
- if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
- amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ /*
+ * Note: Since iommu_update_intcapxt() leverages
+ * the IOMMU MMIO access to MSI capability block registers
+ * for MSI address lo/hi/data, we need to check both
+ * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
+ */
+ if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+ (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+ amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
break;
default:
return -EINVAL;
@@ -1655,27 +1658,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg = 0;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
+ /* save the value to restore, if writable */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ goto pc_false;
+
/* Check if the performance counters can be written to */
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2)) {
- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
- }
+ (val != val2))
+ goto pc_false;
+
+ /* restore */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+ return;
+
+pc_false:
+ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+ amd_iommu_pc_present = false;
+ return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
@@ -1715,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = {
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
- u32 range, misc, low, high;
int ret;
iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
@@ -1728,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
- &range);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
- &misc);
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
/* read extended feature bits */
- low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
- high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
- iommu->features = ((u64)high << 32) | low;
+ iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
@@ -1984,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
/**
- * IntCapXT requires XTSup=1, which can be inferred
- * amd_iommu_xt_mode.
+ * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+ * which can be inferred from amd_iommu_xt_mode.
*/
if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
return 0;
@@ -2032,7 +2039,7 @@ enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
- iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
iommu_ga_log_enable(iommu);
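The init_iommu_perf_ctr() rework brackets the writability probe with a save/restore so counter 0 is not left clobbered on the success path. Condensed, the sequence is:

    u64 val = 0xabcd, val2 = 0, save_reg = 0;

    iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false); /* save */
    iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true);       /* test write */
    iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false);     /* read back */
    /* writable iff val == val2; any failure lands in pc_false */
    iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true);  /* restore */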
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f52f59d5c6bd..f8d01d6b00da 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -147,8 +147,8 @@
#define CONTROL_COHERENT_EN 0x0aULL
#define CONTROL_ISOC_EN 0x0bULL
#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPFLOG_EN 0x0dULL
-#define CONTROL_PPFINT_EN 0x0eULL
+#define CONTROL_PPRLOG_EN 0x0dULL
+#define CONTROL_PPRINT_EN 0x0eULL
#define CONTROL_PPR_EN 0x0fULL
#define CONTROL_GT_EN 0x10ULL
#define CONTROL_GA_EN 0x11ULL
@@ -377,12 +377,12 @@
#define IOMMU_CAP_EFR 27
/* IOMMU Feature Reporting Field (for IVHD type 10h */
-#define IOMMU_FEAT_XTSUP_SHIFT 0
#define IOMMU_FEAT_GASUP_SHIFT 6
/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT 2
#define IOMMU_EFR_GASUP_SHIFT 7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT 46
#define MAX_DOMAIN_ID 65536
@@ -463,7 +463,6 @@ struct amd_irte_ops;
* independent of their use.
*/
struct protection_domain {
- struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index b2fe72a8f019..74d97a886e93 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -119,7 +119,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
* Secure has also cleared SACR.CACHE_LOCK for this to take effect...
*/
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
- major = FIELD_GET(ID7_MAJOR, reg);
+ major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
if (major >= 2)
reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
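The ID7_MAJOR to ARM_SMMU_ID7_MAJOR rename namespaces the register fields ahead of the driver becoming modular; the extraction itself is the usual bitfield idiom (field position assumed for illustration):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define ARM_SMMU_ID7_MAJOR GENMASK(7, 4) /* assumed field position */

    u32 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
    u32 major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);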
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index effe72eb89e7..aa3ac2a03807 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -21,8 +21,7 @@
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -224,9 +223,15 @@
#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
#define STRTAB_STE_0_S1FMT_LINEAR 0
+#define STRTAB_STE_0_S1FMT_64K_L2 2
#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)
+#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0)
+#define STRTAB_STE_1_S1DSS_TERMINATE 0x0
+#define STRTAB_STE_1_S1DSS_BYPASS 0x1
+#define STRTAB_STE_1_S1DSS_SSID0 0x2
+
#define STRTAB_STE_1_S1C_CACHE_NC 0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
#define STRTAB_STE_1_S1C_CACHE_WT 2UL
@@ -251,6 +256,13 @@
#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
+#define STRTAB_STE_2_VTCR_S2T0SZ GENMASK_ULL(5, 0)
+#define STRTAB_STE_2_VTCR_S2SL0 GENMASK_ULL(7, 6)
+#define STRTAB_STE_2_VTCR_S2IR0 GENMASK_ULL(9, 8)
+#define STRTAB_STE_2_VTCR_S2OR0 GENMASK_ULL(11, 10)
+#define STRTAB_STE_2_VTCR_S2SH0 GENMASK_ULL(13, 12)
+#define STRTAB_STE_2_VTCR_S2TG GENMASK_ULL(15, 14)
+#define STRTAB_STE_2_VTCR_S2PS GENMASK_ULL(18, 16)
#define STRTAB_STE_2_S2AA64 (1UL << 51)
#define STRTAB_STE_2_S2ENDI (1UL << 52)
#define STRTAB_STE_2_S2PTW (1UL << 54)
@@ -258,30 +270,34 @@
#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
-/* Context descriptor (stage-1 only) */
+/*
+ * Context descriptors.
+ *
+ * Linear: when less than 1024 SSIDs are supported
+ * 2lvl: at most 1024 L1 entries,
+ * 1024 lazy entries per table.
+ */
+#define CTXDESC_SPLIT 10
+#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
+
+#define CTXDESC_L1_DESC_DWORDS 1
+#define CTXDESC_L1_DESC_V (1UL << 0)
+#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
+
#define CTXDESC_CD_DWORDS 8
#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
-#define ARM64_TCR_T0SZ GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
-#define ARM64_TCR_TG0 GENMASK_ULL(15, 14)
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
-#define ARM64_TCR_IRGN0 GENMASK_ULL(9, 8)
#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
-#define ARM64_TCR_ORGN0 GENMASK_ULL(11, 10)
#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
-#define ARM64_TCR_SH0 GENMASK_ULL(13, 12)
#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
-#define ARM64_TCR_EPD0 (1ULL << 7)
#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)
-#define ARM64_TCR_EPD1 (1ULL << 23)
#define CTXDESC_CD_0_ENDI (1UL << 15)
#define CTXDESC_CD_0_V (1UL << 31)
#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
-#define ARM64_TCR_IPS GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
-#define ARM64_TCR_TBI0 (1ULL << 37)
#define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_S (1UL << 44)
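
The comment block above gives the shape of the two-level layout; concretely, an SSID splits at CTXDESC_SPLIT into an L1 index and an offset within a 1024-entry leaf table, which is exactly what arm_smmu_get_cd_ptr() computes further down. A quick sketch of the arithmetic:

#include <stdio.h>

#define CTXDESC_SPLIT		10
#define CTXDESC_L2_ENTRIES	(1 << CTXDESC_SPLIT)

int main(void)
{
	unsigned int ssid = 0x1234;
	unsigned int l1_idx = ssid >> CTXDESC_SPLIT;
	unsigned int l2_idx = ssid & (CTXDESC_L2_ENTRIES - 1);

	printf("ssid 0x%x -> L1 entry %u, leaf CD %u\n",
	       ssid, l1_idx, l2_idx);
	return 0;
}
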
@@ -292,9 +308,11 @@
#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
-/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
-#define ARM_SMMU_TCR2CD(tcr, fld) FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
- FIELD_GET(ARM64_TCR_##fld, tcr))
+/*
+ * When the SMMU only supports linear context descriptor tables, pick a
+ * reasonable size limit (64kB).
+ */
+#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
/* Command queue */
#define CMDQ_ENT_SZ_SHIFT 4
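
For reference, the CTXDESC_LINEAR_CDMAX expression above works out as follows (each CD is CTXDESC_CD_DWORDS 64-bit words):

CTXDESC_CD_DWORDS << 3 = 8 * 8	= 64 bytes per CD
SZ_64K / 64 bytes		= 1024 CDs
ilog2(1024)			= 10, i.e. at most 10 SSID bits
				  when only a linear table is possible
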
@@ -323,6 +341,7 @@
#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
+#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12)
#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_CFGI_1_LEAF (1UL << 0)
#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
@@ -384,10 +403,6 @@
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param_named here.
- */
static bool disable_bypass = 1;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
@@ -440,8 +455,11 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_CFGI_STE 0x3
#define CMDQ_OP_CFGI_ALL 0x4
+ #define CMDQ_OP_CFGI_CD 0x5
+ #define CMDQ_OP_CFGI_CD_ALL 0x6
struct {
u32 sid;
+ u32 ssid;
union {
bool leaf;
u8 span;
@@ -547,16 +565,30 @@ struct arm_smmu_strtab_l1_desc {
dma_addr_t l2ptr_dma;
};
+struct arm_smmu_ctx_desc {
+ u16 asid;
+ u64 ttbr;
+ u64 tcr;
+ u64 mair;
+};
+
+struct arm_smmu_l1_ctx_desc {
+ __le64 *l2ptr;
+ dma_addr_t l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc_cfg {
+ __le64 *cdtab;
+ dma_addr_t cdtab_dma;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+ unsigned int num_l1_ents;
+};
+
struct arm_smmu_s1_cfg {
- __le64 *cdptr;
- dma_addr_t cdptr_dma;
-
- struct arm_smmu_ctx_desc {
- u16 asid;
- u64 ttbr;
- u64 tcr;
- u64 mair;
- } cd;
+ struct arm_smmu_ctx_desc_cfg cdcfg;
+ struct arm_smmu_ctx_desc cd;
+ u8 s1fmt;
+ u8 s1cdmax;
};
struct arm_smmu_s2_cfg {
@@ -638,6 +670,7 @@ struct arm_smmu_master {
u32 *sids;
unsigned int num_sids;
bool ats_enabled;
+ unsigned int ssid_bits;
};
/* SMMU private data for an IOMMU domain */
@@ -847,15 +880,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
break;
+ case CMDQ_OP_CFGI_CD:
+ cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
+ /* Fallthrough */
case CMDQ_OP_CFGI_STE:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
break;
+ case CMDQ_OP_CFGI_CD_ALL:
+ cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+ break;
case CMDQ_OP_CFGI_ALL:
/* Cover the entire SID range */
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
@@ -1443,50 +1483,238 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
}
/* Context descriptor manipulation functions */
-static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
+static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+ int ssid, bool leaf)
{
- u64 val = 0;
+ size_t i;
+ unsigned long flags;
+ struct arm_smmu_master *master;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_CFGI_CD,
+ .cfgi = {
+ .ssid = ssid,
+ .leaf = leaf,
+ },
+ };
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ for (i = 0; i < master->num_sids; i++) {
+ cmd.cfgi.sid = master->sids[i];
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ }
+ }
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_cmdq_issue_sync(smmu);
+}
- /* Repack the TCR. Just care about TTBR0 for now */
- val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
- val |= ARM_SMMU_TCR2CD(tcr, TG0);
- val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
- val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
- val |= ARM_SMMU_TCR2CD(tcr, SH0);
- val |= ARM_SMMU_TCR2CD(tcr, EPD0);
- val |= ARM_SMMU_TCR2CD(tcr, EPD1);
- val |= ARM_SMMU_TCR2CD(tcr, IPS);
+static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+ struct arm_smmu_l1_ctx_desc *l1_desc)
+{
+ size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
- return val;
+ l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
+ &l1_desc->l2ptr_dma, GFP_KERNEL);
+ if (!l1_desc->l2ptr) {
+ dev_warn(smmu->dev,
+ "failed to allocate context descriptor table\n");
+ return -ENOMEM;
+ }
+ return 0;
}
-static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
- struct arm_smmu_s1_cfg *cfg)
+static void arm_smmu_write_cd_l1_desc(__le64 *dst,
+ struct arm_smmu_l1_ctx_desc *l1_desc)
{
- u64 val;
+ u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
+ CTXDESC_L1_DESC_V;
+
+ WRITE_ONCE(*dst, cpu_to_le64(val));
+}
+
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+ u32 ssid)
+{
+ __le64 *l1ptr;
+ unsigned int idx;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+ if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+ return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+
+ idx = ssid >> CTXDESC_SPLIT;
+ l1_desc = &cdcfg->l1_desc[idx];
+ if (!l1_desc->l2ptr) {
+ if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ return NULL;
+
+ l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+ arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+ /* An invalid L1CD can be cached */
+ arm_smmu_sync_cd(smmu_domain, ssid, false);
+ }
+ idx = ssid & (CTXDESC_L2_ENTRIES - 1);
+ return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+}
+
+static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+ int ssid, struct arm_smmu_ctx_desc *cd)
+{
/*
- * We don't need to issue any invalidation here, as we'll invalidate
- * the STE when installing the new entry anyway.
+ * This function handles the following cases:
+ *
+ * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+ * (2) Install a secondary CD, for SID+SSID traffic.
+ * (3) Update ASID of a CD. Atomically write the first 64 bits of the
+ * CD, then invalidate the old entry and mappings.
+ * (4) Remove a secondary CD.
*/
- val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
+ u64 val;
+ bool cd_live;
+ __le64 *cdptr;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+ return -E2BIG;
+
+ cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+ if (!cdptr)
+ return -ENOMEM;
+
+ val = le64_to_cpu(cdptr[0]);
+ cd_live = !!(val & CTXDESC_CD_0_V);
+
+ if (!cd) { /* (4) */
+ val = 0;
+ } else if (cd_live) { /* (3) */
+ val &= ~CTXDESC_CD_0_ASID;
+ val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
+ /*
+ * Until CD+TLB invalidation, both ASIDs may be used for tagging
+ * this substream's traffic
+ */
+ } else { /* (1) and (2) */
+ cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+ cdptr[2] = 0;
+ cdptr[3] = cpu_to_le64(cd->mair);
+
+ /*
+ * STE is live, and the SMMU might read dwords of this CD in any
+ * order. Ensure that it observes valid values before reading
+ * V=1.
+ */
+ arm_smmu_sync_cd(smmu_domain, ssid, true);
+
+ val = cd->tcr |
#ifdef __BIG_ENDIAN
- CTXDESC_CD_0_ENDI |
+ CTXDESC_CD_0_ENDI |
#endif
- CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
- CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
- CTXDESC_CD_0_V;
+ CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+ CTXDESC_CD_0_AA64 |
+ FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
+ CTXDESC_CD_0_V;
- /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
- if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
- val |= CTXDESC_CD_0_S;
+ /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+ if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ val |= CTXDESC_CD_0_S;
+ }
- cfg->cdptr[0] = cpu_to_le64(val);
+ /*
+ * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
+ * "Configuration structures and configuration invalidation completion"
+ *
+ * The size of single-copy atomic reads made by the SMMU is
+ * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
+ * field within an aligned 64-bit span of a structure can be altered
+ * without first making the structure invalid.
+ */
+ WRITE_ONCE(cdptr[0], cpu_to_le64(val));
+ arm_smmu_sync_cd(smmu_domain, ssid, true);
+ return 0;
+}
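
The ordering rules spelled out in the comments above boil down to a classic publish pattern: fill in the descriptor body, make it visible, and only then flip the valid bit. A hedged userspace analogue, where a release store stands in for the CD sync plus single-copy-atomic WRITE_ONCE() the driver uses, and CD_V mirrors CTXDESC_CD_0_V:

#include <stdatomic.h>
#include <stdint.h>

#define CD_V (1ULL << 31)	/* mirrors CTXDESC_CD_0_V */

struct cd {
	_Atomic uint64_t w0;	/* TCR bits plus the V flag */
	uint64_t w1, w2, w3;	/* TTB0, reserved, MAIR */
};

static void publish_cd(struct cd *cd, uint64_t tcr, uint64_t ttbr,
		       uint64_t mair)
{
	cd->w1 = ttbr;
	cd->w2 = 0;
	cd->w3 = mair;
	/*
	 * Release ordering: the body is observable before V=1 is, so a
	 * reader that sees the valid bit never sees a half-built entry.
	 */
	atomic_store_explicit(&cd->w0, tcr | CD_V, memory_order_release);
}

int main(void)
{
	static struct cd cd;

	publish_cd(&cd, 0x10, 0x40000000, 0xff);
	return 0;
}
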
+
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+ int ret;
+ size_t l1size;
+ size_t max_contexts;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+
+ max_contexts = 1 << cfg->s1cdmax;
+
+ if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
+ max_contexts <= CTXDESC_L2_ENTRIES) {
+ cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+ cdcfg->num_l1_ents = max_contexts;
- val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
- cfg->cdptr[1] = cpu_to_le64(val);
+ l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+ } else {
+ cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+ cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+ CTXDESC_L2_ENTRIES);
+
+ cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
+ sizeof(*cdcfg->l1_desc),
+ GFP_KERNEL);
+ if (!cdcfg->l1_desc)
+ return -ENOMEM;
+
+ l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ }
+
+ cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+ GFP_KERNEL);
+ if (!cdcfg->cdtab) {
+ dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+ ret = -ENOMEM;
+ goto err_free_l1;
+ }
+
+ return 0;
- cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
+err_free_l1:
+ if (cdcfg->l1_desc) {
+ devm_kfree(smmu->dev, cdcfg->l1_desc);
+ cdcfg->l1_desc = NULL;
+ }
+ return ret;
+}
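
The allocation above picks between the two layouts purely on size, and the break-even is easy to compute: up to 1024 contexts, a linear table is never larger than 64 kB, so the L1 indirection buys nothing. A sketch of the same decision; l1size() is illustrative, not the driver's helper:

#include <stdio.h>

#define CTXDESC_SPLIT		10
#define CTXDESC_L2_ENTRIES	(1 << CTXDESC_SPLIT)
#define CTXDESC_CD_DWORDS	8
#define CTXDESC_L1_DESC_DWORDS	1

static unsigned long l1size(unsigned int s1cdmax, int two_level)
{
	unsigned long max_contexts = 1UL << s1cdmax;

	if (!two_level || max_contexts <= CTXDESC_L2_ENTRIES)
		return max_contexts * (CTXDESC_CD_DWORDS << 3);

	/* One 8-byte L1 descriptor per 1024-entry leaf, rounded up. */
	return ((max_contexts + CTXDESC_L2_ENTRIES - 1) / CTXDESC_L2_ENTRIES)
		* (CTXDESC_L1_DESC_DWORDS << 3);
}

int main(void)
{
	printf("10 SSID bits, linear: %lu bytes\n", l1size(10, 0));
	printf("20 SSID bits, 2lvl:   %lu bytes\n", l1size(20, 1));
	return 0;
}
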
+
+static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+ int i;
+ size_t size, l1size;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+ if (cdcfg->l1_desc) {
+ size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+ for (i = 0; i < cdcfg->num_l1_ents; i++) {
+ if (!cdcfg->l1_desc[i].l2ptr)
+ continue;
+
+ dmam_free_coherent(smmu->dev, size,
+ cdcfg->l1_desc[i].l2ptr,
+ cdcfg->l1_desc[i].l2ptr_dma);
+ }
+ devm_kfree(smmu->dev, cdcfg->l1_desc);
+ cdcfg->l1_desc = NULL;
+
+ l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ } else {
+ l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+ }
+
+ dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
+ cdcfg->cdtab_dma = 0;
+ cdcfg->cdtab = NULL;
}
/* Stream table manipulation functions */
@@ -1608,6 +1836,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (s1_cfg) {
BUG_ON(ste_live);
dst[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
@@ -1617,8 +1846,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
- val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
- FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
+ val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
}
if (s2_cfg) {
@@ -1642,7 +1873,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_1_EATS_TRANS));
arm_smmu_sync_ste_for_sid(smmu, sid);
- dst[0] = cpu_to_le64(val);
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(dst[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
@@ -1675,7 +1907,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
desc->span = STRTAB_SPLIT + 1;
desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!desc->l2ptr) {
dev_err(smmu->dev,
"failed to allocate l2 stream table for SID %u\n",
@@ -2131,12 +2363,8 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
- if (cfg->cdptr) {
- dmam_free_coherent(smmu_domain->smmu->dev,
- CTXDESC_CD_DWORDS << 3,
- cfg->cdptr,
- cfg->cdptr_dma);
-
+ if (cfg->cdcfg.cdtab) {
+ arm_smmu_free_cd_tables(smmu_domain);
arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
}
} else {
@@ -2149,55 +2377,82 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master *master,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
int asid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
if (asid < 0)
return asid;
- cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
- &cfg->cdptr_dma,
- GFP_KERNEL | __GFP_ZERO);
- if (!cfg->cdptr) {
- dev_warn(smmu->dev, "failed to allocate context descriptor\n");
- ret = -ENOMEM;
+ cfg->s1cdmax = master->ssid_bits;
+
+ ret = arm_smmu_alloc_cd_tables(smmu_domain);
+ if (ret)
goto out_free_asid;
- }
cfg->cd.asid = (u16)asid;
- cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+ cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ cfg->cd.tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+ CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+
+ /*
+ * Note that this will end up calling arm_smmu_sync_cd() before
+ * the master has been added to the devices list for this domain.
+ * This isn't an issue because the STE hasn't been installed yet.
+ */
+ ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+ if (ret)
+ goto out_free_cd_tables;
+
return 0;
+out_free_cd_tables:
+ arm_smmu_free_cd_tables(smmu_domain);
out_free_asid:
arm_smmu_bitmap_free(smmu->asid_map, asid);
return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master *master,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
if (vmid < 0)
return vmid;
+ vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
cfg->vmid = (u16)vmid;
cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
return 0;
}
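
Where the old code copied the io-pgtable VTCR word wholesale, the new code repacks each field explicitly with FIELD_PREP(), so the STE layout no longer depends on io-pgtable's internal encoding. A userspace sketch of the packing, again with simplified stand-ins for the bitfield helpers:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) * ((mask) & ~((mask) << 1))) & (mask))

#define VTCR_S2T0SZ	GENMASK_ULL(5, 0)
#define VTCR_S2SL0	GENMASK_ULL(7, 6)
#define VTCR_S2TG	GENMASK_ULL(15, 14)

int main(void)
{
	/* Example field values, not taken from real hardware. */
	uint64_t vtcr = FIELD_PREP(VTCR_S2T0SZ, 16) |
			FIELD_PREP(VTCR_S2SL0, 1) |
			FIELD_PREP(VTCR_S2TG, 0);

	printf("vtcr = 0x%llx\n", (unsigned long long)vtcr);
	return 0;
}
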
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct iommu_domain *domain,
+ struct arm_smmu_master *master)
{
int ret;
unsigned long ias, oas;
@@ -2205,6 +2460,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
int (*finalise_stage_fn)(struct arm_smmu_domain *,
+ struct arm_smmu_master *,
struct io_pgtable_cfg *);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2259,7 +2515,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
domain->geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -2412,7 +2668,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!smmu_domain->smmu) {
smmu_domain->smmu = smmu;
- ret = arm_smmu_domain_finalise(domain);
+ ret = arm_smmu_domain_finalise(domain, master);
if (ret) {
smmu_domain->smmu = NULL;
goto out_unlock;
@@ -2424,6 +2680,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(smmu->dev));
ret = -ENXIO;
goto out_unlock;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+ master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+ dev_err(dev,
+ "cannot attach to incompatible domain (%u SSID bits != %u)\n",
+ smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
+ ret = -EINVAL;
+ goto out_unlock;
}
master->domain = smmu_domain;
@@ -2431,9 +2694,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
master->ats_enabled = arm_smmu_ats_supported(master);
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
- arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
-
arm_smmu_install_ste_for_dev(master);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2534,51 +2794,66 @@ static int arm_smmu_add_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return -ENODEV;
- /*
- * We _can_ actually withstand dodgy bus code re-calling add_device()
- * without an intervening remove_device()/of_xlate() sequence, but
- * we're not going to do so quietly...
- */
- if (WARN_ON_ONCE(fwspec->iommu_priv)) {
- master = fwspec->iommu_priv;
- smmu = master->smmu;
- } else {
- smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
- if (!smmu)
- return -ENODEV;
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return -ENOMEM;
- master->dev = dev;
- master->smmu = smmu;
- master->sids = fwspec->ids;
- master->num_sids = fwspec->num_ids;
- fwspec->iommu_priv = master;
- }
+ if (WARN_ON_ONCE(fwspec->iommu_priv))
+ return -EBUSY;
+
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+ if (!smmu)
+ return -ENODEV;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = dev;
+ master->smmu = smmu;
+ master->sids = fwspec->ids;
+ master->num_sids = fwspec->num_ids;
+ fwspec->iommu_priv = master;
/* Check the SIDs are in range of the SMMU and our stream table */
for (i = 0; i < master->num_sids; i++) {
u32 sid = master->sids[i];
- if (!arm_smmu_sid_in_range(smmu, sid))
- return -ERANGE;
+ if (!arm_smmu_sid_in_range(smmu, sid)) {
+ ret = -ERANGE;
+ goto err_free_master;
+ }
/* Ensure l2 strtab is initialised */
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
ret = arm_smmu_init_l2_strtab(smmu, sid);
if (ret)
- return ret;
+ goto err_free_master;
}
}
+ master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+
+ if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
+ master->ssid_bits = min_t(u8, master->ssid_bits,
+ CTXDESC_LINEAR_CDMAX);
+
+ ret = iommu_device_link(&smmu->iommu, dev);
+ if (ret)
+ goto err_free_master;
+
group = iommu_group_get_for_dev(dev);
- if (!IS_ERR(group)) {
- iommu_group_put(group);
- iommu_device_link(&smmu->iommu, dev);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto err_unlink;
}
- return PTR_ERR_OR_ZERO(group);
+ iommu_group_put(group);
+ return 0;
+
+err_unlink:
+ iommu_device_unlink(&smmu->iommu, dev);
+err_free_master:
+ kfree(master);
+ fwspec->iommu_priv = NULL;
+ return ret;
}
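
The restructured add_device path above replaces bare early returns, which leaked the master and left fwspec->iommu_priv dangling, with labelled unwinding: each error label undoes exactly the steps that succeeded, in reverse order. The shape of the pattern, with made-up names:

#include <errno.h>
#include <stdlib.h>

struct master { int dummy; };

static int check_sids(struct master *m) { (void)m; return 0; }

static int add_device(struct master **out)
{
	int ret;
	struct master *master = calloc(1, sizeof(*master));

	if (!master)
		return -ENOMEM;

	ret = check_sids(master);
	if (ret)
		goto err_free_master;	/* undo only what succeeded */

	*out = master;
	return 0;

err_free_master:
	free(master);
	return ret;
}

int main(void)
{
	struct master *m;

	return add_device(&m) ? 1 : 0;
}
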
static void arm_smmu_remove_device(struct device *dev)
@@ -2710,15 +2985,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static void arm_smmu_put_resv_regions(struct device *dev,
- struct list_head *head)
-{
- struct iommu_resv_region *entry, *next;
-
- list_for_each_entry_safe(entry, next, head, list)
- kfree(entry);
-}
-
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -2736,7 +3002,7 @@ static struct iommu_ops arm_smmu_ops = {
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = arm_smmu_put_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -2883,7 +3149,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!strtab) {
dev_err(smmu->dev,
"failed to allocate l1 stream table (%u bytes)\n",
@@ -2910,7 +3176,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!strtab) {
dev_err(smmu->dev,
"failed to allocate linear stream table (%u bytes)\n",
@@ -3570,6 +3836,43 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
return SZ_128K;
}
+static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
+{
+ int err;
+
+#ifdef CONFIG_PCI
+ if (pci_bus_type.iommu_ops != ops) {
+ err = bus_set_iommu(&pci_bus_type, ops);
+ if (err)
+ return err;
+ }
+#endif
+#ifdef CONFIG_ARM_AMBA
+ if (amba_bustype.iommu_ops != ops) {
+ err = bus_set_iommu(&amba_bustype, ops);
+ if (err)
+ goto err_reset_pci_ops;
+ }
+#endif
+ if (platform_bus_type.iommu_ops != ops) {
+ err = bus_set_iommu(&platform_bus_type, ops);
+ if (err)
+ goto err_reset_amba_ops;
+ }
+
+ return 0;
+
+err_reset_amba_ops:
+#ifdef CONFIG_ARM_AMBA
+ bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+ bus_set_iommu(&pci_bus_type, NULL);
+#endif
+ return err;
+}
+
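
One detail worth calling out in arm_smmu_set_bus_ops() is the `err_reset_pci_ops: __maybe_unused;` label: with CONFIG_ARM_AMBA disabled nothing jumps to it, and the attribute keeps -Wunused-label quiet in those configurations. A compilable sketch of the idiom, with made-up names:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

#ifdef HAVE_OPTIONAL_STEP
static int optional_step(void) { return -1; }
#endif

static int setup(void)
{
#ifdef HAVE_OPTIONAL_STEP
	if (optional_step())
		goto err_undo;
#endif
	return 0;

err_undo: __maybe_unused;
	printf("undoing first step\n");
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
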
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -3599,7 +3902,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
+ if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
}
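
The one-liner above fixes an off-by-one: resource_size() already returns end - start + 1, so the extra "+ 1" let a region exactly one byte too small pass the check. A simplified demonstration (struct resource is pared down here, and the values are made up):

#include <stdio.h>

struct resource { unsigned long start, end; };

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource res = { 0x40000000, 0x4001fffe }; /* 128 kB - 1 */
	unsigned long need = 0x20000;			  /* SZ_128K */

	printf("old check rejects: %d\n", resource_size(&res) + 1 < need);
	printf("new check rejects: %d\n", resource_size(&res) < need);
	return 0;
}
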
@@ -3660,48 +3963,45 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
- pci_request_acs();
- ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- if (ret)
- return ret;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != &arm_smmu_ops) {
- ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
- if (ret)
- return ret;
- }
-#endif
- if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
- ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
- if (ret)
- return ret;
- }
- return 0;
+ return arm_smmu_set_bus_ops(&arm_smmu_ops);
}
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+ arm_smmu_set_bus_ops(NULL);
+ iommu_device_unregister(&smmu->iommu);
+ iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
+
+ return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+ arm_smmu_device_remove(pdev);
}
static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,smmu-v3", },
{ },
};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static struct platform_driver arm_smmu_driver = {
.driver = {
- .name = "arm-smmu-v3",
- .of_match_table = of_match_ptr(arm_smmu_of_match),
- .suppress_bind_attrs = true,
+ .name = "arm-smmu-v3",
+ .of_match_table = arm_smmu_of_match,
+ .suppress_bind_attrs = true,
},
.probe = arm_smmu_device_probe,
+ .remove = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu-v3");
+MODULE_LICENSE("GPL v2");
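
With remove() wired up and the bus ops torn down on the way out, the driver can be built as a loadable module; module_platform_driver() and the MODULE_* tags above are the standard boilerplate for that. A minimal skeleton of the same shape, with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
	},
	.probe	= demo_probe,
	.remove	= demo_remove,
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("Minimal loadable platform driver skeleton");
MODULE_LICENSE("GPL v2");
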
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4f1a350d9529..16c4b87af42b 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -27,8 +27,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -60,10 +59,6 @@
#define MSI_IOVA_LENGTH 0x100000
static int force_stage;
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param() here.
- */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
@@ -131,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
return container_of(dom, struct arm_smmu_domain, domain);
}
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
+static int arm_smmu_bus_init(struct iommu_ops *ops);
+
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -166,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
return err == -ENOENT ? 0 : err;
}
-static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
-
static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu)
{
@@ -220,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev,
return err;
}
+/*
+ * With the legacy DT binding in play, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+ if (using_legacy_binding)
+ return arm_smmu_bus_init(&arm_smmu_ops);
+ return 0;
+}
+device_initcall_sync(arm_smmu_legacy_bus_init);
+#else
+static int arm_smmu_register_legacy_master(struct device *dev,
+ struct arm_smmu_device **smmu)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
+
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
int idx;
@@ -252,7 +271,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
reg = arm_smmu_readl(smmu, page, status);
- if (!(reg & sTLBGSTATUS_GSACTIVE))
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
return;
cpu_relax();
}
@@ -459,7 +478,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
int idx = smmu_domain->cfg.cbndx;
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
- if (!(fsr & FSR_FAULT))
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
@@ -491,7 +510,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
if (__ratelimit(&rs)) {
if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
- (gfsr & sGFSR_USF))
+ (gfsr & ARM_SMMU_sGFSR_USF))
dev_err(smmu->dev,
"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
(u16)gfsynr1);
@@ -521,26 +540,28 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
} else {
- cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
+ cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
+ cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
- cb->tcr[1] |= TCR2_AS;
+ cb->tcr[1] |= ARM_SMMU_TCR2_AS;
+ else
+ cb->tcr[0] |= ARM_SMMU_TCR_EAE;
}
} else {
- cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
}
/* TTBRs */
if (stage1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
- cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
- cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+ cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
+ cb->ttbr[1] = 0;
} else {
- cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
- cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
+ cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+ cfg->asid);
+ cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+ cfg->asid);
}
} else {
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -576,31 +597,33 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
/* CBA2R */
if (smmu->version > ARM_SMMU_V1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
- reg = CBA2R_VA64;
+ reg = ARM_SMMU_CBA2R_VA64;
else
reg = 0;
/* 16-bit VMIDs live in CBA2R */
if (smmu->features & ARM_SMMU_FEAT_VMID16)
- reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
+ reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
}
/* CBAR */
- reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
+ reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
if (smmu->version < ARM_SMMU_V2)
- reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
+ reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
/*
* Use the weakest shareability/memory types, so they are
* overridden by the ttbcr/pte.
*/
if (stage1) {
- reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
- FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
+ reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
+ ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
+ FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
+ ARM_SMMU_CBAR_S1_MEMATTR_WB);
} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
/* 8-bit VMIDs live in CBAR */
- reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
+ reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
}
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
@@ -632,11 +655,12 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
}
/* SCTLR */
- reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
+ reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
+ ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
if (stage1)
- reg |= SCTLR_S1_ASIDPNE;
+ reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
- reg |= SCTLR_E;
+ reg |= ARM_SMMU_SCTLR_E;
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
@@ -818,7 +842,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
- cfg->irptndx = INVALID_IRPTNDX;
+ cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
}
mutex_unlock(&smmu_domain->init_mutex);
@@ -856,7 +880,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
smmu->cbs[cfg->cbndx].cfg = NULL;
arm_smmu_write_context_bank(smmu, cfg->cbndx);
- if (cfg->irptndx != INVALID_IRPTNDX) {
+ if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
devm_free_irq(smmu->dev, irq, domain);
}
@@ -912,23 +936,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
struct arm_smmu_smr *smr = smmu->smrs + idx;
- u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
+ u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
+ FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
- reg |= SMR_VALID;
+ reg |= ARM_SMMU_SMR_VALID;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}
static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
- u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
- FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
- FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
+ u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
smmu->smrs[idx].valid)
- reg |= S2CR_EXIDVALID;
+ reg |= ARM_SMMU_S2CR_EXIDVALID;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}
@@ -946,24 +971,37 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
u32 smr;
+ int i;
if (!smmu->smrs)
return;
-
+ /*
+ * If we've had to accommodate firmware memory regions, we may
+ * have live SMRs by now; tread carefully...
+ *
+ * Somewhat perversely, not having a free SMR for this test implies we
+ * can get away without it anyway, as we'll only be able to 'allocate'
+ * these SMRs for the ID/mask values we're already trusting to be OK.
+ */
+ for (i = 0; i < smmu->num_mapping_groups; i++)
+ if (!smmu->smrs[i].valid)
+ goto smr_ok;
+ return;
+smr_ok:
/*
* SMR.ID bits may not be preserved if the corresponding MASK
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
- smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
- smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
- smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
+ smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+ smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
- smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
- smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
- smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
+ smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+ smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
}
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
@@ -1032,8 +1070,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
mutex_lock(&smmu->stream_map_mutex);
/* Figure out a viable stream map entry allocation */
for_each_cfg_sme(fwspec, i, idx) {
- u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
- u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+ u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+ u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
if (idx != INVALID_SMENDX) {
ret = -EEXIST;
@@ -1277,7 +1315,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
- if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
+ if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
+ 5, 50)) {
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
dev_err(dev,
"iova to phys timed out on %pad. Falling back to software table walk.\n",
@@ -1287,7 +1326,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- if (phys & CB_PAR_F) {
+ if (phys & ARM_SMMU_CB_PAR_F) {
dev_err(dev, "translation fault!\n");
dev_err(dev, "PAR = 0x%llx\n", phys);
return 0;
@@ -1368,8 +1407,8 @@ static int arm_smmu_add_device(struct device *dev)
ret = -EINVAL;
for (i = 0; i < fwspec->num_ids; i++) {
- u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
- u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+ u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+ u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
if (sid & ~smmu->streamid_mask) {
dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
@@ -1550,12 +1589,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
u32 mask, fwid = 0;
if (args->args_count > 0)
- fwid |= FIELD_PREP(SMR_ID, args->args[0]);
+ fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
if (args->args_count > 1)
- fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
+ fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
- fwid |= FIELD_PREP(SMR_MASK, mask);
+ fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
@@ -1576,15 +1615,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static void arm_smmu_put_resv_regions(struct device *dev,
- struct list_head *head)
-{
- struct iommu_resv_region *entry, *next;
-
- list_for_each_entry_safe(entry, next, head, list)
- kfree(entry);
-}
-
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1602,7 +1632,7 @@ static struct iommu_ops arm_smmu_ops = {
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = arm_smmu_put_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1625,7 +1655,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) {
arm_smmu_write_context_bank(smmu, i);
- arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
+ arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
}
/* Invalidate the TLB, just in case */
@@ -1635,29 +1665,30 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
/* Enable fault reporting */
- reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+ reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
+ ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
/* Disable TLB broadcasting. */
- reg |= (sCR0_VMIDPNE | sCR0_PTM);
+ reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
/* Enable client access, handling unmatched streams as appropriate */
- reg &= ~sCR0_CLIENTPD;
+ reg &= ~ARM_SMMU_sCR0_CLIENTPD;
if (disable_bypass)
- reg |= sCR0_USFCFG;
+ reg |= ARM_SMMU_sCR0_USFCFG;
else
- reg &= ~sCR0_USFCFG;
+ reg &= ~ARM_SMMU_sCR0_USFCFG;
/* Disable forced broadcasting */
- reg &= ~sCR0_FB;
+ reg &= ~ARM_SMMU_sCR0_FB;
/* Don't upgrade barriers */
- reg &= ~(sCR0_BSU);
+ reg &= ~(ARM_SMMU_sCR0_BSU);
if (smmu->features & ARM_SMMU_FEAT_VMID16)
- reg |= sCR0_VMID16EN;
+ reg |= ARM_SMMU_sCR0_VMID16EN;
if (smmu->features & ARM_SMMU_FEAT_EXIDS)
- reg |= sCR0_EXIDENABLE;
+ reg |= ARM_SMMU_sCR0_EXIDENABLE;
if (smmu->impl && smmu->impl->reset)
smmu->impl->reset(smmu);
@@ -1702,21 +1733,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* Restrict available stages based on module parameter */
if (force_stage == 1)
- id &= ~(ID0_S2TS | ID0_NTS);
+ id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
else if (force_stage == 2)
- id &= ~(ID0_S1TS | ID0_NTS);
+ id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
- if (id & ID0_S1TS) {
+ if (id & ARM_SMMU_ID0_S1TS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
dev_notice(smmu->dev, "\tstage 1 translation\n");
}
- if (id & ID0_S2TS) {
+ if (id & ARM_SMMU_ID0_S2TS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
dev_notice(smmu->dev, "\tstage 2 translation\n");
}
- if (id & ID0_NTS) {
+ if (id & ARM_SMMU_ID0_NTS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
dev_notice(smmu->dev, "\tnested translation\n");
}
@@ -1727,8 +1758,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- if ((id & ID0_S1TS) &&
- ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
+ if ((id & ARM_SMMU_ID0_S1TS) &&
+ ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
dev_notice(smmu->dev, "\taddress translation ops\n");
}
@@ -1739,7 +1770,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* Fortunately, this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly.
*/
- cttw_reg = !!(id & ID0_CTTW);
+ cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
cttw_fw ? "" : "non-");
@@ -1748,16 +1779,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
"\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
- if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+ if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
smmu->features |= ARM_SMMU_FEAT_EXIDS;
size = 1 << 16;
} else {
- size = 1 << FIELD_GET(ID0_NUMSIDB, id);
+ size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
}
smmu->streamid_mask = size - 1;
- if (id & ID0_SMS) {
+ if (id & ARM_SMMU_ID0_SMS) {
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
- size = FIELD_GET(ID0_NUMSMRG, id);
+ size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
if (size == 0) {
dev_err(smmu->dev,
"stream-matching supported, but no SMRs present!\n");
@@ -1785,18 +1816,19 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
mutex_init(&smmu->stream_map_mutex);
spin_lock_init(&smmu->global_sync_lock);
- if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+ if (smmu->version < ARM_SMMU_V2 ||
+ !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
- if (!(id & ID0_PTFS_NO_AARCH32S))
+ if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
}
/* ID1 */
id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
- smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
+ smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
/* Check for size mismatch of SMMU address space from mapped region */
- size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
+ size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
if (smmu->numpage != 2 * size << smmu->pgshift)
dev_warn(smmu->dev,
"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
@@ -1804,8 +1836,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
smmu->numpage = size;
- smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
- smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
+ smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
+ smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
if (smmu->num_s2_context_banks > smmu->num_context_banks) {
dev_err(smmu->dev, "impossible number of S2 context banks!\n");
return -ENODEV;
@@ -1819,14 +1851,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID2 */
id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
- size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
+ size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
smmu->ipa_size = size;
/* The output mask is also applied for bypass */
- size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
+ size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
smmu->pa_size = size;
- if (id & ID2_VMID16)
+ if (id & ARM_SMMU_ID2_VMID16)
smmu->features |= ARM_SMMU_FEAT_VMID16;
/*
@@ -1843,13 +1875,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
if (smmu->version == ARM_SMMU_V1_64K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
} else {
- size = FIELD_GET(ID2_UBS, id);
+ size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
smmu->va_size = arm_smmu_id_size_to_bits(size);
- if (id & ID2_PTFS_4K)
+ if (id & ARM_SMMU_ID2_PTFS_4K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
- if (id & ID2_PTFS_16K)
+ if (id & ARM_SMMU_ID2_PTFS_16K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
- if (id & ID2_PTFS_64K)
+ if (id & ARM_SMMU_ID2_PTFS_64K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
}
@@ -1911,6 +1943,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
{ },
};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
@@ -1997,8 +2030,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) {
- if (!using_legacy_binding)
- pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+ if (!using_legacy_binding) {
+ pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
+ IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
+ }
using_legacy_binding = true;
} else if (!legacy_binding && !using_legacy_binding) {
using_generic_binding = true;
@@ -2013,25 +2048,50 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
return 0;
}
-static void arm_smmu_bus_init(void)
+static int arm_smmu_bus_init(struct iommu_ops *ops)
{
+ int err;
+
/* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (!iommu_present(&platform_bus_type)) {
+ err = bus_set_iommu(&platform_bus_type, ops);
+ if (err)
+ return err;
+ }
#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype))
- bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+ if (!iommu_present(&amba_bustype)) {
+ err = bus_set_iommu(&amba_bustype, ops);
+ if (err)
+ goto err_reset_platform_ops;
+ }
#endif
#ifdef CONFIG_PCI
if (!iommu_present(&pci_bus_type)) {
- pci_request_acs();
- bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ err = bus_set_iommu(&pci_bus_type, ops);
+ if (err)
+ goto err_reset_amba_ops;
}
#endif
#ifdef CONFIG_FSL_MC_BUS
- if (!iommu_present(&fsl_mc_bus_type))
- bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+ if (!iommu_present(&fsl_mc_bus_type)) {
+ err = bus_set_iommu(&fsl_mc_bus_type, ops);
+ if (err)
+ goto err_reset_pci_ops;
+ }
#endif
+ return 0;
+
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+ bus_set_iommu(&pci_bus_type, NULL);
+#endif
+err_reset_amba_ops: __maybe_unused;
+#ifdef CONFIG_ARM_AMBA
+ bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_platform_ops: __maybe_unused;
+ bus_set_iommu(&platform_bus_type, NULL);
+ return err;
}
static int arm_smmu_device_probe(struct platform_device *pdev)
@@ -2177,38 +2237,28 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
* ready to handle default domain setup as soon as any SMMU exists.
*/
if (!using_legacy_binding)
- arm_smmu_bus_init();
+ return arm_smmu_bus_init(&arm_smmu_ops);
return 0;
}
-/*
- * With the legacy DT binding in play, though, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
- if (using_legacy_binding)
- arm_smmu_bus_init();
- return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
-
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
if (!smmu)
- return;
+ return -ENODEV;
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_err(&pdev->dev, "removing device with active domains!\n");
+ arm_smmu_bus_init(NULL);
+ iommu_device_unregister(&smmu->iommu);
+ iommu_device_sysfs_remove(&smmu->iommu);
+
arm_smmu_rpm_get(smmu);
/* Turn the thing off */
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
arm_smmu_rpm_put(smmu);
if (pm_runtime_enabled(smmu->dev))
@@ -2217,6 +2267,12 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
clk_bulk_disable(smmu->num_clks, smmu->clks);
clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+ return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+ arm_smmu_device_remove(pdev);
}
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
@@ -2267,11 +2323,17 @@ static const struct dev_pm_ops arm_smmu_pm_ops = {
static struct platform_driver arm_smmu_driver = {
.driver = {
.name = "arm-smmu",
- .of_match_table = of_match_ptr(arm_smmu_of_match),
+ .of_match_table = arm_smmu_of_match,
.pm = &arm_smmu_pm_ops,
- .suppress_bind_attrs = true,
+ .suppress_bind_attrs = true,
},
.probe = arm_smmu_device_probe,
+ .remove = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index 62b9f0cec49b..8d1cd54d82a6 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -11,6 +11,7 @@
#define _ARM_SMMU_H
#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -23,51 +24,51 @@
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
-#define sCR0_VMID16EN BIT(31)
-#define sCR0_BSU GENMASK(15, 14)
-#define sCR0_FB BIT(13)
-#define sCR0_PTM BIT(12)
-#define sCR0_VMIDPNE BIT(11)
-#define sCR0_USFCFG BIT(10)
-#define sCR0_GCFGFIE BIT(5)
-#define sCR0_GCFGFRE BIT(4)
-#define sCR0_EXIDENABLE BIT(3)
-#define sCR0_GFIE BIT(2)
-#define sCR0_GFRE BIT(1)
-#define sCR0_CLIENTPD BIT(0)
+#define ARM_SMMU_sCR0_VMID16EN BIT(31)
+#define ARM_SMMU_sCR0_BSU GENMASK(15, 14)
+#define ARM_SMMU_sCR0_FB BIT(13)
+#define ARM_SMMU_sCR0_PTM BIT(12)
+#define ARM_SMMU_sCR0_VMIDPNE BIT(11)
+#define ARM_SMMU_sCR0_USFCFG BIT(10)
+#define ARM_SMMU_sCR0_GCFGFIE BIT(5)
+#define ARM_SMMU_sCR0_GCFGFRE BIT(4)
+#define ARM_SMMU_sCR0_EXIDENABLE BIT(3)
+#define ARM_SMMU_sCR0_GFIE BIT(2)
+#define ARM_SMMU_sCR0_GFRE BIT(1)
+#define ARM_SMMU_sCR0_CLIENTPD BIT(0)
/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR 0x10
/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
-#define ID0_S1TS BIT(30)
-#define ID0_S2TS BIT(29)
-#define ID0_NTS BIT(28)
-#define ID0_SMS BIT(27)
-#define ID0_ATOSNS BIT(26)
-#define ID0_PTFS_NO_AARCH32 BIT(25)
-#define ID0_PTFS_NO_AARCH32S BIT(24)
-#define ID0_NUMIRPT GENMASK(23, 16)
-#define ID0_CTTW BIT(14)
-#define ID0_NUMSIDB GENMASK(12, 9)
-#define ID0_EXIDS BIT(8)
-#define ID0_NUMSMRG GENMASK(7, 0)
+#define ARM_SMMU_ID0_S1TS BIT(30)
+#define ARM_SMMU_ID0_S2TS BIT(29)
+#define ARM_SMMU_ID0_NTS BIT(28)
+#define ARM_SMMU_ID0_SMS BIT(27)
+#define ARM_SMMU_ID0_ATOSNS BIT(26)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32 BIT(25)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32S BIT(24)
+#define ARM_SMMU_ID0_NUMIRPT GENMASK(23, 16)
+#define ARM_SMMU_ID0_CTTW BIT(14)
+#define ARM_SMMU_ID0_NUMSIDB GENMASK(12, 9)
+#define ARM_SMMU_ID0_EXIDS BIT(8)
+#define ARM_SMMU_ID0_NUMSMRG GENMASK(7, 0)
#define ARM_SMMU_GR0_ID1 0x24
-#define ID1_PAGESIZE BIT(31)
-#define ID1_NUMPAGENDXB GENMASK(30, 28)
-#define ID1_NUMS2CB GENMASK(23, 16)
-#define ID1_NUMCB GENMASK(7, 0)
+#define ARM_SMMU_ID1_PAGESIZE BIT(31)
+#define ARM_SMMU_ID1_NUMPAGENDXB GENMASK(30, 28)
+#define ARM_SMMU_ID1_NUMS2CB GENMASK(23, 16)
+#define ARM_SMMU_ID1_NUMCB GENMASK(7, 0)
#define ARM_SMMU_GR0_ID2 0x28
-#define ID2_VMID16 BIT(15)
-#define ID2_PTFS_64K BIT(14)
-#define ID2_PTFS_16K BIT(13)
-#define ID2_PTFS_4K BIT(12)
-#define ID2_UBS GENMASK(11, 8)
-#define ID2_OAS GENMASK(7, 4)
-#define ID2_IAS GENMASK(3, 0)
+#define ARM_SMMU_ID2_VMID16 BIT(15)
+#define ARM_SMMU_ID2_PTFS_64K BIT(14)
+#define ARM_SMMU_ID2_PTFS_16K BIT(13)
+#define ARM_SMMU_ID2_PTFS_4K BIT(12)
+#define ARM_SMMU_ID2_UBS GENMASK(11, 8)
+#define ARM_SMMU_ID2_OAS GENMASK(7, 4)
+#define ARM_SMMU_ID2_IAS GENMASK(3, 0)
#define ARM_SMMU_GR0_ID3 0x2c
#define ARM_SMMU_GR0_ID4 0x30
@@ -75,11 +76,11 @@
#define ARM_SMMU_GR0_ID6 0x38
#define ARM_SMMU_GR0_ID7 0x3c
-#define ID7_MAJOR GENMASK(7, 4)
-#define ID7_MINOR GENMASK(3, 0)
+#define ARM_SMMU_ID7_MAJOR GENMASK(7, 4)
+#define ARM_SMMU_ID7_MINOR GENMASK(3, 0)
#define ARM_SMMU_GR0_sGFSR 0x48
-#define sGFSR_USF BIT(1)
+#define ARM_SMMU_sGFSR_USF BIT(1)
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
@@ -92,106 +93,132 @@
#define ARM_SMMU_GR0_sTLBGSYNC 0x70
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
-#define sTLBGSTATUS_GSACTIVE BIT(0)
+#define ARM_SMMU_sTLBGSTATUS_GSACTIVE BIT(0)
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
-#define SMR_VALID BIT(31)
-#define SMR_MASK GENMASK(31, 16)
-#define SMR_ID GENMASK(15, 0)
+#define ARM_SMMU_SMR_VALID BIT(31)
+#define ARM_SMMU_SMR_MASK GENMASK(31, 16)
+#define ARM_SMMU_SMR_ID GENMASK(15, 0)
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
-#define S2CR_PRIVCFG GENMASK(25, 24)
+#define ARM_SMMU_S2CR_PRIVCFG GENMASK(25, 24)
enum arm_smmu_s2cr_privcfg {
S2CR_PRIVCFG_DEFAULT,
S2CR_PRIVCFG_DIPAN,
S2CR_PRIVCFG_UNPRIV,
S2CR_PRIVCFG_PRIV,
};
-#define S2CR_TYPE GENMASK(17, 16)
+#define ARM_SMMU_S2CR_TYPE GENMASK(17, 16)
enum arm_smmu_s2cr_type {
S2CR_TYPE_TRANS,
S2CR_TYPE_BYPASS,
S2CR_TYPE_FAULT,
};
-#define S2CR_EXIDVALID BIT(10)
-#define S2CR_CBNDX GENMASK(7, 0)
+#define ARM_SMMU_S2CR_EXIDVALID BIT(10)
+#define ARM_SMMU_S2CR_CBNDX GENMASK(7, 0)
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
-#define CBAR_IRPTNDX GENMASK(31, 24)
-#define CBAR_TYPE GENMASK(17, 16)
+#define ARM_SMMU_CBAR_IRPTNDX GENMASK(31, 24)
+#define ARM_SMMU_CBAR_TYPE GENMASK(17, 16)
enum arm_smmu_cbar_type {
CBAR_TYPE_S2_TRANS,
CBAR_TYPE_S1_TRANS_S2_BYPASS,
CBAR_TYPE_S1_TRANS_S2_FAULT,
CBAR_TYPE_S1_TRANS_S2_TRANS,
};
-#define CBAR_S1_MEMATTR GENMASK(15, 12)
-#define CBAR_S1_MEMATTR_WB 0xf
-#define CBAR_S1_BPSHCFG GENMASK(9, 8)
-#define CBAR_S1_BPSHCFG_NSH 3
-#define CBAR_VMID GENMASK(7, 0)
+#define ARM_SMMU_CBAR_S1_MEMATTR GENMASK(15, 12)
+#define ARM_SMMU_CBAR_S1_MEMATTR_WB 0xf
+#define ARM_SMMU_CBAR_S1_BPSHCFG GENMASK(9, 8)
+#define ARM_SMMU_CBAR_S1_BPSHCFG_NSH 3
+#define ARM_SMMU_CBAR_VMID GENMASK(7, 0)
#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
-#define CBA2R_VMID16 GENMASK(31, 16)
-#define CBA2R_VA64 BIT(0)
+#define ARM_SMMU_CBA2R_VMID16 GENMASK(31, 16)
+#define ARM_SMMU_CBA2R_VA64 BIT(0)
#define ARM_SMMU_CB_SCTLR 0x0
-#define SCTLR_S1_ASIDPNE BIT(12)
-#define SCTLR_CFCFG BIT(7)
-#define SCTLR_CFIE BIT(6)
-#define SCTLR_CFRE BIT(5)
-#define SCTLR_E BIT(4)
-#define SCTLR_AFE BIT(2)
-#define SCTLR_TRE BIT(1)
-#define SCTLR_M BIT(0)
+#define ARM_SMMU_SCTLR_S1_ASIDPNE BIT(12)
+#define ARM_SMMU_SCTLR_CFCFG BIT(7)
+#define ARM_SMMU_SCTLR_CFIE BIT(6)
+#define ARM_SMMU_SCTLR_CFRE BIT(5)
+#define ARM_SMMU_SCTLR_E BIT(4)
+#define ARM_SMMU_SCTLR_AFE BIT(2)
+#define ARM_SMMU_SCTLR_TRE BIT(1)
+#define ARM_SMMU_SCTLR_M BIT(0)
#define ARM_SMMU_CB_ACTLR 0x4
#define ARM_SMMU_CB_RESUME 0x8
-#define RESUME_TERMINATE BIT(0)
+#define ARM_SMMU_RESUME_TERMINATE BIT(0)
#define ARM_SMMU_CB_TCR2 0x10
-#define TCR2_SEP GENMASK(17, 15)
-#define TCR2_SEP_UPSTREAM 0x7
-#define TCR2_AS BIT(4)
+#define ARM_SMMU_TCR2_SEP GENMASK(17, 15)
+#define ARM_SMMU_TCR2_SEP_UPSTREAM 0x7
+#define ARM_SMMU_TCR2_AS BIT(4)
+#define ARM_SMMU_TCR2_PASIZE GENMASK(3, 0)
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
-#define TTBRn_ASID GENMASK_ULL(63, 48)
+#define ARM_SMMU_TTBRn_ASID GENMASK_ULL(63, 48)
#define ARM_SMMU_CB_TCR 0x30
+#define ARM_SMMU_TCR_EAE BIT(31)
+#define ARM_SMMU_TCR_EPD1 BIT(23)
+#define ARM_SMMU_TCR_TG0 GENMASK(15, 14)
+#define ARM_SMMU_TCR_SH0 GENMASK(13, 12)
+#define ARM_SMMU_TCR_ORGN0 GENMASK(11, 10)
+#define ARM_SMMU_TCR_IRGN0 GENMASK(9, 8)
+#define ARM_SMMU_TCR_T0SZ GENMASK(5, 0)
+
+#define ARM_SMMU_VTCR_RES1 BIT(31)
+#define ARM_SMMU_VTCR_PS GENMASK(18, 16)
+#define ARM_SMMU_VTCR_TG0 ARM_SMMU_TCR_TG0
+#define ARM_SMMU_VTCR_SH0 ARM_SMMU_TCR_SH0
+#define ARM_SMMU_VTCR_ORGN0 ARM_SMMU_TCR_ORGN0
+#define ARM_SMMU_VTCR_IRGN0 ARM_SMMU_TCR_IRGN0
+#define ARM_SMMU_VTCR_SL0 GENMASK(7, 6)
+#define ARM_SMMU_VTCR_T0SZ ARM_SMMU_TCR_T0SZ
+
#define ARM_SMMU_CB_CONTEXTIDR 0x34
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
-#define CB_PAR_F BIT(0)
+#define ARM_SMMU_CB_PAR_F BIT(0)
#define ARM_SMMU_CB_FSR 0x58
-#define FSR_MULTI BIT(31)
-#define FSR_SS BIT(30)
-#define FSR_UUT BIT(8)
-#define FSR_ASF BIT(7)
-#define FSR_TLBLKF BIT(6)
-#define FSR_TLBMCF BIT(5)
-#define FSR_EF BIT(4)
-#define FSR_PF BIT(3)
-#define FSR_AFF BIT(2)
-#define FSR_TF BIT(1)
-
-#define FSR_IGN (FSR_AFF | FSR_ASF | \
- FSR_TLBMCF | FSR_TLBLKF)
-#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
- FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+#define ARM_SMMU_FSR_MULTI BIT(31)
+#define ARM_SMMU_FSR_SS BIT(30)
+#define ARM_SMMU_FSR_UUT BIT(8)
+#define ARM_SMMU_FSR_ASF BIT(7)
+#define ARM_SMMU_FSR_TLBLKF BIT(6)
+#define ARM_SMMU_FSR_TLBMCF BIT(5)
+#define ARM_SMMU_FSR_EF BIT(4)
+#define ARM_SMMU_FSR_PF BIT(3)
+#define ARM_SMMU_FSR_AFF BIT(2)
+#define ARM_SMMU_FSR_TF BIT(1)
+
+#define ARM_SMMU_FSR_IGN (ARM_SMMU_FSR_AFF | \
+ ARM_SMMU_FSR_ASF | \
+ ARM_SMMU_FSR_TLBMCF | \
+ ARM_SMMU_FSR_TLBLKF)
+
+#define ARM_SMMU_FSR_FAULT (ARM_SMMU_FSR_MULTI | \
+ ARM_SMMU_FSR_SS | \
+ ARM_SMMU_FSR_UUT | \
+ ARM_SMMU_FSR_EF | \
+ ARM_SMMU_FSR_PF | \
+ ARM_SMMU_FSR_TF | \
+ ARM_SMMU_FSR_IGN)
#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
-#define FSYNR0_WNR BIT(4)
+#define ARM_SMMU_FSYNR0_WNR BIT(4)
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
@@ -203,7 +230,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
-#define ATSR_ACTIVE BIT(0)
+#define ARM_SMMU_ATSR_ACTIVE BIT(0)
/* Maximum number of context banks per SMMU */
@@ -297,7 +324,7 @@ struct arm_smmu_cfg {
enum arm_smmu_cbar_type cbar;
enum arm_smmu_context_fmt fmt;
};
-#define INVALID_IRPTNDX 0xff
+#define ARM_SMMU_INVALID_IRPTNDX 0xff
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
@@ -318,6 +345,33 @@ struct arm_smmu_domain {
struct iommu_domain domain;
};
+static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
+{
+ return ARM_SMMU_TCR_EPD1 |
+ FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
+ FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
+ FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
+ FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
+ FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+}
+
+static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg)
+{
+ return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
+ FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
+}
+
+static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg)
+{
+ return ARM_SMMU_VTCR_RES1 |
+ FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
+ FIELD_PREP(ARM_SMMU_VTCR_TG0, cfg->arm_lpae_s2_cfg.vtcr.tg) |
+ FIELD_PREP(ARM_SMMU_VTCR_SH0, cfg->arm_lpae_s2_cfg.vtcr.sh) |
+ FIELD_PREP(ARM_SMMU_VTCR_ORGN0, cfg->arm_lpae_s2_cfg.vtcr.orgn) |
+ FIELD_PREP(ARM_SMMU_VTCR_IRGN0, cfg->arm_lpae_s2_cfg.vtcr.irgn) |
+ FIELD_PREP(ARM_SMMU_VTCR_SL0, cfg->arm_lpae_s2_cfg.vtcr.sl) |
+ FIELD_PREP(ARM_SMMU_VTCR_T0SZ, cfg->arm_lpae_s2_cfg.vtcr.tsz);
+}
/* Implementation details, yay! */
struct arm_smmu_impl {
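A note for readers outside the kernel tree: the ARM_SMMU_* renames above all lean on the GENMASK()/FIELD_PREP() idiom from <linux/bits.h> and <linux/bitfield.h>, where a field is described by its mask and the helper shifts values into place. The standalone C sketch below reimplements just enough of that idiom to show the mechanics; the GENMASK32/FIELD_PREP32 names and the demo values are illustrative, not kernel API.

/* Minimal userspace model of GENMASK()/FIELD_PREP(); illustrative only. */
#include <stdint.h>
#include <stdio.h>

/* contiguous bit mask from bit h down to bit l (h < 32) */
#define GENMASK32(h, l)    ((~0u >> (31 - (h))) & (~0u << (l)))
/* shift a value into the field described by mask m */
#define FIELD_PREP32(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define TCR_TG0  GENMASK32(15, 14)	/* mirrors ARM_SMMU_TCR_TG0 */
#define TCR_SH0  GENMASK32(13, 12)	/* mirrors ARM_SMMU_TCR_SH0 */
#define TCR_T0SZ GENMASK32(5, 0)	/* mirrors ARM_SMMU_TCR_T0SZ */

int main(void)
{
	/* 4K granule (0), inner shareable (3), T0SZ for a 48-bit IAS */
	uint32_t tcr = FIELD_PREP32(TCR_TG0, 0) |
		       FIELD_PREP32(TCR_SH0, 3) |
		       FIELD_PREP32(TCR_T0SZ, 64 - 48);

	printf("TCR = 0x%08x\n", tcr);	/* prints 0x00003010 */
	return 0;
}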
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0cc702a70a96..a2e96a5fd9a7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -19,6 +19,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
@@ -44,7 +45,6 @@ struct iommu_dma_cookie {
dma_addr_t msi_iova;
};
struct list_head msi_page_list;
- spinlock_t msi_lock;
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
@@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
- spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
}
@@ -399,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
- size_t size, dma_addr_t dma_limit, struct device *dev)
+ size_t size, u64 dma_limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
@@ -424,7 +423,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
- dma_limit = min(dma_limit, domain->geometry.aperture_end);
+ dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
@@ -477,7 +476,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot, dma_addr_t dma_mask)
+ size_t size, int prot, u64 dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (msi_page->phys == msi_addr)
return msi_page;
- msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+ msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return NULL;
@@ -1204,25 +1203,22 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
- unsigned long flags;
+ static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) {
desc->iommu_cookie = NULL;
return 0;
}
- cookie = domain->iova_cookie;
-
/*
- * We disable IRQs to rule out a possible inversion against
- * irq_desc_lock if, say, someone tries to retarget the affinity
- * of an MSI from within an IPI handler.
+ * In fact the whole prepare operation should already be serialised by
+ * irq_domain_mutex further up the callchain, but that's pretty subtle
+ * on its own, so consider this locking as failsafe documentation...
*/
- spin_lock_irqsave(&cookie->msi_lock, flags);
+ mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
- spin_unlock_irqrestore(&cookie->msi_lock, flags);
+ mutex_unlock(&msi_prepare_lock);
msi_desc_set_iommu_cookie(desc, msi_page);
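The dma-iommu.c hunk above drops the per-cookie msi_lock in favour of a function-local static mutex, which is what allows iommu_dma_get_msi_page() to allocate with GFP_KERNEL instead of GFP_ATOMIC. A userspace analogue of that pattern, purely as a sketch (the pthread names stand in for the kernel's DEFINE_MUTEX()):

#include <pthread.h>
#include <stdio.h>

static int prepare_once(void)
{
	/* one lock shared by every caller of this function; no per-object
	 * state is needed, mirroring the static DEFINE_MUTEX() above */
	static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
	static int prepared;

	pthread_mutex_lock(&prepare_lock);
	if (!prepared)
		prepared = 1;	/* slow, sleepable setup would go here */
	pthread_mutex_unlock(&prepare_lock);

	return prepared;
}

int main(void)
{
	printf("prepared = %d\n", prepare_once());
	return 0;
}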
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3acfa6a25fa2..071bb42bbbc5 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -244,7 +244,7 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
(scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
(info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
- info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
+ info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
pr_warn("Device scope type does not match for %s\n",
pci_name(info->dev));
return -EINVAL;
@@ -1354,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
struct qi_desc desc;
if (mask) {
- WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
} else
@@ -1371,6 +1370,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
qi_submit_sync(&desc, iommu);
}
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+ unsigned long npages, bool ih)
+{
+ struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+ /*
+	 * npages == -1 means a PASID-selective invalidation; any positive
+	 * value requests a Page-selective-within-PASID invalidation.
+	 * 0 is not a valid input.
+ */
+ if (WARN_ON(!npages)) {
+ pr_err("Invalid input npages = %ld\n", npages);
+ return;
+ }
+
+ if (npages == -1) {
+ desc.qw0 = QI_EIOTLB_PASID(pasid) |
+ QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
+ desc.qw1 = 0;
+ } else {
+ int mask = ilog2(__roundup_pow_of_two(npages));
+ unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr &= ~(align - 1);
+
+ desc.qw0 = QI_EIOTLB_PASID(pasid) |
+ QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+ QI_EIOTLB_TYPE;
+ desc.qw1 = QI_EIOTLB_ADDR(addr) |
+ QI_EIOTLB_IH(ih) |
+ QI_EIOTLB_AM(mask);
+ }
+
+ qi_submit_sync(&desc, iommu);
+}
+
/*
* Disable Queued Invalidation interface.
*/
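For the new qi_flush_piotlb() above, the page-selective branch derives the VT-d address mask (AM) as ilog2(roundup_pow_of_two(npages)) and aligns the base address down to the range that mask covers. A standalone sketch of that arithmetic (VTD_PAGE_SHIFT is 12 in the kernel; the helper name here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

/* smallest m such that 2^m >= npages, i.e. ilog2(roundup_pow_of_two(n)) */
static unsigned int am_for_npages(uint64_t npages)
{
	unsigned int m = 0;

	while ((1ULL << m) < npages)
		m++;
	return m;
}

int main(void)
{
	uint64_t addr = 0x12345000, align;
	unsigned int am = am_for_npages(5);	/* 5 pages -> covers 8 */

	align = 1ULL << (VTD_PAGE_SHIFT + am);
	addr &= ~(align - 1);	/* base naturally aligned to the range */

	printf("AM = %u, base = 0x%llx\n", am, (unsigned long long)addr);
	return 0;
}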
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 471f05d452e0..c1257bef553c 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -5,6 +5,7 @@
* Authors: Gayatri Kammela <gayatri.kammela@intel.com>
* Sohil Mehta <sohil.mehta@intel.com>
* Jacob Pan <jacob.jun.pan@linux.intel.com>
+ * Lu Baolu <baolu.lu@linux.intel.com>
*/
#include <linux/debugfs.h>
@@ -283,6 +284,77 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
+static inline unsigned long level_to_directory_size(int level)
+{
+ return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
+}
+
+static inline void
+dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
+{
+ seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
+ iova >> VTD_PAGE_SHIFT, path[5], path[4],
+ path[3], path[2], path[1]);
+}
+
+static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
+ int level, unsigned long start,
+ u64 *path)
+{
+ int i;
+
+ if (level > 5 || level < 1)
+ return;
+
+ for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
+ i++, pde++, start += level_to_directory_size(level)) {
+ if (!dma_pte_present(pde))
+ continue;
+
+ path[level] = pde->val;
+ if (dma_pte_superpage(pde) || level == 1)
+ dump_page_info(m, start, path);
+ else
+ pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
+ level - 1, start, path);
+ path[level] = 0;
+ }
+}
+
+static int show_device_domain_translation(struct device *dev, void *data)
+{
+ struct dmar_domain *domain = find_domain(dev);
+ struct seq_file *m = data;
+ u64 path[6] = { 0 };
+
+ if (!domain)
+ return 0;
+
+ seq_printf(m, "Device %s with pasid %d @0x%llx\n",
+ dev_name(dev), domain->default_pasid,
+ (u64)virt_to_phys(domain->pgd));
+ seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+
+ pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ ret = bus_for_each_dev(&pci_bus_type, NULL, m,
+ show_device_domain_translation);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+
#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
struct intel_iommu *iommu)
@@ -396,6 +468,9 @@ void __init intel_iommu_debugfs_init(void)
&iommu_regset_fops);
debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
NULL, &dmar_translation_struct_fops);
+ debugfs_create_file("domain_translation_struct", 0444,
+ intel_iommu_debug, NULL,
+ &domain_translation_struct_fops);
#ifdef CONFIG_IRQ_REMAP
debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
NULL, &ir_translation_struct_fops);
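The debugfs walker added above recurses one paging level at a time; level_to_directory_size() gives the address span of a single entry at each level, since VT-d second-level tables use 9 bits of stride per level over a 4K base page. A self-contained sketch of that size calculation:

#include <stdio.h>

#define PAGE_SHIFT   12
#define STRIDE_SHIFT 9

static unsigned long long level_span(int level)
{
	return 1ULL << (PAGE_SHIFT + STRIDE_SHIFT * (level - 1));
}

int main(void)
{
	for (int level = 1; level <= 5; level++)	/* PTE .. PML5E */
		printf("level %d entry spans 0x%llx bytes\n",
		       level, level_span(level));
	return 0;
}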
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0c8d81f56a30..9dc37672bf89 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -307,6 +307,20 @@ static int hw_pass_through = 1;
*/
#define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
+/*
+ * When VT-d works in scalable mode, it allows DMA translation to
+ * happen through either the first-level or the second-level page
+ * table. This bit marks that the DMA translation for the domain
+ * goes through the first-level page table; otherwise, the second.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(2)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE BIT(3)
+
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
if (domain->iommu_refcnt[idx])
@@ -355,9 +369,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
-#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+int intel_iommu_sm = 1;
+#else
int intel_iommu_sm;
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -368,7 +387,6 @@ static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int intel_no_bounce;
-#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
@@ -377,7 +395,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
-static DEFINE_SPINLOCK(device_domain_lock);
+DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
@@ -552,6 +570,11 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}
+static inline bool domain_use_first_level(struct dmar_domain *domain)
+{
+ return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
+}
+
static inline int domain_pfn_supported(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -661,11 +684,12 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
return ret;
}
-static int domain_update_iommu_superpage(struct intel_iommu *skip)
+static int domain_update_iommu_superpage(struct dmar_domain *domain,
+ struct intel_iommu *skip)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int mask = 0xf;
+ int mask = 0x3;
if (!intel_iommu_superpage) {
return 0;
@@ -675,7 +699,13 @@ static int domain_update_iommu_superpage(struct intel_iommu *skip)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (iommu != skip) {
- mask &= cap_super_page_val(iommu->cap);
+ if (domain && domain_use_first_level(domain)) {
+ if (!cap_fl1gp_support(iommu->cap))
+ mask = 0x1;
+ } else {
+ mask &= cap_super_page_val(iommu->cap);
+ }
+
if (!mask)
break;
}
@@ -690,7 +720,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain->iommu_snooping = domain_update_iommu_snooping(NULL);
- domain->iommu_superpage = domain_update_iommu_superpage(NULL);
+ domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -774,13 +804,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (dev_is_pci(dev)) {
struct pci_dev *pf_pdev;
- pdev = to_pci_dev(dev);
-
-#ifdef CONFIG_X86
- /* VMD child devices currently cannot be handled individually */
- if (is_vmd(pdev->bus))
- return NULL;
-#endif
+ pdev = pci_real_dma_dev(to_pci_dev(dev));
/* VFs aren't listed in scope tables; we need to look up
* the PF instead to find the IOMMU. */
@@ -913,6 +937,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ if (domain_use_first_level(domain))
+ pteval |= DMA_FL_PTE_XD;
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1483,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_unlock_irqrestore(&device_domain_lock, flags);
}
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ u64 addr, unsigned long npages, bool ih)
+{
+ u16 did = domain->iommu_did[iommu->seq_id];
+
+ if (domain->default_pasid)
+ qi_flush_piotlb(iommu, did, domain->default_pasid,
+ addr, npages, ih);
+
+ if (!list_empty(&domain->devices))
+ qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1496,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- /*
- * Fallback to domain selective flush if no PSI support or the size is
- * too big.
- * PSI requires page size to be 2 ^ x, and the base address is naturally
- * aligned to the size
- */
- if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
+
+ if (domain_use_first_level(domain)) {
+ domain_flush_piotlb(iommu, domain, addr, pages, ih);
+ } else {
+ /*
+ * Fallback to domain selective flush if no PSI support or
+ * the size is too big. PSI requires page size to be 2 ^ x,
+ * and the base address is naturally aligned to the size.
+ */
+ if (!cap_pgsel_inv(iommu->cap) ||
+ mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+ }
/*
* In caching mode, changes of pages from non-present to present require
@@ -1522,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages)
{
- /* It's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
+ /*
+ * It's a non-present to present mapping. Only flush if caching mode
+ * and second level.
+ */
+ if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
else
iommu_flush_write_buffer(iommu);
@@ -1540,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
struct intel_iommu *iommu = g_iommus[idx];
u16 did = domain->iommu_did[iommu->seq_id];
- iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+ if (domain_use_first_level(domain))
+ domain_flush_piotlb(iommu, domain, 0, -1, 0);
+ else
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
@@ -1709,6 +1761,33 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
#endif
}
+/*
+ * Check and return whether first level is used by default for
+ * DMA translation.
+ */
+static bool first_level_by_default(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ static int first_level_support = -1;
+
+ if (likely(first_level_support != -1))
+ return first_level_support;
+
+ first_level_support = 1;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
+ first_level_support = 0;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return first_level_support;
+}
+
static struct dmar_domain *alloc_domain(int flags)
{
struct dmar_domain *domain;
@@ -1720,6 +1799,8 @@ static struct dmar_domain *alloc_domain(int flags)
memset(domain, 0, sizeof(*domain));
domain->nid = NUMA_NO_NODE;
domain->flags = flags;
+ if (first_level_by_default())
+ domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
@@ -1849,14 +1930,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
{
int adjust_width, agaw;
unsigned long sagaw;
- int err;
+ int ret;
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- err = init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova, iova_entry_free);
- if (err)
- return err;
+ if (!intel_iommu_strict) {
+ ret = init_iova_flush_queue(&domain->iovad,
+ iommu_flush_iova, iova_entry_free);
+ if (ret)
+ pr_info("iova flush queue initialization failed\n");
+ }
domain_reserve_special_ranges(domain);
@@ -2229,17 +2312,20 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ u64 attr;
BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
- prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+ attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+ if (domain_use_first_level(domain))
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
if (!sg) {
sg_res = nr_pages;
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
}
while (nr_pages > 0) {
@@ -2251,7 +2337,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
sg->dma_length = sg->length;
- pteval = (sg_phys(sg) - pgoff) | prot;
+ pteval = (sg_phys(sg) - pgoff) | attr;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
@@ -2420,7 +2506,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-static struct dmar_domain *find_domain(struct device *dev)
+struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
@@ -2428,6 +2514,9 @@ static struct dmar_domain *find_domain(struct device *dev)
dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
return NULL;
+ if (dev_is_pci(dev))
+ dev = &pci_real_dma_dev(to_pci_dev(dev))->dev;
+
/* No lock here, assumes no domain exit in normal case */
info = dev->archdata.iommu;
if (likely(info))
@@ -2463,6 +2552,36 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
return NULL;
}
+static int domain_setup_first_level(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ struct device *dev,
+ int pasid)
+{
+ int flags = PASID_FLAG_SUPERVISOR_MODE;
+ struct dma_pte *pgd = domain->pgd;
+ int agaw, level;
+
+ /*
+	 * Skip the top levels of the page tables for an IOMMU whose
+	 * agaw is smaller than the default. Unnecessary for PT mode.
+ */
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ return -ENOMEM;
+ }
+
+ level = agaw_to_level(agaw);
+ if (level != 4 && level != 5)
+ return -EINVAL;
+
+ flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+
+ return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
+ domain->iommu_did[iommu->seq_id],
+ flags);
+}
+
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
int bus, int devfn,
struct device *dev,
@@ -2562,6 +2681,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
+ else if (domain_use_first_level(domain))
+ ret = domain_setup_first_level(iommu, domain, dev,
+ PASID_RID2PASID);
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
@@ -2767,10 +2889,8 @@ static int __init si_domain_init(int hw)
}
/*
- * Normally we use DMA domains for devices which have RMRRs. But we
- * loose this requirement for graphic and usb devices. Identity map
- * the RMRRs for graphic and USB devices so that they could use the
- * si_domain.
+ * Identity map the RMRRs so that devices with RMRRs could also use
+ * the si_domain.
*/
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@@ -2778,9 +2898,6 @@ static int __init si_domain_init(int hw)
unsigned long long start = rmrr->base_address;
unsigned long long end = rmrr->end_address;
- if (device_is_rmrr_locked(dev))
- continue;
-
if (WARN_ON(end < start ||
end >> agaw_to_width(si_domain->agaw)))
continue;
@@ -2919,9 +3036,6 @@ static int device_def_domain_type(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
- if (device_is_rmrr_locked(dev))
- return IOMMU_DOMAIN_DMA;
-
/*
* Prevent any device marked as untrusted from getting
* placed into the statically identity mapping domain.
@@ -2959,13 +3073,9 @@ static int device_def_domain_type(struct device *dev)
return IOMMU_DOMAIN_DMA;
} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
return IOMMU_DOMAIN_DMA;
- } else {
- if (device_has_rmrr(dev))
- return IOMMU_DOMAIN_DMA;
}
- return (iommu_identity_mapping & IDENTMAP_ALL) ?
- IOMMU_DOMAIN_IDENTITY : 0;
+ return 0;
}
static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3294,10 +3404,7 @@ static int __init init_dmars(void)
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
-#ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_supported(iommu))
- intel_svm_init(iommu);
-#endif
+ intel_svm_check(iommu);
}
/*
@@ -3312,9 +3419,6 @@ static int __init init_dmars(void)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
- if (iommu_default_passthrough())
- iommu_identity_mapping |= IDENTMAP_ALL;
-
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
dmar_map_gfx = 0;
#endif
@@ -3387,8 +3491,21 @@ static unsigned long intel_alloc_iova(struct device *dev,
{
unsigned long iova_pfn;
- /* Restrict dma_mask to the width that the iommu can handle */
- dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+ /*
+ * Restrict dma_mask to the width that the iommu can handle.
+ * First-level translation restricts the input-address to a
+ * canonical address (i.e., address bits 63:N have the same
+	 * value as address bit [N-1], where N is 48 with 4-level
+	 * paging and 57 with 5-level paging). Hence, skip bit
+ * [N-1].
+ */
+ if (domain_use_first_level(domain))
+ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
+ dma_mask);
+ else
+ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
+ dma_mask);
+
/* Ensure we reserve the whole size-aligned region */
nrpages = __roundup_pow_of_two(nrpages);
@@ -3406,7 +3523,8 @@ static unsigned long intel_alloc_iova(struct device *dev,
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
- dev_err(dev, "Allocating %ld-page iova failed", nrpages);
+ dev_err_once(dev, "Allocating %ld-page iova failed\n",
+ nrpages);
return 0;
}
@@ -3774,8 +3892,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return 0;
}
- trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
- sg_phys(sglist), size << VTD_PAGE_SHIFT);
+ for_each_sg(sglist, sg, nelems, i)
+ trace_map_sg(dev, i + 1, nelems, sg);
return nelems;
}
@@ -3987,6 +4105,9 @@ bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
sg_dma_len(sg) = sg->length;
}
+ for_each_sg(sglist, sg, nelems, i)
+ trace_bounce_map_sg(dev, i + 1, nelems, sg);
+
return nelems;
out_unmap:
@@ -4315,16 +4436,31 @@ static void __init init_iommu_pm_ops(void)
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
+static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+ if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
+ !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
+ rmrr->end_address <= rmrr->base_address ||
+ arch_rmrr_sanity_check(rmrr))
+ return -EINVAL;
+
+ return 0;
+}
+
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
- int ret;
rmrr = (struct acpi_dmar_reserved_memory *)header;
- ret = arch_rmrr_sanity_check(rmrr);
- if (ret)
- return ret;
+ if (rmrr_sanity_check(rmrr))
+ WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+ "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ rmrr->base_address, rmrr->end_address,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
@@ -4470,7 +4606,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
iommu->name);
return -ENXIO;
}
- sp = domain_update_iommu_superpage(iommu) - 1;
+ sp = domain_update_iommu_superpage(NULL, iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
pr_warn("%s: Doesn't support large page.\n",
iommu->name);
@@ -4490,10 +4626,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (ret)
goto out;
-#ifdef CONFIG_INTEL_IOMMU_SVM
- if (pasid_supported(iommu))
- intel_svm_init(iommu);
-#endif
+ intel_svm_check(iommu);
if (dmaru->ignored) {
/*
@@ -4898,7 +5031,7 @@ static int __init platform_optin_force_iommu(void)
* map for all devices except those marked as being untrusted.
*/
if (dmar_disabled)
- iommu_identity_mapping |= IDENTMAP_ALL;
+ iommu_set_default_passthrough(false);
dmar_disabled = 0;
no_iommu = 0;
@@ -5163,7 +5296,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- if (info)
+ if (info && info != DEFER_DEVICE_DOMAIN_INFO
+ && info != DUMMY_DEVICE_DOMAIN_INFO)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -5197,6 +5331,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
+ int ret;
switch (type) {
case IOMMU_DOMAIN_DMA:
@@ -5213,11 +5348,12 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return NULL;
}
- if (type == IOMMU_DOMAIN_DMA &&
- init_iova_flush_queue(&dmar_domain->iovad,
- iommu_flush_iova, iova_entry_free)) {
- pr_warn("iova flush queue initialization failed\n");
- intel_iommu_strict = 1;
+ if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
+ ret = init_iova_flush_queue(&dmar_domain->iovad,
+ iommu_flush_iova,
+ iova_entry_free);
+ if (ret)
+ pr_info("iova flush queue initialization failed\n");
}
domain_update_iommu_cap(dmar_domain);
@@ -5283,7 +5419,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain,
domain->auxd_refcnt--;
if (!domain->auxd_refcnt && domain->default_pasid > 0)
- intel_pasid_free_id(domain->default_pasid);
+ ioasid_free(domain->default_pasid);
}
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -5301,10 +5437,11 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
if (domain->default_pasid <= 0) {
int pasid;
- pasid = intel_pasid_alloc_id(domain, PASID_MIN,
- pci_max_pasids(to_pci_dev(dev)),
- GFP_KERNEL);
- if (pasid <= 0) {
+ /* No private data needed for the default pasid */
+ pasid = ioasid_alloc(NULL, PASID_MIN,
+ pci_max_pasids(to_pci_dev(dev)) - 1,
+ NULL);
+ if (pasid == INVALID_IOASID) {
pr_err("Can't allocate default pasid\n");
return -ENODEV;
}
@@ -5322,8 +5459,12 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
goto attach_failed;
/* Setup the PASID entry for mediated devices: */
- ret = intel_pasid_setup_second_level(iommu, domain, dev,
- domain->default_pasid);
+ if (domain_use_first_level(domain))
+ ret = domain_setup_first_level(iommu, domain, dev,
+ domain->default_pasid);
+ else
+ ret = intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid);
if (ret)
goto table_failed;
spin_unlock(&iommu->lock);
@@ -5340,7 +5481,7 @@ attach_failed:
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);
if (!domain->auxd_refcnt && domain->default_pasid > 0)
- intel_pasid_free_id(domain->default_pasid);
+ ioasid_free(domain->default_pasid);
return ret;
}
@@ -5478,9 +5619,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
int prot = 0;
int ret;
- if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
- return -EINVAL;
-
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
@@ -5523,8 +5661,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
- if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
- return 0;
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -5556,9 +5692,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
int level = 0;
u64 phys = 0;
- if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
- return 0;
-
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
if (pte)
phys = dma_pte_addr(pte);
@@ -5602,6 +5735,24 @@ static inline bool iommu_pasid_support(void)
return ret;
}
+static inline bool nested_mode_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5632,8 +5783,10 @@ static int intel_iommu_add_device(struct device *dev)
group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto unlink;
+ }
iommu_group_put(group);
@@ -5659,7 +5812,8 @@ static int intel_iommu_add_device(struct device *dev)
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
"Failed to get a private domain.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unlink;
}
dev_info(dev,
@@ -5674,6 +5828,10 @@ static int intel_iommu_add_device(struct device *dev)
}
return 0;
+
+unlink:
+ iommu_device_unlink(&iommu->iommu, dev);
+ return ret;
}
static void intel_iommu_remove_device(struct device *dev)
@@ -5736,8 +5894,8 @@ static void intel_iommu_get_resv_regions(struct device *device,
struct pci_dev *pdev = to_pci_dev(device);
if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
- reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
- IOMMU_RESV_DIRECT);
+ reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
+ IOMMU_RESV_DIRECT_RELAXABLE);
if (reg)
list_add_tail(&reg->list, head);
}
@@ -5752,15 +5910,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
list_add_tail(&reg->list, head);
}
-static void intel_iommu_put_resv_regions(struct device *dev,
- struct list_head *head)
-{
- struct iommu_resv_region *entry, *next;
-
- list_for_each_entry_safe(entry, next, head, list)
- kfree(entry);
-}
-
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
struct device_domain_info *info;
@@ -5825,6 +5974,13 @@ static void intel_iommu_apply_resv_region(struct device *dev,
WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
}
+static struct iommu_group *intel_iommu_device_group(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pci_device_group(dev);
+ return generic_device_group(dev);
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
@@ -5980,10 +6136,42 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
}
+static int
+intel_iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long flags;
+ int ret = 0;
+
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ spin_lock_irqsave(&device_domain_lock, flags);
+ if (nested_mode_support() &&
+ list_empty(&dmar_domain->devices)) {
+ dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
+ dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
+ .domain_set_attr = intel_iommu_domain_set_attr,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.aux_attach_dev = intel_iommu_aux_attach_device,
@@ -5995,9 +6183,9 @@ const struct iommu_ops intel_iommu_ops = {
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
.get_resv_regions = intel_iommu_get_resv_regions,
- .put_resv_regions = intel_iommu_put_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
- .device_group = pci_device_group,
+ .device_group = intel_iommu_device_group,
.dev_has_feat = intel_iommu_dev_has_feat,
.dev_feat_enabled = intel_iommu_dev_feat_enabled,
.dev_enable_feat = intel_iommu_dev_enable_feat,
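One detail worth pulling out of the intel-iommu.c hunks: with first-level translation the usable IOVA width shrinks by one bit, because bit N-1 acts as the canonical sign bit, so intel_alloc_iova() clamps against gaw - 1. A simplified stand-in for that clamp (the MAX_ADDR macro below ignores the kernel's PFN rounding and is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* simplified: highest address representable in 'gaw' bits */
#define MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	int gaw = 48;

	printf("second-level limit 0x%llx\n",
	       (unsigned long long)MAX_ADDR(gaw));
	printf("first-level  limit 0x%llx (bit %d skipped)\n",
	       (unsigned long long)MAX_ADDR(gaw - 1), gaw - 1);
	return 0;
}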
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 040a445be300..22b30f10b396 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -26,42 +26,6 @@
*/
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
-static DEFINE_IDR(pasid_idr);
-
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
-{
- int ret, min, max;
-
- min = max_t(int, start, PASID_MIN);
- max = min_t(int, end, intel_pasid_max_id);
-
- WARN_ON(in_interrupt());
- idr_preload(gfp);
- spin_lock(&pasid_lock);
- ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
- spin_unlock(&pasid_lock);
- idr_preload_end();
-
- return ret;
-}
-
-void intel_pasid_free_id(int pasid)
-{
- spin_lock(&pasid_lock);
- idr_remove(&pasid_idr, pasid);
- spin_unlock(&pasid_lock);
-}
-
-void *intel_pasid_lookup_id(int pasid)
-{
- void *p;
-
- spin_lock(&pasid_lock);
- p = idr_find(&pasid_idr, pasid);
- spin_unlock(&pasid_lock);
-
- return p;
-}
/*
* Per device pasid table management:
@@ -465,6 +429,21 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
+static void pasid_flush_caches(struct intel_iommu *iommu,
+ struct pasid_entry *pte,
+ int pasid, u16 did)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(pte, sizeof(*pte));
+
+ if (cap_caching_mode(iommu->cap)) {
+ pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+ iotlb_invalidation_with_pasid(iommu, did, pasid);
+ } else {
+ iommu_flush_write_buffer(iommu);
+ }
+}
+
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -498,10 +477,15 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
pasid_set_sre(pte);
}
-#ifdef CONFIG_X86
- if (cpu_feature_enabled(X86_FEATURE_LA57))
- pasid_set_flpm(pte, 1);
-#endif /* CONFIG_X86 */
+ if (flags & PASID_FLAG_FL5LP) {
+ if (cap_5lp_support(iommu->cap)) {
+ pasid_set_flpm(pte, 1);
+ } else {
+ pr_err("No 5-level paging support for first-level\n");
+ pasid_clear_entry(pte);
+ return -EINVAL;
+ }
+ }
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
@@ -510,16 +494,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, 1);
pasid_set_present(pte);
-
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(pte, sizeof(*pte));
-
- if (cap_caching_mode(iommu->cap)) {
- pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
- } else {
- iommu_flush_write_buffer(iommu);
- }
+ pasid_flush_caches(iommu, pte, pasid, did);
return 0;
}
@@ -583,16 +558,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
-
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(pte, sizeof(*pte));
-
- if (cap_caching_mode(iommu->cap)) {
- pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
- } else {
- iommu_flush_write_buffer(iommu);
- }
+ pasid_flush_caches(iommu, pte, pasid, did);
return 0;
}
@@ -626,16 +592,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
-
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(pte, sizeof(*pte));
-
- if (cap_caching_mode(iommu->cap)) {
- pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
- } else {
- iommu_flush_write_buffer(iommu);
- }
+ pasid_flush_caches(iommu, pte, pasid, did);
return 0;
}
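The intel-pasid.c change above is a pure consolidation: three copies of the same post-update flush sequence become one pasid_flush_caches(). Its decision tree, reduced to a standalone sketch (the struct and the puts() calls are illustrative stand-ins for the capability tests and queued-invalidation calls):

#include <stdbool.h>
#include <stdio.h>

struct caps { bool coherent; bool caching_mode; };

static void flush_after_pasid_update(const struct caps *c)
{
	if (!c->coherent)
		puts("clflush the PASID entry");	/* !ecap_coherent() */

	if (c->caching_mode) {				/* cap_caching_mode() */
		puts("invalidate PASID cache");
		puts("invalidate IOTLB");
	} else {
		puts("flush write buffer");
	}
}

int main(void)
{
	struct caps c = { .coherent = false, .caching_mode = true };

	flush_after_pasid_update(&c);
	return 0;
}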
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index fc8cd8f17de1..92de6df24ccb 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -37,6 +37,12 @@
*/
#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
+/*
+ * The PASID_FLAG_FL5LP flag indicates that 5-level paging is used
+ * for first-level translation; otherwise, 4-level paging is used.
+ */
+#define PASID_FLAG_FL5LP BIT(1)
+
struct pasid_dir_entry {
u64 val;
};
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 9b159132405d..d7f2a5358900 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -17,25 +17,13 @@
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
+#include <linux/ioasid.h>
#include <asm/page.h>
#include "intel-pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-int intel_svm_init(struct intel_iommu *iommu)
-{
- if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
- !cap_fl1gp_support(iommu->cap))
- return -EINVAL;
-
- if (cpu_feature_enabled(X86_FEATURE_LA57) &&
- !cap_5lp_support(iommu->cap))
- return -EINVAL;
-
- return 0;
-}
-
#define PRQ_ORDER 0
int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -99,16 +87,39 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
return 0;
}
+static inline bool intel_svm_capable(struct intel_iommu *iommu)
+{
+ return iommu->flags & VTD_FLAG_SVM_CAPABLE;
+}
+
+void intel_svm_check(struct intel_iommu *iommu)
+{
+ if (!pasid_supported(iommu))
+ return;
+
+ if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+ !cap_fl1gp_support(iommu->cap)) {
+ pr_err("%s SVM disabled, incompatible 1GB page capability\n",
+ iommu->name);
+ return;
+ }
+
+ if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+ !cap_5lp_support(iommu->cap)) {
+ pr_err("%s SVM disabled, incompatible paging mode\n",
+ iommu->name);
+ return;
+ }
+
+ iommu->flags |= VTD_FLAG_SVM_CAPABLE;
+}
+
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
unsigned long address, unsigned long pages, int ih)
{
struct qi_desc desc;
- /*
- * Do PASID granu IOTLB invalidation if page selective capability is
- * not available.
- */
- if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+ if (pages == -1) {
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
QI_EIOTLB_DID(sdev->did) |
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
@@ -211,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);
+#define for_each_svm_dev(sdev, svm, d) \
+ list_for_each_entry((sdev), &(svm)->devs, list) \
+ if ((d) != (sdev)->dev) {} else
+
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -224,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (!iommu || dmar_disabled)
return -EINVAL;
+ if (!intel_svm_capable(iommu))
+ return -ENOTSUPP;
+
if (dev_is_pci(dev)) {
pasid_max = pci_max_pasids(to_pci_dev(dev));
if (pasid_max < 0)
@@ -256,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
goto out;
}
- list_for_each_entry(sdev, &svm->devs, list) {
- if (dev == sdev->dev) {
- if (sdev->ops != ops) {
- ret = -EBUSY;
- goto out;
- }
- sdev->users++;
- goto success;
+ /* Find the matching device in svm list */
+ for_each_svm_dev(sdev, svm, dev) {
+ if (sdev->ops != ops) {
+ ret = -EBUSY;
+ goto out;
}
+ sdev->users++;
+ goto success;
}
break;
@@ -318,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
- /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
- ret = intel_pasid_alloc_id(svm,
- !!cap_caching_mode(iommu->cap),
- pasid_max - 1, GFP_KERNEL);
- if (ret < 0) {
+ /* Do not use PASID 0, reserved for RID to PASID */
+ svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+ pasid_max - 1, svm);
+ if (svm->pasid == INVALID_IOASID) {
kfree(svm);
kfree(sdev);
+ ret = -ENOSPC;
goto out;
}
- svm->pasid = ret;
svm->notifier.ops = &intel_mmuops;
svm->mm = mm;
svm->flags = flags;
@@ -337,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
- intel_pasid_free_id(svm->pasid);
+ ioasid_free(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -348,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
- mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+ (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+ (cpu_feature_enabled(X86_FEATURE_LA57) ?
+ PASID_FLAG_FL5LP : 0));
spin_unlock(&iommu->lock);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
- intel_pasid_free_id(svm->pasid);
+ ioasid_free(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -369,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
- mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+ (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+ (cpu_feature_enabled(X86_FEATURE_LA57) ?
+ PASID_FLAG_FL5LP : 0));
spin_unlock(&iommu->lock);
if (ret) {
kfree(sdev);
@@ -401,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
if (!iommu)
goto out;
- svm = intel_pasid_lookup_id(pasid);
+ svm = ioasid_find(NULL, pasid, NULL);
if (!svm)
goto out;
- list_for_each_entry(sdev, &svm->devs, list) {
- if (dev == sdev->dev) {
- ret = 0;
- sdev->users--;
- if (!sdev->users) {
- list_del_rcu(&sdev->list);
- /* Flush the PASID cache and IOTLB for this device.
- * Note that we do depend on the hardware *not* using
- * the PASID any more. Just as we depend on other
- * devices never using PASIDs that they have no right
- * to use. We have a *shared* PASID table, because it's
- * large and has to be physically contiguous. So it's
- * hard to be as defensive as we might like. */
- intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
- kfree_rcu(sdev, rcu);
-
- if (list_empty(&svm->devs)) {
- intel_pasid_free_id(svm->pasid);
- if (svm->mm)
- mmu_notifier_unregister(&svm->notifier, svm->mm);
-
- list_del(&svm->list);
-
- /* We mandate that no page faults may be outstanding
- * for the PASID when intel_svm_unbind_mm() is called.
- * If that is not obeyed, subtle errors will happen.
- * Let's make them less subtle... */
- memset(svm, 0x6b, sizeof(*svm));
- kfree(svm);
- }
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
+
+ for_each_svm_dev(sdev, svm, dev) {
+ ret = 0;
+ sdev->users--;
+ if (!sdev->users) {
+ list_del_rcu(&sdev->list);
+ /* Flush the PASID cache and IOTLB for this device.
+ * Note that we do depend on the hardware *not* using
+ * the PASID any more. Just as we depend on other
+ * devices never using PASIDs that they have no right
+ * to use. We have a *shared* PASID table, because it's
+ * large and has to be physically contiguous. So it's
+ * hard to be as defensive as we might like. */
+ intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+ kfree_rcu(sdev, rcu);
+
+ if (list_empty(&svm->devs)) {
+ ioasid_free(svm->pasid);
+ if (svm->mm)
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
+ list_del(&svm->list);
+ /* We mandate that no page faults may be outstanding
+ * for the PASID when intel_svm_unbind_mm() is called.
+ * If that is not obeyed, subtle errors will happen.
+ * Let's make them less subtle... */
+ memset(svm, 0x6b, sizeof(*svm));
+ kfree(svm);
}
- break;
}
+ break;
}
out:
mutex_unlock(&pasid_mutex);
@@ -458,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
if (!iommu)
goto out;
- svm = intel_pasid_lookup_id(pasid);
+ svm = ioasid_find(NULL, pasid, NULL);
if (!svm)
goto out;
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
/* init_mm is used in this case */
if (!svm->mm)
ret = 1;
@@ -568,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
- svm = intel_pasid_lookup_id(req->pasid);
+ svm = ioasid_find(NULL, req->pasid, NULL);
/* It *can't* go away, because the driver is not permitted
* to unbind the mm while any page faults are outstanding.
* So we only need RCU to protect the internal idr code. */
rcu_read_unlock();
-
- if (!svm) {
+ if (IS_ERR_OR_NULL(svm)) {
pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
iommu->name, req->pasid, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
@@ -658,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (req->priv_data_present)
memcpy(&resp.qw2, req->priv_data,
sizeof(req->priv_data));
+			else {
+				resp.qw2 = 0;
+				resp.qw3 = 0;
+			}
+			qi_submit_sync(&resp, iommu);
}
- resp.qw2 = 0;
- resp.qw3 = 0;
- qi_submit_sync(&resp, iommu);
-
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
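The for_each_svm_dev() macro introduced above uses the "if (cond) {} else" trick: the caller's loop body binds to the else branch, so only matching entries run it, and a trailing else in caller code cannot accidentally pair with the macro's hidden if. The same trick in a self-contained toy macro:

#include <stdio.h>

/* visit only the indices matching the filter; body binds to the else */
#define for_each_multiple(i, n, step) \
	for ((i) = 0; (i) < (n); (i)++) \
		if ((i) % (step) != 0) {} else

int main(void)
{
	int i;

	for_each_multiple(i, 10, 3)
		printf("visited %d\n", i);	/* 0, 3, 6, 9 */
	return 0;
}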
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 7c3bd2c3cdca..4272fe4e17f4 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -149,8 +149,6 @@
#define ARM_V7S_TTBR_IRGN_ATTR(attr) \
((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
-#define ARM_V7S_TCR_PD1 BIT(5)
-
#ifdef CONFIG_ZONE_DMA32
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
@@ -798,8 +796,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
*/
cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
- /* TCR: T0SZ=0, disable TTBR1 */
- cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+ /* TCR: T0SZ=0, EAE=0 (if applicable) */
+ cfg->arm_v7s_cfg.tcr = 0;
/*
* TEX remap: the indices used map to the closest equivalent types
@@ -822,15 +820,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
- /* TTBRs */
- cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
- ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
- (cfg->coherent_walk ?
- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
- cfg->arm_v7s_cfg.ttbr[1] = 0;
+ /* TTBR */
+ cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
+ (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
return &data->iop;
out_free_data:
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index bdf47f745268..983b08477e64 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -100,40 +100,29 @@
#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
/* Register bits */
-#define ARM_32_LPAE_TCR_EAE (1 << 31)
-#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+#define ARM_LPAE_TCR_TG0_4K 0
+#define ARM_LPAE_TCR_TG0_64K 1
+#define ARM_LPAE_TCR_TG0_16K 2
-#define ARM_LPAE_TCR_EPD1 (1 << 23)
+#define ARM_LPAE_TCR_TG1_16K 1
+#define ARM_LPAE_TCR_TG1_4K 2
+#define ARM_LPAE_TCR_TG1_64K 3
-#define ARM_LPAE_TCR_TG0_4K (0 << 14)
-#define ARM_LPAE_TCR_TG0_64K (1 << 14)
-#define ARM_LPAE_TCR_TG0_16K (2 << 14)
-
-#define ARM_LPAE_TCR_SH0_SHIFT 12
-#define ARM_LPAE_TCR_SH0_MASK 0x3
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
-#define ARM_LPAE_TCR_ORGN0_SHIFT 10
-#define ARM_LPAE_TCR_IRGN0_SHIFT 8
-#define ARM_LPAE_TCR_RGN_MASK 0x3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
#define ARM_LPAE_TCR_RGN_WT 2
#define ARM_LPAE_TCR_RGN_WB 3
-#define ARM_LPAE_TCR_SL0_SHIFT 6
-#define ARM_LPAE_TCR_SL0_MASK 0x3
+#define ARM_LPAE_VTCR_SL0_MASK 0x3
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
-#define ARM_LPAE_TCR_SZ_MASK 0xf
-
-#define ARM_LPAE_TCR_PS_SHIFT 16
-#define ARM_LPAE_TCR_PS_MASK 0x7
-#define ARM_LPAE_TCR_IPS_SHIFT 32
-#define ARM_LPAE_TCR_IPS_MASK 0x7
+#define ARM_LPAE_VTCR_PS_SHIFT 16
+#define ARM_LPAE_VTCR_PS_MASK 0x7
#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
@@ -293,17 +282,11 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte = prot;
- if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
- pte |= ARM_LPAE_PTE_NS;
-
if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
pte |= ARM_LPAE_PTE_TYPE_PAGE;
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
- if (data->iop.fmt != ARM_MALI_LPAE)
- pte |= ARM_LPAE_PTE_AF;
- pte |= ARM_LPAE_PTE_SH_IS;
pte |= paddr_to_iopte(paddr, data);
__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
@@ -460,9 +443,20 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_LPAE_PTE_SH_IS;
+ else
+ pte |= ARM_LPAE_PTE_SH_OS;
+
if (prot & IOMMU_NOEXEC)
pte |= ARM_LPAE_PTE_XN;
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NS;
+
+ if (data->iop.fmt != ARM_MALI_LPAE)
+ pte |= ARM_LPAE_PTE_AF;
+
return pte;
}
@@ -474,6 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte *ptep = data->pgd;
int ret, lvl = data->start_level;
arm_lpae_iopte prot;
+ long iaext = (long)iova >> cfg->ias;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -482,7 +477,9 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
return -EINVAL;
- if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+ iaext = ~iaext;
+ if (WARN_ON(iaext || paddr >> cfg->oas))
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
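The iaext computation is worth unpacking: an arithmetic right shift by ias leaves 0 for a canonical TTBR0 address and -1 for a canonical TTBR1 address (all bits above ias set), so under the TTBR1 quirk a bitwise NOT turns "valid" back into zero and one WARN_ON covers both halves. A standalone illustration, relying (as the kernel does) on the compiler implementing >> on negative values as an arithmetic shift:

#include <assert.h>
#include <stdbool.h>

/* Returns true if iova is canonical for the selected half (sketch). */
static bool iova_in_range(unsigned long iova, unsigned ias, bool ttbr1)
{
	long iaext = (long)iova >> ias;   /* arithmetic shift: 0 or -1 */

	if (ttbr1)
		iaext = ~iaext;           /* -1 (upper bits all set) -> 0 */
	return iaext == 0;
}

int main(void)
{
	assert( iova_in_range(0x0000badcafe00000UL, 48, false)); /* TTBR0 */
	assert(!iova_in_range(0xffffbadcafe00000UL, 48, false));
	assert( iova_in_range(0xffffbadcafe00000UL, 48, true));  /* TTBR1 */
	assert(!iova_in_range(0x0000badcafe00000UL, 48, true));
	return 0;
}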
@@ -648,11 +645,14 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
+ long iaext = (long)iova >> cfg->ias;
if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
return 0;
- if (WARN_ON(iova >> data->iop.cfg.ias))
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+ iaext = ~iaext;
+ if (WARN_ON(iaext))
return 0;
return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
@@ -787,9 +787,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg;
struct arm_lpae_io_pgtable *data;
+ typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
+ bool tg1;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
- IO_PGTABLE_QUIRK_NON_STRICT))
+ IO_PGTABLE_QUIRK_NON_STRICT |
+ IO_PGTABLE_QUIRK_ARM_TTBR1))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -798,58 +801,55 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
/* TCR */
if (cfg->coherent_walk) {
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ tcr->sh = ARM_LPAE_TCR_SH_IS;
+ tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+ tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
} else {
- reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+ tcr->sh = ARM_LPAE_TCR_SH_OS;
+ tcr->irgn = ARM_LPAE_TCR_RGN_NC;
+ tcr->orgn = ARM_LPAE_TCR_RGN_NC;
}
+ tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
- reg |= ARM_LPAE_TCR_TG0_4K;
+ tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
break;
case SZ_16K:
- reg |= ARM_LPAE_TCR_TG0_16K;
+ tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
break;
case SZ_64K:
- reg |= ARM_LPAE_TCR_TG0_64K;
+ tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
break;
}
switch (cfg->oas) {
case 32:
- reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
break;
case 36:
- reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
break;
case 40:
- reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
break;
case 42:
- reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
break;
case 44:
- reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
break;
case 48:
- reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
break;
case 52:
- reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
break;
default:
goto out_free_data;
}
- reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-
- /* Disable speculative walks through TTBR1 */
- reg |= ARM_LPAE_TCR_EPD1;
- cfg->arm_lpae_s1_cfg.tcr = reg;
+ tcr->tsz = 64ULL - cfg->ias;
/* MAIRs */
reg = (ARM_LPAE_MAIR_ATTR_NC
@@ -872,9 +872,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
- /* TTBRs */
- cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
- cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ /* TTBR */
+ cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
return &data->iop;
out_free_data:
@@ -885,8 +884,9 @@ out_free_data:
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
- u64 reg, sl;
+ u64 sl;
struct arm_lpae_io_pgtable *data;
+ typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
/* The NS quirk doesn't apply at stage 2 */
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
@@ -911,55 +911,59 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
}
/* VTCR */
- reg = ARM_64_LPAE_S2_TCR_RES1 |
- (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ if (cfg->coherent_walk) {
+ vtcr->sh = ARM_LPAE_TCR_SH_IS;
+ vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+ vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
+ } else {
+ vtcr->sh = ARM_LPAE_TCR_SH_OS;
+ vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
+ vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
+ }
sl = data->start_level;
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
- reg |= ARM_LPAE_TCR_TG0_4K;
+ vtcr->tg = ARM_LPAE_TCR_TG0_4K;
sl++; /* SL0 format is different for 4K granule size */
break;
case SZ_16K:
- reg |= ARM_LPAE_TCR_TG0_16K;
+ vtcr->tg = ARM_LPAE_TCR_TG0_16K;
break;
case SZ_64K:
- reg |= ARM_LPAE_TCR_TG0_64K;
+ vtcr->tg = ARM_LPAE_TCR_TG0_64K;
break;
}
switch (cfg->oas) {
case 32:
- reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
break;
case 36:
- reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
break;
case 40:
- reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
break;
case 42:
- reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
break;
case 44:
- reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
break;
case 48:
- reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
break;
case 52:
- reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
break;
default:
goto out_free_data;
}
- reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
- reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
- cfg->arm_lpae_s2_cfg.vtcr = reg;
+ vtcr->tsz = 64ULL - cfg->ias;
+ vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
/* Allocate pgd pages */
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
@@ -982,35 +986,21 @@ out_free_data:
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
- struct io_pgtable *iop;
-
if (cfg->ias > 32 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
- iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
- if (iop) {
- cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
- cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
- }
-
- return iop;
+ return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
- struct io_pgtable *iop;
-
if (cfg->ias > 40 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
- iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
- if (iop)
- cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
-
- return iop;
+ return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}
static struct io_pgtable *
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index ced53e5b72b5..94394c81468f 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -63,7 +63,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
if (!ops)
return;
- iop = container_of(ops, struct io_pgtable, ops);
+ iop = io_pgtable_ops_to_pgtable(ops);
io_pgtable_tlb_flush_all(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
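io_pgtable_ops_to_pgtable() is simply the existing container_of() expression given its proper name; behaviour is unchanged. For readers unfamiliar with the idiom, a self-contained miniature:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_pgtable_ops { int dummy; };
struct io_pgtable {
	int fmt;
	struct io_pgtable_ops ops;   /* embedded member */
};

int main(void)
{
	struct io_pgtable iop = { .fmt = 3 };
	struct io_pgtable_ops *ops = &iop.ops;

	/* Recover the enclosing structure from the embedded member. */
	struct io_pgtable *back = container_of(ops, struct io_pgtable, ops);

	printf("fmt = %d\n", back->fmt);   /* prints 3 */
	return 0;
}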
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index e436ff813e7e..99869217fbec 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -87,6 +87,7 @@ error:
put_device(iommu->dev);
return ret;
}
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_add);
void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
@@ -94,6 +95,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu)
device_unregister(iommu->dev);
iommu->dev = NULL;
}
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove);
+
/*
* IOMMU drivers can indicate a device is managed by a given IOMMU using
* this interface. A link to the device will be created in the "devices"
@@ -119,6 +122,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
return ret;
}
+EXPORT_SYMBOL_GPL(iommu_device_link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
@@ -128,3 +132,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
+EXPORT_SYMBOL_GPL(iommu_device_unlink);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index db7bfd4f2d20..3e3528436e0b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
+#include <linux/module.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -141,6 +142,7 @@ int iommu_device_register(struct iommu_device *iommu)
spin_unlock(&iommu_device_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(iommu_device_register);
void iommu_device_unregister(struct iommu_device *iommu)
{
@@ -148,6 +150,7 @@ void iommu_device_unregister(struct iommu_device *iommu)
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
}
+EXPORT_SYMBOL_GPL(iommu_device_unregister);
static struct iommu_param *iommu_get_dev_param(struct device *dev)
{
@@ -183,10 +186,21 @@ int iommu_probe_device(struct device *dev)
if (!iommu_get_dev_param(dev))
return -ENOMEM;
+ if (!try_module_get(ops->owner)) {
+ ret = -EINVAL;
+ goto err_free_dev_param;
+ }
+
ret = ops->add_device(dev);
if (ret)
- iommu_free_dev_param(dev);
+ goto err_module_put;
+ return 0;
+
+err_module_put:
+ module_put(ops->owner);
+err_free_dev_param:
+ iommu_free_dev_param(dev);
return ret;
}
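This hunk, together with the EXPORT_SYMBOL_GPL additions throughout, is what lets IOMMU drivers be built as modules: probe pins the driver module via its ops->owner, and release (below) drops the pin. A user-space model of the pairing, with try_module_get()/module_put() reduced to a refcount plus an "unloading" flag; the -EINVAL value mirrors the hunk, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct module { int refcnt; bool unloading; };

static bool try_module_get(struct module *m)
{
	if (!m || m->unloading)
		return false;            /* module going away: fail probe */
	m->refcnt++;
	return true;
}

static void module_put(struct module *m)
{
	if (m)
		m->refcnt--;
}

static int probe_device(struct module *owner, int (*add_device)(void))
{
	int ret;

	if (!try_module_get(owner))
		return -22;              /* -EINVAL, as in the hunk */
	ret = add_device();
	if (ret)
		module_put(owner);       /* undo the pin on failure */
	return ret;
}

static int add_ok(void) { return 0; }

int main(void)
{
	struct module drv = { .refcnt = 0, .unloading = false };

	if (probe_device(&drv, add_ok) == 0)
		printf("pinned, refcnt=%d\n", drv.refcnt);   /* 1 */
	return 0;
}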
@@ -197,7 +211,10 @@ void iommu_release_device(struct device *dev)
if (dev->iommu_group)
ops->remove_device(dev);
- iommu_free_dev_param(dev);
+ if (dev->iommu_param) {
+ module_put(ops->owner);
+ iommu_free_dev_param(dev);
+ }
}
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -312,8 +329,8 @@ int iommu_insert_resv_region(struct iommu_resv_region *new,
list_for_each_entry_safe(iter, tmp, regions, list) {
phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
- /* no merge needed on elements of different types than @nr */
- if (iter->type != nr->type) {
+ /* no merge needed on elements of different types than @new */
+ if (iter->type != new->type) {
list_move_tail(&iter->list, &stack);
continue;
}
@@ -751,6 +768,7 @@ err_put_group:
mutex_unlock(&group->mutex);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
+ sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
kfree(device->name);
err_remove_link:
@@ -886,6 +904,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
kobject_get(group->devices_kobj);
return group;
}
+EXPORT_SYMBOL_GPL(iommu_group_ref_get);
/**
* iommu_group_put - Decrement group reference
@@ -1259,6 +1278,7 @@ struct iommu_group *generic_device_group(struct device *dev)
{
return iommu_group_alloc();
}
+EXPORT_SYMBOL_GPL(generic_device_group);
/*
* Use standard PCI bus topology, isolation features, and DMA alias quirks
@@ -1326,6 +1346,7 @@ struct iommu_group *pci_device_group(struct device *dev)
/* No shared group found, allocate new */
return iommu_group_alloc();
}
+EXPORT_SYMBOL_GPL(pci_device_group);
/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
@@ -1338,6 +1359,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
group = iommu_group_alloc();
return group;
}
+EXPORT_SYMBOL_GPL(fsl_mc_device_group);
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
@@ -1406,6 +1428,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
return group;
}
+EXPORT_SYMBOL(iommu_group_get_for_dev);
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
@@ -1536,6 +1559,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
+ if (ops == NULL) {
+ bus->iommu_ops = NULL;
+ return 0;
+ }
+
if (bus->iommu_ops != NULL)
return -EBUSY;
@@ -2229,6 +2257,25 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
ops->put_resv_regions(dev, list);
}
+/**
+ * generic_iommu_put_resv_regions - Reserved region driver helper
+ * @dev: device for which to free reserved regions
+ * @list: reserved region list for device
+ *
+ * IOMMU drivers can use this to implement their .put_resv_regions() callback
+ * for simple reservations. Memory allocated for each reserved region will be
+ * freed. If an IOMMU driver allocates additional resources per region, it is
+ * going to have to implement a custom callback.
+ */
+void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, list)
+ kfree(entry);
+}
+EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+
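virtio-iommu is converted to this helper at the bottom of the diff. The _safe list walk is load-bearing: kfree() invalidates the node under the cursor, so the successor must be captured before the node dies. The same requirement in standalone form:

#include <stdlib.h>

struct region { struct region *next; };

/* Freeing a linked list: grab 'next' before freeing the node. */
static void put_regions(struct region *head)
{
	struct region *entry = head, *next;

	while (entry) {
		next = entry->next;   /* the "_safe" part of the idiom */
		free(entry);
		entry = next;
	}
}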
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length, int prot,
enum iommu_resv_type type)
@@ -2246,6 +2293,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
region->type = type;
return region;
}
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
@@ -2282,13 +2330,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
goto out;
}
- iommu_group_create_direct_mappings(group, dev);
-
/* Make the domain the default for this group */
if (group->default_domain)
iommu_domain_free(group->default_domain);
group->default_domain = domain;
+ iommu_group_create_direct_mappings(group, dev);
+
dev_info(dev, "Using iommu %s mapping\n",
type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 41c605b0058f..0e6a9536eca6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
struct iova *alloc_iova_mem(void)
{
- return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+ return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}
EXPORT_SYMBOL(alloc_iova_mem);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index d02edd2751f3..ecb3f9464dd5 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -374,7 +374,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
u32 tmp;
/* TTBR0 */
- ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
+ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 93f14bca26ee..94a6df1bddd6 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -279,8 +279,8 @@ static void __program_context(void __iomem *base, int ctx,
SET_V2PCFG(base, ctx, 0x3);
SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
- SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
- SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+ SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
+ SET_TTBR1(base, ctx, 0);
/* Set prrr and nmrr */
SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 6fc1f5ecf91e..95945f467c03 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -367,7 +367,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
/* Update the pgtable base address register of the M4U HW */
if (!data->m4u_dom) {
data->m4u_dom = dom;
- writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+ writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
data->base + REG_MMU_PT_BASE_ADDR);
}
@@ -765,7 +765,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
if (m4u_dom)
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
base + REG_MMU_PT_BASE_ADDR);
return 0;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 026ad2b29dcd..20738aacac89 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -8,11 +8,12 @@
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
-#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fsl/mc.h>
@@ -91,16 +92,16 @@ static int of_iommu_xlate(struct device *dev,
{
const struct iommu_ops *ops;
struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
- int err;
+ int ret;
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
!of_device_is_available(iommu_spec->np))
return NO_IOMMU;
- err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
- if (err)
- return err;
+ ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+ if (ret)
+ return ret;
/*
* The otherwise-empty fwspec handily serves to indicate the specific
* IOMMU device we're waiting for, which will be useful if we ever get
@@ -109,7 +110,12 @@ static int of_iommu_xlate(struct device *dev,
if (!ops)
return driver_deferred_probe_check_state(dev);
- return ops->of_xlate(dev, iommu_spec);
+ if (!try_module_get(ops->owner))
+ return -ENODEV;
+
+ ret = ops->of_xlate(dev, iommu_spec);
+ module_put(ops->owner);
+ return ret;
}
struct of_pci_iommu_alias_info {
@@ -179,6 +185,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
.np = master_np,
};
+ pci_request_acs();
err = pci_for_each_dma_alias(to_pci_dev(dev),
of_pci_iommu_init, &info);
} else if (dev_is_fsl_mc(dev)) {
@@ -196,8 +203,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
if (err)
break;
}
- }
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (!err && fwspec)
+ of_property_read_u32(master_np, "pasid-num-bits",
+ &fwspec->num_pasid_bits);
+ }
/*
* Two success conditions can be represented by non-negative err here:
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 52f38292df5b..39759db4f003 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -201,7 +201,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
- if (!(fsr & FSR_FAULT))
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
@@ -215,7 +215,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
}
iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
- iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
+ iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
return IRQ_HANDLED;
}
@@ -269,18 +269,15 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
- pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
- FIELD_PREP(TTBRn_ASID, ctx->asid));
- iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
- pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
- FIELD_PREP(TTBRn_ASID, ctx->asid));
+ pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+ FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
+ iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
/* TCR */
iommu_writel(ctx, ARM_SMMU_CB_TCR2,
- (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
- FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+ arm_smmu_lpae_tcr2(&pgtbl_cfg));
iommu_writel(ctx, ARM_SMMU_CB_TCR,
- pgtbl_cfg.arm_lpae_s1_cfg.tcr);
+ arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
/* MAIRs (stage-1 only) */
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
@@ -289,11 +286,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
/* SCTLR */
- reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
- SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
+ reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
+ ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
+ ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
+ ARM_SMMU_SCTLR_CFCFG;
if (IS_ENABLED(CONFIG_BIG_ENDIAN))
- reg |= SCTLR_E;
+ reg |= ARM_SMMU_SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 315c7cc4f99d..cce329d71fba 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -837,14 +837,6 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
iommu_dma_get_resv_regions(dev, head);
}
-static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
-{
- struct iommu_resv_region *entry, *next;
-
- list_for_each_entry_safe(entry, next, head, list)
- kfree(entry);
-}
-
static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;
@@ -914,7 +906,7 @@ static int viommu_add_device(struct device *dev)
err_unlink_dev:
iommu_device_unlink(&viommu->iommu, dev);
err_free_dev:
- viommu_put_resv_regions(dev, &vdev->resv_regions);
+ generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
return ret;
@@ -932,7 +924,7 @@ static void viommu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
iommu_device_unlink(&vdev->viommu->iommu, dev);
- viommu_put_resv_regions(dev, &vdev->resv_regions);
+ generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -961,7 +953,7 @@ static struct iommu_ops viommu_ops = {
.remove_device = viommu_remove_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
- .put_resv_regions = viommu_put_resv_regions,
+ .put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
};
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index d246d74ec3a5..23445ebfda5c 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -298,7 +298,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
/* Map internal tpci200 driver user space */
tpci200->info->interface_regs =
- ioremap_nocache(pci_resource_start(tpci200->info->pdev,
+ ioremap(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
if (!tpci200->info->interface_regs) {
@@ -541,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
ret = -EBUSY;
goto out_err_pci_request;
}
- tpci200->info->cfg_regs = ioremap_nocache(
+ tpci200->info->cfg_regs = ioremap(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
pci_resource_len(pdev, TPCI200_CFG_MEM_BAR));
if (!tpci200->info->cfg_regs) {
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 9c2a4b5d30cf..d480a514c983 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->board_id = ipoctal->dev->id_device;
region = &ipoctal->dev->region[IPACK_IO_SPACE];
- addr = devm_ioremap_nocache(&ipoctal->dev->dev,
+ addr = devm_ioremap(&ipoctal->dev->dev,
region->start, region->size);
if (!addr) {
dev_err(&ipoctal->dev->dev,
@@ -292,7 +292,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
region = &ipoctal->dev->region[IPACK_INT_SPACE];
ipoctal->int_space =
- devm_ioremap_nocache(&ipoctal->dev->dev,
+ devm_ioremap(&ipoctal->dev->dev,
region->start, region->size);
if (!ipoctal->int_space) {
dev_err(&ipoctal->dev->dev,
@@ -303,7 +303,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
region = &ipoctal->dev->region[IPACK_MEM8_SPACE];
ipoctal->mem8_space =
- devm_ioremap_nocache(&ipoctal->dev->dev,
+ devm_ioremap(&ipoctal->dev->dev,
region->start, 0x8000);
if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 697e6a8ccaae..6d397732138d 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -438,7 +438,7 @@ config CSKY_MPINTC
help
Say yes here to enable C-SKY SMP interrupt controller driver used
for C-SKY SMP system.
- In fact it's not mmio map in hw and it use ld/st to visit the
+ In fact it's not an mmio map in hardware and it uses ld/st to access the
controller's registers inside the CPU.
config CSKY_APB_INTC
@@ -446,7 +446,7 @@ config CSKY_APB_INTC
depends on CSKY
help
Say yes here to enable C-SKY APB interrupt controller driver used
- by C-SKY single core SOC system. It use mmio map apb-bus to visit
+ by C-SKY single-core SOC systems. It uses an mmio-mapped apb-bus to access
the controller's registers.
config IMX_IRQSTEER
@@ -457,6 +457,12 @@ config IMX_IRQSTEER
help
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+config IMX_INTMUX
+ def_bool y if ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX INTMUX interrupt multiplexer.
+
config LS1X_IRQ
bool "Loongson-1 Interrupt Controller"
depends on MACH_LOONGSON32
@@ -490,6 +496,7 @@ config TI_SCI_INTA_IRQCHIP
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
help
This enables support for the PLIC chip found in SiFive (and
potentially other) RISC-V systems. The PLIC controls devices
@@ -499,4 +506,11 @@ config SIFIVE_PLIC
If you don't know what to do here, say Y.
+config EXYNOS_IRQ_COMBINER
+ bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
+ help
+ Say yes here to add support for the IRQ combiner devices embedded
+ in Samsung Exynos chips.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e806dda690ea..eae0d78cbf22 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
-obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
@@ -87,7 +87,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
-obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
@@ -100,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
+obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
new file mode 100644
index 000000000000..c90a3346b985
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Copyright 2019 IBM Corporation
+ *
+ * Eddie James <eajames@linux.ibm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#define ASPEED_SCU_IC_REG 0x018
+#define ASPEED_SCU_IC_SHIFT 0
+#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_NUM_IRQS 7
+#define ASPEED_SCU_IC_STATUS_SHIFT 16
+
+#define ASPEED_AST2600_SCU_IC0_REG 0x560
+#define ASPEED_AST2600_SCU_IC0_SHIFT 0
+#define ASPEED_AST2600_SCU_IC0_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
+#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+
+#define ASPEED_AST2600_SCU_IC1_REG 0x570
+#define ASPEED_AST2600_SCU_IC1_SHIFT 4
+#define ASPEED_AST2600_SCU_IC1_ENABLE \
+ GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
+#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+
+struct aspeed_scu_ic {
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned int reg;
+ struct regmap *scu;
+ struct irq_domain *irq_domain;
+};
+
+static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq;
+ unsigned int sts;
+ unsigned long bit;
+ unsigned long enabled;
+ unsigned long max;
+ unsigned long status;
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+
+ chained_irq_enter(chip, desc);
+
+ /*
+ * The SCU IC has just one register to control its operation and read
+ * status. The interrupt enable bits occupy the lower 16 bits of the
+ * register, while the interrupt status bits occupy the upper 16 bits.
+ * The status bit for a given interrupt is always 16 bits shifted from
+ * the enable bit for the same interrupt.
+ * Therefore, perform the IRQ operations in the enable bit space by
+ * shifting the status down to get the mapping and then back up to
+ * clear the bit.
+ */
+ regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ enabled = sts & scu_ic->irq_enable;
+ status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ irq = irq_find_mapping(scu_ic->irq_domain,
+ bit - scu_ic->irq_shift);
+ generic_handle_irq(irq);
+
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
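A compact model of the layout the handler comment describes: enables live in [15:0], write-one-to-clear status in [31:16], and the regmap mask always spans the whole owned status field so an ack can never flip an enable bit. Values below are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define STATUS_SHIFT 16

/* Ack one interrupt without disturbing enables or other status bits. */
static uint32_t ack_value(uint32_t irq_enable, unsigned bit,
			  uint32_t *mask_out)
{
	/* The mask covers every status bit we own... */
	*mask_out = irq_enable << STATUS_SHIFT;
	/* ...but only the acked bit is written as 1 (W1C). */
	return 1u << (bit + STATUS_SHIFT);
}

int main(void)
{
	uint32_t mask;
	uint32_t val = ack_value(0x7f /* 7 irqs */, 3, &mask);

	printf("mask %#x, write %#x\n", mask, val); /* 0x7f0000, 0x80000 */
	return 0;
}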
+static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+ * Status bits are cleared by writing 1. In order to prevent the mask
+ * operation from clearing the status bits, they should be under the
+ * mask and written with 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+}
+
+static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit |
+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+ /*
+ * Status bits are cleared by writing 1. In order to prevent the unmask
+ * operation from clearing the status bits, they should be under the
+ * mask and written with 0.
+ */
+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+}
+
+static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip aspeed_scu_ic_chip = {
+ .name = "aspeed-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask,
+ .irq_unmask = aspeed_scu_ic_irq_unmask,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
+ .map = aspeed_scu_ic_map,
+};
+
+static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
+ struct device_node *node)
+{
+ int irq;
+ int rc = 0;
+
+ if (!node->parent) {
+ rc = -ENODEV;
+ goto err;
+ }
+
+ scu_ic->scu = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(scu_ic->scu)) {
+ rc = PTR_ERR(scu_ic->scu);
+ goto err;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ rc = irq;
+ goto err;
+ }
+
+ scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+ &aspeed_scu_ic_domain_ops,
+ scu_ic);
+ if (!scu_ic->irq_domain) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ scu_ic);
+
+ return 0;
+
+err:
+ kfree(scu_ic);
+
+ return rc;
+}
+
+static int __init aspeed_scu_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
+ scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
+ scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
+ scu_ic->reg = ASPEED_SCU_IC_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+ if (!scu_ic)
+ return -ENOMEM;
+
+ scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
+ scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
+ scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
+ scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
+ aspeed_ast2600_scu_ic0_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
+ aspeed_ast2600_scu_ic1_of_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e05673bcd52b..83b1186ffcad 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -106,6 +106,7 @@ struct its_node {
u64 typer;
u64 cbaser_save;
u32 ctlr_save;
+ u32 mpidr;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
@@ -116,12 +117,22 @@ struct its_node {
};
#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
#define ITS_ITT_ALIGN SZ_256
/* The maximum number of VPEID bits supported by VLPI commands */
-#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID_BITS \
+ ({ \
+ int nvpeid = 16; \
+ if (gic_rdists->has_rvpeid && \
+ gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
+ nvpeid = 1 + (gic_rdists->gicd_typer2 & \
+ GICD_TYPER2_VID); \
+ \
+ nvpeid; \
+ })
#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
/* Convert page order to size in bytes */
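Written out as a plain function, the statement expression above says: without RVPEID the VPEID space stays at 16 bits; with it, GICD_TYPER2.VID encodes (bits - 1). The bit/mask parameters below are stand-ins for the GICD_TYPER2_* definitions, kept abstract here:

/* Equivalent of the ITS_MAX_VPEID_BITS statement expression (sketch). */
int max_vpeid_bits(int has_rvpeid, unsigned gicd_typer2,
		   unsigned vil_bit, unsigned vid_mask)
{
	int nvpeid = 16;

	if (has_rvpeid && (gicd_typer2 & vil_bit))
		nvpeid = 1 + (gicd_typer2 & vid_mask);
	return nvpeid;           /* table size is then 1 << nvpeid */
}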
@@ -216,11 +227,27 @@ static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
return &its_dev->event_map.vlpi_maps[event];
}
-static struct its_collection *irq_to_col(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+{
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ return dev_event_to_vlpi_map(its_dev, event);
+ }
+
+ return NULL;
+}
+
+static int irq_to_cpuid(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_vlpi_map *map = get_vlpi_map(d);
+
+ if (map)
+ return map->vpe->col_idx;
- return dev_event_to_col(its_dev, its_get_event_id(d));
+ return its_dev->event_map.col_map[its_get_event_id(d)];
}
static struct its_collection *valid_col(struct its_collection *col)
@@ -322,6 +349,10 @@ struct its_cmd_desc {
u16 seq_num;
u16 its_list;
} its_vmovp_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ } its_invdb_cmd;
};
};
@@ -438,6 +469,38 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}
+static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
+{
+ its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
+}
+
+static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
+{
+ its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
+}
+
+static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
+{
+ its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
+}
+
+static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
+ u32 vpe_db_lpi)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_db(struct its_cmd_block *cmd, bool db)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
+}
+
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
@@ -598,7 +661,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
- its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+ its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
its_fixup_cmd(cmd);
@@ -621,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
- unsigned long vpt_addr;
+ unsigned long vpt_addr, vconf_addr;
u64 target;
-
- vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
- target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+ bool alloc;
its_encode_cmd(cmd, GITS_CMD_VMAPP);
its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+ if (!desc->its_vmapp_cmd.valid) {
+ if (is_v4_1(its)) {
+ alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+ its_encode_alloc(cmd, alloc);
+ }
+
+ goto out;
+ }
+
+ vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
its_encode_target(cmd, target);
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+ if (!is_v4_1(its))
+ goto out;
+
+ vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
+ alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
+ its_encode_alloc(cmd, alloc);
+
+ /* We can only signal PTZ when alloc==1. Why do we have two bits? */
+ its_encode_ptz(cmd, alloc);
+ its_encode_vconf_addr(cmd, vconf_addr);
+ its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
+
+out:
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmapp_cmd.vpe);
@@ -645,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
{
u32 db;
- if (desc->its_vmapti_cmd.db_enabled)
+ if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
else
db = 1023;
@@ -668,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
{
u32 db;
- if (desc->its_vmovi_cmd.db_enabled)
+ if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
else
db = 1023;
@@ -698,6 +787,11 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
its_encode_target(cmd, target);
+ if (is_v4_1(its)) {
+ its_encode_db(cmd, true);
+ its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
+ }
+
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmovp_cmd.vpe);
@@ -757,6 +851,21 @@ static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
return valid_vpe(its, map->vpe);
}
+static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_INVDB);
+ its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_invdb_cmd.vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -1165,20 +1274,17 @@ static void its_send_vclear(struct its_device *dev, u32 event_id)
its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}
-/*
- * irqchip functions - assumes MSI, mostly.
- */
-static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
-
- if (!irqd_is_forwarded_to_vcpu(d))
- return NULL;
+ struct its_cmd_desc desc;
- return dev_event_to_vlpi_map(its_dev, event);
+ desc.its_invdb_cmd.vpe = vpe;
+ its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_vlpi_map *map = get_vlpi_map(d);
@@ -1221,13 +1327,25 @@ static void wait_for_syncr(void __iomem *rdbase)
static void direct_lpi_inv(struct irq_data *d)
{
- struct its_collection *col;
+ struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
+ u64 val;
+
+ if (map) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ WARN_ON(!is_v4_1(its_dev->its));
+
+ val = GICR_INVLPIR_V;
+ val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
+ val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
+ } else {
+ val = d->hwirq;
+ }
/* Target the redistributor this LPI is currently routed to */
- col = irq_to_col(d);
- rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
- gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
}
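For GICv4.1 the invalidate collapses into a single MMIO write: GICR_INVLPIR carries the virtual INTID plus the owning VPEID, with a V bit selecting the virtual form. A sketch of the value composition, assuming the field placement used here (INTID in [31:0], VPEID in [47:32], V at bit 63); verify against the GIC architecture specification before reuse:

#include <stdint.h>

#define INVLPIR_V            (1ULL << 63)
#define INVLPIR_VPEID_SHIFT  32
#define INVLPIR_INTID_SHIFT  0

uint64_t invlpir_value(int is_virtual, uint32_t vpe_id, uint32_t intid)
{
	if (!is_virtual)
		return intid;                 /* plain physical LPI inv */
	return INVLPIR_V |
	       ((uint64_t)vpe_id << INVLPIR_VPEID_SHIFT) |
	       ((uint64_t)intid  << INVLPIR_INTID_SHIFT);
}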
@@ -1237,7 +1355,8 @@ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
lpi_write_config(d, clr, set);
- if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+ if (gic_rdists->has_direct_lpi &&
+ (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
direct_lpi_inv(d);
else if (!irqd_is_forwarded_to_vcpu(d))
its_send_inv(its_dev, its_get_event_id(d));
@@ -1251,6 +1370,13 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
+ /*
+ * GICv4.1 does away with the per-LPI nonsense, nothing to do
+ * here.
+ */
+ if (is_v4_1(its_dev->its))
+ return;
+
map = dev_event_to_vlpi_map(its_dev, event);
if (map->db_enabled == enable)
@@ -2090,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its,
return indirect;
}
+static u32 compute_common_aff(u64 val)
+{
+ u32 aff, clpiaff;
+
+ aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
+ clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
+
+ return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
+}
+
+static u32 compute_its_aff(struct its_node *its)
+{
+ u64 val;
+ u32 svpet;
+
+ /*
+ * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
+ * the resulting affinity. We then use that to see if this matches
+ * our own affinity.
+ */
+ svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
+ val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
+ val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
+ return compute_common_aff(val);
+}
+
+static struct its_node *find_sibling_its(struct its_node *cur_its)
+{
+ struct its_node *its;
+ u32 aff;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
+ return NULL;
+
+ aff = compute_its_aff(cur_its);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser;
+
+ if (!is_v4_1(its) || its == cur_its)
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ return its;
+ }
+
+ return NULL;
+}
+
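compute_common_aff() keeps only the affinity bytes that GICR_TYPER.CommonLPIAff requires to match for two redistributors (or an ITS and an RD) to share vPE tables: 0 means all share, 1 keeps Aff3, 2 keeps Aff3.Aff2, 3 keeps Aff3.Aff2.Aff1. The masking, checked standalone:

#include <assert.h>
#include <stdint.h>

static uint32_t common_aff(uint32_t aff, unsigned clpiaff)
{
	/* Drop the low (4 - clpiaff) affinity bytes. */
	return aff & ~(0xffffffffu >> (clpiaff * 8));
}

int main(void)
{
	uint32_t a = 0x01020304;          /* Aff3.Aff2.Aff1.Aff0 */

	assert(common_aff(a, 0) == 0x00000000);  /* everyone matches */
	assert(common_aff(a, 1) == 0x01000000);  /* Aff3 must match  */
	assert(common_aff(a, 2) == 0x01020000);  /* Aff3.Aff2        */
	assert(common_aff(a, 3) == 0x01020300);  /* Aff3.Aff2.Aff1   */
	return 0;
}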
static void its_free_tables(struct its_node *its)
{
int i;
@@ -2132,6 +2317,17 @@ static int its_alloc_tables(struct its_node *its)
break;
case GITS_BASER_TYPE_VCPU:
+ if (is_v4_1(its)) {
+ struct its_node *sibling;
+
+ WARN_ON(i != 2);
+ if ((sibling = find_sibling_its(its))) {
+ *baser = sibling->tables[2];
+ its_write_baser(its, baser, baser->val);
+ continue;
+ }
+ }
+
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
ITS_MAX_VPEID_BITS);
@@ -2153,6 +2349,287 @@ static int its_alloc_tables(struct its_node *its)
return 0;
}
+static u64 inherit_vpe_l1_table_from_its(void)
+{
+ struct its_node *its;
+ u64 val;
+ u32 aff;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser, addr;
+
+ if (!is_v4_1(its))
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ /* We have a winner! */
+ gic_data_rdist()->vpe_l1_base = its->tables[2].base;
+
+ val = GICR_VPROPBASER_4_1_VALID;
+ if (baser & GITS_BASER_INDIRECT)
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
+ FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
+ switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
+ case GIC_PAGE_SIZE_64K:
+ addr = GITS_BASER_ADDR_48_to_52(baser);
+ break;
+ default:
+ addr = baser & GENMASK_ULL(47, 12);
+ break;
+ }
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+
+ return val;
+ }
+
+ return 0;
+}
+
+static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
+{
+ u32 aff;
+ u64 val;
+ int cpu;
+
+ val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+ aff = compute_common_aff(val);
+
+ for_each_possible_cpu(cpu) {
+ void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+
+ if (!base || cpu == smp_processor_id())
+ continue;
+
+ val = gic_read_typer(base + GICR_TYPER);
+ if (aff != compute_common_aff(val))
+ continue;
+
+ /*
+ * At this point, we have a victim. This particular CPU
+ * has already booted, and has an affinity that matches
+ * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
+ * Make sure we don't write the Z bit in that case.
+ */
+ val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_Z;
+
+ gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
+ *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+
+ return val;
+ }
+
+ return 0;
+}
+
+static bool allocate_vpe_l2_table(int cpu, u32 id)
+{
+ void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+ unsigned int psz, esz, idx, npg, gpsz;
+ u64 val;
+ struct page *page;
+ __le64 *table;
+
+ if (!gic_rdists->has_rvpeid)
+ return true;
+
+ val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+
+ esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
+ gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+ npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
+
+ switch (gpsz) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case GIC_PAGE_SIZE_4K:
+ psz = SZ_4K;
+ break;
+ case GIC_PAGE_SIZE_16K:
+ psz = SZ_16K;
+ break;
+ case GIC_PAGE_SIZE_64K:
+ psz = SZ_64K;
+ break;
+ }
+
+ /* Don't allow vpe_id that exceeds single, flat table limit */
+ if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
+ return (id < (npg * psz / (esz * SZ_8)));
+
+ /* Compute 1st level table index & check if that exceeds table limit */
+ idx = id >> ilog2(psz / (esz * SZ_8));
+ if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
+ return false;
+
+ table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
+
+ /* Allocate memory for 2nd level table */
+ if (!table[idx]) {
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+ if (!page)
+ return false;
+
+ /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+ if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+ gic_flush_dcache_to_poc(page_address(page), psz);
+
+ table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+ if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+ /* Ensure updated table contents are visible to RD hardware */
+ dsb(sy);
+ }
+
+ return true;
+}
+
+static int allocate_vpe_l1_table(void)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val, gpsz, npg, pa;
+ unsigned int psz = SZ_64K;
+ unsigned int np, epp, esz;
+ struct page *page;
+
+ if (!gic_rdists->has_rvpeid)
+ return 0;
+
+ /*
+ * If VPENDBASER.Valid is set, disable any previously programmed
+ * VPE by setting PendingLast while clearing Valid. This has the
+ * effect of making sure no doorbell will be generated and we can
+ * then safely clear VPROPBASER.Valid.
+ */
+ if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+ gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ vlpi_base + GICR_VPENDBASER);
+
+ /*
+ * If we can inherit the configuration from another RD, let's do
+ * so. Otherwise, we have to go through the allocation process. We
+ * assume that all RDs have the exact same requirements, as
+ * nothing will work otherwise.
+ */
+ val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!gic_data_rdist()->vpe_table_mask)
+ return -ENOMEM;
+
+ val = inherit_vpe_l1_table_from_its();
+ if (val & GICR_VPROPBASER_4_1_VALID)
+ goto out;
+
+ /* First probe the page size */
+ val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+ gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+ esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
+
+ switch (gpsz) {
+ default:
+ gpsz = GIC_PAGE_SIZE_4K;
+ /* fall through */
+ case GIC_PAGE_SIZE_4K:
+ psz = SZ_4K;
+ break;
+ case GIC_PAGE_SIZE_16K:
+ psz = SZ_16K;
+ break;
+ case GIC_PAGE_SIZE_64K:
+ psz = SZ_64K;
+ break;
+ }
+
+ /*
+ * Start populating the register from scratch, including RO fields
+ * (which we want to print in debug cases...)
+ */
+ val = 0;
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
+
+ /* How many entries per GIC page? */
+ esz++;
+ epp = psz / (esz * SZ_8);
+
+ /*
+ * If we need more than just a single L1 page, flag the table
+ * as indirect and compute the number of required L1 pages.
+ */
+ if (epp < ITS_MAX_VPEID) {
+ int nl2;
+
+ val |= GICR_VPROPBASER_4_1_INDIRECT;
+
+ /* Number of L2 pages required to cover the VPEID space */
+ nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
+
+ /* Number of L1 pages to point to the L2 pages */
+ npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
+ } else {
+ npg = 1;
+ }
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
+
+ /* Right, that's the number of CPU pages we need for L1 */
+ np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
+
+ pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
+ np, npg, psz, epp, esz);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ if (!page)
+ return -ENOMEM;
+
+ gic_data_rdist()->vpe_l1_base = page_address(page);
+ pa = virt_to_phys(page_address(page));
+ WARN_ON(!IS_ALIGNED(pa, psz));
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ val |= GICR_VPROPBASER_4_1_Z;
+ val |= GICR_VPROPBASER_4_1_VALID;
+
+out:
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
+
+ pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
+ smp_processor_id(), val,
+ cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
+
+ return 0;
+}
+
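The sizing logic above is three divisions. Worked through for one plausible configuration (16-byte vPE entries, 64K GIC pages, a 64K-entry VPEID space): epp = 65536 / 16 = 4096 entries per page, so the table must go indirect; nl2 = ceil(65536 / 4096) = 16 L2 pages; npg = ceil(16 * 8 / 65536) = 1 L1 page. A checkable sketch of the same arithmetic, with the entry size taken in bytes rather than the register's doubleword encoding:

#include <assert.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static unsigned l1_pages(unsigned max_vpeid, unsigned esz_bytes,
			 unsigned psz, int *indirect)
{
	unsigned epp = psz / esz_bytes;       /* vPE entries per page  */
	unsigned nl2;

	*indirect = epp < max_vpeid;
	if (!*indirect)
		return 1;                     /* flat: one level only  */
	nl2 = DIV_ROUND_UP(max_vpeid, epp);   /* L2 pages needed       */
	return DIV_ROUND_UP(nl2 * 8, psz);    /* 8-byte L1 descriptors */
}

int main(void)
{
	int ind;

	assert(l1_pages(65536, 16, 65536, &ind) == 1 && ind);
	return 0;
}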
static int its_alloc_collections(struct its_node *its)
{
int i;
@@ -2244,18 +2721,20 @@ static int __init allocate_lpi_tables(void)
return 0;
}
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
{
u32 count = 1000000; /* 1s! */
bool clean;
u64 val;
- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+ val &= ~clr;
+ val |= set;
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
do {
- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
if (!clean) {
count--;
@@ -2264,6 +2743,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
}
} while (!clean && count);
+ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+ val |= GICR_VPENDBASER_PendingLast;
+ }
+
return val;
}
@@ -2352,7 +2836,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
- if (gic_rdists->has_vlpis) {
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
/*
@@ -2365,15 +2849,24 @@ static void its_cpu_init_lpis(void)
val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
smp_processor_id(), val);
- gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
/*
* Also clear Valid bit of GICR_VPENDBASER, in case some
* ancient programming gets left in and has possibility of
* corrupting memory.
*/
- val = its_clear_vpend_valid(vlpi_base);
- WARN_ON(val & GICR_VPENDBASER_Dirty);
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
+ }
+
+ if (allocate_vpe_l1_table()) {
+ /*
+ * If the allocation has failed, we're in massive trouble.
+ * Disable direct injection, and pray that no VM was
+ * already running...
+ */
+ gic_rdists->has_rvpeid = false;
+ gic_rdists->has_vlpis = false;
}
/* Make sure the GIC has seen the above */
@@ -2527,6 +3020,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
static bool its_alloc_vpe_table(u32 vpe_id)
{
struct its_node *its;
+ int cpu;
/*
* Make sure the L2 tables are allocated on *all* v4 ITSs. We
@@ -2549,6 +3043,19 @@ static bool its_alloc_vpe_table(u32 vpe_id)
return false;
}
+ /* Not v4.1? No need to iterate the RDs; bail out early. */
+ if (!gic_rdists->has_rvpeid)
+ return true;
+
+ /*
+ * Make sure the L2 tables are allocated for all copies of
+ * the L1 table on *all* v4.1 RDs.
+ */
+ for_each_possible_cpu(cpu) {
+ if (!allocate_vpe_l2_table(cpu, vpe_id))
+ return false;
+ }
+
return true;
}
@@ -2859,7 +3366,7 @@ static const struct irq_domain_ops its_domain_ops = {
/*
* This is insane.
*
- * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
* likely), the only way to perform an invalidate is to use a fake
* device to issue an INV command, implying that the LPI has first
* been mapped to some event on that device. Since this is not exactly
@@ -2867,9 +3374,20 @@ static const struct irq_domain_ops its_domain_ops = {
* only issue an UNMAP if we're short on available slots.
*
* Broken by design(tm).
+ *
+ * GICv4.1, on the other hand, mandates that we're able to invalidate
+ * by writing to a MMIO register. It doesn't implement the whole of
+ * DirectLPI, but that's good enough. And most of the time, we don't
+ * even have to invalidate anything, as the redistributor can be told
+ * whether to generate a doorbell or not (we thus leave it enabled,
+ * always).
*/
static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
/* Already unmapped? */
if (vpe->vpe_proxy_event == -1)
return;
@@ -2892,6 +3410,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
if (!gic_rdists->has_direct_lpi) {
unsigned long flags;
@@ -2903,6 +3425,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
/* Already mapped? */
if (vpe->vpe_proxy_event != -1)
return;
@@ -2925,6 +3451,10 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
unsigned long flags;
struct its_collection *target_col;
+ /* GICv4.1 doesn't use a proxy, so nothing to do here */
+ if (gic_rdists->has_rvpeid)
+ return;
+
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
@@ -2951,7 +3481,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- int cpu = cpumask_first(mask_val);
+ int from, cpu = cpumask_first(mask_val);
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -2959,14 +3489,24 @@ static int its_vpe_set_affinity(struct irq_data *d,
* into the proxy device, we need to move the doorbell
* interrupt to its new location.
*/
- if (vpe->col_idx != cpu) {
- int from = vpe->col_idx;
+ if (vpe->col_idx == cpu)
+ goto out;
- vpe->col_idx = cpu;
- its_send_vmovp(vpe);
- its_vpe_db_proxy_move(vpe, from, cpu);
- }
+ from = vpe->col_idx;
+ vpe->col_idx = cpu;
+
+ /*
+ * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+ * is sharing its VPE table with the current one.
+ */
+ if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+ cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ goto out;
+
+ its_send_vmovp(vpe);
+ its_vpe_db_proxy_move(vpe, from, cpu);
+out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
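
The rewritten affinity path can skip the expensive VMOVP (and the proxy doorbell move) when the destination CPU's redistributor already shares its VPE table with the source CPU. A sketch of that test, with a plain bitmask standing in for the vpe_table_mask cpumask; the sharing layout below is invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-CPU stand-in for vpe_table_mask: set of CPUs sharing this RD's table. */
static uint32_t vpe_table_mask[4] = {
    0x3, 0x3,   /* CPUs 0 and 1 share one VPE table */
    0xc, 0xc,   /* CPUs 2 and 3 share another */
};

static bool needs_vmovp(int from, int to)
{
    /* If the destination's table already covers 'from', skip the move. */
    return !(vpe_table_mask[to] & (1u << from));
}

int main(void)
{
    printf("0 -> 1: vmovp %s\n", needs_vmovp(0, 1) ? "sent" : "skipped");
    printf("0 -> 2: vmovp %s\n", needs_vmovp(0, 2) ? "sent" : "skipped");
    return 0;
}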
@@ -2983,7 +3523,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
val |= GICR_VPROPBASER_RaWb;
val |= GICR_VPROPBASER_InnerShareable;
- gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
@@ -3001,7 +3541,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= GICR_VPENDBASER_PendingLast;
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}
static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3009,16 +3549,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u64 val;
- val = its_clear_vpend_valid(vlpi_base);
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
- if (unlikely(val & GICR_VPENDBASER_Dirty)) {
- pr_err_ratelimited("ITS virtual pending table not cleaning\n");
- vpe->idai = false;
- vpe->pending_last = true;
- } else {
- vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
- vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
- }
+ vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}
static void its_vpe_invall(struct its_vpe *vpe)
@@ -3151,6 +3685,139 @@ static struct irq_chip its_vpe_irq_chip = {
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
+static struct its_node *find_4_1_its(void)
+{
+ static struct its_node *its = NULL;
+
+ if (!its) {
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (is_v4_1(its))
+ return its;
+ }
+
+ /* Oops? */
+ its = NULL;
+ }
+
+ return its;
+}
+
+static void its_vpe_4_1_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * GICv4.1 wants doorbells to be invalidated using the
+ * INVDB command in order to be broadcast to all RDs. Send
+ * it to the first valid ITS, and let the HW do its magic.
+ */
+ its = find_4_1_its();
+ if (its)
+ its_send_invdb(its, vpe);
+}
+
+static void its_vpe_4_1_mask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_unmask_irq(struct irq_data *d)
+{
+ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+ its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_schedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val = 0;
+
+ /* Schedule the VPE */
+ val |= GICR_VPENDBASER_Valid;
+ val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
+ val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
+ val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
+
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
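
its_vpe_4_1_schedule() builds GICR_VPENDBASER purely from bitfields: Valid, the two group enables, and the vPE ID packed with FIELD_PREP(). A compilable sketch of that packing; the GENMASK_ULL()/FIELD_PREP() macros are userspace re-creations and the bit positions are illustrative, not the architectural ones:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP(). */
#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
    (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define VPENDBASER_VALID   (1ULL << 63)
#define VPENDBASER_VGRP0EN (1ULL << 59) /* stand-in positions */
#define VPENDBASER_VGRP1EN (1ULL << 58)
#define VPENDBASER_VPEID   GENMASK_ULL(15, 0)

int main(void)
{
    uint64_t val = 0;

    val |= VPENDBASER_VALID;
    val |= VPENDBASER_VGRP1EN;
    val |= FIELD_PREP(VPENDBASER_VPEID, 0x123);

    printf("GICR_VPENDBASER = 0x%016" PRIx64 "\n", val);
    return 0;
}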
+
+static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+ struct its_cmd_info *info)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ if (info->req_db) {
+ /*
+ * vPE is going to block: make the vPE non-resident with
+ * PendingLast clear and DB set. The GIC guarantees that if
+ * we read-back PendingLast clear, then a doorbell will be
+ * delivered when an interrupt comes.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ GICR_VPENDBASER_PendingLast,
+ GICR_VPENDBASER_4_1_DB);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ } else {
+ /*
+ * We're not blocking, so just make the vPE non-resident
+ * with PendingLast set, indicating that we'll be back.
+ */
+ val = its_clear_vpend_valid(vlpi_base,
+ 0,
+ GICR_VPENDBASER_PendingLast);
+ vpe->pending_last = true;
+ }
+}
+
+static void its_vpe_4_1_invall(struct its_vpe *vpe)
+{
+ void __iomem *rdbase;
+ u64 val;
+
+ val = GICR_INVALLR_V;
+ val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+ /* Target the redistributor this vPE is currently known on */
+ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVALLR);
+}
+
+static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+ its_vpe_4_1_schedule(vpe, info);
+ return 0;
+
+ case DESCHEDULE_VPE:
+ its_vpe_4_1_deschedule(vpe, info);
+ return 0;
+
+ case INVALL_VPE:
+ its_vpe_4_1_invall(vpe);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_vpe_4_1_irq_chip = {
+ .name = "GICv4.1-vpe",
+ .irq_mask = its_vpe_4_1_mask_irq,
+ .irq_unmask = its_vpe_4_1_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_vpe_set_affinity,
+ .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
+};
+
static int its_vpe_id_alloc(void)
{
return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3186,7 +3853,10 @@ static int its_vpe_init(struct its_vpe *vpe)
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
- vpe->vpe_proxy_event = -1;
+ if (gic_rdists->has_rvpeid)
+ atomic_set(&vpe->vmapp_count, 0);
+ else
+ vpe->vpe_proxy_event = -1;
return 0;
}
@@ -3228,6 +3898,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
+ struct irq_chip *irqchip = &its_vpe_irq_chip;
struct its_vm *vm = args;
unsigned long *bitmap;
struct page *vprop_page;
@@ -3255,6 +3926,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
+ if (gic_rdists->has_rvpeid)
+ irqchip = &its_vpe_4_1_irq_chip;
+
for (i = 0; i < nr_irqs; i++) {
vm->vpes[i]->vpe_db_lpi = base + i;
err = its_vpe_init(vm->vpes[i]);
@@ -3265,7 +3939,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (err)
break;
irq_domain_set_hwirq_and_chip(domain, virq + i, i,
- &its_vpe_irq_chip, vm->vpes[i]);
+ irqchip, vm->vpes[i]);
set_bit(i, bitmap);
}
@@ -3778,6 +4452,14 @@ static int __init its_probe_one(struct resource *res,
} else {
pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
}
+
+ if (is_v4_1(its)) {
+ u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+ its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+
+ pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
+ &res->start, its->mpidr, svpet);
+ }
}
its->numa_node = numa_node;
@@ -4138,6 +4820,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4 = false;
int err;
+ gic_rdists = rdists;
+
its_parent = parent_domain;
of_node = to_of_node(handle);
if (of_node)
@@ -4150,8 +4834,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return -ENXIO;
}
- gic_rdists = rdists;
-
err = allocate_lpi_tables();
if (err)
return err;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d6218012097b..c1f7af9d9ae7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -858,8 +858,21 @@ static int __gic_update_rdist_properties(struct redist_region *region,
void __iomem *ptr)
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
+
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
- gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+
+ /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ gic_data.rdists.has_rvpeid);
+
+ /* Detect nonsensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+ gic_data.rdists.has_direct_lpi = false;
+ gic_data.rdists.has_vlpis = false;
+ gic_data.rdists.has_rvpeid = false;
+ }
+
gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
return 1;
@@ -872,9 +885,10 @@ static void gic_update_rdist_properties(void)
if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
gic_data.ppi_nr = 0;
pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
- pr_info("%sVLPI support, %sdirect LPI support\n",
+ pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
!gic_data.rdists.has_vlpis ? "no " : "",
- !gic_data.rdists.has_direct_lpi ? "no " : "");
+ !gic_data.rdists.has_direct_lpi ? "no " : "",
+ !gic_data.rdists.has_rvpeid ? "no " : "");
}
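
The property probing works by veto: each capability starts out true at init time and every redistributor can only clear it with &=, so the system-wide view is the intersection of all RDs, with RVPEID additionally rescuing the direct-LPI path. A small sketch of that accumulation with toy TYPER snapshots:

#include <stdbool.h>
#include <stdio.h>

struct rdist_caps { bool vlpis, rvpeid, direct_lpi; };

int main(void)
{
    /* TYPER snapshots of three redistributors (invented values). */
    struct rdist_caps rd[3] = {
        { true,  true,  false },
        { true,  true,  true  },
        { true,  false, true  },
    };
    /* Start optimistic, then let each RD veto what it lacks. */
    struct rdist_caps sys = { true, true, true };

    for (int i = 0; i < 3; i++) {
        sys.vlpis  &= rd[i].vlpis;
        sys.rvpeid &= rd[i].rvpeid;
        /* RVPEID implies a usable direct-LPI path. */
        sys.direct_lpi &= rd[i].direct_lpi | rd[i].rvpeid;
    }

    printf("vlpis=%d rvpeid=%d direct_lpi=%d\n",
           sys.vlpis, sys.rvpeid, sys.direct_lpi);
    return 0;
}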
/* Check whether it's single security state view */
@@ -1562,10 +1576,14 @@ static int __init gic_init_bases(void __iomem *dist_base,
pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
+
+ gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
@@ -1821,6 +1839,7 @@ static struct
struct redist_region *redist_regs;
u32 nr_redist_regions;
bool single_redist;
+ int enabled_rdists;
u32 maint_irq;
int maint_irq_mode;
phys_addr_t vcpu_base;
@@ -1915,8 +1934,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
* If GICC is enabled and has valid gicr base address, then it means
* GICR base is presented via GICC
*/
- if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+ acpi_data.enabled_rdists++;
return 0;
+ }
/*
* It's perfectly valid firmware can pass disabled GICC entry, driver
@@ -1946,8 +1967,10 @@ static int __init gic_acpi_count_gicr_regions(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_match_gicc, 0);
- if (count > 0)
+ if (count > 0) {
acpi_data.single_redist = true;
+ count = acpi_data.enabled_rdists;
+ }
return count;
}
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
new file mode 100644
index 000000000000..c27577c81126
--- /dev/null
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 NXP
+
+/*                          INTMUX Block Diagram
+ *
+ *                               ________________
+ * interrupt source #  0  +---->|                |
+ *                        |     |                |
+ * interrupt source #  1  +++-->|                |
+ *            ...         | |   |   channel # 0  |--------->interrupt out # 0
+ *            ...         | |   |                |
+ *            ...         | |   |                |
+ * interrupt source # X-1 +++-->|________________|
+ *                        | | |
+ *                        | | |
+ *                        | | |  ________________
+ *                        +---->|                |
+ *                        | | | |                |
+ *                        | +-->|                |
+ *                        | | | |   channel # 1  |--------->interrupt out # 1
+ *                        | | +>|                |
+ *                        | | | |                |
+ *                        | | | |________________|
+ *                        | | |
+ *                        | | |
+ *                        | | |       ...
+ *                        | | |       ...
+ *                        | | |
+ *                        | | |  ________________
+ *                        +---->|                |
+ *                          | | |                |
+ *                          +-->|                |
+ *                            | |   channel # N  |--------->interrupt out # N
+ *                            +>|                |
+ *                              |                |
+ *                              |________________|
+ *
+ *
+ * N: Interrupt Channel Instance Number (N=7)
+ * X: Interrupt Source Number for each channel (X=32)
+ *
+ * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32
+ * interrupt sources and generates 1 interrupt output.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define CHANIER(n) (0x10 + (0x40 * n))
+#define CHANIPR(n) (0x20 + (0x40 * n))
+
+#define CHAN_MAX_NUM 0x8
+
+struct intmux_irqchip_data {
+ int chanidx;
+ int irq;
+ struct irq_domain *domain;
+};
+
+struct intmux_data {
+ raw_spinlock_t lock;
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int channum;
+ struct intmux_irqchip_data irqchip_data[];
+};
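
Each INTMUX channel owns a 0x40-byte register stride, so CHANIER(n) and CHANIPR(n) are simple offset arithmetic, and the per-channel irqchip_data entries recover their parent intmux_data through container_of() on the flexible array member. A userspace sketch of both, with container_of() re-created locally and a constant array index for portability:

#include <stddef.h>
#include <stdio.h>

#define CHANIER(n) (0x10 + (0x40 * (n)))
#define CHANIPR(n) (0x20 + (0x40 * (n)))

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct chan { int idx; };
struct mux {
    int channum;
    struct chan chans[8];
};

int main(void)
{
    struct mux m = { .channum = 8 };
    struct chan *c = &m.chans[3];

    c->idx = 3;

    /* Recover the parent from the embedded per-channel element. */
    struct mux *parent = container_of(c, struct mux, chans[3]);

    printf("CHANIER(3)=0x%x CHANIPR(3)=0x%x parent ok=%d\n",
           CHANIER(3), CHANIPR(3), parent == &m);
    return 0;
}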
+
+static void imx_intmux_irq_mask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* disable the interrupt source of this channel */
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_intmux_irq_unmask(struct irq_data *d)
+{
+ struct intmux_irqchip_data *irqchip_data = d->chip_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long flags;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ reg = data->regs + CHANIER(idx);
+ val = readl_relaxed(reg);
+ /* enable the interrupt source of this channel */
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, reg);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static struct irq_chip imx_intmux_irq_chip = {
+ .name = "intmux",
+ .irq_mask = imx_intmux_irq_mask,
+ .irq_unmask = imx_intmux_irq_unmask,
+};
+
+static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+
+ /*
+ * two cells needed in interrupt specifier:
+ * the 1st cell: hw interrupt number
+ * the 2nd cell: channel index
+ */
+ if (WARN_ON(intsize != 2))
+ return -EINVAL;
+
+ if (WARN_ON(intspec[1] >= data->channum))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct intmux_irqchip_data *irqchip_data = d->host_data;
+
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return false;
+
+ return irqchip_data->chanidx == fwspec->param[1];
+}
+
+static const struct irq_domain_ops imx_intmux_domain_ops = {
+ .map = imx_intmux_irq_map,
+ .xlate = imx_intmux_irq_xlate,
+ .select = imx_intmux_irq_select,
+};
+
+static void imx_intmux_irq_handler(struct irq_desc *desc)
+{
+ struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc);
+ int idx = irqchip_data->chanidx;
+ struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+ irqchip_data[idx]);
+ unsigned long irqstat;
+ int pos, virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ /* read the interrupt source pending status of this channel */
+ irqstat = readl_relaxed(data->regs + CHANIPR(idx));
+
+ for_each_set_bit(pos, &irqstat, 32) {
+ virq = irq_find_mapping(irqchip_data->domain, pos);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
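
The chained handler reads the channel's CHANIPR pending word once and dispatches every set bit, which is essentially what for_each_set_bit() expands to. A sketch of that demux loop, using __builtin_ctz() in place of the kernel helper and printf() in place of generic_handle_irq():

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for irq_find_mapping()/generic_handle_irq(). */
static void handle_hwirq(int hwirq)
{
    printf("handling channel hwirq %d\n", hwirq);
}

static void demux_channel(uint32_t chanipr)
{
    /* Walk every set bit of the pending register, lowest first. */
    while (chanipr) {
        int pos = __builtin_ctz(chanipr);

        handle_hwirq(pos);
        chanipr &= chanipr - 1; /* clear the bit we just served */
    }
}

int main(void)
{
    demux_channel(0x80000005); /* sources 0, 2 and 31 pending */
    return 0;
}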
+
+static int imx_intmux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct irq_domain *domain;
+ struct intmux_data *data;
+ int channum;
+ int i, ret;
+
+ channum = platform_irq_count(pdev);
+ if (channum == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (channum > CHAN_MAX_NUM) {
+ dev_err(&pdev->dev, "supports up to %d multiplex channels\n",
+ CHAN_MAX_NUM);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data) +
+ channum * sizeof(data->irqchip_data[0]), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(data->ipg_clk)) {
+ ret = PTR_ERR(data->ipg_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ data->channum = channum;
+ raw_spin_lock_init(&data->lock);
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < channum; i++) {
+ data->irqchip_data[i].chanidx = i;
+
+ data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
+ if (data->irqchip_data[i].irq <= 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "failed to get irq\n");
+ goto out;
+ }
+
+ domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+ &data->irqchip_data[i]);
+ if (!domain) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out;
+ }
+ data->irqchip_data[i].domain = domain;
+
+ /* disable all interrupt sources of this channel first */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ imx_intmux_irq_handler,
+ &data->irqchip_data[i]);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
+}
+
+static int imx_intmux_remove(struct platform_device *pdev)
+{
+ struct intmux_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < data->channum; i++) {
+ /* disable all interrupt sources of this channel */
+ writel_relaxed(0, data->regs + CHANIER(i));
+
+ irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+ NULL, NULL);
+
+ irq_domain_remove(data->irqchip_data[i].domain);
+ }
+
+ clk_disable_unprepare(data->ipg_clk);
+
+ return 0;
+}
+
+static const struct of_device_id imx_intmux_id[] = {
+ { .compatible = "fsl,imx-intmux", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver imx_intmux_driver = {
+ .driver = {
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ },
+ .probe = imx_intmux_probe,
+ .remove = imx_intmux_remove,
+};
+builtin_platform_driver(imx_intmux_driver);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 01d18b39069e..c5589ee0dfb3 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -17,7 +17,6 @@
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/mach-jz4740/irq.h>
struct ingenic_intc_data {
void __iomem *base;
@@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data)
while (pending) {
int bit = __fls(pending);
- irq = irq_find_mapping(domain, bit + (i * 32));
+ irq = irq_linear_revmap(domain, bit + (i * 32));
generic_handle_irq(irq);
pending &= ~BIT(bit);
}
@@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- domain = irq_domain_add_legacy(node, num_chips * 32,
- JZ4740_IRQ_BASE, 0,
+ domain = irq_domain_add_linear(node, num_chips * 32,
&irq_generic_chip_ops, NULL);
if (!domain) {
err = -ENOMEM;
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3f09f658e8e2..6b566bba263b 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = {
.name = "Hisilicon MBIGEN-V2",
.of_match_table = mbigen_of_match,
.acpi_match_table = ACPI_PTR(mbigen_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = mbigen_device_probe,
};
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 829084b568fa..ccc7f823911b 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -24,50 +24,101 @@
#define REG_PIN_47_SEL 0x08
#define REG_FILTER_SEL 0x0c
+/* used for A1-like chips */
+#define REG_PIN_A1_SEL 0x04
+
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
* bits 24 to 31. Tests on the actual HW show that these bits are
* stuck at 0. Bits 8 to 15 are responsive and have the expected
* effect.
*/
-#define REG_EDGE_POL_EDGE(x) BIT(x)
-#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
-#define REG_BOTH_EDGE(x) BIT(8 + (x))
-#define REG_EDGE_POL_MASK(x) ( \
- REG_EDGE_POL_EDGE(x) | \
- REG_EDGE_POL_LOW(x) | \
- REG_BOTH_EDGE(x))
+#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x))
+#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x))
+#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x))
+#define REG_EDGE_POL_MASK(params, x) ( \
+ REG_EDGE_POL_EDGE(params, x) | \
+ REG_EDGE_POL_LOW(params, x) | \
+ REG_BOTH_EDGE(params, x))
#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+struct meson_gpio_irq_controller;
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl);
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq);
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+
+struct irq_ctl_ops {
+ void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq);
+ void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+};
+
struct meson_gpio_irq_params {
unsigned int nr_hwirq;
bool support_edge_both;
+ unsigned int edge_both_offset;
+ unsigned int edge_single_offset;
+ unsigned int pol_low_offset;
+ unsigned int pin_sel_mask;
+ struct irq_ctl_ops ops;
};
+#define INIT_MESON_COMMON(irqs, init, sel) \
+ .nr_hwirq = irqs, \
+ .ops = { \
+ .gpio_irq_init = init, \
+ .gpio_irq_sel_pin = sel, \
+ },
+
+#define INIT_MESON8_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
+ meson8_gpio_irq_sel_pin) \
+ .edge_single_offset = 0, \
+ .pol_low_offset = 16, \
+ .pin_sel_mask = 0xff, \
+
+#define INIT_MESON_A1_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin) \
+ .support_edge_both = true, \
+ .edge_both_offset = 16, \
+ .edge_single_offset = 8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0x7f, \
+
static const struct meson_gpio_irq_params meson8_params = {
- .nr_hwirq = 134,
+ INIT_MESON8_COMMON_DATA(134)
};
static const struct meson_gpio_irq_params meson8b_params = {
- .nr_hwirq = 119,
+ INIT_MESON8_COMMON_DATA(119)
};
static const struct meson_gpio_irq_params gxbb_params = {
- .nr_hwirq = 133,
+ INIT_MESON8_COMMON_DATA(133)
};
static const struct meson_gpio_irq_params gxl_params = {
- .nr_hwirq = 110,
+ INIT_MESON8_COMMON_DATA(110)
};
static const struct meson_gpio_irq_params axg_params = {
- .nr_hwirq = 100,
+ INIT_MESON8_COMMON_DATA(100)
};
static const struct meson_gpio_irq_params sm1_params = {
- .nr_hwirq = 100,
+ INIT_MESON8_COMMON_DATA(100)
.support_edge_both = true,
+ .edge_both_offset = 8,
+};
+
+static const struct meson_gpio_irq_params a1_params = {
+ INIT_MESON_A1_COMMON_DATA(62)
};
static const struct of_device_id meson_irq_gpio_matches[] = {
@@ -78,6 +129,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
+ { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ }
};
@@ -100,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
writel_relaxed(tmp, ctl->base + reg);
}
-static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+{
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel, unsigned long hwirq)
+{
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
+ reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+ unsigned int channel,
+ unsigned long hwirq)
{
- return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+ unsigned int reg_offset;
+ unsigned int bit_offset;
+
+ bit_offset = ((channel % 2) == 0) ? 0 : 16;
+ reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+ meson_gpio_irq_update_bits(ctl, reg_offset,
+ ctl->params->pin_sel_mask << bit_offset,
+ hwirq << bit_offset);
+}
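
On A1-style chips two channels share each 32-bit select register: even channels use the 7-bit field at bit 0 of the register at 0x04 + (channel / 2) * 4, odd channels the same field shifted up by 16. A sketch of just that address and offset arithmetic:

#include <stdio.h>

#define REG_PIN_A1_SEL 0x04

/* Two channels share one 32-bit select register on A1-style chips. */
static void a1_sel_pin(unsigned int channel, unsigned long hwirq)
{
    unsigned int bit_offset = (channel % 2) ? 16 : 0;
    unsigned int reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
    unsigned int mask = 0x7f << bit_offset;

    printf("chan %u: reg 0x%02x, mask 0x%08x, val 0x%08lx\n",
           channel, reg_offset, mask, hwirq << bit_offset);
}

int main(void)
{
    a1_sel_pin(0, 10);  /* reg 0x04, low half */
    a1_sel_pin(1, 11);  /* reg 0x04, high half */
    a1_sel_pin(5, 42);  /* reg 0x0c, high half */
    return 0;
}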
+
+/* On A1 and later chips there is a switch to enable/disable the irq */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
}
static int
@@ -110,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long hwirq,
u32 **channel_hwirq)
{
- unsigned int reg, idx;
+ unsigned int idx;
spin_lock(&ctl->lock);
@@ -129,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
* Setup the mux of the channel to route the signal of the pad
* to the appropriate input of the GIC
*/
- reg = meson_gpio_irq_channel_to_reg(idx);
- meson_gpio_irq_update_bits(ctl, reg,
- 0xff << REG_PIN_SEL_SHIFT(idx),
- hwirq << REG_PIN_SEL_SHIFT(idx));
+ ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
/*
* Get the hwirq number assigned to this channel through
@@ -173,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
{
u32 val = 0;
unsigned int idx;
+ const struct meson_gpio_irq_params *params;
+ params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -190,22 +275,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
* precedence over the other edge/polarity settings
*/
if (type == IRQ_TYPE_EDGE_BOTH) {
- if (!ctl->params->support_edge_both)
+ if (!params->support_edge_both)
return -EINVAL;
- val |= REG_BOTH_EDGE(idx);
+ val |= REG_BOTH_EDGE(params, idx);
} else {
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_EDGE(idx);
+ val |= REG_EDGE_POL_EDGE(params, idx);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_LOW(idx);
+ val |= REG_EDGE_POL_LOW(params, idx);
}
spin_lock(&ctl->lock);
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
- REG_EDGE_POL_MASK(idx), val);
+ REG_EDGE_POL_MASK(params, idx), val);
spin_unlock(&ctl->lock);
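
After this refactor the edge/polarity bits are no longer fixed: REG_EDGE_POL_MASK() derives them from per-SoC offsets, so SM1 (single-edge at bit 0, polarity at 16, both-edge at 8) and A1 (8, 0, 16) share one code path. A sketch computing the per-channel mask for both layouts; note the two SoCs permute the same three bits between roles:

#include <stdint.h>
#include <stdio.h>

struct edge_pol_params {
    unsigned int edge_single_offset;
    unsigned int pol_low_offset;
    unsigned int edge_both_offset;
};

/* Offsets taken from the hunk above. */
static const struct edge_pol_params sm1 = { 0, 16, 8 };
static const struct edge_pol_params a1  = { 8, 0, 16 };

static uint32_t edge_pol_mask(const struct edge_pol_params *p, unsigned int idx)
{
    return (1u << (p->edge_single_offset + idx)) |
           (1u << (p->pol_low_offset + idx)) |
           (1u << (p->edge_both_offset + idx));
}

static void show(const char *name, const struct edge_pol_params *p,
                 unsigned int idx)
{
    printf("%s ch%u: edge bit %u, pol bit %u, both-edge bit %u, mask 0x%08x\n",
           name, idx, p->edge_single_offset + idx, p->pol_low_offset + idx,
           p->edge_both_offset + idx, edge_pol_mask(p, idx));
}

int main(void)
{
    show("sm1", &sm1, 2);
    show("a1 ", &a1, 2);
    return 0;
}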
@@ -371,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
return ret;
}
+ ctl->params->ops.gpio_irq_init(ctl);
+
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f3985469c221..d70507133c1d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node,
__sync();
}
- mips_gic_base = ioremap_nocache(gic_base, gic_len);
+ mips_gic_base = ioremap(gic_base, gic_len);
gicconfig = read_gic_config();
gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index a166d30deea2..f747e2209ea9 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
handle_IRQ(irq, regs);
}
-static int nvic_irq_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq, unsigned int *type)
-{
- if (WARN_ON(fwspec->param_count < 1))
- return -EINVAL;
- *hwirq = fwspec->param[0];
- *type = IRQ_TYPE_NONE;
- return 0;
-}
-
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int type = IRQ_TYPE_NONE;
struct irq_fwspec *fwspec = arg;
- ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
@@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops nvic_irq_domain_ops = {
- .translate = nvic_irq_domain_translate,
+ .translate = irq_domain_translate_onecell,
.alloc = nvic_irq_domain_alloc,
.free = irq_domain_free_irqs_top,
};
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index f82bc60a6793..6e5e3172796b 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
goto err0;
}
- i->iomem = devm_ioremap_nocache(dev, io[k]->start,
+ i->iomem = devm_ioremap(dev, io[k]->start,
resource_size(io[k]));
if (!i->iomem) {
dev_err(dev, "failed to remap IOMEM\n");
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 8df547d2d935..aa4af886e43a 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -154,15 +154,37 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
- irq_set_chip_data(irq, NULL);
+ irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
irq_set_noprobe(irq);
return 0;
}
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
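
Switching the PLIC from .map/.xlate to .translate/.alloc means one allocation call can translate the fwspec once and then map a consecutive run of hwirqs. A toy sketch of that contract, with printf() standing in for irq_domain_set_info() and a plain array for the fwspec:

#include <stdio.h>

/* Toy stand-ins: a onecell translate plus a per-hwirq map step. */
static int translate_onecell(const unsigned int *param, int count,
                             unsigned long *hwirq)
{
    if (count < 1)
        return -1;
    *hwirq = param[0];
    return 0;
}

static int map_one(unsigned int virq, unsigned long hwirq)
{
    printf("virq %u -> hwirq %lu\n", virq, hwirq);
    return 0;
}

static int domain_alloc(unsigned int virq, unsigned int nr_irqs,
                        const unsigned int *fwspec, int fwcount)
{
    unsigned long hwirq;
    int ret = translate_onecell(fwspec, fwcount, &hwirq);

    if (ret)
        return ret;
    /* Map the consecutive [hwirq, hwirq + nr_irqs) range in one call. */
    for (unsigned int i = 0; i < nr_irqs; i++) {
        ret = map_one(virq + i, hwirq + i);
        if (ret)
            return ret;
    }
    return 0;
}

int main(void)
{
    unsigned int spec[] = { 9 };

    return domain_alloc(100, 3, spec, 1);
}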
+
static const struct irq_domain_ops plic_irqdomain_ops = {
- .map = plic_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
+ .translate = irq_domain_translate_onecell,
+ .alloc = plic_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
};
static struct irq_domain *plic_irqdomain;
@@ -256,7 +278,7 @@ static int __init plic_init(struct device_node *node,
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != IRQ_EXT)
+ if (parent.args[0] != RV_IRQ_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 63baf27a2c79..d14334f4007e 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -3,6 +3,6 @@
# Object files in subdirectories
-obj-$(CONFIG_ISDN_CAPI) += capi/
+obj-$(CONFIG_BT_CMTP) += capi/
obj-$(CONFIG_MISDN) += mISDN/
obj-$(CONFIG_ISDN) += hardware/
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 573fea5500ce..cc408ad9aafb 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ISDN_CAPI
- tristate "CAPI 2.0 subsystem"
+config ISDN_CAPI
+ def_bool ISDN && BT
help
This provides CAPI (the Common ISDN Application Programming
Interface) Version 2.0, a standard making it easy for programs to
@@ -15,42 +15,18 @@ menuconfig ISDN_CAPI
See CONFIG_BT_CMTP for the last remaining regular driver
in the kernel that uses the CAPI subsystem.
-if ISDN_CAPI
-
config CAPI_TRACE
- bool "CAPI trace support"
- default y
+ def_bool BT_CMTP
help
If you say Y here, the kernelcapi driver can make verbose traces
of CAPI messages. This feature can be enabled/disabled via IOCTL for
every controller (default disabled).
- This will increase the size of the kernelcapi module by 20 KB.
- If unsure, say Y.
-
-config ISDN_CAPI_CAPI20
- tristate "CAPI2.0 /dev/capi20 support"
- help
- This option will provide the CAPI 2.0 interface to userspace
- applications via /dev/capi20. Applications should use the
- standardized libcapi20 to access this functionality. You should say
- Y/M here.
config ISDN_CAPI_MIDDLEWARE
- bool "CAPI2.0 Middleware support"
- depends on ISDN_CAPI_CAPI20 && TTY
+ def_bool BT_CMTP && TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
established via the usual /dev/capi20 interface to a special tty
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
-
-config ISDN_CAPI_CAPIDRV_VERBOSE
- bool "Verbose reason code reporting"
- depends on ISDN_CAPI_CAPIDRV
- help
- If you say Y here, the capidrv interface will give verbose reasons
- for disconnecting. This will increase the size of the kernel by 7 KB.
- If unsure, say N.
-
-endif
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index d299f3e75f89..352217ebabd8 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -1,17 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for the CAPI subsystem.
+# Makefile for the CAPI subsystem used by BT_CMTP
-# Ordering constraints: kernelcapi.o first
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_CAPI) += kernelcapi.o
-obj-$(CONFIG_ISDN_CAPI_CAPI20) += capi.o
-obj-$(CONFIG_ISDN_CAPI_CAPIDRV) += capidrv.o
-
-# Multipart objects.
-
-kernelcapi-y := kcapi.o capiutil.o capilib.o
-kernelcapi-$(CONFIG_PROC_FS) += kcapi_proc.o
-
-ccflags-y += -I$(srctree)/$(src)/../include -I$(srctree)/$(src)/../include/uapi
+obj-$(CONFIG_BT_CMTP) += kernelcapi.o
+kernelcapi-y := kcapi.o capiutil.o capi.o kcapi_proc.o
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 1675da34239b..85767f52fe3c 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -39,7 +39,9 @@
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
-MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
+#include "kcapi.h"
+
+MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer and /dev/capi20 interface");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
@@ -1412,15 +1414,22 @@ static int __init capi_init(void)
{
const char *compileinfo;
int major_ret;
+ int ret;
+
+ ret = kcapi_init();
+ if (ret)
+ return ret;
major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
if (major_ret < 0) {
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
+ kcapi_exit();
return major_ret;
}
capi_class = class_create(THIS_MODULE, "capi");
if (IS_ERR(capi_class)) {
unregister_chrdev(capi_major, "capi20");
+ kcapi_exit();
return PTR_ERR(capi_class);
}
@@ -1430,6 +1439,7 @@ static int __init capi_init(void)
device_destroy(capi_class, MKDEV(capi_major, 0));
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
+ kcapi_exit();
return -ENOMEM;
}
@@ -1455,6 +1465,8 @@ static void __exit capi_exit(void)
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
+
+ kcapi_exit();
}
module_init(capi_init);
diff --git a/drivers/isdn/capi/capilib.c b/drivers/isdn/capi/capilib.c
deleted file mode 100644
index a39ad3796bba..000000000000
--- a/drivers/isdn/capi/capilib.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/isdn/capilli.h>
-
-#define DBG(format, arg...) do { \
- printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
- } while (0)
-
-struct capilib_msgidqueue {
- struct capilib_msgidqueue *next;
- u16 msgid;
-};
-
-struct capilib_ncci {
- struct list_head list;
- u16 applid;
- u32 ncci;
- u32 winsize;
- int nmsg;
- struct capilib_msgidqueue *msgidqueue;
- struct capilib_msgidqueue *msgidlast;
- struct capilib_msgidqueue *msgidfree;
- struct capilib_msgidqueue msgidpool[CAPI_MAXDATAWINDOW];
-};
-
-// ---------------------------------------------------------------------------
-// NCCI Handling
-
-static inline void mq_init(struct capilib_ncci *np)
-{
- u_int i;
- np->msgidqueue = NULL;
- np->msgidlast = NULL;
- np->nmsg = 0;
- memset(np->msgidpool, 0, sizeof(np->msgidpool));
- np->msgidfree = &np->msgidpool[0];
- for (i = 1; i < np->winsize; i++) {
- np->msgidpool[i].next = np->msgidfree;
- np->msgidfree = &np->msgidpool[i];
- }
-}
-
-static inline int mq_enqueue(struct capilib_ncci *np, u16 msgid)
-{
- struct capilib_msgidqueue *mq;
- if ((mq = np->msgidfree) == NULL)
- return 0;
- np->msgidfree = mq->next;
- mq->msgid = msgid;
- mq->next = NULL;
- if (np->msgidlast)
- np->msgidlast->next = mq;
- np->msgidlast = mq;
- if (!np->msgidqueue)
- np->msgidqueue = mq;
- np->nmsg++;
- return 1;
-}
-
-static inline int mq_dequeue(struct capilib_ncci *np, u16 msgid)
-{
- struct capilib_msgidqueue **pp;
- for (pp = &np->msgidqueue; *pp; pp = &(*pp)->next) {
- if ((*pp)->msgid == msgid) {
- struct capilib_msgidqueue *mq = *pp;
- *pp = mq->next;
- if (mq == np->msgidlast)
- np->msgidlast = NULL;
- mq->next = np->msgidfree;
- np->msgidfree = mq;
- np->nmsg--;
- return 1;
- }
- }
- return 0;
-}
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize)
-{
- struct capilib_ncci *np;
-
- np = kmalloc(sizeof(*np), GFP_ATOMIC);
- if (!np) {
- printk(KERN_WARNING "capilib_new_ncci: no memory.\n");
- return;
- }
- if (winsize > CAPI_MAXDATAWINDOW) {
- printk(KERN_ERR "capi_new_ncci: winsize %d too big\n",
- winsize);
- winsize = CAPI_MAXDATAWINDOW;
- }
- np->applid = applid;
- np->ncci = ncci;
- np->winsize = winsize;
- mq_init(np);
- list_add_tail(&np->list, head);
- DBG("kcapi: appl %d ncci 0x%x up", applid, ncci);
-}
-
-EXPORT_SYMBOL(capilib_new_ncci);
-
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci)
-{
- struct list_head *l;
- struct capilib_ncci *np;
-
- list_for_each(l, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- if (np->ncci != ncci)
- continue;
- printk(KERN_INFO "kcapi: appl %d ncci 0x%x down\n", applid, ncci);
- list_del(&np->list);
- kfree(np);
- return;
- }
- printk(KERN_ERR "capilib_free_ncci: ncci 0x%x not found\n", ncci);
-}
-
-EXPORT_SYMBOL(capilib_free_ncci);
-
-void capilib_release_appl(struct list_head *head, u16 applid)
-{
- struct list_head *l, *n;
- struct capilib_ncci *np;
-
- list_for_each_safe(l, n, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", applid, np->ncci);
- list_del(&np->list);
- kfree(np);
- }
-}
-
-EXPORT_SYMBOL(capilib_release_appl);
-
-void capilib_release(struct list_head *head)
-{
- struct list_head *l, *n;
- struct capilib_ncci *np;
-
- list_for_each_safe(l, n, head) {
- np = list_entry(l, struct capilib_ncci, list);
- printk(KERN_INFO "kcapi: appl %d ncci 0x%x forced down\n", np->applid, np->ncci);
- list_del(&np->list);
- kfree(np);
- }
-}
-
-EXPORT_SYMBOL(capilib_release);
-
-u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
-{
- struct list_head *l;
- struct capilib_ncci *np;
-
- list_for_each(l, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- if (np->ncci != ncci)
- continue;
-
- if (mq_enqueue(np, msgid) == 0)
- return CAPI_SENDQUEUEFULL;
-
- return CAPI_NOERROR;
- }
- printk(KERN_ERR "capilib_data_b3_req: ncci 0x%x not found\n", ncci);
- return CAPI_NOERROR;
-}
-
-EXPORT_SYMBOL(capilib_data_b3_req);
-
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid)
-{
- struct list_head *l;
- struct capilib_ncci *np;
-
- list_for_each(l, head) {
- np = list_entry(l, struct capilib_ncci, list);
- if (np->applid != applid)
- continue;
- if (np->ncci != ncci)
- continue;
-
- if (mq_dequeue(np, msgid) == 0) {
- printk(KERN_ERR "kcapi: msgid %hu ncci 0x%x not on queue\n",
- msgid, ncci);
- }
- return;
- }
- printk(KERN_ERR "capilib_data_b3_conf: ncci 0x%x not found\n", ncci);
-}
-
-EXPORT_SYMBOL(capilib_data_b3_conf);
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 9846d82eb097..f26bf3c66d7e 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -20,6 +20,8 @@
#include <linux/isdn/capiutil.h>
#include <linux/slab.h>
+#include "kcapi.h"
+
/* from CAPI2.0 DDK AVM Berlin GmbH */
typedef struct {
@@ -245,190 +247,6 @@ static void jumpcstruct(_cmsg *cmsg)
}
}
}
-/*-------------------------------------------------------*/
-static void pars_2_message(_cmsg *cmsg)
-{
-
- for (; TYP != _CEND; cmsg->p++) {
- switch (TYP) {
- case _CBYTE:
- byteTLcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l++;
- break;
- case _CWORD:
- wordTLcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 2;
- break;
- case _CDWORD:
- dwordTLcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 4;
- break;
- case _CSTRUCT:
- if (*(u8 **) OFF == NULL) {
- *(cmsg->m + cmsg->l) = '\0';
- cmsg->l++;
- } else if (**(_cstruct *) OFF != 0xff) {
- structTLcpy(cmsg->m + cmsg->l, *(_cstruct *) OFF, 1 + **(_cstruct *) OFF);
- cmsg->l += 1 + **(_cstruct *) OFF;
- } else {
- _cstruct s = *(_cstruct *) OFF;
- structTLcpy(cmsg->m + cmsg->l, s, 3 + *(u16 *) (s + 1));
- cmsg->l += 3 + *(u16 *) (s + 1);
- }
- break;
- case _CMSTRUCT:
-/*----- Metastruktur 0 -----*/
- if (*(_cmstruct *) OFF == CAPI_DEFAULT) {
- *(cmsg->m + cmsg->l) = '\0';
- cmsg->l++;
- jumpcstruct(cmsg);
- }
-/*----- Metastruktur wird composed -----*/
- else {
- unsigned _l = cmsg->l;
- unsigned _ls;
- cmsg->l++;
- cmsg->p++;
- pars_2_message(cmsg);
- _ls = cmsg->l - _l - 1;
- if (_ls < 255)
- (cmsg->m + _l)[0] = (u8) _ls;
- else {
- structTLcpyovl(cmsg->m + _l + 3, cmsg->m + _l + 1, _ls);
- (cmsg->m + _l)[0] = 0xff;
- wordTLcpy(cmsg->m + _l + 1, &_ls);
- }
- }
- break;
- }
- }
-}
-
-/**
- * capi_cmsg2message() - assemble CAPI 2.0 message from _cmsg structure
- * @cmsg: _cmsg structure
- * @msg: buffer for assembled message
- *
- * Return value: 0 for success
- */
-
-unsigned capi_cmsg2message(_cmsg *cmsg, u8 *msg)
-{
- cmsg->m = msg;
- cmsg->l = 8;
- cmsg->p = 0;
- cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
- if (!cmsg->par)
- return 1; /* invalid command/subcommand */
-
- pars_2_message(cmsg);
-
- wordTLcpy(msg + 0, &cmsg->l);
- byteTLcpy(cmsg->m + 4, &cmsg->Command);
- byteTLcpy(cmsg->m + 5, &cmsg->Subcommand);
- wordTLcpy(cmsg->m + 2, &cmsg->ApplId);
- wordTLcpy(cmsg->m + 6, &cmsg->Messagenumber);
-
- return 0;
-}
-
-/*-------------------------------------------------------*/
-static void message_2_pars(_cmsg *cmsg)
-{
- for (; TYP != _CEND; cmsg->p++) {
-
- switch (TYP) {
- case _CBYTE:
- byteTRcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l++;
- break;
- case _CWORD:
- wordTRcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 2;
- break;
- case _CDWORD:
- dwordTRcpy(cmsg->m + cmsg->l, OFF);
- cmsg->l += 4;
- break;
- case _CSTRUCT:
- *(u8 **) OFF = cmsg->m + cmsg->l;
-
- if (cmsg->m[cmsg->l] != 0xff)
- cmsg->l += 1 + cmsg->m[cmsg->l];
- else
- cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1);
- break;
- case _CMSTRUCT:
-/*----- Metastruktur 0 -----*/
- if (cmsg->m[cmsg->l] == '\0') {
- *(_cmstruct *) OFF = CAPI_DEFAULT;
- cmsg->l++;
- jumpcstruct(cmsg);
- } else {
- unsigned _l = cmsg->l;
- *(_cmstruct *) OFF = CAPI_COMPOSE;
- cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1;
- cmsg->p++;
- message_2_pars(cmsg);
- }
- break;
- }
- }
-}
-
-/**
- * capi_message2cmsg() - disassemble CAPI 2.0 message into _cmsg structure
- * @cmsg: _cmsg structure
- * @msg: buffer for assembled message
- *
- * Return value: 0 for success
- */
-
-unsigned capi_message2cmsg(_cmsg *cmsg, u8 *msg)
-{
- memset(cmsg, 0, sizeof(_cmsg));
- cmsg->m = msg;
- cmsg->l = 8;
- cmsg->p = 0;
- byteTRcpy(cmsg->m + 4, &cmsg->Command);
- byteTRcpy(cmsg->m + 5, &cmsg->Subcommand);
- cmsg->par = capi_cmd2par(cmsg->Command, cmsg->Subcommand);
- if (!cmsg->par)
- return 1; /* invalid command/subcommand */
-
- message_2_pars(cmsg);
-
- wordTRcpy(msg + 0, &cmsg->l);
- wordTRcpy(cmsg->m + 2, &cmsg->ApplId);
- wordTRcpy(cmsg->m + 6, &cmsg->Messagenumber);
-
- return 0;
-}
-
-/**
- * capi_cmsg_header() - initialize header part of _cmsg structure
- * @cmsg: _cmsg structure
- * @_ApplId: ApplID field value
- * @_Command: Command field value
- * @_Subcommand: Subcommand field value
- * @_Messagenumber: Message Number field value
- * @_Controller: Controller/PLCI/NCCI field value
- *
- * Return value: 0 for success
- */
-
-unsigned capi_cmsg_header(_cmsg *cmsg, u16 _ApplId,
- u8 _Command, u8 _Subcommand,
- u16 _Messagenumber, u32 _Controller)
-{
- memset(cmsg, 0, sizeof(_cmsg));
- cmsg->ApplId = _ApplId;
- cmsg->Command = _Command;
- cmsg->Subcommand = _Subcommand;
- cmsg->Messagenumber = _Messagenumber;
- cmsg->adr.adrController = _Controller;
- return 0;
-}
/*-------------------------------------------------------*/
@@ -561,8 +379,6 @@ static char *pnames[] =
/*2f */ "Useruserdata"
};
-
-
#include <stdarg.h>
/*-------------------------------------------------------*/
@@ -800,37 +616,6 @@ _cdebbuf *capi_message2str(u8 *msg)
return cdb;
}
-/**
- * capi_cmsg2str() - format _cmsg structure for printing
- * @cmsg: _cmsg structure
- *
- * Allocates a CAPI debug buffer and fills it with a printable representation
- * of the CAPI 2.0 message stored in @cmsg by a previous call to
- * capi_cmsg2message() or capi_message2cmsg().
- * Return value: allocated debug buffer, NULL on error
- * The returned buffer should be freed by a call to cdebbuf_free() after use.
- */
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg)
-{
- _cdebbuf *cdb;
-
- if (!cmsg->m)
- return NULL; /* no message */
- cdb = cdebbuf_alloc();
- if (!cdb)
- return NULL;
- cmsg->l = 8;
- cmsg->p = 0;
- cdb = bufprint(cdb, "%s ID=%03d #0x%04x LEN=%04d\n",
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- ((u16 *) cmsg->m)[1],
- ((u16 *) cmsg->m)[3],
- ((u16 *) cmsg->m)[0]);
- cdb = protocol_message_2_pars(cdb, cmsg, 1);
- return cdb;
-}
-
int __init cdebug_init(void)
{
g_cmsg = kmalloc(sizeof(_cmsg), GFP_KERNEL);
@@ -854,7 +639,7 @@ int __init cdebug_init(void)
return 0;
}
-void __exit cdebug_exit(void)
+void cdebug_exit(void)
{
if (g_debbuf)
kfree(g_debbuf->buf);
@@ -885,16 +670,8 @@ int __init cdebug_init(void)
return 0;
}
-void __exit cdebug_exit(void)
+void cdebug_exit(void)
{
}
#endif
-
-EXPORT_SYMBOL(cdebbuf_free);
-EXPORT_SYMBOL(capi_cmsg2message);
-EXPORT_SYMBOL(capi_message2cmsg);
-EXPORT_SYMBOL(capi_cmsg_header);
-EXPORT_SYMBOL(capi_cmd2str);
-EXPORT_SYMBOL(capi_cmsg2str);
-EXPORT_SYMBOL(capi_message2str);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index a4ceb61c5b60..7168778fbbe1 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -10,8 +10,6 @@
*
*/
-#define AVMB1_COMPAT
-
#include "kcapi.h"
#include <linux/module.h>
#include <linux/mm.h>
@@ -31,18 +29,12 @@
#include <linux/uaccess.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
-#ifdef AVMB1_COMPAT
-#include <linux/b1lli.h>
-#endif
#include <linux/mutex.h>
#include <linux/rcupdate.h>
static int showcapimsgs = 0;
static struct workqueue_struct *kcapi_wq;
-MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
module_param(showcapimsgs, uint, 0);
/* ------------------------------------------------------------- */
@@ -61,9 +53,6 @@ static char capi_manufakturer[64] = "AVM Berlin";
#define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f)
-LIST_HEAD(capi_drivers);
-DEFINE_MUTEX(capi_drivers_lock);
-
struct capi_ctr *capi_controller[CAPI_MAXCONTR];
DEFINE_MUTEX(capi_controller_lock);
@@ -71,8 +60,6 @@ struct capi20_appl *capi_applications[CAPI_MAXAPPL];
static int ncontrollers;
-static BLOCKING_NOTIFIER_HEAD(ctr_notifier_list);
-
/* -------- controller ref counting -------------------------------------- */
static inline struct capi_ctr *
@@ -200,8 +187,6 @@ static void notify_up(u32 contr)
if (ap)
register_appl(ctr, applid, &ap->rparam);
}
-
- wake_up_interruptible_all(&ctr->state_wait_queue);
} else
printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
@@ -229,8 +214,6 @@ static void ctr_down(struct capi_ctr *ctr, int new_state)
if (ap)
capi_ctr_put(ctr);
}
-
- wake_up_interruptible_all(&ctr->state_wait_queue);
}
static void notify_down(u32 contr)
@@ -251,36 +234,23 @@ static void notify_down(u32 contr)
mutex_unlock(&capi_controller_lock);
}
-static int
-notify_handler(struct notifier_block *nb, unsigned long val, void *v)
+static void do_notify_work(struct work_struct *work)
{
- u32 contr = (long)v;
+ struct capictr_event *event =
+ container_of(work, struct capictr_event, work);
- switch (val) {
+ switch (event->type) {
case CAPICTR_UP:
- notify_up(contr);
+ notify_up(event->controller);
break;
case CAPICTR_DOWN:
- notify_down(contr);
+ notify_down(event->controller);
break;
}
- return NOTIFY_OK;
-}
-static void do_notify_work(struct work_struct *work)
-{
- struct capictr_event *event =
- container_of(work, struct capictr_event, work);
-
- blocking_notifier_call_chain(&ctr_notifier_list, event->type,
- (void *)(long)event->controller);
kfree(event);
}
-/*
- * The notifier will result in adding/deleteing of devices. Devices can
- * only removed in user process, not in bh.
- */
static int notify_push(unsigned int event_type, u32 controller)
{
struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);
@@ -296,18 +266,6 @@ static int notify_push(unsigned int event_type, u32 controller)
return 0;
}
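
With the notifier chain gone, notify_push() keeps only the deferral pattern: allocate a small event atomically, fill it, and hand it to a workqueue whose handler frees it, since registering applications may sleep. A userspace sketch of that ownership flow; the work runs synchronously here instead of being queued:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { CTR_UP, CTR_DOWN };

struct ctr_event {
    unsigned int type;
    uint32_t controller;
};

/* Stand-in for the work handler: it owns and releases the event. */
static void do_notify_work(struct ctr_event *event)
{
    printf("controller %u went %s\n", event->controller,
           event->type == CTR_UP ? "up" : "down");
    free(event);
}

static int notify_push(unsigned int type, uint32_t controller)
{
    struct ctr_event *event = malloc(sizeof(*event));

    if (!event)
        return -1;
    event->type = type;
    event->controller = controller;
    do_notify_work(event); /* the kernel defers this via queue_work() */
    return 0;
}

int main(void)
{
    return notify_push(CTR_UP, 1);
}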
-int register_capictr_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&ctr_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_capictr_notifier);
-
-int unregister_capictr_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&ctr_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_capictr_notifier);
-
/* -------- Receiver ------------------------------------------ */
static void recv_handler(struct work_struct *work)
@@ -454,48 +412,6 @@ void capi_ctr_down(struct capi_ctr *ctr)
EXPORT_SYMBOL(capi_ctr_down);
-/**
- * capi_ctr_suspend_output() - suspend controller
- * @ctr: controller descriptor structure.
- *
- * Called by hardware driver to stop data flow.
- *
- * Note: The caller is responsible for synchronizing concurrent state changes
- * as well as invocations of capi_ctr_handle_message.
- */
-
-void capi_ctr_suspend_output(struct capi_ctr *ctr)
-{
- if (!ctr->blocked) {
- printk(KERN_DEBUG "kcapi: controller [%03d] suspend\n",
- ctr->cnr);
- ctr->blocked = 1;
- }
-}
-
-EXPORT_SYMBOL(capi_ctr_suspend_output);
-
-/**
- * capi_ctr_resume_output() - resume controller
- * @ctr: controller descriptor structure.
- *
- * Called by hardware driver to resume data flow.
- *
- * Note: The caller is responsible for synchronizing concurrent state changes
- * as well as invocations of capi_ctr_handle_message.
- */
-
-void capi_ctr_resume_output(struct capi_ctr *ctr)
-{
- if (ctr->blocked) {
- printk(KERN_DEBUG "kcapi: controller [%03d] resumed\n",
- ctr->cnr);
- ctr->blocked = 0;
- }
-}
-
-EXPORT_SYMBOL(capi_ctr_resume_output);
-
/* ------------------------------------------------------------- */
/**
@@ -531,7 +447,6 @@ int attach_capi_ctr(struct capi_ctr *ctr)
ctr->state = CAPI_CTR_DETECTED;
ctr->blocked = 0;
ctr->traceflag = showcapimsgs;
- init_waitqueue_head(&ctr->state_wait_queue);
sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
ctr->procent = proc_create_single_data(ctr->procfn, 0, NULL,
@@ -586,38 +501,6 @@ unlock_out:
EXPORT_SYMBOL(detach_capi_ctr);
-/**
- * register_capi_driver() - register CAPI driver
- * @driver: driver descriptor structure.
- *
- * Called by hardware driver to register itself with the CAPI subsystem.
- */
-
-void register_capi_driver(struct capi_driver *driver)
-{
- mutex_lock(&capi_drivers_lock);
- list_add_tail(&driver->list, &capi_drivers);
- mutex_unlock(&capi_drivers_lock);
-}
-
-EXPORT_SYMBOL(register_capi_driver);
-
-/**
- * unregister_capi_driver() - unregister CAPI driver
- * @driver: driver descriptor structure.
- *
- * Called by hardware driver to unregister itself from the CAPI subsystem.
- */
-
-void unregister_capi_driver(struct capi_driver *driver)
-{
- mutex_lock(&capi_drivers_lock);
- list_del(&driver->list);
- mutex_unlock(&capi_drivers_lock);
-}
-
-EXPORT_SYMBOL(unregister_capi_driver);
-
/* ------------------------------------------------------------- */
/* -------- CAPI2.0 Interface ---------------------------------- */
/* ------------------------------------------------------------- */
@@ -648,8 +531,6 @@ u16 capi20_isinstalled(void)
return ret;
}
-EXPORT_SYMBOL(capi20_isinstalled);
-
/**
* capi20_register() - CAPI 2.0 operation CAPI_REGISTER
* @ap: CAPI application descriptor structure.
@@ -711,8 +592,6 @@ u16 capi20_register(struct capi20_appl *ap)
return CAPI_NOERROR;
}
-EXPORT_SYMBOL(capi20_register);
-
/**
* capi20_release() - CAPI 2.0 operation CAPI_RELEASE
* @ap: CAPI application descriptor structure.
@@ -755,8 +634,6 @@ u16 capi20_release(struct capi20_appl *ap)
return CAPI_NOERROR;
}
-EXPORT_SYMBOL(capi20_release);
-
/**
* capi20_put_message() - CAPI 2.0 operation CAPI_PUT_MESSAGE
* @ap: CAPI application descriptor structure.
@@ -834,8 +711,6 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
return ctr->send_message(ctr, skb);
}
-EXPORT_SYMBOL(capi20_put_message);
-
/**
* capi20_get_manufacturer() - CAPI 2.0 operation CAPI_GET_MANUFACTURER
* @contr: controller number.
@@ -869,8 +744,6 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
return ret;
}
-EXPORT_SYMBOL(capi20_get_manufacturer);
-
/**
* capi20_get_version() - CAPI 2.0 operation CAPI_GET_VERSION
* @contr: controller number.
@@ -904,8 +777,6 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
return ret;
}
-EXPORT_SYMBOL(capi20_get_version);
-
/**
* capi20_get_serial() - CAPI 2.0 operation CAPI_GET_SERIAL_NUMBER
* @contr: controller number.
@@ -939,8 +810,6 @@ u16 capi20_get_serial(u32 contr, u8 *serial)
return ret;
}
-EXPORT_SYMBOL(capi20_get_serial);
-
/**
* capi20_get_profile() - CAPI 2.0 operation CAPI_GET_PROFILE
* @contr: controller number.
@@ -974,209 +843,6 @@ u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
return ret;
}
-EXPORT_SYMBOL(capi20_get_profile);
-
-/* Must be called with capi_controller_lock held. */
-static int wait_on_ctr_state(struct capi_ctr *ctr, unsigned int state)
-{
- DEFINE_WAIT(wait);
- int retval = 0;
-
- ctr = capi_ctr_get(ctr);
- if (!ctr)
- return -ESRCH;
-
- for (;;) {
- prepare_to_wait(&ctr->state_wait_queue, &wait,
- TASK_INTERRUPTIBLE);
-
- if (ctr->state == state)
- break;
- if (ctr->state == CAPI_CTR_DETACHED) {
- retval = -ESRCH;
- break;
- }
- if (signal_pending(current)) {
- retval = -EINTR;
- break;
- }
-
- mutex_unlock(&capi_controller_lock);
- schedule();
- mutex_lock(&capi_controller_lock);
- }
- finish_wait(&ctr->state_wait_queue, &wait);
-
- capi_ctr_put(ctr);
-
- return retval;
-}
-
-#ifdef AVMB1_COMPAT
-static int old_capi_manufacturer(unsigned int cmd, void __user *data)
-{
- avmb1_loadandconfigdef ldef;
- avmb1_extcarddef cdef;
- avmb1_resetdef rdef;
- capicardparams cparams;
- struct capi_ctr *ctr;
- struct capi_driver *driver = NULL;
- capiloaddata ldata;
- struct list_head *l;
- int retval;
-
- switch (cmd) {
- case AVMB1_ADDCARD:
- case AVMB1_ADDCARD_WITH_TYPE:
- if (cmd == AVMB1_ADDCARD) {
- if ((retval = copy_from_user(&cdef, data,
- sizeof(avmb1_carddef))))
- return -EFAULT;
- cdef.cardtype = AVM_CARDTYPE_B1;
- cdef.cardnr = 0;
- } else {
- if ((retval = copy_from_user(&cdef, data,
- sizeof(avmb1_extcarddef))))
- return -EFAULT;
- }
- cparams.port = cdef.port;
- cparams.irq = cdef.irq;
- cparams.cardnr = cdef.cardnr;
-
- mutex_lock(&capi_drivers_lock);
-
- switch (cdef.cardtype) {
- case AVM_CARDTYPE_B1:
- list_for_each(l, &capi_drivers) {
- driver = list_entry(l, struct capi_driver, list);
- if (strcmp(driver->name, "b1isa") == 0)
- break;
- }
- break;
- case AVM_CARDTYPE_T1:
- list_for_each(l, &capi_drivers) {
- driver = list_entry(l, struct capi_driver, list);
- if (strcmp(driver->name, "t1isa") == 0)
- break;
- }
- break;
- default:
- driver = NULL;
- break;
- }
- if (!driver) {
- printk(KERN_ERR "kcapi: driver not loaded.\n");
- retval = -EIO;
- } else if (!driver->add_card) {
- printk(KERN_ERR "kcapi: driver has no add card function.\n");
- retval = -EIO;
- } else
- retval = driver->add_card(driver, &cparams);
-
- mutex_unlock(&capi_drivers_lock);
- return retval;
-
- case AVMB1_LOAD:
- case AVMB1_LOAD_AND_CONFIG:
-
- if (cmd == AVMB1_LOAD) {
- if (copy_from_user(&ldef, data,
- sizeof(avmb1_loaddef)))
- return -EFAULT;
- ldef.t4config.len = 0;
- ldef.t4config.data = NULL;
- } else {
- if (copy_from_user(&ldef, data,
- sizeof(avmb1_loadandconfigdef)))
- return -EFAULT;
- }
-
- mutex_lock(&capi_controller_lock);
-
- ctr = get_capi_ctr_by_nr(ldef.contr);
- if (!ctr) {
- retval = -EINVAL;
- goto load_unlock_out;
- }
-
- if (ctr->load_firmware == NULL) {
- printk(KERN_DEBUG "kcapi: load: no load function\n");
- retval = -ESRCH;
- goto load_unlock_out;
- }
-
- if (ldef.t4file.len <= 0) {
- printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
- retval = -EINVAL;
- goto load_unlock_out;
- }
- if (ldef.t4file.data == NULL) {
- printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
- retval = -EINVAL;
- goto load_unlock_out;
- }
-
- ldata.firmware.user = 1;
- ldata.firmware.data = ldef.t4file.data;
- ldata.firmware.len = ldef.t4file.len;
- ldata.configuration.user = 1;
- ldata.configuration.data = ldef.t4config.data;
- ldata.configuration.len = ldef.t4config.len;
-
- if (ctr->state != CAPI_CTR_DETECTED) {
- printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
- retval = -EBUSY;
- goto load_unlock_out;
- }
- ctr->state = CAPI_CTR_LOADING;
-
- retval = ctr->load_firmware(ctr, &ldata);
- if (retval) {
- ctr->state = CAPI_CTR_DETECTED;
- goto load_unlock_out;
- }
-
- retval = wait_on_ctr_state(ctr, CAPI_CTR_RUNNING);
-
- load_unlock_out:
- mutex_unlock(&capi_controller_lock);
- return retval;
-
- case AVMB1_RESETCARD:
- if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef)))
- return -EFAULT;
-
- retval = 0;
-
- mutex_lock(&capi_controller_lock);
-
- ctr = get_capi_ctr_by_nr(rdef.contr);
- if (!ctr) {
- retval = -ESRCH;
- goto reset_unlock_out;
- }
-
- if (ctr->state == CAPI_CTR_DETECTED)
- goto reset_unlock_out;
-
- if (ctr->reset_ctr == NULL) {
- printk(KERN_DEBUG "kcapi: reset: no reset function\n");
- retval = -ESRCH;
- goto reset_unlock_out;
- }
-
- ctr->reset_ctr(ctr);
-
- retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
-
- reset_unlock_out:
- mutex_unlock(&capi_controller_lock);
- return retval;
- }
- return -EINVAL;
-}
-#endif
-
/**
* capi20_manufacturer() - CAPI 2.0 operation CAPI_MANUFACTURER
* @cmd: command.
@@ -1192,14 +858,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
int retval;
switch (cmd) {
-#ifdef AVMB1_COMPAT
- case AVMB1_LOAD:
- case AVMB1_LOAD_AND_CONFIG:
- case AVMB1_RESETCARD:
- case AVMB1_GET_CARDINFO:
- case AVMB1_REMOVECARD:
- return old_capi_manufacturer(cmd, data);
-#endif
case KCAPI_CMD_TRACE:
{
kcapi_flagdef fdef;
@@ -1222,43 +880,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
return retval;
}
- case KCAPI_CMD_ADDCARD:
- {
- struct list_head *l;
- struct capi_driver *driver = NULL;
- capicardparams cparams;
- kcapi_carddef cdef;
-
- if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
- return -EFAULT;
-
- cparams.port = cdef.port;
- cparams.irq = cdef.irq;
- cparams.membase = cdef.membase;
- cparams.cardnr = cdef.cardnr;
- cparams.cardtype = 0;
- cdef.driver[sizeof(cdef.driver) - 1] = 0;
-
- mutex_lock(&capi_drivers_lock);
-
- list_for_each(l, &capi_drivers) {
- driver = list_entry(l, struct capi_driver, list);
- if (strcmp(driver->name, cdef.driver) == 0)
- break;
- }
- if (driver == NULL) {
- printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
- cdef.driver);
- retval = -ESRCH;
- } else if (!driver->add_card) {
- printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver);
- retval = -EIO;
- } else
- retval = driver->add_card(driver, &cparams);
-
- mutex_unlock(&capi_drivers_lock);
- return retval;
- }
default:
printk(KERN_ERR "kcapi: manufacturer command %lu unknown.\n",
@@ -1269,8 +890,6 @@ int capi20_manufacturer(unsigned long cmd, void __user *data)
return -EINVAL;
}
-EXPORT_SYMBOL(capi20_manufacturer);
-
/* ------------------------------------------------------------- */
/* -------- Init & Cleanup ------------------------------------- */
/* ------------------------------------------------------------- */
@@ -1279,12 +898,7 @@ EXPORT_SYMBOL(capi20_manufacturer);
* init / exit functions
*/
-static struct notifier_block capictr_nb = {
- .notifier_call = notify_handler,
- .priority = INT_MAX,
-};
-
-static int __init kcapi_init(void)
+int __init kcapi_init(void)
{
int err;
@@ -1292,11 +906,8 @@ static int __init kcapi_init(void)
if (!kcapi_wq)
return -ENOMEM;
- register_capictr_notifier(&capictr_nb);
-
err = cdebug_init();
if (err) {
- unregister_capictr_notifier(&capictr_nb);
destroy_workqueue(kcapi_wq);
return err;
}
@@ -1305,14 +916,10 @@ static int __init kcapi_init(void)
return 0;
}
-static void __exit kcapi_exit(void)
+void kcapi_exit(void)
{
kcapi_proc_exit();
- unregister_capictr_notifier(&capictr_nb);
cdebug_exit();
destroy_workqueue(kcapi_wq);
}
-
-module_init(kcapi_init);
-module_exit(kcapi_exit);
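The hunk above drops module_init()/module_exit() and un-statics kcapi_init()/kcapi_exit(), so the CAPI core is now set up explicitly by its remaining in-tree user instead of as a standalone module. A minimal sketch of such a caller (the surrounding function names are hypothetical placeholders, not code from this patch):

/* Hedged sketch: explicit life-cycle calls replacing module_init/exit. */
#include "kcapi.h"

static int __init capi_user_init(void)
{
	int err;

	err = kcapi_init();	/* was module_init(kcapi_init) */
	if (err)
		return err;
	/* ... bring up the rest of the CAPI stack ... */
	return 0;
}

static void __exit capi_user_exit(void)
{
	/* ... tear down CAPI users first ... */
	kcapi_exit();		/* was module_exit(kcapi_exit) */
}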
diff --git a/drivers/isdn/capi/kcapi.h b/drivers/isdn/capi/kcapi.h
index 6d439f9a76b2..479623e1db2a 100644
--- a/drivers/isdn/capi/kcapi.h
+++ b/drivers/isdn/capi/kcapi.h
@@ -30,22 +30,153 @@ enum {
CAPI_CTR_RUNNING = 3,
};
-extern struct list_head capi_drivers;
-extern struct mutex capi_drivers_lock;
-
extern struct capi_ctr *capi_controller[CAPI_MAXCONTR];
extern struct mutex capi_controller_lock;
extern struct capi20_appl *capi_applications[CAPI_MAXAPPL];
-#ifdef CONFIG_PROC_FS
-
void kcapi_proc_init(void);
void kcapi_proc_exit(void);
-#else
+struct capi20_appl {
+ u16 applid;
+ capi_register_params rparam;
+ void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
+ void *private;
-static inline void kcapi_proc_init(void) { };
-static inline void kcapi_proc_exit(void) { };
+ /* internal to kernelcapi.o */
+ unsigned long nrecvctlpkt;
+ unsigned long nrecvdatapkt;
+ unsigned long nsentctlpkt;
+ unsigned long nsentdatapkt;
+ struct mutex recv_mtx;
+ struct sk_buff_head recv_queue;
+ struct work_struct recv_work;
+ int release_in_progress;
+};
-#endif
+u16 capi20_isinstalled(void);
+u16 capi20_register(struct capi20_appl *ap);
+u16 capi20_release(struct capi20_appl *ap);
+u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
+u16 capi20_get_version(u32 contr, struct capi_version *verp);
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
+u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
+int capi20_manufacturer(unsigned long cmd, void __user *data);
+
+#define CAPICTR_UP 0
+#define CAPICTR_DOWN 1
+
+int kcapi_init(void);
+void kcapi_exit(void);
+
+/*----- basic-type definitions -----*/
+
+typedef __u8 *_cstruct;
+
+typedef enum {
+ CAPI_COMPOSE,
+ CAPI_DEFAULT
+} _cmstruct;
+
+/*
+ The _cmsg structure contains all possible CAPI 2.0 parameters.
+ All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
+ assembles the parameters and builds CAPI 2.0 conformant messages.
+ CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
+ parameters in the _cmsg structure.
+ */
+
+typedef struct {
+ /* Header */
+ __u16 ApplId;
+ __u8 Command;
+ __u8 Subcommand;
+ __u16 Messagenumber;
+
+ /* Parameter */
+ union {
+ __u32 adrController;
+ __u32 adrPLCI;
+ __u32 adrNCCI;
+ } adr;
+
+ _cmstruct AdditionalInfo;
+ _cstruct B1configuration;
+ __u16 B1protocol;
+ _cstruct B2configuration;
+ __u16 B2protocol;
+ _cstruct B3configuration;
+ __u16 B3protocol;
+ _cstruct BC;
+ _cstruct BChannelinformation;
+ _cmstruct BProtocol;
+ _cstruct CalledPartyNumber;
+ _cstruct CalledPartySubaddress;
+ _cstruct CallingPartyNumber;
+ _cstruct CallingPartySubaddress;
+ __u32 CIPmask;
+ __u32 CIPmask2;
+ __u16 CIPValue;
+ __u32 Class;
+ _cstruct ConnectedNumber;
+ _cstruct ConnectedSubaddress;
+ __u32 Data;
+ __u16 DataHandle;
+ __u16 DataLength;
+ _cstruct FacilityConfirmationParameter;
+ _cstruct Facilitydataarray;
+ _cstruct FacilityIndicationParameter;
+ _cstruct FacilityRequestParameter;
+ __u16 FacilitySelector;
+ __u16 Flags;
+ __u32 Function;
+ _cstruct HLC;
+ __u16 Info;
+ _cstruct InfoElement;
+ __u32 InfoMask;
+ __u16 InfoNumber;
+ _cstruct Keypadfacility;
+ _cstruct LLC;
+ _cstruct ManuData;
+ __u32 ManuID;
+ _cstruct NCPI;
+ __u16 Reason;
+ __u16 Reason_B3;
+ __u16 Reject;
+ _cstruct Useruserdata;
+
+ /* intern */
+ unsigned l, p;
+ unsigned char *par;
+ __u8 *m;
+
+ /* buffer to construct message */
+ __u8 buf[180];
+
+} _cmsg;
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * Debugging / Tracing functions
+ */
+
+char *capi_cmd2str(__u8 cmd, __u8 subcmd);
+
+typedef struct {
+ u_char *buf;
+ u_char *p;
+ size_t size;
+ size_t pos;
+} _cdebbuf;
+
+#define CDEBUG_SIZE 1024
+#define CDEBUG_GSIZE 4096
+
+void cdebbuf_free(_cdebbuf *cdb);
+int cdebug_init(void);
+void cdebug_exit(void);
+
+_cdebbuf *capi_message2str(__u8 *msg);
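The capi20_* interface declared above can be driven by an in-kernel CAPI application roughly as in this hedged sketch; the callback and the capi_register_params values are illustrative assumptions, not code from this patch:

static void my_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
	/* consume the CAPI message, then release the skb */
	kfree_skb(skb);
}

static struct capi20_appl my_ap = {
	.rparam = {
		.level3cnt  = 1,	/* one logical connection */
		.datablkcnt = 8,	/* data blocks per connection */
		.datablklen = 2048,	/* bytes per data block */
	},
	.recv_message = my_recv_message,
};

static int my_capi_attach(void)
{
	if (capi20_isinstalled() != CAPI_NOERROR)
		return -ENODEV;
	return capi20_register(&my_ap) == CAPI_NOERROR ? 0 : -EIO;
}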
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index c94bd12c0f7c..b5ed4ea145cb 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -192,37 +192,15 @@ static const struct seq_operations seq_applstats_ops = {
// ---------------------------------------------------------------------------
-static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
- __acquires(&capi_drivers_lock)
+/* /proc/capi/driver is always empty */
+static ssize_t empty_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
{
- mutex_lock(&capi_drivers_lock);
- return seq_list_start(&capi_drivers, *pos);
-}
-
-static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return seq_list_next(v, &capi_drivers, pos);
-}
-
-static void capi_driver_stop(struct seq_file *seq, void *v)
- __releases(&capi_drivers_lock)
-{
- mutex_unlock(&capi_drivers_lock);
-}
-
-static int capi_driver_show(struct seq_file *seq, void *v)
-{
- struct capi_driver *drv = list_entry(v, struct capi_driver, list);
-
- seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
return 0;
}
-static const struct seq_operations seq_capi_driver_ops = {
- .start = capi_driver_start,
- .next = capi_driver_next,
- .stop = capi_driver_stop,
- .show = capi_driver_show,
+static const struct proc_ops empty_proc_ops = {
+ .proc_read = empty_read,
};
// ---------------------------------------------------------------------------
@@ -236,10 +214,10 @@ kcapi_proc_init(void)
proc_create_seq("capi/contrstats", 0, NULL, &seq_contrstats_ops);
proc_create_seq("capi/applications", 0, NULL, &seq_applications_ops);
proc_create_seq("capi/applstats", 0, NULL, &seq_applstats_ops);
- proc_create_seq("capi/driver", 0, NULL, &seq_capi_driver_ops);
+ proc_create("capi/driver", 0, NULL, &empty_proc_ops);
}
-void __exit
+void
kcapi_proc_exit(void)
{
remove_proc_entry("capi/driver", NULL);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 4b68520ac251..d82f1dea3711 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -836,6 +836,16 @@ config LEDS_LM36274
Say Y to enable the LM36274 LED driver for TI LMU devices.
This supports the LED device LM36274.
+config LEDS_TPS6105X
+ tristate "LED support for TI TPS6105X"
+ depends on LEDS_CLASS
+ depends on TPS6105X
+ default y if TPS6105X
+ help
+ This driver supports the TPS61050/TPS61052 LED chips.
+ These are single boost converters intended primarily for white
+ LEDs and audio amplifiers.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2da39e896ce8..d7e1107753fb 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
+obj-$(CONFIG_LEDS_TPS6105X) += leds-tps6105x.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 438774315e6c..1fc40e8af75e 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <uapi/linux/uleds.h>
+#include <linux/of.h>
#include "leds.h"
static struct class *leds_class;
@@ -214,6 +215,98 @@ static int led_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
+/**
+ * of_led_get() - request a LED device via the LED framework
+ * @np: device node to get the LED device from
+ * @index: the index of the LED
+ *
+ * Returns the LED device parsed from the phandle specified in the "leds"
+ * property of a device tree node or a negative error-code on failure.
+ */
+struct led_classdev *of_led_get(struct device_node *np, int index)
+{
+ struct device *led_dev;
+ struct led_classdev *led_cdev;
+ struct device_node *led_node;
+
+ led_node = of_parse_phandle(np, "leds", index);
+ if (!led_node)
+ return ERR_PTR(-ENOENT);
+
+ led_dev = class_find_device_by_of_node(leds_class, led_node);
+ of_node_put(led_node);
+
+ if (!led_dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ led_cdev = dev_get_drvdata(led_dev);
+
+ if (!try_module_get(led_cdev->dev->parent->driver->owner))
+ return ERR_PTR(-ENODEV);
+
+ return led_cdev;
+}
+EXPORT_SYMBOL_GPL(of_led_get);
+
+/**
+ * led_put() - release a LED device
+ * @led_cdev: LED device
+ */
+void led_put(struct led_classdev *led_cdev)
+{
+ module_put(led_cdev->dev->parent->driver->owner);
+}
+EXPORT_SYMBOL_GPL(led_put);
+
+static void devm_led_release(struct device *dev, void *res)
+{
+ struct led_classdev **p = res;
+
+ led_put(*p);
+}
+
+/**
+ * devm_of_led_get - Resource-managed request of a LED device
+ * @dev: LED consumer
+ * @index: index of the LED to obtain in the consumer
+ *
+ * The device node of the device is parsed to find the requested LED device.
+ * The LED device returned from this function is automatically released
+ * on driver detach.
+ *
+ * Returns: a pointer to a LED device or ERR_PTR(errno) on failure.
+ */
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+ int index)
+{
+ struct led_classdev *led;
+ struct led_classdev **dr;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ /* Not using device tree? */
+ if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+ return ERR_PTR(-ENOTSUPP);
+
+ led = of_led_get(dev->of_node, index);
+ if (IS_ERR(led))
+ return led;
+
+ dr = devres_alloc(devm_led_release, sizeof(struct led_classdev *),
+ GFP_KERNEL);
+ if (!dr) {
+ led_put(led);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ *dr = led;
+ devres_add(dev, dr);
+
+ return led;
+}
+EXPORT_SYMBOL_GPL(devm_of_led_get);
+
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
{
@@ -276,8 +369,10 @@ int led_classdev_register_ext(struct device *parent,
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
}
- if (init_data && init_data->fwnode)
+ if (init_data && init_data->fwnode) {
led_cdev->dev->fwnode = init_data->fwnode;
+ led_cdev->dev->of_node = to_of_node(init_data->fwnode);
+ }
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index b7e0ae1af8fa..e8922fa03379 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash,
switch (id) {
case AS_LED_FLASH:
flash->flash_node = child;
+ fwnode_handle_get(child);
break;
case AS_LED_INDICATOR:
flash->indicator_node = child;
+ fwnode_handle_get(child);
break;
default:
dev_warn(&flash->client->dev,
"unknown LED %u encountered, ignoring\n", id);
break;
}
- fwnode_handle_get(child);
}
if (!flash->flash_node) {
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index e7ec6bff2b5f..bd61a823d0ca 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/leds.h>
#include <linux/leds-bd2802.h>
@@ -67,6 +67,7 @@ struct led_state {
struct bd2802_led {
struct bd2802_led_platform_data *pdata;
struct i2c_client *client;
+ struct gpio_desc *reset;
struct rw_semaphore rwsem;
struct led_state led[2];
@@ -200,7 +201,7 @@ static void bd2802_update_state(struct bd2802_led *led, enum led_ids id,
return;
if (bd2802_is_all_off(led) && !led->adf_on) {
- gpio_set_value(led->pdata->reset_gpio, 0);
+ gpiod_set_value(led->reset, 1);
return;
}
@@ -226,7 +227,7 @@ static void bd2802_configure(struct bd2802_led *led)
static void bd2802_reset_cancel(struct bd2802_led *led)
{
- gpio_set_value(led->pdata->reset_gpio, 1);
+ gpiod_set_value(led->reset, 0);
udelay(100);
bd2802_configure(led);
}
@@ -420,7 +421,7 @@ static void bd2802_disable_adv_conf(struct bd2802_led *led)
bd2802_addr_attributes[i]);
if (bd2802_is_all_off(led))
- gpio_set_value(led->pdata->reset_gpio, 0);
+ gpiod_set_value(led->reset, 1);
led->adf_on = 0;
}
@@ -670,8 +671,16 @@ static int bd2802_probe(struct i2c_client *client,
pdata = led->pdata = dev_get_platdata(&client->dev);
i2c_set_clientdata(client, led);
- /* Configure RESET GPIO (L: RESET, H: RESET cancel) */
- gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");
+ /*
+ * Configure RESET GPIO (L: RESET, H: RESET cancel)
+ *
+ * We request the reset GPIO as OUT_LOW, which means de-asserted;
+ * board files specifying this GPIO line in a machine descriptor
+ * table should take care to specify GPIO_ACTIVE_LOW for it.
+ */
+ led->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(led->reset))
+ return PTR_ERR(led->reset);
/* Tacss = min 0.1ms */
udelay(100);
@@ -685,7 +694,7 @@ static int bd2802_probe(struct i2c_client *client,
dev_info(&client->dev, "return 0x%02x\n", ret);
/* To save the power, reset BD2802 after detecting */
- gpio_set_value(led->pdata->reset_gpio, 0);
+ gpiod_set_value(led->reset, 1);
/* Default attributes */
led->wave_pattern = BD2802_PATTERN_HALF;
@@ -720,7 +729,7 @@ static int bd2802_remove(struct i2c_client *client)
struct bd2802_led *led = i2c_get_clientdata(client);
int i;
- gpio_set_value(led->pdata->reset_gpio, 0);
+ gpiod_set_value(led->reset, 1);
bd2802_unregister_led_classdev(led);
if (led->adf_on)
bd2802_disable_adv_conf(led);
@@ -750,7 +759,7 @@ static int bd2802_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct bd2802_led *led = i2c_get_clientdata(client);
- gpio_set_value(led->pdata->reset_gpio, 0);
+ gpiod_set_value(led->reset, 1);
return 0;
}
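The bd2802 hunks above illustrate the gpiod consumer pattern: the driver now writes logical values (1 = assert reset), and the physical polarity lives in the machine description. A hedged sketch of the matching board-file side, with a hypothetical device id and GPIO chip:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table bd2802_gpios = {
	.dev_id = "0-001a",		/* hypothetical i2c client */
	.table = {
		GPIO_LOOKUP("gpiochip0", 5, "reset", GPIO_ACTIVE_LOW),
		{ }
	},
};

/* After gpiod_add_lookup_table(&bd2802_gpios), gpiod_set_value(led->reset, 1)
 * in the driver means "assert reset" regardless of electrical polarity. */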
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index a5c73f3d5f79..2bf74595610f 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led led = {};
const char *state = NULL;
+ /*
+ * Acquire gpiod from DT with an uninitialized label, which
+ * will be updated after the LED class device is registered;
+ * only then is the final LED name known.
+ */
led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
GPIOD_ASIS,
- led.name);
+ NULL);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
fwnode_handle_put(child);
return ERR_PTR(ret);
}
+ /* Set gpiod label to match the corresponding LED name. */
+ gpiod_set_consumer_name(led_dat->gpiod,
+ led_dat->cdev.dev->kobj.name);
priv->num_leds++;
}
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 0507c6575c08..188a57da981a 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// TI LM3532 LED driver
// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
#include <linux/i2c.h>
#include <linux/leds.h>
@@ -577,6 +578,12 @@ static int lm3532_parse_node(struct lm3532_data *priv)
priv->runtime_ramp_down = lm3532_get_ramp_index(ramp_time);
device_for_each_child_node(priv->dev, child) {
+ struct led_init_data idata = {
+ .fwnode = child,
+ .default_label = ":",
+ .devicename = priv->client->name,
+ };
+
led = &priv->leds[i];
ret = fwnode_property_read_u32(child, "reg", &control_bank);
@@ -623,7 +630,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3532_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "To many LED string defined\n");
+ dev_err(&priv->client->dev, "Too many LED string defined\n");
continue;
}
@@ -651,7 +658,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
led->led_dev.name = led->label;
led->led_dev.brightness_set_blocking = lm3532_brightness_set;
- ret = devm_led_classdev_register(priv->dev, &led->led_dev);
+ ret = devm_led_classdev_register_ext(priv->dev, &led->led_dev, &idata);
if (ret) {
dev_err(&priv->client->dev, "led register err: %d\n",
ret);
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 480575442ed8..4232906fcb75 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -106,7 +106,7 @@ static int lm3642_control(struct lm3642_chip_data *chip,
ret = regmap_read(chip->regmap, REG_FLAG, &chip->last_flag);
if (ret < 0) {
dev_err(chip->dev, "Failed to read REG_FLAG Register\n");
- goto out;
+ return ret;
}
if (chip->last_flag)
@@ -146,11 +146,11 @@ static int lm3642_control(struct lm3642_chip_data *chip,
break;
default:
- return ret;
+ return -EINVAL;
}
if (ret < 0) {
dev_err(chip->dev, "Failed to write REG_I_CTRL Register\n");
- goto out;
+ return ret;
}
if (chip->tx_pin)
@@ -159,13 +159,12 @@ static int lm3642_control(struct lm3642_chip_data *chip,
ret = regmap_update_bits(chip->regmap, REG_ENABLE,
MODE_BITS_MASK << MODE_BITS_SHIFT,
opmode << MODE_BITS_SHIFT);
-out:
return ret;
}
/* torch */
-/* torch pin config for lm3642*/
+/* torch pin config for lm3642 */
static ssize_t lm3642_torch_pin_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
@@ -178,7 +177,7 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
ret = kstrtouint(buf, 10, &state);
if (ret)
- goto out_strtoint;
+ return ret;
if (state != 0)
state = 0x01 << TORCH_PIN_EN_SHIFT;
@@ -186,16 +185,12 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
ret = regmap_update_bits(chip->regmap, REG_ENABLE,
TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT,
state);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
+ return ret;
+ }
return size;
-out:
- dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return ret;
-out_strtoint:
- dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return ret;
}
static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
@@ -229,7 +224,7 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
ret = kstrtouint(buf, 10, &state);
if (ret)
- goto out_strtoint;
+ return ret;
if (state != 0)
state = 0x01 << STROBE_PIN_EN_SHIFT;
@@ -237,16 +232,12 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
ret = regmap_update_bits(chip->regmap, REG_ENABLE,
STROBE_PIN_EN_MASK << STROBE_PIN_EN_SHIFT,
state);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
+ return ret;
+ }
return size;
-out:
- dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return ret;
-out_strtoint:
- dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return ret;
}
static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 8b408102e138..28a51aeb28de 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -6,6 +6,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/leds.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -114,6 +115,9 @@ struct lm3692x_led {
struct regulator *regulator;
int led_enable;
int model_id;
+
+ u8 boost_ctrl, brightness_ctrl;
+ bool enabled;
};
static const struct reg_default lm3692x_reg_defs[] = {
@@ -162,44 +166,14 @@ static int lm3692x_fault_check(struct lm3692x_led *led)
return read_buf;
}
-static int lm3692x_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brt_val)
-{
- struct lm3692x_led *led =
- container_of(led_cdev, struct lm3692x_led, led_dev);
- int ret;
- int led_brightness_lsb = (brt_val >> 5);
-
- mutex_lock(&led->lock);
-
- ret = lm3692x_fault_check(led);
- if (ret) {
- dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
- ret);
- goto out;
- }
-
- ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
- if (ret) {
- dev_err(&led->client->dev, "Cannot write MSB: %d\n", ret);
- goto out;
- }
-
- ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
- if (ret) {
- dev_err(&led->client->dev, "Cannot write LSB: %d\n", ret);
- goto out;
- }
-out:
- mutex_unlock(&led->lock);
- return ret;
-}
-
-static int lm3692x_init(struct lm3692x_led *led)
+static int lm3692x_leds_enable(struct lm3692x_led *led)
{
int enable_state;
int ret, reg_ret;
+ if (led->enabled)
+ return 0;
+
if (led->regulator) {
ret = regulator_enable(led->regulator);
if (ret) {
@@ -249,10 +223,7 @@ static int lm3692x_init(struct lm3692x_led *led)
if (ret)
goto out;
- ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
- LM3692X_BOOST_SW_1MHZ |
- LM3692X_BOOST_SW_NO_SHIFT |
- LM3692X_OCP_PROT_1_5A);
+ ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL, led->boost_ctrl);
if (ret)
goto out;
@@ -305,6 +276,7 @@ static int lm3692x_init(struct lm3692x_led *led)
ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_ENABLE_MASK,
enable_state | LM3692X_DEVICE_EN);
+ led->enabled = true;
return ret;
out:
dev_err(&led->client->dev, "Fail writing initialization values\n");
@@ -322,10 +294,92 @@ out:
return ret;
}
+static int lm3692x_leds_disable(struct lm3692x_led *led)
+{
+ int ret;
+
+ if (!led->enabled)
+ return 0;
+
+ ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to disable regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ }
+
+ led->enabled = false;
+ return ret;
+}
+
+static int lm3692x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3692x_led *led =
+ container_of(led_cdev, struct lm3692x_led, led_dev);
+ int ret;
+ int led_brightness_lsb = (brt_val >> 5);
+
+ mutex_lock(&led->lock);
+
+ if (brt_val == 0) {
+ ret = lm3692x_leds_disable(led);
+ goto out;
+ } else {
+ lm3692x_leds_enable(led);
+ }
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write MSB: %d\n", ret);
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write LSB: %d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
+static enum led_brightness lm3692x_max_brightness(struct lm3692x_led *led,
+ u32 max_cur)
+{
+ u32 max_code;
+
+ /* see p.12 of LM36922 data sheet for brightness formula */
+ max_code = ((max_cur * 1000) - 37806) / 12195;
+ if (max_code > 0x7FF)
+ max_code = 0x7FF;
+
+ return max_code >> 3;
+}
+
static int lm3692x_probe_dt(struct lm3692x_led *led)
{
struct fwnode_handle *child = NULL;
struct led_init_data init_data = {};
+ u32 ovp, max_cur;
int ret;
led->enable_gpio = devm_gpiod_get_optional(&led->client->dev,
@@ -350,6 +404,32 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
led->regulator = NULL;
}
+ led->boost_ctrl = LM3692X_BOOST_SW_1MHZ |
+ LM3692X_BOOST_SW_NO_SHIFT |
+ LM3692X_OCP_PROT_1_5A;
+ ret = device_property_read_u32(&led->client->dev,
+ "ti,ovp-microvolt", &ovp);
+ if (ret) {
+ led->boost_ctrl |= LM3692X_OVP_29V;
+ } else {
+ switch (ovp) {
+ case 17000000:
+ break;
+ case 21000000:
+ led->boost_ctrl |= LM3692X_OVP_21V;
+ break;
+ case 25000000:
+ led->boost_ctrl |= LM3692X_OVP_25V;
+ break;
+ case 29000000:
+ led->boost_ctrl |= LM3692X_OVP_29V;
+ break;
+ default:
+ dev_err(&led->client->dev, "Invalid OVP %d\n", ovp);
+ return -EINVAL;
+ }
+ }
+
child = device_get_next_child_node(&led->client->dev, child);
if (!child) {
dev_err(&led->client->dev, "No LED Child node\n");
@@ -365,6 +445,10 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
return ret;
}
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &max_cur);
+ led->led_dev.max_brightness = ret ? LED_FULL :
+ lm3692x_max_brightness(led, max_cur);
+
init_data.fwnode = child;
init_data.devicename = led->client->name;
init_data.default_label = ":";
@@ -407,7 +491,7 @@ static int lm3692x_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = lm3692x_init(led);
+ ret = lm3692x_leds_enable(led);
if (ret)
return ret;
@@ -419,23 +503,9 @@ static int lm3692x_remove(struct i2c_client *client)
struct lm3692x_led *led = i2c_get_clientdata(client);
int ret;
- ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
- if (ret) {
- dev_err(&led->client->dev, "Failed to disable regulator: %d\n",
- ret);
+ ret = lm3692x_leds_disable(led);
+ if (ret)
return ret;
- }
-
- if (led->enable_gpio)
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator: %d\n", ret);
- }
-
mutex_destroy(&led->lock);
return 0;
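As a worked example of the lm3692x_max_brightness() helper added above (an illustrative value, not from this patch): for led-max-microamp = 20000, max_code = (20000 * 1000 - 37806) / 12195 = 19962194 / 12195 ≈ 1636, which is below the 0x7FF ceiling, so max_brightness = 1636 >> 3 = 204.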
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
index 4c2d0b3c6dad..a0d4b725c917 100644
--- a/drivers/leds/leds-max77650.c
+++ b/drivers/leds/leds-max77650.c
@@ -135,9 +135,16 @@ err_node_put:
return rv;
}
+static const struct of_device_id max77650_led_of_match[] = {
+ { .compatible = "maxim,max77650-led" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
static struct platform_driver max77650_led_driver = {
.driver = {
.name = "max77650-led",
+ .of_match_table = max77650_led_of_match,
},
.probe = max77650_led_probe,
};
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 4afc317901a8..66cdc003b8f4 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -40,6 +40,8 @@
#define PCA963X_LED_PWM 0x2 /* Controlled through PWM */
#define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
+#define PCA963X_MODE2_OUTDRV 0x04 /* Open-drain or totem pole */
+#define PCA963X_MODE2_INVRT 0x10 /* Normal or inverted direction */
#define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */
#define PCA963X_MODE1 0x00
@@ -438,12 +440,12 @@ static int pca963x_probe(struct i2c_client *client,
PCA963X_MODE2);
/* Configure output: open-drain or totem pole (push-pull) */
if (pdata->outdrv == PCA963X_OPEN_DRAIN)
- mode2 |= 0x01;
+ mode2 &= ~PCA963X_MODE2_OUTDRV;
else
- mode2 |= 0x05;
+ mode2 |= PCA963X_MODE2_OUTDRV;
/* Configure direction: normal or inverted */
if (pdata->dir == PCA963X_INVERTED)
- mode2 |= 0x10;
+ mode2 |= PCA963X_MODE2_INVRT;
i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
mode2);
}
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index db5af83f0cec..b6447c1721b4 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev,
{
if (brightness)
set_latch_u5(LO_ULED, 0);
-
else
set_latch_u5(0, LO_ULED);
}
diff --git a/drivers/leds/leds-tps6105x.c b/drivers/leds/leds-tps6105x.c
new file mode 100644
index 000000000000..09fd88a6c8f0
--- /dev/null
+++ b/drivers/leds/leds-tps6105x.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 Sven Van Asbroeck
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps6105x.h>
+#include <linux/regmap.h>
+
+struct tps6105x_priv {
+ struct regmap *regmap;
+ struct led_classdev cdev;
+ struct fwnode_handle *fwnode;
+};
+
+static void tps6105x_handle_put(void *data)
+{
+ struct tps6105x_priv *priv = data;
+
+ fwnode_handle_put(priv->fwnode);
+}
+
+static int tps6105x_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct tps6105x_priv *priv = container_of(cdev, struct tps6105x_priv,
+ cdev);
+
+ return regmap_update_bits(priv->regmap, TPS6105X_REG_0,
+ TPS6105X_REG0_TORCHC_MASK,
+ brightness << TPS6105X_REG0_TORCHC_SHIFT);
+}
+
+static int tps6105x_led_probe(struct platform_device *pdev)
+{
+ struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
+ struct tps6105x_platform_data *pdata = tps6105x->pdata;
+ struct led_init_data init_data = { };
+ struct tps6105x_priv *priv;
+ int ret;
+
+ /* This instance is not set for torch mode so bail out */
+ if (pdata->mode != TPS6105X_MODE_TORCH) {
+ dev_info(&pdev->dev,
+ "chip not in torch mode, exit probe");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ /* fwnode/devicetree is optional. NULL is allowed for priv->fwnode */
+ priv->fwnode = device_get_next_child_node(pdev->dev.parent, NULL);
+ ret = devm_add_action_or_reset(&pdev->dev, tps6105x_handle_put, priv);
+ if (ret)
+ return ret;
+ priv->regmap = tps6105x->regmap;
+ priv->cdev.brightness_set_blocking = tps6105x_brightness_set;
+ priv->cdev.max_brightness = 7;
+ init_data.devicename = "tps6105x";
+ init_data.default_label = ":torch";
+ init_data.fwnode = priv->fwnode;
+
+ ret = regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
+ TPS6105X_REG0_MODE_MASK |
+ TPS6105X_REG0_TORCHC_MASK,
+ TPS6105X_REG0_MODE_TORCH <<
+ TPS6105X_REG0_MODE_SHIFT);
+ if (ret)
+ return ret;
+
+ return devm_led_classdev_register_ext(&pdev->dev, &priv->cdev,
+ &init_data);
+}
+
+static struct platform_driver led_driver = {
+ .probe = tps6105x_led_probe,
+ .driver = {
+ .name = "tps6105x-leds",
+ },
+};
+
+module_platform_driver(led_driver);
+
+MODULE_DESCRIPTION("TPS6105x LED driver");
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 718729c89440..3abcafe46278 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void)
module_init(pattern_trig_init);
module_exit(pattern_trig_exit);
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
MODULE_DESCRIPTION("LED Pattern trigger");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
index 9534503b69d9..47b67c6bff7a 100644
--- a/drivers/lightnvm/pblk-trace.h
+++ b/drivers/lightnvm/pblk-trace.h
@@ -46,7 +46,7 @@ TRACE_EVENT(pblk_chunk_reset,
TP_STRUCT__entry(
__string(name, name)
__field(u64, ppa)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -72,7 +72,7 @@ TRACE_EVENT(pblk_chunk_state,
TP_STRUCT__entry(
__string(name, name)
__field(u64, ppa)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -98,7 +98,7 @@ TRACE_EVENT(pblk_line_state,
TP_STRUCT__entry(
__string(name, name)
__field(int, line)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
@@ -121,7 +121,7 @@ TRACE_EVENT(pblk_state,
TP_STRUCT__entry(
__string(name, name)
- __field(int, state);
+ __field(int, state)
),
TP_fast_assign(
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 574e122ae105..cbd46c1c5bf7 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -178,7 +178,7 @@ config THERM_ADT746X
depends on I2C && I2C_POWERMAC && PPC_PMAC && !PPC_PMAC64
help
This driver provides some thermostat and fan control for the
- iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
+ iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
better fan behaviour by default, and some manual control.
config WINDFARM
@@ -214,7 +214,7 @@ config WINDFARM_PM91
select I2C_POWERMAC
help
This driver provides thermal control for the PowerMac9,1
- which is the recent (SMU based) single CPU desktop G5
+ which is the recent (SMU based) single CPU desktop G5
config WINDFARM_PM112
tristate "Support for thermal management on PowerMac11,2"
@@ -242,7 +242,7 @@ config PMAC_RACKMETER
depends on PPC_PMAC
help
This driver provides some support to control the front panel
- blue LEDs "vu-meter" of the XServer macs.
+ blue LEDs "vu-meter" of the XServer macs.
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 21d532a78fa4..d38fb78a3b23 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -212,7 +212,7 @@ static int pmu_info_proc_show(struct seq_file *m, void *v);
static int pmu_irqstats_proc_show(struct seq_file *m, void *v);
static int pmu_battery_proc_show(struct seq_file *m, void *v);
static void pmu_pass_intr(unsigned char *data, int len);
-static const struct file_operations pmu_options_proc_fops;
+static const struct proc_ops pmu_options_proc_ops;
#ifdef CONFIG_ADB
const struct adb_driver via_pmu_driver = {
@@ -573,7 +573,7 @@ static int __init via_pmu_dev_init(void)
proc_pmu_irqstats = proc_create_single("interrupts", 0,
proc_pmu_root, pmu_irqstats_proc_show);
proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
- &pmu_options_proc_fops);
+ &pmu_options_proc_ops);
}
return 0;
}
@@ -974,13 +974,12 @@ static ssize_t pmu_options_proc_write(struct file *file,
return fcount;
}
-static const struct file_operations pmu_options_proc_fops = {
- .owner = THIS_MODULE,
- .open = pmu_options_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pmu_options_proc_write,
+static const struct proc_ops pmu_options_proc_ops = {
+ .proc_open = pmu_options_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = pmu_options_proc_write,
};
#ifdef CONFIG_ADB
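This hunk and the kcapi_proc.c change above both follow the v5.6 conversion of procfs handlers from file_operations to the dedicated proc_ops type. A hedged summary of the field mapping (example_open/example_write are hypothetical placeholders; proc_ops has no .owner member, which is why that line is simply dropped):

static const struct proc_ops example_proc_ops = {
	.proc_open    = example_open,	/* was .open    */
	.proc_read    = seq_read,	/* was .read    */
	.proc_lseek   = seq_lseek,	/* was .llseek  */
	.proc_release = single_release,	/* was .release */
	.proc_write   = example_write,	/* was .write   */
};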
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 19f086716dc5..02b7b28e6969 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -143,7 +143,6 @@ static const struct mbox_chan_ops a37xx_mbox_ops = {
static int armada_37xx_mbox_probe(struct platform_device *pdev)
{
struct a37xx_mbox *mbox;
- struct resource *regs;
struct mbox_chan *chans;
int ret;
@@ -156,9 +155,7 @@ static int armada_37xx_mbox_probe(struct platform_device *pdev)
if (!chans)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mbox->base = devm_ioremap_resource(&pdev->dev, regs);
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->base)) {
dev_err(&pdev->dev, "ioremap failed\n");
return PTR_ERR(mbox->base);
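devm_platform_ioremap_resource(), used above, folds the lookup-and-map pattern it replaces into a single call; to a first approximation it behaves like this sketch:

void __iomem *devm_platform_ioremap_resource_sketch(
		struct platform_device *pdev, unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}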
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a1df0d95151c..8bc1faf71ff2 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -67,6 +67,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
+#include <linux/sched/signal.h>
#include <trace/events/bcache.h>
#define MAX_OPEN_BUCKETS 128
@@ -733,8 +734,21 @@ int bch_open_buckets_alloc(struct cache_set *c)
int bch_cache_allocator_start(struct cache *ca)
{
- struct task_struct *k = kthread_run(bch_allocator_thread,
- ca, "bcache_allocator");
+ struct task_struct *k;
+
+ /*
+ * A previous btree check operation may have occupied too much
+ * system memory for the bcache btree node cache, causing the
+ * registering process to be selected by the OOM killer. Ignore
+ * the SIGKILL sent by the OOM killer, if any, so that
+ * kthread_run() does not fail due to pending signals. The
+ * bcache registering process will exit once registration is
+ * done.
+ */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
if (IS_ERR(k))
return PTR_ERR(k);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 9198c1b480d9..74a9849ea164 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -301,6 +301,7 @@ struct cached_dev {
struct block_device *bdev;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
struct closure sb_write;
@@ -329,6 +330,9 @@ struct cached_dev {
*/
atomic_t has_dirty;
+#define BCH_CACHE_READA_ALL 0
+#define BCH_CACHE_READA_META_ONLY 1
+ unsigned int cache_readahead_policy;
struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update;
@@ -403,6 +407,7 @@ enum alloc_reserve {
struct cache {
struct cache_set *set;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index cffcdc9feefb..4385303836d8 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
* Our temporary buffer is the same size as the btree node's
* buffer, we can just swap buffers instead of doing a big
* memcpy()
+ *
+ * Don't worry even if 'out' is allocated from the mempool, it
+ * can still be swapped here, because state->pool is a page
+ * mempool created by mempool_init_page_pool(), which allocates
+ * pages via alloc_pages().
*/
out->magic = b->set->data->magic;
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index c71365e7c1fa..a50dcfda656f 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
/* Bkey utility code */
-#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
+#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
+ (unsigned int)(i)->keys)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 14d6c33b0957..b12186c87f52 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -34,6 +34,7 @@
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>
@@ -734,34 +735,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
i = 0;
btree_cache_used = c->btree_cache_used;
- list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
if (nr <= 0)
goto out;
- if (++i > 3 &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
}
nr--;
+ i++;
}
- for (; (nr--) && i < btree_cache_used; i++) {
- if (list_empty(&c->btree_cache))
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (nr <= 0 || i >= btree_cache_used)
goto out;
- b = list_first_entry(&c->btree_cache, struct btree, list);
- list_rotate_left(&c->btree_cache);
-
- if (!b->accessed &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
freed++;
- } else
- b->accessed = 0;
+ }
+
+ nr--;
+ i++;
}
out:
mutex_unlock(&c->bucket_lock);
@@ -1069,7 +1068,6 @@ retry:
BUG_ON(!b->written);
b->parent = parent;
- b->accessed = 1;
for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
prefetch(b->keys.set[i].tree);
@@ -1160,7 +1158,6 @@ retry:
goto retry;
}
- b->accessed = 1;
b->parent = parent;
bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
@@ -1917,6 +1914,18 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
+ /*
+ * A previous btree check operation may have occupied too much
+ * system memory for the bcache btree node cache, causing the
+ * registering process to be selected by the OOM killer. Ignore
+ * the SIGKILL sent by the OOM killer, if any, so that
+ * kthread_run() does not fail due to pending signals. The
+ * bcache registering process will exit once registration is
+ * done.
+ */
+ if (signal_pending(current))
+ flush_signals(current);
+
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
return PTR_ERR_OR_ZERO(c->gc_thread);
}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 76cfd121a486..f4dcca449391 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -121,8 +121,6 @@ struct btree {
/* Key/pointer for this btree node */
BKEY_PADDED(key);
- /* Single bit - set when accessed, cleared by shrinker */
- unsigned long accessed;
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index be2a2a201603..0e3ff9745ac7 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -420,7 +420,10 @@ err:
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
- unsigned int i, n;
+ unsigned int i, nr;
+ int ref_nr;
+ atomic_t *fifo_front_p, *now_fifo_front_p;
+ size_t mask;
if (c->journal.btree_flushing)
return;
@@ -433,12 +436,50 @@ static void btree_flush_write(struct cache_set *c)
c->journal.btree_flushing = true;
spin_unlock(&c->journal.flush_write_lock);
+ /* get the oldest journal entry and check its refcount */
+ spin_lock(&c->journal.lock);
+ fifo_front_p = &fifo_front(&c->journal.pin);
+ ref_nr = atomic_read(fifo_front_p);
+ if (ref_nr <= 0) {
+ /*
+ * do nothing if no btree node references
+ * the oldest journal entry
+ */
+ spin_unlock(&c->journal.lock);
+ goto out;
+ }
+ spin_unlock(&c->journal.lock);
+
+ mask = c->journal.pin.mask;
+ nr = 0;
atomic_long_inc(&c->flush_write);
memset(btree_nodes, 0, sizeof(btree_nodes));
- n = 0;
mutex_lock(&c->bucket_lock);
list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ /*
+ * It is safe to get now_fifo_front_p without holding
+ * c->journal.lock here, because we don't need to know
+ * the exact value, just whether the front pointer of
+ * c->journal.pin has changed.
+ */
+ now_fifo_front_p = &fifo_front(&c->journal.pin);
+ /*
+ * If the oldest journal entry is reclaimed and front
+ * pointer of c->journal.pin changes, it is unnecessary
+ * to scan c->btree_cache anymore, just quit the loop and
+ * flush out what we have already.
+ */
+ if (now_fifo_front_p != fifo_front_p)
+ break;
+ /*
+ * quit this loop if all matching btree nodes have
+ * already been scanned and recorded in btree_nodes[].
+ */
+ ref_nr = atomic_read(fifo_front_p);
+ if (nr >= ref_nr)
+ break;
+
if (btree_node_journal_flush(b))
pr_err("BUG: flush_write bit should not be set here!");
@@ -454,17 +495,43 @@ static void btree_flush_write(struct cache_set *c)
continue;
}
+ /*
+ * Only select the btree node which exactly references
+ * the oldest journal entry.
+ *
+ * If the journal entry pointed to by fifo_front_p is
+ * reclaimed in parallel, don't worry:
+ * - the list_for_each_xxx loop will quit when checking
+ * the next now_fifo_front_p.
+ * - If there are matched nodes recorded in btree_nodes[],
+ * they are clean now (this is why and how the oldest
+ * journal entry can be reclaimed). These selected nodes
+ * will be ignored and skipped in the following for-loop.
+ */
+ if (((btree_current_write(b)->journal - fifo_front_p) &
+ mask) != 0) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
set_btree_node_journal_flush(b);
mutex_unlock(&b->write_lock);
- btree_nodes[n++] = b;
- if (n == BTREE_FLUSH_NR)
+ btree_nodes[nr++] = b;
+ /*
+ * To avoid holding c->bucket_lock for too long, scan
+ * for at most BTREE_FLUSH_NR matching btree nodes. If
+ * more btree nodes reference the oldest journal entry,
+ * try to flush them the next time btree_flush_write()
+ * is called.
+ */
+ if (nr == BTREE_FLUSH_NR)
break;
}
mutex_unlock(&c->bucket_lock);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
pr_err("BUG: btree_nodes[%d] is NULL", i);
@@ -497,6 +564,7 @@ static void btree_flush_write(struct cache_set *c)
mutex_unlock(&b->write_lock);
}
+out:
spin_lock(&c->journal.flush_write_lock);
c->journal.btree_flushing = false;
spin_unlock(&c->journal.flush_write_lock);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 73478a91a342..820d8402a1dc 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -379,13 +379,20 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto skip;
/*
- * Flag for bypass if the IO is for read-ahead or background,
- * unless the read-ahead request is for metadata
+ * If the bio is for read-ahead or background IO, whether to
+ * bypass it depends on the following situations:
+ * - If the IO is for metadata, always cache it, no bypass
+ * - If the IO is not for metadata, check dc->cache_readahead_policy:
+ * BCH_CACHE_READA_ALL: cache it, do not bypass
+ * BCH_CACHE_READA_META_ONLY: do not cache it, bypass
+ * That is, read-ahead requests for metadata always get cached
* (eg, for gfs2 or xfs).
*/
- if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
- !(bio->bi_opf & (REQ_META|REQ_PRIO)))
- goto skip;
+ if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
+ if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
+ (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
+ goto skip;
+ }
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index ba1c93791d8d..503aafe188dc 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
void bch_cache_accounting_clear(struct cache_accounting *acc)
{
- memset(&acc->total.cache_hits,
- 0,
- sizeof(struct cache_stats));
+ acc->total.cache_hits = 0;
+ acc->total.cache_misses = 0;
+ acc->total.cache_bypass_hits = 0;
+ acc->total.cache_bypass_misses = 0;
+ acc->total.cache_readaheads = 0;
+ acc->total.cache_miss_collisions = 0;
+ acc->total.sectors_bypassed = 0;
}
void bch_cache_accounting_destroy(struct cache_accounting *acc)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 77e9869345e7..0c3c5419c52b 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -15,7 +15,6 @@
#include "writeback.h"
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
@@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq;
/* Superblock */
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
- struct page **res)
+ struct cache_sb_disk **res)
{
const char *err;
- struct cache_sb *s;
- struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+ struct cache_sb_disk *s;
+ struct page *page;
unsigned int i;
- if (!bh)
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(page))
return "IO error";
-
- s = (struct cache_sb *) bh->b_data;
+ s = page_address(page) + offset_in_page(SB_OFFSET);
sb->offset = le64_to_cpu(s->offset);
sb->version = le64_to_cpu(s->version);
@@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
}
sb->last_mount = (u32)ktime_get_real_seconds();
- err = NULL;
-
- get_page(bh->b_page);
- *res = bh->b_page;
+ *res = s;
+ return NULL;
err:
- put_bh(bh);
+ put_page(page);
return err;
}
@@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio)
closure_put(&dc->sb_write);
}
-static void __write_super(struct cache_sb *sb, struct bio *bio)
+static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
+ struct bio *bio)
{
- struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned int i;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_iter.bi_size = SB_SIZE;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
- bch_bio_map(bio, NULL);
+ __bio_add_page(bio, virt_to_page(out), SB_SIZE,
+ offset_in_page(out));
out->offset = cpu_to_le64(sb->offset);
out->version = cpu_to_le64(sb->version);
@@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_reset(bio);
+ bio_init(bio, dc->sb_bv, 1);
bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
closure_get(cl);
/* I/O request sent to backing device */
- __write_super(&dc->sb, bio);
+ __write_super(&dc->sb, dc->sb_disk, bio);
closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
@@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c)
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
- bio_reset(bio);
+ bio_init(bio, ca->sb_bv, 1);
bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
closure_get(cl);
- __write_super(&ca->sb, bio);
+ __write_super(&ca->sb, ca->sb_disk, bio);
}
closure_return_with_destructor(cl, bcache_write_super_unlock);
@@ -611,12 +609,13 @@ int bch_prio_write(struct cache *ca, bool wait)
return 0;
}
-static void prio_read(struct cache *ca, uint64_t bucket)
+static int prio_read(struct cache *ca, uint64_t bucket)
{
struct prio_set *p = ca->disk_buckets;
struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
struct bucket *b;
unsigned int bucket_nr = 0;
+ int ret = -EIO;
for (b = ca->buckets;
b < ca->buckets + ca->sb.nbuckets;
@@ -629,11 +628,15 @@ static void prio_read(struct cache *ca, uint64_t bucket)
prio_io(ca, bucket, REQ_OP_READ, 0);
if (p->csum !=
- bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+ bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
pr_warn("bad csum reading priorities");
+ goto out;
+ }
- if (p->magic != pset_magic(&ca->sb))
+ if (p->magic != pset_magic(&ca->sb)) {
pr_warn("bad magic reading priorities");
+ goto out;
+ }
bucket = p->next_bucket;
d = p->data;
@@ -642,6 +645,10 @@ static void prio_read(struct cache *ca, uint64_t bucket)
b->prio = le16_to_cpu(d->prio);
b->gen = b->last_gc = d->gen;
}
+
+ ret = 0;
+out:
+ return ret;
}
/* Bcache device */
@@ -1275,6 +1282,9 @@ static void cached_dev_free(struct closure *cl)
mutex_unlock(&bch_register_lock);
+ if (dc->sb_disk)
+ put_page(virt_to_page(dc->sb_disk));
+
if (!IS_ERR_OR_NULL(dc->bdev))
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1350,7 +1360,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
-static int register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev,
struct cached_dev *dc)
{
@@ -1362,11 +1372,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page,
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
-
- bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
-
+ dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
@@ -1876,8 +1882,10 @@ static int run_cache_set(struct cache_set *c)
j = &list_entry(journal.prev, struct journal_replay, list)->j;
err = "IO error reading priorities";
- for_each_cache(ca, c, i)
- prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
+ for_each_cache(ca, c, i) {
+ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+ goto err;
+ }
/*
* If prio_read() fails it'll call cache_set_error and we'll
@@ -1909,23 +1917,6 @@ static int run_cache_set(struct cache_set *c)
if (bch_btree_check(c))
goto err;
- /*
- * bch_btree_check() may occupy too much system memory which
- * has negative effects to user space application (e.g. data
- * base) performance. Shrink the mca cache memory proactively
- * here to avoid competing memory with user space workloads..
- */
- if (!c->shrinker_disabled) {
- struct shrink_control sc;
-
- sc.gfp_mask = GFP_KERNEL;
- sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
- /* first run to clear b->accessed tag */
- c->shrink.scan_objects(&c->shrink, &sc);
- /* second run to reap non-accessed nodes */
- c->shrink.scan_objects(&c->shrink, &sc);
- }
-
bch_journal_mark(c, &journal);
bch_initial_gc_finish(c);
pr_debug("btree_check() done");
@@ -2136,8 +2127,8 @@ void bch_cache_release(struct kobject *kobj)
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&ca->free[i]);
- if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(bio_first_page_all(&ca->sb_bio));
+ if (ca->sb_disk)
+ put_page(virt_to_page(ca->sb_disk));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -2259,7 +2250,7 @@ err_free:
return ret;
}
-static int register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev, struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
@@ -2269,10 +2260,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
-
- bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
+ ca->sb_disk = sb_disk;
if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
@@ -2372,29 +2360,35 @@ static bool bch_is_open(struct block_device *bdev)
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = -EINVAL;
- const char *err = "cannot allocate memory";
+ const char *err;
char *path = NULL;
- struct cache_sb *sb = NULL;
- struct block_device *bdev = NULL;
- struct page *sb_page = NULL;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+ ssize_t ret;
+ ret = -EBUSY;
+ err = "failed to reference bcache module";
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ goto out;
/* For latest state of bcache_is_reboot */
smp_mb();
+ err = "bcache is in reboot";
if (bcache_is_reboot)
- return -EBUSY;
+ goto out_module_put;
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
- goto err;
+ goto out_module_put;
sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
if (!sb)
- goto err;
+ goto out_free_path;
+ ret = -EINVAL;
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
@@ -2411,57 +2405,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto quiet_out;
+ goto done;
}
- goto err;
+ goto out_free_sb;
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
- goto err_close;
+ goto out_blkdev_put;
- err = read_super(sb, bdev, &sb_page);
+ err = read_super(sb, bdev, &sb_disk);
if (err)
- goto err_close;
+ goto out_blkdev_put;
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
- goto err_close;
+ goto out_put_sb_page;
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_page, bdev, dc);
+ ret = register_bdev(sb, sb_disk, bdev, dc);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
- goto err;
+ goto out_free_sb;
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
- goto err_close;
+ goto out_put_sb_page;
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err;
+ if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ goto out_free_sb;
}
-quiet_out:
- ret = size;
-out:
- if (sb_page)
- put_page(sb_page);
+
+done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
- return ret;
+ return size;
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
- pr_info("error %s: %s", path, err);
- goto out;
+out_put_sb_page:
+ put_page(virt_to_page(sb_disk));
+out_blkdev_put:
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+ kfree(sb);
+out_free_path:
+ kfree(path);
+ path = NULL;
+out_module_put:
+ module_put(THIS_MODULE);
+out:
+ pr_info("error %s: %s", path?path:"", err);
+ return ret;
}
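[Editor's sketch] The rewrite above replaces a single catch-all err: label with the usual kernel unwind ladder: resources are acquired in order, and each failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A generic sketch of the shape (do_work and the buffer sizes are hypothetical):

	static int setup_example(void)
	{
		void *a, *b;
		int ret = -ENOMEM;

		a = kmalloc(64, GFP_KERNEL);
		if (!a)
			goto out;
		b = kmalloc(64, GFP_KERNEL);
		if (!b)
			goto out_free_a;

		ret = do_work(a, b);	/* hypothetical step that can fail */
		if (ret)
			goto out_free_b;
		return 0;

	out_free_b:			/* unwind strictly in reverse order */
		kfree(b);
	out_free_a:
		kfree(a);
	out:
		return ret;
	}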
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 733e2ddf3c78..3470fae4eabc 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -27,6 +27,12 @@ static const char * const bch_cache_modes[] = {
NULL
};
+static const char * const bch_reada_cache_policies[] = {
+ "all",
+ "meta-only",
+ NULL
+};
+
/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
"auto",
@@ -100,6 +106,7 @@ rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
+rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
@@ -168,6 +175,11 @@ SHOW(__bch_cached_dev)
bch_cache_modes,
BDEV_CACHE_MODE(&dc->sb));
+ if (attr == &sysfs_readahead_cache_policy)
+ return bch_snprint_string_list(buf, PAGE_SIZE,
+ bch_reada_cache_policies,
+ dc->cache_readahead_policy);
+
if (attr == &sysfs_stop_when_cache_set_failed)
return bch_snprint_string_list(buf, PAGE_SIZE,
bch_stop_on_failure_modes,
@@ -353,6 +365,15 @@ STORE(__cached_dev)
}
}
+ if (attr == &sysfs_readahead_cache_policy) {
+ v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
+ if (v < 0)
+ return v;
+
+ if ((unsigned int) v != dc->cache_readahead_policy)
+ dc->cache_readahead_policy = v;
+ }
+
if (attr == &sysfs_stop_when_cache_set_failed) {
v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
if (v < 0)
@@ -467,6 +488,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_data_csum,
#endif
&sysfs_cache_mode,
+ &sysfs_readahead_cache_policy,
&sysfs_stop_when_cache_set_failed,
&sysfs_writeback_metadata,
&sysfs_writeback_running,
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index 8ee019eda32d..9dec3b61cf70 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -324,7 +324,7 @@ static bool __unlock(struct dm_bio_prison_v2 *prison,
bio_list_init(&cell->bios);
if (cell->shared_count) {
- cell->exclusive_lock = 0;
+ cell->exclusive_lock = false;
return false;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index eb9782fc93fe..c6a529873d0f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
- * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
+ * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
*
* This file is released under the GPL.
*/
@@ -115,6 +115,11 @@ struct iv_tcw_private {
u8 *whitening;
};
+#define ELEPHANT_MAX_KEY_SIZE 32
+struct iv_elephant_private {
+ struct crypto_skcipher *tfm;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -125,6 +130,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
+ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
};
/*
@@ -152,6 +158,7 @@ struct crypt_config {
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
struct iv_tcw_private tcw;
+ struct iv_elephant_private elephant;
} iv_gen_private;
u64 iv_offset;
unsigned int iv_size;
@@ -285,6 +292,11 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
* eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
* The IV is encrypted little-endian byte-offset (with the same key
* and cipher as the volume).
+ *
+ * elephant: The extended version of eboiv with additional Elephant diffuser
+ * used with Bitlocker CBC mode.
+ * This mode was used in older Windows systems
+ * http://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
*/
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ -331,8 +343,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
- int log = ilog2(bs);
+ unsigned bs;
+ int log;
+
+ if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
+ bs = crypto_aead_blocksize(any_tfm_aead(cc));
+ else
+ bs = crypto_skcipher_blocksize(any_tfm(cc));
+ log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
* to get the cipher block count, we use this shift in _gen */
@@ -717,7 +735,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
struct crypto_wait wait;
int err;
- req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS);
+ req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
if (!req)
return -ENOMEM;
@@ -734,6 +752,290 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
return err;
}
+static void crypt_iv_elephant_dtr(struct crypt_config *cc)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+
+ crypto_free_skcipher(elephant->tfm);
+ elephant->tfm = NULL;
+}
+
+static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ int r;
+
+ elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ if (IS_ERR(elephant->tfm)) {
+ r = PTR_ERR(elephant->tfm);
+ elephant->tfm = NULL;
+ return r;
+ }
+
+ r = crypt_iv_eboiv_ctr(cc, ti, NULL);
+ if (r)
+ crypt_iv_elephant_dtr(cc);
+ return r;
+}
+
+static void diffuser_disk_to_cpu(u32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+ int i;
+
+ for (i = 0; i < n; i++)
+ d[i] = le32_to_cpu((__le32)d[i]);
+#endif
+}
+
+static void diffuser_cpu_to_disk(__le32 *d, size_t n)
+{
+#ifndef __LITTLE_ENDIAN
+ int i;
+
+ for (i = 0; i < n; i++)
+ d[i] = cpu_to_le32((u32)d[i]);
+#endif
+}
+
+static void diffuser_a_decrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 5; i++) {
+ i1 = 0;
+ i2 = n - 2;
+ i3 = n - 5;
+
+ while (i1 < (n - 1)) {
+ d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+ i1++; i2++; i3++;
+
+ if (i3 >= n)
+ i3 -= n;
+
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+
+ if (i2 >= n)
+ i2 -= n;
+
+ d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+ i1++; i2++; i3++;
+
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+ }
+ }
+}
+
+static void diffuser_a_encrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 5; i++) {
+ i1 = n - 1;
+ i2 = n - 2 - 1;
+ i3 = n - 5 - 1;
+
+ while (i1 > 0) {
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+
+ d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
+ i1--; i2--; i3--;
+
+ if (i2 < 0)
+ i2 += n;
+
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+
+ if (i3 < 0)
+ i3 += n;
+
+ d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
+ i1--; i2--; i3--;
+ }
+ }
+}
+
+static void diffuser_b_decrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 3; i++) {
+ i1 = 0;
+ i2 = 2;
+ i3 = 5;
+
+ while (i1 < (n - 1)) {
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+
+ d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+ i1++; i2++; i3++;
+
+ if (i2 >= n)
+ i2 -= n;
+
+ d[i1] += d[i2] ^ d[i3];
+ i1++; i2++; i3++;
+
+ if (i3 >= n)
+ i3 -= n;
+
+ d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+ i1++; i2++; i3++;
+ }
+ }
+}
+
+static void diffuser_b_encrypt(u32 *d, size_t n)
+{
+ int i, i1, i2, i3;
+
+ for (i = 0; i < 3; i++) {
+ i1 = n - 1;
+ i2 = 2 - 1;
+ i3 = 5 - 1;
+
+ while (i1 > 0) {
+ d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
+ i1--; i2--; i3--;
+
+ if (i3 < 0)
+ i3 += n;
+
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+
+ if (i2 < 0)
+ i2 += n;
+
+ d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
+ i1--; i2--; i3--;
+
+ d[i1] -= d[i2] ^ d[i3];
+ i1--; i2--; i3--;
+ }
+ }
+}
+
+static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ u8 *es, *ks, *data, *data2, *data_offset;
+ struct skcipher_request *req;
+ struct scatterlist *sg, *sg2, src, dst;
+ struct crypto_wait wait;
+ int i, r;
+
+ req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
+ es = kzalloc(16, GFP_NOIO); /* Key for AES */
+ ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */
+
+ if (!req || !es || !ks) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
+
+ /* E(Ks, e(s)) */
+ sg_init_one(&src, es, 16);
+ sg_init_one(&dst, ks, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ if (r)
+ goto out;
+
+ /* E(Ks, e'(s)) */
+ es[15] = 0x80;
+ sg_init_one(&dst, &ks[16], 16);
+ r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ if (r)
+ goto out;
+
+ sg = crypt_get_sg_data(cc, dmreq->sg_out);
+ data = kmap_atomic(sg_page(sg));
+ data_offset = data + sg->offset;
+
+ /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
+ data2 = kmap_atomic(sg_page(sg2));
+ memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
+ kunmap_atomic(data2);
+ }
+
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ }
+
+ for (i = 0; i < (cc->sector_size / 32); i++)
+ crypto_xor(data_offset + i * 32, ks, 32);
+
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ }
+
+ kunmap_atomic(data);
+out:
+ kzfree(ks);
+ kzfree(es);
+ skcipher_request_free(req);
+ return r;
+}
+
+static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ int r;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ r = crypt_iv_elephant(cc, dmreq);
+ if (r)
+ return r;
+ }
+
+ return crypt_iv_eboiv_gen(cc, iv, dmreq);
+}
+
+static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+ return crypt_iv_elephant(cc, dmreq);
+
+ return 0;
+}
+
+static int crypt_iv_elephant_init(struct crypt_config *cc)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ int key_offset = cc->key_size - cc->key_extra_size;
+
+ return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
+}
+
+static int crypt_iv_elephant_wipe(struct crypt_config *cc)
+{
+ struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
+ u8 key[ELEPHANT_MAX_KEY_SIZE];
+
+ memset(key, 0, cc->key_extra_size);
+ return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
+}
+
static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
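[Editor's sketch] Each diffuser pass is an add/XOR network over 32-bit words with fixed rotations (x << 9 | x >> 23 is a 32-bit left-rotate by 9), and the encrypt direction replays the decrypt steps backwards with subtraction, so the two are exact inverses. A user-space round-trip check, assuming the two diffuser_a_* routines above are copied into the file together with a typedef for u32:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	typedef uint32_t u32;
	/* ... diffuser_a_encrypt()/diffuser_a_decrypt() pasted here verbatim ... */

	int main(void)
	{
		u32 buf[128], ref[128];	/* one 512-byte sector as 32-bit words */
		size_t i;

		for (i = 0; i < 128; i++)
			ref[i] = buf[i] = (u32)(i * 2654435761u);	/* arbitrary */

		diffuser_a_encrypt(buf, 128);
		diffuser_a_decrypt(buf, 128);
		assert(!memcmp(buf, ref, sizeof(buf)));	/* exact inverse */
		return 0;
	}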
@@ -787,6 +1089,15 @@ static struct crypt_iv_operations crypt_iv_eboiv_ops = {
.generator = crypt_iv_eboiv_gen
};
+static struct crypt_iv_operations crypt_iv_elephant_ops = {
+ .ctr = crypt_iv_elephant_ctr,
+ .dtr = crypt_iv_elephant_dtr,
+ .init = crypt_iv_elephant_init,
+ .wipe = crypt_iv_elephant_wipe,
+ .generator = crypt_iv_elephant_gen,
+ .post = crypt_iv_elephant_post
+};
+
/*
* Integrity extensions
*/
@@ -1103,6 +1414,9 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
if (r < 0)
return r;
+ /* Data can be already preprocessed in generator */
+ if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
+ sg_in = sg_out;
/* Store generated IV in integrity metadata */
if (cc->integrity_iv_size)
memcpy(tag_iv, org_iv, cc->integrity_iv_size);
@@ -2191,7 +2505,14 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "eboiv") == 0)
cc->iv_gen_ops = &crypt_iv_eboiv_ops;
- else if (strcmp(ivmode, "lmk") == 0) {
+ else if (strcmp(ivmode, "elephant") == 0) {
+ cc->iv_gen_ops = &crypt_iv_elephant_ops;
+ cc->key_parts = 2;
+ cc->key_extra_size = cc->key_size / 2;
+ if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
+ return -EINVAL;
+ set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
+ } else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
* Version 2 and 3 is recognised according
@@ -2959,7 +3280,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 19, 0},
+ .version = {1, 20, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index eb37584427a4..ff03b90072c5 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -207,16 +207,16 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
bool fail_read_on_bb)
{
unsigned long flags;
- int ret = DM_MAPIO_REMAPPED;
+ int r = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
- ret = __dust_map_write(dd, thisblock);
+ r = __dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
}
- return ret;
+ return r;
}
static int dust_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e0c32793c248..2bc18c9c3abc 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -20,6 +20,7 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
+#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
@@ -29,6 +30,9 @@
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
+#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
+
+static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
/* Path properties */
struct pgpath {
@@ -91,6 +95,8 @@ struct multipath {
struct work_struct process_queued_bios;
struct bio_list queued_bios;
+
+ struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
};
/*
@@ -108,6 +114,7 @@ static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
+static void queue_if_no_path_timeout_work(struct timer_list *t);
/*-----------------------------------------------
* Multipath state flags.
@@ -195,6 +202,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->ti = ti;
ti->private = m;
+
+ timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
}
return m;
@@ -718,6 +727,43 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
}
/*
+ * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
+ * process any queued I/O.
+ */
+static void queue_if_no_path_timeout_work(struct timer_list *t)
+{
+ struct multipath *m = from_timer(m, t, nopath_timer);
+ struct mapped_device *md = dm_table_get_md(m->ti->table);
+
+ DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
+ queue_if_no_path(m, false, false);
+}
+
+/*
+ * Enable the queue_if_no_path timeout if necessary.
+ * Called with m->lock held.
+ */
+static void enable_nopath_timeout(struct multipath *m)
+{
+ unsigned long queue_if_no_path_timeout =
+ READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
+
+ lockdep_assert_held(&m->lock);
+
+ if (queue_if_no_path_timeout > 0 &&
+ atomic_read(&m->nr_valid_paths) == 0 &&
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ mod_timer(&m->nopath_timer,
+ jiffies + queue_if_no_path_timeout);
+ }
+}
+
+static void disable_nopath_timeout(struct multipath *m)
+{
+ del_timer_sync(&m->nopath_timer);
+}
+
+/*
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
*/
@@ -1090,6 +1136,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
+ unsigned long flags;
as.argc = argc;
as.argv = argv;
@@ -1154,6 +1201,10 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ spin_lock_irqsave(&m->lock, flags);
+ enable_nopath_timeout(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
@@ -1208,6 +1259,7 @@ static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ disable_nopath_timeout(m);
flush_multipath_work(m);
free_multipath(m);
}
@@ -1241,6 +1293,8 @@ static int fail_path(struct pgpath *pgpath)
schedule_work(&m->trigger_event);
+ enable_nopath_timeout(m);
+
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -1291,6 +1345,9 @@ out:
process_queued_io_list(m);
}
+ if (pgpath->is_active)
+ disable_nopath_timeout(m);
+
return r;
}
@@ -1444,7 +1501,7 @@ static void pg_init_done(void *data, int errors)
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
- delay_retry = 1;
+ delay_retry = true;
/* fall through */
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
@@ -1789,6 +1846,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
struct dm_dev *dev;
struct multipath *m = ti->private;
action_fn action;
+ unsigned long flags;
mutex_lock(&m->work_mutex);
@@ -1800,9 +1858,13 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false);
+ spin_lock_irqsave(&m->lock, flags);
+ enable_nopath_timeout(m);
+ spin_unlock_irqrestore(&m->lock, flags);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false);
+ disable_nopath_timeout(m);
goto out;
}
}
@@ -2065,6 +2127,10 @@ static void __exit dm_multipath_exit(void)
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
+module_param_named(queue_if_no_path_timeout_secs,
+ queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
+
MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
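[Editor's sketch] The timeout plumbing above is the stock kernel timer pattern: timer_setup() binds the callback once at allocation time, from_timer() (a container_of() wrapper) recovers the owning object inside the callback, mod_timer() (re)arms, and del_timer_sync() disarms and waits out a running handler. A minimal sketch with a hypothetical structure:

	#include <linux/timer.h>

	struct foo {
		struct timer_list timer;
	};

	static void foo_timeout(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);	/* owner from timer */

		/* ... handle the timeout on f ... */
	}

	static void foo_arm(struct foo *f, unsigned long secs)
	{
		timer_setup(&f->timer, foo_timeout, 0);
		mod_timer(&f->timer, jiffies + secs * HZ);
	}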
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c412eaa975fc..9a18bef0a5ff 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -129,7 +129,9 @@ struct raid_dev {
CTR_FLAG_RAID10_COPIES | \
CTR_FLAG_RAID10_FORMAT | \
CTR_FLAG_DELTA_DISKS | \
- CTR_FLAG_DATA_OFFSET)
+ CTR_FLAG_DATA_OFFSET | \
+ CTR_FLAG_JOURNAL_DEV | \
+ CTR_FLAG_JOURNAL_MODE)
/* Valid options definitions per raid level... */
@@ -3001,7 +3003,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{ 1, 254, "Cannot understand number of raid devices parameters" }
};
- /* Must have <raid_type> */
arg = dm_shift_arg(&as);
if (!arg) {
ti->error = "No arguments";
@@ -3508,8 +3509,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned int sz = 0;
- unsigned int rebuild_disks;
- unsigned int write_mostly_params = 0;
+ unsigned int rebuild_writemostly_count = 0;
sector_t progress, resync_max_sectors, resync_mismatches;
enum sync_state state;
struct raid_type *rt;
@@ -3593,18 +3593,20 @@ static void raid_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
/* Report the table line string you would use to construct this raid set */
- /* Calculate raid parameter count */
- for (i = 0; i < rs->raid_disks; i++)
- if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
- write_mostly_params += 2;
- rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
- raid_param_cnt += rebuild_disks * 2 +
- write_mostly_params +
+ /*
+ * Count any rebuild or writemostly argument pairs and subtract the
+ * hweight count, added back below, of the rebuild and writemostly ctr flags.
+ */
+ for (i = 0; i < rs->raid_disks; i++) {
+ rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
+ (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
+ }
+ rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
+ (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
+ /* Calculate raid parameter count from the above rebuild/writemostly argument counts and the ctr flags set. */
+ raid_param_cnt += rebuild_writemostly_count +
hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
- hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
- (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
- (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
-
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
/* Emit table line */
/* This has to be in the documented order for userspace! */
DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
@@ -3612,11 +3614,10 @@ static void raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
- if (rebuild_disks)
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
for (i = 0; i < rs->raid_disks; i++)
- if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
- DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
- rs->dev[i].rdev.raid_disk);
+ if (test_bit(i, (void *) rs->rebuild_disks))
+ DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
mddev->bitmap_info.daemon_sleep);
@@ -3626,7 +3627,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
mddev->sync_speed_max);
- if (write_mostly_params)
+ if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
for (i = 0; i < rs->raid_disks; i++)
if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
@@ -4029,7 +4030,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 15, 0},
+ .version = {1, 15, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
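[Editor's sketch] The new counting scheme is easiest to check with a worked example; this assumes rebuild and write_mostly are both one-arg options counted by the hweight32 term. With two rebuild disks, one writemostly disk, and both ctr flags set:

	/* User-space check of the count above: 2 rebuild disks, 1 writemostly disk. */
	#include <stdio.h>

	int main(void)
	{
		int pairs = 2 * 2 + 1 * 2;	/* per-disk loop: one "<name> <idx>" pair each */
		int correction = 2 + 2;		/* both ctr flags set, subtracted once */
		int hweight_term = 2 * 2;	/* both flags are one-arg options */

		printf("%d\n", pairs - correction + hweight_term);	/* 6 */
		return 0;
	}

That is, "rebuild A rebuild B write_mostly C" contributes exactly six table arguments, however many disks carry each flag.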
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3c50c4e4da8f..963d3774c93e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -17,7 +17,7 @@
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */
#define DM_PREFETCH_CHUNKS 12
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 4fb1a40e68a0..6b11a266299f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1061,7 +1061,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
DMERR("Read error in exception store: "
"shutting down merge");
down_write(&s->lock);
- s->merge_failed = 1;
+ s->merge_failed = true;
up_write(&s->lock);
}
goto shut;
@@ -1149,7 +1149,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
shut:
down_write(&s->lock);
- s->merge_failed = 1;
+ s->merge_failed = true;
b = __release_queued_bios_after_merge(s);
up_write(&s->lock);
error_bios(b);
@@ -1314,7 +1314,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
- s->merge_failed = 0;
+ s->merge_failed = false;
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b88d6d701f5b..fc9947d6210c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -28,7 +28,7 @@
*
* - A hierarchical btree, with 2 levels which effectively maps (thin
* dev id, virtual block) -> block_time. Block time is a 64-bit
- * field holding the time in the low 24 bits, and block in the top 48
+ * field holding the time in the low 24 bits, and block in the top 40
* bits.
*
* BTrees consist solely of btree_nodes, that fill a block. Some are
@@ -387,16 +387,15 @@ static int subtree_equal(void *context, const void *value1_le, const void *value
* Variant that is used for in-core only changes or code that
* shouldn't put the pool in service on its own (e.g. commit).
*/
-static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
+static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
__acquires(pmd->root_lock)
{
down_write(&pmd->root_lock);
}
-#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
{
- __pmd_write_lock(pmd);
+ pmd_write_lock_in_core(pmd);
if (unlikely(!pmd->in_service))
pmd->in_service = true;
}
@@ -811,7 +810,7 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
return r;
if (td->open_count)
- td->changed = 0;
+ td->changed = false;
else {
list_del(&td->list);
kfree(td);
@@ -831,6 +830,7 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
* We need to know if the thin_disk_superblock exceeds a 512-byte sector.
*/
BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+ BUG_ON(!rwsem_is_locked(&pmd->root_lock));
if (unlikely(!pmd->in_service))
return 0;
@@ -953,6 +953,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
return -EBUSY;
}
+ pmd_write_lock_in_core(pmd);
if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
r = __commit_transaction(pmd);
if (r < 0)
@@ -961,6 +962,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
}
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
+ pmd_write_unlock(pmd);
kfree(pmd);
return 0;
@@ -1106,7 +1108,7 @@ static int __set_snapshot_details(struct dm_pool_metadata *pmd,
if (r)
return r;
- td->changed = 1;
+ td->changed = true;
td->snapshotted_time = time;
snap->mapped_blocks = td->mapped_blocks;
@@ -1618,7 +1620,7 @@ static int __insert(struct dm_thin_device *td, dm_block_t block,
if (r)
return r;
- td->changed = 1;
+ td->changed = true;
if (inserted)
td->mapped_blocks++;
@@ -1649,7 +1651,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
return r;
td->mapped_blocks--;
- td->changed = 1;
+ td->changed = true;
return 0;
}
@@ -1703,7 +1705,7 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
}
td->mapped_blocks -= total_count;
- td->changed = 1;
+ td->changed = true;
/*
* Reinsert the mapping tree.
@@ -1841,7 +1843,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
* Care is taken to not have commit be what
* triggers putting the thin-pool in-service.
*/
- __pmd_write_lock(pmd);
+ pmd_write_lock_in_core(pmd);
if (pmd->fail_io)
goto out;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 57626c27a54b..fa8d5464c1fb 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -231,6 +231,7 @@ struct pool {
struct dm_target *ti; /* Only set if a pool target is bound */
struct mapped_device *pool_md;
+ struct block_device *data_dev;
struct block_device *md_dev;
struct dm_pool_metadata *pmd;
@@ -281,6 +282,8 @@ struct pool {
struct dm_bio_prison_cell **cell_sort_array;
mempool_t mapping_pool;
+
+ struct bio flush_bio;
};
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -328,7 +331,6 @@ struct pool_c {
dm_block_t low_water_blocks;
struct pool_features requested_pf; /* Features requested during table load */
struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
- struct bio flush_bio;
};
/*
@@ -2924,6 +2926,7 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, &pool->mapping_pool);
mempool_exit(&pool->mapping_pool);
+ bio_uninit(&pool->flush_bio);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2933,6 +2936,7 @@ static struct kmem_cache *_new_mapping_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
+ struct block_device *data_dev,
unsigned long block_size,
int read_only, char **error)
{
@@ -3003,6 +3007,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
+ bio_init(&pool->flush_bio, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3040,6 +3045,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
pool->md_dev = metadata_dev;
+ pool->data_dev = data_dev;
__pool_table_insert(pool);
return pool;
@@ -3081,6 +3087,7 @@ static void __pool_dec(struct pool *pool)
static struct pool *__pool_find(struct mapped_device *pool_md,
struct block_device *metadata_dev,
+ struct block_device *data_dev,
unsigned long block_size, int read_only,
char **error, int *created)
{
@@ -3091,19 +3098,23 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
*error = "metadata device already in use by a pool";
return ERR_PTR(-EBUSY);
}
+ if (pool->data_dev != data_dev) {
+ *error = "data device already in use by a pool";
+ return ERR_PTR(-EBUSY);
+ }
__pool_inc(pool);
} else {
pool = __pool_table_lookup(pool_md);
if (pool) {
- if (pool->md_dev != metadata_dev) {
+ if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
*error = "different pool cannot replace a pool";
return ERR_PTR(-EINVAL);
}
__pool_inc(pool);
} else {
- pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
+ pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
*created = 1;
}
}
@@ -3124,7 +3135,6 @@ static void pool_dtr(struct dm_target *ti)
__pool_dec(pt->pool);
dm_put_device(ti, pt->metadata_dev);
dm_put_device(ti, pt->data_dev);
- bio_uninit(&pt->flush_bio);
kfree(pt);
mutex_unlock(&dm_thin_pool_table.mutex);
@@ -3203,11 +3213,11 @@ static void metadata_low_callback(void *context)
*/
static int metadata_pre_commit_callback(void *context)
{
- struct pool_c *pt = context;
- struct bio *flush_bio = &pt->flush_bio;
+ struct pool *pool = context;
+ struct bio *flush_bio = &pool->flush_bio;
bio_reset(flush_bio);
- bio_set_dev(flush_bio, pt->data_dev->bdev);
+ bio_set_dev(flush_bio, pool->data_dev);
flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
return submit_bio_wait(flush_bio);
@@ -3356,7 +3366,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
+ pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
if (IS_ERR(pool)) {
r = PTR_ERR(pool);
@@ -3381,7 +3391,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
- bio_init(&pt->flush_bio, NULL, 0);
ti->num_flush_bios = 1;
/*
@@ -3408,9 +3417,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto out_flags_changed;
- dm_pool_register_pre_commit_callback(pt->pool->pmd,
- metadata_pre_commit_callback,
- pt);
+ dm_pool_register_pre_commit_callback(pool->pmd,
+ metadata_pre_commit_callback, pool);
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -4099,7 +4107,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 21, 0},
+ .version = {1, 22, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4476,7 +4484,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 21, 0},
+ .version = {1, 22, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
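[Editor's sketch] Moving flush_bio (and the callback context) from the table-scoped pool_c into the long-lived pool keeps the pre-commit hook valid across table reloads. The hook itself is the standard empty-flush idiom: a payload-less REQ_PREFLUSH write that forces the data device's volatile cache out before the metadata commit can reference the data. A minimal sketch:

	/* Sketch: flush a device's write cache; bio storage owned by the caller. */
	static int flush_data_dev(struct block_device *bdev, struct bio *bio)
	{
		bio_reset(bio);
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;	/* no payload */
		return submit_bio_wait(bio);	/* 0, or a negative errno */
	}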
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4fb33e7562c5..0d61e9c67986 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -611,8 +611,22 @@ no_prefetch_cluster:
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
+ sector_t block = io->block;
+ unsigned int n_blocks = io->n_blocks;
struct dm_verity_prefetch_work *pw;
+ if (v->validated_blocks) {
+ while (n_blocks && test_bit(block, v->validated_blocks)) {
+ block++;
+ n_blocks--;
+ }
+ while (n_blocks && test_bit(block + n_blocks - 1,
+ v->validated_blocks))
+ n_blocks--;
+ if (!n_blocks)
+ return;
+ }
+
pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -621,8 +635,8 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
INIT_WORK(&pw->work, verity_prefetch_io);
pw->v = v;
- pw->block = io->block;
- pw->n_blocks = io->n_blocks;
+ pw->block = block;
+ pw->n_blocks = n_blocks;
queue_work(v->verify_wq, &pw->work);
}
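[Editor's sketch] The prefetch trim above shrinks the requested window from both ends, dropping blocks whose hashes were already verified, and skips queueing the work entirely when nothing is left. The same two-sided trim in stand-alone form (user-space sketch):

	#include <stdbool.h>
	#include <stddef.h>

	static bool bit_set(const unsigned long *map, size_t i)
	{
		return (map[i / (8 * sizeof(*map))] >> (i % (8 * sizeof(*map)))) & 1;
	}

	/* Trim [*start, *start + n) against already-done bits; returns new length. */
	static size_t trim_range(const unsigned long *done, size_t *start, size_t n)
	{
		while (n && bit_set(done, *start)) {		/* drop validated head */
			(*start)++;
			n--;
		}
		while (n && bit_set(done, *start + n - 1))	/* drop validated tail */
			n--;
		return n;	/* 0 means nothing left to prefetch */
	}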
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 7d727a72aa13..b9e27e37a943 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -442,7 +442,13 @@ static void writecache_notify_io(unsigned long error, void *context)
complete(&endio->c);
}
-static void ssd_commit_flushed(struct dm_writecache *wc)
+static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
+{
+ wait_event(wc->bio_in_progress_wait[direction],
+ !atomic_read(&wc->bio_in_progress[direction]));
+}
+
+static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
struct dm_io_region region;
struct dm_io_request req;
@@ -488,17 +494,20 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
writecache_notify_io(0, &endio);
wait_for_completion_io(&endio.c);
+ if (wait_for_ios)
+ writecache_wait_for_ios(wc, WRITE);
+
writecache_disk_flush(wc, wc->ssd_dev);
memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}
-static void writecache_commit_flushed(struct dm_writecache *wc)
+static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
if (WC_MODE_PMEM(wc))
wmb();
else
- ssd_commit_flushed(wc);
+ ssd_commit_flushed(wc, wait_for_ios);
}
static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
@@ -522,12 +531,6 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
writecache_error(wc, r, "error flushing metadata: %d", r);
}
-static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
-{
- wait_event(wc->bio_in_progress_wait[direction],
- !atomic_read(&wc->bio_in_progress[direction]));
-}
-
#define WFE_RETURN_FOLLOWING 1
#define WFE_LOWEST_SEQ 2
@@ -622,7 +625,7 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
wc->freelist_size++;
}
-static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
+static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
struct wc_entry *e;
@@ -631,6 +634,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
if (unlikely(!wc->current_free))
return NULL;
e = wc->current_free;
+ if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
+ return NULL;
next = rb_next(&e->rb_node);
rb_erase(&e->rb_node, &wc->freetree);
if (unlikely(!next))
@@ -640,6 +645,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
if (unlikely(list_empty(&wc->freelist)))
return NULL;
e = container_of(wc->freelist.next, struct wc_entry, lru);
+ if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
+ return NULL;
list_del(&e->lru);
}
wc->freelist_size--;
@@ -724,15 +731,12 @@ static void writecache_flush(struct dm_writecache *wc)
e = e2;
cond_resched();
}
- writecache_commit_flushed(wc);
-
- if (!WC_MODE_PMEM(wc))
- writecache_wait_for_ios(wc, WRITE);
+ writecache_commit_flushed(wc, true);
wc->seq_count++;
pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
wc->overwrote_committed = false;
@@ -756,7 +760,7 @@ static void writecache_flush(struct dm_writecache *wc)
}
if (need_flush_after_free)
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
}
static void writecache_flush_work(struct work_struct *work)
@@ -809,7 +813,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
}
if (discarded_something)
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
}
static bool writecache_wait_for_writeback(struct dm_writecache *wc)
@@ -958,7 +962,7 @@ erase_this:
if (need_flush) {
writecache_flush_all_metadata(wc);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
}
wc_unlock(wc);
@@ -1193,7 +1197,7 @@ read_next_block:
goto bio_copy;
}
}
- e = writecache_pop_from_freelist(wc);
+ e = writecache_pop_from_freelist(wc, (sector_t)-1);
if (unlikely(!e)) {
writecache_wait_on_freelist(wc);
continue;
@@ -1205,9 +1209,26 @@ bio_copy:
if (WC_MODE_PMEM(wc)) {
bio_copy_block(wc, bio, memory_data(wc, e));
} else {
- dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
+ unsigned bio_size = wc->block_size;
+ sector_t start_cache_sec = cache_sector(wc, e);
+ sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+
+ while (bio_size < bio->bi_iter.bi_size) {
+ struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+ if (!f)
+ break;
+ write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+ (bio_size >> SECTOR_SHIFT), wc->seq_count);
+ writecache_insert_entry(wc, f);
+ wc->uncommitted_blocks++;
+ bio_size += wc->block_size;
+ current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+ }
+
bio_set_dev(bio, wc->ssd_dev->bdev);
- bio->bi_iter.bi_sector = cache_sector(wc, e);
+ bio->bi_iter.bi_sector = start_cache_sec;
+ dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+
if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
wc->uncommitted_blocks = 0;
queue_work(wc->writeback_wq, &wc->flush_work);
@@ -1342,7 +1363,7 @@ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *
wc->writeback_size--;
n_walked++;
if (unlikely(n_walked >= ENDIO_LATENCY)) {
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
wc_unlock(wc);
wc_lock(wc);
n_walked = 0;
@@ -1423,7 +1444,7 @@ pop_from_list:
writecache_wait_for_ios(wc, READ);
}
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
wc_unlock(wc);
}
@@ -1766,10 +1787,10 @@ static int init_memory(struct dm_writecache *wc)
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
writecache_flush_all_metadata(wc);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
- writecache_commit_flushed(wc);
+ writecache_commit_flushed(wc, false);
return 0;
}
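[Editor's sketch] The expected_sector argument turns the freelist pop into a conditional take: after the first block is claimed, the write path keeps claiming only blocks that sit at exactly the next cache sector, so a large bio is carved into one contiguous SSD write rather than block_size-sized pieces. A toy model of the growth loop (user-space sketch; 8 sectors per 4 KiB block, freemap is hypothetical):

	#include <stdbool.h>
	#include <stddef.h>

	static bool freemap[1024];	/* toy freelist: one flag per cache block */

	static bool take_free_block_at(size_t sec)	/* claim iff free and adjacent */
	{
		size_t b = sec / 8;

		if (b >= 1024 || !freemap[b])
			return false;
		freemap[b] = false;
		return true;
	}

	/* Grow a claimed extent starting at first_sec toward want_secs sectors. */
	static size_t grow_extent(size_t first_sec, size_t want_secs)
	{
		size_t got = 8;		/* the first block was already claimed */

		while (got < want_secs && take_free_block_at(first_sec + got))
			got += 8;
		return got;		/* contiguous sectors now owned */
	}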
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 22b3cb0050a7..516c7b671d25 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -134,6 +134,7 @@ struct dmz_metadata {
sector_t zone_bitmap_size;
unsigned int zone_nr_bitmap_blocks;
+ unsigned int zone_bits_per_mblk;
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
@@ -1161,7 +1162,10 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
/* Init */
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
- zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
+ zmd->zone_nr_bitmap_blocks =
+ max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
+ zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+ DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
@@ -1956,7 +1960,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
dmz_release_mblock(zmd, to_mblk);
dmz_release_mblock(zmd, from_mblk);
- chunk_block += DMZ_BLOCK_SIZE_BITS;
+ chunk_block += zmd->zone_bits_per_mblk;
}
to_zone->weight = from_zone->weight;
@@ -2017,7 +2021,7 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Set bits */
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
if (count) {
@@ -2096,7 +2100,7 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Clear bits */
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
count = dmz_clear_bits((unsigned long *)mblk->data,
bit, nr_bits);
@@ -2156,6 +2160,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
{
struct dmz_mblock *mblk;
unsigned int bit, set_bit, nr_bits;
+ unsigned int zone_bits = zmd->zone_bits_per_mblk;
unsigned long *bitmap;
int n = 0;
@@ -2170,15 +2175,15 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Get offset */
bitmap = (unsigned long *) mblk->data;
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zone_bits - bit);
if (set)
- set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+ set_bit = find_next_bit(bitmap, zone_bits, bit);
else
- set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+ set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
dmz_release_mblock(zmd, mblk);
n += set_bit - bit;
- if (set_bit < DMZ_BLOCK_SIZE_BITS)
+ if (set_bit < zone_bits)
break;
nr_blocks -= nr_bits;
@@ -2281,7 +2286,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Count bits in this block */
bitmap = mblk->data;
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
- nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
n += dmz_count_bits(bitmap, bit, nr_bits);
dmz_release_mblock(zmd, mblk);
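[Editor's sketch] The zone_bits_per_mblk fix matters when a zone holds fewer blocks than one metadata block has bits: previously the bitmap-block count could compute to zero, and the bit scans assumed a full DMZ_BLOCK_SIZE_BITS, walking past the valid range. A worked example, assuming 4 KiB metadata blocks and a 16384-block zone:

	#include <stdint.h>
	#include <stdio.h>

	#define DMZ_BLOCK_SHIFT		12		/* 4 KiB metadata blocks */
	#define DMZ_BLOCK_SIZE_BITS	(4096 * 8)	/* 32768 bits per block */

	int main(void)
	{
		uint64_t zone_nr_blocks = 16384;	/* smaller than one bitmap block */
		uint64_t bitmap_bytes = zone_nr_blocks >> 3;	/* 2048 */
		uint64_t nr_bitmap_blocks = bitmap_bytes >> DMZ_BLOCK_SHIFT; /* 0 before the fix */

		if (nr_bitmap_blocks < 1)
			nr_bitmap_blocks = 1;		/* the max_t(..., 1, ...) clamp */

		uint64_t bits_per_mblk = zone_nr_blocks < DMZ_BLOCK_SIZE_BITS ?
					 zone_nr_blocks : DMZ_BLOCK_SIZE_BITS;	/* 16384 */

		printf("%llu bitmap block(s), %llu bits each\n",
		       (unsigned long long)nr_bitmap_blocks,
		       (unsigned long long)bits_per_mblk);
		return 0;
	}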
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e8f9661a10a1..b89f07ee2eff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1859,6 +1859,7 @@ static void dm_init_normal_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
+ md->queue->backing_dev_info->congested_data = md;
md->queue->backing_dev_info->congested_fn = dm_any_congested;
}
@@ -1949,7 +1950,12 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->queue)
goto bad;
md->queue->queuedata = md;
- md->queue->backing_dev_info->congested_data = md;
+ /*
+ * default to bio-based required ->make_request_fn until DM
+ * table is loaded and md->type established. If request-based
+ * table is loaded: blk-mq will override accordingly.
+ */
+ blk_queue_make_request(md->queue, dm_make_request);
md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
@@ -2264,7 +2270,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
dm_init_normal_md_queue(md);
- blk_queue_make_request(md->queue, dm_make_request);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 3ad18246fcb3..b952bd45bd6a 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -364,7 +364,7 @@ static int read_page(struct file *file, unsigned long index,
int ret = 0;
struct inode *inode = file_inode(file);
struct buffer_head *bh;
- sector_t block;
+ sector_t block, blk_cur;
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
@@ -375,17 +375,21 @@ static int read_page(struct file *file, unsigned long index,
goto out;
}
attach_page_buffers(page, bh);
- block = index << (PAGE_SHIFT - inode->i_blkbits);
+ blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
while (bh) {
+ block = blk_cur;
+
if (count == 0)
bh->b_blocknr = 0;
else {
- bh->b_blocknr = bmap(inode, block);
- if (bh->b_blocknr == 0) {
- /* Cannot use this file! */
+ ret = bmap(inode, &block);
+ if (ret || !block) {
ret = -EINVAL;
+ bh->b_blocknr = 0;
goto out;
}
+
+ bh->b_blocknr = block;
bh->b_bdev = inode->i_sb->s_bdev;
if (count < (1<<inode->i_blkbits))
count = 0;
@@ -399,7 +403,7 @@ static int read_page(struct file *file, unsigned long index,
set_buffer_mapped(bh);
submit_bh(REQ_OP_READ, 0, bh);
}
- block++;
+ blk_cur++;
bh = bh->b_this_page;
}
page->index = index;
@@ -1019,8 +1023,6 @@ void md_bitmap_unplug(struct bitmap *bitmap)
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
for (i = 0; i < bitmap->storage.file_pages; i++) {
- if (!bitmap->storage.filemap)
- return;
dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
@@ -1338,7 +1340,8 @@ void md_bitmap_daemon_work(struct mddev *mddev)
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
break;
- if (test_and_clear_page_attr(bitmap, j,
+ if (bitmap->storage.filemap &&
+ test_and_clear_page_attr(bitmap, j,
BITMAP_PAGE_NEEDWRITE)) {
write_page(bitmap, bitmap->storage.filemap[j], 0);
}
@@ -1790,8 +1793,8 @@ void md_bitmap_destroy(struct mddev *mddev)
return;
md_bitmap_wait_behind_writes(mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, true);
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
@@ -1908,7 +1911,7 @@ int md_bitmap_load(struct mddev *mddev)
goto out;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, true);
+ mddev_create_serial_pool(mddev, rdev, true);
if (mddev_is_clustered(mddev))
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
@@ -2475,16 +2478,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
if (backlog > COUNTER_MAX)
return -EINVAL;
mddev->bitmap_info.max_write_behind = backlog;
- if (!backlog && mddev->wb_info_pool) {
- /* wb_info_pool is not needed if backlog is zero */
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- } else if (backlog && !mddev->wb_info_pool) {
- /* wb_info_pool is needed since backlog is not zero */
+ if (!backlog && mddev->serial_info_pool) {
+ /* serial_info_pool is not needed if backlog is zero */
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, false);
+ } else if (backlog && !mddev->serial_info_pool) {
+ /* serial_info_pool is needed since backlog is not zero */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
}
if (old_mwb != backlog)
md_bitmap_update_sb(mddev->bitmap);
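[Editor's sketch] The read_page() rework tracks a bmap() interface that reports lookup failure through its return value and passes the block number in and out by pointer, so a hole (*block == 0) and an error are finally distinguishable. A sketch of the per-buffer mapping step, assuming that signature:

	/* Sketch: map one logical block onto bh, new-style bmap() assumed. */
	static int map_one_block(struct inode *inode, sector_t logical,
				 struct buffer_head *bh)
	{
		sector_t block = logical;	/* in: logical, out: physical */
		int ret = bmap(inode, &block);

		if (ret || !block)		/* lookup error, or a hole: unusable */
			return -EINVAL;

		bh->b_blocknr = block;
		bh->b_bdev = inode->i_sb->s_bdev;
		return 0;
	}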
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e7c9f398bc6..469f551863be 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static int rdev_init_wb(struct md_rdev *rdev)
+static void rdev_uninit_serial(struct md_rdev *rdev)
{
- if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+ if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
+ return;
+
+ kvfree(rdev->serial);
+ rdev->serial = NULL;
+}
+
+static void rdevs_uninit_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ rdev_uninit_serial(rdev);
+}
+
+static int rdev_init_serial(struct md_rdev *rdev)
+{
+ /* serial_nums equals BARRIER_BUCKETS_NR */
+ int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
+ struct serial_in_rdev *serial = NULL;
+
+ if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- spin_lock_init(&rdev->wb_list_lock);
- INIT_LIST_HEAD(&rdev->wb_list);
- init_waitqueue_head(&rdev->wb_io_wait);
- set_bit(WBCollisionCheck, &rdev->flags);
+ serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+ GFP_KERNEL);
+ if (!serial)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < serial_nums; i++) {
+ struct serial_in_rdev *serial_tmp = &serial[i];
+
+ spin_lock_init(&serial_tmp->serial_lock);
+ serial_tmp->serial_rb = RB_ROOT_CACHED;
+ init_waitqueue_head(&serial_tmp->serial_io_wait);
+ }
+
+ rdev->serial = serial;
+ set_bit(CollisionCheck, &rdev->flags);
+
+ return 0;
+}
+
+static int rdevs_init_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ int ret = 0;
+
+ rdev_for_each(rdev, mddev) {
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ break;
+ }
+
+ /* Free all resources if the pool does not exist */
+ if (ret && !mddev->serial_info_pool)
+ rdevs_uninit_serial(mddev);
+
+ return ret;
}
/*
- * Create wb_info_pool if rdev is the first multi-queue device flaged
- * with writemostly, also write-behind mode is enabled.
+ * rdev needs to enable the serialization machinery if it meets both conditions:
+ * 1. it is a multi-queue device flagged with writemostly.
+ * 2. the write-behind mode is enabled.
*/
-void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend)
+static int rdev_need_serial(struct md_rdev *rdev)
{
- if (mddev->bitmap_info.max_write_behind == 0)
- return;
+ return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
+ rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ test_bit(WriteMostly, &rdev->flags));
+}
+
+/*
+ * Init resource for rdev(s), then create serial_info_pool if:
+ * 1. rdev is the first device that returns true from rdev_need_serial.
+ * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
+ */
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ int ret = 0;
- if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+ if (rdev && !rdev_need_serial(rdev) &&
+ !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool == NULL) {
+ if (!is_suspend)
+ mddev_suspend(mddev);
+
+ if (!rdev)
+ ret = rdevs_init_serial(mddev);
+ else
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ goto abort;
+
+ if (mddev->serial_info_pool == NULL) {
unsigned int noio_flag;
- if (!is_suspend)
- mddev_suspend(mddev);
noio_flag = memalloc_noio_save();
- mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
memalloc_noio_restore(noio_flag);
- if (!mddev->wb_info_pool)
- pr_err("can't alloc memory pool for writemostly\n");
- if (!is_suspend)
- mddev_resume(mddev);
+ if (!mddev->serial_info_pool) {
+ rdevs_uninit_serial(mddev);
+ pr_err("can't alloc memory pool for serialization\n");
+ }
}
+
+abort:
+ if (!is_suspend)
+ mddev_resume(mddev);
}
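The memalloc_noio_save()/restore() pair above is what makes it safe to allocate the mempool while the array is suspended: every allocation inside the scope behaves as GFP_NOIO, so reclaim cannot recurse into the suspended block device. A sketch of the idiom in isolation (my_setup() is a hypothetical caller, not part of the patch):

#include <linux/sched/mm.h>
#include <linux/mempool.h>

static mempool_t *my_setup(size_t elem_size)
{
	unsigned int noio_flag;
	mempool_t *pool;

	noio_flag = memalloc_noio_save();	/* enter NOIO scope */
	pool = mempool_create_kmalloc_pool(8, elem_size);
	memalloc_noio_restore(noio_flag);	/* leave NOIO scope */

	return pool;				/* NULL on failure */
}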
-EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
/*
- * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck.
+ * Free resources from the rdev(s), and destroy serial_info_pool when:
+ * 1. rdev is the last device flagged with CollisionCheck.
+ * 2. the bitmap is destroyed while the policy is not enabled.
+ * 3. the policy is being disabled and no rdev needs the pool any more.
*/
-static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
+void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
{
- if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
+ if (rdev && !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool) {
+ if (mddev->serial_info_pool) {
struct md_rdev *temp;
- int num = 0;
+ int num = 0; /* used to track whether other rdevs need the pool */
- /*
- * Check if other rdevs need wb_info_pool.
- */
- rdev_for_each(temp, mddev)
- if (temp != rdev &&
- test_bit(WBCollisionCheck, &temp->flags))
+ if (!is_suspend)
+ mddev_suspend(mddev);
+ rdev_for_each(temp, mddev) {
+ if (!rdev) {
+ if (!mddev->serialize_policy ||
+ !rdev_need_serial(temp))
+ rdev_uninit_serial(temp);
+ else
+ num++;
+ } else if (temp != rdev &&
+ test_bit(CollisionCheck, &temp->flags))
num++;
- if (!num) {
- mddev_suspend(rdev->mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- mddev_resume(rdev->mddev);
}
+
+ if (rdev)
+ rdev_uninit_serial(rdev);
+
+ if (num)
+ pr_info("The mempool could be used by other devices\n");
+ else {
+ mempool_destroy(mddev->serial_info_pool);
+ mddev->serial_info_pool = NULL;
+ }
+ if (!is_suspend)
+ mddev_resume(mddev);
}
}
@@ -2337,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
pr_debug("md: bind<%s>\n", b);
if (mddev->raid_disks)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2375,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2888,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
- mddev_create_wb_pool(rdev->mddev, rdev, false);
+ mddev_create_serial_pool(rdev->mddev, rdev, false);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
@@ -5277,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev =
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
fail_last_dev_store);
+static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
+{
+ if (mddev->pers == NULL || (mddev->pers->level != 1))
+ return sprintf(page, "n/a\n");
+ else
+ return sprintf(page, "%d\n", mddev->serialize_policy);
+}
+
+/*
+ * Setting serialize_policy to true enforces that write IO is not
+ * reordered for raid1.
+ */
+static ssize_t
+serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int err;
+ bool value;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ if (value == mddev->serialize_policy)
+ return len;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ pr_err("md: serialize_policy is only effective for raid1\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ mddev_suspend(mddev);
+ if (value)
+ mddev_create_serial_pool(mddev, NULL, true);
+ else
+ mddev_destroy_serial_pool(mddev, NULL, true);
+ mddev->serialize_policy = value;
+ mddev_resume(mddev);
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
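From userspace the new attribute appears under the array's md directory; kstrtobool() accepts "1"/"0", "y"/"n" and "on"/"off". A hedged sketch of toggling it (the /sys/block/md0 path is an assumption; substitute your array):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/serialize_policy", "w");

	if (!f) {
		perror("serialize_policy");
		return 1;
	}
	fputs("1\n", f);	/* enable write-IO serialization */
	return fclose(f) ? 1 : 0;
}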
+
+static struct md_sysfs_entry md_serialize_policy =
+__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
+ serialize_policy_store);
+
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
@@ -5294,6 +5436,7 @@ static struct attribute *md_default_attrs[] = {
&max_corr_read_errors.attr,
&md_consistency_policy.attr,
&md_fail_last_dev.attr,
+ &md_serialize_policy.attr,
NULL,
};
@@ -5769,18 +5912,18 @@ int md_run(struct mddev *mddev)
goto bitmap_abort;
if (mddev->bitmap_info.max_write_behind > 0) {
- bool creat_pool = false;
+ bool create_pool = false;
rdev_for_each(rdev, mddev) {
if (test_bit(WriteMostly, &rdev->flags) &&
- rdev_init_wb(rdev))
- creat_pool = true;
- }
- if (creat_pool && mddev->wb_info_pool == NULL) {
- mddev->wb_info_pool =
- mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
- if (!mddev->wb_info_pool) {
+ rdev_init_serial(rdev))
+ create_pool = true;
+ }
+ if (create_pool && mddev->serial_info_pool == NULL) {
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
+ if (!mddev->serial_info_pool) {
err = -ENOMEM;
goto bitmap_abort;
}
@@ -6025,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ /* disable the policy so rdevs free their serialization resources */
+ mddev->serialize_policy = 0;
+ mddev_destroy_serial_pool(mddev, NULL, true);
}
void md_stop_writes(struct mddev *mddev)
@@ -8135,13 +8279,12 @@ static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
return mask;
}
-static const struct file_operations md_seq_fops = {
- .owner = THIS_MODULE,
- .open = md_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
- .poll = mdstat_poll,
+static const struct proc_ops mdstat_proc_ops = {
+ .proc_open = md_seq_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_poll = mdstat_poll,
};
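struct proc_ops is the 5.6 replacement for registering procfs files with struct file_operations; it drops the .owner field and renames the callbacks. A minimal sketch of the pattern with a hypothetical demo_show(), using the seq_file helpers just as mdstat does:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* registered with: proc_create("demo", 0444, NULL, &demo_proc_ops); */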
int register_md_personality(struct md_personality *p)
@@ -9310,7 +9453,7 @@ static void md_geninit(void)
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
- proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
+ proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
}
static int __init md_init(void)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5f86f8adb0a4..acd681939112 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -32,6 +32,16 @@
* be retried.
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+
+/*
+ * The struct embedded in rdev is used to serialize IO.
+ */
+struct serial_in_rdev {
+ struct rb_root_cached serial_rb;
+ spinlock_t serial_lock;
+ wait_queue_head_t serial_io_wait;
+};
+
/*
* MD's 'extended' device
*/
@@ -110,12 +120,7 @@ struct md_rdev {
* in superblock.
*/
- /*
- * The members for check collision of write behind IOs.
- */
- struct list_head wb_list;
- spinlock_t wb_list_lock;
- wait_queue_head_t wb_io_wait;
+ struct serial_in_rdev *serial; /* used for raid1 io serialization */
struct work_struct del_work; /* used for delayed sysfs removal */
@@ -201,9 +206,9 @@ enum flag_bits {
* it didn't fail, so don't use FailFast
* any more for metadata
*/
- WBCollisionCheck, /*
- * multiqueue device should check if there
- * is collision between write behind bios.
+ CollisionCheck, /*
+ * check if there is collision between raid1
+ * serial bios.
*/
};
@@ -263,12 +268,13 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_WB_INFOS 8
-/* record current range of write behind IOs */
-struct wb_info {
- sector_t lo;
- sector_t hi;
- struct list_head list;
+#define NR_SERIAL_INFOS 8
+/* record the current range of serialized IOs */
+struct serial_info {
+ struct rb_node node;
+ sector_t start; /* start sector of rb node */
+ sector_t last; /* end sector of rb node */
+ sector_t _subtree_last; /* highest sector in subtree of rb node */
};
struct mddev {
@@ -487,13 +493,14 @@ struct mddev {
*/
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
- mempool_t *wb_info_pool;
+ mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
bool has_superblocks:1;
bool fail_last_dev:1;
+ bool serialize_policy:1;
};
enum recovery_flags {
@@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
-extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend);
+extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
+extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index bd68f6fef694..d8b4125e338c 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -380,6 +380,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return -ENOSPC;
}
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+ dm_block_t begin, dm_block_t end, dm_block_t *b)
+{
+ int r;
+ uint32_t count;
+
+ do {
+ r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
+ if (r)
+ break;
+
+ /* double check this block wasn't used in the old transaction */
+ if (*b >= old_ll->nr_blocks)
+ count = 0;
+ else {
+ r = sm_ll_lookup(old_ll, *b, &count);
+ if (r)
+ break;
+
+ if (count)
+ begin = *b + 1;
+ }
+ } while (count);
+
+ return r;
+}
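The loop above looks for a block that is free in the new space map, then double-checks it against the previous transaction, skipping forward past any block still referenced there; only a block free in both may be handed out before commit. A self-contained toy model of the same retry loop (the bitmaps and find_free() are illustrative stand-ins for sm_ll_find_free_block() and sm_ll_lookup()):

#include <stdint.h>
#include <stdio.h>

#define NR_BLOCKS 8

/* toy allocation maps: 1 = allocated; index = block number */
static const uint32_t new_alloc[NR_BLOCKS] = { 1, 1, 0, 0, 1, 0, 0, 0 };
static const uint32_t old_alloc[NR_BLOCKS] = { 1, 1, 1, 0, 0, 0, 1, 0 };

static int find_free(uint64_t begin, uint64_t end, uint64_t *b)
{
	for (uint64_t i = begin; i < end; i++)
		if (!new_alloc[i]) {
			*b = i;
			return 0;
		}
	return -1;	/* -ENOSPC stand-in */
}

int main(void)
{
	uint64_t begin = 0, b = 0;
	uint32_t count = 0;
	int r;

	/* mirrors sm_ll_find_common_free_block(): free in new AND old */
	do {
		r = find_free(begin, NR_BLOCKS, &b);
		if (r)
			break;
		count = old_alloc[b];	/* stand-in for sm_ll_lookup() */
		if (count)
			begin = b + 1;	/* still live in the old ll: skip */
	} while (count);

	if (!r)
		printf("block %llu is free in both transactions\n",
		       (unsigned long long)b);
	return 0;
}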
+
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
int (*mutator)(void *context, uint32_t old, uint32_t *new),
void *context, enum allocation_event *ev)
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index b3078d5eda0c..8de63ce39bdd 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result);
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+ dm_block_t begin, dm_block_t end, dm_block_t *result);
int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index 32adf6b4a9c7..bf4c5e2ccb6f 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- /* FIXME: we should loop round a couple of times */
- r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
+ /*
+ * Any block we allocate has to be free in both the old and current ll.
+ */
+ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
if (r)
return r;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 25328582cc48..9e3c64ec2026 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
- r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
+ /*
+ * Any block we allocate has to be free in both the old and current ll.
+ */
+ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
if (r)
return r;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b7c20979bd19..322386ff5d22 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned short blksize = 512;
+ unsigned blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 201fd8aec59a..cd810e195086 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/interval_tree_generic.h>
#include <trace/events/block.h>
@@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#include "raid1-10.c"
-static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
+ START, LAST, static inline, raid1_rb);
+
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+ struct serial_info *si, int idx)
{
- struct wb_info *wi, *temp_wi;
unsigned long flags;
int ret = 0;
- struct mddev *mddev = rdev->mddev;
-
- wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(temp_wi, &rdev->wb_list, list) {
- /* collision happened */
- if (hi > temp_wi->lo && lo < temp_wi->hi) {
- ret = -EBUSY;
- break;
- }
+ sector_t lo = r1_bio->sector;
+ sector_t hi = lo + r1_bio->sectors;
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ /* collision happened */
+ if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+ ret = -EBUSY;
+ else {
+ si->start = lo;
+ si->last = hi;
+ raid1_rb_insert(si, &serial->serial_rb);
}
-
- if (!ret) {
- wi->lo = lo;
- wi->hi = hi;
- list_add(&wi->list, &rdev->wb_list);
- } else
- mempool_free(wi, mddev->wb_info_pool);
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
return ret;
}
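The INTERVAL_TREE_DEFINE() invocation above generates the raid1_rb_insert(), raid1_rb_remove(), raid1_rb_iter_first() and raid1_rb_iter_next() helpers over the serial_info node/start/last/_subtree_last fields declared in md.h. The intervals are closed, which is the overlap semantics the collision check relies on; sketched in isolation:

#include <stdbool.h>
#include <stdint.h>

struct range {
	uint64_t start;	/* first sector covered */
	uint64_t last;	/* last sector covered */
};

/*
 * The test behind raid1_rb_iter_first(root, lo, hi): an existing range
 * [start, last] collides with the query [lo, hi] when it starts no
 * later than hi and ends no earlier than lo.
 */
static bool ranges_collide(const struct range *r, uint64_t lo, uint64_t hi)
{
	return r->start <= hi && r->last >= lo;
}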
-static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ struct mddev *mddev = rdev->mddev;
+ struct serial_info *si;
+ int idx = sector_to_idx(r1_bio->sector);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ if (WARN_ON(!mddev->serial_info_pool))
+ return;
+ si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+ wait_event(serial->serial_io_wait,
+ check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+}
+
+static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
- struct wb_info *wi;
+ struct serial_info *si;
unsigned long flags;
int found = 0;
struct mddev *mddev = rdev->mddev;
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(wi, &rdev->wb_list, list)
- if (hi == wi->hi && lo == wi->lo) {
- list_del(&wi->list);
- mempool_free(wi, mddev->wb_info_pool);
+ int idx = sector_to_idx(lo);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+ si; si = raid1_rb_iter_next(si, lo, hi)) {
+ if (si->start == lo && si->last == hi) {
+ raid1_rb_remove(si, &serial->serial_rb);
+ mempool_free(si, mddev->serial_info_pool);
found = 1;
break;
}
-
+ }
if (!found)
- WARN(1, "The write behind IO is not recorded\n");
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
- wake_up(&rdev->wb_io_wait);
+ WARN(1, "The write IO is not recorded for serialization\n");
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
+ wake_up(&serial->serial_io_wait);
}
/*
@@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio)
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
bool discard_error;
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
@@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- remove_wb(rdev, lo, hi);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ remove_serial(rdev, lo, hi);
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio)
call_bio_endio(r1_bio);
}
}
- }
+ } else if (rdev->mddev->serialize_policy)
+ remove_serial(rdev, lo, hi);
if (r1_bio->bios[mirror] == NULL)
rdev_dec_pending(rdev, conf->mddev);
@@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (i = 0; i < disks; i++) {
struct bio *mbio = NULL;
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (!r1_bio->bios[i])
continue;
@@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (r1_bio->behind_master_bio) {
- struct md_rdev *rdev = conf->mirrors[i].rdev;
-
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- wait_event(rdev->wb_io_wait,
- check_and_add_wb(rdev, lo, hi) == 0);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- }
+ } else if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
r1_bio->bios[i] = mbio;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d4d3b67ffbba..ba00e9877f02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
static int alloc_thread_groups(struct r5conf *conf, int cnt,
int *group_cnt,
- int *worker_cnt_per_group,
struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
@@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
unsigned int new;
int err;
struct r5worker_group *new_groups, *old_groups;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
if (old_groups)
flush_workqueue(raid5_wq);
- err = alloc_thread_groups(conf, new,
- &group_cnt, &worker_cnt_per_group,
- &new_groups);
+ err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
if (!err) {
spin_lock_irq(&conf->device_lock);
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = new;
conf->worker_groups = new_groups;
spin_unlock_irq(&conf->device_lock);
@@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt,
- int *group_cnt,
- int *worker_cnt_per_group,
+static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
struct r5worker_group **worker_groups)
{
int i, j, k;
ssize_t size;
struct r5worker *workers;
- *worker_cnt_per_group = cnt;
if (cnt == 0) {
*group_cnt = 0;
*worker_groups = NULL;
@@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct disk_info *disk;
char pers_name[6];
int i;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
struct r5worker_group *new_group;
int ret;
@@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
for (i = 0; i < PENDING_IO_MAX; i++)
list_add(&conf->pending_data[i].sibling, &conf->free_list);
/* Don't enable multi-threading by default*/
- if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
- &new_group)) {
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = 0;
conf->worker_groups = new_group;
} else
goto abort;
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 9340435a94a0..6c95dc471d4c 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -380,7 +380,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
} else {
list_del_init(&data->list);
if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
- data->adap->transmit_queue_sz--;
+ if (!WARN_ON(!data->adap->transmit_queue_sz))
+ data->adap->transmit_queue_sz--;
}
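WARN_ON() evaluates to its condition, so the guard above both leaves a stack trace when the counter would underflow and skips the decrement that would wrap it; the same construct is repeated in cec_thread_func() below. The idiom in isolation:

#include <linux/bug.h>

/* decrement a queue counter, warning instead of wrapping on underflow */
static void queue_sz_dec(unsigned int *sz)
{
	if (!WARN_ON(!*sz))
		(*sz)--;
}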
if (data->msg.tx_status & CEC_TX_STATUS_OK) {
@@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap)
* need to do anything special in that case.
*/
}
+ /*
+ * If something went wrong and this counter isn't what it should
+ * be, then this will reset it back to 0. Warn if it is not 0,
+ * since it indicates a bug, either in this framework or in a
+ * CEC driver.
+ */
+ if (WARN_ON(adap->transmit_queue_sz))
+ adap->transmit_queue_sz = 0;
}
/*
@@ -456,7 +465,7 @@ int cec_thread_func(void *_adap)
bool timeout = false;
u8 attempts;
- if (adap->transmitting) {
+ if (adap->transmit_in_progress) {
int err;
/*
@@ -491,7 +500,7 @@ int cec_thread_func(void *_adap)
goto unlock;
}
- if (adap->transmitting && timeout) {
+ if (adap->transmit_in_progress && timeout) {
/*
* If we timeout, then log that. Normally this does
* not happen and it is an indication of a faulty CEC
@@ -500,14 +509,18 @@ int cec_thread_func(void *_adap)
* so much traffic on the bus that the adapter was
* unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
*/
- pr_warn("cec-%s: message %*ph timed out\n", adap->name,
- adap->transmitting->msg.len,
- adap->transmitting->msg.msg);
+ if (adap->transmitting) {
+ pr_warn("cec-%s: message %*ph timed out\n", adap->name,
+ adap->transmitting->msg.len,
+ adap->transmitting->msg.msg);
+ /* Just give up on this. */
+ cec_data_cancel(adap->transmitting,
+ CEC_TX_STATUS_TIMEOUT);
+ } else {
+ pr_warn("cec-%s: transmit timed out\n", adap->name);
+ }
adap->transmit_in_progress = false;
adap->tx_timeouts++;
- /* Just give up on this. */
- cec_data_cancel(adap->transmitting,
- CEC_TX_STATUS_TIMEOUT);
goto unlock;
}
@@ -522,7 +535,8 @@ int cec_thread_func(void *_adap)
data = list_first_entry(&adap->transmit_queue,
struct cec_data, list);
list_del_init(&data->list);
- adap->transmit_queue_sz--;
+ if (!WARN_ON(!data->adap->transmit_queue_sz))
+ adap->transmit_queue_sz--;
/* Make this the current transmitting message */
adap->transmitting = data;
@@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
valid_la = false;
else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
valid_la = false;
- else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
valid_la = false;
else if (cec_msg_is_broadcast(msg) &&
- adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
- !(dir_fl & BCAST2_0))
+ adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
+ !(dir_fl & BCAST1_4))
valid_la = false;
}
if (valid_la && min_len) {
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index db7adffcdc76..0c52e1bb3910 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -183,24 +183,6 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
put_device(&devnode->dev);
}
-#ifdef CONFIG_CEC_NOTIFIER
-static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
-{
- cec_s_phys_addr(adap, pa, false);
-}
-
-void cec_register_cec_notifier(struct cec_adapter *adap,
- struct cec_notifier *notifier)
-{
- if (WARN_ON(!cec_is_registered(adap)))
- return;
-
- adap->notifier = notifier;
- cec_notifier_register(adap->notifier, adap, cec_cec_notify);
-}
-EXPORT_SYMBOL_GPL(cec_register_cec_notifier);
-#endif
-
#ifdef CONFIG_DEBUG_FS
static ssize_t cec_error_inj_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
@@ -416,8 +398,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
#endif
debugfs_remove_recursive(adap->cec_dir);
#ifdef CONFIG_CEC_NOTIFIER
- if (adap->notifier)
- cec_notifier_unregister(adap->notifier);
+ cec_notifier_cec_adap_unregister(adap->notifier, adap);
#endif
cec_devnode_unregister(adap);
}
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 7cf42b133dbc..4a841bee5cc2 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -25,7 +25,6 @@ struct cec_notifier {
struct cec_connector_info conn_info;
const char *conn_name;
struct cec_adapter *cec_adap;
- void (*callback)(struct cec_adapter *adap, u16 pa);
u16 phys_addr;
};
@@ -81,13 +80,12 @@ static void cec_notifier_release(struct kref *kref)
kfree(n);
}
-void cec_notifier_put(struct cec_notifier *n)
+static void cec_notifier_put(struct cec_notifier *n)
{
mutex_lock(&cec_notifiers_lock);
kref_put(&n->kref, cec_notifier_release);
mutex_unlock(&cec_notifiers_lock);
}
-EXPORT_SYMBOL_GPL(cec_notifier_put);
struct cec_notifier *
cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
@@ -162,7 +160,6 @@ void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
mutex_lock(&n->lock);
adap->notifier = NULL;
n->cec_adap = NULL;
- n->callback = NULL;
mutex_unlock(&n->lock);
cec_notifier_put(n);
}
@@ -175,9 +172,7 @@ void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
mutex_lock(&n->lock);
n->phys_addr = pa;
- if (n->callback)
- n->callback(n->cec_adap, n->phys_addr);
- else if (n->cec_adap)
+ if (n->cec_adap)
cec_s_phys_addr(n->cec_adap, n->phys_addr, false);
mutex_unlock(&n->lock);
}
@@ -198,34 +193,6 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
}
EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr_from_edid);
-void cec_notifier_register(struct cec_notifier *n,
- struct cec_adapter *adap,
- void (*callback)(struct cec_adapter *adap, u16 pa))
-{
- kref_get(&n->kref);
- mutex_lock(&n->lock);
- n->cec_adap = adap;
- n->callback = callback;
- n->callback(adap, n->phys_addr);
- mutex_unlock(&n->lock);
-}
-EXPORT_SYMBOL_GPL(cec_notifier_register);
-
-void cec_notifier_unregister(struct cec_notifier *n)
-{
- /* Do nothing unless cec_notifier_register was called first */
- if (!n->callback)
- return;
-
- mutex_lock(&n->lock);
- n->callback = NULL;
- n->cec_adap->notifier = NULL;
- n->cec_adap = NULL;
- mutex_unlock(&n->lock);
- cec_notifier_put(n);
-}
-EXPORT_SYMBOL_GPL(cec_notifier_unregister);
-
struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
{
struct platform_device *hdmi_pdev;
diff --git a/drivers/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
index 7bdf855aaecd..9bbd05053d42 100644
--- a/drivers/media/cec/cec-priv.h
+++ b/drivers/media/cec/cec-priv.h
@@ -9,7 +9,7 @@
#define _CEC_PRIV_H
#include <linux/cec-funcs.h>
-#include <media/cec.h>
+#include <media/cec-notifier.h>
#define dprintk(lvl, fmt, arg...) \
do { \
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index d16122039b0c..ccd15b4d4920 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -345,7 +345,8 @@ static int video_begin(struct saa7146_fh *fh)
fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
/* we need to have a valid format set here */
- BUG_ON(NULL == fmt);
+ if (!fmt)
+ return -EINVAL;
if (0 != (fmt->flags & FORMAT_IS_PLANAR)) {
resource = RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP|RESOURCE_DMA3_BRS;
@@ -398,7 +399,8 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
/* we need to have a valid format set here */
- BUG_ON(NULL == fmt);
+ if (!fmt)
+ return -EINVAL;
if (0 != (fmt->flags & FORMAT_IS_PLANAR)) {
resource = RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP|RESOURCE_DMA3_BRS;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index e652f4318284..eb5d5db96552 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -146,7 +146,7 @@ static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
* and the timecode field and flag if needed.
*/
if (q->copy_timestamp)
- vb->timestamp = v4l2_timeval_to_ns(&b->timestamp);
+ vb->timestamp = v4l2_buffer_get_timestamp(b);
vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
if (b->flags & V4L2_BUF_FLAG_TIMECODE)
vbuf->timecode = b->timecode;
@@ -482,7 +482,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
b->flags = vbuf->flags;
b->field = vbuf->field;
- b->timestamp = ns_to_timeval(vb->timestamp);
+ v4l2_buffer_set_timestamp(b, vb->timestamp);
b->timecode = vbuf->timecode;
b->sequence = vbuf->sequence;
b->reserved2 = 0;
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 4d5af352e249..1a4f0ca87c7c 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -105,7 +105,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (nums[i-1] + 1 != nums[i])
goto fail_map;
buf->vaddr = (__force void *)
- ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+ ioremap(__pfn_to_phys(nums[0]), size + offset);
} else {
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
PAGE_KERNEL);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 39a2c6ccf31d..5fde1d38b3e3 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -971,6 +971,7 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
dvbdmxfeed->feed.sec.secbufp = 0;
dvbdmxfeed->feed.sec.seclen = 0;
+ dvbdmxfeed->pusi_seen = false;
if (!dvbdmx->start_feed) {
mutex_unlock(&dvbdmx->mutex);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 917fe034af37..80b6a71aa33e 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -983,8 +983,8 @@ struct i2c_client *dvb_module_probe(const char *module_name,
board_info->addr = addr;
board_info->platform_data = platform_data;
request_module(module_name);
- client = i2c_new_device(adap, board_info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(adap, board_info);
+ if (!i2c_client_has_driver(client)) {
kfree(board_info);
return NULL;
}
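i2c_new_client_device() differs from the removed i2c_new_device() in returning an ERR_PTR rather than NULL on failure, and i2c_client_has_driver() folds the error check and the bound-driver check into one predicate. Sketched from memory of the <linux/i2c.h> helper (treat the exact body as an assumption):

#include <linux/err.h>
#include <linux/i2c.h>

/* true only for a successfully created client with a driver bound */
static inline bool i2c_client_has_driver_sketch(struct i2c_client *client)
{
	return !IS_ERR_OR_NULL(client) && client->dev.driver;
}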
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 496ebb8176c0..bc72d954dc1f 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -290,7 +290,8 @@ static int as102_fe_get_frontend(struct dvb_frontend *fe,
}
static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *settings) {
+ struct dvb_frontend_tune_settings *settings)
+{
settings->min_delay_ms = 1000;
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index c1717dde874b..8cdca051e51b 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -562,7 +562,7 @@ static int au8522_s_video_routing(struct v4l2_subdev *sd,
{
struct au8522_state *state = to_state(sd);
- switch(input) {
+ switch (input) {
case AU8522_COMPOSITE_CH1:
case AU8522_SVIDEO_CH13:
case AU8522_COMPOSITE_CH4_SIF:
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index d137199e13e6..b1618339eec0 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -530,8 +530,8 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
strscpy(board_info.type, "cxd2820r", I2C_NAME_SIZE);
board_info.addr = config->i2c_address;
board_info.platform_data = &pdata;
- client = i2c_new_device(adapter, &board_info);
- if (!client || !client->dev.driver)
+ client = i2c_new_client_device(adapter, &board_info);
+ if (!i2c_client_has_driver(client))
return NULL;
return pdata.get_dvb_frontend(client);
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index 3b26f61785d8..cafb41dba861 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -189,7 +189,8 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
adc = dib0070_read_reg(state, 0x19);
- dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV\n", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
+ dprintk("CAPTRIM=%d; ADC = %hd (ADC) & %dmV\n", state->captrim,
+ adc, (u32)adc * (u32)1800 / (u32)1024);
if (adc >= 400) {
adc -= 400;
@@ -200,7 +201,8 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
}
if (adc < state->adc_diff) {
- dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)\n", state->captrim, adc, state->adc_diff);
+ dprintk("CAPTRIM=%d is closer to target (%hd/%hd)\n",
+ state->captrim, adc, state->adc_diff);
state->adc_diff = adc;
state->fcaptrim = state->captrim;
}
@@ -364,7 +366,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
}
if (*tune_state == CT_TUNER_START) {
- dprintk("Tuning for Band: %hd (%d kHz)\n", band, freq);
+ dprintk("Tuning for Band: %d (%d kHz)\n", band, freq);
if (state->current_rf != freq) {
u8 REFDIV;
u32 FBDiv, Rest, FREF, VCOF_kHz;
@@ -442,12 +444,17 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
dib0070_write_reg(state, 0x20,
0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
- dprintk("REFDIV: %hd, FREF: %d\n", REFDIV, FREF);
+ dprintk("REFDIV: %u, FREF: %d\n", REFDIV, FREF);
dprintk("FBDIV: %d, Rest: %d\n", FBDiv, Rest);
- dprintk("Num: %hd, Den: %hd, SD: %hd\n", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
- dprintk("HFDIV code: %hd\n", state->current_tune_table_index->hfdiv);
- dprintk("VCO = %hd\n", state->current_tune_table_index->vco_band);
- dprintk("VCOF: ((%hd*%d) << 1))\n", state->current_tune_table_index->vco_multi, freq);
+ dprintk("Num: %u, Den: %u, SD: %d\n", (u16)Rest, Den,
+ (state->lo4 >> 12) & 0x1);
+ dprintk("HFDIV code: %u\n",
+ state->current_tune_table_index->hfdiv);
+ dprintk("VCO = %u\n",
+ state->current_tune_table_index->vco_band);
+ dprintk("VCOF: ((%u*%d) << 1))\n",
+ state->current_tune_table_index->vco_multi,
+ freq);
*tune_state = CT_TUNER_STEP_0;
} else { /* we are already tuned to this frequency - the configuration is correct */
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index d13d2e81f8c9..bc374750529b 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1748,7 +1748,8 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
}
dib0090_set_trim(state);
- dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd\n", state->dc->addr, state->adc_diff, state->step);
+ dprintk("BB Offset Cal, BBreg=%u,Offset=%d,Value Set=%d\n",
+ state->dc->addr, state->adc_diff, state->step);
state->dc++;
if (state->dc->addr == 0) /* done */
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index e211830c9c99..97ce97789c9e 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -808,7 +808,7 @@ static int dib7000m_agc_startup(struct dvb_frontend *demod)
dib7000m_restart_agc(state);
- dprintk("SPLIT %p: %hd\n", demod, agc_split);
+ dprintk("SPLIT %p: %u\n", demod, agc_split);
(*agc_state)++;
ret = 5;
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 0d22c700016d..0a7790c4bad3 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -915,7 +915,7 @@ static int dib7000p_agc_startup(struct dvb_frontend *demod)
dib7000p_restart_agc(state);
- dprintk("SPLIT %p: %hd\n", demod, agc_split);
+ dprintk("SPLIT %p: %u\n", demod, agc_split);
(*agc_state)++;
ret = 5;
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index 4db679cb70ad..9ff1ebaa5e04 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -31,25 +31,26 @@ static int dvb_dummy_fe_read_status(struct dvb_frontend *fe,
return 0;
}
-static int dvb_dummy_fe_read_ber(struct dvb_frontend* fe, u32* ber)
+static int dvb_dummy_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
*ber = 0;
return 0;
}
-static int dvb_dummy_fe_read_signal_strength(struct dvb_frontend* fe, u16* strength)
+static int dvb_dummy_fe_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
{
*strength = 0;
return 0;
}
-static int dvb_dummy_fe_read_snr(struct dvb_frontend* fe, u16* snr)
+static int dvb_dummy_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
*snr = 0;
return 0;
}
-static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
*ucblocks = 0;
return 0;
@@ -77,12 +78,12 @@ static int dvb_dummy_fe_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int dvb_dummy_fe_sleep(struct dvb_frontend* fe)
+static int dvb_dummy_fe_sleep(struct dvb_frontend *fe)
{
return 0;
}
-static int dvb_dummy_fe_init(struct dvb_frontend* fe)
+static int dvb_dummy_fe_init(struct dvb_frontend *fe)
{
return 0;
}
@@ -99,17 +100,18 @@ static int dvb_dummy_fe_set_voltage(struct dvb_frontend *fe,
return 0;
}
-static void dvb_dummy_fe_release(struct dvb_frontend* fe)
+static void dvb_dummy_fe_release(struct dvb_frontend *fe)
{
- struct dvb_dummy_fe_state* state = fe->demodulator_priv;
+ struct dvb_dummy_fe_state *state = fe->demodulator_priv;
+
kfree(state);
}
static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
-struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
+struct dvb_frontend *dvb_dummy_fe_ofdm_attach(void)
{
- struct dvb_dummy_fe_state* state = NULL;
+ struct dvb_dummy_fe_state *state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
@@ -117,16 +119,20 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
return NULL;
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &dvb_dummy_fe_ofdm_ops, sizeof(struct dvb_frontend_ops));
+ memcpy(&state->frontend.ops,
+ &dvb_dummy_fe_ofdm_ops,
+ sizeof(struct dvb_frontend_ops));
+
state->frontend.demodulator_priv = state;
return &state->frontend;
}
+EXPORT_SYMBOL(dvb_dummy_fe_ofdm_attach);
static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
{
- struct dvb_dummy_fe_state* state = NULL;
+ struct dvb_dummy_fe_state *state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
@@ -134,16 +140,20 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
return NULL;
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &dvb_dummy_fe_qpsk_ops, sizeof(struct dvb_frontend_ops));
+ memcpy(&state->frontend.ops,
+ &dvb_dummy_fe_qpsk_ops,
+ sizeof(struct dvb_frontend_ops));
+
state->frontend.demodulator_priv = state;
return &state->frontend;
}
+EXPORT_SYMBOL(dvb_dummy_fe_qpsk_attach);
static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
{
- struct dvb_dummy_fe_state* state = NULL;
+ struct dvb_dummy_fe_state *state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
@@ -151,10 +161,14 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
return NULL;
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &dvb_dummy_fe_qam_ops, sizeof(struct dvb_frontend_ops));
+ memcpy(&state->frontend.ops,
+ &dvb_dummy_fe_qam_ops,
+ sizeof(struct dvb_frontend_ops));
+
state->frontend.demodulator_priv = state;
return &state->frontend;
}
+EXPORT_SYMBOL(dvb_dummy_fe_qam_attach);
static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.delsys = { SYS_DVBT },
@@ -163,13 +177,21 @@ static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
.frequency_min_hz = 0,
.frequency_max_hz = 863250 * kHz,
.frequency_stepsize_hz = 62500,
- .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
- FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_8_9 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO,
},
.release = dvb_dummy_fe_release,
@@ -194,11 +216,16 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
.frequency_min_hz = 51 * MHz,
.frequency_max_hz = 858 * MHz,
.frequency_stepsize_hz = 62500,
- .symbol_rate_min = (57840000 / 2) / 64, /* SACLK/64 == (XIN/2)/64 */
+ /* symbol_rate_min: SACLK/64 == (XIN/2)/64 */
+ .symbol_rate_min = (57840000 / 2) / 64,
.symbol_rate_max = (57840000 / 2) / 4, /* SACLK/4 */
- .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
- FE_CAN_QAM_128 | FE_CAN_QAM_256 |
- FE_CAN_FEC_AUTO | FE_CAN_INVERSION_AUTO
+ .caps = FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_INVERSION_AUTO
},
.release = dvb_dummy_fe_release,
@@ -227,8 +254,12 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
FE_CAN_QPSK
},
@@ -253,7 +284,3 @@ static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
MODULE_DESCRIPTION("DVB DUMMY Frontend");
MODULE_AUTHOR("Emard");
MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(dvb_dummy_fe_ofdm_attach);
-EXPORT_SYMBOL(dvb_dummy_fe_qam_attach);
-EXPORT_SYMBOL(dvb_dummy_fe_qpsk_attach);
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h
index 526fabd7751f..463abf5ebd56 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.h
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h
@@ -12,23 +12,23 @@
#include <media/dvb_frontend.h>
#if IS_REACHABLE(CONFIG_DVB_DUMMY_FE)
-extern struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void);
-extern struct dvb_frontend* dvb_dummy_fe_qpsk_attach(void);
-extern struct dvb_frontend* dvb_dummy_fe_qam_attach(void);
+struct dvb_frontend *dvb_dummy_fe_ofdm_attach(void);
+struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void);
+struct dvb_frontend *dvb_dummy_fe_qam_attach(void);
#else
static inline struct dvb_frontend *dvb_dummy_fe_ofdm_attach(void)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
static inline struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
static inline struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif /* CONFIG_DVB_DUMMY_FE */
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 651c8aa75e17..da3a8c5e18d8 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -922,8 +922,8 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
strscpy(board_info.type, "lgdt330x", sizeof(board_info.type));
board_info.addr = demod_address;
board_info.platform_data = &config;
- client = i2c_new_device(i2c, &board_info);
- if (!client || !client->dev.driver)
+ client = i2c_new_client_device(i2c, &board_info);
+ if (!i2c_client_has_driver(client))
return NULL;
return lgdt330x_get_dvb_frontend(client);
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 3a367a585084..c96f05ff5f2f 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1277,8 +1277,8 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
board_info.addr = cfg->i2c_addr;
board_info.platform_data = &pdata;
- client = i2c_new_device(i2c, &board_info);
- if (!client || !client->dev.driver)
+ client = i2c_new_client_device(i2c, &board_info);
+ if (!i2c_client_has_driver(client))
return NULL;
*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 6c24d6d0d4c9..234607b02edb 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -519,8 +519,8 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
strscpy(board_info.type, "ts2020", I2C_NAME_SIZE);
board_info.addr = config->tuner_address;
board_info.platform_data = &pdata;
- client = i2c_new_device(i2c, &board_info);
- if (!client || !client->dev.driver)
+ client = i2c_new_client_device(i2c, &board_info);
+ if (!i2c_client_has_driver(client))
return NULL;
return fe;
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 5042f9e94aee..fccb388ce179 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
#define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
#define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
-#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
+#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
#define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
-#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
+#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
#define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
#define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
@@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
#define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
#define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
-#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
+#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
#define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
#define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
-#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
+#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
#define tx_read(t, r) adv748x_read(t->state, t->page, r)
#define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
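The added parentheses are not cosmetic: a caller passing a compound mask such as io_clrset(s, r, A | B, v) previously expanded to (val & ~A | B) | v, clearing only A and unconditionally setting B. A toy demonstration of the difference (values are arbitrary):

#include <stdio.h>

#define CLRSET_BAD(val, m, v)  (((val) & ~m) | v)
#define CLRSET_GOOD(val, m, v) (((val) & ~(m)) | (v))

int main(void)
{
	unsigned int val = 0xff;

	/* bad: ~ binds to 0x0f only, so 0x30 is OR'd in -> 0xf1 */
	printf("bad:  0x%02x\n", CLRSET_BAD(val, 0x0f | 0x30, 0x01));
	/* good: whole mask 0x3f is cleared before setting -> 0xc1 */
	printf("good: 0x%02x\n", CLRSET_GOOD(val, 0x0f | 0x30, 0x01));
	return 0;
}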
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 2dedd6ebb236..09004d928d11 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1503,23 +1503,14 @@ static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
{
- unsigned int freq;
int a, b;
a = hdmi_read(sd, 0x06);
b = hdmi_read(sd, 0x3b);
if (a < 0 || b < 0)
return 0;
- freq = a * 1000000 + ((b & 0x30) >> 4) * 250000;
- if (is_hdmi(sd)) {
- /* adjust for deep color mode */
- unsigned bits_per_channel = ((hdmi_read(sd, 0x0b) & 0x60) >> 4) + 8;
-
- freq = freq * 8 / bits_per_channel;
- }
-
- return freq;
+ return a * 1000000 + ((b & 0x30) >> 4) * 250000;
}
static unsigned int adv7611_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1530,9 +1521,28 @@ static unsigned int adv7611_read_hdmi_pixelclock(struct v4l2_subdev *sd)
b = hdmi_read(sd, 0x52);
if (a < 0 || b < 0)
return 0;
+
return ((a << 1) | (b >> 7)) * 1000000 + (b & 0x7f) * 1000000 / 128;
}
+static unsigned int adv76xx_read_hdmi_pixelclock(struct v4l2_subdev *sd)
+{
+ struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
+ unsigned int freq, bits_per_channel, pixelrepetition;
+
+ freq = info->read_hdmi_pixelclock(sd);
+ if (is_hdmi(sd)) {
+ /* adjust for deep color mode and pixel repetition */
+ bits_per_channel = ((hdmi_read(sd, 0x0b) & 0x60) >> 4) + 8;
+ pixelrepetition = (hdmi_read(sd, 0x05) & 0x0f) + 1;
+
+ freq = freq * 8 / bits_per_channel / pixelrepetition;
+ }
+
+ return freq;
+}
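The adjustment above scales the link-domain clock back to the real pixel clock: multiply by 8/bits_per_channel for deep color, then divide by the pixel-repetition factor. A worked example with assumed, illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int freq = 148500000;		/* clock as read from the chip */
	unsigned int bits_per_channel = 12;	/* deep color mode */
	unsigned int pixelrepetition = 2;

	freq = freq * 8 / bits_per_channel / pixelrepetition;
	printf("pixel clock: %u Hz\n", freq);	/* 49500000 */
	return 0;
}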
+
static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
@@ -1579,7 +1589,7 @@ static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
bt->width = w;
bt->height = h;
- bt->pixelclock = info->read_hdmi_pixelclock(sd);
+ bt->pixelclock = adv76xx_read_hdmi_pixelclock(sd);
bt->hfrontporch = hdmi_read16(sd, 0x20, info->hfrontporch_mask);
bt->hsync = hdmi_read16(sd, 0x22, info->hsync_mask);
bt->hbackporch = hdmi_read16(sd, 0x24, info->hbackporch_mask);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 4b9b98cf6674..5bd3ae82992f 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -428,10 +428,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
+ struct mt9v032 *mt9v032 = to_mt9v032(subdev);
+
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = mt9v032->format.code;
return 0;
}
@@ -439,7 +441,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ struct mt9v032 *mt9v032 = to_mt9v032(subdev);
+
+ if (fse->index >= 3)
+ return -EINVAL;
+ if (mt9v032->format.code != fse->code)
return -EINVAL;
fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index bb41bea950ac..61ae6a0d5679 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -103,7 +103,7 @@
#define MT9V111_MAX_CLKIN 27000000
/* The default sensor configuration at startup time. */
-static struct v4l2_mbus_framefmt mt9v111_def_fmt = {
+static const struct v4l2_mbus_framefmt mt9v111_def_fmt = {
.width = 640,
.height = 480,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 5e495c833d32..854031f0b64a 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -189,6 +189,7 @@ struct ov5640_mode_info {
u32 vtot;
const struct reg_value *reg_data;
u32 reg_data_size;
+ u32 max_fps;
};
struct ov5640_ctrls {
@@ -544,6 +545,7 @@ static const struct ov5640_mode_info ov5640_mode_init_data = {
0, SUBSAMPLING, 640, 1896, 480, 984,
ov5640_init_setting_30fps_VGA,
ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
+ OV5640_30_FPS,
};
static const struct ov5640_mode_info
@@ -551,39 +553,48 @@ ov5640_mode_data[OV5640_NUM_MODES] = {
{OV5640_MODE_QCIF_176_144, SUBSAMPLING,
176, 1896, 144, 984,
ov5640_setting_QCIF_176_144,
- ARRAY_SIZE(ov5640_setting_QCIF_176_144)},
+ ARRAY_SIZE(ov5640_setting_QCIF_176_144),
+ OV5640_30_FPS},
{OV5640_MODE_QVGA_320_240, SUBSAMPLING,
320, 1896, 240, 984,
ov5640_setting_QVGA_320_240,
- ARRAY_SIZE(ov5640_setting_QVGA_320_240)},
+ ARRAY_SIZE(ov5640_setting_QVGA_320_240),
+ OV5640_30_FPS},
{OV5640_MODE_VGA_640_480, SUBSAMPLING,
640, 1896, 480, 1080,
ov5640_setting_VGA_640_480,
- ARRAY_SIZE(ov5640_setting_VGA_640_480)},
+ ARRAY_SIZE(ov5640_setting_VGA_640_480),
+ OV5640_60_FPS},
{OV5640_MODE_NTSC_720_480, SUBSAMPLING,
720, 1896, 480, 984,
ov5640_setting_NTSC_720_480,
- ARRAY_SIZE(ov5640_setting_NTSC_720_480)},
+ ARRAY_SIZE(ov5640_setting_NTSC_720_480),
+ OV5640_30_FPS},
{OV5640_MODE_PAL_720_576, SUBSAMPLING,
720, 1896, 576, 984,
ov5640_setting_PAL_720_576,
- ARRAY_SIZE(ov5640_setting_PAL_720_576)},
+ ARRAY_SIZE(ov5640_setting_PAL_720_576),
+ OV5640_30_FPS},
{OV5640_MODE_XGA_1024_768, SUBSAMPLING,
1024, 1896, 768, 1080,
ov5640_setting_XGA_1024_768,
- ARRAY_SIZE(ov5640_setting_XGA_1024_768)},
+ ARRAY_SIZE(ov5640_setting_XGA_1024_768),
+ OV5640_30_FPS},
{OV5640_MODE_720P_1280_720, SUBSAMPLING,
1280, 1892, 720, 740,
ov5640_setting_720P_1280_720,
- ARRAY_SIZE(ov5640_setting_720P_1280_720)},
+ ARRAY_SIZE(ov5640_setting_720P_1280_720),
+ OV5640_30_FPS},
{OV5640_MODE_1080P_1920_1080, SCALING,
1920, 2500, 1080, 1120,
ov5640_setting_1080P_1920_1080,
- ARRAY_SIZE(ov5640_setting_1080P_1920_1080)},
+ ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
+ OV5640_30_FPS},
{OV5640_MODE_QSXGA_2592_1944, SCALING,
2592, 2844, 1944, 1968,
ov5640_setting_QSXGA_2592_1944,
- ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944)},
+ ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
+ OV5640_15_FPS},
};
static int ov5640_init_slave_id(struct ov5640_dev *sensor)
@@ -874,7 +885,7 @@ static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor,
* We have reached the maximum allowed PLL1 output,
* increase sysdiv.
*/
- if (!rate)
+ if (!_rate)
break;
/*
@@ -1606,14 +1617,8 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
(!nearest && (mode->hact != width || mode->vact != height)))
return NULL;
- /* Only 640x480 can operate at 60fps (for now) */
- if (fr == OV5640_60_FPS &&
- !(mode->hact == 640 && mode->vact == 480))
- return NULL;
-
- /* 2592x1944 only works at 15fps max */
- if ((mode->hact == 2592 && mode->vact == 1944) &&
- fr > OV5640_15_FPS)
+ /* Check whether the requested frame rate exceeds this mode's maximum */
+ if (ov5640_framerates[fr] > ov5640_framerates[mode->max_fps])
return NULL;
return mode;
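The new per-mode max_fps field replaces the two hardcoded special cases (only 640x480 at 60 fps; 2592x1944 capped at 15 fps) with a single table-driven comparison, routed through ov5640_framerates[] so enum order need not be numeric. A reduced sketch of the same check (names and values assumed):

#include <stdio.h>

enum fr { FR_15, FR_30, FR_60, FR_NUM };

static const int framerates[FR_NUM] = { 15, 30, 60 };

struct mode {
	const char *name;
	enum fr max_fps;	/* per-mode ceiling, as in the patch */
};

static int mode_supports(const struct mode *m, enum fr requested)
{
	return framerates[requested] <= framerates[m->max_fps];
}

int main(void)
{
	struct mode qsxga = { "2592x1944", FR_15 };

	printf("%s @60fps: %s\n", qsxga.name,
	       mode_supports(&qsxga, FR_60) ? "ok" : "rejected");
	return 0;
}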
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 84f9771b5fed..a80d7701b519 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -413,21 +413,14 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
struct smiapp_sensor *sensor =
container_of(ctrl->handler, struct smiapp_subdev, ctrl_handler)
->sensor;
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int pm_status;
u32 orient = 0;
+ unsigned int i;
int exposure;
int rval;
switch (ctrl->id) {
- case V4L2_CID_ANALOGUE_GAIN:
- return smiapp_write(
- sensor,
- SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL, ctrl->val);
-
- case V4L2_CID_EXPOSURE:
- return smiapp_write(
- sensor,
- SMIAPP_REG_U16_COARSE_INTEGRATION_TIME, ctrl->val);
-
case V4L2_CID_HFLIP:
case V4L2_CID_VFLIP:
if (sensor->streaming)
@@ -440,15 +433,10 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
orient ^= sensor->hvflip_inv_mask;
- rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
- orient);
- if (rval < 0)
- return rval;
smiapp_update_mbus_formats(sensor);
- return 0;
-
+ break;
case V4L2_CID_VBLANK:
exposure = sensor->exposure->val;
@@ -461,59 +449,105 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
return rval;
}
- return smiapp_write(
- sensor, SMIAPP_REG_U16_FRAME_LENGTH_LINES,
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
- + ctrl->val);
-
- case V4L2_CID_HBLANK:
- return smiapp_write(
- sensor, SMIAPP_REG_U16_LINE_LENGTH_PCK,
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
- + ctrl->val);
-
+ break;
case V4L2_CID_LINK_FREQ:
if (sensor->streaming)
return -EBUSY;
- return smiapp_pll_update(sensor);
-
- case V4L2_CID_TEST_PATTERN: {
- unsigned int i;
+ rval = smiapp_pll_update(sensor);
+ if (rval)
+ return rval;
+ return 0;
+ case V4L2_CID_TEST_PATTERN:
for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++)
v4l2_ctrl_activate(
sensor->test_data[i],
ctrl->val ==
V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR);
- return smiapp_write(
- sensor, SMIAPP_REG_U16_TEST_PATTERN_MODE, ctrl->val);
+ break;
}
+ pm_runtime_get_noresume(&client->dev);
+ pm_status = pm_runtime_get_if_in_use(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ if (!pm_status)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ rval = smiapp_write(
+ sensor,
+ SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL, ctrl->val);
+
+ break;
+ case V4L2_CID_EXPOSURE:
+ rval = smiapp_write(
+ sensor,
+ SMIAPP_REG_U16_COARSE_INTEGRATION_TIME, ctrl->val);
+
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
+ orient);
+
+ break;
+ case V4L2_CID_VBLANK:
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_FRAME_LENGTH_LINES,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + ctrl->val);
+
+ break;
+ case V4L2_CID_HBLANK:
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_LINE_LENGTH_PCK,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+ + ctrl->val);
+
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_TEST_PATTERN_MODE, ctrl->val);
+
+ break;
case V4L2_CID_TEST_PATTERN_RED:
- return smiapp_write(
+ rval = smiapp_write(
sensor, SMIAPP_REG_U16_TEST_DATA_RED, ctrl->val);
+ break;
case V4L2_CID_TEST_PATTERN_GREENR:
- return smiapp_write(
+ rval = smiapp_write(
sensor, SMIAPP_REG_U16_TEST_DATA_GREENR, ctrl->val);
+ break;
case V4L2_CID_TEST_PATTERN_BLUE:
- return smiapp_write(
+ rval = smiapp_write(
sensor, SMIAPP_REG_U16_TEST_DATA_BLUE, ctrl->val);
+ break;
case V4L2_CID_TEST_PATTERN_GREENB:
- return smiapp_write(
+ rval = smiapp_write(
sensor, SMIAPP_REG_U16_TEST_DATA_GREENB, ctrl->val);
+ break;
case V4L2_CID_PIXEL_RATE:
/* For v4l2_ctrl_s_ctrl_int64() used internally. */
- return 0;
+ rval = 0;
+ break;
default:
- return -EINVAL;
+ rval = -EINVAL;
}
+
+ if (pm_status > 0) {
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+ }
+
+ return rval;
}
static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
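The rework above splits smiapp_set_ctrl() into a software phase that always runs and a register phase that is skipped while the sensor is runtime-suspended; suspended hardware gets the cached values replayed on resume instead. The gate is the pm_runtime_get_noresume()/pm_runtime_get_if_in_use()/pm_runtime_put_noidle() triple, whose net effect is one held reference if the device is active and none otherwise. A condensed sketch of the idiom; write_hw() is a hypothetical callback, not a driver function:

#include <linux/i2c.h>
#include <linux/pm_runtime.h>

/* Sketch of the runtime-PM write gate; write_hw() is a placeholder. */
static int set_ctrl_gated(struct i2c_client *client,
			  int (*write_hw)(struct i2c_client *, u32), u32 val)
{
	int pm_status, rval;

	pm_runtime_get_noresume(&client->dev);
	pm_status = pm_runtime_get_if_in_use(&client->dev);
	pm_runtime_put_noidle(&client->dev);
	if (!pm_status)
		return 0;	/* suspended: applied later by the resume path */

	rval = write_hw(client, val);

	if (pm_status > 0) {	/* drop the reference get_if_in_use took */
		pm_runtime_mark_last_busy(&client->dev);
		pm_runtime_put_autosuspend(&client->dev);
	}
	return rval;
}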
@@ -1184,10 +1218,6 @@ static int smiapp_power_on(struct device *dev)
sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
usleep_range(sleep, sleep);
- mutex_lock(&sensor->mutex);
-
- sensor->active = true;
-
/*
* Failures to respond to the address change command have been noticed.
* Those failures seem to be caused by the sensor requiring a longer
@@ -1270,24 +1300,9 @@ static int smiapp_power_on(struct device *dev)
goto out_cci_addr_fail;
}
- /* Are we still initialising...? If not, proceed with control setup. */
- if (sensor->pixel_array) {
- rval = __v4l2_ctrl_handler_setup(
- &sensor->pixel_array->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
-
- rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
- }
-
- mutex_unlock(&sensor->mutex);
-
return 0;
out_cci_addr_fail:
- mutex_unlock(&sensor->mutex);
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
@@ -1305,8 +1320,6 @@ static int smiapp_power_off(struct device *dev)
struct smiapp_sensor *sensor =
container_of(ssd, struct smiapp_sensor, ssds[0]);
- mutex_lock(&sensor->mutex);
-
/*
* Currently, power/clock to the lens are enabled/disabled separately
* but they are essentially the same signals. So if the sensor is
@@ -1319,10 +1332,6 @@ static int smiapp_power_off(struct device *dev)
SMIAPP_REG_U8_SOFTWARE_RESET,
SMIAPP_SOFTWARE_RESET);
- sensor->active = false;
-
- mutex_unlock(&sensor->mutex);
-
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
@@ -1507,6 +1516,30 @@ out:
* V4L2 subdev video operations
*/
+static int smiapp_pm_get_init(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ return rval;
+ } else if (!rval) {
+ rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->
+ ctrl_handler);
+ if (rval)
+ return rval;
+
+ return v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ }
+
+ return 0;
+}
+
static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
@@ -1516,22 +1549,23 @@ static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
if (sensor->streaming == enable)
return 0;
- if (enable) {
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(&client->dev);
- pm_runtime_put(&client->dev);
- return rval;
- }
+ if (!enable) {
+ smiapp_stop_streaming(sensor);
+ sensor->streaming = false;
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- sensor->streaming = true;
+ return 0;
+ }
- rval = smiapp_start_streaming(sensor);
- if (rval < 0)
- sensor->streaming = false;
- } else {
- rval = smiapp_stop_streaming(sensor);
+ rval = smiapp_pm_get_init(sensor);
+ if (rval)
+ return rval;
+
+ sensor->streaming = true;
+
+ rval = smiapp_start_streaming(sensor);
+ if (rval < 0) {
sensor->streaming = false;
pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
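smiapp_pm_get_init() takes over the control replay that the removed smiapp_power_on() block used to do under sensor->mutex: pm_runtime_get_sync() returns 1 if the device was already active (controls are current) and 0 if it actually resumed it, in which case the handlers are replayed once. A sketch of the resume-and-replay idiom, with replay() standing in for the v4l2_ctrl_handler_setup() calls and the pm_runtime_set_active() error bookkeeping omitted:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int pm_get_init(struct device *dev, int (*replay)(void *), void *priv)
{
	int rval = pm_runtime_get_sync(dev);

	if (rval < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return rval;
	}
	if (rval > 0)		/* was already powered: nothing to replay */
		return 0;
	return replay(priv);	/* freshly resumed: push cached controls */
}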
@@ -2291,13 +2325,9 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
if (!sensor->dev_init_done)
return -EBUSY;
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(&client->dev);
- pm_runtime_put_noidle(&client->dev);
+ rval = smiapp_pm_get_init(sensor);
+ if (rval < 0)
return -ENODEV;
- }
rval = smiapp_read_nvm(sensor, buf, PAGE_SIZE);
if (rval < 0) {
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 0470e47c2f7a..ce8c1d47fbf0 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -223,9 +223,6 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
len != SMIAPP_REG_32BIT) || flags)
return -EINVAL;
- if (!sensor->active)
- return 0;
-
msg.addr = client->addr;
msg.flags = 0; /* Write */
msg.len = 2 + len;
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 3ab874a5deba..4837d80dc453 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -198,7 +198,6 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
- bool active; /* is the sensor powered on? */
u16 embedded_start; /* embedded data start line */
u16 embedded_end;
u16 image_start; /* image data start line */
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 492bc85c2700..41226f1d0e5b 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -386,7 +386,7 @@ void init_bttv_i2c_ir(struct bttv *btv)
if (btv->init_data.name) {
info.platform_data = &btv->init_data;
- i2c_dev = i2c_new_device(&btv->c.i2c_adap, &info);
+ i2c_dev = i2c_new_client_device(&btv->c.i2c_adap, &info);
} else {
/*
* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -396,9 +396,9 @@ void init_bttv_i2c_ir(struct bttv *btv)
* internal.
* That's why we probe 0x1a (~0x34) first. CB
*/
- i2c_dev = i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
+ i2c_dev = i2c_new_scanned_device(&btv->c.i2c_adap, &info, addr_list, NULL);
}
- if (NULL == i2c_dev)
+ if (IS_ERR(i2c_dev))
return;
#if defined(CONFIG_MODULES) && defined(MODULE)
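The bttv change is part of a tree-wide conversion: i2c_new_device()/i2c_new_probed_device() returned NULL on failure, while their replacements i2c_new_client_device()/i2c_new_scanned_device() return an ERR_PTR, which is why the NULL test above becomes IS_ERR(). A minimal sketch of the new calling convention; attach_ir_client() is an illustrative wrapper, not a driver function:

#include <linux/err.h>
#include <linux/i2c.h>

/* Sketch: the ERR_PTR-returning instantiation API. */
static int attach_ir_client(struct i2c_adapter *adap,
			    struct i2c_board_info *info)
{
	struct i2c_client *cl = i2c_new_client_device(adap, info);

	if (IS_ERR(cl))		/* a NULL check would miss real errors */
		return PTR_ERR(cl);
	return 0;
}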
diff --git a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
index 38d00935a292..9e7504e3cfd8 100644
--- a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
+++ b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <media/v4l2-device.h>
@@ -238,54 +237,6 @@ static int snd_cobalt_pcm_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_cobalt_pcm_ioctl(struct snd_pcm_substream *substream,
- unsigned int cmd, void *arg)
-{
- return snd_pcm_lib_ioctl(substream, cmd, arg);
-}
-
-
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
- size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- dprintk("Allocating vbuffer\n");
- if (runtime->dma_area) {
- if (runtime->dma_bytes > size)
- return 0;
-
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc(size);
- if (!runtime->dma_area)
- return -ENOMEM;
-
- runtime->dma_bytes = size;
-
- return 0;
-}
-
-static int snd_cobalt_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- dprintk("%s called\n", __func__);
-
- return snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(params));
-}
-
-static int snd_cobalt_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- if (substream->runtime->dma_area) {
- dprintk("freeing pcm capture region\n");
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_area = NULL;
- }
-
- return 0;
-}
-
static int snd_cobalt_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_cobalt_card *cobsc = snd_pcm_substream_chip(substream);
@@ -490,36 +441,20 @@ snd_pcm_uframes_t snd_cobalt_pcm_pb_pointer(struct snd_pcm_substream *substream)
substream->runtime->buffer_size;
}
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
-
- return vmalloc_to_page(pageptr);
-}
-
static const struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
.open = snd_cobalt_pcm_capture_open,
.close = snd_cobalt_pcm_capture_close,
- .ioctl = snd_cobalt_pcm_ioctl,
- .hw_params = snd_cobalt_pcm_hw_params,
- .hw_free = snd_cobalt_pcm_hw_free,
.prepare = snd_cobalt_pcm_prepare,
.trigger = snd_cobalt_pcm_trigger,
.pointer = snd_cobalt_pcm_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
.open = snd_cobalt_pcm_playback_open,
.close = snd_cobalt_pcm_playback_close,
- .ioctl = snd_cobalt_pcm_ioctl,
- .hw_params = snd_cobalt_pcm_hw_params,
- .hw_free = snd_cobalt_pcm_hw_free,
.prepare = snd_cobalt_pcm_pb_prepare,
.trigger = snd_cobalt_pcm_pb_trigger,
.pointer = snd_cobalt_pcm_pb_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
@@ -555,6 +490,8 @@ int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
&snd_cobalt_pcm_capture_ops);
+ snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
sp->info_flags = 0;
sp->private_data = cobsc;
strscpy(sp->name, "cobalt", sizeof(sp->name));
@@ -579,6 +516,8 @@ int snd_cobalt_pcm_create(struct snd_cobalt_card *cobsc)
snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_PLAYBACK,
&snd_cobalt_pcm_playback_ops);
+ snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
sp->info_flags = 0;
sp->private_data = cobsc;
strscpy(sp->name, "cobalt", sizeof(sp->name));
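Everything deleted from this file -- the vmalloc allocator, the hw_params/hw_free pair, the .page op and the snd_pcm_lib_ioctl wrapper -- is absorbed by snd_pcm_set_managed_buffer_all(): the managed-buffer ALSA core allocates and frees the buffer around hw_params/hw_free and maps its pages itself. A sketch of the reduced setup; setup_capture_pcm() is illustrative and assumes the remaining ops are defined elsewhere:

#include <sound/core.h>
#include <sound/pcm.h>

/*
 * Sketch: with managed buffers only the substantive callbacks remain;
 * .ioctl, .hw_params, .hw_free and .page have core-provided defaults.
 */
static void setup_capture_pcm(struct snd_pcm *pcm,
			      const struct snd_pcm_ops *ops)
{
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops);
	/* sizes 0, 0: no preallocation, sized on demand at hw_params */
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
				       NULL, 0, 0);
}

The cx18 and ivtv hunks below make the identical conversion; solo6x10 and tw686x pass nonzero sizes to the same call to keep their fixed preallocation.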
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 13f858c41836..bed28b4b41f7 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/vmalloc.h>
#include <media/v4l2-device.h>
@@ -201,67 +200,6 @@ static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
- unsigned int cmd, void *arg)
-{
- struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
- int ret;
-
- snd_cx18_lock(cxsc);
- ret = snd_pcm_lib_ioctl(substream, cmd, arg);
- snd_cx18_unlock(cxsc);
- return ret;
-}
-
-
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
- size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- dprintk("Allocating vbuffer\n");
- if (runtime->dma_area) {
- if (runtime->dma_bytes > size)
- return 0;
-
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc(size);
- if (!runtime->dma_area)
- return -ENOMEM;
-
- runtime->dma_bytes = size;
-
- return 0;
-}
-
-static int snd_cx18_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- dprintk("%s called\n", __func__);
-
- return snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(params));
-}
-
-static int snd_cx18_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
- unsigned long flags;
- unsigned char *dma_area = NULL;
-
- spin_lock_irqsave(&cxsc->slock, flags);
- if (substream->runtime->dma_area) {
- dprintk("freeing pcm capture region\n");
- dma_area = substream->runtime->dma_area;
- substream->runtime->dma_area = NULL;
- }
- spin_unlock_irqrestore(&cxsc->slock, flags);
- vfree(dma_area);
-
- return 0;
-}
-
static int snd_cx18_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
@@ -291,24 +229,12 @@ snd_pcm_uframes_t snd_cx18_pcm_pointer(struct snd_pcm_substream *substream)
return hwptr_done;
}
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
-
- return vmalloc_to_page(pageptr);
-}
-
static const struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
.open = snd_cx18_pcm_capture_open,
.close = snd_cx18_pcm_capture_close,
- .ioctl = snd_cx18_pcm_ioctl,
- .hw_params = snd_cx18_pcm_hw_params,
- .hw_free = snd_cx18_pcm_hw_free,
.prepare = snd_cx18_pcm_prepare,
.trigger = snd_cx18_pcm_trigger,
.pointer = snd_cx18_pcm_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
int snd_cx18_pcm_create(struct snd_cx18_card *cxsc)
@@ -334,6 +260,7 @@ int snd_cx18_pcm_create(struct snd_cx18_card *cxsc)
snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
&snd_cx18_pcm_capture_ops);
+ snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
sp->info_flags = 0;
sp->private_data = cxsc;
strscpy(sp->name, cx->card_name, sizeof(sp->name));
diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c
index cf118760d124..ecbe76f1ca63 100644
--- a/drivers/media/pci/cx18/cx18-cards.c
+++ b/drivers/media/pci/cx18/cx18-cards.c
@@ -245,7 +245,7 @@ static const struct cx18_card cx18_card_mpc718 = {
.type = CX18_CARD_YUAN_MPC718,
.name = "Yuan MPC718 MiniPCI DVB-T/Analog",
.comment = "Experimenters needed for device to work well.\n"
- "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+ "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_GPIO_MUX,
@@ -305,7 +305,7 @@ static const struct cx18_card cx18_card_gotview_dvd3 = {
.type = CX18_CARD_GOTVIEW_PCI_DVD3,
.name = "GoTView PCI DVD3 Hybrid",
.comment = "Experimenters needed for device to work well.\n"
- "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+ "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_GPIO_MUX,
@@ -419,7 +419,7 @@ static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = {
.type = CX18_CARD_TOSHIBA_QOSMIO_DVBT,
.name = "Toshiba Qosmio DVB-T/Analog",
.comment = "Experimenters and photos needed for device to work well.\n"
- "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+ "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL,
@@ -462,7 +462,7 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
.type = CX18_CARD_LEADTEK_PVR2100,
.name = "Leadtek WinFast PVR2100",
.comment = "Experimenters and photos needed for device to work well.\n"
- "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
+ "\tTo help, mail the linux-media list (www.linuxtv.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
.hw_muxer = CX18_HW_GPIO_MUX,
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index fd47bd07ffd8..95aed00f353b 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -676,7 +676,7 @@ done:
cx->pci_dev->subsystem_device);
CX18_ERR("Defaulting to %s card\n", cx->card->name);
CX18_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
- CX18_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n");
+ CX18_ERR("card you have to the linux-media mailing list (www.linuxtv.org)\n");
CX18_ERR("Prefix your subject line with [UNKNOWN CX18 CARD].\n");
}
cx->v4l2_cap = cx->card->v4l2_capabilities;
@@ -938,7 +938,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
/* map io memory */
CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
- cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
+ cx->enc_mem = ioremap(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
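The ioremap_nocache() changes here and in the ivtv hunks below are mechanical: ioremap() is already uncached on the architectures these drivers run on, and 5.6 drops the _nocache alias tree-wide. Sketch of the surviving spelling, with illustrative offset/size constants:

#include <linux/io.h>

#define ENC_OFFSET	0x0		/* placeholders for the real offsets */
#define ENC_SIZE	0x800000

static void __iomem *map_encoder(resource_size_t base)
{
	/* identical semantics to the removed ioremap_nocache() */
	return ioremap(base + ENC_OFFSET, ENC_SIZE);
}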
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 1ef7ccf4a722..a83435245251 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -88,7 +88,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
break;
}
- return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
+ return IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL)) ?
-1 : 0;
}
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index a8e980c6dacb..df44ed7393a0 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -495,7 +495,6 @@ static struct page *snd_cx23885_page(struct snd_pcm_substream *substream,
static const struct snd_pcm_ops snd_cx23885_pcm_ops = {
.open = snd_cx23885_pcm_open,
.close = snd_cx23885_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_cx23885_hw_params,
.hw_free = snd_cx23885_hw_free,
.prepare = snd_cx23885_prepare,
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 8644205d3cd3..8e5a2c580821 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -801,6 +801,25 @@ struct cx23885_board cx23885_boards[] = {
.name = "Hauppauge WinTV-Starburst2",
.portb = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_AVERMEDIA_CE310B] = {
+ .name = "AVerMedia CE310B",
+ .porta = CX23885_ANALOG_VIDEO,
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN1_CH1 |
+ CX25840_NONE_CH2 |
+ CX25840_NONE0_CH3,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN8_CH1 |
+ CX25840_NONE_CH2 |
+ CX25840_VIN7_CH3 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -1124,6 +1143,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x0070,
.subdevice = 0xf02a,
.card = CX23885_BOARD_HAUPPAUGE_STARBURST2,
+ }, {
+ .subvendor = 0x1461,
+ .subdevice = 0x3100,
+ .card = CX23885_BOARD_AVERMEDIA_CE310B,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -2348,6 +2371,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_DVBSKY_T982:
case CX23885_BOARD_VIEWCAST_260E:
case CX23885_BOARD_VIEWCAST_460E:
+ case CX23885_BOARD_AVERMEDIA_CE310B:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4f386db33a11..494751a067a3 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1159,8 +1159,8 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
info.addr = 0x40;
info.platform_data = &sp2_config;
request_module(info.type);
- client_ci = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_ci == NULL || client_ci->dev.driver == NULL)
+ client_ci = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_ci))
return -ENODEV;
if (!try_module_get(client_ci->dev.driver->owner)) {
i2c_unregister_device(client_ci);
@@ -1826,8 +1826,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x05;
info.platform_data = &tda10071_pdata;
request_module("tda10071");
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -1843,8 +1843,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x0b;
info.platform_data = &a8293_pdata;
request_module("a8293");
- client_sec = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (!client_sec || !client_sec->dev.driver)
+ client_sec = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_sec))
goto frontend_detach;
if (!try_module_get(client_sec->dev.driver->owner)) {
i2c_unregister_device(client_sec);
@@ -1864,9 +1864,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2165_pdata;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_demod == NULL ||
- client_demod->dev.driver == NULL)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -1898,8 +1897,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x05;
info.platform_data = &tda10071_pdata;
request_module("tda10071");
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -1915,8 +1914,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x0b;
info.platform_data = &a8293_pdata;
request_module("a8293");
- client_sec = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (!client_sec || !client_sec->dev.driver)
+ client_sec = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_sec))
goto frontend_detach;
if (!try_module_get(client_sec->dev.driver->owner)) {
i2c_unregister_device(client_sec);
@@ -1948,9 +1947,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &ts2020_config;
request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -1985,9 +1983,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_demod == NULL ||
- client_demod->dev.driver == NULL)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2004,9 +2001,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
@@ -2032,8 +2028,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus2->i2c_adap, &info);
- if (client_demod == NULL || client_demod->dev.driver == NULL)
+ client_demod = i2c_new_client_device(&i2c_bus2->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2050,9 +2046,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -2080,8 +2075,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &ts2020_config;
request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -2129,8 +2124,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x68;
info.platform_data = &m88ds3103_pdata;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_demod == NULL || client_demod->dev.driver == NULL)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2149,8 +2144,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &ts2020_config;
request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -2194,8 +2189,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (client_demod == NULL || client_demod->dev.driver == NULL)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2212,9 +2207,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL ||
- client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -2245,8 +2239,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x0b;
info.platform_data = &a8293_pdata;
request_module("a8293");
- client_sec = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (!client_sec || !client_sec->dev.driver)
+ client_sec = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_sec))
goto frontend_detach;
if (!try_module_get(client_sec->dev.driver->owner)) {
i2c_unregister_device(client_sec);
@@ -2262,8 +2256,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x21;
info.platform_data = &m88rs6000t_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (!client_tuner || !client_tuner->dev.driver)
+ client_tuner = i2c_new_client_device(adapter, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
@@ -2287,8 +2281,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module("%s", info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&i2c_bus->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2305,8 +2299,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(&i2c_bus2->i2c_adap, &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&i2c_bus2->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
port->i2c_client_demod = NULL;
@@ -2340,8 +2334,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module("%s", info.type);
- client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&dev->i2c_bus[0].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2358,8 +2352,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
port->i2c_client_demod = NULL;
@@ -2387,8 +2381,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x66;
info.platform_data = &si2168_config;
request_module("%s", info.type);
- client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&dev->i2c_bus[0].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
i2c_unregister_device(client_demod);
@@ -2405,8 +2399,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x62;
info.platform_data = &si2157_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
port->i2c_client_demod = NULL;
@@ -2447,8 +2441,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
port->i2c_client_demod = NULL;
@@ -2483,8 +2477,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x62;
info.platform_data = &si2157_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
port->i2c_client_demod = NULL;
@@ -2523,8 +2517,8 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module("%s", info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
- if (!client_tuner || !client_tuner->dev.driver)
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto frontend_detach;
if (!try_module_get(client_tuner->dev.driver->owner)) {
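Every attach path in cx23885-dvb carried its own spelling of "client == NULL || client->dev.driver == NULL"; the new i2c_client_has_driver() helper folds the NULL, ERR_PTR and unbound-driver cases into one test. A sketch of the attach-and-pin pattern shared by the demod, tuner and SEC clients; attach_and_pin() is an illustrative name:

#include <linux/i2c.h>
#include <linux/module.h>

static struct i2c_client *attach_and_pin(struct i2c_adapter *adap,
					 struct i2c_board_info *info)
{
	struct i2c_client *client = i2c_new_client_device(adap, info);

	if (!i2c_client_has_driver(client))	/* NULL/ERR_PTR/unbound */
		return NULL;
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return NULL;
	}
	return client;
}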
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 4f327ee9659e..f51fad33dc04 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -337,8 +337,8 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
strscpy(info.type, "ir_video", I2C_NAME_SIZE);
/* Use the quick read command for probing, since some IR chips
* don't support writes */
- i2c_new_probed_device(&bus->i2c_adap, &info, addr_list,
- i2c_probe_func_quick_read);
+ i2c_new_scanned_device(&bus->i2c_adap, &info, addr_list,
+ i2c_probe_func_quick_read);
}
return bus->i2c_rc;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 8098b15493de..7fc408ee4934 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -257,7 +257,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
(dev->board == CX23885_BOARD_MYGICA_X8507) ||
(dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
(dev->board == CX23885_BOARD_VIEWCAST_260E) ||
- (dev->board == CX23885_BOARD_VIEWCAST_460E)) {
+ (dev->board == CX23885_BOARD_VIEWCAST_460E) ||
+ (dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
INPUT(input)->amux, 0, 0);
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index a95a2e4c6a0d..c472498e57c4 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -101,6 +101,7 @@
#define CX23885_BOARD_HAUPPAUGE_STARBURST2 59
#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60
#define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61
+#define CX23885_BOARD_AVERMEDIA_CE310B 62
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index c2f2d7c782c7..301616426d8a 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -639,7 +639,6 @@ static struct page *snd_cx25821_page(struct snd_pcm_substream *substream,
static const struct snd_pcm_ops snd_cx25821_pcm_ops = {
.open = snd_cx25821_pcm_open,
.close = snd_cx25821_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_cx25821_hw_params,
.hw_free = snd_cx25821_hw_free,
.prepare = snd_cx25821_prepare,
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index e1e71ae293ed..7d7aceecc985 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -585,7 +585,6 @@ static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
static const struct snd_pcm_ops snd_cx88_pcm_ops = {
.open = snd_cx88_pcm_open,
.close = snd_cx88_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_cx88_hw_params,
.hw_free = snd_cx88_hw_free,
.prepare = snd_cx88_prepare,
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 589f52d961eb..c7c2acd55266 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -613,7 +613,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
}
/*
- * We can't call i2c_new_probed_device() because it uses
+ * We can't call i2c_new_scanned_device() because it uses
* quick writes for probing and at least some RC receiver
* devices only reply to reads.
* Also, Hauppauge XVR needs to be specified, as address 0x71
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 36c089103cf9..c729e54692c4 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -24,7 +24,7 @@ config VIDEO_IVTV
PCI personal video recorder devices.
This is used in devices such as the Hauppauge PVR-150/250/350/500
- cards. There is a driver homepage at <http://www.ivtvdriver.org>.
+ cards.
To compile this driver as a module, choose M here: the
module will be called ivtv.
@@ -67,8 +67,7 @@ config VIDEO_FB_IVTV
This is a framebuffer driver for the Conexant cx23415 MPEG
encoder/decoder.
- This is used in the Hauppauge PVR-350 card. There is a driver
- homepage at <http://www.ivtvdriver.org>.
+ This is used in the Hauppauge PVR-350 card.
To compile this driver as a module, choose M here: the
module will be called ivtvfb.
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 9e6019a159f4..8f346d7da9c8 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -16,8 +16,6 @@
#include "ivtv-alsa.h"
#include "ivtv-alsa-pcm.h"
-#include <linux/vmalloc.h>
-
#include <sound/core.h>
#include <sound/pcm.h>
@@ -206,67 +204,6 @@ static int snd_ivtv_pcm_capture_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_ivtv_pcm_ioctl(struct snd_pcm_substream *substream,
- unsigned int cmd, void *arg)
-{
- struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
- int ret;
-
- snd_ivtv_lock(itvsc);
- ret = snd_pcm_lib_ioctl(substream, cmd, arg);
- snd_ivtv_unlock(itvsc);
- return ret;
-}
-
-
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
- size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- dprintk("Allocating vbuffer\n");
- if (runtime->dma_area) {
- if (runtime->dma_bytes > size)
- return 0;
-
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc(size);
- if (!runtime->dma_area)
- return -ENOMEM;
-
- runtime->dma_bytes = size;
-
- return 0;
-}
-
-static int snd_ivtv_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- dprintk("%s called\n", __func__);
-
- return snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(params));
-}
-
-static int snd_ivtv_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
- unsigned long flags;
- unsigned char *dma_area = NULL;
-
- spin_lock_irqsave(&itvsc->slock, flags);
- if (substream->runtime->dma_area) {
- dprintk("freeing pcm capture region\n");
- dma_area = substream->runtime->dma_area;
- substream->runtime->dma_area = NULL;
- }
- spin_unlock_irqrestore(&itvsc->slock, flags);
- vfree(dma_area);
-
- return 0;
-}
-
static int snd_ivtv_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
@@ -296,24 +233,12 @@ snd_pcm_uframes_t snd_ivtv_pcm_pointer(struct snd_pcm_substream *substream)
return hwptr_done;
}
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
-
- return vmalloc_to_page(pageptr);
-}
-
static const struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
.open = snd_ivtv_pcm_capture_open,
.close = snd_ivtv_pcm_capture_close,
- .ioctl = snd_ivtv_pcm_ioctl,
- .hw_params = snd_ivtv_pcm_hw_params,
- .hw_free = snd_ivtv_pcm_hw_free,
.prepare = snd_ivtv_pcm_prepare,
.trigger = snd_ivtv_pcm_trigger,
.pointer = snd_ivtv_pcm_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc)
@@ -339,6 +264,7 @@ int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc)
snd_pcm_set_ops(sp, SNDRV_PCM_STREAM_CAPTURE,
&snd_ivtv_pcm_capture_ops);
+ snd_pcm_set_managed_buffer_all(sp, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
sp->info_flags = 0;
sp->private_data = itvsc;
strscpy(sp->name, itv->card_name, sizeof(sp->name));
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 3f3f40ea890b..b1dde1be6f5e 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -23,7 +23,6 @@
* Driver for the Conexant CX23415/CX23416 chip.
* Author: Kevin Thayer (nufan_wfk at yahoo.com)
* License: GPL
- * http://www.ivtvdriver.org
*
* -----
* MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com>
@@ -723,7 +722,7 @@ done:
IVTV_ERR(" %s based\n", chipname);
IVTV_ERR("Defaulting to %s card\n", itv->card->name);
IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
- IVTV_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n");
+ IVTV_ERR("card you have to the linux-media mailing list (www.linuxtv.org)\n");
IVTV_ERR("Prefix your subject line with [UNKNOWN IVTV CARD].\n");
}
itv->v4l2_cap = itv->card->v4l2_capabilities;
@@ -1042,7 +1041,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* map io memory */
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
- itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
+ itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
@@ -1056,7 +1055,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
if (itv->has_cx23415) {
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
- itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
+ itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
@@ -1075,7 +1074,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
(u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
- ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+ ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index cafba6b1055d..e5efe525ad7b 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -28,7 +28,6 @@
* Driver for the cx23415/6 chip.
* Author: Kevin Thayer (nufan_wfk at yahoo.com)
* License: GPL
- * http://www.ivtvdriver.org
*
* -----
* MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com>
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 0772d757a389..982045c4eea8 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -208,12 +208,12 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
info.platform_data = init_data;
strscpy(info.type, type, I2C_NAME_SIZE);
- return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
+ return IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL)) ?
-1 : 0;
}
/* Instantiate the IR receiver device using probing -- undesirable */
-struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv)
+void ivtv_i2c_new_ir_legacy(struct ivtv *itv)
{
struct i2c_board_info info;
/*
@@ -235,7 +235,7 @@ struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv)
memset(&info, 0, sizeof(struct i2c_board_info));
strscpy(info.type, "ir_video", I2C_NAME_SIZE);
- return i2c_new_probed_device(&itv->i2c_adap, &info, addr_list, NULL);
+ i2c_new_scanned_device(&itv->i2c_adap, &info, addr_list, NULL);
}
int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.h b/drivers/media/pci/ivtv/ivtv-i2c.h
index 462f73449a6e..2d9cdaa682c5 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.h
+++ b/drivers/media/pci/ivtv/ivtv-i2c.h
@@ -9,7 +9,7 @@
#ifndef IVTV_I2C_H
#define IVTV_I2C_H
-struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv);
+void ivtv_i2c_new_ir_legacy(struct ivtv *itv);
int ivtv_i2c_register(struct ivtv *itv, unsigned idx);
struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw);
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index f2922b554b09..0c2859844081 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -37,7 +37,7 @@
#include <linux/ivtvfb.h>
#ifdef CONFIG_X86_64
-#include <asm/pat.h>
+#include <asm/memtype.h>
#endif
/* card parameters */
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 0e61c81356ef..3a4c29bc0ba5 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1266,7 +1266,7 @@ static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->flags |= V4L2_BUF_FLAG_DONE;
buf->field = V4L2_FIELD_NONE;
- buf->timestamp = ns_to_timeval(meye.grab_buffer[index].ts);
+ v4l2_buffer_set_timestamp(buf, meye.grab_buffer[index].ts);
buf->sequence = meye.grab_buffer[index].sequence;
buf->memory = V4L2_MEMORY_MMAP;
buf->m.offset = index * gbufsize;
@@ -1332,7 +1332,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->bytesused = meye.grab_buffer[reqnr].size;
buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
- buf->timestamp = ns_to_timeval(meye.grab_buffer[reqnr].ts);
+ v4l2_buffer_set_timestamp(buf, meye.grab_buffer[reqnr].ts);
buf->sequence = meye.grab_buffer[reqnr].sequence;
buf->memory = V4L2_MEMORY_MMAP;
buf->m.offset = reqnr * gbufsize;
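ns_to_timeval() disappeared with the y2038 conversion of the v4l2_buffer UAPI; v4l2_buffer_set_timestamp() takes the timestamp as u64 nanoseconds and fills the buffer's timestamp field in whichever time_t layout the running ABI uses. A minimal sketch of the dequeue-side call, assuming the helper is declared in media/v4l2-ioctl.h as in this tree:

#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

/* Sketch: stamping a dequeued buffer from a nanosecond timestamp. */
static void stamp_buffer(struct v4l2_buffer *buf, u64 ts_ns)
{
	v4l2_buffer_set_timestamp(buf, ts_ns);
	buf->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
}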
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 0385127dd7ff..544ca57eee75 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -865,7 +865,6 @@ static struct page *snd_card_saa7134_page(struct snd_pcm_substream *substream,
static const struct snd_pcm_ops snd_card_saa7134_capture_ops = {
.open = snd_card_saa7134_capture_open,
.close = snd_card_saa7134_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_card_saa7134_hw_params,
.hw_free = snd_card_saa7134_hw_free,
.prepare = snd_card_saa7134_capture_prepare,
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index 05ab4734ea67..bf8c2bb8852e 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -116,8 +116,8 @@ static int si2157_attach(struct saa7164_port *port, struct i2c_adapter *adapter,
request_module(bi.type);
- tuner = i2c_new_device(adapter, &bi);
- if (tuner == NULL || tuner->dev.driver == NULL)
+ tuner = i2c_new_client_device(adapter, &bi);
+ if (!i2c_client_has_driver(tuner))
return -ENODEV;
if (!try_module_get(tuner->dev.driver->owner)) {
@@ -637,9 +637,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
info.addr = 0xc8 >> 1;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&dev->i2c_bus[2].i2c_adap,
- &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&dev->i2c_bus[2].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -657,9 +656,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
info.addr = 0xc0 >> 1;
info.platform_data = &si2157_config;
request_module(info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[0].i2c_adap,
- &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[0].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
goto frontend_detach;
@@ -682,9 +680,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
info.addr = 0xcc >> 1;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&dev->i2c_bus[2].i2c_adap,
- &info);
- if (!client_demod || !client_demod->dev.driver)
+ client_demod = i2c_new_client_device(&dev->i2c_bus[2].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -702,9 +699,8 @@ int saa7164_dvb_register(struct saa7164_port *port)
info.addr = 0xc0 >> 1;
info.platform_data = &si2157_config;
request_module(info.type);
- client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap,
- &info);
- if (!client_tuner || !client_tuner->dev.driver) {
+ client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner)) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
goto frontend_detach;
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index 1fb78462e081..9ca0fc3e6f80 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -484,8 +484,8 @@ static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
struct i2c_client *client;
request_module(info->type);
- client = i2c_new_device(adapter, info);
- if (client == NULL || client->dev.driver == NULL)
+ client = i2c_new_client_device(adapter, info);
+ if (!i2c_client_has_driver(client))
goto err_add_i2c_client;
if (!try_module_get(client->dev.driver->owner)) {
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index eaa57d835ea8..d6d16e8fd997 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -97,17 +97,6 @@ void solo_g723_isr(struct solo_dev *solo_dev)
}
}
-static int snd_solo_hw_params(struct snd_pcm_substream *ss,
- struct snd_pcm_hw_params *hw_params)
-{
- return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
-}
-
-static int snd_solo_hw_free(struct snd_pcm_substream *ss)
-{
- return snd_pcm_lib_free_pages(ss);
-}
-
static const struct snd_pcm_hardware snd_solo_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
@@ -270,9 +259,6 @@ static int snd_solo_pcm_copy_kernel(struct snd_pcm_substream *ss, int channel,
static const struct snd_pcm_ops snd_solo_pcm_ops = {
.open = snd_solo_pcm_open,
.close = snd_solo_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_solo_hw_params,
- .hw_free = snd_solo_hw_free,
.prepare = snd_solo_pcm_prepare,
.trigger = snd_solo_pcm_trigger,
.pointer = snd_solo_pcm_pointer,
@@ -351,11 +337,11 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
ss; ss = ss->next, i++)
sprintf(ss->name, "Camera #%d Audio", i);
- snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- NULL,
- G723_PERIOD_BYTES * PERIODS,
- G723_PERIOD_BYTES * PERIODS);
+ snd_pcm_set_managed_buffer_all(pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ NULL,
+ G723_PERIOD_BYTES * PERIODS,
+ G723_PERIOD_BYTES * PERIODS);
solo_dev->snd_pcm = pcm;
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 7786e51d19ae..54144e23a487 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -78,17 +78,6 @@ void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
}
}
-static int tw686x_pcm_hw_params(struct snd_pcm_substream *ss,
- struct snd_pcm_hw_params *hw_params)
-{
- return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
-}
-
-static int tw686x_pcm_hw_free(struct snd_pcm_substream *ss)
-{
- return snd_pcm_lib_free_pages(ss);
-}
-
/*
* Audio parameters are global and shared among all
* capture channels. The driver prevents changes to
@@ -269,9 +258,6 @@ static snd_pcm_uframes_t tw686x_pcm_pointer(struct snd_pcm_substream *ss)
static const struct snd_pcm_ops tw686x_pcm_ops = {
.open = tw686x_pcm_open,
.close = tw686x_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = tw686x_pcm_hw_params,
- .hw_free = tw686x_pcm_hw_free,
.prepare = tw686x_pcm_prepare,
.trigger = tw686x_pcm_trigger,
.pointer = tw686x_pcm_pointer,
@@ -298,7 +284,7 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
ss; ss = ss->next, i++)
snprintf(ss->name, sizeof(ss->name), "vch%u audio", i);
- snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_set_managed_buffer_all(pcm,
SNDRV_DMA_TYPE_DEV,
&dev->pci_dev->dev,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index e84f35d3a68e..f65e98d3adf2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -151,7 +151,7 @@ source "drivers/media/platform/sunxi/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on SOC_DRA7XX || COMPILE_TEST
+ depends on SOC_DRA7XX || ARCH_K3 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
help
@@ -200,7 +200,7 @@ config VIDEO_IMX_PXP
config VIDEO_MEDIATEK_JPEG
tristate "Mediatek JPEG Codec driver"
- depends on MTK_IOMMU_V1 || COMPILE_TEST
+ depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_MEDIATEK || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index c1c776b348a9..d7669a03e98e 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -73,6 +73,9 @@ const struct isc_format controller_formats[] = {
{
.fourcc = V4L2_PIX_FMT_GREY,
},
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ },
};
/* This is a list of formats that the ISC can receive as *input* */
@@ -164,6 +167,12 @@ struct isc_format formats_list[] = {
.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
},
+ {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ },
+
};
/* Gamma table with gamma 1/2.2 */
@@ -211,6 +220,10 @@ const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
#define ISC_IS_FORMAT_RAW(mbus_code) \
(((mbus_code) & 0xf000) == 0x3000)
+#define ISC_IS_FORMAT_GREY(mbus_code) \
+	(((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) || \
+	 ((mbus_code) == MEDIA_BUS_FMT_Y8_1X8))
+
static inline void isc_update_awb_ctrls(struct isc_device *isc)
{
struct isc_ctrls *ctrls = &isc->ctrls;
@@ -1003,6 +1016,7 @@ static int isc_try_validate_formats(struct isc_device *isc)
rgb = true;
break;
case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
ret = 0;
grey = true;
break;
@@ -1010,34 +1024,29 @@ static int isc_try_validate_formats(struct isc_device *isc)
/* any other different formats are not supported */
ret = -EINVAL;
}
-
- /* we cannot output RAW/Grey if we do not receive RAW */
- if ((bayer || grey) &&
- !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
- return -EINVAL;
-
v4l2_dbg(1, debug, &isc->v4l2_dev,
"Format validation, requested rgb=%u, yuv=%u, grey=%u, bayer=%u\n",
rgb, yuv, grey, bayer);
+ /* we cannot output RAW if we do not receive RAW */
+ if ((bayer) && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code))
+ return -EINVAL;
+
+ /* we cannot output GREY if we do not receive RAW/GREY */
+ if (grey && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code) &&
+ !ISC_IS_FORMAT_GREY(isc->try_config.sd_format->mbus_code))
+ return -EINVAL;
+
return ret;
}
/*
* Configures the RLP and DMA modules, depending on the output format
* configured for the ISC.
- * If direct_dump == true, just dump raw data 8 bits.
+ * If direct_dump == true, just dump raw data 8/16 bits depending on format.
*/
static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
{
- if (direct_dump) {
- isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
- isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
- isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
- isc->try_config.bpp = 16;
- return 0;
- }
-
switch (isc->try_config.fourcc) {
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
@@ -1115,9 +1124,23 @@ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump)
isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
isc->try_config.bpp = 8;
break;
+ case V4L2_PIX_FMT_Y10:
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ isc->try_config.bpp = 16;
+ break;
default:
return -EINVAL;
}
+
+ if (direct_dump) {
+ isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8;
+ isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ return 0;
+ }
+
return 0;
}
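Net effect of the reordering above, for clarity: the format switch now runs first and sets rlp_cfg_mode/dcfg_imode/bpp per output format; a direct dump then only overrides the RLP mode and DMA input mode to 8-bit packed while keeping the format's bpp, matching the updated "8/16 bits depending on format" comment.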
@@ -1187,13 +1210,44 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
return 0;
}
+static void isc_try_fse(struct isc_device *isc,
+ struct v4l2_subdev_pad_config *pad_cfg)
+{
+ int ret;
+ struct v4l2_subdev_frame_size_enum fse = {};
+
+ /*
+ * If we do not know yet which format the subdev is using, we cannot
+ * do anything.
+ */
+ if (!isc->try_config.sd_format)
+ return;
+
+ fse.code = isc->try_config.sd_format->mbus_code;
+ fse.which = V4L2_SUBDEV_FORMAT_TRY;
+
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
+ pad_cfg, &fse);
+ /*
+ * Attempt to obtain format size from subdev. If not available,
+ * just use the maximum ISC can receive.
+ */
+ if (ret) {
+ pad_cfg->try_crop.width = ISC_MAX_SUPPORT_WIDTH;
+ pad_cfg->try_crop.height = ISC_MAX_SUPPORT_HEIGHT;
+ } else {
+ pad_cfg->try_crop.width = fse.max_width;
+ pad_cfg->try_crop.height = fse.max_height;
+ }
+}
+
static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
u32 *code)
{
int i;
struct isc_format *sd_fmt = NULL, *direct_fmt = NULL;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
- struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_pad_config pad_cfg = {};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -1290,6 +1344,9 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
if (ret)
goto isc_try_fmt_err;
+ /* Obtain frame sizes if possible to have crop requirements ready */
+ isc_try_fse(isc, &pad_cfg);
+
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
&pad_cfg, &format);
@@ -1414,6 +1471,7 @@ static int isc_enum_framesizes(struct file *file, void *fh,
{
struct isc_device *isc = video_drvdata(file);
struct v4l2_subdev_frame_size_enum fse = {
+ .code = isc->config.sd_format->mbus_code,
.index = fsize->index,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
@@ -1436,8 +1494,6 @@ static int isc_enum_framesizes(struct file *file, void *fh,
if (ret)
return ret;
- fse.code = isc->config.sd_format->mbus_code;
-
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width;
fsize->discrete.height = fse.max_height;
@@ -1450,6 +1506,7 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
{
struct isc_device *isc = video_drvdata(file);
struct v4l2_subdev_frame_interval_enum fie = {
+ .code = isc->config.sd_format->mbus_code,
.index = fival->index,
.width = fival->width,
.height = fival->height,
@@ -1474,7 +1531,6 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
if (ret)
return ret;
- fie.code = isc->config.sd_format->mbus_code;
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fival->discrete = fie.interval;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 428f117caa59..963dfd6e750e 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -148,7 +148,8 @@ static void configure_geometry(struct atmel_isi *isi)
u32 fourcc = isi->current_fmt->fourcc;
isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 ||
- fourcc == V4L2_PIX_FMT_RGB32;
+ fourcc == V4L2_PIX_FMT_RGB32 ||
+ fourcc == V4L2_PIX_FMT_Y16;
/* According to sensor's output format to set cfg2 */
cfg2 = isi->current_fmt->swap;
@@ -554,12 +555,36 @@ static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
return NULL;
}
+static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
+ struct v4l2_subdev_pad_config *pad_cfg)
+{
+ int ret;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .code = isi_fmt->mbus_code,
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+
+ ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
+ pad_cfg, &fse);
+ /*
+ * Attempt to obtain format size from subdev. If not available,
+ * just use the maximum ISI can receive.
+ */
+ if (ret) {
+ pad_cfg->try_crop.width = MAX_SUPPORT_WIDTH;
+ pad_cfg->try_crop.height = MAX_SUPPORT_HEIGHT;
+ } else {
+ pad_cfg->try_crop.width = fse.max_width;
+ pad_cfg->try_crop.height = fse.max_height;
+ }
+}
+
static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
const struct isi_format **current_fmt)
{
const struct isi_format *isi_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
- struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_pad_config pad_cfg = {};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -576,6 +601,9 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
pixfmt->height = clamp(pixfmt->height, 0U, MAX_SUPPORT_HEIGHT);
v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
+
+ isi_try_fse(isi, isi_fmt, &pad_cfg);
+
ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
@@ -990,6 +1018,16 @@ static const struct isi_format isi_formats[] = {
.mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
.bpp = 2,
.swap = ISI_CFG2_YCC_SWAP_MODE_1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .bpp = 1,
+ .swap = ISI_CFG2_GS_MODE_2_PIXEL | ISI_CFG2_GRAYSCALE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y16,
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .bpp = 2,
+ .swap = ISI_CFG2_GS_MODE_2_PIXEL | ISI_CFG2_GRAYSCALE,
},
};
diff --git a/drivers/media/platform/atmel/atmel-isi.h b/drivers/media/platform/atmel/atmel-isi.h
index 47a9108dba55..7ad3895a2c87 100644
--- a/drivers/media/platform/atmel/atmel-isi.h
+++ b/drivers/media/platform/atmel/atmel-isi.h
@@ -62,6 +62,8 @@
#define ISI_CFG1_THMASK_BEATS_16 (2 << 13)
/* Bitfields in CFG2 */
+#define ISI_CFG2_GS_MODE_2_PIXEL (0 << 11)
+#define ISI_CFG2_GS_MODE_1_PIXEL (1 << 11)
#define ISI_CFG2_GRAYSCALE (1 << 13)
#define ISI_CFG2_COL_SPACE_YCbCr (0 << 15)
#define ISI_CFG2_COL_SPACE_RGB (1 << 15)
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 00c7bed3dd57..3443396ba5f3 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1629,6 +1629,9 @@ static void coda_finish_encode(struct coda_ctx *ctx)
struct coda_dev *dev = ctx->dev;
u32 wr_ptr, start_ptr;
+ if (ctx->aborting)
+ return;
+
/*
* Lock to make sure that an encoder stop command running in parallel
* will either already have marked src_buf as last, or it will wake up
@@ -2165,16 +2168,21 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
} else {
if (dev->devtype->product == CODA_960) {
/*
- * The CODA960 seems to have an internal list of
- * buffers with 64 entries that includes the
- * registered frame buffers as well as the rotator
- * buffer output.
- *
- * ROT_INDEX needs to be < 0x40, but >
- * ctx->num_internal_frames.
+ * It was previously assumed that the CODA960 has an
+ * internal list of 64 buffer entries that contains
+ * both the registered internal frame buffers and the
+ * rotator buffer output, and that the ROT_INDEX
+ * register must be set to a value between the last
+ * internal frame buffer's index and 64.
+ * At least on firmware version 3.1.1 it turns out that
+ * setting ROT_INDEX to any value >= 32 causes CODA
+ * hangups that it cannot recover from with the SRC VPU
+ * reset.
+ * It does, however, appear to work to just set it to a
+ * fixed value in the [ctx->num_internal_frames, 31]
+ * range, for example CODA_MAX_FRAMEBUFFERS.
*/
- coda_write(dev,
- CODA_MAX_FRAMEBUFFERS + dst_buf->vb2_buf.index,
+ coda_write(dev, CODA_MAX_FRAMEBUFFERS,
CODA9_CMD_DEC_PIC_ROT_INDEX);
reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
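The constraint described in the comment can be read as a simple range check; an illustrative sketch, not part of the patch:

/* ROT_INDEX must not alias a registered frame buffer and, on this
 * firmware (3.1.1), must stay below 32 to avoid unrecoverable hangs. */
static bool coda9_rot_index_valid(unsigned int rot_index,
				  unsigned int num_internal_frames)
{
	return rot_index >= num_internal_frames && rot_index <= 31;
}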
@@ -2266,6 +2274,9 @@ static void coda_finish_decode(struct coda_ctx *ctx)
int err_vdoa = 0;
u32 val;
+ if (ctx->aborting)
+ return;
+
/* Update kfifo out pointer from coda bitstream read pointer */
coda_kfifo_sync_from_device(ctx);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 94fb4d2ecc43..acff10ad257a 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -155,6 +155,7 @@ static const struct coda_codec coda7_codecs[] = {
static const struct coda_codec coda9_codecs[] = {
CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1088),
CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192),
CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
@@ -235,6 +236,22 @@ static const struct coda_video_device coda_bit_jpeg_decoder = {
},
};
+static const struct coda_video_device coda9_jpeg_encoder = {
+ .name = "coda-jpeg-encoder",
+ .type = CODA_INST_ENCODER,
+ .ops = &coda9_jpeg_encode_ops,
+ .direct = true,
+ .src_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_YUV422P,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+};
+
static const struct coda_video_device *codadx6_video_devices[] = {
&coda_bit_encoder,
};
@@ -252,6 +269,7 @@ static const struct coda_video_device *coda7_video_devices[] = {
};
static const struct coda_video_device *coda9_video_devices[] = {
+ &coda9_jpeg_encoder,
&coda_bit_encoder,
&coda_bit_decoder,
};
@@ -721,7 +739,8 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
case V4L2_PIX_FMT_NV12:
- if (!disable_tiling && ctx->dev->devtype->product == CODA_960) {
+ if (!disable_tiling && ctx->use_bit &&
+ ctx->dev->devtype->product == CODA_960) {
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
}
@@ -1421,7 +1440,7 @@ static void coda_pic_run_work(struct work_struct *work)
if (ctx->ops->run_timeout)
ctx->ops->run_timeout(ctx);
- } else if (!ctx->aborting) {
+ } else {
ctx->ops->finish_run(ctx);
}
@@ -1787,7 +1806,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
coda_queue_source_change_event(ctx);
}
} else {
- if (ctx->inst_type == CODA_INST_ENCODER &&
+ if ((ctx->inst_type == CODA_INST_ENCODER || !ctx->use_bit) &&
vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
vbuf->sequence = ctx->qsequence++;
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
@@ -2984,10 +3003,8 @@ static int coda_probe(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, "bit");
if (irq < 0)
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, coda_irq_handler, 0,
dev_name(&pdev->dev), dev);
@@ -2996,6 +3013,22 @@ static int coda_probe(struct platform_device *pdev)
return ret;
}
+ /* JPEG IRQ */
+ if (dev->devtype->product == CODA_960) {
+ irq = platform_get_irq_byname(pdev, "jpeg");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ coda9_jpeg_irq_handler,
+ IRQF_ONESHOT, CODA_NAME " jpeg",
+ dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request jpeg irq\n");
+ return ret;
+ }
+ }
+
dev->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev,
NULL);
if (IS_ERR(dev->rstc)) {
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
index bf61a3ecc580..92234fd1f4fd 100644
--- a/drivers/media/platform/coda/coda-jpeg.c
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -5,46 +5,68 @@
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
+#include <asm/unaligned.h>
+#include <linux/irqreturn.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
#include <linux/swab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
#include "coda.h"
#include "trace.h"
#define SOI_MARKER 0xffd8
+#define DRI_MARKER 0xffdd
+#define DQT_MARKER 0xffdb
+#define DHT_MARKER 0xffc4
+#define SOF_MARKER 0xffc0
#define EOI_MARKER 0xffd9
+enum {
+ CODA9_JPEG_FORMAT_420,
+ CODA9_JPEG_FORMAT_422,
+ CODA9_JPEG_FORMAT_224,
+ CODA9_JPEG_FORMAT_444,
+ CODA9_JPEG_FORMAT_400,
+};
+
+#define CODA9_JPEG_ENC_HUFF_DATA_SIZE (256 + 256 + 16 + 16)
+
/*
* Typical Huffman tables for 8-bit precision luminance and
* chrominance from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K.3
*/
-static const unsigned char luma_dc_bits[16] = {
+static const unsigned char luma_dc[16 + 12] = {
+ /* bits */
0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const unsigned char luma_dc_value[12] = {
+ /* values */
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
};
-static const unsigned char chroma_dc_bits[16] = {
+static const unsigned char chroma_dc[16 + 12] = {
+ /* bits */
0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const unsigned char chroma_dc_value[12] = {
+ /* values */
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
};
-static const unsigned char luma_ac_bits[16] = {
+static const unsigned char luma_ac[16 + 162 + 2] = {
+ /* bits */
0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
-};
-
-static const unsigned char luma_ac_value[162 + 2] = {
+ /* values */
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
@@ -68,12 +90,11 @@ static const unsigned char luma_ac_value[162 + 2] = {
0xf9, 0xfa, /* padded to 32-bit */
};
-static const unsigned char chroma_ac_bits[16] = {
+static const unsigned char chroma_ac[16 + 162 + 2] = {
+ /* bits */
0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
-};
-
-static const unsigned char chroma_ac_value[162 + 2] = {
+ /* values */
0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
@@ -124,6 +145,38 @@ static unsigned char chroma_q[64] = {
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
};
+static const unsigned char width_align[] = {
+ [CODA9_JPEG_FORMAT_420] = 16,
+ [CODA9_JPEG_FORMAT_422] = 16,
+ [CODA9_JPEG_FORMAT_224] = 8,
+ [CODA9_JPEG_FORMAT_444] = 8,
+ [CODA9_JPEG_FORMAT_400] = 8,
+};
+
+static const unsigned char height_align[] = {
+ [CODA9_JPEG_FORMAT_420] = 16,
+ [CODA9_JPEG_FORMAT_422] = 8,
+ [CODA9_JPEG_FORMAT_224] = 16,
+ [CODA9_JPEG_FORMAT_444] = 8,
+ [CODA9_JPEG_FORMAT_400] = 8,
+};
+
+static int coda9_jpeg_chroma_format(u32 pixfmt)
+{
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ return CODA9_JPEG_FORMAT_420;
+ case V4L2_PIX_FMT_YUV422P:
+ return CODA9_JPEG_FORMAT_422;
+ case V4L2_PIX_FMT_YUV444:
+ return CODA9_JPEG_FORMAT_444;
+ case V4L2_PIX_FMT_GREY:
+ return CODA9_JPEG_FORMAT_400;
+ }
+ return -EINVAL;
+}
+
struct coda_memcpy_desc {
int offset;
const void *src;
@@ -148,14 +201,10 @@ int coda_jpeg_write_tables(struct coda_ctx *ctx)
{
int i;
static const struct coda_memcpy_desc huff[8] = {
- { 0, luma_dc_bits, sizeof(luma_dc_bits) },
- { 16, luma_dc_value, sizeof(luma_dc_value) },
- { 32, luma_ac_bits, sizeof(luma_ac_bits) },
- { 48, luma_ac_value, sizeof(luma_ac_value) },
- { 216, chroma_dc_bits, sizeof(chroma_dc_bits) },
- { 232, chroma_dc_value, sizeof(chroma_dc_value) },
- { 248, chroma_ac_bits, sizeof(chroma_ac_bits) },
- { 264, chroma_ac_value, sizeof(chroma_ac_value) },
+ { 0, luma_dc, sizeof(luma_dc) },
+ { 32, luma_ac, sizeof(luma_ac) },
+ { 216, chroma_dc, sizeof(chroma_dc) },
+ { 248, chroma_ac, sizeof(chroma_ac) },
};
struct coda_memcpy_desc qmat[3] = {
{ 512, ctx->params.jpeg_qmat_tab[0], 64 },
@@ -198,6 +247,379 @@ bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
return false;
}
+static const int bus_req_num[] = {
+ [CODA9_JPEG_FORMAT_420] = 2,
+ [CODA9_JPEG_FORMAT_422] = 3,
+ [CODA9_JPEG_FORMAT_224] = 3,
+ [CODA9_JPEG_FORMAT_444] = 4,
+ [CODA9_JPEG_FORMAT_400] = 4,
+};
+
+#define MCU_INFO(mcu_block_num, comp_num, comp0_info, comp1_info, comp2_info) \
+ (((mcu_block_num) << CODA9_JPEG_MCU_BLOCK_NUM_OFFSET) | \
+ ((comp_num) << CODA9_JPEG_COMP_NUM_OFFSET) | \
+ ((comp0_info) << CODA9_JPEG_COMP0_INFO_OFFSET) | \
+ ((comp1_info) << CODA9_JPEG_COMP1_INFO_OFFSET) | \
+ ((comp2_info) << CODA9_JPEG_COMP2_INFO_OFFSET))
+
+static const u32 mcu_info[] = {
+ [CODA9_JPEG_FORMAT_420] = MCU_INFO(6, 3, 10, 5, 5),
+ [CODA9_JPEG_FORMAT_422] = MCU_INFO(4, 3, 9, 5, 5),
+ [CODA9_JPEG_FORMAT_224] = MCU_INFO(4, 3, 6, 5, 5),
+ [CODA9_JPEG_FORMAT_444] = MCU_INFO(3, 3, 5, 5, 5),
+ [CODA9_JPEG_FORMAT_400] = MCU_INFO(1, 1, 5, 0, 0),
+};
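Reading these values against the MCU_INFO() packing suggests each comp*_info field is the component's sampling factors packed as (H << 2) | V; this is an inference from the table, not something stated in the patch:

/*
 * Assumed decoding of comp*_info as (H << 2) | V:
 *   4:2:0 -> MCU_INFO(6, 3, 10, 5, 5): 6 blocks/MCU (4 Y + Cb + Cr),
 *            Y = (2 << 2) | 2 = 10, Cb = Cr = (1 << 2) | 1 = 5
 *   4:2:2 -> MCU_INFO(4, 3, 9, 5, 5):  Y = (2 << 2) | 1 = 9
 *   4:0:0 -> MCU_INFO(1, 1, 5, 0, 0):  one Y block per MCU
 */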
+
+/*
+ * Convert Huffman table specifications to tables of codes and code lengths.
+ * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
+ *
+ * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
+ */
+static int coda9_jpeg_gen_enc_huff_tab(struct coda_ctx *ctx, int tab_num,
+ int *ehufsi, int *ehufco)
+{
+ int i, j, k, lastk, si, code, maxsymbol;
+ const u8 *bits, *huffval;
+ struct {
+ int size[256];
+ int code[256];
+ } *huff;
+ static const unsigned char *huff_tabs[4] = {
+ luma_dc, luma_ac, chroma_dc, chroma_ac,
+ };
+ int ret = -EINVAL;
+
+ huff = kzalloc(sizeof(*huff), GFP_KERNEL);
+ if (!huff)
+ return -ENOMEM;
+
+ bits = huff_tabs[tab_num];
+ huffval = huff_tabs[tab_num] + 16;
+
+ maxsymbol = tab_num & 1 ? 256 : 16;
+
+ /* Figure C.1 - Generation of table of Huffman code sizes */
+ k = 0;
+ for (i = 1; i <= 16; i++) {
+ j = bits[i - 1];
+ if (k + j > maxsymbol)
+ goto out;
+ while (j--)
+ huff->size[k++] = i;
+ }
+ lastk = k;
+
+ /* Figure C.2 - Generation of table of Huffman codes */
+ k = 0;
+ code = 0;
+ si = huff->size[0];
+ while (k < lastk) {
+ while (huff->size[k] == si) {
+ huff->code[k++] = code;
+ code++;
+ }
+ if (code >= (1 << si))
+ goto out;
+ code <<= 1;
+ si++;
+ }
+
+ /* Figure C.3 - Ordering procedure for encoding procedure code tables */
+ for (k = 0; k < lastk; k++) {
+ i = huffval[k];
+ if (i >= maxsymbol || ehufsi[i])
+ goto out;
+ ehufco[i] = huff->code[k];
+ ehufsi[i] = huff->size[k];
+ }
+
+ ret = 0;
+out:
+ kfree(huff);
+ return ret;
+}
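As a standalone worked example of Figures C.1/C.2 (independent of the driver code above), the luma DC "bits" array {0, 1, 5, 1, 1, 1, 1, 1, 1, 0, ...} expands into the canonical codes 00, 010..110, 1110, 11110, and so on:

/* Minimal userspace illustration of JPEG Annex C code generation. */
#include <stdio.h>

static void print_bin(int code, int len)
{
	for (int b = len - 1; b >= 0; b--)
		putchar((code >> b) & 1 ? '1' : '0');
}

int main(void)
{
	/* luma DC "bits": number of codes of each length 1..16 */
	const unsigned char bits[16] = {
		0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
	};
	int code = 0, symbol = 0;

	for (int len = 1; len <= 16; len++) {
		for (int n = 0; n < bits[len - 1]; n++) {
			printf("symbol %2d: len %2d code ", symbol++, len);
			print_bin(code++, len);
			putchar('\n');
		}
		code <<= 1;	/* Figure C.2: move to the next length */
	}
	return 0;
}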
+
+#define DC_TABLE_INDEX0 0
+#define AC_TABLE_INDEX0 1
+#define DC_TABLE_INDEX1 2
+#define AC_TABLE_INDEX1 3
+
+static int coda9_jpeg_load_huff_tab(struct coda_ctx *ctx)
+{
+ struct {
+ int size[4][256];
+ int code[4][256];
+ } *huff;
+ u32 *huff_data;
+ int i, j;
+ int ret;
+
+ huff = kzalloc(sizeof(*huff), GFP_KERNEL);
+ if (!huff)
+ return -ENOMEM;
+
+ /* Generate all four (luma/chroma DC/AC) code/size lookup tables */
+ for (i = 0; i < 4; i++) {
+ ret = coda9_jpeg_gen_enc_huff_tab(ctx, i, huff->size[i],
+ huff->code[i]);
+ if (ret)
+ goto out;
+ }
+
+ if (!ctx->params.jpeg_huff_data) {
+ ctx->params.jpeg_huff_data =
+ kzalloc(sizeof(u32) * CODA9_JPEG_ENC_HUFF_DATA_SIZE,
+ GFP_KERNEL);
+ if (!ctx->params.jpeg_huff_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ huff_data = ctx->params.jpeg_huff_data;
+
+ for (j = 0; j < 4; j++) {
+ /* Store Huffman lookup tables in AC0, AC1, DC0, DC1 order */
+ int t = (j == 0) ? AC_TABLE_INDEX0 :
+ (j == 1) ? AC_TABLE_INDEX1 :
+ (j == 2) ? DC_TABLE_INDEX0 :
+ DC_TABLE_INDEX1;
+ /* DC tables only have 16 entries */
+ int len = (j < 2) ? 256 : 16;
+
+ for (i = 0; i < len; i++) {
+ if (huff->size[t][i] == 0 && huff->code[t][i] == 0)
+ *(huff_data++) = 0;
+ else
+ *(huff_data++) =
+ ((huff->size[t][i] - 1) << 16) |
+ huff->code[t][i];
+ }
+ }
+
+ ret = 0;
+out:
+ kfree(huff);
+ return ret;
+}
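The per-symbol words stored above pack a code and its length into 32 bits. Assuming the layout implied by the expression, bits 31..16 hold the code length minus one and bits 15..0 hold the code itself, with an all-zero word marking an unused symbol:

/* Sketch of the assumed huff_data entry layout. */
static inline u32 huff_entry(int size, int code)
{
	return size ? (((u32)(size - 1) << 16) | (u32)code) : 0;
}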
+
+static void coda9_jpeg_write_huff_tab(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ u32 *huff_data = ctx->params.jpeg_huff_data;
+ int i;
+
+ /* Write Huffman size/code lookup tables in AC0, AC1, DC0, DC1 order */
+ coda_write(dev, 0x3, CODA9_REG_JPEG_HUFF_CTRL);
+ for (i = 0; i < CODA9_JPEG_ENC_HUFF_DATA_SIZE; i++)
+ coda_write(dev, *(huff_data++), CODA9_REG_JPEG_HUFF_DATA);
+ coda_write(dev, 0x0, CODA9_REG_JPEG_HUFF_CTRL);
+}
+
+static inline void coda9_jpeg_write_qmat_quotients(struct coda_dev *dev,
+ u8 *qmat, int index)
+{
+ int i;
+
+ coda_write(dev, index | 0x3, CODA9_REG_JPEG_QMAT_CTRL);
+ for (i = 0; i < 64; i++)
+ coda_write(dev, 0x80000 / qmat[i], CODA9_REG_JPEG_QMAT_DATA);
+ coda_write(dev, index, CODA9_REG_JPEG_QMAT_CTRL);
+}
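The quotient written to the hardware is a fixed-point reciprocal: 0x80000 is 2^19, so quantization becomes a multiply instead of a divide. For example, a quantizer step of 16 yields 0x80000 / 16 = 0x8000 = 2^15, which represents 2^15 / 2^19 = 1/16 in Q19 fixed point.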
+
+static void coda9_jpeg_load_qmat_tab(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ u8 *luma_tab;
+ u8 *chroma_tab;
+
+ luma_tab = ctx->params.jpeg_qmat_tab[0];
+ if (!luma_tab)
+ luma_tab = luma_q;
+
+ chroma_tab = ctx->params.jpeg_qmat_tab[1];
+ if (!chroma_tab)
+ chroma_tab = chroma_q;
+
+ coda9_jpeg_write_qmat_quotients(dev, luma_tab, 0x00);
+ coda9_jpeg_write_qmat_quotients(dev, chroma_tab, 0x40);
+ coda9_jpeg_write_qmat_quotients(dev, chroma_tab, 0x80);
+}
+
+struct coda_jpeg_stream {
+ u8 *curr;
+ u8 *end;
+};
+
+static inline int coda_jpeg_put_byte(u8 byte, struct coda_jpeg_stream *stream)
+{
+ if (stream->curr >= stream->end)
+ return -EINVAL;
+
+ *stream->curr++ = byte;
+
+ return 0;
+}
+
+static inline int coda_jpeg_put_word(u16 word, struct coda_jpeg_stream *stream)
+{
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ put_unaligned_be16(word, stream->curr);
+ stream->curr += sizeof(__be16);
+
+ return 0;
+}
+
+static int coda_jpeg_put_table(u16 marker, u8 index, const u8 *table,
+ size_t len, struct coda_jpeg_stream *stream)
+{
+ int i, ret;
+
+ ret = coda_jpeg_put_word(marker, stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_word(3 + len, stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_byte(index, stream);
+ for (i = 0; i < len && ret == 0; i++)
+ ret = coda_jpeg_put_byte(table[i], stream);
+
+ return ret;
+}
+
+static int coda_jpeg_define_quantization_table(struct coda_ctx *ctx, u8 index,
+ struct coda_jpeg_stream *stream)
+{
+ return coda_jpeg_put_table(DQT_MARKER, index,
+ ctx->params.jpeg_qmat_tab[index], 64,
+ stream);
+}
+
+static int coda_jpeg_define_huffman_table(u8 index, const u8 *table, size_t len,
+ struct coda_jpeg_stream *stream)
+{
+ return coda_jpeg_put_table(DHT_MARKER, index, table, len, stream);
+}
+
+static int coda9_jpeg_encode_header(struct coda_ctx *ctx, int len, u8 *buf)
+{
+ struct coda_jpeg_stream stream = { buf, buf + len };
+ struct coda_q_data *q_data_src;
+ int chroma_format, comp_num;
+ int i, ret, pad;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ chroma_format = coda9_jpeg_chroma_format(q_data_src->fourcc);
+ if (chroma_format < 0)
+ return 0;
+
+ /* Start Of Image */
+ ret = coda_jpeg_put_word(SOI_MARKER, &stream);
+ if (ret < 0)
+ return ret;
+
+ /* Define Restart Interval */
+ if (ctx->params.jpeg_restart_interval) {
+ ret = coda_jpeg_put_word(DRI_MARKER, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_word(4, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_word(ctx->params.jpeg_restart_interval,
+ &stream);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Define Quantization Tables */
+ ret = coda_jpeg_define_quantization_table(ctx, 0x00, &stream);
+ if (ret < 0)
+ return ret;
+ if (chroma_format != CODA9_JPEG_FORMAT_400) {
+ ret = coda_jpeg_define_quantization_table(ctx, 0x01, &stream);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Define Huffman Tables */
+ ret = coda_jpeg_define_huffman_table(0x00, luma_dc, 16 + 12, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_define_huffman_table(0x10, luma_ac, 16 + 162, &stream);
+ if (ret < 0)
+ return ret;
+ if (chroma_format != CODA9_JPEG_FORMAT_400) {
+ ret = coda_jpeg_define_huffman_table(0x01, chroma_dc, 16 + 12,
+ &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_define_huffman_table(0x11, chroma_ac, 16 + 162,
+ &stream);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Start Of Frame */
+ ret = coda_jpeg_put_word(SOF_MARKER, &stream);
+ if (ret < 0)
+ return ret;
+ comp_num = (chroma_format == CODA9_JPEG_FORMAT_400) ? 1 : 3;
+ ret = coda_jpeg_put_word(8 + comp_num * 3, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_byte(0x08, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_word(q_data_src->height, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_word(q_data_src->width, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_byte(comp_num, &stream);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < comp_num; i++) {
+ static unsigned char subsampling[5][3] = {
+ [CODA9_JPEG_FORMAT_420] = { 0x22, 0x11, 0x11 },
+ [CODA9_JPEG_FORMAT_422] = { 0x21, 0x11, 0x11 },
+ [CODA9_JPEG_FORMAT_224] = { 0x12, 0x11, 0x11 },
+ [CODA9_JPEG_FORMAT_444] = { 0x11, 0x11, 0x11 },
+ [CODA9_JPEG_FORMAT_400] = { 0x11 },
+ };
+
+ /* Component identifier, matches SOS */
+ ret = coda_jpeg_put_byte(i + 1, &stream);
+ if (ret < 0)
+ return ret;
+ ret = coda_jpeg_put_byte(subsampling[chroma_format][i],
+ &stream);
+ if (ret < 0)
+ return ret;
+ /* Chroma table index */
+ ret = coda_jpeg_put_byte((i == 0) ? 0 : 1, &stream);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Pad to multiple of 8 bytes */
+ pad = (stream.curr - buf) % 8;
+ if (pad) {
+ pad = 8 - pad;
+ while (pad--) {
+ ret = coda_jpeg_put_byte(0x00, &stream);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return stream.curr - buf;
+}
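For reference, the SOF0 segment this function emits for a hypothetical 1920x1080 4:2:0 frame lays out as follows (byte values per ITU-T.81; the dimensions are illustrative, not from the patch):

/*
 *   ff c0        SOF_MARKER
 *   00 11        length = 8 + 3 * 3 = 17
 *   08           sample precision, 8 bits
 *   04 38        height, 1080
 *   07 80        width, 1920
 *   03           3 components
 *   01 22 00     Y:  id 1, H=2 V=2, quantization table 0
 *   02 11 01     Cb: id 2, H=1 V=1, quantization table 1
 *   03 11 01     Cr: id 3, H=1 V=1, quantization table 1
 */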
+
/*
* Scale quantization table using nonlinear scaling factor
* u8 qtab[64], scale [50,190]
@@ -247,3 +669,279 @@ void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality)
coda_scale_quant_table(ctx->params.jpeg_qmat_tab[1], scale);
}
}
+
+/*
+ * Encoder context operations
+ */
+
+static int coda9_jpeg_start_encoding(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ ret = coda9_jpeg_load_huff_tab(ctx);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n");
+ return ret;
+ }
+ if (!ctx->params.jpeg_qmat_tab[0])
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+ if (!ctx->params.jpeg_qmat_tab[1])
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+
+ return 0;
+}
+
+static int coda9_jpeg_prepare_encode(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ u32 start_addr, end_addr;
+ u16 aligned_width, aligned_height;
+ bool chroma_interleave;
+ int chroma_format;
+ int header_len;
+ int ret;
+ ktime_t timeout;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == 0)
+ vb2_set_plane_payload(&src_buf->vb2_buf, 0,
+ vb2_plane_size(&src_buf->vb2_buf, 0));
+
+ src_buf->sequence = ctx->osequence;
+ dst_buf->sequence = ctx->osequence;
+ ctx->osequence++;
+
+ src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
+
+ coda_set_gdi_regs(ctx);
+
+ start_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ end_addr = start_addr + vb2_plane_size(&dst_buf->vb2_buf, 0);
+
+ chroma_format = coda9_jpeg_chroma_format(q_data_src->fourcc);
+ if (chroma_format < 0)
+ return chroma_format;
+
+ /* Round image dimensions to multiple of MCU size */
+ aligned_width = round_up(q_data_src->width, width_align[chroma_format]);
+ aligned_height = round_up(q_data_src->height,
+ height_align[chroma_format]);
+ if (aligned_width != q_data_src->bytesperline) {
+ v4l2_err(&dev->v4l2_dev, "wrong stride: %d instead of %d\n",
+ aligned_width, q_data_src->bytesperline);
+ }
+
+ header_len =
+ coda9_jpeg_encode_header(ctx,
+ vb2_plane_size(&dst_buf->vb2_buf, 0),
+ vb2_plane_vaddr(&dst_buf->vb2_buf, 0));
+ if (header_len < 0)
+ return header_len;
+
+ coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_BAS_ADDR);
+ coda_write(dev, end_addr, CODA9_REG_JPEG_BBC_END_ADDR);
+ coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_WR_PTR);
+ coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_RD_PTR);
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_CUR_POS);
+ /* 64 words per 256-byte page */
+ coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
+ coda_write(dev, start_addr, CODA9_REG_JPEG_BBC_EXT_ADDR);
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_INT_ADDR);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_BT_PTR);
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_WD_PTR);
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR);
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_STRM_CTRL);
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_CTRL);
+ coda_write(dev, 0, CODA9_REG_JPEG_GBU_FF_RPTR);
+ coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER);
+ coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBIR);
+ coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBHR);
+
+ chroma_interleave = (q_data_src->fourcc == V4L2_PIX_FMT_NV12);
+ coda_write(dev, CODA9_JPEG_PIC_CTRL_TC_DIRECTION |
+ CODA9_JPEG_PIC_CTRL_ENCODER_EN, CODA9_REG_JPEG_PIC_CTRL);
+ coda_write(dev, 0, CODA9_REG_JPEG_SCL_INFO);
+ coda_write(dev, chroma_interleave, CODA9_REG_JPEG_DPB_CONFIG);
+ coda_write(dev, ctx->params.jpeg_restart_interval,
+ CODA9_REG_JPEG_RST_INTVAL);
+ coda_write(dev, 1, CODA9_REG_JPEG_BBC_CTRL);
+
+ coda_write(dev, bus_req_num[chroma_format], CODA9_REG_JPEG_OP_INFO);
+
+ coda9_jpeg_write_huff_tab(ctx);
+ coda9_jpeg_load_qmat_tab(ctx);
+
+ if (ctx->params.rot_mode & CODA_ROT_90) {
+ aligned_width = aligned_height;
+ aligned_height = q_data_src->bytesperline;
+ if (chroma_format == CODA9_JPEG_FORMAT_422)
+ chroma_format = CODA9_JPEG_FORMAT_224;
+ else if (chroma_format == CODA9_JPEG_FORMAT_224)
+ chroma_format = CODA9_JPEG_FORMAT_422;
+ }
+ /* These need to be multiples of MCU size */
+ coda_write(dev, aligned_width << 16 | aligned_height,
+ CODA9_REG_JPEG_PIC_SIZE);
+ coda_write(dev, ctx->params.rot_mode ?
+ (CODA_ROT_MIR_ENABLE | ctx->params.rot_mode) : 0,
+ CODA9_REG_JPEG_ROT_INFO);
+
+ coda_write(dev, mcu_info[chroma_format], CODA9_REG_JPEG_MCU_INFO);
+
+ coda_write(dev, 1, CODA9_GDI_CONTROL);
+ timeout = ktime_add_us(ktime_get(), 100000);
+ do {
+ ret = coda_read(dev, CODA9_GDI_STATUS);
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ v4l2_err(&dev->v4l2_dev, "timeout waiting for GDI\n");
+ return -ETIMEDOUT;
+ }
+ } while (!ret);
+
+ coda_write(dev, (chroma_format << 17) | (chroma_interleave << 16) |
+ q_data_src->bytesperline, CODA9_GDI_INFO_CONTROL);
+ /* The content of this register seems to be irrelevant: */
+ coda_write(dev, aligned_width << 16 | aligned_height,
+ CODA9_GDI_INFO_PIC_SIZE);
+
+ coda_write_base(ctx, q_data_src, src_buf, CODA9_GDI_INFO_BASE_Y);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_DPB_BASE00);
+ coda_write(dev, 0, CODA9_GDI_CONTROL);
+ coda_write(dev, 1, CODA9_GDI_PIC_INIT_HOST);
+
+ coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
+ coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
+
+ trace_coda_jpeg_run(ctx, src_buf);
+
+ coda_write(dev, 1, CODA9_REG_JPEG_PIC_START);
+
+ return 0;
+}
+
+static void coda9_jpeg_finish_encode(struct coda_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr, start_ptr;
+ u32 err_mb;
+
+ if (ctx->aborting) {
+ coda_write(ctx->dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
+ return;
+ }
+
+ /*
+ * Lock to make sure that an encoder stop command running in parallel
+ * will either already have marked src_buf as last, or it will wake up
+ * the capture queue after the buffers are returned.
+ */
+ mutex_lock(&ctx->wakeup_mutex);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ trace_coda_jpeg_done(ctx, dst_buf);
+
+ /*
+ * Set plane payload to the number of bytes written out
+ * by the JPEG processing unit
+ */
+ start_ptr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
+
+ err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
+ if (err_mb)
+ coda_dbg(1, ctx, "ERRMB: 0x%x\n", err_mb);
+
+ coda_write(dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
+
+ dst_buf->flags &= ~(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_LAST);
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ coda_m2m_buf_done(ctx, dst_buf, err_mb ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+ mutex_unlock(&ctx->wakeup_mutex);
+
+ coda_dbg(1, ctx, "job finished: encoded frame (%u)%s\n",
+ dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
+}
+
+static void coda9_jpeg_release(struct coda_ctx *ctx)
+{
+ int i;
+
+ if (ctx->params.jpeg_qmat_tab[0] == luma_q)
+ ctx->params.jpeg_qmat_tab[0] = NULL;
+ if (ctx->params.jpeg_qmat_tab[1] == chroma_q)
+ ctx->params.jpeg_qmat_tab[1] = NULL;
+ for (i = 0; i < 3; i++)
+ kfree(ctx->params.jpeg_qmat_tab[i]);
+ kfree(ctx->params.jpeg_huff_data);
+}
+
+const struct coda_context_ops coda9_jpeg_encode_ops = {
+ .queue_init = coda_encoder_queue_init,
+ .start_streaming = coda9_jpeg_start_encoding,
+ .prepare_run = coda9_jpeg_prepare_encode,
+ .finish_run = coda9_jpeg_finish_encode,
+ .release = coda9_jpeg_release,
+};
+
+irqreturn_t coda9_jpeg_irq_handler(int irq, void *data)
+{
+ struct coda_dev *dev = data;
+ struct coda_ctx *ctx;
+ int status;
+ int err_mb;
+
+ status = coda_read(dev, CODA9_REG_JPEG_PIC_STATUS);
+ if (status == 0)
+ return IRQ_HANDLED;
+ coda_write(dev, status, CODA9_REG_JPEG_PIC_STATUS);
+
+ if (status & CODA9_JPEG_STATUS_OVERFLOW)
+ v4l2_err(&dev->v4l2_dev, "JPEG overflow\n");
+
+ if (status & CODA9_JPEG_STATUS_BBC_INT)
+ v4l2_err(&dev->v4l2_dev, "JPEG BBC interrupt\n");
+
+ if (status & CODA9_JPEG_STATUS_ERROR) {
+ v4l2_err(&dev->v4l2_dev, "JPEG error\n");
+
+ err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
+ if (err_mb) {
+ v4l2_err(&dev->v4l2_dev,
+ "ERRMB: 0x%x: rst idx %d, mcu pos (%d,%d)\n",
+ err_mb, err_mb >> 24, (err_mb >> 12) & 0xfff,
+ err_mb & 0xfff);
+ }
+ }
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ mutex_unlock(&dev->coda_mutex);
+ return IRQ_HANDLED;
+ }
+
+ complete(&ctx->completion);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 9f226140b486..43bda175f517 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -126,6 +126,7 @@ struct coda_params {
u8 jpeg_quality;
u8 jpeg_restart_interval;
u8 *jpeg_qmat_tab[3];
+ u32 *jpeg_huff_data;
int codec_mode;
int codec_mode_aux;
enum v4l2_mpeg_video_multi_slice_mode slice_mode;
@@ -366,7 +367,9 @@ void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
extern const struct coda_context_ops coda_bit_encode_ops;
extern const struct coda_context_ops coda_bit_decode_ops;
+extern const struct coda_context_ops coda9_jpeg_encode_ops;
irqreturn_t coda_irq_handler(int irq, void *data);
+irqreturn_t coda9_jpeg_irq_handler(int irq, void *data);
#endif /* __CODA_H__ */
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index b17464b56d3d..da5bb3212528 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -451,12 +451,21 @@
#define CODA9_CMD_FIRMWARE_CODE_REV 0x1c4
#define CODA9_GDMA_BASE 0x1000
+#define CODA9_GDI_CONTROL (CODA9_GDMA_BASE + 0x034)
+#define CODA9_GDI_PIC_INIT_HOST (CODA9_GDMA_BASE + 0x038)
+#define CODA9_GDI_STATUS (CODA9_GDMA_BASE + 0x080)
#define CODA9_GDI_WPROT_ERR_CLR (CODA9_GDMA_BASE + 0x0a0)
#define CODA9_GDI_WPROT_RGN_EN (CODA9_GDMA_BASE + 0x0ac)
#define CODA9_GDI_BUS_CTRL (CODA9_GDMA_BASE + 0x0f0)
#define CODA9_GDI_BUS_STATUS (CODA9_GDMA_BASE + 0x0f4)
+#define CODA9_GDI_INFO_CONTROL (CODA9_GDMA_BASE + 0x400)
+#define CODA9_GDI_INFO_PIC_SIZE (CODA9_GDMA_BASE + 0x404)
+#define CODA9_GDI_INFO_BASE_Y (CODA9_GDMA_BASE + 0x408)
+#define CODA9_GDI_INFO_BASE_CB (CODA9_GDMA_BASE + 0x40c)
+#define CODA9_GDI_INFO_BASE_CR (CODA9_GDMA_BASE + 0x410)
+
#define CODA9_GDI_XY2_CAS_0 (CODA9_GDMA_BASE + 0x800)
#define CODA9_GDI_XY2_CAS_F (CODA9_GDMA_BASE + 0x83c)
@@ -477,4 +486,78 @@
#define CODA9_GDI_RBC2_AXI_1F (CODA9_GDMA_BASE + 0x91c)
#define CODA9_GDI_TILEDBUF_BASE (CODA9_GDMA_BASE + 0x920)
+#define CODA9_JPEG_BASE 0x3000
+#define CODA9_REG_JPEG_PIC_START (CODA9_JPEG_BASE + 0x000)
+#define CODA9_REG_JPEG_PIC_STATUS (CODA9_JPEG_BASE + 0x004)
+#define CODA9_JPEG_STATUS_OVERFLOW BIT(3)
+#define CODA9_JPEG_STATUS_BBC_INT BIT(2)
+#define CODA9_JPEG_STATUS_ERROR BIT(1)
+#define CODA9_JPEG_STATUS_DONE BIT(0)
+#define CODA9_REG_JPEG_PIC_ERRMB (CODA9_JPEG_BASE + 0x008)
+#define CODA9_JPEG_ERRMB_RESTART_IDX_MASK (0xf << 24)
+#define CODA9_JPEG_ERRMB_MCU_POS_X_MASK (0xfff << 12)
+#define CODA9_JPEG_ERRMB_MCU_POS_Y_MASK 0xfff
+#define CODA9_REG_JPEG_PIC_CTRL (CODA9_JPEG_BASE + 0x010)
+#define CODA9_JPEG_PIC_CTRL_USER_HUFFMAN_EN BIT(6)
+#define CODA9_JPEG_PIC_CTRL_TC_DIRECTION BIT(4)
+#define CODA9_JPEG_PIC_CTRL_ENCODER_EN BIT(3)
+#define CODA9_REG_JPEG_PIC_SIZE (CODA9_JPEG_BASE + 0x014)
+#define CODA9_REG_JPEG_MCU_INFO (CODA9_JPEG_BASE + 0x018)
+#define CODA9_JPEG_MCU_BLOCK_NUM_OFFSET 16
+#define CODA9_JPEG_COMP_NUM_OFFSET 12
+#define CODA9_JPEG_COMP0_INFO_OFFSET 8
+#define CODA9_JPEG_COMP1_INFO_OFFSET 4
+#define CODA9_JPEG_COMP2_INFO_OFFSET 0
+#define CODA9_REG_JPEG_ROT_INFO (CODA9_JPEG_BASE + 0x01c)
+#define CODA9_JPEG_ROT_MIR_ENABLE BIT(4)
+#define CODA9_JPEG_ROT_MIR_MODE_MASK 0xf
+#define CODA9_REG_JPEG_SCL_INFO (CODA9_JPEG_BASE + 0x020)
+#define CODA9_JPEG_SCL_ENABLE BIT(4)
+#define CODA9_JPEG_SCL_HOR_MODE_MASK (0x3 << 2)
+#define CODA9_JPEG_SCL_VER_MODE_MASK (0x3 << 0)
+#define CODA9_REG_JPEG_IF_INFO (CODA9_JPEG_BASE + 0x024)
+#define CODA9_JPEG_SENS_IF_CLR BIT(1)
+#define CODA9_JPEG_DISP_IF_CLR BIT(0)
+#define CODA9_REG_JPEG_OP_INFO (CODA9_JPEG_BASE + 0x02c)
+#define CODA9_JPEG_BUS_REQ_NUM_OFFSET 0
+#define CODA9_JPEG_BUS_REQ_NUM_MASK 0x7
+#define CODA9_REG_JPEG_DPB_CONFIG (CODA9_JPEG_BASE + 0x030)
+#define CODA9_REG_JPEG_DPB_BASE00 (CODA9_JPEG_BASE + 0x040)
+#define CODA9_REG_JPEG_HUFF_CTRL (CODA9_JPEG_BASE + 0x080)
+#define CODA9_REG_JPEG_HUFF_ADDR (CODA9_JPEG_BASE + 0x084)
+#define CODA9_REG_JPEG_HUFF_DATA (CODA9_JPEG_BASE + 0x088)
+#define CODA9_REG_JPEG_QMAT_CTRL (CODA9_JPEG_BASE + 0x090)
+#define CODA9_REG_JPEG_QMAT_ADDR (CODA9_JPEG_BASE + 0x094)
+#define CODA9_REG_JPEG_QMAT_DATA (CODA9_JPEG_BASE + 0x098)
+#define CODA9_REG_JPEG_RST_INTVAL (CODA9_JPEG_BASE + 0x0b0)
+#define CODA9_REG_JPEG_RST_INDEX (CODA9_JPEG_BASE + 0x0b4)
+#define CODA9_REG_JPEG_RST_COUNT (CODA9_JPEG_BASE + 0x0b8)
+#define CODA9_REG_JPEG_DPCM_DIFF_Y (CODA9_JPEG_BASE + 0x0f0)
+#define CODA9_REG_JPEG_DPCM_DIFF_CB (CODA9_JPEG_BASE + 0x0f4)
+#define CODA9_REG_JPEG_DPCM_DIFF_CR (CODA9_JPEG_BASE + 0x0f8)
+#define CODA9_REG_JPEG_GBU_CTRL (CODA9_JPEG_BASE + 0x100)
+#define CODA9_REG_JPEG_GBU_BT_PTR (CODA9_JPEG_BASE + 0x110)
+#define CODA9_REG_JPEG_GBU_WD_PTR (CODA9_JPEG_BASE + 0x114)
+#define CODA9_REG_JPEG_GBU_TT_CNT (CODA9_JPEG_BASE + 0x118)
+#define CODA9_REG_JPEG_GBU_BBSR (CODA9_JPEG_BASE + 0x140)
+#define CODA9_REG_JPEG_GBU_BBER (CODA9_JPEG_BASE + 0x144)
+#define CODA9_REG_JPEG_GBU_BBIR (CODA9_JPEG_BASE + 0x148)
+#define CODA9_REG_JPEG_GBU_BBHR (CODA9_JPEG_BASE + 0x14c)
+#define CODA9_REG_JPEG_GBU_BCNT (CODA9_JPEG_BASE + 0x158)
+#define CODA9_REG_JPEG_GBU_FF_RPTR (CODA9_JPEG_BASE + 0x160)
+#define CODA9_REG_JPEG_GBU_FF_WPTR (CODA9_JPEG_BASE + 0x164)
+#define CODA9_REG_JPEG_BBC_END_ADDR (CODA9_JPEG_BASE + 0x208)
+#define CODA9_REG_JPEG_BBC_WR_PTR (CODA9_JPEG_BASE + 0x20c)
+#define CODA9_REG_JPEG_BBC_RD_PTR (CODA9_JPEG_BASE + 0x210)
+#define CODA9_REG_JPEG_BBC_EXT_ADDR (CODA9_JPEG_BASE + 0x214)
+#define CODA9_REG_JPEG_BBC_INT_ADDR (CODA9_JPEG_BASE + 0x218)
+#define CODA9_REG_JPEG_BBC_DATA_CNT (CODA9_JPEG_BASE + 0x21c)
+#define CODA9_REG_JPEG_BBC_COMMAND (CODA9_JPEG_BASE + 0x220)
+#define CODA9_REG_JPEG_BBC_BUSY (CODA9_JPEG_BASE + 0x224)
+#define CODA9_REG_JPEG_BBC_CTRL (CODA9_JPEG_BASE + 0x228)
+#define CODA9_REG_JPEG_BBC_CUR_POS (CODA9_JPEG_BASE + 0x22c)
+#define CODA9_REG_JPEG_BBC_BAS_ADDR (CODA9_JPEG_BASE + 0x230)
+#define CODA9_REG_JPEG_BBC_STRM_CTRL (CODA9_JPEG_BASE + 0x234)
+#define CODA9_REG_JPEG_BBC_FLUSH_CMD (CODA9_JPEG_BASE + 0x238)
+
#endif
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index 6cf58237fff2..c0791c847f7c 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -154,6 +154,16 @@ DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
TP_ARGS(ctx, buf, meta)
);
+DEFINE_EVENT(coda_buf_class, coda_jpeg_run,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+ TP_ARGS(ctx, buf)
+);
+
+DEFINE_EVENT(coda_buf_class, coda_jpeg_done,
+ TP_PROTO(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf),
+ TP_ARGS(ctx, buf)
+);
+
#endif /* __CODA_TRACE_H__ */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index f048e8994785..0e7e2772f08f 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -14,7 +14,6 @@
#include <linux/cec.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <media/cec.h>
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index f299baf7cbe0..e06d113dfe96 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -883,7 +883,7 @@ static int dm355_ccdc_probe(struct platform_device *pdev)
goto fail_nores;
}
- ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
if (!ccdc_cfg.base_addr) {
status = -ENOMEM;
goto fail_nomem;
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 2fc6c9c38f9c..c6378c4e0074 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -817,7 +817,7 @@ static int dm644x_ccdc_probe(struct platform_device *pdev)
goto fail_nores;
}
- ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+ ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
if (!ccdc_cfg.base_addr) {
status = -ENOMEM;
goto fail_nomem;
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index e2e7ab7b7f45..b49378b18e5d 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1045,7 +1045,7 @@ static int isif_probe(struct platform_device *pdev)
status = -EBUSY;
goto fail_nobase_res;
}
- addr = ioremap_nocache(res->start, resource_size(res));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
status = -ENOMEM;
goto fail_base_iomap;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 916ed743d716..9b1d9643589b 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -168,21 +168,22 @@ int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
int ret = 0;
printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
- BUG_ON(!dev->hw_ops.open);
- BUG_ON(!dev->hw_ops.enable);
- BUG_ON(!dev->hw_ops.set_hw_if_params);
- BUG_ON(!dev->hw_ops.configure);
- BUG_ON(!dev->hw_ops.set_buftype);
- BUG_ON(!dev->hw_ops.get_buftype);
- BUG_ON(!dev->hw_ops.enum_pix);
- BUG_ON(!dev->hw_ops.set_frame_format);
- BUG_ON(!dev->hw_ops.get_frame_format);
- BUG_ON(!dev->hw_ops.get_pixel_format);
- BUG_ON(!dev->hw_ops.set_pixel_format);
- BUG_ON(!dev->hw_ops.set_image_window);
- BUG_ON(!dev->hw_ops.get_image_window);
- BUG_ON(!dev->hw_ops.get_line_length);
- BUG_ON(!dev->hw_ops.getfid);
+ if (!dev->hw_ops.open ||
+ !dev->hw_ops.enable ||
+ !dev->hw_ops.set_hw_if_params ||
+ !dev->hw_ops.configure ||
+ !dev->hw_ops.set_buftype ||
+ !dev->hw_ops.get_buftype ||
+ !dev->hw_ops.enum_pix ||
+ !dev->hw_ops.set_frame_format ||
+ !dev->hw_ops.get_frame_format ||
+ !dev->hw_ops.get_pixel_format ||
+ !dev->hw_ops.set_pixel_format ||
+ !dev->hw_ops.set_image_window ||
+ !dev->hw_ops.get_image_window ||
+ !dev->hw_ops.get_line_length ||
+ !dev->hw_ops.getfid)
+ return -EINVAL;
mutex_lock(&ccdc_lock);
if (!ccdc_cfg) {
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index c1e29a46ae69..aeaed2cf4458 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -229,6 +229,9 @@ static int mtk_mdp_remove(struct platform_device *pdev)
mtk_mdp_unregister_m2m_device(mdp);
v4l2_device_unregister(&mdp->v4l2_dev);
+ flush_workqueue(mdp->wdt_wq);
+ destroy_workqueue(mdp->wdt_wq);
+
flush_workqueue(mdp->job_wq);
destroy_workqueue(mdp->job_wq);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 858727824889..0f3e710aed4e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -104,6 +104,7 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
{
struct vdec_fb *disp_frame_buffer = NULL;
struct mtk_video_dec_buf *dstbuf;
+ struct vb2_v4l2_buffer *vb;
mtk_v4l2_debug(3, "[%d]", ctx->id);
if (vdec_if_get_param(ctx,
@@ -121,25 +122,26 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
mutex_lock(&ctx->lock);
if (dstbuf->used) {
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
- ctx->picinfo.fb_sz[0]);
+ vb2_set_plane_payload(&vb->vb2_buf, 0,
+ ctx->picinfo.fb_sz[0]);
if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+ vb2_set_plane_payload(&vb->vb2_buf, 1,
ctx->picinfo.fb_sz[1]);
mtk_v4l2_debug(2,
"[%d]status=%x queue id=%d to done_list %d",
ctx->id, disp_frame_buffer->status,
- dstbuf->vb.vb2_buf.index,
+ vb->vb2_buf.index,
dstbuf->queued_in_vb2);
- v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
ctx->decoded_frame_cnt++;
}
mutex_unlock(&ctx->lock);
- return &dstbuf->vb.vb2_buf;
+ return &vb->vb2_buf;
}
/*
@@ -154,6 +156,7 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
{
struct mtk_video_dec_buf *dstbuf;
struct vdec_fb *free_frame_buffer = NULL;
+ struct vb2_v4l2_buffer *vb;
if (vdec_if_get_param(ctx,
GET_PARAM_FREE_FRAME_BUFFER,
@@ -171,6 +174,7 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
mutex_lock(&ctx->lock);
if (dstbuf->used) {
@@ -187,9 +191,9 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
mtk_v4l2_debug(2,
"[%d]status=%x queue id=%d to rdy_queue %d",
ctx->id, free_frame_buffer->status,
- dstbuf->vb.vb2_buf.index,
+ vb->vb2_buf.index,
dstbuf->queued_in_vb2);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
} else if ((dstbuf->queued_in_vb2 == false) &&
(dstbuf->queued_in_v4l2 == true)) {
/*
@@ -205,8 +209,8 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
mtk_v4l2_debug(2,
"[%d]status=%x queue id=%d to rdy_queue",
ctx->id, free_frame_buffer->status,
- dstbuf->vb.vb2_buf.index);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+ vb->vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
dstbuf->queued_in_vb2 = true;
} else {
/*
@@ -219,14 +223,14 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
*/
mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
ctx->id, free_frame_buffer->status,
- dstbuf->vb.vb2_buf.index,
+ vb->vb2_buf.index,
dstbuf->queued_in_vb2,
dstbuf->queued_in_v4l2);
}
dstbuf->used = false;
}
mutex_unlock(&ctx->lock);
- return &dstbuf->vb.vb2_buf;
+ return &vb->vb2_buf;
}
static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
@@ -365,8 +369,10 @@ static void mtk_vdec_worker(struct work_struct *work)
return;
}
- src_buf_info = container_of(src_buf, struct mtk_video_dec_buf, vb);
- dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf, vb);
+ src_buf_info = container_of(src_buf, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf,
+ m2m_buf.vb);
pfb = &dst_buf_info->frame_buffer;
pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -397,11 +403,11 @@ static void mtk_vdec_worker(struct work_struct *work)
vdec_if_decode(ctx, NULL, NULL, &res_chg);
clean_display_buffer(ctx);
- vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
clean_free_buffer(ctx);
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
return;
@@ -417,10 +423,8 @@ static void mtk_vdec_worker(struct work_struct *work)
}
mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
- dst_buf_info->vb.vb2_buf.timestamp
- = src_buf_info->vb.vb2_buf.timestamp;
- dst_buf_info->vb.timecode
- = src_buf_info->vb.timecode;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
mutex_lock(&ctx->lock);
dst_buf_info->used = true;
mutex_unlock(&ctx->lock);
@@ -434,7 +438,7 @@ static void mtk_vdec_worker(struct work_struct *work)
ctx->id,
src_buf->vb2_buf.index,
buf.size,
- src_buf_info->vb.vb2_buf.timestamp,
+ src_buf->vb2_buf.timestamp,
dst_buf->vb2_buf.index,
ret, res_chg);
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
@@ -443,14 +447,14 @@ static void mtk_vdec_worker(struct work_struct *work)
src_buf_info->error = true;
mutex_unlock(&ctx->lock);
}
- v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
} else if (res_chg == false) {
/*
* we only return src buffer with VB2_BUF_STATE_DONE
* when decode success without resolution change
*/
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
}
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
@@ -522,7 +526,8 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
return 0;
}
- v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx,
+ &ctx->empty_flush_buf->m2m_buf.vb);
v4l2_m2m_try_schedule(ctx->m2m_ctx);
break;
@@ -1148,7 +1153,8 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
*/
if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
vb2_v4l2 = to_vb2_v4l2_buffer(vb);
- buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
+ m2m_buf.vb);
mutex_lock(&ctx->lock);
if (buf->used == false) {
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
@@ -1175,7 +1181,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
mtk_v4l2_err("No src buffer");
return;
}
- buf = container_of(src_buf, struct mtk_video_dec_buf, vb);
+ buf = container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
if (buf->lastframe) {
/* This shouldn't happen. Just in case. */
mtk_v4l2_err("Invalid flush buffer.");
@@ -1256,7 +1262,7 @@ static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
bool buf_error;
vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
- buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, m2m_buf.vb);
mutex_lock(&ctx->lock);
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
buf->queued_in_v4l2 = false;
@@ -1276,7 +1282,7 @@ static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
struct vb2_v4l2_buffer, vb2_buf);
struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
- struct mtk_video_dec_buf, vb);
+ struct mtk_video_dec_buf, m2m_buf.vb);
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
buf->used = false;
@@ -1309,7 +1315,7 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
struct mtk_video_dec_buf *buf_info = container_of(
- src_buf, struct mtk_video_dec_buf, vb);
+ src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
if (!buf_info->lastframe)
v4l2_m2m_buf_done(src_buf,
VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index e0c5338bde3d..cf26b6c1486a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -9,7 +9,7 @@
#define _MTK_VCODEC_DEC_H_
#include <media/videobuf2-core.h>
-#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
#define VCODEC_CAPABILITY_4K_DISABLED 0x10
#define VCODEC_DEC_4K_CODED_WIDTH 4096U
@@ -33,7 +33,7 @@ struct vdec_fb {
/**
* struct mtk_video_dec_buf - Private data related to each VB2 buffer.
- * @b: VB2 buffer
+ * @m2m_buf: M2M buffer
* @list: link list
* @used: Capture buffer contain decoded frame data and keep in
* codec data structure
@@ -47,8 +47,7 @@ struct vdec_fb {
* Note : These status information help us track and debug buffer state
*/
struct mtk_video_dec_buf {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
+ struct v4l2_m2m_buffer m2m_buf;
bool used;
bool queued_in_vb2;
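The container_of() conversions throughout the mtk-vcodec changes above work because struct v4l2_m2m_buffer (include/media/v4l2-mem2mem.h) embeds the vb2 buffer together with the list head the m2m framework needs:

struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/* so a queued vb2_v4l2_buffer recovers the driver wrapper with: */
buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, m2m_buf.vb);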
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 944771ee5f5c..100ae8c5e702 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -137,7 +137,7 @@ static int fops_vcodec_open(struct file *file)
}
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- ctx->empty_flush_buf->vb.vb2_buf.vb2_queue = src_vq;
+ ctx->empty_flush_buf->m2m_buf.vb.vb2_buf.vb2_queue = src_vq;
ctx->empty_flush_buf->lastframe = true;
mtk_vcodec_dec_set_default_params(ctx);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index fd8de027e83e..d469ff6464b2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -332,14 +332,12 @@ static int vidioc_try_fmt(struct v4l2_format *f,
pix_fmt_mp->num_planes = fmt->num_planes;
pix_fmt_mp->plane_fmt[0].sizeimage =
- pix_fmt_mp->width * pix_fmt_mp->height +
- ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
+ pix_fmt_mp->width * pix_fmt_mp->height;
pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
if (pix_fmt_mp->num_planes == 2) {
pix_fmt_mp->plane_fmt[1].sizeimage =
- (pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
- (ALIGN(pix_fmt_mp->width, 16) * 16);
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2;
pix_fmt_mp->plane_fmt[2].sizeimage = 0;
pix_fmt_mp->plane_fmt[1].bytesperline =
pix_fmt_mp->width;
@@ -347,8 +345,7 @@ static int vidioc_try_fmt(struct v4l2_format *f,
} else if (pix_fmt_mp->num_planes == 3) {
pix_fmt_mp->plane_fmt[1].sizeimage =
pix_fmt_mp->plane_fmt[2].sizeimage =
- (pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
- ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 4;
pix_fmt_mp->plane_fmt[1].bytesperline =
pix_fmt_mp->plane_fmt[2].bytesperline =
pix_fmt_mp->width / 2;
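
[Editor's note] Illustrative arithmetic for the sizeimage change above, assuming a hypothetical 1280x720 two-plane NV12 format; the removed terms were a per-plane working margin on top of the raw payload:

	unsigned int w = 1280, h = 720;
	unsigned int y_size  = w * h;		/* plane 0: 921600 bytes */
	unsigned int uv_size = w * h / 2;	/* plane 1: 460800 bytes */
	/*
	 * The old code added ((ALIGN(w, 16) * 2) * 16) = 40960 extra
	 * bytes to plane 0 and (ALIGN(w, 16) * 16) = 20480 to plane 1.
	 */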
@@ -798,13 +795,14 @@ static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
struct mtk_video_enc_buf *mtk_buf =
- container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+ container_of(vb2_v4l2, struct mtk_video_enc_buf,
+ m2m_buf.vb);
if ((vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
(ctx->param_change != MTK_ENCODE_PARAM_NONE)) {
mtk_v4l2_debug(1, "[%d] Before id=%d encode parameter change %x",
ctx->id,
- mtk_buf->vb.vb2_buf.index,
+ vb2_v4l2->vb2_buf.index,
ctx->param_change);
mtk_buf->param_change = ctx->param_change;
mtk_buf->enc_params = ctx->enc_params;
@@ -986,7 +984,8 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
struct venc_enc_param enc_prm;
struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
struct mtk_video_enc_buf *mtk_buf =
- container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+ container_of(vb2_v4l2, struct mtk_video_enc_buf,
+ m2m_buf.vb);
int ret = 0;
@@ -998,7 +997,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
enc_prm.bitrate = mtk_buf->enc_params.bitrate;
mtk_v4l2_debug(1, "[%d] id=%d, change param br=%d",
ctx->id,
- mtk_buf->vb.vb2_buf.index,
+ vb2_v4l2->vb2_buf.index,
enc_prm.bitrate);
ret |= venc_if_set_param(ctx,
VENC_SET_PARAM_ADJUST_BITRATE,
@@ -1009,7 +1008,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
mtk_buf->enc_params.framerate_denom;
mtk_v4l2_debug(1, "[%d] id=%d, change param fr=%d",
ctx->id,
- mtk_buf->vb.vb2_buf.index,
+ vb2_v4l2->vb2_buf.index,
enc_prm.frm_rate);
ret |= venc_if_set_param(ctx,
VENC_SET_PARAM_ADJUST_FRAMERATE,
@@ -1026,7 +1025,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) {
mtk_v4l2_debug(1, "[%d] id=%d, change param force I=%d",
ctx->id,
- mtk_buf->vb.vb2_buf.index,
+ vb2_v4l2->vb2_buf.index,
mtk_buf->enc_params.force_intra);
if (mtk_buf->enc_params.force_intra)
ret |= venc_if_set_param(ctx,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
index a9c9f86b9c83..513ee7993e34 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
@@ -9,7 +9,7 @@
#define _MTK_VCODEC_ENC_H_
#include <media/videobuf2-core.h>
-#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
#define MTK_VENC_IRQ_STATUS_SPS 0x1
#define MTK_VENC_IRQ_STATUS_PPS 0x2
@@ -23,15 +23,15 @@
/**
* struct mtk_video_enc_buf - Private data related to each VB2 buffer.
- * @vb: Pointer to related VB2 buffer.
+ * @m2m_buf: M2M buffer
* @list: list that buffer link to
* @param_change: Types of encode parameter change before encoding this
* buffer
* @enc_params: Encode parameters changed before encode this buffer
*/
struct mtk_video_enc_buf {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
+ struct v4l2_m2m_buffer m2m_buf;
+
u32 param_change;
struct mtk_enc_params enc_params;
};
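
[Editor's note] The recurring pattern in this series: the driver-private vb/list pair is replaced by an embedded struct v4l2_m2m_buffer, which itself contains a struct vb2_v4l2_buffer named vb and a struct list_head, so every container_of() must now name the nested member m2m_buf.vb. A minimal sketch; the helper name to_mtk_enc_buf() is hypothetical, the types are from the patch:

	static struct mtk_video_enc_buf *to_mtk_enc_buf(struct vb2_buffer *vb)
	{
		struct vb2_v4l2_buffer *vb2_v4l2 =
			container_of(vb, struct vb2_v4l2_buffer, vb2_buf);

		/* container_of() accepts a nested member path */
		return container_of(vb2_v4l2, struct mtk_video_enc_buf,
				    m2m_buf.vb);
	}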
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 327c5716922a..a4ee6b86663e 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -810,6 +810,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
+
if (subdev == &isp->isp_res.subdev)
ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
else if (subdev == &isp->isp_prev.subdev)
@@ -837,10 +841,6 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
&subdev->entity);
failure = -ETIMEDOUT;
}
-
- /* Stop at the first external sub-device. */
- if (subdev->dev != isp->dev)
- break;
}
return failure;
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index e2f336c715a4..471ae7cdb813 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1607,6 +1607,11 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
return 0;
}
+ /* Don't restart CCDC if we're just about to stop streaming. */
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+ ccdc->stopping & CCDC_STOP_REQUEST)
+ return 0;
+
if (!ccdc_has_all_fields(ccdc))
return 1;
@@ -1661,16 +1666,15 @@ static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
spin_unlock_irqrestore(&ccdc->lock, flags);
}
- if (ccdc->output & CCDC_OUTPUT_MEMORY)
- restart = ccdc_isr_buffer(ccdc);
-
spin_lock_irqsave(&ccdc->lock, flags);
-
if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
spin_unlock_irqrestore(&ccdc->lock, flags);
return;
}
+ if (ccdc->output & CCDC_OUTPUT_MEMORY)
+ restart = ccdc_isr_buffer(ccdc);
+
if (!ccdc->shadow_update)
ccdc_apply_controls(ccdc);
spin_unlock_irqrestore(&ccdc->lock, flags);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 8d47ea0c33f8..43ae645d866b 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2530,6 +2530,7 @@ exit_free_v4l2dev:
v4l2_device_unregister(&pcdev->v4l2_dev);
exit_deactivate:
pxa_camera_deactivate(pcdev);
+ tasklet_kill(&pcdev->task_eof);
exit_free_dma:
dma_release_channel(pcdev->dma_chans[2]);
exit_free_dma_u:
@@ -2544,6 +2545,7 @@ static int pxa_camera_remove(struct platform_device *pdev)
struct pxa_camera_dev *pcdev = dev_get_drvdata(&pdev->dev);
pxa_camera_deactivate(pcdev);
+ tasklet_kill(&pcdev->task_eof);
dma_release_channel(pcdev->dma_chans[0]);
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 9e2e63ffcc47..5ff565e76bca 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -144,7 +144,7 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
}
/* HW limit width to a multiple of 32 (2^5) for NV12/16 else 2 (2^1) */
- switch (vin->format.pixelformat) {
+ switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
walign = 5;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 4372abbb5950..a74e9fd65238 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -14,8 +14,8 @@
#define MAX_SRC_WIDTH 2048
/* Reset & boot poll config */
-#define POLL_RST_MAX 50
-#define POLL_RST_DELAY_MS 20
+#define POLL_RST_MAX 500
+#define POLL_RST_DELAY_MS 2
enum bdisp_target_plan {
BDISP_RGB,
@@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp)
for (i = 0; i < POLL_RST_MAX; i++) {
if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
break;
- msleep(POLL_RST_DELAY_MS);
+ udelay(POLL_RST_DELAY_MS * 1000);
}
if (i == POLL_RST_MAX)
dev_err(bdisp->dev, "Reset timeout\n");
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 675b5f2b4c2e..d1025a53709f 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1274,6 +1274,8 @@ static int bdisp_remove(struct platform_device *pdev)
if (!IS_ERR(bdisp->clock))
clk_unprepare(bdisp->clock);
+ destroy_workqueue(bdisp->work_queue);
+
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
@@ -1317,20 +1319,22 @@ static int bdisp_probe(struct platform_device *pdev)
bdisp->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(bdisp->regs)) {
dev_err(dev, "failed to get regs\n");
- return PTR_ERR(bdisp->regs);
+ ret = PTR_ERR(bdisp->regs);
+ goto err_wq;
}
bdisp->clock = devm_clk_get(dev, BDISP_NAME);
if (IS_ERR(bdisp->clock)) {
dev_err(dev, "failed to get clock\n");
- return PTR_ERR(bdisp->clock);
+ ret = PTR_ERR(bdisp->clock);
+ goto err_wq;
}
ret = clk_prepare(bdisp->clock);
if (ret < 0) {
dev_err(dev, "clock prepare failed\n");
bdisp->clock = ERR_PTR(-EINVAL);
- return ret;
+ goto err_wq;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -1402,7 +1406,8 @@ err_v4l2:
err_clk:
if (!IS_ERR(bdisp->clock))
clk_unprepare(bdisp->clock);
-
+err_wq:
+ destroy_workqueue(bdisp->work_queue);
return ret;
}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index a79250a7f812..0560a9cb004b 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -170,8 +170,9 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
/* attach tuner */
request_module("tda18212");
- client = i2c_new_device(tsin->i2c_adapter, &tda18212_info);
- if (!client || !client->dev.driver) {
+ client = i2c_new_client_device(tsin->i2c_adapter,
+ &tda18212_info);
+ if (!i2c_client_has_driver(client)) {
dvb_frontend_detach(*fe);
return -ENODEV;
}
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index f36dc6258900..eff34ded6305 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -28,6 +29,12 @@
#include "sun4i_csi.h"
+struct sun4i_csi_traits {
+ unsigned int channels;
+ unsigned int max_width;
+ bool has_isp;
+};
+
static const struct media_entity_operations sun4i_csi_video_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -155,6 +162,31 @@ static int sun4i_csi_probe(struct platform_device *pdev)
subdev = &csi->subdev;
vdev = &csi->vdev;
+ csi->traits = of_device_get_match_data(&pdev->dev);
+ if (!csi->traits)
+ return -EINVAL;
+
+ /*
+ * On Allwinner SoCs, some high memory bandwidth devices do DMA
+ * directly over the memory bus (called MBUS), instead of the
+ * system bus. The memory bus has a different addressing scheme
+ * without the DRAM starting offset.
+ *
+ * In some cases this can be described by an interconnect in
+ * the device tree. In other cases where the hardware is not
+ * fully understood and the interconnect is left out of the
+ * device tree, fall back to a default offset.
+ */
+ if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
+ ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
+ if (ret)
+ return ret;
+ } else {
+#ifdef PHYS_PFN_OFFSET
+ csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+#endif
+ }
+
csi->mdev.dev = csi->dev;
strscpy(csi->mdev.model, "Allwinner Video Capture Device",
sizeof(csi->mdev.model));
@@ -177,10 +209,12 @@ static int sun4i_csi_probe(struct platform_device *pdev)
return PTR_ERR(csi->bus_clk);
}
- csi->isp_clk = devm_clk_get(&pdev->dev, "isp");
- if (IS_ERR(csi->isp_clk)) {
- dev_err(&pdev->dev, "Couldn't get our ISP clock\n");
- return PTR_ERR(csi->isp_clk);
+ if (csi->traits->has_isp) {
+ csi->isp_clk = devm_clk_get(&pdev->dev, "isp");
+ if (IS_ERR(csi->isp_clk)) {
+ dev_err(&pdev->dev, "Couldn't get our ISP clock\n");
+ return PTR_ERR(csi->isp_clk);
+ }
}
csi->ram_clk = devm_clk_get(&pdev->dev, "ram");
@@ -258,8 +292,21 @@ static int sun4i_csi_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_csi_traits sun4i_a10_csi1_traits = {
+ .channels = 1,
+ .max_width = 24,
+ .has_isp = false,
+};
+
+static const struct sun4i_csi_traits sun7i_a20_csi0_traits = {
+ .channels = 4,
+ .max_width = 16,
+ .has_isp = true,
+};
+
static const struct of_device_id sun4i_csi_of_match[] = {
- { .compatible = "allwinner,sun7i-a20-csi0" },
+ { .compatible = "allwinner,sun4i-a10-csi1", .data = &sun4i_a10_csi1_traits },
+ { .compatible = "allwinner,sun7i-a20-csi0", .data = &sun7i_a20_csi0_traits },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_csi_of_match);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
index 001c8bde006c..0f67ff652c2e 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
@@ -22,8 +22,8 @@
#define CSI_CFG_INPUT_FMT(fmt) ((fmt) << 20)
#define CSI_CFG_OUTPUT_FMT(fmt) ((fmt) << 16)
#define CSI_CFG_YUV_DATA_SEQ(seq) ((seq) << 8)
-#define CSI_CFG_VSYNC_POL(pol) ((pol) << 2)
-#define CSI_CFG_HSYNC_POL(pol) ((pol) << 1)
+#define CSI_CFG_VREF_POL(pol) ((pol) << 2)
+#define CSI_CFG_HREF_POL(pol) ((pol) << 1)
#define CSI_CFG_PCLK_POL(pol) ((pol) << 0)
#define CSI_CPT_CTRL_REG 0x08
@@ -108,6 +108,8 @@ struct sun4i_csi {
/* Device resources */
struct device *dev;
+ const struct sun4i_csi_traits *traits;
+
void __iomem *regs;
struct clk *bus_clk;
struct clk *isp_clk;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
index d6979e11a67b..78fa1c535ac6 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -228,7 +228,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
struct sun4i_csi *csi = vb2_get_drv_priv(vq);
struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
const struct sun4i_csi_format *csi_fmt;
- unsigned long hsync_pol, pclk_pol, vsync_pol;
+ unsigned long href_pol, pclk_pol, vref_pol;
unsigned long flags;
unsigned int i;
int ret;
@@ -278,13 +278,21 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
csi->regs + CSI_WIN_CTRL_H_REG);
- hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
- pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH);
- vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
+ /*
+ * This hardware uses [HV]REF instead of [HV]SYNC. Based on the
+ * provided timing diagrams in the manual, positive polarity
+ * equals active high [HV]REF.
+ *
+ * When the back porch is 0, [HV]REF is more or less equivalent
+ * to [HV]SYNC inverted.
+ */
+ href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
- CSI_CFG_VSYNC_POL(vsync_pol) |
- CSI_CFG_HSYNC_POL(hsync_pol) |
+ CSI_CFG_VREF_POL(vref_pol) |
+ CSI_CFG_HREF_POL(href_pol) |
CSI_CFG_PCLK_POL(pclk_pol),
csi->regs + CSI_CFG_REG);
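
[Editor's note] The polarity remapping above, tabulated (assuming zero back porch, per the comment in the patch):

	bus flag set                    old register write      new register write
	V4L2_MBUS_HSYNC_ACTIVE_HIGH     HSYNC_POL(1)            HREF_POL(0)
	V4L2_MBUS_HSYNC_ACTIVE_LOW      HSYNC_POL(0)            HREF_POL(1)
	V4L2_MBUS_PCLK_SAMPLE_RISING    (was keyed off          PCLK_POL(1)
	                                 DATA_ACTIVE_HIGH)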
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index aaa1dc159ac2..b61f3dea7c93 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -834,11 +834,8 @@ static int deinterlace_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dev->base)) {
- dev_err(dev->dev, "Failed to map registers\n");
-
+ if (IS_ERR(dev->base))
return PTR_ERR(dev->base);
- }
dev->bus_clk = devm_clk_get(dev->dev, "bus");
if (IS_ERR(dev->bus_clk)) {
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index a99caac59f44..1ac0c70a5981 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -351,7 +351,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
if (cec->tegra_cec_irq <= 0)
return -EBUSY;
- cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ cec->cec_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!cec->cec_base) {
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 223161f9c403..be54806180a5 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -14,6 +14,8 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/videodev2.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
@@ -32,8 +34,8 @@
#define CAL_MODULE_NAME "cal"
-#define MAX_WIDTH 1920
-#define MAX_HEIGHT 1200
+#define MAX_WIDTH_BYTES (8192 * 8)
+#define MAX_HEIGHT_LINES 16383
#define CAL_VERSION "0.1.0"
@@ -71,8 +73,6 @@ static const struct v4l2_fract
#define CAL_NUM_INPUT 1
#define CAL_NUM_CONTEXT 2
-#define bytes_per_line(pixel, bpp) (ALIGN(pixel * bpp, 16))
-
#define reg_read(dev, offset) ioread32(dev->base + offset)
#define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
@@ -91,102 +91,103 @@ static const struct v4l2_fract
struct cal_fmt {
u32 fourcc;
u32 code;
- u8 depth;
+ /* Bits per pixel */
+ u8 bpp;
};
static struct cal_fmt cal_formats[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
.code = MEDIA_BUS_FMT_YVYU8_2X8,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
.code = MEDIA_BUS_FMT_VYUY8_2X8,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
.code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
.code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
- .depth = 16,
+ .bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
.code = MEDIA_BUS_FMT_RGB888_2X12_LE,
- .depth = 24,
+ .bpp = 24,
}, {
.fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
.code = MEDIA_BUS_FMT_RGB888_2X12_BE,
- .depth = 24,
+ .bpp = 24,
}, {
.fourcc = V4L2_PIX_FMT_RGB32, /* argb */
.code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .depth = 32,
+ .bpp = 32,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .depth = 8,
+ .bpp = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .depth = 8,
+ .bpp = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .depth = 8,
+ .bpp = 8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .depth = 8,
+ .bpp = 8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .depth = 16,
+ .bpp = 10,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .depth = 16,
+ .bpp = 10,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .depth = 16,
+ .bpp = 10,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .depth = 16,
+ .bpp = 10,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR12,
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .depth = 16,
+ .bpp = 12,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG12,
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .depth = 16,
+ .bpp = 12,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG12,
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .depth = 16,
+ .bpp = 12,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB12,
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .depth = 16,
+ .bpp = 12,
},
};
@@ -220,20 +221,118 @@ struct cal_dmaqueue {
int ini_jiffies;
};
-struct cm_data {
+struct cc_data {
void __iomem *base;
struct resource *res;
- unsigned int camerrx_control;
-
struct platform_device *pdev;
};
-struct cc_data {
- void __iomem *base;
- struct resource *res;
+/* CTRL_CORE_CAMERRX_CONTROL register field id */
+enum cal_camerarx_field {
+ F_CTRLCLKEN,
+ F_CAMMODE,
+ F_LANEENABLE,
+ F_CSI_MODE,
- struct platform_device *pdev;
+ F_MAX_FIELDS,
+};
+
+struct cal_csi2_phy {
+ struct regmap_field *fields[F_MAX_FIELDS];
+ struct reg_field *base_fields;
+ const int num_lanes;
+};
+
+struct cal_data {
+ const int num_csi2_phy;
+ struct cal_csi2_phy *csi2_phy_core;
+
+ const unsigned int flags;
+};
+
+static struct reg_field dra72x_ctrl_core_csi0_reg_fields[F_MAX_FIELDS] = {
+ [F_CTRLCLKEN] = REG_FIELD(0, 10, 10),
+ [F_CAMMODE] = REG_FIELD(0, 11, 12),
+ [F_LANEENABLE] = REG_FIELD(0, 13, 16),
+ [F_CSI_MODE] = REG_FIELD(0, 17, 17),
+};
+
+static struct reg_field dra72x_ctrl_core_csi1_reg_fields[F_MAX_FIELDS] = {
+ [F_CTRLCLKEN] = REG_FIELD(0, 0, 0),
+ [F_CAMMODE] = REG_FIELD(0, 1, 2),
+ [F_LANEENABLE] = REG_FIELD(0, 3, 4),
+ [F_CSI_MODE] = REG_FIELD(0, 5, 5),
+};
+
+static struct cal_csi2_phy dra72x_cal_csi_phy[] = {
+ {
+ .base_fields = dra72x_ctrl_core_csi0_reg_fields,
+ .num_lanes = 4,
+ },
+ {
+ .base_fields = dra72x_ctrl_core_csi1_reg_fields,
+ .num_lanes = 2,
+ },
+};
+
+static const struct cal_data dra72x_cal_data = {
+ .csi2_phy_core = dra72x_cal_csi_phy,
+ .num_csi2_phy = ARRAY_SIZE(dra72x_cal_csi_phy),
+};
+
+static const struct cal_data dra72x_es1_cal_data = {
+ .csi2_phy_core = dra72x_cal_csi_phy,
+ .num_csi2_phy = ARRAY_SIZE(dra72x_cal_csi_phy),
+ .flags = DRA72_CAL_PRE_ES2_LDO_DISABLE,
+};
+
+static struct reg_field dra76x_ctrl_core_csi0_reg_fields[F_MAX_FIELDS] = {
+ [F_CTRLCLKEN] = REG_FIELD(0, 8, 8),
+ [F_CAMMODE] = REG_FIELD(0, 9, 10),
+ [F_CSI_MODE] = REG_FIELD(0, 11, 11),
+ [F_LANEENABLE] = REG_FIELD(0, 27, 31),
+};
+
+static struct reg_field dra76x_ctrl_core_csi1_reg_fields[F_MAX_FIELDS] = {
+ [F_CTRLCLKEN] = REG_FIELD(0, 0, 0),
+ [F_CAMMODE] = REG_FIELD(0, 1, 2),
+ [F_CSI_MODE] = REG_FIELD(0, 3, 3),
+ [F_LANEENABLE] = REG_FIELD(0, 24, 26),
+};
+
+static struct cal_csi2_phy dra76x_cal_csi_phy[] = {
+ {
+ .base_fields = dra76x_ctrl_core_csi0_reg_fields,
+ .num_lanes = 5,
+ },
+ {
+ .base_fields = dra76x_ctrl_core_csi1_reg_fields,
+ .num_lanes = 3,
+ },
+};
+
+static const struct cal_data dra76x_cal_data = {
+ .csi2_phy_core = dra76x_cal_csi_phy,
+ .num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy),
+};
+
+static struct reg_field am654_ctrl_core_csi0_reg_fields[F_MAX_FIELDS] = {
+ [F_CTRLCLKEN] = REG_FIELD(0, 15, 15),
+ [F_CAMMODE] = REG_FIELD(0, 24, 25),
+ [F_LANEENABLE] = REG_FIELD(0, 0, 4),
+};
+
+static struct cal_csi2_phy am654_cal_csi_phy[] = {
+ {
+ .base_fields = am654_ctrl_core_csi0_reg_fields,
+ .num_lanes = 5,
+ },
+};
+
+static const struct cal_data am654_cal_data = {
+ .csi2_phy_core = am654_cal_csi_phy,
+ .num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy),
};
/*
@@ -247,8 +346,15 @@ struct cal_dev {
struct platform_device *pdev;
struct v4l2_device v4l2_dev;
+ /* Controller flags for special cases */
+ unsigned int flags;
+
+ const struct cal_data *data;
+
/* Control Module handle */
- struct cm_data *cm;
+ struct regmap *syscon_camerrx;
+ u32 syscon_camerrx_offset;
+
/* Camera Core Module handle */
struct cc_data *cc[CAL_NUM_CSI2_PORTS];
@@ -359,73 +465,115 @@ static inline void set_field(u32 *valp, u32 field, u32 mask)
*valp = val;
}
-/*
- * Control Module block access
- */
-static struct cm_data *cm_create(struct cal_dev *dev)
+static u32 cal_data_get_phy_max_lanes(struct cal_ctx *ctx)
{
- struct platform_device *pdev = dev->pdev;
- struct cm_data *cm;
+ struct cal_dev *dev = ctx->dev;
+ u32 phy_id = ctx->csi2_port - 1;
- cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
- if (!cm)
- return ERR_PTR(-ENOMEM);
+ return dev->data->csi2_phy_core[phy_id].num_lanes;
+}
+
+static u32 cal_data_get_num_csi2_phy(struct cal_dev *dev)
+{
+ return dev->data->num_csi2_phy;
+}
+
+static int cal_camerarx_regmap_init(struct cal_dev *dev)
+{
+ struct reg_field *field;
+ struct cal_csi2_phy *phy;
+ int i, j;
+
+ if (!dev->data)
+ return -EINVAL;
+
+ for (i = 0; i < cal_data_get_num_csi2_phy(dev); i++) {
+ phy = &dev->data->csi2_phy_core[i];
+ for (j = 0; j < F_MAX_FIELDS; j++) {
+ field = &phy->base_fields[j];
+ /*
+ * Here we update the reg offset with the
+ * value found in DT
+ */
+ field->reg = dev->syscon_camerrx_offset;
+ phy->fields[j] =
+ devm_regmap_field_alloc(&dev->pdev->dev,
+ dev->syscon_camerrx,
+ *field);
+ if (IS_ERR(phy->fields[j])) {
+ cal_err(dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(phy->fields[j]);
+ }
+ }
+ }
+ return 0;
+}
+
+static const struct regmap_config cal_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
- cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "camerrx_control");
- cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
- if (IS_ERR(cm->base)) {
+static struct regmap *cal_get_camerarx_regmap(struct cal_dev *dev)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct regmap *regmap;
+ void __iomem *base;
+ u32 reg_io_width;
+ struct regmap_config r_config = cal_regmap_config;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "camerrx_control");
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
cal_err(dev, "failed to ioremap\n");
- return ERR_CAST(cm->base);
+ return ERR_CAST(base);
}
cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
- cm->res->name, &cm->res->start, &cm->res->end);
+ res->name, &res->start, &res->end);
- return cm;
+ reg_io_width = 4;
+ r_config.reg_stride = reg_io_width;
+ r_config.val_bits = reg_io_width * 8;
+ r_config.max_register = resource_size(res) - reg_io_width;
+
+ regmap = regmap_init_mmio(NULL, base, &r_config);
+ if (IS_ERR(regmap))
+ pr_err("regmap init failed\n");
+
+ return regmap;
}
+/*
+ * Control Module CAMERARX block access
+ */
static void camerarx_phy_enable(struct cal_ctx *ctx)
{
- u32 val;
-
- if (!ctx->dev->cm->base) {
- ctx_err(ctx, "cm not mapped\n");
- return;
- }
-
- val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
- if (ctx->csi2_port == 1) {
- set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
- set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
- /* enable all lanes by default */
- set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
- set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
- } else if (ctx->csi2_port == 2) {
- set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
- set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
- /* enable all lanes by default */
- set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
- set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
- }
- reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+ struct cal_csi2_phy *phy;
+ u32 phy_id = ctx->csi2_port - 1;
+ u32 max_lanes;
+
+ phy = &ctx->dev->data->csi2_phy_core[phy_id];
+ regmap_field_write(phy->fields[F_CAMMODE], 0);
+ /* Always enable all lanes at the phy control level */
+ max_lanes = (1 << cal_data_get_phy_max_lanes(ctx)) - 1;
+ regmap_field_write(phy->fields[F_LANEENABLE], max_lanes);
+ /* F_CSI_MODE is not present on every architecture */
+ if (phy->fields[F_CSI_MODE])
+ regmap_field_write(phy->fields[F_CSI_MODE], 1);
+ regmap_field_write(phy->fields[F_CTRLCLKEN], 1);
}
static void camerarx_phy_disable(struct cal_ctx *ctx)
{
- u32 val;
-
- if (!ctx->dev->cm->base) {
- ctx_err(ctx, "cm not mapped\n");
- return;
- }
+ struct cal_csi2_phy *phy;
+ u32 phy_id = ctx->csi2_port - 1;
- val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
- if (ctx->csi2_port == 1)
- set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
- else if (ctx->csi2_port == 2)
- set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
- reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+ phy = &ctx->dev->data->csi2_phy_core[phy_id];
+ regmap_field_write(phy->fields[F_CTRLCLKEN], 0);
}
/*
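
[Editor's note] A condensed sketch of the regmap_field pattern introduced above, assuming dev and syscon come from probe context; REG_FIELD(), devm_regmap_field_alloc() and regmap_field_write() are the stock <linux/regmap.h> API, and the 0xe94 offset is an example only:

	struct reg_field f = REG_FIELD(0x0, 10, 10);	/* reg, lsb, msb */
	struct regmap_field *clken;

	f.reg = 0xe94;			/* patch the offset in from DT at runtime */
	clken = devm_regmap_field_alloc(dev, syscon, f);
	if (IS_ERR(clken))
		return PTR_ERR(clken);
	regmap_field_write(clken, 1);	/* sets bit 10 of register 0xe94 */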
@@ -474,9 +622,52 @@ static void cal_get_hwinfo(struct cal_dev *dev)
hwinfo);
}
-static inline int cal_runtime_get(struct cal_dev *dev)
+/*
+ * Errata i913: CSI2 LDO Needs to be disabled when module is powered on
+ *
+ * Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2
+ * LDOs on the device are disabled if CSI-2 module is powered on
+ * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304
+ * | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high
+ * current draw on the module supply in active mode.
+ *
+ * Errata does not apply when CSI-2 module is powered off
+ * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0).
+ *
+ * SW Workaround:
+ * Set the following register bits to disable the LDO,
+ * which is essentially CSI2 REG10 bit 6:
+ *
+ * Core 0: 0x4845 B828 = 0x0000 0040
+ * Core 1: 0x4845 B928 = 0x0000 0040
+ */
+static void i913_errata(struct cal_dev *dev, unsigned int port)
+{
+ u32 reg10 = reg_read(dev->cc[port], CAL_CSI2_PHY_REG10);
+
+ set_field(&reg10, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
+ CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK);
+
+ cal_dbg(1, dev, "CSI2_%d_REG10 = 0x%08x\n", port, reg10);
+ reg_write(dev->cc[port], CAL_CSI2_PHY_REG10, reg10);
+}
+
+static int cal_runtime_get(struct cal_dev *dev)
{
- return pm_runtime_get_sync(&dev->pdev->dev);
+ int r;
+
+ r = pm_runtime_get_sync(&dev->pdev->dev);
+
+ if (dev->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
+ /*
+		 * Apply the errata on both ports every time we (re-)enable
+		 * the clock
+ */
+ i913_errata(dev, 0);
+ i913_errata(dev, 1);
+ }
+
+ return r;
}
static inline void cal_runtime_put(struct cal_dev *dev)
@@ -508,12 +699,6 @@ static void cal_quickdump_regs(struct cal_dev *dev)
resource_size(dev->ctx[1]->cc->res),
false);
}
-
- cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
- &dev->cm->res->start);
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
- (__force const void *)dev->cm->base,
- resource_size(dev->cm->res), false);
}
/*
@@ -551,29 +736,76 @@ static void disable_irqs(struct cal_ctx *ctx)
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}
-static void csi2_init(struct cal_ctx *ctx)
+static void csi2_phy_config(struct cal_ctx *ctx);
+
+static void csi2_phy_init(struct cal_ctx *ctx)
{
int i;
u32 val;
+ /* Steps
+ * 1. Configure D-PHY mode and enable required lanes
+ * 2. Reset complex IO - Wait for completion of reset
+	 *     Note: if the external sensor is not sending a byte clock,
+	 *     the reset will time out
+	 *  3. Program Stop States
+	 *     A. Program THS_TERM, THS_SETTLE, etc. timing parameters
+	 *        in terms of DDR clock periods
+	 *     B. Enable stop state transition timeouts
+	 *  4. Force FORCERXMODE
+	 *     D. Enable pull down using pad control
+	 *     E. Power up PHY
+	 *     F. Wait for power up completion
+	 *     G. Wait for all enabled lanes to reach stop state
+ * H. Disable pull down using pad control
+ */
+
+ /* 1. Configure D-PHY mode and enable required lanes */
+ camerarx_phy_enable(ctx);
+
+ /* 2. Reset complex IO - Do not wait for reset completion */
+ val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+
+ /* Dummy read to allow SCP to complete */
+ val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+
+ /* 3.A. Program Phy Timing Parameters */
+ csi2_phy_config(ctx);
+
+ /* 3.B. Program Stop States */
val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
set_field(&val, CAL_GEN_ENABLE,
- CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
- set_field(&val, CAL_GEN_ENABLE,
CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
set_field(&val, CAL_GEN_DISABLE,
CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
- ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
+ ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+
+ /* 4. Force FORCERXMODE */
+ val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
+ set_field(&val, CAL_GEN_ENABLE,
+ CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
+ reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n",
+ ctx->csi2_port,
reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+ /* E. Power up the PHY using the complex IO */
val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
- set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
- CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+
+ /* F. Wait for power up completion */
for (i = 0; i < 10; i++) {
if (reg_read_field(ctx->dev,
CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
@@ -582,18 +814,104 @@ static void csi2_init(struct cal_ctx *ctx)
break;
usleep_range(1000, 1100);
}
- ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
- reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Powered UP %s\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)),
+ (i >= 10) ? "(timeout)" : "");
+}
- val = reg_read(ctx->dev, CAL_CTRL);
- set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
- set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
- set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
- CAL_CTRL_POSTED_WRITES_MASK);
- set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
- set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
- reg_write(ctx->dev, CAL_CTRL, val);
- ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
+static void csi2_wait_for_phy(struct cal_ctx *ctx)
+{
+ int i;
+
+ /* Steps
+ * 2. Wait for completion of reset
+	 *     Note: if the external sensor is not sending a byte clock,
+	 *     the reset will time out
+	 *  4. Force FORCERXMODE
+	 *  G. Wait for all enabled lanes to reach stop state
+ * H. Disable pull down using pad control
+ */
+
+ /* 2. Wait for reset completion */
+ for (i = 0; i < 250; i++) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
+ break;
+ usleep_range(1000, 1100);
+ }
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO Reset Done (%d) %s\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)), i,
+ (i >= 250) ? "(timeout)" : "");
+
+	/* 4. G. Wait for all enabled lanes to reach stop state */
+ for (i = 0; i < 10; i++) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_TIMING(ctx->csi2_port),
+ CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) ==
+ CAL_GEN_DISABLE)
+ break;
+ usleep_range(1000, 1100);
+ }
+ ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x Stop State Reached %s\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)),
+ (i >= 10) ? "(timeout)" : "");
+
+ ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x (Bit(31,28) should be set!)\n",
+ (ctx->csi2_port - 1), reg_read(ctx->cc, CAL_CSI2_PHY_REG1));
+}
+
+static void csi2_phy_deinit(struct cal_ctx *ctx)
+{
+ int i;
+ u32 val;
+
+ /* Power down the PHY using the complex IO */
+ val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF,
+ CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+
+ /* Wait for power down completion */
+ for (i = 0; i < 10; i++) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF)
+ break;
+ usleep_range(1000, 1100);
+ }
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Powered Down %s\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)),
+ (i >= 10) ? "(timeout)" : "");
+
+	/* Assert Complex IO Reset */
+ val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL,
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+
+ /* Wait for power down completion */
+ for (i = 0; i < 10; i++) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
+ CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING)
+ break;
+ usleep_range(1000, 1100);
+ }
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset (%d) %s\n",
+ ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)), i,
+ (i >= 10) ? "(timeout)" : "");
+
+ /* Disable the phy */
+ camerarx_phy_disable(ctx);
}
static void csi2_lane_config(struct cal_ctx *ctx)
@@ -665,13 +983,48 @@ static void csi2_ctx_config(struct cal_ctx *ctx)
static void pix_proc_config(struct cal_ctx *ctx)
{
- u32 val;
+ u32 val, extract, pack;
+
+ switch (ctx->fmt->bpp) {
+ case 8:
+ extract = CAL_PIX_PROC_EXTRACT_B8;
+ pack = CAL_PIX_PROC_PACK_B8;
+ break;
+ case 10:
+ extract = CAL_PIX_PROC_EXTRACT_B10_MIPI;
+ pack = CAL_PIX_PROC_PACK_B16;
+ break;
+ case 12:
+ extract = CAL_PIX_PROC_EXTRACT_B12_MIPI;
+ pack = CAL_PIX_PROC_PACK_B16;
+ break;
+ case 16:
+ extract = CAL_PIX_PROC_EXTRACT_B16_LE;
+ pack = CAL_PIX_PROC_PACK_B16;
+ break;
+ default:
+ /*
+ * If you see this warning then it means that you added
+		 * a new entry to the cal_formats[] array with a
+		 * bits-per-pixel value other than the ones supported below.
+		 * Either add support for the new bpp value below or adjust
+		 * the new entry to use one of the values below.
+ *
+ * Instead of failing here just use 8 bpp as a default.
+ */
+ dev_warn_once(&ctx->dev->pdev->dev,
+ "%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
+ __FILE__, __LINE__, __func__, ctx->fmt->bpp);
+ extract = CAL_PIX_PROC_EXTRACT_B8;
+ pack = CAL_PIX_PROC_PACK_B8;
+ break;
+ }
val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
- set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
+ set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK);
set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
- set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
+ set_field(&val, pack, CAL_PIX_PROC_PACK_MASK);
set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
@@ -680,12 +1033,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
}
static void cal_wr_dma_config(struct cal_ctx *ctx,
- unsigned int width)
+ unsigned int width, unsigned int height)
{
u32 val;
val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
CAL_WR_DMA_CTRL_DTAG_MASK);
set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
@@ -720,6 +1074,16 @@ static void cal_wr_dma_config(struct cal_ctx *ctx,
reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_CTRL);
+ set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
+ set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
+ set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+ CAL_CTRL_POSTED_WRITES_MASK);
+ set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
+ set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
+ reg_write(ctx->dev, CAL_CTRL, val);
+ ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
}
static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
@@ -733,41 +1097,28 @@ static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
#define TCLK_TERM 0
#define TCLK_MISS 1
#define TCLK_SETTLE 14
-#define THS_SETTLE 15
static void csi2_phy_config(struct cal_ctx *ctx)
{
unsigned int reg0, reg1;
unsigned int ths_term, ths_settle;
- unsigned int ddrclkperiod_us;
+ unsigned int csi2_ddrclk_khz;
+ struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
+ &ctx->endpoint.bus.mipi_csi2;
+ u32 num_lanes = mipi_csi2->num_data_lanes;
- /*
- * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
- */
- ddrclkperiod_us = ctx->external_rate / 2000000;
- ddrclkperiod_us = 1000000 / ddrclkperiod_us;
- ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
+ /* DPHY timing configuration */
+ /* CSI-2 is DDR and we only count used lanes. */
+ csi2_ddrclk_khz = ctx->external_rate / 1000
+ / (2 * num_lanes) * ctx->fmt->bpp;
+ ctx_dbg(1, ctx, "csi2_ddrclk_khz: %d\n", csi2_ddrclk_khz);
- ths_term = 20000 / ddrclkperiod_us;
- ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
+ /* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */
+ ths_term = 20 * csi2_ddrclk_khz / 1000000;
ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
- /*
- * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
- * Since CtrlClk is fixed at 96Mhz then we get
- * ths_settle = floor(176.3 / 10.416) - 1 = 15
- * If we ever switch to a dynamic clock then this code might be useful
- *
- * unsigned int ctrlclkperiod_us;
- * ctrlclkperiod_us = 96000000 / 1000000;
- * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
- * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
-
- * ths_settle = 176300 / ctrlclkperiod_us;
- * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
- */
-
- ths_settle = THS_SETTLE;
+ /* THS_SETTLE: Programmed value = floor(105 ns/DDRClk period) + 4 */
+ ths_settle = (105 * csi2_ddrclk_khz / 1000000) + 4;
ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
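
[Editor's note] Worked example of the new D-PHY timing math, with illustrative numbers (not from the patch): external_rate = 192 MHz, bpp = 10, num_lanes = 2:

	csi2_ddrclk_khz = 192000000 / 1000 / (2 * 2) * 10 = 480000   /* 480 MHz */
	DDR clock period ~= 1 / 480 MHz ~= 2.083 ns
	ths_term   = 20 * 480000 / 1000000      = 9    /* floor(20 ns / 2.083 ns) */
	ths_settle = 105 * 480000 / 1000000 + 4 = 54   /* floor(105 ns / 2.083 ns) + 4 */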
@@ -979,15 +1330,25 @@ static int cal_calc_format_size(struct cal_ctx *ctx,
const struct cal_fmt *fmt,
struct v4l2_format *f)
{
+ u32 bpl, max_width;
+
if (!fmt) {
ctx_dbg(3, ctx, "No cal_fmt provided!\n");
return -EINVAL;
}
- v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
- &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
- f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
- fmt->depth >> 3);
+ /*
+ * Maximum width is bound by the DMA max width in bytes.
+	 * We need to recalculate the actual maximum width depending on
+	 * the number of bytes per pixel required.
+ */
+ max_width = MAX_WIDTH_BYTES / (ALIGN(fmt->bpp, 8) >> 3);
+ v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2,
+ &f->fmt.pix.height, 32, MAX_HEIGHT_LINES, 0, 0);
+
+ bpl = (f->fmt.pix.width * ALIGN(fmt->bpp, 8)) >> 3;
+ f->fmt.pix.bytesperline = ALIGN(bpl, 16);
+
f->fmt.pix.sizeimage = f->fmt.pix.height *
f->fmt.pix.bytesperline;
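
[Editor's note] Illustrative check of the new width/stride bounds, assuming a hypothetical 10 bpp Bayer format at 1280x720:

	ALIGN(10, 8) = 16, i.e. 2 bytes per pixel once packed
	max_width = MAX_WIDTH_BYTES / 2 = (8192 * 8) / 2 = 32768 pixels
	bpl = (1280 * 16) >> 3 = 2560; ALIGN(2560, 16) = 2560 bytes
	sizeimage = 720 * 2560 = 1843200 bytes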
@@ -1132,6 +1493,7 @@ static int cal_enum_framesizes(struct file *file, void *fh,
fse.index = fsize->index;
fse.pad = 0;
fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
if (ret)
@@ -1299,36 +1661,50 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0)
goto err;
+ ret = v4l2_subdev_call(ctx->sensor, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+ ctx_err(ctx, "power on failed in subdev\n");
+ goto err;
+ }
+
cal_runtime_get(ctx->dev);
- enable_irqs(ctx);
- camerarx_phy_enable(ctx);
- csi2_init(ctx);
- csi2_phy_config(ctx);
- csi2_lane_config(ctx);
csi2_ctx_config(ctx);
pix_proc_config(ctx);
- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
- cal_wr_dma_addr(ctx, addr);
- csi2_ppi_enable(ctx);
+ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
+ ctx->v_fmt.fmt.pix.height);
+ csi2_lane_config(ctx);
+
+ enable_irqs(ctx);
+ csi2_phy_init(ctx);
ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
if (ret) {
+ v4l2_subdev_call(ctx->sensor, core, s_power, 0);
ctx_err(ctx, "stream on failed in subdev\n");
cal_runtime_put(ctx->dev);
goto err;
}
+ csi2_wait_for_phy(ctx);
+ cal_wr_dma_addr(ctx, addr);
+ csi2_ppi_enable(ctx);
+
if (debug >= 4)
cal_quickdump_regs(ctx->dev);
return 0;
err:
+ spin_lock_irqsave(&ctx->slock, flags);
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ ctx->cur_frm = NULL;
+ ctx->next_frm = NULL;
list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
list_del(&buf->list);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&ctx->slock, flags);
return ret;
}
@@ -1338,12 +1714,18 @@ static void cal_stop_streaming(struct vb2_queue *vq)
struct cal_dmaqueue *dma_q = &ctx->vidq;
struct cal_buffer *buf, *tmp;
unsigned long flags;
+ int ret;
+
+ csi2_ppi_disable(ctx);
+ disable_irqs(ctx);
+ csi2_phy_deinit(ctx);
if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
ctx_err(ctx, "stream off failed in subdev\n");
- csi2_ppi_disable(ctx);
- disable_irqs(ctx);
+ ret = v4l2_subdev_call(ctx->sensor, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ ctx_err(ctx, "power off failed in subdev\n");
/* Release all active buffers */
spin_lock_irqsave(&ctx->slock, flags);
@@ -1399,6 +1781,7 @@ static const struct v4l2_ioctl_ops cal_ioctl_ops = {
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_enum_input = cal_enum_input,
.vidioc_g_input = cal_g_input,
.vidioc_s_input = cal_s_input,
@@ -1451,6 +1834,7 @@ static int cal_async_bound(struct v4l2_async_notifier *notifier,
memset(&mbus_code, 0, sizeof(mbus_code));
mbus_code.index = j;
+ mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code);
if (ret)
@@ -1804,10 +2188,15 @@ err_exit:
return NULL;
}
+static const struct of_device_id cal_of_match[];
+
static int cal_probe(struct platform_device *pdev)
{
struct cal_dev *dev;
struct cal_ctx *ctx;
+ struct device_node *parent = pdev->dev.of_node;
+ struct regmap *syscon_camerrx = NULL;
+ u32 syscon_camerrx_offset = 0;
int ret;
int irq;
int i;
@@ -1816,6 +2205,14 @@ static int cal_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ dev->data = of_device_get_match_data(&pdev->dev);
+ if (!dev->data) {
+ dev_err(&pdev->dev, "Could not get feature data based on compatible version\n");
+ return -ENODEV;
+ }
+
+ dev->flags = dev->data->flags;
+
/* set pseudo v4l2 device name so we can use v4l2_printk */
strscpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
sizeof(dev->v4l2_dev.name));
@@ -1823,6 +2220,38 @@ static int cal_probe(struct platform_device *pdev)
/* save pdev pointer */
dev->pdev = pdev;
+ syscon_camerrx = syscon_regmap_lookup_by_phandle(parent,
+ "ti,camerrx-control");
+ ret = of_property_read_u32_index(parent, "ti,camerrx-control", 1,
+ &syscon_camerrx_offset);
+ if (IS_ERR(syscon_camerrx))
+ ret = PTR_ERR(syscon_camerrx);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get ti,camerrx-control: %d\n",
+ ret);
+
+ /*
+ * Backward DTS compatibility.
+		 * If the syscon entry is not present, check whether the
+ * camerrx_control resource is present.
+ */
+ syscon_camerrx = cal_get_camerarx_regmap(dev);
+ if (IS_ERR(syscon_camerrx)) {
+ dev_err(&pdev->dev, "failed to get camerrx_control regmap\n");
+ return PTR_ERR(syscon_camerrx);
+ }
+		/* In this case the base already points to the direct
+		 * CM register, so no offset is needed
+ */
+ syscon_camerrx_offset = 0;
+ }
+
+ dev->syscon_camerrx = syscon_camerrx;
+ dev->syscon_camerrx_offset = syscon_camerrx_offset;
+ ret = cal_camerarx_regmap_init(dev);
+ if (ret)
+ return ret;
+
dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"cal_top");
dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
@@ -1841,23 +2270,24 @@ static int cal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->cm = cm_create(dev);
- if (IS_ERR(dev->cm))
- return PTR_ERR(dev->cm);
-
dev->cc[0] = cc_create(dev, 0);
if (IS_ERR(dev->cc[0]))
return PTR_ERR(dev->cc[0]);
- dev->cc[1] = cc_create(dev, 1);
- if (IS_ERR(dev->cc[1]))
- return PTR_ERR(dev->cc[1]);
+ if (cal_data_get_num_csi2_phy(dev) > 1) {
+ dev->cc[1] = cc_create(dev, 1);
+ if (IS_ERR(dev->cc[1]))
+ return PTR_ERR(dev->cc[1]);
+ } else {
+ dev->cc[1] = NULL;
+ }
dev->ctx[0] = NULL;
dev->ctx[1] = NULL;
dev->ctx[0] = cal_create_instance(dev, 0);
- dev->ctx[1] = cal_create_instance(dev, 1);
+ if (cal_data_get_num_csi2_phy(dev) > 1)
+ dev->ctx[1] = cal_create_instance(dev, 1);
if (!dev->ctx[0] && !dev->ctx[1]) {
cal_err(dev, "Neither port is configured, no point in staying up\n");
return -ENODEV;
@@ -1924,7 +2354,22 @@ static int cal_remove(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id cal_of_match[] = {
- { .compatible = "ti,dra72-cal", },
+ {
+ .compatible = "ti,dra72-cal",
+ .data = (void *)&dra72x_cal_data,
+ },
+ {
+ .compatible = "ti,dra72-pre-es2-cal",
+ .data = (void *)&dra72x_es1_cal_data,
+ },
+ {
+ .compatible = "ti,dra76-cal",
+ .data = (void *)&dra76x_cal_data,
+ },
+ {
+ .compatible = "ti,am654-cal",
+ .data = (void *)&am654_cal_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, cal_of_match);
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
index 68cfc922b422..0b76d1186074 100644
--- a/drivers/media/platform/ti-vpe/cal_regs.h
+++ b/drivers/media/platform/ti-vpe/cal_regs.h
@@ -10,6 +10,30 @@
#ifndef __TI_CAL_REGS_H
#define __TI_CAL_REGS_H
+/*
+ * struct cal_dev.flags possibilities
+ *
+ * DRA72_CAL_PRE_ES2_LDO_DISABLE:
+ * Errata i913: CSI2 LDO Needs to be disabled when module is powered on
+ *
+ * Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2
+ * LDOs on the device are disabled if CSI-2 module is powered on
+ * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304
+ * | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high
+ * current draw on the module supply in active mode.
+ *
+ * Errata does not apply when CSI-2 module is powered off
+ * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0).
+ *
+ * SW Workaround:
+ * Set the following register bits to disable the LDO,
+ * which is essentially CSI2 REG10 bit 6:
+ *
+ * Core 0: 0x4845 B828 = 0x0000 0040
+ * Core 1: 0x4845 B928 = 0x0000 0040
+ */
+#define DRA72_CAL_PRE_ES2_LDO_DISABLE BIT(0)
+
#define CAL_NUM_CSI2_PORTS 2
/* CAL register offsets */
@@ -71,6 +95,7 @@
#define CAL_CSI2_PHY_REG0 0x000
#define CAL_CSI2_PHY_REG1 0x004
#define CAL_CSI2_PHY_REG2 0x008
+#define CAL_CSI2_PHY_REG10 0x028
/* CAL Control Module Core Camerrx Control register offsets */
#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000
@@ -110,7 +135,7 @@
#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT 2
#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED 3
-#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT_MASK(0)
+#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT(0)
#define CAL_HL_SYSCONFIG_SOFTRESET_DONE 0x0
#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING 0x1
#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION 0x0
@@ -121,11 +146,11 @@
#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1 2
#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2 3
-#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT_MASK(0)
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT(0)
#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0
#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0
-#define CAL_HL_IRQ_MASK(m) BIT_MASK(m-1)
+#define CAL_HL_IRQ_MASK(m) BIT((m) - 1)
#define CAL_HL_IRQ_NOACTION 0x0
#define CAL_HL_IRQ_ENABLE 0x1
#define CAL_HL_IRQ_CLEAR 0x1
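
[Editor's note] The added parentheses matter whenever the macro argument is an expression; a hypothetical caller shows why:

	#define OLD(m)	BIT_MASK(m - 1)
	#define NEW(m)	BIT((m) - 1)

	OLD(x ? 2 : 3)	/* expands to BIT_MASK(x ? 2 : 3 - 1): the - 1 binds
			 * to the 3 only, so this is always bit 2 */
	NEW(x ? 2 : 3)	/* expands to BIT((x ? 2 : 3) - 1), as intended */

BIT() is also the right choice over BIT_MASK() here: these are plain 32-bit register masks, while BIT_MASK() reduces its argument modulo BITS_PER_LONG for bitmap use.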
@@ -133,7 +158,7 @@
#define CAL_HL_IRQ_ENABLED 0x1
#define CAL_HL_IRQ_PENDING 0x1
-#define CAL_PIX_PROC_EN_MASK BIT_MASK(0)
+#define CAL_PIX_PROC_EN_MASK BIT(0)
#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1)
#define CAL_PIX_PROC_EXTRACT_B6 0x0
#define CAL_PIX_PROC_EXTRACT_B7 0x1
@@ -179,7 +204,7 @@
#define CAL_PIX_PROC_PACK_ARGB 0x6
#define CAL_PIX_PROC_CPORT_MASK GENMASK(23, 19)
-#define CAL_CTRL_POSTED_WRITES_MASK BIT_MASK(0)
+#define CAL_CTRL_POSTED_WRITES_MASK BIT(0)
#define CAL_CTRL_POSTED_WRITES_NONPOSTED 0
#define CAL_CTRL_POSTED_WRITES 1
#define CAL_CTRL_TAGCNT_MASK GENMASK(4, 1)
@@ -190,10 +215,10 @@
#define CAL_CTRL_BURSTSIZE_BURST128 0x3
#define CAL_CTRL_LL_FORCE_STATE_MASK GENMASK(12, 7)
#define CAL_CTRL_MFLAGL_MASK GENMASK(20, 13)
-#define CAL_CTRL_PWRSCPCLK_MASK BIT_MASK(21)
+#define CAL_CTRL_PWRSCPCLK_MASK BIT(21)
#define CAL_CTRL_PWRSCPCLK_AUTO 0
#define CAL_CTRL_PWRSCPCLK_FORCE 1
-#define CAL_CTRL_RD_DMA_STALL_MASK BIT_MASK(22)
+#define CAL_CTRL_RD_DMA_STALL_MASK BIT(22)
#define CAL_CTRL_MFLAGH_MASK GENMASK(31, 24)
#define CAL_CTRL1_PPI_GROUPING_MASK GENMASK(1, 0)
@@ -218,18 +243,18 @@
#define CAL_VPORT_CTRL1_PCLK_MASK GENMASK(16, 0)
#define CAL_VPORT_CTRL1_XBLK_MASK GENMASK(24, 17)
#define CAL_VPORT_CTRL1_YBLK_MASK GENMASK(30, 25)
-#define CAL_VPORT_CTRL1_WIDTH_MASK BIT_MASK(31)
+#define CAL_VPORT_CTRL1_WIDTH_MASK BIT(31)
#define CAL_VPORT_CTRL1_WIDTH_ONE 0
#define CAL_VPORT_CTRL1_WIDTH_TWO 1
#define CAL_VPORT_CTRL2_CPORT_MASK GENMASK(4, 0)
-#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT_MASK(15)
+#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT(15)
#define CAL_VPORT_CTRL2_FREERUNNING_GATED 0
#define CAL_VPORT_CTRL2_FREERUNNING_FREE 1
-#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT_MASK(16)
+#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT(16)
#define CAL_VPORT_CTRL2_FS_RESETS_NO 0
#define CAL_VPORT_CTRL2_FS_RESETS_YES 1
-#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT_MASK(17)
+#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT(17)
#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT 0
#define CAL_VPORT_CTRL2_FSM_RESET 1
#define CAL_VPORT_CTRL2_RDY_THR_MASK GENMASK(31, 18)
@@ -237,23 +262,23 @@
#define CAL_BYS_CTRL1_PCLK_MASK GENMASK(16, 0)
#define CAL_BYS_CTRL1_XBLK_MASK GENMASK(24, 17)
#define CAL_BYS_CTRL1_YBLK_MASK GENMASK(30, 25)
-#define CAL_BYS_CTRL1_BYSINEN_MASK BIT_MASK(31)
+#define CAL_BYS_CTRL1_BYSINEN_MASK BIT(31)
#define CAL_BYS_CTRL2_CPORTIN_MASK GENMASK(4, 0)
#define CAL_BYS_CTRL2_CPORTOUT_MASK GENMASK(9, 5)
-#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT_MASK(10)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT(10)
#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO 0
#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES 1
-#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT_MASK(11)
+#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT(11)
#define CAL_BYS_CTRL2_FREERUNNING_NO 0
#define CAL_BYS_CTRL2_FREERUNNING_YES 1
-#define CAL_RD_DMA_CTRL_GO_MASK BIT_MASK(0)
+#define CAL_RD_DMA_CTRL_GO_MASK BIT(0)
#define CAL_RD_DMA_CTRL_GO_DIS 0
#define CAL_RD_DMA_CTRL_GO_EN 1
#define CAL_RD_DMA_CTRL_GO_IDLE 0
#define CAL_RD_DMA_CTRL_GO_BUSY 1
-#define CAL_RD_DMA_CTRL_INIT_MASK BIT_MASK(1)
+#define CAL_RD_DMA_CTRL_INIT_MASK BIT(1)
#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK GENMASK(10, 2)
#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK GENMASK(14, 11)
#define CAL_RD_DMA_CTRL_PCLK_MASK GENMASK(31, 15)
@@ -277,13 +302,13 @@
#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN 3
#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR 4
#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED 5
-#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT_MASK(3)
+#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT(3)
#define CAL_RD_DMA_CTRL2_PATTERN_MASK GENMASK(5, 4)
#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR 0
#define CAL_RD_DMA_CTRL2_PATTERN_YUV420 1
#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2 2
#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4 3
-#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT_MASK(6)
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT(6)
#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING 0
#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT 1
#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK GENMASK(29, 16)
@@ -300,7 +325,7 @@
#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2 2
#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4 3
#define CAL_WR_DMA_CTRL_PATTERN_RESERVED 1
-#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT_MASK(5)
+#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT(5)
#define CAL_WR_DMA_CTRL_DTAG_MASK GENMASK(8, 6)
#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR 0
#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT 1
@@ -311,7 +336,7 @@
#define CAL_WR_DMA_CTRL_DTAG_D6 6
#define CAL_WR_DMA_CTRL_DTAG_D7 7
#define CAL_WR_DMA_CTRL_CPORT_MASK GENMASK(13, 9)
-#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT_MASK(14)
+#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT(14)
#define CAL_WR_DMA_CTRL_YSIZE_MASK GENMASK(31, 18)
#define CAL_WR_DMA_ADDR_MASK GENMASK(31, 4)
@@ -327,9 +352,9 @@
#define CAL_WR_DMA_XSIZE_XSKIP_MASK GENMASK(15, 3)
#define CAL_WR_DMA_XSIZE_MASK GENMASK(31, 19)
-#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT_MASK(0)
-#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT_MASK(2)
-#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT_MASK(3)
+#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT(0)
+#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT(2)
+#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT(3)
#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE 0
#define CAL_CSI2_PPI_CTRL_FRAME 1
@@ -340,18 +365,18 @@
#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2 2
#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1 1
#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED 0
-#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT(3)
#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS 0
#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS 1
#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK GENMASK(6, 4)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT(7)
#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK GENMASK(10, 8)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT(11)
#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK GENMASK(14, 12)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT(15)
#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK GENMASK(18, 16)
-#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT_MASK(19)
-#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT(19)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT(24)
#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK GENMASK(26, 25)
#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF 0
#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON 1
@@ -360,83 +385,83 @@
#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF 0
#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON 1
#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP 2
-#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT_MASK(29)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT(29)
#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED 1
#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING 0
-#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT(30)
#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL 0
#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL 1
#define CAL_CSI2_SHORT_PACKET_MASK GENMASK(23, 0)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT_MASK(0)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT_MASK(1)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT_MASK(2)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT_MASK(3)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT_MASK(4)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT_MASK(5)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT_MASK(6)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT_MASK(7)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT_MASK(8)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT_MASK(9)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT_MASK(10)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT_MASK(11)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT_MASK(12)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT_MASK(13)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT_MASK(14)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT_MASK(15)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT_MASK(16)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT_MASK(17)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT_MASK(18)
-#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT_MASK(19)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT_MASK(20)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT_MASK(21)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT_MASK(22)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT_MASK(23)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT_MASK(24)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT_MASK(25)
-#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT_MASK(26)
-#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT_MASK(27)
-#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT_MASK(28)
-#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT(0)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT(1)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT(2)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT(3)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT(4)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT(5)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT(6)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT(7)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT(8)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT(9)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT(10)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT(11)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT(12)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT(13)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT(14)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT(15)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT(16)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT(17)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT(18)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT(20)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT(21)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT(22)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT(23)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT(24)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT(25)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT(26)
+#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT(27)
+#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT(28)
+#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT(30)
#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK GENMASK(12, 0)
-#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT_MASK(13)
-#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT_MASK(14)
-#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT_MASK(15)
-
-#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK BIT_MASK(0)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK BIT_MASK(1)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK BIT_MASK(2)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK BIT_MASK(3)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK BIT_MASK(4)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK BIT_MASK(5)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK BIT_MASK(8)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK BIT_MASK(9)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK BIT_MASK(10)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK BIT_MASK(11)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK BIT_MASK(12)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK BIT_MASK(13)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK BIT_MASK(16)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK BIT_MASK(17)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK BIT_MASK(18)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK BIT_MASK(19)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK BIT_MASK(20)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK BIT_MASK(21)
-#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK BIT_MASK(24)
-#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK BIT_MASK(25)
-#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK BIT_MASK(26)
-#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK BIT_MASK(27)
-#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK BIT_MASK(28)
-#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK BIT_MASK(29)
+#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT(13)
+#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT(14)
+#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT(15)
+
+#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK BIT(0)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK BIT(1)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK BIT(2)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK BIT(3)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK BIT(4)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK BIT(5)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK BIT(8)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK BIT(9)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK BIT(10)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK BIT(11)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK BIT(12)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK BIT(13)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK BIT(16)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK BIT(17)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK BIT(18)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK BIT(19)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK BIT(20)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK BIT(21)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK BIT(24)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK BIT(25)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK BIT(26)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK BIT(27)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK BIT(28)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK BIT(29)
#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0)
#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6)
#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8)
-#define CAL_CSI2_CTX_ATT_MASK BIT_MASK(13)
+#define CAL_CSI2_CTX_ATT_MASK BIT(13)
#define CAL_CSI2_CTX_ATT_PIX 0
#define CAL_CSI2_CTX_ATT 1
-#define CAL_CSI2_CTX_PACK_MODE_MASK BIT_MASK(14)
+#define CAL_CSI2_CTX_PACK_MODE_MASK BIT(14)
#define CAL_CSI2_CTX_PACK_MODE_LINE 0
#define CAL_CSI2_CTX_PACK_MODE_FRAME 1
#define CAL_CSI2_CTX_LINES_MASK GENMASK(29, 16)
@@ -445,7 +470,7 @@
#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK GENMASK(7, 0)
#define CAL_CSI2_PHY_REG0_THS_TERM_MASK GENMASK(15, 8)
-#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT_MASK(24)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT(24)
#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE 1
#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE 0
@@ -453,24 +478,26 @@
#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK GENMASK(9, 8)
#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK GENMASK(17, 10)
#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK GENMASK(24, 18)
-#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT_MASK(25)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT(25)
#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR 1
#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS 0
#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK GENMASK(29, 28)
+#define CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK BIT(6)
+
#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK GENMASK(23, 0)
#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK GENMASK(25, 24)
#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK GENMASK(27, 26)
#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK GENMASK(29, 28)
#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK GENMASK(31, 30)
-#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT_MASK(0)
+#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT(0)
#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK GENMASK(2, 1)
#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK GENMASK(4, 3)
-#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT_MASK(5)
-#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT_MASK(10)
+#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT(5)
+#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT(10)
#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK GENMASK(12, 11)
#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK GENMASK(16, 13)
-#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT_MASK(17)
+#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT(17)
#endif
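
A note on the BIT_MASK() -> BIT() conversions above: in linux/bits.h, BIT_MASK(nr) expands to 1UL << ((nr) % BITS_PER_LONG) and is meant to pair with BIT_WORD() when indexing multi-word bitmaps, while BIT(nr) is the plain single-bit constant appropriate for 32-bit register fields. For bits 0-31 the two produce identical values, so the change is about intent, not behavior. A minimal user-space sketch, assuming an LP64 build where BITS_PER_LONG is 64:

#include <stdio.h>

#define BITS_PER_LONG 64                     /* assumption: LP64 build */
#define BIT(nr)       (1UL << (nr))
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

int main(void)
{
	/* For register bits 0..31 the two macros agree... */
	printf("BIT(15)      = %#lx\n", BIT(15));
	printf("BIT_MASK(15) = %#lx\n", BIT_MASK(15));
	/* ...but BIT_MASK() wraps: bitmap bit 70 lives in word
	 * BIT_WORD(70) == 1 at mask BIT_MASK(70) == BIT(6). */
	printf("BIT_WORD(70) = %d, BIT_MASK(70) = %#lx\n",
	       (int)BIT_WORD(70), BIT_MASK(70));
	return 0;
}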
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index 834114a4eebe..f4e0cf72d1cf 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -149,36 +149,28 @@ void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
enum v4l2_quantization src_quantization, dst_quantization;
u32 src_pixelformat, dst_pixelformat;
- switch (src_fmt->type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- pix = &src_fmt->fmt.pix;
- src_pixelformat = pix->pixelformat;
- src_ycbcr_enc = pix->ycbcr_enc;
- src_quantization = pix->quantization;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- default:
+ if (V4L2_TYPE_IS_MULTIPLANAR(src_fmt->type)) {
mp = &src_fmt->fmt.pix_mp;
src_pixelformat = mp->pixelformat;
src_ycbcr_enc = mp->ycbcr_enc;
src_quantization = mp->quantization;
- break;
+ } else {
+ pix = &src_fmt->fmt.pix;
+ src_pixelformat = pix->pixelformat;
+ src_ycbcr_enc = pix->ycbcr_enc;
+ src_quantization = pix->quantization;
}
- switch (dst_fmt->type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- pix = &dst_fmt->fmt.pix;
- dst_pixelformat = pix->pixelformat;
- dst_ycbcr_enc = pix->ycbcr_enc;
- dst_quantization = pix->quantization;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- default:
+ if (V4L2_TYPE_IS_MULTIPLANAR(dst_fmt->type)) {
mp = &dst_fmt->fmt.pix_mp;
dst_pixelformat = mp->pixelformat;
dst_ycbcr_enc = mp->ycbcr_enc;
dst_quantization = mp->quantization;
- break;
+ } else {
+ pix = &dst_fmt->fmt.pix;
+ dst_pixelformat = pix->pixelformat;
+ dst_ycbcr_enc = pix->ycbcr_enc;
+ dst_quantization = pix->quantization;
}
src_finfo = v4l2_format_info(src_pixelformat);
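
The csc.c hunk above collapses the per-enum switch into a single predicate. V4L2_TYPE_IS_MULTIPLANAR() comes from include/uapi/linux/videodev2.h and simply tests for the two *_MPLANE buffer types, so one if/else now covers the capture and output directions alike, and every non-multiplanar type falls through to the single-planar branch that the old default: label handled:

/* The predicate the hunk relies on; definition mirrors videodev2.h. */
#define V4L2_TYPE_IS_MULTIPLANAR(type)			\
	((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE	\
	 || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)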
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 2f88a7d9d67b..e2e551bc3ded 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -8,6 +8,7 @@
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-rect.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
@@ -18,6 +19,9 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
#define MAX_ZOOM 8
+#define VIMC_SCA_FMT_WIDTH_DEFAULT 640
+#define VIMC_SCA_FMT_HEIGHT_DEFAULT 480
+
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
@@ -25,6 +29,7 @@ struct vimc_sca_device {
* with the width and height multiplied by mult
*/
struct v4l2_mbus_framefmt sink_fmt;
+ struct v4l2_rect crop_rect;
/* Values calculated when the stream starts */
u8 *src_frame;
unsigned int src_line_size;
@@ -33,22 +38,64 @@ struct vimc_sca_device {
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
- .width = 640,
- .height = 480,
+ .width = VIMC_SCA_FMT_WIDTH_DEFAULT,
+ .height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
.code = MEDIA_BUS_FMT_RGB888_1X24,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_DEFAULT,
};
+static const struct v4l2_rect crop_rect_default = {
+ .width = VIMC_SCA_FMT_WIDTH_DEFAULT,
+ .height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
+ .top = 0,
+ .left = 0,
+};
+
+static const struct v4l2_rect crop_rect_min = {
+ .width = VIMC_FRAME_MIN_WIDTH,
+ .height = VIMC_FRAME_MIN_HEIGHT,
+ .top = 0,
+ .left = 0,
+};
+
+static struct v4l2_rect
+vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
+{
+ /* Get the crop bounds to clamp the crop rectangle correctly */
+ struct v4l2_rect r = {
+ .left = 0,
+ .top = 0,
+ .width = sink_fmt->width,
+ .height = sink_fmt->height,
+ };
+ return r;
+}
+
+static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
+ const struct v4l2_mbus_framefmt *sink_fmt)
+{
+ const struct v4l2_rect sink_rect =
+ vimc_sca_get_crop_bound_sink(sink_fmt);
+
+ /* Disallow rectangles smaller than the minimal one. */
+ v4l2_rect_set_min_size(r, &crop_rect_min);
+ v4l2_rect_map_inside(r, &sink_rect);
+}
+
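
vimc_sca_adjust_sink_crop() delegates the clamping to two helpers from media/v4l2-rect.h: v4l2_rect_set_min_size() grows the rectangle to at least the given minimum, and v4l2_rect_map_inside() shrinks it to the boundary and then shifts its origin so it fits. A user-space re-implementation to illustrate the order of operations — a sketch only, with 16x16 standing in for VIMC_FRAME_MIN_WIDTH/HEIGHT:

#include <stdio.h>

struct rect { int left, top, width, height; };

/* grow to at least the minimum size (cf. v4l2_rect_set_min_size) */
static void set_min_size(struct rect *r, const struct rect *min)
{
	if (r->width < min->width)
		r->width = min->width;
	if (r->height < min->height)
		r->height = min->height;
}

/* shrink to the boundary, then shift the origin back inside it
 * (cf. v4l2_rect_map_inside) */
static void map_inside(struct rect *r, const struct rect *b)
{
	if (r->width > b->width)
		r->width = b->width;
	if (r->height > b->height)
		r->height = b->height;
	if (r->left < b->left)
		r->left = b->left;
	if (r->top < b->top)
		r->top = b->top;
	if (r->left + r->width > b->left + b->width)
		r->left = b->left + b->width - r->width;
	if (r->top + r->height > b->top + b->height)
		r->top = b->top + b->height - r->height;
}

int main(void)
{
	struct rect bounds = { 0, 0, 640, 480 };
	struct rect min = { 0, 0, 16, 16 };	/* stand-in minimum */
	struct rect r = { 600, 400, 100, 100 };	/* requested crop */

	set_min_size(&r, &min);
	map_inside(&r, &bounds);
	printf("(%d,%d)/%dx%d\n", r.left, r.top, r.width, r.height);
	/* -> (540,380)/100x100: shifted so it fits inside 640x480 */
	return 0;
}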
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg)
{
struct v4l2_mbus_framefmt *mf;
+ struct v4l2_rect *r;
unsigned int i;
mf = v4l2_subdev_get_try_format(sd, cfg, 0);
*mf = sink_fmt_default;
+ r = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ *r = crop_rect_default;
+
for (i = 1; i < sd->entity.num_pads; i++) {
mf = v4l2_subdev_get_try_format(sd, cfg, i);
*mf = sink_fmt_default;
@@ -107,16 +154,21 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop_rect;
/* Get the current sink format */
- format->format = (format->which == V4L2_SUBDEV_FORMAT_TRY) ?
- *v4l2_subdev_get_try_format(sd, cfg, 0) :
- vsca->sink_fmt;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ } else {
+ format->format = vsca->sink_fmt;
+ crop_rect = &vsca->crop_rect;
+ }
/* Scale the frame size for the source pad */
if (VIMC_IS_SRC(format->pad)) {
- format->format.width = vsca->sink_fmt.width * sca_mult;
- format->format.height = vsca->sink_fmt.height * sca_mult;
+ format->format.width = crop_rect->width * sca_mult;
+ format->format.height = crop_rect->height * sca_mult;
}
return 0;
@@ -148,6 +200,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *crop_rect;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
@@ -155,8 +208,10 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
return -EBUSY;
sink_fmt = &vsca->sink_fmt;
+ crop_rect = &vsca->crop_rect;
} else {
sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
}
/*
@@ -165,8 +220,8 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
*/
if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
- fmt->format.width = sink_fmt->width * sca_mult;
- fmt->format.height = sink_fmt->height * sca_mult;
+ fmt->format.width = crop_rect->width * sca_mult;
+ fmt->format.height = crop_rect->height * sca_mult;
} else {
/* Set the new format in the sink pad */
vimc_sca_adjust_sink_fmt(&fmt->format);
@@ -184,6 +239,78 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
fmt->format.xfer_func, fmt->format.ycbcr_enc);
*sink_fmt = fmt->format;
+
+ /* Do the crop, but respect the current bounds */
+ vimc_sca_adjust_sink_crop(crop_rect, sink_fmt);
+ }
+
+ return 0;
+}
+
+static int vimc_sca_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *crop_rect;
+
+ if (VIMC_IS_SRC(sel->pad))
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sink_fmt = &vsca->sink_fmt;
+ crop_rect = &vsca->crop_rect;
+ } else {
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop_rect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r = vimc_sca_get_crop_bound_sink(sink_fmt);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vimc_sca_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *crop_rect;
+
+ if (VIMC_IS_SRC(sel->pad))
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vsca->src_frame)
+ return -EBUSY;
+
+ crop_rect = &vsca->crop_rect;
+ sink_fmt = &vsca->sink_fmt;
+ } else {
+ crop_rect = v4l2_subdev_get_try_crop(sd, cfg, 0);
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* Do the crop, but respect the current bounds */
+ vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
+ *crop_rect = sel->r;
+ break;
+ default:
+ return -EINVAL;
}
return 0;
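
With .get_selection/.set_selection wired into vimc_sca_pad_ops below, userspace can program the scaler's sink crop through the subdev node. A hedged usage sketch — the device path and rectangle values are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	struct v4l2_subdev_selection sel;
	/* hypothetical node; pick the vimc scaler subdev on your system */
	int fd = open("/dev/v4l-subdev5", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&sel, 0, sizeof(sel));
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.pad = 0;				/* sink pad */
	sel.target = V4L2_SEL_TGT_CROP;
	sel.r.left = 100;
	sel.r.top = 100;
	sel.r.width = 320;
	sel.r.height = 240;

	/* the driver clamps sel.r via vimc_sca_adjust_sink_crop() and
	 * returns the rectangle it actually applied */
	if (ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel))
		perror("VIDIOC_SUBDEV_S_SELECTION");
	else
		printf("crop: (%d,%d)/%ux%u\n", sel.r.left, sel.r.top,
		       sel.r.width, sel.r.height);
	return 0;
}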
@@ -195,6 +322,8 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
.enum_frame_size = vimc_sca_enum_frame_size,
.get_fmt = vimc_sca_get_fmt,
.set_fmt = vimc_sca_set_fmt,
+ .get_selection = vimc_sca_get_selection,
+ .set_selection = vimc_sca_set_selection,
};
static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
@@ -213,11 +342,11 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
vsca->bpp = vpix->bpp;
/* Calculate the width in bytes of the src frame */
- vsca->src_line_size = vsca->sink_fmt.width *
+ vsca->src_line_size = vsca->crop_rect.width *
sca_mult * vsca->bpp;
/* Calculate the frame size of the source pad */
- frame_size = vsca->src_line_size * vsca->sink_fmt.height *
+ frame_size = vsca->src_line_size * vsca->crop_rect.height *
sca_mult;
/* Allocate the frame buffer. Use vmalloc to be able to
@@ -259,9 +388,10 @@ static void vimc_sca_fill_pix(u8 *const ptr,
}
static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
- const unsigned int lin, const unsigned int col,
+ unsigned int lin, unsigned int col,
const u8 *const sink_frame)
{
+ const struct v4l2_rect crop_rect = vsca->crop_rect;
unsigned int i, j, index;
const u8 *pixel;
@@ -278,8 +408,10 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
/* point to the place we are going to put the first pixel
* in the scaled src frame
*/
+ lin -= crop_rect.top;
+ col -= crop_rect.left;
index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
- vsca->sink_fmt.width * sca_mult, vsca->bpp);
+ crop_rect.width * sca_mult, vsca->bpp);
dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
vsca->sd.name, lin * sca_mult, col * sca_mult, index);
@@ -307,12 +439,13 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
const u8 *const sink_frame)
{
+ const struct v4l2_rect r = vsca->crop_rect;
unsigned int i, j;
/* Scale each pixel from the original sink frame */
/* TODO: implement scale down, only scale up is supported for now */
- for (i = 0; i < vsca->sink_fmt.height; i++)
- for (j = 0; j < vsca->sink_fmt.width; j++)
+ for (i = r.top; i < r.top + r.height; i++)
+ for (j = r.left; j < r.left + r.width; j++)
vimc_sca_scale_pix(vsca, i, j, sink_frame);
}
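
Because the scan now runs over [top, top + height) x [left, left + width), vimc_sca_scale_pix() above first rebases (lin, col) to the crop origin before computing the destination offset; the stride is the cropped width scaled by sca_mult. A worked example of the arithmetic under assumed values (3x upscale, 3 bytes per pixel), with an index helper that mirrors VIMC_FRAME_INDEX():

#include <stdio.h>

/* row-major byte index of pixel (lin, col); mirrors VIMC_FRAME_INDEX() */
static unsigned int frame_index(unsigned int lin, unsigned int col,
				unsigned int width, unsigned int bpp)
{
	return (lin * width + col) * bpp;
}

int main(void)
{
	unsigned int sca_mult = 3, bpp = 3;	/* assumed values */
	struct { int left, top; unsigned int w, h; } crop = { 100, 50, 320, 240 };
	unsigned int lin = 60, col = 110;	/* sink coordinates */

	/* rebase to the crop origin, as the hunk above does */
	lin -= crop.top;
	col -= crop.left;
	printf("dst index = %u\n",
	       frame_index(lin * sca_mult, col * sca_mult,
			   crop.w * sca_mult, bpp));	/* 86490 */
	return 0;
}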
@@ -384,5 +517,8 @@ struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
/* Initialize the frame format */
vsca->sink_fmt = sink_fmt_default;
+ /* Initialize the crop selection */
+ vsca->crop_rect = crop_rect_default;
+
return &vsca->ved;
}
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index e8a50c506dc9..b12ad0152a3e 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,7 +3,8 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o vivid-meta-cap.o vivid-meta-out.o
+ vivid-osd.o vivid-meta-cap.o vivid-meta-out.o \
+ vivid-kthread-touch.o vivid-touch-cap.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index c184f9b0be69..15091cbf6de7 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -39,6 +39,7 @@
#include "vivid-ctrls.h"
#include "vivid-meta-cap.h"
#include "vivid-meta-out.h"
+#include "vivid-touch-cap.h"
#define VIVID_MODULE_NAME "vivid"
@@ -89,6 +90,10 @@ static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(meta_out_nr, int, NULL, 0444);
MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
+static int touch_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(touch_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(touch_cap_nr, " v4l-touchX start number, -1 is autodetect");
+
static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_cap_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
@@ -110,10 +115,10 @@ MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 cr
* vbi-out + vid-out + meta-cap
*/
static unsigned int node_types[VIVID_MAX_DEVS] = {
- [0 ... (VIVID_MAX_DEVS - 1)] = 0x61d3d
+ [0 ... (VIVID_MAX_DEVS - 1)] = 0xe1d3d
};
module_param_array(node_types, uint, NULL, 0444);
-MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the following meaning:\n"
+MODULE_PARM_DESC(node_types, " node types, default is 0xe1d3d. Bitmask with the following meaning:\n"
"\t\t bit 0: Video Capture node\n"
"\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 4: Radio Receiver node\n"
@@ -123,7 +128,8 @@ MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the
"\t\t bit 12: Radio Transmitter node\n"
"\t\t bit 16: Framebuffer for testing overlays\n"
"\t\t bit 17: Metadata Capture node\n"
- "\t\t bit 18: Metadata Output node\n");
+ "\t\t bit 18: Metadata Output node\n"
+ "\t\t bit 19: Touch Capture node\n");
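
The default moves from 0x61d3d to 0xe1d3d, i.e. bit 19 is now set and a touch capture node is instantiated out of the box; vivid_create_instance() tests exactly that bit:

#include <stdio.h>

int main(void)
{
	unsigned int node_type = 0xe1d3d;	/* new default */

	/* bit 19 = 0x80000 selects the touch capture node, matching the
	 * check added to vivid_create_instance() further down */
	printf("touch cap node: %s\n",
	       (node_type & 0x80000) ? "yes" : "no");
	return 0;
}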
/* Default: 4 inputs */
static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
@@ -223,7 +229,8 @@ static int vidioc_querycap(struct file *file, void *priv,
dev->vbi_cap_caps | dev->vbi_out_caps |
dev->radio_rx_caps | dev->radio_tx_caps |
dev->sdr_cap_caps | dev->meta_cap_caps |
- dev->meta_out_caps | V4L2_CAP_DEVICE_CAPS;
+ dev->meta_out_caps | dev->touch_cap_caps |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -377,6 +384,8 @@ static int vidioc_g_parm(struct file *file, void *fh,
{
struct video_device *vdev = video_devdata(file);
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_parm_tch(file, fh, parm);
if (vdev->vfl_dir == VFL_DIR_RX)
return vivid_vid_cap_g_parm(file, fh, parm);
return vivid_vid_out_g_parm(file, fh, parm);
@@ -432,6 +441,104 @@ static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wa
return vivid_radio_tx_poll(file, wait);
}
+static int vivid_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_enum_input_tch(file, priv, inp);
+ return vidioc_enum_input(file, priv, inp);
+}
+
+static int vivid_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_input_tch(file, priv, i);
+ return vidioc_g_input(file, priv, i);
+}
+
+static int vivid_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_s_input_tch(file, priv, i);
+ return vidioc_s_input(file, priv, i);
+}
+
+static int vivid_enum_fmt_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_enum_fmt_tch(file, priv, f);
+ return vivid_enum_fmt_vid(file, priv, f);
+}
+
+static int vivid_g_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_fmt_tch(file, priv, f);
+ return vidioc_g_fmt_vid_cap(file, priv, f);
+}
+
+static int vivid_try_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_fmt_tch(file, priv, f);
+ return vidioc_try_fmt_vid_cap(file, priv, f);
+}
+
+static int vivid_s_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_fmt_tch(file, priv, f);
+ return vidioc_s_fmt_vid_cap(file, priv, f);
+}
+
+static int vivid_g_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_fmt_tch_mplane(file, priv, f);
+ return vidioc_g_fmt_vid_cap_mplane(file, priv, f);
+}
+
+static int vivid_try_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_fmt_tch_mplane(file, priv, f);
+ return vidioc_try_fmt_vid_cap_mplane(file, priv, f);
+}
+
+static int vivid_s_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_TOUCH)
+ return vivid_g_fmt_tch_mplane(file, priv, f);
+ return vidioc_s_fmt_vid_cap_mplane(file, priv, f);
+}
+
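
All of these wrappers follow one idiom: vivid registers a single v4l2_ioctl_ops table for every node it creates, so each entry point looks up the node type via video_devdata() and routes the call to the touch or video implementation. Condensed, with hypothetical handler names:

/* handle_touch()/handle_video() are hypothetical stand-ins */
static int vivid_some_ioctl(struct file *file, void *priv, void *arg)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_TOUCH)
		return handle_touch(file, priv, arg);
	return handle_video(file, priv, arg);
}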
static bool vivid_is_in_use(struct video_device *vdev)
{
unsigned long flags;
@@ -453,7 +560,8 @@ static bool vivid_is_last_user(struct vivid_dev *dev)
vivid_is_in_use(&dev->radio_rx_dev) +
vivid_is_in_use(&dev->radio_tx_dev) +
vivid_is_in_use(&dev->meta_cap_dev) +
- vivid_is_in_use(&dev->meta_out_dev);
+ vivid_is_in_use(&dev->meta_out_dev) +
+ vivid_is_in_use(&dev->touch_cap_dev);
return uses == 1;
}
@@ -481,6 +589,7 @@ static int vivid_fop_release(struct file *file)
set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->touch_cap_dev.flags);
}
mutex_unlock(&dev->mutex);
if (file->private_data == dev->overlay_cap_owner)
@@ -522,13 +631,13 @@ static const struct v4l2_file_operations vivid_radio_fops = {
static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vivid_enum_fmt_vid,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
- .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
- .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_cap = vivid_enum_fmt_cap,
+ .vidioc_g_fmt_vid_cap = vivid_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap = vivid_try_fmt_cap,
+ .vidioc_s_fmt_vid_cap = vivid_s_fmt_cap,
+ .vidioc_g_fmt_vid_cap_mplane = vivid_g_fmt_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = vivid_try_fmt_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vivid_s_fmt_cap_mplane,
.vidioc_enum_fmt_vid_out = vivid_enum_fmt_vid,
.vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
@@ -590,9 +699,9 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_enum_input = vivid_enum_input,
+ .vidioc_g_input = vivid_g_input,
+ .vidioc_s_input = vivid_s_input,
.vidioc_s_audio = vidioc_s_audio,
.vidioc_g_audio = vidioc_g_audio,
.vidioc_enumaudio = vidioc_enumaudio,
@@ -921,6 +1030,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_scaler_out ? 'Y' : 'N');
}
+ /* do we create a touch capture device */
+ dev->has_touch_cap = node_type & 0x80000;
+
/* end detecting feature set */
if (dev->has_vid_cap) {
@@ -994,6 +1106,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_audio_outputs)
dev->meta_out_caps |= V4L2_CAP_AUDIO;
}
+ /* set up the capabilities of the touch capture device */
+ if (dev->has_touch_cap) {
+ dev->touch_cap_caps = V4L2_CAP_TOUCH | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ dev->touch_cap_caps |= dev->multiplanar ?
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE : V4L2_CAP_VIDEO_CAPTURE;
+ }
ret = -ENOMEM;
/* initialize the test pattern generator */
@@ -1129,6 +1248,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_S_PARM);
+ v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMESIZES);
+ v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMEINTERVALS);
/* configure internal data */
dev->fmt_cap = &vivid_formats[0];
@@ -1201,6 +1323,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->fb_cap.fmt.bytesperline = dev->src_rect.width * tpg_g_twopixelsize(&dev->tpg, 0) / 2;
dev->fb_cap.fmt.sizeimage = dev->src_rect.height * dev->fb_cap.fmt.bytesperline;
+ /* update touch configuration */
+ dev->timeperframe_tch_cap.numerator = 1;
+ dev->timeperframe_tch_cap.denominator = 10;
+ vivid_set_touch(dev, 0);
+
/* initialize locks */
spin_lock_init(&dev->slock);
mutex_init(&dev->mutex);
@@ -1213,6 +1340,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->sdr_cap_active);
INIT_LIST_HEAD(&dev->meta_cap_active);
INIT_LIST_HEAD(&dev->meta_out_active);
+ INIT_LIST_HEAD(&dev->touch_cap_active);
INIT_LIST_HEAD(&dev->cec_work_list);
spin_lock_init(&dev->cec_slock);
@@ -1294,6 +1422,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
goto unreg_dev;
}
+ if (dev->has_touch_cap) {
+ /* initialize touch_cap queue */
+ ret = vivid_create_queue(dev, &dev->vb_touch_cap_q,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, 1,
+ &vivid_touch_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
if (dev->has_fb) {
/* Create framebuffer for testing capture/output overlay */
ret = vivid_fb_init(dev);
@@ -1345,6 +1482,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_touch_cap);
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
@@ -1635,6 +1773,35 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+ if (dev->has_touch_cap) {
+ vfd = &dev->touch_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-touch-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->touch_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_touch_cap_q;
+ vfd->tvnorms = tvnorms_cap;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->touch_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->touch_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_TOUCH,
+ touch_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 touch capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device */
ret = media_device_register(&dev->mdev);
@@ -1651,6 +1818,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
return 0;
unreg_dev:
+ video_unregister_device(&dev->touch_cap_dev);
video_unregister_device(&dev->meta_out_dev);
video_unregister_device(&dev->meta_cap_dev);
video_unregister_device(&dev->radio_tx_dev);
@@ -1778,6 +1946,11 @@ static int vivid_remove(struct platform_device *pdev)
video_device_node_name(&dev->meta_out_dev));
video_unregister_device(&dev->meta_out_dev);
}
+ if (dev->has_touch_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->touch_cap_dev));
+ video_unregister_device(&dev->touch_cap_dev);
+ }
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 59192b67231c..99e69b8f770f 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -133,6 +133,7 @@ struct vivid_dev {
struct media_pad sdr_cap_pad;
struct media_pad meta_cap_pad;
struct media_pad meta_out_pad;
+ struct media_pad touch_cap_pad;
#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
@@ -159,6 +160,8 @@ struct vivid_dev {
struct v4l2_ctrl_handler ctrl_hdl_meta_cap;
struct video_device meta_out_dev;
struct v4l2_ctrl_handler ctrl_hdl_meta_out;
+ struct video_device touch_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_touch_cap;
spinlock_t slock;
struct mutex mutex;
@@ -173,6 +176,7 @@ struct vivid_dev {
u32 radio_tx_caps;
u32 meta_cap_caps;
u32 meta_out_caps;
+ u32 touch_cap_caps;
/* supported features */
bool multiplanar;
@@ -201,6 +205,7 @@ struct vivid_dev {
bool has_meta_cap;
bool has_meta_out;
bool has_tv_tuner;
+ bool has_touch_cap;
bool can_loop_video;
@@ -404,6 +409,8 @@ struct vivid_dev {
struct list_head vbi_cap_active;
struct vb2_queue vb_meta_cap_q;
struct list_head meta_cap_active;
+ struct vb2_queue vb_touch_cap_q;
+ struct list_head touch_cap_active;
/* thread for generating video capture stream */
struct task_struct *kthread_vid_cap;
@@ -425,6 +432,19 @@ struct vivid_dev {
u32 meta_cap_seq_count;
bool meta_cap_streaming;
+ /* Touch capture */
+ struct task_struct *kthread_touch_cap;
+ unsigned long jiffies_touch_cap;
+ u64 touch_cap_stream_start;
+ u32 touch_cap_seq_offset;
+ bool touch_cap_seq_resync;
+ u32 touch_cap_seq_start;
+ u32 touch_cap_seq_count;
+ bool touch_cap_streaming;
+ struct v4l2_fract timeperframe_tch_cap;
+ struct v4l2_pix_format tch_format;
+ int tch_pat_random;
+
/* video output */
const struct vivid_fmt *fmt_out;
struct v4l2_fract timeperframe_vid_out;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 68e8124c7973..334130568dcb 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1508,6 +1508,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap;
struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out;
+ struct v4l2_ctrl_handler *hdl_tch_cap = &dev->ctrl_hdl_touch_cap;
struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
.ops = &vivid_vid_cap_ctrl_ops,
@@ -1551,6 +1552,8 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_meta_out, 2);
v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_tch_cap, 2);
+ v4l2_ctrl_new_custom(hdl_tch_cap, &vivid_ctrl_class, NULL);
/* User Controls */
dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
@@ -1904,6 +1907,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
return hdl_meta_out->error;
dev->meta_out_dev.ctrl_handler = hdl_meta_out;
}
+ if (dev->has_touch_cap) {
+ v4l2_ctrl_add_handler(hdl_tch_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_tch_cap, hdl_streaming, NULL, false);
+ if (hdl_tch_cap->error)
+ return hdl_tch_cap->error;
+ dev->touch_cap_dev.ctrl_handler = hdl_tch_cap;
+ }
return 0;
}
@@ -1925,4 +1935,5 @@ void vivid_free_controls(struct vivid_dev *dev)
v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_touch_cap);
}
diff --git a/drivers/media/platform/vivid/vivid-kthread-touch.c b/drivers/media/platform/vivid/vivid-kthread-touch.c
new file mode 100644
index 000000000000..674507b5ccb5
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-touch.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-kthread-touch.c - touch capture thread support functions.
+ *
+ */
+
+#include <linux/freezer.h>
+#include "vivid-core.h"
+#include "vivid-kthread-touch.h"
+#include "vivid-touch-cap.h"
+
+static noinline_for_stack void vivid_thread_tch_cap_tick(struct vivid_dev *dev,
+ int dropped_bufs)
+{
+ struct vivid_buffer *tch_cap_buf = NULL;
+
+ spin_lock(&dev->slock);
+ if (!list_empty(&dev->touch_cap_active)) {
+ tch_cap_buf = list_entry(dev->touch_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&tch_cap_buf->list);
+ }
+
+ spin_unlock(&dev->slock);
+
+ if (tch_cap_buf) {
+ v4l2_ctrl_request_setup(tch_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_touch_cap);
+
+ vivid_fillbuff_tch(dev, tch_cap_buf);
+ v4l2_ctrl_request_complete(tch_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_touch_cap);
+		tch_cap_buf->vb.vb2_buf.timestamp =
+			ktime_get_ns() + dev->time_wrap_offset;
+		vb2_buffer_done(&tch_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+		dprintk(dev, 2, "touch_cap buffer %d done\n",
+			tch_cap_buf->vb.vb2_buf.index);
+ }
+ dev->dqbuf_error = false;
+}
+
+static int vivid_thread_touch_cap(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 numerators_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned int wait_jiffies;
+ unsigned int numerator;
+ unsigned int denominator;
+ int dropped_bufs;
+
+ dprintk(dev, 1, "Touch Capture Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->touch_cap_seq_offset = 0;
+ dev->touch_cap_seq_count = 0;
+ dev->touch_cap_seq_resync = false;
+ dev->jiffies_touch_cap = jiffies;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+ cur_jiffies = jiffies;
+ if (dev->touch_cap_seq_resync) {
+ dev->jiffies_touch_cap = cur_jiffies;
+ dev->touch_cap_seq_offset = dev->touch_cap_seq_count + 1;
+ dev->touch_cap_seq_count = 0;
+			dev->touch_cap_seq_resync = false;
+ }
+ denominator = dev->timeperframe_tch_cap.denominator;
+ numerator = dev->timeperframe_tch_cap.numerator;
+
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_touch_cap;
+ /* Get the number of buffers streamed since the start */
+ buffers_since_start = (u64)jiffies_since_start * denominator +
+ (HZ * numerator) / 2;
+ do_div(buffers_since_start, HZ * numerator);
+
+ /*
+ * After more than 0xf0000000 (rounded down to a multiple of
+ * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+ * jiffies have passed since we started streaming reset the
+ * counters and keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_touch_cap = cur_jiffies;
+			dev->touch_cap_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dropped_bufs = buffers_since_start + dev->touch_cap_seq_offset - dev->touch_cap_seq_count;
+ dev->touch_cap_seq_count = buffers_since_start + dev->touch_cap_seq_offset;
+
+ vivid_thread_tch_cap_tick(dev, dropped_bufs);
+
+ /*
+ * Calculate the number of 'numerators' streamed
+ * since we started, including the current buffer.
+ */
+ numerators_since_start = ++buffers_since_start * numerator;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_touch_cap;
+
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = numerators_since_start * HZ +
+ denominator / 2;
+ do_div(next_jiffies_since_start, denominator);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "Touch Capture Thread End\n");
+ return 0;
+}
+
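
The pacing math above converts elapsed jiffies into a buffer count using timeperframe = numerator/denominator, adding half the divisor before do_div() to round to nearest. With the 1/10 s default programmed in vivid_create_instance() and an assumed HZ of 100, 250 jiffies of streaming should have produced 25 buffers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t hz = 100;		/* assumption for the example */
	const uint64_t numerator = 1, denominator = 10;	/* 1/10 s per frame */
	uint64_t jiffies_since_start = 250;	/* 2.5 s of streaming */

	/* buffers = round(jiffies * den / (HZ * num)), as in the thread */
	uint64_t buffers = (jiffies_since_start * denominator +
			    hz * numerator / 2) / (hz * numerator);

	printf("buffers_since_start = %llu\n",
	       (unsigned long long)buffers);	/* prints 25 */
	return 0;
}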
+int vivid_start_generating_touch_cap(struct vivid_dev *dev)
+{
+ if (dev->kthread_touch_cap) {
+ dev->touch_cap_streaming = true;
+ return 0;
+ }
+
+ dev->kthread_touch_cap = kthread_run(vivid_thread_touch_cap, dev,
+ "%s-tch-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_touch_cap)) {
+ int err = PTR_ERR(dev->kthread_touch_cap);
+
+ dev->kthread_touch_cap = NULL;
+		v4l2_err(&dev->v4l2_dev, "kthread_run() failed\n");
+ return err;
+ }
+ dev->touch_cap_streaming = true;
+ dprintk(dev, 1, "returning from %s\n", __func__);
+ return 0;
+}
+
+void vivid_stop_generating_touch_cap(struct vivid_dev *dev)
+{
+ if (!dev->kthread_touch_cap)
+ return;
+
+ dev->touch_cap_streaming = false;
+
+ while (!list_empty(&dev->touch_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->touch_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_touch_cap);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "touch_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+
+ kthread_stop(dev->kthread_touch_cap);
+ dev->kthread_touch_cap = NULL;
+}
diff --git a/drivers/media/platform/vivid/vivid-kthread-touch.h b/drivers/media/platform/vivid/vivid-kthread-touch.h
new file mode 100644
index 000000000000..ecf79b46209e
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-touch.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-kthread-touch.h - touch capture thread support functions.
+ *
+ */
+
+#ifndef _VIVID_KTHREAD_TOUCH_CAP_H_
+#define _VIVID_KTHREAD_TOUCH_CAP_H_
+
+int vivid_start_generating_touch_cap(struct vivid_dev *dev);
+void vivid_stop_generating_touch_cap(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-touch-cap.c b/drivers/media/platform/vivid/vivid-touch-cap.c
new file mode 100644
index 000000000000..ebb00b128030
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-touch-cap.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-touch-cap.c - touch support functions.
+ */
+
+#include "vivid-core.h"
+#include "vivid-kthread-touch.h"
+#include "vivid-vid-common.h"
+#include "vivid-touch-cap.h"
+
+static int touch_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *f = &dev->tch_format;
+ unsigned int size = f->sizeimage;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int touch_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format *f = &dev->tch_format;
+ unsigned int size = f->sizeimage;
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void touch_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->touch_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int touch_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dev->touch_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_touch_cap(dev);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->touch_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void touch_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ vivid_stop_generating_touch_cap(dev);
+}
+
+static void touch_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_touch_cap);
+}
+
+const struct vb2_ops vivid_touch_cap_qops = {
+ .queue_setup = touch_cap_queue_setup,
+ .buf_prepare = touch_cap_buf_prepare,
+ .buf_queue = touch_cap_buf_queue,
+ .start_streaming = touch_cap_start_streaming,
+ .stop_streaming = touch_cap_stop_streaming,
+ .buf_request_complete = touch_cap_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vivid_enum_fmt_tch(struct file *file, void *priv, struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+ return 0;
+}
+
+int vivid_g_fmt_tch(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ f->fmt.pix = dev->tch_format;
+ return 0;
+}
+
+int vivid_g_fmt_tch_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_format sp_fmt;
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ sp_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ sp_fmt.fmt.pix = dev->tch_format;
+ fmt_sp2mp(&sp_fmt, f);
+ return 0;
+}
+
+int vivid_g_parm_tch(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = dev->timeperframe_tch_cap;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
+int vivid_enum_input_tch(struct file *file, void *priv, struct v4l2_input *inp)
+{
+ if (inp->index)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_TOUCH;
+ strscpy(inp->name, "Vivid Touch", sizeof(inp->name));
+ inp->capabilities = 0;
+ return 0;
+}
+
+int vivid_g_input_tch(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+int vivid_set_touch(struct vivid_dev *dev, unsigned int i)
+{
+ struct v4l2_pix_format *f = &dev->tch_format;
+
+ if (i)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+ f->width = VIVID_TCH_WIDTH;
+ f->height = VIVID_TCH_HEIGHT;
+ f->field = V4L2_FIELD_NONE;
+ f->colorspace = V4L2_COLORSPACE_RAW;
+ f->bytesperline = f->width * sizeof(s16);
+ f->sizeimage = f->width * f->height * sizeof(s16);
+ return 0;
+}
+
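
vivid_set_touch() derives everything from the fixed 21x12 grid of 16-bit V4L2_TCH_FMT_DELTA_TD16 samples: bytesperline = 21 * 2 = 42 and sizeimage = 21 * 12 * 2 = 504 bytes. As a quick check:

#include <stdio.h>

int main(void)
{
	unsigned int width = 21, height = 12;	/* VIVID_TCH_WIDTH/HEIGHT */
	unsigned int bpp = 2;			/* sizeof(s16) deltas */

	printf("bytesperline = %u\n", width * bpp);		/* 42 */
	printf("sizeimage    = %u\n", width * height * bpp);	/* 504 */
	return 0;
}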
+int vivid_s_input_tch(struct file *file, void *priv, unsigned int i)
+{
+ return vivid_set_touch(video_drvdata(file), i);
+}
+
+static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
+{
+ int i;
+
+	/* Fill about 10% of the cells with a value in the range -3..3, zero the rest */
+ for (i = 0; i < size; i++) {
+ unsigned int rand = get_random_int();
+
+ if (rand % 10)
+ tch_buf[i] = 0;
+ else
+ tch_buf[i] = (rand / 10) % 7 - 3;
+ }
+}
+
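
The zero/non-zero split works because get_random_int() % 10 is non-zero for nine of every ten values; the surviving ~10% of cells take (rand / 10) % 7 - 3, one of the seven values -3..3. A user-space sketch of the same distribution, with rand() standing in for the kernel's get_random_int():

#include <stdio.h>
#include <stdlib.h>

static void fill_noise(short *buf, int size)
{
	for (int i = 0; i < size; i++) {
		unsigned int r = (unsigned int)rand();

		/* zero 9 cells in 10, small +/-3 noise in the rest */
		buf[i] = (r % 10) ? 0 : (short)((int)((r / 10) % 7) - 3);
	}
}

int main(void)
{
	short buf[21 * 12];	/* one touch frame */
	int nonzero = 0;

	fill_noise(buf, 21 * 12);
	for (int i = 0; i < 21 * 12; i++)
		nonzero += buf[i] != 0;
	printf("nonzero cells: %d of %d\n", nonzero, 21 * 12);
	return 0;
}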
+static inline int get_random_pressure(void)
+{
+ return get_random_int() % VIVID_PRESSURE_LIMIT;
+}
+
+static void vivid_tch_buf_set(struct v4l2_pix_format *f,
+ __s16 *tch_buf,
+ int index)
+{
+ unsigned int x = index % f->width;
+ unsigned int y = index / f->width;
+ unsigned int offset = VIVID_MIN_PRESSURE;
+
+ tch_buf[index] = offset + get_random_pressure();
+ offset /= 2;
+ if (x)
+ tch_buf[index - 1] = offset + get_random_pressure();
+ if (x < f->width - 1)
+ tch_buf[index + 1] = offset + get_random_pressure();
+ if (y)
+ tch_buf[index - f->width] = offset + get_random_pressure();
+ if (y < f->height - 1)
+ tch_buf[index + f->width] = offset + get_random_pressure();
+ offset /= 2;
+ if (x && y)
+ tch_buf[index - 1 - f->width] = offset + get_random_pressure();
+ if (x < f->width - 1 && y)
+ tch_buf[index + 1 - f->width] = offset + get_random_pressure();
+ if (x && y < f->height - 1)
+ tch_buf[index - 1 + f->width] = offset + get_random_pressure();
+ if (x < f->width - 1 && y < f->height - 1)
+ tch_buf[index + 1 + f->width] = offset + get_random_pressure();
+}
+
+void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct v4l2_pix_format *f = &dev->tch_format;
+ int size = f->width * f->height;
+ int x, y, xstart, ystart, offset_x, offset_y;
+ unsigned int test_pattern, test_pat_idx, rand;
+
+ __s16 *tch_buf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+ buf->vb.sequence = dev->touch_cap_seq_count;
+ test_pattern = (buf->vb.sequence / TCH_SEQ_COUNT) % TEST_CASE_MAX;
+ test_pat_idx = buf->vb.sequence % TCH_SEQ_COUNT;
+
+ vivid_fill_buff_noise(tch_buf, size);
+
+ if (test_pat_idx >= TCH_PATTERN_COUNT)
+ return;
+
+ if (test_pat_idx == 0)
+ dev->tch_pat_random = get_random_int();
+ rand = dev->tch_pat_random;
+
+ switch (test_pattern) {
+ case SINGLE_TAP:
+ if (test_pat_idx == 2)
+ vivid_tch_buf_set(f, tch_buf, rand % size);
+ break;
+ case DOUBLE_TAP:
+ if (test_pat_idx == 2 || test_pat_idx == 4)
+ vivid_tch_buf_set(f, tch_buf, rand % size);
+ break;
+ case TRIPLE_TAP:
+ if (test_pat_idx == 2 || test_pat_idx == 4 || test_pat_idx == 6)
+ vivid_tch_buf_set(f, tch_buf, rand % size);
+ break;
+ case MOVE_LEFT_TO_RIGHT:
+ vivid_tch_buf_set(f, tch_buf,
+ (rand % f->height) * f->width +
+ test_pat_idx *
+ (f->width / TCH_PATTERN_COUNT));
+ break;
+ case ZOOM_IN:
+ x = f->width / 2;
+ y = f->height / 2;
+ offset_x = ((TCH_PATTERN_COUNT - 1 - test_pat_idx) * x) /
+ TCH_PATTERN_COUNT;
+ offset_y = ((TCH_PATTERN_COUNT - 1 - test_pat_idx) * y) /
+ TCH_PATTERN_COUNT;
+ vivid_tch_buf_set(f, tch_buf,
+ (x - offset_x) + f->width * (y - offset_y));
+ vivid_tch_buf_set(f, tch_buf,
+ (x + offset_x) + f->width * (y + offset_y));
+ break;
+ case ZOOM_OUT:
+ x = f->width / 2;
+ y = f->height / 2;
+ offset_x = (test_pat_idx * x) / TCH_PATTERN_COUNT;
+ offset_y = (test_pat_idx * y) / TCH_PATTERN_COUNT;
+ vivid_tch_buf_set(f, tch_buf,
+ (x - offset_x) + f->width * (y - offset_y));
+ vivid_tch_buf_set(f, tch_buf,
+ (x + offset_x) + f->width * (y + offset_y));
+ break;
+ case PALM_PRESS:
+ for (x = 0; x < f->width; x++)
+ for (y = f->height / 2; y < f->height; y++)
+ tch_buf[x + f->width * y] = VIVID_MIN_PRESSURE +
+ get_random_pressure();
+ break;
+ case MULTIPLE_PRESS:
+ /* 16 pressure points */
+ for (y = 0; y < 4; y++) {
+ for (x = 0; x < 4; x++) {
+ ystart = (y * f->height) / 4 + f->height / 8;
+ xstart = (x * f->width) / 4 + f->width / 8;
+ vivid_tch_buf_set(f, tch_buf,
+ ystart * f->width + xstart);
+ }
+ }
+ break;
+ }
+#ifdef __BIG_ENDIAN__
+ for (x = 0; x < size; x++)
+ tch_buf[x] = (__force s16)__cpu_to_le16((u16)tch_buf[x]);
+#endif
+}
diff --git a/drivers/media/platform/vivid/vivid-touch-cap.h b/drivers/media/platform/vivid/vivid-touch-cap.h
new file mode 100644
index 000000000000..07e514046ae8
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-touch-cap.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-touch-cap.h - touch support functions.
+ */
+#ifndef _VIVID_TOUCH_CAP_H_
+#define _VIVID_TOUCH_CAP_H_
+
+#define VIVID_TCH_HEIGHT 12
+#define VIVID_TCH_WIDTH 21
+#define VIVID_MIN_PRESSURE 180
+#define VIVID_PRESSURE_LIMIT 40
+#define TCH_SEQ_COUNT 16
+#define TCH_PATTERN_COUNT 12
+
+enum vivid_tch_test {
+ SINGLE_TAP,
+ DOUBLE_TAP,
+ TRIPLE_TAP,
+ MOVE_LEFT_TO_RIGHT,
+ ZOOM_IN,
+ ZOOM_OUT,
+ PALM_PRESS,
+ MULTIPLE_PRESS,
+ TEST_CASE_MAX
+};
+
+extern const struct vb2_ops vivid_touch_cap_qops;
+
+int vivid_enum_fmt_tch(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vivid_g_fmt_tch(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_g_fmt_tch_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_enum_input_tch(struct file *file, void *priv, struct v4l2_input *inp);
+int vivid_g_input_tch(struct file *file, void *priv, unsigned int *i);
+int vivid_s_input_tch(struct file *file, void *priv, unsigned int i);
+void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vivid_set_touch(struct vivid_dev *dev, unsigned int i);
+int vivid_g_parm_tch(struct file *file, void *priv,
+ struct v4l2_streamparm *parm);
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 8665dfd25eb4..76b0be670ebb 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -813,7 +813,7 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
memset(mp->reserved, 0, sizeof(mp->reserved));
mp_fmt->type = is_out ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
- V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
mp->width = pix->width;
mp->height = pix->height;
mp->pixelformat = pix->pixelformat;
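
The fmt_sp2mp() one-liner untangles two look-alike identifiers: V4L2_CAP_VIDEO_CAPTURE_MPLANE is a capability flag, not a buffer type, so the multiplanar type field was previously being set to 0x1000 instead of 9. Per videodev2.h:

/* capability flag (a bit in v4l2_capability.capabilities) */
#define V4L2_CAP_VIDEO_CAPTURE_MPLANE	0x00001000
/* buffer type: V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == 9 (enum v4l2_buf_type) */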
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 872d6441e512..a7deca1fefb7 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -413,7 +413,7 @@ static int iguanair_probe(struct usb_interface *intf,
int ret, pipein, pipeout;
struct usb_host_interface *idesc;
- idesc = intf->altsetting;
+ idesc = intf->cur_altsetting;
if (idesc->desc.bNumEndpoints < 2)
return -ENODEV;
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 32ccefeff57d..d80cfa455c73 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -37,10 +37,13 @@
#define INT_CLR_RCV BIT(16)
#define INT_CLR_RCVTIMEOUT (BIT(16) | BIT(17))
-#define IR_CLK 0x48
#define IR_CLK_ENABLE BIT(4)
#define IR_CLK_RESET BIT(5)
+/* IR_ENABLE register bits */
+#define IR_ENABLE_EN BIT(0)
+#define IR_ENABLE_EN_EXTRA BIT(8)
+
#define IR_CFG_WIDTH_MASK 0xffff
#define IR_CFG_WIDTH_SHIFT 16
#define IR_CFG_FORMAT_MASK 0x3
@@ -58,6 +61,23 @@
#define IR_HIX5HD2_NAME "hix5hd2-ir"
+/* Need to set extra bit for enabling IR */
+#define HIX5HD2_FLAG_EXTRA_ENABLE BIT(0)
+
+struct hix5hd2_soc_data {
+ u32 clk_reg;
+ u32 flags;
+};
+
+static const struct hix5hd2_soc_data hix5hd2_data = {
+ .clk_reg = 0x48,
+};
+
+static const struct hix5hd2_soc_data hi3796cv300_data = {
+ .clk_reg = 0x60,
+ .flags = HIX5HD2_FLAG_EXTRA_ENABLE,
+};
+
struct hix5hd2_ir_priv {
int irq;
void __iomem *base;
@@ -66,15 +86,17 @@ struct hix5hd2_ir_priv {
struct regmap *regmap;
struct clk *clock;
unsigned long rate;
+ const struct hix5hd2_soc_data *socdata;
};
-static int hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
+static int hix5hd2_ir_clk_enable(struct hix5hd2_ir_priv *dev, bool on)
{
+ u32 clk_reg = dev->socdata->clk_reg;
u32 val;
int ret = 0;
if (dev->regmap) {
- regmap_read(dev->regmap, IR_CLK, &val);
+ regmap_read(dev->regmap, clk_reg, &val);
if (on) {
val &= ~IR_CLK_RESET;
val |= IR_CLK_ENABLE;
@@ -82,7 +104,7 @@ static int hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
val &= ~IR_CLK_ENABLE;
val |= IR_CLK_RESET;
}
- regmap_write(dev->regmap, IR_CLK, val);
+ regmap_write(dev->regmap, clk_reg, val);
} else {
if (on)
ret = clk_prepare_enable(dev->clock);
@@ -92,12 +114,23 @@ static int hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
return ret;
}
+static inline void hix5hd2_ir_enable(struct hix5hd2_ir_priv *priv)
+{
+ u32 val = IR_ENABLE_EN;
+
+ if (priv->socdata->flags & HIX5HD2_FLAG_EXTRA_ENABLE)
+ val |= IR_ENABLE_EN_EXTRA;
+
+ writel_relaxed(val, priv->base + IR_ENABLE);
+}
+
static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
{
int timeout = 10000;
u32 val, rate;
- writel_relaxed(0x01, priv->base + IR_ENABLE);
+ hix5hd2_ir_enable(priv);
+
while (readl_relaxed(priv->base + IR_BUSY)) {
if (timeout--) {
udelay(1);
@@ -128,13 +161,13 @@ static int hix5hd2_ir_open(struct rc_dev *rdev)
struct hix5hd2_ir_priv *priv = rdev->priv;
int ret;
- ret = hix5hd2_ir_enable(priv, true);
+ ret = hix5hd2_ir_clk_enable(priv, true);
if (ret)
return ret;
ret = hix5hd2_ir_config(priv);
if (ret) {
- hix5hd2_ir_enable(priv, false);
+ hix5hd2_ir_clk_enable(priv, false);
return ret;
}
return 0;
@@ -144,7 +177,7 @@ static void hix5hd2_ir_close(struct rc_dev *rdev)
{
struct hix5hd2_ir_priv *priv = rdev->priv;
- hix5hd2_ir_enable(priv, false);
+ hix5hd2_ir_clk_enable(priv, false);
}
static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
@@ -205,6 +238,13 @@ static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
+static const struct of_device_id hix5hd2_ir_table[] = {
+ { .compatible = "hisilicon,hix5hd2-ir", &hix5hd2_data, },
+ { .compatible = "hisilicon,hi3796cv300-ir", &hi3796cv300_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hix5hd2_ir_table);
+
static int hix5hd2_ir_probe(struct platform_device *pdev)
{
struct rc_dev *rdev;
@@ -212,6 +252,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
struct resource *res;
struct hix5hd2_ir_priv *priv;
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *of_id;
const char *map_name;
int ret;
@@ -219,6 +260,13 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ of_id = of_match_device(hix5hd2_ir_table, dev);
+ if (!of_id) {
+ dev_err(dev, "Unable to initialize IR data\n");
+ return -ENODEV;
+ }
+ priv->socdata = of_id->data;
+
priv->regmap = syscon_regmap_lookup_by_phandle(node,
"hisilicon,power-syscon");
if (IS_ERR(priv->regmap)) {
@@ -309,7 +357,7 @@ static int hix5hd2_ir_suspend(struct device *dev)
struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
clk_disable_unprepare(priv->clock);
- hix5hd2_ir_enable(priv, false);
+ hix5hd2_ir_clk_enable(priv, false);
return 0;
}
@@ -319,17 +367,18 @@ static int hix5hd2_ir_resume(struct device *dev)
struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
int ret;
- ret = hix5hd2_ir_enable(priv, true);
+ ret = hix5hd2_ir_clk_enable(priv, true);
if (ret)
return ret;
ret = clk_prepare_enable(priv->clock);
if (ret) {
- hix5hd2_ir_enable(priv, false);
+ hix5hd2_ir_clk_enable(priv, false);
return ret;
}
- writel_relaxed(0x01, priv->base + IR_ENABLE);
+ hix5hd2_ir_enable(priv);
+
writel_relaxed(0x00, priv->base + IR_INTM);
writel_relaxed(0xff, priv->base + IR_INTC);
writel_relaxed(0x01, priv->base + IR_START);
@@ -341,12 +390,6 @@ static int hix5hd2_ir_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(hix5hd2_ir_pm_ops, hix5hd2_ir_suspend,
hix5hd2_ir_resume);
-static const struct of_device_id hix5hd2_ir_table[] = {
- { .compatible = "hisilicon,hix5hd2-ir", },
- {},
-};
-MODULE_DEVICE_TABLE(of, hix5hd2_ir_table);
-
static struct platform_driver hix5hd2_ir_driver = {
.driver = {
.name = IR_HIX5HD2_NAME,
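
Note: the ir-hix5hd2 changes above introduce per-SoC match data so one driver can serve both hix5hd2 and hi3796cv300. A minimal sketch of that of_device_id pattern, using the equivalent of_device_get_match_data() helper instead of the patch's open-coded of_match_device() call (all foo_ names are illustrative):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct foo_soc_data {
            u32 clk_reg;
            u32 flags;
    };

    static const struct foo_soc_data foo_v1_data = { .clk_reg = 0x48 };

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo-v1", .data = &foo_v1_data },
            { /* sentinel */ }
    };

    static int foo_probe(struct platform_device *pdev)
    {
            /* Returns the .data pointer of the matching table entry,
             * or NULL if nothing matched. */
            const struct foo_soc_data *socdata =
                    of_device_get_match_data(&pdev->dev);

            if (!socdata)
                    return -ENODEV;
            /* socdata->clk_reg / socdata->flags select SoC behaviour */
            return 0;
    }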
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 7741151606ef..6f80c251f641 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1891,23 +1891,28 @@ int rc_register_device(struct rc_dev *dev)
dev->registered = true;
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
- rc = rc_setup_rx_device(dev);
- if (rc)
- goto out_dev;
- }
-
- /* Ensure that the lirc kfifo is setup before we start the thread */
+ /*
+ * Once the input device is registered in rc_setup_rx_device,
+ * userspace can open the input device and rc_open() will be called
+ * as a result. This results in driver code being allowed to submit
+ * keycodes with rc_keydown, so lirc must be registered first.
+ */
if (dev->allowed_protocols != RC_PROTO_BIT_CEC) {
rc = ir_lirc_register(dev);
if (rc < 0)
- goto out_rx;
+ goto out_dev;
+ }
+
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ rc = rc_setup_rx_device(dev);
+ if (rc)
+ goto out_lirc;
}
if (dev->driver_type == RC_DRIVER_IR_RAW) {
rc = ir_raw_event_register(dev);
if (rc < 0)
- goto out_lirc;
+ goto out_rx;
}
dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
@@ -1915,11 +1920,11 @@ int rc_register_device(struct rc_dev *dev)
return 0;
+out_rx:
+ rc_free_rx_device(dev);
out_lirc:
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
ir_lirc_unregister(dev);
-out_rx:
- rc_free_rx_device(dev);
out_dev:
device_del(&dev->dev);
out_rx_free:
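
Note: the rc-main.c hunks move lirc registration ahead of rc_setup_rx_device() so that rc_open() cannot let a driver submit keycodes before the lirc kfifo exists, and the error labels are reordered to match, since goto-unwind labels must mirror init order. A generic sketch of that pattern (all names illustrative):

    static int foo_register(struct foo_dev *dev)
    {
            int rc;

            rc = foo_init_a(dev);           /* must be ready first */
            if (rc)
                    return rc;

            rc = foo_init_b(dev);           /* may immediately use a */
            if (rc)
                    goto out_a;

            return 0;

    out_a:
            foo_teardown_a(dev);            /* unwind in reverse order */
            return rc;
    }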
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 7652e982173f..d77507ba0fb5 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -353,7 +353,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
dcd = (status & hardware[type].signal_pin) ? 1 : 0;
if (dcd == last_dcd) {
- dev_err(&serial_ir.pdev->dev,
+ dev_dbg(&serial_ir.pdev->dev,
"ignoring spike: %d %d %lldns %lldns\n",
dcd, sense, ktime_to_ns(kt),
ktime_to_ns(serial_ir.lastkt));
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 626264a56517..9d3d05125d7b 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -800,7 +800,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
break;
case FRAME_READY:
buf->bytesused = cam->buffers[buf->index].length;
- buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts);
+ v4l2_buffer_set_timestamp(buf, cam->buffers[buf->index].ts);
buf->sequence = cam->buffers[buf->index].seq;
buf->flags = V4L2_BUF_FLAG_DONE;
break;
@@ -907,7 +907,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE
| V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buf->field = V4L2_FIELD_NONE;
- buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts);
+ v4l2_buffer_set_timestamp(buf, cam->buffers[buf->index].ts);
buf->sequence = cam->buffers[buf->index].seq;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
buf->length = cam->frame_size;
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index fd6e2df3d1b7..de42db6f6ad1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -13,7 +13,6 @@
#include <linux/spinlock.h>
#include <linux/soundcard.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -372,28 +371,6 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
return errCode;
}
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
- size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
- struct cx231xx *dev = snd_pcm_substream_chip(subs);
-
- dev_dbg(dev->dev, "Allocating vbuffer\n");
- if (runtime->dma_area) {
- if (runtime->dma_bytes > size)
- return 0;
-
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc(size);
- if (!runtime->dma_area)
- return -ENOMEM;
-
- runtime->dma_bytes = size;
-
- return 0;
-}
-
static const struct snd_pcm_hardware snd_cx231xx_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
@@ -484,11 +461,6 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
}
dev->adev.users--;
- if (substream->runtime->dma_area) {
- dev_dbg(dev->dev, "freeing\n");
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_area = NULL;
- }
mutex_unlock(&dev->lock);
if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
@@ -504,44 +476,6 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct cx231xx *dev = snd_pcm_substream_chip(substream);
- int ret;
-
- dev_dbg(dev->dev, "Setting capture parameters\n");
-
- ret = snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
-#if 0
- /* TODO: set up cx231xx audio chip to deliver the correct audio format,
- current default is 48000hz multiplexed => 96000hz mono
- which shouldn't matter since analogue TV only supports mono */
- unsigned int channels, rate, format;
-
- format = params_format(hw_params);
- rate = params_rate(hw_params);
- channels = params_channels(hw_params);
-#endif
-
- return ret;
-}
-
-static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
-{
- struct cx231xx *dev = snd_pcm_substream_chip(substream);
-
- dev_dbg(dev->dev, "Stop capture, if needed\n");
-
- if (atomic_read(&dev->stream_started) > 0) {
- atomic_set(&dev->stream_started, 0);
- schedule_work(&dev->wq_trigger);
- }
-
- return 0;
-}
-
static int snd_cx231xx_prepare(struct snd_pcm_substream *substream)
{
struct cx231xx *dev = snd_pcm_substream_chip(substream);
@@ -614,24 +548,12 @@ static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
return hwptr_done;
}
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
-
- return vmalloc_to_page(pageptr);
-}
-
static const struct snd_pcm_ops snd_cx231xx_pcm_capture = {
.open = snd_cx231xx_capture_open,
.close = snd_cx231xx_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_cx231xx_hw_capture_params,
- .hw_free = snd_cx231xx_hw_capture_free,
.prepare = snd_cx231xx_prepare,
.trigger = snd_cx231xx_capture_trigger,
.pointer = snd_cx231xx_capture_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
static int cx231xx_audio_init(struct cx231xx *dev)
@@ -666,6 +588,7 @@ static int cx231xx_audio_init(struct cx231xx *dev)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_cx231xx_pcm_capture);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
pcm->info_flags = 0;
pcm->private_data = dev;
strscpy(pcm->name, "Conexant cx231xx Capture", sizeof(pcm->name));
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index f33b6a077d57..c6659253c6fb 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -515,7 +515,8 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
{
struct cx231xx *dev = bus->dev;
- BUG_ON(!dev->cx231xx_send_usb_command);
+ if (!dev->cx231xx_send_usb_command)
+ return -EINVAL;
bus->i2c_adap = cx231xx_adap_template;
bus->i2c_adap.dev.parent = dev->dev;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 792667ee5ebc..b1f69c11c839 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -208,8 +208,8 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
request_module("%s", board_info.type);
/* register I2C device */
- client = i2c_new_device(adapter, &board_info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(adapter, &board_info);
+ if (!i2c_client_has_driver(client)) {
ret = -ENODEV;
goto err;
}
@@ -1621,9 +1621,10 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
si2157_config.fe = adap->fe[0];
/*
- * HACK: The Logilink VG0022A has a bug: when the si2157
- * firmware that came with the device is replaced by a new
- * one, the I2C transfers to the tuner will return just 0xff.
+ * HACK: The Logilink VG0022A and TerraTec TC2 Stick have
+ * a bug: when the si2157 firmware that came with the device
+ * is replaced by a new one, the I2C transfers to the tuner
+ * will return just 0xff.
*
* Probably, the vendor firmware has some patch specifically
* designed for this device. So, we can't replace by the
@@ -1633,8 +1634,10 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
* while we don't have that, the next best solution is to just
* keep the original firmware at the device.
*/
- if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
- le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100)
+ if ((le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
+ le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100) ||
+ (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_TERRATEC &&
+ le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_TERRATEC_CINERGY_TC2_STICK))
si2157_config.dont_load_firmware = true;
si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
@@ -2150,6 +2153,8 @@ static const struct usb_device_id af9035_id_table[] = {
&it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
&it930x_props, "Logilink VG0022A", NULL) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_TC2_STICK,
+ &it930x_props, "TerraTec Cinergy TC2 Stick", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
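
Note: the i2c_new_device() calls here and in the following drivers are migrated to i2c_new_client_device(), which returns an ERR_PTR instead of NULL on failure; i2c_client_has_driver() covers both the error pointer and the no-driver-bound case in one test. A sketch of the resulting idiom (device type and address are illustrative):

    struct i2c_board_info info = { .type = "tuner", .addr = 0x60 };
    struct i2c_client *client;

    request_module("%s", info.type);
    client = i2c_new_client_device(adapter, &info);
    if (!i2c_client_has_driver(client))     /* handles ERR_PTR too */
            return -ENODEV;
    if (!try_module_get(client->dev.driver->owner)) {
            i2c_unregister_device(client);
            return -ENODEV;
    }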
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index fb6d99dea31a..0514e87405b6 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -649,8 +649,8 @@ static int anysee_add_i2c_dev(struct dvb_usb_device *d, const char *type,
request_module("%s", board_info.type);
/* register I2C device */
- client = i2c_new_device(adapter, &board_info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(adapter, &board_info);
+ if (!i2c_client_has_driver(client)) {
ret = -ENODEV;
goto err;
}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 5016ede7b35f..c6881a1b3232 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -697,8 +697,8 @@ static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x10;
board_info.platform_data = pdata;
request_module("%s", board_info.type);
- client = i2c_new_device(&d->i2c_adap, &board_info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(&d->i2c_adap, &board_info);
+ if (!i2c_client_has_driver(client)) {
ret = -ENODEV;
goto err;
}
@@ -918,8 +918,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x10;
board_info.platform_data = pdata;
request_module("%s", board_info.type);
- client = i2c_new_device(&d->i2c_adap, &board_info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(&d->i2c_adap, &board_info);
+ if (!i2c_client_has_driver(client)) {
ret = -ENODEV;
goto err;
}
@@ -960,8 +960,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.addr = 0x18;
info.platform_data = &mn88472_config;
request_module(info.type);
- client = i2c_new_device(&d->i2c_adap, &info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(&d->i2c_adap, &info);
+ if (!i2c_client_has_driver(client)) {
dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
@@ -982,8 +982,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.addr = 0x18;
info.platform_data = &mn88473_config;
request_module(info.type);
- client = i2c_new_device(&d->i2c_adap, &info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(&d->i2c_adap, &info);
+ if (!i2c_client_has_driver(client)) {
dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
@@ -1025,8 +1025,8 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module(info.type);
- client = i2c_new_device(&d->i2c_adap, &info);
- if (client == NULL || client->dev.driver == NULL) {
+ client = i2c_new_client_device(&d->i2c_adap, &info);
+ if (!i2c_client_has_driver(client)) {
dev->slave_demod = SLAVE_DEMOD_NONE;
goto err_slave_demod_failed;
}
@@ -1217,8 +1217,9 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
info.platform_data = &e4000_config;
request_module(info.type);
- client = i2c_new_device(dev->demod_i2c_adapter, &info);
- if (client == NULL || client->dev.driver == NULL)
+ client = i2c_new_client_device(dev->demod_i2c_adapter,
+ &info);
+ if (!i2c_client_has_driver(client))
break;
if (!try_module_get(client->dev.driver->owner)) {
@@ -1240,9 +1241,9 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x56;
board_info.platform_data = &fc2580_pdata;
request_module("fc2580");
- client = i2c_new_device(dev->demod_i2c_adapter,
- &board_info);
- if (client == NULL || client->dev.driver == NULL)
+ client = i2c_new_client_device(dev->demod_i2c_adapter,
+ &board_info);
+ if (!i2c_client_has_driver(client))
break;
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
@@ -1271,8 +1272,9 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x60;
board_info.platform_data = &tua9001_pdata;
request_module("tua9001");
- client = i2c_new_device(dev->demod_i2c_adapter, &board_info);
- if (client == NULL || client->dev.driver == NULL)
+ client = i2c_new_client_device(dev->demod_i2c_adapter,
+ &board_info);
+ if (!i2c_client_has_driver(client))
break;
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
@@ -1316,8 +1318,8 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
info.addr = 0x60;
info.platform_data = &si2157_config;
request_module(info.type);
- client = i2c_new_device(&d->i2c_adap, &info);
- if (client == NULL || client->dev.driver == NULL)
+ client = i2c_new_client_device(&d->i2c_adap, &info);
+ if (!i2c_client_has_driver(client))
break;
if (!try_module_get(client->dev.driver->owner)) {
@@ -1955,6 +1957,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl28xxu_props, "Sveon STV27", NULL) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TURBOX_DTT_2000,
&rtl28xxu_props, "TURBO-X Pure TV Tuner DTT-2000", NULL) },
+ { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_PROLECTRIX_DV107669,
+ &rtl28xxu_props, "PROlectrix DV107669", NULL) },
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
diff --git a/drivers/media/usb/dvb-usb-v2/zd1301.c b/drivers/media/usb/dvb-usb-v2/zd1301.c
index 63b66b207b64..ba2c1b0d3989 100644
--- a/drivers/media/usb/dvb-usb-v2/zd1301.c
+++ b/drivers/media/usb/dvb-usb-v2/zd1301.c
@@ -172,8 +172,8 @@ static int zd1301_frontend_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x60;
board_info.platform_data = &dev->mt2060_pdata;
request_module("%s", "mt2060");
- client = i2c_new_device(adapter, &board_info);
- if (!client || !client->dev.driver) {
+ client = i2c_new_client_device(adapter, &board_info);
+ if (!i2c_client_has_driver(client)) {
ret = -ENODEV;
goto err_module_put_demod;
}
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index ac93e88d7038..89b4b5d84cdf 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -554,7 +554,7 @@ static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
u8 *buf, int size)
{
u16 checksum;
- int act_len, i, ret;
+ int act_len = 0, i, ret;
memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index fac19ec46089..c421b603be44 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -54,9 +54,6 @@ MODULE_PARM_DESC(debug, "set debugging level (see cxusb.h)."
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define deb_info(args...) dprintk(dvb_usb_cxusb_debug, CXUSB_DBG_MISC, args)
-#define deb_i2c(args...) dprintk(dvb_usb_cxusb_debug, CXUSB_DBG_I2C, args)
-
enum cxusb_table_index {
MEDION_MD95700,
DVICO_BLUEBIRD_LG064F_COLD,
@@ -125,7 +122,7 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
if (i != 0x01)
- deb_info("gpio_write failed.\n");
+ dev_info(&d->udev->dev, "gpio_write failed.\n");
st->gpio_write_state[GPIO_TUNER] = onoff;
st->gpio_write_refresh[GPIO_TUNER] = false;
@@ -142,7 +139,7 @@ static int cxusb_bluebird_gpio_rw(struct dvb_usb_device *d, u8 changemask,
rc = cxusb_ctrl_msg(d, CMD_BLUEBIRD_GPIO_RW, o, 2, &gpio_state, 1);
if (rc < 0 || (gpio_state & changemask) != (newval & changemask))
- deb_info("bluebird_gpio_write failed.\n");
+ dev_info(&d->udev->dev, "bluebird_gpio_write failed.\n");
return rc < 0 ? rc : gpio_state;
}
@@ -174,7 +171,7 @@ static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d,
if (i == 0x01)
return 0;
- deb_info("gpio_write failed.\n");
+ dev_info(&d->udev->dev, "gpio_write failed.\n");
return -EIO;
}
@@ -248,7 +245,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
break;
if (ibuf[0] != 0x08)
- deb_i2c("i2c read may have failed\n");
+ dev_info(&d->udev->dev, "i2c read may have failed\n");
memcpy(msg[i + 1].buf, &ibuf[1], msg[i + 1].len);
@@ -271,7 +268,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
2 + msg[i].len, &ibuf, 1) < 0)
break;
if (ibuf != 0x08)
- deb_i2c("i2c write may have failed\n");
+ dev_info(&d->udev->dev, "i2c write may have failed\n");
}
}
@@ -299,7 +296,7 @@ static int _cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
u8 b = 0;
- deb_info("setting power %s\n", onoff ? "ON" : "OFF");
+ dev_info(&d->udev->dev, "setting power %s\n", onoff ? "ON" : "OFF");
if (onoff)
return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0);
@@ -318,7 +315,7 @@ static int cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
mutex_lock(&cxdev->open_lock);
if (cxdev->open_type == CXUSB_OPEN_ANALOG) {
- deb_info("preventing DVB core from setting power OFF while we are in analog mode\n");
+ dev_info(&d->udev->dev, "preventing DVB core from setting power OFF while we are in analog mode\n");
ret = -EBUSY;
goto ret_unlock;
}
@@ -754,16 +751,16 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
switch (command) {
case XC2028_TUNER_RESET:
- deb_info("%s: XC2028_TUNER_RESET %d\n", __func__, arg);
+ dev_info(&d->udev->dev, "XC2028_TUNER_RESET %d\n", arg);
cxusb_bluebird_gpio_pulse(d, 0x01, 1);
break;
case XC2028_RESET_CLK:
- deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
+ dev_info(&d->udev->dev, "XC2028_RESET_CLK %d\n", arg);
break;
case XC2028_I2C_FLUSH:
break;
default:
- deb_info("%s: unknown command %d, arg %d\n", __func__,
+ dev_info(&d->udev->dev, "unknown command %d, arg %d\n",
command, arg);
return -EINVAL;
}
@@ -1444,7 +1441,7 @@ int cxusb_medion_get(struct dvb_usb_device *dvbdev,
if (cxdev->open_ctr == 0) {
if (cxdev->open_type != open_type) {
- deb_info("will acquire and switch to %s\n",
+ dev_info(&dvbdev->udev->dev, "will acquire and switch to %s\n",
open_type == CXUSB_OPEN_ANALOG ?
"analog" : "digital");
@@ -1476,7 +1473,7 @@ int cxusb_medion_get(struct dvb_usb_device *dvbdev,
cxdev->open_type = open_type;
} else {
- deb_info("reacquired idle %s\n",
+ dev_info(&dvbdev->udev->dev, "reacquired idle %s\n",
open_type == CXUSB_OPEN_ANALOG ?
"analog" : "digital");
}
@@ -1484,8 +1481,8 @@ int cxusb_medion_get(struct dvb_usb_device *dvbdev,
cxdev->open_ctr = 1;
} else if (cxdev->open_type == open_type) {
cxdev->open_ctr++;
- deb_info("acquired %s\n", open_type == CXUSB_OPEN_ANALOG ?
- "analog" : "digital");
+ dev_info(&dvbdev->udev->dev, "acquired %s\n",
+ open_type == CXUSB_OPEN_ANALOG ? "analog" : "digital");
} else {
ret = -EBUSY;
}
@@ -1511,7 +1508,7 @@ void cxusb_medion_put(struct dvb_usb_device *dvbdev)
if (!WARN_ON(cxdev->open_ctr < 1)) {
cxdev->open_ctr--;
- deb_info("release %s\n",
+ dev_info(&dvbdev->udev->dev, "release %s\n",
cxdev->open_type == CXUSB_OPEN_ANALOG ?
"analog" : "digital");
}
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ab7a100ec84f..4ef3fa98d20f 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -3772,8 +3772,8 @@ static int xbox_one_attach(struct dvb_usb_adapter *adap)
info.addr = 0x18;
info.platform_data = &mn88472_config;
request_module(info.type);
- client_demod = i2c_new_device(&d->i2c_adap, &info);
- if (client_demod == NULL || client_demod->dev.driver == NULL)
+ client_demod = i2c_new_client_device(&d->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_demod))
goto fail_demod_device;
if (!try_module_get(client_demod->dev.driver->owner))
goto fail_demod_module;
@@ -3800,8 +3800,8 @@ static int xbox_one_attach(struct dvb_usb_adapter *adap)
info.platform_data = &tda18250_config;
request_module(info.type);
- client_tuner = i2c_new_device(&adap->dev->i2c_adap, &info);
- if (client_tuner == NULL || client_tuner->dev.driver == NULL)
+ client_tuner = i2c_new_client_device(&adap->dev->i2c_adap, &info);
+ if (!i2c_client_has_driver(client_tuner))
goto fail_tuner_device;
if (!try_module_get(client_tuner->dev.driver->owner))
goto fail_tuner_module;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index dd5bb230cec1..99a39339d45d 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -230,18 +230,22 @@ static struct rc_map_table rc_map_digitv_table[] = {
static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- int i;
+ int ret, i;
u8 key[5];
u8 b[4] = { 0 };
*event = 0;
*state = REMOTE_NO_KEY_PRESSED;
- digitv_ctrl_msg(d,USB_READ_REMOTE,0,NULL,0,&key[1],4);
+ ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, &key[1], 4);
+ if (ret)
+ return ret;
/* Tell the device we've read the remote. Not sure how necessary
this is, but the Nebula SDK does it. */
- digitv_ctrl_msg(d,USB_WRITE_REMOTE,0,b,4,NULL,0);
+ ret = digitv_ctrl_msg(d, USB_WRITE_REMOTE, 0, b, 4, NULL, 0);
+ if (ret)
+ return ret;
/* if something is inside the buffer, simulate key press */
if (key[1] != 0)
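
Note: digitv_rc_query() previously ignored the result of both control transfers and went on to interpret key[] even if the read had failed. A sketch of the pattern the fix follows, with a hypothetical foo_ctrl_msg helper:

    static int foo_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
    {
            u8 key[5] = { 0 };
            int ret;

            *event = 0;
            *state = REMOTE_NO_KEY_PRESSED;
            ret = foo_ctrl_msg(d, READ_REMOTE, &key[1], 4);
            if (ret)
                    return ret;     /* never decode key[] after a failed read */
            if (key[1])
                    *state = REMOTE_KEY_PRESSED;
            return 0;
    }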
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
index c1b4e94a37f8..2aabf90d8697 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
@@ -12,7 +12,7 @@
int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
u16 rlen, int delay_ms)
{
- int actlen,ret = -ENOMEM;
+ int actlen = 0, ret = -ENOMEM;
if (!d || wbuf == NULL || wlen == 0)
return -EINVAL;
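
Note: dvb_usb_generic_rw() now initialises actlen because usb_bulk_msg() can fail before ever writing the transferred length, which would leave a later length comparison reading stack garbage; af9005_boot_packet() above gets the same fix for act_len. A sketch of the failure mode (assumed usage):

    int act_len = 0;    /* stays 0 if the transfer errors out early */
    int ret;

    ret = usb_bulk_msg(udev, pipe, buf, len, &act_len, 2000);
    if (ret)
            return ret;
    if (act_len != len) /* would read uninitialised data without = 0 */
            return -EIO;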
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index b960abd00d48..8b584507dd59 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1590,8 +1590,8 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x68;
board_info.platform_data = &m88ds3103_pdata;
request_module("m88ds3103");
- client = i2c_new_device(&d->i2c_adap, &board_info);
- if (client == NULL || client->dev.driver == NULL)
+ client = i2c_new_client_device(&d->i2c_adap, &board_info);
+ if (!i2c_client_has_driver(client))
return -ENODEV;
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
@@ -1609,9 +1609,9 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
board_info.addr = 0x60;
board_info.platform_data = &ts2020_config;
request_module("ts2020");
- client = i2c_new_device(i2c_adapter, &board_info);
+ client = i2c_new_client_device(i2c_adapter, &board_info);
- if (client == NULL || client->dev.driver == NULL) {
+ if (!i2c_client_has_driver(client)) {
dvb_frontend_detach(adap->fe_adap[0].fe);
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 80c1cf05384b..2baf57216d19 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -96,10 +96,14 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
static int vp7045_rc_query(struct dvb_usb_device *d)
{
+ int ret;
u8 key;
- vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20);
- deb_rc("remote query key: %x %d\n",key,key);
+ ret = vp7045_usb_op(d, RC_VAL_READ, NULL, 0, &key, 1, 20);
+ if (ret)
+ return ret;
+
+ deb_rc("remote query key: %x\n", key);
if (key != 0x44) {
/*
@@ -115,15 +119,18 @@ static int vp7045_rc_query(struct dvb_usb_device *d)
static int vp7045_read_eeprom(struct dvb_usb_device *d,u8 *buf, int len, int offset)
{
- int i = 0;
- u8 v,br[2];
+ int i, ret;
+ u8 v, br[2];
for (i=0; i < len; i++) {
v = offset + i;
- vp7045_usb_op(d,GET_EE_VALUE,&v,1,br,2,5);
+ ret = vp7045_usb_op(d, GET_EE_VALUE, &v, 1, br, 2, 5);
+ if (ret)
+ return ret;
+
buf[i] = br[1];
}
- deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ",offset, i);
- debug_dump(buf,i,deb_info);
+ deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ", offset, i);
+ debug_dump(buf, i, deb_info);
return 0;
}
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 79dfbb25714b..6833b5bfe293 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -30,7 +30,6 @@
#include <linux/spinlock.h>
#include <linux/soundcard.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -192,28 +191,6 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
return 0;
}
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
- size_t size)
-{
- struct em28xx *dev = snd_pcm_substream_chip(subs);
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- dprintk("Allocating vbuffer\n");
- if (runtime->dma_area) {
- if (runtime->dma_bytes > size)
- return 0;
-
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc(size);
- if (!runtime->dma_area)
- return -ENOMEM;
-
- runtime->dma_bytes = size;
-
- return 0;
-}
-
static const struct snd_pcm_hardware snd_em28xx_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
@@ -341,63 +318,12 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
}
em28xx_audio_analog_set(dev);
- if (substream->runtime->dma_area) {
- dprintk("freeing\n");
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_area = NULL;
- }
mutex_unlock(&dev->lock);
kref_put(&dev->ref, em28xx_free_device);
return 0;
}
-static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int ret;
- struct em28xx *dev = snd_pcm_substream_chip(substream);
-
- if (dev->disconnected)
- return -ENODEV;
-
- dprintk("Setting capture parameters\n");
-
- ret = snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
- if (ret < 0)
- return ret;
-#if 0
- /*
- * TODO: set up em28xx audio chip to deliver the correct audio format,
- * current default is 48000hz multiplexed => 96000hz mono
- * which shouldn't matter since analogue TV only supports mono
- */
- unsigned int channels, rate, format;
-
- format = params_format(hw_params);
- rate = params_rate(hw_params);
- channels = params_channels(hw_params);
-#endif
-
- return 0;
-}
-
-static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream)
-{
- struct em28xx *dev = snd_pcm_substream_chip(substream);
- struct em28xx_audio *adev = &dev->adev;
-
- dprintk("Stop capture, if needed\n");
-
- if (atomic_read(&adev->stream_started) > 0) {
- atomic_set(&adev->stream_started, 0);
- schedule_work(&adev->wq_trigger);
- }
-
- return 0;
-}
-
static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
@@ -471,14 +397,6 @@ static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
return hwptr_done;
}
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
-
- return vmalloc_to_page(pageptr);
-}
-
/*
* AC97 volume control support
*/
@@ -708,13 +626,9 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
.open = snd_em28xx_capture_open,
.close = snd_em28xx_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_em28xx_hw_capture_params,
- .hw_free = snd_em28xx_hw_capture_free,
.prepare = snd_em28xx_prepare,
.trigger = snd_em28xx_capture_trigger,
.pointer = snd_em28xx_capture_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
static void em28xx_audio_free_urb(struct em28xx *dev)
@@ -936,6 +850,7 @@ static int em28xx_audio_init(struct em28xx *dev)
goto card_free;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
pcm->info_flags = 0;
pcm->private_data = dev;
strscpy(pcm->name, "Empia 28xx Capture", sizeof(pcm->name));
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 49e75a1a1f3f..b9e45124673b 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -607,6 +607,7 @@ static int s2250_remove(struct i2c_client *client)
{
struct s2250 *state = to_state(i2c_get_clientdata(client));
+ i2c_unregister_device(state->audio);
v4l2_device_unregister_subdev(&state->sd);
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index b05fa227ffb2..2ce85ab38db5 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/vmalloc.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/i2c.h>
@@ -100,16 +99,7 @@ static int go7007_snd_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct go7007 *go = snd_pcm_substream_chip(substream);
- unsigned int bytes;
-
- bytes = params_buffer_bytes(hw_params);
- if (substream->runtime->dma_bytes > 0)
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_bytes = 0;
- substream->runtime->dma_area = vmalloc(bytes);
- if (substream->runtime->dma_area == NULL)
- return -ENOMEM;
- substream->runtime->dma_bytes = bytes;
+
go->audio_deliver = parse_audio_stream_data;
return 0;
}
@@ -119,9 +109,6 @@ static int go7007_snd_hw_free(struct snd_pcm_substream *substream)
struct go7007 *go = snd_pcm_substream_chip(substream);
go->audio_deliver = NULL;
- if (substream->runtime->dma_bytes > 0)
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_bytes = 0;
return 0;
}
@@ -185,22 +172,14 @@ static snd_pcm_uframes_t go7007_snd_pcm_pointer(struct snd_pcm_substream *substr
return gosnd->hw_ptr;
}
-static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream,
- unsigned long offset)
-{
- return vmalloc_to_page(substream->runtime->dma_area + offset);
-}
-
static const struct snd_pcm_ops go7007_snd_capture_ops = {
.open = go7007_snd_capture_open,
.close = go7007_snd_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = go7007_snd_hw_params,
.hw_free = go7007_snd_hw_free,
.prepare = go7007_snd_pcm_prepare,
.trigger = go7007_snd_pcm_trigger,
.pointer = go7007_snd_pcm_pointer,
- .page = go7007_snd_pcm_page,
};
static int go7007_snd_free(struct snd_device *device)
@@ -236,22 +215,18 @@ int go7007_snd_init(struct go7007 *go)
gosnd->capturing = 0;
ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
&gosnd->card);
- if (ret < 0) {
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_snd;
+
ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
&go7007_snd_device_ops);
- if (ret < 0) {
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_card;
+
ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
- if (ret < 0) {
- snd_card_free(gosnd->card);
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_card;
+
strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname));
strscpy(gosnd->card->longname, gosnd->card->shortname,
@@ -260,13 +235,12 @@ int go7007_snd_init(struct go7007 *go)
gosnd->pcm->private_data = go;
snd_pcm_set_ops(gosnd->pcm, SNDRV_PCM_STREAM_CAPTURE,
&go7007_snd_capture_ops);
+ snd_pcm_set_managed_buffer_all(gosnd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
ret = snd_card_register(gosnd->card);
- if (ret < 0) {
- snd_card_free(gosnd->card);
- kfree(gosnd);
- return ret;
- }
+ if (ret < 0)
+ goto free_card;
gosnd->substream = NULL;
go->snd_context = gosnd;
@@ -274,6 +248,12 @@ int go7007_snd_init(struct go7007 *go)
++dev;
return 0;
+
+free_card:
+ snd_card_free(gosnd->card);
+free_snd:
+ kfree(gosnd);
+ return ret;
}
EXPORT_SYMBOL(go7007_snd_init);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 4add2b12d330..c1b307bbe540 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1461,7 +1461,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
pr_err("couldn't kzalloc gspca struct\n");
return -ENOMEM;
}
- gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
+ gspca_dev->usb_buf = kzalloc(USB_BUF_SZ, GFP_KERNEL);
if (!gspca_dev->usb_buf) {
pr_err("out of memory\n");
ret = -ENOMEM;
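
Note: gspca's shared usb_buf is switched from kmalloc() to kzalloc() because the buffer is handed to control transfers, where a short read would otherwise expose stale kernel heap contents. The general rule, as a one-line sketch:

    /* Zero buffers that hardware or userspace may observe before the
     * driver has fully written them. */
    buf = kzalloc(BUF_SZ, GFP_KERNEL);  /* kmalloc + guaranteed zeroing */
    if (!buf)
            return -ENOMEM;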
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index ac88ade94cda..afda438d4e0a 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -49,7 +49,7 @@ static int debug;
static int persistent_config;
module_param(debug, int, 0644);
module_param(persistent_config, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-1)");
+MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
enum pulse8_msgcodes {
@@ -100,6 +100,61 @@ enum pulse8_msgcodes {
MSGCODE_FRAME_ACK = 0x40,
};
+static const char * const pulse8_msgnames[] = {
+ "NOTHING",
+ "PING",
+ "TIMEOUT_ERROR",
+ "HIGH_ERROR",
+ "LOW_ERROR",
+ "FRAME_START",
+ "FRAME_DATA",
+ "RECEIVE_FAILED",
+ "COMMAND_ACCEPTED",
+ "COMMAND_REJECTED",
+ "SET_ACK_MASK",
+ "TRANSMIT",
+ "TRANSMIT_EOM",
+ "TRANSMIT_IDLETIME",
+ "TRANSMIT_ACK_POLARITY",
+ "TRANSMIT_LINE_TIMEOUT",
+ "TRANSMIT_SUCCEEDED",
+ "TRANSMIT_FAILED_LINE",
+ "TRANSMIT_FAILED_ACK",
+ "TRANSMIT_FAILED_TIMEOUT_DATA",
+ "TRANSMIT_FAILED_TIMEOUT_LINE",
+ "FIRMWARE_VERSION",
+ "START_BOOTLOADER",
+ "GET_BUILDDATE",
+ "SET_CONTROLLED",
+ "GET_AUTO_ENABLED",
+ "SET_AUTO_ENABLED",
+ "GET_DEFAULT_LOGICAL_ADDRESS",
+ "SET_DEFAULT_LOGICAL_ADDRESS",
+ "GET_LOGICAL_ADDRESS_MASK",
+ "SET_LOGICAL_ADDRESS_MASK",
+ "GET_PHYSICAL_ADDRESS",
+ "SET_PHYSICAL_ADDRESS",
+ "GET_DEVICE_TYPE",
+ "SET_DEVICE_TYPE",
+ "GET_HDMI_VERSION",
+ "SET_HDMI_VERSION",
+ "GET_OSD_NAME",
+ "SET_OSD_NAME",
+ "WRITE_EEPROM",
+ "GET_ADAPTER_TYPE",
+ "SET_ACTIVE_SOURCE",
+};
+
+static const char *pulse8_msgname(u8 cmd)
+{
+ static char unknown_msg[5];
+
+ if ((cmd & 0x3f) < ARRAY_SIZE(pulse8_msgnames))
+ return pulse8_msgnames[cmd & 0x3f];
+ snprintf(unknown_msg, sizeof(unknown_msg), "0x%02x", cmd);
+ return unknown_msg;
+}
+
#define MSGSTART 0xff
#define MSGEND 0xfe
#define MSGESC 0xfd
@@ -109,57 +164,203 @@ enum pulse8_msgcodes {
#define PING_PERIOD (15 * HZ)
+#define NUM_MSGS 8
+
struct pulse8 {
struct device *dev;
struct serio *serio;
struct cec_adapter *adap;
unsigned int vers;
- struct completion cmd_done;
- struct work_struct work;
+
struct delayed_work ping_eeprom_work;
- struct cec_msg rx_msg;
+
+ struct work_struct irq_work;
+ struct cec_msg rx_msg[NUM_MSGS];
+ unsigned int rx_msg_cur_idx, rx_msg_num;
+ /* protect rx_msg_cur_idx and rx_msg_num */
+ spinlock_t msg_lock;
+ u8 new_rx_msg[CEC_MAX_MSG_SIZE];
+ u8 new_rx_msg_len;
+
+ struct work_struct tx_work;
+ u32 tx_done_status;
+ u32 tx_signal_free_time;
+ struct cec_msg tx_msg;
+ bool tx_msg_is_bcast;
+
+ struct completion cmd_done;
u8 data[DATA_SIZE];
unsigned int len;
u8 buf[DATA_SIZE];
unsigned int idx;
bool escape;
bool started;
- struct mutex config_lock;
- struct mutex write_lock;
+
+ /* locks access to the adapter */
+ struct mutex lock;
bool config_pending;
bool restoring_config;
bool autonomous;
};
-static void pulse8_ping_eeprom_work_handler(struct work_struct *work);
+static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
+{
+ int err = 0;
+
+ err = serio_write(serio, MSGSTART);
+ if (err)
+ return err;
+ for (; !err && cmd_len; command++, cmd_len--) {
+ if (*command >= MSGESC) {
+ err = serio_write(serio, MSGESC);
+ if (!err)
+ err = serio_write(serio, *command - MSGOFFSET);
+ } else {
+ err = serio_write(serio, *command);
+ }
+ }
+ if (!err)
+ err = serio_write(serio, MSGEND);
+
+ return err;
+}
+
+static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
+ const u8 *cmd, u8 cmd_len,
+ u8 response, u8 size)
+{
+ int err;
+
+ if (debug > 1)
+ dev_info(pulse8->dev, "transmit %s: %*ph\n",
+ pulse8_msgname(cmd[0]), cmd_len, cmd);
+ init_completion(&pulse8->cmd_done);
+
+ err = pulse8_send(pulse8->serio, cmd, cmd_len);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
+ return -ETIMEDOUT;
+ if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
+ cmd[0] != MSGCODE_SET_CONTROLLED &&
+ cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
+ cmd[0] != MSGCODE_GET_BUILDDATE)
+ return -ENOTTY;
+ if (response &&
+ ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
+ dev_info(pulse8->dev, "transmit %s failed with %s\n",
+ pulse8_msgname(cmd[0]),
+ pulse8_msgname(pulse8->data[0]));
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pulse8_send_and_wait(struct pulse8 *pulse8,
+ const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+{
+ u8 cmd_sc[2];
+ int err;
+
+ err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
+ if (err != -ENOTTY)
+ return err;
+
+ cmd_sc[0] = MSGCODE_SET_CONTROLLED;
+ cmd_sc[1] = 1;
+ err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (!err)
+ err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
+ response, size);
+ return err == -ENOTTY ? -EIO : err;
+}
+
+static void pulse8_tx_work_handler(struct work_struct *work)
+{
+ struct pulse8 *pulse8 = container_of(work, struct pulse8, tx_work);
+ struct cec_msg *msg = &pulse8->tx_msg;
+ unsigned int i;
+ u8 cmd[2];
+ int err;
+
+ if (msg->len == 0)
+ return;
+
+ mutex_lock(&pulse8->lock);
+ cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
+ cmd[1] = pulse8->tx_signal_free_time;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
+ cmd[1] = cec_msg_is_broadcast(msg);
+ pulse8->tx_msg_is_bcast = cec_msg_is_broadcast(msg);
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+ cmd[1] = msg->msg[0];
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (!err && msg->len > 1) {
+ for (i = 1; !err && i < msg->len; i++) {
+ cmd[0] = ((i == msg->len - 1)) ?
+ MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+ cmd[1] = msg->msg[i];
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ }
+ }
+ if (err && debug)
+ dev_info(pulse8->dev, "%s(0x%02x) failed with error %d for msg %*ph\n",
+ pulse8_msgname(cmd[0]), cmd[1],
+ err, msg->len, msg->msg);
+ msg->len = 0;
+ mutex_unlock(&pulse8->lock);
+ if (err)
+ cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR);
+}
static void pulse8_irq_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
- container_of(work, struct pulse8, work);
+ container_of(work, struct pulse8, irq_work);
+ unsigned long flags;
+ u32 status;
- switch (pulse8->data[0] & 0x3f) {
- case MSGCODE_FRAME_DATA:
- cec_received_msg(pulse8->adap, &pulse8->rx_msg);
- break;
- case MSGCODE_TRANSMIT_SUCCEEDED:
- cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_OK);
- break;
- case MSGCODE_TRANSMIT_FAILED_ACK:
- cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_NACK);
- break;
- case MSGCODE_TRANSMIT_FAILED_LINE:
- case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
- case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
- cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR);
- break;
+ spin_lock_irqsave(&pulse8->msg_lock, flags);
+ while (pulse8->rx_msg_num) {
+ spin_unlock_irqrestore(&pulse8->msg_lock, flags);
+ if (debug)
+ dev_info(pulse8->dev, "adap received %*ph\n",
+ pulse8->rx_msg[pulse8->rx_msg_cur_idx].len,
+ pulse8->rx_msg[pulse8->rx_msg_cur_idx].msg);
+ cec_received_msg(pulse8->adap,
+ &pulse8->rx_msg[pulse8->rx_msg_cur_idx]);
+ spin_lock_irqsave(&pulse8->msg_lock, flags);
+ if (pulse8->rx_msg_num)
+ pulse8->rx_msg_num--;
+ pulse8->rx_msg_cur_idx =
+ (pulse8->rx_msg_cur_idx + 1) % NUM_MSGS;
}
+ spin_unlock_irqrestore(&pulse8->msg_lock, flags);
+
+ mutex_lock(&pulse8->lock);
+ status = pulse8->tx_done_status;
+ pulse8->tx_done_status = 0;
+ mutex_unlock(&pulse8->lock);
+ if (status)
+ cec_transmit_attempt_done(pulse8->adap, status);
}
static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
unsigned int flags)
{
struct pulse8 *pulse8 = serio_get_drvdata(serio);
+ unsigned long irq_flags;
+ unsigned int idx;
if (!pulse8->started && data != MSGSTART)
return IRQ_HANDLED;
@@ -171,35 +372,80 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
data += MSGOFFSET;
pulse8->escape = false;
} else if (data == MSGEND) {
- struct cec_msg *msg = &pulse8->rx_msg;
+ u8 msgcode = pulse8->buf[0];
- if (debug)
- dev_info(pulse8->dev, "received: %*ph\n",
+ if (debug > 1)
+ dev_info(pulse8->dev, "received %s: %*ph\n",
+ pulse8_msgname(msgcode),
pulse8->idx, pulse8->buf);
- pulse8->data[0] = pulse8->buf[0];
- switch (pulse8->buf[0] & 0x3f) {
+ switch (msgcode & 0x3f) {
case MSGCODE_FRAME_START:
- msg->len = 1;
- msg->msg[0] = pulse8->buf[1];
- break;
+ /*
+ * Test if we are receiving a new msg when a previous
+ * message is still pending.
+ */
+ if (!(msgcode & MSGCODE_FRAME_EOM)) {
+ pulse8->new_rx_msg_len = 1;
+ pulse8->new_rx_msg[0] = pulse8->buf[1];
+ break;
+ }
+ /* fall through */
case MSGCODE_FRAME_DATA:
- if (msg->len == CEC_MAX_MSG_SIZE)
+ if (pulse8->new_rx_msg_len < CEC_MAX_MSG_SIZE)
+ pulse8->new_rx_msg[pulse8->new_rx_msg_len++] =
+ pulse8->buf[1];
+ if (!(msgcode & MSGCODE_FRAME_EOM))
+ break;
+
+ spin_lock_irqsave(&pulse8->msg_lock, irq_flags);
+ idx = (pulse8->rx_msg_cur_idx + pulse8->rx_msg_num) %
+ NUM_MSGS;
+ if (pulse8->rx_msg_num == NUM_MSGS) {
+ dev_warn(pulse8->dev,
+ "message queue is full, dropping %*ph\n",
+ pulse8->new_rx_msg_len,
+ pulse8->new_rx_msg);
+ spin_unlock_irqrestore(&pulse8->msg_lock,
+ irq_flags);
+ pulse8->new_rx_msg_len = 0;
break;
- msg->msg[msg->len++] = pulse8->buf[1];
- if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
- schedule_work(&pulse8->work);
+ }
+ pulse8->rx_msg_num++;
+ memcpy(pulse8->rx_msg[idx].msg, pulse8->new_rx_msg,
+ pulse8->new_rx_msg_len);
+ pulse8->rx_msg[idx].len = pulse8->new_rx_msg_len;
+ spin_unlock_irqrestore(&pulse8->msg_lock, irq_flags);
+ schedule_work(&pulse8->irq_work);
+ pulse8->new_rx_msg_len = 0;
break;
case MSGCODE_TRANSMIT_SUCCEEDED:
- case MSGCODE_TRANSMIT_FAILED_LINE:
+ WARN_ON(pulse8->tx_done_status);
+ pulse8->tx_done_status = CEC_TX_STATUS_OK;
+ schedule_work(&pulse8->irq_work);
+ break;
case MSGCODE_TRANSMIT_FAILED_ACK:
+ /*
+ * A NACK for a broadcast message makes no sense; such replies
+ * seem to be spurious and are skipped.
+ */
+ if (pulse8->tx_msg_is_bcast)
+ break;
+ WARN_ON(pulse8->tx_done_status);
+ pulse8->tx_done_status = CEC_TX_STATUS_NACK;
+ schedule_work(&pulse8->irq_work);
+ break;
+ case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
- schedule_work(&pulse8->work);
+ WARN_ON(pulse8->tx_done_status);
+ pulse8->tx_done_status = CEC_TX_STATUS_ERROR;
+ schedule_work(&pulse8->irq_work);
break;
case MSGCODE_HIGH_ERROR:
case MSGCODE_LOW_ERROR:
case MSGCODE_RECEIVE_FAILED:
case MSGCODE_TIMEOUT_ERROR:
+ pulse8->new_rx_msg_len = 0;
break;
case MSGCODE_COMMAND_ACCEPTED:
case MSGCODE_COMMAND_REJECTED:
@@ -229,92 +475,183 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
return IRQ_HANDLED;
}
-static void pulse8_disconnect(struct serio *serio)
+static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct pulse8 *pulse8 = serio_get_drvdata(serio);
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
+ u8 cmd[16];
+ int err;
- cec_unregister_adapter(pulse8->adap);
- cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
- dev_info(&serio->dev, "disconnected\n");
- serio_close(serio);
- serio_set_drvdata(serio, NULL);
- kfree(pulse8);
+ mutex_lock(&pulse8->lock);
+ cmd[0] = MSGCODE_SET_CONTROLLED;
+ cmd[1] = enable;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (!enable) {
+ pulse8->rx_msg_num = 0;
+ pulse8->tx_done_status = 0;
+ }
+ mutex_unlock(&pulse8->lock);
+ return enable ? err : 0;
}
-static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
+static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
+ u16 mask = 0;
+ u16 pa = adap->phys_addr;
+ u8 cmd[16];
int err = 0;
- err = serio_write(serio, MSGSTART);
+ mutex_lock(&pulse8->lock);
+ if (log_addr != CEC_LOG_ADDR_INVALID)
+ mask = 1 << log_addr;
+ cmd[0] = MSGCODE_SET_ACK_MASK;
+ cmd[1] = mask >> 8;
+ cmd[2] = mask & 0xff;
+ err = pulse8_send_and_wait(pulse8, cmd, 3,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if ((err && mask != 0) || pulse8->restoring_config)
+ goto unlock;
+
+ cmd[0] = MSGCODE_SET_AUTO_ENABLED;
+ cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
- return err;
- for (; !err && cmd_len; command++, cmd_len--) {
- if (*command >= MSGESC) {
- err = serio_write(serio, MSGESC);
- if (!err)
- err = serio_write(serio, *command - MSGOFFSET);
- } else {
- err = serio_write(serio, *command);
- }
- }
- if (!err)
- err = serio_write(serio, MSGEND);
+ goto unlock;
+ pulse8->autonomous = cmd[1];
+ if (log_addr == CEC_LOG_ADDR_INVALID)
+ goto unlock;
- return err;
-}
+ cmd[0] = MSGCODE_SET_DEVICE_TYPE;
+ cmd[1] = adap->log_addrs.primary_device_type[0];
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
-static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
- const u8 *cmd, u8 cmd_len,
- u8 response, u8 size)
-{
- int err;
+ switch (adap->log_addrs.primary_device_type[0]) {
+ case CEC_OP_PRIM_DEVTYPE_TV:
+ mask = CEC_LOG_ADDR_MASK_TV;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_RECORD:
+ mask = CEC_LOG_ADDR_MASK_RECORD;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_TUNER:
+ mask = CEC_LOG_ADDR_MASK_TUNER;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+ mask = CEC_LOG_ADDR_MASK_PLAYBACK;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+ mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_SWITCH:
+ mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+ mask = CEC_LOG_ADDR_MASK_SPECIFIC;
+ break;
+ default:
+ mask = 0;
+ break;
+ }
+ cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
+ cmd[1] = mask >> 8;
+ cmd[2] = mask & 0xff;
+ err = pulse8_send_and_wait(pulse8, cmd, 3,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
- /*dev_info(pulse8->dev, "transmit: %*ph\n", cmd_len, cmd);*/
- init_completion(&pulse8->cmd_done);
+ cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
+ cmd[1] = log_addr;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
- err = pulse8_send(pulse8->serio, cmd, cmd_len);
+ cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
+ cmd[1] = pa >> 8;
+ cmd[2] = pa & 0xff;
+ err = pulse8_send_and_wait(pulse8, cmd, 3,
+ MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
- return err;
+ goto unlock;
- if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
- return -ETIMEDOUT;
- if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
- cmd[0] != MSGCODE_SET_CONTROLLED &&
- cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
- cmd[0] != MSGCODE_GET_BUILDDATE)
- return -ENOTTY;
- if (response &&
- ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
- dev_info(pulse8->dev, "transmit: failed %02x\n",
- pulse8->data[0] & 0x3f);
- return -EIO;
+ cmd[0] = MSGCODE_SET_HDMI_VERSION;
+ cmd[1] = adap->log_addrs.cec_version;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+
+ if (adap->log_addrs.osd_name[0]) {
+ size_t osd_len = strlen(adap->log_addrs.osd_name);
+ char *osd_str = cmd + 1;
+
+ cmd[0] = MSGCODE_SET_OSD_NAME;
+ strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1);
+ if (osd_len < 4) {
+ memset(osd_str + osd_len, ' ', 4 - osd_len);
+ osd_len = 4;
+ osd_str[osd_len] = '\0';
+ strscpy(adap->log_addrs.osd_name, osd_str,
+ sizeof(adap->log_addrs.osd_name));
+ }
+ err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
}
+
+unlock:
+ if (pulse8->restoring_config)
+ pulse8->restoring_config = false;
+ else
+ pulse8->config_pending = true;
+ mutex_unlock(&pulse8->lock);
+ return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
+}
+
+static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
+
+ pulse8->tx_msg = *msg;
+ if (debug)
+ dev_info(pulse8->dev, "adap transmit %*ph\n",
+ msg->len, msg->msg);
+ pulse8->tx_signal_free_time = signal_free_time;
+ schedule_work(&pulse8->tx_work);
return 0;
}
-static int pulse8_send_and_wait(struct pulse8 *pulse8,
- const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+static void pulse8_cec_adap_free(struct cec_adapter *adap)
{
- u8 cmd_sc[2];
- int err;
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
- mutex_lock(&pulse8->write_lock);
- err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
+ cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
+ cancel_work_sync(&pulse8->irq_work);
+ cancel_work_sync(&pulse8->tx_work);
+ serio_close(pulse8->serio);
+ serio_set_drvdata(pulse8->serio, NULL);
+ kfree(pulse8);
+}
- if (err == -ENOTTY) {
- cmd_sc[0] = MSGCODE_SET_CONTROLLED;
- cmd_sc[1] = 1;
- err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- if (err)
- goto unlock;
- err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
- response, size);
- }
+static const struct cec_adap_ops pulse8_cec_adap_ops = {
+ .adap_enable = pulse8_cec_adap_enable,
+ .adap_log_addr = pulse8_cec_adap_log_addr,
+ .adap_transmit = pulse8_cec_adap_transmit,
+ .adap_free = pulse8_cec_adap_free,
+};
-unlock:
- mutex_unlock(&pulse8->write_lock);
- return err == -ENOTTY ? -EIO : err;
+static void pulse8_disconnect(struct serio *serio)
+{
+ struct pulse8 *pulse8 = serio_get_drvdata(serio);
+
+ cec_unregister_adapter(pulse8->adap);
}
static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
@@ -451,191 +788,34 @@ static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
return 0;
}
-static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
-{
- struct pulse8 *pulse8 = cec_get_drvdata(adap);
- u8 cmd[16];
- int err;
-
- cmd[0] = MSGCODE_SET_CONTROLLED;
- cmd[1] = enable;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- return enable ? err : 0;
-}
-
-static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
{
- struct pulse8 *pulse8 = cec_get_drvdata(adap);
- u16 mask = 0;
- u16 pa = adap->phys_addr;
- u8 cmd[16];
- int err = 0;
-
- mutex_lock(&pulse8->config_lock);
- if (log_addr != CEC_LOG_ADDR_INVALID)
- mask = 1 << log_addr;
- cmd[0] = MSGCODE_SET_ACK_MASK;
- cmd[1] = mask >> 8;
- cmd[2] = mask & 0xff;
- err = pulse8_send_and_wait(pulse8, cmd, 3,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if ((err && mask != 0) || pulse8->restoring_config)
- goto unlock;
-
- cmd[0] = MSGCODE_SET_AUTO_ENABLED;
- cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
- pulse8->autonomous = cmd[1];
- if (log_addr == CEC_LOG_ADDR_INVALID)
- goto unlock;
-
- cmd[0] = MSGCODE_SET_DEVICE_TYPE;
- cmd[1] = adap->log_addrs.primary_device_type[0];
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
-
- switch (adap->log_addrs.primary_device_type[0]) {
- case CEC_OP_PRIM_DEVTYPE_TV:
- mask = CEC_LOG_ADDR_MASK_TV;
- break;
- case CEC_OP_PRIM_DEVTYPE_RECORD:
- mask = CEC_LOG_ADDR_MASK_RECORD;
- break;
- case CEC_OP_PRIM_DEVTYPE_TUNER:
- mask = CEC_LOG_ADDR_MASK_TUNER;
- break;
- case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
- mask = CEC_LOG_ADDR_MASK_PLAYBACK;
- break;
- case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
- mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
- break;
- case CEC_OP_PRIM_DEVTYPE_SWITCH:
- mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
- break;
- case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
- mask = CEC_LOG_ADDR_MASK_SPECIFIC;
- break;
- default:
- mask = 0;
- break;
- }
- cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
- cmd[1] = mask >> 8;
- cmd[2] = mask & 0xff;
- err = pulse8_send_and_wait(pulse8, cmd, 3,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
-
- cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
- cmd[1] = log_addr;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
+ struct pulse8 *pulse8 =
+ container_of(work, struct pulse8, ping_eeprom_work.work);
+ u8 cmd;
- cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
- cmd[1] = pa >> 8;
- cmd[2] = pa & 0xff;
- err = pulse8_send_and_wait(pulse8, cmd, 3,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
+ mutex_lock(&pulse8->lock);
+ cmd = MSGCODE_PING;
+ pulse8_send_and_wait(pulse8, &cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 0);
- cmd[0] = MSGCODE_SET_HDMI_VERSION;
- cmd[1] = adap->log_addrs.cec_version;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
+ if (pulse8->vers < 2)
goto unlock;
- if (adap->log_addrs.osd_name[0]) {
- size_t osd_len = strlen(adap->log_addrs.osd_name);
- char *osd_str = cmd + 1;
-
- cmd[0] = MSGCODE_SET_OSD_NAME;
- strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1);
- if (osd_len < 4) {
- memset(osd_str + osd_len, ' ', 4 - osd_len);
- osd_len = 4;
- osd_str[osd_len] = '\0';
- strscpy(adap->log_addrs.osd_name, osd_str,
- sizeof(adap->log_addrs.osd_name));
- }
- err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
+ if (pulse8->config_pending && persistent_config) {
+ dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
+ cmd = MSGCODE_WRITE_EEPROM;
+ if (pulse8_send_and_wait(pulse8, &cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 0))
+ dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
+ else
+ pulse8->config_pending = false;
}
-
unlock:
- if (pulse8->restoring_config)
- pulse8->restoring_config = false;
- else
- pulse8->config_pending = true;
- mutex_unlock(&pulse8->config_lock);
- return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
-}
-
-static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
-{
- struct pulse8 *pulse8 = cec_get_drvdata(adap);
- u8 cmd[2];
- unsigned int i;
- int err;
-
- cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
- cmd[1] = signal_free_time;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
- cmd[1] = cec_msg_is_broadcast(msg);
- if (!err)
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
- cmd[1] = msg->msg[0];
- if (!err)
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- if (!err && msg->len > 1) {
- cmd[0] = msg->len == 2 ? MSGCODE_TRANSMIT_EOM :
- MSGCODE_TRANSMIT;
- cmd[1] = msg->msg[1];
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- for (i = 0; !err && i + 2 < msg->len; i++) {
- cmd[0] = (i + 2 == msg->len - 1) ?
- MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
- cmd[1] = msg->msg[i + 2];
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- }
- }
-
- return err;
-}
-
-static int pulse8_received(struct cec_adapter *adap, struct cec_msg *msg)
-{
- return -ENOMSG;
+ schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+ mutex_unlock(&pulse8->lock);
}
-static const struct cec_adap_ops pulse8_cec_adap_ops = {
- .adap_enable = pulse8_cec_adap_enable,
- .adap_log_addr = pulse8_cec_adap_log_addr,
- .adap_transmit = pulse8_cec_adap_transmit,
- .received = pulse8_received,
-};
-
static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL;
@@ -658,9 +838,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
- INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
- mutex_init(&pulse8->write_lock);
- mutex_init(&pulse8->config_lock);
+ INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler);
+ INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler);
+ mutex_init(&pulse8->lock);
+ spin_lock_init(&pulse8->msg_lock);
pulse8->config_pending = false;
err = serio_open(serio, drv);
@@ -700,33 +881,6 @@ free_device:
return err;
}
-static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
-{
- struct pulse8 *pulse8 =
- container_of(work, struct pulse8, ping_eeprom_work.work);
- u8 cmd;
-
- schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
- cmd = MSGCODE_PING;
- pulse8_send_and_wait(pulse8, &cmd, 1,
- MSGCODE_COMMAND_ACCEPTED, 0);
-
- if (pulse8->vers < 2)
- return;
-
- mutex_lock(&pulse8->config_lock);
- if (pulse8->config_pending && persistent_config) {
- dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
- cmd = MSGCODE_WRITE_EEPROM;
- if (pulse8_send_and_wait(pulse8, &cmd, 1,
- MSGCODE_COMMAND_ACCEPTED, 0))
- dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
- else
- pulse8->config_pending = false;
- }
- mutex_unlock(&pulse8->config_lock);
-}
-
static const struct serio_device_id pulse8_serio_ids[] = {
{
.type = SERIO_RS232,
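
For the pulse8 rework above: transmission now leaves the adap_transmit callback immediately and is carried out by the driver's tx worker, so a non-blocking CEC_TRANSMIT from userspace returns as soon as the message is queued. A minimal userspace sketch against the standard CEC uAPI; the /dev/cec0 node is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>
#include <linux/cec-funcs.h>

int main(void)
{
	struct cec_msg msg;
	int fd = open("/dev/cec0", O_RDWR | O_NONBLOCK); /* node is an assumption */

	if (fd < 0)
		return 1;

	/* <Image View On> from Playback 1 to the TV; with O_NONBLOCK the
	 * ioctl returns once the message is queued, and the adapter's tx
	 * worker puts it on the bus. */
	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	cec_msg_image_view_on(&msg);

	if (ioctl(fd, CEC_TRANSMIT, &msg))
		perror("CEC_TRANSMIT");

	return 0;
}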
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index fb3178d909ce..f6005d1296ef 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -284,8 +284,8 @@ rdData[0]);
wrData[0] = 0x0;
ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1);
break;
-
- }; LOCK_GIVE(hdw->ctl_lock);
+ }
+ LOCK_GIVE(hdw->ctl_lock);
return ret;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 21f90a887485..b22501f76b78 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1125,7 +1125,7 @@ static int stk_vidioc_dqbuf(struct file *filp,
sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
sbuf->v4lbuf.sequence = ++dev->sequence;
- sbuf->v4lbuf.timestamp = ns_to_timeval(ktime_get_ns());
+ v4l2_buffer_set_timestamp(&sbuf->v4lbuf, ktime_get_ns());
*buf = sbuf->v4lbuf;
return 0;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index d6c79c13b332..c26a0ff60a64 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <sound/core.h>
@@ -94,40 +93,6 @@ static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip)
return 0;
}
-static void dsp_buffer_free(struct snd_pcm_substream *substream)
-{
- struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
-
- dprintk(2, "Freeing buffer\n");
-
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_area = NULL;
- substream->runtime->dma_bytes = 0;
-}
-
-static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
-{
- struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
-
- dprintk(2, "Allocating buffer\n");
-
- if (substream->runtime->dma_area) {
- if (substream->runtime->dma_bytes > size)
- return 0;
-
- dsp_buffer_free(substream);
- }
-
- substream->runtime->dma_area = vmalloc(size);
- if (!substream->runtime->dma_area)
- return -ENOMEM;
-
- substream->runtime->dma_bytes = size;
-
- return 0;
-}
-
-
/****************************************************************************
ALSA PCM Interface
****************************************************************************/
@@ -269,40 +234,6 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
}
/*
- * hw_params callback
- */
-static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int size, rc;
-
- size = params_period_bytes(hw_params) * params_periods(hw_params);
-
- rc = dsp_buffer_alloc(substream, size);
- if (rc < 0)
- return rc;
-
- return 0;
-}
-
-/*
- * hw free callback
- */
-static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
-{
- struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
- struct tm6000_core *core = chip->core;
-
- if (atomic_read(&core->stream_started) > 0) {
- atomic_set(&core->stream_started, 0);
- schedule_work(&core->wq_trigger);
- }
-
- dsp_buffer_free(substream);
- return 0;
-}
-
-/*
* prepare callback
*/
static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
@@ -369,27 +300,15 @@ static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
return chip->buf_pos;
}
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
-
- return vmalloc_to_page(pageptr);
-}
-
/*
* operators
*/
static const struct snd_pcm_ops snd_tm6000_pcm_ops = {
.open = snd_tm6000_pcm_open,
.close = snd_tm6000_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_tm6000_hw_params,
- .hw_free = snd_tm6000_hw_free,
.prepare = snd_tm6000_prepare,
.trigger = snd_tm6000_card_trigger,
.pointer = snd_tm6000_pointer,
- .page = snd_pcm_get_vmalloc_page,
};
/*
@@ -459,6 +378,7 @@ static int tm6000_audio_init(struct tm6000_core *dev)
strscpy(pcm->name, "Trident TM5600/60x0", sizeof(pcm->name));
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
INIT_WORK(&dev->wq_trigger, audio_trigger);
rc = snd_card_register(card);
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index e746c8ddfc49..b57e94fb1977 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -85,30 +85,6 @@ static int snd_usbtv_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_usbtv_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int rv;
- struct usbtv *chip = snd_pcm_substream_chip(substream);
-
- rv = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
-
- if (rv < 0) {
- dev_warn(chip->dev, "pcm audio buffer allocation failure %i\n",
- rv);
- return rv;
- }
-
- return 0;
-}
-
-static int snd_usbtv_hw_free(struct snd_pcm_substream *substream)
-{
- snd_pcm_lib_free_pages(substream);
- return 0;
-}
-
static int snd_usbtv_prepare(struct snd_pcm_substream *substream)
{
struct usbtv *chip = snd_pcm_substream_chip(substream);
@@ -336,9 +312,6 @@ static snd_pcm_uframes_t snd_usbtv_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops snd_usbtv_pcm_ops = {
.open = snd_usbtv_pcm_open,
.close = snd_usbtv_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usbtv_hw_params,
- .hw_free = snd_usbtv_hw_free,
.prepare = snd_usbtv_prepare,
.trigger = snd_usbtv_card_trigger,
.pointer = snd_usbtv_pointer,
@@ -377,7 +350,7 @@ int usbtv_audio_init(struct usbtv *usbtv)
pcm->private_data = usbtv;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
NULL, USBTV_AUDIO_BUFFER, USBTV_AUDIO_BUFFER);
rv = snd_card_register(card);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 93d36aab824f..5ca2c2f35fe2 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -696,7 +696,7 @@ static int vidioc_querybuf(struct file *file,
vb->length = usbvision->curwidth *
usbvision->curheight *
usbvision->palette.bytes_per_pixel;
- vb->timestamp = ns_to_timeval(usbvision->frame[vb->index].ts);
+ v4l2_buffer_set_timestamp(vb, usbvision->frame[vb->index].ts);
vb->sequence = usbvision->frame[vb->index].sequence;
return 0;
}
@@ -765,7 +765,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vb->index = f->index;
vb->sequence = f->sequence;
- vb->timestamp = ns_to_timeval(f->ts);
+ v4l2_buffer_set_timestamp(vb, f->ts);
vb->field = V4L2_FIELD_NONE;
vb->bytesused = f->scanlength;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 428235ca2635..99883550375e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -497,6 +497,22 @@ static int uvc_parse_format(struct uvc_device *dev,
}
}
+ /* Some devices report bpp that doesn't match the format. */
+ if (dev->quirks & UVC_QUIRK_FORCE_BPP) {
+ const struct v4l2_format_info *info =
+ v4l2_format_info(format->fcc);
+
+ if (info) {
+ unsigned int div = info->hdiv * info->vdiv;
+
+ n = info->bpp[0] * div;
+ for (i = 1; i < info->comp_planes; i++)
+ n += info->bpp[i];
+
+ format->bpp = DIV_ROUND_UP(8 * n, div);
+ }
+ }
+
if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
ftype = UVC_VS_FRAME_UNCOMPRESSED;
} else {
@@ -1493,6 +1509,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
break;
if (forward == prev)
continue;
+ if (forward->chain.next || forward->chain.prev) {
+ uvc_trace(UVC_TRACE_DESCR, "Found reference to entity %d already in chain.\n", forward->id);
+ return -EINVAL;
+ }
switch (UVC_ENTITY_TYPE(forward)) {
case UVC_VC_EXTENSION_UNIT:
@@ -1574,6 +1595,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
return -1;
}
+ if (term->chain.next || term->chain.prev) {
+ uvc_trace(UVC_TRACE_DESCR, "Found reference to entity %d already in chain.\n", term->id);
+ return -EINVAL;
+ }
+
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(KERN_CONT " %d", term->id);
@@ -2862,6 +2890,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 },
+ /* GEO Semiconductor GC6500 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x29fe,
+ .idProduct = 0x4d53,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
/* Intel RealSense D4M */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f773dc5d802c..6ab972c643e3 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -198,6 +198,7 @@
#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
#define UVC_QUIRK_FORCE_Y8 0x00000800
+#define UVC_QUIRK_FORCE_BPP 0x00001000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
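
A worked example for the UVC_QUIRK_FORCE_BPP hunk above: for NV12, v4l2_format_info() reports two component planes with bpp {1, 2} and 2x2 chroma subsampling, so div = 2 * 2 = 4, n = 1 * 4 + 2 = 6, and DIV_ROUND_UP(8 * 6, 4) yields the expected 12 bits per pixel. A standalone sketch of the same arithmetic; the NV12 parameters are copied by hand and are the only assumption.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Mirrors v4l2_format_info() for NV12: one byte per Y sample,
	 * two bytes per interleaved CbCr sample, 2x2 subsampling. */
	unsigned int bpp[2] = { 1, 2 };
	unsigned int hdiv = 2, vdiv = 2;
	unsigned int div = hdiv * vdiv;
	unsigned int n = bpp[0] * div;
	unsigned int i;

	for (i = 1; i < 2; i++)	/* remaining component planes */
		n += bpp[i];

	printf("bpp = %u\n", DIV_ROUND_UP(8 * n, div)); /* prints 12 */
	return 0;
}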
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index e1eaf1135c7f..a99e82ec9ab6 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -468,13 +468,43 @@ struct v4l2_plane32 {
__u32 reserved[11];
};
+/*
+ * This is correct for all architectures including i386, but not x32,
+ * which has different alignment requirements for timestamp
+ */
struct v4l2_buffer32 {
__u32 index;
__u32 type; /* enum v4l2_buf_type */
__u32 bytesused;
__u32 flags;
__u32 field; /* enum v4l2_field */
- struct compat_timeval timestamp;
+ struct {
+ compat_s64 tv_sec;
+ compat_s64 tv_usec;
+ } timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory; /* enum v4l2_memory */
+ union {
+ __u32 offset;
+ compat_long_t userptr;
+ compat_caddr_t planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ __s32 request_fd;
+};
+
+struct v4l2_buffer32_time32 {
+ __u32 index;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field; /* enum v4l2_field */
+ struct old_timeval32 timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
@@ -581,6 +611,31 @@ static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *p32, u32 *size)
return 0;
}
+static int bufsize_v4l2_buffer_time32(struct v4l2_buffer32_time32 __user *p32, u32 *size)
+{
+ u32 type;
+ u32 length;
+
+ if (!access_ok(p32, sizeof(*p32)) ||
+ get_user(type, &p32->type) ||
+ get_user(length, &p32->length))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ if (length > VIDEO_MAX_PLANES)
+ return -EINVAL;
+
+ /*
+ * We don't really care if userspace decides to kill itself
+ * by passing a very big length value
+ */
+ *size = length * sizeof(struct v4l2_plane);
+ } else {
+ *size = 0;
+ }
+ return 0;
+}
+
static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
struct v4l2_buffer32 __user *p32,
void __user *aux_buf, u32 aux_space)
@@ -681,6 +736,106 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
return 0;
}
+static int get_v4l2_buffer32_time32(struct v4l2_buffer_time32 __user *p64,
+ struct v4l2_buffer32_time32 __user *p32,
+ void __user *aux_buf, u32 aux_space)
+{
+ u32 type;
+ u32 length;
+ s32 request_fd;
+ enum v4l2_memory memory;
+ struct v4l2_plane32 __user *uplane32;
+ struct v4l2_plane __user *uplane;
+ compat_caddr_t p;
+ int ret;
+
+ if (!access_ok(p32, sizeof(*p32)) ||
+ assign_in_user(&p64->index, &p32->index) ||
+ get_user(type, &p32->type) ||
+ put_user(type, &p64->type) ||
+ assign_in_user(&p64->flags, &p32->flags) ||
+ get_user(memory, &p32->memory) ||
+ put_user(memory, &p64->memory) ||
+ get_user(length, &p32->length) ||
+ put_user(length, &p64->length) ||
+ get_user(request_fd, &p32->request_fd) ||
+ put_user(request_fd, &p64->request_fd))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ if (assign_in_user(&p64->bytesused, &p32->bytesused) ||
+ assign_in_user(&p64->field, &p32->field) ||
+ assign_in_user(&p64->timestamp.tv_sec,
+ &p32->timestamp.tv_sec) ||
+ assign_in_user(&p64->timestamp.tv_usec,
+ &p32->timestamp.tv_usec))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ u32 num_planes = length;
+
+ if (num_planes == 0) {
+ /*
+ * num_planes == 0 is legal, e.g. when userspace doesn't
+ * need planes array on DQBUF
+ */
+ return put_user(NULL, &p64->m.planes);
+ }
+ if (num_planes > VIDEO_MAX_PLANES)
+ return -EINVAL;
+
+ if (get_user(p, &p32->m.planes))
+ return -EFAULT;
+
+ uplane32 = compat_ptr(p);
+ if (!access_ok(uplane32,
+ num_planes * sizeof(*uplane32)))
+ return -EFAULT;
+
+ /*
+ * We don't really care if userspace decides to kill itself
+ * by passing a very big num_planes value
+ */
+ if (aux_space < num_planes * sizeof(*uplane))
+ return -EFAULT;
+
+ uplane = aux_buf;
+ if (put_user_force(uplane, &p64->m.planes))
+ return -EFAULT;
+
+ while (num_planes--) {
+ ret = get_v4l2_plane32(uplane, uplane32, memory);
+ if (ret)
+ return ret;
+ uplane++;
+ uplane32++;
+ }
+ } else {
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ if (assign_in_user(&p64->m.offset, &p32->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR: {
+ compat_ulong_t userptr;
+
+ if (get_user(userptr, &p32->m.userptr) ||
+ put_user((unsigned long)compat_ptr(userptr),
+ &p64->m.userptr))
+ return -EFAULT;
+ break;
+ }
+ case V4L2_MEMORY_DMABUF:
+ if (assign_in_user(&p64->m.fd, &p32->m.fd))
+ return -EFAULT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
struct v4l2_buffer32 __user *p32)
{
@@ -761,6 +916,86 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
return 0;
}
+static int put_v4l2_buffer32_time32(struct v4l2_buffer_time32 __user *p64,
+ struct v4l2_buffer32_time32 __user *p32)
+{
+ u32 type;
+ u32 length;
+ enum v4l2_memory memory;
+ struct v4l2_plane32 __user *uplane32;
+ struct v4l2_plane *uplane;
+ compat_caddr_t p;
+ int ret;
+
+ if (!access_ok(p32, sizeof(*p32)) ||
+ assign_in_user(&p32->index, &p64->index) ||
+ get_user(type, &p64->type) ||
+ put_user(type, &p32->type) ||
+ assign_in_user(&p32->flags, &p64->flags) ||
+ get_user(memory, &p64->memory) ||
+ put_user(memory, &p32->memory))
+ return -EFAULT;
+
+ if (assign_in_user(&p32->bytesused, &p64->bytesused) ||
+ assign_in_user(&p32->field, &p64->field) ||
+ assign_in_user(&p32->timestamp.tv_sec, &p64->timestamp.tv_sec) ||
+ assign_in_user(&p32->timestamp.tv_usec, &p64->timestamp.tv_usec) ||
+ copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) ||
+ assign_in_user(&p32->sequence, &p64->sequence) ||
+ assign_in_user(&p32->reserved2, &p64->reserved2) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
+ get_user(length, &p64->length) ||
+ put_user(length, &p32->length))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ u32 num_planes = length;
+
+ if (num_planes == 0)
+ return 0;
+ /*
+ * We need to define uplane without __user, even though
+ * it does point to data in userspace here. The reason is
+ * that v4l2-ioctl.c copies it from userspace to kernelspace,
+ * so its definition in videodev2.h doesn't have a
+ * __user markup. Defining uplane with __user causes
+ * smatch warnings, so instead declare it without __user
+ * and cast it as a userspace pointer to put_v4l2_plane32().
+ */
+ if (get_user(uplane, &p64->m.planes))
+ return -EFAULT;
+ if (get_user(p, &p32->m.planes))
+ return -EFAULT;
+ uplane32 = compat_ptr(p);
+
+ while (num_planes--) {
+ ret = put_v4l2_plane32((void __user *)uplane,
+ uplane32, memory);
+ if (ret)
+ return ret;
+ ++uplane;
+ ++uplane32;
+ }
+ } else {
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ if (assign_in_user(&p32->m.offset, &p64->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (assign_in_user(&p32->m.userptr, &p64->m.userptr))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ if (assign_in_user(&p32->m.fd, &p64->m.fd))
+ return -EFAULT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
struct v4l2_framebuffer32 {
__u32 capability;
__u32 flags;
@@ -1028,6 +1263,15 @@ static int put_v4l2_ext_controls32(struct file *file,
return 0;
}
+#ifdef CONFIG_X86_64
+/*
+ * x86 is the only compat architecture with different struct alignment
+ * between 32-bit and 64-bit tasks.
+ *
+ * On all other architectures, v4l2_event32 and v4l2_event32_time32 are
+ * the same as v4l2_event and v4l2_event_time32, so we can use the native
+ * handlers, converting v4l2_event to v4l2_event_time32 if necessary.
+ */
struct v4l2_event32 {
__u32 type;
union {
@@ -1036,7 +1280,23 @@ struct v4l2_event32 {
} u;
__u32 pending;
__u32 sequence;
- struct compat_timespec timestamp;
+ struct {
+ compat_s64 tv_sec;
+ compat_s64 tv_nsec;
+ } timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+struct v4l2_event32_time32 {
+ __u32 type;
+ union {
+ compat_s64 value64;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
__u32 id;
__u32 reserved[8];
};
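
The alignment difference called out in the comment above is easy to probe: a 64-bit member that follows a char lands at offset 4 under the i386 ABI but at offset 8 under x86-64, which is why v4l2_event32 spells out its own timestamp layout. A sketch to build with -m32 and -m64:

#include <stdio.h>
#include <stddef.h>

struct probe {
	char c;
	long long ll;	/* stands in for the 64-bit timestamp words */
};

int main(void)
{
	/* Prints 4 with -m32 (i386) and 8 with -m64 (x86-64). */
	printf("offsetof(probe, ll) = %zu\n", offsetof(struct probe, ll));
	return 0;
}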
@@ -1057,6 +1317,23 @@ static int put_v4l2_event32(struct v4l2_event __user *p64,
return 0;
}
+static int put_v4l2_event32_time32(struct v4l2_event_time32 __user *p64,
+ struct v4l2_event32_time32 __user *p32)
+{
+ if (!access_ok(p32, sizeof(*p32)) ||
+ assign_in_user(&p32->type, &p64->type) ||
+ copy_in_user(&p32->u, &p64->u, sizeof(p64->u)) ||
+ assign_in_user(&p32->pending, &p64->pending) ||
+ assign_in_user(&p32->sequence, &p64->sequence) ||
+ assign_in_user(&p32->timestamp.tv_sec, &p64->timestamp.tv_sec) ||
+ assign_in_user(&p32->timestamp.tv_nsec, &p64->timestamp.tv_nsec) ||
+ assign_in_user(&p32->id, &p64->id) ||
+ copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
+ return 0;
+}
+#endif
+
struct v4l2_edid32 {
__u32 pad;
__u32 start_block;
@@ -1108,10 +1385,13 @@ static int put_v4l2_edid32(struct v4l2_edid __user *p64,
#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
+#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32)
+#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
+#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
@@ -1121,8 +1401,10 @@ static int put_v4l2_edid32(struct v4l2_edid __user *p64,
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
+#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32)
#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32)
+#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
@@ -1183,36 +1465,45 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
u32 aux_space;
int compatible_arg = 1;
long err = 0;
+ unsigned int ncmd;
/*
* 1. When struct size is different, converts the command.
*/
switch (cmd) {
- case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
- case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
- case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
- case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
- case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
- case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
- case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
- case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
- case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
- case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
- case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
- case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
- case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
- case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
- case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
- case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
- case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
- case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
- case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
- case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
- case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
- case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
- case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
- case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
- case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
+ case VIDIOC_G_FMT32: ncmd = VIDIOC_G_FMT; break;
+ case VIDIOC_S_FMT32: ncmd = VIDIOC_S_FMT; break;
+ case VIDIOC_QUERYBUF32: ncmd = VIDIOC_QUERYBUF; break;
+ case VIDIOC_QUERYBUF32_TIME32: ncmd = VIDIOC_QUERYBUF_TIME32; break;
+ case VIDIOC_G_FBUF32: ncmd = VIDIOC_G_FBUF; break;
+ case VIDIOC_S_FBUF32: ncmd = VIDIOC_S_FBUF; break;
+ case VIDIOC_QBUF32: ncmd = VIDIOC_QBUF; break;
+ case VIDIOC_QBUF32_TIME32: ncmd = VIDIOC_QBUF_TIME32; break;
+ case VIDIOC_DQBUF32: ncmd = VIDIOC_DQBUF; break;
+ case VIDIOC_DQBUF32_TIME32: ncmd = VIDIOC_DQBUF_TIME32; break;
+ case VIDIOC_ENUMSTD32: ncmd = VIDIOC_ENUMSTD; break;
+ case VIDIOC_ENUMINPUT32: ncmd = VIDIOC_ENUMINPUT; break;
+ case VIDIOC_TRY_FMT32: ncmd = VIDIOC_TRY_FMT; break;
+ case VIDIOC_G_EXT_CTRLS32: ncmd = VIDIOC_G_EXT_CTRLS; break;
+ case VIDIOC_S_EXT_CTRLS32: ncmd = VIDIOC_S_EXT_CTRLS; break;
+ case VIDIOC_TRY_EXT_CTRLS32: ncmd = VIDIOC_TRY_EXT_CTRLS; break;
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32: ncmd = VIDIOC_DQEVENT; break;
+ case VIDIOC_DQEVENT32_TIME32: ncmd = VIDIOC_DQEVENT_TIME32; break;
+#endif
+ case VIDIOC_OVERLAY32: ncmd = VIDIOC_OVERLAY; break;
+ case VIDIOC_STREAMON32: ncmd = VIDIOC_STREAMON; break;
+ case VIDIOC_STREAMOFF32: ncmd = VIDIOC_STREAMOFF; break;
+ case VIDIOC_G_INPUT32: ncmd = VIDIOC_G_INPUT; break;
+ case VIDIOC_S_INPUT32: ncmd = VIDIOC_S_INPUT; break;
+ case VIDIOC_G_OUTPUT32: ncmd = VIDIOC_G_OUTPUT; break;
+ case VIDIOC_S_OUTPUT32: ncmd = VIDIOC_S_OUTPUT; break;
+ case VIDIOC_CREATE_BUFS32: ncmd = VIDIOC_CREATE_BUFS; break;
+ case VIDIOC_PREPARE_BUF32: ncmd = VIDIOC_PREPARE_BUF; break;
+ case VIDIOC_PREPARE_BUF32_TIME32: ncmd = VIDIOC_PREPARE_BUF_TIME32; break;
+ case VIDIOC_G_EDID32: ncmd = VIDIOC_G_EDID; break;
+ case VIDIOC_S_EDID32: ncmd = VIDIOC_S_EDID; break;
+ default: ncmd = cmd; break;
}
/*
@@ -1221,11 +1512,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
* argument into it.
*/
switch (cmd) {
- case VIDIOC_OVERLAY:
- case VIDIOC_STREAMON:
- case VIDIOC_STREAMOFF:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_OUTPUT:
+ case VIDIOC_OVERLAY32:
+ case VIDIOC_STREAMON32:
+ case VIDIOC_STREAMOFF32:
+ case VIDIOC_S_INPUT32:
+ case VIDIOC_S_OUTPUT32:
err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
if (!err && assign_in_user((unsigned int __user *)new_p64,
(compat_uint_t __user *)p32))
@@ -1233,23 +1524,23 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_G_INPUT:
- case VIDIOC_G_OUTPUT:
+ case VIDIOC_G_INPUT32:
+ case VIDIOC_G_OUTPUT32:
err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
compatible_arg = 0;
break;
- case VIDIOC_G_EDID:
- case VIDIOC_S_EDID:
+ case VIDIOC_G_EDID32:
+ case VIDIOC_S_EDID32:
err = alloc_userspace(sizeof(struct v4l2_edid), 0, &new_p64);
if (!err)
err = get_v4l2_edid32(new_p64, p32);
compatible_arg = 0;
break;
- case VIDIOC_G_FMT:
- case VIDIOC_S_FMT:
- case VIDIOC_TRY_FMT:
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_TRY_FMT32:
err = bufsize_v4l2_format(p32, &aux_space);
if (!err)
err = alloc_userspace(sizeof(struct v4l2_format),
@@ -1262,7 +1553,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_CREATE_BUFS:
+ case VIDIOC_CREATE_BUFS32:
err = bufsize_v4l2_create(p32, &aux_space);
if (!err)
err = alloc_userspace(sizeof(struct v4l2_create_buffers),
@@ -1275,10 +1566,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_PREPARE_BUF:
- case VIDIOC_QUERYBUF:
- case VIDIOC_QBUF:
- case VIDIOC_DQBUF:
+ case VIDIOC_PREPARE_BUF32:
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
err = bufsize_v4l2_buffer(p32, &aux_space);
if (!err)
err = alloc_userspace(sizeof(struct v4l2_buffer),
@@ -1291,7 +1582,23 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_S_FBUF:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ err = bufsize_v4l2_buffer_time32(p32, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_buffer),
+ aux_space, &new_p64);
+ if (!err) {
+ aux_buf = new_p64 + sizeof(struct v4l2_buffer);
+ err = get_v4l2_buffer32_time32(new_p64, p32,
+ aux_buf, aux_space);
+ }
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_S_FBUF32:
err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
&new_p64);
if (!err)
@@ -1299,13 +1606,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_G_FBUF:
+ case VIDIOC_G_FBUF32:
err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
&new_p64);
compatible_arg = 0;
break;
- case VIDIOC_ENUMSTD:
+ case VIDIOC_ENUMSTD32:
err = alloc_userspace(sizeof(struct v4l2_standard), 0,
&new_p64);
if (!err)
@@ -1313,16 +1620,16 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_ENUMINPUT:
+ case VIDIOC_ENUMINPUT32:
err = alloc_userspace(sizeof(struct v4l2_input), 0, &new_p64);
if (!err)
err = get_v4l2_input32(new_p64, p32);
compatible_arg = 0;
break;
- case VIDIOC_G_EXT_CTRLS:
- case VIDIOC_S_EXT_CTRLS:
- case VIDIOC_TRY_EXT_CTRLS:
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32:
err = bufsize_v4l2_ext_controls(p32, &aux_space);
if (!err)
err = alloc_userspace(sizeof(struct v4l2_ext_controls),
@@ -1334,10 +1641,16 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
}
compatible_arg = 0;
break;
- case VIDIOC_DQEVENT:
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32:
err = alloc_userspace(sizeof(struct v4l2_event), 0, &new_p64);
compatible_arg = 0;
break;
+ case VIDIOC_DQEVENT32_TIME32:
+ err = alloc_userspace(sizeof(struct v4l2_event_time32), 0, &new_p64);
+ compatible_arg = 0;
+ break;
+#endif
}
if (err)
return err;
@@ -1352,9 +1665,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
* Otherwise, it will pass the newly allocated @new_p64 argument.
*/
if (compatible_arg)
- err = native_ioctl(file, cmd, (unsigned long)p32);
+ err = native_ioctl(file, ncmd, (unsigned long)p32);
else
- err = native_ioctl(file, cmd, (unsigned long)new_p64);
+ err = native_ioctl(file, ncmd, (unsigned long)new_p64);
if (err == -ENOTTY)
return err;
@@ -1370,13 +1683,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
* the blocks to maximum allowed value.
*/
switch (cmd) {
- case VIDIOC_G_EXT_CTRLS:
- case VIDIOC_S_EXT_CTRLS:
- case VIDIOC_TRY_EXT_CTRLS:
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32:
if (put_v4l2_ext_controls32(file, new_p64, p32))
err = -EFAULT;
break;
- case VIDIOC_S_EDID:
+ case VIDIOC_S_EDID32:
if (put_v4l2_edid32(new_p64, p32))
err = -EFAULT;
break;
@@ -1389,49 +1702,62 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
* the original 32 bits structure.
*/
switch (cmd) {
- case VIDIOC_S_INPUT:
- case VIDIOC_S_OUTPUT:
- case VIDIOC_G_INPUT:
- case VIDIOC_G_OUTPUT:
+ case VIDIOC_S_INPUT32:
+ case VIDIOC_S_OUTPUT32:
+ case VIDIOC_G_INPUT32:
+ case VIDIOC_G_OUTPUT32:
if (assign_in_user((compat_uint_t __user *)p32,
((unsigned int __user *)new_p64)))
err = -EFAULT;
break;
- case VIDIOC_G_FBUF:
+ case VIDIOC_G_FBUF32:
err = put_v4l2_framebuffer32(new_p64, p32);
break;
- case VIDIOC_DQEVENT:
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32:
err = put_v4l2_event32(new_p64, p32);
break;
- case VIDIOC_G_EDID:
+ case VIDIOC_DQEVENT32_TIME32:
+ err = put_v4l2_event32_time32(new_p64, p32);
+ break;
+#endif
+
+ case VIDIOC_G_EDID32:
err = put_v4l2_edid32(new_p64, p32);
break;
- case VIDIOC_G_FMT:
- case VIDIOC_S_FMT:
- case VIDIOC_TRY_FMT:
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_TRY_FMT32:
err = put_v4l2_format32(new_p64, p32);
break;
- case VIDIOC_CREATE_BUFS:
+ case VIDIOC_CREATE_BUFS32:
err = put_v4l2_create32(new_p64, p32);
break;
- case VIDIOC_PREPARE_BUF:
- case VIDIOC_QUERYBUF:
- case VIDIOC_QBUF:
- case VIDIOC_DQBUF:
+ case VIDIOC_PREPARE_BUF32:
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
err = put_v4l2_buffer32(new_p64, p32);
break;
- case VIDIOC_ENUMSTD:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ err = put_v4l2_buffer32_time32(new_p64, p32);
+ break;
+
+ case VIDIOC_ENUMSTD32:
err = put_v4l2_standard32(new_p64, p32);
break;
- case VIDIOC_ENUMINPUT:
+ case VIDIOC_ENUMINPUT32:
err = put_v4l2_input32(new_p64, p32);
break;
}
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 9d673d113d7a..290c6b213179 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -27,6 +27,7 @@ static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
struct v4l2_kevent *kev;
+ struct timespec64 ts;
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
@@ -44,7 +45,9 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
kev->event.pending = fh->navailable;
*event = kev->event;
- event->timestamp = ns_to_timespec(kev->ts);
+ ts = ns_to_timespec64(kev->ts);
+ event->timestamp.tv_sec = ts.tv_sec;
+ event->timestamp.tv_nsec = ts.tv_nsec;
kev->sev->first = sev_pos(kev->sev, 1);
kev->sev->in_use--;
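
With the hunk above, the event timestamp is kept as a 64-bit timespec64 in the kernel and split into tv_sec/tv_nsec on the way out; dequeuing is unchanged for userspace. A minimal sketch, assuming /dev/video0 and a driver that can deliver V4L2_EVENT_EOS:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	int fd = open("/dev/video0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_EOS;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
		return 1;

	/* Blocks until an event arrives, then prints the split timestamp. */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
		printf("event %u at %lld.%09ld\n", ev.type,
		       (long long)ev.timestamp.tv_sec,
		       (long)ev.timestamp.tv_nsec);
	return 0;
}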
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 192cac076761..6ece4320e1d2 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -422,7 +422,7 @@ static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
sizeof(*vep) - offsetof(typeof(*vep), bus));
}
- pr_debug("===== begin V4L2 endpoint properties\n");
+ pr_debug("===== begin parsing endpoint %pfw\n", fwnode);
/*
* Zero the fwnode graph endpoint memory in case we don't end up parsing
@@ -500,7 +500,7 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
ret = __v4l2_fwnode_endpoint_parse(fwnode, vep);
- pr_debug("===== end V4L2 endpoint properties\n");
+ pr_debug("===== end parsing endpoint %pfw\n", fwnode);
return ret;
}
@@ -551,7 +551,7 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
vep->link_frequencies[i]);
}
- pr_debug("===== end V4L2 endpoint properties\n");
+ pr_debug("===== end parsing endpoint %pfw\n", fwnode);
return 0;
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 003b7422aeef..aaf83e254272 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -474,10 +474,10 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
- p->timestamp.tv_sec / 3600,
- (int)(p->timestamp.tv_sec / 60) % 60,
- (int)(p->timestamp.tv_sec % 60),
+ pr_cont("%02d:%02d:%02d.%09ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ (int)p->timestamp.tv_sec / 3600,
+ ((int)p->timestamp.tv_sec / 60) % 60,
+ ((int)p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
prt_names(p->type, v4l2_type_names), p->request_fd,
@@ -821,7 +821,7 @@ static void v4l_print_event(const void *arg, bool write_only)
const struct v4l2_event *p = arg;
const struct v4l2_event_ctrl *c;
- pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%lu.%9.9lu\n",
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n",
p->type, p->pending, p->sequence, p->id,
p->timestamp.tv_sec, p->timestamp.tv_nsec);
switch (p->type) {
@@ -961,7 +961,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
+ if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
return 0;
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
@@ -3023,8 +3023,162 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
return ret;
}
+static unsigned int video_translate_cmd(unsigned int cmd)
+{
+ switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_DQEVENT_TIME32:
+ return VIDIOC_DQEVENT;
+ case VIDIOC_QUERYBUF_TIME32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF_TIME32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF_TIME32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_PREPARE_BUF_TIME32:
+ return VIDIOC_PREPARE_BUF;
+#endif
+ }
+
+ return cmd;
+}
+
+static int video_get_user(void __user *arg, void *parg, unsigned int cmd,
+ bool *always_copy)
+{
+ unsigned int n = _IOC_SIZE(cmd);
+
+ if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
+ /* read-only ioctl */
+ memset(parg, 0, n);
+ return 0;
+ }
+
+ switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF_TIME32:
+ case VIDIOC_QBUF_TIME32:
+ case VIDIOC_DQBUF_TIME32:
+ case VIDIOC_PREPARE_BUF_TIME32: {
+ struct v4l2_buffer_time32 vb32;
+ struct v4l2_buffer *vb = parg;
+
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
+
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.userptr = vb32.m.userptr,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+
+ if (cmd == VIDIOC_QUERYBUF_TIME32)
+ vb->request_fd = 0;
+
+ break;
+ }
+#endif
+ default:
+ /*
+ * In some cases, only a few fields are used as input,
+ * i.e. when the app sets "index" and then the driver
+ * fills in the rest of the structure for the thing
+ * with that index. We only need to copy up to the first
+ * non-input field.
+ */
+ if (v4l2_is_known_ioctl(cmd)) {
+ u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
+
+ if (flags & INFO_FL_CLEAR_MASK)
+ n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+ *always_copy = flags & INFO_FL_ALWAYS_COPY;
+ }
+
+ if (copy_from_user(parg, (void __user *)arg, n))
+ return -EFAULT;
+
+ /* zero out anything we don't copy from userspace */
+ if (n < _IOC_SIZE(cmd))
+ memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
+ break;
+ }
+
+ return 0;
+}
+
+static int video_put_user(void __user *arg, void *parg, unsigned int cmd)
+{
+ if (!(_IOC_DIR(cmd) & _IOC_READ))
+ return 0;
+
+ switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_DQEVENT_TIME32: {
+ struct v4l2_event *ev = parg;
+ struct v4l2_event_time32 ev32 = {
+ .type = ev->type,
+ .pending = ev->pending,
+ .sequence = ev->sequence,
+ .timestamp.tv_sec = ev->timestamp.tv_sec,
+ .timestamp.tv_nsec = ev->timestamp.tv_nsec,
+ .id = ev->id,
+ };
+
+ memcpy(&ev32.u, &ev->u, sizeof(ev->u));
+ memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
+
+ if (copy_to_user(arg, &ev32, sizeof(ev32)))
+ return -EFAULT;
+ break;
+ }
+ case VIDIOC_QUERYBUF_TIME32:
+ case VIDIOC_QBUF_TIME32:
+ case VIDIOC_DQBUF_TIME32:
+ case VIDIOC_PREPARE_BUF_TIME32: {
+ struct v4l2_buffer *vb = parg;
+ struct v4l2_buffer_time32 vb32 = {
+ .index = vb->index,
+ .type = vb->type,
+ .bytesused = vb->bytesused,
+ .flags = vb->flags,
+ .field = vb->field,
+ .timestamp.tv_sec = vb->timestamp.tv_sec,
+ .timestamp.tv_usec = vb->timestamp.tv_usec,
+ .timecode = vb->timecode,
+ .sequence = vb->sequence,
+ .memory = vb->memory,
+ .m.userptr = vb->m.userptr,
+ .length = vb->length,
+ .request_fd = vb->request_fd,
+ };
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+ break;
+ }
+#endif
+ default:
+ /* Copy results into user buffer */
+ if (copy_to_user(arg, parg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ break;
+ }
+
+ return 0;
+}
+
long
-video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
@@ -3036,6 +3190,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
+ unsigned int cmd = video_translate_cmd(orig_cmd);
const size_t ioc_size = _IOC_SIZE(cmd);
/* Copy arguments into temp kernel buffer */
@@ -3050,35 +3205,10 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
parg = mbuf;
}
- err = -EFAULT;
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- unsigned int n = ioc_size;
-
- /*
- * In some cases, only a few fields are used as input,
- * i.e. when the app sets "index" and then the driver
- * fills in the rest of the structure for the thing
- * with that index. We only need to copy up the first
- * non-input field.
- */
- if (v4l2_is_known_ioctl(cmd)) {
- u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
-
- if (flags & INFO_FL_CLEAR_MASK)
- n = (flags & INFO_FL_CLEAR_MASK) >> 16;
- always_copy = flags & INFO_FL_ALWAYS_COPY;
- }
-
- if (copy_from_user(parg, (void __user *)arg, n))
- goto out;
-
- /* zero out anything we don't copy from userspace */
- if (n < ioc_size)
- memset((u8 *)parg + n, 0, ioc_size - n);
- } else {
- /* read-only ioctl */
- memset(parg, 0, ioc_size);
- }
+ err = video_get_user((void __user *)arg, parg, orig_cmd,
+ &always_copy);
+ if (err)
+ goto out;
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
@@ -3131,15 +3261,8 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
goto out;
out_array_args:
- /* Copy results into user buffer */
- switch (_IOC_DIR(cmd)) {
- case _IOC_READ:
- case (_IOC_WRITE | _IOC_READ):
- if (copy_to_user((void __user *)arg, parg, ioc_size))
- err = -EFAULT;
- break;
- }
-
+ if (video_put_user((void __user *)arg, parg, orig_cmd))
+ err = -EFAULT;
out:
kvfree(mbuf);
return err;
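
The translate/get/put split above means a binary built with 32-bit time_t issues VIDIOC_QUERYBUF_TIME32 while source-level users keep writing VIDIOC_QUERYBUF; both paths round-trip through the same 64-bit v4l2_buffer in the kernel. A minimal sketch of the userspace side, assuming /dev/video0 supports MMAP capture:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	int fd = open("/dev/video0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.count = 1;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req))
		return 1;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) == 0)
		printf("buffer 0: length %u, ts %lld.%06ld\n", buf.length,
		       (long long)buf.timestamp.tv_sec,
		       (long)buf.timestamp.tv_usec);
	return 0;
}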
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 9e987c0f840e..a376b351135f 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -331,8 +331,8 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_fh *vfh = file->private_data;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
- int rval;
#endif
+ int rval;
switch (cmd) {
case VIDIOC_QUERYCTRL:
@@ -392,6 +392,30 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+ case VIDIOC_DQEVENT_TIME32: {
+ struct v4l2_event_time32 *ev32 = arg;
+ struct v4l2_event ev = { };
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ rval = v4l2_event_dequeue(vfh, &ev, file->f_flags & O_NONBLOCK);
+
+ *ev32 = (struct v4l2_event_time32) {
+ .type = ev.type,
+ .pending = ev.pending,
+ .sequence = ev.sequence,
+ .timestamp.tv_sec = ev.timestamp.tv_sec,
+ .timestamp.tv_nsec = ev.timestamp.tv_nsec,
+ .id = ev.id,
+ };
+
+ memcpy(&ev32->u, &ev.u, sizeof(ev.u));
+ memcpy(&ev32->reserved, &ev.reserved, sizeof(ev.reserved));
+
+ return rval;
+ }
+
case VIDIOC_SUBSCRIBE_EVENT:
return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 939fc11cf080..2686f03b322e 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <media/videobuf-core.h>
+#include <media/v4l2-common.h>
#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should) \
@@ -364,7 +365,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
}
b->field = vb->field;
- b->timestamp = ns_to_timeval(vb->ts);
+ v4l2_buffer_set_timestamp(b, vb->ts);
b->bytesused = vb->size;
b->sequence = vb->field_count >> 1;
}
@@ -578,7 +579,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
|| q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
buf->size = b->bytesused;
buf->field = b->field;
- buf->ts = v4l2_timeval_to_ns(&b->timestamp);
+ buf->ts = v4l2_buffer_get_timestamp(b);
}
break;
case V4L2_MEMORY_USERPTR:
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 66a6c6c236a7..13b65ed9e74c 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -183,12 +183,12 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ err = pin_user_pages(data & PAGE_MASK, dma->nr_pages,
flags | FOLL_LONGTERM, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err,
+ dprintk(1, "pin_user_pages: err=%d [%d]\n", err,
dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
@@ -349,8 +349,8 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
BUG_ON(dma->sglen);
if (dma->pages) {
- for (i = 0; i < dma->nr_pages; i++)
- put_page(dma->pages[i]);
+ unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
+ dma->direction == DMA_FROM_DEVICE);
kfree(dma->pages);
dma->pages = NULL;
}
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 095f8a3b2cfc..886aea587276 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -267,7 +267,6 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
struct devbus_read_params r;
struct devbus_write_params w;
struct devbus *devbus;
- struct resource *res;
struct clk *clk;
unsigned long rate;
int err;
@@ -277,8 +276,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
return -ENOMEM;
devbus->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- devbus->base = devm_ioremap_resource(&pdev->dev, res);
+ devbus->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(devbus->base))
return PTR_ERR(devbus->base);
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index e9c3ce92350c..20a8406ce786 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -8,7 +8,7 @@ config SAMSUNG_MC
if SAMSUNG_MC
config EXYNOS5422_DMC
- tristate "EXYNOS5422 Dynamic Memory Controller driver"
+ tristate "Exynos5422 Dynamic Memory Controller driver"
depends on ARCH_EXYNOS || (COMPILE_TEST && HAS_IOMEM)
select DDR
depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index c27c6105c66d..6510d7bab217 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -3,7 +3,7 @@
// Copyright (c) 2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS - SROM Controller support
+// Exynos - SROM Controller support
// Author: Pankaj Dubey <pankaj.dubey@samsung.com>
#include <linux/io.h>
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 47dbf6d1789f..81a1b1d01683 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1374,7 +1374,6 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct exynos5_dmc *dmc;
- struct resource *res;
int irq[2];
dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
@@ -1386,13 +1385,11 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
dmc->dev = dev;
platform_set_drvdata(pdev, dmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmc->base_drexi0 = devm_ioremap_resource(dev, res);
+ dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmc->base_drexi0))
return PTR_ERR(dmc->base_drexi0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dmc->base_drexi1 = devm_ioremap_resource(dev, res);
+ dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dmc->base_drexi1))
return PTR_ERR(dmc->base_drexi1);
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 3d23c4261104..529d10bc5650 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -13,4 +13,5 @@ obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
-obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 464f0ceaee63..21f05240682b 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -467,12 +467,20 @@ struct tegra_emc {
void __iomem *regs;
+ struct clk *clk;
+
enum emc_dram_type dram_type;
unsigned int dram_num;
struct emc_timing last_timing;
struct emc_timing *timings;
unsigned int num_timings;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
};
/* Timing change sequence functions */
@@ -998,38 +1006,51 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
return NULL;
}
-/* Debugfs entry */
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
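
A short sketch of driving the files documented above from userspace; the 204000000 value is a placeholder and must be one of the frequencies listed in available_rates.

#include <stdio.h>

int main(void)
{
	char rates[256];
	FILE *f = fopen("/sys/kernel/debug/emc/available_rates", "r");

	if (f) {
		if (fgets(rates, sizeof(rates), f))
			printf("available: %s", rates);
		fclose(f);
	}

	f = fopen("/sys/kernel/debug/emc/min_rate", "w");
	if (!f)
		return 1;
	fprintf(f, "204000000\n");	/* placeholder rate */
	return fclose(f) ? 1 : 0;
}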
-static int emc_debug_rate_get(void *data, u64 *rate)
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
- struct clk *c = data;
-
- *rate = clk_get_rate(c);
-
- return 0;
-}
+ unsigned int i;
-static int emc_debug_rate_set(void *data, u64 rate)
-{
- struct clk *c = data;
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
- return clk_set_rate(c, rate);
+ return false;
}
-DEFINE_SIMPLE_ATTRIBUTE(emc_debug_rate_fops, emc_debug_rate_get,
- emc_debug_rate_set, "%lld\n");
-
-static int emc_debug_supported_rates_show(struct seq_file *s, void *data)
+static int tegra_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
{
struct tegra_emc *emc = s->private;
const char *prefix = "";
unsigned int i;
for (i = 0; i < emc->num_timings; i++) {
- struct emc_timing *timing = &emc->timings[i];
-
- seq_printf(s, "%s%lu", prefix, timing->rate);
-
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
prefix = " ";
}
@@ -1038,46 +1059,126 @@ static int emc_debug_supported_rates_show(struct seq_file *s, void *data)
return 0;
}
-static int emc_debug_supported_rates_open(struct inode *inode,
- struct file *file)
+static int tegra_emc_debug_available_rates_open(struct inode *inode,
+ struct file *file)
{
- return single_open(file, emc_debug_supported_rates_show,
+ return single_open(file, tegra_emc_debug_available_rates_show,
inode->i_private);
}
-static const struct file_operations emc_debug_supported_rates_fops = {
- .open = emc_debug_supported_rates_open,
+static const struct file_operations tegra_emc_debug_available_rates_fops = {
+ .open = tegra_emc_debug_available_rates_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
{
- struct dentry *root, *file;
- struct clk *clk;
+ unsigned int i;
+ int err;
- root = debugfs_create_dir("emc", NULL);
- if (!root) {
- dev_err(dev, "failed to create debugfs directory\n");
- return;
+ emc->clk = devm_clk_get(dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ if (PTR_ERR(emc->clk) != -ENODEV) {
+ dev_err(dev, "failed to get EMC clock: %ld\n",
+ PTR_ERR(emc->clk));
+ return;
+ }
}
- clk = clk_get_sys("tegra-clk-debug", "emc");
- if (IS_ERR(clk)) {
- dev_err(dev, "failed to get debug clock: %ld\n", PTR_ERR(clk));
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
return;
}
- file = debugfs_create_file("rate", S_IRUGO | S_IWUSR, root, clk,
- &emc_debug_rate_fops);
- if (!file)
- dev_err(dev, "failed to create debugfs entry\n");
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ if (!emc->debugfs.root) {
+ dev_err(dev, "failed to create debugfs directory\n");
+ return;
+ }
- file = debugfs_create_file("supported_rates", S_IRUGO, root, emc,
- &emc_debug_supported_rates_fops);
- if (!file)
- dev_err(dev, "failed to create debugfs entry\n");
+ debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc,
+ &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
}
static int tegra_emc_probe(struct platform_device *pdev)
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
new file mode 100644
index 000000000000..97f26bc77ad4
--- /dev/null
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra186_emc_dvfs {
+ unsigned long latency;
+ unsigned long rate;
+};
+
+struct tegra186_emc {
+ struct tegra_bpmp *bpmp;
+ struct device *dev;
+ struct clk *clk;
+
+ struct tegra186_emc_dvfs *dvfs;
+ unsigned int num_dvfs;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+};
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
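+
+/*
+ * Note: the set of valid rates here comes from the DVFS table queried from
+ * the BPMP firmware at probe time; any other value written to min_rate or
+ * max_rate is rejected with -EINVAL by tegra186_emc_validate_rate() below.
+ */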
+
+static bool tegra186_emc_validate_rate(struct tegra186_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_dvfs; i++)
+ if (rate == emc->dvfs[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra186_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra186_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_dvfs; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->dvfs[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int tegra186_emc_debug_available_rates_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, tegra186_emc_debug_available_rates_show,
+ inode->i_private);
+}
+
+static const struct file_operations tegra186_emc_debug_available_rates_fops = {
+ .open = tegra186_emc_debug_available_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra186_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra186_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra186_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra186_emc *emc = data;
+ int err;
+
+ if (!tegra186_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_min_rate_fops,
+ tegra186_emc_debug_min_rate_get,
+ tegra186_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra186_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra186_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra186_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra186_emc *emc = data;
+ int err;
+
+ if (!tegra186_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra186_emc_debug_max_rate_fops,
+ tegra186_emc_debug_max_rate_get,
+ tegra186_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra186_emc_probe(struct platform_device *pdev)
+{
+ struct mrq_emc_dvfs_latency_response response;
+ struct tegra_bpmp_message msg;
+ struct tegra186_emc *emc;
+ unsigned int i;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->bpmp = tegra_bpmp_get(&pdev->dev);
+ if (IS_ERR(emc->bpmp)) {
+ err = PTR_ERR(emc->bpmp);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get BPMP: %d\n", err);
+
+ return err;
+ }
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, emc);
+ emc->dev = &pdev->dev;
+
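+ /*
+  * Query the BPMP firmware for the list of supported EMC frequencies
+  * and their DVFS switching latencies: the request carries no payload
+  * and the response pairs are decoded below (frequencies in kHz,
+  * converted to Hz; latencies in microseconds).
+  */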
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_EMC_DVFS_LATENCY;
+ msg.tx.data = NULL;
+ msg.tx.size = 0;
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ err = tegra_bpmp_transfer(emc->bpmp, &msg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
+ return err;
+ }
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ emc->num_dvfs = response.num_pairs;
+
+ emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs,
+ sizeof(*emc->dvfs), GFP_KERNEL);
+ if (!emc->dvfs)
+ return -ENOMEM;
+
+ dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs);
+
+ for (i = 0; i < emc->num_dvfs; i++) {
+ emc->dvfs[i].rate = response.pairs[i].freq * 1000;
+ emc->dvfs[i].latency = response.pairs[i].latency;
+
+ if (emc->dvfs[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->dvfs[i].rate;
+
+ if (emc->dvfs[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->dvfs[i].rate;
+
+ dev_dbg(&pdev->dev, " %2u: %lu Hz -> %lu us\n", i,
+ emc->dvfs[i].rate, emc->dvfs[i].latency);
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return err;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ if (!emc->debugfs.root) {
+ dev_err(&pdev->dev, "failed to create debugfs directory\n");
+ return 0;
+ }
+
+ debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ emc, &tegra186_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra186_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra186_emc_debug_max_rate_fops);
+
+ return 0;
+}
+
+static int tegra186_emc_remove(struct platform_device *pdev)
+{
+ struct tegra186_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra_bpmp_put(emc->bpmp);
+
+ return 0;
+}
+
+static const struct of_device_id tegra186_emc_of_match[] = {
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+ { .compatible = "nvidia,tegra186-emc" },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ { .compatible = "nvidia,tegra194-emc" },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra186_emc_of_match);
+
+static struct platform_driver tegra186_emc_driver = {
+ .driver = {
+ .name = "tegra186-emc",
+ .of_match_table = tegra186_emc_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra186_emc_probe,
+ .remove = tegra186_emc_remove,
+};
+module_platform_driver(tegra186_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 External Memory Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 441213a35930..5d53f11ca7b6 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -6,16 +6,18 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
#include <dt-bindings/memory/tegra186-mc.h>
+#endif
-struct tegra_mc {
- struct device *dev;
- void __iomem *regs;
-};
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+#include <dt-bindings/memory/tegra194-mc.h>
+#endif
-struct tegra_mc_client {
+struct tegra186_mc_client {
const char *name;
unsigned int sid;
struct {
@@ -24,7 +26,46 @@ struct tegra_mc_client {
} regs;
};
-static const struct tegra_mc_client tegra186_mc_clients[] = {
+struct tegra186_mc_soc {
+ const struct tegra186_mc_client *clients;
+ unsigned int num_clients;
+};
+
+struct tegra186_mc {
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct tegra186_mc_soc *soc;
+};
+
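+/*
+ * Program the stream ID (SID) override register of every memory client
+ * described by the SoC data. This is called at probe time and again from
+ * the resume path, so that the SID configuration is restored after system
+ * sleep.
+ */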
+static void tegra186_mc_program_sid(struct tegra186_mc *mc)
+{
+ unsigned int i;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra186_mc_client *client = &mc->soc->clients[i];
+ u32 override, security;
+
+ override = readl(mc->regs + client->regs.override);
+ security = readl(mc->regs + client->regs.security);
+
+ dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+ client->name, override, security);
+
+ dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
+ client->name);
+ writel(client->sid, mc->regs + client->regs.override);
+
+ override = readl(mc->regs + client->regs.override);
+ security = readl(mc->regs + client->regs.security);
+
+ dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+ client->name, override, security);
+ }
+}
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct tegra186_mc_client tegra186_mc_clients[] = {
{
.name = "ptcr",
.sid = TEGRA186_SID_PASSTHROUGH,
@@ -532,17 +573,966 @@ static const struct tegra_mc_client tegra186_mc_clients[] = {
},
};
+static const struct tegra186_mc_soc tegra186_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra186_mc_clients),
+ .clients = tegra186_mc_clients,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+static const struct tegra186_mc_client tegra194_mc_clients[] = {
+ {
+ .name = "ptcr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x000,
+ .security = 0x004,
+ },
+ }, {
+ .name = "miu7r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x008,
+ .security = 0x00c,
+ },
+ }, {
+ .name = "miu7w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x010,
+ .security = 0x014,
+ },
+ }, {
+ .name = "hdar",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
+ }, {
+ .name = "host1xdmar",
+ .sid = TEGRA194_SID_HOST1X,
+ .regs = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
+ }, {
+ .name = "nvencsrd",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
+ }, {
+ .name = "satar",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
+ }, {
+ .name = "mpcorer",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
+ }, {
+ .name = "nvencswr",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ }, {
+ .name = "hdaw",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ }, {
+ .name = "mpcorew",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
+ }, {
+ .name = "sataw",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ }, {
+ .name = "ispra",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .override = 0x220,
+ .security = 0x224,
+ },
+ }, {
+ .name = "ispfalr",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .override = 0x228,
+ .security = 0x22c,
+ },
+ }, {
+ .name = "ispwa",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .override = 0x230,
+ .security = 0x234,
+ },
+ }, {
+ .name = "ispwb",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
+ }, {
+ .name = "xusb_hostr",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .override = 0x250,
+ .security = 0x254,
+ },
+ }, {
+ .name = "xusb_hostw",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
+ }, {
+ .name = "xusb_devr",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .override = 0x260,
+ .security = 0x264,
+ },
+ }, {
+ .name = "xusb_devw",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
+ }, {
+ .name = "sdmmcra",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .override = 0x300,
+ .security = 0x304,
+ },
+ }, {
+ .name = "sdmmcr",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .override = 0x310,
+ .security = 0x314,
+ },
+ }, {
+ .name = "sdmmcrab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ }, {
+ .name = "sdmmcwa",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .override = 0x320,
+ .security = 0x324,
+ },
+ }, {
+ .name = "sdmmcw",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .override = 0x330,
+ .security = 0x334,
+ },
+ }, {
+ .name = "sdmmcwab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ }, {
+ .name = "vicsrd",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ }, {
+ .name = "vicswr",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ }, {
+ .name = "viw",
+ .sid = TEGRA194_SID_VI,
+ .regs = {
+ .override = 0x390,
+ .security = 0x394,
+ },
+ }, {
+ .name = "nvdecsrd",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ }, {
+ .name = "nvdecswr",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ }, {
+ .name = "aper",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ }, {
+ .name = "apew",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ }, {
+ .name = "nvjpgsrd",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ }, {
+ .name = "nvjpgswr",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ }, {
+ .name = "axiapr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x410,
+ .security = 0x414,
+ },
+ }, {
+ .name = "axiapw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x418,
+ .security = 0x41c,
+ },
+ }, {
+ .name = "etrr",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .override = 0x420,
+ .security = 0x424,
+ },
+ }, {
+ .name = "etrw",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
+ }, {
+ .name = "axisr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x460,
+ .security = 0x464,
+ },
+ }, {
+ .name = "axisw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
+ }, {
+ .name = "eqosr",
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .override = 0x470,
+ .security = 0x474,
+ },
+ }, {
+ .name = "eqosw",
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
+ }, {
+ .name = "ufshcr",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .override = 0x480,
+ .security = 0x484,
+ },
+ }, {
+ .name = "ufshcw",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
+ }, {
+ .name = "nvdisplayr",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ }, {
+ .name = "bpmpr",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ }, {
+ .name = "bpmpw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ }, {
+ .name = "bpmpdmar",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ }, {
+ .name = "bpmpdmaw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ }, {
+ .name = "aonr",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
+ }, {
+ .name = "aonw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
+ }, {
+ .name = "aondmar",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
+ }, {
+ .name = "aondmaw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
+ }, {
+ .name = "scer",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
+ }, {
+ .name = "scew",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
+ }, {
+ .name = "scedmar",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
+ }, {
+ .name = "scedmaw",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
+ }, {
+ .name = "apedmar",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ }, {
+ .name = "apedmaw",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ }, {
+ .name = "nvdisplayr1",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ }, {
+ .name = "vicsrd1",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .override = 0x510,
+ .security = 0x514,
+ },
+ }, {
+ .name = "nvdecsrd1",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
+ }, {
+ .name = "miu0r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x530,
+ .security = 0x534,
+ },
+ }, {
+ .name = "miu0w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x538,
+ .security = 0x53c,
+ },
+ }, {
+ .name = "miu1r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x540,
+ .security = 0x544,
+ },
+ }, {
+ .name = "miu1w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x548,
+ .security = 0x54c,
+ },
+ }, {
+ .name = "miu2r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x570,
+ .security = 0x574,
+ },
+ }, {
+ .name = "miu2w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x578,
+ .security = 0x57c,
+ },
+ }, {
+ .name = "miu3r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x580,
+ .security = 0x584,
+ },
+ }, {
+ .name = "miu3w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x588,
+ .security = 0x58c,
+ },
+ }, {
+ .name = "miu4r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x590,
+ .security = 0x594,
+ },
+ }, {
+ .name = "miu4w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ }, {
+ .name = "dpmur",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ }, {
+ .name = "vifalr",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .override = 0x5e0,
+ .security = 0x5e4,
+ },
+ }, {
+ .name = "vifalw",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .override = 0x5e8,
+ .security = 0x5ec,
+ },
+ }, {
+ .name = "dla0rda",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .override = 0x5f0,
+ .security = 0x5f4,
+ },
+ }, {
+ .name = "dla0falrdb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .override = 0x5f8,
+ .security = 0x5fc,
+ },
+ }, {
+ .name = "dla0wra",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .override = 0x600,
+ .security = 0x604,
+ },
+ }, {
+ .name = "dla0falwrb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .override = 0x608,
+ .security = 0x60c,
+ },
+ }, {
+ .name = "dla1rda",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .override = 0x610,
+ .security = 0x614,
+ },
+ }, {
+ .name = "dla1falrdb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .override = 0x618,
+ .security = 0x61c,
+ },
+ }, {
+ .name = "dla1wra",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .override = 0x620,
+ .security = 0x624,
+ },
+ }, {
+ .name = "dla1falwrb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .override = 0x628,
+ .security = 0x62c,
+ },
+ }, {
+ .name = "pva0rda",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x630,
+ .security = 0x634,
+ },
+ }, {
+ .name = "pva0rdb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x638,
+ .security = 0x63c,
+ },
+ }, {
+ .name = "pva0rdc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x640,
+ .security = 0x644,
+ },
+ }, {
+ .name = "pva0wra",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x648,
+ .security = 0x64c,
+ },
+ }, {
+ .name = "pva0wrb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x650,
+ .security = 0x654,
+ },
+ }, {
+ .name = "pva0wrc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x658,
+ .security = 0x65c,
+ },
+ }, {
+ .name = "pva1rda",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x660,
+ .security = 0x664,
+ },
+ }, {
+ .name = "pva1rdb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x668,
+ .security = 0x66c,
+ },
+ }, {
+ .name = "pva1rdc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x670,
+ .security = 0x674,
+ },
+ }, {
+ .name = "pva1wra",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x678,
+ .security = 0x67c,
+ },
+ }, {
+ .name = "pva1wrb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x680,
+ .security = 0x684,
+ },
+ }, {
+ .name = "pva1wrc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x688,
+ .security = 0x68c,
+ },
+ }, {
+ .name = "rcer",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .override = 0x690,
+ .security = 0x694,
+ },
+ }, {
+ .name = "rcew",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .override = 0x698,
+ .security = 0x69c,
+ },
+ }, {
+ .name = "rcedmar",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .override = 0x6a0,
+ .security = 0x6a4,
+ },
+ }, {
+ .name = "rcedmaw",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .override = 0x6a8,
+ .security = 0x6ac,
+ },
+ }, {
+ .name = "nvenc1srd",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .override = 0x6b0,
+ .security = 0x6b4,
+ },
+ }, {
+ .name = "nvenc1swr",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .override = 0x6b8,
+ .security = 0x6bc,
+ },
+ }, {
+ .name = "pcie0r",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .override = 0x6c0,
+ .security = 0x6c4,
+ },
+ }, {
+ .name = "pcie0w",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .override = 0x6c8,
+ .security = 0x6cc,
+ },
+ }, {
+ .name = "pcie1r",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .override = 0x6d0,
+ .security = 0x6d4,
+ },
+ }, {
+ .name = "pcie1w",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .override = 0x6d8,
+ .security = 0x6dc,
+ },
+ }, {
+ .name = "pcie2ar",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .override = 0x6e0,
+ .security = 0x6e4,
+ },
+ }, {
+ .name = "pcie2aw",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .override = 0x6e8,
+ .security = 0x6ec,
+ },
+ }, {
+ .name = "pcie3r",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .override = 0x6f0,
+ .security = 0x6f4,
+ },
+ }, {
+ .name = "pcie3w",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .override = 0x6f8,
+ .security = 0x6fc,
+ },
+ }, {
+ .name = "pcie4r",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .override = 0x700,
+ .security = 0x704,
+ },
+ }, {
+ .name = "pcie4w",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .override = 0x708,
+ .security = 0x70c,
+ },
+ }, {
+ .name = "pcie5r",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .override = 0x710,
+ .security = 0x714,
+ },
+ }, {
+ .name = "pcie5w",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .override = 0x718,
+ .security = 0x71c,
+ },
+ }, {
+ .name = "ispfalw",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .override = 0x720,
+ .security = 0x724,
+ },
+ }, {
+ .name = "dla0rda1",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .override = 0x748,
+ .security = 0x74c,
+ },
+ }, {
+ .name = "dla1rda1",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .override = 0x750,
+ .security = 0x754,
+ },
+ }, {
+ .name = "pva0rda1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x758,
+ .security = 0x75c,
+ },
+ }, {
+ .name = "pva0rdb1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .override = 0x760,
+ .security = 0x764,
+ },
+ }, {
+ .name = "pva1rda1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x768,
+ .security = 0x76c,
+ },
+ }, {
+ .name = "pva1rdb1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .override = 0x770,
+ .security = 0x774,
+ },
+ }, {
+ .name = "pcie5r1",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .override = 0x778,
+ .security = 0x77c,
+ },
+ }, {
+ .name = "nvencsrd1",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .override = 0x780,
+ .security = 0x784,
+ },
+ }, {
+ .name = "nvenc1srd1",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .override = 0x788,
+ .security = 0x78c,
+ },
+ }, {
+ .name = "ispra1",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .override = 0x790,
+ .security = 0x794,
+ },
+ }, {
+ .name = "pcie0r1",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .override = 0x798,
+ .security = 0x79c,
+ },
+ }, {
+ .name = "nvdec1srd",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .override = 0x7c8,
+ .security = 0x7cc,
+ },
+ }, {
+ .name = "nvdec1srd1",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .override = 0x7d0,
+ .security = 0x7d4,
+ },
+ }, {
+ .name = "nvdec1swr",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .override = 0x7d8,
+ .security = 0x7dc,
+ },
+ }, {
+ .name = "miu5r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x7e0,
+ .security = 0x7e4,
+ },
+ }, {
+ .name = "miu5w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x7e8,
+ .security = 0x7ec,
+ },
+ }, {
+ .name = "miu6r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x7f0,
+ .security = 0x7f4,
+ },
+ }, {
+ .name = "miu6w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .override = 0x7f8,
+ .security = 0x7fc,
+ },
+ },
+};
+
+static const struct tegra186_mc_soc tegra194_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra194_mc_clients),
+ .clients = tegra194_mc_clients,
+};
+#endif
+
static int tegra186_mc_probe(struct platform_device *pdev)
{
+ struct tegra186_mc *mc;
struct resource *res;
- struct tegra_mc *mc;
- unsigned int i;
- int err = 0;
+ int err;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
+ mc->soc = of_device_get_match_data(&pdev->dev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mc->regs))
@@ -550,46 +1540,63 @@ static int tegra186_mc_probe(struct platform_device *pdev)
mc->dev = &pdev->dev;
- for (i = 0; i < ARRAY_SIZE(tegra186_mc_clients); i++) {
- const struct tegra_mc_client *client = &tegra186_mc_clients[i];
- u32 override, security;
-
- override = readl(mc->regs + client->regs.override);
- security = readl(mc->regs + client->regs.security);
+ err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (err < 0)
+ return err;
- dev_dbg(&pdev->dev, "client %s: override: %x security: %x\n",
- client->name, override, security);
-
- dev_dbg(&pdev->dev, "setting SID %u for %s\n", client->sid,
- client->name);
- writel(client->sid, mc->regs + client->regs.override);
+ platform_set_drvdata(pdev, mc);
+ tegra186_mc_program_sid(mc);
- override = readl(mc->regs + client->regs.override);
- security = readl(mc->regs + client->regs.security);
+ return 0;
+}
- dev_dbg(&pdev->dev, "client %s: override: %x security: %x\n",
- client->name, override, security);
- }
+static int tegra186_mc_remove(struct platform_device *pdev)
+{
+ struct tegra186_mc *mc = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, mc);
+ of_platform_depopulate(mc->dev);
- return err;
+ return 0;
}
static const struct of_device_id tegra186_mc_of_match[] = {
- { .compatible = "nvidia,tegra186-mc", },
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+ { .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ { .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
+#endif
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra186_mc_of_match);
+static int tegra186_mc_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int tegra186_mc_resume(struct device *dev)
+{
+ struct tegra186_mc *mc = dev_get_drvdata(dev);
+
+ tegra186_mc_program_sid(mc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra186_mc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra186_mc_suspend, tegra186_mc_resume)
+};
+
static struct platform_driver tegra186_mc_driver = {
.driver = {
.name = "tegra186-mc",
.of_match_table = tegra186_mc_of_match,
+ .pm = &tegra186_mc_pm_ops,
.suppress_bind_attrs = true,
},
- .prevent_deferred_probe = true,
.probe = tegra186_mc_probe,
+ .remove = tegra186_mc_remove,
};
module_platform_driver(tegra186_mc_driver);
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 1b23b1c34476..8ae474d9bfb9 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -150,6 +151,12 @@ struct tegra_emc {
struct emc_timing *timings;
unsigned int num_timings;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -478,6 +485,171 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int tegra_emc_debug_available_rates_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, tegra_emc_debug_available_rates_show,
+ inode->i_private);
+}
+
+static const struct file_operations tegra_emc_debug_available_rates_fops = {
+ .open = tegra_emc_debug_available_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ if (!emc->debugfs.root) {
+ dev_err(emc->dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ emc, &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
static int tegra_emc_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -550,6 +722,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
goto unset_cb;
}
+ platform_set_drvdata(pdev, emc);
+ tegra_emc_debugfs_init(emc);
+
return 0;
unset_cb:
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index b420268173fc..cc0482434c75 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -436,7 +436,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x37c,
.shift = 0,
.mask = 0xff,
- .def = 0x39,
+ .def = 0x7a,
},
}, {
.id = 0x4b,
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 0b6a5e451ea3..e3efd9529506 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -331,7 +332,9 @@ struct tegra_emc {
struct clk *clk;
void __iomem *regs;
unsigned int irq;
+ bool bad_state;
+ struct emc_timing *new_timing;
struct emc_timing *timings;
unsigned int num_timings;
@@ -345,10 +348,74 @@ struct tegra_emc {
bool vref_cal_toggle : 1;
bool zcal_long : 1;
bool dll_on : 1;
- bool prepared : 1;
- bool bad_state : 1;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
};
+static int emc_seq_update_timing(struct tegra_emc *emc)
+{
+ u32 val;
+ int err;
+
+ writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
+ !(val & EMC_STATUS_TIMING_UPDATE_STALLED),
+ 1, 200);
+ if (err) {
+ dev_err(emc->dev, "failed to update timing: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
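+/*
+ * Finish a clock-rate change from the interrupt handler: refresh,
+ * auto-calibration and self-refresh settings are restored for the new
+ * timing before the waiter in emc_complete_timing_change() is woken. The
+ * outcome is published with WRITE_ONCE() and read there with READ_ONCE().
+ */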
+static void emc_complete_clk_change(struct tegra_emc *emc)
+{
+ struct emc_timing *timing = emc->new_timing;
+ unsigned int dram_num;
+ bool failed = false;
+ int err;
+
+ /* re-enable auto-refresh */
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* wait for writes to settle */
+ udelay(2);
+
+ /* update restored timing */
+ err = emc_seq_update_timing(emc);
+ if (err)
+ failed = true;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ WRITE_ONCE(emc->bad_state, failed);
+}
+
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
@@ -359,10 +426,6 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
if (!status)
return IRQ_NONE;
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT)
- complete(&emc->clk_handshake_complete);
-
/* notify about HW problem */
if (status & EMC_REFRESH_OVERFLOW_INT)
dev_err_ratelimited(emc->dev,
@@ -371,6 +434,18 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
/* clear interrupts */
writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+ /* notify about EMC-CAR handshake completion */
+ if (status & EMC_CLKCHANGE_COMPLETE_INT) {
+ if (completion_done(&emc->clk_handshake_complete)) {
+ dev_err_ratelimited(emc->dev,
+ "bogus handshake interrupt\n");
+ return IRQ_NONE;
+ }
+
+ emc_complete_clk_change(emc);
+ complete(&emc->clk_handshake_complete);
+ }
+
return IRQ_HANDLED;
}
@@ -438,24 +513,6 @@ static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
return preset;
}
-static int emc_seq_update_timing(struct tegra_emc *emc)
-{
- u32 val;
- int err;
-
- writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
-
- err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
- !(val & EMC_STATUS_TIMING_UPDATE_STALLED),
- 1, 200);
- if (err) {
- dev_err(emc->dev, "failed to update timing: %d\n", err);
- return err;
- }
-
- return 0;
-}
-
static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
{
struct tegra_mc *mc = emc->mc;
@@ -582,8 +639,7 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
!(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
if (err) {
dev_err(emc->dev,
- "failed to disable auto-cal: %d\n",
- err);
+ "auto-cal finish timeout: %d\n", err);
return err;
}
@@ -621,9 +677,6 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
}
- /* disable interrupt since read access is prohibited after stalling */
- disable_irq(emc->irq);
-
/* this read also completes the writes */
val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);
@@ -739,20 +792,18 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
emc->regs + EMC_ZQ_CAL);
}
- /* re-enable auto-refresh */
- writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
- emc->regs + EMC_REFCTRL);
-
/* flow control marker 3 */
writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);
- reinit_completion(&emc->clk_handshake_complete);
+ /*
+ * Read and discard an arbitrary MC register (Note: EMC registers
+ * can't be used) to ensure the register writes are completed.
+ */
+ mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
- /* interrupt can be re-enabled now */
- enable_irq(emc->irq);
+ reinit_completion(&emc->clk_handshake_complete);
- emc->bad_state = false;
- emc->prepared = true;
+ emc->new_timing = timing;
return 0;
}
@@ -760,52 +811,25 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
static int emc_complete_timing_change(struct tegra_emc *emc,
unsigned long rate)
{
- struct emc_timing *timing = emc_find_timing(emc, rate);
unsigned long timeout;
- int ret;
timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
msecs_to_jiffies(100));
if (timeout == 0) {
dev_err(emc->dev, "emc-car handshake failed\n");
- emc->bad_state = true;
return -EIO;
}
- /* restore auto-calibration */
- if (emc->vref_cal_toggle)
- writel_relaxed(timing->emc_auto_cal_interval,
- emc->regs + EMC_AUTO_CAL_INTERVAL);
-
- /* restore dynamic self-refresh */
- if (timing->emc_cfg_dyn_self_ref) {
- emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
- writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
- }
-
- /* set number of clocks to wait after each ZQ command */
- if (emc->zcal_long)
- writel_relaxed(timing->emc_zcal_cnt_long,
- emc->regs + EMC_ZCAL_WAIT_CNT);
-
- udelay(2);
- /* update restored timing */
- ret = emc_seq_update_timing(emc);
- if (ret)
- emc->bad_state = true;
-
- /* restore early ACK */
- mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
-
- emc->prepared = false;
+ if (READ_ONCE(emc->bad_state))
+ return -EIO;
- return ret;
+ return 0;
}
static int emc_unprepare_timing_change(struct tegra_emc *emc,
unsigned long rate)
{
- if (emc->prepared && !emc->bad_state) {
+ if (!emc->bad_state) {
/* shouldn't ever happen in practice */
dev_err(emc->dev, "timing configuration can't be reverted\n");
emc->bad_state = true;
@@ -823,7 +847,13 @@ static int emc_clk_change_notify(struct notifier_block *nb,
switch (msg) {
case PRE_RATE_CHANGE:
+ /*
+ * Disable interrupt since read accesses are prohibited after
+ * stalling.
+ */
+ disable_irq(emc->irq);
err = emc_prepare_timing_change(emc, cnd->new_rate);
+ enable_irq(emc->irq);
break;
case ABORT_RATE_CHANGE:
@@ -1083,6 +1113,171 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int tegra_emc_debug_available_rates_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, tegra_emc_debug_available_rates_show,
+ inode->i_private);
+}
+
+static const struct file_operations tegra_emc_debug_available_rates_fops = {
+ .open = tegra_emc_debug_available_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ if (!emc->debugfs.root) {
+ dev_err(emc->dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ emc, &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
static int tegra_emc_probe(struct platform_device *pdev)
{
struct platform_device *mc;
@@ -1169,6 +1364,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, emc);
+ tegra_emc_debugfs_init(emc);
return 0;
@@ -1181,13 +1377,17 @@ unset_cb:
static int tegra_emc_suspend(struct device *dev)
{
struct tegra_emc *emc = dev_get_drvdata(dev);
+ int err;
- /*
- * Suspending in a bad state will hang machine. The "prepared" var
- * shall be always false here unless it's a kernel bug that caused
- * suspending in a wrong order.
- */
- if (WARN_ON(emc->prepared) || emc->bad_state)
+ /* take exclusive control over the clock's rate */
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err) {
+ dev_err(emc->dev, "failed to acquire clk: %d\n", err);
+ return err;
+ }
+
+ /* suspending in a bad state will hang the machine */
+ if (WARN(emc->bad_state, "hardware in a bad state\n"))
return -EINVAL;
emc->bad_state = true;
@@ -1202,6 +1402,8 @@ static int tegra_emc_resume(struct device *dev)
emc_setup_hw(emc);
emc->bad_state = false;
+ clk_rate_exclusive_put(emc->clk);
+
return 0;
}
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f9ac22413000..1074b882c57c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -100,19 +100,19 @@ struct buflist {
* Function prototypes. Called from OS entry point mptctl_ioctl.
* arg contents specific to function.
*/
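+/*
+ * Each handler now receives the MPT_ADAPTER that __mptctl_ioctl() has
+ * already looked up and validated, instead of re-deriving it from a
+ * user-supplied iocnum inside every handler.
+ */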
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
static void mptctl_remove(struct pci_dev *);
@@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
/*
* Private function calls.
*/
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
@@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* by TM and FW reloads.
*/
if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
- return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+ return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
} else if (cmd == MPTTARGETINFO) {
- return mptctl_gettargetinfo(arg);
+ return mptctl_gettargetinfo(iocp, arg);
} else if (cmd == MPTTEST) {
- return mptctl_readtest(arg);
+ return mptctl_readtest(iocp, arg);
} else if (cmd == MPTEVENTQUERY) {
- return mptctl_eventquery(arg);
+ return mptctl_eventquery(iocp, arg);
} else if (cmd == MPTEVENTENABLE) {
- return mptctl_eventenable(arg);
+ return mptctl_eventenable(iocp, arg);
} else if (cmd == MPTEVENTREPORT) {
- return mptctl_eventreport(arg);
+ return mptctl_eventreport(iocp, arg);
} else if (cmd == MPTFWREPLACE) {
- return mptctl_replace_fw(arg);
+ return mptctl_replace_fw(iocp, arg);
}
/* All of these commands require an interrupt or
@@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
if (cmd == MPTFWDOWNLOAD)
- ret = mptctl_fw_download(arg);
+ ret = mptctl_fw_download(iocp, arg);
else if (cmd == MPTCOMMAND)
- ret = mptctl_mpt_command(arg);
+ ret = mptctl_mpt_command(iocp, arg);
else if (cmd == MPTHARDRESET)
- ret = mptctl_do_reset(arg);
+ ret = mptctl_do_reset(iocp, arg);
else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
- ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+ ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
else if (cmd == HP_GETTARGETINFO)
- ret = mptctl_hp_targetinfo(arg);
+ ret = mptctl_hp_targetinfo(iocp, arg);
else
ret = -EINVAL;
@@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
struct mpt_ioctl_diag_reset krinfo;
- MPT_ADAPTER *iocp;
if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
@@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg)
return -EFAULT;
}
- if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
- __FILE__, __LINE__, krinfo.hdr.iocnum);
- return -ENODEV; /* (-6) No such device or address */
- }
-
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
iocp->name));
@@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
struct mpt_fw_xfer kfwdl;
@@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg)
return -EFAULT;
}
- return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+ return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
- MPT_ADAPTER *iocp;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
@@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
- if (mpt_verify_adapter(ioc, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
- ioc);
- return -ENODEV; /* (-6) No such device or address */
- } else {
-
- /* Valid device. Get a message frame and construct the FW download message.
- */
- if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
- return -EAGAIN;
- }
+ /* Valid device. Get a message frame and construct the FW download message.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+ return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
@@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
- dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
- iocp->name, ioc));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
@@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
* -ENODEV if no such device/adapter
*/
static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_iocinfo *karg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
- int iocnum;
unsigned int port;
int cim_rev;
struct scsi_device *sdev;
@@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
return PTR_ERR(karg);
}
- if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- kfree(karg);
- return -ENODEV;
- }
-
/* Verify the data transfer size is correct. */
if (karg->hdr.maxDataSize != data_size) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
@@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg;
- MPT_ADAPTER *ioc;
VirtDevice *vdevice;
char *pmem;
int *pdata;
- int iocnum;
int numDevices = 0;
int lun;
int maxWordsLeft;
@@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
/* Get the port number and set the maximum number of bytes
@@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_test __user *uarg = (void __user *) arg;
struct mpt_ioctl_test karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
@@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
@@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventquery karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
@@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
ioc->name));
karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
@@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
@@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
@@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventreport karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int numBytes, maxEvents, max;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
@@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
ioc->name));
@@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
@@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
/* If caching FW, Free the old FW image
@@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg)
* -ENOMEM if memory allocation error
*/
static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_command __user *uarg = (void __user *) arg;
struct mpt_ioctl_command karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int rc;
@@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
- rc = mptctl_do_mpt_command (karg, &uarg->MF);
+ rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
return rc;
}
@@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg)
* -EPERM if SCSI I/O and target is untagged
*/
static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
{
- MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *hdr;
char *psge;
@@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
int sgSize = 0; /* Num SG elements */
- int iocnum, flagsLength;
+ int flagsLength;
int sz, rc = 0;
int msgContext;
u16 req_idx;
@@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
bufIn.kptr = bufOut.kptr = NULL;
bufIn.len = bufOut.len = 0;
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
@@ -2418,17 +2321,15 @@ done_free_mem:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
hp_host_info_t __user *uarg = (void __user *) arg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
char *pbuf=NULL;
dma_addr_t buf_dma;
hp_host_info_t karg;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
- int iocnum;
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
@@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
ioc->name));
@@ -2659,15 +2554,13 @@ retry_wait:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
hp_target_info_t __user *uarg = (void __user *) arg;
SCSIDevicePage0_t *pg0_alloc;
SCSIDevicePage3_t *pg3_alloc;
- MPT_ADAPTER *ioc;
MPT_SCSI_HOST *hd = NULL;
hp_target_info_t karg;
- int iocnum;
int data_sz;
dma_addr_t page_dma;
CONFIGPARMS cfg;
@@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
@@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
kfw.fwlen = kfw32.fwlen;
kfw.bufp = compat_ptr(kfw32.bufp);
- ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+ ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
mutex_unlock(&iocp->ioctl_cmds.mutex);
@@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
/* Pass new structure to do_mpt_command
*/
- ret = mptctl_do_mpt_command (karg, &uarg->MF);
+ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
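
All of the mptctl hunks above follow one pattern: the per-handler mpt_verify_adapter() lookup on a user-supplied iocnum is hoisted into the ioctl dispatcher, which then passes an already-validated MPT_ADAPTER pointer down to every handler. A minimal, self-contained sketch of that pattern in plain C; the names (adapter, find_adapter, handler_a/b) are illustrative, not the driver's real symbols:

    #include <errno.h>
    #include <stddef.h>

    /* Illustrative stand-ins for MPT_ADAPTER and mpt_verify_adapter(). */
    struct adapter { int id; };

    static struct adapter adapters[2];

    static struct adapter *find_adapter(int iocnum)
    {
            if (iocnum < 0 || iocnum >= 2)
                    return NULL;            /* no such adapter */
            return &adapters[iocnum];
    }

    static int handler_a(struct adapter *ioc, unsigned long arg) { return 0; }
    static int handler_b(struct adapter *ioc, unsigned long arg) { return 0; }

    /* The dispatcher validates the adapter once; handlers trust the pointer. */
    static long dispatch(unsigned int cmd, unsigned long arg, int iocnum)
    {
            struct adapter *ioc = find_adapter(iocnum);

            if (!ioc)
                    return -ENODEV;         /* checked here, not per handler */

            switch (cmd) {
            case 1:  return handler_a(ioc, arg);
            case 2:  return handler_b(ioc, arg);
            default: return -EINVAL;
            }
    }

Besides removing duplicated lookup code, this means no handler can be reached with an unvalidated adapter number.
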
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index ebc00d47abf5..7d3784aa20e5 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -552,7 +552,7 @@ mpt_lan_close(struct net_device *dev)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
-mpt_lan_tx_timeout(struct net_device *dev)
+mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpt_lan_priv *priv = netdev_priv(dev);
MPT_ADAPTER *mpt_dev = priv->mpt_dev;
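
The mpt_lan_tx_timeout() signature change tracks the v5.6 core update that passes the index of the stuck queue to the .ndo_tx_timeout hook. A sketch of the updated hook shape in an illustrative driver; the recovery body here is an assumption, not mptlan's actual logic:

    #include <linux/netdevice.h>

    /* New-style watchdog hook: txqueue identifies the TX queue that stalled. */
    static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            netdev_warn(dev, "transmit timeout on queue %u\n", txqueue);
            netif_wake_queue(dev);          /* illustrative recovery */
    }

    static const struct net_device_ops example_netdev_ops = {
            .ndo_tx_timeout = example_tx_timeout,
    };
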
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 420900852166..2b203290e7b9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -758,6 +758,7 @@ config MFD_MAX77650
depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
Say Y here to add support for Maxim Semiconductor MAX77650 and
MAX77651 Power Management ICs. This is the core multifunction
@@ -1065,7 +1066,7 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- tristate "SAMSUNG Electronics PMIC Series Support"
+ tristate "Samsung Electronics PMIC Series Support"
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
@@ -1906,6 +1907,21 @@ config MFD_ROHM_BD70528
10 bits SAR ADC for battery temperature monitor and 1S battery
charger.
+config MFD_ROHM_BD71828
+ tristate "ROHM BD71828 Power Management IC"
+ depends on I2C=y
+ depends on OF
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+ Select this option to get support for the ROHM BD71828 Power
+ Management IC. BD71828GW is a single-chip power management IC for
+ battery-powered portable devices. The IC integrates 7 buck
+ converters, 7 LDOs, and a 1500 mA single-cell linear charger.
+ Also included is a Coulomb counter, a real-time clock (RTC), and
+ a 32.768 kHz clock gate.
+
config MFD_STM32_LPTIMER
tristate "Support for STM32 Low-Power Timer"
depends on (ARCH_STM32 && OF) || COMPILE_TEST
@@ -1960,6 +1976,18 @@ config MFD_STMFX
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_WCD934X
+ tristate "Support for WCD9340/WCD9341 Codec"
+ depends on SLIMBUS
+ select REGMAP
+ select REGMAP_SLIMBUS
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+ Support for the Qualcomm WCD9340/WCD9341 Codec.
+ This driver provides common support for the WCD934x audio codec and
+ its associated Pin Controller, Soundwire Controller and Audio codec.
+
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
@@ -2004,5 +2032,18 @@ config RAVE_SP_CORE
Select this to get support for the Supervisory Processor
device found on several devices in RAVE line of hardware.
+config SGI_MFD_IOC3
+ tristate "SGI IOC3 core driver"
+ depends on PCI && MIPS && 64BIT
+ select MFD_CORE
+ help
+ This option enables basic support for SGI IOC3-based controller
+ cards. It does not enable any specific functions on such a card,
+ but provides the necessary infrastructure for other drivers to
+ utilize.
+
+ If you have an SGI Origin, Octane, or a PCI IOC3 card,
+ then say Y. Otherwise say N.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index aed99f08739f..b83f172545e1 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -58,6 +58,7 @@ endif
ifeq ($(CONFIG_MFD_CS47L24),y)
obj-$(CONFIG_MFD_ARIZONA) += cs47l24-tables.o
endif
+obj-$(CONFIG_MFD_WCD934X) += wcd934x.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
wm831x-objs += wm831x-auxadc.o
@@ -252,6 +253,8 @@ obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o
+obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o
obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
obj-$(CONFIG_MFD_STMFX) += stmfx.o
+obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index bafc729fc434..a3bac9da8cbb 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -631,8 +631,8 @@ static const struct mfd_cell ab8500_devs[] = {
NULL, NULL, 0, 0, "stericsson,ab8500-ext-regulator"),
OF_MFD_CELL("ab8500-regulator",
NULL, NULL, 0, 0, "stericsson,ab8500-regulator"),
- OF_MFD_CELL("abx500-clk",
- NULL, NULL, 0, 0, "stericsson,abx500-clk"),
+ OF_MFD_CELL("ab8500-clk",
+ NULL, NULL, 0, 0, "stericsson,ab8500-clk"),
OF_MFD_CELL("ab8500-gpadc",
NULL, NULL, 0, 0, "stericsson,ab8500-gpadc"),
OF_MFD_CELL("ab8500-rtc",
@@ -718,17 +718,20 @@ static const struct mfd_cell ab8505_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
+ .of_compatible = "stericsson,ab8500-debug",
},
#endif
{
.name = "ab8500-sysctrl",
+ .of_compatible = "stericsson,ab8500-sysctrl",
},
{
.name = "ab8500-regulator",
+ .of_compatible = "stericsson,ab8505-regulator",
},
{
.name = "abx500-clk",
- .of_compatible = "stericsson,abx500-clk",
+ .of_compatible = "stericsson,ab8500-clk",
},
{
.name = "ab8500-gpadc",
@@ -736,25 +739,32 @@ static const struct mfd_cell ab8505_devs[] = {
},
{
.name = "ab8500-rtc",
+ .of_compatible = "stericsson,ab8500-rtc",
},
{
.name = "ab8500-acc-det",
+ .of_compatible = "stericsson,ab8500-acc-det",
},
{
.name = "ab8500-poweron-key",
+ .of_compatible = "stericsson,ab8500-poweron-key",
},
{
.name = "ab8500-pwm",
+ .of_compatible = "stericsson,ab8500-pwm",
.id = 1,
},
{
.name = "pinctrl-ab8505",
+ .of_compatible = "stericsson,ab8505-gpio",
},
{
.name = "ab8500-usb",
+ .of_compatible = "stericsson,ab8500-usb",
},
{
.name = "ab8500-codec",
+ .of_compatible = "stericsson,ab8500-codec",
},
{
.name = "ab-iddet",
@@ -1276,7 +1286,7 @@ static int ab8500_probe(struct platform_device *pdev)
static const struct platform_device_id ab8500_id[] = {
{ "ab8500-core", AB8500_VERSION_AB8500 },
- { "ab8505-i2c", AB8500_VERSION_AB8505 },
+ { "ab8505-core", AB8500_VERSION_AB8505 },
{ "ab9540-i2c", AB8500_VERSION_AB9540 },
{ "ab8540-i2c", AB8500_VERSION_AB8540 },
{ }
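
The ab8500/ab8505 cell updates above mostly add .of_compatible strings, which let the MFD core bind each sub-device to its matching child node in the device tree so the sub-driver can read its own DT properties. A minimal sketch of the idiom with illustrative names:

    #include <linux/mfd/core.h>

    static const struct mfd_cell example_cells[] = {
            {
                    .name          = "example-rtc",
                    /* matched against the parent device's DT child nodes */
                    .of_compatible = "vendor,example-rtc",
            },
    };
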
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 64013c57a920..3c2414ba4b01 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -19,6 +19,7 @@
struct atmel_hlcdc_regmap {
void __iomem *regs;
+ struct device *dev;
};
static const struct mfd_cell atmel_hlcdc_cells[] = {
@@ -39,10 +40,17 @@ static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
if (reg <= ATMEL_HLCDC_DIS) {
u32 status;
-
- readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR,
- status, !(status & ATMEL_HLCDC_SIP),
- 1, 100);
+ int ret;
+
+ ret = readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR,
+ status,
+ !(status & ATMEL_HLCDC_SIP),
+ 1, 100);
+ if (ret) {
+ dev_err(hregmap->dev,
+ "Timeout! Clock domain synchronization is in progress!\n");
+ return ret;
+ }
}
writel(val, hregmap->regs + reg);
@@ -90,6 +98,8 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
if (IS_ERR(hregmap->regs))
return PTR_ERR(hregmap->regs);
+ hregmap->dev = &pdev->dev;
+
hlcdc->irq = platform_get_irq(pdev, 0);
if (hlcdc->irq < 0)
return hlcdc->irq;
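
The atmel-hlcdc fix stops ignoring the poll result: readl_poll_timeout_atomic() returns 0 on success or -ETIMEDOUT on expiry, and the subsequent register write should not happen once the wait has failed. The check-then-write idiom, sketched with illustrative register offsets and bits:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define EX_STATUS       0x04            /* illustrative status register */
    #define EX_BUSY         BIT(0)          /* illustrative busy bit */

    static int example_write_when_idle(void __iomem *regs, u32 reg, u32 val)
    {
            u32 status;
            int ret;

            ret = readl_poll_timeout_atomic(regs + EX_STATUS, status,
                                            !(status & EX_BUSY), 1, 100);
            if (ret)
                    return ret;             /* -ETIMEDOUT: do not write */

            writel(val, regs + reg);
            return 0;
    }
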
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index a4aaadaa0cb0..aa59496e4376 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -126,7 +126,7 @@ static const struct regmap_range axp288_writeable_ranges[] = {
static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
- regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+ regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index c4b977a5dd96..39e611695053 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -5,8 +5,8 @@
* Copyright (C) 2014 Google, Inc.
*/
+#include <linux/kconfig.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of_platform.h>
@@ -87,6 +87,10 @@ static const struct mfd_cell cros_usbpd_charger_cells[] = {
{ .name = "cros-usbpd-logger", },
};
+static const struct mfd_cell cros_usbpd_notify_cells[] = {
+ { .name = "cros-usbpd-notify", },
+};
+
static const struct cros_feature_to_cells cros_subdevices[] = {
{
.id = EC_FEATURE_CEC,
@@ -203,6 +207,23 @@ static int ec_device_probe(struct platform_device *pdev)
}
/*
+ * The PD notifier driver cell is separate since it only needs to be
+ * explicitly added on platforms that don't have the PD notifier ACPI
+ * device entry defined.
+ */
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) {
+ retval = mfd_add_hotplug_devices(ec->dev,
+ cros_usbpd_notify_cells,
+ ARRAY_SIZE(cros_usbpd_notify_cells));
+ if (retval)
+ dev_err(ec->dev,
+ "failed to add PD notify devices: %d\n",
+ retval);
+ }
+ }
+
+ /*
* The following subdevices cannot be detected by sending the
* EC_FEATURE_GET_CMD to the Embedded Controller device.
*/
diff --git a/drivers/mfd/cs47l15-tables.c b/drivers/mfd/cs47l15-tables.c
index f81b45336690..3c77f0a24e9b 100644
--- a/drivers/mfd/cs47l15-tables.c
+++ b/drivers/mfd/cs47l15-tables.c
@@ -112,6 +112,7 @@ static const struct reg_default cs47l15_reg_default[] = {
{ 0x000001dd, 0x0011 }, /* R477 (0x1DD) - FLL AO Control 11 */
{ 0x00000218, 0x00e6 }, /* R536 (0x218) - Mic Bias Ctrl 1 */
{ 0x0000021c, 0x0222 }, /* R540 (0x21C) - Mic Bias Ctrl 5 */
+ { 0x00000293, 0x0080 }, /* R659 (0x293) - Accessory Detect Mode 1 */
{ 0x00000299, 0x0000 }, /* R665 (0x299) - Headphone Detect 0 */
{ 0x0000029b, 0x0000 }, /* R667 (0x29B) - Headphone Detect 1 */
{ 0x000002a2, 0x0010 }, /* R674 (0x2A2) - Mic Detect 1 Control 0 */
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index e69626867c26..419c73533401 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -233,6 +233,14 @@ static struct resource da9062_onkey_resources[] = {
DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
};
+static struct resource da9062_gpio_resources[] = {
+ DEFINE_RES_NAMED(DA9062_IRQ_GPI0, 1, "GPI0", IORESOURCE_IRQ),
+ DEFINE_RES_NAMED(DA9062_IRQ_GPI1, 1, "GPI1", IORESOURCE_IRQ),
+ DEFINE_RES_NAMED(DA9062_IRQ_GPI2, 1, "GPI2", IORESOURCE_IRQ),
+ DEFINE_RES_NAMED(DA9062_IRQ_GPI3, 1, "GPI3", IORESOURCE_IRQ),
+ DEFINE_RES_NAMED(DA9062_IRQ_GPI4, 1, "GPI4", IORESOURCE_IRQ),
+};
+
static const struct mfd_cell da9062_devs[] = {
{
.name = "da9062-core",
@@ -248,7 +256,7 @@ static const struct mfd_cell da9062_devs[] = {
.name = "da9062-watchdog",
.num_resources = ARRAY_SIZE(da9062_wdt_resources),
.resources = da9062_wdt_resources,
- .of_compatible = "dlg,da9062-wdt",
+ .of_compatible = "dlg,da9062-watchdog",
},
{
.name = "da9062-thermal",
@@ -266,7 +274,13 @@ static const struct mfd_cell da9062_devs[] = {
.name = "da9062-onkey",
.num_resources = ARRAY_SIZE(da9062_onkey_resources),
.resources = da9062_onkey_resources,
- .of_compatible = "dlg,da9062-onkey",
+ .of_compatible = "dlg,da9062-onkey",
+ },
+ {
+ .name = "da9062-gpio",
+ .num_resources = ARRAY_SIZE(da9062_gpio_resources),
+ .resources = da9062_gpio_resources,
+ .of_compatible = "dlg,da9062-gpio",
},
};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 57ac58b4b5f3..0452b43b0423 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -542,102 +542,6 @@ static struct dsiescclk dsiescclk[3] = {
}
};
-
-/*
-* Used by MCDE to setup all necessary PRCMU registers
-*/
-#define PRCMU_RESET_DSIPLL 0x00004000
-#define PRCMU_UNCLAMP_DSIPLL 0x00400800
-
-#define PRCMU_CLK_PLL_DIV_SHIFT 0
-#define PRCMU_CLK_PLL_SW_SHIFT 5
-#define PRCMU_CLK_38 (1 << 9)
-#define PRCMU_CLK_38_SRC (1 << 10)
-#define PRCMU_CLK_38_DIV (1 << 11)
-
-/* PLLDIV=12, PLLSW=4 (PLLDDR) */
-#define PRCMU_DSI_CLOCK_SETTING 0x0000008C
-
-/* DPI 50000000 Hz */
-#define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
- (16 << PRCMU_CLK_PLL_DIV_SHIFT))
-#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00
-
-/* D=101, N=1, R=4, SELDIV2=0 */
-#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
-
-#define PRCMU_ENABLE_PLLDSI 0x00000001
-#define PRCMU_DISABLE_PLLDSI 0x00000000
-#define PRCMU_RELEASE_RESET_DSS 0x0000400C
-#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202
-/* ESC clk, div0=1, div1=1, div2=3 */
-#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101
-#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101
-#define PRCMU_DSI_RESET_SW 0x00000007
-
-#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
-
-int db8500_prcmu_enable_dsipll(void)
-{
- int i;
-
- /* Clear DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
- /* Unclamp DSIPLL in/out */
- writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
-
- /* Set DSI PLL FREQ */
- writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
- writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
- /* Enable Escape clocks */
- writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
-
- /* Start DSI PLL */
- writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
- /* Reset DSI PLL */
- writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
- for (i = 0; i < 10; i++) {
- if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
- == PRCMU_PLLDSI_LOCKP_LOCKED)
- break;
- udelay(100);
- }
- /* Set DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
- return 0;
-}
-
-int db8500_prcmu_disable_dsipll(void)
-{
- /* Disable dsi pll */
- writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
- /* Disable escapeclock */
- writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
- return 0;
-}
-
-int db8500_prcmu_set_display_clocks(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clk_mgt_lock, flags);
-
- /* Grab the HW semaphore. */
- while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
- cpu_relax();
-
- writel(PRCMU_DSI_CLOCK_SETTING, prcmu_base + PRCM_HDMICLK_MGT);
- writel(PRCMU_DSI_LP_CLOCK_SETTING, prcmu_base + PRCM_TVCLK_MGT);
- writel(PRCMU_DPI_CLOCK_SETTING, prcmu_base + PRCM_LCDCLK_MGT);
-
- /* Release the HW semaphore. */
- writel(0, PRCM_SEM);
-
- spin_unlock_irqrestore(&clk_mgt_lock, flags);
-
- return 0;
-}
-
u32 db8500_prcmu_read(unsigned int reg)
{
return readl(prcmu_base + reg);
@@ -3060,30 +2964,44 @@ static const struct mfd_cell db8500_prcmu_devs[] = {
static int db8500_prcmu_register_ab8500(struct device *parent)
{
struct device_node *np;
- struct resource ab8500_resource;
+ struct resource ab850x_resource;
const struct mfd_cell ab8500_cell = {
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
.id = AB8500_VERSION_AB8500,
- .resources = &ab8500_resource,
+ .resources = &ab850x_resource,
.num_resources = 1,
};
+ const struct mfd_cell ab8505_cell = {
+ .name = "ab8505-core",
+ .of_compatible = "stericsson,ab8505",
+ .id = AB8500_VERSION_AB8505,
+ .resources = &ab850x_resource,
+ .num_resources = 1,
+ };
+ const struct mfd_cell *ab850x_cell;
if (!parent->of_node)
return -ENODEV;
/* Look up the device node, sneak the IRQ out of it */
for_each_child_of_node(parent->of_node, np) {
- if (of_device_is_compatible(np, ab8500_cell.of_compatible))
+ if (of_device_is_compatible(np, ab8500_cell.of_compatible)) {
+ ab850x_cell = &ab8500_cell;
break;
+ }
+ if (of_device_is_compatible(np, ab8505_cell.of_compatible)) {
+ ab850x_cell = &ab8505_cell;
+ break;
+ }
}
if (!np) {
- dev_info(parent, "could not find AB8500 node in the device tree\n");
+ dev_info(parent, "could not find AB850X node in the device tree\n");
return -ENODEV;
}
- of_irq_to_resource_table(np, &ab8500_resource, 1);
+ of_irq_to_resource_table(np, &ab850x_resource, 1);
- return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
+ return mfd_add_devices(parent, 0, ab850x_cell, 1, NULL, 0, NULL);
}
/**
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 381593fbe50f..7841c11411d0 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -722,6 +722,8 @@ static int dln2_probe(struct usb_interface *interface,
const struct usb_device_id *usb_id)
{
struct usb_host_interface *hostif = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *epin;
+ struct usb_endpoint_descriptor *epout;
struct device *dev = &interface->dev;
struct dln2_dev *dln2;
int ret;
@@ -731,12 +733,19 @@ static int dln2_probe(struct usb_interface *interface,
hostif->desc.bNumEndpoints < 2)
return -ENODEV;
+ epin = &hostif->endpoint[0].desc;
+ epout = &hostif->endpoint[1].desc;
+ if (!usb_endpoint_is_bulk_out(epout))
+ return -ENODEV;
+ if (!usb_endpoint_is_bulk_in(epin))
+ return -ENODEV;
+
dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
if (!dln2)
return -ENOMEM;
- dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
- dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
+ dln2->ep_out = epout->bEndpointAddress;
+ dln2->ep_in = epin->bEndpointAddress;
dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
dln2->interface = interface;
usb_set_intfdata(interface, dln2);
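
The dln2 change is a defensive-probing fix: endpoint order and type come from the device and cannot be trusted, so the driver now verifies that both descriptors are the expected bulk endpoints before caching their addresses. The same idiom in isolation, with an illustrative function name:

    #include <linux/errno.h>
    #include <linux/usb.h>

    static int example_get_bulk_eps(struct usb_host_interface *hostif,
                                    u8 *ep_in, u8 *ep_out)
    {
            struct usb_endpoint_descriptor *in = &hostif->endpoint[0].desc;
            struct usb_endpoint_descriptor *out = &hostif->endpoint[1].desc;

            /* Reject interfaces whose endpoints are not what we expect. */
            if (!usb_endpoint_is_bulk_in(in) || !usb_endpoint_is_bulk_out(out))
                    return -ENODEV;

            *ep_in = in->bEndpointAddress;
            *ep_out = out->bEndpointAddress;
            return 0;
    }
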
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index b33030e3385c..c40a6c7d0cf8 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -240,6 +240,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&bxt_i2c_info },
+ /* JSL */
+ { PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x4daf), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4de8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4deb), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&spt_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 47188df3080d..ddd64f9e3341 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -9,8 +9,6 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -25,17 +23,6 @@
#define BYT_CRC_HRV 2
#define CHT_CRC_HRV 3
-/* Lookup table for the Panel Enable/Disable line as GPIO signals */
-static struct gpiod_lookup_table panel_gpio_table = {
- /* Intel GFX is consumer */
- .dev_id = "0000:00:02.0",
- .table = {
- /* Panel EN/DISABLE */
- GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
/* PWM consumed by the Intel GFX */
static struct pwm_lookup crc_pwm_lookup[] = {
PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
@@ -96,9 +83,6 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
if (ret)
dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
- /* Add lookup table binding for Panel Control to the GPIO Chip */
- gpiod_add_lookup_table(&panel_gpio_table);
-
/* Add lookup table for crc-pwm */
pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
@@ -121,9 +105,6 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
- /* Remove lookup table for Panel Control from the GPIO Chip */
- gpiod_remove_lookup_table(&panel_gpio_table);
-
/* remove crc-pwm lookup table */
pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
new file mode 100644
index 000000000000..02998d4eb74b
--- /dev/null
+++ b/drivers/mfd/ioc3.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 multifunction device driver
+ *
+ * Copyright (C) 2018, 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * Based on work by:
+ * Stanislaw Skowronek <skylark@unaligned.org>
+ * Joshua Kinard <kumba@gentoo.org>
+ * Brent Casavant <bcasavan@sgi.com> - IOC4 master driver
+ * Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+#include <linux/rtc/ds1685.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/sn/ioc3.h>
+
+#define IOC3_IRQ_SERIAL_A 6
+#define IOC3_IRQ_SERIAL_B 15
+#define IOC3_IRQ_KBD 22
+
+/* Bitmask for selecting which IRQs are level triggered */
+#define IOC3_LVL_MASK (BIT(IOC3_IRQ_SERIAL_A) | BIT(IOC3_IRQ_SERIAL_B))
+
+#define M48T35_REG_SIZE 32768 /* size of m48t35 registers */
+
+/* 1.2 us latency timer (40 cycles at 33 MHz) */
+#define IOC3_LATENCY 40
+
+struct ioc3_priv_data {
+ struct irq_domain *domain;
+ struct ioc3 __iomem *regs;
+ struct pci_dev *pdev;
+ int domain_irq;
+};
+
+static void ioc3_irq_ack(struct irq_data *d)
+{
+ struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), &ipd->regs->sio_ir);
+}
+
+static void ioc3_irq_mask(struct irq_data *d)
+{
+ struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), &ipd->regs->sio_iec);
+}
+
+static void ioc3_irq_unmask(struct irq_data *d)
+{
+ struct ioc3_priv_data *ipd = irq_data_get_irq_chip_data(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), &ipd->regs->sio_ies);
+}
+
+static struct irq_chip ioc3_irq_chip = {
+ .name = "IOC3",
+ .irq_ack = ioc3_irq_ack,
+ .irq_mask = ioc3_irq_mask,
+ .irq_unmask = ioc3_irq_unmask,
+};
+
+static int ioc3_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ /* Set level IRQs for every interrupt contained in IOC3_LVL_MASK */
+ if (BIT(hwirq) & IOC3_LVL_MASK)
+ irq_set_chip_and_handler(irq, &ioc3_irq_chip, handle_level_irq);
+ else
+ irq_set_chip_and_handler(irq, &ioc3_irq_chip, handle_edge_irq);
+
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static void ioc3_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ioc3_irq_domain_ops = {
+ .map = ioc3_irq_domain_map,
+ .unmap = ioc3_irq_domain_unmap,
+};
+
+static void ioc3_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct ioc3_priv_data *ipd = domain->host_data;
+ struct ioc3 __iomem *regs = ipd->regs;
+ u32 pending, mask;
+ unsigned int irq;
+
+ pending = readl(&regs->sio_ir);
+ mask = readl(&regs->sio_ies);
+ pending &= mask; /* Mask off interrupts that are not enabled */
+
+ if (pending) {
+ irq = irq_find_mapping(domain, __ffs(pending));
+ if (irq)
+ generic_handle_irq(irq);
+ } else {
+ spurious_interrupt();
+ }
+}
+
+/*
+ * System boards/BaseIOs use additional interrupt pins of the bridge
+ * ASIC to which the IOC3 is connected. Since the IOC3 MFD driver
+ * knows the wiring of these extra pins, we use the map_irq function
+ * to get the interrupts activated.
+ */
+static int ioc3_map_irq(struct pci_dev *pdev, int slot, int pin)
+{
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(pdev->bus);
+
+ return hbrg->map_irq(pdev, slot, pin);
+}
+
+static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq)
+{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_fwnode("IOC3");
+ if (!fn)
+ goto err;
+
+ domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd);
+ if (!domain)
+ goto err;
+
+ irq_domain_free_fwnode(fn);
+ ipd->domain = domain;
+
+ irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain);
+ ipd->domain_irq = irq;
+ return 0;
+
+err:
+ dev_err(&ipd->pdev->dev, "irq domain setup failed\n");
+ return -ENOMEM;
+}
+
+static struct resource ioc3_uarta_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uarta),
+ sizeof_field(struct ioc3, sregs.uarta)),
+ DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_A)
+};
+
+static struct resource ioc3_uartb_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, sregs.uartb),
+ sizeof_field(struct ioc3, sregs.uartb)),
+ DEFINE_RES_IRQ(IOC3_IRQ_SERIAL_B)
+};
+
+static struct mfd_cell ioc3_serial_cells[] = {
+ {
+ .name = "ioc3-serial8250",
+ .resources = ioc3_uarta_resources,
+ .num_resources = ARRAY_SIZE(ioc3_uarta_resources),
+ },
+ {
+ .name = "ioc3-serial8250",
+ .resources = ioc3_uartb_resources,
+ .num_resources = ARRAY_SIZE(ioc3_uartb_resources),
+ }
+};
+
+static int ioc3_serial_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ /* Set gpio pins for RS232/RS422 mode selection */
+ writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL,
+ &ipd->regs->gpcr_s);
+ /* Select RS232 mode for uart a */
+ writel(0, &ipd->regs->gppr[6]);
+ /* Select RS232 mode for uart b */
+ writel(0, &ipd->regs->gppr[7]);
+
+ /* Switch both ports to 16650 mode */
+ writel(readl(&ipd->regs->port_a.sscr) & ~SSCR_DMA_EN,
+ &ipd->regs->port_a.sscr);
+ writel(readl(&ipd->regs->port_b.sscr) & ~SSCR_DMA_EN,
+ &ipd->regs->port_b.sscr);
+ udelay(1000); /* Wait until mode switch is done */
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_serial_cells, ARRAY_SIZE(ioc3_serial_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret) {
+ dev_err(&ipd->pdev->dev, "Failed to add 16550 subdevs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct resource ioc3_kbd_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, serio),
+ sizeof_field(struct ioc3, serio)),
+ DEFINE_RES_IRQ(IOC3_IRQ_KBD)
+};
+
+static struct mfd_cell ioc3_kbd_cells[] = {
+ {
+ .name = "ioc3-kbd",
+ .resources = ioc3_kbd_resources,
+ .num_resources = ARRAY_SIZE(ioc3_kbd_resources),
+ }
+};
+
+static int ioc3_kbd_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_kbd_cells, ARRAY_SIZE(ioc3_kbd_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret) {
+ dev_err(&ipd->pdev->dev, "Failed to add 16550 subdevs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct resource ioc3_eth_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, eth),
+ sizeof_field(struct ioc3, eth)),
+ DEFINE_RES_MEM(offsetof(struct ioc3, ssram),
+ sizeof_field(struct ioc3, ssram)),
+ DEFINE_RES_IRQ(0)
+};
+
+static struct resource ioc3_w1_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, mcr),
+ sizeof_field(struct ioc3, mcr)),
+};
+static struct sgi_w1_platform_data ioc3_w1_platform_data;
+
+static struct mfd_cell ioc3_eth_cells[] = {
+ {
+ .name = "ioc3-eth",
+ .resources = ioc3_eth_resources,
+ .num_resources = ARRAY_SIZE(ioc3_eth_resources),
+ },
+ {
+ .name = "sgi_w1",
+ .resources = ioc3_w1_resources,
+ .num_resources = ARRAY_SIZE(ioc3_w1_resources),
+ .platform_data = &ioc3_w1_platform_data,
+ .pdata_size = sizeof(ioc3_w1_platform_data),
+ }
+};
+
+static int ioc3_eth_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ /* Enable One-Wire bus */
+ writel(GPCR_MLAN_EN, &ipd->regs->gpcr_s);
+
+ /* Generate unique identifier */
+ snprintf(ioc3_w1_platform_data.dev_id,
+ sizeof(ioc3_w1_platform_data.dev_id), "ioc3-%012llx",
+ ipd->pdev->resource->start);
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_eth_cells, ARRAY_SIZE(ioc3_eth_cells),
+ &ipd->pdev->resource[0], ipd->pdev->irq, NULL);
+ if (ret) {
+ dev_err(&ipd->pdev->dev, "Failed to add ETH/W1 subdev\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct resource ioc3_m48t35_resources[] = {
+ DEFINE_RES_MEM(IOC3_BYTEBUS_DEV0, M48T35_REG_SIZE)
+};
+
+static struct mfd_cell ioc3_m48t35_cells[] = {
+ {
+ .name = "rtc-m48t35",
+ .resources = ioc3_m48t35_resources,
+ .num_resources = ARRAY_SIZE(ioc3_m48t35_resources),
+ }
+};
+
+static int ioc3_m48t35_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = mfd_add_devices(&ipd->pdev->dev, PLATFORM_DEVID_AUTO,
+ ioc3_m48t35_cells, ARRAY_SIZE(ioc3_m48t35_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret)
+ dev_err(&ipd->pdev->dev, "Failed to add M48T35 subdev\n");
+
+ return ret;
+}
+
+static struct ds1685_rtc_platform_data ip30_rtc_platform_data = {
+ .bcd_mode = false,
+ .no_irq = false,
+ .uie_unsupported = true,
+ .access_type = ds1685_reg_indirect,
+};
+
+static struct resource ioc3_rtc_ds1685_resources[] = {
+ DEFINE_RES_MEM(IOC3_BYTEBUS_DEV1, 1),
+ DEFINE_RES_MEM(IOC3_BYTEBUS_DEV2, 1),
+ DEFINE_RES_IRQ(0)
+};
+
+static struct mfd_cell ioc3_ds1685_cells[] = {
+ {
+ .name = "rtc-ds1685",
+ .resources = ioc3_rtc_ds1685_resources,
+ .num_resources = ARRAY_SIZE(ioc3_rtc_ds1685_resources),
+ .platform_data = &ip30_rtc_platform_data,
+ .pdata_size = sizeof(ip30_rtc_platform_data),
+ .id = PLATFORM_DEVID_NONE,
+ }
+};
+
+static int ioc3_ds1685_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, irq;
+
+ irq = ioc3_map_irq(ipd->pdev, 6, 0);
+
+ ret = mfd_add_devices(&ipd->pdev->dev, 0, ioc3_ds1685_cells,
+ ARRAY_SIZE(ioc3_ds1685_cells),
+ &ipd->pdev->resource[0], irq, NULL);
+ if (ret)
+ dev_err(&ipd->pdev->dev, "Failed to add DS1685 subdev\n");
+
+ return ret;
+}
+
+static struct resource ioc3_leds_resources[] = {
+ DEFINE_RES_MEM(offsetof(struct ioc3, gppr[0]),
+ sizeof_field(struct ioc3, gppr[0])),
+ DEFINE_RES_MEM(offsetof(struct ioc3, gppr[1]),
+ sizeof_field(struct ioc3, gppr[1])),
+};
+
+static struct mfd_cell ioc3_led_cells[] = {
+ {
+ .name = "ip30-leds",
+ .resources = ioc3_leds_resources,
+ .num_resources = ARRAY_SIZE(ioc3_leds_resources),
+ .id = PLATFORM_DEVID_NONE,
+ }
+};
+
+static int ioc3_led_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = mfd_add_devices(&ipd->pdev->dev, 0, ioc3_led_cells,
+ ARRAY_SIZE(ioc3_led_cells),
+ &ipd->pdev->resource[0], 0, ipd->domain);
+ if (ret)
+ dev_err(&ipd->pdev->dev, "Failed to add LED subdev\n");
+
+ return ret;
+}
+
+static int ip27_baseio_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_m48t35_setup(ipd);
+}
+
+static int ip27_baseio6g_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_m48t35_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_kbd_setup(ipd);
+}
+
+static int ip27_mio_setup(struct ioc3_priv_data *ipd)
+{
+ int ret;
+
+ ret = ioc3_irq_domain_setup(ipd, ipd->pdev->irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_kbd_setup(ipd);
+}
+
+static int ip30_sysboard_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_serial_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_kbd_setup(ipd);
+ if (ret)
+ return ret;
+
+ ret = ioc3_ds1685_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_led_setup(ipd);
+}
+
+static int ioc3_menet_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_serial_setup(ipd);
+}
+
+static int ioc3_menet4_setup(struct ioc3_priv_data *ipd)
+{
+ return ioc3_eth_setup(ipd);
+}
+
+static int ioc3_cad_duo_setup(struct ioc3_priv_data *ipd)
+{
+ int ret, io_irq;
+
+ io_irq = ioc3_map_irq(ipd->pdev, PCI_SLOT(ipd->pdev->devfn),
+ PCI_INTERRUPT_INTB);
+ ret = ioc3_irq_domain_setup(ipd, io_irq);
+ if (ret)
+ return ret;
+
+ ret = ioc3_eth_setup(ipd);
+ if (ret)
+ return ret;
+
+ return ioc3_kbd_setup(ipd);
+}
+
+/* Helper macro for filling ioc3_info array */
+#define IOC3_SID(_name, _sid, _setup) \
+ { \
+ .name = _name, \
+ .sid = PCI_VENDOR_ID_SGI | (IOC3_SUBSYS_ ## _sid << 16), \
+ .setup = _setup, \
+ }
+
+static struct {
+ const char *name;
+ u32 sid;
+ int (*setup)(struct ioc3_priv_data *ipd);
+} ioc3_infos[] = {
+ IOC3_SID("IP27 BaseIO6G", IP27_BASEIO6G, &ip27_baseio6g_setup),
+ IOC3_SID("IP27 MIO", IP27_MIO, &ip27_mio_setup),
+ IOC3_SID("IP27 BaseIO", IP27_BASEIO, &ip27_baseio_setup),
+ IOC3_SID("IP29 System Board", IP29_SYSBOARD, &ip27_baseio6g_setup),
+ IOC3_SID("IP30 System Board", IP30_SYSBOARD, &ip30_sysboard_setup),
+ IOC3_SID("MENET", MENET, &ioc3_menet_setup),
+ IOC3_SID("MENET4", MENET4, &ioc3_menet4_setup)
+};
+#undef IOC3_SID
+
+static int ioc3_setup(struct ioc3_priv_data *ipd)
+{
+ u32 sid;
+ int i;
+
+ /* Clear IRQs */
+ writel(~0, &ipd->regs->sio_iec);
+ writel(~0, &ipd->regs->sio_ir);
+ writel(0, &ipd->regs->eth.eier);
+ writel(~0, &ipd->regs->eth.eisr);
+
+ /* Read subsystem vendor id and subsystem id */
+ pci_read_config_dword(ipd->pdev, PCI_SUBSYSTEM_VENDOR_ID, &sid);
+
+ for (i = 0; i < ARRAY_SIZE(ioc3_infos); i++)
+ if (sid == ioc3_infos[i].sid) {
+ pr_info("ioc3: %s\n", ioc3_infos[i].name);
+ return ioc3_infos[i].setup(ipd);
+ }
+
+ /* Treat everything not identified by PCI subid as CAD DUO */
+ pr_info("ioc3: CAD DUO\n");
+ return ioc3_cad_duo_setup(ipd);
+}
+
+static int ioc3_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct ioc3_priv_data *ipd;
+ struct ioc3 __iomem *regs;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, IOC3_LATENCY);
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
+ goto out_disable_device;
+ }
+
+ /* Set up per-IOC3 data */
+ ipd = devm_kzalloc(&pdev->dev, sizeof(struct ioc3_priv_data),
+ GFP_KERNEL);
+ if (!ipd) {
+ ret = -ENOMEM;
+ goto out_disable_device;
+ }
+ ipd->pdev = pdev;
+
+ /*
+ * Map all IOC3 registers. These are shared between subdevices
+ * so the main IOC3 module manages them.
+ */
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_warn(&pdev->dev, "ioc3: Unable to remap PCI BAR for %s.\n",
+ pci_name(pdev));
+ ret = -ENOMEM;
+ goto out_disable_device;
+ }
+ ipd->regs = regs;
+
+ /* Track PCI-device specific data */
+ pci_set_drvdata(pdev, ipd);
+
+ ret = ioc3_setup(ipd);
+ if (ret) {
+ /* Remove all already added MFD devices */
+ mfd_remove_devices(&ipd->pdev->dev);
+ if (ipd->domain) {
+ irq_domain_remove(ipd->domain);
+ free_irq(ipd->domain_irq, (void *)ipd);
+ }
+ pci_iounmap(pdev, regs);
+ goto out_disable_device;
+ }
+
+ return 0;
+
+out_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void ioc3_mfd_remove(struct pci_dev *pdev)
+{
+ struct ioc3_priv_data *ipd;
+
+ ipd = pci_get_drvdata(pdev);
+
+ /* Clear and disable all IRQs */
+ writel(~0, &ipd->regs->sio_iec);
+ writel(~0, &ipd->regs->sio_ir);
+
+ /* Release resources */
+ mfd_remove_devices(&ipd->pdev->dev);
+ if (ipd->domain) {
+ irq_domain_remove(ipd->domain);
+ free_irq(ipd->domain_irq, (void *)ipd);
+ }
+ pci_iounmap(pdev, ipd->regs);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id ioc3_mfd_id_table[] = {
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, ioc3_mfd_id_table);
+
+static struct pci_driver ioc3_mfd_driver = {
+ .name = "IOC3",
+ .id_table = ioc3_mfd_id_table,
+ .probe = ioc3_mfd_probe,
+ .remove = ioc3_mfd_remove,
+};
+
+module_pci_driver(ioc3_mfd_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 MFD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index a8cfadc1fc01..7e0835cb062b 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -35,6 +35,9 @@
#define MADERA_32KZ_MCLK2 1
+#define MADERA_RESET_MIN_US 2000
+#define MADERA_RESET_MAX_US 3000
+
static const char * const madera_core_supplies[] = {
"AVDD",
"DBVDD1",
@@ -199,7 +202,7 @@ EXPORT_SYMBOL_GPL(madera_name_from_type);
#define MADERA_BOOT_POLL_INTERVAL_USEC 5000
#define MADERA_BOOT_POLL_TIMEOUT_USEC 25000
-static int madera_wait_for_boot(struct madera *madera)
+static int madera_wait_for_boot_noack(struct madera *madera)
{
ktime_t timeout;
unsigned int val = 0;
@@ -226,6 +229,13 @@ static int madera_wait_for_boot(struct madera *madera)
ret = -ETIMEDOUT;
}
+ return ret;
+}
+
+static int madera_wait_for_boot(struct madera *madera)
+{
+ int ret = madera_wait_for_boot_noack(madera);
+
/*
* BOOT_DONE defaults to unmasked on boot so we must ack it.
* Do this even after a timeout to avoid interrupt storms.
@@ -249,16 +259,13 @@ static int madera_soft_reset(struct madera *madera)
}
/* Allow time for internal clocks to startup after reset */
- usleep_range(1000, 2000);
+ usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
return 0;
}
static void madera_enable_hard_reset(struct madera *madera)
{
- if (!madera->pdata.reset)
- return;
-
/*
* There are many existing out-of-tree users of these codecs that we
* can't break so preserve the expected behaviour of setting the line
@@ -269,11 +276,9 @@ static void madera_enable_hard_reset(struct madera *madera)
static void madera_disable_hard_reset(struct madera *madera)
{
- if (!madera->pdata.reset)
- return;
-
gpiod_set_raw_value_cansleep(madera->pdata.reset, 1);
- usleep_range(1000, 2000);
+
+ usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
}
static int __maybe_unused madera_runtime_resume(struct device *dev)
@@ -292,6 +297,8 @@ static int __maybe_unused madera_runtime_resume(struct device *dev)
regcache_cache_only(madera->regmap, false);
regcache_cache_only(madera->regmap_32bit, false);
+ usleep_range(MADERA_RESET_MIN_US, MADERA_RESET_MAX_US);
+
ret = madera_wait_for_boot(madera);
if (ret)
goto err;
@@ -545,6 +552,12 @@ int madera_dev_init(struct madera *madera)
regcache_cache_only(madera->regmap, false);
regcache_cache_only(madera->regmap_32bit, false);
+ ret = madera_wait_for_boot_noack(madera);
+ if (ret) {
+ dev_err(madera->dev, "Device failed initial boot: %d\n", ret);
+ goto err_reset;
+ }
+
/*
* Now we can power up and verify that this is a chip we know about
* before we start doing any writes to its registers.
@@ -650,7 +663,7 @@ int madera_dev_init(struct madera *madera)
ret = madera_wait_for_boot(madera);
if (ret) {
- dev_err(madera->dev, "Device failed initial boot: %d\n", ret);
+ dev_err(madera->dev, "Failed to clear boot done: %d\n", ret);
goto err_reset;
}
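
The madera split separates polling for BOOT_DONE from acking it, and madera_wait_for_boot() acks even after a timeout so a stuck status bit cannot turn into an interrupt storm. The poll-then-unconditional-ack shape, sketched with illustrative registers and timings:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define EX_REG_STATUS   0x20            /* illustrative */
    #define EX_REG_IRQ_ACK  0x24            /* illustrative */
    #define EX_BOOT_DONE    BIT(0)

    static int example_wait_ready_noack(struct regmap *map)
    {
            unsigned int val;

            return regmap_read_poll_timeout(map, EX_REG_STATUS, val,
                                            val & EX_BOOT_DONE, 5000, 25000);
    }

    static int example_wait_ready(struct regmap *map)
    {
            int ret = example_wait_ready_noack(map);

            /* Ack unconditionally, even on timeout. */
            regmap_write(map, EX_REG_IRQ_ACK, EX_BOOT_DONE);
            return ret;
    }
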
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index da5cd9c92a59..ead2e79036a9 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -26,6 +26,7 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
case RN5T618_WATCHDOGCNT:
case RN5T618_DCIRQ:
case RN5T618_ILIMDATAH ... RN5T618_AIN0DATAL:
+ case RN5T618_ADCCNT3:
case RN5T618_IR_ADC1 ... RN5T618_IR_ADC3:
case RN5T618_IR_GPR:
case RN5T618_IR_GPF:
diff --git a/drivers/mfd/rohm-bd70528.c b/drivers/mfd/rohm-bd70528.c
index ef6786fd3b00..5c44d3b77b3e 100644
--- a/drivers/mfd/rohm-bd70528.c
+++ b/drivers/mfd/rohm-bd70528.c
@@ -48,7 +48,7 @@ static struct mfd_cell bd70528_mfd_cells[] = {
* We use BD71837 driver to drive the clock block. Only differences to
* BD70528 clock gate are the register address and mask.
*/
- { .name = "bd718xx-clk", },
+ { .name = "bd70528-clk", },
{ .name = "bd70528-wdt", },
{
.name = "bd70528-power",
@@ -236,7 +236,6 @@ static int bd70528_i2c_probe(struct i2c_client *i2c,
dev_set_drvdata(&i2c->dev, &bd70528->chip);
- bd70528->chip.chip_type = ROHM_CHIP_TYPE_BD70528;
bd70528->chip.regmap = devm_regmap_init_i2c(i2c, &bd70528_regmap);
if (IS_ERR(bd70528->chip.regmap)) {
dev_err(&i2c->dev, "Failed to initialize Regmap\n");
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
new file mode 100644
index 000000000000..210261d026f2
--- /dev/null
+++ b/drivers/mfd/rohm-bd71828.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2019 ROHM Semiconductors
+//
+// ROHM BD71828 PMIC driver
+
+#include <linux/gpio_keys.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+static struct gpio_keys_button button = {
+ .code = KEY_POWER,
+ .gpio = -1,
+ .type = EV_KEY,
+};
+
+static struct gpio_keys_platform_data bd71828_powerkey_data = {
+ .buttons = &button,
+ .nbuttons = 1,
+ .name = "bd71828-pwrkey",
+};
+
+static const struct resource rtc_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC0, "bd71828-rtc-alm-0"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC1, "bd71828-rtc-alm-1"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_RTC2, "bd71828-rtc-alm-2"),
+};
+
+static struct mfd_cell bd71828_mfd_cells[] = {
+ { .name = "bd71828-pmic", },
+ { .name = "bd71828-gpio", },
+ { .name = "bd71828-led", .of_compatible = "rohm,bd71828-leds" },
+ /*
+ * We use the BD71837 driver to drive the clock block. The only
+ * differences from the BD70528 clock gate are the register address
+ * and mask.
+ */
+ { .name = "bd71828-clk", },
+ { .name = "bd71827-power", },
+ {
+ .name = "bd71828-rtc",
+ .resources = rtc_irqs,
+ .num_resources = ARRAY_SIZE(rtc_irqs),
+ }, {
+ .name = "gpio-keys",
+ .platform_data = &bd71828_powerkey_data,
+ .pdata_size = sizeof(bd71828_powerkey_data),
+ },
+};
+
+static const struct regmap_range volatile_ranges[] = {
+ {
+ .range_min = BD71828_REG_PS_CTRL_1,
+ .range_max = BD71828_REG_PS_CTRL_1,
+ }, {
+ .range_min = BD71828_REG_PS_CTRL_3,
+ .range_max = BD71828_REG_PS_CTRL_3,
+ }, {
+ .range_min = BD71828_REG_RTC_SEC,
+ .range_max = BD71828_REG_RTC_YEAR,
+ }, {
+ /*
+ * For now, make all charger registers volatile because many need
+ * to be and because the charger block is not that performance
+ * critical.
+ */
+ .range_min = BD71828_REG_CHG_STATE,
+ .range_max = BD71828_REG_CHG_FULL,
+ }, {
+ .range_min = BD71828_REG_INT_MAIN,
+ .range_max = BD71828_REG_IO_STAT,
+ },
+};
+
+static const struct regmap_access_table volatile_regs = {
+ .yes_ranges = &volatile_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(volatile_ranges),
+};
+
+static struct regmap_config bd71828_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &volatile_regs,
+ .max_register = BD71828_MAX_REGISTER,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/*
+ * Mapping of main IRQ register bits to sub-IRQ register offsets so that we can
+ * access the correct sub-IRQ registers based on the bits that are set
+ * in the main IRQ register.
+ */
+
+static unsigned int bit0_offsets[] = {11}; /* RTC IRQ */
+static unsigned int bit1_offsets[] = {10}; /* TEMP IRQ */
+static unsigned int bit2_offsets[] = {6, 7, 8, 9}; /* BAT MON IRQ */
+static unsigned int bit3_offsets[] = {5}; /* BAT IRQ */
+static unsigned int bit4_offsets[] = {4}; /* CHG IRQ */
+static unsigned int bit5_offsets[] = {3}; /* VSYS IRQ */
+static unsigned int bit6_offsets[] = {1, 2}; /* DCIN IRQ */
+static unsigned int bit7_offsets[] = {0}; /* BUCK IRQ */
+
+static struct regmap_irq_sub_irq_map bd71828_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit3_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit4_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit5_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit6_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
+};
+
+static struct regmap_irq bd71828_irqs[] = {
+ REGMAP_IRQ_REG(BD71828_INT_BUCK1_OCP, 0, BD71828_INT_BUCK1_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BUCK2_OCP, 0, BD71828_INT_BUCK2_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BUCK3_OCP, 0, BD71828_INT_BUCK3_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BUCK4_OCP, 0, BD71828_INT_BUCK4_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BUCK5_OCP, 0, BD71828_INT_BUCK5_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BUCK6_OCP, 0, BD71828_INT_BUCK6_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BUCK7_OCP, 0, BD71828_INT_BUCK7_OCP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_PGFAULT, 0, BD71828_INT_PGFAULT_MASK),
+ /* DCIN1 interrupts */
+ REGMAP_IRQ_REG(BD71828_INT_DCIN_DET, 1, BD71828_INT_DCIN_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_DCIN_RMV, 1, BD71828_INT_DCIN_RMV_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CLPS_OUT, 1, BD71828_INT_CLPS_OUT_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CLPS_IN, 1, BD71828_INT_CLPS_IN_MASK),
+ /* DCIN2 interrupts */
+ REGMAP_IRQ_REG(BD71828_INT_DCIN_MON_RES, 2,
+ BD71828_INT_DCIN_MON_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_DCIN_MON_DET, 2,
+ BD71828_INT_DCIN_MON_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_LONGPUSH, 2, BD71828_INT_LONGPUSH_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_MIDPUSH, 2, BD71828_INT_MIDPUSH_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_SHORTPUSH, 2, BD71828_INT_SHORTPUSH_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_PUSH, 2, BD71828_INT_PUSH_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_WDOG, 2, BD71828_INT_WDOG_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_SWRESET, 2, BD71828_INT_SWRESET_MASK),
+ /* Vsys */
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_UV_RES, 3,
+ BD71828_INT_VSYS_UV_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_UV_DET, 3,
+ BD71828_INT_VSYS_UV_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_LOW_RES, 3,
+ BD71828_INT_VSYS_LOW_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_LOW_DET, 3,
+ BD71828_INT_VSYS_LOW_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_HALL_IN, 3,
+ BD71828_INT_VSYS_HALL_IN_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_HALL_TOGGLE, 3,
+ BD71828_INT_VSYS_HALL_TOGGLE_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_MON_RES, 3,
+ BD71828_INT_VSYS_MON_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_VSYS_MON_DET, 3,
+ BD71828_INT_VSYS_MON_DET_MASK),
+ /* Charger */
+ REGMAP_IRQ_REG(BD71828_INT_CHG_DCIN_ILIM, 4,
+ BD71828_INT_CHG_DCIN_ILIM_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_TOPOFF_TO_DONE, 4,
+ BD71828_INT_CHG_TOPOFF_TO_DONE_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_WDG_TEMP, 4,
+ BD71828_INT_CHG_WDG_TEMP_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_WDG_TIME, 4,
+ BD71828_INT_CHG_WDG_TIME_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_RECHARGE_RES, 4,
+ BD71828_INT_CHG_RECHARGE_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_RECHARGE_DET, 4,
+ BD71828_INT_CHG_RECHARGE_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_RANGED_TEMP_TRANSITION, 4,
+ BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_CHG_STATE_TRANSITION, 4,
+ BD71828_INT_CHG_STATE_TRANSITION_MASK),
+ /* Battery */
+ REGMAP_IRQ_REG(BD71828_INT_BAT_TEMP_NORMAL, 5,
+ BD71828_INT_BAT_TEMP_NORMAL_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_TEMP_ERANGE, 5,
+ BD71828_INT_BAT_TEMP_ERANGE_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_TEMP_WARN, 5,
+ BD71828_INT_BAT_TEMP_WARN_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_REMOVED, 5,
+ BD71828_INT_BAT_REMOVED_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_DETECTED, 5,
+ BD71828_INT_BAT_DETECTED_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_THERM_REMOVED, 5,
+ BD71828_INT_THERM_REMOVED_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_THERM_DETECTED, 5,
+ BD71828_INT_THERM_DETECTED_MASK),
+ /* Battery Mon 1 */
+ REGMAP_IRQ_REG(BD71828_INT_BAT_DEAD, 6, BD71828_INT_BAT_DEAD_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_SHORTC_RES, 6,
+ BD71828_INT_BAT_SHORTC_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_SHORTC_DET, 6,
+ BD71828_INT_BAT_SHORTC_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_LOW_VOLT_RES, 6,
+ BD71828_INT_BAT_LOW_VOLT_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_LOW_VOLT_DET, 6,
+ BD71828_INT_BAT_LOW_VOLT_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_VOLT_RES, 6,
+ BD71828_INT_BAT_OVER_VOLT_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_VOLT_DET, 6,
+ BD71828_INT_BAT_OVER_VOLT_DET_MASK),
+ /* Battery Mon 2 */
+ REGMAP_IRQ_REG(BD71828_INT_BAT_MON_RES, 7,
+ BD71828_INT_BAT_MON_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_MON_DET, 7,
+ BD71828_INT_BAT_MON_DET_MASK),
+ /* Battery Mon 3 (Coulomb counter) */
+ REGMAP_IRQ_REG(BD71828_INT_BAT_CC_MON1, 8,
+ BD71828_INT_BAT_CC_MON1_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_CC_MON2, 8,
+ BD71828_INT_BAT_CC_MON2_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_CC_MON3, 8,
+ BD71828_INT_BAT_CC_MON3_MASK),
+ /* Battery Mon 4 */
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_1_RES, 9,
+ BD71828_INT_BAT_OVER_CURR_1_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_1_DET, 9,
+ BD71828_INT_BAT_OVER_CURR_1_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_2_RES, 9,
+ BD71828_INT_BAT_OVER_CURR_2_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_2_DET, 9,
+ BD71828_INT_BAT_OVER_CURR_2_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_3_RES, 9,
+ BD71828_INT_BAT_OVER_CURR_3_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_BAT_OVER_CURR_3_DET, 9,
+ BD71828_INT_BAT_OVER_CURR_3_DET_MASK),
+ /* Temperature */
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_LOW_RES, 10,
+ BD71828_INT_TEMP_BAT_LOW_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_LOW_DET, 10,
+ BD71828_INT_TEMP_BAT_LOW_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_HI_RES, 10,
+ BD71828_INT_TEMP_BAT_HI_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_BAT_HI_DET, 10,
+ BD71828_INT_TEMP_BAT_HI_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_125_RES, 10,
+ BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_125_DET, 10,
+ BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_VF_DET, 10,
+ BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_TEMP_CHIP_OVER_VF_RES, 10,
+ BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK),
+ /* RTC Alarm */
+ REGMAP_IRQ_REG(BD71828_INT_RTC0, 11, BD71828_INT_RTC0_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_RTC1, 11, BD71828_INT_RTC1_MASK),
+ REGMAP_IRQ_REG(BD71828_INT_RTC2, 11, BD71828_INT_RTC2_MASK),
+};
+
+static struct regmap_irq_chip bd71828_irq_chip = {
+ .name = "bd71828_irq",
+ .main_status = BD71828_REG_INT_MAIN,
+ .irqs = &bd71828_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd71828_irqs),
+ .status_base = BD71828_REG_INT_BUCK,
+ .mask_base = BD71828_REG_INT_MASK_BUCK,
+ .ack_base = BD71828_REG_INT_BUCK,
+ .mask_invert = true,
+ .init_ack_masked = true,
+ .num_regs = 12,
+ .num_main_regs = 1,
+ .sub_reg_offsets = &bd71828_sub_irq_offsets[0],
+ .num_main_status_bits = 8,
+ .irq_reg_stride = 1,
+};
+
+static int bd71828_i2c_probe(struct i2c_client *i2c)
+{
+ struct rohm_regmap_dev *chip;
+ struct regmap_irq_chip_data *irq_data;
+ int ret;
+
+ if (!i2c->irq) {
+ dev_err(&i2c->dev, "No IRQ configured\n");
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ dev_set_drvdata(&i2c->dev, chip);
+
+ chip->regmap = devm_regmap_init_i2c(i2c, &bd71828_regmap);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&i2c->dev, "Failed to initialize Regmap\n");
+ return PTR_ERR(chip->regmap);
+ }
+
+ ret = devm_regmap_add_irq_chip(&i2c->dev, chip->regmap,
+ i2c->irq, IRQF_ONESHOT, 0,
+ &bd71828_irq_chip, &irq_data);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to add IRQ chip\n");
+ return ret;
+ }
+
+ dev_dbg(&i2c->dev, "Registered %d IRQs for chip\n",
+ bd71828_irq_chip.num_irqs);
+
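+ /*
+ * Translate the SHORTPUSH interrupt to a Linux virq so it can be fed
+ * to the gpio-keys cell as the power-key interrupt.
+ */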
+ ret = regmap_irq_get_virq(irq_data, BD71828_INT_SHORTPUSH);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to get the power-key IRQ\n");
+ return ret;
+ }
+
+ button.irq = ret;
+
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
+ bd71828_mfd_cells,
+ ARRAY_SIZE(bd71828_mfd_cells), NULL, 0,
+ regmap_irq_get_domain(irq_data));
+ if (ret)
+ dev_err(&i2c->dev, "Failed to create subdevices\n");
+
+ return ret;
+}
+
+static const struct of_device_id bd71828_of_match[] = {
+ { .compatible = "rohm,bd71828", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bd71828_of_match);
+
+static struct i2c_driver bd71828_drv = {
+ .driver = {
+ .name = "rohm-bd71828",
+ .of_match_table = bd71828_of_match,
+ },
+ .probe_new = &bd71828_i2c_probe,
+};
+module_i2c_driver(bd71828_drv);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("ROHM BD71828 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 85e7f5133365..c32c1b6c98fa 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -30,14 +30,24 @@ static struct gpio_keys_platform_data bd718xx_powerkey_data = {
.name = "bd718xx-pwrkey",
};
-static struct mfd_cell bd718xx_mfd_cells[] = {
+static struct mfd_cell bd71837_mfd_cells[] = {
{
.name = "gpio-keys",
.platform_data = &bd718xx_powerkey_data,
.pdata_size = sizeof(bd718xx_powerkey_data),
},
- { .name = "bd718xx-clk", },
- { .name = "bd718xx-pmic", },
+ { .name = "bd71837-clk", },
+ { .name = "bd71837-pmic", },
+};
+
+static struct mfd_cell bd71847_mfd_cells[] = {
+ {
+ .name = "gpio-keys",
+ .platform_data = &bd718xx_powerkey_data,
+ .pdata_size = sizeof(bd718xx_powerkey_data),
+ },
+ { .name = "bd71847-clk", },
+ { .name = "bd71847-pmic", },
};
static const struct regmap_irq bd718xx_irqs[] = {
@@ -124,6 +134,9 @@ static int bd718xx_i2c_probe(struct i2c_client *i2c,
{
struct bd718xx *bd718xx;
int ret;
+ unsigned int chip_type;
+ struct mfd_cell *mfd;
+ int cells;
if (!i2c->irq) {
dev_err(&i2c->dev, "No IRQ configured\n");
@@ -136,8 +149,21 @@ static int bd718xx_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
bd718xx->chip_irq = i2c->irq;
- bd718xx->chip.chip_type = (unsigned int)(uintptr_t)
- of_device_get_match_data(&i2c->dev);
+ chip_type = (unsigned int)(uintptr_t)
+ of_device_get_match_data(&i2c->dev);
+ switch (chip_type) {
+ case ROHM_CHIP_TYPE_BD71837:
+ mfd = bd71837_mfd_cells;
+ cells = ARRAY_SIZE(bd71837_mfd_cells);
+ break;
+ case ROHM_CHIP_TYPE_BD71847:
+ mfd = bd71847_mfd_cells;
+ cells = ARRAY_SIZE(bd71847_mfd_cells);
+ break;
+ default:
+ dev_err(&i2c->dev, "Unknown device type");
+ return -EINVAL;
+ }
bd718xx->chip.dev = &i2c->dev;
dev_set_drvdata(&i2c->dev, bd718xx);
@@ -170,8 +196,7 @@ static int bd718xx_i2c_probe(struct i2c_client *i2c,
button.irq = ret;
ret = devm_mfd_add_devices(bd718xx->chip.dev, PLATFORM_DEVID_AUTO,
- bd718xx_mfd_cells,
- ARRAY_SIZE(bd718xx_mfd_cells), NULL, 0,
+ mfd, cells, NULL, 0,
regmap_irq_get_domain(bd718xx->irq_data));
if (ret)
dev_err(&i2c->dev, "Failed to create subdevices\n");
@@ -188,6 +213,10 @@ static const struct of_device_id bd718xx_of_match[] = {
.compatible = "rohm,bd71847",
.data = (void *)ROHM_CHIP_TYPE_BD71847,
},
+ {
+ .compatible = "rohm,bd71850",
+ .data = (void *)ROHM_CHIP_TYPE_BD71847,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bd718xx_of_match);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 154270f8d8d7..e49787e6bb93 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1086,8 +1086,7 @@ static int sm501_register_gpio(struct sm501_devdata *sm)
iounmap(gpio->regs);
err_claimed:
- release_resource(gpio->regs_res);
- kfree(gpio->regs_res);
+ release_mem_region(iobase, 0x20);
return ret;
}
@@ -1095,6 +1094,7 @@ static int sm501_register_gpio(struct sm501_devdata *sm)
static void sm501_gpio_remove(struct sm501_devdata *sm)
{
struct sm501_gpio *gpio = &sm->gpio;
+ resource_size_t iobase = sm->io_res->start + SM501_GPIO;
if (!sm->gpio.registered)
return;
@@ -1103,8 +1103,7 @@ static void sm501_gpio_remove(struct sm501_devdata *sm)
gpiochip_remove(&gpio->high.gpio);
iounmap(gpio->regs);
- release_resource(gpio->regs_res);
- kfree(gpio->regs_res);
+ release_mem_region(iobase, 0x20);
}
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
@@ -1427,8 +1426,7 @@ static int sm501_plat_probe(struct platform_device *dev)
return sm501_init_dev(sm);
err_claim:
- release_resource(sm->regs_claim);
- kfree(sm->regs_claim);
+ release_mem_region(sm->io_res->start, 0x100);
err_res:
kfree(sm);
err1:
@@ -1637,8 +1635,7 @@ static int sm501_pci_probe(struct pci_dev *dev,
return 0;
err4:
- release_resource(sm->regs_claim);
- kfree(sm->regs_claim);
+ release_mem_region(sm->io_res->start, 0x100);
err3:
pci_disable_device(dev);
err2:
@@ -1673,8 +1670,7 @@ static void sm501_pci_remove(struct pci_dev *dev)
sm501_dev_remove(sm);
iounmap(sm->regs);
- release_resource(sm->regs_claim);
- kfree(sm->regs_claim);
+ release_mem_region(sm->io_res->start, 0x100);
pci_disable_device(dev);
}
@@ -1686,8 +1682,7 @@ static int sm501_plat_remove(struct platform_device *dev)
sm501_dev_remove(sm);
iounmap(sm->regs);
- release_resource(sm->regs_claim);
- kfree(sm->regs_claim);
+ release_mem_region(sm->io_res->start, 0x100);
return 0;
}
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index e22197c832e8..3a97816d0cba 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -224,6 +224,35 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
}
EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle);
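+/**
+ * syscon_regmap_lookup_by_phandle_args - look up a syscon regmap and the
+ *	fixed argument cells following the phandle
+ * @np: node containing the phandle property
+ * @property: name of the property holding the phandle and its arguments
+ * @arg_count: number of argument cells that follow the phandle
+ * @out_args: caller-provided array filled with @arg_count argument cells
+ *
+ * Return: regmap of the referenced syscon node, or ERR_PTR() on failure.
+ */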
+struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args)
+{
+ struct device_node *syscon_np;
+ struct of_phandle_args args;
+ struct regmap *regmap;
+ unsigned int index;
+ int rc;
+
+ rc = of_parse_phandle_with_fixed_args(np, property, arg_count,
+ 0, &args);
+ if (rc)
+ return ERR_PTR(rc);
+
+ syscon_np = args.np;
+ if (!syscon_np)
+ return ERR_PTR(-ENODEV);
+
+ regmap = syscon_node_to_regmap(syscon_np);
+ for (index = 0; index < arg_count; index++)
+ out_args[index] = args.args[index];
+ of_node_put(syscon_np);
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle_args);
+
static int syscon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -245,7 +274,7 @@ static int syscon_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- syscon_config.max_register = res->end - res->start - 3;
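+ /* Registers are 32-bit wide, so the last valid offset is size - 4 */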
+ syscon_config.max_register = resource_size(res) - 4;
if (pdata)
syscon_config.name = pdata->label;
syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
index 22d2f02d855c..b9f48e588d95 100644
--- a/drivers/mfd/tqmx86.c
+++ b/drivers/mfd/tqmx86.c
@@ -158,7 +158,7 @@ static int tqmx86_board_id_to_clk_rate(u8 board_id)
static int tqmx86_probe(struct platform_device *pdev)
{
- u8 board_id, rev, i2c_det, i2c_ien, io_ext_int_val;
+ u8 board_id, rev, i2c_det, io_ext_int_val;
struct device *dev = &pdev->dev;
u8 gpio_irq_cfg, readback;
const char *board_name;
@@ -196,7 +196,6 @@ static int tqmx86_probe(struct platform_device *pdev)
board_name, board_id, rev >> 4, rev & 0xf);
i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
- i2c_ien = ioread8(io_base + TQMX86_REG_I2C_INT_EN);
if (gpio_irq_cfg) {
io_ext_int_val =
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
new file mode 100644
index 000000000000..90341f3c6810
--- /dev/null
+++ b/drivers/mfd/wcd934x.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd934x/wcd934x.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slimbus.h>
+
+static const struct mfd_cell wcd934x_devices[] = {
+ {
+ .name = "wcd934x-codec",
+ }, {
+ .name = "wcd934x-gpio",
+ .of_compatible = "qcom,wcd9340-gpio",
+ }, {
+ .name = "wcd934x-soundwire",
+ .of_compatible = "qcom,soundwire-v1.3.0",
+ },
+};
+
+static const struct regmap_irq wcd934x_irqs[] = {
+ [WCD934X_IRQ_SLIMBUS] = {
+ .reg_offset = 0,
+ .mask = BIT(0),
+ .type = {
+ .type_reg_offset = 0,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ .type_reg_mask = BIT(0),
+ .type_level_low_val = BIT(0),
+ .type_level_high_val = BIT(0),
+ .type_falling_val = 0,
+ .type_rising_val = 0,
+ },
+ },
+ [WCD934X_IRQ_SOUNDWIRE] = {
+ .reg_offset = 2,
+ .mask = BIT(4),
+ .type = {
+ .type_reg_offset = 2,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ .type_reg_mask = BIT(4),
+ .type_level_low_val = BIT(4),
+ .type_level_high_val = BIT(4),
+ .type_falling_val = 0,
+ .type_rising_val = 0,
+ },
+ },
+};
+
+static const struct regmap_irq_chip wcd934x_regmap_irq_chip = {
+ .name = "wcd934x_irq",
+ .status_base = WCD934X_INTR_PIN1_STATUS0,
+ .mask_base = WCD934X_INTR_PIN1_MASK0,
+ .ack_base = WCD934X_INTR_PIN1_CLEAR0,
+ .type_base = WCD934X_INTR_LEVEL0,
+ .num_type_reg = 4,
+ .type_in_mask = false,
+ .num_regs = 4,
+ .irqs = wcd934x_irqs,
+ .num_irqs = ARRAY_SIZE(wcd934x_irqs),
+};
+
+static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WCD934X_INTR_PIN1_STATUS0...WCD934X_INTR_PIN2_CLEAR3:
+ case WCD934X_SWR_AHB_BRIDGE_RD_DATA_0:
+ case WCD934X_SWR_AHB_BRIDGE_RD_DATA_1:
+ case WCD934X_SWR_AHB_BRIDGE_RD_DATA_2:
+ case WCD934X_SWR_AHB_BRIDGE_RD_DATA_3:
+ case WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS:
+ case WCD934X_ANA_MBHC_RESULT_3:
+ case WCD934X_ANA_MBHC_RESULT_2:
+ case WCD934X_ANA_MBHC_RESULT_1:
+ case WCD934X_ANA_MBHC_MECH:
+ case WCD934X_ANA_MBHC_ELECT:
+ case WCD934X_ANA_MBHC_ZDET:
+ case WCD934X_ANA_MICB2:
+ case WCD934X_ANA_RCO:
+ case WCD934X_ANA_BIAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_range_cfg wcd934x_ranges[] = {
+ { .name = "WCD934X",
+ .range_min = 0x0,
+ .range_max = WCD934X_MAX_REGISTER,
+ .selector_reg = WCD934X_SEL_REGISTER,
+ .selector_mask = WCD934X_SEL_MASK,
+ .selector_shift = WCD934X_SEL_SHIFT,
+ .window_start = WCD934X_WINDOW_START,
+ .window_len = WCD934X_WINDOW_LENGTH,
+ },
+};
+
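+/*
+ * wcd934x_ranges above gives paged access to the full 16-bit register map:
+ * regmap writes the page to WCD934X_SEL_REGISTER and then accesses the
+ * register through the window starting at WCD934X_WINDOW_START.
+ */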
+static struct regmap_config wcd934x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0xffff,
+ .can_multi_write = true,
+ .ranges = wcd934x_ranges,
+ .num_ranges = ARRAY_SIZE(wcd934x_ranges),
+ .volatile_reg = wcd934x_is_volatile_register,
+};
+
+static int wcd934x_bring_up(struct wcd934x_ddata *ddata)
+{
+ struct regmap *regmap = ddata->regmap;
+ u16 id_minor, id_major;
+ int ret;
+
+ ret = regmap_bulk_read(regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+ (u8 *)&id_minor, sizeof(u16));
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+ (u8 *)&id_major, sizeof(u16));
+ if (ret)
+ return ret;
+
+ dev_info(ddata->dev, "WCD934x chip id major 0x%x, minor 0x%x\n",
+ id_major, id_minor);
+
+ regmap_write(regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
+ regmap_write(regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
+ regmap_write(regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+ /* Add 1msec delay for VOUT to settle */
+ usleep_range(1000, 1100);
+ regmap_write(regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+ regmap_write(regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+ regmap_write(regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
+ regmap_write(regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+ regmap_write(regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+
+ return 0;
+}
+
+static int wcd934x_slim_status_up(struct slim_device *sdev)
+{
+ struct device *dev = &sdev->dev;
+ struct wcd934x_ddata *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ddata->regmap = regmap_init_slimbus(sdev, &wcd934x_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ dev_err(dev, "Error allocating slim regmap\n");
+ return PTR_ERR(ddata->regmap);
+ }
+
+ ret = wcd934x_bring_up(ddata);
+ if (ret) {
+ dev_err(dev, "Failed to bring up WCD934X: err = %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_regmap_add_irq_chip(dev, ddata->regmap, ddata->irq,
+ IRQF_TRIGGER_HIGH, 0,
+ &wcd934x_regmap_irq_chip,
+ &ddata->irq_data);
+ if (ret) {
+ dev_err(dev, "Failed to add IRQ chip: err = %d\n", ret);
+ return ret;
+ }
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, wcd934x_devices,
+ ARRAY_SIZE(wcd934x_devices), NULL, 0, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to add child devices: err = %d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int wcd934x_slim_status(struct slim_device *sdev,
+ enum slim_device_status status)
+{
+ switch (status) {
+ case SLIM_DEVICE_STATUS_UP:
+ return wcd934x_slim_status_up(sdev);
+ case SLIM_DEVICE_STATUS_DOWN:
+ mfd_remove_devices(&sdev->dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wcd934x_slim_probe(struct slim_device *sdev)
+{
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+ struct wcd934x_ddata *ddata;
+ int reset_gpio, ret;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->irq = of_irq_get(np, 0);
+ if (ddata->irq < 0) {
+ if (ddata->irq != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ: err = %d\n",
+ ddata->irq);
+ return ddata->irq;
+ }
+
+ reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+ if (reset_gpio < 0) {
+ dev_err(dev, "Failed to get reset gpio: err = %d\n",
+ reset_gpio);
+ return reset_gpio;
+ }
+
+ ddata->extclk = devm_clk_get(dev, "extclk");
+ if (IS_ERR(ddata->extclk)) {
+ dev_err(dev, "Failed to get extclk");
+ return PTR_ERR(ddata->extclk);
+ }
+
+ ddata->supplies[0].supply = "vdd-buck";
+ ddata->supplies[1].supply = "vdd-buck-sido";
+ ddata->supplies[2].supply = "vdd-tx";
+ ddata->supplies[3].supply = "vdd-rx";
+ ddata->supplies[4].supply = "vdd-io";
+
+ ret = regulator_bulk_get(dev, WCD934X_MAX_SUPPLY, ddata->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(WCD934X_MAX_SUPPLY, ddata->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable supplies: err = %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * For WCD934X, it takes about 600us for the Vout_A and
+ * Vout_D to be ready after BUCK_SIDO is powered up.
+ * SYS_RST_N shouldn't be pulled high during this time.
+ */
+ usleep_range(600, 650);
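+ /* Hold SYS_RST_N low for 20ms, then release it and let the codec boot */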
+ gpio_direction_output(reset_gpio, 0);
+ msleep(20);
+ gpio_set_value(reset_gpio, 1);
+ msleep(20);
+
+ ddata->dev = dev;
+ dev_set_drvdata(dev, ddata);
+
+ return 0;
+}
+
+static void wcd934x_slim_remove(struct slim_device *sdev)
+{
+ struct wcd934x_ddata *ddata = dev_get_drvdata(&sdev->dev);
+
+ regulator_bulk_disable(WCD934X_MAX_SUPPLY, ddata->supplies);
+ mfd_remove_devices(&sdev->dev);
+}
+
+static const struct slim_device_id wcd934x_slim_id[] = {
+ { SLIM_MANF_ID_QCOM, SLIM_PROD_CODE_WCD9340,
+ SLIM_DEV_IDX_WCD9340, SLIM_DEV_INSTANCE_ID_WCD9340 },
+ {}
+};
+
+static struct slim_driver wcd934x_slim_driver = {
+ .driver = {
+ .name = "wcd934x-slim",
+ },
+ .probe = wcd934x_slim_probe,
+ .remove = wcd934x_slim_remove,
+ .device_status = wcd934x_slim_status,
+ .id_table = wcd934x_slim_id,
+};
+
+module_slim_driver(wcd934x_slim_driver);
+MODULE_DESCRIPTION("WCD934X slim driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("slim:217:250:*");
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index 259fe1dfec03..cd402c89189e 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -38,12 +38,18 @@ static const struct alcor_dev_cfg au6621_cfg = {
.dma = 1,
};
+static const struct alcor_dev_cfg au6625_cfg = {
+ .dma = 0,
+};
+
static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6601),
.driver_data = (kernel_ulong_t)&alcor_cfg },
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6621),
.driver_data = (kernel_ulong_t)&au6621_cfg },
- { },
+ { PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6625),
+ .driver_data = (kernel_ulong_t)&au6625_cfg },
+ {},
};
MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 32dcec2e9dfd..bc4967a6efa1 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -628,7 +628,8 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
- u8 n, clk_divider, mcu_cnt, div;
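+ /* n can be up to 396 (checked below), which does not fit in a u8 */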
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
static const u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
@@ -661,13 +662,13 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return 0;
if (pcr->ops->conv_clk_and_div_n)
- n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
- n = (u8)(clk - 4);
+ n = clk - 4;
if ((clk <= 4) || (n > 396))
return -EINVAL;
- mcu_cnt = (u8)(125/clk + 3);
+ mcu_cnt = 125/clk + 3;
if (mcu_cnt > 15)
mcu_cnt = 15;
@@ -676,7 +677,7 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (pcr->ops->conv_clk_and_div_n) {
int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
- n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
n = (n + 4) * 2 - 4;
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index fd7b2167103d..06038b325b02 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1512,7 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
bar = 1;
len = pci_resource_len(pcidev, bar);
base = pci_resource_start(pcidev, bar);
- pcr->remap_addr = ioremap_nocache(base, len);
+ pcr->remap_addr = ioremap(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
goto free_handle;
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index aed9c445d1e2..fb2eff69e449 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -352,7 +352,7 @@ void cxl_context_free(struct cxl_context *ctx)
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
if (ctx->mm)
- atomic_inc(&ctx->mm->mm_count);
+ mmgrab(ctx->mm);
}
void cxl_context_mm_count_put(struct cxl_context *ctx)
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 0681d5fdd538..031eb64549af 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* at24.c - handle most I2C EEPROMs
*
@@ -6,23 +6,23 @@
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*/
-#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/mutex.h>
-#include <linux/mod_devicetable.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/i2c.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
/* Address pointer is 16 bit. */
#define AT24_FLAG_ADDR16 BIT(7)
@@ -88,8 +88,7 @@ struct at24_data {
u8 flags;
struct nvmem_device *nvmem;
-
- struct gpio_desc *wp_gpio;
+ struct regulator *vcc_reg;
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
@@ -457,12 +456,10 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
* from this host, but not from other I2C masters.
*/
mutex_lock(&at24->lock);
- gpiod_set_value_cansleep(at24->wp_gpio, 0);
while (count) {
ret = at24_regmap_write(at24, buf, off, count);
if (ret < 0) {
- gpiod_set_value_cansleep(at24->wp_gpio, 1);
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
return ret;
@@ -472,7 +469,6 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
count -= ret;
}
- gpiod_set_value_cansleep(at24->wp_gpio, 1);
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
@@ -662,9 +658,9 @@ static int at24_probe(struct i2c_client *client)
at24->client[0].client = client;
at24->client[0].regmap = regmap;
- at24->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_HIGH);
- if (IS_ERR(at24->wp_gpio))
- return PTR_ERR(at24->wp_gpio);
+ at24->vcc_reg = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(at24->vcc_reg))
+ return PTR_ERR(at24->vcc_reg);
writable = !(flags & AT24_FLAG_READONLY);
if (writable) {
@@ -701,6 +697,12 @@ static int at24_probe(struct i2c_client *client)
i2c_set_clientdata(client, at24);
+ err = regulator_enable(at24->vcc_reg);
+ if (err) {
+ dev_err(dev, "Failed to enable vcc regulator\n");
+ return err;
+ }
+
/* enable runtime pm */
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -713,6 +715,7 @@ static int at24_probe(struct i2c_client *client)
pm_runtime_idle(dev);
if (err) {
pm_runtime_disable(dev);
+ regulator_disable(at24->vcc_reg);
return -ENODEV;
}
@@ -728,15 +731,42 @@ static int at24_probe(struct i2c_client *client)
static int at24_remove(struct i2c_client *client)
{
+ struct at24_data *at24 = i2c_get_clientdata(client);
+
pm_runtime_disable(&client->dev);
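+ /* vcc was already disabled by at24_suspend() if runtime-suspended */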
+ if (!pm_runtime_status_suspended(&client->dev))
+ regulator_disable(at24->vcc_reg);
pm_runtime_set_suspended(&client->dev);
return 0;
}
+static int __maybe_unused at24_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct at24_data *at24 = i2c_get_clientdata(client);
+
+ return regulator_disable(at24->vcc_reg);
+}
+
+static int __maybe_unused at24_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct at24_data *at24 = i2c_get_clientdata(client);
+
+ return regulator_enable(at24->vcc_reg);
+}
+
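+/*
+ * System sleep reuses the runtime PM callbacks: pm_runtime_force_suspend()
+ * calls at24_suspend() unless the device is already runtime-suspended.
+ */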
+static const struct dev_pm_ops at24_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(at24_suspend, at24_resume, NULL)
+};
+
static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
+ .pm = &at24_pm_ops,
.of_match_table = at24_of_match,
.acpi_match_table = ACPI_PTR(at24_acpi_ids),
},
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 6d27ccfe0680..3c2d405bc79b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
- device_del(&cdev->cdev);
put_device(dev);
cdev->dev = NULL;
- return device_add(&cdev->cdev);
+ return 0;
}
}
return -ENODEV;
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 026c6ca24540..905106579935 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1084,7 +1084,7 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
queue->ddcb_daddr);
queue->ddcb_vaddr = NULL;
queue->ddcb_daddr = 0ull;
- return -ENODEV;
+ return rc;
}
@@ -1179,7 +1179,7 @@ static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
*/
static int genwqe_card_thread(void *data)
{
- int should_stop = 0, rc = 0;
+ int should_stop = 0;
struct genwqe_dev *cd = (struct genwqe_dev *)data;
while (!kthread_should_stop()) {
@@ -1187,12 +1187,12 @@ static int genwqe_card_thread(void *data)
genwqe_check_ddcb_queue(cd, &cd->queue);
if (GENWQE_POLLING_ENABLED) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_ddcbs_in_flight(cd) ||
(should_stop = kthread_should_stop()), 1);
} else {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_next_ddcb_ready(cd) ||
(should_stop = kthread_should_stop()), HZ);
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 8850f475a413..0bf08678431b 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -824,8 +824,9 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc < 0) {
- dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
- rc, seq);
+ dev_err_ratelimited(hdev->dev,
+ "Error %ld on waiting for CS handle %llu\n",
+ rc, seq);
if (rc == -ERESTARTSYS) {
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 17db7b3dfb4c..2df6fb87e7ff 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -176,7 +176,7 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
spin_lock(&ctx->cs_lock);
if (seq >= ctx->cs_sequence) {
- dev_notice(hdev->dev,
+ dev_notice_ratelimited(hdev->dev,
"Can't wait on seq %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index c8d16aa4382c..7344e8a222ae 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -2192,7 +2192,7 @@ static int goya_push_linux_to_device(struct hl_device *hdev)
static int goya_pldm_init_cpu(struct hl_device *hdev)
{
- u32 val, unit_rst_val;
+ u32 unit_rst_val;
int rc;
/* Must initialize SRAM scrambler before pushing u-boot to SRAM */
@@ -2200,14 +2200,14 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
/* Put ARM cores into reset */
WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
- val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+ RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
/* Reset the CA53 MACRO */
unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
- val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+ RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
- val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+ RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
rc = goya_push_uboot_to_device(hdev);
if (rc)
@@ -2228,7 +2228,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
/* Release ARM core 0 from reset */
WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
CPU_RESET_CORE0_DEASSERT);
- val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+ RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
return 0;
}
@@ -2502,13 +2502,12 @@ err:
static int goya_hw_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 val;
int rc;
dev_info(hdev->dev, "Starting initialization of H/W\n");
/* Perform read from the device to make sure device is up */
- val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
/*
* Let's mark in the H/W that we have reached this point. We check
@@ -2560,7 +2559,7 @@ static int goya_hw_init(struct hl_device *hdev)
goto disable_queues;
/* Perform read from the device to flush all MSI-X configuration */
- val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return 0;
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index b6125620eb8f..fc5ff2805b94 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -173,6 +173,7 @@ static int isl29020_probe(struct i2c_client *client,
static int isl29020_remove(struct i2c_client *client)
{
+ pm_runtime_disable(&client->dev);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
return 0;
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a4fdad04809a..de87693cf557 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
void lkdtm_UNSET_SMEP(void)
{
-#ifdef CONFIG_X86_64
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
void (*direct_write_cr4)(unsigned long val);
unsigned char *insn;
@@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void)
native_write_cr4(cr4);
}
#else
- pr_err("FAIL: this test is x86_64-only\n");
+ pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void)
{
+#ifdef CONFIG_X86_32
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
@@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void)
asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
"r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
- panic("tried to double fault but didn't die\n");
-}
+ pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+ pr_err("XFAIL: this test is ia32-only\n");
#endif
+}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index a0a495c95e3c..8d468e0a950a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -765,7 +765,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%pUl", uuid);
+ return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);
@@ -775,7 +775,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 version = mei_me_cl_ver(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%02X", version);
+ return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);
@@ -797,7 +797,7 @@ static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+ return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);
@@ -807,7 +807,7 @@ static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 fixed = mei_me_cl_fixed(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+ return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);
@@ -817,7 +817,7 @@ static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
- return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+ return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 93027fd96c71..4c596c646ac0 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -757,11 +757,38 @@ static const struct component_master_ops mei_component_master_ops = {
.unbind = mei_component_master_unbind,
};
+/**
+ * mei_hdcp_component_match - compare function for matching mei hdcp.
+ *
+ * The function checks if the driver is i915, the subcomponent is HDCP
+ * and the grandparent of hdcp and the parent of i915 are the same
+ * PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_HDCP)
+ * @data: compare data (mei hdcp device)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
static int mei_hdcp_component_match(struct device *dev, int subcomponent,
void *data)
{
- return !strcmp(dev->driver->name, "i915") &&
- subcomponent == I915_COMPONENT_HDCP;
+ struct device *base = data;
+
+ if (strcmp(dev->driver->name, "i915") ||
+ subcomponent != I915_COMPONENT_HDCP)
+ return 0;
+
+ base = base->parent;
+ if (!base)
+ return 0;
+
+ base = base->parent;
+ dev = dev->parent;
+
+ return (base && dev && dev == base);
}
static int mei_hdcp_probe(struct mei_cl_device *cldev,
@@ -785,7 +812,7 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev,
master_match = NULL;
component_match_add_typed(&cldev->dev, &master_match,
- mei_hdcp_component_match, comp_master);
+ mei_hdcp_component_match, &cldev->dev);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7cd67fb2365d..87a0201ba6b3 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,10 +81,16 @@
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+
#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */
+#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */
+#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */
+
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
+
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index c845b7e40f26..2711451b3d87 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -99,11 +99,15 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index 3ee3d2402634..b58608829b18 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -65,9 +65,6 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
*/
void mic_delete_card_debug_dir(struct mic_driver *mdrv)
{
- if (!mdrv->dbg_dir)
- return;
-
debugfs_remove_recursive(mdrv->dbg_dir);
}
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 2fc9f4bf7001..68a731fd86de 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -102,9 +102,6 @@ void cosm_create_debug_dir(struct cosm_device *cdev)
void cosm_delete_debug_dir(struct cosm_device *cdev)
{
- if (!cdev->dbg_dir)
- return;
-
debugfs_remove_recursive(cdev->dbg_dir);
}
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 8a8e41677501..ab0db7a2ac8c 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -129,9 +129,6 @@ void mic_create_debug_dir(struct mic_device *mdev)
*/
void mic_delete_debug_dir(struct mic_device *mdev)
{
- if (!mdev->dbg_dir)
- return;
-
debugfs_remove_recursive(mdev->dbg_dir);
}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index c25fd40f3bd0..fcd999f50d14 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -788,7 +788,7 @@ scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
"failed to setup interrupts for %d\n", msg->src.node);
goto interrupt_setup_error;
}
- newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
+ newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len);
if (!newdev->mmio.va) {
dev_err(&scifdev->sdev->dev,
"failed to map mmio for %d\n", msg->src.node);
diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
index 1916fa65f2f2..2d2266c1439e 100644
--- a/drivers/misc/ocxl/Kconfig
+++ b/drivers/misc/ocxl/Kconfig
@@ -11,6 +11,7 @@ config OCXL
tristate "OpenCAPI coherent accelerator support"
depends on PPC_POWERNV && PCI && EEH
select OCXL_BASE
+ select HOTPLUG_PCI_POWERNV
default m
help
Select this option to enable the ocxl driver for Open
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index 994563a078eb..de8a66b9d76b 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -10,18 +10,17 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
int pasid;
struct ocxl_context *ctx;
- *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
- if (!*context)
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
return -ENOMEM;
- ctx = *context;
-
ctx->afu = afu;
mutex_lock(&afu->contexts_lock);
pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
afu->pasid_base + afu->pasid_max, GFP_KERNEL);
if (pasid < 0) {
mutex_unlock(&afu->contexts_lock);
+ kfree(ctx);
return pasid;
}
afu->pasid_count++;
@@ -43,6 +42,7 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
* duration of the life of the context
*/
ocxl_afu_get(afu);
+ *context = ctx;
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 2870c25da166..4d1b44de1492 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -18,18 +18,15 @@ static struct class *ocxl_class;
static struct mutex minors_idr_lock;
static struct idr minors_idr;
-static struct ocxl_file_info *find_file_info(dev_t devno)
+static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
{
struct ocxl_file_info *info;
- /*
- * We don't declare an RCU critical section here, as our AFU
- * is protected by a reference counter on the device. By the time the
- * info reference is removed from the idr, the ref count of
- * the device is already at 0, so no user API will access that AFU and
- * this function can't return it.
- */
+ mutex_lock(&minors_idr_lock);
info = idr_find(&minors_idr, MINOR(devno));
+ if (info)
+ get_device(&info->dev);
+ mutex_unlock(&minors_idr_lock);
return info;
}
@@ -58,14 +55,16 @@ static int afu_open(struct inode *inode, struct file *file)
pr_debug("%s for device %x\n", __func__, inode->i_rdev);
- info = find_file_info(inode->i_rdev);
+ info = find_and_get_file_info(inode->i_rdev);
if (!info)
return -ENODEV;
rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
- if (rc)
+ if (rc) {
+ put_device(&info->dev);
return rc;
-
+ }
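+ /*
+ * The context now holds its own AFU reference; drop the temporary
+ * device reference taken by find_and_get_file_info().
+ */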
+ put_device(&info->dev);
file->private_data = ctx;
return 0;
}
@@ -487,7 +486,6 @@ static void info_release(struct device *dev)
{
struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);
- free_minor(info);
ocxl_afu_put(info->afu);
kfree(info);
}
@@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu)
ocxl_file_make_invisible(info);
ocxl_sysfs_unregister_afu(info);
+ free_minor(info);
device_unregister(&info->dev);
}
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 359c5bab45ac..b7f510676cd6 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -792,7 +792,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int a;
- int retval = -EINVAL;
+ int retval;
int pci_bar = 1;
dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
@@ -834,7 +834,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
}
drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
drv_data->pti_ioaddr =
- ioremap_nocache((u32)drv_data->aperture_base,
+ ioremap((u32)drv_data->aperture_base,
APERTURE_LEN);
if (!drv_data->pti_ioaddr) {
retval = -ENOMEM;
@@ -910,7 +910,7 @@ static struct pci_driver pti_pci_driver = {
*/
static int __init pti_init(void)
{
- int retval = -EINVAL;
+ int retval;
/* First register module as tty device */
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 95ff7c5a1dfb..a6e1a8983e1f 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -10,16 +10,16 @@
#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <uapi/misc/pvpanic.h>
static void __iomem *base;
-#define PVPANIC_PANICKED (1 << 0)
-
MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
@@ -34,7 +34,13 @@ static int
pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
void *unused)
{
- pvpanic_send_event(PVPANIC_PANICKED);
+ unsigned int event = PVPANIC_PANICKED;
+
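+ /* Report CRASH_LOADED when a kdump kernel is set to handle the crash */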
+ if (kexec_crash_loaded())
+ event = PVPANIC_CRASH_LOADED;
+
+ pvpanic_send_event(event);
+
return NOTIFY_DONE;
}
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 2817f4751306..97b8b38ab47d 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -255,28 +255,28 @@ static int options_open(struct inode *inode, struct file *file)
}
/* *INDENT-OFF* */
-static const struct file_operations statistics_fops = {
- .open = statistics_open,
- .read = seq_read,
- .write = statistics_write,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops statistics_proc_ops = {
+ .proc_open = statistics_open,
+ .proc_read = seq_read,
+ .proc_write = statistics_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
-static const struct file_operations mcs_statistics_fops = {
- .open = mcs_statistics_open,
- .read = seq_read,
- .write = mcs_statistics_write,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops mcs_statistics_proc_ops = {
+ .proc_open = mcs_statistics_open,
+ .proc_read = seq_read,
+ .proc_write = mcs_statistics_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
-static const struct file_operations options_fops = {
- .open = options_open,
- .read = seq_read,
- .write = options_write,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops options_proc_ops = {
+ .proc_open = options_open,
+ .proc_read = seq_read,
+ .proc_write = options_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
static struct proc_dir_entry *proc_gru __read_mostly;
@@ -286,11 +286,11 @@ int gru_proc_init(void)
proc_gru = proc_mkdir("sgi_uv/gru", NULL);
if (!proc_gru)
return -1;
- if (!proc_create("statistics", 0644, proc_gru, &statistics_fops))
+ if (!proc_create("statistics", 0644, proc_gru, &statistics_proc_ops))
goto err;
- if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_fops))
+ if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_proc_ops))
goto err;
- if (!proc_create("debug_options", 0644, proc_gru, &options_fops))
+ if (!proc_create("debug_options", 0644, proc_gru, &options_proc_ops))
goto err;
if (!proc_create_seq("cch_status", 0444, proc_gru, &cch_seq_ops))
goto err;
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index f7d610a22347..ada94e6a3c91 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -496,7 +496,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Deal with transmit timeouts coming from the network layer.
*/
static void
-xpnet_dev_tx_timeout(struct net_device *dev)
+xpnet_dev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
dev->stats.tx_errors++;
}
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index d054e2842a5f..cb57ac6ab4c3 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -85,6 +85,7 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
unsigned long base;
int pages;
void *dst_cpy;
+ int ret;
mutex_lock(&exec_pool_list_mutex);
list_for_each_entry(p, &exec_pool_list, list) {
@@ -104,16 +105,28 @@ void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
mutex_lock(&part->lock);
- set_memory_nx((unsigned long)base, pages);
- set_memory_rw((unsigned long)base, pages);
+ ret = set_memory_nx((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
+ ret = set_memory_rw((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
dst_cpy = fncpy(dst, src, size);
- set_memory_ro((unsigned long)base, pages);
- set_memory_x((unsigned long)base, pages);
+ ret = set_memory_ro((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
+ ret = set_memory_x((unsigned long)base, pages);
+ if (ret)
+ goto error_out;
mutex_unlock(&part->lock);
return dst_cpy;
+
+error_out:
+ mutex_unlock(&part->lock);
+ return NULL;
}
EXPORT_SYMBOL_GPL(sram_exec_copy);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 2ae9948a91e1..14136d2cc8f9 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -736,8 +736,8 @@ static int st_tty_open(struct tty_struct *tty)
static void st_tty_close(struct tty_struct *tty)
{
- unsigned char i = ST_MAX_CHANNELS;
- unsigned long flags = 0;
+ unsigned char i;
+ unsigned long flags;
struct st_data_s *st_gdata = tty->disc_data;
pr_info("%s ", __func__);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 09db397df287..6d71865c8042 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -148,16 +148,14 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
u16 c0 = count_lut[ch0];
u16 c1 = count_lut[ch1];
- /*
- * Calculate ratio.
- * Note: the "128" is a scaling factor
- */
- u8 r = 128;
-
/* Avoid division by 0 and count 1 cannot be greater than count 0 */
if (c1 <= c0)
if (c0) {
- r = c1 * 128 / c0;
+ /*
+ * Calculate ratio.
+ * Note: the "128" is a scaling factor
+ */
+ u8 r = c1 * 128 / c0;
/* Calculate LUX */
lux = ((c0 - c1) * ratio_lut[r]) / 256;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 5e6be1527571..b837e7eba5f7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 11835969e982..71bbaa56bdb5 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -733,7 +733,7 @@ static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
enum xsdfec_order order;
int err;
- err = get_user(order, (enum xsdfec_order *)arg);
+ err = get_user(order, (enum xsdfec_order __user *)arg);
if (err)
return -EFAULT;
@@ -1025,25 +1025,25 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
}
#endif
-static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct xsdfec_dev *xsdfec;
xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
if (!xsdfec)
- return POLLNVAL | POLLHUP;
+ return EPOLLNVAL | EPOLLHUP;
poll_wait(file, &xsdfec->waitq, wait);
/* XSDFEC ISR detected an error */
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
if (xsdfec->state_updated)
- mask |= POLLIN | POLLPRI;
+ mask |= EPOLLIN | EPOLLPRI;
if (xsdfec->stats_updated)
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
return mask;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 95b41c0891d0..663d87924e5e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
}
if (!err)
err = mmc_erase(card, from, nr, card->erase_arg);
@@ -1149,7 +1149,7 @@ retry:
arg == MMC_SECURE_TRIM1_ARG ?
INAND_CMD38_ARG_SECTRIM1 :
INAND_CMD38_ARG_SECERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
@@ -1167,7 +1167,7 @@ retry:
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index abf8f5eb0a1c..aa54d359dab7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work)
}
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ unsigned int freq = freqs[i];
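+
+ /*
+ * Skip trial frequencies above f_max; if even the last (lowest)
+ * entry is too high, clamp it to f_max for one final attempt.
+ */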
+ if (freq > host->f_max) {
+ if (i + 1 < ARRAY_SIZE(freqs))
+ continue;
+ freq = host->f_max;
+ }
+ if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
break;
if (freqs[i] <= host->f_min)
break;
@@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
- host->f_init = max(freqs[0], host->f_min);
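+ /* Keep the initial probe frequency within the host's [f_min, f_max] */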
+ host->f_init = max(min(freqs[0], host->f_max), host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 105b7a7c0251..c8768726d925 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host)
struct device *dev = host->parent;
u32 bus_width, drv_type, cd_debounce_delay_ms;
int ret;
- bool cd_cap_invert, cd_gpio_invert = false;
- bool ro_cap_invert, ro_gpio_invert = false;
if (!dev || !dev_fwnode(dev))
return 0;
@@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host)
*/
/* Parse Card Detection */
+
if (device_property_read_bool(dev, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
- cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+ if (device_property_read_bool(dev, "cd-inverted"))
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
if (device_property_read_u32(dev, "cd-debounce-delay-ms",
&cd_debounce_delay_ms))
@@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host)
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, false,
- cd_debounce_delay_ms * 1000,
- &cd_gpio_invert);
+ cd_debounce_delay_ms * 1000);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
-
- /*
- * There are two ways to flag that the CD line is inverted:
- * through the cd-inverted flag and by the GPIO line itself
- * being inverted from the GPIO subsystem. This is a leftover
- * from the times when the GPIO subsystem did not make it
- * possible to flag a line as inverted.
- *
- * If the capability on the host AND the GPIO line are
- * both inverted, the end result is that the CD line is
- * not inverted.
- */
- if (cd_cap_invert ^ cd_gpio_invert)
- host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
}
/* Parse Write Protection */
- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
+ if (device_property_read_bool(dev, "wp-inverted"))
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
@@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "disable-wp"))
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
- /* See the comment on CD inversion above */
- if (ro_cap_invert ^ ro_gpio_invert)
- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
if (device_property_read_bool(dev, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 09113b9ad679..da425ee2d9bf 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,7 +19,9 @@
#include "host.h"
#include "mmc_ops.h"
-#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 min */
+#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
/*
* In cases when not allowed to poll by using CMD13 or because we aren't
* capable of polling by using ->card_busy(), then rely on waiting the
@@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_retune_hold(host);
+ if (!timeout_ms) {
+ pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+ mmc_hostname(host));
+ timeout_ms = card->ext_csd.generic_cmd6_time;
+ }
+
/*
- * If the cmd timeout and the max_busy_timeout of the host are both
- * specified, let's validate them. A failure means we need to prevent
- * the host from doing hw busy detection, which is done by converting
- * to a R1 response instead of a R1B.
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to an R1 response
+ * instead of an R1B.
*/
- if (timeout_ms && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
cmd.opcode = MMC_SWITCH;
@@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
cmd.flags = MMC_CMD_AC;
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- /*
- * A busy_timeout of zero means the host can decide to use
- * whatever value it finds suitable.
- */
cmd.busy_timeout = timeout_ms;
} else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
@@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card)
* urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
+ EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
if (err)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
@@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card)
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1, 0);
+ EXT_CSD_FLUSH_CACHE, 1,
+ MMC_CACHE_FLUSH_TIMEOUT_MS);
if (err)
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
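The timeout policy introduced above, condensed into a hypothetical helper: each caller passes an operation-specific bound (MMC_BKOPS_TIMEOUT_MS, MMC_CACHE_FLUSH_TIMEOUT_MS), and an unspecified value falls back to the card's generic CMD6 time, as __mmc_switch() now does:

static unsigned int pick_switch_timeout(struct mmc_card *card,
					unsigned int timeout_ms)
{
	if (!timeout_ms)	/* unspecified: use the generic CMD6 time */
		timeout_ms = card->ext_csd.generic_cmd6_time;
	return timeout_ms;
}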
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index da2596c5fa28..05e907451df9 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -19,7 +19,6 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
char *cd_label;
@@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host)
return -ENOSYS;
cansleep = gpiod_cansleep(ctx->cd_gpio);
- if (ctx->override_cd_active_level) {
- int value = cansleep ?
- gpiod_get_raw_value_cansleep(ctx->cd_gpio) :
- gpiod_get_raw_value(ctx->cd_gpio);
- return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
- }
-
return cansleep ?
gpiod_get_value_cansleep(ctx->cd_gpio) :
gpiod_get_value(ctx->cd_gpio);
@@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
* @idx: index of the GPIO to obtain in the consumer
* @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not, set
- * to NULL to ignore
*
* Note that this must be called prior to mmc_add_host()
* otherwise the caller must also call mmc_gpiod_request_cd_irq().
@@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
*/
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
@@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
ctx->cd_debounce_delay_ms = debounce / 1000;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ /* override forces default (active-low) polarity ... */
+ if (override_active_level && !gpiod_is_active_low(desc))
+ gpiod_toggle_active_low(desc);
+
+ /* ... or active-high */
+ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
- ctx->override_cd_active_level = override_active_level;
ctx->cd_gpio = desc;
return 0;
@@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not,
- * set to NULL to ignore
*
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int idx, unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
@@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return ret;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
ctx->ro_gpio = desc;
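With the gpio_invert out-parameter removed, line polarity is folded into the GPIO descriptor itself; a sketch of the idea with a hypothetical helper name:

static void apply_ro_polarity(struct mmc_host *host, struct gpio_desc *desc)
{
	/*
	 * Fold the wp-inverted capability into the descriptor once,
	 * so a plain gpiod_get_value() later returns the logical state.
	 */
	if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
		gpiod_toggle_active_low(desc);
}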
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d06b2dfe3c95..3a5089f0332c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -501,6 +501,7 @@ config MMC_SDHCI_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB
tristate "Broadcom SDIO/SD/MMC support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC
depends on MMC_SDHCI_PLTFM
+ select MMC_CQHCI
default y
help
This selects support for the SDIO/SD/MMC Host Controller on
@@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
+ select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's DRA7 SOCs. The controller supports
@@ -1040,3 +1043,6 @@ config MMC_OWL
help
This selects support for the SD/MMC Host Controller on
Actions Semi Owl SoCs.
+
+config MMC_SDHCI_EXTERNAL_DMA
+ bool
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 6f065bb5c55a..aeaaa5314924 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
return clk_prepare_enable(host->mck);
}
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index bc8aeb47a7b4..8823680ca42c 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
goto out2;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&pdev->dev, "no IRQ defined\n");
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
goto out3;
- }
- host->irq = r->start;
mmc->ops = &au1xmmc_ops;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 99f61fd2a658..c3d949847cbd 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev)
host->dma_chan = NULL;
host->dma_desc = NULL;
- host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
+ host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
+ if (IS_ERR(host->dma_chan_rxtx)) {
+ ret = PTR_ERR(host->dma_chan_rxtx);
+ host->dma_chan_rxtx = NULL;
+
+ if (ret == -EPROBE_DEFER)
+ goto err;
+
+ /* Ignore errors to fall back to PIO mode */
+ }
+
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
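The dma_request_slave_channel() to dma_request_chan() conversion repeated across these drivers, condensed into a hypothetical helper: unlike the old API, the new one returns ERR_PTR() codes, so -EPROBE_DEFER can be propagated while any other error falls back to PIO:

static int request_rxtx_chan(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* retry the probe later */
	}
	return 0;			/* channel granted, or PIO fallback */
}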
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index eee08d81b242..76013bbbcff3 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
return ret;
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
- if (!host->base)
- return -EINVAL;
+ if (!host->base) {
+ ret = -EINVAL;
+ goto error;
+ }
/* On ThunderX these are identical */
host->dma_base = host->base;
@@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
host->reg_off_dma = 0x160;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk))
- return PTR_ERR(host->clk);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error;
+ }
ret = clk_prepare_enable(host->clk);
if (ret)
- return ret;
+ goto error;
host->sys_freq = clk_get_rate(host->clk);
spin_lock_init(&host->irq_handler_lock);
@@ -157,6 +161,7 @@ error:
}
}
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
return ret;
}
@@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ebfaeb33bc8c..f01fecd75833 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
mmc->caps |= pdata->caps;
/* Register a cd gpio, if there is not one, enable polling */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index fc9d4d000f97..bc5278ab5707 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host)
if (!host->dms)
return -ENOMEM;
- host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
- if (!host->dms->ch) {
+ host->dms->ch = dma_request_chan(host->dev, "rx-tx");
+ if (IS_ERR(host->dms->ch)) {
+ int ret = PTR_ERR(host->dms->ch);
+
dev_err(host->dev, "Failed to get external DMA channel.\n");
kfree(host->dms);
host->dms = NULL;
- return -ENXIO;
+ return ret;
}
return 0;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 78383f60a3dc..fbae87d1f017 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev)
static int jz4740_mmc_resume(struct device *dev)
{
- return pinctrl_pm_select_default_state(dev);
+ return pinctrl_select_default_state(dev);
}
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e712315c7e8d..35400cf2a2e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -161,7 +161,6 @@ struct meson_host {
bool dram_access_quirk;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_clk_gate;
unsigned int bounce_buf_size;
@@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
u32 cfg;
if (host->pins_clk_gate)
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(host->dev);
/* Make sure the clock is not stopped in the controller */
cfg = readl(host->regs + SD_EMMC_CFG);
@@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
goto free_host;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- ret = PTR_ERR(host->pins_default);
- goto free_host;
- }
-
host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
"clk-gate");
if (IS_ERR(host->pins_clk_gate)) {
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ba9a63db73da..8b038e7b2cd3 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
struct platform_device *slot_pdev;
struct mmc_host *mmc;
struct meson_mx_mmc_host *host;
- struct resource *res;
int ret, irq;
u32 conf;
@@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(host->controller_dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto error_free_mmc;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74c6cfbf9172..951f76dc1ddd 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases like when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+ * inverted by gpiolib, so to make sure we drive it high
+ * we should toggle the default with an XOR as we do here.
*/
- host->spi->mode |= SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
@@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
@@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
- status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
+ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status)
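Why XOR rather than setting and clearing the flag: when gpiolib manages an active-low chip select, SPI_CS_HIGH may already be set on entry, and the code must end up back in whatever mode it started from. A sketch (the mode type is simplified for illustration):

static u32 toggle_cs_polarity(u32 mode)
{
	/* self-inverse: applying this twice restores the original mode */
	return mode ^ SPI_CS_HIGH;
}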
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 40e72c30ea84..e9ffce8d41ea 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = {
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = {
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = {
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
.stm32_idmabsize_mask = GENMASK(12, 5),
.busy_timeout = true,
.busy_detect = true,
@@ -284,6 +289,7 @@ static struct variant_data variant_qcom = {
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
@@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- host->ops->dma_start(host, &datactrl);
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
/* Trigger the DMA transfer */
mmci_write_datactrlreg(host, datactrl);
@@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host)
host->dma_priv = dmae;
- dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "rx");
- dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "tx");
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }
/*
* If only an RX channel is specified, the driver will
@@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * Some power-of-two block sizes (such as 64 bytes) are sent regularly
+ * during SDIO traffic and those work fine, so for these we enable DMA
+ * transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host,
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
+ int ret;
host->dma_in_progress = true;
- dmaengine_submit(dmae->desc_current);
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
dma_async_issue_pending(dmae->cur);
*datactrl |= MCI_DPSM_DMAENABLE;
@@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
return;
}
}
- mmci_request_end(host, host->mrq);
+
+ if (host->irq_action != IRQ_WAKE_THREAD)
+ mmci_request_end(host, host->mrq);
+
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
} else if (!host->variant->datactrl_first &&
@@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
u32 status;
- int ret = 0;
spin_lock(&host->lock);
+ host->irq_action = IRQ_HANDLED;
do {
status = readl(host->base + MMCISTATUS);
@@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
- ret = 1;
} while (status);
spin_unlock(&host->lock);
- return IRQ_RETVAL(ret);
+ return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a data timeout for an R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ unsigned long flags;
+
+ if (host->rst) {
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ host->irq_action = IRQ_HANDLED;
+ mmci_request_end(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return host->irq_action;
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pinctrl_select_state(host->pinctrl, host->pins_opendrain);
else
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(mmc_dev(mmc));
}
/*
@@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev,
goto host_free;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- dev_err(mmc_dev(mmc), "Can't select default pins\n");
- ret = PTR_ERR(host->pins_default);
- goto host_free;
- }
-
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
MMCI_PINCTRL_STATE_OPENDRAIN);
if (IS_ERR(host->pins_opendrain)) {
@@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev,
* silently if these do not exist
*/
if (!np) {
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
- ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
+ ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ mmci_irq_thread, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
goto clk_disable;
@@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev)
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
mmci_restore(host);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
}
return 0;
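The threaded-IRQ registration pattern adopted above, in isolation: the hard handler returns IRQ_WAKE_THREAD when sleepable recovery work (the controller reset) is needed, and IRQ_HANDLED otherwise. A sketch with hypothetical names throughout:

struct example_host {		/* hypothetical, stands in for mmci_host */
	bool needs_reset;
};

static irqreturn_t example_hard_irq(int irq, void *dev_id)
{
	struct example_host *host = dev_id;

	/* fast, atomic path: decide whether the thread must run */
	return host->needs_reset ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t example_thread_irq(int irq, void *dev_id)
{
	/* sleepable context: reset and reprogram the controller here */
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq, void *host)
{
	return devm_request_threaded_irq(dev, irq, example_hard_irq,
					 example_thread_irq, IRQF_SHARED,
					 "example (cmd)", host);
}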
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 158e1231aa23..ea6a0b5779d4 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -279,7 +279,11 @@ struct mmci_host;
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block sizes are accepted by the
+ * hardware, such as with some SDIO traffic that sends
+ * odd-sized packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
* @datactrl_first: true if data must be setup before send command
* @datacnt_useless: true if you could not use datacnt register to read
* remaining data
@@ -326,6 +330,8 @@ struct variant_data {
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
unsigned int datactrl_blocksz;
+ u8 datactrl_any_blocksz:1;
+ u8 dma_power_of_2:1;
u8 datactrl_first:1;
u8 datacnt_useless:1;
u8 st_sdio:1;
@@ -404,7 +410,6 @@ struct mmci_host {
struct mmci_host_ops *ops;
struct variant_data *variant;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_opendrain;
u8 hw_designer;
@@ -412,6 +417,7 @@ struct mmci_host {
struct timer_list timer;
unsigned int oldstat;
+ u32 irq_action;
/* pio stuff */
struct sg_mapping_iter sg_miter;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 189e42674d85..7726dcf48f2c 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -228,6 +228,7 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
@@ -1881,6 +1882,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
/* select EMMC50 PAD CMD tune */
sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
+ sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
@@ -2192,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (ret)
goto host_free;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto host_free;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 74a0a7fbbf7f..203b61712601 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mbus_dram_target_info *dram;
- struct resource *r;
int ret, irq;
if (!np) {
dev_err(&pdev->dev, "no DT node\n");
return -ENODEV;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
+ if (irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = devm_ioremap_resource(&pdev->dev, r);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 011b59a3602e..b3d654c688e5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev)
mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
if (!host->pdata) {
- host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(host->dma)) {
+ if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk_put;
+ }
+
+ /* Ignore errors to fall back to PIO mode */
+ host->dma = NULL;
+ }
} else {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 4031217d21c3..d82674aed447 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_clk_disable;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 767e964ca5a2..a379c45b985c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
ret = PTR_ERR(p);
goto err_free_irq;
}
- if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
- dev_info(host->dev, "missing default pinctrl state\n");
- devm_pinctrl_put(p);
- ret = -EINVAL;
- goto err_free_irq;
- }
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
dev_info(host->dev, "missing idle pinctrl state\n");
@@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
/* irq lost, if pinmux incorrect */
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
} else {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 771e3d00f1bb..01ffe51f413d 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
- if (!owl_host->dma) {
+ owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
+ if (IS_ERR(owl_host->dma)) {
dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
- ret = -ENXIO;
+ ret = PTR_ERR(owl_host->dma);
goto err_free_host;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 024acc1b0a2e..3a9333475a2b 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
- if (host->dma_chan_rx == NULL) {
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx)) {
dev_err(dev, "unable to request rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
goto out;
}
- host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
- if (host->dma_chan_tx == NULL) {
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx)) {
dev_err(dev, "unable to request tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
goto out;
}
@@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev)
}
/* FIXME: should we pass detection delay to debounce? */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_cd\n");
goto out;
}
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ if (!host->pdata->gpio_card_ro_invert)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
}
- if (!ret) {
+ if (!ret)
host->use_ro_gpio = true;
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
- }
if (host->pdata->init)
host->pdata->init(dev, pxamci_detect_irq, mmc);
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index c0504aa90857..f524251d5113 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -14,8 +14,8 @@
struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
- u32 tap; /* sampling clock position for SDR104 */
- u32 tap_hs400; /* sampling clock position for HS400 */
+ u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */
+ u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
struct renesas_sdhi_of_data {
@@ -33,6 +33,11 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct renesas_sdhi_quirks {
+ bool hs400_disabled;
+ bool hs400_4taps;
+};
+
struct tmio_mmc_dma {
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
@@ -46,6 +51,7 @@ struct renesas_sdhi {
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
+ const struct renesas_sdhi_quirks *quirks;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
void __iomem *scc_ctl;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 234551a68739..35cb24cd45b4 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -46,11 +46,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-struct renesas_sdhi_quirks {
- bool hs400_disabled;
- bool hs400_4taps;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
- if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+ if (priv->quirks && priv->quirks->hs400_4taps)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
host->tap_set / 2);
@@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
- bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
/*
* Skip checking SCC errors when running on 4 taps in HS400 mode as
@@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
};
static const struct soc_device_attribute sdhi_quirks_match[] = {
+ { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ /* Sentinel. */ },
};
@@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!priv)
return -ENOMEM;
+ priv->quirks = quirks;
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
@@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (quirks && quirks->hs400_disabled)
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
- if (quirks && quirks->hs400_4taps)
- mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400;
-
/* For some SoC, we disable internal WP. GPIO may override this */
if (mmc_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
@@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
- priv->scc_tappos_hs400 = taps->tap_hs400;
+ priv->scc_tappos_hs400 = use_4tap ?
+ taps->tap_hs400_4tap :
+ taps->tap;
hit = true;
break;
}
}
if (!hit)
- dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+ dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
host->init_tuning = renesas_sdhi_init_tuning;
host->prepare_tuning = renesas_sdhi_prepare_tuning;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 18839a10594c..47ac53e91241 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
.clk_rate = 0,
.tap = 0x00000300,
- .tap_hs400 = 0x00000704,
+ .tap_hs400_4tap = 0x00000100,
},
};
@@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
* Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
* implementation as others may use a different implementation.
*/
-static const struct soc_device_attribute soc_whitelist[] = {
- /* specific ones */
+static const struct soc_device_attribute soc_dma_quirks[] = {
{ .soc_id = "r7s9210",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
{ .soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
{ .soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
- /* generic ones */
- { .soc_id = "r8a774a1" },
- { .soc_id = "r8a774b1" },
- { .soc_id = "r8a774c0" },
- { .soc_id = "r8a77470" },
- { .soc_id = "r8a7795" },
- { .soc_id = "r8a7796" },
- { .soc_id = "r8a77965" },
- { .soc_id = "r8a77970" },
- { .soc_id = "r8a77980" },
- { .soc_id = "r8a77990" },
- { .soc_id = "r8a77995" },
{ /* sentinel */ }
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+ const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
struct device *dev = &pdev->dev;
- if (!soc)
- return -ENODEV;
-
- global_flags |= (unsigned long)soc->data;
+ if (soc)
+ global_flags |= (unsigned long)soc->data;
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index bce9c33bc4b5..1e616ae56b13 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
/* If we get -ENOENT we have no card detect GPIO line */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
ret);
return ret;
}
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 105e73d4a3b9..9651dca6863e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -719,7 +719,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
goto err_free;
}
- host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+ host->ioaddr = devm_ioremap(dev, iomem->start,
resource_size(iomem));
if (host->ioaddr == NULL) {
err = -ENOMEM;
@@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 73bb440aaf93..ad01f6451a95 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -9,29 +9,236 @@
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
-static const struct sdhci_ops sdhci_brcmstb_ops = {
+#define SDHCI_VENDOR 0x78
+#define SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
+#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
+
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+
+struct sdhci_brcmstb_priv {
+ void __iomem *cfg_regs;
+ bool has_cqe;
+};
+
+struct brcmstb_match_priv {
+ void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+ struct sdhci_ops *ops;
+ unsigned int flags;
+};
+
+static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ u32 reg;
+
+ dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n",
+ __func__);
+ reg = readl(host->ioaddr + SDHCI_VENDOR);
+ if (ios->enhanced_strobe)
+ reg |= SDHCI_VENDOR_ENHANCED_STRB;
+ else
+ reg &= ~SDHCI_VENDOR_ENHANCED_STRB;
+ writel(reg, host->ioaddr + SDHCI_VENDOR);
+}
+
+static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ sdhci_enable_clk(host, clk);
+}
+
+static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n",
+ __func__, timing);
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (timing == MMC_TIMING_SD_HS ||
+ timing == MMC_TIMING_MMC_HS ||
+ timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = {
+ .enable = sdhci_brcmstb_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_brcmstb_dumpregs,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
-static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct brcmstb_match_priv match_priv_7425 = {
+ .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+ BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
+static struct brcmstb_match_priv match_priv_7445 = {
+ .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static const struct brcmstb_match_priv match_priv_7216 = {
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_7216,
+};
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+ { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+ { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+ {},
+};
+
+static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ struct sdhci_brcmstb_priv *priv)
+{
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!priv->has_cqe)
+ return sdhci_add_host(host);
+
+ dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(mmc_dev(host->mmc),
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_brcmstb_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64) {
+ dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ }
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_brcmstb_probe(struct platform_device *pdev)
{
- struct sdhci_host *host;
+ const struct brcmstb_match_priv *match_priv;
+ struct sdhci_pltfm_data brcmstb_pdata;
struct sdhci_pltfm_host *pltfm_host;
+ const struct of_device_id *match;
+ struct sdhci_brcmstb_priv *priv;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ bool has_cqe = false;
struct clk *clk;
int res;
+ match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
+ match_priv = match->data;
+
+ dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
+
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_err(&pdev->dev, "Clock not found in Device Tree\n");
clk = NULL;
}
@@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (res)
return res;
- host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+ memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ has_cqe = true;
+ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+ }
+ brcmstb_pdata.ops = match_priv->ops;
+ host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+ sizeof(struct sdhci_brcmstb_priv));
if (IS_ERR(host)) {
res = PTR_ERR(host);
goto err_clk;
}
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->has_cqe = has_cqe;
+
+ /* Map in the non-standard CFG registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(priv->cfg_regs)) {
+ res = PTR_ERR(priv->cfg_regs);
+ goto err;
+ }
+
sdhci_get_of_property(pdev);
res = mmc_of_parse(host->mmc);
if (res)
goto err;
/*
+ * If the chip supports enhanced strobe and it is enabled, add the
+ * callback
+ */
+ if (match_priv->hs400es &&
+ (host->mmc->caps2 & MMC_CAP2_HS400_ES))
+ host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+
+ /*
* Supply the existing CAPS, but clear the UHS modes. This
* will allow these modes to be specified by device tree
* properties through mmc_of_parse().
*/
host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
host->caps &= ~SDHCI_CAN_64BIT;
host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_DDR50);
- host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- res = sdhci_add_host(host);
+ res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
- pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
return res;
@@ -79,11 +314,15 @@ err_clk:
return res;
}
-static const struct of_device_id sdhci_brcm_of_match[] = {
- { .compatible = "brcm,bcm7425-sdhci" },
- { .compatible = "brcm,bcm7445-sdhci" },
- {},
-};
+static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sdhci_pltfm_unregister(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
static struct platform_driver sdhci_brcmstb_driver = {
@@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = {
},
.probe = sdhci_brcmstb_probe,
.remove = sdhci_pltfm_unregister,
+ .shutdown = sdhci_brcmstb_shutdown,
};
module_platform_driver(sdhci_brcmstb_driver);
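The CQE attach sequence used by sdhci_brcmstb_add_host() above, reduced to its skeleton (function names as in the driver; the wrapper itself is hypothetical):

static int example_cqe_add_host(struct sdhci_host *host,
				struct cqhci_host *cq_host, bool dma64)
{
	int ret;

	ret = sdhci_setup_host(host);		/* 1: read caps, allocate */
	if (ret)
		return ret;

	ret = cqhci_init(cq_host, host->mmc, dma64);	/* 2: attach the CQE */
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);		/* 3: register with the core */
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);		/* undoes sdhci_setup_host() */
	return ret;
}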
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index ae0ec27dd7cc..5827d3751b81 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
return 0;
}
-static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+static void *sdhci_cdns_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1c988d6a2433..382f25b2fa45 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_100mhz;
struct pinctrl_state *pins_200mhz;
const struct esdhc_soc_data *socdata;
@@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
if (IS_ERR(imx_data->pinctrl) ||
- IS_ERR(imx_data->pins_default) ||
IS_ERR(imx_data->pins_100mhz) ||
IS_ERR(imx_data->pins_200mhz))
return -EINVAL;
@@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
break;
default:
/* back to default state for other legacy timing */
- pinctrl = imx_data->pins_default;
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
@@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
- if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
+ if (esdhc_is_usdhc(imx_data)) {
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_100MHZ);
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
return err;
}
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
@@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_ahb_clk;
}
- imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(imx_data->pins_default))
- dev_warn(mmc_dev(host->mmc), "could not get default state\n");
-
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index a1aa21b9ae1c..92f30a1db435 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ret = 0;
struct f_sdhost_priv *priv;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: no irq specified\n", __func__);
+ if (irq < 0)
return irq;
- }
host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
@@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
host->ops = &sdhci_milbeaut_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index b75c82d8d6c1..c3a160c18047 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -15,6 +15,7 @@
#include <linux/regulator/consumer.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
@@ -99,7 +100,7 @@
#define CORE_PWRSAVE_DLL BIT(3)
-#define DDR_CONFIG_POR_VAL 0x80040853
+#define DDR_CONFIG_POR_VAL 0x80040873
#define INVALID_TUNING_PHASE -1
@@ -122,6 +123,10 @@
#define msm_host_writel(msm_host, val, host, offset) \
msm_host->var_ops->msm_writel_relaxed(val, host, offset)
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1 0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
@@ -148,8 +153,9 @@ struct sdhci_msm_offset {
u32 core_ddr_200_cfg;
u32 core_vendor_spec3;
u32 core_dll_config_2;
+ u32 core_dll_config_3;
+ u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
u32 core_ddr_config;
- u32 core_ddr_config_2;
};
static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
@@ -177,8 +183,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
.core_ddr_200_cfg = 0x224,
.core_vendor_spec3 = 0x250,
.core_dll_config_2 = 0x254,
- .core_ddr_config = 0x258,
- .core_ddr_config_2 = 0x25c,
+ .core_dll_config_3 = 0x258,
+ .core_ddr_config = 0x25c,
};
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
@@ -207,8 +213,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
.core_ddr_200_cfg = 0x184,
.core_vendor_spec3 = 0x1b0,
.core_dll_config_2 = 0x1b4,
- .core_ddr_config = 0x1b8,
- .core_ddr_config_2 = 0x1bc,
+ .core_ddr_config_old = 0x1b8,
+ .core_ddr_config = 0x1bc,
};
struct sdhci_msm_variant_ops {
@@ -253,6 +259,7 @@ struct sdhci_msm_host {
const struct sdhci_msm_offset *offset;
bool use_cdr;
u32 transfer_mode;
+ bool updated_ddr_cfg;
};
static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
@@ -924,8 +931,10 @@ out:
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
struct mmc_host *mmc = host->mmc;
- u32 dll_status, config;
+ u32 dll_status, config, ddr_cfg_offset;
int ret;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_msm_offset *msm_offset =
sdhci_priv_msm_offset(host);
@@ -938,8 +947,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
* bootloaders. In the future, if this changes, then the desired
* values will need to be programmed appropriately.
*/
- writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
- msm_offset->core_ddr_config);
+ if (msm_host->updated_ddr_cfg)
+ ddr_cfg_offset = msm_offset->core_ddr_config;
+ else
+ ddr_cfg_offset = msm_offset->core_ddr_config_old;
+ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
if (mmc->ios.enhanced_strobe) {
config = readl_relaxed(host->ioaddr +
@@ -1560,6 +1572,127 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*****************************************************************************\
+ * *
+ * MSM Command Queue Engine (CQE) *
+ * *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+ return 0;
+}
+
+void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 ctrl;
+
+ /*
+ * When CQE is halted, the legacy SDHCI path operates only
+ * on 16-byte descriptors in 64-bit mode.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 16;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * During CQE command transfers, the command complete bit gets
+ * latched. Software should therefore clear the command complete
+ * interrupt status when CQE is either halted or disabled; otherwise
+ * an unexpected SDHCI legacy interrupt is triggered.
+ */
+ ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+ ctrl |= SDHCI_INT_RESPONSE;
+ sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ u32 cqcfg;
+ int ret;
+
+ /*
+ * When CQE is halted, the SDHC operates only on 16-byte ADMA
+ * descriptors, so ensure the ADMA table is allocated for them.
+ */
+ if (host->caps & SDHCI_CAN_64BIT)
+ host->alloc_desc_sz = 16;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = cqhci_pltfm_init(pdev);
+ if (IS_ERR(cq_host)) {
+ ret = PTR_ERR(cq_host);
+ dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+ goto cleanup;
+ }
+
+ msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host->ops = &sdhci_msm_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ goto cleanup;
+ }
+
+ /* Disable CQE reset triggered by the CQE enable signal */
+ cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+ cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+ cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+ /*
+ * The SDHC expects 12-byte ADMA descriptors until CQE is enabled,
+ * so limit desc_sz to 12 so that data commands sent during card
+ * initialization (before CQE is enabled) execute without any
+ * issues.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 12;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&pdev->dev, "%s: CQE init: success\n",
+ mmc_hostname(host->mmc));
+ return ret;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
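The descriptor-size handling above pairs with the alloc_desc_sz bookkeeping added to sdhci.c later in this diff: the ADMA table is sized for the largest descriptor the host may ever use, while desc_sz tracks the size currently in effect. A hedged sketch (field names from this patch, sizes from the comments above):

	/* Probe: allocate for the worst case, 16-byte descriptors
	 * (needed whenever CQE is halted on a 64-bit DMA host). */
	host->alloc_desc_sz = 16;

	/* Runtime, before CQE is enabled: the controller expects
	 * 12-byte descriptors, so only the live size shrinks. */
	host->desc_sz = 12;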
+
/*
* Platform specific register write functions. This is so that, if any
* register write needs to be followed up by platform specific actions,
@@ -1724,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb,
+ .irq = sdhci_msm_cqe_irq,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1739,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
- struct resource *core_memres;
struct clk *clk;
int ret;
u16 host_version, core_minor;
@@ -1747,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
u8 core_major;
const struct sdhci_msm_offset *msm_offset;
const struct sdhci_msm_variant_info *var_info;
+ struct device_node *node = pdev->dev.of_node;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
@@ -1840,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
if (!msm_host->mci_removed) {
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
- core_memres);
-
+ msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(msm_host->core_mem)) {
ret = PTR_ERR(msm_host->core_mem);
goto clk_disable;
@@ -1899,6 +2030,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_offset->core_vendor_spec_capabilities0);
}
+ if (core_major == 1 && core_minor >= 0x49)
+ msm_host->updated_ddr_cfg = true;
+
/*
* Power on reset state may trigger power irq if previous status of
* PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
@@ -1942,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
- ret = sdhci_add_host(host);
+ if (of_property_read_bool(node, "supports-cqe"))
+ ret = sdhci_msm_cqe_add_host(host, pdev);
+ else
+ ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
sdhci_msm_set_regulator_caps(msm_host);
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 5959e394b416..ab2bd314a390 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -33,7 +33,14 @@
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
+struct sdhci_at91_soc_data {
+ const struct sdhci_pltfm_data *pdata;
+ bool baseclk_is_generated_internally;
+ unsigned int divider_for_baseclk;
+};
+
struct sdhci_at91_priv {
+ const struct sdhci_at91_soc_data *soc_data;
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
@@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_power = sdhci_at91_set_power,
};
-static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
.ops = &sdhci_at91_sama5d2_ops,
};
+static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = false,
+};
+
+static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = true,
+ .divider_for_baseclk = 2,
+};
+
static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+ { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
@@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
- int ret;
unsigned int caps0, caps1;
unsigned int clk_base, clk_mul;
- unsigned int gck_rate, real_gck_rate;
+ unsigned int gck_rate, clk_base_rate;
unsigned int preset_div;
- /*
- * The mult clock is provided by as a generated clock by the PMC
- * controller. In order to set the rate of gck, we have to get the
- * base clock rate and the clock mult from capabilities.
- */
clk_prepare_enable(priv->hclock);
caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
- clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
- clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
- gck_rate = clk_base * 1000000 * (clk_mul + 1);
- ret = clk_set_rate(priv->gck, gck_rate);
- if (ret < 0) {
- dev_err(dev, "failed to set gck");
- clk_disable_unprepare(priv->hclock);
- return ret;
- }
- /*
- * We need to check if we have the requested rate for gck because in
- * some cases this rate could be not supported. If it happens, the rate
- * is the closest one gck can provide. We have to update the value
- * of clk mul.
- */
- real_gck_rate = clk_get_rate(priv->gck);
- if (real_gck_rate != gck_rate) {
- clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
- caps1 &= (~SDHCI_CLOCK_MUL_MASK);
- caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) &
- SDHCI_CLOCK_MUL_MASK);
- /* Set capabilities in r/w mode. */
- writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN,
- host->ioaddr + SDMMC_CACR);
- writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
- /* Set capabilities in ro mode. */
- writel(0, host->ioaddr + SDMMC_CACR);
- dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n",
- clk_mul, real_gck_rate);
- }
+
+ gck_rate = clk_get_rate(priv->gck);
+ if (priv->soc_data->baseclk_is_generated_internally)
+ clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
+ else
+ clk_base_rate = clk_get_rate(priv->mainck);
+
+ clk_base = clk_base_rate / 1000000;
+ clk_mul = gck_rate / clk_base_rate - 1;
+
+ caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+ caps1 &= ~SDHCI_CLOCK_MUL_MASK;
+ caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+ /* Set capabilities in r/w mode. */
+ writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+ writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
+ writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+ /* Set capabilities in ro mode. */
+ writel(0, host->ioaddr + SDMMC_CACR);
+
+ dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+ clk_mul, gck_rate, clk_base_rate);
/*
* We have to set preset values because it depends on the clk_mul
@@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
* maximum sd clock value is 120 MHz instead of 208 MHz. For that
* reason, we need to use presets to support SDR104.
*/
- preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR12);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR25);
- preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR50);
- preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR104);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_DDR50);
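A worked check of the preset arithmetic with an illustrative rate (not taken from the patch): for gck_rate = 240 MHz, the SDR104 target of 120 MHz gives preset_div = DIV_ROUND_UP(240000000, 120000000) - 1 = 1, i.e. 240 MHz / (1 + 1) = 120 MHz, while the SDR12 target of 24 MHz gives preset_div = 10 - 1 = 9, i.e. 240 MHz / 10 = 24 MHz. Rounding up guarantees the divided rate never exceeds the target.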
@@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
static int sdhci_at91_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- const struct sdhci_pltfm_data *soc_data;
+ const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
@@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
+ priv->soc_data = soc_data;
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
- dev_err(&pdev->dev, "failed to get baseclk\n");
- return PTR_ERR(priv->mainck);
+ if (soc_data->baseclk_is_generated_internally) {
+ priv->mainck = NULL;
+ } else {
+ dev_err(&pdev->dev, "failed to get baseclk\n");
+ ret = PTR_ERR(priv->mainck);
+ goto sdhci_pltfm_free;
+ }
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
if (IS_ERR(priv->hclock)) {
dev_err(&pdev->dev, "failed to get hclock\n");
- return PTR_ERR(priv->hclock);
+ ret = PTR_ERR(priv->hclock);
+ goto sdhci_pltfm_free;
}
priv->gck = devm_clk_get(&pdev->dev, "multclk");
if (IS_ERR(priv->gck)) {
dev_err(&pdev->dev, "failed to get multclk\n");
- return PTR_ERR(priv->gck);
+ ret = PTR_ERR(priv->gck);
+ goto sdhci_pltfm_free;
}
ret = sdhci_at91_set_clks_presets(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 5cca3fa4610b..5d8dd870bd44 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -80,6 +80,7 @@ struct sdhci_esdhc {
bool quirk_tuning_erratum_type1;
bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
+ bool quirk_delay_before_data_reset;
bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
@@ -172,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
u16 ret;
int shift = (spec_reg & 0x2) * 8;
+ if (spec_reg == SDHCI_TRANSFER_MODE)
+ return pltfm_host->xfer_mode_shadow;
+
if (spec_reg == SDHCI_HOST_VERSION)
ret = value & 0xffff;
else
@@ -561,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
- u32 val;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
ktime_t timeout;
+ u32 val, clk_en;
+
+ clk_en = ESDHC_CLOCK_SDCLKEN;
+
+ /*
+ * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
+ * is 2.2 or lower.
+ */
+ if (esdhc->vendor_ver <= VENDOR_V_22)
+ clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+ ESDHC_CLOCK_PEREN);
val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
+ val |= clk_en;
else
- val &= ~ESDHC_CLOCK_SDCLKEN;
+ val &= ~clk_en;
sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
- /* Wait max 20 ms */
+ /*
+ * Wait up to 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock stable bit, which does not exist.
+ */
timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (1) {
+ while (esdhc->vendor_ver > VENDOR_V_22) {
bool timedout = ktime_after(ktime_get(), timeout);
- if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
}
- udelay(10);
+ usleep_range(10, 20);
}
}
@@ -620,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- int pre_div = 1;
- int div = 1;
- int division;
+ unsigned int pre_div = 1, div = 1;
+ unsigned int clock_fixup = 0;
ktime_t timeout;
- long fixup = 0;
u32 temp;
- host->mmc->actual_clock = 0;
-
if (clock == 0) {
+ host->mmc->actual_clock = 0;
esdhc_clock_enable(host, false);
return;
}
- /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ /* Start pre_div at 2 for vendor version < 2.3. */
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
+ /* Apply the platform clock fixup, if any. */
if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
- esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
- fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+ esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+ clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
else if (esdhc->clk_fixup)
- fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
+ clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
- if (fixup && clock > fixup)
- clock = fixup;
+ if (clock_fixup == 0 || clock < clock_fixup)
+ clock_fixup = clock;
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
- ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
-
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ /* Calculate pre_div and div. */
+ while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
pre_div *= 2;
- while (host->max_clk / pre_div / div > clock && div < 16)
+ while (host->max_clk / pre_div / div > clock_fixup && div < 16)
div++;
+ esdhc->div_ratio = pre_div * div;
+
+ /* Quirk: limit the clock division for the HS400 200 MHz clock. */
if (esdhc->quirk_limited_clk_division &&
clock == MMC_HS200_MAX_DTR &&
(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
host->flags & SDHCI_HS400_TUNING)) {
- division = pre_div * div;
- if (division <= 4) {
+ if (esdhc->div_ratio <= 4) {
pre_div = 4;
div = 1;
- } else if (division <= 8) {
+ } else if (esdhc->div_ratio <= 8) {
pre_div = 4;
div = 2;
- } else if (division <= 12) {
+ } else if (esdhc->div_ratio <= 12) {
pre_div = 4;
div = 3;
} else {
pr_warn("%s: using unsupported clock division.\n",
mmc_hostname(host->mmc));
}
+ esdhc->div_ratio = pre_div * div;
}
+ host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
+
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
- host->mmc->actual_clock = host->max_clk / pre_div / div;
- esdhc->div_ratio = pre_div * div;
+ clock, host->mmc->actual_clock);
+
+ /* Program the clock divider into the register. */
pre_div >>= 1;
div--;
+ esdhc_clock_enable(host, false);
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | (div << ESDHC_DIVIDER_SHIFT)
- | (pre_div << ESDHC_PREDIV_SHIFT));
+ temp &= ~ESDHC_CLOCK_MASK;
+ temp |= ((div << ESDHC_DIVIDER_SHIFT) |
+ (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ /*
+ * Wait up to 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock stable bit, which does not exist.
+ */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (esdhc->vendor_ver > VENDOR_V_22) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ /* Additional setting for HS400. */
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
clock == MMC_HS200_MAX_DTR) {
temp = sdhci_readl(host, ESDHC_TBCTL);
@@ -710,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
esdhc_clock_enable(host, false);
esdhc_flush_async_fifo(host);
}
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- while (1) {
- bool timedout = ktime_after(ktime_get(), timeout);
-
- if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
- break;
- if (timedout) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- udelay(10);
- }
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= ESDHC_CLOCK_SDCLKEN;
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ esdhc_clock_enable(host, true);
}
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
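A worked pass through the divider loops above, with illustrative numbers: take host->max_clk = 200 MHz, clock_fixup = 25 MHz, and a vendor version >= 2.3 (so pre_div starts at 1). The first loop leaves pre_div = 1, since 200 MHz / 1 / 16 = 12.5 MHz is already at or below the target; the second loop advances div until 200 MHz / 1 / 8 = 25 MHz is no longer above 25 MHz, giving div = 8, div_ratio = 8 and actual_clock = 25 MHz exactly.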
@@ -757,21 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u32 val;
+ u32 val, bus_width = 0;
+
+ /*
+ * Quirk: add a delay to make sure all DMA transfers have
+ * finished.
+ */
+ if (esdhc->quirk_delay_before_data_reset &&
+ (mask & SDHCI_RESET_DATA) &&
+ (host->flags & SDHCI_REQ_USE_DMA))
+ mdelay(5);
+
+ /*
+ * For a data reset on eSDHC with vendor version 2.2 or
+ * lower, save the bus-width setting first.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+ }
sdhci_reset(host, mask);
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ /*
+ * After a data reset on eSDHC with vendor version 2.2 or lower,
+ * restore the bus-width setting and the interrupt registers.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+ val |= bus_width;
+ sdhci_writel(host, val, ESDHC_PROCTL);
- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
- mdelay(5);
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
- if (mask & SDHCI_RESET_ALL) {
+ /*
+ * On a full reset, some bits have to be cleared manually for
+ * eSDHC whose spec version is 3.0 or higher.
+ */
+ if ((mask & SDHCI_RESET_ALL) &&
+ (esdhc->spec_ver >= SDHCI_SPEC_300)) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
+ /*
+ * Quirk: initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL]
+ * to 0.
+ */
if (esdhc->quirk_unreliable_pulse_detection) {
val = sdhci_readl(host, ESDHC_DLLCFG1);
val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
@@ -851,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
- { .family = "QorIQ T1023", .revision = "1.0", },
- { .family = "QorIQ T1040", .revision = "1.0", },
- { .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ LS1021A", .revision = "1.0", },
+ { .family = "QorIQ T1023", },
+ { .family = "QorIQ T1040", },
+ { .family = "QorIQ T2080", },
+ { .family = "QorIQ LS1021A", },
{ },
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
- { .family = "QorIQ LS1012A", .revision = "1.0", },
- { .family = "QorIQ LS1043A", .revision = "1.*", },
- { .family = "QorIQ LS1046A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
- { .family = "QorIQ LA1575A", .revision = "1.0", },
+ { .family = "QorIQ LS1012A", },
+ { .family = "QorIQ LS1043A", },
+ { .family = "QorIQ LS1046A", },
+ { .family = "QorIQ LS1080A", },
+ { .family = "QorIQ LS2080A", },
+ { .family = "QorIQ LA1575A", },
{ },
};
@@ -885,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
-static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u8 tbstat_15_8, tbstat_7_0;
u32 val;
- if (esdhc->quirk_tuning_erratum_type1) {
- *window_start = 5 * esdhc->div_ratio;
- *window_end = 3 * esdhc->div_ratio;
- return;
- }
-
/* Write TBCTL[11:8]=4'h8 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~(0xf << 8);
@@ -917,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
val = sdhci_readl(host, ESDHC_TBSTAT);
val = sdhci_readl(host, ESDHC_TBSTAT);
+ *window_end = val & 0xff;
+ *window_start = (val >> 8) & 0xff;
+}
+
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 start_ptr, end_ptr;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
+
/* Reset data lines by setting ESDHCCTL[RSTD] */
sdhci_reset(host, SDHCI_RESET_DATA);
/* Write 32'hFFFF_FFFF to IRQSTAT register */
sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
- /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
- * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
+ * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
* then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
* and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
*/
- tbstat_7_0 = val & 0xff;
- tbstat_15_8 = (val >> 8) & 0xff;
- if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
*window_start = 8 * esdhc->div_ratio;
*window_end = 4 * esdhc->div_ratio;
} else {
@@ -1003,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret)
break;
+ /* On platforms affected by the type2 tuning erratum, tuning may
+ * report success even though the eSDHC has not tuned properly,
+ * so check the tuning window.
+ */
+ if (esdhc->quirk_tuning_erratum_type2 &&
+ !host->tuning_err) {
+ esdhc_tuning_window_ptr(host, &window_start,
+ &window_end);
+ if (abs(window_start - window_end) >
+ (4 * esdhc->div_ratio + 2))
+ host->tuning_err = -EAGAIN;
+ }
+
/* If HW tuning fails and triggers erratum,
* try workaround.
*/
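Concretely, with div_ratio = 4 the window check above treats a tuning window wider than 4 * 4 + 2 = 18 taps, i.e. |TBSTAT[15:8] - TBSTAT[7:0]| > 18, as implausible and sets host->tuning_err = -EAGAIN so the result is not silently trusted.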
@@ -1221,6 +1299,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
if (match)
esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
+
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
+ esdhc->quirk_delay_before_data_reset = true;
+
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
/*
@@ -1231,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
* 1/2 peripheral clock.
*/
if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
+ of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
+ of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
esdhc->peripheral_clock = clk_get_rate(clk) / 2;
else
esdhc->peripheral_clock = clk_get_rate(clk);
@@ -1303,8 +1386,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
- host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
- host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 083e7e053c95..882053151a47 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -85,6 +86,7 @@
/* sdhci-omap controller flags */
#define SDHCI_OMAP_REQUIRE_IODELAY BIT(0)
+#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
u32 offset;
@@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
- reg |= CON_DMA_MASTER;
+ reg &= ~CON_DMA_MASTER;
+ /* Switch to DMA slave mode when using external DMA */
+ if (!host->use_external_dma)
+ reg |= CON_DMA_MASTER;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
return 0;
@@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
sdhci_omap_start_clock(omap_host);
}
+#define MMC_TIMEOUT_US 20000 /* 20000 microseconds */
static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long limit = MMC_TIMEOUT_US;
+ unsigned long i = 0;
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
mask &= ~SDHCI_RESET_DATA;
+ if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+ while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
+ (i++ < limit))
+ udelay(1);
+ i = 0;
+ while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
+ (i++ < limit))
+ udelay(1);
+
+ if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
+ dev_err(mmc_dev(host->mmc),
+ "Timeout waiting on controller reset in %s\n",
+ __func__);
+ return;
+ }
+
sdhci_reset(host, mask);
}
@@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
return intmask;
}
+static void sdhci_omap_set_timeout(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_ERASE)
+ sdhci_set_data_timeout_irq(host, false);
+
+ __sdhci_set_timeout(host, cmd);
+}
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
@@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = {
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
.irq = sdhci_omap_irq,
+ .set_timeout = sdhci_omap_set_timeout,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = {
.offset = 0x200,
};
+static const struct sdhci_omap_data am335_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data am437_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data dra7_data = {
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
@@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = {
static const struct of_device_id omap_sdhci_match[] = {
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
+ { .compatible = "ti,am335-sdhci", .data = &am335_data },
+ { .compatible = "ti,am437-sdhci", .data = &am437_data },
{},
};
MODULE_DEVICE_TABLE(of, omap_sdhci_match);
@@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct sdhci_omap_data *data;
const struct soc_device_attribute *soc;
+ struct resource *regs;
match = of_match_device(omap_sdhci_match, dev);
if (!match)
@@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
}
offset = data->offset;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
sizeof(*omap_host));
if (IS_ERR(host)) {
@@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
host->ioaddr += offset;
+ host->mapbase = regs->start + offset;
mmc = host->mmc;
sdhci_get_of_property(pdev);
@@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
+ /* Switch to external DMA only if the "dmas" property is present */
+ if (of_find_property(dev->of_node, "dmas", NULL))
+ sdhci_switch_external_dma(host, true);
+
ret = sdhci_setup_host(host);
if (ret)
goto err_put_sync;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index acefb76b4e15..525de2454a4d 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -27,6 +27,7 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
@@ -783,11 +784,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
+{
+ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+ dmi_match(DMI_BIOS_VENDOR, "LENOVO");
+}
+
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
- slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+ if (!glk_broken_cqhci(slot))
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
@@ -1983,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
- slot->cd_override_level, 0, NULL);
+ slot->cd_override_level, 0);
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
- 0, NULL);
+ 0);
if (ret == -EPROBE_DEFER)
goto remove;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 51e096f27388..64200c78e90d 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -117,7 +117,6 @@ struct sdhci_s3c {
struct s3c_sdhci_platdata *pdata;
int cur_clk;
int ext_cd_irq;
- int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_s3c *sc;
- struct resource *res;
int ret, irq, ptr, clks;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_pdata_io_clk;
} else {
memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
- sc->ext_cd_gpio = -1; /* invalid gpio number */
}
drv_data = sdhci_s3c_get_driver_data(pdev);
@@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
goto err_no_busclks;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index e43143223320..f4b05dd6c20a 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* We must request the IRQ after sdhci_add_host(), as the tasklet only
* gets setup in sdhci_add_host() and we oops.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto err_request_cd;
if (!ret)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 916b5b09c3d1..b4b63089a4e2 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
static int sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct resource *iomem;
struct spear_sdhci *sdhci;
struct device *dev;
int ret;
@@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev)
goto err;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
@@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev)
* It is optional to use GPIOs for sdhci card detection. If we
* find a descriptor using slot GPIO, we use it.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto disable_clk;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7bc950520fd9..403ac44a7378 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3140fe2e5dba..63db84481dff 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -10,6 +10,7 @@
*/
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
@@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
if (enable)
host->ier |= SDHCI_INT_DATA_TIMEOUT;
@@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- u8 count;
-
- if (host->ops->set_timeout) {
- host->ops->set_timeout(host, cmd);
- } else {
- bool too_big = false;
-
- count = sdhci_calc_timeout(host, cmd, &too_big);
-
- if (too_big &&
- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
- sdhci_calc_sw_timeout(host, cmd);
- sdhci_set_data_timeout_irq(host, false);
- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
- sdhci_set_data_timeout_irq(host, true);
- }
-
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+ bool too_big = false;
+ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+ if (too_big &&
+ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+ sdhci_calc_sw_timeout(host, cmd);
+ sdhci_set_data_timeout_irq(host, false);
+ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+ sdhci_set_data_timeout_irq(host, true);
}
+
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- struct mmc_data *data = cmd->data;
-
- host->data_timeout = 0;
-
- if (sdhci_data_line_cmd(cmd))
- sdhci_set_timeout(host, cmd);
-
- if (!data)
- return;
+ if (host->ops->set_timeout)
+ host->ops->set_timeout(host, cmd);
+ else
+ __sdhci_set_timeout(host, cmd);
+}
+static void sdhci_initialize_data(struct sdhci_host *host,
+ struct mmc_data *data)
+{
WARN_ON(host->data);
/* Sanity checks */
@@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
host->data = data;
host->data_early = 0;
host->data->bytes_xfered = 0;
+}
+
+static inline void sdhci_set_block_info(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host,
+ SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+ SDHCI_BLOCK_SIZE);
+ /*
+ * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
+ * can be supported; in that case the 16-bit block count register must be 0.
+ */
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+ } else {
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
@@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_set_transfer_irqs(host);
- /* Set the DMA boundary value and block size */
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_block_info(host, data);
+}
- /*
- * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
- * can be supported, in that case 16-bit block count register must be 0.
- */
- if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
- (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
- if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
- sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
- sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+
+static int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
+
+ host->tx_chan = dma_request_chan(mmc->parent, "tx");
+ if (IS_ERR(host->tx_chan)) {
+ ret = PTR_ERR(host->tx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request TX DMA channel.\n");
+ host->tx_chan = NULL;
+ return ret;
+ }
+
+ host->rx_chan = dma_request_chan(mmc->parent, "rx");
+ if (IS_ERR(host->rx_chan)) {
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ ret = PTR_ERR(host->rx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request RX DMA channel.\n");
+ host->rx_chan = NULL;
+ }
+
+ return ret;
+}
+
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static int sdhci_external_dma_setup(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ int ret, i;
+ enum dma_transfer_direction dir;
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = cmd->data;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ dma_cookie_t cookie;
+ int sg_cnt;
+
+ if (!host->mapbase)
+ return -EINVAL;
+
+ cfg.src_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ /* Sanity check: all the SG entries must be aligned to the block size. */
+ for (i = 0; i < data->sg_len; i++) {
+ if ((data->sg + i)->length % data->blksz)
+ return -EINVAL;
+ }
+
+ chan = sdhci_external_dma_channel(host, data);
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ret;
+
+ sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+ if (sg_cnt <= 0)
+ return -EINVAL;
+
+ dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = NULL;
+ desc->callback_param = NULL;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ ret = cookie;
+
+ return ret;
+}
+
+static void sdhci_external_dma_release(struct sdhci_host *host)
+{
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ if (host->rx_chan) {
+ dma_release_channel(host->rx_chan);
+ host->rx_chan = NULL;
+ }
+
+ sdhci_switch_external_dma(host, false);
+}
+
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
+
+ host->flags |= SDHCI_REQ_USE_DMA;
+ sdhci_set_transfer_irqs(host);
+
+ sdhci_set_block_info(host, data);
+}
+
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (!sdhci_external_dma_setup(host, cmd)) {
+ __sdhci_external_dma_prepare_data(host, cmd);
} else {
- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ sdhci_external_dma_release(host);
+ pr_err("%s: Cannot use external DMA, switching to the DMA/PIO that standard SDHCI provides.\n",
+ mmc_hostname(host->mmc));
+ sdhci_prepare_data(host, cmd);
}
}
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct dma_chan *chan;
+
+ if (!cmd->data)
+ return;
+
+ chan = sdhci_external_dma_channel(host, cmd->data);
+ if (chan)
+ dma_async_issue_pending(chan);
+}
+
+#else
+
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ /* This should never happen */
+ WARN_ON_ONCE(1);
+}
+
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+}
+
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return NULL;
+}
+
+#endif
+
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
+{
+ host->use_external_dma = en;
+}
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
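The external-DMA helpers above follow the stock dmaengine slave sequence; a condensed sketch of that flow (all calls are the standard dmaengine API, dev and cfg illustrative):

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "tx");	/* ERR_PTR() on failure */
	dmaengine_slave_config(chan, &cfg);	/* FIFO address, widths, bursts */
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	cookie = dmaengine_submit(desc);	/* queue the transfer */
	dma_async_issue_pending(chan);		/* and start it */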
+
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
struct mmc_request *mrq)
{
@@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
int i;
- if (host->cmd && host->cmd->mrq == mrq)
- host->cmd = NULL;
-
- if (host->data_cmd && host->data_cmd->mrq == mrq)
- host->data_cmd = NULL;
-
- if (host->data && host->data->mrq == mrq)
- host->data = NULL;
-
- if (sdhci_needs_reset(host, mrq))
- host->pending_reset = true;
-
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
if (host->mrqs_done[i] == mrq) {
WARN_ON(1);
@@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
}
WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
+
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
+ sdhci_set_mrq_done(host, mrq);
sdhci_del_timer(host, mrq);
@@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host)
/*
* Need to send CMD12 if -
- * a) open-ended multiblock transfer (no CMD23)
+ * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
* b) error in multiblock transfer
*/
if (data->stop &&
- (data->error ||
- !data->mrq->sbc)) {
+ ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
+ data->error)) {
/*
* 'cap_cmd_during_tfr' request must not use the command line
* after mmc_command_done() has been called. It is upper layer's
@@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
}
host->cmd = cmd;
+ host->data_timeout = 0;
if (sdhci_data_line_cmd(cmd)) {
WARN_ON(host->data_cmd);
host->data_cmd = cmd;
+ sdhci_set_timeout(host, cmd);
}
- sdhci_prepare_data(host, cmd);
+ if (cmd->data) {
+ if (host->use_external_dma)
+ sdhci_external_dma_prepare_data(host, cmd);
+ else
+ sdhci_prepare_data(host, cmd);
+ }
sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
@@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout += 10 * HZ;
sdhci_mod_timer(host, cmd->mrq, timeout);
+ if (host->use_external_dma)
+ sdhci_external_dma_pre_transfer(host, cmd);
+
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_led_activate(host);
- /*
- * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
- * requests if Auto-CMD12 is enabled.
- */
- if (sdhci_auto_cmd12(host, mrq)) {
- if (mrq->stop) {
- mrq->data->stop = NULL;
- mrq->stop = NULL;
- }
- }
-
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
mrq->cmd->error = -ENOMEDIUM;
sdhci_finish_mrq(host, mrq);
@@ -1882,9 +2077,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
else if (timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- else if (timing == MMC_TIMING_SD_HS ||
- timing == MMC_TIMING_MMC_HS ||
- timing == MMC_TIMING_UHS_SDR25)
+ else if (timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
else if (timing == MMC_TIMING_UHS_SDR50)
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
@@ -2419,8 +2612,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
sdhci_send_tuning(host, opcode);
if (!host->tuning_done) {
- pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
- mmc_hostname(host->mmc));
+ pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
sdhci_abort_tuning(host, opcode);
return -ETIMEDOUT;
}
@@ -2663,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
if (host->flags & SDHCI_REQ_USE_DMA) {
struct mmc_data *data = mrq->data;
+ if (host->use_external_dma && data &&
+ (mrq->cmd->error || data->error)) {
+ struct dma_chan *chan = sdhci_external_dma_channel(host, data);
+
+ host->mrqs_done[i] = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ dmaengine_terminate_sync(chan);
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_set_mrq_done(host, mrq);
+ }
+
if (data && data->host_cookie == COOKIE_MAPPED) {
if (host->bounce_buffer) {
/*
@@ -3769,6 +3973,9 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc_hostname(mmc), host->version);
}
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
+ mmc->caps2 &= ~MMC_CAP2_CQE;
+
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@@ -3795,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host)
if (sdhci_can_64bit_dma(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
+ if (host->use_external_dma) {
+ ret = sdhci_external_dma_init(host);
+ if (ret == -EPROBE_DEFER)
+ goto unreg;
+ /*
+ * Fall back to using the DMA/PIO integrated in standard SDHCI
+ * instead of external DMA devices.
+ */
+ else if (ret)
+ sdhci_switch_external_dma(host, false);
+ /* Disable internal DMA sources */
+ else
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+ }
+
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->set_dma_mask)
ret = host->ops->set_dma_mask(host);
@@ -3821,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host)
dma_addr_t dma;
void *buf;
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_64_DESC_SZ(host);
- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
- } else {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_32_DESC_SZ;
- host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- }
+ if (!(host->flags & SDHCI_USE_64_BIT_DMA))
+ host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ else if (!host->alloc_desc_sz)
+ host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+
+ host->desc_sz = host->alloc_desc_sz;
+ host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
/*
@@ -3912,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * The Divided Clock Mode minimum clock rate is always less than
+ * the Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
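Illustrative numbers for why the divided-mode minimum is the safe choice here: with host->max_clk = 200 MHz and clk_mul = 4, the removed programmable-mode formula gave f_min = (200 MHz * 4) / 1024, roughly 781 kHz, whereas divided mode gives f_min = 200 MHz / 2046 (SDHCI_MAX_DIV_SPEC_300), roughly 98 kHz; the divided-mode value is always the lower of the two, so reporting it widens the usable range.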
@@ -4275,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host)
dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, host->align_buffer,
host->align_addr);
+
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
@@ -4320,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host)
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ host->use_external_dma ? "External DMA" :
(host->flags & SDHCI_USE_ADMA) ?
(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -4408,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
host->adma_table_sz, host->align_buffer,
host->align_addr);
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
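One subtlety in the sdhci_request_done() hunk above: dmaengine_terminate_sync() waits for any running completion callbacks and may sleep, so it cannot be called under host->lock. Hence the drop-and-retake pattern, sketched:

	spin_unlock_irqrestore(&host->lock, flags);
	dmaengine_terminate_sync(chan);		/* may sleep */
	spin_lock_irqsave(&host->lock, flags);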
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0ed3e0eaef5f..a6a3ddcf97e7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -409,6 +409,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
/* Controller reports inverted write-protect state */
#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+/* Controller has unusable command queue engine */
+#define SDHCI_QUIRK_BROKEN_CQE (1<<17)
/* Controller does not like fast PIO transfers */
#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
/* Controller does not have a LED */
@@ -485,6 +487,7 @@ struct sdhci_host {
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
+ phys_addr_t mapbase; /* physical address base */
char *bounce_buffer; /* For packing SDMA reads/writes */
dma_addr_t bounce_addr;
unsigned int bounce_buffer_size;
@@ -533,6 +536,7 @@ struct sdhci_host {
bool pending_reset; /* Cmd/data reset is pending */
bool irq_wake_enabled; /* IRQ wakeup is enabled */
bool v4_mode; /* Host Version 4 Enable */
+ bool use_external_dma; /* Host selects to use external DMA */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -554,7 +558,8 @@ struct sdhci_host {
dma_addr_t adma_addr; /* Mapped ADMA descr. table */
dma_addr_t align_addr; /* Mapped bounce buffer */
- unsigned int desc_sz; /* ADMA descriptor size */
+ unsigned int desc_sz; /* ADMA current descriptor size */
+ unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */
struct workqueue_struct *complete_wq; /* Request completion wq */
struct work_struct complete_work; /* Request completion work */
@@ -562,6 +567,11 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+#endif
+
u32 caps; /* CAPABILITY_0 */
u32 caps1; /* CAPABILITY_1 */
bool read_caps; /* Capability flags have been read */
@@ -793,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host);
void sdhci_reset_tuning(struct sdhci_host *host);
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b8e897e31e2e..3afea580fbea 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
writeb(val, host->ioaddr + reg);
}
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = sdhci_execute_tuning(mmc, opcode);
+
+ if (err)
+ return err;
+ /*
+ * Tuning data remains in the buffer after tuning.
+ * Do a command and data reset to get rid of it.
+ */
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
- int cmd_error = 0;
- int data_error = 0;
-
- if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
- return intmask;
-
- cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
- return 0;
-}
-
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
@@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
struct sdhci_am654_data *sdhci_am654;
const struct of_device_id *match;
struct sdhci_host *host;
- struct resource *res;
struct clk *clk_xin;
struct device *dev = &pdev->dev;
void __iomem *base;
@@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto pm_runtime_put;
@@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto pm_runtime_put;
}
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
ret = sdhci_am654_init(host);
if (ret)
goto pm_runtime_put;
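
The execute_tuning override added above follows a simple wrap-then-clean pattern: run the generic sdhci tuning, then issue a CMD/DATA reset so stale tuning blocks do not linger in the host buffer. The same shape, sketched for a hypothetical driver:

static int example_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = sdhci_execute_tuning(mmc, opcode);	/* generic pass */

	if (err)
		return err;

	/* Flush tuning data left behind in the buffer. */
	sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	return 0;
}

The hook is installed in probe before the host is added, exactly as done above via host->mmc_host_ops.execute_tuning.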
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index fa0dfc657c22..4625cc071b61 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
u32 reg = 0;
@@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 98c575de43c7..7e1fd557109c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
host->chan_rx = sh_mmcif_request_dma_pdata(host,
pdata->slave_id_rx);
} else {
- host->chan_tx = dma_request_slave_channel(dev, "tx");
- host->chan_rx = dma_request_slave_channel(dev, "rx");
+ host->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->chan_tx))
+ host->chan_tx = NULL;
+ host->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->chan_rx))
+ host->chan_rx = NULL;
}
dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
host->chan_rx);
@@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
struct sh_mmcif_host *host;
struct device *dev = &pdev->dev;
struct sh_mmcif_plat_data *pd = dev->platform_data;
- struct resource *res;
void __iomem *reg;
const char *name;
@@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
if (irq[0] < 0)
return -ENXIO;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
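
The conversion from dma_request_slave_channel() to dma_request_chan() is part of a tree-wide cleanup: the old helper returned NULL on any failure, while the new one returns an ERR_PTR() so callers can see the actual error. sh_mmcif keeps the old behaviour (no channel simply means PIO), so the ERR_PTR is squashed back to NULL. For contrast, a sketch of the stricter variant that honours probe deferral, which this driver deliberately does not use:

struct dma_chan *chan = dma_request_chan(dev, "tx");

if (IS_ERR(chan)) {
	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* DMA provider not ready yet */
	chan = NULL;			/* any other error: use PIO */
}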
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d577a6b0ceae..f87d7967457f 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
if (ret)
return ret;
- host->reg_base = devm_ioremap_resource(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->reg_base))
return PTR_ERR(host->reg_base);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index c4a1d49fbea4..1e424bcdbd5f 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
- struct resource *res;
void __iomem *ctl;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctl = devm_ioremap_resource(&pdev->dev, res);
+ ctl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctl))
return ERR_CAST(ctl);
@@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
* Look for a card detect GPIO, if it fails with anything
* else than a probe deferral, just live without it.
*/
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 0c72ec5546c3..a1683c49cb90 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -59,7 +59,6 @@
struct uniphier_sd_priv {
struct tmio_mmc_data tmio_data;
struct pinctrl *pinctrl;
- struct pinctrl_state *pinstate_default;
struct pinctrl_state *pinstate_uhs;
struct clk *clk;
struct reset_control *rst;
@@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
- struct pinctrl_state *pinstate;
+ struct pinctrl_state *pinstate = NULL;
u32 val, tmp;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
val = UNIPHIER_SD_VOLT_330;
- pinstate = priv->pinstate_default;
break;
case MMC_SIGNAL_VOLTAGE_180:
val = UNIPHIER_SD_VOLT_180;
@@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
- pinctrl_select_state(priv->pinctrl, pinstate);
+ if (pinstate)
+ pinctrl_select_state(priv->pinctrl, pinstate);
+ else
+ pinctrl_select_default_state(mmc_dev(mmc));
return 0;
}
@@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
if (IS_ERR(priv->pinctrl))
return PTR_ERR(priv->pinctrl);
- priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(priv->pinstate_default))
- return PTR_ERR(priv->pinstate_default);
-
priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
if (IS_ERR(priv->pinstate_uhs))
return PTR_ERR(priv->pinstate_uhs);
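
uniphier-sd (above) and usdhi6rol0 (below) no longer cache a pinctrl handle for PINCTRL_STATE_DEFAULT at probe; pinctrl_select_default_state() resolves it from the device when needed. A condensed sketch of the resulting voltage-switch callback, with the priv structure hypothetical:

static int example_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct example_priv *priv = mmc_priv(mmc);	/* hypothetical */
	struct pinctrl_state *pinstate = NULL;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
		pinstate = priv->pinstate_uhs;	/* only named states cached */

	if (pinstate)
		return pinctrl_select_state(priv->pinctrl, pinstate);

	return pinctrl_select_default_state(mmc_dev(mmc));
}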
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b11ac2314328..9a0b1e4e405d 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -199,7 +199,6 @@ struct usdhi6_host {
/* Pin control */
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
};
@@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
};
int ret;
- host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
host->chan_tx);
- if (!host->chan_tx)
+ if (IS_ERR(host->chan_tx)) {
+ host->chan_tx = NULL;
return;
+ }
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = start + USDHI6_SD_BUF0;
@@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
if (ret < 0)
goto e_release_tx;
- host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
host->chan_rx);
- if (!host->chan_rx)
+ if (IS_ERR(host->chan_rx)) {
+ host->chan_rx = NULL;
goto e_release_tx;
+ }
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr;
@@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
host->pins_uhs);
default:
- return pinctrl_select_state(host->pinctrl,
- host->pins_default);
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
}
@@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev)
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
- if (!IS_ERR(host->pins_uhs)) {
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
-
- if (IS_ERR(host->pins_default)) {
- dev_err(dev,
- "UHS pinctrl requires a default pin state.\n");
- ret = PTR_ERR(host->pins_default);
- goto e_free_mmc;
- }
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(dev, res);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index f4ac064ff471..e48bddd95ce6 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1106,7 +1106,7 @@ static int via_sd_probe(struct pci_dev *pcidev,
len = pci_resource_len(pcidev, 0);
base = pci_resource_start(pcidev, 0);
- sdhost->mmiobase = ioremap_nocache(base, len);
+ sdhost->mmiobase = ioremap(base, len);
if (!sdhost->mmiobase) {
ret = -ENOMEM;
goto free_mmc_host;
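
The ioremap_nocache() to ioremap() substitutions here and throughout the mtd/maps hunks below are mechanical: ioremap() already returns an uncached mapping on every architecture, so the _nocache alias is redundant and is being removed tree-wide. The one spot that genuinely cares about caching, bcm47xxsflash just below, keeps an explicit ioremap_cache() on its non-quirky path. The replacement is one-for-one:

/* Same semantics as the removed ioremap_nocache(). */
static void __iomem *example_map_window(struct resource *res)
{
	return ioremap(res->start, resource_size(res));	/* uncached MMIO */
}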
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index eccf2e5d905e..3af50db8b21b 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -320,7 +320,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
* ChipCommon revision.
*/
if (b47s->bcma_cc->core->id.rev == 54)
- b47s->window = ioremap_nocache(res->start, resource_size(res));
+ b47s->window = ioremap(res->start, resource_size(res));
else
b47s->window = ioremap_cache(res->start, resource_size(res));
if (!b47s->window) {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 410a321682e6..36aa082f6db0 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -44,7 +44,7 @@ struct block2mtd_dev {
static LIST_HEAD(blkmtd_device_list);
-static struct page *page_read(struct address_space *mapping, int index)
+static struct page *page_read(struct address_space *mapping, pgoff_t index)
{
return read_mapping_page(mapping, index, NULL);
}
@@ -54,7 +54,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
struct page *page;
- int index = to >> PAGE_SHIFT; // page index
+ pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
u_long *p;
u_long *max;
@@ -103,7 +103,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
{
struct block2mtd_dev *dev = mtd->priv;
struct page *page;
- int index = from >> PAGE_SHIFT;
+ pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
int cpylen;
@@ -137,7 +137,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
{
struct page *page;
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
- int index = to >> PAGE_SHIFT; // page index
+ pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
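
The int to pgoff_t changes in block2mtd are an overflow fix, not churn: the index is computed from a 64-bit loff_t, and a 32-bit signed int wraps once the backing device offset reaches 2^31 pages (8 TiB with 4 KiB pages). An illustration of the failure mode, assuming PAGE_SHIFT == 12:

loff_t to = 9ULL << 40;			/* offset 9 TiB into the device */
pgoff_t index = to >> PAGE_SHIFT;	/* 2415919104, correct */
int bad = to >> PAGE_SHIFT;		/* wraps past INT_MAX, negative */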
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 462fadb56bdb..42a95ba40f2c 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -163,7 +163,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index c9b7b4d5a923..460494212f6a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -191,7 +191,7 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev,
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 5c27c6994896..85e14150a073 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -249,7 +249,7 @@ static int __init esb2rom_init_one(struct pci_dev *pdev,
}
/* Map the firmware hub into my address space. */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6b989f391baa..fda72c5fd8f9 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -184,7 +184,7 @@ static int __init ichxrom_init_one(struct pci_dev *pdev,
}
/* Map the firmware hub into my address space. */
- window->virt = ioremap_nocache(window->phys, window->size);
+ window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 69503aef981e..d67b845b0e89 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -133,7 +133,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
- p->csr_base = ioremap_nocache(csr_phys, csr_len);
+ p->csr_base = ioremap(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
@@ -152,7 +152,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
- p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
+ p->map.virt = ioremap(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 0eeadfeb620d..832b880d1aaf 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -78,7 +78,7 @@ static int __init init_l440gx(void)
return -ENODEV;
}
- l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+ l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index abc52b70bb00..0bb651624f05 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -82,10 +82,10 @@ static int __init init_netsc520(void)
printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
(unsigned long long)netsc520_map.size,
(unsigned long long)netsc520_map.phys);
- netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
+ netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
if (!netsc520_map.virt) {
- printk("Failed to ioremap_nocache\n");
+ printk("Failed to ioremap\n");
return -EIO;
}
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 50046d497398..7d349874ffeb 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -176,7 +176,7 @@ static int __init nettel_init(void)
#endif
int rc = 0;
- nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
+ nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
if (nettel_mmcrp == NULL) {
printk("SNAPGEAR: failed to disable MMCR cache??\n");
return(-EIO);
@@ -217,7 +217,7 @@ static int __init nettel_init(void)
__asm__ ("wbinvd");
nettel_amd_map.phys = amdaddr;
- nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+ nettel_amd_map.virt = ioremap(amdaddr, maxsize);
if (!nettel_amd_map.virt) {
printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
iounmap(nettel_mmcrp);
@@ -303,7 +303,7 @@ static int __init nettel_init(void)
/* Probe for the size of the first Intel flash */
nettel_intel_map.size = maxsize;
nettel_intel_map.phys = intel0addr;
- nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
rc = -EIO;
@@ -337,7 +337,7 @@ static int __init nettel_init(void)
iounmap(nettel_intel_map.virt);
nettel_intel_map.size = maxsize;
- nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
rc = -EIO;
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 9a49f8a06fb8..377ef0fc4e3e 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -94,7 +94,7 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
map->map.write = mtd_pci_write8,
map->map.size = 0x00800000;
- map->base = ioremap_nocache(pci_resource_start(dev, 0),
+ map->base = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!map->base)
@@ -188,7 +188,7 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
map->map.read = mtd_pci_read32,
map->map.write = mtd_pci_write32,
map->map.size = len;
- map->base = ioremap_nocache(base, len);
+ map->base = ioremap(base, len);
if (!map->base)
return -ENOMEM;
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 70bb403f69f7..2ac79e1cedd9 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,16 +294,15 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
}
-static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static DEFINE_MUTEX(pcmcia_vpp_lock);
static int pcmcia_vpp_refcnt;
static void pcmciamtd_set_vpp(struct map_info *map, int on)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct pcmcia_device *link = dev->p_dev;
- unsigned long flags;
pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
- spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+ mutex_lock(&pcmcia_vpp_lock);
if (on) {
if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
pcmcia_fixup_vpp(link, dev->vpp);
@@ -311,7 +310,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
pcmcia_fixup_vpp(link, 0);
}
- spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
+ mutex_unlock(&pcmcia_vpp_lock);
}
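
The pcmciamtd lock conversion is correctness-driven: set_vpp() is only ever called from sleepable context, and the callee, presumably pcmcia_fixup_vpp(), may sleep, which is illegal under a spinlock with interrupts disabled. The nested on/off refcount it protects is a common idiom; in generic form (the enable/disable hooks are hypothetical):

static DEFINE_MUTEX(example_lock);
static int example_refcnt;

static void example_set(bool on)
{
	mutex_lock(&example_lock);
	if (on) {
		if (++example_refcnt == 1)	/* first nested 'on' */
			example_enable();	/* hypothetical, may sleep */
	} else {
		if (--example_refcnt == 0)	/* last nested 'off' */
			example_disable();	/* hypothetical */
	}
	mutex_unlock(&example_lock);
}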
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index a9f7964e2edb..8f7f966fa9a7 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -38,6 +38,7 @@
#include <linux/mtd/cfi_endian.h>
#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include "physmap-gemini.h"
@@ -64,16 +65,16 @@ static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
- int i, err;
+ int i, err = 0;
info = platform_get_drvdata(dev);
if (!info)
- return 0;
+ goto out;
if (info->cmtd) {
err = mtd_device_unregister(info->cmtd);
if (err)
- return err;
+ goto out;
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
@@ -88,7 +89,10 @@ static int physmap_flash_remove(struct platform_device *dev)
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
- return 0;
+out:
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+ return err;
}
static void physmap_set_vpp(struct map_info *map, int state)
@@ -484,13 +488,19 @@ static int physmap_flash_probe(struct platform_device *dev)
return -EINVAL;
}
+ pm_runtime_enable(&dev->dev);
+ pm_runtime_get_sync(&dev->dev);
+
if (dev->dev.of_node)
err = physmap_flash_of_init(dev);
else
err = physmap_flash_pdata_init(dev);
- if (err)
+ if (err) {
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
return err;
+ }
for (i = 0; i < info->nmaps; i++) {
struct resource *res;
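
physmap_flash_probe()/remove() now take a runtime-PM reference for the life of the device, with the probe error path and remove both issuing matching put/disable calls. The bracket, reduced to its skeleton (the init hook is hypothetical):

static int example_probe(struct platform_device *pdev)
{
	int err;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	err = example_hw_init(pdev);		/* hypothetical setup */
	if (err) {
		pm_runtime_put(&pdev->dev);	/* undo on failure */
		pm_runtime_disable(&pdev->dev);
	}
	return err;
}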
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 03af2df90d47..9902b37e18b4 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -174,8 +174,8 @@ static void sc520cdp_setup_par(void)
int i, j;
/* map in SC520's MMCR area */
- mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
- if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
+ mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+ if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
/* force physical address fields to BIOS defaults: */
for(i = 0; i < NUM_FLASH_BANKS; i++)
sc520cdp_map[i].phys = par_table[i].default_address;
@@ -225,10 +225,10 @@ static int __init init_sc520cdp(void)
(unsigned long long)sc520cdp_map[i].size,
(unsigned long long)sc520cdp_map[i].phys);
- sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+ sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
if (!sc520cdp_map[i].virt) {
- printk("Failed to ioremap_nocache\n");
+ printk("Failed to ioremap\n");
for (j = 0; j < i; j++) {
if (mymtd[j]) {
map_destroy(mymtd[j]);
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 2afb253bf456..57303f904bc1 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -152,7 +152,7 @@ static int scb2_flash_probe(struct pci_dev *dev,
}
/* remap the IO window (w/o caching) */
- scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+ scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
if (!scb2_ioaddr) {
printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
if (!region_fail)
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 6cfc8783c0e5..70d6e865f555 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -56,10 +56,10 @@ static int __init init_ts5500_map(void)
{
int rc = 0;
- ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+ ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
if (!ts5500_map.virt) {
- printk(KERN_ERR "Failed to ioremap_nocache\n");
+ printk(KERN_ERR "Failed to ioremap\n");
rc = -EIO;
goto err2;
}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 170a7221b35f..1d6c9e7e7b7d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -841,10 +841,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
return &concat->mtd;
}
-/*
- * This function destroys an MTD object obtained from concat_mtd_devs()
- */
-
+/* Cleans up the context obtained from mtd_concat_create() */
void mtd_concat_destroy(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig
index ae0b8fe5b990..572b8fe69abb 100644
--- a/drivers/mtd/nand/onenand/Kconfig
+++ b/drivers/mtd/nand/onenand/Kconfig
@@ -25,7 +25,7 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
depends on OF || COMPILE_TEST
help
Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
@@ -33,12 +33,12 @@ config MTD_ONENAND_OMAP2
Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
- tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4
- help
- Support for a OneNAND flash device connected to an Samsung SOC.
- S3C64XX uses command mapping method.
- S5PC110/S5PC210 use generic OneNAND method.
+ tristate "OneNAND on Samsung SOC controller support"
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 || COMPILE_TEST
+ help
+ Support for a OneNAND flash device connected to a Samsung SoC.
+ S3C64XX uses command mapping method.
+ S5PC110/S5PC210 use generic OneNAND method.
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index a27b635eb23a..a0761c7e0288 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
-obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung_mtd.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2) += onenand_omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += onenand_samsung.o
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 77bd32a683e1..d5326d19b136 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
- /* Read-while-load method */
+ /* Read-while-load method */
- /* Do first load to bufferRAM */
- if (read < len) {
- if (!onenand_check_bufferram(mtd, from)) {
+ /* Do first load to bufferRAM */
+ if (read < len) {
+ if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
- ret = this->wait(mtd, FL_READING);
- onenand_update_bufferram(mtd, from, !ret);
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
- }
- }
+ }
+ }
thislen = min_t(int, writesize, len - read);
column = from & (writesize - 1);
if (column + thislen > writesize)
thislen = writesize - column;
- while (!ret) {
- /* If there is more to load then start next load */
- from += thislen;
- if (read + thislen < len) {
+ while (!ret) {
+ /* If there is more to load then start next load */
+ from += thislen;
+ if (read + thislen < len) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
- /*
- * Chip boundary handling in DDP
- * Now we issued chip 1 read and pointed chip 1
+ /*
+ * Chip boundary handling in DDP
+ * Now we issued chip 1 read and pointed chip 1
* bufferram so we have to point chip 0 bufferram.
- */
- if (ONENAND_IS_DDP(this) &&
- unlikely(from == (this->chipsize >> 1))) {
- this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
- boundary = 1;
- } else
- boundary = 0;
- ONENAND_SET_PREV_BUFFERRAM(this);
- }
- /* While load is going, read from last bufferRAM */
- this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ */
+ if (ONENAND_IS_DDP(this) &&
+ unlikely(from == (this->chipsize >> 1))) {
+ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+ boundary = 1;
+ } else
+ boundary = 0;
+ ONENAND_SET_PREV_BUFFERRAM(this);
+ }
+ /* While load is going, read from last bufferRAM */
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
/* Read oob area if needed */
if (oobbuf) {
@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
oobcolumn = 0;
}
- /* See if we are done */
- read += thislen;
- if (read == len)
- break;
- /* Set up for next read from bufferRAM */
- if (unlikely(boundary))
- this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
- ONENAND_SET_NEXT_BUFFERRAM(this);
- buf += thislen;
+ /* See if we are done */
+ read += thislen;
+ if (read == len)
+ break;
+ /* Set up for next read from bufferRAM */
+ if (unlikely(boundary))
+ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ buf += thislen;
thislen = min_t(int, writesize, len - read);
- column = 0;
- cond_resched();
- /* Now wait for load */
- ret = this->wait(mtd, FL_READING);
- onenand_update_bufferram(mtd, from, !ret);
+ column = 0;
+ cond_resched();
+ /* Now wait for load */
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
- }
+ }
/*
* Return success, if no ECC failures, else -EBADMSG
@@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
status &= 0x60;
@@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
return ret;
}
@@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
return ret;
}
@@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
} else {
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = len;
@@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
ret = this->wait(mtd, FL_WRITING);
out:
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
if (!ret)
/* Recalculate device size on boundary change*/
flexonenand_get_size(mtd);
@@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
/* Reset OneNAND to read default register values */
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
/* Wait reset */
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index edf94ee54ec7..aa9368bf7a0c 100644
--- a/drivers/mtd/nand/onenand/omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
unsigned long timeout;
u32 syscfg;
- if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+ if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
switch (state) {
- case FL_RESETING:
+ case FL_RESETTING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
@@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
- tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress )
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
xtra = count & 3;
@@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress )
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
@@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
c->gpmc_cs, c->phys_base, c->onenand.base,
c->dma_chan ? "DMA" : "PIO");
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ r = onenand_scan(&c->mtd, 1);
+ if (r < 0)
goto err_release_dma;
freq = omap2_onenand_get_freq(c->onenand.version_id);
diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index 55e5536a5850..87b28e397d67 100644
--- a/drivers/mtd/nand/onenand/samsung_mtd.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -248,7 +248,7 @@ static unsigned short s3c_onenand_readw(void __iomem *addr)
}
/* BootRAM access control */
- if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) {
if (word_addr == 0)
return s3c_read_reg(MANUFACT_ID_OFFSET);
if (word_addr == 1)
@@ -289,7 +289,7 @@ static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
}
/* BootRAM access control */
- if ((unsigned int)addr < ONENAND_DATARAM) {
+ if ((unsigned long)addr < ONENAND_DATARAM) {
if (value == ONENAND_CMD_READID) {
onenand->bootram_command = 1;
return;
@@ -658,7 +658,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
}
if (dma_mapping_error(dev, dma_dst)) {
- dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
+ dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
goto normal;
}
err = s5pc110_dma_ops(dma_dst, dma_src,
@@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
- memcpy(this->page_buf, p, mtd->writesize);
- p = this->page_buf + offset;
+ memcpy_fromio(this->page_buf, p, mtd->writesize);
+ memcpy(buffer, this->page_buf + offset, count);
+ } else {
+ memcpy_fromio(buffer, p, count);
}
- memcpy(buffer, p, count);
-
return 0;
}
@@ -728,13 +728,12 @@ static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
struct onenand_chip *this = mtd->priv;
struct device *dev = &onenand->pdev->dev;
unsigned int block, end;
- int tmp;
end = this->chipsize >> this->erase_shift;
for (block = 0; block < end; block++) {
unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
- tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+ s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
dev_err(dev, "block %d is write-protected!\n", block);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 74fb91adeb46..a80a46bb5b8b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -452,7 +452,7 @@ config MTD_NAND_PLATFORM
config MTD_NAND_CADENCE
tristate "Support Cadence NAND (HPNFC) controller"
- depends on OF || COMPILE_TEST
+ depends on (OF || COMPILE_TEST) && HAS_IOMEM
help
Enable the driver for NAND flash on platforms using a Cadence NAND
controller.
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 8d6be90a6fe8..3ba17a98df4d 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1578,9 +1578,8 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
nand->numcs = numcs;
- gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
- &np->fwnode, GPIOD_IN,
- "nand-det");
+ gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
+ "det", GPIOD_IN, "nand-det");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get detect gpio (err = %ld)\n",
@@ -1624,9 +1623,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
nand->cs[i].rb.id = val;
} else {
- gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
- "rb", i, &np->fwnode,
- GPIOD_IN, "nand-rb");
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "rb", i, GPIOD_IN,
+ "nand-rb");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get R/B gpio (err = %ld)\n",
@@ -1640,10 +1640,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
}
}
- gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
- i, &np->fwnode,
- GPIOD_OUT_HIGH,
- "nand-cs");
+ gpio = devm_fwnode_gpiod_get_index(nc->dev,
+ of_fwnode_handle(np),
+ "cs", i, GPIOD_OUT_HIGH,
+ "nand-cs");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get CS gpio (err = %ld)\n",
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index e10b76089048..75eb3e97fae3 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -404,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev)
goto out1;
}
- ctx->base = ioremap_nocache(r->start, 0x1000);
+ ctx->base = ioremap(r->start, 0x1000);
if (!ctx->base) {
dev_err(&pdev->dev, "cannot remap NAND memory area\n");
ret = -ENODEV;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 1a66b1cd51c0..44518dada75b 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2635,6 +2635,16 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
/* initialize the dma version */
brcmnand_flash_dma_revision_init(ctrl);
+ ret = -EIO;
+ if (ctrl->nand_version >= 0x0700)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (ret)
+ goto err;
+
/* linked-list and stop on error */
flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
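
brcmnand now negotiates the DMA mask instead of assuming one: v7.0+ controllers try 40-bit addressing first, everything else (ret pre-seeded with -EIO) drops straight to the 32-bit attempt, and only failure of both aborts probe. The descending-mask idiom in isolation:

/* Try the widest supported mask, then narrower ones. */
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)) &&
    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
	return -EIO;	/* no usable DMA addressing mode */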
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 3a36285a8d8a..f6c7102a1e32 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
- char nf_mem, u32 flash_ptr, char *mem_ptr,
- char *ctrl_data_ptr, u16 ctype)
+ char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+ dma_addr_t ctrl_data_ptr, u16 ctype)
{
struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
@@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
cdma_desc->command_flags |= CDMA_CF_INT;
- cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+ cdma_desc->memory_pointer = mem_ptr;
cdma_desc->status = 0;
cdma_desc->sync_flag_pointer = 0;
cdma_desc->sync_arguments = 0;
cdma_desc->command_type = ctype;
- cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+ cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}
static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
@@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
}
cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
- (void *)dma_buf, (void *)dma_ctrl_dat,
- ctype);
+ dma_buf, dma_ctrl_dat, ctype);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
@@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page)
cadence_nand_cdma_desc_prepare(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
- page, NULL, NULL,
+ page, 0, 0,
CDMA_CT_ERASE);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
if (status) {
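
The cadence descriptor-prepare signature change swaps casted kernel pointers for dma_addr_t. The old (uintptr_t)mem_ptr only worked by accident: the address a device must use comes from the DMA API and can differ from the CPU virtual address (IOMMU translation, bounce buffers, a 64-bit dma_addr_t on a 32-bit CPU). The safe pattern, sketched:

dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

if (dma_mapping_error(dev, dma))
	return -ENOMEM;
desc->memory_pointer = dma;	/* device address, never (uintptr_t)buf */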
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 3102ddbd8abd..fafd0a0aa8e2 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -21,7 +21,6 @@
#include "denali.h"
#define DENALI_NAND_NAME "denali-nand"
-#define DENALI_DEFAULT_OOB_SKIP_BYTES 8
/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
@@ -1302,15 +1301,16 @@ int denali_init(struct denali_controller *denali)
/*
* Set how many bytes should be skipped before writing data in OOB.
- * If a non-zero value has already been set (by firmware or something),
- * just use it. Otherwise, set the driver's default.
+ * If the platform requests a non-zero value, write it to the register.
+ * Otherwise, read the value out, expecting it has already been set up
+ * by firmware.
*/
- denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- if (!denali->oob_skip_bytes) {
- denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+ if (denali->oob_skip_bytes)
iowrite32(denali->oob_skip_bytes,
denali->reg + SPARE_AREA_SKIP_BYTES);
- }
+ else
+ denali->oob_skip_bytes = ioread32(denali->reg +
+ SPARE_AREA_SKIP_BYTES);
iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 8b779a899dcf..f08740ae282b 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include "denali.h"
@@ -22,11 +24,14 @@ struct denali_dt {
struct clk *clk; /* core clock */
struct clk *clk_x; /* bus interface clock */
struct clk *clk_ecc; /* ECC circuit clock */
+ struct reset_control *rst; /* core reset */
+ struct reset_control *rst_reg; /* register reset */
};
struct denali_dt_data {
unsigned int revision;
unsigned int caps;
+ unsigned int oob_skip_bytes;
const struct nand_ecc_caps *ecc_caps;
};
@@ -34,6 +39,7 @@ NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
512, 8, 15);
static const struct denali_dt_data denali_socfpga_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP,
+ .oob_skip_bytes = 2,
.ecc_caps = &denali_socfpga_ecc_caps,
};
@@ -42,6 +48,7 @@ NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
static const struct denali_dt_data denali_uniphier_v5a_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP |
DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
.ecc_caps = &denali_uniphier_v5a_ecc_caps,
};
@@ -51,6 +58,7 @@ static const struct denali_dt_data denali_uniphier_v5b_data = {
.revision = 0x0501,
.caps = DENALI_CAP_HW_ECC_FIXUP |
DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
.ecc_caps = &denali_uniphier_v5b_ecc_caps,
};
@@ -118,11 +126,13 @@ static int denali_dt_probe(struct platform_device *pdev)
denali = &dt->controller;
data = of_device_get_match_data(dev);
- if (data) {
- denali->revision = data->revision;
- denali->caps = data->caps;
- denali->ecc_caps = data->ecc_caps;
- }
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ denali->revision = data->revision;
+ denali->caps = data->caps;
+ denali->oob_skip_bytes = data->oob_skip_bytes;
+ denali->ecc_caps = data->ecc_caps;
denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
@@ -151,6 +161,14 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
+ dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
+ if (IS_ERR(dt->rst))
+ return PTR_ERR(dt->rst);
+
+ dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
+ if (IS_ERR(dt->rst_reg))
+ return PTR_ERR(dt->rst_reg);
+
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
@@ -166,10 +184,30 @@ static int denali_dt_probe(struct platform_device *pdev)
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
- ret = denali_init(denali);
+ /*
+ * Deassert the register reset, and the core reset in this order.
+ * Deasserting the core reset while the register reset is asserted
+ * will cause unpredictable behavior in the controller.
+ */
+ ret = reset_control_deassert(dt->rst_reg);
if (ret)
goto out_disable_clk_ecc;
+ ret = reset_control_deassert(dt->rst);
+ if (ret)
+ goto out_assert_rst_reg;
+
+ /*
+ * When the reset is deasserted, the initialization sequence is kicked
+ * (bootstrap process). The driver must wait until it finished.
+ * Otherwise, it will result in unpredictable behavior.
+ */
+ usleep_range(200, 1000);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_assert_rst;
+
for_each_child_of_node(dev->of_node, np) {
ret = denali_dt_chip_init(denali, np);
if (ret) {
@@ -184,6 +222,10 @@ static int denali_dt_probe(struct platform_device *pdev)
out_remove_denali:
denali_remove(denali);
+out_assert_rst:
+ reset_control_assert(dt->rst);
+out_assert_rst_reg:
+ reset_control_assert(dt->rst_reg);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
@@ -199,6 +241,8 @@ static int denali_dt_remove(struct platform_device *pdev)
struct denali_dt *dt = platform_get_drvdata(pdev);
denali_remove(&dt->controller);
+ reset_control_assert(dt->rst);
+ reset_control_assert(dt->rst_reg);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
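
The two reset lines are optional shared controls: devm_reset_control_get_optional_shared() hands back a dummy handle when the device tree has no matching "resets" entry, so the deassert/assert calls are safe unconditionally. Order matters, as the probe comment says: take the register block out of reset first, then the core, then wait for the bootstrap sequence. Skeleton of the sequence:

rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
if (IS_ERR(rst_reg))
	return PTR_ERR(rst_reg);

rst = devm_reset_control_get_optional_shared(dev, "nand");
if (IS_ERR(rst))
	return PTR_ERR(rst);

ret = reset_control_deassert(rst_reg);		/* registers first */
if (ret)
	return ret;

ret = reset_control_deassert(rst);		/* then the core */
if (ret) {
	reset_control_assert(rst_reg);
	return ret;
}

usleep_range(200, 1000);			/* bootstrap settles */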
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index d62aa5271753..2f77ee55e1bf 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret;
}
- denali->reg = ioremap_nocache(csr_base, csr_len);
+ denali->reg = ioremap(csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
- denali->host = ioremap_nocache(mem_base, mem_len);
+ denali->host = ioremap(mem_base, mem_len);
if (!denali->host) {
- dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
+ dev_err(&dev->dev, "Spectra: ioremap failed!");
ret = -ENOMEM;
goto out_unmap_reg;
}
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 1054cc070747..f31fae3a4c68 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev)
fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
FSL_UPM_WAIT_WRITE_BYTE;
- fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+ fun->io_base = devm_ioremap(&ofdev->dev, io_res.start,
resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 334fe3130285..b9d5d55a5edb 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
int ret;
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0)
+ return ret;
+
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
@@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this)
*/
writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- return 0;
err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
return ret;
}
@@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev)
return ret;
}
+ /* Set flag to get timing setup restored for next exec_op */
+ if (this->hw.clk_rate)
+ this->hw.must_apply_timings = true;
+
/* re-init the BCH registers */
ret = bch_set_geometry(this);
if (ret) {
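
gpmi_init() now holds a runtime-PM reference across its register writes, and the success path falls through into err_out so get and put always pair (note that pm_runtime_get_sync() leaves the usage count raised even when it fails; later kernels prefer pm_runtime_resume_and_get() for that reason). The bracket in skeleton form, with the work hook hypothetical:

ret = pm_runtime_get_sync(dev);
if (ret < 0)
	return ret;

ret = example_touch_registers(dev);	/* hypothetical register work */

pm_runtime_mark_last_busy(dev);		/* restart autosuspend timer */
pm_runtime_put_autosuspend(dev);
return ret;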
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 8b90def6686f..a2fcb739e5f8 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -438,7 +438,7 @@ static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
buffer += blksize;
offset += blksize;
size -= blksize;
- };
+ }
}
/* Copy data from/to NFC main and spare buffers */
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 58511aeb0c9a..3ff7ce00cbdb 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -59,7 +59,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip)
*/
static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
{
- unsigned int i;
+ int i;
static const char * const broken_get_timings[] = {
"MX30LF1G18AC",
"MX30LF1G28AC",
@@ -80,12 +80,9 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
if (!chip->parameters.supports_set_get_features)
return;
- for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
- if (!strcmp(broken_get_timings[i], chip->parameters.model))
- break;
- }
-
- if (i == ARRAY_SIZE(broken_get_timings))
+ i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+ chip->parameters.model);
+ if (i < 0)
return;
bitmap_clear(chip->parameters.get_feature_list,
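
match_string() replaces the open-coded array scan: it returns the index of an exact match or -EINVAL, which is also why i loses its unsigned qualifier above. Usage in miniature:

static const char * const models[] = { "MX30LF1G18AC", "MX30LF1G28AC" };
int idx = match_string(models, ARRAY_SIZE(models), "MX30LF1G28AC");

if (idx < 0)
	return;		/* not in the list; nothing to fix up */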
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 9e63800f768a..3ba73f18841f 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -37,6 +37,7 @@
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+#define FMC2_TIMEOUT_US 1000
#define FMC2_TIMEOUT_MS 1000
/* Timings */
@@ -53,6 +54,8 @@
#define FMC2_PMEM 0x88
#define FMC2_PATT 0x8c
#define FMC2_HECCR 0x94
+#define FMC2_ISR 0x184
+#define FMC2_ICR 0x188
#define FMC2_CSQCR 0x200
#define FMC2_CSQCFGR1 0x204
#define FMC2_CSQCFGR2 0x208
@@ -118,6 +121,12 @@
#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF BIT(1)
+
/* Register: FMC2_CSQCR */
#define FMC2_CSQCR_CSQSTART BIT(0)
@@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
stm32_fmc2_set_buswidth_16(fmc2, true);
}
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ const struct nand_sdr_timings *timings;
+ u32 isr, sr;
+
+ /* Check that there are no pending requests to the NAND flash */
+ if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ FMC2_TIMEOUT_US))
+ dev_warn(fmc2->dev, "Waitrdy timeout\n");
+
+ /* Wait tWB before R/B# signal is low */
+ timings = nand_get_sdr_timings(&chip->data_interface);
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+ /* R/B# signal is low, clear high level flag */
+ writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+
+ /* Wait until R/B# signal is high */
+ return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+ isr, isr & FMC2_ISR_IHLF,
+ 5, 1000 * timeout_ms);
+}
+
static int stm32_fmc2_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
@@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
break;
case NAND_OP_WAITRDY_INSTR:
- ret = nand_soft_waitrdy(chip,
- instr->ctx.waitrdy.timeout_ms);
+ ret = stm32_fmc2_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
break;
}
}
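
stm32_fmc2_waitrdy() replaces the generic nand_soft_waitrdy() with a poll of the controller's own ready/busy flag. The workhorse is readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us), which busy-reads addr into val until cond holds or timeout_us expires. Its shape in isolation, with the register and bit names hypothetical:

u32 sr;
int err = readl_relaxed_poll_timeout_atomic(base + EXAMPLE_SR, sr,
					    sr & EXAMPLE_READY_BIT,
					    1, 1000);	/* 1 us step, 1 ms cap */
if (err)
	dev_warn(dev, "ready-wait timed out\n");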
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 1cb3760ff779..0db5ee4e82af 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -124,6 +124,16 @@ static const struct spinand_info toshiba_spinand_table[] = {
0,
SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
tc58cxgxsx_ecc_get_status)),
+ /* 3.3V 4Gb */
+ SPINAND_INFO("TC58CVG2S0", 0xED,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
/* 1.8V 1Gb */
SPINAND_INFO("TC58CYG0S3", 0xB2,
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index e5ea6127ab5a..671a61845bd5 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
- unsigned int block_num, log_num, phymax;
+ unsigned int block_num, phymax;
+ int i, ret, log_num;
loff_t block_adr;
u8 *oob;
- int i, ret;
oob = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!oob)
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 4744bf94ad9a..b9f272408c4d 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl,
/* FTL can contain -1 entries that are by default filled with bits */
if (block == -1) {
- memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ if (buffer)
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
return 0;
}
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index f237fcdf7f86..c1eda67d1ad2 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -46,11 +46,11 @@ config SPI_CADENCE_QUADSPI
Flash as an MTD device.
config SPI_HISI_SFC
- tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+ tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
help
- This enables support for hisilicon SPI-NOR flash controller.
+ This enables support for HiSilicon FMC SPI-NOR flash controller.
config SPI_MTK_QUADSPI
tristate "MediaTek Quad SPI controller"
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c
index 2b7cabbb680c..395127349aa8 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/aspeed-smc.c
@@ -305,7 +305,7 @@ static void aspeed_smc_stop_user(struct spi_nor *nor)
writel(ctl, chip->ctl); /* default to fread or read mode */
}
-static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int aspeed_smc_prep(struct spi_nor *nor)
{
struct aspeed_smc_chip *chip = nor->priv;
@@ -313,7 +313,7 @@ static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
return 0;
}
-static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void aspeed_smc_unprep(struct spi_nor *nor)
{
struct aspeed_smc_chip *chip = nor->priv;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 06f997247d0f..494dcab4aaaa 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -1062,7 +1062,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
return 0;
}
-static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int cqspi_prep(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -1072,7 +1072,7 @@ static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
return 0;
}
-static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void cqspi_unprep(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
index a1258216f89d..6c7a4118752e 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * HiSilicon SPI Nor Flash Controller Driver
+ * HiSilicon FMC SPI-NOR flash controller driver
*
* Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
*/
@@ -144,7 +144,7 @@ static void hisi_spi_nor_init(struct hifmc_host *host)
writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
}
-static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int hisi_spi_nor_prep(struct spi_nor *nor)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
@@ -167,7 +167,7 @@ out:
return ret;
}
-static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void hisi_spi_nor_unprep(struct spi_nor *nor)
{
struct hifmc_priv *priv = nor->priv;
struct hifmc_host *host = priv->host;
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 3d8987baea2a..81329f680bec 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -70,10 +70,12 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index f4afe123e9dc..4fc632ec18fe 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -85,7 +85,7 @@ struct sfdp_header {
#define BFPT_DWORD(i) ((i) - 1)
#define BFPT_DWORD_MAX 16
-/* The first version of JESB216 defined only 9 DWORDs. */
+/* The first version of JESD216 defined only 9 DWORDs. */
#define BFPT_DWORD_MAX_JESD216 9
/* 1st DWORD. */
@@ -196,7 +196,7 @@ struct flash_info {
u16 page_size;
u16 addr_width;
- u16 flags;
+ u32 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
@@ -233,6 +233,11 @@ struct flash_info {
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
+ * Top/Bottom (TB) is bit 6 of
+ * status register. Must be used with
+ * SPI_NOR_HAS_TB.
+ */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -1307,14 +1312,14 @@ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
}
}
-static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int spi_nor_lock_and_prep(struct spi_nor *nor)
{
int ret = 0;
mutex_lock(&nor->lock);
if (nor->controller_ops && nor->controller_ops->prepare) {
- ret = nor->controller_ops->prepare(nor, ops);
+ ret = nor->controller_ops->prepare(nor);
if (ret) {
mutex_unlock(&nor->lock);
return ret;
@@ -1323,10 +1328,10 @@ static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
return ret;
}
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
if (nor->controller_ops && nor->controller_ops->unprepare)
- nor->controller_ops->unprepare(nor, ops);
+ nor->controller_ops->unprepare(nor);
mutex_unlock(&nor->lock);
}
@@ -1688,7 +1693,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr = instr->addr;
len = instr->len;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -1751,7 +1756,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = spi_nor_write_disable(nor);
erase_err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -1761,9 +1766,13 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
{
struct mtd_info *mtd = &nor->mtd;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 tb_mask = SR_TB_BIT5;
int shift = ffs(mask) - 1;
int pow;
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ tb_mask = SR_TB_BIT6;
+
if (!(sr & mask)) {
/* No protection */
*ofs = 0;
@@ -1771,7 +1780,7 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
} else {
pow = ((sr & mask) ^ mask) >> shift;
*len = mtd->size >> pow;
- if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
*ofs = 0;
else
*ofs = mtd->size - *len;
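For readers following the tb_mask plumbing through these hunks: the BP field selects a power-of-two fraction of the chip, and the TB bit (bit 5, or bit 6 on parts flagged SPI_NOR_TB_SR_BIT6) anchors that region at the bottom or the top. A self-contained userspace model of the decode, assuming the SR bit values used by the driver (the 16 MiB chip size is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

#define SR_BP0		0x04	/* block protect bits, as in the driver */
#define SR_BP1		0x08
#define SR_BP2		0x10
#define SR_TB_BIT5	0x20
#define SR_TB_BIT6	0x40

static void get_locked_range(uint64_t size, uint8_t sr, int tb_bit6,
			     uint64_t *ofs, uint64_t *len)
{
	uint8_t mask = SR_BP2 | SR_BP1 | SR_BP0;
	uint8_t tb_mask = tb_bit6 ? SR_TB_BIT6 : SR_TB_BIT5;
	int shift = 2;				/* ffs(mask) - 1 */
	int pow;

	if (!(sr & mask)) {			/* BP == 0: nothing locked */
		*ofs = 0;
		*len = 0;
		return;
	}
	pow = ((sr & mask) ^ mask) >> shift;	/* i.e. 7 - BP */
	*len = size >> pow;			/* 1/2^pow of the chip */
	*ofs = (sr & tb_mask) ? 0 : size - *len;
}

int main(void)
{
	uint64_t ofs, len;

	/* BP = 0b101 with TB (bit 6) set: bottom quarter of a 16 MiB chip */
	get_locked_range(16 << 20, SR_BP2 | SR_BP0 | SR_TB_BIT6, 1, &ofs, &len);
	printf("locked ofs=0x%llx len=0x%llx\n",
	       (unsigned long long)ofs, (unsigned long long)len);
	return 0;
}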
@@ -1850,6 +1859,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
struct mtd_info *mtd = &nor->mtd;
int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 tb_mask = SR_TB_BIT5;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
@@ -1886,6 +1896,9 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
else
lock_len = ofs + len;
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ tb_mask = SR_TB_BIT6;
+
/*
* Need smallest pow such that:
*
@@ -1903,13 +1916,13 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if (!(val & mask))
return -EINVAL;
- status_new = (status_old & ~mask & ~SR_TB) | val;
+ status_new = (status_old & ~mask & ~tb_mask) | val;
/* Disallow further writes if WP pin is asserted */
status_new |= SR_SRWD;
if (!use_top)
- status_new |= SR_TB;
+ status_new |= tb_mask;
/* Don't bother if they're the same */
if (status_new == status_old)
@@ -1932,6 +1945,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
struct mtd_info *mtd = &nor->mtd;
int ret, status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 tb_mask = SR_TB_BIT5;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
@@ -1968,6 +1982,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
else
lock_len = ofs;
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ tb_mask = SR_TB_BIT6;
/*
* Need largest pow such that:
*
@@ -1987,14 +2003,14 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return -EINVAL;
}
- status_new = (status_old & ~mask & ~SR_TB) | val;
+ status_new = (status_old & ~mask & ~tb_mask) | val;
/* Don't protect status register if we're fully unlocked */
if (lock_len == 0)
status_new &= ~SR_SRWD;
if (!use_top)
- status_new |= SR_TB;
+ status_new |= tb_mask;
/* Don't bother if they're the same */
if (status_new == status_old)
@@ -2036,13 +2052,13 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
ret = nor->params.locking_ops->lock(nor, ofs, len);
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2051,13 +2067,13 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
ret = nor->params.locking_ops->unlock(nor, ofs, len);
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2066,13 +2082,13 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
ret = nor->params.locking_ops->is_locked(nor, ofs, len);
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2124,6 +2140,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
return 0;
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
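The one-line addition above restores the read-modify-write: without the |=, the function read the register, saw QE clear, and wrote the unchanged value straight back, so quad mode was never enabled. The pattern in isolation (read_cr()/write_cr() are hypothetical stand-ins for the driver's register accessors):

#include <stdint.h>

#define SR2_QUAD_EN_BIT1	0x02	/* QE lives in bit 1, as in the driver */

static uint8_t read_cr(void);		/* hypothetical register accessors */
static int write_cr(uint8_t cr);

static int set_quad_enable(void)
{
	uint8_t cr = read_cr();

	if (cr & SR2_QUAD_EN_BIT1)
		return 0;		/* already set, avoid a needless write */

	cr |= SR2_QUAD_EN_BIT1;		/* the step the fix restores */
	return write_cr(cr);
}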
@@ -2307,6 +2325,9 @@ static const struct flash_info spi_nor_ids[] = {
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
@@ -2372,6 +2393,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -2379,7 +2405,8 @@ static const struct flash_info spi_nor_ids[] = {
{
"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_TB_SR_BIT6)
.fixups = &gd25q256_fixups,
},
@@ -2434,6 +2461,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
@@ -2454,20 +2483,35 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
- { "mt25qu512a (n25q512a)", INFO(0x20bb20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
/* Micron */
@@ -2538,6 +2582,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
+ SPI_NOR_DUAL_READ) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
/* ST Microelectronics -- newer production may have feature updates */
@@ -2608,6 +2654,11 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{
@@ -2628,7 +2679,9 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
@@ -2699,7 +2752,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -2726,7 +2779,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = 0;
read_err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2739,7 +2792,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -2812,7 +2865,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
}
out:
*retlen += actual;
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2830,7 +2883,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+ ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
@@ -2876,7 +2929,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
}
write_err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -4596,6 +4649,7 @@ static void sst_set_default_init(struct spi_nor *nor)
static void st_micron_set_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
nor->params.quad_enable = NULL;
nor->params.set_4byte = st_micron_set_4byte;
}
@@ -4768,9 +4822,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
{
- struct mtd_info *mtd = &nor->mtd;
-
- if (mtd->size <= SZ_16M)
+ if (nor->params.size <= SZ_16M)
return;
nor->flags |= SNOR_F_4B_OPCODES;
@@ -5142,8 +5194,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & USE_FSR)
nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB)
+ if (info->flags & SPI_NOR_HAS_TB) {
nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
if (info->flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
if (info->flags & USE_CLSR)
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 10b2459f8951..ea7440ac913b 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1640,7 +1640,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
out_wl:
ubi_wl_close(ubi);
out_vtbl:
- ubi_free_internal_volumes(ubi);
+ ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_ai:
destroy_ai(ai);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index d636bbe214cb..2f93c25bbaee 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -503,21 +503,42 @@ static void uif_close(struct ubi_device *ubi)
}
/**
- * ubi_free_internal_volumes - free internal volumes.
+ * ubi_free_volumes_from - free volumes from specific index.
* @ubi: UBI device description object
+ * @from: index of the first volume slot to free.
*/
-void ubi_free_internal_volumes(struct ubi_device *ubi)
+static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
{
int i;
- for (i = ubi->vtbl_slots;
- i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ if (!ubi->volumes[i])
+ continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
}
}
+/**
+ * ubi_free_all_volumes - free all volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_all_volumes(struct ubi_device *ubi)
+{
+ ubi_free_volumes_from(ubi, 0);
+}
+
+/**
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_internal_volumes(struct ubi_device *ubi)
+{
+ ubi_free_volumes_from(ubi, ubi->vtbl_slots);
+}
+
static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
int limit, device_pebs;
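The refactor above replaces the internal-volumes-only loop with one parameterized helper: user volumes occupy slots [0, vtbl_slots) and internal ones [vtbl_slots, vtbl_slots + UBI_INT_VOL_COUNT), so each wrapper becomes a single call. NULLing each freed slot also makes a later full pass harmless. The shape of the pattern in plain C (the slot counts are arbitrary stand-ins):

#include <stdlib.h>

#define SLOTS		128	/* stand-in for ubi->vtbl_slots */
#define INT_COUNT	1	/* stand-in for UBI_INT_VOL_COUNT */

static void *volumes[SLOTS + INT_COUNT];

static void free_volumes_from(int from)
{
	int i;

	for (i = from; i < SLOTS + INT_COUNT; i++) {
		if (!volumes[i])
			continue;	/* empty or already freed */
		free(volumes[i]);
		volumes[i] = NULL;	/* a later full pass stays safe */
	}
}

static void free_all_volumes(void)      { free_volumes_from(0); }
static void free_internal_volumes(void) { free_volumes_from(SLOTS); }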
@@ -1013,7 +1034,7 @@ out_uif:
out_detach:
ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
- ubi_free_internal_volumes(ubi);
+ ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_free:
vfree(ubi->peb_buf);
@@ -1159,7 +1180,7 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
* MTD device name.
*/
mtd = get_mtd_device_nm(mtd_dev);
- if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+ if (PTR_ERR(mtd) == -ENODEV)
/* Probably this is an MTD character device node path */
mtd = open_mtd_by_chdev(mtd_dev);
} else
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1c7be4eb3ba6..53f448e7433a 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -64,7 +64,7 @@ static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
+ if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
@@ -1137,7 +1137,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
- unsigned long *seen_pebs = NULL;
+ unsigned long *seen_pebs;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1151,7 +1151,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvbuf) {
ret = -ENOMEM;
- goto out_kfree;
+ goto out_free_avbuf;
}
avhdr = ubi_get_vid_hdr(avbuf);
@@ -1160,7 +1160,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
- goto out_kfree;
+ goto out_free_dvbuf;
}
spin_lock(&ubi->volumes_lock);
@@ -1328,7 +1328,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
- goto out_kfree;
+ goto out_free_seen;
}
for (i = 0; i < new_fm->used_blocks; i++) {
@@ -1350,7 +1350,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
- goto out_kfree;
+ goto out_free_seen;
}
}
@@ -1360,7 +1360,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
- goto out_kfree;
+ goto out_free_seen;
}
}
@@ -1370,10 +1370,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
-out_kfree:
- ubi_free_vid_buf(avbuf);
- ubi_free_vid_buf(dvbuf);
+out_free_seen:
free_seen(seen_pebs);
+out_free_dvbuf:
+ ubi_free_vid_buf(dvbuf);
+out_free_avbuf:
+ ubi_free_vid_buf(avbuf);
+
out:
return ret;
}
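The relabeling above turns one catch-all out_kfree into a proper LIFO unwind: each label frees only what was allocated before the failing step, so an early failure no longer frees buffers that were never allocated, while the success path still falls through and releases everything. The idiom in miniature (userspace sketch; do_work() is a stand-in):

#include <errno.h>
#include <stdlib.h>

static int do_work(void *a, void *b, void *c) { return 0; }

static int example(void)
{
	void *a, *b, *c;
	int ret;

	a = malloc(16);
	if (!a)
		return -ENOMEM;

	b = malloc(16);
	if (!b) {
		ret = -ENOMEM;
		goto out_free_a;	/* b and c were never allocated */
	}

	c = malloc(16);
	if (!c) {
		ret = -ENOMEM;
		goto out_free_b;
	}

	ret = do_work(a, b, c);
	free(c);			/* the success path unwinds too */
out_free_b:
	free(b);
out_free_a:
	free(a);
	return ret;
}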
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 9688b411c930..73c67e5c08f8 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -950,6 +950,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_notify_all(struct ubi_device *ubi, int ntype,
struct notifier_block *nb);
int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_all_volumes(struct ubi_device *ubi);
void ubi_free_internal_volumes(struct ubi_device *ubi);
/* kapi.c */
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 53d8ab54e181..f700f0e4f2ec 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -782,7 +782,7 @@ static int check_attaching_info(const struct ubi_device *ubi,
*/
int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int i, err;
+ int err;
struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -851,11 +851,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
out_free:
vfree(ubi->vtbl);
- for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
- kfree(ubi->volumes[i]);
- ubi->volumes[i] = NULL;
- }
+ ubi_free_all_volumes(ubi);
return err;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5d77a38dba54..837d690a8c60 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -319,7 +319,7 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e, *prev_e = NULL;
+ struct ubi_wl_entry *e;
int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -334,7 +334,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
p = p->rb_left;
else {
p = p->rb_right;
- prev_e = e;
e = e1;
}
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d02f12a5254e..25a8f9387d5a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -71,6 +71,49 @@ config DUMMY
To compile this driver as a module, choose M here: the module
will be called dummy.
+config WIREGUARD
+ tristate "WireGuard secure network tunnel"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select DST_CACHE
+ select CRYPTO
+ select CRYPTO_LIB_CURVE25519
+ select CRYPTO_LIB_CHACHA20POLY1305
+ select CRYPTO_LIB_BLAKE2S
+ select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
+ select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
+ select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
+ select CRYPTO_CURVE25519_X86 if X86 && 64BIT
+ select ARM_CRYPTO if ARM
+ select ARM64_CRYPTO if ARM64
+ select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_ARM if ARM
+ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+ select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
+ select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
+ help
+ WireGuard is a secure, fast, and easy to use replacement for IPSec
+ that uses modern cryptography and clever networking tricks. It's
+ designed to be fairly general purpose and abstract enough to fit most
+ use cases, while at the same time remaining extremely simple to
+ configure. See www.wireguard.com for more info.
+
+ It's safe to say Y or M here, as the driver is very lightweight and
+ is only in use when an administrator chooses to add an interface.
+
+config WIREGUARD_DEBUG
+ bool "Debugging checks and verbose messages"
+ depends on WIREGUARD
+ help
+ This will write log messages for handshake and other events
+ that occur for a WireGuard interface. It will also perform some
+ extra validation checks and unit tests at various points. This is
+ only useful for debugging.
+
+ Say N here unless you know what you're doing.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -489,12 +532,12 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config THUNDERBOLT_NET
- tristate "Networking over Thunderbolt cable"
- depends on THUNDERBOLT && INET
+config USB4_NET
+ tristate "Networking over USB4 and Thunderbolt cables"
+ depends on USB4 && INET
help
- Select this if you want to create network between two
- computers over a Thunderbolt cable. The driver supports Apple
+	  Select this if you want to create a network between two computers
+	  over USB4 and Thunderbolt cables. The driver supports Apple
ThunderboltIP protocol and allows communication with any host
supporting the same protocol including Windows and macOS.
@@ -506,6 +549,8 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ depends on INET
+ depends on IPV6 || IPV6=n
select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0d3ba056cda3..71b88ffc5587 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_WIREGUARD) += wireguard/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
@@ -76,6 +77,6 @@ obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index b3c63d2f16aa..18428e104445 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -189,7 +189,7 @@ static int cops_nodeid (struct net_device *dev, int nodeid);
static irqreturn_t cops_interrupt (int irq, void *dev_id);
static void cops_poll(struct timer_list *t);
-static void cops_timeout(struct net_device *dev);
+static void cops_timeout(struct net_device *dev, unsigned int txqueue);
static void cops_rx (struct net_device *dev);
static netdev_tx_t cops_send_packet (struct sk_buff *skb,
struct net_device *dev);
@@ -844,7 +844,7 @@ static void cops_rx(struct net_device *dev)
netif_rx(skb);
}
-static void cops_timeout(struct net_device *dev)
+static void cops_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b0f5bc07aef5..22a49c6d7ae6 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -356,7 +356,7 @@ int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
-void arcnet_timeout(struct net_device *dev);
+void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
/* I/O equivalents */
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 553776cc1d29..e04efc0a5c97 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -763,7 +763,7 @@ static int go_tx(struct net_device *dev)
}
/* Called by the kernel when transmit times out */
-void arcnet_timeout(struct net_device *dev)
+void arcnet_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
struct arcnet_local *lp = netdev_priv(dev);
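The cops and arcnet prototype changes track the v5.6 netdev core change that passes the index of the stalled TX queue to .ndo_tx_timeout, so multiqueue drivers can recover just that queue; single-queue drivers simply ignore the new argument. A minimal handler under the new signature (sketch):

#include <linux/netdevice.h>

static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timed out on queue %u\n", txqueue);

	/* ...reset the hardware here, then let traffic flow again... */
	netif_wake_queue(dev);
}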
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index e3b25f310936..31e43a2197a3 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -31,16 +31,6 @@
#define AD_CHURN_DETECTION_TIME 60
#define AD_AGGREGATE_WAIT_TIME 2
-/* Port state definitions (43.4.2.2 in the 802.3ad standard) */
-#define AD_STATE_LACP_ACTIVITY 0x1
-#define AD_STATE_LACP_TIMEOUT 0x2
-#define AD_STATE_AGGREGATION 0x4
-#define AD_STATE_SYNCHRONIZATION 0x8
-#define AD_STATE_COLLECTING 0x10
-#define AD_STATE_DISTRIBUTING 0x20
-#define AD_STATE_DEFAULTED 0x40
-#define AD_STATE_EXPIRED 0x80
-
/* Port Variables definitions used by the State Machines (43.4.7 in the
* 802.3ad standard)
*/
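The eight AD_STATE_* bits deleted above are renamed rather than dropped: the rest of this diff switches every user to LACP_STATE_* equivalents carrying the same 802.3ad 43.4.2.2 values, which now presumably come from a shared header instead of being private to bond_3ad.c. For reference, the bit assignments (copied from the removed lines):

#define LACP_STATE_LACP_ACTIVITY	0x1
#define LACP_STATE_LACP_TIMEOUT		0x2
#define LACP_STATE_AGGREGATION		0x4
#define LACP_STATE_SYNCHRONIZATION	0x8
#define LACP_STATE_COLLECTING		0x10
#define LACP_STATE_DISTRIBUTING		0x20
#define LACP_STATE_DEFAULTED		0x40
#define LACP_STATE_EXPIRED		0x80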
@@ -457,8 +447,8 @@ static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
(ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
(ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
- ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
- ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
+ ((lacpdu->partner_state & LACP_STATE_AGGREGATION) == (port->actor_oper_port_state & LACP_STATE_AGGREGATION))) ||
+ ((lacpdu->actor_state & LACP_STATE_AGGREGATION) == 0)
) {
port->sm_vars |= AD_PORT_MATCHED;
} else {
@@ -492,18 +482,18 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
partner->port_state = lacpdu->actor_state;
/* set actor_oper_port_state.defaulted to FALSE */
- port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
+ port->actor_oper_port_state &= ~LACP_STATE_DEFAULTED;
/* set the partner sync. to on if the partner is sync,
* and the port is matched
*/
if ((port->sm_vars & AD_PORT_MATCHED) &&
- (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
- partner->port_state |= AD_STATE_SYNCHRONIZATION;
+ (lacpdu->actor_state & LACP_STATE_SYNCHRONIZATION)) {
+ partner->port_state |= LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=1\n");
} else {
- partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
+ partner->port_state &= ~LACP_STATE_SYNCHRONIZATION;
slave_dbg(port->slave->bond->dev, port->slave->dev,
"partner sync=0\n");
}
@@ -526,7 +516,7 @@ static void __record_default(struct port *port)
sizeof(struct port_params));
/* set actor_oper_port_state.defaulted to true */
- port->actor_oper_port_state |= AD_STATE_DEFAULTED;
+ port->actor_oper_port_state |= LACP_STATE_DEFAULTED;
}
}
@@ -556,7 +546,7 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
!MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
ntohs(lacpdu->actor_key) != partner->key ||
- (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
+ (lacpdu->actor_state & LACP_STATE_AGGREGATION) != (partner->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -588,8 +578,8 @@ static void __update_default_selected(struct port *port)
!MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
admin->system_priority != oper->system_priority ||
admin->key != oper->key ||
- (admin->port_state & AD_STATE_AGGREGATION)
- != (oper->port_state & AD_STATE_AGGREGATION)) {
+ (admin->port_state & LACP_STATE_AGGREGATION)
+ != (oper->port_state & LACP_STATE_AGGREGATION)) {
port->sm_vars &= ~AD_PORT_SELECTED;
}
}
@@ -619,10 +609,10 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
!MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
(ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
(ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
- ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
- ((lacpdu->partner_state & AD_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)) ||
- ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
- ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
+ ((lacpdu->partner_state & LACP_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY)) ||
+ ((lacpdu->partner_state & LACP_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT)) ||
+ ((lacpdu->partner_state & LACP_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) ||
+ ((lacpdu->partner_state & LACP_STATE_AGGREGATION) != (port->actor_oper_port_state & LACP_STATE_AGGREGATION))
) {
port->ntt = true;
}
@@ -978,7 +968,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
 * enabled port will take place only after this timer)
*/
if ((port->sm_vars & AD_PORT_SELECTED) &&
- (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
if (port->aggregator->is_active)
port->sm_mux_state =
@@ -996,14 +986,14 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_state = AD_MUX_DETACHED;
} else if (port->aggregator->is_active) {
port->actor_oper_port_state |=
- AD_STATE_SYNCHRONIZATION;
+ LACP_STATE_SYNCHRONIZATION;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
- !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
- !(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
port->sm_mux_state = AD_MUX_ATTACHED;
} else {
/* if port state hasn't changed make
@@ -1032,11 +1022,11 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
ad_disable_collecting_distributing(port,
update_slave_arr);
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
port->ntt = true;
break;
case AD_MUX_WAITING:
@@ -1045,20 +1035,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
case AD_MUX_ATTACHED:
if (port->aggregator->is_active)
port->actor_oper_port_state |=
- AD_STATE_SYNCHRONIZATION;
+ LACP_STATE_SYNCHRONIZATION;
else
port->actor_oper_port_state &=
- ~AD_STATE_SYNCHRONIZATION;
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ ~LACP_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
- port->actor_oper_port_state |= AD_STATE_COLLECTING;
- port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
- port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
ad_enable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
@@ -1156,7 +1146,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_vars |= AD_PORT_LACP_ENABLED;
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* Fall Through */
@@ -1166,9 +1156,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
case AD_RX_LACP_DISABLED:
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
- port->partner_oper.port_state &= ~AD_STATE_AGGREGATION;
+ port->partner_oper.port_state &= ~LACP_STATE_AGGREGATION;
port->sm_vars |= AD_PORT_MATCHED;
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_EXPIRED:
/* Reset of the Synchronization flag (Standard 43.4.12)
@@ -1177,19 +1167,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
- port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+ port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
- port->actor_oper_port_state |= AD_STATE_EXPIRED;
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
break;
case AD_RX_DEFAULTED:
__update_default_selected(port);
__record_default(port);
port->sm_vars |= AD_PORT_MATCHED;
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
case AD_RX_CURRENT:
/* detect loopback situation */
@@ -1202,8 +1192,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__update_selected(lacpdu, port);
__update_ntt(lacpdu, port);
__record_pdu(lacpdu, port);
- port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
- port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT));
+ port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
break;
default:
break;
@@ -1231,7 +1221,7 @@ static void ad_churn_machine(struct port *port)
if (port->sm_churn_actor_timer_counter &&
!(--port->sm_churn_actor_timer_counter) &&
port->sm_churn_actor_state == AD_CHURN_MONITOR) {
- if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) {
+ if (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_actor_state = AD_NO_CHURN;
} else {
port->churn_actor_count++;
@@ -1241,7 +1231,7 @@ static void ad_churn_machine(struct port *port)
if (port->sm_churn_partner_timer_counter &&
!(--port->sm_churn_partner_timer_counter) &&
port->sm_churn_partner_state == AD_CHURN_MONITOR) {
- if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) {
+ if (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) {
port->sm_churn_partner_state = AD_NO_CHURN;
} else {
port->churn_partner_count++;
@@ -1298,7 +1288,7 @@ static void ad_periodic_machine(struct port *port)
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))
) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
@@ -1315,11 +1305,11 @@ static void ad_periodic_machine(struct port *port)
switch (port->sm_periodic_state) {
case AD_FAST_PERIODIC:
if (!(port->partner_oper.port_state
- & AD_STATE_LACP_TIMEOUT))
+ & LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
break;
case AD_SLOW_PERIODIC:
- if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
+ if ((port->partner_oper.port_state & LACP_STATE_LACP_TIMEOUT)) {
port->sm_periodic_timer_counter = 0;
port->sm_periodic_state = AD_PERIODIC_TX;
}
@@ -1335,7 +1325,7 @@ static void ad_periodic_machine(struct port *port)
break;
case AD_PERIODIC_TX:
if (!(port->partner_oper.port_state &
- AD_STATE_LACP_TIMEOUT))
+ LACP_STATE_LACP_TIMEOUT))
port->sm_periodic_state = AD_SLOW_PERIODIC;
else
port->sm_periodic_state = AD_FAST_PERIODIC;
@@ -1542,7 +1532,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
ad_agg_selection_logic(aggregator, update_slave_arr);
if (!port->aggregator->is_active)
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
}
/* Decide if "agg" is a better choice for the new active aggregator that
@@ -1848,13 +1838,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = AD_STATE_AGGREGATION |
- AD_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = AD_STATE_AGGREGATION |
- AD_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+ LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+ LACP_STATE_LACP_ACTIVITY;
if (lacp_fast)
- port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
@@ -2105,10 +2095,10 @@ void bond_3ad_unbind_slave(struct slave *slave)
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
- port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
- port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
- port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
- port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
+ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state &= ~LACP_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
@@ -2695,9 +2685,9 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
if (lacp_fast)
- port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
else
- port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_TIMEOUT;
}
spin_unlock_bh(&bond->mode_lock);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 4f2e6910c623..1cc2cd894f87 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
- struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
if (is_broadcast_ether_addr(eth_data->h_dest) ||
- iph->daddr == ip_bcast ||
- iph->protocol == IPPROTO_IGMP) {
+ !pskb_network_may_pull(skb, sizeof(*iph))) {
+ do_tx_balance = false;
+ break;
+ }
+ iph = ip_hdr(skb);
+ if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
hash_size = sizeof(iph->daddr);
- }
break;
- case ETH_P_IPV6:
+ }
+ case ETH_P_IPV6: {
+ const struct ipv6hdr *ip6hdr;
+
/* IPv6 doesn't really use broadcast mac address, but leave
* that here just in case.
*/
@@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- /* Additianally, DAD probes should not be tx-balanced as that
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ do_tx_balance = false;
+ break;
+ }
+ /* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
*/
@@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char *)&(ipv6_hdr(skb)->daddr);
- hash_size = sizeof(ipv6_hdr(skb)->daddr);
+ hash_start = (char *)&ip6hdr->daddr;
+ hash_size = sizeof(ip6hdr->daddr);
break;
- case ETH_P_IPX:
- if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+ }
+ case ETH_P_IPX: {
+ const struct ipxhdr *ipxhdr;
+
+	if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+ do_tx_balance = false;
+ break;
+ }
+ ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+ if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
do_tx_balance = false;
break;
}
- if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+ if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
/* The only protocol worth balancing in
* this family since it has an "ARP" like
* mechanism
@@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
+ eth_data = eth_hdr(skb);
hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
+ }
case ETH_P_ARP:
do_tx_balance = false;
if (bond_info->rlb_enabled)
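The common thread in this hunk: on a nonlinear skb the network header may not yet be in the linear data area, so each protocol branch must pskb_network_may_pull() the bytes it is about to read before calling ip_hdr()/ipv6_hdr()/skb_network_header() (note the IPX branch must test the negated result, like the IPv4 and IPv6 branches). The rule in isolation (kernel-style sketch):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static bool can_balance_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;	/* header not in linear data: don't touch it */

	iph = ip_hdr(skb);	/* safe only after the pull succeeded */
	return iph->protocol != IPPROTO_IGMP;
}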
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index fcb7c2f7f001..48d5ec770b94 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2272,9 +2272,6 @@ static void bond_miimon_commit(struct bonding *bond)
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
- } else if (slave != primary) {
- /* prevent it from being the active one */
- bond_set_backup_slave(slave);
}
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
@@ -3702,32 +3699,35 @@ static int bond_neigh_init(struct neighbour *n)
const struct net_device_ops *slave_ops;
struct neigh_parms parms;
struct slave *slave;
- int ret;
+ int ret = 0;
- slave = bond_first_slave(bond);
+ rcu_read_lock();
+ slave = bond_first_slave_rcu(bond);
if (!slave)
- return 0;
+ goto out;
slave_ops = slave->dev->netdev_ops;
if (!slave_ops->ndo_neigh_setup)
- return 0;
-
- parms.neigh_setup = NULL;
- parms.neigh_cleanup = NULL;
- ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
- if (ret)
- return ret;
+ goto out;
- /* Assign slave's neigh_cleanup to neighbour in case cleanup is called
- * after the last slave has been detached. Assumes that all slaves
- * utilize the same neigh_cleanup (true at this writing as only user
- * is ipoib).
+ /* TODO: find another way [1] to implement this.
+ * Passing a zeroed structure is fragile,
+ * but at least we do not pass garbage.
+ *
+ * [1] One way would be that ndo_neigh_setup() never touch
+ * struct neigh_parms, but propagate the new neigh_setup()
+ * back to ___neigh_create() / neigh_parms_alloc()
*/
- n->parms->neigh_cleanup = parms.neigh_cleanup;
+ memset(&parms, 0, sizeof(parms));
+ ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
- if (!parms.neigh_setup)
- return 0;
+ if (ret)
+ goto out;
- return parms.neigh_setup(n);
+ if (parms.neigh_setup)
+ ret = parms.neigh_setup(n);
+out:
+ rcu_read_unlock();
+ return ret;
}
/* The bonding ndo_neigh_setup is called at init time before any
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index bd40b114d6cd..d737ceb61203 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -270,7 +270,9 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- BUG_ON(dev == NULL);
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index c8e1a04ba384..9df2007b5e56 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1302,7 +1302,7 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_put;
}
- addr = ioremap_nocache(res->start, resource_size(res));
+ addr = ioremap(res->start, resource_size(res));
if (!addr) {
err = -ENOMEM;
goto exit_release;
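This and the other ioremap_nocache conversions in this merge (cc770_isa, sja1000, softing) are behavior-neutral: as of this cycle ioremap_nocache() was removed as an alias because ioremap() already returns an uncached MMIO mapping on every architecture. Only the spelling changes:

	/* before */
	addr = ioremap_nocache(res->start, resource_size(res));
	/* after: identical semantics */
	addr = ioremap(res->start, resource_size(res));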
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index b9047d8110d5..194c86e0f340 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -175,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev)
err = -EBUSY;
goto exit;
}
- base = ioremap_nocache(mem[idx], iosize);
+ base = ioremap(mem[idx], iosize);
if (!base) {
err = -ENOMEM;
goto exit_release;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a929cdda9ab2..94d10ec954a0 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv
(&priv->regs->mb[bank][priv->mb_size * mb_index]);
}
+static int flexcan_low_power_enter_ack(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ udelay(10);
+
+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_low_power_exit_ack(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ udelay(10);
+
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
{
struct flexcan_regs __iomem *regs = priv->regs;
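flexcan_low_power_enter_ack()/flexcan_low_power_exit_ack() above factor the same bounded poll out of the stop-mode and chip enable/disable paths. Note the extra read after the loop: timeout-- can reach zero while the bit flips during the final udelay(), so the state is sampled once more before declaring -ETIMEDOUT. The pattern in generic form (read_flag() and TIMEOUT_US are stand-ins):

#include <linux/delay.h>
#include <linux/errno.h>

#define TIMEOUT_US	250		/* stand-in for FLEXCAN_TIMEOUT_US */

static bool read_flag(void);		/* hypothetical register accessor */

static int wait_for_flag(bool want_set)
{
	unsigned int timeout = TIMEOUT_US / 10;

	while (timeout-- && read_flag() != want_set)
		udelay(10);

	/* sample once more: the bit may have flipped during the last delay */
	if (read_flag() != want_set)
		return -ETIMEDOUT;

	return 0;
}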
@@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
- unsigned int ackval;
u32 reg_mcr;
reg_mcr = priv->read(&regs->mcr);
@@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
- /* get stop acknowledgment */
- if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
- ackval, ackval & (1 << priv->stm.ack_bit),
- 0, FLEXCAN_TIMEOUT_US))
- return -ETIMEDOUT;
-
- return 0;
+ return flexcan_low_power_enter_ack(priv);
}
static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
- unsigned int ackval;
u32 reg_mcr;
/* remove stop request */
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
- /* get stop acknowledgment */
- if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
- ackval, !(ackval & (1 << priv->stm.ack_bit)),
- 0, FLEXCAN_TIMEOUT_US))
- return -ETIMEDOUT;
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
- return 0;
+ return flexcan_low_power_exit_ack(priv);
}
static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
@@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
static int flexcan_chip_enable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = priv->read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
priv->write(reg, &regs->mcr);
- while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
- udelay(10);
-
- if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
- return -ETIMEDOUT;
-
- return 0;
+ return flexcan_low_power_exit_ack(priv);
}
static int flexcan_chip_disable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
priv->write(reg, &regs->mcr);
- while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
- udelay(10);
-
- if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
- return -ETIMEDOUT;
-
- return 0;
+ return flexcan_low_power_enter_ack(priv);
}
static int flexcan_chip_freeze(struct flexcan_priv *priv)
@@ -1722,6 +1723,9 @@ static int __maybe_unused flexcan_resume(struct device *device)
netif_start_queue(dev);
if (device_may_wakeup(device)) {
disable_irq_wake(dev->irq);
+ err = flexcan_exit_stop_mode(priv);
+ if (err)
+ return err;
} else {
err = pm_runtime_force_resume(device);
if (err)
@@ -1767,14 +1771,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err;
- if (netif_running(dev) && device_may_wakeup(device)) {
+ if (netif_running(dev) && device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
- err = flexcan_exit_stop_mode(priv);
- if (err)
- return err;
- }
return 0;
}
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 3db619209fe1..eacd428e07e9 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -101,6 +101,9 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
+#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
+
#define TCAN4X5X_SW_RESET BIT(2)
#define TCAN4X5X_MCAN_CONFIGURED BIT(5)
@@ -164,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
}
}
+static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
+{
+ int ret = 0;
+
+ if (priv->reset_gpio) {
+ gpiod_set_value(priv->reset_gpio, 1);
+
+ /* tpulse_width minimum 30us */
+ usleep_range(30, 100);
+ gpiod_set_value(priv->reset_gpio, 0);
+ } else {
+ ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_SW_RESET);
+ if (ret)
+ return ret;
+ }
+
+ usleep_range(700, 1000);
+
+ return ret;
+}
+
static int regmap_spi_gather_write(void *context, const void *reg,
size_t reg_len, const void *val,
size_t val_len)
@@ -338,15 +363,34 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
return ret;
}
+static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
+}
+
+static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+				  TCAN4X5X_DISABLE_INH_MSK,
+				  TCAN4X5X_DISABLE_INH_MSK);
+}
+
static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
GPIOD_OUT_HIGH);
if (IS_ERR(tcan4x5x->device_wake_gpio)) {
- dev_err(cdev->dev, "device-wake gpio not defined\n");
- return -EINVAL;
+ if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ tcan4x5x_disable_wake(cdev);
}
tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -354,16 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
if (IS_ERR(tcan4x5x->reset_gpio))
tcan4x5x->reset_gpio = NULL;
+ ret = tcan4x5x_reset(tcan4x5x);
+ if (ret)
+ return ret;
+
tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
"device-state",
GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio))
+ if (IS_ERR(tcan4x5x->device_state_gpio)) {
tcan4x5x->device_state_gpio = NULL;
-
- tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
- "vsup");
- if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ tcan4x5x_disable_state(cdev);
+ }
return 0;
}
@@ -398,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
if (!priv)
return -ENOMEM;
+	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+	if (IS_ERR(priv->power)) {
+		if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		priv->power = NULL;
+	}
+
mcan_class->device_data = priv;
m_can_class_get_clocks(mcan_class);
@@ -428,10 +479,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- ret = tcan4x5x_parse_config(mcan_class);
- if (ret)
- goto out_clk;
-
/* Configure the SPI bus */
spi->bits_per_word = 32;
ret = spi_setup(spi);
@@ -441,7 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
&spi->dev, &tcan4x5x_regmap);
- tcan4x5x_power_enable(priv->power, 1);
+ ret = tcan4x5x_power_enable(priv->power, 1);
+ if (ret)
+ goto out_clk;
+
+ ret = tcan4x5x_parse_config(mcan_class);
+ if (ret)
+ goto out_power;
+
+ ret = tcan4x5x_init(mcan_class);
+ if (ret)
+ goto out_power;
ret = m_can_class_register(mcan_class);
if (ret)
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 8caf7af0dee2..99101d7027a8 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- int npackets = 0;
- int ret = 1;
+ int work_done = 0;
struct sk_buff *skb;
struct can_frame *frame;
u8 canrflg;
- while (npackets < quota) {
+ while (work_done < quota) {
canrflg = in_8(&regs->canrflg);
if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
break;
@@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
- npackets++;
+ work_done++;
netif_receive_skb(skb);
}
- if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
- napi_complete(&priv->napi);
- clear_bit(F_RX_PROGRESS, &priv->flags);
- if (priv->can.state < CAN_STATE_BUS_OFF)
- out_8(&regs->canrier, priv->shadow_canrier);
- ret = 0;
+ if (work_done < quota) {
+ if (likely(napi_complete_done(&priv->napi, work_done))) {
+ clear_bit(F_RX_PROGRESS, &priv->flags);
+ if (priv->can.state < CAN_STATE_BUS_OFF)
+ out_8(&regs->canrier, priv->shadow_canrier);
+ }
}
- return ret;
+ return work_done;
}
static irqreturn_t mscan_isr(int irq, void *dev_id)
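
The mscan conversion above is a straight port to the modern NAPI contract: the poll callback reports how much of its budget it consumed, and the receive interrupt is re-enabled only when the work fit within the budget and napi_complete_done() actually ended polling. The general pattern, with hypothetical example_* hooks standing in for the real RX path:

#include <linux/netdevice.h>

static bool example_rx_pending(void);	/* hypothetical device hooks */
static int example_rx_one(void);
static void example_enable_rx_irq(void);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	while (work_done < budget && example_rx_pending())
		work_done += example_rx_one();

	/* Stop polling only when the budget was not exhausted, and
	 * re-enable the IRQ only if napi_complete_done() agreed to
	 * complete (it returns false when polling must continue).
	 */
	if (work_done < budget &&
	    likely(napi_complete_done(napi, work_done)))
		example_enable_rx_irq();

	return work_done;
}
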
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 1c4d32d1a542..d513fac50718 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -130,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
err = -EBUSY;
goto exit;
}
- base = ioremap_nocache(mem[idx], iosize);
+ base = ioremap(mem[idx], iosize);
if (!base) {
err = -ENOMEM;
goto exit_release;
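
This ioremap_nocache() to ioremap() substitution, like the identical ones in sja1000_platform.c and softing_main.c below, is mechanical: ioremap() already returns an uncached MMIO mapping on every architecture, which is why the _nocache alias could be removed tree-wide as a one-for-one replacement. A sketch with a hypothetical base and size:

#include <linux/io.h>

static void __iomem *example_map_regs(phys_addr_t base, size_t size)
{
	/* Uncached MMIO mapping; NULL on failure, iounmap() to release */
	return ioremap(base, size);
}
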
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index ff5a96f34085..d7222ba46622 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -229,7 +229,7 @@ static int sp_probe(struct platform_device *pdev)
resource_size(res_mem), DRV_NAME))
return -EBUSY;
- addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ addr = devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
if (!addr)
return -ENOMEM;
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 2e57122f02fb..2f5c287eac95 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = tty->disc_data;
+ struct slcan *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
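
The slcan hunks implement a textbook RCU retire sequence for tty->disc_data: the write-wakeup path, which can race with close, now dereferences the pointer under rcu_read_lock(), while the close path publishes NULL and then blocks in synchronize_rcu(), so no reader can still be using the old pointer by the time the tx work is flushed. The generic shape of that pattern, with a hypothetical shared object:

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct conn { struct work_struct tx_work; };	/* hypothetical */
static struct conn __rcu *active_conn;

static void reader_path(void)	/* e.g. a wakeup callback */
{
	struct conn *c;

	rcu_read_lock();
	c = rcu_dereference(active_conn);
	if (c)
		schedule_work(&c->tx_work);
	rcu_read_unlock();
}

static void teardown_path(struct conn *c)
{
	rcu_assign_pointer(active_conn, NULL);
	synchronize_rcu();	/* all readers that saw c have finished */
	flush_work(&c->tx_work);
}
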
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 8242fb287cbb..d1ddf763b188 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -777,7 +777,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
goto platform_resource_failed;
card->dpram_phys = pres->start;
card->dpram_size = resource_size(pres);
- card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+ card->dpram = ioremap(card->dpram_phys, card->dpram_size);
if (!card->dpram) {
dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
goto ioremap_failed;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 2f74f6704c12..a4b4b742c80c 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
hconf,
sizeof(*hconf),
1000);
@@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
dconf,
sizeof(*dconf),
1000);
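
Both gs_usb hunks, and the kvaser_usb ones below, fix the same class of bug: intf->altsetting[0] is only the first entry in the descriptor array, while intf->cur_altsetting is the alternate setting actually selected, and a (possibly malicious) device can make the two differ. Reading the active descriptor looks like this hypothetical helper:

#include <linux/usb.h>

static u8 example_active_ifnum(struct usb_interface *intf)
{
	/* cur_altsetting tracks the setting currently in effect */
	return intf->cur_altsetting->desc.bInterfaceNumber;
}
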
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 5fc0be564274..7ab87a758754 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
struct usb_endpoint_descriptor *ep;
int i;
- iface_desc = &dev->intf->altsetting[0];
+ iface_desc = dev->intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
ep = &iface_desc->endpoint[i].desc;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 07d2f3aa2c02..1b9957f12459 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
struct kvaser_cmd *cmd;
int err;
- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
@@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
struct kvaser_cmd *cmd;
int rc;
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
struct kvaser_cmd *cmd;
int rc;
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
struct usb_endpoint_descriptor *endpoint;
int i;
- iface_desc = &dev->intf->altsetting[0];
+ iface_desc = dev->intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
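
The kmalloc() to kzalloc() switches in kvaser_usb_leaf.c are information-leak fixes rather than functional changes: the command buffer is sent to the device at a fixed length regardless of how many bytes the caller filled in, so an uninitialized tail would leak stale kernel heap contents onto the bus. Zero-initializing the allocation closes that hole; a minimal sketch with a hypothetical wire format:

#include <linux/slab.h>

struct example_cmd {	/* hypothetical fixed-size wire format */
	u8 id;
	u8 len;
	u8 payload[30];
};

static struct example_cmd *example_alloc_cmd(gfp_t gfp)
{
	/* kzalloc(): any bytes the caller never writes read back as 0 */
	return kzalloc(sizeof(struct example_cmd), gfp);
}
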
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 464af939cd8a..c1dbab8c896d 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -60,6 +60,8 @@ enum xcan_reg {
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
+ XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */
+ XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */
};
#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
@@ -1809,6 +1811,11 @@ static int xcan_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
+ if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
+ priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
+ priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
+ }
+
netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
priv->reg_base, ndev->irq, priv->can.clock.freq,
hw_tx_max, priv->tx_max);
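
The two writes added to xcan_probe() program the second acceptance-filter pair on CAN FD 2.0 cores with an all-zero mask and ID. Acceptance filtering is the usual mask/ID compare, so a zero mask selects no bits and therefore matches every incoming identifier, i.e. accept-all; as a hypothetical helper:

#include <linux/types.h>

static bool example_af_match(u32 rx_id, u32 af_id, u32 af_mask)
{
	/* mask == 0 compares nothing, so every frame is accepted */
	return (rx_id & af_mask) == (af_id & af_mask);
}
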
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c7667645f04a..2d38dbc9dd8c 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -54,6 +54,8 @@ source "drivers/net/dsa/mv88e6xxx/Kconfig"
source "drivers/net/dsa/ocelot/Kconfig"
+source "drivers/net/dsa/qca/Kconfig"
+
source "drivers/net/dsa/sja1105/Kconfig"
config NET_DSA_QCA8K
@@ -103,7 +105,6 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- depends on OF
depends on NET_DSA
select FIXED_PHY
select VITESSE_PHY
@@ -114,7 +115,6 @@ config NET_DSA_VITESSE_VSC73XX
config NET_DSA_VITESSE_VSC73XX_SPI
tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support"
- depends on OF
depends on NET_DSA
depends on SPI
select NET_DSA_VITESSE_VSC73XX
@@ -124,7 +124,6 @@ config NET_DSA_VITESSE_VSC73XX_SPI
config NET_DSA_VITESSE_VSC73XX_PLATFORM
tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support"
- depends on OF
depends on NET_DSA
depends on HAS_IOMEM
select NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 9d384a32b3a2..4a943ccc2ca4 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -21,4 +21,5 @@ obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
+obj-y += qca/
obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 36828f210030..449a22172e07 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
* frames should be flooded or not.
*/
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN;
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}
@@ -371,8 +371,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
- mgmt &= ~SM_SW_FWD_MODE;
-
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
@@ -526,6 +524,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ b53_br_egress_floods(ds, port, true, true);
+
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
@@ -571,9 +571,8 @@ EXPORT_SYMBOL(b53_disable_port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
{
- bool tag_en = !(ds->ops->get_tag_protocol(ds, port) ==
- DSA_TAG_PROTO_NONE);
struct b53_device *dev = ds->priv;
+ bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
u8 hdr_ctl, val;
u16 reg;
@@ -593,6 +592,22 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
break;
}
+ /* Enable management mode if tagging is requested */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
+ if (tag_en)
+ hdr_ctl |= SM_SW_FWD_MODE;
+ else
+ hdr_ctl &= ~SM_SW_FWD_MODE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
+
+ /* Configure the appropriate IMP port */
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
+ if (port == 8)
+ hdr_ctl |= GC_FRM_MGMT_PORT_MII;
+ else if (port == 5)
+ hdr_ctl |= GC_FRM_MGMT_PORT_M;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -641,6 +656,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
+
+ b53_br_egress_floods(dev->ds, port, true, true);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -676,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false, ds->vlan_filtering);
+ b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
@@ -1821,19 +1838,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port,
struct b53_device *dev = ds->priv;
u16 uc, mc;
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc);
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
if (unicast)
uc |= BIT(port);
else
uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc);
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
if (multicast)
mc |= BIT(port);
else
mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
return 0;
@@ -1855,36 +1879,57 @@ static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
return false;
}
-static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
+static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol tag_protocol)
{
bool ret = b53_possible_cpu_port(ds, port);
- if (!ret)
+ if (!ret) {
dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
port);
+ return ret;
+ }
+
+ switch (tag_protocol) {
+ case DSA_TAG_PROTO_BRCM:
+ case DSA_TAG_PROTO_BRCM_PREPEND:
+ dev_warn(ds->dev,
+ "Port %d is stacked to Broadcom tag switch\n", port);
+ ret = false;
+ break;
+ default:
+ ret = true;
+ break;
+ }
+
return ret;
}
-enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot)
{
struct b53_device *dev = ds->priv;
/* Older models (5325, 5365) support a different tag format that we do
- * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
- * mode to be turned on which means we need to specifically manage ARL
- * misses on multicast addresses (TBD).
+ * not support in net/dsa/tag_brcm.c yet.
*/
- if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
- !b53_can_enable_brcm_tags(ds, port))
- return DSA_TAG_PROTO_NONE;
+ if (is5325(dev) || is5365(dev) ||
+ !b53_can_enable_brcm_tags(ds, port, mprot)) {
+ dev->tag_protocol = DSA_TAG_PROTO_NONE;
+ goto out;
+ }
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
* which requires us to use the prepended Broadcom tag type
*/
- if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT)
- return DSA_TAG_PROTO_BRCM_PREPEND;
+ if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
+ goto out;
+ }
- return DSA_TAG_PROTO_BRCM;
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM;
+out:
+ return dev->tag_protocol;
}
EXPORT_SYMBOL(b53_get_tag_protocol);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1877acf05081..3c30f3a7eb29 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -118,6 +118,7 @@ struct b53_device {
u8 jumbo_size_reg;
int reset_gpio;
u8 num_arl_entries;
+ enum dsa_tag_protocol tag_protocol;
/* used ports mask */
u16 enabled_ports;
@@ -359,7 +360,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
-enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
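
The prototype change above recurs for every DSA driver in the rest of this diff: ->get_tag_protocol() now also receives the tagging protocol already in use on the DSA master. b53 uses it to refuse Broadcom tags when stacked under another Broadcom-tagged switch, where double-tagging would corrupt frames. The shape of the extended hook, in a hypothetical driver:

#include <net/dsa.h>

static enum dsa_tag_protocol
example_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mprot)
{
	/* Master already inserts Broadcom tags: don't stack a second */
	if (mprot == DSA_TAG_PROTO_BRCM ||
	    mprot == DSA_TAG_PROTO_BRCM_PREPEND)
		return DSA_TAG_PROTO_NONE;

	return DSA_TAG_PROTO_BRCM;
}
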
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e43040c9f9ee..d1955543acd1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -69,6 +69,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
/* Force link status for IMP port */
reg = core_readl(priv, offset);
reg |= (MII_SW_OR | LINK_STS);
+ if (priv->type == BCM7278_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index f3f0c3f07391..1962c8330daa 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
return -EINVAL;
}
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
if (rule->fs.flow_type != fs->flow_type ||
rule->fs.ring_cookie != fs->ring_cookie ||
- rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ rule->fs.h_ext.data[0] != fs->h_ext.data[0])
continue;
switch (fs->flow_type & ~FLOW_EXT) {
@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
return -EINVAL;
}
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
layout = &udf_tcpip6_layout;
slice_num = bcm_sf2_get_slice_number(layout, 0);
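
The bcm_sf2_cfp fixes hinge on an ethtool flow-spec convention that is easy to get backwards: fs->h_ext carries the user-supplied values and fs->m_ext the corresponding masks. The "is an IP fragment" flag is a value, so it must be read from h_ext, and only its lowest bit is meaningful:

#include <linux/ethtool.h>

static bool example_ip_frag(const struct ethtool_rx_flow_spec *fs)
{
	/* h_ext = values, m_ext = masks; bit 0 of data[0] = fragment */
	return !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
}
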
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index c8d7ef27fd72..fdcb70b9f0e4 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -61,7 +61,8 @@ struct dsa_loop_priv {
static struct phy_device *phydevs[PHY_MAX_ADDR];
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
dev_dbg(ds->dev, "%s: port: %d\n", __func__, port);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index e3c333a8f45d..cc17a44dd3a8 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -883,7 +883,8 @@ static int lan9303_check_device(struct lan9303 *chip)
/* ---------------------------- DSA -----------------------------------*/
static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_LAN9303;
}
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 955324968b74..0369c22fe3e1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -841,7 +841,8 @@ static int gswip_setup(struct dsa_switch *ds)
}
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_GSWIP;
}
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 24a5e99f7fd5..47d65b77caf7 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -645,7 +645,8 @@ static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
}
static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_KSZ8795;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 50ffc63d6231..9a51b8a4de5d 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -295,7 +295,8 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
}
static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
struct ksz_device *dev = ds->priv;
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index c5f64959a184..1142768969c2 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = {
module_spi_driver(ksz9477_spi_driver);
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ed1ec10ec62b..022466ca1c19 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1223,7 +1223,8 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-mtk_get_tag_protocol(struct dsa_switch *ds, int port)
+mtk_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
struct mt7530_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a5a37f47b320..24b8219fd607 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -43,7 +43,8 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
}
static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol m)
{
return DSA_TAG_PROTO_TRAILER;
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3bd988529178..8c9289549688 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -340,11 +340,14 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
+ snprintf(chip->irq_name, sizeof(chip->irq_name),
+ "mv88e6xxx-%s", dev_name(chip->dev));
+
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
- dev_name(chip->dev), chip);
+ chip->irq_name, chip);
mv88e6xxx_reg_lock(chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@ -2303,10 +2306,14 @@ static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
if (!irq)
return 0;
+ snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port);
+
/* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-serdes", dev_id);
+ IRQF_ONESHOT, dev_id->serdes_irq_name,
+ dev_id);
mv88e6xxx_reg_lock(chip);
if (err)
return err;
@@ -3838,6 +3845,9 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .phylink_validate = mv88e6390_phylink_validate,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -3889,6 +3899,9 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .phylink_validate = mv88e6390_phylink_validate,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3939,6 +3952,9 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .phylink_validate = mv88e6390_phylink_validate,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_validate = mv88e6390_phylink_validate,
@@ -4085,6 +4101,9 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .phylink_validate = mv88e6390_phylink_validate,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4424,6 +4443,9 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -4476,6 +4498,9 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5207,7 +5232,8 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
}
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol m)
{
struct mv88e6xxx_chip *chip = ds->priv;
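
All of the snprintf()-into-a-chip-member changes in this file, and in global1_atu.c, global1_vtu.c and global2.c further down, exist to give each chip instance a unique name in /proc/interrupts. The subtlety is lifetime: request_threaded_irq() stores the name pointer rather than copying the string, so the buffer must live for as long as the IRQ stays requested; hence the char arrays added to struct mv88e6xxx_chip in the next hunk instead of a stack-local buffer. The pattern, reduced to a hypothetical driver:

#include <linux/device.h>
#include <linux/interrupt.h>

struct example_chip {	/* hypothetical */
	struct device *dev;
	int irq;
	char irq_name[64];	/* must outlive the request */
};

static int example_request_irq(struct example_chip *chip,
			       irq_handler_t thread_fn)
{
	snprintf(chip->irq_name, sizeof(chip->irq_name),
		 "example-%s", dev_name(chip->dev));

	return request_threaded_irq(chip->irq, NULL, thread_fn,
				    IRQF_ONESHOT, chip->irq_name, chip);
}
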
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8a8e38bfb161..79cad5e751c6 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -236,6 +236,7 @@ struct mv88e6xxx_port {
bool mirror_ingress;
bool mirror_egress;
unsigned int serdes_irq;
+ char serdes_irq_name[64];
};
struct mv88e6xxx_chip {
@@ -292,11 +293,16 @@ struct mv88e6xxx_chip {
struct mv88e6xxx_irq g1_irq;
struct mv88e6xxx_irq g2_irq;
int irq;
+ char irq_name[64];
int device_irq;
+ char device_irq_name[64];
int watchdog_irq;
+ char watchdog_irq_name[64];
int atu_prob_irq;
+ char atu_prob_irq_name[64];
int vtu_prob_irq;
+ char vtu_prob_irq_name[64];
struct kthread_worker *kworker;
struct kthread_delayed_work irq_poll_work;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 120a65d3e3ef..b016cc205f81 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
+ /* Use the default high priority for management frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
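
mv88e6390_g1_monitor_write() combines a pointer value (which field to update) and a data byte (here the destination port) into a single register write; the added MGMTPRI bits live in the data byte and force management frames into the highest-priority queue. Using the masks defined in global1.h below, the value written is roughly the following sketch (the real helper also ORs in an update/busy bit, omitted here):

static u16 example_cpu_dest_word(int port)
{
	return MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST |	      /* pointer */
	       MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI | /* 0x00e0 */
	       port;						      /* data */
}
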
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index bc5a6b2bb1e4..5324c6f4ae90 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -211,6 +211,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
/* Offset 0x1C: Global Control 2 */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index bdcd25560dd2..bac9a8a68e50 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -425,9 +425,12 @@ int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
if (chip->atu_prob_irq < 0)
return chip->atu_prob_irq;
+ snprintf(chip->atu_prob_irq_name, sizeof(chip->atu_prob_irq_name),
+ "mv88e6xxx-%s-g1-atu-prob", dev_name(chip->dev));
+
err = request_threaded_irq(chip->atu_prob_irq, NULL,
mv88e6xxx_g1_atu_prob_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob",
+ IRQF_ONESHOT, chip->atu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->atu_prob_irq);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 33056a609e96..48390b7b18ad 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -631,9 +631,12 @@ int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
if (chip->vtu_prob_irq < 0)
return chip->vtu_prob_irq;
+ snprintf(chip->vtu_prob_irq_name, sizeof(chip->vtu_prob_irq_name),
+ "mv88e6xxx-%s-g1-vtu-prob", dev_name(chip->dev));
+
err = request_threaded_irq(chip->vtu_prob_irq, NULL,
mv88e6xxx_g1_vtu_prob_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob",
+ IRQF_ONESHOT, chip->vtu_prob_irq_name,
chip);
if (err)
irq_dispose_mapping(chip->vtu_prob_irq);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 87bfe7c8c9cd..01503014b1ee 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -948,10 +948,13 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
if (chip->watchdog_irq < 0)
return chip->watchdog_irq;
+ snprintf(chip->watchdog_irq_name, sizeof(chip->watchdog_irq_name),
+ "mv88e6xxx-%s-watchdog", dev_name(chip->dev));
+
err = request_threaded_irq(chip->watchdog_irq, NULL,
mv88e6xxx_g2_watchdog_thread_fn,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- "mv88e6xxx-watchdog", chip);
+ chip->watchdog_irq_name, chip);
if (err)
return err;
@@ -1114,9 +1117,12 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
goto out;
}
+ snprintf(chip->device_irq_name, sizeof(chip->device_irq_name),
+ "mv88e6xxx-%s-g2", dev_name(chip->dev));
+
err = request_threaded_irq(chip->device_irq, NULL,
mv88e6xxx_g2_irq_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-g2", chip);
+ IRQF_ONESHOT, chip->device_irq_name, chip);
if (err)
goto out;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 7fe256c5739d..0b43c650e100 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
}
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
- phy_interface_t mode)
+ phy_interface_t mode, bool force)
{
u8 lane;
u16 cmode;
@@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
- /* cmode doesn't change, nothing to do for us */
- if (cmode == chip->ports[port].cmode)
+ /* cmode doesn't change, nothing to do for us unless forced */
+ if (cmode == chip->ports[port].cmode && !force)
return 0;
lane = mv88e6xxx_serdes_get_lane(chip, port);
@@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 9 && port != 10)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
@@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
break;
}
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
@@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, true);
}
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 902feb398746..8d8b3b74aee1 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -405,22 +405,116 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
return err;
}
+struct mv88e6390_serdes_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int reg;
+};
+
+static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
+ { "serdes_rx_pkts", 0xf021 },
+ { "serdes_rx_bytes", 0xf024 },
+ { "serdes_rx_pkts_error", 0xf027 },
+};
+
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int i;
+
+ if (mv88e6390_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ memcpy(data + i * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ }
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
+ struct mv88e6390_serdes_hw_stat *stat)
+{
+ u16 reg[3];
+ int err, i;
+
+ for (i = 0; i < 3; i++) {
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ stat->reg + i, &reg[i]);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+ }
+
+ return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
+}
+
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int lane;
+ int i;
+
+ lane = mv88e6390_serdes_get_lane(chip, port);
+ if (lane == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ data[i] = mv88e6390_serdes_get_stat(chip, lane, stat);
+ }
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, u8 lane)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg |= MV88E6390_PG_CONTROL_ENABLE_PC;
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, reg);
+}
+
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool up)
{
u8 cmode = chip->ports[port].cmode;
+ int err = 0;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, up);
+ err = mv88e6390_serdes_power_sgmii(chip, lane, up);
+ break;
case MV88E6XXX_PORT_STS_CMODE_XAUI:
case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- return mv88e6390_serdes_power_10g(chip, lane, up);
+ err = mv88e6390_serdes_power_10g(chip, lane, up);
+ break;
}
- return 0;
+ if (!err && up)
+ err = mv88e6390_serdes_enable_checker(chip, lane);
+
+ return err;
}
static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
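
mv88e6390_serdes_get_stat() above shows the counter layout: each SerDes statistic is exposed as three consecutive 16-bit registers, least-significant word first, concatenated into a 48-bit value. Pulled out on its own:

#include <linux/types.h>

/* lo = bits 15:0, mid = bits 31:16, hi = bits 47:32; e.g. lo=0x0001,
 * mid=0x0002, hi=0x0003 yields 0x000300020001.
 */
static u64 example_assemble48(u16 lo, u16 mid, u16 hi)
{
	return (u64)lo | ((u64)mid << 16) | ((u64)hi << 32);
}
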
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index bd8df36ab537..d16ef4da20b0 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -74,6 +74,10 @@
#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
+/* Packet generator pad packet checker */
+#define MV88E6390_PG_CONTROL 0xf010
+#define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0)
+
u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
@@ -99,6 +103,11 @@ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
/* Return the (first) SERDES lane address a port is using, 0 otherwise. */
static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 0031ca814346..a5b7cca03d09 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -2,8 +2,11 @@
config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
+ depends on NET_VENDOR_MICROSEMI
+ depends on NET_VENDOR_FREESCALE
select MSCC_OCELOT_SWITCH
select NET_DSA_TAG_OCELOT
+ select FSL_ENETC_MDIO
help
This driver supports the VSC9959 network switch, which is a member of
the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index b7f92464815d..3257962c147e 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2,16 +2,22 @@
/* Copyright 2019 NXP Semiconductors
*/
#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
#include <linux/module.h>
+#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <net/dsa.h>
#include "felix.h"
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_OCELOT;
}
@@ -26,14 +32,6 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
-static void felix_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct ocelot *ocelot = ds->priv;
-
- ocelot_adjust_link(ocelot, port, phydev);
-}
-
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -155,6 +153,141 @@ static void felix_port_disable(struct dsa_switch *ds, int port)
return ocelot_port_disable(ocelot, port);
}
+static void felix_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != ocelot_port->phy_mode) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* No half-duplex. */
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ /* The internal ports that run at 2.5G are overclocked GMII */
+ if (state->interface == PHY_INTERFACE_MODE_GMII ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->pcs_link_state)
+ felix->info->pcs_link_state(ocelot, port, state);
+
+ return 0;
+}
+
+static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
+ u32 mac_fc_cfg;
+
+ /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
+ * PORT_RST bits in CLOCK_CFG
+ */
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(state->speed),
+ DEV_CLOCK_CFG);
+
+ /* Flow control. Link speed is only used here to evaluate the time
+ * specification in incoming pause frames.
+ */
+ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(state->speed);
+
+ /* handle Rx pause in all cases, with 2500base-X this is used for rate
+ * adaptation.
+ */
+ mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
+
+ if (state->pause & MLO_PAUSE_TX)
+ mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
+ ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
+
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+
+ if (felix->info->pcs_init)
+ felix->info->pcs_init(ocelot, port, link_an_mode, state);
+}
+
+static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->pcs_an_restart)
+ felix->info->pcs_an_restart(ocelot, port);
+}
+
+static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+
+static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ /* Enable MAC module */
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+
+ /* Core: Enable port for frame transfer */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -185,10 +318,76 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static int felix_parse_ports_node(struct felix *felix,
+ struct device_node *ports_node,
+ phy_interface_t *port_phy_modes)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ phy_interface_t phy_mode;
+ u32 port;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &port) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ port);
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ if (err < 0) {
+ dev_err(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
+ return err;
+ }
+
+ port_phy_modes[port] = phy_mode;
+ }
+
+ return 0;
+}
+
+static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
+{
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *switch_node;
+ struct device_node *ports_node;
+ int err;
+
+ switch_node = dev->of_node;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
+ of_node_put(ports_node);
+
+ return err;
+}
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
- resource_size_t base;
+ phy_interface_t *port_phy_modes;
+ resource_size_t switch_base;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -203,7 +402,19 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->ops = felix->info->ops;
- base = pci_resource_start(felix->pdev, felix->info->pci_bar);
+ port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
+ GFP_KERNEL);
+ if (!port_phy_modes)
+ return -ENOMEM;
+
+ err = felix_parse_dt(felix, port_phy_modes);
+ if (err) {
+ kfree(port_phy_modes);
+ return err;
+ }
+
+ switch_base = pci_resource_start(felix->pdev,
+ felix->info->switch_pci_bar);
for (i = 0; i < TARGET_MAX; i++) {
struct regmap *target;
@@ -214,13 +425,14 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
res = &felix->info->target_io_res[i];
res->flags = IORESOURCE_MEM;
- res->start += base;
- res->end += base;
+ res->start += switch_base;
+ res->end += switch_base;
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
"Failed to map device memory space\n");
+ kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -230,6 +442,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
err = ocelot_regfields_init(ocelot, felix->info->regfields);
if (err) {
dev_err(ocelot->dev, "failed to init reg fields map\n");
+ kfree(port_phy_modes);
return err;
}
@@ -244,26 +457,37 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
if (!ocelot_port) {
dev_err(ocelot->dev,
"failed to allocate port memory\n");
+ kfree(port_phy_modes);
return -ENOMEM;
}
res = &felix->info->port_io_res[port];
res->flags = IORESOURCE_MEM;
- res->start += base;
- res->end += base;
+ res->start += switch_base;
+ res->end += switch_base;
port_regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(port_regs)) {
dev_err(ocelot->dev,
"failed to map registers for port %d\n", port);
+ kfree(port_phy_modes);
return PTR_ERR(port_regs);
}
+ ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->regs = port_regs;
ocelot->ports[port] = ocelot_port;
}
+ kfree(port_phy_modes);
+
+ if (felix->info->mdio_bus_alloc) {
+ err = felix->info->mdio_bus_alloc(ocelot);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
@@ -293,12 +517,22 @@ static int felix_setup(struct dsa_switch *ds)
OCELOT_TAG_PREFIX_LONG);
}
+ /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
+ * isn't instantiated for the Felix PF.
+ * In-band AN may take a few ms to complete, so we need to poll.
+ */
+ ds->pcs_poll = true;
+
return 0;
}
static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
@@ -369,7 +603,12 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
- .adjust_link = felix_adjust_link,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
+ .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_an_restart = felix_phylink_mac_an_restart,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_disable = felix_port_disable,
.port_fdb_dump = felix_fdb_dump,
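
felix_phylink_validate() above follows the canonical phylink validate idiom: build a bitmap of every link mode the MAC supports for the requested interface, then AND it into both the supported and advertising masks (zeroing them entirely when the interface mode itself is unusable). Reduced to a skeleton for a hypothetical 1G-only MAC:

#include <linux/phylink.h>

static void example_validate(unsigned long *supported,
			     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
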
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 204296e51d0c..3a7580015b62 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -10,6 +10,7 @@
struct felix_info {
struct resource *target_io_res;
struct resource *port_io_res;
+ struct resource *imdio_res;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -17,7 +18,18 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
- int pci_bar;
+ int switch_pci_bar;
+ int imdio_pci_bar;
+ int (*mdio_bus_alloc)(struct ocelot *ocelot);
+ void (*mdio_bus_free)(struct ocelot *ocelot);
+ void (*pcs_init)(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state);
+ void (*pcs_an_restart)(struct ocelot *ocelot, int port);
+ void (*pcs_link_state)(struct ocelot *ocelot, int port,
+ struct phylink_link_state *state);
+ int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
+ phy_interface_t phy_mode);
};
extern struct felix_info felix_info_vsc9959;
@@ -32,6 +44,8 @@ struct felix {
struct pci_dev *pdev;
struct felix_info *info;
struct ocelot ocelot;
+ struct mii_bus *imdio;
+ struct phy_device **pcs;
};
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index b9758b0d18c7..2c812b481778 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2,12 +2,33 @@
/* Copyright 2017 Microsemi Corporation
* Copyright 2018-2019 NXP Semiconductors
*/
+#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
+/* TODO: should find a better place for these */
+#define USXGMII_BMCR_RESET BIT(15)
+#define USXGMII_BMCR_AN_EN BIT(12)
+#define USXGMII_BMCR_RST_AN BIT(9)
+#define USXGMII_BMSR_LNKS(status) (((status) & GENMASK(2, 2)) >> 2)
+#define USXGMII_BMSR_AN_CMPL(status) (((status) & GENMASK(5, 5)) >> 5)
+#define USXGMII_ADVERTISE_LNKS(x) (((x) << 15) & BIT(15))
+#define USXGMII_ADVERTISE_FDX BIT(12)
+#define USXGMII_ADVERTISE_SPEED(x) (((x) << 9) & GENMASK(11, 9))
+#define USXGMII_LPA_LNKS(lpa) ((lpa) >> 15)
+#define USXGMII_LPA_DUPLEX(lpa) (((lpa) & GENMASK(12, 12)) >> 12)
+#define USXGMII_LPA_SPEED(lpa) (((lpa) & GENMASK(11, 9)) >> 9)
+
+enum usxgmii_speed {
+ USXGMII_SPEED_10 = 0,
+ USXGMII_SPEED_100 = 1,
+ USXGMII_SPEED_1000 = 2,
+ USXGMII_SPEED_2500 = 4,
+};
+
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
@@ -386,6 +407,15 @@ static struct resource vsc9959_port_io_res[] = {
},
};
+/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
+ * SGMII/QSGMII MAC PCS can be found.
+ */
+static struct resource vsc9959_imdio_res = {
+ .start = 0x8030,
+ .end = 0x8040,
+ .name = "imdio",
+};
+
static const struct reg_field vsc9959_regfields[] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
[ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
@@ -565,13 +595,495 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
+static void vsc9959_pcs_an_restart_sgmii(struct phy_device *pcs)
+{
+ phy_set_bits(pcs, MII_BMCR, BMCR_ANRESTART);
+}
+
+static void vsc9959_pcs_an_restart_usxgmii(struct phy_device *pcs)
+{
+ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
+ USXGMII_BMCR_RESET |
+ USXGMII_BMCR_AN_EN |
+ USXGMII_BMCR_RST_AN);
+}
+
+static void vsc9959_pcs_an_restart(struct ocelot *ocelot, int port)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_an_restart_sgmii(pcs);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_an_restart_usxgmii(pcs);
+ break;
+ default:
+ dev_err(ocelot->dev, "Invalid PCS interface type %s\n",
+ phy_modes(pcs->interface));
+ break;
+ }
+}
+
+/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
+ * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
+ * into the PCS, which is retrieved out-of-band over MDIO. This also has the
+ * benefit of working with SGMII fixed-links, like downstream switches, where
+ * both link partners attempt to operate as AN slaves and therefore AN never
+ * completes. But it also has the disadvantage that some PHY chips don't pass
+ * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
+ * setting MLO_AN_INBAND is actually required for those.
+ */
+static void vsc9959_pcs_init_sgmii(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode == MLO_AN_INBAND) {
+ int bmsr, bmcr;
+
+ /* Some PHYs like VSC8234 don't like it when AN restarts on
+ * their system side and they restart line side AN too, going
+ * into an endless link up/down loop. Don't restart PCS AN if
+ * link is up already.
+ * We do check that AN is enabled just in case this is the 1st
+ * call, PCS detects a carrier but AN is disabled from power on
+ * or by boot loader.
+ */
+ bmcr = phy_read(pcs, MII_BMCR);
+ if (bmcr < 0)
+ return;
+
+ bmsr = phy_read(pcs, MII_BMSR);
+ if (bmsr < 0)
+ return;
+
+ if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
+ return;
+
+ /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
+ * for the MAC PCS in order to acknowledge the AN.
+ */
+ phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
+ ADVERTISE_LPACK);
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_USE_SGMII_AN);
+
+ /* Adjust link timer for SGMII */
+ phy_write(pcs, ENETC_PCS_LINK_TIMER1,
+ ENETC_PCS_LINK_TIMER1_VAL);
+ phy_write(pcs, ENETC_PCS_LINK_TIMER2,
+ ENETC_PCS_LINK_TIMER2_VAL);
+
+ phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
+ } else {
+ int speed;
+
+ if (state->duplex == DUPLEX_HALF) {
+ phydev_err(pcs, "Half duplex not supported\n");
+ return;
+ }
+ switch (state->speed) {
+ case SPEED_1000:
+ speed = ENETC_PCS_SPEED_1000;
+ break;
+ case SPEED_100:
+ speed = ENETC_PCS_SPEED_100;
+ break;
+ case SPEED_10:
+ speed = ENETC_PCS_SPEED_10;
+ break;
+ case SPEED_UNKNOWN:
+ /* Silently don't do anything */
+ return;
+ default:
+ phydev_err(pcs, "Invalid PCS speed %d\n", state->speed);
+ return;
+ }
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_SGMII_SPEED(speed));
+
+ /* Yes, not a mistake: speed is given by IF_MODE. */
+ phy_write(pcs, MII_BMCR, BMCR_RESET |
+ BMCR_SPEED1000 |
+ BMCR_FULLDPLX);
+ }
+}
+
+/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
+ * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
+ * auto-negotiation of any link parameters. Electrically it is compatible with
+ * a single lane of XAUI.
+ * The hardware reference manual wants to call this mode SGMII, but it isn't
+ * really, since the fundamental features of SGMII:
+ * - Downgrading the link speed by duplicating symbols
+ * - Auto-negotiation
+ * are not there.
+ * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers
+ * because the clock frequency is actually given by a PLL configured in the
+ * Reset Configuration Word (RCW).
+ * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
+ * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
+ * lower link speed on line side, the system-side interface remains fixed at
+ * 2500 Mbps and we do rate adaptation through pause frames.
+ */
+static void vsc9959_pcs_init_2500basex(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode == MLO_AN_INBAND) {
+ phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
+ return;
+ }
+
+ phy_write(pcs, ENETC_PCS_IF_MODE,
+ ENETC_PCS_IF_MODE_SGMII_EN |
+ ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
+
+ phy_write(pcs, MII_BMCR, BMCR_SPEED1000 |
+ BMCR_FULLDPLX |
+ BMCR_RESET);
+}
+
+static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ if (link_an_mode != MLO_AN_INBAND) {
+ phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
+ return;
+ }
+
+ /* Configure device ability for the USXGMII Replicator */
+ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
+ USXGMII_ADVERTISE_SPEED(USXGMII_SPEED_2500) |
+ USXGMII_ADVERTISE_LNKS(1) |
+ ADVERTISE_SGMII |
+ ADVERTISE_LPACK |
+ USXGMII_ADVERTISE_FDX);
+}
+
+static void vsc9959_pcs_init(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ /* The PCS does not implement the BMSR register fully, so capability
+ * detection via genphy_read_abilities does not work. Since we can get
+ * the PHY config word from the LPA register though, there is still
+ * value in using the generic phy_resolve_aneg_linkmode function. So
+ * populate the supported and advertising link modes manually here.
+ */
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
+ if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ pcs->interface == PHY_INTERFACE_MODE_USXGMII)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ pcs->supported);
+ if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ pcs->supported);
+ phy_advertise_supported(pcs);
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_init_sgmii(pcs, link_an_mode, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ vsc9959_pcs_init_2500basex(pcs, link_an_mode, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_init_usxgmii(pcs, link_an_mode, state);
+ break;
+ default:
+ dev_err(ocelot->dev, "Unsupported link mode %s\n",
+ phy_modes(pcs->interface));
+ }
+}
+
+static void vsc9959_pcs_link_state_resolve(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ state->an_complete = pcs->autoneg_complete;
+ state->an_enabled = pcs->autoneg;
+ state->link = pcs->link;
+ state->duplex = pcs->duplex;
+ state->speed = pcs->speed;
+ /* SGMII AN does not negotiate flow control, but that's ok,
+ * since phylink already knows that, and does:
+ * link_state.pause |= pl->phy_state.pause;
+ */
+ state->pause = MLO_PAUSE_NONE;
+
+ phydev_dbg(pcs,
+ "mode=%s/%s/%s adv=%*pb lpa=%*pb link=%u an_enabled=%u an_complete=%u\n",
+ phy_modes(pcs->interface),
+ phy_speed_to_str(pcs->speed),
+ phy_duplex_to_str(pcs->duplex),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->lp_advertising,
+ pcs->link, pcs->autoneg, pcs->autoneg_complete);
+}
+
+static void vsc9959_pcs_link_state_sgmii(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int err;
+
+ err = genphy_update_link(pcs);
+ if (err < 0)
+ return;
+
+ if (pcs->autoneg_complete) {
+ u16 lpa = phy_read(pcs, MII_LPA);
+
+ mii_lpa_to_linkmode_lpa_sgmii(pcs->lp_advertising, lpa);
+
+ phy_resolve_aneg_linkmode(pcs);
+ }
+}
+
+static void vsc9959_pcs_link_state_2500basex(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int err;
+
+ err = genphy_update_link(pcs);
+ if (err < 0)
+ return;
+
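+	/* The lane always runs at 2500 Mbps; rate adaptation to slower
+	 * line-side speeds is done through the pause frames advertised here.
+	 */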
+ pcs->speed = SPEED_2500;
+ pcs->asym_pause = true;
+ pcs->pause = true;
+}
+
+static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
+ struct phylink_link_state *state)
+{
+ int status, lpa;
+
+ status = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_BMSR);
+ if (status < 0)
+ return;
+
+ pcs->autoneg = true;
+ pcs->autoneg_complete = USXGMII_BMSR_AN_CMPL(status);
+ pcs->link = USXGMII_BMSR_LNKS(status);
+
+ if (!pcs->link || !pcs->autoneg_complete)
+ return;
+
+ lpa = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0)
+ return;
+
+ switch (USXGMII_LPA_SPEED(lpa)) {
+ case USXGMII_SPEED_10:
+ pcs->speed = SPEED_10;
+ break;
+ case USXGMII_SPEED_100:
+ pcs->speed = SPEED_100;
+ break;
+ case USXGMII_SPEED_1000:
+ pcs->speed = SPEED_1000;
+ break;
+ case USXGMII_SPEED_2500:
+ pcs->speed = SPEED_2500;
+ break;
+ default:
+ break;
+ }
+
+ if (USXGMII_LPA_DUPLEX(lpa))
+ pcs->duplex = DUPLEX_FULL;
+ else
+ pcs->duplex = DUPLEX_HALF;
+}
+
+static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
+ struct phylink_link_state *state)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ return;
+
+ pcs->speed = SPEED_UNKNOWN;
+ pcs->duplex = DUPLEX_UNKNOWN;
+ pcs->pause = 0;
+ pcs->asym_pause = 0;
+
+ switch (pcs->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ vsc9959_pcs_link_state_sgmii(pcs, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ vsc9959_pcs_link_state_2500basex(pcs, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ vsc9959_pcs_link_state_usxgmii(pcs, state);
+ break;
+ default:
+ return;
+ }
+
+ vsc9959_pcs_link_state_resolve(pcs, state);
+}
+
+static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
+ phy_interface_t phy_mode)
+{
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_GMII:
+ /* Only supported on internal to-CPU ports */
+ if (port != 4 && port != 5)
+ return -ENOTSUPP;
+ return 0;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* Not supported on internal to-CPU ports */
+ if (port == 4 || port == 5)
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
};
+static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
+ void __iomem *imdio_regs;
+ struct resource *res;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int port;
+ int rc;
+
+ felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
+ sizeof(struct phy_device *),
+ GFP_KERNEL);
+ if (!felix->pcs) {
+ dev_err(dev, "failed to allocate array for PCS PHYs\n");
+ return -ENOMEM;
+ }
+
+ imdio_base = pci_resource_start(felix->pdev,
+ felix->info->imdio_pci_bar);
+
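+	/* The internal MDIO register resource is described as an offset
+	 * within the PCI BAR, so translate it into an absolute address here.
+	 */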
+ res = felix->info->imdio_res;
+ res->flags = IORESOURCE_MEM;
+ res->start += imdio_base;
+ res->end += imdio_base;
+
+ imdio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(imdio_regs)) {
+ dev_err(dev, "failed to map internal MDIO registers\n");
+ return PTR_ERR(imdio_regs);
+ }
+
+ hw = enetc_hw_alloc(dev, imdio_regs);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to allocate ENETC HW structure\n");
+ return PTR_ERR(hw);
+ }
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "VSC9959 internal MDIO bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ /* This gets added to imdio_regs, which already maps addresses
+ * starting with the proper offset.
+ */
+ mdio_priv->mdio_base = 0;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ /* Needed in order to initialize the bus mutex lock */
+ rc = mdiobus_register(bus);
+ if (rc < 0) {
+ dev_err(dev, "failed to register MDIO bus\n");
+ return rc;
+ }
+
+ felix->imdio = bus;
+
+ for (port = 0; port < felix->info->num_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phy_device *pcs;
+ bool is_c45 = false;
+
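+		/* The USXGMII PCS is a clause 45 MDIO device, while the
+		 * SGMII and 2500Base-X PCS are clause 22.
+		 */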
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ is_c45 = true;
+
+ pcs = get_phy_device(felix->imdio, port, is_c45);
+ if (IS_ERR(pcs))
+ continue;
+
+ pcs->interface = ocelot_port->phy_mode;
+ felix->pcs[port] = pcs;
+
+ dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
+ }
+
+ return 0;
+}
+
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct phy_device *pcs = felix->pcs[port];
+
+ if (!pcs)
+ continue;
+
+ put_device(&pcs->mdio.dev);
+ }
+ mdiobus_unregister(felix->imdio);
+}
+
struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
+ .imdio_res = &vsc9959_imdio_res,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
@@ -579,5 +1091,12 @@ struct felix_info felix_info_vsc9959 = {
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.shared_queue_sz = 128 * 1024,
.num_ports = 6,
- .pci_bar = 4,
+ .switch_pci_bar = 4,
+ .imdio_pci_bar = 0,
+ .mdio_bus_alloc = vsc9959_mdio_bus_alloc,
+ .mdio_bus_free = vsc9959_mdio_bus_free,
+ .pcs_init = vsc9959_pcs_init,
+ .pcs_an_restart = vsc9959_pcs_an_restart,
+ .pcs_link_state = vsc9959_pcs_link_state,
+ .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
};
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
new file mode 100644
index 000000000000..e3c8d715a18f
--- /dev/null
+++ b/drivers/net/dsa/qca/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_AR9331
+ tristate "Qualcomm Atheros AR9331 Ethernet switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_AR9331
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
+ switch.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
new file mode 100644
index 000000000000..274022319066
--- /dev/null
+++ b/drivers/net/dsa/qca/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
new file mode 100644
index 000000000000..de25f99e995a
--- /dev/null
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2019 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+/*
+ * +----------------------+
+ * GMAC1----RGMII----|--MAC0 |
+ * \---MDIO1----|--REGs |----MDIO3----\
+ * | | | +------+
+ * | | +--| |
+ * | MAC1-|----RMII--M-----| PHY0 |-o P0
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC2-|----RMII--------| PHY1 |-o P1
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC3-|----RMII--------| PHY2 |-o P2
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC4-|----RMII--------| PHY3 |-o P3
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC5-|--+-RMII--M-----|-PHY4-|-o P4
+ * | | | | +------+
+ * +----------------------+ | \--CFG_SW_PHY_SWAP
+ * GMAC0---------------RMII--------------------/ \-CFG_SW_PHY_ADDR_SWAP
+ * \---MDIO0--NC
+ *
+ * GMAC0 and MAC5 are connected together and use the same PHY. Depending on
+ * the configuration it can be PHY4 (default) or PHY0. Only GMAC0 or MAC5 can
+ * be used at the same time. If GMAC0 is used (default), then MAC5 should be
+ * disabled.
+ *
+ * CFG_SW_PHY_SWAP - swap connections of PHY0 and PHY4. If this bit is not
+ * set, PHY4 is connected to the GMAC0/MAC5 bundle and PHY0 is connected to
+ * MAC1. If this bit is set, PHY4 is connected to MAC1 and PHY0 is connected
+ * to the GMAC0/MAC5 bundle.
+ *
+ * CFG_SW_PHY_ADDR_SWAP - swap addresses of PHY0 and PHY4
+ *
+ * CFG_SW_PHY_SWAP and CFG_SW_PHY_ADDR_SWAP are part of the SoC-specific
+ * register set and are not related to the switch's internal registers.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AR9331_SW_NAME "ar9331_switch"
+#define AR9331_SW_PORTS 6
+
+/* dummy reg to change page */
+#define AR9331_SW_REG_PAGE 0x40000
+
+/* Global Interrupt */
+#define AR9331_SW_REG_GINT 0x10
+#define AR9331_SW_REG_GINT_MASK 0x14
+#define AR9331_SW_GINT_PHY_INT BIT(2)
+
+#define AR9331_SW_REG_FLOOD_MASK 0x2c
+#define AR9331_SW_FLOOD_MASK_BROAD_TO_CPU BIT(26)
+
+#define AR9331_SW_REG_GLOBAL_CTRL 0x30
+#define AR9331_SW_GLOBAL_CTRL_MFS_M GENMASK(13, 0)
+
+#define AR9331_SW_REG_MDIO_CTRL 0x98
+#define AR9331_SW_MDIO_CTRL_BUSY BIT(31)
+#define AR9331_SW_MDIO_CTRL_MASTER_EN BIT(30)
+#define AR9331_SW_MDIO_CTRL_CMD_READ BIT(27)
+#define AR9331_SW_MDIO_CTRL_PHY_ADDR_M GENMASK(25, 21)
+#define AR9331_SW_MDIO_CTRL_REG_ADDR_M GENMASK(20, 16)
+#define AR9331_SW_MDIO_CTRL_DATA_M GENMASK(16, 0)
+
+#define AR9331_SW_REG_PORT_STATUS(_port) (0x100 + (_port) * 0x100)
+
+/* FLOW_LINK_EN - If set, the MAC flow control configuration is
+ * auto-negotiated with the PHY. If not set, the MAC can be configured by
+ * software.
+ */
+#define AR9331_SW_PORT_STATUS_FLOW_LINK_EN BIT(12)
+
+/* LINK_EN - If set, the MAC is configured from the PHY link status.
+ * If not set, the MAC should be configured by software.
+ */
+#define AR9331_SW_PORT_STATUS_LINK_EN BIT(9)
+#define AR9331_SW_PORT_STATUS_DUPLEX_MODE BIT(6)
+#define AR9331_SW_PORT_STATUS_RX_FLOW_EN BIT(5)
+#define AR9331_SW_PORT_STATUS_TX_FLOW_EN BIT(4)
+#define AR9331_SW_PORT_STATUS_RXMAC BIT(3)
+#define AR9331_SW_PORT_STATUS_TXMAC BIT(2)
+#define AR9331_SW_PORT_STATUS_SPEED_M GENMASK(1, 0)
+#define AR9331_SW_PORT_STATUS_SPEED_1000 2
+#define AR9331_SW_PORT_STATUS_SPEED_100 1
+#define AR9331_SW_PORT_STATUS_SPEED_10 0
+
+#define AR9331_SW_PORT_STATUS_MAC_MASK \
+ (AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC)
+
+#define AR9331_SW_PORT_STATUS_LINK_MASK \
+ (AR9331_SW_PORT_STATUS_LINK_EN | AR9331_SW_PORT_STATUS_FLOW_LINK_EN | \
+ AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
+ AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
+ AR9331_SW_PORT_STATUS_SPEED_M)
+
+/* Phy bypass mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ *
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b00 |PhyAdd[2:0]| Reg Addr[4:0] | TA |
+ *
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ *
+ * ------------------------------------------------------------------------
+ * Page address mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b11 | 8'b0 | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| | Page [9:0] |
+ */
+/* In page address mode, bits [18:9] of the 32 bit register address should be
+ * written to bits [9:0] of the MDIO data register.
+ */
+#define AR9331_SW_ADDR_PAGE GENMASK(18, 9)
+
+/* ------------------------------------------------------------------------
+ * Normal register access mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b10 | low_addr[7:0] | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ * ------------------------------------------------------------------------
+ */
+#define AR9331_SW_LOW_ADDR_PHY GENMASK(8, 6)
+#define AR9331_SW_LOW_ADDR_REG GENMASK(5, 1)
+
+#define AR9331_SW_MDIO_PHY_MODE_M GENMASK(4, 3)
+#define AR9331_SW_MDIO_PHY_MODE_PAGE 3
+#define AR9331_SW_MDIO_PHY_MODE_REG 2
+#define AR9331_SW_MDIO_PHY_MODE_BYPASS 0
+#define AR9331_SW_MDIO_PHY_ADDR_M GENMASK(2, 0)
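+
+/* For example, assuming the paging set up by ar9331_regmap_range below, an
+ * access to the 32 bit switch register 0x20500 becomes: a page write of
+ * 0x102 (0x20500 >> 9) in page address mode (phy address 0b11000, reg 0),
+ * then two 16 bit transfers in normal mode at window offsets 0x100 and
+ * 0x102, i.e. phy address 0b10100 and reg addresses 0 and 1.
+ */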
+
+/* Empirically determined values */
+#define AR9331_SW_MDIO_POLL_SLEEP_US 1
+#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+
+struct ar9331_sw_priv {
+ struct device *dev;
+ struct dsa_switch ds;
+ struct dsa_switch_ops ops;
+ struct irq_domain *irqdomain;
+ struct mii_bus *mbus; /* mdio master */
+ struct mii_bus *sbus; /* mdio slave */
+ struct regmap *regmap;
+ struct reset_control *sw_reset;
+};
+
+/* Warning: a switch reset will reset the last AR9331_SW_MDIO_PHY_MODE_PAGE
+ * request. If some kind of optimization is used, the request should be
+ * repeated.
+ */
+static int ar9331_sw_reset(struct ar9331_sw_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->sw_reset);
+ if (ret)
+ goto error;
+
+ /* The AR9331 documentation does not provide any information about the
+ * proper reset sequence. The AR8136 (the closest switch to the AR9331)
+ * documentation says the reset duration should be greater than 10ms, so
+ * let's use this value for now.
+ */
+ usleep_range(10000, 15000);
+ ret = reset_control_deassert(priv->sw_reset);
+ if (ret)
+ goto error;
+ /* There is no information on how long we should wait after reset.
+ * The AR8136 has an EEPROM and an interrupt for EEPROM load status;
+ * the AR9331 has no EEPROM support.
+ * For now, do not wait. In case AR8136 support is ever needed, the
+ * after-reset delay can be added as well.
+ */
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_write(struct mii_bus *mbus, int port, int regnum,
+ u16 data)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_DATA_M, data));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "PHY write error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_read(struct mii_bus *mbus, int port, int regnum)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ AR9331_SW_MDIO_CTRL_CMD_READ |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_MDIO_CTRL, &val);
+ if (ret)
+ goto error;
+
+ return FIELD_GET(AR9331_SW_MDIO_CTRL_DATA_M, val);
+
+error:
+ dev_err_ratelimited(priv->dev, "PHY read error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct mii_bus *mbus;
+ struct device_node *np, *mnp;
+ int ret;
+
+ np = dev->of_node;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = np->full_name;
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%pOF", np);
+
+ mbus->read = ar9331_sw_mbus_read;
+ mbus->write = ar9331_sw_mbus_write;
+ mbus->priv = priv;
+ mbus->parent = dev;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ return -ENODEV;
+
+ ret = of_mdiobus_register(mbus, mnp);
+ of_node_put(mnp);
+ if (ret)
+ return ret;
+
+ priv->mbus = mbus;
+
+ return 0;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = ar9331_sw_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Reset will set proper defaults. The CPU port (Port 0) will be
+ * enabled and configured. All other ports (ports 1 - 5) are disabled.
+ */
+ ret = ar9331_sw_mbus_init(priv);
+ if (ret)
+ return ret;
+
+ /* Do not drop broadcast frames */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_FLOOD_MASK,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU);
+ if (ret)
+ goto error;
+
+ /* Set max frame size to the maximum supported value */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_GLOBAL_CTRL,
+ AR9331_SW_GLOBAL_CTRL_MFS_M,
+ AR9331_SW_GLOBAL_CTRL_MFS_M);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static void ar9331_sw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_STATUS(port), 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_AR9331;
+}
+
+static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
+ state->interface, port);
+}
+
+static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+ u32 val;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ val = AR9331_SW_PORT_STATUS_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = AR9331_SW_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_10:
+ val = AR9331_SW_PORT_STATUS_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ if (state->duplex)
+ val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
+
+ if (state->pause & MLO_PAUSE_TX)
+ val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
+
+ if (state->pause & MLO_PAUSE_RX)
+ val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_LINK_MASK, val);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK, 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK,
+ AR9331_SW_PORT_STATUS_MAC_MASK);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static const struct dsa_switch_ops ar9331_sw_ops = {
+ .get_tag_protocol = ar9331_sw_get_tag_protocol,
+ .setup = ar9331_sw_setup,
+ .port_disable = ar9331_sw_port_disable,
+ .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_mac_config = ar9331_sw_phylink_mac_config,
+ .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
+static irqreturn_t ar9331_sw_irq(int irq, void *data)
+{
+ struct ar9331_sw_priv *priv = data;
+ struct regmap *regmap = priv->regmap;
+ u32 stat;
+ int ret;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_GINT, &stat);
+ if (ret) {
+ dev_err(priv->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & AR9331_SW_GINT_PHY_INT) {
+ int child_irq;
+
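+		/* hwirq 0 is the single cascaded PHY interrupt line */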
+ child_irq = irq_find_mapping(priv->irqdomain, 0);
+ handle_nested_irq(child_irq);
+ }
+
+ ret = regmap_write(regmap, AR9331_SW_REG_GINT, stat);
+ if (ret) {
+ dev_err(priv->dev, "can't write interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ar9331_sw_mask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT, 0);
+ if (ret)
+ dev_err(priv->dev, "could not mask IRQ\n");
+}
+
+static void ar9331_sw_unmask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT,
+ AR9331_SW_GINT_PHY_INT);
+ if (ret)
+ dev_err(priv->dev, "could not unmask IRQ\n");
+}
+
+static struct irq_chip ar9331_sw_irq_chip = {
+ .name = AR9331_SW_NAME,
+ .irq_mask = ar9331_sw_mask_irq,
+ .irq_unmask = ar9331_sw_unmask_irq,
+};
+
+static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &ar9331_sw_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void ar9331_sw_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ar9331_sw_irqdomain_ops = {
+ .map = ar9331_sw_irq_map,
+ .unmap = ar9331_sw_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ int ret, irq;
+
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ\n");
+ return irq ? irq : -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
+ IRQF_ONESHOT, AR9331_SW_NAME, priv);
+ if (ret) {
+ dev_err(dev, "unable to request irq: %d\n", ret);
+ return ret;
+ }
+
+ priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_parent(irq_create_mapping(priv->irqdomain, 0), irq);
+
+ return 0;
+}
+
+static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, mode) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return mdiobus_write(sbus, p, r, val);
+}
+
+static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, AR9331_SW_MDIO_PHY_MODE_REG) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return mdiobus_read(sbus, p, r);
+}
+
+static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct ar9331_sw_priv *priv = ctx;
+ struct mii_bus *sbus = priv->sbus;
+ u32 reg = *(u32 *)reg_buf;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+ /* We cannot read the page selector register from hardware, so we
+ * cache its value in regmap. Return all bits set here, so that
+ * regmap will always write the page on first use.
+ */
+ *(u32 *)val_buf = GENMASK(9, 0);
+ return 0;
+ }
+
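+	/* A 32 bit register is transferred as two 16 bit MDIO reads: the
+	 * low half at reg and the high half at reg + 2.
+	 */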
+ ret = __ar9331_mdio_read(sbus, reg);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf = ret;
+ ret = __ar9331_mdio_read(sbus, reg + 2);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf |= ret << 16;
+
+ return 0;
+error:
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+ return ret;
+}
+
+static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ctx;
+ struct mii_bus *sbus = priv->sbus;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
+ 0, val);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+ }
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+ if (ret < 0)
+ goto error;
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+ val >> 16);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+ return ret;
+}
+
+static int ar9331_sw_bus_write(void *context, const void *data, size_t count)
+{
+ u32 reg = *(u32 *)data;
+ u32 val = *((u32 *)data + 1);
+
+ return ar9331_mdio_write(context, reg, val);
+}
+
+static const struct regmap_range ar9331_valid_regs[] = {
+ regmap_reg_range(0x0, 0x0),
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x20, 0x24),
+ regmap_reg_range(0x2c, 0x30),
+ regmap_reg_range(0x40, 0x44),
+ regmap_reg_range(0x50, 0x78),
+ regmap_reg_range(0x80, 0x98),
+
+ regmap_reg_range(0x100, 0x120),
+ regmap_reg_range(0x200, 0x220),
+ regmap_reg_range(0x300, 0x320),
+ regmap_reg_range(0x400, 0x420),
+ regmap_reg_range(0x500, 0x520),
+ regmap_reg_range(0x600, 0x620),
+
+ regmap_reg_range(0x20000, 0x200a4),
+ regmap_reg_range(0x20100, 0x201a4),
+ regmap_reg_range(0x20200, 0x202a4),
+ regmap_reg_range(0x20300, 0x203a4),
+ regmap_reg_range(0x20400, 0x204a4),
+ regmap_reg_range(0x20500, 0x205a4),
+
+ /* dummy page selector reg */
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
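+/* Only the page selector register may be cached by regmap; every other
+ * switch register is treated as volatile and is always read back from the
+ * hardware.
+ */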
+static const struct regmap_range ar9331_nonvolatile_regs[] = {
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
+static const struct regmap_range_cfg ar9331_regmap_range[] = {
+ {
+ .selector_reg = AR9331_SW_REG_PAGE,
+ .selector_mask = GENMASK(9, 0),
+ .selector_shift = 0,
+
+ .window_start = 0,
+ .window_len = 512,
+
+ .range_min = 0,
+ .range_max = AR9331_SW_REG_PAGE - 4,
+ },
+};
+
+static const struct regmap_access_table ar9331_register_set = {
+ .yes_ranges = ar9331_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ar9331_valid_regs),
+};
+
+static const struct regmap_access_table ar9331_volatile_set = {
+ .no_ranges = ar9331_nonvolatile_regs,
+ .n_no_ranges = ARRAY_SIZE(ar9331_nonvolatile_regs),
+};
+
+static const struct regmap_config ar9331_mdio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AR9331_SW_REG_PAGE,
+
+ .ranges = ar9331_regmap_range,
+ .num_ranges = ARRAY_SIZE(ar9331_regmap_range),
+
+ .volatile_table = &ar9331_volatile_set,
+ .wr_table = &ar9331_register_set,
+ .rd_table = &ar9331_register_set,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static struct regmap_bus ar9331_sw_bus = {
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .read = ar9331_mdio_read,
+ .write = ar9331_sw_bus_write,
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+};
+
+static int ar9331_sw_probe(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv;
+ struct dsa_switch *ds;
+ int ret;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, &ar9331_sw_bus, priv,
+ &ar9331_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->sw_reset = devm_reset_control_get(&mdiodev->dev, "switch");
+ if (IS_ERR(priv->sw_reset)) {
+ dev_err(&mdiodev->dev, "missing switch reset\n");
+ return PTR_ERR(priv->sw_reset);
+ }
+
+ priv->sbus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+
+ ret = ar9331_sw_irq_init(priv);
+ if (ret)
+ return ret;
+
+ ds = &priv->ds;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = AR9331_SW_PORTS;
+ ds->priv = priv;
+ priv->ops = ar9331_sw_ops;
+ ds->ops = &priv->ops;
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ ret = dsa_register_switch(ds);
+ if (ret)
+ goto err_remove_irq;
+
+ return 0;
+
+err_remove_irq:
+ irq_domain_remove(priv->irqdomain);
+
+ return ret;
+}
+
+static void ar9331_sw_remove(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ irq_domain_remove(priv->irqdomain);
+ mdiobus_unregister(priv->mbus);
+ dsa_unregister_switch(&priv->ds);
+
+ reset_control_assert(priv->sw_reset);
+}
+
+static const struct of_device_id ar9331_sw_of_match[] = {
+ { .compatible = "qca,ar9331-switch" },
+ { },
+};
+
+static struct mdio_driver ar9331_sw_mdio_driver = {
+ .probe = ar9331_sw_probe,
+ .remove = ar9331_sw_remove,
+ .mdiodrv.driver = {
+ .name = AR9331_SW_NAME,
+ .of_match_table = ar9331_sw_of_match,
+ },
+};
+
+mdio_module_driver(ar9331_sw_mdio_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for Atheros AR9331 switch");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index e548289df31e..9f4205b4439b 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1017,7 +1017,8 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port)
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_QCA;
}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index f5cc8b0a7c74..fd1977590cb4 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -964,7 +964,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
}
static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
/* For now, the RTL switches are handled without any custom tags.
*
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index a51ac088c0bc..03ba6d25f7fe 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -426,14 +426,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.tpid2 = ETH_P_SJA1105,
};
struct sja1105_table *table;
- int i, k = 0;
-
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (dsa_is_dsa_port(priv->ds, i))
- default_general_params.casc_port = i;
- else if (dsa_is_user_port(priv->ds, i))
- priv->ports[i].mgmt_slot = k++;
- }
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@ -582,7 +574,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device *dev = &priv->spidev->dev;
struct device_node *child;
- for_each_child_of_node(ports_node, child) {
+ for_each_available_child_of_node(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1542,7 +1534,8 @@ static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
}
static enum dsa_tag_protocol
-sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
return DSA_TAG_PROTO_SJA1105;
}
@@ -1569,8 +1562,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
if (enabled) {
/* Enable VLAN filtering. */
- tpid = ETH_P_8021AD;
- tpid2 = ETH_P_8021Q;
+ tpid = ETH_P_8021Q;
+ tpid2 = ETH_P_8021AD;
} else {
/* Disable VLAN filtering. */
tpid = ETH_P_SJA1105;
@@ -1579,9 +1572,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
- /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
- general_params->tpid = tpid;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+ general_params->tpid = tpid;
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid2 = tpid2;
/* When VLAN filtering is on, we need to at least be able to
* decode management traffic through the "backup plan".
@@ -1740,6 +1733,16 @@ static int sja1105_setup(struct dsa_switch *ds)
static void sja1105_teardown(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
+ int port;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
@@ -1761,6 +1764,18 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
return 0;
}
+static void sja1105_port_disable(struct dsa_switch *ds, int port)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ kthread_cancel_work_sync(&sp->xmit_work);
+ skb_queue_purge(&sp->xmit_queue);
+}
+
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -1819,47 +1834,36 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
return NETDEV_TX_OK;
}
+#define work_to_port(work) \
+ container_of((work), struct sja1105_port, xmit_work)
+#define tagger_to_sja1105(t) \
+ container_of((t), struct sja1105_private, tagger_data)
+
/* Deferred work is unfortunately necessary because setting up the management
* route cannot be done from atomic context (SPI transfer takes a sleepable
* lock on the bus)
*/
-static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
- struct sk_buff *skb)
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
- int slot = sp->mgmt_slot;
- struct sk_buff *clone;
-
- /* The tragic fact about the switch having 4x2 slots for installing
- * management routes is that all of them except one are actually
- * useless.
- * If 2 slots are simultaneously configured for two BPDUs sent to the
- * same (multicast) DMAC but on different egress ports, the switch
- * would confuse them and redirect first frame it receives on the CPU
- * port towards the port configured on the numerically first slot
- * (therefore wrong port), then second received frame on second slot
- * (also wrong port).
- * So for all practical purposes, there needs to be a lock that
- * prevents that from happening. The slot used here is utterly useless
- * (could have simply been 0 just as fine), but we are doing it
- * nonetheless, in case a smarter idea ever comes up in the future.
- */
- mutex_lock(&priv->mgmt_lock);
+ struct sja1105_port *sp = work_to_port(work);
+ struct sja1105_tagger_data *tagger_data = sp->data;
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ int port = sp - priv->ports;
+ struct sk_buff *skb;
- /* The clone, if there, was made by dsa_skb_tx_timestamp */
- clone = DSA_SKB_CB(skb)->clone;
+ while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
- sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
+ mutex_lock(&priv->mgmt_lock);
- if (!clone)
- goto out;
+ sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
- sja1105_ptp_txtstamp_skb(ds, slot, clone);
+ /* The clone, if present, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
-out:
- mutex_unlock(&priv->mgmt_lock);
- return NETDEV_TX_OK;
+ mutex_unlock(&priv->mgmt_lock);
+ }
}
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -1990,6 +1994,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
.port_enable = sja1105_port_enable,
+ .port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -2003,7 +2008,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_mdb_prepare = sja1105_mdb_prepare,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
- .port_deferred_xmit = sja1105_port_deferred_xmit,
.port_hwtstamp_get = sja1105_hwtstamp_get,
.port_hwtstamp_set = sja1105_hwtstamp_set,
.port_rxtstamp = sja1105_port_rxtstamp,
@@ -2055,7 +2059,7 @@ static int sja1105_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct sja1105_private *priv;
struct dsa_switch *ds;
- int rc, i;
+ int rc, port;
if (!dev->of_node) {
dev_err(dev, "No DTS bindings for SJA1105 driver\n");
@@ -2120,15 +2124,42 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
/* Connections between dsa_port and sja1105_port */
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- struct sja1105_port *sp = &priv->ports[i];
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *slave;
- dsa_to_port(ds, i)->priv = sp;
- sp->dp = dsa_to_port(ds, i);
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp->priv = sp;
+ sp->dp = dp;
sp->data = tagger_data;
+ slave = dp->slave;
+ kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
+ sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
+ slave->name);
+ if (IS_ERR(sp->xmit_worker)) {
+ rc = PTR_ERR(sp->xmit_worker);
+ dev_err(ds->dev,
+ "failed to create deferred xmit thread: %d\n",
+ rc);
+ goto out;
+ }
+ skb_queue_head_init(&sp->xmit_queue);
}
return 0;
+out:
+ while (port-- > 0) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ kthread_destroy_worker(sp->xmit_worker);
+ }
+ return rc;
}
static int sja1105_remove(struct spi_device *spi)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 54258a25031d..a836fc38c4a4 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -83,6 +83,7 @@ static int sja1105_init_avb_params(struct sja1105_private *priv,
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
int rc;
@@ -101,6 +102,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
kfree_skb(priv->tagger_data.stampable_skb);
priv->tagger_data.stampable_skb = NULL;
}
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
}
@@ -234,7 +237,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
if (rw == SPI_WRITE)
priv->info->ptp_cmd_packing(buf, cmd, PACK);
- rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+ rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
SJA1105_SIZE_PTP_CMD);
if (rw == SPI_READ)
@@ -367,22 +370,16 @@ static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
ptp_sts);
}
-#define rxtstamp_to_tagger(d) \
- container_of((d), struct sja1105_tagger_data, rxtstamp_work)
-#define tagger_to_sja1105(d) \
- container_of((d), struct sja1105_private, tagger_data)
-
-static void sja1105_rxtstamp_work(struct work_struct *work)
+static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
{
- struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work);
- struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
struct dsa_switch *ds = priv->ds;
struct sk_buff *skb;
mutex_lock(&ptp_data->lock);
- while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) {
+ while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ticks, ts;
int rc;
@@ -404,6 +401,9 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
}
mutex_unlock(&ptp_data->lock);
+
+ /* Don't restart */
+ return -1;
}
/* Called from dsa_skb_defer_rx_timestamp */
@@ -411,16 +411,16 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
* timestamp. For that we need a sleepable context.
*/
- skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb);
- schedule_work(&tagger_data->rxtstamp_work);
+ skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
+ ptp_schedule_worker(ptp_data->clock, 0);
return true;
}
@@ -628,11 +628,11 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.adjtime = sja1105_ptp_adjtime,
.gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
+ .do_aux_work = sja1105_rxtstamp_work,
.max_adj = SJA1105_MAX_ADJ_PPB,
};
- skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
- INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -653,13 +653,13 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- cancel_work_sync(&priv->tagger_data.rxtstamp_work);
- skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
}
-void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
@@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
goto out;
}
- rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+ rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
if (rc < 0) {
dev_err(ds->dev, "timed out polling for tstamp\n");
kfree_skb(skb);
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 470f44b76318..6f4a19eec709 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -30,6 +30,7 @@ struct sja1105_ptp_cmd {
};
struct sja1105_ptp_data {
+ struct sk_buff_head skb_rxtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 0d03e13e9909..63d2311817c4 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
static size_t
sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
- sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
+ sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
- sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
+ sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 26b925b5dace..fa6750d973d7 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
if (admin->cycle_time_extension)
return -ENOTSUPP;
- if (!ns_to_sja1105_delta(admin->base_time)) {
- dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
- return -ERANGE;
- }
-
for (i = 0; i < admin->num_entries; i++) {
s64 delta_ns = admin->entries[i].interval;
s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 42c1574d45f2..6e21a2a5cf01 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -542,7 +542,8 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
}
static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
- int port)
+ int port,
+ enum dsa_tag_protocol mp)
{
/* The switch internally uses a 8 byte header with length,
* source port, tag, LPA and priority. This is supposedly
@@ -1111,7 +1112,9 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
+#if IS_ENABLED(CONFIG_OF_GPIO)
vsc->gc.of_node = vsc->dev->of_node;
+#endif
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 3da97996bdf3..8cafd06ff0c4 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -196,7 +196,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void el3_tx_timeout (struct net_device *dev);
+static void el3_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void el3_down(struct net_device *dev);
static void el3_up(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
@@ -689,7 +689,7 @@ el3_open(struct net_device *dev)
}
static void
-el3_tx_timeout (struct net_device *dev)
+el3_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b15752267c8d..1e233e2f0a5a 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -371,7 +371,7 @@ static void corkscrew_timer(struct timer_list *t);
static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int corkscrew_rx(struct net_device *dev);
-static void corkscrew_timeout(struct net_device *dev);
+static void corkscrew_timeout(struct net_device *dev, unsigned int txqueue);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t corkscrew_interrupt(int irq, void *dev_id);
static int corkscrew_close(struct net_device *dev);
@@ -961,7 +961,7 @@ static void corkscrew_timer(struct timer_list *t)
#endif /* AUTOMEDIA */
}
-static void corkscrew_timeout(struct net_device *dev)
+static void corkscrew_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct corkscrew_private *vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 3044a6f35f04..ef1c3151fbb2 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -234,7 +234,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev, int worklimit);
static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -690,7 +690,7 @@ static int el3_open(struct net_device *dev)
return 0;
}
-static void el3_tx_timeout(struct net_device *dev)
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 2b2695311bda..d47cde6c5f08 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -173,7 +173,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *el3_get_stats(struct net_device *dev);
static int el3_rx(struct net_device *dev);
static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static const struct ethtool_ops netdev_ethtool_ops;
@@ -526,7 +526,7 @@ static int el3_open(struct net_device *dev)
return 0;
}
-static void el3_tx_timeout(struct net_device *dev)
+static void el3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8785c2ff3825..a2b7f7ab8170 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -776,7 +776,7 @@ static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
-static void vortex_tx_timeout(struct net_device *dev);
+static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);
@@ -1548,7 +1548,7 @@ vortex_up(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned int config;
- int i, mii_reg1, mii_reg5, err = 0;
+ int i, mii_reg5, err = 0;
if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
@@ -1605,7 +1605,7 @@ vortex_up(struct net_device *dev)
window_write32(vp, config, 3, Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
- mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
+ mdio_read(dev, vp->phys[0], MII_BMSR);
mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
vp->mii.full_duplex = vp->full_duplex;
@@ -1877,7 +1877,7 @@ leave_media_alone:
iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
-static void vortex_tx_timeout(struct net_device *dev)
+static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index be823c186517..14fce6658106 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2013,7 +2013,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
}
static void
-typhoon_tx_timeout(struct net_device *dev)
+typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct typhoon *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 78f3e532c600..0e0aa4016858 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -36,9 +36,9 @@ void ei_set_multicast_list(struct net_device *dev)
}
EXPORT_SYMBOL(ei_set_multicast_list);
-void ei_tx_timeout(struct net_device *dev)
+void ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- __ei_tx_timeout(dev);
+ __ei_tx_timeout(dev, txqueue);
}
EXPORT_SYMBOL(ei_tx_timeout);
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 3e2f2c2e7b58..529c728f334a 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -32,7 +32,7 @@ void NS8390_init(struct net_device *dev, int startp);
int ei_open(struct net_device *dev);
int ei_close(struct net_device *dev);
irqreturn_t ei_interrupt(int irq, void *dev_id);
-void ei_tx_timeout(struct net_device *dev);
+void ei_tx_timeout(struct net_device *dev, unsigned int txqueue);
netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
void ei_set_multicast_list(struct net_device *dev);
struct net_device_stats *ei_get_stats(struct net_device *dev);
@@ -50,7 +50,7 @@ void NS8390p_init(struct net_device *dev, int startp);
int eip_open(struct net_device *dev);
int eip_close(struct net_device *dev);
irqreturn_t eip_interrupt(int irq, void *dev_id);
-void eip_tx_timeout(struct net_device *dev);
+void eip_tx_timeout(struct net_device *dev, unsigned int txqueue);
netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
void eip_set_multicast_list(struct net_device *dev);
struct net_device_stats *eip_get_stats(struct net_device *dev);
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6cf36992a2c6..6834742057b3 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -41,9 +41,9 @@ void eip_set_multicast_list(struct net_device *dev)
}
EXPORT_SYMBOL(eip_set_multicast_list);
-void eip_tx_timeout(struct net_device *dev)
+void eip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- __ei_tx_timeout(dev);
+ __ei_tx_timeout(dev, txqueue);
}
EXPORT_SYMBOL(eip_tx_timeout);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 0b6bbf63f7ca..aeae7966a082 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -83,7 +83,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void axnet_tx_timeout(struct net_device *dev);
+static void axnet_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(struct timer_list *t);
static void axnet_reset_8390(struct net_device *dev);
@@ -903,7 +903,7 @@ static int ax_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-static void axnet_tx_timeout(struct net_device *dev)
+static void axnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index c9c55c9eab9f..babc92e2692e 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -251,7 +251,7 @@ static int __ei_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-static void __ei_tx_timeout(struct net_device *dev)
+static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 816540e6beac..165d18405b0c 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -576,7 +576,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
@@ -1105,7 +1105,7 @@ static void check_duplex(struct net_device *dev)
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 174344c450af..cb6a761d5c11 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3651,15 +3651,6 @@ static int et131x_close(struct net_device *netdev)
return del_timer_sync(&adapter->error_timer);
}
-static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
- int cmd)
-{
- if (!netdev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
-}
-
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
@@ -3811,7 +3802,7 @@ drop_err:
 * specified by the 'tx_timeo' element in the net_device structure (see
* et131x_alloc_device() to see how this value is set).
*/
-static void et131x_tx_timeout(struct net_device *netdev)
+static void et131x_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -3899,7 +3890,7 @@ static const struct net_device_ops et131x_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = et131x_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
};
static int et131x_pci_setup(struct pci_dev *pdev,
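The deleted et131x_ioctl() above (and emac_ioctl() in the sun4i-emac hunk below) were pure boilerplate around phy_mii_ioctl(), which this series replaces with the new phylib helpers. A rough reconstruction of those helpers, inferred from the wrappers they replace; see drivers/net/phy/phy.c for the canonical versions:

#include <linux/netdevice.h>
#include <linux/phy.h>

int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        if (!dev->phydev)
                return -ENODEV;

        return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

/* Same, but additionally refuses the ioctl while the interface is
 * down; this is the netif_running() check sun4i-emac open-coded.
 */
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        if (!netif_running(dev))
                return -ENODEV;

        return phy_do_ioctl(dev, ifr, cmd);
}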
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 80ef3e15bd22..9daef4c8feef 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1791,7 +1791,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
sdev->pdev = pdev;
sdev->netdev = dev;
- sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
+ sdev->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!sdev->regs) {
dev_err(&pdev->dev, "failed to map registers\n");
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0537df06a9b5..22cadfbeedfb 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -207,19 +207,6 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
-static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -407,7 +394,7 @@ static void emac_init_device(struct net_device *dev)
}
/* Our watchdog timed out. Called by the networking layer */
-static void emac_timeout(struct net_device *dev)
+static void emac_timeout(struct net_device *dev, unsigned int txqueue)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long flags;
@@ -791,7 +778,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_tx_timeout = emac_timeout,
.ndo_set_rx_mode = emac_set_rx_mode,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 46b4207d3266..f366faf88eee 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -437,7 +437,7 @@ static const struct ethtool_ops ace_ethtool_ops = {
.set_link_ksettings = ace_set_link_ksettings,
};
-static void ace_watchdog(struct net_device *dev);
+static void ace_watchdog(struct net_device *dev, unsigned int txqueue);
static const struct net_device_ops ace_netdev_ops = {
.ndo_open = ace_open,
@@ -1542,7 +1542,7 @@ static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
}
-static void ace_watchdog(struct net_device *data)
+static void ace_watchdog(struct net_device *data, unsigned int txqueue)
{
struct net_device *dev = data;
struct ace_private *ap = netdev_priv(dev);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 4cd53fc338b5..1671c1f36691 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1332,10 +1332,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
return -EBUSY;
}
- *ptr = devm_ioremap_nocache(device, region->start,
+ *ptr = devm_ioremap(device, region->start,
resource_size(region));
if (*ptr == NULL) {
- dev_err(device, "ioremap_nocache of %s failed!", name);
+ dev_err(device, "ioremap of %s failed!", name);
return -ENOMEM;
}
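The two ioremap hunks above belong to the v5.6 removal of ioremap_nocache(): ioremap() had become uncached by default on every architecture, so the suffix no longer carried any information and the alias was dropped. A condensed sketch of the managed pattern request_and_map() uses, assuming only the behavior visible in the hunk:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

static int map_region(struct device *dev, struct resource *region,
                      void __iomem **ptr)
{
        /* devm_ioremap() is uncached; the mapping is torn down
         * automatically when the device is unbound.
         */
        *ptr = devm_ioremap(dev, region->start, resource_size(region));
        if (!*ptr) {
                dev_err(dev, "ioremap of %pR failed\n", region);
                return -ENOMEM;
        }

        return 0;
}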
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ea62604fdf8c..1fb58f9ad80b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
u16 command_id, bool capture)
{
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(command_id >= queue->q_depth)) {
pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, queue->q_depth);
@@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
feature_ver);
}
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->rss.hash_func;
+}
+
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ (ena_dev->rss).hash_key;
+
+ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
+ /* The key is stored in the device as an array of u32, and the
+ * API requires the key to be passed in that same format. Thus the
+ * size of our array should be divided by 4
+ */
+ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+}
+
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ hash_key = (ena_dev->rss).hash_key;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ ena_dev->rss.hash_key_dma_addr,
+ sizeof(ena_dev->rss.hash_key), 0);
+ if (unlikely(rc)) {
+ hash_key = NULL;
+ return -EOPNOTSUPP;
+ }
rss->hash_key =
dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
return 0;
}
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
- struct ena_rss *rss = &ena_dev->rss;
- u8 idx;
- u16 i;
-
- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
- if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
- if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
-
- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
- }
-
- return 0;
-}
-
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 intr_delay_resolution)
{
@@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
switch (func) {
case ENA_ADMIN_TOEPLITZ:
- if (key_len > sizeof(hash_key->key)) {
- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
+ if (key) {
+ if (key_len != sizeof(hash_key->key)) {
+ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
}
-
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
break;
case ENA_ADMIN_CRC32:
rss->hash_init_val = init_val;
@@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ /* ffs() returns 1 in case the lsb is set */
+ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
+ if (rss->hash_func)
+ rss->hash_func--;
+
if (func)
*func = rss->hash_func;
@@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
if (!ind_tbl)
return 0;
- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
- if (unlikely(rc))
- return rc;
-
for (i = 0; i < (1 << rss->tbl_log_size); i++)
ind_tbl[i] = rss->host_rss_ind_tbl[i];
@@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
if (unlikely(rc))
goto err_indr_tbl;
+ /* The following function might return -EOPNOTSUPP in case the
+ * device doesn't support setting the key / hash function. We can safely
+ * ignore this error and keep indirection table support only.
+ */
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc))
+ if (unlikely(rc) && rc != -EOPNOTSUPP)
goto err_hash_key;
+ else if (rc != -EOPNOTSUPP)
+ ena_com_hash_key_fill_default_key(ena_dev);
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
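Two details in the ena_com.c hunks above are easy to miss: the default RSS key is now generated with netdev_rss_key_fill() (hence the netdevice.h include added to the header below), and ena_com_get_hash_function() now converts the device's report from a bitmask into an enum index. A standalone sketch of that conversion; the helper name is mine:

#include <linux/bitops.h>

/* The device reports the selected hash function as a single set bit,
 * e.g. BIT(ENA_ADMIN_CRC32), while the driver stores the enum value
 * itself. ffs() is 1-based, so subtract one whenever a bit is set.
 */
static u32 hash_func_mask_to_index(u32 selected_func_mask)
{
        u32 func = ffs(selected_func_mask);

        return func ? func - 1 : 0;
}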
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7c941eba0bc9..469f298199a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -44,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/netdevice.h>
#include "ena_common_defs.h"
#include "ena_admin_defs.h"
@@ -72,7 +73,7 @@
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
@@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
*/
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+/* ena_com_get_current_hash_function - Get RSS hash function
+ * @ena_dev: ENA communication layer struct
+ *
+ * Return the current hash function.
+ * @return: 0 or one of the ena_admin_hash_functions values.
+ */
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
+
/* ena_com_fill_hash_function - Fill RSS hash function
* @ena_dev: ENA communication layer struct
* @func: The hash function (Toeplitz or crc)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a3250dcf7d53..ced1d577b62a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev,
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
ena_dev->intr_delay_resolution;
- if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
- coalesce->rx_coalesce_usecs =
- ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
- * ena_dev->intr_delay_resolution;
+ coalesce->rx_coalesce_usecs =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+ * ena_dev->intr_delay_resolution;
coalesce->use_adaptive_rx_coalesce =
ena_com_get_adaptive_moderation_enabled(ena_dev);
@@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev,
ena_update_tx_rings_intr_moderation(adapter);
- if (coalesce->use_adaptive_rx_coalesce) {
- if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
- ena_com_enable_adaptive_moderation(ena_dev);
- return 0;
- }
-
rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
coalesce->rx_coalesce_usecs);
if (rc)
@@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev,
ena_update_rx_rings_intr_moderation(adapter);
- if (!coalesce->use_adaptive_rx_coalesce) {
- if (ena_com_get_adaptive_moderation_enabled(ena_dev))
- ena_com_disable_adaptive_moderation(ena_dev);
- }
+ if (coalesce->use_adaptive_rx_coalesce &&
+ !ena_com_get_adaptive_moderation_enabled(ena_dev))
+ ena_com_enable_adaptive_moderation(ena_dev);
+
+ if (!coalesce->use_adaptive_rx_coalesce &&
+ ena_com_get_adaptive_moderation_enabled(ena_dev))
+ ena_com_disable_adaptive_moderation(ena_dev);
return 0;
}
@@ -640,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ if (!indir)
+ return 0;
+
+ rc = ena_com_indirect_table_get(ena_dev, indir);
+ if (rc)
+ return rc;
+
+ /* Our internal representation of the indices is: even indices
+ * for Tx and odd indices for Rx. We need to convert the Rx
+ * indices to be consecutive
+ */
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
+ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
+
+ return rc;
+}
+
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -648,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 func;
int rc;
- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+ rc = ena_indirection_table_get(adapter, indir);
if (rc)
return rc;
+ /* We call this function in order to check if the device
+ * supports getting/setting the hash function.
+ */
rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ key = NULL;
+ hfunc = NULL;
+ rc = 0;
+ }
+
+ return rc;
+ }
+
if (rc)
return rc;
@@ -661,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
func = ETH_RSS_HASH_TOP;
break;
case ENA_ADMIN_CRC32:
- func = ETH_RSS_HASH_XOR;
+ func = ETH_RSS_HASH_CRC32;
break;
default:
netif_err(adapter, drv, netdev,
@@ -704,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
}
switch (hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ func = ena_com_get_current_hash_function(ena_dev);
+ break;
case ETH_RSS_HASH_TOP:
func = ENA_ADMIN_TOEPLITZ;
break;
- case ETH_RSS_HASH_XOR:
+ case ETH_RSS_HASH_CRC32:
func = ENA_ADMIN_CRC32;
break;
default:
@@ -744,7 +779,9 @@ static int ena_set_channels(struct net_device *netdev,
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES)
+ if (count < ENA_MIN_NUM_IO_QUEUES ||
+ (ena_xdp_present(adapter) &&
+ !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
return -EINVAL;
return ena_update_queue_count(adapter, count);
@@ -816,6 +853,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
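The ena_indirection_table_get() helper above exists because ENA interleaves queue IDs (even for Tx, odd for Rx) while ethtool expects consecutive combined-channel indices. Assuming macro definitions consistent with that comment, the conversion plausibly looks like:

#define ENA_IO_TXQ_IDX(q)                       (2 * (q))
#define ENA_IO_RXQ_IDX(q)                       (2 * (q) + 1)
#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)       (((q) - 1) / 2)

so an indirection entry pointing at RX queue 2*i + 1 is reported to ethtool as channel i, and the set path applies the inverse before programming the table.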
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index d46a912002ff..0b2fd96b93d7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -36,7 +36,6 @@
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
@@ -47,6 +46,7 @@
#include <net/ip.h>
#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
@@ -78,7 +78,37 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
-static void ena_tx_timeout(struct net_device *dev)
+static void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
+ int count);
+static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
+ int count);
+static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
+static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count);
+static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
+static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
+static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
+static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
+static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+static int ena_up(struct ena_adapter *adapter);
+static void ena_down(struct ena_adapter *adapter);
+static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
+static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
+static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+
+static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -123,6 +153,448 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
+static int ena_xmit_common(struct net_device *dev,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int rc, nb_hw_desc;
+
+ if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
+ ena_tx_ctx))) {
+ netif_dbg(adapter, tx_queued, dev,
+ "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ ring->qid);
+ ena_com_write_sq_doorbell(ring->ena_com_io_sq);
+ }
+
+ /* prepare the packet's descriptors to dma engine */
+ rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
+ &nb_hw_desc);
+
+ /* In case there isn't enough space in the queue for the packet,
+ * we simply drop it. All other failure reasons of
+ * ena_com_prepare_tx() are fatal and therefore require a device reset.
+ */
+ if (unlikely(rc)) {
+ netif_err(adapter, tx_queued, dev,
+ "failed to prepare tx bufs\n");
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.prepare_ctx_err++;
+ u64_stats_update_end(&ring->syncp);
+ if (rc != -ENOMEM) {
+ adapter->reset_reason =
+ ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+ return rc;
+ }
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.cnt++;
+ ring->tx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->syncp);
+
+ tx_info->tx_descs = nb_hw_desc;
+ tx_info->last_jiffies = jiffies;
+ tx_info->print_once = 0;
+
+ ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
+ ring->ring_size);
+ return 0;
+}
+
+/* This is the XDP napi callback. XDP queues use a separate napi callback
+ * than Rx/Tx queues.
+ */
+static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ u32 xdp_work_done, xdp_budget;
+ struct ena_ring *xdp_ring;
+ int napi_comp_call = 0;
+ int ret;
+
+ xdp_ring = ena_napi->xdp_ring;
+ xdp_ring->first_interrupt = ena_napi->first_interrupt;
+
+ xdp_budget = budget;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
+
+ /* If the device is about to reset or go down, avoid unmasking
+ * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (xdp_budget > xdp_work_done) {
+ napi_comp_call = 1;
+ if (napi_complete_done(napi, xdp_work_done))
+ ena_unmask_interrupt(xdp_ring, NULL);
+ ena_update_ring_numa_node(xdp_ring, NULL);
+ ret = xdp_work_done;
+ } else {
+ ret = xdp_budget;
+ }
+
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.napi_comp += napi_comp_call;
+ xdp_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&xdp_ring->syncp);
+
+ return ret;
+}
+
+static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_buff *xdp,
+ void **push_hdr,
+ u32 *push_len)
+{
+ struct ena_adapter *adapter = xdp_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ dma_addr_t dma = 0;
+ u32 size;
+
+ tx_info->xdpf = convert_to_xdp_frame(xdp);
+ size = tx_info->xdpf->len;
+ ena_buf = tx_info->bufs;
+
+ /* llq push buffer */
+ *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+ *push_hdr = tx_info->xdpf->data;
+
+ if (size - *push_len > 0) {
+ dma = dma_map_single(xdp_ring->dev,
+ *push_hdr + *push_len,
+ size - *push_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 1;
+ tx_info->num_of_bufs = 1;
+ }
+
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&xdp_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map xdp buff\n");
+
+ xdp_return_frame_rx_napi(tx_info->xdpf);
+ tx_info->xdpf = NULL;
+ tx_info->num_of_bufs = 0;
+
+ return -EINVAL;
+}
+
+static int ena_xdp_xmit_buff(struct net_device *dev,
+ struct xdp_buff *xdp,
+ int qid,
+ struct ena_rx_buffer *rx_info)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_com_tx_ctx ena_tx_ctx = {0};
+ struct ena_tx_buffer *tx_info;
+ struct ena_ring *xdp_ring;
+ u16 next_to_use, req_id;
+ int rc;
+ void *push_hdr;
+ u32 push_len;
+
+ xdp_ring = &adapter->tx_ring[qid];
+ next_to_use = xdp_ring->next_to_use;
+ req_id = xdp_ring->free_ids[next_to_use];
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+ page_ref_inc(rx_info->page);
+ tx_info->xdp_rx_page = rx_info->page;
+
+ rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ ena_tx_ctx.ena_bufs = tx_info->bufs;
+ ena_tx_ctx.push_header = push_hdr;
+ ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+ ena_tx_ctx.header_len = push_len;
+
+ rc = ena_xmit_common(dev,
+ xdp_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdp->data_end - xdp->data);
+ if (rc)
+ goto error_unmap_dma;
+ /* trigger the dma engine. ena_com_write_sq_doorbell()
+ * has a mb
+ */
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ u64_stats_update_begin(&xdp_ring->syncp);
+ xdp_ring->tx_stats.doorbells++;
+ u64_stats_update_end(&xdp_ring->syncp);
+
+ return NETDEV_TX_OK;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+ tx_info->xdpf = NULL;
+error_drop_packet:
+
+ return NETDEV_TX_OK;
+}
+
+static int ena_xdp_execute(struct ena_ring *rx_ring,
+ struct xdp_buff *xdp,
+ struct ena_rx_buffer *rx_info)
+{
+ struct bpf_prog *xdp_prog;
+ u32 verdict = XDP_PASS;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ if (!xdp_prog)
+ goto out;
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (verdict == XDP_TX)
+ ena_xdp_xmit_buff(rx_ring->netdev,
+ xdp,
+ rx_ring->qid + rx_ring->adapter->num_io_queues,
+ rx_info);
+ else if (unlikely(verdict == XDP_ABORTED))
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ else if (unlikely(verdict > XDP_TX))
+ bpf_warn_invalid_xdp_action(verdict);
+out:
+ rcu_read_unlock();
+ return verdict;
+}
+
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources(adapter);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both kernel and bpf-prog to know
+ * more about the RX-queue a given XDP frame arrived on.
+ */
+static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
+
+static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first,
+ int count)
+{
+ struct ena_ring *rx_ring;
+ int i = 0;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ xchg(&rx_ring->xdp_bpf_prog, prog);
+ if (prog) {
+ ena_xdp_register_rxq_info(rx_ring);
+ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ ena_xdp_unregister_rxq_info(rx_ring);
+ rx_ring->rx_headroom = 0;
+ }
+ }
+}
+
+void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ } else if (old_bpf_prog) {
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "xdp program set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+ netdev->mtu, ENA_XDP_MAX_MTU);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+ adapter->num_io_queues, adapter->max_num_io_queues);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+ * program as well as to query the current xdp program id.
+ */
+static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ case XDP_QUERY_PROG:
+ bpf->prog_id = adapter->xdp_bpf_prog ?
+ adapter->xdp_bpf_prog->aux->id : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
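The XDP support added in this hunk follows the common driver pattern: one program pointer per RX ring, read under RCU, and a verdict switch after running the program. A condensed, driver-agnostic sketch of the shape of ena_xdp_execute(), with the transmit and drop plumbing stubbed out:

#include <linux/bpf_trace.h>
#include <linux/filter.h>

static u32 run_xdp(struct bpf_prog *prog, struct net_device *netdev,
                   struct xdp_buff *xdp)
{
        u32 verdict;

        rcu_read_lock();        /* the program can be swapped at runtime */
        verdict = bpf_prog_run_xdp(prog, xdp);
        rcu_read_unlock();

        switch (verdict) {
        case XDP_PASS:          /* build an skb and hand it to the stack */
        case XDP_TX:            /* requeue the frame on an XDP TX ring */
        case XDP_DROP:          /* recycle the page, skip the stack */
                break;
        case XDP_ABORTED:
                trace_xdp_exception(netdev, prog, verdict);
                verdict = XDP_DROP;
                break;
        default:
                bpf_warn_invalid_xdp_action(verdict);
                verdict = XDP_DROP;
                break;
        }

        return verdict;
}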
@@ -164,7 +636,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter)
+static void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -172,13 +645,12 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_dev = adapter->ena_dev;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
txr = &adapter->tx_ring[i];
rxr = &adapter->rx_ring[i];
- /* TX/RX common ring state */
+ /* TX common ring state */
ena_init_io_rings_common(adapter, txr, i);
- ena_init_io_rings_common(adapter, rxr, i);
/* TX specific ring state */
txr->ring_size = adapter->requested_tx_ring_size;
@@ -188,14 +660,20 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
txr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
- /* RX specific ring state */
- rxr->ring_size = adapter->requested_rx_ring_size;
- rxr->rx_copybreak = adapter->rx_copybreak;
- rxr->sgl_size = adapter->max_rx_sgl_size;
- rxr->smoothed_interval =
- ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
- rxr->empty_rx_queue = 0;
- adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ /* Don't init RX queues for xdp queues */
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* RX common ring state */
+ ena_init_io_rings_common(adapter, rxr, i);
+
+ /* RX specific ring state */
+ rxr->ring_size = adapter->requested_rx_ring_size;
+ rxr->rx_copybreak = adapter->rx_copybreak;
+ rxr->sgl_size = adapter->max_rx_sgl_size;
+ rxr->smoothed_interval =
+ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+ rxr->empty_rx_queue = 0;
+ adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
}
}
@@ -285,16 +763,13 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
- * @adapter: private structure
- *
- * Return 0 on success, negative on failure
- */
-static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
+static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
rc = ena_setup_tx_resources(adapter, i);
if (rc)
goto err_setup_tx;
@@ -308,11 +783,20 @@ err_setup_tx:
"Tx queue %d: allocation failed\n", i);
/* rewind the index freeing the rings as we go */
- while (i--)
+ while (first_index < i--)
ena_free_tx_resources(adapter, i);
return rc;
}
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
+{
+ int i;
+
+ for (i = first_index; i < first_index + count; i++)
+ ena_free_tx_resources(adapter, i);
+}
+
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
* @adapter: board private structure
*
@@ -320,10 +804,10 @@ err_setup_tx:
*/
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
- int i;
-
- for (i = 0; i < adapter->num_io_queues; i++)
- ena_free_tx_resources(adapter, i);
+ ena_free_all_io_tx_resources_in_range(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
}
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
@@ -495,8 +979,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
rx_info->page = page;
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
- ena_buf->paddr = dma;
- ena_buf->len = ENA_PAGE_SIZE;
+ ena_buf->paddr = dma + rx_ring->rx_headroom;
+ ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
return 0;
}
@@ -513,7 +997,9 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
+ dma_unmap_page(rx_ring->dev,
+ ena_buf->paddr - rx_ring->rx_headroom,
+ ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
@@ -620,8 +1106,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -675,7 +1161,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
}
@@ -688,7 +1174,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
ena_free_tx_bufs(tx_ring);
}
@@ -699,7 +1185,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
@@ -723,6 +1209,32 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
+static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
+{
+ if (tx_info)
+ netif_err(ring->adapter,
+ tx_done,
+ ring->netdev,
+ "tx_info doesn't have valid %s",
+ is_xdp ? "xdp frame" : "skb");
+ else
+ netif_err(ring->adapter,
+ tx_done,
+ ring->netdev,
+ "Invalid req_id: %hu\n",
+ req_id);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->tx_stats.bad_req_id++;
+ u64_stats_update_end(&ring->syncp);
+
+ /* Trigger device reset */
+ ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
+ return -EFAULT;
+}
+
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
struct ena_tx_buffer *tx_info = NULL;
@@ -733,21 +1245,20 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
return 0;
}
- if (tx_info)
- netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
- "tx_info doesn't have valid skb\n");
- else
- netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
- "Invalid req_id: %hu\n", req_id);
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
+}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.bad_req_id++;
- u64_stats_update_end(&tx_ring->syncp);
+static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
- /* Trigger device reset */
- tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
- return -EFAULT;
+ if (likely(req_id < xdp_ring->ring_size)) {
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+ }
+
+ return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
@@ -786,7 +1297,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -1037,6 +1548,33 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
+int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ struct ena_rx_buffer *rx_info;
+ int ret;
+
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ xdp->data = page_address(rx_info->page) +
+ rx_info->page_offset + rx_ring->rx_headroom;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_hard_start = page_address(rx_info->page);
+ xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ /* If for some reason we received a bigger packet than
+ * we expect, then we simply drop it
+ */
+ if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
+ return XDP_DROP;
+
+ ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+
+ /* The xdp program might expand the headers */
+ if (ret == XDP_PASS) {
+ rx_info->page_offset = xdp->data - xdp->data_hard_start;
+ rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+ }
+
+ return ret;
+}
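ena_xdp_handle_buff() above illustrates that an xdp_buff is just a window into the RX page. Isolating the pointer arithmetic into a helper of my own naming (field math only, no driver state):

#include <net/xdp.h>

static void fill_xdp_buff(struct xdp_buff *xdp, void *page_addr,
                          unsigned int page_offset, unsigned int headroom,
                          unsigned int len)
{
        xdp->data_hard_start = page_addr;                 /* start of page */
        xdp->data = page_addr + page_offset + headroom;   /* first byte */
        xdp_set_data_meta_invalid(xdp);                   /* no metadata */
        xdp->data_end = xdp->data + len;                  /* one past end */
}

An XDP program may legitimately move xdp->data, for example to strip an encapsulation header, which is why the XDP_PASS path re-derives the page offset and buffer length afterwards.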
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1048,23 +1586,27 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
u32 budget)
{
u16 next_to_clean = rx_ring->next_to_clean;
- u32 res_budget, work_done;
-
struct ena_com_rx_ctx ena_rx_ctx;
struct ena_adapter *adapter;
+ u32 res_budget, work_done;
+ int rx_copybreak_pkt = 0;
+ int refill_threshold;
struct sk_buff *skb;
int refill_required;
- int refill_threshold;
- int rc = 0;
+ struct xdp_buff xdp;
int total_len = 0;
- int rx_copybreak_pkt = 0;
+ int xdp_verdict;
+ int rc = 0;
int i;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
+ xdp.rxq = &rx_ring->xdp_rxq;
do {
+ xdp_verdict = XDP_PASS;
+ skb = NULL;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
ena_rx_ctx.descs = 0;
@@ -1082,12 +1624,22 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ if (ena_xdp_present_ring(rx_ring))
+ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+
/* allocate skb and fill it */
- skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
- &next_to_clean);
+ if (xdp_verdict == XDP_PASS)
+ skb = ena_rx_skb(rx_ring,
+ rx_ring->ena_bufs,
+ ena_rx_ctx.descs,
+ &next_to_clean);
- /* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
+ if (xdp_verdict == XDP_TX) {
+ ena_free_rx_page(rx_ring,
+ &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
+ res_budget--;
+ }
for (i = 0; i < ena_rx_ctx.descs; i++) {
rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
@@ -1095,6 +1647,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
}
+ if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
+ continue;
break;
}
@@ -1188,9 +1742,14 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
struct ena_eth_io_intr_reg intr_reg;
- u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
- rx_ring->smoothed_interval :
- ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
+ u32 rx_interval = 0;
+ /* Rx ring can be NULL for XDP tx queues which don't have an
+ * accompanying rx_ring pair.
+ */
+ if (rx_ring)
+ rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
+ rx_ring->smoothed_interval :
+ ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
/* Update intr register: rx intr delay,
* tx intr delay and interrupt unmask
@@ -1203,8 +1762,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
+ * The Tx ring is used because the rx_ring is NULL for XDP queues
*/
- ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+ ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
@@ -1222,24 +1782,84 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
if (numa_node != NUMA_NO_NODE) {
ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
- ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
+ if (rx_ring)
+ ena_com_update_numa_node(rx_ring->ena_com_io_cq,
+ numa_node);
}
tx_ring->cpu = cpu;
- rx_ring->cpu = cpu;
+ if (rx_ring)
+ rx_ring->cpu = cpu;
return;
out:
put_cpu();
}
+static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ u32 tx_bytes = 0;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!xdp_ring))
+ return 0;
+ next_to_clean = xdp_ring->next_to_clean;
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
+ &req_id);
+ if (rc)
+ break;
+
+ rc = validate_xdp_req_id(xdp_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ xdpf = tx_info->xdpf;
+
+ tx_info->xdpf = NULL;
+ tx_info->last_jiffies = 0;
+ ena_unmap_tx_buff(xdp_ring, tx_info);
+
+ netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+ "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
+ xdpf);
+
+ tx_bytes += xdpf->len;
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+
+ __free_page(tx_info->xdp_rx_page);
+ xdp_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ xdp_ring->ring_size);
+ }
+
+ xdp_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
+
+ netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ xdp_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
+
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;
-
- u32 tx_work_done;
- u32 rx_work_done;
+ int tx_work_done;
+ int rx_work_done = 0;
int tx_budget;
int napi_comp_call = 0;
int ret;
@@ -1247,6 +1867,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring = ena_napi->tx_ring;
rx_ring = ena_napi->rx_ring;
+ tx_ring->first_interrupt = ena_napi->first_interrupt;
+ rx_ring->first_interrupt = ena_napi->first_interrupt;
+
tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
@@ -1256,7 +1879,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
}
tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
- rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+ /* On netpoll the budget is zero and the handler should only clean the
+ * tx completions.
+ */
+ if (likely(budget))
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
/* If the device is about to reset or go down, avoid unmasking
* the interrupt and return 0 so NAPI won't reschedule
@@ -1318,8 +1945,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- ena_napi->tx_ring->first_interrupt = true;
- ena_napi->rx_ring->first_interrupt = true;
+ ena_napi->first_interrupt = true;
napi_schedule_irqoff(&ena_napi->napi);
@@ -1394,10 +2020,12 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
{
struct net_device *netdev;
int irq_idx, i, cpu;
+ int io_queue_count;
netdev = adapter->netdev;
+ io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < io_queue_count; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
cpu = i % num_online_cpus();
@@ -1525,45 +2153,64 @@ static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
synchronize_irq(adapter->irq_tbl[i].vector);
}
-static void ena_del_napi(struct ena_adapter *adapter)
+static void ena_del_napi_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
- netif_napi_del(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ /* Check if napi was initialized before */
+ if (!ENA_IS_XDP_INDEX(adapter, i) ||
+ adapter->ena_napi[i].xdp_ring)
+ netif_napi_del(&adapter->ena_napi[i].napi);
+ else
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].xdp_ring);
+ }
}
-static void ena_init_napi(struct ena_adapter *adapter)
+static void ena_init_napi_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
- struct ena_napi *napi;
+ struct ena_napi *napi = {0};
int i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
napi = &adapter->ena_napi[i];
netif_napi_add(adapter->netdev,
&adapter->ena_napi[i].napi,
- ena_io_poll,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
ENA_NAPI_BUDGET);
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
+
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ napi->rx_ring = &adapter->rx_ring[i];
+ napi->tx_ring = &adapter->tx_ring[i];
+ } else {
+ napi->xdp_ring = &adapter->tx_ring[i];
+ }
napi->qid = i;
}
}
-static void ena_napi_disable_all(struct ena_adapter *adapter)
+static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = first_index; i < first_index + count; i++)
napi_disable(&adapter->ena_napi[i].napi);
}
-static void ena_napi_enable_all(struct ena_adapter *adapter)
+static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+ int first_index,
+ int count)
{
int i;
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = first_index; i < first_index + count; i++)
napi_enable(&adapter->ena_napi[i].napi);
}
@@ -1578,7 +2225,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -1616,7 +2263,9 @@ static int ena_up_complete(struct ena_adapter *adapter)
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
- ena_napi_enable_all(adapter);
+ ena_napi_enable_in_range(adapter,
+ 0,
+ adapter->xdp_num_queues + adapter->num_io_queues);
return 0;
}
@@ -1649,7 +2298,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
if (rc) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to create I/O TX queue num %d rc: %d\n",
- qid, rc);
+ qid, rc);
return rc;
}
@@ -1668,12 +2317,13 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
+static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = first_index; i < first_index + count; i++) {
rc = ena_create_io_tx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1682,7 +2332,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
return 0;
create_err:
- while (i--)
+ while (i-- > first_index)
ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
return rc;
@@ -1727,13 +2377,15 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
netif_err(adapter, ifup, adapter->netdev,
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
qid, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- return rc;
+ goto err;
}
ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
return rc;
+err:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
@@ -1760,7 +2412,8 @@ create_err:
}
static void set_io_rings_size(struct ena_adapter *adapter,
- int new_tx_size, int new_rx_size)
+ int new_tx_size,
+ int new_rx_size)
{
int i;
@@ -1794,14 +2447,24 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
* ones due to past queue allocation failures.
*/
set_io_rings_size(adapter, adapter->requested_tx_ring_size,
- adapter->requested_rx_ring_size);
+ adapter->requested_rx_ring_size);
while (1) {
- rc = ena_setup_all_tx_resources(adapter);
+ if (ena_xdp_present(adapter)) {
+ rc = ena_setup_and_create_all_xdp_queues(adapter);
+
+ if (rc)
+ goto err_setup_tx;
+ }
+ rc = ena_setup_tx_resources_in_range(adapter,
+ 0,
+ adapter->num_io_queues);
if (rc)
goto err_setup_tx;
- rc = ena_create_all_io_tx_queues(adapter);
+ rc = ena_create_io_tx_queues_in_range(adapter,
+ 0,
+ adapter->num_io_queues);
if (rc)
goto err_create_tx_queues;
@@ -1825,7 +2488,7 @@ err_setup_tx:
if (rc != -ENOMEM) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with error code %d\n",
- rc);
+ rc);
return rc;
}
@@ -1848,7 +2511,7 @@ err_setup_tx:
new_rx_ring_size = cur_rx_ring_size / 2;
if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
- new_rx_ring_size < ENA_MIN_RING_SIZE) {
+ new_rx_ring_size < ENA_MIN_RING_SIZE) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
ENA_MIN_RING_SIZE);
@@ -1867,10 +2530,11 @@ err_setup_tx:
static int ena_up(struct ena_adapter *adapter)
{
- int rc, i;
+ int io_queue_count, rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
+ io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
ena_setup_io_intr(adapter);
/* napi poll functions should be initialized before running
@@ -1878,7 +2542,7 @@ static int ena_up(struct ena_adapter *adapter)
* interrupt, causing the ISR to fire immediately while the poll
* function wasn't set yet, causing a null dereference
*/
- ena_init_napi(adapter);
+ ena_init_napi_in_range(adapter, 0, io_queue_count);
rc = ena_request_io_irq(adapter);
if (rc)
@@ -1909,7 +2573,7 @@ static int ena_up(struct ena_adapter *adapter)
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
- for (i = 0; i < adapter->num_io_queues; i++)
+ for (i = 0; i < io_queue_count; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
@@ -1922,13 +2586,15 @@ err_up:
err_create_queues_with_backoff:
ena_free_io_irq(adapter);
err_req_irq:
- ena_del_napi(adapter);
+ ena_del_napi_in_range(adapter, 0, io_queue_count);
return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
+ int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+
netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -1941,7 +2607,7 @@ static void ena_down(struct ena_adapter *adapter)
netif_tx_disable(adapter->netdev);
/* After this point the napi handler won't enable the tx queue */
- ena_napi_disable_all(adapter);
+ ena_napi_disable_in_range(adapter, 0, io_queue_count);
/* After destroy the queue there won't be any new interrupts */
@@ -1959,7 +2625,7 @@ static void ena_down(struct ena_adapter *adapter)
ena_disable_io_intr_sync(adapter);
ena_free_io_irq(adapter);
- ena_del_napi(adapter);
+ ena_del_napi_in_range(adapter, 0, io_queue_count);
ena_free_all_tx_bufs(adapter);
ena_free_all_rx_bufs(adapter);
@@ -2049,23 +2715,47 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
ena_close(adapter->netdev);
adapter->requested_tx_ring_size = new_tx_size;
adapter->requested_rx_ring_size = new_rx_size;
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
return dev_was_up ? ena_up(adapter) : 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int prev_channel_count;
bool dev_was_up;
dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
+ prev_channel_count = adapter->num_io_queues;
adapter->num_io_queues = new_channel_count;
+ if (ena_xdp_present(adapter) &&
+ ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
+ adapter->xdp_first_ring = new_channel_count;
+ adapter->xdp_num_queues = new_channel_count;
+ if (prev_channel_count > new_channel_count)
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ NULL,
+ new_channel_count,
+ prev_channel_count);
+ else
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ adapter->xdp_bpf_prog,
+ prev_channel_count,
+ new_channel_count);
+ }
+
/* We need to destroy the rss table so that the indirection
* table will be reinitialized by ena_up()
*/
ena_com_rss_destroy(ena_dev);
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
return dev_was_up ? ena_open(adapter->netdev) : 0;
}
@@ -2249,7 +2939,7 @@ error_report_dma_error:
tx_info->skb = NULL;
tx_info->num_of_bufs += i;
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
return -EINVAL;
}
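The ena_start_xmit() hunk that follows is the payoff of the ena_xmit_common() refactor: the doorbell-on-burst check, the ena_com_prepare_tx() call and the TX stats update all moved into the shared helper introduced earlier, so the skb and XDP transmit paths can no longer drift apart. The one behavioral subtlety is the error classification, sketched here as a standalone predicate:

#include <linux/errno.h>
#include <linux/types.h>

/* -ENOMEM from ena_com_prepare_tx() just means the SQ is full and the
 * packet is dropped; any other error implies inconsistent device state,
 * for which ena_xmit_common() schedules a reset.
 */
static bool tx_prepare_error_is_fatal(int rc)
{
        return rc && rc != -ENOMEM;
}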
@@ -2264,7 +2954,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
void *push_hdr;
u16 next_to_use, req_id, header_len;
- int qid, rc, nb_hw_desc;
+ int qid, rc;
netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
/* Determine which tx ring we will be placed on */
@@ -2299,50 +2989,17 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb);
- if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
- "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
- qid);
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- }
-
- /* prepare the packet's descriptors to dma engine */
- rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
- &nb_hw_desc);
-
- /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
- * since the number of free descriptors in the queue is checked
- * after sending the previous packet. In case there isn't enough
- * space in the queue for the next packet, it is stopped
- * until there is again enough available space in the queue.
- * All other failure reasons of ena_com_prepare_tx() are fatal
- * and therefore require a device reset.
- */
- if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
- "failed to prepare tx bufs\n");
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.prepare_ctx_err++;
- u64_stats_update_end(&tx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ rc = ena_xmit_common(dev,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ skb->len);
+ if (rc)
goto error_unmap_dma;
- }
netdev_tx_sent_queue(txq, skb->len);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.cnt++;
- tx_ring->tx_stats.bytes += skb->len;
- u64_stats_update_end(&tx_ring->syncp);
-
- tx_info->tx_descs = nb_hw_desc;
- tx_info->last_jiffies = jiffies;
- tx_info->print_once = 0;
-
- tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
- tx_ring->ring_size);
-
/* stop the queue when no more space is available; the packet can need up
* to sgl_size + 2 descriptors: one for the meta descriptor and one for the
* header (if the header is larger than tx_max_header_size).
@@ -2389,7 +3046,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
error_unmap_dma:
- ena_unmap_tx_skb(tx_ring, tx_info);
+ ena_unmap_tx_buff(tx_ring, tx_info);
tx_info->skb = NULL;
error_drop_packet:
@@ -2568,6 +3225,7 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_bpf = ena_xdp,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2947,7 +3605,9 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
int i, budget, rc;
+ int io_queue_count;
+ io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure another process isn't turning the device on or off */
smp_rmb();
@@ -2962,7 +3622,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
budget = ENA_MONITORED_TX_QUEUES;
- for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
+ for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
@@ -2970,7 +3630,8 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (unlikely(rc))
return;
- rc = check_for_rx_interrupt_queue(adapter, rx_ring);
+ rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
@@ -2979,7 +3640,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
break;
}
- adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
+ adapter->last_monitored_tx_qid = i % io_queue_count;
}
/* trigger napi schedule after 2 consecutive detections */
@@ -3045,8 +3706,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
return;
- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
- adapter->keep_alive_timeout);
+ keep_alive_expired = adapter->last_keep_alive_jiffies +
+ adapter->keep_alive_timeout;
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
@@ -3148,7 +3809,7 @@ static void ena_timer_service(struct timer_list *t)
}
/* Reset the timer */
- mod_timer(&adapter->timer_service, jiffies + HZ);
+ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
@@ -3556,6 +4217,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+
adapter->last_monitored_tx_qid = 0;
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3569,7 +4233,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Failed to query interrupt moderation feature\n");
goto err_netdev_destroy;
}
- ena_init_io_rings(adapter);
+ ena_init_io_rings(adapter,
+ 0,
+ adapter->xdp_num_queues +
+ adapter->num_io_queues);
netdev->netdev_ops = &ena_netdev_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
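
The ena changes above consistently model the ring array as the regular IO queues in [0, num_io_queues) followed by the XDP TX rings in [xdp_first_ring, xdp_first_ring + xdp_num_queues), so the napi/ring helpers now take an explicit range and loops run to io_queue_count = num_io_queues + xdp_num_queues. A minimal stand-alone sketch of that layout arithmetic (all names illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct ring_layout {
	unsigned int num_io_queues;  /* regular channels */
	unsigned int xdp_first_ring; /* == num_io_queues when XDP is attached */
	unsigned int xdp_num_queues; /* one XDP TX ring per channel */
};

/* mirrors the ENA_IS_XDP_INDEX() test introduced in ena_netdev.h below */
static bool is_xdp_index(const struct ring_layout *l, unsigned int i)
{
	return i >= l->xdp_first_ring &&
	       i < l->xdp_first_ring + l->xdp_num_queues;
}

int main(void)
{
	struct ring_layout l = { 4, 4, 4 };
	unsigned int count = l.num_io_queues + l.xdp_num_queues;

	for (unsigned int i = 0; i < count; i++) /* the bound ena_up() uses */
		printf("ring %u -> %s\n", i, is_xdp_index(&l, i) ? "xdp" : "io");
	return 0;
}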
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index bffd778f2ce3..8795e0b1dc3c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -36,6 +36,7 @@
#include <linux/bitops.h>
#include <linux/dim.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
@@ -129,6 +130,8 @@
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
+#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
#define ENA_MGMNT_IRQ_IDX 0
#define ENA_IO_IRQ_FIRST_IDX 1
@@ -142,6 +145,18 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+/* The max MTU size is configured to be the ethernet frame size without
+ * the overhead of the ethernet header (which may include a VLAN tag) and
+ * the frame check sequence (FCS).
+ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE.
+ */
+
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM)
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -155,6 +170,8 @@ struct ena_napi {
struct napi_struct napi ____cacheline_aligned;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
+ struct ena_ring *xdp_ring;
+ bool first_interrupt;
u32 qid;
struct dim dim;
};
@@ -180,6 +197,17 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
+ /* XDP buffer structure which is used for sending packets in
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ /* The rx page for the rx buffer that was received in rx and
+ * retransmitted on the xdp tx queues as a result of an XDP_TX action.
+ * We need to free the page once we finish cleaning the buffer in
+ * clean_xdp_irq()
+ */
+ struct page *xdp_rx_page;
+
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
@@ -258,10 +286,13 @@ struct ena_ring {
struct ena_adapter *adapter;
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;
+ struct bpf_prog *xdp_bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
u16 next_to_use;
u16 next_to_clean;
u16 rx_copybreak;
+ u16 rx_headroom;
u16 qid;
u16 mtu;
u16 sgl_size;
@@ -379,6 +410,10 @@ struct ena_adapter {
u32 last_monitored_tx_qid;
enum ena_regs_reset_reason_types reset_reason;
+
+ struct bpf_prog *xdp_bpf_prog;
+ u32 xdp_first_ring;
+ u32 xdp_num_queues;
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -390,8 +425,48 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
+
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
+{
+ return adapter->xdp_first_ring != 0;
+}
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
+static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
#endif /* !(ENA_H) */
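
The helpers above gate XDP attach on two conditions: the MTU must fit the single-page buffer (ENA_XDP_MAX_MTU) and every channel must be able to get a dedicated XDP TX ring, hence the 2 * queues <= max_num_io_queues test in ena_xdp_legal_queue_count(). A hedged sketch of a caller, assuming ena_netdev.h is included; the error handling is illustrative, not taken from the driver:

static int sketch_xdp_check(struct ena_adapter *adapter)
{
	switch (ena_xdp_allowed(adapter)) {
	case ENA_XDP_ALLOWED:
		return 0;
	case ENA_XDP_CURRENT_MTU_TOO_LARGE:
		/* frame must fit in ENA_PAGE_SIZE minus headers/headroom */
		return -EINVAL;
	case ENA_XDP_NO_ENOUGH_QUEUES:
		/* needs a second TX ring per channel: 2 * n <= max */
		return -EINVAL;
	}
	return -EINVAL;
}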
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ab30761003da..cf3562e82ca9 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -527,7 +527,7 @@ int lance_close(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(lance_close);
-void lance_tx_timeout(struct net_device *dev)
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 741cdc392c6b..8266b3c1fefc 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -243,7 +243,7 @@ int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
-void lance_tx_timeout(struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev);
#endif
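
The recurring one-line change across the drivers below is the new ndo_tx_timeout prototype: the core now passes the index of the queue whose watchdog fired, and single-queue drivers are simply updated to accept (and ignore) it. A minimal sketch of the updated hook; the names here are hypothetical, not from any of these drivers:

#include <linux/netdevice.h>

static void sketch_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* single-queue drivers can ignore txqueue; multi-queue drivers
	 * may use it to report or reset only the stuck ring */
	netdev_warn(dev, "TX queue %u timed out, resetting\n", txqueue);
}

static const struct net_device_ops sketch_netdev_ops = {
	.ndo_tx_timeout = sketch_tx_timeout,
};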
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 212fe72a190b..2f808dbc8b0e 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -118,10 +118,6 @@ struct lance_private {
int auto_select; /* cable-selection by carrier */
unsigned short busmaster_regval;
-#ifdef CONFIG_SUNLANCE
- struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
- int burst_sizes; /* ledma SBus burst sizes */
-#endif
struct timer_list multicast_timer;
struct net_device *dev;
};
@@ -522,7 +518,7 @@ static inline int lance_reset(struct net_device *dev)
return status;
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -551,11 +547,10 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
if (!lance_tx_buffs_avail(lp))
goto out_free;
-#ifdef DEBUG
/* dump the packet */
- print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, 64, true);
-#endif
+ print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+ 64, true);
+
entry = lp->tx_new & lp->tx_ring_mod_mask;
ib->btx_ring[entry].length = (-skblen) | 0xf000;
ib->btx_ring[entry].misc = 0;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 0842da492a64..1c53408f5d47 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -422,7 +422,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
spin_unlock_irqrestore(&priv->chip_lock, flags);
}
-static void am79c961_timeout(struct net_device *dev)
+static void am79c961_timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
dev->name);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 573e88fc8ede..0f3b743425e8 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1569,7 +1569,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
* failed or the interface is locked up. This function will reinitialize
* the hardware.
*/
-static void amd8111e_tx_timeout(struct net_device *dev)
+static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int err;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4b6a5cb85dd2..5e0f645f5bde 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -530,7 +530,7 @@ static inline void ariadne_reset(struct net_device *dev)
netif_start_queue(dev);
}
-static void ariadne_tx_timeout(struct net_device *dev)
+static void ariadne_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index d3d44e07afbc..4e36122609a3 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -346,7 +346,7 @@ static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );
static int lance_set_mac_address( struct net_device *dev, void *addr );
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
/************************* End of Prototypes **************************/
@@ -727,7 +727,7 @@ static void lance_init_ring( struct net_device *dev )
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
struct lance_ioreg *IO = lp->iobase;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 1793950f0582..089a4fbc61a0 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1014,7 +1014,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter must be hung?
*/
-static void au1000_tx_timeout(struct net_device *dev)
+static void au1000_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
au1000_reset_mac(dev);
@@ -1053,23 +1053,12 @@ static void au1000_multicast_list(struct net_device *dev)
writel(reg, &aup->mac->control);
}
-static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL; /* PHY not controllable */
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static const struct net_device_ops au1000_netdev_ops = {
.ndo_open = au1000_open,
.ndo_stop = au1000_close,
.ndo_start_xmit = au1000_tx,
.ndo_set_rx_mode = au1000_multicast_list,
- .ndo_do_ioctl = au1000_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1161,7 +1150,7 @@ static int au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (struct mac_reg *)
- ioremap_nocache(base->start, resource_size(base));
+ ioremap(base->start, resource_size(base));
if (!aup->mac) {
dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
@@ -1169,7 +1158,7 @@ static int au1000_probe(struct platform_device *pdev)
}
/* Setup some variables for quick register address access */
- aup->enable = (u32 *)ioremap_nocache(macen->start,
+ aup->enable = (u32 *)ioremap(macen->start,
resource_size(macen));
if (!aup->enable) {
dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
@@ -1178,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mac_id = pdev->id;
- aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
+ aup->macdma = ioremap(macdma->start, resource_size(macdma));
if (!aup->macdma) {
dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
err = -ENXIO;
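
Several conversions in this series (au1000 here, and bgmac, arc emac, ag71xx, and nb8800 below) delete open-coded ioctl handlers that only forwarded to phy_mii_ioctl() and point .ndo_do_ioctl at the new phylib helpers instead; phy_do_ioctl_running additionally requires netif_running(), matching the removed checks. The pattern, sketched under those assumptions:

#include <linux/netdevice.h>
#include <linux/phy.h>

static const struct net_device_ops sketch_ops = {
	/* was: a local wrapper that checked netif_running() and
	 * dev->phydev before calling phy_mii_ioctl() */
	.ndo_do_ioctl = phy_do_ioctl_running,
};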
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index dac4a2fcad6a..7282ce55ffb8 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -608,7 +608,7 @@ static int lance_rx(struct net_device *dev)
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == 0) {
+ if (!skb) {
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
@@ -884,7 +884,7 @@ static inline int lance_reset(struct net_device *dev)
return status;
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index f90b454b1642..aff44241988c 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -306,7 +306,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void lance_tx_timeout (struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
@@ -913,7 +913,7 @@ lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
}
-static void lance_tx_timeout (struct net_device *dev)
+static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = (struct lance_private *) dev->ml_priv;
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index c6c2a54c1121..c38edf6f03a3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -254,7 +254,7 @@ static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
-static void ni65_timeout(struct net_device *dev);
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
@@ -1133,7 +1133,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
* kick xmitter ..
*/
-static void ni65_timeout(struct net_device *dev)
+static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct priv *p = dev->ml_priv;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9c152d85840d..023aecf6ab30 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -407,7 +407,7 @@ static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void mace_tx_timeout(struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static struct net_device_stats *mace_get_stats(struct net_device *dev);
static int mace_rx(struct net_device *dev, unsigned char RxCnt);
@@ -837,7 +837,7 @@ mace_start_xmit
failed, put skb back into a list."
---------------------------------------------------------------------------- */
-static void mace_tx_timeout(struct net_device *dev)
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5ad12c10934..dc7d88227e76 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -314,7 +314,7 @@ static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
struct net_device *);
-static void pcnet32_tx_timeout(struct net_device *dev);
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
@@ -2455,7 +2455,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
lp->a->write_csr(ioaddr, CSR0, csr0_bits);
}
-static void pcnet32_tx_timeout(struct net_device *dev)
+static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr, flags;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ebcbf8ca4829..b00e00881253 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1097,7 +1097,7 @@ static void lance_piozero(void __iomem *dest, int len)
sbus_writeb(0, piobuf);
}
-static void lance_tx_timeout(struct net_device *dev)
+static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct lance_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 98f8f2033154..b71f9b04a51e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2152,7 +2152,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
-static void xgbe_tx_timeout(struct net_device *netdev)
+static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 128cd648ba99..46c3c1ca38d6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1227,7 +1227,7 @@ static bool xgbe_phy_sfp_verify_eeprom(u8 cc_in, u8 *buf, unsigned int len)
for (cc = 0; len; buf++, len--)
cc += *buf;
- return (cc == cc_in) ? true : false;
+ return cc == cc_in;
}
static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 02b4f3af02b5..c48f60996761 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -575,7 +575,7 @@ static void xge_free_pending_skb(struct net_device *ndev)
}
}
-static void xge_timeout(struct net_device *ndev)
+static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct xge_pdata *pdata = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d8612131c55e..6aee2f0fc0db 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -859,7 +859,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
return processed;
}
-static void xgene_enet_timeout(struct net_device *ndev)
+static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct netdev_queue *txq;
@@ -2020,7 +2020,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
int ret;
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
+ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 8d03578d5e8c..95d3061c61be 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -91,7 +91,7 @@ static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
-static void mace_tx_timeout(struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, void *addr);
/*
@@ -600,7 +600,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mace_tx_timeout(struct net_device *dev)
+static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index a17a4da7bc15..c85e3e29012c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ aq_nic_set_loopback(self);
+
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self)
INIT_WORK(&self->service_task, aq_nic_service_task);
- aq_nic_set_loopback(self);
-
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 2bb329606794..6b27af0db499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -253,7 +253,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_free_aq_hw;
}
- self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+ self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
goto err_free_aq_hw;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 58e891af6e09..ec041f78d063 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.rx_extract_ts = hw_atl_b0_rx_extract_ts,
.extract_hwts = hw_atl_b0_extract_hwts,
.hw_set_offload = hw_atl_b0_hw_offload_set,
- .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
- .hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_loopback = hw_atl_b0_set_loopback,
.hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8910b62e67ed..f547baa6c954 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
u32 speed;
mpi_state = hw_atl_utils_mpi_get_state(self);
- speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G |
- FW2X_RATE_10G);
+ speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
if (!speed) {
link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 6f2c867785fe..17bda4e8cc45 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -781,18 +781,6 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
return 0;
}
-static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-
/**
* arc_emac_restart - Restart EMAC
* @ndev: Pointer to net_device structure.
@@ -857,7 +845,7 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
.ndo_set_rx_mode = arc_emac_set_rx_mode,
- .ndo_do_ioctl = arc_emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = arc_emac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 8f5021091eee..e95687a780fb 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -313,7 +313,7 @@ struct ag71xx {
struct ag71xx_desc *stop_desc;
dma_addr_t stop_desc_dma;
- int phy_if_mode;
+ phy_interface_t phy_if_mode;
struct delayed_work restart_work;
struct timer_list oom_timer;
@@ -1394,14 +1394,6 @@ err_drop:
return NETDEV_TX_OK;
}
-static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- if (!ndev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(ndev->phydev, ifr, cmd);
-}
-
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
struct ag71xx *ag = from_timer(ag, t, oom_timer);
@@ -1409,7 +1401,7 @@ static void ag71xx_oom_timer_handler(struct timer_list *t)
napi_schedule(&ag->napi);
}
-static void ag71xx_tx_timeout(struct net_device *ndev)
+static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ag71xx *ag = netdev_priv(ndev);
@@ -1618,7 +1610,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_do_ioctl = ag71xx_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1687,8 +1679,7 @@ static int ag71xx_probe(struct platform_device *pdev)
goto err_free;
}
- ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+ ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
@@ -1744,7 +1735,7 @@ static int ag71xx_probe(struct platform_device *pdev)
eth_random_addr(ndev->dev_addr);
}
- err = of_get_phy_mode(np, ag->phy_if_mode);
+ err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
goto err_free;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index d4bbcdfd691a..1dcbc486eca9 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1553,7 +1553,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}
-static void alx_tx_timeout(struct net_device *dev)
+static void alx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct alx_priv *alx = netdev_priv(dev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2b239ecea05f..4c0b1f8551dd 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -350,7 +350,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl1c_tx_timeout(struct net_device *netdev)
+static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7b65825c15..e0d89942d537 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -251,7 +251,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl1e_tx_timeout(struct net_device *netdev)
+static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 3aba38322717..b81a4e0c5b57 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1001,7 +1001,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atl2_tx_timeout(struct net_device *netdev)
+static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 505a22c703f7..0941d07d0833 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -183,7 +183,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void atlx_tx_timeout(struct net_device *netdev)
+static void atlx_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 30b455013bf3..bc273e0db7ff 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1005,18 +1005,13 @@ static int nb8800_stop(struct net_device *dev)
return 0;
}
-static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static const struct net_device_ops nb8800_netdev_ops = {
.ndo_open = nb8800_open,
.ndo_stop = nb8800_stop,
.ndo_start_xmit = nb8800_xmit,
.ndo_set_mac_address = nb8800_set_mac_address,
.ndo_set_rx_mode = nb8800_set_rx_mode,
- .ndo_do_ioctl = nb8800_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 035dbb1b2c98..a780b7215021 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -948,7 +948,7 @@ irq_ack:
return IRQ_RETVAL(handled);
}
-static void b44_tx_timeout(struct net_device *dev)
+static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct b44 *bp = netdev_priv(dev);
@@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
int ethaddr_bytes = ETH_ALEN;
memset(ppattern + offset, 0xff, magicsync);
- for (j = 0; j < magicsync; j++)
- set_bit(len++, (unsigned long *) pmask);
+ for (j = 0; j < magicsync; j++) {
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
+ }
for (j = 0; j < B44_MAX_PATTERNS; j++) {
if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
@@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
for (k = 0; k< ethaddr_bytes; k++) {
ppattern[offset + magicsync +
(j * ETH_ALEN) + k] = macaddr[k];
- set_bit(len++, (unsigned long *) pmask);
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
}
}
return len - 1;
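
The b44 change replaces set_bit() on a cast u8 pattern-mask buffer with explicit byte-wise ORs. set_bit() operates on unsigned-long bitmaps, so its byte layout depends on word size and endianness, and it may touch bytes beyond the mask; the open-coded form (restated stand-alone below, helper name hypothetical) keeps bit n in byte n/8 on every host:

#include <linux/bits.h>
#include <linux/types.h>

/* bit n of the wake-on-LAN pattern mask lives in byte n / 8,
 * at position n % 8, independent of host endianness */
static inline void pattern_mask_set(u8 *pmask, unsigned int n)
{
	pmask[n >> 3] |= BIT(n & 7);
}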
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 825af709708e..e0611cba87f9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1354,7 +1354,7 @@ out:
return ret;
}
-static void bcm_sysport_tx_timeout(struct net_device *dev)
+static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
netdev_warn(dev, "transmit timeout!\n");
@@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
- priv->ring_map[q + port * num_tx_queues] = ring;
+ priv->ring_map[qp + port * num_tx_queues] = ring;
qp++;
}
@@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
struct net_device *slave_dev;
unsigned int num_tx_queues;
struct net_device *dev;
- unsigned int q, port;
+ unsigned int q, qp, port;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
if (priv->netdev != info->master)
@@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
continue;
ring->inspect = false;
- priv->ring_map[q + port * num_tx_queues] = NULL;
+ qp = ring->switch_queue;
+ priv->ring_map[qp + port * num_tx_queues] = NULL;
}
return 0;
@@ -2427,6 +2428,14 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (!of_id || !of_id->data)
return -EINVAL;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
/* Fairly quickly we need to know the type of adapter we have */
params = of_id->data;
@@ -2727,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
umac_reset(priv);
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
/* We may have been suspended and never received a WOL event that
* would turn off MPD detection; take care of that now
*/
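
bcmsysport's probe now negotiates the DMA mask up front: prefer the 40-bit mask, fall back to 32-bit, and fail the probe only if neither sticks. The same fallback pattern as a stand-alone sketch:

#include <linux/dma-mapping.h>

static int sketch_probe_dma(struct device *dev)
{
	/* prefer the wider mask; retry with 32-bit if the platform
	 * cannot satisfy it */
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));

	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}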
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 148734b166f0..1bb07a5d82c9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1248,14 +1248,6 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
-static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(net_dev))
- return -EINVAL;
-
- return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
-}
-
static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
@@ -1263,7 +1255,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_rx_mode = bgmac_set_rx_mode,
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = bgmac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
};
/**************************************************
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index fbc196b480b6..dbb7874607ca 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6575,7 +6575,7 @@ bnx2_dump_state(struct bnx2 *bp)
}
static void
-bnx2_tx_timeout(struct net_device *dev)
+bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnx2 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5e037a305b83..ee9e9290f112 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4970,7 +4970,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-void bnx2x_tx_timeout(struct net_device *dev)
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 8b08cb18e363..6f1352d51cb2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -617,7 +617,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
*
* @dev: net device
*/
-void bnx2x_tx_timeout(struct net_device *dev);
+void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue);
/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
* c2s_map should have BNX2X_MAX_PRIORITY entries.
@@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
u32 func_config =
MF_CFG_RD(bp,
- func_mf_config[BP_PORT(bp) + 2 * i].
+ func_mf_config[BP_PATH(bp) + 2 * i].
config);
func_num +=
((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 066765fbef06..0a59a09ef82f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -296,7 +296,6 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
* possible, the driver should only write the valid vnics into the internal
* ram according to the appropriate port mode.
*/
-#define BITS_TO_BYTES(x) ((x)/8)
/* CMNG constants, as derived from system spec calculations */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 192ff8d5da32..1c26fa962233 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
- bool global = false;
u32 error_recovered, error_unrecovered;
- bool is_parity;
+ bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+ if (vf)
+ vf->state = VF_LOST;
+ }
+#endif
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
@@ -14045,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = -ENOMEM;
goto init_one_freemem;
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ bp->doorbells = ioremap(pci_resource_start(pdev, 2),
doorbell_size);
}
if (!bp->doorbells) {
@@ -15402,6 +15410,7 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 7a6e82db4231..bacc8552bce1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+#define BNX2X_VFS_VLAN_CREDIT(bp) \
+ (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
+
#define PF_VLAN_CREDIT_E2(bp, func_num) \
- ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+ ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \
func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b6ebd92ec565..3a716c015415 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -139,6 +139,7 @@ struct bnx2x_virtf {
#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+#define VF_LOST 4 /* Recovery while VFs are loaded */
bool flr_clnup_stage; /* true during flr cleanup */
bool malicious; /* true if FW indicated so, until FLR */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0752b7fa4d9c..ea0e9394f898 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
{
int i;
+ if (vf->state == VF_LOST) {
+ /* Just ack the FW and return if VFs are lost
+ * in case of a parity error. VFs are expected to time out
+ * while waiting for the PF response.
+ */
+ DP(BNX2X_MSG_IOV,
+ "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ return;
+ }
+
/* check if tlv type is known */
if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
/* Lock the per vf op mutex and note the locker's identity.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 85983f0e3134..597e6fd5bfea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -944,6 +944,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
+ page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -2001,6 +2002,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
+ if (!bp->fw_health)
+ goto async_event_process_exit;
+
bp->fw_reset_timestamp = jiffies;
bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
if (!bp->fw_reset_min_dsecs)
@@ -4421,8 +4425,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
- flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
- FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
@@ -6186,7 +6191,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
val = clamp_t(u16, tmr, 1,
coal_cap->cmpl_aggr_dma_tmr_during_int_max);
- req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
req->enables |=
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}
@@ -6993,7 +6998,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
- bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
@@ -7115,14 +7119,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
goto err_recovery_out;
- if (!fw_health) {
- fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
- bp->fw_health = fw_health;
- if (!fw_health) {
- rc = -ENOMEM;
- goto err_recovery_out;
- }
- }
fw_health->flags = le32_to_cpu(resp->flags);
if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
!(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
@@ -7292,6 +7288,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
bp->chip_num = le16_to_cpu(resp->chip_num);
+ bp->chip_rev = resp->chip_rev;
if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
!resp->chip_metal)
bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
@@ -7896,7 +7893,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
int tcs, i;
tcs = netdev_get_num_tc(dev);
- if (tcs > 1) {
+ if (tcs) {
int i, off, count;
for (i = 0; i < tcs; i++) {
@@ -8796,6 +8793,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (fw_reset) {
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9064,7 +9064,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!netif_carrier_ok(bp->dev))
+ if (!bp->link_info.link_up)
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -9241,6 +9241,17 @@ void bnxt_half_close_nic(struct bnxt *bp)
bnxt_free_mem(bp, false);
}
+static void bnxt_reenable_sriov(struct bnxt *bp)
+{
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
+
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+}
+
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -9259,15 +9270,10 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
- int n = pf->active_vfs;
-
- if (n)
- bnxt_cfg_hw_sriov(bp, &n, true);
- }
- if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
+ }
}
bnxt_hwmon_open(bp);
}
@@ -9307,10 +9313,6 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
- if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
- pci_is_enabled(bp->pdev))
- pci_disable_device(bp->pdev);
-
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -9976,7 +9978,7 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
}
}
-static void bnxt_tx_timeout(struct net_device *dev)
+static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bnxt *bp = netdev_priv(dev);
@@ -9990,8 +9992,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 val;
- if (!fw_health || !fw_health->enabled ||
- test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
if (fw_health->tmr_counter) {
@@ -10042,6 +10043,13 @@ static void bnxt_timer(struct timer_list *t)
bnxt_queue_sp_work(bp);
}
+#ifdef CONFIG_RFS_ACCEL
+ if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+#endif /*CONFIG_RFS_ACCEL*/
+
if (bp->link_info.phy_retry) {
if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
bp->link_info.phy_retry = false;
@@ -10052,7 +10060,8 @@ static void bnxt_timer(struct timer_list *t)
}
}
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
+ netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -10089,9 +10098,16 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
+ /* When firmware is in a fatal state, disable the PCI device to prevent
+ * any potential bad DMAs before freeing kernel memory.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ pci_disable_device(bp->pdev);
__bnxt_close_nic(bp, true, false);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
+ if (pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
@@ -10482,6 +10498,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+static void bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ if (bp->fw_health)
+ return;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return;
+
+ bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+ if (!bp->fw_health) {
+ netdev_warn(bp->dev, "Failed to allocate fw_health\n");
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ }
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -10528,6 +10561,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);
+ bnxt_alloc_fw_health(bp);
rc = bnxt_hwrm_error_recovery_qcfg(bp);
if (rc)
netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
@@ -10552,7 +10586,7 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -10609,6 +10643,12 @@ static int bnxt_fw_init_one(struct bnxt *bp)
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
+
+ /* In case fw capabilities have changed, destroy the unneeded
+ * reporters and create newly capable ones.
+ */
+ bnxt_dl_fw_reporters_destroy(bp, false);
+ bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -10751,8 +10791,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
case BNXT_FW_RESET_STATE_ENABLE_DEV:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
- bp->fw_health) {
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u32 val;
val = bnxt_fw_health_readl(bp,
@@ -10801,6 +10840,9 @@ static void bnxt_fw_reset_task(struct work_struct *work)
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, rc);
+ if (!rc)
+ bnxt_reenable_sriov(bp);
+ bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
break;
@@ -11044,11 +11086,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
- keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
- keys1->ports.ports == keys2->ports.ports &&
- keys1->basic.ip_proto == keys2->basic.ip_proto &&
- keys1->basic.n_proto == keys2->basic.n_proto &&
+ if (keys1->basic.n_proto != keys2->basic.n_proto ||
+ keys1->basic.ip_proto != keys2->basic.ip_proto)
+ return false;
+
+ if (keys1->basic.n_proto == htons(ETH_P_IP)) {
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ return false;
+ } else {
+ if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src)) ||
+ memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst)))
+ return false;
+ }
+
+ if (keys1->ports.ports == keys2->ports.ports &&
keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
@@ -11066,6 +11120,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
+ u32 flags;
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -11105,8 +11160,9 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
- bp->hwrm_spec_code < 0x10601) {
+ flags = fkeys->control.flags;
+ if (((flags & FLOW_DIS_ENCAPSULATION) &&
+ bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
@@ -11340,11 +11396,11 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return -EOPNOTSUPP;
/* The PF and its VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
return -EOPNOTSUPP;
- ppid->id_len = sizeof(bp->switch_id);
- memcpy(ppid->id, bp->switch_id, ppid->id_len);
+ ppid->id_len = sizeof(bp->dsn);
+ memcpy(ppid->id, bp->dsn, ppid->id_len);
return 0;
}
@@ -11396,13 +11452,13 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- bnxt_dl_unregister(bp);
- }
+ bnxt_dl_fw_reporters_destroy(bp, true);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
@@ -11415,6 +11471,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ kfree(bp->fw_health);
+ bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
@@ -11434,6 +11492,9 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
rc);
return rc;
}
+ if (!fw_dflt)
+ return 0;
+
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -11447,9 +11508,6 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
link_info->support_auto_speeds = link_info->support_speeds;
- if (!fw_dflt)
- return 0;
-
bnxt_init_ethtool_link_settings(bp);
return 0;
}
@@ -11711,6 +11769,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
put_unaligned_le32(dw, &dsn[0]);
pci_read_config_dword(pdev, pos + 4, &dw);
put_unaligned_le32(dw, &dsn[4]);
+ bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
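For reference, the DSN read above pulls from the PCIe Device Serial Number extended capability; a kernel-style sketch of the whole access, assuming the standard layout (a 4-byte capability header followed by the two serial-number dwords):

static int read_pcie_dsn(struct pci_dev *pdev, u8 dsn[8])
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	u32 dw;

	if (!pos)
		return -EOPNOTSUPP;	/* capability not implemented */

	/* Serial number: two dwords right after the capability header. */
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 8, &dw);
	put_unaligned_le32(dw, &dsn[4]);
	return 0;
}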
@@ -11822,9 +11881,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp)) {
/* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto init_err_pci_clean;
+ rc = bnxt_pcie_dsn_get(bp, bp->dsn);
}
/* MTU range: 60 - FW defined max */
@@ -11871,12 +11928,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_init_tc(bp);
}
+ bnxt_dl_register(bp);
+
rc = register_netdev(dev);
if (rc)
- goto init_err_cleanup_tc;
+ goto init_err_cleanup;
if (BNXT_PF(bp))
- bnxt_dl_register(bp);
+ devlink_port_type_eth_set(&bp->dl_port, bp->dev);
+ bnxt_dl_fw_reporters_create(bp);
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
@@ -11885,7 +11945,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-init_err_cleanup_tc:
+init_err_cleanup:
+ bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 505af5cfb1bd..cabef0b4f5fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,8 @@ struct bnxt {
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
+ u8 chip_rev;
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1532,6 +1534,7 @@ struct bnxt {
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
@@ -1845,7 +1848,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
- u8 switch_id[8];
+ u8 dsn[8];
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
struct notifier_block tc_netdev_nb;
@@ -1936,9 +1939,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type)
case HWRM_CFA_ENCAP_RECORD_FREE:
case HWRM_CFA_DECAP_FILTER_ALLOC:
case HWRM_CFA_DECAP_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_ALLOC:
- case HWRM_CFA_NTUPLE_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_CFG:
case HWRM_CFA_EM_FLOW_ALLOC:
case HWRM_CFA_EM_FLOW_FREE:
case HWRM_CFA_EM_FLOW_CFG:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index acb2dd64c023..eec0168330b7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,6 +21,7 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
const char *region, struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
if (region)
return -EOPNOTSUPP;
@@ -31,7 +32,18 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
return -EPERM;
}
- return bnxt_flash_package_from_file(bp->dev, filename, 0);
+ devlink_flash_update_begin_notify(dl);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
+ 0);
+ rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
+ if (!rc)
+ devlink_flash_update_status_notify(dl, "Flashing done", region,
+ 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flashing failed",
+ region, 0, 0);
+ devlink_flash_update_end_notify(dl);
+ return rc;
}
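The begin/end notifications bracket the operation for userspace (e.g. devlink dev flash can display the status strings as they arrive); since this driver flashes in one opaque step, it reports 0/0 for done/total instead of real progress. The bracket pattern in isolation, with do_flash() as a hypothetical worker:

static int flash_with_notify(struct devlink *dl, const char *component)
{
	int rc;

	devlink_flash_update_begin_notify(dl);
	devlink_flash_update_status_notify(dl, "Preparing to flash",
					   component, 0, 0);
	rc = do_flash(dl, component);		/* hypothetical worker */
	devlink_flash_update_status_notify(dl, rc ? "Flashing failed" :
					   "Flashing done", component, 0, 0);
	devlink_flash_update_end_notify(dl);
	return rc;
}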
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -39,11 +51,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- struct bnxt_fw_health *health = bp->fw_health;
u32 val, health_status;
int rc;
- if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
@@ -90,7 +101,7 @@ static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
return -EOPNOTSUPP;
bnxt_fw_reset(bp);
- return 0;
+ return -EINPROGRESS;
}
static const
@@ -117,7 +128,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
bnxt_fw_exception(bp);
- return 0;
+ return -EINPROGRESS;
}
static const
@@ -126,21 +137,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
.recover = bnxt_fw_fatal_recover,
};
-static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!health)
+ if (!bp->dl || !health)
return;
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
- 0, false, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- }
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
+ goto err_recovery;
health->fw_reset_reporter =
devlink_health_reporter_create(bp->dl,
@@ -150,8 +155,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_reset_reporter));
health->fw_reset_reporter = NULL;
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ }
+
+err_recovery:
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return;
+
+ if (!health->fw_reporter) {
+ health->fw_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_reporter_ops,
+ 0, false, bp);
+ if (IS_ERR(health->fw_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reporter));
+ health->fw_reporter = NULL;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return;
+ }
}
+ if (health->fw_fatal_reporter)
+ return;
+
health->fw_fatal_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_fatal_reporter_ops,
@@ -160,24 +187,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
health->fw_fatal_reporter = NULL;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}
}
-static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!health)
+ if (!bp->dl || !health)
return;
- if (health->fw_reporter)
- devlink_health_reporter_destroy(health->fw_reporter);
-
- if (health->fw_reset_reporter)
+ if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
+ health->fw_reset_reporter) {
devlink_health_reporter_destroy(health->fw_reset_reporter);
+ health->fw_reset_reporter = NULL;
+ }
- if (health->fw_fatal_reporter)
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
+ return;
+
+ if (health->fw_reporter) {
+ devlink_health_reporter_destroy(health->fw_reporter);
+ health->fw_reporter = NULL;
+ }
+
+ if (health->fw_fatal_reporter) {
devlink_health_reporter_destroy(health->fw_fatal_reporter);
+ health->fw_fatal_reporter = NULL;
+ }
}
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
@@ -185,9 +223,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
struct bnxt_fw_health *fw_health = bp->fw_health;
struct bnxt_fw_reporter_ctx fw_reporter_ctx;
- if (!fw_health)
- return;
-
fw_reporter_ctx.sp_event = event;
switch (event) {
case BNXT_FW_RESET_NOTIFY_SP_EVENT:
@@ -239,14 +274,30 @@ void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
health->fatal = false;
}
+void bnxt_dl_health_recovery_done(struct bnxt *bp)
+{
+ struct bnxt_fw_health *hlth = bp->fw_health;
+
+ if (hlth->fatal)
+ devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
+ else
+ devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+}
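devlink_health_reporter_recovery_done() pairs with the -EINPROGRESS returns added to the .recover callbacks above: the reporter stays in recovery until the asynchronous firmware reset completes, and this call from the reset task flips it back to healthy. A sketch of that split, with hypothetical driver names (foo_priv, fw_reset_work, fw_reporter):

static int foo_fw_recover(struct devlink_health_reporter *reporter,
			  void *priv_ctx, struct netlink_ext_ack *extack)
{
	struct foo_priv *fp = devlink_health_reporter_priv(reporter);

	schedule_work(&fp->fw_reset_work);	/* hypothetical work item */
	return -EINPROGRESS;			/* recovery finishes later */
}

/* ...then, at the end of the reset work handler: */
devlink_health_reporter_recovery_done(fp->fw_reporter);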
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
+ .info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
};
+static const struct devlink_ops bnxt_vf_dl_ops;
+
enum bnxt_dl_param_id {
BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
@@ -308,6 +359,136 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
dst->vu8 = (u8)val32;
}
+static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
+ union devlink_param_value *nvm_cfg_ver)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ union bnxt_nvm_data *data;
+ dma_addr_t data_dma_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+ &data_dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ req.dest_data_addr = cpu_to_le64(data_dma_addr);
+ req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
+ req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
+ BNXT_NVM_CFG_VER_BITS,
+ BNXT_NVM_CFG_VER_BYTES);
+
+ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+ return rc;
+}
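bnxt_hwrm_get_nvm_cfg_ver() follows the usual HWRM indirect-response shape: allocate a DMA-coherent buffer, point the request's dest_data_addr at it, let firmware fill it, copy out, free. The skeleton in isolation (struct hwrm_req and send_request() are hypothetical stand-ins for the real request type and messaging call):

static int hwrm_read_indirect(struct bnxt *bp, struct hwrm_req *req,
			      void *out, size_t len)
{
	dma_addr_t mapping;
	void *buf;
	int rc;

	buf = dma_alloc_coherent(&bp->pdev->dev, len, &mapping, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	req->dest_data_addr = cpu_to_le64(mapping);	/* firmware writes here */
	rc = send_request(bp, req);			/* hypothetical send */
	if (!rc)
		memcpy(out, buf, len);			/* copy while still mapped */

	dma_free_coherent(&bp->pdev->dev, len, buf, mapping);
	return rc;
}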
+
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ union devlink_param_value nvm_cfg_ver;
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char fw_ver[FW_VER_STR_LEN];
+ char buf[32];
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
+ if (rc)
+ return rc;
+
+ sprintf(buf, "%X", bp->chip_num);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc)
+ return rc;
+
+ ver_resp = &bp->ver_resp;
+ sprintf(buf, "%X", ver_resp->chip_rev);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc)
+ return rc;
+
+ if (BNXT_PF(bp)) {
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
+ bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (strlen(ver_resp->active_pkg_name)) {
+ rc =
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ ver_resp->active_pkg_name);
+ if (rc)
+ return rc;
+ }
+
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ u32 ver = nvm_cfg_ver.vu32;
+
+ sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
+ ver & 0xF);
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
+ if (rc)
+ return rc;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
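Once this .info_get callback is registered, the versions become visible through the standard devlink query; the generic keys used above appear in the output as asic.id, asic.rev, fw, fw.psid, fw.app, fw.mgmt and fw.roce, alongside the driver name and the DSN-derived serial number. Illustrative invocation (the device address is an example):

$ devlink dev info pci/0000:3b:00.0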
+
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
{
@@ -450,17 +631,53 @@ static const struct devlink_param bnxt_dl_params[] = {
static const struct devlink_param bnxt_dl_port_params[] = {
};
-int bnxt_dl_register(struct bnxt *bp)
+static int bnxt_dl_params_register(struct bnxt *bp)
{
- struct devlink *dl;
int rc;
- if (bp->hwrm_spec_code < 0x10600) {
- netdev_warn(bp->dev, "Firmware does not support NVM params");
- return -ENOTSUPP;
+ if (bp->hwrm_spec_code < 0x10600)
+ return 0;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ rc);
+ return rc;
+ }
+ rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+ if (rc) {
+ netdev_err(bp->dev, "devlink_port_params_register failed");
+ devlink_params_unregister(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ return rc;
}
+ devlink_params_publish(bp->dl);
- dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+ return 0;
+}
+
+static void bnxt_dl_params_unregister(struct bnxt *bp)
+{
+ if (bp->hwrm_spec_code < 0x10600)
+ return;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+}
+
+int bnxt_dl_register(struct bnxt *bp)
+{
+ struct devlink *dl;
+ int rc;
+
+ if (BNXT_PF(bp))
+ dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+ else
+ dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
if (!dl) {
netdev_warn(bp->dev, "devlink_alloc failed");
return -ENOMEM;
@@ -479,42 +696,26 @@ int bnxt_dl_register(struct bnxt *bp)
goto err_dl_free;
}
- rc = devlink_params_register(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
- netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
- rc);
- goto err_dl_unreg;
- }
+ if (!BNXT_PF(bp))
+ return 0;
devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- bp->pf.port_id, false, 0,
- bp->switch_id, sizeof(bp->switch_id));
+ bp->pf.port_id, false, 0, bp->dsn,
+ sizeof(bp->dsn));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
- goto err_dl_param_unreg;
+ goto err_dl_unreg;
}
- devlink_port_type_eth_set(&bp->dl_port, bp->dev);
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed");
+ rc = bnxt_dl_params_register(bp);
+ if (rc)
goto err_dl_port_unreg;
- }
-
- devlink_params_publish(dl);
-
- bnxt_dl_fw_reporters_create(bp);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_param_unreg:
- devlink_params_unregister(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
err_dl_unreg:
devlink_unregister(dl);
err_dl_free:
@@ -530,12 +731,10 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
- bnxt_dl_fw_reporters_destroy(bp);
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- devlink_port_unregister(&bp->dl_port);
- devlink_params_unregister(dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
+ if (BNXT_PF(bp)) {
+ bnxt_dl_params_unregister(bp);
+ devlink_port_unregister(&bp->dl_port);
+ }
devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 665d4bdcd8c0..95f893f2a74d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -38,6 +38,10 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define NVM_OFF_IGNORE_ARI 164
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_NVM_CFG_VER 602
+
+#define BNXT_NVM_CFG_VER_BITS 24
+#define BNXT_NVM_CFG_VER_BYTES 4
#define BNXT_MSIX_VEC_MAX 1280
#define BNXT_MSIX_VEC_MIN_MAX 128
@@ -58,6 +62,9 @@ struct bnxt_dl_nvm_param {
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_recovery_done(struct bnxt *bp);
+void bnxt_dl_fw_reporters_create(struct bnxt *bp);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2ccf79cdcb1e..6171fa8b3677 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1462,15 +1462,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
ethtool_link_ksettings_add_link_mode(lk_ksettings,
advertising, Autoneg);
base->autoneg = AUTONEG_ENABLE;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ base->duplex = DUPLEX_UNKNOWN;
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ else
+ base->duplex = DUPLEX_HALF;
+ }
ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
- if (!netif_carrier_ok(dev))
- base->duplex = DUPLEX_UNKNOWN;
- else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
- else
- base->duplex = DUPLEX_HALF;
} else {
base->autoneg = AUTONEG_DISABLE;
ethtool_speed =
@@ -2707,7 +2707,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (netif_carrier_ok(bp->dev))
+ if (bp->link_info.link_up)
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
@@ -3071,8 +3071,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
}
}
- if (info->dest_buf)
- memcpy(info->dest_buf + off, dma_buf, len);
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+ memcpy(info->dest_buf + off, dma_buf, len);
+ } else {
+ rc = -ENOBUFS;
+ break;
+ }
+ }
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
@@ -3126,7 +3133,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
- void *buf, u32 offset)
+ void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input req = {0};
struct bnxt_hwrm_dbg_dma_info info = {NULL};
@@ -3141,8 +3148,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
data_len);
- if (buf)
+ if (buf) {
info.dest_buf = buf + offset;
+ info.buf_len = buf_len;
+ info.seg_start = offset;
+ }
rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
if (!rc)
@@ -3232,14 +3242,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
struct coredump_segment_record *seg_record = NULL;
- u32 offset = 0, seg_hdr_len, seg_record_len;
struct bnxt_coredump_segment_hdr seg_hdr;
struct bnxt_coredump coredump = {NULL};
time64_t start_time;
u16 start_utc;
int rc = 0, i;
+ if (buf)
+ buf_len = *dump_len;
+
start_time = ktime_get_real_seconds();
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
@@ -3272,6 +3285,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
u32 duration = 0, seg_len = 0;
unsigned long start, end;
+ if (buf && ((offset + seg_hdr_len) >
+ BNXT_COREDUMP_BUF_LEN(buf_len))) {
+ rc = -ENOBUFS;
+ goto err;
+ }
+
start = jiffies;
rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
@@ -3284,9 +3303,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
/* Write segment data into the buffer */
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
- &seg_len, buf,
+ &seg_len, buf, buf_len,
offset + seg_hdr_len);
- if (rc)
+ if (rc && rc == -ENOBUFS)
+ goto err;
+ else if (rc)
netdev_err(bp->dev,
"Failed to retrieve coredump for seg = %d\n",
seg_record->segment_id);
@@ -3316,7 +3337,8 @@ err:
rc);
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
-
+ if (rc == -ENOBUFS)
+ netdev_err(bp->dev, "Firmware returned large coredump buffer");
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 4428d0abcbc1..3576d951727b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -31,6 +31,8 @@ struct bnxt_coredump {
u16 total_segs;
};
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
struct bnxt_hwrm_dbg_dma_info {
void *dest_buf;
int dest_buf_size;
@@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
u16 seq_off;
u16 data_len_off;
u16 segs;
+ u32 seg_start;
+ u32 buf_len;
};
struct hwrm_dbg_cmn_input {
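The macro and the seg_start/buf_len fields added above implement a simple reservation: the caller's buffer must keep sizeof(struct bnxt_coredump_record) bytes free at the tail for the trailer, so every copy is bounds-checked against BNXT_COREDUMP_BUF_LEN(buf_len) rather than buf_len. A small stand-alone check with an assumed record size:

#include <stdio.h>

#define RECORD_LEN 128			/* assumed trailer size, for illustration */
#define COREDUMP_BUF_LEN(len)	((len) - RECORD_LEN)

/* Mirrors the -ENOBUFS checks: data may only land below the trailer. */
static int segment_fits(unsigned int seg_start, unsigned int off,
			unsigned int len, unsigned int buf_len)
{
	return seg_start + off + len <= COREDUMP_BUF_LEN(buf_len);
}

int main(void)
{
	printf("%d\n", segment_fits(0, 0, 4096, 4096));	/* 0: trailer clobbered */
	printf("%d\n", segment_fits(0, 0, 4096, 8192));	/* 1: fits */
	return 0;
}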
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index c601ff7b8f61..4a316c4b3fa8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_hw_resc *hw_resc;
int max_idx, max_cp_rings;
int avail_msix, idx;
+ int total_vecs;
int rc = 0;
ASSERT_RTNL();
@@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
}
edev->ulp_tbl[ulp_id].msix_base = idx;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- if (bp->total_irqs < (idx + avail_msix)) {
+ hw_resc = &bp->hw_resc;
+ total_vecs = idx + avail_msix;
+ if (bp->total_irqs < total_vecs ||
+ (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
if (netif_running(dev)) {
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);
@@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
}
if (BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int resv_msix;
resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index f9bf7d7250ab..b010b34cdaf8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
struct net_device *dev;
int rc, i;
+ if (!(bp->flags & BNXT_FLAG_DSN_VALID))
+ return -ENODEV;
+
bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
if (!bp->vf_reps)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 120fa05a39ff..e50a15397e11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2019 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -508,8 +508,8 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
-static int bcmgenet_set_rx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 rbuf_chk_ctrl;
@@ -521,7 +521,7 @@ static int bcmgenet_set_rx_csum(struct net_device *dev,
/* enable rx checksumming */
if (rx_csum_en)
- rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
else
rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
priv->desc_rxchk_en = rx_csum_en;
@@ -535,12 +535,10 @@ static int bcmgenet_set_rx_csum(struct net_device *dev,
rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-
- return 0;
}
-static int bcmgenet_set_tx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
bool desc_64b_en;
@@ -549,7 +547,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
- desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);
/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
if (desc_64b_en) {
@@ -563,21 +561,27 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-
- return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ dev->features;
- netdev_features_t wanted = dev->wanted_features;
- int ret = 0;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
- if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
- ret = bcmgenet_set_tx_csum(dev, wanted);
- if (changed & (NETIF_F_RXCSUM))
- ret = bcmgenet_set_rx_csum(dev, wanted);
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ bcmgenet_set_tx_csum(dev, features);
+ bcmgenet_set_rx_csum(dev, features);
+
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -857,6 +861,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
+ STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
+ mib.tx_realloc_tsb_failed),
/* Per TX queues */
STAT_GENET_Q(0),
STAT_GENET_Q(1),
@@ -1218,18 +1225,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
}
-/* ioctl handle special commands that are not present in ethtool. */
-static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *ring)
{
@@ -1483,6 +1478,7 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
struct sk_buff *skb)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
u16 offset;
@@ -1495,12 +1491,15 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
* enough headroom for us to insert 64B status block.
*/
new_skb = skb_realloc_headroom(skb, sizeof(*status));
- dev_kfree_skb(skb);
if (!new_skb) {
+ dev_kfree_skb_any(skb);
+ priv->mib.tx_realloc_tsb_failed++;
dev->stats.tx_dropped++;
return NULL;
}
+ dev_consume_skb_any(skb);
skb = new_skb;
+ priv->mib.tx_realloc_tsb++;
}
skb_push(skb, sizeof(*status));
@@ -1516,24 +1515,19 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
ip_proto = ipv6_hdr(skb)->nexthdr;
break;
default:
- return skb;
+ /* don't use UDP flag */
+ ip_proto = 0;
+ break;
}
offset = skb_checksum_start_offset(skb) - sizeof(*status);
tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
- (offset + skb->csum_offset);
+ (offset + skb->csum_offset) |
+ STATUS_TX_CSUM_LV;
- /* Set the length valid bit for TCP and UDP and just set
- * the special UDP flag for IPv4, else just set to 0.
- */
- if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
- tx_csum_info |= STATUS_TX_CSUM_LV;
- if (ip_proto == IPPROTO_UDP &&
- ip_ver == htons(ETH_P_IP))
- tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
- } else {
- tx_csum_info = 0;
- }
+ /* Set the special UDP flag for UDP */
+ if (ip_proto == IPPROTO_UDP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
status->tx_csum_info = tx_csum_info;
}
@@ -1744,7 +1738,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int bytes_processed = 0;
unsigned int p_index, mask;
unsigned int discards;
- unsigned int chksum_ok = 0;
/* Clear status before servicing to reduce spurious interrupts */
if (ring->index == DESC_INDEX) {
@@ -1795,9 +1788,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dmadesc_get_length_status(priv, cb->bd_addr);
} else {
struct status_64 *status;
+ __be16 rx_csum;
status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
+ rx_csum = (__force __be16)(status->rx_csum & 0xffff);
+ if (priv->desc_rxchk_en) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
@@ -1840,18 +1839,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
goto next;
} /* error packet */
- chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
- priv->desc_rxchk_en;
-
skb_put(skb, len);
if (priv->desc_64b_en) {
skb_pull(skb, 64);
len -= 64;
}
- if (likely(chksum_ok))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
/* remove hardware 2bytes added for IP alignment */
skb_pull(skb, 2);
len -= 2;
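The two hunks above switch the RX path from CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE: UNNECESSARY asserts the hardware already validated the L4 checksum, while COMPLETE just hands the stack the raw 16-bit ones-complement sum from the 64-byte status block and lets the stack verify every header itself, which also covers protocols the old per-descriptor check bit never handled. The folding the stack then performs, as a tiny runnable sketch (cf. the kernel's csum_fold()):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit ones-complement accumulator down to 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return ~sum & 0xffff;
}

int main(void)
{
	/* A packet whose data sums to 0xffff verifies to zero. */
	printf("%#x\n", csum_fold(0xffff));	/* prints 0 */
	return 0;
}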
@@ -2164,8 +2157,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+ NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
@@ -2886,9 +2879,10 @@ static int bcmgenet_open(struct net_device *dev)
init_umac(priv);
- /* Make sure we reflect the value of CRC_CMD_FWD */
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+ /* Apply features again in case we changed them while interface was
+ * down
+ */
+ bcmgenet_set_features(dev, dev->features);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
@@ -3055,7 +3049,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
ring->cb_ptr, ring->end_ptr);
}
-static void bcmgenet_timeout(struct net_device *dev)
+static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 int0_enable = 0;
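This and the later tx_timeout changes in this merge track a tree-wide API update: ndo_tx_timeout now receives the index of the queue that actually stalled, so multiqueue drivers no longer have to scan every ring. A sketch of a handler using it (driver names hypothetical):

static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "TX queue %u stuck, state %#lx\n",
		    txqueue, txq->state);
	/* reset only the affected ring instead of the whole device */
}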
@@ -3216,7 +3210,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_tx_timeout = bcmgenet_timeout,
.ndo_set_rx_mode = bcmgenet_set_rx_mode,
.ndo_set_mac_address = bcmgenet_set_mac_addr,
- .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
@@ -3327,19 +3321,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
- priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
}
/* enum genet_version starts at 1 */
@@ -3535,9 +3525,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
- /* Set hardware features */
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ /* Set default features */
+ dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
@@ -3574,6 +3566,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
+ err = -EIO;
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err;
+
/* Mii wait queue */
init_waitqueue_head(&priv->wq);
/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
@@ -3689,6 +3689,9 @@ static int bcmgenet_resume(struct device *d)
genphy_config_aneg(dev->phydev);
bcmgenet_mii_config(priv->dev, false);
+ /* Restore enabled features */
+ bcmgenet_set_features(dev, dev->features);
+
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index a5659197598f..61a6fe9f4cec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -144,6 +144,8 @@ struct bcmgenet_mib_counters {
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
+ u32 tx_realloc_tsb;
+ u32 tx_realloc_tsb_failed;
};
#define UMAC_HD_BKP_CTRL 0x004
@@ -251,6 +253,7 @@ struct bcmgenet_mib_counters {
#define RBUF_CHK_CTRL 0x14
#define RBUF_RXCHK_EN (1 << 0)
#define RBUF_SKIP_FCS (1 << 4)
+#define RBUF_L3_PARSE_DIS (1 << 5)
#define RBUF_ENERGY_CTRL 0x9c
#define RBUF_EEE_EN (1 << 0)
@@ -663,7 +666,6 @@ struct bcmgenet_priv {
bool desc_rxchk_en;
bool crc_fwd_en;
- unsigned int dma_rx_chk_bit;
u32 dma_max_burst_length;
u32 msg_enable;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 1604ad32e920..5b4568c2ad1c 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -294,7 +294,7 @@ static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
enum sbmac_fc fc);
static int sbmac_open(struct net_device *dev);
-static void sbmac_tx_timeout (struct net_device *dev);
+static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
@@ -2419,7 +2419,7 @@ static void sbmac_mii_poll(struct net_device *dev)
}
-static void sbmac_tx_timeout (struct net_device *dev)
+static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct sbmac_softc *sc = netdev_priv(dev);
unsigned long flags;
@@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev)
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
BUG_ON(!res);
- sbm_base = ioremap_nocache(res->start, resource_size(res));
+ sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
dev_name(&pldev->dev));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ca3aa1250dd1..88466255bf66 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7645,7 +7645,7 @@ static void tg3_poll_controller(struct net_device *dev)
}
#endif
-static void tg3_tx_timeout(struct net_device *dev)
+static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7874,8 +7874,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
struct netdev_queue *txq, struct sk_buff *skb)
{
- struct sk_buff *segs, *nskb;
u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+ struct sk_buff *segs, *seg, *next;
/* Estimate the number of fragments in the worst case */
if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
@@ -7898,12 +7898,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
if (IS_ERR(segs) || !segs)
goto tg3_tso_bug_end;
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- tg3_start_xmit(nskb, tp->dev);
- } while (segs);
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+ tg3_start_xmit(seg, tp->dev);
+ }
tg3_tso_bug_end:
dev_consume_skb_any(skb);
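skb_list_walk_safe() is the helper this merge adopts for GSO segment lists: it caches the next pointer before the loop body runs, so each segment can be unlinked (skb_mark_not_on_list() clears seg->next) and handed to the transmit path without corrupting the walk. The pattern in isolation, with xmit_one() as a hypothetical per-skb transmit:

static void xmit_gso_list(struct sk_buff *segs)
{
	struct sk_buff *seg, *next;

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);	/* detach before handoff */
		xmit_one(seg);			/* hypothetical per-skb xmit */
	}
}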
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 4042c2185e98..e17bfc87da90 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1124,11 +1124,10 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
- u32 pgnum, pgoff, loff = 0;
+ u32 pgnum, loff = 0;
int i;
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
- pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index e338272931d1..01a50a4b2113 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3477,7 +3477,7 @@ bnad_init(struct bnad *bnad,
bnad->pcidev = pdev;
bnad->mmio_start = pci_resource_start(pdev, 0);
bnad->mmio_len = pci_resource_len(pdev, 0);
- bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
if (!bnad->bar0) {
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 19fe4f4867c7..dbf7070fcdba 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -630,10 +630,17 @@
#define GEM_CLK_DIV96 5
/* Constants for MAN register */
-#define MACB_MAN_SOF 1
-#define MACB_MAN_WRITE 1
-#define MACB_MAN_READ 2
-#define MACB_MAN_CODE 2
+#define MACB_MAN_C22_SOF 1
+#define MACB_MAN_C22_WRITE 1
+#define MACB_MAN_C22_READ 2
+#define MACB_MAN_C22_CODE 2
+
+#define MACB_MAN_C45_SOF 0
+#define MACB_MAN_C45_ADDR 0
+#define MACB_MAN_C45_WRITE 1
+#define MACB_MAN_C45_POST_READ_INCR 2
+#define MACB_MAN_C45_READ 3
+#define MACB_MAN_C45_CODE 2
/* Capability mask bits */
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 9c767ee252ac..4508f0d150da 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt {
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN 8
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+/* Limit maximum TX length as per Cadence TSO errata. This avoids a
+ * false amba_error in the TX path, raised when the DMA assumes there is
+ * not enough space in the 16KB SRAM even when there is.
+ */
+#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
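For scale: the new cap 0x3FC0 is 16320 bytes, which preserves the 8-byte MACB_TX_LEN_ALIGN alignment and sits 64 bytes short of the 16KB (16384-byte) SRAM named in the comment.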
@@ -337,11 +341,30 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
- | MACB_BF(RW, MACB_MAN_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_CODE)));
+ if (regnum & MII_ADDR_C45) {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+ } else {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
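Clause 45 needs two bus cycles where Clause 22 needs one: an address frame that latches the 16-bit register number (carried in the DATA field, with the MMD device address in REGA), followed by the actual read or write, which is why the C45 branch above issues two MAN writes with an idle wait between them. The MDIO core flags such accesses by packing them into a single regnum; a sketch of that encoding as used at this point in the tree:

#define MII_ADDR_C45	(1 << 30)	/* from linux/mdio.h of this era */

/* Pack an MMD (device) address and 16-bit register into one regnum. */
static int c45_regnum(int devad, int reg)
{
	return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (reg & 0xffff);
}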
@@ -370,12 +393,32 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
- | MACB_BF(RW, MACB_MAN_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_CODE)
- | MACB_BF(DATA, value)));
+ if (regnum & MII_ADDR_C45) {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, (regnum >> 16) & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
+ } else {
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+ }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -611,21 +654,24 @@ static const struct phylink_mac_ops macb_phylink_ops = {
.mac_link_up = macb_mac_link_up,
};
+static bool macb_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
static int macb_phylink_connect(struct macb *bp)
{
+ struct device_node *dn = bp->pdev->dev.of_node;
struct net_device *dev = bp->dev;
struct phy_device *phydev;
int ret;
- if (bp->pdev->dev.of_node &&
- of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
- ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
- 0);
- if (ret) {
- netdev_err(dev, "Could not attach PHY (%d)\n", ret);
- return ret;
- }
- } else {
+ if (dn)
+ ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+
+ if (!dn || (ret && !macb_phy_handle_exists(dn))) {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
@@ -634,10 +680,11 @@ static int macb_phylink_connect(struct macb *bp)
/* attach the mac to the phy */
ret = phylink_connect_phy(bp->phylink, phydev);
- if (ret) {
- netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
- return ret;
- }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
}
phylink_start(bp->phylink);
@@ -664,9 +711,30 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
+static int macb_mdiobus_register(struct macb *bp)
+{
+ struct device_node *child, *np = bp->pdev->dev.of_node;
+
+ /* Only create the PHY from the device tree if at least one PHY is
+ * described. Otherwise scan the entire MDIO bus. We do this to support
+	 * old device trees that did not follow best practices and did not
+ * describe their network PHYs.
+ */
+ for_each_available_child_of_node(np, child)
+ if (of_mdiobus_child_is_phy(child)) {
+ /* The loop increments the child refcount,
+ * decrement it before returning.
+ */
+ of_node_put(child);
+
+ return of_mdiobus_register(bp->mii_bus, np);
+ }
+
+ return mdiobus_register(bp->mii_bus);
+}
+
static int macb_mii_init(struct macb *bp)
{
- struct device_node *np;
int err = -ENXIO;
/* Enable management port */
@@ -688,9 +756,7 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- np = bp->pdev->dev.of_node;
-
- err = of_mdiobus_register(bp->mii_bus, np);
+ err = macb_mdiobus_register(bp);
if (err)
goto err_out_free_mdiobus;
@@ -1729,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
/* Validate LSO compatibility */
- /* there is only one buffer */
- if (!skb_is_nonlinear(skb))
+ /* there is only one buffer or protocol is not UDP */
+ if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
return features;
/* length of header */
hdrlen = skb_transport_offset(skb);
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- hdrlen += tcp_hdrlen(skb);
- /* For LSO:
+ /* For UFO only:
* When software supplies two or more payload buffers all payload buffers
* apart from the last must be a multiple of 8 bytes in size.
*/
@@ -4069,7 +4133,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
mgmt->rate = 0;
mgmt->hw.init = &init;
- *tx_clk = clk_register(NULL, &mgmt->hw);
+ *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
if (IS_ERR(*tx_clk))
return PTR_ERR(*tx_clk);
@@ -4397,7 +4461,6 @@ err_out_free_netdev:
err_disable_clocks:
clk_disable_unprepare(tx_clk);
- clk_unregister(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
clk_disable_unprepare(rx_clk);
@@ -4427,7 +4490,6 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
clk_disable_unprepare(bp->tx_clk);
- clk_unregister(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index af04a2c81adb..05a3d067c3fc 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1251,7 +1251,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void xgmac_tx_timeout(struct net_device *dev)
+static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct xgmac_priv *priv = netdev_priv(dev);
schedule_work(&priv->tx_timeout_work);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7f3b2e3b0868..eab05b5534ea 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2562,7 +2562,7 @@ lio_xmit_failed:
/** \brief Network device Tx timeout
* @param netdev pointer to network device
*/
-static void liquidio_tx_timeout(struct net_device *netdev)
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct lio *lio;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 370d76822ee0..7a77544a54f5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1628,7 +1628,7 @@ lio_xmit_failed:
/** \brief Network device Tx timeout
* @param netdev pointer to network device
*/
-static void liquidio_tx_timeout(struct net_device *netdev)
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct lio *lio;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index f3f2e71431ac..600de587d7a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -31,7 +31,7 @@ static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
struct net_device *ndev);
-static void lio_vf_rep_tx_timeout(struct net_device *netdev);
+static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
@@ -172,7 +172,7 @@ lio_vf_rep_stop(struct net_device *ndev)
}
static void
-lio_vf_rep_tx_timeout(struct net_device *ndev)
+lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netif_trans_update(ndev);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index cdd7e5da4a74..e9575887a4f8 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -790,9 +790,7 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
case SIOCSHWTSTAMP:
return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
default:
- if (netdev->phydev)
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
- return -EINVAL;
+ return phy_do_ioctl(netdev, rq, cmd);
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f28409279ea4..016957285f99 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1741,7 +1741,7 @@ static void nicvf_get_stats64(struct net_device *netdev,
}
-static void nicvf_tx_timeout(struct net_device *dev)
+static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct nicvf *nic = netdev_priv(dev);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c4f6ec0cd183..17a4110c2e49 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type)
if (lmac_type == BGX_MODE_QSGMII)
return PHY_INTERFACE_MODE_QSGMII;
if (lmac_type == BGX_MODE_RGMII)
- return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_RGMII_RXID;
return PHY_INTERFACE_MODE_SGMII;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 58f89f6a040f..883cfa9c4b6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (!is_offload(adapter))
return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
@@ -3265,7 +3267,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
- adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+ adapter->regs = ioremap(mmio_start, mmio_len);
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a70ac2097892..8b7d156f79d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -56,6 +56,7 @@
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
+#include "t4fw_api.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
@@ -68,6 +69,16 @@ extern struct mutex uld_mutex;
#define ETHTXQ_STOP_THRES \
(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -504,6 +515,7 @@ struct link_config {
enum cc_pause requested_fc; /* flow control user has requested */
enum cc_pause fc; /* actual link flow control */
+ enum cc_pause advertised_fc; /* actual advertised flow control */
enum cc_fec requested_fec; /* Forward Error Correction: */
enum cc_fec fec; /* requested and actual in use */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 93868dca186a..de30d61af065 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
@@ -3048,6 +3047,9 @@ static int sge_queue_entries(const struct adapter *adap)
int tot_uld_entries = 0;
int i;
+ if (!is_uld(adap))
+ goto lld_only;
+
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_TX_MAX; i++)
tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
@@ -3058,6 +3060,7 @@ static int sge_queue_entries(const struct adapter *adap)
}
mutex_unlock(&uld_mutex);
+lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
(adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
tot_uld_entries +
@@ -3171,14 +3174,12 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
- unsigned int tid_start = 0;
struct adapter *adap = seq->private;
- const struct tid_info *t = &adap->tids;
- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
-
- if (chip > CHELSIO_T5)
- tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+ const struct tid_info *t;
+ enum chip_type chip;
+ t = &adap->tids;
+ chip = CHELSIO_CHIP_VERSION(adap->params.chip);
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -3190,9 +3191,9 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", t->tid_base,
sb - 1, adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
@@ -3201,14 +3202,14 @@ static int tid_info_show(struct seq_file *seq, void *v)
t->aftid_base,
t->aftid_end,
adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
} else {
seq_printf(seq, "TID range: %u..%u",
adap->tids.hash_base,
- t->ntids - 1);
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->hash_tids_in_use));
}
@@ -3216,8 +3217,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: %u..%u", tid_start,
- tid_start + t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", t->tid_base,
+ t->tid_base + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -3240,6 +3241,9 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
t->sftid_base, t->sftid_base + t->nsftids - 2,
t->sftids_in_use);
+ if (t->nhpftids)
+ seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
+ t->hpftid_base + t->nhpftids - 1);
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
@@ -3399,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.fallback));
seq_printf(seq, "IPSec PDU: %10u\n",
atomic_read(&adap->chcr_stats.ipsec_cnt));
+ seq_printf(seq, "TLS PDU Tx: %10u\n",
+ atomic_read(&adap->chcr_stats.tls_pdu_tx));
+ seq_printf(seq, "TLS PDU Rx: %10u\n",
+ atomic_read(&adap->chcr_stats.tls_pdu_rx));
+ seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+ atomic_read(&adap->chcr_stats.tls_key));
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(chcr_stats);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 20ab3b6285a2..c837382ee522 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev,
struct port_info *p = netdev_priv(dev);
epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
- epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
- epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+ epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 1d39fca11810..2a2938bbb93a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -361,20 +361,22 @@ static int get_filter_count(struct adapter *adapter, unsigned int fidx,
tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
if (is_hashfilter(adapter) && hash) {
- if (fidx < adapter->tids.ntids) {
- f = adapter->tids.tid_tab[fidx];
- if (!f)
- return -EINVAL;
- } else {
+ if (tid_out_of_range(&adapter->tids, fidx))
return -E2BIG;
- }
+ f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
+ if (!f)
+ return -EINVAL;
} else {
- if ((fidx != (adapter->tids.nftids +
- adapter->tids.nsftids - 1)) &&
- fidx >= adapter->tids.nftids)
+ if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
+ adapter->tids.nhpftids - 1)) &&
+ fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
return -E2BIG;
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx -
+ adapter->tids.nhpftids];
if (!f->valid)
return -EINVAL;
}
@@ -480,6 +482,7 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
ftid -= n;
}
spin_unlock_bh(&t->ftid_lock);
+ ftid += t->nhpftids;
return found ? ftid : -ENOMEM;
}
@@ -507,6 +510,24 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
return 0;
}
+static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->hpftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == PF_INET)
+ __set_bit(fidx, t->hpftid_bmap);
+ else
+ bitmap_allocate_region(t->hpftid_bmap, fidx, 1);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
unsigned int chip_ver)
{
@@ -522,33 +543,58 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
spin_unlock_bh(&t->ftid_lock);
}
+static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (family == PF_INET)
+ __clear_bit(fidx, t->hpftid_bmap);
+ else
+ bitmap_release_region(t->hpftid_bmap, fidx, 1);
+
+ spin_unlock_bh(&t->ftid_lock);
+}
+
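(For intuition, not part of the patch: cxgb4_set_hpftid()/cxgb4_clear_hpftid() treat an IPv4 filter as one bitmap bit and an IPv6 filter as a small reserved region, mirroring the __set_bit() versus bitmap_allocate_region() split above. A toy userspace model of that bookkeeping, with a 64-slot word standing in for t->hpftid_bmap.)

#include <stdint.h>
#include <stdio.h>

static uint64_t bmap;	/* toy stand-in for t->hpftid_bmap */

/* Reserve nslots consecutive slots at fidx; fail if any is already taken. */
static int toy_set(unsigned int fidx, unsigned int nslots)
{
	uint64_t mask = ((1ULL << nslots) - 1) << fidx;

	if (bmap & mask)
		return -1;	/* like the -EBUSY path above */
	bmap |= mask;
	return 0;
}

int main(void)
{
	printf("ipv4 @3: %d\n", toy_set(3, 1));		/* one slot */
	printf("ipv6 @4: %d\n", toy_set(4, 2));		/* region of slots */
	printf("clash @5: %d\n", toy_set(5, 1));	/* fails: slot taken */
	return 0;
}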
bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
{
+ struct filter_entry *prev_fe, *next_fe, *tab;
struct adapter *adap = netdev2adap(dev);
- struct filter_entry *prev_fe, *next_fe;
+ u32 prev_ftid, next_ftid, max_tid;
struct tid_info *t = &adap->tids;
- u32 prev_ftid, next_ftid;
+ unsigned long *bmap;
bool valid = true;
+ if (idx < t->nhpftids) {
+ bmap = t->hpftid_bmap;
+ tab = t->hpftid_tab;
+ max_tid = t->nhpftids;
+ } else {
+ idx -= t->nhpftids;
+ bmap = t->ftid_bmap;
+ tab = t->ftid_tab;
+ max_tid = t->nftids;
+ }
+
/* Only insert the rule if both of the following conditions
* are met:
* 1. The immediate previous rule has priority <= @prio.
* 2. The immediate next rule has priority >= @prio.
*/
spin_lock_bh(&t->ftid_lock);
+
/* Don't insert if there's a rule already present at @idx. */
- if (test_bit(idx, t->ftid_bmap)) {
+ if (test_bit(idx, bmap)) {
valid = false;
goto out_unlock;
}
- next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
- if (next_ftid >= t->nftids)
+ next_ftid = find_next_bit(bmap, max_tid, idx);
+ if (next_ftid >= max_tid)
next_ftid = idx;
- next_fe = &adap->tids.ftid_tab[next_ftid];
+ next_fe = &tab[next_ftid];
- prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ prev_ftid = find_last_bit(bmap, idx);
if (prev_ftid >= idx)
prev_ftid = idx;
@@ -558,13 +604,13 @@ bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
* accordingly.
*/
if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
- prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3];
+ prev_fe = &tab[prev_ftid & ~0x3];
if (!prev_fe->fs.type)
- prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ prev_fe = &tab[prev_ftid];
} else {
- prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1];
+ prev_fe = &tab[prev_ftid & ~0x1];
if (!prev_fe->fs.type)
- prev_fe = &adap->tids.ftid_tab[prev_ftid];
+ prev_fe = &tab[prev_ftid];
}
if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
@@ -579,11 +625,16 @@ out_unlock:
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter_wr *fwr;
+ struct filter_entry *f;
struct sk_buff *skb;
unsigned int len;
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
+
len = sizeof(*fwr);
skb = alloc_skb(len, GFP_KERNEL);
@@ -609,10 +660,15 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
*/
int set_filter_wr(struct adapter *adapter, int fidx)
{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct fw_filter2_wr *fwr;
+ struct filter_entry *f;
struct sk_buff *skb;
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
+
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -762,10 +818,14 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
struct filter_entry *f;
int ret;
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
+ adapter->tids.nhpftids)
return -EINVAL;
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[fidx];
+ else
+ f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
ret = writable_filter(f);
if (ret)
return ret;
@@ -811,12 +871,22 @@ void clear_all_filters(struct adapter *adapter)
struct net_device *dev = adapter->port[0];
unsigned int i;
+ if (adapter->tids.hpftid_tab) {
+ struct filter_entry *f = &adapter->tids.hpftid_tab[0];
+
+ for (i = 0; i < adapter->tids.nhpftids; i++, f++)
+ if (f->valid || f->pending)
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
unsigned int max_ftid = adapter->tids.nftids +
- adapter->tids.nsftids;
+ adapter->tids.nsftids +
+ adapter->tids.nhpftids;
+
/* Clear all TCAM filters */
- for (i = 0; i < max_ftid; i++, f++)
+ for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
if (f->valid || f->pending)
cxgb4_del_filter(dev, i, &f->fs);
}
@@ -1319,17 +1389,17 @@ out_err:
* filter specification in order to facilitate signaling completion of the
* operation.
*/
-int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+int __cxgb4_set_filter(struct net_device *dev, int ftid,
struct ch_filter_specification *fs,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
- unsigned int max_fidx, fidx;
- struct filter_entry *f;
+ unsigned int max_fidx, fidx, chip_ver;
+ int iq, ret, filter_id = ftid;
+ struct filter_entry *f, *tab;
u32 iconf;
- int iq, ret;
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
if (fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_set_hash_filter(dev, fs, ctx);
@@ -1338,7 +1408,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return -EINVAL;
}
- max_fidx = adapter->tids.nftids;
+ max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
return -E2BIG;
@@ -1353,6 +1423,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (iq < 0)
return iq;
+ if (fs->prio) {
+ tab = &adapter->tids.hpftid_tab[0];
+ } else {
+ tab = &adapter->tids.ftid_tab[0];
+ filter_id = ftid - adapter->tids.nhpftids;
+ }
+
/* IPv6 filters occupy four slots and must be aligned on
* four-slot boundaries. IPv4 filters only occupy a single
* slot and have no alignment requirements but writing a new
@@ -1373,9 +1450,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
else
fidx = filter_id & ~0x1;
- if (fidx != filter_id &&
- adapter->tids.ftid_tab[fidx].fs.type) {
- f = &adapter->tids.ftid_tab[fidx];
+ if (fidx != filter_id && tab[fidx].fs.type) {
+ f = &tab[fidx];
if (f->valid) {
dev_err(adapter->pdev_dev,
"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
@@ -1399,7 +1475,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
*/
for (fidx = filter_id + 1; fidx < filter_id + 4;
fidx++) {
- f = &adapter->tids.ftid_tab[fidx];
+ f = &tab[fidx];
if (f->valid) {
dev_err(adapter->pdev_dev,
"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
@@ -1415,7 +1491,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return -EINVAL;
/* Check overlapping IPv4 filter slot */
fidx = filter_id + 1;
- f = &adapter->tids.ftid_tab[fidx];
+ f = &tab[fidx];
if (f->valid) {
pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
__func__, fidx);
@@ -1427,36 +1503,35 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
/* Check to make sure that provided filter index is not
* already in use by someone else
*/
- f = &adapter->tids.ftid_tab[filter_id];
+ f = &tab[filter_id];
if (f->valid)
return -EBUSY;
- fidx = filter_id + adapter->tids.ftid_base;
- ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET,
- chip_ver);
+ if (fs->prio) {
+ fidx = filter_id + adapter->tids.hpftid_base;
+ ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ } else {
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
+ }
+
if (ret)
return ret;
/* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
- if (ret) {
- /* Clear the bits we have set above */
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET,
- chip_ver);
- return ret;
- }
+ if (ret)
+ goto free_tid;
if (is_t6(adapter->params.chip) && fs->type &&
ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
IPV6_ADDR_ANY) {
ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
- if (ret) {
- cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
- chip_ver);
- return ret;
- }
+ if (ret)
+ goto free_tid;
}
/* Convert the filter specification into our internal format.
@@ -1487,7 +1562,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
f->fs.mask.vni,
0, 1, 1);
if (ret < 0)
- goto free_clip;
+ goto free_tid;
f->fs.val.ovlan = ret;
f->fs.mask.ovlan = 0x1ff;
@@ -1501,21 +1576,22 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
*/
f->ctx = ctx;
f->tid = fidx; /* Save the actual tid */
- ret = set_filter_wr(adapter, filter_id);
- if (ret) {
+ ret = set_filter_wr(adapter, ftid);
+ if (ret)
+ goto free_tid;
+
+ return ret;
+
+free_tid:
+ if (f->fs.prio)
+ cxgb4_clear_hpftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ else
cxgb4_clear_ftid(&adapter->tids, filter_id,
fs->type ? PF_INET6 : PF_INET,
chip_ver);
- clear_filter(adapter, f);
- }
-
- return ret;
-free_clip:
- if (is_t6(adapter->params.chip) && f->fs.type)
- cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET, chip_ver);
+ clear_filter(adapter, f);
return ret;
}
@@ -1537,7 +1613,7 @@ static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
__func__, filter_id, adapter->tids.nftids);
- if (filter_id > adapter->tids.ntids)
+ if (tid_out_of_range(t, filter_id))
return -E2BIG;
f = lookup_tid(t, filter_id);
@@ -1590,11 +1666,11 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
- unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int max_fidx, chip_ver;
struct filter_entry *f;
- unsigned int max_fidx;
int ret;
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
if (fs && fs->hash) {
if (is_hashfilter(adapter))
return cxgb4_del_hash_filter(dev, filter_id, ctx);
@@ -1603,21 +1679,31 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
return -EINVAL;
}
- max_fidx = adapter->tids.nftids;
+ max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
return -E2BIG;
- f = &adapter->tids.ftid_tab[filter_id];
+ if (filter_id < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[filter_id];
+ else
+ f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+
ret = writable_filter(f);
if (ret)
return ret;
if (f->valid) {
f->ctx = ctx;
- cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET,
- chip_ver);
+ if (f->fs.prio)
+ cxgb4_clear_hpftid(&adapter->tids,
+ f->tid - adapter->tids.hpftid_base,
+ f->fs.type ? PF_INET6 : PF_INET);
+ else
+ cxgb4_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
@@ -1842,11 +1928,18 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
max_fidx = adap->tids.nftids + adap->tids.nsftids;
/* Get the corresponding filter entry for this tid */
if (adap->tids.ftid_tab) {
- /* Check this in normal filter region */
- idx = tid - adap->tids.ftid_base;
- if (idx >= max_fidx)
- return;
- f = &adap->tids.ftid_tab[idx];
+ idx = tid - adap->tids.hpftid_base;
+ if (idx < adap->tids.nhpftids) {
+ f = &adap->tids.hpftid_tab[idx];
+ } else {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ idx += adap->tids.nhpftids;
+ }
+
if (f->tid != tid)
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 12ff69b3ba91..649842a8aa28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -804,6 +804,26 @@ static int setup_ppod_edram(struct adapter *adap)
return 0;
}
+static void adap_config_hpfilter(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int ret;
+
+ /* Enable HP filter region. Older fw will fail this request, and
+ * that is fine.
+ */
+ param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &param, &val);
+
+ /* An error means FW doesn't know about HP filter support,
+ * which is not a problem, so don't return an error.
+ */
+ if (ret < 0)
+ dev_err(adapter->pdev_dev,
+ "HP filter region isn't supported by FW\n");
+}
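(adap_config_hpfilter() above is a best-effort capability probe: request the feature and tolerate rejection from firmware that predates it. A hedged standalone sketch of the same control flow; fake_set_params() and the parameter id are stand-ins, not the real t4_set_params() interface.)

#include <stdio.h>

/* Stand-in for t4_set_params(); pretend old firmware rejects the request. */
static int fake_set_params(unsigned int param, unsigned int val)
{
	(void)param;
	(void)val;
	return -22;	/* errno-style failure from older firmware */
}

static void config_optional_feature(void)
{
	int ret = fake_set_params(0x26 /* illustrative param id */, 0);

	/* Failure only means the feature is absent; carry on regardless. */
	if (ret < 0)
		fprintf(stderr, "HP filter region not supported by FW\n");
}

int main(void)
{
	config_optional_feature();
	return 0;
}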
+
/**
* cxgb4_write_rss - write the RSS table for a given port
* @pi: the port
@@ -1427,8 +1447,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
unsigned int tid)
{
- void **p = &t->tid_tab[tid];
struct adapter *adap = container_of(t, struct adapter, tids);
+ void **p = &t->tid_tab[tid - t->tid_base];
spin_lock_bh(&adap->tid_release_lock);
*p = adap->tid_release_head;
@@ -1480,13 +1500,13 @@ static void process_tid_release_list(struct work_struct *work)
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
unsigned short family)
{
- struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
+ struct sk_buff *skb;
- WARN_ON(tid >= t->ntids);
+ WARN_ON(tid_out_of_range(&adap->tids, tid));
- if (t->tid_tab[tid]) {
- t->tid_tab[tid] = NULL;
+ if (t->tid_tab[tid - adap->tids.tid_base]) {
+ t->tid_tab[tid - adap->tids.tid_base] = NULL;
atomic_dec(&t->conns_in_use);
if (t->hash_base && (tid >= t->hash_base)) {
if (family == AF_INET6)
@@ -1518,6 +1538,7 @@ static int tid_init(struct tid_info *t)
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
+ unsigned int hpftid_bmap_size;
unsigned int eotid_bmap_size;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
@@ -1525,12 +1546,15 @@ static int tid_init(struct tid_info *t)
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
+ hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
eotid_bmap_size = BITS_TO_LONGS(t->neotids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
+ t->nhpftids * sizeof(*t->hpftid_tab) +
+ hpftid_bmap_size * sizeof(long) +
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long) +
t->neotids * sizeof(*t->eotid_tab) +
@@ -1543,7 +1567,9 @@ static int tid_init(struct tid_info *t)
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
- t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
+ t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
@@ -1578,6 +1604,8 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->eotid_bmap, t->neotids);
}
+ if (t->nhpftids)
+ bitmap_zero(t->hpftid_bmap, t->nhpftids);
bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
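(tid_init() sizes one backing allocation and carves consecutive tables and bitmaps out of it, which is why the hpftid table and bitmap are spliced into both the size computation and the pointer chain above. A toy illustration of the carving idiom; the types and counts are made up.)

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int v;
};

int main(void)
{
	size_t ntab = 8, bmap_words = 1;
	/* One backing allocation for table + bitmap, like tid_init(). */
	size_t size = ntab * sizeof(struct entry) +
		      bmap_words * sizeof(unsigned long);
	void *base = calloc(1, size);

	if (!base)
		return 1;

	struct entry *tab = base;				/* first region */
	unsigned long *bmap = (unsigned long *)&tab[ntab];	/* follows it */

	printf("tab=%p bmap=%p\n", (void *)tab, (void *)bmap);
	free(base);
	return 0;
}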
@@ -3135,9 +3163,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
+ struct ch_sched_queue qe = { 0 };
+ struct ch_sched_params p = { 0 };
struct sched_class *e;
- struct ch_sched_params p;
- struct ch_sched_queue qe;
u32 req_rate;
int err = 0;
@@ -3154,6 +3182,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return -EINVAL;
}
+ qe.queue = index;
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
+ dev_err(adap->pdev_dev,
+ "Queue %u already bound to class %u of type: %u\n",
+ index, e->idx, e->info.u.params.level);
+ return -EBUSY;
+ }
+
/* Convert from Mbps to Kbps */
req_rate = rate * 1000;
@@ -3183,7 +3220,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return 0;
/* Fetch any available unused or matching scheduling class */
- memset(&p, 0, sizeof(p));
p.type = SCHED_CLASS_TYPE_PACKET;
p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
p.u.params.mode = SCHED_CLASS_MODE_CLASS;
@@ -4351,6 +4387,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"HMA configuration failed with error %d\n", ret);
if (is_t6(adapter->params.chip)) {
+ adap_config_hpfilter(adapter);
ret = setup_ppod_edram(adapter);
if (!ret)
dev_info(adapter->pdev_dev, "Successfully enabled "
@@ -4660,16 +4697,6 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/*
* Grab some of our basic fundamental operating parameters.
*/
-#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
-
-#define FW_PARAM_PFVF(param) \
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y_V(0) | \
- FW_PARAMS_PARAM_Z_V(0)
-
params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
params[2] = FW_PARAM_PFVF(L2T_END);
@@ -4687,6 +4714,16 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->sge.ingr_start = val[5];
if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ params[0] = FW_PARAM_PFVF(HPFILTER_START);
+ params[1] = FW_PARAM_PFVF(HPFILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (ret < 0)
+ goto bye;
+
+ adap->tids.hpftid_base = val[0];
+ adap->tids.nhpftids = val[1] - val[0] + 1;
+
/* Read the raw mps entries. In T6, the last 2 tcam entries
* are reserved for raw mac addresses (rawf = 2, one per port).
*/
@@ -4698,6 +4735,9 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
adap->rawf_start = val[0];
adap->rawf_cnt = val[1] - val[0] + 1;
}
+
+ adap->tids.tid_base =
+ t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
}
/* qids (ingress/egress) returned from firmware can be anywhere
@@ -5050,8 +5090,6 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
}
adap->params.crypto = ntohs(caps_cmd.cryptocaps);
}
-#undef FW_PARAM_PFVF
-#undef FW_PARAM_DEV
/* The MTU/MSS Table is initialized by now, so load their values. If
* we're initializing the adapter, then we'll make any modifications
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 0fa80bef575d..bb5513bdd293 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -672,10 +672,14 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
* 0 to driver. However, the hardware TCAM index
* starts from 0. Hence, the -1 here.
*/
- if (cls->common.prio <= adap->tids.nftids)
+ if (cls->common.prio <= (adap->tids.nftids +
+ adap->tids.nhpftids)) {
fidx = cls->common.prio - 1;
- else
+ if (fidx < adap->tids.nhpftids)
+ fs->prio = 1;
+ } else {
fidx = cxgb4_get_free_ftid(dev, inet_family);
+ }
/* Only insert FLOWER rule if its priority doesn't
* conflict with existing rules in the LETCAM.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 102b370fbd3e..1b7681a4eb32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct flow_action *actions = &cls->rule->action;
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
+ struct ch_sched_queue qe;
+ struct sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
}
}
+ for (i = 0; i < pi->nqsets; i++) {
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = i;
+
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Some queues are already bound to different class");
+ return -EBUSY;
+ }
+ }
+
return 0;
}
+static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = tc;
+ ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (i--) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+
+ return ret;
+}
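(cxgb4_matchall_tc_bind_queues() above uses the classic partial-failure unwind: bind queues in order and, on error, walk back over the ones already bound. A generic sketch of the idiom; bind_one()/unbind_one() are made-up stand-ins.)

#include <stdio.h>

static int bind_one(unsigned int i)
{
	return i == 2 ? -1 : 0;	/* simulate a failure on the third item */
}

static void unbind_one(unsigned int i)
{
	printf("rolled back %u\n", i);
}

static int bind_all(unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (bind_one(i))
			goto out_free;
	return 0;

out_free:
	while (i--)	/* undo items 0..i-1 in reverse order */
		unbind_one(i);
	return -1;
}

int main(void)
{
	return bind_all(4) ? 1 : 0;
}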
+
+static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+}
+
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
struct sched_class *e;
+ int ret;
u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
@@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
return -ENOMEM;
}
+ ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not bind queues to traffic class");
+ goto out_free;
+ }
+
tc_port_matchall->egress.hwtc = e->idx;
tc_port_matchall->egress.cookie = cls->cookie;
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
+
+out_free:
+ cxgb4_sched_class_free(dev, e->idx);
+ return ret;
}
static void cxgb4_matchall_free_tc(struct net_device *dev)
@@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_matchall_tc_unbind_queues(dev);
cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
@@ -137,7 +204,7 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
* -1 here. 1 slot is enough to create a wildcard matchall
* VIID rule.
*/
- if (cls->common.prio <= adap->tids.nftids)
+ if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
fidx = cls->common.prio - 1;
else
fidx = cxgb4_get_free_ftid(dev, PF_INET);
@@ -156,6 +223,8 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
fs = &tc_port_matchall->ingress.fs;
memset(fs, 0, sizeof(*fs));
+ if (fidx < adap->tids.nhpftids)
+ fs->prio = 1;
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
fs->hitcnts = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 477973d2e341..ec3eb45ee3b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
u32 speed, qcount = 0, qoffset = 0;
+ u32 start_a, start_b, end_a, end_b;
int ret;
- u8 i;
+ u8 i, j;
if (!mqprio->qopt.num_tc)
return 0;
@@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
qcount += mqprio->qopt.count[i];
+ start_a = mqprio->qopt.offset[i];
+ end_a = start_a + mqprio->qopt.count[i] - 1;
+ for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
+ start_b = mqprio->qopt.offset[j];
+ end_b = start_b + mqprio->qopt.count[j] - 1;
+
+ /* If queue count is 0, then the traffic
+ * belonging to this class will not use
+ * ETHOFLD queues. So, no need to validate
+ * further.
+ */
+ if (!mqprio->qopt.count[i])
+ break;
+
+ if (!mqprio->qopt.count[j])
+ continue;
+
+ if (max_t(u32, start_a, start_b) <=
+ min_t(u32, end_a, end_b)) {
+ netdev_err(dev,
+ "Queues can't overlap across tc\n");
+ return -EINVAL;
+ }
+ }
+
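(The check above is the standard closed-interval intersection test: ranges [start_a, end_a] and [start_b, end_b] overlap exactly when max(start) <= min(end). A standalone restatement.)

#include <stdio.h>

/* Closed intervals [s1,e1] and [s2,e2] overlap iff max(start) <= min(end). */
static int ranges_overlap(unsigned int s1, unsigned int e1,
			  unsigned int s2, unsigned int e2)
{
	unsigned int lo = s1 > s2 ? s1 : s2;
	unsigned int hi = e1 < e2 ? e1 : e2;

	return lo <= hi;
}

int main(void)
{
	printf("%d\n", ranges_overlap(0, 3, 4, 7));	/* 0: disjoint tcs */
	printf("%d\n", ranges_overlap(0, 4, 4, 7));	/* 1: queue 4 shared */
	return 0;
}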
/* Convert byte per second to bits per second */
min_rate += (mqprio->min_rate[i] * 8);
max_rate += (mqprio->max_rate[i] * 8);
@@ -145,6 +171,10 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
kfree(adap->sge.eohw_rxq);
return -ENOMEM;
}
+
+ refcount_set(&adap->tc_mqprio->refcnt, 1);
+ } else {
+ refcount_inc(&adap->tc_mqprio->refcnt);
}
if (!(adap->flags & CXGB4_USING_MSIX))
@@ -205,7 +235,6 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
cxgb4_enable_rx(adap, &eorxq->rspq);
}
- refcount_inc(&adap->tc_mqprio->refcnt);
return 0;
out_free_msix:
@@ -234,9 +263,10 @@ out_free_queues:
t4_sge_free_ethofld_txq(adap, eotxq);
}
- kfree(adap->sge.eohw_txq);
- kfree(adap->sge.eohw_rxq);
-
+ if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
+ kfree(adap->sge.eohw_txq);
+ kfree(adap->sge.eohw_rxq);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 133f8623ba86..269b8d9e25e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -176,7 +176,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Only insert U32 rule if its priority doesn't conflict with
* existing rules in the LETCAM.
*/
- if (filter_id >= adapter->tids.nftids ||
+ if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids ||
!cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
@@ -199,6 +199,8 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
memset(&fs, 0, sizeof(fs));
+ if (filter_id < adapter->tids.nhpftids)
+ fs.prio = 1;
fs.tc_prio = cls->common.prio;
fs.tc_cookie = cls->knode.handle;
@@ -355,6 +357,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
unsigned int filter_id, max_tids, i, j;
struct cxgb4_link *link = NULL;
struct cxgb4_tc_u32_table *t;
+ struct filter_entry *f;
u32 handle, uhtid;
int ret;
@@ -363,8 +366,15 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Fetch the location to delete the filter. */
filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id >= adapter->tids.nftids ||
- cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie)
+ if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids)
+ return -ERANGE;
+
+ if (filter_id < adapter->tids.nhpftids)
+ f = &adapter->tids.hpftid_tab[filter_id];
+ else
+ f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+
+ if (cls->knode.handle != f->fs.tc_cookie)
return -ERANGE;
t = adapter->tc_u32;
@@ -445,7 +455,7 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
- unsigned int max_tids = adap->tids.nftids;
+ unsigned int max_tids = adap->tids.nftids + adap->tids.nhpftids;
struct cxgb4_tc_u32_table *t;
unsigned int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 861b25d28ed6..d9d27bc1ae67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -99,6 +99,7 @@ struct eotid_entry {
*/
struct tid_info {
void **tid_tab;
+ unsigned int tid_base;
unsigned int ntids;
struct serv_entry *stid_tab;
@@ -111,6 +112,11 @@ struct tid_info {
unsigned int natids;
unsigned int atid_base;
+ struct filter_entry *hpftid_tab;
+ unsigned long *hpftid_bmap;
+ unsigned int nhpftids;
+ unsigned int hpftid_base;
+
struct filter_entry *ftid_tab;
unsigned long *ftid_bmap;
unsigned int nftids;
@@ -147,9 +153,15 @@ struct tid_info {
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
{
+ tid -= t->tid_base;
return tid < t->ntids ? t->tid_tab[tid] : NULL;
}
+static inline bool tid_out_of_range(const struct tid_info *t, unsigned int tid)
+{
+ return ((tid - t->tid_base) >= t->ntids);
+}
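(Note how tid_out_of_range() and lookup_tid() lean on unsigned wraparound: subtracting tid_base from a tid below the base yields a huge value that fails the < ntids check. A toy model of the rebased lookup.)

#include <stdio.h>

struct toy_tids {
	void **tid_tab;
	unsigned int tid_base;	/* first hardware tid this table covers */
	unsigned int ntids;
};

static void *toy_lookup(const struct toy_tids *t, unsigned int tid)
{
	tid -= t->tid_base;	/* hw tid -> table index; wraps if below base */
	return tid < t->ntids ? t->tid_tab[tid] : NULL;
}

int main(void)
{
	void *slots[4] = { 0 };
	struct toy_tids t = { slots, 128, 4 };

	slots[1] = "conn";
	printf("%s\n", (char *)toy_lookup(&t, 129));	/* hw tid 129 -> idx 1 */
	printf("%p\n", toy_lookup(&t, 5));		/* below base -> NULL */
	return 0;
}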
+
static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
{
return atid < t->natids ? t->atid_tab[atid].data : NULL;
@@ -171,7 +183,7 @@ static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid, unsigned short family)
{
- t->tid_tab[tid] = data;
+ t->tid_tab[tid - t->tid_base] = data;
if (t->hash_base && (tid >= t->hash_base)) {
if (family == AF_INET6)
atomic_add(2, &t->hash_tids_in_use);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index e9e45006632d..1a16449e9deb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 3e61bd5d0c29..cebe1412d960 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_txq *txq;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return NULL;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
struct sched_queue_entry *qe = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index e92ff68bdd0a..5cc74a5a1774 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 19d18acfc9a6..844fdcf55118 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (cc_pause & PAUSE_TX)
fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
else
- fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
+ FW_PORT_CAP32_802_3_PAUSE;
} else if (cc_pause & PAUSE_TX) {
fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
}
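(The one-line change above follows IEEE 802.3 Annex 28B pause resolution: to receive pause frames without sending them, a port must advertise both the symmetric PAUSE bit and ASM_DIR, not ASM_DIR alone. A small sketch of the corrected mapping, with illustrative bit values standing in for the FW_PORT_CAP32_* constants.)

#include <stdint.h>
#include <stdio.h>

#define PAUSE_RX	0x1
#define PAUSE_TX	0x2
#define CAP_PAUSE	0x1	/* stand-in for FW_PORT_CAP32_802_3_PAUSE */
#define CAP_ASMDIR	0x2	/* stand-in for FW_PORT_CAP32_802_3_ASM_DIR */

static uint32_t cc_to_caps(unsigned int cc)
{
	uint32_t caps = 0;

	if (cc & PAUSE_RX) {
		caps |= CAP_PAUSE;
		if (!(cc & PAUSE_TX))
			caps |= CAP_ASMDIR;	/* rx-only: PAUSE + ASM_DIR */
	} else if (cc & PAUSE_TX) {
		caps |= CAP_ASMDIR;		/* tx-only: ASM_DIR alone */
	}
	return caps;
}

int main(void)
{
	printf("rx-only -> 0x%x\n", (unsigned int)cc_to_caps(PAUSE_RX));
	printf("tx-only -> 0x%x\n", (unsigned int)cc_to_caps(PAUSE_TX));
	return 0;
}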
@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
const struct fw_port_cmd *cmd = (const void *)rpl;
- int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
- struct adapter *adapter = pi->adapter;
+ fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
struct link_config *lc = &pi->link_cfg;
- int link_ok, linkdnrc;
- enum fw_port_type port_type;
+ struct adapter *adapter = pi->adapter;
+ unsigned int speed, fc, fec, adv_fc;
enum fw_port_module_type mod_type;
- unsigned int speed, fc, fec;
- fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+ int action, link_ok, linkdnrc;
+ enum fw_port_type port_type;
/* Extract the various fields from the Port Information message.
*/
+ action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
switch (action) {
case FW_PORT_ACTION_GET_PORT_INFO: {
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
fec = fwcap_to_cc_fec(acaps);
+ adv_fc = fwcap_to_cc_pause(acaps);
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc || fec != lc->fec) { /* something changed */
+ fc != lc->fc || adv_fc != lc->advertised_fc ||
+ fec != lc->fec) {
+ /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev,
@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
lc->link_ok = link_ok;
lc->speed = speed;
+ lc->advertised_fc = adv_fc;
lc->fc = fc;
lc->fec = fec;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ac4fb43bdec6..accad1101ad1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1321,6 +1321,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_PPOD_EDRAM = 0x23,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
+ FW_PARAMS_PARAM_DEV_HPFILTER_REGION_SUPPORT = 0x26,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index f6fc0875d5b0..f4d41f968afa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
struct port_info *pi = netdev_priv(dev);
pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
- pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
- pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+ pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
+ pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index ccca67cf4487..57cfd10a99ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -135,6 +135,7 @@ struct link_config {
enum cc_pause requested_fc; /* flow control user has requested */
enum cc_pause fc; /* actual link flow control */
+ enum cc_pause advertised_fc; /* actual advertised flow control */
enum cc_fec auto_fec; /* Forward Error Correction: */
enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 8a389d617a23..9d49ff211cc1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
static void t4vf_handle_get_port_info(struct port_info *pi,
const struct fw_port_cmd *cmd)
{
- int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
- struct adapter *adapter = pi->adapter;
+ fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
struct link_config *lc = &pi->link_cfg;
- int link_ok, linkdnrc;
- enum fw_port_type port_type;
+ struct adapter *adapter = pi->adapter;
+ unsigned int speed, fc, fec, adv_fc;
enum fw_port_module_type mod_type;
- unsigned int speed, fc, fec;
- fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+ int action, link_ok, linkdnrc;
+ enum fw_port_type port_type;
/* Extract the various fields from the Port Information message. */
+ action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
switch (action) {
case FW_PORT_ACTION_GET_PORT_INFO: {
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
fec = fwcap_to_cc_fec(acaps);
+ adv_fc = fwcap_to_cc_pause(acaps);
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc || fec != lc->fec) { /* something changed */
+ fc != lc->fc || adv_fc != lc->advertised_fc ||
+ fec != lc->fec) {
+ /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
dev_warn_ratelimited(adapter->pdev_dev,
@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
}
lc->link_ok = link_ok;
lc->speed = speed;
+ lc->advertised_fc = adv_fc;
lc->fc = fc;
lc->fec = fec;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index c9aebcde403a..33ace3307059 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1128,7 +1128,7 @@ net_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void net_timeout(struct net_device *dev)
+static void net_timeout(struct net_device *dev, unsigned int txqueue)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index acb2856936d2..ddf60dc9ad16 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1095,7 +1095,7 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
/* netif_tx_lock held, BHs disabled */
-static void enic_tx_timeout(struct net_device *netdev)
+static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct enic *enic = netdev_priv(netdev);
schedule_work(&enic->tx_hang_reset);
@@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev)
napi_disable(&enic->napi[i]);
netif_carrier_off(netdev);
- netif_tx_disable(netdev);
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
+ netif_tx_disable(netdev);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index a8f4c69252ff..f30fa8e6ef80 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev)
if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
dev_warn(geth->dev, "TX queue base is not aligned\n");
+ dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
+ desc_ring, port->txq_dma_base);
kfree(skb_tab);
return -ENOMEM;
}
@@ -1296,7 +1298,7 @@ out_drop:
return NETDEV_TX_OK;
}
-static void gmac_tx_timeout(struct net_device *netdev)
+static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
netdev_err(netdev, "Tx timeout\n");
gmac_dump_dma_state(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cce90b5925d9..1ea3372775e6 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -964,7 +964,7 @@ dm9000_init_dm9000(struct net_device *dev)
}
/* Our watchdog timed out. Called by the networking layer */
-static void dm9000_timeout(struct net_device *dev)
+static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
{
struct board_info *db = netdev_priv(dev);
u8 reg_save;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index f1a2da15dd0a..42b798a3fad4 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -417,7 +417,10 @@ static void de_rx (struct de_private *de)
if (status & DescOwn)
break;
- len = ((status >> 16) & 0x7ff) - 4;
+ /* the length is actually a 15-bit value here according
+ * to Table 4-1 in the DE2104x spec, so the mask is 0x7fff
+ */
+ len = ((status >> 16) & 0x7fff) - 4;
mapping = de->rx_skb[rx_tail].mapping;
if (unlikely(drop)) {
@@ -1436,7 +1439,7 @@ static int de_close (struct net_device *dev)
return 0;
}
-static void de_tx_timeout (struct net_device *dev)
+static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct de_private *de = netdev_priv(dev);
const int irq = de->pdev->irq;
@@ -2039,7 +2042,7 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* remap CSR registers */
- regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+ regs = ioremap(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 0efdbd1a4a6f..32d470d4122a 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -2214,15 +2214,16 @@ static int __init dmfe_init_module(void)
if (cr6set)
dmfe_cr6_user_set = cr6set;
- switch(mode) {
- case DMFE_10MHF:
+ switch (mode) {
+ case DMFE_10MHF:
case DMFE_100MHF:
case DMFE_10MFD:
case DMFE_100MFD:
case DMFE_1M_HPNA:
dmfe_media_mode = mode;
break;
- default:dmfe_media_mode = DMFE_AUTO;
+ default:
+ dmfe_media_mode = DMFE_AUTO;
break;
}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3e3e08698876..9e9d9eee29d9 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
const char tulip_media_cap[32] =
{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
-static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
@@ -534,7 +534,7 @@ free_ring:
}
-static void tulip_tx_timeout(struct net_device *dev)
+static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index b1f30b194300..117ffe08800d 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1809,8 +1809,8 @@ static int __init uli526x_init_module(void)
if (cr6set)
uli526x_cr6_user_set = cr6set;
- switch (mode) {
- case ULI526X_10MHF:
+ switch (mode) {
+ case ULI526X_10MHF:
case ULI526X_100MHF:
case ULI526X_10MFD:
case ULI526X_100MFD:
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 70cb2d689c2c..7f136488e67c 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -331,7 +331,7 @@ static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
@@ -921,7 +921,7 @@ static void init_registers(struct net_device *dev)
iowrite32(0, ioaddr + RxStartDemand);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 55e720d2ea0c..26c5da032b1e 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -66,7 +66,7 @@ static const int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
static void rio_timer (struct timer_list *t);
-static void rio_tx_timeout (struct net_device *dev);
+static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
@@ -696,7 +696,7 @@ rio_timer (struct timer_list *t)
}
static void
-rio_tx_timeout (struct net_device *dev)
+rio_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 4a37a69764ce..b91387c456ba 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -432,7 +432,7 @@ static int mdio_wait_link(struct net_device *dev, int wait);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
@@ -969,7 +969,7 @@ static void netdev_timer(struct timer_list *t)
add_timer(&np->timer);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index e24979010969..5f8fa1145db6 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,19 +725,6 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
return nstat;
}
-static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -759,7 +746,7 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_stop = dnet_close,
.ndo_get_stats = dnet_get_stats,
.ndo_start_xmit = dnet_start_xmit,
- .ndo_do_ioctl = dnet_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 39eb7d525043..56f59db6ebf2 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1417,7 +1417,7 @@ drop:
return NETDEV_TX_OK;
}
-static void be_tx_timeout(struct net_device *netdev)
+static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ea4f17f5cce7..a817ca661c1f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -869,7 +869,7 @@ static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
return -ENOSYS;
}
-static void ethoc_tx_timeout(struct net_device *dev)
+static void ethoc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ethoc *priv = netdev_priv(dev);
u32 pending = ethoc_read(priv, INT_SOURCE);
@@ -1087,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
- priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
+ priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr,
resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
@@ -1096,7 +1096,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
if (netdev->mem_end) {
- priv->membase = devm_ioremap_nocache(&pdev->dev,
+ priv->membase = devm_ioremap(&pdev->dev,
netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 8ed85037f021..4572797f00d7 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1536,16 +1536,7 @@ static int ftgmac100_stop(struct net_device *netdev)
return 0;
}
-/* optional */
-static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netdev->phydev)
- return -ENXIO;
-
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
-static void ftgmac100_tx_timeout(struct net_device *netdev)
+static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1597,7 +1588,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_start_xmit = ftgmac100_hard_start_xmit,
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ftgmac100_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ftgmac100_tx_timeout,
.ndo_set_rx_mode = ftgmac100_set_rx_mode,
.ndo_set_features = ftgmac100_set_features,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index c24fd56a2c71..84f10970299a 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -428,7 +428,7 @@ static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
-static void fealnx_tx_timeout(struct net_device *dev);
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
@@ -1191,7 +1191,7 @@ static void reset_timer(struct timer_list *t)
}
-static void fealnx_tx_timeout(struct net_device *dev)
+static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->mem;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6a7e8993119f..2bd7ace0a953 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -74,7 +74,7 @@ config FSL_XGMAC_MDIO
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
- depends on QUICC_ENGINE
+ depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
select PHYLIB
---help---
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 6a93293d31e0..67c436400352 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6a9d12dad5d9..fd93d542f497 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -288,7 +288,7 @@ static int dpaa_stop(struct net_device *net_dev)
return err;
}
-static void dpaa_tx_timeout(struct net_device *net_dev)
+static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct dpaa_percpu_priv *percpu_priv;
const struct dpaa_priv *priv;
@@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
int page_offset;
unsigned int sz;
int *count_ptr;
- int i;
+ int i, j;
vaddr = phys_to_virt(addr);
WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
SMP_CACHE_BYTES));
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
/* We may use multiple Rx pools */
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (!dpaa_bp)
goto free_buffers;
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_page(priv->rx_dma_dev, sg_addr,
- DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
skb_add_rx_frag(skb, i - 1, head_page, frag_off,
frag_len, dpaa_bp->size);
}
+
/* Update the pool count for the current {cpu x bpool} */
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
(*count_ptr)--;
if (qm_sg_entry_is_final(&sgt[i]))
@@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
return skb;
free_buffers:
- /* compensate sw bpool counter changes */
- for (i--; i >= 0; i--) {
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)++;
- }
- }
/* free all the SG entries */
- for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
- sg_addr = qm_sg_addr(&sgt[i]);
+ for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+ sg_addr = qm_sg_addr(&sgt[j]);
sg_vaddr = phys_to_virt(sg_addr);
+ /* all pages 0..i were unmapped */
+ if (j > i)
+ dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
free_pages((unsigned long)sg_vaddr, 0);
- dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
- if (dpaa_bp) {
- count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- (*count_ptr)--;
+ /* counters 0..i-1 were decremented */
+ if (j >= i) {
+ dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+ if (dpaa_bp) {
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
}
- if (qm_sg_entry_is_final(&sgt[i]))
+ if (qm_sg_entry_is_final(&sgt[j]))
break;
}
/* free the SGT fragment */
@@ -2452,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev)
mac_dev->adjust_link(mac_dev);
}
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA 0x03a1b400
+
static int dpaa_phy_init(struct net_device *net_dev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -2470,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
- /* Remove any features not supported by the controller */
- ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ /* Unless the PHY is capable of rate adaptation */
+ if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+ ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ /* remove any features not supported by the controller */
+ ethtool_convert_legacy_u32_to_link_mode(mask,
+ mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ }
phy_support_asym_pause(phy_dev);
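The new guard skips the usual "mask supported modes down to the MAC's abilities" step when a rate-adapting Aquantia PHY sits on an XGMII link, since such a PHY can negotiate lower line rates while still presenting 10G to the MAC. A hedged sketch of the vendor test, using the mask from the hunk:

#include <linux/bits.h>

/* A PHY ID carries vendor OUI bits in its high part; masking off the
 * low model/revision bits leaves a prefix to compare against.
 */
static bool phy_is_aquantia(u32 phy_id)
{
        return (phy_id & GENMASK(31, 10)) == PHY_VEND_AQUANTIA;
}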
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index a9503aea527f..cc1b7f85e433 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -27,6 +27,20 @@ static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
mc_dev = to_fsl_mc_device(dev);
switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = DPRTC_EVENT_ETS1;
+ break;
+ case 1:
+ bit = DPRTC_EVENT_ETS2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (on)
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
+ break;
case PTP_CLK_REQ_PPS:
bit = DPRTC_EVENT_PPS;
break;
@@ -96,6 +110,12 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
ptp_clock_event(ptp_qoriq->clock, &event);
}
+ if (status & DPRTC_EVENT_ETS1)
+ extts_clean_up(ptp_qoriq, 0, true);
+
+ if (status & DPRTC_EVENT_ETS2)
+ extts_clean_up(ptp_qoriq, 1, true);
+
err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
DPRTC_IRQ_INDEX, status);
if (unlikely(err)) {
@@ -160,10 +180,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
irq = mc_dev->irqs[0];
ptp_qoriq->irq = irq->msi_desc->irq;
- err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL,
- dpaa2_ptp_irq_handler_thread,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(dev), ptp_qoriq);
+ err = request_threaded_irq(ptp_qoriq->irq, NULL,
+ dpaa2_ptp_irq_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), ptp_qoriq);
if (err < 0) {
dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
goto err_free_mc_irq;
@@ -173,18 +193,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
DPRTC_IRQ_INDEX, 1);
if (err < 0) {
dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
- goto err_free_mc_irq;
+ goto err_free_threaded_irq;
}
err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
if (err)
- goto err_free_mc_irq;
+ goto err_free_threaded_irq;
dpaa2_phc_index = ptp_qoriq->phc_index;
dev_set_drvdata(dev, ptp_qoriq);
return 0;
+err_free_threaded_irq:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
err_free_mc_irq:
fsl_mc_free_irqs(mc_dev);
err_unmap:
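The probe moves from devm_request_threaded_irq() to plain request_threaded_irq(): a devm-managed IRQ is only released after probe fails or remove() completes, which would be after fsl_mc_free_irqs() has already torn down the MSI backing it. Managing the IRQ by hand lets the error path free it explicitly, in reverse order of acquisition. Condensed sketch of the resulting unwind, with names from the hunk:

        err = request_threaded_irq(ptp_qoriq->irq, NULL,
                                   dpaa2_ptp_irq_handler_thread,
                                   IRQF_NO_SUSPEND | IRQF_ONESHOT,
                                   dev_name(dev), ptp_qoriq);
        if (err < 0)
                goto err_free_mc_irq;
        /* later failures unwind through the labels below */
err_free_threaded_irq:
        free_irq(ptp_qoriq->irq, ptp_qoriq);  /* before the MSI goes away */
err_free_mc_irq:
        fsl_mc_free_irqs(mc_dev);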
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
index 4ac05bfef338..96ffeb948f08 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -9,9 +9,11 @@
/* Command versioning */
#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_VERSION_2 2
#define DPRTC_CMD_ID_OFFSET 4
#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+#define DPRTC_CMD_V2(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_VERSION_2)
/* Command IDs */
#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
@@ -19,7 +21,7 @@
#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
-#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD_V2(0x014)
#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index 311c184e1aef..05c413719e55 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -20,6 +20,8 @@ struct fsl_mc_io;
#define DPRTC_IRQ_INDEX 0
#define DPRTC_EVENT_PPS 0x08000000
+#define DPRTC_EVENT_ETS1 0x00800000
+#define DPRTC_EVENT_ETS2 0x00400000
int dprtc_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index edad4ca46327..fe942de19597 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select FSL_ENETC_MDIO
select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index d0db33e5b6b7..74f7ac253b8b 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -3,7 +3,7 @@
common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
+fsl-enetc-y := enetc_pf.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 17739906c966..1f79e36116a3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -149,11 +149,21 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
if (enetc_tx_csum(skb, &temp_bd))
flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+ else if (tx_ring->tsd_enable)
+ flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
+ if (flags & ENETC_TXBD_FLAGS_TSE) {
+ u32 temp;
+
+ temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
+ | (flags << ENETC_TXBD_FLAGS_OFFSET);
+ temp_bd.txstart = cpu_to_le32(temp);
+ }
+
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
*txbd = temp_bd;
@@ -227,6 +237,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
enetc_bdr_idx_inc(tx_ring, &i);
tx_ring->next_to_use = i;
+ skb_tx_timestamp(skb);
+
/* let H/W know BD ring has been updated */
enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
@@ -1503,6 +1515,8 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return enetc_setup_tc_taprio(ndev, type_data);
case TC_SETUP_QDISC_CBS:
return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
default:
return -EOPNOTSUPP;
}
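The TSE branch above packs a launch time into the new txstart word of the TX BD. Going by the masks added to enetc_hw.h later in this merge (ENETC_TXBD_TXSTART_MASK, ENETC_TXBD_FLAGS_OFFSET), bits 24:0 carry skb->skb_mstamp_ns scaled down by 32, and the flags byte is mirrored in the top bits. A sketch of the packing:

        u32 txstart = (u32)(skb->skb_mstamp_ns >> 5) & ENETC_TXBD_TXSTART_MASK;

        txstart |= (u32)flags << ENETC_TXBD_FLAGS_OFFSET;
        temp_bd.txstart = cpu_to_le32(txstart);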
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 7ee0da6d0015..dd4a227ffc7a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -72,6 +72,7 @@ struct enetc_bdr {
struct enetc_ring_stats stats;
dma_addr_t bd_dma_base;
+ u8 tsd_enable; /* Time specific departure */
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
@@ -256,8 +257,10 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct net_device *ndev);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(ndev) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 880a8ed8bb47..301ee0dde02d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -579,6 +579,7 @@ static int enetc_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_ALL);
#else
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
#endif
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 51f543ef37a8..62554f28ce07 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -200,6 +200,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
#define ENETC_PFPMR_MWLM BIT(0)
+#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
@@ -358,6 +359,7 @@ union enetc_tx_bd {
u8 l4_csoff;
u8 flags;
}; /* default layout */
+ __le32 txstart;
__le32 lstatus;
};
};
@@ -378,11 +380,14 @@ union enetc_tx_bd {
};
#define ENETC_TXBD_FLAGS_L4CS BIT(0)
+#define ENETC_TXBD_FLAGS_TSE BIT(1)
#define ENETC_TXBD_FLAGS_W BIT(2)
#define ENETC_TXBD_FLAGS_CSUM BIT(3)
+#define ENETC_TXBD_FLAGS_TXSTART BIT(4)
#define ENETC_TXBD_FLAGS_EX BIT(6)
#define ENETC_TXBD_FLAGS_F BIT(7)
-
+#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
+#define ENETC_TXBD_FLAGS_OFFSET 24
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
@@ -615,3 +620,7 @@ struct enetc_cbd {
/* Port time gating capability register */
#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+
+/* Port time specific departure */
+#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
+#define ENETC_TSDE BIT(31)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 149883c8f0b8..48c32a171afa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -1,41 +1,56 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
#include <linux/mdio.h>
#include <linux/of_mdio.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_mdio.h"
+#include "enetc_pf.h"
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
#define ENETC_MDIO_CTL 0x4 /* MDIO control */
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define enetc_mdio_rd(hw, off) \
- enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
-#define enetc_mdio_wr(hw, off, val) \
- enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
+static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
-#define ENETC_MDC_DIV 258
+#define enetc_mdio_rd(mdio_priv, off) \
+ _enetc_mdio_rd(mdio_priv, ENETC_##off)
+#define enetc_mdio_wr(mdio_priv, off, val) \
+ _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
+#define MDIO_CFG_HOLD(x) (((x) << 2) & GENMASK(4, 2))
#define MDIO_CFG_ENC45 BIT(6)
/* external MDIO only - driven on neg MDC edge */
#define MDIO_CFG_NEG BIT(23)
+#define ENETC_EMDIO_CFG \
+ (MDIO_CFG_HOLD(2) | \
+ MDIO_CFG_CLKDIV(258) | \
+ MDIO_CFG_NEG)
+
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_hw *hw)
+static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
u32 val;
@@ -46,12 +61,11 @@ static int enetc_mdio_wait_complete(struct enetc_hw *hw)
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
- struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
- mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ mdio_cfg = ENETC_EMDIO_CFG;
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_cfg |= MDIO_CFG_ENC45;
@@ -61,44 +75,44 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
}
/* write the value */
- enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_mdio_write);
int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
- struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
- mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ mdio_cfg = ENETC_EMDIO_CFG;
if (regnum & MII_ADDR_C45) {
dev_addr = (regnum >> 16) & 0x1f;
mdio_cfg |= MDIO_CFG_ENC45;
@@ -107,86 +121,56 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
}
/* initiate the read */
- enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(hw);
+ ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
return value;
}
+EXPORT_SYMBOL_GPL(enetc_mdio_read);
-int enetc_mdio_probe(struct enetc_pf *pf)
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct device_node *np;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- np = of_get_child_by_name(dev->of_node, "mdio");
- if (!np) {
- dev_err(dev, "MDIO node missing\n");
- return -EINVAL;
- }
-
- err = of_mdiobus_register(bus, np);
- if (err) {
- of_node_put(np);
- dev_err(dev, "cannot register MDIO bus\n");
- return err;
- }
+ struct enetc_hw *hw;
- of_node_put(np);
- pf->mdio = bus;
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
- return 0;
-}
+ hw->port = port_regs;
-void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
+ return hw;
}
+EXPORT_SYMBOL_GPL(enetc_hw_alloc);
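The heart of this rework is that every register access is now keyed off mdio_priv->mdio_base rather than a hard-coded 0x1c00 offset, so the same enetc_mdio_read()/enetc_mdio_write() ops can drive both the PF-internal MDIO block (base ENETC_EMDIO_BASE) and the standalone PCIe MDIO function. A sketch of the accessor shape, assuming the priv layout now shared via include/linux/fsl/enetc_mdio.h:

struct enetc_mdio_priv {
        struct enetc_hw *hw;
        int mdio_base;          /* register base of this MDIO instance */
};

static u32 mdio_rd(struct enetc_mdio_priv *priv, int off)
{
        return enetc_port_rd(priv->hw, priv->mdio_base + off);
}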
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
deleted file mode 100644
index 60c9a3889824..000000000000
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
-
-#include <linux/phy.h>
-#include "enetc_pf.h"
-
-struct enetc_mdio_priv {
- struct enetc_hw *hw;
-};
-
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index fbd41ce01f06..ebc635f8a4cc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/fsl/enetc_mdio.h>
#include <linux/of_mdio.h>
-#include "enetc_mdio.h"
+#include "enetc_pf.h"
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
@@ -13,17 +14,29 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
{
struct enetc_mdio_priv *mdio_priv;
struct device *dev = &pdev->dev;
+ void __iomem *port_regs;
struct enetc_hw *hw;
struct mii_bus *bus;
int err;
- hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return -ENOMEM;
+ port_regs = pci_iomap(pdev, 0, 0);
+ if (!port_regs) {
+ dev_err(dev, "iomap failed\n");
+ err = -ENXIO;
+ goto err_ioremap;
+ }
+
+ hw = enetc_hw_alloc(dev, port_regs);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ goto err_hw_alloc;
+ }
bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
bus->name = ENETC_MDIO_BUS_NAME;
bus->read = enetc_mdio_read;
@@ -31,13 +44,14 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(dev, "device enable failed\n");
- return err;
+ goto err_pci_enable;
}
err = pci_request_region(pdev, 0, KBUILD_MODNAME);
@@ -46,13 +60,6 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
- hw->port = pci_iomap(pdev, 0, 0);
- if (!hw->port) {
- err = -ENXIO;
- dev_err(dev, "iomap failed\n");
- goto err_ioremap;
- }
-
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -62,12 +69,14 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- iounmap(mdio_priv->hw->port);
-err_ioremap:
pci_release_mem_regions(pdev);
err_pci_mem_reg:
pci_disable_device(pdev);
-
+err_pci_enable:
+err_mdiobus_alloc:
+ iounmap(port_regs);
+err_hw_alloc:
+err_ioremap:
return err;
}
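The relabelled goto ladder restores the invariant that error labels undo resources in exactly the reverse order they were acquired, which is why iounmap(port_regs) now sits below the labels for everything acquired after the mapping. The general shape, as a sketch with hypothetical acquire/release helpers:

        a = acquire_a();
        if (!a)
                return -ENOMEM;
        b = acquire_b();
        if (!b) {
                err = -ENOMEM;
                goto err_b;
        }
        return 0;

err_b:
        release_a(a);   /* unwind strictly in reverse order */
        return err;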
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index e7482d483b28..fc0d7d99e9a1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -2,6 +2,7 @@
/* Copyright 2017-2019 NXP */
#include <linux/module.h>
+#include <linux/fsl/enetc_mdio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "enetc_pf.h"
@@ -749,6 +750,52 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
+static int enetc_mdio_probe(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ np = of_get_child_by_name(dev->of_node, "mdio");
+ if (!np) {
+ dev_err(dev, "MDIO node missing\n");
+ return -EINVAL;
+ }
+
+ err = of_mdiobus_register(bus, np);
+ if (err) {
+ of_node_put(np);
+ dev_err(dev, "cannot register MDIO bus\n");
+ return err;
+ }
+
+ of_node_put(np);
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 10dd1b53bb08..59e65a6f6c3e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -49,7 +49,3 @@ struct enetc_pf {
int enetc_msg_psi_init(struct enetc_pf *pf);
void enetc_msg_psi_free(struct enetc_pf *pf);
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
-
-/* MDIO */
-int enetc_mdio_probe(struct enetc_pf *pf);
-void enetc_mdio_remove(struct enetc_pf *pf);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 2e99438cb1bf..0c6bf3a55a9a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -36,7 +36,6 @@ void enetc_sched_speed_set(struct net_device *ndev)
case SPEED_10:
default:
pspeed = ENETC_PMR_PSPEED_10M;
- netdev_err(ndev, "Qbv PSPEED set speed link down.\n");
}
priv->speed = speed;
@@ -156,6 +155,11 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
int err;
int i;
+ /* TSD and Qbv are mutually exclusive in hardware */
+ for (i = 0; i < priv->num_tx_rings; i++)
+ if (priv->tx_ring[i]->tsd_enable)
+ return -EBUSY;
+
for (i = 0; i < priv->num_tx_rings; i++)
enetc_set_bdr_prio(&priv->si->hw,
priv->tx_ring[i]->index,
@@ -192,7 +196,6 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
- u32 tc_max_sized_frame;
u8 tc = cbs->queue;
u8 prio_top, prio_next;
int bw_sum = 0;
@@ -250,7 +253,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interference_size is maxSizedFrame.
*
@@ -298,3 +301,33 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_etf_qopt_offload *qopt = type_data;
+ u8 tc_nums = netdev_get_num_tc(ndev);
+ int tc;
+
+ if (!tc_nums)
+ return -EOPNOTSUPP;
+
+ tc = qopt->queue;
+
+ if (tc < 0 || tc >= priv->num_tx_rings)
+ return -EINVAL;
+
+ /* TXSTART and TX checksum offload are not supported simultaneously */
+ if (ndev->features & NETIF_F_CSUM_MASK)
+ return -EBUSY;
+
+ /* TSD and Qbv are mutually exclusive in hardware */
+ if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ return -EBUSY;
+
+ priv->tx_ring[tc]->tsd_enable = qopt->enable;
+ enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
+ qopt->enable ? ENETC_TSDE : 0);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 05c1899f6628..4432a59904c7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1141,7 +1141,7 @@ fec_stop(struct net_device *ndev)
static void
-fec_timeout(struct net_device *ndev)
+fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return;
regs->version = fec_enet_register_version;
@@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev,
off >>= 2;
buf[off] = readl(&theregs[off]);
}
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,
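fec_enet_get_regs() now takes a runtime-PM reference around the register dump so the peripheral clocks are guaranteed on while ethtool reads the MMIO space. The balanced pattern, as a sketch (the hunk returns early when pm_runtime_get_sync() fails; stricter variants also drop the reference with pm_runtime_put_noidle() in that case, since get_sync bumps the usage count even on failure):

        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                return;
        /* ... MMIO reads, clocks guaranteed enabled ... */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);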
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 30cdb246d020..7a3f066e611d 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -84,7 +84,7 @@ static int debug = -1; /* the above default */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");
-static void mpc52xx_fec_tx_timeout(struct net_device *dev)
+static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -785,16 +785,6 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
};
-static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return -ENOTSUPP;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_open = mpc52xx_fec_open,
.ndo_stop = mpc52xx_fec_close,
@@ -802,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mpc52xx_fec_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 41c6fa200e74..e1901874c19f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -110,7 +110,7 @@ do { \
/* Interface Mode Register (IF_MODE) */
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_XGMII;
+ tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index f0806ace1ae2..55f2122c3217 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -692,7 +692,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->res = __devm_request_region(dev,
fman_get_mem_region(priv->fman),
- res.start, res.end + 1 - res.start,
+ res.start, resource_size(&res),
"mac");
if (!mac_dev->res) {
dev_err(dev, "__devm_request_mem_region(mac) failed\n");
@@ -701,7 +701,7 @@ static int mac_probe(struct platform_device *_of_dev)
}
priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- mac_dev->res->end + 1 - mac_dev->res->start);
+ resource_size(mac_dev->res));
if (!priv->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
err = -EIO;
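Both hunks replace the open-coded "res.end + 1 - res.start" with resource_size(), the canonical helper for struct resource's inclusive [start, end] convention. Its definition in <linux/ioport.h> is effectively:

static inline resource_size_t resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}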
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 3981c06f082f..add61fed33ee 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -641,7 +641,7 @@ static void fs_timeout_work(struct work_struct *work)
netif_wake_queue(dev);
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -882,14 +882,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);
@@ -907,7 +899,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_do_ioctl = fs_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 72868a28b621..f7e5cafe89a9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2093,7 +2093,7 @@ static void gfar_reset_task(struct work_struct *work)
reset_gfar(priv->ndev);
}
-static void gfar_timeout(struct net_device *dev)
+static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2205,13 +2205,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ bool do_tstamp;
+
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
frags = skb_shinfo(skb)->nr_frags;
/* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if (unlikely(do_tstamp))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@@ -2225,7 +2229,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = be16_to_cpu(next->length) +
GMAC_FCB_LEN + GMAC_TXPAL_LEN;
@@ -2235,7 +2239,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
buflen, DMA_TO_DEVICE);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
~0x7UL);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f839fa94ebdd..0d101c00286f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3545,7 +3545,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* ucc_geth_timeout gets called when a packet has not been
* transmitted after a set amount of time.
*/
-static void ucc_geth_timeout(struct net_device *dev)
+static void ucc_geth_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index e03b30c60dcf..c82c85ef5fb3 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a011043;
};
static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
/* Return all Fs if nothing was there */
- if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");
+ priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+ "fsl,erratum-a011043");
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 1eca0fdb9933..a7b7a4aace79 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -93,7 +93,7 @@ static irqreturn_t fjn_interrupt(int irq, void *dev_id);
static void fjn_rx(struct net_device *dev);
static void fjn_reset(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-static void fjn_tx_timeout(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue);
static const struct ethtool_ops netdev_ethtool_ops;
/*
@@ -774,7 +774,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
/*====================================================================*/
-static void fjn_tx_timeout(struct net_device *dev)
+static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct local_info *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 9b7a8db9860f..e032563ceefd 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -845,7 +845,7 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_napi_enabled(priv);
}
-static void gve_tx_timeout(struct net_device *dev)
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index edec61dfc868..9f52e72ff641 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
rx->cnt = cnt;
rx->fill_cnt += work_done;
- /* restock desc ring slots */
- dma_wmb(); /* Ensure descs are visible before ringing doorbell */
gve_rx_write_doorbell(priv, rx);
return gve_rx_work_pending(rx);
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index f4889431f9b7..d0244feb0301 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
* may have added descriptors without ringing the doorbell.
*/
- /* Ensure tx descs from a prior gve_tx are visible before
- * ringing doorbell.
- */
- dma_wmb();
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_BUSY;
}
@@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
return NETDEV_TX_OK;
- /* Ensure tx descs are visible before ringing doorbell */
- dma_wmb();
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_OK;
}
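Both gve hunks drop explicit dma_wmb() calls ahead of doorbell writes, on the assumption that the doorbell helpers use writel()-family MMIO accessors (iowrite32be()), whose contract already orders prior normal-memory stores, such as the descriptor writes, before the MMIO store. A hedged sketch of that assumption:

static inline void gve_write_doorbell_sketch(u32 __iomem *db, u32 val)
{
        /* iowrite32be()/writel() carry an implicit write barrier, so
         * descriptors written just before this call are visible to the
         * device before the doorbell lands.
         */
        iowrite32be(val, db);
}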
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 3e9b6d543c77..d9718b87279d 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
hip04_set_xmit_desc(priv, phys);
- priv->tx_head = TX_NEXT(tx_head);
count++;
netdev_sent_queue(ndev, skb->len);
+ priv->tx_head = TX_NEXT(tx_head);
stats->tx_bytes += skb->len;
stats->tx_packets++;
@@ -779,7 +779,7 @@ static int hip04_mac_stop(struct net_device *ndev)
return 0;
}
-static void hip04_timeout(struct net_device *ndev)
+static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hip04_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 90ab7ade44c4..57c3bc4f7089 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -675,18 +675,6 @@ static void hisi_femac_net_set_rx_mode(struct net_device *dev)
}
}
-static int hisi_femac_net_ioctl(struct net_device *dev,
- struct ifreq *ifreq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifreq, cmd);
-}
-
static const struct ethtool_ops hisi_femac_ethtools_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -697,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = {
.ndo_open = hisi_femac_net_open,
.ndo_stop = hisi_femac_net_close,
.ndo_start_xmit = hisi_femac_net_xmit,
- .ndo_do_ioctl = hisi_femac_net_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = hisi_femac_set_mac_address,
.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 247de9105d10..4fb776920a93 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -893,7 +893,7 @@ static void hix5hd2_tx_timeout_task(struct work_struct *work)
hix5hd2_net_open(priv->netdev);
}
-static void hix5hd2_net_timeout(struct net_device *dev)
+static void hix5hd2_net_timeout(struct net_device *dev, unsigned int txqueue)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 14ab20491fd0..c117074c16e3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
if (unlikely(!skb)) {
- netdev_err(ndev, "alloc rx skb fail\n");
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
@@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
container_of(napi, struct hns_nic_ring_data, napi);
struct hnae_ring *ring = ring_data->ring;
-try_again:
clean_complete += ring_data->poll_one(
ring_data, budget - clean_complete,
ring_data->ex_process);
@@ -1066,7 +1064,7 @@ try_again:
napi_complete(napi);
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
} else {
- goto try_again;
+ return budget;
}
}
@@ -1485,7 +1483,7 @@ static int hns_nic_net_stop(struct net_device *ndev)
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
#define HNS_TX_TIMEO_LIMIT (40 * HZ)
-static void hns_nic_net_timeout(struct net_device *ndev)
+static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1499,20 +1497,6 @@ static void hns_nic_net_timeout(struct net_device *ndev)
}
}
-static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
-{
- struct phy_device *phy_dev = netdev->phydev;
-
- if (!netif_running(netdev))
- return -EINVAL;
-
- if (!phy_dev)
- return -ENOTSUPP;
-
- return phy_mii_ioctl(phy_dev, ifr, cmd);
-}
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1960,7 +1944,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_tx_timeout = hns_nic_net_timeout,
.ndo_set_mac_address = hns_nic_net_set_mac_address,
.ndo_change_mtu = hns_nic_change_mtu,
- .ndo_do_ioctl = hns_nic_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
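Replacing the "goto try_again" loop with "return budget" follows the NAPI contract: a poll handler that has not drained its rings returns the full budget so the core reschedules it, instead of spinning in place and starving other softirq work. Skeleton of the contract; the foo_* names are illustrative:

static int foo_poll(struct napi_struct *napi, int budget)
{
        int done = foo_clean_rings(napi, budget);

        if (done < budget && napi_complete_done(napi, done))
                foo_unmask_irq(napi);   /* fully drained: re-arm the IRQ */

        return min(done, budget);
}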
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index d01bf536eb86..7aa2fac76c5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,6 +3,8 @@
# Makefile for the HISILICON network device drivers.
#
+ccflags-y += -I$(srctree)/$(src)
+
obj-$(CONFIG_HNS3) += hns3pf/
obj-$(CONFIG_HNS3) += hns3vf/
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3b5e2d7251e7..a3e4081b84ba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -164,11 +164,7 @@ enum hnae3_reset_type {
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
-};
-
-enum hnae3_flr_state {
- HNAE3_FLR_DOWN,
- HNAE3_FLR_DONE,
+ HNAE3_MAX_RESET,
};
enum hnae3_port_base_vlan_state {
@@ -575,8 +571,7 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table;
};
-#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
-#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
+#define HNAE3_INT_NAME_LEN 32
#define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 6b328a259efc..1d4ffc5f408a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -176,7 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
return -EINVAL;
}
- ring = &priv->ring[q_num];
+ ring = &priv->ring[q_num];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
@@ -209,10 +209,10 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
- ring = &priv->ring[q_num + h->kinfo.num_tqps];
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
- rx_desc = &ring->desc[rx_index];
+ rx_desc = &ring->desc[rx_index];
addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
@@ -297,8 +297,8 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOMEM;
- len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
+ len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
uncopy_bytes = copy_to_user(buffer, buf, len);
kfree(buf);
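snprintf() returns the length the output would have taken, which can exceed the buffer size and make the subsequent copy_to_user() read past the allocation; scnprintf() returns the number of bytes actually written (excluding the trailing NUL), so len is always safe to copy. Compare:

        /* may return more than HNS3_DBG_READ_LEN on truncation */
        len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n", msg);
        /* never returns more than HNS3_DBG_READ_LEN - 1 */
        len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n", msg);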
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 69545dd6c938..acb796cc10d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -24,6 +24,12 @@
#include "hnae3.h"
#include "hns3_enet.h"
+/* All hns3 tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "hns3_trace.h"
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
@@ -54,6 +60,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define HNS3_INNER_VLAN_TAG 1
#define HNS3_OUTER_VLAN_TAG 2
+#define HNS3_MIN_TX_LEN 33U
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -127,18 +135,21 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
continue;
if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "TxRx",
- txrx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "TxRx", txrx_int_idx++);
txrx_int_idx++;
} else if (tqp_vectors->rx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "Rx",
- rx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Rx", rx_int_idx++);
} else if (tqp_vectors->tx_group.ring) {
- snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
- "%s-%s-%d", priv->netdev->name, "Tx",
- tx_int_idx++);
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Tx", tx_int_idx++);
} else {
/* Skip this unused q_vector */
continue;
@@ -155,6 +166,8 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}
+ disable_irq(tqp_vectors->vector_irq);
+
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -173,6 +186,7 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
/* enable vector */
hns3_mask_vector_irq(tqp_vector, 1);
@@ -372,18 +386,6 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
- /* the device can work without cpu rmap, only aRFS needs it */
- ret = hns3_set_rx_cpu_rmap(netdev);
- if (ret)
- netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
-
- /* get irq resource for all vectors */
- ret = hns3_nic_init_irq(priv);
- if (ret) {
- netdev_err(netdev, "init irq failed! ret=%d\n", ret);
- goto free_rmap;
- }
-
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
/* enable the vectors */
@@ -396,22 +398,15 @@ static int hns3_nic_net_up(struct net_device *netdev)
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
- if (ret)
- goto out_start_err;
-
- return 0;
-
-out_start_err:
- set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ for (j = i - 1; j >= 0; j--)
+ hns3_vector_disable(&priv->tqp_vector[j]);
+ }
- hns3_nic_uninit_irq(priv);
-free_rmap:
- hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -508,11 +503,6 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
- hns3_free_rx_cpu_rmap(netdev);
-
- /* free irq resources */
- hns3_nic_uninit_irq(priv);
-
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
* to disable the ring through firmware when downing the netdev.
@@ -734,6 +724,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
+ trace_hns3_tso(skb);
+
return 0;
}
@@ -1138,6 +1130,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use);
ring_ptr_move_fw(ring, next_to_use);
return HNS3_LIKELY_BD_NUM;
}
@@ -1161,6 +1154,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+ trace_hns3_tx_desc(ring, ring->next_to_use);
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
@@ -1286,6 +1280,14 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
return false;
}
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_SKB_FRAGS; i++)
+ size[i] = skb_frag_size(&shinfo->frags[i]);
+}
+
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct net_device *netdev,
struct sk_buff *skb)
@@ -1297,8 +1299,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_num(skb, bd_size);
if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
- !hns3_skb_need_linearized(skb, bd_size, bd_num))
+ !hns3_skb_need_linearized(skb, bd_size, bd_num)) {
+ trace_hns3_over_8bd(skb);
goto out;
+ }
if (__skb_linearize(skb))
return -ENOMEM;
@@ -1306,8 +1310,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(skb) &&
- bd_num > HNS3_MAX_NON_TSO_BD_NUM))
+ bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
+ trace_hns3_over_8bd(skb);
return -ENOMEM;
+ }
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
@@ -1405,6 +1411,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
int bd_num = 0;
int ret;
+ /* Hardware cannot handle frames shorter than 33 bytes; pad them out */
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ return NETDEV_TX_OK;
+
/* Prefetch the data used later */
prefetch(skb->data);
@@ -1448,6 +1458,7 @@ out:
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
cpu_to_le16(BIT(HNS3_TXD_FE_B));
+ trace_hns3_tx_desc(ring, pre_ntu);
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
@@ -1556,6 +1567,37 @@ static int hns3_nic_set_features(struct net_device *netdev,
return 0;
}
+static netdev_features_t hns3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+#define HNS3_MAX_HDR_LEN 480U
+#define HNS3_MAX_L4_HDR_LEN 60U
+
+ size_t len;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ if (skb->encapsulation)
+ len = skb_inner_transport_header(skb) - skb->data;
+ else
+ len = skb_transport_header(skb) - skb->data;
+
+ /* Assume the L4 header is 60 bytes, as TCP is the only protocol
+ * with a flexible header length, and its maximum is 60 bytes.
+ */
+ len += HNS3_MAX_L4_HDR_LEN;
+
+ /* Hardware only supports checksum offload for skbs with a header
+ * length of at most 480 bytes.
+ */
+ if (len > HNS3_MAX_HDR_LEN)
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -1869,7 +1911,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return true;
}
-static void hns3_nic_net_timeout(struct net_device *ndev)
+static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
@@ -1970,6 +2012,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_do_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
+ .ndo_features_check = hns3_features_check,
.ndo_get_stats64 = hns3_nic_get_stats64,
.ndo_setup_tc = hns3_nic_setup_tc,
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
@@ -2051,10 +2094,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
- if (!ae_dev) {
- ret = -ENOMEM;
- return ret;
- }
+ if (!ae_dev)
+ return -ENOMEM;
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
@@ -2497,8 +2538,8 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
rmb(); /* Make sure head is ready before touch any data */
if (unlikely(!is_valid_clean_head(ring, head))) {
- netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
- ring->next_to_use, ring->next_to_clean);
+ hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
u64_stats_update_begin(&ring->syncp);
ring->stats.io_err_cnt++;
@@ -2584,6 +2625,12 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
+static bool hns3_page_is_reusable(struct page *page)
+{
+ return page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page);
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2598,7 +2645,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Avoid re-using remote pages, or the stack is still using the page
* when page_offset rollback to zero, flag default unreuse
*/
- if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+ if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
@@ -2668,6 +2715,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
+
+ trace_hns3_gro(skb);
+
return 0;
}
@@ -2788,7 +2838,6 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
-#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
struct net_device *netdev = ring_to_netdev(ring);
struct sk_buff *skb;
@@ -2805,6 +2854,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
return -ENOMEM;
}
+ trace_hns3_rx_desc(ring);
prefetchw(skb->data);
ring->pending_buf = 1;
@@ -2814,7 +2864,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
+ if (likely(hns3_page_is_reusable(desc_cb->priv)))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
@@ -2832,33 +2882,19 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- return HNS3_NEED_ADD_FRAG;
+ return 0;
}
-static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
- bool pending)
+static int hns3_add_frag(struct hns3_enet_ring *ring)
{
struct sk_buff *skb = ring->skb;
struct sk_buff *head_skb = skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
- struct hns3_desc *pre_desc;
+ struct hns3_desc *desc;
u32 bd_base_info;
- int pre_bd;
- /* if there is pending bd, the SW param next_to_clean has moved
- * to next and the next is NULL
- */
- if (pending) {
- pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
- ring->desc_num;
- pre_desc = &ring->desc[pre_bd];
- bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
- } else {
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- }
-
- while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
+ do {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2893,9 +2929,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ trace_hns3_rx_desc(ring);
ring_ptr_move_fw(ring, next_to_clean);
ring->pending_buf++;
- }
+ } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
return 0;
}
@@ -3063,28 +3100,23 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
if (ret < 0) /* alloc buffer fail */
return ret;
- if (ret > 0) { /* need add frag */
- ret = hns3_add_frag(ring, desc, false);
+ if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+ ret = hns3_add_frag(ring);
if (ret)
return ret;
-
- /* As the head data may be changed when GRO enable, copy
- * the head data in after other data rx completed
- */
- memcpy(skb->data, ring->va,
- ALIGN(ring->pull_len, sizeof(long)));
}
} else {
- ret = hns3_add_frag(ring, desc, true);
+ ret = hns3_add_frag(ring);
if (ret)
return ret;
+ }
- /* As the head data may be changed when GRO enable, copy
- * the head data in after other data rx completed
- */
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in after the rest of the RX data has completed
+ */
+ if (skb->len > HNS3_RX_HEAD_SIZE)
memcpy(skb->data, ring->va,
ALIGN(ring->pull_len, sizeof(long)));
- }
ret = hns3_handle_bdinfo(ring, skb);
if (unlikely(ret)) {
@@ -3590,26 +3622,25 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
continue;
- hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ /* Since the mapping can be overwritten, when we fail to get the
+ * chain between a vector and its rings, we should go on to deal
+ * with the remaining options.
+ */
+ if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
- free_irq(tqp_vector->vector_irq, tqp_vector);
- tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
- }
-
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
}
}
-static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
struct pci_dev *pdev = h->pdev;
@@ -3621,11 +3652,10 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[i];
ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
if (ret)
- return ret;
+ return;
}
devm_kfree(&pdev->dev, priv->tqp_vector);
- return 0;
}
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
@@ -4024,6 +4054,18 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ /* the device can work without the CPU rmap; only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto out_init_irq_fail;
+ }
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4045,6 +4087,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_client_start:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+out_init_irq_fail:
unregister_netdev(netdev);
out_reg_netdev_fail:
hns3_uninit_phy(netdev);
@@ -4082,15 +4127,17 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
goto out_netdev_free;
}
+ hns3_free_rx_cpu_rmap(netdev);
+
+ hns3_nic_uninit_irq(priv);
+
hns3_del_all_fd_rules(netdev, true);
hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
- ret = hns3_nic_dealloc_vector_data(priv);
- if (ret)
- netdev_err(netdev, "dealloc vector error\n");
+ hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv);
if (ret)
@@ -4417,17 +4464,32 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ /* the device can work without the CPU rmap; only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto err_init_irq_fail;
+ }
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
- goto err_uninit_ring;
+ goto err_client_start_fail;
}
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
-err_uninit_ring:
+err_client_start_fail:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+err_init_irq_fail:
hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
@@ -4477,6 +4539,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
hns3_clear_all_ring(handle, true);
hns3_reset_tx_queue(priv->ae_handle);
@@ -4484,9 +4548,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
- ret = hns3_nic_dealloc_vector_data(priv);
- if (ret)
- netdev_err(netdev, "dealloc vector error\n");
+ hns3_nic_dealloc_vector_data(priv);
ret = hns3_uninit_all_ring(priv);
if (ret)
@@ -4652,7 +4714,7 @@ static int __init hns3_init_module(void)
pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
- snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
+ snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
hns3_driver_name);
client.ops = &client_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 9d47abd5c37c..abefd7a179f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -673,4 +673,5 @@ void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6e0212b79438..c03856e63320 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -423,9 +423,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_",
- prefix, i);
- n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ prefix, i);
size_left = (ETH_GSTRING_LEN - 1) - n1;
/* now, concatenate the stats string to it */
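Editorial note on the snprintf() -> scnprintf() change above: snprintf() returns the length the formatted string would have had, which can exceed the buffer — that is why the old code needed the min_t() clamp before computing size_left — while scnprintf() returns the number of characters actually stored (excluding the NUL), so its result can feed the arithmetic directly. A minimal sketch of the difference, assuming an 8-byte buffer (illustrative only, not driver code):

	char buf[8];
	int a, b;

	a = snprintf(buf, sizeof(buf), "0123456789");  /* a == 10: would-be length */
	b = scnprintf(buf, sizeof(buf), "0123456789"); /* b == 7: bytes actually stored */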
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
new file mode 100644
index 000000000000..7bddcca148a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HNS3_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32))
+
+DECLARE_EVENT_CLASS(hns3_skb_template,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, headlen)
+ __field(unsigned int, len)
+ __field(__u8, nr_frags)
+ __field(__u8, ip_summed)
+ __field(unsigned int, hdr_len)
+ __field(unsigned short, gso_size)
+ __field(unsigned short, gso_segs)
+ __field(unsigned int, gso_type)
+ __field(bool, fraglist)
+ __array(__u32, size, MAX_SKB_FRAGS)
+ ),
+
+ TP_fast_assign(
+ __entry->headlen = skb_headlen(skb);
+ __entry->len = skb->len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_segs = skb_shinfo(skb)->gso_segs;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->hdr_len = skb->encapsulation ?
+ skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
+ skb_transport_offset(skb) + tcp_hdrlen(skb);
+ __entry->ip_summed = skb->ip_summed;
+ __entry->fraglist = skb_has_frag_list(skb);
+ hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
+ ),
+
+ TP_printk(
+ "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%d %u): %s",
+ __entry->headlen, __entry->len, __entry->hdr_len,
+ __entry->ip_summed, __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->fraglist, __entry->nr_frags,
+ __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32))
+ )
+);
+
+DEFINE_EVENT(hns3_skb_template, hns3_over_8bd,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_gro,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_tso,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+TRACE_EVENT(hns3_tx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu),
+ TP_ARGS(ring, cur_ntu),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ memcpy(__entry->desc, &ring->desc[cur_ntu],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hns3_rx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __field(dma_addr_t, buf_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
+ memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad) buf(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma, &__entry->buf_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+#endif /* _HNS3_TRACE_H_ */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns3_trace
+#include <trace/define_trace.h>
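A note on this header's shape: the TRACE_SYSTEM and TRACE_INCLUDE_* definitions must sit outside the include guard because define_trace.h re-reads the header (via TRACE_HEADER_MULTI_READ) to expand the event macros a second time. Exactly one compilation unit defines CREATE_TRACE_POINTS before including the header so the tracepoint bodies are emitted; every other user includes it plainly. A sketch of the expected usage (the including file is an assumption here):

	/* in one .c file only, e.g. hns3_enet.c (assumed): */
	#define CREATE_TRACE_POINTS
	#include "hns3_trace.h"

	/* all other users just include it and call the generated helpers: */
	#include "hns3_trace.h"

	trace_hns3_rx_desc(ring);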
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 940ead3970d1..7f509eff562e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -479,19 +479,6 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
-static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
-{
- spin_lock(&ring->lock);
- hclge_free_cmd_desc(ring);
- spin_unlock(&ring->lock);
-}
-
-static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
-{
- hclge_destroy_queue(&hw->cmq.csq);
- hclge_destroy_queue(&hw->cmq.crq);
-}
-
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -501,5 +488,6 @@ void hclge_cmd_uninit(struct hclge_dev *hdev)
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
- hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d97da67f07a1..96498d9b4754 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
+#include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_DESC_DATA_LEN 6
@@ -63,6 +64,7 @@ enum hclge_cmd_status {
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
};
struct hclge_cmq {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 112df34b3869..67fad80035d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -73,8 +73,6 @@ static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
-#define HCLGE_GET_DFX_REG_TYPE_CNT 4
-
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int entries_per_desc;
int index;
@@ -886,8 +884,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
}
}
-static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
- bool sel_x, u32 loc)
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
@@ -912,7 +910,7 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
- return;
+ return ret;
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
@@ -931,16 +929,76 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ return ret;
+}
+
+static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (cnt != hdev->hclge_fd_rule_num)
+ return -EINVAL;
+
+ return cnt;
}
static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
- u32 i;
+ int i, ret, rule_cnt;
+ u16 *rule_locs;
+
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only FD-supported dev supports dump fd tcam\n");
+ return;
+ }
+
+ if (!hdev->hclge_fd_rule_num ||
+ !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return;
- for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
- hclge_dbg_fd_tcam_read(hdev, 0, true, i);
- hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+ rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ sizeof(u16), GFP_KERNEL);
+ if (!rule_locs)
+ return;
+
+ rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
+ if (rule_cnt <= 0) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get rule number, ret = %d\n", rule_cnt);
+ kfree(rule_locs);
+ return;
}
+
+ for (i = 0; i < rule_cnt; i++) {
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key x, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key y, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+ }
+
+ kfree(rule_locs);
}
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
@@ -976,6 +1034,14 @@ void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
+static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
+ hdev->last_serv_processed);
+ dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+}
+
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
struct hclge_desc *desc_src, *desc_tmp;
@@ -1227,6 +1293,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
+ hclge_dbg_dump_serv_info(hdev);
} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
hclge_dbg_get_m7_stats_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index dc66b4e13377..c85b72dc44d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -505,7 +505,7 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
+ .reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
@@ -599,7 +599,7 @@ static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
+ .reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
@@ -1898,10 +1898,8 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!desc)
+ return -ENOMEM;
ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
reset_requests);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 13dbd249f35f..492bc9446463 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -72,6 +72,8 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
+static struct workqueue_struct *hclge_wq;
+
static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
@@ -416,7 +418,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
int i, k, n;
@@ -453,7 +455,7 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
u16 i, k, n;
@@ -802,7 +804,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -815,8 +817,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
hclge_update_stats(handle, NULL);
- mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -860,9 +862,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
usleep_range(1000, 2000);
} while (timeout++ < HCLGE_QUERY_MAX_CNT);
- ret = hclge_parse_func_status(hdev, req);
-
- return ret;
+ return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
@@ -880,12 +880,12 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
}
req = (struct hclge_pf_res_cmd *)desc.data;
- hdev->num_tqps = __le16_to_cpu(req->tqp_num);
- hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+ hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
hdev->tx_buf_size =
- __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
@@ -893,7 +893,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (req->dv_buf_size)
hdev->dv_buf_size =
- __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->dv_buf_size = HCLGE_DEFAULT_DV;
@@ -901,10 +901,10 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* the NIC's MSI-X number always equals the RoCE's. */
@@ -917,7 +917,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
hdev->num_nic_msi = hdev->num_msi;
@@ -1331,11 +1331,7 @@ static int hclge_get_cap(struct hclge_dev *hdev)
}
/* get pf resource */
- ret = hclge_query_pf_resource(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
-
- return ret;
+ return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
@@ -2619,30 +2615,21 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac speed dup fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac autoneg fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Fec mode init fail, ret = %d\n", ret);
+ if (ret)
return ret;
- }
}
ret = hclge_set_mac_mtu(hdev, hdev->mps);
@@ -2665,31 +2652,27 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
- if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
- &hdev->mbx_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
- &hdev->rst_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
- if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
- !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- system_wq, &hdev->service_task,
+ hclge_wq, &hdev->service_task,
delay_time);
- }
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2748,6 +2731,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (!client)
return;
+
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
state = hclge_get_mac_phy_link(hdev);
if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -2761,6 +2748,8 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
hdev->hw.mac.link = state;
}
+
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
static void hclge_update_port_capability(struct hclge_mac *mac)
@@ -2831,6 +2820,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
return ret;
}
+ /* In some cases, the MAC speed obtained from the IMP may be 0; it
+ * should not be assigned to mac->speed.
+ */
+ if (!le32_to_cpu(resp->speed))
+ return 0;
+
mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, the firmware is an old version;
* do not update these params
@@ -2906,7 +2901,7 @@ static int hclge_get_status(struct hnae3_handle *handle)
static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
- if (pci_num_vf(hdev->pdev) == 0) {
+ if (!pci_num_vf(hdev->pdev)) {
dev_err(&hdev->pdev->dev,
"SRIOV is disabled, can not get vport(%d) info.\n", vf);
return NULL;
@@ -2940,6 +2935,9 @@ static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
ivf->trusted = vport->vf_info.trusted;
ivf->min_tx_rate = 0;
ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
ether_addr_copy(ivf->mac, vport->vf_info.mac);
return 0;
@@ -2998,8 +2996,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- dev_info(&hdev->pdev->dev, "received event 0x%x\n",
- msix_src_reg);
*clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_ERR;
}
@@ -3172,8 +3168,10 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
/* this would be explicitly freed in the end */
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, "hclge_misc", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -3247,7 +3245,8 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 200
+#define HCLGE_RESET_WAIT_CNT 350
+
u32 val, reg, reg_bit;
u32 cnt = 0;
@@ -3264,8 +3263,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
- case HNAE3_FLR_RESET:
- break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -3273,20 +3270,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
- if (hdev->reset_type == HNAE3_FLR_RESET) {
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < HCLGE_RESET_WAIT_CNT)
- msleep(HCLGE_RESET_WATI_MS);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout: %u\n", cnt);
- return -EBUSY;
- }
-
- return 0;
- }
-
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -3352,7 +3335,19 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
-static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
struct hclge_pf_rst_sync_cmd *req;
struct hclge_desc desc;
@@ -3363,26 +3358,28 @@ static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
do {
+ /* the VF needs to bring its netdev down via mailbox during a PF or FLR reset */
+ hclge_mailbox_service_task(hdev);
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* for compatibility with old firmware, wait
* 100 ms for the VF to stop IO
*/
if (ret == -EOPNOTSUPP) {
msleep(HCLGE_RESET_SYNC_TIME);
- return 0;
+ return;
} else if (ret) {
- dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
- ret);
- return ret;
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return;
} else if (req->all_vf_ready) {
- return 0;
+ return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
hclge_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
- dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
- return -ETIME;
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}
void hclge_report_hw_error(struct hclge_dev *hdev,
@@ -3462,12 +3459,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
- case HNAE3_FLR_RESET:
- dev_info(&pdev->dev, "FLR requested\n");
- /* schedule again to check later */
- set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
- break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -3483,10 +3474,15 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
- hclge_handle_hw_msix_error(hdev, addr);
+ if (hclge_handle_hw_msix_error(hdev, addr))
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+
clear_bit(HNAE3_UNKNOWN_RESET, addr);
/* We deferred the clearing of the error event which caused the
* interrupt, since it was not possible to do that in
@@ -3551,23 +3547,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
-static int hclge_reset_prepare_down(struct hclge_dev *hdev)
-{
- int ret = 0;
-
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- /* fall through */
- case HNAE3_FLR_RESET:
- ret = hclge_set_all_vf_rst(hdev, true);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
@@ -3581,6 +3560,19 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_all_vf_rst(hdev, true);
+ if (ret)
+ return ret;
+
+ hclge_func_reset_sync_vf(hdev);
+
+ return 0;
+}
+
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
u32 reg_val;
@@ -3588,10 +3580,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* to confirm whether all running VF is ready
- * before request PF reset
- */
- ret = hclge_func_reset_sync_vf(hdev);
+ ret = hclge_func_reset_notify_vf(hdev);
if (ret)
return ret;
@@ -3611,16 +3600,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* to confirm whether all running VF is ready
- * before request PF reset
- */
- ret = hclge_func_reset_sync_vf(hdev);
+ ret = hclge_func_reset_notify_vf(hdev);
if (ret)
return ret;
-
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
hclge_handle_imp_error(hdev);
@@ -3672,6 +3654,8 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_dbg_dump_rst_info(hdev);
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
return false;
}
@@ -3747,10 +3731,9 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
-static void hclge_reset(struct hclge_dev *hdev)
+static int hclge_reset_prepare(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- enum hnae3_reset_type reset_level;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3761,45 +3744,41 @@ static void hclge_reset(struct hclge_dev *hdev)
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
- goto err_reset;
-
- ret = hclge_reset_prepare_down(hdev);
- if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclge_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- if (hclge_reset_wait(hdev))
- goto err_reset;
+ return hclge_reset_prepare_wait(hdev);
+}
+
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+ int ret;
hdev->rst_stats.hw_reset_done_cnt++;
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_reset_stack(hdev);
+ rtnl_unlock();
if (ret)
- goto err_reset_lock;
+ return ret;
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
if (ret)
- goto err_reset_lock;
+ return ret;
- rtnl_unlock();
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
@@ -3807,24 +3786,23 @@ static void hclge_reset(struct hclge_dev *hdev)
*/
if (ret &&
hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
+ if (ret)
+ return ret;
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
/* if default_reset_request has a higher level reset request,
* it should be handled as soon as possible. since some errors
@@ -3835,10 +3813,22 @@ static void hclge_reset(struct hclge_dev *hdev)
if (reset_level != HNAE3_NONE_RESET)
set_bit(reset_level, &hdev->reset_request);
+ return 0;
+}
+
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ if (hclge_reset_prepare(hdev))
+ goto err_reset;
+
+ if (hclge_reset_wait(hdev))
+ goto err_reset;
+
+ if (hclge_reset_rebuild(hdev))
+ goto err_reset;
+
return;
-err_reset_lock:
- rtnl_unlock();
err_reset:
if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
@@ -3939,34 +3929,18 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_service_task(struct work_struct *work)
+static void hclge_reset_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, rst_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hclge_reset_subtask(hdev);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
-}
-
-static void hclge_mailbox_service_task(struct work_struct *work)
-{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, mbx_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
- return;
-
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
- hclge_mbx_handler(hdev);
-
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
@@ -3986,29 +3960,62 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
}
}
-static void hclge_service_task(struct work_struct *work)
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task.work);
+ unsigned long delta = round_jiffies_relative(HZ);
+
+ /* Always handle the link updating to make sure link state is
+ * updated when it is triggered by mbx.
+ */
+ hclge_update_link_status(hdev);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
- if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
- hclge_update_stats_for_all(hdev);
- hdev->hw_stats.stats_timer = 0;
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
- hclge_update_port_info(hdev);
- hclge_update_link_status(hdev);
+ hdev->serv_processed_cnt++;
hclge_update_vport_alive(hdev);
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+ hclge_update_stats_for_all(hdev);
+
+ hclge_update_port_info(hdev);
hclge_sync_vlan_filter(hdev);
- if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
hclge_rfs_filter_expire(hdev);
- hdev->fd_arfs_expire_timer = 0;
- }
- hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclge_task_schedule(hdev, delta);
+}
+
+static void hclge_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, service_task.work);
+
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+ hclge_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delays
+ * their handling by calling hclge_task_schedule() in
+ * hclge_periodic_service_task().
+ */
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
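Taken together, the scheduling hunks above replace three independent work items with a single delayed work on the driver-private hclge_wq: each subtask is requested by setting a SCHED state bit and kicking the work, and the task itself dispatches whichever bits are set. A condensed, hypothetical sketch of that pattern (names only loosely mirror the driver; this is not the driver code):

	#include <linux/workqueue.h>
	#include <linux/bitops.h>
	#include <linux/timer.h>

	enum { STATE_RST_SCHED, STATE_MBX_SCHED };

	struct svc_ctx {
		unsigned long state;
		struct workqueue_struct *wq;	/* from alloc_workqueue() */
		struct delayed_work service_task;
	};

	static void svc_subtask_schedule(struct svc_ctx *c, int bit)
	{
		/* set the request bit first; a zero delay runs the task
		 * promptly, and test_and_set_bit() coalesces concurrent
		 * requests into a single execution
		 */
		if (!test_and_set_bit(bit, &c->state))
			mod_delayed_work(c->wq, &c->service_task, 0);
	}

	static void svc_task(struct work_struct *work)
	{
		struct svc_ctx *c = container_of(work, struct svc_ctx,
						 service_task.work);

		if (test_and_clear_bit(STATE_RST_SCHED, &c->state))
			pr_debug("run reset subtask\n");
		if (test_and_clear_bit(STATE_MBX_SCHED, &c->state))
			pr_debug("run mailbox subtask\n");

		/* the periodic part reschedules itself */
		mod_delayed_work(c->wq, &c->service_task,
				 round_jiffies_relative(HZ));
	}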
@@ -4079,7 +4086,7 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "Get vector index fail. vector = %d\n", vector);
return vector_id;
}
@@ -4654,7 +4661,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "failed to get vector index. vector=%d\n", vector);
return vector_id;
}
@@ -6106,6 +6113,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
struct hclge_fd_rule_tuples *tuples)
{
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
tuples->ip_proto = fkeys->basic.ip_proto;
tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
@@ -6114,12 +6124,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
} else {
- memcpy(tuples->src_ip,
- fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
- sizeof(tuples->src_ip));
- memcpy(tuples->dst_ip,
- fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
- sizeof(tuples->dst_ip));
+ int i;
+
+ for (i = 0; i < IPV6_SIZE; i++) {
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+ }
}
}
@@ -6562,7 +6572,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
hclge_cfg_mac_mode(hdev, en);
- ret = hclge_mac_phy_link_status_wait(hdev, en, FALSE);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
if (ret)
dev_err(&hdev->pdev->dev,
"serdes loopback config mac mode timeout\n");
@@ -6620,7 +6630,7 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
hclge_cfg_mac_mode(hdev, en);
- ret = hclge_mac_phy_link_status_wait(hdev, en, TRUE);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
if (ret)
dev_err(&hdev->pdev->dev,
"phy loopback config mac mode timeout\n");
@@ -6734,6 +6744,19 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6742,12 +6765,12 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
if (enable) {
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- /* Set the DOWN flag here to disable the service to be
- * scheduled again
- */
+ /* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- cancel_delayed_work_sync(&hdev->service_task);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by the service task */
+ smp_mb__before_atomic();
+ hclge_flush_link_update(hdev);
}
}
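The disable path above is a flag-and-flush idiom rather than a cancel: since the delayed work is now shared, it cannot simply be cancelled, so the code marks the device DOWN, orders that store against the service task's reads, and busy-waits in hclge_flush_link_update() until any link update already in flight has drained (LINK_UPDATING clear, the service counter advancing, or a timeout). Restated compactly with the driver's own names (a sketch of the two sides, not new code):

	/* writer (hclge_set_timer_task, disable path): */
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	smp_mb__before_atomic();	/* make DOWN visible to the task */
	hclge_flush_link_update(hdev);

	/* reader (hclge_update_link_status in the service task): */
	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
		return;			/* an update is already running */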
@@ -7483,7 +7506,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
@@ -7496,7 +7518,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
kfree(mac);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
@@ -8257,7 +8278,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -8265,7 +8285,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static void hclge_restore_vlan_table(struct hnae3_handle *handle)
@@ -8277,7 +8296,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
u16 state, vlan_id;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
@@ -8303,8 +8321,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
break;
}
}
-
- mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -9256,6 +9272,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
@@ -9269,38 +9286,57 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_FLR_WAIT_MS 100
-#define HCLGE_FLR_WAIT_CNT 50
- struct hclge_dev *hdev = ae_dev->priv;
- int cnt = 0;
+#define HCLGE_FLR_RETRY_WAIT_MS 500
+#define HCLGE_FLR_RETRY_CNT 5
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclge_reset_event(hdev->pdev, NULL);
+ struct hclge_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGE_FLR_WAIT_CNT)
- msleep(HCLGE_FLR_WAIT_MS);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclge_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+ /* disable the misc vector before the FLR is done */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.flr_rst_cnt++;
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_reset_rebuild(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
@@ -9342,21 +9378,17 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
- mutex_init(&hdev->vport_cfg_mutex);
spin_lock_init(&hdev->fd_rule_lock);
+ sema_init(&hdev->reset_sem, 1);
ret = hclge_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI init failed\n");
+ if (ret)
goto out;
- }
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
+ if (ret)
goto err_pci_uninit;
- }
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -9364,11 +9396,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
- if (ret) {
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
- ret);
+ if (ret)
goto err_cmd_uninit;
- }
ret = hclge_configure(hdev);
if (ret) {
@@ -9383,12 +9412,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev,
- "Misc IRQ(vector0) init error, ret = %d.\n",
- ret);
+ if (ret)
goto err_msi_uninit;
- }
ret = hclge_alloc_tqps(hdev);
if (ret) {
@@ -9397,31 +9422,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_alloc_vport(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
ret = hclge_map_tqp(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
}
ret = hclge_init_umv_space(hdev);
- if (ret) {
- dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ if (ret)
goto err_mdiobus_unreg;
- }
ret = hclge_mac_init(hdev);
if (ret) {
@@ -9477,8 +9493,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
- INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Setup affinity after service timer setup because add_timer_on
* is called in affinity notify.
@@ -9512,6 +9526,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_mdiobus_unreg:
@@ -9534,7 +9550,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
- memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -9821,6 +9837,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to reinit manager table, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_init_fd_config(hdev);
if (ret) {
dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
@@ -9895,7 +9918,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
- mutex_destroy(&hdev->vport_cfg_mutex);
ae_dev->priv = NULL;
}
@@ -10157,10 +10179,8 @@ static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
int *bd_num_list,
u32 type_num)
{
-#define HCLGE_DFX_REG_BD_NUM 4
-
u32 entries_per_desc, desc_index, index, offset, i;
- struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int ret;
ret = hclge_query_bd_num_cmd_send(hdev, desc);
@@ -10273,10 +10293,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
buf_len = sizeof(*desc_src) * bd_num_max;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ if (!desc_src)
return -ENOMEM;
- }
for (i = 0; i < dfx_reg_type_num; i++) {
bd_num = bd_num_list[i];
@@ -10611,6 +10629,12 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ if (!hclge_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algo);
return 0;
@@ -10619,6 +10643,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
hnae3_unregister_ae_algo(&ae_algo);
+ destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);
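One ordering detail worth noting in the module hooks above: the workqueue is allocated before hnae3_register_ae_algo() and destroyed only after hnae3_unregister_ae_algo(), so no device can queue service work onto a workqueue that does not exist. In miniature (hypothetical names, same shape):

	static struct workqueue_struct *wq;

	static int __init mod_init(void)
	{
		wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "example");
		if (!wq)
			return -ENOMEM;
		/* register users of wq only once it exists */
		return 0;
	}

	static void __exit mod_exit(void)
	{
		/* unregister users first so nothing new gets queued */
		destroy_workqueue(wq);	/* drains remaining work, then frees */
	}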
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebb4c6e9aed3..f78cbb4cc85e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -139,6 +139,8 @@
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
+
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
@@ -208,13 +210,14 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_NIC_REGISTERED,
HCLGE_STATE_ROCE_REGISTERED,
HCLGE_STATE_SERVICE_INITED,
- HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_RST_SERVICE_SCHED,
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
+ HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX
};
@@ -454,11 +457,7 @@ struct hclge_mac_stats {
u64 mac_rx_ctrl_pkt_num;
};
-#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
-struct hclge_hw_stats {
- struct hclge_mac_stats mac_stats;
- u32 stats_timer;
-};
+#define HCLGE_STATS_TIMER_INTERVAL 300UL
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
@@ -549,7 +548,7 @@ struct key_info {
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
-#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
+#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
@@ -712,7 +711,7 @@ struct hclge_dev {
struct hnae3_ae_dev *ae_dev;
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
- struct hclge_hw_stats hw_stats;
+ struct hclge_mac_stats mac_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -723,6 +722,7 @@ struct hclge_dev {
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
struct hclge_rst_stats rst_stats;
+ struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -774,8 +774,6 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list reset_timer;
struct delayed_work service_task;
- struct work_struct rst_service_task;
- struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -811,7 +809,8 @@ struct hclge_dev {
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
- u16 fd_arfs_expire_timer;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
@@ -825,8 +824,6 @@ struct hclge_dev {
u16 share_umv_size;
struct mutex umv_mutex; /* protect share_umv_size */
- struct mutex vport_cfg_mutex; /* Protect stored vf table */
-
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 0b433ebe6a2d..a3c0822191a9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -86,10 +86,12 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- enum hnae3_reset_type reset_type;
+ u16 reset_type;
u8 msg_data[2];
u8 dest_vfid;
+ BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
+
dest_vfid = (u8)vport->vport_id;
if (hdev->reset_type == HNAE3_FUNC_RESET)
@@ -635,7 +637,6 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
#define LINK_STATUS_OFFSET 1
#define LINK_FAIL_CODE_OFFSET 2
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
hclge_task_schedule(hdev, 0);
if (!req->msg[LINK_STATUS_OFFSET])
@@ -798,13 +799,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_get_link_mode(vport, req);
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
- mutex_lock(&hdev->vport_cfg_mutex);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true);
- mutex_unlock(&hdev->vport_cfg_mutex);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
ret = hclge_get_vf_media_type(vport, req);
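
The mailbox hunk above narrows the reset type from an enum to a u16 before it goes on the wire, and the added BUILD_BUG_ON is what makes that narrowing safe: if the enum ever grows past 16 bits, the driver fails to compile instead of truncating silently. The idiom in isolation, with illustrative demo names:

#include <linux/build_bug.h>
#include <linux/limits.h>
#include <linux/types.h>

enum demo_reset_type {
	DEMO_NONE_RESET,
	DEMO_FUNC_RESET,
	DEMO_MAX_RESET,		/* must fit the u16 wire format */
};

static u16 demo_encode_reset_type(enum demo_reset_type type)
{
	/* evaluated at compile time; breaks the build if it ever trips */
	BUILD_BUG_ON(DEMO_MAX_RESET > U16_MAX);

	return (u16)type;
}
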
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 180224eab1ca..28db13253a5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -566,7 +566,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
*/
kinfo->num_tc = vport->vport_id ? 1 :
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
+ vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
(vport->vport_id ? (vport->vport_id - 1) : 0);
max_rss_size = min_t(u16, hdev->rss_size_max,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index af2245e3bb95..f38d236ebf4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -443,7 +443,7 @@ void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
- clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
hclgevf_cmd_uninit_regs(&hdev->hw);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 25d78a5aaa34..d6597206e692 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -16,6 +16,8 @@
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
+static struct workqueue_struct *hclgevf_wq;
+
static const struct pci_device_id ae_algovf_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
@@ -440,6 +442,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
struct hnae3_client *rclient;
struct hnae3_client *client;
+ if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
client = handle->client;
rclient = hdev->roce_client;
@@ -452,6 +457,8 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
rclient->ops->link_status_change(rhandle, !!link_state);
hdev->hw.mac.link = link_state;
}
+
+ clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}
static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
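
The HCLGEVF_STATE_LINK_UPDATING bit added above is an atomic reentrancy guard: test_and_set_bit() either claims the flag and lets the caller proceed, or reports that another context is already mid-update, and clear_bit() releases it on the way out. A self-contained sketch (demo names are illustrative):

#include <linux/bitops.h>

#define DEMO_STATE_LINK_UPDATING	0	/* bit index in the state word */

static unsigned long demo_state;

static void demo_update_link_status(int link_state)
{
	/* returns the previous bit value: nonzero means another caller
	 * already owns the update, so back off instead of racing it
	 */
	if (test_and_set_bit(DEMO_STATE_LINK_UPDATING, &demo_state))
		return;

	/* ... propagate link_state to the client callbacks ... */

	clear_bit(DEMO_STATE_LINK_UPDATING, &demo_state);
}
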
@@ -1309,14 +1316,13 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
msg_data[0] = is_kill;
memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
memcpy(&msg_data[3], &proto, sizeof(proto));
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
-
/* when removing the hw vlan filter fails, record the vlan id,
 * and try to remove it from hw later, to be consistent
 * with the stack.
*/
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
if (is_kill && ret)
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
@@ -1404,32 +1410,6 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
return ret;
}
-static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
-}
-
-static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
- unsigned long delay_us,
- unsigned long wait_cnt)
-{
- unsigned long cnt = 0;
-
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < wait_cnt)
- usleep_range(delay_us, delay_us * 2);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1440,11 +1420,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- if (hdev->reset_type == HNAE3_FLR_RESET)
- return hclgevf_flr_poll_timeout(hdev,
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_CNT);
- else if (hdev->reset_type == HNAE3_VF_RESET)
+ if (hdev->reset_type == HNAE3_VF_RESET)
ret = readl_poll_timeout(hdev->hw.io_base +
HCLGEVF_VF_RST_ING, val,
!(val & HCLGEVF_VF_RST_ING_BIT),
@@ -1516,7 +1492,8 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
/* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false);
- return 0;
+ /* bring up the nic to enable TX/RX again */
+ return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1525,18 +1502,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
int ret = 0;
- switch (hdev->reset_type) {
- case HNAE3_VF_FUNC_RESET:
+ if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
hdev->rst_stats.vf_func_rst_cnt++;
- break;
- case HNAE3_FLR_RESET:
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
- break;
- default:
- break;
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -1591,11 +1560,12 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
} else {
+ set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
hclgevf_dump_rst_info(hdev);
}
}
-static int hclgevf_reset(struct hclgevf_dev *hdev)
+static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
@@ -1605,61 +1575,64 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
*/
ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++;
- rtnl_lock();
+ rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclgevf_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- /* check if VF could successfully fetch the hardware reset completion
- * status from the hardware
- */
- ret = hclgevf_reset_wait(hdev);
- if (ret) {
- /* can't do much in this situation, will disable VF */
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to fetch H/W reset completion status\n",
- ret);
- goto err_reset;
- }
+ return hclgevf_reset_prepare_wait(hdev);
+}
+
+static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
hdev->rst_stats.hw_rst_done_cnt++;
rtnl_lock();
-
/* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
+ rtnl_unlock();
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
- goto err_reset_lock;
+ return ret;
}
- /* bring up the nic to enable TX/RX again */
- ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
- rtnl_unlock();
-
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ return 0;
+}
+
+static void hclgevf_reset(struct hclgevf_dev *hdev)
+{
+ if (hclgevf_reset_prepare(hdev))
+ goto err_reset;
+
+ /* check if VF could successfully fetch the hardware reset completion
+ * status from the hardware
+ */
+ if (hclgevf_reset_wait(hdev)) {
+ /* can't do much in this situation, will disable VF */
+ dev_err(&hdev->pdev->dev,
+ "failed to fetch H/W reset completion status\n");
+ goto err_reset;
+ }
+
+ if (hclgevf_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
- return ret;
-err_reset_lock:
- rtnl_unlock();
err_reset:
hclgevf_reset_err_handle(hdev);
-
- return ret;
}
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
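
The refactor above splits the old monolithic hclgevf_reset() into prepare, wait, and rebuild stages, so the FLR path can run just prepare and rebuild around the PCI core's own reset while the normal path chains all three. Reduced to its control-flow shape (the demo_* helpers are stubs, not the driver's functions):

static int demo_reset_prepare(void)
{
	/* quiesce TX/RX and tell firmware a reset is coming */
	return 0;
}

static int demo_reset_wait(void)
{
	/* poll hardware for reset completion */
	return 0;
}

static int demo_reset_rebuild(void)
{
	/* re-initialize the stack and bring the nic back up */
	return 0;
}

static void demo_reset_err_handle(void)
{
	/* decide: retry the reset, or mark it failed */
}

static void demo_reset(void)
{
	if (demo_reset_prepare())
		goto err_reset;

	if (demo_reset_wait())
		goto err_reset;

	if (demo_reset_rebuild())
		goto err_reset;

	return;

err_reset:
	/* all failure paths funnel into one recovery decision */
	demo_reset_err_handle();
}
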
@@ -1722,25 +1695,60 @@ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
set_bit(rst_type, &hdev->default_reset_request);
}
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGEVF_FLR_WAIT_MS 100
-#define HCLGEVF_FLR_WAIT_CNT 50
+#define HCLGEVF_FLR_RETRY_WAIT_MS 500
+#define HCLGEVF_FLR_RETRY_CNT 5
+
struct hclgevf_dev *hdev = ae_dev->priv;
- int cnt = 0;
+ int retry_cnt = 0;
+ int ret;
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclgevf_reset_event(hdev->pdev, NULL);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclgevf_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGEVF_FLR_WAIT_CNT)
- msleep(HCLGEVF_FLR_WAIT_MS);
+ /* disable misc vector before FLR done */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ hdev->rst_stats.flr_rst_cnt++;
+}
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclgevf_reset_rebuild(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev, "failed to rebuild, ret=%d\n",
+ ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
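
hclgevf_flr_prepare() above adds a retry loop: if the prepare stage fails while a reset is still pending, it releases the reset semaphore, sleeps, and tries again up to five times, leaving the semaphore held on the successful exit so hclgevf_flr_done() can release it after the rebuild. A compressed sketch of that loop (demo names are illustrative):

#include <linux/delay.h>
#include <linux/semaphore.h>

#define DEMO_FLR_RETRY_WAIT_MS	500
#define DEMO_FLR_RETRY_CNT	5

static struct semaphore demo_reset_sem;

static int demo_prepare(void)
{
	return 0;	/* stub for the real prepare stage */
}

static void demo_flr_prepare(void)
{
	int retry_cnt = 0;

retry:
	down(&demo_reset_sem);
	if (demo_prepare() && retry_cnt++ < DEMO_FLR_RETRY_CNT) {
		/* drop the lock so a competing reset can make progress,
		 * then back off before retrying
		 */
		up(&demo_reset_sem);
		msleep(DEMO_FLR_RETRY_WAIT_MS);
		goto retry;
	}

	/* on exit the semaphore stays held; the flr_done counterpart
	 * releases it once the device has been rebuilt
	 */
}
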
@@ -1767,62 +1775,37 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
- set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->rst_service_task);
- }
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
- set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->mbx_service_task);
- }
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}
-static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay)
{
- if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
- !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}
-static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
-{
- /* if we have any pending mailbox event then schedule the mbx task */
- if (hdev->mbx_event_pending)
- hclgevf_mbx_task_schedule(hdev);
-
- if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
- hclgevf_reset_task_schedule(hdev);
-}
-
-static void hclgevf_service_timer(struct timer_list *t)
-{
- struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies +
- HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
-
- hdev->stats_timer++;
- hclgevf_task_schedule(hdev);
-}
-
-static void hclgevf_reset_service_task(struct work_struct *work)
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
- struct hclgevf_dev *hdev =
- container_of(work, struct hclgevf_dev, rst_service_task);
- int ret;
-
- if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
@@ -1836,12 +1819,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
hdev->last_reset_time = jiffies;
while ((hdev->reset_type =
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET) {
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF stack reset failed %d.\n", ret);
- }
+ != HNAE3_NONE_RESET)
+ hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1882,42 +1861,29 @@ static void hclgevf_reset_service_task(struct work_struct *work)
hclgevf_reset_task_schedule(hdev);
}
+ hdev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
-static void hclgevf_mailbox_service_task(struct work_struct *work)
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
- struct hclgevf_dev *hdev;
-
- hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+ if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ return;
if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
return;
- clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
hclgevf_mbx_async_handler(hdev);
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
-static void hclgevf_keep_alive_timer(struct timer_list *t)
-{
- struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
-
- schedule_work(&hdev->keep_alive_task);
- mod_timer(&hdev->keep_alive_timer, jiffies +
- HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-}
-
-static void hclgevf_keep_alive_task(struct work_struct *work)
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
- struct hclgevf_dev *hdev;
u8 respmsg;
int ret;
- hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
-
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
@@ -1928,19 +1894,32 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
"VF sends keep alive cmd failed(=%d)\n", ret);
}
-static void hclgevf_service_task(struct work_struct *work)
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *handle;
- struct hclgevf_dev *hdev;
+ unsigned long delta = round_jiffies_relative(HZ);
+ struct hnae3_handle *handle = &hdev->nic;
- hdev = container_of(work, struct hclgevf_dev, service_task);
- handle = &hdev->nic;
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
- if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
- hclgevf_tqps_update_stats(handle);
- hdev->stats_timer = 0;
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
+ hdev->serv_processed_cnt++;
+ if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
+ hclgevf_keep_alive(hdev);
+
+ if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
+ hclgevf_tqps_update_stats(handle);
+
/* request the link status from the PF. PF would be able to tell VF
* about such updates in future so we might remove this later
*/
@@ -1950,9 +1929,27 @@ static void hclgevf_service_task(struct work_struct *work)
hclgevf_sync_vlan_filter(hdev);
- hclgevf_deferred_task_schedule(hdev);
+ hdev->last_serv_processed = jiffies;
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+out:
+ hclgevf_task_schedule(hdev, delta);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
+ service_task.work);
+
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+ hclgevf_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delayed the
+ * handling by calling hclgevf_task_schedule() in
+ * hclgevf_periodic_service_task()
+ */
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
}
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
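
The hunk above is the payoff of the rework: three separate work items collapse into one delayed work whose handler dispatches to the reset, mailbox, and periodic subtasks, each of which returns immediately unless its _SCHED bit is set; reset and mailbox run a second time in case the periodic pass re-armed them. The dispatch skeleton, as a sketch with stub subtasks:

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_service_task;

static void demo_reset_subtask(void)    { /* no-op unless RST_SCHED is set */ }
static void demo_mailbox_subtask(void)  { /* no-op unless MBX_SCHED is set */ }
static void demo_periodic_subtask(void) { /* stats, keepalive, reschedule */ }

static void demo_service_task_fn(struct work_struct *work)
{
	demo_reset_subtask();
	demo_mailbox_subtask();
	demo_periodic_subtask();

	/* run the event subtasks once more: the periodic pass may have
	 * set their _SCHED bits while this invocation was in flight
	 */
	demo_reset_subtask();
	demo_mailbox_subtask();
}

static void demo_task_schedule(unsigned long delay)
{
	/* mod_delayed_work() re-arms already-queued work to an earlier
	 * deadline, so an event can pull the next run forward to "now"
	 */
	mod_delayed_work(demo_wq, &demo_service_task, delay);
}

static int demo_setup(void)
{
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_service_task, demo_service_task_fn);
	return 0;
}
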
@@ -2010,11 +2007,6 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
-static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
-{
- writel(en ? 1 : 0, vector->addr);
-}
-
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
enum hclgevf_evt_cause event_cause;
@@ -2189,16 +2181,31 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
false);
}
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclgevf_task_schedule(hdev, 0);
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclgevf_flush_link_update(hdev);
}
}
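
hclgevf_set_timer_task() now pairs the DOWN bit with a memory barrier and the bounded flush defined just above, so a concurrent link update either observes DOWN or is waited out rather than cancelled. The bounded-poll idiom on its own (demo names are illustrative; the timeout counts iterations, not jiffies):

#include <linux/bitops.h>
#include <linux/delay.h>

#define DEMO_FLUSH_TIMEOUT	100000	/* max poll iterations */
#define DEMO_STATE_BUSY		0

static unsigned long demo_state;

static void demo_flush_inflight(void)
{
	int i = 0;

	/* wait for any in-flight critical section to drain, but give
	 * up after a fixed number of short sleeps so this cannot hang
	 */
	while (test_bit(DEMO_STATE_BUSY, &demo_state) &&
	       i++ < DEMO_FLUSH_TIMEOUT)
		usleep_range(1, 2);
}
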
@@ -2245,16 +2252,12 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
ret = hclgevf_set_alive(handle, true);
if (ret)
return ret;
- mod_timer(&hdev->keep_alive_timer, jiffies +
- HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
-
return 0;
}
@@ -2267,27 +2270,18 @@ static void hclgevf_client_stop(struct hnae3_handle *handle)
if (ret)
dev_warn(&hdev->pdev->dev,
"%s failed %d\n", __func__, ret);
-
- del_timer_sync(&hdev->keep_alive_timer);
- cancel_work_sync(&hdev->keep_alive_task);
}
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
- /* setup tasks for the MBX */
- INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
- /* setup tasks for service timer */
- timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
-
- INIT_WORK(&hdev->service_task, hclgevf_service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
-
- INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
mutex_init(&hdev->mbx_resp.mbx_mutex);
+ sema_init(&hdev->reset_sem, 1);
/* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
@@ -2298,18 +2292,8 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
- if (hdev->keep_alive_timer.function)
- del_timer_sync(&hdev->keep_alive_timer);
- if (hdev->keep_alive_task.func)
- cancel_work_sync(&hdev->keep_alive_task);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
@@ -2383,8 +2367,10 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
hclgevf_get_misc_vector(hdev);
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGEVF_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
- 0, "hclgevf_cmd", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
hdev->misc_vector.vector_irq);
@@ -2611,11 +2597,11 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGEVF_MSIX_OFT_ROCEE_M,
HCLGEVF_MSIX_OFT_ROCEE_S);
hdev->num_roce_msix =
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
/* the nic's msix number always equals the roce's. */
@@ -2628,7 +2614,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
hdev->num_nic_msix = hdev->num_msi;
@@ -2725,16 +2711,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
int ret;
ret = hclgevf_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI initialization failed\n");
+ if (ret)
return ret;
- }
ret = hclgevf_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ if (ret)
goto err_cmd_queue_init;
- }
ret = hclgevf_cmd_init(hdev);
if (ret)
@@ -2742,11 +2724,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
/* Get vf resource */
ret = hclgevf_query_vf_resource(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query vf status error, ret = %d.\n", ret);
+ if (ret)
goto err_cmd_init;
- }
ret = hclgevf_init_msi(hdev);
if (ret) {
@@ -2756,13 +2735,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_init(hdev);
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ hdev->reset_type = HNAE3_NONE_RESET;
ret = hclgevf_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
- ret);
+ if (ret)
goto err_misc_irq_init;
- }
set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
@@ -2779,10 +2756,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
ret = hclgevf_set_handle_info(hdev);
- if (ret) {
- dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ if (ret)
goto err_config;
- }
ret = hclgevf_config_gro(hdev, true);
if (ret)
@@ -2807,6 +2782,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_config:
@@ -2838,7 +2815,6 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2853,10 +2829,6 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hdev = ae_dev->priv;
- timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
- INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
-
return 0;
}
@@ -3213,6 +3185,12 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+ if (!hclgevf_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algovf);
return 0;
@@ -3221,6 +3199,7 @@ static int hclgevf_init(void)
static void hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
+ destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2f4c81bf4169..fee8d97f323c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -142,12 +142,13 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
/* task states */
- HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
+ HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_RST_FAIL,
};
struct hclgevf_mac {
@@ -220,6 +221,7 @@ struct hclgevf_rss_cfg {
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
};
struct hclgevf_rst_stats {
@@ -251,6 +253,7 @@ struct hclgevf_dev {
unsigned long reset_state; /* requested, pending */
struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
+ struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
u16 num_tqps; /* num task queue pairs of this PF */
@@ -283,12 +286,7 @@ struct hclgevf_dev {
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
- struct timer_list service_timer;
- struct timer_list keep_alive_timer;
- struct work_struct service_task;
- struct work_struct keep_alive_task;
- struct work_struct rst_service_task;
- struct work_struct mbx_service_task;
+ struct delayed_work service_task;
struct hclgevf_tqp *htqp;
@@ -298,7 +296,8 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
- u32 stats_timer;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2411ad270c98..02a14f5e7fe3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -766,7 +766,7 @@ static void hinic_set_rx_mode(struct net_device *netdev)
queue_work(nic_dev->workq, &rx_mode_work->work);
}
-static void hinic_tx_timeout(struct net_device *netdev)
+static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
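
From the hinic change onward, the recurring one-liner in this merge is a core API change: .ndo_tx_timeout now receives the index of the queue whose watchdog expired, so drivers no longer need to rescan every queue to find the hung one (the removed scan loop in the i40e hunk further below shows exactly what this saves). A sketch of a handler against the new prototype (demo names are illustrative):

#include <linux/netdevice.h>

/* the stack now reports which Tx queue's watchdog fired */
static void demo_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, txqueue);

	netdev_err(netdev, "Tx queue %u timed out, trans_start %lu\n",
		   txqueue, READ_ONCE(txq->trans_start));

	/* runs from the watchdog timer, so schedule recovery work
	 * instead of resetting the device inline
	 */
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_tx_timeout	= demo_tx_timeout,
};
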
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 92929750f832..bef676d93339 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -363,7 +363,7 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
@@ -1019,7 +1019,7 @@ err_irq_dev:
return res;
}
-static void i596_tx_timeout (struct net_device *dev)
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct i596_private *lp = dev->ml_priv;
int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index bb3b8adbe4f0..a0bfb509e002 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -66,7 +66,7 @@ static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
-static void ether1_timeout(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev, unsigned int txqueue);
/* ------------------------------------------------------------------------- */
@@ -650,7 +650,7 @@ ether1_open (struct net_device *dev)
}
static void
-ether1_timeout(struct net_device *dev)
+ether1_timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
dev->name);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f9742af7f142..b03757e169e4 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -351,7 +351,7 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
@@ -936,7 +936,7 @@ out_remove_rx_bufs:
return -EAGAIN;
}
-static void i596_tx_timeout (struct net_device *dev)
+static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
struct i596_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6436a98c5953..22f5887578b2 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -91,10 +91,10 @@ static int sni_82596_probe(struct platform_device *dev)
idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
if (!res || !ca || !options || !idprom)
return -ENODEV;
- mpu_addr = ioremap_nocache(res->start, 4);
+ mpu_addr = ioremap(res->start, 4);
if (!mpu_addr)
return -ENOMEM;
- ca_addr = ioremap_nocache(ca->start, 4);
+ ca_addr = ioremap(ca->start, 4);
if (!ca_addr)
goto probe_failed_free_mpu;
@@ -110,7 +110,7 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->base_addr = res->start;
netdevice->irq = platform_get_irq(dev, 0);
- eth_addr = ioremap_nocache(idprom->start, 0x10);
+ eth_addr = ioremap(idprom->start, 0x10);
if (!eth_addr)
goto probe_failed;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 1a86184d44c0..4564ee02c95f 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -125,7 +125,7 @@ static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void sun3_82586_timeout(struct net_device *dev);
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue);
#if 0
static void sun3_82586_dump(struct net_device *,void *);
#endif
@@ -965,7 +965,7 @@ static void startrecv586(struct net_device *dev)
WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
}
-static void sun3_82586_timeout(struct net_device *dev)
+static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
struct priv *p = netdev_priv(dev);
#ifndef NO_NOPCOMMANDS
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 13e30eba5349..0273fb7a9d01 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2786,7 +2786,7 @@ out:
return;
}
-static void ehea_tx_watchdog(struct net_device *dev)
+static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct ehea_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 2e40425d8a34..b7fc17756c51 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -776,7 +776,7 @@ static void emac_reset_work(struct work_struct *work)
mutex_unlock(&dev->link_lock);
}
-static void emac_tx_timeout(struct net_device *ndev)
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct emac_instance *dev = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c90080781924..c75239d8820f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -184,7 +184,7 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
netdev_err(netdev, "Device down!\n");
return -ENODEV;
}
- if (retry--)
+ if (!retry--)
break;
if (wait_for_completion_timeout(comp_done, div_timeout))
return 0;
@@ -2282,7 +2282,7 @@ err:
return -ret;
}
-static void ibmvnic_tx_timeout(struct net_device *dev)
+static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a65d5a9ba7db..1b8d015ebfb0 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2316,7 +2316,7 @@ static void e100_down(struct nic *nic)
e100_rx_clean_list(nic);
}
-static void e100_tx_timeout(struct net_device *netdev)
+static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nic *nic = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index aca97b084003..2bced34c19ba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -134,7 +134,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
-static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
@@ -3488,7 +3488,7 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6c51b1bad8c4..37a2314d3e6b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -185,13 +185,12 @@ struct e1000_phy_regs {
/* board specific private data structure */
struct e1000_adapter {
+ struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
- struct delayed_work watchdog_task;
-
- struct workqueue_struct *e1000_workqueue;
+ struct work_struct watchdog_task;
const struct e1000_info *ei;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fe7997c18a10..db4ea58bac82 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
@@ -4723,7 +4721,7 @@ int e1000e_close(struct net_device *netdev)
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
}
napi_disable(&adapter->napi);
@@ -5073,12 +5071,13 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name, adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
- (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
- (ctrl & E1000_CTRL_RFCE) ? "Rx" :
- (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
+ netdev_info(adapter->netdev,
+ "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -5155,11 +5154,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
}
}
+/**
+ * e1000_watchdog - Timer Call-back
+ * @t: pointer to the timer_list embedded in our adapter
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+ struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
- watchdog_task.work);
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -5307,7 +5320,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_stop_queue(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5407,9 +5420,8 @@ link_up:
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
- queue_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task,
- round_jiffies(2 * HZ));
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
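
The e1000e hunks revert the watchdog from a driver-private workqueue back to the classic timer-plus-work split: the timer callback runs in atomic context, so it only schedules the work item, and the work re-arms the timer when it finishes. The skeleton of that pattern (demo names are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct timer_list watchdog_timer;
	struct work_struct watchdog_task;
};

static void demo_watchdog(struct timer_list *t)
{
	struct demo_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* timer context is atomic: defer the real work to process context */
	schedule_work(&adapter->watchdog_task);
}

static void demo_watchdog_task(struct work_struct *work)
{
	struct demo_adapter *adapter =
		container_of(work, struct demo_adapter, watchdog_task);

	/* ... check link, update stats ... */

	/* re-arm; round_jiffies() batches wakeups to save power */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
}

static void demo_setup(struct demo_adapter *adapter)
{
	timer_setup(&adapter->watchdog_timer, demo_watchdog, 0);
	INIT_WORK(&adapter->watchdog_task, demo_watchdog_task);
}
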
@@ -5929,7 +5941,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void e1000_tx_timeout(struct net_device *netdev)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7449,21 +7461,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
- e1000e_driver_name);
-
- if (!adapter->e1000_workqueue) {
- err = -ENOMEM;
- goto err_workqueue;
- }
-
- INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
- queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
- 0);
-
+ timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7557,9 +7559,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_register:
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
@@ -7604,17 +7603,15 @@ static void e1000_remove(struct pci_dev *pdev)
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
- cancel_delayed_work(&adapter->watchdog_task);
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 68baee04dc58..0637ccadee79 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -696,21 +696,24 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
/**
* fm10k_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: the index of the Tx queue that timed out
**/
-static void fm10k_tx_timeout(struct net_device *netdev)
+static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_ring *tx_ring;
bool real_tx_hang = false;
- int i;
-
-#define TX_TIMEO_LIMIT 16000
- for (i = 0; i < interface->num_tx_queues; i++) {
- struct fm10k_ring *tx_ring = interface->tx_ring[i];
- if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
- real_tx_hang = true;
+ if (txqueue >= interface->num_tx_queues) {
+ WARN(1, "invalid Tx queue index %d", txqueue);
+ return;
}
+ tx_ring = interface->tx_ring[txqueue];
+ if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
+ real_tx_hang = true;
+
+#define TX_TIMEO_LIMIT 16000
if (real_tx_hang) {
fm10k_tx_timeout_reset(interface);
} else {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cb6367334ca7..4833187bd259 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1152,7 +1152,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9f0a4e92a231..37514a75f928 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
/* fall through */
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d4055037af89..45b90eb11adb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
- hw_dbg(hw, "Buffer to small for PBA data.\n");
+ hw_dbg(hw, "Buffer too small for PBA data.\n");
return I40E_ERR_PARAM;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1ccabeafa44c..8c3e753bfb9d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -301,43 +301,24 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
-static void i40e_tx_timeout(struct net_device *netdev)
+static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ring *tx_ring = NULL;
- unsigned int i, hung_queue = 0;
+ unsigned int i;
u32 head, val;
pf->tx_timeout_count++;
- /* find the stopped queue the same way the stack does */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
- unsigned long trans_start;
-
- q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start;
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
- hung_queue = i;
- break;
- }
- }
-
- if (i == netdev->num_tx_queues) {
- netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- } else {
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (hung_queue ==
- vsi->tx_rings[i]->queue_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
+ /* with txqueue index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (txqueue ==
+ vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
}
}
}
@@ -363,14 +344,14 @@ static void i40e_tx_timeout(struct net_device *netdev)
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
- vsi->seid, hung_queue, tx_ring->next_to_clean,
+ vsi->seid, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use,
readl(tx_ring->tail), val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
- pf->tx_timeout_recovery_level, hung_queue);
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
case 1:
@@ -6823,8 +6804,8 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
if (i40e_enabled_xdp_vsi(vsi)) {
- /* Make sure that in-progress ndo_xdp_xmit
- * calls are completed.
+ /* Make sure that in-progress ndo_xdp_xmit and
+ * ndo_xsk_wakeup calls are completed.
*/
synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
@@ -12546,8 +12527,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
old_prog = xchg(&vsi->xdp_prog, prog);
- if (need_reset)
+ if (need_reset) {
+ if (!prog)
+ /* Wait until ndo_xsk_wakeup completes. */
+ synchronize_rcu();
i40e_reset_and_rebuild(pf, true, true);
+ }
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 6a3f0fc56c3b..56b9e445732b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
}
/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
- vqs->rx_queues > I40E_MAX_VF_QUEUES ||
- vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
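
i40e_vc_validate_vqs_bitmaps() tightens the old check: the queue selections are bitmaps, so the correct bound is BIT(I40E_MAX_VF_QUEUES) on the bitmap value, not a comparison of the raw value against the queue count. A standalone sketch of the same validation (the demo constant is illustrative):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_MAX_VF_QUEUES	16	/* bits 0..15 are valid queue IDs */

static bool demo_validate_queue_bitmaps(u32 rx_queues, u32 tx_queues)
{
	/* reject an empty selection and any bit at or above the limit */
	if ((!rx_queues && !tx_queues) ||
	    rx_queues >= BIT(DEMO_MAX_VF_QUEUES) ||
	    tx_queues >= BIT(DEMO_MAX_VF_QUEUES))
		return false;

	return true;
}
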
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index d07e1a890428..0b7d29192b2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -269,7 +269,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -306,7 +306,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
@@ -787,8 +787,12 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_ring *ring;
+ if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return -EAGAIN;
+
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 29de3ae96ef2..bd1b1ed323f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 821987da5698..62fe56ddcb6e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -159,7 +159,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void iavf_tx_timeout(struct net_device *netdev)
+static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
*
* Returns ptr to the filter object or NULL when no memory available.
**/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
- const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
struct iavf_mac_filter *f;
@@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work)
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
- struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2181,6 +2180,16 @@ continue_reset:
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* Delete the filter for the current MAC address; it could have
+ * been changed by the PF via an administratively set MAC.
+ * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
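
The iavf reset path deletes the current MAC's filter with list_for_each_entry_safe(), the iterator variant that caches the next node so the loop survives list_del() on the current one, then re-adds the remainder with the plain iterator. A compact sketch of the deletion idiom (demo types are illustrative):

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_mac_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
};

static void demo_del_matching_filters(struct list_head *filters,
				      const u8 *macaddr)
{
	struct demo_mac_filter *f, *ftmp;

	/* _safe keeps a lookahead pointer, so list_del(&f->list) is legal */
	list_for_each_entry_safe(f, ftmp, filters, list) {
		if (ether_addr_equal(f->macaddr, macaddr)) {
			list_del(&f->list);
			kfree(f);
		}
	}
}
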
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index c46770eba320..1ab9cb339acb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 7cb829132d28..59544b0fc086 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,7 +17,8 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
- ice_flex_pipe.o \
+ ice_flex_pipe.o \
+ ice_flow.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index f972dce8aebb..cb10abb14e11 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -174,6 +174,8 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
+ struct ice_vsi *dflt_vsi; /* default VSI for this switch */
+ u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_state {
@@ -275,6 +277,7 @@ struct ice_vsi {
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
u8 vlan_ena:1;
+ u16 num_vlan;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -462,12 +465,13 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
{
struct xdp_umem **umems = ring->vsi->xsk_umems;
- int qid = ring->q_index;
+ u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ !ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
return umems[qid];
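
The ice_xsk_umem() fix turns on a subtlety of unsigned arithmetic: q_index is u16, so when the XDP offset is subtracted from an out-of-range index the result wraps to a large value rather than going negative, and only the new qid >= num_xsk_umems comparison catches it. The hazard in miniature (the demo function is illustrative):

#include <linux/types.h>

static void *demo_lookup(void **umems, u16 num_umems, u16 qid, u16 xdp_offset)
{
	qid -= xdp_offset;	/* u16 underflow wraps; it never goes negative */

	/* the bounds check must precede the array access */
	if (!umems || qid >= num_umems || !umems[qid])
		return NULL;

	return umems[qid];
}
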
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5421fc413f94..6873998cf145 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -232,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+
+#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
+#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+
+#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
@@ -1653,6 +1660,7 @@ struct ice_aqc_get_pkg_info_resp {
__le32 count;
struct ice_aqc_get_pkg_info pkg_info[1];
};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1849,6 +1857,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* debug commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 77d6a0291e97..81885efadc7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -93,7 +93,8 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector and set the default ITR values associated
+ * with this q_vector. If allocation fails, we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -108,6 +109,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
+ q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
+ q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
if (vsi->type == ICE_VSI_VF)
goto out;
/* only set affinity_mask if the CPU is online */
@@ -299,6 +302,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
@@ -320,10 +324,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (err)
return err;
- dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
ring->q_index);
} else {
+ ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
ring->netdev,
ring->q_index);
@@ -399,8 +405,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -422,8 +427,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
if (err)
- dev_info(&vsi->back->pdev->dev,
- "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
ring->xsk_umem ? "UMEM enabled " : "",
ring->q_index, pf_q);
@@ -484,8 +488,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
/* wait for the change to finish */
ret = ice_pf_rxq_wait(pf, pf_q, ena);
if (ret)
- dev_err(ice_pf_to_dev(pf),
- "VSI idx %d Rx ring %d %sable timeout\n",
+ dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
vsi->idx, pf_q, (ena ? "en" : "dis"));
return ret;
@@ -500,20 +503,15 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
*/
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- struct device *dev;
- int err;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ int v_idx, err;
- dev = ice_pf_to_dev(pf);
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
return -EEXIST;
}
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
@@ -642,8 +640,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf),
- "Failed to set LAN Tx queue context, error: %d\n",
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
return -ENODEV;
}
@@ -674,10 +671,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
@@ -688,10 +681,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
@@ -817,14 +806,12 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* queues at the hardware level anyway.
*/
if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
return -ENODEV;
}
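
The two ice_base.c hunks above work together: the default ITR values are now seeded once, when the q_vector is allocated, and ice_cfg_itr() no longer overwrites rc->itr_setting, so a value the user set through ethtool between allocation and reconfiguration survives. A minimal sketch of the idea, with the struct reduced to the one field involved and an illustrative default value (not the driver's actual constant):

#define DFLT_ITR 0x50 /* illustrative placeholder, not ICE_DFLT_*_ITR */

struct ring_container_sketch {
	u16 itr_setting;
};

/* Allocation path: seed the default exactly once. */
static void alloc_time_init(struct ring_container_sketch *rc)
{
	rc->itr_setting = DFLT_ITR;
}

/* Configure path: consume whatever is stored and never reset it, so a
 * user-provided value written between alloc and configure is preserved.
 */
static u16 cfg_time_program(const struct ring_container_sketch *rc)
{
	return rc->itr_setting; /* value that would reach the ITR register */
}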
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index fb1d930470c7..04d5db0a25bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4,28 +4,10 @@
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
+#include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 200
-#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
- ((ICE_RX_OPC_MDID << \
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
- GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
- (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
- GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
-
-#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
- wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
- (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
- (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
- (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
- (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
- GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
-
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -43,20 +25,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
- * ice_dev_onetime_setup - Temporary HW/FW workarounds
- * @hw: pointer to the HW structure
- *
- * This function provides temporary workarounds for certain issues
- * that are expected to be fixed in the HW/FW.
- */
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-#define MBX_PF_VT_PFALLOC 0x00231E80
- /* set VFs per PF */
- wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
-}
-
-/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -348,88 +316,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
- * ice_init_flex_flags
- * @hw: pointer to the hardware structure
- * @prof_id: Rx Descriptor Builder profile ID
- *
- * Function to initialize Rx flex flags
- */
-static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
-{
- u8 idx = 0;
-
- /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
- * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
- * flexiflags1[3:0] - Not used for flag programming
- * flexiflags2[7:0] - Tunnel and VLAN types
- * 2 invalid fields in last index
- */
- switch (prof_id) {
- /* Rx flex flags are currently programmed for the NIC profiles only.
- * Different flag bit programming configurations can be added per
- * profile as needed.
- */
- case ICE_RXDID_FLEX_NIC:
- case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
- ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
- ICE_FLG_FIN, idx++);
- /* flex flag 1 is not used for flexi-flag programming, skipping
- * these four FLG64 bits.
- */
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
- ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
- ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
- ICE_FLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
- ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
- ICE_FLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
- ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
- break;
-
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Flag programming for profile ID %d not supported\n",
- prof_id);
- }
-}
-
-/**
- * ice_init_flex_flds
- * @hw: pointer to the hardware structure
- * @prof_id: Rx Descriptor Builder profile ID
- *
- * Function to initialize flex descriptors
- */
-static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
-{
- enum ice_flex_rx_mdid mdid;
-
- switch (prof_id) {
- case ICE_RXDID_FLEX_NIC:
- case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
- ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
-
- mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
- ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
-
- ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
-
- ice_init_flex_flags(hw, prof_id);
- break;
-
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Field init for profile ID %d not supported\n",
- prof_id);
- }
-}
-
-/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
@@ -702,10 +588,10 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
}
/**
- * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
- * Determines the ITR/intrl granularities based on the maximum aggregate
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
@@ -863,8 +749,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- ice_dev_onetime_setup(hw);
-
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -882,9 +766,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
-
- ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
- ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
@@ -937,7 +818,7 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay;
+ u32 cnt, reg = 0, grst_delay, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
@@ -959,13 +840,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
- GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
- if (reg == ICE_RESET_DONE_MASK) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset processes done. %d\n", cnt);
break;
@@ -1601,6 +1489,114 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_aq_alloc_free_res - command to allocate/free resources
+ * @hw: pointer to the HW struct
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @opc: pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Helper function to allocate/free resources using the admin queue commands
+ */
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_alloc_free_res_cmd *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.sw_res_ctrl;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = cpu_to_le16(num_entries);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @btm: allocate from bottom
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(buf, elem, num - 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to allocate resource. */
+ buf->num_elems = cpu_to_le16(num);
+ buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
+ ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
+ if (btm)
+ buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto ice_alloc_res_exit;
+
+ memcpy(res, buf->elem, sizeof(buf->elem) * num);
+
+ice_alloc_res_exit:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated HW resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(buf, elem, num - 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to free resource. */
+ buf->num_elems = cpu_to_le16(num);
+ buf->res_type = cpu_to_le16(type);
+ memcpy(buf->elem, res, sizeof(buf->elem) * num);
+
+ status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+ kfree(buf);
+ return status;
+}
+
+/**
* ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the HW structure
* @max: value to be evenly split between each PF
@@ -3510,7 +3506,10 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
if (status)
return status;
}
-
+ /* Replay per VSI all RSS configurations */
+ status = ice_replay_rss_cfg(hw, vsi_handle);
+ if (status)
+ return status;
/* Replay per VSI all filters */
status = ice_replay_vsi_all_fltr(hw, vsi_handle);
return status;
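
The new ice_aq_alloc_free_res()/ice_alloc_hw_res()/ice_free_hw_res() helpers above give callers an array-in/array-out interface over the 0x0208/0x0209 admin queue commands. A hypothetical caller allocating and later releasing a few RSS profile-builder TCAM entries (the resource type added in the ice_adminq_cmd.h hunk earlier in this patch) might look like this sketch, with error handling reduced to the essentials:

static enum ice_status example_grab_tcam(struct ice_hw *hw)
{
	u16 tcam_ids[4];
	enum ice_status status;

	/* Ask firmware for 4 TCAM entries, scanning from the bottom of
	 * the resource space (btm = true).
	 */
	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM,
				  4, true, tcam_ids);
	if (status)
		return status;

	/* ... program the entries via the IDs in tcam_ids[] ... */

	/* Hand the IDs back to firmware when done. */
	return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM,
			       4, tcam_ids);
}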
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index b22aa561e253..f9fc005d35a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -34,10 +34,18 @@ enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
@@ -46,8 +54,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-void ice_dev_onetime_setup(struct ice_hw *hw);
-
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 713e8a892e14..adb8dab765c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1323,13 +1323,13 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
}
/**
- * ice_aq_query_port_ets - query port ets configuration
+ * ice_aq_query_port_ets - query port ETS configuration
* @pi: port information structure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
- * query current port ets configuration
+ * query current port ETS configuration
*/
static enum ice_status
ice_aq_query_port_ets(struct ice_port_info *pi,
@@ -1416,13 +1416,13 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
}
/**
- * ice_query_port_ets - query port ets configuration
+ * ice_query_port_ets - query port ETS configuration
* @pi: port information structure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
- * query current port ets configuration and update the
+ * query current port ETS configuration and update the
* SW DB with the TC changes
*/
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index d3d3ec29def9..7108fb41b604 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -315,9 +315,9 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
+ struct ice_dcbx_cfg *err_cfg;
enum ice_status ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
@@ -330,53 +330,25 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
- local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
- desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ mutex_lock(&pf->tc_mutex);
- /* Save current willing state and force FW to unwilling */
- local_dcbx_cfg->etscfg.willing = 0x0;
- local_dcbx_cfg->pfc.willing = 0x0;
- local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+ if (!pf->hw.port_info->is_sw_lldp)
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
- ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(dev, "Failed to set DCB to unwilling\n");
+ dev_err(dev, "Failed to set DCB config in rebuild\n");
goto dcb_error;
}
- /* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg)
- goto dcb_error;
-
- ice_init_dcb(&pf->hw, true);
- if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
- pf->hw.port_info->is_sw_lldp = true;
- else
- pf->hw.port_info->is_sw_lldp = false;
-
- if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
- /* difference in cfg detected - disable DCB till next MIB */
- dev_err(dev, "Set local MIB not accurate\n");
- kfree(prev_cfg);
- goto dcb_error;
+ if (!pf->hw.port_info->is_sw_lldp) {
+ ret = ice_cfg_lldp_mib_change(&pf->hw, true);
+ if (ret && !pf->hw.port_info->is_sw_lldp) {
+ dev_err(dev, "Failed to register for MIB changes\n");
+ goto dcb_error;
+ }
}
- /* fetched config congruent to previous configuration */
- kfree(prev_cfg);
-
- /* Set the local desired config */
- if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
- memcpy(local_dcbx_cfg, desired_dcbx_cfg,
- sizeof(*local_dcbx_cfg));
-
- ice_cfg_etsrec_defaults(pf->hw.port_info);
- ret = ice_set_dcb_cfg(pf->hw.port_info);
- if (ret) {
- dev_err(dev, "Failed to set desired config\n");
- goto dcb_error;
- }
dev_info(dev, "DCB restored after reset\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -384,20 +356,32 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
+ mutex_unlock(&pf->tc_mutex);
+
return;
dcb_error:
dev_err(dev, "Disabling DCB until new settings occur\n");
- prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg)
+ err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL);
+ if (!err_cfg) {
+ mutex_unlock(&pf->tc_mutex);
return;
+ }
- prev_cfg->etscfg.willing = true;
- prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
- prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
- memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
- ice_pf_dcb_cfg(pf, prev_cfg, false);
- kfree(prev_cfg);
+ err_cfg->etscfg.willing = true;
+ err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+ err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec));
+ /* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
+ * here as is done for other calls to that function. That check is
+ * not necessary since this is in this function's error cleanup path.
+ * Suppress the Coverity warning with the following comment...
+ */
+ /* coverity[check_return] */
+ ice_pf_dcb_cfg(pf, err_cfg, false);
+ kfree(err_cfg);
+
+ mutex_unlock(&pf->tc_mutex);
}
/**
@@ -428,9 +412,9 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
}
/**
- * ice_dcb_sw_default_config - Apply a default DCB config
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
* @pf: PF to apply config to
- * @ets_willing: configure ets willing
+ * @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
@@ -593,8 +577,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- dev_info(dev,
- "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
+ dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
struct ice_vsi *pf_vsi;
@@ -604,8 +587,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
- dev_err(dev,
- "Failed to set local DCB config %d\n", err);
+ dev_err(dev, "Failed to set local DCB config %d\n",
+ err);
err = -EIO;
goto dcb_init_err;
}
@@ -771,6 +754,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
}
+ mutex_lock(&pf->tc_mutex);
+
/* store the old configuration */
tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
@@ -781,20 +766,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(dev, "Failed to get DCB config\n");
- return;
+ goto out;
}
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
- return;
+ goto out;
}
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
- return;
+ goto out;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
@@ -808,7 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_dbg(dev, "PF VSI doesn't exist\n");
- return;
+ goto out;
}
rtnl_lock();
@@ -817,13 +802,15 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
- rtnl_unlock();
- return;
+ goto unlock_rtnl;
}
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
ice_ena_vsi(pf_vsi, true);
+unlock_rtnl:
rtnl_unlock();
+out:
+ mutex_unlock(&pf->tc_mutex);
}
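
The reworked error paths in ice_dcb_process_lldp_set_mib_change() above follow the usual kernel convention: one cleanup label per level of state, with locks released in the reverse order they were taken. A stripped-down sketch of that shape; the helpers standing in for the config and query steps are hypothetical placeholders:

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

/* Hypothetical stubs standing in for the real config/query steps. */
static bool fetch_config_failed(void) { return false; }
static bool query_port_ets_failed(void) { return false; }
static void reconfigure_vsis(void) { }

static void mib_change_sketch(struct mutex *tc_mutex)
{
	mutex_lock(tc_mutex);

	if (fetch_config_failed())
		goto out;		/* only the mutex is held here */

	rtnl_lock();

	if (query_port_ets_failed())
		goto unlock_rtnl;	/* both locks held; drop both */

	reconfigure_vsis();

unlock_rtnl:
	rtnl_unlock();			/* release in reverse order taken */
out:
	mutex_unlock(tc_mutex);
}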
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index d870c1aedc17..b61aba428adb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -297,8 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
return;
*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
- dev_dbg(ice_pf_to_dev(pf),
- "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+ dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
}
@@ -418,8 +417,8 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
return;
*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
- dev_dbg(ice_pf_to_dev(pf),
- "Get PG config prio=%d tc=%d\n", prio, *pgid);
+ dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
+ *pgid);
}
/**
@@ -713,13 +712,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
mutex_lock(&pf->tc_mutex);
- ret = dcb_ieee_delapp(netdev, app);
- if (ret)
- goto delapp_out;
-
old_cfg = &pf->hw.port_info->local_dcbx_cfg;
- if (old_cfg->numapps == 1)
+ if (old_cfg->numapps <= 1)
+ goto delapp_out;
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
@@ -882,8 +881,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
sapp.protocol = app->prot_id;
sapp.priority = app->priority;
err = ice_dcbnl_delapp(vsi->netdev, &sapp);
- dev_dbg(&vsi->back->pdev->dev,
- "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
vsi->idx, err, app->selector, app->prot_id, app->priority);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index f8d5c661d0ba..ce63017c56c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -11,5 +11,23 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Connection E822-C for backplane */
+#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection E822-C for QSFP */
+#define ICE_DEV_ID_E822C_QSFP 0x1891
+/* Intel(R) Ethernet Connection E822-C for SFP */
+#define ICE_DEV_ID_E822C_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-X for backplane */
+#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9ebd93e79aeb..b002ab4e5838 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,7 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -165,13 +166,24 @@ static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 oem_build;
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
- sizeof(drvinfo->fw_version));
+
+ /* Display the NVM version (from which the firmware version can be
+ * determined), since it contains more pertinent information.
+ */
+ ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
+ &nvm_ver_hi, &nvm_ver_lo);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
+ hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
@@ -283,12 +295,15 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- struct ice_vf *vf = pf->vf;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++)
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
return true;
+ }
+
return false;
}
@@ -359,8 +374,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
val = rd32(hw, reg);
if (val == pattern)
continue;
- dev_err(dev,
- "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
+ dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
, __func__, reg, pattern, val);
return 1;
}
@@ -368,8 +382,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
wr32(hw, reg, orig_val);
val = rd32(hw, reg);
if (val != orig_val) {
- dev_err(dev,
- "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
+ dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
, __func__, reg, orig_val, val);
return 1;
}
@@ -787,8 +800,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
set_bit(__ICE_TESTING, pf->state);
if (ice_active_vfs(pf)) {
- dev_warn(dev,
- "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+ dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[ICE_ETH_TEST_REG] = 1;
data[ICE_ETH_TEST_EEPROM] = 1;
data[ICE_ETH_TEST_INTR] = 1;
@@ -1043,7 +1055,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fec = ICE_FEC_NONE;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
+ dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
fecparam->fec);
return -EINVAL;
}
@@ -1196,8 +1208,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* events to respond to.
*/
if (status)
- dev_info(dev,
- "Failed to unreg for LLDP events\n");
+ dev_info(dev, "Failed to unreg for LLDP events\n");
/* The AQ call to stop the FW LLDP agent will generate
* an error if the agent is already stopped.
@@ -1252,8 +1263,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
- dev_dbg(dev,
- "Fail to enable MIB change events\n");
+ dev_dbg(dev, "Fail to enable MIB change events\n");
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1706,291 +1716,13 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi = np->vsi->port_info;
- struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
- bool unrecog_phy_high = false;
- bool unrecog_phy_low = false;
link_info = &vsi->port_info->phy.link_info;
- /* Initialize supported and advertised settings based on PHY settings */
- switch (link_info->phy_type_low) {
- case ICE_PHY_TYPE_LOW_100BASE_TX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_100M_SGMII:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1G_SGMII:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_SX:
- case ICE_PHY_TYPE_LOW_1000BASE_LX:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_KX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseKX_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseKX_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_X:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_KX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_5GBASE_T:
- case ICE_PHY_TYPE_LOW_5GBASE_KR:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 5000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 5000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10G_SFI_DA:
- case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_SR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseSR_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseLR_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseKR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_T:
- case ICE_PHY_TYPE_LOW_25GBASE_CR:
- case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
- case ICE_PHY_TYPE_LOW_25GBASE_CR1:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseCR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_SR:
- case ICE_PHY_TYPE_LOW_25GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseSR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_KR:
- case ICE_PHY_TYPE_LOW_25GBASE_KR1:
- case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseKR_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_CR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_40G_XLAUI:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_SR4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseSR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_LR4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_KR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseKR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_CR2:
- case ICE_PHY_TYPE_LOW_50GBASE_CP:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseCR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_LAUI2:
- case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_AUI2:
- case ICE_PHY_TYPE_LOW_50GBASE_SR:
- case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_AUI1:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_KR2:
- case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseKR2_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseKR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_SR2:
- case ICE_PHY_TYPE_LOW_50GBASE_LR2:
- case ICE_PHY_TYPE_LOW_50GBASE_FR:
- case ICE_PHY_TYPE_LOW_50GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseSR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_CR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
- case ICE_PHY_TYPE_LOW_100G_CAUI4:
- case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
- case ICE_PHY_TYPE_LOW_100G_AUI4:
- case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_CP2:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_SR4:
- case ICE_PHY_TYPE_LOW_100GBASE_SR2:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseSR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_LR4:
- case ICE_PHY_TYPE_LOW_100GBASE_DR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseLR4_ER4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_KR4:
- case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
- break;
- default:
- unrecog_phy_low = true;
- }
-
- switch (link_info->phy_type_high) {
- case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
- break;
- case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
- case ICE_PHY_TYPE_HIGH_100G_CAUI2:
- case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
- case ICE_PHY_TYPE_HIGH_100G_AUI2:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- break;
- default:
- unrecog_phy_high = true;
- }
-
- if (unrecog_phy_low && unrecog_phy_high) {
- /* if we got here and link is up something bad is afoot */
- netdev_info(netdev,
- "WARNING: Unrecognized PHY_Low (0x%llx).\n",
- (u64)link_info->phy_type_low);
- netdev_info(netdev,
- "WARNING: Unrecognized PHY_High (0x%llx).\n",
- (u64)link_info->phy_type_high);
- }
-
- /* Now that we've worked out everything that could be supported by the
- * current PHY type, get what is supported by the NVM and intersect
- * them to get what is truly supported
- */
- memset(&cap_ksettings, 0, sizeof(cap_ksettings));
- ice_phy_type_to_ethtool(netdev, &cap_ksettings);
- ethtool_intersect_link_masks(ks, &cap_ksettings);
+ /* Get supported and advertised settings from PHY ability with media */
+ ice_phy_type_to_ethtool(netdev, ks);
switch (link_info->link_speed) {
case ICE_AQ_LINK_SPEED_100GB:
@@ -2024,8 +1756,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ks->base.speed = SPEED_100;
break;
default:
- netdev_info(netdev,
- "WARNING: Unrecognized link_speed (0x%x).\n",
+ netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
link_info->link_speed);
break;
}
@@ -2531,6 +2262,243 @@ done:
}
/**
+ * ice_parse_hdrs - parses headers from RSS hash input
+ * @nfc: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended
+ * header types for RSS configuration
+ */
+static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+{
+ u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ default:
+ break;
+ }
+ return hdrs;
+}
+
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+/**
+ * ice_parse_hash_flds - parses hash fields from RSS hash input
+ * @nfc: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended
+ * hash fields for RSS configuration
+ */
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+{
+ u64 hfld = ICE_HASH_INVALID;
+
+ if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (nfc->data & RXH_IP_SRC)
+ hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
+ if (nfc->data & RXH_IP_DST)
+ hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (nfc->data & RXH_IP_SRC)
+ hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
+ if (nfc->data & RXH_IP_DST)
+ hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (nfc->data & RXH_L4_B_0_1)
+ hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return hfld;
+}
+
+/**
+ * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @vsi: the VSI being configured
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ */
+static int
+ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ u64 hashed_flds;
+ u32 hdrs;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ hashed_flds = ice_parse_hash_flds(nfc);
+ if (hashed_flds == ICE_HASH_INVALID) {
+ dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ hdrs = ice_parse_hdrs(nfc);
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
+ dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ if (status) {
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
+ * @vsi: the VSI being configured
+ * @nfc: ethtool rxnfc command
+ */
+static void
+ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ u64 hash_flds;
+ u32 hdrs;
+
+ dev = ice_pf_to_dev(pf);
+
+ nfc->data = 0;
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ hdrs = ice_parse_hdrs(nfc);
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
+ dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ if (hash_flds == ICE_HASH_INVALID) {
+ dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
+ hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
+ nfc->data |= (u64)RXH_IP_SRC;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
+ hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
+ nfc->data |= (u64)RXH_IP_DST;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
+ nfc->data |= (u64)RXH_L4_B_0_1;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
+ hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
+ nfc->data |= (u64)RXH_L4_B_2_3;
+}
+
+/**
+ * ice_set_rxnfc - command to set Rx flow rules.
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ return ice_set_rss_hash_opt(vsi, cmd);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -2551,6 +2519,10 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
+ case ETHTOOL_GRXFH:
+ ice_get_rss_hash_opt(vsi, cmd);
+ ret = 0;
+ break;
default:
break;
}
@@ -2600,13 +2572,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
if (new_tx_cnt != ring->tx_pending)
- netdev_info(netdev,
- "Requested Tx descriptor count rounded up to %d\n",
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
new_tx_cnt);
new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
if (new_rx_cnt != ring->rx_pending)
- netdev_info(netdev,
- "Requested Rx descriptor count rounded up to %d\n",
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_cnt);
/* if nothing to do return success */
@@ -3473,8 +3443,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
c_type_str, pf->hw.intrl_gran,
ICE_MAX_INTRL);
return -EINVAL;
@@ -3492,8 +3461,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
if (ec->tx_coalesce_usecs_high) {
- netdev_info(vsi->netdev,
- "setting %s-usecs-high is not supported\n",
+ netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
c_type_str);
return -EINVAL;
}
@@ -3510,23 +3478,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
- netdev_info(vsi->netdev,
- "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
+ netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
c_type_str, c_type_str);
return -EINVAL;
}
if (coalesce_usecs > ICE_ITR_MAX) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs range is 0-%d\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
c_type_str, ICE_ITR_MAX);
return -EINVAL;
}
/* hardware only supports an ITR granularity of 2us */
if (coalesce_usecs % 2 != 0) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs must be even\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs must be even\n",
c_type_str);
return -EINVAL;
}
@@ -3585,6 +3550,53 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
}
/**
+ * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure containing the requested coalesce settings
+ *
+ * Print a netdev info message and return an error if the driver does not
+ * support one of the parameters. When one of these parameters gains driver
+ * support, remove its entry from the param array.
+ */
+static int
+ice_is_coalesce_param_invalid(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ice_ethtool_not_used {
+ u32 value;
+ const char *name;
+ } param[] = {
+ {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
+ {ec->rate_sample_interval, "sample-interval"},
+ {ec->pkt_rate_low, "pkt-rate-low"},
+ {ec->pkt_rate_high, "pkt-rate-high"},
+ {ec->rx_max_coalesced_frames, "rx-frames"},
+ {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
+ {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
+ {ec->tx_max_coalesced_frames, "tx-frames"},
+ {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
+ {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
+ {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
+ {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
+ {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
+ {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
+ {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
+ {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ if (param[i].value) {
+ netdev_info(netdev, "Setting %s not supported\n",
+ param[i].name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
* __ice_set_coalesce - set ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
@@ -3600,6 +3612,9 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ if (ice_is_coalesce_param_invalid(netdev, ec))
+ return -EINVAL;
+
if (q_num < 0) {
int v_idx;
@@ -3717,8 +3732,7 @@ ice_get_module_info(struct net_device *netdev,
}
break;
default:
- netdev_warn(netdev,
- "SFF Module Type not recognized.\n");
+ netdev_warn(netdev, "SFF Module Type not recognized.\n");
return -EINVAL;
}
return 0;
@@ -3786,11 +3800,11 @@ ice_get_module_eeprom(struct net_device *netdev,
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
- .get_drvinfo = ice_get_drvinfo,
- .get_regs_len = ice_get_regs_len,
- .get_regs = ice_get_regs,
- .get_msglevel = ice_get_msglevel,
- .set_msglevel = ice_set_msglevel,
+ .get_drvinfo = ice_get_drvinfo,
+ .get_regs_len = ice_get_regs_len,
+ .get_regs = ice_get_regs,
+ .get_msglevel = ice_get_msglevel,
+ .set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
@@ -3804,6 +3818,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
+ .set_rxnfc = ice_set_rxnfc,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
@@ -3816,8 +3831,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_per_queue_coalesce = ice_get_per_q_coalesce,
- .set_per_queue_coalesce = ice_set_per_q_coalesce,
+ .get_per_queue_coalesce = ice_get_per_q_coalesce,
+ .set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
.get_module_info = ice_get_module_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index cbd53b586c36..99208946224c 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3,6 +3,87 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
+#include "ice_flow.h"
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+ /* SWITCH */
+ {
+ ICE_SID_XLT0_SW,
+ ICE_SID_XLT_KEY_BUILDER_SW,
+ ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW,
+ ICE_SID_CDID_KEY_BUILDER_SW,
+ ICE_SID_CDID_REDIR_SW
+ },
+
+ /* ACL */
+ {
+ ICE_SID_XLT0_ACL,
+ ICE_SID_XLT_KEY_BUILDER_ACL,
+ ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL,
+ ICE_SID_CDID_KEY_BUILDER_ACL,
+ ICE_SID_CDID_REDIR_ACL
+ },
+
+ /* FD */
+ {
+ ICE_SID_XLT0_FD,
+ ICE_SID_XLT_KEY_BUILDER_FD,
+ ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD,
+ ICE_SID_CDID_KEY_BUILDER_FD,
+ ICE_SID_CDID_REDIR_FD
+ },
+
+ /* RSS */
+ {
+ ICE_SID_XLT0_RSS,
+ ICE_SID_XLT_KEY_BUILDER_RSS,
+ ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS,
+ ICE_SID_CDID_KEY_BUILDER_RSS,
+ ICE_SID_CDID_REDIR_RSS
+ },
+
+ /* PE */
+ {
+ ICE_SID_XLT0_PE,
+ ICE_SID_XLT_KEY_BUILDER_PE,
+ ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE,
+ ICE_SID_CDID_KEY_BUILDER_PE,
+ ICE_SID_CDID_REDIR_PE
+ }
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+ return ice_sect_lkup[blk][sect];
+}
/**
* ice_pkg_val_buf
@@ -158,6 +239,176 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
+/* Key creation */
+
+#define ICE_DC_KEY 0x1 /* don't care */
+#define ICE_DC_KEYINV 0x1
+#define ICE_NM_KEY 0x0 /* never match */
+#define ICE_NM_KEYINV 0x0
+#define ICE_0_KEY 0x1 /* match 0 */
+#define ICE_0_KEYINV 0x0
+#define ICE_1_KEY 0x0 /* match 1 */
+#define ICE_1_KEYINV 0x1
+
+/**
+ * ice_gen_key_word - generate 16 bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to an array where the resulting key portion will be stored
+ * @key_inv: pointer to an array where the resulting key invert portion will
+ *	     be stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care
+ * mask and an 8-bit never match mask. The 16 bits of output are divided into
+ * 8 bits of key and 8 bits of key invert.
+ *
+ * '0' = b01, always match a 0 bit
+ * '1' = b10, always match a 1 bit
+ * '?' = b11, don't care bit (always matches)
+ * '~' = b00, never match bit
+ *
+ * Input:
+ * val: b0 1 0 1 0 1
+ * dont_care: b0 0 1 1 0 0
+ * never_mtch: b0 0 0 0 1 1
+ * ------------------------------
+ * Result: key: b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+ u8 *key_inv)
+{
+ u8 in_key = *key, in_key_inv = *key_inv;
+ u8 i;
+
+ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+ return ICE_ERR_CFG;
+
+ *key = 0;
+ *key_inv = 0;
+
+ /* encode the 8 bits into 8-bit key and 8-bit key invert */
+ for (i = 0; i < 8; i++) {
+ *key >>= 1;
+ *key_inv >>= 1;
+
+ if (!(valid & 0x1)) { /* change only valid bits */
+ *key |= (in_key & 0x1) << 7;
+ *key_inv |= (in_key_inv & 0x1) << 7;
+ } else if (dont_care & 0x1) { /* don't care bit */
+ *key |= ICE_DC_KEY << 7;
+ *key_inv |= ICE_DC_KEYINV << 7;
+ } else if (nvr_mtch & 0x1) { /* never match bit */
+ *key |= ICE_NM_KEY << 7;
+ *key_inv |= ICE_NM_KEYINV << 7;
+ } else if (val & 0x01) { /* exact 1 match */
+ *key |= ICE_1_KEY << 7;
+ *key_inv |= ICE_1_KEYINV << 7;
+ } else { /* exact 0 match */
+ *key |= ICE_0_KEY << 7;
+ *key_inv |= ICE_0_KEYINV << 7;
+ }
+
+ dont_care >>= 1;
+ nvr_mtch >>= 1;
+ valid >>= 1;
+ val >>= 1;
+ in_key >>= 1;
+ in_key_inv >>= 1;
+ }
+
+ return 0;
+}
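
/* Illustrative sketch (not part of this patch): the worked example from the
 * kernel-doc above expressed as a call. With val = 0x15 (b010101),
 * dont_care = 0x0c (b001100), nvr_mtch = 0x03 (b000011) and only the six
 * low bits valid, the b01/b10/b11/b00 pairs of the example separate into
 * their key and key-invert halves: key = 0x2c, key_inv = 0x1c.
 */
static void example_gen_key_word(void)
{
	u8 key = 0, key_inv = 0;

	/* masks do not overlap, so this cannot return ICE_ERR_CFG */
	ice_gen_key_word(0x15, 0x3f, 0x0c, 0x03, &key, &key_inv);
	/* key == 0x2c, key_inv == 0x1c */
}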
+
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an
+ * array. Returns true if the number of bits set is <= max, false otherwise.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+ u16 count = 0;
+ u16 i;
+
+ /* check each byte */
+ for (i = 0; i < size; i++) {
+ /* if 0, go to next byte */
+ if (!mask[i])
+ continue;
+
+ /* We know there is at least one set bit in this byte because of
+ * the above check; if we already have found 'max' number of
+ * bits set, then we can return failure now.
+ */
+ if (count == max)
+ return false;
+
+ /* count the bits in this byte, checking threshold */
+ count += hweight8(mask[i]);
+ if (count > max)
+ return false;
+ }
+
+ return true;
+}
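
/* Illustrative sketch (not part of this patch): with max = 1, a mask with a
 * single set bit passes the check while a two-bit mask fails. The mask
 * contents are hypothetical.
 */
static void example_bits_max_set(void)
{
	u8 one_bit[2] = { 0x10, 0x00 };
	u8 two_bits[2] = { 0x10, 0x01 };

	WARN_ON(!ice_bits_max_set(one_bit, sizeof(one_bit), 1));  /* true */
	WARN_ON(ice_bits_max_set(two_bits, sizeof(two_bits), 1)); /* false */
}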
+
+/**
+ * ice_set_key - generate a variable sized key with multiples of 16-bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ * upd == NULL --> upd mask is all 1's (update all bits)
+ * dc == NULL --> dc mask is all 0's (no don't care bits)
+ * nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+static enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len)
+{
+ u16 half_size;
+ u16 i;
+
+ /* size must be a multiple of 2 bytes. */
+ if (size % 2)
+ return ICE_ERR_CFG;
+
+ half_size = size / 2;
+ if (off + len > half_size)
+ return ICE_ERR_CFG;
+
+	/* Make sure at most one bit is set in the never match mask. Having
+	 * more than one never match mask bit set will cause HW to consume
+	 * excessive power; this is a power management efficiency check.
+	 */
+#define ICE_NVR_MTCH_BITS_MAX 1
+ if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+ return ICE_ERR_CFG;
+
+ for (i = 0; i < len; i++)
+ if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+ dc ? dc[i] : 0, nm ? nm[i] : 0,
+ key + off + i, key + half_size + off + i))
+ return ICE_ERR_CFG;
+
+ return 0;
+}
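
/* Illustrative sketch (not part of this patch): building a 4-byte key, i.e.
 * 2 bytes of key followed by 2 bytes of key invert. The value bytes are
 * hypothetical; NULL upd/dc/nm select the defaults described in the
 * kernel-doc above (update everything, no don't care, no never match).
 */
static enum ice_status example_set_key(void)
{
	u8 key[4] = { 0 };
	u8 val[2] = { 0xAB, 0xCD };

	/* key[0..1] receive the key portion, key[2..3] the invert portion */
	return ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 0,
			   sizeof(val));
}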
+
/**
* ice_acquire_global_cfg_lock
* @hw: pointer to the HW structure
@@ -205,6 +456,31 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
}
/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+static enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+static void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
* ice_aq_download_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package buffer to transfer
@@ -253,6 +529,54 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
* ice_find_seg_in_pkg
* @hw: pointer to the hardware structure
* @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
@@ -287,6 +611,44 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+static enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ u32 offset, info, i;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+
+ status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
@@ -767,6 +1129,169 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
return status;
}
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before the first
+ * call to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has
+ * been called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result in
+ * some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+static enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = le16_to_cpu(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = le16_to_cpu(buf->data_end) +
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = cpu_to_le16(data_end);
+
+ return 0;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+static void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = le16_to_cpu(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = le16_to_cpu(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
+ buf->section_entry[sect_count].size = cpu_to_le16(size);
+ buf->section_entry[sect_count].type = cpu_to_le32(type);
+
+ data_end += size;
+ buf->data_end = cpu_to_le16(data_end);
+
+ buf->section_count = cpu_to_le16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return le16_to_cpu(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
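
/* Illustrative sketch (not part of this patch): the intended life cycle of a
 * package build buffer. The section type and size are hypothetical;
 * ice_upd_prof_hw() later in this file follows this same
 * reserve-then-allocate pattern.
 */
static enum ice_status example_pkg_buf_cycle(struct ice_hw *hw, u32 sect_id)
{
	struct ice_buf_build *bld;
	enum ice_status status;
	void *sect;

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return ICE_ERR_NO_MEMORY;

	/* reserve section table entries before any section is allocated;
	 * the table cannot grow afterwards
	 */
	status = ice_pkg_buf_reserve_section(bld, 1);
	if (status)
		goto out;

	sect = ice_pkg_buf_alloc_section(bld, sect_id, 64);
	if (!sect) {
		status = ICE_ERR_MAX_LIMIT;
		goto out;
	}

	/* ... fill the section contents in Little Endian form ... */

	if (ice_pkg_buf_get_active_sections(bld))
		status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
out:
	ice_pkg_buf_free(hw, bld);
	return status;
}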
+
/* PTG Management */
/**
@@ -951,6 +1476,48 @@ enum ice_sid_all {
ICE_SID_OFF_COUNT,
};
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * The count, the cookies, and their order must all match for the two lists
+ * to be considered equivalent.
+ */
+static bool
+ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
+{
+ struct ice_vsig_prof *tmp1;
+ struct ice_vsig_prof *tmp2;
+ u16 chk_count = 0;
+ u16 count = 0;
+
+ /* compare counts */
+ list_for_each_entry(tmp1, list1, list)
+ count++;
+ list_for_each_entry(tmp2, list2, list)
+ chk_count++;
+ if (!count || count != chk_count)
+ return false;
+
+ tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
+ tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
+
+	/* profile cookies must match, and in the exact same order, to take
+	 * priority into account
+	 */
+ while (count--) {
+ if (tmp2->profile_cookie != tmp1->profile_cookie)
+ return false;
+
+ tmp1 = list_next_entry(tmp1, list);
+ tmp2 = list_next_entry(tmp2, list);
+ }
+
+ return true;
+}
+
/* VSIG Management */
/**
@@ -999,6 +1566,117 @@ static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
}
/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will iterate through the VSIG list and mark the first
+ * unused entry for the new VSIG entry as used and return that value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ return ice_vsig_alloc_val(hw, blk, i);
+
+ return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find VSI group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e., all VSIs under
+ * a group have the same characteristic set. To check if there exists a VSIG
+ * with the same characteristics as the input characteristics, this function
+ * iterates through the XLT2 list and returns the VSIG with a matching
+ * configuration. To make sure that priorities are accounted for, the list
+ * must match exactly, including the order in which the characteristics are
+ * listed.
+ */
+static enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *chs, u16 *vsig)
+{
+ struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+ u16 i;
+
+ for (i = 0; i < xlt2->count; i++)
+ if (xlt2->vsig_tbl[i].in_use &&
+ ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+ *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_vsig_free - free VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to remove
+ *
+ * The function will remove all VSIs associated with the input VSIG and move
+ * them to the DEFAULT_VSIG and mark the VSIG available.
+ */
+static enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ struct ice_vsig_prof *dtmp, *del;
+ struct ice_vsig_vsi *vsi_cur;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+ if (idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the
+ * list and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+		/* remove all VSIs associated with this VSIG XLT2 entry */
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ /* NULL terminate head of VSI list */
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+ }
+
+ /* free characteristic list */
+ list_for_each_entry_safe(del, dtmp,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ list_del(&del->list);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ /* if VSIG characteristic list was cleared for reset
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+ return 0;
+}
+
+/**
* ice_vsig_remove_vsi - remove VSI from VSIG
* @hw: pointer to the hardware structure
* @blk: HW block
@@ -1117,6 +1795,215 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
return 0;
}
+/**
+ * ice_find_prof_id - find profile ID for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile ID
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u8 *prof_id)
+{
+ struct ice_es *es = &hw->blk[blk].es;
+ u16 off, i;
+
+ for (i = 0; i < es->count; i++) {
+ off = i * es->fvw;
+
+ if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ continue;
+
+ *prof_id = i;
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile ID resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the TCAM for
+ * @tcam_idx: pointer to variable to receive the TCAM entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the TCAM entry
+ * @tcam_idx: the TCAM entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the profile ID for
+ * @prof_id: pointer to variable to receive the profile ID
+ *
+ * This function allocates a new profile ID, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+ enum ice_status status;
+ u16 res_type;
+ u16 get_prof;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
+ if (!status)
+ *prof_id = (u8)get_prof;
+
+ return status;
+}
+
+/**
+ * ice_free_prof_id - free profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID to free
+ *
+ * This function frees a profile ID, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ u16 tmp_prof_id = (u16)prof_id;
+ u16 res_type;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block containing the profile ID
+ * @prof_id: the profile ID for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ hw->blk[blk].es.ref_count[prof_id]++;
+
+ return 0;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile ID to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+ struct ice_fv_word *fv)
+{
+ u16 off;
+
+ off = prof_id * hw->blk[blk].es.fvw;
+ if (!fv) {
+ memset(&hw->blk[blk].es.t[off], 0,
+ hw->blk[blk].es.fvw * sizeof(*fv));
+ hw->blk[blk].es.written[prof_id] = false;
+ } else {
+ memcpy(&hw->blk[blk].es.t[off], fv,
+ hw->blk[blk].es.fvw * sizeof(*fv));
+ }
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+ if (!--hw->blk[blk].es.ref_count[prof_id]) {
+ ice_write_es(hw, blk, prof_id, NULL);
+ return ice_free_prof_id(hw, blk, prof_id);
+ }
+ }
+
+ return 0;
+}
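
/* Illustrative sketch (not part of this patch): the reference pairing used by
 * ice_add_prof()/ice_rem_prof() later in this file. The block and profile ID
 * values are hypothetical.
 */
static void example_prof_refs(struct ice_hw *hw)
{
	u8 prof_id = 0;

	ice_prof_inc_ref(hw, ICE_BLK_RSS, prof_id);
	/* ... profile is referenced by a flow ... */
	ice_prof_dec_ref(hw, ICE_BLK_RSS, prof_id);
	/* on the last dereference the ES is cleared and the ID is freed */
}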
+
/* Block / table section IDs */
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
/* SWITCH */
@@ -1374,16 +2261,85 @@ void ice_fill_blk_tbls(struct ice_hw *hw)
}
/**
+ * ice_free_prof_map - free profile map
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_es *es = &hw->blk[blk_idx].es;
+ struct ice_prof_map *del, *tmp;
+
+ mutex_lock(&es->prof_map_lock);
+ list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
+ list_del(&del->list);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+ INIT_LIST_HEAD(&es->prof_map);
+ mutex_unlock(&es->prof_map_lock);
+}
+
+/**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_flow_prof *p, *tmp;
+
+ mutex_lock(&hw->fl_profs_locks[blk_idx]);
+ list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ list_del(&p->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), p);
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk_idx]);
+
+ /* if driver is in reset and tables are being cleared
+ * re-initialize the flow profile list heads
+ */
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl)
+ return;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ ice_vsig_free(hw, blk, i);
+}
+
+/**
* ice_free_hw_tbls - free hardware table memory
* @hw: pointer to the hardware structure
*/
void ice_free_hw_tbls(struct ice_hw *hw)
{
+ struct ice_rss_cfg *r, *rt;
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
- hw->blk[i].is_list_init = false;
+ if (hw->blk[i].is_list_init) {
+ struct ice_es *es = &hw->blk[i].es;
+
+ ice_free_prof_map(hw, i);
+ mutex_destroy(&es->prof_map_lock);
+ ice_free_flow_profs(hw, i);
+ mutex_destroy(&hw->fl_profs_locks[i]);
+
+ hw->blk[i].is_list_init = false;
+ }
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
@@ -1397,10 +2353,26 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
}
+ list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ mutex_destroy(&hw->rss_locks);
memset(hw->blk, 0, sizeof(hw->blk));
}
/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ mutex_init(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
* ice_clear_hw_tbls - clear HW tables and flow profiles
* @hw: pointer to the hardware structure
*/
@@ -1415,6 +2387,13 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
struct ice_es *es = &hw->blk[i].es;
+ if (hw->blk[i].is_list_init) {
+ ice_free_prof_map(hw, i);
+ ice_free_flow_profs(hw, i);
+ }
+
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+
memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
memset(xlt1->ptg_tbl, 0,
ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
@@ -1443,6 +2422,8 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
+ mutex_init(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
@@ -1454,6 +2435,9 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
if (hw->blk[i].is_list_init)
continue;
+ ice_init_flow_profs(hw, i);
+ mutex_init(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
hw->blk[i].is_list_init = true;
hw->blk[i].overwrite = blk_sizes[i].overwrite;
@@ -1547,3 +2531,1580 @@ err:
ice_free_hw_tbls(hw);
return ICE_ERR_NO_MEMORY;
}
+
+/**
+ * ice_prof_gen_key - generate profile ID key
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile ID key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+ u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 key[ICE_TCAM_KEY_SZ])
+{
+ struct ice_prof_id_key inkey;
+
+ inkey.xlt1 = ptg;
+ inkey.xlt2_cdid = cpu_to_le16(vsig);
+ inkey.flags = cpu_to_le16(flags);
+
+ switch (hw->blk[blk].prof.cdid_bits) {
+ case 0:
+ break;
+ case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
+ break;
+ case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
+ break;
+ case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 8
+ inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
+ inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+ break;
+ }
+
+ return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+ nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
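
/* Illustrative sketch (not part of this patch): how a CDID lands in the
 * upper bits of xlt2_cdid when a block uses 4 CDID bits. The cdid value is
 * hypothetical and, as in ice_prof_gen_key(), is one-hot encoded.
 */
static u16 example_pack_cdid4(u16 vsig, u8 cdid)
{
	u16 xlt2_cdid = vsig & ~ICE_CD_4_M;

	/* cdid = 2 yields BIT(2) << 12 = 0x4000 in the upper nibble */
	return xlt2_cdid | (BIT(cdid) << ICE_CD_4_S);
}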
+
+/**
+ * ice_tcam_write_entry - write TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the profile ID
+ * @idx: the entry index to write to
+ * @prof_id: profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+ u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+	enum ice_status status;
+
+ status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+ dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+ if (!status) {
+ hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
+ hw->blk[blk].prof.t[idx].prof_id = prof_id;
+ }
+
+ return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *ptr;
+
+ *refs = 0;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ while (ptr) {
+ (*refs)++;
+ ptr = ptr->next_vsi;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *ent;
+
+ list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ if (ent->profile_cookie == hdl)
+ return true;
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "Characteristic list for VSI group %d not found.\n",
+ vsig);
+ return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile ID extraction sequence changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct list_head *chgs)
+{
+ u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+ u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+ struct ice_pkg_es *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_VEC_TBL);
+ p = (struct ice_pkg_es *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ vec_size -
+ sizeof(p->es[0]));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->prof_id);
+
+ memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile ID TCAM changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
+ struct ice_prof_id_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_PROF_TCAM);
+ p = (struct ice_prof_id_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
+ p->entry[0].prof_id = tmp->prof_id;
+
+ memcpy(p->entry[0].key,
+ &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+ sizeof(hw->blk[blk].prof.t->key));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build XLT1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+ struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry)
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+ struct ice_xlt1_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_XLT1);
+ p = (struct ice_xlt1_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->ptype);
+ p->value[0] = tmp->ptg;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build XLT2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+ struct list_head *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ list_for_each_entry(tmp, chgs, list_entry) {
+ struct ice_xlt2_section *p;
+ u32 id;
+
+ switch (tmp->type) {
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ id = ice_sect_id(blk, ICE_XLT2);
+ p = (struct ice_xlt2_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = cpu_to_le16(1);
+ p->offset = cpu_to_le16(tmp->vsi);
+ p->value[0] = cpu_to_le16(tmp->vsig);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *chgs)
+{
+ struct ice_buf_build *b;
+ struct ice_chs_chg *tmp;
+ enum ice_status status;
+ u16 pkg_sects;
+ u16 xlt1 = 0;
+ u16 xlt2 = 0;
+ u16 tcam = 0;
+ u16 es = 0;
+ u16 sects;
+
+ /* count number of sections we need */
+ list_for_each_entry(tmp, chgs, list_entry) {
+ switch (tmp->type) {
+ case ICE_PTG_ES_ADD:
+ if (tmp->add_ptg)
+ xlt1++;
+ if (tmp->add_prof)
+ es++;
+ break;
+ case ICE_TCAM_ADD:
+ tcam++;
+ break;
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ xlt2++;
+ break;
+ default:
+ break;
+ }
+ }
+ sects = xlt1 + xlt2 + tcam + es;
+
+ if (!sects)
+ return 0;
+
+ /* Build update package buffer */
+ b = ice_pkg_buf_alloc(hw);
+ if (!b)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_pkg_buf_reserve_section(b, sects);
+ if (status)
+ goto error_tmp;
+
+ /* Preserve order of table update: ES, TCAM, PTG, VSIG */
+ if (es) {
+ status = ice_prof_bld_es(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (tcam) {
+ status = ice_prof_bld_tcam(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt1) {
+ status = ice_prof_bld_xlt1(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt2) {
+ status = ice_prof_bld_xlt2(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ /* After package buffer build check if the section count in buffer is
+ * non-zero and matches the number of sections detected for package
+ * update.
+ */
+ pkg_sects = ice_pkg_buf_get_active_sections(b);
+ if (!pkg_sects || pkg_sects != sects) {
+ status = ICE_ERR_INVAL_SIZE;
+ goto error_tmp;
+ }
+
+ /* update package */
+ status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+ if (status == ICE_ERR_AQ_ERROR)
+ ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
+
+error_tmp:
+ ice_pkg_buf_free(hw, b);
+ return status;
+}
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTGs with a
+ * particular extraction sequence. While the hardware profile is allocated
+ * it will not be written until the first call to ice_add_flow that specifies
+ * the ID value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es)
+{
+ u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+ DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_prof_map *prof;
+ enum ice_status status;
+ u32 byte = 0;
+ u8 prof_id;
+
+ bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+ /* search for existing profile */
+ status = ice_find_prof_id(hw, blk, es, &prof_id);
+ if (status) {
+ /* allocate profile ID */
+ status = ice_alloc_prof_id(hw, blk, &prof_id);
+ if (status)
+ goto err_ice_add_prof;
+
+ /* and write new es */
+ ice_write_es(hw, blk, prof_id, es);
+ }
+
+ ice_prof_inc_ref(hw, blk, prof_id);
+
+ /* add profile info */
+ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
+	if (!prof) {
+		status = ICE_ERR_NO_MEMORY;
+		goto err_ice_add_prof;
+	}
+
+ prof->profile_cookie = id;
+ prof->prof_id = prof_id;
+ prof->ptg_cnt = 0;
+ prof->context = 0;
+
+ /* build list of ptgs */
+ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+ u32 bit;
+
+ if (!ptypes[byte]) {
+ bytes--;
+ byte++;
+ continue;
+ }
+
+ /* Examine 8 bits per byte */
+ for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
+ BITS_PER_BYTE) {
+ u16 ptype;
+ u8 ptg;
+ u8 m;
+
+ ptype = byte * BITS_PER_BYTE + bit;
+
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
+
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
+
+ set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
+
+			/* if nothing is left in this byte, exit */
+ m = ~((1 << (bit + 1)) - 1);
+ if (!(ptypes[byte] & m))
+ break;
+ }
+
+ bytes--;
+ byte++;
+ }
+
+ list_add(&prof->list, &hw->blk[blk].es.prof_map);
+ status = 0;
+
+err_ice_add_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
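
/* Illustrative sketch (not part of this patch): registering an RSS profile.
 * The tracking ID and ptype bits are hypothetical, and 'es' is assumed to
 * hold hw->blk[ICE_BLK_RSS].es.fvw extraction words.
 */
static enum ice_status
example_add_rss_prof(struct ice_hw *hw, struct ice_fv_word *es)
{
	u8 ptypes[DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };

	ptypes[0] = 0x01;	/* match ptype 0 only */

	return ice_add_prof(hw, ICE_BLK_RSS, 0x1234ULL, ptypes, es);
}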
+
+/**
+ * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added. This
+ * version assumes that the caller has already acquired the prof map lock.
+ */
+static struct ice_prof_map *
+ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry = NULL;
+ struct ice_prof_map *map;
+
+ list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
+ if (map->profile_cookie == id) {
+ entry = map;
+ break;
+ }
+
+ return entry;
+}
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+static struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry;
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ entry = ice_search_prof_id_low(hw, blk, id);
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+
+ return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG whose profiles are to be counted
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+ struct ice_vsig_prof *p;
+
+ list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ count++;
+
+ return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a TCAM index
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+	/* Masks to invoke a never match entry: bit 0 of the key is marked
+	 * never match and every other bit don't care, so the entry as a
+	 * whole can never hit
+	 */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+ dc_msk, nm_msk);
+ if (status)
+ return status;
+
+ /* release the TCAM entry */
+ status = ice_free_tcam_ent(hw, blk, idx);
+
+ return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof: pointer to profile structure to remove
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_vsig_prof *prof)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < prof->tcam_count; i++)
+ if (prof->tcam[i].in_use) {
+ prof->tcam[i].in_use = false;
+ status = ice_rel_tcam_idx(hw, blk,
+ prof->tcam[i].tcam_idx);
+ if (status)
+ return ICE_ERR_HW_TABLE;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *vsi_cur;
+ struct ice_vsig_prof *d, *t;
+ enum ice_status status;
+
+ /* remove TCAM entries */
+ list_for_each_entry_safe(d, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ status = ice_rem_prof_id(hw, blk, d);
+ if (status)
+ return status;
+
+ list_del(&d->list);
+ devm_kfree(ice_hw_to_dev(hw), d);
+ }
+
+ /* Move all VSIS associated with this VSIG to the default VSIG */
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the list
+ * and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur)
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+ struct ice_chs_chg *p;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->type = ICE_VSIG_REM;
+ p->orig_vsig = vsig;
+ p->vsig = ICE_DEFAULT_VSIG;
+ p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+ list_add(&p->list_entry, chg);
+
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ return ice_vsig_free(hw, blk, vsig);
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct list_head *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *p, *t;
+ enum ice_status status;
+
+ list_for_each_entry_safe(p, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list)
+ if (p->profile_cookie == hdl) {
+ if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+ /* this is the last profile, remove the VSIG */
+ return ice_rem_vsig(hw, blk, vsig, chg);
+
+ status = ice_rem_prof_id(hw, blk, p);
+ if (!status) {
+ list_del(&p->list);
+ devm_kfree(ice_hw_to_dev(hw), p);
+ }
+ return status;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_chs_chg *del, *tmp;
+ enum ice_status status;
+ struct list_head chg;
+ u16 i;
+
+ INIT_LIST_HEAD(&chg);
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+ if (ice_has_prof_vsig(hw, blk, i, id)) {
+ status = ice_rem_prof_id_vsig(hw, blk, i, id,
+ &chg);
+ if (status)
+ goto err_ice_rem_flow_all;
+ }
+ }
+
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the ID parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *pmap;
+ enum ice_status status;
+
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+
+ pmap = ice_search_prof_id_low(hw, blk, id);
+ if (!pmap) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_rem_prof;
+ }
+
+ /* remove all flows with this profile */
+ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+ if (status)
+ goto err_ice_rem_prof;
+
+ /* dereference profile, and possibly remove */
+ ice_prof_dec_ref(hw, blk, pmap->prof_id);
+
+ list_del(&pmap->list);
+ devm_kfree(ice_hw_to_dev(hw), pmap);
+
+err_ice_rem_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
+/**
+ * ice_get_prof - get profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+ struct list_head *chg)
+{
+ struct ice_prof_map *map;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < map->ptg_cnt; i++)
+ if (!hw->blk[blk].es.written[map->prof_id]) {
+ /* add ES to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ goto err_ice_get_prof;
+
+ p->type = ICE_PTG_ES_ADD;
+ p->ptype = 0;
+ p->ptg = map->ptg[i];
+ p->add_ptg = 0;
+
+ p->add_prof = 1;
+ p->prof_id = map->prof_id;
+
+ hw->blk[blk].es.written[map->prof_id] = true;
+
+ list_add(&p->list_entry, chg);
+ }
+
+ return 0;
+
+err_ice_get_prof:
+ /* let caller clean up the change list */
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *lst)
+{
+ struct ice_vsig_prof *ent1, *ent2;
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ struct ice_vsig_prof *p;
+
+ /* copy to the input list */
+ p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ goto err_ice_get_profs_vsig;
+
+ list_add_tail(&p->list, lst);
+ }
+
+ return 0;
+
+err_ice_get_profs_vsig:
+ list_for_each_entry_safe(ent1, ent2, lst, list) {
+ list_del(&ent1->list);
+ devm_kfree(ice_hw_to_dev(hw), ent1);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+ struct list_head *lst, u64 hdl)
+{
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *p;
+ u16 i;
+
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->profile_cookie = map->profile_cookie;
+ p->prof_id = map->prof_id;
+ p->tcam_count = map->ptg_cnt;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ p->tcam[i].prof_id = map->prof_id;
+ p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+ p->tcam[i].ptg = map->ptg[i];
+ }
+
+ list_add(&p->list, lst);
+
+ return 0;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 orig_vsig;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (!status)
+ status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+ }
+
+ p->type = ICE_VSI_MOVE;
+ p->vsi = vsi;
+ p->orig_vsig = orig_vsig;
+ p->vsig = vsig;
+
+ list_add(&p->list_entry, chg);
+
+ return 0;
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable TCAM change
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the VSIG of the TCAM entry
+ * @tcam: pointer the TCAM info structure of the TCAM to disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable TCAM entry in the change log
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+ u16 vsig, struct ice_tcam_inf *tcam,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+
+ /* Default: enable means change the low flag bit to don't care */
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+
+ /* if disabling, free the TCAM */
+ if (!enable) {
+ status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ tcam->tcam_idx = 0;
+ tcam->in_use = 0;
+ return status;
+ }
+
+ /* for re-enabling, reallocate a TCAM */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ if (status)
+ return status;
+
+ /* add TCAM to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+ tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+ nm_msk);
+ if (status)
+ goto err_ice_prof_tcam_ena_dis;
+
+ tcam->in_use = 1;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = tcam->prof_id;
+ p->ptg = tcam->ptg;
+ p->vsig = 0;
+ p->tcam_idx = tcam->tcam_idx;
+
+ /* log change */
+ list_add(&p->list_entry, chg);
+
+ return 0;
+
+err_ice_prof_tcam_ena_dis:
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+}
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct list_head *chg)
+{
+ DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 idx;
+
+ bitmap_zero(ptgs_used, ICE_XLT1_CNT);
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ /* Priority is based on the order in which the profiles are added. The
+	 * newest added profile has the highest priority and the oldest added
+ * profile has the lowest priority. Since the profile property list for
+ * a VSIG is sorted from newest to oldest, this code traverses the list
+ * in order and enables the first of each PTG that it finds (that is not
+ * already enabled); it also disables any duplicate PTGs that it finds
+ * in the older profiles (that are currently enabled).
+ */
+
+ list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ list) {
+ u16 i;
+
+ for (i = 0; i < t->tcam_count; i++) {
+ /* Scan the priorities from newest to oldest.
+ * Make sure that the newest profiles take priority.
+ */
+ if (test_bit(t->tcam[i].ptg, ptgs_used) &&
+ t->tcam[i].in_use) {
+ /* need to mark this PTG as never match, as it
+ * was already in use and therefore duplicate
+ * (and lower priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, false,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
+ !t->tcam[i].in_use) {
+				/* need to enable this PTG, as it is not in use
+ * and not enabled (highest priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, true,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ }
+
+ /* keep track of used ptgs */
+ set_bit(t->tcam[i].ptg, ptgs_used);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct list_head *chg)
+{
+ /* Masks that ignore flags */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *t;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Error, if this VSIG already has this profile */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+ return ICE_ERR_ALREADY_EXISTS;
+
+ /* new VSIG profile structure */
+ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return ICE_ERR_NO_MEMORY;
+
+ t->profile_cookie = map->profile_cookie;
+ t->prof_id = map->prof_id;
+ t->tcam_count = map->ptg_cnt;
+
+ /* create TCAM entries */
+ for (i = 0; i < map->ptg_cnt; i++) {
+ enum ice_status status;
+ u16 tcam_idx;
+
+ /* add TCAM to change list */
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto err_ice_add_prof_id_vsig;
+
+ /* allocate the TCAM entry index */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ t->tcam[i].ptg = map->ptg[i];
+ t->tcam[i].prof_id = map->prof_id;
+ t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].in_use = true;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = t->tcam[i].prof_id;
+ p->ptg = t->tcam[i].ptg;
+ p->vsig = vsig;
+ p->tcam_idx = t->tcam[i].tcam_idx;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+ t->tcam[i].prof_id,
+ t->tcam[i].ptg, vsig, 0, 0,
+ vl_msk, dc_msk, nm_msk);
+ if (status)
+ goto err_ice_add_prof_id_vsig;
+
+ /* log change */
+ list_add(&p->list_entry, chg);
+ }
+
+ /* add profile to VSIG */
+ list_add(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+
+ return 0;
+
+err_ice_add_prof_id_vsig:
+ /* let caller clean up the change list */
+ devm_kfree(ice_hw_to_dev(hw), t);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+ struct list_head *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 new_vsig;
+
+ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ new_vsig = ice_vsig_alloc(hw, blk);
+ if (!new_vsig) {
+ status = ICE_ERR_HW_TABLE;
+ goto err_ice_create_prof_id_vsig;
+ }
+
+ status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ p->type = ICE_VSIG_ADD;
+ p->vsi = vsi;
+ p->orig_vsig = ICE_DEFAULT_VSIG;
+ p->vsig = new_vsig;
+
+ list_add(&p->list_entry, chg);
+
+ return 0;
+
+err_ice_create_prof_id_vsig:
+ /* let caller clean up the change list */
+ devm_kfree(ice_hw_to_dev(hw), p);
+ return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ struct list_head *lst, struct list_head *chg)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = ice_vsig_alloc(hw, blk);
+ if (!vsig)
+ return ICE_ERR_HW_TABLE;
+
+ status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+ if (status)
+ return status;
+
+ list_for_each_entry(t, lst, list) {
+ status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+ chg);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ struct list_head lst;
+
+ INIT_LIST_HEAD(&lst);
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return false;
+
+ t->profile_cookie = hdl;
+ list_add(&t->list, &lst);
+
+ status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+ list_del(&t->list);
+ kfree(t);
+
+ return !status;
+}
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to enable with the profile specified by ID
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the ID parameter for the specified VSI. Once
+ * successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct ice_chs_chg *tmp, *del;
+ struct list_head union_lst;
+ enum ice_status status;
+ struct list_head chg;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&union_lst);
+ INIT_LIST_HEAD(&chg);
+
+ /* Get profile */
+ status = ice_get_prof(hw, blk, hdl, &chg);
+ if (status)
+ return status;
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool only_vsi;
+ u16 or_vsig;
+ u16 ref;
+
+ /* found in VSIG */
+ or_vsig = vsig;
+
+ /* make sure that there is no overlap/conflict between the new
+ * characteristics and the existing ones; we don't support that
+ * scenario
+ */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto err_ice_add_prof_id_flow;
+ }
+
+ /* last VSI in the VSIG? */
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ /* create a union of the current profiles and the one being
+ * added
+ */
+ status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* search for an existing VSIG with an exact characteristic match */
+ status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+ if (!status) {
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* VSI has been moved out of or_vsig. If the or_vsig had
+ * only that VSI it is now empty and can be removed.
+ */
+ if (only_vsi) {
+ status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else if (only_vsi) {
+ /* If the original VSIG only contains one VSI, then it
+ * will be the requesting VSI. In this case the VSI is
+ * not sharing entries and we can simply add the new
+ * profile to the VSIG.
+ */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* No match, so we need a new VSIG */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &union_lst, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else {
+ /* need to find or add a VSIG */
+ /* search for an existing VSIG with an exact characteristic match */
+ if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
+ /* found an exact match */
+ /* add or move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* we did not find an exact match */
+ /* we need to add a VSIG */
+ status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ }
+
+ /* update hardware */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_add_prof_id_flow:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
+ list_del(&del1->list);
+ devm_kfree(ice_hw_to_dev(hw), del1);
+ }
+
+ return status;
+}
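The branching above is dense on first read. A condensed, runnable restatement of the outcome for each combination of conditions (a reading aid only; the function and string names are invented):

#include <stdbool.h>
#include <stdio.h>

static const char *
add_flow_outcome(bool in_vsig, bool dup_vsig, bool only_vsi)
{
	if (!in_vsig)
		return dup_vsig ? "move VSI into the matching VSIG"
				: "create a new VSIG for this profile";
	if (dup_vsig)
		return "move VSI to the matching VSIG, removing the old one if emptied";
	return only_vsi ? "add the profile to the current VSIG in place"
			: "create a new VSIG from the union of profiles";
}

int main(void)
{
	printf("%s\n", add_flow_outcome(true, false, false));
	return 0;
}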
+
+/**
+ * ice_rem_prof_from_list - remove a profile from list
+ * @hw: pointer to the HW struct
+ * @lst: list to remove the profile from
+ * @hdl: the profile handle indicating the profile to remove
+ */
+static enum ice_status
+ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
+{
+ struct ice_vsig_prof *ent, *tmp;
+
+ list_for_each_entry_safe(ent, tmp, lst, list)
+ if (ent->profile_cookie == hdl) {
+ list_del(&ent->list);
+ devm_kfree(ice_hw_to_dev(hw), ent);
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_prof_id_flow - remove flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI from which to remove the profile specified by ID
+ * @hdl: profile tracking handle
+ *
+ * Calling this function will update the hardware tables to remove the
+ * profile indicated by the ID parameter for the specified VSI. Once
+ * successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct ice_chs_chg *tmp, *del;
+ struct list_head chg, copy;
+ enum ice_status status;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&copy);
+ INIT_LIST_HEAD(&chg);
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool last_profile;
+ bool only_vsi;
+ u16 ref;
+
+ /* found in VSIG */
+ last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ if (only_vsi) {
+ /* If the original VSIG only contains one reference,
+ * which will be the requesting VSI, then the VSI is not
+ * sharing entries and we can simply remove the specific
+ * characteristics from the VSIG.
+ */
+
+ if (last_profile) {
+ /* If there are no profiles left for this VSIG,
+ * then simply remove the VSIG.
+ */
+ status = ice_rem_vsig(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ status = ice_rem_prof_id_vsig(hw, blk, vsig,
+ hdl, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+
+ } else {
+ /* Make a copy of the VSIG's list of Profiles */
+ status = ice_get_profs_vsig(hw, blk, vsig, &copy);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Remove specified profile entry from the list */
+ status = ice_rem_prof_from_list(hw, &copy, hdl);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ if (list_empty(&copy)) {
+ status = ice_move_vsi(hw, blk, vsi,
+ ICE_DEFAULT_VSIG, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+ &vsig)) {
+ /* Found a VSIG with a matching profile list;
+ * move the VSI to that VSIG
+ */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ /* since no existing VSIG supports this
+ * characteristic pattern, we need to create a
+ * new VSIG and TCAM entries
+ */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &copy, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+ }
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ /* update hardware tables */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_prof_id_flow:
+ list_for_each_entry_safe(del, tmp, &chg, list_entry) {
+ list_del(&del->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), del);
+ }
+
+ list_for_each_entry_safe(del1, tmp1, &copy, list) {
+ list_del(&del1->list);
+ devm_kfree(ice_hw_to_dev(hw), del1);
+ }
+
+ return status;
+}
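Mirroring the add path, the removal outcomes collapse to a small decision table. Again a condensed reading aid with invented names, not driver code:

#include <stdbool.h>
#include <stdio.h>

static const char *
rem_flow_outcome(bool only_vsi, bool last_prof, bool list_empty, bool dup_vsig)
{
	if (only_vsi)
		return last_prof ? "remove the whole VSIG"
				 : "remove the profile from the VSIG in place";
	if (list_empty)
		return "move VSI to the default VSIG";
	if (dup_vsig)
		return "move VSI to the VSIG matching the reduced profile list";
	return "create a new VSIG from the remaining profiles";
}

int main(void)
{
	printf("%s\n", rem_flow_outcome(false, false, false, true));
	return 0;
}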
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 37eb282742d1..c7b5e1a6ea2b 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,13 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es);
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
@@ -26,4 +33,6 @@ void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
+enum ice_status
+ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 5d5a7eaffa30..0fb3fe3ff3ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -3,6 +3,9 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
@@ -105,37 +108,57 @@ struct ice_buf_hdr {
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+#define ICE_SID_CDID_REDIR_SW 18
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
@@ -152,6 +175,19 @@ enum ice_block {
ICE_BLK_COUNT
};
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
/* package labels */
struct ice_label {
__le16 value;
@@ -234,6 +270,13 @@ struct ice_prof_redir_section {
u8 redir_value[1];
};
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
@@ -248,6 +291,12 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
+struct ice_pkg_es {
+ __le16 count;
+ __le16 offset;
+ struct ice_fv_word es[1];
+};
+
struct ice_es {
u32 sid;
u16 count;
@@ -280,6 +329,35 @@ struct ice_ptg_ptype {
u8 ptg;
};
+#define ICE_MAX_TCAM_PER_PROFILE 32
+#define ICE_MAX_PTG_PER_PROFILE 32
+
+struct ice_prof_map {
+ struct list_head list;
+ u64 profile_cookie;
+ u64 context;
+ u8 prof_id;
+ u8 ptg_cnt;
+ u8 ptg[ICE_MAX_PTG_PER_PROFILE];
+};
+
+#define ICE_INVALID_TCAM 0xFFFF
+
+struct ice_tcam_inf {
+ u16 tcam_idx;
+ u8 ptg;
+ u8 prof_id;
+ u8 in_use;
+};
+
+struct ice_vsig_prof {
+ struct list_head list;
+ u64 profile_cookie;
+ u8 prof_id;
+ u8 tcam_count;
+ struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
+};
+
struct ice_vsig_entry {
struct list_head prop_lst;
struct ice_vsig_vsi *first_vsi;
@@ -329,6 +407,13 @@ struct ice_xlt2 {
u16 count;
};
+/* Profile ID Management */
+struct ice_prof_id_key {
+ __le16 flags;
+ u8 xlt1;
+ __le16 xlt2_cdid;
+} __packed;
+
/* Keys are made up of two values, each one-half the size of the key.
* For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values)
*/
@@ -371,4 +456,31 @@ struct ice_blk_info {
u8 is_list_init;
};
+enum ice_chg_type {
+ ICE_TCAM_NONE = 0,
+ ICE_PTG_ES_ADD,
+ ICE_TCAM_ADD,
+ ICE_VSIG_ADD,
+ ICE_VSIG_REM,
+ ICE_VSI_MOVE,
+};
+
+struct ice_chs_chg {
+ struct list_head list_entry;
+ enum ice_chg_type type;
+
+ u8 add_ptg;
+ u8 add_vsig;
+ u8 add_tcam_idx;
+ u8 add_prof;
+ u16 ptype;
+ u8 ptg;
+ u8 prof_id;
+ u16 vsi;
+ u16 vsig;
+ u16 orig_vsig;
+ u16 tcam_idx;
+};
+
+#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
new file mode 100644
index 000000000000..a05ceb59863b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -0,0 +1,1275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_flow.h"
+
+/* Describe properties of a protocol header field */
+struct ice_flow_field_info {
+ enum ice_flow_seg_hdr hdr;
+ s16 off; /* Offset from start of a protocol header, in bits */
+ u16 size; /* Size of fields in bits */
+};
+
+#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
+ .hdr = _hdr, \
+ .off = (_offset_bytes) * BITS_PER_BYTE, \
+ .size = (_size_bytes) * BITS_PER_BYTE, \
+}
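To make the byte-to-bit conversion concrete, the first table entry below expands roughly as follows (illustrative expansion; offsets and sizes are in bits):

/* ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr))
 * expands to:
 *   { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32 }
 * since the source address sits 12 bytes (96 bits) into the IPv4 header
 * and sizeof(struct in_addr) is 4 bytes (32 bits).
 */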
+
+/* Table containing properties of supported protocol header fields */
+static const
+struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+ /* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV4_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV6_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
+ /* Transport */
+ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
+ /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+
+};
+
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single IPv4 header
+ */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+ 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
+ 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+ 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
+ 0x00000770, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* UDP Packet types for non-tunneled packets or tunneled
+ * packets with inner UDP.
+ */
+static const u32 ice_ptypes_udp_il[] = {
+ 0x81000000, 0x20204040, 0x04000010, 0x80810102,
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+ 0x04000000, 0x80810102, 0x10000040, 0x02040408,
+ 0x00000102, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+ 0x08000000, 0x01020204, 0x20000081, 0x04080810,
+ 0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Parameters and info used during the creation of a flow profile */
+struct ice_flow_prof_params {
+ enum ice_block blk;
+ u16 entry_length; /* # of bytes a formatted entry will require */
+ u8 es_cnt;
+ struct ice_flow_prof *prof;
+
+ /* For ACL, es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0,
+ * which gives us the direction flags.
+ */
+ struct ice_fv_word es[ICE_MAX_FV_WORDS];
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+#define ICE_FLOW_SEG_HDRS_L3_MASK \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+#define ICE_FLOW_SEG_HDRS_L4_MASK \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+ u8 i;
+
+ for (i = 0; i < segs_cnt; i++) {
+ /* Multiple L3 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+ !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+ return ICE_ERR_PARAM;
+
+ /* Multiple L4 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+ !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+ return ICE_ERR_PARAM;
+ }
+
+ return 0;
+}
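The is_power_of_2() test above is what enforces "at most one L3 and one L4 header per segment": a mask with more than one bit set is never a power of two. A self-contained sketch of the same check, with mask values taken from the ice_flow_seg_hdr enum and a stand-in for the kernel's is_power_of_2():

#include <stdbool.h>
#include <stdio.h>

#define HDR_IPV4 0x00000004
#define HDR_IPV6 0x00000008
#define L3_MASK  (HDR_IPV4 | HDR_IPV6)

/* true for zero or a single set bit; the driver's zero case is handled
 * by the preceding "hdrs & MASK &&" guard instead
 */
static bool at_most_one_bit(unsigned int v)
{
	return !v || (v & (v - 1)) == 0;
}

int main(void)
{
	printf("%d\n", at_most_one_bit(HDR_IPV4 & L3_MASK));              /* 1 */
	printf("%d\n", at_most_one_bit((HDR_IPV4 | HDR_IPV6) & L3_MASK)); /* 0 */
	return 0;
}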
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers present in the packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof;
+ u8 i;
+
+ memset(params->ptypes, 0xff, sizeof(params->ptypes));
+
+ prof = params->prof;
+
+ for (i = 0; i < params->prof->segs_cnt; i++) {
+ const unsigned long *src;
+ u32 hdrs;
+
+ hdrs = prof->segs[i].hdrs;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
+ (const unsigned long *)ice_ptypes_ipv4_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
+ (const unsigned long *)ice_ptypes_ipv6_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ src = (const unsigned long *)ice_ptypes_udp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+ bitmap_and(params->ptypes, params->ptypes,
+ (const unsigned long *)ice_ptypes_tcp_il,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+ src = (const unsigned long *)ice_ptypes_sctp_il;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg, enum ice_flow_field fld)
+{
+ enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+ u8 fv_words = hw->blk[params->blk].es.fvw;
+ struct ice_flow_fld_info *flds;
+ u16 cnt, ese_bits, i;
+ u16 off;
+
+ flds = params->prof->segs[seg].fields;
+
+ switch (fld) {
+ case ICE_FLOW_FIELD_IDX_IPV4_SA:
+ case ICE_FLOW_FIELD_IDX_IPV4_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+ prot_id = ICE_PROT_TCP_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
+ prot_id = ICE_PROT_SCTP_IL;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ /* Each extraction sequence entry is a word in size, and extracts a
+ * word from a word-aligned offset within a protocol header.
+ */
+ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
+
+ flds[fld].xtrct.prot_id = prot_id;
+ flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
+ flds[fld].xtrct.idx = params->es_cnt;
+
+ /* Adjust the next field-entry index after accommodating the number of
+ * entries this field consumes
+ */
+ cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
+ ese_bits);
+
+ /* Fill in the extraction sequence entries needed for this field */
+ off = flds[fld].xtrct.off;
+ for (i = 0; i < cnt; i++) {
+ u8 idx;
+
+ /* Make sure the number of extraction sequence entries
+ * required does not exceed the block's capability
+ */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+
+ return 0;
+}
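Worked numbers help here. For the IPv4 source address (bit offset 96, size 32 bits) the computations above yield off = 12 bytes, disp = 0, and cnt = 2 entries. A sketch reproducing the arithmetic, assuming ICE_FLOW_FV_EXTRACT_SZ is 2 bytes (its value is not shown in this hunk):

#include <stdio.h>

#define EXTRACT_SZ 2			/* assumed bytes per FV word */
#define ESE_BITS   (EXTRACT_SZ * 8)	/* bits per extraction entry */

int main(void)
{
	int fld_off_bits = 12 * 8;	/* IPv4 SA: byte 12 of the header */
	int fld_size_bits = 4 * 8;	/* 32-bit address */

	int off  = (fld_off_bits / ESE_BITS) * EXTRACT_SZ;
	int disp = fld_off_bits % ESE_BITS;
	int cnt  = (disp + fld_size_bits + ESE_BITS - 1) / ESE_BITS;

	/* prints: off=12 bytes, disp=0 bits, entries=2 */
	printf("off=%d bytes, disp=%d bits, entries=%d\n", off, disp, cnt);
	return 0;
}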
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+ struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof = params->prof;
+ enum ice_status status = 0;
+ u8 i;
+
+ for (i = 0; i < prof->segs_cnt; i++) {
+ u8 j;
+
+ for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i,
+ (enum ice_flow_field)j);
+ if (status)
+ return status;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+ enum ice_status status;
+
+ status = ice_flow_proc_seg_hdrs(params);
+ if (status)
+ return status;
+
+ status = ice_flow_create_xtrct_seq(hw, params);
+ if (status)
+ return status;
+
+ switch (params->blk) {
+ case ICE_BLK_RSS:
+ /* Only header information is provided for RSS configuration.
+ * No further processing is needed.
+ */
+ status = 0;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ return status;
+}
+
+#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
+#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
+#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+
+/**
+ * ice_flow_find_prof_conds - Find a profile matching headers and conditions
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
+ * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
+ u8 segs_cnt, u16 vsi_handle, u32 conds)
+{
+ struct ice_flow_prof *p, *prof = NULL;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+ list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
+ if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
+ segs_cnt && segs_cnt == p->segs_cnt) {
+ u8 i;
+
+ /* Check for profile-VSI association if specified */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
+ ice_is_vsi_valid(hw, vsi_handle) &&
+ !test_bit(vsi_handle, p->vsis))
+ continue;
+
+ /* Protocol headers must be checked. Matched fields are
+ * checked if specified.
+ */
+ for (i = 0; i < segs_cnt; i++)
+ if (segs[i].hdrs != p->segs[i].hdrs ||
+ ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
+ segs[i].match != p->segs[i].match))
+ break;
+
+ /* A match is found if all segments are matched */
+ if (i == segs_cnt) {
+ prof = p;
+ break;
+ }
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return prof;
+}
+
+/**
+ * ice_flow_find_prof_id - Look up a profile with given profile ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: unique ID to identify this flow profile
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *p;
+
+ list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
+ if (p->id == prof_id)
+ return p;
+
+ return NULL;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, u64 prof_id,
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof)
+{
+ struct ice_flow_prof_params params;
+ enum ice_status status;
+ u8 i;
+
+ if (!prof)
+ return ICE_ERR_BAD_PTR;
+
+ memset(&params, 0, sizeof(params));
+ params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
+ GFP_KERNEL);
+ if (!params.prof)
+ return ICE_ERR_NO_MEMORY;
+
+ /* initialize extraction sequence to all invalid (0xff) */
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params.es[i].prot_id = ICE_PROT_INVALID;
+ params.es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ params.blk = blk;
+ params.prof->id = prof_id;
+ params.prof->dir = dir;
+ params.prof->segs_cnt = segs_cnt;
+
+ /* Make a copy of the segments that need to be persistent in the flow
+ * profile instance
+ */
+ for (i = 0; i < segs_cnt; i++)
+ memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
+
+ status = ice_flow_proc_segs(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW,
+ "Error processing a flow's packet segments\n");
+ goto out;
+ }
+
+ /* Add a HW profile for this flow profile */
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&params.prof->entries);
+ mutex_init(&params.prof->entries_lock);
+ *prof = params.prof;
+
+out:
+ if (status)
+ devm_kfree(ice_hw_to_dev(hw), params.prof);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof_sync - remove a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile to remove
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof)
+{
+ enum ice_status status;
+
+ /* Remove all hardware profiles associated with this flow profile */
+ status = ice_rem_prof(hw, blk, prof->id);
+ if (!status) {
+ list_del(&prof->l_entry);
+ mutex_destroy(&prof->entries_lock);
+ devm_kfree(ice_hw_to_dev(hw), prof);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_assoc_prof - associate a VSI with a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+
+ if (!test_bit(vsi_handle, prof->vsis)) {
+ status = ice_add_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ set_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile add failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+
+ if (test_bit(vsi_handle, prof->vsis)) {
+ status = ice_rem_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ clear_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile remove failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @prof: stores the returned flow profile added
+ */
+static enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof)
+{
+ enum ice_status status;
+
+ if (segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_MAX_LIMIT;
+
+ if (!segs_cnt)
+ return ICE_ERR_PARAM;
+
+ if (!segs)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_flow_val_hdrs(segs, segs_cnt);
+ if (status)
+ return status;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+ prof);
+ if (!status)
+ list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the HW struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+static enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ /* prof becomes invalid after the call */
+ status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @type: type of the field
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ *
+ * This helper function stores information about a field being matched,
+ * including the type of the field and the locations of the value to match,
+ * the mask, and the upper-bound value at the start of the input buffer for a
+ * flow entry.
+ * This function should only be used for fixed-size data structures.
+ *
+ * This function also opportunistically determines the protocol headers to be
+ * present based on the fields being set. Some fields cannot be used alone to
+ * determine the protocol headers present. Sometimes, fields for particular
+ * protocol headers are not matched. In those cases, the protocol headers
+ * must be explicitly set.
+ */
+static void
+ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ enum ice_flow_fld_match_type type, u16 val_loc,
+ u16 mask_loc, u16 last_loc)
+{
+ u64 bit = BIT_ULL(fld);
+
+ seg->match |= bit;
+ if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ seg->range |= bit;
+
+ seg->fields[fld].type = type;
+ seg->fields[fld].src.val = val_loc;
+ seg->fields[fld].src.mask = mask_loc;
+ seg->fields[fld].src.last = last_loc;
+
+ ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
+}
+
+/**
+ * ice_flow_set_fld - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ * @range: indicate if field being matched is to be in a range
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match,
+ * the mask value, and upper value can be extracted. These locations are then
+ * stored in the flow profile. When adding a flow entry associated with the
+ * flow profile, these locations will be used to quickly extract the values and
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+static void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
+{
+ enum ice_flow_fld_match_type t = range ?
+ ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+ ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
+
+#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+
+#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
+
+#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
+ (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
+
+/**
+ * ice_flow_set_rss_seg_info - setup packet segments for RSS
+ * @segs: pointer to the flow field segment(s)
+ * @hash_fields: fields to be hashed on for the segment(s)
+ * @flow_hdr: protocol header fields within a packet segment
+ *
+ * Helper function to extract fields from hash bitmap and use flow
+ * header value to set flow field segment for further use in flow
+ * profile entry or removal.
+ */
+static enum ice_status
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
+ u32 flow_hdr)
+{
+ u64 val;
+ u8 i;
+
+ for_each_set_bit(i, (unsigned long *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+
+ ICE_FLOW_SET_HDRS(segs, flow_hdr);
+
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ return ICE_ERR_PARAM;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ if (val && !is_power_of_2(val))
+ return ICE_ERR_CFG;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ if (val && !is_power_of_2(val))
+ return ICE_ERR_CFG;
+
+ return 0;
+}
+
+/**
+ * ice_rem_vsi_rss_list - remove VSI from RSS list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * Remove the VSI from all RSS configurations in the list.
+ */
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ if (list_empty(&hw->rss_list_head))
+ return;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
+ if (test_and_clear_bit(vsi_handle, r->vsis))
+ if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ mutex_unlock(&hw->rss_locks);
+}
+
+/**
+ * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function will iterate through all flow profiles and disassociate
+ * the VSI from each profile it is associated with. If a flow profile then
+ * has no VSIs, it will be removed.
+ */
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *p, *t;
+ enum ice_status status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (list_empty(&hw->fl_profs[blk]))
+ return 0;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+ list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
+ if (test_bit(vsi_handle, p->vsis)) {
+ status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
+ if (status)
+ break;
+
+ if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof_sync(hw, blk, p);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_rem_rss_list - remove RSS configuration from list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static void
+ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ /* Search for RSS hash fields associated with the VSI that match the
+ * hash configurations associated with the flow profile. If found,
+ * remove it from the VSI's RSS entry list and delete the entry.
+ */
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ clear_bit(vsi_handle, r->vsis);
+ if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
+ list_del(&r->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r);
+ }
+ return;
+ }
+}
+
+/**
+ * ice_add_rss_list - add RSS configuration to list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *rss_cfg;
+
+ list_for_each_entry(r, &hw->rss_list_head, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ set_bit(vsi_handle, r->vsis);
+ return 0;
+ }
+
+ rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
+ GFP_KERNEL);
+ if (!rss_cfg)
+ return ICE_ERR_NO_MEMORY;
+
+ rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ set_bit(vsi_handle, rss_cfg->vsis);
+
+ list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
+
+ return 0;
+}
+
+#define ICE_FLOW_PROF_HASH_S 0
+#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S 32
+#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_ENCAP_S 63
+#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+
+#define ICE_RSS_OUTER_HEADERS 1
+
+/* Flow profile ID format:
+ * [0:31] - Packet match fields
+ * [32:62] - Protocol header
+ * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+ */
+#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
+ (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+ ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
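The resulting 64-bit ID packs the three fields sketched in the comment above. A quick standalone reimplementation (segs_cnt > 1 plays the role of the macro's (u8)((segs_cnt) - 1) truthiness test):

#include <stdint.h>
#include <stdio.h>

#define HASH_M	0xFFFFFFFFULL
#define HDR_S	32
#define HDR_M	(0x3FFFFFFFULL << HDR_S)
#define ENCAP_M	(1ULL << 63)

static uint64_t gen_profid(uint64_t hash, uint64_t hdr, unsigned int segs_cnt)
{
	return (hash & HASH_M) |
	       ((hdr << HDR_S) & HDR_M) |
	       (segs_cnt > 1 ? ENCAP_M : 0);
}

int main(void)
{
	/* hash bits 0x3 over an IPv4 header (0x4), single segment:
	 * prints 0x0000000400000003 with the encap flag clear
	 */
	printf("0x%016llx\n",
	       (unsigned long long)gen_profid(0x3, 0x4, 1));
	return 0;
}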
+
+/**
+ * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ * @segs_cnt: packet segment count
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs, u8 segs_cnt)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_flow_seg_info *segs;
+ enum ice_status status;
+
+ if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_PARAM;
+
+ segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
+ if (!segs)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Construct the packet segment info from the hashed fields */
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
+ if (status)
+ goto exit;
+
+ /* Search for a flow profile that has matching headers, hash fields
+ * and has the input VSI associated to it. If found, no further
+ * operations required and exit.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof)
+ goto exit;
+
+ /* Check if a flow profile exists with the same protocol headers and
+ * associated with the input VSI. If so disassociate the VSI from
+ * this profile. The VSI will be added to a new profile created with
+ * the protocol header and new hash field configuration.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof) {
+ status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ ice_rem_rss_list(hw, vsi_handle, prof);
+ else
+ goto exit;
+
+ /* Remove profile if it has no VSIs associated */
+ if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof(hw, blk, prof->id);
+ if (status)
+ goto exit;
+ }
+ }
+
+ /* Search for a profile that has same match fields only. If this
+ * exists then associate the VSI to this profile.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS);
+ if (prof) {
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+ goto exit;
+ }
+
+ /* Create a new flow profile with generated profile and packet
+ * segment information.
+ */
+ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
+ ICE_FLOW_GEN_PROFID(hashed_flds,
+ segs[segs_cnt - 1].hdrs,
+ segs_cnt),
+ segs, segs_cnt, &prof);
+ if (status)
+ goto exit;
+
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ /* If association to a new flow profile failed then this profile can
+ * be removed.
+ */
+ if (status) {
+ ice_flow_rem_prof(hw, blk, prof->id);
+ goto exit;
+ }
+
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+
+exit:
+ kfree(segs);
+ return status;
+}
+
+/**
+ * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * This function will generate a flow profile from the input fields to hash
+ * on and the flow type, and will then use the VSI number to add a flow entry
+ * to the profile.
+ */
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs)
+{
+ enum ice_status status;
+
+ if (hashed_flds == ICE_HASH_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
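A sketch of a call site, using the hash and header constants defined in ice_flow.h below; this fragment is not compilable on its own, and hw and vsi_handle are assumed to come from the caller's context:

/* Hash TCP/IPv4 flows on source/destination address and port */
enum ice_status status;

status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
			 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
if (status)
	ice_debug(hw, ICE_DBG_FLOW, "RSS cfg failed, %d\n", status);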
+
+/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
+ * Since the ice_flow_avf_hdr_field values represent individual bit shifts in
+ * a hash, convert them to their appropriate flow L3, L4 values.
+ */
+#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+
+#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+
+/**
+ * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ *
+ * This function will take the hash bitmap provided by the AVF driver via a
+ * message, convert it to ICE-compatible values, and configure RSS flow
+ * profiles.
+ */
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+{
+ enum ice_status status = 0;
+ u64 hash_flds;
+
+ if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Make sure no unsupported bits are specified */
+ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
+ ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
+ return ICE_ERR_CFG;
+
+ hash_flds = avf_hash;
+
+ /* Always create an L3 RSS configuration for any L4 RSS configuration */
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
+
+ /* Create the corresponding RSS configuration for each valid hash bit */
+ while (hash_flds) {
+ u64 rss_hash = ICE_HASH_INVALID;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ }
+ } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ }
+ }
+
+ if (rss_hash == ICE_HASH_INVALID)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
+ ICE_FLOW_SEG_HDR_NONE);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
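+/* Illustrative call sketch: a virtchnl handler for
+ * VIRTCHNL_OP_SET_RSS_HENA could forward the VF-supplied hash-enable
+ * bitmap directly, e.g.:
+ *
+ *	status = ice_add_avf_rss_cfg(&pf->hw, vf->lan_vsi_idx, vrh->hena);
+ *
+ * vf and vrh here are hypothetical locals of that handler.
+ */
+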
+/**
+ * ice_replay_rss_cfg - replay RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status = 0;
+ struct ice_rss_cfg *r;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry(r, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis)) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_OUTER_HEADERS);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
+/**
+ * ice_get_rss_cfg - returns hashed fields for the given header types
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hdrs: protocol header type
+ *
+ * This function will return the match fields of the first flow profile
+ * that has the given header types and contains the input VSI
+ */
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+{
+ struct ice_rss_cfg *r, *rss_cfg = NULL;
+
+	/* verify that the protocol header is nonzero and the VSI is valid */
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_HASH_INVALID;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry(r, &hw->rss_list_head, l_entry)
+ if (test_bit(vsi_handle, r->vsis) &&
+ r->packet_hdr == hdrs) {
+ rss_cfg = r;
+ break;
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+}
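+
+/* Illustrative call sketch: an ethtool get_rxnfc handler could query the
+ * configured hash fields with, e.g.:
+ *
+ *	u64 hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx,
+ *					ICE_FLOW_SEG_HDR_TCP |
+ *					ICE_FLOW_SEG_HDR_IPV4);
+ *
+ * and report hashing on IP src/dst and TCP ports when hash_flds equals
+ * ICE_HASH_TCP_IPV4 (defined in ice_flow.h).
+ */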
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
new file mode 100644
index 000000000000..5558627bd5eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_FLOW_H_
+#define _ICE_FLOW_H_
+
+#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
+#define ICE_FLOW_FLD_OFF_INVAL 0xffff
+
+/* Generate flow hash field from flow field type(s) */
+#define ICE_FLOW_HASH_IPV4 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
+#define ICE_FLOW_HASH_IPV6 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_TCP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
+#define ICE_FLOW_HASH_UDP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
+#define ICE_FLOW_HASH_SCTP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
+
+#define ICE_HASH_INVALID 0
+#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+
+/* Protocol header fields within a packet segment. A segment is a logical
+ * group of one or more protocol headers. Each such group encapsulates, or
+ * is encapsulated by, tunneling or encapsulation protocols used for network
+ * virtualization, such as GRE or VxLAN.
+ */
+enum ice_flow_seg_hdr {
+ ICE_FLOW_SEG_HDR_NONE = 0x00000000,
+ ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
+ ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
+ ICE_FLOW_SEG_HDR_TCP = 0x00000040,
+ ICE_FLOW_SEG_HDR_UDP = 0x00000080,
+ ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+};
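+
+/* Currently only single-segment L3/L4 groups are used; e.g. a
+ * TCP-over-IPv4 RSS profile is described by ICE_FLOW_SEG_HDR_TCP |
+ * ICE_FLOW_SEG_HDR_IPV4 (see ice_vsi_set_rss_flow_fld() in ice_lib.c).
+ */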
+
+enum ice_flow_field {
+ /* L3 */
+ ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_DA,
+ /* L4 */
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+	/* The total number of enum values must not exceed 64 */
+ ICE_FLOW_FIELD_IDX_MAX
+};
+
+/* Flow headers and fields for AVF support */
+enum ice_flow_avf_hdr_field {
+ /* Values 0 - 28 are reserved for future use */
+ ICE_AVF_FLOW_FIELD_INVALID = 0,
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP,
+ ICE_AVF_FLOW_FIELD_IPV4_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV4_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV4,
+ /* Values 37-38 are reserved */
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP,
+ ICE_AVF_FLOW_FIELD_IPV6_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV6_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV6,
+ ICE_AVF_FLOW_FIELD_RSVD47,
+ ICE_AVF_FLOW_FIELD_FCOE_OX,
+ ICE_AVF_FLOW_FIELD_FCOE_RX,
+ ICE_AVF_FLOW_FIELD_FCOE_OTHER,
+ /* Values 51-62 are reserved */
+ ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
+ ICE_AVF_FLOW_FIELD_MAX
+};
+
+/* Supported RSS offloads. This macro is used to support the
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS op, in which the PF driver reports the RSS
+ * hardware capabilities to the caller.
+ */
+#define ICE_DEFAULT_RSS_HENA ( \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+
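+/* The enum values above double as bit positions in the 64-bit HENA
+ * bitmap; e.g. BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) is bit 31, keeping
+ * the layout the AVF (iavf) driver expects.
+ */
+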
+enum ice_flow_dir {
+ ICE_FLOW_RX = 0x02,
+};
+
+enum ice_flow_priority {
+ ICE_FLOW_PRIO_LOW,
+ ICE_FLOW_PRIO_NORMAL,
+ ICE_FLOW_PRIO_HIGH
+};
+
+#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_FV_EXTRACT_SZ 2
+
+#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
+
+struct ice_flow_seg_xtrct {
+ u8 prot_id; /* Protocol ID of extracted header field */
+ u16 off; /* Starting offset of the field in header in bytes */
+ u8 idx; /* Index of FV entry used */
+	u8 disp;	/* Displacement of field in bits from FV entry's start */
+};
+
+enum ice_flow_fld_match_type {
+ ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
+ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
+ ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
+ ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
+};
+
+struct ice_flow_fld_loc {
+ /* Describe offsets of field information relative to the beginning of
+ * input buffer provided when adding flow entries.
+ */
+ u16 val; /* Offset where the value is located */
+ u16 mask; /* Offset where the mask/prefix value is located */
+ u16 last; /* Length or offset where the upper value is located */
+};
+
+struct ice_flow_fld_info {
+ enum ice_flow_fld_match_type type;
+ /* Location where to retrieve data from an input buffer */
+ struct ice_flow_fld_loc src;
+ /* Location where to put the data into the final entry buffer */
+ struct ice_flow_fld_loc entry;
+ struct ice_flow_seg_xtrct xtrct;
+};
+
+struct ice_flow_seg_info {
+ u32 hdrs; /* Bitmask indicating protocol headers present */
+ u64 match; /* Bitmask indicating header fields to be matched */
+ u64 range; /* Bitmask indicating header fields matched as ranges */
+
+ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+};
+
+struct ice_flow_prof {
+ struct list_head l_entry;
+
+ u64 id;
+ enum ice_flow_dir dir;
+ u8 segs_cnt;
+
+ /* Keep track of flow entries associated with this flow profile */
+ struct mutex entries_lock;
+ struct list_head entries;
+
+ struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
+
+ /* software VSI handles referenced by this flow profile */
+ DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+};
+
+struct ice_rss_cfg {
+ struct list_head l_entry;
+ /* bitmap of VSIs added to the RSS entry */
+ DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+ u64 hashed_flds;
+ u32 packet_hdr;
+};
+
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index e8f32350fed2..6db3d0494127 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -60,15 +60,6 @@
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
-#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
@@ -276,8 +267,14 @@
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLNVM_ULD_POR_DONE_M BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
+#define GLNVM_ULD_PE_DONE_M BIT(10)
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
@@ -340,7 +337,6 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
-#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 0997d352709b..878e125d8b42 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -199,6 +199,14 @@ enum ice_rxdid {
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
+/* Receive Descriptor MDID values that access packet flags */
+enum ice_flex_mdid_pkt_flags {
+ ICE_RX_MDID_PKT_FLAGS_15_0 = 20,
+ ICE_RX_MDID_PKT_FLAGS_31_16,
+ ICE_RX_MDID_PKT_FLAGS_47_32,
+ ICE_RX_MDID_PKT_FLAGS_63_48,
+};
+
/* Receive Descriptor MDID values */
enum ice_flex_rx_mdid {
ICE_RX_MDID_FLOW_ID_LOWER = 5,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index e7449248fab4..d974e2fa3e63 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_base.h"
+#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -116,8 +117,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
- dev_dbg(&vsi->back->pdev->dev,
- "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
vsi->type);
break;
}
@@ -493,7 +493,28 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
+ * @vsi: the VSI being cleaned up
+ *
+ * This function deletes the RSS input sets for all flows that were
+ * configured for this VSI
+ */
+static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+
+ if (ice_is_safe_mode(pf))
+ return;
+
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status)
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_rss_clean - Delete RSS related VSI structures and configuration
* @vsi: the VSI being removed
*/
static void ice_rss_clean(struct ice_vsi *vsi)
@@ -507,6 +528,11 @@ static void ice_rss_clean(struct ice_vsi *vsi)
devm_kfree(dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
devm_kfree(dev, vsi->rss_lut_user);
+
+ ice_vsi_clean_rss_flow_fld(vsi);
+ /* remove RSS replay list */
+ if (!ice_is_safe_mode(pf))
+ ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}
/**
@@ -697,7 +723,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
- dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
* in the above for loop, make num_txq equal to num_rxq.
*/
@@ -817,12 +843,23 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- /* Enable MAC Antispoof with new VSI being initialized or updated */
- if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
+ * respectively
+ */
+ if (vsi->type == ICE_VSI_VF) {
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ } else {
+ ctxt->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
+ }
}
/* Allow control frames out of main VSI */
@@ -891,8 +928,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(dev,
- "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
@@ -1076,6 +1112,115 @@ ice_vsi_cfg_rss_exit:
}
/**
+ * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
+ * @vsi: VSI to be configured
+ *
+ * This function will only be called during VF VSI setup. Upon successful
+ * completion of the package download, this function configures the default
+ * RSS input sets for the VF VSI.
+ */
+static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi->vsi_num);
+ return;
+ }
+
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ if (status)
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
+ * @vsi: VSI to be configured
+ *
+ * This function will only be called after a successful package download
+ * during PF initialization. Since the downloaded package erases the RSS
+ * section, this function configures RSS input sets for the different flow
+ * types. The last profile added has the highest priority, so 2-tuple
+ * profiles (e.g. IPv4 src/dst) are added before 4-tuple profiles
+ * (e.g. IPv4 src/dst + TCP src/dst port).
+ */
+static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
+{
+ u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
+ vsi_num);
+ return;
+ }
+ /* configure RSS for IPv4 with input set IP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for sctp4 with input set IP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ /* configure RSS for sctp6 with input set IPv6 src/dst */
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+}
+
+/**
* ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
@@ -1085,8 +1230,9 @@ ice_vsi_cfg_rss_exit:
*
* Returns 0 on success or ENOMEM on failure.
*/
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
@@ -1245,12 +1391,10 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev,
- "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
- dev_err(dev,
- "Error removing VLAN %d on vsi %i error: %d\n",
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
}
@@ -1306,8 +1450,7 @@ setup_rings:
err = ice_setup_rx_ctx(vsi->rx_rings[i]);
if (err) {
- dev_err(&vsi->back->pdev->dev,
- "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
i, err);
return err;
}
@@ -1476,7 +1619,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1522,7 +1665,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1636,22 +1779,14 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
ctxt->info = vsi->info;
- if (ena) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ if (ena)
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ else
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
if (!vlan_promisc)
ctxt->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -1661,7 +1796,6 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
goto err_out;
}
- vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
kfree(ctxt);
@@ -1696,8 +1830,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (!q_vector) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set reg_idx on q_vector %d VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
i, vsi->vsi_num);
goto clear_reg_idx;
}
@@ -1760,8 +1893,7 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(dev,
- "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+ dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);
ice_free_fltr_list(dev, &tmp_add_list);
@@ -1899,8 +2031,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
@@ -1924,8 +2058,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_vf_rss_flow_fld(vsi);
+ }
break;
case ICE_VSI_LB:
ret = ice_vsi_alloc_rings(vsi);
@@ -2242,8 +2378,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf),
- "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
@@ -2402,6 +2537,97 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
+ * @q_vector: pointer to the q_vector being updated
+ * @coalesce: pointer to the stored coalesce settings for this q_vector
+ *
+ * Set the coalesce parameters in the q_vector and update them in HW.
+ */
+static void
+ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
+ struct ice_coalesce_stored *coalesce)
+{
+ struct ice_ring_container *rx_rc = &q_vector->rx;
+ struct ice_ring_container *tx_rc = &q_vector->tx;
+ struct ice_hw *hw = &q_vector->vsi->back->hw;
+
+ tx_rc->itr_setting = coalesce->itr_tx;
+ rx_rc->itr_setting = coalesce->itr_rx;
+
+ /* dynamic ITR values will be updated during Tx/Rx */
+ if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
+ wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(tx_rc->itr_setting) >>
+ ICE_ITR_GRAN_S);
+ if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
+ wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rx_rc->itr_setting) >>
+ ICE_ITR_GRAN_S);
+
+ q_vector->intrl = coalesce->intrl;
+ wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+}
+
+/**
+ * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
+ * @vsi: VSI connected with q_vectors
+ * @coalesce: array in which to save the coalesce settings
+ *
+ * Returns the number of entries saved, i.e. vsi->num_q_vectors.
+ */
+static int
+ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ struct ice_coalesce_stored *coalesce)
+{
+ int i;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ coalesce[i].itr_tx = q_vector->tx.itr_setting;
+ coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].intrl = q_vector->intrl;
+ }
+
+ return vsi->num_q_vectors;
+}
+
+/**
+ * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
+ * @vsi: VSI connected with q_vectors
+ * @coalesce: pointer to the array of stored coalesce settings
+ * @size: size of the coalesce array
+ *
+ * ice_vsi_rebuild_get_coalesce should be called beforehand to save the ITR
+ * params in the array. If size is 0 or an entry wasn't stored, the coalesce
+ * settings are reset to their default values.
+ */
+static void
+ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ struct ice_coalesce_stored *coalesce, int size)
+{
+ int i;
+
+ if ((size && !coalesce) || !vsi)
+ return;
+
+ for (i = 0; i < size && i < vsi->num_q_vectors; i++)
+ ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+ &coalesce[i]);
+
+ for (; i < vsi->num_q_vectors; i++) {
+ struct ice_coalesce_stored coalesce_dflt = {
+ .itr_tx = ICE_DFLT_TX_ITR,
+ .itr_rx = ICE_DFLT_RX_ITR,
+ .intrl = 0
+ };
+ ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+ &coalesce_dflt);
+ }
+}
+
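+/* Sketch of the intended save/restore sequence, which ice_vsi_rebuild()
+ * below follows:
+ *
+ *	prev = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+ *	... free and re-create the q_vectors ...
+ *	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev);
+ */
+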
+/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuild
* @init_vsi: is this an initialization or a reconfigure of the VSI
@@ -2411,6 +2637,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_coalesce_stored *coalesce;
+ int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_status status;
struct ice_pf *pf;
@@ -2423,6 +2651,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
+ coalesce = kcalloc(vsi->num_q_vectors,
+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+ if (coalesce)
+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
+ coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
@@ -2446,7 +2679,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
- ice_dev_onetime_setup(&pf->hw);
if (vsi->type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
@@ -2525,8 +2757,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(ice_pf_to_dev(pf),
- "VSI %d failed lan queue config, error %d\n",
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
if (init_vsi) {
ret = -EIO;
@@ -2535,6 +2766,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
+ ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
+ kfree(coalesce);
+
return 0;
err_vectors:
@@ -2549,6 +2783,7 @@ err_rings:
err_vsi:
ice_vsi_clear(vsi);
set_bit(__ICE_RESET_FAILED, pf->state);
+ kfree(coalesce);
return ret;
}
@@ -2590,8 +2825,8 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx *ctx;
enum ice_status status;
struct device *dev;
int i, ret = 0;
@@ -2648,25 +2883,6 @@ out:
#endif /* CONFIG_DCB */
/**
- * ice_nvm_version_str - format the NVM version strings
- * @hw: ptr to the hardware info
- */
-char *ice_nvm_version_str(struct ice_hw *hw)
-{
- u8 oem_ver, oem_patch, ver_hi, ver_lo;
- static char buf[ICE_NVM_VER_LEN];
- u16 oem_build;
-
- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
- &ver_lo);
-
- snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
-
- return buf;
-}
-
-/**
* ice_update_ring_stats - Update ring statistics
* @ring: ring to update
* @cont: used to increment per-vector counters
@@ -2737,6 +2953,121 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
cfg_mac_fltr_exit:
- ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
return status;
}
+
+/**
+ * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
+ * @sw: switch to check if its default forwarding VSI is free
+ *
+ * Returns true if the default forwarding VSI is already being used, else
+ * returns false, signaling that it's available for use.
+ */
+bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+{
+ return (sw->dflt_vsi && sw->dflt_vsi_ena);
+}
+
+/**
+ * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
+ * @sw: switch for the default forwarding VSI to compare against
+ * @vsi: VSI to compare against default forwarding VSI
+ *
+ * Returns true if the VSI passed in is the default forwarding VSI, else
+ * returns false
+ */
+bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+{
+ return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+}
+
+/**
+ * ice_set_dflt_vsi - set the default forwarding VSI
+ * @sw: switch used to assign the default forwarding VSI
+ * @vsi: VSI getting set as the default forwarding VSI on the switch
+ *
+ * If the VSI passed in is already the default VSI and it's enabled, just
+ * return success.
+ *
+ * If there is already a default VSI on the switch and it's enabled, return
+ * -EEXIST, since there can only be one default VSI per switch.
+ *
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
+ */
+int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+{
+ enum ice_status status;
+ struct device *dev;
+
+ if (!sw || !vsi)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ /* the VSI passed in is already the default VSI */
+ if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
+ vsi->vsi_num);
+ return 0;
+ }
+
+ /* another VSI is already the default VSI for this switch */
+ if (ice_is_dflt_vsi_in_use(sw)) {
+ dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
+ sw->dflt_vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ if (status) {
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return -EIO;
+ }
+
+ sw->dflt_vsi = vsi;
+ sw->dflt_vsi_ena = true;
+
+ return 0;
+}
+
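+/* Illustrative call pattern: callers are expected to check for an
+ * in-use default VSI first, e.g.:
+ *
+ *	if (!ice_is_dflt_vsi_in_use(pf->first_sw))
+ *		err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ *
+ * as ice_vsi_sync_fltr() in ice_main.c does when enabling promiscuous
+ * mode.
+ */
+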
+/**
+ * ice_clear_dflt_vsi - clear the default forwarding VSI
+ * @sw: switch used to clear the default VSI
+ *
+ * If the switch has no default VSI or it's not enabled, return an error.
+ *
+ * Otherwise try to clear the default VSI and return the result.
+ */
+int ice_clear_dflt_vsi(struct ice_sw *sw)
+{
+ struct ice_vsi *dflt_vsi;
+ enum ice_status status;
+ struct device *dev;
+
+ if (!sw)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(sw->pf);
+
+ dflt_vsi = sw->dflt_vsi;
+
+ /* there is no default VSI configured */
+ if (!ice_is_dflt_vsi_in_use(sw))
+ return -ENODEV;
+
+ status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ ICE_FLTR_RX);
+ if (status) {
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
+ dflt_vsi->vsi_num, status);
+ return -EIO;
+ }
+
+ sw->dflt_vsi = NULL;
+ sw->dflt_vsi_ena = false;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6e31e30aba39..e2c0dadce920 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -97,10 +97,16 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
-char *ice_nvm_version_str(struct ice_hw *hw);
-
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
+
+bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
+
+bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
+
+int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
+
+int ice_clear_dflt_vsi(struct ice_sw *sw);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 69bff085acf7..5ef28052c0f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,7 @@
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
@@ -162,8 +162,7 @@ unregister:
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf),
- "Could not add MAC filters error %d. Unregistering device\n",
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
status);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
@@ -269,7 +268,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
+ struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
@@ -335,8 +334,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
!test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
- netdev_warn(netdev,
- "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
+ netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
err = -EIO;
@@ -379,25 +377,27 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
- ICE_FLTR_RX);
- if (status) {
- netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags &= ~IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
+ if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
+ err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (err && err != -EEXIST) {
+ netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
+ err, vsi->vsi_num);
+ vsi->current_netdev_flags &=
+ ~IFF_PROMISC;
+ goto out_promisc;
+ }
}
} else {
/* Clear Rx filter to remove traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
- ICE_FLTR_RX);
- if (status) {
- netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags |= IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
+ if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
+ err = ice_clear_dflt_vsi(pf->first_sw);
+ if (err) {
+ netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
+ err, vsi->vsi_num);
+ vsi->current_netdev_flags |=
+ IFF_PROMISC;
+ goto out_promisc;
+ }
}
}
}
@@ -472,7 +472,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
/* clear SW filtering DB */
@@ -748,7 +748,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
kfree(caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, fec_req, fec, an, fc);
ice_print_topo_conflict(vsi);
}
@@ -811,8 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
result = ice_update_link_info(pi);
if (result)
- dev_dbg(dev,
- "Failed to update link status and re-enable link events for port %d\n",
+ dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
/* if the old link up/down and speed is the same as the new */
@@ -830,18 +829,17 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
- dev_dbg(dev,
- "Failed to set link down, VSI %d error %d\n",
+ dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
}
}
+ ice_dcb_rebuild(pf);
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
- if (pf->num_alloc_vfs)
- ice_vc_notify_link_state(pf);
+ ice_vc_notify_link_state(pf);
return result;
}
@@ -889,15 +887,13 @@ static int ice_init_link_events(struct ice_port_info *pi)
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to set link event mask for port %d\n",
+ dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
pi->lport);
return -EIO;
}
if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to enable link events for port %d\n",
+ dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
pi->lport);
return -EIO;
}
@@ -926,8 +922,8 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
!!(link_data->link_info & ICE_AQ_LINK_UP),
le16_to_cpu(link_data->link_speed));
if (status)
- dev_dbg(ice_pf_to_dev(pf),
- "Could not process link event, error %d\n", status);
+ dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
+ status);
return status;
}
@@ -976,13 +972,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_dbg(dev, "%s Receive Queue VF Error detected\n",
qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
- dev_dbg(dev,
- "%s Receive Queue Overflow Error detected\n",
+ dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
- dev_dbg(dev,
- "%s Receive Queue Critical Error detected\n",
+ dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M);
@@ -995,8 +989,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
- dev_dbg(dev,
- "%s Send Queue VF Error detected\n", qtype);
+ dev_dbg(dev, "%s Send Queue VF Error detected\n",
+ qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
qtype);
@@ -1045,8 +1039,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
- dev_dbg(dev,
- "%s Receive Queue unknown event 0x%04x ignored\n",
+ dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
@@ -1235,7 +1228,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
GL_MDET_TX_TCLAN_QNUM_S);
- if (netif_msg_rx_err(pf))
+ if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
@@ -1291,7 +1284,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
/* check to see if one of the VFs caused the MDD */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false;
@@ -1332,8 +1325,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev,
- "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
+ dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
}
@@ -1364,7 +1356,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
if (vsi->type != ICE_VSI_PF)
return 0;
- dev = &vsi->back->pdev->dev;
+ dev = ice_pf_to_dev(vsi->back);
pi = vsi->port_info;
@@ -1375,8 +1367,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (retcode) {
- dev_err(dev,
- "Failed to get phy capabilities, VSI %d error %d\n",
+ dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
goto out;
@@ -1646,8 +1637,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (err) {
- netdev_err(vsi->netdev,
- "MSIX request_irq failed, error: %d\n", err);
+ netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
+ err);
goto free_q_irqs;
}
@@ -1682,7 +1673,7 @@ free_q_irqs:
*/
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
+ struct device *dev = ice_pf_to_dev(vsi->back);
int i;
for (i = 0; i < vsi->num_xdp_txq; i++) {
@@ -2330,7 +2321,8 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO;
+ tso_features = NETIF_F_TSO |
+ NETIF_F_GSO_UDP_L4;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
@@ -2660,14 +2652,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
-#ifdef CONFIG_PCI_IOV
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
-#endif /* CONFIG_PCI_IOV */
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2760,8 +2750,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
}
if (v_actual < v_budget) {
- dev_warn(dev,
- "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
+ dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors for LAN (traffic + OICR) */
#define ICE_MIN_LAN_VECS 2
@@ -2783,8 +2772,7 @@ msix_err:
goto exit_err;
no_hw_vecs_left_err:
- dev_err(dev,
- "not enough device MSI-X vectors. requested = %d, available = %d\n",
+ dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
exit_err:
@@ -2917,16 +2905,14 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
!memcmp(hw->pkg_name, hw->active_pkg_name,
sizeof(hw->pkg_name))) {
if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev,
- "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft);
else
- dev_info(dev,
- "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2934,8 +2920,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->active_pkg_ver.draft);
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev,
- "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2943,8 +2928,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev,
- "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2956,54 +2940,46 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->pkg_ver.update,
hw->pkg_ver.draft);
} else {
- dev_err(dev,
- "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
+ dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
case ICE_ERR_BUF_TOO_SHORT:
/* fall-through */
case ICE_ERR_CFG:
- dev_err(dev,
- "The DDP package file is invalid. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
case ICE_ERR_NOT_SUPPORTED:
/* Package File version not supported */
if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev,
- "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev,
- "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
- dev_err(dev,
- "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_ESVN:
- dev_err(dev,
- "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
- dev_err(dev,
- "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
return;
default:
break;
}
/* fall-through */
default:
- dev_err(dev,
- "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
+ dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
break;
}
@@ -3034,8 +3010,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
ice_log_pkg_init(hw, &status);
} else {
- dev_err(dev,
- "The DDP package file failed to load. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
if (status) {
@@ -3061,8 +3036,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
- dev_warn(ice_pf_to_dev(pf),
- "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+ dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}
@@ -3155,8 +3129,7 @@ static void ice_request_fw(struct ice_pf *pf)
dflt_pkg_load:
err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
if (err) {
- dev_err(dev,
- "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
return;
}
@@ -3180,7 +3153,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int err;
- /* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -3241,11 +3216,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
- dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
- hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
- hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
- ice_nvm_version_str(hw), hw->fw_build);
-
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -3253,8 +3223,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev,
- "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
+ dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -3331,8 +3300,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* tell the firmware we are up */
err = ice_send_version(pf);
if (err) {
- dev_err(dev,
- "probe failed sending driver version %s. error: %d\n",
+ dev_err(dev, "probe failed sending driver version %s. error: %d\n",
ice_drv_ver, err);
goto err_alloc_sw_unroll;
}
@@ -3473,8 +3441,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset, error %d\n",
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
err);
result = PCI_ERS_RESULT_DISCONNECT;
} else {
@@ -3493,8 +3460,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
err = pci_cleanup_aer_uncorrect_error_status(pdev);
if (err)
- dev_dbg(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
err);
/* non-fatal, continue */
@@ -3513,8 +3479,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
if (!pf) {
- dev_err(&pdev->dev,
- "%s failed, device is unrecoverable\n", __func__);
+ dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
+ __func__);
return;
}
@@ -3568,6 +3534,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
/* required last entry */
{ 0, }
};
@@ -3753,8 +3728,7 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
/* Validate maxrate requested is within permitted range */
if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
- netdev_err(netdev,
- "Invalid max rate %d specified for the queue %d\n",
+ netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
maxrate, queue_index);
return -EINVAL;
}
@@ -3770,8 +3744,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev,
- "Unable to set Tx max rate, error %d\n", status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
return -EIO;
}
@@ -3863,15 +3837,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
/* Don't set any netdev advanced features with device in Safe Mode */
if (ice_is_safe_mode(vsi->back)) {
- dev_err(&vsi->back->pdev->dev,
- "Device is in Safe Mode - not enabling advanced netdev features\n");
+ dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(&vsi->back->pdev->dev,
- "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
@@ -4359,21 +4331,18 @@ int ice_down(struct ice_vsi *vsi)
tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
- netdev_err(vsi->netdev,
- "Failed stop Tx rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
- netdev_err(vsi->netdev,
- "Failed stop XDP rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
}
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
- netdev_err(vsi->netdev,
- "Failed stop Rx rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
ice_napi_disable_all(vsi);
@@ -4381,8 +4350,7 @@ int ice_down(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
link_err = ice_force_phys_link_state(vsi, false);
if (link_err)
- netdev_err(vsi->netdev,
- "Failed to set physical link down, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
}
@@ -4393,8 +4361,7 @@ int ice_down(struct ice_vsi *vsi)
ice_clean_rx_ring(vsi->rx_rings[i]);
if (tx_err || rx_err || link_err) {
- netdev_err(vsi->netdev,
- "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
}
@@ -4413,7 +4380,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
int i, err = 0;
if (!vsi->num_txq) {
- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
+ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4444,7 +4411,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
int i, err = 0;
if (!vsi->num_rxq) {
- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
+ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4541,8 +4508,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
err = ice_vsi_release(pf->vsi[i]);
if (err)
- dev_dbg(ice_pf_to_dev(pf),
- "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
+ dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
i, err, pf->vsi[i]->vsi_num);
}
}
@@ -4569,8 +4535,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* rebuild the VSI */
err = ice_vsi_rebuild(vsi, true);
if (err) {
- dev_err(dev,
- "rebuild VSI failed, err %d, VSI index %d, type %s\n",
+ dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4578,8 +4543,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev,
- "replay VSI failed, status %d, VSI index %d, type %s\n",
+ dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4592,8 +4556,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
- dev_err(dev,
- "enable VSI failed, err %d, VSI index %d, type %s\n",
+ dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4670,6 +4633,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ if (pf->first_sw->dflt_vsi_ena)
+ dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
+ /* clear the default VSI configuration if it exists */
+ pf->first_sw->dflt_vsi = NULL;
+ pf->first_sw->dflt_vsi_ena = false;
+
ice_clear_pxe_mode(hw);
ret = ice_get_caps(hw);
@@ -4716,8 +4685,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev,
- "Rebuild failed due to error sending driver version: %d\n",
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
ret);
goto err_vsi_rebuild;
}
@@ -4825,7 +4793,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- netdev_info(netdev, "changed MTU to %d\n", new_mtu);
+ netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
return 0;
}
@@ -4973,7 +4941,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -5060,42 +5028,23 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
-static void ice_tx_timeout(struct net_device *netdev)
+static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int hung_queue = -1;
u32 i;
pf->tx_timeout_count++;
- /* find the stopped queue the same way dev_watchdog() does */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- unsigned long trans_start;
- struct netdev_queue *q;
-
- q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start;
- if (netif_xmit_stopped(q) &&
- time_after(jiffies,
- trans_start + netdev->watchdog_timeo)) {
- hung_queue = i;
- break;
- }
- }
-
- if (i == netdev->num_tx_queues)
- netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- else
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- if (hung_queue == vsi->tx_rings[i]->q_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_txq; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ if (txqueue == vsi->tx_rings[i]->q_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
@@ -5110,19 +5059,19 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
- head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
- vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+ vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
- pf->tx_timeout_recovery_level, hung_queue);
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
case 1:
@@ -5184,8 +5133,7 @@ int ice_open(struct net_device *netdev)
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
err = ice_force_phys_link_state(vsi, true);
if (err) {
- netdev_err(netdev,
- "Failed to set physical link up, error %d\n",
+ netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
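
The ice_tx_timeout() rework above tracks a tree-wide change to the .ndo_tx_timeout callback: the core watchdog now hands the driver the index of the queue that hung, so the removed scan over every Tx queue is no longer needed (igb, igbvf, and igc are converted the same way later in this diff). A minimal sketch of the new hook shape; the driver name and ops table below are hypothetical, not part of this patch:

#include <linux/netdevice.h>

/* Hypothetical hook: txqueue is the queue whose watchdog expired,
 * passed down by dev_watchdog() instead of being rediscovered here.
 */
static void sketch_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	netdev_warn(netdev, "Tx timeout on queue %u\n", txqueue);
	/* schedule reset/recovery work here */
}

static const struct net_device_ops sketch_netdev_ops = {
	.ndo_tx_timeout	= sketch_tx_timeout,
};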
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 57c73f613f32..7525ac50742e 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -289,6 +289,18 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+ /* the following devices do not have boot_cfg_tlv yet */
+ if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
+ hw->device_id == ICE_DEV_ID_E822C_QSFP ||
+ hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
+ hw->device_id == ICE_DEV_ID_E822C_SGMII ||
+ hw->device_id == ICE_DEV_ID_E822C_SFP ||
+ hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
+ hw->device_id == ICE_DEV_ID_E822L_SFP ||
+ hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
+ hw->device_id == ICE_DEV_ID_E822L_SGMII)
+ return status;
+
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
new file mode 100644
index 000000000000..71647566964e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_PROTOCOL_TYPE_H_
+#define _ICE_PROTOCOL_TYPE_H_
+/* Decoders for ice_prot_id:
+ * - F: First
+ * - I: Inner
+ * - L: Last
+ * - O: Outer
+ * - S: Single
+ */
+enum ice_prot_id {
+ ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_IPV4_OF_OR_S = 32,
+ ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV6_OF_OR_S = 40,
+ ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_SCTP_IL = 96,
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
+ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
+};
+#endif /* _ICE_PROTOCOL_TYPE_H_ */
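
A short illustration of how the suffix legend above decodes in practice; the pairing below is inferred from the comment, not taken from driver code in this patch:

/* OF_OR_S: the Outer (First) header, or the Single header when the
 * packet is not tunneled. IL: the Inner (Last) header of a tunnel.
 */
enum ice_prot_id outer_ip = ICE_PROT_IPV4_OF_OR_S;	/* outer or only IPv4 */
enum ice_prot_id inner_ip = ICE_PROT_IPV4_IL;		/* inner IPv4 */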
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index c01597885629..a9a8bc3aca42 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -26,6 +26,7 @@ enum ice_status {
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
+ ICE_ERR_HW_TABLE = -19,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index b5a53f862a83..431266081a80 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -50,42 +50,6 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/**
- * ice_aq_alloc_free_res - command to allocate/free resources
- * @hw: pointer to the HW struct
- * @num_entries: number of resource entries in buffer
- * @buf: Indirect buffer to hold data parameters and response
- * @buf_size: size of buffer for indirect commands
- * @opc: pass in the command opcode
- * @cd: pointer to command details structure or NULL
- *
- * Helper function to allocate/free resources using the admin queue commands
- */
-static enum ice_status
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
-{
- struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.sw_res_ctrl;
-
- if (!buf)
- return ICE_ERR_PARAM;
-
- if (buf_size < (num_entries * sizeof(buf->elem[0])))
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd->num_entries = cpu_to_le16(num_entries);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 2c212f64d99f..4de61dbedd36 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -644,7 +644,7 @@ static bool ice_page_is_reserved(struct page *page)
* Update the offset within page so that Rx buf will be ready to be reused.
* For systems with PAGE_SIZE < 8192 this function will flip the page offset
* so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by the @size bytes
+ * the offset is moved by "size" bytes
*/
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
@@ -1071,13 +1071,14 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ice_put_rx_buf(rx_ring, rx_buf);
continue;
construct_skb:
- if (skb)
+ if (skb) {
ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- else if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
-
+ } else if (likely(xdp.data)) {
+ if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
@@ -1618,11 +1619,11 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
struct sk_buff *skb;
+ skb_frag_t *frag;
dma_addr_t dma;
td_tag = off->td_l2tag1;
@@ -1735,9 +1736,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- }
return;
@@ -1925,6 +1925,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
@@ -1958,10 +1959,18 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* remove payload length from checksum */
paylen = skb->len - l4_start;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of UDP segmentation header */
+ off->header_len = sizeof(struct udphdr) + l4_start;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of TCP segmentation header */
+ off->header_len = (l4.tcp->doff * 4) + l4_start;
+ }
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -2066,7 +2075,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
- * use this as the worst case scenerio in which the frag ahead
+ * use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
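
For reference, a self-contained sketch of the L4 header-length arithmetic the ice_tso() hunk above adds for UDP segmentation offload: a UDP header is a fixed eight bytes, while TCP's length comes from its data-offset field counted in 32-bit words. The helper is illustrative only:

#include <linux/tcp.h>
#include <linux/udp.h>

/* Illustrative: bytes the hardware must replicate per segment, where
 * l4_start is the offset of the L4 header from the start of the packet.
 */
static unsigned int gso_header_len(bool is_udp, const struct tcphdr *tcp,
				   unsigned int l4_start)
{
	if (is_udp)
		return sizeof(struct udphdr) + l4_start; /* always 8 + offset */

	return (tcp->doff * 4) + l4_start; /* doff is in 32-bit words */
}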
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a84cc0e6dd27..14a1bf445889 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -33,8 +33,8 @@
* frame.
*
* Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
@@ -341,6 +341,12 @@ struct ice_ring_container {
u16 itr_setting;
};
+struct ice_coalesce_stored {
+ u16 itr_tx;
+ u16 itr_rx;
+ u8 intrl;
+};
+
/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 35bbc4ff603c..6da048a6ca7c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -10,7 +10,7 @@
*/
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ u16 prev_ntu = rx_ring->next_to_use & ~0x7;
rx_ring->next_to_use = val;
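
The new mask above snaps the cached next-to-use value down to an 8-descriptor boundary before comparing it with the incoming tail value, which (as the mask suggests) keeps tail bumps at an eight-descriptor granularity and avoids redundant register writes. A worked example of the masking, with an assumed index value:

u16 ntu = 13;			/* 0b1101, assumed for illustration */
u16 aligned = ntu & ~0x7;	/* 0b1000 == 8, the last 8-aligned index */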
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index c4854a987130..db0ef6ba907f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -13,6 +13,7 @@
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
+#include "ice_protocol_type.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -41,6 +42,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
+#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_PKG BIT_ULL(16)
@@ -515,7 +517,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the ITR/intrl granularity during
+ * register. Used for determining the ITR/INTRL granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0
@@ -559,6 +561,10 @@ struct ice_hw {
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
+ struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
+ struct list_head fl_profs[ICE_BLK_COUNT];
+ struct mutex rss_locks; /* protect RSS configuration */
+ struct list_head rss_list_head;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index edb374296d1f..262714d5f54a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -35,37 +35,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_err_to_virt err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -78,10 +47,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf = pf->vf;
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
@@ -121,26 +91,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
- * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
- * @vf: pointer to the VF structure
- * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
- * @link_up: whether or not to set the link up/down
- */
-static void
-ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
- bool link_up)
-{
- u16 link_speed;
-
- if (link_up)
- link_speed = ICE_AQ_LINK_SPEED_100GB;
- else
- link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
-
- ice_set_pfe_link(vf, pfe, link_speed, link_up);
-}
-
-/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -160,13 +110,17 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
pfe.severity = PF_EVENT_SEVERITY_INFO;
/* Always report link is down if the VF queues aren't enabled */
- if (!vf->num_qs_ena)
+ if (!vf->num_qs_ena) {
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
- else if (vf->link_forced)
- ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
- else
- ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
- ICE_AQ_LINK_UP);
+ } else if (vf->link_forced) {
+ u16 link_speed = vf->link_up ?
+ ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
+ } else {
+ ice_set_pfe_link(vf, &pfe, ls->link_speed,
+ ls->link_info & ICE_AQ_LINK_UP);
+ }
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
@@ -245,8 +199,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
else
- dev_err(dev,
- "Scattered mode for VF Rx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
/**
@@ -331,7 +284,7 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
ice_dis_vf_qs(&pf->vf[i]);
@@ -448,8 +401,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(dev,
- "VF %d PCI transactions stuck\n", vf->vf_id);
+ dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -508,7 +460,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -991,10 +943,17 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
+ struct ice_vsi *vsi;
+
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- vf->num_vlan = 0;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_add_vlan(vsi, 0))
+ dev_warn(ice_pf_to_dev(pf),
+ "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
+ vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
@@ -1023,7 +982,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
struct ice_hw *hw;
hw = &pf->hw;
- if (vf->num_vlan) {
+ if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
} else if (vf->port_vlan_id) {
@@ -1070,7 +1029,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_irq_dynamic_ena(hw, NULL, NULL);
/* Finish resetting each VF and allocate resources */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
@@ -1113,10 +1072,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
struct ice_vsi *vsi;
vf = &pf->vf[v];
@@ -1134,7 +1093,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
-
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
u32 reg;
@@ -1161,7 +1119,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
ice_free_vf_res(vf);
@@ -1273,7 +1231,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_id || vf->num_vlan)
+ if (vf->port_vlan_id || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
@@ -1301,7 +1259,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf)
{
int i;
- for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_for_each_vf(pf, i)
ice_vc_notify_vf_link_state(&pf->vf[i]);
}
@@ -1385,9 +1343,10 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_pci_disable_sriov;
}
pf->vf = vfs;
+ pf->num_alloc_vfs = num_alloc_vfs;
/* apply default profile */
- for (i = 0; i < num_alloc_vfs; i++) {
+ ice_for_each_vf(pf, i) {
vfs[i].pf = pf;
vfs[i].vf_sw_id = pf->first_sw;
vfs[i].vf_id = i;
@@ -1396,7 +1355,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
}
- pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated with initialization */
if (!ice_config_res_vfs(pf)) {
@@ -1535,7 +1493,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
!pf->num_alloc_vfs)
return;
- for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
@@ -1592,8 +1550,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
v_opcode, v_retval);
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev,
- "Number of invalid messages exceeded for VF %d\n",
+ dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
dev_err(dev, "Use PF Control I/F to enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
@@ -1608,8 +1565,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev,
- "Unable to send the message to VF %d ret %d aq_err %d\n",
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1918,6 +1874,87 @@ error_param:
}
/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi_ctx *ctx;
+ struct ice_vsi *vf_vsi;
+ enum ice_status status;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+
+ if (ice_check_vf_init(pf, vf))
+ return -EBUSY;
+
+ vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ return -EINVAL;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+ netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ return -ENODEV;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vf_vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ if (ena) {
+ ctx->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ } else {
+ ctx->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
+ }
+
+ status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* only update spoofchk state and VSI context on success */
+ vf_vsi->info.sec_flags = ctx->info.sec_flags;
+ vf->spoofchk = ena;
+
+out:
+ kfree(ctx);
+ return ret;
+}
+
+/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2019,8 +2056,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to enable Rx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2122,8 +2158,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
ring, &txq_meta)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop Tx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2149,8 +2184,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop Rx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2313,8 +2347,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf),
- "VF-%d requesting more than supported number of queues: %d\n",
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2409,6 +2442,83 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
}
/**
+ * ice_vc_add_mac_addr - attempt to add the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @mac_addr: MAC address to add
+ */
+static int
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+
+ /* default unicast MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ return 0;
+
+ if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
+ dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return -EPERM;
+ }
+
+ status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -EEXIST;
+ } else if (status) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ /* only set dflt_lan_addr once */
+ if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
+ is_unicast_ether_addr(mac_addr))
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
+
+ vf->num_mac++;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @mac_addr: MAC address to delete
+ */
+static int
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+
+ if (!ice_can_vf_change_mac(vf) &&
+ ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ return 0;
+
+ status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -ENOENT;
+ } else if (status) {
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ eth_zero_addr(vf->dflt_lan_addr.addr);
+
+ vf->num_mac--;
+
+ return 0;
+}
+
+/**
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2419,23 +2529,23 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ int (*ice_vc_cfg_mac)
+ (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status status;
struct ice_vsi *vsi;
- struct device *dev;
- int mac_count = 0;
int i;
- dev = ice_pf_to_dev(pf);
-
- if (set)
+ if (set) {
vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- else
+ ice_vc_cfg_mac = ice_vc_add_mac_addr;
+ } else {
vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_del_mac_addr;
+ }
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
@@ -2443,14 +2553,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
+ /* If this VF is not privileged, then we can't add more than a
+ * limited number of addresses. Check to make sure that the
+ * additions do not push us over the limit.
+ */
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(dev,
- "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
- /* There is no need to let VF know about not being trusted
- * to add more MAC addr, so we can just return success message.
- */
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
@@ -2462,70 +2572,22 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
}
for (i = 0; i < al->num_elements; i++) {
- u8 *maddr = al->list[i].addr;
-
- if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
- is_broadcast_ether_addr(maddr)) {
- if (set) {
- /* VF is trying to add filters that the PF
- * already added. Just continue.
- */
- dev_info(dev,
- "MAC %pM already set for VF %d\n",
- maddr, vf->vf_id);
- continue;
- } else {
- /* VF can't remove dflt_lan_addr/bcast MAC */
- dev_err(dev,
- "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
- maddr, vf->vf_id);
- continue;
- }
- }
-
- /* check for the invalid cases and bail if necessary */
- if (is_zero_ether_addr(maddr)) {
- dev_err(dev,
- "invalid MAC %pM provided for VF %d\n",
- maddr, vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
+ u8 *mac_addr = al->list[i].addr;
+ int result;
- if (is_unicast_ether_addr(maddr) &&
- !ice_can_vf_change_mac(vf)) {
- dev_err(dev,
- "can't change unicast MAC for untrusted VF %d\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
+ if (is_broadcast_ether_addr(mac_addr) ||
+ is_zero_ether_addr(mac_addr))
+ continue;
- /* program the updated filter list */
- status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
- if (status == ICE_ERR_DOES_NOT_EXIST ||
- status == ICE_ERR_ALREADY_EXISTS) {
- dev_info(dev,
- "can't %s MAC filters %pM for VF %d, error %d\n",
- set ? "add" : "remove", maddr, vf->vf_id,
- status);
- } else if (status) {
- dev_err(dev,
- "can't %s MAC filters for VF %d, error %d\n",
- set ? "add" : "remove", vf->vf_id, status);
- v_ret = ice_err_to_virt_err(status);
+ result = ice_vc_cfg_mac(vf, vsi, mac_addr);
+ if (result == -EEXIST || result == -ENOENT) {
+ continue;
+ } else if (result) {
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto handle_mac_exit;
}
-
- mac_count++;
}
- /* Track number of MAC filters programmed for the VF VSI */
- if (set)
- vf->num_mac += mac_count;
- else
- vf->num_mac -= mac_count;
-
handle_mac_exit:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
@@ -2574,8 +2636,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_pf *pf = vf->pf;
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
- u16 cur_queues;
struct device *dev;
+ u16 cur_queues;
dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2596,8 +2658,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev,
- "VF %d requested %u more queues, but only %u left.\n",
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
@@ -2744,22 +2805,11 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (add_v && !ice_is_vf_trusted(vf) &&
- vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let VF know about being not trusted,
- * so we can just return success message here
- */
- goto error_param;
- }
-
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ dev_err(dev, "invalid VF VLAN id %d\n",
+ vfl->vlan_id[i]);
goto error_param;
}
}
@@ -2771,6 +2821,16 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being not trusted,
+ * so we can just return success message here
+ */
+ goto error_param;
+ }
+
if (vsi->info.pvid) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2785,9 +2845,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
u16 vid = vfl->vlan_id[i];
if (!ice_is_vf_trusted(vf) &&
- vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
* not trusted, so we can just return success
@@ -2796,19 +2855,26 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (ice_vsi_add_vlan(vsi, vid)) {
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events so
+ * we don't need to add it again here
+ */
+ if (!vid)
+ continue;
+
+ status = ice_vsi_add_vlan(vsi, vid);
+ if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vf->num_vlan++;
+ vsi->num_vlan++;
/* Enable VLAN pruning when VLAN is added */
if (!vlan_promisc) {
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
}
@@ -2821,8 +2887,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
promisc_m, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
}
@@ -2837,21 +2902,29 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
int num_vf_vlan;
- num_vf_vlan = vf->num_vlan;
+ num_vf_vlan = vsi->num_vlan;
for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events so
+ * we don't want a VIRTCHNL request to remove it
+ */
+ if (!vid)
+ continue;
+
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (ice_vsi_kill_vlan(vsi, vid)) {
+ status = ice_vsi_kill_vlan(vsi, vid);
+ if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vf->num_vlan--;
+ vsi->num_vlan--;
/* Disable VLAN pruning when the last VLAN is removed */
- if (!vf->num_vlan)
+ if (!vsi->num_vlan)
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
@@ -3050,8 +3123,7 @@ error_handler:
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ice_vc_get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev,
- "Failed to initialize VLAN stripping for VF %d\n",
+ dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
@@ -3165,65 +3237,6 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi = pf->vsi[0];
- struct ice_vsi_ctx *ctx;
- enum ice_status status;
- struct device *dev;
- struct ice_vf *vf;
- int ret = 0;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n",
- ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
-
- if (ena) {
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
- ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
- }
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_dbg(dev,
- "Error %d, failed to update VSI* parameters\n", status);
- ret = -EIO;
- goto out;
- }
-
- vf->spoofchk = ena;
- vsi->info.sec_flags = ctx->info.sec_flags;
- vsi->info.sw_flags2 = ctx->info.sw_flags2;
-out:
- kfree(ctx);
- return ret;
-}
-
-/**
* ice_wait_on_vf_reset
* @vf: The VF being reset
*
@@ -3282,8 +3295,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
- netdev_info(netdev,
- "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
+ netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_reset_vf(vf);
@@ -3301,10 +3313,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
struct ice_vf *vf;
- dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
@@ -3327,7 +3337,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
vf->trusted = trusted;
ice_vc_reset_vf(vf);
- dev_info(dev, "VF %u is now %strusted\n",
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
return 0;
@@ -3344,28 +3354,18 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct virtchnl_pf_event pfe = { 0 };
- struct ice_link_status *ls;
struct ice_vf *vf;
- struct ice_hw *hw;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- hw = &pf->hw;
- ls = &pf->hw.port_info->phy.link_info;
-
if (ice_check_vf_init(pf, vf))
return -EBUSY;
- pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = PF_EVENT_SEVERITY_INFO;
-
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
- vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
@@ -3379,15 +3379,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
return -EINVAL;
}
- if (vf->link_forced)
- ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
- else
- ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
-
- /* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
- sizeof(pfe), NULL);
+ ice_vc_notify_vf_link_state(vf);
return 0;
}
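
One pattern worth noting from the ice_vc_handle_mac_addr_msg() rework above: the add and delete paths are selected once through a function pointer, collapsing two near-identical loops into one. A condensed sketch of the shape, using the handlers introduced earlier in this diff:

int (*cfg)(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr) =
	set ? ice_vc_add_mac_addr : ice_vc_del_mac_addr;

for (i = 0; i < al->num_elements; i++) {
	int err = cfg(vf, vsi, al->list[i].addr);

	/* duplicates and missing entries are tolerated; real errors abort */
	if (err && err != -EEXIST && err != -ENOENT)
		goto handle_mac_exit;
}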
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 88aa65d5cb31..4647d636ed36 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -40,6 +40,9 @@
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_WAIT 15
+#define ice_for_each_vf(pf, i) \
+ for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
+
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
@@ -91,7 +94,6 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
- u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
};
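
Usage of the new iterator is straightforward, as the conversions in ice_virtchnl_pf.c above show; note the related ordering fix in ice_alloc_vfs(), which now sets pf->num_alloc_vfs before the per-VF loop so the macro's upper bound is valid on first use:

int i;

ice_for_each_vf(pf, i) {
	struct ice_vf *vf = &pf->vf[i];

	/* per-VF work */
}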
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index cf9b8b22d24f..4d3407bbd4c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -338,8 +338,8 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
DMA_BIDIRECTIONAL,
ICE_RX_DMA_ATTR);
if (dma_mapping_error(dev, dma)) {
- dev_dbg(dev,
- "XSK UMEM DMA mapping error on page num %d", i);
+ dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
+ i);
goto out_unmap;
}
@@ -414,7 +414,8 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (!vsi->num_xsk_umems)
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
@@ -555,7 +556,7 @@ ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_buf->handle = handle + umem->headroom;
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -591,7 +592,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_buf->handle = handle + umem->headroom;
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
@@ -1019,8 +1020,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
s16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
- bool xmit_done = true;
u32 xsk_frames = 0;
+ bool xmit_done;
tx_desc = ICE_TX_DESC(xdp_ring, ntc);
tx_buf = &xdp_ring->tx_buf[ntc];
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8a6ef3514129..438b42ce2cd9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..49b5fa9d4783 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -661,6 +661,7 @@ void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
+void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 4690d6c87f39..f96ffa83efbe 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
- if (eth_flags->e100_base_fx) {
+ if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}
@@ -396,6 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int retval = 0;
+ int i;
/* 100basefx does not support setting link flow control */
if (hw->dev_spec._82575.eth_flags.e100_base_fx)
@@ -428,6 +429,13 @@ static int igb_set_pauseparam(struct net_device *netdev,
retval = ((hw->phy.media_type == e1000_media_type_copper) ?
igb_force_mac_fc(hw) : igb_setup_link(hw));
+
+ /* Make sure SRRCTL considers new fc settings for each ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ igb_setup_srrctl(adapter, ring);
+ }
}
clear_bit(__IGB_RESETTING, &adapter->state);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 98346eb064d5..b46bff8fe056 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -146,7 +146,7 @@ static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
-static void igb_tx_timeout(struct net_device *);
+static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
netdev_features_t features);
@@ -4468,6 +4468,37 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
}
/**
+ * igb_setup_srrctl - configure the split and replication receive control
+ * registers
+ * @adapter: Board private structure
+ * @ring: receive ring to be configured
+ **/
+void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ if (hw->mac.type >= e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+ /* Only set Drop Enable if VFs allocated, or we are supporting multiple
+ * queues and rx flow control is disabled
+ */
+ if (adapter->vfs_allocated_count ||
+ (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
+ adapter->num_rx_queues > 1))
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ wr32(E1000_SRRCTL(reg_idx), srrctl);
+}
+
+/**
* igb_configure_rx_ring - Configure a receive ring after Reset
* @adapter: board private structure
* @ring: receive ring to be configured
@@ -4481,7 +4512,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
- u32 srrctl = 0, rxdctl = 0;
+ u32 rxdctl = 0;
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4499,19 +4530,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
writel(0, ring->tail);
/* set descriptor configuration */
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (hw->mac.type >= e1000_82580)
- srrctl |= E1000_SRRCTL_TIMESTAMP;
- /* Only set Drop Enable if we are supporting multiple queues */
- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
- srrctl |= E1000_SRRCTL_DROP_EN;
-
- wr32(E1000_SRRCTL(reg_idx), srrctl);
+ igb_setup_srrctl(adapter, ring);
/* set filtering for VMDQ pools */
igb_set_vmolr(adapter, reg_idx & 0x7, true);
@@ -6184,7 +6203,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void igb_tx_timeout(struct net_device *netdev)
+static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 6003dc3ff5fd..5b1800c3ba82 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2375,7 +2375,7 @@ static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
* igbvf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void igbvf_tx_timeout(struct net_device *netdev)
+static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 88c6f88baac5..49fb1e1965cd 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o
+igc_ethtool.o igc_ptp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 0868677d43ed..52066bdbbad0 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -10,6 +10,9 @@
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
#include "igc_hw.h"
@@ -45,11 +48,15 @@ extern char igc_driver_version[];
#define IGC_REGS_LEN 740
#define IGC_RETA_SIZE 128
+/* flags controlling PTP/1588 function */
+#define IGC_PTP_ENABLED BIT(0)
+
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
+#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -100,6 +107,20 @@ extern char igc_driver_version[];
#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
+/* Transmit and receive latency (for PTP timestamps) */
+/* FIXME: These values were estimated using the ones that i210 has as
+ * basis, they seem to provide good numbers with ptp4l/phc2sys, but we
+ * need to confirm them.
+ */
+#define IGC_I225_TX_LATENCY_10 9542
+#define IGC_I225_TX_LATENCY_100 1024
+#define IGC_I225_TX_LATENCY_1000 178
+#define IGC_I225_TX_LATENCY_2500 64
+#define IGC_I225_RX_LATENCY_10 20662
+#define IGC_I225_RX_LATENCY_100 2213
+#define IGC_I225_RX_LATENCY_1000 448
+#define IGC_I225_RX_LATENCY_2500 160
+
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
@@ -370,6 +391,8 @@ struct igc_adapter {
struct timer_list dma_err_timer;
struct timer_list phy_info_timer;
+ u32 wol;
+ u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
@@ -430,6 +453,20 @@ struct igc_adapter {
unsigned long link_check_timeout;
struct igc_info ei;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
+ /* System time value lock */
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
};
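The cyclecounter/timecounter pair added to the adapter follows the standard kernel pattern: the cyclecounter wraps the raw SYSTIM counter, and the timecounter layers a monotonically increasing 64-bit nanosecond view on top, serialized by tmreg_lock. A hypothetical initialization sketch, under the assumption that SYSTIM already advances in nanoseconds (the actual setup belongs to igc_ptp.c and is not part of this hunk):

/* Sketch only: cyclecounter read callback plus timecounter setup.
 * The SYSTIM read is a placeholder; mult/shift of 1/0 assume a
 * nanosecond counter.
 */
static u64 igc_ptp_read_systim(const struct cyclecounter *cc)
{
	struct igc_adapter *adapter =
		container_of(cc, struct igc_adapter, cc);

	/* read SYSTIML then SYSTIMH via adapter->hw and combine */
	(void)adapter;
	return 0;
}

static void igc_ptp_timecounter_setup(struct igc_adapter *adapter)
{
	adapter->cc.read  = igc_ptp_read_systim;
	adapter->cc.mask  = CYCLECOUNTER_MASK(64);
	adapter->cc.mult  = 1;
	adapter->cc.shift = 0;

	spin_lock_init(&adapter->tmreg_lock);
	timecounter_init(&adapter->tc, &adapter->cc,
			 ktime_to_ns(ktime_get_real()));
}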
/* igc_desc_unused - calculate if we have unused descriptors */
@@ -513,6 +550,16 @@ int igc_add_filter(struct igc_adapter *adapter,
int igc_erase_filter(struct igc_adapter *adapter,
struct igc_nfc_filter *input);
+void igc_ptp_init(struct igc_adapter *adapter);
+void igc_ptp_reset(struct igc_adapter *adapter);
+void igc_ptp_stop(struct igc_adapter *adapter);
+void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ struct sk_buff *skb);
+int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igc_ptp_tx_hang(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index db289bcce21d..5a506440560a 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -212,6 +212,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f3788f0b95b4..58efa7a02c68 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -10,6 +10,37 @@
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
+
+/* Wake Up Filter Control */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+
+#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+
+/* Wake Up Status */
+#define IGC_WUS_EX 0x00000004 /* Directed Exact */
+#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */
+#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */
+#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */
+#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
+
+/* Packet types that are enabled for wake packet delivery */
+#define WAKE_PKT_WUS ( \
+ IGC_WUS_EX | \
+ IGC_WUS_ARPD | \
+ IGC_WUS_IPV4 | \
+ IGC_WUS_IPV6 | \
+ IGC_WUS_NSD)
+
+/* Wake Up Packet Length */
+#define IGC_WUPL_MASK 0x00000FFF
+
+/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
+#define IGC_WUPM_BYTES 128
+
/* Physical Func Reset Done Indication */
#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -187,6 +218,7 @@
#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
#define IGC_ICR_RXO BIT(6) /* Rx overrun */
#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
+#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */
#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
@@ -209,6 +241,7 @@
#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */
#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
@@ -249,6 +282,10 @@
#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+/* IPSec Encrypt Enable */
+#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
@@ -281,12 +318,21 @@
#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
+/* Split Replication Receive Control */
+#define IGC_SRRCTL_TIMESTAMP 0x40000000
+#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14)
+#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
+
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+/* Advanced Receive Descriptor bit definitions */
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
#define IGC_RXDEXT_STATERR_SEQ 0x04000000
@@ -323,6 +369,61 @@
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+
+/* Time Sync Interrupt Causes */
+#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
+#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+
+#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS
+
+/* PTP Queue Filter */
+#define IGC_ETQF_1588 BIT(30)
+
+#define IGC_FTQF_VF_BP 0x00008000
+#define IGC_FTQF_1588_TIME_STAMP 0x08000000
+#define IGC_FTQF_MASK 0xF0000000
+#define IGC_FTQF_MASK_PROTO_BP 0x10000000
+
+/* Time Sync Receive Control bit definitions */
+#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IGC_TSYNCRXCTL_TYPE_ALL 0x08
+#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */
+
+/* Time Sync Receive Configuration */
+#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+
+/* Immediate Interrupt Receive */
+#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+/* Immediate Interrupt Receive Extended */
+#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+
+/* Time Sync Transmit Control bit definitions */
+#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
@@ -363,6 +464,7 @@
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 455c1cdceb6e..ee07011e13e9 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1600,6 +1600,39 @@ static int igc_set_channels(struct net_device *netdev,
return 0;
}
+static int igc_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
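From userspace, the capabilities reported by this callback are visible through the ETHTOOL_GET_TS_INFO ioctl, which is what `ethtool -T` uses. A small standalone sketch; the interface name is a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&info;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("phc_index=%d so_timestamping=%#x\n",
		       info.phc_index, info.so_timestamping);

	if (fd >= 0)
		close(fd);
	return 0;
}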
static u32 igc_get_priv_flags(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1847,6 +1880,7 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,
+ .get_ts_info = igc_get_ts_info,
.get_channels = igc_get_channels,
.set_channels = igc_set_channels,
.get_priv_flags = igc_get_priv_flags,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 20f710645746..90ac0e0144d8 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,8 +21,7 @@
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
-
-#define IGC_FUNC_0 0
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
struct igc_mac_operations {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9700527dd797..d9d5425fe8d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -8,6 +8,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
+#include <linux/pm_runtime.h>
#include <net/ipv6.h>
@@ -44,31 +45,13 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
-/* forward declaration */
-static void igc_clean_tx_ring(struct igc_ring *tx_ring);
-static int igc_sw_init(struct igc_adapter *);
-static void igc_configure(struct igc_adapter *adapter);
-static void igc_power_down_link(struct igc_adapter *adapter);
-static void igc_set_default_mac_filter(struct igc_adapter *adapter);
-static void igc_set_rx_mode(struct net_device *netdev);
-static void igc_write_itr(struct igc_q_vector *q_vector);
-static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
-static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
-static void igc_set_interrupt_capability(struct igc_adapter *adapter,
- bool msix);
-static void igc_free_q_vectors(struct igc_adapter *adapter);
-static void igc_irq_disable(struct igc_adapter *adapter);
-static void igc_irq_enable(struct igc_adapter *adapter);
-static void igc_configure_msix(struct igc_adapter *adapter);
-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
- struct igc_rx_buffer *bi);
-
enum latency_range {
lowest_latency = 0,
low_latency = 1,
@@ -76,6 +59,16 @@ enum latency_range {
latency_invalid = 255
};
+/**
+ * igc_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igc_power_down_link(struct igc_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_down_phy_copper_base(&adapter->hw);
+}
+
void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -110,11 +103,14 @@ void igc_reset(struct igc_adapter *adapter)
if (!netif_running(adapter->netdev))
igc_power_down_link(adapter);
+ /* Re-enable PTP, where applicable. */
+ igc_ptp_reset(adapter);
+
igc_get_phy_info(hw);
}
/**
- * igc_power_up_link - Power up the phy/serdes link
+ * igc_power_up_link - Power up the phy link
* @adapter: address of board private structure
*/
static void igc_power_up_link(struct igc_adapter *adapter)
@@ -128,16 +124,6 @@ static void igc_power_up_link(struct igc_adapter *adapter)
}
/**
- * igc_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
- */
-static void igc_power_down_link(struct igc_adapter *adapter)
-{
- if (adapter->hw.phy.media_type == igc_media_type_copper)
- igc_power_down_phy_copper_base(&adapter->hw);
-}
-
-/**
* igc_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
@@ -176,43 +162,6 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
}
/**
- * igc_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
- *
- * Free all transmit software resources
- */
-void igc_free_tx_resources(struct igc_ring *tx_ring)
-{
- igc_clean_tx_ring(tx_ring);
-
- vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
-
- /* if not set, then don't free */
- if (!tx_ring->desc)
- return;
-
- dma_free_coherent(tx_ring->dev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
-
- tx_ring->desc = NULL;
-}
-
-/**
- * igc_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all transmit software resources
- */
-static void igc_free_all_tx_resources(struct igc_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igc_free_tx_resources(adapter->tx_ring[i]);
-}
-
-/**
* igc_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
*/
@@ -274,6 +223,43 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
}
/**
+ * igc_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+void igc_free_tx_resources(struct igc_ring *tx_ring)
+{
+ igc_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_all_tx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
* igc_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
*/
@@ -771,6 +757,51 @@ static void igc_setup_tctl(struct igc_adapter *adapter)
}
/**
+ * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: address of board private structure
+ * @index: Index of the RAR entry which needs to be synced with the MAC table
+ */
+static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+{
+ u8 *addr = adapter->mac_table[index].addr;
+ struct igc_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
+
+ /* HW expects these to be in network order when they are plugged
+ * into the registers which are little endian. In order to guarantee
+ * that ordering we need to do a leXX_to_cpup here in order to be
+ * ready for the byteswap that occurs with writel
+ */
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
+ if (is_valid_ether_addr(addr))
+ rar_high |= IGC_RAH_AV;
+
+ rar_high |= IGC_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
+
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
+
+/* Set default MAC address for the PF in the first RAR entry */
+static void igc_set_default_mac_filter(struct igc_adapter *adapter)
+{
+ struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, 0);
+}
+
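To make the endianness comment in igc_rar_set_index() concrete, here is the arithmetic for a sample address as a standalone check. It assumes a little-endian host, where memcpy reproduces what le32_to_cpup()/le16_to_cpup() do; writel() then stores the value back in little-endian order, so the bytes reach the register in wire order on any host:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t rar_low;
	uint16_t rar_high;

	memcpy(&rar_low, addr, 4);	/* le32_to_cpup() on LE hosts */
	memcpy(&rar_high, addr + 4, 2);	/* le16_to_cpup() on LE hosts */

	/* prints RAL=0x33221100 RAH=0x5544 */
	printf("RAL=%#010x RAH=%#06x\n", rar_low, (unsigned int)rar_high);
	return 0;
}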
+/**
* igc_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -850,7 +881,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
- /* For 82575, context index must be unique per ring. */
+ /* For i225, context index must be unique per ring. */
if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
mss_l4len_idx |= tx_ring->reg_idx << 4;
@@ -957,6 +988,11 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
return __igc_maybe_stop_tx(tx_ring, size);
}
+#define IGC_SET_FLAG(_input, _flag, _result) \
+ (((_flag) <= (_result)) ? \
+ ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
+ ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
+
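IGC_SET_FLAG translates a bit from one mask position to another without branching on the input: when both masks are single bits, multiplying or dividing by their ratio shifts the bit into place. A standalone illustration with stand-in mask values:

#include <stdio.h>
#include <stdint.h>

/* same shape as IGC_SET_FLAG above */
#define SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

int main(void)
{
	uint32_t tx_flags = 0x02;	/* stand-in for a BIT(1) tx flag */

	/* bit set: 0x02 scales up by 0x10/0x02 == 8 to become 0x10 */
	printf("%#x\n", SET_FLAG(tx_flags, 0x02, 0x10));	/* 0x10 */
	/* bit clear: the masked input is 0, so the result is 0 */
	printf("%#x\n", SET_FLAG(0u, 0x02, 0x10));		/* 0 */
	return 0;
}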
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
@@ -964,6 +1000,14 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS;
+ /* set segmentation bits for TSO */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
+ (IGC_ADVTXD_DCMD_TSE));
+
+ /* set timestamp bit if present */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+
return cmd_type;
}
@@ -1131,6 +1175,100 @@ dma_error:
return -1;
}
+static int igc_tso(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
+ u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ struct sk_buff *skb = first->skb;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ unsigned char *csum_start = skb_checksum_start(skb);
+ unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_partial(trans_start,
+ csum_start - trans_start,
+ 0));
+ type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
+ first->tx_flags |= IGC_TX_FLAGS_TSO |
+ IGC_TX_FLAGS_CSUM |
+ IGC_TX_FLAGS_IPV4;
+ } else {
+ ip.v6->payload_len = 0;
+ first->tx_flags |= IGC_TX_FLAGS_TSO |
+ IGC_TX_FLAGS_CSUM;
+ }
+
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
+
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+ igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
+}
+
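The context-descriptor words built at the end of igc_tso() pack several lengths into single 32-bit fields. Worked through for a plain TCP segment with a 14-byte Ethernet header, 20-byte IP header, 20-byte TCP header and an MSS of 1448 (standalone arithmetic; the MACLEN shift of 9 is an assumption carried over from the igb family, as its define is outside this hunk):

#include <stdio.h>
#include <stdint.h>

#define ADVTXD_L4LEN_SHIFT	8	/* IGC_ADVTXD_L4LEN_SHIFT */
#define ADVTXD_MSS_SHIFT	16	/* IGC_ADVTXD_MSS_SHIFT */
#define ADVTXD_MACLEN_SHIFT	9	/* IGC_ADVTXD_MACLEN_SHIFT, assumed */

int main(void)
{
	uint32_t l4len = 20, mss = 1448, maclen = 14, iplen = 20;
	uint32_t mss_l4len_idx, vlan_macip_lens;

	mss_l4len_idx = (l4len << ADVTXD_L4LEN_SHIFT) |
			(mss << ADVTXD_MSS_SHIFT);
	vlan_macip_lens = iplen | (maclen << ADVTXD_MACLEN_SHIFT);

	/* prints mss_l4len_idx=0x05a81400 vlan_macip_lens=0x00001c14 */
	printf("mss_l4len_idx=%#010x vlan_macip_lens=%#010x\n",
	       mss_l4len_idx, vlan_macip_lens);
	return 0;
}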
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1140,6 +1278,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
u32 tx_flags = 0;
unsigned short f;
u8 hdr_len = 0;
+ int tso = 0;
/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
@@ -1162,15 +1301,45 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ /* FIXME: add support for retrieving timestamps from
+ * the other timer registers before skipping the
+ * timestamping request.
+ */
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGC_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
+ }
+
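The __IGC_PTP_TX_IN_PROGRESS bit taken above implements a single-slot design: only one transmitted packet may have an outstanding hardware timestamp at a time, and requests that arrive while the slot is busy are counted in tx_hwtstamp_skipped. The release side is expected to live in igc_ptp.c; a hypothetical sketch of what it would do once the hardware latches the timestamp:

/* Sketch only: deliver a latched Tx timestamp and free the slot. */
static void igc_ptp_tx_deliver(struct igc_adapter *adapter, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	struct sk_buff *skb = adapter->ptp_tx_skb;

	if (!skb)
		return;

	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	/* drop our reference before unlocking so a new request
	 * cannot observe a stale ptp_tx_skb
	 */
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}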
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
- igc_tx_csum(tx_ring, first);
+ tso = igc_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ igc_tx_csum(tx_ring, first);
igc_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
+ return NETDEV_TX_OK;
}
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
@@ -1269,6 +1438,10 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
+ if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) &&
+ !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))
+ igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -1388,6 +1561,12 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
+ if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
+ igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGC_TS_HDR_LEN;
+ size -= IGC_TS_HDR_LEN;
+ }
+
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGC_RX_HDR_LEN)
@@ -1485,7 +1664,6 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
* igc_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1565,9 +1743,56 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
rx_buffer->page = NULL;
}
+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+}
+
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = igc_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
+
+ return true;
+}
+
/**
* igc_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring
+ * @cleaned_count: number of buffers to clean
*/
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
@@ -1725,52 +1950,6 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
return total_packets;
}
-static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
-}
-
-static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
- struct igc_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
-
- /* alloc new page for storage */
- page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- igc_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IGC_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
-
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = igc_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
/**
* igc_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -1942,6 +2121,1128 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
return !!budget;
}
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to only
+ * override the other configuration, for example the queue used to
+ * steer traffic.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * 'flags' indicates what kind of match is made. The match is by
+ * default for the destination address; if matching by source address
+ * is desired, the flag IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_add_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, 0))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue'. 'flags' indicates what kind of match needs to be
+ * removed. The match is by default for the destination address; if
+ * matching by source address is to be removed, the flag
+ * IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_del_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].state != 0)
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue = 0;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return min_t(int, ret, 0);
+}
+
+static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return 0;
+}
+
+/**
+ * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void igc_set_rx_mode(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IGC_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA; if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = igc_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= IGC_RCTL_MPE;
+ }
+ }
+
+ /* Write addresses to available RAR registers; if there is not
+ * sufficient space to store all the addresses, then enable
+ * unicast promiscuous mode
+ */
+ if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
+ rctl |= IGC_RCTL_UPE;
+
+ /* update state of unicast and multicast */
+ rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+ wr32(IGC_RCTL, rctl);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
+ rlpml = IGC_MAX_FRAME_BUILD_SKB;
+#endif
+ wr32(IGC_RLPML, rlpml);
+}
+
+/**
+ * igc_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ */
+static void igc_configure(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i = 0;
+
+ igc_get_hw_control(adapter);
+ igc_set_rx_mode(netdev);
+
+ igc_setup_tctl(adapter);
+ igc_setup_mrqc(adapter);
+ igc_setup_rctl(adapter);
+
+ igc_nfc_filter_restore(adapter);
+ igc_configure_tx(adapter);
+ igc_configure_rx(adapter);
+
+ igc_rx_fifo_flush_base(&adapter->hw);
+
+ /* call igc_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ }
+}
+
+/**
+ * igc_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset in IVAR, should be a multiple of 8
+ *
+ * The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ */
+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(IGC_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
+
+ array_wr32(IGC_IVAR0, index, ivar);
+}
+
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ int rx_queue = IGC_N0_QUEUE;
+ int tx_queue = IGC_N0_QUEUE;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ if (rx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
+ q_vector->eims_value = BIT(msix_vector);
+ break;
+ default:
+ WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
+
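The row/offset arithmetic in igc_assign_vector() packs two queues per 32-bit IVAR register, one byte per interrupt cause. For example, Rx queue 3 lands in row 3 >> 1 = 1 at bit offset (3 & 1) << 4 = 16, with its Tx sibling 8 bits higher; a standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int rx_queue = 3;
	int row = rx_queue >> 1;		/* IVAR0 array index: 1 */
	int rx_off = (rx_queue & 0x1) << 4;	/* 16 */
	int tx_off = rx_off + 8;		/* 24 */

	printf("IVAR0[%d]: rx bits %d-%d, tx bits %d-%d\n",
	       row, rx_off, rx_off + 7, tx_off, tx_off + 7);
	return 0;
}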
+/**
+ * igc_configure_msix - Configure MSI-X hardware
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ */
+static void igc_configure_msix(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i, vector = 0;
+ u32 tmp;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case igc_i225:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug.
+ */
+ wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
+ IGC_GPIE_PBA | IGC_GPIE_EIAME |
+ IGC_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = BIT(vector);
+ tmp = (vector++ | IGC_IVAR_VALID) << 8;
+
+ wr32(IGC_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igc_assign_vector(adapter->q_vector[i], vector++);
+
+ wrfl();
+}
+
+/**
+ * igc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void igc_irq_enable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
+ u32 regval = rd32(IGC_EIAC);
+
+ wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAM);
+ wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
+ wr32(IGC_EIMS, adapter->eims_enable_mask);
+ wr32(IGC_IMS, ims);
+ } else {
+ wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ }
+}
+
+/**
+ * igc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void igc_irq_disable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 regval = rd32(IGC_EIAM);
+
+ wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(IGC_EIMC, adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAC);
+ wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ wr32(IGC_IAM, 0);
+ wr32(IGC_IMC, ~0);
+ wrfl();
+
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ /* Determine if we need to pair queues: if rss_queues > half of
+ * max_rss_queues, pair the queues in order to conserve interrupts
+ * due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
+}
+
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+{
+ unsigned int max_rss_queues;
+
+ /* Determine the maximum number of RSS queues supported. */
+ max_rss_queues = IGC_MAX_RX_QUEUES;
+
+ return max_rss_queues;
+}
+
+static void igc_init_queue_configuration(struct igc_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igc_get_max_rss_queues(adapter);
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ igc_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+/**
+ * igc_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igc_free_q_vector.
+ */
+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ /* if we're coming from igc_set_interrupt_capability, the vectors are
+ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ netif_napi_del(&q_vector->napi);
+}
+
+/**
+ * igc_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.
+ */
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igc_get_stats64() might access the rings on this vector, so
+ * we must wait a grace period before freeing it.
+ */
+ if (q_vector)
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igc_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ */
+static void igc_free_q_vectors(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--) {
+ igc_reset_q_vector(adapter, v_idx);
+ igc_free_q_vector(adapter, v_idx);
+ }
+}
+
+/**
+ * igc_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ */
+static void igc_update_itr(struct igc_q_vector *q_vector,
+ struct igc_ring_container *ring_container)
+{
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+
+ /* no packets, exit with status unchanged */
+ if (packets == 0)
+ return;
+
+ switch (itrval) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ itrval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 10) || ((bytes / packets) > 1200))
+ itrval = bulk_latency;
+ else if ((packets > 35))
+ itrval = lowest_latency;
+ } else if (bytes / packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ itrval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ itrval = low_latency;
+ } else if (bytes < 1500) {
+ itrval = low_latency;
+ }
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
+}
+
+static void igc_set_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGC_4K_ITR;
+ goto set_itr_now;
+ default:
+ break;
+ }
+
+ igc_update_itr(q_vector, &q_vector->tx);
+ igc_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
+static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+
+ while (v_idx--)
+ igc_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ * igc_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean value for MSI-X capability
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix)
+{
+ int numvecs, i;
+ int err;
+
+ if (!msix)
+ goto msi_only;
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+
+ adapter->num_tx_queues = adapter->rss_queues;
+
+ /* start with one vector for every Rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if Tx handler is separate add 1 for every Tx queue */
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
+
+ adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries)
+ return;
+
+ /* populate entry values */
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
+ return;
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ igc_reset_interrupt_capability(adapter);
+
+msi_only:
+ adapter->flags &= ~IGC_FLAG_HAS_MSIX;
+
+ adapter->rss_queues = 1;
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGC_FLAG_HAS_MSI;
+}
+
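The vector budget computed above is one per queue vector plus one for link and other causes. With four RSS queues and IGC_FLAG_QUEUE_PAIRS set, that is 4 + 1 = 5 MSI-X entries; unpaired, it would be 4 + 4 + 1 = 9. A standalone restatement of the arithmetic:

#include <stdio.h>
#include <stdbool.h>

static int igc_numvecs(int rss_queues, bool queue_pairs)
{
	int numvecs = rss_queues;		/* one per Rx queue */

	if (!queue_pairs)
		numvecs += rss_queues;		/* separate Tx vectors */

	return numvecs + 1;			/* +1 for link/other */
}

int main(void)
{
	printf("paired: %d, unpaired: %d\n",
	       igc_numvecs(4, true), igc_numvecs(4, false));	/* 5, 9 */
	return 0;
}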
+/**
+ * igc_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igc_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ */
+static void igc_update_ring_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ new_val = IGC_4K_ITR;
+ goto set_itr_val;
+ default:
+ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
+
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if (avg_wire_size > 300 && avg_wire_size < 1200)
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGC_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGC_20K_ITR;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
+}
+
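Worked through with numbers, the heuristic above behaves like this for an Rx-only vector that saw 100 packets totalling 60000 bytes since the last interrupt:

#include <stdio.h>

int main(void)
{
	unsigned int avg_wire_size = 60000 / 100;	/* 600 */

	avg_wire_size += 24;		/* CRC + preamble + inter-frame gap */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;	/* don't starve jumbo frames */

	/* mid-size boost: 300 < 624 < 1200, so divide by 3 */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		avg_wire_size /= 3;
	else
		avg_wire_size /= 2;

	printf("new_val = %u\n", avg_wire_size);	/* 208 */
	return 0;
}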
+static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if (adapter->num_q_vectors == 1)
+ igc_set_itr(q_vector);
+ else
+ igc_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(IGC_EIMS, q_vector->eims_value);
+ else
+ igc_irq_enable(adapter);
+ }
+}
+
+static void igc_add_ring(struct igc_ring *ring,
+ struct igc_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * igc_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ */
+static void igc_cache_ring_register(struct igc_adapter *adapter)
+{
+ int i = 0, j = 0;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ /* Fall through */
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = j;
+ break;
+ }
+}
+
+/**
+ * igc_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ */
+static int igc_poll(struct napi_struct *napi, int budget)
+{
+ struct igc_q_vector *q_vector = container_of(napi,
+ struct igc_q_vector,
+ napi);
+ bool clean_complete = true;
+ int work_done = 0;
+
+ if (q_vector->tx.ring)
+ clean_complete = igc_clean_tx_irq(q_vector, budget);
+
+ if (q_vector->rx.ring) {
+ int cleaned = igc_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ if (cleaned >= budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igc_ring_irq_enable(q_vector);
+
+ return min(work_done, budget - 1);
+}
+
+/**
+ * igc_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int igc_alloc_q_vector(struct igc_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+ int ring_count;
+
+ /* igc only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
+ else
+ memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igc_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
+ q_vector->itr_val = IGC_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igc_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igc_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+/**
+ * igc_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int igc_alloc_q_vectors(struct igc_adapter *adapter)
+{
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
+ }
+ }
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igc_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
+}
+
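The DIV_ROUND_UP spreading above hands out the remaining rings as evenly as possible across the vectors that are left. With 4 q_vectors and 4 Rx + 4 Tx queues (the paired case, where q_vectors < rx + tx so the first loop is skipped), each iteration computes rqpv = tqpv = 1 and every vector services one Rx/Tx pair. A standalone trace using the kernel's DIV_ROUND_UP definition:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rxr_remaining = 4, txr_remaining = 4;
	int v_idx;

	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		printf("vector %d: %d rx, %d tx\n", v_idx, rqpv, tqpv);
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}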
+/**
+ * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean for MSI-X capability
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ */
+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ igc_set_interrupt_capability(adapter, msix);
+
+ err = igc_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ igc_cache_ring_register(adapter);
+
+ return 0;
+
+err_alloc_q_vectors:
+ igc_reset_interrupt_capability(adapter);
+ return err;
+}
+
+/**
+ * igc_sw_init - Initialize general software structures (struct igc_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igc_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int igc_sw_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGC_DEFAULT_TXD;
+ adapter->rx_ring_count = IGC_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGC_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->nfc_lock);
+ spin_lock_init(&adapter->stats64_lock);
+ /* Assume MSI-X interrupts, will be checked during IRQ allocation */
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ adapter->mac_table = kzalloc(size, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
+ igc_init_queue_configuration(adapter);
+
+ /* This call may decrease the number of queues */
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igc_irq_disable(adapter);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ return 0;
+}
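+
For the default 1500-byte MTU, the frame-size bounds computed above work out to 1522 and 64 bytes. A standalone check of the arithmetic, with the constants from <linux/if_ether.h> and <linux/if_vlan.h> written out:

#include <stdio.h>

#define ETH_HLEN	14	/* destination + source MAC + ethertype */
#define ETH_FCS_LEN	4	/* frame check sequence (CRC) */
#define VLAN_HLEN	4	/* 802.1Q tag */
#define ETH_ZLEN	60	/* minimum frame length without FCS */

int main(void)
{
	int mtu = 1500;

	printf("max_frame_size = %d\n",
	       mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);	/* 1522 */
	printf("min_frame_size = %d\n", ETH_ZLEN + ETH_FCS_LEN);	/* 64 */
	return 0;
}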
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2163,18 +3464,6 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
-static void igc_nfc_filter_restore(struct igc_adapter *adapter)
-{
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
-
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_add_filter(adapter, rule);
-
- spin_unlock(&adapter->nfc_lock);
-}
-
/**
* igc_down - Close the interface
* @adapter: board private structure
@@ -2398,105 +3687,6 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-/**
- * igc_configure - configure the hardware for RX and TX
- * @adapter: private board structure
- */
-static void igc_configure(struct igc_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int i = 0;
-
- igc_get_hw_control(adapter);
- igc_set_rx_mode(netdev);
-
- igc_setup_tctl(adapter);
- igc_setup_mrqc(adapter);
- igc_setup_rctl(adapter);
-
- igc_nfc_filter_restore(adapter);
- igc_configure_tx(adapter);
- igc_configure_rx(adapter);
-
- igc_rx_fifo_flush_base(&adapter->hw);
-
- /* call igc_desc_unused which always leaves
- * at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean
- */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igc_ring *ring = adapter->rx_ring[i];
-
- igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
- }
-}
-
-/**
- * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: address of board private structure
- * @index: Index of the RAR entry which need to be synced with MAC table
- */
-static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
-{
- u8 *addr = adapter->mac_table[index].addr;
- struct igc_hw *hw = &adapter->hw;
- u32 rar_low, rar_high;
-
- /* HW expects these to be in network order when they are plugged
- * into the registers, which are little endian. To guarantee that
- * ordering, do an leXX_to_cpup here so the value is ready for the
- * byteswap that occurs with writel.
- */
- rar_low = le32_to_cpup((__le32 *)(addr));
- rar_high = le16_to_cpup((__le16 *)(addr + 4));
-
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
- if (is_valid_ether_addr(addr))
- rar_high |= IGC_RAH_AV;
-
- rar_high |= IGC_RAH_POOL_1 <<
- adapter->mac_table[index].queue;
- }
-
- wr32(IGC_RAL(index), rar_low);
- wrfl();
- wr32(IGC_RAH(index), rar_high);
- wrfl();
-}
-
-/* Set default MAC address for the PF in the first RAR entry */
-static void igc_set_default_mac_filter(struct igc_adapter *adapter)
-{
- struct igc_mac_addr *mac_table = &adapter->mac_table[0];
-
- ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
- mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
-
- igc_rar_set_index(adapter, 0);
-}
-
-/* If the filter to be added and an already existing filter express
- * the same address and address type, it should be possible to only
- * override the other configurations, for example the queue to steer
- * traffic.
- */
-static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
- const u8 *addr, const u8 flags)
-{
- if (!(entry->state & IGC_MAC_STATE_IN_USE))
- return true;
-
- if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
- (flags & IGC_MAC_STATE_SRC_ADDR))
- return false;
-
- if (!ether_addr_equal(addr, entry->addr))
- return false;
-
- return true;
-}
-
/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
* 'flags' is used to indicate what kind of match is made, match is by
* default for the destination address, if matching by source address
@@ -2597,159 +3787,20 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
IGC_MAC_STATE_QUEUE_STEERING | flags);
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, 0))
- continue;
-
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
-
- igc_rar_set_index(adapter, i);
- return i;
- }
-
- return -ENOSPC;
-}
-
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_del_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if (adapter->mac_table[i].state != 0)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
-
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- adapter->mac_table[i].queue = 0;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
+ u32 tsicr = rd32(IGC_TSICR);
+ u32 ack = 0;
- igc_rar_set_index(adapter, i);
- return 0;
+ if (tsicr & IGC_TSICR_TXTS) {
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ ack |= IGC_TSICR_TXTS;
}
- return -ENOENT;
-}
-
-static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
- int ret;
-
- ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return min_t(int, ret, 0);
-}
-
-static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
-
- return 0;
-}
-
-/**
- * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
- */
-static void igc_set_rx_mode(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
- struct igc_hw *hw = &adapter->hw;
- u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
- int count;
-
- /* Check for Promiscuous and All Multicast modes */
- if (netdev->flags & IFF_PROMISC) {
- rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= IGC_RCTL_MPE;
- } else {
- /* Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- count = igc_write_mc_addr_list(netdev);
- if (count < 0)
- rctl |= IGC_RCTL_MPE;
- }
- }
-
- /* Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
- rctl |= IGC_RCTL_UPE;
-
- /* update state of unicast and multicast */
- rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
- wr32(IGC_RCTL, rctl);
-
-#if (PAGE_SIZE < 8192)
- if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
- rlpml = IGC_MAX_FRAME_BUILD_SKB;
-#endif
- wr32(IGC_RLPML, rlpml);
+ /* acknowledge the interrupts */
+ wr32(IGC_TSICR, ack);
}
/**
@@ -2779,114 +3830,28 @@ static irqreturn_t igc_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
wr32(IGC_EIMS, adapter->eims_other);
return IRQ_HANDLED;
}
-/**
- * igc_write_ivar - configure ivar for given MSI-X vector
- * @hw: pointer to the HW structure
- * @msix_vector: vector number we are allocating to a given ring
- * @index: row index of IVAR register to write within IVAR table
- * @offset: column offset of in IVAR, should be multiple of 8
- *
- * The IVAR table consists of 2 columns,
- * each containing a cause allocation for an Rx and Tx ring, and a
- * variable number of rows depending on the number of queues supported.
- */
-static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
- int index, int offset)
-{
- u32 ivar = array_rd32(IGC_IVAR0, index);
-
- /* clear any bits that are currently set */
- ivar &= ~((u32)0xFF << offset);
-
- /* write vector and valid bit */
- ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
-
- array_wr32(IGC_IVAR0, index, ivar);
-}
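-
For reference, the IVAR addressing that igc_assign_vector() below feeds into this helper: each 32-bit IVAR row carries four 8-bit entries (Rx even, Tx even, Rx odd, Tx odd), so queue 5, for example, lands in row 2 at bit offset 16 for Rx and 24 for Tx. A standalone sketch of the mapping, not driver code:

#include <stdio.h>

int main(void)
{
	int queue = 5;
	int index = queue >> 1;			/* 2 */
	int rx_offset = (queue & 0x1) << 4;	/* 16 */
	int tx_offset = ((queue & 0x1) << 4) + 8; /* 24 */

	printf("Rx q%d -> IVAR[%d] bits %d..%d\n", queue, index,
	       rx_offset, rx_offset + 7);
	printf("Tx q%d -> IVAR[%d] bits %d..%d\n", queue, index,
	       tx_offset, tx_offset + 7);
	return 0;
}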
-
-static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- struct igc_hw *hw = &adapter->hw;
- int rx_queue = IGC_N0_QUEUE;
- int tx_queue = IGC_N0_QUEUE;
-
- if (q_vector->rx.ring)
- rx_queue = q_vector->rx.ring->reg_idx;
- if (q_vector->tx.ring)
- tx_queue = q_vector->tx.ring->reg_idx;
-
- switch (hw->mac.type) {
- case igc_i225:
- if (rx_queue > IGC_N0_QUEUE)
- igc_write_ivar(hw, msix_vector,
- rx_queue >> 1,
- (rx_queue & 0x1) << 4);
- if (tx_queue > IGC_N0_QUEUE)
- igc_write_ivar(hw, msix_vector,
- tx_queue >> 1,
- ((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = BIT(msix_vector);
- break;
- default:
- WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
- break;
- }
-
- /* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= q_vector->eims_value;
-
- /* configure q_vector to set itr on first interrupt */
- q_vector->set_itr = 1;
-}
-
-/**
- * igc_configure_msix - Configure MSI-X hardware
- * @adapter: Pointer to adapter structure
- *
- * igc_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
- */
-static void igc_configure_msix(struct igc_adapter *adapter)
+static void igc_write_itr(struct igc_q_vector *q_vector)
{
- struct igc_hw *hw = &adapter->hw;
- int i, vector = 0;
- u32 tmp;
-
- adapter->eims_enable_mask = 0;
-
- /* set vector for other causes, i.e. link changes */
- switch (hw->mac.type) {
- case igc_i225:
- /* Turn on MSI-X capability first, or our settings
- * won't stick. And it will take days to debug.
- */
- wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
- IGC_GPIE_PBA | IGC_GPIE_EIAME |
- IGC_GPIE_NSICR);
-
- /* enable msix_other interrupt */
- adapter->eims_other = BIT(vector);
- tmp = (vector++ | IGC_IVAR_VALID) << 8;
+ u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
- wr32(IGC_IVAR_MISC, tmp);
- break;
- default:
- /* do nothing, since nothing else supports MSI-X */
- break;
- } /* switch (hw->mac.type) */
+ if (!q_vector->set_itr)
+ return;
- adapter->eims_enable_mask |= adapter->eims_other;
+ if (!itr_val)
+ itr_val = IGC_ITR_VAL_MASK;
- for (i = 0; i < adapter->num_q_vectors; i++)
- igc_assign_vector(adapter->q_vector[i], vector++);
+ itr_val |= IGC_EITR_CNT_IGNR;
- wrfl();
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
}
static irqreturn_t igc_msix_ring(int irq, void *data)
@@ -2961,49 +3926,6 @@ err_out:
}
/**
- * igc_reset_q_vector - Reset config for interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be reset
- *
- * If NAPI is enabled it will delete any references to the
- * NAPI struct. This is preparation for igc_free_q_vector.
- */
-static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
-{
- struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
-
- /* if we're coming from igc_set_interrupt_capability, the vectors are
- * not yet allocated
- */
- if (!q_vector)
- return;
-
- if (q_vector->tx.ring)
- adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
-
- if (q_vector->rx.ring)
- adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
-
- netif_napi_del(&q_vector->napi);
-}
-
-static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-
- while (v_idx--)
- igc_reset_q_vector(adapter, v_idx);
-}
-
-/**
* igc_clear_interrupt_scheme - reset the device to a state of no interrupts
* @adapter: Pointer to adapter structure
*
@@ -3016,48 +3938,6 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
igc_reset_interrupt_capability(adapter);
}
-/**
- * igc_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
- */
-static void igc_free_q_vectors(struct igc_adapter *adapter)
-{
- int v_idx = adapter->num_q_vectors;
-
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--) {
- igc_reset_q_vector(adapter, v_idx);
- igc_free_q_vector(adapter, v_idx);
- }
-}
-
-/**
- * igc_free_q_vector - Free memory allocated for specific interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
- *
- * This function frees the memory allocated to the q_vector.
- */
-static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
-{
- struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
-
- adapter->q_vector[v_idx] = NULL;
-
- /* igc_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- if (q_vector)
- kfree_rcu(q_vector, rcu);
-}
-
/* Need to wait a few seconds after link up to get diagnostic information from
* the phy
*/
@@ -3109,7 +3989,7 @@ bool igc_has_link(struct igc_adapter *adapter)
/**
* igc_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: timer for the watchdog
*/
static void igc_watchdog(struct timer_list *t)
{
@@ -3282,6 +4162,8 @@ no_wait:
wr32(IGC_ICS, IGC_ICS_RXDMT0);
}
+ igc_ptp_tx_hang(adapter);
+
/* Reset the timer */
if (!test_bit(__IGC_DOWN, &adapter->state)) {
if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
@@ -3294,149 +4176,6 @@ no_wait:
}
/**
- * igc_update_ring_itr - update the dynamic ITR value based on packet size
- * @q_vector: pointer to q_vector
- *
- * Stores a new ITR value based strictly on packet size. This
- * algorithm is less sophisticated than that used in igc_update_itr,
- * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this function
- * were determined based on theoretical maximum wire speed and testing
- * data, in order to minimize response time while increasing bulk
- * throughput.
- * NOTE: This function is called only when operating in a multiqueue
- * receive environment.
- */
-static void igc_update_ring_itr(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- int new_val = q_vector->itr_val;
- int avg_wire_size = 0;
- unsigned int packets;
-
- /* For non-gigabit speeds, just fix the interrupt rate at 4000
- * ints/sec - ITR timer value of 120 ticks.
- */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- new_val = IGC_4K_ITR;
- goto set_itr_val;
- default:
- break;
- }
-
- packets = q_vector->rx.total_packets;
- if (packets)
- avg_wire_size = q_vector->rx.total_bytes / packets;
-
- packets = q_vector->tx.total_packets;
- if (packets)
- avg_wire_size = max_t(u32, avg_wire_size,
- q_vector->tx.total_bytes / packets);
-
- /* if avg_wire_size isn't set no work was done */
- if (!avg_wire_size)
- goto clear_counts;
-
- /* Add 24 bytes to size to account for CRC, preamble, and gap */
- avg_wire_size += 24;
-
- /* Don't starve jumbo frames */
- avg_wire_size = min(avg_wire_size, 3000);
-
- /* Give a little boost to mid-size frames */
- if (avg_wire_size > 300 && avg_wire_size < 1200)
- new_val = avg_wire_size / 3;
- else
- new_val = avg_wire_size / 2;
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (new_val < IGC_20K_ITR &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- new_val = IGC_20K_ITR;
-
-set_itr_val:
- if (new_val != q_vector->itr_val) {
- q_vector->itr_val = new_val;
- q_vector->set_itr = 1;
- }
-clear_counts:
- q_vector->rx.total_bytes = 0;
- q_vector->rx.total_packets = 0;
- q_vector->tx.total_bytes = 0;
- q_vector->tx.total_packets = 0;
-}
-
-/**
- * igc_update_itr - update the dynamic ITR value based on statistics
- * @q_vector: pointer to q_vector
- * @ring_container: ring info to update the itr for
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- * NOTE: These calculations are only valid when operating in a single-
- * queue environment.
- */
-static void igc_update_itr(struct igc_q_vector *q_vector,
- struct igc_ring_container *ring_container)
-{
- unsigned int packets = ring_container->total_packets;
- unsigned int bytes = ring_container->total_bytes;
- u8 itrval = ring_container->itr;
-
- /* no packets, exit with status unchanged */
- if (packets == 0)
- return;
-
- switch (itrval) {
- case lowest_latency:
- /* handle TSO and jumbo frames */
- if (bytes / packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 5) && (bytes > 512))
- itrval = low_latency;
- break;
- case low_latency: /* 50 usec aka 20000 ints/s */
- if (bytes > 10000) {
- /* this if handles the TSO accounting */
- if (bytes / packets > 8000)
- itrval = bulk_latency;
- else if ((packets < 10) || ((bytes / packets) > 1200))
- itrval = bulk_latency;
- else if ((packets > 35))
- itrval = lowest_latency;
- } else if (bytes / packets > 2000) {
- itrval = bulk_latency;
- } else if (packets <= 2 && bytes < 512) {
- itrval = lowest_latency;
- }
- break;
- case bulk_latency: /* 250 usec aka 4000 ints/s */
- if (bytes > 25000) {
- if (packets > 35)
- itrval = low_latency;
- } else if (bytes < 1500) {
- itrval = low_latency;
- }
- break;
- }
-
- /* clear work counters since we have the values we need */
- ring_container->total_bytes = 0;
- ring_container->total_packets = 0;
-
- /* write updated itr to ring container */
- ring_container->itr = itrval;
-}
-
-/**
* igc_intr_msi - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
@@ -3513,424 +4252,6 @@ static irqreturn_t igc_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static void igc_set_itr(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- u32 new_itr = q_vector->itr_val;
- u8 current_itr = 0;
-
- /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- current_itr = 0;
- new_itr = IGC_4K_ITR;
- goto set_itr_now;
- default:
- break;
- }
-
- igc_update_itr(q_vector, &q_vector->tx);
- igc_update_itr(q_vector, &q_vector->rx);
-
- current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
-
- /* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (current_itr == lowest_latency &&
- ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
- (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
- current_itr = low_latency;
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
- break;
- case low_latency:
- new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
- break;
- case bulk_latency:
- new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
- break;
- default:
- break;
- }
-
-set_itr_now:
- if (new_itr != q_vector->itr_val) {
- /* this attempts to bias the interrupt rate towards Bulk
- * by adding intermediate steps when interrupt rate is
- * increasing
- */
- new_itr = new_itr > q_vector->itr_val ?
- max((new_itr * q_vector->itr_val) /
- (new_itr + (q_vector->itr_val >> 2)),
- new_itr) : new_itr;
- /* Don't write the value here; it resets the adapter's
- * internal timer, and causes us to delay far longer than
- * we should between interrupts. Instead, we write the ITR
- * value at the beginning of the next interrupt so the timing
- * ends up being correct.
- */
- q_vector->itr_val = new_itr;
- q_vector->set_itr = 1;
- }
-}
-
-static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
-{
- struct igc_adapter *adapter = q_vector->adapter;
- struct igc_hw *hw = &adapter->hw;
-
- if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
- (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
- if (adapter->num_q_vectors == 1)
- igc_set_itr(q_vector);
- else
- igc_update_ring_itr(q_vector);
- }
-
- if (!test_bit(__IGC_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
- wr32(IGC_EIMS, q_vector->eims_value);
- else
- igc_irq_enable(adapter);
- }
-}
-
-/**
- * igc_poll - NAPI Rx polling callback
- * @napi: napi polling structure
- * @budget: count of how many packets we should handle
- */
-static int igc_poll(struct napi_struct *napi, int budget)
-{
- struct igc_q_vector *q_vector = container_of(napi,
- struct igc_q_vector,
- napi);
- bool clean_complete = true;
- int work_done = 0;
-
- if (q_vector->tx.ring)
- clean_complete = igc_clean_tx_irq(q_vector, budget);
-
- if (q_vector->rx.ring) {
- int cleaned = igc_clean_rx_irq(q_vector, budget);
-
- work_done += cleaned;
- if (cleaned >= budget)
- clean_complete = false;
- }
-
- /* If all work not completed, return budget and keep polling */
- if (!clean_complete)
- return budget;
-
- /* Exit the polling mode, but don't re-enable interrupts if stack might
- * poll us due to busy-polling
- */
- if (likely(napi_complete_done(napi, work_done)))
- igc_ring_irq_enable(q_vector);
-
- return min(work_done, budget - 1);
-}
-
-/**
- * igc_set_interrupt_capability - set MSI or MSI-X if supported
- * @adapter: Pointer to adapter structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- */
-static void igc_set_interrupt_capability(struct igc_adapter *adapter,
- bool msix)
-{
- int numvecs, i;
- int err;
-
- if (!msix)
- goto msi_only;
- adapter->flags |= IGC_FLAG_HAS_MSIX;
-
- /* Number of supported queues. */
- adapter->num_rx_queues = adapter->rss_queues;
-
- adapter->num_tx_queues = adapter->rss_queues;
-
- /* start with one vector for every Rx queue */
- numvecs = adapter->num_rx_queues;
-
- /* if Tx handler is separate add 1 for every Tx queue */
- if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
- numvecs += adapter->num_tx_queues;
-
- /* store the number of vectors reserved for queues */
- adapter->num_q_vectors = numvecs;
-
- /* add 1 vector for link status interrupts */
- numvecs++;
-
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- return;
-
- /* populate entry values */
- for (i = 0; i < numvecs; i++)
- adapter->msix_entries[i].entry = i;
-
- err = pci_enable_msix_range(adapter->pdev,
- adapter->msix_entries,
- numvecs,
- numvecs);
- if (err > 0)
- return;
-
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
-
- igc_reset_interrupt_capability(adapter);
-
-msi_only:
- adapter->flags &= ~IGC_FLAG_HAS_MSIX;
-
- adapter->rss_queues = 1;
- adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_q_vectors = 1;
- if (!pci_enable_msi(adapter->pdev))
- adapter->flags |= IGC_FLAG_HAS_MSI;
-}
-
-static void igc_add_ring(struct igc_ring *ring,
- struct igc_ring_container *head)
-{
- head->ring = ring;
- head->count++;
-}
-
-/**
- * igc_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_count: q_vectors allocated on adapter, used for ring interleaving
- * @v_idx: index of vector in adapter struct
- * @txr_count: total number of Tx rings to allocate
- * @txr_idx: index of first Tx ring to allocate
- * @rxr_count: total number of Rx rings to allocate
- * @rxr_idx: index of first Rx ring to allocate
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int igc_alloc_q_vector(struct igc_adapter *adapter,
- unsigned int v_count, unsigned int v_idx,
- unsigned int txr_count, unsigned int txr_idx,
- unsigned int rxr_count, unsigned int rxr_idx)
-{
- struct igc_q_vector *q_vector;
- struct igc_ring *ring;
- int ring_count;
-
- /* igc only supports 1 Tx and/or 1 Rx queue per vector */
- if (txr_count > 1 || rxr_count > 1)
- return -ENOMEM;
-
- ring_count = txr_count + rxr_count;
-
- /* allocate q_vector and rings */
- q_vector = adapter->q_vector[v_idx];
- if (!q_vector)
- q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
- GFP_KERNEL);
- else
- memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
- if (!q_vector)
- return -ENOMEM;
-
- /* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
-
- /* tie q_vector and adapter together */
- adapter->q_vector[v_idx] = q_vector;
- q_vector->adapter = adapter;
-
- /* initialize work limits */
- q_vector->tx.work_limit = adapter->tx_work_limit;
-
- /* initialize ITR configuration */
- q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
- q_vector->itr_val = IGC_START_ITR;
-
- /* initialize pointer to rings */
- ring = q_vector->ring;
-
- /* initialize ITR */
- if (rxr_count) {
- /* rx or rx/tx vector */
- if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
- q_vector->itr_val = adapter->rx_itr_setting;
- } else {
- /* tx only vector */
- if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
- q_vector->itr_val = adapter->tx_itr_setting;
- }
-
- if (txr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Tx values */
- igc_add_ring(ring, &q_vector->tx);
-
- /* apply Tx specific ring traits */
- ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
-
- /* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
-
- /* push pointer to next ring */
- ring++;
- }
-
- if (rxr_count) {
- /* assign generic ring traits */
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
-
- /* configure backlink on ring */
- ring->q_vector = q_vector;
-
- /* update q_vector Rx values */
- igc_add_ring(ring, &q_vector->rx);
-
- /* apply Rx specific ring traits */
- ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
-
- /* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
- }
-
- return 0;
-}
-
-/**
- * igc_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int igc_alloc_q_vectors(struct igc_adapter *adapter)
-{
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int rxr_idx = 0, txr_idx = 0, v_idx = 0;
- int q_vectors = adapter->num_q_vectors;
- int err;
-
- if (q_vectors >= (rxr_remaining + txr_remaining)) {
- for (; rxr_remaining; v_idx++) {
- err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
- 0, 0, 1, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining--;
- rxr_idx++;
- }
- }
-
- for (; v_idx < q_vectors; v_idx++) {
- int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
- int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
-
- err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
- tqpv, txr_idx, rqpv, rxr_idx);
-
- if (err)
- goto err_out;
-
- /* update counts and index */
- rxr_remaining -= rqpv;
- txr_remaining -= tqpv;
- rxr_idx++;
- txr_idx++;
- }
-
- return 0;
-
-err_out:
- adapter->num_tx_queues = 0;
- adapter->num_rx_queues = 0;
- adapter->num_q_vectors = 0;
-
- while (v_idx--)
- igc_free_q_vector(adapter, v_idx);
-
- return -ENOMEM;
-}
-
-/**
- * igc_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- */
-static void igc_cache_ring_register(struct igc_adapter *adapter)
-{
- int i = 0, j = 0;
-
- switch (adapter->hw.mac.type) {
- case igc_i225:
- /* Fall through */
- default:
- for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j]->reg_idx = j;
- break;
- }
-}
-
-/**
- * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
- * @adapter: Pointer to adapter structure
- *
- * This function initializes the interrupts and allocates all of the queues.
- */
-static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
-{
- struct pci_dev *pdev = adapter->pdev;
- int err = 0;
-
- igc_set_interrupt_capability(adapter, msix);
-
- err = igc_alloc_q_vectors(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
- goto err_alloc_q_vectors;
- }
-
- igc_cache_ring_register(adapter);
-
- return 0;
-
-err_alloc_q_vectors:
- igc_reset_interrupt_capability(adapter);
- return err;
-}
-
static void igc_free_irq(struct igc_adapter *adapter)
{
if (adapter->msix_entries) {
@@ -3947,62 +4268,6 @@ static void igc_free_irq(struct igc_adapter *adapter)
}
/**
- * igc_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- */
-static void igc_irq_disable(struct igc_adapter *adapter)
-{
- struct igc_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 regval = rd32(IGC_EIAM);
-
- wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
- wr32(IGC_EIMC, adapter->eims_enable_mask);
- regval = rd32(IGC_EIAC);
- wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
- }
-
- wr32(IGC_IAM, 0);
- wr32(IGC_IMC, ~0);
- wrfl();
-
- if (adapter->msix_entries) {
- int vector = 0, i;
-
- synchronize_irq(adapter->msix_entries[vector++].vector);
-
- for (i = 0; i < adapter->num_q_vectors; i++)
- synchronize_irq(adapter->msix_entries[vector++].vector);
- } else {
- synchronize_irq(adapter->pdev->irq);
- }
-}
-
-/**
- * igc_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- */
-static void igc_irq_enable(struct igc_adapter *adapter)
-{
- struct igc_hw *hw = &adapter->hw;
-
- if (adapter->msix_entries) {
- u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
- u32 regval = rd32(IGC_EIAC);
-
- wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
- regval = rd32(IGC_EIAM);
- wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
- wr32(IGC_EIMS, adapter->eims_enable_mask);
- wr32(IGC_IMS, ims);
- } else {
- wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
- wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
- }
-}
-
-/**
* igc_request_irq - initialize interrupts
* @adapter: Pointer to adapter structure
*
@@ -4056,25 +4321,10 @@ request_done:
return err;
}
-static void igc_write_itr(struct igc_q_vector *q_vector)
-{
- u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
-
- if (!q_vector->set_itr)
- return;
-
- if (!itr_val)
- itr_val = IGC_ITR_VAL_MASK;
-
- itr_val |= IGC_EITR_CNT_IGNR;
-
- writel(itr_val, q_vector->itr_register);
- q_vector->set_itr = 0;
-}
-
/**
- * igc_open - Called when a network interface is made active
+ * __igc_open - Called when a network interface is made active
* @netdev: network interface device structure
+ * @resuming: boolean indicating if the device is resuming
*
* Returns 0 on success, negative value on failure
*
@@ -4164,8 +4414,9 @@ static int igc_open(struct net_device *netdev)
}
/**
- * igc_close - Disables a network interface
+ * __igc_close - Disables a network interface
* @netdev: network interface device structure
+ * @suspending: boolean indicating the device is suspending
*
* Returns 0, this is not allowed to fail
*
@@ -4199,6 +4450,24 @@ static int igc_close(struct net_device *netdev)
return 0;
}
+/**
+ * igc_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return igc_ptp_get_ts_config(netdev, ifr);
+ case SIOCSHWTSTAMP:
+ return igc_ptp_set_ts_config(netdev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
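+
A hypothetical userspace counterpart to this handler: configure hardware timestamping through the SIOCSHWTSTAMP ioctl with a struct hwtstamp_config. "eth0" is a placeholder interface name; the kernel copies the granted configuration back, so the driver may widen the requested filter.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* e.g. igc widens the filter to HWTSTAMP_FILTER_ALL */
		printf("rx_filter granted: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}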
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4210,6 +4479,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
+ .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -4345,32 +4615,26 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err;
+ int err, pci_using_dac;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(64));
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "igc: Wrong DMA config\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- igc_driver_name);
+ err = pci_request_mem_regions(pdev, igc_driver_name);
if (err)
goto err_pci_reg;
@@ -4433,6 +4697,9 @@ static int igc_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Add supported features to the features list*/
+ netdev->features |= NETIF_F_SG;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
@@ -4446,6 +4713,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= netdev->features;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -4512,6 +4782,9 @@ static int igc_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ /* do hw tstamp init after resetting */
+ igc_ptp_init(adapter);
+
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
@@ -4532,8 +4805,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -4554,6 +4826,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ igc_ptp_stop(adapter);
+
set_bit(__IGC_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
@@ -4580,105 +4854,216 @@ static void igc_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_driver igc_driver = {
- .name = igc_driver_name,
- .id_table = igc_pci_tbl,
- .probe = igc_probe,
- .remove = igc_remove,
-};
-
-void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
- const u32 max_rss_queues)
+static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
- /* Determine if we need to pair queues. */
- /* If rss_queues > half of max_rss_queues, pair the queues in
- * order to conserve interrupts due to limited supply.
- */
- if (adapter->rss_queues > (max_rss_queues / 2))
- adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl, rctl, status;
+ bool wake;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __igc_close(netdev, true);
+
+ igc_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
+
+ status = rd32(IGC_STATUS);
+ if (status & IGC_STATUS_LU)
+ wufc &= ~IGC_WUFC_LNKC;
+
+ if (wufc) {
+ igc_setup_rctl(adapter);
+ igc_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & IGC_WUFC_MC) {
+ rctl = rd32(IGC_RCTL);
+ rctl |= IGC_RCTL_MPE;
+ wr32(IGC_RCTL, rctl);
+ }
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_ADVD3WUC;
+ wr32(IGC_CTRL, ctrl);
+
+ /* Allow time for pending master requests to run */
+ igc_disable_pcie_master(hw);
+
+ wr32(IGC_WUC, IGC_WUC_PME_EN);
+ wr32(IGC_WUFC, wufc);
+ } else {
+ wr32(IGC_WUC, 0);
+ wr32(IGC_WUFC, 0);
+ }
+
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
+ igc_power_down_link(adapter);
else
- adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
-}
+ igc_power_up_link(adapter);
-unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
-{
- unsigned int max_rss_queues;
+ if (enable_wake)
+ *enable_wake = wake;
- /* Determine the maximum number of RSS queues supported. */
- max_rss_queues = IGC_MAX_RX_QUEUES;
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ igc_release_hw_control(adapter);
- return max_rss_queues;
+ pci_disable_device(pdev);
+
+ return 0;
}
-static void igc_init_queue_configuration(struct igc_adapter *adapter)
+#ifdef CONFIG_PM
+static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
- u32 max_rss_queues;
-
- max_rss_queues = igc_get_max_rss_queues(adapter);
- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
-
- igc_set_flag_queue_pairs(adapter, max_rss_queues);
+ return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
-/**
- * igc_sw_init - Initialize general software structures (struct igc_adapter)
- * @adapter: board private structure to initialize
- *
- * igc_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- */
-static int igc_sw_init(struct igc_adapter *adapter)
+static void igc_deliver_wake_packet(struct net_device *netdev)
{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
+ struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
+ struct sk_buff *skb;
+ u32 wupl;
- int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+ wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+ /* WUPM stores only the first 128 bytes of the wake packet.
+ * Read the packet only if we have the whole thing.
+ */
+ if (wupl == 0 || wupl > IGC_WUPM_BYTES)
+ return;
- /* set default ring sizes */
- adapter->tx_ring_count = IGC_DEFAULT_TXD;
- adapter->rx_ring_count = IGC_DEFAULT_RXD;
+ skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
+ if (!skb)
+ return;
- /* set default ITR values */
- adapter->rx_itr_setting = IGC_DEFAULT_ITR;
- adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+ skb_put(skb, wupl);
- /* set default work limits */
- adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+ /* Ensure reads are 32-bit aligned */
+ wupl = roundup(wupl, 4);
- /* adjust max frame to be at least the size of a standard frame */
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
- spin_lock_init(&adapter->nfc_lock);
- spin_lock_init(&adapter->stats64_lock);
- /* Assume MSI-X interrupts, will be checked during IRQ allocation */
- adapter->flags |= IGC_FLAG_HAS_MSIX;
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+}
- adapter->mac_table = kzalloc(size, GFP_ATOMIC);
- if (!adapter->mac_table)
- return -ENOMEM;
+static int __maybe_unused igc_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 err, val;
- igc_init_queue_configuration(adapter);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "igc: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
- /* This call may decrease the number of queues */
if (igc_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
- /* Explicitly disable IRQ since the NIC can be in any state. */
- igc_irq_disable(adapter);
+ igc_reset(adapter);
- set_bit(__IGC_DOWN, &adapter->state);
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
- return 0;
+ val = rd32(IGC_WUS);
+ if (val & WAKE_PKT_WUS)
+ igc_deliver_wake_packet(netdev);
+
+ wr32(IGC_WUS, ~0);
+
+ rtnl_lock();
+ if (!err && netif_running(netdev))
+ err = __igc_open(netdev, true);
+
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __maybe_unused igc_runtime_resume(struct device *dev)
+{
+ return igc_resume(dev);
+}
+
+static int __maybe_unused igc_suspend(struct device *dev)
+{
+ return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
+static int __maybe_unused igc_runtime_idle(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (!igc_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+#endif /* CONFIG_PM */
+
+static void igc_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __igc_shutdown(pdev, &wake, 0);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops igc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
+ SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle)
+};
+#endif
+
+static struct pci_driver igc_driver = {
+ .name = igc_driver_name,
+ .id_table = igc_pci_tbl,
+ .probe = igc_probe,
+ .remove = igc_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &igc_pm_ops,
+#endif
+ .shutdown = igc_shutdown,
+};
+
/**
* igc_reinit_queues - reinitialize device queues
* @adapter: pointer to adapter structure
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index f4b05af0dd2f..8e1799508edc 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -173,6 +173,7 @@ s32 igc_check_downshift(struct igc_hw *hw)
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
+ u32 phpm = 0, timeout = 10000;
s32 ret_val;
u32 ctrl;
@@ -186,6 +187,8 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
if (ret_val)
goto out;
+ phpm = rd32(IGC_I225_PHPM);
+
ctrl = rd32(IGC_CTRL);
wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
wrfl();
@@ -195,7 +198,18 @@ s32 igc_phy_hw_reset(struct igc_hw *hw)
wr32(IGC_CTRL, ctrl);
wrfl();
- usleep_range(1500, 2000);
+ /* SW should guarantee 100us for the completion of the PHY reset */
+ usleep_range(100, 150);
+ do {
+ phpm = rd32(IGC_I225_PHPM);
+ timeout--;
+ udelay(1);
+ } while (!(phpm & IGC_PHY_RST_COMP) && timeout);
+
+ if (!timeout)
+ hw_dbg("Timeout is expired after a phy reset\n");
+
+ usleep_range(100, 150);
phy->ops.release(hw);
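
The open-coded completion poll above could also be expressed with the helpers from <linux/iopoll.h>. A sketch only, assuming a small wrapper is acceptable since rd32() takes a register offset rather than a mapped address; igc_read_phpm and igc_wait_phy_reset_complete are illustrative names, not driver functions:

#include <linux/iopoll.h>

static u32 igc_read_phpm(struct igc_hw *hw)
{
	return rd32(IGC_I225_PHPM);
}

static s32 igc_wait_phy_reset_complete(struct igc_hw *hw)
{
	u32 phpm;

	/* poll every 1us, give up after 10ms, matching the loop above */
	return readx_poll_timeout_atomic(igc_read_phpm, hw, phpm,
					 phpm & IGC_PHY_RST_COMP, 1, 10000);
}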
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
new file mode 100644
index 000000000000..693506587198
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Intel Corporation */
+
+#include "igc.h"
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+
+#define INCVALUE_MASK 0x7fffffff
+#define ISGN 0x80000000
+
+#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGC_PTP_TX_TIMEOUT (HZ * 15)
+
+/* SYSTIM read access for I225 */
+static void igc_ptp_read_i225(struct igc_adapter *adapter,
+ struct timespec64 *ts)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 sec, nsec;
+
+ /* The timestamp is latched by the lowest register read; on I225, as on
+ * I210/I211, that is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore its value.
+ */
+ rd32(IGC_SYSTIMR);
+ nsec = rd32(IGC_SYSTIML);
+ sec = rd32(IGC_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void igc_ptp_write_i225(struct igc_adapter *adapter,
+ const struct timespec64 *ts)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Writing the SYSTIMR register is not necessary as it only
+ * provides sub-nanosecond resolution.
+ */
+ wr32(IGC_SYSTIML, ts->tv_nsec);
+ wr32(IGC_SYSTIMH, ts->tv_sec);
+}
+
+static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 inca;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+ rate = scaled_ppm;
+ rate <<= 14;
+ rate = div_u64(rate, 78125);
+
+ inca = rate & INCVALUE_MASK;
+ if (neg_adj)
+ inca |= ISGN;
+
+ wr32(IGC_TIMINCA, inca);
+
+ return 0;
+}
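+
scaled_ppm carries parts per million with a 16-bit binary fraction, so a +1 ppm request arrives as 65536, and the conversion above yields (65536 << 14) / 78125 = 2^30 / 78125 = 13743 as the value written to IGC_TIMINCA for that adjustment. A standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long scaled_ppm = 65536;	/* +1 ppm */
	uint64_t rate = (uint64_t)scaled_ppm << 14;

	rate /= 78125;
	printf("TIMINCA value for +1 ppm: %llu\n",
	       (unsigned long long)rate);	/* 13743 */
	return 0;
}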
+
+static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ igc_ptp_read_i225(igc, &now);
+ now = timespec64_add(now, then);
+ igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(IGC_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(IGC_SYSTIML);
+ ts->tv_sec = rd32(IGC_SYSTIMH);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_settime_i225(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->tmreg_lock, flags);
+
+ igc_ptp_write_i225(igc, ts);
+
+ spin_unlock_irqrestore(&igc->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ **/
+static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(systim >> 32,
+ systim & 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+}
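+
A quick standalone check of the 64-bit split this helper assumes, with seconds in the upper half and nanoseconds in the lower:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t systim = ((uint64_t)5 << 32) | 500000000;	/* 5.5 s */

	printf("%llu s, %llu ns\n",
	       (unsigned long long)(systim >> 32),
	       (unsigned long long)(systim & 0xFFFFFFFF));
	return 0;
}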
+
+/**
+ * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve the first timestamp from the
+ * first buffer of an incoming frame. The value is stored in little
+ * endian format starting on byte 0. There's a second timestamp
+ * starting on byte 8.
+ **/
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ struct sk_buff *skb)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ __le64 *regval = (__le64 *)va;
+ int adjust = 0;
+
+ /* The timestamp is recorded in little endian format.
+ * DWORD: | 0 | 1 | 2 | 3
+ * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+ */
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[0]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == igc_i225) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_RX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_RX_LATENCY_2500;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+}
+
+/**
+ * igc_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ u64 regval;
+
+ /* If this bit is set, then the RX registers contain the time
+ * stamp. No other packet will be time stamped until we read
+ * these registers, so read the registers to make them
+ * available again. Because only one packet can be time
+ * stamped at a time, we know that the register values must
+ * belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared
+ * tx_flags that we can turn into a skb_shared_hwtstamps.
+ */
+ if (!(rd32(IGC_TSYNCRXCTL) & IGC_TSYNCRXCTL_VALID))
+ return;
+
+ regval = rd32(IGC_RXSTMPL);
+ regval |= (u64)rd32(IGC_RXSTMPH) << 32;
+
+ igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
+}
+
+/**
+ * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue
+ * @rx_ring: Pointer to RX queue
+ * @timer: Index for timer
+ *
+ * This function enables RX timestamping for a queue, and selects
+ * which 1588 timer will provide the timestamp.
+ */
+static void igc_ptp_enable_tstamp_rxqueue(struct igc_adapter *adapter,
+ struct igc_ring *rx_ring, u8 timer)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int reg_idx = rx_ring->reg_idx;
+ u32 srrctl = rd32(IGC_SRRCTL(reg_idx));
+
+ srrctl |= IGC_SRRCTL_TIMESTAMP;
+ srrctl |= IGC_SRRCTL_TIMER1SEL(timer);
+ srrctl |= IGC_SRRCTL_TIMER0SEL(timer);
+
+ wr32(IGC_SRRCTL(reg_idx), srrctl);
+}
+
+static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
+ u8 timer)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_ptp_enable_tstamp_rxqueue(adapter, ring, timer);
+ }
+}
+
+/**
+ * igc_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ */
+static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
+ struct hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
+ struct igc_hw *hw = &adapter->hw;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ /* fall through */
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as long
+ * as one Rx filter was configured.
+ */
+ if (tsync_rx_ctl) {
+ tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
+
+ if (hw->mac.type == igc_i225) {
+ regval = rd32(IGC_RXPBS);
+ regval |= IGC_RXPBS_CFG_TS_EN;
+ wr32(IGC_RXPBS, regval);
+
+ /* FIXME: For now, only support retrieving RX
+ * timestamps from timer 0
+ */
+ igc_ptp_enable_tstamp_all_rxqueues(adapter, 0);
+ }
+ }
+
+ if (tsync_tx_ctl) {
+ tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
+ tsync_tx_ctl |= IGC_TSYNCTXCTL_TXSYNSIG;
+ }
+
+ /* enable/disable TX */
+ regval = rd32(IGC_TSYNCTXCTL);
+ regval &= ~IGC_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32(IGC_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(IGC_TSYNCRXCTL);
+ regval &= ~(IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(IGC_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ wr32(IGC_ETQF(3),
+ (IGC_ETQF_FILTER_ENABLE | /* enable filter */
+ IGC_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(IGC_ETQF(3), 0);
+
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | IGC_FTQF_VF_BP /* VF not compared */
+ | IGC_FTQF_1588_TIME_STAMP /* Enable Timestamp */
+ | IGC_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~IGC_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ wr32(IGC_IMIR(3), htons(PTP_EV_PORT));
+ wr32(IGC_IMIREXT(3),
+ (IGC_IMIREXT_SIZE_BP | IGC_IMIREXT_CTRL_BP));
+ wr32(IGC_FTQF(3), ftqf);
+ } else {
+ wr32(IGC_FTQF(3), IGC_FTQF_MASK);
+ }
+ wrfl();
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = rd32(IGC_TXSTMPL);
+ regval = rd32(IGC_TXSTMPH);
+ regval = rd32(IGC_RXSTMPL);
+ regval = rd32(IGC_RXSTMPH);
+
+ return 0;
+}
+
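
For context, igc_ptp_set_timestamp_mode() above is driven by the standard SIOCSHWTSTAMP ioctl. A minimal user-space sketch, assuming an interface named "eth0" and with error handling trimmed, that requests the TX-on/RX-all configuration this function programs:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int enable_hwtstamp(int sock)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
		ifr.ifr_data = (void *)&cfg;

		/* The kernel may upgrade rx_filter; cfg is written back. */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}

The written-back config is the same shadow copy that igc_ptp_get_ts_config() later returns to SIOCGHWTSTAMP callers.
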
+void igc_ptp_tx_hang(struct igc_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IGC_PTP_TX_TIMEOUT);
+ struct igc_hw *hw = &adapter->hw;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(IGC_TXSTMPH);
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+/**
+ * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow one such packet into the queue.
+ */
+static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct igc_hw *hw = &adapter->hw;
+ u64 regval;
+
+ regval = rd32(IGC_TXSTMPL);
+ regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+ igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+
+ /* Clear the lock early before calling skb_tstamp_tx so that
+ * applications are not woken up before the lock bit is clear. We use
+ * a copy of the skb pointer to ensure other threads can't change it
+ * while we're notifying the stack.
+ */
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
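
The timestamp published by skb_tstamp_tx() above reaches user space on the socket's error queue. A hedged sketch of the standard SO_TIMESTAMPING receive side (not igc-specific; it assumes SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE was already enabled via setsockopt):

	#include <time.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>
	#include <linux/errqueue.h>

	static int read_tx_tstamp(int sock, struct timespec *hwts)
	{
		char ctrl[CMSG_SPACE(sizeof(struct scm_timestamping))];
		struct msghdr msg = {
			.msg_control = ctrl,
			.msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *cm;

		if (recvmsg(sock, &msg, MSG_ERRQUEUE) < 0)
			return -1;

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SCM_TIMESTAMPING) {
				struct scm_timestamping *ts = (void *)CMSG_DATA(cm);

				*hwts = ts->ts[2];	/* raw hardware timestamp */
				return 0;
			}
		}
		return -1;
	}
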
+/**
+ * igc_ptp_tx_work - Poll for a TX timestamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igc_ptp_tx_work(struct work_struct *work)
+{
+ struct igc_adapter *adapter = container_of(work, struct igc_adapter,
+ ptp_tx_work);
+ struct igc_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGC_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ /* Clear the tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(IGC_TXSTMPH);
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ return;
+ }
+
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
+ igc_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to check later */
+ schedule_work(&adapter->ptp_tx_work);
+}
+
+/**
+ * igc_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ **/
+int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igc_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igc_ptp_get_ts_config - get hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igc_ptp_init - Initialize PTP functionality
+ * @adapter: Board private structure
+ *
+ * This function is called at device probe to initialize the PTP
+ * functionality.
+ */
+void igc_ptp_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
+ adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
+ adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
+ adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
+ adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
+ break;
+ default:
+ adapter->ptp_clock = NULL;
+ return;
+ }
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work);
+
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ igc_ptp_reset(adapter);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ } else if (adapter->ptp_clock) {
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGC_PTP_ENABLED;
+ }
+}
+
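
Once ptp_clock_register() succeeds, the clock is exposed as /dev/ptpN and can be read as a dynamic POSIX clock. A minimal sketch following the kernel's testptp example; the device index is assumed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDWR);	/* index assumed */
		struct timespec ts;

		if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
			return 1;
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}
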
+/**
+ * igc_ptp_suspend - Disable PTP work items and prepare for suspend
+ * @adapter: Board private structure
+ *
+ * This function stops the overflow check work and PTP Tx timestamp work, and
+ * will prepare the device for OS suspend.
+ */
+void igc_ptp_suspend(struct igc_adapter *adapter)
+{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
+}
+
+/**
+ * igc_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igc_ptp_stop(struct igc_adapter *adapter)
+{
+ igc_ptp_suspend(adapter);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
+ }
+}
+
+/**
+ * igc_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
+ *
+ * This function handles the reset work required to re-enable the PTP device.
+ **/
+void igc_ptp_reset(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ /* reset the tstamp_config */
+ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ wr32(IGC_TSAUXC, 0x0);
+ wr32(IGC_TSSDP, 0x0);
+ wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS);
+ wr32(IGC_IMS, IGC_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+ goto out;
+ }
+
+ /* Re-initialize the timer. */
+ if (hw->mac.type == igc_i225) {
+ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
+
+ igc_ptp_write_i225(adapter, &ts64);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+out:
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ wrfl();
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 50d7c04dccf5..c9029b549b90 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
@@ -209,12 +210,48 @@
#define IGC_LENERRS 0x04138 /* Length Errors Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+/* Time sync registers */
+#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */
+#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */
+#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
+
+#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+
+#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+
+/* System Time Registers */
+#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
+#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
+#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+
+#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
/* Shadow Ram Write Register - RW */
#define IGC_SRWR 0x12018
+/* Wake Up registers */
+#define IGC_WUC 0x05800 /* Wakeup Control - RW */
+#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
+#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+
+/* Wake Up packet memory */
+#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 3d8c051dd327..b64e91ea3465 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -70,7 +70,7 @@ static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
-static void ixgb_tx_timeout(struct net_device *dev);
+static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
@@ -1538,7 +1538,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
**/
static void
-ixgb_tx_timeout(struct net_device *netdev)
+ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 171cdc552961..5b1cf49df3d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -166,7 +166,9 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
ixgbe_dbg_netdev_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
- adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+ /* TX Queue number below is wrong, but ixgbe does not use it */
+ adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev,
+ UINT_MAX);
e_dev_info("tx_timeout called\n");
} else {
e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 25c097cd8100..718931d951bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
- u64 action;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
- action = filter->action;
- if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
- action =
- (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
@@ -6158,7 +6175,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void ixgbe_tx_timeout(struct net_device *netdev)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -10261,7 +10278,12 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* If transitioning XDP modes reconfigure rings */
if (need_reset) {
- int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
+ int err;
+
+ if (!prog)
+ /* Wait until ndo_xsk_wakeup completes. */
+ synchronize_rcu();
+ err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index d6feaacfbf89..74b540ebb3dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -277,7 +277,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
return true;
}
@@ -304,7 +304,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
return true;
}
@@ -709,10 +709,14 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (qid >= adapter->num_xdp_queues)
return -ENXIO;
- if (!adapter->xdp_ring[qid]->xsk_umem)
+ ring = adapter->xdp_ring[qid];
+
+ if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
+ return -ENETDOWN;
+
+ if (!ring->xsk_umem)
return -ENXIO;
- ring = adapter->xdp_ring[qid];
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 076f2da36f27..4622c4ea2e46 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -250,7 +250,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 25aa400e2e3c..2e4975572e9f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2337,7 +2337,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
}
static void
-jme_tx_timeout(struct net_device *netdev)
+jme_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct jme_adapter *jme = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index ae195f8adff5..f1d84921e42b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -917,7 +917,7 @@ static void korina_restart_task(struct work_struct *work)
enable_irq(lp->rx_irq);
}
-static void korina_tx_timeout(struct net_device *dev)
+static void korina_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct korina_private *lp = netdev_priv(dev);
@@ -1043,7 +1043,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->eth_regs = ioremap(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1051,7 +1051,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->rx_dma_regs = ioremap(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1059,7 +1059,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ lp->tx_dma_regs = ioremap(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6e73ffe6f928..2d0c52f7106b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -510,13 +510,6 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
}
static int
-ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- /* TODO: mii-toll reports "No MII transceiver present!." ?!*/
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
@@ -594,7 +587,7 @@ err_hw:
}
static void
-ltq_etop_tx_timeout(struct net_device *dev)
+ltq_etop_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
int err;
@@ -616,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_stop = ltq_etop_stop,
.ndo_start_xmit = ltq_etop_tx,
.ndo_change_mtu = ltq_etop_change_mtu,
- .ndo_do_ioctl = ltq_etop_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
@@ -649,7 +642,7 @@ ltq_etop_probe(struct platform_device *pdev)
goto err_out;
}
- ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
+ ltq_etop_membase = devm_ioremap(&pdev->dev,
res->start, resource_size(res));
if (!ltq_etop_membase) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 65a093216dac..3c8125cbc84d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2590,7 +2590,7 @@ static void tx_timeout_task(struct work_struct *ugly)
}
}
-static void mv643xx_eth_tx_timeout(struct net_device *dev)
+static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 71a872d46bc4..98017e7d5dd0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -324,8 +324,7 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
-#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
- NET_IP_ALIGN)
+#define MVNETA_SKB_HEADROOM max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
@@ -402,6 +401,8 @@ struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
};
@@ -739,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev,
struct mvneta_pcpu_stats *cpu_stats;
u64 rx_packets;
u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
@@ -747,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
+ rx_dropped = cpu_stats->rx_dropped;
+ rx_errors = cpu_stats->rx_errors;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_errors += rx_errors;
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
}
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_dropped = dev->stats.rx_dropped;
-
stats->tx_dropped = dev->stats.tx_dropped;
}
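
The mvneta hunks in this file move rx_errors and rx_dropped from dev->stats into the per-CPU counters guarded by u64_stats_sync. The writer/reader pairing used throughout, reduced to a sketch with shortened names (not mvneta's exact structs):

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct pcpu_stats {
		struct u64_stats_sync syncp;
		u64 rx_errors;
	};

	/* Writer side: NAPI/softirq context, one CPU at a time. */
	static void count_rx_error(struct pcpu_stats *stats)
	{
		u64_stats_update_begin(&stats->syncp);
		stats->rx_errors++;
		u64_stats_update_end(&stats->syncp);
	}

	/* Reader side: retry the fetch if a writer raced with it. */
	static u64 read_rx_errors(struct pcpu_stats *stats)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			val = stats->rx_errors;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		return val;
	}
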
@@ -1167,6 +1171,7 @@ bm_mtu_err:
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
pp->bm_priv = NULL;
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
@@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
static void mvneta_rx_error(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc)
{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u32 status = rx_desc->status;
+ /* update per-cpu counter */
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_errors++;
+ u64_stats_update_end(&stats->syncp);
+
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -2081,7 +2092,11 @@ static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp)
{
- u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+ unsigned int len;
+ u32 ret, act;
+
+ len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
@@ -2094,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
if (err) {
ret = MVNETA_XDP_DROPPED;
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
} else {
ret = MVNETA_XDP_REDIR;
}
@@ -2106,9 +2120,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2119,8 +2132,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_DROP:
__page_pool_put_page(rxq->page_pool,
virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ len, true);
ret = MVNETA_XDP_DROPPED;
break;
}
@@ -2178,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (unlikely(!rxq->skb)) {
- netdev_err(dev,
- "Can't allocate skb on queue %d\n",
- rxq->id);
- dev->stats.rx_dropped++;
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
rxq->skb_alloc_err++;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_dropped++;
+ u64_stats_update_end(&stats->syncp);
+
return -ENOMEM;
}
page_pool_release_page(rxq->page_pool, page);
@@ -2269,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
- dev->stats.rx_errors++;
/* leave the descriptor untouched */
continue;
}
@@ -2371,7 +2386,6 @@ err_drop_frame_ret_pool:
mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
rx_desc->buf_phys_addr);
err_drop_frame:
- dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
continue;
@@ -3071,7 +3085,7 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
- .nid = cpu_to_node(0),
+ .nid = NUMA_NO_NODE,
.dev = pp->dev->dev.parent,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = pp->rx_offset_correction,
@@ -4225,6 +4239,12 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ if (pp->bm_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Hardware Buffer Management not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
need_update = !!pp->xdp_prog != !!prog;
if (running && need_update)
mvneta_stop(dev);
@@ -4941,7 +4961,6 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
- pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
@@ -4966,6 +4985,10 @@ static int mvneta_probe(struct platform_device *pdev)
}
of_node_put(bm_node);
+ /* sw buffer management */
+ if (!pp->bm_priv)
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
+
err = mvneta_init(&pdev->dev, pp);
if (err < 0)
goto err_netdev;
@@ -5123,6 +5146,7 @@ static int mvneta_resume(struct device *device)
err = mvneta_bm_port_init(pdev, pp);
if (err < 0) {
dev_info(&pdev->dev, "use SW buffer management\n");
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
pp->bm_priv = NULL;
}
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 62dc2f362a16..72133cbe55d4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1114,7 +1114,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
- return interface == PHY_INTERFACE_MODE_10GKR ||
+ return interface == PHY_INTERFACE_MODE_10GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1200,7 +1200,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
if (port->gop_id != 0)
goto invalid_conf;
mvpp22_gop_init_10gkr(port);
@@ -1649,7 +1649,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
MAC_CLK_RESET_SD_TX;
@@ -3680,7 +3680,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+ if (priv->hw_version == MVPP22 && port->link_irq) {
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
dev->name, port);
if (err) {
@@ -4758,7 +4758,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
/* Invalid combinations */
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
if (port->gop_id != 0)
goto empty_set;
@@ -4780,7 +4780,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
if (port->gop_id == 0) {
@@ -4792,6 +4792,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10000baseER_Full);
phylink_set(mask, 10000baseKR_Full);
}
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4802,13 +4804,23 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ break;
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
+ if (port->comphy ||
+ state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (port->comphy ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
break;
default:
goto empty_set;
@@ -4817,6 +4829,8 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ phylink_helper_basex_speed(state);
return;
empty_set:
@@ -5233,6 +5247,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
}
+ /*
+ * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
+ * Existing usage of 10GBASE-KR is not correct; no backplane
+ * negotiation is done, and this driver does not actually support
+ * 10GBASE-KR.
+ */
+ if (phy_mode == PHY_INTERFACE_MODE_10GKR)
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
+
if (port_node) {
comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
if (IS_ERR(comphy)) {
@@ -5411,6 +5434,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink = NULL;
}
+ /* Cycle the comphy to power it down, saving 270mW per port -
+ * don't worry about an error powering it up. When the comphy
+ * driver does this, we can remove this code.
+ */
+ if (port->comphy) {
+ err = mvpp22_comphy_init(port);
+ if (err == 0)
+ phy_power_off(port->comphy);
+ }
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index fb34fbd62088..ced514c05c97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -25,3 +25,11 @@ config NDC_DIS_DYNAMIC_CACHING
This config option disables caching of dynamic entries such as NIX SQEs
, NPA stack pages etc in NDC. Also locks down NIX SQ/CQ/RQ/RSS and
NPA Aura/Pool contexts.
+
+config OCTEONTX2_PF
+ tristate "Marvell OcteonTX2 NIC Physical Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 NIC physical function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
index e579dcd54c97..0064a69e0f72 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -3,4 +3,6 @@
# Makefile for Marvell OcteonTX2 device drivers.
#
+obj-$(CONFIG_OCTEONTX2_MBOX) += af/
obj-$(CONFIG_OCTEONTX2_AF) += af/
+obj-$(CONFIG_OCTEONTX2_PF) += nic/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 784207bae5f8..cd33c2e6ca5f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,8 +143,13 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
-#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
-#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define MAX_SCHED_WEIGHT 0xFF
+#define DFLT_RR_WEIGHT 71
+#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
+ / MAX_SCHED_WEIGHT)
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
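
As a sanity check on the new default quantum above: TXSCH_RR_QTM_MAX is 2^24 - 1 = 16777215, which is exactly 255 * 65793, so the integer division is exact:

	/* DFLT_RR_QTM = (DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) / MAX_SCHED_WEIGHT
	 *             = (71 * 16777215) / 255
	 *             = 71 * 65793
	 *             = 4671303   (well within the 24-bit quantum field)
	 */
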
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a589748f1240..8bbc1f1d81f5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -210,7 +210,8 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -618,6 +619,11 @@ struct nix_set_mac_addr {
u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
};
+struct nix_get_mac_addr_rsp {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
struct nix_mark_format_cfg {
struct mbox_msghdr hdr;
u8 offset;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 8a59f7d53fbf..eb5e542424e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2546,6 +2546,23 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_get_mac_addr_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
new file mode 100644
index 000000000000..41bf00cf5b1d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 ethernet device drivers
+#
+
+obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
+
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
new file mode 100644
index 000000000000..b945bd3d5d88
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -0,0 +1,1410 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/tso.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+
+static void otx2_nix_rq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+static void otx2_nix_sq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+void otx2_update_lmac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return;
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+}
+
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
+
+ if (!pfvf->qset.rq)
+ return 0;
+
+ otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
+ return 1;
+}
+
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];
+
+ if (!pfvf->qset.sq)
+ return 0;
+
+ otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
+ return 1;
+}
+
+void otx2_get_dev_stats(struct otx2_nic *pfvf)
+{
+ struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
+
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
+ dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
+ dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
+ dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
+ dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
+ dev_stats->rx_frames = dev_stats->rx_bcast_frames +
+ dev_stats->rx_mcast_frames +
+ dev_stats->rx_ucast_frames;
+
+ dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
+ dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
+ dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
+ dev_stats->tx_frames = dev_stats->tx_bcast_frames +
+ dev_stats->tx_mcast_frames +
+ dev_stats->tx_ucast_frames;
+}
+
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_dev_stats *dev_stats;
+
+ otx2_get_dev_stats(pfvf);
+
+ dev_stats = &pfvf->hw.dev_stats;
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_drops;
+ stats->multicast = dev_stats->rx_mcast_frames;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_frames;
+ stats->tx_dropped = dev_stats->tx_drops;
+}
+
+/* Sync MAC address with RVU AF */
+static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
+{
+ struct nix_set_mac_addr *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
+ struct net_device *netdev)
+{
+ struct nix_get_mac_addr_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+
+ msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(msghdr)) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return PTR_ERR(msghdr);
+ }
+ rsp = (struct nix_get_mac_addr_rsp *)msghdr;
+ ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ return 0;
+}
+
+int otx2_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
+{
+ struct nix_frs_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ /* SMQ config limits maximum pkt size that can be transmitted */
+ req->update_smq = true;
+ pfvf->max_frs = mtu + OTX2_ETH_HLEN;
+ req->maxlen = pfvf->max_frs;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct nix_rss_flowkey_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+ req->mcam_index = -1; /* Default or reserved index */
+ req->flowkey_cfg = rss->flowkey_cfg;
+ req->group = DEFAULT_RSS_CONTEXT_GROUP;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
+int otx2_set_rss_table(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct mbox *mbox = &pfvf->mbox;
+ struct nix_aq_enq_req *aq;
+ int idx, err;
+
+ otx2_mbox_lock(mbox);
+ /* Get memory to put this msg */
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ err = otx2_sync_mbox_msg(mbox);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ otx2_mbox_unlock(mbox);
+ return -ENOMEM;
+ }
+ }
+
+ aq->rss.rq = rss->ind_tbl[idx];
+
+ /* Fill AQ info */
+ aq->qidx = idx;
+ aq->ctype = NIX_AQ_CTYPE_RSS;
+ aq->op = NIX_AQ_INSTOP_INIT;
+ }
+ err = otx2_sync_mbox_msg(mbox);
+ otx2_mbox_unlock(mbox);
+ return err;
+}
+
+void otx2_set_rss_key(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u64 *key = (u64 *)&rss->key[4];
+ int idx;
+
+ /* 352bit or 44byte key needs to be configured as below
+ * NIX_LF_RX_SECRETX0 = key<351:288>
+ * NIX_LF_RX_SECRETX1 = key<287:224>
+ * NIX_LF_RX_SECRETX2 = key<223:160>
+ * NIX_LF_RX_SECRETX3 = key<159:96>
+ * NIX_LF_RX_SECRETX4 = key<95:32>
+ * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
+ */
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
+ (u64)(*((u32 *)&rss->key)) << 32);
+ idx = sizeof(rss->key) / sizeof(u64);
+ while (idx > 0) {
+ idx--;
+ otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
+ }
+}
+
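
A standalone mirror of the write order above may make the layout easier to follow. This is a hedged sketch (host byte order, as in the driver), not driver code:

	#include <stdint.h>
	#include <string.h>

	/* Split a 44-byte RSS key into the six 64-bit SECRETX values:
	 * key bytes 0..3 land in the upper half of regs[5], bytes 4..11
	 * in regs[4], and so on down to bytes 36..43 in regs[0].
	 */
	static void split_rss_key(const uint8_t key[44], uint64_t regs[6])
	{
		uint32_t low;
		uint64_t word;
		int i;

		memcpy(&low, key, sizeof(low));
		regs[5] = (uint64_t)low << 32;		/* SECRETX5<63:32> */
		for (i = 0; i < 5; i++) {
			memcpy(&word, key + 4 + 8 * i, sizeof(word));
			regs[4 - i] = word;		/* SECRETX4 .. SECRETX0 */
		}
	}
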
+int otx2_rss_init(struct otx2_nic *pfvf)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ int idx, ret = 0;
+
+ rss->rss_size = sizeof(rss->ind_tbl);
+
+	/* Init RSS key if it is not set up already */
+ if (!rss->enable)
+ netdev_rss_key_fill(rss->key, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+
+ if (!netif_is_rxfh_configured(pfvf->netdev)) {
+ /* Default indirection table */
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] =
+ ethtool_rxfh_indir_default(idx,
+ pfvf->hw.rx_queues);
+ }
+ ret = otx2_set_rss_table(pfvf);
+ if (ret)
+ return ret;
+
+ /* Flowkey or hash config to be used for generating flow tag */
+ rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
+ NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
+ NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+
+ ret = otx2_set_flowkey_cfg(pfvf);
+ if (ret)
+ return ret;
+
+ rss->enable = true;
+ return 0;
+}
+
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
+{
+ /* Configure CQE interrupt coalescing parameters
+ *
+ * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
+ * set 1 less than cq_ecount_wait. And cq_time_wait is in
+ * usecs, convert that to 100ns count.
+ */
+ otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
+ ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
+ ((u64)pfvf->hw.cq_qcount_wait << 32) |
+ (pfvf->hw.cq_ecount_wait - 1));
+}
+
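
The 64-bit wait word packed by otx2_config_irq_coalescing() above puts the time budget (converted from usecs to 100 ns ticks) in bits 63:48, the qcount threshold in bits 47:32, and ECOUNT-1 in the low bits. A worked example with hypothetical values (not the driver defaults):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t time_wait = 100;	/* usecs, hypothetical */
		uint64_t qcount_wait = 16;	/* hypothetical */
		uint64_t ecount_wait = 32;	/* hypothetical */

		uint64_t val = ((time_wait * 10) << 48) |	/* 100 us -> 1000 ticks */
			       (qcount_wait << 32) |
			       (ecount_wait - 1);

		printf("NIX_LF_CINTX_WAIT = 0x%016llx\n",
		       (unsigned long long)val);
		return 0;
	}
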
+dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ gfp_t gfp)
+{
+ dma_addr_t iova;
+
+	/* Check if request can be accommodated in previously allocated page */
+ if (pool->page && ((pool->page_offset + pool->rbsize) <=
+ (PAGE_SIZE << pool->rbpage_order))) {
+ pool->pageref++;
+ goto ret;
+ }
+
+ otx2_get_page(pool);
+
+ /* Allocate a new page */
+ pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ pool->rbpage_order);
+ if (unlikely(!pool->page))
+ return -ENOMEM;
+
+ pool->page_offset = 0;
+ret:
+ iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
+ pool->rbsize, DMA_FROM_DEVICE);
+ if (!iova) {
+ if (!pool->page_offset)
+ __free_pages(pool->page, pool->rbpage_order);
+ pool->page = NULL;
+ return -ENOMEM;
+ }
+ pool->page_offset += pool->rbsize;
+ return iova;
+}
+
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ schedule_work(&pfvf->reset_task);
+}
+
+void otx2_get_mac_from_af(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int err;
+
+ err = otx2_hw_get_mac_addr(pfvf, netdev);
+ if (err)
+ dev_warn(pfvf->dev, "Failed to read mac from hardware\n");
+
+ /* If AF doesn't provide a valid MAC, generate a random one */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ eth_hw_addr_random(netdev);
+}
+
+static int otx2_get_link(struct otx2_nic *pfvf)
+{
+ int link = 0;
+ u16 map;
+
+ /* cgx lmac link */
+ if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
+ map = pfvf->hw.tx_chan_base & 0x7FF;
+ link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
+ }
+ /* LBK channel */
+ if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
+ link = 12;
+
+ return link;
+}
+
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ struct nix_txschq_config *req;
+ u64 schq, parent;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->lvl = lvl;
+ req->num_regs = 1;
+
+ schq = hw->txschq_list[lvl][0];
+	/* Set topology etc. configuration */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ req->reg[0] = NIX_AF_SMQX_CFG(schq);
+ req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
+ OTX2_MIN_MTU;
+
+ req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
+ (0x2ULL << 36);
+ req->num_regs++;
+ /* MDQ config */
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ req->reg[1] = NIX_AF_MDQX_PARENT(schq);
+ req->regval[1] = parent << 16;
+ req->num_regs++;
+ /* Set DWRR quantum */
+ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
+ req->regval[2] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ req->reg[0] = NIX_AF_TL4X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ req->reg[0] = NIX_AF_TL3X_PARENT(schq);
+ req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ req->reg[0] = NIX_AF_TL2X_PARENT(schq);
+ req->regval[0] = parent << 16;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ otx2_get_link(pfvf));
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+
+ } else if (lvl == NIX_TXSCH_LVL_TL1) {
+ /* Default config for TL1.
+ * For VF this is always ignored.
+ */
+
+ /* Set DWRR quantum */
+ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+ req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+ req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL1X_CIR(schq);
+ req->regval[2] = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_txsch_alloc(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_alloc_req *req;
+ int lvl;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ req->schq[lvl] = 1;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_txschq_stop(struct otx2_nic *pfvf)
+{
+ struct nix_txsch_free_req *free_req;
+ int lvl, schq, err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ /* Free the transmit schedulers */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ /* Clear the txschq list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
+ pfvf->hw.txschq_list[lvl][schq] = 0;
+ }
+ return err;
+}
+
+void otx2_sqb_flush(struct otx2_nic *pfvf)
+{
+ int qidx, sqe_tail, sqe_head;
+ u64 incr, *ptr, val;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ incr = (u64)qidx << 32;
+ while (1) {
+ val = otx2_atomic64_add(incr, ptr);
+ sqe_head = (val >> 20) & 0x3F;
+ sqe_tail = (val >> 28) & 0x3F;
+ if (sqe_head == sqe_tail)
+ break;
+ usleep_range(1, 3);
+ }
+ }
+}
+
+/* RED and drop levels of CQ on packet reception.
+ * For CQ, the level is a measure of emptiness (0x0 = full, 255 = empty).
+ */
+#define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize))
+#define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize))
+
+/* RED and drop levels of AURA for packet reception.
+ * For AURA, the level is a measure of fullness (0x0 = empty, 255 = full).
+ * E.g. for an RQ length of 1K with pass/drop levels of 204/230,
+ * RED accepts pkts if free pointers > 102 and <= 205, and
+ * drops pkts if free pointers < 102.
+ */
+#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
+#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
+
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
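
To make the CQ levels concrete, the macros map a skid (in CQEs) onto the 0..255 emptiness scale described above. A worked example with a hypothetical rq_skid of 600 and a 1K-entry CQ (illustrative values, not the driver defaults):

	/* RQ_PASS_LVL_CQ(600, 1024) = ((600 + 16) * 256) / 1024 = 154
	 * RQ_DROP_LVL_CQ(600, 1024) = (600 * 256) / 1024        = 150
	 *
	 * Roughly: packets pass while emptiness stays at or above 154/256
	 * of the queue, RED drops probabilistically between 150 and 154,
	 * and everything is dropped once emptiness falls below 150/256.
	 */
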
+static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->rq.cq = qidx;
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 1;
+ aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
+ aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
+ aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
+ aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
+ aq->rq.qint_idx = 0;
+ aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
+ aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
+ aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
+ aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[sqb_aura];
+ sq = &qset->sq[qidx];
+ sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
+ sq->sqe_cnt = qset->sqe_cnt;
+
+ err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+
+ sq->sqe_base = sq->sqe->base;
+ sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
+ if (!sq->sg)
+ return -ENOMEM;
+
+ sq->head = 0;
+ sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
+ sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
+ /* Set SQE threshold to 10% of total SQEs */
+ sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
+ sq->aura_id = sqb_aura;
+ sq->aura_fc_addr = pool->fc_addr->base;
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
+
+ sq->stats.bytes = 0;
+ sq->stats.pkts = 0;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+	/* Only one SMQ is allocated, map all SQs to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+	/* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+	 * must be maintained to avoid CQ overflow.
+	 */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
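+	/* Worked example (assuming the default 4K-entry SQ): cq_limit =
+	 * (2000 * 256) / 4096 = 125 on the 0-255 scale, which corresponds
+	 * exactly to the 2000-CQE skid described above.
+	 */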
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct nix_aq_enq_req *aq;
+ struct otx2_cq_queue *cq;
+ int err, pool_id;
+
+ cq = &qset->cq[qidx];
+ cq->cq_idx = qidx;
+ if (qidx < pfvf->hw.rx_queues) {
+ cq->cq_type = CQ_RX;
+ cq->cint_idx = qidx;
+ cq->cqe_cnt = qset->rqe_cnt;
+ } else {
+ cq->cq_type = CQ_TX;
+ cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ }
+ cq->cqe_size = pfvf->qset.xqe_size;
+
+ /* Allocate memory for CQEs */
+ err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
+ if (err)
+ return err;
+
+ /* Save CQE CPU base for faster reference */
+ cq->cqe_base = cq->cqe->base;
+	/* If all RQ auras point to a single pool, then the receive
+	 * buffer pools of all CQs also point to that same pool.
+	 */
+ pool_id = ((cq->cq_type == CQ_RX) &&
+ (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
+ cq->rbpool = &qset->pool[pool_id];
+ cq->refill_task_sched = false;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->cq.ena = 1;
+ aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
+ aq->cq.caching = 1;
+ aq->cq.base = cq->cqe->iova;
+ aq->cq.cint_idx = cq->cint_idx;
+ aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
+ aq->cq.qint_idx = 0;
+ aq->cq.avg_level = 255;
+
+ if (qidx < pfvf->hw.rx_queues) {
+ aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
+ aq->cq.drop_ena = 1;
+ }
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static void otx2_pool_refill_task(struct work_struct *work)
+{
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *rbpool;
+ struct refill_work *wrk;
+ int qidx, free_ptrs = 0;
+ struct otx2_nic *pfvf;
+ s64 bufptr;
+
+ wrk = container_of(work, struct refill_work, pool_refill_work.work);
+ pfvf = wrk->pf;
+ qidx = wrk - pfvf->refill_wrk;
+ cq = &pfvf->qset.cq[qidx];
+ rbpool = cq->rbpool;
+ free_ptrs = cq->pool_ptrs;
+
+ while (cq->pool_ptrs) {
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ if (bufptr <= 0) {
+			/* Reschedule the work queue if we failed to free at
+			 * least half of the pointers; else re-enable NAPI
+			 * for this RQ.
+			 */
+ if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
+ struct delayed_work *dwork;
+
+ dwork = &wrk->pool_refill_work;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ } else {
+ cq->refill_task_sched = false;
+ }
+ return;
+ }
+ otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+ cq->refill_task_sched = false;
+}
+
+int otx2_config_nix_queues(struct otx2_nic *pfvf)
+{
+ int qidx, err;
+
+ /* Initialize RX queues */
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+
+ err = otx2_rq_init(pfvf, qidx, lpb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize TX queues */
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+
+ err = otx2_sq_init(pfvf, qidx, sqb_aura);
+ if (err)
+ return err;
+ }
+
+ /* Initialize completion queues */
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ err = otx2_cq_init(pfvf, qidx);
+ if (err)
+ return err;
+ }
+
+ /* Initialize work queue for receive buffer refill */
+ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
+ sizeof(struct refill_work), GFP_KERNEL);
+ if (!pfvf->refill_wrk)
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
+ pfvf->refill_wrk[qidx].pf = pfvf;
+ INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
+ otx2_pool_refill_task);
+ }
+ return 0;
+}
+
+int otx2_config_nix(struct otx2_nic *pfvf)
+{
+ struct nix_lf_alloc_req *nixlf;
+ struct nix_lf_alloc_rsp *rsp;
+ int err;
+
+ pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+
+ /* Get memory to put this msg */
+ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
+ if (!nixlf)
+ return -ENOMEM;
+
+ /* Set RQ/SQ/CQ counts */
+ nixlf->rq_cnt = pfvf->hw.rx_queues;
+ nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->cq_cnt = pfvf->qset.cq_cnt;
+ nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
+ nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->xqe_sz = NIX_XQESZ_W16;
+ /* We don't know absolute NPA LF idx attached.
+ * AF will replace 'RVU_DEFAULT_PF_FUNC' with
+ * NPA LF attached to this RVU PF/VF.
+ */
+ nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
+ /* Disable alignment pad, enable L2 length check,
+ * enable L4 TCP/UDP checksum verification.
+ */
+ nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+
+ rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
+ &nixlf->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->qints < 1)
+ return -ENXIO;
+
+ return rsp->hdr.rc;
+}
+
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ int sqb, qidx;
+ u64 iova, pa;
+
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ sq = &qset->sq[qidx];
+ if (!sq->sqb_ptrs)
+ continue;
+ for (sqb = 0; sqb < sq->sqb_count; sqb++) {
+ if (!sq->sqb_ptrs[sqb])
+ continue;
+ iova = sq->sqb_ptrs[sqb];
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ }
+ sq->sqb_count = 0;
+ }
+}
+
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
+{
+ int pool_id, pool_start = 0, pool_end = 0, size = 0;
+ u64 iova, pa;
+
+ if (type == AURA_NIX_SQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pool_start + pfvf->hw.sqpool_cnt;
+ size = pfvf->hw.sqb_size;
+ }
+ if (type == AURA_NIX_RQ) {
+ pool_start = otx2_get_pool_idx(pfvf, type, 0);
+ pool_end = pfvf->hw.rqpool_cnt;
+ size = pfvf->rbsize;
+ }
+
+ /* Free SQB and RQB pointers from the aura pool */
+ for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ while (iova) {
+ if (type == AURA_NIX_RQ)
+ iova -= OTX2_HEAD_ROOM;
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ dma_unmap_page_attrs(pfvf->dev, iova, size,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ iova = otx2_aura_allocptr(pfvf, pool_id);
+ }
+ }
+}
+
+void otx2_aura_pool_free(struct otx2_nic *pfvf)
+{
+ struct otx2_pool *pool;
+ int pool_id;
+
+ if (!pfvf->qset.pool)
+ return;
+
+ for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ qmem_free(pfvf->dev, pool->stack);
+ qmem_free(pfvf->dev, pool->fc_addr);
+ }
+ devm_kfree(pfvf->dev, pfvf->qset.pool);
+}
+
+static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
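+	/* Example of the shift arithmetic above (illustrative values):
+	 * numptrs = 1024 gives aq->aura.shift = ilog2(1024) - 8 = 2,
+	 * while numptrs = 256 gives a shift of 0.
+	 */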
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+ pool->rbpage_order = get_order(buf_size);
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ int qidx, pool_id, stack_pages, num_sqbs;
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct otx2_snd_queue *sq;
+ struct otx2_pool *pool;
+ int err, ptr;
+ s64 bufptr;
+
+	/* Calculate the number of SQBs needed.
+	 *
+	 * For a 128-byte SQE and a 4K-sized SQB, 31 SQEs fit in one SQB;
+	 * the last SQE slot is used to point to the next SQB.
+	 */
+ num_sqbs = (hw->sqb_size / 128) - 1;
+ num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
+
+	/* Get the number of stack pages needed */
+ stack_pages =
+ (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
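+	/* Worked example (assuming a 4KB SQB as per the comment above,
+	 * and an illustrative stack_pg_ptrs of 128 as reported by AF):
+	 * 4096 / 128 - 1 = 31 usable SQEs per SQB, so a 4K-entry SQ
+	 * needs (4096 + 31) / 31 = 133 SQBs and
+	 * (133 + 127) / 128 = 2 stack pages.
+	 */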
+
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+ if (err)
+ goto fail;
+
+ /* Initialize pool context */
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_sqbs, hw->sqb_size);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ sq = &qset->sq[qidx];
+ sq->sqb_count = 0;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
+ if (!sq->sqb_ptrs)
+ return -ENOMEM;
+
+ for (ptr = 0; ptr < num_sqbs; ptr++) {
+ bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ if (bufptr <= 0)
+ return bufptr;
+ otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
+ }
+ otx2_get_page(pool);
+ }
+
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int stack_pages, pool_id, rq;
+ struct otx2_pool *pool;
+ int err, ptr, num_ptrs;
+ s64 bufptr;
+
+ num_ptrs = pfvf->qset.rqe_cnt;
+
+ stack_pages =
+ (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+ for (rq = 0; rq < hw->rx_queues; rq++) {
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
+ /* Initialize aura context */
+ err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
+ if (err)
+ goto fail;
+ }
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ err = otx2_pool_init(pfvf, pool_id, stack_pages,
+ num_ptrs, pfvf->rbsize);
+ if (err)
+ goto fail;
+ }
+
+ /* Flush accumulated messages */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ pool = &pfvf->qset.pool[pool_id];
+ for (ptr = 0; ptr < num_ptrs; ptr++) {
+ bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ if (bufptr <= 0)
+ return bufptr;
+ otx2_aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
+ }
+ otx2_get_page(pool);
+ }
+
+ return 0;
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ otx2_aura_pool_free(pfvf);
+ return err;
+}
+
+int otx2_config_npa(struct otx2_nic *pfvf)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct npa_lf_alloc_req *npalf;
+ struct otx2_hw *hw = &pfvf->hw;
+ int aura_cnt;
+
+	/* Pool - stack of free buffer pointers.
+	 * Aura - allocates/frees pointers from/to the pool for NIX DMA.
+	 */
+
+ if (!hw->pool_cnt)
+ return -EINVAL;
+
+ qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
+ hw->pool_cnt, GFP_KERNEL);
+ if (!qset->pool)
+ return -ENOMEM;
+
+ /* Get memory to put this msg */
+ npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
+ if (!npalf)
+ return -ENOMEM;
+
+ /* Set aura and pool counts */
+ npalf->nr_pools = hw->pool_cnt;
+ aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
+ npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
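+	/* Worked example of the aura_sz encoding above (illustrative):
+	 * hw->pool_cnt = 200 gives roundup_pow_of_two(200) = 256, so
+	 * aura_cnt = ilog2(256) = 8 and aura_sz = 8 - 6 = 2; pool counts
+	 * of 128 or fewer fall back to the minimum encoding of 1.
+	 */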
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int otx2_detach_resources(struct mbox *mbox)
+{
+ struct rsrc_detach *detach;
+
+ otx2_mbox_lock(mbox);
+ detach = otx2_mbox_alloc_msg_detach_resources(mbox);
+ if (!detach) {
+ otx2_mbox_unlock(mbox);
+ return -ENOMEM;
+ }
+
+ /* detach all */
+ detach->partial = false;
+
+ /* Send detach request to AF */
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_mbox_unlock(mbox);
+ return 0;
+}
+
+int otx2_attach_npa_nix(struct otx2_nic *pfvf)
+{
+ struct rsrc_attach *attach;
+ struct msg_req *msix;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
+ if (!attach) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ attach->npalf = true;
+ attach->nixlf = true;
+
+ /* Send attach request to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+
+	/* If the platform has two NIX blocks, then the LF may be
+	 * allocated from NIX1.
+	 */
+ if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+
+ /* Get NPA and NIX MSIX vector offsets */
+ msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
+ if (!msix) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+ }
+ otx2_mbox_unlock(&pfvf->mbox);
+
+ if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
+ pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
+ dev_err(pfvf->dev,
+ "RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
+{
+ struct hwctx_disable_req *req;
+
+ otx2_mbox_lock(mbox);
+ /* Request AQ to disable this context */
+ if (npa)
+ req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
+
+ if (!req) {
+ otx2_mbox_unlock(mbox);
+ return;
+ }
+
+ req->ctype = type;
+
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
+ __func__);
+
+ otx2_mbox_unlock(mbox);
+}
+
+/* Mbox message handlers */
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp)
+{
+ int id;
+
+ for (id = 0; id < CGX_RX_STATS_COUNT; id++)
+ pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
+ for (id = 0; id < CGX_TX_STATS_COUNT; id++)
+ pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
+}
+
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ int lvl, schq;
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (schq = 0; schq < rsp->schq[lvl]; schq++)
+ pf->hw.txschq_list[lvl][schq] =
+ rsp->schq_list[lvl][schq];
+}
+
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
+ pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
+}
+
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ pfvf->hw.sqb_size = rsp->sqb_size;
+ pfvf->hw.rx_chan_base = rsp->rx_chan_base;
+ pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
+ pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+}
+
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp)
+{
+ pfvf->hw.npa_msixoff = rsp->npa_msixoff;
+ pfvf->hw.nix_msixoff = rsp->nix_msixoff;
+}
+
+void otx2_free_cints(struct otx2_nic *pfvf, int n)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_hw *hw = &pfvf->hw;
+ int irq, qidx;
+
+ for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ qidx < n;
+ qidx++, irq++) {
+ int vector = pci_irq_vector(pfvf->pdev, irq);
+
+ irq_set_affinity_hint(vector, NULL);
+ free_cpumask_var(hw->affinity_mask[irq]);
+ free_irq(vector, &qset->napi[qidx]);
+ }
+}
+
+void otx2_set_cints_affinity(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int vec, cpu, irq, cint;
+
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ cpu = cpumask_first(cpu_online_mask);
+
+ /* CQ interrupts */
+ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
+ if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
+ return;
+
+ cpumask_set_cpu(cpu, hw->affinity_mask[vec]);
+
+ irq = pci_irq_vector(pfvf->pdev, vec);
+ irq_set_affinity_hint(irq, hw->affinity_mask[vec]);
+
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ if (unlikely(cpu >= nr_cpu_ids))
+ cpu = 0;
+ }
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int __weak \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp) \
+{ \
+ /* Nothing to do here */ \
+ return 0; \
+} \
+EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
+MBOX_UP_CGX_MESSAGES
+#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
new file mode 100644
index 000000000000..320f3b7bf57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -0,0 +1,615 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_COMMON_H
+#define OTX2_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/iommu.h>
+
+#include <mbox.h>
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+
+#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+enum aura_mapped_qtypes {
+ AURA_NIX_RQ,
+ AURA_NIX_SQ,
+};
+
+/* NIX LF interrupts range */
+#define NIX_LF_QINT_VEC_START 0x00
+#define NIX_LF_CINT_VEC_START 0x40
+#define NIX_LF_GINT_VEC 0x80
+#define NIX_LF_ERR_VEC 0x81
+#define NIX_LF_POISON_VEC 0x82
+
+/* RSS configuration */
+struct otx2_rss_info {
+ u8 enable;
+ u32 flowkey_cfg;
+ u16 rss_size;
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
+ u8 key[RSS_HASH_KEY_SIZE];
+};
+
+/* NIX (or NPC) RX errors */
+enum otx2_errlvl {
+ NPC_ERRLVL_RE,
+ NPC_ERRLVL_LID_LA,
+ NPC_ERRLVL_LID_LB,
+ NPC_ERRLVL_LID_LC,
+ NPC_ERRLVL_LID_LD,
+ NPC_ERRLVL_LID_LE,
+ NPC_ERRLVL_LID_LF,
+ NPC_ERRLVL_LID_LG,
+ NPC_ERRLVL_LID_LH,
+ NPC_ERRLVL_NIX = 0x0F,
+};
+
+enum otx2_errcodes_re {
+ /* NPC_ERRLVL_RE errcodes */
+ ERRCODE_FCS = 0x7,
+ ERRCODE_FCS_RCV = 0x8,
+ ERRCODE_UNDERSIZE = 0x10,
+ ERRCODE_OVERSIZE = 0x11,
+ ERRCODE_OL2_LEN_MISMATCH = 0x12,
+ /* NPC_ERRLVL_NIX errcodes */
+ ERRCODE_OL3_LEN = 0x10,
+ ERRCODE_OL4_LEN = 0x11,
+ ERRCODE_OL4_CSUM = 0x12,
+ ERRCODE_IL3_LEN = 0x20,
+ ERRCODE_IL4_LEN = 0x21,
+ ERRCODE_IL4_CSUM = 0x22,
+};
+
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
+
+struct otx2_dev_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
+ u64 rx_drops;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+/* Driver counted stats */
+struct otx2_drv_stats {
+ atomic_t rx_fcs_errs;
+ atomic_t rx_oversize_errs;
+ atomic_t rx_undersize_errs;
+ atomic_t rx_csum_errs;
+ atomic_t rx_len_errs;
+ atomic_t rx_other_errs;
+};
+
+struct mbox {
+ struct otx2_mbox mbox;
+ struct work_struct mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct work_struct mbox_up_wrk;
+ struct otx2_nic *pfvf;
+ void *bbuf_base; /* Bounce buffer for mbox memory */
+ struct mutex lock; /* serialize mailbox access */
+ int num_msgs; /* mbox number of messages */
+ int up_num_msgs; /* mbox_up number of messages */
+};
+
+struct otx2_hw {
+ struct pci_dev *pdev;
+ struct otx2_rss_info rss_info;
+ u16 rx_queues;
+ u16 tx_queues;
+ u16 max_queues;
+ u16 pool_cnt;
+ u16 rqpool_cnt;
+ u16 sqpool_cnt;
+
+ /* NPA */
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 sqb_size;
+
+ /* NIX */
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+
+ /* HW settings, coalescing etc */
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u16 cq_qcount_wait;
+ u16 cq_ecount_wait;
+ u16 rq_skid;
+ u8 cq_time_wait;
+
+ /* For TSO segmentation */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 hw_tso;
+
+ /* MSI-X */
+ u8 cint_cnt; /* CQ interrupt count */
+ u16 npa_msixoff; /* Offset of NPA vectors */
+ u16 nix_msixoff; /* Offset of NIX vectors */
+ char *irq_name;
+ cpumask_var_t *affinity_mask;
+
+ /* Stats */
+ struct otx2_dev_stats dev_stats;
+ struct otx2_drv_stats drv_stats;
+ u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
+ u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+};
+
+struct refill_work {
+ struct delayed_work pool_refill_work;
+ struct otx2_nic *pf;
+};
+
+struct otx2_nic {
+ void __iomem *reg_base;
+ struct net_device *netdev;
+ void *iommu_domain;
+ u16 max_frs;
+ u16 rbsize; /* Receive buffer size */
+
+#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+ u64 flags;
+
+ struct otx2_qset qset;
+ struct otx2_hw hw;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* Mbox */
+ struct mbox mbox;
+ struct workqueue_struct *mbox_wq;
+
+ u16 pcifunc; /* RVU PF_FUNC */
+ struct cgx_link_user_info linfo;
+
+ u64 reset_count;
+ struct work_struct reset_task;
+ struct refill_work *refill_wrk;
+
+ /* Ethtool stuff */
+ u32 msg_enable;
+
+ /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
+ int nix_blkaddr;
+};
+
+static inline bool is_96xx_A0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline bool is_96xx_B0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x01) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
+static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+
+ pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
+
+ hw->hw_tso = true;
+
+ if (is_96xx_A0(pfvf->pdev)) {
+ hw->hw_tso = false;
+
+ /* Time based irq coalescing is not supported */
+ pfvf->hw.cq_qcount_wait = 0x0;
+
+		/* Due to a HW issue, previous silicons require a minimum
+		 * of 600 unused CQEs to avoid CQ overflow.
+		 */
+ pfvf->hw.rq_skid = 600;
+ pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
+ }
+}
+
+/* Register read/write APIs */
+static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
+{
+ u64 blkaddr;
+
+ switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
+ case BLKTYPE_NIX:
+ blkaddr = nic->nix_blkaddr;
+ break;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ break;
+ default:
+ blkaddr = BLKADDR_RVUM;
+ break;
+	}
+
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
+
+ return nic->reg_base + offset;
+}
+
+static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ writeq(val, addr);
+}
+
+static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
+{
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ return readq(addr);
+}
+
+/* Mbox bounce buffer APIs */
+static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
+{
+ struct otx2_mbox *otx2_mbox;
+ struct otx2_mbox_dev *mdev;
+
+ mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!mbox->bbuf_base)
+ return -ENOMEM;
+
+	/* Overwrite mbox mbase to point to the bounce buffer, so that the
+	 * PF/VF prepares all mbox messages in the bounce buffer instead of
+	 * directly in HW mbox memory.
+	 */
+ otx2_mbox = &mbox->mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+
+ otx2_mbox = &mbox->mbox_up;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = mbox->bbuf_base;
+ return 0;
+}
+
+static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
+static inline void otx2_mbox_lock_init(struct mbox *mbox)
+{
+ mutex_init(&mbox->lock);
+}
+
+static inline void otx2_mbox_lock(struct mbox *mbox)
+{
+ mutex_lock(&mbox->lock);
+}
+
+static inline void otx2_mbox_unlock(struct mbox *mbox)
+{
+ mutex_unlock(&mbox->lock);
+}
+
+/* In the absence of an API for 128-bit I/O memory access on arm64,
+ * implement the required operations in place.
+ */
+#if defined(CONFIG_ARM64)
+static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
+{
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
+}
+
+static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r]"=r"(result), "+m"(*ptr)
+ : [i]"r"(incr), [b]"r"(ptr)
+ : "memory");
+ return result;
+}
+
+static inline u64 otx2_lmt_flush(uint64_t addr)
+{
+ u64 result = 0;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldeor xzr,%x[rf],[%[rs]]"
+ : [rf]"=r"(result)
+ : [rs]"r"(addr));
+ return result;
+}
+
+#else
+#define otx2_write128(lo, hi, addr)
+#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+#define otx2_lmt_flush(addr) ({ 0; })
+#endif
+
+/* Alloc pointer from pool/aura */
+static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
+{
+ u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
+ NPA_LF_AURA_OP_ALLOCX(0));
+ u64 incr = (u64)aura | BIT_ULL(63);
+
+ return otx2_atomic64_add(incr, ptr);
+}
+
+/* Free pointer to a pool/aura */
+static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
+ int aura, s64 buf)
+{
+ otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
+ otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+}
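+/* Usage note for the two helpers above, inferred from their callers in
+ * this patch: the atomic add to the ALLOC register reads back the IOVA
+ * of the allocated buffer, or zero once the aura is empty (hence loops
+ * such as "while (iova)" in otx2_free_aura_ptr()), while the 128-bit
+ * write in otx2_aura_freeptr() hands the buffer address and the aura id
+ * to HW in a single store.
+ */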
+
+/* Update page ref count */
+static inline void otx2_get_page(struct otx2_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ if (pool->pageref)
+ page_ref_add(pool->page, pool->pageref);
+ pool->pageref = 0;
+ pool->page = NULL;
+}
+
+static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
+{
+ if (type == AURA_NIX_SQ)
+ return pfvf->hw.rqpool_cnt + idx;
+
+ /* AURA_NIX_RQ */
+ return idx;
+}
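+/* Example mapping (illustrative, with hw.rqpool_cnt == 8): RQ auras
+ * occupy pool indices 0..7, so AURA_NIX_RQ idx 3 maps to pool 3 while
+ * AURA_NIX_SQ idx 3 maps to pool 11.
+ */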
+
+/* Mbox APIs */
+static inline int otx2_sync_mbox_msg(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
+}
+
+/* Use this API to send mbox msgs in atomic context
+ * where sleeping is not allowed
+ */
+static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(&mbox->mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(&mbox->mbox, 0);
+ err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
+}
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &mbox->mbox, 0, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_MESSAGES
+#undef M
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+int \
+otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
+ struct _req_type *req, \
+ struct _rsp_type *rsp); \
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Time to wait before the watchdog kicks in */
+#define OTX2_TX_TIMEOUT (100 * HZ)
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+static inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
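+/* Illustrative decode of the pcifunc layout above: the PF number lives
+ * in bits [15:10] and the function in bits [9:0], so for a hypothetical
+ * pcifunc of 0x0c01, rvu_get_pf() returns 3 and the function part is 1.
+ */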
+
+static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t iova;
+
+ iova = dma_map_page_attrs(pfvf->dev, page,
+ offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova)))
+ return (dma_addr_t)NULL;
+ return iova;
+}
+
+static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_unmap_page_attrs(pfvf->dev, addr, size,
+ dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+/* MSI-X APIs */
+void otx2_free_cints(struct otx2_nic *pfvf, int n);
+void otx2_set_cints_affinity(struct otx2_nic *pfvf);
+int otx2_set_mac_address(struct net_device *netdev, void *p);
+int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
+void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
+void otx2_get_mac_from_af(struct net_device *netdev);
+void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+
+/* RVU block related APIs */
+int otx2_attach_npa_nix(struct otx2_nic *pfvf);
+int otx2_detach_resources(struct mbox *mbox);
+int otx2_config_npa(struct otx2_nic *pfvf);
+int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
+int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
+void otx2_aura_pool_free(struct otx2_nic *pfvf);
+void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
+void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
+int otx2_config_nix(struct otx2_nic *pfvf);
+int otx2_config_nix_queues(struct otx2_nic *pfvf);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txsch_alloc(struct otx2_nic *pfvf);
+int otx2_txschq_stop(struct otx2_nic *pfvf);
+void otx2_sqb_flush(struct otx2_nic *pfvf);
+dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ gfp_t gfp);
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
+void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+
+/* RSS configuration APIs*/
+int otx2_rss_init(struct otx2_nic *pfvf);
+int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
+void otx2_set_rss_key(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf);
+
+/* Mbox handlers */
+void mbox_handler_msix_offset(struct otx2_nic *pfvf,
+ struct msix_offset_rsp *rsp);
+void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ struct npa_lf_alloc_rsp *rsp);
+void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
+ struct nix_lf_alloc_rsp *rsp);
+void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+ struct nix_txsch_alloc_rsp *rsp);
+void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
+ struct cgx_stats_rsp *rsp);
+
+/* Device stats APIs */
+void otx2_get_dev_stats(struct otx2_nic *pfvf);
+void otx2_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
+int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
+void otx2_set_ethtool_ops(struct net_device *netdev);
+
+int otx2_open(struct net_device *netdev);
+int otx2_stop(struct net_device *netdev);
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
new file mode 100644
index 000000000000..60fcf82dd8cb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/stddef.h>
+#include <linux/etherdevice.h>
+#include <linux/log2.h>
+
+#include "otx2_common.h"
+
+#define DRV_NAME "octeontx2-nicpf"
+
+struct otx2_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+/* HW device stats */
+#define OTX2_DEV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
+}
+
+static const struct otx2_stat otx2_dev_stats[] = {
+ OTX2_DEV_STAT(rx_ucast_frames),
+ OTX2_DEV_STAT(rx_bcast_frames),
+ OTX2_DEV_STAT(rx_mcast_frames),
+
+ OTX2_DEV_STAT(tx_ucast_frames),
+ OTX2_DEV_STAT(tx_bcast_frames),
+ OTX2_DEV_STAT(tx_mcast_frames),
+};
+
+/* Driver level stats */
+#define OTX2_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
+}
+
+static const struct otx2_stat otx2_drv_stats[] = {
+ OTX2_DRV_STAT(rx_fcs_errs),
+ OTX2_DRV_STAT(rx_oversize_errs),
+ OTX2_DRV_STAT(rx_undersize_errs),
+ OTX2_DRV_STAT(rx_csum_errs),
+ OTX2_DRV_STAT(rx_len_errs),
+ OTX2_DRV_STAT(rx_other_errs),
+};
+
+static const struct otx2_stat otx2_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
+static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
+static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+
+static void otx2_dev_open(struct net_device *netdev)
+{
+ otx2_open(netdev);
+}
+
+static void otx2_dev_stop(struct net_device *netdev)
+{
+ otx2_stop(netdev);
+}
+
+static void otx2_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
+{
+ int start_qidx = qset * pfvf->hw.rx_queues;
+ int qidx, stats;
+
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(pfvf, &data, 0);
+
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2_get_qset_stats(struct otx2_nic *pfvf,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!pfvf)
+ return;
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
+ if (!otx2_update_rq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ if (!otx2_update_sq_stats(pfvf, qidx)) {
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = 0;
+ continue;
+ }
+ for (stat = 0; stat < otx2_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
+ [otx2_queue_stats[stat].index];
+ }
+}
+
+/* Get device and per queue statistics */
+static void otx2_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(pfvf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&pfvf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(pfvf, stats, &data);
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ *(data++) = pfvf->reset_count;
+}
+
+static int otx2_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+}
+
+/* Get the number of queues the device supports and the current queue count */
+static void otx2_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ channel->max_rx = pfvf->hw.max_queues;
+ channel->max_tx = pfvf->hw.max_queues;
+
+ channel->rx_count = pfvf->hw.rx_queues;
+ channel->tx_count = pfvf->hw.tx_queues;
+}
+
+/* Set the number of Tx/Rx queues to be used */
+static int otx2_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ bool if_up = netif_running(dev);
+ int err = 0;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ if (if_up)
+ otx2_dev_stop(dev);
+
+ err = otx2_set_real_num_queues(dev, channel->tx_count,
+ channel->rx_count);
+ if (err)
+ goto fail;
+
+ pfvf->hw.rx_queues = channel->rx_count;
+ pfvf->hw.tx_queues = channel->tx_count;
+ pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
+
+fail:
+ if (if_up)
+ otx2_dev_open(dev);
+
+ netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+ pfvf->hw.tx_queues, pfvf->hw.rx_queues);
+
+ return err;
+}
+
+static void otx2_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+
+ ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
+ ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+}
+
+static int otx2_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+ struct otx2_qset *qs = &pfvf->qset;
+ u32 rx_count, tx_count;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+	/* Permitted lengths are 16, 64, 256, 1K, 4K, 16K, 64K, 256K and 1M */
+ rx_count = ring->rx_pending;
+	/* On some silicon variants a skid of reserved CQEs is needed
+	 * to avoid CQ overflow.
+	 */
+ if (rx_count < pfvf->hw.rq_skid)
+ rx_count = pfvf->hw.rq_skid;
+ rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
+
+	/* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+	 * must be maintained to avoid CQ overflow; hence the minimum
+	 * size of 4K.
+	 */
+ tx_count = clamp_t(u32, ring->tx_pending,
+ Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
+ tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
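+	/* Worked example (assuming Q_COUNT(Q_SIZE_4K) == 4096): a request
+	 * of tx_pending = 1000 is clamped up to 4096, while rx_pending =
+	 * 1000 with rq_skid = 600 stays at 1000 and is then rounded by
+	 * Q_SIZE()/Q_COUNT() to the nearest permitted length.
+	 */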
+
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ return 0;
+
+ if (if_up)
+ otx2_dev_stop(netdev);
+
+	/* The counts were rounded to the nearest supported queue size above. */
+ qs->sqe_cnt = tx_count;
+ qs->rqe_cnt = rx_count;
+
+ if (if_up)
+ otx2_dev_open(netdev);
+ return 0;
+}
+
+static int otx2_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+
+ cmd->rx_coalesce_usecs = hw->cq_time_wait;
+ cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
+ cmd->tx_coalesce_usecs = hw->cq_time_wait;
+ cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+
+ return 0;
+}
+
+static int otx2_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx;
+
+ if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
+ ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
+ ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
+ return -EOPNOTSUPP;
+
+ if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
+ return 0;
+
+	/* 'cq_time_wait' is 8-bit and in multiples of 100ns,
+	 * so clamp the user-given value to the range of 1 to 25 usec.
+	 */
+ ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+ ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
+ 1, CQ_TIMER_THRESH_MAX);
+
+	/* Rx and Tx are mapped to the same CQ; check which one changed,
+	 * and if both did, choose the min.
+	 */
+ if (hw->cq_time_wait == ec->rx_coalesce_usecs)
+ hw->cq_time_wait = ec->tx_coalesce_usecs;
+ else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
+ hw->cq_time_wait = ec->rx_coalesce_usecs;
+ else
+ hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
+ ec->tx_coalesce_usecs);
+
+	/* The max supported ecount_wait is 16-bit,
+	 * so clamp the user-given value to the range of 1 to 64K.
+	 */
+ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
+ 1, U16_MAX);
+ ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
+ 1, U16_MAX);
+
+	/* Rx and Tx are mapped to the same CQ; check which one changed,
+	 * and if both did, choose the min.
+	 */
+ if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
+ else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
+ else
+ hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
+ ec->tx_max_coalesced_frames);
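+	/* Example of the selection above: if the current cq_ecount_wait
+	 * is 32 and the user passes rx = 32, tx = 64, the rx value is
+	 * unchanged, so the new wait becomes 64; if both differ from the
+	 * current value, the smaller of the two wins.
+	 */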
+
+ if (netif_running(netdev)) {
+ for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
+ otx2_config_irq_coalescing(pfvf, qidx);
+ }
+
+ return 0;
+}
+
+static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+
+ if (!(rss->flowkey_cfg &
+ (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
+ return 0;
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ u32 rss_cfg = rss->flowkey_cfg;
+
+ if (!rss->enable) {
+ netdev_err(pfvf->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+	/* Minimum is IPv4 and IPv6, SIP/DIP */
+ if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* Different config for v4 and v6 is not supported.
+ * Both of them have to be either 4-tuple or 2-tuple.
+ */
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rss->flowkey_cfg = rss_cfg;
+ otx2_set_flowkey_cfg(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
+
+ return sizeof(rss->key);
+}
+
+static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ return pfvf->hw.rss_info.rss_size;
+}
+
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ int idx;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ int idx;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+
+ otx2_set_rss_table(pfvf);
+ return 0;
+}
+
+static u32 otx2_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->msg_enable;
+}
+
+static void otx2_set_msglevel(struct net_device *netdev, u32 val)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ pfvf->msg_enable = val;
+}
+
+static u32 otx2_get_link(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ return pfvf->linfo.link_up;
+}
+
+static const struct ethtool_ops otx2_ethtool_ops = {
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2_get_drvinfo,
+ .get_strings = otx2_get_strings,
+ .get_ethtool_stats = otx2_get_ethtool_stats,
+ .get_sset_count = otx2_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+};
+
+void otx2_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
new file mode 100644
index 000000000000..85f9b9ba6bd5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -0,0 +1,1349 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/if_vlan.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_txrx.h"
+#include "otx2_struct.h"
+
+#define DRV_NAME "octeontx2-nicpf"
+#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id otx2_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+
+enum {
+ TYPE_PFAF,
+ TYPE_PFVF,
+};
+
+static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2_open(netdev);
+
+ return err;
+}
+
+static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr, int type)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+		/* intr bits are indexed from 0 relative to 'first' */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+		/* hdr->num_msgs is set to zero immediately in the interrupt
+		 * handler so that it holds a correct value the next time
+		 * the interrupt handler is called.
+		 * pf->mbox.num_msgs holds the data for use in
+		 * pfaf_mbox_handler; pf->mbox.up_num_msgs holds the data
+		 * for use in pfaf_mbox_up_handler.
+		 */
+ if (hdr->num_msgs) {
+ mw[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_wrk);
+ }
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ if (type == TYPE_PFAF)
+ otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ if (type == TYPE_PFAF)
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr),
+ sizeof(u64)));
+
+ queue_work(mbox_wq, &mw[i].mbox_up_wrk);
+ }
+ }
+}
+
+static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ pf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_TXSCH_ALLOC:
+ mbox_handler_nix_txsch_alloc(pf,
+ (struct nix_txsch_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_CGX_STATS:
+ mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+}
+
+static void otx2_pfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ struct otx2_nic *pf;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ pf = af_mbox->pfvf;
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2_process_pfaf_mbox_msg(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static void otx2_handle_link_event(struct otx2_nic *pf)
+{
+ struct cgx_link_user_info *linfo = &pf->linfo;
+ struct net_device *netdev = pf->netdev;
+
+ pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed,
+ linfo->full_duplex ? "Full" : "Half");
+ if (linfo->link_up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+}
+
+int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
+ struct cgx_link_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ /* Copy the link info sent by AF */
+ pf->linfo = msg->link_info;
+
+ /* interface has not been fully configured yet */
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return 0;
+
+ otx2_handle_link_event(pf);
+ return 0;
+}
+
+static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
+ struct mbox_msghdr *req)
+{
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &pf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ pf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+ break;
+ default:
+ otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_mbox *mbox = &af_mbox->mbox_up;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_nic *pf = af_mbox->pfvf;
+ int offset, id, devid = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ /* Skip processing VF's messages */
+ if (!devid)
+ otx2_process_mbox_msg_up(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ struct mbox *mbox;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+
+ mbox = &pf->mbox;
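+ /* AF is the only peer on this mailbox: device range starts at 0,
+ * covers one device, and its interrupt status is in bit 0.
+ */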
+ otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_mbox_intr(struct otx2_nic *pf)
+{
+ int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+
+ /* Disable AF => PF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, pf);
+}
+
+static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from AF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_af)
+ return 0;
+
+ /* Check mailbox communication with AF */
+ req = otx2_mbox_alloc_msg_ready(&pf->mbox);
+ if (!req) {
+ otx2_disable_mbox_intr(pf);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2_disable_mbox_intr(pf);
+ return -EPROBE_DEFER;
+ }
+
+ return 0;
+}
+
+static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+
+ if (pf->mbox_wq) {
+ flush_workqueue(pf->mbox_wq);
+ destroy_workqueue(pf->mbox_wq);
+ pf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = pf;
+ pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!pf->mbox_wq)
+ return -ENOMEM;
+
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * the admin function (i.e. AF) and this PF; it shouldn't be mapped
+ * as device memory, so that unaligned accesses are allowed.
+ */
+ hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, pf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
+ otx2_mbox_lock_init(&pf->mbox);
+
+ return 0;
+exit:
+ otx2_pfaf_mbox_destroy(pf);
+ return err;
+}
+
+static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+ return err;
+}
+
+static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+ return err;
+}
+
+int otx2_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+{
+ struct otx2_nic *pf = data;
+ u64 val, *ptr;
+ u64 qidx = 0;
+
+ /* CQ */
+ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
+ ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
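+ /* A 64-bit atomic add with the queue index in the upper address
+ * bits reads back that CQ's interrupt status word.
+ */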
+ val = otx2_atomic64_add((qidx << 44), ptr);
+
+ otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
+ (val & NIX_CQERRINT_BITS));
+ if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+ netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ /* SQ */
+ for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
+ val = otx2_atomic64_add((qidx << 44), ptr);
+ otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
+ (val & NIX_SQINT_BITS));
+
+ if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
+ continue;
+
+ if (val & BIT_ULL(42)) {
+ netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SQ_OP_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
+ qidx,
+ otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SEND_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+ }
+
+ schedule_work(&pf->reset_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+{
+ struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
+ struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
+ int qidx = cq_poll->cint_idx;
+
+ /* Disable interrupts.
+ *
+ * Completion interrupts are level-triggered, and hence have
+ * to be cleared only after they are serviced.
+ */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ /* Schedule NAPI */
+ napi_schedule_irqoff(&cq_poll->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void otx2_disable_napi(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ }
+}
+
+static void otx2_free_cq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_cq_queue *cq;
+ int qidx;
+
+ /* Disable CQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ qmem_free(pf->dev, cq->cqe);
+ }
+}
+
+static void otx2_free_sq_res(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct otx2_snd_queue *sq;
+ int qidx;
+
+ /* Disable SQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
+ /* Free SQB pointers */
+ otx2_sq_free_sqbs(pf);
+ for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ sq = &qset->sq[qidx];
+ qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->tso_hdrs);
+ kfree(sq->sg);
+ kfree(sq->sqb_ptrs);
+ }
+}
+
+static int otx2_init_hw_resources(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_hw *hw = &pf->hw;
+ struct msg_req *req;
+ int err = 0, lvl;
+
+ /* Set required NPA LF's pool counts
+ * Auras and Pools are used in a 1:1 mapping,
+ * so aura count = pool count.
+ */
+ hw->rqpool_cnt = hw->rx_queues;
+ hw->sqpool_cnt = hw->tx_queues;
+ hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
+
+ /* Get the size of receive buffers to allocate */
+ pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+
+ otx2_mbox_lock(mbox);
+ /* NPA init */
+ err = otx2_config_npa(pf);
+ if (err)
+ goto exit;
+
+ /* NIX init */
+ err = otx2_config_nix(pf);
+ if (err)
+ goto err_free_npa_lf;
+
+ /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
+ err = otx2_rq_aura_pool_init(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_nix_lf;
+ }
+ /* Init Auras and pools used by NIX SQ, for queueing SQEs */
+ err = otx2_sq_aura_pool_init(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_rq_ptrs;
+ }
+
+ err = otx2_txsch_alloc(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_sq_ptrs;
+ }
+
+ err = otx2_config_nix_queues(pf);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_txsch;
+ }
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pf, lvl);
+ if (err) {
+ otx2_mbox_unlock(mbox);
+ goto err_free_nix_queues;
+ }
+ }
+ otx2_mbox_unlock(mbox);
+ return err;
+
+err_free_nix_queues:
+ otx2_free_sq_res(pf);
+ otx2_free_cq_res(pf);
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+err_free_txsch:
+ if (otx2_txschq_stop(pf))
+ dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
+err_free_sq_ptrs:
+ otx2_sq_free_sqbs(pf);
+err_free_rq_ptrs:
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+err_free_nix_lf:
+ otx2_mbox_lock(mbox);
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+err_free_npa_lf:
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+exit:
+ otx2_mbox_unlock(mbox);
+ return err;
+}
+
+static void otx2_free_hw_resources(struct otx2_nic *pf)
+{
+ struct otx2_qset *qset = &pf->qset;
+ struct mbox *mbox = &pf->mbox;
+ struct otx2_cq_queue *cq;
+ struct msg_req *req;
+ int qidx, err;
+
+ /* Ensure all SQE are processed */
+ otx2_sqb_flush(pf);
+
+ /* Stop transmission */
+ err = otx2_txschq_stop(pf);
+ if (err)
+ dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+
+ /* Disable RQs */
+ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+
+ /* Dequeue all CQEs */
+ for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
+ cq = &qset->cq[qidx];
+ if (cq->cq_type == CQ_RX)
+ otx2_cleanup_rx_cqes(pf, cq);
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
+
+ otx2_free_sq_res(pf);
+
+ /* Free RQ buffer pointers */
+ otx2_free_aura_ptr(pf, AURA_NIX_RQ);
+
+ otx2_free_cq_res(pf);
+
+ otx2_mbox_lock(mbox);
+ /* Reset NIX LF */
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
+ }
+ otx2_mbox_unlock(mbox);
+
+ /* Disable NPA Pool and Aura hw context */
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
+ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
+ otx2_aura_pool_free(pf);
+
+ otx2_mbox_lock(mbox);
+ /* Reset NPA LF */
+ req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
+ otx2_mbox_unlock(mbox);
+}
+
+int otx2_open(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ netif_carrier_off(netdev);
+
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ /* RQ and SQs are mapped to different CQs,
+ * so find out the max CQ IRQs (i.e. CINTs) needed.
+ */
+ pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
+ qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* CQ size of RQ */
+ qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
+ /* CQ size of SQ */
+ qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+
+ err = -ENOMEM;
+ qset->cq = kcalloc(pf->qset.cq_cnt,
+ sizeof(struct otx2_cq_queue), GFP_KERNEL);
+ if (!qset->cq)
+ goto err_free_mem;
+
+ qset->sq = kcalloc(pf->hw.tx_queues,
+ sizeof(struct otx2_snd_queue), GFP_KERNEL);
+ if (!qset->sq)
+ goto err_free_mem;
+
+ qset->rq = kcalloc(pf->hw.rx_queues,
+ sizeof(struct otx2_rcv_queue), GFP_KERNEL);
+ if (!qset->rq)
+ goto err_free_mem;
+
+ err = otx2_init_hw_resources(pf);
+ if (err)
+ goto err_free_mem;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+ /* RQ0 & SQ0 are mapped to CINT0 and so on.
+ * 'cq_ids[0]' points to RQ's CQ and
+ * 'cq_ids[1]' points to SQ's CQ.
+ */
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
+ qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ cq_poll->dev = (void *)pf;
+ netif_napi_add(netdev, &cq_poll->napi,
+ otx2_napi_handler, NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ }
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(pf, netdev->mtu);
+ if (err)
+ goto err_disable_napi;
+
+ /* Initialize RSS */
+ err = otx2_rss_init(pf);
+ if (err)
+ goto err_disable_napi;
+
+ /* Register Queue IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_q_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for QERR\n",
+ rvu_get_pf(pf->pcifunc));
+ goto err_disable_napi;
+ }
+
+ /* Enable QINT IRQ */
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
+
+ /* Register CQ IRQ handlers */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
+ qidx);
+
+ err = request_irq(pci_irq_vector(pf->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d\n",
+ rvu_get_pf(pf->pcifunc), qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ otx2_config_irq_coalescing(pf, qidx);
+
+ /* Enable CQ IRQ */
+ otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+
+ otx2_set_cints_affinity(pf);
+
+ pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* We may have already received a link status notification */
+ if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_handle_link_event(pf);
+
+ err = otx2_rxtx_enable(pf, true);
+ if (err)
+ goto err_free_cints;
+
+ return 0;
+
+err_free_cints:
+ otx2_free_cints(pf, qidx);
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ synchronize_irq(vec);
+ free_irq(vec, pf);
+err_disable_napi:
+ otx2_disable_napi(pf);
+ otx2_free_hw_resources(pf);
+err_free_mem:
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ return err;
+}
+
+int otx2_stop(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int qidx, vec, wrk;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+ /* 'intf_down' may be checked on any cpu */
+ smp_wmb();
+
+ /* First stop packet Rx/Tx */
+ otx2_rxtx_enable(pf, false);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+ pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+ otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+ synchronize_irq(vec);
+ free_irq(vec, pf);
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(pf->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+
+ netif_tx_disable(netdev);
+
+ otx2_free_hw_resources(pf);
+ otx2_free_cints(pf, pf->hw.cint_cnt);
+ otx2_disable_napi(pf);
+
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
+ kfree(qset->sq);
+ kfree(qset->cq);
+ kfree(qset->rq);
+ kfree(qset->napi);
+ /* Do not clear RQ/SQ ringsize settings */
+ memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
+ sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
+ return 0;
+}
+
+static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sq = &pf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
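+ /* *sq->aura_fc_addr is the HW-updated count of SQBs in use, so
+ * this derives the number of free SQE slots left in the queue.
+ */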
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void otx2_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct nix_rx_mode *req;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ otx2_mbox_lock(&pf->mbox);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pf->mbox);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ /* We don't support MAC address filtering yet */
+ if (netdev->flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ otx2_mbox_unlock(&pf->mbox);
+}
+
+static int otx2_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return otx2_cgx_config_loopback(pf,
+ features & NETIF_F_LOOPBACK);
+ return 0;
+}
+
+static void otx2_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
+
+ if (!netif_running(pf->netdev))
+ return;
+
+ otx2_stop(pf->netdev);
+ pf->reset_count++;
+ otx2_open(pf->netdev);
+ netif_trans_update(pf->netdev);
+}
+
+static const struct net_device_ops otx2_netdev_ops = {
+ .ndo_open = otx2_open,
+ .ndo_stop = otx2_stop,
+ .ndo_start_xmit = otx2_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2_change_mtu,
+ .ndo_set_rx_mode = otx2_set_rx_mode,
+ .ndo_set_features = otx2_set_features,
+ .ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_get_stats64 = otx2_get_stats64,
+};
+
+static int otx2_check_pf_usable(struct otx2_nic *nic)
+{
+ u64 rev;
+
+ rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if AF has set up the revision for the RVUM block;
+ * otherwise this driver's probe should be deferred until the
+ * AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(nic->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+{
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+ /* NPA interrupts are not registered, so alloc only
+ * up to the NIX vector offset.
+ */
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2_disable_mbox_intr(pf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2_register_mbox_intr(pf, false);
+}
+
+static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct otx2_nic *pf;
+ struct otx2_hw *hw;
+ int err, qcount;
+ int num_vec;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /* Set number of queues */
+ qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+
+ netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pf = netdev_priv(netdev);
+ pf->netdev = netdev;
+ pf->pdev = pdev;
+ pf->dev = dev;
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
+
+ hw = &pf->hw;
+ hw->pdev = pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+
+ num_vec = pci_msix_vec_count(pdev);
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ /* Map CSRs */
+ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!pf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = otx2_check_pf_usable(pf);
+ if (err)
+ goto err_free_netdev;
+
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ /* Init PF <=> AF mailbox stuff */
+ err = otx2_pfaf_mbox_init(pf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2_register_mbox_intr(pf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this PF.
+ * NIX and NPA LFs are needed for this PF to function as a NIC.
+ */
+ err = otx2_attach_npa_nix(pf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
+ if (err)
+ goto err_detach_rsrc;
+
+ otx2_setup_dev_hw_settings(pf);
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ /* NPA's pool is a stack to which SW frees buffer pointers via Aura.
+ * HW allocates a buffer pointer from the stack and uses it for DMA'ing
+ * an ingress packet. In some scenarios HW can free back allocated buffer
+ * pointers to the pool. This makes it impossible for SW to maintain a
+ * parallel list where physical addresses of buffer pointers (IOVAs)
+ * given to HW can be saved for later reference.
+ *
+ * So the only way to convert Rx packet's buffer address is to use
+ * IOMMU's iova_to_phys() handler which translates the address by
+ * walking through the translation tables.
+ */
+ pf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+
+ netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2_netdev_ops;
+
+ /* MTU range: 64 - 9190 */
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = OTX2_MAX_MTU;
+
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_detach_rsrc;
+ }
+
+ otx2_set_ethtool_ops(netdev);
+
+ /* Enable link notifications */
+ otx2_cgx_config_linkevents(pf, true);
+
+ return 0;
+
+err_detach_rsrc:
+ otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+ otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+ otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf;
+
+ if (!netdev)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ /* Disable link notifications */
+ otx2_cgx_config_linkevents(pf, false);
+
+ unregister_netdev(netdev);
+ otx2_detach_resources(&pf->mbox);
+ otx2_disable_mbox_intr(pf);
+ otx2_pfaf_mbox_destroy(pf);
+ pci_free_irq_vectors(pf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2_pf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_pf_id_table,
+ .probe = otx2_probe,
+ .shutdown = otx2_remove,
+ .remove = otx2_remove,
+};
+
+static int __init otx2_rvupf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2_pf_driver);
+}
+
+static void __exit otx2_rvupf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2_pf_driver);
+}
+
+module_init(otx2_rvupf_init_module);
+module_exit(otx2_rvupf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
new file mode 100644
index 000000000000..7963d418886a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_REG_H
+#define OTX2_REG_H
+
+#include <rvu_struct.h>
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+#define RVU_FUNC_BLKADDR_SHIFT 20
+#define RVU_FUNC_BLKADDR_MASK 0x1FULL
+
+/* NPA LF registers */
+#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
+#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20)
+#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28)
+#define NPA_LF_AURA_OP_CNT (NPA_LFBASE | 0x30)
+#define NPA_LF_AURA_OP_LIMIT (NPA_LFBASE | 0x50)
+#define NPA_LF_AURA_OP_INT (NPA_LFBASE | 0x60)
+#define NPA_LF_AURA_OP_THRESH (NPA_LFBASE | 0x70)
+#define NPA_LF_POOL_OP_PC (NPA_LFBASE | 0x100)
+#define NPA_LF_POOL_OP_AVAILABLE (NPA_LFBASE | 0x110)
+#define NPA_LF_POOL_OP_PTR_START0 (NPA_LFBASE | 0x120)
+#define NPA_LF_POOL_OP_PTR_START1 (NPA_LFBASE | 0x128)
+#define NPA_LF_POOL_OP_PTR_END0 (NPA_LFBASE | 0x130)
+#define NPA_LF_POOL_OP_PTR_END1 (NPA_LFBASE | 0x138)
+#define NPA_LF_POOL_OP_INT (NPA_LFBASE | 0x160)
+#define NPA_LF_POOL_OP_THRESH (NPA_LFBASE | 0x170)
+#define NPA_LF_ERR_INT (NPA_LFBASE | 0x200)
+#define NPA_LF_ERR_INT_W1S (NPA_LFBASE | 0x208)
+#define NPA_LF_ERR_INT_ENA_W1C (NPA_LFBASE | 0x210)
+#define NPA_LF_ERR_INT_ENA_W1S (NPA_LFBASE | 0x218)
+#define NPA_LF_RAS (NPA_LFBASE | 0x220)
+#define NPA_LF_RAS_W1S (NPA_LFBASE | 0x228)
+#define NPA_LF_RAS_ENA_W1C (NPA_LFBASE | 0x230)
+#define NPA_LF_RAS_ENA_W1S (NPA_LFBASE | 0x238)
+#define NPA_LF_QINTX_CNT(a) (NPA_LFBASE | 0x300 | (a) << 12)
+#define NPA_LF_QINTX_INT(a) (NPA_LFBASE | 0x310 | (a) << 12)
+#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
+#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+
+/* NIX LF registers */
+#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
+#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3)
+#define NIX_LF_CFG (NIX_LFBASE | 0x100)
+#define NIX_LF_GINT (NIX_LFBASE | 0x200)
+#define NIX_LF_GINT_W1S (NIX_LFBASE | 0x208)
+#define NIX_LF_GINT_ENA_W1C (NIX_LFBASE | 0x210)
+#define NIX_LF_GINT_ENA_W1S (NIX_LFBASE | 0x218)
+#define NIX_LF_ERR_INT (NIX_LFBASE | 0x220)
+#define NIX_LF_ERR_INT_W1S (NIX_LFBASE | 0x228)
+#define NIX_LF_ERR_INT_ENA_W1C (NIX_LFBASE | 0x230)
+#define NIX_LF_ERR_INT_ENA_W1S (NIX_LFBASE | 0x238)
+#define NIX_LF_RAS (NIX_LFBASE | 0x240)
+#define NIX_LF_RAS_W1S (NIX_LFBASE | 0x248)
+#define NIX_LF_RAS_ENA_W1C (NIX_LFBASE | 0x250)
+#define NIX_LF_RAS_ENA_W1S (NIX_LFBASE | 0x258)
+#define NIX_LF_SQ_OP_ERR_DBG (NIX_LFBASE | 0x260)
+#define NIX_LF_MNQ_ERR_DBG (NIX_LFBASE | 0x270)
+#define NIX_LF_SEND_ERR_DBG (NIX_LFBASE | 0x280)
+#define NIX_LF_TX_STATX(a) (NIX_LFBASE | 0x300 | (a) << 3)
+#define NIX_LF_RX_STATX(a) (NIX_LFBASE | 0x400 | (a) << 3)
+#define NIX_LF_OP_SENDX(a) (NIX_LFBASE | 0x800 | (a) << 3)
+#define NIX_LF_RQ_OP_INT (NIX_LFBASE | 0x900)
+#define NIX_LF_RQ_OP_OCTS (NIX_LFBASE | 0x910)
+#define NIX_LF_RQ_OP_PKTS (NIX_LFBASE | 0x920)
+#define NIX_LF_OP_IPSEC_DYNO_CN (NIX_LFBASE | 0x980)
+#define NIX_LF_SQ_OP_INT (NIX_LFBASE | 0xa00)
+#define NIX_LF_SQ_OP_OCTS (NIX_LFBASE | 0xa10)
+#define NIX_LF_SQ_OP_PKTS (NIX_LFBASE | 0xa20)
+#define NIX_LF_SQ_OP_STATUS (NIX_LFBASE | 0xa30)
+#define NIX_LF_CQ_OP_INT (NIX_LFBASE | 0xb00)
+#define NIX_LF_CQ_OP_DOOR (NIX_LFBASE | 0xb30)
+#define NIX_LF_CQ_OP_STATUS (NIX_LFBASE | 0xb40)
+#define NIX_LF_QINTX_CNT(a) (NIX_LFBASE | 0xC00 | (a) << 12)
+#define NIX_LF_QINTX_INT(a) (NIX_LFBASE | 0xC10 | (a) << 12)
+#define NIX_LF_QINTX_INT_W1S(a) (NIX_LFBASE | 0xC18 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1S(a) (NIX_LFBASE | 0xC20 | (a) << 12)
+#define NIX_LF_QINTX_ENA_W1C(a) (NIX_LFBASE | 0xC30 | (a) << 12)
+#define NIX_LF_CINTX_CNT(a) (NIX_LFBASE | 0xD00 | (a) << 12)
+#define NIX_LF_CINTX_WAIT(a) (NIX_LFBASE | 0xD10 | (a) << 12)
+#define NIX_LF_CINTX_INT(a) (NIX_LFBASE | 0xD20 | (a) << 12)
+#define NIX_LF_CINTX_INT_W1S(a) (NIX_LFBASE | 0xD30 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
+#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
+
+/* NIX AF transmit scheduler registers */
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+
+/* LMT LF registers */
+#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
+#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
+#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+
+#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
new file mode 100644
index 000000000000..cba59ddf71bb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_STRUCT_H
+#define OTX2_STRUCT_H
+
+/* NIX WQE/CQE size: 128 bytes or 512 bytes */
+enum nix_cqesz_e {
+ NIX_XQESZ_W64 = 0x0,
+ NIX_XQESZ_W16 = 0x1,
+};
+
+enum nix_sqes_e {
+ NIX_SQESZ_W16 = 0x0,
+ NIX_SQESZ_W8 = 0x1,
+};
+
+enum nix_send_ldtype {
+ NIX_SEND_LDTYPE_LDD = 0x0,
+ NIX_SEND_LDTYPE_LDT = 0x1,
+ NIX_SEND_LDTYPE_LDWB = 0x2,
+};
+
+/* CSUM offload */
+enum nix_sendl3type {
+ NIX_SENDL3TYPE_NONE = 0x0,
+ NIX_SENDL3TYPE_IP4 = 0x2,
+ NIX_SENDL3TYPE_IP4_CKSUM = 0x3,
+ NIX_SENDL3TYPE_IP6 = 0x4,
+};
+
+enum nix_sendl4type {
+ NIX_SENDL4TYPE_NONE,
+ NIX_SENDL4TYPE_TCP_CKSUM,
+ NIX_SENDL4TYPE_SCTP_CKSUM,
+ NIX_SENDL4TYPE_UDP_CKSUM,
+};
+
+/* NIX wqe/cqe types */
+enum nix_xqe_type {
+ NIX_XQE_TYPE_INVALID = 0x0,
+ NIX_XQE_TYPE_RX = 0x1,
+ NIX_XQE_TYPE_RX_IPSECS = 0x2,
+ NIX_XQE_TYPE_RX_IPSECH = 0x3,
+ NIX_XQE_TYPE_RX_IPSECD = 0x4,
+ NIX_XQE_TYPE_SEND = 0x8,
+};
+
+/* NIX CQE/SQE subdescriptor types */
+enum nix_subdc {
+ NIX_SUBDC_NOP = 0x0,
+ NIX_SUBDC_EXT = 0x1,
+ NIX_SUBDC_CRC = 0x2,
+ NIX_SUBDC_IMM = 0x3,
+ NIX_SUBDC_SG = 0x4,
+ NIX_SUBDC_MEM = 0x5,
+ NIX_SUBDC_JUMP = 0x6,
+ NIX_SUBDC_WORK = 0x7,
+ NIX_SUBDC_SOD = 0xf,
+};
+
+/* Algorithm for nix_sqe_mem_s header (value of the `alg` field) */
+enum nix_sendmemalg {
+ NIX_SENDMEMALG_E_SET = 0x0,
+ NIX_SENDMEMALG_E_SETTSTMP = 0x1,
+ NIX_SENDMEMALG_E_SETRSLT = 0x2,
+ NIX_SENDMEMALG_E_ADD = 0x8,
+ NIX_SENDMEMALG_E_SUB = 0x9,
+ NIX_SENDMEMALG_E_ADDLEN = 0xa,
+ NIX_SENDMEMALG_E_SUBLEN = 0xb,
+ NIX_SENDMEMALG_E_ADDMBUF = 0xc,
+ NIX_SENDMEMALG_E_SUBMBUF = 0xd,
+ NIX_SENDMEMALG_E_ENUM_LAST = 0xe,
+};
+
+/* NIX CQE header structure */
+struct nix_cqe_hdr_s {
+ u64 flow_tag : 32;
+ u64 q : 20;
+ u64 reserved_52_57 : 6;
+ u64 node : 2;
+ u64 cqe_type : 4;
+};
+
+/* NIX CQE RX parse structure */
+struct nix_rx_parse_s {
+ u64 chan : 12;
+ u64 desc_sizem1 : 5;
+ u64 rsvd_17 : 1;
+ u64 express : 1;
+ u64 wqwd : 1;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 latype : 4;
+ u64 lbtype : 4;
+ u64 lctype : 4;
+ u64 ldtype : 4;
+ u64 letype : 4;
+ u64 lftype : 4;
+ u64 lgtype : 4;
+ u64 lhtype : 4;
+ u64 pkt_lenm1 : 16; /* W1 */
+ u64 l2m : 1;
+ u64 l2b : 1;
+ u64 l3m : 1;
+ u64 l3b : 1;
+ u64 vtag0_valid : 1;
+ u64 vtag0_gone : 1;
+ u64 vtag1_valid : 1;
+ u64 vtag1_gone : 1;
+ u64 pkind : 6;
+ u64 rsvd_95_94 : 2;
+ u64 vtag0_tci : 16;
+ u64 vtag1_tci : 16;
+ u64 laflags : 8; /* W2 */
+ u64 lbflags : 8;
+ u64 lcflags : 8;
+ u64 ldflags : 8;
+ u64 leflags : 8;
+ u64 lfflags : 8;
+ u64 lgflags : 8;
+ u64 lhflags : 8;
+ u64 eoh_ptr : 8; /* W3 */
+ u64 wqe_aura : 20;
+ u64 pb_aura : 20;
+ u64 match_id : 16;
+ u64 laptr : 8; /* W4 */
+ u64 lbptr : 8;
+ u64 lcptr : 8;
+ u64 ldptr : 8;
+ u64 leptr : 8;
+ u64 lfptr : 8;
+ u64 lgptr : 8;
+ u64 lhptr : 8;
+ u64 vtag0_ptr : 8; /* W5 */
+ u64 vtag1_ptr : 8;
+ u64 flow_key_alg : 5;
+ u64 rsvd_383_341 : 43;
+ u64 rsvd_447_384; /* W6 */
+};
+
+/* NIX CQE RX scatter/gather subdescriptor structure */
+struct nix_rx_sg_s {
+ u64 seg_size : 16; /* W0 */
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_59_50 : 10;
+ u64 subdc : 4;
+ u64 seg_addr;
+ u64 seg2_addr;
+ u64 seg3_addr;
+};
+
+struct nix_send_comp_s {
+ u64 status : 8;
+ u64 sqe_id : 16;
+ u64 rsvd_24_63 : 40;
+};
+
+struct nix_cqe_rx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_rx_parse_s parse;
+ struct nix_rx_sg_s sg;
+};
+
+struct nix_cqe_tx_s {
+ struct nix_cqe_hdr_s hdr;
+ struct nix_send_comp_s comp;
+};
+
+/* NIX SQE header structure */
+struct nix_sqe_hdr_s {
+ u64 total : 18; /* W0 */
+ u64 reserved_18 : 1;
+ u64 df : 1;
+ u64 aura : 20;
+ u64 sizem1 : 3;
+ u64 pnc : 1;
+ u64 sq : 20;
+ u64 ol3ptr : 8; /* W1 */
+ u64 ol4ptr : 8;
+ u64 il3ptr : 8;
+ u64 il4ptr : 8;
+ u64 ol3type : 4;
+ u64 ol4type : 4;
+ u64 il3type : 4;
+ u64 il4type : 4;
+ u64 sqe_id : 16;
+};
+
+/* NIX send extended header subdescriptor structure */
+struct nix_sqe_ext_s {
+ u64 lso_mps : 14; /* W0 */
+ u64 lso : 1;
+ u64 tstmp : 1;
+ u64 lso_sb : 8;
+ u64 lso_format : 5;
+ u64 rsvd_31_29 : 3;
+ u64 shp_chg : 9;
+ u64 shp_dis : 1;
+ u64 shp_ra : 2;
+ u64 markptr : 8;
+ u64 markform : 7;
+ u64 mark_en : 1;
+ u64 subdc : 4;
+ u64 vlan0_ins_ptr : 8; /* W1 */
+ u64 vlan0_ins_tci : 16;
+ u64 vlan1_ins_ptr : 8;
+ u64 vlan1_ins_tci : 16;
+ u64 vlan0_ins_ena : 1;
+ u64 vlan1_ins_ena : 1;
+ u64 rsvd_127_114 : 14;
+};
+
+struct nix_sqe_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_54_50 : 5;
+ u64 i1 : 1;
+ u64 i2 : 1;
+ u64 i3 : 1;
+ u64 ld_type : 2;
+ u64 subdc : 4;
+};
+
+/* NIX send memory subdescriptor structure */
+struct nix_sqe_mem_s {
+ u64 offset : 16; /* W0 */
+ u64 rsvd_52_16 : 37;
+ u64 wmem : 1;
+ u64 dsz : 2;
+ u64 alg : 4;
+ u64 subdc : 4;
+ u64 addr; /* W1 */
+};
+
+enum nix_cqerrint_e {
+ NIX_CQERRINT_DOOR_ERR = 0,
+ NIX_CQERRINT_WR_FULL = 1,
+ NIX_CQERRINT_CQE_FAULT = 2,
+};
+
+#define NIX_CQERRINT_BITS (BIT_ULL(NIX_CQERRINT_DOOR_ERR) | \
+ BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+
+enum nix_rqint_e {
+ NIX_RQINT_DROP = 0,
+ NIX_RQINT_RED = 1,
+};
+
+#define NIX_RQINT_BITS (BIT_ULL(NIX_RQINT_DROP) | BIT_ULL(NIX_RQINT_RED))
+
+enum nix_sqint_e {
+ NIX_SQINT_LMT_ERR = 0,
+ NIX_SQINT_MNQ_ERR = 1,
+ NIX_SQINT_SEND_ERR = 2,
+ NIX_SQINT_SQB_ALLOC_FAIL = 3,
+};
+
+#define NIX_SQINT_BITS (BIT_ULL(NIX_SQINT_LMT_ERR) | \
+ BIT_ULL(NIX_SQINT_MNQ_ERR) | \
+ BIT_ULL(NIX_SQINT_SEND_ERR) | \
+ BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+
+#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
new file mode 100644
index 000000000000..bef4c20fe314
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "otx2_reg.h"
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_txrx.h"
+
+#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+
+static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_hdr_s *cqe_hdr;
+
+ cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
+ return NULL;
+
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ return cqe_hdr;
+}
+
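+/* sg_lens[] indexes the 16-bit seg_size fields that overlay a 64-bit
+ * word of nix_sqe_sg_s; on big-endian hosts those fields sit in the
+ * opposite order within the word, so remap the index.
+ */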
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
+}
+
+static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ int seg;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE);
+ }
+ sg->num_segs = 0;
+}
+
+static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe,
+ int budget, int *tx_pkts, int *tx_bytes)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sk_buff *skb = NULL;
+ struct sg_list *sg;
+
+ if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
+ net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
+ pfvf->netdev->name, cq->cint_idx,
+ snd_comp->status);
+
+ sg = &sq->sg[snd_comp->sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(!skb))
+ return;
+
+ *tx_bytes += skb->len;
+ (*tx_pkts)++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ napi_consume_skb(skb, budget);
+ sg->skb = (u64)NULL;
+}
+
+static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len)
+{
+ struct page *page;
+ void *va;
+
+ va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+ page = virt_to_page(va);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page), len, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+}
+
+static void otx2_set_rxhash(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+ struct otx2_rss_info *rss;
+ u32 hash = 0;
+
+ if (!(pfvf->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss = &pfvf->hw.rss_info;
+ if (rss->flowkey_cfg) {
+ if (rss->flowkey_cfg &
+ ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
+ hash_type = PKT_HASH_TYPE_L4;
+ else
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe->hdr.flow_tag;
+ }
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, int qidx)
+{
+ struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
+ struct nix_rx_parse_s *parse = &cqe->parse;
+
+ if (netif_msg_rx_err(pfvf))
+ netdev_err(pfvf->netdev,
+ "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
+ qidx, parse->errlev, parse->errcode);
+
+ if (parse->errlev == NPC_ERRLVL_RE) {
+ switch (parse->errcode) {
+ case ERRCODE_FCS:
+ case ERRCODE_FCS_RCV:
+ atomic_inc(&stats->rx_fcs_errs);
+ break;
+ case ERRCODE_UNDERSIZE:
+ atomic_inc(&stats->rx_undersize_errs);
+ break;
+ case ERRCODE_OVERSIZE:
+ atomic_inc(&stats->rx_oversize_errs);
+ break;
+ case ERRCODE_OL2_LEN_MISMATCH:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else if (parse->errlev == NPC_ERRLVL_NIX) {
+ switch (parse->errcode) {
+ case ERRCODE_OL3_LEN:
+ case ERRCODE_OL4_LEN:
+ case ERRCODE_IL3_LEN:
+ case ERRCODE_IL4_LEN:
+ atomic_inc(&stats->rx_len_errs);
+ break;
+ case ERRCODE_OL4_CSUM:
+ case ERRCODE_IL4_CSUM:
+ atomic_inc(&stats->rx_csum_errs);
+ break;
+ default:
+ atomic_inc(&stats->rx_other_errs);
+ break;
+ }
+ } else {
+ atomic_inc(&stats->rx_other_errs);
+ /* For now ignore all the NPC parser errors and
+ * pass the packets to the stack.
+ */
+ return false;
+ }
+
+ /* If RXALL is enabled, pass on packets to the stack. */
+ if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
+ return false;
+
+ /* Free buffer back to pool */
+ if (cqe->sg.segs)
+ otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
+ return true;
+}
+
+static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq,
+ struct nix_cqe_rx_s *cqe)
+{
+ struct nix_rx_parse_s *parse = &cqe->parse;
+ struct sk_buff *skb = NULL;
+
+ if (unlikely(parse->errlev || parse->errcode)) {
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ return;
+ }
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb))
+ return;
+
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ cq->pool_ptrs++;
+
+ otx2_set_rxhash(pfvf, cqe, skb);
+
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_frags(napi);
+}
+
+static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq, int budget)
+{
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+ s64 bufptr;
+
+ while (likely(processed_cqe < budget)) {
+ cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
+ !cqe->sg.seg_addr) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
+
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ cqe->sg.seg_addr = 0x00;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (unlikely(!cq->pool_ptrs))
+ return 0;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ if (unlikely(bufptr <= 0)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ break;
+ }
+ otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
+ cq->pool_ptrs--;
+ }
+
+ return processed_cqe;
+}
+
+static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq, int budget)
+{
+ int tx_pkts = 0, tx_bytes = 0;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+
+ while (likely(processed_cqe < budget)) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ if (unlikely(!cqe)) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+
+ cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (likely(tx_pkts)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ /* Check if queue was stopped earlier due to ring full */
+ smp_mb();
+ if (netif_tx_queue_stopped(txq) &&
+ netif_carrier_ok(pfvf->netdev))
+ netif_tx_wake_queue(txq);
+ }
+ return 0;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget)
+{
+ struct otx2_cq_poll *cq_poll;
+ int workdone = 0, cq_idx, i;
+ struct otx2_cq_queue *cq;
+ struct otx2_qset *qset;
+ struct otx2_nic *pfvf;
+
+ cq_poll = container_of(napi, struct otx2_cq_poll, napi);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ qset = &pfvf->qset;
+
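+ /* cq_ids[CQ_TX] is the higher index, so Tx completions are
+ * serviced before Rx work on this CINT.
+ */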
+ for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ cq_idx = cq_poll->cq_ids[i];
+ if (unlikely(cq_idx == CINT_INVALID_CQ))
+ continue;
+ cq = &qset->cq[cq_idx];
+ if (cq->cq_type == CQ_RX) {
+ /* If the RQ refill WQ task is running, skip NAPI
+ * processing for this queue.
+ */
+ if (cq->refill_task_sched)
+ continue;
+ workdone += otx2_rx_napi_handler(pfvf, napi,
+ cq, budget);
+ } else {
+ workdone += otx2_tx_napi_handler(pfvf, cq, budget);
+ }
+ }
+
+ /* Clear the IRQ */
+ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
+
+ if (workdone < budget && napi_complete_done(napi, workdone)) {
+ /* If interface is going down, don't re-enable IRQ */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return workdone;
+
+ /* Re-enable interrupts */
+ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
+ return workdone;
+}
+
+static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
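+	/* otx2_lmt_flush() returns zero if the LMT store did not complete
+	 * (e.g. it was interrupted mid-sequence), so retry until it sticks.
+	 */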
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+#define MAX_SEGS_PER_SG 3
+/* Add SQE scatter/gather subdescriptor structure */
+static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* The next subdc always starts at a 16-byte boundary.
+ * So whether sg->segs ends up 2 or 3, offset += 16 bytes.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+/* Add SQE extended header subdescriptor */
+static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int *offset)
+{
+ struct nix_sqe_ext_s *ext;
+
+ ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
+ ext->subdc = NIX_SUBDC_EXT;
+ if (skb_shinfo(skb)->gso_size) {
+ ext->lso = 1;
+ ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_mps = skb_shinfo(skb)->gso_size;
+
+ /* Only TSOv4 and TSOv6 GSO offloads are supported */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ ext->lso_format = pfvf->hw.lso_tsov4_idx;
+
+ /* HW adds payload size to 'ip_hdr->tot_len' while
+ * sending a TSO segment, hence set the payload length
+ * in the packet's IP header to just the header length.
+ */
+ ip_hdr(skb)->tot_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ } else {
+ ext->lso_format = pfvf->hw.lso_tsov6_idx;
+ ipv6_hdr(skb)->payload_len =
+ htons(ext->lso_sb - skb_network_offset(skb));
+ }
+ }
+ *offset += sizeof(*ext);
+}
+
+/* Add SQE header subdescriptor structure */
+static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct nix_sqe_hdr_s *sqe_hdr,
+ struct sk_buff *skb, u16 qidx)
+{
+ int proto = 0;
+
+ /* Check if the SQE was framed before; if so, there is no need
+ * to set these constants again.
+ */
+ if (!sqe_hdr->total) {
+ /* Don't free Tx buffers to Aura */
+ sqe_hdr->df = 1;
+ sqe_hdr->aura = sq->aura_id;
+ /* Post a CQE Tx after pkt transmission */
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sq = qidx;
+ }
+ sqe_hdr->total = skb->len;
+ /* Set SQE identifier which will be used later for freeing SKB */
+ sqe_hdr->sqe_id = sq->head;
+
+ /* Offload TCP/UDP checksum to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ sqe_hdr->ol3ptr = skb_network_offset(skb);
+ sqe_hdr->ol4ptr = skb_transport_offset(skb);
+ /* get vlan protocol Ethertype */
+ if (eth_type_vlan(skb->protocol))
+ skb->protocol = vlan_get_protocol(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ proto = ip_hdr(skb)->protocol;
+ /* In case of TSO, HW needs this to be explicitly set.
+ * So set this always, instead of adding a check.
+ */
+ sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+
+ if (proto == IPPROTO_TCP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ else if (proto == IPPROTO_UDP)
+ sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
+ }
+}
+
+static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int sqe, int hdr_len)
+{
+ int num_segs = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_list *sg = &sq->sg[sqe];
+ u64 dma_addr;
+ int seg, len;
+
+ sg->num_segs = 0;
+
+ /* Get payload length at skb->data */
+ len = skb_headlen(skb) - hdr_len;
+
+ for (seg = 0; seg < num_segs; seg++) {
+		/* Skip skb->data if there is no payload */
+ if (!seg && !len)
+ continue;
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ goto unmap;
+
+ /* Save DMA mapping info for later unmapping */
+ sg->dma_addr[sg->num_segs] = dma_addr;
+ sg->size[sg->num_segs] = len;
+ sg->num_segs++;
+ }
+ return 0;
+unmap:
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ return -EINVAL;
+}
+
+static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int seg,
+ u64 seg_addr, int hdr_len, int sqe)
+{
+ struct sg_list *sg = &sq->sg[sqe];
+ const skb_frag_t *frag;
+ int offset;
+
+ if (seg < 0)
+ return sg->dma_addr[0] + (seg_addr - (u64)skb->data);
+
+ frag = &skb_shinfo(skb)->frags[seg];
+ offset = seg_addr - (u64)skb_frag_address(frag);
+ if (skb_headlen(skb) - hdr_len)
+ seg++;
+ return sg->dma_addr[seg] + offset;
+}
+
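otx2_tso_frag_dma_addr() above relies on a simple invariant: a pointer's offset inside a mapped buffer equals its offset inside the DMA mapping. A standalone sketch of that translation, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* One pre-mapped buffer: CPU start address and its DMA mapping. */
struct seg_map { uintptr_t cpu_base; uint64_t dma_base; };

/* Offset a pointer inside a mapped buffer into DMA space, as the
 * per-segment arithmetic above does.
 */
static uint64_t to_dma(const struct seg_map *m, uintptr_t ptr)
{
	return m->dma_base + (ptr - m->cpu_base);
}

int main(void)
{
	struct seg_map head = { 0x1000, 0xa0000 };

	/* A pointer 100 bytes into the buffer maps 100 bytes into DMA */
	printf("0x%llx\n",
	       (unsigned long long)to_dma(&head, 0x1000 + 100)); /* 0xa0064 */
	return 0;
}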
+static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
+ struct sg_list *list, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u16 *sg_lens = NULL;
+ u64 *iova = NULL;
+ int seg;
+
+ /* Add SG descriptors with buffer addresses */
+ for (seg = 0; seg < list->num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* Next subdc always starts at a 16 byte boundary.
+			 * So whether sg->segs ends up 2 or 3, reserve the
+			 * full three IOVA slots (16 extra bytes).
+			 */
+ if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+ }
+ sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
+ *iova++ = list->dma_addr[seg];
+ sg->segs++;
+ }
+}
+
+static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_data, seg_len, pkt_len, offset;
+ struct nix_sqe_hdr_s *sqe_hdr;
+ int first_sqe = sq->head;
+ struct sg_list list;
+ struct tso_t tso;
+
+ /* Map SKB's fragments to DMA.
+	 * It's done here to avoid mapping them for every TSO segment.
+ */
+ if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ tso_start(skb, &tso);
+ tcp_data = skb->len - hdr_len;
+ while (tcp_data > 0) {
+ char *hdr;
+
+ seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
+ tcp_data -= seg_len;
+
+ /* Set SQE's SEND_HDR */
+ memset(sq->sqe_base, 0, sq->sqe_size);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add TSO segment's pkt header */
+ hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
+ tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
+ list.dma_addr[0] =
+ sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
+ list.size[0] = hdr_len;
+ list.num_segs = 1;
+
+ /* Add TSO segment's payload data fragments */
+ pkt_len = hdr_len;
+ while (seg_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, seg_len);
+
+ list.size[list.num_segs] = size;
+ list.dma_addr[list.num_segs] =
+ otx2_tso_frag_dma_addr(sq, skb,
+ tso.next_frag_idx - 1,
+ (u64)tso.data, hdr_len,
+ first_sqe);
+ list.num_segs++;
+ pkt_len += size;
+ seg_len -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ sqe_hdr->total = pkt_len;
+ otx2_sqe_tso_add_sg(sq, &list, &offset);
+
+		/* DMA mappings and the skb need to be freed only after
+		 * the last TSO segment is transmitted. So set 'PNC' only
+		 * for the last segment. Also point the last segment's
+		 * sqe_id to the first segment's SQE index, where the skb
+		 * address and DMA mappings are saved.
+		 */
+ if (!tcp_data) {
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sqe_id = first_sqe;
+ sq->sg[first_sqe].skb = (u64)skb;
+ } else {
+ sqe_hdr->pnc = 0;
+ }
+
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ /* Flush SQE to HW */
+ otx2_sqe_flush(sq, offset);
+ }
+}
+
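The while loop above emits one SQE per gso_size bytes of TCP payload; a sketch of just that segmentation arithmetic (the 9000-byte payload and gso_size of 1448 are assumed values):

#include <stdio.h>

int main(void)
{
	int tcp_data = 9000, gso_size = 1448, segs = 0;

	while (tcp_data > 0) {
		int seg_len = tcp_data < gso_size ? tcp_data : gso_size;

		tcp_data -= seg_len;
		segs++;
		/* The last segment (tcp_data == 0) is the one that gets
		 * PNC set and carries the skb-freeing sqe_id.
		 */
	}
	printf("%d segments, last is %d bytes\n", segs, 9000 % 1448);
	return 0;	/* 7 segments, last is 312 bytes */
}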
+static bool is_hw_tso_supported(struct otx2_nic *pfvf,
+ struct sk_buff *skb)
+{
+ int payload_len, last_seg_size;
+
+ if (!pfvf->hw.hw_tso)
+ return false;
+
+	/* Due to a HW issue, when the payload of the last LSO segment
+	 * is shorter than 16 bytes some header fields may not be
+	 * modified correctly, hence don't offload such TSO segments.
+	 */
+ if (!is_96xx_B0(pfvf->pdev))
+ return true;
+
+ payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
+ if (last_seg_size && last_seg_size < 16)
+ return false;
+
+ return true;
+}
+
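A worked example of the erratum check above, with assumed sizes: at gso_size 1448 a 4360-byte payload leaves a 16-byte final segment and may be offloaded, while 4345 bytes would leave a 1-byte segment and must fall back to software TSO.

#include <stdio.h>
#include <stdbool.h>

static bool hw_tso_ok(int payload_len, int gso_size)
{
	int last = payload_len % gso_size;

	return !(last && last < 16);
}

int main(void)
{
	printf("%d %d\n", hw_tso_ok(4360, 1448),  /* 1: last seg == 16 */
			  hw_tso_ok(4345, 1448)); /* 0: last seg == 1  */
	return 0;
}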
+static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
+{
+ if (!skb_shinfo(skb)->gso_size)
+ return 1;
+
+ /* HW TSO */
+ if (is_hw_tso_supported(pfvf, skb))
+ return 1;
+
+ /* SW TSO */
+ return skb_shinfo(skb)->gso_segs;
+}
+
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ int offset, num_segs, free_sqe;
+ struct nix_sqe_hdr_s *sqe_hdr;
+
+	/* Check if there is room for a new SQE.
+	 * 'Num of SQBs freed to the SQ's pool - SQ's Aura count' gives
+	 * the free SQB count; times SQEs per SQB gives free SQEs.
+	 */
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+
+ if (free_sqe < sq->sqe_thresh ||
+ free_sqe < otx2_get_sqe_count(pfvf, skb))
+ return false;
+
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+
+ /* If SKB doesn't fit in a single SQE, linearize it.
+ * TODO: Consider adding JUMP descriptor instead.
+ */
+ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ num_segs = skb_shinfo(skb)->nr_frags + 1;
+ }
+
+ if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ return true;
+ }
+
+ /* Set SQE's SEND_HDR.
+	 * Do not clear the first 64 bits, as they contain constant info.
+ */
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+ otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
+ offset = sizeof(*sqe_hdr);
+
+ /* Add extended header if needed */
+ otx2_sqe_add_ext(pfvf, sq, skb, &offset);
+
+ /* Add SG subdesc with data frags */
+ if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
+ return false;
+ }
+
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Flush SQE to HW */
+ otx2_sqe_flush(sq, offset);
+
+ return true;
+}
+
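The admission check at the top of otx2_sq_append_skb() reduces to the computation below; the queue geometry values are made up, and aura_count stands in for *sq->aura_fc_addr (SQBs currently consumed):

#include <stdio.h>
#include <stdbool.h>

static bool sqe_fits(int num_sqbs, int aura_count, int sqe_per_sqb,
		     int sqe_thresh, int sqes_needed)
{
	/* Free SQBs times SQEs per SQB = free SQE count */
	int free_sqe = (num_sqbs - aura_count) * sqe_per_sqb;

	return free_sqe >= sqe_thresh && free_sqe >= sqes_needed;
}

int main(void)
{
	/* 128 SQBs, 120 consumed, 8 SQEs per SQB -> 64 free SQEs */
	printf("%d\n", sqe_fits(128, 120, 8, 16, 7)); /* 1 */
	printf("%d\n", sqe_fits(128, 127, 8, 16, 7)); /* 0: only 8 free */
	return 0;
}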
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_rx_s *cqe;
+ int processed_cqe = 0;
+ u64 iova, pa;
+
+ while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
+ if (!cqe->sg.subdc)
+ continue;
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+
+ sq = &pfvf->qset.sq[cq->cint_idx];
+
+ while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *msg;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ if (enable)
+ msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
+ else
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
+
+ if (!msg) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
new file mode 100644
index 000000000000..4ab32d3adb78
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_TXRX_H
+#define OTX2_TXRX_H
+
+#include <linux/etherdevice.h>
+#include <linux/iommu.h>
+#include <linux/if_vlan.h>
+
+#define LBK_CHAN_BASE 0x000
+#define SDP_CHAN_BASE 0x700
+#define CGX_CHAN_BASE 0x800
+
+#define OTX2_DATA_ALIGN(X) ALIGN(X, OTX2_ALIGN)
+#define OTX2_HEAD_ROOM OTX2_ALIGN
+
+#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
+#define OTX2_MIN_MTU 64
+#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
+
+#define OTX2_MAX_GSO_SEGS 255
+#define OTX2_MAX_FRAGS_IN_SQE 9
+
+/* Rx buffer size should be a multiple of 128 bytes */
+#define RCV_FRAG_LEN1(x) \
+ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* Prefer 2048 byte buffers for better last level cache
+ * utilization or data distribution across regions.
+ */
+#define RCV_FRAG_LEN(x) \
+ ((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
+
+#define DMA_BUFFER_LEN(x) \
+ ((x) - OTX2_HEAD_ROOM - \
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
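A worked sizing example for the macros above, assuming OTX2_ALIGN == 128 and a 320-byte struct skb_shared_info (both illustrative): for a 1536-byte receive size the aligned total is exactly 2048, so the 2048-byte preference is already met.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int align  = 128;	/* assumed OTX2_ALIGN                */
	int shinfo = 320;	/* assumed sizeof(skb_shared_info)   */
	int x      = 1536;	/* requested Rx data size            */

	int len1 = align + ALIGN_UP(x, align) + ALIGN_UP(shinfo, align);
	int len  = len1 < 2048 ? 2048 : len1;
	int dma  = len - align - ALIGN_UP(shinfo, align);

	printf("frag=%d dma=%d\n", len, dma);	/* frag=2048 dma=1536 */
	return 0;
}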
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is equal to this value.
+ */
+#define CQ_CQE_THRESH_DEFAULT 10
+
+/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
+ * is nonzero and this much time elapses after that.
+ */
+#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */
+#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */
+
+/* Min number of CQs (of the ones mapped to this CINT)
+ * with valid CQEs.
+ */
+#define CQ_QCOUNT_DEFAULT 1
+
+struct queue_stats {
+ u64 bytes;
+ u64 pkts;
+};
+
+struct otx2_rcv_queue {
+ struct queue_stats stats;
+};
+
+struct sg_list {
+ u16 num_segs;
+ u64 skb;
+ u64 size[OTX2_MAX_FRAGS_IN_SQE];
+ u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
+};
+
+struct otx2_snd_queue {
+ u8 aura_id;
+ u16 head;
+ u16 sqe_size;
+ u32 sqe_cnt;
+ u16 num_sqbs;
+ u16 sqe_thresh;
+ u8 sqe_per_sqb;
+ u64 io_addr;
+ u64 *aura_fc_addr;
+ u64 *lmt_addr;
+ void *sqe_base;
+ struct qmem *sqe;
+ struct qmem *tso_hdrs;
+ struct sg_list *sg;
+ struct queue_stats stats;
+ u16 sqb_count;
+ u64 *sqb_ptrs;
+} ____cacheline_aligned_in_smp;
+
+enum cq_type {
+ CQ_RX,
+ CQ_TX,
+ CQS_PER_CINT = 2, /* RQ + SQ */
+};
+
+struct otx2_cq_poll {
+ void *dev;
+#define CINT_INVALID_CQ 255
+ u8 cint_idx;
+ u8 cq_ids[CQS_PER_CINT];
+ struct napi_struct napi;
+};
+
+struct otx2_pool {
+ struct qmem *stack;
+ struct qmem *fc_addr;
+ u8 rbpage_order;
+ u16 rbsize;
+ u32 page_offset;
+ u16 pageref;
+ struct page *page;
+};
+
+struct otx2_cq_queue {
+ u8 cq_idx;
+ u8 cq_type;
+ u8 cint_idx; /* CQ interrupt id */
+ u8 refill_task_sched;
+ u16 cqe_size;
+ u16 pool_ptrs;
+ u32 cqe_cnt;
+ u32 cq_head;
+ void *cqe_base;
+ struct qmem *cqe;
+ struct otx2_pool *rbpool;
+} ____cacheline_aligned_in_smp;
+
+struct otx2_qset {
+ u32 rqe_cnt;
+ u32 sqe_cnt; /* Keep these two at top */
+#define OTX2_MAX_CQ_CNT 64
+ u16 cq_cnt;
+ u16 xqe_size;
+ struct otx2_pool *pool;
+ struct otx2_cq_poll *napi;
+ struct otx2_cq_queue *cq;
+ struct otx2_snd_queue *sq;
+ struct otx2_rcv_queue *rq;
+};
+
+/* Translate IOVA to physical address */
+static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (likely(iommu_domain))
+ return iommu_iova_to_phys(iommu_domain, dma_addr);
+ return dma_addr;
+}
+
+int otx2_napi_handler(struct napi_struct *napi, int budget);
+bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, u16 qidx);
+#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3fb7ee3d4d13..7a0d785b826c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -742,7 +742,7 @@ txq_reclaim_end:
return released;
}
-static void pxa168_eth_tx_timeout(struct net_device *dev)
+static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1344,15 +1344,6 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
return 0;
}
-static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- if (dev->phydev)
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-
- return -EOPNOTSUPP;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
@@ -1387,7 +1378,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = {
.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
.ndo_set_mac_address = pxa168_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_change_mtu = pxa168_eth_change_mtu,
.ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 095f6c71b4fa..97f270d30cce 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2884,7 +2884,7 @@ static void skge_tx_clean(struct net_device *dev)
skge->tx_ring.to_clean = e;
}
-static void skge_tx_timeout(struct net_device *dev)
+static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct skge_port *skge = netdev_priv(dev);
@@ -3932,7 +3932,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&hw->phy_lock);
tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
- hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_out_free_hw;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 5f56ee83e3b1..ebfd0ceac884 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2358,7 +2358,7 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
/* Transmit timeout is only called if we are running, carrier is up
* and tx queue is full (stopped).
*/
-static void sky2_tx_timeout(struct net_device *dev)
+static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -5022,7 +5022,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
- hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_out_free_hw;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 527ad2aadcca..8c6cfd15481c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2081,7 +2081,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
kfree(eth->scratch_head);
}
-static void mtk_tx_timeout(struct net_device *dev)
+static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index eaf08f7ad128..64ed725aec28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
crdump_enable_crspace_access(dev, cr_space);
/* Get the available snapshot ID for the dumps */
- id = devlink_region_shapshot_id_get(devlink);
+ id = devlink_region_snapshot_id_get(devlink);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7af75b63245f..43dcbd8214c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1363,24 +1363,18 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
}
}
-static void mlx4_en_tx_timeout(struct net_device *dev)
+static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int i;
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
- for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
-
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
- continue;
- en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, tx_ring->qpn, tx_ring->sp_cqn,
- tx_ring->cons, tx_ring->prod);
- }
+ en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
+ txqueue, tx_ring->qpn, tx_ring->sp_cqn,
+ tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
en_dbg(DRV, priv, "Scheduling watchdog\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a6f390fdb971..d3e06cec8317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -42,7 +42,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o
+ ecpf.o rdma.o eswitch_offloads_chains.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index d787bc0a4155..e09bc3858d57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{
- if (!MLX5_CAP_GEN(mdev, tls))
+ if (!MLX5_CAP_GEN(mdev, tls_tx))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 549f962cd86e..42198e64a7f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,8 +71,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
+static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node)
{
dma_addr_t t;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2c16add0b642..220ef9f06f84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -135,7 +135,7 @@ struct page_pool;
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
@@ -760,7 +760,7 @@ enum {
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
- MLX5E_STATE_XDP_OPEN,
+ MLX5E_STATE_XDP_ACTIVE,
};
struct mlx5e_rqt {
@@ -892,6 +892,8 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
+ mlx5e_stats_grp_t *stats_grps;
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -964,7 +966,6 @@ struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
-void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1175,11 +1176,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
- u16 max_channels, u16 mtu);
+ u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 68d593074f6c..0416f7712109 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -21,6 +21,7 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
};
struct mlx5e_flow_table {
@@ -122,6 +123,22 @@ enum {
#endif
};
+#define MLX5E_TTC_NUM_GROUPS 3
+#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
+#define MLX5E_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
+ MLX5E_TTC_GROUP2_SIZE +\
+ MLX5E_TTC_GROUP3_SIZE)
+
+#define MLX5E_INNER_TTC_NUM_GROUPS 3
+#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
+#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
+ MLX5E_INNER_TTC_GROUP2_SIZE +\
+ MLX5E_INNER_TTC_GROUP3_SIZE)
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 1d6b58860da6..3a975641f902 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
- if (!reporter) {
- netdev_err(priv->netdev, err_str);
+ netdev_err(priv->netdev, err_str);
+
+ if (!reporter)
return err_ctx->recover(&err_ctx->ctx);
- }
+
return devlink_health_report(reporter, err_str, err_ctx);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 36ac1e3816b9..d7587f40ecae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+
+ if (priv->channels.params.xdp_prog)
+ set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}
static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
+ if (priv->channels.params.xdp_prog)
+ clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
+
clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
- /* let other device's napi(s) see our new state */
+ /* Let other device's napi(s) and XSK wakeups see our new state. */
synchronize_rcu();
}
@@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}
-static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv)
-{
- set_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
-}
-
-static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv)
-{
- clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
-}
-
-static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv)
+static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
- return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
+ return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 475b6bd5d29b..62fc8a128a8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -35,7 +35,7 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
*/
dma_info->addr = xdp_umem_get_dma(umem, handle);
- xsk_umem_discard_addr_rq(umem);
+ xsk_umem_release_addr_rq(umem);
dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 631af8dee517..c28cbae42331 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with the XSK wakeup. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 87827477d38c..fe2d596cb361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
struct mlx5e_channel *c;
u16 ix;
- if (unlikely(!mlx5e_xdp_is_open(priv)))
+ if (unlikely(!mlx5e_xdp_is_active(priv)))
return -ENETDOWN;
if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 778dab1af8fc..f260dd96873b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct tx_sync_info {
u64 rcd_sn;
- s32 sync_len;
+ u32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+ u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
+ bool ends_before;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto out;
}
- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- ret = tls_record_is_start_marker(record) ?
- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ /* There are the following cases:
+ * 1. packet ends before start marker: bypass offload.
+ * 2. packet starts before start marker and ends after it: drop,
+ * not supported, breaks contract with kernel.
+ * 3. packet ends before tls record info starts: drop,
+ * this packet was already acknowledged and its record info
+ * was released.
+ */
+ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ } else if (ends_before) {
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u8 num_wqebbs;
int i = 0;
- ret = tx_sync_info_get(priv_tx, seq, &info);
+ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto err_out;
}
- if (unlikely(info.sync_len < 0)) {
- if (likely(datalen <= -info.sync_len))
- return MLX5E_KTLS_SYNC_DONE;
-
- stats->tls_drop_bypass_req++;
- goto err_out;
- }
-
stats->tls_ooo++;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+ switch (ret) {
+ case MLX5E_KTLS_SYNC_DONE:
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- else if (ret == MLX5E_KTLS_SYNC_FAIL)
+ break;
+ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ if (likely(!skb->decrypted))
+ goto out;
+ WARN_ON_ONCE(1);
+ /* fall-through */
+ default: /* MLX5E_KTLS_SYNC_FAIL */
goto err_out;
- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
- goto out;
+ }
}
priv_tx->expected_seq = seq + datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 71384ad1a443..ef1ed15a53b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
- if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c6776f308d5e..d674cb679895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -218,13 +218,9 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
- int i, num_stats = 0;
-
switch (sset) {
case ETH_SS_STATS:
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
- return num_stats;
+ return mlx5e_stats_total_num(priv);
case ETH_SS_PRIV_FLAGS:
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
@@ -242,14 +238,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return mlx5e_ethtool_get_sset_count(priv, sset);
}
-static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
-{
- int i, idx = 0;
-
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
-}
-
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
{
int i;
@@ -268,7 +256,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
- mlx5e_fill_stats_strings(priv, data);
+ mlx5e_stats_fill_strings(priv, data);
break;
}
}
@@ -283,14 +271,13 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- int i, idx = 0;
+ int idx = 0;
mutex_lock(&priv->state_lock);
- mlx5e_update_stats(priv);
+ mlx5e_stats_update(priv);
mutex_unlock(&priv->state_lock);
- for (i = 0; i < mlx5e_num_stats_grps; i++)
- idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
+ mlx5e_stats_fill(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 15b7f0f1427c..73d3dc07331f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -904,22 +904,6 @@ del_rules:
return err;
}
-#define MLX5E_TTC_NUM_GROUPS 3
-#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
- MLX5E_TTC_GROUP2_SIZE +\
- MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS 3
-#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
- MLX5E_INNER_TTC_GROUP2_SIZE +\
- MLX5E_INNER_TTC_GROUP3_SIZE)
-
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
bool use_ipv)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index acd946f2ddbe..3bc2ac3d53fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
MLX5E_ETHTOOL_NUM_ENTRIES);
- ft = mlx5_create_auto_grouped_flow_table(ns, prio,
- table_size,
- MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
+
+ ft_attr.prio = prio;
+ ft_attr.max_fte = table_size;
+ ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return (void *)ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4980e80a5e85..454d3459bd8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -159,23 +159,14 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
- if (mlx5e_stats_grps[i].update_stats)
- mlx5e_stats_grps[i].update_stats(priv);
-}
-
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
- for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
- if (mlx5e_stats_grps[i].update_stats_mask &
+ for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
+ if (mlx5e_nic_stats_grps[i]->update_stats_mask &
MLX5E_NDO_UPDATE_STATS)
- mlx5e_stats_grps[i].update_stats(priv);
+ mlx5e_nic_stats_grps[i]->update_stats(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
@@ -3000,12 +2991,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool is_xdp = priv->channels.params.xdp_prog;
int err;
set_bit(MLX5E_STATE_OPENED, &priv->state);
- if (is_xdp)
- mlx5e_xdp_set_open(priv);
err = mlx5e_open_channels(priv, &priv->channels);
if (err)
@@ -3020,8 +3008,6 @@ int mlx5e_open_locked(struct net_device *netdev)
return 0;
err_clear_state_opened_flag:
- if (is_xdp)
- mlx5e_xdp_set_closed(priv);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
}
@@ -3053,8 +3039,6 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- if (priv->channels.params.xdp_prog)
- mlx5e_xdp_set_closed(priv);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
@@ -4332,7 +4316,7 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_tx_timeout(struct net_device *dev)
+static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -4371,16 +4355,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return 0;
}
-static int mlx5e_xdp_update_state(struct mlx5e_priv *priv)
-{
- if (priv->channels.params.xdp_prog)
- mlx5e_xdp_set_open(priv);
- else
- mlx5e_xdp_set_closed(priv);
-
- return 0;
-}
-
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4415,7 +4389,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mlx5e_set_rq_type(priv->mdev, &new_channels.params);
old_prog = priv->channels.params.xdp_prog;
- err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
if (err)
goto unlock;
} else {
@@ -4756,17 +4730,19 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
tirc_default_config[tt].rx_hash_fields;
}
-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
- u16 max_channels, u16 mtu)
+ u16 mtu)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
- params->num_channels = max_channels;
+ params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
+ priv->max_nch);
params->num_tc = 1;
/* SQ */
@@ -4893,6 +4869,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
@@ -5003,8 +4981,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
return err;
- mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
- priv->max_nch, netdev->mtu);
+ mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
+ netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -5166,6 +5144,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5186,7 +5165,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove(mdev, netdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5210,6 +5189,8 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
+ .stats_grps = mlx5e_nic_stats_grps,
+ .stats_grps_num = mlx5e_nic_stats_grps_num,
};
/* mlx5e generic netdev management API (move to en_common.c) */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f175cb24bb67..7b48ccacebe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,6 +41,7 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -116,24 +117,71 @@ static const struct counter_desc vport_rep_stats_desc[] = {
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
-static void mlx5e_rep_get_strings(struct net_device *dev,
- u32 stringset, uint8_t *data)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
- int i, j;
+ return NUM_VPORT_REP_SW_COUNTERS;
+}
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- strcpy(data + (i * ETH_GSTRING_LEN),
- sw_rep_stats_desc[i].format);
- for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
- strcpy(data + (i * ETH_GSTRING_LEN),
- vport_rep_stats_desc[j].format);
- break;
- }
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ sw_rep_stats_desc[i].format);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
+{
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
+ struct rtnl_link_stats64 stats64 = {};
+
+ memset(s, 0, sizeof(*s));
+ mlx5e_fold_sw_stats64(priv, &stats64);
+
+ s->rx_packets = stats64.rx_packets;
+ s->rx_bytes = stats64.rx_bytes;
+ s->tx_packets = stats64.tx_packets;
+ s->tx_bytes = stats64.tx_bytes;
+ s->tx_queue_dropped = stats64.tx_dropped;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
+{
+ return NUM_VPORT_REP_HW_COUNTERS;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ return idx;
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ vport_rep_stats_desc, i);
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -156,64 +204,33 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = vf_stats.rx_bytes;
}
-static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct rtnl_link_stats64 *vport_stats;
-
- mlx5e_grp_802_3_update_stats(priv);
-
- vport_stats = &priv->stats.vf_vport;
-
- vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
- vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
- vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
- vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
-}
-
-static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_get_strings(struct net_device *dev,
+ u32 stringset, uint8_t *data)
{
- struct mlx5e_sw_stats *s = &priv->stats.sw;
- struct rtnl_link_stats64 stats64 = {};
-
- memset(s, 0, sizeof(*s));
- mlx5e_fold_sw_stats64(priv, &stats64);
+ struct mlx5e_priv *priv = netdev_priv(dev);
- s->rx_packets = stats64.rx_packets;
- s->rx_bytes = stats64.rx_bytes;
- s->tx_packets = stats64.tx_packets;
- s->tx_bytes = stats64.tx_bytes;
- s->tx_queue_dropped = stats64.tx_dropped;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ mlx5e_stats_fill_strings(priv, data);
+ break;
+ }
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i, j;
-
- if (!data)
- return;
-
- mutex_lock(&priv->state_lock);
- mlx5e_rep_update_sw_counters(priv);
- priv->profile->update_stats(priv);
- mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_rep_stats_desc, i);
-
- for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
- data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
- vport_rep_stats_desc, j);
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
+ return mlx5e_stats_total_num(priv);
default:
return -EOPNOTSUPP;
}
@@ -1247,8 +1264,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- struct flow_cls_offload *f = type_data;
- struct flow_cls_offload cls_flower;
+ struct flow_cls_offload tmp, *f = type_data;
struct mlx5e_priv *priv = cb_priv;
struct mlx5_eswitch *esw;
unsigned long flags;
@@ -1261,16 +1277,30 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+ memcpy(&tmp, f, sizeof(*f));
+
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.chain_index)
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
* reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
*/
- memcpy(&cls_flower, f, sizeof(*f));
- cls_flower.common.chain_index = FDB_FT_CHAIN;
- err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
- memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+ if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ return -EOPNOTSUPP;
+ if (tmp.common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
return err;
default:
return -EOPNOTSUPP;
@@ -1660,10 +1690,65 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_close_drop_rq(&priv->drop_rq);
}
+static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
+{
+ int err = mlx5e_init_rep_rx(priv);
+
+ if (err)
+ return err;
+
+ mlx5e_create_q_counters(priv);
+ return 0;
+}
+
+static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_q_counters(priv);
+ mlx5e_cleanup_rep_rx(priv);
+}
+
+static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = rpriv->netdev;
+ priv = netdev_priv(netdev);
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_init(&uplink_priv->unready_flows_lock);
+ INIT_LIST_HEAD(&uplink_priv->unready_flows);
+
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ if (err)
+ return err;
+
+ mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+ uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
+ err = register_netdevice_notifier_dev_net(rpriv->netdev,
+ &uplink_priv->netdevice_nb,
+ &uplink_priv->netdevice_nn);
+ if (err) {
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
+ goto tc_esw_cleanup;
+ }
+
+ return 0;
+
+tc_esw_cleanup:
+ mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+ return err;
+}
+
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv;
int err;
err = mlx5e_create_tises(priv);
@@ -1673,52 +1758,41 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
}
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
- uplink_priv = &rpriv->uplink_priv;
-
- mutex_init(&uplink_priv->unready_flows_lock);
- INIT_LIST_HEAD(&uplink_priv->unready_flows);
-
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
goto destroy_tises;
-
- mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
-
- /* init indirect block notifications */
- INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
- uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
- err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
- if (err) {
- mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
- goto tc_esw_cleanup;
- }
}
return 0;
-tc_esw_cleanup:
- mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
mlx5e_destroy_tises(priv);
return err;
}
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+
+ /* clean indirect TC block notifications */
+ unregister_netdevice_notifier_dev_net(rpriv->netdev,
+ &uplink_priv->netdevice_nb,
+ &uplink_priv->netdevice_nn);
+ mlx5e_rep_indr_clean_block_privs(rpriv);
+
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+}
+
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_destroy_tises(priv);
- if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
- /* clean indirect TC block notifications */
- unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
- mlx5e_rep_indr_clean_block_privs(rpriv);
-
- /* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
- mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
- }
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ mlx5e_cleanup_uplink_rep_tx(rpriv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1787,6 +1861,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
+ struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1795,7 +1870,44 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove(mdev, netdev);
+}
+
+static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
+static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);
+
+/* The stats groups are listed in reverse order of the update_stats() calls */
+static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw_rep),
+ &MLX5E_STATS_GRP(vport_rep),
+};
+
+static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_rep_stats_grps);
+}
+
+/* The stats groups are listed in reverse order of the update_stats() calls */
+static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(eth_ext),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
static const struct mlx5e_profile mlx5e_rep_profile = {
@@ -1807,29 +1919,33 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_rep_update_hw_counters,
+ .update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5e_rep_stats_grps,
+ .stats_grps_num = mlx5e_rep_stats_grps_num,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
- .init_rx = mlx5e_init_rep_rx,
- .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_rx = mlx5e_init_ul_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_ul_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_uplink_rep_update_hw_counters,
+ .update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5e_ul_rep_stats_grps,
+ .stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 31f83c8adcc9..3f756d51435f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -73,6 +73,7 @@ struct mlx5_rep_uplink_priv {
*/
struct list_head tc_indr_block_priv_list;
struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
struct mlx5_tun_entropy tun_entropy;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9e9960146e5b..1c3ab69cbd96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
- netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
- if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(cq->channel->priv->wq, &sq->recover_work);
- break;
- }
do {
struct mlx5e_sq_wqe_info *wi;
u16 ci;
@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.ico_wqe[ci];
+ if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(cq->channel->priv->wq, &sq->recover_work);
+ break;
+ }
+
if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
sqcc += MLX5E_UMR_WQEBBS;
wi->umr.rq->mpwqe.umr_completed++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 9f09253f9f46..30b216d9284c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -35,6 +35,58 @@
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
+static unsigned int stats_grps_num(struct mlx5e_priv *priv)
+{
+ return !priv->profile->stats_grps_num ? 0 :
+ priv->profile->stats_grps_num(priv);
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ unsigned int total = 0;
+ int i;
+
+ for (i = 0; i < num_stats_grps; i++)
+ total += stats_grps[i]->get_num_stats(priv);
+
+ return total;
+}
+
+void mlx5e_stats_update(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = num_stats_grps - 1; i >= 0; i--)
+ if (stats_grps[i]->update_stats)
+ stats_grps[i]->update_stats(priv);
+}
+
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = 0; i < num_stats_grps; i++)
+ idx = stats_grps[i]->fill_stats(priv, data, idx);
+}
+
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i, idx = 0;
+
+ for (i = 0; i < num_stats_grps; i++)
+ idx = stats_grps[i]->fill_strings(priv, data, idx);
+}
+
+/* Concrete NIC Stats */
+
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
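
The four walkers above (total_num/update/fill/fill_strings) become the only entry points ethtool-facing code needs. A hedged sketch of a caller, with simplified buffer handling; the exact mlx5 call sites differ:

/* Hypothetical caller: size both buffers from one count, then fill the
 * name and value arrays through the same group table so their layouts
 * cannot drift apart. */
static void dump_stats(struct mlx5e_priv *priv, u8 *names, u64 *values)
{
        unsigned int total = mlx5e_stats_total_num(priv);

        mlx5e_stats_update(priv);              /* refresh all groups     */
        mlx5e_stats_fill_strings(priv, names); /* ETH_GSTRING_LEN each   */
        mlx5e_stats_fill(priv, values, 0);     /* start writing at idx 0 */
        (void)total;                           /* caller uses it to alloc */
}
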
@@ -146,12 +198,12 @@ static const struct counter_desc sw_stats_desc[] = {
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
return NUM_SW_COUNTERS;
}
-static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
int i;
@@ -160,7 +212,7 @@ static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
-static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
int i;
@@ -169,7 +221,7 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
@@ -297,6 +349,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
s->tx_cqes += sq_stats->cqes;
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
}
}
}
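
The added barrier() works around the compiler transformation tracked in the linked GCC bug report: an empty asm with a "memory" clobber forces the compiler to complete the accumulated stores each iteration instead of transforming the loop. A user-space illustration of the same shape, assuming the kernel's usual barrier() definition:

#define barrier() __asm__ __volatile__("" ::: "memory")

static void accumulate(long *dst, const long *src, int n)
{
        for (int i = 0; i < n; i++) {
                dst[i] += src[i];
                barrier();   /* keep per-iteration stores ordered/visible */
        }
}
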
@@ -312,7 +367,7 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
-static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
@@ -325,7 +380,7 @@ static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
@@ -340,7 +395,7 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
return idx;
}
-static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
@@ -353,7 +408,7 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
@@ -388,14 +443,13 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
-static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
-static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
int i;
@@ -409,8 +463,7 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
int i;
@@ -424,7 +477,7 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
@@ -487,13 +540,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
return NUM_VPORT_COUNTERS;
}
-static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
int i;
@@ -502,8 +554,7 @@ static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
int i;
@@ -513,7 +564,7 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
@@ -552,13 +603,12 @@ static const struct counter_desc pport_802_3_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
return NUM_PPORT_802_3_COUNTERS;
}
-static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
int i;
@@ -567,8 +617,7 @@ static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
int i;
@@ -581,7 +630,7 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -609,13 +658,12 @@ static const struct counter_desc pport_2863_stats_desc[] = {
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
return NUM_PPORT_2863_COUNTERS;
}
-static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
int i;
@@ -624,8 +672,7 @@ static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
int i;
@@ -635,7 +682,7 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -670,13 +717,12 @@ static const struct counter_desc pport_2819_stats_desc[] = {
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
return NUM_PPORT_2819_COUNTERS;
}
-static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
int i;
@@ -685,8 +731,7 @@ static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
int i;
@@ -696,7 +741,7 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -734,7 +779,7 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
-static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
@@ -751,8 +796,7 @@ static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
@@ -774,7 +818,7 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
@@ -800,7 +844,7 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -830,7 +874,7 @@ static const struct counter_desc pport_eth_ext_stats_desc[] = {
#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
-static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
return NUM_PPORT_ETH_EXT_COUNTERS;
@@ -838,8 +882,7 @@ static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
return 0;
}
-static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
int i;
@@ -850,8 +893,7 @@ static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
int i;
@@ -863,7 +905,7 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -904,7 +946,7 @@ static const struct counter_desc pcie_perf_stall_stats_desc[] = {
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
-static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
int num_stats = 0;
@@ -920,8 +962,7 @@ static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
return num_stats;
}
-static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
int i;
@@ -942,8 +983,7 @@ static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
int i;
@@ -967,7 +1007,7 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1015,8 +1055,7 @@ static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
-static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
- u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i, prio;
@@ -1036,8 +1075,7 @@ static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
@@ -1036,8 +1075,7 @@ static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
return idx;
}
-static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
- u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
struct mlx5e_pport_stats *pport = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1112,13 +1150,13 @@ static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
}
}
-static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}
-static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
mlx5e_grp_per_tc_prio_update_stats(priv);
mlx5e_grp_per_tc_congest_prio_update_stats(priv);
@@ -1130,6 +1168,7 @@ static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
@@ -1292,29 +1331,27 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
-static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
return mlx5e_grp_per_prio_traffic_get_num_stats() +
mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
-static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
return idx;
}
-static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
return idx;
}
-static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1349,13 +1386,12 @@ static const struct counter_desc mlx5e_pme_error_desc[] = {
#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
-static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
-static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
int i;
@@ -1368,8 +1404,7 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
struct mlx5_pme_stats pme_stats;
int i;
@@ -1387,45 +1422,46 @@ static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
{
return mlx5e_ipsec_get_count(priv);
}
-static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
{
return idx + mlx5e_ipsec_get_strings(priv,
data + idx * ETH_GSTRING_LEN);
}
-static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
{
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
-static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
{
mlx5e_ipsec_update_stats(priv);
}
-static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
}
-static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
-static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
return idx + mlx5e_tls_get_stats(priv, data + idx);
}
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -1559,7 +1595,7 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
-static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
int max_nch = priv->max_nch;
@@ -1572,8 +1608,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
(NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
-static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
@@ -1615,8 +1650,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
}
-static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
- int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
bool is_xsk = priv->xsk.ever_used;
int max_nch = priv->max_nch;
@@ -1664,104 +1698,46 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
+
+MLX5E_DEFINE_STATS_GRP(sw, 0);
+MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
+MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
+MLX5E_DEFINE_STATS_GRP(2863, 0);
+MLX5E_DEFINE_STATS_GRP(2819, 0);
+MLX5E_DEFINE_STATS_GRP(phy, 0);
+MLX5E_DEFINE_STATS_GRP(pcie, 0);
+MLX5E_DEFINE_STATS_GRP(per_prio, 0);
+MLX5E_DEFINE_STATS_GRP(pme, 0);
+MLX5E_DEFINE_STATS_GRP(channels, 0);
+MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
+MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
+static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
+static MLX5E_DEFINE_STATS_GRP(tls, 0);
+
/* The stats groups order is opposite to the update_stats() order calls */
-const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
- {
- .get_num_stats = mlx5e_grp_sw_get_num_stats,
- .fill_strings = mlx5e_grp_sw_fill_strings,
- .fill_stats = mlx5e_grp_sw_fill_stats,
- .update_stats = mlx5e_grp_sw_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_q_get_num_stats,
- .fill_strings = mlx5e_grp_q_fill_strings,
- .fill_stats = mlx5e_grp_q_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_q_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
- .fill_strings = mlx5e_grp_vnic_env_fill_strings,
- .fill_stats = mlx5e_grp_vnic_env_fill_stats,
- .update_stats = mlx5e_grp_vnic_env_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_vport_get_num_stats,
- .fill_strings = mlx5e_grp_vport_fill_strings,
- .fill_stats = mlx5e_grp_vport_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_vport_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_802_3_get_num_stats,
- .fill_strings = mlx5e_grp_802_3_fill_strings,
- .fill_stats = mlx5e_grp_802_3_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
- .update_stats = mlx5e_grp_802_3_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_2863_get_num_stats,
- .fill_strings = mlx5e_grp_2863_fill_strings,
- .fill_stats = mlx5e_grp_2863_fill_stats,
- .update_stats = mlx5e_grp_2863_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_2819_get_num_stats,
- .fill_strings = mlx5e_grp_2819_fill_strings,
- .fill_stats = mlx5e_grp_2819_fill_stats,
- .update_stats = mlx5e_grp_2819_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_phy_get_num_stats,
- .fill_strings = mlx5e_grp_phy_fill_strings,
- .fill_stats = mlx5e_grp_phy_fill_stats,
- .update_stats = mlx5e_grp_phy_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
- .fill_strings = mlx5e_grp_eth_ext_fill_strings,
- .fill_stats = mlx5e_grp_eth_ext_fill_stats,
- .update_stats = mlx5e_grp_eth_ext_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_pcie_get_num_stats,
- .fill_strings = mlx5e_grp_pcie_fill_strings,
- .fill_stats = mlx5e_grp_pcie_fill_stats,
- .update_stats = mlx5e_grp_pcie_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_fill_stats,
- .update_stats = mlx5e_grp_per_prio_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_pme_get_num_stats,
- .fill_strings = mlx5e_grp_pme_fill_strings,
- .fill_stats = mlx5e_grp_pme_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
- .fill_strings = mlx5e_grp_ipsec_fill_strings,
- .fill_stats = mlx5e_grp_ipsec_fill_stats,
- .update_stats = mlx5e_grp_ipsec_update_stats,
- },
- {
- .get_num_stats = mlx5e_grp_tls_get_num_stats,
- .fill_strings = mlx5e_grp_tls_fill_strings,
- .fill_stats = mlx5e_grp_tls_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_channels_get_num_stats,
- .fill_strings = mlx5e_grp_channels_fill_strings,
- .fill_stats = mlx5e_grp_channels_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
- .fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
- .fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
- .update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
- },
+mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(eth_ext),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(ipsec),
+ &MLX5E_STATS_GRP(tls),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
};
-const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5e_nic_stats_grps);
+}
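
The old single array of structs with inline function pointers becomes a table of pointers to extern const group objects, which is what lets each profile compose its own subset. A hypothetical second profile list, built from the same extern definitions (the real rep lists live in en_rep.c, not shown in this hunk):

static mlx5e_stats_grp_t my_rep_stats_grps[] = {   /* illustrative name */
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(per_prio),
};

static unsigned int my_rep_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(my_rep_stats_grps);
}
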
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 869f3502f631..092b39ffa32a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__
@@ -55,6 +56,56 @@ struct counter_desc {
size_t offset; /* Byte offset */
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ u16 update_stats_mask;
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
+};
+
+typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
+
+#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
+
+#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
+ void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
+ int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+
+#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
+
+#define MLX5E_DECLARE_STATS_GRP(grp) \
+ const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
+
+#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
+MLX5E_DECLARE_STATS_GRP(grp) = { \
+ .get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
+ .fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
+ .fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
+ .update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
+ .update_stats_mask = mask, \
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
+void mlx5e_stats_update(struct mlx5e_priv *priv);
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+
+/* Concrete NIC Stats */
+
struct mlx5e_sw_stats {
u64 rx_packets;
u64 rx_bytes;
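
For readers tracing the macros: per the definitions above, MLX5E_DEFINE_STATS_GRP(sw, 0) expands to roughly the following, with the MLX5E_DECLARE_STATS_GRP_OP_* declarations naming the mlx5e_stats_grp_sw_* functions that replaced the old mlx5e_grp_sw_* names:

const struct mlx5e_stats_grp mlx5e_stats_grp_sw = {
        .get_num_stats     = mlx5e_stats_grp_sw_num_stats,
        .fill_stats        = mlx5e_stats_grp_sw_fill_stats,
        .fill_strings      = mlx5e_stats_grp_sw_fill_strings,
        .update_stats      = mlx5e_stats_grp_sw_update_stats,
        .update_stats_mask = 0,
};
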
@@ -322,22 +373,22 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-enum {
- MLX5E_NDO_UPDATE_STATS = BIT(0x1),
-};
-
-struct mlx5e_priv;
-struct mlx5e_stats_grp {
- u16 update_stats_mask;
- int (*get_num_stats)(struct mlx5e_priv *priv);
- int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
- int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
- void (*update_stats)(struct mlx5e_priv *priv);
-};
-
-extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
-extern const int mlx5e_num_stats_grps;
+extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
-void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
+extern MLX5E_DECLARE_STATS_GRP(sw);
+extern MLX5E_DECLARE_STATS_GRP(qcnt);
+extern MLX5E_DECLARE_STATS_GRP(vnic_env);
+extern MLX5E_DECLARE_STATS_GRP(vport);
+extern MLX5E_DECLARE_STATS_GRP(802_3);
+extern MLX5E_DECLARE_STATS_GRP(2863);
+extern MLX5E_DECLARE_STATS_GRP(2819);
+extern MLX5E_DECLARE_STATS_GRP(phy);
+extern MLX5E_DECLARE_STATS_GRP(eth_ext);
+extern MLX5E_DECLARE_STATS_GRP(pcie);
+extern MLX5E_DECLARE_STATS_GRP(per_prio);
+extern MLX5E_DECLARE_STATS_GRP(pme);
+extern MLX5E_DECLARE_STATS_GRP(channels);
+extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9b32a9c0f497..74091f72c9a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,6 +51,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -592,7 +593,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
- ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}
@@ -960,7 +961,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
- int tc_grp_size, tc_tbl_size;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int tc_grp_size, tc_tbl_size, tc_num_grps;
u32 max_flow_counter;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
@@ -970,13 +972,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+ tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
+ ft_attr.prio = MLX5E_TC_PRIO;
+ ft_attr.max_fte = tc_tbl_size;
+ ft_attr.level = MLX5E_TC_FT_LEVEL;
+ ft_attr.autogroup.max_num_groups = tc_num_grps;
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
- MLX5E_TC_PRIO,
- tc_tbl_size,
- MLX5E_TC_TABLE_NUM_GROUPS,
- MLX5E_TC_FT_LEVEL, 0);
+ &ft_attr);
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
@@ -1080,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1097,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1157,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u32 max_chain = mlx5_eswitch_get_chain_range(esw);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
- u16 max_prio = mlx5_eswitch_get_prio_range(esw);
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
bool encap_valid = true;
+ u32 max_prio, max_chain;
int err = 0;
int out_index;
- if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+ if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1179,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
+ max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
+ max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
return -EOPNOTSUPP;
@@ -1805,6 +1810,40 @@ static void *get_match_headers_value(u32 flags,
outer_headers);
}
+static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return 0;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(dev_net(filter_dev),
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't find the ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (ingress_dev != filter_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't match on the ingress filter port");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
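
mlx5e_flower_parse_meta() accepts a meta key only when the mask selects the whole ifindex and the matched device is exactly the one the filter is attached to. A self-contained user-space analogue of that validation, illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The mask must be an exact match on the full ifindex, and the key must
 * name the ingress device the filter hangs off; anything else is rejected. */
static bool meta_match_ok(uint32_t mask, uint32_t key, uint32_t filter_ifindex)
{
        if (mask != 0xFFFFFFFF)
                return false;           /* partial ifindex masks unsupported */
        return key == filter_ifindex;   /* must match the ingress device    */
}

int main(void)
{
        printf("%d %d\n", meta_match_ok(~0u, 5, 5), meta_match_ok(0xff, 5, 5));
        return 0;
}
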
@@ -1825,6 +1864,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
+ int err;
match_level = outer_match_level;
@@ -1868,6 +1908,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
spec);
}
+ err = mlx5e_flower_parse_meta(filter_dev, f);
+ if (err)
+ return err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -2842,6 +2886,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2999,6 +3047,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info
return kmemdup(tun_info, tun_size, GFP_KERNEL);
}
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < out_index; i++) {
+ if (flow->encaps[i].e != e)
+ continue;
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+ netdev_err(priv->netdev, "can't duplicate encap action\n");
+ return true;
+ }
+
+ return false;
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
@@ -3034,6 +3101,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
/* must verify if encap is valid or not */
if (e) {
+ /* Check that entry was not already attached to this flow */
+ if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
mutex_unlock(&esw->offloads.encap_tbl_lock);
wait_for_completion(&e->res_ready);
@@ -3220,6 +3293,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
same_hw_devs(priv, netdev_priv(out_dev));
}
+static bool is_duplicated_output_device(struct net_device *dev,
+ struct net_device *out_dev,
+ int *ifindexes, int if_count,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < if_count; i++) {
+ if (ifindexes[i] == out_dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't duplicate output to same device");
+ netdev_err(dev, "can't duplicate output to same device: %s\n",
+ out_dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3231,11 +3324,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ int err, i, if_count = 0;
bool encap = false;
u32 action = 0;
- int err, i;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
@@ -3312,6 +3406,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
+ if (is_duplicated_output_device(priv->netdev,
+ out_dev,
+ ifindexes,
+ if_count,
+ extack))
+ return -EOPNOTSUPP;
+
+ ifindexes[if_count] = out_dev->ifindex;
+ if_count++;
+
rcu_read_lock();
uplink_upper =
netdev_master_upper_dev_get_rcu(uplink_dev);
@@ -3406,7 +3510,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_GOTO: {
u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
if (ft_flow) {
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
@@ -3980,6 +4084,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
u32 rate_mbps;
int err;
+ vport_num = rpriv->rep->vport;
+ if (vport_num >= MLX5_VPORT_ECPF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+ return -EOPNOTSUPP;
+ }
+
esw = priv->mdev->priv.eswitch;
/* rate is given in bytes/sec.
* First convert to bits/sec and then round to the nearest mbit/secs.
@@ -3988,8 +4099,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
- vport_num = rpriv->rep->vport;
-
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
@@ -4142,7 +4251,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
return err;
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
- if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ err = register_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
+ if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
}
@@ -4164,7 +4276,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
if (tc->netdevice_nb.notifier_call)
- unregister_netdevice_notifier(&tc->netdevice_nb);
+ unregister_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
mutex_destroy(&tc->mod_hdr.lock);
mutex_destroy(&tc->hairpin_tbl_lock);
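
register_netdevice_notifier_dev_net() scopes the notifier to the device's network namespace; the third argument is bookkeeping the core uses to follow the device across namespaces, so the same netdev/notifier_block/netdev_net_notifier triple must be passed to the matching unregister. A sketch of the pairing, keeping the storage alongside the notifier_block as this patch does (my_ctx and my_event_handler are stand-ins):

struct my_ctx {                        /* illustrative container */
        struct notifier_block      nb;
        struct netdev_net_notifier nn; /* must outlive the registration */
};

static int my_init(struct net_device *netdev, struct my_ctx *ctx)
{
        ctx->nb.notifier_call = my_event_handler;
        return register_netdevice_notifier_dev_net(netdev, &ctx->nb, &ctx->nn);
}

static void my_cleanup(struct net_device *netdev, struct my_ctx *ctx)
{
        unregister_netdevice_notifier_dev_net(netdev, &ctx->nb, &ctx->nn);
}
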
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2565ba8692d9..ee60383adc5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
i = 0;
do {
+ struct mlx5e_tx_wqe_info *wi;
u16 wqe_counter;
bool last_wqe;
+ u16 ci;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
- if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
- &sq->state)) {
- struct mlx5e_tx_wqe_info *wi;
- u16 ci;
-
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- wi = &sq->db.wqe_info[ci];
- mlx5e_dump_error_cqe(sq,
- (struct mlx5_err_cqe *)cqe);
- mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->channel->priv->wq,
- &sq->recover_work);
- }
- stats->cqe_err++;
- }
-
do {
- struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
- u16 ci;
int j;
last_wqe = (sqcc == wqe_counter);
@@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
napi_consume_skb(skb, napi_budget);
} while (!last_wqe);
+ if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+ &sq->state)) {
+ mlx5e_dump_error_cqe(sq,
+ (struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+ queue_work(cq->channel->priv->wq,
+ &sq->recover_work);
+ }
+ stats->cqe_err++;
+ }
+
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
stats->cqes += i;
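
Same reordering idea as the ICOSQ fix above: the error CQE is now handled after the loop that frees every SKB the CQE completes, and ci/wi were hoisted to the outer scope so the dump can still reference the last WQE of the failed request. The control flow, reduced to a sketch (mlx5 types and helpers elided):

do {
        last_wqe = (sqcc == wqe_counter);
        ci = ctr2ix(sqcc);               /* hoisted: survives the loop */
        wi = &wqe_info[ci];
        /* ... unmap DMA, consume skb, sqcc += wi->num_wqebbs ... */
} while (!last_wqe);

if (cqe_opcode(cqe) == CQE_REQ_ERR) {    /* after all WQEs consumed */
        dump_error_cqe(cqe);
        dump_wqe(ci, wi->num_wqebbs);    /* last WQE of the request */
        queue_recovery_once();
}
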
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 580c71cb9dfa..cccea3a8eddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -156,7 +156,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
- mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+ dev_dbg_ratelimited(eq->dev->device,
+ "Completion event for bogus CQ 0x%x\n", cqn);
}
++eq->cons_index;
@@ -563,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
gather_user_async_events(dev, mask);
}
+static int
+setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
+ struct mlx5_eq_param *param, const char *name)
+{
+ int err;
+
+ eq->irq_nb.notifier_call = mlx5_eq_async_int;
+
+ err = create_async_eq(dev, &eq->core, param);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
+ return err;
+ }
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+ if (err) {
+ mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
+ destroy_async_eq(dev, &eq->core);
+ }
+ return err;
+}
+
+static void cleanup_async_eq(struct mlx5_core_dev *dev,
+ struct mlx5_eq_async *eq, const char *name)
+{
+ int err;
+
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+ err = destroy_async_eq(dev, &eq->core);
+ if (err)
+ mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
+ name, err);
+}
+
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
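
create_async_eqs() below shrinks to three setup_async_eq() calls because the helper folds EQ creation, enabling, and the warn-on-failure path into one place, and cleanup_async_eq() is its exact mirror, so the error unwind is just the mirrors in reverse. The resulting shape, condensed (command-mode switching and the cq_err notifier elided, so not compilable as-is):

err = setup_async_eq(dev, &table->cmd_eq, &cmd_param, "cmd");
if (err)
        goto err1;
err = setup_async_eq(dev, &table->async_eq, &async_param, "async");
if (err)
        goto err2;
err = setup_async_eq(dev, &table->pages_eq, &pages_param, "pages");
if (err)
        goto err3;
return 0;

err3:
        cleanup_async_eq(dev, &table->async_eq, "async");
err2:
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
        return err;
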
@@ -572,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
- table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
+ .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
-
- param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
- err = create_async_eq(dev, &table->cmd_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
- goto err0;
- }
- err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
+ err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
+ if (err)
goto err1;
- }
+
mlx5_cmd_use_events(dev);
- table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
};
gather_async_events_mask(dev, param.mask);
- err = create_async_eq(dev, &table->async_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
+ err = setup_async_eq(dev, &table->async_eq, &param, "async");
+ if (err)
goto err2;
- }
- err = mlx5_eq_enable(dev, &table->async_eq.core,
- &table->async_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
- goto err3;
- }
- table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
+ .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
- param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
- err = create_async_eq(dev, &table->pages_eq.core, &param);
- if (err) {
- mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
- goto err4;
- }
- err = mlx5_eq_enable(dev, &table->pages_eq.core,
- &table->pages_eq.irq_nb);
- if (err) {
- mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
- goto err5;
- }
+ err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
+ if (err)
+ goto err3;
- return err;
+ return 0;
-err5:
- destroy_async_eq(dev, &table->pages_eq.core);
-err4:
- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
err3:
- destroy_async_eq(dev, &table->async_eq.core);
+ cleanup_async_eq(dev, &table->async_eq, "async");
err2:
mlx5_cmd_use_polling(dev);
- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+ cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
- destroy_async_eq(dev, &table->cmd_eq.core);
-err0:
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
@@ -650,28 +652,11 @@ err0:
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int err;
-
- mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
- err = destroy_async_eq(dev, &table->pages_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
- err);
-
- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
- err = destroy_async_eq(dev, &table->async_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
- err);
+ cleanup_async_eq(dev, &table->pages_eq, "pages");
+ cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_use_polling(dev);
-
- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
- err = destroy_async_eq(dev, &table->cmd_eq.core);
- if (err)
- mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
- err);
-
+ cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2c965ad0d744..5acf60b1bbfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -277,6 +277,7 @@ enum {
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
}
/* num FTE 2, num FG 2 */
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
- 2, 2, 0, 0);
+ ft_attr.prio = LEGACY_VEPA_PRIO;
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
@@ -1928,8 +1931,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int i;
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->info, 0, sizeof(vport->info));
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ }
}
/* Public E-Switch API */
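
The two added lines matter because in this enum the all-zeroes value is not AUTO, so the bare memset() was silently leaving cleared VFs in a different admin link state. A generic user-space illustration of the pitfall:

#include <string.h>

enum admin_state { STATE_DOWN = 0, STATE_UP = 1, STATE_AUTO = 2 };

struct vf_info {
        int vlan;
        enum admin_state link_state;   /* default should be AUTO, not 0 */
};

/* memset() can only produce the all-zeroes state; any non-zero default
 * has to be re-applied by hand afterwards. */
static void clear_vf_info(struct vf_info *info)
{
        memset(info, 0, sizeof(*info));
        info->link_state = STATE_AUTO;
}
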
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ffcff3ba3701..4472710ccc9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -157,7 +157,7 @@ enum offloads_fdb_flags {
ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};
-extern const unsigned int ESW_POOLS[4];
+struct mlx5_esw_chains_priv;
struct mlx5_eswitch_fdb {
union {
@@ -182,14 +182,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
- struct {
- struct mlx5_flow_table *fdb;
- u32 num_rules;
- } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
- /* Protects fdb_prio table */
- struct mutex fdb_prio_lock;
-
- int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+ struct mlx5_esw_chains_priv *esw_chains_priv;
} offloads;
};
u32 flags;
@@ -355,15 +348,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr);
-bool
-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
-
-u16
-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
-
-u32
-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
-
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest);
@@ -388,6 +372,11 @@ enum {
MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};
+enum {
+ MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+ MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+};
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
@@ -401,7 +390,6 @@ struct mlx5_esw_flow_attr {
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
- bool vlan_handled;
struct {
u32 flags;
struct mlx5_eswitch_rep *rep;
@@ -416,6 +404,7 @@ struct mlx5_esw_flow_attr {
u32 chain;
u16 prio;
u32 dest_chain;
+ u32 flags;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
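
Folding vlan_handled into a flags word lets per-rule attributes like VLAN_HANDLED and SLOW_PATH share one u32 and grow without touching the struct layout again. The standard bit-ops idioms, as used throughout the rest of this patch (use_tc_end_ft is a stand-in):

attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;    /* set on success   */
attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;   /* clear on prepare */
if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)    /* test before use  */
        use_tc_end_ft();
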
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 243a5440867e..979f13bdc203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -47,10 +48,6 @@
* one for multicast.
*/
#define MLX5_ESW_MISS_FLOWS (2)
-
-#define fdb_prio_table(esw, chain, prio, level) \
- (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
-
#define UPLINK_REP_INDEX 0
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
@@ -62,32 +59,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-
-bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
-{
- return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
-}
-
-u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_TC_MAX_CHAIN;
-
- return 0;
-}
-
-u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
-{
- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
- return FDB_TC_MAX_PRIO;
-
- return 1;
-}
-
static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
@@ -175,10 +146,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- if (attr->dest_chain) {
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *ft;
- ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+ i++;
+ } else if (attr->dest_chain) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
+ 1, 0);
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_create_goto_table;
@@ -223,7 +201,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
+ fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+ !!split);
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -242,10 +221,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
- if (attr->dest_chain)
- esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
+ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
return rule;
}
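
mlx5_esw_chains_get_table()/put_table() keep a reference count per (chain, prio, level) table, replacing the open-coded fdb_prio_table bookkeeping removed later in this patch; every successful get on a rule's add path must be undone on both the error unwind and the delete path. The discipline, reduced to a sketch:

/* Add path: */
fdb = mlx5_esw_chains_get_table(esw, chain, prio, level);
if (IS_ERR(fdb))
        return ERR_CAST(fdb);

rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, ndest);
if (IS_ERR(rule))
        mlx5_esw_chains_put_table(esw, chain, prio, level); /* undo get */

/* Delete path must mirror the adds: */
mlx5_del_flow_rules(rule);
mlx5_esw_chains_put_table(esw, chain, prio, level);
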
@@ -262,13 +241,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;
int i;
- fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+ fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
- fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+ fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -296,6 +275,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
@@ -305,9 +285,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
- esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
return rule;
}
@@ -332,12 +312,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
- esw_put_prio_table(esw, attr->chain, attr->prio, 1);
- esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
- esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+ !!split);
if (attr->dest_chain)
- esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
}
@@ -451,7 +432,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (err)
goto unlock;
- attr->vlan_handled = false;
+ attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
vport = esw_vlan_action_get_vport(attr, push, pop);
@@ -459,7 +440,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
/* tracks VF --> wire rules without vlan push action */
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
- attr->vlan_handled = true;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}
goto unlock;
@@ -490,7 +471,7 @@ skip_set_push:
}
out:
if (!err)
- attr->vlan_handled = true;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
mutex_unlock(&esw->state_lock);
return err;
@@ -508,7 +489,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
- if (!attr->vlan_handled)
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -582,8 +563,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
dest.vport.num = vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
@@ -824,8 +805,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
@@ -839,8 +820,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
- &flow_act, &dest, 1);
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
@@ -855,174 +836,6 @@ out:
return err;
}
-#define ESW_OFFLOADS_NUM_GROUPS 4
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via put/get_sz_to_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
- 64 * 1024, 4 * 1024 };
-
-static int
-get_sz_from_pool(struct mlx5_eswitch *esw)
-{
- int sz = 0, i;
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
- if (esw->fdb_table.offloads.fdb_left[i]) {
- --esw->fdb_table.offloads.fdb_left[i];
- sz = ESW_POOLS[i];
- break;
- }
- }
-
- return sz;
-}
-
-static void
-put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
- if (sz >= ESW_POOLS[i]) {
- ++esw->fdb_table.offloads.fdb_left[i];
- break;
- }
- }
-}
-
-static struct mlx5_flow_table *
-create_next_size_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_namespace *ns,
- u16 table_prio,
- int level,
- u32 flags)
-{
- struct mlx5_flow_table *fdb;
- int sz;
-
- sz = get_sz_from_pool(esw);
- if (!sz)
- return ERR_PTR(-ENOSPC);
-
- fdb = mlx5_create_auto_grouped_flow_table(ns,
- table_prio,
- sz,
- ESW_OFFLOADS_NUM_GROUPS,
- level,
- flags);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
- (int)PTR_ERR(fdb), table_prio, level, sz);
- put_sz_to_pool(esw, sz);
- }
-
- return fdb;
-}
-
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_table *fdb = NULL;
- struct mlx5_flow_namespace *ns;
- int table_prio, l = 0;
- u32 flags = 0;
-
- if (chain == FDB_TC_SLOW_PATH_CHAIN)
- return esw->fdb_table.offloads.slow_fdb;
-
- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
- fdb = fdb_prio_table(esw, chain, prio, level).fdb;
- if (fdb) {
- /* take ref on earlier levels as well */
- while (level >= 0)
- fdb_prio_table(esw, chain, prio, level--).num_rules++;
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return fdb;
- }
-
- ns = mlx5_get_fdb_sub_ns(dev, chain);
- if (!ns) {
- esw_warn(dev, "Failed to get FDB sub namespace\n");
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
- table_prio = prio - 1;
-
- /* create earlier levels for correct fs_core lookup when
- * connecting tables
- */
- for (l = 0; l <= level; l++) {
- if (fdb_prio_table(esw, chain, prio, l).fdb) {
- fdb_prio_table(esw, chain, prio, l).num_rules++;
- continue;
- }
-
- fdb = create_next_size_table(esw, ns, table_prio, l, flags);
- if (IS_ERR(fdb)) {
- l--;
- goto err_create_fdb;
- }
-
- fdb_prio_table(esw, chain, prio, l).fdb = fdb;
- fdb_prio_table(esw, chain, prio, l).num_rules = 1;
- }
-
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- return fdb;
-
-err_create_fdb:
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
- if (l >= 0)
- esw_put_prio_table(esw, chain, prio, l);
-
- return fdb;
-}
-
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
- int l;
-
- if (chain == FDB_TC_SLOW_PATH_CHAIN)
- return;
-
- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
- for (l = level; l >= 0; l--) {
- if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
- continue;
-
- put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
- mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
- fdb_prio_table(esw, chain, prio, l).fdb = NULL;
- }
-
- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-}
-
-static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
-{
- /* If lazy creation isn't supported, deref the fast path tables */
- if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
- esw_put_prio_table(esw, 0, 1, 1);
- esw_put_prio_table(esw, 0, 1, 0);
- }
-}
-
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -1055,16 +868,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
- u32 *flow_group_in, max_flow_counter;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int table_size, ix, err = 0, i;
+ u32 flags = 0, *flow_group_in;
+ int table_size, ix, err = 0;
struct mlx5_flow_group *g;
- u32 flags = 0, fdb_max;
void *match_criteria;
u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
+
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1083,19 +896,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
- fdb_max);
-
- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
- esw->fdb_table.offloads.fdb_left[i] =
- ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports;
@@ -1118,16 +918,10 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- /* If lazy creation isn't supported, open the fast path tables now */
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
- esw_get_prio_table(esw, 0, 1, 0);
- esw_get_prio_table(esw, 0, 1, 1);
- } else {
- esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ err = mlx5_esw_chains_create(esw);
+ if (err) {
+ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ goto fdb_chains_err;
}
/* create send-to-vport group */
@@ -1218,7 +1012,8 @@ miss_err:
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- esw_destroy_offloads_fast_fdb_tables(esw);
+ mlx5_esw_chains_destroy(esw);
+fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
@@ -1240,8 +1035,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ mlx5_esw_chains_destroy(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
- esw_destroy_offloads_fast_fdb_tables(esw);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
@@ -1377,7 +1172,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -2111,7 +1906,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
- mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
@@ -2220,7 +2014,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
- int err;
+ struct mlx5_vport *vport;
+ int err, i;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2237,6 +2032,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
+ /* Representor will control the vport link state */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_vports;
@@ -2266,7 +2065,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
new file mode 100644
index 000000000000..c5a446e295aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "eswitch_offloads_chains.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+
+#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
+#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
+#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
+#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
+#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
+#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
+#define fdb_ignore_flow_level_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
+
+#define ESW_OFFLOADS_NUM_GROUPS 4
+
+/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS) and a virtual
+ * memory region of 16M (ESW_SIZE); this region is duplicated for each flow
+ * table pool. We can allocate up to 16M from each pool, and we keep track of
+ * how much we have used via mlx5_esw_chains_get_avail_sz_from_pool() and
+ * mlx5_esw_chains_put_sz_to_pool().
+ * Firmware doesn't report any of this for now.
+ * ESW_POOLS is expected to be sorted from large to small and to match the
+ * firmware pools.
+ */
+#define ESW_SIZE (16 * 1024 * 1024)
+static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 4 * 1024, };
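For reference, the budget the comment above describes works out as follows; this is arithmetic on ESW_SIZE and ESW_POOLS, not code from this patch:

	/* ESW_SIZE / ESW_POOLS[i] tables can be carved out of each pool,
	 * provided the pool size fits log_max_ft_size:
	 *   4M  pool: 16M / 4M  =    4 tables
	 *   1M  pool: 16M / 1M  =   16 tables
	 *   64K pool: 16M / 64K =  256 tables
	 *   4K  pool: 16M / 4K  = 4096 tables
	 */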
+
+struct mlx5_esw_chains_priv {
+ struct rhashtable chains_ht;
+ struct rhashtable prios_ht;
+ /* Protects above chains_ht and prios_ht */
+ struct mutex lock;
+
+ struct mlx5_flow_table *tc_end_fdb;
+
+ int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+};
+
+struct fdb_chain {
+ struct rhash_head node;
+
+ u32 chain;
+
+ int ref;
+
+ struct mlx5_eswitch *esw;
+ struct list_head prios_list;
+};
+
+struct fdb_prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct fdb_prio {
+ struct rhash_head node;
+ struct list_head list;
+
+ struct fdb_prio_key key;
+
+ int ref;
+
+ struct fdb_chain *fdb_chain;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *next_fdb;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+ .head_offset = offsetof(struct fdb_chain, node),
+ .key_offset = offsetof(struct fdb_chain, chain),
+ .key_len = sizeof_field(struct fdb_chain, chain),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+ .head_offset = offsetof(struct fdb_prio, node),
+ .key_offset = offsetof(struct fdb_prio, key),
+ .key_len = sizeof_field(struct fdb_prio, key),
+ .automatic_shrinking = true,
+};
+
+bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return 1;
+
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX - 1;
+
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_chains_get_chain_range(esw) + 1;
+}
+
+u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return 1;
+
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX;
+
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
+{
+ if (fdb_ignore_flow_level_supported(esw))
+ return UINT_MAX;
+
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+#define POOL_NEXT_SIZE 0
+static int
+mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
+ int desired_size)
+{
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+ if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --fdb_pool_left(esw)[found_i];
+ return ESW_POOLS[found_i];
+ }
+
+ return 0;
+}
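The loop above walks the pools from smallest to largest, which gives the two calling modes different behavior. A hedged usage sketch (my reading of the loop, not code from this patch):

	int sz;

	/* POOL_NEXT_SIZE (0) keeps scanning and returns the largest pool that
	 * still has entries; a non-zero desired_size stops at the smallest
	 * pool strictly larger than the request.
	 */
	sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE); /* largest available */
	sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, 64 * 1024);      /* 1M, the next size up */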
+
+static void
+mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
+ if (sz == ESW_POOLS[i]) {
+ ++fdb_pool_left(esw)[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
+}
+
+static void
+mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
+{
+ u32 fdb_max;
+ int i;
+
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
+
+ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
+ fdb_pool_left(esw)[i] =
+ ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
+}
+
+static struct mlx5_flow_table *
+mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ int sz;
+
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+ ft_attr.max_fte = sz;
+
+ /* We use tc_slow_fdb(esw) as the table's next_ft until
+ * ignore_flow_level is allowed on FT creation and not just for FTEs.
+ * Instead, the caller should add an explicit miss rule if needed.
+ */
+ ft_attr.next_ft = tc_slow_fdb(esw);
+
+ /* The root table (chain 0, prio 1, level 0) is required to be
+ * connected to the previous prio (FDB_BYPASS_PATH if it exists).
+ * We always create it as a managed table, in order to align with
+ * fs_core logic.
+ */
+ if (!fdb_ignore_flow_level_supported(esw) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = level;
+ ft_attr.prio = prio - 1;
+ ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1.
+ *
+ * To connect them, we use explicit miss rules with
+ * ignore_flow_level. The caller is responsible for creating
+ * these rules (if needed).
+ */
+ ft_attr.level = 1;
+ ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev,
+ "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(fdb), chain, prio, level, sz);
+ mlx5_esw_chains_put_sz_to_pool(esw, sz);
+ return fdb;
+ }
+
+ return fdb;
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb)
+{
+ mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
+ mlx5_destroy_flow_table(fdb);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+ struct fdb_chain *fdb_chain = NULL;
+ int err;
+
+ fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
+ if (!fdb_chain)
+ return ERR_PTR(-ENOMEM);
+
+ fdb_chain->esw = esw;
+ fdb_chain->chain = chain;
+ INIT_LIST_HEAD(&fdb_chain->prios_list);
+
+ err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
+ chain_params);
+ if (err)
+ goto err_insert;
+
+ return fdb_chain;
+
+err_insert:
+ kvfree(fdb_chain);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
+{
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+
+ rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
+ chain_params);
+ kvfree(fdb_chain);
+}
+
+static struct fdb_chain *
+mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
+{
+ struct fdb_chain *fdb_chain;
+
+ fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
+ chain_params);
+ if (!fdb_chain) {
+ fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
+ if (IS_ERR(fdb_chain))
+ return fdb_chain;
+ }
+
+ fdb_chain->ref++;
+
+ return fdb_chain;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+ struct mlx5_flow_table *next_fdb)
+{
+ static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_fdb;
+
+ return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
+}
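This helper is what stitches the chained tables together. A hedged caller-side sketch (table pointers are placeholders, error handling trimmed):

	struct mlx5_flow_handle *miss;

	/* Catch-all entry (empty spec) forwarding misses from fdb to
	 * next_fdb; FLOW_ACT_IGNORE_FLOW_LEVEL lets next_fdb sit at a
	 * lower or equal level.
	 */
	miss = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
	if (IS_ERR(miss))
		return PTR_ERR(miss);

	/* on teardown */
	mlx5_del_flow_rules(miss);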
+
+static int
+mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
+ struct mlx5_flow_table *next_fdb)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+ struct fdb_prio *pos;
+ int n = 0, err;
+
+ if (fdb_prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = fdb_prio;
+ list_for_each_entry_continue_reverse(pos,
+ &fdb_chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+ next_fdb);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = fdb_prio;
+ list_for_each_entry_continue_reverse(pos,
+ &fdb_chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_fdb = next_fdb;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(miss_rules[n]);
+
+ return err;
+}
+
+static void
+mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
+{
+ if (--fdb_chain->ref == 0)
+ mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
+}
+
+static struct fdb_prio *
+mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_group *miss_group;
+ struct fdb_prio *fdb_prio = NULL;
+ struct mlx5_flow_table *next_fdb;
+ struct fdb_chain *fdb_chain;
+ struct mlx5_flow_table *fdb;
+ struct list_head *pos;
+ u32 *flow_group_in;
+ int err;
+
+ fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
+ if (IS_ERR(fdb_chain))
+ return ERR_CAST(fdb_chain);
+
+ fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!fdb_prio || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* The chain's prio list is sorted by prio and level,
+ * and all levels of a given prio point to the next prio's level 0.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+ * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ tc_slow_fdb(esw) :
+ tc_end_fdb(esw);
+ list_for_each(pos, &fdb_chain->prios_list) {
+ struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
+ break;
+ }
+ }
+
+ fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ goto err_create;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ fdb->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ fdb->max_fte - 1);
+ miss_group = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_fdb */
+ miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ fdb_prio->miss_group = miss_group;
+ fdb_prio->miss_rule = miss_rule;
+ fdb_prio->next_fdb = next_fdb;
+ fdb_prio->fdb_chain = fdb_chain;
+ fdb_prio->key.chain = chain;
+ fdb_prio->key.prio = prio;
+ fdb_prio->key.level = level;
+ fdb_prio->fdb = fdb;
+
+ err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&fdb_prio->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return fdb_prio;
+
+err_update:
+ list_del(&fdb_prio->list);
+ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+err_insert:
+ mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_esw_chains_destroy_fdb_table(esw, fdb);
+err_create:
+err_alloc:
+ kvfree(fdb_prio);
+ kvfree(flow_group_in);
+ mlx5_esw_chains_put_fdb_chain(fdb_chain);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
+ struct fdb_prio *fdb_prio)
+{
+ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
+
+ WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
+ fdb_prio->next_fdb));
+
+ list_del(&fdb_prio->list);
+ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
+ prio_params);
+ mlx5_del_flow_rules(fdb_prio->miss_rule);
+ mlx5_destroy_flow_group(fdb_prio->miss_group);
+ mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
+ mlx5_esw_chains_put_fdb_chain(fdb_chain);
+ kvfree(fdb_prio);
+}
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct fdb_prio *fdb_prio;
+ struct fdb_prio_key key;
+ int l = 0;
+
+ if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
+ chain != mlx5_esw_chains_get_ft_chain(esw)) ||
+ prio > mlx5_esw_chains_get_prio_range(esw) ||
+ level > mlx5_esw_chains_get_level_range(esw))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ fdb_prio = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&esw_chains_lock(esw));
+ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+ prio_params);
+ if (!fdb_prio) {
+ fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
+ prio, level);
+ if (IS_ERR(fdb_prio))
+ goto err_create_prio;
+ }
+
+ ++fdb_prio->ref;
+ mutex_unlock(&esw_chains_lock(esw));
+
+ return fdb_prio->fdb;
+
+err_create_prio:
+ mutex_unlock(&esw_chains_lock(esw));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_esw_chains_put_table(esw, chain, prio, l);
+ return ERR_CAST(fdb_prio);
+}
+
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level)
+{
+ struct fdb_prio *fdb_prio;
+ struct fdb_prio_key key;
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&esw_chains_lock(esw));
+ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
+ prio_params);
+ if (!fdb_prio)
+ goto err_get_prio;
+
+ if (--fdb_prio->ref == 0)
+ mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
+ mutex_unlock(&esw_chains_lock(esw));
+
+ while (level-- > 0)
+ mlx5_esw_chains_put_table(esw, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&esw_chains_lock(esw));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
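The pair above is reference counted per (chain, prio, level) key. A hedged usage sketch showing the expected balancing (chain/prio values are placeholders):

	struct mlx5_flow_table *ft;

	ft = mlx5_esw_chains_get_table(esw, chain, prio, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	/* ... insert flow rules into ft ... */

	mlx5_esw_chains_put_table(esw, chain, prio, 0); /* balances the get */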
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
+{
+ return tc_end_fdb(esw);
+}
+
+static int
+mlx5_esw_chains_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_chains_priv *chains_priv;
+ struct mlx5_core_dev *dev = esw->dev;
+ u32 max_flow_counter, fdb_max;
+ int err;
+
+ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+ if (!chains_priv)
+ return -ENOMEM;
+ esw_chains_priv(esw) = chains_priv;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_debug(dev,
+ "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+ max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+
+ mlx5_esw_chains_init_sz_pool(esw);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else {
+ esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+ mlx5_esw_chains_get_chain_range(esw),
+ mlx5_esw_chains_get_prio_range(esw));
+ }
+
+ err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
+ if (err)
+ goto init_chains_ht_err;
+
+ err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
+ if (err)
+ goto init_prios_ht_err;
+
+ mutex_init(&esw_chains_lock(esw));
+
+ return 0;
+
+init_prios_ht_err:
+ rhashtable_destroy(&esw_chains_ht(esw));
+init_chains_ht_err:
+ kfree(chains_priv);
+ return err;
+}
+
+static void
+mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
+{
+ mutex_destroy(&esw_chains_lock(esw));
+ rhashtable_destroy(&esw_prios_ht(esw));
+ rhashtable_destroy(&esw_chains_ht(esw));
+
+ kfree(esw_chains_priv(esw));
+}
+
+static int
+mlx5_esw_chains_open(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+
+ /* Create tc_end_fdb(esw), the ft chain table that is always created */
+ ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
+ 1, 0);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ tc_end_fdb(esw) = ft;
+
+ /* Always open the root for fast path */
+ ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_0_err;
+ }
+
+ /* Open level 1 for split rules now if prios aren't supported */
+ if (!mlx5_esw_chains_prios_supported(esw)) {
+ ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
+
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_1_err;
+ }
+ }
+
+ return 0;
+
+level_1_err:
+ mlx5_esw_chains_put_table(esw, 0, 1, 0);
+level_0_err:
+ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+ return err;
+}
+
+static void
+mlx5_esw_chains_close(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_chains_prios_supported(esw))
+ mlx5_esw_chains_put_table(esw, 0, 1, 1);
+ mlx5_esw_chains_put_table(esw, 0, 1, 0);
+ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
+}
+
+int
+mlx5_esw_chains_create(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = mlx5_esw_chains_init(esw);
+ if (err)
+ return err;
+
+ err = mlx5_esw_chains_open(esw);
+ if (err)
+ goto err_open;
+
+ return 0;
+
+err_open:
+ mlx5_esw_chains_cleanup(esw);
+ return err;
+}
+
+void
+mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
+{
+ mlx5_esw_chains_close(esw);
+ mlx5_esw_chains_cleanup(esw);
+}
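Taken together, callers drive the whole module through the two entry points above; a minimal lifecycle sketch, matching how esw_create_offloads_fdb_tables() uses them earlier in this patch:

	err = mlx5_esw_chains_create(esw); /* init pools/hashtables, open default tables */
	if (err)
		return err;

	/* ... offloads mode is active ... */

	mlx5_esw_chains_destroy(esw); /* close default tables, free everything */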
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
new file mode 100644
index 000000000000..2e13097fe348
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __ML5_ESW_CHAINS_H__
+#define __ML5_ESW_CHAINS_H__
+
+bool
+mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+
+int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
+void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
+
+#endif /* __ML5_ESW_CHAINS_H__ */
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 366bda1bb1c3..dc08ed9339ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_flow_act *flow_act)
{
static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int prio, flags;
int err;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action, the termination table is at the
* same prio as the slow path
*/
- prio = FDB_SLOW_PATH;
- flags = MLX5_FLOW_TABLE_TERMINATION;
- tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
- 0, flags);
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index e4ec0e03c289..4c61d25d2e88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
mutex_lock(&fpga_xfrm->lock);
if (!--fpga_xfrm->num_rules) {
mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+ kfree(fpga_xfrm->sa_ctx);
fpga_xfrm->sa_ctx = NULL;
}
mutex_unlock(&fpga_xfrm->lock);
@@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
- if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 3c816e81f8d9..b25465d9e030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -432,6 +432,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ MLX5_SET(set_fte_in, in, ignore_flow_level,
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9a48c4310887..9dc24241dc91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node)
}
}
-static void del_sw_fte_rcu(struct rcu_head *head)
-{
- struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
- struct mlx5_flow_steering *steering = get_steering(&fte->node);
-
- kmem_cache_free(steering->ftes_cache, fte);
-}
-
static void del_sw_fte(struct fs_node *node)
{
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
struct fs_fte *fte;
int err;
@@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-
- call_rcu(&fte->rcu, del_sw_fte_rcu);
+ kmem_cache_free(steering->ftes_cache, fte);
}
static void del_hw_flow_group(struct fs_node *node)
@@ -587,7 +579,9 @@ static void del_sw_flow_group(struct fs_node *node)
rhashtable_destroy(&fg->ftes_hash);
ida_destroy(&fg->fte_allocator);
- if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
+ if (ft->autogroup.active &&
+ fg->max_ftes == ft->autogroup.group_size &&
+ fg->start_index < ft->autogroup.max_fte)
ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
@@ -1014,7 +1008,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
u16 vport)
{
struct mlx5_flow_root_namespace *root = find_root(&ns->node);
- struct mlx5_flow_table *next_ft = NULL;
+ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
+ struct mlx5_flow_table *next_ft;
struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
int log_table_sz;
@@ -1031,14 +1026,21 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = -EINVAL;
goto unlock_root;
}
- if (ft_attr->level >= fs_prio->num_levels) {
- err = -ENOSPC;
- goto unlock_root;
+ if (!unmanaged) {
+ /* The level is related to the
+ * priority level range.
+ */
+ if (ft_attr->level >= fs_prio->num_levels) {
+ err = -ENOSPC;
+ goto unlock_root;
+ }
+
+ ft_attr->level += fs_prio->start_level;
}
+
/* The level is related to the
* priority level range.
*/
- ft_attr->level += fs_prio->start_level;
ft = alloc_flow_table(ft_attr->level,
vport,
ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
@@ -1051,19 +1053,27 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
- next_ft = find_next_chained_ft(fs_prio);
+ next_ft = unmanaged ? ft_attr->next_ft :
+ find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
- err = connect_flow_table(root->dev, ft, fs_prio);
- if (err)
- goto destroy_ft;
+ if (!unmanaged) {
+ err = connect_flow_table(root->dev, ft, fs_prio);
+ if (err)
+ goto destroy_ft;
+ }
+
ft->node.active = true;
down_write_ref_node(&fs_prio->node, false);
- tree_add_node(&ft->node, &fs_prio->node);
- list_add_flow_table(ft, fs_prio);
+ if (!unmanaged) {
+ tree_add_node(&ft->node, &fs_prio->node);
+ list_add_flow_table(ft, fs_prio);
+ } else {
+ ft->node.root = fs_prio->node.root;
+ }
fs_prio->num_ft++;
up_write_ref_node(&fs_prio->node, false);
mutex_unlock(&root->chain_lock);
@@ -1111,31 +1121,27 @@ EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags)
+ struct mlx5_flow_table_attr *ft_attr)
{
- struct mlx5_flow_table_attr ft_attr = {};
+ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
+ int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
+ int max_num_groups = ft_attr->autogroup.max_num_groups;
struct mlx5_flow_table *ft;
- if (max_num_groups > num_flow_table_entries)
+ if (max_num_groups > autogroups_max_fte)
+ return ERR_PTR(-EINVAL);
+ if (num_reserved_entries > ft_attr->max_fte)
return ERR_PTR(-EINVAL);
- ft_attr.max_fte = num_flow_table_entries;
- ft_attr.prio = prio;
- ft_attr.level = level;
- ft_attr.flags = flags;
-
- ft = mlx5_create_flow_table(ns, &ft_attr);
+ ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
+ ft->autogroup.max_fte = autogroups_max_fte;
/* We reserve space for flow groups in addition to the max types */
- ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
+ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
return ft;
}
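A worked example of the sizing above, with illustrative numbers only:

	/* max_fte = 18, autogroup.num_reserved_entries = 2, max_num_groups = 4:
	 *   autogroups_max_fte = 18 - 2 = 16
	 *   group_size         = 16 / (4 + 1) = 3
	 * FTE indices 16..17 stay outside the autogrouped range, available to
	 * explicit mlx5_create_flow_group() callers such as the chains miss
	 * group in this patch.
	 */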
@@ -1157,7 +1163,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg;
int err;
- if (ft->autogroup.active)
+ if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
return ERR_PTR(-EPERM);
down_write_ref_node(&ft->node, false);
@@ -1330,9 +1336,10 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
const struct mlx5_flow_spec *spec)
{
struct list_head *prev = &ft->node.children;
- struct mlx5_flow_group *fg;
+ u32 max_fte = ft->autogroup.max_fte;
unsigned int candidate_index = 0;
unsigned int group_size = 0;
+ struct mlx5_flow_group *fg;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
@@ -1340,7 +1347,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
group_size = ft->autogroup.group_size;
- /* ft->max_fte == ft->autogroup.max_types */
+ /* max_fte == ft->autogroup.max_types */
if (group_size == 0)
group_size = 1;
@@ -1353,7 +1360,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte)
+ if (candidate_index + group_size > max_fte)
return ERR_PTR(-ENOSPC);
fg = alloc_insert_flow_group(ft,
@@ -1537,18 +1544,30 @@ static bool counter_is_valid(u32 action)
}
static bool dest_is_valid(struct mlx5_flow_destination *dest,
- u32 action,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_table *ft)
{
+ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
+ u32 action = flow_act->action;
+
if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
return counter_is_valid(action);
if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return true;
+ if (ignore_level) {
+ if (ft->type != FS_FT_FDB)
+ return false;
+
+ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ dest->ft->type != FS_FT_FDB)
+ return false;
+ }
+
if (!dest || ((dest->type ==
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
- (dest->ft->level <= ft->level)))
+ (dest->ft->level <= ft->level && !ignore_level)))
return false;
return true;
}
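With this check, a forward to a lower (or same) level table is accepted only on the FDB and only when the rule opts in. A hedged sketch of such a rule, mirroring the chains miss rule added earlier in this patch:

	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act act = {};

	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	act.flags  = FLOW_ACT_IGNORE_FLOW_LEVEL; /* permit dest->ft->level <= ft->level */
	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft    = lower_level_fdb;            /* must itself be an FDB table */

	handle = mlx5_add_flow_rules(fdb, spec, &act, &dest, 1);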
@@ -1563,16 +1582,16 @@ struct match_list_head {
struct match_list first;
};
-static void free_match_list(struct match_list_head *head)
+static void free_match_list(struct match_list_head *head, bool ft_locked)
{
if (!list_empty(&head->list)) {
struct match_list *iter, *match_tmp;
list_del(&head->first.list);
- tree_put_node(&head->first.g->node, false);
+ tree_put_node(&head->first.g->node, ft_locked);
list_for_each_entry_safe(iter, match_tmp, &head->list,
list) {
- tree_put_node(&iter->g->node, false);
+ tree_put_node(&iter->g->node, ft_locked);
list_del(&iter->list);
kfree(iter);
}
@@ -1581,7 +1600,8 @@ static void free_match_list(struct match_list_head *head)
static int build_match_list(struct match_list_head *match_head,
struct mlx5_flow_table *ft,
- const struct mlx5_flow_spec *spec)
+ const struct mlx5_flow_spec *spec,
+ bool ft_locked)
{
struct rhlist_head *tmp, *list;
struct mlx5_flow_group *g;
@@ -1606,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head,
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
- free_match_list(match_head);
+ free_match_list(match_head, ft_locked);
err = -ENOMEM;
goto out;
}
@@ -1633,47 +1653,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
}
static struct fs_fte *
-lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
-{
- struct fs_fte *fte_tmp;
-
- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
- if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
- fte_tmp = NULL;
- goto out;
- }
-
- if (!fte_tmp->node.active) {
- tree_put_node(&fte_tmp->node, false);
- fte_tmp = NULL;
- goto out;
- }
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
-out:
- up_write_ref_node(&g->node, false);
- return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
+lookup_fte_locked(struct mlx5_flow_group *g,
+ const u32 *match_value,
+ bool take_write)
{
struct fs_fte *fte_tmp;
- if (!tree_get_node(&g->node))
- return NULL;
-
- rcu_read_lock();
- fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+ if (take_write)
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ else
+ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+ rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
- rcu_read_unlock();
fte_tmp = NULL;
goto out;
}
- rcu_read_unlock();
-
if (!fte_tmp->node.active) {
tree_put_node(&fte_tmp->node, false);
fte_tmp = NULL;
@@ -1681,19 +1676,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
out:
- tree_put_node(&g->node, false);
- return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
-{
- if (write)
- return lookup_fte_for_write_locked(g, match_value);
+ if (take_write)
+ up_write_ref_node(&g->node, false);
else
- return lookup_fte_for_read_locked(g, match_value);
+ up_read_ref_node(&g->node);
+ return fte_tmp;
}
static struct mlx5_flow_handle *
@@ -1810,7 +1798,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
return ERR_PTR(-EINVAL);
for (i = 0; i < dest_num; i++) {
- if (!dest_is_valid(&dest[i], flow_act->action, ft))
+ if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
}
nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
@@ -1818,7 +1806,7 @@ search_again_locked:
version = atomic_read(&ft->node.version);
/* Collect all fgs which has a matching match_criteria */
- err = build_match_list(&match_head, ft, spec);
+ err = build_match_list(&match_head, ft, spec, take_write);
if (err) {
if (take_write)
up_write_ref_node(&ft->node, false);
@@ -1832,7 +1820,7 @@ search_again_locked:
rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
dest_num, version);
- free_match_list(&match_head);
+ free_match_list(&match_head, take_write);
if (!IS_ERR(rule) ||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
if (take_write)
@@ -2073,7 +2061,8 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
int err = 0;
mutex_lock(&root->chain_lock);
- err = disconnect_flow_table(ft);
+ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
+ err = disconnect_flow_table(ft);
if (err) {
mutex_unlock(&root->chain_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e8cd997f413e..be5f5e32c1e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -164,6 +164,7 @@ struct mlx5_flow_table {
unsigned int required_groups;
unsigned int group_size;
unsigned int num_groups;
+ unsigned int max_fte;
} autogroup;
/* Protect fwd_rules */
struct mutex lock;
@@ -203,7 +204,6 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
- struct rcu_head rcu;
int modify_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index a19790dee7b2..909a7f284614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -131,11 +131,11 @@ static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
MLX5_PCAM_REGS_5000_TO_507F);
}
-static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
+ enum mlx5_mcam_reg_groups group)
{
- return mlx5_query_mcam_reg(dev, dev->caps.mcam,
- MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
- MLX5_MCAM_REGS_FIRST_128);
+ return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
@@ -221,8 +221,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
- if (MLX5_CAP_GEN(dev, mcam_reg))
- mlx5_get_mcam_reg(dev);
+ if (MLX5_CAP_GEN(dev, mcam_reg)) {
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ }
if (MLX5_CAP_GEN(dev, qcam_reg))
mlx5_get_qcam_reg(dev);
@@ -239,12 +242,19 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (MLX5_CAP_GEN(dev, tls)) {
+ if (MLX5_CAP_GEN(dev, tls_tx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
}
+ if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 3ed8ab2d703d..56078b23f1a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -87,8 +87,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
- mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
- priv->max_nch, netdev->mtu);
+ mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
+ netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -419,6 +419,28 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
}
+/* The stats groups order is opposite to the order of the update_stats() calls */
+static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
+ &MLX5E_STATS_GRP(sw),
+ &MLX5E_STATS_GRP(qcnt),
+ &MLX5E_STATS_GRP(vnic_env),
+ &MLX5E_STATS_GRP(vport),
+ &MLX5E_STATS_GRP(802_3),
+ &MLX5E_STATS_GRP(2863),
+ &MLX5E_STATS_GRP(2819),
+ &MLX5E_STATS_GRP(phy),
+ &MLX5E_STATS_GRP(pcie),
+ &MLX5E_STATS_GRP(per_prio),
+ &MLX5E_STATS_GRP(pme),
+ &MLX5E_STATS_GRP(channels),
+ &MLX5E_STATS_GRP(per_port_buff_congest),
+};
+
+static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
+{
+ return ARRAY_SIZE(mlx5i_stats_grps);
+}
+
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
@@ -435,6 +457,8 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
+ .stats_grps = mlx5i_stats_grps,
+ .stats_grps_num = mlx5i_stats_grps_num,
};
/* mlx5i netdev NDOs */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index fc0d9583475d..b91eabc09fbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -586,7 +586,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier(&ldev->nb)) {
+ if (register_netdevice_notifier_dev_net(netdev, &ldev->nb,
+ &ldev->nn)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -599,7 +600,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev)
+void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev;
int i;
@@ -619,7 +620,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
- unregister_netdevice_notifier(&ldev->nb);
+ unregister_netdevice_notifier_dev_net(netdev, &ldev->nb,
+ &ldev->nn);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..316ab09e2664 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -44,6 +44,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ struct netdev_net_notifier nn;
struct lag_mp lag_mp;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index b70afa310ad2..416676c35b1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -200,8 +200,6 @@ static void mlx5_lag_fib_update(struct work_struct *work)
rtnl_lock();
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event,
fib_work->fen_info.fi);
@@ -259,8 +257,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
switch (event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 173e2c12e1c7..f554cfddcf4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
if (err)
goto err_load;
+ if (boot) {
+ err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+ if (err)
+ goto err_devlink_reg;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1210,6 +1216,9 @@ out:
return err;
err_reg_dev:
+ if (boot)
+ mlx5_devlink_unregister(priv_to_devlink(dev));
+err_devlink_reg:
mlx5_unload(dev);
err_load:
if (boot)
@@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
request_module_nowait(MLX5_IB_MOD);
- err = mlx5_devlink_register(devlink, &pdev->dev);
- if (err)
- goto clean_load;
-
err = mlx5_crdump_enable(dev);
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
return 0;
-clean_load:
- mlx5_unload_one(dev, true);
-
err_load_one:
mlx5_pci_close(dev);
pci_init_err:
@@ -1561,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index da67b28d6e23..fcce9e0fc82c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 004c56c2fc0c..6dec2a550a10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -677,9 +677,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
goto out_invalid_arg;
}
if (action->dest_tbl.tbl->level <= matcher->tbl->level) {
+ mlx5_core_warn_once(dmn->mdev,
+ "Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
- "Destination table level should be higher than source table\n");
- goto out_invalid_arg;
+ "Connecting table at level %d to a destination table at level %d\n",
+ matcher->tbl->level,
+ action->dest_tbl.tbl->level);
}
attr.final_icm_addr = rx_rule ?
action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr :
@@ -690,9 +693,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* get the relevant addresses */
if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
- ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
- action->dest_tbl.fw_tbl.ft->type,
- action->dest_tbl.fw_tbl.ft->id,
+ ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
+ action->dest_tbl.fw_tbl.type,
+ action->dest_tbl.fw_tbl.id,
&output);
if (!ret) {
action->dest_tbl.fw_tbl.tx_icm_addr =
@@ -982,8 +985,106 @@ dec_ref:
}
struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev)
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests)
+{
+ struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
+ struct mlx5dr_action **ref_actions;
+ struct mlx5dr_action *action;
+ bool reformat_req = false;
+ u32 num_of_ref = 0;
+ int ret;
+ int i;
+
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5dr_err(dmn, "Multiple destination support is for FDB only\n");
+ return NULL;
+ }
+
+ hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ if (!hw_dests)
+ return NULL;
+
+ ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (!ref_actions)
+ goto free_hw_dests;
+
+ for (i = 0; i < num_of_dests; i++) {
+ struct mlx5dr_action *reformat_action = dests[i].reformat;
+ struct mlx5dr_action *dest_action = dests[i].dest;
+
+ ref_actions[num_of_ref++] = dest_action;
+
+ switch (dest_action->action_type) {
+ case DR_ACTION_TYP_VPORT:
+ hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ hw_dests[i].vport.num = dest_action->vport.caps->num;
+ hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
+ if (reformat_action) {
+ reformat_req = true;
+ hw_dests[i].vport.reformat_id =
+ reformat_action->reformat.reformat_id;
+ ref_actions[num_of_ref++] = reformat_action;
+ hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ }
+ break;
+
+ case DR_ACTION_TYP_FT:
+ hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (dest_action->dest_tbl.is_fw_tbl)
+ hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
+ else
+ hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
+ break;
+
+ default:
+ mlx5dr_dbg(dmn, "Invalid multiple destinations action\n");
+ goto free_ref_actions;
+ }
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ goto free_ref_actions;
+
+ ret = mlx5dr_fw_create_md_tbl(dmn,
+ hw_dests,
+ num_of_dests,
+ reformat_req,
+ &action->dest_tbl.fw_tbl.id,
+ &action->dest_tbl.fw_tbl.group_id);
+ if (ret)
+ goto free_action;
+
+ refcount_inc(&dmn->refcount);
+
+ for (i = 0; i < num_of_ref; i++)
+ refcount_inc(&ref_actions[i]->refcount);
+
+ action->dest_tbl.is_fw_tbl = true;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ action->dest_tbl.fw_tbl.ref_actions = ref_actions;
+ action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
+
+ kfree(hw_dests);
+
+ return action;
+
+free_action:
+ kfree(action);
+free_ref_actions:
+ kfree(ref_actions);
+free_hw_dests:
+ kfree(hw_dests);
+ return NULL;
+}
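A hedged usage sketch for the new multi-destination API (the vport/table actions are placeholders created beforehand; note the function returns NULL, not an ERR_PTR, on failure):

	struct mlx5dr_action_dest dests[2] = {};
	struct mlx5dr_action *multi;

	dests[0].dest = vport_action;     /* DR_ACTION_TYP_VPORT */
	dests[0].reformat = encap_action; /* optional per-vport reformat */
	dests[1].dest = ft_action;        /* DR_ACTION_TYP_FT */

	multi = mlx5dr_action_create_mult_dest_tbl(dmn, dests, 2);
	if (!multi)
		return -EINVAL;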
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
+ struct mlx5_flow_table *ft)
{
struct mlx5dr_action *action;
@@ -992,8 +1093,11 @@ mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
return NULL;
action->dest_tbl.is_fw_tbl = 1;
- action->dest_tbl.fw_tbl.ft = ft;
- action->dest_tbl.fw_tbl.mdev = mdev;
+ action->dest_tbl.fw_tbl.type = ft->type;
+ action->dest_tbl.fw_tbl.id = ft->id;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
return action;
}
@@ -1213,58 +1317,85 @@ not_found:
}
static int
-dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
- __be64 *sw_action,
- __be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
- u8 offset, length, max_length, action;
+ u8 max_length;
u16 sw_field;
- u8 hw_opcode;
u32 data;
/* Get SW modify action data */
- action = MLX5_GET(set_action_in, sw_action, action_type);
- length = MLX5_GET(set_action_in, sw_action, length);
- offset = MLX5_GET(set_action_in, sw_action, offset);
sw_field = MLX5_GET(set_action_in, sw_action, field);
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
if (!hw_action_info) {
- mlx5dr_dbg(dmn, "Modify action invalid field given\n");
+ mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
}
max_length = hw_action_info->end - hw_action_info->start + 1;
- switch (action) {
- case MLX5_ACTION_TYPE_SET:
- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET;
- /* PRM defines that length zero specific length of 32bits */
- if (!length)
- length = 32;
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
- if (length + offset > max_length) {
- mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
- return -EINVAL;
- }
- break;
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
+ hw_action_info->hw_field);
- case MLX5_ACTION_TYPE_ADD:
- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD;
- offset = 0;
- length = max_length;
- break;
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
+ hw_action_info->start);
- default:
- mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
- return -EOPNOTSUPP;
+ /* PRM defines that length zero specifies a length of 32 bits */
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length,
+ max_length == 32 ? 0 : max_length);
+
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+
+ *ret_hw_info = hw_action_info;
+
+ return 0;
+}
+
+static int
+dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_hw_info)
+{
+ const struct dr_action_modify_field_conv *hw_action_info;
+ u8 offset, length, max_length;
+ u16 sw_field;
+ u32 data;
+
+ /* Get SW modify action data */
+ length = MLX5_GET(set_action_in, sw_action, length);
+ offset = MLX5_GET(set_action_in, sw_action, offset);
+ sw_field = MLX5_GET(set_action_in, sw_action, field);
+ data = MLX5_GET(set_action_in, sw_action, data);
+
+ /* Convert SW data to HW modify action format */
+ hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ if (!hw_action_info) {
+ mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
+ return -EINVAL;
+ }
+
+ /* PRM defines that length zero specifies a length of 32 bits */
+ length = length ? length : 32;
+
+ max_length = hw_action_info->end - hw_action_info->start + 1;
+
+ if (length + offset > max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
@@ -1283,48 +1414,236 @@ dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
}
static int
-dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn,
- const __be64 *sw_action)
+dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_dst_hw_info,
+ const struct dr_action_modify_field_conv **ret_src_hw_info)
+{
+ u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
+ const struct dr_action_modify_field_conv *hw_dst_action_info;
+ const struct dr_action_modify_field_conv *hw_src_action_info;
+ u16 src_field, dst_field;
+
+ /* Get SW modify action data */
+ src_field = MLX5_GET(copy_action_in, sw_action, src_field);
+ dst_field = MLX5_GET(copy_action_in, sw_action, dst_field);
+ src_offset = MLX5_GET(copy_action_in, sw_action, src_offset);
+ dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset);
+ length = MLX5_GET(copy_action_in, sw_action, length);
+
+ /* Convert SW data to HW modify action format */
+ hw_src_action_info = dr_action_modify_get_hw_info(src_field);
+ hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ if (!hw_src_action_info || !hw_dst_action_info) {
+ mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
+ return -EINVAL;
+ }
+
+ /* PRM defines that length zero specifies a length of 32 bits */
+ length = length ? length : 32;
+
+ src_max_length = hw_src_action_info->end -
+ hw_src_action_info->start + 1;
+ dst_max_length = hw_dst_action_info->end -
+ hw_dst_action_info->start + 1;
+
+ if (length + src_offset > src_max_length ||
+ length + dst_offset > dst_max_length) {
+ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+ return -EINVAL;
+ }
+
+ MLX5_SET(dr_action_hw_copy, hw_action,
+ opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
+ hw_dst_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
+ hw_dst_action_info->start + dst_offset);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
+ length == 32 ? 0 : length);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
+ hw_src_action_info->hw_field);
+
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
+ hw_src_action_info->start + src_offset);
+
+ *ret_dst_hw_info = hw_dst_action_info;
+ *ret_src_hw_info = hw_src_action_info;
+
+ return 0;
+}
+
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+ __be64 *sw_action,
+ __be64 *hw_action,
+ const struct dr_action_modify_field_conv **ret_dst_hw_info,
+ const struct dr_action_modify_field_conv **ret_src_hw_info)
{
- u16 sw_field;
u8 action;
+ int ret;
- sw_field = MLX5_GET(set_action_in, sw_action, field);
+ *hw_action = 0;
+ *ret_src_hw_info = NULL;
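+
+ /* Only the copy action provides source field info; clear it for set/add */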
+
+ /* Get SW modify action type */
action = MLX5_GET(set_action_in, sw_action, action_type);
- /* Check if SW field is supported in current domain (RX/TX) */
- if (action == MLX5_ACTION_TYPE_SET) {
- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ switch (action) {
+ case MLX5_ACTION_TYPE_SET:
+ ret = dr_action_modify_sw_to_hw_set(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info);
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ ret = dr_action_modify_sw_to_hw_add(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info);
+ break;
+
+ case MLX5_ACTION_TYPE_COPY:
+ ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action,
+ hw_action,
+ ret_dst_hw_info,
+ ret_src_hw_info);
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int
+dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+
+ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ action->rewrite.allow_rx = 0;
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ action->rewrite.allow_tx = 0;
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+ sw_field);
+ return -EINVAL;
+ }
+ }
+
+ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+
+ if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
+ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
+ mlx5dr_dbg(dmn, "Unsupported field %d for add action\n",
+ sw_field);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ u16 sw_fields[2];
+ int i;
+
+ sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field);
+ sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field);
+
+ for (i = 0; i < 2; i++) {
+ if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+ action->rewrite.allow_rx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
- sw_field);
+ sw_fields[i]);
return -EINVAL;
}
- }
-
- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+ action->rewrite.allow_tx = 0;
if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
- sw_field);
+ sw_fields[i]);
return -EINVAL;
}
}
- } else if (action == MLX5_ACTION_TYPE_ADD) {
- if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
- mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field);
- return -EINVAL;
- }
- } else {
- mlx5dr_info(dmn, "Unsupported action %d modify action\n", action);
- return -EOPNOTSUPP;
+ }
+
+ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) {
+ mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
+ return -EINVAL;
}
return 0;
}
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
+ const __be64 *sw_action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
+ u8 action_type;
+ int ret;
+
+ action_type = MLX5_GET(set_action_in, sw_action, action_type);
+
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ ret = dr_action_modify_check_set_field_limitation(action,
+ sw_action);
+ break;
+
+ case MLX5_ACTION_TYPE_ADD:
+ ret = dr_action_modify_check_add_field_limitation(action,
+ sw_action);
+ break;
+
+ case MLX5_ACTION_TYPE_COPY:
+ ret = dr_action_modify_check_copy_field_limitation(action,
+ sw_action);
+ break;
+
+ default:
+ mlx5dr_info(dmn, "Unsupported action %d modify action\n",
+ action_type);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static bool
dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
{
@@ -1333,7 +1652,7 @@ dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
-static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
+static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
__be64 sw_actions[],
@@ -1341,20 +1660,26 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct dr_action_modify_field_conv *hw_dst_action_info;
+ const struct dr_action_modify_field_conv *hw_src_action_info;
u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
*modify_ttl = false;
+ action->rewrite.allow_rx = 1;
+ action->rewrite.allow_tx = 1;
+
for (i = 0; i < num_sw_actions; i++) {
sw_action = &sw_actions[i];
- ret = dr_action_modify_check_field_limitation(dmn, sw_action);
+ ret = dr_action_modify_check_field_limitation(action,
+ sw_action);
if (ret)
return ret;
@@ -1365,32 +1690,35 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
ret = dr_action_modify_sw_to_hw(dmn,
sw_action,
&hw_action,
- &hw_action_info);
+ &hw_dst_action_info,
+ &hw_src_action_info);
if (ret)
return ret;
/* Due to a HW limitation we cannot modify 2 different L3 types */
- if (l3_type && hw_action_info->l3_type &&
- hw_action_info->l3_type != l3_type) {
+ if (l3_type && hw_dst_action_info->l3_type &&
+ hw_dst_action_info->l3_type != l3_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
return -EINVAL;
}
- if (hw_action_info->l3_type)
- l3_type = hw_action_info->l3_type;
+ if (hw_dst_action_info->l3_type)
+ l3_type = hw_dst_action_info->l3_type;
/* Due to a HW limitation we cannot modify two different L4 types */
- if (l4_type && hw_action_info->l4_type &&
- hw_action_info->l4_type != l4_type) {
+ if (l4_type && hw_dst_action_info->l4_type &&
+ hw_dst_action_info->l4_type != l4_type) {
mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
return -EINVAL;
}
- if (hw_action_info->l4_type)
- l4_type = hw_action_info->l4_type;
+ if (hw_dst_action_info->l4_type)
+ l4_type = hw_dst_action_info->l4_type;
/* HW reads and executes two actions at once; this means we
* need to create a gap if two actions access the same field
*/
- if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) {
+ if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field ||
+ (hw_src_action_info &&
+ hw_field == hw_src_action_info->hw_field))) {
/* Check that after gap insertion the total number of HW
* modify actions doesn't exceed the limit
*/
@@ -1400,7 +1728,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn,
return -EINVAL;
}
}
- hw_field = hw_action_info->hw_field;
+ hw_field = hw_dst_action_info->hw_field;
hw_actions[hw_idx] = hw_action;
hw_idx++;
@@ -1443,7 +1771,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
goto free_chunk;
}
- ret = dr_actions_convert_modify_header(dmn,
+ ret = dr_actions_convert_modify_header(action,
max_hw_actions,
num_sw_actions,
actions,
@@ -1559,8 +1887,26 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
switch (action->action_type) {
case DR_ACTION_TYP_FT:
- if (!action->dest_tbl.is_fw_tbl)
+ if (action->dest_tbl.is_fw_tbl)
+ refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
+ else
refcount_dec(&action->dest_tbl.tbl->refcount);
+
+ if (action->dest_tbl.is_fw_tbl &&
+ action->dest_tbl.fw_tbl.num_of_ref_actions) {
+ struct mlx5dr_action **ref_actions;
+ int i;
+
+ ref_actions = action->dest_tbl.fw_tbl.ref_actions;
+ for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
+ refcount_dec(&ref_actions[i]->refcount);
+
+ kfree(ref_actions);
+
+ mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
+ action->dest_tbl.fw_tbl.id,
+ action->dest_tbl.fw_tbl.group_id);
+ }
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
refcount_dec(&action->reformat.dmn->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 41662c4e2664..461b39376daf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -320,12 +320,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
- u32 table_type,
- u64 icm_addr_rx,
- u64 icm_addr_tx,
- u8 level,
- bool sw_owner,
- bool term_tbl,
+ struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id)
{
@@ -335,37 +330,43 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
int err;
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
- MLX5_SET(create_flow_table_in, in, table_type, table_type);
+ MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
- MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl);
- MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner);
- MLX5_SET(flow_table_context, ft_mdev, level, level);
+ MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
+ MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
+ MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
- if (sw_owner) {
+ if (attr->sw_owner) {
/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
* icm_addr_1 used for FDB TX
*/
- if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+ if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_rx);
- } else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+ sw_owner_icm_root_0, attr->icm_addr_rx);
+ } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_tx);
- } else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+ sw_owner_icm_root_0, attr->icm_addr_tx);
+ } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, icm_addr_rx);
+ sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, icm_addr_tx);
+ sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
+ MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+ attr->decap_en);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+ attr->reformat_en);
+
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
- if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
+ if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
+ fdb_rx_icm_addr)
*fdb_rx_icm_addr =
(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
@@ -478,3 +479,208 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
return 0;
}
+
+static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
+ struct mlx5dr_cmd_fte_info *fte,
+ bool *extended_dest)
+{
+ int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+ int num_fwd_destinations = 0;
+ int num_encap = 0;
+ int i;
+
+ *extended_dest = false;
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return 0;
+ for (i = 0; i < fte->dests_size; i++) {
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+ if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ num_encap++;
+ num_fwd_destinations++;
+ }
+
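+ /* Extended destination format is required only when forwarding to multiple destinations with encap */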
+ if (num_fwd_destinations > 1 && num_encap > 0)
+ *extended_dest = true;
+
+ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+ mlx5_core_warn(dev, "FW does not support extended destination");
+ return -EOPNOTSUPP;
+ }
+ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+ mlx5_core_warn(dev, "FW does not support more than %d encaps",
+ 1 << fw_log_max_fdb_encap_uplink);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5dr_cmd_ft_info *ft,
+ u32 group_id,
+ struct mlx5dr_cmd_fte_info *fte)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+ void *in_flow_context, *vlan;
+ bool extended_dest = false;
+ void *in_match_value;
+ unsigned int inlen;
+ int dst_cnt_size;
+ void *in_dests;
+ u32 *in;
+ int err;
+ int i;
+
+ if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
+ return -EOPNOTSUPP;
+
+ if (!extended_dest)
+ dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+ else
+ dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, op_mod, opmod);
+ MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
+ MLX5_SET(flow_context, in_flow_context, flow_tag,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
+
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+ if (extended_dest) {
+ u32 action;
+
+ action = fte->action.action &
+ ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ MLX5_SET(flow_context, in_flow_context, action, action);
+ } else {
+ MLX5_SET(flow_context, in_flow_context, action,
+ fte->action.action);
+ if (fte->action.pkt_reformat)
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.pkt_reformat->id);
+ }
+ if (fte->action.modify_hdr)
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_hdr->id);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+
+ in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+ match_value);
+ memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
+
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ int list_size = 0;
+
+ for (i = 0; i < fte->dests_size; i++) {
+ unsigned int id, type = fte->dest_arr[i].type;
+
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ id = fte->dest_arr[i].ft_num;
+ type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ id = fte->dest_arr[i].ft_id;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ id = fte->dest_arr[i].vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ fte->dest_arr[i].vport.vhca_id);
+ if (extended_dest && (fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+ MLX5_SET(extended_dest_format, in_dests,
+ packet_reformat_id,
+ fte->dest_arr[i].vport.reformat_id);
+ }
+ break;
+ default:
+ id = fte->dest_arr[i].tir_num;
+ }
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ type);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
+ }
+
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+ log_max_flow_counter,
+ ft->type));
+ int list_size = 0;
+
+ for (i = 0; i < fte->dests_size; i++) {
+ if (fte->dest_arr[i].type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ fte->dest_arr[i].counter_id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+ if (list_size > max_list_size) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
+ }
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
+ kvfree(in);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 60ef6e6171e3..1fbcd012bb85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -7,6 +7,7 @@
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
{
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
u32 table_id, group_id, modify_hdr_id;
u64 rx_icm_addr, modify_ttl_action;
@@ -16,9 +17,14 @@ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
if (!recalc_cs_ft)
return NULL;
- ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
- 0, 0, dmn->info.caps.max_ft_level - 1,
- false, true, &rx_icm_addr, &table_id);
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ ft_attr.level = dmn->info.caps.max_ft_level - 1;
+ ft_attr.term_tbl = true;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev,
+ &ft_attr,
+ &rx_icm_addr,
+ &table_id);
if (ret) {
mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
goto free_ttl_tbl;
@@ -91,3 +97,70 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
kfree(recalc_cs_ft);
}
+
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_flow_destination_hw_info *dest,
+ int num_dest,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id)
+{
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+ struct mlx5dr_cmd_fte_info fte_info = {};
+ u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {};
+ struct mlx5dr_cmd_ft_info ft_info = {};
+ int ret;
+
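+ /* Create a FW-owned table with a single match-all FTE forwarding to all dests */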
+ ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+ ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.reformat_en = reformat_req;
+ ft_attr.decap_en = reformat_req;
+
+ ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret);
+ return ret;
+ }
+
+ ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ *tbl_id, group_id);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret);
+ goto free_flow_table;
+ }
+
+ ft_info.id = *tbl_id;
+ ft_info.type = FS_FT_FDB;
+ fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_info.dests_size = num_dest;
+ fte_info.val = val;
+ fte_info.dest_arr = dest;
+
+ ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret);
+ goto free_flow_group;
+ }
+
+ return 0;
+
+free_flow_group:
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
+ *tbl_id, *group_id);
+free_flow_table:
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+ return ret;
+}
+
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn,
+ u32 tbl_id, u32 group_id)
+{
+ mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id);
+ mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+ MLX5_FLOW_TABLE_TYPE_FDB,
+ tbl_id, group_id);
+ mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id,
+ MLX5_FLOW_TABLE_TYPE_FDB);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 32e94d2ee5e4..e4cff7abb348 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
/* We need to copy the refcount since this ste
* may have been traversed several times
*/
- refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+ new_ste->refcount = cur_ste->refcount;
/* Link old STEs rule_mem list to the new ste */
mlx5dr_rule_update_rule_member(cur_ste, new_ste);
@@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
if (!rule_mem)
return -ENOMEM;
+ INIT_LIST_HEAD(&rule_mem->list);
+ INIT_LIST_HEAD(&rule_mem->use_ste_list);
+
rule_mem->ste = ste;
list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 51803eef13dd..c7f10d4f8f8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
+#include <linux/smp.h>
#include "dr_types.h"
#define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
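+ /* raw_ variant: this may run preemptible; the CPU id is only used to spread vectors */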
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index a5a266983dd3..c6c7d1defbd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
- refcount_set(&dst->refcount, refcount_read(&src->refcount));
+ dst->refcount = src->refcount;
INIT_LIST_HEAD(&dst->rule_list);
list_splice_tail_init(&src->rule_list, &dst->rule_list);
@@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
- return !refcount_read(&ste->refcount);
+ return !ste->refcount;
}
/* Init one ste as a pattern for ste data array */
@@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->ste_arr = chunk->ste_arr;
htbl->hw_ste_arr = chunk->hw_ste_arr;
htbl->miss_list = chunk->miss_list;
- refcount_set(&htbl->refcount, 0);
+ htbl->refcount = 0;
for (i = 0; i < chunk->num_of_entries; i++) {
struct mlx5dr_ste *ste = &htbl->ste_arr[i];
ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
- refcount_set(&ste->refcount, 0);
+ ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
INIT_LIST_HEAD(&htbl->miss_list[i]);
INIT_LIST_HEAD(&ste->rule_list);
@@ -713,7 +713,7 @@ out_free_htbl:
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
- if (refcount_read(&htbl->refcount))
+ if (htbl->refcount)
return -EBUSY;
mlx5dr_icm_free_chunk(htbl->chunk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e178d8d3dbc9..14ce2d7dbb66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -211,6 +211,9 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
{
+ bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
u64 icm_addr_rx = 0;
u64 icm_addr_tx = 0;
int ret;
@@ -221,18 +224,21 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
if (tbl->tx.s_anchor)
icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
- ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
- tbl->table_type,
- icm_addr_rx,
- icm_addr_tx,
- tbl->dmn->info.caps.max_ft_level - 1,
- true, false, NULL,
- &tbl->table_id);
+ ft_attr.table_type = tbl->table_type;
+ ft_attr.icm_addr_rx = icm_addr_rx;
+ ft_attr.icm_addr_tx = icm_addr_tx;
+ ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1;
+ ft_attr.sw_owner = true;
+ ft_attr.decap_en = en_decap;
+ ft_attr.reformat_en = en_encap;
+
+ ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
+ NULL, &tbl->table_id);
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
{
struct mlx5dr_table *tbl;
int ret;
@@ -245,6 +251,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
tbl->dmn = dmn;
tbl->level = level;
+ tbl->flags = flags;
refcount_set(&tbl->refcount, 1);
ret = dr_table_init(tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 290fe61c33d0..dffe35145d19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste {
u8 *hw_ste;
/* refcount: indicates the number of rules that are using this STE */
- refcount_t refcount;
+ u32 refcount;
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
@@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
struct mlx5dr_ste_htbl {
u8 lu_type;
u16 byte_mask;
- refcount_t refcount;
+ u32 refcount;
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste *ste_arr;
u8 *hw_ste_arr;
@@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
{
- if (refcount_dec_and_test(&htbl->refcount))
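+ /* STE and htbl refcounts are manipulated under the domain mutex, so non-atomic counters suffice */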
+ htbl->refcount--;
+ if (!htbl->refcount)
mlx5dr_ste_htbl_free(htbl);
}
static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
- refcount_inc(&htbl->refcount);
+ htbl->refcount++;
}
/* STE utils */
@@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- if (refcount_dec_and_test(&ste->refcount))
+ ste->refcount--;
+ if (!ste->refcount)
mlx5dr_ste_free(ste, matcher, nic_matcher);
}
/* initialized to 0, increased only when the STE appears in a new rule */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
- refcount_inc(&ste->refcount);
+ ste->refcount++;
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
@@ -679,6 +681,7 @@ struct mlx5dr_table {
u32 level;
u32 table_type;
u32 table_id;
+ u32 flags;
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
@@ -742,10 +745,14 @@ struct mlx5dr_action {
union {
struct mlx5dr_table *tbl;
struct {
- struct mlx5_flow_table *ft;
+ struct mlx5dr_domain *dmn;
+ u32 id;
+ u32 group_id;
+ enum fs_flow_table_type type;
u64 rx_icm_addr;
u64 tx_icm_addr;
- struct mlx5_core_dev *mdev;
+ struct mlx5dr_action **ref_actions;
+ u32 num_of_ref_actions;
} fw_tbl;
};
} dest_tbl;
@@ -867,6 +874,17 @@ struct mlx5dr_cmd_query_flow_table_details {
u64 sw_owner_icm_root_0;
};
+struct mlx5dr_cmd_create_flow_table_attr {
+ u32 table_type;
+ u64 icm_addr_rx;
+ u64 icm_addr_tx;
+ u8 level;
+ bool sw_owner;
+ bool term_tbl;
+ bool decap_en;
+ bool reformat_en;
+};
+
/* internal API functions */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps);
@@ -904,12 +922,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
- u32 table_type,
- u64 icm_addr_rx,
- u64 icm_addr_tx,
- u8 level,
- bool sw_owner,
- bool term_tbl,
+ struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
@@ -1051,6 +1064,43 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
+struct mlx5dr_cmd_ft_info {
+ u32 id;
+ u16 vport;
+ enum fs_flow_table_type type;
+};
+
+struct mlx5dr_cmd_flow_destination_hw_info {
+ enum mlx5_flow_destination_type type;
+ union {
+ u32 tir_num;
+ u32 ft_num;
+ u32 ft_id;
+ u32 counter_id;
+ struct {
+ u16 num;
+ u16 vhca_id;
+ u32 reformat_id;
+ u8 flags;
+ } vport;
+ };
+};
+
+struct mlx5dr_cmd_fte_info {
+ u32 dests_size;
+ u32 index;
+ struct mlx5_flow_context flow_context;
+ u32 *val;
+ struct mlx5_flow_act action;
+ struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
+};
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5dr_cmd_ft_info *ft,
+ u32 group_id,
+ struct mlx5dr_cmd_fte_info *fte);
+
struct mlx5dr_fw_recalc_cs_ft {
u64 rx_icm_addr;
u32 table_id;
@@ -1065,4 +1115,12 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
u32 vport_num,
u64 *rx_icm_addr);
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_flow_destination_hw_info *dest,
+ int num_dest,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id);
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
+ u32 group_id);
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3d587d0bdbbe..3abfc8125926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -74,7 +74,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
next_ft);
tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
- ft->level);
+ ft->level, ft->flags);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
@@ -184,13 +184,13 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
-static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
+static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
if (mlx5_dr_is_fw_table(dest_ft->flags))
- return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
+ return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
@@ -206,6 +206,12 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}
+static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
+{
+ return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+}
+
#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
@@ -213,7 +219,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte *fte)
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
- struct mlx5dr_action *term_action = NULL;
+ struct mlx5dr_action_dest *term_actions;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
@@ -223,6 +229,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_rule *rule;
struct mlx5_flow_rule *dst;
int fs_dr_num_actions = 0;
+ int num_term_actions = 0;
int num_actions = 0;
size_t match_sz;
int err = 0;
@@ -233,18 +240,38 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
GFP_KERNEL);
- if (!actions)
- return -ENOMEM;
+ if (!actions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
sizeof(*fs_dr_actions), GFP_KERNEL);
if (!fs_dr_actions) {
- kfree(actions);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*term_actions), GFP_KERNEL);
+ if (!term_actions) {
+ err = -ENOMEM;
+ goto free_fs_dr_actions_alloc;
}
match_sz = sizeof(fte->val);
+ /* Drop the reformat action bit if a destination vport is set with reformat */
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (!contain_vport_reformat_action(dst))
+ continue;
+
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ break;
+ }
+ }
+
/* The order of the actions must be kept; only the following
* order is supported by SW steering:
* TX: push vlan -> modify header -> encap
@@ -335,7 +362,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
}
if (fte->flow_context.flow_tag) {
@@ -352,34 +379,25 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
- u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}
- switch (type) {
- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
- id = dst->dest_attr.counter_id;
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
- tmp_action =
- mlx5dr_action_create_flow_counter(id);
- if (!tmp_action) {
- err = -ENOMEM;
- goto free_actions;
- }
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- actions[num_actions++] = tmp_action;
- break;
+ switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
- tmp_action = create_ft_action(dev, dst);
+ tmp_action = create_ft_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst);
@@ -388,7 +406,14 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- term_action = tmp_action;
+ term_actions[num_term_actions].dest = tmp_action;
+
+ if (dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ term_actions[num_term_actions].reformat =
+ dst->dest_attr.vport.pkt_reformat->action.dr_action;
+
+ num_term_actions++;
break;
default:
err = -EOPNOTSUPP;
@@ -397,11 +422,50 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
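+ /* Counter dests are attached as counter actions rather than forwarding destinations */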
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ u32 id;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -ENOSPC;
+ goto free_actions;
+ }
+
+ id = dst->dest_attr.counter_id;
+ tmp_action =
+ mlx5dr_action_create_flow_counter(id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
-
- if (term_action)
- actions[num_actions++] = term_action;
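+ /* One termination dest is used directly; multiple dests go through a FW multi-destination table */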
+ if (num_term_actions == 1) {
+ if (term_actions->reformat)
+ actions[num_actions++] = term_actions->reformat;
+
+ actions[num_actions++] = term_actions->dest;
+ } else if (num_term_actions > 1) {
+ tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
+ term_actions,
+ num_term_actions);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
@@ -412,7 +476,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
+ kfree(term_actions);
kfree(actions);
+
fte->fs_dr_rule.dr_rule = rule;
fte->fs_dr_rule.num_actions = fs_dr_num_actions;
fte->fs_dr_rule.dr_actions = fs_dr_actions;
@@ -420,13 +486,18 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- for (i = 0; i < fs_dr_num_actions; i++)
+ /* Free in reverse order to handle action dependencies */
+ for (i = fs_dr_num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
mlx5dr_action_destroy(fs_dr_actions[i]);
- mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
- kfree(actions);
+ kfree(term_actions);
+free_fs_dr_actions_alloc:
kfree(fs_dr_actions);
+free_actions_alloc:
+ kfree(actions);
+out_err:
+ mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
return err;
}
@@ -533,7 +604,8 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
if (err)
return err;
- for (i = 0; i < rule->num_actions; i++)
+ /* Free in reverse order to handle action dependencies */
+ for (i = rule->num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
mlx5dr_action_destroy(rule->dr_actions[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 1722f4668269..e01c3766c7de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -32,6 +32,7 @@ enum {
};
enum {
+ MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
};
@@ -625,4 +626,19 @@ struct mlx5_ifc_dr_action_hw_set_bits {
u8 inline_data[0x20];
};
+struct mlx5_ifc_dr_action_hw_copy_bits {
+ u8 opcode[0x8];
+ u8 destination_field_code[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 reserved_at_20[0x8];
+ u8 source_field_code[0x8];
+ u8 reserved_at_30[0x2];
+ u8 source_left_shifter[0x6];
+ u8 reserved_at_38[0x8];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index adda9cbfba45..e1edc9c247b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -33,6 +33,11 @@ struct mlx5dr_match_parameters {
u64 *match_buf; /* Device spec format */
};
+struct mlx5dr_action_dest {
+ struct mlx5dr_action *dest;
+ struct mlx5dr_action *reformat;
+};
+
#ifdef CONFIG_MLX5_SW_STEERING
struct mlx5dr_domain *
@@ -46,7 +51,7 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
int mlx5dr_table_destroy(struct mlx5dr_table *table);
@@ -75,14 +80,19 @@ struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev);
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+ struct mlx5_flow_table *ft);
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u32 vport, u8 vhca_id_valid,
u16 vhca_id);
+struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests);
+
struct mlx5dr_action *mlx5dr_action_create_drop(void);
struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
@@ -131,7 +141,7 @@ mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn) { }
static inline struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; }
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
static inline int
mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
@@ -165,8 +175,8 @@ static inline struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
static inline struct mlx5dr_action *
-mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
- struct mlx5_core_dev *mdev) { return NULL; }
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+ struct mlx5_flow_table *ft) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
@@ -174,6 +184,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u16 vhca_id) { return NULL; }
static inline struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests) { return NULL; }
+
+static inline struct mlx5dr_action *
mlx5dr_action_create_drop(void) { return NULL; }
static inline struct mlx5dr_action *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index f2a0e72285ba..02f7e4a39578 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
len = nstrides << wq->fbc.log_stride;
wqe = mlx5_wq_cyc_get_wqe(wq, ix);
- pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 544344ac4894..79057af4fe99 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
+#include <linux/vmalloc.h>
#include <linux/xz.h>
#include "mlxfw_mfa2.h"
#include "mlxfw_mfa2_file.h"
@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_size = be32_to_cpu(comp->size);
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
- comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
if (!comp_data)
return ERR_PTR(-ENOMEM);
comp_data->comp.data_size = comp_size;
@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
return &comp_data->comp;
err_out:
- kfree(comp_data);
+ vfree(comp_data);
return ERR_PTR(err);
}
@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
const struct mlxfw_mfa2_comp_data *comp_data;
comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
- kfree(comp_data);
+ vfree(comp_data);
}
void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 9bf8da5f6daf..3fe878d7c94c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -573,6 +573,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
+ enum mlxsw_reg_mgpir_device_type device_type;
int index, max_index, sensor_index;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
char mtmp_pl[MLXSW_REG_MTMP_LEN];
@@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
- if (!gbox_num)
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+ !gbox_num)
return 0;
index = mlxsw_hwmon->module_sensor_max;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index c721b171bd8d..ce0a6837daa3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -895,8 +895,10 @@ static int
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
+ enum mlxsw_reg_mgpir_device_type device_type;
struct mlxsw_thermal_module *gearbox_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 gbox_num;
int i;
int err;
@@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
NULL);
- if (!thermal->tz_gearbox_num)
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+ !gbox_num)
return 0;
+ thermal->tz_gearbox_num = gbox_num;
thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
sizeof(*thermal->tz_gearbox_arr),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 2b543911ae00..c4caeeadcba9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -213,8 +213,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
err_register_netdev:
mlxsw_m->ports[local_port] = NULL;
- free_netdev(dev);
err_dev_addr_get:
+ free_netdev(dev);
err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_m->core, local_port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5294a1622643..dd6685156396 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3477,10 +3477,10 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
enum mlxsw_reg_qeec_hr {
- MLXSW_REG_QEEC_HIERARCY_PORT,
- MLXSW_REG_QEEC_HIERARCY_GROUP,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_PORT,
+ MLXSW_REG_QEEC_HR_GROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ MLXSW_REG_QEEC_HR_TC,
};
/* reg_qeec_element_hierarchy
@@ -3563,8 +3563,8 @@ MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
*/
MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
-/* A large max rate will disable the max shaper. */
-#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
+/* The largest possible max shaper value; used to disable the shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS ((1u << 31) - 1) /* Kbps */
/* reg_qeec_max_shaper_rate
* Max shaper information rate.
@@ -3602,6 +3602,21 @@ MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
*/
MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+/* reg_qeec_max_shaper_bs
+ * Max shaper burst size
+ * Burst size is 2^max_shaper_bs * 512 bits
+ * For Spectrum-1: Range is: 5..25
+ * For Spectrum-2: Range is: 11..25
+ * Reserved when ptps = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
+
+#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index)
@@ -3618,8 +3633,7 @@ static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
{
MLXSW_REG_ZERO(qeec, payload);
mlxsw_reg_qeec_local_port_set(payload, local_port);
- mlxsw_reg_qeec_element_hierarchy_set(payload,
- MLXSW_REG_QEEC_HIERARCY_PORT);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, MLXSW_REG_QEEC_HR_PORT);
mlxsw_reg_qeec_ptps_set(payload, ptps);
}
@@ -3749,6 +3763,38 @@ mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
}
+/* QPDP - QoS Port DSCP to Priority Mapping Register
+ * -------------------------------------------------
+ * This register controls the port default Switch Priority and Color. The
+ * default Switch Priority and Color are used for frames where the trust state
+ * uses default values. All member ports of a LAG should be configured with the
+ * same default values.
+ */
+#define MLXSW_REG_QPDP_ID 0x4007
+#define MLXSW_REG_QPDP_LEN 0x8
+
+MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
+
+/* reg_qpdp_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
+
+/* reg_qpdp_switch_prio
+ * Default port Switch Priority (default 0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port,
+ u8 switch_prio)
+{
+ MLXSW_REG_ZERO(qpdp, payload);
+ mlxsw_reg_qpdp_local_port_set(payload, local_port);
+ mlxsw_reg_qpdp_switch_prio_set(payload, switch_prio);
+}
+
/* QPDPM - QoS Port DSCP to Priority Mapping Register
* --------------------------------------------------
* This register controls the mapping from DSCP field to
@@ -5472,6 +5518,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
@@ -5481,6 +5528,7 @@ enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
};
/* reg_htgt_trap_group
@@ -10108,6 +10156,92 @@ static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
}
+/* TIEEM - Tunneling IPinIP Encapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIEEM register maps the ECN of the overlay IP header, as seen at the
+ * ingress to the encapsulation, to the ECN of the underlay network.
+ */
+#define MLXSW_REG_TIEEM_ID 0xA812
+#define MLXSW_REG_TIEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tieem, MLXSW_REG_TIEEM_ID, MLXSW_REG_TIEEM_LEN);
+
+/* reg_tieem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tieem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tieem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tieem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tieem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tieem, payload);
+ mlxsw_reg_tieem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tieem_underlay_ecn_set(payload, underlay_ecn);
+}
+
+/* TIDEM - Tunneling IPinIP Decapsulation ECN Mapping Register
+ * -----------------------------------------------------------
+ * The TIDEM register configures the actions that are performed during
+ * decapsulation.
+ */
+#define MLXSW_REG_TIDEM_ID 0xA813
+#define MLXSW_REG_TIDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tidem, MLXSW_REG_TIDEM_ID, MLXSW_REG_TIDEM_LEN);
+
+/* reg_tidem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tidem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tidem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tidem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet which goes out
+ * from the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tidem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_en, 0x08, 28, 4);
+
+/* reg_tidem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tidem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 eip_ecn,
+ bool trap_en, u16 trap_id)
+{
+ MLXSW_REG_ZERO(tidem, payload);
+ mlxsw_reg_tidem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tidem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tidem_eip_ecn_set(payload, eip_ecn);
+ mlxsw_reg_tidem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tidem_trap_id_set(payload, trap_id);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -10580,6 +10714,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(qeec),
MLXSW_REG(qrwe),
MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdp),
MLXSW_REG(qpdpm),
MLXSW_REG(qtctm),
MLXSW_REG(qpsc),
@@ -10651,6 +10786,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(tndem),
MLXSW_REG(tnpc),
MLXSW_REG(tigcr),
+ MLXSW_REG(tieem),
+ MLXSW_REG(tidem),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 556dca328bb5..7358b5bc7eb6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,11 +45,9 @@
#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
-
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 2308
+#define MLXSW_SP1_FWREV_SUBMINOR 2714
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -66,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2000
-#define MLXSW_SP2_FWREV_SUBMINOR 2308
+#define MLXSW_SP2_FWREV_SUBMINOR 2714
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -197,6 +195,10 @@ struct mlxsw_sp_ptp_ops {
u64 *data, int data_index);
};
+struct mlxsw_sp_span_ops {
+ u32 (*buffsize_get)(int mtu, u32 speed);
+};
+
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
u16 component_index, u32 *p_max_size,
u8 *p_align_bits, u16 *p_max_write_size)
@@ -423,13 +425,12 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
- mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
- rev->major, rev->minor, rev->subminor);
+ dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
fw_filename);
@@ -860,23 +861,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
-
if (eth_skb_pad(skb)) {
this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
@@ -1215,6 +1210,9 @@ static void update_stats_cache(struct work_struct *work)
periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+	 * necessary when the port goes down.
+ */
goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
@@ -1796,6 +1794,10 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_PRIO:
return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3539,6 +3541,27 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
+{
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_oper;
+ int err;
+
+ port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
+ port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
+ mlxsw_sp_port->local_port, 0,
+ false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
+ &eth_proto_oper);
+ *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
+ return 0;
+}
+
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight)
@@ -3556,7 +3579,7 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
- u8 next_index, u32 maxrate)
+ u8 next_index, u32 maxrate, u8 burst_size)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char qeec_pl[MLXSW_REG_QEEC_LEN];
@@ -3565,6 +3588,7 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
next_index);
mlxsw_reg_qeec_mase_set(qeec_pl, true);
mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
@@ -3602,26 +3626,25 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
* one subgroup, which are all member in the same group.
*/
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
- 0);
+ MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, false, 0);
if (err)
return err;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+ MLXSW_REG_QEEC_HR_TC, i, i,
false, 0);
if (err)
return err;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
true, 100);
if (err)
@@ -3633,30 +3656,30 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
* for the initial configuration.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_HR_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i, i,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
- MLXSW_REG_QEEC_MAS_DIS);
+ MLXSW_REG_QEEC_MAS_DIS, 0);
if (err)
return err;
}
@@ -3664,7 +3687,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
/* Configure the min shaper for multicast TCs. */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_TC,
+ MLXSW_REG_QEEC_HR_TC,
i + 8, i,
MLXSW_REG_QEEC_MIS_MIN);
if (err)
@@ -3888,6 +3911,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
+ INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
+ mlxsw_sp_span_speed_update_work);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
@@ -3944,6 +3969,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
@@ -4324,6 +4350,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
}
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ for (i = 0; i < TC_MAX_QUEUE; i++)
+ mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
@@ -4342,9 +4377,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
+ mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
}
}
@@ -4540,10 +4577,16 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -4626,6 +4669,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
rate = 19 * 1024;
burst_size = 12;
break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
+ rate = 360;
+ burst_size = 7;
+ break;
default:
continue;
}
@@ -4665,6 +4712,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
priority = 5;
tc = 5;
break;
@@ -4877,6 +4925,33 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats = mlxsw_sp2_get_stats,
};
+static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
+ .buffsize_get = mlxsw_sp1_span_buffsize_get,
+};
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
+{
+ return 3 * mtu + MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR * speed / 1000;
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
+ .buffsize_get = mlxsw_sp2_span_buffsize_get,
+};
+
+u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(mtu, speed);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
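To make the two per-ASIC policies concrete, a worked example (editor's
illustration; it assumes speed carries the ethtool value in Mbps, e.g. 100000
for 100 Gbps):

	/* Spectrum-1, MTU 1500, any speed:
	 *   1500 * 5 / 2 = 3750 bytes
	 * Spectrum-2, MTU 1500 at 100 Gbps:
	 *   3 * 1500 + 38 * 100000 / 1000 = 4500 + 3800 = 8300 bytes
	 * mlxsw_sp_span_buffsize_get() then converts the byte count to cells
	 * and adds one.
	 */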
+
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
@@ -5098,8 +5173,10 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -5123,6 +5200,31 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -5629,7 +5731,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp2_init,
+ .init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 347bec9d1ecf..a0f1f9dceec5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -140,6 +140,7 @@ struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_span_ops;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -185,8 +186,10 @@ struct mlxsw_sp {
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
const struct mlxsw_sp_ptp_ops *ptp_ops;
+ const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
+ u32 lowest_shaper_bs;
};
static inline struct mlxsw_sp_upper *
@@ -292,6 +295,9 @@ struct mlxsw_sp_port {
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
u8 split_base_local_port;
+ struct {
+ struct delayed_work speed_update_dw;
+ } span;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -471,6 +477,7 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
+int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -481,7 +488,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
- u8 next_index, u32 maxrate);
+ u8 next_index, u32 maxrate, u8 burst_size);
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u8 state);
@@ -501,6 +508,7 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -852,6 +860,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p);
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 150b3a144b83..3d3cca596116 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
struct mlxsw_sp_fid *dummy_fid;
struct rhashtable ruleset_ht;
struct list_head rules;
+ struct mutex rules_lock; /* Protects rules list */
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_ruleset_block_bind;
}
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
struct mlxsw_sp_acl_rule *rule;
int err;
- /* Protect internal structures from changes */
- rtnl_lock();
+ mutex_lock(&acl->rules_lock);
list_for_each_entry(rule, &acl->rules, list) {
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
rule);
if (err)
goto err_rule_update;
}
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return 0;
err_rule_update:
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return err;
}
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
+ mutex_init(&acl->rules_lock);
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_acl_ops_init:
+ mutex_destroy(&acl->rules_lock);
mlxsw_sp_fid_put(fid);
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+ mutex_destroy(&acl->rules_lock);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 21296fa7f7fb..49a72a8f1f57 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -160,7 +160,7 @@ static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
u8 weight = ets->tc_tx_bw[i];
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, dwrr, weight);
if (err) {
netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
@@ -198,7 +198,7 @@ err_port_ets_set:
u8 weight = my_ets->tc_tx_bw[i];
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ MLXSW_REG_QEEC_HR_SUBGROUP, i,
0, dwrr, weight);
}
return err;
@@ -369,6 +369,17 @@ err_update_qrwe:
}
static int
+mlxsw_sp_port_dcb_app_update_qpdp(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdp_pl[MLXSW_REG_QPDP_LEN];
+
+ mlxsw_reg_qpdp_pack(qpdp_pl, mlxsw_sp_port->local_port, default_prio);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdp), qpdp_pl);
+}
+
+static int
mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
struct dcb_ieee_app_dscp_map *map)
{
@@ -405,6 +416,12 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
int err;
default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ err = mlxsw_sp_port_dcb_app_update_qpdp(mlxsw_sp_port, default_prio);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure port default priority\n");
+ return err;
+ }
+
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
@@ -507,9 +524,9 @@ static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0,
- maxrate->tc_maxrate[i]);
+ maxrate->tc_maxrate[i], 0);
if (err) {
netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
goto err_port_ets_maxrate_set;
@@ -523,8 +540,9 @@ static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
err_port_ets_maxrate_set:
for (i--; i >= 0; i--)
mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
- i, 0, my_maxrate->tc_maxrate[i]);
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0,
+ my_maxrate->tc_maxrate[i], 0);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 49933818c6f5..2dc0978428e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
if (err)
- return err;
+ goto err_ctx_prepare;
j = 0;
for (; i < rif_count; i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@@ -247,6 +247,7 @@ start_again:
return 0;
err_entry_append:
err_entry_get:
+err_ctx_prepare:
rtnl_unlock();
devlink_dpipe_entry_clear(&entry);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 6400cd644b7a..a8525992528f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -3,8 +3,10 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
+#include <net/inet_ecn.h>
#include "spectrum_ipip.h"
+#include "reg.h"
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
@@ -338,3 +340,61 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
};
+
+static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tieem_pl[MLXSW_REG_TIEEM_LEN];
+
+ mlxsw_reg_tieem_pack(tieem_pl, inner_ecn, outer_ecn);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tieem), tieem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ int err;
+
+ err = mlxsw_sp_ipip_ecn_encap_init_one(mlxsw_sp, i, outer_ecn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
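For reference, a sketch of the mapping this loop programs into TIEEM, assuming
INET_ECN_encapsulate() keeps its usual Linux semantics of never propagating CE
into the underlay:

	/* overlay (inner) ECN -> underlay (outer) ECN at encapsulation:
	 *   Not-ECT -> Not-ECT
	 *   ECT(1)  -> ECT(1)
	 *   ECT(0)  -> ECT(0)
	 *   CE      -> ECT(0)
	 */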
+
+static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tidem_pl[MLXSW_REG_TIDEM_LEN];
+ bool trap_en, set_ce = false;
+ u8 new_inner_ecn;
+
+ trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+
+ mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
+}
+
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i, j, err;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ err = mlxsw_sp_ipip_ecn_decap_init_one(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
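Likewise, a sketch of the decapsulation table, under the assumption that
__INET_ECN_decapsulate() keeps the semantics of the classic
INET_ECN_decapsulate() core:

	/* underlay (outer) x overlay (inner) -> egress ECN at decapsulation:
	 *   outer CE, inner ECT(0)/ECT(1)/CE      -> inner rewritten to CE
	 *   outer ECT(0)/ECT(1)/CE, inner Not-ECT -> inner kept, packet
	 *                                            trapped as DECAP_ECN0
	 *   all other combinations                -> inner kept, no trap
	 */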
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ec2ff3d7f41c..34f7c3501b08 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -920,6 +920,7 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
egr_types = 0xff;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
return -ERANGE;
}
@@ -1015,27 +1016,17 @@ mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
{
- const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_oper, speed;
bool ptps = false;
int err, i;
+ u32 speed;
if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
- port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
- port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
- mlxsw_sp_port->local_port, 0,
- false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
if (err)
return err;
- port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
- &eth_proto_oper);
- speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
ptps = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 68cc6737d45c..02526c53d4f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -18,6 +18,8 @@ enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_NO_QDISC,
MLXSW_SP_QDISC_RED,
MLXSW_SP_QDISC_PRIO,
+ MLXSW_SP_QDISC_ETS,
+ MLXSW_SP_QDISC_TBF,
};
struct mlxsw_sp_qdisc_ops {
@@ -195,6 +197,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->backlog[tclass_num] +
+ xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->tail_drop[tclass_num] +
+ xstats->tail_drop[tclass_num + 8];
+}
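A short orientation note for the i + 8 offset (editor's, grounded in the ETS
init above, where elements i + 8 are configured for multicast):

	/* Traffic class i carries UC traffic and traffic class i + 8 the
	 * corresponding MC traffic, so the backlog and tail drops of one
	 * logical queue are folded across both.
	 */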
+
static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
u8 prio_bitmap, u64 *tx_packets,
@@ -212,6 +228,70 @@ mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
}
}
+static void
+mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 *p_tx_bytes, u64 *p_tx_packets,
+ u64 *p_drops, u64 *p_backlog)
+{
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_port_xstats *xstats;
+ u64 tx_bytes, tx_packets;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &tx_packets, &tx_bytes);
+
+ *p_tx_packets += tx_packets;
+ *p_tx_bytes += tx_bytes;
+ *p_drops += xstats->wred_drop[tclass_num] +
+ mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
+ *p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
+}
+
+static void
+mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u64 tx_bytes, u64 tx_packets,
+ u64 drops, u64 backlog,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes -= stats_base->tx_bytes;
+ tx_packets -= stats_base->tx_packets;
+ drops -= stats_base->drops;
+ backlog -= stats_base->backlog;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
+
+ stats_base->backlog += backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+}
+
+static void
+mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -269,7 +349,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
&stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
- red_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
stats_base->drops = red_base->prob_drop + red_base->pdrop;
@@ -342,19 +422,28 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
{
- struct tc_red_qopt_offload_params *p = params;
u64 backlog;
backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_qdisc->stats_base.backlog);
- p->qstats->backlog -= backlog;
+ qstats->backlog -= backlog;
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -370,7 +459,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
marks = xstats->ecn - xstats_base->prob_mark;
- pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+ xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
@@ -387,40 +477,21 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops, backlog;
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
+ u64 overlimits;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
- &tx_packets, &tx_bytes);
- tx_bytes = tx_bytes - stats_base->tx_bytes;
- tx_packets = tx_packets - stats_base->tx_packets;
-
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
- drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- stats_base->drops;
- backlog = xstats->backlog[tclass_num];
- _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
stats_ptr->qstats->overlimits += overlimits;
- stats_ptr->qstats->drops += drops;
- stats_ptr->qstats->backlog +=
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- backlog) -
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- stats_base->backlog);
-
- stats_base->backlog = backlog;
- stats_base->drops += drops;
stats_base->overlimits += overlimits;
- stats_base->tx_bytes += tx_bytes;
- stats_base->tx_packets += tx_packets;
+
return 0;
}
@@ -470,15 +541,215 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+static void
+mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ u64 backlog_cells = 0;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 drops = 0;
+
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog_cells);
+
+ mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
+ mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
+ mlxsw_sp_qdisc->stats_base.drops = drops;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
static int
-mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ mlxsw_sp_qdisc->tclass_num, 0,
+ MLXSW_REG_QEEC_MAS_DIS, 0);
+}
+
+static int
+mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 max_size, u8 *p_burst_size)
+{
+ /* TBF burst size is configured in bytes. The ASIC burst size value is
+	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
+ */
+ u32 bs512 = max_size / 64;
+ u8 bs = fls(bs512);
+
+ if (!bs)
+ return -EINVAL;
+ --bs;
+
+ /* Demand a power of two. */
+ if ((1 << bs) != bs512)
+ return -EINVAL;
+
+ if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
+ bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
+ return -EINVAL;
+
+ *p_burst_size = bs;
+ return 0;
+}
+
+static u32
+mlxsw_sp_qdisc_tbf_max_size(u8 bs)
+{
+ return (1U << bs) * 64;
+}
+
+static u64
+mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
+{
+ /* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
+ * Kbits/s.
+ */
+ return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
+}
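A worked example tying the two conversion helpers together (editor's
illustration; the concrete numbers are assumed, modulo tc's internal
rounding):

# tc qdisc replace dev swp1 parent 1:7 handle 17: tbf rate 100mbit burst 16kb limit 1m

arrives with rate.rate_bytes_ps = 12500000 and max_size = 16384. The rate
helper returns div_u64(12500000, 1000) * 8 = 100000 Kbps for the QEEC shaper,
and mlxsw_sp_qdisc_tbf_bs() computes bs512 = 16384 / 64 = 256 = 2^8, hence
bs = 8: a power of two within the Spectrum-1 range of 5..25, so both values
pass validation.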
+
+static int
+mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
+ dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
+ "spectrum: TBF: rate of %lluKbps must be below %u\n",
+ rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (err) {
+ u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
+
+ dev_err(mlxsw_sp->bus_info->dev,
+			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u\n",
+ p->max_size,
+ mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
+ mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+ u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ u8 burst_size;
+ int err;
+
+ err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+ if (WARN_ON_ONCE(err))
+ /* check_params above was supposed to reject this value. */
+ return -EINVAL;
+
+	/* Configure the subgroup shaper, so that both UC and MC traffic are
+	 * subject to shaping. That is unlike RED: there, UC queue lengths
+	 * differ from MC ones due to different pool and quota configurations,
+	 * so a single configuration would not be applicable to both. For a
+	 * shaper, on the other hand, subjecting the overall stream to the
+	 * configured shaper makes sense. Note that this is also what we do
+	 * for ieee_setmaxrate().
+	 */
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ mlxsw_sp_qdisc->tclass_num, 0,
+ rate_kbps, burst_size);
+}
+
+static void
+mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_tbf_qopt_offload_replace_params *p = params;
+
+ mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
+ .type = MLXSW_SP_QDISC_TBF,
+ .check_params = mlxsw_sp_qdisc_tbf_check_params,
+ .replace = mlxsw_sp_qdisc_tbf_replace,
+ .unoffload = mlxsw_sp_qdisc_tbf_unoffload,
+ .destroy = mlxsw_sp_qdisc_tbf_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_tbf_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_tbf_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_TBF_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_tbf,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_TBF))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_TBF_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_TBF_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
&mlxsw_sp_port->tclass_qdiscs[i]);
mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
@@ -488,36 +759,58 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct tc_prio_qopt_offload_params *p = params;
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
- if (p->bands > IEEE_8021QAZ_MAX_TCS)
+static int
+__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
+{
+ if (nbands > IEEE_8021QAZ_MAX_TCS)
return -EOPNOTSUPP;
return 0;
}
static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- void *params)
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct tc_prio_qopt_offload_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned int nbands,
+ const unsigned int *quanta,
+ const unsigned int *weights,
+ const u8 *priomap)
+{
struct mlxsw_sp_qdisc *child_qdisc;
int tclass, i, band, backlog;
u8 old_priomap;
int err;
- for (band = 0; band < p->bands; band++) {
+ for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass, 0, !!quanta[band],
+ weights[band]);
+ if (err)
+ return err;
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (p->priomap[i] == band) {
+ if (priomap[i] == band) {
child_qdisc->prio_bitmap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
@@ -540,21 +833,46 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+ mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HR_SUBGROUP,
+ tclass, 0, false, 0);
}
return 0;
}
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ zeroes, zeroes, p->priomap);
+}
+
+static void
+__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct gnet_stats_queue *qstats)
+{
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ qstats->backlog -= backlog;
+}
+
static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
- u64 backlog;
- backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_qdisc->stats_base.backlog);
- p->qstats->backlog -= backlog;
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
}
static int
@@ -562,37 +880,23 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
- struct mlxsw_sp_qdisc_stats *stats_base;
- struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
+ struct mlxsw_sp_qdisc *tc_qdisc;
+ u64 tx_packets = 0;
+ u64 tx_bytes = 0;
+ u64 backlog = 0;
+ u64 drops = 0;
int i;
- xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
- stats_base = &mlxsw_sp_qdisc->stats_base;
-
- tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
- tx_packets = stats->tx_packets - stats_base->tx_packets;
-
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- drops += xstats->tail_drop[i];
- drops += xstats->wred_drop[i];
- backlog += xstats->backlog[i];
+ tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
+ mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
+ &tx_bytes, &tx_packets,
+ &drops, &backlog);
}
- drops = drops - stats_base->drops;
- _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
- stats_ptr->qstats->drops += drops;
- stats_ptr->qstats->backlog +=
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- backlog) -
- mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- stats_base->backlog);
- stats_base->backlog = backlog;
- stats_base->drops += drops;
- stats_base->tx_bytes += tx_bytes;
- stats_base->tx_packets += tx_packets;
+ mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+ tx_bytes, tx_packets, drops, backlog,
+ stats_ptr);
return 0;
}
@@ -614,7 +918,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base->drops = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
stats_base->drops += xstats->wred_drop[i];
}
@@ -631,31 +935,104 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
-/* Grafting is not supported in mlxsw. It will result in un-offloading of the
- * grafted qdisc as well as the qdisc in the qdisc new location.
- * (However, if the graft is to the location where the qdisc is already at, it
- * will be ignored completely and won't cause un-offloading).
+static int
+mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ p->quanta, p->weights, p->priomap);
+}
+
+static void
+mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_ets_qopt_offload_replace_params *p = params;
+
+ __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
+ .type = MLXSW_SP_QDISC_ETS,
+ .check_params = mlxsw_sp_qdisc_ets_check_params,
+ .replace = mlxsw_sp_qdisc_ets_replace,
+ .unoffload = mlxsw_sp_qdisc_ets_unoffload,
+ .destroy = mlxsw_sp_qdisc_ets_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
+ * graph is free of cycles). These operations do not change the parent handle
+ * though, which means it can be incomplete (if there is more than one class
+ * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
+ * linked to a different class and then removed from the original class).
+ *
+ * E.g. consider this sequence of operations:
+ *
+ * # tc qdisc add dev swp1 root handle 1: prio
+ * # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
+ * RED: set bandwidth to 10Mbit
+ * # tc qdisc link dev swp1 handle 13: parent 1:2
+ *
+ * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
+ * child. But RED will still only claim that 1:3 is its parent. If it's removed
+ * from that band, its only parent will be 1:2, but it will continue to claim
+ * that it is in fact 1:3.
+ *
+ * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
+ * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
+ * notification to offload the child Qdisc, based on its parent handle, and use
+ * the graft operation to validate that the class where the child is actually
+ * grafted corresponds to the parent handle. If the two don't match, we
+ * unoffload the child.
*/
static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
+__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
- int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
- /* Check if the grafted qdisc is already in its "new" location. If so -
- * nothing needs to be done.
- */
- if (p->band < IEEE_8021QAZ_MAX_TCS &&
- mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+ if (band < IEEE_8021QAZ_MAX_TCS &&
+ mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
return 0;
+ if (!child_handle) {
+ /* This is an invisible FIFO replacing the original Qdisc.
+ * Ignore it--the original Qdisc's destroy will follow.
+ */
+ return 0;
+ }
+
/* See if the grafted qdisc is already offloaded on any tclass. If so,
* unoffload it.
*/
old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
- p->child_handle);
+ child_handle);
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
@@ -664,6 +1041,15 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_prio_qopt_offload_graft_params *p)
+{
+ return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->band, p->child_handle);
+}
+
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
@@ -697,6 +1083,40 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_ets_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_ETS_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_ets,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_ETS))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_ETS_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_ETS_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_ETS_GRAFT:
+ return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 30bfe3880faf..4a77b511ead2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -382,9 +382,10 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib_entry;
struct mlxsw_sp_fib_node {
- struct list_head entry_list;
+ struct mlxsw_sp_fib_entry *fib_entry;
struct list_head list;
struct rhash_head ht_node;
struct mlxsw_sp_fib *fib;
@@ -397,7 +398,6 @@ struct mlxsw_sp_fib_entry_decap {
};
struct mlxsw_sp_fib_entry {
- struct list_head list;
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
@@ -1162,7 +1162,6 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
const union mlxsw_sp_l3addr *addr,
enum mlxsw_sp_fib_entry_type type)
{
- struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_node *fib_node;
unsigned char addr_prefix_len;
struct mlxsw_sp_fib *fib;
@@ -1191,15 +1190,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
addr_prefix_len);
- if (!fib_node || list_empty(&fib_node->entry_list))
+ if (!fib_node || fib_node->fib_entry->type != type)
return NULL;
- fib_entry = list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list);
- if (fib_entry->type != type)
- return NULL;
-
- return fib_entry;
+ return fib_node->fib_entry;
}
/* Given an IPIP entry, find the corresponding decap route. */
@@ -1209,7 +1203,6 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib_node *fib_node;
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct mlxsw_sp_fib_entry *fib_entry;
unsigned char saddr_prefix_len;
union mlxsw_sp_l3addr saddr;
struct mlxsw_sp_fib *ul_fib;
@@ -1244,15 +1237,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
saddr_prefix_len);
- if (!fib_node || list_empty(&fib_node->entry_list))
- return NULL;
-
- fib_entry = list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list);
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
+ if (!fib_node ||
+ fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
return NULL;
- return fib_entry;
+ return fib_node->fib_entry;
}
static struct mlxsw_sp_ipip_entry *
@@ -3231,10 +3220,6 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static bool
-mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib_entry *fib_entry);
-
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -3243,9 +3228,6 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
- fib_entry))
- continue;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
return err;
@@ -3253,24 +3235,6 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void
-mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op, int err);
-
-static void
-mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
-{
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- struct mlxsw_sp_fib_entry *fib_entry;
-
- list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
- fib_entry))
- continue;
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
- }
-}
-
static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
{
/* Valid sizes for an adjacency group are:
@@ -3374,6 +3338,73 @@ mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
}
}
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
+
+static void
+mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+ if (nh->offloaded)
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+__mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct mlxsw_sp_nexthop *nh;
+
+ nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
+ if (nh && nh->offloaded)
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ else
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ }
+}
+
+static void
+mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+
+ /* Unfortunately, in IPv6 the route and the nexthop are described by
+ * the same struct, so we need to iterate over all the routes using the
+ * nexthop group and set / clear the offload indication for them.
+ */
+ list_for_each_entry(fib6_entry, &nh_grp->fib_list,
+ common.nexthop_group_node)
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+}
+
+static void
+mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
+ case AF_INET:
+ mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ case AF_INET6:
+ mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
+ }
+}
+
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
@@ -3447,6 +3478,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
+
if (!old_adj_index_valid) {
/* The trap was set for fib entries, so we have to call
* fib entry update to unset it and use adjacency index.
@@ -3468,9 +3501,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
- /* Offload state within the group changed, so update the flags. */
- mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
-
return;
set_trap:
@@ -3483,6 +3513,7 @@ set_trap:
err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
nh_grp->ecmp_size, nh_grp->adj_index);
@@ -3845,7 +3876,7 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
key.fib_nh = fib_nh;
nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
- if (WARN_ON_ONCE(!nh))
+ if (!nh)
return;
switch (event) {
@@ -4065,131 +4096,128 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
}
static void
-mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- int i;
-
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
- nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- return;
- }
-
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
+ bool should_offload;
- if (nh->offloaded)
- nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- else
- nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_entry->tos;
+ fri.type = fib4_entry->type;
+ fri.offload = should_offload;
+ fri.trap = !should_offload;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
-mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- int i;
-
- if (!list_is_singular(&nh_grp->fib_list))
- return;
-
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
+ u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
+ int dst_len = fib_entry->fib_node->key.prefix_len;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct fib_rt_info fri;
- nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
+ common);
+ fri.fi = fi;
+ fri.tb_id = fib4_entry->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_entry->tos;
+ fri.type = fib4_entry->type;
+ fri.offload = false;
+ fri.trap = false;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
-mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ bool should_offload;
+
+ should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
-
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
- list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- return;
- }
-
- list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
- struct mlxsw_sp_nexthop *nh;
-
- nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
- if (nh && nh->offloaded)
- fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
- else
- fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
+ !should_offload);
}
static void
-mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
- list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct fib6_info *rt = mlxsw_sp_rt6->rt;
-
- rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- }
+ list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
+ fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
}
-static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_fib4_entry_offload_set(fib_entry);
+ mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_sp_fib6_entry_offload_set(fib_entry);
+ mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
}
}
static void
-mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_fib4_entry_offload_unset(fib_entry);
+ mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_sp_fib6_entry_offload_unset(fib_entry);
+ mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
}
}
static void
-mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op, int err)
+mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_REG_RALUE_OP_WRITE_DELETE:
- return mlxsw_sp_fib_entry_offload_unset(fib_entry);
case MLXSW_REG_RALUE_OP_WRITE_WRITE:
- if (err)
- return;
- if (mlxsw_sp_fib_entry_should_offload(fib_entry))
- mlxsw_sp_fib_entry_offload_set(fib_entry);
- else
- mlxsw_sp_fib_entry_offload_unset(fib_entry);
- return;
+ mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
+ break;
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
+ break;
default:
- return;
+ break;
}
}
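Both set/clear paths funnel into new core APIs for route offload and trap indications. For reference, their assumed v5.6-era signatures (declared in include/net/ip_fib.h and include/net/ip6_fib.h; at most one of the offload/trap flags is meant to be set):

/* IPv4: update RTM_F_OFFLOAD / RTM_F_TRAP on the matching fib alias. */
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);

/* IPv6: per-route equivalent, used by the IPv6 helpers above. */
void fib6_info_hw_flags_set(struct fib6_info *f6i, bool offload, bool trap);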
@@ -4416,7 +4444,10 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
+ if (err)
+ return err;
+
+ mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
return err;
}
@@ -4491,6 +4522,19 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
+ break;
+ default:
+ break;
+ }
+}
+
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -4523,6 +4567,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib4_entry;
err_nexthop4_group_get:
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
kfree(fib4_entry);
return ERR_PTR(err);
@@ -4532,6 +4577,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib4_entry *fib4_entry)
{
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
kfree(fib4_entry);
}
@@ -4555,15 +4601,14 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (!fib_node)
return NULL;
- list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
- if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
- fib4_entry->type == fen_info->type &&
- mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
- fen_info->fi) {
- return fib4_entry;
- }
- }
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ if (fib4_entry->tb_id == fen_info->tb_id &&
+ fib4_entry->tos == fen_info->tos &&
+ fib4_entry->type == fen_info->type &&
+ mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
+ fen_info->fi)
+ return fib4_entry;
return NULL;
}
@@ -4611,7 +4656,6 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
if (!fib_node)
return NULL;
- INIT_LIST_HEAD(&fib_node->entry_list);
list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
@@ -4622,18 +4666,9 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
list_del(&fib_node->list);
- WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
}
-static bool
-mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib_entry *fib_entry)
-{
- return list_first_entry(&fib_node->entry_list,
- struct mlxsw_sp_fib_entry, list) == fib_entry;
-}
-
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
@@ -4773,200 +4808,65 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_vr *vr = fib_node->fib->vr;
- if (!list_empty(&fib_node->entry_list))
+ if (fib_node->fib_entry)
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static struct mlxsw_sp_fib4_entry *
-mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct mlxsw_sp_fib4_entry *new4_entry)
-{
- struct mlxsw_sp_fib4_entry *fib4_entry;
-
- list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
- if (fib4_entry->tb_id > new4_entry->tb_id)
- continue;
- if (fib4_entry->tb_id != new4_entry->tb_id)
- break;
- if (fib4_entry->tos > new4_entry->tos)
- continue;
- if (fib4_entry->prio >= new4_entry->prio ||
- fib4_entry->tos < new4_entry->tos)
- return fib4_entry;
- }
-
- return NULL;
-}
-
-static int
-mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
- struct mlxsw_sp_fib4_entry *new4_entry)
-{
- struct mlxsw_sp_fib_node *fib_node;
-
- if (WARN_ON(!fib4_entry))
- return -EINVAL;
-
- fib_node = fib4_entry->common.fib_node;
- list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
- common.list) {
- if (fib4_entry->tb_id != new4_entry->tb_id ||
- fib4_entry->tos != new4_entry->tos ||
- fib4_entry->prio != new4_entry->prio)
- break;
- }
-
- list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
- return 0;
-}
-
-static int
-mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
- bool replace, bool append)
-{
- struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
- struct mlxsw_sp_fib4_entry *fib4_entry;
-
- fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
-
- if (append)
- return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
- if (replace && WARN_ON(!fib4_entry))
- return -EINVAL;
-
- /* Insert new entry before replaced one, so that we can later
- * remove the second.
- */
- if (fib4_entry) {
- list_add_tail(&new4_entry->common.list,
- &fib4_entry->common.list);
- } else {
- struct mlxsw_sp_fib4_entry *last;
-
- list_for_each_entry(last, &fib_node->entry_list, common.list) {
- if (new4_entry->tb_id > last->tb_id)
- break;
- fib4_entry = last;
- }
-
- if (fib4_entry)
- list_add(&new4_entry->common.list,
- &fib4_entry->common.list);
- else
- list_add(&new4_entry->common.list,
- &fib_node->entry_list);
- }
-
- return 0;
-}
-
-static void
-mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
-{
- list_del(&fib4_entry->common.list);
-}
-
-static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
-
- if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
- return 0;
-
- /* To prevent packet loss, overwrite the previously offloaded
- * entry.
- */
- if (!list_is_singular(&fib_node->entry_list)) {
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
-
- mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
- }
-
- return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
-}
-
-static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
+static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
-
- if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
- return;
-
- /* Promote the next entry by overwriting the deleted entry */
- if (!list_is_singular(&fib_node->entry_list)) {
- struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
- enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
-
- mlxsw_sp_fib_entry_update(mlxsw_sp, n);
- mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
- return;
- }
-
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
-}
-
-static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry,
- bool replace, bool append)
-{
int err;
- err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
- if (err)
- return err;
+ fib_node->fib_entry = fib_entry;
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
- goto err_fib_node_entry_add;
+ goto err_fib_entry_update;
return 0;
-err_fib_node_entry_add:
- mlxsw_sp_fib4_node_list_remove(fib4_entry);
+err_fib_entry_update:
+ fib_node->fib_entry = NULL;
return err;
}
static void
-mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry)
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib4_node_list_remove(fib4_entry);
+ struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
- mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ fib_node->fib_entry = NULL;
}
-static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib4_entry *fib4_entry,
- bool replace)
+static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
- struct mlxsw_sp_fib4_entry *replaced;
+ struct mlxsw_sp_fib4_entry *fib4_replaced;
- if (!replace)
- return;
+ if (!fib_node->fib_entry)
+ return true;
- /* We inserted the new entry before replaced one */
- replaced = list_next_entry(fib4_entry, common.list);
+ fib4_replaced = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ if (fib4_entry->tb_id == RT_TABLE_MAIN &&
+ fib4_replaced->tb_id == RT_TABLE_LOCAL)
+ return false;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
- mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return true;
}
static int
-mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
- const struct fib_entry_notifier_info *fen_info,
- bool replace, bool append)
+mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ const struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp_fib4_entry *fib4_entry;
+ struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
struct mlxsw_sp_fib_node *fib_node;
int err;
@@ -4989,18 +4889,32 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
goto err_fib4_entry_create;
}
- err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
- append);
+ if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return 0;
+ }
+
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
- goto err_fib4_node_entry_link;
+ goto err_fib_node_entry_link;
}
- mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
+
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
+ common);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
return 0;
-err_fib4_node_entry_link:
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
@@ -5017,11 +4931,11 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
return;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
- if (WARN_ON(!fib4_entry))
+ if (!fib4_entry)
return;
fib_node = fib4_entry->common.fib_node;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
@@ -5079,17 +4993,13 @@ static void mlxsw_sp_rt6_release(struct fib6_info *rt)
static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
kfree(mlxsw_sp_rt6);
}
-static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
-{
- /* RTF_CACHE routes are ignored */
- return !(rt->fib6_flags & RTF_ADDRCONF) &&
- rt->fib6_nh->fib_nh_gw_family;
-}
-
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
@@ -5097,37 +5007,6 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
list)->rt;
}
-static struct mlxsw_sp_fib6_entry *
-mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool replace)
-{
- struct mlxsw_sp_fib6_entry *fib6_entry;
-
- if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
- return NULL;
-
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
- * virtual router.
- */
- if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
- continue;
- if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
- break;
- if (rt->fib6_metric < nrt->fib6_metric)
- continue;
- if (rt->fib6_metric == nrt->fib6_metric &&
- mlxsw_sp_fib6_rt_can_mp(rt))
- return fib6_entry;
- if (rt->fib6_metric > nrt->fib6_metric)
- break;
- }
-
- return NULL;
-}
-
static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
const struct fib6_info *rt)
@@ -5313,6 +5192,11 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
&nh_grp->fib_list);
fib6_entry->common.nh_group = nh_grp;
+ /* The route and the nexthop are described by the same struct, so we
+ * need to update the nexthop offload indication for the new route.
+ */
+ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+
return 0;
}
@@ -5345,16 +5229,16 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
- goto err_fib_node_entry_add;
+ goto err_fib_entry_update;
if (list_empty(&old_nh_grp->fib_list))
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
return 0;
-err_fib_node_entry_add:
+err_fib_entry_update:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
list_add_tail(&fib6_entry->common.nexthop_group_node,
@@ -5519,112 +5403,13 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_fib6_entry *
-mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool replace)
-{
- struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
-
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
- continue;
- if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
- break;
- if (replace && rt->fib6_metric == nrt->fib6_metric) {
- if (mlxsw_sp_fib6_rt_can_mp(rt) ==
- mlxsw_sp_fib6_rt_can_mp(nrt))
- return fib6_entry;
- if (mlxsw_sp_fib6_rt_can_mp(nrt))
- fallback = fallback ?: fib6_entry;
- }
- if (rt->fib6_metric > nrt->fib6_metric)
- return fallback ?: fib6_entry;
- }
-
- return fallback;
-}
-
-static int
-mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
- bool *p_replace)
-{
- struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
- struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
- struct mlxsw_sp_fib6_entry *fib6_entry;
-
- fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);
-
- if (*p_replace && !fib6_entry)
- *p_replace = false;
-
- if (fib6_entry) {
- list_add_tail(&new6_entry->common.list,
- &fib6_entry->common.list);
- } else {
- struct mlxsw_sp_fib6_entry *last;
-
- list_for_each_entry(last, &fib_node->entry_list, common.list) {
- struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
-
- if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
- break;
- fib6_entry = last;
- }
-
- if (fib6_entry)
- list_add(&new6_entry->common.list,
- &fib6_entry->common.list);
- else
- list_add(&new6_entry->common.list,
- &fib_node->entry_list);
- }
-
- return 0;
-}
-
-static void
-mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
-{
- list_del(&fib6_entry->common.list);
-}
-
-static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry,
- bool *p_replace)
-{
- int err;
-
- err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
- if (err)
- return err;
-
- err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
- if (err)
- goto err_fib_node_entry_add;
-
- return 0;
-
-err_fib_node_entry_add:
- mlxsw_sp_fib6_node_list_remove(fib6_entry);
- return err;
-}
-
-static void
-mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
-{
- mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
- mlxsw_sp_fib6_node_list_remove(fib6_entry);
-}
-
-static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct mlxsw_sp_fib *fib;
+ struct fib6_info *cmp_rt;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
@@ -5638,40 +5423,44 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (!fib_node)
return NULL;
- list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
-
- if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
- rt->fib6_metric == iter_rt->fib6_metric &&
- mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
- return fib6_entry;
- }
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
+ rt->fib6_metric == cmp_rt->fib6_metric &&
+ mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
+ return fib6_entry;
return NULL;
}
-static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry,
- bool replace)
+static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
- struct mlxsw_sp_fib6_entry *replaced;
+ struct mlxsw_sp_fib6_entry *fib6_replaced;
+ struct fib6_info *rt, *rt_replaced;
- if (!replace)
- return;
+ if (!fib_node->fib_entry)
+ return true;
- replaced = list_next_entry(fib6_entry, common.list);
+ fib6_replaced = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry,
+ common);
+ rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
+ if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
+ rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
+ return false;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
- mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return true;
}
-static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6, bool replace)
+static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
- struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
+ struct mlxsw_sp_fib_entry *replaced;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
int err;
@@ -5693,18 +5482,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(fib_node))
return PTR_ERR(fib_node);
- /* Before creating a new entry, try to append route to an existing
- * multipath entry.
- */
- fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
- if (fib6_entry) {
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
- rt_arr, nrt6);
- if (err)
- goto err_fib6_entry_nexthop_add;
- return 0;
- }
-
fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
nrt6);
if (IS_ERR(fib6_entry)) {
@@ -5712,17 +5489,76 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
goto err_fib6_entry_create;
}
- err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
+ if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return 0;
+ }
+
+ replaced = fib_node->fib_entry;
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
- goto err_fib6_node_entry_link;
+ goto err_fib_node_entry_link;
- mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
+ /* Nothing to replace */
+ if (!replaced)
+ return 0;
+
+ mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
+ fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
+ common);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
return 0;
-err_fib6_node_entry_link:
+err_fib_node_entry_link:
+ fib_node->fib_entry = replaced;
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
+}
+
+static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
+ int err;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ if (rt->fib6_src.plen)
+ return -EINVAL;
+
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return 0;
+
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib_node))
+ return PTR_ERR(fib_node);
+
+ if (WARN_ON_ONCE(!fib_node->fib_entry)) {
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return -EINVAL;
+ }
+
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ if (err)
+ goto err_fib6_entry_nexthop_add;
+
+ return 0;
+
err_fib6_entry_nexthop_add:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
return err;
@@ -5742,8 +5578,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fib6_rt_should_ignore(rt))
return;
+ /* Multipath routes are first added to the FIB trie and only then
+ * notified. If we vetoed the addition, we will get a delete
+ * notification for a route we do not have. Therefore, do not warn if
+ * route was not found.
+ */
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
- if (WARN_ON(!fib6_entry))
+ if (!fib6_entry)
return;
/* If not all the nexthops are deleted, then only reduce the nexthop
@@ -5757,7 +5598,7 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
fib_node = fib6_entry->common.fib_node;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
@@ -5911,39 +5752,25 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
-
- list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
- common.list) {
- bool do_break = &tmp->common.list == &fib_node->entry_list;
+ struct mlxsw_sp_fib4_entry *fib4_entry;
- mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
- mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- /* Break when entry list is empty and node was freed.
- * Otherwise, we'll access freed memory in the next
- * iteration.
- */
- if (do_break)
- break;
- }
+ fib4_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
-
- list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
- common.list) {
- bool do_break = &tmp->common.list == &fib_node->entry_list;
+ struct mlxsw_sp_fib6_entry *fib6_entry;
- mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
- mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
- mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- if (do_break)
- break;
- }
+ fib6_entry = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry, common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -6094,7 +5921,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace, append;
int err;
/* Protect internal structures from changes */
@@ -6102,13 +5928,9 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
- err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
- replace, append);
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
fib_info_put(fib_work->fen_info.fi);
@@ -6133,20 +5955,24 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace;
int err;
rtnl_lock();
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fib6_add(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6,
- replace);
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
+ if (err)
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
@@ -6211,8 +6037,6 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
@@ -6240,7 +6064,7 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
@@ -6343,9 +6167,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
return notifier_from_errno(err);
- case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND:
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
return notifier_from_errno(-EINVAL);
@@ -7074,6 +6898,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
rif = mlxsw_sp->router->rifs[i];
+ if (rif && rif->ops &&
+ rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
+ continue;
if (rif && rif->dev && rif->dev != dev &&
!ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
mlxsw_sp->mac_mask)) {
@@ -8017,8 +7844,18 @@ mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
+ int err;
+
mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
+
+ err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
+ if (err)
+ return err;
+
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index cc1de91e8217..c9b94f435cdd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -104,4 +104,7 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
return !memcmp(addr1, addr2, sizeof(*addr1));
}
+int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 200d324e6d99..0cdd7954a085 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -748,33 +748,50 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
return false;
}
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
+static int
+mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
- return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ u32 buffsize;
+ u32 speed;
+ int err;
+
+ err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
+ if (err)
+ return err;
+ if (speed == SPEED_UNKNOWN)
+ speed = 0;
+
+ buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
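mlxsw_sp_span_buffsize_get() is defined outside this hunk (in spectrum.c). Assumed shape only: it extends the old MTU-only estimate with a speed-dependent term and converts bytes to buffer cells, hence the SPEED_UNKNOWN fallback to 0 above:

/* Assumption, not shown in this diff: speed- and MTU-aware sizing. */
u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, u32 speed, u32 mtu);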
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
/* If port is egress mirrored, the shared buffer size should be
* updated according to the MTU value.
*/
- if (mlxsw_sp_span_is_egress_mirror(port)) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+ if (mlxsw_sp_span_is_egress_mirror(port))
+ return mlxsw_sp_span_port_buffsize_update(port, mtu);
+ return 0;
+}
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
- return err;
- }
- }
+void mlxsw_sp_span_speed_update_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp_port *mlxsw_sp_port;
- return 0;
+ mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
+ span.speed_update_dw);
+
+ /* If port is egress mirrored, the shared buffer size should be
+ * updated according to the speed value.
+ */
+ if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
+ mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
+ mlxsw_sp_port->dev->mtu);
}
static struct mlxsw_sp_span_inspected_port *
@@ -836,15 +853,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
- port->dev->mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
+ if (err)
return err;
- }
}
if (bind) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 5e04252f2a11..59724335525f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -74,5 +74,6 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry);
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+void mlxsw_sp_span_speed_update_work(struct work_struct *work);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index e0d7c49ffae0..60205aa3f6a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -9,6 +9,20 @@
#include "reg.h"
#include "spectrum.h"
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/mlxsw.rst
+ */
+enum {
+ DEVLINK_MLXSW_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+};
+
+#define DEVLINK_MLXSW_TRAP_NAME_IRIF_DISABLED \
+ "irif_disabled"
+#define DEVLINK_MLXSW_TRAP_NAME_ERIF_DISABLED \
+ "erif_disabled"
+
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
@@ -21,6 +35,12 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
+ DEVLINK_MLXSW_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
@@ -58,6 +78,11 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
+ MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
+ MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
+ MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
+ MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
+ MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
};
static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
@@ -90,6 +115,15 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, ROUTER_EXP, TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, ROUTER_EXP,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
@@ -123,6 +157,13 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
+ DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
+ DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
+ DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
};
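The mapping is purely positional: entry i of mlxsw_sp_listener_devlink_map is the devlink trap reported for listener i of mlxsw_sp_listeners_arr, so the two arrays must stay in the same order. An illustrative lookup (the example_ name is hypothetical):

/* Illustrative only: resolve the devlink trap ID for listener i. */
static u16 example_listener_trap_id(int i)
{
	BUILD_BUG_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
		     ARRAY_SIZE(mlxsw_sp_listeners_arr));
	return mlxsw_sp_listener_devlink_map[i];
}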
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
@@ -304,8 +345,9 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
u32 rate;
switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:/* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: /* fall through */
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: /* fall through */
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
policer_id = MLXSW_SP_DISCARD_POLICER_ID;
ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
is_bytes = false;
@@ -342,6 +384,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
priority = 0;
tc = 1;
break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index de6cb22f68b1..f0e98ec8f1ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
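The rewrite above swaps the open-coded headroom check for skb_cow_head(), which both guarantees MLXSW_TXHDR_LEN writable bytes and unshares a cloned header before it is modified. The general pattern for drivers that prepend a hardware header (sketch):

/* skb_cow_head() returns 0 on success and may reallocate the head,
 * so call it before taking any pointer into the skb data. */
if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
	dev_kfree_skb_any(skb);		/* no room and no memory: drop */
	return NETDEV_TX_OK;		/* skb consumed either way */
}
skb_push(skb, MLXSW_TXHDR_LEN);		/* now safe to fill the header */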
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 0c1c142bb6b0..12e1fa998d42 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -67,6 +67,7 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
@@ -80,12 +81,20 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_CLASS_E = 0x164,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_MC_DMAC = 0x168,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_DIP = 0x169,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C,
+ MLXSW_TRAP_ID_DISCARD_DEC_PKT = 0x188,
+ MLXSW_TRAP_ID_DISCARD_OVERLAY_SMAC_MC = 0x190,
MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0,
MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index da329ca115cc..f3f6dfe3eddc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1103,7 +1103,7 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
__ks8842_start_new_rx_dma(netdev);
}
-static void ks8842_tx_timeout(struct net_device *netdev)
+static void ks8842_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
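This and the following tx_timeout changes are mechanical fallout of a v5.6 core change that passes the index of the stalled queue to the hook. The updated prototype in struct net_device_ops (include/linux/netdevice.h):

/* Multi-queue drivers can reset just the stalled queue; single-queue
 * drivers such as these simply ignore the new argument. */
void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);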
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e102e1560ac7..d1444ba36e10 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4896,7 +4896,7 @@ unlock:
* triggered to free up resources so that the transmit routine can continue
* sending out packets. The hardware is reset to correct the problem.
*/
-static void netdev_tx_timeout(struct net_device *dev)
+static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
static unsigned long last_reset;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 0567e4f387a5..09cdc2f2e7ff 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1325,7 +1325,7 @@ static irqreturn_t enc28j60_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void enc28j60_tx_timeout(struct net_device *ndev)
+static void enc28j60_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct enc28j60_net *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 52c41d11f565..39925e4bf2ec 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -892,7 +892,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
}
/* Deal with a transmit timeout */
-static void encx24j600_tx_timeout(struct net_device *dev)
+static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct encx24j600_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index afe52463dc57..9399f6a98748 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1265,6 +1265,9 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ret = -ERANGE;
+ break;
default:
netif_warn(adapter, drv, adapter->netdev,
" tx_type = %d, UNKNOWN\n", config.tx_type);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 985b46d7e3d1..86d543ab1ab9 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -500,13 +500,14 @@ EXPORT_SYMBOL(ocelot_port_enable);
static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
int err;
if (priv->serdes) {
err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
- priv->phy_mode);
+ ocelot_port->phy_mode);
if (err) {
netdev_err(dev, "Could not set mode of SerDes\n");
return err;
@@ -514,7 +515,7 @@ static int ocelot_port_open(struct net_device *dev)
}
err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
- priv->phy_mode);
+ ocelot_port->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index c259114c48fd..04372ba72fec 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -18,11 +18,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
+#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot.h>
-#include "ocelot_ana.h"
-#include "ocelot_dev.h"
-#include "ocelot_qsys.h"
#include "ocelot_rew.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
@@ -68,7 +68,6 @@ struct ocelot_port_private {
u8 vlan_aware;
- phy_interface_t phy_mode;
struct phy *serdes;
struct ocelot_port_tc tc;
diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h
deleted file mode 100644
index 841c6ec22b64..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_ana.h
+++ /dev/null
@@ -1,625 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_ANA_H_
-#define _MSCC_OCELOT_ANA_H_
-
-#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
-#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
-#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
-#define ANA_ANAGEFIL_PID_EN BIT(19)
-#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
-#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
-#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
-#define ANA_ANAGEFIL_VID_EN BIT(13)
-#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
-#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
-
-#define ANA_STORMLIMIT_CFG_RSZ 0x4
-
-#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
-#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
-#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
-#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
-#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
-#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
-
-#define ANA_AUTOAGE_AGE_FAST BIT(21)
-#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
-#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
-#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
-#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
-
-#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
-#define ANA_MACTOPTIONS_SHADOW BIT(0)
-
-#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
-#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
-#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
-#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
-#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
-#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
-#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
-#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
-#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
-#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
-#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
-#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
-#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
-#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
-
-#define ANA_FLOODING_RSZ 0x4
-
-#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
-#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
-#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
-#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
-#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
-
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
-#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
-#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
-#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
-#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
-
-#define ANA_SFLOW_CFG_RSZ 0x4
-
-#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
-#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
-#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
-#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
-#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
-
-#define ANA_PORT_MODE_RSZ 0x4
-
-#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
-#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
-#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
-#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
-
-#define ANA_CUT_THRU_CFG_RSZ 0x4
-
-#define ANA_PGID_PGID_RSZ 0x4
-
-#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
-#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
-#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
-#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
-#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
-
-#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
-#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
-#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
-#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
-#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
-
-#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
-#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
-#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
-#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
-#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
-#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
-
-#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
-#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
-#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
-#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
-#define ANA_TABLES_MACACCESS_VALID BIT(11)
-#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
-#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
-#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
-#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
-#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
-#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
-#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
-#define MACACCESS_CMD_IDLE 0
-#define MACACCESS_CMD_LEARN 1
-#define MACACCESS_CMD_FORGET 2
-#define MACACCESS_CMD_AGE 3
-#define MACACCESS_CMD_GET_NEXT 4
-#define MACACCESS_CMD_INIT 5
-#define MACACCESS_CMD_READ 6
-#define MACACCESS_CMD_WRITE 7
-
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
-#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
-#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
-#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
-#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
-#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
-
-#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
-#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
-#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
-#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
-#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
-#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
-#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
-#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
-
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
-#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
-#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
-#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
-#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
-#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
-#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
-#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
-
-#define ANA_TABLES_ENTRYLIM_RSZ 0x4
-
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
-#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
-#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
-#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
-
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
-#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
-#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
-#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
-#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
-#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
-#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
-#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
-#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
-#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
-#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
-#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
-#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
-#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
-#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
-#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
-
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
-#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
-#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
-#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
-
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
-#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
-
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
-#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
-#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
-#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
-#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
-
-#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
-#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
-#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
-#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
-#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
-#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
-#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
-#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
-#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
-#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
-
-#define ANA_MSTI_STATE_RSZ 0x4
-
-#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
-
-#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
-#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
-#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
-
-#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
-#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
-#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
-
-#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
-
-#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
-#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
-#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
-
-#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
-
-#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
-#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
-#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
-#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
-#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
-
-#define ANA_PORT_VLAN_CFG_GSZ 0x100
-
-#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
-#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
-#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
-#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
-#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
-#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
-#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
-#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
-#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
-#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
-#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
-
-#define ANA_PORT_DROP_CFG_GSZ 0x100
-
-#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
-#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
-#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
-#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
-#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
-#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
-#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
-
-#define ANA_PORT_QOS_CFG_GSZ 0x100
-
-#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
-#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
-#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
-#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
-#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
-
-#define ANA_PORT_VCAP_CFG_GSZ 0x100
-
-#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
-#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
-#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
-#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
-
-#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
-#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
-
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
-
-#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
-
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
-#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
-#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
-#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
-#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
-#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
-#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
-#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
-#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
-
-#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
-#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
-
-#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
-#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
-#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
-
-#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
-#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
-#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
-#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
-#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
-#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
-#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
-#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
-
-#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
-
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
-#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
-
-#define ANA_PORT_PORT_CFG_GSZ 0x100
-
-#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
-#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
-#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
-#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
-#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
-#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
-#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
-#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
-#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
-#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
-#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
-#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
-#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
-#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
-#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
-
-#define ANA_PORT_POL_CFG_GSZ 0x100
-
-#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
-#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
-#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
-#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
-#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
-#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
-
-#define ANA_PORT_PTP_CFG_GSZ 0x100
-
-#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
-
-#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
-
-#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
-
-#define ANA_PORT_SFID_CFG_GSZ 0x100
-#define ANA_PORT_SFID_CFG_RSZ 0x4
-
-#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
-#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
-#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
-
-#define ANA_PFC_PFC_CFG_GSZ 0x40
-
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
-#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
-#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
-
-#define ANA_PFC_PFC_TIMER_GSZ 0x40
-#define ANA_PFC_PFC_TIMER_RSZ 0x4
-
-#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
-
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
-#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
-
-#define ANA_IPT_IPT_GSZ 0x8
-
-#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
-#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
-#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
-#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
-#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
-#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
-#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
-#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
-
-#define ANA_PPT_PPT_RSZ 0x4
-
-#define ANA_FID_MAP_FID_MAP_RSZ 0x4
-
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
-#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
-#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
-
-#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
-#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
-#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
-#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
-#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
-#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
-#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
-#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
-
-#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
-#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
-#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
-#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
-#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
-#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
-#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
-#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
-#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
-#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
-#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
-#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
-#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
-#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
-#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
-#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
-#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
-
-#define ANA_CPUQ_8021_CFG_RSZ 0x4
-
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
-#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
-#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
-#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
-
-#define ANA_DSCP_CFG_RSZ 0x4
-
-#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
-#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
-#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
-#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
-#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
-#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
-#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
-
-#define ANA_DSCP_REWR_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
-
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
-#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
-
-#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
-#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
-#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
-
-#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
-#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
-#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
-#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
-
-#define ANA_FID_CFG_VID_MC_ENA BIT(0)
-
-#define ANA_POL_PIR_CFG_GSZ 0x20
-
-#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
-#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
-#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
-
-#define ANA_POL_CIR_CFG_GSZ 0x20
-
-#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
-#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
-#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
-
-#define ANA_POL_MODE_CFG_GSZ 0x20
-
-#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
-#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
-#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
-#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
-#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
-#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
-#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
-
-#define ANA_POL_PIR_STATE_GSZ 0x20
-
-#define ANA_POL_CIR_STATE_GSZ 0x20
-
-#define ANA_POL_STATE_GSZ 0x20
-
-#define ANA_POL_FLOWC_RSZ 0x4
-
-#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
-
-#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
-#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
-#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
-#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
-#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
-
-#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
-#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
-
-#endif
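The deleted header (relocated under include/soc/mscc/, per the new includes above) defines every multi-bit register field as a triple: an encode macro, a _M mask, and a _X extractor. A short sketch of how one such triple is typically used for a read-modify-write, taking ANA_FLOODING_FLD_UNICAST (bits 17:12) as the example:

#include <linux/bits.h>
#include <linux/types.h>

#define ANA_FLOODING_FLD_UNICAST(x)	(((x) << 12) & GENMASK(17, 12))	/* encode */
#define ANA_FLOODING_FLD_UNICAST_M	GENMASK(17, 12)			/* mask */
#define ANA_FLOODING_FLD_UNICAST_X(x)	(((x) & GENMASK(17, 12)) >> 12)	/* extract */

/* Replace just that field inside a register value: */
static u32 set_fld_unicast(u32 reg, u32 pgid)
{
	reg &= ~ANA_FLOODING_FLD_UNICAST_M;	/* clear the old value */
	reg |= ANA_FLOODING_FLD_UNICAST(pgid);	/* insert the new one */
	return reg;
}

/* Reading it back: pgid == ANA_FLOODING_FLD_UNICAST_X(reg). */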
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 2da8eee27e98..b38820849faa 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -402,9 +402,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
of_get_phy_mode(portnp, &phy_mode);
- priv->phy_mode = phy_mode;
+ ocelot_port->phy_mode = phy_mode;
- switch (priv->phy_mode) {
+ switch (ocelot_port->phy_mode) {
case PHY_INTERFACE_MODE_NA:
continue;
case PHY_INTERFACE_MODE_SGMII:
diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h
deleted file mode 100644
index 0a50d53bbd3f..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_dev.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_DEV_H_
-#define _MSCC_OCELOT_DEV_H_
-
-#define DEV_CLOCK_CFG 0x0
-
-#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
-#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
-#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
-#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
-#define DEV_CLOCK_CFG_PORT_RST BIT(3)
-#define DEV_CLOCK_CFG_PHY_RST BIT(2)
-#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
-
-#define DEV_PORT_MISC 0x4
-
-#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
-#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
-#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
-#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
-#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
-
-#define DEV_EVENTS 0x8
-
-#define DEV_EEE_CFG 0xc
-
-#define DEV_EEE_CFG_EEE_ENA BIT(22)
-#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
-#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
-#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
-#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
-#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define DEV_EEE_CFG_PORT_LPI BIT(0)
-
-#define DEV_RX_PATH_DELAY 0x10
-
-#define DEV_TX_PATH_DELAY 0x14
-
-#define DEV_PTP_PREDICT_CFG 0x18
-
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
-#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
-#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
-#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
-
-#define DEV_MAC_ENA_CFG 0x1c
-
-#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
-#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
-
-#define DEV_MAC_MODE_CFG 0x20
-
-#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
-#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
-#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
-
-#define DEV_MAC_MAXLEN_CFG 0x24
-
-#define DEV_MAC_TAGS_CFG 0x28
-
-#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
-#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
-#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
-#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
-
-#define DEV_MAC_ADV_CHK_CFG 0x2c
-
-#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
-
-#define DEV_MAC_IFG_CFG 0x30
-
-#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
-#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
-#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
-#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
-#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
-#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
-#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
-
-#define DEV_MAC_HDX_CFG 0x34
-
-#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
-#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
-#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
-#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
-#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
-#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
-#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
-#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
-#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
-
-#define DEV_MAC_DBG_CFG 0x38
-
-#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
-#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
-
-#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
-
-#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
-
-#define DEV_MAC_STICKY 0x44
-
-#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
-#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
-#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
-#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
-#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
-#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
-#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
-#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
-#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
-#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
-
-#define PCS1G_CFG 0x48
-
-#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
-#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
-#define PCS1G_CFG_PCS_ENA BIT(0)
-
-#define PCS1G_MODE_CFG 0x4c
-
-#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
-#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
-
-#define PCS1G_SD_CFG 0x50
-
-#define PCS1G_SD_CFG_SD_SEL BIT(8)
-#define PCS1G_SD_CFG_SD_POL BIT(4)
-#define PCS1G_SD_CFG_SD_ENA BIT(0)
-
-#define PCS1G_ANEG_CFG 0x54
-
-#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
-#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
-#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
-#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
-
-#define PCS1G_ANEG_NP_CFG 0x58
-
-#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
-#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
-
-#define PCS1G_LB_CFG 0x5c
-
-#define PCS1G_LB_CFG_RA_ENA BIT(4)
-#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
-#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
-
-#define PCS1G_DBG_CFG 0x60
-
-#define PCS1G_DBG_CFG_UDLT BIT(0)
-
-#define PCS1G_CDET_CFG 0x64
-
-#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
-
-#define PCS1G_ANEG_STATUS 0x68
-
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
-#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define PCS1G_ANEG_STATUS_PR BIT(4)
-#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
-#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
-
-#define PCS1G_ANEG_NP_STATUS 0x6c
-
-#define PCS1G_LINK_STATUS 0x70
-
-#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
-#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
-#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
-#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
-#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
-#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
-
-#define PCS1G_LINK_DOWN_CNT 0x74
-
-#define PCS1G_STICKY 0x78
-
-#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
-#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
-
-#define PCS1G_DEBUG_STATUS 0x7c
-
-#define PCS1G_LPI_CFG 0x80
-
-#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
-#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
-#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
-#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
-#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
-#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
-
-#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
-
-#define PCS1G_LPI_STATUS 0x88
-
-#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
-#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
-#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
-#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
-#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
-#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
-#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
-
-#define PCS1G_TSTPAT_MODE_CFG 0x8c
-
-#define PCS1G_TSTPAT_STATUS 0x90
-
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
-#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
-#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
-
-#define DEV_PCS_FX100_CFG 0x94
-
-#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
-#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
-#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
-#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
-#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
-#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
-#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
-#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
-#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
-#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
-#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
-#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
-#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
-#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
-#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
-
-#define DEV_PCS_FX100_STATUS 0x98
-
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
-#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
-#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
-#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
-#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
-#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
-#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
-#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
-#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
-
-#endif
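ocelot_dev.h moves the same way; its DEV_* constants are offsets into a per-port register block. As a hedged illustration only (the real driver goes through its regmap helpers, not raw MMIO), enabling a port's MAC with the two DEV_MAC_ENA_CFG bits might look like:

#include <linux/bits.h>
#include <linux/io.h>

#define DEV_MAC_ENA_CFG			0x1c
#define DEV_MAC_ENA_CFG_RX_ENA		BIT(4)
#define DEV_MAC_ENA_CFG_TX_ENA		BIT(0)

/* dev_base is a hypothetical raw mapping of one port's DEV block. */
static void example_mac_enable(void __iomem *dev_base)
{
	u32 val = readl(dev_base + DEV_MAC_ENA_CFG);

	val |= DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA;
	writel(val, dev_base + DEV_MAC_ENA_CFG);
}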
diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h
deleted file mode 100644
index d8c63aa761be..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_qsys.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_QSYS_H_
-#define _MSCC_OCELOT_QSYS_H_
-
-#define QSYS_PORT_MODE_RSZ 0x4
-
-#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
-#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
-
-#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
-
-#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
-#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
-
-#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
-#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
-#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
-#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
-#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
-#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
-
-#define QSYS_EEE_CFG_RSZ 0x4
-
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
-#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
-#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
-
-#define QSYS_SW_STATUS_RSZ 0x4
-
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
-#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
-#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
-
-#define QSYS_QMAP_GSZ 0x4
-
-#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
-#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
-#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
-#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
-#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
-#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
-#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
-#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
-
-#define QSYS_ISDX_SGRP_GSZ 0x4
-
-#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
-
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
-#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
-#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
-#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
-#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
-#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
-
-#define QSYS_RED_PROFILE_RSZ 0x4
-
-#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
-#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
-#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
-
-#define QSYS_RES_CFG_GSZ 0x8
-
-#define QSYS_RES_STAT_GSZ 0x8
-
-#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
-#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
-#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
-#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
-#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
-
-#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
-#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
-#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
-#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
-#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
-
-#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
-
-#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
-
-#define QSYS_PREEMPTION_CFG_RSZ 0x4
-
-#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
-#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
-#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
-#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
-#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
-#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_CIR_CFG_GSZ 0x80
-
-#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
-#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
-#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
-#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
-#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
-
-#define QSYS_EIR_CFG_GSZ 0x80
-
-#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
-#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
-#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
-#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
-#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
-#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
-#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
-
-#define QSYS_SE_CFG_GSZ 0x80
-
-#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
-#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
-#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
-#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
-#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
-#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
-#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
-#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
-#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
-#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
-
-#define QSYS_SE_DWRR_CFG_GSZ 0x80
-#define QSYS_SE_DWRR_CFG_RSZ 0x4
-
-#define QSYS_SE_CONNECT_GSZ 0x80
-
-#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
-#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
-#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
-#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
-#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
-#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
-#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
-#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
-#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
-#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
-#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
-#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
-#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
-
-#define QSYS_SE_DLB_SENSE_GSZ 0x80
-
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
-#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
-#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
-#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
-
-#define QSYS_CIR_STATE_GSZ 0x80
-
-#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
-#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
-#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
-#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
-#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
-
-#define QSYS_EIR_STATE_GSZ 0x80
-
-#define QSYS_SE_STATE_GSZ 0x80
-
-#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
-#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
-#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
-
-#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
-#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
-#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
-#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
-#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
-
-#define QSYS_TAG_CONFIG_RSZ 0x4
-
-#define QSYS_TAG_CONFIG_ENABLE BIT(0)
-#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
-#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
-#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
-#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
-
-#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
-#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
-#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
-#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
-
-#define QSYS_PORT_MAX_SDU_RSZ 0x4
-
-#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
-#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
-#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
-#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-
-#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
-#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
-
-#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
-#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
-#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
-
-#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
-#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
-#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
-
-#endif
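The QSYS header follows the same relocation. Its _RSZ and _GSZ constants are strides, not registers: a replicated register lives at base + index * RSZ, and a grouped one at base + group * GSZ. A sketch of indexing a per-port replication, again assuming a raw qsys_base mapping purely for illustration:

#include <linux/io.h>

#define QSYS_SWITCH_PORT_MODE_RSZ	0x4	/* per-port stride */

/* base_off stands in for the block-relative register offset, which the
 * real driver resolves through its regmap tables. */
static u32 example_switch_port_mode(void __iomem *qsys_base, u32 base_off,
				    int port)
{
	return readl(qsys_base + base_off + port * QSYS_SWITCH_PORT_MODE_RSZ);
}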
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c979f38a2e0c..2ee0d0be113a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2892,7 +2892,7 @@ drop:
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev)
{
- struct sk_buff *segs, *curr;
+ struct sk_buff *segs, *curr, *next;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
netdev_tx_t status;
@@ -2901,10 +2901,8 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
if (IS_ERR(segs))
goto drop;
- while (segs) {
- curr = segs;
- segs = segs->next;
- curr->next = NULL;
+ skb_list_walk_safe(segs, curr, next) {
+ skb_mark_not_on_list(curr);
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
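skb_list_walk_safe(), added in v5.6, latches the next pointer before the loop body runs, so the body may free or unlink the current skb — exactly what the removed open-coded walk did by hand. Its use here reduces to roughly:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* 'next' is loaded up front, so freeing 'curr' in the body is safe. */
static void example_free_segs(struct sk_buff *segs)
{
	struct sk_buff *curr, *next;

	skb_list_walk_safe(segs, curr, next) {
		skb_mark_not_on_list(curr);	/* i.e. curr->next = NULL */
		dev_kfree_skb_any(curr);
	}
}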
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 1a2634cbbb69..d21d706b83a7 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -612,7 +612,7 @@ static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
-static void ns_tx_timeout(struct net_device *dev);
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
@@ -1881,7 +1881,7 @@ static void dump_ring(struct net_device *dev)
}
}
-static void ns_tx_timeout(struct net_device *dev)
+static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
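All the tx_timeout conversions in this merge track the v5.6 change to ndo_tx_timeout, which now receives the index of the stalled queue; callers that cannot name a queue (such as ns83820's watchdog timer below) pass UINT_MAX. The new shape, with foo_* as placeholder names:

#include <linux/netdevice.h>

/* A single-queue driver may simply ignore txqueue. */
static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "TX watchdog fired on queue %u\n", txqueue);
	/* ... reset the hardware and restart the queue ... */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_tx_timeout	= foo_tx_timeout,
};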
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 6af9a7eee114..8e24c7acf79b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1549,7 +1549,7 @@ static int ns83820_stop(struct net_device *ndev)
return 0;
}
-static void ns83820_tx_timeout(struct net_device *ndev)
+static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ns83820 *dev = PRIV(ndev);
u32 tx_done_idx;
@@ -1603,7 +1603,7 @@ static void ns83820_tx_watch(struct timer_list *t)
ndev->name,
dev->tx_done_idx, dev->tx_free_idx,
atomic_read(&dev->nr_tx_skbs));
- ns83820_tx_timeout(ndev);
+ ns83820_tx_timeout(ndev, UINT_MAX);
}
mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
@@ -1937,7 +1937,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
- dev->base = ioremap_nocache(addr, PAGE_SIZE);
+ dev->base = ioremap(addr, PAGE_SIZE);
dev->tx_descs = pci_alloc_consistent(pci_dev,
4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
dev->rx_info.descs = pci_alloc_consistent(pci_dev,
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index b339125b2f09..31be3ba66877 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
+ spin_lock_init(&lp->lock);
+
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
return 0;
}
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+ int i;
+ u16 bits;
+
+ for (i = 0; i < 1000; ++i) {
+ bits = SONIC_READ(SONIC_CMD) & mask;
+ if (!bits)
+ return;
+ if (irqs_disabled() || in_interrupt())
+ udelay(20);
+ else
+ usleep_range(100, 200);
+ }
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
/*
* Close the SONIC device
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -161,7 +184,7 @@ static int sonic_close(struct net_device *dev)
return 0;
}
-static void sonic_tx_timeout(struct net_device *dev)
+static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
- int entry = lp->next_tx;
+ int entry;
+ unsigned long flags;
netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->next_tx;
+
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- /*
- * Must set tx_skb[entry] only after clearing status, and
- * before clearing EOL and before stopping queue
- */
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
}
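sonic_send_packet() now brackets all ring manipulation with the new spinlock, taken with IRQs disabled so the interrupt handler (next hunk) cannot mutate shared state while descriptors are in flux; note that entry = lp->next_tx is deliberately read only after the lock is held. A stripped-down sketch of the pattern, with hypothetical names:

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);	/* excludes the IRQ handler */
	/* ... claim a descriptor, fill it, kick the hardware ... */
	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}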
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
+ unsigned long flags;
+
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+ * with sonic_send_packet() so that the two functions can share state.
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+ * by macsonic which must use two IRQs with different priority levels.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ if (!status) {
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
return IRQ_NONE;
+ }
do {
+ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
if (status & SONIC_INT_PKTRX) {
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}
if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
int td_status;
int freed_some = 0;
- /* At this point, cur_tx is the index of a TD that is one of:
- * unallocated/freed (status set & tx_skb[entry] clear)
- * allocated and sent (status set & tx_skb[entry] set )
- * allocated and not yet sent (status clear & tx_skb[entry] set )
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ /* The state of a Transmit Descriptor may be inferred
+ * from { tx_skb[entry], td_status } as follows.
+ * { clear, clear } => the TD has never been used
+ * { set, clear } => the TD was handed to SONIC
+ * { set, set } => the TD was handed back
+ * { clear, set } => the TD is available for re-use
*/
netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
break;
- if (td_status & 0x0001) {
+ if (td_status & SONIC_TCR_PTX) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
} else {
- lp->stats.tx_errors++;
- if (td_status & 0x0642)
+ if (td_status & (SONIC_TCR_EXD |
+ SONIC_TCR_EXC | SONIC_TCR_BCM))
lp->stats.tx_aborted_errors++;
- if (td_status & 0x0180)
+ if (td_status &
+ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
lp->stats.tx_carrier_errors++;
- if (td_status & 0x0020)
+ if (td_status & SONIC_TCR_OWC)
lp->stats.tx_window_errors++;
- if (td_status & 0x0004)
+ if (td_status & SONIC_TCR_FU)
lp->stats.tx_fifo_errors++;
}
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (freed_some || lp->tx_skb[entry] == NULL)
netif_wake_queue(dev); /* The ring is no longer full */
lp->cur_tx = entry;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}
/*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
if (status & SONIC_INT_RFO) {
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
__func__);
- lp->stats.rx_fifo_errors++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}
/* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE) {
+ if (status & SONIC_INT_FAE)
lp->stats.rx_frame_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
- }
- if (status & SONIC_INT_CRC) {
+ if (status & SONIC_INT_CRC)
lp->stats.rx_crc_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
- }
- if (status & SONIC_INT_MP) {
+ if (status & SONIC_INT_MP)
lp->stats.rx_missed_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
- }
/* transmit error */
if (status & SONIC_INT_TXER) {
- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ u16 tcr = SONIC_READ(SONIC_TCR);
+
+ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+ __func__, tcr);
+
+ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+ SONIC_TCR_FU | SONIC_TCR_BCM)) {
+ /* Aborted transmission. Try again. */
+ netif_stop_queue(dev);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
}
/* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* ... to help debug DMA problems causing endless interrupts. */
/* Bounce the eth interface to turn on the interrupt again. */
SONIC_WRITE(SONIC_IMR, 0);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
}
- /* load CAM done */
- if (status & SONIC_INT_LCD)
- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return IRQ_HANDLED;
}
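Two structural points in the reworked handler. First, SONIC_ISR is write-one-to-clear: instead of acknowledging each condition at its handling site, the loop now acknowledges the whole status snapshot once at the top (SONIC_WRITE(SONIC_ISR, status)) and then re-reads the register, so events that arrive mid-loop are picked up on the next pass rather than lost. Second, per the comment above, the lock also serializes concurrent invocations, which macsonic needs because it services the chip from two IRQs with different priority levels. The acknowledgment loop in isolation, with isr_read()/isr_ack() as hypothetical register accessors:

	u16 status = isr_read() & enabled_mask;

	while (status) {
		isr_ack(status);	/* W1C: clears exactly the bits we saw */
		service(status);	/* handle every condition in the snapshot */
		status = isr_read() & enabled_mask;	/* new arrivals */
	}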
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+ unsigned int last)
+{
+ unsigned int i = last;
+
+ do {
+ i = (i + 1) & SONIC_RRS_MASK;
+ if (addr == lp->rx_laddr[i])
+ return i;
+ } while (i != last);
+
+ return -ENOENT;
+}
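index_from_addr() maps a receive buffer's DMA address back to its ring slot. The scan starts just past the previously matched slot, which favors the common case of buffers completing in ring order. The same circular search in plain, user-space-testable C, with MASK standing in for SONIC_RRS_MASK on a 16-entry ring:

#include <stdio.h>

#define MASK 15	/* ring size 16, a power of two */

static int index_from(const unsigned long *ring, unsigned long addr,
		      unsigned int last)
{
	unsigned int i = last;

	do {
		i = (i + 1) & MASK;	/* advance with wrap-around */
		if (ring[i] == addr)
			return i;
	} while (i != last);

	return -1;			/* address not in the ring */
}

int main(void)
{
	unsigned long ring[16] = { 0 };

	ring[7] = 0xd0d0cafe;
	/* Starting after slot 5, the scan visits 6, then 7, and stops. */
	printf("%d\n", index_from(ring, 0xd0d0cafe, 5));	/* prints 7 */
	return 0;
}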
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (!*new_skb)
+ return false;
+
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(*new_skb, 2);
+
+ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!*new_addr) {
+ dev_kfree_skb(*new_skb);
+ *new_skb = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+ dma_addr_t old_addr, dma_addr_t new_addr)
+{
+ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+ u32 buf;
+
+ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+ * scans the other resources in the RRA, those in the range [RWP, RRP).
+ */
+ do {
+ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+ if (buf == old_addr)
+ break;
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+ } while (entry != end);
+
+ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
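sonic_update_rra() leans on the ownership convention in its comment: slots in [RRP, RWP) belong to the chip, so the driver only touches the complementary range [RWP, RRP). It rewrites the slot that held the consumed buffer with the replacement address, then advances RWP one slot, handing exactly one resource back to the chip. A worked example on a hypothetical 4-entry ring: with RRP = 1 and RWP = 3, the chip owns slots {1, 2} and the scan covers {3, 0}; if the old address sits in slot 3, the new address is written there and RWP advances to slot 0.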
+
/*
* We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
- int status;
int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
+ bool rbe = false;
while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
- struct sk_buff *used_skb;
- struct sk_buff *new_skb;
- dma_addr_t new_laddr;
- u16 bufadr_l;
- u16 bufadr_h;
- int pkt_len;
-
- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
- if (status & SONIC_RCR_PRX) {
- /* Malloc up new buffer. */
- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
- if (new_skb == NULL) {
- lp->stats.rx_dropped++;
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+ /* If the RD has LPKT set, the chip has finished with the RB */
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+ int i = index_from_addr(lp, addr, entry);
+
+ if (i < 0) {
+ WARN_ONCE(1, "failed to find buffer!\n");
break;
}
- /* provide 16 byte IP header alignment unless DMA requires otherwise */
- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
- skb_reserve(new_skb, 2);
-
- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
- SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!new_laddr) {
- dev_kfree_skb(new_skb);
- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+ struct sk_buff *used_skb = lp->rx_skb[i];
+ int pkt_len;
+
+ /* Pass the used buffer up the stack */
+ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+ DMA_FROM_DEVICE);
+
+ pkt_len = sonic_rda_get(dev, entry,
+ SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb,
+ dev);
+ netif_rx(used_skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ lp->rx_skb[i] = new_skb;
+ lp->rx_laddr[i] = new_laddr;
+ } else {
+ /* Failed to obtain a new buffer so re-use it */
+ new_laddr = addr;
lp->stats.rx_dropped++;
- break;
}
-
- /* now we have a new skb to replace it, pass the used one up the stack */
- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
- used_skb = lp->rx_skb[entry];
- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
- skb_trim(used_skb, pkt_len);
- used_skb->protocol = eth_type_trans(used_skb, dev);
- netif_rx(used_skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
-
- /* and insert the new skb */
- lp->rx_laddr[entry] = new_laddr;
- lp->rx_skb[entry] = new_skb;
-
- bufadr_l = (unsigned long)new_laddr & 0xffff;
- bufadr_h = (unsigned long)new_laddr >> 16;
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
- } else {
- /* This should only happen, if we enable accepting broken packets. */
- lp->stats.rx_errors++;
- if (status & SONIC_RCR_FAER)
- lp->stats.rx_frame_errors++;
- if (status & SONIC_RCR_CRCR)
- lp->stats.rx_crc_errors++;
- }
- if (status & SONIC_RCR_LPKT) {
- /*
- * this was the last packet out of the current receive buffer
- * give the buffer back to the SONIC
+ /* If RBE is already asserted when RWP advances then
+ * it's safe to clear RBE after processing this packet.
*/
- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
- }
- } else
- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
- dev->name);
+ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+ sonic_update_rra(dev, lp, addr, new_laddr);
+ }
/*
* give back the descriptor
*/
- sonic_rda_put(dev, entry, SONIC_RD_LINK,
- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
- lp->eol_rx = entry;
- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+ prev_entry = entry;
+ entry = (entry + 1) & SONIC_RDS_MASK;
+ }
+
+ lp->cur_rx = entry;
+
+ if (prev_entry != lp->eol_rx) {
+ /* Advance the EOL flag to put descriptors back into service */
+ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+ lp->eol_rx = prev_entry;
}
+
+ if (rbe)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
/*
* If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
+ unsigned long flags;
+
netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
i++;
}
SONIC_WRITE(SONIC_CDC, 16);
- /* issue Load CAM command */
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+ /* LCAM and TXP commands can't be used simultaneously */
+ spin_lock_irqsave(&lp->lock, flags);
+ sonic_quiesce(dev, SONIC_CR_TXP);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
}
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ /* While in reset mode, clear CAM Enable register */
+ SONIC_WRITE(SONIC_CE, 0);
+
/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+ sonic_quiesce(dev, SONIC_CR_ALL);
/*
* initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
}
/* initialize all RRA registers */
- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_REA, lp->rra_end);
- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
- break;
- }
-
- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), i);
+ sonic_quiesce(dev, SONIC_CR_RRRA);
/*
* Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
- break;
- }
- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
/*
* enable receiver, disable loopback
* and enable all interrupts
*/
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
- cmd = SONIC_READ(SONIC_CMD);
- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
SONIC_READ(SONIC_CMD));
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 2b27f7049acb..e0e4cba6f6f6 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -110,6 +110,9 @@
#define SONIC_CR_TXP 0x0002
#define SONIC_CR_HTX 0x0001
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+ SONIC_CR_RXEN | SONIC_CR_TXP)
+
/*
* SONIC data configuration bits
*/
@@ -175,6 +178,7 @@
#define SONIC_TCR_NCRS 0x0100
#define SONIC_TCR_CRLS 0x0080
#define SONIC_TCR_EXC 0x0040
+#define SONIC_TCR_OWC 0x0020
#define SONIC_TCR_PMB 0x0008
#define SONIC_TCR_FU 0x0004
#define SONIC_TCR_BCM 0x0002
@@ -274,8 +278,9 @@
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
#define SONIC_RBSIZE 1520 /* size of one resource buffer */
@@ -312,8 +317,6 @@ struct sonic_local {
u32 rda_laddr; /* logical DMA address of RDA */
dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
- unsigned int rra_end;
- unsigned int cur_rwp;
unsigned int cur_rx;
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
@@ -322,6 +325,7 @@ struct sonic_local {
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
+ spinlock_t lock;
};
#define TX_TIMEOUT (3 * HZ)
@@ -336,7 +340,7 @@ static int sonic_close(struct net_device *dev);
static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
-static void sonic_tx_timeout(struct net_device *dev);
+static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
as far as we can tell. */
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
int offset, __u16 val)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- ((__u16 *) base + (offset*2))[1] = val;
+ __raw_writew(val, base + (offset * 2) + 1);
#else
- ((__u16 *) base + (offset*2))[0] = val;
+ __raw_writew(val, base + (offset * 2) + 0);
#endif
else
- ((__u16 *) base)[offset] = val;
+ __raw_writew(val, base + (offset * 1) + 0);
}
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
int offset)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- return ((volatile __u16 *) base + (offset*2))[1];
+ return __raw_readw(base + (offset * 2) + 1);
#else
- return ((volatile __u16 *) base + (offset*2))[0];
+ return __raw_readw(base + (offset * 2) + 0);
#endif
else
- return ((volatile __u16 *) base)[offset];
+ return __raw_readw(base + (offset * 1) + 0);
}
/* Inlines that you should actually use for reading/writing DMA buffers */
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
(entry * SIZEOF_SONIC_RR) + offset);
}
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return lp->rra_laddr +
+ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
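These two inlines replace the cached rra_end and cur_rwp fields (removed from struct sonic_local below) with pure arithmetic: sonic_rr_addr() maps a ring index to the 16-bit address the chip sees, and sonic_rr_entry() inverts it. A worked example, assuming SIZEOF_SONIC_RR == 4 and 32-bit mode so SONIC_BUS_SCALE(lp->dma_bitmode) == 4, i.e. 16 bytes per resource:

	sonic_rr_addr(dev, 3)                == rra_laddr + 3 * 4 * 4
	                                     == rra_laddr + 48
	sonic_rr_entry(dev, rra_laddr + 48)  == 48 / 16 == 3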
static const char version[] =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index e0b2bf327905..0ec6b8e8b549 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7238,7 +7238,7 @@ out_unlock:
* void
*/
-static void s2io_tx_watchdog(struct net_device *dev)
+static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct s2io_nic *sp = netdev_priv(dev);
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 0a921f30f98f..6fa3159a977f 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1065,7 +1065,7 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
-static void s2io_tx_watchdog(struct net_device *dev);
+static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1d334f2e0a56..9b63574b6202 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3273,7 +3273,7 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
*/
-static void vxge_tx_watchdog(struct net_device *dev)
+static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
struct vxgedev *vdev;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index bac5be4d4f43..a3f68a718813 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -31,6 +31,7 @@ config NFP_APP_FLOWER
bool "NFP4000/NFP6000 TC Flower offload support"
depends on NFP
depends on NET_SWITCHDEV
+ depends on IPV6!=m || NFP=m
default y
---help---
Enable driver support for TC Flower offload on NFP4000 and NFP6000.
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9f8a1f69c0c4..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -176,10 +176,8 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
u8 mask, val;
int err;
- if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
- err = -EOPNOTSUPP;
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
goto err_delete;
- }
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
@@ -200,18 +198,14 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
if ((iter->val & cmask) == (val & cmask) &&
iter->band != knode->res->classid) {
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
- err = -EOPNOTSUPP;
goto err_delete;
}
}
if (!match) {
match = kzalloc(sizeof(*match), GFP_KERNEL);
- if (!match) {
- err = -ENOMEM;
- goto err_delete;
- }
-
+ if (!match)
+ return -ENOMEM;
list_add(&match->list, &alink->dscp_map);
}
match->handle = knode->handle;
@@ -227,7 +221,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
err_delete:
nfp_abm_u32_knode_delete(alink, knode);
- return err;
+ return -EOPNOTSUPP;
}
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index a460c75522be..d81d450be50e 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -26,6 +26,7 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_CRYPTO_ADD = 10,
NFP_CCM_TYPE_CRYPTO_DEL = 11,
NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
+ NFP_CCM_TYPE_CRYPTO_RESYNC = 13,
__NFP_CCM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
index 60372ddf69f0..bffe58bb2f27 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -4,6 +4,10 @@
#ifndef NFP_CRYPTO_H
#define NFP_CRYPTO_H 1
+struct net_device;
+struct nfp_net;
+struct nfp_net_tls_resync_req;
+
struct nfp_net_tls_offload_ctx {
__be32 fw_handle[2];
@@ -17,11 +21,22 @@ struct nfp_net_tls_offload_ctx {
#ifdef CONFIG_TLS_DEVICE
int nfp_net_tls_init(struct nfp_net *nn);
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len);
#else
static inline int nfp_net_tls_init(struct nfp_net *nn)
{
return 0;
}
+
+static inline int
+nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
index 67413d946c4a..8d1458896bcb 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -9,6 +9,14 @@
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+struct nfp_net_tls_resync_req {
+ __be32 fw_handle[2];
+ __be32 tcp_seq;
+ u8 l3_offset;
+ u8 l4_offset;
+ u8 resv[2];
+};
+
struct nfp_crypto_reply_simple {
struct nfp_ccm_hdr hdr;
__be32 error;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 96a96b35c0ca..7c50e3dfb9d5 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -5,6 +5,7 @@
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
+#include <net/inet6_hashtables.h>
#include <net/tls.h>
#include "../ccm.h"
@@ -391,8 +392,9 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
return 0;
- tls_offload_rx_resync_set_type(sk,
- TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ if (!nn->tlv_caps.tls_resync_ss)
+ tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+
return 0;
err_fw_remove:
@@ -424,6 +426,7 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
struct nfp_crypto_req_update *req;
+ enum nfp_ccm_type type;
struct sk_buff *skb;
gfp_t flags;
int err;
@@ -442,15 +445,18 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
req->tcp_seq = cpu_to_be32(seq);
memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+ type = NFP_CCM_TYPE_CRYPTO_UPDATE;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- err = nfp_net_tls_communicate_simple(nn, skb, "sync",
- NFP_CCM_TYPE_CRYPTO_UPDATE);
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
if (err)
return err;
ntls->next_seq = seq;
} else {
- nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ if (nn->tlv_caps.tls_resync_ss)
+ type = NFP_CCM_TYPE_CRYPTO_RESYNC;
+ nfp_ccm_mbox_post(nn, skb, type,
sizeof(struct nfp_crypto_reply_simple));
+ atomic_inc(&nn->ktls_rx_resync_sent);
}
return 0;
@@ -462,6 +468,79 @@ static const struct tlsdev_ops nfp_net_tls_ops = {
.tls_dev_resync = nfp_net_tls_resync,
};
+int nfp_net_tls_rx_resync_req(struct net_device *netdev,
+ struct nfp_net_tls_resync_req *req,
+ void *pkt, unsigned int pkt_len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be32 tcp_seq;
+ int err;
+
+ iph = pkt + req->l3_offset;
+ ipv6h = pkt + req->l3_offset;
+ th = pkt + req->l4_offset;
+
+ if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
+ req->l3_offset, req->l4_offset, pkt_len);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ switch (iph->version) {
+ case 4:
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case 6:
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, ntohs(th->dest),
+ netdev->ifindex, 0);
+ break;
+#endif
+ default:
+ netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
+ req->l3_offset, req->l4_offset, iph->version);
+ err = -EINVAL;
+ goto err_cnt_ign;
+ }
+
+ err = 0;
+ if (!sk)
+ goto err_cnt_ign;
+ if (!tls_is_sk_rx_device_offloaded(sk) ||
+ sk->sk_shutdown & RCV_SHUTDOWN)
+ goto err_put_sock;
+
+ ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
+ /* some FW versions can't report the handle and report 0s */
+ if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
+ memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
+ goto err_put_sock;
+
+ /* copy to ensure alignment */
+ memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
+ tls_offload_rx_resync_request(sk, tcp_seq);
+ atomic_inc(&nn->ktls_rx_resync_req);
+
+ sock_gen_put(sk);
+ return 0;
+
+err_put_sock:
+ sock_gen_put(sk);
+err_cnt_ign:
+ atomic_inc(&nn->ktls_rx_resync_ign);
+ return err;
+}
+
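One detail worth flagging in nfp_net_tls_rx_resync_req(): req points into a packet buffer at whatever offset the firmware chose, so req->tcp_seq may be misaligned, and the code copies it into an aligned local before use instead of dereferencing it directly (the "copy to ensure alignment" comment above). The idiom in miniature, user-space-testable:

#include <stdint.h>
#include <string.h>

/* Load a 32-bit value through a possibly unaligned pointer. memcpy is
 * the portable spelling; compilers lower it to a single load where the
 * target architecture allows.
 */
static uint32_t load32_unaligned(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}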
static int nfp_net_tls_reset(struct nfp_net *nn)
{
struct nfp_crypto_req_reset *req;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1b019fdfcd97..c06600fb47ff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -22,8 +22,9 @@
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS (IP_TUNNEL_INFO_TX | \
+ IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
@@ -394,19 +395,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev, struct netlink_ext_ack *extack)
+nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
+ bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
+ size_t act_size = sizeof(struct nfp_fl_set_tun);
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
+ return -EOPNOTSUPP;
+
+ if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
+ return -EOPNOTSUPP;
+
BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
@@ -417,19 +425,35 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
return -EOPNOTSUPP;
}
- set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
tmp_set_ip_tun_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
- FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+ FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
+ FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
set_tun->ttl = ip_tun->key.ttl;
+#ifdef CONFIG_IPV6
+ } else if (ipv6) {
+ struct net *net = dev_net(netdev);
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+
+ flow.daddr = ip_tun->key.u.ipv6.dst;
+ flow.flowi4_proto = IPPROTO_UDP;
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
+ if (!IS_ERR(dst)) {
+ set_tun->ttl = ip6_dst_hoplimit(dst);
+ dst_release(dst);
+ } else {
+ set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ }
+#endif
} else {
struct net *net = dev_net(netdev);
struct flowi4 flow = {};
@@ -455,7 +479,7 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
}
@@ -467,7 +491,12 @@ nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
}
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ if (ipv6) {
+ pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
+ pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
+ } else {
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+ }
return 0;
}
@@ -956,8 +985,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
@@ -1032,7 +1061,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
}
@@ -1046,11 +1075,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev, extack);
+ err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
+ netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
+ *a_len += sizeof(struct nfp_fl_set_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 05981b54eaab..a595ddb92bff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -270,11 +270,17 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
- nfp_tunnel_request_route(app, skb);
+ nfp_tunnel_request_route_v4(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
+ nfp_tunnel_request_route_v6(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
+ nfp_tunnel_keep_alive_v6(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
nfp_flower_stats_rlim_reply(app, skb);
break;
@@ -361,7 +367,8 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
- } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 7eb2ec8969c3..9b50d76bbc09 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,6 +26,7 @@
#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
+#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
@@ -63,6 +64,7 @@
#define NFP_FL_MAX_GENEVE_OPT_ACT 32
#define NFP_FL_MAX_GENEVE_OPT_CNT 64
#define NFP_FL_MAX_GENEVE_OPT_KEY 32
+#define NFP_FL_MAX_GENEVE_OPT_KEY_V6 8
/* Action opcodes */
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
@@ -70,7 +72,7 @@
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
-#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
@@ -99,8 +101,8 @@
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000
-#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
-#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+#define NFP_FL_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_PRE_TUN_INDEX GENMASK(2, 0)
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
@@ -206,13 +208,16 @@ struct nfp_fl_pre_lag {
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
- __be16 reserved;
- __be32 ipv4_dst;
- /* reserved for use with IPv6 addresses */
- __be32 extra[3];
+ __be16 flags;
+ union {
+ __be32 ipv4_dst;
+ struct in6_addr ipv6_dst;
+ };
};
-struct nfp_fl_set_ipv4_tun {
+#define NFP_FL_PRE_TUN_IPV6 BIT(0)
+
+struct nfp_fl_set_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -387,6 +392,11 @@ struct nfp_flower_tun_ipv4 {
__be32 dst;
};
+struct nfp_flower_tun_ipv6 {
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
struct nfp_flower_tun_ip_ext {
u8 tos;
u8 ttl;
@@ -416,6 +426,42 @@ struct nfp_flower_ipv4_udp_tun {
__be32 tun_id;
};
+/* Flow Frame IPv6 UDP TUNNEL --> Tunnel details (11W/44B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_udp_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 reserved1;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be32 reserved2;
+ __be32 tun_id;
+};
+
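The struct must match the 11-word/44-byte wire layout diagrammed above exactly. Under the usual packing rules its members add up to 44 bytes (32 for the two in6_addr, 2 + 2 for reserved1 and ip_ext, 4 + 4 for reserved2 and tun_id), which could be pinned down at compile time with the kernel's static assertion. A hedged sketch, not part of this diff — BUILD_BUG_ON() expands to code, so it belongs inside a function body:

	#include <linux/build_bug.h>

	BUILD_BUG_ON(sizeof(struct nfp_flower_ipv6_udp_tun) != 44);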
/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
* -----------------------------------------------------------------
* 3 2 1
@@ -445,6 +491,46 @@ struct nfp_flower_ipv4_gre_tun {
__be32 reserved2;
};
+/* Flow Frame GRE TUNNEL V6 --> Tunnel details (12W/48B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_src, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 31 - 0 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 63 - 32 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 95 - 64 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv6_addr_dst, 127 - 96 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv6_gre_tun {
+ struct nfp_flower_tun_ipv6 ipv6;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
+
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -485,6 +571,10 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e0c985fcaec1..d55d0d33bc45 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -43,6 +43,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
+#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -62,18 +63,26 @@ struct nfp_fl_stats_id {
* struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
* @offloaded_macs: Hashtable of the offloaded MAC addresses
* @ipv4_off_list: List of IPv4 addresses to offload
- * @neigh_off_list: List of neighbour offloads
+ * @ipv6_off_list: List of IPv6 addresses to offload
+ * @neigh_off_list_v4: List of IPv4 neighbour offloads
+ * @neigh_off_list_v6: List of IPv6 neighbour offloads
* @ipv4_off_lock: Lock for the IPv4 address list
- * @neigh_off_lock: Lock for the neighbour address list
+ * @ipv6_off_lock: Lock for the IPv6 address list
+ * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list
+ * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list
* @mac_off_ids: IDA to manage id assignment for offloaded MACs
* @neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_fl_tunnel_offloads {
struct rhashtable offloaded_macs;
struct list_head ipv4_off_list;
- struct list_head neigh_off_list;
+ struct list_head ipv6_off_list;
+ struct list_head neigh_off_list_v4;
+ struct list_head neigh_off_list_v6;
struct mutex ipv4_off_lock;
- spinlock_t neigh_off_lock;
+ struct mutex ipv6_off_lock;
+ spinlock_t neigh_off_lock_v4;
+ spinlock_t neigh_off_lock_v6;
struct ida mac_off_ids;
struct notifier_block neigh_nb;
};
@@ -273,12 +282,25 @@ struct nfp_fl_stats {
u64 used;
};
+/**
+ * struct nfp_ipv6_addr_entry - cached IPv6 addresses
+ * @ipv6_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv6_addr_entry {
+ struct in6_addr ipv6_addr;
+ int ref_count;
+ struct list_head list;
+};
+
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
struct rhash_head fl_node;
struct rcu_head rcu;
__be32 nfp_tun_ipv4_addr;
+ struct nfp_ipv6_addr_entry *nfp_tun_ipv6;
struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
@@ -396,8 +418,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
unsigned long event, void *ptr);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry);
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6);
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9cc3ba17ff69..546bc01d507d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,8 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_cls_offload *flow, u8 key_type)
+ struct flow_rule *rule, u8 key_type)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -77,11 +76,8 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -130,10 +126,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -150,11 +144,8 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
- struct nfp_flower_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -224,10 +215,8 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
- struct nfp_flower_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -241,16 +230,13 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
msk->ipv4_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
- struct nfp_flower_ipv6 *msk,
- struct flow_cls_offload *flow)
+ struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -264,16 +250,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
msk->ipv6_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
- flow_rule_match_enc_opts(flow->rule, &match);
+ flow_rule_match_enc_opts(rule, &match);
memcpy(ext, match.key->data, match.key->len);
memcpy(msk, match.mask->data, match.mask->len);
@@ -283,10 +268,8 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct nfp_flower_tun_ipv4 *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
@@ -299,12 +282,26 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
}
static void
+nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
+ struct nfp_flower_tun_ipv6 *msk,
+ struct flow_rule *rule)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct nfp_flower_tun_ip_ext *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_match_ip match;
@@ -317,57 +314,97 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
}
static void
-nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
- struct nfp_flower_ipv4_gre_tun *msk,
- struct flow_cls_offload *flow)
+nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+ u32 vni;
- /* NVGRE is the only supported GRE tunnel type */
- ext->ethertype = cpu_to_be16(ETH_P_TEB);
- msk->ethertype = cpu_to_be16(~0);
+ flow_rule_match_enc_keyid(rule, &match);
+ vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key = cpu_to_be32(vni);
+ vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ *key_msk = cpu_to_be32(vni);
+ }
+}
+static void
+nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
+ __be16 *flags_msk, struct flow_rule *rule)
+{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
- ext->tun_key = match.key->keyid;
- msk->tun_key = match.mask->keyid;
+ *key = match.key->keyid;
+ *key_msk = match.mask->keyid;
- ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
- msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct flow_cls_offload *flow)
+ struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
-
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid match;
- u32 temp_vni;
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
- flow_rule_match_enc_keyid(rule, &match);
- temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
- ext->tun_id = cpu_to_be32(temp_vni);
- temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
- msk->tun_id = cpu_to_be32(temp_vni);
- }
+static void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+ struct nfp_flower_ipv6_udp_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+ struct nfp_flower_ipv6_gre_tun *msk,
+ struct flow_rule *rule)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
- nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
- nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+ nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+ nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+ &ext->tun_flags, &msk->tun_flags, rule);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
@@ -378,6 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u32 port_id;
int err;
u8 *ext;
@@ -393,7 +431,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- flow, key_ls->key_layer);
+ rule, key_ls->key_layer);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -425,7 +463,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
(struct nfp_flower_mac_mpls *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
@@ -433,7 +471,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
(struct nfp_flower_tp_ports *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_tp_ports);
msk += sizeof(struct nfp_flower_tp_ports);
}
@@ -441,7 +479,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
(struct nfp_flower_ipv4 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv4);
msk += sizeof(struct nfp_flower_ipv4);
}
@@ -449,43 +487,83 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
(struct nfp_flower_ipv6 *)msk,
- flow);
+ rule);
ext += sizeof(struct nfp_flower_ipv6);
msk += sizeof(struct nfp_flower_ipv6);
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_gre_tun);
- msk += sizeof(struct nfp_flower_ipv4_gre_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_gre_tun((void *)ext,
+ (void *)msk, rule);
+ gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
+ dst = &gre_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv6_gre_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
}
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
- __be32 tun_dst;
-
- nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
- ext += sizeof(struct nfp_flower_ipv4_udp_tun);
- msk += sizeof(struct nfp_flower_ipv4_udp_tun);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(app, tun_dst);
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_udp_tun *udp_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ nfp_flower_compile_ipv6_udp_tun((void *)ext,
+ (void *)msk, rule);
+ udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
+ dst = &udp_match->ipv6.dst;
+ ext += sizeof(struct nfp_flower_ipv6_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv6_udp_tun);
+
+ entry = nfp_tunnel_add_ipv6_off(app, dst);
+ if (!entry)
+ return -EOPNOTSUPP;
+
+ nfp_flow->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ nfp_flower_compile_ipv4_udp_tun((void *)ext,
+ (void *)msk, rule);
+ dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(app, dst);
+ }
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, msk, flow);
+ err = nfp_flower_compile_geneve_opt(ext, msk, rule);
if (err)
return err;
}
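
The hunks above rework nfp_flower_compile_flow_match() so each tunnel variant writes its struct at the current position in the paired ext/msk buffers and advances both cursors by that struct's size. A minimal userspace sketch of the cursor pattern follows; layer_a and layer_b are made-up stand-ins for the flower layer structs, and the point is that the bytes emitted must equal the key size reserved up front:

#include <stdint.h>
#include <string.h>

struct layer_a { uint32_t f; };
struct layer_b { uint64_t g; };

static size_t build_key(uint8_t *ext, uint8_t *msk, int has_a, int has_b)
{
	uint8_t *start = ext;

	if (has_a) {
		memset(ext, 0, sizeof(struct layer_a));		/* exact value */
		memset(msk, 0xff, sizeof(struct layer_a));	/* its mask */
		ext += sizeof(struct layer_a);
		msk += sizeof(struct layer_a);
	}
	if (has_b) {
		memset(ext, 0, sizeof(struct layer_b));
		memset(msk, 0xff, sizeof(struct layer_b));
		ext += sizeof(struct layer_b);
		msk += sizeof(struct layer_b);
	}
	return ext - start;	/* must match the precomputed key size */
}
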
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 7c4a15e967df..5defd31d481c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
freed_stats_id = priv->stats_ring_size;
/* Check for unallocated entries first. */
if (priv->stats_ids.init_unalloc > 0) {
- if (priv->active_mem_unit == priv->total_mem_units) {
- priv->stats_ids.init_unalloc--;
- priv->active_mem_unit = 0;
- }
-
*stats_context_id =
FIELD_PREP(NFP_FL_STAT_ID_STAT,
priv->stats_ids.init_unalloc - 1) |
FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
priv->active_mem_unit);
- priv->active_mem_unit++;
+
+ if (++priv->active_mem_unit == priv->total_mem_units) {
+ priv->stats_ids.init_unalloc--;
+ priv->active_mem_unit = 0;
+ }
+
return 0;
}
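
The metadata.c change above makes the allocator hand out the stats id for the current memory unit before deciding whether all units of that index are exhausted. A small userspace model of the idea, with invented 22/10-bit field widths standing in for the real NFP_FL_STAT_ID_STAT/NFP_FL_STAT_ID_MU_NUM masks:

#include <stdint.h>
#include <stdio.h>

#define STAT_MASK 0x003fffff	/* bits 21:0  - per-unit stat index */
#define MU_MASK   0xffc00000	/* bits 31:22 - memory unit number  */

static uint32_t unalloc = 2, unit, total_units = 2;

static int get_stats_entry(uint32_t *id)
{
	if (!unalloc)
		return -1;

	*id = (unalloc - 1) & STAT_MASK;
	*id |= (unit << 22) & MU_MASK;

	if (++unit == total_units) {	/* all units of this index used */
		unalloc--;
		unit = 0;
	}
	return 0;
}

int main(void)
{
	uint32_t id;

	while (!get_stats_entry(&id))
		printf("id %#x\n", id);	/* 4 ids: 2 indices x 2 units */
	return 0;
}
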
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 987ae221f6be..7ca5c1becfcf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -54,6 +54,10 @@
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
@@ -64,7 +68,8 @@
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
- NFP_FLOWER_LAYER_IPV4)
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
struct nfp_flower_merge_check {
union {
@@ -146,10 +151,11 @@ static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size,
+ u32 *key_layer_two, int *key_size, bool ipv6,
struct netlink_ext_ack *extack)
{
- if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
+ (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
}
@@ -167,7 +173,7 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
struct flow_dissector_key_enc_opts *enc_op,
u32 *key_layer_two, u8 *key_layer, int *key_size,
struct nfp_flower_priv *priv,
- enum nfp_flower_tun_type *tun_type,
+ enum nfp_flower_tun_type *tun_type, bool ipv6,
struct netlink_ext_ack *extack)
{
int err;
@@ -176,7 +182,15 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
*key_layer |= NFP_FLOWER_LAYER_VXLAN;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (enc_op) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
@@ -192,7 +206,13 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
*key_layer |= NFP_FLOWER_LAYER_EXT_META;
*key_size += sizeof(struct nfp_flower_ext_meta);
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (ipv6) {
+ *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ } else {
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
if (!enc_op)
break;
@@ -200,8 +220,8 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
return -EOPNOTSUPP;
}
- err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
- key_size, extack);
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
+ ipv6, extack);
if (err)
return err;
break;
@@ -237,6 +257,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
@@ -268,8 +290,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_match_enc_opts enc_op = { NULL, NULL };
struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
struct flow_match_control enc_ctl;
struct flow_match_ports enc_ports;
+ bool ipv6_tun = false;
flow_rule_match_enc_control(rule, &enc_ctl);
@@ -277,38 +301,62 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
}
- if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
+
+ ipv6_tun = enc_ctl.key->addr_type ==
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ if (ipv6_tun &&
+ !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
return -EOPNOTSUPP;
}
- /* These fields are already verified as used. */
- flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ if (!ipv6_tun &&
+ enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
return -EOPNOTSUPP;
}
+ if (ipv6_tun) {
+ flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
+ if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
+ sizeof(ipv6_addrs.mask->dst))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
/* check if GRE, which has no enc_ports */
- if (netif_is_gretap(netdev)) {
- *tun_type = NFP_FL_TUNNEL_GRE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GRE;
- key_size +=
- sizeof(struct nfp_flower_ipv4_gre_tun);
+ if (!netif_is_gretap(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ return -EOPNOTSUPP;
+ }
- if (enc_op.key) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
- return -EOPNOTSUPP;
- }
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+
+ if (ipv6_tun) {
+ key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+ key_size +=
+ sizeof(struct nfp_flower_ipv6_gre_tun);
} else {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_gre_tun);
+ }
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
return -EOPNOTSUPP;
}
} else {
@@ -323,7 +371,8 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
&key_layer_two,
&key_layer,
&key_size, priv,
- tun_type, extack);
+ tun_type, ipv6_tun,
+ extack);
if (err)
return err;
@@ -491,6 +540,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
goto err_free_mask;
flow_pay->nfp_tun_ipv4_addr = 0;
+ flow_pay->nfp_tun_ipv6 = NULL;
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
@@ -517,10 +567,12 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
struct nfp_fl_set_ip4_addrs *ipv4_add;
struct nfp_fl_set_ipv6_addr *ipv6_add;
struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tport *tport;
struct nfp_fl_set_eth *eth;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
+ bool ipv6_tun = false;
u8 act_id = 0;
u8 *ports;
int i;
@@ -542,14 +594,18 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
case NFP_FL_ACTION_OPCODE_POP_VLAN:
merge->tci = cpu_to_be16(0);
break;
- case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
/* New tunnel header means l2 to l4 can be matched. */
eth_broadcast_addr(&merge->l2.mac_dst[0]);
eth_broadcast_addr(&merge->l2.mac_src[0]);
memset(&merge->l4, 0xff,
sizeof(struct nfp_flower_tp_ports));
- memset(&merge->ipv4, 0xff,
- sizeof(struct nfp_flower_ipv4));
+ if (ipv6_tun)
+ memset(&merge->ipv6, 0xff,
+ sizeof(struct nfp_flower_ipv6));
+ else
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
break;
case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
eth = (struct nfp_fl_set_eth *)a;
@@ -597,6 +653,10 @@ nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
ports[i] |= tport->tp_port_mask[i];
break;
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ pre_tun = (struct nfp_fl_pre_tunnel *)a;
+ ipv6_tun = be16_to_cpu(pre_tun->flags) &
+ NFP_FL_PRE_TUN_IPV6;
+ break;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
break;
@@ -765,15 +825,15 @@ nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
- struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_set_tun *tun;
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
- tun = (struct nfp_fl_set_ipv4_tun *)a;
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
+ tun = (struct nfp_fl_set_tun *)a;
tun->outer_vlan_tpid = vlan->vlan_tpid;
tun->outer_vlan_tci = vlan->vlan_tci;
@@ -1058,15 +1118,22 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
- if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
+ key_layer & NFP_FLOWER_LAYER_IPV6) {
+ /* The flags and proto fields have the same offset in IPv4 and IPv6. */
int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int size;
int i;
+ size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
+ sizeof(struct nfp_flower_ipv4) :
+ sizeof(struct nfp_flower_ipv6);
+
mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
- for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ for (i = 0; i < size; i++)
if (mask[i] && i != ip_flags && i != ip_proto) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
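
The pre-tunnel rule hunk above scans the IP-layer mask and rejects any set byte other than the flags and proto offsets, for both the IPv4 and the larger IPv6 struct. The same check as a standalone, illustrative helper:

#include <stddef.h>
#include <stdbool.h>

static bool only_fields_set(const unsigned char *mask, size_t len,
			    size_t off_a, size_t off_b)
{
	size_t i;

	/* every byte must be zero except the two allowed offsets */
	for (i = 0; i < len; i++)
		if (mask[i] && i != off_a && i != off_b)
			return false;
	return true;
}
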
@@ -1195,6 +1262,8 @@ err_remove_rhash:
err_release_metadata:
nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
kfree(flow_pay->action_data);
kfree(flow_pay->mask_data);
kfree(flow_pay->unmasked_data);
@@ -1311,6 +1380,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+ if (nfp_flow->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
+
if (!nfp_flow->in_hw) {
err = 0;
goto err_free_merge_flow;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2600ce476d6b..2df3deedf9fd 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -55,6 +55,25 @@ struct nfp_tun_active_tuns {
};
/**
+ * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels report in message
+ * @flags: options part of the request
+ * @tun_info.ipv6: dest IPv6 address of active route
+ * @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns_v6 {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info_v6 {
+ struct in6_addr ipv6;
+ __be32 egress_port;
+ } tun_info[];
+};
+
+/**
* struct nfp_tun_neigh - neighbour/route entry on the NFP
* @dst_ipv4: destination IPv4 address
* @src_ipv4: source IPv4 address
@@ -71,6 +90,22 @@ struct nfp_tun_neigh {
};
/**
+ * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
+ * @dst_ipv6: destination IPv6 address
+ * @src_ipv6: source IPv6 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv6
+ */
+struct nfp_tun_neigh_v6 {
+ struct in6_addr dst_ipv6;
+ struct in6_addr src_ipv6;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
* struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
* @ingress_port: ingress port of packet that signalled request
* @ipv4_addr: destination ipv4 address for route
@@ -83,13 +118,23 @@ struct nfp_tun_req_route_ipv4 {
};
/**
- * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
- * @ipv4_addr: destination of route
+ * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv6_addr: destination ipv6 address for route
+ */
+struct nfp_tun_req_route_ipv6 {
+ __be32 ingress_port;
+ struct in6_addr ipv6_addr;
+};
+
+/**
+ * struct nfp_offloaded_route - routes that are offloaded to the NFP
* @list: list pointer
+ * @ip_add: destination of route - can be IPv4 or IPv6
*/
-struct nfp_ipv4_route_entry {
- __be32 ipv4_addr;
+struct nfp_offloaded_route {
struct list_head list;
+ u8 ip_add[];
};
#define NFP_FL_IPV4_ADDRS_MAX 32
@@ -116,6 +161,18 @@ struct nfp_ipv4_addr_entry {
struct list_head list;
};
+#define NFP_FL_IPV6_ADDRS_MAX 4
+
+/**
+ * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv6_addr: array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
+ */
+struct nfp_tun_ipv6_addr {
+ __be32 count;
+ struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
+};
+
#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
/**
@@ -206,6 +263,49 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
rcu_read_unlock();
}
+void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct nfp_tun_active_tuns_v6 *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ void *ipv6_add;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != struct_size(payload, tun_info, count)) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ ipv6_add = &payload->tun_info[i].ipv6;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_dev_get(app, port, NULL);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+ rcu_read_unlock();
+#endif
+}
+
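+
nfp_tunnel_keep_alive_v6() above validates the message by recomputing the expected payload length from the advertised record count (struct_size() does this with overflow checking in the kernel). A plain-arithmetic model of the check, with an invented record layout:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct rec { uint8_t addr[16]; uint32_t port; };
struct msg { uint32_t seq, count, flags; struct rec recs[]; };

static bool msg_len_ok(size_t pay_len, uint32_t count, uint32_t max)
{
	if (count > max)
		return false;
	/* header plus exactly `count` trailing records, nothing more */
	return pay_len == sizeof(struct msg) + (size_t)count * sizeof(struct rec);
}
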
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -224,71 +324,126 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
return 0;
}
-static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+static bool
+__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
+ void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
return true;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
return false;
}
-static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+static int
+__nfp_tun_add_route_to_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- return;
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
+ spin_unlock_bh(list_lock);
+ return 0;
}
- }
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+
+ entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
if (!entry) {
- spin_unlock_bh(&priv->tun.neigh_off_lock);
- nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
- return;
+ spin_unlock_bh(list_lock);
+ return -ENOMEM;
}
- entry->ipv4_addr = ipv4_addr;
- list_add_tail(&entry->list, &priv->tun.neigh_off_list);
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ memcpy(entry->ip_add, add, add_len);
+ list_add_tail(&entry->list, route_list);
+ spin_unlock_bh(list_lock);
+
+ return 0;
}
-static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+static void
+__nfp_tun_del_route_from_cache(struct list_head *route_list,
+ spinlock_t *list_lock, void *add, int add_len)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *entry;
- struct list_head *ptr, *storage;
+ struct nfp_offloaded_route *entry;
- spin_lock_bh(&priv->tun.neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
- if (entry->ipv4_addr == ipv4_addr) {
+ spin_lock_bh(list_lock);
+ list_for_each_entry(entry, route_list, list)
+ if (!memcmp(entry->ip_add, add, add_len)) {
list_del(&entry->list);
kfree(entry);
break;
}
- }
- spin_unlock_bh(&priv->tun.neigh_off_lock);
+ spin_unlock_bh(list_lock);
+}
+
+static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static bool
+nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
}
static void
-nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
- struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
+ &priv->tun.neigh_off_lock_v4, ipv4_addr,
+ sizeof(*ipv4_addr));
+}
+
+static void
+nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
+ &priv->tun.neigh_off_lock_v6, ipv6_addr,
+ sizeof(*ipv6_addr));
+}
+
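+
The route-cache rework above keys entries on a raw byte string of caller-chosen length, so one implementation serves both address families (4B IPv4 keys and 16B IPv6 keys). A self-contained sketch of that pattern using a singly linked list in place of the kernel list/spinlock machinery:

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct route_entry {
	struct route_entry *next;
	unsigned char key[];		/* flexible: 4 or 16 bytes */
};

static bool cache_has(struct route_entry *head, const void *key, size_t len)
{
	for (; head; head = head->next)
		if (!memcmp(head->key, key, len))
			return true;
	return false;
}

static int cache_add(struct route_entry **head, const void *key, size_t len)
{
	struct route_entry *e;

	if (cache_has(*head, key, len))
		return 0;		/* already cached */
	e = malloc(sizeof(*e) + len);	/* entry sized for this family */
	if (!e)
		return -1;
	memcpy(e->key, key, len);
	e->next = *head;
	*head = e;
	return 0;
}
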
+static void
+nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
u32 port_id;
@@ -302,7 +457,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
/* If entry has expired send dst IP with all other fields 0. */
if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
- nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
goto send_msg;
@@ -314,7 +469,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
- nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+ nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
send_msg:
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
@@ -322,16 +477,54 @@ send_msg:
(unsigned char *)&payload, flag);
}
+static void
+nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
+ struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh_v6 payload;
+ u32 port_id;
+
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
+ payload.dst_ipv6 = flow->daddr;
+
+ /* If entry has expired send dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
+ nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
+ /* Trigger probe to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv6 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(port_id);
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&payload, flag);
+}
+
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nfp_flower_priv *app_priv;
struct netevent_redirect *redir;
- struct flowi4 flow = {};
+ struct flowi4 flow4 = {};
+ struct flowi6 flow6 = {};
struct neighbour *n;
struct nfp_app *app;
struct rtable *rt;
+ bool ipv6 = false;
int err;
switch (event) {
@@ -346,7 +539,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
- flow.daddr = *(__be32 *)n->primary_key;
+ if (n->tbl->family == AF_INET6)
+ ipv6 = true;
+
+ if (ipv6)
+ flow6.daddr = *(struct in6_addr *)n->primary_key;
+ else
+ flow4.daddr = *(__be32 *)n->primary_key;
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
@@ -356,28 +555,46 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
/* Only concerned with changes to routes already added to NFP. */
- if (!nfp_tun_has_route(app, flow.daddr))
+ if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
+ (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_INET)
- /* Do a route lookup to populate flow data. */
- rt = ip_route_output_key(dev_net(n->dev), &flow);
- err = PTR_ERR_OR_ZERO(rt);
- if (err)
+ if (ipv6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *dst;
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
+ &flow6, NULL);
+ if (IS_ERR(dst))
+ return NOTIFY_DONE;
+
+ dst_release(dst);
+ flow6.flowi6_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
+#else
return NOTIFY_DONE;
+#endif /* CONFIG_IPV6 */
+ } else {
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow4);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
- ip_rt_put(rt);
+ ip_rt_put(rt);
+
+ flow4.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
+ }
#else
return NOTIFY_DONE;
-#endif
-
- flow.flowi4_proto = IPPROTO_UDP;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+#endif /* CONFIG_INET */
return NOTIFY_OK;
}
-void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_tun_req_route_ipv4 *payload;
struct net_device *netdev;
@@ -411,7 +628,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+ nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
neigh_release(n);
rcu_read_unlock();
return;
@@ -421,6 +638,48 @@ fail_rcu_unlock:
nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
+void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv6 *payload;
+ struct net_device *netdev;
+ struct flowi6 flow = {};
+ struct dst_entry *dst;
+ struct neighbour *n;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+ if (!netdev)
+ goto fail_rcu_unlock;
+
+ flow.daddr = payload->ipv6_addr;
+ flow.flowi6_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
+ NULL);
+ if (IS_ERR(dst))
+ goto fail_rcu_unlock;
+#else
+ goto fail_rcu_unlock;
+#endif
+
+ n = dst_neigh_lookup(dst, &flow.daddr);
+ dst_release(dst);
+ if (!n)
+ goto fail_rcu_unlock;
+
+ nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
+ neigh_release(n);
+ rcu_read_unlock();
+ return;
+
+fail_rcu_unlock:
+ rcu_read_unlock();
+ nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
+}
+
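+
nfp_tunnel_request_route_v6() above answers a firmware route request by resolving the destination and writing the neighbour back, or warning when no route exists. A toy model of that request/response shape; lookup_neigh() and the hard-coded address are stand-ins for the kernel dst/neighbour lookups:

#include <stdio.h>
#include <string.h>

struct neigh { unsigned char mac[6]; };

static int lookup_neigh(const char *dst, struct neigh *out)
{
	/* stand-in for ipv6_dst_lookup_flow() + dst_neigh_lookup() */
	if (!strcmp(dst, "2001:db8::1")) {
		memset(out->mac, 0xaa, sizeof(out->mac));
		return 0;
	}
	return -1;
}

static void handle_route_request(const char *dst)
{
	struct neigh n;

	if (lookup_neigh(dst, &n))
		fprintf(stderr, "Requested IPv6 route not found.\n");
	else
		printf("write neigh for %s back to firmware\n", dst);
}
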
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -502,6 +761,78 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
nfp_tun_write_ipv4_list(app);
}
+static void nfp_tun_write_ipv6_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+ struct nfp_tun_ipv6_addr payload;
+ int count = 0;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
+ if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+ nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
+ break;
+ }
+ payload.ipv6_addr[count++] = entry->ipv6_addr;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ payload.count = cpu_to_be32(count);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
+ sizeof(struct nfp_tun_ipv6_addr),
+ &payload, GFP_KERNEL);
+}
+
+struct nfp_ipv6_addr_entry *
+nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv6_addr_entry *entry;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
+ if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
+ entry->ref_count++;
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ return entry;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return NULL;
+ }
+ entry->ipv6_addr = *ipv6;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ nfp_tun_write_ipv6_list(app);
+
+ return entry;
+}
+
+void
+nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ bool freed = false;
+
+ mutex_lock(&priv->tun.ipv6_off_lock);
+ if (!--entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ freed = true;
+ }
+ mutex_unlock(&priv->tun.ipv6_off_lock);
+
+ if (freed)
+ nfp_tun_write_ipv6_list(app);
+}
+
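+
nfp_tunnel_add_ipv6_off()/nfp_tunnel_put_ipv6_off() above keep a ref-counted endpoint list and rewrite the firmware copy only when membership actually changes. A userspace sketch of that discipline, with a singly linked list instead of the mutex-protected kernel list:

#include <stdlib.h>
#include <string.h>

struct ep {
	struct ep *next;
	unsigned char addr[16];
	int ref;
};

static struct ep *ep_get(struct ep **head, const unsigned char *addr)
{
	struct ep *e;

	for (e = *head; e; e = e->next)
		if (!memcmp(e->addr, addr, 16)) {
			e->ref++;
			return e;	/* existing: no FW rewrite needed */
		}

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	memcpy(e->addr, addr, 16);
	e->ref = 1;
	e->next = *head;
	*head = e;
	/* membership changed: push the new list to firmware here */
	return e;
}

static void ep_put(struct ep **head, struct ep *victim)
{
	struct ep **p;

	if (--victim->ref)
		return;			/* other flows still use it */
	for (p = head; *p; p = &(*p)->next)
		if (*p == victim) {
			*p = victim->next;
			free(victim);
			/* membership changed: push the new list to firmware */
			return;
		}
}
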
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
@@ -1013,13 +1344,17 @@ int nfp_tunnel_config_start(struct nfp_app *app)
ida_init(&priv->tun.mac_off_ids);
- /* Initialise priv data for IPv4 offloading. */
+ /* Initialise priv data for IPv4/v6 offloading. */
mutex_init(&priv->tun.ipv4_off_lock);
INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
+ mutex_init(&priv->tun.ipv6_off_lock);
+ INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->tun.neigh_off_lock);
- INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+ spin_lock_init(&priv->tun.neigh_off_lock_v4);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
+ spin_lock_init(&priv->tun.neigh_off_lock_v6);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
err = register_netevent_notifier(&priv->tun.neigh_nb);
@@ -1034,9 +1369,11 @@ int nfp_tunnel_config_start(struct nfp_app *app)
void nfp_tunnel_config_stop(struct nfp_app *app)
{
+ struct nfp_offloaded_route *route_entry, *temp;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_ipv4_route_entry *route_entry;
struct nfp_ipv4_addr_entry *ip_entry;
+ struct nfp_tun_neigh_v6 ipv6_route;
+ struct nfp_tun_neigh ipv4_route;
struct list_head *ptr, *storage;
unregister_netevent_notifier(&priv->tun.neigh_nb);
@@ -1050,12 +1387,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
kfree(ip_entry);
}
- /* Free any memory that may be occupied by the route list. */
- list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
- route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
- list);
+ mutex_destroy(&priv->tun.ipv6_off_lock);
+
+ /* Free memory in the route list and remove entries from fw cache. */
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v4, list) {
+ memset(&ipv4_route, 0, sizeof(ipv4_route));
+ memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
+ sizeof(ipv4_route.dst_ipv4));
list_del(&route_entry->list);
kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&ipv4_route,
+ GFP_KERNEL);
+ }
+
+ list_for_each_entry_safe(route_entry, temp,
+ &priv->tun.neigh_off_list_v6, list) {
+ memset(&ipv6_route, 0, sizeof(ipv6_route));
+ memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
+ sizeof(ipv6_route.dst_ipv6));
+ list_del(&route_entry->list);
+ kfree(route_entry);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
+ sizeof(struct nfp_tun_neigh_v6),
+ (unsigned char *)&ipv6_route,
+ GFP_KERNEL);
}
/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 250f510b1d21..ff4438478ea9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -586,6 +586,9 @@ struct nfp_net_dp {
* @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
* @ktls_no_space: Counter of firmware rejecting kTLS connection due to
* lack of space
+ * @ktls_rx_resync_req: Counter of TLS RX resync requested
+ * @ktls_rx_resync_ign: Counter of TLS RX resync requests ignored
+ * @ktls_rx_resync_sent: Counter of TLS RX resync completed
* @mbox_cmsg: Common Control Message via vNIC mailbox state
* @mbox_cmsg.queue: CCM mbox queue of pending messages
* @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
@@ -674,6 +677,9 @@ struct nfp_net {
atomic64_t ktls_conn_id_gen;
atomic_t ktls_no_space;
+ atomic_t ktls_rx_resync_req;
+ atomic_t ktls_rx_resync_ign;
+ atomic_t ktls_rx_resync_sent;
struct {
struct sk_buff_head queue;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index bcdcd6de7dea..9bfb3b077bc1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -47,6 +47,7 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
+#include "crypto/fw.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -1321,17 +1322,11 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
netdev_tx_reset_queue(nd_q);
}
-static void nfp_net_tx_timeout(struct net_device *netdev)
+static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
- int i;
- for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
- continue;
- nn_warn(nn, "TX timeout on ring: %d\n", i);
- }
- nn_warn(nn, "TX watchdog timeout\n");
+ nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
/* Receive processing
@@ -1667,9 +1662,9 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
&rx_hash->hash);
}
-static void *
+static bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, int meta_len)
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
u32 meta_info;
@@ -1699,14 +1694,20 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
(__force __wsum)__get_unaligned_cpu32(data);
data += 4;
break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
default:
- return NULL;
+ return true;
}
meta_info >>= NFP_NET_META_FIELD_SIZE;
}
- return data;
+ return data != pkt;
}

static void
@@ -1891,12 +1892,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash_desc(dp->netdev, &meta,
rxbuf->frag + meta_off, rxd);
} else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- meta_len);
- if (unlikely(end != rxbuf->frag + pkt_off)) {
+ if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
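
The nfp_net_parse_meta() rework above walks a 32-bit prepend word whose 4-bit fields, consumed LSB-first, name each metadata item, and treats anything other than landing exactly on the packet start as invalid. An illustrative standalone version with made-up field ids and sizes:

#include <stdint.h>
#include <stdbool.h>

#define META_FIELD_SIZE	4
#define META_FIELD_MASK	0xf
#define META_HASH	1	/* assume 8 bytes of metadata */
#define META_MARK	2	/* assume 4 bytes of metadata */

/* data points just past the meta_info word; pkt is the packet start.
 * Returns true when the metadata is invalid (caller drops the frame).
 */
static bool parse_meta(uint32_t meta_info, const uint8_t *data,
		       const uint8_t *pkt)
{
	while (meta_info) {
		switch (meta_info & META_FIELD_MASK) {
		case META_HASH:
			data += 8;
			break;
		case META_MARK:
			data += 4;
			break;
		default:
			return true;	/* unknown field: invalid */
		}
		meta_info >>= META_FIELD_SIZE;
	}
	return data != pkt;	/* must land exactly on the packet start */
}
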
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index d835c14b7257..c3a763134e79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -17,6 +17,30 @@ static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
}
+static bool
+nfp_net_tls_parse_crypto_ops(struct device *dev, struct nfp_net_tlv_caps *caps,
+ u8 __iomem *ctrl_mem, u8 __iomem *data,
+ unsigned int length, unsigned int offset,
+ bool rx_stream_scan)
+{
+ /* Ignore the legacy TLV if new one was already parsed */
+ if (caps->tls_resync_ss && !rx_stream_scan)
+ return true;
+
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return false;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->tls_resync_ss = rx_stream_scan;
+
+ return true;
+}
+
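+
nfp_net_tls_parse_crypto_ops() above lets the newer stream-scan TLV take precedence over the legacy crypto-ops TLV regardless of the order the two appear in control memory. The precedence rule in isolation, as a sketch with an invented caps struct:

#include <stdbool.h>
#include <stdint.h>

struct caps { uint32_t crypto_ops; bool resync_ss; bool have_crypto; };

static void parse_crypto_tlv(struct caps *c, uint32_t ops, bool rx_stream_scan)
{
	/* ignore the legacy TLV once the new one has been seen */
	if (c->resync_ss && !rx_stream_scan)
		return;

	c->crypto_ops = ops;
	c->resync_ss = rx_stream_scan;
	c->have_crypto = true;
}
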
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps)
{
@@ -104,15 +128,25 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->mbox_cmsg_types = readl(data);
break;
case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
- if (length < 32) {
- dev_err(dev,
- "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
- length, offset);
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ false))
return -EINVAL;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ if ((data - ctrl_mem) % 8) {
+ dev_warn(dev, "VNIC STATS TLV misaligned, ignoring offset:%u len:%u\n",
+ offset, length);
+ break;
}
-
- caps->crypto_ops = readl(data);
- caps->crypto_enable_off = data - ctrl_mem + 16;
+ caps->vnic_stats_off = data - ctrl_mem;
+ caps->vnic_stats_cnt = length / 10;
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ if (!nfp_net_tls_parse_crypto_ops(dev, caps, ctrl_mem,
+ data, length, offset,
+ true))
+ return -EINVAL;
break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ee6b24e4eacd..3d61a8cb60b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -45,6 +45,7 @@
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
+#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */
#define NFP_META_PORT_ID_CTRL ~0U
@@ -479,6 +480,22 @@
* 8 words, bitmaps of supported and enabled crypto operations.
* First 16B (4 words) contains a bitmap of supported crypto operations,
* and next 16B contain the enabled operations.
+ * This capability is made obsolete by ones with better sync methods.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_VNIC_STATS:
+ * Variable, per-vNIC statistics, data should be 8B aligned (FW should insert
+ * zero-length RESERVED TLV to pad).
+ * TLV data has two sections. First is an array of statistics' IDs (2B each).
+ * Second is an array of the 8B statistics themselves. Statistics are 8B
+ * aligned, meaning there may be padding between the sections.
+ * Number of statistics can be determined as floor(tlv.length / (2 + 8)).
+ * This TLV overwrites %NFP_NET_CFG_STATS_* values (statistics in this TLV
+ * duplicate the old ones, so the driver should be careful not to render
+ * both unnecessarily).
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN:
+ * Same as %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS, but crypto TLS does stream scan
+ * RX sync, rather than kernel-assisted sync.
*/
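
A worked example of the layout described above, under the assumption of a 40-byte TLV: floor(40 / (2 + 8)) = 4 statistics, ids in bytes 0-7, and the 8B values starting at roundup(8, 8) = 8:

#include <stdio.h>

int main(void)
{
	unsigned int length = 40;			/* assumed TLV length */
	unsigned int cnt = length / (2 + 8);		/* 4 statistics */
	unsigned int vals_off = ((2 * cnt) + 7) & ~7u;	/* 8B-aligned values */

	printf("%u stats, ids at 0, values at %u\n", cnt, vals_off);
	return 0;
}
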
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -490,6 +507,8 @@
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
+#define NFP_NET_CFG_TLV_TYPE_VNIC_STATS 12
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS_RX_SCAN 13
struct device;
@@ -502,6 +521,9 @@ struct device;
* @mbox_cmsg_types: cmsgs which can be passed through the mailbox
* @crypto_ops: supported crypto operations
* @crypto_enable_off: offset of crypto ops enable region
+ * @vnic_stats_off: offset of vNIC stats area
+ * @vnic_stats_cnt: number of vNIC stats
+ * @tls_resync_ss: TLS resync will be performed via stream scan
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
@@ -511,6 +533,9 @@ struct nfp_net_tlv_caps {
u32 mbox_cmsg_types;
u32 crypto_ops;
unsigned int crypto_enable_off;
+ unsigned int vnic_stats_off;
+ unsigned int vnic_stats_cnt;
+ unsigned int tls_resync_ss:1;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 1b840ee47339..d648e32c0520 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -148,11 +148,33 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};
+static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
+ [1] = "dev_rx_discards",
+ [2] = "dev_rx_errors",
+ [3] = "dev_rx_bytes",
+ [4] = "dev_rx_uc_bytes",
+ [5] = "dev_rx_mc_bytes",
+ [6] = "dev_rx_bc_bytes",
+ [7] = "dev_rx_pkts",
+ [8] = "dev_rx_mc_pkts",
+ [9] = "dev_rx_bc_pkts",
+
+ [10] = "dev_tx_discards",
+ [11] = "dev_tx_errors",
+ [12] = "dev_tx_bytes",
+ [13] = "dev_tx_uc_bytes",
+ [14] = "dev_tx_mc_bytes",
+ [15] = "dev_tx_bc_bytes",
+ [16] = "dev_tx_pkts",
+ [17] = "dev_tx_mc_pkts",
+ [18] = "dev_tx_bc_pkts",
+};
+
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
-#define NN_CTRL_PATH_STATS 1
+#define NN_CTRL_PATH_STATS 4
#define SFP_SFF_REV_COMPLIANCE 1
@@ -454,6 +476,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
data = nfp_pr_et(data, "hw_tls_no_space");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ok");
+ data = nfp_pr_et(data, "rx_tls_resync_req_ign");
+ data = nfp_pr_et(data, "rx_tls_resync_sent");
return data;
}
@@ -502,6 +527,9 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
*data++ = gathered_stats[j];
*data++ = atomic_read(&nn->ktls_no_space);
+ *data++ = atomic_read(&nn->ktls_rx_resync_req);
+ *data++ = atomic_read(&nn->ktls_rx_resync_ign);
+ *data++ = atomic_read(&nn->ktls_rx_resync_sent);
return data;
}
@@ -560,6 +588,65 @@ nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
return data;
}
+static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
+{
+ return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
+}
+
+static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
+{
+ unsigned int i, id;
+ u8 __iomem *mem;
+ u64 id_word = 0;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
+ if (!(i % 4))
+ id_word = readq(mem + i * 2);
+
+ id = (u16)id_word;
+ id_word >>= 16;
+
+ if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
+ nfp_tlv_stat_names[id][0]) {
+ memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ } else {
+ data = nfp_pr_et(data, "dev_unknown_stat%u", id);
+ }
+ }
+
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ data = nfp_pr_et(data, "rxq_%u_pkts", i);
+ data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
+ }
+
+ return data;
+}
+
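+
nfp_vnic_get_tlv_stats_strings() above pulls four 2-byte statistic ids out of each 64-bit read, low half-word first. The unpacking on its own, with a hand-built id word (assumes the little-endian register layout the readq-based loop relies on):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t id_word = 0x0004000300020001ULL;	/* ids 1, 2, 3, 4 */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		uint16_t id = (uint16_t)id_word;	/* low half-word */

		id_word >>= 16;
		printf("stat id %u\n", id);
	}
	return 0;
}
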
+static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
+{
+ u8 __iomem *mem;
+ unsigned int i;
+
+ mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
+ mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
+ for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
+ *data++ = readq(mem + i * 8);
+
+ mem = nn->dp.ctrl_bar;
+ for (i = 0; i < nn->max_r_vecs; i++) {
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
+ }
+
+ return data;
+}
+
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
struct nfp_port *port;
@@ -609,8 +696,12 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
- false);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats_strings(data,
+ nn->max_r_vecs,
+ false);
+ else
+ data = nfp_vnic_get_tlv_stats_strings(nn, data);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
break;
@@ -624,7 +715,11 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
+ if (!nn->tlv_caps.vnic_stats_off)
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
+ nn->max_r_vecs);
+ else
+ data = nfp_vnic_get_tlv_stats(nn, data);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -632,13 +727,18 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int cnt;
switch (sset) {
case ETH_SS_STATS:
- return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
- nfp_mac_get_stats_count(netdev) +
- nfp_app_port_get_stats_count(nn->port);
+ cnt = nfp_vnic_get_sw_stats_count(netdev);
+ if (!nn->tlv_caps.vnic_stats_off)
+ cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
+ else
+ cnt += nfp_vnic_get_tlv_stats_count(nn);
+ cnt += nfp_mac_get_stats_count(netdev);
+ cnt += nfp_app_port_get_stats_count(nn->port);
+ return cnt;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e4977cdf7678..c0e2f4394aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* the identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
+ ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
@@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
- vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+ vf->q_bar = ioremap(map_addr, bar_sz);
if (!vf->q_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* TX queues */
map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
- nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
+ nn->tx_bar = ioremap(map_addr, tx_bar_sz);
if (!nn->tx_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO;
@@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
/* RX queues */
map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
- nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
+ nn->rx_bar = ioremap(map_addr, rx_bar_sz);
if (!nn->rx_bar) {
nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
err = -EIO;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 85d46f206b3c..b454db283aef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
bar = &nfp->bar[0];
if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
int pf;
@@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
}
bar = &nfp->bar[4 + i];
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
msg += snprintf(msg, end - msg,
@@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area)
priv->iomem = priv->bar->iomem + priv->bar_offset;
else
/* Must have been too big. Sub-allocate. */
- priv->iomem = ioremap_nocache(priv->phys, priv->size);
+ priv->iomem = ioremap(priv->phys, priv->size);
if (IS_ERR_OR_NULL(priv->iomem)) {
dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 6b54cb3b681d..2fc10a36afa4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2739,7 +2739,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
* nv_tx_timeout: dev->tx_timeout function
* Called with netif_tx_lock held.
*/
-static void nv_tx_timeout(struct net_device *dev)
+static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 656169214cdb..d20cf03a3ea0 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1149,19 +1149,6 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
spin_unlock_irqrestore(&pldat->lock, flags);
}
-static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, req, cmd);
-}
-
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
@@ -1229,7 +1216,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_stop = lpc_eth_close,
.ndo_start_xmit = lpc_eth_hard_start_xmit,
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
- .ndo_do_ioctl = lpc_eth_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = lpc_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 18e6d87c607b..73ec195fbc30 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2271,7 +2271,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* pch_gbe_tx_timeout - Respond to a Tx Hang
* @netdev: Network interface device structure
*/
-static void pch_gbe_tx_timeout(struct net_device *netdev)
+static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index eee883a2aa8d..70816d2e2990 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -548,7 +548,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int hamachi_open(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void hamachi_timer(struct timer_list *t);
-static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void hamachi_init_ring(struct net_device *dev);
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -1042,7 +1042,7 @@ static void hamachi_timer(struct timer_list *t)
add_timer(&hmp->timer);
}
-static void hamachi_tx_timeout(struct net_device *dev)
+static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
int i;
struct hamachi_private *hmp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 5113ee647090..520779f05e1a 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -344,7 +344,7 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
-static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -677,7 +677,7 @@ static void yellowfin_timer(struct timer_list *t)
add_timer(&yp->timer);
}
-static void yellowfin_tx_timeout(struct net_device *dev)
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 98e102af7756..bb106a32f416 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,19 +12,27 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.18.0-k"
+#define IONIC_DRV_VERSION "0.20.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_SUBDEV_ID_NAPLES_25 0x4000
-#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001
-#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002
-
#define DEVCMD_TIMEOUT 10
+struct ionic_vf {
+ u16 index;
+ u8 macaddr[6];
+ __le32 maxrate;
+ __le16 vlanid;
+ u8 spoofchk;
+ u8 trusted;
+ u8 linkstate;
+ dma_addr_t stats_pa;
+ struct ionic_lif_stats stats;
+};
+
struct ionic {
struct pci_dev *pdev;
struct device *dev;
@@ -46,6 +54,9 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
struct notifier_block nb;
+ struct rw_semaphore vf_op_lock; /* lock for VF operations */
+ struct ionic_vf *vfs;
+ int num_vfs;
struct timer_list watchdog_timer;
int watchdog_period;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 9a9ab8cb2cb3..448d7b23b2f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -104,10 +104,112 @@ void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
iounmap(page);
}
+static void ionic_vf_dealloc_locked(struct ionic *ionic)
+{
+ struct ionic_vf *v;
+ dma_addr_t dma = 0;
+ int i;
+
+ if (!ionic->vfs)
+ return;
+
+ for (i = ionic->num_vfs - 1; i >= 0; i--) {
+ v = &ionic->vfs[i];
+
+ if (v->stats_pa) {
+ (void)ionic_set_vf_config(ionic, i,
+ IONIC_VF_ATTR_STATSADDR,
+ (u8 *)&dma);
+ dma_unmap_single(ionic->dev, v->stats_pa,
+ sizeof(v->stats), DMA_FROM_DEVICE);
+ v->stats_pa = 0;
+ }
+ }
+
+ kfree(ionic->vfs);
+ ionic->vfs = NULL;
+ ionic->num_vfs = 0;
+}
+
+static void ionic_vf_dealloc(struct ionic *ionic)
+{
+ down_write(&ionic->vf_op_lock);
+ ionic_vf_dealloc_locked(ionic);
+ up_write(&ionic->vf_op_lock);
+}
+
+static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
+{
+ struct ionic_vf *v;
+ int err = 0;
+ int i;
+
+ down_write(&ionic->vf_op_lock);
+
+ ionic->vfs = kcalloc(num_vfs, sizeof(struct ionic_vf), GFP_KERNEL);
+ if (!ionic->vfs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ v = &ionic->vfs[i];
+ v->stats_pa = dma_map_single(ionic->dev, &v->stats,
+ sizeof(v->stats), DMA_FROM_DEVICE);
+ if (dma_mapping_error(ionic->dev, v->stats_pa)) {
+ v->stats_pa = 0;
+ err = -ENODEV;
+ goto out;
+ }
+
+		/* ignore failures from older FW; we just won't get stats */
+ (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
+ (u8 *)&v->stats_pa);
+ ionic->num_vfs++;
+ }
+
+out:
+ if (err)
+ ionic_vf_dealloc_locked(ionic);
+ up_write(&ionic->vf_op_lock);
+ return err;
+}
+
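
Note the unwind pattern in ionic_vf_alloc() above: num_vfs is incremented only after a VF's stats buffer has been DMA-mapped, so when a mapping fails mid-loop the ionic_vf_dealloc_locked() call in the error path walks back over exactly the entries that were mapped, and the write semaphore is held across the whole allocate-or-unwind sequence.
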
+static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct device *dev = ionic->dev;
+ int ret = 0;
+
+ if (num_vfs > 0) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot enable SRIOV: %d\n", ret);
+ goto out;
+ }
+
+ ret = ionic_vf_alloc(ionic, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot alloc VFs: %d\n", ret);
+ pci_disable_sriov(pdev);
+ goto out;
+ }
+
+ ret = num_vfs;
+ } else {
+ pci_disable_sriov(pdev);
+ ionic_vf_dealloc(ionic);
+ }
+
+out:
+ return ret;
+}
+
static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ionic *ionic;
+ int num_vfs;
int err;
ionic = ionic_devlink_alloc(dev);
@@ -206,6 +308,15 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_lifs;
}
+ init_rwsem(&ionic->vf_op_lock);
+ num_vfs = pci_num_vf(pdev);
+ if (num_vfs) {
+ dev_info(dev, "%d VFs found already enabled\n", num_vfs);
+ err = ionic_vf_alloc(ionic, num_vfs);
+ if (err)
+ dev_err(dev, "Cannot enable existing VFs: %d\n", err);
+ }
+
err = ionic_lifs_register(ionic);
if (err) {
dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
@@ -223,6 +334,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_deregister_lifs:
ionic_lifs_unregister(ionic);
err_out_deinit_lifs:
+ ionic_vf_dealloc(ionic);
ionic_lifs_deinit(ionic);
err_out_free_lifs:
ionic_lifs_free(ionic);
@@ -279,6 +391,7 @@ static struct pci_driver ionic_driver = {
.id_table = ionic_id_table,
.probe = ionic_probe,
.remove = ionic_remove,
+ .sriov_configure = ionic_sriov_configure,
};
int ionic_bus_register_driver(void)
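
Hooking sriov_configure into struct pci_driver lets the PCI core enable and disable VFs at runtime; the callback is normally reached through a write to the device's sriov_numvfs sysfs attribute, with 0 meaning disable. The pci_num_vf() check added to ionic_probe() covers VFs that were already enabled before the driver loaded, for example across a driver reload.
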
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 5f9d2ec70446..87f82f36812f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -286,6 +286,64 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
ionic_dev_cmd_go(idev, &cmd);
}
+/* VF commands */
+int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
+ .vf_setattr.attr = attr,
+ .vf_setattr.vf_index = vf,
+ };
+ int err;
+
+ switch (attr) {
+ case IONIC_VF_ATTR_SPOOFCHK:
+ cmd.vf_setattr.spoofchk = *data;
+ dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_TRUST:
+ cmd.vf_setattr.trust = *data;
+ dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_LINKSTATE:
+ cmd.vf_setattr.linkstate = *data;
+ dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
+ __func__, vf, *data);
+ break;
+ case IONIC_VF_ATTR_MAC:
+ ether_addr_copy(cmd.vf_setattr.macaddr, data);
+ dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
+ __func__, vf, data);
+ break;
+ case IONIC_VF_ATTR_VLAN:
+ cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
+ __func__, vf, *(u16 *)data);
+ break;
+ case IONIC_VF_ATTR_RATE:
+ cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
+ __func__, vf, *(u32 *)data);
+ break;
+ case IONIC_VF_ATTR_STATSADDR:
+ cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
+ dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
+ __func__, vf, *(u64 *)data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
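
ionic_set_vf_config() funnels every VF attribute write through the shared device-command window, serialized by dev_cmd_lock. A minimal caller sketch (the VF index and value are illustrative):

	u8 linkstate = IONIC_VF_LINK_STATUS_UP;
	int err;

	/* Values are passed as a byte buffer, matching the u8 *data contract. */
	err = ionic_set_vf_config(ionic, 0, IONIC_VF_ATTR_LINKSTATE, &linkstate);
	if (err)
		dev_err(ionic->dev, "VF linkstate devcmd failed: %d\n", err);
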
/* LIF commands */
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 4665c5dc5324..7838e342c4fd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -113,6 +113,12 @@ static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);
+/* SR-IOV */
+static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
+static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
+static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+
struct ionic_devinfo {
u8 asic_type;
u8 asic_rev;
@@ -275,6 +281,7 @@ void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
+int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 39317cdfa6cf..ce07c2931a72 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -51,6 +51,10 @@ enum ionic_cmd_opcode {
IONIC_CMD_RDMA_CREATE_CQ = 52,
IONIC_CMD_RDMA_CREATE_ADMINQ = 53,
+	/* SR-IOV commands */
+ IONIC_CMD_VF_GETATTR = 60,
+ IONIC_CMD_VF_SETATTR = 61,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -862,7 +866,7 @@ struct ionic_rxq_comp {
#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40
#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80
u8 pkt_type_color;
-#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x0f
+#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x7f
};
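
Widening IONIC_RXQ_COMP_PKT_TYPE_MASK from 0x0f to 0x7f grows the packet-type field of pkt_type_color from 4 bits to 7, leaving just the top bit of the byte for the completion color flag.
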
enum ionic_pkt_type {
@@ -1639,6 +1643,93 @@ enum ionic_qos_sched_type {
IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
};
+enum ionic_vf_attr {
+ IONIC_VF_ATTR_SPOOFCHK = 1,
+ IONIC_VF_ATTR_TRUST = 2,
+ IONIC_VF_ATTR_MAC = 3,
+ IONIC_VF_ATTR_LINKSTATE = 4,
+ IONIC_VF_ATTR_VLAN = 5,
+ IONIC_VF_ATTR_RATE = 6,
+ IONIC_VF_ATTR_STATSADDR = 7,
+};
+
+/**
+ * enum ionic_vf_link_status - VF link status
+ */
+enum ionic_vf_link_status {
+ IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
+ IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
+ IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+};
+
+/**
+ * struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode:    Opcode
+ * @attr:      Attribute type (enum ionic_vf_attr)
+ * @vf_index:  VF index
+ * @macaddr:   MAC address to assign
+ * @vlanid:    VLAN ID to assign
+ * @maxrate:   Maximum Tx rate, in Mbps
+ * @spoofchk:  Enable address spoof checking
+ * @trust:     Enable VF trust
+ * @linkstate: Set link up or down
+ * @stats_pa:  DMA address for VF stats
+ */
+struct ionic_vf_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[60];
+ };
+};
+
+struct ionic_vf_setattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ __le16 comp_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
+ */
+struct ionic_vf_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ u8 rsvd[60];
+};
+
+struct ionic_vf_getattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[11];
+ };
+ u8 color;
+};
+
/**
* union ionic_qos_config - Qos configuration structure
* @flags: Configuration flags
@@ -2289,6 +2380,9 @@ union ionic_dev_cmd {
struct ionic_port_getattr_cmd port_getattr;
struct ionic_port_setattr_cmd port_setattr;
+ struct ionic_vf_setattr_cmd vf_setattr;
+ struct ionic_vf_getattr_cmd vf_getattr;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -2318,6 +2412,9 @@ union ionic_dev_cmd_comp {
struct ionic_port_getattr_comp port_getattr;
struct ionic_port_setattr_comp port_setattr;
+ struct ionic_vf_setattr_comp vf_setattr;
+ struct ionic_vf_getattr_comp vf_getattr;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
	struct ionic_lif_reset_comp lif_reset;
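
The VF command structures are sized to the fixed 64-byte devcmd slot: in ionic_vf_setattr_cmd, opcode (1) + attr (1) + vf_index (2) + the 60-byte pad arm of the union add up to exactly 64 bytes, and the completions to 16. The static_asserts added to ionic_dev.h earlier in this diff enforce this at build time:

	static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
	static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
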
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index ef8258713369..191271f6260d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1285,7 +1285,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
rtnl_unlock();
}
-static void ionic_tx_timeout(struct net_device *netdev)
+static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1619,6 +1619,227 @@ int ionic_stop(struct net_device *netdev)
return err;
}
+static int ionic_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret = 0;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ivf->vf = vf;
+ ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->qos = 0;
+ ivf->spoofchk = ionic->vfs[vf].spoofchk;
+ ivf->linkstate = ionic->vfs[vf].linkstate;
+ ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->trusted = ionic->vfs[vf].trusted;
+ ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ struct ionic_lif_stats *vs;
+ int ret = 0;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ memset(vf_stats, 0, sizeof(*vf_stats));
+ vs = &ionic->vfs[vf].stats;
+
+ vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
+ vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
+ vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
+ vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
+ vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
+ vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
+ vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
+ le64_to_cpu(vs->rx_mcast_drop_packets) +
+ le64_to_cpu(vs->rx_bcast_drop_packets);
+ vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
+ le64_to_cpu(vs->tx_mcast_drop_packets) +
+ le64_to_cpu(vs->tx_bcast_drop_packets);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
+ return -EINVAL;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
+ if (!ret)
+ ether_addr_copy(ionic->vfs[vf].macaddr, mac);
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ /* until someday when we support qos */
+ if (qos)
+ return -EINVAL;
+
+ if (vlan > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ down_read(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
+ if (!ret)
+ ionic->vfs[vf].vlanid = vlan;
+ }
+
+ up_read(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_rate(struct net_device *netdev, int vf,
+ int tx_min, int tx_max)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ int ret;
+
+ /* setting the min just seems silly */
+ if (tx_min)
+ return -EINVAL;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
+ if (!ret)
+ lif->ionic->vfs[vf].maxrate = tx_max;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data = set; /* convert to u8 for config */
+ int ret;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_SPOOFCHK, &data);
+ if (!ret)
+ ionic->vfs[vf].spoofchk = data;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data = set; /* convert to u8 for config */
+ int ret;
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_TRUST, &data);
+ if (!ret)
+ ionic->vfs[vf].trusted = data;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
+static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic *ionic = lif->ionic;
+ u8 data;
+ int ret;
+
+ switch (set) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ data = IONIC_VF_LINK_STATUS_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ data = IONIC_VF_LINK_STATUS_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ data = IONIC_VF_LINK_STATUS_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ down_write(&ionic->vf_op_lock);
+
+ if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
+ ret = -EINVAL;
+ } else {
+ ret = ionic_set_vf_config(ionic, vf,
+ IONIC_VF_ATTR_LINKSTATE, &data);
+ if (!ret)
+ ionic->vfs[vf].linkstate = set;
+ }
+
+ up_write(&ionic->vf_op_lock);
+ return ret;
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
@@ -1632,6 +1853,14 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_change_mtu = ionic_change_mtu,
.ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
+ .ndo_set_vf_vlan = ionic_set_vf_vlan,
+ .ndo_set_vf_trust = ionic_set_vf_trust,
+ .ndo_set_vf_mac = ionic_set_vf_mac,
+ .ndo_set_vf_rate = ionic_set_vf_rate,
+ .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
+ .ndo_get_vf_config = ionic_get_vf_config,
+ .ndo_set_vf_link_state = ionic_set_vf_link_state,
+ .ndo_get_vf_stats = ionic_get_vf_stats,
};
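
These ndo_*_vf_* hooks are the standard rtnetlink VF-management entry points and are called under RTNL, which is why the handlers above only need vf_op_lock to coordinate with SR-IOV enable/disable. Each follows the same shape: validate the VF index against pci_num_vf() under the lock, issue the devcmd, and cache the value in ionic->vfs[] only on success, so ndo_get_vf_config reports what the device actually accepted.
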
int ionic_reset_queues(struct ionic_lif *lif)
@@ -1965,18 +2194,22 @@ static int ionic_station_set(struct ionic_lif *lif)
if (err)
return err;
+ if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
+ return 0;
+
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
- if (err)
- return err;
-
- if (!is_zero_ether_addr(netdev->dev_addr)) {
- netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
- netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, false);
+ if (err) {
+ netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
+ addr.sa_data);
+ return 0;
}
+ netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
+ netdev->dev_addr);
+ ionic_lif_addr(lif, netdev->dev_addr, false);
+
eth_commit_mac_addr_change(netdev, &addr);
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a55fd1f8c31b..9c5a7dd45f9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_rx_stats {
u64 csum_complete;
u64 csum_error;
u64 buffers_posted;
+ u64 dropped;
};
#define IONIC_QCQ_F_INITED BIT(0)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 3590ea7fd88a..a8e3fb73b465 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -165,6 +165,10 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_FW_DOWNLOAD";
case IONIC_CMD_FW_CONTROL:
return "IONIC_CMD_FW_CONTROL";
+ case IONIC_CMD_VF_GETATTR:
+ return "IONIC_CMD_VF_GETATTR";
+ case IONIC_CMD_VF_SETATTR:
+ return "IONIC_CMD_VF_SETATTR";
default:
return "DEVCMD_UNKNOWN";
}
@@ -326,9 +330,9 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
unsigned long max_wait;
unsigned long duration;
int opcode;
+ int hb = 0;
int done;
int err;
- int hb;
WARN_ON(in_interrupt());
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 03916b6d47f2..a1e9796a660a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -39,6 +39,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_none),
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
+ IONIC_RX_STAT_DESC(dropped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 97e79949b359..e452f4242ba0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -152,12 +152,16 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
stats = q_to_rx_stats(q);
netdev = q->lif->netdev;
- if (comp->status)
+ if (comp->status) {
+ stats->dropped++;
return;
+ }
/* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ stats->dropped++;
return;
+ }
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
@@ -167,8 +171,10 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
else
skb = ionic_rx_frags(q, desc_info, cq_info);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ stats->dropped++;
return;
+ }
skb_record_rx_queue(skb, q->index);
@@ -337,6 +343,8 @@ void ionic_rx_fill(struct ionic_queue *q)
struct ionic_rxq_sg_desc *sg_desc;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_rxq_desc *desc;
+ unsigned int remain_len;
+ unsigned int seg_len;
unsigned int nfrags;
bool ring_doorbell;
unsigned int i, j;
@@ -346,6 +354,7 @@ void ionic_rx_fill(struct ionic_queue *q)
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
+ remain_len = len;
desc_info = q->head;
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
@@ -369,7 +378,9 @@ void ionic_rx_fill(struct ionic_queue *q)
return;
}
desc->addr = cpu_to_le64(page_info->dma_addr);
- desc->len = cpu_to_le16(PAGE_SIZE);
+ seg_len = min_t(unsigned int, PAGE_SIZE, len);
+ desc->len = cpu_to_le16(seg_len);
+ remain_len -= seg_len;
page_info++;
/* fill sg descriptors - pages[1..n] */
@@ -385,7 +396,9 @@ void ionic_rx_fill(struct ionic_queue *q)
return;
}
sg_elem->addr = cpu_to_le64(page_info->dma_addr);
- sg_elem->len = cpu_to_le16(PAGE_SIZE);
+ seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
+ sg_elem->len = cpu_to_le16(seg_len);
+ remain_len -= seg_len;
page_info++;
}
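
The remain_len bookkeeping makes the posted lengths reflect the buffer space actually needed rather than claiming a full page per fragment. For example, with len = 9000 and 4 KiB pages, nfrags = 3 and the descriptors carry 4096 for the main buffer, then 4096 and 808 (9000 - 2 * 4096) for the two SG elements, instead of three full-page lengths.
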
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c692a41e4548..8067ea04d455 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -49,7 +49,7 @@ static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
struct net_device *);
-static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_fw_poll_work(struct work_struct *work);
static void netxen_schedule_work(struct netxen_adapter *adapter,
@@ -2222,7 +2222,7 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netxen_advert_link_change(adapter, linkup);
}
-static void netxen_tx_timeout(struct net_device *netdev)
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 89fe091c958d..fa41bf08a589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -253,7 +253,8 @@ enum qed_resources {
QED_VLAN,
QED_RDMA_CNQ_RAM,
QED_ILT,
- QED_LL2_QUEUE,
+ QED_LL2_RAM_QUEUE,
+ QED_LL2_CTX_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_BDQ,
@@ -461,6 +462,8 @@ struct qed_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u32 init_ops_size;
};
@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
bool valid;
};
+enum qed_hsi_def_type {
+ QED_HSI_DEF_MAX_NUM_VFS,
+ QED_HSI_DEF_MAX_NUM_L2_QUEUES,
+ QED_HSI_DEF_MAX_NUM_PORTS,
+ QED_HSI_DEF_MAX_SB_PER_PATH,
+ QED_HSI_DEF_MAX_NUM_PFS,
+ QED_HSI_DEF_MAX_NUM_VPORTS,
+ QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
+ QED_HSI_DEF_MAX_QM_TX_QUEUES,
+ QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
+ QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+ QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
+ QED_HSI_DEF_MAX_PBF_CMD_LINES,
+ QED_HSI_DEF_MAX_BTB_BLOCKS,
+ QED_NUM_HSI_DEFS
+};
+
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
@@ -646,6 +666,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
void *dbg_user_info;
+ struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
/* PWM region specific data */
u16 wid_count;
@@ -668,6 +689,7 @@ struct qed_hwfn {
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
+ struct phys_mem_desc *fw_overlay_mem;
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
@@ -796,8 +818,8 @@ struct qed_dev {
u8 cache_shift;
/* Init */
- const struct iro *iro_arr;
-#define IRO (p_hwfn->cdev->iro_arr)
+ const u32 *iro_arr;
+#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
/* HW functions */
u8 num_hwfns;
@@ -856,6 +878,8 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
+ struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
@@ -868,16 +892,35 @@ struct qed_dev {
bool iwarp_cmt;
};
-#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
- : MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
- : MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
- : MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
- : MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
- : MAX_NUM_PFS_K2)
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
+
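
The chip-specific maximums are now resolved at runtime through qed_get_hsi_def_val(), so each macro works for both BB and AH parts without the QED_IS_BB() ternaries deleted above. A plausible shape for the lookup, assuming a table indexed by definition type and chip id (a sketch, not the verbatim qed_dev.c implementation):

	static const u32 qed_hsi_def_val_sketch[QED_NUM_HSI_DEFS][MAX_CHIP_IDS] = {
		/* e.g. [QED_HSI_DEF_MAX_NUM_VFS] = { MAX_NUM_VFS_BB, MAX_NUM_VFS_K2 }, */
	};

	u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
	{
		if (type >= QED_NUM_HSI_DEFS) {
			DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
			return 0;
		}

		return qed_hsi_def_val_sketch[type][QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2];
	}
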
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 8e1bdf58b9e7..1a636bad717d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -50,12 +50,6 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-/* Max number of connection types in HW (DQ/CDU etc.) */
-#define MAX_CONN_TYPES PROTOCOLID_COMMON
-#define NUM_TASK_TYPES 2
-#define NUM_TASK_PF_SEGMENTS 4
-#define NUM_TASK_VF_SEGMENTS 1
-
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -123,126 +117,6 @@ struct src_ent {
/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
-/* PF per protocl configuration object */
-#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
-#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
-
-struct qed_tid_seg {
- u32 count;
- u8 type;
- bool has_fl_mem;
-};
-
-struct qed_conn_type_cfg {
- u32 cid_count;
- u32 cids_per_vf;
- struct qed_tid_seg tid_seg[TASK_SEGMENTS];
-};
-
-/* ILT Client configuration, Per connection type (protocol) resources. */
-#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
-#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
-#define CDUC_BLK (0)
-#define SRQ_BLK (0)
-#define CDUT_SEG_BLK(n) (1 + (u8)(n))
-#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
-
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_MAX
-};
-
-struct ilt_cfg_pair {
- u32 reg;
- u32 val;
-};
-
-struct qed_ilt_cli_blk {
- u32 total_size; /* 0 means not active */
- u32 real_size_in_page;
- u32 start_line;
- u32 dynamic_line_cnt;
-};
-
-struct qed_ilt_client_cfg {
- bool active;
-
- /* ILT boundaries */
- struct ilt_cfg_pair first;
- struct ilt_cfg_pair last;
- struct ilt_cfg_pair p_size;
-
- /* ILT client blocks for PF */
- struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
- u32 pf_total_lines;
-
- /* ILT client blocks for VFs */
- struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
- u32 vf_total_lines;
-};
-
-/* Per Path -
- * ILT shadow table
- * Protocol acquired CID lists
- * PF start line in ILT
- */
-struct qed_dma_mem {
- dma_addr_t p_phys;
- void *p_virt;
- size_t size;
-};
-
-struct qed_cid_acquired_map {
- u32 start_cid;
- u32 max_count;
- unsigned long *cid_map;
-};
-
-struct qed_cxt_mngr {
- /* Per protocl configuration */
- struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
-
- /* computed ILT structure */
- struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
-
- /* Task type sizes */
- u32 task_type_size[NUM_TASK_TYPES];
-
- /* total number of VFs for this hwfn -
- * ALL VFs are symmetric in terms of HW resources
- */
- u32 vf_count;
-
- /* Acquired CIDs */
- struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
-
- struct qed_cid_acquired_map
- acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
-
- /* ILT shadow table */
- struct qed_dma_mem *ilt_shadow;
- u32 pf_start_line;
-
- /* Mutex for a dynamic ILT allocation */
- struct mutex mutex;
-
- /* SRC T2 */
- struct qed_dma_mem *t2;
- u32 t2_num_pages;
- u64 first_free;
- u64 last_free;
-
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
- /* Maximal number of L2 steering filters */
- u32 arfs_count;
-};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
@@ -880,30 +754,60 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
- struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
u32 i;
- if (!p_mngr->t2)
+ if (!p_t2 || !p_t2->dma_mem)
return;
- for (i = 0; i < p_mngr->t2_num_pages; i++)
- if (p_mngr->t2[i].p_virt)
+ for (i = 0; i < p_t2->num_pages; i++)
+ if (p_t2->dma_mem[i].virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_mngr->t2[i].size,
- p_mngr->t2[i].p_virt,
- p_mngr->t2[i].p_phys);
+ p_t2->dma_mem[i].size,
+ p_t2->dma_mem[i].virt_addr,
+ p_t2->dma_mem[i].phys_addr);
+
+ kfree(p_t2->dma_mem);
+ p_t2->dma_mem = NULL;
+}
+
+static int
+qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
+ struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
+{
+ void **p_virt;
+ u32 size, i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return -EINVAL;
- kfree(p_mngr->t2);
- p_mngr->t2 = NULL;
+ for (i = 0; i < p_t2->num_pages; i++) {
+ size = min_t(u32, total_size, page_size);
+ p_virt = &p_t2->dma_mem[i].virt_addr;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_t2->dma_mem[i].phys_addr,
+ GFP_KERNEL);
+ if (!p_t2->dma_mem[i].virt_addr)
+ return -ENOMEM;
+
+ memset(*p_virt, 0, size);
+ p_t2->dma_mem[i].size = size;
+ total_size -= size;
+ }
+
+ return 0;
}
static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_num, total_size, ent_per_page, psz, i;
+ struct phys_mem_desc *p_t2_last_page;
struct qed_ilt_client_cfg *p_src;
struct qed_src_iids src_iids;
- struct qed_dma_mem *p_t2;
+ struct qed_src_t2 *p_t2;
int rc;
memset(&src_iids, 0, sizeof(src_iids));
@@ -921,49 +825,39 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
/* use the same page size as the SRC ILT client */
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
- p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+ p_t2 = &p_mngr->src_t2;
+ p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
- p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
- GFP_KERNEL);
- if (!p_mngr->t2) {
+ p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!p_t2->dma_mem) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
- /* allocate t2 pages */
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
- u32 size = min_t(u32, total_size, psz);
- void **p_virt = &p_mngr->t2[i].p_virt;
-
- *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
- &p_mngr->t2[i].p_phys,
- GFP_KERNEL);
- if (!p_mngr->t2[i].p_virt) {
- rc = -ENOMEM;
- goto t2_fail;
- }
- p_mngr->t2[i].size = size;
- total_size -= size;
- }
+ rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+ if (rc)
+ goto t2_fail;
/* Set the t2 pointers */
/* entries per page - must be a power of two */
ent_per_page = psz / sizeof(struct src_ent);
- p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+ p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
- p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
- p_mngr->last_free = (u64) p_t2->p_phys +
+ p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+ p_t2->last_free = (u64)p_t2_last_page->phys_addr +
((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
- for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ for (i = 0; i < p_t2->num_pages; i++) {
u32 ent_num = min_t(u32,
ent_per_page,
conn_num);
- struct src_ent *entries = p_mngr->t2[i].p_virt;
- u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+ struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+ u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
u32 j;
for (j = 0; j < ent_num - 1; j++) {
@@ -971,8 +865,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
entries[j].next = cpu_to_be64(val);
}
- if (i < p_mngr->t2_num_pages - 1)
- val = (u64) p_mngr->t2[i + 1].p_phys;
+ if (i < p_t2->num_pages - 1)
+ val = (u64)p_t2->dma_mem[i + 1].phys_addr;
else
val = 0;
entries[j].next = cpu_to_be64(val);
@@ -988,7 +882,7 @@ t2_fail:
}
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
if (!clients[pos].active) { \
continue; \
} else \
@@ -1014,13 +908,13 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
ilt_size = qed_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
- struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+ struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
- if (p_dma->p_virt)
+ if (p_dma->virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_dma->size, p_dma->p_virt,
- p_dma->p_phys);
- p_dma->p_virt = NULL;
+ p_dma->size, p_dma->virt_addr,
+ p_dma->phys_addr);
+ p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
}
@@ -1030,7 +924,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
enum ilt_clients ilt_client,
u32 start_line_offset)
{
- struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */
@@ -1059,8 +953,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
if (!p_virt)
return -ENOMEM;
- ilt_shadow[line].p_phys = p_phys;
- ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].phys_addr = p_phys;
+ ilt_shadow[line].virt_addr = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -1083,7 +977,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
int rc;
size = qed_cxt_ilt_shadow_size(clients);
- p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
rc = -ENOMEM;
@@ -1092,7 +986,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
- (u32)(size * sizeof(struct qed_dma_mem)));
+ (u32)(size * sizeof(struct phys_mem_desc)));
for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
@@ -1238,15 +1132,20 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 64K */
- for (i = 0; i < ILT_CLI_MAX; i++)
+ for (i = 0; i < MAX_ILT_CLIENTS; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
/* Initialize task sizes */
p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
- if (p_hwfn->cdev->p_iov_info)
+ if (p_hwfn->cdev->p_iov_info) {
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ p_mngr->first_vf_in_pf =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ }
/* Initialize the dynamic ILT allocation mutex */
mutex_init(&p_mngr->mutex);
@@ -1499,14 +1398,11 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_pf_rt_init_params params;
- struct qed_mcp_link_state *p_link;
struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids);
- p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id;
@@ -1522,7 +1418,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
- params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
@@ -1674,7 +1569,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
- struct qed_dma_mem *p_shdw;
+ struct phys_mem_desc *p_shdw;
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
@@ -1699,15 +1594,15 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
/* p_virt could be NULL in case of dynamic
* allocation
*/
- if (p_shdw[line].p_virt) {
+ if (p_shdw[line].virt_addr) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
- (p_shdw[line].p_phys >> 12));
+ (p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
rt_offst, line, i,
- (u64)(p_shdw[line].p_phys >> 12));
+ (u64)(p_shdw[line].phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -2050,10 +1945,10 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
- if (!p_mngr->ilt_shadow[line].p_virt)
+ if (!p_mngr->ilt_shadow[line].virt_addr)
return -EINVAL;
- p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
@@ -2234,7 +2129,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < total_lines; i++) {
shadow_line = i + p_fl_seg->start_line -
p_hwfn->p_cxt_mngr->pf_start_line;
- p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
}
p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
p_fl_seg->real_size_in_page;
@@ -2296,7 +2191,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
- if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
goto out0;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -2334,8 +2229,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
}
}
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
- p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
p_blk->real_size_in_page;
@@ -2345,9 +2240,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
ilt_hw_entry = 0;
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
- SET_FIELD(ilt_hw_entry,
- ILT_ENTRY_PHY_ADDR,
- (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
+ >> 12));
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
@@ -2434,16 +2329,16 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
}
for (i = shadow_start_line; i < shadow_end_line; i++) {
- if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
continue;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
- p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
/* compute absolute offset */
@@ -2547,8 +2442,76 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
ilt_idx = tid / num_tids_per_block + p_seg->start_line -
p_mngr->pf_start_line;
- *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
(tid % num_tids_per_block) * tid_size;
return 0;
}
+
+static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
+{
+ if (p_blk->real_size_in_page == 0)
+ return 0;
+
+ return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
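
qed_blk_calculate_pages() rounds up: a block with total_size = 100 KiB and real_size_in_page = 64 KiB needs DIV_ROUND_UP(100, 64) = 2 ILT pages, and the early return keeps blocks whose page size was never computed from dividing by zero. The qed_get_cdut_num_* helpers below sum these per-block counts, exported through qed_cxt.h for the debug code's ILT dump.
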
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 pages = 0, i;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 758a8b4c0de8..c4e815f6cabd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
u32 tid, u8 ctx_type, void **task_ctx);
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* PF per-protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT client configuration:
+ * per connection type (protocol) resources (cids, tis, vf cids, etc.).
+ * One block is used for the connection context (CDUC); each task context
+ * needs two blocks, one for the regular task context and one for the
+ * force-load memory.
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_offset;
+ u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_src_t2 {
+ struct phys_mem_desc *dma_mem;
+ u32 num_pages;
+ u64 first_free;
+ u64 last_free;
+};
+
+struct qed_cxt_mngr {
+	/* Per-protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+ u32 first_vf_in_pf;
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ struct qed_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
+ /* ILT shadow table */
+ struct phys_mem_desc *ilt_shadow;
+ u32 ilt_shadow_size;
+ u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_src_t2 src_t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ u8 task_type_id;
+ u16 task_ctx_size;
+ u16 conn_ctx_size;
+};
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 859caa6c1a1f..f4eebaabb6d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7,6 +7,7 @@
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed.h"
+#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
@@ -22,27 +23,28 @@ enum mem_groups {
MEM_GROUP_BRB_RAM,
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
- MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
+ MEM_GROUP_CAU_MEM_EXT,
MEM_GROUP_PXP_ILT,
- MEM_GROUP_TM_MEM,
- MEM_GROUP_SDM_MEM,
- MEM_GROUP_PBUF,
- MEM_GROUP_RAM,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
- MEM_GROUP_RDIF_CTX,
- MEM_GROUP_TDIF_CTX,
- MEM_GROUP_CFC_MEM,
MEM_GROUP_IGU_MEM,
MEM_GROUP_IGU_MSIX,
MEM_GROUP_CAU_SB,
MEM_GROUP_BMB_RAM,
MEM_GROUP_BMB_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
MEM_GROUPS_NUM
};
@@ -56,27 +58,28 @@ static const char * const s_mem_group_names[] = {
"BRB_RAM",
"BRB_MEM",
"PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
"IOR",
+ "RAM",
"BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
- "TASK_CFC_MEM",
"CAU_PI",
"CAU_MEM",
+ "CAU_MEM_EXT",
"PXP_ILT",
- "TM_MEM",
- "SDM_MEM",
- "PBUF",
- "RAM",
"MULD_MEM",
"BTB_MEM",
- "RDIF_CTX",
- "TDIF_CTX",
- "CFC_MEM",
"IGU_MEM",
"IGU_MSIX",
"CAU_SB",
"BMB_RAM",
"BMB_MEM",
+ "TM_MEM",
+ "TASK_CFC_MEM",
};
/* Idle check conditions */
@@ -170,35 +173,66 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond13,
};
+#define NUM_PHYS_BLOCKS 84
+
+#define NUM_DBG_RESET_REGS 8
+
/******************************* Data Types **********************************/
-enum platform_ids {
- PLATFORM_ASIC,
+enum hw_types {
+ HW_TYPE_ASIC,
PLATFORM_RESERVED,
PLATFORM_RESERVED2,
PLATFORM_RESERVED3,
- MAX_PLATFORM_IDS
+ PLATFORM_RESERVED4,
+ MAX_HW_TYPES
+};
+
+/* CM context types */
+enum cm_ctx_types {
+ CM_CTX_CONN_AG,
+ CM_CTX_CONN_ST,
+ CM_CTX_TASK_AG,
+ CM_CTX_TASK_ST,
+ NUM_CM_CTX_TYPES
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
+ DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
+	DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dword, 3 HW dwords */
+ DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
+ DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
+ DBG_BUS_NUM_FRAME_MODES
};
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u32 num_ilt_pages;
};
-/* Platform constant definitions */
-struct platform_defs {
+/* HW type constant definitions */
+struct hw_type_defs {
const char *name;
u32 delay_factor;
u32 dmae_thresh;
u32 log_thresh;
};
+/* RBC reset definitions */
+struct rbc_reset_defs {
+ u32 reset_reg_addr;
+ u32 reset_val[MAX_CHIP_IDS];
+};
+
/* Storm constant definitions.
* Addresses are in bytes, sizes are in quad-regs.
*/
struct storm_defs {
char letter;
- enum block_id block_id;
+ enum block_id sem_block_id;
enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
bool has_vfc;
u32 sem_fast_mem_addr;
@@ -207,47 +241,26 @@ struct storm_defs {
u32 sem_slow_mode_addr;
u32 sem_slow_mode1_conf_addr;
u32 sem_sync_dbg_empty_addr;
- u32 sem_slow_dbg_empty_addr;
+ u32 sem_gpre_vect_addr;
u32 cm_ctx_wr_addr;
- u32 cm_conn_ag_ctx_lid_size;
- u32 cm_conn_ag_ctx_rd_addr;
- u32 cm_conn_st_ctx_lid_size;
- u32 cm_conn_st_ctx_rd_addr;
- u32 cm_task_ag_ctx_lid_size;
- u32 cm_task_ag_ctx_rd_addr;
- u32 cm_task_st_ctx_lid_size;
- u32 cm_task_st_ctx_rd_addr;
+ u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
+ u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};
-/* Block constant definitions */
-struct block_defs {
- const char *name;
- bool exists[MAX_CHIP_IDS];
- bool associated_to_storm;
-
- /* Valid only if associated_to_storm is true */
- u32 storm_id;
- enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
- u32 dbg_select_addr;
- u32 dbg_enable_addr;
- u32 dbg_shift_addr;
- u32 dbg_force_valid_addr;
- u32 dbg_force_frame_addr;
- bool has_reset_bit;
-
- /* If true, block is taken out of reset before dump */
- bool unreset;
- enum dbg_reset_regs reset_reg;
-
- /* Bit offset in reset register */
- u8 reset_bit_offset;
+/* Debug Bus Constraint operation constant definitions */
+struct dbg_bus_constraint_op_defs {
+ u8 hw_op_val;
+ bool is_cyclic;
};
-/* Reset register definitions */
-struct reset_reg_defs {
- u32 addr;
+/* Storm Mode definitions */
+struct storm_mode_defs {
+ const char *name;
+ bool is_fast_dbg;
+ u8 id_in_hw;
+ u32 src_disable_reg_addr;
+ u32 src_enable_val;
bool exists[MAX_CHIP_IDS];
- u32 unreset_val[MAX_CHIP_IDS];
};
struct grc_param_defs {
@@ -257,7 +270,7 @@ struct grc_param_defs {
bool is_preset;
bool is_persistent;
u32 exclude_all_preset_val;
- u32 crash_preset_val;
+ u32 crash_preset_val[MAX_CHIP_IDS];
};
/* Address is in 128b units. Width is in bits. */
@@ -314,15 +327,7 @@ struct split_type_defs {
/******************************** Constants **********************************/
-#define MAX_LCIDS 320
-#define MAX_LTIDS 320
-
-#define NUM_IOR_SETS 2
-#define IORS_PER_SET 176
-#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
-
#define BYTES_IN_DWORD sizeof(u32)
-
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
@@ -348,20 +353,17 @@ struct split_type_defs {
qed_wr(dev, ptt, addr, (arr)[i]); \
} while (0)
-#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
- do { \
- for (i = 0; i < (arr_size); i++) \
- (arr)[i] = qed_rd(dev, ptt, addr); \
- } while (0)
-
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-/* Extra lines include a signature line + optional latency events line */
-#define NUM_EXTRA_DBG_LINES(block_desc) \
- (1 + ((block_desc)->has_latency_events ? 1 : 0))
-#define NUM_DBG_LINES(block_desc) \
- ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+/* Extra lines include a signature line + optional latency events line */
+#define NUM_EXTRA_DBG_LINES(block) \
+ (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
+#define NUM_DBG_LINES(block) \
+ ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
+
+#define USE_DMAE true
+#define PROTECT_WIDE_BUS true
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -380,6 +382,9 @@ struct split_type_defs {
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define PAGE_MEM_DESC_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
+
#define IDLE_CHK_MAX_ENTRIES_SIZE 32
/* The sizes and offsets below are specified in bits */
@@ -425,7 +430,9 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 8
+#define NUM_COMMON_GLOBAL_PARAMS 9
+
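+/* Maximum recursion depth when parsing the debug modes tree */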
+#define MAX_RECURSION_DEPTH 10
#define FW_IMG_MAIN 1
@@ -449,1054 +456,121 @@ struct split_type_defs {
(MCP_REG_SCRATCH + \
offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MAX_SW_PLATFORM_STR_SIZE 64
+
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
-struct dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb"},
- {"ah"},
- {"reserved"},
+ {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
+ {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
- TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
- TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- TCM_REG_CTX_RBC_ACCS,
- 4, TCM_REG_AGG_CON_CTX,
- 16, TCM_REG_SM_CON_CTX,
- 2, TCM_REG_AGG_TASK_CTX,
- 4, TCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
+ TCM_REG_CTX_RBC_ACCS,
+ {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
+ TCM_REG_SM_TASK_CTX},
+ {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
+ },
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCM}, false,
- MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
- MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- MCM_REG_CTX_RBC_ACCS,
- 1, MCM_REG_AGG_CON_CTX,
- 10, MCM_REG_SM_CON_CTX,
- 2, MCM_REG_AGG_TASK_CTX,
- 7, MCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE_BB_K2,
+ MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ MSEM_REG_SLOW_DBG_MODE_BB_K2,
+ MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_SYNC_DBG_EMPTY,
+ MSEM_REG_DBG_GPRE_VECT,
+ MCM_REG_CTX_RBC_ACCS,
+ {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
+ MCM_REG_SM_TASK_CTX},
+ {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
+ },
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
- USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
- USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
- UCM_REG_CTX_RBC_ACCS,
- 2, UCM_REG_AGG_CON_CTX,
- 13, UCM_REG_SM_CON_CTX,
- 3, UCM_REG_AGG_TASK_CTX,
- 3, UCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE_BB_K2,
+ USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ USEM_REG_SLOW_DBG_MODE_BB_K2,
+ USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_SYNC_DBG_EMPTY,
+ USEM_REG_DBG_GPRE_VECT,
+ UCM_REG_CTX_RBC_ACCS,
+ {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
+ UCM_REG_SM_TASK_CTX},
+ {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
+ },
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
- XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
- XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- XCM_REG_CTX_RBC_ACCS,
- 9, XCM_REG_AGG_CON_CTX,
- 15, XCM_REG_SM_CON_CTX,
- 0, 0,
- 0, 0},
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE_BB_K2,
+ XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ XSEM_REG_SLOW_DBG_MODE_BB_K2,
+ XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_SYNC_DBG_EMPTY,
+ XSEM_REG_DBG_GPRE_VECT,
+ XCM_REG_CTX_RBC_ACCS,
+ {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
+ {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
+ },
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCY}, false,
- YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
- YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- YCM_REG_CTX_RBC_ACCS,
- 2, YCM_REG_AGG_CON_CTX,
- 3, YCM_REG_SM_CON_CTX,
- 2, YCM_REG_AGG_TASK_CTX,
- 12, YCM_REG_SM_TASK_CTX},
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE_BB_K2,
+ YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ YSEM_REG_SLOW_DBG_MODE_BB_K2,
+ YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_SYNC_DBG_EMPTY,
+ YSEM_REG_DBG_GPRE_VECT,
+ YCM_REG_CTX_RBC_ACCS,
+ {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
+ YCM_REG_SM_TASK_CTX},
+ {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
+ },
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
- PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
- PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
- PCM_REG_CTX_RBC_ACCS,
- 0, 0,
- 10, PCM_REG_SM_CON_CTX,
- 0, 0,
- 0, 0}
-};
-
-/* Block definitions array */
-
-static struct block_defs block_grc_defs = {
- "grc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
- GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
- GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
- GRC_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_UA, 1
-};
-
-static struct block_defs block_miscs_defs = {
- "miscs", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_defs = {
- "misc", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbu_defs = {
- "dbu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pglue_b_defs = {
- "pglue_b",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
- PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
- PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
- PGLUE_B_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 1
-};
-
-static struct block_defs block_cnig_defs = {
- "cnig",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCW},
- CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
- CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
- CNIG_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 0
-};
-
-static struct block_defs block_cpmu_defs = {
- "cpmu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 8
-};
-
-static struct block_defs block_ncsi_defs = {
- "ncsi",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
- NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
- NCSI_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 5
-};
-
-static struct block_defs block_opte_defs = {
- "opte", {true, true, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 4
-};
-
-static struct block_defs block_bmb_defs = {
- "bmb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
- BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
- BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
- BMB_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 7
-};
-
-static struct block_defs block_pcie_defs = {
- "pcie",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2_E5,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
- PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp_defs = {
- "mcp", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp2_defs = {
- "mcp2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
- MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
- MCP2_REG_DBG_FORCE_FRAME,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pswhst_defs = {
- "pswhst",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
- PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
- PSWHST_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswhst2_defs = {
- "pswhst2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
- PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
- PSWHST2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswrd_defs = {
- "pswrd",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
- PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
- PSWRD_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswrd2_defs = {
- "pswrd2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
- PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
- PSWRD2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswwr_defs = {
- "pswwr",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
- PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
- PSWWR_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswwr2_defs = {
- "pswwr2", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswrq_defs = {
- "pswrq",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
- PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
- PSWRQ_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pswrq2_defs = {
- "pswrq2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
- PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
- PSWRQ2_REG_DBG_FORCE_FRAME,
- true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pglcs_defs = {
- "pglcs",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
- PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
- PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 2
-};
-
-static struct block_defs block_ptu_defs = {
- "ptu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
- PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
- PTU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
-};
-
-static struct block_defs block_dmae_defs = {
- "dmae",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
- DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
- DMAE_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
-};
-
-static struct block_defs block_tcm_defs = {
- "tcm",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
- TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
- TCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
-};
-
-static struct block_defs block_mcm_defs = {
- "mcm",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
- MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
- MCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
-};
-
-static struct block_defs block_ucm_defs = {
- "ucm",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
- UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
- UCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
-};
-
-static struct block_defs block_xcm_defs = {
- "xcm",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
- XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
- XCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
-};
-
-static struct block_defs block_ycm_defs = {
- "ycm",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
- YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
- YCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
-};
-
-static struct block_defs block_pcm_defs = {
- "pcm",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
- PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
- PCM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
-};
-
-static struct block_defs block_qm_defs = {
- "qm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
- QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
- QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
- QM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
-};
-
-static struct block_defs block_tm_defs = {
- "tm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
- TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
- TM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
-};
-
-static struct block_defs block_dorq_defs = {
- "dorq",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
- DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
- DORQ_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
-};
-
-static struct block_defs block_brb_defs = {
- "brb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
- BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
- BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
- BRB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
-};
-
-static struct block_defs block_src_defs = {
- "src",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
- SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
- SRC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
-};
-
-static struct block_defs block_prs_defs = {
- "prs",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
- PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
- PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
- PRS_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
-};
-
-static struct block_defs block_tsdm_defs = {
- "tsdm",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
- TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
- TSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
-};
-
-static struct block_defs block_msdm_defs = {
- "msdm",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
- MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
- MSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
-};
-
-static struct block_defs block_usdm_defs = {
- "usdm",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
- USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
- USDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
-};
-
-static struct block_defs block_xsdm_defs = {
- "xsdm",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
- XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
- XSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
-};
-
-static struct block_defs block_ysdm_defs = {
- "ysdm",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
- YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
- YSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
-};
-
-static struct block_defs block_psdm_defs = {
- "psdm",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
- PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
- PSDM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
-};
-
-static struct block_defs block_tsem_defs = {
- "tsem",
- {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
- TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
- TSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
-};
-
-static struct block_defs block_msem_defs = {
- "msem",
- {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
- MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
- MSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
-};
-
-static struct block_defs block_usem_defs = {
- "usem",
- {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
- USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
- USEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
-};
-
-static struct block_defs block_xsem_defs = {
- "xsem",
- {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
- XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
- XSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
-};
-
-static struct block_defs block_ysem_defs = {
- "ysem",
- {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
- YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
- YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
- YSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
-};
-
-static struct block_defs block_psem_defs = {
- "psem",
- {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
- PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
- PSEM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
-};
-
-static struct block_defs block_rss_defs = {
- "rss",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
- RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
- RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
- RSS_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
-};
-
-static struct block_defs block_tmld_defs = {
- "tmld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
- TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
- TMLD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
-};
-
-static struct block_defs block_muld_defs = {
- "muld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
- MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
- MULD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
-};
-
-static struct block_defs block_yuld_defs = {
- "yuld",
- {true, true, false}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- MAX_DBG_BUS_CLIENTS},
- YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
- YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
- YULD_REG_DBG_FORCE_FRAME_BB_K2,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 15
-};
-
-static struct block_defs block_xyld_defs = {
- "xyld",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
- XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
- XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
- XYLD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
-};
-
-static struct block_defs block_ptld_defs = {
- "ptld",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
- PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
- PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
- PTLD_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 28
-};
-
-static struct block_defs block_ypld_defs = {
- "ypld",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
- YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
- YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
- YPLD_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- 27
-};
-
-static struct block_defs block_prm_defs = {
- "prm",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
- PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
- PRM_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
-};
-
-static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
- PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
- PBF_PB1_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 11
-};
-
-static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
- PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
- PBF_PB2_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 12
-};
-
-static struct block_defs block_rpb_defs = {
- "rpb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
- RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
- RPB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
-};
-
-static struct block_defs block_btb_defs = {
- "btb",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
- BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
- BTB_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
-};
-
-static struct block_defs block_pbf_defs = {
- "pbf",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
- PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
- PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
- PBF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
-};
-
-static struct block_defs block_rdif_defs = {
- "rdif",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
- RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
- RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
- RDIF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
-};
-
-static struct block_defs block_tdif_defs = {
- "tdif",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
- TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
- TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
- TDIF_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
-};
-
-static struct block_defs block_cdu_defs = {
- "cdu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
- CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
- CDU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
-};
-
-static struct block_defs block_ccfc_defs = {
- "ccfc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
- CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
- CCFC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
-};
-
-static struct block_defs block_tcfc_defs = {
- "tcfc",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
- TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
- TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
- TCFC_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
-};
-
-static struct block_defs block_igu_defs = {
- "igu",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
- IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
- IGU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
-};
-
-static struct block_defs block_cau_defs = {
- "cau",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
- CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
- CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
- CAU_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
-};
-
-static struct block_defs block_rgfs_defs = {
- "rgfs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
-};
-
-static struct block_defs block_rgsrc_defs = {
- "rgsrc",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
- RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
- RGSRC_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 30
-};
-
-static struct block_defs block_tgfs_defs = {
- "tgfs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
-};
-
-static struct block_defs block_tgsrc_defs = {
- "tgsrc",
- {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
- TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
- TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
- TGSRC_REG_DBG_FORCE_FRAME_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- 31
-};
-
-static struct block_defs block_umac_defs = {
- "umac",
- {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
- UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
- UMAC_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 6
-};
-
-static struct block_defs block_xmac_defs = {
- "xmac", {true, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbg_defs = {
- "dbg", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
-};
-
-static struct block_defs block_nig_defs = {
- "nig",
- {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
- NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
- NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
- NIG_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
-};
-
-static struct block_defs block_wol_defs = {
- "wol",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
- WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
- WOL_REG_DBG_FORCE_FRAME_K2_E5,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
-};
-
-static struct block_defs block_bmbn_defs = {
- "bmbn",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
- BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
- BMBN_REG_DBG_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ipc_defs = {
- "ipc", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 8
-};
-
-static struct block_defs block_nwm_defs = {
- "nwm",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
- NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
- NWM_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
-};
-
-static struct block_defs block_nws_defs = {
- "nws",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
- NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
- NWS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 12
-};
-
-static struct block_defs block_ms_defs = {
- "ms",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
- MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
- MS_REG_DBG_FORCE_FRAME_K2_E5,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 13
-};
-
-static struct block_defs block_phy_pcie_defs = {
- "phy_pcie",
- {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2_E5,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
- PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_led_defs = {
- "led", {false, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_HV, 14
-};
-
-static struct block_defs block_avs_wrap_defs = {
- "avs_wrap", {false, true, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- true, false, DBG_RESET_REG_MISCS_PL_UA, 11
-};
-
-static struct block_defs block_pxpreqbus_defs = {
- "pxpreqbus", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_bar0_map_defs = {
- "bar0_map", {true, true, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
- &block_grc_defs,
- &block_miscs_defs,
- &block_misc_defs,
- &block_dbu_defs,
- &block_pglue_b_defs,
- &block_cnig_defs,
- &block_cpmu_defs,
- &block_ncsi_defs,
- &block_opte_defs,
- &block_bmb_defs,
- &block_pcie_defs,
- &block_mcp_defs,
- &block_mcp2_defs,
- &block_pswhst_defs,
- &block_pswhst2_defs,
- &block_pswrd_defs,
- &block_pswrd2_defs,
- &block_pswwr_defs,
- &block_pswwr2_defs,
- &block_pswrq_defs,
- &block_pswrq2_defs,
- &block_pglcs_defs,
- &block_dmae_defs,
- &block_ptu_defs,
- &block_tcm_defs,
- &block_mcm_defs,
- &block_ucm_defs,
- &block_xcm_defs,
- &block_ycm_defs,
- &block_pcm_defs,
- &block_qm_defs,
- &block_tm_defs,
- &block_dorq_defs,
- &block_brb_defs,
- &block_src_defs,
- &block_prs_defs,
- &block_tsdm_defs,
- &block_msdm_defs,
- &block_usdm_defs,
- &block_xsdm_defs,
- &block_ysdm_defs,
- &block_psdm_defs,
- &block_tsem_defs,
- &block_msem_defs,
- &block_usem_defs,
- &block_xsem_defs,
- &block_ysem_defs,
- &block_psem_defs,
- &block_rss_defs,
- &block_tmld_defs,
- &block_muld_defs,
- &block_yuld_defs,
- &block_xyld_defs,
- &block_ptld_defs,
- &block_ypld_defs,
- &block_prm_defs,
- &block_pbf_pb1_defs,
- &block_pbf_pb2_defs,
- &block_rpb_defs,
- &block_btb_defs,
- &block_pbf_defs,
- &block_rdif_defs,
- &block_tdif_defs,
- &block_cdu_defs,
- &block_ccfc_defs,
- &block_tcfc_defs,
- &block_igu_defs,
- &block_cau_defs,
- &block_rgfs_defs,
- &block_rgsrc_defs,
- &block_tgfs_defs,
- &block_tgsrc_defs,
- &block_umac_defs,
- &block_xmac_defs,
- &block_dbg_defs,
- &block_nig_defs,
- &block_wol_defs,
- &block_bmbn_defs,
- &block_ipc_defs,
- &block_nwm_defs,
- &block_nws_defs,
- &block_ms_defs,
- &block_phy_pcie_defs,
- &block_led_defs,
- &block_avs_wrap_defs,
- &block_pxpreqbus_defs,
- &block_misc_aeu_defs,
- &block_bar0_map_defs,
-};
-
-static struct platform_defs s_platform_defs[] = {
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE_BB_K2,
+ PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ PSEM_REG_SLOW_DBG_MODE_BB_K2,
+ PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_SYNC_DBG_EMPTY,
+ PSEM_REG_DBG_GPRE_VECT,
+ PCM_REG_CTX_RBC_ACCS,
+ {0, PCM_REG_SM_CON_CTX, 0, 0},
+ {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
+ },
+};
+
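+/* HW type constant definitions array */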
+static struct hw_type_defs s_hw_type_defs[] = {
+ /* HW_TYPE_ASIC */
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
@@ -1505,146 +579,159 @@ static struct platform_defs s_platform_defs[] = {
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, false, 1, 1},
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
- /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
- {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
+ /* DBG_GRC_PARAM_DUMP_DORQ */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ /* DBG_GRC_PARAM_RESERVED1 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
- MAX_LCIDS, MAX_LCIDS},
+ /* DBG_GRC_PARAM_RESERVED2 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
- /* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
- MAX_LTIDS, MAX_LTIDS},
+ /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
+ {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, false, 0, 0},
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, true, false, 0, 0},
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0, 0}, 0, 1, false, false, 1, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1, 1}, 0, 1, false, false, 0, 1},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0, 0}, 0, 1, false, false, 0, 0},
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0, 0}, 0, 1, false, false, 0, 0}
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_RESERVED3 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
+ {{0, 1}, 0, 1, false, false, 0, {0, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_CAU_EXT */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
- { "rss_mem_cid", "rss_cid", 0, 32,
- {256, 320, 512} },
+ {"rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320}},
- { "rss_mem_key_msb", "rss_key", 1024, 256,
- {128, 208, 257} },
+ {"rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208}},
- { "rss_mem_key_lsb", "rss_key", 2048, 64,
- {128, 208, 257} },
+ {"rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208}},
- { "rss_mem_info", "rss_info", 3072, 16,
- {128, 208, 256} },
+ {"rss_mem_info", "rss_info", 3072, 16,
+ {128, 208}},
- { "rss_mem_ind", "rss_ind", 4096, 16,
- {16384, 26624, 32768} }
+ {"rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624}}
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1655,54 +742,31 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
};
static struct big_ram_defs s_big_ram_defs[] = {
- { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
- BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- MISC_REG_BLOCK_256B_EN, {0, 0, 0},
- {153600, 180224, 282624} },
-
- { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
- BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- MISC_REG_BLOCK_256B_EN, {0, 1, 1},
- {92160, 117760, 168960} },
-
- { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
- BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
- {36864, 36864, 36864} }
-};
-
-static struct reset_reg_defs s_reset_regs_defs[] = {
- /* DBG_RESET_REG_MISCS_PL_UA */
- { MISCS_REG_RESET_PL_UA,
- {true, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV,
- {true, true, true}, {0x0, 0x400, 0x600} },
-
- /* DBG_RESET_REG_MISCS_PL_HV_2 */
- { MISCS_REG_RESET_PL_HV_2_K2_E5,
- {false, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISC_PL_UA */
- { MISC_REG_RESET_PL_UA,
- {true, true, true}, {0x0, 0x0, 0x0} },
-
- /* DBG_RESET_REG_MISC_PL_HV */
- { MISC_REG_RESET_PL_HV,
- {true, true, true}, {0x0, 0x0, 0x0} },
+ {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 0},
+ {153600, 180224}},
- /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
- { MISC_REG_RESET_PL_PDA_VMAIN_1,
- {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
+ {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 1},
+ {92160, 117760}},
- /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
- { MISC_REG_RESET_PL_PDA_VMAIN_2,
- {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
+ {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ MISCS_REG_BLOCK_256B_EN, {0, 0},
+ {36864, 36864}}
+};
- /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
- { MISC_REG_RESET_PL_PDA_VAUX,
- {true, true, true}, {0x2, 0x2, 0x2} },
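+/* Reset registers with their per-chip unreset values ({bb} {k2}) */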
+static struct rbc_reset_defs s_rbc_reset_defs[] = {
+ {MISCS_REG_RESET_PL_HV,
+ {0x0, 0x400}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {0x4404040, 0x4404040}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {0x7, 0x7c00007}},
+ {MISC_REG_RESET_PL_PDA_VAUX,
+ {0x2, 0x2}},
};
static struct phy_defs s_phy_defs[] = {
@@ -1785,9 +849,19 @@ static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
}
}
+/* Sets pointer and size for the specified binary buffer type */
+static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
+ enum bin_dbg_buffer_type buf_type,
+ const u32 *ptr, u32 size)
+{
+ struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
+
+ buf->ptr = (void *)ptr;
+ buf->size = size;
+}
+
/* Initializes debug data for the specified device */
-static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 num_pfs = 0, max_pfs_per_port = 0;
@@ -1812,26 +886,25 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
return DBG_STATUS_UNKNOWN_CHIP;
}
- /* Set platofrm */
- dev_data->platform_id = PLATFORM_ASIC;
+ /* Set HW type */
+ dev_data->hw_type = HW_TYPE_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
/* Set port mode */
- switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
- case 0:
+ switch (p_hwfn->cdev->num_ports_in_engine) {
+ case 1:
dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
break;
- case 1:
+ case 2:
dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
break;
- case 2:
+ case 4:
dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
break;
}
/* Set 100G mode */
- if (dev_data->chip_id == CHIP_BB &&
- qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
+ if (QED_IS_CMT(p_hwfn->cdev))
dev_data->mode_enable[MODE_100G] = 1;
/* Set number of ports */
@@ -1857,14 +930,36 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
-static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
- enum block_id block_id)
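+/* Returns a pointer to the common definitions of the specified block */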
+static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block *dbg_block;
+
+ dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
+ return dbg_block + block_id;
+}
+
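+/* Returns a pointer to the chip-specific definitions of the specified block */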
+static const struct dbg_block_chip *
+qed_get_dbg_block_per_chip(struct qed_hwfn *p_hwfn, enum block_id block_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
- MAX_CHIP_IDS +
- dev_data->chip_id];
+ return (const struct dbg_block_chip *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
+ block_id * MAX_CHIP_IDS + dev_data->chip_id;
+}
+
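+/* Returns a pointer to the chip-specific definitions of the specified reset
+ * register.
+ */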
+static const struct dbg_reset_reg *
+qed_get_dbg_reset_reg(struct qed_hwfn *p_hwfn, u8 reset_reg_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (const struct dbg_reset_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
+ reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
}
/* Reads the FW info structure for the specified Storm from the chip,
@@ -1885,8 +980,9 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
* The address is located in the last line of the Storm RAM.
*/
addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
- sizeof(fw_info_location);
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+
dest = (u32 *)&fw_info_location;
for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
@@ -2081,6 +1177,29 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
+/* Reads the chip revision from the chip and writes it as a param to the
+ * specified buffer. Returns the dumped size in dwords.
+ */
+static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char param_str[3] = "??";
+
+ if (dev_data->hw_type == HW_TYPE_ASIC) {
+ u32 chip_rev, chip_metal;
+
+ chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+
+ param_str[0] = 'a' + (u8)chip_rev;
+ param_str[1] = '0' + (u8)chip_metal;
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
+}
+
/* Writes a section header to the specified buffer.
* Returns the dumped size in dwords.
*/
@@ -2104,7 +1223,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
u8 num_params;
/* Dump global params section header */
- num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
+ (dev_data->chip_id == CHIP_BB ? 1 : 0);
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "global_params", num_params);
@@ -2112,6 +1232,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
offset += qed_dump_mfw_ver_param(p_hwfn,
p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_chip_revision_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
offset += qed_dump_num_param(dump_buf + offset,
dump, "tools-version", TOOLS_VERSION);
offset += qed_dump_str_param(dump_buf + offset,
@@ -2121,11 +1243,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump,
"platform",
- s_platform_defs[dev_data->platform_id].
- name);
- offset +=
- qed_dump_num_param(dump_buf + offset, dump, "pci-func",
- p_hwfn->abs_pf_id);
+ s_hw_type_defs[dev_data->hw_type].name);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "pci-func", p_hwfn->abs_pf_id);
+ if (dev_data->chip_id == CHIP_BB)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "path", QED_PATH_ID(p_hwfn));
return offset;
}
@@ -2156,24 +1279,87 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 i;
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 rst_reg_id;
+ u32 blk_id;
/* Read reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++)
- if (s_reset_regs_defs[i].exists[dev_data->chip_id])
- reg_val[i] = qed_rd(p_hwfn,
- p_ptt, s_reset_regs_defs[i].addr);
+ for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
+ const struct dbg_reset_reg *rst_reg;
+ bool rst_reg_removed;
+ u32 rst_reg_addr;
+
+ rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
+ rst_reg_removed = GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_IS_REMOVED);
+ rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_ADDR));
+
+ if (!rst_reg_removed)
+ reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
+ rst_reg_addr);
+ }
/* Check if blocks are in reset */
- for (i = 0; i < MAX_BLOCK_ID; i++) {
- struct block_defs *block = s_block_defs[i];
+ for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
+ const struct dbg_block_chip *blk;
+ bool has_rst_reg;
+ bool is_removed;
+
+ blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
+ is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_rst_reg = GET_FIELD(blk->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+
+ if (!is_removed && has_rst_reg)
+ dev_data->block_in_reset[blk_id] =
+ !(reg_val[blk->reset_reg_id] &
+ BIT(blk->reset_reg_bit_offset));
+ }
+}
+
+/* Recursive implementation of qed_is_mode_match() */
+static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
+ u16 *modes_buf_offset, u8 rec_depth)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 *dbg_array;
+ bool arg1, arg2;
+ u8 tree_val;
+
+ if (rec_depth > MAX_RECURSION_DEPTH) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
+ return false;
+ }
- dev_data->block_in_reset[i] = block->has_reset_bit &&
- !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
+ /* Get next element from modes tree buffer */
+ dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = dbg_array[(*modes_buf_offset)++];
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ arg2 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
}
}
+/* Returns true if the mode (specified using modes_buf_offset) is enabled */
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
+}
+
/* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -2185,23 +1371,21 @@ static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
- u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
- struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
+ u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ const struct dbg_reset_reg *reset_reg;
+ const struct dbg_block_chip *block;
- dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
- old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
- new_reset_reg_val =
- old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
+ block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
+ reset_reg_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
- qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
- qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
-}
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
-static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum dbg_bus_frame_modes mode)
-{
- qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
}
/* Enable / disable Debug Bus clients according to the specified mask
@@ -2213,28 +1397,65 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
-static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
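+/* Configures the specified block's debug line: line selection, dword enable
+ * mask, right shift and force valid/frame masks.
+ */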
+static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
+{
+ const struct dbg_block_chip *block =
+ qed_get_dbg_block_per_chip(p_hwfn, block_id);
+
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
+ line_id);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
+ enable_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
+ right_shift);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
+ force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
+ force_frame_mask);
+}
+
+/* Disable debug bus in all blocks */
+static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- bool arg1, arg2;
- const u32 *ptr;
- u8 tree_val;
+ u32 block_id;
- /* Get next element from modes tree buffer */
- ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
- tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ /* Disable all blocks */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_block_chip *block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)block_id);
- switch (tree_val) {
- case INIT_MODE_OP_NOT:
- return !qed_is_mode_match(p_hwfn, modes_buf_offset);
- case INIT_MODE_OP_OR:
- case INIT_MODE_OP_AND:
- arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
- arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
- return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
- arg2) : (arg1 && arg2);
- default:
- return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED) ||
+ dev_data->block_in_reset[block_id])
+ continue;
+
+ /* Disable debug bus */
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
+ u32 dbg_en_addr =
+ block_per_chip->dbg_dword_enable_reg_addr;
+ u16 modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ bool eval_mode =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ qed_wr(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(dbg_en_addr),
+ 0);
+ }
}
}
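qed_bus_disable_blocks() keys off packed flag bits (DBG_BLOCK_CHIP_IS_REMOVED, DBG_BLOCK_CHIP_HAS_DBG_BUS) extracted with GET_FIELD. A sketch of the mask/shift convention such macros rely on; the <NAME>_MASK/<NAME>_SHIFT pairs here are illustrative stand-ins, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

/* Each field is described by a <NAME>_MASK / <NAME>_SHIFT pair */
#define FLAG_IS_REMOVED_MASK	0x1
#define FLAG_IS_REMOVED_SHIFT	0
#define FLAG_HAS_DBG_BUS_MASK	0x1
#define FLAG_HAS_DBG_BUS_SHIFT	1

#define GET_FIELD(val, name) \
	(((val) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(val, name, field) \
	((val) = ((val) & ~(name##_MASK << name##_SHIFT)) | \
		 (((field) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint8_t flags = 0;

	SET_FIELD(flags, FLAG_HAS_DBG_BUS, 1);
	printf("removed=%u dbg_bus=%u\n",
	       (unsigned int)GET_FIELD(flags, FLAG_IS_REMOVED),
	       (unsigned int)GET_FIELD(flags, FLAG_HAS_DBG_BUS));
	return 0;
}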
@@ -2247,6 +1468,20 @@ static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
+/* Returns the storm_id that matches the specified Storm letter,
+ * or MAX_DBG_STORMS if invalid storm letter.
+ */
+static enum dbg_storms qed_get_id_from_letter(char storm_letter)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
+ if (s_storm_defs[storm_id].letter == storm_letter)
+ return (enum dbg_storms)storm_id;
+
+ return MAX_DBG_STORMS;
+}
+
/* Returns true if the specified Storm should be included in the dump, false
* otherwise.
*/
@@ -2262,14 +1497,20 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
- struct block_defs *block = s_block_defs[block_id];
+ const struct dbg_block *block;
u8 i;
- /* Check Storm match */
- if (block->associated_to_storm &&
- !qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)block->storm_id))
- return false;
+ block = get_dbg_block(p_hwfn, block_id);
+
+ /* If the block is associated with a Storm, check Storm match */
+ if (block->associated_storm_letter) {
+ enum dbg_storms associated_storm_id =
+ qed_get_id_from_letter(block->associated_storm_letter);
+
+ if (associated_storm_id == MAX_DBG_STORMS ||
+ !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
+ return false;
+ }
for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
struct big_ram_defs *big_ram = &s_big_ram_defs[i];
@@ -2291,6 +1532,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_CAU_SB:
case MEM_GROUP_CAU_PI:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ case MEM_GROUP_CAU_MEM_EXT:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
case MEM_GROUP_QM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
case MEM_GROUP_CFC_MEM:
@@ -2298,6 +1541,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_TASK_CFC_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
+ case MEM_GROUP_DORQ_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2343,64 +1588,104 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
msleep(STALL_DELAY_MS);
}
-/* Takes all blocks out of reset */
+/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
+ * taken out of reset.
+ */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+ struct qed_ptt *p_ptt, bool rbc_only)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 block_id, i;
+ u8 chip_id = dev_data->chip_id;
+ u32 i;
- /* Fill reset regs values */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
+ /* Take RBCs out of reset */
+ for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
+ if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_rbc_reset_defs[i].reset_reg_addr +
+ RESET_REG_UNRESET_OFFSET,
+ s_rbc_reset_defs[i].reset_val[chip_id]);
- if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
- block->unreset)
- reg_val[block->reset_reg] |=
- BIT(block->reset_bit_offset);
- }
+ if (!rbc_only) {
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 reset_reg_id;
+ u32 block_id;
- /* Write reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
- continue;
+ /* Fill reset regs values */
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
+ bool is_removed, has_reset_reg, unreset_before_dump;
+ const struct dbg_block_chip *block;
+
+ block = qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)
+ block_id);
+ is_removed =
+ GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_reset_reg =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+ unreset_before_dump =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
+
+ if (!is_removed && has_reset_reg && unreset_before_dump)
+ reg_val[block->reset_reg_id] |=
+ BIT(block->reset_reg_bit_offset);
+ }
- reg_val[i] |=
- s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
+ /* Write reset registers */
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
- if (reg_val[i])
- qed_wr(p_hwfn,
- p_ptt,
- s_reset_regs_defs[i].addr +
- RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD
+ (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
+ continue;
+
+ if (reg_val[reset_reg_id]) {
+ reset_reg_addr =
+ GET_FIELD(reset_reg->data,
+ DBG_RESET_REG_ADDR);
+ qed_wr(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(reset_reg_addr) +
+ RESET_REG_UNRESET_OFFSET,
+ reg_val[reset_reg_id]);
+ }
+ }
}
}
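The non-RBC path above runs two passes: the first ORs every qualifying block's bit into a per-register accumulator, the second issues a single write per reset register that has any bits set, so each register is touched at most once. Reduced to a standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define NUM_RESET_REGS 4

struct blk {
	uint8_t reset_reg;	/* which reset register */
	uint8_t bit;		/* bit within that register */
	int needs_unreset;
};

int main(void)
{
	static const struct blk blocks[] = {
		{ 0, 3, 1 }, { 0, 7, 1 }, { 2, 0, 1 }, { 1, 5, 0 },
	};
	uint32_t reg_val[NUM_RESET_REGS] = { 0 };
	unsigned int i;

	/* Pass 1: accumulate one bitmask per reset register */
	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		if (blocks[i].needs_unreset)
			reg_val[blocks[i].reset_reg] |= 1u << blocks[i].bit;

	/* Pass 2: one write per register that has work to do */
	for (i = 0; i < NUM_RESET_REGS; i++)
		if (reg_val[i])
			printf("write reg %u <- 0x%08x\n", i, reg_val[i]);
	return 0;
}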
/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
-qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type)
{
const struct dbg_attn_block *base_attn_block_arr =
- (const struct dbg_attn_block *)
- s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+ (const struct dbg_attn_block *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
return &base_attn_block_arr[block_id].per_type_data[attn_type];
}
/* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
-qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type,
u8 *num_attn_regs)
{
const struct dbg_attn_block_type_data *block_type_data =
- qed_get_block_attn_data(block_id, attn_type);
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type);
*num_attn_regs = block_type_data->num_regs;
- return &((const struct dbg_attn_reg *)
- s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
- regs_offset];
+ return (const struct dbg_attn_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
+ block_type_data->regs_offset;
}
/* For each block, clear the status of all parities */
@@ -2412,11 +1697,12 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
u8 reg_idx, num_attn_regs;
u32 block_id;
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
continue;
- attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
@@ -2444,22 +1730,20 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
}
/* Dumps GRC registers section header. Returns the dumped size in dwords.
- * The following parameters are dumped:
+ * The following parameters are dumped:
* - count: no. of dumped entries
* - split_type: split type
* - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
- * - param_name: user parameter value (dumped only if param_name != NULL
- * and param_val != NULL).
+ * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
u32 num_reg_entries,
enum init_split_types split_type,
- u8 split_id,
- const char *param_name, const char *param_val)
+ u8 split_id, const char *reg_type_name)
{
u8 num_params = 2 +
- (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
+ (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
u32 offset = 0;
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -2472,9 +1756,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
if (split_type != SPLIT_TYPE_NONE)
offset += qed_dump_num_param(dump_buf + offset,
dump, "id", split_id);
- if (param_name && param_val)
+ if (reg_type_name)
offset += qed_dump_str_param(dump_buf + offset,
- dump, param_name, param_val);
+ dump, "type", reg_type_name);
return offset;
}
@@ -2504,21 +1788,12 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
+ bool read_using_dmae = false;
+ u32 thresh;
if (!dump)
return len;
- /* Print log if needed */
- dev_data->num_regs_read += len;
- if (dev_data->num_regs_read >=
- s_platform_defs[dev_data->platform_id].log_thresh) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers...\n",
- dev_data->num_regs_read);
- dev_data->num_regs_read = 0;
- }
-
switch (split_type) {
case SPLIT_TYPE_PORT:
port_id = split_id;
@@ -2539,38 +1814,77 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
}
/* Try reading using DMAE */
- if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
- (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
- wide_bus)) {
- if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
- (u64)(uintptr_t)(dump_buf), len, NULL))
- return len;
- dev_data->use_dmae = 0;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Failed reading from chip using DMAE, using GRC instead\n");
+ if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
+ (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
+ (PROTECT_WIDE_BUS && wide_bus))) {
+ struct qed_dmae_params dmae_params;
+
+ /* Set DMAE params */
+ memset(&dmae_params, 0, sizeof(dmae_params));
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ dmae_params.port_id = port_id;
+ break;
+ case SPLIT_TYPE_PF:
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.src_pfid = pf_id;
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.port_id = port_id;
+ dmae_params.src_pfid = pf_id;
+ break;
+ default:
+ break;
+ }
+
+ /* Execute DMAE command */
+ read_using_dmae = !qed_dmae_grc2host(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf),
+ len, &dmae_params);
+ if (!read_using_dmae) {
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
}
+ if (read_using_dmae)
+ goto print_log;
+
/* If not read using DMAE, read using GRC */
/* Set pretend */
- if (split_type != dev_data->pretend.split_type || split_id !=
- dev_data->pretend.split_id) {
+ if (split_type != dev_data->pretend.split_type ||
+ split_id != dev_data->pretend.split_id) {
switch (split_type) {
case SPLIT_TYPE_PORT:
qed_port_pretend(p_hwfn, p_ptt, port_id);
break;
case SPLIT_TYPE_PF:
- fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
case SPLIT_TYPE_PORT_PF:
- fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
break;
case SPLIT_TYPE_VF:
- fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
- (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
+ | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
+ vf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
default:
@@ -2584,6 +1898,16 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
/* Read registers using GRC */
qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+print_log:
+ /* Print log */
+ dev_data->num_regs_read += len;
+ thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
+ if ((dev_data->num_regs_read / thresh) >
+ ((dev_data->num_regs_read - len) / thresh))
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumped %d registers...\n", dev_data->num_regs_read);
+
return len;
}
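The relocated log at print_log uses integer division to detect a threshold crossing: (n / t) > ((n - len) / t) holds exactly when the interval (n - len, n] contains a multiple of t, so a message appears roughly once per threshold's worth of registers however the reads are sized. A tiny demonstration:

#include <stdio.h>

int main(void)
{
	const unsigned int thresh = 1000;
	const unsigned int reads[] = { 300, 600, 250, 900, 2100 };
	unsigned int n = 0, i;

	for (i = 0; i < sizeof(reads) / sizeof(reads[0]); i++) {
		unsigned int len = reads[i];

		n += len;
		/* True iff (n - len, n] contains a multiple of thresh */
		if (n / thresh > (n - len) / thresh)
			printf("dumped %u registers...\n", n);
	}
	return 0;	/* prints at n = 1150, 2050, 4150 */
}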
@@ -2668,7 +1992,7 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_regs_arr,
+ struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
enum init_split_types split_type,
@@ -2681,10 +2005,10 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
*num_dumped_reg_entries = 0;
- while (input_offset < input_regs_arr.size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
- &input_regs_arr.ptr[input_offset++];
+ input_regs_arr.ptr + input_offset++;
u16 modes_buf_offset;
bool eval_mode;
@@ -2707,7 +2031,7 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
- &input_regs_arr.ptr[input_offset];
+ input_regs_arr.ptr + input_offset;
u32 addr, len;
bool wide_bus;
@@ -2732,14 +2056,12 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_regs_arr,
+ struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
enum init_split_types split_type,
- u8 split_id,
- const char *param_name,
- const char *param_val)
+ u8 split_id, const char *reg_type_name)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum init_split_types hdr_split_type = split_type;
@@ -2757,7 +2079,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
false,
0,
hdr_split_type,
- hdr_split_id, param_name, param_val);
+ hdr_split_id, reg_type_name);
/* Dump registers */
offset += qed_grc_dump_regs_entries(p_hwfn,
@@ -2776,7 +2098,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
dump,
num_dumped_reg_entries,
hdr_split_type,
- hdr_split_id, param_name, param_val);
+ hdr_split_id, reg_type_name);
return num_dumped_reg_entries > 0 ? offset : 0;
}
@@ -2789,32 +2111,33 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
- const char *param_name, const char *param_val)
+ const char *reg_type_name)
{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0, input_offset = 0;
- u16 fid;
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
- struct dbg_array curr_input_regs_arr;
+ struct virt_mem_desc curr_input_regs_arr;
enum init_split_types split_type;
u16 split_count = 0;
u32 split_data_size;
u8 split_id;
split_hdr =
- (const struct dbg_dump_split_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ (const struct dbg_dump_split_hdr *)
+ dbg_buf->ptr + input_offset++;
split_type =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- split_data_size =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
curr_input_regs_arr.ptr =
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
- curr_input_regs_arr.size_in_dwords = split_data_size;
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
+ input_offset;
+ curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
switch (split_type) {
case SPLIT_TYPE_NONE:
@@ -2842,16 +2165,16 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
dump, block_enable,
split_type,
split_id,
- param_name,
- param_val);
+ reg_type_name);
input_offset += split_data_size;
}
/* Cancel pretends (pretend to original PF) */
if (dump) {
- fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
- qed_fid_pretend(p_hwfn, p_ptt, fid);
+ qed_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ p_hwfn->rel_pf_id));
dev_data->pretend.split_type = SPLIT_TYPE_NONE;
dev_data->pretend.split_id = 0;
}
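The buffer walked above is a flat sequence of length-prefixed records: read a split header, let the sub-parser consume split_data_size dwords of payload, then jump past the payload to the next header. The same walk with an illustrative one-dword header layout (type in the high half, payload size in the low half), not the driver's dbg_dump_split_hdr encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint32_t buf[] = {
		(1u << 16) | 2, 0xaaaa, 0xbbbb,	/* type 1, 2 payload dwords */
		(2u << 16) | 1, 0xcccc,		/* type 2, 1 payload dword  */
	};
	uint32_t off = 0, total = sizeof(buf) / sizeof(buf[0]);

	while (off < total) {
		uint32_t hdr = buf[off++];
		uint32_t type = hdr >> 16, size = hdr & 0xffff;

		printf("record type %u, %u payload dwords at offset %u\n",
		       type, size, off);
		off += size;	/* skip payload to the next header */
	}
	return 0;
}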
@@ -2864,26 +2187,32 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i, offset = 0, num_regs = 0;
+ u32 offset = 0, num_regs = 0;
+ u8 reset_reg_id;
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0,
- SPLIT_TYPE_NONE, 0, NULL, NULL);
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
/* Write reset registers */
- for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
+
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
continue;
+ reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs[i].addr), 1,
- false, SPLIT_TYPE_NONE, 0);
+ reset_reg_addr,
+ 1, false, SPLIT_TYPE_NONE, 0);
num_regs++;
}
@@ -2891,7 +2220,7 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ 0, "RESET_REGS");
return offset;
}
@@ -2904,21 +2233,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 block_id, offset = 0, num_reg_entries = 0;
+ u32 block_id, offset = 0, stall_regs_offset;
const struct dbg_attn_reg *attn_reg_arr;
u8 storm_id, reg_idx, num_attn_regs;
+ u32 num_reg_entries = 0;
- /* Calculate header size */
+ /* Write empty header for attention registers */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
/* Write parity registers */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id] && dump)
continue;
- attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
@@ -2961,16 +2292,29 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
}
}
+ /* Overwrite header for attention registers */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "ATTN_REGS");
+
+ /* Write empty header for stall registers */
+ stall_regs_offset = offset;
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, SPLIT_TYPE_NONE, 0, "REGS");
+
/* Write Storm stall status registers */
- for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
+ storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 addr;
- if (dev_data->block_in_reset[storm->block_id] && dump)
+ if (dev_data->block_in_reset[storm->sem_block_id] && dump)
continue;
addr =
- BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -2982,12 +2326,12 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
num_reg_entries++;
}
- /* Write header */
+ /* Overwrite header for stall registers */
if (dump)
- qed_grc_dump_regs_hdr(dump_buf,
+ qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
true,
- num_reg_entries, SPLIT_TYPE_NONE,
- 0, NULL, NULL);
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "REGS");
return offset;
}
@@ -3000,8 +2344,7 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
u32 offset = 0, addr;
offset += qed_grc_dump_regs_hdr(dump_buf,
- dump, 2, SPLIT_TYPE_NONE, 0,
- NULL, NULL);
+ dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
* skipped).
@@ -3049,8 +2392,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 len,
u32 bit_width,
bool packed,
- const char *mem_group,
- bool is_storm, char storm_letter)
+ const char *mem_group, char storm_letter)
{
u8 num_params = 3;
u32 offset = 0;
@@ -3071,7 +2413,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
if (name) {
/* Dump name */
- if (is_storm) {
+ if (storm_letter) {
strcpy(buf, "?STORM_");
buf[0] = storm_letter;
strcpy(buf + strlen(buf), name);
@@ -3103,7 +2445,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
dump, "packed", 1);
/* Dump reg type */
- if (is_storm) {
+ if (storm_letter) {
strcpy(buf, "?STORM_");
buf[0] = storm_letter;
strcpy(buf + strlen(buf), mem_group);
@@ -3130,8 +2472,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
bool wide_bus,
u32 bit_width,
bool packed,
- const char *mem_group,
- bool is_storm, char storm_letter)
+ const char *mem_group, char storm_letter)
{
u32 offset = 0;
@@ -3142,8 +2483,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
addr,
len,
bit_width,
- packed,
- mem_group, is_storm, storm_letter);
+ packed, mem_group, storm_letter);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
@@ -3156,20 +2496,21 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
/* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct dbg_array input_mems_arr,
+ struct virt_mem_desc input_mems_arr,
u32 *dump_buf, bool dump)
{
u32 i, offset = 0, input_offset = 0;
bool mode_match = true;
- while (input_offset < input_mems_arr.size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr;
u16 modes_buf_offset;
u32 num_entries;
bool eval_mode;
- cond_hdr = (const struct dbg_dump_cond_hdr *)
- &input_mems_arr.ptr[input_offset++];
+ cond_hdr =
+ (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
+ input_offset++;
num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
/* Check required mode */
@@ -3191,24 +2532,25 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_entries;
i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
const struct dbg_dump_mem *mem =
- (const struct dbg_dump_mem *)
- &input_mems_arr.ptr[input_offset];
- u8 mem_group_id = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_MEM_GROUP_ID);
- bool is_storm = false, mem_wide_bus;
- enum dbg_grc_params grc_param;
- char storm_letter = 'a';
- enum block_id block_id;
+ (const struct dbg_dump_mem *)((u32 *)
+ input_mems_arr.ptr
+ + input_offset);
+ const struct dbg_block *block;
+ char storm_letter = 0;
u32 mem_addr, mem_len;
+ bool mem_wide_bus;
+ u8 mem_group_id;
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
if (mem_group_id >= MEM_GROUPS_NUM) {
DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
return 0;
}
- block_id = (enum block_id)cond_hdr->block_id;
if (!qed_grc_is_mem_included(p_hwfn,
- block_id,
+ (enum block_id)
+ cond_hdr->block_id,
mem_group_id))
continue;
@@ -3217,42 +2559,14 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
mem_wide_bus = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_WIDE_BUS);
- /* Update memory length for CCFC/TCFC memories
- * according to number of LCIDs/LTIDs.
- */
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
- if (mem_len % MAX_LCIDS) {
- DP_NOTICE(p_hwfn,
- "Invalid CCFC connection memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- mem_len = qed_grc_get_param(p_hwfn, grc_param) *
- (mem_len / MAX_LCIDS);
- } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
- if (mem_len % MAX_LTIDS) {
- DP_NOTICE(p_hwfn,
- "Invalid TCFC task memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- mem_len = qed_grc_get_param(p_hwfn, grc_param) *
- (mem_len / MAX_LTIDS);
- }
+ block = get_dbg_block(p_hwfn,
+ cond_hdr->block_id);
- /* If memory is associated with Storm, update Storm
- * details.
+ /* If memory is associated with Storm,
+ * update storm details
*/
- if (s_block_defs
- [cond_hdr->block_id]->associated_to_storm) {
- is_storm = true;
- storm_letter =
- s_storm_defs[s_block_defs
- [cond_hdr->block_id]->
- storm_id].letter;
- }
+ if (block->associated_storm_letter)
+ storm_letter = block->associated_storm_letter;
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3266,7 +2580,6 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
0,
false,
s_mem_group_names[mem_group_id],
- is_storm,
storm_letter);
}
}
@@ -3281,26 +2594,25 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
u32 offset = 0, input_offset = 0;
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
- struct dbg_array curr_input_mems_arr;
+ struct virt_mem_desc curr_input_mems_arr;
enum init_split_types split_type;
u32 split_data_size;
- split_hdr = (const struct dbg_dump_split_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- split_type =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- split_data_size =
- GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- curr_input_mems_arr.ptr =
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
- curr_input_mems_arr.size_in_dwords = split_data_size;
+ split_hdr =
+ (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
+ input_offset++;
+ split_type = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
+ curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
if (split_type == SPLIT_TYPE_NONE)
offset += qed_grc_dump_mem_entries(p_hwfn,
@@ -3328,17 +2640,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
bool dump,
const char *name,
u32 num_lids,
- u32 lid_size,
- u32 rd_reg_addr,
- u8 storm_id)
+ enum cm_ctx_types ctx_type, u8 storm_id)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
struct storm_defs *storm = &s_storm_defs[storm_id];
- u32 i, lid, total_size, offset = 0;
+ u32 i, lid, lid_size, total_size;
+ u32 rd_reg_addr, offset = 0;
+
+ /* Convert quad-regs to dwords */
+ lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
if (!lid_size)
return 0;
- lid_size *= BYTES_IN_DWORD;
total_size = num_lids * lid_size;
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3348,18 +2662,26 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
0,
total_size,
lid_size * 32,
- false, name, true, storm->letter);
+ false, name, storm->letter);
if (!dump)
return offset + total_size;
+ rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
+
/* Dump context data */
for (lid = 0; lid < num_lids; lid++) {
- for (i = 0; i < lid_size; i++, offset++) {
+ for (i = 0; i < lid_size; i++) {
qed_wr(p_hwfn,
p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt, rd_reg_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rd_reg_addr,
+ 1,
+ false,
+ SPLIT_TYPE_NONE, 0);
}
}
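qed_grc_dump_ctx_data() reads Storm context through an indirect window: write (i << 9) | lid to the select register, then read one dword back. A standalone sketch of that select/read idiom, with a small array standing in for the internal context RAM:

#include <stdint.h>
#include <stdio.h>

#define LID_SIZE 4	/* dwords per context entry */

static uint32_t ctx_mem[8][LID_SIZE];	/* pretend internal memory */
static uint32_t sel;			/* pretend select register */

static void wr_select(uint32_t v) { sel = v; }

static uint32_t rd_data(void)
{
	/* lid in the low 9 bits, dword index above them */
	return ctx_mem[sel & 0x1ff][sel >> 9];
}

int main(void)
{
	uint32_t lid, i;

	ctx_mem[2][3] = 0xdead;
	for (lid = 0; lid < 8; lid++)
		for (i = 0; i < LID_SIZE; i++) {
			wr_select((i << 9) | lid);
			if (rd_data())
				printf("lid %u dword %u = 0x%x\n",
				       lid, i, rd_data());
		}
	return 0;
}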
@@ -3370,115 +2692,126 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- enum dbg_grc_params grc_param;
u32 offset = 0;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
-
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id))
continue;
/* Dump Conn AG context size */
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "CONN_AG_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_conn_ag_ctx_lid_size,
- storm->cm_conn_ag_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_AG, storm_id);
/* Dump Conn ST context size */
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "CONN_ST_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_conn_st_ctx_lid_size,
- storm->cm_conn_st_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_ST, storm_id);
/* Dump Task AG context size */
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "TASK_AG_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_task_ag_ctx_lid_size,
- storm->cm_task_ag_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_AG, storm_id);
/* Dump Task ST context size */
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- offset +=
- qed_grc_dump_ctx_data(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- "TASK_ST_CTX",
- qed_grc_get_param(p_hwfn,
- grc_param),
- storm->cm_task_st_ctx_lid_size,
- storm->cm_task_st_ctx_rd_addr,
- storm_id);
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_ST, storm_id);
}
return offset;
}
-/* Dumps GRC IORs data. Returns the dumped size in dwords. */
-static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
-{
- char buf[10] = "IOR_SET_?";
- u32 addr, offset = 0;
- u8 storm_id, set_id;
+#define VFC_STATUS_RESP_READY_BIT 0
+#define VFC_STATUS_BUSY_BIT 1
+#define VFC_STATUS_SENDING_CMD_BIT 2
- for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
+#define VFC_POLLING_DELAY_MS 1
+#define VFC_POLLING_COUNT 20
- if (!qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id))
- continue;
+/* Reads data from VFC. Returns the number of dwords read (0 on error).
+ * Sizes are specified in dwords.
+ */
+static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct storm_defs *storm,
+ u32 *cmd_data,
+ u32 cmd_size,
+ u32 *addr_data,
+ u32 addr_size,
+ u32 resp_size, u32 *dump_buf)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 vfc_status, polling_ms, polling_count = 0, i;
+ u32 reg_addr, sem_base;
+ bool is_ready = false;
+
+ sem_base = storm->sem_fast_mem_addr;
+ polling_ms = VFC_POLLING_DELAY_MS *
+ s_hw_type_defs[dev_data->hw_type].delay_factor;
+
+ /* Write VFC command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_DATA_WR,
+ cmd_data, cmd_size);
+
+ /* Write VFC address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_ADDR,
+ addr_data, addr_size);
+
+ /* Read response */
+ for (i = 0; i < resp_size; i++) {
+ /* Poll until ready */
+ do {
+ reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ &vfc_status,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1,
+ false, SPLIT_TYPE_NONE, 0);
+ is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
+
+ if (!is_ready) {
+ if (polling_count++ == VFC_POLLING_COUNT)
+ return 0;
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE) +
- IOR_SET_OFFSET(set_id);
- if (strlen(buf) > 0)
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- false,
- 32,
- false,
- "ior",
- true,
- storm->letter);
- }
+ msleep(polling_ms);
+ }
+ } while (!is_ready);
+
+ reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + i,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1, false, SPLIT_TYPE_NONE, 0);
}
- return offset;
+ return resp_size;
}
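Note how the new helper bounds its wait: it polls the status register for the response-ready bit, sleeping between attempts, and returns 0 once VFC_POLLING_COUNT tries are exhausted. The bounded-polling idiom in isolation, where read_status_ready() is a stand-in that becomes ready on the fifth read:

#include <stdbool.h>
#include <stdio.h>

#define POLL_COUNT 20

static int attempts;

/* Pretend hardware: ready bit appears on the 5th status read */
static bool read_status_ready(void)
{
	return ++attempts >= 5;
}

static bool wait_ready(void)
{
	unsigned int tries = 0;

	while (!read_status_ready()) {
		if (++tries == POLL_COUNT)
			return false;	/* timed out */
		/* in the driver: msleep(polling_ms) goes here */
	}
	return true;
}

int main(void)
{
	if (wait_ready())
		printf("ready after %d status reads\n", attempts);
	else
		printf("timed out\n");
	return 0;
}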
/* Dump VFC CAM. Returns the dumped size in dwords. */
@@ -3490,7 +2823,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
- u32 row, i, offset = 0;
+ u32 row, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3499,7 +2832,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
0,
total_size,
256,
- false, "vfc_cam", true, storm->letter);
+ false, "vfc_cam", storm->letter);
if (!dump)
return offset + total_size;
@@ -3507,26 +2840,18 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
/* Prepare CAM address */
SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
- for (row = 0; row < VFC_CAM_NUM_ROWS;
- row++, offset += VFC_CAM_RESP_DWORDS) {
- /* Write VFC CAM command */
+ /* Read VFC CAM data */
+ for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
- cam_cmd, VFC_CAM_CMD_DWORDS);
-
- /* Write VFC CAM address */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
- cam_addr, VFC_CAM_ADDR_DWORDS);
-
- /* Read VFC CAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ cam_cmd,
+ VFC_CAM_CMD_DWORDS,
+ cam_addr,
+ VFC_CAM_ADDR_DWORDS,
+ VFC_CAM_RESP_DWORDS,
+ dump_buf + offset);
}
return offset;
@@ -3543,7 +2868,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
- u32 row, i, offset = 0;
+ u32 row, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3554,35 +2879,27 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
256,
false,
ram_defs->type_name,
- true, storm->letter);
-
- /* Prepare RAM address */
- SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+ storm->letter);
if (!dump)
return offset + total_size;
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ /* Read VFC RAM data */
for (row = ram_defs->base_row;
- row < ram_defs->base_row + ram_defs->num_rows;
- row++, offset += VFC_RAM_RESP_DWORDS) {
- /* Write VFC RAM command */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
- ram_cmd, VFC_RAM_CMD_DWORDS);
-
- /* Write VFC RAM address */
+ row < ram_defs->base_row + ram_defs->num_rows; row++) {
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
- ram_addr, VFC_RAM_ADDR_DWORDS);
-
- /* Read VFC RAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ ram_cmd,
+ VFC_RAM_CMD_DWORDS,
+ ram_addr,
+ VFC_RAM_ADDR_DWORDS,
+ VFC_RAM_RESP_DWORDS,
+ dump_buf + offset);
}
return offset;
@@ -3592,16 +2909,13 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 storm_id, i;
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id) ||
- !s_storm_defs[storm_id].has_vfc ||
- (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
- PLATFORM_ASIC))
+ !s_storm_defs[storm_id].has_vfc)
continue;
/* Read CAM */
@@ -3651,7 +2965,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
total_dwords,
rss_defs->entry_width,
packed,
- rss_defs->type_name, false, 0);
+ rss_defs->type_name, 0);
/* Dump RSS data */
if (!dump) {
@@ -3711,7 +3025,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
0,
ram_size,
block_size * 8,
- false, type_name, false, 0);
+ false, type_name, 0);
/* Read and dump Big RAM data */
if (!dump)
@@ -3737,6 +3051,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps MCP scratchpad. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
@@ -3758,8 +3073,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
- MCP_REG_SCRATCH_SIZE_BB_K2,
- false, 0, false, "MCP", false, 0);
+ MCP_REG_SCRATCH_SIZE,
+ false, 0, false, "MCP", 0);
/* Dump MCP cpu_reg_file */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3769,19 +3084,19 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
- false, 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", 0);
/* Dump MCP registers */
block_enable[BLOCK_MCP] = true;
offset += qed_grc_dump_registers(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, block_enable, "block", "MCP");
+ dump, block_enable, "MCP");
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, SPLIT_TYPE_NONE, 0,
- "block", "MCP");
+ "MCP");
addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -3798,7 +3113,9 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Dumps the tbus indirect memory for all PHYs. */
+/* Dumps the tbus indirect memory for all PHYs.
+ * Returns the dumped size in dwords.
+ */
static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
@@ -3832,7 +3149,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
mem_name,
0,
PHY_DUMP_SIZE_DWORDS,
- 16, true, mem_name, false, 0);
+ 16, true, mem_name, 0);
if (!dump) {
offset += PHY_DUMP_SIZE_DWORDS;
@@ -3863,21 +3180,58 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block_id,
- u8 line_id,
- u8 enable_mask,
- u8 right_shift,
- u8 force_valid_mask, u8 force_frame_mask)
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes);
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf);
+
+/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
{
- struct block_defs *block = s_block_defs[block_id];
+ u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
+ u32 hw_dump_size_dwords = 0, offset = 0;
+ enum dbg_status status;
- qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
- qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
- qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
- qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
+ /* Read HW dump image from NVRAM */
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ NVM_TYPE_HW_DUMP_OUT,
+ &hw_dump_offset_bytes,
+ &hw_dump_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return 0;
+
+ hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
+
+ /* Dump HW dump image section */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_hw_dump", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", hw_dump_size_dwords);
+
+ /* Read MCP HW dump image into dump buffer */
+ if (dump && hw_dump_size_dwords) {
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ hw_dump_offset_bytes,
+ hw_dump_size_bytes, dump_buf + offset);
+ if (status != DBG_STATUS_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to read MCP HW Dump image from NVRAM\n");
+ return 0;
+ }
+ }
+ offset += hw_dump_size_dwords;
+
+ return offset;
}
/* Dumps Static Debug data. Returns the dumped size in dwords. */
@@ -3886,26 +3240,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 block_id, line_id, offset = 0;
+ u32 block_id, line_id, offset = 0, addr, len;
/* Don't dump static debug if a debug bus recording is in progress */
if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
return 0;
if (dump) {
- /* Disable all blocks debug output */
- for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
-
- if (block->dbg_client_id[dev_data->chip_id] !=
- MAX_DBG_BUS_CLIENTS)
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
- 0);
- }
+ /* Disable debug bus in all blocks */
+ qed_bus_disable_blocks(p_hwfn, p_ptt);
qed_bus_reset_dbg_block(p_hwfn, p_ptt);
- qed_bus_set_framing_mode(p_hwfn,
- p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
qed_wr(p_hwfn,
p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
@@ -3914,28 +3261,48 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Dump all static debug lines for each relevant block */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- struct block_defs *block = s_block_defs[block_id];
- struct dbg_bus_block *block_desc;
- u32 block_dwords, addr, len;
- u8 dbg_client_id;
+ const struct dbg_block_chip *block_per_chip;
+ const struct dbg_block *block;
+ bool is_removed, has_dbg_bus;
+ u16 modes_buf_offset;
+ u32 block_dwords;
+
+ block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
+ is_removed = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED);
+ has_dbg_bus = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS);
- if (block->dbg_client_id[dev_data->chip_id] ==
- MAX_DBG_BUS_CLIENTS)
+ /* read+clear for NWS parity is not working, skip NWS block */
+ if (block_id == BLOCK_NWS)
continue;
- block_desc = get_dbg_bus_block_desc(p_hwfn,
- (enum block_id)block_id);
- block_dwords = NUM_DBG_LINES(block_desc) *
+ if (!is_removed && has_dbg_bus &&
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0) {
+ modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ has_dbg_bus = false;
+ }
+
+ if (is_removed || !has_dbg_bus)
+ continue;
+
+ block_dwords = NUM_DBG_LINES(block_per_chip) *
STATIC_DEBUG_LINE_DWORDS;
/* Dump static section params */
+ block = get_dbg_block(p_hwfn, (enum block_id)block_id);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
block->name,
0,
block_dwords,
- 32, false, "STATIC", false, 0);
+ 32, false, "STATIC", 0);
if (!dump) {
offset += block_dwords;
@@ -3951,20 +3318,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
}
/* Enable block's client */
- dbg_client_id = block->dbg_client_id[dev_data->chip_id];
qed_bus_enable_clients(p_hwfn,
p_ptt,
- BIT(dbg_client_id));
+ BIT(block_per_chip->dbg_client_id));
addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
len = STATIC_DEBUG_LINE_DWORDS;
- for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
line_id++) {
/* Configure debug line ID */
- qed_config_dbg_line(p_hwfn,
- p_ptt,
- (enum block_id)block_id,
- (u8)line_id, 0xf, 0, 0, 0);
+ qed_bus_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
/* Read debug line info */
offset += qed_grc_dump_addr_range(p_hwfn,
@@ -3979,7 +3345,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Disable block's client and debug output */
qed_bus_enable_clients(p_hwfn, p_ptt, 0);
- qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
+ qed_bus_config_dbg_line(p_hwfn, p_ptt,
+ (enum block_id)block_id, 0, 0, 0, 0, 0);
}
if (dump) {
@@ -3999,8 +3366,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 dwords_read, offset = 0;
bool parities_masked = false;
- u32 offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -4019,13 +3386,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"num-lcids",
- qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS));
+ NUM_OF_LCIDS);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"num-ltids",
- qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS));
+ NUM_OF_LTIDS);
offset += qed_dump_num_param(dump_buf + offset,
dump, "num-ports", dev_data->num_ports);
@@ -4037,7 +3402,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
/* Take all blocks out of reset (using reset registers) */
if (dump) {
- qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
qed_update_blocks_reset_state(p_hwfn, p_ptt);
}
@@ -4080,7 +3445,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf +
offset,
dump,
- block_enable, NULL, NULL);
+ block_enable, NULL);
/* Dump special registers */
offset += qed_grc_dump_special_regs(p_hwfn,
@@ -4114,23 +3479,29 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump, i);
- /* Dump IORs */
- if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
- offset += qed_grc_dump_iors(p_hwfn,
- p_ptt, dump_buf + offset, dump);
-
/* Dump VFC */
- if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
- offset += qed_grc_dump_vfc(p_hwfn,
- p_ptt, dump_buf + offset, dump);
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
+ dwords_read = qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += dwords_read;
+ if (!dwords_read)
+ return DBG_STATUS_VFC_READ_ERROR;
+ }
/* Dump PHY tbus */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
- CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
offset += qed_grc_dump_phy(p_hwfn,
p_ptt, dump_buf + offset, dump);
+ /* Dump MCP HW Dump */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
+ offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
/* Dump static debug data (only if not during debug bus recording) */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_STATIC) &&
@@ -4181,8 +3552,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
u8 reg_id;
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
- regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
cond_regs = &regs[0].cond_reg;
info_regs = &regs[rule->num_cond_regs].info_reg;
@@ -4202,8 +3574,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
struct dbg_idle_chk_result_reg_hdr *reg_hdr;
- reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
- (dump_buf + offset);
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
/* Write register header */
if (!dump) {
@@ -4320,12 +3692,13 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
const u32 *imm_values;
rule = &input_rules[i];
- regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
- [rule->reg_offset];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
cond_regs = &regs[0].cond_reg;
- imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
- [rule->imm_offset];
+ imm_values =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
+ rule->imm_offset;
/* Check if all condition register blocks are out of reset, and
* find maximal number of entries (all condition registers that
@@ -4443,10 +3816,12 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- u32 num_failing_rules_offset, offset = 0, input_offset = 0;
- u32 num_failing_rules = 0;
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
+ u32 num_failing_rules_offset, offset = 0,
+ input_offset = 0, num_failing_rules = 0;
- /* Dump global params */
+ /* Dump global params - the "1" must match the number of params dumped below */
offset += qed_dump_common_global_params(p_hwfn,
p_ptt,
dump_buf + offset, dump, 1);
@@ -4458,12 +3833,10 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_failing_rules_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
- while (input_offset <
- s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_idle_chk_cond_hdr *cond_hdr =
- (const struct dbg_idle_chk_cond_hdr *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
- [input_offset++];
+ (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
+ input_offset++;
bool eval_mode, mode_match = true;
u32 curr_failing_rules;
u16 modes_buf_offset;
@@ -4480,16 +3853,21 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
+ const struct dbg_idle_chk_rule *rule =
+ (const struct dbg_idle_chk_rule *)((u32 *)
+ dbg_buf->ptr
+ + input_offset);
+ u32 num_input_rules =
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
offset +=
qed_idle_chk_dump_rule_entries(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- (const struct dbg_idle_chk_rule *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
- ptr[input_offset],
- cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
- &curr_failing_rules);
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ rule,
+ num_input_rules,
+ &curr_failing_rules);
num_failing_rules += curr_failing_rules;
}
@@ -4556,7 +3934,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
{
u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0;
+ u32 read_offset = 0, param = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
@@ -4569,14 +3947,14 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
/* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM,
- (nvram_offset_bytes +
- read_offset) |
- (bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_OFFSET),
- &ret_mcp_resp, &ret_mcp_param,
- &ret_read_size,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset)))
return DBG_STATUS_NVRAM_READ_FAILED;
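The loop this hunk belongs to copies NVRAM in chunks of at most MCP_DRV_NVM_BUF_LEN bytes, packing the offset and length of each chunk into the mailbox parameter and advancing until bytes_left is exhausted. The chunking pattern on its own, with nvm_read_cmd() standing in for the mailbox command:

#include <stdio.h>
#include <string.h>

#define CHUNK 32	/* stand-in for MCP_DRV_NVM_BUF_LEN */

/* Stand-in for one mailbox read of up to CHUNK bytes */
static void nvm_read_cmd(const char *nvm, unsigned int off,
			 unsigned int len, char *dst)
{
	memcpy(dst, nvm + off, len);
}

int main(void)
{
	char nvm[100], out[100];
	int bytes_left = (int)sizeof(nvm);
	unsigned int read_off = 0;

	memset(nvm, 0xab, sizeof(nvm));
	while (bytes_left > 0) {
		unsigned int n = bytes_left > CHUNK ? CHUNK : bytes_left;

		nvm_read_cmd(nvm, read_off, n, out + read_off);
		read_off += n;
		bytes_left -= n;
	}
	printf("read %u bytes in %d-byte chunks\n", read_off, CHUNK);
	return 0;
}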
@@ -4714,12 +4092,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
- bool mcp_access;
int halted = 0;
+ bool use_mfw;
*num_dumped_dwords = 0;
- mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+ use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
/* Get trace data info */
status = qed_mcp_trace_get_data_info(p_hwfn,
@@ -4740,7 +4118,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
* consistent. if halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump && mcp_access) {
+ if (dump && use_mfw) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4780,17 +4158,15 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
*/
trace_meta_size_bytes =
qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
- if ((!trace_meta_size_bytes || dump) && mcp_access) {
+ if ((!trace_meta_size_bytes || dump) && use_mfw)
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
trace_data_size_bytes,
&running_bundle_id,
&trace_meta_offset_bytes,
&trace_meta_size_bytes);
- if (status == DBG_STATUS_OK)
- trace_meta_size_dwords =
- BYTES_TO_DWORDS(trace_meta_size_bytes);
- }
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
/* Dump trace meta size param */
offset += qed_dump_num_param(dump_buf + offset,
@@ -4814,7 +4190,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* If no mcp access, indicate that the dump doesn't contain the meta
* data from NVRAM.
*/
- return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
/* Dump GRC FIFO */
@@ -4992,16 +4368,18 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
override_window_dwords =
qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
PROTECTION_OVERRIDE_ELEMENT_DWORDS;
- addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- true,
- addr,
- override_window_dwords,
- true, SPLIT_TYPE_NONE, 0);
- qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
- override_window_dwords);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true, SPLIT_TYPE_NONE, 0);
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+ }
out:
/* Dump last section */
offset += qed_dump_last_section(dump_buf, offset, dump);
@@ -5037,7 +4415,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
u32 last_list_idx, addr;
- if (dev_data->block_in_reset[storm->block_id])
+ if (dev_data->block_in_reset[storm->sem_block_id])
continue;
/* Read FW info for the current Storm */
@@ -5088,20 +4466,362 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps the specified ILT pages to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
+ bool dump,
+ u32 start_page_id,
+ u32 num_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids)
+{
+ u32 page_id, end_page_id, offset = 0;
+
+ if (num_pages == 0)
+ return offset;
+
+ end_page_id = start_page_id + num_pages - 1;
+
+ for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
+ struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
+
+ /* Optional sanity check (disabled here): stop once page_id
+ * reaches the ILT shadow size.
+ */
+
+ if (!ilt_pages[page_id].virt_addr)
+ continue;
+
+ if (dump_page_ids) {
+ /* Copy page ID to dump buffer */
+ if (dump)
+ *(dump_buf + offset) = page_id;
+ offset++;
+ } else {
+ /* Copy page memory to dump buffer */
+ if (dump)
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr, mem_desc->size);
+ offset += BYTES_TO_DWORDS(mem_desc->size);
+ }
+ }
+
+ return offset;
+}
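Like most dump helpers in this file, the function above is two-pass: with dump=false it only accumulates the size in dwords, and with dump=true it also writes the data. A minimal caller sketch under that convention (illustrative only; start_page_id, num_pages and ilt_pages are assumed in scope, and the vmalloc error path is omitted):

	u32 size_dw, written_dw;
	u32 *buf;

	/* Pass 1: dump=false, the NULL buffer is never dereferenced */
	size_dw = qed_ilt_dump_pages_range(NULL, false, start_page_id,
					   num_pages, ilt_pages, false);

	/* Pass 2: dump=true writes the same layout into a real buffer */
	buf = vmalloc(size_dw * sizeof(u32));
	written_dw = qed_ilt_dump_pages_range(buf, true, start_page_id,
					      num_pages, ilt_pages, false);
	/* written_dw == size_dw by construction */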
+
+/* Dumps a section containing the dumped ILT pages.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ u32 valid_conn_pf_pages,
+ u32 valid_conn_vf_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 pf_start_line, start_page_id, offset = 0;
+ u32 cdut_pf_init_pages, cdut_vf_init_pages;
+ u32 cdut_pf_work_pages, cdut_vf_work_pages;
+ u32 base_data_offset, size_param_offset;
+ u32 cdut_pf_pages, cdut_vf_pages;
+ const char *section_name;
+ u8 i;
+
+ section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
+ cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
+ cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
+ cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
+ cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
+ cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
+ cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
+ pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+
+ offset +=
+ qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+
+ /* Dump size parameter (0 for now, overwritten with real size later) */
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ base_data_offset = offset;
+
+ /* CDUC pages are ordered as follows:
+ * - PF pages - valid section (included in PF connection type mapping)
+ * - PF pages - invalid section (not dumped)
+ * - For each VF in the PF:
+ * - VF pages - valid section (included in VF connection type mapping)
+ * - VF pages - invalid section (not dumped)
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
+ /* Dump connection PF pages */
+ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ valid_conn_pf_pages,
+ ilt_pages, dump_page_ids);
+
+ /* Dump connection VF pages */
+ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages,
+ dump_page_ids);
+ }
+
+ /* CDUT pages are ordered as follows:
+ * - PF init pages (not dumped)
+ * - PF work pages
+ * - For each VF in the PF:
+ * - VF init pages (not dumped)
+ * - VF work pages
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
+ /* Dump task PF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_init_pages - pf_start_line;
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ cdut_pf_work_pages,
+ ilt_pages, dump_page_ids);
+
+ /* Dump task VF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += cdut_vf_pages)
+ offset += qed_ilt_dump_pages_range(dump_buf + offset,
+ dump,
+ start_page_id,
+ cdut_vf_work_pages,
+ ilt_pages,
+ dump_page_ids);
+ }
+
+ /* Overwrite size param */
+ if (dump)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ dump, "size", offset - base_data_offset);
+
+ return offset;
+}
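To make the CDUT index arithmetic above concrete, here is an illustrative shadow layout with made-up counts (none of these numbers come from a real configuration):

	/* Hypothetical: 2 PF init, 4 PF work, 1 VF init, 2 VF work pages,
	 * 2 VFs, and first.val == pf_start_line:
	 *
	 *   pages  0..1   PF init    (skipped)
	 *   pages  2..5   PF work    (dumped)
	 *   page   6      VF0 init   (skipped)
	 *   pages  7..8   VF0 work   (dumped)
	 *   page   9      VF1 init   (skipped)
	 *   pages 10..11  VF1 work   (dumped)
	 *
	 * cdut_pf_pages = 2 + 4 = 6 and cdut_vf_pages = 1 + 2 = 3, so the
	 * VF loop starts at page 6 + 1 = 7 and advances 3 pages per VF,
	 * matching the start_page_id computations above.
	 */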
+
+/* Performs ILT Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
+ u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ struct phys_mem_desc *ilt_pages;
+ u8 conn_type;
+
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+
+ /* Dump global params - 22 must match number of params below */
+ offset += qed_dump_common_global_params(p_hwfn, p_ptt,
+ dump_buf + offset, dump, 22);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "ilt-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-page-size", cduc_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-first-page-id",
+ clients[ILT_CLI_CDUC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-last-page-id",
+ clients[ILT_CLI_CDUC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-pf-pages",
+				    clients[ILT_CLI_CDUC].pf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-vf-pages",
+				    clients[ILT_CLI_CDUC].vf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-conn-ctx-size",
+ conn_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-page-size", cdut_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-first-page-id",
+ clients[ILT_CLI_CDUT].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-last-page-id",
+ clients[ILT_CLI_CDUT].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-init-pages",
+ qed_get_cdut_num_pf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-init-pages",
+ qed_get_cdut_num_vf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-work-pages",
+ qed_get_cdut_num_pf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-work-pages",
+ qed_get_cdut_num_vf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-task-ctx-size",
+ p_hwfn->p_cxt_mngr->task_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "task-type-id",
+ p_hwfn->p_cxt_mngr->task_type_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "first-vf-id-in-pf",
+ p_hwfn->p_cxt_mngr->first_vf_in_pf);
+ offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes", sizeof(void *));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "pf-start-line",
+ p_hwfn->p_cxt_mngr->pf_start_line);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "page-mem-desc-size-dwords",
+ PAGE_MEM_DESC_SIZE_DWORDS);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ilt-shadow-size",
+ p_hwfn->p_cxt_mngr->ilt_shadow_size);
+	/* Adding or removing parameters above requires updating the count
+	 * passed to qed_dump_common_global_params()
+	 */
+
+ /* Dump section containing number of PF CIDs per connection type */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_pf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES_E4);
+ for (conn_type = 0, valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
+ u32 num_pf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
+
+ if (dump)
+ *(dump_buf + offset) = num_pf_cids;
+ valid_conn_pf_cids += num_pf_cids;
+ }
+
+ /* Dump section containing number of VF CIDs per connection type */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_vf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES_E4);
+ for (conn_type = 0, valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
+ u32 num_vf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
+
+ if (dump)
+ *(dump_buf + offset) = num_vf_cids;
+ valid_conn_vf_cids += num_vf_cids;
+ }
+
+ /* Dump section containing physical memory descs for each ILT page */
+ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "ilt_page_desc", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+
+ /* Copy memory descriptors to dump buffer */
+ if (dump) {
+ u32 page_id;
+
+ for (page_id = 0; page_id < num_pages;
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
+ }
+
+ valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
+ num_cids_per_page);
+ valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
+ num_cids_per_page);
+
+ /* Dump ILT pages IDs */
+ offset += qed_ilt_dump_pages_section(p_hwfn,
+ dump_buf + offset,
+ dump,
+ valid_conn_pf_pages,
+ valid_conn_vf_pages,
+ ilt_pages, true);
+
+ /* Dump ILT pages memory */
+ offset += qed_ilt_dump_pages_section(p_hwfn,
+ dump_buf + offset,
+ dump,
+ valid_conn_pf_pages,
+ valid_conn_vf_pages,
+ ilt_pages, false);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ return offset;
+}
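The valid-page math rounds up. A worked example with illustrative values (not taken from any real device):

	/* cduc_page_size    = 4096 bytes
	 * conn_ctx_size     = 320 bytes
	 * num_cids_per_page = 4096 / 320 = 12   (integer division)
	 *
	 * valid_conn_pf_cids  = 100
	 * valid_conn_pf_pages = DIV_ROUND_UP(100, 12) = 9
	 */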
+
/***************************** Public Functions *******************************/
-enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
{
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- /* convert binary data to debug arrays */
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
return DBG_STATUS_OK;
}
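The loop above assumes the firmware debug blob starts with an array of struct bin_buffer_hdr, one entry per bin_dbg_buffer_type, each giving the byte offset and length of its buffer within the blob. A hedged sketch of extracting a single buffer by hand:

	/* Sketch: locate the parsing-strings buffer inside the blob */
	const struct bin_buffer_hdr *hdr =
		&((const struct bin_buffer_hdr *)bin_ptr)
		[BIN_BUF_DBG_PARSING_STRINGS];
	const char *parsing_strings = (const char *)(bin_ptr + hdr->offset);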
@@ -5116,7 +4836,7 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct storm_defs *storm = &s_storm_defs[storm_id];
/* Skip Storm if it's in reset */
- if (dev_data->block_in_reset[storm->block_id])
+ if (dev_data->block_in_reset[storm->sem_block_id])
continue;
/* Read FW info for the current Storm */
@@ -5129,16 +4849,17 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
}
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum dbg_grc_params grc_param, u32 val)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum dbg_status status;
int i;
- DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
"dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
- status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ status = qed_dbg_dev_init(p_hwfn);
if (status != DBG_STATUS_OK)
return status;
@@ -5164,24 +4885,23 @@ enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
/* Update all params with the preset values */
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
+ struct grc_param_defs *defs = &s_grc_param_defs[i];
u32 preset_val;
-
/* Skip persistent params */
- if (s_grc_param_defs[i].is_persistent)
+ if (defs->is_persistent)
continue;
/* Find preset value */
if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
preset_val =
- s_grc_param_defs[i].exclude_all_preset_val;
+ defs->exclude_all_preset_val;
else if (grc_param == DBG_GRC_PARAM_CRASH)
preset_val =
- s_grc_param_defs[i].crash_preset_val;
+ defs->crash_preset_val[dev_data->chip_id];
else
return DBG_STATUS_INVALID_ARGS;
- qed_grc_set_param(p_hwfn,
- (enum dbg_grc_params)i, preset_val);
+ qed_grc_set_param(p_hwfn, i, preset_val);
}
} else {
/* Regular param - set its value */
@@ -5207,18 +4927,18 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
@@ -5258,20 +4978,19 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
u32 *buf_size)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct idle_chk_data *idle_chk;
+ struct idle_chk_data *idle_chk = &dev_data->idle_chk;
enum dbg_status status;
- idle_chk = &dev_data->idle_chk;
*buf_size = 0;
- status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ status = qed_dbg_dev_init(p_hwfn);
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
if (!idle_chk->buf_size_set) {
@@ -5306,6 +5025,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
/* Update reset state */
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Idle Check Dump */
@@ -5321,7 +5041,7 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5368,7 +5088,7 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5414,7 +5134,7 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5460,7 +5180,7 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5510,7 +5230,7 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
*buf_size = 0;
@@ -5554,6 +5274,50 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+
+	/* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum block_id block_id,
@@ -5561,19 +5325,20 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
bool clear_status,
struct dbg_attn_block_result *results)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
u8 reg_idx, num_attn_regs, num_result_regs = 0;
const struct dbg_attn_reg *attn_reg_arr;
if (status != DBG_STATUS_OK)
return status;
- if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- attn_reg_arr = qed_get_block_attn_regs(block_id,
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ block_id,
attn_type, &num_attn_regs);
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
@@ -5618,7 +5383,7 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
results->block_id = (u8)block_id;
results->names_offset =
- qed_get_block_attn_data(block_id, attn_type)->names_offset;
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
SET_FIELD(results->data,
DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
@@ -5628,11 +5393,6 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
/******************************* Data Types **********************************/
-struct block_info {
- const char *name;
- enum block_id id;
-};
-
/* REG fifo element */
struct reg_fifo_element {
u64 data;
@@ -5656,6 +5416,12 @@ struct reg_fifo_element {
#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
};
+/* REG fifo error element */
+struct reg_fifo_err {
+ u32 err_code;
+ const char *err_msg;
+};
+
/* IGU fifo element */
struct igu_fifo_element {
u32 dword0;
@@ -5755,20 +5521,6 @@ struct igu_fifo_addr_data {
enum igu_fifo_addr_types type;
};
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
@@ -5776,7 +5528,7 @@ struct dbg_tools_user_data {
#define MCP_TRACE_MAX_MODULE_LEN 8
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
- (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+ (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
@@ -5785,107 +5537,6 @@ struct dbg_tools_user_data {
/***************************** Constant Arrays *******************************/
-struct user_dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct user_dbg_array
-s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
-/* Block names array */
-static struct block_info s_block_info_arr[] = {
- {"grc", BLOCK_GRC},
- {"miscs", BLOCK_MISCS},
- {"misc", BLOCK_MISC},
- {"dbu", BLOCK_DBU},
- {"pglue_b", BLOCK_PGLUE_B},
- {"cnig", BLOCK_CNIG},
- {"cpmu", BLOCK_CPMU},
- {"ncsi", BLOCK_NCSI},
- {"opte", BLOCK_OPTE},
- {"bmb", BLOCK_BMB},
- {"pcie", BLOCK_PCIE},
- {"mcp", BLOCK_MCP},
- {"mcp2", BLOCK_MCP2},
- {"pswhst", BLOCK_PSWHST},
- {"pswhst2", BLOCK_PSWHST2},
- {"pswrd", BLOCK_PSWRD},
- {"pswrd2", BLOCK_PSWRD2},
- {"pswwr", BLOCK_PSWWR},
- {"pswwr2", BLOCK_PSWWR2},
- {"pswrq", BLOCK_PSWRQ},
- {"pswrq2", BLOCK_PSWRQ2},
- {"pglcs", BLOCK_PGLCS},
- {"ptu", BLOCK_PTU},
- {"dmae", BLOCK_DMAE},
- {"tcm", BLOCK_TCM},
- {"mcm", BLOCK_MCM},
- {"ucm", BLOCK_UCM},
- {"xcm", BLOCK_XCM},
- {"ycm", BLOCK_YCM},
- {"pcm", BLOCK_PCM},
- {"qm", BLOCK_QM},
- {"tm", BLOCK_TM},
- {"dorq", BLOCK_DORQ},
- {"brb", BLOCK_BRB},
- {"src", BLOCK_SRC},
- {"prs", BLOCK_PRS},
- {"tsdm", BLOCK_TSDM},
- {"msdm", BLOCK_MSDM},
- {"usdm", BLOCK_USDM},
- {"xsdm", BLOCK_XSDM},
- {"ysdm", BLOCK_YSDM},
- {"psdm", BLOCK_PSDM},
- {"tsem", BLOCK_TSEM},
- {"msem", BLOCK_MSEM},
- {"usem", BLOCK_USEM},
- {"xsem", BLOCK_XSEM},
- {"ysem", BLOCK_YSEM},
- {"psem", BLOCK_PSEM},
- {"rss", BLOCK_RSS},
- {"tmld", BLOCK_TMLD},
- {"muld", BLOCK_MULD},
- {"yuld", BLOCK_YULD},
- {"xyld", BLOCK_XYLD},
- {"ptld", BLOCK_PTLD},
- {"ypld", BLOCK_YPLD},
- {"prm", BLOCK_PRM},
- {"pbf_pb1", BLOCK_PBF_PB1},
- {"pbf_pb2", BLOCK_PBF_PB2},
- {"rpb", BLOCK_RPB},
- {"btb", BLOCK_BTB},
- {"pbf", BLOCK_PBF},
- {"rdif", BLOCK_RDIF},
- {"tdif", BLOCK_TDIF},
- {"cdu", BLOCK_CDU},
- {"ccfc", BLOCK_CCFC},
- {"tcfc", BLOCK_TCFC},
- {"igu", BLOCK_IGU},
- {"cau", BLOCK_CAU},
- {"rgfs", BLOCK_RGFS},
- {"rgsrc", BLOCK_RGSRC},
- {"tgfs", BLOCK_TGFS},
- {"tgsrc", BLOCK_TGSRC},
- {"umac", BLOCK_UMAC},
- {"xmac", BLOCK_XMAC},
- {"dbg", BLOCK_DBG},
- {"nig", BLOCK_NIG},
- {"wol", BLOCK_WOL},
- {"bmbn", BLOCK_BMBN},
- {"ipc", BLOCK_IPC},
- {"nwm", BLOCK_NWM},
- {"nws", BLOCK_NWS},
- {"ms", BLOCK_MS},
- {"phy_pcie", BLOCK_PHY_PCIE},
- {"led", BLOCK_LED},
- {"avs_wrap", BLOCK_AVS_WRAP},
- {"pxpreqbus", BLOCK_PXPREQBUS},
- {"misc_aeu", BLOCK_MISC_AEU},
- {"bar0_map", BLOCK_BAR0_MAP}
-};
-
/* Status string array */
static const char * const s_status_str[] = {
/* DBG_STATUS_OK */
@@ -5915,14 +5566,12 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
"A PCI buffer wasn't allocated",
- /* DBG_STATUS_TOO_MANY_INPUTS */
- "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+ /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
+ "The filter/trigger constraint dword offsets are not enabled for recording",
- /* DBG_STATUS_INPUT_OVERLAP */
- "Overlapping debug bus inputs",
- /* DBG_STATUS_HW_ONLY_RECORDING */
- "Cannot record Storm data since the entire recording cycle is used by HW",
+ /* DBG_STATUS_VFC_READ_ERROR */
+ "Error reading from VFC",
/* DBG_STATUS_STORM_ALREADY_ENABLED */
"The Storm was already enabled",
@@ -5939,8 +5588,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_NO_INPUT_ENABLED */
"No input was enabled for recording",
- /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
- "Filters and triggers are not allowed when recording in 64b units",
+ /* DBG_STATUS_NO_FILTER_TRIGGER_256B */
+ "Filters and triggers are not allowed in E4 256-bit mode",
/* DBG_STATUS_FILTER_ALREADY_ENABLED */
"The filter was already enabled",
@@ -6014,8 +5663,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
- /* DBG_STATUS_RESERVED2 */
- "Reserved debug status - shouldn't be returned",
+ /* DBG_STATUS_RESERVED0 */
+ "",
/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
@@ -6038,17 +5687,32 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_DBG_ARRAY_NOT_SET */
"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
- /* DBG_STATUS_FILTER_BUG */
- "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
+ /* DBG_STATUS_RESERVED1 */
+ "",
/* DBG_STATUS_NON_MATCHING_LINES */
- "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
+ "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
- /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
- "The selected trigger dword offset wasn't enabled in the recorded HW block",
+ /* DBG_STATUS_INSUFFICIENT_HW_IDS */
+	"Insufficient HW IDs. Try to record fewer Storms/blocks",
/* DBG_STATUS_DBG_BUS_IN_USE */
- "The debug bus is in use"
+ "The debug bus is in use",
+
+ /* DBG_STATUS_INVALID_STORM_DBG_MODE */
+ "The storm debug mode is not supported in the current chip",
+
+ /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
+ "Other engine is supported only in BB",
+
+ /* DBG_STATUS_FILTER_SINGLE_HW_ID */
+ "The configured filter mode requires a single Storm/block input",
+
+ /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
+	"The configured filter mode requires that all the constraints of a single trigger state be defined on a single Storm/block input",
+
+ /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
+ "When triggering on Storm data, the Storm to trigger on must be specified"
};
/* Idle check severity names array */
@@ -6104,7 +5768,7 @@ static const char * const s_master_strs[] = {
"xsdm",
"dbu",
"dmae",
- "???",
+ "jdap",
"???",
"???",
"???",
@@ -6112,12 +5776,13 @@ static const char * const s_master_strs[] = {
};
/* REG FIFO error messages array */
-static const char * const s_reg_fifo_error_strs[] = {
- "grc timeout",
- "address doesn't belong to any block",
- "reserved address in block or write to read-only address",
- "privilege/protection mismatch",
- "path isolation error"
+static struct reg_fifo_err s_reg_fifo_errors[] = {
+ {1, "grc timeout"},
+ {2, "address doesn't belong to any block"},
+ {4, "reserved address in block or write to read-only address"},
+ {8, "privilege/protection mismatch"},
+ {16, "path isolation error"},
+ {17, "RSL error"}
};
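The entries above are full error codes rather than independent bits: code 17 ("RSL error") shares bits with codes 1 and 16, so the per-bit decoding removed further below would have misreported it as "grc timeout" plus "path isolation error". A minimal lookup mirroring the new parsing loop (hypothetical helper; the driver inlines this logic):

	static const char *qed_reg_fifo_err_msg(u8 err_code)
	{
		u8 j;

		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors); j++)
			if (err_code == s_reg_fifo_errors[j].err_code)
				return s_reg_fifo_errors[j].err_msg;

		return "unknown error code";
	}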
/* IGU FIFO sources array */
@@ -6357,8 +6022,21 @@ static u32 qed_print_section_params(u32 *dump_buf,
return dump_offset;
}
-static struct dbg_tools_user_data *
-qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
+/* Returns the block name that matches the specified block ID,
+ * or NULL if not found.
+ */
+static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block_user *block =
+ (const struct dbg_block_user *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
+
+ return (const char *)block->name;
+}
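The lookup above is plain array indexing: BIN_BUF_DBG_BLOCKS_USER_DATA holds one struct dbg_block_user per block ID, so the cast-then-add selects entry block_id. An equivalent spelling of the same access:

	const struct dbg_block_user *blocks =
		(const struct dbg_block_user *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr;

	return (const char *)blocks[block_id].name;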
+
+static struct dbg_tools_user_data *
+qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
{
return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
}
@@ -6366,7 +6044,8 @@ qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
-static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
u32 *dump_buf_end,
u32 num_rules,
bool print_fw_idle_chk,
@@ -6394,19 +6073,18 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
rule_parsing_data =
- (const struct dbg_idle_chk_rule_parsing_data *)
- &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
- ptr[hdr->rule_id];
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
+ hdr->rule_id;
parsing_str_offset =
- GET_FIELD(rule_parsing_data->data,
- DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
has_fw_msg =
- GET_FIELD(rule_parsing_data->data,
- DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
- parsing_str =
- &((const char *)
- s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
- [parsing_str_offset];
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = (const char *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
+ parsing_str_offset;
lsi_msg = parsing_str;
curr_reg_id = 0;
@@ -6510,7 +6188,8 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes,
@@ -6528,8 +6207,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
*num_errors = 0;
*num_warnings = 0;
- if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Read global_params section */
@@ -6562,7 +6241,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
results_offset),
"FW_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(dump_buf,
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
dump_buf_end,
num_rules,
true,
@@ -6582,7 +6262,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
results_offset),
"\nLSI_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(dump_buf,
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
dump_buf_end,
num_rules,
false,
@@ -6694,9 +6375,8 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
&offset);
- format_len =
- (format_ptr->data &
- MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_len = GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEN);
format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
if (!format_ptr->format_str) {
/* Update number of modules to be released */
@@ -6719,7 +6399,7 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
* trace_buf - MCP trace cyclic buffer
* trace_buf_size - MCP trace cyclic buffer size in bytes
* data_offset - offset in bytes of the data to parse in the MCP trace cyclic
- * buffer.
+ * buffer.
* data_size - size in bytes of data to parse.
* parsed_buf - destination buffer for parsed data.
* parsed_results_bytes - size of parsed data in bytes.
@@ -6764,9 +6444,8 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
/* Skip message if its index doesn't exist in the meta data */
if (format_idx >= meta->formats_num) {
- u8 format_size =
- (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
- MFW_TRACE_PRM_SIZE_SHIFT);
+ u8 format_size = (u8)GET_MFW_FIELD(header,
+ MFW_TRACE_PRM_SIZE);
if (data_size < format_size)
return DBG_STATUS_MCP_TRACE_BAD_DATA;
@@ -6781,11 +6460,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
format_ptr = &meta->formats[format_idx];
for (i = 0,
- param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
- param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
i < MCP_TRACE_FORMAT_MAX_PARAMS;
- i++,
- param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
/* Extract param size (0..3) */
u8 param_size = (u8)((format_ptr->data & param_mask) >>
@@ -6813,12 +6491,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
data_size -= param_size;
}
- format_level = (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
- format_module = (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_MODULE_MASK) >>
- MCP_TRACE_FORMAT_MODULE_SHIFT);
+ format_level = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEVEL);
+ format_module = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_MODULE);
if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
return DBG_STATUS_MCP_TRACE_BAD_DATA;
@@ -6960,7 +6636,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
const char *section_name, *param_name, *param_str_val;
u32 param_num_val, num_section_params, num_elements;
struct reg_fifo_element *elements;
- u8 i, j, err_val, vf_val;
+ u8 i, j, err_code, vf_val;
u32 results_offset = 0;
char vf_str[4];
@@ -6991,7 +6667,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
/* Decode elements */
for (i = 0; i < num_elements; i++) {
- bool err_printed = false;
+ const char *err_msg = NULL;
/* Discover if element belongs to a VF or a PF */
vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
@@ -7000,11 +6676,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
else
sprintf(vf_str, "%d", vf_val);
+ /* Find error message */
+ err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
+ for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
+ if (err_code == s_reg_fifo_errors[j].err_code)
+ err_msg = s_reg_fifo_errors[j].err_msg;
+
/* Add parsed element to parsed buffer */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
elements[i].data,
(u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
@@ -7021,30 +6703,8 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
s_protection_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PROTECTION)],
s_master_strs[GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_MASTER)]);
-
- /* Print errors */
- for (j = 0,
- err_val = GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_ERROR);
- j < ARRAY_SIZE(s_reg_fifo_error_strs);
- j++, err_val >>= 1) {
- if (err_val & 0x1) {
- if (err_printed)
- results_offset +=
- sprintf(qed_get_buf_ptr
- (results_buf,
- results_offset), ", ");
- results_offset +=
- sprintf(qed_get_buf_ptr
- (results_buf, results_offset), "%s",
- s_reg_fifo_error_strs[j]);
- err_printed = true;
- }
- }
-
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ REG_FIFO_ELEMENT_MASTER)],
+ err_msg ? err_msg : "unknown error code");
}
results_offset += sprintf(qed_get_buf_ptr(results_buf,
@@ -7398,27 +7058,28 @@ static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
/***************************** Public Functions *******************************/
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
{
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
/* Convert binary data to debug arrays */
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_user_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_user_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ (enum bin_dbg_buffer_type)buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
return DBG_STATUS_OK;
}
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr)
{
- p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
- GFP_KERNEL);
- if (!p_hwfn->dbg_user_info)
+ *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
+ GFP_KERNEL);
+ if (!(*user_data_ptr))
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
return DBG_STATUS_OK;
@@ -7437,7 +7098,8 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
{
u32 num_errors, num_warnings;
- return qed_parse_idle_chk_dump(dump_buf,
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
num_dumped_dwords,
NULL,
results_buf_size,
@@ -7453,7 +7115,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_idle_chk_dump(dump_buf,
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
num_dumped_dwords,
results_buf,
&parsed_buf_size,
@@ -7624,25 +7287,28 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results)
{
- struct user_dbg_array *block_attn, *pstrings;
const u32 *block_attn_name_offsets;
- enum dbg_attn_type attn_type;
+ const char *attn_name_base;
const char *block_name;
+ enum dbg_attn_type attn_type;
u8 num_regs, i, j;
num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
- attn_type = (enum dbg_attn_type)
- GET_FIELD(results->data,
- DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
- block_name = s_block_info_arr[results->block_id].name;
-
- if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
- !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
+ block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
+ if (!block_name)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
- block_attn_name_offsets = &block_attn->ptr[results->names_offset];
+ block_attn_name_offsets =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
+ results->names_offset;
+
+ attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
/* Go over registers with a non-zero attention status */
for (i = 0; i < num_regs; i++) {
@@ -7653,18 +7319,17 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result = &results->reg_results[i];
num_reg_attn = GET_FIELD(reg_result->data,
DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
- block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
- bit_mapping = &((struct dbg_attn_bit_mapping *)
- block_attn->ptr)[reg_result->block_attn_offset];
-
- pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
+ bit_mapping = (struct dbg_attn_bit_mapping *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
+ reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++) {
+ for (j = 0; j < num_reg_attn; j++, bit_idx++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
- u32 attn_name_offset, sts_addr;
+ u32 attn_name_offset;
+ u32 sts_addr;
/* Check if bit mask should be advanced (due to unused
* bits).
@@ -7676,18 +7341,19 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx))) {
- bit_idx++;
+ if (!(reg_result->sts_val & BIT(bit_idx)))
continue;
- }
- /* Find attention name */
+			/* An attention bit with value=1 was found.
+			 * Find the attention name.
+ */
attn_name_offset =
block_attn_name_offsets[attn_idx_val];
- attn_name = &((const char *)
- pstrings->ptr)[attn_name_offset];
- attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
- "Interrupt" : "Parity";
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
masked_str = reg_result->mask_val & BIT(bit_idx) ?
" [masked]" : "";
sts_addr = GET_FIELD(reg_result->data,
@@ -7695,15 +7361,15 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%s (%s) : %s [address 0x%08x, bit %d]%s\n",
block_name, attn_type_str, attn_name,
- sts_addr, bit_idx, masked_str);
-
- bit_idx++;
+ sts_addr * 4, bit_idx, masked_str);
}
}
return DBG_STATUS_OK;
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7763,7 +7429,10 @@ static struct {
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size},};
+ qed_get_fw_asserts_results_buf_size}, {
+ "ilt",
+ qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL},};
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7846,6 +7515,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
return rc;
}
+#define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
+
/* Generic function for performing the dump of a debug feature. */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -7875,6 +7546,17 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
&buf_size_dwords);
if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
+
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
+ feature->buf_size = 0;
+ DP_NOTICE(p_hwfn->cdev,
+ "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
+ qed_features_lookup[feature_idx].name,
+ buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
+
+ return DBG_STATUS_OK;
+ }
+
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
if (!feature->dump_buf)
@@ -8021,6 +7703,16 @@ int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
}
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
+}
+
+int qed_dbg_ilt_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
+}
+
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes)
{
@@ -8037,9 +7729,17 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
* feature buffer.
*/
#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_SIZE_SHIFT 0
+#define REGDUMP_HEADER_SIZE_MASK 0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT 24
-#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_FEATURE_MASK 0x3f
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_ENGINE_MASK 0x1
+#define REGDUMP_MAX_SIZE 0x1000000
+#define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
+
enum debug_print_features {
OLD_MODE = 0,
IDLE_CHK = 1,
@@ -8053,17 +7753,27 @@ enum debug_print_features {
NVM_CFG1 = 9,
DEFAULT_CFG = 10,
NVM_META = 11,
+ MDUMP = 12,
+ ILT_DUMP = 13,
};
-static u32 qed_calc_regdump_header(enum debug_print_features feature,
+static u32 qed_calc_regdump_header(struct qed_dev *cdev,
+ enum debug_print_features feature,
int engine, u32 feature_size, u8 omit_engine)
{
- /* Insert the engine, feature and mode inside the header and combine it
- * with feature size.
- */
- return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
- (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
- (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+ u32 res = 0;
+
+ SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
+ if (res != feature_size)
+ DP_NOTICE(cdev,
+ "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
+ feature, feature_size);
+
+ SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+ SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
+ SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
+
+ return res;
}
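A worked packing example with illustrative inputs, feature = IDLE_CHK (1), engine 0, omit_engine = 1, feature_size = 0x1234:

	/*   res = 0x00001234                              size, bits 0..23
	 *       | 1 << REGDUMP_HEADER_FEATURE_SHIFT     = 0x01000000
	 *       | 1 << REGDUMP_HEADER_OMIT_ENGINE_SHIFT = 0x40000000
	 *       = 0x41001234
	 *
	 * SET_FIELD() masks the size to its 24-bit field, so the
	 * res != feature_size comparison above flags any feature whose
	 * size needs more than 24 bits.
	 */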
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
@@ -8079,9 +7789,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
- if (cdev->num_hwfns == 1)
+ if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ mutex_lock(&qed_dbg_lock);
+
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
/* Collect idle_chks and grcDump for each hw function */
@@ -8094,7 +7806,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8106,7 +7818,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8118,7 +7830,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(REG_FIFO, cur_engine,
+ qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8130,7 +7842,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8143,7 +7855,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
@@ -8158,25 +7870,45 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(FW_ASSERTS, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, FW_ASSERTS,
+ cur_engine, feature_size,
+ omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
rc);
}
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_val[i] = grc_params[i];
+ feature_size = qed_dbg_ilt_size(cdev);
+ if (!cdev->disable_ilt_dump &&
+ feature_size < ILT_DUMP_MAX_SIZE) {
+ rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, ILT_DUMP,
+ cur_engine,
+ feature_size,
+ omit_engine);
+ offset += feature_size + REGDUMP_HEADER_SIZE;
+ } else {
+ DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
+ rc);
+ }
+ }
/* GRC dump - must be last because when mcp stuck it will
* clutter idle_chk, reg_fifo, ...
*/
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] = grc_params[i];
+
rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ qed_calc_regdump_header(cdev, GRC_DUMP,
+ cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8185,12 +7917,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
qed_set_debug_engine(cdev, org_engine);
+
/* mcp_trace */
rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
@@ -8199,11 +7932,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_CFG1);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_CFG1);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(NVM_CFG1, cur_engine,
+ qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
@@ -8218,7 +7952,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
+ qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
@@ -8234,8 +7968,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size, QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8243,6 +7977,23 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
}
+ /* nvm mdump */
+ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_MDUMP);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
+ }
+
+ mutex_unlock(&qed_dbg_lock);
+
return 0;
}
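The buffer built above is a sequence of (header, payload) records, with sizes in bytes (matching how the offsets advance). A hedged consumer sketch (hypothetical helper, using the REGDUMP_HEADER_* fields defined earlier):

	static void qed_dbg_walk_regdump(const u8 *buf, u32 total_bytes)
	{
		u32 offset = 0;

		while (offset + REGDUMP_HEADER_SIZE <= total_bytes) {
			u32 hdr = *(const u32 *)(buf + offset);
			u32 size = GET_FIELD(hdr, REGDUMP_HEADER_SIZE);
			u32 feature = GET_FIELD(hdr, REGDUMP_HEADER_FEATURE);

			/* dispatch on 'feature'; payload follows header */
			(void)feature;
			offset += REGDUMP_HEADER_SIZE + size;
		}
	}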
@@ -8250,9 +8001,10 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
- u32 regs_len = 0, image_len = 0;
+ u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
+ cdev->disable_ilt_dump = false;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
/* Engine specific */
@@ -8267,6 +8019,12 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
REGDUMP_HEADER_SIZE +
qed_dbg_protection_override_size(cdev) +
REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+
+ ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
+ if (ilt_len < ILT_DUMP_MAX_SIZE) {
+ total_ilt_len += ilt_len;
+ regs_len += ilt_len;
+ }
}
qed_set_debug_engine(cdev, org_engine);
@@ -8282,6 +8040,17 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+
+ if (regs_len > REGDUMP_MAX_SIZE) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "Dump exceeds max size 0x%x, disable ILT dump\n",
+ REGDUMP_MAX_SIZE);
+ cdev->disable_ilt_dump = true;
+ regs_len -= total_ilt_len;
+ }
return regs_len;
}
@@ -8327,9 +8096,8 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8341,6 +8109,10 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
if (rc != DBG_STATUS_OK)
buf_size_dwords = 0;
+ /* Feature will not be dumped if it exceeds maximum size */
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
+ buf_size_dwords = 0;
+
qed_ptt_release(p_hwfn, p_ptt);
qed_feature->buf_size = buf_size_dwords * sizeof(u32);
return qed_feature->buf_size;
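The cap also keeps the byte conversion above from overflowing:

	/* buf_size_dwords <= 0x3FFFFFFF, and
	 * 0x3FFFFFFF * sizeof(u32) = 0xFFFFFFFC, which still fits in the
	 * u32 buf_size; one more dword would wrap to 0.
	 */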
@@ -8360,14 +8132,21 @@ void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
void qed_dbg_pf_init(struct qed_dev *cdev)
{
- const u8 *dbg_values;
+ const u8 *dbg_values = NULL;
+ int i;
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
- qed_dbg_set_bin_ptr((u8 *)dbg_values);
- qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+
+ for_each_hwfn(cdev, i) {
+ qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ }
+
+ /* Set the hwfn to be 0 as default */
+ cdev->dbg_params.engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
@@ -8375,11 +8154,11 @@ void qed_dbg_pf_exit(struct qed_dev *cdev)
struct qed_dbg_feature *feature = NULL;
enum qed_dbg_features feature_idx;
- /* Debug features' buffers may be allocated if debug feature was used
- * but dump wasn't called.
+ /* debug features' buffers may be allocated if debug feature was used
+ * but dump wasn't called
*/
for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
- feature = &cdev->dbg_params.features[feature_idx];
+ feature = &cdev->dbg_features[feature_idx];
if (feature->dump_buf) {
vfree(feature->dump_buf);
feature->dump_buf = NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e47e0e8d75b0..edf99d296bd1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -14,11 +14,13 @@ enum qed_dbg_features {
DBG_FEATURE_IGU_FIFO,
DBG_FEATURE_PROTECTION_OVERRIDE,
DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_ILT,
DBG_FEATURE_NUM
};
/* Forward Declaration */
struct qed_dev;
+struct qed_hwfn;
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
@@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev);
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a1ebc2b1ca0b..03bdd2e26329 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -907,7 +907,7 @@ qed_llh_access_filter(struct qed_hwfn *p_hwfn,
/* Filter value */
addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
- params.flags = QED_DMAE_FLAG_PF_DST;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
params.dst_pfid = pfid;
rc = qed_dmae_host2grc(p_hwfn,
p_ptt,
@@ -1412,6 +1412,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
+ qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1571,7 +1572,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
/* all vports participate in weighted fair queueing */
for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+ qm_info->qm_vport_params[i].wfq = 1;
}
/* initialize qm port params */
@@ -1579,6 +1580,7 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
+ struct qed_dev *cdev = p_hwfn->cdev;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -1588,11 +1590,13 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
&p_hwfn->qm_info.qm_port_params[i];
+ u16 pbf_max_cmd_lines;
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
- p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+ p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
}
}
@@ -2034,9 +2038,8 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
vport = &(qm_info->qm_vport_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
- qm_info->start_vport + i,
- vport->vport_rl, vport->vport_wfq);
+ "vport idx %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->wfq);
for (tc = 0; tc < NUM_OF_TCS; tc++)
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -2049,11 +2052,11 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
pq = &(qm_info->qm_pq_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
qm_info->start_pq + i,
pq->port_id,
pq->vport_id,
- pq->tc_id, pq->wrr_group, pq->rl_valid);
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
}
}
@@ -2103,9 +2106,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!b_rc)
return -EINVAL;
- /* clear the QM_PF runtime phase leftovers from previous init */
- qed_init_clear_rt_data(p_hwfn);
-
/* prepare QM portion of runtime array */
qed_qm_init_pf(p_hwfn, p_ptt, false);
@@ -2346,7 +2346,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
- rc = qed_dbg_alloc_user_data(p_hwfn);
+ rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
if (rc)
goto alloc_err;
}
@@ -2623,7 +2623,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
- params.vport_rl_en = qm_info->vport_rl_en;
+ params.global_rl_en = qm_info->vport_rl_en;
params.vport_wfq_en = qm_info->vport_wfq_en;
params.port_params = qm_info->qm_port_params;
@@ -2891,6 +2891,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
+
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
@@ -3000,8 +3002,10 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
- int rc = 0, i;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
u16 ether_type;
+ int rc = 0, i;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -3102,6 +3106,18 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
*/
qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ fw_overlays = cdev->fw_data->fw_overlays;
+ fw_overlays_len = cdev->fw_data->fw_overlays_len;
+ p_hwfn->fw_overlay_mem =
+ qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
+ fw_overlays_len);
+ if (!p_hwfn->fw_overlay_mem) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate fw overlay memory\n");
+ rc = -ENOMEM;
+ goto load_err;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -3566,8 +3582,10 @@ const char *qed_hw_get_resc_name(enum qed_resources res_id)
return "RDMA_CNQ_RAM";
case QED_ILT:
return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
+ case QED_LL2_RAM_QUEUE:
+ return "LL2_RAM_QUEUE";
+ case QED_LL2_CTX_QUEUE:
+ return "LL2_CTX_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
@@ -3606,18 +3624,46 @@ __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
return 0;
}
+static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
+ {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+ {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+ {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+ {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2},
+ {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+ {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+ {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+ {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+ {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+ {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+ {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+ {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+ {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
+{
+ enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+
+ if (type >= QED_NUM_HSI_DEFS) {
+ DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
+ return 0;
+ }
+
+ return qed_hsi_def_val[type][chip_id];
+}
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
-
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
- case QED_LL2_QUEUE:
- resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ case QED_LL2_RAM_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
break;
case QED_RDMA_CNQ_RAM:
/* No need for a case for QED_CMDQS_CQS since
@@ -3626,8 +3672,8 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
- resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
- : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ resc_max_val =
+ NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
break;
case QED_BDQ:
resc_max_val = BDQ_NUM_RESOURCES;
@@ -3660,28 +3706,24 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ struct qed_dev *cdev = p_hwfn->cdev;
switch (res_id) {
case QED_L2_QUEUE:
- *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
- MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
break;
case QED_VPORT:
- *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
break;
case QED_RSS_ENG:
- *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
- ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
break;
case QED_PQ:
- *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
- MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
@@ -3689,11 +3731,13 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
- PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
+ break;
+ case QED_LL2_RAM_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
- case QED_LL2_QUEUE:
- *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ case QED_LL2_CTX_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
@@ -3701,8 +3745,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
- RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
+ *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
@@ -5087,11 +5130,11 @@ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
- vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
min_pf_rate;
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
@@ -5102,7 +5145,7 @@ static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
int i;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
- p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+ p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
@@ -5118,7 +5161,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
qed_init_wfq_default_param(p_hwfn, min_pf_rate);
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
- vport_params[i].vport_wfq);
+ vport_params[i].wfq);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 47376d4d071f..eb4808b3bf67 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -230,30 +230,6 @@ enum qed_dmae_address_type_t {
QED_DMAE_ADDRESS_GRC
};
-/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
- * source is a block of length DMAE_MAX_RW_SIZE and the
- * destination is larger, the source block will be duplicated as
- * many times as required to fill the destination block. This is
- * used mostly to write a zeroed buffer to destination address
- * using DMA
- */
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_VF_SRC 0x00000002
-#define QED_DMAE_FLAG_VF_DST 0x00000004
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
-#define QED_DMAE_FLAG_PORT 0x00000010
-#define QED_DMAE_FLAG_PF_SRC 0x00000020
-#define QED_DMAE_FLAG_PF_DST 0x00000040
-
-struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
- u8 src_vfid;
- u8 dst_vfid;
- u8 port_id;
- u8 src_pfid;
- u8 dst_pfid;
-};
-
/**
* @brief qed_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index de31a382f58e..4c7fa391fd33 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
goto err;
}
p_cxt = cxt_info.p_cxt;
+ memset(p_cxt, 0, sizeof(*p_cxt));
+
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index cf3ceb62e397..4597015b8bff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -98,6 +98,7 @@ enum core_event_opcode {
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
CORE_EVENT_TX_QUEUE_UPDATE,
+ CORE_EVENT_QUEUE_STATS_QUERY,
MAX_CORE_EVENT_OPCODE
};
@@ -116,7 +117,7 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
-/* Ethernet TX Per Queue Stats */
+/* LL2 TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
@@ -124,13 +125,13 @@ struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
};
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
- __le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat {
@@ -147,6 +148,18 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Structure for doorbell data, in PWM mode, for RX producers update. */
+struct core_pwm_prod_update_data {
+ __le16 icid; /* internal CID */
+ u8 reserved0;
+ u8 params;
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
+ struct core_ll2_rx_prod prod; /* Producers */
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -156,6 +169,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
CORE_RAMROD_TX_QUEUE_UPDATE,
+ CORE_RAMROD_QUEUE_STATS_QUERY,
MAX_CORE_RAMROD_CMD_ID
};
@@ -236,7 +250,8 @@ struct core_rx_gsi_offload_cqe {
__le16 src_mac_addrlo;
__le16 qp_id;
__le32 src_qp;
- __le32 reserved[3];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved;
};
/* Core RX CQE for Light L2 */
@@ -274,8 +289,11 @@ struct core_rx_start_ramrod_data {
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
+ u8 vport_id_valid;
+ u8 vport_id;
+ u8 zero_prod_flg;
u8 wipe_inner_vlan_pri_en;
- u8 reserved[5];
+ u8 reserved[2];
};
/* Ramrod data for rx queue stop ramrod */
@@ -352,8 +370,11 @@ struct core_tx_start_ramrod_data {
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
+ u8 ctx_stats_en;
+ u8 vport_id_valid;
u8 vport_id;
- u8 resrved[2];
+ u8 enforce_security_flag;
+ u8 reserved[7];
};
/* Ramrod data for tx queue stop ramrod */
@@ -385,7 +406,7 @@ struct ystorm_core_conn_st_ctx {
/* The core storm context for the Pstorm */
struct pstorm_core_conn_st_ctx {
- __le32 reserved[4];
+ __le32 reserved[20];
};
/* Core Slowpath Connection storm context of Xstorm */
@@ -761,7 +782,7 @@ struct e4_tstorm_core_conn_ag_ctx {
__le16 word1;
__le16 word2;
__le16 word3;
- __le32 reg9;
+ __le32 ll2_rx_prod;
__le32 reg10;
};
@@ -836,11 +857,16 @@ struct e4_ustorm_core_conn_ag_ctx {
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
- __le32 reserved[24];
+ __le32 reserved[40];
};
/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {
+ __le32 reserved[20];
+};
+
+/* The core storm context for the Tstorm */
+struct tstorm_core_conn_st_ctx {
__le32 reserved[4];
};
@@ -857,6 +883,8 @@ struct e4_core_conn_context {
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
+ struct tstorm_core_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
};
struct eth_mstorm_per_pf_stat {
@@ -888,12 +916,21 @@ struct eth_pstorm_per_pf_stat {
struct regpair sent_gre_bytes;
struct regpair sent_vxlan_bytes;
struct regpair sent_geneve_bytes;
+ struct regpair sent_mpls_bytes;
+ struct regpair sent_gre_mpls_bytes;
+ struct regpair sent_udp_mpls_bytes;
struct regpair sent_gre_pkts;
struct regpair sent_vxlan_pkts;
struct regpair sent_geneve_pkts;
+ struct regpair sent_mpls_pkts;
+ struct regpair sent_gre_mpls_pkts;
+ struct regpair sent_udp_mpls_pkts;
struct regpair gre_drop_pkts;
struct regpair vxlan_drop_pkts;
struct regpair geneve_drop_pkts;
+ struct regpair mpls_drop_pkts;
+ struct regpair gre_mpls_drop_pkts;
+ struct regpair udp_mpls_drop_pkts;
};
/* Ethernet TX Per Queue Stats */
@@ -983,7 +1020,8 @@ union event_ring_data {
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
- __le16 reserved0;
+ u8 reserved0;
+ u8 vf_id;
__le16 echo;
u8 fw_return_code;
u8 flags;
@@ -1061,7 +1099,20 @@ enum malicious_vf_error_id {
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
ETH_PACKET_SIZE_TOO_LARGE,
- MAX_MALICIOUS_VF_ERROR_ID
+ CORE_ILLEGAL_VLAN_MODE,
+ CORE_ILLEGAL_NBDS,
+ CORE_FIRST_BD_WO_SOP,
+ CORE_INSUFFICIENT_BDS,
+ CORE_PACKET_TOO_SMALL,
+ CORE_ILLEGAL_INBAND_TAGS,
+ CORE_VLAN_INSERT_AND_INBAND_VLAN,
+ CORE_MTU_VIOLATION,
+ CORE_CONTROL_PACKET_VIOLATION,
+ CORE_ANTI_SPOOFING_ERR,
+ CORE_PACKET_SIZE_TOO_LARGE,
+ CORE_ILLEGAL_BD_FLAGS,
+ CORE_GSI_PACKET_VIOLATION,
+ MAX_MALICIOUS_VF_ERROR_ID,
};
/* Mstorm non-triggering VF zone */
@@ -1367,6 +1418,16 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
+/* Xstorm non-triggering VF zone */
+struct xstorm_non_trigger_vf_zone {
+ struct regpair non_edpm_ack_pkts;
+};
+
+/* Xstorm VF zone */
+struct xstorm_vf_zone {
+ struct xstorm_non_trigger_vf_zone non_trigger;
+};
+
/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
@@ -1435,7 +1496,11 @@ struct dmae_cmd {
__le16 crc16;
__le16 crc16_c;
__le16 crc10;
- __le16 reserved;
+ __le16 error_bit_reserved;
+#define DMAE_CMD_ERROR_BIT_MASK 0x1
+#define DMAE_CMD_ERROR_BIT_SHIFT 0
+#define DMAE_CMD_RESERVED_MASK 0x7FFF
+#define DMAE_CMD_RESERVED_SHIFT 1
__le16 xsum16;
__le16 xsum8;
};
@@ -1566,6 +1631,41 @@ struct e4_ystorm_core_conn_ag_ctx {
__le32 reg3;
};
+/* DMAE parameters */
+struct qed_dmae_params {
+ u32 flags;
+/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1
+#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0
+#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1
+#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2
+#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1
+#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3
+#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4
+#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5
+#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6
+#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF
+#define QED_DMAE_PARAMS_RESERVED_SHIFT 7
+ u8 src_vfid;
+ u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
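A short sketch of the replicate-source mode described in the comment above: a single zeroed block of DMAE_MAX_RW_SIZE is replicated by the engine to clear a larger destination (the qed_dmae_host2host() call shape and the *_phys placeholders are assumptions):

	struct qed_dmae_params params = {};

	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
	/* src: one zeroed block; dst may span many such blocks */
	rc = qed_dmae_host2host(p_hwfn, p_ptt, zero_blk_phys, dst_phys,
				dst_size_in_dwords, &params);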
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
@@ -1743,102 +1843,23 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
+/* Physical memory descriptor */
+struct phys_mem_desc {
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ u32 size; /* In bytes */
+};
+
+/* Virtual memory descriptor */
+struct virt_mem_desc {
+ void *ptr;
+ u32 size; /* In bytes */
+};
+
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
-enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_PTU = 0x560000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PTLD = 0x5a0000,
- GRCBASE_YPLD = 0x5c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_RGFS = 0xf00000,
- GRCBASE_RGSRC = 0x320000,
- GRCBASE_TGFS = 0xd00000,
- GRCBASE_TGSRC = 0x322000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_LED = 0x6b8000,
- GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_PXPREQBUS = 0x56000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
- MAX_BLOCK_ADDR
-};
-
enum block_id {
BLOCK_GRC,
BLOCK_MISCS,
@@ -1893,8 +1914,6 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -1908,12 +1927,9 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
- BLOCK_RGFS,
- BLOCK_RGSRC,
- BLOCK_TGFS,
- BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
+ BLOCK_MSTAT,
BLOCK_DBG,
BLOCK_NIG,
BLOCK_WOL,
@@ -1926,8 +1942,17 @@ enum block_id {
BLOCK_LED,
BLOCK_AVS_WRAP,
BLOCK_PXPREQBUS,
- BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
MAX_BLOCK_ID
};
@@ -1944,10 +1969,13 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BUS_BLOCKS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BUS_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -2031,20 +2059,54 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
-/* Debug Bus block data */
-struct dbg_bus_block {
- u8 num_of_lines;
- u8 has_latency_events;
- u16 lines_offset;
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
};
-/* Debug Bus block user data */
-struct dbg_bus_block_user_data {
- u8 num_of_lines;
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
u8 has_latency_events;
u16 names_offset;
};
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
/* Block Debug line data */
struct dbg_bus_line {
u8 data;
@@ -2197,22 +2259,33 @@ enum dbg_idle_chk_severity_types {
MAX_DBG_IDLE_CHK_SEVERITY_TYPES
};
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
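The packed word decodes with the driver's generic GET_FIELD() accessor, e.g. (reset_reg is a placeholder pointer):

	u32 addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
	bool removed = GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED);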
/* Debug Bus block data */
struct dbg_bus_block_data {
- u16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
u8 line_num;
u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
};
-/* Debug Bus Clients */
enum dbg_bus_clients {
DBG_BUS_CLIENT_RBCN,
DBG_BUS_CLIENT_RBCP,
@@ -2253,11 +2326,10 @@ enum dbg_bus_constraint_ops {
/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
- u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
};
/* Debug Bus memory address */
@@ -2307,8 +2379,7 @@ struct dbg_bus_storm_data {
struct dbg_bus_data {
u32 app_version;
u8 state;
- u8 hw_dwords;
- u16 hw_id_mask;
+ u8 mode_256b_en;
u8 num_enabled_blocks;
u8 num_enabled_storms;
u8 target;
@@ -2319,67 +2390,21 @@ struct dbg_bus_data {
u8 adding_filter;
u8 filter_pre_trigger;
u8 filter_post_trigger;
- u16 reserved;
u8 trigger_en;
- struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_constraint_dword_mask;
u8 next_trigger_state;
u8 next_constraint_id;
- u8 unify_inputs;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[88];
+ struct dbg_bus_block_data blocks[132];
struct dbg_bus_storm_data storms[6];
};
-/* Debug bus filter types */
-enum dbg_bus_filter_types {
- DBG_BUS_FILTER_TYPE_OFF,
- DBG_BUS_FILTER_TYPE_PRE,
- DBG_BUS_FILTER_TYPE_POST,
- DBG_BUS_FILTER_TYPE_ON,
- MAX_DBG_BUS_FILTER_TYPES
-};
-
-/* Debug bus frame modes */
-enum dbg_bus_frame_modes {
- DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
- DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
- DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
- MAX_DBG_BUS_FRAME_MODES
-};
-
-/* Debug bus other engine mode */
-enum dbg_bus_other_engine_modes {
- DBG_BUS_OTHER_ENGINE_MODE_NONE,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
- MAX_DBG_BUS_OTHER_ENGINE_MODES
-};
-
-/* Debug bus post-trigger recording types */
-enum dbg_bus_post_trigger_types {
- DBG_BUS_POST_TRIGGER_RECORD,
- DBG_BUS_POST_TRIGGER_DROP,
- MAX_DBG_BUS_POST_TRIGGER_TYPES
-};
-
-/* Debug bus pre-trigger recording types */
-enum dbg_bus_pre_trigger_types {
- DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
- DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
- DBG_BUS_PRE_TRIGGER_DROP,
- MAX_DBG_BUS_PRE_TRIGGER_TYPES
-};
-
-/* Debug bus SEMI frame modes */
-enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
- MAX_DBG_BUS_SEMI_FRAME_MODES
-};
-
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE,
@@ -2397,7 +2422,9 @@ enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_DRA_W,
DBG_BUS_STORM_MODE_LD_ST_ADDR,
DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
DBG_BUS_STORM_MODE_FOC,
DBG_BUS_STORM_MODE_EXT_STORE,
MAX_DBG_BUS_STORM_MODES
@@ -2438,13 +2465,13 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_CAU,
DBG_GRC_PARAM_DUMP_QM,
DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_DUMP_DORQ,
DBG_GRC_PARAM_DUMP_CFC,
DBG_GRC_PARAM_DUMP_IGU,
DBG_GRC_PARAM_DUMP_BRB,
DBG_GRC_PARAM_DUMP_BTB,
DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_DUMP_NIG,
+ DBG_GRC_PARAM_RESERVD1,
DBG_GRC_PARAM_DUMP_MULD,
DBG_GRC_PARAM_DUMP_PRS,
DBG_GRC_PARAM_DUMP_DMAE,
@@ -2453,8 +2480,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_DIF,
DBG_GRC_PARAM_DUMP_STATIC,
DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_NUM_LCIDS,
- DBG_GRC_PARAM_NUM_LTIDS,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_EXCLUDE_ALL,
DBG_GRC_PARAM_CRASH,
DBG_GRC_PARAM_PARITY_SAFE,
@@ -2462,22 +2489,14 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
MAX_DBG_GRC_PARAMS
};
-/* Debug reset registers */
-enum dbg_reset_regs {
- DBG_RESET_REG_MISCS_PL_UA,
- DBG_RESET_REG_MISCS_PL_HV,
- DBG_RESET_REG_MISCS_PL_HV_2,
- DBG_RESET_REG_MISC_PL_UA,
- DBG_RESET_REG_MISC_PL_HV,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- DBG_RESET_REG_MISC_PL_PDA_VAUX,
- MAX_DBG_RESET_REGS
-};
-
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -2489,15 +2508,15 @@ enum dbg_status {
DBG_STATUS_INVALID_PCI_BUF_SIZE,
DBG_STATUS_PCI_BUF_ALLOC_FAILED,
DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_TOO_MANY_INPUTS,
- DBG_STATUS_INPUT_OVERLAP,
- DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
DBG_STATUS_STORM_ALREADY_ENABLED,
DBG_STATUS_STORM_NOT_ENABLED,
DBG_STATUS_BLOCK_ALREADY_ENABLED,
DBG_STATUS_BLOCK_NOT_ENABLED,
DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
DBG_STATUS_FILTER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_NOT_ENABLED,
@@ -2522,7 +2541,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED2,
+ DBG_STATUS_RESERVED0,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2530,10 +2549,15 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_RESERVED1,
DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
MAX_DBG_STATUS
};
@@ -2569,9 +2593,9 @@ struct dbg_tools_data {
struct dbg_bus_data bus;
struct idle_chk_data idle_chk;
u8 mode_enable[40];
- u8 block_in_reset[88];
+ u8 block_in_reset[132];
u8 chip_id;
- u8 platform_id;
+ u8 hw_type;
u8 num_ports;
u8 num_pfs_per_port;
u8 num_vfs;
@@ -2582,6 +2606,19 @@ struct dbg_tools_data {
u32 num_regs_read;
};
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2630,13 +2667,18 @@ struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
+/* QM per global RL init parameters */
+struct init_qm_global_rl_params {
+ u32 rate_limit;
+};
+
/* QM per-port init parameters */
struct init_qm_port_params {
- u8 active;
- u8 active_phys_tcs;
+ u16 active_phys_tcs;
u16 num_pbf_cmd_lines;
u16 num_btb_blocks;
- u16 reserved;
+ u8 active;
+ u8 reserved;
};
/* QM per-PQ init parameters */
@@ -2645,15 +2687,14 @@ struct init_qm_pq_params {
u8 tc_id;
u8 wrr_group;
u8 rl_valid;
+ u16 rl_id;
u8 port_id;
- u8 reserved0;
- u16 reserved1;
+ u8 reserved;
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
+ u16 wfq;
u16 first_tx_pq_id[NUM_OF_TCS];
};
@@ -2673,13 +2714,12 @@ struct init_qm_vport_params {
enum chip_ids {
CHIP_BB,
CHIP_K2,
- CHIP_RESERVED,
MAX_CHIP_IDS
};
struct fw_asserts_ram_section {
- u16 section_ram_line_offset;
- u16 section_ram_line_size;
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
u8 list_dword_offset;
u8 list_element_dword_size;
u8 list_num_elements;
@@ -2729,6 +2769,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_RESERVED6,
+ MODE_RESERVED7,
MAX_INIT_MODES
};
@@ -2763,9 +2804,19 @@ enum bin_init_buffer_type {
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
BIN_BUF_INIT_IRO,
+ BIN_BUF_INIT_OVERLAYS,
MAX_BIN_INIT_BUFFER_TYPE
};
+/* FW overlay buffer header */
+struct fw_overlay_buf_hdr {
+ u32 data;
+#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF
+#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
+};
+
/* init array header: raw */
struct init_array_raw_hdr {
u32 data;
@@ -2859,10 +2910,8 @@ struct init_if_phase_op {
u32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
u32 phase_data;
@@ -2991,9 +3040,11 @@ struct iro {
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
*
+ * @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
-enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
/**
* @brief qed_read_regs - Reads registers into a buffer (using GRC).
@@ -3037,7 +3088,6 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
* - val is outside the allowed boundaries
*/
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum dbg_grc_params grc_param, u32 val);
/**
@@ -3358,20 +3408,36 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
char *format_str;
};
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
/******************************** Constants **********************************/
#define MAX_NAME_LEN 16
@@ -3382,16 +3448,20 @@ struct mcp_trace_format {
* @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
* debug arrays.
*
+ * @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
/**
* @brief qed_dbg_alloc_user_data - Allocates user debug data.
*
* @param p_hwfn - HW device data
+ * @param user_data_ptr - OUT: a pointer to the allocated memory.
*/
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn);
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
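For reference, the qed_resc_alloc() hunk earlier in this patch shows how the new out parameter is consumed, parking the context on the hwfn:

	rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
	if (rc)
		goto alloc_err;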
/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
@@ -3664,271 +3734,6 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
-/* Debug Bus blocks */
-static const u32 dbg_bus_blocks[] = {
- 0x0000000f, /* grc, bb, 15 lines */
- 0x0000000f, /* grc, k2, 15 lines */
- 0x00000000,
- 0x00000000, /* miscs, bb, 0 lines */
- 0x00000000, /* miscs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* misc, bb, 0 lines */
- 0x00000000, /* misc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* dbu, bb, 0 lines */
- 0x00000000, /* dbu, k2, 0 lines */
- 0x00000000,
- 0x000f0127, /* pglue_b, bb, 39 lines */
- 0x0036012a, /* pglue_b, k2, 42 lines */
- 0x00000000,
- 0x00000000, /* cnig, bb, 0 lines */
- 0x00120102, /* cnig, k2, 2 lines */
- 0x00000000,
- 0x00000000, /* cpmu, bb, 0 lines */
- 0x00000000, /* cpmu, k2, 0 lines */
- 0x00000000,
- 0x00000001, /* ncsi, bb, 1 lines */
- 0x00000001, /* ncsi, k2, 1 lines */
- 0x00000000,
- 0x00000000, /* opte, bb, 0 lines */
- 0x00000000, /* opte, k2, 0 lines */
- 0x00000000,
- 0x00600085, /* bmb, bb, 133 lines */
- 0x00600085, /* bmb, k2, 133 lines */
- 0x00000000,
- 0x00000000, /* pcie, bb, 0 lines */
- 0x00e50033, /* pcie, k2, 51 lines */
- 0x00000000,
- 0x00000000, /* mcp, bb, 0 lines */
- 0x00000000, /* mcp, k2, 0 lines */
- 0x00000000,
- 0x01180009, /* mcp2, bb, 9 lines */
- 0x01180009, /* mcp2, k2, 9 lines */
- 0x00000000,
- 0x01210104, /* pswhst, bb, 4 lines */
- 0x01210104, /* pswhst, k2, 4 lines */
- 0x00000000,
- 0x01250103, /* pswhst2, bb, 3 lines */
- 0x01250103, /* pswhst2, k2, 3 lines */
- 0x00000000,
- 0x00340101, /* pswrd, bb, 1 lines */
- 0x00340101, /* pswrd, k2, 1 lines */
- 0x00000000,
- 0x01280119, /* pswrd2, bb, 25 lines */
- 0x01280119, /* pswrd2, k2, 25 lines */
- 0x00000000,
- 0x01410109, /* pswwr, bb, 9 lines */
- 0x01410109, /* pswwr, k2, 9 lines */
- 0x00000000,
- 0x00000000, /* pswwr2, bb, 0 lines */
- 0x00000000, /* pswwr2, k2, 0 lines */
- 0x00000000,
- 0x001c0001, /* pswrq, bb, 1 lines */
- 0x001c0001, /* pswrq, k2, 1 lines */
- 0x00000000,
- 0x014a0015, /* pswrq2, bb, 21 lines */
- 0x014a0015, /* pswrq2, k2, 21 lines */
- 0x00000000,
- 0x00000000, /* pglcs, bb, 0 lines */
- 0x00120006, /* pglcs, k2, 6 lines */
- 0x00000000,
- 0x00100001, /* dmae, bb, 1 lines */
- 0x00100001, /* dmae, k2, 1 lines */
- 0x00000000,
- 0x015f0105, /* ptu, bb, 5 lines */
- 0x015f0105, /* ptu, k2, 5 lines */
- 0x00000000,
- 0x01640120, /* tcm, bb, 32 lines */
- 0x01640120, /* tcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* mcm, bb, 32 lines */
- 0x01640120, /* mcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* ucm, bb, 32 lines */
- 0x01640120, /* ucm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* xcm, bb, 32 lines */
- 0x01640120, /* xcm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* ycm, bb, 32 lines */
- 0x01640120, /* ycm, k2, 32 lines */
- 0x00000000,
- 0x01640120, /* pcm, bb, 32 lines */
- 0x01640120, /* pcm, k2, 32 lines */
- 0x00000000,
- 0x01840062, /* qm, bb, 98 lines */
- 0x01840062, /* qm, k2, 98 lines */
- 0x00000000,
- 0x01e60021, /* tm, bb, 33 lines */
- 0x01e60021, /* tm, k2, 33 lines */
- 0x00000000,
- 0x02070107, /* dorq, bb, 7 lines */
- 0x02070107, /* dorq, k2, 7 lines */
- 0x00000000,
- 0x00600185, /* brb, bb, 133 lines */
- 0x00600185, /* brb, k2, 133 lines */
- 0x00000000,
- 0x020e0019, /* src, bb, 25 lines */
- 0x020c001a, /* src, k2, 26 lines */
- 0x00000000,
- 0x02270104, /* prs, bb, 4 lines */
- 0x02270104, /* prs, k2, 4 lines */
- 0x00000000,
- 0x022b0133, /* tsdm, bb, 51 lines */
- 0x022b0133, /* tsdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* msdm, bb, 51 lines */
- 0x022b0133, /* msdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* usdm, bb, 51 lines */
- 0x022b0133, /* usdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* xsdm, bb, 51 lines */
- 0x022b0133, /* xsdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* ysdm, bb, 51 lines */
- 0x022b0133, /* ysdm, k2, 51 lines */
- 0x00000000,
- 0x022b0133, /* psdm, bb, 51 lines */
- 0x022b0133, /* psdm, k2, 51 lines */
- 0x00000000,
- 0x025e010c, /* tsem, bb, 12 lines */
- 0x025e010c, /* tsem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* msem, bb, 12 lines */
- 0x025e010c, /* msem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* usem, bb, 12 lines */
- 0x025e010c, /* usem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* xsem, bb, 12 lines */
- 0x025e010c, /* xsem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* ysem, bb, 12 lines */
- 0x025e010c, /* ysem, k2, 12 lines */
- 0x00000000,
- 0x025e010c, /* psem, bb, 12 lines */
- 0x025e010c, /* psem, k2, 12 lines */
- 0x00000000,
- 0x026a000d, /* rss, bb, 13 lines */
- 0x026a000d, /* rss, k2, 13 lines */
- 0x00000000,
- 0x02770106, /* tmld, bb, 6 lines */
- 0x02770106, /* tmld, k2, 6 lines */
- 0x00000000,
- 0x027d0106, /* muld, bb, 6 lines */
- 0x027d0106, /* muld, k2, 6 lines */
- 0x00000000,
- 0x02770005, /* yuld, bb, 5 lines */
- 0x02770005, /* yuld, k2, 5 lines */
- 0x00000000,
- 0x02830107, /* xyld, bb, 7 lines */
- 0x027d0107, /* xyld, k2, 7 lines */
- 0x00000000,
- 0x00000000, /* ptld, bb, 0 lines */
- 0x00000000, /* ptld, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* ypld, bb, 0 lines */
- 0x00000000, /* ypld, k2, 0 lines */
- 0x00000000,
- 0x028a010e, /* prm, bb, 14 lines */
- 0x02980110, /* prm, k2, 16 lines */
- 0x00000000,
- 0x02a8000d, /* pbf_pb1, bb, 13 lines */
- 0x02a8000d, /* pbf_pb1, k2, 13 lines */
- 0x00000000,
- 0x02a8000d, /* pbf_pb2, bb, 13 lines */
- 0x02a8000d, /* pbf_pb2, k2, 13 lines */
- 0x00000000,
- 0x02a8000d, /* rpb, bb, 13 lines */
- 0x02a8000d, /* rpb, k2, 13 lines */
- 0x00000000,
- 0x00600185, /* btb, bb, 133 lines */
- 0x00600185, /* btb, k2, 133 lines */
- 0x00000000,
- 0x02b50117, /* pbf, bb, 23 lines */
- 0x02b50117, /* pbf, k2, 23 lines */
- 0x00000000,
- 0x02cc0006, /* rdif, bb, 6 lines */
- 0x02cc0006, /* rdif, k2, 6 lines */
- 0x00000000,
- 0x02d20006, /* tdif, bb, 6 lines */
- 0x02d20006, /* tdif, k2, 6 lines */
- 0x00000000,
- 0x02d80003, /* cdu, bb, 3 lines */
- 0x02db000e, /* cdu, k2, 14 lines */
- 0x00000000,
- 0x02e9010d, /* ccfc, bb, 13 lines */
- 0x02f60117, /* ccfc, k2, 23 lines */
- 0x00000000,
- 0x02e9010d, /* tcfc, bb, 13 lines */
- 0x02f60117, /* tcfc, k2, 23 lines */
- 0x00000000,
- 0x030d0133, /* igu, bb, 51 lines */
- 0x030d0133, /* igu, k2, 51 lines */
- 0x00000000,
- 0x03400106, /* cau, bb, 6 lines */
- 0x03400106, /* cau, k2, 6 lines */
- 0x00000000,
- 0x00000000, /* rgfs, bb, 0 lines */
- 0x00000000, /* rgfs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* rgsrc, bb, 0 lines */
- 0x00000000, /* rgsrc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* tgfs, bb, 0 lines */
- 0x00000000, /* tgfs, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* tgsrc, bb, 0 lines */
- 0x00000000, /* tgsrc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* umac, bb, 0 lines */
- 0x00120006, /* umac, k2, 6 lines */
- 0x00000000,
- 0x00000000, /* xmac, bb, 0 lines */
- 0x00000000, /* xmac, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* dbg, bb, 0 lines */
- 0x00000000, /* dbg, k2, 0 lines */
- 0x00000000,
- 0x0346012b, /* nig, bb, 43 lines */
- 0x0346011d, /* nig, k2, 29 lines */
- 0x00000000,
- 0x00000000, /* wol, bb, 0 lines */
- 0x001c0002, /* wol, k2, 2 lines */
- 0x00000000,
- 0x00000000, /* bmbn, bb, 0 lines */
- 0x00210008, /* bmbn, k2, 8 lines */
- 0x00000000,
- 0x00000000, /* ipc, bb, 0 lines */
- 0x00000000, /* ipc, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* nwm, bb, 0 lines */
- 0x0371000b, /* nwm, k2, 11 lines */
- 0x00000000,
- 0x00000000, /* nws, bb, 0 lines */
- 0x037c0009, /* nws, k2, 9 lines */
- 0x00000000,
- 0x00000000, /* ms, bb, 0 lines */
- 0x00120004, /* ms, k2, 4 lines */
- 0x00000000,
- 0x00000000, /* phy_pcie, bb, 0 lines */
- 0x00e5001a, /* phy_pcie, k2, 26 lines */
- 0x00000000,
- 0x00000000, /* led, bb, 0 lines */
- 0x00000000, /* led, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* avs_wrap, bb, 0 lines */
- 0x00000000, /* avs_wrap, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* bar0_map, bb, 0 lines */
- 0x00000000, /* bar0_map, k2, 0 lines */
- 0x00000000,
- 0x00000000, /* bar0_map, bb, 0 lines */
- 0x00000000, /* bar0_map, k2, 0 lines */
- 0x00000000,
-};
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3942,22 +3747,28 @@ static const u32 dbg_bus_blocks[] = {
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL
+
+/* Win 12 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL
+
+/* Win 13 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
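The renumbered windows keep the fixed 4 KB stride, so a window's BAR0 address follows directly; a sketch with an illustrative macro name:

#define GTT_WIN_SIZE 0x1000UL
#define GTT_WIN_ADDR(win_id) (0x00f000UL + ((win_id) - 2) * GTT_WIN_SIZE)
/* e.g. GTT_WIN_ADDR(13) == 0x01a000UL == GTT_BAR0_MAP_REG_PSDM_RAM */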
/**
* @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -3982,7 +3793,7 @@ struct qed_qm_common_rt_init_params {
u8 max_phys_tcs_per_port;
bool pf_rl_en;
bool pf_wfq_en;
- bool vport_rl_en;
+ bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
};
@@ -4001,11 +3812,10 @@ struct qed_qm_pf_rt_init_params {
u16 start_pq;
u16 num_pf_pqs;
u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
+ u16 start_vport;
+ u16 num_vports;
u16 pf_wfq;
u32 pf_rl;
- u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
};
@@ -4054,22 +3864,22 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ * @brief qed_init_global_rl - Initializes the rate limit of the specified
+ * rate limiter
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
- * @param link_speed - link speed in Mbps.
+ * @param rl_id - RL ID
+ * @param rate_limit - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed);
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rl_id, u32 rate_limit);
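A hedged call sketch for the renamed helper, where rl_id addresses a global rate limiter rather than a vport (rl_id and speed_mbps are placeholders):

	rc = qed_init_global_rl(p_hwfn, p_ptt, rl_id, speed_mbps);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to configure global RL %u\n", rl_id);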
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
@@ -4157,7 +3967,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
* @brief qed_gft_config - Enable and configure HW for GFT
*
- * @param p_hwfn
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
@@ -4242,6 +4052,42 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
+/**
+ * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ *
+ * @param p_hwfn - HW device data.
+ * @param fw_overlay_in_buf - the input FW overlay buffer.
+ * @param buf_size_in_bytes - the size of the input FW overlay buffer in
+ *	bytes; must be aligned to dwords.
+ *
+ * @return a pointer to the allocated overlays memory,
+ * or NULL in case of failure.
+ */
+struct phys_mem_desc *
+qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const fw_overlay_in_buf,
+ u32 buf_size_in_bytes);
+
+/**
+ * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ *
+ * @param p_hwfn - HW device data.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param fw_overlay_mem - the allocated FW overlay memory.
+ */
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem);
+
+/**
+ * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ *
+ * @param p_hwfn - HW device data.
+ * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ */
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem);
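Taken together with the qed_dev.c hunks above, the overlay memory has a simple per-hwfn lifecycle: allocate from the firmware-provided buffer at load, program the storms' RAM after init, and free on resource teardown:

	p_hwfn->fw_overlay_mem =
		qed_fw_overlay_mem_alloc(p_hwfn, cdev->fw_data->fw_overlays,
					 cdev->fw_data->fw_overlays_len);
	if (!p_hwfn->fw_overlay_mem)
		return -ENOMEM;

	qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
	/* ... at teardown: */
	qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);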
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
@@ -4282,851 +4128,807 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[8].base + ((pq_id) * IRO[8].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
+
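The IRO renumbering below is mechanical: one new XSTORM_PQ_INFO entry (IRO[8]) plus the six per-storm overlay-buffer entries (IRO[15]..IRO[20]) shift every later index by +7, with the Mstorm TPA-timeout and ETH PF/VF producer entries additionally reordered within their new slots.

/* e.g. old IRO[14] (TSTORM_LL2_RX_PRODS) -> new IRO[14 + 7] = IRO[21] */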
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+ (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+ (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
/* Ustorm LiteL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+ (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
/* Pstorm LiteL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+ (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
/* Mstorm queue statistics */
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+ (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[19].base + ((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode.
+ * mode
*/
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
+ (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[28].base + ((queue_id) * IRO[28].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
/* Mstorm pf statistics */
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+ (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
/* Ustorm queue statistics */
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+ (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
- (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[31].base + ((pf_id) * IRO[31].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
/* Pstorm pf statistics */
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[27].base + ((eth_type_id) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[34].base + ((eth_type_id) * IRO[34].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[36].base + ((pf_id) * IRO[36].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update.
+ * Use eth_tstorm_rss_update_data for update
*/
#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[30].base + ((pf_id) * IRO[30].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
/* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[31].base + ((queue_id) * IRO[31].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
+ (IRO[38].base + ((queue_id) * IRO[38].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
/* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[32].base + ((rss_id) * IRO[32].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+ (IRO[39].base + ((rss_id) * IRO[39].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
/* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[33].base + ((rss_id) * IRO[33].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
+ (IRO[40].base + ((rss_id) * IRO[40].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
/* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
+ (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id.
+ * BDqueue-id
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
+ ((bdq_id) * IRO[43].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+ (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
+ ((bdq_id) * IRO[44].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[45].base + ((storage_func_id) * IRO[45].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[46].base + ((storage_func_id) * IRO[46].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[47].base + ((storage_func_id) * IRO[47].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[48].base + ((storage_func_id) * IRO[48].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[49].base + ((storage_func_id) * IRO[49].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[50].base + ((storage_func_id) * IRO[50].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[44].base + ((pf_id) * IRO[44].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
+ (IRO[51].base + ((pf_id) * IRO[51].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[45].base + ((pf_id) * IRO[45].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
+ (IRO[52].base + ((pf_id) * IRO[52].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
/* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
/* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
+ (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
/* Xstorm error level for assert */
#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[48].base + ((pf_id) * IRO[48].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+ (IRO[55].base + ((pf_id) * IRO[55].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
/* Ystorm error level for assert */
#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[49].base + ((pf_id) * IRO[49].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+ (IRO[56].base + ((pf_id) * IRO[56].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
/* Pstorm error level for assert */
#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[50].base + ((pf_id) * IRO[50].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+ (IRO[57].base + ((pf_id) * IRO[57].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
/* Tstorm error level for assert */
#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+ (IRO[58].base + ((pf_id) * IRO[58].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
/* Mstorm error level for assert */
#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+ (IRO[59].base + ((pf_id) * IRO[59].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
/* Ustorm error level for assert */
#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[53].base + ((pf_id) * IRO[53].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
+ (IRO[60].base + ((pf_id) * IRO[60].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[54].base + ((pf_id) * IRO[54].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
+ (IRO[61].base + ((pf_id) * IRO[61].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
- (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[59].base + ((roce_pf_id) * IRO[59].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
-
-static const struct iro iro_arr[60] = {
- {0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb8, 0x88, 0x0, 0x0, 0x88},
- {0x6530, 0x20, 0x0, 0x0, 0x20},
- {0xb00, 0x8, 0x0, 0x0, 0x4},
- {0xa80, 0x8, 0x0, 0x0, 0x4},
- {0x0, 0x8, 0x0, 0x0, 0x2},
- {0x80, 0x8, 0x0, 0x0, 0x4},
- {0x84, 0x8, 0x0, 0x0, 0x2},
- {0x4c48, 0x0, 0x0, 0x0, 0x78},
- {0x3e38, 0x0, 0x0, 0x0, 0x78},
- {0x3ef8, 0x0, 0x0, 0x0, 0x78},
- {0x4c40, 0x0, 0x0, 0x0, 0x78},
- {0x4998, 0x0, 0x0, 0x0, 0x78},
- {0x7f50, 0x0, 0x0, 0x0, 0x78},
- {0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x6210, 0x10, 0x0, 0x0, 0x10},
- {0xb820, 0x30, 0x0, 0x0, 0x30},
- {0xa990, 0x30, 0x0, 0x0, 0x30},
- {0x4b68, 0x80, 0x0, 0x0, 0x40},
- {0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0x53a8, 0x80, 0x4, 0x0, 0x4},
- {0xc7d0, 0x0, 0x0, 0x0, 0x4},
- {0x4ba8, 0x80, 0x0, 0x0, 0x20},
- {0x8158, 0x40, 0x0, 0x0, 0x30},
- {0xe770, 0x60, 0x0, 0x0, 0x60},
- {0x4090, 0x80, 0x0, 0x0, 0x38},
- {0xfea8, 0x78, 0x0, 0x0, 0x78},
- {0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xaf20, 0x0, 0x0, 0x0, 0xf0},
- {0xb010, 0x8, 0x0, 0x0, 0x8},
- {0xc00, 0x8, 0x0, 0x0, 0x8},
- {0x1f8, 0x8, 0x0, 0x0, 0x8},
- {0xac0, 0x8, 0x0, 0x0, 0x8},
- {0x2578, 0x8, 0x0, 0x0, 0x8},
- {0x24f8, 0x8, 0x0, 0x0, 0x8},
- {0x0, 0x8, 0x0, 0x0, 0x8},
- {0x400, 0x18, 0x8, 0x0, 0x8},
- {0xb78, 0x18, 0x8, 0x0, 0x2},
- {0xd898, 0x50, 0x0, 0x0, 0x3c},
- {0x12908, 0x18, 0x0, 0x0, 0x10},
- {0x11aa8, 0x40, 0x0, 0x0, 0x18},
- {0xa588, 0x50, 0x0, 0x0, 0x20},
- {0x8f00, 0x40, 0x0, 0x0, 0x28},
- {0x10e30, 0x18, 0x0, 0x0, 0x10},
- {0xde48, 0x48, 0x0, 0x0, 0x38},
- {0x11298, 0x20, 0x0, 0x0, 0x20},
- {0x40c8, 0x80, 0x0, 0x0, 0x10},
- {0x5048, 0x10, 0x0, 0x0, 0x10},
- {0xc748, 0x8, 0x0, 0x0, 0x1},
- {0xa928, 0x8, 0x0, 0x0, 0x1},
- {0x11a30, 0x8, 0x0, 0x0, 0x1},
- {0xf030, 0x8, 0x0, 0x0, 0x1},
- {0x13028, 0x8, 0x0, 0x0, 0x1},
- {0x12c58, 0x8, 0x0, 0x0, 0x1},
- {0xc9b8, 0x30, 0x0, 0x0, 0x10},
- {0xed90, 0x28, 0x0, 0x0, 0x28},
- {0xad20, 0x18, 0x0, 0x0, 0x18},
- {0xaea0, 0x8, 0x0, 0x0, 0x8},
- {0x13c38, 0x8, 0x0, 0x0, 0x8},
- {0x13c50, 0x18, 0x0, 0x0, 0x18},
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
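
Aside: every *_OFFSET macro above resolves the same way — an IRO table entry supplies a base offset plus one stride per index argument (m1 for the first, m2 for the second), and the matching *_SIZE macro gives the element size. A minimal stand-alone sketch, assuming the five-field struct iro layout implied by the old iro_arr initializers; the helper name iro_offset2 is hypothetical:

/* Mirrors the five-field initializers {base, m1, m2, m3, size} of the
 * old iro_arr; this layout is an assumption, not a quote of the driver.
 */
#include <stdint.h>
#include <stdio.h>

struct iro {
	uint32_t base;	/* starting offset inside the storm RAM/BAR */
	uint16_t m1;	/* stride for the first index (queue/pf/func id) */
	uint16_t m2;	/* stride for the second index (e.g. bdq_id) */
	uint16_t m3;	/* third stride, unused by the macros shown here */
	uint16_t size;	/* size of one element */
};

/* Hypothetical two-index accessor modeled on
 * TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id).
 */
static uint32_t iro_offset2(const struct iro *e, uint32_t i1, uint32_t i2)
{
	return e->base + i1 * e->m1 + i2 * e->m2;
}

int main(void)
{
	/* Values chosen to match what entry 43 of the new array below
	 * appears to encode; illustrative only.
	 */
	struct iro bdq = { .base = 0x680, .m1 = 0x18, .m2 = 0x8, .size = 0x8 };

	/* Producer slot of storage function 2, BD queue 1: prints 0x6b8. */
	printf("0x%x\n", iro_offset2(&bdq, 2, 1));
	return 0;
}
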
+
+/* IRO Array */
+static const u32 iro_arr[] = {
+ 0x00000000, 0x00000000, 0x00080000,
+ 0x00003288, 0x00000088, 0x00880000,
+ 0x000058e8, 0x00000020, 0x00200000,
+ 0x00000b00, 0x00000008, 0x00040000,
+ 0x00000a80, 0x00000008, 0x00040000,
+ 0x00000000, 0x00000008, 0x00020000,
+ 0x00000080, 0x00000008, 0x00040000,
+ 0x00000084, 0x00000008, 0x00020000,
+ 0x00005718, 0x00000004, 0x00040000,
+ 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00003e40, 0x00000000, 0x00780000,
+ 0x00004480, 0x00000000, 0x00780000,
+ 0x00003210, 0x00000000, 0x00780000,
+ 0x00003b50, 0x00000000, 0x00780000,
+ 0x00007f58, 0x00000000, 0x00780000,
+ 0x00005f58, 0x00000000, 0x00080000,
+ 0x00007100, 0x00000000, 0x00080000,
+ 0x0000aea0, 0x00000000, 0x00080000,
+ 0x00004398, 0x00000000, 0x00080000,
+ 0x0000a5a0, 0x00000000, 0x00080000,
+ 0x0000bde8, 0x00000000, 0x00080000,
+ 0x00000020, 0x00000004, 0x00040000,
+ 0x000056c8, 0x00000010, 0x00100000,
+ 0x0000c210, 0x00000030, 0x00300000,
+ 0x0000b088, 0x00000038, 0x00380000,
+ 0x00003d20, 0x00000080, 0x00400000,
+ 0x0000bf60, 0x00000000, 0x00040000,
+ 0x00004560, 0x00040080, 0x00040000,
+ 0x000001f8, 0x00000004, 0x00040000,
+ 0x00003d60, 0x00000080, 0x00200000,
+ 0x00008960, 0x00000040, 0x00300000,
+ 0x0000e840, 0x00000060, 0x00600000,
+ 0x00004618, 0x00000080, 0x00380000,
+ 0x00010738, 0x000000c0, 0x00c00000,
+ 0x000001f8, 0x00000002, 0x00020000,
+ 0x0000a2a0, 0x00000000, 0x01080000,
+ 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x000001c0, 0x00000008, 0x00080000,
+ 0x000001f8, 0x00000008, 0x00080000,
+ 0x00000ac0, 0x00000008, 0x00080000,
+ 0x00002578, 0x00000008, 0x00080000,
+ 0x000024f8, 0x00000008, 0x00080000,
+ 0x00000280, 0x00000008, 0x00080000,
+ 0x00000680, 0x00080018, 0x00080000,
+ 0x00000b78, 0x00080018, 0x00020000,
+ 0x0000c640, 0x00000050, 0x003c0000,
+ 0x00012038, 0x00000018, 0x00100000,
+ 0x00011b00, 0x00000040, 0x00180000,
+ 0x000095d0, 0x00000050, 0x00200000,
+ 0x00008b10, 0x00000040, 0x00280000,
+ 0x00011640, 0x00000018, 0x00100000,
+ 0x0000c828, 0x00000048, 0x00380000,
+ 0x00011710, 0x00000020, 0x00200000,
+ 0x00004650, 0x00000080, 0x00100000,
+ 0x00003618, 0x00000010, 0x00100000,
+ 0x0000a968, 0x00000008, 0x00010000,
+ 0x000097a0, 0x00000008, 0x00010000,
+ 0x00011990, 0x00000008, 0x00010000,
+ 0x0000f018, 0x00000008, 0x00010000,
+ 0x00012628, 0x00000008, 0x00010000,
+ 0x00011da8, 0x00000008, 0x00010000,
+ 0x0000aa78, 0x00000030, 0x00100000,
+ 0x0000d768, 0x00000028, 0x00280000,
+ 0x00009a58, 0x00000018, 0x00180000,
+ 0x00009bd8, 0x00000008, 0x00080000,
+ 0x00013a18, 0x00000008, 0x00080000,
+ 0x000126e8, 0x00000018, 0x00180000,
+ 0x0000e608, 0x00500288, 0x00100000,
+ 0x00012970, 0x00000138, 0x00280000,
};
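
Aside: the table above switches from five-field struct initializers to packed u32 triplets, three words per entry. Comparing old and new entries suggests the three words overlay the same fields on a little-endian host — base in the first word, m1/m2 in the halves of the second, m3/size in the halves of the third. That packing is inferred, not quoted from the driver; a decoding sketch under that assumption:

/* Unpacks one triplet back into the old five-field form.  The bit
 * positions (m1/m3 in the low halves, m2/size in the high halves) are
 * an assumption inferred from comparing the old and new arrays.
 */
#include <stdint.h>
#include <stdio.h>

struct iro_fields {
	uint32_t base;
	uint16_t m1, m2, m3, size;
};

static struct iro_fields iro_unpack(const uint32_t w[3])
{
	struct iro_fields f = {
		.base = w[0],
		.m1   = (uint16_t)(w[1] & 0xffff),
		.m2   = (uint16_t)(w[1] >> 16),
		.m3   = (uint16_t)(w[2] & 0xffff),
		.size = (uint16_t)(w[2] >> 16),
	};
	return f;
}

int main(void)
{
	/* Entry 43 above: 0x00000680, 0x00080018, 0x00080000 */
	const uint32_t w[3] = { 0x00000680, 0x00080018, 0x00080000 };
	struct iro_fields f = iro_unpack(w);

	/* Prints: base=0x680 m1=0x18 m2=0x8 size=0x8 */
	printf("base=0x%x m1=0x%x m2=0x%x size=0x%x\n",
	       f.base, f.m1, f.m2, f.size);
	return 0;
}
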
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
-#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
-#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
-#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
-#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
-#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
-#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
-#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
-#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
-#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
-#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
-#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
-#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
-#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6527
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6529
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
-#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
-#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
-#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
-#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
-#define QM_REG_PTRTBLOTHER_RT_SIZE 256
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
-#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
-#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
-#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
-#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
-#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
-#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
-#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
-#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
-#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
-#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
-#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
-#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
-#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
-#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
-#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
-#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
-#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
-#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
-#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
-#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
-#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
-#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
-#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
-#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
-#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
-#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
-#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
-#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
-#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
-#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
-#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
-#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
-#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
-#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
-#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
-#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
-#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
-#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
-#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
-#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
-#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
-#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
-#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
-#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
-#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
-#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
-#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
-#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
-#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
-#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
-#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
-#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
-#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
-#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
-#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
-#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
-#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
-#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
-#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
-#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
-#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
-#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
-#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
-#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
-#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 35389
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 35405
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 35439
-#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
-#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 36209
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 37233
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 37745
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 38257
-#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
-#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
-
-#define RUNTIME_ARRAY_SIZE 43022
-
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 1498
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929
+#define SRC_REG_FIRSTFREE_RT_OFFSET 5930
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 5932
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 5934
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 27969
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29358
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29425
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29426
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29427
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29428
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29429
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29430
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29431
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29432
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29433
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29434
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29435
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29436
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29437
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29438
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29439
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29440
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29441
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29442
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29443
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29444
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29445
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29446
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29447
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29448
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29449
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29450
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29451
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29452
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29453
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29454
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29455
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29456
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29457
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29458
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29459
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29460
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29461
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29462
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29463
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29464
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29465
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29466
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29467
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29468
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29469
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29470
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29471
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29472
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29473
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29474
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29475
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29476
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29477
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29478
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29479
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29480
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29481
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29482
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29483
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29484
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29485
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29486
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29487
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29488
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30029
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30286
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30288
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30320
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30336
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30370
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 30530
+#define QM_REG_WFQVPENABLE_RT_OFFSET 30531
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31044
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
+
+#define RUNTIME_ARRAY_SIZE 34472
/* Init Callbacks */
#define DMAE_READY_CB 0
@@ -5648,9 +5450,9 @@ struct e4_eth_conn_context {
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
@@ -5680,6 +5482,16 @@ enum eth_error_code {
ETH_FILTERS_VNI_ADD_FAIL_FULL,
ETH_FILTERS_VNI_ADD_FAIL_DUP,
ETH_FILTERS_GFT_UPDATE_FAIL,
+ ETH_RX_QUEUE_FAIL_LOAD_VF_DATA,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_ADD_FINLER_FAIL_MAGIC_NUM_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR,
MAX_ETH_ERROR_CODE
};
@@ -5703,6 +5515,11 @@ enum eth_event_opcode {
ETH_EVENT_RX_CREATE_GFT_ACTION,
ETH_EVENT_RX_GFT_UPDATE_FILTER,
ETH_EVENT_TX_QUEUE_UPDATE,
+ ETH_EVENT_RGFS_ADD_FILTER,
+ ETH_EVENT_RGFS_DEL_FILTER,
+ ETH_EVENT_TGFS_ADD_FILTER,
+ ETH_EVENT_TGFS_DEL_FILTER,
+ ETH_EVENT_GFS_COUNTERS_REPORT_REQUEST,
MAX_ETH_EVENT_OPCODE
};
@@ -5795,18 +5612,31 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_CREATE_GFT_ACTION,
ETH_RAMROD_GFT_UPDATE_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
+ ETH_RAMROD_RGFS_FILTER_ADD,
+ ETH_RAMROD_RGFS_FILTER_DEL,
+ ETH_RAMROD_TGFS_FILTER_ADD,
+ ETH_RAMROD_TGFS_FILTER_DEL,
+ ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST,
MAX_ETH_RAMROD_CMD_ID
};
/* Return code from eth sp ramrods */
struct eth_return_code {
u8 value;
-#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
-#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
-#define ETH_RETURN_CODE_RESERVED_MASK 0x3
-#define ETH_RETURN_CODE_RESERVED_SHIFT 5
-#define ETH_RETURN_CODE_RX_TX_MASK 0x1
-#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x1
+#define ETH_RETURN_CODE_RESERVED_SHIFT 6
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
+/* tx destination enum */
+enum eth_tx_dst_mode_config_enum {
+ ETH_TX_DST_MODE_CONFIG_DISABLE,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT,
+ MAX_ETH_TX_DST_MODE_CONFIG_ENUM
};
/* What to do in case an error occurs */
@@ -5833,8 +5663,10 @@ struct eth_tx_err_vals {
#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT 7
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0xFF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 8
};
/* vport rss configuration data */
@@ -5864,7 +5696,6 @@ struct eth_vport_rss_config {
u8 tbl_size;
__le32 reserved2[2];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
-
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
__le32 reserved3[2];
};
@@ -6066,7 +5897,7 @@ struct rx_update_gft_filter_data {
u8 inner_vlan_removal_en;
};
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for tx queue start ramrod */
struct tx_queue_start_ramrod_data {
__le16 sb_id;
u8 sb_index;
@@ -6079,16 +5910,14 @@ struct tx_queue_start_ramrod_data {
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 3
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x7
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 5
u8 pxp_st_hint;
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
@@ -6144,18 +5973,22 @@ struct vport_start_ramrod_data {
__le16 default_vlan;
u8 tx_switching_en;
u8 anti_spoofing_en;
-
u8 default_vlan_en;
-
u8 handle_ptp_pkts;
u8 silent_vlan_removal_en;
u8 untagged;
struct eth_tx_err_vals tx_err_behav;
-
u8 zero_placement_offset;
u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en;
+ u8 reserved0;
+ u8 reserved1;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
u8 wipe_inner_vlan_pri_en;
+ u8 reserved2[2];
struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
};
@@ -6715,19 +6548,6 @@ struct e4_xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
-/* GFT CAM line struct */
-struct gft_cam_line {
- __le32 camline;
-#define GFT_CAM_LINE_VALID_MASK 0x1
-#define GFT_CAM_LINE_VALID_SHIFT 0
-#define GFT_CAM_LINE_DATA_MASK 0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT 1
-#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
-};
-
/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {
__le32 camline;
@@ -6757,10 +6577,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-union gft_cam_line_union {
- struct gft_cam_line cam_line;
- struct gft_cam_line_mapped cam_line_mapped;
-};
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
@@ -7039,6 +6855,11 @@ struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[6];
+};
+
struct e4_ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
@@ -7048,8 +6869,8 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
@@ -7079,29 +6900,29 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
- __le32 sq_cons;
- __le32 dif_runt_value;
+ __le32 dif_rxmit_cons;
+ __le32 dif_rxmit_prod;
__le32 sge_index;
- __le32 reg5;
+ __le32 sq_cons;
u8 byte2;
u8 byte3;
- __le16 word1;
- __le16 word2;
+ __le16 dif_write_cons;
+ __le16 dif_write_prod;
__le16 word3;
- __le32 reg6;
- __le32 reg7;
+ __le32 dif_error_buffer_address_lo;
+ __le32 dif_error_buffer_address_hi;
};
/* RDMA task context */
@@ -7112,6 +6933,8 @@ struct e4_rdma_task_context {
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
};
@@ -7147,7 +6970,12 @@ struct rdma_create_cq_ramrod_data {
u8 pbl_log_page_size;
u8 toggle_bit;
__le16 int_timeout;
- __le16 reserved1;
+ u8 vf_id;
+ u8 flags;
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 0
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_MASK 0x7F
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_SHIFT 1
};
/* rdma deregister tid ramrod data */
@@ -7191,6 +7019,7 @@ enum rdma_fw_return_code {
RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
RDMA_RETURN_RESIZE_CQ_ERR,
RDMA_RETURN_NIG_DRAIN_REQ,
+ RDMA_RETURN_GENERAL_ERR,
MAX_RDMA_FW_RETURN_CODE
};
@@ -7204,7 +7033,10 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- __le32 reserved;
+ u8 searcher_mode;
+ u8 pvrdma_mode;
+ u8 max_num_ns_log;
+ u8 reserved;
};
/* rdma function init ramrod data */
@@ -7294,16 +7126,20 @@ struct rdma_resize_cq_ramrod_data {
#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
struct regpair pbl_addr;
struct regpair output_params_addr;
+ u8 vf_id;
+ u8 reserved1[7];
};
-/* The rdma storm context of Mstorm */
+/* The rdma SRQ context */
struct rdma_srq_context {
struct regpair temp[8];
};
@@ -7350,6 +7186,7 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
+/* The rdma XRC SRQ context */
struct rdma_xrc_srq_context {
struct regpair temp[9];
};
@@ -7531,12 +7368,12 @@ struct e4_xstorm_roce_conn_ag_ctx {
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
@@ -7860,9 +7697,9 @@ struct mstorm_roce_conn_st_ctx {
struct regpair temp[6];
};
-/* The roce storm context of Ystorm */
+/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {
- struct regpair temp[12];
+ struct regpair temp[14];
};
/* roce connection context */
@@ -7880,6 +7717,7 @@ struct e4_roce_conn_context {
struct mstorm_roce_conn_st_ctx mstorm_st_context;
struct regpair mstorm_st_padding[2];
struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
};
/* roce cqes statistics */
@@ -7934,12 +7772,17 @@ struct roce_create_qp_req_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
u8 stats_counter_id;
- u8 reserved3[6];
+ u8 vf_id;
+ u8 vport_id;
u8 flags2;
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+ u8 name_space;
+ u8 reserved3[3];
__le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -7967,8 +7810,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7995,10 +7840,14 @@ struct roce_create_qp_resp_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
__le16 low_latency_phy_queue;
- u8 reserved2[2];
+ u8 vf_id;
+ u8 vport_id;
__le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved3[3];
};
/* roce DCQCN received statistics */
@@ -8032,6 +7881,8 @@ struct roce_destroy_qp_resp_output_params {
/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
+ __le32 src_qp_id;
+ __le32 reserved;
};
/* roce error statistics */
@@ -8115,8 +7966,8 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
@@ -8162,8 +8013,8 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
@@ -8204,7 +8055,7 @@ struct roce_query_qp_req_ramrod_data {
/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {
__le32 psn;
- __le32 err_flag;
+ __le32 flags;
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
@@ -8271,12 +8122,12 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
u8 flags2;
@@ -8649,8 +8500,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
u8 flags5;
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
@@ -8663,13 +8514,13 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
+ __le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
__le32 orq_prod;
__le32 reg4;
- __le32 reg5;
- __le32 reg6;
+ __le32 dif_acked_cnt;
+ __le32 dif_cnt;
__le32 reg7;
__le32 reg8;
u8 tx_cqe_error_type;
@@ -8680,7 +8531,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le16 snd_sq_cons;
__le16 conn_dpi;
__le16 force_comp_cons;
- __le32 reg9;
+ __le32 dif_rxmit_acked_cnt;
__le32 reg10;
};
@@ -8955,10 +8806,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -9184,10 +9035,10 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -9914,7 +9765,7 @@ struct mstorm_iwarp_conn_st_ctx {
/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {
- __le32 reserved[24];
+ struct regpair reserved[14];
};
/* iwarp connection context */
@@ -9932,6 +9783,7 @@ struct e4_iwarp_conn_context {
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
};
/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
@@ -9984,7 +9836,8 @@ enum iwarp_eqe_async_opcode {
struct iwarp_eqe_data_mpa_async_completion {
__le16 ulp_data_len;
- u8 reserved[6];
+ u8 rtr_type_sent;
+ u8 reserved[5];
};
struct iwarp_eqe_data_tcp_async_completion {
@@ -10009,7 +9862,7 @@ enum iwarp_eqe_sync_opcode {
/* iWARP EQE completion status */
enum iwarp_fw_return_code {
- IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 6,
IWARP_CONN_ERROR_TCP_CONNECTION_RST,
IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
IWARP_CONN_ERROR_MPA_ERROR_REJECT,
@@ -10178,8 +10031,8 @@ struct iwarp_rxmit_stats_drv {
* offload ramrod.
*/
struct iwarp_tcp_offload_ramrod_data {
- struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp;
+ struct iwarp_offload_params iwarp;
};
/* iWARP MPA negotiation types */
@@ -11471,8 +11324,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
u8 flags3;
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
@@ -11494,8 +11347,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
@@ -11727,7 +11580,7 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
/* The trace in the buffer */
#define MFW_TRACE_EVENTID_MASK 0x00ffff
#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
#define MFW_TRACE_ENTRY_SIZE 3
struct mcp_trace {
@@ -12485,6 +12338,11 @@ enum resource_id_enum {
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -12675,7 +12533,10 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
@@ -13436,6 +13297,21 @@ enum nvm_image_type {
NVM_TYPE_FCOE_CFG = 0x1f,
NVM_TYPE_ETH_PHY_FW1 = 0x20,
NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
NVM_TYPE_MAX,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a4de9e3ef72c..4ab8cfaf63d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
/* DMAE */
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
- ((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
+ ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
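/* Illustrative sketch of the field accessors this conversion relies on.
 * The real GET_FIELD/SET_FIELD definitions are assumed to live in the
 * shared qed headers; each field NAME is described by a NAME_MASK and a
 * NAME_SHIFT define, roughly:
 *
 *   #define GET_FIELD(value, name) \
 *           (((value) >> name##_SHIFT) & name##_MASK)
 *
 *   #define SET_FIELD(value, name, flag)                              \
 *           do {                                                      \
 *                   (value) &= ~((u64)name##_MASK << name##_SHIFT);   \
 *                   (value) |= ((u64)(flag) & (u64)name##_MASK) <<    \
 *                              name##_SHIFT;                          \
 *           } while (0)
 */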
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
@@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
- opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
- : DMAE_CMD_SRC_MASK_PCIE) <<
- DMAE_CMD_SRC_SHIFT;
- src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
- p_params->src_pfid : p_hwfn->rel_pf_id;
- opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
- DMAE_CMD_SRC_PF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC,
+ (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
+ src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
- opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
- : DMAE_CMD_DST_MASK_PCIE) <<
- DMAE_CMD_DST_SHIFT;
- dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
- p_params->dst_pfid : p_hwfn->rel_pf_id;
- opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
- DMAE_CMD_DST_PF_ID_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_DST,
+ (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
+ dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
+
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
- opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
- opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
- DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
- opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
- opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+ /* swapping mode 3 - big endian */
+ SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
- port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
- p_params->port_id : p_hwfn->port_id;
- opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
+ port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
+ p_params->port_id : p_hwfn->port_id;
+ SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
/* reset source address in next go */
- opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
- DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
/* reset dest address in next go */
- opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
- DMAE_CMD_DST_ADDR_RESET_SHIFT);
+ SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
- opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
} else {
- opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
}
-
- if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
- opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
- opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
} else {
- opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d6430dfebd83..2f1049b0b93a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -44,9 +44,9 @@
#define CDU_VALIDATION_DEFAULT_CFG 61
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
- {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+ {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
@@ -61,6 +61,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED 100000
+
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
@@ -128,8 +131,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
-
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -140,6 +141,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units.
+ */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
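/* Worked example, using the pure LB line count defined above
 * (PBF_CMDQ_PURE_LB_LINES = 150):
 *   QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT
 *                        = 292 | QM_LINE_CRD_REG_SIGN_BIT
 */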
@@ -178,14 +182,14 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
ext_voq, wrr) \
do { \
typeof(map) __map; \
memset(&__map, 0, sizeof(__map)); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
- rl_valid); \
+ rl_valid ? 1 : 0);\
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
vp_pq_id); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
@@ -200,9 +204,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
- ((rl_valid) << 22) | ((rl) << 24))
+ ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+ (((rl) >> 8) << 9))
+
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+ XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id)
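/* Worked example of the widened RL field packing, with hypothetical
 * values vp = 5, pf = 2, tc = 1, port = 0, rl_valid = 1, rl = 0x1AB:
 *   PQ_INFO_ELEMENT(5, 2, 1, 0, 1, 0x1AB)
 *     = 0x5 | (2 << 12) | (1 << 16) | (0 << 20) | (1 << 22)
 *       | (0xAB << 24) | (1 << 9)
 *     = 0xAB412205
 * The low 8 RL bits land in bits 24-31, and the RL MSBs start at bit 9.
 */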
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -228,9 +235,6 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
STORE_RT_REG(p_hwfn,
QM_REG_RLPFVOQENABLE_RT_OFFSET,
(u32)voq_bit_mask);
- if (num_ext_voqs >= 32)
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
- (u32)(voq_bit_mask >> 32));
/* Write RL period */
STORE_RT_REG(p_hwfn,
@@ -259,12 +263,12 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
QM_WFQ_UPPER_BOUND);
}
-/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- vport_rl_en ? 1 : 0);
- if (vport_rl_en) {
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
@@ -331,8 +335,7 @@ static void qed_cmdq_lines_rt_init(
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs. In E5, 1/8 of the lines are reserved.
- * the lines for pure LB TC are subtracted.
+ * active physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
@@ -361,11 +364,30 @@ static void qed_cmdq_lines_rt_init(
ext_voq = qed_get_ext_voq(p_hwfn,
port_id,
PURE_LB_TC, max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn,
- ext_voq, PBF_CMDQ_PURE_LB_LINES);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
}
}
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ * is not enabled and allocated for each TC.
+ */
static void qed_btb_blocks_rt_init(
struct qed_hwfn *p_hwfn,
u8 max_ports_per_engine,
@@ -424,6 +446,34 @@ static void qed_btb_blocks_rt_init(
}
}
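/* Worked example of the BTB split described above, with illustrative
 * numbers (B = 338 guaranteed blocks, C = 4 physical TCs):
 *   a. Global headroom:  B = 338 - 38 = 300
 *   b. Pure LB VOQ:      MAX(38, 300 / 4.7) = 63, so B = 300 - 63 = 237
 *   c. Per physical TC:  237 / 4 = 59 blocks each
 */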
+/* Prepare runtime init values for the specified RL.
+ * Set max link speed (100Gbps) per rate limiter.
+ * Return -1 on error.
+ */
+static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT;
+ u32 inc_val;
+ u16 rl_id;
+
+ /* Go over all global RLs */
+ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+ inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -460,18 +510,17 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
- u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u16 *p_first_tx_pq_id, vport_id_in_pf;
struct qm_rf_pq_map_e4 tx_pq_map;
- bool is_vf_pq, rl_valid;
- u16 *p_first_tx_pq_id;
+ u8 tc_id = pq_params[i].tc_id;
+ bool is_vf_pq;
+ u8 ext_voq;
ext_voq = qed_get_ext_voq(p_hwfn,
pq_params[i].port_id,
tc_id,
p_params->max_phys_tcs_per_port);
is_vf_pq = (i >= p_params->num_pf_pqs);
- rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
@@ -492,21 +541,14 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
map_val);
}
- /* Check RL ID */
- if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- rl_valid = false;
- }
-
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
E4,
pq_id,
- rl_valid ? 1 : 0,
*p_first_tx_pq_id,
- rl_valid ? pq_params[i].vport_id : 0,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id,
ext_voq, pq_params[i].wrr_group);
/* Set PQ base address */
@@ -529,9 +571,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
p_params->pf_id,
tc_id,
pq_params[i].port_id,
- rl_valid ? 1 : 0,
- rl_valid ?
- pq_params[i].vport_id : 0);
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id);
qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
pq_info);
}
@@ -669,19 +710,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
- u8 num_vports,
+ u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id;
+ u16 vport_pq_id, i;
u32 inc_val;
- u8 tc, i;
+ u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].vport_wfq)
+ if (!vport_params[i].wfq)
continue;
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT WFQ weight configuration\n");
@@ -706,48 +747,6 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
-/* Prepare VPORT RL runtime init values for the specified VPORTs.
- * Return -1 on error.
- */
-static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 start_vport,
- u8 num_vports,
- u32 link_speed,
- struct init_qm_vport_params *vport_params)
-{
- u8 i, vport_id;
- u32 inc_val;
-
- if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- return -1;
- }
-
- /* Go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
- vport_params[i].vport_rl :
- link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_VP_RL_UPPER_BOUND(link_speed) |
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
- inc_val);
- }
-
- return 0;
-}
-
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -799,23 +798,20 @@ u32 qed_qm_pf_mem_size(u32 num_pf_cids,
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params)
{
- /* Init AFullOprtnstcCrdMask */
- u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (p_params->pf_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (p_params->vport_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (p_params->pf_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (p_params->vport_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask = 0;
+ /* Init AFullOprtnstcCrdMask */
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
+ QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
+ p_params->global_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
+ SET_FIELD(mask,
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* Enable/disable PF RL */
@@ -824,8 +820,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
/* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
- /* Enable/disable VPORT RL */
- qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ /* Enable/disable global RL */
+ qed_enable_global_rl(p_hwfn, p_params->global_rl_en);
/* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
@@ -842,6 +838,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+ qed_global_rl_rt_init(p_hwfn);
+
return 0;
}
@@ -853,7 +851,9 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
p_params->num_tids) *
QM_OTHER_PQS_PER_PF;
- u8 tc, i;
+ u16 i;
+ u8 tc;
+
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
@@ -878,16 +878,10 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
- /* Set VPORT WFQ */
+ /* Init VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
- /* Set VPORT RL */
- if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, p_params->link_speed,
- vport_params))
- return -1;
-
return 0;
}
@@ -925,18 +919,19 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
u16 vport_pq_id;
u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
+ /* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
@@ -948,28 +943,20 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed)
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
- u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u32 inc_val;
- if (vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
+ inc_val = QM_RL_INC_VAL(rate_limit);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
+ DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
return -1;
}
- inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
- qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
}
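/* Usage sketch, assuming the Mbps convention of QM_MAX_LINK_SPEED above
 * (e.g. cap rate limiter 5 at 25 Gbps):
 *
 *   qed_init_global_rl(p_hwfn, p_ptt, 5, 25000);
 */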
@@ -1013,7 +1000,6 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
-
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
do { \
typeof(var) *__p_var = &(var); \
@@ -1021,8 +1007,59 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
*__p_var = (*__p_var & ~BIT(__offset)) | \
((enable) ? BIT(__offset) : 0); \
} while (0)
-#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
-#define PRS_ETH_OUTPUT_FORMAT -46832
+
+#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, \
+ ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+/**
+ * @brief qed_dmae_to_grc - internal helper that writes from host memory to
+ * wide-bus registers (split registers are not supported yet)
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param p_data - pointer to source data
+ * @param addr - destination register address
+ * @param len_in_dwords - data length in dwords (u32)
+ */
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_data, u32 addr, u32 len_in_dwords)
+{
+ struct qed_dmae_params params = {};
+ int rc;
+
+ if (!p_data)
+ return -1;
+
+ /* Set DMAE params */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)(p_data),
+ addr, len_in_dwords, &params);
+
+ /* If the DMAE write failed, fall back to writing via GRC */
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+ }
+
+ return len_in_dwords;
+}
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
@@ -1166,8 +1203,8 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,6 +1245,8 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
+ struct regpair ram_line = { };
+
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1217,12 +1256,9 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- qed_wr(p_hwfn,
- p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- 0);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
@@ -1232,7 +1268,8 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { };
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1298,35 +1335,33 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ram_line_lo = 0;
- ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;
@@ -1334,24 +1369,17 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn,
p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ram_line_lo);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- ram_line_hi);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+ ram_line.lo = 0xffffffff;
+ ram_line.hi = 0x3ff;
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH,
+ sizeof(ram_line) / REG_SIZE);
/* Enable gft search */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
@@ -1544,3 +1572,144 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
}
}
+
+#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
+#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+
+static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_OVERLAY_BUF_ADDR_OFFSET;
+
+ default:
+ return 0;
+ }
+}
+
+struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const
+ fw_overlay_in_buf,
+ u32 buf_size_in_bytes)
+{
+ u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
+ struct phys_mem_desc *allocated_mem;
+
+ if (!buf_size)
+ return NULL;
+
+ allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!allocated_mem)
+ return NULL;
+
+ memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
+
+ /* For each Storm, set physical address in RAM */
+ while (buf_offset < buf_size) {
+ struct phys_mem_desc *storm_mem_desc;
+ struct fw_overlay_buf_hdr *hdr;
+ u32 storm_buf_size;
+ u8 storm_id;
+
+ hdr =
+ (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
+ storm_buf_size = GET_FIELD(hdr->data,
+ FW_OVERLAY_BUF_HDR_BUF_SIZE);
+ storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ storm_mem_desc = allocated_mem + storm_id;
+ storm_mem_desc->size = storm_buf_size * sizeof(u32);
+
+ /* Allocate physical memory for Storm's overlays buffer */
+ storm_mem_desc->virt_addr =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ &storm_mem_desc->phys_addr, GFP_KERNEL);
+ if (!storm_mem_desc->virt_addr)
+ break;
+
+ /* Skip overlays buffer header */
+ buf_offset += OVERLAY_HDR_SIZE_DWORDS;
+
+ /* Copy Storm's overlays buffer to allocated memory */
+ memcpy(storm_mem_desc->virt_addr,
+ &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);
+
+ /* Advance to next Storm */
+ buf_offset += storm_buf_size;
+ }
+
+ /* If memory allocation has failed, free all allocated memory */
+ if (buf_offset < buf_size) {
+ qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ return NULL;
+ }
+
+ return allocated_mem;
+}
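/* Input layout consumed by the loop above, reconstructed from the parsing
 * code (all sizes in dwords):
 *
 *   +---------------------+-------------------------+--------------+-----+
 *   | fw_overlay_buf_hdr  | storm N overlay data    | next hdr ... | ... |
 *   | (storm id, size)    | (storm_buf_size dwords) |              |     |
 *   +---------------------+-------------------------+--------------+-----+
 *
 * One header/payload pair per Storm, concatenated until the total buffer
 * size is exhausted.
 */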
+
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ u32 ram_addr, i;
+
+ /* Skip Storms with no FW overlays */
+ if (!storm_mem_desc->virt_addr)
+ continue;
+
+ /* Calculate overlay RAM GRC address of current PF */
+ ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
+ sizeof(dma_addr_t) * p_hwfn->rel_pf_id;
+
+ /* Write Storm's overlay physical address to RAM */
+ for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, ram_addr,
+ ((u32 *)&storm_mem_desc->phys_addr)[i]);
+ }
+}
+
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ if (!fw_overlay_mem)
+ return;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+
+ /* Free Storm's physical memory */
+ if (storm_mem_desc->virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ storm_mem_desc->virt_addr,
+ storm_mem_desc->phys_addr);
+ }
+
+ /* Free allocated virtual memory */
+ kfree(fw_overlay_mem);
+}
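The three helpers above form a complete lifecycle: qed_fw_overlay_mem_alloc() parses the firmware-provided image into per-Storm DMA buffers, qed_fw_overlay_init_ram() publishes each buffer's physical address into the Storm's fast memory, and qed_fw_overlay_mem_free() releases everything. Below is a minimal sketch of how a caller might wire them up; the fw_overlay_mem field on the hwfn and the exact call sites are assumptions made for illustration, while fw_overlays/fw_overlays_len are the fields this patch adds to qed_fw_data in qed_init_fw_data() further down.

/* Hedged sketch of the expected call flow; not lifted from this patch.
 * Assumes struct qed_hwfn gains a fw_overlay_mem pointer.
 */
static int example_fw_overlay_setup(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	struct qed_fw_data *fw = p_hwfn->cdev->fw_data;

	/* Parse the overlay image and allocate one DMA buffer per Storm */
	p_hwfn->fw_overlay_mem =
	    qed_fw_overlay_mem_alloc(p_hwfn, fw->fw_overlays,
				     fw->fw_overlays_len);
	if (!p_hwfn->fw_overlay_mem)
		return -ENOMEM;

	/* Write each buffer's physical address into Storm RAM */
	qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
	return 0;
}

static void example_fw_overlay_teardown(struct qed_hwfn *p_hwfn)
{
	qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
	p_hwfn->fw_overlay_mem = NULL;
}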
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index a868d7f88601..5a6e4ac4fef4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
- 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
- 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
- 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
- 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
- 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
- 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
- 0,
- 0,
- 0,
+ 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
+ 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
+ 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
+ 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
+ 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}
-/* Runtime configuration helpers */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
-{
- int i;
-
- for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data.b_valid[i] = false;
-}
-
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- u16 i, segment;
+ u16 i, j, segment;
int rc = 0;
/* Since not all RT entries are initialized, go over the RT and
@@ -121,6 +112,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ p_valid[i] = false;
continue;
}
@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* invalidate after writing */
+ for (j = i; j < i + segment; j++)
+ p_valid[j] = false;
+
/* Jump over the entire segment, including invalid entry */
i += segment;
}
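Note how these hunks compensate for the removal of qed_init_clear_rt_data() earlier in this file: instead of a separate pass that clears the whole b_valid[] array after an init run, each RT entry is now invalidated at the moment it is written, whether through the direct qed_wr() path or after a DMAE'd segment. A condensed sketch of the idea, with names simplified for illustration:

/* Illustrative consume-on-write pattern (simplified names): once an
 * entry has been applied to HW, drop its valid bit so a subsequent
 * init run does not re-apply stale data.
 */
static void example_apply_rt_entry(bool *p_valid, const u32 *p_init_val,
				   u32 idx, void (*write_fn)(u32 val))
{
	if (!p_valid[idx])
		return;

	write_fn(p_init_val[idx]);
	p_valid[idx] = false;	/* consumed; no full-array clear needed */
}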
@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
 * 3. p_hwfn->temp_data,
* 4. fill_count
*/
- params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);
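This is one of many places where the patch replaces a direct flags assignment with SET_FIELD() on the new QED_DMAE_PARAMS_* bitfields. For readers unfamiliar with the qed convention, the macro pairs each field name with _MASK/_SHIFT constants; the sketch below shows the general shape and is an illustration, not the driver's exact definition:

/* Illustrative only: qed-style field update. Each field FOO comes with
 * FOO_MASK and FOO_SHIFT; SET_FIELD clears the field, then ORs in the
 * new value.
 */
#define EXAMPLE_SET_FIELD(value, name, flag)				\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= (((u64)(flag) & (name ## _MASK)) <<		\
			    (name ## _SHIFT));				\
	} while (0)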
@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
+ bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
- bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
- b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ if (phase == PHASE_ENGINE &&
+ cmd->callback.callback_id == DMAE_READY_CB)
+ b_dmae = true;
break;
}
@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
+ offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
+ fw->fw_overlays = (u32 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
+ fw->fw_overlays_len = len;
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index 555dd086796d..e9e8ade50ed3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -81,14 +81,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_clear_rt_data - Clears the runtime init array.
- *
- *
- * @param p_hwfn
- */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
-
-/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
*
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 5585c18053ec..7245a615517a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- SET_FIELD(p_init->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
- p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
-
val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
- p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
- p_params->ll2_ooo_queue_id;
+ p_init->ll2_rx_queue_id =
+ p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
+ p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
@@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_conn->physical_q1 = cpu_to_le16(physical_q);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
- SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
- p_conn->layer_code);
-
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_update;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;
@@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
ucval = p_conn->remote_mac[1];
((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
ucval = p_conn->remote_mac[0];
@@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
- SET_FIELD(p_ramrod->hdr.flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
- p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;
DMA_REGPAIR_LE(p_ramrod->query_params_addr,
@@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
@@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.iscsi_empty;
- p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
- SET_FIELD(p_ramrod->flags,
- ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
-
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
@@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_ramrod = &p_ent->ramrod.iscsi_destroy;
- p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
-
rc = qed_spq_post(p_hwfn, p_ent, NULL);
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 65ec16a31658..d2fe61a5cf56 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{
p_ramrod->iwarp.ll2_ooo_q_index =
- RESC_START(p_hwfn, QED_LL2_QUEUE) +
- p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
@@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
+ /* SYN will use ctx based queues */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
@@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
/* Start OOO connection */
data.input.conn_type = QED_LL2_TYPE_OOO;
+ /* OOO/unaligned will use legacy ll2 queues (ram based) */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
data.input.mtu = params->max_mtu;
n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 19a1a58d60f8..037e5978787e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -962,7 +962,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.core_rx_queue_start;
-
+ memset(p_ramrod, 0, sizeof(*p_ramrod));
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
@@ -996,6 +996,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+ p_ramrod->zero_prod_flg = 1;
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1317,6 +1319,25 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
return 0;
}
+static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_acquire_data *data,
+ u8 *start_idx, u8 *last_idx)
+{
+	/* LL2 queue handles will be split as follows:
+	 * first the legacy queues, then the ctx based ones.
+ */
+ if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ *start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
+ } else {
+ /* QED_LL2_RX_TYPE_CTX */
+ *start_idx = QED_LL2_CTX_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
+ }
+}
+
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
@@ -1337,14 +1358,16 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
- u8 i, *p_tx_max;
+ u8 i, first_idx, last_idx, *p_tx_max;
int rc;
if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;
+ _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
+
/* Find a free connection to be used */
- for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+ for (i = first_idx; i < last_idx; i++) {
mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
if (p_hwfn->p_ll2_info[i].b_active) {
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
@@ -1448,6 +1471,7 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0;
+ int rc;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
@@ -1461,7 +1485,18 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
- return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ if (rc)
+ return rc;
+
+ if (p_ll2_conn->rx_queue.ctx_based) {
+ rc = qed_db_recovery_add(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ }
+
+ return rc;
}
static void
@@ -1475,13 +1510,41 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
+static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
+ u8 handle,
+ u8 ll2_queue_type)
+{
+ u8 qid;
+
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
+
+ /* QED_LL2_RX_TYPE_CTX
+ * FW distinguishes between the legacy queues (ram based) and the
+ * ctx based queues by the queue_id.
+ * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
+	 * and the queue ids above that are ctx based.
+ */
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
+ MAX_NUM_LL2_RX_RAM_QUEUES;
+
+ /* See comment on the acquire connection for how the ll2
+	 * queue handles are divided.
+ */
+ qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
+
+ return qid;
+}
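Given the split defined in qed_ll2.h below (three legacy handles followed by one ctx-based handle), the mapping is easiest to see with concrete numbers; the resource bases here are invented purely for illustration:

/* Worked example with made-up resource bases:
 *   resc_start[QED_LL2_RAM_QUEUE] = 8
 *   resc_start[QED_LL2_CTX_QUEUE] = 2
 *   MAX_NUM_LL2_RX_RAM_QUEUES     = 32
 *
 * handle 0..2 (legacy):   qid = 8 + handle        -> 8, 9, 10
 * handle 3   (ctx based): qid = 2 + 32 + (3 - 3)  -> 34
 *
 * Any qid below MAX_NUM_LL2_RX_RAM_QUEUES is treated by FW as a
 * RAM-based queue; qids at or above it are ctx based.
 */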
+
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct qed_hwfn *p_hwfn = cxt;
- struct qed_ll2_info *p_ll2_conn;
+ struct e4_core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
+ struct qed_cxt_info cxt_info;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
@@ -1539,13 +1602,46 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
if (rc)
goto out;
+ cxt_info.iid = p_ll2_conn->cid;
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_ll2_conn->cid);
+ goto out;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ memset(p_cxt, 0, sizeof(*p_cxt));
- qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
+ p_ll2_conn->input.rx_conn_type);
p_ll2_conn->queue_id = qid;
p_ll2_conn->tx_stats_id = qid;
- p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_LL2_RX_PRODS_OFFSET(qid);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
+ p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+
+ if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ p_rx->set_prod_addr = p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ } else {
+ /* QED_LL2_RX_TYPE_CTX - using doorbell */
+ p_rx->ctx_based = 1;
+
+ p_rx->set_prod_addr = p_hwfn->doorbells +
+ p_hwfn->dpi_start_offset +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
+
+ /* prepare db data */
+ p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
+ }
+
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);
@@ -1556,7 +1652,6 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
DQ_XCM_CORE_TX_BD_PROD_CMD);
p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
goto out;
@@ -1590,7 +1685,7 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
struct qed_ll2_rx_packet *p_curp)
{
struct qed_ll2_rx_packet *p_posting_packet = NULL;
- struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ struct core_ll2_rx_prod rx_prod = { 0, 0 };
bool b_notify_fw = false;
u16 bd_prod, cq_prod;
@@ -1615,13 +1710,27 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
- rx_prod.bd_prod = cpu_to_le16(bd_prod);
- rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ if (p_rx->ctx_based) {
+ /* update producer by giving a doorbell */
+ p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
+ p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
+ DIRECT_REG_WR64(p_rx->set_prod_addr,
+ *((u64 *)&p_rx->db_data));
+ } else {
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
- /* Make sure chain element is updated before ringing the doorbell */
- dma_wmb();
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
- DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ }
}
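One detail worth calling out in the function above: on the ctx-based path the whole core_pwm_prod_update_data structure is pushed in a single 64-bit doorbell write, and the dma_wmb() in both branches guarantees the chain element is globally visible before FW is told to fetch it. A comment-style recap of the code above, not new behavior:

/* Ordering in qed_ll2_post_rx_buffer_notify_fw():
 *   1. update BD/CQE producers in host memory (or in db_data.prod)
 *   2. dma_wmb()  - chain element must be visible before the doorbell
 *   3. DIRECT_REG_WR64()/DIRECT_REG_WR() - single producer update
 */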
int qed_ll2_post_rx_buffer(void *cxt,
@@ -1965,6 +2074,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+
+ if (p_ll2_conn->rx_queue.ctx_based)
+ qed_db_recovery_del(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data);
+
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 5f01fbd3c073..288642d526b7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -46,6 +46,18 @@
#include "qed_sp.h"
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+/* LL2 queue handles will be split as follows:
+ * first the legacy queues, then the ctx based queues.
+ */
+#define QED_MAX_NUM_OF_LL2_CONNS_PF (4)
+#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3)
+
+#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \
+ (QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)
+
+#define QED_LL2_LEGACY_CONN_BASE_PF 0
+#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
+
struct qed_ll2_rx_packet {
struct list_head list_entry;
@@ -79,6 +91,7 @@ struct qed_ll2_rx_queue {
struct qed_chain rxq_chain;
struct qed_chain rcq_chain;
u8 rx_sb_index;
+ u8 ctx_based;
bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;
@@ -86,6 +99,7 @@ struct qed_ll2_rx_queue {
struct list_head posting_descq;
struct qed_ll2_rx_packet *descq_array;
void __iomem *set_prod_addr;
+ struct core_pwm_prod_update_data db_data;
};
struct qed_ll2_tx_queue {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 38f7f40b3a4d..2c189c637cca 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -2637,7 +2637,7 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
if (!ptt)
return -EAGAIN;
- rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
+ rc = qed_dbg_grc_config(hwfn, cfg_id, val);
qed_ptt_release(hwfn, ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 36ddb89856a8..280527cc0578 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,6 +48,8 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
+#define GRCBASE_MCP 0xe00000
+
#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -3165,6 +3167,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
+ case QED_NVM_IMAGE_MDUMP:
+ type = NVM_TYPE_MDUMP;
+ break;
case QED_NVM_IMAGE_NVM_CFG1:
type = NVM_TYPE_NVM_CFG1;
break;
@@ -3261,9 +3266,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
- case QED_LL2_QUEUE:
+ case QED_LL2_RAM_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
+ case QED_LL2_CTX_QUEUE:
+ mfw_res_id = RESOURCE_LL2_CQS_E;
+ break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 0dacf2c18c09..3e613058e225 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -44,8 +44,8 @@
/* Add/subtract the Adjustment_Value when making a Drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
-/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
-#define QED_PTP_UCAST_PARAM_MASK 0xF
+/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK 0x70F
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 60f850c3bdd6..3dcb6ff58e73 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -178,6 +178,8 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \
@@ -212,6 +214,8 @@
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
+#define DBG_REG_TIMESTAMP_VALID_EN \
+ 0x010b58UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \
@@ -350,6 +354,10 @@
0x24000cUL
#define PSWRQ2_REG_ILT_MEMORY \
0x260000UL
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \
+ 15200
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \
+ 22000
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -1453,6 +1461,8 @@
0x1401404UL
#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
+#define XSEM_REG_DBG_GPRE_VECT \
+ 0x1401410UL
#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
@@ -1465,6 +1475,8 @@
0x1501404UL
#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
+#define YSEM_REG_DBG_GPRE_VECT \
+ 0x1501410UL
#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
@@ -1479,6 +1491,8 @@
0x1601404UL
#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
+#define PSEM_REG_DBG_GPRE_VECT \
+ 0x1601410UL
#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
@@ -1493,6 +1507,8 @@
0x1701404UL
#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
+#define TSEM_REG_DBG_GPRE_VECT \
+ 0x1701410UL
#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
@@ -1507,12 +1523,16 @@
0x1801404UL
#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
+#define MSEM_REG_DBG_GPRE_VECT \
+ 0x1801410UL
#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
@@ -1521,14 +1541,26 @@
0x1901404UL
#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
+#define USEM_REG_DBG_GPRE_VECT \
+ 0x1901410UL
#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
+#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
+ 0x000748UL
+#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
+ 0x00074cUL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
+ 0x000750UL
+#define SEM_FAST_REG_DEBUG_ACTIVE \
+ 0x000740UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE \
+ 0x000768UL
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
@@ -1583,14 +1615,20 @@
0x181530UL
#define DBG_REG_DBG_BLOCK_ON \
0x010454UL
+#define DBG_REG_FILTER_ENABLE \
+ 0x0109d0UL
#define DBG_REG_FRAMING_MODE \
0x010058UL
+#define DBG_REG_TRIGGER_ENABLE \
+ 0x01054cUL
#define SEM_FAST_REG_VFC_DATA_WR \
0x000b40UL
#define SEM_FAST_REG_VFC_ADDR \
0x000b44UL
#define SEM_FAST_REG_VFC_DATA_RD \
0x000b48UL
+#define SEM_FAST_REG_VFC_STATUS \
+ 0x000b4cUL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
#define RSS_REG_RSS_RAM_DATA_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index e49fada85410..37e70562a964 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
- rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 96ab77ae6af5..b7b4fbbbccfe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -120,9 +120,7 @@ union ramrod_data {
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;
- struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
- struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 7e0b795230b2..900bc603e30a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -331,8 +331,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
u8 page_cnt, i;
+ int rc;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -447,7 +447,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -471,7 +471,7 @@ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EOPNOTSUPP;
+ int rc;
if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
DP_INFO(p_hwfn, "Invalid priority type %d\n",
@@ -509,7 +509,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
@@ -546,7 +546,7 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index dcb5c917f373..66876af814c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -352,7 +352,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
/* propagate bulletin board via dmae to vm memory */
memset(&params, 0, sizeof(params));
- params.flags = QED_DMAE_FLAG_VF_DST;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = p_vf->abs_vf_id;
return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,
@@ -1225,8 +1225,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
eng_vf_id = p_vf->abs_vf_id;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = QED_DMAE_FLAG_VF_DST;
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = eng_vf_id;
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
@@ -4103,8 +4103,9 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
if (!vf_info)
return -EINVAL;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
params.src_vfid = vf_info->abs_vf_id;
if (qed_dmae_host2host(p_hwfn, ptt,
@@ -4354,9 +4355,9 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
- struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
+ u16 rl_id;
int rc;
vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
@@ -4367,10 +4368,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
- return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
- p_link->speed);
+	rl_id = abs_vp_id;	/* the vport id is used as the rate limiter id */
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
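The hunk above swaps the per-vport rate limiter, which had to be parameterized by the current link speed, for the new global rate-limiter table. A comment-style summary of what changed, drawn from the visible diff:

/* Before: qed_init_vport_rl(hwfn, ptt, vport_id, rate, link_speed)
 * After:  qed_init_global_rl(hwfn, ptt, rl_id, rate)
 * The VF's absolute vport id doubles as its rl_id, so no link-speed
 * plumbing from the leading hwfn is needed anymore.
 */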
static int
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index d6cfe4ffbaf3..d1ce4531d01a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
netif_addr_lock_bh(ndev);
mc_count = netdev_mc_count(ndev);
- if (mc_count < 64) {
+ if (mc_count <= 64) {
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(temp, ha->addr);
temp += ETH_ALEN;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 004c0bfec41d..c6c20776b474 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -848,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev,
qede_set_gro_params(edev, tpa_info->skb, cqe);
cons_buf: /* We still need to handle bd_len_list to consume buffers */
- if (likely(cqe->ext_bd_len_list[0]))
+ if (likely(cqe->bw_ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
- le16_to_cpu(cqe->ext_bd_len_list[0]));
+ le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
- if (unlikely(cqe->ext_bd_len_list[1])) {
+ if (unlikely(cqe->bw_ext_bd_len_list[1])) {
DP_ERR(edev,
- "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 481b096e984d..34fa3917eb33 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->rx_buf_seg_size = roundup_pow_of_two(size);
} else {
rxq->rx_buf_seg_size = PAGE_SIZE;
+ edev->ndev->features &= ~NETIF_F_GRO_HW;
}
/* Allocate the parallel driver ring for Rx buffers */
@@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
}
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
if (!edev->gro_disable)
qede_set_tpa_param(rxq);
err:
@@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, queue_id);
}
-
- edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index f815435cf106..4c7f7a7fc151 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -247,6 +247,7 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
DP_ERR(edev, "One-step timestamping is not supported\n");
return -ERANGE;
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b4b8ba00ee01..0fade19e00d4 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
int err;
for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+
skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!skb)) {
@@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
ql_free_large_buffers(qdev);
return -ENOMEM;
} else {
-
- lrg_buf_cb = &qdev->lrg_buf[i];
- memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
lrg_buf_cb->index = i;
- lrg_buf_cb->skb = skb;
/*
* We save some space to copy the ethhdr from first
* buffer
@@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}
+ lrg_buf_cb->skb = skb;
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
@@ -3602,7 +3602,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-static void ql3xxx_tx_timeout(struct net_device *ndev)
+static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index a496390b8632..07f9067affc6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
+ cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c07438db30ba..9dd6cb36f366 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -56,7 +56,7 @@ static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
-static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
@@ -3068,7 +3068,7 @@ static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
}
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index afa10a163da1..f34ae8c75bc5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
+ cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
+ cond_resched();
}
fw_dump->clr = 1;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 98f92268cbaa..18b0c7a2d6dc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -282,25 +282,13 @@ static int emac_close(struct net_device *netdev)
}
/* Respond to a TX hang */
-static void emac_tx_timeout(struct net_device *netdev)
+static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct emac_adapter *adpt = netdev_priv(netdev);
schedule_work(&adpt->work_thread);
}
-/* IOCTL support for the interface */
-static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- if (!netdev->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
/**
* emac_update_hw_stats - read the EMAC stat registers
*
@@ -387,7 +375,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = emac_change_mtu,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_get_stats64 = emac_get_stats64,
.ndo_set_features = emac_set_features,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index baac016f3ec0..5a3b65a6eb4f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -785,7 +785,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
}
static void
-qcaspi_netdev_tx_timeout(struct net_device *dev)
+qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qcaspi *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 0981068504fa..375a844cd27c 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -248,7 +248,7 @@ out:
return NETDEV_TX_OK;
}
-static void qcauart_netdev_tx_timeout(struct net_device *dev)
+static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qcauart *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 274e5b4bc4ac..f5ecc410ff85 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -410,7 +410,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(TM2TX, ioaddr + MTPR);
}
-static void r6040_tx_timeout(struct net_device *dev)
+static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
@@ -498,14 +498,6 @@ static int r6040_close(struct net_device *dev)
return 0;
}
-static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
@@ -957,7 +949,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_rx_mode = r6040_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = r6040_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4f910c4f67b0..60d342f82fb3 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1235,7 +1235,7 @@ static int cp_close (struct net_device *dev)
return 0;
}
-static void cp_tx_timeout(struct net_device *dev)
+static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 55d01266e615..5caeb8368eab 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -642,7 +642,7 @@ static int mdio_read (struct net_device *dev, int phy_id, int location);
static void mdio_write (struct net_device *dev, int phy_id, int location,
int val);
static void rtl8139_start_thread(struct rtl8139_private *tp);
-static void rtl8139_tx_timeout (struct net_device *dev);
+static void rtl8139_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void rtl8139_init_ring (struct net_device *dev);
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
struct net_device *dev);
@@ -1700,7 +1700,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_bh(&tp->rx_lock);
}
-static void rtl8139_tx_timeout (struct net_device *dev)
+static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8139_private *tp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index d5304bad2372..2e1d78b106b0 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
-r8169-objs += r8169_main.o r8169_firmware.o
+r8169-objs += r8169_main.o r8169_firmware.o r8169_phy_config.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 58e0ca9093d3..9e3b35c97e63 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -204,7 +204,7 @@ static void net_rx(struct net_device *dev);
static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
static int net_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-static void tx_timeout(struct net_device *dev);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
/* A list of all installed ATP devices, for removing the driver module. */
@@ -533,7 +533,7 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad
outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
long ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
new file mode 100644
index 000000000000..22a6a057b11e
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* r8169.h: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/types.h>
+#include <linux/phy.h>
+
+enum mac_version {
+ /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
+ RTL_GIGA_MAC_VER_02,
+ RTL_GIGA_MAC_VER_03,
+ RTL_GIGA_MAC_VER_04,
+ RTL_GIGA_MAC_VER_05,
+ RTL_GIGA_MAC_VER_06,
+ RTL_GIGA_MAC_VER_07,
+ RTL_GIGA_MAC_VER_08,
+ RTL_GIGA_MAC_VER_09,
+ RTL_GIGA_MAC_VER_10,
+ RTL_GIGA_MAC_VER_11,
+ RTL_GIGA_MAC_VER_12,
+ RTL_GIGA_MAC_VER_13,
+ RTL_GIGA_MAC_VER_14,
+ RTL_GIGA_MAC_VER_15,
+ RTL_GIGA_MAC_VER_16,
+ RTL_GIGA_MAC_VER_17,
+ RTL_GIGA_MAC_VER_18,
+ RTL_GIGA_MAC_VER_19,
+ RTL_GIGA_MAC_VER_20,
+ RTL_GIGA_MAC_VER_21,
+ RTL_GIGA_MAC_VER_22,
+ RTL_GIGA_MAC_VER_23,
+ RTL_GIGA_MAC_VER_24,
+ RTL_GIGA_MAC_VER_25,
+ RTL_GIGA_MAC_VER_26,
+ RTL_GIGA_MAC_VER_27,
+ RTL_GIGA_MAC_VER_28,
+ RTL_GIGA_MAC_VER_29,
+ RTL_GIGA_MAC_VER_30,
+ RTL_GIGA_MAC_VER_31,
+ RTL_GIGA_MAC_VER_32,
+ RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_VER_34,
+ RTL_GIGA_MAC_VER_35,
+ RTL_GIGA_MAC_VER_36,
+ RTL_GIGA_MAC_VER_37,
+ RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
+ RTL_GIGA_MAC_VER_42,
+ RTL_GIGA_MAC_VER_43,
+ RTL_GIGA_MAC_VER_44,
+ RTL_GIGA_MAC_VER_45,
+ RTL_GIGA_MAC_VER_46,
+ RTL_GIGA_MAC_VER_47,
+ RTL_GIGA_MAC_VER_48,
+ RTL_GIGA_MAC_VER_49,
+ RTL_GIGA_MAC_VER_50,
+ RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_52,
+ RTL_GIGA_MAC_VER_60,
+ RTL_GIGA_MAC_VER_61,
+ RTL_GIGA_MAC_NONE
+};
+
+struct rtl8169_private;
+
+void r8169_apply_firmware(struct rtl8169_private *tp);
+u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
+u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr);
+void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ enum mac_version ver);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 67a4d5d45e3a..a2168a14794c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -31,6 +31,7 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include "r8169.h"
#include "r8169_firmware.h"
#define MODULENAME "r8169"
@@ -84,65 +85,6 @@
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
-enum mac_version {
- /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
- RTL_GIGA_MAC_VER_02,
- RTL_GIGA_MAC_VER_03,
- RTL_GIGA_MAC_VER_04,
- RTL_GIGA_MAC_VER_05,
- RTL_GIGA_MAC_VER_06,
- RTL_GIGA_MAC_VER_07,
- RTL_GIGA_MAC_VER_08,
- RTL_GIGA_MAC_VER_09,
- RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
- RTL_GIGA_MAC_VER_12,
- RTL_GIGA_MAC_VER_13,
- RTL_GIGA_MAC_VER_14,
- RTL_GIGA_MAC_VER_15,
- RTL_GIGA_MAC_VER_16,
- RTL_GIGA_MAC_VER_17,
- RTL_GIGA_MAC_VER_18,
- RTL_GIGA_MAC_VER_19,
- RTL_GIGA_MAC_VER_20,
- RTL_GIGA_MAC_VER_21,
- RTL_GIGA_MAC_VER_22,
- RTL_GIGA_MAC_VER_23,
- RTL_GIGA_MAC_VER_24,
- RTL_GIGA_MAC_VER_25,
- RTL_GIGA_MAC_VER_26,
- RTL_GIGA_MAC_VER_27,
- RTL_GIGA_MAC_VER_28,
- RTL_GIGA_MAC_VER_29,
- RTL_GIGA_MAC_VER_30,
- RTL_GIGA_MAC_VER_31,
- RTL_GIGA_MAC_VER_32,
- RTL_GIGA_MAC_VER_33,
- RTL_GIGA_MAC_VER_34,
- RTL_GIGA_MAC_VER_35,
- RTL_GIGA_MAC_VER_36,
- RTL_GIGA_MAC_VER_37,
- RTL_GIGA_MAC_VER_38,
- RTL_GIGA_MAC_VER_39,
- RTL_GIGA_MAC_VER_40,
- RTL_GIGA_MAC_VER_41,
- RTL_GIGA_MAC_VER_42,
- RTL_GIGA_MAC_VER_43,
- RTL_GIGA_MAC_VER_44,
- RTL_GIGA_MAC_VER_45,
- RTL_GIGA_MAC_VER_46,
- RTL_GIGA_MAC_VER_47,
- RTL_GIGA_MAC_VER_48,
- RTL_GIGA_MAC_VER_49,
- RTL_GIGA_MAC_VER_50,
- RTL_GIGA_MAC_VER_51,
- RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_60,
- RTL_GIGA_MAC_VER_61,
- RTL_GIGA_MAC_NONE
-};
-
-#define JUMBO_1K ETH_DATA_LEN
#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
@@ -492,6 +434,7 @@ enum rtl_register_content {
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
+ EnAnaPLL = (1 << 14), // 8169
Normal_mode = (1 << 13), // unused
Force_half_dup = (1 << 12), // 8168 8101
Force_rxflow_en = (1 << 11), // 8168 8101
@@ -1078,52 +1021,6 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
}
}
-static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
-{
- rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
-}
-
-static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
-{
- int val;
-
- val = rtl_readphy(tp, reg_addr);
- rtl_writephy(tp, reg_addr, (val & ~m) | p);
-}
-
-static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
- int reg, u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0007);
-
- __phy_write(phydev, 0x1e, extpage);
- __phy_modify(phydev, reg, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
-static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
- u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0005);
-
- __phy_write(phydev, 0x05, parm);
- __phy_modify(phydev, 0x06, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
-static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
- u16 mask, u16 val)
-{
- int oldpage = phy_select_page(phydev, 0x0a43);
-
- __phy_write(phydev, 0x13, parm);
- __phy_modify(phydev, 0x14, mask, val);
-
- phy_restore_page(phydev, oldpage, 0);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1372,7 +1269,7 @@ DECLARE_RTL_COND(rtl_efusear_cond)
return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
}
-static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
@@ -1596,7 +1493,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
if (dev->mtu > TD_MSS_MAX)
features &= ~NETIF_F_ALL_TSO;
- if (dev->mtu > JUMBO_1K &&
+ if (dev->mtu > ETH_DATA_LEN &&
tp->mac_version > RTL_GIGA_MAC_VER_06)
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
@@ -2268,22 +2165,6 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
}
}
-struct phy_reg {
- u16 reg;
- u16 val;
-};
-
-static void __rtl_writephy_batch(struct rtl8169_private *tp,
- const struct phy_reg *regs, int len)
-{
- while (len-- > 0) {
- rtl_writephy(tp, regs->reg, regs->val);
- regs++;
- }
-}
-
-#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
-
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
@@ -2293,7 +2174,7 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
}
}
-static void rtl_apply_firmware(struct rtl8169_private *tp)
+void r8169_apply_firmware(struct rtl8169_private *tp)
{
/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
if (tp->rtl_fw)
@@ -2315,594 +2196,6 @@ static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
- r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
-}
-
-static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
-{
- phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
-}
-
-static void rtl8168h_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168g_config_eee_phy(tp);
-
- phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
- phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
-}
-
-static void rtl8125_config_eee_phy(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168h_config_eee_phy(tp);
-
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
-}
-
-static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x06, 0x006e },
- { 0x08, 0x0708 },
- { 0x15, 0x4000 },
- { 0x18, 0x65c7 },
-
- { 0x1f, 0x0001 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x0000 },
-
- { 0x03, 0xff41 },
- { 0x02, 0xdf60 },
- { 0x01, 0x0140 },
- { 0x00, 0x0077 },
- { 0x04, 0x7800 },
- { 0x04, 0x7000 },
-
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf0f9 },
- { 0x04, 0x9800 },
- { 0x04, 0x9000 },
-
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xa000 },
-
- { 0x03, 0xff41 },
- { 0x02, 0xdf20 },
- { 0x01, 0x0140 },
- { 0x00, 0x00bb },
- { 0x04, 0xb800 },
- { 0x04, 0xb000 },
-
- { 0x03, 0xdf41 },
- { 0x02, 0xdc60 },
- { 0x01, 0x6340 },
- { 0x00, 0x007d },
- { 0x04, 0xd800 },
- { 0x04, 0xd000 },
-
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x100a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
-
- { 0x1f, 0x0000 },
- { 0x0b, 0x0000 },
- { 0x00, 0x9200 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0);
-}
-
-static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
-{
- struct pci_dev *pdev = tp->pci_dev;
-
- if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
- (pdev->subsystem_device != 0xe000))
- return;
-
- phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
-}
-
-static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x04, 0x0000 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x9000 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0xa000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x10, 0xf41b },
- { 0x14, 0xfb54 },
- { 0x18, 0xf5c7 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl8169scd_hw_phy_config_quirk(tp);
-}
-
-static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x04, 0x0000 },
- { 0x03, 0x00a1 },
- { 0x02, 0x0008 },
- { 0x01, 0x0120 },
- { 0x00, 0x1000 },
- { 0x04, 0x0800 },
- { 0x04, 0x9000 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0xa000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0xff95 },
- { 0x00, 0xba00 },
- { 0x04, 0xa800 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0x0000 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0x8480 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x18, 0x67c7 },
- { 0x04, 0x2000 },
- { 0x03, 0x002f },
- { 0x02, 0x4360 },
- { 0x01, 0x0109 },
- { 0x00, 0x3022 },
- { 0x04, 0x2800 },
- { 0x1f, 0x0000 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
-
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0001);
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_writephy(tp, 0x10, 0xf41b);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b);
-}
-
-static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write(tp->phydev, 0x1d, 0x0f00);
- phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8);
-}
-
-static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_set_bits(tp->phydev, 0x14, BIT(5));
- phy_set_bits(tp->phydev, 0x0d, BIT(5));
- phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98);
-}
-
-static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x1f, 0x0002 },
- { 0x00, 0x88d4 },
- { 0x01, 0x82b1 },
- { 0x03, 0x7002 },
- { 0x08, 0x9e30 },
- { 0x09, 0x01f0 },
- { 0x0a, 0x5500 },
- { 0x0c, 0x00c8 },
- { 0x1f, 0x0003 },
- { 0x12, 0xc096 },
- { 0x16, 0x000a },
- { 0x1f, 0x0000 },
- { 0x1f, 0x0000 },
- { 0x09, 0x2000 },
- { 0x09, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x03, 0x802f },
- { 0x02, 0x4f02 },
- { 0x01, 0x0409 },
- { 0x00, 0xf099 },
- { 0x04, 0x9800 },
- { 0x04, 0x9000 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x7eb8 },
- { 0x06, 0x0761 },
- { 0x1f, 0x0003 },
- { 0x16, 0x0f0a },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0001 },
- { 0x12, 0x2300 },
- { 0x1d, 0x3d98 },
- { 0x1f, 0x0002 },
- { 0x0c, 0x7eb8 },
- { 0x06, 0x5461 },
- { 0x1f, 0x0003 },
- { 0x16, 0x0f0a },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_patchphy(tp, 0x14, 1 << 5);
- rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
-
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
-
- /*
-	 * Cannot link at 1Gbps with a bad cable
-	 * Decrease SNR threshold from 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
-};
-
-static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
-};
-
-static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val)
-{
- u16 reg_val;
-
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x001b);
- reg_val = rtl_readphy(tp, 0x06);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (reg_val != val)
- netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
- else
- rtl_apply_firmware(tp);
-}
-
-static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
-
- /*
- * Rx Error Issue
- * Fine Tune Switching regulator parameter
- */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
- rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
-
- if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
-
- val = rtl_readphy(tp, 0x0d);
-
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- rtl_writephy(tp, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- rtl_writephy(tp, 0x0d, val | set[i]);
- }
- } else {
- phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662);
- r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662);
- }
-
- /* RSET couple improve */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_patchphy(tp, 0x0d, 0x0300);
- rtl_patchphy(tp, 0x0f, 0x0010);
-
- /* Fine tune PLL performance */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
- rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168d_apply_firmware_cond(tp, 0xbf00);
-}
-
-static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
-
- if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
-
- val = rtl_readphy(tp, 0x0d);
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- rtl_writephy(tp, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- rtl_writephy(tp, 0x0d, val | set[i]);
- }
- } else {
- phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642);
- r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642);
- }
-
- /* Fine tune PLL performance */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
- rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
-
- /* Switching regulator Slew rate */
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_patchphy(tp, 0x0f, 0x0017);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168d_apply_firmware_cond(tp, 0xb300);
-}
-
-static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x10, 0x0008 },
- { 0x0d, 0x006c },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0xa4d8 },
- { 0x09, 0x281c },
- { 0x07, 0x2883 },
- { 0x0a, 0x6b35 },
- { 0x1d, 0x3da4 },
- { 0x1c, 0xeffd },
- { 0x14, 0x7f52 },
- { 0x18, 0x7fc6 },
- { 0x08, 0x0601 },
- { 0x06, 0x4063 },
- { 0x10, 0xf074 },
- { 0x1f, 0x0003 },
- { 0x13, 0x0789 },
- { 0x12, 0xf4bd },
- { 0x1a, 0x04fd },
- { 0x14, 0x84b0 },
- { 0x1f, 0x0000 },
- { 0x00, 0x9200 },
-
- { 0x1f, 0x0005 },
- { 0x01, 0x0340 },
- { 0x1f, 0x0001 },
- { 0x04, 0x4000 },
- { 0x03, 0x1d21 },
- { 0x02, 0x0c32 },
- { 0x01, 0x0200 },
- { 0x00, 0x5554 },
- { 0x04, 0x4800 },
- { 0x04, 0x4000 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
- { 0x1f, 0x0000 },
- };
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000);
-}
-
-static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
-{
- phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0);
- r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040);
- phy_set_bits(tp->phydev, 0x0d, BIT(5));
-}
-
-static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- /* Channel estimation fine tune */
- { 0x1f, 0x0001 },
- { 0x0b, 0x6c20 },
- { 0x07, 0x2872 },
- { 0x1c, 0xefff },
- { 0x1f, 0x0003 },
- { 0x14, 0x6420 },
- { 0x1f, 0x0000 },
- };
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Enable Delay cap */
- r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
-
- rtl_writephy_batch(tp, phy_reg_init);
-
- /* Update PFM & 10M TX idle timer */
- r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
-
- r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
-
- /* DCO enable for 10M IDLE Power */
- r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
-
- /* For impedance matching */
- phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
- r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
-
- r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
- phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
const u16 w[] = {
@@ -2917,698 +2210,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
}
-static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Enable Delay cap */
- r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Green Setting */
- r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
- r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
- r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
-
- /* For 4-corner performance improve */
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b80);
- rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- /* improve 10M EEE waveform */
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-
- rtl8168f_config_eee_phy(tp);
- rtl_enable_eee(tp);
-
- /* Green feature */
- rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
- rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
- rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
-}
-
-static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* For 4-corner performance improve */
- r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
-
- /* PHY auto speed down */
- r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
- phy_set_bits(phydev, 0x14, BIT(15));
-
- /* Improve 10M EEE waveform */
- r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
-
- rtl8168f_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
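+/* Read the ADC bias offset via MAC OCP registers. This stays in
+ * r8169_main.c (non-static) because it needs MAC register access;
+ * presumably the relocated 8168h PHY config code calls it.
+ */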
+u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Modify green table for giga & fnet */
- r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
-
- /* Modify green table for 10M */
- r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
-
-	/* Disable high-impedance detection (RTCT) */
- phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
-
- rtl8168f_hw_phy_config(tp);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-}
-
-static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_apply_firmware(tp);
-
- rtl8168f_hw_phy_config(tp);
-}
-
-static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl_apply_firmware(tp);
-
- rtl8168f_hw_phy_config(tp);
-
- /* Improve 2-pair detection performance */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
-
- /* Channel estimation fine tune */
- phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
-
- /* Modify green table for giga & fnet */
- r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
- r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
- r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
-
- /* Modify green table for 10M */
- r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
-
-	/* Disable high-impedance detection (RTCT) */
- phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
-
- /* Modify green table for giga */
- r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
- r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
- r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
- r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
- r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
-
- /* uc same-seed solution */
- r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
-
- /* Green feature */
- rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
-{
- phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
-}
-
-static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
- r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
- phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
-}
-
-static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
-{
- int ret;
-
- rtl_apply_firmware(tp);
-
- ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
- if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
- else
- phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));
-
- ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
- if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
- else
- phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
-
- /* Enable PHY auto speed down */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* EEE auto-fallback function */
- phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000);
-
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- /* Improve SWR Efficiency */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x5065);
- rtl_writephy(tp, 0x14, 0xd065);
- rtl_writephy(tp, 0x1f, 0x0bc8);
- rtl_writephy(tp, 0x11, 0x5655);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x14, 0x9065);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
-{
- rtl_apply_firmware(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- u16 dout_tapbin;
- u32 data;
-
- rtl_apply_firmware(tp);
-
- /* CHN EST parameters adjust - giga master */
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
- r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
-
- /* CHN EST parameters adjust - giga slave */
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
- r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
-
- /* enable R-tune & PGA-retune function */
- dout_tapbin = 0;
- data = phy_read_paged(phydev, 0x0a46, 0x13);
- data &= 3;
- data <<= 2;
- dout_tapbin |= data;
- data = phy_read_paged(phydev, 0x0a46, 0x12);
- data &= 0xc000;
- data >>= 14;
- dout_tapbin |= data;
- dout_tapbin = ~(dout_tapbin^0x08);
- dout_tapbin <<= 12;
- dout_tapbin &= 0xf000;
-
- r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
-
- /* SAR ADC performance */
- phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
-
- r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
-
- /* disable phy pfm mode */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(tp);
- rtl8168h_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
-{
- u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
- struct phy_device *phydev = tp->phydev;
- u16 rlen;
- u32 data;
-
- rtl_apply_firmware(tp);
-
-	/* CHN EST parameter update */
- r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
-
- /* enable R-tune & PGA-retune function */
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
+ u16 data1, data2, ioffset;

 	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
- data = r8168_mac_ocp_read(tp, 0xdd02);
- ioffset_p3 = ((data & 0x80)>>7);
- ioffset_p3 <<= 3;
-
- data = r8168_mac_ocp_read(tp, 0xdd00);
- ioffset_p3 |= ((data & (0xe000))>>13);
- ioffset_p2 = ((data & (0x1e00))>>9);
- ioffset_p1 = ((data & (0x01e0))>>5);
- ioffset_p0 = ((data & 0x0010)>>4);
- ioffset_p0 <<= 3;
- ioffset_p0 |= (data & (0x07));
- data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
-
- if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
- (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f))
- phy_write_paged(phydev, 0x0bcf, 0x16, data);
-
- /* Modify rlen (TX LPF corner frequency) level */
- data = phy_read_paged(phydev, 0x0bcd, 0x16);
- data &= 0x000f;
- rlen = 0;
- if (data > 3)
- rlen = data - 3;
- data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
- phy_write_paged(phydev, 0x0bcd, 0x17, data);
-
- /* disable phy pfm mode */
- phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* Enable PHY auto speed down */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* Enable EEE auto-fallback function */
- phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* set rg_sel_sdm_rate */
- phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
+ data1 = r8168_mac_ocp_read(tp, 0xdd02);
+ data2 = r8168_mac_ocp_read(tp, 0xdd00);

-static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- rtl8168g_phy_adjust_10m_aldps(tp);
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* Set rg_sel_sdm_rate */
- phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- /* Channel estimation parameters */
- r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
- r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
- r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
- r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
- r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
- r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
- r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
- r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
- r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
- r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
- r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
- r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
- r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
- r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
- r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
- r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
- r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
- r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
- r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
- r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
-
- /* Force PWM-mode */
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x5065);
- rtl_writephy(tp, 0x14, 0xd065);
- rtl_writephy(tp, 0x1f, 0x0bc8);
- rtl_writephy(tp, 0x12, 0x00ed);
- rtl_writephy(tp, 0x1f, 0x0bcd);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x14, 0x9065);
- rtl_writephy(tp, 0x14, 0x1065);
- rtl_writephy(tp, 0x1f, 0x0000);
-
- rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8117_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
-
- r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
- r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
- r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
- r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
- r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
- r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
- r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
- r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
- r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
- r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
- r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
- r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
- r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
- r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
-
- r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
-
- /* enable GPHY 10M */
- phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
-
- r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
-
- rtl8168g_disable_aldps(tp);
- rtl8168h_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0003 },
- { 0x08, 0x441d },
- { 0x01, 0x9100 },
- { 0x1f, 0x0000 }
- };
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_patchphy(tp, 0x11, 1 << 12);
- rtl_patchphy(tp, 0x19, 1 << 13);
- rtl_patchphy(tp, 0x10, 1 << 15);
-
- rtl_writephy_batch(tp, phy_reg_init);
-}
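+	/* Assemble the 16-bit offset: bit 15 from bit 7 of the first OCP
+	 * word, bits 14:3 from bits 15:4 of the second word (its bit 3 is
+	 * skipped), and bits 2:0 from the second word directly.
+	 */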
+ ioffset = (data2 >> 1) & 0x7ff8;
+ ioffset |= data2 & 0x0007;
+ if (data1 & BIT(7))
+ ioffset |= BIT(15);

-static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
-{
- /* Disable ALDPS before ram code */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(100);
-
- rtl_apply_firmware(tp);
-
- phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000);
- phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000);
- phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701);
-}
-
-static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
-{
- /* Disable ALDPS before setting firmware */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(20);
-
- rtl_apply_firmware(tp);
-
- /* EEE setting */
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x10, 0x401f);
- rtl_writephy(tp, 0x19, 0x7030);
- rtl_writephy(tp, 0x1f, 0x0000);
-}
-
-static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0004 },
- { 0x10, 0xc07f },
- { 0x19, 0x7030 },
- { 0x1f, 0x0000 }
- };
-
- /* Disable ALDPS before ram code */
- phy_write(tp->phydev, 0x18, 0x0310);
- msleep(100);
-
- rtl_apply_firmware(tp);
-
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
- rtl_writephy_batch(tp, phy_reg_init);
-
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
-}
-
-static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
-
- phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
- phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
- phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
- phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
- phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
-
- r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
- r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
- r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
- r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
- r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
- r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
- r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
- r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
- r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
- r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
- r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
-
- phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
-
- phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
- phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
- phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
-
- rtl8125_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int i;
-
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
- phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
- phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
- phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
- phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
- phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
- phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
- phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
- phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
-
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
-
- phy_write(phydev, 0x1f, 0x0a43);
- phy_write(phydev, 0x13, 0x81B3);
- phy_write(phydev, 0x14, 0x0043);
- phy_write(phydev, 0x14, 0x00A7);
- phy_write(phydev, 0x14, 0x00D6);
- phy_write(phydev, 0x14, 0x00EC);
- phy_write(phydev, 0x14, 0x00F6);
- phy_write(phydev, 0x14, 0x00FB);
- phy_write(phydev, 0x14, 0x00FD);
- phy_write(phydev, 0x14, 0x00FF);
- phy_write(phydev, 0x14, 0x00BB);
- phy_write(phydev, 0x14, 0x0058);
- phy_write(phydev, 0x14, 0x0029);
- phy_write(phydev, 0x14, 0x0013);
- phy_write(phydev, 0x14, 0x0009);
- phy_write(phydev, 0x14, 0x0004);
- phy_write(phydev, 0x14, 0x0002);
- for (i = 0; i < 25; i++)
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x1f, 0x0000);
-
- r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F);
- r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
-
- rtl_apply_firmware(tp);
-
- phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
-
- r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
-
- phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
- phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
- phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
-
- rtl8125_config_eee_phy(tp);
- rtl_enable_eee(tp);
-}
-
-static void rtl_hw_phy_config(struct net_device *dev)
-{
- static const rtl_generic_fct phy_configs[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
- [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
- [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
- [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
- [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
- [RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
- [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_13] = NULL,
- [RTL_GIGA_MAC_VER_14] = NULL,
- [RTL_GIGA_MAC_VER_15] = NULL,
- [RTL_GIGA_MAC_VER_16] = NULL,
- [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
- [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
- [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
- [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
- [RTL_GIGA_MAC_VER_31] = NULL,
- [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
- [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
- [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
- [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_41] = NULL,
- [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
- };
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (phy_configs[tp->mac_version])
- phy_configs[tp->mac_version](tp);
+
+	return ioffset;
}
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
@@ -3617,21 +2232,28 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
schedule_work(&tp->wk.work);
}
-static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl8169_init_phy(struct rtl8169_private *tp)
{
- rtl_hw_phy_config(dev);
+ r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
- netif_dbg(tp, drv, dev,
- "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ /* set undocumented MAC Reg C+CR Offset 0x82h */
RTL_W8(tp, 0x82, 0x01);
}
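+	/* Boards from GIGABYTE with subsystem device 0xe000 need the extra
+	 * PHY write formerly done in rtl8169scd_hw_phy_config_quirk().
+	 */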
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
+ tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
+ tp->pci_dev->subsystem_device == 0xe000)
+ phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
+
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
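+	/* EEE is enabled centrally here; the per-chip functions in
+	 * r8169_phy_config.c no longer call rtl_enable_eee() themselves.
+	 */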
+ if (rtl_supports_eee(tp))
+ rtl_enable_eee(tp);
+
genphy_soft_reset(tp->phydev);
}
@@ -3675,16 +2297,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -ENODEV;
-
- return phy_mii_ioctl(tp->phydev, ifr, cmd);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -3865,15 +2477,18 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
+ pcie_set_readrq(tp->pci_dev, 512);
r8168b_1_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ pcie_set_readrq(tp->pci_dev, 512);
r8168c_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
r8168dp_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+ pcie_set_readrq(tp->pci_dev, 512);
r8168e_hw_jumbo_enable(tp);
break;
default:
@@ -3903,6 +2518,9 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
break;
}
rtl_lock_config_regs(tp);
+
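+	/* Restore the default max read request size, which
+	 * rtl_hw_jumbo_enable() reduces to 512.
+	 */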
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ pcie_set_readrq(tp->pci_dev, 4096);
}
static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
@@ -4710,9 +3328,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
- rtl_writephy(tp, 0x1f, 0x0c42);
- rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
if (rg_saw_cnt > 0) {
u16 sw_cnt_1ms_ini;
@@ -4887,7 +3503,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
/* firmware is for MAC only */
- rtl_apply_firmware(tp);
+ r8169_apply_firmware(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -4991,6 +3607,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
+ /* disable EEE */
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5005,6 +3624,11 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
+
+ /* disable EEE */
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+
rtl_pcie_state_l2l3_disable(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5222,11 +3846,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
tp->cp_cmd |= PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- netif_dbg(tp, drv, tp->dev,
- "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
- tp->cp_cmd |= (1 << 14);
- }
+ tp->mac_version == RTL_GIGA_MAC_VER_03)
+ tp->cp_cmd |= EnAnaPLL;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
@@ -5435,7 +4056,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
netif_wake_queue(dev);
}
-static void rtl8169_tx_timeout(struct net_device *dev)
+static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -6240,7 +4861,7 @@ static int rtl_open(struct net_device *dev)
napi_enable(&tp->napi);
- rtl8169_init_phy(dev, tp);
+ rtl8169_init_phy(tp);
rtl_pll_power_up(tp);
@@ -6371,7 +4992,7 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
- rtl8169_init_phy(dev, tp);
+ rtl8169_init_phy(tp);
phy_start(tp->phydev);
@@ -6542,7 +5163,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_fix_features = rtl8169_fix_features,
.ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
@@ -6744,7 +5365,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
{
/* Non-GBit versions don't support jumbo frames */
if (!tp->supports_gmii)
- return JUMBO_1K;
+ return 0;
switch (tp->mac_version) {
/* RTL8169 */
@@ -6825,6 +5446,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chipset, region;
int jumbo_max, rc;
+	/* Some tools for creating an initramfs don't consider softdeps, so
+	 * r8169.ko may end up in the initramfs while realtek.ko does not.
+	 * Then the generic PHY driver is used, which doesn't work with most
+	 * chip versions.
+	 */
+ if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
+ dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
+ return -ENOENT;
+ }
+
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
return -ENOMEM;
@@ -6966,10 +5596,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
- /* MTU range: 60 - hw-specific max */
- dev->min_mtu = ETH_ZLEN;
jumbo_max = rtl_jumbo_max(tp);
- dev->max_mtu = jumbo_max;
+ if (jumbo_max)
+ dev->max_mtu = jumbo_max;
rtl_set_irq_mask(tp);
@@ -6999,7 +5628,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
pci_irq_vector(pdev, 0));
- if (jumbo_max > JUMBO_1K)
+ if (jumbo_max)
netif_info(tp, probe, dev,
"jumbo features [frames: %d bytes, tx checksumming: %s]\n",
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
new file mode 100644
index 000000000000..e367e77c773b
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -0,0 +1,1307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+#include "r8169.h"
+
+typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp,
+ struct phy_device *phydev);
+
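+/* r8168d extension pages are reached through page 0x0007: register 0x1e
+ * selects the extension page, then the target register can be modified.
+ */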
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
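+/* r8168d PHY parameters are indexed on page 0x0005: register 0x05 selects
+ * the parameter, register 0x06 holds its value.
+ */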
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
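+/* Same indexed parameter scheme as on the 8168d, relocated on 8168g and
+ * later to page 0x0a43 with register 0x13 as index and 0x14 as value.
+ */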
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+struct phy_reg {
+ u16 reg;
+ u16 val;
+};
+
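+/* Write a whole register/value sequence while holding the MDIO bus lock,
+ * so the page selects and writes can't be interleaved with other accesses.
+ */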
+static void __rtl_writephy_batch(struct phy_device *phydev,
+ const struct phy_reg *regs, int len)
+{
+ phy_lock_mdio_bus(phydev);
+
+ while (len-- > 0) {
+ __phy_write(phydev, regs->reg, regs->val);
+ regs++;
+ }
+
+ phy_unlock_mdio_bus(phydev);
+}
+
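+/* Convenience wrapper: callers pass the array itself and its length is
+ * derived at compile time via ARRAY_SIZE().
+ */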
+#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a))
+
+static void rtl8168f_config_eee_phy(struct phy_device *phydev)
+{
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
+}
+
+static void rtl8168g_config_eee_phy(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4));
+}
+
+static void rtl8168h_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168g_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
+static void rtl8125_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168h_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+}
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x006e },
+ { 0x08, 0x0708 },
+ { 0x15, 0x4000 },
+ { 0x18, 0x65c7 },
+
+ { 0x1f, 0x0001 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x0000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf60 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x0077 },
+ { 0x04, 0x7800 },
+ { 0x04, 0x7000 },
+
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf0f9 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xa000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x00bb },
+ { 0x04, 0xb800 },
+ { 0x04, 0xb000 },
+
+ { 0x03, 0xdf41 },
+ { 0x02, 0xdc60 },
+ { 0x01, 0x6340 },
+ { 0x00, 0x007d },
+ { 0x04, 0xd800 },
+ { 0x04, 0xd000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x100a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0b, 0x0000 },
+ { 0x00, 0x9200 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0002, 0x01, 0x90d0);
+}
+
+static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x14, 0xfb54 },
+ { 0x18, 0xf5c7 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x8480 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x18, 0x67c7 },
+ { 0x04, 0x2000 },
+ { 0x03, 0x002f },
+ { 0x02, 0x4360 },
+ { 0x01, 0x0109 },
+ { 0x00, 0x3022 },
+ { 0x04, 0x2800 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write(phydev, 0x1f, 0x0001);
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_write(phydev, 0x10, 0xf41b);
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0001, 0x10, 0xf41b);
+}
+
+static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write(phydev, 0x1d, 0x0f00);
+ phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8);
+}
+
+static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+ phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98);
+}
+
+static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1f, 0x0002 },
+ { 0x00, 0x88d4 },
+ { 0x01, 0x82b1 },
+ { 0x03, 0x7002 },
+ { 0x08, 0x9e30 },
+ { 0x09, 0x01f0 },
+ { 0x0a, 0x5500 },
+ { 0x0c, 0x00c8 },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xc096 },
+ { 0x16, 0x000a },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x09, 0x2000 },
+ { 0x09, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x0761 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x5461 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ phy_set_bits(phydev, 0x16, BIT(0));
+ phy_set_bits(phydev, 0x14, BIT(5));
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+	 * Cannot link at 1Gbps with a bad cable
+	 * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+};
+
+static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+};
+
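+/* Load the firmware only if PHY parameter 0x001b reads back the expected
+ * chip-specific value; otherwise the PHY is not ready for it.
+ */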
+static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
+ struct phy_device *phydev,
+ u16 val)
+{
+ u16 reg_val;
+
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x001b);
+ reg_val = phy_read(phydev, 0x06);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ if (reg_val != val)
+ phydev_warn(phydev, "chipset not ready for firmware\n");
+ else
+ r8169_apply_firmware(tp);
+}
+
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
+
+ /*
+ * Rx Error Issue
+ * Fine Tune Switching regulator parameter
+ */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x0b, 0x00ef, 0x0010);
+ phy_modify(phydev, 0x0c, 0x5d00, 0xa200);
+
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
+ int val;
+
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
+
+ val = phy_read(phydev, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+ } else {
+ phy_write_paged(phydev, 0x0002, 0x05, 0x6662);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662);
+ }
+
+ /* RSET couple improve */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_set_bits(phydev, 0x0d, 0x0300);
+ phy_set_bits(phydev, 0x0f, 0x0010);
+
+ /* Fine tune PLL performance */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x02, 0x0600, 0x0100);
+ phy_clear_bits(phydev, 0x03, 0xe000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00);
+}
+
+static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
+
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
+ int val;
+
+ rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
+
+ val = phy_read(phydev, 0x0d);
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+ } else {
+ phy_write_paged(phydev, 0x0002, 0x05, 0x2642);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642);
+ }
+
+ /* Fine tune PLL performance */
+ phy_write(phydev, 0x1f, 0x0002);
+ phy_modify(phydev, 0x02, 0x0600, 0x0100);
+ phy_clear_bits(phydev, 0x03, 0xe000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* Switching regulator Slew rate */
+ phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017);
+
+ rtl8168d_apply_firmware_cond(tp, phydev, 0xb300);
+}
+
+static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x10, 0x0008 },
+ { 0x0d, 0x006c },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0xa4d8 },
+ { 0x09, 0x281c },
+ { 0x07, 0x2883 },
+ { 0x0a, 0x6b35 },
+ { 0x1d, 0x3da4 },
+ { 0x1c, 0xeffd },
+ { 0x14, 0x7f52 },
+ { 0x18, 0x7fc6 },
+ { 0x08, 0x0601 },
+ { 0x06, 0x4063 },
+ { 0x10, 0xf074 },
+ { 0x1f, 0x0003 },
+ { 0x13, 0x0789 },
+ { 0x12, 0xf4bd },
+ { 0x1a, 0x04fd },
+ { 0x14, 0x84b0 },
+ { 0x1f, 0x0000 },
+ { 0x00, 0x9200 },
+
+ { 0x1f, 0x0005 },
+ { 0x01, 0x0340 },
+ { 0x1f, 0x0001 },
+ { 0x04, 0x4000 },
+ { 0x03, 0x1d21 },
+ { 0x02, 0x0c32 },
+ { 0x01, 0x0200 },
+ { 0x00, 0x5554 },
+ { 0x04, 0x4800 },
+ { 0x04, 0x4000 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+ { 0x1f, 0x0000 },
+ };
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+ r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000);
+}
+
+static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0);
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040);
+ phy_set_bits(phydev, 0x0d, BIT(5));
+}
+
+static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x6c20 },
+ { 0x07, 0x2872 },
+ { 0x1c, 0xefff },
+ { 0x1f, 0x0003 },
+ { 0x14, 0x6420 },
+ { 0x1f, 0x0000 },
+ };
+
+ r8169_apply_firmware(tp);
+
+ /* Enable Delay cap */
+ r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896);
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+
+ /* Update PFM & 10M TX idle timer */
+ r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919);
+
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
+ /* DCO enable for 10M IDLE Power */
+ r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006);
+
+ /* For impedance matching */
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+ r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000);
+
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000);
+ phy_write_paged(phydev, 0x0006, 0x00, 0x5a00);
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000);
+}
+
+static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ /* Enable Delay cap */
+ r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Green Setting */
+ r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222);
+ r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000);
+ r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000);
+
+ /* For 4-corner performance improve */
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x8b80);
+ phy_set_bits(phydev, 0x17, 0x0006);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ /* improve 10M EEE waveform */
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+
+ rtl8168f_config_eee_phy(phydev);
+
+ /* Green feature */
+ phy_write(phydev, 0x1f, 0x0003);
+ phy_set_bits(phydev, 0x19, BIT(0));
+ phy_set_bits(phydev, 0x10, BIT(10));
+ phy_write(phydev, 0x1f, 0x0000);
+ phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8));
+}
+
+static void rtl8168f_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* For 4-corner performance improve */
+ r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006);
+
+ /* PHY auto speed down */
+ r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010);
+ phy_set_bits(phydev, 0x14, BIT(15));
+
+ /* Improve 10M EEE waveform */
+ r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001);
+
+ rtl8168f_config_eee_phy(phydev);
+}
+
+static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+	/* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+}
+
+static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+}
+
+static void rtl8411_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+
+ rtl8168f_hw_phy_config(tp, phydev);
+
+ /* Improve 2-pair detection performance */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000);
+
+ /* Channel estimation fine tune */
+ phy_write_paged(phydev, 0x0003, 0x09, 0xa20f);
+
+ /* Modify green table for giga & fnet */
+ r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000);
+ r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000);
+ r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa);
+
+ /* Modify green table for 10M */
+ r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00);
+
+	/* Disable high-impedance detection (RTCT) */
+ phy_write_paged(phydev, 0x0003, 0x01, 0x328a);
+
+ /* Modify green table for giga */
+ r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100);
+ r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000);
+ r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000);
+
+ /* uc same-seed solution */
+ r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000);
+
+ /* Green feature */
+ phy_write(phydev, 0x1f, 0x0003);
+ phy_clear_bits(phydev, 0x19, BIT(0));
+ phy_clear_bits(phydev, 0x10, BIT(10));
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168g_disable_aldps(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0);
+}
+
+static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
+ r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003);
+}
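
Note: the helpers above all follow the Realtek paged-register convention — write the page number to register 0x1f, touch the target register, then return to page 0 — which the open-coded sequences elsewhere in this file do by hand. As a minimal sketch of what a paged read-modify-write amounts to (the in-tree phy_modify_paged() in drivers/net/phy/phy-core.c additionally takes the MDIO bus lock and restores the previous page on error):

	/* Sketch only; see phy_modify_paged() for the real, locked version. */
	static int rtl_modify_paged_sketch(struct phy_device *phydev, int page,
					   u32 regnum, u16 mask, u16 set)
	{
		int ret, val;

		phy_write(phydev, 0x1f, page);		/* select page */
		val = phy_read(phydev, regnum);
		if (val < 0) {
			ret = val;
			goto out;
		}
		ret = phy_write(phydev, regnum, (val & ~mask) | set);
	out:
		phy_write(phydev, 0x1f, 0);		/* back to page 0 */
		return ret;
	}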
+
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ r8169_apply_firmware(tp);
+
+ ret = phy_read_paged(phydev, 0x0a46, 0x10);
+ if (ret & BIT(8))
+ phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0);
+ else
+ phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15));
+
+ ret = phy_read_paged(phydev, 0x0a46, 0x13);
+ if (ret & BIT(8))
+ phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1));
+ else
+ phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0);
+
+ /* Enable PHY auto speed down */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* EEE auto-fallback function */
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ /* Improve SWR Efficiency */
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x5065);
+ phy_write(phydev, 0x14, 0xd065);
+ phy_write(phydev, 0x1f, 0x0bc8);
+ phy_write(phydev, 0x11, 0x5655);
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x14, 0x9065);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ u16 dout_tapbin;
+ u32 data;
+
+ r8169_apply_firmware(tp);
+
+ /* CHN EST parameters adjust - giga master */
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
+ r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
+
+ /* CHN EST parameters adjust - giga slave */
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
+ r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
+
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
+
+ /* enable R-tune & PGA-retune function */
+ dout_tapbin = 0;
+ data = phy_read_paged(phydev, 0x0a46, 0x13);
+ data &= 3;
+ data <<= 2;
+ dout_tapbin |= data;
+ data = phy_read_paged(phydev, 0x0a46, 0x12);
+ data &= 0xc000;
+ data >>= 14;
+ dout_tapbin |= data;
+ dout_tapbin = ~(dout_tapbin ^ 0x08);
+ dout_tapbin <<= 12;
+ dout_tapbin &= 0xf000;
+
+ r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ /* SAR ADC performance */
+ phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
+
+ r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
+ r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
+
+ /* disable phy pfm mode */
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168h_config_eee_phy(phydev);
+}
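
Note: the dout_tapbin derivation above packs two hardware-reported 2-bit fields into a 4-bit code, flips bit 3 via the XOR with 0x08, complements the result, and keeps only the top nibble that r8168g_phy_param() then writes. A worked example with assumed (hypothetical) register contents — reg 0x13 = 0x0002, reg 0x12 = 0x4000:

	u16 tap = 0;
	u32 d;

	d = 0x0002 & 3;			/* low 2 bits of reg 0x13 -> 0b10 */
	d <<= 2;			/* 0b1000 */
	tap |= d;
	d = (0x4000 & 0xc000) >> 14;	/* top 2 bits of reg 0x12 -> 0b01 */
	tap |= d;			/* 0b1001 */
	tap = ~(tap ^ 0x08);		/* 0b1001 ^ 0b1000 = 1; ~1 = 0xfffe */
	tap <<= 12;
	tap &= 0xf000;			/* 0xe000: the nibble written above */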
+
+static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ u16 ioffset, rlen;
+ u32 data;
+
+ r8169_apply_firmware(tp);
+
+	/* CHN EST parameter update */
+ r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a);
+
+ /* enable R-tune & PGA-retune function */
+ r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
+ phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ ioffset = rtl8168h_2_get_adc_bias_ioffset(tp);
+ if (ioffset != 0xffff)
+ phy_write_paged(phydev, 0x0bcf, 0x16, ioffset);
+
+ /* Modify rlen (TX LPF corner frequency) level */
+ data = phy_read_paged(phydev, 0x0bcd, 0x16);
+ data &= 0x000f;
+ rlen = 0;
+ if (data > 3)
+ rlen = data - 3;
+ data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12);
+ phy_write_paged(phydev, 0x0bcd, 0x17, data);
+
+ /* disable phy pfm mode */
+ phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
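
Note: the rlen adjustment above reads the low nibble of page 0x0bcd register 0x16, subtracts 3 with a floor of 0, and replicates the result into all four nibbles of register 0x17. For an assumed (hypothetical) read value of 0x0009:

	u32 d = 0x0009 & 0x000f;	/* 9 */
	u16 rlen = d > 3 ? d - 3 : 0;	/* 6 */
	u16 out = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12);
					/* 0x6666, written to reg 0x17 */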
+
+static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Enable PHY auto speed down */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
+
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* Enable EEE auto-fallback function */
+ phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ /* set rg_sel_sdm_rate */
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ rtl8168g_phy_adjust_10m_aldps(phydev);
+
+ /* Enable UC LPF tune function */
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
+
+ /* Set rg_sel_sdm_rate */
+ phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
+
+ /* Channel estimation parameters */
+ r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00);
+ r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00);
+ r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500);
+ r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00);
+ r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800);
+ r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400);
+ r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500);
+ r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800);
+ r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00);
+ r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500);
+ r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100);
+ r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200);
+ r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400);
+ r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00);
+ r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00);
+ r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00);
+ r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00);
+ r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00);
+ r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400);
+
+ /* Force PWM-mode */
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x5065);
+ phy_write(phydev, 0x14, 0xd065);
+ phy_write(phydev, 0x1f, 0x0bc8);
+ phy_write(phydev, 0x12, 0x00ed);
+ phy_write(phydev, 0x1f, 0x0bcd);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x14, 0x9065);
+ phy_write(phydev, 0x14, 0x1065);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168g_config_eee_phy(phydev);
+}
+
+static void rtl8117_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* CHN EST parameters adjust - fnet */
+ r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800);
+ r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00);
+ r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000);
+
+ r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000);
+ r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00);
+ r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600);
+ r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000);
+ r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800);
+ r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000);
+ r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00);
+ r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800);
+ r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200);
+ r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800);
+ r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800);
+ r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00);
+ r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300);
+ r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300);
+
+ r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
+
+ /* enable GPHY 10M */
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+
+ r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
+
+ rtl8168g_disable_aldps(phydev);
+ rtl8168h_config_eee_phy(phydev);
+}
+
+static void rtl8102e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0003 },
+ { 0x08, 0x441d },
+ { 0x01, 0x9100 },
+ { 0x1f, 0x0000 }
+ };
+
+ phy_set_bits(phydev, 0x11, BIT(12));
+ phy_set_bits(phydev, 0x19, BIT(13));
+ phy_set_bits(phydev, 0x10, BIT(15));
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
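
Note: rtl_writephy_batch() simply replays a table of register/value pairs; entries that write register 0x1f act as page selects for the writes that follow, which is why the tables here open with a page select and close by returning to page 0. A sketch of the idea (the driver's real helper is a thin wrapper around an equivalent loop):

	static void writephy_batch_sketch(struct phy_device *phydev,
					  const struct phy_reg *regs,
					  unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len; i++)
			phy_write(phydev, regs[i].reg, regs[i].val);
	}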
+
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Disable ALDPS before ram code */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(100);
+
+ r8169_apply_firmware(tp);
+
+ phy_write_paged(phydev, 0x0005, 0x1a, 0x0000);
+ phy_write_paged(phydev, 0x0004, 0x1c, 0x0000);
+ phy_write_paged(phydev, 0x0001, 0x15, 0x7701);
+}
+
+static void rtl8402_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ /* Disable ALDPS before setting firmware */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(20);
+
+ r8169_apply_firmware(tp);
+
+ /* EEE setting */
+ phy_write(phydev, 0x1f, 0x0004);
+ phy_write(phydev, 0x10, 0x401f);
+ phy_write(phydev, 0x19, 0x7030);
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ phy_write(phydev, 0x18, 0x0310);
+ msleep(100);
+
+ r8169_apply_firmware(tp);
+
+ rtl_writephy_batch(phydev, phy_reg_init);
+}
+
+static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
+ phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
+ phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
+ phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
+
+ r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
+ r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
+ r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
+ r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
+ r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
+ r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
+ r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
+ r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
+ r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
+ r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
+ r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
+ r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
+ r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
+
+ phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
+ r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
+
+ phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
+ phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
+ phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ int i;
+
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
+ phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
+ phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
+ phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
+ phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
+ phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x80a2);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x16, 0x809c);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81B3);
+ phy_write(phydev, 0x14, 0x0043);
+ phy_write(phydev, 0x14, 0x00A7);
+ phy_write(phydev, 0x14, 0x00D6);
+ phy_write(phydev, 0x14, 0x00EC);
+ phy_write(phydev, 0x14, 0x00F6);
+ phy_write(phydev, 0x14, 0x00FB);
+ phy_write(phydev, 0x14, 0x00FD);
+ phy_write(phydev, 0x14, 0x00FF);
+ phy_write(phydev, 0x14, 0x00BB);
+ phy_write(phydev, 0x14, 0x0058);
+ phy_write(phydev, 0x14, 0x0029);
+ phy_write(phydev, 0x14, 0x0013);
+ phy_write(phydev, 0x14, 0x0009);
+ phy_write(phydev, 0x14, 0x0004);
+ phy_write(phydev, 0x14, 0x0002);
+ for (i = 0; i < 25; i++)
+ phy_write(phydev, 0x14, 0x0000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F);
+ r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843);
+
+ r8169_apply_firmware(tp);
+
+ phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
+
+ r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100);
+
+ phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
+ phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(phydev);
+}
+
+void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
+ enum mac_version ver)
+{
+ static const rtl_phy_cfg_fct phy_configs[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
+ [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_31] = NULL,
+ [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
+ [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
+ [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_41] = NULL,
+ [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
+ [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
+ };
+
+ if (phy_configs[ver])
+ phy_configs[ver](tp, phydev);
+}
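
Note: the per-chip handlers are collected into a single designated-initializer table indexed by MAC version; NULL slots mark chips whose PHYs need no quirks, and the guard before the call makes those a no-op. Supporting a new chip is then one added entry, e.g. (entry name and handler are illustrative only, not part of this patch):

	[RTL_GIGA_MAC_VER_62] = rtl8125_3_hw_phy_config,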
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4b13a184bfc7..067ad25553b9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1425,7 +1425,7 @@ out_napi_off:
}
/* Timeout function for Ethernet AVB */
-static void ravb_tx_timeout(struct net_device *ndev)
+static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ravb_private *priv = netdev_priv(ndev);
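
Note: this signature change (and the matching ones in the sh_eth, sxgbe and seeq hunks below) tracks the v5.6 core change that passes the index of the stuck queue to ndo_tx_timeout, so multi-queue drivers no longer have to scan every ring; drivers that reset the whole device, like these, simply ignore the new argument. A sketch of how a handler could use it (illustrative, not ravb's actual recovery path):

	static void example_tx_timeout(struct net_device *ndev,
				       unsigned int txqueue)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, txqueue);

		netdev_warn(ndev, "TX queue %u timed out\n", txqueue);
		/* driver-specific: reset only this ring, then wake it */
		netif_tx_wake_queue(txq);
	}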
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e19b49c4013e..58ca126518a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tsu) {
add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
- add_tsu_reg(TSU_FWEN0);
- add_tsu_reg(TSU_FWEN1);
- add_tsu_reg(TSU_FCM);
- add_tsu_reg(TSU_BSYSL0);
- add_tsu_reg(TSU_BSYSL1);
- add_tsu_reg(TSU_PRISL0);
- add_tsu_reg(TSU_PRISL1);
- add_tsu_reg(TSU_FWSL0);
- add_tsu_reg(TSU_FWSL1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_FWEN0);
+ add_tsu_reg(TSU_FWEN1);
+ add_tsu_reg(TSU_FCM);
+ add_tsu_reg(TSU_BSYSL0);
+ add_tsu_reg(TSU_BSYSL1);
+ add_tsu_reg(TSU_PRISL0);
+ add_tsu_reg(TSU_PRISL1);
+ add_tsu_reg(TSU_FWSL0);
+ add_tsu_reg(TSU_FWSL1);
+ }
add_tsu_reg(TSU_FWSLC);
- add_tsu_reg(TSU_QTAGM0);
- add_tsu_reg(TSU_QTAGM1);
- add_tsu_reg(TSU_FWSR);
- add_tsu_reg(TSU_FWINMK);
- add_tsu_reg(TSU_ADQT0);
- add_tsu_reg(TSU_ADQT1);
- add_tsu_reg(TSU_VTAG0);
- add_tsu_reg(TSU_VTAG1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_QTAGM0);
+ add_tsu_reg(TSU_QTAGM1);
+ add_tsu_reg(TSU_FWSR);
+ add_tsu_reg(TSU_FWINMK);
+ add_tsu_reg(TSU_ADQT0);
+ add_tsu_reg(TSU_ADQT1);
+ add_tsu_reg(TSU_VTAG0);
+ add_tsu_reg(TSU_VTAG1);
+ }
add_tsu_reg(TSU_ADSBSY);
add_tsu_reg(TSU_TEN);
add_tsu_reg(TSU_POST1);
@@ -2478,7 +2482,7 @@ out_napi_off:
}
/* Timeout function */
-static void sh_eth_tx_timeout(struct net_device *ndev)
+static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
@@ -2643,20 +2647,6 @@ static int sh_eth_close(struct net_device *ndev)
return 0;
}
-/* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
{
if (netif_running(ndev))
@@ -3155,7 +3145,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3171,7 +3161,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
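
Note: the deleted sh_eth_do_ioctl() was boilerplate that the phylib helper phy_do_ioctl_running(), new in this kernel, now provides generically — refuse the ioctl while the interface is down, then forward MII ioctls to the attached PHY. In essence (a sketch; the exact error codes are phylib's choice, see drivers/net/phy/phy.c):

	static int do_ioctl_running_sketch(struct net_device *ndev,
					   struct ifreq *rq, int cmd)
	{
		if (!netif_running(ndev))
			return -ENODEV;
		if (!ndev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(ndev->phydev, rq, cmd);
	}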
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index bc4f951315da..7585cd2270ba 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2159,7 +2159,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
if (err)
rocker_world_fib4_abort(rocker);
@@ -2201,7 +2201,7 @@ static int rocker_router_fib_event(struct notifier_block *nb,
fib_work->event = event;
switch (event) {
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_DEL:
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index c56fcbb37066..c705743d69f7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1572,7 +1572,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void sxgbe_tx_timeout(struct net_device *dev)
+static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -1939,9 +1939,7 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!dev->phydev)
- return -EINVAL;
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
+ ret = phy_do_ioctl(dev, rq, cmd);
break;
default:
break;
@@ -2296,7 +2294,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt);
-MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 632a7c85964d..128ee7cda1ed 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -79,7 +79,7 @@ static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
-static void ether3_timeout(struct net_device *dev);
+static void ether3_timeout(struct net_device *dev, unsigned int txqueue);
#define BUS_16 2
#define BUS_8 1
@@ -450,7 +450,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
}
-static void ether3_timeout(struct net_device *dev)
+static void ether3_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 276c7cae7cee..8507ff242014 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -645,7 +645,7 @@ sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void timeout(struct net_device *dev)
+static void timeout(struct net_device *dev, unsigned int txqueue)
{
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5f36774bf4b8..ea5a9220196c 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -21,8 +21,6 @@ config SFC
depends on PCI
select MDIO
select CRC32
- select I2C
- select I2C_ALGOBIT
imply PTP_1588_CLOCK
---help---
This driver supports 10/40-gigabit Ethernet cards based on
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index c5c297e78d06..87d093da22ca 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
- selftest.o ethtool.o ptp.o tx_tso.o \
- mcdi.o mcdi_port.o mcdi_mon.o
+sfc-y += efx.o efx_common.o efx_channels.o nic.o \
+ farch.o siena.o ef10.o \
+ tx.o tx_common.o tx_tso.o rx.o rx_common.o \
+ selftest.o ethtool.o ethtool_common.o ptp.o \
+ mcdi.o mcdi_port.o mcdi_port_common.o \
+ mcdi_functions.o mcdi_filters.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4d9bbccc6f89..52113b7529d6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -5,11 +5,15 @@
*/
#include "net_driver.h"
+#include "rx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
+#include "mcdi_functions.h"
#include "nic.h"
+#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
@@ -25,28 +29,6 @@ enum {
EFX_EF10_TEST = 1,
EFX_EF10_REFILL,
};
-/* The maximum size of a shared RSS context */
-/* TODO: this should really be from the mcdi protocol export */
-#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
-
-/* The filter table(s) are managed by firmware and we have write-only
- * access. When removing filters we must identify them to the
- * firmware by a 64-bit handle, but this is too wide for Linux kernel
- * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
- * be able to tell in advance whether a requested insertion will
- * replace an existing filter. Therefore we maintain a software hash
- * table, which should be at least as large as the hardware hash
- * table.
- *
- * Huntington has a single 8K filter table shared between all filter
- * types and both ports.
- */
-#define HUNT_FILTER_TBL_ROWS 8192
-
-#define EFX_EF10_FILTER_ID_INVALID 0xffff
-
-#define EFX_EF10_FILTER_DEV_UC_MAX 32
-#define EFX_EF10_FILTER_DEV_MC_MAX 256
/* VLAN list entry */
struct efx_ef10_vlan {
@@ -54,95 +36,8 @@ struct efx_ef10_vlan {
u16 vid;
};
-enum efx_ef10_default_filters {
- EFX_EF10_BCAST,
- EFX_EF10_UCDEF,
- EFX_EF10_MCDEF,
- EFX_EF10_VXLAN4_UCDEF,
- EFX_EF10_VXLAN4_MCDEF,
- EFX_EF10_VXLAN6_UCDEF,
- EFX_EF10_VXLAN6_MCDEF,
- EFX_EF10_NVGRE4_UCDEF,
- EFX_EF10_NVGRE4_MCDEF,
- EFX_EF10_NVGRE6_UCDEF,
- EFX_EF10_NVGRE6_MCDEF,
- EFX_EF10_GENEVE4_UCDEF,
- EFX_EF10_GENEVE4_MCDEF,
- EFX_EF10_GENEVE6_UCDEF,
- EFX_EF10_GENEVE6_MCDEF,
-
- EFX_EF10_NUM_DEFAULT_FILTERS
-};
-
-/* Per-VLAN filters information */
-struct efx_ef10_filter_vlan {
- struct list_head list;
- u16 vid;
- u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
- u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
- u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
-};
-
-struct efx_ef10_dev_addr {
- u8 addr[ETH_ALEN];
-};
-
-struct efx_ef10_filter_table {
-/* The MCDI match masks supported by this fw & hw, in order of priority */
- u32 rx_match_mcdi_flags[
- MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
- unsigned int rx_match_count;
-
- struct rw_semaphore lock; /* Protects entries */
- struct {
- unsigned long spec; /* pointer to spec plus flag bits */
-/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
-/* unused flag 1UL */
-#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
-#define EFX_EF10_FILTER_FLAGS 3UL
- u64 handle; /* firmware handle */
- } *entry;
-/* Shadow of net_device address lists, guarded by mac_lock */
- struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
- struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
- int dev_uc_count;
- int dev_mc_count;
- bool uc_promisc;
- bool mc_promisc;
-/* Whether in multicast promiscuous mode when last changed */
- bool mc_promisc_last;
- bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
- bool vlan_filter;
- struct list_head vlan_list;
-};
-
-/* An arbitrary search limit for the software hash table */
-#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-
-static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
-static void efx_ef10_filter_table_remove(struct efx_nic *efx);
-static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
-static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan);
-static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
-static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
-{
- WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
- return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
-}
-
-static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
-{
- return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
-}
-
-static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
-{
- return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
-}
-
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -185,24 +80,6 @@ static bool efx_ef10_is_vf(struct efx_nic *efx)
return efx->type->is_vf;
}
-static int efx_ef10_get_pf_index(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
- sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < sizeof(outbuf))
- return -EIO;
-
- nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
- return 0;
-}
-
#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
@@ -273,24 +150,9 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
u8 vi_window_mode = MCDI_BYTE(outbuf,
GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
- switch (vi_window_mode) {
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
- efx->vi_stride = 8192;
- break;
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
- efx->vi_stride = 16384;
- break;
- case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
- efx->vi_stride = 65536;
- break;
- default:
- netif_err(efx, probe, efx->net_dev,
- "Unrecognised VI window mode %d\n",
- vi_window_mode);
- return -EIO;
- }
- netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
- efx->vi_stride);
+ rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
+ if (rc)
+ return rc;
} else {
/* keep default VI stride */
netif_dbg(efx, probe, efx->net_dev,
@@ -576,7 +438,7 @@ static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
if (efx->filter_state) {
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (rc)
@@ -605,7 +467,7 @@ static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
if (efx->filter_state) {
down_write(&efx->filter_sem);
- efx_ef10_filter_del_vlan(efx, vlan->vid);
+ efx_mcdi_filter_del_vlan(efx, vlan->vid);
up_write(&efx->filter_sem);
}
@@ -689,7 +551,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
}
nic_data->warm_boot_count = rc;
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
@@ -725,7 +587,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail4;
- rc = efx_ef10_get_pf_index(efx);
+ rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
goto fail5;
@@ -831,22 +693,6 @@ fail1:
return rc;
}
-static int efx_ef10_free_vis(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF_ERR(outbuf);
- size_t outlen;
- int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
-
- /* -EALREADY means nothing to free, so ignore */
- if (rc == -EALREADY)
- rc = 0;
- if (rc)
- efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
- rc);
- return rc;
-}
-
#ifdef EFX_USE_PIO
static void efx_ef10_free_piobufs(struct efx_nic *efx)
@@ -1084,12 +930,12 @@ static void efx_ef10_remove(struct efx_nic *efx)
efx_mcdi_mon_remove(efx);
- efx_ef10_rx_free_indir_table(efx);
+ efx_mcdi_rx_free_indir_table(efx);
if (nic_data->wc_membase)
iounmap(nic_data->wc_membase);
- rc = efx_ef10_free_vis(efx);
+ rc = efx_mcdi_free_vis(efx);
WARN_ON(rc != 0);
if (!nic_data->must_restore_piobufs)
@@ -1260,28 +1106,10 @@ static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
- MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
- rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
- return -EIO;
- netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
- MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
-
- nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
- nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
- return 0;
+ return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
+ &nic_data->n_allocated_vis);
}
/* Note that the failure path of this function does not free
@@ -1363,7 +1191,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
}
/* In case the last attached driver failed to free VIs, do it now */
- rc = efx_ef10_free_vis(efx);
+ rc = efx_mcdi_free_vis(efx);
if (rc != 0)
return rc;
@@ -1384,7 +1212,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
efx->max_tx_channels =
nic_data->n_allocated_vis / EFX_TXQ_TYPES;
- efx_ef10_free_vis(efx);
+ efx_mcdi_free_vis(efx);
return -EAGAIN;
}
@@ -1401,7 +1229,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
}
/* Shrink the original UC mapping of the memory BAR */
- membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ membase = ioremap(efx->membase_phys, uc_mem_map_size);
if (!membase) {
netif_err(efx, probe, efx->net_dev,
"could not shrink memory BAR to %x\n",
@@ -1490,7 +1318,7 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
return 0;
}
-static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
@@ -1503,7 +1331,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
@@ -1571,7 +1399,7 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
*/
if ((reset_type == RESET_TYPE_ALL ||
reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
- efx_ef10_reset_mc_allocations(efx);
+ efx_ef10_table_reset_mc_allocations(efx);
return rc;
}
@@ -2187,7 +2015,7 @@ static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
/* All our allocations have been reset */
- efx_ef10_reset_mc_allocations(efx);
+ efx_ef10_table_reset_mc_allocations(efx);
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
@@ -2408,20 +2236,15 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
- EFX_BUF_SIZE));
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_nic_data *nic_data;
bool tso_v2 = false;
- size_t inlen;
- dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
- int i;
- BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+
+ nic_data = efx->nic_data;
/* Only attempt to enable TX timestamping if we have the license for it,
* otherwise TXQ init will fail
@@ -2448,51 +2271,9 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
channel->channel);
}
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
-
- dma_addr = tx_queue->txd.buf.dma_addr;
-
- netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
- tx_queue->queue, entries, (u64)dma_addr);
-
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
-
- do {
- MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
- /* This flag was removed from mcdi_pcol.h for
- * the non-_EXT version of INIT_TXQ. However,
- * firmware still honours it.
- */
- INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
- INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
- INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
- tx_queue->timestamping);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc == -ENOSPC && tso_v2) {
- /* Retry without TSOv2 if we're short on contexts. */
- tso_v2 = false;
- netif_warn(efx, probe, efx->net_dev,
- "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
- } else if (rc) {
- efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
- MC_CMD_INIT_TXQ_EXT_IN_LEN,
- NULL, 0, rc);
- goto fail;
- }
- } while (rc);
+ rc = efx_mcdi_tx_init(tx_queue, tso_v2);
+ if (rc)
+ goto fail;
/* A previous user of this TX queue might have set us up the
* bomb by writing a descriptor to the TX push collector but
@@ -2530,35 +2311,6 @@ fail:
tx_queue->queue);
}
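
Note: the open-coded INIT_TXQ MCDI construction deleted above, including the fallback that retries without TSOv2 when firmware runs out of TSO contexts, moved into the shared efx_mcdi_tx_init() now called here. The retry shape, reduced to a sketch (submit_init_txq() is a hypothetical stand-in for the MCDI call):

	int rc;

	do {
		rc = submit_init_txq(tx_queue, tso_v2);	/* hypothetical */
		if (rc == -ENOSPC && tso_v2)
			tso_v2 = false;	/* retry once without hardware TSO */
		else if (rc)
			return rc;	/* hard failure */
	} while (rc);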
-static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = tx_queue->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
- tx_queue->queue);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
-static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
-{
- efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
-}
-
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
@@ -2637,527 +2389,6 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
- 1 << RSS_MODE_HASH_DST_ADDR_LBN)
-#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
- 1 << RSS_MODE_HASH_DST_PORT_LBN)
-#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
- 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
- (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
- (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
- RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
-
-static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
-{
- /* Firmware had a bug (sfc bug 61952) where it would not actually
- * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
- * This meant that it would always contain whatever was previously
- * in the MCDI buffer. Fortunately, all firmware versions with
- * this bug have the same default flags value for a newly-allocated
- * RSS context, and the only time we want to get the flags is just
- * after allocating. Moreover, the response has a 32-bit hole
- * where the context ID would be in the request, so we can use an
- * overlength buffer in the request and pre-fill the flags field
- * with what we believe the default to be. Thus if the firmware
- * has the bug, it will leave our pre-filled value in the flags
- * field of the response, and we will get the right answer.
- *
- * However, this does mean that this function should NOT be used if
- * the RSS context flags might not be their defaults - it is ONLY
- * reliably correct for a newly-allocated RSS context.
- */
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
- size_t outlen;
- int rc;
-
- /* Check we have a hole for the context ID */
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
- RSS_CONTEXT_FLAGS_DEFAULT);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
- sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
- if (rc == 0) {
- if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
- rc = -EIO;
- else
- *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
- }
- return rc;
-}
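
Note: the workaround described in the comment above is worth spelling out. Firmware affected by the bug echoes back whatever already sits in the shared MCDI buffer instead of writing the flags field, and the response layout has a hole exactly where the request's context ID lives. Pre-seeding the request's flags slot with the known default therefore makes the unwritten response field read back correctly — but only for a freshly allocated context, as the comment warns. The trick in miniature (taken from the deleted lines above):

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
		       RSS_CONTEXT_FLAGS_DEFAULT);	/* survives buggy fw */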
-
-/* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
- * If we fail, we just leave the RSS context at its default hash settings,
- * which is safe but may slightly reduce performance.
- * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
- * just need to set the UDP ports flags (for both IP versions).
- */
-static void efx_ef10_set_rss_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
- u32 flags;
-
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
-
- if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
- return;
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
- flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
- if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
- NULL, 0, NULL))
- /* Succeeded, so UDP 4-tuple is now enabled */
- ctx->rx_hash_udp_4tuple = true;
-}
-
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
- struct efx_rss_context *ctx,
- unsigned *context_size)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t outlen;
- int rc;
- u32 alloc_type = exclusive ?
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
- unsigned rss_spread = exclusive ?
- efx->rss_spread :
- min(rounddown_pow_of_two(efx->rss_spread),
- EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
-
- if (!exclusive && rss_spread == 1) {
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- if (context_size)
- *context_size = 1;
- return 0;
- }
-
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
- return -EOPNOTSUPP;
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
- return -EIO;
-
- ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
-
- if (context_size)
- *context_size = rss_spread;
-
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
- efx_ef10_set_rss_flags(efx, ctx);
-
- return 0;
-}
-
-static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
- context);
- return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
-}
-
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
- const u32 *rx_indir_table, const u8 *key)
-{
- MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
- MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
- int i, rc;
-
- MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
- context);
- BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
- MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
-
- /* This iterates over the length of efx->rss_context.rx_indir_table, but
- * copies bytes from rx_indir_table. That's because the latter is a
- * pointer rather than an array, but should have the same length.
- * The efx->rss_context.rx_hash_key loop below is similar.
- */
- for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
- MCDI_PTR(tablebuf,
- RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) rx_indir_table[i];
-
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
- sizeof(tablebuf), NULL, 0, NULL);
- if (rc != 0)
- return rc;
-
- MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
- context);
- BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
- MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
- for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
- MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
-
- return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
- sizeof(keybuf), NULL, 0, NULL);
-}
-
-static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
-{
- int rc;
-
- if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
- WARN_ON(rc != 0);
- }
- efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
-}
-
-static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
- unsigned *context_size)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
- context_size);
-
- if (rc != 0)
- return rc;
-
- nic_data->rx_rss_context_exclusive = false;
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
- return 0;
-}
-
-static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
-
- if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
- rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
- NULL);
- if (rc == -EOPNOTSUPP)
- return rc;
- else if (rc != 0)
- goto fail1;
- }
-
- rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
- rx_indir_table, key);
- if (rc != 0)
- goto fail2;
-
- if (efx->rss_context.context_id != old_rx_rss_context &&
- old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
- WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
- if (rx_indir_table != efx->rss_context.rx_indir_table)
- memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key != efx->rss_context.rx_hash_key)
- memcpy(efx->rss_context.rx_hash_key, key,
- efx->type->rx_hash_key_size);
-
- return 0;
-
-fail2:
- if (old_rx_rss_context != efx->rss_context.context_id) {
- WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
- efx->rss_context.context_id = old_rx_rss_context;
- }
-fail1:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
- if (rc)
- return rc;
- }
-
- if (!rx_indir_table) /* Delete this context */
- return efx_ef10_free_rss_context(efx, ctx->context_id);
-
- rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
- rx_indir_table, key);
- if (rc)
- return rc;
-
- memcpy(ctx->rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
-
- return 0;
-}
-
-static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
- MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
- MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
- size_t outlen;
- int rc, i;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
- MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
-
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
- return -ENOENT;
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
- MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
- tablebuf, sizeof(tablebuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
- RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
-
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
- ctx->context_id);
- BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
- MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
- rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
- keybuf, sizeof(keybuf), &outlen);
- if (rc != 0)
- return rc;
-
- if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
- ctx->rx_hash_key[i] = MCDI_PTR(
- keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
-
- return 0;
-}
-
-static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
-{
- int rc;
-
- mutex_lock(&efx->rss_lock);
- rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
-static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_rss_context *ctx;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- if (!nic_data->must_restore_rss_contexts)
- return;
-
- list_for_each_entry(ctx, &efx->rss_context.list, list) {
- /* previous NIC RSS context is gone */
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- /* so try to allocate a new one */
- rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
- ctx->rx_indir_table,
- ctx->rx_hash_key);
- if (rc)
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore RSS context %u, rc=%d"
- "; RSS filters may fail to be applied\n",
- ctx->user_id, rc);
- }
- nic_data->must_restore_rss_contexts = false;
-}
-
-static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table,
- const u8 *key)
-{
- int rc;
-
- if (efx->rss_spread == 1)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
-
- rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
-
- if (rc == -ENOBUFS && !user) {
- unsigned context_size;
- bool mismatch = false;
- size_t i;
-
- for (i = 0;
- i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
- i++)
- mismatch = rx_indir_table[i] !=
- ethtool_rxfh_indir_default(i, efx->rss_spread);
-
- rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
- if (rc == 0) {
- if (context_size != efx->rss_spread)
- netif_warn(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one of"
- " different size."
- " Wanted %u, got %u.\n",
- efx->rss_spread, context_size);
- else if (mismatch)
- netif_warn(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one but"
- " could not apply custom"
- " indirection.\n");
- else
- netif_info(efx, probe, efx->net_dev,
- "Could not allocate an exclusive RSS"
- " context; allocated a shared one.\n");
- }
- }
- return rc;
-}
-
-static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table
- __attribute__ ((unused)),
- const u8 *key
- __attribute__ ((unused)))
-{
- if (user)
- return -EOPNOTSUPP;
- if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
- return 0;
- return efx_ef10_rx_push_shared_rss_config(efx, NULL);
-}
-
-static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
-{
- return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
- (rx_queue->ptr_mask + 1) *
- sizeof(efx_qword_t),
- GFP_KERNEL);
-}
-
-static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
-{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
- EFX_BUF_SIZE));
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- size_t inlen;
- dma_addr_t dma_addr;
- int rc;
- int i;
- BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
-
- rx_queue->scatter_n = 0;
- rx_queue->scatter_len = 0;
-
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
- efx_rx_queue_index(rx_queue));
- MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
- INIT_RXQ_IN_FLAG_PREFIX, 1,
- INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
-
- dma_addr = rx_queue->rxd.buf.dma_addr;
-
- netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
- efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
-
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- NULL, 0, NULL);
- if (rc)
- netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
- efx_rx_queue_index(rx_queue));
-}
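
The DMA loop above hands the descriptor ring to the MC as a list of
EFX_BUF_SIZE chunks, one address per chunk. A sketch of that chunking,
assuming EFX_BUF_SIZE is 4096 (a page-sized buffer; treat the constant
as illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define BUF_SIZE 4096 /* assumed value of EFX_BUF_SIZE */

	int main(void)
	{
		uint64_t dma_addr = 0x100000;	/* illustrative base */
		size_t len = 4096 * 8;		/* 4096 descriptors x 8B */
		size_t entries = len / BUF_SIZE;
		size_t i;

		/* Firmware receives one DMA address per BUF_SIZE chunk */
		for (i = 0; i < entries; i++)
			printf("chunk %zu @ %#llx\n", i,
			       (unsigned long long)(dma_addr + i * BUF_SIZE));
		return 0;
	}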
-
-static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = rx_queue->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
- efx_rx_queue_index(rx_queue));
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
-static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
-{
- efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
-}
-
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3229,106 +2460,20 @@ efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
/* nothing to do */
}
-static int efx_ef10_ev_probe(struct efx_channel *channel)
-{
- return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
- (channel->eventq_mask + 1) *
- sizeof(efx_qword_t),
- GFP_KERNEL);
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = channel->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
-
- if (rc && rc != -EALREADY)
- goto fail;
-
- return;
-
-fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
- outbuf, outlen, rc);
-}
-
static int efx_ef10_ev_init(struct efx_channel *channel)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
- EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
- size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- size_t inlen, outlen;
unsigned int enabled, implemented;
- dma_addr_t dma_addr;
+ bool use_v2, cut_thru;
int rc;
- int i;
nic_data = efx->nic_data;
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
-
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
- /* INIT_EVQ expects index in vector table, not absolute */
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
- MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
- MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
- MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
-
- if (nic_data->datapath_caps2 &
- 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
- /* Use the new generic approach to specifying event queue
- * configuration, requesting lower latency or higher throughput.
- * The options that actually get used appear in the output.
- */
- MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
- INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_V2_IN_FLAG_TYPE,
- MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
- } else {
- bool cut_thru = !(nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
-
- MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
- INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
- INIT_EVQ_IN_FLAG_RX_MERGE, 1,
- INIT_EVQ_IN_FLAG_TX_MERGE, 1,
- INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
- }
-
- dma_addr = channel->eventq.buf.dma_addr;
- for (i = 0; i < entries; ++i) {
- MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
- dma_addr += EFX_BUF_SIZE;
- }
-
- inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
-
- if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
- netif_dbg(efx, drv, efx->net_dev,
- "Channel %d using event queue flags %08x\n",
- channel->channel,
- MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+ use_v2 = nic_data->datapath_caps2 &
+ 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
+ cut_thru = !(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+ rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
/* IRQ return is ignored */
if (channel->channel || rc)
@@ -3386,15 +2531,10 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
return 0;
fail:
- efx_ef10_ev_fini(channel);
+ efx_mcdi_ev_fini(channel);
return rc;
}
-static void efx_ef10_ev_remove(struct efx_channel *channel)
-{
- efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
-}
-
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
unsigned int rx_queue_label)
{
@@ -3976,9 +3116,9 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
if (efx->state != STATE_RECOVERY) {
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_ef10_rx_fini(rx_queue);
+ efx_mcdi_rx_fini(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_ef10_tx_fini(tx_queue);
+ efx_mcdi_tx_fini(tx_queue);
}
wait_event_timeout(efx->flush_wq,
@@ -4000,1538 +3140,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
atomic_set(&efx->active_queues, 0);
}
-/* Decide whether a filter should be exclusive or else should allow
- * delivery to additional recipients. Currently we decide that
- * filters for specific local unicast MAC and IP addresses are
- * exclusive.
- */
-static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
-{
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
- !is_multicast_ether_addr(spec->loc_mac))
- return true;
-
- if ((spec->match_flags &
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
- if (spec->ether_type == htons(ETH_P_IP) &&
- !ipv4_is_multicast(spec->loc_host[0]))
- return true;
- if (spec->ether_type == htons(ETH_P_IPV6) &&
- ((const u8 *)spec->loc_host)[0] != 0xff)
- return true;
- }
-
- return false;
-}
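
The IPv6 branch above only inspects the first address byte, which is
sufficient because IPv6 multicast is exactly ff00::/8. A tiny sketch of
that test (illustrative, not the kernel's own helper):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool ipv6_is_multicast(const uint8_t addr[16])
	{
		return addr[0] == 0xff; /* ff00::/8 covers all multicast */
	}

	int main(void)
	{
		/* ff02::1, the all-nodes multicast address */
		const uint8_t all_nodes[16] = { 0xff, 0x02, [15] = 0x01 };

		printf("%d\n", ipv6_is_multicast(all_nodes));
		return 0;
	}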
-
-static struct efx_filter_spec *
-efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
- unsigned int filter_idx)
-{
- return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
- ~EFX_EF10_FILTER_FLAGS);
-}
-
-static unsigned int
-efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
- unsigned int filter_idx)
-{
- return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
-}
-
-static void
-efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
- unsigned int filter_idx,
- const struct efx_filter_spec *spec,
- unsigned int flags)
-{
- table->entry[filter_idx].spec = (unsigned long)spec | flags;
-}
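
These three helpers rely on pointer tagging: the spec pointers come from
kmalloc() and are therefore aligned, so their low bits are free to carry
per-entry flags in the same word. A minimal standalone sketch of the
idiom (all names and flag values illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define FLAG_BUSY	0x1UL
	#define FLAG_AUTO_OLD	0x2UL
	#define FLAGS_MASK	(FLAG_BUSY | FLAG_AUTO_OLD)

	struct spec { int dummy; };

	/* Pack pointer and flags into one word; allocator alignment
	 * guarantees the low bits of the pointer are zero.
	 */
	static uintptr_t set_entry(struct spec *s, unsigned long flags)
	{
		return (uintptr_t)s | (flags & FLAGS_MASK);
	}

	static struct spec *entry_spec(uintptr_t word)
	{
		return (struct spec *)(word & ~FLAGS_MASK);
	}

	static unsigned long entry_flags(uintptr_t word)
	{
		return word & FLAGS_MASK;
	}

	int main(void)
	{
		struct spec *s = malloc(sizeof(*s));
		uintptr_t word = set_entry(s, FLAG_AUTO_OLD);

		printf("spec=%p flags=%#lx\n",
		       (void *)entry_spec(word), entry_flags(word));
		free(s);
		return 0;
	}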
-
-static void
-efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- efx_dword_t *inbuf)
-{
- enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
- u32 match_fields = 0, uc_match, mc_match;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_INSERT :
- MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
-
- /* Convert match flags and values. Unlike almost
- * everything else in MCDI, these fields are in
- * network byte order.
- */
-#define COPY_VALUE(value, mcdi_field) \
- do { \
- match_fields |= \
- 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN; \
- BUILD_BUG_ON( \
- MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
- sizeof(value)); \
- memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
- &value, sizeof(value)); \
- } while (0)
-#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
- if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
- COPY_VALUE(spec->gen_field, mcdi_field); \
- }
- /* Handle encap filters first. They will always be mismatch
- * (unknown UC or MC) filters
- */
- if (encap_type) {
- /* ether_type and outer_ip_proto need to be variables
- * because COPY_VALUE wants to memcpy them
- */
- __be16 ether_type =
- htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
- ETH_P_IPV6 : ETH_P_IP);
- u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
- u8 outer_ip_proto;
-
- switch (encap_type & EFX_ENCAP_TYPES_MASK) {
- case EFX_ENCAP_TYPE_VXLAN:
- vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
- /* fallthrough */
- case EFX_ENCAP_TYPE_GENEVE:
- COPY_VALUE(ether_type, ETHER_TYPE);
- outer_ip_proto = IPPROTO_UDP;
- COPY_VALUE(outer_ip_proto, IP_PROTO);
- /* We always need to set the type field, even
- * though we're not matching on the TNI.
- */
- MCDI_POPULATE_DWORD_1(inbuf,
- FILTER_OP_EXT_IN_VNI_OR_VSID,
- FILTER_OP_EXT_IN_VNI_TYPE,
- vni_type);
- break;
- case EFX_ENCAP_TYPE_NVGRE:
- COPY_VALUE(ether_type, ETHER_TYPE);
- outer_ip_proto = IPPROTO_GRE;
- COPY_VALUE(outer_ip_proto, IP_PROTO);
- break;
- default:
- WARN_ON(1);
- }
-
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
- } else {
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
- }
-
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
- match_fields |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << mc_match :
- 1 << uc_match;
- COPY_FIELD(REM_HOST, rem_host, SRC_IP);
- COPY_FIELD(LOC_HOST, loc_host, DST_IP);
- COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
- COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
- COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
- COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
- COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
- COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
- COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
- COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
-#undef COPY_FIELD
-#undef COPY_VALUE
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
- match_fields);
-}
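
COPY_VALUE and COPY_FIELD above build MCDI field names by token pasting,
so a single macro body serves every match field. A cut-down sketch of
the same technique with made-up offsets (real MCDI match fields are in
network byte order; that detail is omitted here):

	#include <stdio.h>
	#include <string.h>

	/* Illustrative stand-ins for generated MCDI field offsets */
	#define FIELD_SRC_PORT_OFST 0
	#define FIELD_DST_PORT_OFST 2

	/* Token pasting turns COPY(x, SRC_PORT) into a memcpy at
	 * FIELD_SRC_PORT_OFST: one macro covers many fields.
	 */
	#define COPY(value, mcdi_field)					\
		memcpy(buf + FIELD_ ## mcdi_field ## _OFST,		\
		       &(value), sizeof(value))

	int main(void)
	{
		unsigned char buf[4] = { 0 };
		unsigned short src_port = 80, dst_port = 443;

		COPY(src_port, SRC_PORT);
		COPY(dst_port, DST_PORT);
		printf("%02x %02x %02x %02x\n",
		       buf[0], buf[1], buf[2], buf[3]);
		return 0;
	}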
-
-static void efx_ef10_filter_push_prep(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- efx_dword_t *inbuf, u64 handle,
- struct efx_rss_context *ctx,
- bool replacing)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u32 flags = spec->flags;
-
- memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
-
-	/* If this is an RSS filter, the caller must have given us an RSS
-	 * context.
-	 */
- if (flags & EFX_FILTER_FLAG_RX_RSS) {
- /* We don't have the ability to return an error, so we'll just
- * log a warning and disable RSS for the filter.
- */
- if (WARN_ON_ONCE(!ctx))
- flags &= ~EFX_FILTER_FLAG_RX_RSS;
- else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
- flags &= ~EFX_FILTER_FLAG_RX_RSS;
- }
-
- if (replacing) {
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_REPLACE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
- } else {
- efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
- }
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
- MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
- MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
- MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
- 0 : spec->dmaq_id);
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
- (flags & EFX_FILTER_FLAG_RX_RSS) ?
- MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
- MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
- if (flags & EFX_FILTER_FLAG_RX_RSS)
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
-}
-
-static int efx_ef10_filter_push(struct efx_nic *efx,
- const struct efx_filter_spec *spec, u64 *handle,
- struct efx_rss_context *ctx, bool replacing)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
- size_t outlen;
- int rc;
-
- efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc && spec->priority != EFX_FILTER_PRI_HINT)
- efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
- outbuf, outlen, rc);
- if (rc == 0)
- *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
- if (rc == -ENOSPC)
- rc = -EBUSY; /* to match efx_farch_filter_insert() */
- return rc;
-}
-
-static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
-{
- enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
- unsigned int match_flags = spec->match_flags;
- unsigned int uc_match, mc_match;
- u32 mcdi_flags = 0;
-
-#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
- unsigned int old_match_flags = match_flags; \
- match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
- if (match_flags != old_match_flags) \
- mcdi_flags |= \
- (1 << ((encap) ? \
- MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
- mcdi_field ## _LBN : \
- MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
- mcdi_field ## _LBN)); \
- }
- /* inner or outer based on encap type */
- MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
- MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
- /* always outer */
- MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
- MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
-#undef MAP_FILTER_TO_MCDI_FLAG
-
- /* special handling for encap type, and mismatch */
- if (encap_type) {
- match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
- mcdi_flags |=
- (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
- mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
-
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
- } else {
- uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
- mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
- }
-
- if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
- match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
- mcdi_flags |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << mc_match :
- 1 << uc_match;
- }
-
- /* Did we map them all? */
- WARN_ON_ONCE(match_flags);
-
- return mcdi_flags;
-}
-
-static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
- const struct efx_filter_spec *spec)
-{
- u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
- unsigned int match_pri;
-
- for (match_pri = 0;
- match_pri < table->rx_match_count;
- match_pri++)
- if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
- return match_pri;
-
- return -EPROTONOSUPPORT;
-}
-
-static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
-{
- DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_ef10_filter_table *table;
- struct efx_filter_spec *saved_spec;
- struct efx_rss_context *ctx = NULL;
- unsigned int match_pri, hash;
- unsigned int priv_flags;
- bool rss_locked = false;
- bool replacing = false;
- unsigned int depth, i;
- int ins_index = -1;
- DEFINE_WAIT(wait);
- bool is_mc_recip;
- s32 rc;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- table = efx->filter_state;
- down_write(&table->lock);
-
- /* For now, only support RX filters */
- if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
- EFX_FILTER_FLAG_RX) {
- rc = -EINVAL;
- goto out_unlock;
- }
-
- rc = efx_ef10_filter_pri(table, spec);
- if (rc < 0)
- goto out_unlock;
- match_pri = rc;
-
- hash = efx_filter_spec_hash(spec);
- is_mc_recip = efx_filter_is_mc_recipient(spec);
- if (is_mc_recip)
- bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
-
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- mutex_lock(&efx->rss_lock);
- rss_locked = true;
- if (spec->rss_context)
- ctx = efx_find_rss_context_entry(efx, spec->rss_context);
- else
- ctx = &efx->rss_context;
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
-
- /* Find any existing filters with the same match tuple or
- * else a free slot to insert at.
- */
- for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
-
- if (!saved_spec) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_spec_equal(spec, saved_spec)) {
- if (spec->priority < saved_spec->priority &&
- spec->priority != EFX_FILTER_PRI_AUTO) {
- rc = -EPERM;
- goto out_unlock;
- }
- if (!is_mc_recip) {
-				/* Exclusive filter: at most one entry can match */
- if (spec->priority ==
- saved_spec->priority &&
- !replace_equal) {
- rc = -EEXIST;
- goto out_unlock;
- }
- ins_index = i;
- break;
- } else if (spec->priority >
- saved_spec->priority ||
- (spec->priority ==
- saved_spec->priority &&
- replace_equal)) {
- if (ins_index < 0)
- ins_index = i;
- else
- __set_bit(depth, mc_rem_map);
- }
- }
- }
-
- /* Once we reach the maximum search depth, use the first suitable
- * slot, or return -EBUSY if there was none
- */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out_unlock;
- }
-
- /* Create a software table entry if necessary. */
- saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
- if (saved_spec) {
- if (spec->priority == EFX_FILTER_PRI_AUTO &&
- saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
- /* Just make sure it won't be removed */
- if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
- saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
- table->entry[ins_index].spec &=
- ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- rc = ins_index;
- goto out_unlock;
- }
- replacing = true;
- priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
- } else {
- saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
- if (!saved_spec) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- *saved_spec = *spec;
- priv_flags = 0;
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
-
- /* Actually insert the filter on the HW */
- rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
- ctx, replacing);
-
- if (rc == -EINVAL && nic_data->must_realloc_vis)
- /* The MC rebooted under us, causing it to reject our filter
- * insertion as pointing to an invalid VI (spec->dmaq_id).
- */
- rc = -EAGAIN;
-
- /* Finalise the software table entry */
- if (rc == 0) {
- if (replacing) {
- /* Update the fields that may differ */
- if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
- saved_spec->flags |=
- EFX_FILTER_FLAG_RX_OVER_AUTO;
- saved_spec->priority = spec->priority;
- saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
- saved_spec->flags |= spec->flags;
- saved_spec->rss_context = spec->rss_context;
- saved_spec->dmaq_id = spec->dmaq_id;
- }
- } else if (!replacing) {
- kfree(saved_spec);
- saved_spec = NULL;
- } else {
- /* We failed to replace, so the old filter is still present.
- * Roll back the software table to reflect this. In fact the
- * efx_ef10_filter_set_entry() call below will do the right
- * thing, so nothing extra is needed here.
- */
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
-
- /* Remove and finalise entries for lower-priority multicast
- * recipients
- */
- if (is_mc_recip) {
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- unsigned int depth, i;
-
- memset(inbuf, 0, sizeof(inbuf));
-
- for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- if (!test_bit(depth, mc_rem_map))
- continue;
-
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
- priv_flags = efx_ef10_filter_entry_flags(table, i);
-
- if (rc == 0) {
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[i].handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- }
-
- if (rc == 0) {
- kfree(saved_spec);
- saved_spec = NULL;
- priv_flags = 0;
- }
- efx_ef10_filter_set_entry(table, i, saved_spec,
- priv_flags);
- }
- }
-
- /* If successful, return the inserted filter ID */
- if (rc == 0)
- rc = efx_ef10_make_filter_id(match_pri, ins_index);
-
-out_unlock:
- if (rss_locked)
- mutex_unlock(&efx->rss_lock);
- up_write(&table->lock);
- return rc;
-}
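
The search loop above is an open-addressed hash table with linear
probing: the row count is a power of two, so the probe sequence wraps
with a mask instead of a modulo, and the scan gives up after a fixed
search limit. A sketch of just that probe sequence (constants
illustrative):

	#include <stdio.h>

	#define ROWS		8192	/* must be a power of two */
	#define SEARCH_LIMIT	200	/* illustrative probe bound */

	int main(void)
	{
		unsigned int hash = 12345, depth;

		/* Same slot arithmetic as the removed loop above */
		for (depth = 1; depth < 5 && depth < SEARCH_LIMIT; depth++)
			printf("probe %u -> slot %u\n", depth,
			       (hash + depth) & (ROWS - 1));
		return 0;
	}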
-
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
-{
- s32 ret;
-
- down_read(&efx->filter_sem);
- ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
- up_read(&efx->filter_sem);
-
- return ret;
-}
-
-static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
-{
- /* no need to do anything here on EF10 */
-}
-
-/* Remove a filter.
- * If !by_index, remove by ID
- * If by_index, remove by index
- * Filter ID may come from userland and must be range-checked.
- * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
- * for write.
- */
-static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
- unsigned int priority_mask,
- u32 filter_id, bool by_index)
-{
- unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FILTER_OP_IN_HANDLE_OFST +
- MC_CMD_FILTER_OP_IN_HANDLE_LEN);
- struct efx_filter_spec *spec;
- DEFINE_WAIT(wait);
- int rc;
-
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec ||
- (!by_index &&
- efx_ef10_filter_pri(table, spec) !=
- efx_ef10_filter_get_unsafe_pri(filter_id)))
- return -ENOENT;
-
- if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
- priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
- /* Just remove flags */
- spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
- table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- return 0;
- }
-
- if (!(priority_mask & (1U << spec->priority)))
- return -ENOENT;
-
- if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
- /* Reset to an automatic filter */
-
- struct efx_filter_spec new_spec = *spec;
-
- new_spec.priority = EFX_FILTER_PRI_AUTO;
- new_spec.flags = (EFX_FILTER_FLAG_RX |
- (efx_rss_active(&efx->rss_context) ?
- EFX_FILTER_FLAG_RX_RSS : 0));
- new_spec.dmaq_id = 0;
- new_spec.rss_context = 0;
- rc = efx_ef10_filter_push(efx, &new_spec,
- &table->entry[filter_idx].handle,
- &efx->rss_context,
- true);
-
- if (rc == 0)
- *spec = new_spec;
- } else {
- /* Really remove the filter */
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_REMOVE :
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
- inbuf, sizeof(inbuf), NULL, 0, NULL);
-
- if ((rc == 0) || (rc == -ENOENT)) {
- /* Filter removed OK or didn't actually exist */
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- } else {
- efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
- MC_CMD_FILTER_OP_EXT_IN_LEN,
- NULL, 0, rc);
- }
- }
-
- return rc;
-}
-
-static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_ef10_filter_table *table;
- int rc;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
- false);
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-/* Caller must hold efx->filter_sem for read */
-static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
-
- if (filter_id == EFX_EF10_FILTER_ID_INVALID)
- return;
-
- down_write(&table->lock);
- efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
- true);
- up_write(&table->lock);
-}
-
-static int efx_ef10_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec)
-{
- unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- const struct efx_filter_spec *saved_spec;
- struct efx_ef10_filter_table *table;
- int rc;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
- saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (saved_spec && saved_spec->priority == priority &&
- efx_ef10_filter_pri(table, saved_spec) ==
- efx_ef10_filter_get_unsafe_pri(filter_id)) {
- *spec = *saved_spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
-
-static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_ef10_filter_table *table;
- unsigned int priority_mask;
- unsigned int i;
- int rc;
-
- priority_mask = (((1U << (priority + 1)) - 1) &
- ~(1U << EFX_FILTER_PRI_AUTO));
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- rc = efx_ef10_filter_remove_internal(efx, priority_mask,
- i, true);
- if (rc && rc != -ENOENT)
- break;
- rc = 0;
- }
-
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return rc;
-}
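
The priority_mask expression above selects every priority up to and
including the requested one, then always exempts AUTO filters. A worked
example, assuming the usual ordering HINT(0) < AUTO(1) < MANUAL(2) <
REQUIRED(3):

	#include <stdio.h>

	/* Assumed enum layout matching the driver's filter priorities */
	enum { PRI_HINT, PRI_AUTO, PRI_MANUAL, PRI_REQUIRED };

	int main(void)
	{
		unsigned int priority = PRI_MANUAL;
		unsigned int mask = (((1U << (priority + 1)) - 1) &
				     ~(1U << PRI_AUTO));

		/* 0x5 = HINT | MANUAL: AUTO filters are never cleared */
		printf("mask = %#x\n", mask);
		return 0;
	}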
-
-static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_ef10_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- if (table->entry[filter_idx].spec &&
- efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
- priority)
- ++count;
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return count;
-}
-
-static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
-
- return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
-}
-
-static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_ef10_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- s32 count = 0;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_read(&table->lock);
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (spec && spec->priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- break;
- }
- buf[count++] =
- efx_ef10_make_filter_id(
- efx_ef10_filter_pri(table, spec),
- filter_idx);
- }
- }
- up_read(&table->lock);
- up_read(&efx->filter_sem);
- return count;
-}
-
-#ifdef CONFIG_RFS_ACCEL
-
-static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int filter_idx)
-{
- struct efx_filter_spec *spec, saved_spec;
- struct efx_ef10_filter_table *table;
- struct efx_arfs_rule *rule = NULL;
- bool ret = true, force = false;
- u16 arfs_id;
-
- down_read(&efx->filter_sem);
- table = efx->filter_state;
- down_write(&table->lock);
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
-
- if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
- goto out_unlock;
-
- spin_lock_bh(&efx->rps_hash_lock);
- if (!efx->rps_hash_table) {
- /* In the absence of the table, we always return 0 to ARFS. */
- arfs_id = 0;
- } else {
- rule = efx_rps_hash_find(efx, spec);
- if (!rule)
- /* ARFS table doesn't know of this filter, so remove it */
- goto expire;
- arfs_id = rule->arfs_id;
- ret = efx_rps_check_rule(rule, filter_idx, &force);
- if (force)
- goto expire;
- if (!ret) {
- spin_unlock_bh(&efx->rps_hash_lock);
- goto out_unlock;
- }
- }
- if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
- ret = false;
- else if (rule)
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
-expire:
- saved_spec = *spec; /* remove operation will kfree spec */
- spin_unlock_bh(&efx->rps_hash_lock);
- /* At this point (since we dropped the lock), another thread might queue
- * up a fresh insertion request (but the actual insertion will be held
- * up by our possession of the filter table lock). In that case, it
- * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
- * the rule is not removed by efx_rps_hash_del() below.
- */
- if (ret)
- ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
- filter_idx, true) == 0;
- /* While we can't safely dereference rule (we dropped the lock), we can
- * still test it for NULL.
- */
- if (ret && rule) {
- /* Expiring, so remove entry from ARFS table */
- spin_lock_bh(&efx->rps_hash_lock);
- efx_rps_hash_del(efx, &saved_spec);
- spin_unlock_bh(&efx->rps_hash_lock);
- }
-out_unlock:
- up_write(&table->lock);
- up_read(&efx->filter_sem);
- return ret;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
-
-static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
-{
- int match_flags = 0;
-
-#define MAP_FLAG(gen_flag, mcdi_field) do { \
- u32 old_mcdi_flags = mcdi_flags; \
- mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
- if (mcdi_flags != old_mcdi_flags) \
- match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
- } while (0)
-
- if (encap) {
- /* encap filters must specify encap type */
- match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
- /* and imply ethertype and ip proto */
- mcdi_flags &=
- ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
- mcdi_flags &=
- ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
- /* VLAN tags refer to the outer packet */
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- /* everything else refers to the inner packet */
- MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, IFRM_SRC_IP);
- MAP_FLAG(LOC_HOST, IFRM_DST_IP);
- MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
- MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
- MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
- MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
- MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
- MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
- } else {
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, SRC_IP);
- MAP_FLAG(LOC_HOST, DST_IP);
- MAP_FLAG(REM_MAC, SRC_MAC);
- MAP_FLAG(REM_PORT, SRC_PORT);
- MAP_FLAG(LOC_MAC, DST_MAC);
- MAP_FLAG(LOC_PORT, DST_PORT);
- MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FLAG(IP_PROTO, IP_PROTO);
- }
-#undef MAP_FLAG
-
- /* Did we map them all? */
- if (mcdi_flags)
- return -EINVAL;
-
- return match_flags;
-}
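
MAP_FLAG is a consume-and-check idiom: each recognised bit is cleared
from mcdi_flags as it is mapped, so any bit still set at the end marks a
match type the driver does not understand and fails the whole
conversion. A compact sketch with invented flag values:

	#include <stdio.h>

	#define MCDI_SRC_IP (1u << 0)
	#define MCDI_DST_IP (1u << 1)

	static int map_flags(unsigned int mcdi_flags)
	{
		int match = 0;

		if (mcdi_flags & MCDI_SRC_IP) {
			mcdi_flags &= ~MCDI_SRC_IP;
			match |= 0x1;
		}
		if (mcdi_flags & MCDI_DST_IP) {
			mcdi_flags &= ~MCDI_DST_IP;
			match |= 0x2;
		}
		return mcdi_flags ? -1 : match; /* leftover bit => error */
	}

	int main(void)
	{
		printf("%d\n", map_flags(MCDI_SRC_IP | MCDI_DST_IP));
		printf("%d\n", map_flags(1u << 5)); /* unknown bit */
		return 0;
	}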
-
-static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan, *next_vlan;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- if (!table)
- return;
-
- list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
- efx_ef10_filter_del_vlan_internal(efx, vlan);
-}
-
-static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
- bool encap,
- enum efx_filter_match_flags match_flags)
-{
- unsigned int match_pri;
- int mf;
-
- for (match_pri = 0;
- match_pri < table->rx_match_count;
- match_pri++) {
- mf = efx_ef10_filter_match_flags_from_mcdi(encap,
- table->rx_match_mcdi_flags[match_pri]);
- if (mf == match_flags)
- return true;
- }
-
- return false;
-}
-
-static int
-efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
- struct efx_ef10_filter_table *table,
- bool encap)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
- unsigned int pd_match_pri, pd_match_count;
- size_t outlen;
- int rc;
-
- /* Find out which RX filter types are supported, and their priorities */
- MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
- encap ?
- MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
- MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
- inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
- &outlen);
- if (rc)
- return rc;
-
- pd_match_count = MCDI_VAR_ARRAY_LEN(
- outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
-
- for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
- u32 mcdi_flags =
- MCDI_ARRAY_DWORD(
- outbuf,
- GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
- pd_match_pri);
- rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
- if (rc < 0) {
- netif_dbg(efx, probe, efx->net_dev,
- "%s: fw flags %#x pri %u not supported in driver\n",
- __func__, mcdi_flags, pd_match_pri);
- } else {
- netif_dbg(efx, probe, efx->net_dev,
- "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
- __func__, mcdi_flags, pd_match_pri,
- rc, table->rx_match_count);
- table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
- table->rx_match_count++;
- }
- }
-
- return 0;
-}
-
-static int efx_ef10_filter_table_probe(struct efx_nic *efx)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
- struct efx_ef10_filter_table *table;
- struct efx_ef10_vlan *vlan;
- int rc;
-
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- if (efx->filter_state) /* already probed */
- return 0;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- table->rx_match_count = 0;
- rc = efx_ef10_filter_table_probe_matches(efx, table, false);
- if (rc)
- goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
- rc = efx_ef10_filter_table_probe_matches(efx, table, true);
- if (rc)
- goto fail;
- if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(efx_ef10_filter_match_supported(table, false,
- (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
- efx_ef10_filter_match_supported(table, false,
- (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
- netif_info(efx, probe, net_dev,
- "VLAN filters are not supported in this firmware variant\n");
- net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
- }
-
- table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS,
- sizeof(*table->entry)));
- if (!table->entry) {
- rc = -ENOMEM;
- goto fail;
- }
-
- table->mc_promisc_last = false;
- table->vlan_filter =
- !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
- INIT_LIST_HEAD(&table->vlan_list);
- init_rwsem(&table->lock);
-
- efx->filter_state = table;
-
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
- return 0;
-
-fail_add_vlan:
- efx_ef10_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
-fail:
- kfree(table);
- return rc;
-}
-
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_table_restore(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- unsigned int invalid_filters = 0, failed = 0;
- struct efx_ef10_filter_vlan *vlan;
- struct efx_filter_spec *spec;
- struct efx_rss_context *ctx;
- unsigned int filter_idx;
- u32 mcdi_flags;
- int match_pri;
- int rc, i;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
-
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
- return;
-
- down_write(&table->lock);
- mutex_lock(&efx->rss_lock);
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec)
- continue;
-
- mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
- match_pri = 0;
- while (match_pri < table->rx_match_count &&
- table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
- ++match_pri;
- if (match_pri >= table->rx_match_count) {
- invalid_filters++;
- goto not_restored;
- }
- if (spec->rss_context)
- ctx = efx_find_rss_context_entry(efx, spec->rss_context);
- else
- ctx = &efx->rss_context;
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- if (!ctx) {
- netif_warn(efx, drv, efx->net_dev,
-					   "unable to restore a filter with nonexistent RSS context %u\n",
- spec->rss_context);
- invalid_filters++;
- goto not_restored;
- }
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
- netif_warn(efx, drv, efx->net_dev,
-					   "unable to restore a filter with RSS context %u as it was not created\n",
- spec->rss_context);
- invalid_filters++;
- goto not_restored;
- }
- }
-
- rc = efx_ef10_filter_push(efx, spec,
- &table->entry[filter_idx].handle,
- ctx, false);
- if (rc)
- failed++;
-
- if (rc) {
-not_restored:
- list_for_each_entry(vlan, &table->vlan_list, list)
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
- if (vlan->default_filters[i] == filter_idx)
- vlan->default_filters[i] =
- EFX_EF10_FILTER_ID_INVALID;
-
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- }
- }
-
- mutex_unlock(&efx->rss_lock);
- up_write(&table->lock);
-
- /* This can happen validly if the MC's capabilities have changed, so
- * is not an error.
- */
- if (invalid_filters)
- netif_dbg(efx, drv, efx->net_dev,
- "Did not restore %u filters that are now unsupported.\n",
- invalid_filters);
-
- if (failed)
- netif_err(efx, hw, efx->net_dev,
- "unable to restore %u filters\n", failed);
- else
- nic_data->must_restore_filters = false;
-}
-
-static void efx_ef10_filter_table_remove(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- int rc;
-
- efx_ef10_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
- /* If we were called without locking, then it's not safe to free
- * the table as others might be using it. So we just WARN, leak
- * the memory, and potentially get an inconsistent filter table
- * state.
- * This should never actually happen.
- */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- if (!table)
- return;
-
- for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
- spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec)
- continue;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_REMOVE :
- MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
- sizeof(inbuf), NULL, 0, NULL);
- if (rc)
- netif_info(efx, drv, efx->net_dev,
- "%s: filter %04x remove failed\n",
- __func__, filter_idx);
- kfree(spec);
- }
-
- vfree(table->entry);
- kfree(table);
-}
-
-static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int filter_idx;
-
- efx_rwsem_assert_write_locked(&table->lock);
-
- if (*id != EFX_EF10_FILTER_ID_INVALID) {
- filter_idx = efx_ef10_filter_get_unsafe_id(*id);
- if (!table->entry[filter_idx].spec)
- netif_dbg(efx, drv, efx->net_dev,
- "marked null spec old %04x:%04x\n", *id,
- filter_idx);
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- *id = EFX_EF10_FILTER_ID_INVALID;
- }
-}
-
-/* Mark old per-VLAN filters that may need to be removed */
-static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int i;
-
- for (i = 0; i < table->dev_uc_count; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
- for (i = 0; i < table->dev_mc_count; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
-}
-
-/* Mark old filters that may need to be removed.
- * Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_mark_old(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
-
- down_write(&table->lock);
- list_for_each_entry(vlan, &table->vlan_list, list)
- _efx_ef10_filter_vlan_mark_old(efx, vlan);
- up_write(&table->lock);
-}
-
-static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct netdev_hw_addr *uc;
- unsigned int i;
-
- table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
- ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
- i = 1;
- netdev_for_each_uc_addr(uc, net_dev) {
- if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
- table->uc_promisc = true;
- break;
- }
- ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- i++;
- }
-
- table->dev_uc_count = i;
-}
-
-static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct netdev_hw_addr *mc;
- unsigned int i;
-
- table->mc_overflow = false;
- table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
-
- i = 0;
- netdev_for_each_mc_addr(mc, net_dev) {
- if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
- table->mc_promisc = true;
- table->mc_overflow = true;
- break;
- }
- ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- i++;
- }
-
- table->dev_mc_count = i;
-}
-
-static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan,
- bool multicast, bool rollback)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_dev_addr *addr_list;
- enum efx_filter_flags filter_flags;
- struct efx_filter_spec spec;
- u8 baddr[ETH_ALEN];
- unsigned int i, j;
- int addr_count;
- u16 *ids;
- int rc;
-
- if (multicast) {
- addr_list = table->dev_mc_list;
- addr_count = table->dev_mc_count;
- ids = vlan->mc;
- } else {
- addr_list = table->dev_uc_list;
- addr_count = table->dev_uc_count;
- ids = vlan->uc;
- }
-
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
-
- /* Insert/renew filters */
- for (i = 0; i < addr_count; i++) {
- EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- if (rollback) {
- netif_info(efx, drv, efx->net_dev,
- "efx_ef10_filter_insert failed rc=%d\n",
- rc);
- /* Fall back to promiscuous */
- for (j = 0; j < i; j++) {
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- ids[j]);
- ids[j] = EFX_EF10_FILTER_ID_INVALID;
- }
- return rc;
- } else {
- /* keep invalid ID, and carry on */
- }
- } else {
- ids[i] = efx_ef10_filter_get_unsafe_id(rc);
- }
- }
-
- if (multicast && rollback) {
- /* Also need an Ethernet broadcast filter */
- EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
- EFX_EF10_FILTER_ID_INVALID);
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "Broadcast filter insert failed rc=%d\n", rc);
- /* Fall back to promiscuous */
- for (j = 0; j < i; j++) {
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- ids[j]);
- ids[j] = EFX_EF10_FILTER_ID_INVALID;
- }
- return rc;
- } else {
- vlan->default_filters[EFX_EF10_BCAST] =
- efx_ef10_filter_get_unsafe_id(rc);
- }
- }
-
- return 0;
-}
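
On failure with rollback enabled, the function above unwinds every
filter it has inserted so far before reporting the error, so the address
list is installed all-or-nothing. The shape of that pattern, reduced to
a sketch:

	#include <stdio.h>

	#define N		4
	#define ID_INVALID	0xffff

	static int insert(int i)      { return i == 2 ? -1 : i; /* fail at 2 */ }
	static void remove_id(int id) { printf("removed %d\n", id); }

	int main(void)
	{
		int ids[N], i, j, rc;

		for (i = 0; i < N; i++) {
			rc = insert(i);
			if (rc < 0) {
				/* Roll back filters 0..i-1 */
				for (j = 0; j < i; j++) {
					remove_id(ids[j]);
					ids[j] = ID_INVALID;
				}
				return 1;
			}
			ids[i] = rc;
		}
		return 0;
	}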
-
-static int efx_ef10_filter_insert_def(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan,
- enum efx_encap_type encap_type,
- bool multicast, bool rollback)
-{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- enum efx_filter_flags filter_flags;
- struct efx_filter_spec spec;
- u8 baddr[ETH_ALEN];
- int rc;
- u16 *id;
-
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
-
- if (multicast)
- efx_filter_set_mc_def(&spec);
- else
- efx_filter_set_uc_def(&spec);
-
- if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
- efx_filter_set_encap_type(&spec, encap_type);
- else
-			/* Don't insert encap filters on platforms that lack
-			 * support; the ID is left as INVALID.
-			 */
- return 0;
- }
-
- if (vlan->vid != EFX_FILTER_VID_UNSPEC)
- efx_filter_set_eth_local(&spec, vlan->vid, NULL);
-
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- const char *um = multicast ? "Multicast" : "Unicast";
- const char *encap_name = "";
- const char *encap_ipv = "";
-
- if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_VXLAN)
- encap_name = "VXLAN ";
- else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_NVGRE)
- encap_name = "NVGRE ";
- else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
- EFX_ENCAP_TYPE_GENEVE)
- encap_name = "GENEVE ";
- if (encap_type & EFX_ENCAP_FLAG_IPV6)
- encap_ipv = "IPv6 ";
- else if (encap_type)
- encap_ipv = "IPv4 ";
-
- /* unprivileged functions can't insert mismatch filters
- * for encapsulated or unicast traffic, so downgrade
- * those warnings to debug.
- */
- netif_cond_dbg(efx, drv, efx->net_dev,
- rc == -EPERM && (encap_type || !multicast), warn,
- "%s%s%s mismatch filter insert failed rc=%d\n",
- encap_name, encap_ipv, um, rc);
- } else if (multicast) {
- /* mapping from encap types to default filter IDs (multicast) */
- static enum efx_ef10_default_filters map[] = {
- [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
- [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
- [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
- [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
- [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_VXLAN6_MCDEF,
- [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_NVGRE6_MCDEF,
- [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_GENEVE6_MCDEF,
- };
-
-		/* Bounds check; a zero map entry means "unmapped", which is
-		 * safe because EFX_EF10_BCAST (0) is never a mapped result.
-		 */
- BUILD_BUG_ON(EFX_EF10_BCAST != 0);
- if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
- /* then follow map */
- id = &vlan->default_filters[map[encap_type]];
-
- EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
- *id = efx_ef10_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
- /* Also need an Ethernet broadcast filter */
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- filter_flags, 0);
- eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert_locked(efx, &spec, true);
- if (rc < 0) {
- netif_warn(efx, drv, efx->net_dev,
- "Broadcast filter insert failed rc=%d\n",
- rc);
- if (rollback) {
- /* Roll back the mc_def filter */
- efx_ef10_filter_remove_unsafe(
- efx, EFX_FILTER_PRI_AUTO,
- *id);
- *id = EFX_EF10_FILTER_ID_INVALID;
- return rc;
- }
- } else {
- EFX_WARN_ON_PARANOID(
- vlan->default_filters[EFX_EF10_BCAST] !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->default_filters[EFX_EF10_BCAST] =
- efx_ef10_filter_get_unsafe_id(rc);
- }
- }
- rc = 0;
- } else {
- /* mapping from encap types to default filter IDs (unicast) */
- static enum efx_ef10_default_filters map[] = {
- [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
- [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
- [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
- [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
- [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_VXLAN6_UCDEF,
- [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_NVGRE6_UCDEF,
- [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
- EFX_EF10_GENEVE6_UCDEF,
- };
-
-		/* Bounds check; a zero map entry means "unmapped", which is
-		 * safe because EFX_EF10_BCAST (0) is never a mapped result.
-		 */
- BUILD_BUG_ON(EFX_EF10_BCAST != 0);
- if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
- /* then follow map */
- id = &vlan->default_filters[map[encap_type]];
- EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
- *id = rc;
- rc = 0;
- }
- return rc;
-}
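
Both encap maps above use C designated initializers as sparse lookup
tables, where an unlisted index is zero-initialised and zero doubles as
"unmapped"; the BUILD_BUG_ON pins down that slot 0 (BCAST) can never be
a real mapping target. A sketch of the trick with invented enums:

	#include <stdio.h>

	enum def_filter { DF_BCAST, DF_UCDEF, DF_VXLAN4_UCDEF };
	enum encap { ENCAP_NONE, ENCAP_VXLAN, ENCAP_NVGRE };

	/* Unlisted entries are 0 (DF_BCAST), which marks "unmapped" */
	static const enum def_filter map[] = {
		[ENCAP_NONE]  = DF_UCDEF,
		[ENCAP_VXLAN] = DF_VXLAN4_UCDEF,
	};

	int main(void)
	{
		enum encap e = ENCAP_NVGRE;

		if ((size_t)e >= sizeof(map) / sizeof(map[0]) || map[e] == 0)
			printf("encap %d unmapped\n", e);
		else
			printf("encap %d -> filter %d\n", e, map[e]);
		return 0;
	}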
-
-/* Remove filters that weren't renewed. */
-static void efx_ef10_filter_remove_old(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- int remove_failed = 0;
- int remove_noent = 0;
- int rc;
- int i;
-
- down_write(&table->lock);
- for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (READ_ONCE(table->entry[i].spec) &
- EFX_EF10_FILTER_FLAG_AUTO_OLD) {
- rc = efx_ef10_filter_remove_internal(efx,
- 1U << EFX_FILTER_PRI_AUTO, i, true);
- if (rc == -ENOENT)
- remove_noent++;
- else if (rc)
- remove_failed++;
- }
- }
- up_write(&table->lock);
-
- if (remove_failed)
- netif_info(efx, drv, efx->net_dev,
- "%s: failed to remove %d filters\n",
- __func__, remove_failed);
- if (remove_noent)
- netif_info(efx, drv, efx->net_dev,
- "%s: failed to remove %d non-existent filters\n",
- __func__, remove_noent);
-}
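
Together with efx_ef10_filter_mark_old() above, this is a mark-and-sweep
renewal scheme: every automatic filter is first flagged AUTO_OLD,
re-insertion clears the flag, and this sweep removes whatever still
carries it. Reduced to a sketch:

	#include <stdio.h>

	#define FLAG_AUTO_OLD	0x1
	#define N		4

	int main(void)
	{
		/* Mark phase flagged all four; renewal cleared 1 and 3 */
		unsigned int entry[N] = { FLAG_AUTO_OLD, 0,
					  FLAG_AUTO_OLD, 0 };
		int i;

		/* Sweep: anything still marked old was not renewed */
		for (i = 0; i < N; i++)
			if (entry[i] & FLAG_AUTO_OLD) {
				printf("removing stale filter %d\n", i);
				entry[i] = 0;
			}
		return 0;
	}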
-
static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -5545,7 +3153,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
down_write(&efx->filter_sem);
- efx_ef10_filter_table_remove(efx);
+ efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
@@ -5577,7 +3185,7 @@ restore_vadaptor:
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_ef10_filter_table_probe(efx);
+ rc2 = efx_mcdi_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -5598,256 +3206,6 @@ reset_nic:
return rc ? rc : rc2;
}
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
-	/* Do not install filters for the unspecified VID if VLAN filtering
-	 * is enabled, and do not install filters for specific VIDs if VLAN
-	 * filtering is disabled.
-	 */
- if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
- return;
-
- /* Insert/renew unicast filters */
- if (table->uc_promisc) {
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
- false, false);
- efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
- } else {
- /* If any of the filters failed to insert, fall back to
- * promiscuous mode - add in the uc_def filter. But keep
- * our individual unicast filters.
- */
- if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
- efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- false, false);
- }
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
- false, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
- EFX_ENCAP_FLAG_IPV6,
- false, false);
-
- /* Insert/renew multicast filters */
- /* If changing promiscuous state with cascaded multicast filters, remove
- * old filters first, so that packets are dropped rather than duplicated
- */
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
- efx_ef10_filter_remove_old(efx);
- if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
- /* If we failed to insert promiscuous filters, rollback
- * and fall back to individual multicast filters
- */
- if (efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, true)) {
- /* Changing promisc state, so remove old filters */
- efx_ef10_filter_remove_old(efx);
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- } else {
- /* If we failed to insert promiscuous filters, don't
- * rollback. Regardless, also insert the mc_list,
- * unless it's incomplete due to overflow
- */
- efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, false);
- if (!table->mc_overflow)
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- } else {
- /* If any filters failed to insert, rollback and fall back to
- * promiscuous mode - mc_def filter and maybe broadcast. If
- * that fails, roll back again and insert as many of our
- * individual multicast filters as we can.
- */
- if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
- /* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
- efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, vlan,
- EFX_ENCAP_TYPE_NONE,
- true, true))
- efx_ef10_filter_insert_addr_list(efx, vlan,
- true, false);
- }
- }
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
- true, false);
- efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
- EFX_ENCAP_FLAG_IPV6,
- true, false);
-}
-
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct efx_ef10_filter_vlan *vlan;
- bool vlan_filter;
-
- if (!efx_dev_registered(efx))
- return;
-
- if (!table)
- return;
-
- efx_ef10_filter_mark_old(efx);
-
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
- */
- netif_addr_lock_bh(net_dev);
- efx_ef10_filter_uc_addr_list(efx);
- efx_ef10_filter_mc_addr_list(efx);
- netif_addr_unlock_bh(net_dev);
-
- /* If VLAN filtering changes, all old filters are finally removed.
- * Do it in advance to avoid conflicts for unicast untagged and
- * VLAN 0 tagged filters.
- */
- vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
- if (table->vlan_filter != vlan_filter) {
- table->vlan_filter = vlan_filter;
- efx_ef10_filter_remove_old(efx);
- }
-
- list_for_each_entry(vlan, &table->vlan_list, list)
- efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
-
- efx_ef10_filter_remove_old(efx);
- table->mc_promisc_last = table->mc_promisc;
-}
-
-static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
-
- WARN_ON(!rwsem_is_locked(&efx->filter_sem));
-
- list_for_each_entry(vlan, &table->vlan_list, list) {
- if (vlan->vid == vid)
- return vlan;
- }
-
- return NULL;
-}
-
-static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_ef10_filter_vlan *vlan;
- unsigned int i;
-
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- vlan = efx_ef10_filter_find_vlan(efx, vid);
- if (WARN_ON(vlan)) {
- netif_err(efx, drv, efx->net_dev,
- "VLAN %u already added\n", vid);
- return -EALREADY;
- }
-
- vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return -ENOMEM;
-
- vlan->vid = vid;
-
- for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
- vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
- for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
- vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
-
- list_add_tail(&vlan->list, &table->vlan_list);
-
- if (efx_dev_registered(efx))
- efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
-
- return 0;
-}
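/* Editor's note: a hypothetical caller of efx_ef10_filter_add_vlan().
 * The table is typically seeded with the "no VLAN tag" pseudo-VID and
 * VLAN 0 before any user VIDs arrive; example_seed_vlans() is not in the
 * driver and only illustrates the calling convention (filter_sem held
 * for write, as the assertion above requires).
 */
static int example_seed_vlans(struct efx_nic *efx)
{
	int rc;

	rc = efx_ef10_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		return rc;
	return efx_ef10_filter_add_vlan(efx, 0);
}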
-
-static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
- struct efx_ef10_filter_vlan *vlan)
-{
- unsigned int i;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- list_del(&vlan->list);
-
- for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->uc[i]);
- for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->mc[i]);
- for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
- if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
- vlan->default_filters[i]);
-
- kfree(vlan);
-}
-
-static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
-{
- struct efx_ef10_filter_vlan *vlan;
-
- /* See comment in efx_ef10_filter_table_remove() */
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return;
-
- vlan = efx_ef10_filter_find_vlan(efx, vid);
- if (!vlan) {
- netif_err(efx, drv, efx->net_dev,
- "VLAN %u not found in filter state\n", vid);
- return;
- }
-
- efx_ef10_filter_del_vlan_internal(efx, vlan);
-}
-
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -5860,7 +3218,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- efx_ef10_filter_table_remove(efx);
+ efx_mcdi_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -5869,7 +3227,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_ef10_filter_table_probe(efx);
+ efx_mcdi_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -5931,14 +3289,14 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
- efx_ef10_filter_sync_rx_mode(efx);
+ efx_mcdi_filter_sync_rx_mode(efx);
return efx_mcdi_set_mac(efx);
}
static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
{
- efx_ef10_filter_sync_rx_mode(efx);
+ efx_mcdi_filter_sync_rx_mode(efx);
return 0;
}
@@ -6650,36 +4008,36 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
- .tx_remove = efx_ef10_tx_remove,
+ .tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
- .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
- .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
- .rx_probe = efx_ef10_rx_probe,
- .rx_init = efx_ef10_rx_init,
- .rx_remove = efx_ef10_rx_remove,
+ .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
- .ev_probe = efx_ef10_ev_probe,
+ .ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
- .ev_fini = efx_ef10_ev_fini,
- .ev_remove = efx_ef10_ev_remove,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_ef10_filter_table_probe,
- .filter_table_restore = efx_ef10_filter_table_restore,
- .filter_table_remove = efx_ef10_filter_table_remove,
- .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
- .filter_insert = efx_ef10_filter_insert,
- .filter_remove_safe = efx_ef10_filter_remove_safe,
- .filter_get_safe = efx_ef10_filter_get_safe,
- .filter_clear_rx = efx_ef10_filter_clear_rx,
- .filter_count_rx_used = efx_ef10_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+ .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_port_dummy_op_int,
@@ -6709,7 +4067,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
- .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
@@ -6759,39 +4117,39 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.irq_handle_legacy = efx_ef10_legacy_interrupt,
.tx_probe = efx_ef10_tx_probe,
.tx_init = efx_ef10_tx_init,
- .tx_remove = efx_ef10_tx_remove,
+ .tx_remove = efx_mcdi_tx_remove,
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
- .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
- .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
- .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
- .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
- .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
- .rx_probe = efx_ef10_rx_probe,
- .rx_init = efx_ef10_rx_init,
- .rx_remove = efx_ef10_rx_remove,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
.rx_write = efx_ef10_rx_write,
.rx_defer_refill = efx_ef10_rx_defer_refill,
- .ev_probe = efx_ef10_ev_probe,
+ .ev_probe = efx_mcdi_ev_probe,
.ev_init = efx_ef10_ev_init,
- .ev_fini = efx_ef10_ev_fini,
- .ev_remove = efx_ef10_ev_remove,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_ef10_filter_table_probe,
- .filter_table_restore = efx_ef10_filter_table_restore,
- .filter_table_remove = efx_ef10_filter_table_remove,
- .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
- .filter_insert = efx_ef10_filter_insert,
- .filter_remove_safe = efx_ef10_filter_remove_safe,
- .filter_get_safe = efx_ef10_filter_get_safe,
- .filter_clear_rx = efx_ef10_filter_clear_rx,
- .filter_count_rx_used = efx_ef10_filter_count_rx_used,
- .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
- .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+ .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
.mtd_probe = efx_ef10_mtd_probe,
@@ -6844,7 +4202,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
- .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
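/* Editor's note: the method-table churn above is mechanical - the filter,
 * RSS, event-queue and TX/RX lifecycle hooks move from EF10-private
 * efx_ef10_*() functions to the shared efx_mcdi_*() implementations.
 * Callers are unchanged because everything is reached through the
 * efx_nic_type vtable, e.g.:
 *
 *	rc = efx->type->filter_table_probe(efx);
 *
 * which now resolves to efx_mcdi_filter_table_probe() for both the PF
 * and VF types.
 */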
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 52bd43f45761..14393767ef9f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -522,10 +522,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
if (!is_zero_ether_addr(mac)) {
rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
- if (rc) {
- eth_zero_addr(vf->mac);
+ if (rc)
goto fail;
- }
+
if (vf->efx)
ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 992c773620ec..4481f21a1f43 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -23,6 +23,10 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
@@ -39,56 +43,6 @@
**************************************************************************
*/
-/* Loopback mode names (see LOOPBACK_MODE()) */
-const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
-const char *const efx_loopback_mode_names[] = {
- [LOOPBACK_NONE] = "NONE",
- [LOOPBACK_DATA] = "DATAPATH",
- [LOOPBACK_GMAC] = "GMAC",
- [LOOPBACK_XGMII] = "XGMII",
- [LOOPBACK_XGXS] = "XGXS",
- [LOOPBACK_XAUI] = "XAUI",
- [LOOPBACK_GMII] = "GMII",
- [LOOPBACK_SGMII] = "SGMII",
- [LOOPBACK_XGBR] = "XGBR",
- [LOOPBACK_XFI] = "XFI",
- [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
- [LOOPBACK_GMII_FAR] = "GMII_FAR",
- [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
- [LOOPBACK_XFI_FAR] = "XFI_FAR",
- [LOOPBACK_GPHY] = "GPHY",
- [LOOPBACK_PHYXS] = "PHYXS",
- [LOOPBACK_PCS] = "PCS",
- [LOOPBACK_PMAPMD] = "PMA/PMD",
- [LOOPBACK_XPORT] = "XPORT",
- [LOOPBACK_XGMII_WS] = "XGMII_WS",
- [LOOPBACK_XAUI_WS] = "XAUI_WS",
- [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
- [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
- [LOOPBACK_GMII_WS] = "GMII_WS",
- [LOOPBACK_XFI_WS] = "XFI_WS",
- [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
- [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
-};
-
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
- [RESET_TYPE_DATAPATH] = "DATAPATH",
- [RESET_TYPE_MC_BIST] = "MC_BIST",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
- [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
-};
-
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
@@ -104,18 +58,6 @@ void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
snprintf(buf, buflen, "type %d", type);
}
-/* Reset workqueue. If any NIC has a hardware failure then a reset will be
- * queued onto this work queue. This is not a per-nic work queue, because
- * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
- */
-static struct workqueue_struct *reset_workqueue;
-
-/* How often and how many times to poll for a reset while waiting for a
- * BIST that another function started to complete.
- */
-#define BIST_WAIT_DELAY_MS 100
-#define BIST_WAIT_DELAY_COUNT 100
-
/**************************************************************************
*
* Configurable values
@@ -135,21 +77,6 @@ module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
-/* This is the time (in jiffies) between invocations of the hardware
- * monitor.
- * On Falcon-based NICs, this will:
- * - Check the on-board hardware monitor;
- * - Poll the link state and reconfigure the hardware as necessary.
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
- * chance to start.
- */
-static unsigned int efx_monitor_interval = 1 * HZ;
-
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
@@ -169,38 +96,10 @@ static unsigned int rx_irq_mod_usec = 60;
*/
static unsigned int tx_irq_mod_usec = 150;
-/* This is the first interrupt mode to try out of:
- * 0 => MSI-X
- * 1 => MSI
- * 2 => legacy
- */
-static unsigned int interrupt_mode;
-
-/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
- * i.e. the number of CPUs among which we may distribute simultaneous
- * interrupt handling.
- *
- * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
- * The default (0) means to assign an interrupt to each core.
- */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
-
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
-static unsigned irq_adapt_low_thresh = 8000;
-module_param(irq_adapt_low_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_low_thresh,
- "Threshold score for reducing IRQ moderation");
-
-static unsigned irq_adapt_high_thresh = 16000;
-module_param(irq_adapt_high_thresh, uint, 0644);
-MODULE_PARM_DESC(irq_adapt_high_thresh,
- "Threshold score for increasing IRQ moderation");
-
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -214,18 +113,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static int efx_soft_enable_interrupts(struct efx_nic *efx);
-static void efx_soft_disable_interrupts(struct efx_nic *efx);
-static void efx_remove_channel(struct efx_channel *channel);
-static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
-static void efx_init_napi_channel(struct efx_channel *channel);
-static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_napi_channel(struct efx_channel *channel);
-static void efx_fini_struct(struct efx_nic *efx);
-static void efx_start_all(struct efx_nic *efx);
-static void efx_stop_all(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
@@ -239,776 +128,12 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
ASSERT_RTNL(); \
} while (0)
-static int efx_check_disabled(struct efx_nic *efx)
-{
- if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
- netif_err(efx, drv, efx->net_dev,
- "device is disabled due to earlier errors\n");
- return -EIO;
- }
- return 0;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- *
- *************************************************************************/
-
-/* Process channel's event queue
- *
- * This function is responsible for processing the event queue of a
- * single channel. The caller must guarantee that this function will
- * never be concurrently called more than once on the same channel,
- * though different channels may be being processed concurrently.
- */
-static int efx_process_channel(struct efx_channel *channel, int budget)
-{
- struct efx_tx_queue *tx_queue;
- struct list_head rx_list;
- int spent;
-
- if (unlikely(!channel->enabled))
- return 0;
-
- /* Prepare the batch receive list */
- EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
- INIT_LIST_HEAD(&rx_list);
- channel->rx_list = &rx_list;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->pkts_compl = 0;
- tx_queue->bytes_compl = 0;
- }
-
- spent = efx_nic_process_eventq(channel, budget);
- if (spent && efx_channel_has_rx_queue(channel)) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
- efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue, true);
- }
-
- /* Update BQL */
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->bytes_compl) {
- netdev_tx_completed_queue(tx_queue->core_txq,
- tx_queue->pkts_compl, tx_queue->bytes_compl);
- }
- }
-
- /* Receive any packets we queued up */
- netif_receive_skb_list(channel->rx_list);
- channel->rx_list = NULL;
-
- return spent;
-}
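/* Editor's note: a sketch of the RX batching used above. skbs completed
 * during one poll are queued on an on-stack list and handed to the
 * network stack in a single call, amortising per-packet overhead:
 *
 *	LIST_HEAD(rx_list);
 *	channel->rx_list = &rx_list;	// RX completion appends here
 *	spent = efx_nic_process_eventq(channel, budget);
 *	netif_receive_skb_list(channel->rx_list);
 *	channel->rx_list = NULL;
 */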
-
-/* NAPI poll handler
- *
- * NAPI guarantees serialisation of polls of the same device, which
- * provides the guarantee required by efx_process_channel().
- */
-static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
-{
- int step = efx->irq_mod_step_us;
-
- if (channel->irq_mod_score < irq_adapt_low_thresh) {
- if (channel->irq_moderation_us > step) {
- channel->irq_moderation_us -= step;
- efx->type->push_irq_moderation(channel);
- }
- } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
- if (channel->irq_moderation_us <
- efx->irq_rx_moderation_us) {
- channel->irq_moderation_us += step;
- efx->type->push_irq_moderation(channel);
- }
- }
-
- channel->irq_count = 0;
- channel->irq_mod_score = 0;
-}
-
-static int efx_poll(struct napi_struct *napi, int budget)
-{
- struct efx_channel *channel =
- container_of(napi, struct efx_channel, napi_str);
- struct efx_nic *efx = channel->efx;
- int spent;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
-
- spent = efx_process_channel(channel, budget);
-
- xdp_do_flush_map();
-
- if (spent < budget) {
- if (efx_channel_has_rx_queue(channel) &&
- efx->irq_rx_adaptive &&
- unlikely(++channel->irq_count == 1000)) {
- efx_update_irq_mod(efx, channel);
- }
-
-#ifdef CONFIG_RFS_ACCEL
- /* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
-#endif
-
- /* There is no race here; although napi_disable() will
- * only wait for napi_complete(), this isn't a problem
- * since efx_nic_eventq_read_ack() will have no effect if
- * interrupts have already been disabled.
- */
- if (napi_complete_done(napi, spent))
- efx_nic_eventq_read_ack(channel);
- }
-
- return spent;
-}
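/* Editor's note: worked example of the adaptive moderation driven from
 * efx_poll() above, assuming the module defaults irq_adapt_low_thresh =
 * 8000 and irq_adapt_high_thresh = 16000. Every 1000 interrupts the
 * accumulated irq_mod_score is inspected: below 8000 events per 1000
 * IRQs the moderation delay shrinks by irq_mod_step_us (lower latency);
 * above 16000 it grows by one step (fewer interrupts), never exceeding
 * efx->irq_rx_moderation_us.
 */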
-
-/* Create event queue
- * Event queue memory allocations are done only once. If the channel
- * is reset, the memory buffer will be reused; this guards against
- * errors during channel reset and also simplifies interrupt handling.
- */
-static int efx_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned long entries;
-
- netif_dbg(efx, probe, efx->net_dev,
- "chan %d create event queue\n", channel->channel);
-
- /* Build an event queue with room for one event per tx and rx buffer,
- * plus some extra for link state events and MCDI completions. */
- entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
- channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
-
- return efx_nic_probe_eventq(channel);
-}
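/* Editor's note: sizing example for efx_probe_eventq() above, assuming
 * the default ring sizes rxq_entries = txq_entries = 1024:
 *
 *	entries     = roundup_pow_of_two(1024 + 1024 + 128) = 4096
 *	eventq_mask = max(4096, EFX_MIN_EVQ_SIZE) - 1       = 0xfff
 */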
-
-/* Prepare channel's event queue */
-static int efx_init_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- int rc;
-
- EFX_WARN_ON_PARANOID(channel->eventq_init);
-
- netif_dbg(efx, drv, efx->net_dev,
- "chan %d init event queue\n", channel->channel);
-
- rc = efx_nic_init_eventq(channel);
- if (rc == 0) {
- efx->type->push_irq_moderation(channel);
- channel->eventq_read_ptr = 0;
- channel->eventq_init = true;
- }
- return rc;
-}
-
-/* Enable event queue processing and NAPI */
-void efx_start_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, ifup, channel->efx->net_dev,
- "chan %d start event queue\n", channel->channel);
-
- /* Make sure the NAPI handler sees the enabled flag set */
- channel->enabled = true;
- smp_wmb();
-
- napi_enable(&channel->napi_str);
- efx_nic_eventq_read_ack(channel);
-}
-
-/* Disable event queue processing and NAPI */
-void efx_stop_eventq(struct efx_channel *channel)
-{
- if (!channel->enabled)
- return;
-
- napi_disable(&channel->napi_str);
- channel->enabled = false;
-}
-
-static void efx_fini_eventq(struct efx_channel *channel)
-{
- if (!channel->eventq_init)
- return;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d fini event queue\n", channel->channel);
-
- efx_nic_fini_eventq(channel);
- channel->eventq_init = false;
-}
-
-static void efx_remove_eventq(struct efx_channel *channel)
-{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d remove event queue\n", channel->channel);
-
- efx_nic_remove_eventq(channel);
-}
-
-/**************************************************************************
- *
- * Channel handling
- *
- *************************************************************************/
-
-/* Allocate and initialise a channel structure. */
-static struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- channel->efx = efx;
- channel->channel = i;
- channel->type = &efx_default_channel_type;
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- tx_queue->efx = efx;
- tx_queue->queue = i * EFX_TXQ_TYPES + j;
- tx_queue->channel = channel;
- }
-
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- rx_queue = &channel->rx_queue;
- rx_queue->efx = efx;
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-
- return channel;
-}
-
-/* Allocate and initialise a channel structure, copying parameters
- * (but not resources) from an old channel structure.
- */
-static struct efx_channel *
-efx_copy_channel(const struct efx_channel *old_channel)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int j;
-
- channel = kmalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel)
- return NULL;
-
- *channel = *old_channel;
-
- channel->napi_dev = NULL;
- INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
- channel->napi_str.napi_id = 0;
- channel->napi_str.state = 0;
- memset(&channel->eventq, 0, sizeof(channel->eventq));
-
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
- tx_queue = &channel->tx_queue[j];
- if (tx_queue->channel)
- tx_queue->channel = channel;
- tx_queue->buffer = NULL;
- memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
- }
-
- rx_queue = &channel->rx_queue;
- rx_queue->buffer = NULL;
- memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
-#ifdef CONFIG_RFS_ACCEL
- INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
-#endif
-
- return channel;
-}
-
-static int efx_probe_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- netif_dbg(channel->efx, probe, channel->efx->net_dev,
- "creating channel %d\n", channel->channel);
-
- rc = channel->type->pre_probe(channel);
- if (rc)
- goto fail;
-
- rc = efx_probe_eventq(channel);
- if (rc)
- goto fail;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_probe_tx_queue(tx_queue);
- if (rc)
- goto fail;
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_probe_rx_queue(rx_queue);
- if (rc)
- goto fail;
- }
-
- channel->rx_list = NULL;
-
- return 0;
-
-fail:
- efx_remove_channel(channel);
- return rc;
-}
-
-static void
-efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
-{
- struct efx_nic *efx = channel->efx;
- const char *type;
- int number;
-
- number = channel->channel;
-
- if (number >= efx->xdp_channel_offset &&
- !WARN_ON_ONCE(!efx->n_xdp_channels)) {
- type = "-xdp";
- number -= efx->xdp_channel_offset;
- } else if (efx->tx_channel_offset == 0) {
- type = "";
- } else if (number < efx->tx_channel_offset) {
- type = "-rx";
- } else {
- type = "-tx";
- number -= efx->tx_channel_offset;
- }
- snprintf(buf, len, "%s%s-%d", efx->name, type, number);
-}
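/* Editor's note: example outputs of the naming scheme above for a NIC
 * named "eth0" with efx_separate_tx_channels set and four channels of
 * each kind (tx_channel_offset = 4):
 *
 *	channels 0..3 -> "eth0-rx-0" .. "eth0-rx-3"
 *	channels 4..7 -> "eth0-tx-0" .. "eth0-tx-3"
 *
 * With combined channels (tx_channel_offset == 0) the names are plain
 * "eth0-0", "eth0-1", ...; XDP channels become "eth0-xdp-N".
 */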
-
-static void efx_set_channel_names(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- channel->type->get_name(channel,
- efx->msi_context[channel->channel].name,
- sizeof(efx->msi_context[0].name));
-}
-
-static int efx_probe_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- int rc;
-
- /* Restart special buffer allocation */
- efx->next_buffer_table = 0;
-
- /* Probe channels in reverse, so that any 'extra' channels
- * use the start of the buffer table. This allows the traffic
- * channels to be resized without moving them or wasting the
- * entries before them.
- */
- efx_for_each_channel_rev(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail;
- }
- }
- efx_set_channel_names(efx);
-
- return 0;
-
-fail:
- efx_remove_channels(efx);
- return rc;
-}
-
-/* Channels are shut down and reinitialised whilst the NIC is running
- * to propagate configuration changes (mtu, checksum offload), or
- * to clear hardware error conditions
- */
-static void efx_start_datapath(struct efx_nic *efx)
-{
- netdev_features_t old_features = efx->net_dev->features;
- bool old_rx_scatter = efx->rx_scatter;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- struct efx_channel *channel;
- size_t rx_buf_len;
-
- /* Calculate the rx buffer allocation parameters required to
- * support the current MTU, including padding for header
- * alignment and overruns.
- */
- efx->rx_dma_len = (efx->rx_prefix_size +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
- efx->rx_ip_align + efx->rx_dma_len);
- if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = efx->type->always_rx_scatter;
- efx->rx_buffer_order = 0;
- } else if (efx->type->can_rx_scatter) {
- BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
- BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
- 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
- EFX_RX_BUF_ALIGNMENT) >
- PAGE_SIZE);
- efx->rx_scatter = true;
- efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
- efx->rx_buffer_order = 0;
- } else {
- efx->rx_scatter = false;
- efx->rx_buffer_order = get_order(rx_buf_len);
- }
-
- efx_rx_config_page_split(efx);
- if (efx->rx_buffer_order)
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u; page order=%u batch=%u\n",
- efx->rx_dma_len, efx->rx_buffer_order,
- efx->rx_pages_per_batch);
- else
- netif_dbg(efx, drv, efx->net_dev,
- "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
- efx->rx_dma_len, efx->rx_page_buf_step,
- efx->rx_bufs_per_page, efx->rx_pages_per_batch);
-
- /* Restore previously fixed features in hw_features and remove
- * features which are fixed now
- */
- efx->net_dev->hw_features |= efx->net_dev->features;
- efx->net_dev->hw_features &= ~efx->fixed_features;
- efx->net_dev->features |= efx->fixed_features;
- if (efx->net_dev->features != old_features)
- netdev_features_change(efx->net_dev);
-
- /* RX filters may also have scatter-enabled flags */
- if (efx->rx_scatter != old_rx_scatter)
- efx->type->filter_update_rx_scatter(efx);
-
- /* We must keep at least one descriptor in a TX ring empty.
- * We could avoid this when the queue size does not exactly
- * match the hardware ring size, but it's not that important.
- * Therefore we stop the queue when one more skb might fill
- * the ring completely. We wake it when half way back to
- * empty.
- */
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
- efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
-
- /* Initialise the channels */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_init_tx_queue(tx_queue);
- atomic_inc(&efx->active_queues);
- }
-
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- efx_init_rx_queue(rx_queue);
- atomic_inc(&efx->active_queues);
- efx_stop_eventq(channel);
- efx_fast_push_rx_descriptors(rx_queue, false);
- efx_start_eventq(channel);
- }
-
- WARN_ON(channel->rx_pkt_n_frags);
- }
-
- efx_ptp_start_datapath(efx);
-
- if (netif_device_present(efx->net_dev))
- netif_tx_wake_all_queues(efx->net_dev);
-}
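/* Editor's note: rough worked example of the RX sizing above on a
 * 4 KiB-page system with a 1500-byte MTU (assuming EF10's 14-byte RX
 * prefix and no extra rx_buffer_padding): rx_dma_len comes to a little
 * over 1.5 KiB, and adding the page-state header, XDP_PACKET_HEADROOM
 * and rx_ip_align keeps rx_buf_len under PAGE_SIZE, so scattering stays
 * off and order-0 pages are used. A 9000-byte MTU overflows the page,
 * so rx_scatter is enabled and rx_dma_len is clamped to
 * EFX_RX_USR_BUF_SIZE instead.
 */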
-
-static void efx_stop_datapath(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->port_enabled);
-
- efx_ptp_stop_datapath(efx);
-
- /* Stop RX refill */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- rx_queue->refill_enabled = false;
- }
-
- efx_for_each_channel(channel, efx) {
- /* RX packet processing is pipelined, so wait for the
- * NAPI handler to complete. At least event queue 0
- * might be kept active by non-data events, so don't
- * use napi_synchronize() but actually disable NAPI
- * temporarily.
- */
- if (efx_channel_has_rx_queue(channel)) {
- efx_stop_eventq(channel);
- efx_start_eventq(channel);
- }
- }
-
- rc = efx->type->fini_dmaq(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fini_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_fini_tx_queue(tx_queue);
- }
- efx->xdp_rxq_info_failed = false;
-}
-
-static void efx_remove_channel(struct efx_channel *channel)
-{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
-
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "destroy chan %d\n", channel->channel);
-
- efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
- efx_for_each_possible_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
- efx_remove_eventq(channel);
- channel->type->post_remove(channel);
-}
-
-static void efx_remove_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
-
- kfree(efx->xdp_tx_queues);
-}
-
-int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
-{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
- u32 old_rxq_entries, old_txq_entries;
- unsigned i, next_buffer_table = 0;
- int rc, rc2;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- /* Not all channels should be reallocated. We must avoid
- * reallocating their buffer table entries.
- */
- efx_for_each_channel(channel, efx) {
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
-
- if (channel->type->copy)
- continue;
- next_buffer_table = max(next_buffer_table,
- channel->eventq.index +
- channel->eventq.entries);
- efx_for_each_channel_rx_queue(rx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- rx_queue->rxd.index +
- rx_queue->rxd.entries);
- efx_for_each_channel_tx_queue(tx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- tx_queue->txd.index +
- tx_queue->txd.entries);
- }
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_soft_disable_interrupts(efx);
-
- /* Clone channels (where possible) */
- memset(other_channel, 0, sizeof(other_channel));
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (channel->type->copy)
- channel = channel->type->copy(channel);
- if (!channel) {
- rc = -ENOMEM;
- goto out;
- }
- other_channel[i] = channel;
- }
-
- /* Swap entry counts and channel pointers */
- old_rxq_entries = efx->rxq_entries;
- old_txq_entries = efx->txq_entries;
- efx->rxq_entries = rxq_entries;
- efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
-
- /* Restart buffer table allocation */
- efx->next_buffer_table = next_buffer_table;
-
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- if (!channel->type->copy)
- continue;
- rc = efx_probe_channel(channel);
- if (rc)
- goto rollback;
- efx_init_napi_channel(efx->channel[i]);
- }
-
-out:
- /* Destroy unused channel structures */
- for (i = 0; i < efx->n_channels; i++) {
- channel = other_channel[i];
- if (channel && channel->type->copy) {
- efx_fini_napi_channel(channel);
- efx_remove_channel(channel);
- kfree(channel);
- }
- }
-
- rc2 = efx_soft_enable_interrupts(efx);
- if (rc2) {
- rc = rc ? rc : rc2;
- netif_err(efx, drv, efx->net_dev,
- "unable to restart interrupts on channel reallocation\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- } else {
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-
-rollback:
- /* Swap back */
- efx->rxq_entries = old_rxq_entries;
- efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
- goto out;
-}
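/* Editor's note: efx_realloc_channels() above uses a swap-and-rollback
 * idiom: clone the resizable channels, swap the clones into
 * efx->channel[], and if any re-probe fails swap the originals straight
 * back (the "rollback" label) before falling through to "out". The
 * cleanup at "out" therefore frees whichever set ended up unused - the
 * clones on failure, the originals on success.
 */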
-
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
-{
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
-}
-
-static bool efx_default_channel_want_txqs(struct efx_channel *channel)
-{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
/**************************************************************************
*
* Port handling
*
**************************************************************************/
-/* This ensures that the kernel is kept informed (via
- * netif_carrier_on/off) of the link status, and also keeps the
- * port's TX queue stopped while the link is down.
- */
-void efx_link_status_changed(struct efx_nic *efx)
-{
- struct efx_link_state *link_state = &efx->link_state;
-
- /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
- * that no events are triggered between unregister_netdev() and the
- * driver unloading. A more general condition is that NETDEV_CHANGE
- * can only be generated between NETDEV_UP and NETDEV_DOWN */
- if (!netif_running(efx->net_dev))
- return;
-
- if (link_state->up != netif_carrier_ok(efx->net_dev)) {
- efx->n_link_state_changes++;
-
- if (link_state->up)
- netif_carrier_on(efx->net_dev);
- else
- netif_carrier_off(efx->net_dev);
- }
-
- /* Status message for kernel log */
- if (link_state->up)
- netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu);
- else
- netif_info(efx, link, efx->net_dev, "link down\n");
-}
-
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising)
-{
- memcpy(efx->link_advertising, advertising,
- sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
-
- efx->link_advertising[0] |= ADVERTISED_Autoneg;
- if (advertising[0] & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising[0] & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
-}
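/* Editor's note: the pause-flag mapping above, tabulated:
 *
 *	ADVERTISED_Pause  ADVERTISED_Asym_Pause  ->  wanted_fc
 *	       0                   0                 no pause
 *	       0                   1                 EFX_FC_TX only
 *	       1                   0                 EFX_FC_TX | EFX_FC_RX
 *	       1                   1                 EFX_FC_RX only
 *
 * The Asym_Pause bit toggles EFX_FC_TX after the Pause bit has set or
 * cleared both directions.
 */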
-
/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
* force the Autoneg bit on.
*/
@@ -1035,73 +160,6 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
-/* We assume that efx->type->reconfigure_mac will always try to sync RX
- * filters and therefore needs to read-lock the filter table against freeing
- */
-void efx_mac_reconfigure(struct efx_nic *efx)
-{
- down_read(&efx->filter_sem);
- efx->type->reconfigure_mac(efx);
- up_read(&efx->filter_sem);
-}
-
-/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
- * the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
- * through efx_monitor().
- *
- * Callers must hold the mac_lock
- */
-int __efx_reconfigure_port(struct efx_nic *efx)
-{
- enum efx_phy_mode phy_mode;
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- /* Disable PHY transmit in mac level loopbacks */
- phy_mode = efx->phy_mode;
- if (LOOPBACK_INTERNAL(efx))
- efx->phy_mode |= PHY_MODE_TX_DISABLED;
- else
- efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
-
- rc = efx->type->reconfigure_port(efx);
-
- if (rc)
- efx->phy_mode = phy_mode;
-
- return rc;
-}
-
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled. */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Asynchronous work item for changing MAC promiscuity and multicast
- * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
- * MAC directly. */
-static void efx_mac_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
-
- mutex_lock(&efx->mac_lock);
- if (efx->port_enabled)
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-}
-
static int efx_probe_port(struct efx_nic *efx)
{
int rc;
@@ -1155,44 +213,6 @@ fail1:
return rc;
}
-static void efx_start_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifup, efx->net_dev, "start port\n");
- BUG_ON(efx->port_enabled);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = true;
-
- /* Ensure MAC ingress/egress is enabled */
- efx_mac_reconfigure(efx);
-
- mutex_unlock(&efx->mac_lock);
-}
-
-/* Cancel work for MAC reconfiguration, periodic hardware monitoring
- * and the async self-test, wait for them to finish and prevent them
- * being scheduled again. This doesn't cover online resets, which
- * should only be cancelled when removing the device.
- */
-static void efx_stop_port(struct efx_nic *efx)
-{
- netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- efx->port_enabled = false;
- mutex_unlock(&efx->mac_lock);
-
- /* Serialise against efx_set_multicast_list() */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- cancel_work_sync(&efx->mac_work);
-}
-
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
@@ -1291,583 +311,6 @@ static void efx_dissociate(struct efx_nic *efx)
}
}
-/* This configures the PCI device to enable I/O and DMA. */
-static int efx_init_io(struct efx_nic *efx)
-{
- struct pci_dev *pci_dev = efx->pci_dev;
- dma_addr_t dma_mask = efx->type->max_dma_mask;
- unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc, bar;
-
- netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
-
- bar = efx->type->mem_bar(efx);
-
- rc = pci_enable_device(pci_dev);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to enable PCI device\n");
- goto fail1;
- }
-
- pci_set_master(pci_dev);
-
- /* Set the PCI DMA mask. Try all possibilities from our genuine mask
- * down to 32 bits, because some architectures will allow 40 bit
-	 * masks even though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- dma_mask >>= 1;
- }
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "could not find a suitable DMA mask\n");
- goto fail2;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "using DMA mask %llx\n", (unsigned long long) dma_mask);
-
- efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
- rc = pci_request_region(pci_dev, bar, "sfc");
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "request for memory BAR failed\n");
- rc = -EIO;
- goto fail3;
- }
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
- if (!efx->membase) {
- netif_err(efx, probe, efx->net_dev,
- "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys, mem_map_size);
- rc = -ENOMEM;
- goto fail4;
- }
- netif_dbg(efx, probe, efx->net_dev,
- "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys, mem_map_size,
- efx->membase);
-
- return 0;
-
- fail4:
- pci_release_region(efx->pci_dev, bar);
- fail3:
- efx->membase_phys = 0;
- fail2:
- pci_disable_device(efx->pci_dev);
- fail1:
- return rc;
-}
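/* Editor's note: example of the DMA-mask walk-down above. EF10 starts
 * from a 46-bit max_dma_mask, so the loop offers 0x3fffffffffff,
 * 0x1fffffffffff, ... and settles on the first mask the platform
 * accepts; a platform that only honours 40-bit addressing thus ends up
 * with the equivalent of DMA_BIT_MASK(40). The 32-bit mask 0xffffffff
 * is the last one tried before the loop gives up and reports failure.
 */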
-
-static void efx_fini_io(struct efx_nic *efx)
-{
- int bar;
-
- netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
-
- if (efx->membase) {
- iounmap(efx->membase);
- efx->membase = NULL;
- }
-
- if (efx->membase_phys) {
- bar = efx->type->mem_bar(efx);
- pci_release_region(efx->pci_dev, bar);
- efx->membase_phys = 0;
- }
-
- /* Don't disable bus-mastering if VFs are assigned */
- if (!pci_vfs_assigned(efx->pci_dev))
- pci_disable_device(efx->pci_dev);
-}
-
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
-}
-
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
-{
- cpumask_var_t thread_mask;
- unsigned int count;
- int cpu;
-
- if (rss_cpus) {
- count = rss_cpus;
- } else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
-
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
- }
-
- if (count > EFX_MAX_RX_QUEUES) {
- netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
- "Reducing number of rx queues from %u to %u.\n",
- count, EFX_MAX_RX_QUEUES);
- count = EFX_MAX_RX_QUEUES;
- }
-
- /* If RSS is requested for the PF *and* VFs then we can't write RSS
- * table entries that are inaccessible to VFs
- */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
- }
- }
-#endif
-
- return count;
-}
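/* Editor's note: worked example of the core-counting loop above on a
 * 4-core / 8-thread CPU with rss_cpus unset: the first thread of each
 * core is not yet in thread_mask, so it bumps count and ORs its whole
 * sibling mask in; the second hyperthread is then skipped. count ends
 * at 4 - one RX queue per physical core - subject to the
 * EFX_MAX_RX_QUEUES cap and the VF clamp below.
 */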
-
-static int efx_allocate_msix_channels(struct efx_nic *efx,
- unsigned int max_channels,
- unsigned int extra_channels,
- unsigned int parallelism)
-{
- unsigned int n_channels = parallelism;
- int vec_count;
- int n_xdp_tx;
- int n_xdp_ev;
-
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
-
- /* To allow XDP transmit to happen from arbitrary NAPI contexts
- * we allocate a TX queue per CPU. We share event queues across
- * multiple tx queues, assuming tx and ev queues are both
- * maximum size.
- */
-
- n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
-
- /* Check resources.
- * We need a channel per event queue, plus a VI per tx queue.
- * This may be more pessimistic than it needs to be.
- */
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
- } else {
- efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
- efx->xdp_tx_queue_count = n_xdp_tx;
- n_channels += n_xdp_ev;
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
- }
-
- n_channels = min(n_channels, max_channels);
-
- vec_count = pci_msix_vec_count(efx->pci_dev);
- if (vec_count < 0)
- return vec_count;
- if (vec_count < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
- vec_count, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = vec_count;
- }
-
- efx->n_channels = n_channels;
-
- /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
- if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
- n_channels--;
-
- /* Ignore XDP tx channels when creating rx channels. */
- n_channels -= efx->n_xdp_channels;
-
- if (efx_separate_tx_channels) {
- efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
- efx->tx_channel_offset =
- n_channels - efx->n_tx_channels;
- efx->n_rx_channels =
- max(n_channels -
- efx->n_tx_channels, 1U);
- } else {
- efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
- efx->tx_channel_offset = 0;
- efx->n_rx_channels = n_channels;
- }
-
- if (efx->n_xdp_channels)
- efx->xdp_channel_offset = efx->tx_channel_offset +
- efx->n_tx_channels;
- else
- efx->xdp_channel_offset = efx->n_channels;
-
- netif_dbg(efx, drv, efx->net_dev,
- "Allocating %u RX channels\n",
- efx->n_rx_channels);
-
- return efx->n_channels;
-}
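/* Editor's note: worked example of the channel accounting above for a
 * 16-CPU system with parallelism = 16, no extra channels,
 * EFX_TXQ_TYPES = 4 and efx_separate_tx_channels off (assuming enough
 * MSI-X vectors and max_channels headroom):
 *
 *	n_xdp_tx = 16;  n_xdp_ev = DIV_ROUND_UP(16, 4) = 4
 *	n_channels = 16 + 4 = 20
 *	n_rx_channels = n_tx_channels = 16, tx_channel_offset = 0
 *	xdp_channel_offset = 16 (XDP channels occupy 16..19)
 */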
-
-/* Probe the number and type of interrupts we are able to obtain, and
- * the resulting numbers of channels and RX queues.
- */
-static int efx_probe_interrupts(struct efx_nic *efx)
-{
- unsigned int extra_channels = 0;
- unsigned int i, j;
- int rc;
-
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
- if (efx->extra_channel_type[i])
- ++extra_channels;
-
- if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
- unsigned int parallelism = efx_wanted_parallelism(efx);
- struct msix_entry xentries[EFX_MAX_CHANNELS];
- unsigned int n_channels;
-
- rc = efx_allocate_msix_channels(efx, efx->max_channels,
- extra_channels, parallelism);
- if (rc >= 0) {
- n_channels = rc;
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
- n_channels);
- }
- if (rc < 0) {
- /* Fall back to single channel MSI */
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- else
- return rc;
- } else if (rc < n_channels) {
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Insufficient MSI-X vectors"
- " available (%d < %u).\n", rc, n_channels);
- netif_err(efx, drv, efx->net_dev,
- "WARNING: Performance may be reduced.\n");
- n_channels = rc;
- }
-
- if (rc > 0) {
- for (i = 0; i < efx->n_channels; i++)
- efx_get_channel(efx, i)->irq =
- xentries[i].vector;
- }
- }
-
- /* Try single interrupt MSI */
- if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_channels = 1;
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- rc = pci_enable_msi(efx->pci_dev);
- if (rc == 0) {
- efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
- } else {
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI\n");
- if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
- efx->interrupt_mode = EFX_INT_MODE_LEGACY;
- else
- return rc;
- }
- }
-
- /* Assume legacy interrupts */
- if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
- efx->n_rx_channels = 1;
- efx->n_tx_channels = 1;
- efx->n_xdp_channels = 0;
- efx->xdp_channel_offset = efx->n_channels;
- efx->legacy_irq = efx->pci_dev->irq;
- }
-
- /* Assign extra channels if possible, before XDP channels */
- efx->n_extra_tx_channels = 0;
- j = efx->xdp_channel_offset;
- for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
- if (!efx->extra_channel_type[i])
- continue;
- if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
- efx->n_channels <= extra_channels) {
- efx->extra_channel_type[i]->handle_no_channel(efx);
- } else {
- --j;
- efx_get_channel(efx, j)->type =
- efx->extra_channel_type[i];
- if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
- efx->n_extra_tx_channels++;
- }
- }
-
- /* RSS might be usable on VFs even if it is disabled on the PF */
-#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted) {
- efx->rss_spread = ((efx->n_rx_channels > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- efx->n_rx_channels : efx_vf_size(efx));
- return 0;
- }
-#endif
- efx->rss_spread = efx->n_rx_channels;
-
- return 0;
-}
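/* Editor's note: the probe above degrades gracefully through three
 * interrupt modes, each fallback gated on efx->type->min_interrupt_mode:
 *
 *	MSI-X (one vector per channel)
 *	  -> MSI (single vector, one combined channel)
 *	    -> legacy INTx (one channel, or two with separate TX channels)
 */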
-
-#if defined(CONFIG_SMP)
-static void efx_set_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- unsigned int cpu;
-
- efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
- irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
- }
-}
-
-static void efx_clear_interrupt_affinity(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- irq_set_affinity_hint(channel->irq, NULL);
-}
-#else
-static void
-efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-
-static void
-efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
-{
-}
-#endif /* CONFIG_SMP */
-
-static int efx_soft_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- efx->irq_soft_enabled = true;
- smp_wmb();
-
- efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- efx_start_eventq(channel);
- }
-
- efx_mcdi_mode_event(efx);
-
- return 0;
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- return rc;
-}
-
-static void efx_soft_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- if (efx->state == STATE_DISABLED)
- return;
-
- efx_mcdi_mode_poll(efx);
-
- efx->irq_soft_enabled = false;
- smp_wmb();
-
- if (efx->legacy_irq)
- synchronize_irq(efx->legacy_irq);
-
- efx_for_each_channel(channel, efx) {
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- efx_stop_eventq(channel);
- if (!channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- /* Flush the asynchronous MCDI request queue */
- efx_mcdi_flush_async(efx);
-}
-
-static int efx_enable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel, *end_channel;
- int rc;
-
- BUG_ON(efx->state == STATE_DISABLED);
-
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
-
- efx->type->irq_enable_master(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq) {
- rc = efx_init_eventq(channel);
- if (rc)
- goto fail;
- }
- }
-
- rc = efx_soft_enable_interrupts(efx);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- end_channel = channel;
- efx_for_each_channel(channel, efx) {
- if (channel == end_channel)
- break;
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-
- return rc;
-}
-
-static void efx_disable_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_soft_disable_interrupts(efx);
-
- efx_for_each_channel(channel, efx) {
- if (channel->type->keep_eventq)
- efx_fini_eventq(channel);
- }
-
- efx->type->irq_disable_non_ev(efx);
-}
-
-static void efx_remove_interrupts(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- /* Remove MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- channel->irq = 0;
- pci_disable_msi(efx->pci_dev);
- pci_disable_msix(efx->pci_dev);
-
- /* Remove legacy interrupt */
- efx->legacy_irq = 0;
-}
-
-static int efx_set_channels(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- int xdp_queue_number;
-
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
- if (efx->xdp_tx_queue_count) {
- EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
-
- /* Allocate array for XDP TX queue lookup. */
- efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
- sizeof(*efx->xdp_tx_queues),
- GFP_KERNEL);
- if (!efx->xdp_tx_queues)
- return -ENOMEM;
- }
-
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
- efx_for_each_channel(channel, efx) {
- if (channel->channel < efx->n_rx_channels)
- channel->rx_queue.core_index = channel->channel;
- else
- channel->rx_queue.core_index = -1;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue -= (efx->tx_channel_offset *
- EFX_TXQ_TYPES);
-
- if (efx_channel_is_xdp_tx(channel) &&
- xdp_queue_number < efx->xdp_tx_queue_count) {
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
- }
- }
- }
- return 0;
-}
-
static int efx_probe_nic(struct efx_nic *efx)
{
int rc;
@@ -1940,70 +383,6 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
-static int efx_probe_filters(struct efx_nic *efx)
-{
- int rc;
-
- init_rwsem(&efx->filter_sem);
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- rc = efx->type->filter_table_probe(efx);
- if (rc)
- goto out_unlock;
-
-#ifdef CONFIG_RFS_ACCEL
- if (efx->type->offload_features & NETIF_F_NTUPLE) {
- struct efx_channel *channel;
- int i, success = 1;
-
- efx_for_each_channel(channel, efx) {
- channel->rps_flow_id =
- kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*channel->rps_flow_id),
- GFP_KERNEL);
- if (!channel->rps_flow_id)
- success = 0;
- else
- for (i = 0;
- i < efx->type->max_rx_ip_filters;
- ++i)
- channel->rps_flow_id[i] =
- RPS_FLOW_ID_INVALID;
- channel->rfs_expire_index = 0;
- channel->rfs_filter_count = 0;
- }
-
- if (!success) {
- efx_for_each_channel(channel, efx)
- kfree(channel->rps_flow_id);
- efx->type->filter_table_remove(efx);
- rc = -ENOMEM;
- goto out_unlock;
- }
- }
-#endif
-out_unlock:
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
- return rc;
-}
-
-static void efx_remove_filters(struct efx_nic *efx)
-{
-#ifdef CONFIG_RFS_ACCEL
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx) {
- cancel_delayed_work_sync(&channel->filter_work);
- kfree(channel->rps_flow_id);
- }
-#endif
- down_write(&efx->filter_sem);
- efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
-}
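/* Editor's note: locking sketch for the filter hooks above - probe takes
 * mac_lock and then filter_sem for write before calling
 * efx->type->filter_table_probe(); remove needs only filter_sem for
 * write. Datapath lookups take filter_sem for read, which is why the
 * sync-rx-mode comment earlier requires the read lock whenever a racing
 * table remove is possible.
 */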
-
-
/**************************************************************************
*
* NIC startup/shutdown
@@ -2068,81 +447,6 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
-/* If the interface is supposed to be running but is not, start
- * the hardware and software data path, regular activity for the port
- * (MAC statistics, link polling, etc.) and schedule the port to be
- * reconfigured. Interrupts must already be enabled. This function
- * is safe to call multiple times, so long as the NIC is not disabled.
- * Requires the RTNL lock.
- */
-static void efx_start_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
- BUG_ON(efx->state == STATE_DISABLED);
-
- /* Check that it is appropriate to restart the interface. All
- * of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev) ||
- efx->reset_pending)
- return;
-
- efx_start_port(efx);
- efx_start_datapath(efx);
-
- /* Start the hardware monitor if there is one */
- if (efx->type->monitor != NULL)
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-
- /* Link state detection is normally event-driven; we have
- * to poll now because we could have missed a change
- */
- mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
- efx_link_status_changed(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx->type->start_stats(efx);
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
-}
-
-/* Quiesce the hardware and software data path, and regular activity
- * for the port without bringing the link down. Safe to call multiple
- * times with the NIC in almost any state, but interrupts should be
- * enabled. Requires the RTNL lock.
- */
-static void efx_stop_all(struct efx_nic *efx)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- /* port_enabled can be read safely under the rtnl lock */
- if (!efx->port_enabled)
- return;
-
- /* update stats before we go down so we can accurately count
- * rx_nodesc_drops
- */
- efx->type->pull_stats(efx);
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, NULL);
- spin_unlock_bh(&efx->stats_lock);
- efx->type->stop_stats(efx);
- efx_stop_port(efx);
-
- /* Stop the kernel transmit interface. This is only valid if
- * the device is stopped or detached; otherwise the watchdog
- * may fire immediately.
- */
- WARN_ON(netif_running(efx->net_dev) &&
- netif_device_present(efx->net_dev));
- netif_tx_disable(efx->net_dev);
-
- efx_stop_datapath(efx);
-}
-
static void efx_remove_all(struct efx_nic *efx)
{
rtnl_lock();
@@ -2238,36 +542,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * Hardware monitor
- *
- **************************************************************************/
-
-/* Run periodically off the general workqueue */
-static void efx_monitor(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic,
- monitor_work.work);
-
- netif_vdbg(efx, timer, efx->net_dev,
- "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
- BUG_ON(efx->type->monitor == NULL);
-
-	/* If the mac_lock is already held then a port reconfiguration
-	 * is probably already in progress and will do most of the
-	 * work of monitor() anyway. */
- if (mutex_trylock(&efx->mac_lock)) {
- if (efx->port_enabled)
- efx->type->monitor(efx);
- mutex_unlock(&efx->mac_lock);
- }
-
- queue_delayed_work(efx->workqueue, &efx->monitor_work,
- efx_monitor_interval);
-}
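
efx_monitor() takes the MAC lock with mutex_trylock() rather than mutex_lock(): if the lock is contended, a port reconfiguration is already doing equivalent work, and blocking would stall the shared workqueue. A rough pthreads sketch of that skip-if-busy idiom (illustrative, not driver code):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool port_enabled;

	/* Periodic health check that never blocks: if another thread
	 * holds the lock it is already reconfiguring the port, so this
	 * tick is simply skipped and retried next period.
	 */
	static void monitor_tick(void (*check_hw)(void))
	{
		if (pthread_mutex_trylock(&mac_lock) == 0) {
			if (port_enabled)
				check_hw();
			pthread_mutex_unlock(&mac_lock);
		}
	}
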
-
-/**************************************************************************
- *
* ioctls
*
*************************************************************************/
@@ -2295,45 +569,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
/**************************************************************************
*
- * NAPI interface
- *
- **************************************************************************/
-
-static void efx_init_napi_channel(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str,
- efx_poll, napi_weight);
-}
-
-static void efx_init_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_init_napi_channel(channel);
-}
-
-static void efx_fini_napi_channel(struct efx_channel *channel)
-{
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
-
- channel->napi_dev = NULL;
-}
-
-static void efx_fini_napi(struct efx_nic *efx)
-{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_fini_napi_channel(channel);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2383,19 +618,8 @@ int efx_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static void efx_net_stats(struct net_device *net_dev,
- struct rtnl_link_stats64 *stats)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, stats);
- spin_unlock_bh(&efx->stats_lock);
-}
-
/* Context: netif_tx_lock held, BHs disabled. */
-static void efx_watchdog(struct net_device *net_dev)
+static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2406,51 +630,6 @@ static void efx_watchdog(struct net_device *net_dev)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
-static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
-{
- /* The maximum MTU that we can fit in a single page, allowing for
- * framing, overhead and XDP headroom.
- */
- int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
- efx->rx_prefix_size + efx->type->rx_buffer_padding +
- efx->rx_ip_align + XDP_PACKET_HEADROOM;
-
- return PAGE_SIZE - overhead;
-}
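
To make efx_xdp_max_mtu() concrete: once XDP is attached, everything for one packet must fit in a single page, so the usable MTU is PAGE_SIZE minus framing, the hardware RX prefix, buffer padding, IP alignment and XDP_PACKET_HEADROOM. A standalone sketch with assumed overhead values (the real figures come from the NIC type and are not reproduced here):

	#include <stdio.h>

	/* All figures below are assumptions for illustration, not the
	 * driver's real EFX_MAX_FRAME_LEN()/rx_prefix values.
	 */
	#define PAGE_SIZE_ASSUMED	4096
	#define XDP_HEADROOM_ASSUMED	256	/* XDP_PACKET_HEADROOM */
	#define FRAME_OVERHEAD_ASSUMED	22	/* L2 framing at MTU 0 */
	#define RX_PREFIX_ASSUMED	14	/* prefix + padding + align */
	#define PAGE_STATE_ASSUMED	16	/* efx_rx_page_state */

	int main(void)
	{
		int overhead = FRAME_OVERHEAD_ASSUMED + PAGE_STATE_ASSUMED +
			       RX_PREFIX_ASSUMED + XDP_HEADROOM_ASSUMED;

		printf("max XDP MTU = %d\n",
		       PAGE_SIZE_ASSUMED - overhead);	/* 3788 here */
		return 0;
	}
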
-
-/* Context: process, rtnl_lock() held. */
-static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- rc = efx_check_disabled(efx);
- if (rc)
- return rc;
-
- if (rtnl_dereference(efx->xdp_prog) &&
- new_mtu > efx_xdp_max_mtu(efx)) {
- netif_err(efx, drv, efx->net_dev,
- "Requested MTU of %d too big for XDP (max: %d)\n",
- new_mtu, efx_xdp_max_mtu(efx));
- return -EINVAL;
- }
-
- netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
-
- efx_device_detach_sync(efx);
- efx_stop_all(efx);
-
- mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
- efx_mac_reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
- efx_device_attach_if_not_resetting(efx);
- return 0;
-}
-
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2727,28 +906,6 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
-#ifdef CONFIG_SFC_MCDI_LOGGING
-static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct efx_nic *efx = dev_get_drvdata(dev);
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
-}
-static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct efx_nic *efx = dev_get_drvdata(dev);
- struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- bool enable = count > 0 && *buf != '0';
-
- mcdi->logging_enabled = enable;
- return count;
-}
-static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
-#endif
-
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2808,21 +965,11 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
-#ifdef CONFIG_SFC_MCDI_LOGGING
- rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to init net dev attributes\n");
- goto fail_attr_mcdi_logging;
- }
-#endif
+
+ efx_init_mcdi_logging(efx);
return 0;
-#ifdef CONFIG_SFC_MCDI_LOGGING
-fail_attr_mcdi_logging:
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2843,9 +990,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-#ifdef CONFIG_SFC_MCDI_LOGGING
- device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
-#endif
+ efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
@@ -2853,292 +998,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/**************************************************************************
*
- * Device reset and suspend
- *
- **************************************************************************/
-
-/* Tears down the entire software state and most of the hardware state
- * before reset. */
-void efx_reset_down(struct efx_nic *efx, enum reset_type method)
-{
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->prepare_flr(efx);
-
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
-
- mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH)
- efx->phy_op->fini(efx);
- efx->type->fini(efx);
-}
-
-/* This function will always ensure that the locks acquired in
- * efx_reset_down() are released. A failure return code indicates
- * that we were unable to reinitialise the hardware, and the
- * driver should be disabled. If ok is false, then the rx and tx
- * engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- if (method == RESET_TYPE_MCDI_TIMEOUT)
- efx->type->finish_flr(efx);
-
- /* Ensure that SRAM is initialised even if we're disabling the device */
- rc = efx->type->init(efx);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
- goto fail;
- }
-
- if (!ok)
- goto fail;
-
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH) {
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail;
- rc = efx->phy_op->reconfigure(efx);
- if (rc && rc != -EPERM)
- netif_err(efx, drv, efx->net_dev,
- "could not restore PHY settings\n");
- }
-
- rc = efx_enable_interrupts(efx);
- if (rc)
- goto fail;
-
-#ifdef CONFIG_SFC_SRIOV
- rc = efx->type->vswitching_restore(efx);
- if (rc) /* not fatal; the PF will still work fine */
- netif_warn(efx, probe, efx->net_dev,
- "failed to restore vswitching rc=%d;"
- " VFs may not function\n", rc);
-#endif
-
- if (efx->type->rx_restore_rss_contexts)
- efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
- efx->type->filter_table_restore(efx);
- up_write(&efx->filter_sem);
- if (efx->type->sriov_reset)
- efx->type->sriov_reset(efx);
-
- mutex_unlock(&efx->mac_lock);
-
- efx_start_all(efx);
-
- if (efx->type->udp_tnl_push_ports)
- efx->type->udp_tnl_push_ports(efx);
-
- return 0;
-
-fail:
- efx->port_initialized = false;
-
- mutex_unlock(&efx->rss_lock);
- up_write(&efx->filter_sem);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
-/* Reset the NIC using the specified method. Note that the reset may
- * fail, in which case the card will be left in an unusable state.
- *
- * Caller must hold the rtnl_lock.
- */
-int efx_reset(struct efx_nic *efx, enum reset_type method)
-{
- int rc, rc2;
- bool disabled;
-
- netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
- RESET_TYPE(method));
-
- efx_device_detach_sync(efx);
- efx_reset_down(efx, method);
-
- rc = efx->type->reset(efx, method);
- if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
- goto out;
- }
-
- /* Clear flags for the scopes we covered. We assume the NIC and
- * driver are now quiescent so that there is no race here.
- */
- if (method < RESET_TYPE_MAX_METHOD)
- efx->reset_pending &= -(1 << (method + 1));
- else /* it doesn't fit into the well-ordered scope hierarchy */
- __clear_bit(method, &efx->reset_pending);
-
- /* Reinitialise bus-mastering, which may have been turned off before
-	 * the reset was scheduled. This is still appropriate even in the
-	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
-	 * hardware can respond to requests. */
- pci_set_master(efx->pci_dev);
-
-out:
- /* Leave device stopped if necessary */
- disabled = rc ||
- method == RESET_TYPE_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_DISABLE;
- rc2 = efx_reset_up(efx, method, !disabled);
- if (rc2) {
- disabled = true;
- if (!rc)
- rc = rc2;
- }
-
- if (disabled) {
- dev_close(efx->net_dev);
- netif_err(efx, drv, efx->net_dev, "has been disabled\n");
- efx->state = STATE_DISABLED;
- } else {
- netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
- efx_device_attach_if_not_resetting(efx);
- }
- return rc;
-}
-
-/* Try recovery mechanisms.
- * For now only EEH is supported.
- * Returns 0 if the recovery mechanisms are unsuccessful.
- * Returns a non-zero value otherwise.
- */
-int efx_try_recovery(struct efx_nic *efx)
-{
-#ifdef CONFIG_EEH
- /* A PCI error can occur and not be seen by EEH because nothing
- * happens on the PCI bus. In this case the driver may fail and
- * schedule a 'recover or reset', leading to this recovery handler.
- * Manually call the eeh failure check function.
- */
- struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
- if (eeh_dev_check_failure(eehdev)) {
- /* The EEH mechanisms will handle the error and reset the
- * device if necessary.
- */
- return 1;
- }
-#endif
- return 0;
-}
-
-static void efx_wait_for_bist_end(struct efx_nic *efx)
-{
- int i;
-
- for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
- if (efx_mcdi_poll_reboot(efx))
- goto out;
- msleep(BIST_WAIT_DELAY_MS);
- }
-
- netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
-out:
- /* Either way unset the BIST flag. If we found no reboot we probably
- * won't recover, but we should try.
- */
- efx->mc_bist_for_other_fn = false;
-}
-
-/* The worker thread exists so that code that cannot sleep can
- * schedule a reset for later.
- */
-static void efx_reset_work(struct work_struct *data)
-{
- struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending;
- enum reset_type method;
-
- pending = READ_ONCE(efx->reset_pending);
- method = fls(pending) - 1;
-
- if (method == RESET_TYPE_MC_BIST)
- efx_wait_for_bist_end(efx);
-
- if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
- method == RESET_TYPE_RECOVER_OR_ALL) &&
- efx_try_recovery(efx))
- return;
-
- if (!pending)
- return;
-
- rtnl_lock();
-
- /* We checked the state in efx_schedule_reset() but it may
- * have changed by now. Now that we have the RTNL lock,
- * it cannot change again.
- */
- if (efx->state == STATE_READY)
- (void)efx_reset(efx, method);
-
- rtnl_unlock();
-}
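
Two bit tricks here deserve unpacking: fls(pending) - 1 selects the highest-numbered (widest-scope) pending reset, and efx_reset() clears the serviced scope and everything narrower with reset_pending &= -(1 << (method + 1)), since -(1 << k) in two's complement is a mask with all bits k and above set. A standalone demonstration, with fls re-implemented via a compiler builtin:

	#include <assert.h>

	/* Mirror of the kernel's fls(): 1 + index of the highest set
	 * bit, or 0 if no bit is set.
	 */
	static int fls_demo(unsigned long x)
	{
		return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
	}

	int main(void)
	{
		unsigned long pending = (1UL << 1) | (1UL << 4);
		int method = fls_demo(pending) - 1;

		assert(method == 4);	/* the widest-scope reset wins */

		/* -(1UL << 5) == ~((1UL << 5) - 1): clears bits 0..4 */
		pending &= -(1UL << (method + 1));
		assert(pending == 0);	/* both requests were covered */
		return 0;
	}
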
-
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
-{
- enum reset_type method;
-
- if (efx->state == STATE_RECOVERY) {
- netif_dbg(efx, drv, efx->net_dev,
- "recovering: skip scheduling %s reset\n",
- RESET_TYPE(type));
- return;
- }
-
- switch (type) {
- case RESET_TYPE_INVISIBLE:
- case RESET_TYPE_ALL:
- case RESET_TYPE_RECOVER_OR_ALL:
- case RESET_TYPE_WORLD:
- case RESET_TYPE_DISABLE:
- case RESET_TYPE_RECOVER_OR_DISABLE:
- case RESET_TYPE_DATAPATH:
- case RESET_TYPE_MC_BIST:
- case RESET_TYPE_MCDI_TIMEOUT:
- method = type;
- netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
- RESET_TYPE(method));
- break;
- default:
- method = efx->type->map_reset_reason(type);
- netif_dbg(efx, drv, efx->net_dev,
- "scheduling %s reset for %s\n",
- RESET_TYPE(method), RESET_TYPE(type));
- break;
- }
-
- set_bit(method, &efx->reset_pending);
- smp_mb(); /* ensure we change reset_pending before checking state */
-
- /* If we're not READY then just leave the flags set as the cue
- * to abort probing or reschedule the reset later.
- */
- if (READ_ONCE(efx->state) != STATE_READY)
- return;
-
- /* efx_process_channel() will no longer read events once a
-	 * reset is scheduled, so switch back to polled MCDI completions. */
- efx_mcdi_mode_poll(efx);
-
- queue_work(reset_workqueue, &efx->reset_work);
-}
-
-/**************************************************************************
- *
* List of NICs we support
*
**************************************************************************/
@@ -3170,139 +1029,10 @@ static const struct pci_device_id efx_pci_table[] = {
/**************************************************************************
*
- * Dummy PHY/MAC operations
- *
- * Can be used for some unimplemented operations
- * Needed so all function pointers are valid and do not have to be tested
- * before use
- *
- **************************************************************************/
-int efx_port_dummy_op_int(struct efx_nic *efx)
-{
- return 0;
-}
-void efx_port_dummy_op_void(struct efx_nic *efx) {}
-
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
-{
- return false;
-}
-
-static const struct efx_phy_operations efx_dummy_phy_operations = {
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_port_dummy_op_int,
- .poll = efx_port_dummy_op_poll,
- .fini = efx_port_dummy_op_void,
-};
-
-/**************************************************************************
- *
* Data housekeeping
*
**************************************************************************/
-/* This zeroes out and then fills in the invariants in a struct
- * efx_nic (including all sub-structures).
- */
-static int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
-{
- int rc = -ENOMEM, i;
-
- /* Initialise common structures */
- INIT_LIST_HEAD(&efx->node);
- INIT_LIST_HEAD(&efx->secondary_list);
- spin_lock_init(&efx->biu_lock);
-#ifdef CONFIG_SFC_MTD
- INIT_LIST_HEAD(&efx->mtd_list);
-#endif
- INIT_WORK(&efx->reset_work, efx_reset_work);
- INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
- INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
- efx->pci_dev = pci_dev;
- efx->msg_enable = debug;
- efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
-
- efx->net_dev = net_dev;
- efx->rx_prefix_size = efx->type->rx_prefix_size;
- efx->rx_ip_align =
- NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
- efx->rx_packet_hash_offset =
- efx->type->rx_hash_offset - efx->type->rx_prefix_size;
- efx->rx_packet_ts_offset =
- efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- mutex_init(&efx->rss_lock);
- spin_lock_init(&efx->stats_lock);
- efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
- efx->num_mac_stats = MC_CMD_MAC_NSTATS;
- BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
- mutex_init(&efx->mac_lock);
-#ifdef CONFIG_RFS_ACCEL
- mutex_init(&efx->rps_mutex);
- spin_lock_init(&efx->rps_hash_lock);
- /* Failure to allocate is not fatal, but may degrade ARFS performance */
- efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
- sizeof(*efx->rps_hash_table), GFP_KERNEL);
-#endif
- efx->phy_op = &efx_dummy_phy_operations;
- efx->mdio.dev = net_dev;
- INIT_WORK(&efx->mac_work, efx_mac_work);
- init_waitqueue_head(&efx->flush_wq);
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
- if (!efx->channel[i])
- goto fail;
- efx->msi_context[i].efx = efx;
- efx->msi_context[i].index = i;
- }
-
- /* Higher numbered interrupt modes are less capable! */
- if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
- efx->type->min_interrupt_mode)) {
- rc = -EIO;
- goto fail;
- }
- efx->interrupt_mode = max(efx->type->max_interrupt_mode,
- interrupt_mode);
- efx->interrupt_mode = min(efx->type->min_interrupt_mode,
- interrupt_mode);
-
- /* Would be good to use the net_dev name, but we're too early */
- snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
- pci_name(pci_dev));
- efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
- if (!efx->workqueue)
- goto fail;
-
- return 0;
-
-fail:
- efx_fini_struct(efx);
- return rc;
-}
-
-static void efx_fini_struct(struct efx_nic *efx)
-{
- int i;
-
-#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_hash_table);
-#endif
-
- for (i = 0; i < EFX_MAX_CHANNELS; i++)
- kfree(efx->channel[i]);
-
- kfree(efx->vpd_sn);
-
- if (efx->workqueue) {
- destroy_workqueue(efx->workqueue);
- efx->workqueue = NULL;
- }
-}
-
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
u64 n_rx_nodesc_trunc = 0;
@@ -3314,197 +1044,6 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if ((left->match_flags ^ right->match_flags) |
- ((left->flags ^ right->flags) &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
- return false;
-
- return memcmp(&left->outer_vid, &right->outer_vid,
- sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
-}
-
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
- (sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
- 0);
-}
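
efx_filter_spec_equal() and efx_filter_spec_hash() treat everything from outer_vid to the end of the spec as one flat, densely packed byte range; the BUILD_BUG_ON guards the 4-byte alignment that jhash2() needs. A minimal sketch of the same offsetof-based tail comparison and hashing (hypothetical struct, trivial mix standing in for jhash2()):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct spec {
		uint32_t flags;		/* compared separately */
		uint16_t outer_vid;	/* match fields start here and */
		uint16_t inner_vid;	/* are packed to the end */
		uint32_t rem_host;
	};

	static int spec_tail_equal(const struct spec *a, const struct spec *b)
	{
		return memcmp(&a->outer_vid, &b->outer_vid,
			      sizeof(struct spec) -
			      offsetof(struct spec, outer_vid)) == 0;
	}

	static uint32_t spec_tail_hash(const struct spec *s)
	{
		size_t off = offsetof(struct spec, outer_vid);
		const unsigned char *p = (const unsigned char *)s + off;
		size_t n = (sizeof(struct spec) - off) / 4;
		uint32_t h = 0, w;

		while (n--) {		/* stand-in for jhash2() */
			memcpy(&w, p, sizeof(w));
			p += sizeof(w);
			h = h * 2654435761u + w;
		}
		return h;
	}
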
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force)
-{
- if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
- /* ARFS is currently updating this entry, leave it */
- return false;
- }
- if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
- /* ARFS tried and failed to update this, so it's probably out
- * of date. Remove the filter and the ARFS rule entry.
- */
- rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
- *force = true;
- return true;
- } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
- /* ARFS has moved on, so old filter is not needed. Since we did
- * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
- * not be removed by efx_rps_hash_del() subsequently.
- */
- *force = true;
- return true;
- }
- /* Remove it iff ARFS wants to. */
- return true;
-}
-
-static
-struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
- const struct efx_filter_spec *spec)
-{
- u32 hash = efx_filter_spec_hash(spec);
-
- lockdep_assert_held(&efx->rps_hash_lock);
- if (!efx->rps_hash_table)
- return NULL;
- return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
-}
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
- const struct efx_filter_spec *spec)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (!head)
- return NULL;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec))
- return rule;
- }
- return NULL;
-}
-
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (!head)
- return NULL;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
- *new = false;
- return rule;
- }
- }
- rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
- *new = true;
- if (rule) {
- memcpy(&rule->spec, spec, sizeof(rule->spec));
- hlist_add_head(&rule->node, head);
- }
- return rule;
-}
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
-{
- struct efx_arfs_rule *rule;
- struct hlist_head *head;
- struct hlist_node *node;
-
- head = efx_rps_hash_bucket(efx, spec);
- if (WARN_ON(!head))
- return;
- hlist_for_each(node, head) {
- rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
- /* Someone already reused the entry. We know that if
- * this check doesn't fire (i.e. filter_id == REMOVING)
- * then the REMOVING mark was put there by our caller,
- * because caller is holding a lock on filter table and
- * only holders of that lock set REMOVING.
- */
- if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
- return;
- hlist_del(node);
- kfree(rule);
- return;
- }
- }
- /* We didn't find it. */
- WARN_ON(1);
-}
-#endif
-
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
-
- /* Create the new entry */
- new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
- if (!new)
- return NULL;
- new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
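
efx_alloc_rss_context_entry() keeps the context list sorted by user_id and takes the first unused ID; conveniently, list_add_tail(&new->list, &ctx->list) is correct both when the walk stops at a gap (insert before ctx) and when it runs off the end (ctx->list is the head again, so the entry is appended). A plain-array sketch of the first-gap search:

	/* First free ID over a sorted array of in-use IDs, mirroring
	 * the list walk above. IDs start at 1; 0 is the master context.
	 */
	static unsigned int first_free_id(const unsigned int *sorted_ids,
					  int n_ids)
	{
		unsigned int id = 1;
		int i;

		for (i = 0; i < n_ids; i++) {
			if (sorted_ids[i] != id)
				break;		/* gap found: id is free */
			id++;
			if (id == 0)		/* 2^32 contexts: give up */
				return 0;
		}
		return id;	/* new entry belongs at index i */
	}
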
-
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
-}
-
/**************************************************************************
*
* PCI interface
@@ -3520,7 +1059,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
- cancel_work_sync(&efx->reset_work);
+ efx_flush_reset_workqueue(efx);
efx_disable_interrupts(efx);
efx_clear_interrupt_affinity(efx);
@@ -3560,7 +1099,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
@@ -3783,7 +1322,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
- rc = efx_init_io(efx);
+ rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
+ efx->type->mem_map_size(efx));
if (rc)
goto fail2;
@@ -3827,7 +1367,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return 0;
fail3:
- efx_fini_io(efx);
+ efx_fini_io(efx, efx->type->mem_bar(efx));
fail2:
efx_fini_struct(efx);
fail1:
@@ -3905,7 +1445,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
- queue_work(reset_workqueue, &efx->reset_work);
+ efx_queue_reset_work(efx);
return 0;
@@ -4084,10 +1624,6 @@ static struct pci_driver efx_pci_driver = {
*
*************************************************************************/
-module_param(interrupt_mode, uint, 0444);
-MODULE_PARM_DESC(interrupt_mode,
- "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
-
static int __init efx_init_module(void)
{
int rc;
@@ -4104,11 +1640,9 @@ static int __init efx_init_module(void)
goto err_sriov;
#endif
- reset_workqueue = create_singlethread_workqueue("sfc_reset");
- if (!reset_workqueue) {
- rc = -ENOMEM;
+ rc = efx_create_reset_workqueue();
+ if (rc)
goto err_reset;
- }
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
@@ -4117,7 +1651,7 @@ static int __init efx_init_module(void)
return 0;
err_pci:
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
err_reset:
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
@@ -4133,7 +1667,7 @@ static void __exit efx_exit_module(void)
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
- destroy_workqueue(reset_workqueue);
+ efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
#endif
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2dd8d5002315..f1bdb04efbe4 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,31 +15,17 @@ int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
/* TX */
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
/* RX */
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
-void efx_rx_config_page_split(struct efx_nic *efx);
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(struct timer_list *t);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
@@ -48,7 +34,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -80,8 +65,6 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
/* Filters */
-void efx_mac_reconfigure(struct efx_nic *efx);
-
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -186,58 +169,17 @@ static inline void efx_filter_rfs_expire(struct work_struct *data)
static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#define efx_filter_rfs_enabled() 0
#endif
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
-
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right);
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force);
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
- const struct efx_filter_spec *spec);
-
-/* @new is written to indicate if entry was newly added (true) or if an old
- * entry was found and returned (false).
- */
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new);
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
-#endif
/* RSS contexts */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
- return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
+ return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
-/* Channels */
-int efx_channel_dummy_op_int(struct efx_channel *channel);
-void efx_channel_dummy_op_void(struct efx_channel *channel);
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-
-/* Ports */
-int efx_reconfigure_port(struct efx_nic *efx);
-int __efx_reconfigure_port(struct efx_nic *efx);
-
/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;
-/* Reset handling */
-int efx_reset(struct efx_nic *efx, enum reset_type method);
-void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-int efx_try_recovery(struct efx_nic *efx);
-
/* Global */
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
@@ -245,8 +187,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
-void efx_stop_eventq(struct efx_channel *channel);
-void efx_start_eventq(struct efx_channel *channel);
/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
@@ -293,9 +233,6 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
efx_schedule_channel(channel);
}
-void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx,
- const unsigned long *advertising);
void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
new file mode 100644
index 000000000000..aeb5e8aa2f2a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include "efx_channels.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "nic.h"
+#include "sriov.h"
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static unsigned int irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned int irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/***************
+ * Housekeeping
+ ***************/
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
+
+/*************
+ * INTERRUPTS
+ *************/
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ cpumask_var_t thread_mask;
+ unsigned int count;
+ int cpu;
+
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_sibling_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(thread_mask);
+ }
+
+ if (count > EFX_MAX_RX_QUEUES) {
+ netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ "Reducing number of rx queues from %u to %u.\n",
+ count, EFX_MAX_RX_QUEUES);
+ count = EFX_MAX_RX_QUEUES;
+ }
+
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
+ }
+#endif
+
+ return count;
+}
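
efx_wanted_parallelism() counts one RSS channel per physical core by OR-ing each visited CPU's sibling mask into a scratch mask, so hyperthread siblings are only counted once. The same idea over small fixed-width masks (sketch, capped at 64 CPUs):

	#include <stdint.h>

	/* Count distinct cores given one sibling bitmask per CPU: a CPU
	 * contributes only if none of its siblings was seen before.
	 */
	static int count_cores(const uint64_t *sibling_mask, int n_cpus)
	{
		uint64_t seen = 0;
		int cpu, count = 0;

		for (cpu = 0; cpu < n_cpus; cpu++) {
			if (!(seen & (1ULL << cpu))) {
				count++;
				seen |= sibling_mask[cpu];
			}
		}
		return count;
	}
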
+
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+
+ max_channels = min_t(unsigned int, vec_count, max_channels);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ efx->n_channels = n_channels;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+ efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+ efx->xdp_channel_offset = n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
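
Worked example for the XDP budget above (numbers illustrative): with 12 possible CPUs and EFX_TXQ_TYPES of 4, n_xdp_tx = 12 and n_xdp_ev = DIV_ROUND_UP(12, 4) = 3, so enabling XDP transmit costs three extra event queues on top of the normal channels; if those do not fit under max_channels the driver disables XDP TX and carries on rather than failing the probe.

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Assumed figures: 12 CPUs, 4 TX queue types per channel */
	enum { N_CPUS = 12, TXQ_TYPES = 4 };

	static int xdp_event_queues(void)
	{
		return DIV_ROUND_UP(N_CPUS, TXQ_TYPES);	/* == 3 */
	}
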
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+int efx_probe_interrupts(struct efx_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int rss_spread;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ else
+ return rc;
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ for (i = 0; i < efx->n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ else
+ return rc;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible, before XDP channels */
+ efx->n_extra_tx_channels = 0;
+ j = efx->xdp_channel_offset;
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
+ }
+ }
+
+ rss_spread = efx->n_rx_channels;
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((rss_spread > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ rss_spread : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = rss_spread;
+
+ return 0;
+}
+
+#if defined(CONFIG_SMP)
+void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
+void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+/***************
+ * EVENT QUEUES
+ ***************/
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions.
+ */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
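
Sizing the event queue to a power of two lets the driver keep a free-running read pointer and mask it on access: eventq_mask = size - 1 turns the wrap-around into a single AND. A minimal ring-index sketch:

	#include <stdint.h>

	#define EVQ_SIZE	1024u		/* power of two */
	#define EVQ_MASK	(EVQ_SIZE - 1)

	struct evq {
		uint32_t read_ptr;		/* never wrapped by hand */
		uint64_t ev[EVQ_SIZE];
	};

	static uint64_t evq_next(struct evq *q)
	{
		/* AND replaces '%' because EVQ_SIZE is a power of two */
		return q->ev[q->read_ptr++ & EVQ_MASK];
	}
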
+
+/* Prepare channel's event queue */
+int efx_init_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ channel->enabled = false;
+}
+
+void efx_fini_eventq(struct efx_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure. */
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+
+ return channel;
+}
+
+int efx_init_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ return -ENOMEM;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
+ efx->type->min_interrupt_mode)) {
+ return -EIO;
+ }
+	/* Clamp the requested mode into the NIC's supported range
+	 * (numerically lower modes are more capable).
+	 */
+	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+				  interrupt_mode);
+	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+				  efx->interrupt_mode);
+
+ return 0;
+}
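
Because the interrupt-mode enum orders capability inversely (0 = MSI-X is most capable, 2 = legacy least), the max()/min() pair above is a numeric clamp of the module parameter into the NIC's supported range. The same clamp in isolation (sketch):

	enum int_mode { MODE_MSIX = 0, MODE_MSI = 1, MODE_LEGACY = 2 };

	/* Clamp a requested mode into [most_capable, least_capable];
	 * e.g. a NIC supporting only MSI..legacy turns a request for
	 * MSI-X (0) into MSI (1).
	 */
	static enum int_mode clamp_mode(enum int_mode requested,
					enum int_mode most_capable,
					enum int_mode least_capable)
	{
		if (requested < most_capable)
			requested = most_capable;
		if (requested > least_capable)
			requested = least_capable;
		return requested;
	}
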
+
+void efx_fini_channels(struct efx_nic *efx)
+{
+ unsigned int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ if (efx->channel[i]) {
+ kfree(efx->channel[i]);
+ efx->channel[i] = NULL;
+ }
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+ INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ channel->rx_list = NULL;
+
+ return 0;
+
+fail:
+ efx_remove_channel(channel);
+ return rc;
+}
+
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (number < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
+void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
+}
+
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ unsigned int i, next_buffer_table = 0;
+ u32 old_rxq_entries, old_txq_entries;
+ int rc, rc2;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
+int efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
+
+ efx->tx_channel_offset =
+ efx_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
+
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ xdp_queue_number = 0;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
+ }
+ return 0;
+}
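
efx_set_channels() renumbers TX queues so that queue 0 starts on the first TX-capable channel, then records each XDP TX queue in xdp_tx_queues[] for direct lookup by the XDP transmit path. With (assumed) tx_channel_offset = 4 and EFX_TXQ_TYPES = 4, a queue originally numbered 16 becomes queue 0:

	enum { TXQ_TYPES = 4 };	/* stands in for EFX_TXQ_TYPES */

	/* Renumbering applied per TX queue above (arithmetic only) */
	static unsigned int renumber_txq(unsigned int queue,
					 unsigned int tx_channel_offset)
	{
		return queue - tx_channel_offset * TXQ_TYPES;	/* 16 -> 0 */
	}
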
+
+bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
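+/* Continuing the hypothetical example above (offset 4, 4 TX channels):
+ * channel 5 wants TX queues since 5 - 4 = 1 < 4, while channels below
+ * the offset do not, as the unsigned subtraction wraps well past
+ * n_tx_channels.
+ */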
+
+/*************
+ * START/STOP
+ *************/
+
+int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ efx_start_eventq(channel);
+ }
+
+ efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
+ efx_mcdi_mode_poll(efx);
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
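+	/* Ensure the cleared flag is visible to IRQ handlers, which are
+	 * expected to re-check it, before we wait for them to finish.
+	 */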
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ /* TODO: Is this really a bug? */
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+void efx_start_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+}
+
+void efx_stop_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+ int rc = 0;
+
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_stop_eventq(channel);
+ efx_start_eventq(channel);
+ }
+ }
+
+ if (efx->type->fini_dmaq)
+ rc = efx->type->fini_dmaq(efx);
+
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ }
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->pkts_compl = 0;
+ tx_queue->bytes_compl = 0;
+ }
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ efx_rx_flush_packet(channel);
+ efx_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ /* Update BQL */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->bytes_compl) {
+ netdev_tx_completed_queue(tx_queue->core_txq,
+ tx_queue->pkts_compl,
+ tx_queue->bytes_compl);
+ }
+ }
+
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
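+	/* Delivering the whole batch via netif_receive_skb_list() amortises
+	 * per-packet entry costs into the network stack compared with one
+	 * netif_receive_skb() call per packet.
+	 */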
+
+ return spent;
+}
+
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+ int step = efx->irq_mod_step_us;
+
+ if (channel->irq_mod_score < irq_adapt_low_thresh) {
+ if (channel->irq_moderation_us > step) {
+ channel->irq_moderation_us -= step;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+ if (channel->irq_moderation_us <
+ efx->irq_rx_moderation_us) {
+ channel->irq_moderation_us += step;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ xdp_do_flush_map();
+
+ if (spent < budget) {
+ if (efx_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ efx_update_irq_mod(efx, channel);
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ /* Perhaps expire some ARFS filters */
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
+#endif
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ if (napi_complete_done(napi, spent))
+ efx_nic_eventq_read_ack(channel);
+ }
+
+ return spent;
+}
+
+void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+}
+
+void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
+}
+
+void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+
+ channel->napi_dev = NULL;
+}
+
+void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
new file mode 100644
index 000000000000..8d7b8c4142d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_CHANNELS_H
+#define EFX_CHANNELS_H
+
+int efx_probe_interrupts(struct efx_nic *efx);
+void efx_remove_interrupts(struct efx_nic *efx);
+int efx_soft_enable_interrupts(struct efx_nic *efx);
+void efx_soft_disable_interrupts(struct efx_nic *efx);
+int efx_enable_interrupts(struct efx_nic *efx);
+void efx_disable_interrupts(struct efx_nic *efx);
+
+void efx_set_interrupt_affinity(struct efx_nic *efx);
+void efx_clear_interrupt_affinity(struct efx_nic *efx);
+
+int efx_probe_eventq(struct efx_channel *channel);
+int efx_init_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_fini_eventq(struct efx_channel *channel);
+void efx_remove_eventq(struct efx_channel *channel);
+
+struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
+void efx_set_channel_names(struct efx_nic *efx);
+int efx_init_channels(struct efx_nic *efx);
+int efx_probe_channels(struct efx_nic *efx);
+int efx_set_channels(struct efx_nic *efx);
+bool efx_default_channel_want_txqs(struct efx_channel *channel);
+void efx_remove_channel(struct efx_channel *channel);
+void efx_remove_channels(struct efx_nic *efx);
+void efx_fini_channels(struct efx_nic *efx);
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
+void efx_start_channels(struct efx_nic *efx);
+void efx_stop_channels(struct efx_nic *efx);
+
+void efx_init_napi_channel(struct efx_channel *channel);
+void efx_init_napi(struct efx_nic *efx);
+void efx_fini_napi_channel(struct efx_channel *channel);
+void efx_fini_napi(struct efx_nic *efx);
+
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
new file mode 100644
index 000000000000..b0d76bc19673
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST started by another function to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
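+/* i.e. wait at most BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 10 s */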
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *const efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
+};
+
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+int efx_create_reset_workqueue(void)
+{
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ printk(KERN_ERR "Failed to create reset workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void efx_queue_reset_work(struct efx_nic *efx)
+{
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+void efx_flush_reset_workqueue(struct efx_nic *efx)
+{
+ cancel_work_sync(&efx->reset_work);
+}
+
+void efx_destroy_reset_workqueue(void)
+{
+ if (reset_workqueue) {
+ destroy_workqueue(reset_workqueue);
+ reset_workqueue = NULL;
+ }
+}
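+/* Illustrative usage (not part of this patch): the driver's module init
+ * calls efx_create_reset_workqueue() once and module exit calls
+ * efx_destroy_reset_workqueue(); per-NIC code then only queues and
+ * flushes work via the helpers above.
+ */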
+
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ if (efx->type->reconfigure_mac) {
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+ }
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly.
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the port's
+ * TX queue stopped while the link is down.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN
+ */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+ return PAGE_SIZE - overhead;
+}
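+/* For example, on 4 KiB pages a few hundred bytes of combined overhead
+ * (the exact amount depends on rx_prefix_size, rx_buffer_padding,
+ * rx_ip_align and XDP_PACKET_HEADROOM) leaves a maximum XDP MTU of
+ * roughly 3.5 KiB; the numbers are illustrative only.
+ */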
+
+/* Context: process, rtnl_lock() held. */
+int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+
+ mutex_lock(&efx->mac_lock);
+ net_dev->mtu = new_mtu;
+ efx_mac_reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+ efx_device_attach_if_not_resetting(efx);
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+	/* If the mac_lock is already held then a port reconfiguration is
+	 * probably in progress and will do most of the work of monitor()
+	 * anyway.
+ */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled && efx->type->monitor)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx_start_monitor(efx);
+}
+
+void efx_start_monitor(struct efx_nic *efx)
+{
+ if (efx->type->monitor)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+ netdev_features_t old_features = efx->net_dev->features;
+ bool old_rx_scatter = efx->rx_scatter;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
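+	/* Illustrative sizing on 4 KiB pages: a 1500-byte MTU keeps
+	 * rx_buf_len within one page, so order-0 pages are used and
+	 * scatter follows the type default; a 9000-byte MTU overflows the
+	 * page and either enables scatter with EFX_RX_USR_BUF_SIZE
+	 * buffers or falls back to a higher-order allocation.
+	 */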
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
+ /* RX filters may also have scatter-enabled flags */
+ if ((efx->rx_scatter != old_rx_scatter) &&
+ efx->type->filter_update_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
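+	/* Worked example with hypothetical numbers: txq_entries = 1024 and
+	 * efx_tx_max_skb_descs() = 34 give a stop threshold of 990 in-use
+	 * descriptors and a wake threshold of 495.
+	 */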
+
+ /* Initialise the channels */
+ efx_start_channels(efx);
+
+ efx_ptp_start_datapath(efx);
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
+
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ efx_ptp_stop_datapath(efx);
+
+ efx_stop_channels(efx);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ efx_mac_reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+void efx_start_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock
+ */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ efx_start_port(efx);
+ efx_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ efx_start_monitor(efx);
+
+ /* Link state detection is normally event-driven; we have
+ * to poll now because we could have missed a change
+ */
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (efx->type->start_stats) {
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ }
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+void efx_stop_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ if (efx->type->update_stats) {
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ }
+
+ efx_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc = 0;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ if (efx->type->reconfigure_port)
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled.
+ */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset.
+ */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ mutex_lock(&efx->rss_lock);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE.
+ */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ if (efx->type->rx_restore_rss_contexts)
+ efx->type->rx_restore_rss_contexts(efx);
+ mutex_unlock(&efx->rss_lock);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ if (efx->type->udp_tnl_push_ports)
+ efx->type->udp_tnl_push_ports(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ bool disabled;
+ int rc, rc2;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
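+	/* Illustrative bit arithmetic: for method == 2, -(1 << 3) is
+	 * ...11111000 in two's complement, so the mask clears pending
+	 * bits 0-2 (the scopes subsumed by this reset) and keeps the rest.
+	 */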
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
+	 * hardware can respond to requests.
+ */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ efx_device_attach_if_not_resetting(efx);
+ }
+ return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = READ_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
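+	/* fls() returns the 1-based index of the highest set bit (0 for no
+	 * bits set), so this selects the most drastic pending reset type;
+	 * if nothing is pending, method is -1 and we return below.
+	 */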
+
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in efx_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_reset(efx, method);
+
+ rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
+ case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (READ_ONCE(efx->state) != STATE_READY)
+ return;
+
+ /* efx_process_channel() will no longer read events once a
+	 * reset is scheduled, so switch back to polled MCDI completions.
+ */
+ efx_mcdi_mode_poll(efx);
+
+ efx_queue_reset_work(efx);
+}
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+int efx_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int rc = -ENOMEM;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ efx_selftest_async_init(efx);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ INIT_LIST_HEAD(&efx->rss_context.list);
+ mutex_init(&efx->rss_lock);
+ spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ mutex_init(&efx->mac_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
+#endif
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ rc = efx_init_channels(efx);
+ if (rc)
+ goto fail;
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return rc;
+}
+
+void efx_fini_struct(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
+ efx_fini_channels(efx);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+	 * (e.g. x86_64 with iommu_sac_force set) will allow 40-bit
+	 * masks even though they reject 46-bit masks.
+ */
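+	/* For example, starting from a hypothetical DMA_BIT_MASK(46), the
+	 * loop below retries with 45-, 44-, ... bit masks until one is
+	 * accepted or the mask shrinks to 31 bits and we give up.
+	 */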
+ while (dma_mask > 0x7fffffffUL) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc == 0)
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long)dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ if (!efx->membase_phys) {
+ netif_err(efx, probe, efx->net_dev,
+ "ERROR: No BAR%d mapping from the BIOS. "
+ "Try pci=realloc on the kernel command line\n", bar);
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = pci_request_region(pci_dev, bar, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+fail4:
+ pci_release_region(efx->pci_dev, bar);
+fail3:
+ efx->membase_phys = 0;
+fail2:
+ pci_disable_device(efx->pci_dev);
+fail1:
+ return rc;
+}
+
+void efx_fini_io(struct efx_nic *efx, int bar)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, bar);
+ efx->membase_phys = 0;
+ }
+
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev);
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+
+void efx_init_mcdi_logging(struct efx_nic *efx)
+{
+ int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ }
+}
+
+void efx_fini_mcdi_logging(struct efx_nic *efx)
+{
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+}
+#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
new file mode 100644
index 000000000000..fa2fc681e7f9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size);
+void efx_fini_io(struct efx_nic *efx, int bar);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+ struct net_device *net_dev);
+void efx_fini_struct(struct efx_nic *efx);
+
+void efx_start_all(struct efx_nic *efx);
+void efx_stop_all(struct efx_nic *efx);
+
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);
+
+int efx_create_reset_workqueue(void);
+void efx_queue_reset_work(struct efx_nic *efx);
+void efx_flush_reset_workqueue(struct efx_nic *efx);
+void efx_destroy_reset_workqueue(void);
+
+void efx_start_monitor(struct efx_nic *efx);
+
+int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
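+/* In other words: once the device has reached a state in which resets
+ * are serialised by the RTNL lock, assert that the caller holds it.
+ */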
+
+int efx_try_recovery(struct efx_nic *efx);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+
+static inline int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+void efx_init_mcdi_logging(struct efx_nic *efx);
+void efx_fini_mcdi_logging(struct efx_nic *efx);
+#else
+static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
+#endif
+
+void efx_mac_reconfigure(struct efx_nic *efx);
+void efx_link_status_changed(struct efx_nic *efx);
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
+int efx_change_mtu(struct net_device *net_dev, int new_mtu);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index b31032da4bcb..993b5769525b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -13,92 +13,13 @@
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
#include "filter.h"
#include "nic.h"
-struct efx_sw_stat_desc {
- const char *name;
- enum {
- EFX_ETHTOOL_STAT_SOURCE_nic,
- EFX_ETHTOOL_STAT_SOURCE_channel,
- EFX_ETHTOOL_STAT_SOURCE_tx_queue
- } source;
- unsigned offset;
- u64(*get_stat) (void *field); /* Reader function */
-};
-
-/* Initialiser for a struct efx_sw_stat_desc with type-checking */
-#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
- get_stat_function) { \
- .name = #stat_name, \
- .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
- .offset = ((((field_type *) 0) == \
- &((struct efx_##source_name *)0)->field) ? \
- offsetof(struct efx_##source_name, field) : \
- offsetof(struct efx_##source_name, field)), \
- .get_stat = get_stat_function, \
-}
-
-static u64 efx_get_uint_stat(void *field)
-{
- return *(unsigned int *)field;
-}
-
-static u64 efx_get_atomic_stat(void *field)
-{
- return atomic_read((atomic_t *) field);
-}
-
-#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
- EFX_ETHTOOL_STAT(field, nic, field, \
- atomic_t, efx_get_atomic_stat)
-
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
- EFX_ETHTOOL_STAT(field, channel, n_##field, \
- unsigned int, efx_get_uint_stat)
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
- EFX_ETHTOOL_STAT(field, channel, field, \
- unsigned int, efx_get_uint_stat)
-
-#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
- EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
- unsigned int, efx_get_uint_stat)
-
-static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
- EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
- EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
- EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
- EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
-#ifdef CONFIG_RFS_ACCEL
- EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
-#endif
-};
-
-#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
-
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
/**************************************************************************
@@ -185,18 +106,6 @@ efx_ethtool_set_link_ksettings(struct net_device *net_dev,
return rc;
}
-static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
- struct ethtool_drvinfo *info)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
- efx_mcdi_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
-}
-
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
return efx_nic_get_regs_len(netdev_priv(net_dev));
@@ -211,341 +120,6 @@ static void efx_ethtool_get_regs(struct net_device *net_dev,
efx_nic_get_regs(efx, buf);
}
-static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- return efx->msg_enable;
-}
-
-static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- efx->msg_enable = msg_enable;
-}
-
-/**
- * efx_fill_test - fill in an individual self-test entry
- * @test_index: Index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- * @test: Pointer to test result (used only if data != %NULL)
- * @unit_format: Unit name format (e.g. "chan\%d")
- * @unit_id: Unit id (e.g. 0 for "chan0")
- * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
- * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
- *
- * Fill in an individual self-test entry.
- */
-static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
- int *test, const char *unit_format, int unit_id,
- const char *test_format, const char *test_id)
-{
- char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
-
- /* Fill data value, if applicable */
- if (data)
- data[test_index] = *test;
-
- /* Fill string, if applicable */
- if (strings) {
- if (strchr(unit_format, '%'))
- snprintf(unit_str, sizeof(unit_str),
- unit_format, unit_id);
- else
- strcpy(unit_str, unit_format);
- snprintf(test_str, sizeof(test_str), test_format, test_id);
- snprintf(strings + test_index * ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- "%-6s %-24s", unit_str, test_str);
- }
-}
-
-#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
-#define EFX_LOOPBACK_NAME(_mode, _counter) \
- "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
-
-/**
- * efx_fill_loopback_test - fill in a block of loopback self-test entries
- * @efx: Efx NIC
- * @lb_tests: Efx loopback self-test results structure
- * @mode: Loopback test mode
- * @test_index: Starting index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Fill in a block of loopback self-test entries. Return new test
- * index.
- */
-static int efx_fill_loopback_test(struct efx_nic *efx,
- struct efx_loopback_self_tests *lb_tests,
- enum efx_loopback_mode mode,
- unsigned int test_index,
- u8 *strings, u64 *data)
-{
- struct efx_channel *channel =
- efx_get_channel(efx, efx->tx_channel_offset);
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_sent[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_sent"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->tx_done[tx_queue->queue],
- EFX_TX_QUEUE_NAME(tx_queue),
- EFX_LOOPBACK_NAME(mode, "tx_done"));
- }
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_good,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_good"));
- efx_fill_test(test_index++, strings, data,
- &lb_tests->rx_bad,
- "rx", 0,
- EFX_LOOPBACK_NAME(mode, "rx_bad"));
-
- return test_index;
-}
-
-/**
- * efx_ethtool_fill_self_tests - get self-test details
- * @efx: Efx NIC
- * @tests: Efx self-test results structure, or %NULL
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Get self-test number of strings, strings, and/or test results.
- * Return number of strings (== number of test results).
- *
- * The reason for merging these three functions is to make sure that
- * they can never be inconsistent.
- */
-static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
- struct efx_self_tests *tests,
- u8 *strings, u64 *data)
-{
- struct efx_channel *channel;
- unsigned int n = 0, i;
- enum efx_loopback_mode mode;
-
- efx_fill_test(n++, strings, data, &tests->phy_alive,
- "phy", 0, "alive", NULL);
- efx_fill_test(n++, strings, data, &tests->nvram,
- "core", 0, "nvram", NULL);
- efx_fill_test(n++, strings, data, &tests->interrupt,
- "core", 0, "interrupt", NULL);
-
- /* Event queues */
- efx_for_each_channel(channel, efx) {
- efx_fill_test(n++, strings, data,
- &tests->eventq_dma[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.dma", NULL);
- efx_fill_test(n++, strings, data,
- &tests->eventq_int[channel->channel],
- EFX_CHANNEL_NAME(channel),
- "eventq.int", NULL);
- }
-
- efx_fill_test(n++, strings, data, &tests->memory,
- "core", 0, "memory", NULL);
- efx_fill_test(n++, strings, data, &tests->registers,
- "core", 0, "registers", NULL);
-
- if (efx->phy_op->run_tests != NULL) {
- EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
-
- for (i = 0; true; ++i) {
- const char *name;
-
- EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
- name = efx->phy_op->test_name(efx, i);
- if (name == NULL)
- break;
-
- efx_fill_test(n++, strings, data, &tests->phy_ext[i],
- "phy", 0, name, NULL);
- }
- }
-
- /* Loopback tests */
- for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
- if (!(efx->loopback_modes & (1 << mode)))
- continue;
- n = efx_fill_loopback_test(efx,
- &tests->loopback[mode], mode, n,
- strings, data);
- }
-
- return n;
-}
-
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
-{
- size_t n_stats = 0;
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_tx_queues(channel)) {
- n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_TXQ_TYPES);
-
- strings += ETH_GSTRING_LEN;
- }
- }
- }
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel)) {
- n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
- }
- }
- if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
- unsigned short xdp;
-
- for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
- n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
- }
- }
-
- return n_stats;
-}
-
-static int efx_ethtool_get_sset_count(struct net_device *net_dev,
- int string_set)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- switch (string_set) {
- case ETH_SS_STATS:
- return efx->type->describe_stats(efx, NULL) +
- EFX_ETHTOOL_SW_STAT_COUNT +
- efx_describe_per_queue_stats(efx, NULL) +
- efx_ptp_describe_stats(efx, NULL);
- case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
- default:
- return -EINVAL;
- }
-}
-
-static void efx_ethtool_get_strings(struct net_device *net_dev,
- u32 string_set, u8 *strings)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int i;
-
- switch (string_set) {
- case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
- for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_ptp_describe_stats(efx, strings);
- break;
- case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
- break;
- default:
- /* No other string sets */
- break;
- }
-}
-
-static void efx_ethtool_get_stats(struct net_device *net_dev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- const struct efx_sw_stat_desc *stat;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
- int i;
-
- spin_lock_bh(&efx->stats_lock);
-
- /* Get NIC statistics */
- data += efx->type->update_stats(efx, data, NULL);
-
- /* Get software statistics */
- for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
- stat = &efx_sw_stat_desc[i];
- switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_nic:
- data[i] = stat->get_stat((void *)efx + stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_channel:
- data[i] = 0;
- efx_for_each_channel(channel, efx)
- data[i] += stat->get_stat((void *)channel +
- stat->offset);
- break;
- case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
- data[i] = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- data[i] +=
- stat->get_stat((void *)tx_queue
- + stat->offset);
- }
- break;
- }
- }
- data += EFX_ETHTOOL_SW_STAT_COUNT;
-
- spin_unlock_bh(&efx->stats_lock);
-
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_tx_queues(channel)) {
- *data = 0;
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- *data += tx_queue->tx_packets;
- }
- data++;
- }
- }
- efx_for_each_channel(channel, efx) {
- if (efx_channel_has_rx_queue(channel)) {
- *data = 0;
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- *data += rx_queue->rx_packets;
- }
- data++;
- }
- }
- if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
- int xdp;
-
- for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
- data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
- data++;
- }
- }
-
- efx_ptp_update_stats(efx, data);
-}
-
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
@@ -787,16 +361,6 @@ out:
return rc;
}
-static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
- struct ethtool_pauseparam *pause)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
- pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
- pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
-}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
@@ -1456,7 +1020,7 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
rc = -ENOMEM;
goto out_unlock;
}
- ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Initialise indir table and key to defaults */
efx_set_default_rx_indir_table(efx, ctx);
netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
new file mode 100644
index 000000000000..b8d281ab6c7a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "ethtool_common.h"
+
+struct efx_sw_stat_desc {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned int offset;
+ u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
+
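The ternary in the .offset initialiser above is not redundant even though both
branches are the same offsetof(): the comparison between a null (field_type *)
and the address of the field is what performs the type check, drawing a
compiler diagnostic if the named field does not have the stated type. A
minimal stand-alone sketch of the same trick (names here are illustrative,
not the driver's):

    #include <stddef.h>

    struct sample { unsigned int hits; };

    /* Compiles cleanly only if 'member' really has type 'type' inside
     * 'strct'; mismatched pointer types in the == draw a diagnostic.
     */
    #define TYPED_OFFSETOF(strct, member, type)                        \
            ((((type *)0) == &((struct strct *)0)->member) ?           \
             offsetof(struct strct, member) :                          \
             offsetof(struct strct, member))

    static const size_t hits_off = TYPED_OFFSETOF(sample, hits, unsigned int);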
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
+ EFX_ETHTOOL_STAT(field, channel, field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+ EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->msg_enable;
+}
+
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ efx->msg_enable = msg_enable;
+}
+
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str, sizeof(unit_str),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
+ }
+}
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
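Each of these name macros expands to two comma-separated arguments, a format
string and the value that fills it, so one macro use satisfies a format/id
parameter pair of efx_fill_test(). For instance (expansion shown for
illustration only):

    /* EFX_LOOPBACK_NAME(mode, "tx_sent") becomes, after string-literal
     * concatenation, the two arguments
     *     "loopback.%s.tx_sent", STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
     * filling the test_format/test_id pair, just as EFX_CHANNEL_NAME(channel)
     * yields "chan%d", channel->channel for the unit_format/unit_id pair.
     */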
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return the new test
+ * index.
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Get the number of self-test strings, the strings themselves, and/or
+ * the test results. Return the number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+ size_t n_stats = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_TXQ_TYPES);
+
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "rx-%d.rx_packets", channel->channel);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
+ return n_stats;
+}
+
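Like efx_ethtool_fill_self_tests() above, efx_describe_per_queue_stats()
doubles as its own counter: called with strings == NULL it only tallies the
entries it would emit, so efx_ethtool_get_sset_count() and
efx_ethtool_get_strings() can never disagree. A minimal sketch of this
count-or-fill pattern, with hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    /* With names == NULL this only counts, so the count and the strings
     * are derived from one loop and cannot drift apart.
     */
    static size_t describe_queues(char *names, size_t stride,
                                  unsigned int nqueues)
    {
            size_t n = 0;
            unsigned int q;

            for (q = 0; q < nqueues; q++) {
                    if (names) {
                            snprintf(names, stride, "q-%u.packets", q);
                            names += stride;
                    }
                    n++;
            }
            return n;
    }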
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_describe_per_queue_stats(efx, NULL) +
+ efx_ptp_describe_stats(efx, NULL);
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+void efx_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ strings += (efx_describe_per_queue_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ efx_ptp_describe_stats(efx, strings);
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ const struct efx_sw_stat_desc *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int i;
+
+ spin_lock_bh(&efx->stats_lock);
+
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
+
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ *data = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ *data += tx_queue->tx_packets;
+ }
+ data++;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ *data = 0;
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ *data += rx_queue->rx_packets;
+ }
+ data++;
+ }
+ }
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
+
+ efx_ptp_update_stats(efx, data);
+}
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
new file mode 100644
index 000000000000..fa624313f330
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ETHTOOL_COMMON_H
+#define EFX_ETHTOOL_COMMON_H
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info);
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause);
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data);
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
+void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
+ u8 *strings);
+void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats __attribute__ ((unused)),
+ u64 *data);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index eecc348b1c32..42bcd34fc508 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1265,7 +1265,7 @@ static int ef4_init_io(struct ef4_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+ efx->membase = ioremap(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
@@ -2108,7 +2108,7 @@ static void ef4_net_stats(struct net_device *net_dev,
}
/* Context: netif_tx_lock held, BHs disabled. */
-static void ef4_watchdog(struct net_device *net_dev)
+static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct ef4_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index eedd32e2bfcb..dbbb898adddb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -15,6 +15,7 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 9081f84a2604..54a45010b576 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,11 +346,8 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-int efx_mcdi_port_get_number(struct efx_nic *efx);
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-int efx_mcdi_set_mac(struct efx_nic *efx);
-#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
new file mode 100644
index 000000000000..4310ae5bd898
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -0,0 +1,2270 @@
+#include "mcdi_filters.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
+
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static struct efx_filter_spec *
+efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static u32 efx_mcdi_filter_get_unsafe_id(u32 filter_id)
+{
+ WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
+ return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+}
+
+static unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id)
+{
+ return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2);
+}
+
+static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx)
+{
+ return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx;
+}
+
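The three helpers above pack a match-priority and a table index into one
filter ID: id = pri * (EFX_MCDI_FILTER_TBL_ROWS * 2) + idx. Because the row
count is a power of two, the index comes back with a mask and the priority
with a divide. A worked round trip, assuming an illustrative row count of
8192:

    /* make_filter_id(pri = 3, idx = 42) = 3 * 16384 + 42 = 49194
     * idx = 49194 & (8192 - 1) = 42   (low 13 bits untouched, since idx < rows)
     * pri = 49194 / 16384      = 3
     */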
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
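entry[].spec is an unsigned long carrying both the spec pointer and the
private EFX_EF10_FILTER_FLAGS bits, relying on allocator alignment leaving
the low bits of the pointer free; efx_mcdi_filter_entry_spec() masks the
flags back off before the pointer is used. A minimal sketch of this
tagged-pointer idiom (names hypothetical):

    #include <assert.h>
    #include <stdint.h>

    #define TAG_MASK 0x3UL	/* low bits free on >= 4-byte-aligned pointers */

    static uintptr_t pack(void *ptr, unsigned long flags)
    {
            assert(((uintptr_t)ptr & TAG_MASK) == 0);
            assert((flags & ~TAG_MASK) == 0);
            return (uintptr_t)ptr | flags;
    }

    static void *unpack_ptr(uintptr_t word)
    {
            return (void *)(word & ~TAG_MASK);
    }

    static unsigned long unpack_flags(uintptr_t word)
    {
            return word & TAG_MASK;
    }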
+static void
+efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ u32 match_fields = 0, uc_match, mc_match;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /*
+ * Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+#define COPY_VALUE(value, mcdi_field) \
+ do { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(value)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &value, sizeof(value)); \
+ } while (0)
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ COPY_VALUE(spec->gen_field, mcdi_field); \
+ }
+ /*
+ * Handle encap filters first. They will always be mismatch
+ * (unknown UC or MC) filters
+ */
+ if (encap_type) {
+ /*
+ * ether_type and outer_ip_proto need to be variables
+ * because COPY_VALUE wants to memcpy them
+ */
+ __be16 ether_type =
+ htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
+ ETH_P_IPV6 : ETH_P_IP);
+ u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
+ u8 outer_ip_proto;
+
+ switch (encap_type & EFX_ENCAP_TYPES_MASK) {
+ case EFX_ENCAP_TYPE_VXLAN:
+ vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
+ /* fallthrough */
+ case EFX_ENCAP_TYPE_GENEVE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_UDP;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ /*
+ * We always need to set the type field, even
+ * though we're not matching on the TNI.
+ */
+ MCDI_POPULATE_DWORD_1(inbuf,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ vni_type);
+ break;
+ case EFX_ENCAP_TYPE_NVGRE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_GRE;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+#undef COPY_VALUE
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+}
+
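COPY_VALUE above memcpy()s each match value rather than using
MCDI_SET_DWORD(): as the comment notes, these fields stay in network byte
order, and some (an IPv6 address, say) are wider than a dword, so a raw byte
copy is the only representation-preserving write. Roughly what one expansion
does (a sketch, not the literal macro output):

    __be16 ether_type = htons(ETH_P_IP);

    memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ETHER_TYPE), &ether_type,
           sizeof(ether_type));	/* bytes land in the request as given */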
+static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ struct efx_rss_context *ctx,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 flags = spec->flags;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
+
+ /* If RSS filter, caller better have given us an RSS context */
+ if (flags & EFX_FILTER_FLAG_RX_RSS) {
+ /*
+ * We don't have the ability to return an error, so we'll just
+ * log a warning and disable RSS for the filter.
+ */
+ if (WARN_ON_ONCE(!ctx))
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+ else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID))
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+ }
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
+}
+
+static int efx_mcdi_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec, u64 *handle,
+ struct efx_rss_context *ctx, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc && spec->priority != EFX_FILTER_PRI_HINT)
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
+ outbuf, outlen, rc);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
+ return rc;
+}
+
+static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ unsigned int match_flags = spec->match_flags;
+ unsigned int uc_match, mc_match;
+ u32 mcdi_flags = 0;
+
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << ((encap) ? \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
+ mcdi_field ## _LBN : \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
+ mcdi_field ## _LBN)); \
+ }
+ /* inner or outer based on encap type */
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
+ /* always outer */
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* special handling for encap type, and mismatch */
+ if (encap_type) {
+ match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
+ mcdi_flags |=
+ (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ }
+
+ /* Did we map them all? */
+ WARN_ON_ONCE(match_flags);
+
+ return mcdi_flags;
+}
+
+static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,
+ const struct efx_filter_spec *spec)
+{
+ u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
+static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table;
+ struct efx_filter_spec *saved_spec;
+ struct efx_rss_context *ctx = NULL;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool rss_locked = false;
+ bool replacing = false;
+ unsigned int depth, i;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+ table = efx->filter_state;
+ down_write(&table->lock);
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_filter_pri(table, spec);
+ if (rc < 0)
+ goto out_unlock;
+ match_pri = rc;
+
+ hash = efx_filter_spec_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ mutex_lock(&efx->rss_lock);
+ rss_locked = true;
+ if (spec->rss_context)
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
+ else
+ ctx = &efx->rss_context;
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at.
+ */
+ for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_mcdi_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_filter_spec_equal(spec, saved_spec)) {
+ if (spec->priority < saved_spec->priority &&
+ spec->priority != EFX_FILTER_PRI_AUTO) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ break;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+ }
+
+ /* Once we reach the maximum search depth, use the first suitable
+ * slot, or return -EBUSY if there was none
+ */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Create a software table entry if necessary. */
+ saved_spec = efx_mcdi_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
+ /* Just make sure it won't be removed */
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_mcdi_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Actually insert the filter on the HW */
+ rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
+ ctx, replacing);
+
+ if (rc == -EINVAL && nic_data->must_realloc_vis)
+ /* The MC rebooted under us, causing it to reject our filter
+ * insertion as pointing to an invalid VI (spec->dmaq_id).
+ */
+ rc = -EAGAIN;
+
+ /* Finalise the software table entry */
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ } else {
+ /* We failed to replace, so the old filter is still present.
+ * Roll back the software table to reflect this. In fact the
+ * efx_mcdi_filter_set_entry() call below will do the right
+ * thing, so nothing extra is needed here.
+ */
+ }
+ efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_mcdi_filter_entry_spec(table, i);
+ priv_flags = efx_mcdi_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ }
+ efx_mcdi_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index);
+
+out_unlock:
+ if (rss_locked)
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+ return rc;
+}
+
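The search loop in efx_mcdi_filter_insert_locked() above is open addressing
with linear probing: starting just past the spec's hash, it examines up to
EFX_EF10_FILTER_SEARCH_LIMIT consecutive slots (modulo the table size),
remembering the first free slot while checking occupied ones for an equal
match. A stripped-down sketch of that probe, with hypothetical types:

    struct slot { const void *key; };

    static int key_equal(const void *a, const void *b)
    {
            return a == b;	/* stand-in for efx_filter_spec_equal() */
    }

    /* Return a matching slot, else the first free slot seen, else -1. */
    static int probe(const struct slot *tbl, unsigned int rows, /* power of 2 */
                     unsigned int hash, const void *key, unsigned int limit)
    {
            int free_idx = -1;
            unsigned int depth, i;

            for (depth = 1; depth < limit; depth++) {
                    i = (hash + depth) & (rows - 1);
                    if (!tbl[i].key) {
                            if (free_idx < 0)
                                    free_idx = i;
                    } else if (key_equal(tbl[i].key, key)) {
                            return i;	/* existing entry */
                    }
            }
            return free_idx;	/* insertion point, or -1 => table busy */
    }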
+s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ s32 ret;
+
+ down_read(&efx->filter_sem);
+ ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal);
+ up_read(&efx->filter_sem);
+
+ return ret;
+}
+
+/*
+ * Remove a filter.
+ * If !by_index, remove by ID
+ * If by_index, remove by index
+ * Filter ID may come from userland and must be range-checked.
+ * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
+ * for write.
+ */
+static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
+{
+ unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec ||
+ (!by_index &&
+ efx_mcdi_filter_pri(table, spec) !=
+ efx_mcdi_filter_get_unsafe_pri(filter_id)))
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ return 0;
+ }
+
+ if (!(priority_mask & (1U << spec->priority)))
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ (efx_rss_active(&efx->rss_context) ?
+ EFX_FILTER_FLAG_RX_RSS : 0));
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = 0;
+ rc = efx_mcdi_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ &efx->rss_context,
+ true);
+
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if ((rc == 0) || (rc == -ENOENT)) {
+ /* Filter removed OK or didn't actually exist */
+ kfree(spec);
+ efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
+ MC_CMD_FILTER_OP_EXT_IN_LEN,
+ NULL, 0, rc);
+ }
+ }
+
+ return rc;
+}
+
+/* Remove filters that weren't renewed. */
+static void efx_mcdi_filter_remove_old(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ int remove_failed = 0;
+ int remove_noent = 0;
+ int rc;
+ int i;
+
+ down_write(&table->lock);
+ for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
+ if (READ_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ rc = efx_mcdi_filter_remove_internal(efx,
+ 1U << EFX_FILTER_PRI_AUTO, i, true);
+ if (rc == -ENOENT)
+ remove_noent++;
+ else if (rc)
+ remove_failed++;
+ }
+ }
+ up_write(&table->lock);
+
+ if (remove_failed)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d filters\n",
+ __func__, remove_failed);
+ if (remove_noent)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: failed to remove %d non-existent filters\n",
+ __func__, remove_noent);
+}
+
+int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_mcdi_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
+ false);
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+/* Caller must hold efx->filter_sem for read */
+static void efx_mcdi_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (filter_id == EFX_EF10_FILTER_ID_INVALID)
+ return;
+
+ down_write(&table->lock);
+ efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
+ true);
+ up_write(&table->lock);
+}
+
+int efx_mcdi_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
+ const struct efx_filter_spec *saved_spec;
+ struct efx_mcdi_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+ saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_mcdi_filter_pri(table, saved_spec) ==
+ efx_mcdi_filter_get_unsafe_pri(filter_id)) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan,
+ bool multicast, bool rollback)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_dev_addr *addr_list;
+ enum efx_filter_flags filter_flags;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ unsigned int i, j;
+ int addr_count;
+ u16 *ids;
+ int rc;
+
+ if (multicast) {
+ addr_list = table->dev_mc_list;
+ addr_count = table->dev_mc_count;
+ ids = vlan->mc;
+ } else {
+ addr_list = table->dev_uc_list;
+ addr_count = table->dev_uc_count;
+ ids = vlan->uc;
+ }
+
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ /* Insert/renew filters */
+ for (i = 0; i < addr_count; i++) {
+ EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+ efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ if (rollback) {
+ netif_info(efx, drv, efx->net_dev,
+ "efx_mcdi_filter_insert failed rc=%d\n",
+ rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ /* keep invalid ID, and carry on */
+ }
+ } else {
+ ids[i] = efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+
+ if (multicast && rollback) {
+ /* Also need an Ethernet broadcast filter */
+ EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n", rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan,
+ enum efx_encap_type encap_type,
+ bool multicast, bool rollback)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ enum efx_filter_flags filter_flags;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ int rc;
+ u16 *id;
+
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
+
+ if (multicast)
+ efx_filter_set_mc_def(&spec);
+ else
+ efx_filter_set_uc_def(&spec);
+
+ if (encap_type) {
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx_filter_set_encap_type(&spec, encap_type);
+ else
+ /*
+ * don't insert encap filters on non-supporting
+ * platforms. ID will be left as INVALID.
+ */
+ return 0;
+ }
+
+ if (vlan->vid != EFX_FILTER_VID_UNSPEC)
+ efx_filter_set_eth_local(&spec, vlan->vid, NULL);
+
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ const char *um = multicast ? "Multicast" : "Unicast";
+ const char *encap_name = "";
+ const char *encap_ipv = "";
+
+ if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_VXLAN)
+ encap_name = "VXLAN ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_NVGRE)
+ encap_name = "NVGRE ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_GENEVE)
+ encap_name = "GENEVE ";
+ if (encap_type & EFX_ENCAP_FLAG_IPV6)
+ encap_ipv = "IPv6 ";
+ else if (encap_type)
+ encap_ipv = "IPv4 ";
+
+ /*
+ * unprivileged functions can't insert mismatch filters
+ * for encapsulated or unicast traffic, so downgrade
+ * those warnings to debug.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ rc == -EPERM && (encap_type || !multicast), warn,
+ "%s%s%s mismatch filter insert failed rc=%d\n",
+ encap_name, encap_ipv, um, rc);
+ } else if (multicast) {
+ /* mapping from encap types to default filter IDs (multicast) */
+ static enum efx_mcdi_filter_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_MCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = efx_mcdi_filter_get_unsafe_id(rc);
+ if (!nic_data->workaround_26807 && !encap_type) {
+ /* Also need an Ethernet broadcast filter */
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags, 0);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
+ rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n",
+ rc);
+ if (rollback) {
+ /* Roll back the mc_def filter */
+ efx_mcdi_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ *id);
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ return rc;
+ }
+ } else {
+ EFX_WARN_ON_PARANOID(
+ vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_mcdi_filter_get_unsafe_id(rc);
+ }
+ }
+ rc = 0;
+ } else {
+ /* mapping from encap types to default filter IDs (unicast) */
+ static enum efx_mcdi_filter_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_UCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = rc;
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /*
+ * Do not install unspecified VID if VLAN filtering is enabled.
+ * Do not install all specified VIDs if VLAN filtering is disabled.
+ */
+ if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
+ return;
+
+ /* Insert/renew unicast filters */
+ if (table->uc_promisc) {
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
+ false, false);
+ efx_mcdi_filter_insert_addr_list(efx, vlan, false, false);
+ } else {
+ /*
+ * If any of the filters failed to insert, fall back to
+ * promiscuous mode - add in the uc_def filter. But keep
+ * our individual unicast filters.
+ */
+ if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false))
+ efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ false, false);
+ }
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ false, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+
+ /*
+ * Insert/renew multicast filters
+ *
+ * If changing promiscuous state with cascaded multicast filters, remove
+ * old filters first, so that packets are dropped rather than duplicated
+ */
+ if (nic_data->workaround_26807 &&
+ table->mc_promisc_last != table->mc_promisc)
+ efx_mcdi_filter_remove_old(efx);
+ if (table->mc_promisc) {
+ if (nic_data->workaround_26807) {
+ /*
+ * If we failed to insert promiscuous filters, rollback
+ * and fall back to individual multicast filters
+ */
+ if (efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true)) {
+ /* Changing promisc state, so remove old filters */
+ efx_mcdi_filter_remove_old(efx);
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ } else {
+ /*
+ * If we failed to insert promiscuous filters, don't
+ * rollback. Regardless, also insert the mc_list,
+ * unless it's incomplete due to overflow
+ */
+ efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, false);
+ if (!table->mc_overflow)
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ } else {
+ /*
+ * If any filters failed to insert, rollback and fall back to
+ * promiscuous mode - mc_def filter and maybe broadcast. If
+ * that fails, roll back again and insert as many of our
+ * individual multicast filters as we can.
+ */
+ if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ if (nic_data->workaround_26807)
+ efx_mcdi_filter_remove_old(efx);
+ if (efx_mcdi_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true))
+ efx_mcdi_filter_insert_addr_list(efx, vlan,
+ true, false);
+ }
+ }
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ true, false);
+ efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+}
+
+int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_mcdi_filter_table *table;
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
+ rc = efx_mcdi_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ break;
+ rc = 0;
+ }
+
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
+}
+
+u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_mcdi_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return count;
+}
+
+u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
+}
+
+s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_mcdi_filter_table *table;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] =
+ efx_mcdi_filter_make_filter_id(
+ efx_mcdi_filter_pri(table, spec),
+ filter_idx);
+ }
+ }
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
+ return count;
+}
+
+static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) do { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ } while (0)
+
+ if (encap) {
+ /* encap filters must specify encap type */
+ match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ /* and imply ethertype and ip proto */
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ /* VLAN tags refer to the outer packet */
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ /* everything else refers to the inner packet */
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, IFRM_SRC_IP);
+ MAP_FLAG(LOC_HOST, IFRM_DST_IP);
+ MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
+ MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
+ MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
+ MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
+ MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
+ MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
+ } else {
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+ }
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
+
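MAP_FLAG, like MAP_FILTER_TO_MCDI_FLAG earlier in this file, uses a
clear-and-compare idiom: strip one bit from a working copy, and if the value
changed the flag was present, so emit its translation; any bits left over at
the end are unmapped and fail the whole conversion. A compact sketch of the
idiom (bit values hypothetical):

    /* Translate from a source flag encoding to a destination one;
     * return -1 if any source bit has no mapping.
     */
    static int translate(unsigned int src)
    {
            int dst = 0;

            if (src & 0x1) { src &= ~0x1u; dst |= 0x10; }
            if (src & 0x2) { src &= ~0x2u; dst |= 0x20; }

            return src ? -1 : dst;	/* leftovers => unsupported */
    }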
+bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
+ bool encap,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+ int mf;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++) {
+ mf = efx_mcdi_filter_match_flags_from_mcdi(encap,
+ table->rx_match_mcdi_flags[match_pri]);
+ if (mf == match_flags)
+ return true;
+ }
+
+ return false;
+}
+
+static int
+efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
+ struct efx_mcdi_filter_table *table,
+ bool encap)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ size_t outlen;
+ int rc;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ return rc;
+
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
+ table->rx_match_count++;
+ }
+ }
+
+ return 0;
+}
+
+int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_mcdi_filter_table *table;
+ struct efx_mcdi_filter_vlan *vlan;
+ int rc;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->rx_match_count = 0;
+ rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
+ if (rc)
+ goto fail;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
+ if (rc)
+ goto fail;
+ if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(efx_mcdi_filter_match_supported(table, false,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
+ efx_mcdi_filter_match_supported(table, false,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
+ netif_info(efx, probe, net_dev,
+ "VLAN filters are not supported in this firmware variant\n");
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS,
+ sizeof(*table->entry)));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ table->mc_promisc_last = false;
+ table->vlan_filter =
+ !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ INIT_LIST_HEAD(&table->vlan_list);
+ init_rwsem(&table->lock);
+
+ efx->filter_state = table;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
+fail:
+ kfree(table);
+ return rc;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+void efx_mcdi_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int invalid_filters = 0, failed = 0;
+ struct efx_mcdi_filter_vlan *vlan;
+ struct efx_filter_spec *spec;
+ struct efx_rss_context *ctx;
+ unsigned int filter_idx;
+ u32 mcdi_flags;
+ int match_pri;
+ int rc, i;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ if (!table)
+ return;
+
+ down_write(&table->lock);
+ mutex_lock(&efx->rss_lock);
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
+ match_pri = 0;
+ while (match_pri < table->rx_match_count &&
+ table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
+ ++match_pri;
+ if (match_pri >= table->rx_match_count) {
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (spec->rss_context)
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
+ else
+ ctx = &efx->rss_context;
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ if (!ctx) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
+ spec->rss_context);
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
+ spec->rss_context);
+ invalid_filters++;
+ goto not_restored;
+ }
+ }
+
+ rc = efx_mcdi_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ ctx, false);
+ if (rc)
+ failed++;
+
+ if (rc) {
+not_restored:
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
+ if (vlan->default_filters[i] == filter_idx)
+ vlan->default_filters[i] =
+ EFX_EF10_FILTER_ID_INVALID;
+
+ kfree(spec);
+ efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+
+ /*
+ * This can happen validly if the MC's capabilities have changed, so
+ * is not an error.
+ */
+ if (invalid_filters)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Did not restore %u filters that are now unsupported.\n",
+ invalid_filters);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore %u filters\n", failed);
+ else
+ nic_data->must_restore_filters = false;
+}
+
+void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ efx_mcdi_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
+ /*
+ * If we were called without locking, then it's not safe to free
+ * the table as others might be using it. So we just WARN, leak
+ * the memory, and potentially get an inconsistent filter table
+ * state.
+ * This should never actually happen.
+ */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_mcdi_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ if (rc)
+ netif_info(efx, drv, efx->net_dev,
+ "%s: filter %04x remove failed\n",
+ __func__, filter_idx);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
+static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+
+ efx_rwsem_assert_write_locked(&table->lock);
+
+ if (*id != EFX_EF10_FILTER_ID_INVALID) {
+ filter_idx = efx_mcdi_filter_get_unsafe_id(*id);
+ if (!table->entry[filter_idx].spec)
+ netif_dbg(efx, drv, efx->net_dev,
+ "marked null spec old %04x:%04x\n", *id,
+ filter_idx);
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ }
+}
+
+/* Mark old per-VLAN filters that may need to be removed */
+static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ unsigned int i;
+
+ for (i = 0; i < table->dev_uc_count; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]);
+ for (i = 0; i < table->dev_mc_count; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]);
+}
+
+/*
+ * Mark old filters that may need to be removed.
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+static void efx_mcdi_filter_mark_old(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+
+ down_write(&table->lock);
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ _efx_mcdi_filter_vlan_mark_old(efx, vlan);
+ up_write(&table->lock);
+}
+
+int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+ unsigned int i;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ vlan = efx_mcdi_filter_find_vlan(efx, vid);
+ if (WARN_ON(vlan)) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ return -EALREADY;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
+
+ list_add_tail(&vlan->list, &table->vlan_list);
+
+ if (efx_dev_registered(efx))
+ efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
+
+ return 0;
+}
+
+static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_mcdi_filter_vlan *vlan)
+{
+ unsigned int i;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ list_del(&vlan->list);
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->uc[i]);
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->mc[i]);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
+ efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->default_filters[i]);
+
+ kfree(vlan);
+}
+
+void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_mcdi_filter_vlan *vlan;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ vlan = efx_mcdi_filter_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u not found in filter state\n", vid);
+ return;
+ }
+
+ efx_mcdi_filter_del_vlan_internal(efx, vlan);
+}
+
+struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx,
+ u16 vid)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ list_for_each_entry(vlan, &table->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct efx_mcdi_filter_vlan *vlan, *next_vlan;
+
+ /* See comment in efx_mcdi_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
+ efx_mcdi_filter_del_vlan_internal(efx, vlan);
+}
+
+static void efx_mcdi_filter_uc_addr_list(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *uc;
+ unsigned int i;
+
+ table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->uc_promisc = true;
+ break;
+ }
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ i++;
+ }
+
+ table->dev_uc_count = i;
+}
+
+static void efx_mcdi_filter_mc_addr_list(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *mc;
+ unsigned int i;
+
+ table->mc_overflow = false;
+ table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
+
+ i = 0;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->mc_promisc = true;
+ table->mc_overflow = true;
+ break;
+ }
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ i++;
+ }
+
+ table->dev_mc_count = i;
+}
+
+/*
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_mcdi_filter_table_remove() is possible
+ */
+void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_mcdi_filter_vlan *vlan;
+ bool vlan_filter;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_mcdi_filter_mark_old(efx);
+
+ /*
+ * Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_mcdi_filter_uc_addr_list(efx);
+ efx_mcdi_filter_mc_addr_list(efx);
+ netif_addr_unlock_bh(net_dev);
+
+ /*
+ * If VLAN filtering changes, all old filters are finally removed.
+ * Do it in advance to avoid conflicts for unicast untagged and
+ * VLAN 0 tagged filters.
+ */
+ vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (table->vlan_filter != vlan_filter) {
+ table->vlan_filter = vlan_filter;
+ efx_mcdi_filter_remove_old(efx);
+ }
+
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
+
+ efx_mcdi_filter_remove_old(efx);
+ table->mc_promisc_last = table->mc_promisc;
+}
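+
+/*
+ * Editorial note on the pattern above: this is a mark-and-sweep.
+ * efx_mcdi_filter_mark_old() tags every address filter with
+ * EFX_EF10_FILTER_FLAG_AUTO_OLD, the per-VLAN sync re-inserts filters
+ * for addresses that are still wanted (clearing the tag; that helper is
+ * defined elsewhere in this file, so this is an assumption here), and
+ * efx_mcdi_filter_remove_old() sweeps whatever is still tagged.
+ */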
+
+#ifdef CONFIG_RFS_ACCEL
+
+bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_filter_spec *spec, saved_spec;
+ struct efx_mcdi_filter_table *table;
+ struct efx_arfs_rule *rule = NULL;
+ bool ret = true, force = false;
+ u16 arfs_id;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ spec = efx_mcdi_filter_entry_spec(table, filter_idx);
+
+ if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
+ goto out_unlock;
+
+ spin_lock_bh(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always return 0 to ARFS. */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, spec);
+ if (!rule)
+ /* ARFS table doesn't know of this filter, so remove it */
+ goto expire;
+ arfs_id = rule->arfs_id;
+ ret = efx_rps_check_rule(rule, filter_idx, &force);
+ if (force)
+ goto expire;
+ if (!ret) {
+ spin_unlock_bh(&efx->rps_hash_lock);
+ goto out_unlock;
+ }
+ }
+ if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
+ ret = false;
+ else if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+expire:
+ saved_spec = *spec; /* remove operation will kfree spec */
+ spin_unlock_bh(&efx->rps_hash_lock);
+ /*
+ * At this point (since we dropped the lock), another thread might queue
+ * up a fresh insertion request (but the actual insertion will be held
+ * up by our possession of the filter table lock). In that case, it
+ * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
+ * the rule is not removed by efx_rps_hash_del() below.
+ */
+ if (ret)
+ ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority,
+ filter_idx, true) == 0;
+ /*
+ * While we can't safely dereference rule (we dropped the lock), we can
+ * still test it for NULL.
+ */
+ if (ret && rule) {
+ /* Expiring, so remove entry from ARFS table */
+ spin_lock_bh(&efx->rps_hash_lock);
+ efx_rps_hash_del(efx, &saved_spec);
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
+out_unlock:
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return ret;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
+ 1 << RSS_MODE_HASH_DST_ADDR_LBN)
+#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
+ 1 << RSS_MODE_HASH_DST_PORT_LBN)
+#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
+ 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
+ (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
+ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
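+
+/*
+ * Decoded for readability (editorial): RSS_CONTEXT_FLAGS_DEFAULT enables
+ * the Toeplitz hash for IPv4, IPv6, TCP/IPv4 and TCP/IPv6, hashes TCP
+ * flows on the 4-tuple (addresses and ports), and hashes UDP and
+ * other-IP flows on addresses only.
+ */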
+
+int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
+{
+ /*
+ * Firmware had a bug (sfc bug 61952) where it would not actually
+ * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
+ * This meant that it would always contain whatever was previously
+ * in the MCDI buffer. Fortunately, all firmware versions with
+ * this bug have the same default flags value for a newly-allocated
+ * RSS context, and the only time we want to get the flags is just
+ * after allocating. Moreover, the response has a 32-bit hole
+ * where the context ID would be in the request, so we can use an
+ * overlength buffer in the request and pre-fill the flags field
+ * with what we believe the default to be. Thus if the firmware
+ * has the bug, it will leave our pre-filled value in the flags
+ * field of the response, and we will get the right answer.
+ *
+ * However, this does mean that this function should NOT be used if
+ * the RSS context flags might not be their defaults - it is ONLY
+ * reliably correct for a newly-allocated RSS context.
+ */
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ /* Check we have a hole for the context ID */
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
+ RSS_CONTEXT_FLAGS_DEFAULT);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc == 0) {
+ if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
+ rc = -EIO;
+ else
+ *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
+ }
+ return rc;
+}
+
+/*
+ * Attempt to enable 4-tuple UDP hashing on the specified RSS context.
+ * If we fail, we just leave the RSS context at its default hash settings,
+ * which is safe but may slightly reduce performance.
+ * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
+ * just need to set the UDP ports flags (for both IP versions).
+ */
+void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ u32 flags;
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
+
+ if (efx_mcdi_get_rss_context_flags(efx, ctx->context_id, &flags) != 0)
+ return;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
+ flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
+ if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL))
+ /* Succeeded, so UDP 4-tuple is now enabled */
+ ctx->rx_hash_udp_4tuple = true;
+}
+
+static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
+ struct efx_rss_context *ctx,
+ unsigned *context_size)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ return -EOPNOTSUPP;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ if (context_size)
+ *context_size = rss_spread;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ efx_mcdi_set_rss_context_flags(efx, ctx);
+
+ return 0;
+}
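+
+/*
+ * Example (editorial): with efx->rss_spread == 6, an exclusive context
+ * covers all 6 queues, while a shared one is limited to
+ * rounddown_pow_of_two(6) == 4 queues (and further capped by
+ * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); callers can see the actual
+ * size via *context_size, as efx_mcdi_pf_rx_push_rss_config() does
+ * when deciding whether to warn.
+ */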
+
+static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table, const u8 *key)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ /* This iterates over the length of efx->rss_context.rx_indir_table, but
+ * copies bytes from rx_indir_table. That's because the latter is a
+ * pointer rather than an array, but should have the same length.
+ * The efx->rss_context.rx_hash_key loop below is similar.
+ */
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
+ WARN_ON(rc != 0);
+ }
+ efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+}
+
+static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
+ context_size);
+
+ if (rc != 0)
+ return rc;
+
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ return 0;
+}
+
+static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ u32 old_rx_rss_context = efx->rss_context.context_id;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
+ NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ }
+
+ rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
+ rx_indir_table, key);
+ if (rc != 0)
+ goto fail2;
+
+ if (efx->rss_context.context_id != old_rx_rss_context &&
+ old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rss_context.rx_indir_table)
+ memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (key != efx->rss_context.rx_hash_key)
+ memcpy(efx->rss_context.rx_hash_key, key,
+ efx->type->rx_hash_key_size);
+
+ return 0;
+
+fail2:
+ if (old_rx_rss_context != efx->rss_context.context_id) {
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
+ efx->rss_context.context_id = old_rx_rss_context;
+ }
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
+ if (rc)
+ return rc;
+ }
+
+ if (!rx_indir_table) /* Delete this context */
+ return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
+
+ rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
+ rx_indir_table, key);
+ if (rc)
+ return rc;
+
+ memcpy(ctx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
+
+ return 0;
+}
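+
+/*
+ * Usage note (editorial): passing rx_indir_table == NULL deletes the
+ * context rather than configuring it; callers that tear contexts down
+ * are expected to use this path rather than calling the free helper
+ * directly, since that helper is static to this file.
+ */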
+
+int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
+ size_t outlen;
+ int rc, i;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
+ MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
+
+ if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
+ return -ENOENT;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
+ tablebuf, sizeof(tablebuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
+ RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
+ ctx->context_id);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
+ keybuf, sizeof(keybuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
+ ctx->rx_hash_key[i] = MCDI_PTR(
+ keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
+
+ return 0;
+}
+
+int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
+{
+ int rc;
+
+ mutex_lock(&efx->rss_lock);
+ rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
+ mutex_unlock(&efx->rss_lock);
+ return rc;
+}
+
+void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_rss_context *ctx;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (!nic_data->must_restore_rss_contexts)
+ return;
+
+ list_for_each_entry(ctx, &efx->rss_context.list, list) {
+ /* previous NIC RSS context is gone */
+ ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ /* so try to allocate a new one */
+ rc = efx_mcdi_rx_push_rss_context_config(efx, ctx,
+ ctx->rx_indir_table,
+ ctx->rx_hash_key);
+ if (rc)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore RSS context %u, rc=%d"
+ "; RSS filters may fail to be applied\n",
+ ctx->user_id, rc);
+ }
+ nic_data->must_restore_rss_contexts = false;
+}
+
+int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table,
+ const u8 *key)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+
+ rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0;
+ i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_mcdi_filter_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one of"
+ " different size."
+ " Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one but"
+ " could not apply custom"
+ " indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
+
+int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
+ __attribute__ ((unused)))
+{
+ if (user)
+ return -EOPNOTSUPP;
+ if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
new file mode 100644
index 000000000000..1837f4f5d661
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FILTERS_H
+#define EFX_MCDI_FILTERS_H
+
+#include "net_driver.h"
+#include "filter.h"
+#include "mcdi_pcol.h"
+
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+
+enum efx_mcdi_filter_default_filters {
+ EFX_EF10_BCAST,
+ EFX_EF10_UCDEF,
+ EFX_EF10_MCDEF,
+ EFX_EF10_VXLAN4_UCDEF,
+ EFX_EF10_VXLAN4_MCDEF,
+ EFX_EF10_VXLAN6_UCDEF,
+ EFX_EF10_VXLAN6_MCDEF,
+ EFX_EF10_NVGRE4_UCDEF,
+ EFX_EF10_NVGRE4_MCDEF,
+ EFX_EF10_NVGRE6_UCDEF,
+ EFX_EF10_NVGRE6_MCDEF,
+ EFX_EF10_GENEVE4_UCDEF,
+ EFX_EF10_GENEVE4_MCDEF,
+ EFX_EF10_GENEVE6_UCDEF,
+ EFX_EF10_GENEVE6_MCDEF,
+
+ EFX_EF10_NUM_DEFAULT_FILTERS
+};
+
+/* Per-VLAN filters information */
+struct efx_mcdi_filter_vlan {
+ struct list_head list;
+ u16 vid;
+ u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+ u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+ u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
+};
+
+struct efx_mcdi_dev_addr {
+ u8 addr[ETH_ALEN];
+};
+
+struct efx_mcdi_filter_table {
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+ u32 rx_match_mcdi_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
+ unsigned int rx_match_count;
+
+ struct rw_semaphore lock; /* Protects entries */
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
+/* unused flag 1UL */
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+/* Shadow of net_device address lists, guarded by mac_lock */
+ struct efx_mcdi_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+ struct efx_mcdi_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count;
+ int dev_mc_count;
+ bool uc_promisc;
+ bool mc_promisc;
+/* Whether in multicast promiscuous mode when last changed */
+ bool mc_promisc_last;
+ bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ bool vlan_filter;
+ struct list_head vlan_list;
+};
+
+int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+void efx_mcdi_filter_table_remove(struct efx_nic *efx);
+void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+
+/*
+ * The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define EFX_MCDI_FILTER_TBL_ROWS 8192
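+
+/*
+ * Editorial cross-reference: the filter IDs handed back to the stack
+ * combine a match-priority number with a row index in this table; see
+ * efx_mcdi_filter_make_filter_id() in mcdi_filters.c for the encoding.
+ */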
+
+bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
+ bool encap,
+ enum efx_filter_match_flags match_flags);
+
+void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx);
+s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace_equal);
+int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_mcdi_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec);
+
+u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+
+void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx);
+int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid);
+struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx, u16 vid);
+void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);
+
+void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
+int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx,
+ const u32 *rx_indir_table,
+ const u8 *key);
+int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table,
+ const u8 *key);
+int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
+ __attribute__ ((unused)));
+int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
+int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
+ u32 *flags);
+void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);
+
+static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here */
+}
+
+bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
new file mode 100644
index 000000000000..dcfe78b0fa5a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_functions.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+int efx_mcdi_free_vis(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
+ return rc;
+}
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ if (vi_base)
+ *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ if (allocated_vis)
+ *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
+
+int efx_mcdi_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc, i;
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ if (v2) {
+ /* Use the new generic approach to specifying event queue
+ * configuration, requesting lower latency or higher throughput.
+ * The options that actually get used appear in the output.
+ */
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_V2_IN_FLAG_TYPE,
+ MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
+ } else {
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
+ }
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %d using event queue flags %08x\n",
+ channel->channel,
+ MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
+ return rc;
+}
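+
+/*
+ * Sizing sketch (editorial): a queue of eventq_mask + 1 entries occupies
+ * (eventq_mask + 1) * sizeof(efx_qword_t) bytes, and INIT_EVQ describes
+ * that buffer as a list of EFX_BUF_SIZE-sized DMA chunks; hence the loop
+ * above steps dma_addr by EFX_BUF_SIZE for each of
+ * buf.len / EFX_BUF_SIZE addresses.
+ */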
+
+void efx_mcdi_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+void efx_mcdi_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ef10_nic_data *nic_data;
+ dma_addr_t dma_addr;
+ size_t inlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+
+ nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ do {
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
+ /* This flag was removed from mcdi_pcol.h for
+ * the non-_EXT version of INIT_TXQ. However,
+ * firmware still honours it.
+ */
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+ tx_queue->timestamping);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc == -ENOSPC && tso_v2) {
+ /* Retry without TSOv2 if we're short on contexts. */
+ tso_v2 = false;
+ netif_warn(efx, probe, efx->net_dev,
+ "TSOv2 context not available to segment in "
+ "hardware. TCP performance may be reduced.\n"
+ );
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
+ MC_CMD_INIT_TXQ_EXT_IN_LEN,
+ NULL, 0, rc);
+ goto fail;
+ }
+ } while (rc);
+
+ return 0;
+
+fail:
+ return rc;
+}
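+
+/*
+ * Control-flow note (editorial): if INIT_TXQ fails with -ENOSPC while
+ * TSOv2 was requested, the loop clears tso_v2 and retries once without
+ * it; any other error is displayed and returned, and success leaves
+ * rc == 0, which exits the loop.
+ */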
+
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ dma_addr_t dma_addr;
+ size_t inlen;
+ int rc;
+ int i;
+
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ NULL, 0, NULL);
+ if (rc)
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
+}
+
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
+{
+ switch (vi_window_mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ efx->vi_stride = 8192;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ efx->vi_stride = 16384;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ efx->vi_stride = 65536;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev,
+ "Unrecognised VI window mode %d\n",
+ vi_window_mode);
+ return -EIO;
+ }
+ netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+ efx->vi_stride);
+ return 0;
+}
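+
+/*
+ * For illustration: MODE_16K yields a vi_stride of 16384, so on the
+ * usual EF10 layout (an editorial assumption rather than something this
+ * helper enforces) the registers for VI n start at offset n * 16384
+ * within the function's BAR.
+ */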
+
+int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h
new file mode 100644
index 000000000000..ca4a5ac1a66b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_functions.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_FUNCTIONS_H
+#define EFX_MCDI_FUNCTIONS_H
+
+int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
+ unsigned int max_vis, unsigned int *vi_base,
+ unsigned int *allocated_vis);
+int efx_mcdi_free_vis(struct efx_nic *efx);
+
+int efx_mcdi_ev_probe(struct efx_channel *channel);
+int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
+void efx_mcdi_ev_remove(struct efx_channel *channel);
+void efx_mcdi_ev_fini(struct efx_channel *channel);
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
+void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
+int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
+int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
+int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb7cde4980ed..ab5227b13ae6 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -14,106 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
-
-struct efx_mcdi_phy_data {
- u32 flags;
- u32 type;
- u32 supported_cap;
- u32 channel;
- u32 port;
- u32 stats_mask;
- u8 name[20];
- u32 media;
- u32 mmd_mask;
- u8 revision[20];
- u32 forced_cap;
-};
-
-static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
- BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EIO;
- goto fail;
- }
-
- cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
- cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
- cfg->supported_cap =
- MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
- cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
- cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
- cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
- memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
- sizeof(cfg->name));
- cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
- cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
- memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
- sizeof(cfg->revision));
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
- u32 flags, u32 loopback_mode,
- u32 loopback_speed)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
- int rc;
-
- BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
-
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
- MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- return rc;
-}
-
-static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
- MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
- rc = -EIO;
- goto fail;
- }
-
- *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
+#include "mcdi_port_common.h"
static int efx_mcdi_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
@@ -168,246 +69,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
-{
- #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
- linkset)
-
- bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
- switch (media) {
- case MC_CMD_MEDIA_KX4:
- SET_BIT(Backplane);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseKX_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseKX4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- SET_BIT(40000baseKR4_Full);
- break;
-
- case MC_CMD_MEDIA_XFP:
- case MC_CMD_MEDIA_SFP_PLUS:
- case MC_CMD_MEDIA_QSFP_PLUS:
- SET_BIT(FIBRE);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- SET_BIT(40000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
- SET_BIT(100000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
- SET_BIT(25000baseCR_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
- SET_BIT(50000baseCR2_Full);
- break;
-
- case MC_CMD_MEDIA_BASE_T:
- SET_BIT(TP);
- if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- SET_BIT(10baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- SET_BIT(10baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- SET_BIT(100baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- SET_BIT(100baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- SET_BIT(1000baseT_Half);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- break;
- }
-
- if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- SET_BIT(Pause);
- if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- SET_BIT(Asym_Pause);
- if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- SET_BIT(Autoneg);
-
- #undef SET_BIT
-}
-
-static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
-{
- u32 result = 0;
-
- #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
- linkset)
-
- if (TEST_BIT(10baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (TEST_BIT(10baseT_Full))
- result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (TEST_BIT(100baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (TEST_BIT(100baseT_Full))
- result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (TEST_BIT(1000baseT_Half))
- result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
- result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
- result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
- result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (TEST_BIT(100000baseCR4_Full))
- result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
- if (TEST_BIT(25000baseCR_Full))
- result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
- if (TEST_BIT(50000baseCR2_Full))
- result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
- if (TEST_BIT(Pause))
- result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (TEST_BIT(Asym_Pause))
- result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (TEST_BIT(Autoneg))
- result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
-
- #undef TEST_BIT
-
- return result;
-}
-
-static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- enum efx_phy_mode mode, supported;
- u32 flags;
-
- /* TODO: Advertise the capabilities supported by this PHY */
- supported = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
- supported |= PHY_MODE_TX_DISABLED;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
- supported |= PHY_MODE_LOW_POWER;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
- supported |= PHY_MODE_OFF;
-
- mode = efx->phy_mode & supported;
-
- flags = 0;
- if (mode & PHY_MODE_TX_DISABLED)
- flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
- if (mode & PHY_MODE_LOW_POWER)
- flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
- if (mode & PHY_MODE_OFF)
- flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
-
- return flags;
-}
-
-static u8 mcdi_to_ethtool_media(u32 media)
-{
- switch (media) {
- case MC_CMD_MEDIA_XAUI:
- case MC_CMD_MEDIA_CX4:
- case MC_CMD_MEDIA_KX4:
- return PORT_OTHER;
-
- case MC_CMD_MEDIA_XFP:
- case MC_CMD_MEDIA_SFP_PLUS:
- case MC_CMD_MEDIA_QSFP_PLUS:
- return PORT_FIBRE;
-
- case MC_CMD_MEDIA_BASE_T:
- return PORT_TP;
-
- default:
- return PORT_OTHER;
- }
-}
-
-static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- /* Fall through */
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
-/* The semantics of the ethtool FEC mode bitmask are not well defined,
- * particularly the meaning of combinations of bits. Which means we get to
- * define our own semantics, as follows:
- * OFF overrides any other bits, and means "disable all FEC" (with the
- * exception of 25G KR4/CR4, where it is not possible to reject it if AN
- * partner requests it).
- * AUTO on its own means use cable requirements and link partner autoneg with
- * fw-default preferences for the cable type.
- * AUTO and either RS or BASER means use the specified FEC type if cable and
- * link partner support it, otherwise autoneg/fw-default.
- * RS or BASER alone means use the specified FEC type if cable and link partner
- * support it and either requests it, otherwise no FEC.
- * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
- * partner support it, preferring RS to BASER.
- */
-static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
-{
- u32 ret = 0;
-
- if (ethtool_cap & ETHTOOL_FEC_OFF)
- return 0;
-
- if (ethtool_cap & ETHTOOL_FEC_AUTO)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
- if (ethtool_cap & ETHTOOL_FEC_RS)
- ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
- if (ethtool_cap & ETHTOOL_FEC_BASER)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
- return ret;
-}
-
-/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function
- * can never produce, (baser xor rs) and neither req; the implementation below
- * maps both of those to AUTO. This should never matter, and it's not clear
- * what a better mapping would be anyway.
- */
-static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
-{
- bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
- rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
- baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
- : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
- baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
- : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
-
- if (!baser && !rs)
- return ETHTOOL_FEC_OFF;
- return (rs_req ? ETHTOOL_FEC_RS : 0) |
- (baser_req ? ETHTOOL_FEC_BASER : 0) |
- (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
-}
-
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
@@ -527,58 +188,6 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
- * supported by the link partner. Warn the user if this isn't the case
- */
-static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 rmtadv;
-
- /* The link partner capabilities are only relevant if the
- * link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- return;
-
- /* If flow control autoneg is supported and enabled, then fine */
- if (efx->wanted_fc & EFX_FC_AUTO)
- return;
-
- rmtadv = 0;
- if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- rmtadv |= ADVERTISED_Pause;
- if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- rmtadv |= ADVERTISED_Asym_Pause;
-
- if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
- netif_err(efx, link, efx->net_dev,
- "warning: link partner doesn't support pause frames");
-}
-
-static bool efx_mcdi_phy_poll(struct efx_nic *efx)
-{
- struct efx_link_state old_state = efx->link_state;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- efx->link_state.up = false;
- else
- efx_mcdi_phy_decode_link(
- efx, &efx->link_state,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
-
- return !efx_link_state_equal(&efx->link_state, &old_state);
-}
-
static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -666,58 +275,6 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
- struct ethtool_fecparam *fec)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
- u32 caps, active, speed; /* MCDI format */
- bool is_25g = false;
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
- return -EOPNOTSUPP;
-
- /* behaviour for 25G/50G links depends on 25G BASER bit */
- speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
- is_25g = speed == 25000 || speed == 50000;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
- fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
- /* BASER is never supported on 100G */
- if (speed == 100000)
- fec->fec &= ~ETHTOOL_FEC_BASER;
-
- active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
- switch (active) {
- case MC_CMD_FEC_NONE:
- fec->active_fec = ETHTOOL_FEC_OFF;
- break;
- case MC_CMD_FEC_BASER:
- fec->active_fec = ETHTOOL_FEC_BASER;
- break;
- case MC_CMD_FEC_RS:
- fec->active_fec = ETHTOOL_FEC_RS;
- break;
- default:
- netif_warn(efx, hw, efx->net_dev,
- "Firmware reports unrecognised FEC_TYPE %u\n",
- active);
- /* We don't know what firmware has picked. AUTO is as good a
- * "can't happen" value as any other.
- */
- fec->active_fec = ETHTOOL_FEC_AUTO;
- break;
- }
-
- return 0;
-}
-
static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
const struct ethtool_fecparam *fec)
{
@@ -745,27 +302,6 @@ static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
return 0;
}
-static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
- size_t outlen;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EIO;
- if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
- return -EINVAL;
-
- return 0;
-}
-
static const char *const mcdi_sft9001_cable_diag_names[] = {
"cable.pairA.length",
"cable.pairB.length",
@@ -1139,84 +675,6 @@ u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
return phy_data->supported_cap;
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
- [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
- [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
- [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
- [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
-};
-
-void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 fcntl;
- MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
-
- BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
-
- /* This has no effect on EF10 */
- ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
- efx->net_dev->dev_addr);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* Set simple MAC filter for Siena */
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
- SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
-
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
- SET_MAC_IN_FLAG_INCLUDE_FCS,
- !!(efx->net_dev->features & NETIF_F_RXFCS));
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
@@ -1348,17 +806,3 @@ void efx_mcdi_port_remove(struct efx_nic *efx)
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
-
-/* Get physical port number (EF10 only; on Siena it is same as PF number) */
-int efx_mcdi_port_get_number(struct efx_nic *efx)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- return rc;
-
- return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
new file mode 100644
index 000000000000..a6a072ba46d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mcdi_port_common.h"
+#include "efx_common.h"
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+ cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+ cfg->supported_cap =
+ MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+ cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+ cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+ memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+ sizeof(cfg->name));
+ cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+ memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+ sizeof(cfg->revision));
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
+{
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
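The Pause/Asym_Pause handling above implements the usual resolution: Pause alone requests RX and TX pause, Asym_Pause alone requests TX only, and both together request RX only. A stand-alone sketch of the set-then-XOR sequence, with illustrative stand-ins for the EFX_FC_* flags (not the driver's real values):

	#include <stdio.h>

	enum { FC_RX = 1, FC_TX = 2 };	/* stand-ins for EFX_FC_RX/EFX_FC_TX */

	static unsigned int resolve_wanted_fc(int pause, int asym_pause)
	{
		unsigned int fc = 0;

		if (pause)
			fc |= FC_TX | FC_RX;
		if (asym_pause)
			fc ^= FC_TX;	/* Asym_Pause toggles the TX half */
		return fc;
	}

	int main(void)
	{
		printf("Pause only:      %u\n", resolve_wanted_fc(1, 0)); /* RX|TX */
		printf("Asym_Pause only: %u\n", resolve_wanted_fc(0, 1)); /* TX */
		printf("both:            %u\n", resolve_wanted_fc(1, 1)); /* RX */
		return 0;
	}
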
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
+{
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ switch (media) {
+ case MC_CMD_MEDIA_KX4:
+ SET_BIT(Backplane);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseKX_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseKX4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseKR4_Full);
+ break;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ SET_BIT(FIBRE);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
+ break;
+
+ case MC_CMD_MEDIA_BASE_T:
+ SET_BIT(TP);
+ if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ SET_BIT(10baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ SET_BIT(10baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ SET_BIT(100baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ SET_BIT(100baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ SET_BIT(1000baseT_Half);
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ SET_BIT(1000baseT_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ SET_BIT(10000baseT_Full);
+ break;
+ }
+
+ if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ SET_BIT(Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ SET_BIT(Asym_Pause);
+ if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ SET_BIT(Autoneg);
+
+ #undef SET_BIT
+}
+
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
+{
+ u32 result = 0;
+
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+ if (TEST_BIT(10baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+ if (TEST_BIT(100baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+ if (TEST_BIT(100baseT_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+ if (TEST_BIT(1000baseT_Half))
+ result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+ result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
+ result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+ if (TEST_BIT(Asym_Pause))
+ result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+ if (TEST_BIT(Autoneg))
+ result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+ #undef TEST_BIT
+
+ return result;
+}
+
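Note that mcdi_to_ethtool_linkset() and ethtool_linkset_to_mcdi_cap() are not exact inverses: several ethtool modes fold into one capability bit (for instance 1000baseT_Full and 1000baseKX_Full both map to MC_CMD_PHY_CAP_1000FDX_LBN), and the media type decides which mode comes back out. A minimal user-space sketch of that lossiness, with made-up bit values standing in for the real mcdi_pcol.h definitions:

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative stand-in for one MCDI capability bit. */
	enum { CAP_1000FDX = 1u << 5 };
	/* Illustrative stand-ins for two ethtool link modes. */
	enum { MODE_1000baseT_Full = 1u << 0, MODE_1000baseKX_Full = 1u << 1 };

	static unsigned int to_linkset_base_t(unsigned int cap)
	{
		return (cap & CAP_1000FDX) ? MODE_1000baseT_Full : 0;
	}

	static unsigned int to_cap(unsigned int linkset)
	{
		return (linkset & (MODE_1000baseT_Full | MODE_1000baseKX_Full)) ?
		       CAP_1000FDX : 0;
	}

	int main(void)
	{
		/* KX advertisement folds into the same capability bit... */
		assert(to_cap(MODE_1000baseKX_Full) == CAP_1000FDX);
		/* ...so a round trip on BASE-T media reports it as baseT. */
		assert(to_linkset_base_t(to_cap(MODE_1000baseKX_Full)) ==
		       MODE_1000baseT_Full);
		puts("round trip is media-dependent");
		return 0;
	}
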
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ enum efx_phy_mode mode, supported;
+ u32 flags;
+
+ /* TODO: Advertise the capabilities supported by this PHY */
+ supported = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
+ supported |= PHY_MODE_TX_DISABLED;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
+ supported |= PHY_MODE_LOW_POWER;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
+ supported |= PHY_MODE_OFF;
+
+ mode = efx->phy_mode & supported;
+
+ flags = 0;
+ if (mode & PHY_MODE_TX_DISABLED)
+ flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
+ if (mode & PHY_MODE_LOW_POWER)
+ flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
+ if (mode & PHY_MODE_OFF)
+ flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
+
+ return flags;
+}
+
+u8 mcdi_to_ethtool_media(u32 media)
+{
+ switch (media) {
+ case MC_CMD_MEDIA_XAUI:
+ case MC_CMD_MEDIA_CX4:
+ case MC_CMD_MEDIA_KX4:
+ return PORT_OTHER;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ return PORT_FIBRE;
+
+ case MC_CMD_MEDIA_BASE_T:
+ return PORT_TP;
+
+ default:
+ return PORT_OTHER;
+ }
+}
+
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ /* Fall through */
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
+/* The semantics of the ethtool FEC mode bitmask are not well defined,
+ * particularly the meaning of combinations of bits, which means we get to
+ * define our own semantics, as follows:
+ * OFF overrides any other bits, and means "disable all FEC" (with the
+ * exception of 25G KR4/CR4, where it is not possible to reject it if AN
+ * partner requests it).
+ * AUTO on its own means use cable requirements and link partner autoneg with
+ * fw-default preferences for the cable type.
+ * AUTO and either RS or BASER means use the specified FEC type if cable and
+ * link partner support it, otherwise autoneg/fw-default.
+ * RS or BASER alone means use the specified FEC type if cable and link partner
+ * support it and either requests it, otherwise no FEC.
+ * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
+ * partner support it, preferring RS to BASER.
+ */
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
+{
+ u32 ret = 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap & ETHTOOL_FEC_AUTO)
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_RS)
+ ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_BASER)
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ return ret;
+}
+
+/* Invert ethtool_fec_caps_to_mcdi(). There are two combinations that function
+ * can never produce: (baser xor rs) with neither req bit set. The implementation
+ * below maps both of those to AUTO. This should never matter, and it's not clear
+ * what a better mapping would be anyway.
+ */
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+{
+ bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
+ rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
+ baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
+ baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
+ : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+
+ if (!baser && !rs)
+ return ETHTOOL_FEC_OFF;
+ return (rs_req ? ETHTOOL_FEC_RS : 0) |
+ (baser_req ? ETHTOOL_FEC_BASER : 0) |
+ (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
+}
+
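A quick user-space check of the AUTO fallback described above: a capability word where a FEC type is advertised but not requested (or vice versa) cannot have come from ethtool_fec_caps_to_mcdi(), so it decodes to AUTO. The bit values below are illustrative stand-ins, not the mcdi_pcol.h ones:

	#include <stdio.h>

	/* Illustrative stand-ins for the MCDI capability bits. */
	enum { RS = 1u << 0, RS_REQ = 1u << 1, BASER = 1u << 2, BASER_REQ = 1u << 3 };
	/* Illustrative stand-ins for the ethtool FEC bits. */
	enum { FEC_OFF = 1, FEC_AUTO = 2, FEC_RS = 4, FEC_BASER = 8 };

	static unsigned int caps_to_ethtool(unsigned int caps)
	{
		int rs = !!(caps & RS), rs_req = !!(caps & RS_REQ);
		int baser = !!(caps & BASER), baser_req = !!(caps & BASER_REQ);

		if (!baser && !rs)
			return FEC_OFF;
		return (rs_req ? FEC_RS : 0) |
		       (baser_req ? FEC_BASER : 0) |
		       (baser == baser_req && rs == rs_req ? 0 : FEC_AUTO);
	}

	int main(void)
	{
		printf("%u\n", caps_to_ethtool(RS | RS_REQ));	/* 4: plain FEC_RS */
		printf("%u\n", caps_to_ethtool(RS | BASER));	/* 2: the AUTO fallback */
		printf("%u\n", caps_to_ethtool(0));		/* 1: FEC_OFF */
		return 0;
	}
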
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case.
+ */
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 rmtadv;
+
+ /* The link partner capabilities are only relevant if the
+ * link supports flow control autonegotiation
+ */
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ return;
+
+ /* If flow control autoneg is supported and enabled, then fine */
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ return;
+
+ rmtadv = 0;
+ if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ rmtadv |= ADVERTISED_Pause;
+ if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ rmtadv |= ADVERTISED_Asym_Pause;
+
+ if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
+}
+
+bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ efx->link_state.up = false;
+ else
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
+ u32 caps, active, speed; /* MCDI format */
+ bool is_25g = false;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
+ return -EOPNOTSUPP;
+
+ /* behaviour for 25G/50G links depends on 25G BASER bit */
+ speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
+ is_25g = speed == 25000 || speed == 50000;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
+ fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
+ /* BASER is never supported on 100G */
+ if (speed == 100000)
+ fec->fec &= ~ETHTOOL_FEC_BASER;
+
+ active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
+ switch (active) {
+ case MC_CMD_FEC_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case MC_CMD_FEC_BASER:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ case MC_CMD_FEC_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ default:
+ netif_warn(efx, hw, efx->net_dev,
+ "Firmware reports unrecognised FEC_TYPE %u\n",
+ active);
+ /* We don't know what firmware has picked. AUTO is as good a
+ * "can't happen" value as any other.
+ */
+ fec->active_fec = ETHTOOL_FEC_AUTO;
+ break;
+ }
+
+ return 0;
+}
+
+int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ /* This has no effect on EF10 */
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS,
+ !!(efx->net_dev->features & NETIF_F_RXFCS));
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
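The flow-control selection in efx_mcdi_set_mac() is order-sensitive: the switch picks a forced mode, EFX_FC_AUTO then overrides it, and fc_disable overrides everything. Restated as a stand-alone sketch (the flag and mode values are stand-ins for the driver's and MCDI's real constants):

	#include <stdio.h>

	enum { FC_RX = 1, FC_TX = 2, FC_AUTO = 4 };			/* stand-ins */
	enum { FCNTL_OFF, FCNTL_RESPOND, FCNTL_BIDIR, FCNTL_AUTO };	/* stand-ins */

	static int resolve_fcntl(unsigned int wanted_fc, int fc_disable)
	{
		int fcntl;

		switch (wanted_fc) {
		case FC_RX | FC_TX:
			fcntl = FCNTL_BIDIR;
			break;
		case FC_RX:
			fcntl = FCNTL_RESPOND;
			break;
		default:
			fcntl = FCNTL_OFF;
			break;
		}
		if (wanted_fc & FC_AUTO)	/* autoneg overrides the forced mode */
			fcntl = FCNTL_AUTO;
		if (fc_disable)			/* a fault condition overrides both */
			fcntl = FCNTL_OFF;
		return fcntl;
	}

	int main(void)
	{
		printf("%d\n", resolve_fcntl(FC_RX | FC_TX, 0));		/* BIDIR */
		printf("%d\n", resolve_fcntl(FC_RX | FC_TX | FC_AUTO, 0));	/* AUTO */
		printf("%d\n", resolve_fcntl(FC_AUTO, 1));			/* OFF */
		return 0;
	}
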
+/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h
new file mode 100644
index 000000000000..b16f11265269
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_PORT_COMMON_H
+#define EFX_MCDI_PORT_COMMON_H
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode, u32 loopback_speed);
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
+u8 mcdi_to_ethtool_media(u32 media);
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl);
+u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
+bool efx_mcdi_phy_poll(struct efx_nic *efx);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
+ struct ethtool_fecparam *fec);
+int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_set_mac(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 1f88212be085..9f9886f222c8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -24,7 +24,6 @@
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
-#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
#include <net/xdp.h>
@@ -139,6 +138,8 @@ struct efx_special_buffer {
* freed when descriptor completes
* @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
* member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ * descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -153,7 +154,7 @@ struct efx_tx_buffer {
struct xdp_frame *xdpf;
};
union {
- efx_qword_t option;
+ efx_qword_t option; /* EF10 */
dma_addr_t dma_addr;
};
unsigned short flags;
@@ -743,13 +744,13 @@ union efx_multicast_hash {
struct vfdi_status;
/* The reserved RSS context value */
-#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
* struct efx_rss_context - A user-defined RSS context for filtering
* @list: node of linked list on which this struct is stored
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- * %EFX_EF10_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_EF10_RSS_CONTEXT_INVALID.
+ * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
* @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @rx_hash_key: Toeplitz hash key for this RSS context
@@ -1533,9 +1534,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return efx_channel_is_xdp_tx(channel) ||
- (channel->type && channel->type->want_txqs &&
- channel->type->want_txqs(channel));
+ return true;
}
static inline struct efx_tx_queue *
@@ -1613,6 +1612,15 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1f7c5717de75..6670fda8f35a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -9,9 +9,9 @@
#define EFX_NIC_H
#include <linux/net_tstamp.h>
-#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
#include "mcdi.h"
enum {
@@ -506,6 +506,9 @@ static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
tx_queue->efx->type->tx_write(tx_queue);
}
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
@@ -554,6 +557,7 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
+
void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
@@ -671,6 +675,7 @@ struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
+
int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index ef52b24ad9e7..a2042f16babc 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -21,6 +21,7 @@
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
+#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
@@ -32,60 +33,13 @@
/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16
-/* Number of RX buffers to recycle pages for. When creating the RX page recycle
- * ring, this number is divided by the number of buffers per page to calculate
- * the number of pages to store in the RX page recycle ring.
- */
-#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
-#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
-
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u
-/* This is the percentage fill level below which new RX descriptors
- * will be added to the RX descriptor ring.
- */
-static unsigned int rx_refill_threshold;
-
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
-/*
- * RX maximum head room required.
- *
- * This must be at least 1 to prevent overflow, plus one packet-worth
- * to allow pipelined receives.
- */
-#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
-
-static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
-{
- return page_address(buf->page) + buf->page_offset;
-}
-
-static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
-{
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
-#else
- const u8 *data = eh + efx->rx_packet_hash_offset;
- return (u32)data[0] |
- (u32)data[1] << 8 |
- (u32)data[2] << 16 |
- (u32)data[3] << 24;
-#endif
-}
-
-static inline struct efx_rx_buffer *
-efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
-{
- if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
- return efx_rx_buffer(rx_queue, 0);
- else
- return rx_buf + 1;
-}
-
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
unsigned int len)
@@ -94,301 +48,6 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
DMA_FROM_DEVICE);
}
-void efx_rx_config_page_split(struct efx_nic *efx)
-{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
- EFX_RX_BUF_ALIGNMENT);
- efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
- ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
- efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
- efx->rx_bufs_per_page;
- efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
- efx->rx_bufs_per_page);
-}
-
-/* Check the RX page recycle ring for a page that can be reused. */
-static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct page *page;
- struct efx_rx_page_state *state;
- unsigned index;
-
- index = rx_queue->page_remove & rx_queue->page_ptr_mask;
- page = rx_queue->page_ring[index];
- if (page == NULL)
- return NULL;
-
- rx_queue->page_ring[index] = NULL;
- /* page_remove cannot exceed page_add. */
- if (rx_queue->page_remove != rx_queue->page_add)
- ++rx_queue->page_remove;
-
- /* If page_count is 1 then we hold the only reference to this page. */
- if (page_count(page) == 1) {
- ++rx_queue->page_recycle_count;
- return page;
- } else {
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- ++rx_queue->page_recycle_failed;
- }
-
- return NULL;
-}
-
-/**
- * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates a batch of pages, maps them for DMA, and populates
- * struct efx_rx_buffers for each one. Return a negative error code or
- * 0 on success. If a single page can be used for multiple buffers,
- * then the page will either be inserted fully, or not at all.
- */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
- struct page *page;
- unsigned int page_offset;
- struct efx_rx_page_state *state;
- dma_addr_t dma_addr;
- unsigned index, count;
-
- count = 0;
- do {
- page = efx_reuse_page(rx_queue);
- if (page == NULL) {
- page = alloc_pages(__GFP_COMP |
- (atomic ? GFP_ATOMIC : GFP_KERNEL),
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr =
- dma_map_page(&efx->pci_dev->dev, page, 0,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
- }
- state = page_address(page);
- state->dma_addr = dma_addr;
- } else {
- state = page_address(page);
- dma_addr = state->dma_addr;
- }
-
- dma_addr += sizeof(struct efx_rx_page_state);
- page_offset = sizeof(struct efx_rx_page_state);
-
- do {
- page_offset += XDP_PACKET_HEADROOM;
- dma_addr += XDP_PACKET_HEADROOM;
-
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
- rx_buf->page = page;
- rx_buf->page_offset = page_offset + efx->rx_ip_align;
- rx_buf->len = efx->rx_dma_len;
- rx_buf->flags = 0;
- ++rx_queue->added_count;
- get_page(page);
- dma_addr += efx->rx_page_buf_step;
- page_offset += efx->rx_page_buf_step;
- } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
-
- rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
- } while (++count < efx->rx_pages_per_batch);
-
- return 0;
-}
-
-/* Unmap a DMA-mapped page. This function is only called for the final RX
- * buffer in a page.
- */
-static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
-
- if (page) {
- struct efx_rx_page_state *state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- }
-}
-
-static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs)
-{
- do {
- if (rx_buf->page) {
- put_page(rx_buf->page);
- rx_buf->page = NULL;
- }
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--num_bufs);
-}
-
-/* Attempt to recycle the page if there is an RX recycle ring; the page can
- * only be added if this is the final RX buffer, to prevent pages being used in
- * the descriptor ring and appearing in the recycle ring simultaneously.
- */
-static void efx_recycle_rx_page(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
-{
- struct page *page = rx_buf->page;
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- struct efx_nic *efx = rx_queue->efx;
- unsigned index;
-
- /* Only recycle the page after processing the final buffer. */
- if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
- return;
-
- index = rx_queue->page_add & rx_queue->page_ptr_mask;
- if (rx_queue->page_ring[index] == NULL) {
- unsigned read_index = rx_queue->page_remove &
- rx_queue->page_ptr_mask;
-
- /* The next slot in the recycle ring is available, but
- * increment page_remove if the read pointer currently
- * points here.
- */
- if (read_index == index)
- ++rx_queue->page_remove;
- rx_queue->page_ring[index] = page;
- ++rx_queue->page_add;
- return;
- }
- ++rx_queue->page_recycle_full;
- efx_unmap_rx_buffer(efx, rx_buf);
- put_page(rx_buf->page);
-}
-
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- /* Release the page reference we hold for the buffer. */
- if (rx_buf->page)
- put_page(rx_buf->page);
-
- /* If this is the last buffer in a page, unmap and free it. */
- if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- }
- rx_buf->page = NULL;
-}
-
-/* Recycle the pages that are used by buffers that have just been received. */
-static void efx_recycle_rx_pages(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
-{
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- do {
- efx_recycle_rx_page(channel, rx_buf);
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--n_frags);
-}
-
-static void efx_discard_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
-{
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- efx_recycle_rx_pages(channel, rx_buf, n_frags);
-
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@max_fill. If there is insufficient atomic
- * memory to do so, a slow fill will be scheduled.
- *
- * The caller must provide serialisation (none is used here). In practice,
- * this means this function must run from the NAPI handler, or be called
- * when NAPI is disabled.
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int fill_level, batch_size;
- int space, rc = 0;
-
- if (!rx_queue->refill_enabled)
- return;
-
- /* Calculate current fill level, and exit if we don't need to fill */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
- if (fill_level >= rx_queue->fast_fill_trigger)
- goto out;
-
- /* Record minimum fill level */
- if (unlikely(fill_level < rx_queue->min_fill)) {
- if (fill_level)
- rx_queue->min_fill = fill_level;
- }
-
- batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- space = rx_queue->max_fill - fill_level;
- EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filling descriptor ring from"
- " level %d to level %d\n",
- efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill);
-
-
- do {
- rc = efx_init_rx_buffers(rx_queue, atomic);
- if (unlikely(rc)) {
- /* Ensure that we don't leave the rx queue empty */
- efx_schedule_slow_fill(rx_queue);
- goto out;
- }
- } while ((space -= batch_size) >= batch_size);
-
- netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
- "RX queue %d fast-filled descriptor ring "
- "to level %d\n", efx_rx_queue_index(rx_queue),
- rx_queue->added_count - rx_queue->removed_count);
-
- out:
- if (rx_queue->notified_count != rx_queue->added_count)
- efx_nic_notify_rx_desc(rx_queue);
-}
-
-void efx_rx_slow_fill(struct timer_list *t)
-{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
-
- /* Post an event to cause NAPI to run and refill the queue */
- efx_nic_generate_fill_event(rx_queue);
- ++rx_queue->slow_fill_count;
-}
-
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
int len)
@@ -412,53 +71,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through GRO. GRO can handle pages
- * regardless of checksum state and skbs with a good checksum.
- */
-static void
-efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
- unsigned int n_frags, u8 *eh)
-{
- struct napi_struct *napi = &channel->napi_str;
- struct efx_nic *efx = channel->efx;
- struct sk_buff *skb;
-
- skb = napi_get_frags(napi);
- if (unlikely(!skb)) {
- struct efx_rx_queue *rx_queue;
-
- rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
- return;
- }
-
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
- PKT_HASH_TYPE_L3);
- skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
- skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
-
- for (;;) {
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buf->page, rx_buf->page_offset,
- rx_buf->len);
- rx_buf->page = NULL;
- skb->len += rx_buf->len;
- if (skb_shinfo(skb)->nr_frags == n_frags)
- break;
-
- rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
- }
-
- skb->data_len = skb->len;
- skb->truesize += n_frags * efx->rx_buffer_truesize;
-
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
- napi_gro_frags(napi);
-}
-
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
@@ -805,174 +417,6 @@ out:
channel->rx_pkt_n_frags = 0;
}
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- rx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating RX queue %d size %#x mask %#x\n",
- efx_rx_queue_index(rx_queue), efx->rxq_entries,
- rx_queue->ptr_mask);
-
- /* Allocate RX buffers */
- rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
- GFP_KERNEL);
- if (!rx_queue->buffer)
- return -ENOMEM;
-
- rc = efx_nic_probe_rx(rx_queue);
- if (rc) {
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
- }
-
- return rc;
-}
-
-static void efx_init_rx_recycle_ring(struct efx_nic *efx,
- struct efx_rx_queue *rx_queue)
-{
- unsigned int bufs_in_recycle_ring, page_ring_size;
-
- /* Set the RX recycle ring size */
-#ifdef CONFIG_PPC64
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
-#else
- if (iommu_present(&pci_bus_type))
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
- else
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
-#endif /* CONFIG_PPC64 */
-
- page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
- efx->rx_bufs_per_page);
- rx_queue->page_ring = kcalloc(page_ring_size,
- sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
-}
-
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned int max_fill, trigger, max_trigger;
- int rc = 0;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- /* Initialise ptr fields */
- rx_queue->added_count = 0;
- rx_queue->notified_count = 0;
- rx_queue->removed_count = 0;
- rx_queue->min_fill = -1U;
- efx_init_rx_recycle_ring(efx, rx_queue);
-
- rx_queue->page_remove = 0;
- rx_queue->page_add = rx_queue->page_ptr_mask + 1;
- rx_queue->page_recycle_count = 0;
- rx_queue->page_recycle_failed = 0;
- rx_queue->page_recycle_full = 0;
-
- /* Initialise limit fields */
- max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger =
- max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
- if (rx_refill_threshold != 0) {
- trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
- if (trigger > max_trigger)
- trigger = max_trigger;
- } else {
- trigger = max_trigger;
- }
-
- rx_queue->max_fill = max_fill;
- rx_queue->fast_fill_trigger = trigger;
- rx_queue->refill_enabled = true;
-
- /* Initialise XDP queue information */
- rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
- rx_queue->core_index);
-
- if (rc) {
- netif_err(efx, rx_err, efx->net_dev,
- "Failure to initialise XDP queue information rc=%d\n",
- rc);
- efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
- }
-
- /* Set up RX descriptor ring */
- efx_nic_init_rx(rx_queue);
-}
-
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
-{
- int i;
- struct efx_nic *efx = rx_queue->efx;
- struct efx_rx_buffer *rx_buf;
-
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- del_timer_sync(&rx_queue->slow_fill);
-
- /* Release RX buffers from the current read ptr to the write ptr */
- if (rx_queue->buffer) {
- for (i = rx_queue->removed_count; i < rx_queue->added_count;
- i++) {
- unsigned index = i & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- efx_fini_rx_buffer(rx_queue, rx_buf);
- }
- }
-
- /* Unmap and release the pages in the recycle ring. Remove the ring. */
- for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
- struct page *page = rx_queue->page_ring[i];
- struct efx_rx_page_state *state;
-
- if (page == NULL)
- continue;
-
- state = page_address(page);
- dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
- PAGE_SIZE << efx->rx_buffer_order,
- DMA_FROM_DEVICE);
- put_page(page);
- }
- kfree(rx_queue->page_ring);
- rx_queue->page_ring = NULL;
-
- if (rx_queue->xdp_rxq_info_valid)
- xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
-}
-
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
-{
- netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
-
- efx_nic_remove_rx(rx_queue);
-
- kfree(rx_queue->buffer);
- rx_queue->buffer = NULL;
-}
-
-
-module_param(rx_refill_threshold, uint, 0444);
-MODULE_PARM_DESC(rx_refill_threshold,
- "RX descriptor ring refill threshold (%)");
-
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_work(struct work_struct *data)
@@ -1206,37 +650,3 @@ bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
}
#endif /* CONFIG_RFS_ACCEL */
-
-/**
- * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
- * @spec: Specification to test
- *
- * Return: %true if the specification is a non-drop RX filter that
- * matches a local MAC address I/G bit value of 1 or matches a local
- * IPv4 or IPv6 address value in the respective multicast address
- * range. Otherwise %false.
- */
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
-{
- if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
- spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
- return false;
-
- if (spec->match_flags &
- (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
- is_multicast_ether_addr(spec->loc_mac))
- return true;
-
- if ((spec->match_flags &
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
- (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
- if (spec->ether_type == htons(ETH_P_IP) &&
- ipv4_is_multicast(spec->loc_host[0]))
- return true;
- if (spec->ether_type == htons(ETH_P_IPV6) &&
- ((const u8 *)spec->loc_host)[0] == 0xff)
- return true;
- }
-
- return false;
-}
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
new file mode 100644
index 000000000000..ee8beb87bdc1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
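rx_refill_threshold is a percentage of the queue's max_fill: when non-zero, efx_init_rx_queue() derives the fast-fill trigger from it, clamped so a refill batch always fits. Roughly, as a sketch of that selection:

	#include <stdio.h>

	/* Sketch of the trigger selection done in efx_init_rx_queue(). */
	static unsigned int pick_trigger(unsigned int max_fill,
					 unsigned int max_trigger,
					 unsigned int threshold_pct)
	{
		unsigned int trigger = max_trigger;

		if (threshold_pct) {
			trigger = max_fill *
				  (threshold_pct < 100 ? threshold_pct : 100) / 100;
			if (trigger > max_trigger)
				trigger = max_trigger;
		}
		return trigger;
	}

	int main(void)
	{
		/* illustrative numbers: 1024-entry fill limit, batch of 32 */
		printf("%u\n", pick_trigger(1024, 1024 - 32, 50));	/* 512 */
		printf("%u\n", pick_trigger(1024, 1024 - 32, 0));	/* 992 */
		return 0;
	}
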
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
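To put numbers on the head-room requirement: with, say, a 9000-byte maximum frame and 2048 usable bytes per buffer (both hypothetical; the real values come from EFX_MAX_FRAME_LEN() and EFX_RX_USR_BUF_SIZE), a packet can span ceil(9000 / 2048) = 5 buffers, so EFX_RXD_HEAD_ROOM works out to 6 descriptors:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int max_frame = 9000;	/* hypothetical frame length */
		unsigned int buf_size = 2048;	/* hypothetical usable buffer size */
		unsigned int max_frags = DIV_ROUND_UP(max_frame, buf_size);

		printf("max frags %u, head room %u\n", max_frags, 1 + max_frags);
		return 0;
	}
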
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ unsigned int index;
+ struct page *page;
+
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
+
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
+ }
+
+ return NULL;
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ struct page *page = rx_buf->page;
+ unsigned int index;
+
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
+ return;
+
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned int read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
+
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
+void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (iommu_present(&pci_bus_type))
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
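+
+/* Mask example: assuming two buffers per page (illustrative), 4096 / 2 =
+ * 2048 is already a power of two, so page_ptr_mask is 0x7ff and ring
+ * slots are addressed as counter & page_ptr_mask; this is why the ring
+ * size is rounded up to a power of two above.
+ */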
+
+static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ int i;
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ unsigned int max_fill, trigger, max_trigger;
+ struct efx_nic *efx = rx_queue->efx;
+ int rc = 0;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
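+
+/* Fill-level example (all values illustrative): with rxq_entries = 1024
+ * and EFX_RX_MAX_FRAGS = 4, max_fill = 1024 - 5 = 1019.  Four pages per
+ * batch of two buffers each give max_trigger = 1019 - 8 = 1011.  Loading
+ * the module with rx_refill_threshold=50 yields trigger = 1019 * 50 /
+ * 100 = 509, so refilling starts once the ring is about half empty.
+ */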
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_rx_buffer *rx_buf;
+ int i;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned int index = i & rx_queue->ptr_mask;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ efx_fini_rx_recycle_ring(rx_queue);
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+void efx_rx_slow_fill(struct timer_list *t)
+{
+ struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffer for each one. Returns a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ unsigned int page_offset, index, count;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_page_state *state;
+ struct efx_rx_buffer *rx_buf;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
+
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+ XDP_PACKET_HEADROOM,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
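+
+/* Page-split example (illustrative; XDP_PACKET_HEADROOM is 256 in
+ * current kernels): rx_dma_len = 1536, rx_ip_align = 2 and a 64-byte
+ * EFX_RX_BUF_ALIGNMENT give rx_page_buf_step = ALIGN(1794, 64) = 1856.
+ * With order-0 pages and an 8-byte struct efx_rx_page_state this yields
+ * rx_bufs_per_page = (4096 - 8) / 1856 = 2, rx_buffer_truesize = 2048
+ * and rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4.
+ */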
+
+/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+ do {
+ rc = efx_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
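+
+/* Refill-loop sketch (numbers illustrative): with batch_size = 8 and
+ * space = 20, efx_init_rx_buffers() runs twice: after the first batch
+ * space drops to 12 (still >= 8), after the second to 4 (< 8, stop),
+ * adding 16 buffers and leaving the rest for the next NAPI poll.
+ */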
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ napi_gro_frags(napi);
+}
+
+/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
+ * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
+ */
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx, *new;
+ u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ /* Search for first gap in the numbering */
+ list_for_each_entry(ctx, head, list) {
+ if (ctx->user_id != id)
+ break;
+ id++;
+ /* Check for wrap. If this happens, we have nearly 2^32
+ * allocated RSS contexts, which seems unlikely.
+ */
+ if (WARN_ON_ONCE(!id))
+ return NULL;
+ }
+
+ /* Create the new entry */
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+ new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ new->rx_hash_udp_4tuple = false;
+
+ /* Insert the new entry into the gap */
+ new->user_id = id;
+ list_add_tail(&new->list, &ctx->list);
+ return new;
+}
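+
+/* Gap-search example: with contexts of user_id 1, 2 and 4 on the list,
+ * the walk advances id to 3 and breaks at the entry with user_id 4;
+ * list_add_tail() then inserts the new entry directly before it, keeping
+ * the list sorted by user_id.
+ */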
+
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+{
+ struct list_head *head = &efx->rss_context.list;
+ struct efx_rss_context *ctx;
+
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ list_for_each_entry(ctx, head, list)
+ if (ctx->user_id == id)
+ return ctx;
+ return NULL;
+}
+
+void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+{
+ list_del(&ctx->list);
+ kfree(ctx);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+ ctx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+}
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
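+
+/* Example: a filter steering IPv4 destination 239.1.2.3 (inside the
+ * 224.0.0.0/4 multicast range) or IPv6 ff02::1 (first byte 0xff) to an
+ * RX queue is a multicast recipient; anything directed to
+ * EFX_FILTER_RX_DMAQ_ID_DROP never is.
+ */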
+
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+}
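+
+/* Note that efx_filter_spec_hash() and efx_filter_spec_equal() cover the
+ * same span of the spec, from outer_vid to the end of the structure, so
+ * specs that compare equal always hash alike.  The BUILD_BUG_ON enforces
+ * the 4-byte alignment jhash2() needs to read that span as u32 words.
+ */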
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force)
+{
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+ /* ARFS is currently updating this entry, leave it */
+ return false;
+ }
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+ /* ARFS tried and failed to update this, so it's probably out
+ * of date. Remove the filter and the ARFS rule entry.
+ */
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ *force = true;
+ return true;
+ } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+ /* ARFS has moved on, so old filter is not needed. Since we did
+ * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+ * not be removed by efx_rps_hash_del() subsequently.
+ */
+ *force = true;
+ return true;
+ }
+ /* Remove it iff ARFS wants to. */
+ return true;
+}
+
+static
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ u32 hash = efx_filter_spec_hash(spec);
+
+ lockdep_assert_held(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table)
+ return NULL;
+ return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec))
+ return rule;
+ }
+ return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ *new = false;
+ return rule;
+ }
+ }
+ rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+ *new = true;
+ if (rule) {
+ memcpy(&rule->spec, spec, sizeof(rule->spec));
+ hlist_add_head(&rule->node, head);
+ }
+ return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (WARN_ON(!head))
+ return;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ /* Someone already reused the entry. We know that if
+ * this check doesn't fire (i.e. filter_id == REMOVING)
+ * then the REMOVING mark was put there by our caller,
+ * because the caller is holding a lock on the filter table and
+ * only holders of that lock set REMOVING.
+ */
+ if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+ return;
+ hlist_del(node);
+ kfree(rule);
+ return;
+ }
+ }
+ /* We didn't find it. */
+ WARN_ON(1);
+}
+#endif
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ goto out_unlock;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ channel->rfs_expire_index = 0;
+ channel->rfs_filter_count = 0;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
+ efx->type->filter_table_remove(efx);
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+#endif
+out_unlock:
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ cancel_delayed_work_sync(&channel->filter_work);
+ kfree(channel->rps_flow_id);
+ }
+#endif
+ down_write(&efx->filter_sem);
+ efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
new file mode 100644
index 000000000000..c41f12a89477
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_COMMON_H
+#define EFX_RX_COMMON_H
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
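+
+/* For example (values illustrative; the real constants are defined
+ * elsewhere in the driver): a 9216-byte maximum frame split into
+ * 2048-byte usable buffers needs DIV_ROUND_UP(9216, 2048) = 5 fragments.
+ */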
+
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
+{
+ return page_address(buf->page) + buf->page_offset;
+}
+
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_hash_offset;
+
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
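+
+/* Both branches above produce the same little-endian result: prefix
+ * bytes 0x78 0x56 0x34 0x12 decode to the hash 0x12345678 whether read
+ * as a __le32 or assembled byte by byte.
+ */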
+
+void efx_rx_slow_fill(struct timer_list *t);
+
+void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
+
+void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct page *page,
+ unsigned int page_offset,
+ u16 flags);
+void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs);
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_rx_config_page_split(struct efx_nic *efx);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh);
+
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
+void efx_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
+
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force);
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new);
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+#endif
+
+int efx_probe_filters(struct efx_nic *efx);
+void efx_remove_filters(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 8474cf8ea7d3..1ae369022d7d 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -783,7 +785,7 @@ void efx_selftest_async_cancel(struct efx_nic *efx)
cancel_delayed_work_sync(&efx->selftest_work);
}
-void efx_selftest_async_work(struct work_struct *data)
+static void efx_selftest_async_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
selftest_work.work);
@@ -802,3 +804,8 @@ void efx_selftest_async_work(struct work_struct *data)
channel->channel, cpu);
}
}
+
+void efx_selftest_async_init(struct efx_nic *efx)
+{
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+}
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a3553816d92c..ca88ebb4f6b1 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -45,8 +45,8 @@ void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
int pkt_len);
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
unsigned flags);
+void efx_selftest_async_init(struct efx_nic *efx);
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);
-void efx_selftest_async_work(struct work_struct *data);
#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 81499244a4b4..baa464161626 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -14,12 +14,14 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
+#include "efx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index dfbdf05dcf79..83dcfcae3d4b 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_channels.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 00c1c4402451..04d7f41d7ed9 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -20,6 +20,7 @@
#include "io.h"
#include "nic.h"
#include "tx.h"
+#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"
@@ -56,72 +57,6 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
return efx_tx_get_copy_buffer(tx_queue, buffer);
}
-static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- if (buffer->unmap_len) {
- struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
- if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
- dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- buffer->unmap_len = 0;
- }
-
- if (buffer->flags & EFX_TX_BUF_SKB) {
- struct sk_buff *skb = (struct sk_buff *)buffer->skb;
-
- EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
- (*pkts_compl)++;
- (*bytes_compl) += skb->len;
- if (tx_queue->timestamping &&
- (tx_queue->completed_timestamp_major ||
- tx_queue->completed_timestamp_minor)) {
- struct skb_shared_hwtstamps hwtstamp;
-
- hwtstamp.hwtstamp =
- efx_ptp_nic_to_kernel_time(tx_queue);
- skb_tstamp_tx(skb, &hwtstamp);
-
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
- }
- dev_consume_skb_any((struct sk_buff *)buffer->skb);
- netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
- "TX queue %d transmission id %x complete\n",
- tx_queue->queue, tx_queue->read_count);
- } else if (buffer->flags & EFX_TX_BUF_XDP) {
- xdp_return_frame_rx_napi(buffer->xdpf);
- }
-
- buffer->len = 0;
- buffer->flags = 0;
-}
-
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
- /* Header and payload descriptor for each output segment, plus
- * one for every input fragment boundary within a segment
- */
- unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
- /* Possibly one more per segment for option descriptors */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
- max_descs += EFX_TSO_MAX_SEGS;
-
- /* Possibly more for PCIe page boundaries within input fragments */
- if (PAGE_SIZE > EFX_PAGE_SIZE)
- max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
- return max_descs;
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
@@ -333,125 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */
-static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr,
- size_t len)
-{
- const struct efx_nic_type *nic_type = tx_queue->efx->type;
- struct efx_tx_buffer *buffer;
- unsigned int dma_len;
-
- /* Map the fragment taking account of NIC-dependent DMA limits. */
- do {
- buffer = efx_tx_queue_get_insert_buffer(tx_queue);
- dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
-
- buffer->len = dma_len;
- buffer->dma_addr = dma_addr;
- buffer->flags = EFX_TX_BUF_CONT;
- len -= dma_len;
- dma_addr += dma_len;
- ++tx_queue->insert_count;
- } while (len);
-
- return buffer;
-}
-
-/* Map all data from an SKB for DMA and create descriptors on the queue.
- */
-static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
- unsigned int segment_count)
-{
- struct efx_nic *efx = tx_queue->efx;
- struct device *dma_dev = &efx->pci_dev->dev;
- unsigned int frag_index, nr_frags;
- dma_addr_t dma_addr, unmap_addr;
- unsigned short dma_flags;
- size_t len, unmap_len;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- frag_index = 0;
-
- /* Map header data. */
- len = skb_headlen(skb);
- dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
- dma_flags = EFX_TX_BUF_MAP_SINGLE;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
-
- if (segment_count) {
- /* For TSO we need to put the header in to a separate
- * descriptor. Map this separately if necessary.
- */
- size_t header_len = skb_transport_header(skb) - skb->data +
- (tcp_hdr(skb)->doff << 2u);
-
- if (header_len != len) {
- tx_queue->tso_long_headers++;
- efx_tx_map_chunk(tx_queue, dma_addr, header_len);
- len -= header_len;
- dma_addr += header_len;
- }
- }
-
- /* Add descriptors for each fragment. */
- do {
- struct efx_tx_buffer *buffer;
- skb_frag_t *fragment;
-
- buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
-
- /* The final descriptor for a fragment is responsible for
- * unmapping the whole fragment.
- */
- buffer->flags = EFX_TX_BUF_CONT | dma_flags;
- buffer->unmap_len = unmap_len;
- buffer->dma_offset = buffer->dma_addr - unmap_addr;
-
- if (frag_index >= nr_frags) {
- /* Store SKB details with the final buffer for
- * the completion.
- */
- buffer->skb = skb;
- buffer->flags = EFX_TX_BUF_SKB | dma_flags;
- return 0;
- }
-
- /* Move on to the next fragment. */
- fragment = &skb_shinfo(skb)->frags[frag_index++];
- len = skb_frag_size(fragment);
- dma_addr = skb_frag_dma_map(dma_dev, fragment,
- 0, len, DMA_TO_DEVICE);
- dma_flags = 0;
- unmap_len = len;
- unmap_addr = dma_addr;
-
- if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
- return -EIO;
- } while (1);
-}
-
-/* Remove buffers put into a tx_queue for the current packet.
- * None of the buffers must have an skb attached.
- */
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
- unsigned int insert_count)
-{
- struct efx_tx_buffer *buffer;
- unsigned int bytes_compl = 0;
- unsigned int pkts_compl = 0;
-
- /* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != insert_count) {
- --tx_queue->insert_count;
- buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
- }
-}
-
/*
* Fallback to software TSO.
*
@@ -473,12 +289,9 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
dev_consume_skb_any(skb);
skb = segments;
- while (skb) {
- next = skb->next;
- skb->next = NULL;
-
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
- skb = next;
}
return 0;
@@ -687,41 +500,6 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
return i;
}
-/* Remove packets from the TX queue
- *
- * This removes packets from the TX queue, up to and including the
- * specified index.
- */
-static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int stop_index, read_ptr;
-
- stop_index = (index + 1) & tx_queue->ptr_mask;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
-
- while (read_ptr != stop_index) {
- struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
-
- if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
- unlikely(buffer->len == 0)) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX queue %d spurious TX completion id %x\n",
- tx_queue->queue, read_ptr);
- efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
- return;
- }
-
- efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-
- ++tx_queue->read_count;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
- }
-}
-
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels). On Falcon, the TX
* completion events will be directed back to the CPU that transmitted
@@ -834,173 +612,3 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
net_dev->num_tc = num_tc;
return 0;
}
-
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- unsigned fill_level;
- struct efx_nic *efx = tx_queue->efx;
- struct efx_tx_queue *txq2;
- unsigned int pkts_compl = 0, bytes_compl = 0;
-
- EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
-
- efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
- tx_queue->pkts_compl += pkts_compl;
- tx_queue->bytes_compl += bytes_compl;
-
- if (pkts_compl > 1)
- ++tx_queue->merge_events;
-
- /* See if we need to restart the netif queue. This memory
- * barrier ensures that we write read_count (inside
- * efx_dequeue_buffers()) before reading the queue status.
- */
- smp_mb();
- if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled) &&
- likely(netif_device_present(efx->net_dev))) {
- txq2 = efx_tx_queue_partner(tx_queue);
- fill_level = max(tx_queue->insert_count - tx_queue->read_count,
- txq2->insert_count - txq2->read_count);
- if (fill_level <= efx->txq_wake_thresh)
- netif_tx_wake_queue(tx_queue->core_txq);
- }
-
- /* Check whether the hardware queue is now empty */
- if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
- if (tx_queue->read_count == tx_queue->old_write_count) {
- smp_mb();
- tx_queue->empty_read_count =
- tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
- }
- }
-}
-
-static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
-{
- return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
-}
-
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned int entries;
- int rc;
-
- /* Create the smallest power-of-two aligned ring */
- entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
- EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
- tx_queue->ptr_mask = entries - 1;
-
- netif_dbg(efx, probe, efx->net_dev,
- "creating TX queue %d size %#x mask %#x\n",
- tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
-
- /* Allocate software ring */
- tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
- GFP_KERNEL);
- if (!tx_queue->buffer)
- return -ENOMEM;
-
- tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
- sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
- if (!tx_queue->cb_page) {
- rc = -ENOMEM;
- goto fail1;
- }
-
- /* Allocate hardware ring */
- rc = efx_nic_probe_tx(tx_queue);
- if (rc)
- goto fail2;
-
- return 0;
-
-fail2:
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
-fail1:
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
- return rc;
-}
-
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
-
- netif_dbg(efx, drv, efx->net_dev,
- "initialising TX queue %d\n", tx_queue->queue);
-
- tx_queue->insert_count = 0;
- tx_queue->write_count = 0;
- tx_queue->packet_write_count = 0;
- tx_queue->old_write_count = 0;
- tx_queue->read_count = 0;
- tx_queue->old_read_count = 0;
- tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
- tx_queue->xmit_more_available = false;
- tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
- tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
- tx_queue->completed_timestamp_major = 0;
- tx_queue->completed_timestamp_minor = 0;
-
- tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
- /* Set up default function pointers. These may get replaced by
- * efx_nic_init_tx() based off NIC/queue capabilities.
- */
- tx_queue->handle_tso = efx_enqueue_skb_tso;
-
- /* Set up TX descriptor ring */
- efx_nic_init_tx(tx_queue);
-
- tx_queue->initialised = true;
-}
-
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_buffer *buffer;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- if (!tx_queue->buffer)
- return;
-
- /* Free any buffers left in the ring */
- while (tx_queue->read_count != tx_queue->write_count) {
- unsigned int pkts_compl = 0, bytes_compl = 0;
- buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
- ++tx_queue->read_count;
- }
- tx_queue->xmit_more_available = false;
- netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
-{
- int i;
-
- if (!tx_queue->buffer)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "destroying TX queue %d\n", tx_queue->queue);
- efx_nic_remove_tx(tx_queue);
-
- if (tx_queue->cb_page) {
- for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
- efx_nic_free_buffer(tx_queue->efx,
- &tx_queue->cb_page[i]);
- kfree(tx_queue->cb_page);
- tx_queue->cb_page = NULL;
- }
-
- kfree(tx_queue->buffer);
- tx_queue->buffer = NULL;
-}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
new file mode 100644
index 000000000000..b1571e9789d0
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "tx_common.h"
+
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
+ PAGE_SIZE >> EFX_TX_CB_ORDER);
+}
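+
+/* Example (assuming EFX_TX_CB_ORDER of 7, i.e. 128-byte copy buffers,
+ * an illustrative value): a 4KiB page holds 4096 >> 7 = 32 copy buffers,
+ * so a 1024-entry queue needs DIV_ROUND_UP(1024, 32) = 32 cb_page
+ * entries.
+ */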
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+ sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+ if (!tx_queue->cb_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Allocate hardware ring */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
+
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
+ /* Set up default function pointers. These may get replaced by
+ * efx_nic_init_tx() based off NIC/queue capabilities.
+ */
+ tx_queue->handle_tso = efx_enqueue_skb_tso;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_more_available = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ if (tx_queue->cb_page) {
+ for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
+ efx_nic_free_buffer(tx_queue->efx,
+ &tx_queue->cb_page[i]);
+ kfree(tx_queue->cb_page);
+ tx_queue->cb_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
+ EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_tx_queue *txq2;
+
+ EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
+
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * efx_dequeue_buffers()) before reading the queue status.
+ */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ txq2 = efx_tx_queue_partner(tx_queue);
+ fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+ txq2->insert_count - txq2->read_count);
+ if (fill_level <= efx->txq_wake_thresh)
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
+ */
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
+{
+ struct efx_tx_buffer *buffer;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts_compl = 0;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != insert_count) {
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ }
+}
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len)
+{
+ const struct efx_nic_type *nic_type = tx_queue->efx->type;
+ struct efx_tx_buffer *buffer;
+ unsigned int dma_len;
+
+ /* Map the fragment taking account of NIC-dependent DMA limits. */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ return buffer;
+}
+
+/* Map all data from an SKB for DMA and create descriptors on the queue. */
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int frag_index, nr_frags;
+ dma_addr_t dma_addr, unmap_addr;
+ unsigned short dma_flags;
+ size_t len, unmap_len;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ frag_index = 0;
+
+ /* Map header data. */
+ len = skb_headlen(skb);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+
+ if (segment_count) {
+ /* For TSO we need to put the header into a separate
+ * descriptor. Map this separately if necessary.
+ */
+ size_t header_len = skb_transport_header(skb) - skb->data +
+ (tcp_hdr(skb)->doff << 2u);
+
+ if (header_len != len) {
+ tx_queue->tso_long_headers++;
+ efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+ len -= header_len;
+ dma_addr += header_len;
+ }
+ }
+
+ /* Add descriptors for each fragment. */
+ do {
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
+
+ buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+
+ /* The final descriptor for a fragment is responsible for
+ * unmapping the whole fragment.
+ */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+ if (frag_index >= nr_frags) {
+ /* Store SKB details with the final buffer for
+ * the completion.
+ */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+ return 0;
+ }
+
+ /* Move on to the next fragment. */
+ fragment = &skb_shinfo(skb)->frags[frag_index++];
+ len = skb_frag_size(fragment);
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ dma_flags = 0;
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ return -EIO;
+ } while (1);
+}
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for option descriptors */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
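+
+/* Worked example (assuming EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17,
+ * typical but not guaranteed values): the baseline is 100 * 2 + 17 = 217
+ * descriptors, rising to 217 + 100 = 317 on EF10-class NICs that may add
+ * an option descriptor per segment.
+ */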
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
new file mode 100644
index 000000000000..f92f1fe3a87f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_COMMON_H
+#define EFX_TX_COMMON_H
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count);
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len);
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count);
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
index 37f048e1230c..bc26fa0d196f 100644
--- a/drivers/net/ethernet/sgi/Kconfig
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_SGI
bool "SGI devices"
default y
- depends on (PCI && SGI_IP27) || SGI_IP32
+ depends on (PCI && SGI_MFD_IOC3) || SGI_IP32
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,8 @@ if NET_VENDOR_SGI
config SGI_IOC3_ETH
bool "SGI IOC3 Ethernet"
- depends on PCI && SGI_IP27
+ depends on PCI && SGI_MFD_IOC3
+ select CRC16
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index d242906ae233..db6b2988e632 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -14,7 +14,6 @@
* o Use prefetching for large packets. What is a good lower limit for
* prefetching?
* o Use hardware checksums.
- * o Convert to using a IOC3 meta driver.
* o Which PHYs might possibly be attached to the IOC3 in real live,
* which workarounds are required for them? Do we ever have Lucent's?
* o For the 2.5 branch kill the mii-tool ioctls.
@@ -28,7 +27,8 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
@@ -37,28 +37,22 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/gfp.h>
-
-#ifdef CONFIG_SERIAL_8250
-#include <linux/serial_core.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
-#endif
-
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
#include <net/ip.h>
-#include <asm/byteorder.h>
-#include <asm/pgtable.h>
-#include <linux/uaccess.h>
-#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
+#define CRC16_INIT 0
+#define CRC16_VALID 0xb001
+
/* Number of RX buffers. This is tunable in the range of 16 <= x < 512.
* The value must be a power of two.
*/
@@ -85,7 +79,6 @@
/* Private per NIC data of the driver. */
struct ioc3_private {
struct ioc3_ethregs *regs;
- struct ioc3 *all_regs;
struct device *dma_dev;
u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
@@ -104,9 +97,6 @@ struct ioc3_private {
spinlock_t ioc3_lock;
struct mii_if_info mii;
- struct net_device *dev;
- struct pci_dev *pdev;
-
/* Members used by autonegotiation */
struct timer_list ioc3_timer;
};
@@ -114,7 +104,7 @@ struct ioc3_private {
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void ioc3_timeout(struct net_device *dev);
+static void ioc3_timeout(struct net_device *dev, unsigned int txqueue);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
@@ -123,10 +113,8 @@ static int ioc3_alloc_rx_bufs(struct net_device *dev);
static void ioc3_free_rx_bufs(struct ioc3_private *ip);
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);
-static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;
-
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
@@ -179,225 +167,61 @@ static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
#define ERBAR_VAL 0
#endif
-#define IOC3_SIZE 0x100000
-
-static inline u32 mcr_pack(u32 pulse, u32 sample)
-{
- return (pulse << 10) | (sample << 2);
-}
-
-static int nic_wait(u32 __iomem *mcr)
-{
- u32 m;
-
- do {
- m = readl(mcr);
- } while (!(m & 2));
-
- return m & 1;
-}
-
-static int nic_reset(u32 __iomem *mcr)
-{
- int presence;
-
- writel(mcr_pack(500, 65), mcr);
- presence = nic_wait(mcr);
-
- writel(mcr_pack(0, 500), mcr);
- nic_wait(mcr);
-
- return presence;
-}
-
-static inline int nic_read_bit(u32 __iomem *mcr)
-{
- int result;
-
- writel(mcr_pack(6, 13), mcr);
- result = nic_wait(mcr);
- writel(mcr_pack(0, 100), mcr);
- nic_wait(mcr);
-
- return result;
-}
-
-static inline void nic_write_bit(u32 __iomem *mcr, int bit)
+static int ioc3eth_nvmem_match(struct device *dev, const void *data)
{
- if (bit)
- writel(mcr_pack(6, 110), mcr);
- else
- writel(mcr_pack(80, 30), mcr);
+ const char *name = dev_name(dev);
+ const char *prefix = data;
+ int prefix_len;
- nic_wait(mcr);
-}
-
-/* Read a byte from an iButton device
- */
-static u32 nic_read_byte(u32 __iomem *mcr)
-{
- u32 result = 0;
- int i;
+ prefix_len = strlen(prefix);
+ if (strlen(name) < (prefix_len + 3))
+ return 0;
- for (i = 0; i < 8; i++)
- result = (result >> 1) | (nic_read_bit(mcr) << 7);
+ if (memcmp(prefix, name, prefix_len) != 0)
+ return 0;
- return result;
-}
-
-/* Write a byte to an iButton device
- */
-static void nic_write_byte(u32 __iomem *mcr, int byte)
-{
- int i, bit;
-
- for (i = 8; i; i--) {
- bit = byte & 1;
- byte >>= 1;
-
- nic_write_bit(mcr, bit);
- }
-}
-
-static u64 nic_find(u32 __iomem *mcr, int *last)
-{
- int a, b, index, disc;
- u64 address = 0;
-
- nic_reset(mcr);
- /* Search ROM. */
- nic_write_byte(mcr, 0xf0);
-
- /* Algorithm from ``Book of iButton Standards''. */
- for (index = 0, disc = 0; index < 64; index++) {
- a = nic_read_bit(mcr);
- b = nic_read_bit(mcr);
-
- if (a && b) {
- pr_warn("NIC search failed (not fatal).\n");
- *last = 0;
- return 0;
- }
-
- if (!a && !b) {
- if (index == *last) {
- address |= 1UL << index;
- } else if (index > *last) {
- address &= ~(1UL << index);
- disc = index;
- } else if ((address & (1UL << index)) == 0) {
- disc = index;
- }
- nic_write_bit(mcr, address & (1UL << index));
- continue;
- } else {
- if (a)
- address |= 1UL << index;
- else
- address &= ~(1UL << index);
- nic_write_bit(mcr, a);
- continue;
- }
- }
-
- *last = disc;
-
- return address;
-}
-
-static int nic_init(u32 __iomem *mcr)
-{
- const char *unknown = "unknown";
- const char *type = unknown;
- u8 crc;
- u8 serial[6];
- int save = 0, i;
-
- while (1) {
- u64 reg;
-
- reg = nic_find(mcr, &save);
-
- switch (reg & 0xff) {
- case 0x91:
- type = "DS1981U";
- break;
- default:
- if (save == 0) {
- /* Let the caller try again. */
- return -1;
- }
- continue;
- }
-
- nic_reset(mcr);
-
- /* Match ROM. */
- nic_write_byte(mcr, 0x55);
- for (i = 0; i < 8; i++)
- nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);
-
- reg >>= 8; /* Shift out type. */
- for (i = 0; i < 6; i++) {
- serial[i] = reg & 0xff;
- reg >>= 8;
- }
- crc = reg & 0xff;
- break;
- }
-
- pr_info("Found %s NIC", type);
- if (type != unknown)
- pr_cont(" registration number %pM, CRC %02x", serial, crc);
- pr_cont(".\n");
+ /* Found an nvmem device attached to our ioc3; now check for
+ * one-wire family codes 0x09, 0x89 and 0x91.
+ */
+ if (memcmp(name + prefix_len, "09-", 3) == 0)
+ return 1;
+ if (memcmp(name + prefix_len, "89-", 3) == 0)
+ return 1;
+ if (memcmp(name + prefix_len, "91-", 3) == 0)
+ return 1;
return 0;
}
-/* Read the NIC (Number-In-a-Can) device used to store the MAC address on
- * SN0 / SN00 nodeboards and PCI cards.
- */
-static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
+static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6])
{
- u32 __iomem *mcr = &ip->all_regs->mcr;
- int tries = 2; /* There may be some problem with the battery? */
- u8 nic[14];
+ struct nvmem_device *nvmem;
+ char prefix[24];
+ u8 prom[16];
+ int ret;
int i;
- writel(1 << 21, &ip->all_regs->gpcr_s);
+ snprintf(prefix, sizeof(prefix), "ioc3-%012llx-",
+ res->start & ~0xffff);
- while (tries--) {
- if (!nic_init(mcr))
- break;
- udelay(500);
- }
-
- if (tries < 0) {
- pr_err("Failed to read MAC address\n");
- return;
- }
+ nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- /* Read Memory. */
- nic_write_byte(mcr, 0xf0);
- nic_write_byte(mcr, 0x00);
- nic_write_byte(mcr, 0x00);
+ ret = nvmem_device_read(nvmem, 0, 16, prom);
+ nvmem_device_put(nvmem);
+ if (ret < 0)
+ return ret;
- for (i = 13; i >= 0; i--)
- nic[i] = nic_read_byte(mcr);
+ /* check if the PROM content is valid */
+ if (prom[0] != 0x0a ||
+ crc16(CRC16_INIT, prom, 13) != CRC16_VALID)
+ return -EINVAL;
- for (i = 2; i < 8; i++)
- ip->dev->dev_addr[i - 2] = nic[i];
-}
-
-/* Ok, this is hosed by design. It's necessary to know what machine the
- * NIC is in in order to know how to read the NIC address. We also have
- * to know if it's a PCI card or a NIC in on the node board ...
- */
-static void ioc3_get_eaddr(struct ioc3_private *ip)
-{
- ioc3_get_eaddr_nic(ip);
+ for (i = 0; i < 6; i++)
+ mac_addr[i] = prom[10 - i];
- pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
+ return 0;
}
static void __ioc3_set_mac_address(struct net_device *dev)
@@ -770,7 +594,7 @@ static int ioc3_mii_init(struct ioc3_private *ip)
u16 word;
for (i = 0; i < 32; i++) {
- word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
+ word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
found = 1;
@@ -975,12 +799,6 @@ static int ioc3_open(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
- netdev_err(dev, "Can't get irq %d\n", dev->irq);
-
- return -EAGAIN;
- }
-
ip->ehar_h = 0;
ip->ehar_l = 0;
@@ -1005,7 +823,6 @@ static int ioc3_close(struct net_device *dev)
netif_stop_queue(dev);
ioc3_stop(ip);
- free_irq(dev->irq, dev);
ioc3_free_rx_bufs(ip);
ioc3_clean_tx_ring(ip);
@@ -1013,147 +830,6 @@ static int ioc3_close(struct net_device *dev)
return 0;
}
-/* MENET cards have four IOC3 chips, which are attached to two sets of
- * PCI slot resources each: the primary connections are on slots
- * 0..3 and the secondaries are on 4..7
- *
- * All four ethernets are brought out to connectors; six serial ports
- * (a pair from each of the first three IOC3s) are brought out to
- * MiniDINs; all other subdevices are left swinging in the wind, leave
- * them disabled.
- */
-
-static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
-{
- struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
- int ret = 0;
-
- if (dev) {
- if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3)
- ret = 1;
- pci_dev_put(dev);
- }
-
- return ret;
-}
-
-static int ioc3_is_menet(struct pci_dev *pdev)
-{
- return !pdev->bus->parent &&
- ioc3_adjacent_is_ioc3(pdev, 0) &&
- ioc3_adjacent_is_ioc3(pdev, 1) &&
- ioc3_adjacent_is_ioc3(pdev, 2);
-}
-
-#ifdef CONFIG_SERIAL_8250
-/* Note about serial ports and consoles:
- * For console output, everyone uses the IOC3 UARTA (offset 0x178)
- * connected to the master node (look in ip27_setup_console() and
- * ip27prom_console_write()).
- *
- * For serial (/dev/ttyS0 etc), we can not have hardcoded serial port
- * addresses on a partitioned machine. Since we currently use the ioc3
- * serial ports, we use dynamic serial port discovery that the serial.c
- * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
- * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
- * than UARTB's, although UARTA on o200s has traditionally been known as
- * port 0. So, we just use one serial port from each ioc3 (since the
- * serial driver adds addresses to get to higher ports).
- *
- * The first one to do a register_console becomes the preferred console
- * (if there is no kernel command line console= directive). /dev/console
- * (ie 5, 1) is then "aliased" into the device number returned by the
- * "device" routine referred to in this console structure
- * (ip27prom_console_dev).
- *
- * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
- * around ioc3 oddities in this respect.
- *
- * The IOC3 serials use a 22MHz clock rate with an additional divider which
- * can be programmed in the SCR register if the DLAB bit is set.
- *
- * Register to interrupt zero because we share the interrupt with
- * the serial driver which we don't properly support yet.
- *
- * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
- * registered.
- */
-static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
-{
-#define COSMISC_CONSTANT 6
-
- struct uart_8250_port port = {
- .port = {
- .irq = 0,
- .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 0,
- .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
-
- .membase = (unsigned char __iomem *)uart,
- .mapbase = (unsigned long)uart,
- }
- };
- unsigned char lcr;
-
- lcr = readb(&uart->iu_lcr);
- writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
- writeb(COSMISC_CONSTANT, &uart->iu_scr);
- writeb(lcr, &uart->iu_lcr);
- readb(&uart->iu_lcr);
- serial8250_register_8250_port(&port);
-}
-
-static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
-{
- u32 sio_iec;
-
- /* We need to recognice and treat the fourth MENET serial as it
- * does not have an SuperIO chip attached to it, therefore attempting
- * to access it will result in bus errors. We call something an
- * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
- * in it. This is paranoid but we want to avoid blowing up on a
- * showhorn PCI box that happens to have 4 IOC3 cards in it so it's
- * not paranoid enough ...
- */
- if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
- return;
-
- /* Switch IOC3 to PIO mode. It probably already was but let's be
- * paranoid
- */
- writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
- readl(&ioc3->gpcr_s);
- writel(0, &ioc3->gppr[6]);
- readl(&ioc3->gppr[6]);
- writel(0, &ioc3->gppr[7]);
- readl(&ioc3->gppr[7]);
- writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
- readl(&ioc3->port_a.sscr);
- writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
- readl(&ioc3->port_b.sscr);
- /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
- sio_iec = readl(&ioc3->sio_iec);
- sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
- SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
- SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
- SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
- sio_iec |= SIO_IR_SA_INT;
- sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
- SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
- SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
- SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
- sio_iec |= SIO_IR_SB_INT;
- writel(sio_iec, &ioc3->sio_iec);
- writel(0, &ioc3->port_a.sscr);
- writel(0, &ioc3->port_b.sscr);
-
- ioc3_8250_register(&ioc3->sregs.uarta);
- ioc3_8250_register(&ioc3->sregs.uartb);
-}
-#endif
-
static const struct net_device_ops ioc3_netdev_ops = {
.ndo_open = ioc3_open,
.ndo_stop = ioc3_close,
@@ -1166,61 +842,52 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_set_mac_address = ioc3_set_mac_address,
};
-static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ioc3eth_probe(struct platform_device *pdev)
{
- unsigned int sw_physid1, sw_physid2;
- struct net_device *dev = NULL;
+ u32 sw_physid1, sw_physid2, vendor, model, rev;
struct ioc3_private *ip;
- struct ioc3 *ioc3;
- unsigned long ioc3_base, ioc3_size;
- u32 vendor, model, rev;
+ struct net_device *dev;
+ struct resource *regs;
+ u8 mac_addr[6];
int err;
- /* Configure DMA attributes. */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- pr_err("%s: No usable DMA configuration, aborting.\n",
- pci_name(pdev));
- goto out;
- }
-
- if (pci_enable_device(pdev))
- return -ENODEV;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* get the MAC address from the one-wire PROM */
+ if (ioc3eth_get_mac_addr(regs, mac_addr))
+ return -EPROBE_DEFER; /* not available yet */
dev = alloc_etherdev(sizeof(struct ioc3_private));
- if (!dev) {
- err = -ENOMEM;
- goto out_disable;
- }
-
- err = pci_request_regions(pdev, "ioc3");
- if (err)
- goto out_free;
+ if (!dev)
+ return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
ip = netdev_priv(dev);
- ip->dev = dev;
- ip->dma_dev = &pdev->dev;
-
- dev->irq = pdev->irq;
+ ip->dma_dev = pdev->dev.parent;
+ ip->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ip->regs)) {
+ err = PTR_ERR(ip->regs);
+ goto out_free;
+ }
- ioc3_base = pci_resource_start(pdev, 0);
- ioc3_size = pci_resource_len(pdev, 0);
- ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
- if (!ioc3) {
- pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
- pci_name(pdev));
+ ip->ssram = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ip->ssram)) {
- err = -ENOMEM;
+ err = PTR_ERR(ip->ssram);
- goto out_res;
+ goto out_free;
}
- ip->regs = &ioc3->eth;
- ip->ssram = ioc3->ssram;
- ip->all_regs = ioc3;
-#ifdef CONFIG_SERIAL_8250
- ioc3_serial_probe(pdev, ioc3);
-#endif
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto out_free;
+ }
+
+ if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt,
+ IRQF_SHARED, "ioc3-eth", dev)) {
+ dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq);
+ err = -ENODEV;
+ goto out_free;
+ }
spin_lock_init(&ip->ioc3_lock);
timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
@@ -1250,8 +917,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_init(dev);
- ip->pdev = pdev;
-
ip->mii.phy_id_mask = 0x1f;
ip->mii.reg_num_mask = 0x1f;
ip->mii.dev = dev;
@@ -1261,15 +926,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_mii_init(ip);
if (ip->mii.phy_id == -1) {
- pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
- pci_name(pdev));
+ netdev_err(dev, "Didn't find a PHY, goodbye.\n");
err = -ENODEV;
goto out_stop;
}
ioc3_mii_start(ip);
ioc3_ssram_disc(ip);
- ioc3_get_eaddr(ip);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
/* The IOC3-specific entries in the device structure. */
dev->watchdog_timeo = 5 * HZ;
@@ -1306,21 +970,14 @@ out_stop:
if (ip->tx_ring)
dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
ip->txr_dma);
-out_res:
- pci_release_regions(pdev);
out_free:
free_netdev(dev);
-out_disable:
- /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
- * such a weird device ...
- */
-out:
return err;
}
-static void ioc3_remove_one(struct pci_dev *pdev)
+static int ioc3eth_remove(struct platform_device *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = platform_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
@@ -1328,27 +985,11 @@ static void ioc3_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
-
- iounmap(ip->all_regs);
- pci_release_regions(pdev);
free_netdev(dev);
- /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
- * such a weird device ...
- */
-}
-static const struct pci_device_id ioc3_pci_tbl[] = {
- { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
+ return 0;
+}
-static struct pci_driver ioc3_driver = {
- .name = "ioc3-eth",
- .id_table = ioc3_pci_tbl,
- .probe = ioc3_probe,
- .remove = ioc3_remove_one,
-};
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -1479,7 +1120,7 @@ drop_packet:
return NETDEV_TX_OK;
}
-static void ioc3_timeout(struct net_device *dev)
+static void ioc3_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ioc3_private *ip = netdev_priv(dev);
@@ -1530,11 +1171,10 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
static void ioc3_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct ioc3_private *ip = netdev_priv(dev);
-
strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
+ strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ sizeof(info->bus_info));
}
static int ioc3_get_link_ksettings(struct net_device *dev,
@@ -1646,7 +1286,16 @@ static void ioc3_set_multicast_list(struct net_device *dev)
spin_unlock_irq(&ip->ioc3_lock);
}
-module_pci_driver(ioc3_driver);
+static struct platform_driver ioc3eth_driver = {
+ .probe = ioc3eth_probe,
+ .remove = ioc3eth_remove,
+ .driver = {
+ .name = "ioc3-eth",
+ }
+};
+
+module_platform_driver(ioc3eth_driver);
+
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");
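
The conversion above replaces the bit-banged one-wire reads with an nvmem lookup: the PROM image must carry content type 0x0a at byte 0, pass a CRC-16 check over its first 13 bytes, and store the MAC address byte-reversed at offsets 5..10. A minimal sketch of that layout check with the driver's constants inlined (an illustration, not driver code; prom[] would come from nvmem_device_read()):

#include <linux/crc16.h>
#include <linux/errno.h>

/* Sketch: validate a 16-byte IOC3 PROM dump and pull out the MAC.
 * 0x0a is the expected content type, 0xb001 the CRC-16 residue the
 * driver checks for.
 */
static int sketch_extract_mac(const u8 prom[16], u8 mac[6])
{
	int i;

	if (prom[0] != 0x0a || crc16(0, prom, 13) != 0xb001)
		return -EINVAL;		/* wrong type or bad checksum */

	for (i = 0; i < 6; i++)		/* stored in reverse byte order */
		mac[i] = prom[10 - i];

	return 0;
}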
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 539bc5db989c..0c396ecd3389 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -90,7 +90,7 @@ struct meth_private {
spinlock_t meth_lock;
};
-static void meth_tx_timeout(struct net_device *dev);
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t meth_interrupt(int irq, void *dev_id);
/* global, initialized in ip32-setup.c */
@@ -727,7 +727,7 @@ static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
/*
* Deal with a transmit timeout.
*/
-static void meth_tx_timeout(struct net_device *dev)
+static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct meth_private *priv = netdev_priv(dev);
unsigned long flags;
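
meth, and nearly every driver below, is touched by the same tree-wide change: .ndo_tx_timeout now receives the index of the queue that stalled, so multi-queue drivers no longer have to scan every ring. A hedged sketch of a handler using the new argument (my_reset_ring() is hypothetical; netdev_get_tx_queue() and netif_tx_wake_queue() are the stock accessors):

#include <linux/netdevice.h>

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "TX queue %u timed out\n", txqueue);
	my_reset_ring(dev, txqueue);	/* hypothetical: re-init just this ring */
	netif_tx_wake_queue(txq);	/* let the stack resubmit */
}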
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c7641a236eb8..cb043eb1bdc1 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1078,7 +1078,7 @@ static void sc92031_set_multicast_list(struct net_device *dev)
spin_unlock_bh(&priv->lock);
}
-static void sc92031_tx_timeout(struct net_device *dev)
+static void sc92031_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sc92031_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 5b351beb78cb..5a4b6e3ab38f 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1538,7 +1538,7 @@ err_out_0:
goto out;
}
-static void sis190_tx_timeout(struct net_device *dev)
+static void sis190_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 85eaccbbbac1..81ed7589e33c 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -222,7 +222,7 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(struct timer_list *t);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
-static void sis900_tx_timeout(struct net_device *net_dev);
+static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue);
static void sis900_init_tx_ring(struct net_device *net_dev);
static void sis900_init_rx_ring(struct net_device *net_dev);
static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
@@ -1537,7 +1537,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
* disable interrupts and do some tasks
*/
-static void sis900_tx_timeout(struct net_device *net_dev)
+static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index be47d864f8b9..61ddee0c2a2e 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -280,6 +280,7 @@ struct epic_private {
signed char phys[4]; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
int mii_phy_cnt;
+ u32 ethtool_ops_nesting;
struct mii_if_info mii;
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int default_port:4; /* Last dev->if_port value. */
@@ -291,7 +292,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
-static void epic_tx_timeout(struct net_device *dev);
+static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -861,7 +862,7 @@ static void epic_timer(struct timer_list *t)
add_timer(&ep->timer);
}
-static void epic_tx_timeout(struct net_device *dev)
+static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct epic_private *ep = netdev_priv(dev);
void __iomem *ioaddr = ep->ioaddr;
@@ -1435,8 +1436,10 @@ static int ethtool_begin(struct net_device *dev)
struct epic_private *ep = netdev_priv(dev);
void __iomem *ioaddr = ep->ioaddr;
+ if (ep->ethtool_ops_nesting == U32_MAX)
+ return -EBUSY;
/* power-up, if interface is down */
- if (!netif_running(dev)) {
+ if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
ew32(GENCTL, 0x0200);
ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
}
@@ -1449,7 +1452,7 @@ static void ethtool_complete(struct net_device *dev)
void __iomem *ioaddr = ep->ioaddr;
/* power-down, if interface is down */
- if (!netif_running(dev)) {
+ if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
ew32(GENCTL, 0x0008);
ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
}
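
The epic100 hunks above fix a power race: ethtool_begin()/ethtool_complete() used to power the chip up and down whenever the interface was down, so two overlapping ethtool calls could cut power under the first caller. The counter makes only the first begin power up and only the last complete power down. The pattern in miniature (power_up()/power_down() are hypothetical stand-ins for the GENCTL/NVCTL writes; the counter lives per device in practice, as ep->ethtool_ops_nesting):

static int op_begin(struct net_device *dev, u32 *nesting)
{
	if (*nesting == U32_MAX)
		return -EBUSY;			/* counter would wrap */
	if (!(*nesting)++ && !netif_running(dev))
		power_up(dev);			/* first user powers up */
	return 0;
}

static void op_complete(struct net_device *dev, u32 *nesting)
{
	if (!--(*nesting) && !netif_running(dev))
		power_down(dev);		/* last user powers down */
}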
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8d88e4083456..186c0bddbe5f 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -936,7 +936,7 @@ static void smc911x_phy_configure(struct work_struct *work)
if (lp->ctl_rspeed != 100)
my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
- if (!lp->ctl_rfduplx)
+ if (!lp->ctl_rfduplx)
my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
/* Update our Auto-Neg Advertisement Register */
@@ -1245,7 +1245,7 @@ static void smc911x_poll_controller(struct net_device *dev)
#endif
/* Our watchdog timed out. Called by the networking layer */
-static void smc911x_timeout(struct net_device *dev)
+static void smc911x_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc911x_local *lp = netdev_priv(dev);
int status, mask;
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index d3bb2ba51f40..4b2330deed47 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -216,7 +216,7 @@ static int smc_open(struct net_device *dev);
/*
. Our watchdog timed out. Called by the networking layer
*/
-static void smc_timeout(struct net_device *dev);
+static void smc_timeout(struct net_device *dev, unsigned int txqueue);
/*
. This is called by the kernel in response to 'ifconfig ethX down'. It
@@ -1094,7 +1094,7 @@ static int smc_open(struct net_device *dev)
.--------------------------------------------------------
*/
-static void smc_timeout(struct net_device *dev)
+static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
/* If we get here, some higher level has decided we are broken.
There should really be a "kick me" function call instead. */
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index a55f430f6a7b..f2a50eb3c1e0 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -271,7 +271,7 @@ static void smc91c92_release(struct pcmcia_device *link);
static int smc_open(struct net_device *dev);
static int smc_close(struct net_device *dev);
static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void smc_tx_timeout(struct net_device *dev);
+static void smc_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t smc_interrupt(int irq, void *dev_id);
@@ -1178,7 +1178,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
/*====================================================================*/
-static void smc_tx_timeout(struct net_device *dev)
+static void smc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 3a6761131f4c..90410f9d3b1a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1321,7 +1321,7 @@ static void smc_poll_controller(struct net_device *dev)
#endif
/* Our watchdog timed out. Called by the networking layer */
-static void smc_timeout(struct net_device *dev)
+static void smc_timeout(struct net_device *dev, unsigned int txqueue)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 38068fc34141..49a6a9167af4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1943,15 +1943,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-/* Standard ioctls for mii-tool */
-static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(dev) || !dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -2151,7 +2142,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_start_xmit = smsc911x_hard_start_xmit,
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_rx_mode = smsc911x_set_multicast_list,
- .ndo_do_ioctl = smsc911x_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2454,7 +2445,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- pdata->ioaddr = ioremap_nocache(res->start, res_size);
+ pdata->ioaddr = ioremap(res->start, res_size);
if (!pdata->ioaddr) {
retval = -ENOMEM;
goto out_ioremap_fail;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index a6962a41c3d2..7312e522c022 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -210,15 +210,6 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
return -EIO;
}
-/* Standard ioctls for mii-tool */
-static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!netif_running(dev) || !dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -1504,7 +1495,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_start_xmit = smsc9420_hard_start_xmit,
.ndo_get_stats = smsc9420_get_stats,
.ndo_set_rx_mode = smsc9420_set_multicast_list,
- .ndo_do_ioctl = smsc9420_do_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
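
smsc911x, smsc9420 and (below) netsec all drop the same boilerplate ioctl wrapper in favour of the phylib helpers added this cycle. Paraphrased, not copied verbatim, they behave roughly as follows; note the old wrappers returned -EINVAL where the helpers return -ENODEV:

/* Approximate behaviour of the new generic phylib ioctl helpers. */
int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)
		return -ENODEV;
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

/* Same, but additionally requires the interface to be up. */
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -ENODEV;
	return phy_do_ioctl(dev, ifr, cmd);
}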
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 869a498e3b5e..e8224b543dfc 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -243,6 +243,7 @@
NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
#define DESC_SZ sizeof(struct netsec_de)
@@ -719,7 +720,6 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- enum dma_data_direction dma_dir;
struct page *page;
page = page_pool_dev_alloc_pages(dring->page_pool);
@@ -734,9 +734,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
/* Make sure the incoming payload fits in the page for XDP and non-XDP
* cases and reserve enough space for headroom + skb_shared_info
*/
- *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
- dma_dir = page_pool_get_dma_dir(dring->page_pool);
- dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
+ *desc_len = NETSEC_RX_BUF_SIZE;
return page_address(page);
}
@@ -883,6 +881,8 @@ static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp)
{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ unsigned int len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS;
int err;
u32 act;
@@ -896,7 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX)
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -904,7 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
}
break;
default:
@@ -915,7 +919,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- xdp_return_buff(xdp);
+ __page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data),
+ len, true);
break;
}
@@ -929,7 +935,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog;
- struct sk_buff *skb = NULL;
u16 xdp_xmit = 0;
u32 xdp_act = 0;
int done = 0;
@@ -943,7 +948,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = XDP_PASS;
+ u32 xdp_result = NETSEC_XDP_PASS;
+ struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
struct xdp_buff xdp;
@@ -1014,7 +1020,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
* cache state. Since we paid the allocation cost if
* building an skb fails try to put the page into cache
*/
- page_pool_recycle_direct(dring->page_pool, page);
+ __page_pool_put_page(dring->page_pool, page,
+ pkt_len, true);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
@@ -1272,17 +1279,19 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
- struct page_pool_params pp_params = { 0 };
+ struct page_pool_params pp_params = {
+ .order = 0,
+ /* internal DMA mapping in page_pool */
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = DESC_NUM,
+ .nid = NUMA_NO_NODE,
+ .dev = priv->dev,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = NETSEC_RXBUF_HEADROOM,
+ .max_len = NETSEC_RX_BUF_SIZE,
+ };
int i, err;
- pp_params.order = 0;
- /* internal DMA mapping in page_pool */
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = DESC_NUM;
- pp_params.nid = cpu_to_node(0);
- pp_params.dev = priv->dev;
- pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
dring->page_pool = page_pool_create(&pp_params);
if (IS_ERR(dring->page_pool)) {
err = PTR_ERR(dring->page_pool);
@@ -1731,12 +1740,6 @@ static int netsec_netdev_set_features(struct net_device *ndev,
return 0;
}
-static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
- int cmd)
-{
- return phy_mii_ioctl(ndev->phydev, ifr, cmd);
-}
-
static int netsec_xdp_xmit(struct net_device *ndev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -1821,7 +1824,7 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_features = netsec_netdev_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = netsec_netdev_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl,
.ndo_xdp_xmit = netsec_xdp_xmit,
.ndo_bpf = netsec_xdp,
};
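
The netsec rework hands the device-direction DMA sync over to the page_pool: with PP_FLAG_DMA_SYNC_DEV plus .offset/.max_len the pool syncs freshly allocated pages itself, and recycled XDP buffers are returned through __page_pool_put_page() with the real frame length so only the bytes the CPU dirtied are synced back. A minimal sketch of such a pool with illustrative sizes (not the driver's exact values):

#include <net/page_pool.h>

static struct page_pool *sketch_create_pool(struct device *dma_dev)
{
	struct page_pool_params pp = {
		.order		= 0,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= 256,			/* illustrative ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,		/* the DMA-capable device */
		.dma_dir	= DMA_FROM_DEVICE,	/* BIDIRECTIONAL when XDP_TX is used */
		.offset		= XDP_PACKET_HEADROOM,
		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
	};

	return page_pool_create(&pp);	/* ERR_PTR on failure */
}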
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f7e927ad67fa..67ddf782d98a 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev,
phy_ethtool_get_wol(ndev->phydev, wol);
}
-static int ave_ethtool_set_wol(struct net_device *ndev,
- struct ethtool_wolinfo *wol)
+static int __ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
{
- int ret;
-
if (!ndev->phydev ||
(wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ return phy_ethtool_set_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ ret = __ave_ethtool_set_wol(ndev, wol);
if (!ret)
device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
@@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev)
/* set wol initial state disabled */
wol.wolopts = 0;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (!phy_interface_is_rgmii(phydev))
phy_set_max_speed(phydev, SPEED_100);
@@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev)
ave_ethtool_get_wol(ndev, &wol);
wol.wolopts = priv->wolopts;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (ndev->phydev) {
ret = phy_resume(ndev->phydev);
@@ -1804,6 +1810,9 @@ static int ave_pro4_get_pinmode(struct ave_private *priv,
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
@@ -1848,6 +1857,9 @@ static int ave_ld20_get_pinmode(struct ave_private *priv,
priv->pinmode_val = SG_ETPINMODE_RMII(0);
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
@@ -1870,6 +1882,9 @@ static int ave_pxs3_get_pinmode(struct ave_private *priv,
priv->pinmode_val = SG_ETPINMODE_RMII(arg);
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b210e987a1db..487099092693 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -363,11 +363,16 @@ struct dma_features {
unsigned int dvlan;
unsigned int l3l4fnum;
unsigned int arpoffsel;
+ /* TSN Features */
+ unsigned int estwid;
+ unsigned int estdep;
+ unsigned int estsel;
+ unsigned int fpesel;
+ unsigned int tbssel;
};
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+/* RX Buffer size must be a multiple of 4/8/16 bytes */
+#define BUF_SIZE_16KiB 16368
#define BUF_SIZE_8KiB 8188
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9f0b9a9e63b3..49d6a866244f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -171,6 +171,15 @@ struct dma_extended_desc {
__le32 des7; /* Tx/Rx Timestamp High */
};
+/* Enhanced descriptor for TBS */
+struct dma_edesc {
+ __le32 des4;
+ __le32 des5;
+ __le32 des6;
+ __le32 des7;
+ struct dma_desc basic;
+};
+
/* Transmit checksum insertion control */
#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd9967aeda22..2342d497348e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -40,7 +40,7 @@ struct tegra_eqos {
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
u32 burst_map = 0;
u32 bit_index = 0;
u32 a_index = 0;
@@ -52,9 +52,10 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return -ENOMEM;
}
- plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi");
- if (of_property_read_u32(np, "snps,write-requests",
- &plat_dat->axi->axi_wr_osr_lmt)) {
+ plat_dat->axi->axi_lpi_en = device_property_read_bool(dev,
+ "snps,en-lpi");
+ if (device_property_read_u32(dev, "snps,write-requests",
+ &plat_dat->axi->axi_wr_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
* is missing, default to 1.
@@ -68,8 +69,8 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
- if (of_property_read_u32(np, "snps,read-requests",
- &plat_dat->axi->axi_rd_osr_lmt)) {
+ if (device_property_read_u32(dev, "snps,read-requests",
+ &plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
* is missing, default to 1.
@@ -82,7 +83,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
*/
plat_dat->axi->axi_rd_osr_lmt--;
}
- of_property_read_u32(np, "snps,burst-map", &burst_map);
+ device_property_read_u32(dev, "snps,burst-map", &burst_map);
/* converts burst-map bitmask to burst array */
for (bit_index = 0; bit_index < 7; bit_index++) {
@@ -270,6 +271,7 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *data,
struct stmmac_resources *res)
{
+ struct device *dev = &pdev->dev;
struct tegra_eqos *eqos;
int err;
@@ -282,6 +284,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
+ if (!is_of_node(dev->fwnode))
+ goto bypass_clk_reset_gpio;
+
eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
if (IS_ERR(eqos->clk_master)) {
err = PTR_ERR(eqos->clk_master);
@@ -354,6 +359,7 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
+bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
@@ -421,7 +427,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
void *priv;
int ret;
- data = of_device_get_match_data(&pdev->dev);
+ data = device_get_match_data(&pdev->dev);
memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
@@ -478,7 +484,7 @@ static int dwc_eth_dwmac_remove(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
int err;
- data = of_device_get_match_data(&pdev->dev);
+ data = device_get_match_data(&pdev->dev);
err = stmmac_dvr_remove(&pdev->dev);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index bdb80421acac..9e4b83832938 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -55,6 +55,8 @@ struct mediatek_dwmac_plat_data {
struct regmap *peri_regmap;
struct device *dev;
phy_interface_t phy_mode;
+ int num_clks_to_config;
+ bool rmii_clk_from_mac;
bool rmii_rxc;
};
@@ -73,21 +75,33 @@ struct mediatek_dwmac_variant {
/* list of clocks required for mac */
static const char * const mt2712_dwmac_clk_l[] = {
- "axi", "apb", "mac_main", "ptp_ref"
+ "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
};
static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
u32 intf_val = 0;
+ /* The clock labeled "rmii_internal" in mt2712_dwmac_clk_l is needed
+ * only in RMII mode when the MAC provides the reference clock; it is
+ * unused for RGMII/MII, and for RMII when the PHY provides the clock.
+ * num_clks_to_config is the actual number of clocks to configure:
+ * (plat->variant->num_clks - 1) by default, plus one in the
+ * rmii_clk_from_mac case.
+ */
+ plat->num_clks_to_config = plat->variant->num_clks - 1;
+
/* select phy interface in top control domain */
switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII:
intf_val |= PHY_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- intf_val |= (PHY_INTF_RMII | rmii_rxc);
+ if (plat->rmii_clk_from_mac)
+ plat->num_clks_to_config++;
+ intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -173,35 +187,50 @@ static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
break;
case PHY_INTERFACE_MODE_RMII:
- /* the rmii reference clock is from external phy,
- * and the property "rmii_rxc" indicates which pin(TXC/RXC)
- * the reference clk is connected to. The reference clock is a
- * received signal, so rx_delay/rx_inv are used to indicate
- * the reference clock timing adjustment
- */
- if (plat->rmii_rxc) {
- /* the rmii reference clock from outside is connected
- * to RXC pin, the reference clock will be adjusted
- * by RXC delay macro circuit.
- */
- delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
- delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
- delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
- } else {
- /* the rmii reference clock from outside is connected
- * to TXC pin, the reference clock will be adjusted
- * by TXC delay macro circuit.
+ if (plat->rmii_clk_from_mac) {
+ /* case 1: the mac provides the rmii reference clock, and
+ * the clock is output on the TXC pin.
+ * The egress timing can be adjusted by the GTXC delay macro
+ * circuit; the ingress timing by the TXC delay macro circuit.
*/
delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+ } else {
+ /* case 2: the rmii reference clock comes from the external
+ * phy, and the property "rmii_rxc" indicates which pin
+ * (TXC/RXC) the reference clock is connected to. The reference
+ * clock is a received signal, so rx_delay/rx_inv describe the
+ * reference clock timing adjustment.
+ */
+ if (plat->rmii_rxc) {
+ /* the rmii reference clock from outside is connected
+ * to RXC pin, the reference clock will be adjusted
+ * by RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ } else {
+ /* the rmii reference clock from outside is connected
+ * to TXC pin, the reference clock will be adjusted
+ * by TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+ }
+ /* tx_inv will invert the tx clock inside the mac relative to
+ * the reference clock from the external phy; this bit sits in
+ * the same register as the fine-tune bits.
+ */
+ if (mac_delay->tx_inv)
+ fine_val = ETH_RMII_DLY_TX_INV;
}
- /* tx_inv will inverse the tx clock inside mac relateive to
- * reference clock from external phy,
- * and this bit is located in the same register with fine-tune
- */
- if (mac_delay->tx_inv)
- fine_val = ETH_RMII_DLY_TX_INV;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -278,6 +307,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse");
mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
+ plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
return 0;
}
@@ -294,6 +324,8 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
for (i = 0; i < num; i++)
plat->clks[i].id = variant->clk_list[i];
+ plat->num_clks_to_config = variant->num_clks;
+
return devm_clk_bulk_get(plat->dev, num, plat->clks);
}
@@ -321,7 +353,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
if (ret) {
dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
return ret;
@@ -336,9 +368,8 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
- const struct mediatek_dwmac_variant *variant = plat->variant;
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
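
The mediatek change works because "rmii_internal" is appended last in mt2712_dwmac_clk_l: the driver still fetches the whole list once with devm_clk_bulk_get(), then simply passes a smaller count to the bulk enable/disable calls when the PHY, not the MAC, supplies the RMII reference clock. Sketched, with names taken from the hunks above:

/* Fetch all clocks once; decide at set_interface time how many to use.
 * The optional clock must be the last entry for this to work. */
plat->num_clks_to_config = plat->variant->num_clks - 1;	/* skip "rmii_internal" */
if (plat->rmii_clk_from_mac)
	plat->num_clks_to_config++;	/* MAC sources the clock: enable it too */

ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);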
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index bd6c01004913..0e2fa14f1423 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
struct meson8b_dwmac_clk_configs *clk_configs;
+ static const struct clk_div_table div_table[] = {
+ { .div = 2, .val = 2, },
+ { .div = 3, .val = 3, },
+ { .div = 4, .val = 4, },
+ { .div = 5, .val = 5, },
+ { .div = 6, .val = 6, },
+ { .div = 7, .val = 7, },
+ };
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
if (!clk_configs)
@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
- clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO |
- CLK_DIVIDER_ROUND_CLOSEST;
+ clk_configs->m250_div.table = div_table;
+ clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
&clk_divider_ops,
&clk_configs->m250_div.hw);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 7ec895407d23..e0a5fe83d8e0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
if (dll_lock & SDC4_STATUS_DLL_LOCK)
break;
+ retry--;
} while (retry > 0);
if (!retry)
dev_err(&ethqos->pdev->dev,
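
The qcom-ethqos one-liner restores the decrement that makes the DLL-lock wait actually terminate; without it the do/while spun forever whenever the lock bit never set. The kernel's iopoll helper expresses the same bounded wait more directly; a rough equivalent, assuming rgmii_readl() dereferences an ethqos->rgmii_base mapping (field name assumed, bounds illustrative):

#include <linux/iopoll.h>

unsigned int dll_lock;
int err;

/* Poll every 100 us, give up after 10 ms. */
err = readl_poll_timeout(ethqos->rgmii_base + SDC4_STATUS, dll_lock,
			 dll_lock & SDC4_STATUS_DLL_LOCK, 100, 10000);
if (err)
	dev_err(&ethqos->pdev->dev, "Timeout while waiting for DLL lock\n");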
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 1c8d84ed8410..58e0511badba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -335,14 +335,30 @@ static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
}
}
-static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value |= EMAC_RX_INT;
+ if (tx)
+ value |= EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
}
-static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(0, ioaddr + EMAC_INT_EN);
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value &= ~EMAC_RX_INT;
+ if (tx)
+ value &= ~EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
}
static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -957,6 +973,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* default */
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
break;
case PHY_INTERFACE_MODE_RMII:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 26353ef616b8..7d40760e9ba8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;
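
Where the sun8i and sni_ave hunks above spell out the three internal-delay variants case by case, the sunxi fix leans on the phylib predicate instead. The four RGMII modes are consecutive enum values, so the helper in include/linux/phy.h amounts to a range check (paraphrased):

static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
{
	return mode >= PHY_INTERFACE_MODE_RGMII &&
	       mode <= PHY_INTERFACE_MODE_RGMII_TXID;
}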
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 2dc70d104161..af50af27550b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -64,6 +64,8 @@
#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
#define GMAC_RXQCTRL_TACPQE BIT(21)
#define GMAC_RXQCTRL_TACPQE_SHIFT 21
+#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24)
+#define GMAC_RXQCTRL_FPRQ_SHIFT 24
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
@@ -176,6 +178,8 @@ enum power_event {
#define GMAC_CONFIG_SARC GENMASK(30, 28)
#define GMAC_CONFIG_SARC_SHIFT 28
#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_IPG GENMASK(26, 24)
+#define GMAC_CONFIG_IPG_SHIFT 24
#define GMAC_CONFIG_2K BIT(22)
#define GMAC_CONFIG_ACS BIT(20)
#define GMAC_CONFIG_BE BIT(18)
@@ -183,6 +187,7 @@ enum power_event {
#define GMAC_CONFIG_JE BIT(16)
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_FES_SHIFT 14
#define GMAC_CONFIG_DM BIT(13)
#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
@@ -190,6 +195,9 @@ enum power_event {
#define GMAC_CONFIG_RE BIT(0)
/* MAC extended config */
+#define GMAC_CONFIG_EIPG GENMASK(29, 25)
+#define GMAC_CONFIG_EIPG_SHIFT 25
+#define GMAC_CONFIG_EIPG_EN BIT(24)
#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
#define GMAC_CONFIG_HDSMS_SHIFT 20
#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
@@ -231,6 +239,11 @@ enum power_event {
/* MAC HW features3 bitmap */
#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
+#define GMAC_HW_FEAT_TBSSEL BIT(27)
+#define GMAC_HW_FEAT_FPESEL BIT(26)
+#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20)
+#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17)
+#define GMAC_HW_FEAT_ESTSEL BIT(16)
#define GMAC_HW_FEAT_FRPES GENMASK(14, 13)
#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
#define GMAC_HW_FEAT_FRPSEL BIT(10)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 40ca00e596dd..dc09d2131e40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value |= GMAC_PACKET_FILTER_PM;
/* Set all the bits of the HASH tab */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
struct netdev_hw_addr *ha;
/* Hash filter for multicast */
@@ -736,11 +736,14 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
+ u32 value;
writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+
if (hash) {
- u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
+ value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
if (is_double) {
value |= GMAC_VLAN_EDVLP;
value |= GMAC_VLAN_ESVL;
@@ -759,8 +762,6 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
} else {
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
value &= ~GMAC_VLAN_DOVLTC;
@@ -984,6 +985,8 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .fpe_configure = dwmac5_fpe_configure,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1027,6 +1030,8 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .fpe_configure = dwmac5_fpe_configure,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 3e14da69f378..eff82065a501 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -10,6 +10,7 @@
#include <linux/stmmac.h>
#include "common.h"
+#include "dwmac4.h"
#include "dwmac4_descs.h"
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
@@ -505,6 +506,14 @@ static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
}
+static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
+ p->des5 = cpu_to_le32(nsec & TDES5_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -534,6 +543,7 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_vlan = dwmac4_set_vlan,
.get_rx_header_len = dwmac4_get_rx_header_len,
.set_sec_addr = dwmac4_set_sec_addr,
+ .set_tbs = dwmac4_set_tbs,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 6d92109dc9aa..6da070ccd737 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -73,6 +73,13 @@
#define TDES3_CONTEXT_TYPE BIT(30)
#define TDES3_CONTEXT_TYPE_SHIFT 30
+/* TDES4 */
+#define TDES4_LTV BIT(31)
+#define TDES4_LT GENMASK(7, 0)
+
+/* TDES5 */
+#define TDES5_LT GENMASK(31, 8)
+
/* TDS3 use for both format (read and write back) */
#define TDES3_OWN BIT(31)
#define TDES3_OWN_SHIFT 31
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index c15409030710..bb29bfcd62c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -404,6 +404,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
+ dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
+ dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
+ dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
@@ -467,6 +472,25 @@ static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
+static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ if (en)
+ value |= DMA_CONTROL_EDSE;
+ else
+ value &= ~DMA_CONTROL_EDSE;
+
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
+ return 0;
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -523,4 +547,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
.enable_sph = dwmac4_enable_sph,
+ .enable_tbs = dwmac4_enable_tbs,
};
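
dwmac4_enable_tbs() above follows a write/read-back pattern: set or clear EDSE in the channel TX control register, re-read it, and fail with -EIO if an enable did not take effect (presumably a core synthesized without TBS keeps the bit at zero); it then programs the default fetch-time offset DMA_TBS_DEF_FTOS before returning 0. A toy user-space sketch of the verify step, with a plain variable standing in for the register:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_CONTROL_EDSE (1U << 28)

static uint32_t tx_control;	/* stand-in for DMA_CHAN_TX_CONTROL(chan) */

static int enable_edse(int en)
{
	uint32_t value = tx_control;			/* readl() */

	if (en)
		value |= DMA_CONTROL_EDSE;
	else
		value &= ~DMA_CONTROL_EDSE;
	tx_control = value;				/* writel() */

	/* Verify: in hardware the bit stays clear if TBS is absent;
	 * in this toy model it always sticks.
	 */
	if (en && !(tx_control & DMA_CONTROL_EDSE))
		return -EIO;
	return 0;
}

int main(void)
{
	printf("enable_edse(1) -> %d\n", enable_edse(1));
	return 0;
}
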
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 589931795847..8391ca63d943 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -22,6 +22,7 @@
#define DMA_DEBUG_STATUS_1 0x00001010
#define DMA_DEBUG_STATUS_2 0x00001014
#define DMA_AXI_BUS_MODE 0x00001028
+#define DMA_TBS_CTRL 0x00001050
/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_SFT_RESET BIT(0)
@@ -82,6 +83,11 @@
#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+/* DMA TBS Control */
+#define DMA_TBS_FTOS GENMASK(31, 8)
+#define DMA_TBS_FTOV BIT(0)
+#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
+
/* Following DMA defines are channel oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
@@ -114,6 +120,7 @@
#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_EDSE BIT(28)
#define DMA_CONTROL_TSE BIT(12)
#define DMA_CONTROL_OSP BIT(4)
#define DMA_CONTROL_ST BIT(0)
@@ -168,6 +175,8 @@
/* DMA default interrupt mask for 4.00 */
#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
DMA_CHAN_INTR_ENA_RIE | \
@@ -178,6 +187,8 @@
/* DMA default interrupt mask for 4.10a */
#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
/* channel 0 specific fields */
#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
@@ -186,9 +197,10 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index f2a29a90e085..9becca280074 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -97,21 +97,52 @@ void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
}
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
- DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
- ioaddr + DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
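
The helpers above replace the old all-or-nothing writes of DMA_CHAN_INTR_DEFAULT_MASK with a read-modify-write of just the RX (RIE) or TX (TIE) enable bits, so each NAPI direction can mask its own source without clobbering the other. A compact user-space sketch of that pattern; the bit positions are stand-ins, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define INTR_RX	(1U << 6)	/* stand-in for DMA_CHAN_INTR_ENA_RIE */
#define INTR_TX	(1U << 0)	/* stand-in for DMA_CHAN_INTR_ENA_TIE */

static uint32_t intr_ena;	/* stand-in for DMA_CHAN_INTR_ENA(chan) */

static void set_dma_irq(int rx, int tx, int enable)
{
	uint32_t value = intr_ena;			/* readl() */

	if (rx)
		value = enable ? (value | INTR_RX) : (value & ~INTR_RX);
	if (tx)
		value = enable ? (value | INTR_TX) : (value & ~INTR_TX);

	intr_ena = value;				/* writel() */
}

int main(void)
{
	set_dma_irq(1, 1, 1);		/* enable both directions */
	set_dma_irq(0, 1, 0);		/* TX NAPI masks only its own source */
	printf("intr_ena=0x%02x\n", (unsigned int)intr_ena); /* RX survives */
	return 0;
}
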
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e436fa160c7d..494c859b4ade 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -550,3 +550,122 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
+
+static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + MTL_EST_GCL_DATA);
+
+ ctrl = (reg << ADDR_SHIFT);
+ ctrl |= gcl ? 0 : GCRR;
+
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ ctrl |= SRWO;
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & SRWO), 100, 5000);
+}
+
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ u32 speed, total_offset, offset, ctrl, ctr_low;
+ u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
+ u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
+ int i, ret = 0x0;
+ u64 total_ctr;
+
+ if (extcfg & GMAC_CONFIG_EIPG_EN) {
+ offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
+ offset = 104 + (offset * 8);
+ } else {
+ offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
+ offset = 96 - (offset * 8);
+ }
+
+ speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
+ speed = speed >> GMAC_CONFIG_FES_SHIFT;
+
+ switch (speed) {
+ case 0x0:
+ offset = offset * 1000; /* 1G */
+ break;
+ case 0x1:
+ offset = offset * 400; /* 2.5G */
+ break;
+ case 0x2:
+ offset = offset * 100000; /* 10M */
+ break;
+ case 0x3:
+ offset = offset * 10000; /* 100M */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ offset = offset / 1000;
+
+ ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
+ ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
+ ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
+ ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ if (ret)
+ return ret;
+
+ total_offset = 0;
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+ if (ret)
+ return ret;
+
+ total_offset += offset;
+ }
+
+ total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
+ total_ctr += total_offset;
+
+ ctr_low = do_div(total_ctr, 1000000000);
+
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
+ if (ret)
+ return ret;
+
+ ctrl = readl(ioaddr + MTL_EST_CONTROL);
+ ctrl &= ~PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= EEST | SSWL;
+ else
+ ctrl &= ~EEST;
+
+ writel(ctrl, ioaddr + MTL_EST_CONTROL);
+ return 0;
+}
+
+void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value &= ~EFPE;
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+ value &= ~GMAC_RXQCTRL_FPRQ;
+ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+ value |= EFPE;
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+}
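
Worth spelling out the arithmetic in dwmac5_est_configure() above: the inter-packet gap is first expressed in bit-times (96 - 8*IPG, or 104 + 8*EIPG when the extended IPG is enabled), the speed switch scales it by the bit duration in picoseconds (1000 at 1 Gb/s, 400 at 2.5 Gb/s, 10000 at 100 Mb/s, 100000 at 10 Mb/s), and the divide by 1000 yields nanoseconds. Every GCL entry is widened by that offset, and the cycle time is extended by the accumulated widening before do_div() splits it back into CTR_HIGH seconds and CTR_LOW nanoseconds. A worked example at 100 Mb/s, with values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 96 bit-times of IPG at 100 Mb/s: one bit lasts 10000 ps */
	uint32_t offset_ps = 96 * 10000;
	uint32_t offset_ns = offset_ps / 1000;		/* 960 ns per entry */

	/* Say the configured cycle is 1 s + 500000000 ns over 3 GCL rows */
	uint64_t total_ctr = 500000000ULL + 1ULL * 1000000000ULL;
	total_ctr += 3 * offset_ns;

	/* do_div() equivalent: split into CTR_HIGH (s) / CTR_LOW (ns) */
	uint32_t ctr_low = (uint32_t)(total_ctr % 1000000000ULL);
	uint64_t ctr_high = total_ctr / 1000000000ULL;

	printf("offset=%u ns, CTR_HIGH=%llu, CTR_LOW=%u\n",
	       (unsigned int)offset_ns, (unsigned long long)ctr_high,
	       (unsigned int)ctr_low);
	return 0;
}
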
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 23fecf68f781..3e8faa96b4d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,6 +11,9 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
+#define MAC_FPE_CTRL_STS 0x00000234
+#define EFPE BIT(0)
+
#define MAC_PPS_CONTROL 0x00000b70
#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x) ((x) * 8)
@@ -30,6 +33,23 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_EST_CONTROL 0x00000c50
+#define PTOV GENMASK(31, 24)
+#define PTOV_SHIFT 24
+#define SSWL BIT(1)
+#define EEST BIT(0)
+#define MTL_EST_GCL_CONTROL 0x00000c80
+#define BTR_LOW 0x0
+#define BTR_HIGH 0x1
+#define CTR_LOW 0x2
+#define CTR_HIGH 0x3
+#define TER 0x4
+#define LLR 0x5
+#define ADDR_SHIFT 8
+#define GCRR BIT(2)
+#define SRWO BIT(0)
+#define MTL_EST_GCL_DATA 0x00000c84
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -83,5 +103,9 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 292b880f3f9f..e5dbd0bc257e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -96,6 +96,8 @@
/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
/* DMA Status register defines */
#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
@@ -130,8 +132,8 @@
#define NUM_DWMAC1000_DMA_REGS 23
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 1bc25aa86dbd..688d36095333 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -37,14 +37,28 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value |= DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
{
- writel(0, ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value &= ~DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
}
void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 3b6e559aa0b9..6c3b8a950f58 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -73,6 +73,9 @@
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL1 0x000000a4
+#define XGMAC_RQ GENMASK(7, 4)
+#define XGMAC_RQ_SHIFT 4
#define XGMAC_RXQ_CTRL2 0x000000a8
#define XGMAC_RXQ_CTRL3 0x000000ac
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
@@ -136,6 +139,11 @@
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_TBSSEL BIT(27)
+#define XGMAC_HWFEAT_FPESEL BIT(26)
+#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23)
+#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20)
+#define XGMAC_HWFEAT_ESTSEL BIT(19)
#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
#define XGMAC_HWFEAT_DVLAN BIT(13)
#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
@@ -148,6 +156,8 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_FPE_CTRL_STS 0x00000280
+#define XGMAC_EFPE BIT(0)
#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
@@ -237,6 +247,22 @@
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_EST_CONTROL 0x00001050
+#define XGMAC_PTOV GENMASK(31, 23)
+#define XGMAC_PTOV_SHIFT 23
+#define XGMAC_SSWL BIT(1)
+#define XGMAC_EEST BIT(0)
+#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
+#define XGMAC_BTR_LOW 0x0
+#define XGMAC_BTR_HIGH 0x1
+#define XGMAC_CTR_LOW 0x2
+#define XGMAC_CTR_HIGH 0x3
+#define XGMAC_TER 0x4
+#define XGMAC_LLR 0x5
+#define XGMAC_ADDR_SHIFT 8
+#define XGMAC_GCRR BIT(2)
+#define XGMAC_SRWO BIT(0)
+#define XGMAC_MTL_EST_GCL_DATA 0x00001084
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
@@ -321,6 +347,13 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_TBS_CTRL0 0x00003054
+#define XGMAC_DMA_TBS_CTRL1 0x00003058
+#define XGMAC_DMA_TBS_CTRL2 0x0000305c
+#define XGMAC_DMA_TBS_CTRL3 0x00003060
+#define XGMAC_FTOS GENMASK(31, 8)
+#define XGMAC_FTOV BIT(0)
+#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV)
#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
#define XGMAC_MCSIS BIT(31)
#define XGMAC_MSUIS BIT(29)
@@ -335,6 +368,7 @@
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_EDSE BIT(28)
#define XGMAC_TxPBL GENMASK(21, 16)
#define XGMAC_TxPBL_SHIFT 16
#define XGMAC_TSE BIT(12)
@@ -343,6 +377,8 @@
#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
#define XGMAC_RxPBL GENMASK(21, 16)
#define XGMAC_RxPBL_SHIFT 16
+#define XGMAC_RBSZ GENMASK(14, 1)
+#define XGMAC_RBSZ_SHIFT 1
#define XGMAC_RXST BIT(0)
#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
@@ -361,6 +397,8 @@
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_INT_DEFAULT_RX (XGMAC_RBUE | XGMAC_RIE)
+#define XGMAC_DMA_INT_DEFAULT_TX (XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
@@ -375,6 +413,9 @@
#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES0_LTV BIT(31)
+#define XGMAC_TDES0_LT GENMASK(7, 0)
+#define XGMAC_TDES1_LT GENMASK(31, 8)
#define XGMAC_TDES2_IVT GENMASK(31, 16)
#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
@@ -393,6 +434,7 @@
#define XGMAC_TDES3_TCMSSV BIT(26)
#define XGMAC_TDES3_SAIC GENMASK(25, 23)
#define XGMAC_TDES3_SAIC_SHIFT 23
+#define XGMAC_TDES3_TBSV BIT(24)
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 082f5ee9e525..67b754a56288 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -458,7 +458,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
struct netdev_hw_addr *ha;
value |= XGMAC_FILTER_HMC;
@@ -569,7 +569,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
writel(value, ioaddr + XGMAC_PACKET_FILTER);
- value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
if (is_double) {
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
@@ -584,7 +586,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
writel(value, ioaddr + XGMAC_PACKET_FILTER);
- value = XGMAC_VLAN_ETV;
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_ETV;
if (is_double) {
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
@@ -1359,6 +1363,81 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
+
+ ctrl = (reg << XGMAC_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : XGMAC_GCRR;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ ctrl |= XGMAC_SRWO;
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
+}
+
+static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ int i, ret = 0x0;
+ u32 ctrl;
+
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
+ ctrl &= ~XGMAC_PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= XGMAC_EEST | XGMAC_SSWL;
+ else
+ ctrl &= ~XGMAC_EEST;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
+ return 0;
+}
+
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
+ u32 num_rxq, bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+
+ value &= ~XGMAC_EFPE;
+
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL1);
+ value &= ~XGMAC_RQ;
+ value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
+ writel(value, ioaddr + XGMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+ value |= XGMAC_EFPE;
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
@@ -1402,6 +1481,8 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
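
The XGMAC variant above is deliberately simpler than the GMAC5 one: it writes CTR_LOW/CTR_HIGH straight from cfg->ctr with no IPG-based widening, polls with readl_poll_timeout_atomic(), and scales PTOV by 9 PTP clock periods instead of 6 (reading (1000000000 / ptp_rate) as the clock period in nanoseconds, assuming ptp_rate is in Hz). A quick arithmetic sketch with a hypothetical 250 MHz PTP clock:

#include <stdio.h>

int main(void)
{
	unsigned int ptp_rate = 250000000;	/* 250 MHz, for illustration */
	unsigned int period_ns = 1000000000 / ptp_rate;	/* 4 ns */

	printf("GMAC5 PTOV=%u, XGMAC PTOV=%u\n",
	       period_ns * 6, period_ns * 9);
	return 0;
}
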
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index bd5838ce1e8a..c3d654cfa9ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -339,6 +339,14 @@ static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
}
+static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV);
+ p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -368,4 +376,5 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_sarc = dwxgmac2_set_sarc,
.set_vlan_tag = dwxgmac2_set_vlan_tag,
.set_vlan = dwxgmac2_set_vlan,
+ .set_tbs = dwxgmac2_set_tbs,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 22a7f0cc1b90..77308c5c5d29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -248,14 +248,30 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
-static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value |= XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value |= XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
-static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
{
- writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
@@ -413,6 +429,11 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 3 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
+ dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
+ dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
@@ -482,7 +503,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
u32 value;
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
- value |= bfsize << 1;
+ value &= ~XGMAC_RBSZ;
+ value |= bfsize << XGMAC_RBSZ_SHIFT;
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
@@ -502,6 +524,28 @@ static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
}
+static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_EDSE;
+ else
+ value &= ~XGMAC_EDSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
+ return 0;
+}
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -529,4 +573,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
.enable_sph = dwxgmac2_enable_sph,
+ .enable_tbs = dwxgmac2_enable_tbs,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index aa5b917398fe..df63b0367aff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -29,6 +29,7 @@ struct stmmac_extra_stats;
struct stmmac_safety_stats;
struct dma_desc;
struct dma_extended_desc;
+struct dma_edesc;
/* Descriptors helpers */
struct stmmac_desc_ops {
@@ -95,6 +96,7 @@ struct stmmac_desc_ops {
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
u32 inner_type);
void (*set_vlan)(struct dma_desc *p, u32 type);
+ void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -157,6 +159,8 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
#define stmmac_set_desc_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_vlan, __args)
+#define stmmac_set_desc_tbs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tbs, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -187,8 +191,10 @@ struct stmmac_dma_ops {
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
- void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
void (*start_tx)(void __iomem *ioaddr, u32 chan);
void (*stop_tx)(void __iomem *ioaddr, u32 chan);
void (*start_rx)(void __iomem *ioaddr, u32 chan);
@@ -208,6 +214,7 @@ struct stmmac_dma_ops {
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
+ int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -266,6 +273,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
#define stmmac_enable_sph(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_sph, __args)
+#define stmmac_enable_tbs(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, enable_tbs, __args)
struct mac_device_info;
struct net_device;
@@ -274,6 +283,7 @@ struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
+struct stmmac_est;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -371,6 +381,10 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
+ int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ bool enable);
};
#define stmmac_core_init(__priv, __args...) \
@@ -457,6 +471,10 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, est_configure, __args)
+#define stmmac_fpe_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -514,6 +532,8 @@ struct stmmac_priv;
struct tc_cls_u32_offload;
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
+struct tc_taprio_qopt_offload;
+struct tc_etf_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
@@ -523,6 +543,10 @@ struct stmmac_tc_ops {
struct tc_cbs_qopt_offload *qopt);
int (*setup_cls)(struct stmmac_priv *priv,
struct flow_cls_offload *cls);
+ int (*setup_taprio)(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt);
+ int (*setup_etf)(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -533,6 +557,10 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_cbs, __args)
#define stmmac_tc_setup_cls(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls, __args)
+#define stmmac_tc_setup_taprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_taprio, __args)
+#define stmmac_tc_setup_etf(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_etf, __args)
struct stmmac_counters;
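
As with the existing hooks, each new callback gets a stmmac_* wrapper macro; the stmmac_do_callback()-style dispatch returns -EINVAL when a hook is absent, so optional features degrade gracefully. A toy stand-alone model of that ops-table pattern — the names below are invented for illustration and the real macros in this header differ in detail:

#include <errno.h>
#include <stdio.h>

struct toy_dma_ops {
	int (*enable_tbs)(void *ioaddr, int en, unsigned int chan);
};

static int toy_enable_tbs(void *ioaddr, int en, unsigned int chan)
{
	(void)ioaddr;
	printf("enable_tbs(en=%d, chan=%u)\n", en, chan);
	return 0;
}

static int call_enable_tbs(const struct toy_dma_ops *ops, void *ioaddr,
			   int en, unsigned int chan)
{
	/* Missing hook -> -EINVAL, along the lines of the real macros */
	if (!ops || !ops->enable_tbs)
		return -EINVAL;
	return ops->enable_tbs(ioaddr, en, chan);
}

int main(void)
{
	struct toy_dma_ops ops = { .enable_tbs = toy_enable_tbs };

	return call_enable_tbs(&ops, NULL, 1, 0);
}
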
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 252cf48c5816..a57b0fa815ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,13 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+#define MMC_TX_FPE_FRAG 0x1a8
+#define MMC_TX_HOLD_REQ 0x1ac
+#define MMC_RX_PKT_ASSEMBLY_ERR 0x1c8
+#define MMC_RX_PKT_SMD_ERR 0x1cc
+#define MMC_RX_PKT_ASSEMBLY_OK 0x1d0
+#define MMC_RX_FPE_FRAG 0x1d4
+
/* XGMAC MMC Registers */
#define MMC_XGMAC_TX_OCTET_GB 0x14
#define MMC_XGMAC_TX_PKT_GB 0x1c
@@ -315,6 +322,15 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr += readl(mmcaddr + MMC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr += readl(mmcaddr + MMC_RX_FPE_FRAG);
}
const struct stmmac_mmc_ops dwmac_mmc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d993fc7e82c3..9c02fc754bf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -39,13 +39,18 @@ struct stmmac_tx_info {
bool is_jumbo;
};
+#define STMMAC_TBS_AVAIL BIT(0)
+#define STMMAC_TBS_EN BIT(1)
+
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
u32 tx_count_frames;
+ int tbs;
struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+ struct dma_edesc *dma_entx;
struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
struct stmmac_tx_info *tx_skbuff_dma;
@@ -88,6 +93,7 @@ struct stmmac_channel {
struct napi_struct rx_napi ____cacheline_aligned_in_smp;
struct napi_struct tx_napi ____cacheline_aligned_in_smp;
struct stmmac_priv *priv_data;
+ spinlock_t lock;
u32 index;
};
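
The two tbs bits above encode distinct states: STMMAC_TBS_AVAIL records that the queue's hardware supports TBS (probed at open time, and what makes the ring use struct dma_edesc), while STMMAC_TBS_EN records that launch-time transmission was actually requested, presumably via the setup_etf hook added to stmmac_tc_ops in this series. A minimal sketch of the flag usage:

#include <stdio.h>

#define STMMAC_TBS_AVAIL	(1U << 0)
#define STMMAC_TBS_EN		(1U << 1)

int main(void)
{
	unsigned int tbs = 0;

	tbs |= STMMAC_TBS_AVAIL;	/* probe succeeded at open time */
	tbs |= STMMAC_TBS_EN;		/* user enabled launch-time offload */

	if (tbs & STMMAC_TBS_AVAIL)
		printf("ring uses struct dma_edesc\n");
	if (tbs & STMMAC_TBS_EN)
		printf("xmit stamps a launch time\n");
	return 0;
}
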
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bbc65bd332a8..5836b21edd7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -46,7 +46,7 @@
#include "dwxgmac2.h"
#include "hwif.h"
-#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
@@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
@@ -387,9 +388,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Using PCS we cannot deal with the phy registers at this stage
* so we do not support extra features like EEE.
*/
- if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
- (priv->hw->pcs == STMMAC_PCS_TBI) ||
- (priv->hw->pcs == STMMAC_PCS_RTBI))
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
+ priv->hw->pcs == STMMAC_PCS_RTBI)
return false;
/* Check if MAC core supports the EEE feature. */
@@ -1089,6 +1089,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
if (priv->extend_desc)
head_tx = (void *)tx_q->dma_etx;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ head_tx = (void *)tx_q->dma_entx;
else
head_tx = (void *)tx_q->dma_tx;
@@ -1109,7 +1111,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
- if (mtu >= BUF_SIZE_4KiB)
+ if (mtu >= BUF_SIZE_8KiB)
+ ret = BUF_SIZE_16KiB;
+ else if (mtu >= BUF_SIZE_4KiB)
ret = BUF_SIZE_8KiB;
else if (mtu >= BUF_SIZE_2KiB)
ret = BUF_SIZE_4KiB;
@@ -1160,13 +1164,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
- for (i = 0; i < DMA_TX_SIZE; i++)
+ for (i = 0; i < DMA_TX_SIZE; i++) {
+ int last = (i == (DMA_TX_SIZE - 1));
+ struct dma_desc *p;
+
if (priv->extend_desc)
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
- priv->mode, (i == DMA_TX_SIZE - 1));
+ p = &tx_q->dma_etx[i].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[i].basic;
else
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
- priv->mode, (i == DMA_TX_SIZE - 1));
+ p = &tx_q->dma_tx[i];
+
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
+ }
}
/**
@@ -1293,19 +1303,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
int ret = -ENOMEM;
- int bfsize = 0;
int queue;
int i;
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- if (bfsize < 0)
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
- priv->dma_buf_sz = bfsize;
-
/* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
"SKB addresses:\nskb\t\tskb data\tdma data\n");
@@ -1347,8 +1347,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
}
}
- buf_sz = bfsize;
-
return 0;
err_init_rx_buffers:
@@ -1392,7 +1390,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
- else
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
}
@@ -1401,6 +1399,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
else
p = tx_q->dma_tx + i;
@@ -1520,19 +1520,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
/* Release the DMA TX socket buffers */
dma_free_tx_skbufs(priv, queue);
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc)
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- tx_q->dma_tx, tx_q->dma_tx_phy);
- else
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- tx_q->dma_etx, tx_q->dma_tx_phy);
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
+
+ size *= DMA_TX_SIZE;
+
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff);
@@ -1625,6 +1632,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
@@ -1641,28 +1650,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
if (!tx_q->tx_skbuff)
goto err_dma;
- if (priv->extend_desc) {
- tx_q->dma_etx = dma_alloc_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_extended_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
- if (!tx_q->dma_etx)
- goto err_dma;
- } else {
- tx_q->dma_tx = dma_alloc_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
- if (!tx_q->dma_tx)
- goto err_dma;
- }
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
+
+ size *= DMA_TX_SIZE;
+
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ goto err_dma;
+
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
}
return 0;
err_dma:
free_dma_tx_desc_resources(priv);
-
return ret;
}
@@ -1894,6 +1907,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->extend_desc)
p = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[entry].basic;
else
p = tx_q->dma_tx + entry;
@@ -1975,7 +1990,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -1992,19 +2007,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int i;
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
dma_free_tx_skbufs(priv, chan);
- for (i = 0; i < DMA_TX_SIZE; i++)
- if (priv->extend_desc)
- stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
- priv->mode, (i == DMA_TX_SIZE - 1));
- else
- stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
- priv->mode, (i == DMA_TX_SIZE - 1));
+ stmmac_clear_tx_descriptors(priv, chan);
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
@@ -2069,17 +2077,25 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan);
struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
if (napi_schedule_prep(&ch->rx_napi)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule_irqoff(&ch->rx_napi);
- status |= handle_tx;
}
}
- if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
- napi_schedule_irqoff(&ch->tx_napi);
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+ if (napi_schedule_prep(&ch->tx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule_irqoff(&ch->tx_napi);
+ }
+ }
return status;
}
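
The rework above masks only the direction being handed to NAPI, and takes the new per-channel ch->lock around the read-modify-write so concurrent RX/TX paths cannot lose each other's enable bits; the matching re-enable happens in the poll routines after napi_complete_done(). A toy user-space model using a mutex in place of spin_lock_irqsave() (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ch_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int intr_ena = 0x3;	/* bit 0 = RX, bit 1 = TX */

static void disable_dma_irq(int rx, int tx)
{
	pthread_mutex_lock(&ch_lock);	/* spin_lock_irqsave() in the driver */
	if (rx)
		intr_ena &= ~0x1u;
	if (tx)
		intr_ena &= ~0x2u;
	pthread_mutex_unlock(&ch_lock);
}

int main(void)
{
	disable_dma_irq(1, 0);		/* RX NAPI scheduled: mask RX only */
	printf("intr_ena=0x%x (TX still enabled)\n", intr_ena);
	return 0;
}
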
@@ -2274,14 +2290,14 @@ static void stmmac_tx_timer(struct timer_list *t)
ch = &priv->channel[tx_q->queue_index];
- /*
- * If NAPI is already running we can miss some events. Let's rearm
- * the timer and try again.
- */
- if (likely(napi_schedule_prep(&ch->tx_napi)))
+ if (likely(napi_schedule_prep(&ch->tx_napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(&ch->tx_napi);
- else
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+ }
}
/**
@@ -2633,6 +2649,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->dma_cap.vlins)
stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+ /* TBS: enable time-based scheduling on the channels that support it */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2658,11 +2682,11 @@ static void stmmac_hw_teardown(struct net_device *dev)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int bfsize = 0;
u32 chan;
int ret;
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
@@ -2677,9 +2701,28 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ /* Check for TBS availability early, before DMA resources are allocated */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
+ tx_q->tbs &= ~STMMAC_TBS_AVAIL;
+ }
+
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
@@ -2828,7 +2871,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
tag = skb_vlan_tag_get(skb);
- p = tx_q->dma_tx + tx_q->cur_tx;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ p = &tx_q->dma_tx[tx_q->cur_tx];
+
if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
return false;
@@ -2863,7 +2910,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
- desc = tx_q->dma_tx + tx_q->cur_tx;
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
if (priv->dma_cap.addr64 <= 32)
@@ -2914,13 +2965,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
+ int desc_size, tmp_pay_len = 0, first_tx;
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
- int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- u8 proto_hdr_len, hdr;
bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -2957,7 +3008,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != tx_q->mss) {
- mss_desc = tx_q->dma_tx + tx_q->cur_tx;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
@@ -2977,7 +3032,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
- desc = tx_q->dma_tx + first_entry;
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[first_entry].basic;
+ else
+ desc = &tx_q->dma_tx[first_entry];
first = desc;
if (has_vlan)
@@ -3049,12 +3107,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
set_ic = false;
if (set_ic) {
- desc = &tx_q->dma_tx[tx_q->cur_tx];
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
/* We've used all descriptors we need for this skb, however,
@@ -3114,17 +3174,20 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags);
-
- stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
-
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+ stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
@@ -3152,10 +3215,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
+ struct dma_edesc *tbs_desc = NULL;
+ int entry, desc_size, first_tx;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
- int entry, first_tx;
dma_addr_t des;
tx_q = &priv->tx_queue[queue];
@@ -3195,6 +3259,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
@@ -3224,6 +3290,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = tx_q->dma_tx + entry;
@@ -3270,14 +3338,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (set_ic) {
if (likely(priv->extend_desc))
desc = &tx_q->dma_etx[entry].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
else
desc = &tx_q->dma_tx[entry];
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
/* We've used all descriptors we need for this skb, however,
@@ -3289,20 +3357,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
- void *tx_head;
-
netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags);
- if (priv->extend_desc)
- tx_head = (void *)tx_q->dma_etx;
- else
- tx_head = (void *)tx_q->dma_tx;
-
- stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
-
netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
@@ -3348,12 +3407,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */
stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
- csum_insertion, priv->mode, 1, last_segment,
+ csum_insertion, priv->mode, 0, last_segment,
skb->len);
- } else {
- stmmac_set_tx_owner(priv, first);
}
+ if (tx_q->tbs & STMMAC_TBS_EN) {
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+ tbs_desc = &tx_q->dma_entx[first_entry];
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+ }
+
+ stmmac_set_tx_owner(priv, first);
+
/* The own bit must be the latest setting done when preparing the
* descriptor, and then a barrier is needed to make sure that
* all is coherent before granting the DMA engine.
@@ -3364,8 +3430,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+ stmmac_tx_timer_arm(priv, queue);
return NETDEV_TX_OK;
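
When STMMAC_TBS_EN is set, the hunk above converts skb->tstamp — a nanosecond launch time, typically provided through the etf qdisc / SO_TXTIME — into a timespec64 and stores it in the first enhanced descriptor. The split mirrors ns_to_timespec64(); a hedged stand-alone equivalent with a made-up timestamp:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical launch time: 5.123456789 s in the clock's epoch */
	uint64_t tstamp = 5123456789ULL;

	uint32_t tv_sec  = (uint32_t)(tstamp / 1000000000ULL);
	uint32_t tv_nsec = (uint32_t)(tstamp % 1000000000ULL);

	printf("sec=%u nsec=%u\n",
	       (unsigned int)tv_sec, (unsigned int)tv_nsec);
	return 0;
}
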
@@ -3646,8 +3720,9 @@ read_again:
* feature is always disabled and packets need to be
* stripped manually.
*/
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap)) {
+ if (likely(!(status & rx_not_ls)) &&
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))) {
if (buf2_len)
buf2_len -= ETH_FCS_LEN;
else
@@ -3751,8 +3826,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
work_done = stmmac_rx(priv, budget, chan);
- if (work_done < budget && napi_complete_done(napi, work_done))
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
return work_done;
}
@@ -3761,7 +3842,6 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
int work_done;
@@ -3770,15 +3850,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
work_done = min(work_done, budget);
- if (work_done < budget)
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
- /* Force transmission restart */
- tx_q = &priv->tx_queue[chan];
- if (tx_q->cur_tx != tx_q->dirty_tx) {
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
- chan);
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
}
return work_done;
@@ -3792,7 +3869,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
-static void stmmac_tx_timeout(struct net_device *dev)
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -3829,12 +3906,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ txfifosz /= priv->plat->tx_queues_to_use;
if (netif_running(dev)) {
netdev_err(priv->dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
+ new_mtu = STMMAC_ALIGN(new_mtu);
+
+ /* If true, the per-queue FIFO slice is too small or the MTU too large */
+ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+ return -EINVAL;
+
dev->mtu = new_mtu;
netdev_update_features(dev);
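
The new guard in stmmac_change_mtu() divides the TX FIFO evenly between the active TX queues and rejects an MTU that does not fit in one slice (or exceeds BUF_SIZE_16KiB). A worked example with a hypothetical 16 KB FIFO split across four queues:

#include <stdio.h>

int main(void)
{
	int txfifosz = 16384;		/* hypothetical TX FIFO size */
	int tx_queues = 4;
	int new_mtu = 9000;		/* jumbo frame request */

	txfifosz /= tx_queues;		/* 4096 bytes per queue */

	if (txfifosz < new_mtu || new_mtu > 16384)
		printf("MTU %d rejected (per-queue FIFO %d)\n",
		       new_mtu, txfifosz);
	return 0;
}
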
@@ -4066,6 +4155,10 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
priv, priv, true);
case TC_SETUP_QDISC_CBS:
return stmmac_tc_setup_cbs(priv, priv, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4169,7 +4262,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
DMA_TX_SIZE, 1, seq);
- } else {
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
DMA_TX_SIZE, 0, seq);
@@ -4238,13 +4331,76 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
+ priv->dma_cap.number_rx_queues);
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
+ priv->dma_cap.number_tx_queues);
seq_printf(seq, "\tEnhanced descriptors: %s\n",
(priv->dma_cap.enh_desc) ? "Y" : "N");
-
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+ priv->dma_cap.pps_out_num);
+ seq_printf(seq, "\tSafety Features: %s\n",
+ priv->dma_cap.asp ? "Y" : "N");
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ priv->dma_cap.frpsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
+ priv->dma_cap.addr64);
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ priv->dma_cap.rssen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+ priv->dma_cap.vlhash ? "Y" : "N");
+ seq_printf(seq, "\tSplit Header: %s\n",
+ priv->dma_cap.sphen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+ priv->dma_cap.vlins ? "Y" : "N");
+ seq_printf(seq, "\tDouble VLAN: %s\n",
+ priv->dma_cap.dvlan ? "Y" : "N");
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+ priv->dma_cap.l3l4fnum);
+ seq_printf(seq, "\tARP Offloading: %s\n",
+ priv->dma_cap.arpoffsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+ priv->dma_cap.estsel ? "Y" : "N");
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+ priv->dma_cap.fpesel ? "Y" : "N");
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+ priv->dma_cap.tbssel ? "Y" : "N");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (dev->netdev_ops != &stmmac_netdev_ops)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (priv->dbgfs_dir)
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+ priv->dbgfs_dir,
+ stmmac_fs_dir,
+ dev->name);
+ break;
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+ .notifier_call = stmmac_device_event,
+};
+
static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4259,12 +4415,15 @@ static void stmmac_init_fs(struct net_device *dev)
/* Entry to report the DMA HW features */
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
+
+ register_netdevice_notifier(&stmmac_notifier);
}
static void stmmac_exit_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
@@ -4685,6 +4844,7 @@ int stmmac_dvr_probe(struct device *device,
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
+ spin_lock_init(&ch->lock);
ch->priv_data = priv;
ch->index = queue;
@@ -4714,8 +4874,7 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
@@ -4749,8 +4908,7 @@ int stmmac_dvr_probe(struct device *device,
error_netdev_register:
phylink_destroy(priv->phylink);
error_phy_setup:
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
@@ -4795,8 +4953,7 @@ int stmmac_dvr_remove(struct device *dev)
reset_control_assert(priv->plat->stmmac_rst);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
- if (priv->hw->pcs != STMMAC_PCS_RGMII &&
- priv->hw->pcs != STMMAC_PCS_TBI &&
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
@@ -4817,6 +4974,7 @@ int stmmac_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 chan;
if (!ndev || !netif_running(ndev))
return 0;
@@ -4830,6 +4988,9 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
+
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8237dbc3e991..fe2c9fa6a71c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -65,7 +65,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
- plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -96,7 +95,7 @@ static int stmmac_default_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
@@ -154,8 +153,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->tx_queues_cfg[6].weight = 0x0F;
plat->tx_queues_cfg[7].weight = 0x10;
- plat->mdio_bus_data->phy_mask = 0;
-
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
plat->dma_cfg->fixed_burst = 0;
@@ -220,7 +217,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_SGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
return ehl_common_data(pdev, plat);
}
@@ -233,7 +231,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_RGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
return ehl_common_data(pdev, plat);
}
@@ -261,7 +260,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_SGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
return tgl_common_data(pdev, plat);
}
@@ -361,7 +360,7 @@ static int quark_default_data(struct pci_dev *pdev,
plat->bus_id = pci_dev_id(pdev);
plat->phy_addr = ret;
- plat->interface = PHY_INTERFACE_MODE_RMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_RMII;
plat->dma_cfg->pbl = 16;
plat->dma_cfg->pblx8 = true;
@@ -386,8 +385,6 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tso_en = 1;
plat->pmt = 1;
- plat->mdio_bus_data->phy_mask = 0;
-
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -406,6 +403,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@@ -418,7 +417,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_addr = -1;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bedaff0c13bd..d10ac54bf385 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -320,7 +320,7 @@ out:
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
struct device_node *np, struct device *dev)
{
- bool mdio = true;
+ bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- rc = of_get_phy_mode(np, &plat->phy_interface);
- if (rc)
- return ERR_PTR(rc);
+ plat->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (plat->phy_interface < 0)
+ return ERR_PTR(plat->phy_interface);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index f3d8b9336b8e..2aba2673d6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -14,6 +14,7 @@
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
@@ -50,6 +51,7 @@ struct stmmac_packet_attrs {
u8 id;
int sarc;
u16 queue_mapping;
+ u64 timestamp;
};
static u8 stmmac_test_next_id;
@@ -80,7 +82,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
if (attr->max_size && (attr->max_size > size))
size = attr->max_size;
- skb = netdev_alloc_skb_ip_align(priv->dev, size);
+ skb = netdev_alloc_skb(priv->dev, size);
if (!skb)
return NULL;
@@ -208,6 +210,9 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
skb->pkt_type = PACKET_HOST;
skb->dev = priv->dev;
+ if (attr->timestamp)
+ skb->tstamp = ns_to_ktime(attr->timestamp);
+
return skb;
}
@@ -244,6 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ unsigned char *src = tpriv->packet->src;
+ unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
@@ -260,15 +267,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
goto out;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (tpriv->packet->dst) {
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (dst) {
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
goto out;
}
if (tpriv->packet->sarc) {
- if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
goto out;
- } else if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
+ } else if (src) {
+ if (!ether_addr_equal_unaligned(ehdr->h_source, src))
goto out;
}
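/* Editor's note: this ties in with the netdev_alloc_skb() change above --
 * without the _ip_align variant the Ethernet header of the looped-back
 * test frame is no longer guaranteed to be 2-byte aligned, so the
 * validators switch to ether_addr_equal_unaligned(), which compares
 * byte-wise instead of via u16 loads.
 */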
@@ -339,8 +346,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
goto cleanup;
}
- skb_set_queue_mapping(skb, attr->queue_mapping);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
if (ret)
goto cleanup;
@@ -624,6 +630,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
return -EOPNOTSUPP;
if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
return -EOPNOTSUPP;
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
while (--tries) {
/* We only need to check the mc_addr for collisions */
@@ -666,6 +674,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+ return -EOPNOTSUPP;
if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
return -EOPNOTSUPP;
@@ -710,7 +720,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
struct ethhdr *ehdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
goto out;
if (ehdr->h_proto != htons(ETH_P_PAUSE))
goto out;
@@ -847,12 +857,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb,
if (tpriv->vlan_id) {
if (skb->vlan_proto != htons(proto))
goto out;
- if (skb->vlan_tci != tpriv->vlan_id)
+ if (skb->vlan_tci != tpriv->vlan_id) {
+ /* Means filter did not work. */
+ tpriv->ok = false;
+ complete(&tpriv->comp);
goto out;
+ }
}
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
goto out;
ihdr = ip_hdr(skb);
@@ -922,8 +936,7 @@ static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -961,6 +974,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_vlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
@@ -1014,8 +1030,7 @@ static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
goto vlan_del;
}
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -1053,6 +1068,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_dvlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
@@ -1282,8 +1300,7 @@ static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
skb->protocol = htons(proto);
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto vlan_del;
@@ -1319,16 +1336,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
@@ -1395,7 +1415,8 @@ cleanup_cls:
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
@@ -1440,16 +1461,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
@@ -1521,7 +1545,8 @@ cleanup_cls:
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
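/* Editor's note: the L3/L4 filters are bypassed by the hardware while RSS
 * is active (see the -EBUSY guard added to tc_setup_cls() further down),
 * so the tests now save old_enable, genuinely clear priv->rss.enable for
 * the duration of the run, and restore it in cleanup_rss.
 */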
@@ -1574,7 +1599,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb,
struct arphdr *ahdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
goto out;
ahdr = arp_hdr(skb);
@@ -1635,8 +1660,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
if (ret)
goto cleanup;
- skb_set_queue_mapping(skb, 0);
- ret = dev_queue_xmit(skb);
+ ret = dev_direct_xmit(skb, 0);
if (ret)
goto cleanup_promisc;
@@ -1724,6 +1748,68 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
return 0;
}
+static int stmmac_test_tbs(struct stmmac_priv *priv)
+{
+#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms */
+ struct stmmac_packet_attrs attr = { };
+ struct tc_etf_qopt_offload qopt;
+ u64 start_time, curr_time = 0;
+ unsigned long flags;
+ int ret, i;
+
+ if (!priv->hwts_tx_en)
+ return -EOPNOTSUPP;
+
+ /* Find the first TBS-enabled queue, if any */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ break;
+
+ if (i >= priv->plat->tx_queues_to_use)
+ return -EOPNOTSUPP;
+
+ qopt.enable = true;
+ qopt.queue = i;
+
+ ret = stmmac_tc_setup_etf(priv, priv, &qopt);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if (!curr_time) {
+ ret = -EOPNOTSUPP;
+ goto fail_disable;
+ }
+
+ start_time = curr_time;
+ curr_time += STMMAC_TBS_LT_OFFSET;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.timestamp = curr_time;
+ attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
+ attr.queue_mapping = i;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto fail_disable;
+
+ /* Check if expected time has elapsed */
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
+ ret = -EINVAL;
+
+fail_disable:
+ qopt.enable = false;
+ stmmac_tc_setup_etf(priv, priv, &qopt);
+ return ret;
+}
+
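/* Editor's note -- a rough user-space equivalent of what the TBS selftest
 * exercises, assuming a hypothetical interface eth0 and queue 1 (syntax
 * per tc-etf(8)):
 *
 *   tc qdisc replace dev eth0 parent root handle 100: \
 *           mqprio num_tc 4 map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 0
 *   tc qdisc replace dev eth0 parent 100:2 etf \
 *           clockid CLOCK_TAI delta 500000 offload
 *
 * The "offload" flag is what routes tc_etf_qopt_offload to
 * stmmac_tc_setup_etf(); the launch time itself travels in skb->tstamp,
 * mirrored above via attr.timestamp.
 */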
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
@@ -1857,6 +1943,10 @@ static const struct stmmac_test {
.name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
+ }, {
+ .name = "TBS (ETF Scheduler) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_tbs,
},
};
@@ -1865,7 +1955,6 @@ void stmmac_selftest_run(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
int count = stmmac_selftest_get_count(priv);
- int carrier = netif_carrier_ok(dev);
int i, ret;
memset(buf, 0, sizeof(*buf) * count);
@@ -1875,15 +1964,12 @@ void stmmac_selftest_run(struct net_device *dev,
netdev_err(priv->dev, "Only offline tests are supported\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
- } else if (!carrier) {
+ } else if (!netif_carrier_ok(dev)) {
netdev_err(priv->dev, "You need valid Link to execute tests\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
- /* We don't want extra traffic */
- netif_carrier_off(dev);
-
/* Wait for queues to drain */
msleep(200);
@@ -1938,10 +2024,6 @@ void stmmac_selftest_run(struct net_device *dev,
break;
}
}
-
- /* Restart everything */
- if (carrier)
- netif_carrier_on(dev);
}
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 7d972e0fd2b0..7a01dee2f9a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
{
int ret = 0;
+ /* When RSS is enabled, the filtering will be bypassed */
+ if (priv->rss.enable)
+ return -EBUSY;
+
switch (cls->command) {
case FLOW_CLS_REPLACE:
ret = tc_add_flow(priv, cls);
@@ -591,9 +595,167 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct plat_stmmacenet_data *plat = priv->plat;
+ struct timespec64 time;
+ bool fpe = false;
+ int i, ret = 0;
+ u64 ctr;
+
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ switch (wid) {
+ case 0x1:
+ wid = 16;
+ break;
+ case 0x2:
+ wid = 20;
+ break;
+ case 0x3:
+ wid = 24;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (dep) {
+ case 0x1:
+ dep = 64;
+ break;
+ case 0x2:
+ dep = 128;
+ break;
+ case 0x3:
+ dep = 256;
+ break;
+ case 0x4:
+ dep = 512;
+ break;
+ case 0x5:
+ dep = 1024;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!qopt->enable)
+ goto disable;
+ if (qopt->num_entries >= dep)
+ return -EINVAL;
+ if (!qopt->base_time)
+ return -ERANGE;
+ if (!qopt->cycle_time)
+ return -ERANGE;
+
+ if (!plat->est) {
+ plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ GFP_KERNEL);
+ if (!plat->est)
+ return -ENOMEM;
+ } else {
+ memset(plat->est, 0, sizeof(*plat->est));
+ }
+
+ size = qopt->num_entries;
+
+ priv->plat->est->gcl_size = size;
+ priv->plat->est->enable = qopt->enable;
+
+ for (i = 0; i < size; i++) {
+ s64 delta_ns = qopt->entries[i].interval;
+ u32 gates = qopt->entries[i].gate_mask;
+
+ if (delta_ns > GENMASK(wid, 0))
+ return -ERANGE;
+ if (gates > GENMASK(31 - wid, 0))
+ return -ERANGE;
+
+ switch (qopt->entries[i].command) {
+ case TC_TAPRIO_CMD_SET_GATES:
+ if (fpe)
+ return -EINVAL;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_HOLD:
+ gates |= BIT(0);
+ fpe = true;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_RELEASE:
+ gates &= ~BIT(0);
+ fpe = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ }
+
+ /* Adjust for real system time */
+ time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+
+ ctr = qopt->cycle_time;
+ priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->plat->est->ctr[1] = (u32)ctr;
+
+ if (fpe && !priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ ret = stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, fpe);
+ if (ret && fpe) {
+ netdev_err(priv->dev, "failed to enable Frame Preemption\n");
+ return ret;
+ }
+
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ if (ret) {
+ netdev_err(priv->dev, "failed to configure EST\n");
+ goto disable;
+ }
+
+ netdev_info(priv->dev, "configured EST\n");
+ return 0;
+
+disable:
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ return ret;
+}
+
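/* Editor's note: each gate control list word packs the interval into the
 * low wid bits with the gate mask shifted above it, so with estwid == 0x3
 * (wid = 24) the gates land in bits 31:24. A matching, purely
 * illustrative tc-taprio(8) invocation would be roughly:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 4 map 0 1 2 3 queues 1@0 1@1 1@2 1@3 \
 *           base-time 1000000000 \
 *           sched-entry S 0f 300000 sched-entry S 03 300000 \
 *           flags 0x2
 *
 * where flags 0x2 requests full offload and is what delivers the
 * tc_taprio_qopt_offload structure handled here.
 */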
+static int tc_setup_etf(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt)
+{
+ if (!priv->dma_cap.tbssel)
+ return -EOPNOTSUPP;
+ if (qopt->queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+ if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ return -EINVAL;
+
+ if (qopt->enable)
+ priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ else
+ priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+
+ netdev_info(priv->dev, "%s ETF for Queue %d\n",
+ qopt->enable ? "enabled" : "disabled", qopt->queue);
+ return 0;
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
.setup_cbs = tc_setup_cbs,
.setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio,
+ .setup_etf = tc_setup_etf,
};
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index c91876f8c536..6ec9163e232c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2666,7 +2666,7 @@ static void cas_netpoll(struct net_device *dev)
}
#endif
-static void cas_tx_timeout(struct net_device *dev)
+static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cas *cp = netdev_priv(dev);
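/* Editor's note: this hunk and the many like it below belong to a
 * tree-wide change adding the hung queue index to ndo_tx_timeout(). The
 * caller in the core watchdog looks roughly like this (simplified sketch,
 * not taken from this patch):
 *
 *	for (i = 0; i < dev->num_tx_queues; i++) {
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 *
 *		if (netif_xmit_stopped(txq) &&
 *		    time_after(jiffies,
 *			       txq->trans_start + dev->watchdog_timeo))
 *			dev->netdev_ops->ndo_tx_timeout(dev, i);
 *	}
 *
 * Single-queue drivers such as cassini are free to ignore the new
 * argument.
 */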
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f5fd1f3c07cc..9a5004f674c7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6517,7 +6517,7 @@ static void niu_reset_task(struct work_struct *work)
spin_unlock_irqrestore(&np->lock, flags);
}
-static void niu_tx_timeout(struct net_device *dev)
+static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct niu *np = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index e9b757b03b56..c5add0b45eed 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -941,7 +941,7 @@ static int bigmac_close(struct net_device *dev)
return 0;
}
-static void bigmac_tx_timeout(struct net_device *dev)
+static void bigmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bigmac *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3e7631160384..8358064fbd48 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -970,7 +970,7 @@ static void gem_poll_controller(struct net_device *dev)
}
#endif
-static void gem_tx_timeout(struct net_device *dev)
+static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index d007dfeba5c3..f0fe7bb2a750 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2246,7 +2246,7 @@ static int happy_meal_close(struct net_device *dev)
#define SXD(x)
#endif
-static void happy_meal_tx_timeout(struct net_device *dev)
+static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct happy_meal *hp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 1468fa0a54e9..2102b95ec347 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -544,7 +544,7 @@ static void qe_tx_reclaim(struct sunqe *qep)
qep->tx_old = elem;
}
-static void qe_tx_timeout(struct net_device *dev)
+static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct sunqe *qep = netdev_priv(dev);
int tx_full;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8b94d9ad9e2b..8dc6c9ff22e1 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1223,7 +1223,7 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
{
struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct sk_buff *segs;
+ struct sk_buff *segs, *curr, *next;
int maclen, datalen;
int status;
int gso_size, gso_type, gso_segs;
@@ -1282,11 +1282,8 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
skb_reset_mac_header(skb);
status = 0;
- while (segs) {
- struct sk_buff *curr = segs;
-
- segs = segs->next;
- curr->next = NULL;
+ skb_list_walk_safe(segs, curr, next) {
+ skb_mark_not_on_list(curr);
if (port->tso && curr->len > dev->mtu) {
skb_shinfo(curr)->gso_size = gso_size;
skb_shinfo(curr)->gso_type = gso_type;
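/* Editor's note: skb_list_walk_safe() (new in this kernel cycle) is
 * roughly:
 *
 *	#define skb_list_walk_safe(first, skb, next_skb)              \
 *		for ((skb) = (first),                                  \
 *		     (next_skb) = (skb) ? (skb)->next : NULL;          \
 *		     (skb);                                            \
 *		     (skb) = (next_skb),                               \
 *		     (next_skb) = (skb) ? (skb)->next : NULL)
 *
 * i.e. the next pointer is saved up front so skb_mark_not_on_list(curr)
 * can safely unlink the current segment, replacing the open-coded walk.
 */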
@@ -1353,27 +1350,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
if (vio_version_after_eq(&port->vio, 1, 3))
localmtu -= VLAN_HLEN;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct flowi4 fl4;
- struct rtable *rt = NULL;
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = dev->ifindex;
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
- fl4.daddr = ip_hdr(skb)->daddr;
- fl4.saddr = ip_hdr(skb)->saddr;
-
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- skb_dst_set(skb, &rt->dst);
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_FRAG_NEEDED,
- htonl(localmtu));
- }
- }
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(localmtu));
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
goto out_dropped;
}
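/* Editor's note: icmp_ndo_send()/icmpv6_ndo_send() were introduced in
 * this same cycle for exactly this ndo_start_xmit() situation; they
 * account for conntrack NAT having rewritten the packet before generating
 * the error, so the open-coded route lookup above is no longer needed.
 */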
@@ -1539,7 +1521,7 @@ out_dropped:
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);
-void sunvnet_tx_timeout_common(struct net_device *dev)
+void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue)
{
/* XXX Implement me XXX */
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index 2b808d2482d6..5416a3cb9e7d 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -135,7 +135,7 @@ int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
-void sunvnet_tx_timeout_common(struct net_device *dev);
+void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue);
netdev_tx_t
sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
struct vnet_port *(*vnet_tx_port)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index a1f5a1e61040..07046a2370b3 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -689,7 +689,7 @@ static int xlgmac_close(struct net_device *netdev)
return 0;
}
-static void xlgmac_tx_timeout(struct net_device *netdev)
+static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct xlgmac_pdata *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index a46f4189fde3..bf98e0fa7d8b 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -63,6 +63,7 @@ config TI_CPSW_SWITCHDEV
tristate "TI CPSW Switch Support with switchdev"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
depends on NET_SWITCHDEV
+ select PAGE_POOL
select TI_DAVINCI_MDIO
select MFD_SYSCON
select REGMAP
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d34df8e5cf94..ecf776ad8689 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
obj-$(CONFIG_TLAN) += tlan.o
obj-$(CONFIG_CPMAC) += cpmac.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 3a655a4dc10e..a530afe3ce12 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -797,7 +797,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void cpmac_tx_timeout(struct net_device *dev)
+static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -816,16 +816,6 @@ static void cpmac_tx_timeout(struct net_device *dev)
netif_tx_wake_all_queues(priv->dev);
}
-static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- if (!(netif_running(dev)))
- return -EINVAL;
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
static void cpmac_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@@ -1054,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
- .ndo_do_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
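/* Editor's note: phy_do_ioctl_running() is a new phylib helper that
 * performs equivalent netif_running()/phydev checks before calling
 * phy_mii_ioctl(), which is why this driver (and tc35815 and ll_temac
 * below) can drop their private ioctl wrappers.
 */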
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 707d5eb480ce..97a058ca60ac 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -272,7 +272,7 @@ void soft_reset(const char *module, void __iomem *reg)
WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
-void cpsw_ndo_tx_timeout(struct net_device *ndev)
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index bc726356a72c..b8d7b924ee3d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -449,7 +449,7 @@ int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
void cpsw_rx_vlan_encap(struct sk_buff *skb);
void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
-void cpsw_ndo_tx_timeout(struct net_device *ndev);
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 37ba708ac781..6614fa3089b2 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
struct cpdma_chan *chan = si->chan;
struct cpdma_ctlr *ctlr = chan->ctlr;
int len = si->len;
- int swlen = len;
struct cpdma_desc __iomem *desc;
dma_addr_t buffer;
u32 mode;
@@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
if (si->data_dma) {
buffer = si->data_dma;
dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
- swlen |= CPDMA_DMA_EXT_MAP;
} else {
buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
ret = dma_mapping_error(ctlr->dev, buffer);
@@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si)
writel_relaxed(mode | len, &desc->hw_mode);
writel_relaxed((uintptr_t)si->token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
- writel_relaxed(swlen, &desc->sw_len);
+ writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
+ &desc->sw_len);
desc_read(desc, sw_len);
__cpdma_chan_submit(chan, desc);
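/* Editor's note: behavior is unchanged here -- CPDMA_DMA_EXT_MAP still
 * tags descriptors whose buffer was DMA-mapped by the caller (e.g. the
 * cpsw page pool path) so that completion skips the dma_unmap; only the
 * intermediate swlen variable goes away.
 */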
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ae27be85e363..75d4e16c692b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -983,7 +983,7 @@ fail_tx:
* error and re-initialize the TX channel for hardware operation
*
*/
-static void emac_dev_tx_timeout(struct net_device *ndev)
+static void emac_dev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1b2702f74455..d7a144b4a09f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1811,7 +1811,7 @@ out:
return (ret == 0) ? 0 : err;
}
-static void netcp_ndo_tx_timeout(struct net_device *ndev)
+static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct netcp_intf *netcp = netdev_priv(ndev);
unsigned int descs = knav_pool_count(netcp->tx_pool);
@@ -2019,7 +2019,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
goto quit;
}
- efuse = devm_ioremap_nocache(dev, res.start, size);
+ efuse = devm_ioremap(dev, res.start, size);
if (!efuse) {
dev_err(dev, "could not map resource\n");
devm_release_mem_region(dev, res.start, size);
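/* Editor's note: ioremap_nocache() had long been identical to ioremap()
 * on all remaining architectures, so this hunk (and the ll_temac ones
 * below) are part of the tree-wide removal of the _nocache variants.
 */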
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d6a192c1f337..fb36115e9c51 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2533,8 +2533,6 @@ static int gbe_del_vid(void *intf_priv, int vid)
}
#if IS_ENABLED(CONFIG_TI_CPTS)
-#define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
-#define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
static void gbe_txtstamp(void *context, struct sk_buff *skb)
{
@@ -2566,7 +2564,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
* We mark it here because skb_tx_timestamp() is called
* after all the txhooks are called.
*/
- if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
+ if (phy_has_txtstamp(phydev)) {
skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
return 0;
}
@@ -2588,7 +2586,7 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
if (p_info->rxtstamp_complete)
return 0;
- if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
+ if (phy_has_rxtstamp(phydev)) {
p_info->rxtstamp_complete = true;
return 0;
}
@@ -2830,7 +2828,7 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
struct gbe_intf *gbe_intf = intf_priv;
struct phy_device *phy = gbe_intf->slave->phy;
- if (!phy || !phy->drv->hwtstamp) {
+ if (!phy_has_hwtstamp(phy)) {
switch (cmd) {
case SIOCGHWTSTAMP:
return gbe_hwtstamp_get(gbe_intf, req);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 78f0f2d59e22..ad465202980a 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -161,7 +161,7 @@ static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
int irq, int rev, const struct pci_device_id *ent);
-static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -997,7 +997,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
**************************************************************/
-static void tlan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
@@ -1028,7 +1028,7 @@ static void tlan_tx_timeout_work(struct work_struct *work)
struct tlan_priv *priv =
container_of(work, struct tlan_priv, tlan_tqueue);
- tlan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev, UINT_MAX);
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9d9f8acb7ee3..070dd6fa9401 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1405,7 +1405,7 @@ out:
*
* called if tx hangs. Schedules a task that resets the interface
*/
-void gelic_net_tx_timeout(struct net_device *netdev)
+void gelic_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 051033580f0a..805903dbddcc 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,7 +359,7 @@ int gelic_net_open(struct net_device *netdev);
int gelic_net_stop(struct net_device *netdev);
netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
-void gelic_net_tx_timeout(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 538e70810d3d..6576271642c1 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2180,7 +2180,7 @@ out:
* called if tx hangs. Schedules a task that resets the interface
*/
static void
-spider_net_tx_timeout(struct net_device *netdev)
+spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct spider_net_card *card;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 12466a72cefc..3fd43d30b20d 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -483,8 +483,7 @@ static void tc35815_txdone(struct net_device *dev);
static int tc35815_close(struct net_device *dev);
static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
static void tc35815_set_multicast_list(struct net_device *dev);
-static void tc35815_tx_timeout(struct net_device *dev);
-static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tc35815_poll_controller(struct net_device *dev);
#endif
@@ -751,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_get_stats = tc35815_get_stats,
.ndo_set_rx_mode = tc35815_set_multicast_list,
.ndo_tx_timeout = tc35815_tx_timeout,
- .ndo_do_ioctl = tc35815_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1189,7 +1188,7 @@ static void tc35815_schedule_restart(struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
}
-static void tc35815_tx_timeout(struct net_device *dev)
+static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
@@ -2009,15 +2008,6 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
- if (!dev->phydev)
- return -ENODEV;
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
static void tc35815_chip_reset(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ed12dbd156f0..803247d51fe9 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -506,7 +506,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
-static void rhine_tx_timeout(struct net_device *dev);
+static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
@@ -1761,7 +1761,7 @@ out_unlock:
mutex_unlock(&rp->task_lock);
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 346e44115c4e..4b556b74541a 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3257,12 +3257,16 @@ static struct platform_driver velocity_platform_driver = {
* @dev: network device
*
* Called before an ethtool operation. We need to make sure the
- * chip is out of D3 state before we poke at it.
+ * chip is out of D3 state before we poke at it. In case of ethtool
+ * ops nesting, only wake the device up in the outermost block.
*/
static int velocity_ethtool_up(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
- if (!netif_running(dev))
+
+ if (vptr->ethtool_ops_nesting == U32_MAX)
+ return -EBUSY;
+ if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
velocity_set_power_state(vptr, PCI_D0);
return 0;
}
@@ -3272,12 +3276,14 @@ static int velocity_ethtool_up(struct net_device *dev)
* @dev: network device
*
* Called after an ethtool operation. Restore the chip back to D3
- * state if it isn't running.
+ * state if it isn't running. In case of ethtool ops nesting, only
+ * put the device to sleep in the outermost block.
*/
static void velocity_ethtool_down(struct net_device *dev)
{
struct velocity_info *vptr = netdev_priv(dev);
- if (!netif_running(dev))
+
+ if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
velocity_set_power_state(vptr, PCI_D3hot);
}
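/* Editor's note: velocity_ethtool_up()/velocity_ethtool_down() serve as
 * the driver's ethtool_ops .begin/.complete callbacks, so the new
 * ethtool_ops_nesting counter simply makes those calls balanced: only the
 * outermost .begin powers the chip up to D0, and only the matching last
 * .complete drops it back to D3hot.
 */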
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index cdfe7809e3c1..f196e71d2c04 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1483,6 +1483,7 @@ struct velocity_info {
struct velocity_context context;
u32 ticks;
+ u32 ethtool_ops_nesting;
u8 rev_id;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index bede1ff289c5..c0d181a7f83a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -790,7 +790,7 @@ static void w5100_restart_work(struct work_struct *work)
w5100_restart(priv->ndev);
}
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct w5100_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 6ba2747779ce..46aae30c4636 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -341,7 +341,7 @@ static void w5300_get_regs(struct net_device *ndev,
}
}
-static void w5300_tx_timeout(struct net_device *ndev)
+static void w5300_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct w5300_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 21c1b4322ea7..6f11f52c9a9e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1080,17 +1080,6 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
-static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!ndev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(ndev->phydev, rq, cmd);
-}
-
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
@@ -1098,7 +1087,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = temac_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1202,7 +1191,7 @@ static int temac_probe(struct platform_device *pdev)
/* map device registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ lp->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
@@ -1296,7 +1285,7 @@ static int temac_probe(struct platform_device *pdev)
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0de52e70abcc..0c26f5bcc523 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -521,7 +521,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
*
* This function is called when a Tx timeout occurs for the Emaclite device.
*/
-static void xemaclite_tx_timeout(struct net_device *dev)
+static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index fd5288ff53b5..480ab7251515 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -288,7 +288,7 @@ struct local_info {
*/
static netdev_tx_t do_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-static void xirc_tx_timeout(struct net_device *dev);
+static void xirc_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void xirc2ps_tx_timeout_task(struct work_struct *work);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -1203,7 +1203,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
}
static void
-xirc_tx_timeout(struct net_device *dev)
+xirc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct local_info *lp = netdev_priv(dev);
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cd0a8f46e7c6..98aa7b8ddb06 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -27,4 +27,18 @@ config IXP4XX_ETH
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
+config PTP_1588_CLOCK_IXP46X
+ tristate "Intel IXP46x as PTP clock"
+ depends on IXP4XX_ETH
+ depends on PTP_1588_CLOCK
+ default y
+ help
+ This driver adds support for using the IXP46X as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_ixp46x.
+
endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index 794a519d07b3..607f91b1e878 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -3,4 +3,5 @@
# Makefile for the Intel XScale IXP device drivers.
#
-obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
+obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
+obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
diff --git a/drivers/net/ethernet/xscale/ixp46x_ts.h b/drivers/net/ethernet/xscale/ixp46x_ts.h
new file mode 100644
index 000000000000..d792130e27b0
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp46x_ts.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP 1588 clock using the IXP46X
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ */
+
+#ifndef _IXP46X_TS_H_
+#define _IXP46X_TS_H_
+
+#define DEFAULT_ADDEND 0xF0000029
+#define TICKS_NS_SHIFT 4
+
+struct ixp46x_channel_ctl {
+ u32 ch_control; /* 0x40 Time Synchronization Channel Control */
+ u32 ch_event; /* 0x44 Time Synchronization Channel Event */
+ u32 tx_snap_lo; /* 0x48 Transmit Snapshot Low Register */
+ u32 tx_snap_hi; /* 0x4C Transmit Snapshot High Register */
+ u32 rx_snap_lo; /* 0x50 Receive Snapshot Low Register */
+ u32 rx_snap_hi; /* 0x54 Receive Snapshot High Register */
+ u32 src_uuid_lo; /* 0x58 Source UUID0 Low Register */
+ u32 src_uuid_hi; /* 0x5C Sequence Identifier/Source UUID0 High */
+};
+
+struct ixp46x_ts_regs {
+ u32 control; /* 0x00 Time Sync Control Register */
+ u32 event; /* 0x04 Time Sync Event Register */
+ u32 addend; /* 0x08 Time Sync Addend Register */
+ u32 accum; /* 0x0C Time Sync Accumulator Register */
+ u32 test; /* 0x10 Time Sync Test Register */
+ u32 unused; /* 0x14 */
+ u32 rsystime_lo; /* 0x18 RawSystemTime_Low Register */
+ u32 rsystime_hi; /* 0x1C RawSystemTime_High Register */
+ u32 systime_lo; /* 0x20 SystemTime_Low Register */
+ u32 systime_hi; /* 0x24 SystemTime_High Register */
+ u32 trgt_lo; /* 0x28 TargetTime_Low Register */
+ u32 trgt_hi; /* 0x2C TargetTime_High Register */
+ u32 asms_lo; /* 0x30 Auxiliary Slave Mode Snapshot Low */
+ u32 asms_hi; /* 0x34 Auxiliary Slave Mode Snapshot High */
+ u32 amms_lo; /* 0x38 Auxiliary Master Mode Snapshot Low */
+ u32 amms_hi; /* 0x3C Auxiliary Master Mode Snapshot High */
+
+ struct ixp46x_channel_ctl channel[3];
+};
+
+/* 0x00 Time Sync Control Register Bits */
+#define TSCR_AMM (1<<3)
+#define TSCR_ASM (1<<2)
+#define TSCR_TTM (1<<1)
+#define TSCR_RST (1<<0)
+
+/* 0x04 Time Sync Event Register Bits */
+#define TSER_SNM (1<<3)
+#define TSER_SNS (1<<2)
+#define TTIPEND (1<<1)
+
+/* 0x40 Time Synchronization Channel Control Register Bits */
+#define MASTER_MODE (1<<0)
+#define TIMESTAMP_ALL (1<<1)
+
+/* 0x44 Time Synchronization Channel Event Register Bits */
+#define TX_SNAPSHOT_LOCKED (1<<0)
+#define RX_SNAPSHOT_LOCKED (1<<1)
+
+/* The ptp_ixp46x module will set this variable */
+extern int ixp46x_phc_index;
+
+#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 6fc04ffb22c2..269596c15133 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -29,14 +29,16 @@
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <mach/ixp46x_ts.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
+#include "ixp46x_ts.h"
+
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
@@ -517,25 +519,14 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
return ret;
}
-static int ixp4xx_mdio_register(void)
+static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
int err;
if (!(mdio_bus = mdiobus_alloc()))
return -ENOMEM;
- if (cpu_is_ixp43x()) {
- /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
- if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
- return -ENODEV;
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
- } else {
- /* All MII PHY accesses use NPE-B Ethernet registers */
- if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
- return -ENODEV;
- mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- }
-
+ mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
@@ -581,8 +572,8 @@ static void ixp4xx_adjust_link(struct net_device *dev)
__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
- printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
- dev->name, port->speed, port->duplex ? "full" : "half");
+ netdev_info(dev, "%s: link up, speed %u Mb/s, %s duplex\n",
+ dev->name, port->speed, port->duplex ? "full" : "half");
}
@@ -592,7 +583,7 @@ static inline void debug_pkt(struct net_device *dev, const char *func,
#if DEBUG_PKT_BYTES
int i;
- printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
+ netdev_debug(dev, "%s(%i) ", func, len);
for (i = 0; i < len; i++) {
if (i >= DEBUG_PKT_BYTES)
break;
@@ -683,7 +674,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
int received = 0;
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
+ netdev_debug(dev, "eth_poll\n");
#endif
while (received < budget) {
@@ -697,23 +688,20 @@ static int eth_poll(struct napi_struct *napi, int budget)
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
- dev->name);
+ netdev_debug(dev, "eth_poll napi_complete\n");
#endif
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_below_low_watermark(rxq) &&
napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
- dev->name);
+ netdev_debug(dev, "eth_poll napi_reschedule succeeded\n");
#endif
qmgr_disable_irq(rxq);
continue;
}
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll all done\n",
- dev->name);
+ netdev_debug(dev, "eth_poll all done\n");
#endif
return received; /* all work done */
}
@@ -778,7 +766,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
}
#if DEBUG_RX
- printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
+ netdev_debug(dev, "eth_poll(): end, not all work done\n");
#endif
return received; /* not all work done */
}
@@ -842,7 +830,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct desc *desc;
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
+ netdev_debug(dev, "eth_xmit\n");
#endif
if (unlikely(skb->len > MAX_MRU)) {
@@ -897,22 +885,21 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
+ netdev_debug(dev, "eth_xmit queue full\n");
#endif
netif_stop_queue(dev);
/* we could miss TX ready interrupt */
/* really empty in fact */
if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit ready again\n",
- dev->name);
+ netdev_debug(dev, "eth_xmit ready again\n");
#endif
netif_wake_queue(dev);
}
}
#if DEBUG_TX
- printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
+ netdev_debug(dev, "eth_xmit end\n");
#endif
ixp_tx_timestamp(port, skb);
@@ -1099,7 +1086,7 @@ static int init_queues(struct port *port)
int i;
if (!ports_open) {
- dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
POOL_ALLOC_SIZE, 32, 0);
if (!dma_pool)
return -ENOMEM;
@@ -1186,8 +1173,7 @@ static int eth_open(struct net_device *dev)
return err;
if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
- printk(KERN_ERR "%s: %s not responding\n", dev->name,
- npe_name(npe));
+ netdev_err(dev, "%s not responding\n", npe_name(npe));
return -EIO;
}
port->firmware[0] = msg.byte4;
@@ -1299,7 +1285,7 @@ static int eth_close(struct net_device *dev)
msg.eth_id = port->id;
msg.byte3 = 1;
if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
- printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
+ netdev_crit(dev, "unable to enable loopback\n");
i = 0;
do { /* drain RX buffers */
@@ -1323,11 +1309,11 @@ static int eth_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
- " left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain RX queue, %i buffer(s)"
+ " left in NPE\n", buffs);
#if DEBUG_CLOSE
if (!buffs)
- printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
+ netdev_debug(dev, "draining RX queue took %i cycles\n", i);
#endif
buffs = TX_DESCS;
@@ -1343,17 +1329,16 @@ static int eth_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
- "left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain TX queue, %i buffer(s) "
+ "left in NPE\n", buffs);
#if DEBUG_CLOSE
if (!buffs)
- printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+ netdev_debug(dev, "draining TX queues took %i cycles\n", i);
#endif
msg.byte3 = 0;
if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
- printk(KERN_CRIT "%s: unable to disable loopback\n",
- dev->name);
+ netdev_crit(dev, "unable to disable loopback\n");
phy_stop(dev->phydev);
@@ -1374,54 +1359,88 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int eth_init_one(struct platform_device *pdev)
+static int ixp4xx_eth_probe(struct platform_device *pdev)
{
- struct port *port;
- struct net_device *dev;
- struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
- struct phy_device *phydev = NULL;
- u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
+ struct phy_device *phydev = NULL;
+ struct device *dev = &pdev->dev;
+ struct eth_plat_info *plat;
+ resource_size_t regs_phys;
+ struct net_device *ndev;
+ struct resource *res;
+ struct port *port;
int err;
- if (!(dev = alloc_etherdev(sizeof(struct port))))
+ plat = dev_get_platdata(dev);
+
+ if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
return -ENOMEM;
- SET_NETDEV_DEV(dev, &pdev->dev);
- port = netdev_priv(dev);
- port->netdev = dev;
+ SET_NETDEV_DEV(ndev, dev);
+ port = netdev_priv(ndev);
+ port->netdev = ndev;
port->id = pdev->id;
+ /* Get the port resource and remap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ regs_phys = res->start;
+ port->regs = devm_ioremap_resource(dev, res);
+
switch (port->id) {
case IXP4XX_ETH_NPEA:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
- regs_phys = IXP4XX_EthA_BASE_PHYS;
+ /* If the MDIO bus is not up yet, defer probe */
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
case IXP4XX_ETH_NPEB:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
- regs_phys = IXP4XX_EthB_BASE_PHYS;
+ /*
+ * On all except IXP43x, NPE-B is used for the MDIO bus.
+ * If there is no NPE-B in the feature set, bail out, else
+ * register the MDIO bus.
+ */
+ if (!cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEB_ETH0))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-B */
+ if ((err = ixp4xx_mdio_register(port->regs)))
+ return err;
+ }
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
case IXP4XX_ETH_NPEC:
- port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
- regs_phys = IXP4XX_EthC_BASE_PHYS;
+ /*
+ * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access,
+	 * if there is no NPE-C, no bus, nothing works, so bail out.
+ */
+ if (cpu_is_ixp43x()) {
+ if (!(ixp4xx_read_feature_bits() &
+ IXP4XX_FEATURE_NPEC_ETH))
+ return -ENODEV;
+ /* Else register the MDIO bus on NPE-C */
+ if ((err = ixp4xx_mdio_register(port->regs)))
+ return err;
+ }
+ if (!mdio_bus)
+ return -EPROBE_DEFER;
break;
default:
- err = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
- dev->netdev_ops = &ixp4xx_netdev_ops;
- dev->ethtool_ops = &ixp4xx_ethtool_ops;
- dev->tx_queue_len = 100;
+ ndev->netdev_ops = &ixp4xx_netdev_ops;
+ ndev->ethtool_ops = &ixp4xx_ethtool_ops;
+ ndev->tx_queue_len = 100;
- netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+ netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
- if (!(port->npe = npe_request(NPE_ID(port->id)))) {
- err = -EIO;
- goto err_free;
- }
+ if (!(port->npe = npe_request(NPE_ID(port->id))))
+ return -EIO;
- port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+ port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name);
if (!port->mem_res) {
err = -EBUSY;
goto err_npe_rel;
@@ -1429,9 +1448,9 @@ static int eth_init_one(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
+ memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
- platform_set_drvdata(pdev, dev);
+ platform_set_drvdata(pdev, ndev);
__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
&port->regs->core_control);
@@ -1441,7 +1460,7 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
+ phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
err = PTR_ERR(phydev);
@@ -1450,11 +1469,11 @@ static int eth_init_one(struct platform_device *pdev)
phydev->irq = PHY_POLL;
- if ((err = register_netdev(dev)))
+ if ((err = register_netdev(ndev)))
goto err_phy_dis;
- printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
- npe_name(port->npe));
+ netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
+ npe_name(port->npe));
return 0;
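
Each switch arm above returns -EPROBE_DEFER while the MDIO bus is still missing, so the driver core retries the probe later instead of failing it outright. A condensed sketch of that pattern, reusing this file's mdio_bus pointer:

/* Sketch: probe deferral. -EPROBE_DEFER asks the driver core to
 * queue this device and retry probing once more providers (here
 * the MDIO bus) have registered.
 */
static int example_probe(struct platform_device *pdev)
{
	if (!mdio_bus)			/* dependency not up yet */
		return -EPROBE_DEFER;	/* core will retry later */

	/* ... normal resource and netdev setup ... */
	return 0;
}
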
@@ -1465,58 +1484,32 @@ err_free_mem:
release_resource(port->mem_res);
err_npe_rel:
npe_release(port->npe);
-err_free:
- free_netdev(dev);
return err;
}
-static int eth_remove_one(struct platform_device *pdev)
+static int ixp4xx_eth_remove(struct platform_device *pdev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct phy_device *phydev = dev->phydev;
- struct port *port = netdev_priv(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct phy_device *phydev = ndev->phydev;
+ struct port *port = netdev_priv(ndev);
- unregister_netdev(dev);
+ unregister_netdev(ndev);
phy_disconnect(phydev);
+ ixp4xx_mdio_remove();
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
release_resource(port->mem_res);
- free_netdev(dev);
return 0;
}
static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
- .probe = eth_init_one,
- .remove = eth_remove_one,
+ .probe = ixp4xx_eth_probe,
+ .remove = ixp4xx_eth_remove,
};
-
-static int __init eth_init_module(void)
-{
- int err;
-
- /*
- * FIXME: we bail out on device tree boot but this really needs
- * to be fixed in a nicer way: this registers the MDIO bus before
- * even matching the driver infrastructure, we should only probe
- * detected hardware.
- */
- if (of_have_populated_dt())
- return -ENODEV;
- if ((err = ixp4xx_mdio_register()))
- return err;
- return platform_driver_register(&ixp4xx_eth_driver);
-}
-
-static void __exit eth_cleanup_module(void)
-{
- platform_driver_unregister(&ixp4xx_eth_driver);
- ixp4xx_mdio_remove();
-}
+module_platform_driver(ixp4xx_eth_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
-module_init(eth_init_module);
-module_exit(eth_cleanup_module);
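
module_platform_driver() generates the same registration boilerplate that the removed init/exit pair open-coded; roughly, the macro expands to:

static int __init ixp4xx_eth_driver_init(void)
{
	return platform_driver_register(&ixp4xx_eth_driver);
}
module_init(ixp4xx_eth_driver_init);

static void __exit ixp4xx_eth_driver_exit(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
}
module_exit(ixp4xx_eth_driver_exit);
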
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 67028484e9a0..9ecc395239e9 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -15,7 +15,8 @@
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
-#include <mach/ixp46x_ts.h>
+
+#include "ixp46x_ts.h"
#define DRIVER "ptp_ixp46x"
#define N_EXT_TS 2
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 56b7791911bf..077c68498f04 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -614,7 +614,7 @@ static int dfx_register(struct device *bdev)
/* Set up I/O base address. */
if (dfx_use_mmio) {
- bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
+ bp->base.mem = ioremap(bar_start[0], bar_len[0]);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
err = -ENOMEM;
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 060712c666bf..eaf85db53a5e 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1318,7 +1318,7 @@ static int fza_probe(struct device *bdev)
}
/* MMIO mapping setup. */
- mmio = ioremap_nocache(start, len);
+ mmio = ioremap(start, len);
if (!mmio) {
pr_err("%s: cannot map MMIO\n", fp->name);
ret = -ENOMEM;
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 15754451cb87..69c29a2ef95d 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -1520,22 +1520,8 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
#ifdef DUMPPACKETS
void dump_data(unsigned char *Data, int length)
{
- int i, j;
- unsigned char s[255], sh[10];
- if (length > 64) {
- length = 64;
- }
printk(KERN_INFO "---Packet start---\n");
- for (i = 0, j = 0; i < length / 8; i++, j += 8)
- printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
- Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
- Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
- strcpy(s, "");
- for (i = 0; i < length % 8; i++) {
- sprintf(sh, "%02x ", Data[j + i]);
- strcat(s, sh);
- }
- printk(KERN_INFO "%s\n", s);
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data,
+		       min_t(size_t, length, 64), false);
printk(KERN_INFO "------------------\n");
} // dump_data
#else
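
print_hex_dump() collapses the hand-rolled formatting loop into one call. A small usage sketch; the arguments are log level, line prefix, prefix type, bytes per row, bytes per group, buffer, length, and whether to append an ASCII column:

#include <linux/printk.h>
#include <linux/kernel.h>	/* min_t */

static void example_dump(const u8 *data, size_t len)
{
	/* 16 bytes per row, 1-byte groups, no trailing ASCII column */
	print_hex_dump(KERN_INFO, "pkt: ", DUMP_PREFIX_OFFSET,
		       16, 1, data, min_t(size_t, len, 64), false);
}
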
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 8a4fbfacad7e..065bb0a40b1d 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -40,7 +40,7 @@ static u8 *fjes_hw_iomap(struct fjes_hw *hw)
return NULL;
}
- base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+ base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);
return base;
}
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b517c1af9de0..8c810edece86 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -48,7 +48,7 @@ static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
static int fjes_change_mtu(struct net_device *, int);
static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
-static void fjes_tx_retry(struct net_device *);
+static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
static int fjes_acpi_add(struct acpi_device *);
static int fjes_acpi_remove(struct acpi_device *);
@@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device)
/* create platform_device */
plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
ARRAY_SIZE(fjes_resource));
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
+
device->driver_data = plat_dev;
return 0;
@@ -792,7 +795,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
-static void fjes_tx_retry(struct net_device *netdev)
+static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
index c611b6a80b20..9237b69d8e21 100644
--- a/drivers/net/fjes/fjes_trace.h
+++ b/drivers/net/fjes/fjes_trace.h
@@ -28,7 +28,7 @@ TRACE_EVENT(fjes_hw_issue_request_command,
__field(u8, cs_busy)
__field(u8, cs_complete)
__field(int, timeout)
- __field(int, ret);
+ __field(int, ret)
),
TP_fast_assign(
__entry->cr_req = cr->bits.req_code;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ecfe26215935..672cd2caf2fb 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -38,7 +38,6 @@ struct pdp_ctx {
struct hlist_node hlist_addr;
union {
- u64 tid;
struct {
u64 tid;
u16 flow;
@@ -541,14 +540,14 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
goto err_rt;
}
@@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev)
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
+static void gtp_destructor(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
+}
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- if (!data[IFLA_GTP_PDP_HASHSIZE])
+ if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
- else
+ } else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+ if (!hashsize)
+ hashsize = 1024;
+ }
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
return 0;
out_hashtable:
- gtp_hashtable_free(gtp);
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
out_encap:
gtp_encap_disable(gtp);
return err;
@@ -693,8 +704,13 @@ out_encap:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++)
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
- gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -751,12 +767,12 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
int i;
gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (gtp->addr_hash == NULL)
return -ENOMEM;
gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (gtp->tid_hash == NULL)
goto err1;
@@ -772,20 +788,6 @@ err1:
return -ENOMEM;
}
-static void gtp_hashtable_free(struct gtp_dev *gtp)
-{
- struct pdp_ctx *pctx;
- int i;
-
- for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
- pdp_context_delete(pctx);
-
- synchronize_rcu();
- kfree(gtp->addr_hash);
- kfree(gtp->tid_hash);
-}
-
static struct sock *gtp_encap_enable_socket(int fd, int type,
struct gtp_dev *gtp)
{
@@ -802,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
return NULL;
}
- if (sock->sk->sk_protocol != IPPROTO_UDP) {
+ sk = sock->sk;
+ if (sk->sk_protocol != IPPROTO_UDP ||
+ sk->sk_type != SOCK_DGRAM ||
+ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
pr_debug("socket fd=%d not UDP\n", fd);
sk = ERR_PTR(-EINVAL);
goto out_sock;
}
- lock_sock(sock->sk);
- if (sock->sk->sk_user_data) {
+ lock_sock(sk);
+ if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
- goto out_sock;
+ goto out_rel_sock;
}
- sk = sock->sk;
sock_hold(sk);
tuncfg.sk_user_data = gtp;
@@ -824,8 +828,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
-out_sock:
+out_rel_sock:
release_sock(sock->sk);
+out_sock:
sockfd_put(sock);
return sk;
}
@@ -849,8 +854,7 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
- if (sk0)
- gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk0);
return PTR_ERR(sk1u);
}
}
@@ -858,10 +862,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
if (role > GTP_ROLE_SGSN) {
- if (sk0)
- gtp_encap_disable_sock(sk0);
- if (sk1u)
- gtp_encap_disable_sock(sk1u);
+ gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk1u);
return -EINVAL;
}
}
@@ -926,24 +928,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
- struct genl_info *info)
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
+ struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
- struct pdp_ctx *pctx;
+ unsigned int version;
bool found = false;
__be32 ms_addr;
ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
- hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
- if (pctx->ms_addr_ip4.s_addr == ms_addr) {
- found = true;
- break;
- }
- }
+ pctx = ipv4_pdp_find(gtp, ms_addr);
+ if (pctx)
+ found = true;
+ if (version == GTP_V0)
+ pctx_tid = gtp0_pdp_find(gtp,
+ nla_get_u64(info->attrs[GTPA_TID]));
+ else if (version == GTP_V1)
+ pctx_tid = gtp1_pdp_find(gtp,
+ nla_get_u32(info->attrs[GTPA_I_TEI]));
+ if (pctx_tid)
+ found = true;
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@@ -951,6 +960,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
+ if (pctx && pctx_tid)
+ return -EEXIST;
+ if (!pctx)
+ pctx = pctx_tid;
+
ipv4_pdp_fill(pctx, info);
if (pctx->gtp_version == GTP_V0)
@@ -1074,7 +1088,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
- err = ipv4_pdp_add(gtp, sk, info);
+ err = gtp_pdp_add(gtp, sk, info);
out_unlock:
rcu_read_unlock();
@@ -1232,43 +1246,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- unsigned long tid = cb->args[1];
- int i, k = cb->args[0], ret;
struct pdp_ctx *pctx;
+ struct gtp_net *gn;
+
+ gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
+ rcu_read_lock();
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
if (last_gtp && last_gtp != gtp)
continue;
else
last_gtp = NULL;
- for (i = k; i < gtp->hash_size; i++) {
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
- if (tid && tid != pctx->u.tid)
- continue;
- else
- tid = 0;
-
- ret = gtp_genl_fill_info(skb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, pctx);
- if (ret < 0) {
+ for (i = bucket; i < gtp->hash_size; i++) {
+ j = 0;
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+ hlist_tid) {
+ if (j >= skip &&
+ gtp_genl_fill_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
- cb->args[1] = pctx->u.tid;
+ cb->args[1] = j;
cb->args[2] = (unsigned long)gtp;
goto out;
}
+ j++;
}
+ skip = 0;
}
+ bucket = 0;
}
cb->args[4] = 1;
out:
+ rcu_read_unlock();
return skb->len;
}
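
The rewritten dump resumes from a (bucket, index) cursor stored in cb->args rather than from a remembered TID, so entries deleted between dump invocations can no longer strand the iterator. A condensed sketch of the pattern, with hypothetical table names:

/* Sketch (hypothetical table): a two-level resumable netlink dump.
 * example_hash/example_fill_one are stand-ins, not real kernel APIs.
 */
#include <linux/rculist.h>
#include <linux/netlink.h>

struct example_entry {
	struct hlist_node node;
};

extern struct hlist_head example_hash[];
extern int example_hash_size;
int example_fill_one(struct sk_buff *skb, struct example_entry *e);

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct example_entry *e;

	rcu_read_lock();
	for (i = bucket; i < example_hash_size; i++) {
		j = 0;
		hlist_for_each_entry_rcu(e, &example_hash[i], node) {
			if (j >= skip && example_fill_one(skb, e)) {
				cb->args[0] = i;	/* resume bucket */
				cb->args[1] = j;	/* resume entry */
				goto out;
			}
			j++;
		}
		skip = 0;	/* skip applies only to the resume bucket */
	}
out:
	rcu_read_unlock();
	return skb->len;	/* partial dump; core calls back for more */
}
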
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 23281aeeb222..71d6629e65c9 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!sp)
return;
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index df495b5595f5..e7413a643929 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -687,8 +687,6 @@ struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
struct hdlcdrv_state *s;
int err;
- BUG_ON(ops == NULL);
-
if (privsize < sizeof(struct hdlcdrv_state))
privsize = sizeof(struct hdlcdrv_state);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index c5bfa19ddb93..deef14215110 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!ax)
return;
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
index 3a2aa0708166..0db7ccaec4a4 100644
--- a/drivers/net/hyperv/Makefile
+++ b/drivers/net/hyperv/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o netvsc_trace.o netvsc_bpf.o
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 9caa876ce6e8..abda736e7c7d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -142,6 +142,8 @@ struct netvsc_device_info {
u32 send_section_size;
u32 recv_section_size;
+ struct bpf_prog *bprog;
+
u8 rss_key[NETVSC_HASH_KEYLEN];
};
@@ -169,7 +171,6 @@ struct rndis_device {
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
- u16 rx_table[ITAB_NUM];
};
@@ -190,7 +191,8 @@ int netvsc_send(struct net_device *net,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *page_buffer,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool xdp_tx);
void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp);
int netvsc_recv_callback(struct net_device *net,
@@ -199,6 +201,16 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp);
+unsigned int netvsc_xdp_fraglen(unsigned int len);
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev);
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev);
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_device_info *dev_info);
@@ -833,6 +845,8 @@ struct nvsp_message {
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
+#define NETVSC_XDP_HDRM 256
+
struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
@@ -868,6 +882,7 @@ struct netvsc_stats {
u64 bytes;
u64 broadcast;
u64 multicast;
+ u64 xdp_drop;
struct u64_stats_sync syncp;
};
@@ -940,6 +955,8 @@ struct net_device_context {
u32 tx_table[VRSS_SEND_TAB_SIZE];
+ u16 rx_table[ITAB_NUM];
+
/* Ethtool settings */
u8 duplex;
u32 speed;
@@ -971,6 +988,9 @@ struct netvsc_channel {
atomic_t queue_sends;
struct nvsc_rsc rsc;
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+
struct netvsc_stats tx_stats;
struct netvsc_stats rx_stats;
};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index eab83e71567a..ae3f3084c2ed 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -122,8 +122,10 @@ static void free_netvsc_device(struct rcu_head *head)
vfree(nvdev->send_buf);
kfree(nvdev->send_section_map);
- for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
vfree(nvdev->chan_table[i].mrc.slots);
+ }
kfree(nvdev);
}
@@ -900,7 +902,8 @@ int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool xdp_tx)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
@@ -923,10 +926,11 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
- /* Send control message directly without accessing msd (Multi-Send
- * Data) field which may be changed during data packet processing.
+ /* Send a control message or XDP packet directly without accessing
+ * msd (Multi-Send Data) field which may be changed during data packet
+ * processing.
*/
- if (!skb)
+ if (!skb || xdp_tx)
return netvsc_send_pkt(device, packet, net_device, pb, skb);
/* batch packets in send buffer if possible */
@@ -1392,6 +1396,21 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
nvchan->net_device = net_device;
u64_stats_init(&nvchan->tx_stats.syncp);
u64_stats_init(&nvchan->rx_stats.syncp);
+
+ ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+
+ if (ret) {
+ netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
+ goto cleanup2;
+ }
+
+ ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (ret) {
+ netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
+ goto cleanup2;
+ }
}
/* Enable NAPI handler before init callbacks */
@@ -1437,6 +1456,8 @@ close:
cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
+
+cleanup2:
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
new file mode 100644
index 000000000000..b86611041db6
--- /dev/null
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, Microsoft Corporation.
+ *
+ * Author:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/kernel.h>
+#include <net/xdp.h>
+
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+
+#include "hyperv_net.h"
+
+u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
+{
+ void *data = nvchan->rsc.data[0];
+ u32 len = nvchan->rsc.len[0];
+ struct page *page = NULL;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ xdp->data_hard_start = NULL;
+
+ rcu_read_lock();
+ prog = rcu_dereference(nvchan->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ /* allocate page buffer for data */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page) {
+ act = XDP_DROP;
+ goto out;
+ }
+
+ xdp->data_hard_start = page_address(page);
+ xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &nvchan->xdp_rxq;
+ xdp->handle = 0;
+
+ memcpy(xdp->data, data, len);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ if (page && act != XDP_PASS && act != XDP_TX) {
+ __free_page(page);
+ xdp->data_hard_start = NULL;
+ }
+
+ return act;
+}
+
+unsigned int netvsc_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
+{
+ return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
+}
+
+int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack,
+ struct netvsc_device *nvdev)
+{
+ struct bpf_prog *old_prog;
+ int buf_max, i;
+
+ old_prog = netvsc_xdp_get(nvdev);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
+ dev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && (dev->features & NETIF_F_LRO)) {
+		netdev_err(dev, "XDP: LRO not supported\n");
+		NL_SET_ERR_MSG_MOD(extack, "XDP: LRO not supported");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (prog)
+ bpf_prog_add(prog, nvdev->num_chn - 1);
+
+ for (i = 0; i < nvdev->num_chn; i++)
+ rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < nvdev->num_chn; i++)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
+{
+ struct netdev_bpf xdp;
+ bpf_op_t ndo_bpf;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (!vf_netdev)
+ return 0;
+
+ ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
+ if (!ndo_bpf)
+ return 0;
+
+ memset(&xdp, 0, sizeof(xdp));
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ xdp.command = XDP_SETUP_PROG;
+ xdp.prog = prog;
+
+ ret = ndo_bpf(vf_netdev, &xdp);
+
+ if (ret && prog)
+ bpf_prog_put(prog);
+
+ return ret;
+}
+
+static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
+{
+ struct bpf_prog *prog = netvsc_xdp_get(nvdev);
+
+ if (prog)
+ return prog->aux->id;
+
+ return 0;
+}
+
+int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct net_device_context *ndevctx = netdev_priv(dev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
+ struct netlink_ext_ack *extack = bpf->extack;
+ int ret;
+
+ if (!nvdev || nvdev->destroy) {
+ if (bpf->command == XDP_QUERY_PROG) {
+ bpf->prog_id = 0;
+ return 0; /* Query must always succeed */
+ } else {
+ return -ENODEV;
+ }
+ }
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);
+
+ if (ret)
+ return ret;
+
+ ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
+
+ if (ret) {
+ netdev_err(dev, "vf_setxdp failed:%d\n", ret);
+ NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");
+
+ netvsc_xdp_set(dev, NULL, extack, nvdev);
+ }
+
+ return ret;
+
+ case XDP_QUERY_PROG:
+ bpf->prog_id = netvsc_xdp_query(nvdev);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
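
The new ndo_bpf hook is exercised from userspace through the generic XDP attach path. A hedged userspace sketch, assuming the libbpf API of this kernel's era (bpf_set_link_xdp_fd was later superseded by bpf_xdp_attach); the object path and program layout are illustrative only:

/* Userspace sketch: load an XDP object and attach its first program
 * to an interface by name.
 */
#include <bpf/libbpf.h>
#include <net/if.h>

int example_attach_xdp(const char *obj_path, const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file(obj_path, NULL);
	if (libbpf_get_error(obj))
		return -1;
	if (bpf_object__load(obj))
		return -1;

	prog = bpf_program__next(NULL, obj);	/* first program in object */
	if (!prog)
		return -1;

	/* The kernel routes this to the driver's ndo_bpf(XDP_SETUP_PROG) */
	return bpf_set_link_xdp_fd(ifindex, bpf_program__fd(prog), 0);
}
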
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 02e66473f2ed..65e12cb07f45 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
+#include <linux/bpf.h>
#include <net/arp.h>
#include <net/route.h>
@@ -519,7 +520,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
return rc;
}
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet = NULL;
@@ -686,7 +687,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* timestamp packet in software */
skb_tx_timestamp(skb);
- ret = netvsc_send(net, packet, rndis_msg, pb, skb);
+ ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
if (likely(ret == 0))
return NETDEV_TX_OK;
@@ -709,6 +710,11 @@ no_memory:
goto drop;
}
+static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ return netvsc_xmit(skb, ndev, false);
+}
+
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
@@ -751,6 +757,22 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+
+ skb->queue_mapping = skb_get_rx_queue(skb);
+ __skb_push(skb, ETH_HLEN);
+
+ rc = netvsc_xmit(skb, ndev, true);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
struct iphdr *iph = (struct iphdr *)skb->data;
@@ -760,7 +782,8 @@ static void netvsc_comp_ipcsum(struct sk_buff *skb)
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- struct netvsc_channel *nvchan)
+ struct netvsc_channel *nvchan,
+ struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
@@ -768,18 +791,37 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
nvchan->rsc.csum_info;
const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
+ void *xbuf = xdp->data_hard_start;
int i;
- skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
- if (!skb)
- return skb;
+ if (xbuf) {
+ unsigned int hdroom = xdp->data - xdp->data_hard_start;
+ unsigned int xlen = xdp->data_end - xdp->data;
+ unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);
- /*
- * Copy to skb. This copy is needed here since the memory pointed by
- * hv_netvsc_packet cannot be deallocated
- */
- for (i = 0; i < nvchan->rsc.cnt; i++)
- skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);
+ skb = build_skb(xbuf, frag_size);
+
+ if (!skb) {
+ __free_page(virt_to_page(xbuf));
+ return NULL;
+ }
+
+ skb_reserve(skb, hdroom);
+ skb_put(skb, xlen);
+ skb->dev = napi->dev;
+ } else {
+ skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
+
+ if (!skb)
+ return NULL;
+
+ /* Copy to skb. This copy is needed here since the memory
+ * pointed by hv_netvsc_packet cannot be deallocated.
+ */
+ for (i = 0; i < nvchan->rsc.cnt; i++)
+ skb_put_data(skb, nvchan->rsc.data[i],
+ nvchan->rsc.len[i]);
+ }
skb->protocol = eth_type_trans(skb, net);
@@ -829,13 +871,25 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats;
+ struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct xdp_buff xdp;
+ u32 act;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
+ act = netvsc_run_xdp(net, nvchan, &xdp);
+
+ if (act != XDP_PASS && act != XDP_TX) {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ return NVSP_STAT_SUCCESS; /* consumed by XDP */
+ }
+
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, nvchan);
+ skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
@@ -849,7 +903,6 @@ int netvsc_recv_callback(struct net_device *net,
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -860,6 +913,11 @@ int netvsc_recv_callback(struct net_device *net,
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
+ if (act == XDP_TX) {
+ netvsc_xdp_xmit(skb, net);
+ return NVSP_STAT_SUCCESS;
+ }
+
napi_gro_receive(&nvchan->napi, skb);
return NVSP_STAT_SUCCESS;
}
@@ -886,10 +944,11 @@ static void netvsc_get_channels(struct net_device *net,
/* Alloc struct netvsc_device_info, and initialize it from either existing
* struct netvsc_device, or from default values.
*/
-static struct netvsc_device_info *netvsc_devinfo_get
- (struct netvsc_device *nvdev)
+static
+struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
struct netvsc_device_info *dev_info;
+ struct bpf_prog *prog;
dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
@@ -897,6 +956,8 @@ static struct netvsc_device_info *netvsc_devinfo_get
return NULL;
if (nvdev) {
+ ASSERT_RTNL();
+
dev_info->num_chn = nvdev->num_chn;
dev_info->send_sections = nvdev->send_section_cnt;
dev_info->send_section_size = nvdev->send_section_size;
@@ -905,6 +966,12 @@ static struct netvsc_device_info *netvsc_devinfo_get
memcpy(dev_info->rss_key, nvdev->extension->rss_key,
NETVSC_HASH_KEYLEN);
+
+ prog = netvsc_xdp_get(nvdev);
+ if (prog) {
+ bpf_prog_inc(prog);
+ dev_info->bprog = prog;
+ }
} else {
dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
dev_info->send_sections = NETVSC_DEFAULT_TX;
@@ -916,6 +983,17 @@ static struct netvsc_device_info *netvsc_devinfo_get
return dev_info;
}
+/* Free struct netvsc_device_info */
+static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
+{
+ if (dev_info->bprog) {
+ ASSERT_RTNL();
+ bpf_prog_put(dev_info->bprog);
+ }
+
+ kfree(dev_info);
+}
+
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
@@ -927,6 +1005,8 @@ static int netvsc_detach(struct net_device *ndev,
if (cancel_work_sync(&nvdev->subchan_work))
nvdev->num_chn = 1;
+ netvsc_xdp_set(ndev, NULL, NULL, nvdev);
+
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
netvsc_tx_disable(nvdev, ndev);
@@ -960,7 +1040,8 @@ static int netvsc_attach(struct net_device *ndev,
struct hv_device *hdev = ndev_ctx->device_ctx;
struct netvsc_device *nvdev;
struct rndis_device *rdev;
- int ret;
+ struct bpf_prog *prog;
+ int ret = 0;
nvdev = rndis_filter_device_add(hdev, dev_info);
if (IS_ERR(nvdev))
@@ -976,6 +1057,16 @@ static int netvsc_attach(struct net_device *ndev,
}
}
+ prog = dev_info->bprog;
+ if (prog) {
+ bpf_prog_inc(prog);
+ ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
+ if (ret) {
+ bpf_prog_put(prog);
+ goto err1;
+ }
+ }
+
/* In any case device is now ready */
netif_device_attach(ndev);
@@ -985,7 +1076,7 @@ static int netvsc_attach(struct net_device *ndev,
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
if (ret)
- goto err;
+ goto err2;
rdev = nvdev->extension;
if (!rdev->link_state)
@@ -994,9 +1085,10 @@ static int netvsc_attach(struct net_device *ndev,
return 0;
-err:
+err2:
netif_device_detach(ndev);
+err1:
rndis_filter_device_remove(hdev, nvdev);
return ret;
@@ -1046,7 +1138,7 @@ static int netvsc_set_channels(struct net_device *net,
}
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
@@ -1153,7 +1245,7 @@ rollback_vf:
dev_set_mtu(vf_netdev, orig_mtu);
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
@@ -1378,8 +1470,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 4 statistics per queue (rx/tx packets/bytes) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
+/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1411,6 +1503,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_drop;
int i, j, cpu;
if (!nvdev)
@@ -1439,9 +1532,11 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
+ xdp_drop = qstats->xdp_drop;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_drop;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
@@ -1489,6 +1584,8 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
}
for_each_present_cpu(cpu) {
@@ -1662,7 +1759,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
- indir[i] = rndis_dev->rx_table[i];
+ indir[i] = ndc->rx_table[i];
}
if (key)
@@ -1692,7 +1789,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
- rndis_dev->rx_table[i] = indir[i];
+ ndc->rx_table[i] = indir[i];
}
if (!key) {
@@ -1785,10 +1882,27 @@ static int netvsc_set_ringparam(struct net_device *ndev,
}
out:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return ret;
}
+static netdev_features_t netvsc_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+
+ if (!nvdev || nvdev->destroy)
+ return features;
+
+ if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
+ features ^= NETIF_F_LRO;
+ netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
+ }
+
+ return features;
+}
+
static int netvsc_set_features(struct net_device *ndev,
netdev_features_t features)
{
@@ -1875,12 +1989,14 @@ static const struct net_device_ops device_ops = {
.ndo_start_xmit = netvsc_start_xmit,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
+ .ndo_fix_features = netvsc_fix_features,
.ndo_set_features = netvsc_set_features,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
+ .ndo_bpf = netvsc_bpf,
};
/*
@@ -2167,6 +2283,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
+ struct bpf_prog *prog;
struct net_device *ndev;
int ret;
@@ -2211,6 +2328,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
vf_netdev->wanted_features = ndev->features;
netdev_update_features(vf_netdev);
+ prog = netvsc_xdp_get(netvsc_dev);
+ netvsc_vf_setxdp(vf_netdev, prog);
+
return NOTIFY_OK;
}
@@ -2252,6 +2372,8 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+ netvsc_vf_setxdp(vf_netdev, NULL);
+
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2363,14 +2485,14 @@ static int netvsc_probe(struct hv_device *dev,
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
@@ -2398,8 +2520,10 @@ static int netvsc_remove(struct hv_device *dev)
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
- if (nvdev)
+ if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
+ netvsc_xdp_set(net, NULL, NULL, nvdev);
+ }
/*
* Call to the vsc driver to let it know that the device is being
@@ -2472,11 +2596,11 @@ static int netvsc_resume(struct hv_device *dev)
ret = netvsc_attach(net, device_info);
- rtnl_unlock();
-
- kfree(device_info);
+ netvsc_devinfo_put(device_info);
net_device_ctx->saved_netvsc_dev_info = NULL;
+ rtnl_unlock();
+
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 206b4e77eaf0..b81ceba38218 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -235,7 +235,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
+ ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
rcu_read_unlock_bh();
return ret;
@@ -773,6 +773,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
+ struct net_device_context *ndc = netdev_priv(ndev);
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
@@ -812,7 +813,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
- itab[i] = rdev->rx_table[i];
+ itab[i] = ndc->rx_table[i];
	/* Set hash key values */
keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
@@ -1171,6 +1172,9 @@ int rndis_set_subchannel(struct net_device *ndev,
wait_event(nvdev->subchan_open,
atomic_read(&nvdev->open_chn) == nvdev->num_chn);
+ for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+ ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+
/* ignore failures from setting rss parameters, still have channels */
if (dev_info)
rndis_filter_set_rss_param(rdev, dev_info->rss_key);
@@ -1180,9 +1184,6 @@ int rndis_set_subchannel(struct net_device *ndev,
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
- for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
- ndev_ctx->tx_table[i] = i % nvdev->num_chn;
-
return 0;
}
@@ -1312,6 +1313,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
{
struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *ndc = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_recv_scale_cap rsscap;
@@ -1398,9 +1400,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* We will use the given number of channels if available. */
net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
- for (i = 0; i < ITAB_NUM; i++)
- rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
+ if (!netif_is_rxfh_configured(net)) {
+ for (i = 0; i < ITAB_NUM; i++)
+ ndc->rx_table[i] = ethtool_rxfh_indir_default(
i, net_device->num_chn);
+ }
atomic_set(&net_device->open_chn, 1);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
@@ -1439,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
/* Halt and release the rndis device */
rndis_filter_halt_device(net_dev, rndis_dev);
- net_dev->extension = NULL;
-
netvsc_device_remove(dev);
}
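
Since the indirection table now lives in net_device_context, the driver only writes defaults when netif_is_rxfh_configured() reports no user-supplied table. ethtool_rxfh_indir_default() amounts to a round-robin spread, as this sketch illustrates:

/* Sketch: default RSS indirection -- hash slot i maps to queue
 * i % nqueues, matching ethtool_rxfh_indir_default().
 */
static void example_fill_indir(u16 *table, int nslots, int nqueues)
{
	int i;

	for (i = 0; i < nslots; i++)
		table[i] = i % nqueues;
}
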
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index afd8b2a08245..45bfd99f17fa 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -11,16 +11,17 @@
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
+#include <net/macsec.h>
+#include <linux/phy.h>
#include <uapi/linux/if_macsec.h>
-typedef u64 __bitwise sci_t;
-
#define MACSEC_SCI_LEN 8
/* SecTAG length = macsec_eth_header without the optional SCI */
@@ -58,8 +59,6 @@ struct macsec_eth_header {
#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16
-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
-
#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
sc; \
@@ -77,49 +76,6 @@ struct gcm_iv {
__be32 pn;
};
-/**
- * struct macsec_key - SA key
- * @id: user-provided key identifier
- * @tfm: crypto struct, key storage
- */
-struct macsec_key {
- u8 id[MACSEC_KEYID_LEN];
- struct crypto_aead *tfm;
-};
-
-struct macsec_rx_sc_stats {
- __u64 InOctetsValidated;
- __u64 InOctetsDecrypted;
- __u64 InPktsUnchecked;
- __u64 InPktsDelayed;
- __u64 InPktsOK;
- __u64 InPktsInvalid;
- __u64 InPktsLate;
- __u64 InPktsNotValid;
- __u64 InPktsNotUsingSA;
- __u64 InPktsUnusedSA;
-};
-
-struct macsec_rx_sa_stats {
- __u32 InPktsOK;
- __u32 InPktsInvalid;
- __u32 InPktsNotValid;
- __u32 InPktsNotUsingSA;
- __u32 InPktsUnusedSA;
-};
-
-struct macsec_tx_sa_stats {
- __u32 OutPktsProtected;
- __u32 OutPktsEncrypted;
-};
-
-struct macsec_tx_sc_stats {
- __u64 OutPktsProtected;
- __u64 OutPktsEncrypted;
- __u64 OutOctetsProtected;
- __u64 OutOctetsEncrypted;
-};
-
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
@@ -131,124 +87,8 @@ struct macsec_dev_stats {
__u64 InPktsOverrun;
};
-/**
- * struct macsec_rx_sa - receive secure association
- * @active:
- * @next_pn: packet number expected for the next packet
- * @lock: protects next_pn manipulations
- * @key: key structure
- * @stats: per-SA stats
- */
-struct macsec_rx_sa {
- struct macsec_key key;
- spinlock_t lock;
- u32 next_pn;
- refcount_t refcnt;
- bool active;
- struct macsec_rx_sa_stats __percpu *stats;
- struct macsec_rx_sc *sc;
- struct rcu_head rcu;
-};
-
-struct pcpu_rx_sc_stats {
- struct macsec_rx_sc_stats stats;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct macsec_rx_sc - receive secure channel
- * @sci: secure channel identifier for this SC
- * @active: channel is active
- * @sa: array of secure associations
- * @stats: per-SC stats
- */
-struct macsec_rx_sc {
- struct macsec_rx_sc __rcu *next;
- sci_t sci;
- bool active;
- struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
- struct pcpu_rx_sc_stats __percpu *stats;
- refcount_t refcnt;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct macsec_tx_sa - transmit secure association
- * @active:
- * @next_pn: packet number to use for the next packet
- * @lock: protects next_pn manipulations
- * @key: key structure
- * @stats: per-SA stats
- */
-struct macsec_tx_sa {
- struct macsec_key key;
- spinlock_t lock;
- u32 next_pn;
- refcount_t refcnt;
- bool active;
- struct macsec_tx_sa_stats __percpu *stats;
- struct rcu_head rcu;
-};
-
-struct pcpu_tx_sc_stats {
- struct macsec_tx_sc_stats stats;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct macsec_tx_sc - transmit secure channel
- * @active:
- * @encoding_sa: association number of the SA currently in use
- * @encrypt: encrypt packets on transmit, or authenticate only
- * @send_sci: always include the SCI in the SecTAG
- * @end_station:
- * @scb: single copy broadcast flag
- * @sa: array of secure associations
- * @stats: stats for this TXSC
- */
-struct macsec_tx_sc {
- bool active;
- u8 encoding_sa;
- bool encrypt;
- bool send_sci;
- bool end_station;
- bool scb;
- struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
- struct pcpu_tx_sc_stats __percpu *stats;
-};
-
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
-/**
- * struct macsec_secy - MACsec Security Entity
- * @netdev: netdevice for this SecY
- * @n_rx_sc: number of receive secure channels configured on this SecY
- * @sci: secure channel identifier used for tx
- * @key_len: length of keys used by the cipher suite
- * @icv_len: length of ICV used by the cipher suite
- * @validate_frames: validation mode
- * @operational: MAC_Operational flag
- * @protect_frames: enable protection for this SecY
- * @replay_protect: enable packet number checks on receive
- * @replay_window: size of the replay window
- * @tx_sc: transmit secure channel
- * @rx_sc: linked list of receive secure channels
- */
-struct macsec_secy {
- struct net_device *netdev;
- unsigned int n_rx_sc;
- sci_t sci;
- u16 key_len;
- u16 icv_len;
- enum macsec_validation_type validate_frames;
- bool operational;
- bool protect_frames;
- bool replay_protect;
- u32 replay_window;
- struct macsec_tx_sc tx_sc;
- struct macsec_rx_sc __rcu *rx_sc;
-};
-
struct pcpu_secy_stats {
struct macsec_dev_stats stats;
struct u64_stats_sync syncp;
@@ -260,6 +100,7 @@ struct pcpu_secy_stats {
* @real_dev: pointer to underlying netdevice
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
+ * @offload: status of offloading on the MACsec device
*/
struct macsec_dev {
struct macsec_secy secy;
@@ -267,6 +108,7 @@ struct macsec_dev {
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
+ enum macsec_offload offload;
};
/**
@@ -480,6 +322,56 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
h->short_length = data_len;
}
+/* Checks if a MACsec interface is being offloaded to a hardware engine */
+static bool macsec_is_offloaded(struct macsec_dev *macsec)
+{
+ if (macsec->offload == MACSEC_OFFLOAD_PHY)
+ return true;
+
+ return false;
+}
+
+/* Checks if underlying layers implement MACsec offloading functions. */
+static bool macsec_check_offload(enum macsec_offload offload,
+ struct macsec_dev *macsec)
+{
+ if (!macsec || !macsec->real_dev)
+ return false;
+
+ if (offload == MACSEC_OFFLOAD_PHY)
+ return macsec->real_dev->phydev &&
+ macsec->real_dev->phydev->macsec_ops;
+
+ return false;
+}
+
+static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
+ struct macsec_dev *macsec,
+ struct macsec_context *ctx)
+{
+ if (ctx) {
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->offload = offload;
+
+ if (offload == MACSEC_OFFLOAD_PHY)
+ ctx->phydev = macsec->real_dev->phydev;
+ }
+
+ return macsec->real_dev->phydev->macsec_ops;
+}
+
+/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
+ * context device reference if provided.
+ */
+static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
+ struct macsec_context *ctx)
+{
+ if (!macsec_check_offload(macsec->offload, macsec))
+ return NULL;
+
+ return __macsec_get_ops(macsec->offload, macsec, ctx);
+}
+
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
@@ -532,6 +424,23 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
+static void __macsec_pn_wrapped(struct macsec_secy *secy,
+ struct macsec_tx_sa *tx_sa)
+{
+ pr_debug("PN wrapped, transitioning to !oper\n");
+ tx_sa->active = false;
+ if (secy->protect_frames)
+ secy->operational = false;
+}
+
+void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
+{
+ spin_lock_bh(&tx_sa->lock);
+ __macsec_pn_wrapped(secy, tx_sa);
+ spin_unlock_bh(&tx_sa->lock);
+}
+EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
+
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
u32 pn;
@@ -540,12 +449,8 @@ static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
pn = tx_sa->next_pn;
tx_sa->next_pn++;
- if (tx_sa->next_pn == 0) {
- pr_debug("PN wrapped, transitioning to !oper\n");
- tx_sa->active = false;
- if (secy->protect_frames)
- secy->operational = false;
- }
+ if (tx_sa->next_pn == 0)
+ __macsec_pn_wrapped(secy, tx_sa);
spin_unlock_bh(&tx_sa->lock);
return pn;
@@ -1029,8 +934,10 @@ static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
return NULL;
}
-static void handle_not_macsec(struct sk_buff *skb)
+static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
+ /* Deliver to the uncontrolled port by default */
+ enum rx_handler_result ret = RX_HANDLER_PASS;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
@@ -1045,7 +952,8 @@ static void handle_not_macsec(struct sk_buff *skb)
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
- if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ if (!macsec_is_offloaded(macsec) &&
+ macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1064,9 +972,17 @@ static void handle_not_macsec(struct sk_buff *skb)
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
+
+ if (netif_running(macsec->secy.netdev) &&
+ macsec_is_offloaded(macsec)) {
+ ret = RX_HANDLER_EXACT;
+ goto out;
+ }
}
+out:
rcu_read_unlock();
+ return ret;
}
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
@@ -1091,12 +1007,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
goto drop_direct;
hdr = macsec_ethhdr(skb);
- if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
- handle_not_macsec(skb);
-
- /* and deliver to the uncontrolled port */
- return RX_HANDLER_PASS;
- }
+ if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
+ return handle_not_macsec(skb);
skb = skb_unshare(skb, GFP_ATOMIC);
*pskb = skb;
@@ -1585,6 +1497,7 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
+ [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
@@ -1602,6 +1515,44 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
.len = MACSEC_MAX_KEY_LEN, },
};
+static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
+ [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+};
+
+/* Offloads an operation to a device driver */
+static int macsec_offload(int (* const func)(struct macsec_context *),
+ struct macsec_context *ctx)
+{
+ int ret;
+
+ if (unlikely(!func))
+ return 0;
+
+ if (ctx->offload == MACSEC_OFFLOAD_PHY)
+ mutex_lock(&ctx->phydev->lock);
+
+	/* Phase I: prepare. The driver should fail here if there are going to be
+ * issues in the commit phase.
+ */
+ ctx->prepare = true;
+ ret = (*func)(ctx);
+ if (ret)
+ goto phy_unlock;
+
+ /* Phase II: commit. This step cannot fail. */
+ ctx->prepare = false;
+ ret = (*func)(ctx);
+ /* This should never happen: commit is not allowed to fail */
+ if (unlikely(ret))
+ WARN(1, "MACsec offloading commit failed (%d)\n", ret);
+
+phy_unlock:
+ if (ctx->offload == MACSEC_OFFLOAD_PHY)
+ mutex_unlock(&ctx->phydev->lock);
+
+ return ret;
+}
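
macsec_offload() above drives each driver callback twice: a prepare pass that may fail and a commit pass that must not. A sketch of a driver-side callback honoring that contract, with hypothetical hardware helpers:

/* Sketch (hypothetical driver): an mdo_* callback split along the
 * prepare/commit contract. example_hw_* are stand-in helpers.
 */
static int example_mdo_add_rxsa(struct macsec_context *ctx)
{
	if (ctx->prepare)
		/* Phase I: validate only; last chance to fail */
		return example_hw_validate_sa(ctx->sa.assoc_num);

	/* Phase II: commit; must not fail */
	example_hw_write_sa(ctx->sa.assoc_num, ctx->sa.rx_sa);
	return 0;
}
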
+
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
if (!attrs[MACSEC_ATTR_SA_CONFIG])
@@ -1717,13 +1668,40 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
- nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rx_sa->sc = rx_sc;
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+ memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ MACSEC_KEYID_LEN);
+
+ err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ if (err)
+ goto cleanup;
+ }
+
+ nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ kfree(rx_sa);
+ rtnl_unlock();
+ return err;
}
static bool validate_add_rxsc(struct nlattr **attrs)
@@ -1746,6 +1724,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct nlattr **attrs = info->attrs;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ bool was_active;
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1771,12 +1751,35 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(rx_sc);
}
+ was_active = rx_sc->active;
if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+
+ ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ rx_sc->active = was_active;
+ rtnl_unlock();
+ return ret;
}
static bool validate_add_txsa(struct nlattr **attrs)
@@ -1813,6 +1816,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
unsigned char assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_operational;
int err;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1863,8 +1867,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
return err;
}
- nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
-
spin_lock_bh(&tx_sa->lock);
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
@@ -1872,14 +1874,43 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ was_operational = secy->operational;
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+ memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ MACSEC_KEYID_LEN);
+
+ err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ if (err)
+ goto cleanup;
+ }
+
+ nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ secy->operational = was_operational;
+ kfree(tx_sa);
+ rtnl_unlock();
+ return err;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -1892,6 +1923,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1915,12 +1947,35 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+
+ ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
clear_rx_sa(rx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -1931,6 +1986,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
sci_t sci;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1957,10 +2013,31 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
return -ENODEV;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+ ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
free_rx_sc(rx_sc);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
@@ -1972,6 +2049,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1992,12 +2070,35 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+
+ ret = macsec_offload(ops->mdo_del_txsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
clear_tx_sa(tx_sa);
rtnl_unlock();
return 0;
+
+cleanup:
+ rtnl_unlock();
+ return ret;
}
static bool validate_upd_sa(struct nlattr **attrs)
@@ -2030,6 +2131,9 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_operational, was_active;
+ u32 prev_pn = 0;
+ int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2050,19 +2154,52 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&tx_sa->lock);
+ prev_pn = tx_sa->next_pn;
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
+ was_active = tx_sa->active;
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ was_operational = secy->operational;
if (assoc_num == tx_sc->encoding_sa)
secy->operational = tx_sa->active;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.tx_sa = tx_sa;
+
+ ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = prev_pn;
+ spin_unlock_bh(&tx_sa->lock);
+ }
+ tx_sa->active = was_active;
+ secy->operational = was_operational;
+ rtnl_unlock();
+ return ret;
}
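
All of the mutating handlers in this file follow the same shape: snapshot whatever software state they are about to touch, apply the change under the rtnl lock, then attempt the offload. Because the prepare phase fails before hardware is modified, restoring the snapshot is enough to keep software and hardware state in sync. Condensed into one sketch, with hypothetical snapshot helpers:

static int macsec_change_and_offload(struct macsec_dev *macsec,
				     struct macsec_context *ctx,
				     int (*mdo)(struct macsec_context *))
{
	struct foo_snapshot old;	/* hypothetical: was_active, prev_pn, ... */
	int ret = 0;

	foo_snapshot_sw_state(macsec, &old);	/* hypothetical */
	foo_apply_sw_change(macsec);		/* hypothetical */

	if (macsec_is_offloaded(macsec)) {
		ret = macsec_offload(mdo, ctx);
		if (ret)
			/* hardware untouched, undo the software side only */
			foo_restore_sw_state(macsec, &old);
	}

	return ret;
}
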
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -2075,6 +2212,9 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ bool was_active;
+ u32 prev_pn = 0;
+ int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2098,15 +2238,46 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
+ prev_pn = rx_sa->next_pn;
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
+ was_active = rx_sa->active;
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.sa.assoc_num = assoc_num;
+ ctx.sa.rx_sa = rx_sa;
+
+ ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
rtnl_unlock();
return 0;
+
+cleanup:
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = prev_pn;
+ spin_unlock_bh(&rx_sa->lock);
+ }
+ rx_sa->active = was_active;
+ rtnl_unlock();
+ return ret;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -2116,6 +2287,9 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ unsigned int prev_n_rx_sc;
+ bool was_active;
+ int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2133,6 +2307,8 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(rx_sc);
}
+ was_active = rx_sc->active;
+ prev_n_rx_sc = secy->n_rx_sc;
if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
@@ -2142,9 +2318,153 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
rx_sc->active = new;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.rx_sc = rx_sc;
+
+ ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+
+cleanup:
+ secy->n_rx_sc = prev_n_rx_sc;
+ rx_sc->active = was_active;
rtnl_unlock();
+ return ret;
+}
+
+static bool macsec_is_configured(struct macsec_dev *macsec)
+{
+ struct macsec_secy *secy = &macsec->secy;
+ struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ int i;
+
+ if (secy->n_rx_sc > 0)
+ return true;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++)
+ if (tx_sc->sa[i])
+ return true;
+
+ return false;
+}
+
+static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
+ enum macsec_offload offload, prev_offload;
+ int (*func)(struct macsec_context *ctx);
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev, *loop_dev;
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+ struct net *loop_net;
+ int ret;
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (!attrs[MACSEC_ATTR_OFFLOAD])
+ return -EINVAL;
+
+ if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
+ attrs[MACSEC_ATTR_OFFLOAD],
+ macsec_genl_offload_policy, NULL))
+ return -EINVAL;
+
+ dev = get_dev_from_nl(genl_info_net(info), attrs);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ macsec = macsec_priv(dev);
+
+ offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+ if (macsec->offload == offload)
+ return 0;
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(offload, macsec))
+ return -EOPNOTSUPP;
+
+ if (offload == MACSEC_OFFLOAD_OFF)
+ goto skip_limitation;
+ /* Check that the physical interface isn't already offloading another
+ * interface.
+ */
+ for_each_net(loop_net) {
+ for_each_netdev(loop_net, loop_dev) {
+ struct macsec_dev *priv;
+
+ if (!netif_is_macsec(loop_dev))
+ continue;
+
+ priv = macsec_priv(loop_dev);
+
+ if (priv->real_dev == macsec->real_dev &&
+ priv->offload != MACSEC_OFFLOAD_OFF)
+ return -EBUSY;
+ }
+ }
+
+skip_limitation:
+ /* Check if the net device is busy. */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ rtnl_lock();
+
+ prev_offload = macsec->offload;
+ macsec->offload = offload;
+
+ /* Check if the device already has rules configured: we do not support
+ * rules migration.
+ */
+ if (macsec_is_configured(macsec)) {
+ ret = -EBUSY;
+ goto rollback;
+ }
+
+ ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+ macsec, &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto rollback;
+ }
+
+ if (prev_offload == MACSEC_OFFLOAD_OFF)
+ func = ops->mdo_add_secy;
+ else
+ func = ops->mdo_del_secy;
+
+ ctx.secy = &macsec->secy;
+ ret = macsec_offload(func, &ctx);
+ if (ret)
+ goto rollback;
+
+ rtnl_unlock();
return 0;
+
+rollback:
+ macsec->offload = prev_offload;
+
+ rtnl_unlock();
+ return ret;
}
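
Switching modes is deliberately restrictive: it is refused while the device is up, while any SC or SA is configured (no rule migration), and while another macsec device is already offloaded to the same lower device. From userspace, with an iproute2 recent enough to know about MACSEC_CMD_UPD_OFFLOAD, this maps to something like `ip macsec offload macsec0 phy` and back with `ip macsec offload macsec0 off`; the exact syntax depends on the iproute2 version.
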
static int copy_tx_sa_stats(struct sk_buff *skb,
@@ -2408,12 +2728,13 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb)
{
- struct macsec_rx_sc *rx_sc;
+ struct macsec_dev *macsec = netdev_priv(dev);
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
- int i, j;
- void *hdr;
+ struct macsec_rx_sc *rx_sc;
struct nlattr *attr;
+ void *hdr;
+ int i, j;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
@@ -2425,6 +2746,13 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
+ if (!attr)
+ goto nla_put_failure;
+ if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
+ goto nla_put_failure;
+ nla_nest_end(skb, attr);
+
if (nla_put_secy(secy, skb))
goto nla_put_failure;
@@ -2690,6 +3018,12 @@ static const struct genl_ops macsec_genl_ops[] = {
.doit = macsec_upd_rxsa,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = MACSEC_CMD_UPD_OFFLOAD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = macsec_upd_offload,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family macsec_fam __ro_after_init = {
@@ -2712,6 +3046,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct pcpu_secy_stats *secy_stats;
int ret, len;
+ if (macsec_is_offloaded(netdev_priv(dev))) {
+ skb->dev = macsec->real_dev;
+ return dev_queue_xmit(skb);
+ }
+
/* 10.5 */
if (!secy->protect_frames) {
secy_stats = this_cpu_ptr(macsec->stats);
@@ -2825,6 +3164,22 @@ static int macsec_dev_open(struct net_device *dev)
goto clear_allmulti;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto clear_allmulti;
+ }
+
+ err = macsec_offload(ops->mdo_dev_open, &ctx);
+ if (err)
+ goto clear_allmulti;
+ }
+
if (netif_carrier_ok(real_dev))
netif_carrier_on(dev);
@@ -2845,6 +3200,16 @@ static int macsec_dev_stop(struct net_device *dev)
netif_carrier_off(dev);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops)
+ macsec_offload(ops->mdo_dev_stop, &ctx);
+ }
+
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
@@ -3076,6 +3441,11 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_tx_sa tx_sc;
+ struct macsec_secy secy;
+ int ret;
+
if (!data)
return 0;
@@ -3085,7 +3455,41 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
- return macsec_changelink_common(dev, data);
+ /* Keep a copy of unmodified secy and tx_sc, in case the offload
+ * propagation fails, to revert macsec_changelink_common.
+ */
+ memcpy(&secy, &macsec->secy, sizeof(secy));
+ memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
+
+ ret = macsec_changelink_common(dev, data);
+ if (ret)
+ return ret;
+
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ int ret;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (!ops) {
+ ret = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ ctx.secy = &macsec->secy;
+ ret = macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (ret)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
+ memcpy(&macsec->secy, &secy, sizeof(secy));
+
+ return ret;
}
static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3128,6 +3532,18 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -3239,6 +3655,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec->real_dev = real_dev;
+ /* MACsec offloading is off by default */
+ macsec->offload = MACSEC_OFFLOAD_OFF;
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 05631d97eeb4..81aa7adf4801 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *dest;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
- const struct ethhdr *eth = (void *)skb->data;
+ const struct ethhdr *eth = skb_eth_hdr(skb);
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
goto xmit_world;
}
@@ -1036,8 +1037,8 @@ static int macvlan_ethtool_get_ts_info(struct net_device *dev,
const struct ethtool_ops *ops = real_dev->ethtool_ops;
struct phy_device *phydev = real_dev->phydev;
- if (phydev && phydev->drv && phydev->drv->ts_info) {
- return phydev->drv->ts_info(phydev, info);
+ if (phy_has_tsinfo(phydev)) {
+ return phy_ts_info(phydev, info);
} else if (ops->get_ts_info) {
return ops->get_ts_info(real_dev, info);
} else {
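
phy_has_tsinfo()/phy_ts_info() replace the open-coded poke at phydev->drv->ts_info and also cover PHYs whose timestamping is supplied by a separate mii_timestamper (see the mii_timestamper.o addition in the phy Makefile below). The resulting precedence, sketched for a hypothetical driver:

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

static int demo_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct phy_device *phydev = dev->phydev;

	/* phy_has_tsinfo() is NULL-safe, so no phydev check is needed */
	if (phy_has_tsinfo(phydev))
		return phy_ts_info(phydev, info);

	/* otherwise advertise software timestamping only */
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	return 0;
}
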
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 2b74425822ab..0b362b8dac17 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -218,6 +218,7 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
{
struct nsim_bpf_bound_prog *state;
char name[16];
+ int ret;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
@@ -230,9 +231,10 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
/* Program id is not populated yet when we create the state. */
sprintf(name, "%u", nsim_dev->prog_id_gen++);
state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
- if (IS_ERR_OR_NULL(state->ddir)) {
+ if (IS_ERR(state->ddir)) {
+ ret = PTR_ERR(state->ddir);
kfree(state);
- return -ENOMEM;
+ return ret;
}
debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
@@ -587,8 +589,8 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
nsim_dev->ddir);
- if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
- return -ENOMEM;
+ if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
+ return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);
nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 6aeed0c600f8..7971dc4f54f1 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -17,6 +17,7 @@
static DEFINE_IDA(nsim_bus_dev_ids);
static LIST_HEAD(nsim_bus_dev_list);
static DEFINE_MUTEX(nsim_bus_dev_list_lock);
+static bool nsim_bus_enable;
static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
{
@@ -28,7 +29,7 @@ static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
{
nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
sizeof(struct nsim_vf_config),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!nsim_bus_dev->vfconfigs)
return -ENOMEM;
nsim_bus_dev->num_vfs = num_vfs;
@@ -96,13 +97,25 @@ new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+ struct devlink *devlink;
unsigned int port_index;
int ret;
+ /* Prevent use of nsim_bus_dev before initialization. */
+ if (!smp_load_acquire(&nsim_bus_dev->init))
+ return -EBUSY;
ret = kstrtouint(buf, 0, &port_index);
if (ret)
return ret;
+
+ devlink = priv_to_devlink(nsim_dev);
+
+ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+ devlink_reload_disable(devlink);
ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+ devlink_reload_enable(devlink);
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -113,13 +126,25 @@ del_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+ struct devlink *devlink;
unsigned int port_index;
int ret;
+ /* Prevent use of nsim_bus_dev before initialization. */
+ if (!smp_load_acquire(&nsim_bus_dev->init))
+ return -EBUSY;
ret = kstrtouint(buf, 0, &port_index);
if (ret)
return ret;
+
+ devlink = priv_to_devlink(nsim_dev);
+
+ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+ devlink_reload_disable(devlink);
ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+ devlink_reload_enable(devlink);
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -179,15 +204,30 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
return -EINVAL;
}
- nsim_bus_dev = nsim_bus_dev_new(id, port_count);
- if (IS_ERR(nsim_bus_dev))
- return PTR_ERR(nsim_bus_dev);
mutex_lock(&nsim_bus_dev_list_lock);
+ /* Prevent use of resources before initialization. */
+ if (!smp_load_acquire(&nsim_bus_enable)) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+ if (IS_ERR(nsim_bus_dev)) {
+ err = PTR_ERR(nsim_bus_dev);
+ goto err;
+ }
+
+ /* Allow using nsim_bus_dev */
+ smp_store_release(&nsim_bus_dev->init, true);
+
list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
mutex_unlock(&nsim_bus_dev_list_lock);
return count;
+err:
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return err;
}
static BUS_ATTR_WO(new_device);
@@ -215,6 +255,11 @@ del_device_store(struct bus_type *bus, const char *buf, size_t count)
err = -ENOENT;
mutex_lock(&nsim_bus_dev_list_lock);
+ /* Prevent use of resources before initialization. */
+ if (!smp_load_acquire(&nsim_bus_enable)) {
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return -EBUSY;
+ }
list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
if (nsim_bus_dev->dev.id != id)
continue;
@@ -284,6 +329,9 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
+ mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
+ /* Disallow using nsim_bus_dev */
+ smp_store_release(&nsim_bus_dev->init, false);
err = device_register(&nsim_bus_dev->dev);
if (err)
@@ -299,6 +347,8 @@ err_nsim_bus_dev_free:
static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
{
+ /* Disallow using nsim_bus_dev */
+ smp_store_release(&nsim_bus_dev->init, false);
device_unregister(&nsim_bus_dev->dev);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
kfree(nsim_bus_dev);
@@ -320,6 +370,8 @@ int nsim_bus_init(void)
err = driver_register(&nsim_driver);
if (err)
goto err_bus_unregister;
+ /* Allow using resources */
+ smp_store_release(&nsim_bus_enable, true);
return 0;
err_bus_unregister:
@@ -331,12 +383,16 @@ void nsim_bus_exit(void)
{
struct nsim_bus_dev *nsim_bus_dev, *tmp;
+ /* Disallow using resources */
+ smp_store_release(&nsim_bus_enable, false);
+
mutex_lock(&nsim_bus_dev_list_lock);
list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
list_del(&nsim_bus_dev->list);
nsim_bus_dev_del(nsim_bus_dev);
}
mutex_unlock(&nsim_bus_dev_list_lock);
+
driver_unregister(&nsim_driver);
bus_unregister(&nsim_bus);
}
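
The new init/nsim_bus_enable flags form a publish/consume pair: smp_store_release() guarantees that every store made before the flag flips is visible to any reader that observes the flip through smp_load_acquire(). The pattern in isolation, with illustrative demo_* names:

#include <linux/atomic.h>	/* pulls in the barrier primitives */

static int demo_data;
static bool demo_ready;

static void demo_publish(void)
{
	demo_data = 42;				/* A: initialize */
	smp_store_release(&demo_ready, true);	/* B: A is ordered before B */
}

static int demo_consume(void)
{
	if (!smp_load_acquire(&demo_ready))	/* pairs with the release */
		return -EBUSY;
	return demo_data;	/* seeing B guarantees seeing A */
}
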
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 059711edfc61..d7706a0346f2 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
- id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+ id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
dummy_data, id, kfree);
if (err) {
@@ -73,23 +73,26 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
- char dev_ddir_name[16];
+ char dev_ddir_name[sizeof(DRV_NAME) + 10];
sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
- if (IS_ERR_OR_NULL(nsim_dev->ddir))
- return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL;
+ if (IS_ERR(nsim_dev->ddir))
+ return PTR_ERR(nsim_dev->ddir);
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
- if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
- return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+ if (IS_ERR(nsim_dev->ports_ddir))
+ return PTR_ERR(nsim_dev->ports_ddir);
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
&nsim_dev->test1);
- debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
- &nsim_dev_take_snapshot_fops);
+ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+ 0200,
+ nsim_dev->ddir,
+ nsim_dev,
+ &nsim_dev_take_snapshot_fops);
debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
&nsim_dev->dont_allow_reload);
debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
@@ -112,8 +115,8 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
nsim_dev->ports_ddir);
- if (IS_ERR_OR_NULL(nsim_dev_port->ddir))
- return -ENOMEM;
+ if (IS_ERR(nsim_dev_port->ddir))
+ return PTR_ERR(nsim_dev_port->ddir);
sprintf(dev_link_name, "../../../" DRV_NAME "%u",
nsim_dev->nsim_bus_dev->dev.id);
@@ -270,7 +273,7 @@ struct nsim_trap_data {
};
/* All driver-specific traps must be documented in
- * Documentation/networking/devlink-trap-netdevsim.rst
+ * Documentation/networking/devlink/netdevsim.rst
*/
enum {
NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -740,6 +743,11 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
if (err)
goto err_health_exit;
+ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+ 0200,
+ nsim_dev->ddir,
+ nsim_dev,
+ &nsim_dev_take_snapshot_fops);
return 0;
err_health_exit:
@@ -853,6 +861,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
if (devlink_is_reload_failed(devlink))
return;
+ debugfs_remove(nsim_dev->take_snapshot);
nsim_dev_port_del_all(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_dev_traps_exit(devlink);
@@ -925,9 +934,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_init(void)
{
nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR_OR_NULL(nsim_dev_ddir))
- return -ENOMEM;
- return 0;
+ return PTR_ERR_OR_ZERO(nsim_dev_ddir);
}
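
The IS_ERR_OR_NULL() checks above were doubly wrong for modern debugfs: debugfs_create_dir() never returns NULL (it returns an ERR_PTR on failure), and squashing every failure to -ENOMEM discarded the real errno. The corrected idiom, in isolation:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *demo_dir;

static int demo_debugfs_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	/* ERR_PTR errno on failure, 0 on success */
	return PTR_ERR_OR_ZERO(demo_dir);
}
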
void nsim_dev_exit(void)
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 13540dee7364..f32d56ac3e80 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -14,6 +14,12 @@
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
+#include <linux/in6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#include <net/fib_notifier.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
@@ -36,6 +42,48 @@ struct nsim_fib_data {
struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
+ struct rhashtable fib_rt_ht;
+ struct list_head fib_rt_list;
+ spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct devlink *devlink;
+};
+
+struct nsim_fib_rt_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+ int family;
+ u32 tb_id;
+};
+
+struct nsim_fib_rt {
+ struct nsim_fib_rt_key key;
+ struct rhash_head ht_node;
+ struct list_head list; /* Member of fib_rt_list */
+};
+
+struct nsim_fib4_rt {
+ struct nsim_fib_rt common;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+};
+
+struct nsim_fib6_rt {
+ struct nsim_fib_rt common;
+ struct list_head nh_list;
+ unsigned int nhs;
+};
+
+struct nsim_fib6_rt_nh {
+ struct list_head list; /* Member of nh_list */
+ struct fib6_info *rt;
+};
+
+static const struct rhashtable_params nsim_fib_rt_ht_params = {
+ .key_offset = offsetof(struct nsim_fib_rt, key),
+ .head_offset = offsetof(struct nsim_fib_rt, ht_node),
+ .key_len = sizeof(struct nsim_fib_rt_key),
+ .automatic_shrinking = true,
};
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
@@ -144,18 +192,556 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
+static void nsim_fib_rt_init(struct nsim_fib_data *data,
+ struct nsim_fib_rt *fib_rt, const void *addr,
+ size_t addr_len, unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ memcpy(fib_rt->key.addr, addr, addr_len);
+ fib_rt->key.prefix_len = prefix_len;
+ fib_rt->key.family = family;
+ fib_rt->key.tb_id = tb_id;
+ list_add(&fib_rt->list, &data->fib_rt_list);
+}
+
+static void nsim_fib_rt_fini(struct nsim_fib_rt *fib_rt)
+{
+ list_del(&fib_rt->list);
+}
+
+static struct nsim_fib_rt *nsim_fib_rt_lookup(struct rhashtable *fib_rt_ht,
+ const void *addr, size_t addr_len,
+ unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ struct nsim_fib_rt_key key;
+
+ memset(&key, 0, sizeof(key));
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ key.family = family;
+ key.tb_id = tb_id;
+
+ return rhashtable_lookup_fast(fib_rt_ht, &key, nsim_fib_rt_ht_params);
+}
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_create(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_ATOMIC);
+ if (!fib4_rt)
+ return NULL;
+
+ nsim_fib_rt_init(data, &fib4_rt->common, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET, fen_info->tb_id);
+
+ fib4_rt->fi = fen_info->fi;
+ fib_info_hold(fib4_rt->fi);
+ fib4_rt->tos = fen_info->tos;
+ fib4_rt->type = fen_info->type;
+
+ return fib4_rt;
+}
+
+static void nsim_fib4_rt_destroy(struct nsim_fib4_rt *fib4_rt)
+{
+ fib_info_put(fib4_rt->fi);
+ nsim_fib_rt_fini(&fib4_rt->common);
+ kfree(fib4_rt);
+}
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET,
+ fen_info->tb_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib4_rt, common);
+}
+
+static void nsim_fib4_rt_hw_flags_set(struct net *net,
+ const struct nsim_fib4_rt *fib4_rt,
+ bool trap)
+{
+ u32 *p_dst = (u32 *) fib4_rt->common.key.addr;
+ int dst_len = fib4_rt->common.key.prefix_len;
+ struct fib_rt_info fri;
+
+ fri.fi = fib4_rt->fi;
+ fri.tb_id = fib4_rt->common.key.tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_rt->tos;
+ fri.type = fib4_rt->type;
+ fri.offload = false;
+ fri.trap = trap;
+ fib_alias_hw_flags_set(net, &fri);
+}
+
+static int nsim_fib4_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_fib_account(&data->ipv4.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv4 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct nsim_fib4_rt *fib4_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib4_rt_old->common.ht_node,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv4 route");
+ return err;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false);
+ nsim_fib4_rt_destroy(fib4_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt, *fib4_rt_old;
+ int err;
+
+ fib4_rt = nsim_fib4_rt_create(data, fen_info);
+ if (!fib4_rt)
+ return -ENOMEM;
+
+ fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (!fib4_rt_old)
+ err = nsim_fib4_rt_add(data, fib4_rt, extack);
+ else
+ err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old, extack);
+
+ if (err)
+ nsim_fib4_rt_destroy(fib4_rt);
+
+ return err;
+}
+
+static void nsim_fib4_rt_remove(struct nsim_fib_data *data,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (WARN_ON_ONCE(!fib4_rt))
+ return;
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static int nsim_fib4_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib_entry_notifier_info *fen_info;
+ int err = 0;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info, info);
+
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib4_rt_insert(data, fen_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib4_rt_remove(data, fen_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct nsim_fib6_rt_nh *
+nsim_fib6_rt_nh_find(const struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) {
+ if (fib6_rt_nh->rt == rt)
+ return fib6_rt_nh;
+ }
+
+ return NULL;
+}
+
+static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
+ struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_ATOMIC);
+ if (!fib6_rt_nh)
+ return -ENOMEM;
+
+ fib6_info_hold(rt);
+ fib6_rt_nh->rt = rt;
+ list_add_tail(&fib6_rt_nh->list, &fib6_rt->nh_list);
+ fib6_rt->nhs++;
+
+ return 0;
+}
+
+static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt);
+ if (WARN_ON_ONCE(!fib6_rt_nh))
+ return;
+
+ fib6_rt->nhs--;
+ list_del(&fib6_rt_nh->list);
+#if IS_ENABLED(CONFIG_IPV6)
+ fib6_info_release(fib6_rt_nh->rt);
+#endif
+ kfree(fib6_rt_nh);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_create(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC);
+ if (!fib6_rt)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+
+ /* We consider a multipath IPv6 route as one entry, but it can be made
+ * up from several fib6_info structs (one for each nexthop), so we
+ * add them all to the same list under the entry.
+ */
+ INIT_LIST_HEAD(&fib6_rt->nh_list);
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ goto err_fib_rt_fini;
+
+ if (!fen6_info->nsiblings)
+ return fib6_rt;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return fib6_rt;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings)
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+err_fib_rt_fini:
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+ return ERR_PTR(err);
+}
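
The error path above relies on list_for_each_entry_continue_reverse() to revisit exactly the siblings already processed, in reverse order, before undoing the first nexthop separately. The unwind idiom in isolation; the demo_* names are hypothetical:

#include <linux/list.h>

static int demo_apply_all(struct list_head *head)
{
	struct demo_item *iter;	/* hypothetical: embeds a list_head 'list' */
	int err;

	list_for_each_entry(iter, head, list) {
		err = demo_apply(iter);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* walks back over the already-applied items only */
	list_for_each_entry_continue_reverse(iter, head, list)
		demo_undo(iter);
	return err;
}
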
+
+static void nsim_fib6_rt_destroy(struct nsim_fib6_rt *fib6_rt)
+{
+ struct nsim_fib6_rt_nh *iter, *tmp;
+
+ list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list)
+ nsim_fib6_rt_nh_del(fib6_rt, iter->rt);
+ WARN_ON_ONCE(!list_empty(&fib6_rt->nh_list));
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib6_rt, common);
+}
+
+static int nsim_fib6_rt_append(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
+ if (WARN_ON_ONCE(!fib6_rt))
+ return -EINVAL;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ return err;
+ rt->trap = true;
+
+ if (!fen6_info->nsiblings)
+ return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ iter->trap = true;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings) {
+ iter->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ }
+ rt->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+ return err;
+}
+
+static void nsim_fib6_rt_hw_flags_set(const struct nsim_fib6_rt *fib6_rt,
+ bool trap)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list)
+ fib6_info_hw_flags_set(fib6_rt_nh->rt, false, trap);
+}
+
+static int nsim_fib6_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = nsim_fib_account(&data->ipv6.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv6 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib6_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct nsim_fib6_rt *fib6_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib6_rt_old->common.ht_node,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv6 route");
+ return err;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt_old, false);
+ nsim_fib6_rt_destroy(fib6_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt, *fib6_rt_old;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_create(data, fen6_info);
+ if (IS_ERR(fib6_rt))
+ return PTR_ERR(fib6_rt);
+
+ fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt_old)
+ err = nsim_fib6_rt_add(data, fib6_rt, extack);
+ else
+ err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old, extack);
+
+ if (err)
+ nsim_fib6_rt_destroy(fib6_rt);
+
+ return err;
+}
+
+static void
+nsim_fib6_rt_remove(struct nsim_fib_data *data,
+ const struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt;
+
+ /* Multipath routes are first added to the FIB trie and only then
+ * notified. If we vetoed the addition, we will get a delete
+ * notification for a route we do not have. Therefore, do not warn if
+ * route was not found.
+ */
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt)
+ return;
+
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+ * group.
+ */
+ if (fen6_info->nsiblings + 1 != fib6_rt->nhs) {
+ nsim_fib6_rt_nh_del(fib6_rt, fen6_info->rt);
+ return;
+ }
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static int nsim_fib6_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ int err = 0;
+
+ fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
+
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ if (fen6_info->rt->fib6_src.plen) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib6_rt_insert(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = nsim_fib6_rt_append(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib6_rt_remove(data, fen6_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
static int nsim_fib_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+ struct fib_notifier_info *info, unsigned long event)
{
- struct netlink_ext_ack *extack = info->extack;
int err = 0;
switch (info->family) {
case AF_INET:
- err = nsim_fib_account(&data->ipv4.fib, add, extack);
+ err = nsim_fib4_event(data, info, event);
break;
case AF_INET6:
- err = nsim_fib_account(&data->ipv6.fib, add, extack);
+ err = nsim_fib6_event(data, info, event);
break;
}
@@ -170,6 +756,9 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
struct fib_notifier_info *info = ptr;
int err = 0;
+ /* IPv6 routes can be added via RAs from softirq context. */
+ spin_lock_bh(&data->fib_lock);
+
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
@@ -177,25 +766,75 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
event == FIB_EVENT_RULE_ADD);
break;
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(data, info,
- event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info, event);
break;
}
+ spin_unlock_bh(&data->fib_lock);
+
return notifier_from_errno(err);
}
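
The _bh variants matter here because the hashtable is now reached from two contexts: process context, where the notifier runs for most events, and softirq, when an IPv6 router advertisement installs a route. With a plain spin_lock(), this single-CPU interleaving would deadlock:

	process context				softirq (IPv6 RA)
	spin_lock(&data->fib_lock);
	    <softirq preempts the holder>
						spin_lock(&data->fib_lock); /* spins forever */

spin_lock_bh() disables bottom halves locally before taking the lock, so the critical section cannot be interrupted by the softirq path on the same CPU.
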
+static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct devlink *devlink = data->devlink;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common);
+ nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false);
+ nsim_fib_account(&data->ipv4.fib, false, NULL);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct nsim_fib6_rt *fib6_rt;
+
+ fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common);
+ nsim_fib6_rt_hw_flags_set(fib6_rt, false);
+ nsim_fib_account(&data->ipv6.fib, false, NULL);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static void nsim_fib_rt_free(void *ptr, void *arg)
+{
+ struct nsim_fib_rt *fib_rt = ptr;
+ struct nsim_fib_data *data = arg;
+
+ switch (fib_rt->key.family) {
+ case AF_INET:
+ nsim_fib4_rt_free(fib_rt, data);
+ break;
+ case AF_INET6:
+ nsim_fib6_rt_free(fib_rt, data);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
fib_nb);
+ struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+ /* The notifier block is still not registered, so we do not need to
+ * take any locks here.
+ */
+ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_rt_free(fib_rt, data);
+ }
- data->ipv4.fib.num = 0ULL;
data->ipv4.rules.num = 0ULL;
- data->ipv6.fib.num = 0ULL;
data->ipv6.rules.num = 0ULL;
}
@@ -256,6 +895,13 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
+ data->devlink = devlink;
+
+ spin_lock_init(&data->fib_lock);
+ INIT_LIST_HEAD(&data->fib_rt_list);
+ err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
+ if (err)
+ goto err_data_free;
nsim_fib_set_max_all(data, devlink);
@@ -264,7 +910,7 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
nsim_fib_dump_inconsistent, extack);
if (err) {
pr_err("Failed to register fib notifier\n");
- goto err_out;
+ goto err_rhashtable_destroy;
}
devlink_resource_occ_get_register(devlink,
@@ -285,7 +931,10 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
data);
return data;
-err_out:
+err_rhashtable_destroy:
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+err_data_free:
kfree(data);
return ERR_PTR(err);
}
@@ -301,5 +950,8 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+ WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
kfree(data);
}
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 9aa637d162eb..ba8d9ad60feb 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -82,7 +82,7 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
- binary = kmalloc(binary_len, GFP_KERNEL);
+ binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
if (!binary)
return -ENOMEM;
get_random_bytes(binary, binary_len);
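
binary_len is user-controlled through debugfs, so an oversized request should fail quietly with -ENOMEM rather than spray an allocation-failure backtrace into the log; __GFP_NOWARN suppresses only the warning, not the error. The pattern in isolation:

#include <linux/slab.h>

/* user_len originates from userspace; sketch only */
static void *demo_user_sized_alloc(size_t user_len)
{
	/* no warning splat on failure, the caller still sees NULL */
	return kmalloc(user_len, GFP_KERNEL | __GFP_NOWARN);
}
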
@@ -285,8 +285,8 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
}
health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
- if (IS_ERR_OR_NULL(health->ddir)) {
- err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
+ if (IS_ERR(health->ddir)) {
+ err = PTR_ERR(health->ddir);
goto err_dummy_reporter_destroy;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 94df795ef4d3..2eb7b0dc1594 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -160,6 +160,7 @@ struct nsim_dev {
struct nsim_trap_data *trap_data;
struct dentry *ddir;
struct dentry *ports_ddir;
+ struct dentry *take_snapshot;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
u32 bpf_bind_verifier_delay;
@@ -240,6 +241,9 @@ struct nsim_bus_dev {
*/
unsigned int num_vfs;
struct nsim_vf_config *vfconfigs;
+ /* Lock for devlink->reload_enabled in netdevsim module */
+ struct mutex nsim_bus_reload_lock;
+ bool init;
};
int nsim_bus_init(void);
diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c
deleted file mode 100644
index 6712da3340d6..000000000000
--- a/drivers/net/netdevsim/sdev.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
-
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "netdevsim.h"
-
-static struct dentry *nsim_sdev_ddir;
-
-static u32 nsim_sdev_id;
-
-struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
-{
- struct netdevsim_shared_dev *sdev;
- char sdev_ddir_name[10];
- int err;
-
- if (joinns) {
- if (WARN_ON(!joinns->sdev))
- return ERR_PTR(-EINVAL);
- sdev = joinns->sdev;
- sdev->refcnt++;
- return sdev;
- }
-
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev)
- return ERR_PTR(-ENOMEM);
- sdev->refcnt = 1;
- sdev->switch_id = nsim_sdev_id++;
-
- sprintf(sdev_ddir_name, "%u", sdev->switch_id);
- sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
- if (IS_ERR_OR_NULL(sdev->ddir)) {
- err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
- goto err_sdev_free;
- }
-
- return sdev;
-
-err_sdev_free:
- nsim_sdev_id--;
- kfree(sdev);
- return ERR_PTR(err);
-}
-
-void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
-{
- if (--sdev->refcnt)
- return;
- debugfs_remove_recursive(sdev->ddir);
- kfree(sdev);
-}
-
-int nsim_sdev_init(void)
-{
- nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
- if (IS_ERR_OR_NULL(nsim_sdev_ddir))
- return -ENOMEM;
- return 0;
-}
-
-void nsim_sdev_exit(void)
-{
- debugfs_remove_recursive(nsim_sdev_ddir);
-}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 5848219005d7..9dabe03a668c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -38,6 +38,7 @@ config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_IOMEM && OF_MDIO
+ default ARCH_BCM_IPROC
help
This module provides a driver for the MDIO busses found in the
Broadcom iProc SoC's.
@@ -324,6 +325,12 @@ config BROADCOM_PHY
Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
BCM5481, BCM54810 and BCM5482 PHYs.
+config BCM84881_PHY
+ bool "Broadcom BCM84881 PHY"
+ depends on PHYLIB=y
+ ---help---
+ Support the Broadcom BCM84881 PHY.
+
config CICADA_PHY
tristate "Cicada PHYs"
---help---
@@ -340,14 +347,15 @@ config DAVICOM_PHY
Currently supports dm9161e and dm9131
config DP83822_PHY
- tristate "Texas Instruments DP83822 PHY"
+ tristate "Texas Instruments DP83822/825/826 PHYs"
---help---
- Supports the DP83822 PHY.
+ Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
+ DP83826C and DP83826NC PHYs.
config DP83TC811_PHY
- tristate "Texas Instruments DP83TC822 PHY"
+ tristate "Texas Instruments DP83TC811 PHY"
---help---
- Supports the DP83TC822 PHY.
+ Supports the DP83TC811 PHY.
config DP83848_PHY
tristate "Texas Instruments DP83848 PHY"
@@ -431,6 +439,9 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
+ depends on MACSEC || MACSEC=n
+ select CRYPTO_AES
+ select CRYPTO_ECB
---help---
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b433ec3bf9a6..fe5badf13b65 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
+
obj-$(CONFIG_SFP) += sfp.o
sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
@@ -62,6 +64,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index cf5a391c93e6..c7eabe4382fb 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -145,7 +145,7 @@ struct adin_clause45_mmd_map {
u16 adin_regnum;
};
-static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
+static const struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
{ MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG },
{ MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG },
{ MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG },
@@ -159,7 +159,7 @@ struct adin_hw_stat {
u16 reg2;
};
-static struct adin_hw_stat adin_hw_stats[] = {
+static const struct adin_hw_stat adin_hw_stats[] = {
{ "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */
{ "length_error_frames_count", 0x940C },
{ "alignment_error_frames_count", 0x940D },
@@ -456,7 +456,7 @@ static int adin_phy_config_intr(struct phy_device *phydev)
static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
u16 cl45_regnum)
{
- struct adin_clause45_mmd_map *m;
+ const struct adin_clause45_mmd_map *m;
int i;
if (devad == MDIO_MMD_VEND1)
@@ -625,7 +625,7 @@ static int adin_soft_reset(struct phy_device *phydev)
if (rc < 0)
return rc;
- msleep(10);
+ msleep(20);
/* If we get a read error something may be wrong */
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
@@ -650,7 +650,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data)
}
static int adin_read_mmd_stat_regs(struct phy_device *phydev,
- struct adin_hw_stat *stat,
+ const struct adin_hw_stat *stat,
u32 *val)
{
int ret;
@@ -676,7 +676,7 @@ static int adin_read_mmd_stat_regs(struct phy_device *phydev,
static u64 adin_get_stat(struct phy_device *phydev, int i)
{
- struct adin_hw_stat *stat = &adin_hw_stats[i];
+ const struct adin_hw_stat *stat = &adin_hw_stats[i];
struct adin_priv *priv = phydev->priv;
u32 val;
int ret;
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 3b29d381116f..31927b2c7d5a 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -358,9 +358,11 @@ static int aqr107_read_status(struct phy_device *phydev)
switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
@@ -493,7 +495,8 @@ static int aqr107_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
- phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ phydev->interface != PHY_INTERFACE_MODE_10GKR &&
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
@@ -627,6 +630,8 @@ static struct phy_driver aqr_driver[] = {
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
.read_status = aqr_read_status,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index aee62610bade..481cf48c9b9e 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -489,6 +489,14 @@ static int at803x_probe(struct phy_device *phydev)
return at803x_parse_dt(phydev);
}
+static void at803x_remove(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (priv->vddio)
+ regulator_disable(priv->vddio);
+}
+
static int at803x_clk_out_config(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
@@ -711,6 +719,7 @@ static struct phy_driver at803x_driver[] = {
.name = "Qualcomm Atheros AR8035",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
+ .remove = at803x_remove,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
@@ -726,6 +735,7 @@ static struct phy_driver at803x_driver[] = {
.name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
+ .remove = at803x_remove,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
@@ -741,6 +751,7 @@ static struct phy_driver at803x_driver[] = {
.name = "Qualcomm Atheros AR8031/AR8033",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
+ .remove = at803x_remove,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
new file mode 100644
index 000000000000..14d55a77eb28
--- /dev/null
+++ b/drivers/net/phy/bcm84881.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+// Broadcom BCM84881 NBASE-T PHY driver, as found on an SFP+ module.
+// Copyright (C) 2019 Russell King, Deep Blue Solutions Ltd.
+//
+// Like the Marvell 88x3310, the Broadcom 84881 changes its host-side
+// interface according to the operating speed between 10GBASE-R,
+// 2500BASE-X and SGMII (but unlike the 88x3310, without the control
+// word).
+//
+// This driver only supports those aspects of the PHY that I'm able to
+// observe and test with the SFP+ module, which is an incomplete subset
+// of what this PHY is able to support. For example, I assume it
+// supports only a single-lane SerDes connection, but the PHY may be
+// able to support more than that.
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+enum {
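+	/* The Clause 22 MII registers appear within the AN MMD at this
+	 * offset, so e.g. the BMSR is read as MDIO_AN_C22 + MII_BMSR below.
+	 */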
+ MDIO_AN_C22 = 0xffe0,
+};
+
+static int bcm84881_wait_init(struct phy_device *phydev)
+{
+ unsigned int tries = 20;
+ int ret, val;
+
+ do {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ if (val < 0) {
+ ret = val;
+ break;
+ }
+ if (!(val & MDIO_CTRL1_RESET)) {
+ ret = 0;
+ break;
+ }
+ if (!--tries) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ msleep(100);
+ } while (1);
+
+ if (ret)
+ phydev_err(phydev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int bcm84881_config_init(struct phy_device *phydev)
+{
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int bcm84881_probe(struct phy_device *phydev)
+{
+ /* This driver requires PMAPMD and AN blocks */
+ const u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ if (!phydev->is_c45 ||
+ (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int bcm84881_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* Although the PHY sets bit 1.11.8, it does not support 10M modes */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int bcm84881_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ /* Wait for the PHY to finish initialising, otherwise our
+ * advertisement may be overwritten.
+ */
+ ret = bcm84881_wait_init(phydev);
+ if (ret)
+ return ret;
+
+ /* We don't support manual MDI control */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* disabled autoneg doesn't seem to work with this PHY */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
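+	/* 1000BASE-T advertisement is not covered by the generic C45
+	 * helper above, so program it via the Clause 22 MII_CTRL1000
+	 * register mapped into the AN MMD.
+	 */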
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_C22 + MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int bcm84881_aneg_done(struct phy_device *phydev)
+{
+ int bmsr, val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+		return bmsr;
+
+ return !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+}
+
+static int bcm84881_read_status(struct phy_device *phydev)
+{
+ unsigned int mode;
+ int bmsr, val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_CTRL1_RESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+		return bmsr;
+
+ phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+ phydev->link = !!(val & MDIO_STAT1_LSTATUS) &&
+ !!(bmsr & BMSR_LSTATUS);
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = false;
+
+ if (!phydev->link)
+ return 0;
+
+ linkmode_zero(phydev->lp_advertising);
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->mdix = 0;
+
+ if (phydev->autoneg_complete) {
+ val = genphy_c45_read_lpa(phydev);
+ if (val < 0)
+ return val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_C22 + MII_STAT1000);
+ if (val < 0)
+ return val;
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ phy_resolve_aneg_linkmode(phydev);
+ }
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* disabled autoneg doesn't seem to work, so force the link
+ * down.
+ */
+ phydev->link = 0;
+ return 0;
+ }
+
+ /* Set the host link mode - we set the phy interface mode and
+ * the speed according to this register so that downshift works.
+ * We leave the duplex setting as per the resolution from the
+ * above.
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, 0x4011);
+ mode = (val & 0x1e) >> 1;
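+	/* Bits 4:1 of vendor register 0x4011 encode the current host-side
+	 * mode: 1/2 = SGMII (100M/1G), 3 = 10GBASE-R, 4 = 2500BASE-X,
+	 * 5 = 5G (serdes mode left unchanged here).
+	 */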
+ if (mode == 1 || mode == 2)
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ else if (mode == 3)
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ else if (mode == 4)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ switch (mode & 7) {
+ case 1:
+ phydev->speed = SPEED_100;
+ break;
+ case 2:
+ phydev->speed = SPEED_1000;
+ break;
+ case 3:
+ phydev->speed = SPEED_10000;
+ break;
+ case 4:
+ phydev->speed = SPEED_2500;
+ break;
+ case 5:
+ phydev->speed = SPEED_5000;
+ break;
+ }
+
+ return genphy_c45_read_mdix(phydev);
+}
+
+static struct phy_driver bcm84881_drivers[] = {
+ {
+ .phy_id = 0xae025150,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM84881",
+ .config_init = bcm84881_config_init,
+ .probe = bcm84881_probe,
+ .get_features = bcm84881_get_features,
+ .config_aneg = bcm84881_config_aneg,
+ .aneg_done = bcm84881_aneg_done,
+ .read_status = bcm84881_read_status,
+ },
+};
+
+module_phy_driver(bcm84881_drivers);
+
+/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
+static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+ { 0xae025150, 0xfffffff0 },
+ { },
+};
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Broadcom BCM84881 PHY driver");
+MODULE_DEVICE_TABLE(mdio, bcm84881_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 8f241b57fcf6..ac72a324fcd1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -98,6 +98,7 @@ struct dp83640_private {
struct list_head list;
struct dp83640_clock *clock;
struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
struct delayed_work ts_work;
int hwts_tx_en;
int hwts_rx_en;
@@ -1131,96 +1132,6 @@ static void dp83640_clock_put(struct dp83640_clock *clock)
mutex_unlock(&clock->clock_lock);
}
-static int dp83640_probe(struct phy_device *phydev)
-{
- struct dp83640_clock *clock;
- struct dp83640_private *dp83640;
- int err = -ENOMEM, i;
-
- if (phydev->mdio.addr == BROADCAST_ADDR)
- return 0;
-
- clock = dp83640_clock_get_bus(phydev->mdio.bus);
- if (!clock)
- goto no_clock;
-
- dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
- if (!dp83640)
- goto no_memory;
-
- dp83640->phydev = phydev;
- INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
-
- INIT_LIST_HEAD(&dp83640->rxts);
- INIT_LIST_HEAD(&dp83640->rxpool);
- for (i = 0; i < MAX_RXTS; i++)
- list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
-
- phydev->priv = dp83640;
-
- spin_lock_init(&dp83640->rx_lock);
- skb_queue_head_init(&dp83640->rx_queue);
- skb_queue_head_init(&dp83640->tx_queue);
-
- dp83640->clock = clock;
-
- if (choose_this_phy(clock, phydev)) {
- clock->chosen = dp83640;
- clock->ptp_clock = ptp_clock_register(&clock->caps,
- &phydev->mdio.dev);
- if (IS_ERR(clock->ptp_clock)) {
- err = PTR_ERR(clock->ptp_clock);
- goto no_register;
- }
- } else
- list_add_tail(&dp83640->list, &clock->phylist);
-
- dp83640_clock_put(clock);
- return 0;
-
-no_register:
- clock->chosen = NULL;
- kfree(dp83640);
-no_memory:
- dp83640_clock_put(clock);
-no_clock:
- return err;
-}
-
-static void dp83640_remove(struct phy_device *phydev)
-{
- struct dp83640_clock *clock;
- struct list_head *this, *next;
- struct dp83640_private *tmp, *dp83640 = phydev->priv;
-
- if (phydev->mdio.addr == BROADCAST_ADDR)
- return;
-
- enable_status_frames(phydev, false);
- cancel_delayed_work_sync(&dp83640->ts_work);
-
- skb_queue_purge(&dp83640->rx_queue);
- skb_queue_purge(&dp83640->tx_queue);
-
- clock = dp83640_clock_get(dp83640->clock);
-
- if (dp83640 == clock->chosen) {
- ptp_clock_unregister(clock->ptp_clock);
- clock->chosen = NULL;
- } else {
- list_for_each_safe(this, next, &clock->phylist) {
- tmp = list_entry(this, struct dp83640_private, list);
- if (tmp == dp83640) {
- list_del_init(&tmp->list);
- break;
- }
- }
- }
-
- dp83640_clock_put(clock);
- kfree(dp83640);
-}
-
static int dp83640_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -1319,9 +1230,10 @@ static int dp83640_config_intr(struct phy_device *phydev)
}
}
-static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
struct hwtstamp_config cfg;
u16 txcfg0, rxcfg0;
@@ -1397,8 +1309,8 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
- ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
+ ext_write(0, dp83640->phydev, PAGE5, PTP_TXCFG0, txcfg0);
+ ext_write(0, dp83640->phydev, PAGE5, PTP_RXCFG0, rxcfg0);
mutex_unlock(&dp83640->clock->extreg_lock);
@@ -1428,10 +1340,11 @@ static void rx_timestamp_work(struct work_struct *work)
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
}
-static bool dp83640_rxtstamp(struct phy_device *phydev,
+static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct list_head *this, *next;
struct rxts *rxts;
@@ -1477,11 +1390,12 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
return true;
}
-static void dp83640_txtstamp(struct phy_device *phydev,
+static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type)
{
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
- struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
switch (dp83640->hwts_tx_en) {
@@ -1504,9 +1418,11 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
}
-static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
+static int dp83640_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
{
- struct dp83640_private *dp83640 = dev->priv;
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
@@ -1526,6 +1442,103 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
return 0;
}
+static int dp83640_probe(struct phy_device *phydev)
+{
+ struct dp83640_clock *clock;
+ struct dp83640_private *dp83640;
+ int err = -ENOMEM, i;
+
+ if (phydev->mdio.addr == BROADCAST_ADDR)
+ return 0;
+
+ clock = dp83640_clock_get_bus(phydev->mdio.bus);
+ if (!clock)
+ goto no_clock;
+
+ dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
+ if (!dp83640)
+ goto no_memory;
+
+ dp83640->phydev = phydev;
+ dp83640->mii_ts.rxtstamp = dp83640_rxtstamp;
+ dp83640->mii_ts.txtstamp = dp83640_txtstamp;
+ dp83640->mii_ts.hwtstamp = dp83640_hwtstamp;
+ dp83640->mii_ts.ts_info = dp83640_ts_info;
+
+ INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
+ INIT_LIST_HEAD(&dp83640->rxts);
+ INIT_LIST_HEAD(&dp83640->rxpool);
+ for (i = 0; i < MAX_RXTS; i++)
+ list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
+
+ phydev->mii_ts = &dp83640->mii_ts;
+ phydev->priv = dp83640;
+
+ spin_lock_init(&dp83640->rx_lock);
+ skb_queue_head_init(&dp83640->rx_queue);
+ skb_queue_head_init(&dp83640->tx_queue);
+
+ dp83640->clock = clock;
+
+ if (choose_this_phy(clock, phydev)) {
+ clock->chosen = dp83640;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto no_register;
+ }
+ } else
+ list_add_tail(&dp83640->list, &clock->phylist);
+
+ dp83640_clock_put(clock);
+ return 0;
+
+no_register:
+ clock->chosen = NULL;
+ kfree(dp83640);
+no_memory:
+ dp83640_clock_put(clock);
+no_clock:
+ return err;
+}
+
+static void dp83640_remove(struct phy_device *phydev)
+{
+ struct dp83640_clock *clock;
+ struct list_head *this, *next;
+ struct dp83640_private *tmp, *dp83640 = phydev->priv;
+
+ if (phydev->mdio.addr == BROADCAST_ADDR)
+ return;
+
+ phydev->mii_ts = NULL;
+
+ enable_status_frames(phydev, false);
+ cancel_delayed_work_sync(&dp83640->ts_work);
+
+ skb_queue_purge(&dp83640->rx_queue);
+ skb_queue_purge(&dp83640->tx_queue);
+
+ clock = dp83640_clock_get(dp83640->clock);
+
+ if (dp83640 == clock->chosen) {
+ ptp_clock_unregister(clock->ptp_clock);
+ clock->chosen = NULL;
+ } else {
+ list_for_each_safe(this, next, &clock->phylist) {
+ tmp = list_entry(this, struct dp83640_private, list);
+ if (tmp == dp83640) {
+ list_del_init(&tmp->list);
+ break;
+ }
+ }
+ }
+
+ dp83640_clock_put(clock);
+ kfree(dp83640);
+}
+
static struct phy_driver dp83640_driver = {
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
@@ -1537,10 +1550,6 @@ static struct phy_driver dp83640_driver = {
.config_init = dp83640_config_init,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
- .ts_info = dp83640_ts_info,
- .hwtstamp = dp83640_hwtstamp,
- .rxtstamp = dp83640_rxtstamp,
- .txtstamp = dp83640_txtstamp,
};
static int __init dp83640_init(void)
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 8a4b1d167ce2..fe9aa3ad52a7 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Texas Instruments DP83822 PHY
+/* Driver for the Texas Instruments DP83822, DP83825 and DP83826 PHYs.
*
* Copyright (C) 2017 Texas Instruments Inc.
*/
@@ -15,7 +14,12 @@
#include <linux/netdevice.h>
#define DP83822_PHY_ID 0x2000a240
+#define DP83825S_PHY_ID 0x2000a140
#define DP83825I_PHY_ID 0x2000a150
+#define DP83825CM_PHY_ID 0x2000a160
+#define DP83825CS_PHY_ID 0x2000a170
+#define DP83826C_PHY_ID 0x2000a130
+#define DP83826NC_PHY_ID 0x2000a110
#define DP83822_DEVADDR 0x1f
@@ -319,12 +323,22 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
+ DP83822_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83822_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83822_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
+	DP83822_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825CM"),
+ DP83822_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
};
module_phy_driver(dp83822_driver);
static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
+ { DP83826C_PHY_ID, 0xfffffff0 },
+ { DP83826NC_PHY_ID, 0xfffffff0 },
+ { DP83825S_PHY_ID, 0xfffffff0 },
+ { DP83825CM_PHY_ID, 0xfffffff0 },
+ { DP83825CS_PHY_ID, 0xfffffff0 },
{ },
};
MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9cd9dcee4eb2..967f57ed0b65 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -93,10 +93,13 @@
#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
/* PHY CTRL bits */
-#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT 12
#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
-#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_PHYCR_TX_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_PHYCR_RX_FIFO_DEPTH_MASK GENMASK(13, 12)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
+#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
@@ -131,7 +134,8 @@ enum {
struct dp83867_private {
u32 rx_id_delay;
u32 tx_id_delay;
- u32 fifo_depth;
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
int io_impedance;
int port_mirroring;
bool rxctrl_strap_quirk;
@@ -408,18 +412,32 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
ret = of_property_read_u32(of_node, "ti,fifo-depth",
- &dp83867->fifo_depth);
+ &dp83867->tx_fifo_depth);
if (ret) {
- phydev_err(phydev,
- "ti,fifo-depth property is required\n");
- return ret;
+ ret = of_property_read_u32(of_node, "tx-fifo-depth",
+ &dp83867->tx_fifo_depth);
+ if (ret)
+ dp83867->tx_fifo_depth =
+ DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
}
- if (dp83867->fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
- phydev_err(phydev,
- "ti,fifo-depth value %u out of range\n",
- dp83867->fifo_depth);
+
+ if (dp83867->tx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev, "tx-fifo-depth value %u out of range\n",
+ dp83867->tx_fifo_depth);
return -EINVAL;
}
+
+ ret = of_property_read_u32(of_node, "rx-fifo-depth",
+ &dp83867->rx_fifo_depth);
+ if (ret)
+ dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+
+ if (dp83867->rx_fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev, "rx-fifo-depth value %u out of range\n",
+ dp83867->rx_fifo_depth);
+ return -EINVAL;
+ }
+
return 0;
}
#else
@@ -458,12 +476,31 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
BIT(7));
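+	/* The TX FIFO depth is configurable in both RGMII and SGMII modes;
+	 * the RX FIFO depth is only applied in SGMII mode.
+	 */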
+ if (phy_interface_is_rgmii(phydev) ||
+ phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+
+ val &= ~DP83867_PHYCR_TX_FIFO_DEPTH_MASK;
+ val |= (dp83867->tx_fifo_depth <<
+ DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~DP83867_PHYCR_RX_FIFO_DEPTH_MASK;
+ val |= (dp83867->rx_fifo_depth <<
+ DP83867_PHYCR_RX_FIFO_DEPTH_SHIFT);
+ }
+
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+ }
+
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
- val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
- val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
/* The code below checks if "port mirroring" N/A MODE4 has been
* enabled during power on bootstrap.
@@ -599,7 +636,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
usleep_range(10, 20);
- return 0;
+	/* After reset the FORCE_LINK_GOOD bit is set, although its
+	 * default value should be unset. Clear FORCE_LINK_GOOD so the
+	 * PHY works properly.
+	 */
+ return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
static struct phy_driver dp83867_driver[] = {
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 93021904c5e4..7996a4aea8d2 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -334,7 +334,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
break;
default:
return -EINVAL;
- };
+ }
return ret;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 7c5265fd2b94..4a3d34f40cb9 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -210,18 +210,15 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
* Linux device associated with it, we simply have to obtain
* the GPIO descriptor from the device tree like this.
*/
- gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
- GPIOD_IN, "mdio");
- of_node_put(fixed_link_node);
- if (IS_ERR(gpiod)) {
- if (PTR_ERR(gpiod) == -EPROBE_DEFER)
- return gpiod;
-
+ gpiod = fwnode_gpiod_get_index(of_fwnode_handle(fixed_link_node),
+ "link", 0, GPIOD_IN, "mdio");
+ if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
if (PTR_ERR(gpiod) != -ENOENT)
pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
fixed_link_node);
gpiod = NULL;
}
+ of_node_put(fixed_link_node);
return gpiod;
}
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 356bd6472f49..fec58ad69e02 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -190,27 +190,11 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->duplex = DUPLEX_FULL;
}
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_pause(phydev);
} else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ err = genphy_read_status_fixed(phydev);
+ if (err < 0)
+ return err;
phydev->pause = phydev->asym_pause = 0;
linkmode_zero(phydev->lp_advertising);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b1fbd1937328..28e33ece4ce1 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -162,19 +162,9 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
-#define LPA_FIBER_1000HALF 0x40
-#define LPA_FIBER_1000FULL 0x20
-
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
-#define ADVERTISE_FIBER_1000HALF 0x40
-#define ADVERTISE_FIBER_1000FULL 0x20
-
-#define ADVERTISE_PAUSE_FIBER 0x180
-#define ADVERTISE_PAUSE_ASYM_FIBER 0x100
-
-#define REGISTER_LINK_STATUS 0x400
#define NB_FIBER_STATS 1
MODULE_DESCRIPTION("Marvell PHY driver");
@@ -497,16 +487,15 @@ static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
u32 result = 0;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
- result |= ADVERTISE_FIBER_1000HALF;
+ result |= ADVERTISE_1000XHALF;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
- result |= ADVERTISE_FIBER_1000FULL;
+ result |= ADVERTISE_1000XFULL;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
- result |= LPA_PAUSE_ASYM_FIBER;
+ result |= ADVERTISE_1000XPSE_ASYM;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
- result |= (ADVERTISE_PAUSE_FIBER
- & (~ADVERTISE_PAUSE_ASYM_FIBER));
+ result |= ADVERTISE_1000XPAUSE;
return result;
}
@@ -524,7 +513,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
{
int changed = 0;
int err;
- int adv, oldadv;
+ u16 adv;
if (phydev->autoneg != AUTONEG_ENABLE)
return genphy_setup_forced(phydev);
@@ -533,44 +522,19 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- /* Setup fiber advertisement */
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
- | LPA_PAUSE_FIBER);
- adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_ADVERTISE, adv);
- if (err < 0)
- return err;
+ adv = linkmode_adv_to_fiber_adv_t(phydev->advertising);
+ /* Setup fiber advertisement */
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XHALF | ADVERTISE_1000XFULL |
+ ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM,
+ adv);
+ if (err < 0)
+ return err;
+ if (err > 0)
changed = 1;
- }
-
- if (changed == 0) {
- /* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated?
- */
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = 1; /* do restart aneg */
- }
- /* Only restart aneg if we are advertising something different
- * than we were before.
- */
- if (changed > 0)
- changed = genphy_restart_aneg(phydev);
-
- return changed;
+ return genphy_check_and_restart_aneg(phydev, changed);
}
static int m88e1510_config_aneg(struct phy_device *phydev)
@@ -1302,93 +1266,29 @@ static int m88e6390_config_aneg(struct phy_device *phydev)
static void fiber_lpa_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
{
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- advertising, lpa & LPA_FIBER_1000HALF);
+ advertising, lpa & LPA_1000XHALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- advertising, lpa & LPA_FIBER_1000FULL);
-}
-
-/**
- * marvell_update_link - update link status in real time in @phydev
- * @phydev: target phy_device struct
- *
- * Description: Update the value in phydev->link to reflect the
- * current link value.
- */
-static int marvell_update_link(struct phy_device *phydev, int fiber)
-{
- int status;
-
- /* Use the generic register for copper link, or specific
- * register for fiber case
- */
- if (fiber) {
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
-
- if ((status & REGISTER_LINK_STATUS) == 0)
- phydev->link = 0;
- else
- phydev->link = 1;
- } else {
- return genphy_update_link(phydev);
- }
-
- return 0;
+ advertising, lpa & LPA_1000XFULL);
}
static int marvell_read_status_page_an(struct phy_device *phydev,
- int fiber)
+ int fiber, int status)
{
- int status;
int lpa;
- int lpagb;
-
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
-
- lpa = phy_read(phydev, MII_LPA);
- if (lpa < 0)
- return lpa;
-
- lpagb = phy_read(phydev, MII_STAT1000);
- if (lpagb < 0)
- return lpagb;
-
- if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- status = status & MII_M1011_PHY_STATUS_SPD_MASK;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- switch (status) {
- case MII_M1011_PHY_STATUS_1000:
- phydev->speed = SPEED_1000;
- break;
-
- case MII_M1011_PHY_STATUS_100:
- phydev->speed = SPEED_100;
- break;
-
- default:
- phydev->speed = SPEED_10;
- break;
- }
+ int err;
if (!fiber) {
- mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb);
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_pause(phydev);
} else {
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
/* The fiber link is only 1000M capable */
fiber_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
@@ -1405,31 +1305,25 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
}
}
- return 0;
-}
-static int marvell_read_status_page_fixed(struct phy_device *phydev)
-{
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
- if (bmcr & BMCR_SPEED1000)
+ switch (status & MII_M1011_PHY_STATUS_SPD_MASK) {
+ case MII_M1011_PHY_STATUS_1000:
phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
+ break;
+
+ case MII_M1011_PHY_STATUS_100:
phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ break;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- linkmode_zero(phydev->lp_advertising);
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
return 0;
}
@@ -1444,25 +1338,38 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
*/
static int marvell_read_status_page(struct phy_device *phydev, int page)
{
+ int status;
int fiber;
int err;
- /* Detect and update the link, but return if there
- * was an error
+ status = phy_read(phydev, MII_M1011_PHY_STATUS);
+ if (status < 0)
+ return status;
+
+ /* Use the generic register for copper link status,
+ * and the PHY status register for fiber link status.
*/
+ if (page == MII_MARVELL_FIBER_PAGE) {
+ phydev->link = !!(status & MII_M1011_PHY_STATUS_LINK);
+ } else {
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+ }
+
if (page == MII_MARVELL_FIBER_PAGE)
fiber = 1;
else
fiber = 0;
- err = marvell_update_link(phydev, fiber);
- if (err)
- return err;
+ linkmode_zero(phydev->lp_advertising);
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
if (phydev->autoneg == AUTONEG_ENABLE)
- err = marvell_read_status_page_an(phydev, fiber);
+ err = marvell_read_status_page_an(phydev, fiber, status);
else
- err = marvell_read_status_page_fixed(phydev);
+ err = genphy_read_status_fixed(phydev);
return err;
}
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1bf13017d288..64c9f3bba2cd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -214,9 +214,9 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phy_interface_t iface;
sfp_parse_support(phydev->sfp_bus, id, support);
- iface = sfp_select_interface(phydev->sfp_bus, id, support);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
- if (iface != PHY_INTERFACE_MODE_10GKR) {
+ if (iface != PHY_INTERFACE_MODE_10GBASER) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
return -EINVAL;
}
@@ -304,7 +304,7 @@ static int mv3310_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
- phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
return 0;
@@ -386,16 +386,17 @@ static void mv3310_update_interface(struct phy_device *phydev)
{
if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ phydev->interface == PHY_INTERFACE_MODE_10GBASER) &&
+ phydev->link) {
/* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII, 10GBase-KR and
+ * active PHYXS instance) between Cisco SGMII, 10GBase-R and
* 2500BaseX modes according to the speed. Florian suggests
* setting phydev->interface to communicate this to the MAC.
* Only do this if we are already in one of the above modes.
*/
switch (phydev->speed) {
case SPEED_10000:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case SPEED_2500:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
index 0dce67672548..0746e2cc39ae 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/phy/mdio-i2c.c
@@ -33,17 +33,24 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
- u8 data[2], dev_addr = reg;
+ u8 addr[3], data[2], *p;
int bus_addr, ret;
if (!i2c_mii_valid_phy_id(phy_id))
return 0xffff;
+ p = addr;
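+	/* For a Clause 45 access, prepend the device address byte
+	 * (0x20 | devad) and the high byte of the 16-bit register
+	 * number; the low byte follows in both the C22 and C45 cases.
+	 */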
+ if (reg & MII_ADDR_C45) {
+ *p++ = 0x20 | ((reg >> 16) & 31);
+ *p++ = reg >> 8;
+ }
+ *p++ = reg;
+
bus_addr = i2c_mii_phy_addr(phy_id);
msgs[0].addr = bus_addr;
msgs[0].flags = 0;
- msgs[0].len = 1;
- msgs[0].buf = &dev_addr;
+ msgs[0].len = p - addr;
+ msgs[0].buf = addr;
msgs[1].addr = bus_addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = sizeof(data);
@@ -61,18 +68,23 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
int ret;
- u8 data[3];
+ u8 data[5], *p;
if (!i2c_mii_valid_phy_id(phy_id))
return 0;
- data[0] = reg;
- data[1] = val >> 8;
- data[2] = val;
+ p = data;
+ if (reg & MII_ADDR_C45) {
+ *p++ = (reg >> 16) & 31;
+ *p++ = reg >> 8;
+ }
+ *p++ = reg;
+ *p++ = val >> 8;
+ *p++ = val;
msg.addr = i2c_mii_phy_addr(phy_id);
msg.flags = 0;
- msg.len = 3;
+ msg.len = p - data;
msg.buf = data;
ret = i2c_transfer(i2c, &msg, 1);
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
index 7a9ad54582e1..bf86c9c7a288 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -123,7 +123,7 @@ static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
}
-static void g12a_ephy_pll_init(struct clk_hw *hw)
+static int g12a_ephy_pll_init(struct clk_hw *hw)
{
struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
@@ -136,6 +136,8 @@ static void g12a_ephy_pll_init(struct clk_hw *hw)
writel(0x20200000, pll->base + ETH_PLL_CTL5);
writel(0x0000c002, pll->base + ETH_PLL_CTL6);
writel(0x00000023, pll->base + ETH_PLL_CTL7);
+
+ return 0;
}
static const struct clk_ops g12a_ephy_pll_ops = {
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 229e480179ff..9bb9f37f21dc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -59,17 +59,11 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
static int mdiobus_register_reset(struct mdio_device *mdiodev)
{
- struct reset_control *reset = NULL;
-
- if (mdiodev->dev.of_node)
- reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
- "phy");
- if (IS_ERR(reset)) {
- if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
- reset = NULL;
- else
- return PTR_ERR(reset);
- }
+ struct reset_control *reset;
+
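+	/* The optional variant returns NULL, not an error, when no "phy"
+	 * reset is described, so no -ENOENT/-ENOTSUPP filtering is needed.
+	 */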
+ reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
mdiodev->reset_ctrl = reset;
@@ -164,9 +158,11 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
if (size)
bus->priv = (void *)bus + aligned_size;
- /* Initialise the interrupts to polling */
- for (i = 0; i < PHY_MAX_ADDR; i++)
+	/* Initialise the interrupts to polling, and the per-address 64-bit stats seqcounts */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
bus->irq[i] = PHY_POLL;
+ u64_stats_init(&bus->stats[i].syncp);
+ }
return bus;
}
@@ -255,9 +251,215 @@ static void mdiobus_release(struct device *d)
kfree(bus);
}
+struct mdio_bus_stat_attr {
+ int addr;
+ unsigned int field_offset;
+};
+
+static u64 mdio_bus_get_stat(struct mdio_bus_stats *s, unsigned int offset)
+{
+ const char *p = (const char *)s + offset;
+ unsigned int start;
+ u64 val = 0;
+
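+	/* Retry until the seqcount is stable so that 32-bit hosts never
+	 * observe a torn 64-bit counter value.
+	 */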
+ do {
+ start = u64_stats_fetch_begin(&s->syncp);
+ val = u64_stats_read((const u64_stats_t *)p);
+ } while (u64_stats_fetch_retry(&s->syncp, start));
+
+ return val;
+}
+
+static u64 mdio_bus_get_global_stat(struct mii_bus *bus, unsigned int offset)
+{
+ unsigned int i;
+ u64 val = 0;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ val += mdio_bus_get_stat(&bus->stats[i], offset);
+
+ return val;
+}
+
+static ssize_t mdio_bus_stat_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mii_bus *bus = to_mii_bus(dev);
+ struct mdio_bus_stat_attr *sattr;
+ struct dev_ext_attribute *eattr;
+ u64 val;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ sattr = eattr->var;
+
+ if (sattr->addr < 0)
+ val = mdio_bus_get_global_stat(bus, sattr->field_offset);
+ else
+ val = mdio_bus_get_stat(&bus->stats[sattr->addr],
+ sattr->field_offset);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct mii_bus *bus = mdiodev->bus;
+ struct mdio_bus_stat_attr *sattr;
+ struct dev_ext_attribute *eattr;
+ int addr = mdiodev->addr;
+ u64 val;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ sattr = eattr->var;
+
+ val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+#define MDIO_BUS_STATS_ATTR_DECL(field, file) \
+static struct dev_ext_attribute dev_attr_mdio_bus_##field = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ -1, offsetof(struct mdio_bus_stats, field) \
+ }), \
+}; \
+static struct dev_ext_attribute dev_attr_mdio_bus_device_##field = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_device_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ -1, offsetof(struct mdio_bus_stats, field) \
+ }), \
+};
+
+#define MDIO_BUS_STATS_ATTR(field) \
+ MDIO_BUS_STATS_ATTR_DECL(field, __stringify(field))
+
+MDIO_BUS_STATS_ATTR(transfers);
+MDIO_BUS_STATS_ATTR(errors);
+MDIO_BUS_STATS_ATTR(writes);
+MDIO_BUS_STATS_ATTR(reads);
+
+#define MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, file) \
+static struct dev_ext_attribute dev_attr_mdio_bus_addr_##field##_##addr = { \
+ .attr = { .attr = { .name = file, .mode = 0444 }, \
+ .show = mdio_bus_stat_field_show, \
+ }, \
+ .var = &((struct mdio_bus_stat_attr) { \
+ addr, offsetof(struct mdio_bus_stats, field) \
+ }), \
+}
+
+#define MDIO_BUS_STATS_ADDR_ATTR(field, addr) \
+ MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, \
+ __stringify(field) "_" __stringify(addr))
+
+#define MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(addr) \
+ MDIO_BUS_STATS_ADDR_ATTR(transfers, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(errors, addr); \
+ MDIO_BUS_STATS_ADDR_ATTR(writes, addr); \
+	MDIO_BUS_STATS_ADDR_ATTR(reads, addr)
+
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(0);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(1);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(2);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(3);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(4);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(5);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(6);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(7);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(8);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(9);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(10);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(11);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(12);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(13);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(14);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(15);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(16);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(17);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(18);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(19);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(20);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(21);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(22);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(23);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(24);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(25);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(26);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(27);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(28);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(29);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(30);
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(31);
+
+#define MDIO_BUS_STATS_ADDR_ATTR_GROUP(addr) \
+ &dev_attr_mdio_bus_addr_transfers_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_errors_##addr.attr.attr, \
+ &dev_attr_mdio_bus_addr_writes_##addr.attr.attr, \
+	&dev_attr_mdio_bus_addr_reads_##addr.attr.attr
+
+static struct attribute *mdio_bus_statistics_attrs[] = {
+ &dev_attr_mdio_bus_transfers.attr.attr,
+ &dev_attr_mdio_bus_errors.attr.attr,
+ &dev_attr_mdio_bus_writes.attr.attr,
+ &dev_attr_mdio_bus_reads.attr.attr,
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(0),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(1),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(2),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(3),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(4),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(5),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(6),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(7),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(8),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(9),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(10),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(11),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(12),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(13),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(14),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(15),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(16),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(17),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(18),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(19),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(20),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(21),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(22),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(23),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(24),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(25),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(26),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(27),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(28),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(29),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(30),
+ MDIO_BUS_STATS_ADDR_ATTR_GROUP(31),
+ NULL,
+};
+
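+/* These show up in sysfs under the bus device, e.g.
+ * /sys/class/mdio_bus/<bus>/statistics/transfers, plus the per-address
+ * variants such as transfers_<addr>.
+ */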
+static const struct attribute_group mdio_bus_statistics_group = {
+ .name = "statistics",
+ .attrs = mdio_bus_statistics_attrs,
+};
+
+static const struct attribute_group *mdio_bus_groups[] = {
+ &mdio_bus_statistics_group,
+ NULL,
+};
+
static struct class mdio_bus_class = {
.name = "mdio_bus",
.dev_release = mdiobus_release,
+ .dev_groups = mdio_bus_groups,
};
#if IS_ENABLED(CONFIG_OF_MDIO)
@@ -536,6 +738,24 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
}
EXPORT_SYMBOL(mdiobus_scan);
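+/* Account one bus transaction in the per-address statistics; op is true
+ * for a read. Failed transfers bump the error counter instead of the
+ * read/write counters.
+ */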
+static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
+{
+ u64_stats_update_begin(&stats->syncp);
+
+ u64_stats_inc(&stats->transfers);
+ if (ret < 0) {
+ u64_stats_inc(&stats->errors);
+ goto out;
+ }
+
+ if (op)
+ u64_stats_inc(&stats->reads);
+ else
+ u64_stats_inc(&stats->writes);
+out:
+ u64_stats_update_end(&stats->syncp);
+}
+
/**
* __mdiobus_read - Unlocked version of the mdiobus_read function
* @bus: the mii_bus struct
@@ -555,6 +775,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
retval = bus->read(bus, addr, regnum);
trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+ mdiobus_stats_acct(&bus->stats[addr], true, retval);
return retval;
}
@@ -580,6 +801,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
err = bus->write(bus, addr, regnum, val);
trace_mdio_access(bus, 0, addr, regnum, val, err);
+ mdiobus_stats_acct(&bus->stats[addr], false, err);
return err;
}
@@ -725,8 +947,27 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static struct attribute *mdio_bus_device_statistics_attrs[] = {
+ &dev_attr_mdio_bus_device_transfers.attr.attr,
+ &dev_attr_mdio_bus_device_errors.attr.attr,
+ &dev_attr_mdio_bus_device_writes.attr.attr,
+ &dev_attr_mdio_bus_device_reads.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mdio_bus_device_statistics_group = {
+ .name = "statistics",
+ .attrs = mdio_bus_device_statistics_attrs,
+};
+
+static const struct attribute_group *mdio_bus_dev_groups[] = {
+ &mdio_bus_device_statistics_group,
+ NULL,
+};
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
+ .dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
.uevent = mdio_uevent,
};
diff --git a/drivers/net/phy/mii_timestamper.c b/drivers/net/phy/mii_timestamper.c
new file mode 100644
index 000000000000..b71b7456462d
--- /dev/null
+++ b/drivers/net/phy/mii_timestamper.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Support for generic time stamping devices on MII buses.
+// Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+//
+
+#include <linux/mii_timestamper.h>
+
+static LIST_HEAD(mii_timestamping_devices);
+static DEFINE_MUTEX(tstamping_devices_lock);
+
+struct mii_timestamping_desc {
+ struct list_head list;
+ struct mii_timestamping_ctrl *ctrl;
+ struct device *device;
+};
+
+/**
+ * register_mii_tstamp_controller() - registers an MII time stamping device.
+ *
+ * @device: The device to be registered.
+ * @ctrl: Pointer to device's control interface.
+ *
+ * Returns zero on success or non-zero on failure.
+ */
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ struct mii_timestamping_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&desc->list);
+ desc->ctrl = ctrl;
+ desc->device = device;
+
+ mutex_lock(&tstamping_devices_lock);
+	list_add_tail(&desc->list, &mii_timestamping_devices);
+ mutex_unlock(&tstamping_devices_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(register_mii_tstamp_controller);
+
+/**
+ * unregister_mii_tstamp_controller() - unregisters an MII time stamping device.
+ *
+ * @device: A device previously passed to register_mii_tstamp_controller().
+ */
+void unregister_mii_tstamp_controller(struct device *device)
+{
+ struct mii_timestamping_desc *desc;
+ struct list_head *this, *next;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each_safe(this, next, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device == device) {
+ list_del_init(&desc->list);
+ kfree(desc);
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+}
+EXPORT_SYMBOL(unregister_mii_tstamp_controller);
+
+/**
+ * register_mii_timestamper - Enables a given port of an MII time stamper.
+ *
+ * @node: The device tree node of the MII time stamp controller.
+ * @port: The index of the port to be enabled.
+ *
+ * Returns a valid interface on success or ERR_PTR otherwise.
+ */
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ struct mii_timestamper *mii_ts = NULL;
+ struct mii_timestamping_desc *desc;
+ struct list_head *this;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each(this, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device->of_node == node) {
+ mii_ts = desc->ctrl->probe_channel(desc->device, port);
+ if (!IS_ERR(mii_ts)) {
+ mii_ts->device = desc->device;
+ get_device(desc->device);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+
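+	/* If no controller matched, it may simply not have probed yet,
+	 * so ask the caller to retry later.
+	 */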
+ return mii_ts ? mii_ts : ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(register_mii_timestamper);
+
+/**
+ * unregister_mii_timestamper - Disables a given MII time stamper.
+ *
+ * @mii_ts: An interface obtained via register_mii_timestamper().
+ *
+ */
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+ struct mii_timestamping_desc *desc;
+ struct list_head *this;
+
+	/* A mii_timestamper registered statically by a PHY driver won't
+	 * have come from register_mii_timestamper() and thus won't have
+	 * ->device set. Don't try to unregister those.
+	 */
+ if (!mii_ts->device)
+ return;
+
+ mutex_lock(&tstamping_devices_lock);
+ list_for_each(this, &mii_timestamping_devices) {
+ desc = list_entry(this, struct mii_timestamping_desc, list);
+ if (desc->device == mii_ts->device) {
+ desc->ctrl->release_channel(desc->device, mii_ts);
+ put_device(desc->device);
+ break;
+ }
+ }
+ mutex_unlock(&tstamping_devices_lock);
+}
+EXPORT_SYMBOL(unregister_mii_timestamper);
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index d5f8f351d9ef..937ac7da2789 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -18,6 +18,17 @@
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+#include <linux/scatterlist.h>
+#include <crypto/skcipher.h>
+
+#if IS_ENABLED(CONFIG_MACSEC)
+#include <net/macsec.h>
+#endif
+
+#include "mscc_macsec.h"
+#include "mscc_mac.h"
+#include "mscc_fc_buffer.h"
+
enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_0_2_NS = 0,
RGMII_RX_CLK_DELAY_0_8_NS = 1,
@@ -69,7 +80,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_EXT_PHY_CNTL_2 24
#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa000
+#define MII_VSC85XX_INT_MASK_MASK 0xa020
#define MII_VSC85XX_INT_MASK_WOL 0x0040
#define MII_VSC85XX_INT_STATUS 26
@@ -121,6 +132,26 @@ enum rgmii_rx_clock_delay {
#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
#define PHY_S6G_PLL_FSM_ENA_POS 7
+#define MSCC_EXT_PAGE_MACSEC_17 17
+#define MSCC_EXT_PAGE_MACSEC_18 18
+
+#define MSCC_EXT_PAGE_MACSEC_19 19
+#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
+#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_MACSEC_19_READ BIT(14)
+#define MSCC_PHY_MACSEC_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_MACSEC_20 20
+#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
+enum macsec_bank {
+ FC_BUFFER = 0x04,
+ HOST_MAC = 0x05,
+ LINE_MAC = 0x06,
+ IP_1588 = 0x0e,
+ MACSEC_INGR = 0x38,
+ MACSEC_EGR = 0x3c,
+};
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
@@ -128,6 +159,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
+#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
* in the same package.
*/
@@ -175,6 +207,9 @@ enum rgmii_rx_clock_delay {
#define SECURE_ON_ENABLE 0x8000
#define SECURE_ON_PASSWD_LEN_4 0x4000
+#define MSCC_PHY_EXTENDED_INT 28
+#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
+
/* Extended Page 3 Registers */
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
@@ -411,6 +446,44 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
+#if IS_ENABLED(CONFIG_MACSEC)
+struct macsec_flow {
+ struct list_head list;
+ enum mscc_macsec_destination_ports port;
+ enum macsec_bank bank;
+ u32 index;
+ int assoc_num;
+ bool has_transformation;
+
+ /* Highest takes precedence [0..15] */
+ u8 priority;
+
+ u8 key[MACSEC_KEYID_LEN];
+
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+
+ /* Matching */
+ struct {
+ u8 sci:1;
+ u8 tagged:1;
+ u8 untagged:1;
+ u8 etype:1;
+ } match;
+
+ u16 etype;
+
+ /* Action */
+ struct {
+ u8 bypass:1;
+ u8 drop:1;
+ } action;
+
+};
+#endif
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
@@ -424,6 +497,19 @@ struct vsc8531_private {
* package.
*/
unsigned int base_addr;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec fields:
+ * - One SecY per device (enforced at the s/w implementation level)
+ * - macsec_flows: list of h/w flows
+ * - ingr_flows: bitmap of ingress flows
+ * - egr_flows: bitmap of egress flows
+ */
+ struct macsec_secy *secy;
+ struct list_head macsec_flows;
+ unsigned long ingr_flows;
+ unsigned long egr_flows;
+#endif
};
#ifdef CONFIG_OF_MDIO
@@ -1584,6 +1670,978 @@ out:
return ret;
}
+#if IS_ENABLED(CONFIG_MACSEC)
+static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg)
+{
+ u32 val, val_l = 0, val_h = 0;
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if (bank >> 2 == 0x1)
+ /* non-MACsec access */
+ bank &= 0x3;
+ else
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
+ MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
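+	/* Poll register 19 until the CMD bit reads back set (or the
+	 * timeout expires) before fetching the 32-bit result from
+	 * registers 17 and 18.
+	 */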
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+ val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
+ val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+
+ return (val_h << 16) | val_l;
+}
+
+static void vsc8584_macsec_phy_write(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg, u32 val)
+{
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
+ bank &= 0x3;
+ else
+ /* MACsec access */
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+}
+
+static void vsc8584_macsec_classification(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ /* enable VLAN tag parsing */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
+ MSCC_MS_SAM_CP_TAG_PARSE_STAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
+ enum macsec_bank bank,
+ bool block)
+{
+ u32 port = (bank == MACSEC_INGR) ?
+ MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
+ u32 action = MSCC_MS_FLOW_BYPASS;
+
+ if (block)
+ action = MSCC_MS_FLOW_DROP;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
+ /* Kay tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
+ /* Kay tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
+}
+
+static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+
+ if (bank != MACSEC_INGR)
+ return;
+
+ /* Set default rules to pass unmatched frames */
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MS_PARAMS2_IG_CC_CONTROL);
+ val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
+ MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
+ val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_block_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_SW_RST |
+ MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
+
+ /* Set the MACsec block out of s/w reset and enable clocks */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
+ bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
+ MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
+ MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
+
+ /* Clear the counters */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Enable octet increment mode */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
+ MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Set the MTU */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
+
+ for (i = 0; i < 8; i++)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
+
+ if (bank == MACSEC_EGR) {
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
+ val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
+ MSCC_MS_FC_CFG_FCBUF_ENA |
+ MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
+ MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
+ MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
+ MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
+ }
+
+ vsc8584_macsec_classification(phydev, bank);
+ vsc8584_macsec_flow_default_action(phydev, bank, false);
+ vsc8584_macsec_integrity_checks(phydev, bank);
+
+ /* Enable the MACsec block */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
+}
+
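+/* Bring up one of the two internal MACs (host or line side): clear the
+ * statistics, configure pause frame handling, frame checks and the maximum
+ * frame length, then enable the Rx/Tx clocks and data paths.
+ */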
+static void vsc8584_macsec_mac_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ /* Clear host & line stats */
+ for (i = 0; i < 36; i++)
+ vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
+ val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
+ val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
+ val |= 0xffff;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
+ if (bank == HOST_MAC)
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
+ else
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
+ (bank == HOST_MAC ?
+ MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
+ val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
+ val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
+ val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
+ MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
+ val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
+ MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_RX_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_ENA);
+}
+
+/* Must be called with mdio_lock taken */
+static int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ u32 val;
+
+ vsc8584_macsec_block_init(phydev, MACSEC_INGR);
+ vsc8584_macsec_block_init(phydev, MACSEC_EGR);
+ vsc8584_macsec_mac_init(phydev, HOST_MAC);
+ vsc8584_macsec_mac_init(phydev, LINE_MAC);
+
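+ /* Configure the flow control buffer thresholds, rate adaptation and
+ * Tx data queue, then enable it in both directions.
+ */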
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_FC_READ_THRESH_CFG,
+ MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
+ MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
+ val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
+ MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
+ MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG);
+ val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
+ val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
+ val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
+
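+ /* Select the protocol mode of the IP1588 (PTP) block */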
+ val = vsc8584_macsec_phy_read(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
+ val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+ val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+ vsc8584_macsec_phy_write(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+
+ return 0;
+}
+
+static void vsc8584_macsec_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
+
+ if (flow->match.tagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
+ if (flow->match.untagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
+
+ if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
+ match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
+ mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
+ }
+
+ if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
+ match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
+ mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
+ MSCC_MS_SAM_MASK_SCI_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
+ lower_32_bits(flow->rx_sa->sc->sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
+ upper_32_bits(flow->rx_sa->sc->sci));
+ }
+
+ if (flow->match.etype) {
+ mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
+ MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
+ }
+
+ match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
+
+ /* Action for matching packets */
+ if (flow->action.drop)
+ action = MSCC_MS_FLOW_DROP;
+ else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
+ action = MSCC_MS_FLOW_BYPASS;
+ else
+ action = (bank == MACSEC_INGR) ?
+ MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
+
+ val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
+ MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
+
+ if (action == MSCC_MS_FLOW_BYPASS)
+ goto write_ctrl;
+
+ if (bank == MACSEC_INGR) {
+ if (priv->secy->replay_protect)
+ val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
+ if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
+ else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
+ } else if (bank == MACSEC_EGR) {
+ if (priv->secy->protect_frames)
+ val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
+ if (priv->secy->tx_sc.encrypt)
+ val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
+ if (priv->secy->tx_sc.send_sci)
+ val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
+ }
+
+write_ctrl:
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
+ enum macsec_bank bank)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
+ if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
+ return pos;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
+ (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
+ return;
+
+ /* Enable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
+
+ /* Set in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ /* Disable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
+
+ /* Clear in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
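+/* Transform record context IDs must be unique across both banks, so ingress
+ * flow indices are offset by the maximum number of flows.
+ */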
+static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
+{
+ if (flow->bank == MACSEC_INGR)
+ return flow->index + MSCC_MS_MAX_FLOWS;
+
+ return flow->index;
+}
+
+/* Derive a key for the hash authentication from the AES key */
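+/* For AES-GCM, the hash (GHASH) subkey is the AES-ECB encryption of the
+ * all-zero 16-byte block under the cipher key, which is what is computed
+ * here.
+ */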
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+ u16 key_len, u8 hkey[16])
+{
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ struct skcipher_request *req = NULL;
+ struct scatterlist src, dst;
+ DECLARE_CRYPTO_WAIT(wait);
+ u32 input[4] = {0};
+ int ret;
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
+ &wait);
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0)
+ goto out;
+
+ sg_init_one(&src, input, 16);
+ sg_init_one(&dst, hkey, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+
+ ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return ret;
+}
+
+static int vsc8584_macsec_transformation(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ int i, ret, index = flow->index;
+ u32 rec = 0, control = 0;
+ u8 hkey[16];
+ sci_t sci;
+
+ ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
+ if (ret)
+ return ret;
+
+ switch (priv->secy->key_len) {
+ case 16:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
+ break;
+ case 32:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ control |= (bank == MACSEC_EGR) ?
+ (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
+ (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
+
+ control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
+ CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
+ CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
+ CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
+
+ /* Set the control word */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ control);
+
+ /* Set the context ID. Must be unique. */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ vsc8584_macsec_flow_context_id(flow));
+
+ /* Set the encryption/decryption key */
+ for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)flow->key)[i]);
+
+ /* Set the authentication key */
+ for (i = 0; i < 4; i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)hkey)[i]);
+
+ /* Initial sequence number */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ bank == MACSEC_INGR ?
+ flow->rx_sa->next_pn : flow->tx_sa->next_pn);
+
+ if (bank == MACSEC_INGR)
+ /* Set the mask (replay window size) */
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ priv->secy->replay_window);
+
+ /* Set the initialization vectors (the SCI) */
+ sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ lower_32_bits(sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ upper_32_bits(sci));
+
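+ /* Zero-fill the remaining words of the transform record */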
+ while (rec < 20)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ 0);
+
+ flow->has_transformation = true;
+ return 0;
+}
+
+static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
+ enum macsec_bank bank)
+{
+ unsigned long *bitmap = bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+ struct macsec_flow *flow;
+ int index;
+
+ index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
+
+ if (index == MSCC_MS_MAX_FLOWS)
+ return ERR_PTR(-ENOMEM);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(index, bitmap);
+ flow->index = index;
+ flow->bank = bank;
+ flow->priority = 8;
+ flow->assoc_num = -1;
+
+ list_add_tail(&flow->list, &priv->macsec_flows);
+ return flow;
+}
+
+static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
+ struct macsec_flow *flow)
+{
+ unsigned long *bitmap = flow->bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+
+ list_del(&flow->list);
+ clear_bit(flow->index, bitmap);
+ kfree(flow);
+}
+
+static int vsc8584_macsec_add_flow(struct phy_device *phydev,
+ struct macsec_flow *flow, bool update)
+{
+ int ret;
+
+ flow->port = MSCC_MS_PORT_CONTROLLED;
+ vsc8584_macsec_flow(phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(phydev, flow);
+ if (ret) {
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_default_flows(struct phy_device *phydev)
+{
+ struct macsec_flow *flow;
+
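+ /* MKA frames (EAPOL, EtherType ETH_P_PAE) must bypass MACsec so the
+ * key agreement traffic itself is never transformed.
+ */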
+ /* Add a rule to let the MKA traffic go through, ingress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_UNCONTROLLED;
+ flow->match.tagged = 1;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ /* Add a rule to let the MKA traffic go through, egress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_COMMON;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ return 0;
+}
+
+static void vsc8584_macsec_del_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ vsc8584_macsec_flow_disable(phydev, flow);
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+}
+
+static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->rx_sa = ctx->sa.rx_sa;
+
+ /* Always match tagged packets on ingress */
+ flow->match.tagged = 1;
+ flow->match.sci = 1;
+
+ if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->tx_sa = ctx->sa.tx_sa;
+
+ /* Always match untagged packets on egress */
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_secy *secy = ctx->secy;
+
+ if (ctx->prepare) {
+ if (priv->secy)
+ return -EEXIST;
+
+ return 0;
+ }
+
+ priv->secy = secy;
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+
+ return vsc8584_macsec_default_flows(ctx->phydev);
+}
+
+static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
+
+ priv->secy = NULL;
+ return 0;
+}
+
+static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
+{
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_secy(ctx);
+ return vsc8584_macsec_add_secy(ctx);
+}
+
+static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ if (flow->bank == MACSEC_INGR && flow->rx_sa &&
+ flow->rx_sa->sc->sci == ctx->rx_sc->sci)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_txsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_txsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static struct macsec_ops vsc8584_macsec_ops = {
+ .mdo_dev_open = vsc8584_macsec_dev_open,
+ .mdo_dev_stop = vsc8584_macsec_dev_stop,
+ .mdo_add_secy = vsc8584_macsec_add_secy,
+ .mdo_upd_secy = vsc8584_macsec_upd_secy,
+ .mdo_del_secy = vsc8584_macsec_del_secy,
+ .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
+ .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
+ .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
+ .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
+ .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
+ .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
+ .mdo_add_txsa = vsc8584_macsec_add_txsa,
+ .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
+ .mdo_del_txsa = vsc8584_macsec_del_txsa,
+};
+#endif /* CONFIG_MACSEC */
+
/* Check if one PHY has already done the init of the parts common to all PHYs
* in the Quad PHY package.
*/
@@ -1733,6 +2791,24 @@ static int vsc8584_config_init(struct phy_device *phydev)
mutex_unlock(&phydev->mdio.bus->mdio_lock);
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec */
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
+ INIT_LIST_HEAD(&vsc8531->macsec_flows);
+ vsc8531->secy = NULL;
+
+ phydev->macsec_ops = &vsc8584_macsec_ops;
+
+ ret = vsc8584_macsec_init(phydev);
+ if (ret)
+ goto err;
+ }
+#endif
+
phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
@@ -1758,6 +2834,43 @@ err:
return ret;
}
+static int vsc8584_handle_interrupt(struct phy_device *phydev)
+{
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow, *tmp;
+ u32 cause, rec;
+
+ /* Check MACsec PN rollover */
+ cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS);
+ cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
+ if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
+ goto skip_rollover;
+
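+ /* Word offset of the sequence number in the egress transform record:
+ * control word + context ID + key words + 4 hash key words.
+ */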
+ rec = 6 + priv->secy->key_len / sizeof(u32);
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ u32 val;
+
+ if (flow->bank != MACSEC_EGR || !flow->has_transformation)
+ continue;
+
+ val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_XFORM_REC(flow->index, rec));
+ if (val == 0xffffffff) {
+ vsc8584_macsec_flow_disable(phydev, flow);
+ macsec_pn_wrapped(priv->secy, flow->tx_sa);
+ break;
+ }
+ }
+
+skip_rollover:
+#endif
+
+ phy_mac_interrupt(phydev);
+ return 0;
+}
+
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc, i, phy_id;
@@ -2201,6 +3314,20 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+#if IS_ENABLED(CONFIG_MACSEC)
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_2);
+ phy_write(phydev, MSCC_PHY_EXTENDED_INT,
+ MSCC_PHY_EXTENDED_INT_MS_EGR);
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
+ MSCC_MS_AIC_CTRL, 0xf);
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS,
+ MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
+#endif
rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
MII_VSC85XX_INT_MASK_MASK);
} else {
@@ -2404,7 +3531,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2429,7 +3555,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2454,7 +3579,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2479,7 +3603,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2504,7 +3627,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2530,7 +3652,6 @@ static struct phy_driver vsc85xx_driver[] = {
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
- .aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
@@ -2556,6 +3677,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2608,6 +3730,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2632,6 +3755,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
@@ -2656,6 +3780,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
+ .handle_interrupt = &vsc8584_handle_interrupt,
.ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
.did_interrupt = &vsc8584_did_interrupt,
diff --git a/drivers/net/phy/mscc_fc_buffer.h b/drivers/net/phy/mscc_fc_buffer.h
new file mode 100644
index 000000000000..7e9c0e877895
--- /dev/null
+++ b/drivers/net/phy/mscc_fc_buffer.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (C) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_FC_BUFFER_H_
+#define _MSCC_OCELOT_FC_BUFFER_H_
+
+#define MSCC_FCBUF_ENA_CFG 0x00
+#define MSCC_FCBUF_MODE_CFG 0x01
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG 0x02
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG 0x03
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG 0x04
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG 0x05
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG 0x06
+#define MSCC_FCBUF_FC_READ_THRESH_CFG 0x07
+#define MSCC_FCBUF_TX_FRM_GAP_COMP 0x08
+
+#define MSCC_FCBUF_ENA_CFG_TX_ENA BIT(0)
+#define MSCC_FCBUF_ENA_CFG_RX_ENA BIT(4)
+
+#define MSCC_FCBUF_MODE_CFG_DROP_BEHAVIOUR BIT(4)
+#define MSCC_FCBUF_MODE_CFG_PAUSE_REACT_ENA BIT(8)
+#define MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA BIT(12)
+#define MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA BIT(16)
+#define MSCC_FCBUF_MODE_CFG_TX_CTRL_QUEUE_ENA BIT(20)
+#define MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA BIT(24)
+#define MSCC_FCBUF_MODE_CFG_INCLUDE_PAUSE_RCVD_IN_PAUSE_GEN BIT(28)
+
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(x) (x)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(x) ((x) << 16)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET_M GENMASK(19, 16)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH(x) ((x) << 20)
+#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH_M GENMASK(31, 20)
+
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START(x) (x)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START_M GENMASK(15, 0)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END(x) ((x) << 16)
+#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH(x) (x)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH(x) ((x) << 16)
+#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH_M GENMASK(31, 16)
+
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(x) (x)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH_M GENMASK(15, 0)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(x) ((x) << 16)
+#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH_M GENMASK(31, 16)
+
+#endif
diff --git a/drivers/net/phy/mscc_mac.h b/drivers/net/phy/mscc_mac.h
new file mode 100644
index 000000000000..9420ee5175a6
--- /dev/null
+++ b/drivers/net/phy/mscc_mac.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_LINE_MAC_H_
+#define _MSCC_OCELOT_LINE_MAC_H_
+
+#define MSCC_MAC_CFG_ENA_CFG 0x00
+#define MSCC_MAC_CFG_MODE_CFG 0x01
+#define MSCC_MAC_CFG_MAXLEN_CFG 0x02
+#define MSCC_MAC_CFG_NUM_TAGS_CFG 0x03
+#define MSCC_MAC_CFG_TAGS_CFG 0x04
+#define MSCC_MAC_CFG_ADV_CHK_CFG 0x07
+#define MSCC_MAC_CFG_LFS_CFG 0x08
+#define MSCC_MAC_CFG_LB_CFG 0x09
+#define MSCC_MAC_CFG_PKTINF_CFG 0x0a
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL 0x0b
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2 0x0c
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL 0x0d
+#define MSCC_MAC_PAUSE_CFG_STATE 0x0e
+#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_LSB 0x0f
+#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_MSB 0x10
+#define MSCC_MAC_STATUS_RX_LANE_STICKY_0 0x11
+#define MSCC_MAC_STATUS_RX_LANE_STICKY_1 0x12
+#define MSCC_MAC_STATUS_TX_MONITOR_STICKY 0x13
+#define MSCC_MAC_STATUS_TX_MONITOR_STICKY_MASK 0x14
+#define MSCC_MAC_STATUS_STICKY 0x15
+#define MSCC_MAC_STATUS_STICKY_MASK 0x16
+#define MSCC_MAC_STATS_32BIT_RX_HIH_CKSM_ERR_CNT 0x17
+#define MSCC_MAC_STATS_32BIT_RX_XGMII_PROT_ERR_CNT 0x18
+#define MSCC_MAC_STATS_32BIT_RX_SYMBOL_ERR_CNT 0x19
+#define MSCC_MAC_STATS_32BIT_RX_PAUSE_CNT 0x1a
+#define MSCC_MAC_STATS_32BIT_RX_UNSUP_OPCODE_CNT 0x1b
+#define MSCC_MAC_STATS_32BIT_RX_UC_CNT 0x1c
+#define MSCC_MAC_STATS_32BIT_RX_MC_CNT 0x1d
+#define MSCC_MAC_STATS_32BIT_RX_BC_CNT 0x1e
+#define MSCC_MAC_STATS_32BIT_RX_CRC_ERR_CNT 0x1f
+#define MSCC_MAC_STATS_32BIT_RX_UNDERSIZE_CNT 0x20
+#define MSCC_MAC_STATS_32BIT_RX_FRAGMENTS_CNT 0x21
+#define MSCC_MAC_STATS_32BIT_RX_IN_RANGE_LEN_ERR_CNT 0x22
+#define MSCC_MAC_STATS_32BIT_RX_OUT_OF_RANGE_LEN_ERR_CNT 0x23
+#define MSCC_MAC_STATS_32BIT_RX_OVERSIZE_CNT 0x24
+#define MSCC_MAC_STATS_32BIT_RX_JABBERS_CNT 0x25
+#define MSCC_MAC_STATS_32BIT_RX_SIZE64_CNT 0x26
+#define MSCC_MAC_STATS_32BIT_RX_SIZE65TO127_CNT 0x27
+#define MSCC_MAC_STATS_32BIT_RX_SIZE128TO255_CNT 0x28
+#define MSCC_MAC_STATS_32BIT_RX_SIZE256TO511_CNT 0x29
+#define MSCC_MAC_STATS_32BIT_RX_SIZE512TO1023_CNT 0x2a
+#define MSCC_MAC_STATS_32BIT_RX_SIZE1024TO1518_CNT 0x2b
+#define MSCC_MAC_STATS_32BIT_RX_SIZE1519TOMAX_CNT 0x2c
+#define MSCC_MAC_STATS_32BIT_RX_IPG_SHRINK_CNT 0x2d
+#define MSCC_MAC_STATS_32BIT_TX_PAUSE_CNT 0x2e
+#define MSCC_MAC_STATS_32BIT_TX_UC_CNT 0x2f
+#define MSCC_MAC_STATS_32BIT_TX_MC_CNT 0x30
+#define MSCC_MAC_STATS_32BIT_TX_BC_CNT 0x31
+#define MSCC_MAC_STATS_32BIT_TX_SIZE64_CNT 0x32
+#define MSCC_MAC_STATS_32BIT_TX_SIZE65TO127_CNT 0x33
+#define MSCC_MAC_STATS_32BIT_TX_SIZE128TO255_CNT 0x34
+#define MSCC_MAC_STATS_32BIT_TX_SIZE256TO511_CNT 0x35
+#define MSCC_MAC_STATS_32BIT_TX_SIZE512TO1023_CNT 0x36
+#define MSCC_MAC_STATS_32BIT_TX_SIZE1024TO1518_CNT 0x37
+#define MSCC_MAC_STATS_32BIT_TX_SIZE1519TOMAX_CNT 0x38
+#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_CNT 0x39
+#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_MSB_CNT 0x3a
+#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_CNT 0x3b
+#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_MSB_CNT 0x3c
+#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_CNT 0x3d
+#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_MSB_CNT 0x3e
+#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_CNT 0x3f
+#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_MSB_CNT 0x40
+#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_CNT 0x41
+#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_MSB_CNT 0x42
+
+#define MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA BIT(0)
+#define MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA BIT(4)
+#define MSCC_MAC_CFG_ENA_CFG_RX_SW_RST BIT(8)
+#define MSCC_MAC_CFG_ENA_CFG_TX_SW_RST BIT(12)
+#define MSCC_MAC_CFG_ENA_CFG_RX_ENA BIT(16)
+#define MSCC_MAC_CFG_ENA_CFG_TX_ENA BIT(20)
+
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL(x) ((x) << 20)
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL_M GENMASK(29, 20)
+#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE BIT(16)
+#define MSCC_MAC_CFG_MODE_CFG_TUNNEL_PAUSE_FRAMES BIT(14)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG(x) ((x) << 10)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG_M GENMASK(12, 10)
+#define MSCC_MAC_CFG_MODE_CFG_MAC_IPG_CFG BIT(6)
+#define MSCC_MAC_CFG_MODE_CFG_XGMII_GEN_MODE_ENA BIT(4)
+#define MSCC_MAC_CFG_MODE_CFG_HIH_CRC_CHECK BIT(2)
+#define MSCC_MAC_CFG_MODE_CFG_UNDERSIZED_FRAME_DROP_DIS BIT(1)
+#define MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC BIT(0)
+
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(x) (x)
+#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M GENMASK(15, 0)
+
+#define MSCC_MAC_CFG_TAGS_CFG_RSZ 0x4
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID(x) ((x) << 16)
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define MSCC_MAC_CFG_TAGS_CFG_TAG_ENA BIT(4)
+
+#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_SOP_CHK_ENA BIT(20)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA BIT(16)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_SHK_CHK_DIS BIT(12)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA BIT(8)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA BIT(4)
+#define MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA BIT(0)
+
+#define MSCC_MAC_CFG_LFS_CFG_LFS_INH_TX BIT(8)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_DIS_TX BIT(4)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_UNIDIR_ENA BIT(3)
+#define MSCC_MAC_CFG_LFS_CFG_USE_LEADING_EDGE_DETECT BIT(2)
+#define MSCC_MAC_CFG_LFS_CFG_SPURIOUS_Q_DIS BIT(1)
+#define MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA BIT(0)
+
+#define MSCC_MAC_CFG_LB_CFG_XGMII_HOST_LB_ENA BIT(4)
+#define MSCC_MAC_CFG_LB_CFG_XGMII_PHY_LB_ENA BIT(0)
+
+#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA BIT(0)
+#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA BIT(4)
+#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA BIT(8)
+#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA BIT(12)
+#define MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA BIT(16)
+#define MSCC_MAC_CFG_PKTINF_CFG_LF_RELAY_ENA BIT(20)
+#define MSCC_MAC_CFG_PKTINF_CFG_RF_RELAY_ENA BIT(24)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING BIT(25)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_RX_PADDING BIT(26)
+#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_4BYTE_PREAMBLE BIT(27)
+#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(x) ((x) << 28)
+#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS_M GENMASK(30, 28)
+
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(x) ((x) << 16)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE_M GENMASK(31, 16)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_WAIT_FOR_LPI_LOW BIT(12)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_USE_PAUSE_STALL_ENA BIT(8)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_REPL_MODE BIT(4)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_FRC_FRAME BIT(2)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(x) (x)
+#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M GENMASK(1, 0)
+
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA BIT(16)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PRE_CRC_MODE BIT(20)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA BIT(12)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA BIT(8)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA BIT(4)
+#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE BIT(0)
+
+#define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0)
+#define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4)
+
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
+#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
+
+#endif /* _MSCC_OCELOT_LINE_MAC_H_ */
diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc_macsec.h
new file mode 100644
index 000000000000..d9ab6aba7482
--- /dev/null
+++ b/drivers/net/phy/mscc_macsec.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_MACSEC_H_
+#define _MSCC_OCELOT_MACSEC_H_
+
+#define MSCC_MS_MAX_FLOWS 16
+
+#define CONTROL_TYPE_EGRESS 0x6
+#define CONTROL_TYPE_INGRESS 0xf
+#define CONTROL_IV0 BIT(5)
+#define CONTROL_IV1 BIT(6)
+#define CONTROL_IV2 BIT(7)
+#define CONTROL_UPDATE_SEQ BIT(13)
+#define CONTROL_IV_IN_SEQ BIT(14)
+#define CONTROL_ENCRYPT_AUTH BIT(15)
+#define CONTROL_KEY_IN_CTX BIT(16)
+#define CONTROL_CRYPTO_ALG(x) ((x) << 17)
+#define CTRYPTO_ALG_AES_CTR_128 0x5
+#define CTRYPTO_ALG_AES_CTR_192 0x6
+#define CTRYPTO_ALG_AES_CTR_256 0x7
+#define CONTROL_DIGEST_TYPE(x) ((x) << 21)
+#define CONTROL_AUTH_ALG(x) ((x) << 23)
+#define AUTH_ALG_AES_GHAS 0x4
+#define CONTROL_AN(x) ((x) << 26)
+#define CONTROL_SEQ_TYPE(x) ((x) << 28)
+#define CONTROL_SEQ_MASK BIT(30)
+#define CONTROL_CONTEXT_ID BIT(31)
+
+enum mscc_macsec_destination_ports {
+ MSCC_MS_PORT_COMMON = 0,
+ MSCC_MS_PORT_RSVD = 1,
+ MSCC_MS_PORT_CONTROLLED = 2,
+ MSCC_MS_PORT_UNCONTROLLED = 3,
+};
+
+enum mscc_macsec_drop_actions {
+ MSCC_MS_ACTION_BYPASS_CRC = 0,
+ MSCC_MS_ACTION_BYPASS_BAD = 1,
+ MSCC_MS_ACTION_DROP = 2,
+ MSCC_MS_ACTION_BYPASS = 3,
+};
+
+enum mscc_macsec_flow_types {
+ MSCC_MS_FLOW_BYPASS = 0,
+ MSCC_MS_FLOW_DROP = 1,
+ MSCC_MS_FLOW_INGRESS = 2,
+ MSCC_MS_FLOW_EGRESS = 3,
+};
+
+enum mscc_macsec_validate_levels {
+ MSCC_MS_VALIDATE_DISABLED = 0,
+ MSCC_MS_VALIDATE_CHECK = 1,
+ MSCC_MS_VALIDATE_STRICT = 2,
+};
+
+#define MSCC_MS_XFORM_REC(x, y) (((x) << 5) + (y))
+#define MSCC_MS_ENA_CFG 0x800
+#define MSCC_MS_FC_CFG 0x804
+#define MSCC_MS_SAM_MAC_SA_MATCH_LO(x) (0x1000 + ((x) << 4))
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI(x) (0x1001 + ((x) << 4))
+#define MSCC_MS_SAM_MISC_MATCH(x) (0x1004 + ((x) << 4))
+#define MSCC_MS_SAM_MATCH_SCI_LO(x) (0x1005 + ((x) << 4))
+#define MSCC_MS_SAM_MATCH_SCI_HI(x) (0x1006 + ((x) << 4))
+#define MSCC_MS_SAM_MASK(x) (0x1007 + ((x) << 4))
+#define MSCC_MS_SAM_ENTRY_SET1 0x1808
+#define MSCC_MS_SAM_ENTRY_CLEAR1 0x180c
+#define MSCC_MS_SAM_FLOW_CTRL(x) (0x1c00 + (x))
+#define MSCC_MS_SAM_CP_TAG 0x1e40
+#define MSCC_MS_SAM_NM_FLOW_NCP 0x1e51
+#define MSCC_MS_SAM_NM_FLOW_CP 0x1e52
+#define MSCC_MS_MISC_CONTROL 0x1e5f
+#define MSCC_MS_COUNT_CONTROL 0x3204
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL 0x3a10
+#define MSCC_MS_PARAMS2_IG_CP_TAG 0x3a14
+#define MSCC_MS_VLAN_MTU_CHECK(x) (0x3c40 + (x))
+#define MSCC_MS_NON_VLAN_MTU_CHECK 0x3c48
+#define MSCC_MS_PP_CTRL 0x3c4b
+#define MSCC_MS_STATUS_CONTEXT_CTRL 0x3d02
+#define MSCC_MS_INTR_CTRL_STATUS 0x3d04
+#define MSCC_MS_BLOCK_CTX_UPDATE 0x3d0c
+#define MSCC_MS_AIC_CTRL 0x3e02
+
+/* MACSEC_ENA_CFG */
+#define MSCC_MS_ENA_CFG_CLK_ENA BIT(0)
+#define MSCC_MS_ENA_CFG_SW_RST BIT(1)
+#define MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA BIT(8)
+#define MSCC_MS_ENA_CFG_MACSEC_ENA BIT(9)
+#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(x) ((x) << 10)
+#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE_M GENMASK(12, 10)
+
+/* MACSEC_FC_CFG */
+#define MSCC_MS_FC_CFG_FCBUF_ENA BIT(0)
+#define MSCC_MS_FC_CFG_USE_PKT_EXPANSION_INDICATION BIT(1)
+#define MSCC_MS_FC_CFG_LOW_THRESH(x) ((x) << 4)
+#define MSCC_MS_FC_CFG_LOW_THRESH_M GENMASK(7, 4)
+#define MSCC_MS_FC_CFG_HIGH_THRESH(x) ((x) << 8)
+#define MSCC_MS_FC_CFG_HIGH_THRESH_M GENMASK(11, 8)
+#define MSCC_MS_FC_CFG_LOW_BYTES_VAL(x) ((x) << 12)
+#define MSCC_MS_FC_CFG_LOW_BYTES_VAL_M GENMASK(14, 12)
+#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL(x) ((x) << 16)
+#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL_M GENMASK(18, 16)
+
+/* MSCC_MS_SAM_MAC_SA_MATCH_HI */
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE_M GENMASK(31, 16)
+
+/* MACSEC_SAM_MISC_MATCH */
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_VALID BIT(0)
+#define MSCC_MS_SAM_MISC_MATCH_QINQ_FOUND BIT(1)
+#define MSCC_MS_SAM_MISC_MATCH_STAG_VALID BIT(2)
+#define MSCC_MS_SAM_MISC_MATCH_QTAG_VALID BIT(3)
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP(x) ((x) << 4)
+#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP_M GENMASK(6, 4)
+#define MSCC_MS_SAM_MISC_MATCH_CONTROL_PACKET BIT(7)
+#define MSCC_MS_SAM_MISC_MATCH_UNTAGGED BIT(8)
+#define MSCC_MS_SAM_MISC_MATCH_TAGGED BIT(9)
+#define MSCC_MS_SAM_MISC_MATCH_BAD_TAG BIT(10)
+#define MSCC_MS_SAM_MISC_MATCH_KAY_TAG BIT(11)
+#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT(x) ((x) << 12)
+#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT_M GENMASK(13, 12)
+#define MSCC_MS_SAM_MISC_MATCH_PRIORITY(x) ((x) << 16)
+#define MSCC_MS_SAM_MISC_MATCH_PRIORITY_M GENMASK(19, 16)
+#define MSCC_MS_SAM_MISC_MATCH_AN(x) ((x) << 24)
+#define MSCC_MS_SAM_MISC_MATCH_TCI(x) ((x) << 26)
+
+/* MACSEC_SAM_MASK */
+#define MSCC_MS_SAM_MASK_MAC_SA_MASK(x) (x)
+#define MSCC_MS_SAM_MASK_MAC_SA_MASK_M GENMASK(5, 0)
+#define MSCC_MS_SAM_MASK_MAC_DA_MASK(x) ((x) << 6)
+#define MSCC_MS_SAM_MASK_MAC_DA_MASK_M GENMASK(11, 6)
+#define MSCC_MS_SAM_MASK_MAC_ETYPE_MASK BIT(12)
+#define MSCC_MS_SAM_MASK_VLAN_VLD_MASK BIT(13)
+#define MSCC_MS_SAM_MASK_QINQ_FOUND_MASK BIT(14)
+#define MSCC_MS_SAM_MASK_STAG_VLD_MASK BIT(15)
+#define MSCC_MS_SAM_MASK_QTAG_VLD_MASK BIT(16)
+#define MSCC_MS_SAM_MASK_VLAN_UP_MASK BIT(17)
+#define MSCC_MS_SAM_MASK_VLAN_ID_MASK BIT(18)
+#define MSCC_MS_SAM_MASK_SOURCE_PORT_MASK BIT(19)
+#define MSCC_MS_SAM_MASK_CTL_PACKET_MASK BIT(20)
+#define MSCC_MS_SAM_MASK_VLAN_UP_INNER_MASK BIT(21)
+#define MSCC_MS_SAM_MASK_VLAN_ID_INNER_MASK BIT(22)
+#define MSCC_MS_SAM_MASK_SCI_MASK BIT(23)
+#define MSCC_MS_SAM_MASK_AN_MASK(x) ((x) << 24)
+#define MSCC_MS_SAM_MASK_TCI_MASK(x) ((x) << 26)
+
+/* MACSEC_SAM_FLOW_CTRL_EGR */
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE_M GENMASK(1, 0)
+#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT_M GENMASK(3, 2)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_4 BIT(4)
+#define MSCC_MS_SAM_FLOW_CTRL_FLOW_CRYPT_AUTH BIT(5)
+#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION_M GENMASK(7, 6)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8(x) ((x) << 8)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8_M GENMASK(15, 8)
+#define MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME BIT(16)
+#define MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT BIT(16)
+#define MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE BIT(17)
+#define MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI BIT(18)
+#define MSCC_MS_SAM_FLOW_CTRL_USE_ES BIT(19)
+#define MSCC_MS_SAM_FLOW_CTRL_USE_SCB BIT(20)
+#define MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(x) ((x) << 19)
+#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE(x) ((x) << 21)
+#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE_M GENMASK(22, 21)
+#define MSCC_MS_SAM_FLOW_CTRL_RESV_23 BIT(23)
+#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET(x) ((x) << 24)
+#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET_M GENMASK(30, 24)
+#define MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT BIT(31)
+
+/* MACSEC_SAM_CP_TAG */
+#define MSCC_MS_SAM_CP_TAG_MAP_TBL(x) (x)
+#define MSCC_MS_SAM_CP_TAG_MAP_TBL_M GENMASK(23, 0)
+#define MSCC_MS_SAM_CP_TAG_DEF_UP(x) ((x) << 24)
+#define MSCC_MS_SAM_CP_TAG_DEF_UP_M GENMASK(26, 24)
+#define MSCC_MS_SAM_CP_TAG_STAG_UP_EN BIT(27)
+#define MSCC_MS_SAM_CP_TAG_QTAG_UP_EN BIT(28)
+#define MSCC_MS_SAM_CP_TAG_PARSE_QINQ BIT(29)
+#define MSCC_MS_SAM_CP_TAG_PARSE_STAG BIT(30)
+#define MSCC_MS_SAM_CP_TAG_PARSE_QTAG BIT(31)
+
+/* MACSEC_SAM_NM_FLOW_NCP */
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(x) ((x) << 8)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(x) ((x) << 10)
+#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(x) ((x) << 14)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(x) ((x) << 18)
+#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(x) ((x) << 22)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(x) ((x) << 24)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(x) ((x) << 26)
+#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(x) ((x) << 30)
+
+/* MACSEC_SAM_NM_FLOW_CP */
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_FLOW_TYPE(x) (x)
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(x) ((x) << 2)
+#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(x) ((x) << 6)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_FLOW_TYPE(x) ((x) << 8)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(x) ((x) << 10)
+#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(x) ((x) << 14)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_FLOW_TYPE(x) ((x) << 16)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(x) ((x) << 18)
+#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(x) ((x) << 22)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_FLOW_TYPE(x) ((x) << 24)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(x) ((x) << 26)
+#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(x) ((x) << 30)
+
+/* MACSEC_MISC_CONTROL */
+#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(x) (x)
+#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX_M GENMASK(5, 0)
+#define MSCC_MS_MISC_CONTROL_STATIC_BYPASS BIT(8)
+#define MSCC_MS_MISC_CONTROL_NM_MACSEC_EN BIT(9)
+#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES(x) ((x) << 10)
+#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES_M GENMASK(11, 10)
+#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(x) ((x) << 24)
+#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE_M GENMASK(25, 24)
+
+/* MACSEC_COUNT_CONTROL */
+#define MSCC_MS_COUNT_CONTROL_RESET_ALL BIT(0)
+#define MSCC_MS_COUNT_CONTROL_DEBUG_ACCESS BIT(1)
+#define MSCC_MS_COUNT_CONTROL_SATURATE_CNTRS BIT(2)
+#define MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET BIT(3)
+
+/* MACSEC_PARAMS2_IG_CC_CONTROL */
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT BIT(14)
+#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT BIT(15)
+
+/* MACSEC_PARAMS2_IG_CP_TAG */
+#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL(x) (x)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL_M GENMASK(23, 0)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP(x) ((x) << 24)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP_M GENMASK(26, 24)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_STAG_UP_EN BIT(27)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_QTAG_UP_EN BIT(28)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ BIT(29)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG BIT(30)
+#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG BIT(31)
+
+/* MACSEC_VLAN_MTU_CHECK */
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(x) (x)
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE_M GENMASK(14, 0)
+#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP BIT(15)
+
+/* MACSEC_NON_VLAN_MTU_CHECK */
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(x) (x)
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE_M GENMASK(14, 0)
+#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP BIT(15)
+
+/* MACSEC_PP_CTRL */
+#define MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE BIT(0)
+
+/* MACSEC_INTR_CTRL_STATUS */
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS(x) (x)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M GENMASK(15, 0)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(x) ((x) << 16)
+#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M GENMASK(31, 16)
+#define MACSEC_INTR_CTRL_STATUS_ROLLOVER BIT(5)
+
+#endif
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 769a076514b0..a4d2d59fceca 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -387,7 +387,7 @@ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv->read_mmd) {
+ if (phydev->drv && phydev->drv->read_mmd) {
val = phydev->drv->read_mmd(phydev, devad, regnum);
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
@@ -444,7 +444,7 @@ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv->write_mmd) {
+ if (phydev->drv && phydev->drv->write_mmd) {
ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 80be4d691e5b..d76e038cf2cb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -422,8 +422,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->drv && phydev->drv->hwtstamp)
- return phydev->drv->hwtstamp(phydev, ifr);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
/* fall through */
default:
@@ -432,6 +432,31 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+/**
+ * phy_do_ioctl - generic ndo_do_ioctl implementation
+ * @dev: the net_device struct
+ * @ifr: &struct ifreq for socket ioctl's
+ * @cmd: ioctl cmd to execute
+ */
+int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (!dev->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+EXPORT_SYMBOL(phy_do_ioctl);
+
+/* Same as phy_do_ioctl, but ensures that net_device is running */
+int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(dev))
+ return -ENODEV;
+
+ return phy_do_ioctl(dev, ifr, cmd);
+}
+EXPORT_SYMBOL(phy_do_ioctl_running);
+
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0887ed2bb050..6a5056e0ae77 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -553,7 +553,7 @@ static const struct device_type mdio_bus_phy_type = {
.pm = MDIO_BUS_PHY_PM_OPS,
};
-static int phy_request_driver_module(struct phy_device *dev, int phy_id)
+static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
{
int ret;
@@ -565,15 +565,15 @@ static int phy_request_driver_module(struct phy_device *dev, int phy_id)
* then modprobe isn't available.
*/
if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) {
- phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n",
- ret, phy_id);
+ phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n",
+ ret, (unsigned long)phy_id);
return ret;
}
return 0;
}
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids)
{
@@ -881,6 +881,9 @@ EXPORT_SYMBOL(phy_device_register);
*/
void phy_device_remove(struct phy_device *phydev)
{
+ if (phydev->mii_ts)
+ unregister_mii_timestamper(phydev->mii_ts);
+
device_del(&phydev->mdio.dev);
/* Assert the reset signal */
@@ -919,6 +922,8 @@ static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier)
netif_carrier_off(netdev);
}
phydev->adjust_link(netdev);
+ if (phydev->mii_ts && phydev->mii_ts->link_state)
+ phydev->mii_ts->link_state(phydev->mii_ts, phydev);
}
/**
@@ -1102,9 +1107,8 @@ void phy_attached_info(struct phy_device *phydev)
EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)"
-void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+char *phy_attached_info_irq(struct phy_device *phydev)
{
- const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
char *irq_str;
char irq_num[8];
@@ -1121,6 +1125,14 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
break;
}
+ return kasprintf(GFP_KERNEL, "%s", irq_str);
+}
+EXPORT_SYMBOL(phy_attached_info_irq);
+
+void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
+{
+ const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+ char *irq_str = phy_attached_info_irq(phydev);
if (!fmt) {
phydev_info(phydev, ATTACHED_FMT "\n",
@@ -1137,6 +1149,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
vprintk(fmt, ap);
va_end(ap);
}
+ kfree(irq_str);
}
EXPORT_SYMBOL(phy_attached_print);
@@ -1771,6 +1784,36 @@ int genphy_restart_aneg(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_restart_aneg);
/**
+ * genphy_check_and_restart_aneg - Enable and restart auto-negotiation
+ * @phydev: target phy_device struct
+ * @restart: whether aneg restart is requested
+ *
+ * Check and restart auto-negotiation if needed.
+ */
+int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
+{
+ int ret = 0;
+
+ if (!restart) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE))
+ restart = true;
+ }
+
+ if (restart)
+ ret = genphy_restart_aneg(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL(genphy_check_and_restart_aneg);
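/* A minimal sketch of a caller (hypothetical driver code): a config_aneg
 * implementation programs its advertisement, then lets the new helper
 * decide whether a BMCR-level restart is really needed.
 * "example_write_advert" is an assumed driver-local helper that sets
 * *changed when the written advertisement differs from the old one.
 */
static int example_config_aneg(struct phy_device *phydev)
{
	bool changed = false;
	int err;

	err = example_write_advert(phydev, &changed);	/* hypothetical */
	if (err < 0)
		return err;

	/* Restarts aneg if the advertisement changed, or if BMCR shows
	 * aneg disabled or the PHY isolated.
	 */
	return genphy_check_and_restart_aneg(phydev, changed);
}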
+
+/**
* __genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
* @changed: whether autoneg is requested
@@ -1795,23 +1838,7 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
else if (err)
changed = true;
- if (!changed) {
- /* Advertisement hasn't changed, but maybe aneg was never on to
- * begin with? Or maybe phy was isolated?
- */
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = true; /* do restart aneg */
- }
-
- /* Only restart aneg if we are advertising something different
- * than we were before.
- */
- return changed ? genphy_restart_aneg(phydev) : 0;
+ return genphy_check_and_restart_aneg(phydev, changed);
}
EXPORT_SYMBOL(__genphy_config_aneg);
@@ -1979,6 +2006,36 @@ int genphy_read_lpa(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_read_lpa);
/**
+ * genphy_read_status_fixed - read the link parameters for !aneg mode
+ * @phydev: target phy_device struct
+ *
+ * Read the current duplex and speed state for a PHY operating with
+ * autonegotiation disabled.
+ */
+int genphy_read_status_fixed(struct phy_device *phydev)
+{
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_status_fixed);
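/* A minimal sketch (hypothetical driver code): a read_status
 * implementation that reuses the new helper for the forced-speed case,
 * mirroring what genphy_read_status() does in the hunk below.
 */
static int example_read_status(struct phy_device *phydev)
{
	int err = genphy_update_link(phydev);

	if (err)
		return err;

	if (phydev->autoneg == AUTONEG_DISABLE)
		return genphy_read_status_fixed(phydev);

	/* aneg case: read the link partner ability as usual */
	return genphy_read_lpa(phydev);
}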
+
+/**
* genphy_read_status - check the link status and update current link state
* @phydev: target phy_device struct
*
@@ -2012,22 +2069,9 @@ int genphy_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
phy_resolve_aneg_linkmode(phydev);
} else if (phydev->autoneg == AUTONEG_DISABLE) {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ err = genphy_read_status_fixed(phydev);
+ if (err < 0)
+ return err;
}
return 0;
@@ -2575,7 +2619,6 @@ static struct phy_driver genphy_driver = {
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
.get_features = genphy_read_abilities,
- .aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9a616d6bc4eb..70b9a143db84 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -48,7 +48,8 @@ struct phylink {
unsigned long phylink_disable_state; /* bitmask of disables */
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
- u8 link_an_mode; /* MLO_AN_xxx */
+ u8 cfg_link_an_mode; /* MLO_AN_xxx */
+ u8 cur_link_an_mode;
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
@@ -71,6 +72,9 @@ struct phylink {
bool mac_link_dropped;
struct sfp_bus *sfp_bus;
+ bool sfp_may_have_phy;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ u8 sfp_port;
};
#define phylink_printk(level, pl, fmt, ...) \
@@ -182,8 +186,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->link_config.pause |= MLO_PAUSE_ASYM;
if (ret == 0) {
- desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
- 0, GPIOD_IN, "?");
+ desc = fwnode_gpiod_get_index(fixed_node, "link", 0,
+ GPIOD_IN, "?");
if (!IS_ERR(desc))
pl->link_gpio = desc;
@@ -256,12 +260,12 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
dn = fwnode_get_named_child_node(fwnode, "fixed-link");
if (dn || fwnode_property_present(fwnode, "fixed-link"))
- pl->link_an_mode = MLO_AN_FIXED;
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
fwnode_handle_put(dn);
if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
- if (pl->link_an_mode == MLO_AN_FIXED) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
phylink_err(pl,
"can't use both fixed-link and in-band-status\n");
return -EINVAL;
@@ -273,10 +277,11 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
pl->link_config.an_enabled = true;
- pl->link_an_mode = MLO_AN_INBAND;
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -293,7 +298,9 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 2500baseX_Full);
break;
+ case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -301,6 +308,10 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 2500baseT_Full);
+ phylink_set(pl->supported, 2500baseX_Full);
+ phylink_set(pl->supported, 5000baseT_Full);
+ phylink_set(pl->supported, 10000baseT_Full);
phylink_set(pl->supported, 10000baseKR_Full);
phylink_set(pl->supported, 10000baseCR_Full);
phylink_set(pl->supported, 10000baseSR_Full);
@@ -333,14 +344,14 @@ static void phylink_mac_config(struct phylink *pl,
{
phylink_dbg(pl,
"%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
- __func__, phylink_an_mode_str(pl->link_an_mode),
+ __func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
- pl->ops->mac_config(pl->config, pl->link_an_mode, state);
+ pl->ops->mac_config(pl->config, pl->cur_link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -441,9 +452,8 @@ static void phylink_mac_link_up(struct phylink *pl,
struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(pl->config, pl->link_an_mode,
- pl->phy_state.interface,
- pl->phydev);
+ pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode,
+ pl->cur_interface, pl->phydev);
if (ndev)
netif_carrier_on(ndev);
@@ -461,7 +471,7 @@ static void phylink_mac_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->ops->mac_link_down(pl->config, pl->link_an_mode,
+ pl->ops->mac_link_down(pl->config, pl->cur_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
@@ -480,7 +490,7 @@ static void phylink_resolve(struct work_struct *w)
} else if (pl->mac_link_dropped) {
link_state.link = false;
} else {
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
phylink_resolve_flow(pl, &link_state);
@@ -567,6 +577,9 @@ static int phylink_register_sfp(struct phylink *pl,
struct sfp_bus *bus;
int ret;
+ if (!fwnode)
+ return 0;
+
bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
ret = PTR_ERR(bus);
@@ -648,7 +661,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(ret);
}
- if (pl->link_an_mode == MLO_AN_FIXED) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
ret = phylink_parse_fixedlink(pl, fwnode);
if (ret < 0) {
kfree(pl);
@@ -656,6 +669,8 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
kfree(pl);
@@ -711,10 +726,12 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
phy_duplex_to_str(phydev->duplex));
}
-static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
+static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
{
struct phylink_link_state config;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ char *irq_str;
int ret;
/*
@@ -729,7 +746,19 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
memset(&config, 0, sizeof(config));
linkmode_copy(supported, phy->supported);
linkmode_copy(config.advertising, phy->advertising);
- config.interface = pl->link_config.interface;
+
+ /* Clause 45 PHYs switch their SerDes lane between several different
+ * modes, normally 10GBASE-R and SGMII; some use 2500BASE-X for 2.5G
+ * speeds. We really need to know which interface modes the PHY and
+ * MAC support to properly work out which link modes can be supported.
+ */
+ if (phy->is_c45 &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_XAUI &&
+ interface != PHY_INTERFACE_MODE_USXGMII)
+ config.interface = PHY_INTERFACE_MODE_NA;
+ else
+ config.interface = interface;
ret = phylink_validate(pl, supported, &config);
if (ret)
@@ -738,13 +767,16 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
phy->phylink = pl;
phy->phy_link_change = phylink_phy_change;
+ irq_str = phy_attached_info_irq(phy);
phylink_info(pl,
- "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
- phy->drv->name);
+ "PHY [%s] driver [%s] (irq=%s)\n",
+ dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
+ kfree(irq_str);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
+ pl->phy_state.interface = interface;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -764,28 +796,18 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
-static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
- phy_interface_t interface)
+static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
{
- int ret;
-
- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
+ if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(interface))))
return -EINVAL;
if (pl->phydev)
return -EBUSY;
- ret = phy_attach_direct(pl->netdev, phy, 0, interface);
- if (ret)
- return ret;
-
- ret = phylink_bringup_phy(pl, phy);
- if (ret)
- phy_detach(phy);
-
- return ret;
+ return phy_attach_direct(pl->netdev, phy, 0, interface);
}
/**
@@ -805,13 +827,23 @@ static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
*/
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
+ int ret;
+
/* Use PHY device/driver interface */
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
pl->link_interface = phy->interface;
pl->link_config.interface = pl->link_interface;
}
- return __phylink_connect_phy(pl, phy, pl->link_interface);
+ ret = phylink_attach_phy(pl, phy, pl->link_interface);
+ if (ret < 0)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy, pl->link_config.interface);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
@@ -835,8 +867,8 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
int ret;
/* Fixed links and 802.3z are handled without needing a PHY */
- if (pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
@@ -847,20 +879,23 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
phy_node = of_parse_phandle(dn, "phy-device", 0);
if (!phy_node) {
- if (pl->link_an_mode == MLO_AN_PHY)
+ if (pl->cfg_link_an_mode == MLO_AN_PHY)
return -ENODEV;
return 0;
}
- phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
- pl->link_interface);
+ phy_dev = of_phy_find_device(phy_node);
/* We're done with the phy_node handle */
of_node_put(phy_node);
-
if (!phy_dev)
return -ENODEV;
- ret = phylink_bringup_phy(pl, phy_dev);
+ ret = phy_attach_direct(pl->netdev, phy_dev, flags,
+ pl->link_interface);
+ if (ret)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
phy_detach(phy_dev);
@@ -910,7 +945,7 @@ int phylink_fixed_state_cb(struct phylink *pl,
/* It does not make sense to let the link be overridden unless we use
* MLO_AN_FIXED
*/
- if (pl->link_an_mode != MLO_AN_FIXED)
+ if (pl->cfg_link_an_mode != MLO_AN_FIXED)
return -EINVAL;
mutex_lock(&pl->state_mutex);
@@ -960,7 +995,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->link_an_mode),
+ phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -983,7 +1018,7 @@ void phylink_start(struct phylink *pl)
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
- if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
int irq = gpiod_to_irq(pl->link_gpio);
if (irq > 0) {
@@ -998,7 +1033,8 @@ void phylink_start(struct phylink *pl)
if (irq <= 0)
mod_timer(&pl->link_poll, jiffies + HZ);
}
- if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
+ if ((pl->cfg_link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) ||
+ pl->config->pcs_poll)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->phydev)
phy_start(pl->phydev);
@@ -1125,7 +1161,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -1197,7 +1233,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* If we have a fixed link (as specified by firmware), refuse
* to change link parameters.
*/
- if (pl->link_an_mode == MLO_AN_FIXED &&
+ if (pl->cur_link_an_mode == MLO_AN_FIXED &&
(s->speed != pl->link_config.speed ||
s->duplex != pl->link_config.duplex))
return -EINVAL;
@@ -1209,7 +1245,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
} else {
/* If we have a fixed link, refuse to enable autonegotiation */
- if (pl->link_an_mode == MLO_AN_FIXED)
+ if (pl->cur_link_an_mode == MLO_AN_FIXED)
return -EINVAL;
config.speed = SPEED_UNKNOWN;
@@ -1219,44 +1255,66 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
}
- if (phylink_validate(pl, support, &config))
- return -EINVAL;
-
- /* If autonegotiation is enabled, we must have an advertisement */
- if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
- return -EINVAL;
-
- our_kset = *kset;
- linkmode_copy(our_kset.link_modes.advertising, config.advertising);
- our_kset.base.speed = config.speed;
- our_kset.base.duplex = config.duplex;
-
- /* If we have a PHY, configure the phy */
if (pl->phydev) {
+ /* If we have a PHY, we process the kset change via phylib.
+ * phylib will call our link state function if the PHY
+ * parameters have changed, which will trigger a resolve
+ * and update the MAC configuration.
+ */
+ our_kset = *kset;
+ linkmode_copy(our_kset.link_modes.advertising,
+ config.advertising);
+ our_kset.base.speed = config.speed;
+ our_kset.base.duplex = config.duplex;
+
ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset);
if (ret)
return ret;
- }
- mutex_lock(&pl->state_mutex);
- /* Configure the MAC to match the new settings */
- linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
- pl->link_config.interface = config.interface;
- pl->link_config.speed = our_kset.base.speed;
- pl->link_config.duplex = our_kset.base.duplex;
- pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
+ mutex_lock(&pl->state_mutex);
+ /* Save the new configuration */
+ linkmode_copy(pl->link_config.advertising,
+ our_kset.link_modes.advertising);
+ pl->link_config.interface = config.interface;
+ pl->link_config.speed = our_kset.base.speed;
+ pl->link_config.duplex = our_kset.base.duplex;
+ pl->link_config.an_enabled = our_kset.base.autoneg !=
+ AUTONEG_DISABLE;
+ mutex_unlock(&pl->state_mutex);
+ } else {
+ /* A fixed link cannot have its parameters changed here,
+ * which just leaves in-band mode.
+ */
+ if (phylink_validate(pl, support, &config))
+ return -EINVAL;
- /* If we have a PHY, phylib will call our link state function if the
- * mode has changed, which will trigger a resolve and update the MAC
- * configuration. For a fixed link, this isn't able to change any
- * parameters, which just leaves inband mode.
- */
- if (pl->link_an_mode == MLO_AN_INBAND &&
- !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
- phylink_mac_config(pl, &pl->link_config);
- phylink_mac_an_restart(pl);
+ /* If autonegotiation is enabled, we must have an advertisement */
+ if (config.an_enabled &&
+ phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
+ mutex_lock(&pl->state_mutex);
+ linkmode_copy(pl->link_config.advertising, config.advertising);
+ pl->link_config.interface = config.interface;
+ pl->link_config.speed = config.speed;
+ pl->link_config.duplex = config.duplex;
+ pl->link_config.an_enabled = kset->base.autoneg !=
+ AUTONEG_DISABLE;
+
+ if (pl->cur_link_an_mode == MLO_AN_INBAND &&
+ !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state)) {
+ /* If in 802.3z mode, this updates the advertisement.
+ *
+ * If we are in SGMII mode without a PHY, there is no
+ * advertisement; the only thing we have is the pause
+ * modes which can only come from a PHY.
+ */
+ phylink_mac_config(pl, &pl->link_config);
+ phylink_mac_an_restart(pl);
+ }
+ mutex_unlock(&pl->state_mutex);
}
- mutex_unlock(&pl->state_mutex);
return 0;
}
@@ -1341,7 +1399,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
pause->tx_pause);
} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state)) {
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
/* Should we allow fixed links to change against the config? */
phylink_resolve_flow(pl, config);
@@ -1548,7 +1606,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -1573,7 +1631,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->link_an_mode) {
+ switch (pl->cur_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -1679,25 +1737,21 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static int phylink_sfp_module_insert(void *upstream,
- const struct sfp_eeprom_id *id)
+static int phylink_sfp_config(struct phylink *pl, u8 mode,
+ const unsigned long *supported,
+ const unsigned long *advertising)
{
- struct phylink *pl = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
phy_interface_t iface;
- int ret = 0;
bool changed;
- u8 port;
-
- ASSERT_RTNL();
+ int ret;
- sfp_parse_support(pl->sfp_bus, id, support);
- port = sfp_parse_port(pl->sfp_bus, id, support);
+ linkmode_copy(support, supported);
memset(&config, 0, sizeof(config));
- linkmode_copy(config.advertising, support);
+ linkmode_copy(config.advertising, advertising);
config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
@@ -1712,9 +1766,7 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
- linkmode_copy(support1, support);
-
- iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
+ iface = sfp_select_interface(pl->sfp_bus, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
phylink_err(pl,
"selection of interface failed, advertisement %*pb\n",
@@ -1723,18 +1775,18 @@ static int phylink_sfp_module_insert(void *upstream,
}
config.interface = iface;
+ linkmode_copy(support1, support);
ret = phylink_validate(pl, support1, &config);
if (ret) {
phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(MLO_AN_INBAND),
+ phylink_an_mode_str(mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
+ phylink_an_mode_str(mode), phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
@@ -1746,19 +1798,19 @@ static int phylink_sfp_module_insert(void *upstream,
linkmode_copy(pl->link_config.advertising, config.advertising);
}
- if (pl->link_an_mode != MLO_AN_INBAND ||
+ if (pl->cur_link_an_mode != mode ||
pl->link_config.interface != config.interface) {
pl->link_config.interface = config.interface;
- pl->link_an_mode = MLO_AN_INBAND;
+ pl->cur_link_an_mode = mode;
changed = true;
phylink_info(pl, "switched to %s/%s link mode\n",
- phylink_an_mode_str(MLO_AN_INBAND),
+ phylink_an_mode_str(mode),
phy_modes(config.interface));
}
- pl->link_port = port;
+ pl->link_port = pl->sfp_port;
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state))
@@ -1767,6 +1819,55 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
+static int phylink_sfp_module_insert(void *upstream,
+ const struct sfp_eeprom_id *id)
+{
+ struct phylink *pl = upstream;
+ unsigned long *support = pl->sfp_support;
+
+ ASSERT_RTNL();
+
+ linkmode_zero(support);
+ sfp_parse_support(pl->sfp_bus, id, support);
+ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+
+ /* If this module may have a PHY attached later, defer configuration */
+ pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
+ if (pl->sfp_may_have_phy)
+ return 0;
+
+ return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+}
+
+static int phylink_sfp_module_start(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, start the PHY now. */
+ if (pl->phydev) {
+ phy_start(pl->phydev);
+ return 0;
+ }
+
+ /* If the module may have a PHY but we didn't detect one, we
+ * need to configure the MAC here.
+ */
+ if (!pl->sfp_may_have_phy)
+ return 0;
+
+ return phylink_sfp_config(pl, MLO_AN_INBAND,
+ pl->sfp_support, pl->sfp_support);
+}
+
+static void phylink_sfp_module_stop(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ /* If this SFP module has a PHY, stop it. */
+ if (pl->phydev)
+ phy_stop(pl->phydev);
+}
+
static void phylink_sfp_link_down(void *upstream)
{
struct phylink *pl = upstream;
@@ -1786,11 +1887,51 @@ static void phylink_sfp_link_up(void *upstream)
phylink_run_resolve(pl);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide an
+ * SGMII or 802.3z control word, so inband will not work.
+ */
+static bool phylink_phy_no_inband(struct phy_device *phy)
+{
+ return phy->is_c45 &&
+ (phy->c45_ids.device_ids[1] & 0xfffffff0) == 0xae025150;
+}
+
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
+ phy_interface_t interface;
+ u8 mode;
+ int ret;
- return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
+ /*
+ * This is the new way of dealing with flow control for PHYs,
+ * as described by Timur Tabi in commit 529ed1275263 ("net: phy:
+ * phy drivers should not set SUPPORTED_[Asym_]Pause"), except
+ * that, using our validate call to the MAC, we rely upon the
+ * MAC clearing the bits from both the supported and advertising
+ * fields.
+ */
+ phy_support_asym_pause(phy);
+
+ if (phylink_phy_no_inband(phy))
+ mode = MLO_AN_PHY;
+ else
+ mode = MLO_AN_INBAND;
+
+ /* Do the initial configuration */
+ ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising);
+ if (ret < 0)
+ return ret;
+
+ interface = pl->link_config.interface;
+ ret = phylink_attach_phy(pl, phy, interface);
+ if (ret < 0)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy, interface);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
}
static void phylink_sfp_disconnect_phy(void *upstream)
@@ -1802,6 +1943,8 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
.attach = phylink_sfp_attach,
.detach = phylink_sfp_detach,
.module_insert = phylink_sfp_module_insert,
+ .module_start = phylink_sfp_module_start,
+ .module_stop = phylink_sfp_module_stop,
.link_up = phylink_sfp_link_up,
.link_down = phylink_sfp_link_down,
.connect_phy = phylink_sfp_connect_phy,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 476db5345e1a..f5fa2fff3ddc 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -29,6 +29,8 @@
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
+#define RTL8211F_RX_DELAY BIT(3)
+
#define RTL8211E_TX_DELAY BIT(1)
#define RTL8211E_RX_DELAY BIT(2)
#define RTL8211E_MODE_MII_GMII BIT(3)
@@ -171,25 +173,66 @@ static int rtl8211c_config_init(struct phy_device *phydev)
static int rtl8211f_config_init(struct phy_device *phydev)
{
- u16 val;
+ struct device *dev = &phydev->mdio.dev;
+ u16 val_txdly, val_rxdly;
+ int ret;
- /* enable TX-delay for rgmii-{id,txid}, and disable it for rgmii and
- * rgmii-rxid. The RX-delay can be enabled by the external RXDLY pin.
- */
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
+ val_txdly = 0;
+ val_rxdly = 0;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_RXID:
- val = 0;
+ val_txdly = 0;
+ val_rxdly = RTL8211F_RX_DELAY;
break;
- case PHY_INTERFACE_MODE_RGMII_ID:
+
case PHY_INTERFACE_MODE_RGMII_TXID:
- val = RTL8211F_TX_DELAY;
+ val_txdly = RTL8211F_TX_DELAY;
+ val_rxdly = 0;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val_txdly = RTL8211F_TX_DELAY;
+ val_rxdly = RTL8211F_RX_DELAY;
break;
+
default: /* the rest of the modes imply leaving the delays as-is. */
return 0;
}
- return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
+ ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
+ val_txdly);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update the TX delay register\n");
+ return ret;
+ } else if (ret) {
+ dev_dbg(dev,
+ "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
+ val_txdly ? "Enabling" : "Disabling");
+ } else {
+ dev_dbg(dev,
+ "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
+ val_txdly ? "enabled" : "disabled");
+ }
+
+ ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
+ val_rxdly);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update the RX delay register\n");
+ return ret;
+ } else if (ret) {
+ dev_dbg(dev,
+ "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
+ val_rxdly ? "Enabling" : "Disabling");
+ } else {
+ dev_dbg(dev,
+ "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
+ val_rxdly ? "enabled" : "disabled");
+ }
+
+ return 0;
}
static int rtl8211e_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 5a72093ab6e7..d949ea7b4f8c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -103,6 +103,7 @@ static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
return NULL;
}
+
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -124,35 +125,35 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
/* port is the physical connector, set this from the connector field. */
switch (id->base.connector) {
- case SFP_CONNECTOR_SC:
- case SFP_CONNECTOR_FIBERJACK:
- case SFP_CONNECTOR_LC:
- case SFP_CONNECTOR_MT_RJ:
- case SFP_CONNECTOR_MU:
- case SFP_CONNECTOR_OPTICAL_PIGTAIL:
+ case SFF8024_CONNECTOR_SC:
+ case SFF8024_CONNECTOR_FIBERJACK:
+ case SFF8024_CONNECTOR_LC:
+ case SFF8024_CONNECTOR_MT_RJ:
+ case SFF8024_CONNECTOR_MU:
+ case SFF8024_CONNECTOR_OPTICAL_PIGTAIL:
+ case SFF8024_CONNECTOR_MPO_1X12:
+ case SFF8024_CONNECTOR_MPO_2X16:
port = PORT_FIBRE;
break;
- case SFP_CONNECTOR_RJ45:
+ case SFF8024_CONNECTOR_RJ45:
port = PORT_TP;
break;
- case SFP_CONNECTOR_COPPER_PIGTAIL:
+ case SFF8024_CONNECTOR_COPPER_PIGTAIL:
port = PORT_DA;
break;
- case SFP_CONNECTOR_UNSPEC:
+ case SFF8024_CONNECTOR_UNSPEC:
if (id->base.e1000_base_t) {
port = PORT_TP;
break;
}
/* fallthrough */
- case SFP_CONNECTOR_SG: /* guess */
- case SFP_CONNECTOR_MPO_1X12:
- case SFP_CONNECTOR_MPO_2X16:
- case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_NOSEPARATE:
- case SFP_CONNECTOR_MXC_2X16:
+ case SFF8024_CONNECTOR_SG: /* guess */
+ case SFF8024_CONNECTOR_HSSDC_II:
+ case SFF8024_CONNECTOR_NOSEPARATE:
+ case SFF8024_CONNECTOR_MXC_2X16:
port = PORT_OTHER;
break;
default:
@@ -179,6 +180,33 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
EXPORT_SYMBOL_GPL(sfp_parse_port);
/**
+ * sfp_may_have_phy() - indicate whether the module may have a PHY
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Parse the EEPROM identification given in @id, and return whether
+ * this module may have a PHY.
+ */
+bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+{
+ if (id->base.e1000_base_t)
+ return true;
+
+ if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
+ switch (id->base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sfp_may_have_phy);
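/* A minimal sketch (hypothetical upstream code) of the intended use:
 * an SFP upstream checks sfp_may_have_phy() at module_insert time and
 * defers MAC configuration until module_start, exactly as the phylink
 * hunks earlier in this diff do. The "example_*" names and the
 * may_have_phy field are assumptions.
 */
static int example_module_insert(void *upstream,
				 const struct sfp_eeprom_id *id)
{
	struct example_upstream *up = upstream;		/* hypothetical */

	up->may_have_phy = sfp_may_have_phy(up->sfp_bus, id);
	if (up->may_have_phy)
		return 0;	/* configure from module_start instead */

	return example_configure_mac(up);		/* hypothetical */
}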
+
+/**
* sfp_parse_support() - Parse the eeprom id for supported link modes
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
@@ -261,22 +289,33 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
switch (id->base.extended_cc) {
- case 0x00: /* Unspecified */
+ case SFF8024_ECC_UNSPEC:
break;
- case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */
+ case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
phylink_set(modes, 100000baseSR4_Full);
phylink_set(modes, 25000baseSR_Full);
break;
- case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */
- case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */
+ case SFF8024_ECC_100GBASE_LR4_25GBASE_LR:
+ case SFF8024_ECC_100GBASE_ER4_25GBASE_ER:
phylink_set(modes, 100000baseLR4_ER4_Full);
break;
- case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */
- case 0x0c: /* 25Gbase-CR CA-S */
- case 0x0d: /* 25Gbase-CR CA-N */
+ case SFF8024_ECC_100GBASE_CR4:
phylink_set(modes, 100000baseCR4_Full);
+ /* fallthrough */
+ case SFF8024_ECC_25GBASE_CR_S:
+ case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
break;
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ phylink_set(modes, 10000baseT_Full);
+ break;
+ case SFF8024_ECC_5GBASE_T:
+ phylink_set(modes, 5000baseT_Full);
+ break;
+ case SFF8024_ECC_2_5GBASE_T:
+ phylink_set(modes, 2500baseT_Full);
+ break;
default:
dev_warn(bus->sfp_dev,
"Unknown/unsupported extended compliance code: 0x%02x\n",
@@ -301,7 +340,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
*/
if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
/* If the encoding and bit rate allows 1000baseX */
- if (id->base.encoding == SFP_ENCODING_8B10B && br_nom &&
+ if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom &&
br_min <= 1300 && br_max >= 1200)
phylink_set(modes, 1000baseX_Full);
}
@@ -320,31 +359,27 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
/**
* sfp_select_interface() - Select appropriate phy_interface_t mode
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
* @link_modes: ethtool link modes mask
*
- * Derive the phy_interface_t mode for the information found in the
- * module's identifying EEPROM and the link modes mask. There is no
- * standard or defined way to derive this information, so we decide
- * based upon the link mode mask.
+ * Derive the phy_interface_t mode for the SFP module from the link
+ * modes mask.
*/
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes)
{
if (phylink_test(link_modes, 10000baseCR_Full) ||
phylink_test(link_modes, 10000baseSR_Full) ||
phylink_test(link_modes, 10000baseLR_Full) ||
phylink_test(link_modes, 10000baseLRM_Full) ||
- phylink_test(link_modes, 10000baseER_Full))
- return PHY_INTERFACE_MODE_10GKR;
+ phylink_test(link_modes, 10000baseER_Full) ||
+ phylink_test(link_modes, 10000baseT_Full))
+ return PHY_INTERFACE_MODE_10GBASER;
if (phylink_test(link_modes, 2500baseX_Full))
return PHY_INTERFACE_MODE_2500BASEX;
- if (id->base.e1000_base_t ||
- id->base.e100_base_lx ||
- id->base.e100_base_fx)
+ if (phylink_test(link_modes, 1000baseT_Half) ||
+ phylink_test(link_modes, 1000baseT_Full))
return PHY_INTERFACE_MODE_SGMII;
if (phylink_test(link_modes, 1000baseX_Full))
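/* A minimal sketch of the new calling convention: with the @id argument
 * dropped, interface selection is driven purely by the parsed link-mode
 * mask, so a 10GBASE-T module (10000baseT_Full) now resolves to
 * PHY_INTERFACE_MODE_10GBASER. "example_pick_interface" is hypothetical.
 */
static phy_interface_t example_pick_interface(struct sfp_bus *bus)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };

	/* as filled in by sfp_parse_support() for a 10GBASE-T module */
	phylink_set(modes, 10000baseT_Full);

	return sfp_select_interface(bus, modes);	/* 10GBASER */
}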
@@ -705,6 +740,27 @@ void sfp_module_remove(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
+int sfp_module_start(struct sfp_bus *bus)
+{
+ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
+ int ret = 0;
+
+ if (ops && ops->module_start)
+ ret = ops->module_start(bus->upstream);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sfp_module_start);
+
+void sfp_module_stop(struct sfp_bus *bus)
+{
+ const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
+
+ if (ops && ops->module_stop)
+ ops->module_stop(bus->upstream);
+}
+EXPORT_SYMBOL_GPL(sfp_module_stop);
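/* A minimal sketch (hypothetical upstream code): the new module_start/
 * module_stop calls bracket the period when the module is operational,
 * and pair with the upstream ops added to phylink earlier in this diff.
 * The "example_*" handler names are assumptions; a real upstream also
 * fills in attach/detach and the other mandatory ops.
 */
static const struct sfp_upstream_ops example_upstream_ops = {
	.module_insert	= example_module_insert,	/* sketch above */
	.module_start	= example_module_start,		/* hypothetical */
	.module_stop	= example_module_stop,		/* hypothetical */
};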
+
static void sfp_socket_clear(struct sfp_bus *bus)
{
bus->sfp_dev = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index c0b9a8e4e65a..73c2969f11a4 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -59,8 +59,10 @@ enum {
SFP_DEV_UP,
SFP_S_DOWN = 0,
+ SFP_S_FAIL,
SFP_S_WAIT,
SFP_S_INIT,
+ SFP_S_INIT_PHY,
SFP_S_INIT_TX_FAULT,
SFP_S_WAIT_LOS,
SFP_S_LINK_UP,
@@ -122,8 +124,10 @@ static const char *event_to_str(unsigned short event)
static const char * const sm_state_strings[] = {
[SFP_S_DOWN] = "down",
+ [SFP_S_FAIL] = "fail",
[SFP_S_WAIT] = "wait",
[SFP_S_INIT] = "init",
+ [SFP_S_INIT_PHY] = "init_phy",
[SFP_S_INIT_TX_FAULT] = "init_tx_fault",
[SFP_S_WAIT_LOS] = "wait_los",
[SFP_S_LINK_UP] = "link_up",
@@ -155,10 +159,34 @@ static const enum gpiod_flags gpio_flags[] = {
GPIOD_ASIS,
};
-#define T_WAIT msecs_to_jiffies(50)
-#define T_INIT_JIFFIES msecs_to_jiffies(300)
-#define T_RESET_US 10
-#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+/* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a
+ * non-cooled module to initialise its laser safety circuitry. We wait
+ * an initial T_WAIT period before we check TX_FAULT, to give any
+ * on-board PHY (for a copper SFP) time to initialise.
+ */
+#define T_WAIT msecs_to_jiffies(50)
+#define T_START_UP msecs_to_jiffies(300)
+#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
+
+/* t_reset is the time required to assert the TX_DISABLE signal to reset
+ * an indicated TX_FAULT.
+ */
+#define T_RESET_US 10
+#define T_FAULT_RECOVER msecs_to_jiffies(1000)
+
+/* N_FAULT_INIT is the number of recovery attempts at module initialisation
+ * time. If the TX_FAULT signal is not deasserted after this number of
+ * attempts at clearing it, we decide that the module is faulty.
+ * N_FAULT is the same but after the module has initialised.
+ */
+#define N_FAULT_INIT 5
+#define N_FAULT 5
+
+/* T_PHY_RETRY is the time interval between attempts to probe the PHY.
+ * R_PHY_RETRY is the number of attempts.
+ */
+#define T_PHY_RETRY msecs_to_jiffies(50)
+#define R_PHY_RETRY 12
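/* A worked example of the probe budget these constants define (only the
 * arithmetic is added here): the state machine re-arms a T_PHY_RETRY
 * timer up to R_PHY_RETRY times, so a slow copper module's PHY has
 * roughly 12 * 50ms = 600ms to appear on the I2C MDIO bus before
 * "no PHY detected" is logged.
 */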
/* SFP module presence detection is poor: the three MOD DEF signals are
* the same length on the PCB, which means it's possible for MOD DEF 0 to
@@ -214,10 +242,12 @@ struct sfp {
unsigned char sm_mod_tries;
unsigned char sm_dev_state;
unsigned short sm_state;
- unsigned int sm_retries;
+ unsigned char sm_fault_retries;
+ unsigned char sm_phy_retries;
struct sfp_eeprom_id id;
unsigned int module_power_mW;
+ unsigned int module_t_start_up;
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
@@ -231,7 +261,7 @@ struct sfp {
static bool sff_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFP_PHYS_ID_SFF &&
+ return id->base.phys_id == SFF8024_ID_SFF_8472 &&
id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
}
@@ -242,7 +272,7 @@ static const struct sff_data sff_data = {
static bool sfp_module_supported(const struct sfp_eeprom_id *id)
{
- return id->base.phys_id == SFP_PHYS_ID_SFP &&
+ return id->base.phys_id == SFF8024_ID_SFP &&
id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
}
@@ -412,13 +442,20 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
{
unsigned int state = 0;
u8 status;
+ int ret;
- if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) ==
- sizeof(status)) {
+ ret = sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status));
+ if (ret == sizeof(status)) {
if (status & SFP_STATUS_RX_LOS)
state |= SFP_F_LOS;
if (status & SFP_STATUS_TX_FAULT)
state |= SFP_F_TX_FAULT;
+ } else {
+ dev_err_ratelimited(sfp->dev,
+ "failed to read SFP soft status: %d\n",
+ ret);
+ /* Preserve the current state */
+ state = sfp->state;
}
return state & sfp->state_soft_mask;
@@ -1383,26 +1420,30 @@ static void sfp_sm_mod_next(struct sfp *sfp, unsigned int state,
static void sfp_sm_phy_detach(struct sfp *sfp)
{
- phy_stop(sfp->mod_phy);
sfp_remove_phy(sfp->sfp_bus);
phy_device_remove(sfp->mod_phy);
phy_device_free(sfp->mod_phy);
sfp->mod_phy = NULL;
}
-static void sfp_sm_probe_phy(struct sfp *sfp)
+static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
{
struct phy_device *phy;
int err;
- phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
- if (phy == ERR_PTR(-ENODEV)) {
- dev_info(sfp->dev, "no PHY detected\n");
- return;
- }
+ phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ if (phy == ERR_PTR(-ENODEV))
+ return PTR_ERR(phy);
if (IS_ERR(phy)) {
dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
- return;
+ return PTR_ERR(phy);
+ }
+
+ err = phy_device_register(phy);
+ if (err) {
+ phy_device_free(phy);
+ dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+ return err;
}
err = sfp_add_phy(sfp->sfp_bus, phy);
@@ -1410,11 +1451,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
phy_device_remove(phy);
phy_device_free(phy);
dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
- return;
+ return err;
}
sfp->mod_phy = phy;
- phy_start(phy);
+
+ return 0;
}
static void sfp_sm_link_up(struct sfp *sfp)
@@ -1464,7 +1506,7 @@ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
{
- if (sfp->sm_retries && !--sfp->sm_retries) {
+ if (sfp->sm_fault_retries && !--sfp->sm_fault_retries) {
dev_err(sfp->dev,
"module persistently indicates fault, disabling\n");
sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0);
@@ -1476,21 +1518,35 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
}
}
-static void sfp_sm_probe_for_phy(struct sfp *sfp)
+/* Probe an SFP for a PHY device if the module supports copper - the PHY
+ * normally sits at I2C bus address 0x56, and may be either a Clause 22
+ * or a Clause 45 PHY.
+ *
+ * Clause 22 copper SFP modules normally operate in Cisco SGMII mode with
+ * negotiation enabled, but some may be in 1000base-X - which is for the
+ * PHY driver to determine.
+ *
+ * Clause 45 copper SFP+ modules (10G) appear to switch their interface
+ * mode according to the negotiated line speed.
+ */
+static int sfp_sm_probe_for_phy(struct sfp *sfp)
{
- /* Setting the serdes link mode is guesswork: there's no
- * field in the EEPROM which indicates what mode should
- * be used.
- *
- * If it's a gigabit-only fiber module, it probably does
- * not have a PHY, so switch to 802.3z negotiation mode.
- * Otherwise, switch to SGMII mode (which is required to
- * support non-gigabit speeds) and probe for a PHY.
- */
- if (sfp->id.base.e1000_base_t ||
- sfp->id.base.e100_base_lx ||
- sfp->id.base.e100_base_fx)
- sfp_sm_probe_phy(sfp);
+ int err = 0;
+
+ switch (sfp->id.base.extended_cc) {
+ case SFF8024_ECC_10GBASE_T_SFI:
+ case SFF8024_ECC_10GBASE_T_SR:
+ case SFF8024_ECC_5GBASE_T:
+ case SFF8024_ECC_2_5GBASE_T:
+ err = sfp_sm_probe_phy(sfp, true);
+ break;
+
+ default:
+ if (sfp->id.base.e1000_base_t)
+ err = sfp_sm_probe_phy(sfp, false);
+ break;
+ }
+ return err;
}
static int sfp_module_parse_power(struct sfp *sfp)
@@ -1550,6 +1606,13 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
return -EAGAIN;
}
+ /* DM7052 reports as a high power module, responds to reads (with
+ * all bytes 0xff) at 0x51 but does not accept writes. In any case,
+ * if the bit is already set, we're already in high power mode.
+ */
+ if (!!(val & BIT(0)) == enable)
+ return 0;
+
if (enable)
val |= BIT(0);
else
@@ -1655,6 +1718,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
if (ret < 0)
return ret;
+ if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
+ !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
+ sfp->module_t_start_up = T_START_UP_BAD_GPON;
+ else
+ sfp->module_t_start_up = T_START_UP;
+
return 0;
}
@@ -1812,6 +1881,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
static void sfp_sm_main(struct sfp *sfp, unsigned int event)
{
unsigned long timeout;
+ int ret;
/* Some events are global */
if (sfp->sm_state != SFP_S_DOWN &&
@@ -1820,6 +1890,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
if (sfp->sm_state == SFP_S_LINK_UP &&
sfp->sm_dev_state == SFP_DEV_UP)
sfp_sm_link_down(sfp);
+ if (sfp->sm_state > SFP_S_INIT)
+ sfp_module_stop(sfp->sfp_bus);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
sfp_module_tx_disable(sfp);
@@ -1841,7 +1913,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp_module_tx_enable(sfp);
/* Initialise the fault clearance retries */
- sfp->sm_retries = 5;
+ sfp->sm_fault_retries = N_FAULT_INIT;
/* We need to check the TX_FAULT state, which is not defined
* while TX_DISABLE is asserted. The earliest we want to do
@@ -1855,11 +1927,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
break;
if (sfp->state & SFP_F_TX_FAULT) {
- /* Wait t_init before indicating that the link is up,
- * provided the current state indicates no TX_FAULT. If
- * TX_FAULT clears before this time, that's fine too.
+ /* Wait up to t_init (SFF-8472) or t_start_up (SFF-8431)
+ * from the TX_DISABLE deassertion for the module to
+ * initialise, which is indicated by TX_FAULT
+ * deasserting.
*/
- timeout = T_INIT_JIFFIES;
+ timeout = sfp->module_t_start_up;
if (timeout > T_WAIT)
timeout -= T_WAIT;
else
@@ -1876,27 +1949,51 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_INIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- /* TX_FAULT is still asserted after t_init, so assume
- * there is a fault.
+ /* TX_FAULT is still asserted after t_init or
+ * t_start_up, so assume there is a fault.
*/
sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
- sfp->sm_retries == 5);
+ sfp->sm_fault_retries == N_FAULT_INIT);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
- init_done: /* TX_FAULT deasserted or we timed out with TX_FAULT
- * clear. Probe for the PHY and check the LOS state.
- */
- sfp_sm_probe_for_phy(sfp);
- sfp_sm_link_check_los(sfp);
+ init_done:
+ sfp->sm_phy_retries = R_PHY_RETRY;
+ goto phy_probe;
+ }
+ break;
- /* Reset the fault retry count */
- sfp->sm_retries = 5;
+ case SFP_S_INIT_PHY:
+ if (event != SFP_E_TIMEOUT)
+ break;
+ phy_probe:
+ /* TX_FAULT deasserted or we timed out with TX_FAULT
+ * clear. Probe for the PHY and check the LOS state.
+ */
+ ret = sfp_sm_probe_for_phy(sfp);
+ if (ret == -ENODEV) {
+ if (--sfp->sm_phy_retries) {
+ sfp_sm_next(sfp, SFP_S_INIT_PHY, T_PHY_RETRY);
+ break;
+ } else {
+ dev_info(sfp->dev, "no PHY detected\n");
+ }
+ } else if (ret) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
}
+ if (sfp_module_start(sfp->sfp_bus)) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
+ }
+ sfp_sm_link_check_los(sfp);
+
+ /* Reset the fault retry count */
+ sfp->sm_fault_retries = N_FAULT;
break;
case SFP_S_INIT_TX_FAULT:
if (event == SFP_E_TIMEOUT) {
sfp_module_tx_fault_reset(sfp);
- sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES);
+ sfp_sm_next(sfp, SFP_S_INIT, sfp->module_t_start_up);
}
break;
@@ -1920,7 +2017,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_TX_FAULT:
if (event == SFP_E_TIMEOUT) {
sfp_module_tx_fault_reset(sfp);
- sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES);
+ sfp_sm_next(sfp, SFP_S_REINIT, sfp->module_t_start_up);
}
break;
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 64f54b0bbd8c..b83f70526270 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -22,6 +22,8 @@ void sfp_link_up(struct sfp_bus *bus);
void sfp_link_down(struct sfp_bus *bus);
int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_module_remove(struct sfp_bus *bus);
+int sfp_module_start(struct sfp_bus *bus);
+void sfp_module_stop(struct sfp_bus *bus);
int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops);
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index a32b3fd8a370..38834347a427 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -68,12 +68,7 @@ static int upd60620_read_status(struct phy_device *phydev)
mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
phy_state);
- if (phydev->duplex == DUPLEX_FULL) {
- if (phy_state & LPA_PAUSE_CAP)
- phydev->pause = 1;
- if (phy_state & LPA_PAUSE_ASYM)
- phydev->asym_pause = 1;
- }
+ phy_resolve_aneg_pause(phydev);
}
}
return 0;
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index a7b9cf3269bf..29a0917a81e6 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -874,15 +874,15 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb)
goto nomem;
- ap->rpkt = skb;
- }
- if (skb->len == 0) {
- /* Try to get the payload 4-byte aligned.
- * This should match the
- * PPP_ALLSTATIONS/PPP_UI/compressed tests in
- * process_input_packet, but we do not have
- * enough chars here to test buf[1] and buf[2].
- */
+ ap->rpkt = skb;
+ }
+ if (skb->len == 0) {
+ /* Try to get the payload 4-byte aligned.
+ * This should match the
+ * PPP_ALLSTATIONS/PPP_UI/compressed tests in
+ * process_input_packet, but we do not have
+ * enough chars here to test buf[1] and buf[2].
+ */
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3bf8a8b42983..22cc2cb9d878 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -296,8 +296,6 @@ static struct class *ppp_class;
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
- BUG_ON(!net);
-
return net_generic(net, ppp_net_id);
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e1fabb3e3246..acccb747aeda 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -155,7 +155,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
- RT_TOS(0), 0);
+ RT_TOS(0), sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto tx_error;
@@ -444,7 +444,8 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
- IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+ IPPROTO_GRE, RT_CONN_FLAGS(sk),
+ sk->sk_bound_dev_if);
if (IS_ERR(rt)) {
error = -EHOSTUNREACH;
goto end;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 2a91c192659f..6f4d7ba8b109 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -452,12 +452,19 @@ static void slip_transmit(struct work_struct *work)
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
- struct slip *sl = tty->disc_data;
+ struct slip *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
-static void sl_tx_timeout(struct net_device *dev)
+static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slip *sl = netdev_priv(dev);
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index a6d63665ad03..1f4bdd94407a 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -341,6 +341,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
features |= tap->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+ struct sk_buff *next;
if (IS_ERR(segs))
goto drop;
@@ -352,16 +353,13 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
}
consume_skb(skb);
- while (segs) {
- struct sk_buff *nskb = segs->next;
-
- segs->next = NULL;
- if (ptr_ring_produce(&q->ring, segs)) {
- kfree_skb(segs);
- kfree_skb_list(nskb);
+ skb_list_walk_safe(segs, skb, next) {
+ skb_mark_not_on_list(skb);
+ if (ptr_ring_produce(&q->ring, skb)) {
+ kfree_skb(skb);
+ kfree_skb_list(next);
break;
}
- segs = nskb;
}
} else {
/* If we receive a partial checksum and the tap side
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 683d371e6e82..650c937ed56b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1718,7 +1718,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (err < 0)
goto err_xdp;
if (err == XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (err != XDP_PASS)
goto out;
@@ -1936,6 +1936,10 @@ drop:
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
return total_len;
}
}
@@ -2549,7 +2553,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
}
if (flush)
- xdp_do_flush_map();
+ xdp_do_flush();
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index af3994e0853b..4e514f5d7c6c 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -39,17 +39,6 @@ static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
return 0;
}
-static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
-{
- if (!netif_running(net))
- return -EINVAL;
-
- if (!net->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(net->phydev, rq, cmd);
-}
-
/* set MAC link settings according to information from phylib */
static void ax88172a_adjust_link(struct net_device *netdev)
{
@@ -134,7 +123,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ax88172a_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 1e58702c737f..d387bc7ac1b6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -447,7 +447,7 @@ static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void catc_tx_timeout(struct net_device *netdev)
+static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct catc *catc = netdev_priv(netdev);
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 9df3c1ffff35..d7f3b70d5477 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -111,8 +111,8 @@ static int control_read(struct usbnet *dev,
request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE);
- netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
- index, size);
+ netdev_dbg(dev->net, "%s() index=0x%02x size=%d\n",
+ __func__, index, size);
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
@@ -130,8 +130,6 @@ static int control_read(struct usbnet *dev,
err = -EINVAL;
kfree(buf);
- return err;
-
err_out:
return err;
}
@@ -151,8 +149,8 @@ static int control_write(struct usbnet *dev, unsigned char request,
request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE);
- netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
- index, size);
+ netdev_dbg(dev->net, "%s() index=0x%02x size=%d\n",
+ __func__, index, size);
if (data) {
buf = kmemdup(data, size, GFP_KERNEL);
@@ -181,8 +179,8 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
- netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
- phy_id, loc);
+ netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
+ __func__, phy_id, loc);
if (phy_id != 0)
return -ENODEV;
@@ -199,8 +197,8 @@ static void ch9200_mdio_write(struct net_device *netdev,
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
- netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
- phy_id, loc);
+ netdev_dbg(netdev, "%s() phy_id=%02x loc:%02x\n",
+ __func__, phy_id, loc);
if (phy_id != 0)
return;
@@ -219,8 +217,8 @@ static int ch9200_link_reset(struct usbnet *dev)
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
- netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
- ecmd.speed, ecmd.duplex);
+ netdev_dbg(dev->net, "%s() speed:%d duplex:%d\n",
+ __func__, ecmd.speed, ecmd.duplex);
return 0;
}
@@ -309,7 +307,7 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data)
unsigned char mac_addr[0x06];
int rd_mac_len = 0;
- netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
+ netdev_dbg(dev->net, "%s:\n\tusbnet VID:%0x PID:%0x\n", __func__,
le16_to_cpu(dev->udev->descriptor.idVendor),
le16_to_cpu(dev->udev->descriptor.idProduct));
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index ca827802f291..417e42c9fd03 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -820,7 +820,7 @@ static const struct ethtool_ops ops = {
};
/* called when a packet did not ack after watchdogtimeout */
-static void hso_net_tx_timeout(struct net_device *net)
+static void hso_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct hso_net *odev = netdev_priv(net);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 8c01fbf68a89..c792d65dd7b4 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -400,7 +400,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static void ipheth_tx_timeout(struct net_device *net)
+static void ipheth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct ipheth_device *dev = netdev_priv(net);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 8e210ba4a313..ed01dc964c99 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -894,7 +894,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
/****************************************************************
* kaweth_tx_timeout
****************************************************************/
-static void kaweth_tx_timeout(struct net_device *net)
+static void kaweth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct kaweth_device *kaweth = netdev_priv(net);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index cf1f3f0a4b9b..eccbf4cd7149 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -20,6 +20,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
@@ -511,7 +512,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
}
} else {
netdev_warn(dev->net,
- "Failed to read stat ret = 0x%x", ret);
+ "Failed to read stat ret = %d", ret);
}
kfree(stats);
@@ -1663,14 +1664,6 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
-
- return phy_mii_ioctl(netdev->phydev, rq, cmd);
-}
-
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
@@ -1808,6 +1801,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
dev->mdiobus->read = lan78xx_mdiobus_read;
dev->mdiobus->write = lan78xx_mdiobus_write;
dev->mdiobus->name = "lan78xx-mdiobus";
+ dev->mdiobus->parent = &dev->udev->dev;
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
@@ -2723,11 +2717,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
-static int lan78xx_linearize(struct sk_buff *skb)
-{
- return skb_linearize(skb);
-}
-
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
@@ -2739,8 +2728,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
return NULL;
}
- if (lan78xx_linearize(skb) < 0)
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
return NULL;
+ }
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
@@ -3662,7 +3653,7 @@ static void lan78xx_disconnect(struct usb_interface *intf)
usb_put_dev(udev);
}
-static void lan78xx_tx_timeout(struct net_device *net)
+static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct lan78xx_net *dev = netdev_priv(net);
@@ -3670,6 +3661,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
tasklet_schedule(&dev->bh);
}
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
@@ -3678,11 +3682,12 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_change_mtu = lan78xx_change_mtu,
.ndo_set_mac_address = lan78xx_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = lan78xx_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = lan78xx_set_multicast,
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+ .ndo_features_check = lan78xx_features_check,
};
static void lan78xx_stat_monitor(struct timer_list *t)
@@ -3752,6 +3757,7 @@ static int lan78xx_probe(struct usb_interface *intf,
/* MTU range: 68 - 9000 */
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
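
Three things happen in the lan78xx diff: the trivial wrappers (lan78xx_ioctl, lan78xx_linearize) give way to the generic phy_do_ioctl_running helper and a direct skb_linearize() call, and a new ndo_features_check hook plus netif_set_gso_max_size() keep the stack from handing the driver GSO skbs whose segments would not fit in a single transfer. A sketch of a features_check of the same shape for a hypothetical device (the limits are placeholders, not the lan78xx values):

        #include <linux/if_vlan.h>
        #include <net/vxlan.h>

        #define FOO_MAX_FRAME   9000    /* hypothetical hardware limit */
        #define FOO_TX_OVERHEAD 8       /* hypothetical per-frame header */

        static netdev_features_t foo_features_check(struct sk_buff *skb,
                                                    struct net_device *netdev,
                                                    netdev_features_t features)
        {
                /* Too big for one frame: clear GSO so the core segments
                 * the skb in software before it reaches the driver.
                 */
                if (skb->len + FOO_TX_OVERHEAD > FOO_MAX_FRAME)
                        features &= ~NETIF_F_GSO_MASK;

                /* Let the core strip features the encapsulation can't keep. */
                features = vlan_features_check(skb, features);
                features = vxlan_features_check(skb, features);

                return features;
        }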
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f7d117d80cfb..8783e2ab3ec0 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -693,7 +693,7 @@ static void intr_callback(struct urb *urb)
"can't resubmit interrupt urb, %d\n", res);
}
-static void pegasus_tx_timeout(struct net_device *net)
+static void pegasus_tx_timeout(struct net_device *net, unsigned int txqueue)
{
pegasus_t *pegasus = netdev_priv(net);
netif_warn(pegasus, timer, net, "tx timeout\n");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4196c0e32740..3b7a3b8a5e06 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
};
struct qmimux_hdr {
@@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
.data = QMI_WWAN_QUIRK_DTR,
};
-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
- .description = "WWAN/QMI device",
- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
- .bind = qmi_wwan_bind,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .rx_fixup = qmi_wwan_rx_fixup,
- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
-};
-
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
#define QMI_GOBI_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 0)
-/* Quectel does not use fixed interface numbers on at least some of their
- * devices. We need to check the number of endpoints to ensure that we bind to
- * the correct interface.
+/* Many devices have QMI and DIAG functions which are distinguishable
+ * from other vendor specific functions by class, subclass and
+ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
+ * and is silently rejected when probed.
+ *
+ * This makes it possible to match dynamically numbered QMI functions
+ * as seen on e.g. many Quectel modems.
*/
-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+#define QMI_MATCH_FF_FF_FF(vend, prod) \
USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
USB_SUBCLASS_VENDOR_SPEC, 0xff), \
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
static const struct usb_device_id products[] = {
/* 1. CDC ECM like devices match on the control interface */
@@ -1059,9 +1052,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1362,6 +1356,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
@@ -1453,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
{
struct usb_device_id *id = (struct usb_device_id *)prod;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
- const struct driver_info *info;
/* Workaround to enable dynamic IDs. This disables usbnet
* blacklisting functionality. Which, if required, can be
@@ -1489,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* different. Ignore the current interface if the number of endpoints
* equals the number for the diag interface (two).
*/
- info = (void *)id->driver_info;
-
- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
- if (desc->bNumEndpoints == 2)
- return -ENODEV;
- }
+ if (desc->bNumEndpoints == 2)
+ return -ENODEV;
return usbnet_probe(intf, id);
}
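
The qmi_wwan change generalizes the old Quectel quirk: instead of a vendor-specific driver_info, any interface whose class/subclass/protocol triple is ff/ff/ff is matched, and probe now unconditionally rejects interfaces with exactly two endpoints, the signature of the DIAG function. A hedged sketch of how the match entry and the probe filter cooperate (foo_* names hypothetical; the VID/PID is the Quectel EC25 entry from the hunk above):

        /* Match any interface of this device whose class triple is ff/ff/ff. */
        static const struct usb_device_id foo_ids[] = {
                { USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0125,
                                                USB_CLASS_VENDOR_SPEC,
                                                USB_SUBCLASS_VENDOR_SPEC, 0xff) },
                { }
        };

        static int foo_probe(struct usb_interface *intf,
                             const struct usb_device_id *id)
        {
                /* The DIAG function also matches ff/ff/ff but has exactly
                 * two endpoints; refuse it so only the QMI function binds.
                 */
                if (intf->cur_altsetting->desc.bNumEndpoints == 2)
                        return -ENODEV;

                return 0;       /* a real driver would continue binding here */
        }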
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c5ebf35d2488..78ddbaf6401b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
#define NETNEXT_VERSION "11"
/* Information for net */
-#define NET_VERSION "10"
+#define NET_VERSION "11"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
#define PLA_BOOT_CTRL 0xe004
+#define PLA_LWAKE_CTRL_REG 0xe007
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
@@ -95,6 +96,7 @@
#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
+#define PLA_CONFIG6 0xe90a /* CONFIG6 */
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
@@ -107,6 +109,7 @@
#define PLA_BP_EN 0xfc38
#define USB_USB2PHY 0xb41e
+#define USB_SSPHYLINK1 0xb426
#define USB_SSPHYLINK2 0xb428
#define USB_U2P3_CTRL 0xb460
#define USB_CSR_DUMMY1 0xb464
@@ -300,6 +303,9 @@
#define LINK_ON_WAKE_EN 0x0010
#define LINK_OFF_WAKE_EN 0x0008
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN BIT(0)
+
/* PLA_CONFIG5 */
#define BWF_EN 0x0040
#define MWF_EN 0x0020
@@ -312,6 +318,7 @@
/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN 0x0080
#define PFM_PWM_SWITCH 0x0040
+#define TEST_IO_OFF BIT(4)
/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN 0x00004000
@@ -324,6 +331,7 @@
#define MAC_CLK_SPDWN_EN BIT(15)
/* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN BIT(14)
#define PKT_AVAIL_SPDWN_EN 0x0100
#define SUSPEND_SPDWN_EN 0x0004
#define U1U2_SPDWN_EN 0x0002
@@ -354,6 +362,9 @@
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN BIT(7)
+
/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT BIT(0)
@@ -365,13 +376,18 @@
#define DEBUG_LTSSM 0x0082
/* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK BIT(15)
#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
+#define POLL_LINK_CHG BIT(0)
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG BIT(1)
+
/* USB_SSPHYLINK2 */
#define pwd_dn_scale_mask 0x3ffe
#define pwd_dn_scale(x) ((x) << 1)
@@ -682,6 +698,9 @@ enum rtl8152_flags {
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
+#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
+#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
+
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -1897,8 +1916,8 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
{
if (skb_shinfo(skb)->gso_size) {
netdev_features_t features = tp->netdev->features;
+ struct sk_buff *segs, *seg, *next;
struct sk_buff_head seg_list;
- struct sk_buff *segs, *nskb;
features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
@@ -1907,12 +1926,10 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
__skb_queue_head_init(&seg_list);
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- __skb_queue_tail(&seg_list, nskb);
- } while (segs);
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+ __skb_queue_tail(&seg_list, seg);
+ }
skb_queue_splice(&seg_list, list);
dev_kfree_skb(skb);
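
The rewritten loop above swaps a hand-rolled do/while over the GSO segment chain for skb_list_walk_safe(), a v5.6 helper that walks an skb->next list while tolerating removal of the current entry; skb_mark_not_on_list() replaces the open-coded "nskb->next = NULL". The idiom in isolation (error path elided for brevity):

        struct sk_buff_head seg_list;
        struct sk_buff *segs, *seg, *next;

        segs = skb_gso_segment(skb, features);
        if (IS_ERR(segs) || !segs)
                return;                         /* sketch: drop path elided */

        __skb_queue_head_init(&seg_list);
        skb_list_walk_safe(segs, seg, next) {
                skb_mark_not_on_list(seg);      /* unlink before re-queueing */
                __skb_queue_tail(&seg_list, seg);
        }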
@@ -2507,7 +2524,7 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
}
}
-static void rtl8152_tx_timeout(struct net_device *netdev)
+static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -2863,6 +2880,17 @@ static int rtl8153_enable(struct r8152 *tp)
r8153_set_rx_early_timeout(tp);
r8153_set_rx_early_size(tp);
+ if (tp->version == RTL_VER_09) {
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data &= ~FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ usleep_range(1000, 2000);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ }
+
return rtl_enable(tp);
}
@@ -3376,8 +3404,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
}
@@ -4675,7 +4703,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
- r8153_u2p3en(tp, true);
set_bit(PHY_RESET, &tp->flags);
}
@@ -4954,6 +4981,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -4961,6 +4990,19 @@ static void rtl8153_up(struct r8152 *tp)
r8153_u2p3en(tp, false);
r8153_aldps_en(tp, false);
r8153_first_init(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+ ocp_data &= ~DELAY_PHY_PWR_CHG;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
r8153_aldps_en(tp, true);
switch (tp->version) {
@@ -4979,11 +5021,17 @@ static void rtl8153_up(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data &= ~LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
@@ -4994,6 +5042,8 @@ static void rtl8153_down(struct r8152 *tp)
static void rtl8153b_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -5004,18 +5054,29 @@ static void rtl8153b_up(struct r8152 *tp)
r8153_first_init(tp);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153_aldps_en(tp, true);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
static void rtl8153b_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data |= PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153b_power_cut_en(tp, false);
@@ -5387,6 +5448,16 @@ static void r8153_init(struct r8152 *tp)
else
ocp_data |= DYNAMIC_BURST;
ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+ r8153_queue_wake(tp, false);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5416,10 +5487,19 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
r8153_power_cut_en(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
r8153_u1u2en(tp, true);
r8153_mac_clk_spd(tp, false);
usb_enable_lpm(tp->udev);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5484,7 +5564,17 @@ static void r8153b_init(struct r8152 *tp)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153b_u1u2en(tp, true);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
@@ -5492,6 +5582,19 @@ static void r8153b_init(struct r8152 *tp)
ocp_data |= MAC_CLK_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ if (tp->version == RTL_VER_09) {
+ /* Disable Test IO for 32QFN */
+ if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= TEST_IO_OFF;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+ }
+ }
+
set_bit(GREEN_ETHERNET, &tp->flags);
/* rx aggregation */
@@ -6597,6 +6700,9 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENODEV;
}
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -6656,9 +6762,13 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
- le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
- set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+ switch (le16_to_cpu(udev->descriptor.idProduct)) {
+ case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+ set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+ }
+ }
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
(!strcmp(udev->serial, "000001000000") ||
@@ -6704,6 +6814,11 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+ else
+ tp->saved_wolopts = __rtl_get_wol(tp);
+
tp->rtl_ops.init(tp);
#if IS_BUILTIN(CONFIG_USB_RTL8152)
/* Retry in case request_firmware() is not ready yet. */
@@ -6721,10 +6836,6 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
- if (!rtl_can_wakeup(tp))
- __rtl_set_wol(tp, 0);
-
- tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
else
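
Nearly every functional r8152 change in this patch is an instance of the driver's read-modify-write idiom over its OCP register accessors: read the PLA or USB register, flip a bit, write the value back. The LANWAKE handling added above, extracted as a standalone fragment:

        u32 ocp_data;

        /* Arm the LAN-wake clear logic... */
        ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
        ocp_data |= LANWAKE_CLR_EN;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);

        /* ...and make sure the wake pin itself is deasserted. */
        ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
        ocp_data &= ~LANWAKE_PIN;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);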
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 13e51ccf0214..e7c630d37589 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -655,7 +655,7 @@ static void disable_net_traffic(rtl8150_t * dev)
set_registers(dev, CR, 1, &cr);
}
-static void rtl8150_tx_timeout(struct net_device *netdev)
+static void rtl8150_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
rtl8150_t *dev = netdev_priv(netdev);
dev_warn(&netdev->dev, "Tx timeout.\n");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9ce6d30576dd..5ec97def3513 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1293,7 +1293,7 @@ static void tx_complete (struct urb *urb)
/*-------------------------------------------------------------------------*/
-void usbnet_tx_timeout (struct net_device *net)
+void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
{
struct usbnet *dev = netdev_priv(net);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a552df37a347..8cdc4415fa70 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -377,6 +377,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
unsigned int max_len;
struct veth_rq *rq;
+ rcu_read_lock();
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
goto drop;
@@ -418,11 +419,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- if (likely(!drops))
+ if (likely(!drops)) {
+ rcu_read_unlock();
return n;
+ }
ret = n - drops;
drop:
+ rcu_read_unlock();
atomic64_add(drops, &priv->dropped);
return ret;
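
The veth fix brackets the whole ndo_xdp_xmit path with rcu_read_lock()/rcu_read_unlock() so the peer's receive queue and its XDP state cannot be torn down mid-transmit; note that both the early success return and the drop path must release the lock. The locking shape, reduced to a sketch (do_xmit is a hypothetical stand-in for the real body):

        static int foo_xdp_xmit(struct net_device *dev, int n,
                                struct xdp_frame **frames, u32 flags)
        {
                int ret;

                rcu_read_lock();        /* pins RCU-protected peer/queue state */

                ret = do_xmit(dev, n, frames, flags);   /* hypothetical helper */

                rcu_read_unlock();      /* must pair on every return path */
                return ret;
        }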
@@ -769,7 +773,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
if (xdp_xmit & VETH_XDP_TX)
veth_xdp_flush(rq->dev, &bq);
if (xdp_xmit & VETH_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
xdp_clear_return_frame_no_direct();
return done;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4d7d5434cc5d..2fe7a3188282 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -501,7 +501,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
- xdp_prog = rcu_dereference(rq->xdp_prog);
+ xdp_prog = rcu_access_pointer(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;
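
The virtio_net hunk is an annotation fix rather than a behavior change: the pointer is only tested for existence, never dereferenced, and rcu_access_pointer() expresses exactly that without requiring an RCU read-side critical section. The distinction, sketched:

        struct bpf_prog *prog;
        u32 act;

        /* Existence check only: no dereference, no RCU section required. */
        if (!rcu_access_pointer(rq->xdp_prog))
                return -ENXIO;

        /* Actually using the pointer still needs the read lock. */
        rcu_read_lock();
        prog = rcu_dereference(rq->xdp_prog);
        if (prog)
                act = bpf_prog_run_xdp(prog, &xdp);     /* xdp set up earlier */
        rcu_read_unlock();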
@@ -1432,7 +1432,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
virtqueue_napi_complete(napi, rq->vq, received);
if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_sq(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 216acf37ca7c..18f152fa0068 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3198,7 +3198,7 @@ vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
static void
-vmxnet3_tx_timeout(struct net_device *netdev)
+vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 0a38c76688ab..1e4b9ba70983 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -555,10 +555,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
}
if (VMXNET3_VERSION_GE_3(adapter)) {
- if (param->rx_mini_pending < 0 ||
- param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) {
+ if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
return -EINVAL;
- }
} else if (param->rx_mini_pending != 0) {
return -EINVAL;
}
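
The vmxnet3 cleanup removes dead code: rx_mini_pending is a __u32 in struct ethtool_ringparam, so the "< 0" arm could never fire (and trips tautological-compare warnings); only the upper bound is a real check. In sketch form:

        /* param->rx_mini_pending is a __u32, so "< 0" can never be true;
         * only the upper bound needs checking.
         */
        if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
                return -EINVAL;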
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3ec6b506033d..d3b08b76b1ec 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1060,6 +1060,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
return err;
} else {
union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
+
if (remote->sa.sa_family == AF_INET) {
ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
ip->sa.sa_family = AF_INET;
@@ -1696,7 +1697,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
!net_eq(vxlan->net, dev_net(vxlan->dev))))
- goto drop;
+ goto drop;
if (vxlan_collect_metadata(vs)) {
struct metadata_dst *tun_dst;
@@ -2541,7 +2542,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
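
Both vxlan tos hunks apply RT_TOS() before ECN encapsulation: when the tunnel inherits the inner dsfield, the variable can carry ECN bits, and without the mask those bits would leak into the outer TOS even though ip_tunnel_ecn_encap() derives ECN separately. Sketched:

        /* RT_TOS() keeps only the RFC 1349 TOS bits (tos & 0x1e), stripping
         * ECN bits inherited from the inner header; the outer header's ECN
         * is computed inside ip_tunnel_ecn_encap() itself.
         */
        tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);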
@@ -2581,7 +2582,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
@@ -4128,30 +4129,30 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
- !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
- !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
nla_put_u8(skb, IFLA_VXLAN_RSC,
!!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
nla_put_u8(skb, IFLA_VXLAN_L2MISS,
- !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
- !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
!!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
- !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
+ !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
- !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
- !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
- !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index dd1a147f2971..4530840e15ef 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -315,7 +315,8 @@ config DSCC4_PCI_RST
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
- depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR
+ depends on ARCH_IXP4XX
help
Say Y here if you want to use built-in HSS ports
 on the IXP4xx processor.
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index af539151d663..5d6532ad6b78 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -268,7 +268,7 @@ static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity);
static int cosa_net_open(struct net_device *d);
static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d);
+static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
static char *cosa_net_setup_rx(struct channel_data *channel, int size);
static int cosa_net_rx_done(struct channel_data *channel);
@@ -670,7 +670,7 @@ static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void cosa_net_timeout(struct net_device *dev)
+static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
{
struct channel_data *chan = dev_to_chan(dev);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1901ec7948d8..7916efce7188 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2239,7 +2239,7 @@ fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parit
}
static void
-fst_tx_timeout(struct net_device *dev)
+fst_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fst_port_info *port;
struct fst_card_info *card;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index ca0f3be2b6bf..9edd94679283 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = {
},
};
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
@@ -84,8 +84,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
int ret, i;
void *bd_buffer;
dma_addr_t bd_dma_addr;
- u32 riptr;
- u32 tiptr;
+ s32 riptr;
+ s32 tiptr;
u32 gumr;
ut_info = priv->ut_info;
@@ -195,7 +195,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
- if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
+ if (priv->ucc_pram_offset < 0) {
dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
@@ -233,18 +233,23 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc riptr, tiptr */
riptr = qe_muram_alloc(32, 32);
- if (IS_ERR_VALUE(riptr)) {
+ if (riptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
ret = -ENOMEM;
goto free_tx_skbuff;
}
tiptr = qe_muram_alloc(32, 32);
- if (IS_ERR_VALUE(tiptr)) {
+ if (tiptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
ret = -ENOMEM;
goto free_riptr;
}
+ if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
+ dev_err(priv->dev, "MURAM allocation out of addressable range\n");
+ ret = -ENOMEM;
+ goto free_tiptr;
+ }
/* Set RIPTR, TIPTR */
iowrite16be(riptr, &priv->ucc_pram->riptr);
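
The fsl_ucc_hdlc changes track a QE API update in which qe_muram_alloc() returns a signed s32 offset, negative on failure; the old IS_ERR_VALUE() test on a u32 could not express that, and the new explicit range check also guards the narrowing store into the 16-bit RIPTR/TIPTR registers. The checking pattern, condensed (error unwinding trimmed; the real code uses goto labels):

        s32 riptr, tiptr;

        riptr = qe_muram_alloc(32, 32);         /* negative on failure */
        if (riptr < 0)
                return -ENOMEM;

        tiptr = qe_muram_alloc(32, 32);
        if (tiptr < 0) {
                qe_muram_free(riptr);
                return -ENOMEM;
        }

        /* The hardware registers are 16-bit: reject offsets that would be
         * silently truncated by iowrite16be().
         */
        if (riptr != (u16)riptr || tiptr != (u16)tiptr)
                return -ENOMEM;         /* sketch: free both allocations first */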
@@ -623,8 +628,8 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
if (howmany < budget) {
napi_complete_done(napi, howmany);
- qe_setbits32(priv->uccf->p_uccm,
- (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+ qe_setbits_be32(priv->uccf->p_uccm,
+ (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
}
return howmany;
@@ -635,11 +640,9 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
struct net_device *dev = priv->ndev;
struct ucc_fast_private *uccf;
- struct ucc_tdm_info *ut_info;
u32 ucce;
u32 uccm;
- ut_info = priv->ut_info;
uccf = priv->uccf;
ucce = ioread32be(uccf->p_ucce);
@@ -732,8 +735,8 @@ static int uhdlc_open(struct net_device *dev)
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
- qe_muram_free(priv->ucc_pram->riptr);
- qe_muram_free(priv->ucc_pram->tiptr);
+ qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+ qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
@@ -872,7 +875,6 @@ static void resume_clk_config(struct ucc_hdlc_private *priv)
static int uhdlc_suspend(struct device *dev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
- struct ucc_tdm_info *ut_info;
struct ucc_fast __iomem *uf_regs;
if (!priv)
@@ -884,7 +886,6 @@ static int uhdlc_suspend(struct device *dev)
netif_device_detach(priv->ndev);
napi_disable(&priv->napi);
- ut_info = priv->ut_info;
uf_regs = priv->uf_regs;
/* backup gumr guemr*/
@@ -917,7 +918,7 @@ static int uhdlc_resume(struct device *dev)
struct ucc_fast __iomem *uf_regs;
struct ucc_fast_private *uccf;
struct ucc_fast_info *uf_info;
- int ret, i;
+ int i;
u32 cecr_subblock;
u16 bd_status;
@@ -962,16 +963,16 @@ static int uhdlc_resume(struct device *dev)
/* Write to QE CECR, UCCx channel to Stop Transmission */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
- ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
- (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Set UPSMR normal mode */
iowrite32be(0, &uf_regs->upsmr);
/* init parameter base */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
- ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
qe_muram_addr(priv->ucc_pram_offset);
@@ -1039,7 +1040,7 @@ static const struct dev_pm_ops uhdlc_pm_ops = {
#define HDLC_PM_OPS NULL
#endif
-static void uhdlc_tx_timeout(struct net_device *ndev)
+static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netdev_err(ndev, "%s\n", __func__);
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
index 8b3507ae1781..71d5ad0a7b98 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.h
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -98,7 +98,7 @@ struct ucc_hdlc_private {
unsigned short tx_ring_size;
unsigned short rx_ring_size;
- u32 ucc_pram_offset;
+ s32 ucc_pram_offset;
unsigned short encoding;
unsigned short parity;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index a030f5aa6b95..d8cba3625c18 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -75,7 +75,7 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
{
struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
- printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+ netdev_dbg(dev, "%s called\n", __func__);
#endif
skb_push(skb, sizeof(struct hdlc_header));
@@ -101,7 +101,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
if (!skb) {
- netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
+ netdev_warn(dev, "Memory squeeze on %s()\n", __func__);
return;
}
skb_reserve(skb, 4);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 5643675ff724..c84536b03aa8 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -21,8 +21,17 @@
#include <linux/skbuff.h>
#include <net/x25device.h>
+struct x25_state {
+ x25_hdlc_proto settings;
+};
+
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+static struct x25_state *state(hdlc_device *hdlc)
+{
+ return hdlc->state;
+}
+
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
@@ -62,11 +71,12 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
- skb_push(skb, 1);
-
if (skb_cow(skb, 1))
return NET_RX_DROP;
+ skb_push(skb, 1);
+ skb_reset_network_header(skb);
+
ptr = skb->data;
*ptr = X25_IFACE_DATA;
@@ -79,6 +89,13 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ skb_reset_network_header(skb);
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ if (dev_nit_active(dev))
+ dev_queue_xmit_nit(skb, dev);
+
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
@@ -93,6 +110,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
+ skb_reset_network_header(skb);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -131,7 +149,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
static int x25_open(struct net_device *dev)
{
- int result;
static const struct lapb_register_struct cb = {
.connect_confirmation = x25_connected,
.connect_indication = x25_connected,
@@ -140,10 +157,33 @@ static int x25_open(struct net_device *dev)
.data_indication = x25_data_indication,
.data_transmit = x25_data_transmit,
};
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct lapb_parms_struct params;
+ int result;
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
return result;
+
+ result = lapb_getparms(dev, &params);
+ if (result != LAPB_OK)
+ return result;
+
+ if (state(hdlc)->settings.dce)
+ params.mode = params.mode | LAPB_DCE;
+
+ if (state(hdlc)->settings.modulo == 128)
+ params.mode = params.mode | LAPB_EXTENDED;
+
+ params.window = state(hdlc)->settings.window;
+ params.t1 = state(hdlc)->settings.t1;
+ params.t2 = state(hdlc)->settings.t2;
+ params.n2 = state(hdlc)->settings.n2;
+
+ result = lapb_setparms(dev, &params);
+ if (result != LAPB_OK)
+ return result;
+
return 0;
}
@@ -186,7 +226,10 @@ static struct hdlc_proto proto = {
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
+ x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
+ const size_t size = sizeof(x25_hdlc_proto);
hdlc_device *hdlc = dev_to_hdlc(dev);
+ x25_hdlc_proto new_settings;
int result;
switch (ifr->ifr_settings.type) {
@@ -194,7 +237,13 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifr->ifr_settings.type = IF_PROTO_X25;
- return 0; /* return protocol only, no settable parameters */
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(x25_s, &state(hdlc)->settings, size))
+ return -EFAULT;
+ return 0;
case IF_PROTO_X25:
if (!capable(CAP_NET_ADMIN))
@@ -203,12 +252,46 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (dev->flags & IFF_UP)
return -EBUSY;
+ /* backward compatibility */
+ if (ifr->ifr_settings.size == 0) {
+ new_settings.dce = 0;
+ new_settings.modulo = 8;
+ new_settings.window = 7;
+ new_settings.t1 = 3;
+ new_settings.t2 = 1;
+ new_settings.n2 = 10;
+ } else {
+ if (copy_from_user(&new_settings, x25_s, size))
+ return -EFAULT;
+
+ if ((new_settings.dce != 0 &&
+ new_settings.dce != 1) ||
+ (new_settings.modulo != 8 &&
+ new_settings.modulo != 128) ||
+ new_settings.window < 1 ||
+ (new_settings.modulo == 8 &&
+ new_settings.window > 7) ||
+ (new_settings.modulo == 128 &&
+ new_settings.window > 127) ||
+ new_settings.t1 < 1 ||
+ new_settings.t1 > 255 ||
+ new_settings.t2 < 1 ||
+ new_settings.t2 > 255 ||
+ new_settings.n2 < 1 ||
+ new_settings.n2 > 255)
+ return -EINVAL;
+ }
+
result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto, 0)))
+ if ((result = attach_hdlc_protocol(dev, &proto,
+ sizeof(struct x25_state))))
return result;
+
+ memcpy(&state(hdlc)->settings, &new_settings, size);
dev->type = ARPHRD_X25;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
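
The new ioctl code above makes the LAPB parameters of an X.25-over-HDLC link configurable: settings arrive via ifr_settings (an empty request keeps the old defaults), are range-checked (modulo 8 limits the window to 7, modulo 128 to 127), stored in the new per-device x25_state, and applied when the interface opens. The apply step, condensed into a sketch (settings stands for the stored x25_hdlc_proto; the real code propagates the LAPB error codes):

        struct lapb_parms_struct params;

        if (lapb_getparms(dev, &params) != LAPB_OK)
                return -EINVAL;         /* sketch: simplified error handling */

        if (settings->dce)
                params.mode |= LAPB_DCE;
        if (settings->modulo == 128)
                params.mode |= LAPB_EXTENDED;   /* modulo-128 sequencing */

        params.window = settings->window;
        params.t1 = settings->t1;               /* retransmission timer */
        params.t2 = settings->t2;               /* ack delay timer */
        params.n2 = settings->n2;               /* retry counter */

        return lapb_setparms(dev, &params) == LAPB_OK ? 0 : -EINVAL;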
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index ea6ee6a608ce..7c5cf77e9ef1 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/wan_ixp4xx_hss.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/soc/ixp4xx/npe.h>
@@ -258,7 +259,7 @@ struct port {
struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
- u32 desc_tab_phys;
+ dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
@@ -858,7 +859,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+ memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
@@ -1182,14 +1183,14 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
}
}
-static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
+static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c,
u32 *best, u32 *best_diff, u32 *reg)
{
/* a is 10-bit, b is 10-bit, c is 12-bit */
u64 new_rate;
u32 new_diff;
- new_rate = ixp4xx_timer_freq * (u64)(c + 1);
+ new_rate = timer_freq * (u64)(c + 1);
do_div(new_rate, a * (c + 1) + b + 1);
new_diff = abs((u32)new_rate - rate);
@@ -1201,40 +1202,43 @@ static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
return new_diff;
}
-static void find_best_clock(u32 rate, u32 *best, u32 *reg)
+static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
{
u32 a, b, diff = 0xFFFFFFFF;
- a = ixp4xx_timer_freq / rate;
+ a = timer_freq / rate;
if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
- check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
+ check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg);
return;
}
if (a == 0) { /* > 66.666 MHz */
a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
- rate = ixp4xx_timer_freq;
+ rate = timer_freq;
}
- if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
- check_clock(rate, a - 1, 1, 1, best, &diff, reg);
+ if (rate * a == timer_freq) { /* don't divide by 0 later */
+ check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg);
return;
}
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
- do_div(c, ixp4xx_timer_freq - rate * a);
+ do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
if (b == 0 && /* also try a bit higher rate */
- !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
+ !check_clock(timer_freq, rate, a - 1, 1, 1, best,
+ &diff, reg))
return;
- check_clock(rate, a, b, 0xFFF, best, &diff, reg);
+ check_clock(timer_freq, rate, a, b, 0xFFF, best,
+ &diff, reg);
return;
}
- if (!check_clock(rate, a, b, c, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg))
return;
- if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
+ if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff,
+ reg))
return;
}
}
@@ -1285,8 +1289,9 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
port->clock_type = clk; /* Update settings */
if (clk == CLOCK_INT)
- find_best_clock(new_line.clock_rate, &port->clock_rate,
- &port->clock_reg);
+ find_best_clock(port->plat->timer_freq,
+ new_line.clock_rate,
+ &port->clock_rate, &port->clock_reg);
else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 0f1217b506ad..e30d91a38cfb 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
- list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+ list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
if (lapbeth->ethdev == dev)
return lapbeth;
}
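
The lapbether one-liner uses the optional lockdep condition that list_for_each_entry_rcu() accepts: this list may legitimately be walked either under rcu_read_lock() or with the RTNL held, and lockdep_rtnl_is_held() teaches the checker about the second case instead of letting it flag a false positive. The call in context:

        /* Safe under rcu_read_lock() OR rtnl_lock(); the cond argument
         * keeps lockdep from complaining about the RTNL-only callers.
         */
        list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node,
                                lockdep_rtnl_is_held()) {
                if (lapbeth->ethdev == dev)
                        return lapbeth;
        }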
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 0e6a51525d91..a20f467ca48a 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -99,7 +99,7 @@ static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev);
+static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
/*
* linux reserves 16 device specific IOCTLs. We call them
@@ -2044,7 +2044,7 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
-static void lmc_driver_timeout(struct net_device *dev)
+static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
{
lmc_softc_t *sc = dev_to_sc(dev);
u32 csr6;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index e2e679a01b65..77ccf3672ede 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb,
spin_lock_irqsave(&sdla_lock, flags);
SDLA_WINDOW(dev, addr);
- pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+ pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
__sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
SDLA_WINDOW(dev, addr);
pbuf->opp_flag = 1;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 34e94ee806d6..23f93f1c815d 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -635,7 +635,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
/* set up PLX mapping */
plx_phy = pci_resource_start(pdev, 0);
- card->plx = ioremap_nocache(plx_phy, 0x70);
+ card->plx = ioremap(plx_phy, 0x70);
if (!card->plx) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
@@ -704,7 +704,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
PCI_DMA_FROMDEVICE);
}
- mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+ mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
if (!mem) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 914be5847386..69773d228ec1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -276,7 +276,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
sl->xhead += actual;
}
-static void x25_asy_timeout(struct net_device *dev)
+static void x25_asy_timeout(struct net_device *dev, unsigned int txqueue)
{
struct x25_asy *sl = netdev_priv(dev);
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a5db3c06b646..a7fcbceb6e6b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -380,7 +380,7 @@ drop:
static
-void i2400m_tx_timeout(struct net_device *net_dev)
+void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
/*
* We might want to kick the device
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
new file mode 100644
index 000000000000..fc52b2cb500b
--- /dev/null
+++ b/drivers/net/wireguard/Makefile
@@ -0,0 +1,18 @@
+ccflags-y := -O3
+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
+wireguard-y := main.o
+wireguard-y += noise.o
+wireguard-y += device.o
+wireguard-y += peer.o
+wireguard-y += timers.o
+wireguard-y += queueing.o
+wireguard-y += send.o
+wireguard-y += receive.o
+wireguard-y += socket.o
+wireguard-y += peerlookup.o
+wireguard-y += allowedips.o
+wireguard-y += ratelimiter.o
+wireguard-y += cookie.o
+wireguard-y += netlink.o
+obj-$(CONFIG_WIREGUARD) := wireguard.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
new file mode 100644
index 000000000000..3725e9cd85f4
--- /dev/null
+++ b/drivers/net/wireguard/allowedips.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "allowedips.h"
+#include "peer.h"
+
+static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+{
+ if (bits == 32) {
+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ } else if (bits == 128) {
+ ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+ ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
+ }
+}
+
+static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
+ u8 cidr, u8 bits)
+{
+ node->cidr = cidr;
+ node->bit_at_a = cidr / 8U;
+#ifdef __LITTLE_ENDIAN
+ node->bit_at_a ^= (bits / 8U - 1U) % 8U;
+#endif
+ node->bit_at_b = 7U - (cidr % 8U);
+ node->bitlen = bits;
+ memcpy(node->bits, src, bits / 8U);
+}
+#define CHOOSE_NODE(parent, key) \
+ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static void push_rcu(struct allowedips_node **stack,
+ struct allowedips_node __rcu *p, unsigned int *len)
+{
+ if (rcu_access_pointer(p)) {
+ WARN_ON(IS_ENABLED(DEBUG) && *len >= 128);
+ stack[(*len)++] = rcu_dereference_raw(p);
+ }
+}
+
+static void root_free_rcu(struct rcu_head *rcu)
+{
+ struct allowedips_node *node, *stack[128] = {
+ container_of(rcu, struct allowedips_node, rcu) };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+ kfree(node);
+ }
+}
+
+static void root_remove_peer_lists(struct allowedips_node *root)
+{
+ struct allowedips_node *node, *stack[128] = { root };
+ unsigned int len = 1;
+
+ while (len > 0 && (node = stack[--len])) {
+ push_rcu(stack, node->bit[0], &len);
+ push_rcu(stack, node->bit[1], &len);
+ if (rcu_access_pointer(node->peer))
+ list_del(&node->peer_list);
+ }
+}
+
+static void walk_remove_by_peer(struct allowedips_node __rcu **top,
+ struct wg_peer *peer, struct mutex *lock)
+{
+#define REF(p) rcu_access_pointer(p)
+#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
+#define PUSH(p) ({ \
+ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
+ stack[len++] = p; \
+ })
+
+ struct allowedips_node __rcu **stack[128], **nptr;
+ struct allowedips_node *node, *prev;
+ unsigned int len;
+
+ if (unlikely(!peer || !REF(*top)))
+ return;
+
+ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
+ nptr = stack[len - 1];
+ node = DEREF(nptr);
+ if (!node) {
+ --len;
+ continue;
+ }
+ if (!prev || REF(prev->bit[0]) == node ||
+ REF(prev->bit[1]) == node) {
+ if (REF(node->bit[0]))
+ PUSH(&node->bit[0]);
+ else if (REF(node->bit[1]))
+ PUSH(&node->bit[1]);
+ } else if (REF(node->bit[0]) == prev) {
+ if (REF(node->bit[1]))
+ PUSH(&node->bit[1]);
+ } else {
+ if (rcu_dereference_protected(node->peer,
+ lockdep_is_held(lock)) == peer) {
+ RCU_INIT_POINTER(node->peer, NULL);
+ list_del_init(&node->peer_list);
+ if (!node->bit[0] || !node->bit[1]) {
+ rcu_assign_pointer(*nptr, DEREF(
+ &node->bit[!REF(node->bit[0])]));
+ kfree_rcu(node, rcu);
+ node = DEREF(nptr);
+ }
+ }
+ --len;
+ }
+ }
+
+#undef REF
+#undef DEREF
+#undef PUSH
+}
+
+static unsigned int fls128(u64 a, u64 b)
+{
+ return a ? fls64(a) + 64U : fls64(b);
+}
+
+static u8 common_bits(const struct allowedips_node *node, const u8 *key,
+ u8 bits)
+{
+ if (bits == 32)
+ return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key);
+ else if (bits == 128)
+ return 128U - fls128(
+ *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0],
+ *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]);
+ return 0;
+}
+
+static bool prefix_matches(const struct allowedips_node *node, const u8 *key,
+ u8 bits)
+{
+ /* This could be much faster if it actually just compared the common
+ * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and
+ * the rest, but it turns out that common_bits is already super fast on
+ * modern processors, even taking into account the unfortunate bswap.
+ * So, we just inline it like this instead.
+ */
+ return common_bits(node, key, bits) >= node->cidr;
+}
+
+static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
+ const u8 *key)
+{
+ struct allowedips_node *node = trie, *found = NULL;
+
+ while (node && prefix_matches(node, key, bits)) {
+ if (rcu_access_pointer(node->peer))
+ found = node;
+ if (node->cidr == bits)
+ break;
+ node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+ }
+ return found;
+}
+
+/* Returns a strong reference to a peer */
+static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits,
+ const void *be_ip)
+{
+ /* Aligned so it can be passed to fls/fls64 */
+ u8 ip[16] __aligned(__alignof(u64));
+ struct allowedips_node *node;
+ struct wg_peer *peer = NULL;
+
+ swap_endian(ip, be_ip, bits);
+
+ rcu_read_lock_bh();
+retry:
+ node = find_node(rcu_dereference_bh(root), bits, ip);
+ if (node) {
+ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer));
+ if (!peer)
+ goto retry;
+ }
+ rcu_read_unlock_bh();
+ return peer;
+}
+
+static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
+ u8 cidr, u8 bits, struct allowedips_node **rnode,
+ struct mutex *lock)
+{
+ struct allowedips_node *node = rcu_dereference_protected(trie,
+ lockdep_is_held(lock));
+ struct allowedips_node *parent = NULL;
+ bool exact = false;
+
+ while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) {
+ parent = node;
+ if (parent->cidr == cidr) {
+ exact = true;
+ break;
+ }
+ node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
+ lockdep_is_held(lock));
+ }
+ *rnode = parent;
+ return exact;
+}
+
+static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ struct allowedips_node *node, *parent, *down, *newnode;
+
+ if (unlikely(cidr > bits || !peer))
+ return -EINVAL;
+
+ if (!rcu_access_pointer(*trie)) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+ RCU_INIT_POINTER(node->peer, peer);
+ list_add_tail(&node->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(node, key, cidr, bits);
+ rcu_assign_pointer(*trie, node);
+ return 0;
+ }
+ if (node_placement(*trie, key, cidr, bits, &node, lock)) {
+ rcu_assign_pointer(node->peer, peer);
+ list_move_tail(&node->peer_list, &peer->allowedips_list);
+ return 0;
+ }
+
+ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ if (unlikely(!newnode))
+ return -ENOMEM;
+ RCU_INIT_POINTER(newnode->peer, peer);
+ list_add_tail(&newnode->peer_list, &peer->allowedips_list);
+ copy_and_assign_cidr(newnode, key, cidr, bits);
+
+ if (!node) {
+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
+ } else {
+ down = rcu_dereference_protected(CHOOSE_NODE(node, key),
+ lockdep_is_held(lock));
+ if (!down) {
+ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+ return 0;
+ }
+ }
+ cidr = min(cidr, common_bits(down, key, bits));
+ parent = node;
+
+ if (newnode->cidr == cidr) {
+ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+ if (!parent)
+ rcu_assign_pointer(*trie, newnode);
+ else
+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
+ newnode);
+ } else {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node)) {
+ list_del(&newnode->peer_list);
+ kfree(newnode);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&node->peer_list);
+ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
+ if (!parent)
+ rcu_assign_pointer(*trie, node);
+ else
+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
+ node);
+ }
+ return 0;
+}
+
+void wg_allowedips_init(struct allowedips *table)
+{
+ table->root4 = table->root6 = NULL;
+ table->seq = 1;
+}
+
+void wg_allowedips_free(struct allowedips *table, struct mutex *lock)
+{
+ struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6;
+
+ ++table->seq;
+ RCU_INIT_POINTER(table->root4, NULL);
+ RCU_INIT_POINTER(table->root6, NULL);
+ if (rcu_access_pointer(old4)) {
+ struct allowedips_node *node = rcu_dereference_protected(old4,
+ lockdep_is_held(lock));
+
+ root_remove_peer_lists(node);
+ call_rcu(&node->rcu, root_free_rcu);
+ }
+ if (rcu_access_pointer(old6)) {
+ struct allowedips_node *node = rcu_dereference_protected(old6,
+ lockdep_is_held(lock));
+
+ root_remove_peer_lists(node);
+ call_rcu(&node->rcu, root_free_rcu);
+ }
+}
+
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls */
+ u8 key[4] __aligned(__alignof(u32));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 32);
+ return add(&table->root4, 32, key, cidr, peer, lock);
+}
+
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls64 */
+ u8 key[16] __aligned(__alignof(u64));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 128);
+ return add(&table->root6, 128, key, cidr, peer, lock);
+}
+
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock)
+{
+ ++table->seq;
+ walk_remove_by_peer(&table->root4, peer, lock);
+ walk_remove_by_peer(&table->root6, peer, lock);
+}
+
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
+{
+ const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U);
+ swap_endian(ip, node->bits, node->bitlen);
+ memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes);
+ if (node->cidr)
+ ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U);
+
+ *cidr = node->cidr;
+ return node->bitlen == 32 ? AF_INET : AF_INET6;
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return lookup(table->root4, 32, &ip_hdr(skb)->daddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr);
+ return NULL;
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return lookup(table->root4, 32, &ip_hdr(skb)->saddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr);
+ return NULL;
+}
+
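+/* Included as part of this translation unit, rather than linked separately,
+ * so that the selftest can exercise this file's static helpers directly.
+ */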
+#include "selftest/allowedips.c"
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
new file mode 100644
index 000000000000..e5c83cafcef4
--- /dev/null
+++ b/drivers/net/wireguard/allowedips.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_ALLOWEDIPS_H
+#define _WG_ALLOWEDIPS_H
+
+#include <linux/mutex.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct wg_peer;
+
+struct allowedips_node {
+ struct wg_peer __rcu *peer;
+ struct allowedips_node __rcu *bit[2];
+ /* While it may seem scandalous that we waste space for v4,
+ * we're alloc'ing to the nearest power of 2 anyway, so this
+ * doesn't actually make a difference.
+ */
+ u8 bits[16] __aligned(__alignof(u64));
+ u8 cidr, bit_at_a, bit_at_b, bitlen;
+
+ /* Keep rarely used list at bottom to be beyond cache line. */
+ union {
+ struct list_head peer_list;
+ struct rcu_head rcu;
+ };
+};
+
+struct allowedips {
+ struct allowedips_node __rcu *root4;
+ struct allowedips_node __rcu *root6;
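+	/* Bumped on every mutation, so that in-progress readers such as the
+	 * netlink dump can detect that the trie changed beneath them.
+	 */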
+ u64 seq;
+};
+
+void wg_allowedips_init(struct allowedips *table);
+void wg_allowedips_free(struct allowedips *table, struct mutex *lock);
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wg_peer *peer, struct mutex *lock);
+/* The ip input pointer should be __aligned(__alignof(u64)). */
+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr);
+
+/* These return a strong reference to a peer: */
+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb);
+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb);
+
+#ifdef DEBUG
+bool wg_allowedips_selftest(void);
+#endif
+
+#endif /* _WG_ALLOWEDIPS_H */
diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c
new file mode 100644
index 000000000000..4956f0499c19
--- /dev/null
+++ b/drivers/net/wireguard/cookie.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "cookie.h"
+#include "peer.h"
+#include "device.h"
+#include "messages.h"
+#include "ratelimiter.h"
+#include "timers.h"
+
+#include <crypto/blake2s.h>
+#include <crypto/chacha20poly1305.h>
+
+#include <net/ipv6.h>
+#include <crypto/algapi.h>
+
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wg_device *wg)
+{
+ init_rwsem(&checker->secret_lock);
+ checker->secret_birthdate = ktime_get_coarse_boottime_ns();
+ get_random_bytes(checker->secret, NOISE_HASH_LEN);
+ checker->device = wg;
+}
+
+enum { COOKIE_KEY_LABEL_LEN = 8 };
+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
+
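+/* key = BLAKE2s(label || pubkey) */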
+static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
+ const u8 label[COOKIE_KEY_LABEL_LEN])
+{
+ struct blake2s_state blake;
+
+ blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN);
+ blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN);
+ blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN);
+ blake2s_final(&blake, key);
+}
+
+/* Must hold peer->handshake.static_identity->lock */
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker)
+{
+ if (likely(checker->device->static_identity.has_identity)) {
+ precompute_key(checker->cookie_encryption_key,
+ checker->device->static_identity.static_public,
+ cookie_key_label);
+ precompute_key(checker->message_mac1_key,
+ checker->device->static_identity.static_public,
+ mac1_key_label);
+ } else {
+ memset(checker->cookie_encryption_key, 0,
+ NOISE_SYMMETRIC_KEY_LEN);
+ memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN);
+ }
+}
+
+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer)
+{
+ precompute_key(peer->latest_cookie.cookie_decryption_key,
+ peer->handshake.remote_static, cookie_key_label);
+ precompute_key(peer->latest_cookie.message_mac1_key,
+ peer->handshake.remote_static, mac1_key_label);
+}
+
+void wg_cookie_init(struct cookie *cookie)
+{
+ memset(cookie, 0, sizeof(*cookie));
+ init_rwsem(&cookie->lock);
+}
+
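+/* mac1 covers the whole message up to, but not including, the macs field. */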
+static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len,
+ const u8 key[NOISE_SYMMETRIC_KEY_LEN])
+{
+ len = len - sizeof(struct message_macs) +
+ offsetof(struct message_macs, mac1);
+ blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN);
+}
+
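+/* mac2 additionally covers mac1 and is keyed by the latest cookie value. */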
+static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len,
+ const u8 cookie[COOKIE_LEN])
+{
+ len = len - sizeof(struct message_macs) +
+ offsetof(struct message_macs, mac2);
+ blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN);
+}
+
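+/* The cookie is a keyed BLAKE2s hash of the sender's address and UDP source
+ * port, under a secret that is rotated once COOKIE_SECRET_MAX_AGE elapses,
+ * so it can be neither precomputed nor reused from a different source.
+ */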
+static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
+ struct cookie_checker *checker)
+{
+ struct blake2s_state state;
+
+ if (wg_birthdate_has_expired(checker->secret_birthdate,
+ COOKIE_SECRET_MAX_AGE)) {
+ down_write(&checker->secret_lock);
+ checker->secret_birthdate = ktime_get_coarse_boottime_ns();
+ get_random_bytes(checker->secret, NOISE_HASH_LEN);
+ up_write(&checker->secret_lock);
+ }
+
+ down_read(&checker->secret_lock);
+
+ blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN);
+ if (skb->protocol == htons(ETH_P_IP))
+ blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr,
+ sizeof(struct in_addr));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16));
+ blake2s_final(&state, cookie);
+
+ up_read(&checker->secret_lock);
+}
+
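+/* Checked in order of increasing strength: an invalid mac1 is rejected
+ * outright; a valid mac1 alone suffices when not under load; under load, a
+ * valid mac2 computed from a fresh cookie is also required, and even then
+ * the packet remains subject to the rate limiter.
+ */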
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie)
+{
+ struct message_macs *macs = (struct message_macs *)
+ (skb->data + skb->len - sizeof(*macs));
+ enum cookie_mac_state ret;
+ u8 computed_mac[COOKIE_LEN];
+ u8 cookie[COOKIE_LEN];
+
+ ret = INVALID_MAC;
+ compute_mac1(computed_mac, skb->data, skb->len,
+ checker->message_mac1_key);
+ if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN))
+ goto out;
+
+ ret = VALID_MAC_BUT_NO_COOKIE;
+
+ if (!check_cookie)
+ goto out;
+
+ make_cookie(cookie, skb, checker);
+
+ compute_mac2(computed_mac, skb->data, skb->len, cookie);
+ if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN))
+ goto out;
+
+ ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED;
+ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev)))
+ goto out;
+
+ ret = VALID_MAC_WITH_COOKIE;
+
+out:
+ return ret;
+}
+
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wg_peer *peer)
+{
+ struct message_macs *macs = (struct message_macs *)
+ ((u8 *)message + len - sizeof(*macs));
+
+ down_write(&peer->latest_cookie.lock);
+ compute_mac1(macs->mac1, message, len,
+ peer->latest_cookie.message_mac1_key);
+ memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN);
+ peer->latest_cookie.have_sent_mac1 = true;
+ up_write(&peer->latest_cookie.lock);
+
+ down_read(&peer->latest_cookie.lock);
+ if (peer->latest_cookie.is_valid &&
+ !wg_birthdate_has_expired(peer->latest_cookie.birthdate,
+ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
+ compute_mac2(macs->mac2, message, len,
+ peer->latest_cookie.cookie);
+ else
+ memset(macs->mac2, 0, COOKIE_LEN);
+ up_read(&peer->latest_cookie.lock);
+}
+
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker)
+{
+ struct message_macs *macs = (struct message_macs *)
+ ((u8 *)skb->data + skb->len - sizeof(*macs));
+ u8 cookie[COOKIE_LEN];
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE);
+ dst->receiver_index = index;
+ get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN);
+
+ make_cookie(cookie, skb, checker);
+ xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN,
+ macs->mac1, COOKIE_LEN, dst->nonce,
+ checker->cookie_encryption_key);
+}
+
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wg_device *wg)
+{
+ struct wg_peer *peer = NULL;
+ u8 cookie[COOKIE_LEN];
+ bool ret;
+
+ if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable,
+ INDEX_HASHTABLE_HANDSHAKE |
+ INDEX_HASHTABLE_KEYPAIR,
+ src->receiver_index, &peer)))
+ return;
+
+ down_read(&peer->latest_cookie.lock);
+ if (unlikely(!peer->latest_cookie.have_sent_mac1)) {
+ up_read(&peer->latest_cookie.lock);
+ goto out;
+ }
+ ret = xchacha20poly1305_decrypt(
+ cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie),
+ peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce,
+ peer->latest_cookie.cookie_decryption_key);
+ up_read(&peer->latest_cookie.lock);
+
+ if (ret) {
+ down_write(&peer->latest_cookie.lock);
+ memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN);
+ peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns();
+ peer->latest_cookie.is_valid = true;
+ peer->latest_cookie.have_sent_mac1 = false;
+ up_write(&peer->latest_cookie.lock);
+ } else {
+ net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n",
+ wg->dev->name);
+ }
+
+out:
+ wg_peer_put(peer);
+}
diff --git a/drivers/net/wireguard/cookie.h b/drivers/net/wireguard/cookie.h
new file mode 100644
index 000000000000..c4bd61ca03f2
--- /dev/null
+++ b/drivers/net/wireguard/cookie.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_COOKIE_H
+#define _WG_COOKIE_H
+
+#include "messages.h"
+#include <linux/rwsem.h>
+
+struct wg_peer;
+
+struct cookie_checker {
+ u8 secret[NOISE_HASH_LEN];
+ u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN];
+ u64 secret_birthdate;
+ struct rw_semaphore secret_lock;
+ struct wg_device *device;
+};
+
+struct cookie {
+ u64 birthdate;
+ bool is_valid;
+ u8 cookie[COOKIE_LEN];
+ bool have_sent_mac1;
+ u8 last_mac1_sent[COOKIE_LEN];
+ u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN];
+ struct rw_semaphore lock;
+};
+
+enum cookie_mac_state {
+ INVALID_MAC,
+ VALID_MAC_BUT_NO_COOKIE,
+ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED,
+ VALID_MAC_WITH_COOKIE
+};
+
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wg_device *wg);
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker);
+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer);
+void wg_cookie_init(struct cookie *cookie);
+
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie);
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wg_peer *peer);
+
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker);
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wg_device *wg);
+
+#endif /* _WG_COOKIE_H */
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
new file mode 100644
index 000000000000..43db442b1373
--- /dev/null
+++ b/drivers/net/wireguard/device.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "socket.h"
+#include "timers.h"
+#include "device.h"
+#include "ratelimiter.h"
+#include "peer.h"
+#include "messages.h"
+
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmp.h>
+#include <linux/suspend.h>
+#include <net/icmp.h>
+#include <net/rtnetlink.h>
+#include <net/ip_tunnels.h>
+#include <net/addrconf.h>
+
+static LIST_HEAD(device_list);
+
+static int wg_open(struct net_device *dev)
+{
+ struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
+ struct inet6_dev *dev_v6 = __in6_dev_get(dev);
+ struct wg_device *wg = netdev_priv(dev);
+ struct wg_peer *peer;
+ int ret;
+
+ if (dev_v4) {
+ /* At some point we might put this check near the ip_rt_send_
+ * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
+ * to the current secpath check.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
+ }
+ if (dev_v6)
+ dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
+
+ ret = wg_socket_init(wg, wg->incoming_port);
+ if (ret < 0)
+ return ret;
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ wg_packet_send_staged_packets(peer);
+ if (peer->persistent_keepalive_interval)
+ wg_packet_send_keepalive(peer);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct wg_device *wg;
+ struct wg_peer *peer;
+
+ /* If the machine is constantly suspending and resuming, as part of
+ * its normal operation rather than as a somewhat rare event, then we
+ * don't actually want to clear keys.
+ */
+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
+ return 0;
+
+ if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
+ return 0;
+
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ del_timer(&peer->timer_zero_key_material);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ }
+ rtnl_unlock();
+ rcu_barrier();
+ return 0;
+}
+
+static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
+#endif
+
+static int wg_stop(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ struct wg_peer *peer;
+
+ mutex_lock(&wg->device_update_lock);
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
+ wg_packet_purge_staged_packets(peer);
+ wg_timers_stop(peer);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+ }
+ mutex_unlock(&wg->device_update_lock);
+ skb_queue_purge(&wg->incoming_handshakes);
+ wg_socket_reinit(wg, NULL, NULL);
+ return 0;
+}
+
+static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ struct sk_buff_head packets;
+ struct wg_peer *peer;
+ struct sk_buff *next;
+ sa_family_t family;
+ u32 mtu;
+ int ret;
+
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
+ ret = -EPROTONOSUPPORT;
+ net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
+ goto err;
+ }
+
+ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
+ if (unlikely(!peer)) {
+ ret = -ENOKEY;
+ if (skb->protocol == htons(ETH_P_IP))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
+ dev->name, &ip_hdr(skb)->daddr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
+ dev->name, &ipv6_hdr(skb)->daddr);
+ goto err;
+ }
+
+ family = READ_ONCE(peer->endpoint.addr.sa_family);
+ if (unlikely(family != AF_INET && family != AF_INET6)) {
+ ret = -EDESTADDRREQ;
+ net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
+ dev->name, peer->internal_id);
+ goto err_peer;
+ }
+
+ mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+
+ __skb_queue_head_init(&packets);
+ if (!skb_is_gso(skb)) {
+ skb_mark_not_on_list(skb);
+ } else {
+ struct sk_buff *segs = skb_gso_segment(skb, 0);
+
+ if (unlikely(IS_ERR(segs))) {
+ ret = PTR_ERR(segs);
+ goto err_peer;
+ }
+ dev_kfree_skb(skb);
+ skb = segs;
+ }
+
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ continue;
+
+ /* We only need to keep the original dst around for icmp,
+ * so at this point we're in a position to drop it.
+ */
+ skb_dst_drop(skb);
+
+ PACKET_CB(skb)->mtu = mtu;
+
+ __skb_queue_tail(&packets, skb);
+ }
+
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ /* If the queue is getting too big, we start removing the oldest packets
+ * until it's small again. We do this before adding the new packet, so
+ * we don't remove GSO segments that are in excess.
+ */
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+ ++dev->stats.tx_dropped;
+ }
+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+
+ wg_packet_send_staged_packets(peer);
+
+ wg_peer_put(peer);
+ return NETDEV_TX_OK;
+
+err_peer:
+ wg_peer_put(peer);
+err:
+ ++dev->stats.tx_errors;
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ kfree_skb(skb);
+ return ret;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = wg_open,
+ .ndo_stop = wg_stop,
+ .ndo_start_xmit = wg_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64
+};
+
+static void wg_destruct(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+
+ rtnl_lock();
+ list_del(&wg->device_list);
+ rtnl_unlock();
+ mutex_lock(&wg->device_update_lock);
+ wg->incoming_port = 0;
+ wg_socket_reinit(wg, NULL, NULL);
+	/* The final references are released by the below calls to destroy_workqueue. */
+ wg_peer_remove_all(wg);
+ destroy_workqueue(wg->handshake_receive_wq);
+ destroy_workqueue(wg->handshake_send_wq);
+ destroy_workqueue(wg->packet_crypt_wq);
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue, true);
+ rcu_barrier(); /* Wait for all the peers to be actually freed. */
+ wg_ratelimiter_uninit();
+ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
+ skb_queue_purge(&wg->incoming_handshakes);
+ free_percpu(dev->tstats);
+ free_percpu(wg->incoming_handshakes_worker);
+ if (wg->have_creating_net_ref)
+ put_net(wg->creating_net);
+ kvfree(wg->index_hashtable);
+ kvfree(wg->peer_hashtable);
+ mutex_unlock(&wg->device_update_lock);
+
+ pr_debug("%s: Interface deleted\n", dev->name);
+ free_netdev(dev);
+}
+
+static const struct device_type device_type = { .name = KBUILD_MODNAME };
+
+static void wg_setup(struct net_device *dev)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
+
+ dev->netdev_ops = &netdev_ops;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
+ dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= WG_NETDEV_FEATURES;
+ dev->hw_features |= WG_NETDEV_FEATURES;
+ dev->hw_enc_features |= WG_NETDEV_FEATURES;
+ dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
+ sizeof(struct udphdr) -
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
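+	/* With the lengths defined in messages.h, this works out to
+	 * 1500 - 32 - 8 - 40 = 1420, the familiar WireGuard default MTU.
+	 */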
+
+ SET_NETDEV_DEVTYPE(dev, &device_type);
+
+ /* We need to keep the dst around in case of icmp replies. */
+ netif_keep_dst(dev);
+
+ memset(wg, 0, sizeof(*wg));
+ wg->dev = dev;
+}
+
+static int wg_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct wg_device *wg = netdev_priv(dev);
+ int ret = -ENOMEM;
+
+ wg->creating_net = src_net;
+ init_rwsem(&wg->static_identity.lock);
+ mutex_init(&wg->socket_update_lock);
+ mutex_init(&wg->device_update_lock);
+ skb_queue_head_init(&wg->incoming_handshakes);
+ wg_allowedips_init(&wg->peer_allowedips);
+ wg_cookie_checker_init(&wg->cookie_checker, wg);
+ INIT_LIST_HEAD(&wg->peer_list);
+ wg->device_update_gen = 1;
+
+ wg->peer_hashtable = wg_pubkey_hashtable_alloc();
+ if (!wg->peer_hashtable)
+ return ret;
+
+ wg->index_hashtable = wg_index_hashtable_alloc();
+ if (!wg->index_hashtable)
+ goto err_free_peer_hashtable;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ goto err_free_index_hashtable;
+
+ wg->incoming_handshakes_worker =
+ wg_packet_percpu_multicore_worker_alloc(
+ wg_packet_handshake_receive_worker, wg);
+ if (!wg->incoming_handshakes_worker)
+ goto err_free_tstats;
+
+ wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
+ WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+ if (!wg->handshake_receive_wq)
+ goto err_free_incoming_handshakes;
+
+ wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
+ WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
+ if (!wg->handshake_send_wq)
+ goto err_destroy_handshake_receive;
+
+ wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+ if (!wg->packet_crypt_wq)
+ goto err_destroy_handshake_send;
+
+ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
+ true, MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_destroy_packet_crypt;
+
+ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
+ true, MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_free_encrypt_queue;
+
+ ret = wg_ratelimiter_init();
+ if (ret < 0)
+ goto err_free_decrypt_queue;
+
+ ret = register_netdevice(dev);
+ if (ret < 0)
+ goto err_uninit_ratelimiter;
+
+ list_add(&wg->device_list, &device_list);
+
+ /* We wait until the end to assign priv_destructor, so that
+ * register_netdevice doesn't call it for us if it fails.
+ */
+ dev->priv_destructor = wg_destruct;
+
+ pr_debug("%s: Interface created\n", dev->name);
+ return ret;
+
+err_uninit_ratelimiter:
+ wg_ratelimiter_uninit();
+err_free_decrypt_queue:
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+err_free_encrypt_queue:
+ wg_packet_queue_free(&wg->encrypt_queue, true);
+err_destroy_packet_crypt:
+ destroy_workqueue(wg->packet_crypt_wq);
+err_destroy_handshake_send:
+ destroy_workqueue(wg->handshake_send_wq);
+err_destroy_handshake_receive:
+ destroy_workqueue(wg->handshake_receive_wq);
+err_free_incoming_handshakes:
+ free_percpu(wg->incoming_handshakes_worker);
+err_free_tstats:
+ free_percpu(dev->tstats);
+err_free_index_hashtable:
+ kvfree(wg->index_hashtable);
+err_free_peer_hashtable:
+ kvfree(wg->peer_hashtable);
+ return ret;
+}
+
+static struct rtnl_link_ops link_ops __read_mostly = {
+ .kind = KBUILD_MODNAME,
+ .priv_size = sizeof(struct wg_device),
+ .setup = wg_setup,
+ .newlink = wg_newlink,
+};
+
+static int wg_netdevice_notification(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
+ struct wg_device *wg = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
+ return 0;
+
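+	/* Hold a reference on the creating netns only while the device lives
+	 * in a different netns; dropping it when the two coincide avoids
+	 * pinning a netns alive through its own device.
+	 */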
+ if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
+ put_net(wg->creating_net);
+ wg->have_creating_net_ref = false;
+ } else if (dev_net(dev) != wg->creating_net &&
+ !wg->have_creating_net_ref) {
+ wg->have_creating_net_ref = true;
+ get_net(wg->creating_net);
+ }
+ return 0;
+}
+
+static struct notifier_block netdevice_notifier = {
+ .notifier_call = wg_netdevice_notification
+};
+
+int __init wg_device_init(void)
+{
+ int ret;
+
+#ifdef CONFIG_PM_SLEEP
+ ret = register_pm_notifier(&pm_notifier);
+ if (ret)
+ return ret;
+#endif
+
+ ret = register_netdevice_notifier(&netdevice_notifier);
+ if (ret)
+ goto error_pm;
+
+ ret = rtnl_link_register(&link_ops);
+ if (ret)
+ goto error_netdevice;
+
+ return 0;
+
+error_netdevice:
+ unregister_netdevice_notifier(&netdevice_notifier);
+error_pm:
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&pm_notifier);
+#endif
+ return ret;
+}
+
+void wg_device_uninit(void)
+{
+ rtnl_link_unregister(&link_ops);
+ unregister_netdevice_notifier(&netdevice_notifier);
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&pm_notifier);
+#endif
+ rcu_barrier();
+}
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
new file mode 100644
index 000000000000..b15a8be9d816
--- /dev/null
+++ b/drivers/net/wireguard/device.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_DEVICE_H
+#define _WG_DEVICE_H
+
+#include "noise.h"
+#include "allowedips.h"
+#include "peerlookup.h"
+#include "cookie.h"
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/ptr_ring.h>
+
+struct wg_device;
+
+struct multicore_worker {
+ void *ptr;
+ struct work_struct work;
+};
+
+struct crypt_queue {
+ struct ptr_ring ring;
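+	/* A queue is driven either by per-cpu workers, handed out round-robin
+	 * via last_cpu, or by a single work item; the two uses never overlap,
+	 * hence the union.
+	 */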
+ union {
+ struct {
+ struct multicore_worker __percpu *worker;
+ int last_cpu;
+ };
+ struct work_struct work;
+ };
+};
+
+struct wg_device {
+ struct net_device *dev;
+ struct crypt_queue encrypt_queue, decrypt_queue;
+ struct sock __rcu *sock4, *sock6;
+ struct net *creating_net;
+ struct noise_static_identity static_identity;
+ struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
+ struct workqueue_struct *packet_crypt_wq;
+ struct sk_buff_head incoming_handshakes;
+ int incoming_handshake_cpu;
+ struct multicore_worker __percpu *incoming_handshakes_worker;
+ struct cookie_checker cookie_checker;
+ struct pubkey_hashtable *peer_hashtable;
+ struct index_hashtable *index_hashtable;
+ struct allowedips peer_allowedips;
+ struct mutex device_update_lock, socket_update_lock;
+ struct list_head device_list, peer_list;
+ unsigned int num_peers, device_update_gen;
+ u32 fwmark;
+ u16 incoming_port;
+ bool have_creating_net_ref;
+};
+
+int wg_device_init(void);
+void wg_device_uninit(void);
+
+#endif /* _WG_DEVICE_H */
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
new file mode 100644
index 000000000000..7a7d5f1a80fc
--- /dev/null
+++ b/drivers/net/wireguard/main.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "version.h"
+#include "device.h"
+#include "noise.h"
+#include "queueing.h"
+#include "ratelimiter.h"
+#include "netlink.h"
+
+#include <uapi/linux/wireguard.h>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/genetlink.h>
+#include <net/rtnetlink.h>
+
+static int __init mod_init(void)
+{
+ int ret;
+
+#ifdef DEBUG
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
+ !wg_ratelimiter_selftest())
+ return -ENOTRECOVERABLE;
+#endif
+ wg_noise_init();
+
+ ret = wg_device_init();
+ if (ret < 0)
+ goto err_device;
+
+ ret = wg_genetlink_init();
+ if (ret < 0)
+ goto err_netlink;
+
+ pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n");
+ pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.\n");
+
+ return 0;
+
+err_netlink:
+ wg_device_uninit();
+err_device:
+ return ret;
+}
+
+static void __exit mod_exit(void)
+{
+ wg_genetlink_uninit();
+ wg_device_uninit();
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WireGuard secure network tunnel");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_VERSION(WIREGUARD_VERSION);
+MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME);
+MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME);
diff --git a/drivers/net/wireguard/messages.h b/drivers/net/wireguard/messages.h
new file mode 100644
index 000000000000..b8a7b9ce32ba
--- /dev/null
+++ b/drivers/net/wireguard/messages.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_MESSAGES_H
+#define _WG_MESSAGES_H
+
+#include <crypto/curve25519.h>
+#include <crypto/chacha20poly1305.h>
+#include <crypto/blake2s.h>
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/skbuff.h>
+
+enum noise_lengths {
+ NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE,
+ NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE,
+ NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32),
+ NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE,
+ NOISE_HASH_LEN = BLAKE2S_HASH_SIZE
+};
+
+#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN)
+
+enum cookie_values {
+ COOKIE_SECRET_MAX_AGE = 2 * 60,
+ COOKIE_SECRET_LATENCY = 5,
+ COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE,
+ COOKIE_LEN = 16
+};
+
+enum counter_values {
+ COUNTER_BITS_TOTAL = 2048,
+ COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
+ COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
+};
+
+enum limits {
+ REKEY_AFTER_MESSAGES = 1ULL << 60,
+ REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1,
+ REKEY_TIMEOUT = 5,
+ REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3,
+ REKEY_AFTER_TIME = 120,
+ REJECT_AFTER_TIME = 180,
+ INITIATIONS_PER_SECOND = 50,
+ MAX_PEERS_PER_DEVICE = 1U << 20,
+ KEEPALIVE_TIMEOUT = 10,
+ MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT,
+ MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */
+ MAX_STAGED_PACKETS = 128,
+ MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */
+};
+
+enum message_type {
+ MESSAGE_INVALID = 0,
+ MESSAGE_HANDSHAKE_INITIATION = 1,
+ MESSAGE_HANDSHAKE_RESPONSE = 2,
+ MESSAGE_HANDSHAKE_COOKIE = 3,
+ MESSAGE_DATA = 4
+};
+
+struct message_header {
+ /* The actual layout of this that we want is:
+ * u8 type
+ * u8 reserved_zero[3]
+ *
+ * But it turns out that by encoding this as little endian,
+ * we achieve the same thing, and it makes checking faster.
+ */
+ __le32 type;
+};
+
+struct message_macs {
+ u8 mac1[COOKIE_LEN];
+ u8 mac2[COOKIE_LEN];
+};
+
+struct message_handshake_initiation {
+ struct message_header header;
+ __le32 sender_index;
+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)];
+ u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)];
+ struct message_macs macs;
+};
+
+struct message_handshake_response {
+ struct message_header header;
+ __le32 sender_index;
+ __le32 receiver_index;
+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 encrypted_nothing[noise_encrypted_len(0)];
+ struct message_macs macs;
+};
+
+struct message_handshake_cookie {
+ struct message_header header;
+ __le32 receiver_index;
+ u8 nonce[COOKIE_NONCE_LEN];
+ u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)];
+};
+
+struct message_data {
+ struct message_header header;
+ __le32 key_idx;
+ __le64 counter;
+ u8 encrypted_data[];
+};
+
+#define message_data_len(plain_len) \
+ (noise_encrypted_len(plain_len) + sizeof(struct message_data))
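+
+/* A data message is a 16-byte header (type, key index, counter) followed by
+ * the ciphertext and its 16-byte authentication tag, so the minimum length,
+ * message_data_len(0), is 32 bytes.
+ */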
+
+enum message_alignments {
+ MESSAGE_PADDING_MULTIPLE = 16,
+ MESSAGE_MINIMUM_LENGTH = message_data_len(0)
+};
+
+#define SKB_HEADER_LEN \
+ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \
+ sizeof(struct udphdr) + NET_SKB_PAD)
+#define DATA_PACKET_HEAD_ROOM \
+ ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4)
+
+enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ };
+
+#endif /* _WG_MESSAGES_H */
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
new file mode 100644
index 000000000000..bda26405497c
--- /dev/null
+++ b/drivers/net/wireguard/netlink.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "netlink.h"
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "queueing.h"
+#include "messages.h"
+
+#include <uapi/linux/wireguard.h>
+
+#include <linux/if.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include <crypto/algapi.h>
+
+static struct genl_family genl_family;
+
+static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+ [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
+ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
+ [WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
+ [WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
+};
+
+static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
+ [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN },
+ [WGPEER_A_FLAGS] = { .type = NLA_U32 },
+ [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) },
+ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) },
+ [WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
+ [WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
+ [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
+ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 }
+};
+
+static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
+ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
+ [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) },
+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
+};
+
+static struct wg_device *lookup_interface(struct nlattr **attrs,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = NULL;
+
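+	/* Exactly one of WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME must be
+	 * specified.
+	 */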
+ if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME])
+ return ERR_PTR(-EBADR);
+ if (attrs[WGDEVICE_A_IFINDEX])
+ dev = dev_get_by_index(sock_net(skb->sk),
+ nla_get_u32(attrs[WGDEVICE_A_IFINDEX]));
+ else if (attrs[WGDEVICE_A_IFNAME])
+ dev = dev_get_by_name(sock_net(skb->sk),
+ nla_data(attrs[WGDEVICE_A_IFNAME]));
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+ if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind ||
+ strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) {
+ dev_put(dev);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ return netdev_priv(dev);
+}
+
+static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr,
+ int family)
+{
+ struct nlattr *allowedip_nest;
+
+ allowedip_nest = nla_nest_start(skb, 0);
+ if (!allowedip_nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) ||
+ nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) ||
+ nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ?
+ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) {
+ nla_nest_cancel(skb, allowedip_nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, allowedip_nest);
+ return 0;
+}
+
+struct dump_ctx {
+ struct wg_device *wg;
+ struct wg_peer *next_peer;
+ u64 allowedips_seq;
+ struct allowedips_node *next_allowedip;
+};
+
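+/* The dump state is overlaid on the netlink callback's args scratch space,
+ * which is assumed to be large enough to hold a struct dump_ctx.
+ */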
+#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)
+
+static int get_peer(struct wg_peer *peer, struct sk_buff *skb,
+		    struct dump_ctx *ctx)
+{
+	struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0);
+ struct allowedips_node *allowedips_node = ctx->next_allowedip;
+ bool fail;
+
+ if (!peer_nest)
+ return -EMSGSIZE;
+
+ down_read(&peer->handshake.lock);
+ fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN,
+ peer->handshake.remote_static);
+ up_read(&peer->handshake.lock);
+ if (fail)
+ goto err;
+
+ if (!allowedips_node) {
+ const struct __kernel_timespec last_handshake = {
+ .tv_sec = peer->walltime_last_handshake.tv_sec,
+ .tv_nsec = peer->walltime_last_handshake.tv_nsec
+ };
+
+ down_read(&peer->handshake.lock);
+ fail = nla_put(skb, WGPEER_A_PRESHARED_KEY,
+ NOISE_SYMMETRIC_KEY_LEN,
+ peer->handshake.preshared_key);
+ up_read(&peer->handshake.lock);
+ if (fail)
+ goto err;
+
+ if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME,
+ sizeof(last_handshake), &last_handshake) ||
+ nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
+ peer->persistent_keepalive_interval) ||
+ nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes,
+ WGPEER_A_UNSPEC) ||
+ nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes,
+ WGPEER_A_UNSPEC) ||
+ nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1))
+ goto err;
+
+ read_lock_bh(&peer->endpoint_lock);
+ if (peer->endpoint.addr.sa_family == AF_INET)
+ fail = nla_put(skb, WGPEER_A_ENDPOINT,
+ sizeof(peer->endpoint.addr4),
+ &peer->endpoint.addr4);
+ else if (peer->endpoint.addr.sa_family == AF_INET6)
+ fail = nla_put(skb, WGPEER_A_ENDPOINT,
+ sizeof(peer->endpoint.addr6),
+ &peer->endpoint.addr6);
+ read_unlock_bh(&peer->endpoint_lock);
+ if (fail)
+ goto err;
+ allowedips_node =
+ list_first_entry_or_null(&peer->allowedips_list,
+ struct allowedips_node, peer_list);
+ }
+ if (!allowedips_node)
+ goto no_allowedips;
+ if (!ctx->allowedips_seq)
+ ctx->allowedips_seq = peer->device->peer_allowedips.seq;
+ else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
+ goto no_allowedips;
+
+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
+ if (!allowedips_nest)
+ goto err;
+
+ list_for_each_entry_from(allowedips_node, &peer->allowedips_list,
+ peer_list) {
+ u8 cidr, ip[16] __aligned(__alignof(u64));
+ int family;
+
+ family = wg_allowedips_read_node(allowedips_node, ip, &cidr);
+ if (get_allowedips(skb, ip, cidr, family)) {
+ nla_nest_end(skb, allowedips_nest);
+ nla_nest_end(skb, peer_nest);
+ ctx->next_allowedip = allowedips_node;
+ return -EMSGSIZE;
+ }
+ }
+ nla_nest_end(skb, allowedips_nest);
+no_allowedips:
+ nla_nest_end(skb, peer_nest);
+ ctx->next_allowedip = NULL;
+ ctx->allowedips_seq = 0;
+ return 0;
+err:
+ nla_nest_cancel(skb, peer_nest);
+ return -EMSGSIZE;
+}
+
+static int wg_get_device_start(struct netlink_callback *cb)
+{
+ struct wg_device *wg;
+
+ wg = lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb);
+ if (IS_ERR(wg))
+ return PTR_ERR(wg);
+ DUMP_CTX(cb)->wg = wg;
+ return 0;
+}
+
+static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct wg_peer *peer, *next_peer_cursor;
+ struct dump_ctx *ctx = DUMP_CTX(cb);
+ struct wg_device *wg = ctx->wg;
+ struct nlattr *peers_nest;
+ int ret = -EMSGSIZE;
+ bool done = true;
+ void *hdr;
+
+ rtnl_lock();
+ mutex_lock(&wg->device_update_lock);
+ cb->seq = wg->device_update_gen;
+ next_peer_cursor = ctx->next_peer;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE);
+ if (!hdr)
+ goto out;
+ genl_dump_check_consistent(cb, hdr);
+
+ if (!ctx->next_peer) {
+ if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT,
+ wg->incoming_port) ||
+ nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) ||
+ nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) ||
+ nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name))
+ goto out;
+
+ down_read(&wg->static_identity.lock);
+ if (wg->static_identity.has_identity) {
+ if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY,
+ NOISE_PUBLIC_KEY_LEN,
+ wg->static_identity.static_private) ||
+ nla_put(skb, WGDEVICE_A_PUBLIC_KEY,
+ NOISE_PUBLIC_KEY_LEN,
+ wg->static_identity.static_public)) {
+ up_read(&wg->static_identity.lock);
+ goto out;
+ }
+ }
+ up_read(&wg->static_identity.lock);
+ }
+
+ peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS);
+ if (!peers_nest)
+ goto out;
+ ret = 0;
+ /* If the last cursor was removed via list_del_init in peer_remove, then
+ * we just treat this the same as there being no more peers left. The
+ * reason is that seq_nr should indicate to userspace that this isn't a
+ * coherent dump anyway, so they'll try again.
+ */
+ if (list_empty(&wg->peer_list) ||
+ (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
+ nla_nest_cancel(skb, peers_nest);
+ goto out;
+ }
+ lockdep_assert_held(&wg->device_update_lock);
+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
+ if (get_peer(peer, skb, ctx)) {
+ done = false;
+ break;
+ }
+ next_peer_cursor = peer;
+ }
+ nla_nest_end(skb, peers_nest);
+
+out:
+ if (!ret && !done && next_peer_cursor)
+ wg_peer_get(next_peer_cursor);
+ wg_peer_put(ctx->next_peer);
+ mutex_unlock(&wg->device_update_lock);
+ rtnl_unlock();
+
+ if (ret) {
+ genlmsg_cancel(skb, hdr);
+ return ret;
+ }
+ genlmsg_end(skb, hdr);
+ if (done) {
+ ctx->next_peer = NULL;
+ return 0;
+ }
+ ctx->next_peer = next_peer_cursor;
+ return skb->len;
+
+ /* At this point, we can't really deal ourselves with safely zeroing out
+ * the private key material after usage. This will need an additional API
+ * in the kernel for marking skbs as zero_on_free.
+ */
+}
+
+static int wg_get_device_done(struct netlink_callback *cb)
+{
+ struct dump_ctx *ctx = DUMP_CTX(cb);
+
+ if (ctx->wg)
+ dev_put(ctx->wg->dev);
+ wg_peer_put(ctx->next_peer);
+ return 0;
+}
+
+static int set_port(struct wg_device *wg, u16 port)
+{
+ struct wg_peer *peer;
+
+ if (wg->incoming_port == port)
+ return 0;
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
+ if (!netif_running(wg->dev)) {
+ wg->incoming_port = port;
+ return 0;
+ }
+ return wg_socket_init(wg, port);
+}
+
+static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
+{
+ int ret = -EINVAL;
+ u16 family;
+ u8 cidr;
+
+ if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] ||
+ !attrs[WGALLOWEDIP_A_CIDR_MASK])
+ return ret;
+ family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
+ cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
+
+ if (family == AF_INET && cidr <= 32 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
+ ret = wg_allowedips_insert_v4(
+ &peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
+ &peer->device->device_update_lock);
+ else if (family == AF_INET6 && cidr <= 128 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
+ ret = wg_allowedips_insert_v6(
+ &peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
+ &peer->device->device_update_lock);
+
+ return ret;
+}
+
+static int set_peer(struct wg_device *wg, struct nlattr **attrs)
+{
+ u8 *public_key = NULL, *preshared_key = NULL;
+ struct wg_peer *peer = NULL;
+ u32 flags = 0;
+ int ret;
+
+ ret = -EINVAL;
+ if (attrs[WGPEER_A_PUBLIC_KEY] &&
+ nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN)
+ public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]);
+ else
+ goto out;
+ if (attrs[WGPEER_A_PRESHARED_KEY] &&
+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN)
+ preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]);
+
+ if (attrs[WGPEER_A_FLAGS])
+ flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
+ ret = -EOPNOTSUPP;
+ if (flags & ~__WGPEER_F_ALL)
+ goto out;
+
+ ret = -EPFNOSUPPORT;
+ if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
+ if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1)
+ goto out;
+ }
+
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
+ nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
+ ret = 0;
+ if (!peer) { /* Peer doesn't exist yet. Add a new one. */
+ if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY))
+ goto out;
+
+ /* The peer is new, so there aren't allowed IPs to remove. */
+ flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS;
+
+ down_read(&wg->static_identity.lock);
+ if (wg->static_identity.has_identity &&
+ !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]),
+ wg->static_identity.static_public,
+ NOISE_PUBLIC_KEY_LEN)) {
+ /* We silently ignore peers that have the same public
+ * key as the device. The reason we do it silently is
+ * that we'd like for people to be able to reuse the
+ * same set of API calls across peers.
+ */
+ up_read(&wg->static_identity.lock);
+ ret = 0;
+ goto out;
+ }
+ up_read(&wg->static_identity.lock);
+
+ peer = wg_peer_create(wg, public_key, preshared_key);
+ if (IS_ERR(peer)) {
+ /* Similar to the above, if the key is invalid, we skip
+ * it without fanfare, so that services don't need to
+ * worry about doing key validation themselves.
+ */
+ ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer);
+ peer = NULL;
+ goto out;
+ }
+ /* Take additional reference, as though we've just been
+ * looked up.
+ */
+ wg_peer_get(peer);
+ }
+
+ if (flags & WGPEER_F_REMOVE_ME) {
+ wg_peer_remove(peer);
+ goto out;
+ }
+
+ if (preshared_key) {
+ down_write(&peer->handshake.lock);
+ memcpy(&peer->handshake.preshared_key, preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
+ up_write(&peer->handshake.lock);
+ }
+
+ if (attrs[WGPEER_A_ENDPOINT]) {
+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+
+ if ((len == sizeof(struct sockaddr_in) &&
+ addr->sa_family == AF_INET) ||
+ (len == sizeof(struct sockaddr_in6) &&
+ addr->sa_family == AF_INET6)) {
+ struct endpoint endpoint = { { { 0 } } };
+
+ memcpy(&endpoint.addr, addr, len);
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ }
+ }
+
+ if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
+ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
+ &wg->device_update_lock);
+
+ if (attrs[WGPEER_A_ALLOWEDIPS]) {
+ struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
+ int rem;
+
+ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
+ ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
+ attr, allowedip_policy, NULL);
+ if (ret < 0)
+ goto out;
+ ret = set_allowedip(peer, allowedip);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) {
+ const u16 persistent_keepalive_interval = nla_get_u16(
+ attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
+ const bool send_keepalive =
+ !peer->persistent_keepalive_interval &&
+ persistent_keepalive_interval &&
+ netif_running(wg->dev);
+
+ peer->persistent_keepalive_interval = persistent_keepalive_interval;
+ if (send_keepalive)
+ wg_packet_send_keepalive(peer);
+ }
+
+ if (netif_running(wg->dev))
+ wg_packet_send_staged_packets(peer);
+
+out:
+ wg_peer_put(peer);
+ if (attrs[WGPEER_A_PRESHARED_KEY])
+ memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
+ return ret;
+}
+
+static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
+{
+ struct wg_device *wg = lookup_interface(info->attrs, skb);
+ u32 flags = 0;
+ int ret;
+
+ if (IS_ERR(wg)) {
+ ret = PTR_ERR(wg);
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ mutex_lock(&wg->device_update_lock);
+
+ if (info->attrs[WGDEVICE_A_FLAGS])
+ flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
+ ret = -EOPNOTSUPP;
+ if (flags & ~__WGDEVICE_F_ALL)
+ goto out;
+
+ ret = -EPERM;
+ if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
+ info->attrs[WGDEVICE_A_FWMARK]) &&
+ !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
+ goto out;
+
+ ++wg->device_update_gen;
+
+ if (info->attrs[WGDEVICE_A_FWMARK]) {
+ struct wg_peer *peer;
+
+ wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
+ }
+
+ if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
+ ret = set_port(wg,
+ nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT]));
+ if (ret)
+ goto out;
+ }
+
+ if (flags & WGDEVICE_F_REPLACE_PEERS)
+ wg_peer_remove_all(wg);
+
+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
+ NOISE_PUBLIC_KEY_LEN) {
+ u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
+ u8 public_key[NOISE_PUBLIC_KEY_LEN];
+ struct wg_peer *peer, *temp;
+
+ if (!crypto_memneq(wg->static_identity.static_private,
+ private_key, NOISE_PUBLIC_KEY_LEN))
+ goto skip_set_private_key;
+
+		/* We remove before setting, to prevent a race, which means
+		 * doing two 25519-genpub ops.
+		 */
+ if (curve25519_generate_public(public_key, private_key)) {
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
+ public_key);
+ if (peer) {
+ wg_peer_put(peer);
+ wg_peer_remove(peer);
+ }
+ }
+
+ down_write(&wg->static_identity.lock);
+ wg_noise_set_static_identity_private_key(&wg->static_identity,
+ private_key);
+ list_for_each_entry_safe(peer, temp, &wg->peer_list,
+ peer_list) {
+ BUG_ON(!wg_noise_precompute_static_static(peer));
+ wg_noise_expire_current_peer_keypairs(peer);
+ }
+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
+ up_write(&wg->static_identity.lock);
+ }
+skip_set_private_key:
+
+ if (info->attrs[WGDEVICE_A_PEERS]) {
+ struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
+ ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
+ peer_policy, NULL);
+ if (ret < 0)
+ goto out;
+ ret = set_peer(wg, peer);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = 0;
+
+out:
+ mutex_unlock(&wg->device_update_lock);
+ rtnl_unlock();
+ dev_put(wg->dev);
+out_nodev:
+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY])
+ memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]),
+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]));
+ return ret;
+}
+
+static const struct genl_ops genl_ops[] = {
+ {
+ .cmd = WG_CMD_GET_DEVICE,
+ .start = wg_get_device_start,
+ .dumpit = wg_get_device_dump,
+ .done = wg_get_device_done,
+ .flags = GENL_UNS_ADMIN_PERM
+ }, {
+ .cmd = WG_CMD_SET_DEVICE,
+ .doit = wg_set_device,
+ .flags = GENL_UNS_ADMIN_PERM
+ }
+};
+
+static struct genl_family genl_family __ro_after_init = {
+ .ops = genl_ops,
+ .n_ops = ARRAY_SIZE(genl_ops),
+ .name = WG_GENL_NAME,
+ .version = WG_GENL_VERSION,
+ .maxattr = WGDEVICE_A_MAX,
+ .module = THIS_MODULE,
+ .policy = device_policy,
+ .netnsok = true
+};
+
+int __init wg_genetlink_init(void)
+{
+ return genl_register_family(&genl_family);
+}
+
+void __exit wg_genetlink_uninit(void)
+{
+ genl_unregister_family(&genl_family);
+}
diff --git a/drivers/net/wireguard/netlink.h b/drivers/net/wireguard/netlink.h
new file mode 100644
index 000000000000..15100d92e2e3
--- /dev/null
+++ b/drivers/net/wireguard/netlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_NETLINK_H
+#define _WG_NETLINK_H
+
+int wg_genetlink_init(void);
+void wg_genetlink_uninit(void);
+
+#endif /* _WG_NETLINK_H */
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
new file mode 100644
index 000000000000..919d9d866446
--- /dev/null
+++ b/drivers/net/wireguard/noise.c
@@ -0,0 +1,832 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "noise.h"
+#include "device.h"
+#include "peer.h"
+#include "messages.h"
+#include "queueing.h"
+#include "peerlookup.h"
+
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <crypto/algapi.h>
+
+/* This implements Noise_IKpsk2:
+ *
+ * <- s
+ * ******
+ * -> e, es, s, ss, {t}
+ * <- e, ee, se, psk, {}
+ */
+
+static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
+static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
+static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
+static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
+static atomic64_t keypair_counter = ATOMIC64_INIT(0);
+
+void __init wg_noise_init(void)
+{
+ struct blake2s_state blake;
+
+ blake2s(handshake_init_chaining_key, handshake_name, NULL,
+ NOISE_HASH_LEN, sizeof(handshake_name), 0);
+ blake2s_init(&blake, NOISE_HASH_LEN);
+ blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN);
+ blake2s_update(&blake, identifier_name, sizeof(identifier_name));
+ blake2s_final(&blake, handshake_init_hash);
+}
+
+/* Must hold peer->handshake.static_identity->lock */
+bool wg_noise_precompute_static_static(struct wg_peer *peer)
+{
+ bool ret;
+
+ down_write(&peer->handshake.lock);
+ if (peer->handshake.static_identity->has_identity) {
+ ret = curve25519(
+ peer->handshake.precomputed_static_static,
+ peer->handshake.static_identity->static_private,
+ peer->handshake.remote_static);
+ } else {
+ u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
+
+ ret = curve25519(empty, empty, peer->handshake.remote_static);
+ memset(peer->handshake.precomputed_static_static, 0,
+ NOISE_PUBLIC_KEY_LEN);
+ }
+ up_write(&peer->handshake.lock);
+ return ret;
+}
+
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wg_peer *peer)
+{
+ memset(handshake, 0, sizeof(*handshake));
+ init_rwsem(&handshake->lock);
+ handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE;
+ handshake->entry.peer = peer;
+ memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN);
+ if (peer_preshared_key)
+ memcpy(handshake->preshared_key, peer_preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
+ handshake->static_identity = static_identity;
+ handshake->state = HANDSHAKE_ZEROED;
+ return wg_noise_precompute_static_static(peer);
+}
+
+static void handshake_zero(struct noise_handshake *handshake)
+{
+ memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN);
+ memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN);
+ memset(&handshake->hash, 0, NOISE_HASH_LEN);
+ memset(&handshake->chaining_key, 0, NOISE_HASH_LEN);
+ handshake->remote_index = 0;
+ handshake->state = HANDSHAKE_ZEROED;
+}
+
+void wg_noise_handshake_clear(struct noise_handshake *handshake)
+{
+ wg_index_hashtable_remove(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+ down_write(&handshake->lock);
+ handshake_zero(handshake);
+ up_write(&handshake->lock);
+ wg_index_hashtable_remove(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+}
+
+static struct noise_keypair *keypair_create(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL);
+
+ if (unlikely(!keypair))
+ return NULL;
+ keypair->internal_id = atomic64_inc_return(&keypair_counter);
+ keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
+ keypair->entry.peer = peer;
+ kref_init(&keypair->refcount);
+ return keypair;
+}
+
+static void keypair_free_rcu(struct rcu_head *rcu)
+{
+ kzfree(container_of(rcu, struct noise_keypair, rcu));
+}
+
+static void keypair_free_kref(struct kref *kref)
+{
+ struct noise_keypair *keypair =
+ container_of(kref, struct noise_keypair, refcount);
+
+ net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n",
+ keypair->entry.peer->device->dev->name,
+ keypair->internal_id,
+ keypair->entry.peer->internal_id);
+ wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
+ call_rcu(&keypair->rcu, keypair_free_rcu);
+}
+
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
+{
+ if (unlikely(!keypair))
+ return;
+ if (unlikely(unreference_now))
+ wg_index_hashtable_remove(
+ keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
+ kref_put(&keypair->refcount, keypair_free_kref);
+}
+
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
+ "Taking noise keypair reference without holding the RCU BH read lock");
+ if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount)))
+ return NULL;
+ return keypair;
+}
+
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
+{
+ struct noise_keypair *old;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+
+ /* We zero the next_keypair before zeroing the others, so that
+ * wg_noise_received_with_keypair returns early before subsequent ones
+ * are zeroed.
+ */
+ old = rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ old = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ old = rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ RCU_INIT_POINTER(keypairs->current_keypair, NULL);
+ wg_noise_keypair_put(old, true);
+
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+}
+
+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+
+ spin_lock_bh(&peer->keypairs.keypair_update_lock);
+ keypair = rcu_dereference_protected(peer->keypairs.next_keypair,
+ lockdep_is_held(&peer->keypairs.keypair_update_lock));
+ if (keypair)
+ keypair->sending.is_valid = false;
+ keypair = rcu_dereference_protected(peer->keypairs.current_keypair,
+ lockdep_is_held(&peer->keypairs.keypair_update_lock));
+ if (keypair)
+ keypair->sending.is_valid = false;
+ spin_unlock_bh(&peer->keypairs.keypair_update_lock);
+}
+
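+/* The slot transitions performed below, written as
+ * (previous, current, next) -> (previous', current', next'):
+ *
+ *   initiator, next pending: (P, C, N) -> (N, new, NULL), dropping P and C
+ *   initiator, no next:      (P, C, -) -> (C, new, NULL), dropping P
+ *   responder:               (P, C, N) -> (NULL, C, new), dropping P and N
+ */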
+static void add_new_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *new_keypair)
+{
+ struct noise_keypair *previous_keypair, *next_keypair, *current_keypair;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+ previous_keypair = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ next_keypair = rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ current_keypair = rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ if (new_keypair->i_am_the_initiator) {
+ /* If we're the initiator, it means we've sent a handshake, and
+ * received a confirmation response, which means this new
+ * keypair can now be used.
+ */
+ if (next_keypair) {
+ /* If there already was a next keypair pending, we
+ * demote it to be the previous keypair, and free the
+ * existing current. Note that this means key-compromise
+ * impersonation (KCI) can result in this transition. It
+ * would perhaps be more sound to
+ * always just get rid of the unused next keypair
+ * instead of putting it in the previous slot, but this
+ * might be a bit less robust. Something to think about
+ * for the future.
+ */
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+ rcu_assign_pointer(keypairs->previous_keypair,
+ next_keypair);
+ wg_noise_keypair_put(current_keypair, true);
+ } else /* If there wasn't an existing next keypair, we replace
+ * the previous with the current one.
+ */
+ rcu_assign_pointer(keypairs->previous_keypair,
+ current_keypair);
+ /* At this point we can get rid of the old previous keypair, and
+ * set up the new keypair.
+ */
+ wg_noise_keypair_put(previous_keypair, true);
+ rcu_assign_pointer(keypairs->current_keypair, new_keypair);
+ } else {
+ /* If we're the responder, it means we can't use the new keypair
+ * until we receive confirmation via the first data packet, so
+ * we get rid of the existing previous one, the possibly
+ * existing next one, and slide in the new next one.
+ */
+ rcu_assign_pointer(keypairs->next_keypair, new_keypair);
+ wg_noise_keypair_put(next_keypair, true);
+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
+ wg_noise_keypair_put(previous_keypair, true);
+ }
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+}
+
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair)
+{
+ struct noise_keypair *old_keypair;
+ bool key_is_new;
+
+ /* We first check without taking the spinlock. */
+ key_is_new = received_keypair ==
+ rcu_access_pointer(keypairs->next_keypair);
+ if (likely(!key_is_new))
+ return false;
+
+ spin_lock_bh(&keypairs->keypair_update_lock);
+ /* After locking, we double check that things didn't change from
+ * beneath us.
+ */
+ if (unlikely(received_keypair !=
+ rcu_dereference_protected(keypairs->next_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock)))) {
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+ return false;
+ }
+
+ /* When we've finally received the confirmation, we slide the next
+ * into the current, the current into the previous, and get rid of
+ * the old previous.
+ */
+ old_keypair = rcu_dereference_protected(keypairs->previous_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock));
+ rcu_assign_pointer(keypairs->previous_keypair,
+ rcu_dereference_protected(keypairs->current_keypair,
+ lockdep_is_held(&keypairs->keypair_update_lock)));
+ wg_noise_keypair_put(old_keypair, true);
+ rcu_assign_pointer(keypairs->current_keypair, received_keypair);
+ RCU_INIT_POINTER(keypairs->next_keypair, NULL);
+
+ spin_unlock_bh(&keypairs->keypair_update_lock);
+ return true;
+}
+
+/* Must hold static_identity->lock */
+void wg_noise_set_static_identity_private_key(
+ struct noise_static_identity *static_identity,
+ const u8 private_key[NOISE_PUBLIC_KEY_LEN])
+{
+ memcpy(static_identity->static_private, private_key,
+ NOISE_PUBLIC_KEY_LEN);
+ curve25519_clamp_secret(static_identity->static_private);
+ static_identity->has_identity = curve25519_generate_public(
+ static_identity->static_public, private_key);
+}
+
+/* This is Hugo Krawczyk's HKDF:
+ * - https://eprint.iacr.org/2010/264.pdf
+ * - https://tools.ietf.org/html/rfc5869
+ */
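+/* In RFC 5869 terms, the code below computes PRK = HMAC(chaining_key, data)
+ * and then the standard expand chain:
+ *   T1 = HMAC(PRK, 0x01)
+ *   T2 = HMAC(PRK, T1 || 0x02)
+ *   T3 = HMAC(PRK, T2 || 0x03)
+ * with each Ti truncated to the requested destination length.
+ */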
+static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
+ size_t first_len, size_t second_len, size_t third_len,
+ size_t data_len, const u8 chaining_key[NOISE_HASH_LEN])
+{
+ u8 output[BLAKE2S_HASH_SIZE + 1];
+ u8 secret[BLAKE2S_HASH_SIZE];
+
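+ /* Outputs may only be omitted from the end: each later dst/len pair
+ * requires all earlier ones to be present, which is what the WARN_ON
+ * below checks.
+ */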
+ WARN_ON(IS_ENABLED(DEBUG) &&
+ (first_len > BLAKE2S_HASH_SIZE ||
+ second_len > BLAKE2S_HASH_SIZE ||
+ third_len > BLAKE2S_HASH_SIZE ||
+ ((second_len || second_dst || third_len || third_dst) &&
+ (!first_len || !first_dst)) ||
+ ((third_len || third_dst) && (!second_len || !second_dst))));
+
+ /* Extract entropy from data into secret */
+ blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+
+ if (!first_dst || !first_len)
+ goto out;
+
+ /* Expand first key: key = secret, data = 0x1 */
+ output[0] = 1;
+ blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ memcpy(first_dst, output, first_len);
+
+ if (!second_dst || !second_len)
+ goto out;
+
+ /* Expand second key: key = secret, data = first-key || 0x2 */
+ output[BLAKE2S_HASH_SIZE] = 2;
+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
+ BLAKE2S_HASH_SIZE);
+ memcpy(second_dst, output, second_len);
+
+ if (!third_dst || !third_len)
+ goto out;
+
+ /* Expand third key: key = secret, data = second-key || 0x3 */
+ output[BLAKE2S_HASH_SIZE] = 3;
+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
+ BLAKE2S_HASH_SIZE);
+ memcpy(third_dst, output, third_len);
+
+out:
+ /* Clear sensitive data from stack */
+ memzero_explicit(secret, BLAKE2S_HASH_SIZE);
+ memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
+}
+
+static void symmetric_key_init(struct noise_symmetric_key *key)
+{
+ spin_lock_init(&key->counter.receive.lock);
+ atomic64_set(&key->counter.counter, 0);
+ memset(key->counter.receive.backtrack, 0,
+ sizeof(key->counter.receive.backtrack));
+ key->birthdate = ktime_get_coarse_boottime_ns();
+ key->is_valid = true;
+}
+
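+/* Both sides derive the same two session keys from the final chaining key;
+ * only the assignment order differs: the initiator sends with the first
+ * output and receives with the second, while the responder does the
+ * reverse (see wg_noise_handshake_begin_session).
+ */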
+static void derive_keys(struct noise_symmetric_key *first_dst,
+ struct noise_symmetric_key *second_dst,
+ const u8 chaining_key[NOISE_HASH_LEN])
+{
+ kdf(first_dst->key, second_dst->key, NULL, NULL,
+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
+ chaining_key);
+ symmetric_key_init(first_dst);
+ symmetric_key_init(second_dst);
+}
+
+static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
+ u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 private[NOISE_PUBLIC_KEY_LEN],
+ const u8 public[NOISE_PUBLIC_KEY_LEN])
+{
+ u8 dh_calculation[NOISE_PUBLIC_KEY_LEN];
+
+ if (unlikely(!curve25519(dh_calculation, private, public)))
+ return false;
+ kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key);
+ memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN);
+ return true;
+}
+
+static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
+{
+ struct blake2s_state blake;
+
+ blake2s_init(&blake, NOISE_HASH_LEN);
+ blake2s_update(&blake, hash, NOISE_HASH_LEN);
+ blake2s_update(&blake, src, src_len);
+ blake2s_final(&blake, hash);
+}
+
+static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN],
+ u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ const u8 psk[NOISE_SYMMETRIC_KEY_LEN])
+{
+ u8 temp_hash[NOISE_HASH_LEN];
+
+ kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key);
+ mix_hash(hash, temp_hash, NOISE_HASH_LEN);
+ memzero_explicit(temp_hash, NOISE_HASH_LEN);
+}
+
+static void handshake_init(u8 chaining_key[NOISE_HASH_LEN],
+ u8 hash[NOISE_HASH_LEN],
+ const u8 remote_static[NOISE_PUBLIC_KEY_LEN])
+{
+ memcpy(hash, handshake_init_hash, NOISE_HASH_LEN);
+ memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN);
+ mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN);
+}
+
+static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext,
+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash,
+ NOISE_HASH_LEN,
+ 0 /* Always zero for Noise_IK */, key);
+ mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len));
+}
+
+static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext,
+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len,
+ hash, NOISE_HASH_LEN,
+ 0 /* Always zero for Noise_IK */, key))
+ return false;
+ mix_hash(hash, src_ciphertext, src_len);
+ return true;
+}
+
+static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN],
+ const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN],
+ u8 chaining_key[NOISE_HASH_LEN],
+ u8 hash[NOISE_HASH_LEN])
+{
+ if (ephemeral_dst != ephemeral_src)
+ memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
+ mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
+ kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0,
+ NOISE_PUBLIC_KEY_LEN, chaining_key);
+}
+
+static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN])
+{
+ struct timespec64 now;
+
+ ktime_get_real_ts64(&now);
+
+ /* In order to prevent some sort of infoleak from precise timers, we
+ * round the nanoseconds part down to a multiple of the largest power
+ * of two that fits within the minimum initiation interval the
+ * implementation enforces anyway.
+ */
+ now.tv_nsec = ALIGN_DOWN(now.tv_nsec,
+ rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND));
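+ /* For instance, if INITIATIONS_PER_SECOND were 50, the alignment would
+ * be rounddown_pow_of_two(20000000) == 1 << 24, leaving roughly 16.8ms
+ * of timestamp granularity.
+ */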
+
+ /* https://cr.yp.to/libtai/tai64.html */
+ *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec);
+ *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec);
+}
+
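+/* Construct the first message of the handshake. The single-token comments
+ * below follow Noise protocol notation: the initiation performs
+ * "e, es, s, ss, {t}", and the corresponding response built in
+ * wg_noise_handshake_create_response performs "e, ee, se, psk, {}".
+ */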
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake)
+{
+ u8 timestamp[NOISE_TIMESTAMP_LEN];
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ bool ret = false;
+
+ /* We need to wait for crng _before_ taking any locks, since
+ * curve25519_generate_secret uses get_random_bytes_wait.
+ */
+ wait_for_random_bytes();
+
+ down_read(&handshake->static_identity->lock);
+ down_write(&handshake->lock);
+
+ if (unlikely(!handshake->static_identity->has_identity))
+ goto out;
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION);
+
+ handshake_init(handshake->chaining_key, handshake->hash,
+ handshake->remote_static);
+
+ /* e */
+ curve25519_generate_secret(handshake->ephemeral_private);
+ if (!curve25519_generate_public(dst->unencrypted_ephemeral,
+ handshake->ephemeral_private))
+ goto out;
+ message_ephemeral(dst->unencrypted_ephemeral,
+ dst->unencrypted_ephemeral, handshake->chaining_key,
+ handshake->hash);
+
+ /* es */
+ if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private,
+ handshake->remote_static))
+ goto out;
+
+ /* s */
+ message_encrypt(dst->encrypted_static,
+ handshake->static_identity->static_public,
+ NOISE_PUBLIC_KEY_LEN, key, handshake->hash);
+
+ /* ss */
+ kdf(handshake->chaining_key, key, NULL,
+ handshake->precomputed_static_static, NOISE_HASH_LEN,
+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+ handshake->chaining_key);
+
+ /* {t} */
+ tai64n_now(timestamp);
+ message_encrypt(dst->encrypted_timestamp, timestamp,
+ NOISE_TIMESTAMP_LEN, key, handshake->hash);
+
+ dst->sender_index = wg_index_hashtable_insert(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+
+ handshake->state = HANDSHAKE_CREATED_INITIATION;
+ ret = true;
+
+out:
+ up_write(&handshake->lock);
+ up_read(&handshake->static_identity->lock);
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ return ret;
+}
+
+struct wg_peer *
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wg_device *wg)
+{
+ struct wg_peer *peer = NULL, *ret_peer = NULL;
+ struct noise_handshake *handshake;
+ bool replay_attack, flood_attack;
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+ u8 hash[NOISE_HASH_LEN];
+ u8 s[NOISE_PUBLIC_KEY_LEN];
+ u8 e[NOISE_PUBLIC_KEY_LEN];
+ u8 t[NOISE_TIMESTAMP_LEN];
+ u64 initiation_consumption;
+
+ down_read(&wg->static_identity.lock);
+ if (unlikely(!wg->static_identity.has_identity))
+ goto out;
+
+ handshake_init(chaining_key, hash, wg->static_identity.static_public);
+
+ /* e */
+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
+
+ /* es */
+ if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e))
+ goto out;
+
+ /* s */
+ if (!message_decrypt(s, src->encrypted_static,
+ sizeof(src->encrypted_static), key, hash))
+ goto out;
+
+ /* Look up which peer we're actually talking to */
+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s);
+ if (!peer)
+ goto out;
+ handshake = &peer->handshake;
+
+ /* ss */
+ kdf(chaining_key, key, NULL, handshake->precomputed_static_static,
+ NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+ chaining_key);
+
+ /* {t} */
+ if (!message_decrypt(t, src->encrypted_timestamp,
+ sizeof(src->encrypted_timestamp), key, hash))
+ goto out;
+
+ down_read(&handshake->lock);
+ replay_attack = memcmp(t, handshake->latest_timestamp,
+ NOISE_TIMESTAMP_LEN) <= 0;
+ flood_attack = (s64)handshake->last_initiation_consumption +
+ NSEC_PER_SEC / INITIATIONS_PER_SECOND >
+ (s64)ktime_get_coarse_boottime_ns();
+ up_read(&handshake->lock);
+ if (replay_attack || flood_attack)
+ goto out;
+
+ /* Success! Copy everything to peer */
+ down_write(&handshake->lock);
+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
+ if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0)
+ memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN);
+ memcpy(handshake->hash, hash, NOISE_HASH_LEN);
+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
+ handshake->remote_index = src->sender_index;
+ if ((s64)(handshake->last_initiation_consumption -
+ (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+ handshake->last_initiation_consumption = initiation_consumption;
+ handshake->state = HANDSHAKE_CONSUMED_INITIATION;
+ up_write(&handshake->lock);
+ ret_peer = peer;
+
+out:
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ memzero_explicit(hash, NOISE_HASH_LEN);
+ memzero_explicit(chaining_key, NOISE_HASH_LEN);
+ up_read(&wg->static_identity.lock);
+ if (!ret_peer)
+ wg_peer_put(peer);
+ return ret_peer;
+}
+
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake)
+{
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ bool ret = false;
+
+ /* We need to wait for crng _before_ taking any locks, since
+ * curve25519_generate_secret uses get_random_bytes_wait.
+ */
+ wait_for_random_bytes();
+
+ down_read(&handshake->static_identity->lock);
+ down_write(&handshake->lock);
+
+ if (handshake->state != HANDSHAKE_CONSUMED_INITIATION)
+ goto out;
+
+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE);
+ dst->receiver_index = handshake->remote_index;
+
+ /* e */
+ curve25519_generate_secret(handshake->ephemeral_private);
+ if (!curve25519_generate_public(dst->unencrypted_ephemeral,
+ handshake->ephemeral_private))
+ goto out;
+ message_ephemeral(dst->unencrypted_ephemeral,
+ dst->unencrypted_ephemeral, handshake->chaining_key,
+ handshake->hash);
+
+ /* ee */
+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
+ handshake->remote_ephemeral))
+ goto out;
+
+ /* se */
+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
+ handshake->remote_static))
+ goto out;
+
+ /* psk */
+ mix_psk(handshake->chaining_key, handshake->hash, key,
+ handshake->preshared_key);
+
+ /* {} */
+ message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash);
+
+ dst->sender_index = wg_index_hashtable_insert(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
+
+ handshake->state = HANDSHAKE_CREATED_RESPONSE;
+ ret = true;
+
+out:
+ up_write(&handshake->lock);
+ up_read(&handshake->static_identity->lock);
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ return ret;
+}
+
+struct wg_peer *
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wg_device *wg)
+{
+ enum noise_handshake_state state = HANDSHAKE_ZEROED;
+ struct wg_peer *peer = NULL, *ret_peer = NULL;
+ struct noise_handshake *handshake;
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ u8 hash[NOISE_HASH_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+ u8 e[NOISE_PUBLIC_KEY_LEN];
+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
+ u8 static_private[NOISE_PUBLIC_KEY_LEN];
+
+ down_read(&wg->static_identity.lock);
+
+ if (unlikely(!wg->static_identity.has_identity))
+ goto out;
+
+ handshake = (struct noise_handshake *)wg_index_hashtable_lookup(
+ wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE,
+ src->receiver_index, &peer);
+ if (unlikely(!handshake))
+ goto out;
+
+ down_read(&handshake->lock);
+ state = handshake->state;
+ memcpy(hash, handshake->hash, NOISE_HASH_LEN);
+ memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
+ memcpy(ephemeral_private, handshake->ephemeral_private,
+ NOISE_PUBLIC_KEY_LEN);
+ up_read(&handshake->lock);
+
+ if (state != HANDSHAKE_CREATED_INITIATION)
+ goto fail;
+
+ /* e */
+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
+
+ /* ee */
+ if (!mix_dh(chaining_key, NULL, ephemeral_private, e))
+ goto fail;
+
+ /* se */
+ if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e))
+ goto fail;
+
+ /* psk */
+ mix_psk(chaining_key, hash, key, handshake->preshared_key);
+
+ /* {} */
+ if (!message_decrypt(NULL, src->encrypted_nothing,
+ sizeof(src->encrypted_nothing), key, hash))
+ goto fail;
+
+ /* Success! Copy everything to peer */
+ down_write(&handshake->lock);
+ /* It's important to check that the state is still the same while we
+ * have an exclusive lock.
+ */
+ if (handshake->state != state) {
+ up_write(&handshake->lock);
+ goto fail;
+ }
+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
+ memcpy(handshake->hash, hash, NOISE_HASH_LEN);
+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
+ handshake->remote_index = src->sender_index;
+ handshake->state = HANDSHAKE_CONSUMED_RESPONSE;
+ up_write(&handshake->lock);
+ ret_peer = peer;
+ goto out;
+
+fail:
+ wg_peer_put(peer);
+out:
+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
+ memzero_explicit(hash, NOISE_HASH_LEN);
+ memzero_explicit(chaining_key, NOISE_HASH_LEN);
+ memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
+ memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+ up_read(&wg->static_identity.lock);
+ return ret_peer;
+}
+
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs)
+{
+ struct noise_keypair *new_keypair;
+ bool ret = false;
+
+ down_write(&handshake->lock);
+ if (handshake->state != HANDSHAKE_CREATED_RESPONSE &&
+ handshake->state != HANDSHAKE_CONSUMED_RESPONSE)
+ goto out;
+
+ new_keypair = keypair_create(handshake->entry.peer);
+ if (!new_keypair)
+ goto out;
+ new_keypair->i_am_the_initiator = handshake->state ==
+ HANDSHAKE_CONSUMED_RESPONSE;
+ new_keypair->remote_index = handshake->remote_index;
+
+ if (new_keypair->i_am_the_initiator)
+ derive_keys(&new_keypair->sending, &new_keypair->receiving,
+ handshake->chaining_key);
+ else
+ derive_keys(&new_keypair->receiving, &new_keypair->sending,
+ handshake->chaining_key);
+
+ handshake_zero(handshake);
+ rcu_read_lock_bh();
+ if (likely(!READ_ONCE(container_of(handshake, struct wg_peer,
+ handshake)->is_dead))) {
+ add_new_keypair(keypairs, new_keypair);
+ net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n",
+ handshake->entry.peer->device->dev->name,
+ new_keypair->internal_id,
+ handshake->entry.peer->internal_id);
+ ret = wg_index_hashtable_replace(
+ handshake->entry.peer->device->index_hashtable,
+ &handshake->entry, &new_keypair->entry);
+ } else {
+ kzfree(new_keypair);
+ }
+ rcu_read_unlock_bh();
+
+out:
+ up_write(&handshake->lock);
+ return ret;
+}
diff --git a/drivers/net/wireguard/noise.h b/drivers/net/wireguard/noise.h
new file mode 100644
index 000000000000..138a07bb817c
--- /dev/null
+++ b/drivers/net/wireguard/noise.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+#ifndef _WG_NOISE_H
+#define _WG_NOISE_H
+
+#include "messages.h"
+#include "peerlookup.h"
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+
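+/* Used in two ways: as an atomic nonce counter (the atomic64_t view) when a
+ * key is used for sending, and as a sliding-window replay filter (the
+ * .receive view, whose backtrack bitmap records recently seen counter
+ * values) when receiving.
+ */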
+union noise_counter {
+ struct {
+ u64 counter;
+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
+ spinlock_t lock;
+ } receive;
+ atomic64_t counter;
+};
+
+struct noise_symmetric_key {
+ u8 key[NOISE_SYMMETRIC_KEY_LEN];
+ union noise_counter counter;
+ u64 birthdate;
+ bool is_valid;
+};
+
+struct noise_keypair {
+ struct index_hashtable_entry entry;
+ struct noise_symmetric_key sending;
+ struct noise_symmetric_key receiving;
+ __le32 remote_index;
+ bool i_am_the_initiator;
+ struct kref refcount;
+ struct rcu_head rcu;
+ u64 internal_id;
+};
+
+struct noise_keypairs {
+ struct noise_keypair __rcu *current_keypair;
+ struct noise_keypair __rcu *previous_keypair;
+ struct noise_keypair __rcu *next_keypair;
+ spinlock_t keypair_update_lock;
+};
+
+struct noise_static_identity {
+ u8 static_public[NOISE_PUBLIC_KEY_LEN];
+ u8 static_private[NOISE_PUBLIC_KEY_LEN];
+ struct rw_semaphore lock;
+ bool has_identity;
+};
+
+enum noise_handshake_state {
+ HANDSHAKE_ZEROED,
+ HANDSHAKE_CREATED_INITIATION,
+ HANDSHAKE_CONSUMED_INITIATION,
+ HANDSHAKE_CREATED_RESPONSE,
+ HANDSHAKE_CONSUMED_RESPONSE
+};
+
+struct noise_handshake {
+ struct index_hashtable_entry entry;
+
+ enum noise_handshake_state state;
+ u64 last_initiation_consumption;
+
+ struct noise_static_identity *static_identity;
+
+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
+ u8 remote_static[NOISE_PUBLIC_KEY_LEN];
+ u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN];
+ u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN];
+
+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
+
+ u8 hash[NOISE_HASH_LEN];
+ u8 chaining_key[NOISE_HASH_LEN];
+
+ u8 latest_timestamp[NOISE_TIMESTAMP_LEN];
+ __le32 remote_index;
+
+ /* Protects all members except those that are immutable after
+ * wg_noise_handshake_init: remote_static, precomputed_static_static,
+ * static_identity.
+ */
+ struct rw_semaphore lock;
+};
+
+struct wg_device;
+
+void wg_noise_init(void);
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wg_peer *peer);
+void wg_noise_handshake_clear(struct noise_handshake *handshake);
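+/* Backdates the last-sent-handshake timestamp to more than REKEY_TIMEOUT
+ * seconds ago, so that the next handshake initiation is permitted
+ * immediately rather than rate-limited.
+ */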
+static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns)
+{
+ atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() -
+ (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
+}
+
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now);
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair);
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs);
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair);
+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer);
+
+void wg_noise_set_static_identity_private_key(
+ struct noise_static_identity *static_identity,
+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
+bool wg_noise_precompute_static_static(struct wg_peer *peer);
+
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake);
+struct wg_peer *
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wg_device *wg);
+
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake);
+struct wg_peer *
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wg_device *wg);
+
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs);
+
+#endif /* _WG_NOISE_H */
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
new file mode 100644
index 000000000000..071eedf33f5a
--- /dev/null
+++ b/drivers/net/wireguard/peer.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "peer.h"
+#include "device.h"
+#include "queueing.h"
+#include "timers.h"
+#include "peerlookup.h"
+#include "noise.h"
+
+#include <linux/kref.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+
+static atomic64_t peer_counter = ATOMIC64_INIT(0);
+
+struct wg_peer *wg_peer_create(struct wg_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
+{
+ struct wg_peer *peer;
+ int ret = -ENOMEM;
+
+ lockdep_assert_held(&wg->device_update_lock);
+
+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
+ return ERR_PTR(ret);
+
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (unlikely(!peer))
+ return ERR_PTR(ret);
+ peer->device = wg;
+
+ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+ public_key, preshared_key, peer)) {
+ ret = -EKEYREJECTED;
+ goto err_1;
+ }
+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ goto err_1;
+ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
+ MAX_QUEUED_PACKETS))
+ goto err_2;
+ if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
+ MAX_QUEUED_PACKETS))
+ goto err_3;
+
+ peer->internal_id = atomic64_inc_return(&peer_counter);
+ peer->serial_work_cpu = nr_cpumask_bits;
+ wg_cookie_init(&peer->latest_cookie);
+ wg_timers_init(peer);
+ wg_cookie_checker_precompute_peer_keys(peer);
+ spin_lock_init(&peer->keypairs.keypair_update_lock);
+ INIT_WORK(&peer->transmit_handshake_work,
+ wg_packet_handshake_send_worker);
+ rwlock_init(&peer->endpoint_lock);
+ kref_init(&peer->refcount);
+ skb_queue_head_init(&peer->staged_packet_queue);
+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&peer->napi);
+ list_add_tail(&peer->peer_list, &wg->peer_list);
+ INIT_LIST_HEAD(&peer->allowedips_list);
+ wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
+ ++wg->num_peers;
+ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
+ return peer;
+
+err_3:
+ wg_packet_queue_free(&peer->tx_queue, false);
+err_2:
+ dst_cache_destroy(&peer->endpoint_cache);
+err_1:
+ kfree(peer);
+ return ERR_PTR(ret);
+}
+
+struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
+ "Taking peer reference without holding the RCU read lock");
+ if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
+ return NULL;
+ return peer;
+}
+
+static void peer_make_dead(struct wg_peer *peer)
+{
+ /* Remove from configuration-time lookup structures. */
+ list_del_init(&peer->peer_list);
+ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
+ &peer->device->device_update_lock);
+ wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);
+
+ /* Mark as dead, so that we don't allow jumping contexts after. */
+ WRITE_ONCE(peer->is_dead, true);
+
+ /* The caller must now synchronize_rcu() for this to take effect. */
+}
+
+static void peer_remove_after_dead(struct wg_peer *peer)
+{
+ WARN_ON(!peer->is_dead);
+
+ /* No more keypairs can be created for this peer, since is_dead protects
+ * add_new_keypair, so we can now destroy existing ones.
+ */
+ wg_noise_keypairs_clear(&peer->keypairs);
+
+ /* Destroy all ongoing timers that were in-flight at the beginning of
+ * this function.
+ */
+ wg_timers_stop(peer);
+
+ /* The transition between packet encryption/decryption queues isn't
+ * guarded by is_dead, but each reference's life is strictly bounded by
+ * two generations: once for parallel crypto and once for serial
+ * ingestion, so we can simply flush twice, and be sure that we no
+ * longer have references inside these queues.
+ */
+
+ /* a) For encrypt/decrypt. */
+ flush_workqueue(peer->device->packet_crypt_wq);
+ /* b.1) For send (but not receive, since that's napi). */
+ flush_workqueue(peer->device->packet_crypt_wq);
+ /* b.2.1) For receive (but not send, since that's wq). */
+ napi_disable(&peer->napi);
+ /* b.2.2) It's now safe to remove the napi struct, which must be done
+ * here from process context.
+ */
+ netif_napi_del(&peer->napi);
+
+ /* Ensure any workstructs we own (like transmit_handshake_work or
+ * clear_peer_work) no longer are in use.
+ */
+ flush_workqueue(peer->device->handshake_send_wq);
+
+ /* After the above flushes, a peer might still be active in a few
+ * different contexts: 1) from xmit(), before hitting is_dead and
+ * returning, 2) from wg_packet_consume_data(), before hitting is_dead
+ * and returning, 3) from wg_receive_handshake_packet() after a point
+ * where it has processed an incoming handshake packet, but where
+ * all calls to pass it off to timers fail because of is_dead. We won't
+ * have new references in (1) eventually, because we're removed from
+ * allowedips; we won't have new references in (2) eventually, because
+ * wg_index_hashtable_lookup will always return NULL, since we removed
+ * all existing keypairs and no more can be created; we won't have new
+ * references in (3) eventually, because we're removed from the pubkey
+ * hash table, which allows for at most one handshake response via the
+ * still-uncleared index hashtable entry,
+ * and in wg_cookie_message_consume, the lookup eventually gets a peer
+ * with a refcount of zero, so no new reference is taken.
+ */
+
+ --peer->device->num_peers;
+ wg_peer_put(peer);
+}
+
+/* We have a separate "remove" function to make sure that all active places where
+ * a peer is currently operating will eventually come to an end and not pass
+ * their reference onto another context.
+ */
+void wg_peer_remove(struct wg_peer *peer)
+{
+ if (unlikely(!peer))
+ return;
+ lockdep_assert_held(&peer->device->device_update_lock);
+
+ peer_make_dead(peer);
+ synchronize_rcu();
+ peer_remove_after_dead(peer);
+}
+
+void wg_peer_remove_all(struct wg_device *wg)
+{
+ struct wg_peer *peer, *temp;
+ LIST_HEAD(dead_peers);
+
+ lockdep_assert_held(&wg->device_update_lock);
+
+ /* Avoid having to traverse individually for each one. */
+ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);
+
+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
+ peer_make_dead(peer);
+ list_add_tail(&peer->peer_list, &dead_peers);
+ }
+ synchronize_rcu();
+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
+ peer_remove_after_dead(peer);
+}
+
+static void rcu_release(struct rcu_head *rcu)
+{
+ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
+
+ dst_cache_destroy(&peer->endpoint_cache);
+ wg_packet_queue_free(&peer->rx_queue, false);
+ wg_packet_queue_free(&peer->tx_queue, false);
+
+ /* The final zeroing takes care of clearing any remaining handshake key
+ * material and other potentially sensitive information.
+ */
+ kzfree(peer);
+}
+
+static void kref_release(struct kref *refcount)
+{
+ struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);
+
+ pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ /* Remove ourselves from dynamic runtime lookup structures, now that the
+ * last reference is gone.
+ */
+ wg_index_hashtable_remove(peer->device->index_hashtable,
+ &peer->handshake.entry);
+
+ /* Remove any lingering packets that didn't have a chance to be
+ * transmitted.
+ */
+ wg_packet_purge_staged_packets(peer);
+
+ /* Free the memory used. */
+ call_rcu(&peer->rcu, rcu_release);
+}
+
+void wg_peer_put(struct wg_peer *peer)
+{
+ if (unlikely(!peer))
+ return;
+ kref_put(&peer->refcount, kref_release);
+}
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
new file mode 100644
index 000000000000..23af40922997
--- /dev/null
+++ b/drivers/net/wireguard/peer.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_PEER_H
+#define _WG_PEER_H
+
+#include "device.h"
+#include "noise.h"
+#include "cookie.h"
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <net/dst_cache.h>
+
+struct wg_device;
+
+struct endpoint {
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ };
+ union {
+ struct {
+ struct in_addr src4;
+ /* Essentially the same as addr6->scope_id */
+ int src_if4;
+ };
+ struct in6_addr src6;
+ };
+};
+
+struct wg_peer {
+ struct wg_device *device;
+ struct crypt_queue tx_queue, rx_queue;
+ struct sk_buff_head staged_packet_queue;
+ int serial_work_cpu;
+ struct noise_keypairs keypairs;
+ struct endpoint endpoint;
+ struct dst_cache endpoint_cache;
+ rwlock_t endpoint_lock;
+ struct noise_handshake handshake;
+ atomic64_t last_sent_handshake;
+ struct work_struct transmit_handshake_work, clear_peer_work;
+ struct cookie latest_cookie;
+ struct hlist_node pubkey_hash;
+ u64 rx_bytes, tx_bytes;
+ struct timer_list timer_retransmit_handshake, timer_send_keepalive;
+ struct timer_list timer_new_handshake, timer_zero_key_material;
+ struct timer_list timer_persistent_keepalive;
+ unsigned int timer_handshake_attempts;
+ u16 persistent_keepalive_interval;
+ bool timer_need_another_keepalive;
+ bool sent_lastminute_handshake;
+ struct timespec64 walltime_last_handshake;
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct list_head peer_list;
+ struct list_head allowedips_list;
+ u64 internal_id;
+ struct napi_struct napi;
+ bool is_dead;
+};
+
+struct wg_peer *wg_peer_create(struct wg_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]);
+
+struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer);
+static inline struct wg_peer *wg_peer_get(struct wg_peer *peer)
+{
+ kref_get(&peer->refcount);
+ return peer;
+}
+void wg_peer_put(struct wg_peer *peer);
+void wg_peer_remove(struct wg_peer *peer);
+void wg_peer_remove_all(struct wg_device *wg);
+
+#endif /* _WG_PEER_H */
diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
new file mode 100644
index 000000000000..e4deb331476b
--- /dev/null
+++ b/drivers/net/wireguard/peerlookup.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "peerlookup.h"
+#include "peer.h"
+#include "noise.h"
+
+static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+{
+ /* siphash gives us a secure 64-bit number based on a random key. Since
+ * the bits are uniformly distributed, we can then mask off to get the
+ * bits we need.
+ */
+ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
+
+ return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
+{
+ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ get_random_bytes(&table->key, sizeof(table->key));
+ hash_init(table->hashtable);
+ mutex_init(&table->lock);
+ return table;
+}
+
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wg_peer *peer)
+{
+ mutex_lock(&table->lock);
+ hlist_add_head_rcu(&peer->pubkey_hash,
+ pubkey_bucket(table, peer->handshake.remote_static));
+ mutex_unlock(&table->lock);
+}
+
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wg_peer *peer)
+{
+ mutex_lock(&table->lock);
+ hlist_del_init_rcu(&peer->pubkey_hash);
+ mutex_unlock(&table->lock);
+}
+
+/* Returns a strong reference to a peer */
+struct wg_peer *
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+{
+ struct wg_peer *iter_peer, *peer = NULL;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
+ pubkey_hash) {
+ if (!memcmp(pubkey, iter_peer->handshake.remote_static,
+ NOISE_PUBLIC_KEY_LEN)) {
+ peer = iter_peer;
+ break;
+ }
+ }
+ peer = wg_peer_get_maybe_zero(peer);
+ rcu_read_unlock_bh();
+ return peer;
+}
+
+static struct hlist_head *index_bucket(struct index_hashtable *table,
+ const __le32 index)
+{
+ /* Since the indices are random and thus all bits are uniformly
+ * distributed, we can find its bucket simply by masking.
+ */
+ return &table->hashtable[(__force u32)index &
+ (HASH_SIZE(table->hashtable) - 1)];
+}
+
+struct index_hashtable *wg_index_hashtable_alloc(void)
+{
+ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ hash_init(table->hashtable);
+ spin_lock_init(&table->lock);
+ return table;
+}
+
+/* At the moment, we limit ourselves to 2^20 total peers, which generally might
+ * amount to 2^20*3 items in this hashtable. The algorithm below works by
+ * picking a random number and testing it. We can see that these limits mean we
+ * usually succeed pretty quickly:
+ *
+ * >>> def calculation(tries, size):
+ * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
+ * ...
+ * >>> calculation(1, 2**20 * 3)
+ * 0.999267578125
+ * >>> calculation(2, 2**20 * 3)
+ * 0.0007318854331970215
+ * >>> calculation(3, 2**20 * 3)
+ * 5.360489012673497e-07
+ * >>> calculation(4, 2**20 * 3)
+ * 3.9261394135792216e-10
+ *
+ * At the moment, we don't do any masking, so this algorithm isn't exactly
+ * constant time in either the random guessing or the hash list lookup. We
+ * could require a minimum of 3 tries, which would successfully mask the
+ * guessing. This would not, however, help with the growing hash lengths, which
+ * is another thing to consider moving forward.
+ */
+
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
+{
+ struct index_hashtable_entry *existing_entry;
+
+ spin_lock_bh(&table->lock);
+ hlist_del_init_rcu(&entry->index_hash);
+ spin_unlock_bh(&table->lock);
+
+ rcu_read_lock_bh();
+
+search_unused_slot:
+ /* First we try to find an unused slot, randomly, while unlocked. */
+ entry->index = (__force __le32)get_random_u32();
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
+ if (existing_entry->index == entry->index)
+ /* If it's already in use, we continue searching. */
+ goto search_unused_slot;
+ }
+
+ /* Once we've found an unused slot, we lock it, and then double-check
+ * that nobody else stole it from us.
+ */
+ spin_lock_bh(&table->lock);
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
+ if (existing_entry->index == entry->index) {
+ spin_unlock_bh(&table->lock);
+ /* If it was stolen, we start over. */
+ goto search_unused_slot;
+ }
+ }
+ /* Otherwise, we know we have it exclusively (since we're locked),
+ * so we insert.
+ */
+ hlist_add_head_rcu(&entry->index_hash,
+ index_bucket(table, entry->index));
+ spin_unlock_bh(&table->lock);
+
+ rcu_read_unlock_bh();
+
+ return entry->index;
+}
+
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
+{
+ if (unlikely(hlist_unhashed(&old->index_hash)))
+ return false;
+ spin_lock_bh(&table->lock);
+ new->index = old->index;
+ hlist_replace_rcu(&old->index_hash, &new->index_hash);
+
+ /* Calling init here NULLs out index_hash, and in fact after this
+ * function returns, it's theoretically possible for this to get
+ * reinserted elsewhere. That means the RCU lookup below might either
+ * terminate early or jump between buckets, in which case the packet
+ * simply gets dropped, which isn't terrible.
+ */
+ INIT_HLIST_NODE(&old->index_hash);
+ spin_unlock_bh(&table->lock);
+ return true;
+}
+
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
+{
+ spin_lock_bh(&table->lock);
+ hlist_del_init_rcu(&entry->index_hash);
+ spin_unlock_bh(&table->lock);
+}
+
+/* Returns a strong reference to an entry->peer */
+struct index_hashtable_entry *
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wg_peer **peer)
+{
+ struct index_hashtable_entry *iter_entry, *entry = NULL;
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
+ index_hash) {
+ if (iter_entry->index == index) {
+ if (likely(iter_entry->type & type_mask))
+ entry = iter_entry;
+ break;
+ }
+ }
+ if (likely(entry)) {
+ entry->peer = wg_peer_get_maybe_zero(entry->peer);
+ if (likely(entry->peer))
+ *peer = entry->peer;
+ else
+ entry = NULL;
+ }
+ rcu_read_unlock_bh();
+ return entry;
+}
diff --git a/drivers/net/wireguard/peerlookup.h b/drivers/net/wireguard/peerlookup.h
new file mode 100644
index 000000000000..ced811797680
--- /dev/null
+++ b/drivers/net/wireguard/peerlookup.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_PEERLOOKUP_H
+#define _WG_PEERLOOKUP_H
+
+#include "messages.h"
+
+#include <linux/hashtable.h>
+#include <linux/mutex.h>
+#include <linux/siphash.h>
+
+struct wg_peer;
+
+struct pubkey_hashtable {
+ /* TODO: move to rhashtable */
+ DECLARE_HASHTABLE(hashtable, 11);
+ siphash_key_t key;
+ struct mutex lock;
+};
+
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void);
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wg_peer *peer);
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wg_peer *peer);
+struct wg_peer *
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]);
+
+struct index_hashtable {
+ /* TODO: move to rhashtable */
+ DECLARE_HASHTABLE(hashtable, 13);
+ spinlock_t lock;
+};
+
+enum index_hashtable_type {
+ INDEX_HASHTABLE_HANDSHAKE = 1U << 0,
+ INDEX_HASHTABLE_KEYPAIR = 1U << 1
+};
+
+struct index_hashtable_entry {
+ struct wg_peer *peer;
+ struct hlist_node index_hash;
+ enum index_hashtable_type type;
+ __le32 index;
+};
+
+struct index_hashtable *wg_index_hashtable_alloc(void);
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new);
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+struct index_hashtable_entry *
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wg_peer **peer);
+
+#endif /* _WG_PEERLOOKUP_H */
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
new file mode 100644
index 000000000000..5c964fcb994e
--- /dev/null
+++ b/drivers/net/wireguard/queueing.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+
+struct multicore_worker __percpu *
+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+{
+ int cpu;
+ struct multicore_worker __percpu *worker =
+ alloc_percpu(struct multicore_worker);
+
+ if (!worker)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu_ptr(worker, cpu)->ptr = ptr;
+ INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
+ }
+ return worker;
+}
+
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len)
+{
+ int ret;
+
+ memset(queue, 0, sizeof(*queue));
+ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
+ if (ret)
+ return ret;
+ if (function) {
+ if (multicore) {
+ queue->worker = wg_packet_percpu_multicore_worker_alloc(
+ function, queue);
+ if (!queue->worker)
+ return -ENOMEM;
+ } else {
+ INIT_WORK(&queue->work, function);
+ }
+ }
+ return 0;
+}
+
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+{
+ if (multicore)
+ free_percpu(queue->worker);
+ WARN_ON(!__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, NULL);
+}
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
new file mode 100644
index 000000000000..fecb559cbdb6
--- /dev/null
+++ b/drivers/net/wireguard/queueing.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_QUEUEING_H
+#define _WG_QUEUEING_H
+
+#include "peer.h"
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+struct wg_device;
+struct wg_peer;
+struct multicore_worker;
+struct crypt_queue;
+struct sk_buff;
+
+/* queueing.c APIs: */
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+struct multicore_worker __percpu *
+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
+
+/* receive.c APIs: */
+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
+void wg_packet_handshake_receive_worker(struct work_struct *work);
+/* NAPI poll function: */
+int wg_packet_rx_poll(struct napi_struct *napi, int budget);
+/* Workqueue worker: */
+void wg_packet_decrypt_worker(struct work_struct *work);
+
+/* send.c APIs: */
+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
+ bool is_retry);
+void wg_packet_send_handshake_response(struct wg_peer *peer);
+void wg_packet_send_handshake_cookie(struct wg_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index);
+void wg_packet_send_keepalive(struct wg_peer *peer);
+void wg_packet_purge_staged_packets(struct wg_peer *peer);
+void wg_packet_send_staged_packets(struct wg_peer *peer);
+/* Workqueue workers: */
+void wg_packet_handshake_send_worker(struct work_struct *work);
+void wg_packet_tx_worker(struct work_struct *work);
+void wg_packet_encrypt_worker(struct work_struct *work);
+
+enum packet_state {
+ PACKET_STATE_UNCRYPTED,
+ PACKET_STATE_CRYPTED,
+ PACKET_STATE_DEAD
+};
+
+struct packet_cb {
+ u64 nonce;
+ struct noise_keypair *keypair;
+ atomic_t state;
+ u32 mtu;
+ u8 ds;
+};
+
+#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
+#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
+{
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct iphdr)) <=
+ skb_tail_pointer(skb) &&
+ ip_hdr(skb)->version == 4)
+ return htons(ETH_P_IP);
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
+ skb_tail_pointer(skb) &&
+ ipv6_hdr(skb)->version == 6)
+ return htons(ETH_P_IPV6);
+ return 0;
+}
+
+static inline void wg_reset_packet(struct sk_buff *skb)
+{
+ skb_scrub_packet(skb, true);
+ memset(&skb->headers_start, 0,
+ offsetof(struct sk_buff, headers_end) -
+ offsetof(struct sk_buff, headers_start));
+ skb->queue_mapping = 0;
+ skb->nohdr = 0;
+ skb->peeked = 0;
+ skb->mac_len = 0;
+ skb->dev = NULL;
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+ skb_reset_tc(skb);
+#endif
+ skb->hdr_len = skb_headroom(skb);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb_probe_transport_header(skb);
+ skb_reset_inner_headers(skb);
+}
+
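+/* Map an id to a stable online CPU: reuse the CPU cached in *stored_cpu as
+ * long as it is still online; otherwise pick the (id % num-online)-th
+ * online CPU and cache that choice.
+ */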
+static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+{
+ unsigned int cpu = *stored_cpu, cpu_index, i;
+
+ if (unlikely(cpu == nr_cpumask_bits ||
+ !cpumask_test_cpu(cpu, cpu_online_mask))) {
+ cpu_index = id % cpumask_weight(cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < cpu_index; ++i)
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ *stored_cpu = cpu;
+ }
+ return cpu;
+}
+
+/* This function is racy, in the sense that next is unlocked, so it could return
+ * the same CPU twice. A race-free version of this would be to instead store an
+ * atomic sequence number, do an increment-and-return, and then iterate through
+ * every possible CPU until we get to that index, much as
+ * wg_cpumask_choose_online does above. However, that's a bit slower, and it
+ * doesn't seem like this potential race actually
+ * introduces any performance loss, so we live with it.
+ */
+static inline int wg_cpumask_next_online(int *next)
+{
+ int cpu = *next;
+
+ while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
+ cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ return cpu;
+}
+
+static inline int wg_queue_enqueue_per_device_and_peer(
+ struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+{
+ int cpu;
+
+ atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
+ /* We first queue this up for peer ingestion, but the consumer
+ * will wait for the state to change to CRYPTED or DEAD before
+ * ingesting it.
+ */
+ if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+ return -ENOSPC;
+ /* Then we queue it up in the device queue, which consumes the
+ * packet as soon as it can.
+ */
+ cpu = wg_cpumask_next_online(next_cpu);
+ if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
+ return -EPIPE;
+ queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
+ return 0;
+}
+
+static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+ struct sk_buff *skb,
+ enum packet_state state)
+{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+ */
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
+ peer->internal_id),
+ peer->device->packet_crypt_wq, &queue->work);
+ wg_peer_put(peer);
+}
+
+static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
+ enum packet_state state)
+{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+ */
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+ napi_schedule(&peer->napi);
+ wg_peer_put(peer);
+}
+
+#ifdef DEBUG
+bool wg_packet_counter_selftest(void);
+#endif
+
+#endif /* _WG_QUEUEING_H */
diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c
new file mode 100644
index 000000000000..3fedd1d21f5e
--- /dev/null
+++ b/drivers/net/wireguard/ratelimiter.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "ratelimiter.h"
+#include <linux/siphash.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+
+static struct kmem_cache *entry_cache;
+static hsiphash_key_t key;
+static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock");
+static DEFINE_MUTEX(init_lock);
+static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */
+static atomic_t total_entries = ATOMIC_INIT(0);
+static unsigned int max_entries, table_size;
+static void wg_ratelimiter_gc_entries(struct work_struct *);
+static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
+static struct hlist_head *table_v4;
+#if IS_ENABLED(CONFIG_IPV6)
+static struct hlist_head *table_v6;
+#endif
+
+struct ratelimiter_entry {
+ u64 last_time_ns, tokens, ip;
+ void *net;
+ spinlock_t lock;
+ struct hlist_node hash;
+ struct rcu_head rcu;
+};
+
+enum {
+ PACKETS_PER_SECOND = 20,
+ PACKETS_BURSTABLE = 5,
+ PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND,
+ TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
+};
+
+static void entry_free(struct rcu_head *rcu)
+{
+ kmem_cache_free(entry_cache,
+ container_of(rcu, struct ratelimiter_entry, rcu));
+ atomic_dec(&total_entries);
+}
+
+static void entry_uninit(struct ratelimiter_entry *entry)
+{
+ hlist_del_rcu(&entry->hash);
+ call_rcu(&entry->rcu, entry_free);
+}
+
+/* Calling this function with a NULL work uninits all entries. */
+static void wg_ratelimiter_gc_entries(struct work_struct *work)
+{
+ const u64 now = ktime_get_coarse_boottime_ns();
+ struct ratelimiter_entry *entry;
+ struct hlist_node *temp;
+ unsigned int i;
+
+ for (i = 0; i < table_size; ++i) {
+ spin_lock(&table_lock);
+ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
+ if (unlikely(!work) ||
+ now - entry->last_time_ns > NSEC_PER_SEC)
+ entry_uninit(entry);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
+ if (unlikely(!work) ||
+ now - entry->last_time_ns > NSEC_PER_SEC)
+ entry_uninit(entry);
+ }
+#endif
+ spin_unlock(&table_lock);
+ if (likely(work))
+ cond_resched();
+ }
+ if (likely(work))
+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
+}
+
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
+{
+ /* We only take the bottom half of the net pointer, so that we can hash
+ * 3 words in the end. This way, siphash's len param fits into the final
+ * u32, and we don't incur an extra round.
+ */
+ const u32 net_word = (unsigned long)net;
+ struct ratelimiter_entry *entry;
+ struct hlist_head *bucket;
+ u64 ip;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip = (u64 __force)ip_hdr(skb)->saddr;
+ bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &
+ (table_size - 1)];
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* Only use 64 bits, so as to ratelimit the whole /64. */
+ memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip));
+ bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &
+ (table_size - 1)];
+ }
+#endif
+ else
+ return false;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, bucket, hash) {
+ if (entry->net == net && entry->ip == ip) {
+ u64 now, tokens;
+ bool ret;
+ /* Quasi-inspired by nft_limit.c, but this is actually a
+ * slightly different algorithm. Namely, we incorporate
+ * the burst as part of the maximum tokens, rather than
+ * as part of the rate.
+ */
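+ /* With the constants above, PACKET_COST is 50ms worth
+ * of nanoseconds and TOKEN_MAX is 250ms worth, so an
+ * idle source may burst PACKETS_BURSTABLE (5) packets
+ * and is then limited to PACKETS_PER_SECOND (20).
+ */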
+ spin_lock(&entry->lock);
+ now = ktime_get_coarse_boottime_ns();
+ tokens = min_t(u64, TOKEN_MAX,
+ entry->tokens + now -
+ entry->last_time_ns);
+ entry->last_time_ns = now;
+ ret = tokens >= PACKET_COST;
+ entry->tokens = ret ? tokens - PACKET_COST : tokens;
+ spin_unlock(&entry->lock);
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ if (atomic_inc_return(&total_entries) > max_entries)
+ goto err_oom;
+
+ entry = kmem_cache_alloc(entry_cache, GFP_KERNEL);
+ if (unlikely(!entry))
+ goto err_oom;
+
+ entry->net = net;
+ entry->ip = ip;
+ INIT_HLIST_NODE(&entry->hash);
+ spin_lock_init(&entry->lock);
+ entry->last_time_ns = ktime_get_coarse_boottime_ns();
+ entry->tokens = TOKEN_MAX - PACKET_COST;
+ spin_lock(&table_lock);
+ hlist_add_head_rcu(&entry->hash, bucket);
+ spin_unlock(&table_lock);
+ return true;
+
+err_oom:
+ atomic_dec(&total_entries);
+ return false;
+}
+
+int wg_ratelimiter_init(void)
+{
+ mutex_lock(&init_lock);
+ if (++init_refcnt != 1)
+ goto out;
+
+ entry_cache = KMEM_CACHE(ratelimiter_entry, 0);
+ if (!entry_cache)
+ goto err;
+
+ /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting,
+ * but it too relies on a massive hashtable. So, we borrow its wisdom
+ * about good table sizes on different systems, dependent on RAM. This
+ * calculation here comes from there.
+ */
+ table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
+ max_t(unsigned long, 16, roundup_pow_of_two(
+ (totalram_pages() << PAGE_SHIFT) /
+ (1U << 14) / sizeof(struct hlist_head)));
+ max_entries = table_size * 8;
+
+ table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+ if (unlikely(!table_v4))
+ goto err_kmemcache;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+ if (unlikely(!table_v6)) {
+ kvfree(table_v4);
+ goto err_kmemcache;
+ }
+#endif
+
+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
+ get_random_bytes(&key, sizeof(key));
+out:
+ mutex_unlock(&init_lock);
+ return 0;
+
+err_kmemcache:
+ kmem_cache_destroy(entry_cache);
+err:
+ --init_refcnt;
+ mutex_unlock(&init_lock);
+ return -ENOMEM;
+}
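+
+/* Worked example of the sizing above, assuming 64-bit pointers so that
+ * sizeof(struct hlist_head) == 8: with more than 1 GiB of RAM the table
+ * is pinned at 8192 buckets (max_entries = 65536); below that it scales
+ * as RAM >> 14 / 8 rounded up to a power of two, e.g. 2048 buckets and
+ * 16384 entries on a 256 MiB machine, with a floor of 16 buckets.
+ */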
+
+void wg_ratelimiter_uninit(void)
+{
+ mutex_lock(&init_lock);
+ if (!init_refcnt || --init_refcnt)
+ goto out;
+
+ cancel_delayed_work_sync(&gc_work);
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+ kvfree(table_v4);
+#if IS_ENABLED(CONFIG_IPV6)
+ kvfree(table_v6);
+#endif
+ kmem_cache_destroy(entry_cache);
+out:
+ mutex_unlock(&init_lock);
+}
+
+#include "selftest/ratelimiter.c"
diff --git a/drivers/net/wireguard/ratelimiter.h b/drivers/net/wireguard/ratelimiter.h
new file mode 100644
index 000000000000..83067f71ea99
--- /dev/null
+++ b/drivers/net/wireguard/ratelimiter.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_RATELIMITER_H
+#define _WG_RATELIMITER_H
+
+#include <linux/skbuff.h>
+
+int wg_ratelimiter_init(void);
+void wg_ratelimiter_uninit(void);
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net);
+
+#ifdef DEBUG
+bool wg_ratelimiter_selftest(void);
+#endif
+
+#endif /* _WG_RATELIMITER_H */
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
new file mode 100644
index 000000000000..9c6bab9c981f
--- /dev/null
+++ b/drivers/net/wireguard/receive.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "device.h"
+#include "peer.h"
+#include "timers.h"
+#include "messages.h"
+#include "cookie.h"
+#include "socket.h"
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <net/ip_tunnels.h>
+
+/* Must be called with bh disabled. */
+static void update_rx_stats(struct wg_peer *peer, size_t len)
+{
+ struct pcpu_sw_netstats *tstats =
+ get_cpu_ptr(peer->device->dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ ++tstats->rx_packets;
+ tstats->rx_bytes += len;
+ peer->rx_bytes += len;
+ u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
+}
+
+#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
+
+static size_t validate_header_len(struct sk_buff *skb)
+{
+ if (unlikely(skb->len < sizeof(struct message_header)))
+ return 0;
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
+ skb->len >= MESSAGE_MINIMUM_LENGTH)
+ return sizeof(struct message_data);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
+ skb->len == sizeof(struct message_handshake_initiation))
+ return sizeof(struct message_handshake_initiation);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
+ skb->len == sizeof(struct message_handshake_response))
+ return sizeof(struct message_handshake_response);
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
+ skb->len == sizeof(struct message_handshake_cookie))
+ return sizeof(struct message_handshake_cookie);
+ return 0;
+}
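+
+/* For reference, the fixed sizes checked above come from the protocol
+ * itself (values from the WireGuard spec, not visible in this file):
+ * handshake initiations are 148 bytes on the wire, handshake responses
+ * 92 bytes, cookie replies 64 bytes, and data messages are at least
+ * MESSAGE_MINIMUM_LENGTH, i.e. a 16-byte data header plus a 16-byte
+ * authentication tag.
+ */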
+
+static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
+{
+ size_t data_offset, data_len, header_len;
+ struct udphdr *udp;
+
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
+ skb_transport_header(skb) < skb->head ||
+ (skb_transport_header(skb) + sizeof(struct udphdr)) >
+ skb_tail_pointer(skb)))
+ return -EINVAL; /* Bogus IP header */
+ udp = udp_hdr(skb);
+ data_offset = (u8 *)udp - skb->data;
+ if (unlikely(data_offset > U16_MAX ||
+ data_offset + sizeof(struct udphdr) > skb->len))
+ /* Packet has offset at impossible location or isn't big enough
+ * to have UDP fields.
+ */
+ return -EINVAL;
+ data_len = ntohs(udp->len);
+ if (unlikely(data_len < sizeof(struct udphdr) ||
+ data_len > skb->len - data_offset))
+ /* The UDP header reports a length that is either too small or
+ * larger than what the packet actually contains.
+ */
+ return -EINVAL;
+ data_len -= sizeof(struct udphdr);
+ data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
+ if (unlikely(!pskb_may_pull(skb,
+ data_offset + sizeof(struct message_header)) ||
+ pskb_trim(skb, data_len + data_offset) < 0))
+ return -EINVAL;
+ skb_pull(skb, data_offset);
+ if (unlikely(skb->len != data_len))
+ /* Final len does not agree with calculated len */
+ return -EINVAL;
+ header_len = validate_header_len(skb);
+ if (unlikely(!header_len))
+ return -EINVAL;
+ __skb_push(skb, data_offset);
+ if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
+ return -EINVAL;
+ __skb_pull(skb, data_offset);
+ return 0;
+}
+
+static void wg_receive_handshake_packet(struct wg_device *wg,
+ struct sk_buff *skb)
+{
+ enum cookie_mac_state mac_state;
+ struct wg_peer *peer = NULL;
+ /* This is global, so that our load calculation applies to the whole
+ * system. We don't care about races with it at all.
+ */
+ static u64 last_under_load;
+ bool packet_needs_cookie;
+ bool under_load;
+
+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
+ net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
+ wg->dev->name, skb);
+ wg_cookie_message_consume(
+ (struct message_handshake_cookie *)skb->data, wg);
+ return;
+ }
+
+ under_load = skb_queue_len(&wg->incoming_handshakes) >=
+ MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+ if (under_load)
+ last_under_load = ktime_get_coarse_boottime_ns();
+ else if (last_under_load)
+ under_load = !wg_birthdate_has_expired(last_under_load, 1);
+ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
+ under_load);
+ if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
+ (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
+ packet_needs_cookie = false;
+ } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
+ packet_needs_cookie = true;
+ } else {
+ net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+
+ switch (SKB_TYPE_LE32(skb)) {
+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
+ struct message_handshake_initiation *message =
+ (struct message_handshake_initiation *)skb->data;
+
+ if (packet_needs_cookie) {
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
+ return;
+ }
+ peer = wg_noise_handshake_consume_initiation(message, wg);
+ if (unlikely(!peer)) {
+ net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
+ net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
+ wg->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ wg_packet_send_handshake_response(peer);
+ break;
+ }
+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
+ struct message_handshake_response *message =
+ (struct message_handshake_response *)skb->data;
+
+ if (packet_needs_cookie) {
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
+ return;
+ }
+ peer = wg_noise_handshake_consume_response(message, wg);
+ if (unlikely(!peer)) {
+ net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
+ wg->dev->name, skb);
+ return;
+ }
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
+ net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
+ wg->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_handshake_complete(peer);
+ /* Calling this function will either send any existing
+ * packets in the queue and not send a keepalive, which
+ * is the best case. Or, if there's nothing in the
+ * queue, it will send a keepalive, in order to give
+ * immediate confirmation of the session.
+ */
+ wg_packet_send_keepalive(peer);
+ }
+ break;
+ }
+ }
+
+ if (unlikely(!peer)) {
+ WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
+ return;
+ }
+
+ local_bh_disable();
+ update_rx_stats(peer, skb->len);
+ local_bh_enable();
+
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_peer_put(peer);
+}
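+
+/* A note on the load heuristic above: the device counts as under load
+ * once the handshake queue is an eighth full, and, thanks to the
+ * wg_birthdate_has_expired(last_under_load, 1) check, it stays in that
+ * state for a further second after the queue drains, so mac2 cookies
+ * keep being demanded while a flood tapers off.
+ */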
+
+void wg_packet_handshake_receive_worker(struct work_struct *work)
+{
+ struct wg_device *wg = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+ wg_receive_handshake_packet(wg, skb);
+ dev_kfree_skb(skb);
+ cond_resched();
+ }
+}
+
+static void keep_key_fresh(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+ bool send = false;
+
+ if (peer->sent_lastminute_handshake)
+ return;
+
+ rcu_read_lock_bh();
+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
+ keypair->i_am_the_initiator &&
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
+ send = true;
+ rcu_read_unlock_bh();
+
+ if (send) {
+ peer->sent_lastminute_handshake = true;
+ wg_packet_send_queued_handshake_initiation(peer, false);
+ }
+}
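+
+/* Worked example of the opportunistic rekey above, assuming the usual
+ * protocol constants (REJECT_AFTER_TIME = 180s, KEEPALIVE_TIMEOUT = 10s,
+ * REKEY_TIMEOUT = 5s, from the spec rather than this file): an initiator
+ * still receiving traffic on a keypair older than 180 - 10 - 5 = 165
+ * seconds kicks off a new handshake, and sent_lastminute_handshake makes
+ * sure only one such last-minute handshake is sent per keypair.
+ */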
+
+static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 8];
+ struct sk_buff *trailer;
+ unsigned int offset;
+ int num_frags;
+
+ if (unlikely(!key))
+ return false;
+
+ if (unlikely(!READ_ONCE(key->is_valid) ||
+ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
+ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(key->is_valid, false);
+ return false;
+ }
+
+ PACKET_CB(skb)->nonce =
+ le64_to_cpu(((struct message_data *)skb->data)->counter);
+
+ /* We ensure that the network header is part of the packet before we
+ * call skb_cow_data, so that there's no chance that data is removed
+ * from the skb; that way we can extract the original endpoint later.
+ */
+ offset = skb->data - skb_network_header(skb);
+ skb_push(skb, offset);
+ num_frags = skb_cow_data(skb, 0, &trailer);
+ offset += sizeof(struct message_data);
+ skb_pull(skb, offset);
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
+ return false;
+
+ sg_init_table(sg, num_frags);
+ if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
+ return false;
+
+ if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
+ PACKET_CB(skb)->nonce,
+ key->key))
+ return false;
+
+ /* Another ugly situation of pushing and pulling the header so as to
+ * keep endpoint information intact.
+ */
+ skb_push(skb, offset);
+ if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
+ return false;
+ skb_pull(skb, offset);
+
+ return true;
+}
+
+/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
+static bool counter_validate(union noise_counter *counter, u64 their_counter)
+{
+ unsigned long index, index_current, top, i;
+ bool ret = false;
+
+ spin_lock_bh(&counter->receive.lock);
+
+ if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+ their_counter >= REJECT_AFTER_MESSAGES))
+ goto out;
+
+ ++their_counter;
+
+ if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
+ counter->receive.counter))
+ goto out;
+
+ index = their_counter >> ilog2(BITS_PER_LONG);
+
+ if (likely(their_counter > counter->receive.counter)) {
+ index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+ top = min_t(unsigned long, index - index_current,
+ COUNTER_BITS_TOTAL / BITS_PER_LONG);
+ for (i = 1; i <= top; ++i)
+ counter->receive.backtrack[(i + index_current) &
+ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
+ counter->receive.counter = their_counter;
+ }
+
+ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
+ ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
+ &counter->receive.backtrack[index]);
+
+out:
+ spin_unlock_bh(&counter->receive.lock);
+ return ret;
+}
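+
+/* Behavioral example, mirroring the first cases of the selftest included
+ * below: after accepting counters 0 and 1, a replayed 1 is rejected; the
+ * out-of-order sequence 9, 8, 7 is accepted because those bits are still
+ * inside the window, but a second 7 is rejected; and anything more than
+ * COUNTER_WINDOW_SIZE behind the highest counter seen is rejected
+ * outright.
+ */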
+
+#include "selftest/counter.c"
+
+static void wg_packet_consume_data_done(struct wg_peer *peer,
+ struct sk_buff *skb,
+ struct endpoint *endpoint)
+{
+ struct net_device *dev = peer->device->dev;
+ unsigned int len, len_before_trim;
+ struct wg_peer *routed_peer;
+
+ wg_socket_set_peer_endpoint(peer, endpoint);
+
+ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
+ PACKET_CB(skb)->keypair))) {
+ wg_timers_handshake_complete(peer);
+ wg_packet_send_staged_packets(peer);
+ }
+
+ keep_key_fresh(peer);
+
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+
+ /* A packet with length 0 is a keepalive packet */
+ if (unlikely(!skb->len)) {
+ update_rx_stats(peer, message_data_len(0));
+ net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ goto packet_processed;
+ }
+
+ wg_timers_data_received(peer);
+
+ if (unlikely(skb_network_header(skb) < skb->head))
+ goto dishonest_packet_size;
+ if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
+ (ip_hdr(skb)->version == 4 ||
+ (ip_hdr(skb)->version == 6 &&
+ pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
+ goto dishonest_packet_type;
+
+ skb->dev = dev;
+ /* We've already verified the Poly1305 auth tag, which means this packet
+ * was not modified in transit. We can therefore tell the networking
+ * stack that all checksums of every layer of encapsulation have already
+ * been checked "by the hardware" and that it is therefore unnecessary
+ * to check them again in software.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = ~0; /* All levels */
+ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ len = ntohs(ip_hdr(skb)->tot_len);
+ if (unlikely(len < sizeof(struct iphdr)))
+ goto dishonest_packet_size;
+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
+ IP_ECN_set_ce(ip_hdr(skb));
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ len = ntohs(ipv6_hdr(skb)->payload_len) +
+ sizeof(struct ipv6hdr);
+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
+ IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ } else {
+ goto dishonest_packet_type;
+ }
+
+ if (unlikely(len > skb->len))
+ goto dishonest_packet_size;
+ len_before_trim = skb->len;
+ if (unlikely(pskb_trim(skb, len)))
+ goto packet_processed;
+
+ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
+ skb);
+ wg_peer_put(routed_peer); /* We don't need the extra reference. */
+
+ if (unlikely(routed_peer != peer))
+ goto dishonest_packet_peer;
+
+ if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
+ ++dev->stats.rx_dropped;
+ net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ } else {
+ update_rx_stats(peer, message_data_len(len_before_trim));
+ }
+ return;
+
+dishonest_packet_peer:
+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ dev->name, skb, peer->internal_id,
+ &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_frame_errors;
+ goto packet_processed;
+dishonest_packet_type:
+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_frame_errors;
+ goto packet_processed;
+dishonest_packet_size:
+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_length_errors;
+ goto packet_processed;
+packet_processed:
+ dev_kfree_skb(skb);
+}
+
+int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
+ struct crypt_queue *queue = &peer->rx_queue;
+ struct noise_keypair *keypair;
+ struct endpoint endpoint;
+ enum packet_state state;
+ struct sk_buff *skb;
+ int work_done = 0;
+ bool free;
+
+ if (unlikely(budget <= 0))
+ return 0;
+
+ while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+ __ptr_ring_discard_one(&queue->ring);
+ peer = PACKET_PEER(skb);
+ keypair = PACKET_CB(skb)->keypair;
+ free = true;
+
+ if (unlikely(state != PACKET_STATE_CRYPTED))
+ goto next;
+
+ if (unlikely(!counter_validate(&keypair->receiving.counter,
+ PACKET_CB(skb)->nonce))) {
+ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
+ peer->device->dev->name,
+ PACKET_CB(skb)->nonce,
+ keypair->receiving.counter.receive.counter);
+ goto next;
+ }
+
+ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
+ goto next;
+
+ wg_reset_packet(skb);
+ wg_packet_consume_data_done(peer, skb, &endpoint);
+ free = false;
+
+next:
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
+ if (unlikely(free))
+ dev_kfree_skb(skb);
+
+ if (++work_done >= budget)
+ break;
+ }
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
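+
+/* This follows the usual NAPI contract: consume at most @budget packets,
+ * and only call napi_complete_done() when the ring runs dry before the
+ * budget does, so the kernel keeps polling while decrypted packets are
+ * still queued up.
+ */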
+
+void wg_packet_decrypt_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *skb;
+
+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
+ enum packet_state state = likely(decrypt_packet(skb,
+ &PACKET_CB(skb)->keypair->receiving)) ?
+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
+ wg_queue_enqueue_per_peer_napi(skb, state);
+ }
+}
+
+static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
+{
+ __le32 idx = ((struct message_data *)skb->data)->key_idx;
+ struct wg_peer *peer = NULL;
+ int ret;
+
+ rcu_read_lock_bh();
+ PACKET_CB(skb)->keypair =
+ (struct noise_keypair *)wg_index_hashtable_lookup(
+ wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
+ &peer);
+ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
+ goto err_keypair;
+
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
+ &peer->rx_queue, skb,
+ wg->packet_crypt_wq,
+ &wg->decrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+ wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
+ if (likely(!ret || ret == -EPIPE)) {
+ rcu_read_unlock_bh();
+ return;
+ }
+err:
+ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
+err_keypair:
+ rcu_read_unlock_bh();
+ wg_peer_put(peer);
+ dev_kfree_skb(skb);
+}
+
+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
+{
+ if (unlikely(prepare_skb_header(skb, wg) < 0))
+ goto err;
+ switch (SKB_TYPE_LE32(skb)) {
+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
+ case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
+ int cpu;
+
+ if (skb_queue_len(&wg->incoming_handshakes) >
+ MAX_QUEUED_INCOMING_HANDSHAKES ||
+ unlikely(!rng_is_initialized())) {
+ net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ goto err;
+ }
+ skb_queue_tail(&wg->incoming_handshakes, skb);
+ /* Queues up a call to wg_packet_handshake_receive_worker(): */
+ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+ queue_work_on(cpu, wg->handshake_receive_wq,
+ &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+ break;
+ }
+ case cpu_to_le32(MESSAGE_DATA):
+ PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+ wg_packet_consume_data(wg, skb);
+ break;
+ default:
+ net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
+ wg->dev->name, skb);
+ goto err;
+ }
+ return;
+
+err:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
new file mode 100644
index 000000000000..846db14cb046
--- /dev/null
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This contains some basic static unit tests for the allowedips data structure.
+ * It also has two additional modes that are disabled and meant to be used by
+ * folks directly playing with this file. If you define the macro
+ * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
+ * memory, it will be printed out as KERN_DEBUG in a format that can be passed
+ * to graphviz (the dot command) to visualize it. If you define the macro
+ * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
+ * randomized tests done against a trivial implementation, which may take
+ * upwards of a half-hour to complete. There's no set of users who should be
+ * enabling these, and the only developers that should go anywhere near these
+ * knobs are the ones who are reading this comment.
+ */
+
+#ifdef DEBUG
+
+#include <linux/siphash.h>
+
+static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
+ u8 cidr)
+{
+ swap_endian(dst, src, bits);
+ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
+ if (cidr)
+ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
+}
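+
+/* Example of the masking above: applying cidr 21 to the big-endian
+ * address beginning 0x26075300 keeps only the first 21 bits, giving
+ * 0x26075000, which is exactly what the iteration test at the bottom of
+ * this file expects for its /21 entry.
+ */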
+
+static __init void print_node(struct allowedips_node *node, u8 bits)
+{
+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
+ char *fmt_declaration = KERN_DEBUG
+ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ char *style = "dotted";
+ u8 ip1[16], ip2[16];
+ u32 color = 0;
+
+ if (bits == 32) {
+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
+ fmt_declaration = KERN_DEBUG
+ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ } else if (bits == 128) {
+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
+ fmt_declaration = KERN_DEBUG
+ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ }
+ if (node->peer) {
+ hsiphash_key_t key = { { 0 } };
+
+ memcpy(&key, &node->peer, sizeof(node->peer));
+ color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
+ hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
+ hsiphash_1u32(0xabad1dea, &key) % 200;
+ style = "bold";
+ }
+ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
+ printk(fmt_declaration, ip1, node->cidr, style, color);
+ if (node->bit[0]) {
+ swap_endian_and_apply_cidr(ip2,
+ rcu_dereference_raw(node->bit[0])->bits, bits,
+ node->cidr);
+ printk(fmt_connection, ip1, node->cidr, ip2,
+ rcu_dereference_raw(node->bit[0])->cidr);
+ print_node(rcu_dereference_raw(node->bit[0]), bits);
+ }
+ if (node->bit[1]) {
+ swap_endian_and_apply_cidr(ip2,
+ rcu_dereference_raw(node->bit[1])->bits,
+ bits, node->cidr);
+ printk(fmt_connection, ip1, node->cidr, ip2,
+ rcu_dereference_raw(node->bit[1])->cidr);
+ print_node(rcu_dereference_raw(node->bit[1]), bits);
+ }
+}
+
+static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
+{
+ printk(KERN_DEBUG "digraph trie {\n");
+ print_node(rcu_dereference_raw(top), bits);
+ printk(KERN_DEBUG "}\n");
+}
+
+enum {
+ NUM_PEERS = 2000,
+ NUM_RAND_ROUTES = 400,
+ NUM_MUTATED_ROUTES = 100,
+ NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
+};
+
+struct horrible_allowedips {
+ struct hlist_head head;
+};
+
+struct horrible_allowedips_node {
+ struct hlist_node table;
+ union nf_inet_addr ip;
+ union nf_inet_addr mask;
+ u8 ip_version;
+ void *value;
+};
+
+static __init void horrible_allowedips_init(struct horrible_allowedips *table)
+{
+ INIT_HLIST_HEAD(&table->head);
+}
+
+static __init void horrible_allowedips_free(struct horrible_allowedips *table)
+{
+ struct horrible_allowedips_node *node;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
+ hlist_del(&node->table);
+ kfree(node);
+ }
+}
+
+static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
+{
+ union nf_inet_addr mask;
+
+ memset(&mask, 0x00, 128 / 8);
+ memset(&mask, 0xff, cidr / 8);
+ if (cidr % 32)
+ mask.all[cidr / 32] = (__force u32)htonl(
+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
+ return mask;
+}
+
+static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
+{
+ return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
+ hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
+}
+
+static __init inline void
+horrible_mask_self(struct horrible_allowedips_node *node)
+{
+ if (node->ip_version == 4) {
+ node->ip.ip &= node->mask.ip;
+ } else if (node->ip_version == 6) {
+ node->ip.ip6[0] &= node->mask.ip6[0];
+ node->ip.ip6[1] &= node->mask.ip6[1];
+ node->ip.ip6[2] &= node->mask.ip6[2];
+ node->ip.ip6[3] &= node->mask.ip6[3];
+ }
+}
+
+static __init inline bool
+horrible_match_v4(const struct horrible_allowedips_node *node,
+ struct in_addr *ip)
+{
+ return (ip->s_addr & node->mask.ip) == node->ip.ip;
+}
+
+static __init inline bool
+horrible_match_v6(const struct horrible_allowedips_node *node,
+ struct in6_addr *ip)
+{
+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
+ node->ip.ip6[0] &&
+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
+ node->ip.ip6[1] &&
+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
+ node->ip.ip6[2] &&
+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
+}
+
+static __init void
+horrible_insert_ordered(struct horrible_allowedips *table,
+ struct horrible_allowedips_node *node)
+{
+ struct horrible_allowedips_node *other = NULL, *where = NULL;
+ u8 my_cidr = horrible_mask_to_cidr(node->mask);
+
+ hlist_for_each_entry(other, &table->head, table) {
+ if (!memcmp(&other->mask, &node->mask,
+ sizeof(union nf_inet_addr)) &&
+ !memcmp(&other->ip, &node->ip,
+ sizeof(union nf_inet_addr)) &&
+ other->ip_version == node->ip_version) {
+ other->value = node->value;
+ kfree(node);
+ return;
+ }
+ where = other;
+ if (horrible_mask_to_cidr(other->mask) <= my_cidr)
+ break;
+ }
+ if (!other && !where)
+ hlist_add_head(&node->table, &table->head);
+ else if (!other)
+ hlist_add_behind(&node->table, &where->table);
+ else
+ hlist_add_before(&node->table, &where->table);
+}
+
+static __init int
+horrible_allowedips_insert_v4(struct horrible_allowedips *table,
+ struct in_addr *ip, u8 cidr, void *value)
+{
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+ node->ip.in = *ip;
+ node->mask = horrible_cidr_to_mask(cidr);
+ node->ip_version = 4;
+ node->value = value;
+ horrible_mask_self(node);
+ horrible_insert_ordered(table, node);
+ return 0;
+}
+
+static __init int
+horrible_allowedips_insert_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip, u8 cidr, void *value)
+{
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
+
+ if (unlikely(!node))
+ return -ENOMEM;
+ node->ip.in6 = *ip;
+ node->mask = horrible_cidr_to_mask(cidr);
+ node->ip_version = 6;
+ node->value = value;
+ horrible_mask_self(node);
+ horrible_insert_ordered(table, node);
+ return 0;
+}
+
+static __init void *
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
+ struct in_addr *ip)
+{
+ struct horrible_allowedips_node *node;
+ void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+ if (node->ip_version != 4)
+ continue;
+ if (horrible_match_v4(node, ip)) {
+ ret = node->value;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init void *
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
+ struct in6_addr *ip)
+{
+ struct horrible_allowedips_node *node;
+ void *ret = NULL;
+
+ hlist_for_each_entry(node, &table->head, table) {
+ if (node->ip_version != 6)
+ continue;
+ if (horrible_match_v6(node, ip)) {
+ ret = node->value;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init bool randomized_test(void)
+{
+ unsigned int i, j, k, mutate_amount, cidr;
+ u8 ip[16], mutate_mask[16], mutated[16];
+ struct wg_peer **peers, *peer;
+ struct horrible_allowedips h;
+ DEFINE_MUTEX(mutex);
+ struct allowedips t;
+ bool ret = false;
+
+ mutex_init(&mutex);
+
+ wg_allowedips_init(&t);
+ horrible_allowedips_init(&h);
+
+ peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
+ if (unlikely(!peers)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free;
+ }
+ for (i = 0; i < NUM_PEERS; ++i) {
+ peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
+ if (unlikely(!peers[i])) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free;
+ }
+ kref_init(&peers[i]->refcount);
+ }
+
+ mutex_lock(&mutex);
+
+ for (i = 0; i < NUM_RAND_ROUTES; ++i) {
+ prandom_bytes(ip, 4);
+ cidr = prandom_u32_max(32) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
+ peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
+ cidr, peer) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
+ memcpy(mutated, ip, 4);
+ prandom_bytes(mutate_mask, 4);
+ mutate_amount = prandom_u32_max(32);
+ for (k = 0; k < mutate_amount / 8; ++k)
+ mutate_mask[k] = 0xff;
+ mutate_mask[k] = 0xff
+ << ((8 - (mutate_amount % 8)) % 8);
+ for (; k < 4; ++k)
+ mutate_mask[k] = 0;
+ for (k = 0; k < 4; ++k)
+ mutated[k] = (mutated[k] & mutate_mask[k]) |
+ (~mutate_mask[k] &
+ prandom_u32_max(256));
+ cidr = prandom_u32_max(32) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v4(&t,
+ (struct in_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+ pr_err("allowedips random malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v4(&h,
+ (struct in_addr *)mutated, cidr, peer)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ }
+ }
+
+ for (i = 0; i < NUM_RAND_ROUTES; ++i) {
+ prandom_bytes(ip, 16);
+ cidr = prandom_u32_max(128) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
+ peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
+ cidr, peer) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
+ memcpy(mutated, ip, 16);
+ prandom_bytes(mutate_mask, 16);
+ mutate_amount = prandom_u32_max(128);
+ for (k = 0; k < mutate_amount / 8; ++k)
+ mutate_mask[k] = 0xff;
+ mutate_mask[k] = 0xff
+ << ((8 - (mutate_amount % 8)) % 8);
+ for (; k < 4; ++k)
+ mutate_mask[k] = 0;
+ for (k = 0; k < 4; ++k)
+ mutated[k] = (mutated[k] & mutate_mask[k]) |
+ (~mutate_mask[k] &
+ prandom_u32_max(256));
+ cidr = prandom_u32_max(128) + 1;
+ peer = peers[prandom_u32_max(NUM_PEERS)];
+ if (wg_allowedips_insert_v6(&t,
+ (struct in6_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ if (horrible_allowedips_insert_v6(
+ &h, (struct in6_addr *)mutated, cidr,
+ peer)) {
+ pr_err("allowedips random self-test malloc: FAIL\n");
+ goto free_locked;
+ }
+ }
+ }
+
+ mutex_unlock(&mutex);
+
+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
+ print_tree(t.root4, 32);
+ print_tree(t.root6, 128);
+ }
+
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 4);
+ if (lookup(t.root4, 32, ip) !=
+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+ pr_err("allowedips random self-test: FAIL\n");
+ goto free;
+ }
+ }
+
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 16);
+ if (lookup(t.root6, 128, ip) !=
+ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+ pr_err("allowedips random self-test: FAIL\n");
+ goto free;
+ }
+ }
+ ret = true;
+
+free:
+ mutex_lock(&mutex);
+free_locked:
+ wg_allowedips_free(&t, &mutex);
+ mutex_unlock(&mutex);
+ horrible_allowedips_free(&h);
+ if (peers) {
+ for (i = 0; i < NUM_PEERS; ++i)
+ kfree(peers[i]);
+ }
+ kfree(peers);
+ return ret;
+}
+
+static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
+{
+ static struct in_addr ip;
+ u8 *split = (u8 *)&ip;
+
+ split[0] = a;
+ split[1] = b;
+ split[2] = c;
+ split[3] = d;
+ return &ip;
+}
+
+static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
+{
+ static struct in6_addr ip;
+ __be32 *split = (__be32 *)&ip;
+
+ split[0] = cpu_to_be32(a);
+ split[1] = cpu_to_be32(b);
+ split[2] = cpu_to_be32(c);
+ split[3] = cpu_to_be32(d);
+ return &ip;
+}
+
+static __init struct wg_peer *init_peer(void)
+{
+ struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+
+ if (!peer)
+ return NULL;
+ kref_init(&peer->refcount);
+ INIT_LIST_HEAD(&peer->allowedips_list);
+ return peer;
+}
+
+#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
+
+#define maybe_fail() do { \
+ ++i; \
+ if (!_s) { \
+ pr_info("allowedips self-test %zu: FAIL\n", i); \
+ success = false; \
+ } \
+ } while (0)
+
+#define test(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) == (mem); \
+ maybe_fail(); \
+ } while (0)
+
+#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) != (mem); \
+ maybe_fail(); \
+ } while (0)
+
+#define test_boolean(cond) do { \
+ bool _s = (cond); \
+ maybe_fail(); \
+ } while (0)
+
+bool __init wg_allowedips_selftest(void)
+{
+ bool found_a = false, found_b = false, found_c = false, found_d = false,
+ found_e = false, found_other = false;
+ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
+ *d = init_peer(), *e = init_peer(), *f = init_peer(),
+ *g = init_peer(), *h = init_peer();
+ struct allowedips_node *iter_node;
+ bool success = false;
+ struct allowedips t;
+ DEFINE_MUTEX(mutex);
+ struct in6_addr ip;
+ size_t i = 0, count = 0;
+ __be64 part;
+
+ mutex_init(&mutex);
+ mutex_lock(&mutex);
+ wg_allowedips_init(&t);
+
+ if (!a || !b || !c || !d || !e || !f || !g || !h) {
+ pr_err("allowedips self-test malloc: FAIL\n");
+ goto free;
+ }
+
+ insert(4, a, 192, 168, 4, 0, 24);
+ insert(4, b, 192, 168, 4, 4, 32);
+ insert(4, c, 192, 168, 0, 0, 16);
+ insert(4, d, 192, 95, 5, 64, 27);
+ /* replaces previous entry, and maskself is required */
+ insert(4, c, 192, 95, 5, 65, 27);
+ insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
+ insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
+ insert(4, e, 0, 0, 0, 0, 0);
+ insert(6, e, 0, 0, 0, 0, 0);
+ /* replaces previous entry */
+ insert(6, f, 0, 0, 0, 0, 0);
+ insert(6, g, 0x24046800, 0, 0, 0, 32);
+ /* maskself is required */
+ insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
+ insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
+ insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ insert(4, g, 64, 15, 112, 0, 20);
+ /* maskself is required */
+ insert(4, h, 64, 15, 123, 211, 25);
+ insert(4, a, 10, 0, 0, 0, 25);
+ insert(4, b, 10, 0, 0, 128, 25);
+ insert(4, a, 10, 1, 0, 0, 30);
+ insert(4, b, 10, 1, 0, 4, 30);
+ insert(4, c, 10, 1, 0, 8, 29);
+ insert(4, d, 10, 1, 0, 16, 29);
+
+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
+ print_tree(t.root4, 32);
+ print_tree(t.root6, 128);
+ }
+
+ success = true;
+
+ test(4, a, 192, 168, 4, 20);
+ test(4, a, 192, 168, 4, 0);
+ test(4, b, 192, 168, 4, 4);
+ test(4, c, 192, 168, 200, 182);
+ test(4, c, 192, 95, 5, 68);
+ test(4, e, 192, 95, 5, 96);
+ test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
+ test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
+ test(6, f, 0x26075300, 0x60006b01, 0, 0);
+ test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
+ test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
+ test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
+ test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
+ test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
+ test(6, h, 0x24046800, 0x40040800, 0, 0);
+ test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
+ test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
+ test(4, g, 64, 15, 116, 26);
+ test(4, g, 64, 15, 127, 3);
+ test(4, g, 64, 15, 123, 1);
+ test(4, h, 64, 15, 123, 128);
+ test(4, h, 64, 15, 123, 129);
+ test(4, a, 10, 0, 0, 52);
+ test(4, b, 10, 0, 0, 220);
+ test(4, a, 10, 1, 0, 2);
+ test(4, b, 10, 1, 0, 6);
+ test(4, c, 10, 1, 0, 10);
+ test(4, d, 10, 1, 0, 20);
+
+ insert(4, a, 1, 0, 0, 0, 32);
+ insert(4, a, 64, 0, 0, 0, 32);
+ insert(4, a, 128, 0, 0, 0, 32);
+ insert(4, a, 192, 0, 0, 0, 32);
+ insert(4, a, 255, 0, 0, 0, 32);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 1, 0, 0, 0);
+ test_negative(4, a, 64, 0, 0, 0);
+ test_negative(4, a, 128, 0, 0, 0);
+ test_negative(4, a, 192, 0, 0, 0);
+ test_negative(4, a, 255, 0, 0, 0);
+
+ wg_allowedips_free(&t, &mutex);
+ wg_allowedips_init(&t);
+ insert(4, a, 192, 168, 0, 0, 16);
+ insert(4, a, 192, 168, 0, 0, 24);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
+ test_negative(4, a, 192, 168, 0, 1);
+
+ /* These will hit the WARN_ON(len >= 128) in free_node if something
+ * goes wrong.
+ */
+ for (i = 0; i < 128; ++i) {
+ part = cpu_to_be64(~(1LLU << (i % 64)));
+ memset(&ip, 0xff, 16);
+ memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ }
+
+ wg_allowedips_free(&t, &mutex);
+
+ wg_allowedips_init(&t);
+ insert(4, a, 192, 95, 5, 93, 27);
+ insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
+ insert(4, a, 10, 1, 0, 20, 29);
+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
+ list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
+ u8 cidr, ip[16] __aligned(__alignof(u64));
+ int family = wg_allowedips_read_node(iter_node, ip, &cidr);
+
+ count++;
+
+ if (cidr == 27 && family == AF_INET &&
+ !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
+ found_a = true;
+ else if (cidr == 128 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
+ sizeof(struct in6_addr)))
+ found_b = true;
+ else if (cidr == 29 && family == AF_INET &&
+ !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
+ found_c = true;
+ else if (cidr == 83 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
+ sizeof(struct in6_addr)))
+ found_d = true;
+ else if (cidr == 21 && family == AF_INET6 &&
+ !memcmp(ip, ip6(0x26075000, 0, 0, 0),
+ sizeof(struct in6_addr)))
+ found_e = true;
+ else
+ found_other = true;
+ }
+ test_boolean(count == 5);
+ test_boolean(found_a);
+ test_boolean(found_b);
+ test_boolean(found_c);
+ test_boolean(found_d);
+ test_boolean(found_e);
+ test_boolean(!found_other);
+
+ if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
+ success = randomized_test();
+
+ if (success)
+ pr_info("allowedips self-tests: pass\n");
+
+free:
+ wg_allowedips_free(&t, &mutex);
+ kfree(a);
+ kfree(b);
+ kfree(c);
+ kfree(d);
+ kfree(e);
+ kfree(f);
+ kfree(g);
+ kfree(h);
+ mutex_unlock(&mutex);
+
+ return success;
+}
+
+#undef test_negative
+#undef test
+#undef remove
+#undef insert
+#undef init_peer
+
+#endif
diff --git a/drivers/net/wireguard/selftest/counter.c b/drivers/net/wireguard/selftest/counter.c
new file mode 100644
index 000000000000..f4fbb9072ed7
--- /dev/null
+++ b/drivers/net/wireguard/selftest/counter.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifdef DEBUG
+bool __init wg_packet_counter_selftest(void)
+{
+ unsigned int test_num = 0, i;
+ union noise_counter counter;
+ bool success = true;
+
+#define T_INIT do { \
+ memset(&counter, 0, sizeof(union noise_counter)); \
+ spin_lock_init(&counter.receive.lock); \
+ } while (0)
+#define T_LIM (COUNTER_WINDOW_SIZE + 1)
+#define T(n, v) do { \
+ ++test_num; \
+ if (counter_validate(&counter, n) != (v)) { \
+ pr_err("nonce counter self-test %u: FAIL\n", \
+ test_num); \
+ success = false; \
+ } \
+ } while (0)
+
+ T_INIT;
+ /* 1 */ T(0, true);
+ /* 2 */ T(1, true);
+ /* 3 */ T(1, false);
+ /* 4 */ T(9, true);
+ /* 5 */ T(8, true);
+ /* 6 */ T(7, true);
+ /* 7 */ T(7, false);
+ /* 8 */ T(T_LIM, true);
+ /* 9 */ T(T_LIM - 1, true);
+ /* 10 */ T(T_LIM - 1, false);
+ /* 11 */ T(T_LIM - 2, true);
+ /* 12 */ T(2, true);
+ /* 13 */ T(2, false);
+ /* 14 */ T(T_LIM + 16, true);
+ /* 15 */ T(3, false);
+ /* 16 */ T(T_LIM + 16, false);
+ /* 17 */ T(T_LIM * 4, true);
+ /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true);
+ /* 19 */ T(10, false);
+ /* 20 */ T(T_LIM * 4 - T_LIM, false);
+ /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false);
+ /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true);
+ /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false);
+ /* 24 */ T(0, false);
+ /* 25 */ T(REJECT_AFTER_MESSAGES, false);
+ /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true);
+ /* 27 */ T(REJECT_AFTER_MESSAGES, false);
+ /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false);
+ /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true);
+ /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false);
+ /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false);
+ /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false);
+ /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true);
+ /* 34 */ T(0, false);
+
+ T_INIT;
+ for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i)
+ T(i, true);
+ T(0, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i)
+ T(i, true);
+ T(1, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;)
+ T(i, true);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;)
+ T(i, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
+ T(i, true);
+ T(COUNTER_WINDOW_SIZE + 1, true);
+ T(0, false);
+
+ T_INIT;
+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
+ T(i, true);
+ T(0, true);
+ T(COUNTER_WINDOW_SIZE + 1, true);
+
+#undef T
+#undef T_LIM
+#undef T_INIT
+
+ if (success)
+ pr_info("nonce counter self-tests: pass\n");
+ return success;
+}
+#endif
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
new file mode 100644
index 000000000000..bcd6462e4540
--- /dev/null
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifdef DEBUG
+
+#include <linux/jiffies.h>
+
+static const struct {
+ bool result;
+ unsigned int msec_to_sleep_before;
+} expected_results[] __initconst = {
+ [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
+ [PACKETS_BURSTABLE] = { false, 0 },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 2] = { false, 0 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 4] = { true, 0 },
+ [PACKETS_BURSTABLE + 5] = { false, 0 }
+};
+
+static __init unsigned int maximum_jiffies_at_index(int index)
+{
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ int i;
+
+ for (i = 0; i <= index; ++i)
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
+}
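+
+/* The initial total_msecs term grants the whole run two thirds of one
+ * packet interval of slack (2 * 50 / 3 = 33ms with the assumed default
+ * of 20 packets per second) before timings_test() is declared
+ * -ETIMEDOUT and retried from scratch.
+ */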
+
+static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ struct sk_buff *skb6, struct ipv6hdr *hdr6,
+ int *test)
+{
+ unsigned long loop_start_time;
+ int i;
+
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+ loop_start_time = jiffies;
+
+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (wg_ratelimiter_allow(skb4, &init_net) !=
+ expected_results[i].result)
+ return -EXFULL;
+ ++(*test);
+
+ hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (!wg_ratelimiter_allow(skb4, &init_net))
+ return -EXFULL;
+ ++(*test);
+
+ hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ hdr6->saddr.in6_u.u6_addr32[2] = htonl(i);
+ hdr6->saddr.in6_u.u6_addr32[3] = htonl(i);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (wg_ratelimiter_allow(skb6, &init_net) !=
+ expected_results[i].result)
+ return -EXFULL;
+ ++(*test);
+
+ hdr6->saddr.in6_u.u6_addr32[0] =
+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1);
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+ if (!wg_ratelimiter_allow(skb6, &init_net))
+ return -EXFULL;
+ ++(*test);
+
+ hdr6->saddr.in6_u.u6_addr32[0] =
+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+ return -ETIMEDOUT;
+#endif
+ }
+ return 0;
+}
+
+static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ int *test)
+{
+ int i;
+
+ wg_ratelimiter_gc_entries(NULL);
+ rcu_barrier();
+
+ if (atomic_read(&total_entries))
+ return -EXFULL;
+ ++(*test);
+
+ for (i = 0; i <= max_entries; ++i) {
+ hdr4->saddr = htonl(i);
+ if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries))
+ return -EXFULL;
+ ++(*test);
+ }
+ return 0;
+}
+
+bool __init wg_ratelimiter_selftest(void)
+{
+ enum { TRIALS_BEFORE_GIVING_UP = 5000 };
+ bool success = false;
+ int test = 0, trials;
+ struct sk_buff *skb4, *skb6 = NULL;
+ struct iphdr *hdr4;
+ struct ipv6hdr *hdr6 = NULL;
+
+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
+ return true;
+
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+
+ if (wg_ratelimiter_init())
+ goto out;
+ ++test;
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ goto out;
+ }
+ ++test;
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ goto out;
+ }
+ ++test;
+
+ skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL);
+ if (unlikely(!skb4))
+ goto err_nofree;
+ skb4->protocol = htons(ETH_P_IP);
+ hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4));
+ hdr4->saddr = htonl(8182);
+ skb_reset_network_header(skb4);
+ ++test;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL);
+ if (unlikely(!skb6)) {
+ kfree_skb(skb4);
+ goto err_nofree;
+ }
+ skb6->protocol = htons(ETH_P_IPV6);
+ hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6));
+ hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212);
+ hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188);
+ skb_reset_network_header(skb6);
+ ++test;
+#endif
+
+ for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ int test_count = 0, ret;
+
+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
+ if (ret == -ETIMEDOUT) {
+ if (!trials--) {
+ test += test_count;
+ goto err;
+ }
+ msleep(500);
+ continue;
+ } else if (ret < 0) {
+ test += test_count;
+ goto err;
+ } else {
+ test += test_count;
+ break;
+ }
+ }
+
+ for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ int test_count = 0;
+
+ if (capacity_test(skb4, hdr4, &test_count) < 0) {
+ if (!trials--) {
+ test += test_count;
+ goto err;
+ }
+ msleep(50);
+ continue;
+ }
+ test += test_count;
+ break;
+ }
+
+ success = true;
+
+err:
+ kfree_skb(skb4);
+#if IS_ENABLED(CONFIG_IPV6)
+ kfree_skb(skb6);
+#endif
+err_nofree:
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ /* Uninit one extra time to check underflow detection. */
+ wg_ratelimiter_uninit();
+out:
+ if (success)
+ pr_info("ratelimiter self-tests: pass\n");
+ else
+ pr_err("ratelimiter self-test %d: FAIL\n", test);
+
+ return success;
+}
+#endif
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
new file mode 100644
index 000000000000..c13260563446
--- /dev/null
+++ b/drivers/net/wireguard/send.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "queueing.h"
+#include "timers.h"
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "messages.h"
+#include "cookie.h"
+
+#include <linux/uio.h>
+#include <linux/inetdevice.h>
+#include <linux/socket.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/sock.h>
+
+static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
+{
+ struct message_handshake_initiation packet;
+
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT))
+ return; /* This function is rate limited. */
+
+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
+ net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake,
+ ktime_get_coarse_boottime_ns());
+ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
+ HANDSHAKE_DSCP);
+ wg_timers_handshake_initiated(peer);
+ }
+}
+
+void wg_packet_handshake_send_worker(struct work_struct *work)
+{
+ struct wg_peer *peer = container_of(work, struct wg_peer,
+ transmit_handshake_work);
+
+ wg_packet_send_handshake_initiation(peer);
+ wg_peer_put(peer);
+}
+
+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
+ bool is_retry)
+{
+ if (!is_retry)
+ peer->timer_handshake_attempts = 0;
+
+ rcu_read_lock_bh();
+ /* We check last_sent_handshake here in addition to the actual function
+ * we're queueing up, so that we don't queue things if not strictly
+ * necessary:
+ */
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT) ||
+ unlikely(READ_ONCE(peer->is_dead)))
+ goto out;
+
+ wg_peer_get(peer);
+ /* Queues up a call to wg_packet_handshake_send_worker(), which does a
+ * wg_peer_put(peer) after it completes:
+ */
+ if (!queue_work(peer->device->handshake_send_wq,
+ &peer->transmit_handshake_work))
+ /* If the work was already queued, we want to drop the
+ * extra reference:
+ */
+ wg_peer_put(peer);
+out:
+ rcu_read_unlock_bh();
+}
+
+void wg_packet_send_handshake_response(struct wg_peer *peer)
+{
+ struct message_handshake_response packet;
+
+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
+ net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+
+ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake,
+ ktime_get_coarse_boottime_ns());
+ wg_socket_send_buffer_to_peer(peer, &packet,
+ sizeof(packet),
+ HANDSHAKE_DSCP);
+ }
+ }
+}
+
+void wg_packet_send_handshake_cookie(struct wg_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index)
+{
+ struct message_handshake_cookie packet;
+
+ net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
+ wg->dev->name, initiating_skb);
+ wg_cookie_message_create(&packet, initiating_skb, sender_index,
+ &wg->cookie_checker);
+ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
+ sizeof(packet));
+}
+
+static void keep_key_fresh(struct wg_peer *peer)
+{
+ struct noise_keypair *keypair;
+ bool send = false;
+
+ rcu_read_lock_bh();
+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
+ (unlikely(atomic64_read(&keypair->sending.counter.counter) >
+ REKEY_AFTER_MESSAGES) ||
+ (keypair->i_am_the_initiator &&
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REKEY_AFTER_TIME)))))
+ send = true;
+ rcu_read_unlock_bh();
+
+ if (send)
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
+
+static unsigned int calculate_skb_padding(struct sk_buff *skb)
+{
+ /* We take the length modulo the MTU, in case the networking layer
+ * hands us a packet that's bigger than the MTU. Without this, the
+ * final subtraction could wrap around below zero once padded_size is
+ * clamped to the MTU.
+ */
+ unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
+ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+
+ if (padded_size > PACKET_CB(skb)->mtu)
+ padded_size = PACKET_CB(skb)->mtu;
+ return padded_size - last_unit;
+}
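+
+/* Example, assuming MESSAGE_PADDING_MULTIPLE is 16 (its value in the
+ * protocol spec): a 100-byte plaintext gains 12 bytes of padding to
+ * reach 112; a packet exactly at the MTU has last_unit == 0 and gains
+ * none; and a 1419-byte packet with a 1420-byte MTU gains just 1 byte,
+ * because padded_size is clamped to the MTU.
+ */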
+
+static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
+{
+ unsigned int padding_len, plaintext_len, trailer_len;
+ struct scatterlist sg[MAX_SKB_FRAGS + 8];
+ struct message_data *header;
+ struct sk_buff *trailer;
+ int num_frags;
+
+ /* Calculate lengths. */
+ padding_len = calculate_skb_padding(skb);
+ trailer_len = padding_len + noise_encrypted_len(0);
+ plaintext_len = skb->len + padding_len;
+
+ /* Expand data section to have room for padding and auth tag. */
+ num_frags = skb_cow_data(skb, trailer_len, &trailer);
+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
+ return false;
+
+ /* Set the padding to zeros, and make sure it and the auth tag are part
+ * of the skb.
+ */
+ memset(skb_tail_pointer(trailer), 0, padding_len);
+
+ /* Expand head section to have room for our header and the network
+ * stack's headers.
+ */
+ if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
+ return false;
+
+ /* Finalize checksum calculation for the inner packet, if required. */
+ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb)))
+ return false;
+
+ /* Only after checksumming can we safely add on the padding at the end
+ * and the header.
+ */
+ skb_set_inner_network_header(skb, 0);
+ header = (struct message_data *)skb_push(skb, sizeof(*header));
+ header->header.type = cpu_to_le32(MESSAGE_DATA);
+ header->key_idx = keypair->remote_index;
+ header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
+ pskb_put(skb, trailer, trailer_len);
+
+ /* Now we can encrypt the scattergather segments */
+ sg_init_table(sg, num_frags);
+ if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
+ noise_encrypted_len(plaintext_len)) <= 0)
+ return false;
+ return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
+ PACKET_CB(skb)->nonce,
+ keypair->sending.key);
+}
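+
+/* The wire layout produced above is: the 16-byte message_data header
+ * (type, receiver index, little-endian counter), then the ciphertext of
+ * the plaintext plus its zero padding, then the 16-byte poly1305
+ * authentication tag accounted for by noise_encrypted_len().
+ */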
+
+void wg_packet_send_keepalive(struct wg_peer *peer)
+{
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&peer->staged_packet_queue)) {
+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ return;
+ skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
+ skb->dev = peer->device->dev;
+ PACKET_CB(skb)->mtu = skb->dev->mtu;
+ skb_queue_tail(&peer->staged_packet_queue, skb);
+ net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr);
+ }
+
+ wg_packet_send_staged_packets(peer);
+}
+
+static void wg_packet_create_data_done(struct sk_buff *first,
+ struct wg_peer *peer)
+{
+ struct sk_buff *skb, *next;
+ bool is_keepalive, data_sent = false;
+
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
+ skb_list_walk_safe(first, skb, next) {
+ is_keepalive = skb->len == message_data_len(0);
+ if (likely(!wg_socket_send_skb_to_peer(peer, skb,
+ PACKET_CB(skb)->ds) && !is_keepalive))
+ data_sent = true;
+ }
+
+ if (likely(data_sent))
+ wg_timers_data_sent(peer);
+
+ keep_key_fresh(peer);
+}
+
+void wg_packet_tx_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct crypt_queue,
+ work);
+ struct noise_keypair *keypair;
+ enum packet_state state;
+ struct sk_buff *first;
+ struct wg_peer *peer;
+
+ while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+ __ptr_ring_discard_one(&queue->ring);
+ peer = PACKET_PEER(first);
+ keypair = PACKET_CB(first)->keypair;
+
+ if (likely(state == PACKET_STATE_CRYPTED))
+ wg_packet_create_data_done(first, peer);
+ else
+ kfree_skb_list(first);
+
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
+ }
+}
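
wg_packet_tx_worker() is the serialization point that keeps transmission in order: encryption workers may finish in any order, but this worker only ever inspects the head of the ring and stops as soon as the head is still PACKET_STATE_UNCRYPTED. A hedged single-threaded sketch of that head-blocking idiom, using a plain array instead of the kernel's ptr_ring:

    #include <stdio.h>

    enum state { UNCRYPTED, CRYPTED, DEAD };

    struct item {
        int id;
        enum state st;
    };

    static void drain_in_order(struct item *ring, int *head, int tail)
    {
        /* An unfinished head blocks everything behind it, so send order
         * always matches queue order, however encryption completes. */
        while (*head != tail && ring[*head].st != UNCRYPTED) {
            if (ring[*head].st == CRYPTED)
                printf("send %d\n", ring[*head].id);
            else
                printf("drop %d\n", ring[*head].id);
            (*head)++;
        }
    }

    int main(void)
    {
        struct item ring[3] = { {0, CRYPTED}, {1, UNCRYPTED}, {2, CRYPTED} };
        int head = 0;

        drain_in_order(ring, &head, 3);  /* sends 0, then stops at 1 */
        ring[1].st = CRYPTED;
        drain_in_order(ring, &head, 3);  /* now sends 1 and 2 */
        return 0;
    }
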
+
+void wg_packet_encrypt_worker(struct work_struct *work)
+{
+ struct crypt_queue *queue = container_of(work, struct multicore_worker,
+ work)->ptr;
+ struct sk_buff *first, *skb, *next;
+
+ while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
+ enum packet_state state = PACKET_STATE_CRYPTED;
+
+ skb_list_walk_safe(first, skb, next) {
+ if (likely(encrypt_packet(skb,
+ PACKET_CB(first)->keypair))) {
+ wg_reset_packet(skb);
+ } else {
+ state = PACKET_STATE_DEAD;
+ break;
+ }
+ }
+ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+ state);
+ }
+}
+
+static void wg_packet_create_data(struct sk_buff *first)
+{
+ struct wg_peer *peer = PACKET_PEER(first);
+ struct wg_device *wg = peer->device;
+ int ret = -EINVAL;
+
+ rcu_read_lock_bh();
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+ &peer->tx_queue, first,
+ wg->packet_crypt_wq,
+ &wg->encrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+ wg_queue_enqueue_per_peer(&peer->tx_queue, first,
+ PACKET_STATE_DEAD);
+err:
+ rcu_read_unlock_bh();
+ if (likely(!ret || ret == -EPIPE))
+ return;
+ wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
+ wg_peer_put(peer);
+ kfree_skb_list(first);
+}
+
+void wg_packet_purge_staged_packets(struct wg_peer *peer)
+{
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+}
+
+void wg_packet_send_staged_packets(struct wg_peer *peer)
+{
+ struct noise_symmetric_key *key;
+ struct noise_keypair *keypair;
+ struct sk_buff_head packets;
+ struct sk_buff *skb;
+
+ /* Steal the current queue into our local one. */
+ __skb_queue_head_init(&packets);
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ skb_queue_splice_init(&peer->staged_packet_queue, &packets);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+ if (unlikely(skb_queue_empty(&packets)))
+ return;
+
+ /* First we make sure we have a valid reference to a valid key. */
+ rcu_read_lock_bh();
+ keypair = wg_noise_keypair_get(
+ rcu_dereference_bh(peer->keypairs.current_keypair));
+ rcu_read_unlock_bh();
+ if (unlikely(!keypair))
+ goto out_nokey;
+ key = &keypair->sending;
+ if (unlikely(!READ_ONCE(key->is_valid)))
+ goto out_nokey;
+ if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ REJECT_AFTER_TIME)))
+ goto out_invalid;
+
+ /* After we know we have a somewhat valid key, we now try to assign
+ * nonces to all of the packets in the queue. If we can't assign nonces
+ * for all of them, we just consider it a failure and wait for the next
+ * handshake.
+ */
+ skb_queue_walk(&packets, skb) {
+ /* 0 for no outer TOS: no leak. TODO: at some later point, we
+ * might consider using flowi->tos as outer instead.
+ */
+ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
+ PACKET_CB(skb)->nonce =
+ atomic64_inc_return(&key->counter.counter) - 1;
+ if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
+ goto out_invalid;
+ }
+
+ packets.prev->next = NULL;
+ wg_peer_get(keypair->entry.peer);
+ PACKET_CB(packets.next)->keypair = keypair;
+ wg_packet_create_data(packets.next);
+ return;
+
+out_invalid:
+ WRITE_ONCE(key->is_valid, false);
+out_nokey:
+ wg_noise_keypair_put(keypair, false);
+
+ /* We orphan the packets if we're waiting on a handshake, so that they
+ * don't block a socket's pool.
+ */
+ skb_queue_walk(&packets, skb)
+ skb_orphan(skb);
+ /* Then we put them back on the top of the queue. We're not too
+ * concerned about accidentally getting things a little out of order if
+ * packets are being added really fast, because this queue holds packets
+ * from before they can even be sent, and it's small anyway.
+ */
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+ skb_queue_splice(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+
+ /* If we're exiting because there's something wrong with the key, it
+ * means we should initiate a new handshake.
+ */
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
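
The nonce assignment above is the sender half of replay protection: one atomic counter hands every staged packet a unique, strictly increasing nonce, and the keypair is retired before the counter can wrap. A standalone sketch; REJECT_AFTER_MESSAGES is restated from the protocol constants (U64_MAX minus the 8192-entry counter window, minus one) as an assumption rather than quoted from this file:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* UINT64_MAX - COUNTER_WINDOW_SIZE(8192) - 1, per the protocol spec. */
    #define REJECT_AFTER_MESSAGES (UINT64_MAX - 8192 - 1)

    static _Atomic uint64_t send_counter;

    /* Fetch-and-add keeps nonces unique and strictly increasing even when
     * several CPUs stage packets concurrently; a false return means the
     * keypair is exhausted and a new handshake is required. */
    static bool assign_nonce(uint64_t *nonce)
    {
        *nonce = atomic_fetch_add(&send_counter, 1);
        return *nonce < REJECT_AFTER_MESSAGES;
    }
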
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
new file mode 100644
index 000000000000..262f3b5c819d
--- /dev/null
+++ b/drivers/net/wireguard/socket.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "device.h"
+#include "peer.h"
+#include "socket.h"
+#include "queueing.h"
+#include "messages.h"
+
+#include <linux/ctype.h>
+#include <linux/net.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/inetdevice.h>
+#include <net/udp_tunnel.h>
+#include <net/ipv6.h>
+
+static int send4(struct wg_device *wg, struct sk_buff *skb,
+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
+{
+ struct flowi4 fl = {
+ .saddr = endpoint->src4.s_addr,
+ .daddr = endpoint->addr4.sin_addr.s_addr,
+ .fl4_dport = endpoint->addr4.sin_port,
+ .flowi4_mark = wg->fwmark,
+ .flowi4_proto = IPPROTO_UDP
+ };
+ struct rtable *rt = NULL;
+ struct sock *sock;
+ int ret = 0;
+
+ skb_mark_not_on_list(skb);
+ skb->dev = wg->dev;
+ skb->mark = wg->fwmark;
+
+ rcu_read_lock_bh();
+ sock = rcu_dereference_bh(wg->sock4);
+
+ if (unlikely(!sock)) {
+ ret = -ENONET;
+ goto err;
+ }
+
+ fl.fl4_sport = inet_sk(sock)->inet_sport;
+
+ if (cache)
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+
+ if (!rt) {
+ security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
+ if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
+ fl.saddr, RT_SCOPE_HOST))) {
+ endpoint->src4.s_addr = 0;
+ *(__force __be32 *)&endpoint->src_if4 = 0;
+ fl.saddr = 0;
+ if (cache)
+ dst_cache_reset(cache);
+ }
+ rt = ip_route_output_flow(sock_net(sock), &fl, sock);
+ if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
+ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
+ rt->dst.dev->ifindex != endpoint->src_if4)))) {
+ endpoint->src4.s_addr = 0;
+ *(__force __be32 *)&endpoint->src_if4 = 0;
+ fl.saddr = 0;
+ if (cache)
+ dst_cache_reset(cache);
+ if (!IS_ERR(rt))
+ ip_rt_put(rt);
+ rt = ip_route_output_flow(sock_net(sock), &fl, sock);
+ }
+ if (unlikely(IS_ERR(rt))) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
+ wg->dev->name, &endpoint->addr, ret);
+ goto err;
+ } else if (unlikely(rt->dst.dev == skb->dev)) {
+ ip_rt_put(rt);
+ ret = -ELOOP;
+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
+ wg->dev->name, &endpoint->addr);
+ goto err;
+ }
+ if (cache)
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+ }
+
+ skb->ignore_df = 1;
+ udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, false);
+ goto out;
+
+err:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+static int send6(struct wg_device *wg, struct sk_buff *skb,
+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct flowi6 fl = {
+ .saddr = endpoint->src6,
+ .daddr = endpoint->addr6.sin6_addr,
+ .fl6_dport = endpoint->addr6.sin6_port,
+ .flowi6_mark = wg->fwmark,
+ .flowi6_oif = endpoint->addr6.sin6_scope_id,
+ .flowi6_proto = IPPROTO_UDP
+ /* TODO: addr->sin6_flowinfo */
+ };
+ struct dst_entry *dst = NULL;
+ struct sock *sock;
+ int ret = 0;
+
+ skb_mark_not_on_list(skb);
+ skb->dev = wg->dev;
+ skb->mark = wg->fwmark;
+
+ rcu_read_lock_bh();
+ sock = rcu_dereference_bh(wg->sock6);
+
+ if (unlikely(!sock)) {
+ ret = -ENONET;
+ goto err;
+ }
+
+ fl.fl6_sport = inet_sk(sock)->inet_sport;
+
+ if (cache)
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+
+ if (!dst) {
+ security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
+ if (unlikely(!ipv6_addr_any(&fl.saddr) &&
+ !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
+ endpoint->src6 = fl.saddr = in6addr_any;
+ if (cache)
+ dst_cache_reset(cache);
+ }
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
+ NULL);
+ if (unlikely(IS_ERR(dst))) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
+ wg->dev->name, &endpoint->addr, ret);
+ goto err;
+ } else if (unlikely(dst->dev == skb->dev)) {
+ dst_release(dst);
+ ret = -ELOOP;
+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
+ wg->dev->name, &endpoint->addr);
+ goto err;
+ }
+ if (cache)
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+ }
+
+ skb->ignore_df = 1;
+ udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, false);
+ goto out;
+
+err:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock_bh();
+ return ret;
+#else
+ return -EAFNOSUPPORT;
+#endif
+}
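
send4() and send6() share one shape: consult the per-peer dst_cache first, fall back to a full routing lookup only on a miss, and reset the cache whenever the sticky source address or interface turns out stale. The control flow, reduced to a sketch with illustrative names (cache_get(), cache_set(), and route_lookup() are hypothetical stand-ins, not kernel API):

    #include <stddef.h>

    struct cache;
    struct flow;
    struct route;

    /* Hypothetical stand-ins for dst_cache_get()/dst_cache_set() and the
     * per-family routing lookup. */
    struct route *cache_get(struct cache *c);
    void cache_set(struct cache *c, struct route *rt);
    struct route *route_lookup(const struct flow *fl);

    static struct route *get_route(struct cache *c, const struct flow *fl)
    {
        struct route *rt = c ? cache_get(c) : NULL; /* fast path, per-CPU */

        if (!rt) {
            rt = route_lookup(fl);                  /* slow path */
            if (rt && c)
                cache_set(c, rt);                   /* reuse next packet */
        }
        return rt;
    }

The NULL cache case matters: wg_socket_send_buffer_as_reply_to_skb() below deliberately passes no cache, since one-off replies to arbitrary initiators would only pollute it.
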
+
+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds)
+{
+ size_t skb_len = skb->len;
+ int ret = -EAFNOSUPPORT;
+
+ read_lock_bh(&peer->endpoint_lock);
+ if (peer->endpoint.addr.sa_family == AF_INET)
+ ret = send4(peer->device, skb, &peer->endpoint, ds,
+ &peer->endpoint_cache);
+ else if (peer->endpoint.addr.sa_family == AF_INET6)
+ ret = send6(peer->device, skb, &peer->endpoint, ds,
+ &peer->endpoint_cache);
+ else
+ dev_kfree_skb(skb);
+ if (likely(!ret))
+ peer->tx_bytes += skb_len;
+ read_unlock_bh(&peer->endpoint_lock);
+
+ return ret;
+}
+
+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer,
+ size_t len, u8 ds)
+{
+ struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
+
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, SKB_HEADER_LEN);
+ skb_set_inner_network_header(skb, 0);
+ skb_put_data(skb, buffer, len);
+ return wg_socket_send_skb_to_peer(peer, skb, ds);
+}
+
+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
+ struct sk_buff *in_skb, void *buffer,
+ size_t len)
+{
+ int ret = 0;
+ struct sk_buff *skb;
+ struct endpoint endpoint;
+
+ if (unlikely(!in_skb))
+ return -EINVAL;
+ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
+ if (unlikely(ret < 0))
+ return ret;
+
+ skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ skb_reserve(skb, SKB_HEADER_LEN);
+ skb_set_inner_network_header(skb, 0);
+ skb_put_data(skb, buffer, len);
+
+ if (endpoint.addr.sa_family == AF_INET)
+ ret = send4(wg, skb, &endpoint, 0, NULL);
+ else if (endpoint.addr.sa_family == AF_INET6)
+ ret = send6(wg, skb, &endpoint, 0, NULL);
+ /* No other possibilities if the endpoint is valid, which it is,
+ * as we checked above.
+ */
+
+ return ret;
+}
+
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb)
+{
+ memset(endpoint, 0, sizeof(*endpoint));
+ if (skb->protocol == htons(ETH_P_IP)) {
+ endpoint->addr4.sin_family = AF_INET;
+ endpoint->addr4.sin_port = udp_hdr(skb)->source;
+ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ endpoint->src4.s_addr = ip_hdr(skb)->daddr;
+ endpoint->src_if4 = skb->skb_iif;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ endpoint->addr6.sin6_family = AF_INET6;
+ endpoint->addr6.sin6_port = udp_hdr(skb)->source;
+ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
+ endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id(
+ &ipv6_hdr(skb)->saddr, skb->skb_iif);
+ endpoint->src6 = ipv6_hdr(skb)->daddr;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
+{
+ return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
+ a->addr4.sin_port == b->addr4.sin_port &&
+ a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr &&
+ a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) ||
+ (a->addr.sa_family == AF_INET6 &&
+ b->addr.sa_family == AF_INET6 &&
+ a->addr6.sin6_port == b->addr6.sin6_port &&
+ ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) &&
+ a->addr6.sin6_scope_id == b->addr6.sin6_scope_id &&
+ ipv6_addr_equal(&a->src6, &b->src6)) ||
+ unlikely(!a->addr.sa_family && !b->addr.sa_family);
+}
+
+void wg_socket_set_peer_endpoint(struct wg_peer *peer,
+ const struct endpoint *endpoint)
+{
+ /* First we check unlocked, in order to optimize, since it's pretty rare
+ * that an endpoint will change. If we happen to be mid-write, and two
+ * CPUs wind up writing the same thing or something slightly different,
+ * it doesn't really matter much either way.
+ */
+ if (endpoint_eq(endpoint, &peer->endpoint))
+ return;
+ write_lock_bh(&peer->endpoint_lock);
+ if (endpoint->addr.sa_family == AF_INET) {
+ peer->endpoint.addr4 = endpoint->addr4;
+ peer->endpoint.src4 = endpoint->src4;
+ peer->endpoint.src_if4 = endpoint->src_if4;
+ } else if (endpoint->addr.sa_family == AF_INET6) {
+ peer->endpoint.addr6 = endpoint->addr6;
+ peer->endpoint.src6 = endpoint->src6;
+ } else {
+ goto out;
+ }
+ dst_cache_reset(&peer->endpoint_cache);
+out:
+ write_unlock_bh(&peer->endpoint_lock);
+}
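
The unlocked endpoint_eq() pre-check above is a deliberately racy fast path: endpoints rarely change, all writers serialize on endpoint_lock, and the worst a stale read causes is one redundant locked update. The general shape, sketched with pthreads (illustrative, not the kernel's rwlock API):

    #include <pthread.h>
    #include <string.h>

    struct addr { unsigned char bytes[16]; };

    struct state {
        pthread_rwlock_t lock;
        struct addr cur;
    };

    /* Compare without the lock first; losing a race here at worst takes
     * the write lock for an update that turns out to be a no-op. */
    static void update_if_changed(struct state *s, const struct addr *a)
    {
        if (!memcmp(a, &s->cur, sizeof(*a))) /* racy but tolerated */
            return;
        pthread_rwlock_wrlock(&s->lock);
        s->cur = *a;
        pthread_rwlock_unlock(&s->lock);
    }
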
+
+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
+ const struct sk_buff *skb)
+{
+ struct endpoint endpoint;
+
+ if (!wg_socket_endpoint_from_skb(&endpoint, skb))
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+}
+
+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
+{
+ write_lock_bh(&peer->endpoint_lock);
+ memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
+ dst_cache_reset(&peer->endpoint_cache);
+ write_unlock_bh(&peer->endpoint_lock);
+}
+
+static int wg_receive(struct sock *sk, struct sk_buff *skb)
+{
+ struct wg_device *wg;
+
+ if (unlikely(!sk))
+ goto err;
+ wg = sk->sk_user_data;
+ if (unlikely(!wg))
+ goto err;
+ skb_mark_not_on_list(skb);
+ wg_packet_receive(wg, skb);
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void sock_free(struct sock *sock)
+{
+ if (unlikely(!sock))
+ return;
+ sk_clear_memalloc(sock);
+ udp_tunnel_sock_release(sock->sk_socket);
+}
+
+static void set_sock_opts(struct socket *sock)
+{
+ sock->sk->sk_allocation = GFP_ATOMIC;
+ sock->sk->sk_sndbuf = INT_MAX;
+ sk_set_memalloc(sock->sk);
+}
+
+int wg_socket_init(struct wg_device *wg, u16 port)
+{
+ int ret;
+ struct udp_tunnel_sock_cfg cfg = {
+ .sk_user_data = wg,
+ .encap_type = 1,
+ .encap_rcv = wg_receive
+ };
+ struct socket *new4 = NULL, *new6 = NULL;
+ struct udp_port_cfg port4 = {
+ .family = AF_INET,
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .local_udp_port = htons(port),
+ .use_udp_checksums = true
+ };
+#if IS_ENABLED(CONFIG_IPV6)
+ int retries = 0;
+ struct udp_port_cfg port6 = {
+ .family = AF_INET6,
+ .local_ip6 = IN6ADDR_ANY_INIT,
+ .use_udp6_tx_checksums = true,
+ .use_udp6_rx_checksums = true,
+ .ipv6_v6only = true
+ };
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+retry:
+#endif
+
+ ret = udp_sock_create(wg->creating_net, &port4, &new4);
+ if (ret < 0) {
+ pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
+ return ret;
+ }
+ set_sock_opts(new4);
+ setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6_mod_enabled()) {
+ port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
+ ret = udp_sock_create(wg->creating_net, &port6, &new6);
+ if (ret < 0) {
+ udp_tunnel_sock_release(new4);
+ if (ret == -EADDRINUSE && !port && retries++ < 100)
+ goto retry;
+ pr_err("%s: Could not create IPv6 socket\n",
+ wg->dev->name);
+ return ret;
+ }
+ set_sock_opts(new6);
+ setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+ }
+#endif
+
+ wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
+ return 0;
+}
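
wg_socket_init() must land the v4 and v6 sockets on the same port. With port 0, v4 grabs an ephemeral port first; if that port is already taken for v6, the pair is torn down and the whole dance retried, up to 100 times. A userspace analogy with BSD sockets (error handling elided; this sketches the strategy, not the kernel's udp_sock_create() API):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Returns the shared port on success, -1 after too many collisions. */
    static int bind_same_port_v4_v6(int *fd4, int *fd6)
    {
        for (int tries = 0; tries < 100; tries++) {
            struct sockaddr_in a4 = { .sin_family = AF_INET };
            struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };
            socklen_t len = sizeof(a4);
            int on = 1;

            *fd4 = socket(AF_INET, SOCK_DGRAM, 0);
            bind(*fd4, (struct sockaddr *)&a4, sizeof(a4)); /* port 0 */
            getsockname(*fd4, (struct sockaddr *)&a4, &len);

            *fd6 = socket(AF_INET6, SOCK_DGRAM, 0);
            /* mirrors .ipv6_v6only = true in the config above */
            setsockopt(*fd6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
            a6.sin6_port = a4.sin_port;    /* reuse the port v4 got */
            if (bind(*fd6, (struct sockaddr *)&a6, sizeof(a6)) == 0)
                return ntohs(a4.sin_port);

            close(*fd4);                   /* collision: retry the pair */
            close(*fd6);
        }
        return -1;
    }
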
+
+void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ struct sock *new6)
+{
+ struct sock *old4, *old6;
+
+ mutex_lock(&wg->socket_update_lock);
+ old4 = rcu_dereference_protected(wg->sock4,
+ lockdep_is_held(&wg->socket_update_lock));
+ old6 = rcu_dereference_protected(wg->sock6,
+ lockdep_is_held(&wg->socket_update_lock));
+ rcu_assign_pointer(wg->sock4, new4);
+ rcu_assign_pointer(wg->sock6, new6);
+ if (new4)
+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
+ mutex_unlock(&wg->socket_update_lock);
+ synchronize_rcu();
+ synchronize_net();
+ sock_free(old4);
+ sock_free(old6);
+}
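
wg_socket_reinit() is the textbook RCU replacement sequence, and the ordering is what makes the final frees safe. Schematically:

    rcu_assign_pointer(shared, new);  /* 1. publish: new readers see new  */
    synchronize_rcu();                /* 2. wait out pre-existing readers */
    free(old);                        /* 3. nobody can still hold old     */

Swapping steps 2 and 3 would free sockets that send4()/send6() might still be dereferencing under rcu_read_lock_bh(); synchronize_net() extends the same guarantee across the networking fast paths.
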
diff --git a/drivers/net/wireguard/socket.h b/drivers/net/wireguard/socket.h
new file mode 100644
index 000000000000..bab5848efbcd
--- /dev/null
+++ b/drivers/net/wireguard/socket.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_SOCKET_H
+#define _WG_SOCKET_H
+
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+
+int wg_socket_init(struct wg_device *wg, u16 port);
+void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
+ struct sock *new6);
+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data,
+ size_t len, u8 ds);
+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb,
+ u8 ds);
+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
+ struct sk_buff *in_skb,
+ void *out_buffer, size_t len);
+
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb);
+void wg_socket_set_peer_endpoint(struct wg_peer *peer,
+ const struct endpoint *endpoint);
+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
+ const struct sk_buff *skb);
+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer);
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \
+ struct endpoint __endpoint; \
+ wg_socket_endpoint_from_skb(&__endpoint, skb); \
+ net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \
+ ##__VA_ARGS__); \
+ } while (0)
+#else
+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...)
+#endif
+
+#endif /* _WG_SOCKET_H */
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
new file mode 100644
index 000000000000..d54d32ac9bc4
--- /dev/null
+++ b/drivers/net/wireguard/timers.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "timers.h"
+#include "device.h"
+#include "peer.h"
+#include "queueing.h"
+#include "socket.h"
+
+/*
+ * - Timer for retransmitting the handshake if we don't hear back after
+ * `REKEY_TIMEOUT + jitter` ms.
+ *
+ * - Timer for sending an empty packet if we have received a packet but have
+ * not sent one back for `KEEPALIVE_TIMEOUT` ms.
+ *
+ * - Timer for initiating a new handshake if we have sent a packet but have
+ * not received one back (even an empty one) for `(KEEPALIVE_TIMEOUT +
+ * REKEY_TIMEOUT) + jitter` ms.
+ *
+ * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms
+ * if no new keys have been received.
+ *
+ * - Timer for sending, if enabled, an empty authenticated packet every
+ * user-specified number of seconds (the persistent keepalive).
+ */
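
For concreteness, the default protocol constants behind those intervals (restated from the WireGuard paper as assumptions; the kernel's authoritative values live in messages.h):

    enum {
        REKEY_TIMEOUT_S     = 5,    /* retransmit a lost handshake   */
        KEEPALIVE_TIMEOUT_S = 10,   /* answer receive-only traffic   */
        REJECT_AFTER_TIME_S = 180,  /* sending-key lifetime          */

        NEW_HANDSHAKE_S     = KEEPALIVE_TIMEOUT_S + REKEY_TIMEOUT_S,  /* 15  */
        ZERO_KEY_MATERIAL_S = REJECT_AFTER_TIME_S * 3,                /* 540 */
    };
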
+
+static inline void mod_peer_timer(struct wg_peer *peer,
+ struct timer_list *timer,
+ unsigned long expires)
+{
+ rcu_read_lock_bh();
+ if (likely(netif_running(peer->device->dev) &&
+ !READ_ONCE(peer->is_dead)))
+ mod_timer(timer, expires);
+ rcu_read_unlock_bh();
+}
+
+static void wg_expired_retransmit_handshake(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer,
+ timer_retransmit_handshake);
+
+ if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
+
+ del_timer(&peer->timer_send_keepalive);
+ /* If we've tried unsuccessfully for too long to complete a
+ * handshake, we drop all packets without a keypair and don't
+ * try again.
+ */
+ wg_packet_purge_staged_packets(peer);
+
+ /* We set a timer for destroying any residue that might be left
+ * of a partial exchange.
+ */
+ if (!timer_pending(&peer->timer_zero_key_material))
+ mod_peer_timer(peer, &peer->timer_zero_key_material,
+ jiffies + REJECT_AFTER_TIME * 3 * HZ);
+ } else {
+ ++peer->timer_handshake_attempts;
+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, REKEY_TIMEOUT,
+ peer->timer_handshake_attempts + 1);
+
+ /* We clear the endpoint's source address, in case it is the
+ * cause of the trouble.
+ */
+ wg_socket_clear_peer_endpoint_src(peer);
+
+ wg_packet_send_queued_handshake_initiation(peer, true);
+ }
+}
+
+static void wg_expired_send_keepalive(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive);
+
+ wg_packet_send_keepalive(peer);
+ if (peer->timer_need_another_keepalive) {
+ peer->timer_need_another_keepalive = false;
+ mod_peer_timer(peer, &peer->timer_send_keepalive,
+ jiffies + KEEPALIVE_TIMEOUT * HZ);
+ }
+}
+
+static void wg_expired_new_handshake(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake);
+
+ pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
+ /* We clear the endpoint's source address, in case it is the cause
+ * of the trouble.
+ */
+ wg_socket_clear_peer_endpoint_src(peer);
+ wg_packet_send_queued_handshake_initiation(peer, false);
+}
+
+static void wg_expired_zero_key_material(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material);
+
+ rcu_read_lock_bh();
+ if (!READ_ONCE(peer->is_dead)) {
+ wg_peer_get(peer);
+ if (!queue_work(peer->device->handshake_send_wq,
+ &peer->clear_peer_work))
+ /* If the work was already on the queue, we want to drop
+ * the extra reference.
+ */
+ wg_peer_put(peer);
+ }
+ rcu_read_unlock_bh();
+}
+
+static void wg_queued_expired_zero_key_material(struct work_struct *work)
+{
+ struct wg_peer *peer = container_of(work, struct wg_peer,
+ clear_peer_work);
+
+ pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
+ peer->device->dev->name, peer->internal_id,
+ &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_peer_put(peer);
+}
+
+static void wg_expired_send_persistent_keepalive(struct timer_list *timer)
+{
+ struct wg_peer *peer = from_timer(peer, timer,
+ timer_persistent_keepalive);
+
+ if (likely(peer->persistent_keepalive_interval))
+ wg_packet_send_keepalive(peer);
+}
+
+/* Should be called after an authenticated data packet is sent. */
+void wg_timers_data_sent(struct wg_peer *peer)
+{
+ if (!timer_pending(&peer->timer_new_handshake))
+ mod_peer_timer(peer, &peer->timer_new_handshake,
+ jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ +
+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after an authenticated data packet is received. */
+void wg_timers_data_received(struct wg_peer *peer)
+{
+ if (likely(netif_running(peer->device->dev))) {
+ if (!timer_pending(&peer->timer_send_keepalive))
+ mod_peer_timer(peer, &peer->timer_send_keepalive,
+ jiffies + KEEPALIVE_TIMEOUT * HZ);
+ else
+ peer->timer_need_another_keepalive = true;
+ }
+}
+
+/* Should be called after any type of authenticated packet is sent, whether
+ * keepalive, data, or handshake.
+ */
+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_send_keepalive);
+}
+
+/* Should be called after any type of authenticated packet is received, whether
+ * keepalive, data, or handshake.
+ */
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_new_handshake);
+}
+
+/* Should be called after a handshake initiation message is sent. */
+void wg_timers_handshake_initiated(struct wg_peer *peer)
+{
+ mod_peer_timer(peer, &peer->timer_retransmit_handshake,
+ jiffies + REKEY_TIMEOUT * HZ +
+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+}
+
+/* Should be called after a handshake response message is received and processed
+ * or when getting key confirmation via the first data message.
+ */
+void wg_timers_handshake_complete(struct wg_peer *peer)
+{
+ del_timer(&peer->timer_retransmit_handshake);
+ peer->timer_handshake_attempts = 0;
+ peer->sent_lastminute_handshake = false;
+ ktime_get_real_ts64(&peer->walltime_last_handshake);
+}
+
+/* Should be called after an ephemeral key is created, which is before sending a
+ * handshake response or after receiving a handshake response.
+ */
+void wg_timers_session_derived(struct wg_peer *peer)
+{
+ mod_peer_timer(peer, &peer->timer_zero_key_material,
+ jiffies + REJECT_AFTER_TIME * 3 * HZ);
+}
+
+/* Should be called before a packet with authentication, whether
+ * keepalive, data, or handshake, is sent, or after one is received.
+ */
+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer)
+{
+ if (peer->persistent_keepalive_interval)
+ mod_peer_timer(peer, &peer->timer_persistent_keepalive,
+ jiffies + peer->persistent_keepalive_interval * HZ);
+}
+
+void wg_timers_init(struct wg_peer *peer)
+{
+ timer_setup(&peer->timer_retransmit_handshake,
+ wg_expired_retransmit_handshake, 0);
+ timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0);
+ timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0);
+ timer_setup(&peer->timer_zero_key_material,
+ wg_expired_zero_key_material, 0);
+ timer_setup(&peer->timer_persistent_keepalive,
+ wg_expired_send_persistent_keepalive, 0);
+ INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material);
+ peer->timer_handshake_attempts = 0;
+ peer->sent_lastminute_handshake = false;
+ peer->timer_need_another_keepalive = false;
+}
+
+void wg_timers_stop(struct wg_peer *peer)
+{
+ del_timer_sync(&peer->timer_retransmit_handshake);
+ del_timer_sync(&peer->timer_send_keepalive);
+ del_timer_sync(&peer->timer_new_handshake);
+ del_timer_sync(&peer->timer_zero_key_material);
+ del_timer_sync(&peer->timer_persistent_keepalive);
+ flush_work(&peer->clear_peer_work);
+}
diff --git a/drivers/net/wireguard/timers.h b/drivers/net/wireguard/timers.h
new file mode 100644
index 000000000000..f0653dcb1326
--- /dev/null
+++ b/drivers/net/wireguard/timers.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_TIMERS_H
+#define _WG_TIMERS_H
+
+#include <linux/ktime.h>
+
+struct wg_peer;
+
+void wg_timers_init(struct wg_peer *peer);
+void wg_timers_stop(struct wg_peer *peer);
+void wg_timers_data_sent(struct wg_peer *peer);
+void wg_timers_data_received(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer);
+void wg_timers_handshake_initiated(struct wg_peer *peer);
+void wg_timers_handshake_complete(struct wg_peer *peer);
+void wg_timers_session_derived(struct wg_peer *peer);
+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer);
+
+static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds,
+ u64 expiration_seconds)
+{
+ return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
+ <= (s64)ktime_get_coarse_boottime_ns();
+}
+
+#endif /* _WG_TIMERS_H */
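
wg_birthdate_has_expired() casts to s64 before comparing, so the test stays well-defined even if the nanosecond sum overflows. The same check in plain C, with worked values:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    static bool has_expired(uint64_t birth_ns, uint64_t expiry_s,
                            uint64_t now_ns)
    {
        /* Signed comparison, as in wg_birthdate_has_expired(). */
        return (int64_t)(birth_ns + expiry_s * NSEC_PER_SEC) <= (int64_t)now_ns;
    }

    int main(void)
    {
        uint64_t birth = 1000 * NSEC_PER_SEC;   /* key made at t = 1000 s */

        assert(!has_expired(birth, 180, 1100 * NSEC_PER_SEC)); /* 100 s old */
        assert(has_expired(birth, 180, 1181 * NSEC_PER_SEC));  /* 181 s old */
        return 0;
    }
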
diff --git a/drivers/net/wireguard/version.h b/drivers/net/wireguard/version.h
new file mode 100644
index 000000000000..a1a269a11634
--- /dev/null
+++ b/drivers/net/wireguard/version.h
@@ -0,0 +1 @@
+#define WIREGUARD_VERSION "1.0.0"
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 7b90b8546162..b10972b6cba4 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -62,5 +62,6 @@ source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
+source "drivers/net/wireless/ath/ath11k/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index ee2b2431e5a3..8e4ae9de5ae4 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
+obj-$(CONFIG_ATH11K) += ath11k/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index da2d179430ca..49cc4b7ed516 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -74,7 +74,7 @@ static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
if (cmd->odata) {
if (cmd->olen < olen) {
- ar5523_err(ar, "olen to small %d < %d\n",
+ ar5523_err(ar, "olen too small %d < %d\n",
cmd->olen, olen);
cmd->olen = 0;
cmd->res = -EOVERFLOW;
@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
+ SMCWUSBT-G2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index f80854180e21..ed87bc00f2aa 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -458,7 +458,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->mem_len = resource_size(res);
- ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+ ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
ATH10K_GCC_REG_SIZE);
if (!ar_ahb->gcc_mem) {
ath10k_err(ar, "gcc mem ioremap error\n");
@@ -466,7 +466,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
goto err_mem_unmap;
}
- ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+ ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
ATH10K_TCSR_REG_SIZE);
if (!ar_ahb->tcsr_mem) {
ath10k_err(ar, "tcsr mem ioremap error\n");
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 95dc4be82e5c..ea908107581d 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -346,6 +346,52 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
return 0;
}
+static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd *cmd;
+ u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
+ u32 txlen;
+ int ret;
+ size_t buf_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
+ cmd = kzalloc(buf_len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ while (length) {
+ txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd->id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd->lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd->lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ kfree(cmd);
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ kfree(cmd);
+
+ return 0;
+}
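
The loop above streams an arbitrarily long LZ image in bounded chunks. Assuming the header works out to 8 bytes (the 4-byte command id plus the 4-byte lz_data length field), each BMI exchange carries at most 2040 payload bytes; a worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int length = 6000;      /* hypothetical firmware segment */
        unsigned int max = 2048 - 8;     /* BMI_MAX_LARGE_DATA_SIZE - hdrlen */

        while (length) {
            unsigned int txlen = length < max ? length : max;

            printf("exchange %u payload bytes\n", txlen); /* 2040, 2040, 1920 */
            length -= txlen;
        }
        return 0;
    }
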
+
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
@@ -430,7 +476,11 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
- ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+ if (ar->hw_params.bmi_large_size_download)
+ ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
+ else
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index ef3bdba43bed..f6fadcbdd86e 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -45,6 +45,15 @@
sizeof(u32) + \
sizeof(u32))
+/* Maximum data size used for large BMI transfers */
+#define BMI_MAX_LARGE_DATA_SIZE 2048
+
+/* len = cmd + addr + length */
+#define BMI_MAX_LARGE_CMDBUF_SIZE (BMI_MAX_LARGE_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
/* BMI Commands */
enum bmi_cmd_id {
@@ -258,6 +267,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 4f76ba5d78a9..5ec16ce19b69 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -189,6 +189,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.tx_stats_over_pktlog = false,
+ .bmi_large_size_download = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -714,18 +715,6 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
- /* Explicitly set fwlog prints to zero as target may turn it on
- * based on scratch registers.
- */
- ret = ath10k_bmi_read32(ar, hi_option_flag, &param);
- if (ret)
- return ret;
-
- param |= HI_OPTION_DISABLE_DBGLOG;
- ret = ath10k_bmi_write32(ar, hi_option_flag, param);
- if (ret)
- return ret;
-
return 0;
}
@@ -3231,6 +3220,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_waitqueue_head(&ar->htt.empty_tx_wq);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
+ skb_queue_head_init(&ar->htt.rx_indication_head);
+
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index af68eb5d0776..5101bf2b5b15 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -124,6 +124,7 @@ struct ath10k_skb_cb {
struct ath10k_skb_rxcb {
dma_addr_t paddr;
struct hlist_node hlist;
+ u8 eid;
};
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -1180,6 +1181,7 @@ struct ath10k {
struct {
/* protected by data_lock */
+ u32 rx_crc_err_drop;
u32 fw_crash_counter;
u32 fw_warm_reset_counter;
u32 fw_cold_reset_counter;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 04c50a26a4f4..e000677ac516 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1094,6 +1094,7 @@ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_rts_good",
"d_tx_power", /* in .5 dbM I think */
"d_rx_crc_err", /* fcs_bad */
+ "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */
"d_no_beacon",
"d_tx_mpdus_queued",
"d_tx_msdu_queued",
@@ -1193,6 +1194,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
data[i++] = pdev_stats->rts_good;
data[i++] = pdev_stats->chan_tx_power;
data[i++] = pdev_stats->fcs_bad;
+ data[i++] = ar->stats.rx_crc_err_drop;
data[i++] = pdev_stats->no_beacons;
data[i++] = pdev_stats->mpdu_enqued;
data[i++] = pdev_stats->msdu_enqued;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 1d4d1a1992fe..2248d6c022f4 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -270,7 +270,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
- if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
+ if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@@ -800,8 +800,8 @@ setup:
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status) {
- ath10k_warn(ar, "unsupported HTC service id: %d\n",
- ep->service_id);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
+ ep->service_id);
return status;
}
@@ -878,8 +878,8 @@ static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
&ul_pipe_id,
&dl_pipe_id);
if (status) {
- ath10k_warn(ar, "unsupported HTC service id: %d\n",
- ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index f55d3caec61f..065c82d9d689 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
+#include <linux/bitfield.h>
struct ath10k;
@@ -39,7 +40,7 @@ struct ath10k;
* 4-byte aligned.
*/
-#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 32
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -49,9 +50,27 @@ enum ath10k_htc_tx_flags {
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
- ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
+#define ATH10K_HTC_FLAG_BUNDLE_MASK GENMASK(7, 4)
+
+/* bits 2-3 are for extra bundle count bits 4-5 */
+#define ATH10K_HTC_BUNDLE_EXTRA_MASK GENMASK(3, 2)
+#define ATH10K_HTC_BUNDLE_EXTRA_SHIFT 4
+
+static inline unsigned int ath10k_htc_get_bundle_count(u8 max_msgs, u8 flags)
+{
+ unsigned int count, extra_count = 0;
+
+ count = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags);
+
+ if (max_msgs > 16)
+ extra_count = FIELD_GET(ATH10K_HTC_BUNDLE_EXTRA_MASK, flags) <<
+ ATH10K_HTC_BUNDLE_EXTRA_SHIFT;
+
+ return count + extra_count;
+}
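
ath10k_htc_get_bundle_count() widens the bundle count from four bits to six: bits 7:4 of the flags byte carry the base count, and when the negotiated maximum exceeds 16 messages, bits 3:2 contribute two extra high-order bits. A worked example in plain C, with an arbitrary flags value chosen for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned char flags = 0xf4;              /* bits 7:4 = 15, bits 3:2 = 1 */
        unsigned int base = (flags & 0xf0) >> 4;         /* 15 */
        unsigned int extra = ((flags & 0x0c) >> 2) << 4; /* 1 << 4 = 16 */

        /* With max_msgs <= 16 the extra bits are ignored and the count
         * is just 15; with the new 32-message bundles it is 31. */
        printf("bundle count = %u\n", base + extra);
        return 0;
    }
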
+
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 30c080094af1..4a12564fc30e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1869,6 +1869,8 @@ struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
+ struct sk_buff_head rx_indication_head;
+
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
@@ -2283,6 +2285,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index d95b63f133ab..38a5814cf345 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1285,6 +1285,13 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
status = IEEE80211_SKB_RXCB(skb);
+ if (!(ar->filter_flags & FIF_FCSFAIL) &&
+ status->flag & RX_FLAG_FAILED_FCS_CRC) {
+ ar->stats.rx_crc_err_drop++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
@@ -2133,7 +2140,7 @@ static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
if (last_pn_valid)
pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
else
- peer->tids_last_pn_valid[tid] = 1;
+ peer->tids_last_pn_valid[tid] = true;
if (!pn_invalid)
last_pn->pn48 = new_pn.pn48;
@@ -2196,8 +2203,8 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
HTT_RX_IND_MPDU_STATUS_OK &&
mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
- ath10k_warn(ar, "MPDU range status: %d\n",
- mpdu_ranges->mpdu_range_status);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
+ mpdu_ranges->mpdu_range_status);
goto err;
}
@@ -2235,8 +2242,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_is_data_qos(hdr->frame_control);
+
rx_status = IEEE80211_SKB_RXCB(skb);
- rx_status->chains |= BIT(0);
+ memset(rx_status, 0, sizeof(*rx_status));
+
if (rx->ppdu.combined_rssi == 0) {
/* SDIO firmware does not provide signal */
rx_status->signal = 0;
@@ -2350,7 +2359,10 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
}
- ieee80211_rx_ni(ar->hw, skb);
+ if (ar->napi.dev)
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+ else
+ ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
@@ -3751,14 +3763,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
- return ath10k_htt_rx_proc_rx_ind_hl(htt,
- &resp->rx_ind_hl,
- skb,
- HTT_RX_PN_CHECK,
- HTT_RX_NON_TKIP_MIC);
- else
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
+ } else {
+ skb_queue_tail(&htt->rx_indication_head, skb);
+ return false;
+ }
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@@ -3948,6 +3958,37 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
return quota;
}
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
+{
+ struct htt_resp *resp;
+ struct ath10k_htt *htt = &ar->htt;
+ struct sk_buff *skb;
+ bool release;
+ int quota;
+
+ for (quota = 0; quota < budget; quota++) {
+ skb = skb_dequeue(&htt->rx_indication_head);
+ if (!skb)
+ break;
+
+ resp = (struct htt_resp *)skb->data;
+
+ release = ath10k_htt_rx_proc_rx_ind_hl(htt,
+ &resp->rx_ind_hl,
+ skb,
+ HTT_RX_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+
+ if (release)
+ dev_kfree_skb_any(skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
+ skb_queue_len(&htt->rx_indication_head));
+ }
+ return quota;
+}
+EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
+
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35a362329a4f..775fd62fb92d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -613,6 +613,9 @@ struct ath10k_hw_params {
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
+ /* target supporting fw download via large size BMI */
+ bool bmi_large_size_download;
+
/* need to set uart pin if disable uart print, workaround for a
* firmware bug
*/
@@ -813,7 +816,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
#define TARGET_10_4_NUM_WDS_ENTRIES 32
-#define TARGET_10_4_DMA_BURST_SIZE 0
+#define TARGET_10_4_DMA_BURST_SIZE 1
#define TARGET_10_4_MAC_AGGR_DELIM 0
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 83cc8778ca1e..7fee35ff966b 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1098,7 +1098,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
+ ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
@@ -6329,6 +6329,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta && sta->tdls)
ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
ar->wmi.peer_param->authorize, 1);
+ else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
+ ar->wmi.peer_param->authorize, 1);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -8908,6 +8911,7 @@ int ath10k_mac_register(struct ath10k *ar)
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
}
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
@@ -8958,6 +8962,7 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bb44f5a0941b..ded7a220a4aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1578,7 +1578,7 @@ static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
return 0;
}
-/* if an error happened returns < 0, otherwise the length */
+/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
const struct ath10k_mem_region *region,
u8 *buf)
@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
+ ret = -EIO;
+ goto done;
+ }
for (i = 0; i < region->len; i += 4)
*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
- return region->len;
+ ret = region->len;
+done:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
/* if an error happened returns < 0, otherwise the length */
@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
break;
case ATH10K_MEM_REGION_TYPE_IOREG:
- count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
break;
default:
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index a0ba07b85362..85dce43c5439 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -84,6 +84,9 @@ static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
int ret;
int i;
+ if (qmi->msa_fixed_perm)
+ return 0;
+
for (i = 0; i < qmi->nr_mem_region; i++) {
ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
if (ret)
@@ -102,6 +105,9 @@ static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
int i;
+ if (qmi->msa_fixed_perm)
+ return;
+
for (i = 0; i < qmi->nr_mem_region; i++)
ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}
@@ -279,7 +285,15 @@ static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* end = 1 triggers a CRC check on the BDF. If this fails, we
+ * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
+ * willing to use the BDF. For some platforms, all the valid
+ * released BDFs fail this CRC check, so attempt to detect this
+ * scenario and treat it as non-fatal.
+ */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ !(req->end == 1 &&
+ resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
ath10k_err(ar, "failed to download board data file: %d\n",
resp.resp.error);
ret = -EINVAL;
@@ -635,7 +649,9 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
if (ret < 0)
goto out;
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* older FW didn't support this request, which is not fatal */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
ret = -EINVAL;
goto out;
@@ -1025,6 +1041,9 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
qmi->msa_mem_size = msa_size;
}
+ if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+ qmi->msa_fixed_perm = true;
+
ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
&qmi->msa_pa,
qmi->msa_va);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 40aafb875ed0..dc257375f161 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -104,6 +104,7 @@ struct ath10k_qmi {
bool fw_ready;
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
+ bool msa_fixed_perm;
};
int ath10k_qmi_wlan_enable(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 120200a93bcc..e5316b911e1d 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -24,6 +24,8 @@
#include "trace.h"
#include "sdio.h"
+#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
+
/* inlined helper functions */
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
@@ -417,6 +419,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_sdio_rx_data *pkt;
struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
@@ -462,10 +465,16 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
if (ret)
goto out;
- if (!pkt->trailer_only)
- ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
- else
+ if (!pkt->trailer_only) {
+ cb = ATH10K_SKB_RXCB(pkt->skb);
+ cb->eid = id;
+
+ skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
+ queue_work(ar->workqueue_aux,
+ &ar_sdio->async_work_rx);
+ } else {
kfree_skb(pkt->skb);
+ }
/* The RX complete handler now owns the skb...*/
pkt->skb = NULL;
@@ -484,21 +493,22 @@ out:
return ret;
}
-static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
- struct ath10k_sdio_rx_data *rx_pkts,
- struct ath10k_htc_hdr *htc_hdr,
- size_t full_len, size_t act_len,
- size_t *bndl_cnt)
+static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
{
int ret, i;
+ u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
- *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
+ *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
+ if (*bndl_cnt > max_msgs) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ max_msgs);
return -ENOMEM;
}
@@ -529,12 +539,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
size_t full_len, act_len;
bool last_in_bundle;
int ret, i;
+ int pkt_cnt = 0;
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
- ath10k_warn(ar,
- "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
- n_lookaheads,
- ATH10K_SDIO_MAX_RX_MSGS);
+ ath10k_warn(ar, "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
ret = -ENOMEM;
goto err;
}
@@ -543,10 +552,8 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
last_in_bundle = false;
- if (le16_to_cpu(htc_hdr->len) >
- ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
- ath10k_warn(ar,
- "payload length %d exceeds max htc length: %zu\n",
+ if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
@@ -557,36 +564,37 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
- ath10k_warn(ar,
- "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
htc_hdr->eid, htc_hdr->flags,
le16_to_cpu(htc_hdr->len));
ret = -EINVAL;
goto err;
}
- if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
+ if (ath10k_htc_get_bundle_count(
+ ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
/* HTC header indicates that every packet to follow
* has the same padded length so that it can be
* optimally fetched as a full bundle.
*/
size_t bndl_cnt;
- ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
- &ar_sdio->rx_pkts[i],
- htc_hdr,
- full_len,
- act_len,
- &bndl_cnt);
+ ret = ath10k_sdio_mbox_alloc_bundle(ar,
+ &ar_sdio->rx_pkts[pkt_cnt],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);
if (ret) {
- ath10k_warn(ar, "alloc_bundle error %d\n", ret);
+ ath10k_warn(ar, "failed to allocate a bundle: %d\n",
+ ret);
goto err;
}
- n_lookaheads += bndl_cnt;
- i += bndl_cnt;
- /*Next buffer will be the last in the bundle */
+ pkt_cnt += bndl_cnt;
+
+ /* next buffer will be the last in the bundle */
last_in_bundle = true;
}
@@ -597,7 +605,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
- ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
act_len,
full_len,
last_in_bundle,
@@ -606,9 +614,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
goto err;
}
+
+ pkt_cnt++;
}
- ar_sdio->n_rx_pkts = i;
+ ar_sdio->n_rx_pkts = pkt_cnt;
return 0;
@@ -622,10 +632,10 @@ err:
return ret;
}
-static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
- struct ath10k_sdio_rx_data *pkt)
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
struct sk_buff *skb = pkt->skb;
struct ath10k_htc_hdr *htc_hdr;
int ret;
@@ -633,48 +643,75 @@ static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
skb->data, pkt->alloc_len);
if (ret)
- goto out;
+ goto err;
- /* Update actual length. The original length may be incorrect,
- * as the FW will bundle multiple packets as long as their sizes
- * fit within the same aligned length (pkt->alloc_len).
- */
htc_hdr = (struct ath10k_htc_hdr *)skb->data;
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
if (pkt->act_len > pkt->alloc_len) {
- ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
- pkt->act_len, pkt->alloc_len);
- ret = -EMSGSIZE;
- goto out;
+ ret = -EINVAL;
+ goto err;
}
skb_put(skb, pkt->act_len);
+ return 0;
-out:
- pkt->status = ret;
+err:
+ ar_sdio->n_rx_pkts = 0;
+ ath10k_sdio_mbox_free_rx_pkt(pkt);
return ret;
}
-static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_hdr *htc_hdr;
int ret, i;
+ u32 pkt_offset, virt_pkt_len;
+
+ virt_pkt_len = 0;
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
+ virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
+
+ if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
+ ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
+ ret = -E2BIG;
+ goto err;
+ }
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ ar_sdio->vsg_buffer, virt_pkt_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read bundle packets: %d", ret);
+ goto err;
+ }
+
+ pkt_offset = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
- ret = ath10k_sdio_mbox_rx_packet(ar,
- &ar_sdio->rx_pkts[i]);
- if (ret)
+ pkt = &ar_sdio->rx_pkts[i];
+ htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
+		if (pkt->act_len > pkt->alloc_len) {
+ ret = -EINVAL;
goto err;
+ }
+
+ skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
+ pkt_offset += pkt->alloc_len;
}
return 0;
err:
	/* Free all packets that were not successfully fetched. */
- for (; i < ar_sdio->n_rx_pkts; i++)
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+ ar_sdio->n_rx_pkts = 0;
+
return ret;
}
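The payoff of the bundled path is transactional: one sdio readsb covers the whole bundle instead of one bus transaction per packet. A back-of-envelope with assumed sizes:

	/* Assumed figures, for illustration only: 8 bundled packets, each
	 * padded to a 1664-byte alloc_len, become a single 13312-byte
	 * readsb into vsg_buffer and are then split by walking alloc_len
	 * offsets, instead of 8 separate transactions before this patch.
	 */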
@@ -717,7 +754,10 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
*/
*done = false;
- ret = ath10k_sdio_mbox_rx_fetch(ar);
+ if (ar_sdio->n_rx_pkts > 1)
+ ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
+ else
+ ret = ath10k_sdio_mbox_rx_fetch(ar);
/* Process fetched packets. This will potentially update
* n_lookaheads depending on if the packets contain lookahead
@@ -1293,6 +1333,31 @@ static void __ath10k_sdio_write_async(struct ath10k *ar,
ath10k_sdio_free_bus_req(ar, req);
}
+/* To improve throughput, use a workqueue to deliver packets to the HTC
+ * layer; this way the SDIO bus is utilized much better.
+ */
+static void ath10k_rx_indication_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ async_work_rx);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&ar_sdio->rx_head);
+ if (!skb)
+ break;
+ cb = ATH10K_SKB_RXCB(skb);
+ ep = &ar->htc.endpoint[cb->eid];
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ }
+
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ napi_schedule(&ar->napi);
+}
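The worker above drains rx_head; its producer lives elsewhere in this patch. A hedged sketch of what that enqueue side plausibly looks like (function name assumed):

	/* Hypothetical producer: queue a completed rx skb for deferred
	 * delivery and kick the worker on the driver's workqueue.
	 */
	static void example_queue_rx(struct ath10k_sdio *ar_sdio,
				     struct sk_buff *skb)
	{
		skb_queue_tail(&ar_sdio->rx_head, skb);
		queue_work(ar_sdio->workqueue, &ar_sdio->async_work_rx);
	}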
+
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
@@ -1681,6 +1746,8 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
int ret;
+ napi_enable(&ar->napi);
+
/* Sleep 20 ms before HIF interrupts are disabled.
* This will give target plenty of time to process the BMI done
* request before interrupts are disabled.
@@ -1805,13 +1872,16 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
}
spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
}
#ifdef CONFIG_PM
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
- return -EOPNOTSUPP;
+ return 0;
}
static int ath10k_sdio_hif_resume(struct ath10k *ar)
@@ -1961,7 +2031,26 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
*/
static int ath10k_sdio_pm_suspend(struct device *device)
{
- return 0;
+ struct sdio_func *func = dev_to_sdio_func(device);
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ mmc_pm_flag_t pm_flag, pm_caps;
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return 0;
+
+ pm_flag = MMC_PM_KEEP_POWER;
+
+ ret = sdio_set_host_pm_flags(func, pm_flag);
+ if (ret) {
+ pm_caps = sdio_get_host_pm_caps(func);
+ ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
+ pm_flag, pm_caps, ret);
+ return ret;
+ }
+
+ return ret;
}
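MMC_PM_KEEP_POWER asks the host controller to keep the card powered across suspend so firmware state survives; hosts that cannot honor it fail sdio_set_host_pm_flags(). A hedged note:

	/* pm_caps is fetched only for the warning; sdio_get_host_pm_caps()
	 * reports what the host supports. Waking the system on SDIO
	 * interrupts would additionally need MMC_PM_WAKE_SDIO_IRQ, which
	 * this patch does not request.
	 */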
static int ath10k_sdio_pm_resume(struct device *device)
@@ -1980,6 +2069,20 @@ static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
#endif /* CONFIG_PM_SLEEP */
+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget: %d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
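The poll function follows the standard NAPI contract; restated as comments since it is easy to invert:

	/* NAPI contract: a return value < budget means the work is drained,
	 * and napi_complete_done() must be called to re-arm scheduling.
	 * Returning exactly budget keeps the instance on the poll list,
	 * and napi_complete_done() must not be called in that case.
	 */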
+
static int ath10k_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -2005,6 +2108,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
+ ATH10K_NAPI_BUDGET);
+
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
@@ -2020,6 +2126,12 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
+ ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->vsg_buffer) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
ar_sdio->irq_data.irq_en_reg =
devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
GFP_KERNEL);
@@ -2028,7 +2140,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_core_destroy;
}
- ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
if (!ar_sdio->bmi_buf) {
ret = -ENOMEM;
goto err_core_destroy;
@@ -2057,6 +2169,9 @@ static int ath10k_sdio_probe(struct sdio_func *func,
for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
+ skb_queue_head_init(&ar_sdio->rx_head);
+ INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
+
dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
switch (dev_id_base) {
case QCA_MANUFACTURER_ID_AR6005_BASE:
@@ -2080,6 +2195,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
bus_params.chip_id = 0;
bus_params.hl_msdu_ids = true;
+ ar->hw->max_mtu = ETH_DATA_LEN;
+
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2106,6 +2223,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
func->num, func->vendor, func->device);
ath10k_core_unregister(ar);
+
+ netif_napi_del(&ar->napi);
+
ath10k_core_destroy(ar);
flush_workqueue(ar_sdio->workqueue);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index b8c7ac0330bd..33195f49acab 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -89,10 +89,10 @@
* to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
- * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skbs.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
- (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
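Assuming HTC_HOST_MAX_MSG_PER_RX_BUNDLE is 32 in this series (an assumption; check htc.h), the saving behind the new bound:

	/* Old worst case: 32 * 32 = 1024 preallocated rx slots.
	 * New worst case: 32 *  2 =   64, roughly one in-flight bundle plus
	 * one more announced by the lookahead, outstanding at once.
	 */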
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
@@ -126,7 +126,6 @@ struct ath10k_sdio_rx_data {
bool part_of_bundle;
bool last_in_bundle;
bool trailer_only;
- int status;
};
struct ath10k_sdio_irq_proc_regs {
@@ -138,8 +137,8 @@ struct ath10k_sdio_irq_proc_regs {
u8 rx_lookahead_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
- __le32 rx_lookahead[2];
- __le32 rx_gmbox_lookahead_alias[2];
+ __le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
+ __le32 int_status_enable;
};
struct ath10k_sdio_irq_enable_regs {
@@ -187,6 +186,9 @@ struct ath10k_sdio {
struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
/* free list of bus requests */
struct list_head bus_req_freeq;
+
+ struct sk_buff_head rx_head;
+
/* protects access to bus_req_freeq */
spinlock_t lock;
@@ -196,6 +198,13 @@ struct ath10k_sdio {
struct ath10k *ar;
struct ath10k_sdio_irq_data irq_data;
+	/* temporary buffer for sdio reads.
+	 * It is allocated at probe time and used to receive bundled packets;
+	 * bundled-packet reads are never issued in parallel, so the buffer
+	 * needs no locking.
+	 */
+ u8 *vsg_buffer;
+
/* temporary buffer for BMI requests */
u8 *bmi_buf;
@@ -206,6 +215,8 @@ struct ath10k_sdio {
struct list_head wr_asyncq;
/* protects access to wr_asyncq */
spinlock_t wr_async_lock;
+
+ struct work_struct async_work_rx;
};
static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 16177497bba7..21081b4a27d7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -46,7 +46,7 @@ static const char * const ath10k_regulators[] = {
};
static const char * const ath10k_clocks[] = {
- "cxo_ref_clk_pin",
+ "cxo_ref_clk_pin", "qdss",
};
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -582,7 +582,7 @@ static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
- ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -1201,7 +1201,7 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
irqflags, ce_name[id], ar);
if (ret) {
ath10k_err(ar,
- "failed to register IRQ handler for CE %d: %d",
+ "failed to register IRQ handler for CE %d: %d\n",
id, ret);
goto err_irq;
}
@@ -1466,7 +1466,6 @@ MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
static int ath10k_snoc_probe(struct platform_device *pdev)
{
const struct ath10k_snoc_drv_priv *drv_data;
- const struct of_device_id *of_id;
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
@@ -1474,18 +1473,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
int ret;
u32 i;
- of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ dev = &pdev->dev;
+ drv_data = device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "failed to find matching device tree id\n");
return -EINVAL;
}
- drv_data = of_id->data;
- dev = &pdev->dev;
-
ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
if (ret) {
- dev_err(dev, "failed to set dma mask: %d", ret);
+ dev_err(dev, "failed to set dma mask: %d\n", ret);
return ret;
}
@@ -1563,13 +1560,16 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
- goto err_core_destroy;
+ goto err_power_off;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
+err_power_off:
+ ath10k_hw_power_off(ar);
+
err_free_irq:
ath10k_snoc_free_irq(ar);
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 1bffe3fbea3f..7a9b9bbcdbfc 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -65,7 +65,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
if (ret) {
ath10k_warn(ar,
- "failed to to put testmode wmi event cmd attribute: %d\n",
+ "failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
@@ -74,7 +74,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath10k_warn(ar,
- "failed to to put testmode wmi even cmd_id: %d\n",
+ "failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index ab916459d237..842e42ec814f 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -239,7 +239,7 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
@@ -269,7 +269,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -435,7 +435,7 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __field(u8, hw_type);
+ __field(u8, hw_type)
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 69a1ec53df29..4e68debda9bf 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
const struct wmi_tlv_mgmt_rx_ev *ev;
const u8 *frame;
u32 msdu_len;
- int ret;
+ int ret, i;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
@@ -865,6 +865,9 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
arg->phy_mode = ev->phy_mode;
arg->rate = ev->rate;
+ for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
+ arg->rssi[i] = ev->rssi[i];
+
msdu_len = __le32_to_cpu(arg->buf_len);
if (skb->len < (frame - skb->data) + msdu_len) {
@@ -3707,6 +3710,7 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
+ u16 tlv_len;
size_t len;
void *ptr;
u32 i;
@@ -3764,10 +3768,12 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
+ tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+ sizeof(struct nlo_configured_parameters);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
- tlv->len = __cpu_to_le16(len);
+ tlv->len = __cpu_to_le16(tlv_len);
ptr += sizeof(*tlv);
nlo_list = ptr;
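The fix narrows tlv->len from the entire remaining command length to just the array this TLV header describes. The intended layout, restated:

	/* TLV framing assumed here:
	 *     [tlv header][nlo_configured_parameters x no_of_ssids][...]
	 * tlv->len must cover only the array following this header, i.e.
	 * no_of_ssids * sizeof(struct nlo_configured_parameters), not the
	 * whole buffer length `len` as before.
	 */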
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 9f564e2b7a14..61885d4d662c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2463,10 +2463,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rx_status;
u32 channel;
u32 phy_mode;
- u32 snr;
+ u32 snr, rssi;
u32 rate;
u16 fc;
- int ret;
+ int ret, i;
ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
if (ret) {
@@ -2525,6 +2525,20 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+
+ BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
+
+ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+ status->chains &= ~BIT(i);
+ rssi = __le32_to_cpu(arg.rssi[i]);
+		ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, rssi);
+
+ if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
+ status->chains |= BIT(i);
+ }
+ }
+
status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
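A worked instance of the per-chain signal computation, assuming the driver's default noise floor of -95 dBm:

	/* Example: rssi[0] = 40 dB above the floor gives
	 *     chain_signal[0] = -95 + 40 = -55 dBm, and chains |= BIT(0).
	 * An rssi of 0 or ATH10K_INVALID_RSSI leaves that chain unreported.
	 */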
@@ -9476,7 +9490,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
- msdu->len, DMA_FROM_DEVICE);
+ msdu->len, DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, msdu);
return 0;
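The direction change is a correctness fix: dma_unmap_single() must use the same direction as the original mapping. As a comment:

	/* These are queued tx mgmt frames, mapped with DMA_TO_DEVICE at
	 * transmit time; unmapping with DMA_FROM_DEVICE mismatches the
	 * mapping and can corrupt buffers or trip CONFIG_DMA_API_DEBUG.
	 */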
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 74adce1dd3a9..972d53d77654 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6786,6 +6786,7 @@ struct wmi_peer_delete_resp_ev_arg {
struct wmi_mac_addr peer_addr;
};
+#define WMI_MGMT_RX_NUM_RSSI 4
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -6794,6 +6795,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
struct wmi_mgmt_rx_ext_info ext_info;
+ __le32 rssi[WMI_MGMT_RX_NUM_RSSI];
};
struct wmi_ch_info_ev_arg {
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
new file mode 100644
index 000000000000..c88e16d4022b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH11K
+ tristate "Qualcomm Technologies 802.11ax chipset support"
+ depends on MAC80211 && HAS_DMA
+ depends on REMOTEPROC
+ depends on ARCH_QCOM || COMPILE_TEST
+ select ATH_COMMON
+ select QCOM_QMI_HELPERS
+ ---help---
+	  This module adds support for the Qualcomm Technologies 802.11ax
+	  family of chipsets.
+
+ If you choose to build a module, it'll be called ath11k.
+
+config ATH11K_DEBUG
+ bool "QCA ath11k debugging"
+ depends on ATH11K
+ ---help---
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_DEBUGFS
+ bool "QCA ath11k debugfs support"
+ depends on ATH11K && DEBUG_FS && MAC80211_DEBUGFS
+ ---help---
+ Enable ath11k debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_TRACING
+ bool "ath11k tracing support"
+ depends on ATH11K && EVENT_TRACING
+ ---help---
+ Select this to use ath11k tracing infrastructure.
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
new file mode 100644
index 000000000000..2761d07d938e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH11K) += ath11k.o
+ath11k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ ahb.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o
+
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
new file mode 100644
index 000000000000..e7e3e64c07aa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include "ahb.h"
+#include "debug.h"
+#include <linux/remoteproc.h>
+
+static const struct of_device_id ath11k_ahb_of_match[] = {
+	/* TODO: Should we change the compatible string to something
+	 * similar to the one ath10k uses?
+	 */
+ { .compatible = "qcom,ipq8074-wifi",
+ .data = (void *)ATH11K_HW_IPQ8074,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(0),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+#define ATH11K_IRQ_CE0_OFFSET 4
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+#define ATH11K_TX_RING_MASK_0 0x1
+#define ATH11K_TX_RING_MASK_1 0x2
+#define ATH11K_TX_RING_MASK_2 0x4
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+};
+
+const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+};
+
+const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+};
+
+const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RX_ERR_RING_MASK_0,
+};
+
+const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+};
+
+const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+};
+
+const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+};
+
+const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+};
+
+/* enum ext_irq_num - irq numbers that can be used by external modules
+ * like datapath
+ */
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+
+ while ((skb = __skb_dequeue(&irq_grp->pending_q)))
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_pipe_config *ce_config;
+
+ ce_config = &target_ce_config_wlan[ce_id];
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ CE_HOST_IE_3_ADDRESS);
+ }
+}
+
+static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_pipe_config *ce_config;
+
+ ce_config = &target_ce_config_wlan[ce_id];
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
+ ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+
+ if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) {
+ ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ CE_HOST_IE_3_ADDRESS);
+ }
+}
+
+static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+int ath11k_ahb_start(struct ath11k_base *ab)
+{
+ ath11k_ahb_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_ahb_ext_irq_disable(ab);
+ ath11k_ahb_sync_ext_irqs(ab);
+}
+
+void ath11k_ahb_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_ahb_ce_irqs_disable(ab);
+ ath11k_ahb_sync_ce_irqs(ab);
+ ath11k_ahb_kill_tasklets(ab);
+ del_timer_sync(&ab->rx_replenish_retry);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+int ath11k_ahb_power_up(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = rproc_boot(ab->tgt_rproc);
+ if (ret)
+ ath11k_err(ab, "failed to boot the remote processor Q6\n");
+
+ return ret;
+}
+
+void ath11k_ahb_power_down(struct ath11k_base *ab)
+{
+ rproc_shutdown(ab->tgt_rproc);
+}
+
+static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ARRAY_SIZE(target_ce_config_wlan) - 1;
+ cfg->tgt_ce = target_ce_config_wlan;
+ cfg->svc_to_ce_map_len = ARRAY_SIZE(target_service_to_ce_map_wlan);
+ cfg->svc_to_ce_map = target_service_to_ce_map_wlan;
+}
+
+static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+ }
+}
+
+static void ath11k_ahb_free_irq(struct ath11k_base *ab)
+{
+ int irq_idx;
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_ahb_free_ext_irq(ab);
+}
+
+static void ath11k_ahb_ce_tasklet(unsigned long data)
+{
+ struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq;
+ int ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ __skb_queue_head_init(&irq_grp->pending_q);
+
+ for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
+ if (ath11k_tx_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ath11k_rx_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ath11k_rx_err_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ath11k_rx_wbm_rel_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ath11k_reo_status_ring_mask[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (j < MAX_RADIOS) {
+ if (ath11k_rxdma2host_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_destination_ring_mac1
+ - ath11k_core_get_hw_mac_id(ab, j);
+ }
+
+ if (ath11k_host2rxdma_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ host2rxdma_host_buf_ring_mac1
+ - ath11k_core_get_hw_mac_id(ab, j);
+ }
+
+ if (rx_mon_status_ring_mask[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ ppdu_end_interrupts_mac1 -
+ ath11k_core_get_hw_mac_id(ab, j);
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_status_ring_mac1 -
+ ath11k_core_get_hw_mac_id(ab, j);
+ }
+ }
+ }
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request_irq for %d\n",
+ irq);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_ahb_config_irq(struct ath11k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < CE_COUNT; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+
+ tasklet_init(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet,
+ (unsigned long)ce_pipe);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath11k_ahb_ext_irq_config(ab);
+
+ return ret;
+}
+
+int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
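A hedged usage sketch of the lookup above, grounded in target_service_to_ce_map_wlan earlier in this file:

	/* Hypothetical caller: resolve WMI control to its CE pipes. */
	u8 ul_pipe, dl_pipe;
	int ret = ath11k_ahb_map_service_to_pipe(ab,
						 ATH11K_HTC_SVC_ID_WMI_CONTROL,
						 &ul_pipe, &dl_pipe);
	/* On success, ul_pipe == 3 (host->target) and dl_pipe == 2
	 * (target->host), per the map table above.
	 */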
+static int ath11k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath11k_base *ab;
+ const struct of_device_id *of_id;
+ struct resource *mem_res;
+ void __iomem *mem;
+ int ret;
+
+ of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev, "failed to get IO memory resource\n");
+ return -ENXIO;
+ }
+
+ mem = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
+ return ret;
+ }
+
+ ab = ath11k_core_alloc(&pdev->dev);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->pdev = pdev;
+ ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+ platform_set_drvdata(pdev, ab);
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ return 0;
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_core_free:
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ reinit_completion(&ab->driver_recovery);
+
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
+ wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+
+ ath11k_core_deinit(ab);
+ ath11k_ahb_free_irq(ab);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath11k_ahb_driver = {
+ .driver = {
+ .name = "ath11k",
+ .of_match_table = ath11k_ahb_of_match,
+ },
+ .probe = ath11k_ahb_probe,
+ .remove = ath11k_ahb_remove,
+};
+
+int ath11k_ahb_init(void)
+{
+ return platform_driver_register(&ath11k_ahb_driver);
+}
+
+void ath11k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath11k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
new file mode 100644
index 000000000000..93f46dfe22df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_AHB_H
+#define ATH11K_AHB_H
+
+#include "core.h"
+
+#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+struct ath11k_base;
+
+static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
+{
+ return ioread32(ab->mem + offset);
+}
+
+static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ iowrite32(value, ab->mem + offset);
+}
+
+void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab);
+int ath11k_ahb_start(struct ath11k_base *ab);
+void ath11k_ahb_stop(struct ath11k_base *ab);
+int ath11k_ahb_power_up(struct ath11k_base *ab);
+void ath11k_ahb_power_down(struct ath11k_base *ab);
+int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+
+int ath11k_ahb_init(void);
+void ath11k_ahb_exit(void);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
new file mode 100644
index 000000000000..cdd40c8fc867
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -0,0 +1,808 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+
+static const struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: host->target WMI (mac2) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE11: Not used */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ u32 *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath11k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
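A note on the ring index arithmetic used above; the macro body is assumed, but CE rings are sized to powers of two, so the usual form applies:

	/* Assumed: CE_RING_IDX_INCR(mask, idx) == ((idx + 1) & mask).
	 * With nentries = 512, nentries_mask = 511, index 511 wraps to 0.
	 */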
+
+static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath11k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
+
+static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ u32 *desc;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+ if (*nbytes == 0) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)\n",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
+static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+ u32 *desc;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+
+ while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static int ath11k_ce_init_ring(struct ath11k_base *ab,
+ struct ath11k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = { 0 };
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
+ if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath11k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+ ce_ring->hal_ring_id = ret;
+
+ return 0;
+}
+
+static struct ath11k_ce_ring *
+ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
+{
+ struct ath11k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+	if (!ce_ring)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+	/* Legacy platforms without cache-coherent DMA
+	 * are not supported.
+	 */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space = PTR_ALIGN(
+ ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ ce_ring->base_addr_ce_space = ALIGN(
+ ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
+
+static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
+ struct ath11k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = ath11k_ce_send_done_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+
+ if (pipe->send_cb)
+ pipe->send_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath11k_ce_recv_process_cb(pipe);
+}
+
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
+ pipe->send_cb(pipe);
+}
+
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_srng *srng;
+ u32 *desc;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+	/* Check if some entries could be regained by handling tx completion if
+	 * the CE has interrupts disabled and the number of used entries
+	 * exceeds the defined usage threshold.
+	 */
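+	/* Worked example (illustrative values): with nentries = 32,
+	 * sw_index = 30 and write_index = 3 the ring has wrapped, so
+	 * num_used = 32 - 30 + 3 = 5.
+	 */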
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH11K_CE_USAGE_THRESHOLD)
+ ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
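+
+/* Caller sketch (illustrative, not part of the driver): the HTC layer
+ * DMA maps the skb, stores the bus address in ATH11K_SKB_CB(skb)->paddr
+ * and then hands the buffer to the copy engine:
+ *
+ *	ret = ath11k_ce_send(ab, skb, ul_pipe_id, eid);
+ *	if (ret)
+ *		goto err_unmap;
+ *
+ * where the caller unmaps and frees the skb on the error path.
+ * ul_pipe_id and eid stand in for the HTC endpoint's pipe and endpoint
+ * id; the names are placeholders.
+ */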
+
+static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath11k_ce_rx_pipe_cleanup(pipe);
+
+ /* Cleanup any src CE's which have interrupts disabled */
+ ath11k_ce_poll_send_completed(ab, pipe_num);
+
+		/* NOTE: Should we also clean up tx buffers in all pipes? */
+ }
+}
+
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ continue;
+
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+
+void ath11k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+ ath11k_ce_rx_post_buf(ab);
+}
+
+int ath11k_ce_init_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath11k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+				ath11k_warn(ab, "failed to init dest status ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath11k_ce_free_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->src_ring->base_addr_owner_space,
+ pipe->src_ring->base_addr_ce_space);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->dest_ring->base_addr_owner_space,
+ pipe->dest_ring->base_addr_ce_space);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->status_ring->base_addr_owner_space,
+ pipe->status_ring->base_addr_ce_space);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ attr = &host_ce_config_wlan[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath11k_ce_alloc_pipe(ab, i);
+ if (ret) {
+			/* Free any partially successful allocation */
+ ath11k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* For Big Endian Host, Copy Engine byte_swap is enabled
+ * When Copy Engine does byte_swap, need to byte swap again for the
+ * Host to get/put buffer content in the correct byte order
+ */
+void ath11k_ce_byte_swap(void *mem, u32 len)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ if (!mem)
+ return;
+
+ for (i = 0; i < (len / 4); i++) {
+ *(u32 *)mem = swab32(*(u32 *)mem);
+ mem += 4;
+ }
+ }
+}
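+
+/* Worked example: on a big endian host, ath11k_ce_byte_swap() turns each
+ * 32-bit word around, e.g. 0x11223344 becomes 0x44332211
+ */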
+
+int ath11k_ce_get_attr_flags(int ce_id)
+{
+	if (ce_id < 0 || ce_id >= CE_COUNT)
+ return -EINVAL;
+
+ return host_ce_config_wlan[ce_id].flags;
+}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
new file mode 100644
index 000000000000..e355dfda48bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CE_H
+#define ATH11K_CE_H
+
+#define CE_COUNT 12
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#ifdef __BIG_ENDIAN
+#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
+#else
+#define CE_ATTR_FLAGS 0
+#endif
+
+/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
+#define ATH11K_CE_USAGE_THRESHOLD 32
+
+void ath11k_ce_byte_swap(void *mem, u32 len);
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN		1 /* Target->Host, WiFi Rx direction */
+#define PIPEDIR_OUT		2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
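+
+/* Example: for a 512-entry ring nentries_mask is 511 (0x1ff), so
+ * CE_RING_IDX_INCR(0x1ff, 511) wraps the index back to 0.
+ */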
+
+#define ATH11K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath11k_base;
+
+/*
+ * Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
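+
+/* Illustrative entry (not part of this header): a WMI control service
+ * carried on pipe 3 in the Host->Target (Tx) direction would be encoded
+ * as
+ *
+ *	{
+ *		.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ *		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ *		.pipenum = __cpu_to_le32(3),
+ *	}
+ *
+ * The service ID and pipe number are placeholders; the real
+ * service-to-pipe table lives in the bus glue code.
+ */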
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath11k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+	/* For the dest ring, this is the next index to be processed
+	 * by software after data was received into the ring.
+	 *
+	 * For the src ring, this is the last descriptor that was sent
+	 * and whose completion has been processed by software.
+	 *
+	 * Regardless of src or dest ring, the following is an invariant
+	 * (modulo ring size):
+	 * write index >= read index >= sw_index
+	 */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+	struct sk_buff *skb[];
+};
+
+struct ath11k_ce_pipe {
+ struct ath11k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath11k_ce_pipe *);
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+
+ struct tasklet_struct intr_tq;
+ struct ath11k_ce_ring *src_ring;
+ struct ath11k_ce_ring *dest_ring;
+ struct ath11k_ce_ring *status_ring;
+};
+
+struct ath11k_ce {
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+};
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
+void ath11k_ce_rx_replenish_retry(struct timer_list *t);
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
+int ath11k_ce_init_pipes(struct ath11k_base *ab);
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
+void ath11k_ce_free_pipes(struct ath11k_base *ab);
+int ath11k_ce_get_attr_flags(int ce_id);
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
new file mode 100644
index 000000000000..9e823056e673
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include "ahb.h"
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+
+unsigned int ath11k_debug_mask;
+module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
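+
+/* Usage sketch: "modprobe ath11k debug_mask=0xffffffff" enables every
+ * debug message; the individual bits are defined by enum
+ * ath11k_debug_mask in debug.h
+ */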
+
+static const struct ath11k_hw_params ath11k_hw_params = {
+ .name = "ipq8074",
+ .fw = {
+ .dir = IPQ8074_FW_DIR,
+ .board_size = IPQ8074_MAX_BOARD_DATA_SZ,
+ .cal_size = IPQ8074_MAX_CAL_DATA_SZ,
+ },
+};
+
+/* Map from pdev index to hw mac index */
+u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ ath11k_warn(ab, "Invalid pdev idx %d\n", pdev_idx);
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
+
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+	/* Note: bus is fixed to ahb. When other bus types are supported,
+	 * make this dynamic.
+	 */
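+	/* Example result (values are illustrative):
+	 *   "bus=ahb,qmi-chip-id=0,qmi-board-id=255"
+	 */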
+ scnprintf(name, name_len,
+ "bus=ahb,qmi-chip-id=%d,qmi-board-id=%d",
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
+
+static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+	if (!file)
+ return ERR_PTR(-ENOENT);
+
+	if (!dir)
+ dir = ".";
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = firmware_request_nowarn(&fw, filename, ab->dev);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+	ath11k_dbg(ab, ATH11K_DBG_BOOT, "downloading BDF: %s, size: %zu\n",
+		   filename, fw->size);
+
+ return fw;
+}
+
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
+static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int bd_ie_type)
+{
+ const struct ath11k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH11K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath11k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH11K_BD_IE_BOARD_NAME:
+ ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH11K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot found board data for '%s'", boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *boardname)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename = ATH11K_BOARD_API2_FILE;
+ size_t ie_len;
+ struct ath11k_fw_ie *hdr;
+ int ret, ie_id;
+
+ if (!bd->fw)
+ bd->fw = ath11k_fetch_fw_file(ab,
+ ab->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ /* magic has extra null byte padded */
+ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
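+	/* board-2.bin layout sketch (illustrative):
+	 *
+	 *   | magic string + NUL, padded to 4 | IE {id, len} | payload ... |
+	 *
+	 * Each IE is a little-endian {id, len} header followed by len bytes
+	 * of payload, padded to a 4 byte boundary.
+	 */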
+ if (len < magic_len) {
+ ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu\n",
+ ab->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
+ ath11k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu\n",
+ ab->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath11k_fw_ie)) {
+ hdr = (struct ath11k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+			ret = -EINVAL;
+			goto err;
+ }
+
+ switch (ie_id) {
+ case ATH11K_BD_IE_BOARD:
+ ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ATH11K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath11k_err(ab,
+ "failed to fetch board data for %s from %s/%s\n",
+ boardname, ab->hw_params.fw.dir, filename);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath11k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd)
+{
+ bd->fw = ath11k_fetch_fw_file(ab,
+ ab->hw_params.fw.dir,
+ ATH11K_DEFAULT_BOARD_FILE);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
+#define BOARD_NAME_SIZE 100
+int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_err(ab, "failed to create board name: %d", ret);
+ return ret;
+ }
+
+ ab->bd_api = 2;
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname);
+ if (!ret)
+ goto success;
+
+ ab->bd_api = 1;
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd);
+ if (ret) {
+ ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ab->hw_params.fw.dir);
+ return ret;
+ }
+
+success:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+ return 0;
+}
+
+static void ath11k_core_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_qmi_firmware_stop(ab);
+ ath11k_ahb_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath11k_core_soc_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_qmi_init_service(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to initialize qmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_debug_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create ath11k debugfs\n");
+ goto err_qmi_deinit;
+ }
+
+ ret = ath11k_ahb_power_up(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to power up: %d\n", ret);
+ goto err_debugfs_reg;
+ }
+
+ return 0;
+
+err_debugfs_reg:
+ ath11k_debug_soc_destroy(ab);
+err_qmi_deinit:
+ ath11k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath11k_core_soc_destroy(struct ath11k_base *ab)
+{
+ ath11k_debug_soc_destroy(ab);
+ ath11k_dp_free(ab);
+ ath11k_reg_free(ab);
+ ath11k_qmi_deinit_service(ab);
+}
+
+static int ath11k_core_pdev_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_debug_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_register(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to register the radio with mac80211: %d\n", ret);
+ goto err_pdev_debug;
+ }
+
+ ret = ath11k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_mac_unregister;
+ }
+
+ return 0;
+
+err_mac_unregister:
+ ath11k_mac_unregister(ab);
+
+err_pdev_debug:
+ ath11k_debug_pdev_destroy(ab);
+
+ return ret;
+}
+
+static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
+{
+ ath11k_mac_unregister(ab);
+ ath11k_ahb_ext_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_debug_pdev_destroy(ab);
+}
+
+static int ath11k_core_start(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
+{
+ int ret;
+
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+		ath11k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_attach(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach wmi: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath11k_htc_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_ahb_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_connect(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_htc_start(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_mac_allocate(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to create new hw device with mac80211: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ath11k_dp_pdev_pre_alloc(ab);
+
+ ret = ath11k_dp_pdev_reo_setup(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath11k_wmi_cmd_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath11k_mac_destroy(ab);
+err_hif_stop:
+ ath11k_ahb_stop(ab);
+err_wmi_detach:
+ ath11k_wmi_detach(ab);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_ce_init_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize CE: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init DP: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&ab->core_lock);
+ ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath11k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath11k_core_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath11k_ahb_ext_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath11k_core_stop(ab);
+ ath11k_mac_destroy(ab);
+err_dp_free:
+ ath11k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+ return ret;
+}
+
+static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath11k_ahb_ext_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_ahb_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+ ath11k_hal_srng_deinit(ab);
+
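+	/* one bit per possible vdev across all radios, all marked free */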
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ return ret;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+ return ret;
+}
+
+void ath11k_core_halt(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+
+ ath11k_mac_scan_finish(ar);
+ ath11k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&ar->arvifs);
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret = 0;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath11k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ath11k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH11K_STATE_OFF:
+ ath11k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH11K_STATE_RESTARTING:
+ break;
+ case ATH11K_STATE_RESTARTED:
+ ar->state = ATH11K_STATE_WEDGED;
+ /* fall through */
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_err(ab, "failed to get rproc\n");
+ return -EINVAL;
+ }
+ ab->tgt_rproc = prproc;
+ ab->hw_params = ath11k_hw_params;
+
+ ret = ath11k_core_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_core_deinit(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath11k_core_pdev_destroy(ab);
+ ath11k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_ahb_power_down(ab);
+ ath11k_mac_destroy(ab);
+ ath11k_core_soc_destroy(ab);
+}
+
+void ath11k_core_free(struct ath11k_base *ab)
+{
+ kfree(ab);
+}
+
+struct ath11k_base *ath11k_core_alloc(struct device *dev)
+{
+ struct ath11k_base *ab;
+
+ ab = kzalloc(sizeof(*ab), GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath11k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ mutex_init(&ab->core_lock);
+ spin_lock_init(&ab->base_lock);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ INIT_WORK(&ab->restart_work, ath11k_core_restart);
+ timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
+ ab->dev = dev;
+
+ return ab;
+
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+
+static int __init ath11k_init(void)
+{
+ int ret;
+
+ ret = ath11k_ahb_init();
+ if (ret)
+		pr_err("failed to register ath11k ahb driver: %d\n", ret);
+ return ret;
+}
+module_init(ath11k_init);
+
+static void __exit ath11k_exit(void)
+{
+ ath11k_ahb_exit();
+}
+module_exit(ath11k_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
new file mode 100644
index 000000000000..25cdcf71d0c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -0,0 +1,826 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CORE_H
+#define ATH11K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
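+
+/* e.g. SM(1, HAL_FOO) expands to ((1 << HAL_FOO_LSB) & HAL_FOO_MASK);
+ * HAL_FOO here is a placeholder field name
+ */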
+
+#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
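+
+/* i.e. 48 with the current WMI limit of 64 pending management frames */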
+
+#define ATH11K_INVALID_HW_MAC_ID 0xFF
+
+enum ath11k_supported_bw {
+ ATH11K_BW_20 = 0,
+ ATH11K_BW_40 = 1,
+ ATH11K_BW_80 = 2,
+ ATH11K_BW_160 = 3,
+};
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH11K_HT_MCS_MAX 7
+#define ATH11K_VHT_MCS_MAX 9
+#define ATH11K_HE_MCS_MAX 11
+
+static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
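+
+/* e.g. ath11k_tid_to_ac(5) returns WME_AC_VI and ath11k_tid_to_ac(7)
+ * returns WME_AC_VO
+ */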
+
+struct ath11k_skb_cb {
+ dma_addr_t paddr;
+ u8 eid;
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+} __packed;
+
+struct ath11k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+};
+
+enum ath11k_hw_rev {
+ ATH11K_HW_IPQ8074,
+};
+
+enum ath11k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH11K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH11K_FIRMWARE_MODE_FTM,
+};
+
+#define ATH11K_IRQ_NUM_MAX 52
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+#define ATH11K_EXT_IRQ_NUM_MAX 16
+
+extern const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+extern const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+
+struct ath11k_ext_irq_grp {
+ struct ath11k_base *ab;
+ u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ struct napi_struct napi;
+ struct net_device napi_ndev;
+ /* Queue of pending packets, not expected to be accessed concurrently
+ * to avoid locking overhead.
+ */
+ struct sk_buff_head pending_q;
+};
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath11k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath11k_scan_state {
+ ATH11K_SCAN_IDLE,
+ ATH11K_SCAN_STARTING,
+ ATH11K_SCAN_RUNNING,
+ ATH11K_SCAN_ABORTING,
+};
+
+enum ath11k_dev_flags {
+ ATH11K_CAC_RUNNING,
+ ATH11K_FLAG_CORE_REGISTERED,
+ ATH11K_FLAG_CRASH_FLUSH,
+ ATH11K_FLAG_RAW_MODE,
+ ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ ATH11K_FLAG_BTCOEX,
+ ATH11K_FLAG_RECOVERY,
+ ATH11K_FLAG_UNREGISTERING,
+ ATH11K_FLAG_REGISTERED,
+};
+
+enum ath11k_monitor_flags {
+ ATH11K_FLAG_MONITOR_ENABLED,
+};
+
+struct ath11k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+
+ u16 tx_seq_no;
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+};
+
+struct ath11k_vif_iter {
+ u32 vdev_id;
+ struct ath11k_vif *arvif;
+};
+
+struct ath11k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+};
+
+#define ATH11K_HE_MCS_NUM 12
+#define ATH11K_VHT_MCS_NUM 10
+#define ATH11K_BW_NUM 4
+#define ATH11K_NSS_NUM 4
+#define ATH11K_LEGACY_NUM 12
+#define ATH11K_GI_NUM 4
+#define ATH11K_HT_MCS_NUM 32
+
+enum ath11k_pkt_rx_err {
+ ATH11K_PKT_RX_ERR_FCS,
+ ATH11K_PKT_RX_ERR_TKIP,
+ ATH11K_PKT_RX_ERR_CRYPT,
+ ATH11K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH11K_PKT_RX_ERR_MAX,
+};
+
+enum ath11k_ampdu_subfrm_num {
+ ATH11K_AMPDU_SUBFRM_NUM_10,
+ ATH11K_AMPDU_SUBFRM_NUM_20,
+ ATH11K_AMPDU_SUBFRM_NUM_30,
+ ATH11K_AMPDU_SUBFRM_NUM_40,
+ ATH11K_AMPDU_SUBFRM_NUM_50,
+ ATH11K_AMPDU_SUBFRM_NUM_60,
+ ATH11K_AMPDU_SUBFRM_NUM_MORE,
+ ATH11K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_amsdu_subfrm_num {
+ ATH11K_AMSDU_SUBFRM_NUM_1,
+ ATH11K_AMSDU_SUBFRM_NUM_2,
+ ATH11K_AMSDU_SUBFRM_NUM_3,
+ ATH11K_AMSDU_SUBFRM_NUM_4,
+ ATH11K_AMSDU_SUBFRM_NUM_MORE,
+ ATH11K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_counter_type {
+ ATH11K_COUNTER_TYPE_BYTES,
+ ATH11K_COUNTER_TYPE_PKTS,
+ ATH11K_COUNTER_TYPE_MAX,
+};
+
+enum ath11k_stats_type {
+ ATH11K_STATS_TYPE_SUCC,
+ ATH11K_STATS_TYPE_FAIL,
+ ATH11K_STATS_TYPE_RETRY,
+ ATH11K_STATS_TYPE_AMPDU,
+ ATH11K_STATS_TYPE_MAX,
+};
+
+struct ath11k_htt_data_stats {
+ u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM];
+ u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM];
+ u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM];
+ u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM];
+ u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM];
+ u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM];
+ u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM];
+};
+
+struct ath11k_htt_tx_stats {
+ struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
+struct ath11k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+struct ath11k_sta {
+ struct ath11k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+
+ struct work_struct update_wk;
+ struct ieee80211_tx_info tx_info;
+ struct rate_info txrate;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ath11k_htt_tx_stats *tx_stats;
+ struct ath11k_rx_peer_stats *rx_stats;
+};
+
+#define ATH11K_NUM_CHANS 41
+#define ATH11K_MAX_5G_CHAN 173
+
+enum ath11k_state {
+ ATH11K_STATE_OFF,
+ ATH11K_STATE_ON,
+ ATH11K_STATE_RESTARTING,
+ ATH11K_STATE_RESTARTED,
+ ATH11K_STATE_WEDGED,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor (dBm) */
+#define ATH11K_DEFAULT_NOISE_FLOOR -95
+
+struct ath11k_fw_stats {
+ struct dentry *debugfs_fwstats;
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+};
+
+struct ath11k_dbg_htt_stats {
+ u8 type;
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+ /* protects shared stats req buffer */
+ spinlock_t lock;
+};
+
+struct ath11k_debug {
+ struct dentry *debugfs_pdev;
+ struct ath11k_dbg_htt_stats htt_stats;
+ u32 extd_tx_stats;
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+ u32 extd_rx_stats;
+ u32 pktlog_filter;
+ u32 pktlog_mode;
+ u32 pktlog_peer_valid;
+ u8 pktlog_peer_addr[ETH_ALEN];
+};
+
+struct ath11k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u32 duration;
+ u8 ba_fails;
+ bool is_ampdu;
+};
+
+#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
+
+struct ath11k_vdev_stop_status {
+ bool stop_in_progress;
+ u32 vdev_id;
+};
+
+struct ath11k {
+ struct ath11k_base *ab;
+ struct ath11k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath11k_pdev_wmi *wmi;
+ struct ath11k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ struct ath11k_he ar_he;
+ enum ath11k_state state;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath11k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ bool monitor_present;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+ * channel context data, survey info, test mode data.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct ath11k_vdev_stop_status vdev_stop_status;
+ struct completion vdev_setup_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH11K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath11k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath11k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct ath11k_debug debug;
+#endif
+ bool dfs_block_radar_events;
+};
+
+struct ath11k_band_cap {
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath11k_ppe_threshold he_ppet;
+};
+
+struct ath11k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath11k_band_cap band[NUM_NL80211_BANDS];
+};
+
+struct ath11k_pdev {
+ struct ath11k *ar;
+ u32 pdev_id;
+ struct ath11k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+/* IPQ8074 HW channel counters frequency value in hertz */
+#define IPQ8074_CC_FREQ_HERTZ 320000
+
+struct ath11k_soc_dp_rx_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath11k_base {
+ enum ath11k_hw_rev hw_rev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath11k_qmi qmi;
+ struct ath11k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ struct rproc *tgt_rproc;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath11k_htc htc;
+
+ struct ath11k_dp dp;
+
+ void __iomem *mem;
+ unsigned long mem_len;
+
+ const struct ath11k_hif_ops *hif_ops;
+
+ struct ath11k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath11k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath11k_pdev pdevs[MAX_RADIOS];
+ struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ bool wmi_ready;
+ u32 wlan_init_status;
+ int irq_num[ATH11K_IRQ_NUM_MAX];
+ struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ struct napi_struct *napi;
+ struct ath11k_targ_cap target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+ int bd_api;
+ struct ath11k_hw_params hw_params;
+ const struct firmware *cal_file;
+
+ /* Below regd's are protected by ab->data_lock */
+ /* This is the regd set for every radio
+	 * by the firmware during initialization
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting
+ * This may or may not be used during the runtime
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+
+ /* Current DFS Regulatory */
+ enum ath11k_dfs_region dfs_region;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_soc;
+ struct dentry *debugfs_ath11k;
+#endif
+ struct ath11k_soc_dp_rx_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+};
+
+struct ath11k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ /* Cycles spent transmitting frames */
+ u32 tx_frame_count;
+ /* Cycles spent receiving frames */
+ u32 rx_frame_count;
+ /* Total channel busy time, evidently */
+ u32 rx_clear_count;
+ /* Total on-channel time */
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+ /* Num Local frames queued */
+ s32 local_enqued;
+ /* Num Local frames done */
+ s32 local_freed;
+ /* Num queued to HW */
+ s32 hw_queued;
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+ /* Num underruns */
+ s32 underrun;
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+ /* Num MPDUs requed by SW */
+	/* Num MPDUs requeued by SW */
+ /* excessive retries */
+ u32 tx_ko;
+ /* data hw rate code */
+ u32 data_rc;
+ /* Scheduler self triggers */
+ u32 self_triggers;
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+ /* wal pdev resets */
+ u32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+ /* PhY/BB underrun */
+ u32 phy_underrun;
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ /* Cnts any change in ring routing mid-ppdu */
+ s32 mid_ppdu_route_change;
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+ /* Number of PHY errors */
+ s32 phy_errs;
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+};
+
+struct ath11k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath11k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_init(struct ath11k_base *ath11k);
+void ath11k_core_deinit(struct ath11k_base *ath11k);
+struct ath11k_base *ath11k_core_alloc(struct device *dev);
+void ath11k_core_free(struct ath11k_base *ath11k);
+int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
+ struct ath11k_board_data *bd);
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
+
+void ath11k_core_halt(struct ath11k *ar);
+u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx);
+
+static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
+{
+ switch (state) {
+ case ATH11K_SCAN_IDLE:
+ return "idle";
+ case ATH11K_SCAN_STARTING:
+ return "starting";
+ case ATH11K_SCAN_RUNNING:
+ return "running";
+ case ATH11K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
+{
+ return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath11k_skb_rxcb *)skb->cb;
+}
+
+static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath11k_vif *)vif->drv_priv;
+}
+
+#endif /* ATH11K_CORE_H */
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
new file mode 100644
index 000000000000..8d485171b0b3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debug_htt_stats.h"
+#include "peer.h"
+
+void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+#ifdef CONFIG_ATH11K_DEBUG
+void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath11k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+
+ /* TODO: trace log */
+
+ va_end(args);
+}
+
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath11k_debug_mask & mask) {
+ if (msg)
+ __ath11k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
+ }
+ }
+}
+
+#endif
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+static void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_debug_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i, ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+		ath11k_warn(ab, "failed to get ar for pdev_id %d\n",
+			    stats.pdev_id);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+ ar->debug.fw_stats_done = true;
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+		/* FW sends stats for all active VDEVs irrespective of PDEV,
+		 * hence keep collecting until stats for all started VDEVs
+		 * have been received
+		 */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+				total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+
+		is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->debug.fw_stats.vdevs);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats.bcn)) {
+ ath11k_warn(ab, "empty bcn stats");
+ goto complete;
+ }
+		/* Mark the end once we have received stats for all started
+		 * VDEVs within the PDEV
+		 */
+		is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats.bcn,
+ &ar->debug.fw_stats.bcn);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+complete:
+ complete(&ar->debug.fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_pdevs_free(&stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&stats.vdevs);
+ ath11k_fw_stats_bcn_free(&stats.bcn);
+}
+
+static int ath11k_debug_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+	/* FW stats can get split when exceeding the stats data buffer limit.
+	 * In that case, since there is no end marker for the back-to-back
+	 * received 'update stats' events, we keep a 3 second timeout in case
+	 * fw_stats_done is not marked yet
+	 */
+	timeout = jiffies + 3 * HZ;
+
+ ath11k_debug_fw_stats_reset(ar);
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+	ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+	if (ret) {
+		ath11k_warn(ab, "could not request fw stats (%d)\n", ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+						1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
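+	/* The completion only signals that at least one stats event has
+	 * arrived; poll fw_stats_done in case the stats were split across
+	 * several back-to-back events.
+	 */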
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
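+
+/* Typical usage, assuming the default debugfs mount point and a first
+ * radio named mac0:
+ *
+ *   cat /sys/kernel/debug/ath11k/<hw name>/mac0/fw_stats/pdev_stats
+ *
+ * open() requests fresh stats from firmware and formats them into a
+ * buffer which read() then serves; the vdev_stats and beacon_stats
+ * files below follow the same pattern.
+ */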
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ /* VDEV stats is always sent for all active VDEVs from FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_debug_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ /* since beacon stats request is looped for all active VDEVs, saved fw
+ * stats is not freed for each request until done for all active VDEVs
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'assert': Send WMI_FORCE_FW_HANG_CMDID to the firmware to trigger an
+ * assert. This firmware hang is recoverable by a warm firmware reset.
+ * 'hw-restart' is advertised in the read-side help text but is not
+ * implemented yet; writing it is rejected with -EINVAL.
+ */
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {0};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+	/* drop the possible '\n' from the end */
+	if (rc > 0 && buf[rc - 1] == '\n')
+		buf[rc - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
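+
+/* Example, path assuming the default debugfs mount point:
+ *
+ *   echo assert > /sys/kernel/debug/ath11k/<hw name>/simulate_fw_crash
+ */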
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+						char __user *ubuf,
+						size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%08x\n",
+			ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+	.open = simple_open,
+};
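+
+/* The value written is stored verbatim as the extended TX stats filter,
+ * e.g. "echo 1 > ext_tx_stats" (value hypothetical) enables collection
+ * and "echo 0 > ext_tx_stats" disables it.
+ */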
+
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 enable, rx_filter = 0, ring_id;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%d\n",
+			ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
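+
+/* Unlike ext_tx_stats, only 0 or 1 is accepted here since enabling also
+ * reprograms the monitor status ring filter with the extended TLV set.
+ */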
+
+static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_rx_stats = {
+ .read = ath11k_debug_dump_soc_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath11k_debug_pdev_create(struct ath11k_base *ab)
+{
+ ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ if (IS_ERR(ab->debugfs_soc))
+ return PTR_ERR(ab->debugfs_soc);
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab,
+ &fops_soc_rx_stats);
+
+ return 0;
+}
+
+void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_ath11k);
+ ab->debugfs_ath11k = NULL;
+}
+
+int ath11k_debug_soc_create(struct ath11k_base *ab)
+{
+ ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+
+ if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
+ if (IS_ERR(ab->debugfs_ath11k))
+ return PTR_ERR(ab->debugfs_ath11k);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath11k_debug_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+void ath11k_debug_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+
+ /* all stats debugfs files created are under "fw_stats" directory
+ * created per PDEV
+ */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
+
+ init_completion(&ar->debug.fw_stats_complete);
+}
+
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ u8 buf[128] = {0};
+ int ret;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ } else {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
+ filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+					 char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%08x %08x\n",
+			ar->debug.pktlog_filter,
+			ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+	.open = simple_open,
+};
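+
+/* The write format is "0x<filter> <mode>"; for example (values taken
+ * from the enums in debug.h):
+ *
+ *   echo "0x6f 1" > pktlog_filter    # ATH11K_PKTLOG_ANY, lite mode
+ *   echo "0x0 0" > pktlog_filter     # disable pktlog
+ */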
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+	.open = simple_open,
+};
+
+int ath11k_debug_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+	snprintf(pdev_name, sizeof(pdev_name), "mac%d", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+
+ if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ return -ENOMEM;
+ }
+
+ /* Create a symlink under ieee80211/phy* */
+	snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debug_htt_stats_init(ar);
+
+ ath11k_debug_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ return 0;
+}
+
+void ath11k_debug_unregister(struct ath11k *ar)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
new file mode 100644
index 000000000000..8e8d5588b541
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUG_H_
+#define _ATH11K_DEBUG_H_
+
+#include "hal_tx.h"
+#include "trace.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+enum ath11k_debug_mask {
+ ATH11K_DBG_AHB = 0x00000001,
+ ATH11K_DBG_WMI = 0x00000002,
+ ATH11K_DBG_HTC = 0x00000004,
+ ATH11K_DBG_DP_HTT = 0x00000008,
+ ATH11K_DBG_MAC = 0x00000010,
+ ATH11K_DBG_BOOT = 0x00000020,
+ ATH11K_DBG_QMI = 0x00000040,
+ ATH11K_DBG_DATA = 0x00000080,
+ ATH11K_DBG_MGMT = 0x00000100,
+ ATH11K_DBG_REG = 0x00000200,
+ ATH11K_DBG_TESTMODE = 0x00000400,
+ ATH11k_DBG_HAL = 0x00000800,
+ ATH11K_DBG_ANY = 0xffffffff,
+};
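+
+/* These are bit flags and can be OR'ed together; ath11k_debug_mask is
+ * typically set via the driver's debug_mask module parameter, e.g.
+ * 0x3 enables AHB and WMI messages.
+ */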
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[0];
+};
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+#define ATH11K_HTT_PKTLOG_MAX_SIZE 2048
+
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath11k_debug_mask;
+
+#ifdef CONFIG_ATH11K_DEBUG
+__printf(3, 4) void __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *fmt, ...);
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH11K_DEBUG */
+static inline int __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUG */
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debug_soc_create(struct ath11k_base *ab);
+void ath11k_debug_soc_destroy(struct ath11k_base *ab);
+int ath11k_debug_pdev_create(struct ath11k_base *ab);
+void ath11k_debug_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debug_register(struct ath11k *ar);
+void ath11k_debug_unregister(struct ath11k *ar);
+void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+
+void ath11k_debug_fw_stats_init(struct ath11k *ar);
+int ath11k_dbg_htt_stats_req(struct ath11k *ar);
+
+static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts);
+#else
+static inline int ath11k_debug_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debug_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debug_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debug_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debug_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void
+ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if (ath11k_debug_mask & dbg_mask) \
+ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
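+
+/* Example (ab, id and len are placeholders):
+ *
+ *   ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd %d len %d\n", id, len);
+ *
+ * The message is emitted only when the ATH11K_DBG_WMI bit is set in
+ * ath11k_debug_mask.
+ */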
+
+#endif /* _ATH11K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
new file mode 100644
index 000000000000..9939e909628f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
@@ -0,0 +1,4570 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "debug_htt_stats.h"
+
+#define HTT_DBG_OUT(buf, len, fmt, ...) \
+ scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
+
+#define HTT_MAX_STRING_LEN 256
+#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
+
+#define HTT_TLV_HDR_LEN 4
+
+#define ARRAY_TO_STRING(out, arr, len) \
+ do { \
+ int index = 0; u8 i; \
+ for (i = 0; i < len; i++) { \
+ index += snprintf(out + index, HTT_MAX_STRING_LEN - index, \
+ " %u:%u,", i, arr[i]); \
+ if (index < 0 || index >= HTT_MAX_STRING_LEN) \
+ break; \
+ } \
+ } while (0)
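+
+/* Renders an array as " 0:v0, 1:v1, ..." into 'out', which is assumed
+ * to be at least HTT_MAX_STRING_LEN bytes; output is silently truncated
+ * once the buffer fills.
+ */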
+
+static inline void htt_print_stats_string_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+ u16 index = 0;
+ char data[HTT_MAX_STRING_LEN] = {0};
+
+	tag_len = tag_len >> 2; /* tag_len is in bytes; data[] holds u32 words */
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
+
+ for (i = 0; i < tag_len; i++) {
+ index += snprintf(&data[index],
+ HTT_MAX_STRING_LEN - index,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
+ if (index >= HTT_MAX_STRING_LEN)
+ break;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
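+
+/* All htt_print_*_tlv() helpers in this file follow the same pattern:
+ * append formatted lines to stats_req->buf, bound writes by
+ * ATH11K_HTT_STATS_BUF_SIZE, and explicitly NUL terminate before
+ * updating stats_req->buf_len.
+ */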
+
+static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
+ htt_stats_buf->hw_queued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
+ htt_stats_buf->hw_reaped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u",
+ htt_stats_buf->underrun);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u",
+ htt_stats_buf->hw_paused);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u",
+ htt_stats_buf->hw_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u",
+ htt_stats_buf->hw_filt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
+ htt_stats_buf->tx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
+ htt_stats_buf->mpdu_requed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
+ htt_stats_buf->tx_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
+ htt_stats_buf->data_rc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = %u",
+ htt_stats_buf->cont_xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u",
+ htt_stats_buf->tx_timeout);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u",
+ htt_stats_buf->pdev_resets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u",
+ htt_stats_buf->phy_underrun);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u",
+ htt_stats_buf->txop_ovf);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u",
+ htt_stats_buf->seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u",
+ htt_stats_buf->seq_failed_queueing);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u",
+ htt_stats_buf->seq_completed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u",
+ htt_stats_buf->seq_restarted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u",
+ htt_stats_buf->mu_seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u",
+ htt_stats_buf->seq_posted_isr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u",
+ htt_stats_buf->seq_ctrl_cached);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u",
+ htt_stats_buf->mpdu_count_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u",
+ htt_stats_buf->msdu_count_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u",
+ htt_stats_buf->msdu_removed_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u",
+ htt_stats_buf->mpdus_sw_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
+ htt_stats_buf->mpdus_hw_filter);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u",
+ htt_stats_buf->mpdus_truncated);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u",
+ htt_stats_buf->mpdus_ack_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u",
+ htt_stats_buf->mpdus_expired);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u",
+ htt_stats_buf->local_data_enqued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u",
+ htt_stats_buf->local_data_freed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u",
+ htt_stats_buf->mpdu_tried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ htt_stats_buf->tx_active_dur_us_high);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char urrn_stats[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:");
+
+ ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char flush_errs[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:");
+
+ ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_errs = %s\n", flush_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sifs_status[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:");
+
+ ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n",
+ sifs_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char phy_errs[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sifs_hist_status[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:");
+
+ ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n",
+ sifs_hist_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:");
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u",
+ htt_stats_buf->num_data_ppdus_legacy_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u",
+ htt_stats_buf->num_data_ppdus_ac_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u",
+ htt_stats_buf->num_data_ppdus_ax_su);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
+ htt_stats_buf->hist_bin_size);
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(tried_mpdu_cnt_hist,
+ htt_stats_buf->tried_mpdu_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n",
+ tried_mpdu_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER\n");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:");
+ memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
+ HTT_STATS_MAX_HW_INTR_NAME_LEN);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u",
+ htt_stats_buf->mask);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:");
+ memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
+ HTT_STATS_MAX_HW_MODULE_NAME_LEN);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ",
+ hw_module_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
+ htt_stats_buf->tx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u",
+ htt_stats_buf->tx_abort_fail_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u",
+ htt_stats_buf->rx_abort);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u",
+ htt_stats_buf->rx_abort_fail_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u",
+ htt_stats_buf->warm_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u",
+ htt_stats_buf->cold_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u",
+ htt_stats_buf->tx_flush);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u",
+ htt_stats_buf->tx_glb_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u",
+ htt_stats_buf->tx_txq_reset);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
+ htt_stats_buf->rx_timeout_reset);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u",
+ htt_stats_buf->last_update_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u",
+ htt_stats_buf->last_add_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u",
+ htt_stats_buf->last_remove_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u",
+ htt_stats_buf->total_processed_msdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u",
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >>
+ 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u",
+ (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >>
+ 20);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u",
+ htt_stats_buf->last_cycle_drop_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n",
+ htt_stats_buf->current_drop_th);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
+ (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
+ 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
+ htt_stats_buf->tid_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
+ htt_stats_buf->hw_queued);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
+ htt_stats_buf->hw_reaped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
+ htt_stats_buf->mpdus_hw_filter);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
+ htt_stats_buf->qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
+ htt_stats_buf->qdepth_num_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
+ htt_stats_buf->pause_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
+ (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
+ 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
+ htt_stats_buf->tid_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u",
+ htt_stats_buf->max_qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u",
+ htt_stats_buf->rsvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
+ htt_stats_buf->qdepth_bytes);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
+ htt_stats_buf->qdepth_num_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
+ htt_stats_buf->pause_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u",
+ htt_stats_buf->block_module_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x",
+ htt_stats_buf->allow_n_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
+ htt_stats_buf->sendn_frms_allowed);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
+ (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u",
+ htt_stats_buf->dup_in_reorder);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u",
+ htt_stats_buf->dup_past_outside_window);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u",
+ htt_stats_buf->dup_past_within_window);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
+ htt_stats_buf->rxdesc_err_decrypt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_counter_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_counter_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char counter_name[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:");
+
+ ARRAY_TO_STRING(counter_name,
+ htt_stats_buf->counter_name,
+ HTT_MAX_COUNTER_NAME);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u",
+ htt_stats_buf->ppdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u",
+ htt_stats_buf->mpdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt = %u",
+ htt_stats_buf->msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u",
+ htt_stats_buf->pause_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u",
+ htt_stats_buf->block_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d",
+ htt_stats_buf->rssi);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n",
+ htt_stats_buf->inactive_time);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_details_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u",
+ htt_stats_buf->peer_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
+ htt_stats_buf->sw_peer_id);
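+	/* vdev_pdev_ast_idx packs vdev_id [7:0], pdev_id [15:8] and
+	 * ast_idx [31:16] into one word
+	 */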
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
+ htt_stats_buf->vdev_pdev_ast_idx & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u",
+ (htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16);
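+	/* the MAC address is packed into mac_addr_l32/mac_addr_h16;
+	 * octets are extracted LSB first
+	 */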
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x",
+ htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
+ (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
+ (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF),
+ (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x",
+ htt_stats_buf->peer_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
+ htt_stats_buf->qpeer_flags);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+ u8 j;
+
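+	/* the per-GI strings are too big for the stack; allocate them
+	 * individually, with GFP_ATOMIC since this path presumably may
+	 * run in atomic context
+	 */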
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!tx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
+ htt_stats_buf->tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
+ htt_stats_buf->ack_rssi);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j],
+ htt_stats_buf->tx_gi[j],
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf,
+ htt_stats_buf->tx_dcm,
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
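+	/* fall through: tx_gi[] is freed on both success and failure */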
+fail:
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(tx_gi[j]);
+}
+
+static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+ char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+ char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rssi_chain[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
+ htt_stats_buf->nsts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
+ htt_stats_buf->rx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
+ htt_stats_buf->rssi_mgmt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
+ htt_stats_buf->rssi_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
+ htt_stats_buf->rssi_comb);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
+ j, rssi_chain[j]);
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
+fail:
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rssi_chain[j]);
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(rx_gi[j]);
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__hwq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n",
+ (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ /* TODO: HKDBG */
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__hwq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u",
+ (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u",
+ htt_stats_buf->xretry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u",
+ htt_stats_buf->underrun_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u",
+ htt_stats_buf->flush_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u",
+ htt_stats_buf->filt_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u",
+ htt_stats_buf->null_mpdu_bmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u",
+ htt_stats_buf->user_ack_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u",
+ htt_stats_buf->sched_id_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u",
+ htt_stats_buf->num_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
+ htt_stats_buf->rts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
+ htt_stats_buf->cts2self);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
+ htt_stats_buf->qos_null);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n",
+ htt_stats_buf->txq_timeout);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
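+	/* tag_len is in bytes; each histogram bin is a u32 word */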
+ u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
+ char difs_latency_hist[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u",
+ htt_stats_buf->hist_intvl);
+
+ ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist,
+ data_len);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n",
+ difs_latency_hist);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len;
+ char cmd_result[HTT_MAX_STRING_LEN] = {0};
+
+ data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+ char cmd_stall_status[HTT_MAX_STRING_LEN] = {0};
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n",
+ cmd_stall_status);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+ char fes_result[HTT_MAX_STRING_LEN] = {0};
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = ((tag_len -
+ sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
+ htt_stats_buf->hist_bin_size);
+
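+	/* each element needs up to HTT_MAX_PRINT_CHAR_PER_ELEM chars;
+	 * skip the histogram if it cannot fit into the string buffer
+	 */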
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(tried_mpdu_cnt_hist,
+ htt_stats_buf->tried_mpdu_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "tried_mpdu_cnt_hist = %s\n",
+ tried_mpdu_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u32 num_elements = tag_len >> 2;
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:");
+
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(txop_used_cnt_hist,
+ htt_stats_buf->txop_used_cnt_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n",
+ txop_used_cnt_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+	if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ s32 i;
+ const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ const u32 *cbf_20 = htt_stats_buf->cbf_20;
+ const u32 *cbf_40 = htt_stats_buf->cbf_40;
+ const u32 *cbf_80 = htt_stats_buf->cbf_80;
+ const u32 *cbf_160 = htt_stats_buf->cbf_160;
+
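+	/* one TLV carries either 11ac or 11ax sounding stats, selected
+	 * by tx_sounding_mode
+	 */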
+ if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+		for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+			/* index per user, assuming sounding[] holds
+			 * HTT_TX_PDEV_STATS_NUM_BW_COUNTERS entries per user;
+			 * a fixed index would print user 0 for every user
+			 */
+			len += HTT_DBG_OUT(buf + len, buf_len - len,
+					   "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
+					   i,
+					   htt_stats_buf->sounding[i * 4],
+					   htt_stats_buf->sounding[i * 4 + 1],
+					   htt_stats_buf->sounding[i * 4 + 2],
+					   htt_stats_buf->sounding[i * 4 + 3]);
+		}
+ } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+		for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+			/* index per user, as in the 11ac branch above */
+			len += HTT_DBG_OUT(buf + len, buf_len - len,
+					   "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
+					   i,
+					   htt_stats_buf->sounding[i * 4],
+					   htt_stats_buf->sounding[i * 4 + 1],
+					   htt_stats_buf->sounding[i * 4 + 2],
+					   htt_stats_buf->sounding[i * 4 + 3]);
+		}
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u",
+ htt_stats_buf->su_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
+ htt_stats_buf->rts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
+ htt_stats_buf->cts2self);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
+ htt_stats_buf->qos_null);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u",
+ htt_stats_buf->delayed_bar_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u",
+ htt_stats_buf->delayed_bar_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u",
+ htt_stats_buf->delayed_bar_3);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u",
+ htt_stats_buf->delayed_bar_4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u",
+ htt_stats_buf->delayed_bar_5);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u",
+ htt_stats_buf->delayed_bar_6);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n",
+ htt_stats_buf->delayed_bar_7);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u",
+ htt_stats_buf->ac_su_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u",
+ htt_stats_buf->ac_su_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u",
+ htt_stats_buf->ax_su_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u",
+ htt_stats_buf->ax_su_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u",
+ htt_stats_buf->ax_basic_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u",
+ htt_stats_buf->ax_bsr_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n",
+ htt_stats_buf->ax_mu_rts_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u",
+ htt_stats_buf->ac_su_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u",
+ htt_stats_buf->ax_su_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
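+	/* each TLV instance reports a single user under a single
+	 * scheduling mode; the section header is printed for user 0 only
+	 */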
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!htt_stats_buf->user_index)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:");
+
+ ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n",
+ sched_cmd_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:");
+
+ ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n",
+ sched_cmd_reaped);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_order_su[HTT_MAX_STRING_LEN] = {0};
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_order_su_num_entries =
+ min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:");
+
+ ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n",
+ sched_order_su);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char sched_ineligibility[HTT_MAX_STRING_LEN] = {0};
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
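+	/* unlike its siblings this count is not clamped; ARRAY_TO_STRING()
+	 * is expected to stop once the string buffer is full
+	 */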
+
+	len += HTT_DBG_OUT(buf + len, buf_len - len,
+			   "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_TLV_V:");
+
+ ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n",
+ sched_ineligibility);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__txq_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u",
+ (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u",
+ htt_stats_buf->sched_policy);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u",
+ htt_stats_buf->num_active_tids);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u",
+ htt_stats_buf->num_ps_schedules);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u",
+ htt_stats_buf->sched_cmds_pending);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u",
+ htt_stats_buf->num_tid_register);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u",
+ htt_stats_buf->num_tid_unregister);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u",
+ htt_stats_buf->num_qstats_queried);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u",
+ htt_stats_buf->qstats_update_pending);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_qstats_query_timestamp = %u",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+	len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u",
+			   htt_stats_buf->notify_sched);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ htt_stats_buf->dur_based_sendn_term);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n",
+ htt_stats_buf->current_timestamp);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n",
+ gen_mpdu_end_reason);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:");
+
+ ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n",
+ list_mpdu_end_reason);
+
+	if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:");
+
+ ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n",
+ list_mpdu_cnt_hist);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u",
+ htt_stats_buf->msdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u",
+ htt_stats_buf->mpdu_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u",
+ htt_stats_buf->remove_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u",
+ htt_stats_buf->remove_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u",
+ htt_stats_buf->remove_msdu_ttl);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u",
+ htt_stats_buf->send_bar);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u",
+ htt_stats_buf->bar_sync);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u",
+ htt_stats_buf->notify_mpdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
+ htt_stats_buf->sync_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
+ htt_stats_buf->write_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u",
+ htt_stats_buf->hwsch_trigger);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
+ htt_stats_buf->ack_tlv_proc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u",
+ htt_stats_buf->gen_list_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
+ htt_stats_buf->remove_msdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
+ htt_stats_buf->flush_cache_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
+ htt_stats_buf->update_mpduq_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u",
+ htt_stats_buf->enqueue);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u",
+ htt_stats_buf->enqueue_notify);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u",
+ htt_stats_buf->sched_udp_notify1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u",
+ htt_stats_buf->sched_udp_notify2);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
+ htt_stats_buf->sched_nonudp_notify2);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "max_cmdq_id = %u",
+ htt_stats_buf->max_cmdq_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u",
+ htt_stats_buf->add_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u",
+ htt_stats_buf->q_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u",
+ htt_stats_buf->q_not_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u",
+ htt_stats_buf->drop_notification);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n",
+ htt_stats_buf->desc_threshold);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u",
+ htt_stats_buf->q_empty_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u",
+ htt_stats_buf->q_not_empty_failure);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n",
+ htt_stats_buf->add_msdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__cmdq_id__word & 0xFF);
+	len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u",
+			   (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
+ htt_stats_buf->sync_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
+ htt_stats_buf->write_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
+ htt_stats_buf->remove_msdu_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
+ htt_stats_buf->flush_cache_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
+ htt_stats_buf->update_mpduq_cmd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
+ htt_stats_buf->update_msduq_cmd);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u",
+ htt_stats_buf->m1_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u",
+ htt_stats_buf->m2_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u",
+ htt_stats_buf->m3_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u",
+ htt_stats_buf->m4_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u",
+ htt_stats_buf->g1_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n",
+ htt_stats_buf->g2_packets);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u",
+ htt_stats_buf->sta_delete_in_progress);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u",
+ htt_stats_buf->invalid_vdev_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u",
+ htt_stats_buf->peer_entry_invalid);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u",
+ htt_stats_buf->ethertype_not_ip);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u",
+ htt_stats_buf->eapol_lookup_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u",
+ htt_stats_buf->fse_tid_override);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u",
+ htt_stats_buf->arp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u",
+ htt_stats_buf->igmp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u",
+ htt_stats_buf->dhcp_packets);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u",
+ htt_stats_buf->host_inspected);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u",
+ htt_stats_buf->htt_included);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u",
+ htt_stats_buf->htt_valid_mcs);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u",
+ htt_stats_buf->htt_valid_nss);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u",
+ htt_stats_buf->htt_valid_chainmask);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u",
+ htt_stats_buf->htt_valid_retries);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u",
+ htt_stats_buf->htt_valid_bw_info);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u",
+ htt_stats_buf->htt_valid_power);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x",
+ htt_stats_buf->htt_valid_key_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u",
+ htt_stats_buf->fse_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u",
+ htt_stats_buf->fse_priority_be);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u",
+ htt_stats_buf->fse_priority_high);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u",
+ htt_stats_buf->fse_priority_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u",
+ htt_stats_buf->fse_hwqueue_created);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u",
+ htt_stats_buf->mcast_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "bcast_entry = %u",
+ htt_stats_buf->bcast_entry);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u",
+ htt_stats_buf->htt_update_peer_cache);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u",
+ htt_stats_buf->htt_learning_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u",
+ htt_stats_buf->fse_invalid_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n",
+ htt_stats_buf->mec_notify);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u",
+ htt_stats_buf->eok);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u",
+ htt_stats_buf->classify_done);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u",
+ htt_stats_buf->lookup_failed);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u",
+ htt_stats_buf->send_host_dhcp);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u",
+ htt_stats_buf->send_host_mcast);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u",
+ htt_stats_buf->send_host_unknown_dest);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u",
+ htt_stats_buf->send_host);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n",
+ htt_stats_buf->status_invalid);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u",
+ htt_stats_buf->enqueued_pkts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u",
+ htt_stats_buf->to_tqm);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
+ htt_stats_buf->to_tqm_bypass);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u",
+ htt_stats_buf->discarded_pkts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u",
+ htt_stats_buf->local_frames);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n",
+ htt_stats_buf->is_ext_msdu);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u",
+ htt_stats_buf->tcl_dummy_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u",
+ htt_stats_buf->tqm_dummy_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u",
+ htt_stats_buf->tqm_notify_frame);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u",
+ htt_stats_buf->fw2wbm_enq);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
+ htt_stats_buf->tqm_bypass_frame);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elements = tag_len >> 2;
+ u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
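+ /* tag_len is in bytes and each histogram element is one 32-bit word;
+ * render the array only if its text form fits the scratch string.
+ */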
+ if (required_buffer_size < HTT_MAX_STRING_LEN) {
+ ARRAY_TO_STRING(fw2wbm_ring_full_hist,
+ htt_stats_buf->fw2wbm_ring_full_hist,
+ num_elements);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "fw2wbm_ring_full_hist = %s\n",
+ fw2wbm_ring_full_hist);
+ } else {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "INSUFFICIENT PRINT BUFFER ");
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u",
+ htt_stats_buf->not_to_fw);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n",
+ htt_stats_buf->invalid_pdev);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
+ char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u",
+ htt_stats_buf->base_addr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
+ htt_stats_buf->elem_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u",
+ htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u",
+ (htt_stats_buf->num_elems__prefetch_tail_idx &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u",
+ htt_stats_buf->head_idx__tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u",
+ (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u",
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u",
+ (htt_stats_buf->shadow_head_idx__shadow_tail_idx &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u",
+ htt_stats_buf->num_tail_incr);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u",
+ htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u",
+ (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u",
+ htt_stats_buf->overrun_hit_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u",
+ htt_stats_buf->underrun_hit_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u",
+ htt_stats_buf->prod_blockwait_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u",
+ htt_stats_buf->cons_blockwait_count);
+
+ ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count,
+ HTT_STATS_LOW_WM_BINS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ",
+ low_wm_hit_count);
+
+ ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count,
+ HTT_STATS_HIGH_WM_BINS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n",
+ high_wm_hit_count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = tag_len >> 2;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:");
+
+ ARRAY_TO_STRING(dwords_used_by_user_n,
+ htt_stats_buf->dwords_used_by_user_n,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n",
+ dwords_used_by_user_n);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u",
+ htt_stats_buf->client_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u",
+ htt_stats_buf->buf_min);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u",
+ htt_stats_buf->buf_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u",
+ htt_stats_buf->buf_busy);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u",
+ htt_stats_buf->buf_alloc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u",
+ htt_stats_buf->buf_avail);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n",
+ htt_stats_buf->num_users);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u",
+ htt_stats_buf->buf_total);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u",
+ htt_stats_buf->mem_empty);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "deallocate_bufs = %u",
+ htt_stats_buf->deallocate_bufs);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u",
+ (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x",
+ htt_stats_buf->base_addr_lsb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x",
+ htt_stats_buf->base_addr_msb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u",
+ htt_stats_buf->ring_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
+ htt_stats_buf->elem_size);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u",
+ htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u",
+ (htt_stats_buf->num_avail_words__num_valid_words &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u",
+ htt_stats_buf->head_ptr__tail_ptr & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u",
+ (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u",
+ htt_stats_buf->consumer_empty__producer_full & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u",
+ (htt_stats_buf->consumer_empty__producer_full &
+ 0xFFFF0000) >> 16);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u",
+ htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n",
+ (htt_stats_buf->prefetch_count__internal_tail_ptr &
+ 0xFFFF0000) >> 16);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
+
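+ /* Scratch strings for the per-GI MCS histograms; GFP_ATOMIC is used
+ * on the assumption that this print path may not sleep.
+ */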
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!tx_gi[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
+ htt_stats_buf->tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u",
+ htt_stats_buf->rts_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
+ htt_stats_buf->ack_rssi);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss,
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw,
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+
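+ /* tx_he_ltf[1..3] carry the 1x/2x/4x HE LTF counters; index 0 is
+ * presumably reserved and is not printed.
+ */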
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
+
+ /* SU GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* AC MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ac_mu_mimo_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* AX MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ax_mu_mimo_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ /* DL OFDMA GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j],
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ",
+ j, tx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm,
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+fail:
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
+ kfree(tx_gi[j]);
+}
+
+static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u16 index = 0;
+ char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+ char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
+ char str_buf[HTT_MAX_STRING_LEN] = {0};
+ char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rssi_chain[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_gi[j])
+ goto fail;
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+ if (!rx_pilot_evm_db[j])
+ goto fail;
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
+ htt_stats_buf->nsts);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
+ htt_stats_buf->rx_ldpc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
+ htt_stats_buf->rts_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
+ htt_stats_buf->rssi_mgmt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
+ htt_stats_buf->rssi_data);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
+ htt_stats_buf->rssi_comb);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d",
+ htt_stats_buf->rssi_in_dbm);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u",
+ htt_stats_buf->nss_count);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u",
+ htt_stats_buf->pilot_count);
+
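+ /* Render each per-stream pilot EVM array as " index:value," pairs. */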
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+
+ for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
+ index += snprintf(&rx_pilot_evm_db[j][index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ",
+ j, rx_pilot_evm_db[j]);
+ }
+
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
+ j, rssi_chain[j]);
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u",
+ htt_stats_buf->txbf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate,
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate,
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ",
+ str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j],
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ",
+ j, rx_gi[j]);
+ }
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss,
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u",
+ htt_stats_buf->ul_ofdma_rx_stbc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u",
+ htt_stats_buf->ul_ofdma_rx_ldpc);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ",
+ str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf);
+
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s",
+ str_buf);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf);
+ }
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x",
+ htt_stats_buf->per_chain_rssi_pkt_type);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ index = 0;
+ memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ index += snprintf(&str_buf[index],
+ HTT_MAX_STRING_LEN - index,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf);
+ }
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+
+fail:
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rssi_chain[j]);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ kfree(rx_pilot_evm_db[j]);
+
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++)
+ kfree(rx_gi[i]);
+}
+
+static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0};
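+ /* Clamp to the ring count the host knows about in case the firmware
+ * reports a larger TLV than expected.
+ */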
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:");
+
+ ARRAY_TO_STRING(refill_ring_empty_cnt,
+ htt_stats_buf->refill_ring_empty_cnt,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n",
+ refill_ring_empty_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(rxdma_err_cnt,
+ htt_stats_buf->rxdma_err,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n",
+ rxdma_err_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char reo_err_cnt[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(reo_err_cnt,
+ htt_stats_buf->reo_err,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n",
+ reo_err_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u",
+ htt_stats_buf->sample_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u",
+ htt_stats_buf->total_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u",
+ htt_stats_buf->total_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u",
+ htt_stats_buf->total_sample);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u",
+ htt_stats_buf->non_zeros_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u",
+ htt_stats_buf->non_zeros_sample);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u",
+ htt_stats_buf->last_non_zeros_max);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min %u",
+ htt_stats_buf->last_non_zeros_min);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_avg %u",
+ htt_stats_buf->last_non_zeros_avg);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample %u\n",
+ htt_stats_buf->last_non_zeros_sample);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:");
+
+ ARRAY_TO_STRING(refill_ring_num_refill,
+ htt_stats_buf->refill_ring_num_refill,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n",
+ refill_ring_num_refill);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0};
+ char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u",
+ htt_stats_buf->ppdu_recvd);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u",
+ htt_stats_buf->udp_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u",
+ htt_stats_buf->other_msdu_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ ARRAY_TO_STRING(fw_ring_mgmt_subtype,
+ htt_stats_buf->fw_ring_mgmt_subtype,
+ HTT_STATS_SUBTYPE_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ",
+ fw_ring_mgmt_subtype);
+
+ ARRAY_TO_STRING(fw_ring_ctrl_subtype,
+ htt_stats_buf->fw_ring_ctrl_subtype,
+ HTT_STATS_SUBTYPE_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ",
+ fw_ring_ctrl_subtype);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u",
+ htt_stats_buf->rx_suspend_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u",
+ htt_stats_buf->rx_resume_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u",
+ htt_stats_buf->rx_flush_cnt);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:");
+
+ ARRAY_TO_STRING(fw_ring_mpdu_err,
+ htt_stats_buf->fw_ring_mpdu_err,
+ HTT_RX_STATS_RXDMA_MAX_ERR);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n",
+ fw_ring_mpdu_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0};
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:");
+
+ ARRAY_TO_STRING(fw_mpdu_drop,
+ htt_stats_buf->fw_mpdu_drop,
+ num_elems);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char phy_errs[HTT_MAX_STRING_LEN] = {0};
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u",
+ htt_stats_buf->mac_id__word);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u",
+ htt_stats_buf->total_phy_err_cnt);
+
+ ARRAY_TO_STRING(phy_errs,
+ htt_stats_buf->phy_err,
+ HTT_STATS_PHY_ERR_MAX);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u",
+ htt_stats_buf->chan_num);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u",
+ htt_stats_buf->num_records);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n",
+ htt_stats_buf->collection_interval);
+
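+ /* The HTT_PDEV_STATS_CCA_COUNTERS TLVs that follow print one row
+ * each under this header.
+ */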
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
+ htt_stats_buf->mac_id__word & 0xFF);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u",
+ htt_stats_buf->hwsch_reset_count);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u",
+ htt_stats_buf->sch_selfgen_response);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ htt_stats_buf->pdev_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n",
+ htt_stats_buf->num_sessions);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:");
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
+ htt_stats_buf->vdev_id);
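+	/* peer_mac is stored as a 32-bit low word plus a 16-bit high word;
+	 * print the bytes least-significant first
+	 */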
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x",
+ htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16,
+ (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24,
+ (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF),
+ (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u",
+ htt_stats_buf->flow_id_flags);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u",
+ htt_stats_buf->dialog_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u",
+ htt_stats_buf->wake_dura_us);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u",
+ htt_stats_buf->wake_intvl_us);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n",
+ htt_stats_buf->sp_offset_us);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u",
+ htt_stats_buf->num_obss_tx_ppdu_success);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 tlv_len = 0, i = 0, word_len = 0;
+
+ tlv_len = FIELD_GET(HTT_TLV_LEN, *tag_buf) + HTT_TLV_HDR_LEN;
+	word_len = DIV_ROUND_UP(tlv_len, 4);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================================");
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "HKDBG TLV DUMP: (tag_len=%u bytes, words=%u)",
+ tlv_len, word_len);
+
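+	/* Dump the TLV payload as hex, four 32-bit words per line; the
+	 * if/else chain below prints the remaining 1-3 words, if any
+	 */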
+ for (i = 0; i + 3 < word_len; i += 4) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "0x%08x 0x%08x 0x%08x 0x%08x",
+ tag_buf[i], tag_buf[i + 1],
+ tag_buf[i + 2], tag_buf[i + 3]);
+ }
+
+ if (i + 3 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x 0x%08x ",
+ tag_buf[i], tag_buf[i + 1], tag_buf[i + 2]);
+ } else if (i + 2 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x ",
+ tag_buf[i], tag_buf[i + 1]);
+ } else if (i + 1 == word_len) {
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x ",
+ tag_buf[i]);
+ }
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================================");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
+ htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
+ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_STRING_TAG:
+ htt_print_stats_string_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMN_TAG:
+ htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
+ htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
+ htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
+ htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
+ htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
+ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
+ htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
+ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CMN_TAG:
+ htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_TAG:
+ htt_print_ring_if_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CMN_TAG:
+ htt_print_sfm_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SRING_STATS_TAG:
+ htt_print_sring_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+ htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
+ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
+ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_STATS_TAG:
+ htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
+ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REFILL_REO_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
+		htt_print_rx_reo_debug_stats_tlv_v(tag_buf, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
+ htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_CMN_TAG:
+ htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_USER_TAG:
+ htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_TAG:
+ htt_print_sfm_client_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SRING_CMN_TAG:
+ htt_print_sring_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+ htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+ htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+ htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_WD_TIMEOUT_TAG:
+ htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_COUNTER_NAME_TAG:
+ htt_print_counter_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_TAG:
+ htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_V1_TAG:
+ htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_TID_DETAILS_TAG:
+ htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_STATS_CMN_TAG:
+ htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_DETAILS_TAG:
+ htt_print_peer_details_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
+ htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_TX_RATE_STATS_TAG:
+ htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_RX_RATE_STATS_TAG:
+ htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+ htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+ htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_WHAL_TX_TAG:
+ htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
+ htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSION_TAG:
+ htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_OBSS_PD_TAG:
+ htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath11k *ar;
+ u32 len;
+ u64 cookie;
+ int ret;
+ u8 pdev_id;
+
+ msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
+ cookie = msg->cookie;
+
+ if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) {
+ ath11k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie);
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ rcu_read_unlock();
+ if (!ar) {
+ ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ return;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ return;
+
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (stats_req->done) {
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ return;
+ }
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+
+ len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+
+ complete(&stats_req->cmpln);
+}
+
+static ssize_t ath11k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
+ return -E2BIG;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO)
+ return -EPERM;
+
+ ar->debug.htt_stats.type = type;
+
+	return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath11k_read_htt_stats_type,
+ .write = ath11k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
+ const u8 *mac_addr,
+ struct htt_ext_stats_cfg_params *cfg_params)
+{
+ if (!cfg_params)
+ return -EINVAL;
+
+ switch (type) {
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
+ case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+ cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
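+		/* Pack the peer MAC address into cfg2 (bytes 0-3) and
+		 * cfg3 (bytes 4-5), one byte per 8-bit field
+		 */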
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO:
+ case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ u8 type = stats_req->type;
+ u64 cookie = 0;
+ int ret, pdev_id = ar->pdev->pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ init_completion(&stats_req->cmpln);
+
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
+ FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
+
+ ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
+ &cfg_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
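+	/* On timeout, claim the request by setting ->done; if the event
+	 * handler marked it done first, the completion is imminent, so
+	 * wait for it again
+	 */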
+ while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ ath11k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ }
+
+ return 0;
+}
+
+static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ u8 type = ar->debug.htt_stats.type;
+ int ret;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ ret = ath11k_dbg_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ return ret;
+}
+
+static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ath11k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
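+/* Opening the "htt_stats" file sends the HTT request and blocks until the
+ * response has been parsed into stats_req->buf; read() then returns the
+ * formatted buffer.
+ */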
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath11k_open_htt_stats,
+ .release = ath11k_release_htt_stats,
+ .read = ath11k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_htt_stats_reset(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
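+	/* cfg1 carries a one-hot bit for the stats type being reset,
+	 * offset by the reset start position held in cfg0
+	 */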
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .read = ath11k_read_htt_stats_reset,
+ .write = ath11k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debug_htt_stats_init(struct ath11k *ar)
+{
+ spin_lock_init(&ar->debug.htt_stats.lock);
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
new file mode 100644
index 000000000000..4bdb62dd7b8d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -0,0 +1,1662 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
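+/* 64-bit stats cookie echoed back by firmware: pdev_id in the lower 32 bits,
+ * HTT_STATS_MAGIC_VALUE in the upper 32 bits
+ */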
+
+enum htt_tlv_tag_t {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_PDEV_PHY_ERR_TAG = 4,
+ HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_HWQ_CMN_TAG = 6,
+ HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG = 7,
+ HTT_STATS_TX_HWQ_CMD_RESULT_TAG = 8,
+ HTT_STATS_TX_HWQ_CMD_STALL_TAG = 9,
+ HTT_STATS_TX_HWQ_FES_STATUS_TAG = 10,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_TQM_CMDQ_STATUS_TAG = 16,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
+ HTT_STATS_RING_IF_TAG = 24,
+ HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
+ HTT_STATS_SFM_CMN_TAG = 26,
+ HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_RX_PDEV_FW_STATS_TAG = 28,
+ HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG = 29,
+ HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG = 30,
+ HTT_STATS_RX_SOC_FW_STATS_TAG = 31,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG = 32,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG = 33,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG = 38,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_RING_IF_CMN_TAG = 40,
+ HTT_STATS_SFM_CLIENT_USER_TAG = 41,
+ HTT_STATS_SFM_CLIENT_TAG = 42,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_SRING_CMN_TAG = 45,
+ HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
+ HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
+ HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
+ HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
+ HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
+ HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG = 51,
+ HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG = 52,
+ HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG = 53,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_WD_TIMEOUT_TAG = 55,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_COUNTER_NAME_TAG = 57,
+ HTT_STATS_TX_TID_DETAILS_TAG = 58,
+ HTT_STATS_RX_TID_DETAILS_TAG = 59,
+ HTT_STATS_PEER_STATS_CMN_TAG = 60,
+ HTT_STATS_PEER_DETAILS_TAG = 61,
+ HTT_STATS_PEER_TX_RATE_STATS_TAG = 62,
+ HTT_STATS_PEER_RX_RATE_STATS_TAG = 63,
+ HTT_STATS_PEER_MSDU_FLOWQ_TAG = 64,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68,
+ HTT_STATS_TX_TID_DETAILS_V1_TAG = 69,
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75,
+ HTT_STATS_PDEV_TWT_SESSION_TAG = 76,
+ HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77,
+ HTT_STATS_RX_REFILL_REO_ERR_TAG = 78,
+ HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
+ HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG = 81,
+ HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG = 82,
+ HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG = 83,
+ HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG = 84,
+ HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG = 85,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define HTT_STATS_MAX_STRING_SZ32 4
+#define HTT_STATS_MACID_INVALID 0xff
+#define HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS 10
+#define HTT_TX_HWQ_MAX_CMD_RESULT_STATS 13
+#define HTT_TX_HWQ_MAX_CMD_STALL_STATS 5
+#define HTT_TX_HWQ_MAX_FES_RESULT_STATS 10
+
+enum htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
+#define HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+#define HTT_RX_STATS_REFILL_MAX_RING 4
+#define HTT_RX_STATS_RXDMA_MAX_ERR 16
+#define HTT_RX_STATS_FW_DROP_REASON_MAX 16
+
+/* Bytes stored in little endian order */
+/* Length should be multiple of DWORD */
+struct htt_stats_string_tlv {
+ u32 data[0]; /* Can be variable length */
+} __packed;
+
+/* == TX PDEV STATS == */
+struct htt_tx_pdev_stats_cmn_tlv {
+ u32 mac_id__word;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 underrun;
+ u32 hw_paused;
+ u32 hw_flush;
+ u32 hw_filt;
+ u32 tx_abort;
+ u32 mpdu_requed;
+ u32 tx_xretry;
+ u32 data_rc;
+ u32 mpdu_dropped_xretry;
+ u32 illgl_rate_phy_err;
+ u32 cont_xretry;
+ u32 tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 seq_switch_hw_paused;
+ u32 next_seq_posted_dsr;
+ u32 seq_posted_isr;
+ u32 seq_ctrl_cached;
+ u32 mpdu_count_tqm;
+ u32 msdu_count_tqm;
+ u32 mpdu_removed_tqm;
+ u32 msdu_removed_tqm;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+ u32 mpdus_seq_hw_retry;
+ u32 ack_tlv_proc;
+ u32 coex_abort_mpdu_cnt_valid;
+ u32 coex_abort_mpdu_cnt;
+ u32 num_total_ppdus_tried_ota;
+ u32 num_data_ppdus_tried_ota;
+ u32 local_ctrl_mgmt_enqued;
+ u32 local_ctrl_mgmt_freed;
+ u32 local_data_enqued;
+ u32 local_data_freed;
+ u32 mpdu_tried;
+ u32 isr_wait_seq_posted;
+
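+	/* TX active duration in usec as a 64-bit value split into
+	 * low/high 32-bit words
+	 */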
+ u32 tx_active_dur_us_low;
+ u32 tx_active_dur_us_high;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_urrn_tlv_v {
+ u32 urrn_stats[0]; /* HTT_TX_PDEV_MAX_URRN_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_flush_tlv_v {
+ u32 flush_errs[0]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_tlv_v {
+ u32 sifs_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_phy_err_tlv_v {
+ u32 phy_errs[0]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_hist_tlv_v {
+	u32 sifs_hist_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS */
+};
+
+struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
+ u32 num_data_ppdus_legacy_su;
+ u32 num_data_ppdus_ac_su;
+ u32 num_data_ppdus_ax_su;
+ u32 num_data_ppdus_ac_su_txbf;
+ u32 num_data_ppdus_ax_su_txbf;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size.
+ *
+ * tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ. A try
+ * here is the count of MPDUs within a PPDU that the HW attempted to
+ * transmit on air for the HWSCH schedule command submitted by FW;
+ * it is not the number of retry attempts. The histogram bins are
+ * 0-29, 30-59, 60-89 and so on; there are 10 bins in total, so e.g.
+ * a PPDU with 45 tried MPDUs falls into the second bin. The bins
+ * are defined in FW using the following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+};
+
+/* == SOC ERROR STATS == */
+
+/* =============== PDEV ERROR STATS ============== */
+#define HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct htt_hw_stats_intr_misc_tlv {
+ /* Stored as little endian */
+ u8 hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ u32 mask;
+ u32 count;
+};
+
+#define HTT_STATS_MAX_HW_MODULE_NAME_LEN 8
+struct htt_hw_stats_wd_timeout_tlv {
+ /* Stored as little endian */
+ u8 hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN];
+ u32 count;
+};
+
+struct htt_hw_stats_pdev_errs_tlv {
+ u32 mac_id__word; /* BIT [ 7 : 0] : mac_id */
+ u32 tx_abort;
+ u32 tx_abort_fail_count;
+ u32 rx_abort;
+ u32 rx_abort_fail_count;
+ u32 warm_reset;
+ u32 cold_reset;
+ u32 tx_flush;
+ u32 tx_glb_reset;
+ u32 tx_txq_reset;
+ u32 rx_timeout_reset;
+};
+
+struct htt_hw_stats_whal_tx_tlv {
+ u32 mac_id__word;
+ u32 last_unpause_ppdu_id;
+ u32 hwsch_unpause_wait_tqm_write;
+ u32 hwsch_dummy_tlv_skipped;
+ u32 hwsch_misaligned_offset_received;
+ u32 hwsch_reset_count;
+ u32 hwsch_dev_reset_war;
+ u32 hwsch_delayed_pause;
+ u32 hwsch_long_delayed_pause;
+ u32 sch_rx_ppdu_no_response;
+ u32 sch_selfgen_response;
+ u32 sch_rx_sifs_resp_trigger;
+};
+
+/* ============ PEER STATS ============ */
+struct htt_msdu_flow_stats_tlv {
+ u32 last_update_timestamp;
+ u32 last_add_timestamp;
+ u32 last_remove_timestamp;
+ u32 total_processed_msdu_count;
+ u32 cur_msdu_count_in_flowq;
+ u32 sw_peer_id;
+ u32 tx_flow_no__tid_num__drop_rule;
+ u32 last_cycle_enqueue_count;
+ u32 last_cycle_dequeue_count;
+ u32 last_cycle_drop_count;
+ u32 current_drop_th;
+};
+
+#define MAX_HTT_TID_NAME 8
+
+/* Tidq stats */
+struct htt_tx_tid_stats_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 mpdus_hw_filter;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+};
+
+/* Tidq stats */
+struct htt_tx_tid_stats_v1_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 max_qdepth_bytes;
+ u32 max_qdepth_n_msdus;
+ u32 rsvd;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+ u32 allow_n_flags;
+ u32 sendn_frms_allowed;
+};
+
+struct htt_rx_tid_stats_tlv {
+ u32 sw_peer_id__tid_num;
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 dup_in_reorder;
+ u32 dup_past_outside_window;
+ u32 dup_past_within_window;
+ u32 rxdesc_err_decrypt;
+ u32 tid_rx_airtime;
+};
+
+#define HTT_MAX_COUNTER_NAME 8
+struct htt_counter_tlv {
+ u8 counter_name[HTT_MAX_COUNTER_NAME];
+ u32 count;
+};
+
+struct htt_peer_stats_cmn_tlv {
+ u32 ppdu_cnt;
+ u32 mpdu_cnt;
+ u32 msdu_cnt;
+ u32 pause_bitmap;
+ u32 block_bitmap;
+ u32 current_timestamp;
+ u32 peer_tx_airtime;
+ u32 peer_rx_airtime;
+ s32 rssi;
+ u32 peer_enqueued_count_low;
+ u32 peer_enqueued_count_high;
+ u32 peer_dequeued_count_low;
+ u32 peer_dequeued_count_high;
+ u32 peer_dropped_count_low;
+ u32 peer_dropped_count_high;
+ u32 ppdu_transmitted_bytes_low;
+ u32 ppdu_transmitted_bytes_high;
+ u32 peer_ttl_removed_count;
+ u32 inactive_time;
+};
+
+struct htt_peer_details_tlv {
+ u32 peer_type;
+ u32 sw_peer_id;
+ u32 vdev_pdev_ast_idx;
+ struct htt_mac_addr mac_addr;
+ u32 peer_flags;
+ u32 qpeer_flags;
+};
+
+enum htt_stats_param_type {
+ HTT_STATS_PREAM_OFDM,
+ HTT_STATS_PREAM_CCK,
+ HTT_STATS_PREAM_HT,
+ HTT_STATS_PREAM_VHT,
+ HTT_STATS_PREAM_HE,
+ HTT_STATS_PREAM_RSVD,
+ HTT_STATS_PREAM_RSVD1,
+
+ HTT_STATS_PREAM_COUNT,
+};
+
+#define HTT_TX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_tx_peer_rate_stats_tlv {
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_su_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets in each GI
+ * (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS][HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PEER_STATS_NUM_DCM_COUNTERS];
+
+};
+
+#define HTT_RX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_rx_peer_rate_stats_tlv {
+ u32 nsts;
+
+ /* Number of rx ldpc packets */
+ u32 rx_ldpc;
+ /* Number of rx rts packets */
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PEER_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES];
+ /* units = dB above noise floor */
+ u8 rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+
+ /* Counters to track number of rx packets in each GI in each mcs (0-11) */
+ u32 rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+};
+
+enum htt_peer_stats_req_mode {
+ HTT_PEER_STATS_REQ_MODE_NO_QUERY,
+ HTT_PEER_STATS_REQ_MODE_QUERY_TQM,
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM,
+};
+
+enum htt_peer_stats_tlv_enum {
+ HTT_PEER_STATS_CMN_TLV = 0,
+ HTT_PEER_DETAILS_TLV = 1,
+ HTT_TX_PEER_RATE_STATS_TLV = 2,
+ HTT_RX_PEER_RATE_STATS_TLV = 3,
+ HTT_TX_TID_STATS_TLV = 4,
+ HTT_RX_TID_STATS_TLV = 5,
+ HTT_MSDU_FLOW_STATS_TLV = 6,
+
+ HTT_PEER_STATS_MAX_TLV = 31,
+};
+
+/* =========== MUMIMO HWQ stats =========== */
+/* MU MIMO stats per hwQ */
+struct htt_tx_hwq_mu_mimo_sch_stats_tlv {
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ u32 mu_mimo_ppdu_posted;
+};
+
+struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+};
+
+struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
+ u32 mac_id__hwq_id__word;
+};
+
+/* == TX HWQ STATS == */
+struct htt_tx_hwq_stats_cmn_tlv {
+ u32 mac_id__hwq_id__word;
+
+ /* PPDU level stats */
+ u32 xretry;
+ u32 underrun_cnt;
+ u32 flush_cnt;
+ u32 filt_cnt;
+ u32 null_mpdu_bmap;
+ u32 user_ack_failure;
+ u32 ack_tlv_proc;
+ u32 sched_id_proc;
+ u32 null_mpdu_tx_count;
+ u32 mpdu_bmap_not_recvd;
+
+ /* Selfgen stats per hwQ */
+ u32 num_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+
+ /* MPDU level stats */
+ u32 mpdu_tried_cnt;
+ u32 mpdu_queued_cnt;
+ u32 mpdu_ack_fail_cnt;
+ u32 mpdu_filt_cnt;
+ u32 false_mpdu_ack_count;
+
+ u32 txq_timeout;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_difs_latency_stats_tlv_v {
+ u32 hist_intvl;
+	/* Histogram of ppdu post to hwsch -> cmd status received */
+ u32 difs_latency_hist[0]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_result_stats_tlv_v {
+ /* Histogram of sched cmd result */
+ u32 cmd_result[0]; /* HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_stall_stats_tlv_v {
+	/* Histogram of various pause conditions */
+ u32 cmd_stall_status[0]; /* HTT_TX_HWQ_MAX_CMD_STALL_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_fes_result_stats_tlv_v {
+	/* Histogram of per-user FES results */
+ u32 fes_result[0]; /* HTT_TX_HWQ_MAX_FES_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The hwq_tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ.
+ * A try here is the count of MPDUs within a PPDU that the HW
+ * attempted to transmit on air for the HWSCH schedule command
+ * submitted by FW in this HWQ; it is not the number of retry
+ * attempts. The histogram bins are 0-29, 30-59, 60-89 and so on;
+ * there are 10 bins in this histogram. They are defined in FW
+ * using the following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ /* Histogram of number of mpdus on tried mpdu */
+ u32 tried_mpdu_cnt_hist[0]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The txop_used_cnt_hist is the histogram of txop per burst. After
+ * completing the burst, we identify the txop used in the burst and
+ * increment the corresponding bin.
+ * Each bin represents 1 ms and there are 10 bins in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
+ * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 (1 ms)
+ */
+struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
+ /* Histogram of txop used cnt */
+ u32 txop_used_cnt_hist[0]; /* HTT_TX_HWQ_TXOP_USED_CNT_HIST */
+};
+
+/* == TX SELFGEN STATS == */
+struct htt_tx_selfgen_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 su_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+ u32 delayed_bar_1; /* MU user 1 */
+ u32 delayed_bar_2; /* MU user 2 */
+ u32 delayed_bar_3; /* MU user 3 */
+ u32 delayed_bar_4; /* MU user 4 */
+ u32 delayed_bar_5; /* MU user 5 */
+ u32 delayed_bar_6; /* MU user 6 */
+ u32 delayed_bar_7; /* MU user 7 */
+};
+
+struct htt_tx_selfgen_ac_stats_tlv {
+ /* 11AC */
+ u32 ac_su_ndpa;
+ u32 ac_su_ndp;
+ u32 ac_mu_mimo_ndpa;
+ u32 ac_mu_mimo_ndp;
+ u32 ac_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ac_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ac_mu_mimo_brpoll_3; /* MU user 3 */
+};
+
+struct htt_tx_selfgen_ax_stats_tlv {
+ /* 11AX */
+ u32 ax_su_ndpa;
+ u32 ax_su_ndp;
+ u32 ax_mu_mimo_ndpa;
+ u32 ax_mu_mimo_ndp;
+ u32 ax_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ax_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ax_mu_mimo_brpoll_3; /* MU user 3 */
+ u32 ax_mu_mimo_brpoll_4; /* MU user 4 */
+ u32 ax_mu_mimo_brpoll_5; /* MU user 5 */
+ u32 ax_mu_mimo_brpoll_6; /* MU user 6 */
+ u32 ax_mu_mimo_brpoll_7; /* MU user 7 */
+ u32 ax_basic_trigger;
+ u32 ax_bsr_trigger;
+ u32 ax_mu_bar_trigger;
+ u32 ax_mu_rts_trigger;
+};
+
+struct htt_tx_selfgen_ac_err_stats_tlv {
+ /* 11AC error stats */
+ u32 ac_su_ndp_err;
+ u32 ac_su_ndpa_err;
+ u32 ac_mu_mimo_ndpa_err;
+ u32 ac_mu_mimo_ndp_err;
+ u32 ac_mu_mimo_brp1_err;
+ u32 ac_mu_mimo_brp2_err;
+ u32 ac_mu_mimo_brp3_err;
+};
+
+struct htt_tx_selfgen_ax_err_stats_tlv {
+ /* 11AX error stats */
+ u32 ax_su_ndp_err;
+ u32 ax_su_ndpa_err;
+ u32 ax_mu_mimo_ndpa_err;
+ u32 ax_mu_mimo_ndp_err;
+ u32 ax_mu_mimo_brp1_err;
+ u32 ax_mu_mimo_brp2_err;
+ u32 ax_mu_mimo_brp3_err;
+ u32 ax_mu_mimo_brp4_err;
+ u32 ax_mu_mimo_brp5_err;
+ u32 ax_mu_mimo_brp6_err;
+ u32 ax_mu_mimo_brp7_err;
+ u32 ax_basic_trigger_err;
+ u32 ax_bsr_trigger_err;
+ u32 ax_mu_bar_trigger_err;
+ u32 ax_mu_rts_trigger_err;
+};
+
+/* == TX MU STATS == */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+
+struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
+ /* mu-mimo sw sched cmd stats */
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ /* MU PPDU stats per hwQ */
+ u32 mu_mimo_ppdu_posted;
+ /*
+ * Counts the number of users in each transmission of
+ * the given TX mode.
+ *
+ * Index is the number of users - 1.
+ */
+ u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+ u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_mu_mimo_mpdus_queued_usr;
+ u32 ax_mu_mimo_mpdus_tried_usr;
+ u32 ax_mu_mimo_mpdus_failed_usr;
+ u32 ax_mu_mimo_mpdus_requeued_usr;
+ u32 ax_mu_mimo_err_no_ba_usr;
+ u32 ax_mu_mimo_mpdu_underrun_usr;
+ u32 ax_mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_ofdma_mpdus_queued_usr;
+ u32 ax_ofdma_mpdus_tried_usr;
+ u32 ax_ofdma_mpdus_failed_usr;
+ u32 ax_ofdma_mpdus_requeued_usr;
+ u32 ax_ofdma_err_no_ba_usr;
+ u32 ax_ofdma_mpdu_underrun_usr;
+ u32 ax_ofdma_ampdu_underrun_usr;
+};
+
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3
+
+struct htt_tx_pdev_mpdu_stats_tlv {
+ /* mpdu level stats */
+ u32 mpdus_queued_usr;
+ u32 mpdus_tried_usr;
+ u32 mpdus_failed_usr;
+ u32 mpdus_requeued_usr;
+ u32 err_no_ba_usr;
+ u32 mpdu_underrun_usr;
+ u32 ampdu_underrun_usr;
+ u32 user_index;
+ u32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+};
+
+/* == TX SCHED STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_posted_tlv_v {
+ u32 sched_cmd_posted[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_reaped_tlv_v {
+ u32 sched_cmd_reaped[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_order_su_tlv_v {
+ u32 sched_order_su[0]; /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
+};
+
+enum htt_sched_txq_sched_ineligibility_tlv_enum {
+ HTT_SCHED_TID_SKIP_SCHED_MASK_DISABLED = 0,
+ HTT_SCHED_TID_SKIP_NOTIFY_MPDU,
+ HTT_SCHED_TID_SKIP_MPDU_STATE_INVALID,
+ HTT_SCHED_TID_SKIP_SCHED_DISABLED,
+ HTT_SCHED_TID_SKIP_TQM_BYPASS_CMD_PENDING,
+ HTT_SCHED_TID_SKIP_SECOND_SU_SCHEDULE,
+
+ HTT_SCHED_TID_SKIP_CMD_SLOT_NOT_AVAIL,
+ HTT_SCHED_TID_SKIP_NO_ENQ,
+ HTT_SCHED_TID_SKIP_LOW_ENQ,
+ HTT_SCHED_TID_SKIP_PAUSED,
+ HTT_SCHED_TID_SKIP_UL,
+ HTT_SCHED_TID_REMOVE_PAUSED,
+ HTT_SCHED_TID_REMOVE_NO_ENQ,
+ HTT_SCHED_TID_REMOVE_UL,
+ HTT_SCHED_TID_QUERY,
+ HTT_SCHED_TID_SU_ONLY,
+ HTT_SCHED_TID_ELIGIBLE,
+ HTT_SCHED_INELIGIBILITY_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_ineligibility_tlv_v {
+ /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
+ u32 sched_ineligibility[0];
+};
+
+struct htt_tx_pdev_stats_sched_per_txq_tlv {
+ u32 mac_id__txq_id__word;
+ u32 sched_policy;
+ u32 last_sched_cmd_posted_timestamp;
+ u32 last_sched_cmd_compl_timestamp;
+ u32 sched_2_tac_lwm_count;
+ u32 sched_2_tac_ring_full;
+ u32 sched_cmd_post_failure;
+ u32 num_active_tids;
+ u32 num_ps_schedules;
+ u32 sched_cmds_pending;
+ u32 num_tid_register;
+ u32 num_tid_unregister;
+ u32 num_qstats_queried;
+ u32 qstats_update_pending;
+ u32 last_qstats_query_timestamp;
+ u32 num_tqm_cmdq_full;
+ u32 num_de_sched_algo_trigger;
+ u32 num_rt_sched_algo_trigger;
+ u32 num_tqm_sched_algo_trigger;
+ u32 notify_sched;
+ u32 dur_based_sendn_term;
+};
+
+struct htt_stats_tx_sched_cmn_tlv {
+ /* BIT [ 7 : 0] :- mac_id
+ * BIT [31 : 8] :- reserved
+ */
+ u32 mac_id__word;
+ /* Current timestamp */
+ u32 current_timestamp;
+};
+
+/* == TQM STATS == */
+#define HTT_TX_TQM_MAX_GEN_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
+ u32 gen_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_stats_tlv_v {
+ u32 list_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
+ u32 list_mpdu_cnt_hist[0];
+ /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
+};
+
+struct htt_tx_tqm_pdev_stats_tlv_v {
+ u32 msdu_count;
+ u32 mpdu_count;
+ u32 remove_msdu;
+ u32 remove_mpdu;
+ u32 remove_msdu_ttl;
+ u32 send_bar;
+ u32 bar_sync;
+ u32 notify_mpdu;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 hwsch_trigger;
+ u32 ack_tlv_proc;
+ u32 gen_mpdu_cmd;
+ u32 gen_list_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_mpdu_tried_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_msdu_cmd;
+ u32 remove_msdu_ttl_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 enqueue;
+ u32 enqueue_notify;
+ u32 notify_mpdu_at_head;
+ u32 notify_mpdu_state_valid;
+	/*
+	 * On receiving TQM_FLOW_NOT_EMPTY_STATUS from TQM (sent when MSDUs are
+	 * enqueued and the flow becomes non-empty), the matching notify counter
+	 * is incremented if the number of MSDUs exceeds the threshold.
+	 * UDP_THRESH counters are for UDP MSDUs; NONUDP are for non-UDP MSDUs.
+	 * MSDUQ_SWNOTIFY_UDP_THRESH1 threshold - sched_udp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_UDP_THRESH2 threshold - sched_udp_notify2 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH1 threshold - sched_nonudp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH2 threshold - sched_nonudp_notify2 is incremented
+	 *
+	 * Notify signifies that we trigger the scheduler.
+	 */
+ u32 sched_udp_notify1;
+ u32 sched_udp_notify2;
+ u32 sched_nonudp_notify1;
+ u32 sched_nonudp_notify2;
+};
+
+struct htt_tx_tqm_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 max_cmdq_id;
+ u32 list_mpdu_cnt_hist_intvl;
+
+ /* Global stats */
+ u32 add_msdu;
+ u32 q_empty;
+ u32 q_not_empty;
+ u32 drop_notification;
+ u32 desc_threshold;
+};
+
+struct htt_tx_tqm_error_stats_tlv {
+ /* Error stats */
+ u32 q_empty_failure;
+ u32 q_not_empty_failure;
+ u32 add_msdu_failure;
+};
+
+/* == TQM CMDQ stats == */
+struct htt_tx_tqm_cmdq_status_tlv {
+ u32 mac_id__cmdq_id__word;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 gen_mpdu_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_msdu_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 update_msduq_cmd;
+};
+
+/* == TX-DE STATS == */
+/* Structures for tx de stats */
+struct htt_tx_de_eapol_packets_stats_tlv {
+ u32 m1_packets;
+ u32 m2_packets;
+ u32 m3_packets;
+ u32 m4_packets;
+ u32 g1_packets;
+ u32 g2_packets;
+};
+
+struct htt_tx_de_classify_failed_stats_tlv {
+ u32 ap_bss_peer_not_found;
+ u32 ap_bcast_mcast_no_peer;
+ u32 sta_delete_in_progress;
+ u32 ibss_no_bss_peer;
+ u32 invalid_vdev_type;
+ u32 invalid_ast_peer_entry;
+ u32 peer_entry_invalid;
+ u32 ethertype_not_ip;
+ u32 eapol_lookup_failed;
+ u32 qpeer_not_allow_data;
+ u32 fse_tid_override;
+ u32 ipv6_jumbogram_zero_length;
+ u32 qos_to_non_qos_in_prog;
+};
+
+struct htt_tx_de_classify_stats_tlv {
+ u32 arp_packets;
+ u32 igmp_packets;
+ u32 dhcp_packets;
+ u32 host_inspected;
+ u32 htt_included;
+ u32 htt_valid_mcs;
+ u32 htt_valid_nss;
+ u32 htt_valid_preamble_type;
+ u32 htt_valid_chainmask;
+ u32 htt_valid_guard_interval;
+ u32 htt_valid_retries;
+ u32 htt_valid_bw_info;
+ u32 htt_valid_power;
+ u32 htt_valid_key_flags;
+ u32 htt_valid_no_encryption;
+ u32 fse_entry_count;
+ u32 fse_priority_be;
+ u32 fse_priority_high;
+ u32 fse_priority_low;
+ u32 fse_traffic_ptrn_be;
+ u32 fse_traffic_ptrn_over_sub;
+ u32 fse_traffic_ptrn_bursty;
+ u32 fse_traffic_ptrn_interactive;
+ u32 fse_traffic_ptrn_periodic;
+ u32 fse_hwqueue_alloc;
+ u32 fse_hwqueue_created;
+ u32 fse_hwqueue_send_to_host;
+ u32 mcast_entry;
+ u32 bcast_entry;
+ u32 htt_update_peer_cache;
+ u32 htt_learning_frame;
+ u32 fse_invalid_peer;
+ /*
+ * mec_notify is HTT TX WBM multicast echo check notification
+ * from firmware to host. FW sends SA addresses to host for all
+ * multicast/broadcast packets received on STA side.
+ */
+ u32 mec_notify;
+};
+
+struct htt_tx_de_classify_status_stats_tlv {
+ u32 eok;
+ u32 classify_done;
+ u32 lookup_failed;
+ u32 send_host_dhcp;
+ u32 send_host_mcast;
+ u32 send_host_unknown_dest;
+ u32 send_host;
+ u32 status_invalid;
+};
+
+struct htt_tx_de_enqueue_packets_stats_tlv {
+ u32 enqueued_pkts;
+ u32 to_tqm;
+ u32 to_tqm_bypass;
+};
+
+struct htt_tx_de_enqueue_discard_stats_tlv {
+ u32 discarded_pkts;
+ u32 local_frames;
+ u32 is_ext_msdu;
+};
+
+struct htt_tx_de_compl_stats_tlv {
+ u32 tcl_dummy_frame;
+ u32 tqm_dummy_frame;
+ u32 tqm_notify_frame;
+ u32 fw2wbm_enq;
+ u32 tqm_bypass_frame;
+};
+
+/*
+ * The htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of the time spent
+ * waiting for a buffer in the FW2WBM release ring. A buffer request may
+ * fail when no buffer is available, in which case we sleep for 200us and
+ * request again. Each bin covers 200ms and there are 10 bins (2 seconds
+ * max). They are defined by the following macros in FW:
+ * #define ENTRIES_PER_BIN_COUNT 1000 // per bin 1000 * 200us = 200ms
+ * #define RING_FULL_BIN_ENTRIES (WAL_TX_DE_FW2WBM_ALLOC_TIMEOUT_COUNT /
+ *		ENTRIES_PER_BIN_COUNT)
+ */
+struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
+ u32 fw2wbm_ring_full_hist[0];
+};
+
+struct htt_tx_de_cmn_stats_tlv {
+ u32 mac_id__word;
+
+ /* Global Stats */
+ u32 tcl2fw_entry_count;
+ u32 not_to_fw;
+ u32 invalid_pdev_vdev_peer;
+ u32 tcl_res_invalid_addrx;
+ u32 wbm2fw_entry_count;
+ u32 invalid_pdev;
+};
+
+/* == RING-IF STATS == */
+#define HTT_STATS_LOW_WM_BINS 5
+#define HTT_STATS_HIGH_WM_BINS 5
+
+struct htt_ring_if_stats_tlv {
+ u32 base_addr; /* DWORD aligned base memory address of the ring */
+ u32 elem_size;
+ u32 num_elems__prefetch_tail_idx;
+ u32 head_idx__tail_idx;
+ u32 shadow_head_idx__shadow_tail_idx;
+ u32 num_tail_incr;
+ u32 lwm_thresh__hwm_thresh;
+ u32 overrun_hit_count;
+ u32 underrun_hit_count;
+ u32 prod_blockwait_count;
+ u32 cons_blockwait_count;
+ u32 low_wm_hit_count[HTT_STATS_LOW_WM_BINS];
+ u32 high_wm_hit_count[HTT_STATS_HIGH_WM_BINS];
+};
+
+struct htt_ring_if_cmn_tlv {
+ u32 mac_id__word;
+ u32 num_records;
+};
+
+/* == SFM STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sfm_client_user_tlv_v {
+ /* Number of DWORDS used per user and per client */
+ u32 dwords_used_by_user_n[0];
+};
+
+struct htt_sfm_client_tlv {
+ /* Client ID */
+ u32 client_id;
+ /* Minimum number of buffers */
+ u32 buf_min;
+ /* Maximum number of buffers */
+ u32 buf_max;
+ /* Number of Busy buffers */
+ u32 buf_busy;
+ /* Number of Allocated buffers */
+ u32 buf_alloc;
+ /* Number of Available/Usable buffers */
+ u32 buf_avail;
+ /* Number of users */
+ u32 num_users;
+};
+
+struct htt_sfm_cmn_tlv {
+ u32 mac_id__word;
+ /* Indicates the total number of 128 byte buffers
+ * in the CMEM that are available for buffer sharing
+ */
+ u32 buf_total;
+	/* Indicates that, for a certain client or all clients,
+	 * there is no dword saved in SFM; refer to SFM_R1_MEM_EMPTY
+	 */
+ u32 mem_empty;
+ /* DEALLOCATE_BUFFERS, refer to register SFM_R0_DEALLOCATE_BUFFERS */
+ u32 deallocate_bufs;
+ /* Number of Records */
+ u32 num_records;
+};
+
+/* == SRNG STATS == */
+struct htt_sring_stats_tlv {
+ u32 mac_id__ring_id__arena__ep;
+ u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
+ u32 base_addr_msb;
+ u32 ring_size;
+ u32 elem_size;
+
+ u32 num_avail_words__num_valid_words;
+ u32 head_ptr__tail_ptr;
+ u32 consumer_empty__producer_full;
+ u32 prefetch_count__internal_tail_ptr;
+};
+
+struct htt_sring_cmn_tlv {
+ u32 num_records;
+};
+
+/* == PDEV TX RATE CTRL STATS == */
+#define HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_LTF 4
+
+#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+
+struct htt_tx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets
+ * in each GI (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ /* Number of CTS-acknowledged RTS packets */
+ u32 rts_success;
+
+ /*
+ * Counters for legacy 11a and 11b transmissions.
+ *
+ * The index corresponds to:
+ *
+ * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+ *
+ * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+ * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+ */
+ u32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+ u32 ac_mu_mimo_tx_ldpc;
+ u32 ax_mu_mimo_tx_ldpc;
+ u32 ofdma_tx_ldpc;
+
+ /*
+ * Counters for 11ax HE LTF selection during TX.
+ *
+ * The index corresponds to:
+ *
+ * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+ */
+ u32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+ u32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+ u32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+};
+
+/* == PDEV RX RATE CTRL STATS == */
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+
+struct htt_rx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 nsts;
+
+ u32 rx_ldpc;
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ /* units = dB above noise floor */
+
+ /* Counters to track number of rx packets
+ * in each GI in each mcs (0-11)
+ */
+ u32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	s32 rssi_in_dbm; /* rx signal strength in dBm */
+
+ u32 rx_11ax_su_ext;
+ u32 rx_11ac_mumimo;
+ u32 rx_11ax_mumimo;
+ u32 rx_11ax_ofdma;
+ u32 txbf;
+ u32 rx_legacy_cck_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 rx_legacy_ofdm_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ u32 rx_active_dur_us_low;
+ u32 rx_active_dur_us_high;
+
+ u32 rx_11ax_ul_ofdma;
+
+ u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ul_ofdma_rx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ul_ofdma_rx_stbc;
+ u32 ul_ofdma_rx_ldpc;
+
+ /* record the stats for each user index */
+ u32 rx_ulofdma_non_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_mpdu_ok[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+ u32 rx_ulofdma_mpdu_fail[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+
+ u32 nss_count;
+ u32 pilot_count;
+ /* RxEVM stats in dB */
+ s32 rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS];
+ /* rx_pilot_evm_db_mean:
+ * EVM mean across pilots, computed as
+ * mean(10*log10(rx_pilot_evm_linear)) = mean(rx_pilot_evm_db)
+ */
+ s32 rx_pilot_evm_db_mean[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* dBm units */
+ /* per_chain_rssi_pkt_type:
+ * This field shows what type of rx frame the per-chain RSSI was computed
+ * on, by recording the frame type and sub-type as bit-fields within this
+ * field:
+ * BIT [3 : 0] :- IEEE80211_FC0_TYPE
+ * BIT [7 : 4] :- IEEE80211_FC0_SUBTYPE
+ * BIT [31 : 8] :- Reserved
+ */
+ u32 per_chain_rssi_pkt_type;
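+	/* Illustrative decode of the bit-fields documented above (not part
+	 * of the interface definition):
+	 *
+	 *   type    = per_chain_rssi_pkt_type & 0xf;
+	 *   subtype = (per_chain_rssi_pkt_type >> 4) & 0xf;
+	 */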
+ s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+};
+
+/* == RX PDEV/SOC STATS == */
+struct htt_rx_soc_fw_stats_tlv {
+ u32 fw_reo_ring_data_msdu;
+ u32 fw_to_host_data_msdu_bcmc;
+ u32 fw_to_host_data_msdu_uc;
+ u32 ofld_remote_data_buf_recycle_cnt;
+ u32 ofld_remote_free_buf_indication_cnt;
+
+ u32 ofld_buf_to_host_data_msdu_uc;
+ u32 reo_fw_ring_to_host_data_msdu_uc;
+
+ u32 wbm_sw_ring_reap;
+ u32 wbm_forward_to_host_cnt;
+ u32 wbm_target_recycle_cnt;
+
+ u32 target_refill_ring_recycle_cnt;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
+ u32 refill_ring_empty_cnt[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
+ u32 refill_ring_num_refill[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* RXDMA error code from WBM released packets */
+enum htt_rx_rxdma_error_code_enum {
+ HTT_RX_RXDMA_OVERFLOW_ERR = 0,
+ HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1,
+ HTT_RX_RXDMA_FCS_ERR = 2,
+ HTT_RX_RXDMA_DECRYPT_ERR = 3,
+ HTT_RX_RXDMA_TKIP_MIC_ERR = 4,
+ HTT_RX_RXDMA_UNECRYPTED_ERR = 5,
+ HTT_RX_RXDMA_MSDU_LEN_ERR = 6,
+ HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7,
+ HTT_RX_RXDMA_WIFI_PARSE_ERR = 8,
+ HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9,
+ HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10,
+ HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11,
+ HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12,
+ HTT_RX_RXDMA_FLUSH_REQUEST = 13,
+ HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14,
+ HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_RXDMA_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
+ u32 rxdma_err[0]; /* HTT_RX_RXDMA_MAX_ERR_CODE */
+};
+
+/* REO error code from WBM released packets */
+enum htt_rx_reo_error_code_enum {
+ HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0,
+ HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1,
+ HTT_RX_AMPDU_IN_NON_BA = 2,
+ HTT_RX_NON_BA_DUPLICATE = 3,
+ HTT_RX_BA_DUPLICATE = 4,
+ HTT_RX_REGULAR_FRAME_2K_JUMP = 5,
+ HTT_RX_BAR_FRAME_2K_JUMP = 6,
+ HTT_RX_REGULAR_FRAME_OOR = 7,
+ HTT_RX_BAR_FRAME_OOR = 8,
+ HTT_RX_BAR_FRAME_NO_BA_SESSION = 9,
+ HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10,
+ HTT_RX_PN_CHECK_FAILED = 11,
+ HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12,
+ HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13,
+ HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14,
+ HTT_RX_REO_ERR_CODE_RVSD = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_REO_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
+ u32 reo_err[0]; /* HTT_RX_REO_MAX_ERR_CODE */
+};
+
+/* == RX PDEV STATS == */
+#define HTT_STATS_SUBTYPE_MAX 16
+
+struct htt_rx_pdev_fw_stats_tlv {
+ u32 mac_id__word;
+ u32 ppdu_recvd;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 tcp_msdu_cnt;
+ u32 tcp_ack_msdu_cnt;
+ u32 udp_msdu_cnt;
+ u32 other_msdu_cnt;
+ u32 fw_ring_mpdu_ind;
+ u32 fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_mcast_data_msdu;
+ u32 fw_ring_bcast_data_msdu;
+ u32 fw_ring_ucast_data_msdu;
+ u32 fw_ring_null_data_msdu;
+ u32 fw_ring_mpdu_drop;
+ u32 ofld_local_data_ind_cnt;
+ u32 ofld_local_data_buf_recycle_cnt;
+ u32 drx_local_data_ind_cnt;
+ u32 drx_local_data_buf_recycle_cnt;
+ u32 local_nondata_ind_cnt;
+ u32 local_nondata_buf_recycle_cnt;
+
+ u32 fw_status_buf_ring_refill_cnt;
+ u32 fw_status_buf_ring_empty_cnt;
+ u32 fw_pkt_buf_ring_refill_cnt;
+ u32 fw_pkt_buf_ring_empty_cnt;
+ u32 fw_link_buf_ring_refill_cnt;
+ u32 fw_link_buf_ring_empty_cnt;
+
+ u32 host_pkt_buf_ring_refill_cnt;
+ u32 host_pkt_buf_ring_empty_cnt;
+ u32 mon_pkt_buf_ring_refill_cnt;
+ u32 mon_pkt_buf_ring_empty_cnt;
+ u32 mon_status_buf_ring_refill_cnt;
+ u32 mon_status_buf_ring_empty_cnt;
+ u32 mon_desc_buf_ring_refill_cnt;
+ u32 mon_desc_buf_ring_empty_cnt;
+ u32 mon_dest_ring_update_cnt;
+ u32 mon_dest_ring_full_cnt;
+
+ u32 rx_suspend_cnt;
+ u32 rx_suspend_fail_cnt;
+ u32 rx_resume_cnt;
+ u32 rx_resume_fail_cnt;
+ u32 rx_ring_switch_cnt;
+ u32 rx_ring_restore_cnt;
+ u32 rx_flush_cnt;
+ u32 rx_recovery_reset_cnt;
+};
+
+#define HTT_STATS_PHY_ERR_MAX 43
+
+struct htt_rx_pdev_fw_stats_phy_err_tlv {
+ u32 mac_id__word;
+ u32 total_phy_err_cnt;
+ /* Counts of different types of phy errs
+ * The mapping of PHY error types to phy_err array elements is HW dependent.
+ * The only currently-supported mapping is shown below:
+ *
+ * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+ * 1 phyrx_err_synth_off
+ * 2 phyrx_err_ofdma_timing
+ * 3 phyrx_err_ofdma_signal_parity
+ * 4 phyrx_err_ofdma_rate_illegal
+ * 5 phyrx_err_ofdma_length_illegal
+ * 6 phyrx_err_ofdma_restart
+ * 7 phyrx_err_ofdma_service
+ * 8 phyrx_err_ppdu_ofdma_power_drop
+ * 9 phyrx_err_cck_blokker
+ * 10 phyrx_err_cck_timing
+ * 11 phyrx_err_cck_header_crc
+ * 12 phyrx_err_cck_rate_illegal
+ * 13 phyrx_err_cck_length_illegal
+ * 14 phyrx_err_cck_restart
+ * 15 phyrx_err_cck_service
+ * 16 phyrx_err_cck_power_drop
+ * 17 phyrx_err_ht_crc_err
+ * 18 phyrx_err_ht_length_illegal
+ * 19 phyrx_err_ht_rate_illegal
+ * 20 phyrx_err_ht_zlf
+ * 21 phyrx_err_false_radar_ext
+ * 22 phyrx_err_green_field
+ * 23 phyrx_err_bw_gt_dyn_bw
+ * 24 phyrx_err_leg_ht_mismatch
+ * 25 phyrx_err_vht_crc_error
+ * 26 phyrx_err_vht_siga_unsupported
+ * 27 phyrx_err_vht_lsig_len_invalid
+ * 28 phyrx_err_vht_ndp_or_zlf
+ * 29 phyrx_err_vht_nsym_lt_zero
+ * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+ * 31 phyrx_err_vht_rx_skip_group_id0
+ * 32 phyrx_err_vht_rx_skip_group_id1to62
+ * 33 phyrx_err_vht_rx_skip_group_id63
+ * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+ * 35 phyrx_err_defer_nap
+ * 36 phyrx_err_fdomain_timeout
+ * 37 phyrx_err_lsig_rel_check
+ * 38 phyrx_err_bt_collision
+ * 39 phyrx_err_unsupported_mu_feedback
+ * 40 phyrx_err_ppdu_tx_interrupt_rx
+ * 41 phyrx_err_unsupported_cbf
+ * 42 phyrx_err_other
+ */
+ u32 phy_err[HTT_STATS_PHY_ERR_MAX];
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
+ /* Num error MPDU for each RxDMA error type */
+ u32 fw_ring_mpdu_err[0]; /* HTT_RX_STATS_RXDMA_MAX_ERR */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
+ /* Num MPDU dropped */
+ u32 fw_mpdu_drop[0]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */
+};
+
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80)
+
+struct htt_pdev_stats_cca_counters_tlv {
+	/* The values below are obtained from the HW cycle counter registers */
+ u32 tx_frame_usec;
+ u32 rx_frame_usec;
+ u32 rx_clear_usec;
+ u32 my_rx_frame_usec;
+ u32 usec_cnt;
+ u32 med_rx_idle_usec;
+ u32 med_tx_idle_global_usec;
+ u32 cca_obss_usec;
+};
+
+struct htt_pdev_cca_stats_hist_v1_tlv {
+ u32 chan_num;
+	/* Number of CCA records (number of htt_pdev_stats_cca_counters_tlv) */
+ u32 num_records;
+ u32 valid_cca_counters_bitmap;
+ u32 collection_interval;
+
+	/* When the indication covers the CCA stats of the last N intervals,
+	 * this TLV is followed by an array holding those stats, with
+	 * pdev_cca_stats[0] containing the oldest CCA stats and
+	 * pdev_cca_stats[N-1] the most recent ones:
+	 * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+	 */
+};
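+/* A hedged usage sketch (not from the source): each *_INFO_PRESENT flag
+ * above is assumed to gate the matching counter in
+ * htt_pdev_stats_cca_counters_tlv, e.g.
+ *
+ *   if (hist->valid_cca_counters_bitmap &
+ *       HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT)
+ *           report(cca->tx_frame_usec);  (report() is hypothetical)
+ */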
+
+struct htt_pdev_stats_twt_session_tlv {
+ u32 vdev_id;
+ struct htt_mac_addr peer_mac;
+ u32 flow_id_flags;
+
+ /* TWT_DIALOG_ID_UNAVAILABLE is used
+ * when TWT session is not initiated by host
+ */
+ u32 dialog_id;
+ u32 wake_dura_us;
+ u32 wake_intvl_us;
+ u32 sp_offset_us;
+};
+
+struct htt_pdev_stats_twt_sessions_tlv {
+ u32 pdev_id;
+ u32 num_sessions;
+ struct htt_pdev_stats_twt_session_tlv twt_session[0];
+};
+
+enum htt_rx_reo_resource_sample_id_enum {
+ /* Global link descriptor queued in REO */
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2,
+	/* Total number of MSDUs buffered in each AC */
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6,
+	/* Number of queue descriptors in each aging group */
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10,
+
+ HTT_RX_REO_RESOURCE_STATS_MAX = 16
+};
+
+struct htt_rx_reo_resource_stats_tlv_v {
+	/* Variable number of records, bounded by HTT_RX_REO_RESOURCE_STATS_MAX */
+ u32 sample_id;
+ u32 total_max;
+ u32 total_avg;
+ u32 total_sample;
+ u32 non_zeros_avg;
+ u32 non_zeros_sample;
+ u32 last_non_zeros_max;
+ u32 last_non_zeros_min;
+ u32 last_non_zeros_avg;
+ u32 last_non_zeros_sample;
+};
+
+/* == TX SOUNDING STATS == */
+
+enum htt_txbf_sound_steer_modes {
+ HTT_IMPLICIT_TXBF_STEER_STATS = 0,
+ HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS = 1,
+ HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS = 2,
+ HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS = 3,
+ HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS = 4,
+ HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum htt_stats_sounding_tx_mode {
+ HTT_TX_AC_SOUNDING_MODE = 0,
+ HTT_TX_AX_SOUNDING_MODE = 1,
+};
+
+struct htt_tx_sounding_stats_tlv {
+ u32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */
+ /* Counts number of soundings for all steering modes in each bw */
+ u32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES];
+ /*
+ * The sounding array is a 2-D array stored as an 1-D array of
+ * u32. The stats for a particular user/bw combination is
+ * referenced with the following:
+ *
+ * sounding[(user* max_bw) + bw]
+ *
+	 * ... where max_bw == 4 for 160 MHz
+ */
+ u32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+};
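+/* Worked example of the indexing described above: with max_bw == 4 and the
+ * usual bw encoding (0: 20 MHz ... 3: 160 MHz), the 80 MHz sounding count
+ * for user 2 is sounding[(2 * 4) + 2].
+ */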
+
+struct htt_pdev_obss_pd_stats_tlv {
+ u32 num_obss_tx_ppdu_success;
+ u32 num_obss_tx_ppdu_failure;
+};
+
+void ath11k_debug_htt_stats_init(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
new file mode 100644
index 000000000000..743760c9bcae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+void
+ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath11k_htt_tx_stats *tx_stats;
+ int gi, mcs, bw, nss;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
+ mcs = txrate->mcs;
+ bw = txrate->bw;
+ nss = txrate->nss - 1;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
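+/* For example, STATS_OP_FMT(SUCC).he[0][mcs] expands to
+ * tx_stats->stats[ATH11K_STATS_TYPE_SUCC].he[0][mcs].
+ */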
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
+ }
+
+ if (peer_stats->is_ampdu) {
+ tx_stats->ba_fails += peer_stats->ba_fails;
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(AMPDU).he[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).he[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ tx_stats->ack_fails += peer_stats->ba_fails;
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
+
+ tx_stats->tx_duration += peer_stats->duration;
+}
+
+void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum hal_tx_rate_stats_bw bw;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u16 rate;
+ u8 rate_idx;
+ int ret;
+ u8 mcs;
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_warn(ab, "failed to find the peer\n");
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+ ts->rate_stats);
+ mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+ ts->rate_stats);
+ sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+ ts->rate_stats);
+ bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+
+ if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+ pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ goto err_out;
+ arsta->txrate.legacy = rate;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+ if (mcs > 7) {
+ ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
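+		/* HT rates encode the stream count in the MCS index
+		 * (MCS 0-7: one stream, 8-15: two, ...), hence the
+		 * 8 * (nss - 1) offset below.
+		 */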
+ arsta->txrate.mcs = mcs + 8 * (arsta->last_txrate.nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+ if (mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ /* TODO */
+ }
+
+ arsta->txrate.nss = arsta->last_txrate.nss;
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+
+ ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx);
+err_out:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_htt_data_stats *stats;
+ static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 2 * 4096;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " HE MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->he[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (0.4us,0.8us,1.6us,3.2us)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1],
+ stats->gi[j][2], stats->gi[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH11K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath11k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ int len = 0, i, retval = 0;
+ const int size = 4096;
+ char *buf;
+
+ if (!rx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+ len += scnprintf(buf + len, size - len,
+ "GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+ rx_stats->gi_count[0], rx_stats->gi_count[1],
+ rx_stats->gi_count[2], rx_stats->gi_count[3]);
+ len += scnprintf(buf + len, size - len,
+ "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
+ rx_stats->bw_count[0], rx_stats->bw_count[1],
+ rx_stats->bw_count[2], rx_stats->bw_count[3]);
+ len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
+ rx_stats->coding_count[0], rx_stats->coding_count[1]);
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+ len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
+ for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
+ len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
+ len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
+ rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len, "\n");
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath11k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct debug_htt_stats_req *stats_req;
+ int ret;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
+ memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
+ ret = ath11k_dbg_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ return ret;
+}
+
+static int
+ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_htt_peer_stats = {
+ .open = ath11k_dbg_sta_open_htt_peer_stats,
+ .release = ath11k_dbg_sta_release_htt_peer_stats,
+ .read = ath11k_dbg_sta_read_htt_peer_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ int ret, enable;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = kstrtoint_from_user(buf, count, 0, &enable);
+ if (ret)
+ goto out;
+
+ ar->debug.pktlog_peer_valid = enable;
+ memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
+
+ /* Send peer based pktlog enable/disable */
+ ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
+ sta->addr, ret);
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
+ enable);
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[32] = {0};
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
+ ar->debug.pktlog_peer_valid,
+ ar->debug.pktlog_peer_addr);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_pktlog = {
+ .write = ath11k_dbg_sta_write_peer_pktlog,
+ .read = ath11k_dbg_sta_read_peer_pktlog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ if (ath11k_debug_is_extd_rx_stats_enabled(ar))
+ debugfs_create_file("rx_stats", 0400, dir, sta,
+ &fops_rx_stats);
+
+ debugfs_create_file("htt_peer_stats", 0400, dir, sta,
+ &fops_htt_peer_stats);
+
+ debugfs_create_file("peer_pktlog", 0644, dir, sta,
+ &fops_peer_pktlog);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
new file mode 100644
index 000000000000..b112825a52ed
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 reo_dest;
+ int ret;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
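+	/* Illustration: for mac_id 0, reo_dest is 1 and the routing value
+	 * passed below is DP_RX_HASH_ENABLE | (1 << 1).
+	 */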
+ ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
+ HAL_DESC_REO_NON_QOS_TID, 1, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
+ ret);
+ return ret;
+ }
+
+	/* TODO: Set up other peer-specific resources used in the data path */
+
+ return 0;
+}
+
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = { 0 };
+ int entry_sz = ath11k_hal_srng_get_entrysize(type);
+ int max_entries = ath11k_hal_srng_get_max_entries(type);
+ int ret;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
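+	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so the base address
+	 * can be aligned with PTR_ALIGN() below without overrunning the
+	 * buffer.
+	 */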
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+		/* fall through when ring_num >= 3 */
+ /* fall through */
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ case HAL_RXDMA_DIR_BUF:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ default:
+ ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
+
+static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ }
+ ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+}
+
+static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int i, ret;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, i, 0,
+ DP_TCL_DATA_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, i, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
+ ath11k_hal_tx_init_data_ring(ab, srng);
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ 3, 0, DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath11k_hal_reo_init_cmd_ring(ab, srng);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+ ath11k_hal_reo_hw_setup(ab);
+
+ return 0;
+
+err:
+ ath11k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
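+	/* Record every link descriptor address into the scatter buffers,
+	 * moving on to the next scatter buffer once the current one is full.
+	 */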
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries) {
+ ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath11k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath11k_dp_srng_cleanup(ab, ring);
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
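+	/* Round up to a power of two: x & (x - 1) is non-zero exactly when x
+	 * is not a power of two, and 1 << fls(x) is the next power of two
+	 * above x.
+	 */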
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ u32 paddr;
+ u32 *desc;
+ int i, ret;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
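+	/* Split the total into banks: each full bank reliably provides
+	 * DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN usable bytes
+	 * after alignment; any remainder becomes one smaller final bank.
+	 */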
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+	/* Set up the scatter desc list when the total memory requirement
+	 * exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH
+	 */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
+ ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
+ i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i = 0;
+ int tot_work_done = 0;
+
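+	/* Reap TX completions for every TCL ring whose bit is set in this
+	 * group's mask.
+	 */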
+ while (ath11k_tx_ring_mask[grp_id] >> i) {
+ if (ath11k_tx_ring_mask[grp_id] & BIT(i))
+ ath11k_dp_tx_completion_handler(ab, i);
+ i++;
+ }
+
+ if (ath11k_rx_err_ring_mask[grp_id]) {
+ work_done = ath11k_dp_process_rx_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
+ work_done = ath11k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ath11k_rx_ring_mask[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ &irq_grp->pending_q,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+ if (budget <= 0)
+ goto done;
+ }
+ }
+
+ if (rx_mon_status_ring_mask[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+ if (budget <= 0)
+ goto done;
+ }
+ }
+
+ if (ath11k_reo_status_ring_mask[grp_id])
+ ath11k_dp_process_reo_status(ab);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+
+ if (budget <= 0)
+ goto done;
+
+ if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
+ struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
+ HAL_RX_BUF_RBM_SW3_BM,
+ GFP_ATOMIC);
+ }
+ }
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+
+void ath11k_dp_pdev_free(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ath11k_dp_rx_pdev_free(ab, i);
+ ath11k_debug_unregister(ar);
+ ath11k_dp_rx_pdev_mon_detach(ar);
+ }
+}
+
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+ }
+}
+
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int ret;
+ int i;
+
+	/* TODO: Per-pdev rx ring, unlike the tx rings which are mapped to different ACs */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath11k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath11k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to initialize mon pdev %d\n",
+ i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_pdev_free(ab);
+
+ return ret;
+}
+
+int ath11k_dp_htt_connect(struct ath11k_dp *dp)
+{
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
+{
+	/* For STA mode, enable the address search index;
+	 * TCL uses the ast_hash value in the descriptor.
+	 */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
+ FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
+ arvif->vdev_id) |
+ FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
+ ar->pdev->pdev_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath11k_dp_update_vdev_search(arvif);
+}
+
+static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k_base *ab = (struct ath11k_base *)ctx;
+ struct sk_buff *msdu = skb;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ return 0;
+}
+
+void ath11k_dp_free(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath11k_dp_srng_common_cleanup(ab);
+
+ ath11k_dp_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
+ idr_for_each(&dp->tx_ring[i].txbuf_idr,
+ ath11k_dp_tx_pending_cleanup, ab);
+ idr_destroy(&dp->tx_ring[i].txbuf_idr);
+ spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+ kfree(dp->tx_ring[i].tx_status);
+ }
+
+ /* Deinit any SOC level resource */
+}
+
+int ath11k_dp_alloc(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_link_desc_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ idr_init(&dp->tx_ring[i].txbuf_idr);
+ spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
+ goto fail_cmn_srng_cleanup;
+ }
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath11k_hal_tx_set_dscp_tid_map(ab, i);
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_cmn_srng_cleanup:
+ ath11k_dp_srng_common_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
new file mode 100644
index 000000000000..6ef5be4201b2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -0,0 +1,1535 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_H
+#define ATH11K_DP_H
+
+#include "hal_rx.h"
+
+struct ath11k_base;
+struct ath11k_peer;
+struct ath11k_dp;
+struct ath11k_vif;
+struct hal_tcl_status_ring;
+struct ath11k_ext_irq_grp;
+
+struct dp_rx_tid {
+ u8 tid;
+ u32 *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+};
+
+#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+
+struct dp_reo_cache_flush_elem {
+ struct list_head list;
+ struct dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct dp_reo_cmd {
+ struct list_head list;
+ struct dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status status);
+};
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
+#define ATH11K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct idr txbuf_idr;
+ /* Protects txbuf_idr and num_pending */
+ spinlock_t tx_idr_lock;
+ struct hal_wbm_release_ring *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
+
+struct ath11k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath11k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct ath11k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+
+ struct ath11k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+ struct sk_buff_head rx_status_q;
+};
+
+struct ath11k_pdev_dp {
+ u32 mac_id;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_srng reo_dst_ring;
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rxdma_err_dst_ring;
+ struct dp_srng rxdma_mon_dst_ring;
+ struct dp_srng rxdma_mon_desc_ring;
+
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring;
+ struct ieee80211_rx_status rx_status;
+ struct ath11k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 0 /* Disable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 3
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TX_COMP_RING_SIZE 8192
+#define DP_TX_IDR_SIZE (DP_TX_COMP_RING_SIZE << 1)
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 4
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_STATUS_RING_SIZE 256
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(20, 18)
+
+#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+
+struct ath11k_dp {
+ struct ath11k_base *ab;
+ enum ath11k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */
+ spinlock_t reo_cmd_lock;
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+/* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
+
+#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24)
+
+struct htt_tx_wbm_completion {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+} __packed;
+
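+/* Illustrative sketch (editor's example, not part of this patch): the HTT
+ * tx completion is read from a WBM release ring entry at byte offset
+ * HTT_TX_WBM_COMP_STATUS_OFFSET and unpacked with FIELD_GET from
+ * <linux/bitfield.h>; 'desc' below is an assumed pointer to the ring entry:
+ *
+ * struct htt_tx_wbm_completion *status;
+ * u32 wbm_status, ack_rssi;
+ *
+ * status = (struct htt_tx_wbm_completion *)
+ * ((u8 *)desc + HTT_TX_WBM_COMP_STATUS_OFFSET);
+ * wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS, status->info0);
+ * ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI, status->info1);
+ */
+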
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ u32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After the target has booted, the host can send an SRING setup message for
+ * each host-facing LMAC SRING. The target sets up HW registers based on the
+ * setup message and confirms back to the host if response_required is set.
+ * The host should wait for the confirmation message before sending a new
+ * SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies which ring to set up;
+ * more details in enum htt_srng_ring_id
+ * b'24:31 - ring_type: identifies the type of host ring;
+ * more details in enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see SRING spec for detail description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold watermark that is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG BIT(16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ u32 info0;
+ u32 ring_base_addr_lo;
+ u32 ring_base_addr_hi;
+ u32 info1;
+ u32 ring_head_off32_remote_addr_lo;
+ u32 ring_head_off32_remote_addr_hi;
+ u32 ring_tail_off32_remote_addr_lo;
+ u32 ring_tail_off32_remote_addr_hi;
+ u32 ring_msi_addr_lo;
+ u32 ring_msi_addr_hi;
+ u32 msi_data;
+ u32 intr_info;
+ u32 info2;
+} __packed;
+
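+/* Illustrative sketch (editor's example, not part of this patch): packing
+ * the first word of an HTT_SRING_SETUP command with the GENMASK-based
+ * fields above using FIELD_PREP from <linux/bitfield.h>; 'pdev_id',
+ * 'ring_id' and 'ring_type' are assumed locals:
+ *
+ * struct htt_srng_setup_cmd cmd = {};
+ *
+ * cmd.info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ * HTT_H2T_MSG_TYPE_SRING_SETUP);
+ * cmd.info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, pdev_id);
+ * cmd.info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, ring_id);
+ * cmd.info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE, ring_type);
+ */
+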
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a request to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 15:8
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: this is an overloaded field; refer to the usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ u32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
+
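+/* Illustrative sketch (editor's example, not part of this patch): packing
+ * the single-word PPDU_STATS_CFG message described above; 'pdev_mask' is
+ * an assumed local and the TLV bitmask would typically be
+ * HTT_PPDU_STATS_TAG_DEFAULT (defined below):
+ *
+ * struct htt_ppdu_stats_cfg_cmd cmd = {};
+ *
+ * cmd.msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+ * cmd.msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ * cmd.msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
+ * HTT_PPDU_STATS_TAG_DEFAULT);
+ */
+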
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per ring based and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies the ring to configure;
+ * more details in enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by the rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ u32 info0;
+ u32 info1;
+ u32 pkt_type_en_flags0;
+ u32 pkt_type_en_flags1;
+ u32 pkt_type_en_flags2;
+ u32 pkt_type_en_flags3;
+ u32 rx_filter_tlv;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+};
+
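+/* Illustrative sketch (editor's example, not part of this patch):
+ * populating a TLV filter for the monitor status ring from the MON_*
+ * macros above; the fields map one-to-one onto rx_filter_tlv and the
+ * pkt_type_en_flags words of struct htt_rx_ring_selection_cfg_cmd:
+ *
+ * struct htt_rx_ring_tlv_filter tlv_filter = {};
+ *
+ * tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ * tlv_filter.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ * HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ * tlv_filter.pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ * HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ * tlv_filter.pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ * HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ * tlv_filter.pkt_filter_flags3 = HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ * HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ */
+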
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ u32 version;
+} __packed;
+
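+/* Illustrative sketch (editor's example, not part of this patch):
+ * unpacking the version confirmation word with FIELD_GET; 'resp' is an
+ * assumed pointer to struct htt_t2h_version_conf_msg and the result would
+ * be compared against HTT_TARGET_VERSION_MAJOR:
+ *
+ * u8 major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, resp->version);
+ * u8 minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, resp->version);
+ */
+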
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+ u32 info2;
+} __packed;
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath11k_htt_ppdu_stats_msg {
+ u32 info;
+ u32 ppdu_id;
+ u32 timestamp;
+ u32 rsvd;
+ u8 data[0];
+} __packed;
+
+struct htt_tlv {
+ u32 header;
+ u8 value[0];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
+
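+/* Illustrative sketch (editor's example, not part of this patch): walking
+ * the TLVs of a PPDU stats payload; 'ptr' and 'len' are assumed to
+ * describe the data[] area of struct ath11k_htt_ppdu_stats_msg, and each
+ * tag would be dispatched to a per-TLV handler:
+ *
+ * while (len > sizeof(struct htt_tlv)) {
+ * struct htt_tlv *tlv = (struct htt_tlv *)ptr;
+ * u32 tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ * u32 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ *
+ * ptr += sizeof(struct htt_tlv) + tlv_len;
+ * len -= sizeof(struct htt_tlv) + tlv_len;
+ * }
+ */
+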
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ u32 ppdu_id;
+ u16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ u32 flags; /* %HTT_PPDU_STATS_CMN_FLAGS_ */
+ u32 chain_mask;
+ u32 fes_duration_us; /* frame exchange sequence */
+ u32 ppdu_sch_eval_start_tstmp_us;
+ u32 ppdu_sch_end_tstmp_us;
+ u32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ u16 phy_mode;
+ u16 bw_mhz;
+} __packed;
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M, _val)
+#define HTT_USR_RATE_BW(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M, _val)
+#define HTT_USR_RATE_NSS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M, _val)
+#define HTT_USR_RATE_MCS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
+#define HTT_USR_RATE_GI(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ u16 sw_peer_id;
+ u32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ u16 ru_end;
+ u16 ru_start;
+ u16 resp_ru_end;
+ u16 resp_ru_start;
+ u32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ u32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_flags is only valid if resp_type is UL */
+ u32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M, _flags)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M, _flags)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M, _flags)
+#define HTT_TX_INFO_PEERID(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
+
+struct htt_tx_ppdu_stats_info {
+ struct htt_tlv tlv_hdr;
+ u32 tx_success_bytes;
+ u32 tx_retry_bytes;
+ u32 tx_failed_bytes;
+ u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+ u16 tx_success_msdus;
+ u16 tx_retry_msdus;
+ u16 tx_failed_msdus;
+ u16 tx_duration; /* in usec units */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M, _val)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M, _val)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M, _val)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ u16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+ u16 mpdu_tried;
+ u16 mpdu_success;
+ u32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ u32 ppdu_id;
+ u16 sw_peer_id;
+ u16 reserved0;
+ u32 info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+ u16 current_seq;
+ u16 start_seq;
+ u32 success_bytes;
+} __packed;
+
+struct htt_ppdu_stats_usr_cmn_array {
+ struct htt_tlv tlv_hdr;
+ u32 num_ppdu_stats;
+ /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
+ * elements.
+ * tx_ppdu_stats_info is variable length, with length =
+ * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
+ */
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 ppdu_id;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/**
+ * @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31 16|15 12|11 10|9 8|7 0|
+ * |------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id| msg type |
+ * |------------------------------------------------------------------|
+ * | payload |
+ * |------------------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a pktlog message
+ * Value: HTT_T2H_MSG_TYPE_PKTLOG
+ * - mac_id
+ * Bits 9:8
+ * Purpose: identifies which MAC/PHY instance generated this pktlog info
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: explicitly specify the payload size
+ * Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+ u32 hdr;
+ u8 payload[0];
+};
+
+/**
+ * @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 15:8
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: this is an overloaded field; refer to the usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ u32 cfg_param0;
+ u32 cfg_param1;
+ u32 cfg_param2;
+ u32 cfg_param3;
+ u32 reserved;
+ u32 cookie_lsb;
+ u32 cookie_msb;
+} __packed;
+
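+/* Illustrative sketch (editor's example, not part of this patch): filling
+ * an extended stats request; 'type', 'pdev_idx' and 'cookie' are assumed
+ * locals, the pdev_mask encoding follows the bit layout documented above
+ * (bit 0 of the 8-bit field reserved for SOC stats), and the config
+ * params are stats-type specific (see htt_stats.h):
+ *
+ * struct htt_ext_stats_cfg_cmd cmd = {};
+ *
+ * cmd.hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+ * cmd.hdr.pdev_mask = 1 << (pdev_idx + 1);
+ * cmd.hdr.stats_type = type;
+ * cmd.cookie_lsb = lower_32_bits(cookie);
+ * cmd.cookie_msb = upper_32_bits(cookie);
+ */
+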
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type. */
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
+
+/**
+ * @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats indication
+ * carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_ext_stats_status
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath11k_htt_extd_stats_msg {
+ u32 info0;
+ u64 cookie;
+ u32 info1;
+ u8 data[0];
+} __packed;
+
+struct htt_mac_addr {
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+};
+
+static inline void ath11k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ addr_l32 = swab32(addr_l32);
+ addr_h16 = swab16(addr_h16);
+ }
+
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
+
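+/* Illustrative sketch (editor's example, not part of this patch):
+ * recovering a peer MAC address from an HTT peer map event with the
+ * helper above; 'ev' is an assumed pointer to struct
+ * htt_t2h_peer_map_event:
+ *
+ * u8 mac_addr[ETH_ALEN];
+ * u16 addr_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ * ev->info1);
+ *
+ * ath11k_dp_get_mac_addr(ev->mac_addr_l32, addr_h16, mac_addr);
+ */
+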
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget);
+int ath11k_dp_htt_connect(struct ath11k_dp *dp);
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif);
+void ath11k_dp_free(struct ath11k_base *ab);
+int ath11k_dp_alloc(struct ath11k_base *ab);
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_free(struct ath11k_base *ab);
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring);
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
new file mode 100644
index 000000000000..b08da839b7d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -0,0 +1,4195 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include "core.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
+static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
+{
+ return desc->hdr_status;
+}
+
+static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc)
+{
+ if (!(__le32_to_cpu(desc->mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->mpdu_start.info2));
+}
+
+static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
+ __le32_to_cpu(desc->mpdu_start.info5));
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(desc->attention.info2));
+}
+
+static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
+ __le32_to_cpu(desc->attention.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
+ __le32_to_cpu(desc->attention.info2)) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->attention.info1);
+ u32 errmap = 0;
+
+ if (info & RX_ATTENTION_INFO1_FCS_ERR)
+ errmap |= DP_RX_MPDU_ERR_FCS;
+
+ if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
+ errmap |= DP_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
+ errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
+ errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
+ errmap |= DP_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->msdu_start.info1));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->msdu_start.info3));
+}
+
+static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
+{
+ u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->msdu_start.info3));
+
+ return hweight8(mimo_ss_bitmap);
+}
+
+static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
+ __le32_to_cpu(desc->msdu_end.info2));
+}
+
+static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end,
+ sizeof(struct rx_msdu_end));
+ memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc)
+{
+ struct rx_attention *rx_attn;
+
+ rx_attn = &rx_desc->attention;
+
+ return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
+ __le32_to_cpu(rx_attn->info1));
+}
+
+static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc)
+{
+ struct rx_msdu_start *rx_msdu_start;
+
+ rx_msdu_start = &rx_desc->msdu_start;
+
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(rx_msdu_start->info2));
+}
+
+static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc)
+{
+ u8 *rx_pkt_hdr;
+
+ rx_pkt_hdr = &rx_desc->msdu_payload[0];
+
+ return rx_pkt_hdr;
+}
+
+static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(rx_desc->mpdu_start_tag));
+
+	return tlv_tag == HAL_RX_MPDU_START;
+}
+
+static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
+{
+ return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
+}
+
+/* Returns number of Rx buffers replenished */
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, gfp);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_idr_remove;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_idr_remove:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
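+
+/* Illustrative helper, not used by the driver: the inverse of the cookie
+ * composition in the replenish loop above. Rings hand this cookie back on
+ * completion, and the buf_id recovered here is the idr key under which
+ * the skb was stored; the masks are the same DP_RXDMA_BUF_COOKIE_* fields
+ * used with FIELD_PREP above.
+ */
+static inline void example_dp_rx_cookie_decode(u32 cookie, int *mac_id,
+					       int *buf_id)
+{
+	*mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+	*buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+}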
+
+static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+		/* TODO: Understand where the internal driver does this
+		 * dma_unmap of the rxdma_buffer.
+		 */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath11k_hal_srng_get_entrysize(ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+
+ rx_ring = &dp->rx_mon_status_refill_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+
+ return 0;
+}
+
+static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+}
+
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_pdev_dp *dp;
+ struct ath11k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
+ }
+}
+
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
+ dp->mac_id, dp->mac_id,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *srng = NULL;
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0,
+ dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
+ HAL_RXDMA_DST, 0, dp->mac_id,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
+ return ret;
+ }
+
+ srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring\n");
+ return ret;
+ }
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
+ HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
+ HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DESC_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DESC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *cmd, *tmp;
+ struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ dma_unmap_single(ab->dev, cmd->data.paddr,
+ cmd->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.vaddr);
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dma_unmap_single(ab->dev, cmd_cache->data.paddr,
+ cmd_cache->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.vaddr);
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+}
+
+static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
+ struct dp_rx_tid *rx_tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath11k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath11k_dp_reo_cmd_free);
+ if (ret) {
+ ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ }
+}
+
+static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath11k_base *ab = dp->ab;
+ struct dp_rx_tid *rx_tid = ctx;
+ struct dp_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (time_after(jiffies, elem->ts +
+ msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath11k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+}
+
+static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath11k_dp_rx_tid_del_func);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ }
+
+ rx_tid->active = false;
+}
+
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ int i;
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ ath11k_peer_rx_tid_delete(ar, peer, i);
+}
+
+static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+ }
+
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
+ const u8 *peer_mac, int vdev_id, u8 tid)
+{
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
+ goto unlock_exit;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ goto unlock_exit;
+
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+
+ rx_tid->active = false;
+
+unlock_exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u32 hw_desc_sz;
+ u32 *addr_aligned;
+ void *vaddr;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ peer_mac, paddr,
+ tid, 1, ba_win_sz);
+ if (ret)
+			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid %d (%d)\n",
+ tid, ret);
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+	/* TODO: Optimize the memory allocation for qos tid based on the
+	 * actual BA window size in the REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_KERNEL);
+ if (!vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
+ addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);
+
+ paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret) {
+ spin_unlock_bh(&ab->base_lock);
+ goto err_mem_free;
+ }
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+ paddr, tid, 1, ba_win_sz);
+ if (ret) {
+		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid %d (%d)\n",
+ tid, ret);
+ ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
+ }
+
+ return ret;
+
+err_mem_free:
+ kfree(vaddr);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn);
+ if (ret)
+		ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ dma_addr_t paddr;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ paddr = peer->rx_tid[params->tid].paddr;
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+	ret = ath11k_peer_rx_tid_reo_update(ar, peer,
+					    &peer->rx_tid[params->tid],
+					    1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ params->sta->addr, paddr,
+ params->tid, 1, 1);
+ if (ret)
+		ath11k_warn(ab, "failed to send wmi to delete rx tid: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = (struct htt_ppdu_stats_info *)data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id =
+ ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
+
+static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
+{
+ u32 bwflags = 0;
+
+ switch (bw) {
+ case ATH11K_BW_40:
+ bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case ATH11K_BW_80:
+ bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case ATH11K_BW_160:
+ bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+
+ return bwflags;
+}
+
+static void
+ath11k_update_per_peer_tx_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ieee80211_chanctx_conf *conf = NULL;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
+ u32 succ_bytes = 0;
+ u16 rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!usr_stats)
+ return;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = usr_stats->ack_ba.success_bytes;
+ succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
+ usr_stats->ack_ba.info);
+ tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
+ usr_stats->ack_ba.info);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = common->fes_duration_us;
+
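+	/* The HTT user rate word packs preamble/BW/NSS/MCS/GI; the BW field
+	 * is assumed to start at 2 for 20 MHz and NSS to be zero-based,
+	 * hence the -2 and +1 adjustments below.
+	 */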
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+
+	/* Note: If the host configured fixed rates, or in some other special
+	 * cases, broadcast/management frames are sent at different rates.
+	 * Should firmware rate control be skipped for these?
+	 */
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
+		ath11k_warn(ab, "invalid VHT mcs %hhu in peer stats\n", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
+		ath11k_warn(ab, "invalid HT mcs %hhu nss %hhu in peer stats\n",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ if (arsta->arvif && arsta->arvif->vif)
+ conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
+ if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
+ arsta->tx_info.status.rates[0].idx = rate_idx - 4;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ arsta->tx_info.status.rates[0].idx = rate_idx;
+ if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
+ mcs <= ATH11K_HW_RATE_CCK_SP_2M)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
+ if (sgi) {
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ }
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
+ if (sgi) {
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ }
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ if (succ_pkts) {
+ arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
+ arsta->tx_info.status.rates[0].count = 1;
+ ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
+ }
+
+	/* PPDU stats reported for mgmt packets don't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ ath11k_accumulate_per_peer_tx_stats(arsta,
+ peer_stats, rate_idx);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ spin_lock_bh(&ar->data_lock);
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ppdu_info;
+ }
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
+ if (!ppdu_info)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+ spin_unlock_bh(&ar->data_lock);
+
+ return ppdu_info;
+}
+
+static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath11k *ar;
+ int ret;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ ppdu_id = msg->ppdu_id;
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
+ trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+
+ ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath11k *ar;
+ u32 len;
+ u8 pdev_id;
+
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
+ if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
+ ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
+ len,
+ ATH11K_HTT_PKTLOG_MAX_SIZE);
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+ return;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, len);
+}
+
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash;
+
+	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
+ resp->version_msg.version);
+ dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
+ resp->version_msg.version);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+ resp->peer_map_ev.info2);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
+ resp->peer_unmap_ev.info);
+ ath11k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath11k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath11k_dbg_htt_ext_stats_handler(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG:
+ ath11k_htt_pktlog(ab, skb);
+ break;
+ default:
+ ath11k_warn(ab, "htt event %d not handled\n", type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra;
+ int rem_len;
+ int buf_len;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc);
+
+ /* MSDU spans over multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+	/* When an MSDU spans multiple buffers, the attention, MSDU_END and
+	 * MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
+ */
+ ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + HAL_RX_DESC_SIZE);
+ skb_pull(skb, HAL_RX_DESC_SIZE);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+
+ if (!rxcb->is_continuation)
+ return first;
+
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff_head *amsdu_list)
+{
+ struct sk_buff *msdu = skb_peek(msdu_list);
+ struct sk_buff *last_buf;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ u16 msdu_len;
+ u8 l3_pad_bytes;
+ u8 *hdr_status;
+ int ret;
+
+ if (!msdu)
+ return -ENOENT;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ hdr = (struct ieee80211_hdr *)hdr_status;
+ /* Process only data frames */
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ __skb_unlink(msdu, msdu_list);
+ dev_kfree_skb_any(msdu);
+ return -EINVAL;
+ }
+
+ do {
+ __skb_unlink(msdu, msdu_list);
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ar->ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+
+ if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
+ ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
+
+ if (!rxcb->is_continuation) {
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ar->ab,
+					    "failed to coalesce msdu rx buffer: %d\n", ret);
+ goto free_out;
+ }
+ }
+ __skb_queue_tail(amsdu_list, msdu);
+
+ /* Should we also consider msdu_cnt from mpdu_meta while
+ * preparing amsdu list?
+ */
+ if (rxcb->is_last_msdu)
+ break;
+ } while ((msdu = skb_peek(msdu_list)) != NULL);
+
+ return 0;
+
+free_out:
+ dev_kfree_skb_any(msdu);
+ __skb_queue_purge(amsdu_list);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool ip_csum_fail, l4_csum_fail;
+
+ ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc);
+ l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc);
+
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+
+ /* pull decapped header and copy SA & DA */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+	if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_amsdu;
+
+ is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc);
+ rfc1042 = hdr;
+
+ if (rxcb->is_first_msdu) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += hdr_len + crypto_len;
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ void *rfc1042;
+
+ rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
+ sizeof(struct ath11k_dp_rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ u8 *first_hdr;
+ u8 decap;
+
+ first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
+ decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
+static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ enum hal_encrypt_type enctype;
+ struct sk_buff *last_msdu;
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *last_rxcb;
+ bool is_decrypted;
+ u32 err_bitmap;
+ u8 *qos;
+
+ if (skb_queue_empty(amsdu_list))
+ return;
+
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+	 * reported as a separate MSDU, so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last_msdu = skb_peek_tail(amsdu_list);
+ last_rxcb = ATH11K_SKB_RXCB(last_msdu);
+
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
+
+ skb_queue_walk(amsdu_list, msdu) {
+ ath11k_dp_rx_h_csum_offload(msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+ }
+}
+
+static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck;
+
+ pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc);
+ bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc);
+ rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc);
+ nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc);
+ sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH11K_HT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+				    "received invalid mcs %d in HT mode\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+	case RX_MSDU_START_PKT_TYPE_11AC:
+		rx_status->encoding = RX_ENC_VHT;
+		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
+			ath11k_warn(ar->ab,
+				    "received invalid mcs %d in VHT mode\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->rate_idx = rate_mcs;
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+		if (rate_mcs > ATH11K_HE_MCS_MAX) {
+			ath11k_warn(ar->ab,
+				    "received invalid mcs %d in HE mode\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->rate_idx = rate_mcs;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
+
+static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 channel_num;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+
+ if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+		ath11k_warn(ar->ab, "unsupported channel number %d received\n",
+ channel_num);
+ return;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct sk_buff *first;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ bool first_mpdu;
+
+ if (skb_queue_empty(amsdu_list))
+ return;
+
+ first = skb_peek(amsdu_list);
+ rxcb = ATH11K_SKB_RXCB(first);
+ rx_desc = rxcb->rx_desc;
+
+ first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
+ if (first_mpdu)
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+
+ ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
+}
+
+static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
+ size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
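+/* Hand one MSDU to mac80211; HE frames get a minimal radiotap HE header
+ * pushed in front of the frame before delivery.
+ */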
+static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_radiotap_he *he = NULL;
+ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ if (status->encoding == RX_ENC_HE) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ ieee80211_get_SA(hdr),
+ ath11k_print_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ /* TODO: trace rx packet */
+
+ ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
+}
+
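+/* Copy the shared rx_status into each MSDU's control buffer, setting the
+ * per-MSDU A-MSDU, PN and monitor-skip flags before delivery.
+ */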
+static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
+ struct sk_buff_head *amsdu_list,
+ struct ieee80211_rx_status *rxs)
+{
+ struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+ struct ieee80211_rx_status *status;
+
+ first_subframe = skb_peek(amsdu_list);
+
+ skb_queue_walk(amsdu_list, msdu) {
+ /* Set up per-MSDU flags */
+ if (skb_queue_is_last(amsdu_list, msdu))
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_SKIP_MONITOR;
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = *rxs;
+ }
+}
+
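+/* Deliver MSDUs left over from the previous napi poll, dropping them if the
+ * pdev has gone inactive in the meantime.
+ */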
+static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *pending_q,
+ int *quota, u8 mac_id)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_pdev *pdev;
+
+ if (skb_queue_empty(pending_q))
+ return;
+
+ ar = ab->pdevs[mac_id].ar;
+
+ rcu_read_lock();
+ pdev = rcu_dereference(ab->pdevs_active[mac_id]);
+
+ while (*quota && (msdu = __skb_dequeue(pending_q))) {
+ if (!pdev) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ (*quota)--;
+ }
+ rcu_read_unlock();
+}
+
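+/* NAPI poll handler for a REO destination ring: delivers leftovers from the
+ * previous poll first, then reaps new MSDUs, groups them into A-MSDUs and
+ * delivers up to 'budget' frames to mac80211.
+ */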
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, struct sk_buff_head *pending_q,
+ int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ieee80211_rx_status *rx_status = &dp->rx_status;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list;
+ struct sk_buff_head amsdu_list;
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id;
+ int num_buffs_reaped = 0;
+ int quota = budget;
+ int ret;
+ bool done = false;
+
+ /* Process any pending packets from the previous napi poll.
+ * Note: All MSDUs in this pending_q correspond to the same mac id
+ * due to pdev based reo dest mapping and also since each irq group id
+ * maps to a specific reo dest ring.
+ */
+ ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
+ mac_id);
+
+ /* If all quota is exhausted by processing the pending_q,
+ * wait for the next napi poll to reap new entries from the ring.
+ */
+ if (!quota)
+ goto exit;
+
+ __skb_queue_head_init(&msdu_list);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+try_again:
+ while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+
+ cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ desc->buf_addr_info.info1);
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ cookie);
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ /* TODO: Check if the msdu can be sent up for processing */
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list, msdu);
+
+ /* Stop reaping from the ring once quota is exhausted
+ * and we've received all MSDUs in the A-MSDU. The
+ * additional MSDUs reaped in excess of quota here would
+ * be pushed into the pending queue to be processed during
+ * the next napi poll.
+ * Note: More profiling can be done to see the impact on
+ * pending_q and throughput during various traffic & density
+ * and how use of budget instead of remaining quota affects it.
+ */
+ if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
+ !rxcb->is_continuation) {
+ done = true;
+ break;
+ }
+ }
+
+ /* HW might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring, we'll
+ * get rx_desc NULL. Give the read another try with the updated cached
+ * head pointer so that we can reap a complete MPDU in the current
+ * rx processing.
+ */
+ if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ goto exit;
+
+ /* Should we reschedule it later if we are not able to replenish all
+ * the buffers?
+ */
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ __skb_queue_purge(&msdu_list);
+ goto rcu_unlock;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list);
+ goto rcu_unlock;
+ }
+
+ while (!skb_queue_empty(&msdu_list)) {
+ __skb_queue_head_init(&amsdu_list);
+ ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
+ if (ret) {
+ if (ret == -EIO) {
+ ath11k_err(ab, "rx ring got corrupted %d\n", ret);
+ __skb_queue_purge(&msdu_list);
+ /* Should we stop processing any more rx from
+ * this ring in the future?
+ */
+ goto rcu_unlock;
+ }
+
+ /* A-MSDU retrieval failed due to a non-fatal condition,
+ * continue processing with the next msdu.
+ */
+ continue;
+ }
+
+ ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
+
+ ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
+ skb_queue_splice_tail(&amsdu_list, pending_q);
+ }
+
+ while (quota && (msdu = __skb_dequeue(pending_q))) {
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ quota--;
+ }
+
+rcu_unlock:
+ rcu_read_unlock();
+exit:
+ return budget - quota;
+}
+
+static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+
+ if (!rx_stats)
+ return;
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
+ rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
+
+ if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
+ rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX)
+ rx_stats->gi_count[ppdu_info->gi] += num_msdu;
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX)
+ rx_stats->bw_count[ppdu_info->bw] += num_msdu;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+}
+
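+/* Allocate a DMA mapped skb for the monitor status ring, aligned to
+ * DP_RX_BUFFER_ALIGN_SIZE, and track it in the ring's buffer idr.
+ */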
+static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ int *buf_id, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, gfp);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
+
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id, gfp);
+ if (!skb)
+ break;
+ paddr = ATH11K_SKB_RXCB(skb)->paddr;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_desc_get;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_desc_get:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(skb);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
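+/* Reap completed status buffers from the monitor status ring into skb_list,
+ * replacing each reaped buffer in the ring with a freshly allocated one.
+ */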
+static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
+ struct hal_srng *srng;
+ void *rx_mon_status_desc;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ u32 cookie;
+ int buf_id;
+ dma_addr_t paddr;
+ u8 rbm;
+ int num_buffs_reaped = 0;
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc =
+ ath11k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc)
+ break;
+
+ ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_BIDIRECTIONAL);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ continue;
+ }
+
+ __skb_queue_tail(skb_list, skb);
+ }
+
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id, GFP_ATOMIC);
+
+ if (!skb) {
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ HAL_RX_BUF_RBM_SW3_BM);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie, HAL_RX_BUF_RBM_SW3_BM);
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct hal_rx_mon_ppdu_info ppdu_info;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
+
+ if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
+ trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
+
+ if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info.peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
+
+ if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ }
+exit:
+ return num_buffs_reaped;
+}
+
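+/* Return a used MSDU link descriptor to hw via the WBM release ring with
+ * the requested buffer manager action.
+ */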
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
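+/* Prepare rx_status for a fragment received in raw mode: record FCS/TKIP-MIC
+ * errors, derive band, frequency and rate, then strip the FCS and, for
+ * decrypted frames, the crypto MIC from the tail.
+ */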
+static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 rx_channel;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted;
+ u32 err_bitmap;
+
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+
+ if (rx_channel >= 1 && rx_channel <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (rx_channel >= 36 && rx_channel <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
+ rx_channel);
+ return;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
+ rx_status->band);
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+
+ /* Rx fragments are received in raw mode */
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+ }
+}
+
+static int
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
+ int buf_id, bool frag)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct ieee80211_rx_status rx_status = {0};
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_rx_status *status;
+ struct hal_rx_desc *rx_desc;
+ u16 msdu_len;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return -EINVAL;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (!frag) {
+ /* Process only rx fragments below, and drop
+ * MSDUs indicated due to errors.
+ */
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+
+ ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
+
+ status = IEEE80211_SKB_RXCB(msdu);
+
+ *status = rx_status;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ int tot_n_bufs_reaped, quota, ret, i;
+ int n_bufs_reaped[MAX_RADIOS] = {0};
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath11k_dp *dp;
+ void *link_desc_va;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ dma_addr_t paddr;
+ u32 *desc;
+ bool is_frag;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
+
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
+ msdu_cookies[i]);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
+ is_frag)) {
+ n_bufs_reaped[mac_id]++;
+ tot_n_bufs_reaped++;
+ }
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!n_bufs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+
+ return tot_n_bufs_reaped;
+}
+
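+/* Drop the remaining buffers of an MSDU scattered across multiple rx
+ * buffers; the number of buffers to drop is derived from the msdu length.
+ */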
+static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath11k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff_head amsdu_list;
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ /* First buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
+ ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ if (!ath11k_dp_rx_h_attn_msdu_done(desc)) {
+ ath11k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+ /* Handle NULL queue descriptor violations arising out of a missing
+ * REO queue for a given peer or a given TID. This typically
+ * may happen if a packet is received on a QoS enabled TID before the
+ * ADDBA negotiation for that TID has set up the TID queue. Or
+ * it may also happen for MC/BC frames if they are not routed to the
+ * non-QoS TID queue, in the absence of any other default TID queue.
+ * This error can show up in both the REO destination and WBM release rings.
+ */
+
+ __skb_queue_head_init(&amsdu_list);
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+
+ if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ __skb_queue_tail(&amsdu_list, msdu);
+
+ ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
+
+ /* Note that the caller still has access to the msdu and will complete
+ * rx with mac80211; there is no need to clean up amsdu_list.
+ */
+
+ return 0;
+}
+
+static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ default:
+ /* TODO: Review other errors and process them to mac80211
+ * as appropriate.
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath11k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ default:
+ /* TODO: Review other rxdma error code to check if anything is
+ * worth reporting to mac80211
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {0};
+ struct ieee80211_rx_status *status;
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = rxs;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+}
+
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id, mac_id;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
+ int total_num_buffs_reaped = 0;
+ int ret, i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
+ buf_id, mac_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+ total_num_buffs_reaped++;
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_num_buffs_reaped)
+ goto done;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return total_num_buffs_reaped;
+}
+
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
+ struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
+ struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
+ struct hal_srng *srng;
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ enum hal_rx_buf_return_buf_manager rbm;
+ enum hal_reo_entr_rxdma_ecode rxdma_err_code;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ struct hal_reo_entrance_ring *entr_ring;
+ void *desc;
+ int num_buf_freed = 0;
+ int quota = budget;
+ dma_addr_t paddr;
+ u32 desc_bank;
+ void *link_desc_va;
+ int num_msdus;
+ int i;
+ int buf_id;
+
+ srng = &ab->hal.srng_list[err_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (quota-- &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
+
+ entr_ring = (struct hal_reo_entrance_ring *)desc;
+ rxdma_err_code =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ entr_ring->info1);
+ ab->soc_stats.rxdma_error[rxdma_err_code]++;
+
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+ msdu_cookies, &rbm);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+
+ num_buf_freed++;
+ }
+
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (num_buf_freed)
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+
+ return budget - quota;
+}
+
+void ath11k_dp_process_reo_status(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ struct dp_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u32 *reo_desc;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath11k_hal_reo_status_queue_stats(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath11k_hal_reo_flush_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath11k_hal_reo_flush_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+
+ ath11k_dp_rx_pdev_srng_free(ar);
+ ath11k_dp_rxdma_pdev_buf_free(ar);
+}
+
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int ret;
+
+ ret = ath11k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->rxdma_err_dst_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_dst_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_desc_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DESC);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring %d\n",
+ ret);
+ return ret;
+ }
+ return 0;
+}
+
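+/* Split the remaining MPDU byte count into the length consumed by the
+ * current rx buffer, capping it at the buffer payload size.
+ */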
+static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
+{
+ if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
+ *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
+ *total_len -= *frag_len;
+ } else {
+ *frag_len = *total_len;
+ *total_len = 0;
+ }
+}
+
+static
+int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
+ void *p_last_buf_addr_info,
+ u8 mac_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *dp_srng;
+ void *hal_srng;
+ void *src_srng_desc;
+ int ret = 0;
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+
+ ath11k_hal_srng_access_begin(ar->ab, hal_srng);
+
+ src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
+
+ if (src_srng_desc) {
+ struct ath11k_buffer_addr *src_desc =
+ (struct ath11k_buffer_addr *)src_srng_desc;
+
+ *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "Monitor Link Desc Ring %d Full", mac_id);
+ ret = -ENOMEM;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, hal_srng);
+ return ret;
+}
+
+static
+void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info)
+{
+ struct hal_rx_msdu_link *msdu_link =
+ (struct hal_rx_msdu_link *)rx_msdu_link_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ u8 rbm = 0;
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
+
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
+
+ *pp_buf_addr_info = (void *)buf_addr_info;
+}
+
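+/* Force the skb to exactly 'len' bytes, trimming it or growing the tail
+ * (expanding via pskb_expand_head() if tailroom is short).
+ */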
+static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+ return 0;
+}
+
+static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
+ void *msdu_link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ struct hal_rx_msdu_link *msdu_link = NULL;
+ int i;
+ u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
+ u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
+ u8 tmp = 0;
+
+ msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
+ msdu_details = &msdu_link->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu_details[i].buf_addr_info.info0) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+ msdu_desc_info->info0 |= last;
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= first;
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= last;
+ msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu_details[i].buf_addr_info.info1);
+ tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu_details[i].buf_addr_info.info1);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
+
+static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
+ u32 *rx_bufs_used)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* mon_dst is behind mon_status,
+ * skip this dst_ring entry and free it
+ */
+ *rx_bufs_used += 1;
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
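+/* Compute the current buffer length of an MSDU, tracking whether it is
+ * a continuation spanning multiple buffers, and decrement msdu_cnt once
+ * the MSDU completes.
+ */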
+static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ if (!*is_frag) {
+ *total_len = info->msdu_len;
+ *is_frag = true;
+ }
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ if (*is_frag) {
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ *frag_len = info->msdu_len;
+ }
+ *is_frag = false;
+ *msdu_cnt -= 1;
+ }
+}
+
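+/* Pop one MPDU worth of buffers from the monitor destination ring: walk the
+ * MSDU link descriptor chain, link the MSDU skbs into a head/tail list and
+ * return the number of rx buffers consumed.
+ */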
+static u32
+ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_rx_msdu_list msdu_list;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ struct hal_rx_desc *rx_desc;
+ void *rx_msdu_link_desc;
+ dma_addr_t paddr;
+ u16 num_msdus = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ u32 rx_bufs_used = 0, i = 0;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ int buf_id;
+
+ ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie, &p_last_buf_addr_info,
+ &msdu_cnt);
+
+ if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
+ ent_desc->info1) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ ent_desc->info1);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ return rx_bufs_used;
+ }
+
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "msdu_pop: invalid buf_id %d\n", buf_id);
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+
+ msdu_ppdu_id =
+ ath11k_dp_rxdesc_get_ppduid(rx_desc);
+
+ if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id,
+ &rx_bufs_used)) {
+ if (rx_bufs_used) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
+ &sw_cookie,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
+static struct sk_buff *
+ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct sk_buff *last_msdu,
+ struct ieee80211_rx_status *rxs)
+{
+ struct sk_buff *msdu, *mpdu_buf, *prev_buf;
+ u32 decap_format, wifi_hdr_len;
+ struct hal_rx_desc *rx_desc;
+ char *hdr_desc;
+ u8 *dest;
+ struct ieee80211_hdr_3addr *wh;
+
+ mpdu_buf = NULL;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+
+ if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc))
+ return NULL;
+
+ decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dp_rx_msdus_set_payload(head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ __le16 qos_field;
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+
+ /* Base size */
+ wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control)) {
+ struct ieee80211_qos_hdr *qwh =
+ (struct ieee80211_qos_hdr *)hdr_desc;
+
+ qos_field = qwh->qos_ctrl;
+ qos_pkt = 1;
+ }
+ msdu = head_msdu;
+
+ while (msdu) {
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc);
+
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, wifi_hdr_len);
+ memcpy(dest + wifi_hdr_len,
+ (u8 *)&qos_field, sizeof(__le16));
+ }
+ ath11k_dp_rx_msdus_set_payload(msdu);
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "mpdu_buf %pK mpdu_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ /* Free the head buffer */
+ dev_kfree_skb_any(mpdu_buf);
+ }
+ return NULL;
+}
+
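+/* Merge the popped MSDU chain into monitor frames and deliver each skb to
+ * mac80211 with RX_FLAG_ONLY_MONITOR set.
+ */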
+static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct sk_buff *tail_msdu,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
+
+ mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
+ tail_msdu, rxs);
+
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+
+ rxs->flag = 0;
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+ status = IEEE80211_SKB_RXCB(mon_skb);
+ *status = *rxs;
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ void *ring_entry;
+ void *mon_dst_srng;
+ u32 ppdu_id;
+ u32 rx_bufs_used;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ u32 npackets = 0;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+
+ if (!mon_dst_srng) {
+ ath11k_warn(ar->ab,
+ "HAL Monitor Destination Ring Init Failed -- %pK",
+ mon_dst_srng);
+ return;
+ }
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_bufs_used = 0;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+ break;
+ }
+ if (head_msdu && tail_msdu) {
+ ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ }
+
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ }
+}
+
+static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
+ u32 quota,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct hal_rx_mon_ppdu_info *ppdu_info;
+ struct sk_buff *status_skb;
+ u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+
+ ppdu_info = &pmon->mon_ppdu_info;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
+ return;
+
+ while (!skb_queue_empty(&pmon->rx_status_q)) {
+ status_skb = skb_dequeue(&pmon->rx_status_q);
+
+ tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
+ status_skb);
+ if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath11k_dp_rx_mon_dest_process(ar, quota, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+ dev_kfree_skb_any(status_skb);
+ }
+}
+
+static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ int num_buffs_reaped = 0;
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
+ &pmon->rx_status_q);
+ if (num_buffs_reaped)
+ ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
+
+ return num_buffs_reaped;
+}
+
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ int ret = 0;
+
+ if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
+ ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
+ else
+ ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+
+ skb_queue_head_init(&pmon->rx_status_q);
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_srng *mon_desc_srng = NULL;
+ struct dp_srng *dp_srng;
+ int ret = 0;
+ u32 n_link_desc = 0;
+
+ ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
+ return ret;
+ }
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ n_link_desc = dp_srng->size /
+ ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
+ mon_desc_srng =
+ &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
+ n_link_desc);
+ if (ret) {
+ ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
+ return ret;
+ }
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+ return 0;
+}
+
+static int ath11k_dp_mon_link_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+
+ ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC,
+ &dp->rxdma_mon_desc_ring);
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
+{
+ ath11k_dp_mon_link_free(ar);
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
new file mode 100644
index 000000000000..eec5deaa59ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_DP_RX_H
+#define ATH11K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_RX_MPDU_ERR_FCS BIT(0)
+#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
+#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
+enum dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath11k_dp_amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+struct ath11k_dp_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn);
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab);
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab);
+void ath11k_dp_process_reo_status(struct ath11k_base *ab);
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget);
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, struct sk_buff_head *pending_q,
+ int budget);
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp);
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ gfp_t gfp);
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+
+#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
new file mode 100644
index 000000000000..6d7d33761caf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "hw.h"
+
+/* NOTE: None of the mapped ring id values may exceed DP_TCL_NUM_RING_MAX */
+static const u8
+ath11k_txq_tcl_ring_map[ATH11K_HW_MAX_QUEUES] = { 0x0, 0x1, 0x2, 0x2 };
+
+static enum hal_tcl_encap_type
+ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
+{
+ /* TODO: Determine encap type based on vif_type and configuration */
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
+static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
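+ /* Strip the QoS control field: shift the bytes that precede qos_ctl
+ * forward by IEEE80211_QOS_CTL_LEN, pull the duplicate bytes off the
+ * head and clear the QoS-data subtype bit.
+ */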
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {0};
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ void *hal_tcl_desc;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return -ENOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
+ ti.ring_id = ath11k_txq_tcl_ring_map[pool_id];
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
+ DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (ret < 0)
+ return -ENOSPC;
+
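+ /* The IDR slot number doubles as the msdu_id; pack it into the
+ * descriptor cookie together with the pdev and pool ids so the
+ * completion handler can look the skb up again.
+ */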
+ ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
+ FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+ ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
+ ti.meta_data_flags = arvif->tcl_metadata;
+
+ if (info->control.hw_key)
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
+ else
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.dscp_tid_tbl_idx = 0;
+
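+ /* Let HW compute the IPv4 header and TCP/UDP (v4 and v6) checksums
+ * when the stack requests partial checksum offload.
+ */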
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
+ }
+
+ if (ieee80211_vif_is_mesh(arvif->vif))
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE, 1);
+
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
+
+ ti.tid = ath11k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ /* TODO: For the CHECKSUM_PARTIAL case in raw mode, HW checksum
+ * offload is not applicable, so a manual checksum calculation
+ * using skb_checksum_help() is needed.
+ */
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, ti.paddr)) {
+ ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_idr;
+ }
+
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (!hal_tcl_desc) {
+ /* NOTE: It is highly unlikely we'll run out of tcl_ring
+ * descriptors because they are enqueued directly onto the hw
+ * queue. Add tx packet throttling logic in the future if
+ * required.
+ */
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+ goto fail_unmap_dma;
+ }
+
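+ /* The TCL data command is written just past the TLV header that
+ * leads each ring entry.
+ */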
+ ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
+ sizeof(struct hal_tlv_hdr), &ti);
+
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+fail_remove_idr:
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ idr_remove(&tx_ring->txbuf_idr,
+ FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
+ int msdu_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
+static void
+ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ struct dp_tx_ring *tx_ring,
+ struct ath11k_dp_htt_wbm_tx_status *ts)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k *ar;
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
+ ts->msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ ieee80211_tx_status(ar->hw, msdu);
+}
+
+static void
+ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
+ void *desc, u8 mac_id,
+ u32 msdu_id, struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath11k_dp_htt_wbm_tx_status ts = {0};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+ status_desc->info0);
+
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.msdu_id = msdu_id;
+ ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+ status_desc->info1);
+ ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+
+ if (ts->try_cnt > 1) {
+ peer_stats->retry_pkts += ts->try_cnt - 1;
+ peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
+
+ if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+ peer_stats->failed_pkts += 1;
+ peer_stats->failed_bytes += msdu->len;
+ }
+ }
+}
+
+static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ rcu_read_lock();
+
+ if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (!skb_cb->vif) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* Skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
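+ /* Track the ppdu_id seen on first-msdu completions: a repeat
+ * of the previous id means the msdus belonged to the same
+ * aggregate, so mark the cached stats as A-MPDU before they
+ * are flushed.
+ */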
+ if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
+ if (ar->last_ppdu_id == 0) {
+ ar->last_ppdu_id = ts->ppdu_id;
+ } else if (ar->last_ppdu_id == ts->ppdu_id ||
+ ar->cached_ppdu_id == ar->last_ppdu_id) {
+ ar->cached_ppdu_id = ar->last_ppdu_id;
+ ar->cached_stats.is_ampdu = true;
+ ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ } else {
+ ar->cached_stats.is_ampdu = false;
+ ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ }
+ ar->last_ppdu_id = ts->ppdu_id;
+ }
+
+ ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
+ }
+
+ /* NOTE: Tx rate status is not reported here. The tx completion status
+ * does not carry the information (for example nss) needed to build
+ * the tx rate, so it might end up being reported out-of-band from
+ * HTT stats.
+ */
+
+ ieee80211_tx_status(ar->hw, msdu);
+
+exit:
+ rcu_read_unlock();
+}
+
+static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
+ if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ return;
+
+ if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+ return;
+
+ ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+ desc->info0);
+ ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
+ desc->info1);
+ ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
+ desc->info1);
+ ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
+ desc->info2);
+ if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
+ ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
+ ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
+ ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
+ if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = desc->rate_stats.info0;
+ else
+ ts->rate_stats = 0;
+}
+
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = { 0 };
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ u32 *desc;
+ u32 msdu_id;
+ u8 mac_id;
+
+ ath11k_hal_srng_access_begin(ab, status_ring);
+
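+ /* Drain completed descriptors into the local tx_status buffer while
+ * holding ring access; the buffered statuses are processed below,
+ * outside the access window.
+ */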
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(struct hal_wbm_release_ring));
+ tx_ring->tx_status_head =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath11k_hal_srng_access_end(ab, status_ring);
+
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath11k_dp_tx_status_parse(ab, tx_status, &ts);
+
+ desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ tx_status->buf_addr_info.info1);
+ mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
+ msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+
+ if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ ath11k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu_id,
+ tx_ring);
+ continue;
+ }
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ msdu = idr_find(&tx_ring->txbuf_idr, msdu_id);
+ if (!msdu) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+ continue;
+ }
+ idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
+
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status))
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+ /* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num <= 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for the tid delete command, freeing up the resource on the command
+ * status indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
+static int
+ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int lmac_ring_id_offset = 0;
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ default:
+ ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_SRING_SETUP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+ htt_ring_type);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
+
+ cmd->ring_base_addr_lo = params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK;
+
+ cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ ret = ath11k_hal_srng_get_entrysize(ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
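+ /* HTT expects the entry and ring sizes in 4-byte words, not bytes */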
+ ring_entry_sz >>= 2;
+ cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
+ ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
+ params.num_entries * ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
+
+ cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_msi_addr_lo = 0;
+ cmd->ring_msi_addr_hi = 0;
+ cmd->msi_data = 0;
+
+ cmd->intr_info = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
+ params.intr_batch_cntr_thres_entries * ring_entry_sz);
+ cmd->intr_info |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
+ params.intr_timer_thres_us >> 3);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
+ params.low_threshold);
+ }
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
+ HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
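+ /* The pdev id field carries a bitmask with one bit per pdev */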
+ pdev_mask = 1 << (ar->pdev_idx);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
+ htt_ring_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+
+ cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
+ rx_buf_size);
+ cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
+ cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
+ cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
+ cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
+ cmd->rx_filter_tlv = tlv_filter->rx_filter;
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cfg_params->cfg0;
+ cmd->cfg_param1 = cfg_params->cfg1;
+ cmd->cfg_param2 = cfg_params->cfg2;
+ cmd->cfg_param3 = cfg_params->cfg3;
+ cmd->cookie_lsb = lower_32_bits(cookie);
+ cmd->cookie_msb = upper_32_bits(cookie);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath11k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ int ret = 0, ring_id = 0;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret)
+ return ret;
+
+ ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ if (!reset)
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ else
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
new file mode 100644
index 000000000000..f8a9f9c8e444
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_TX_H
+#define ATH11K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath11k_dp_htt_wbm_tx_status {
+ u32 msdu_id;
+ bool acked;
+ int ack_rssi;
+};
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb);
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*func)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status));
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask);
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset);
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
new file mode 100644
index 000000000000..b58ac11c2747
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -0,0 +1,1124 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include "ahb.h"
+#include "hal_tx.h"
+#include "debug.h"
+#include "hal_desc.h"
+
+static const struct hal_srng_config hw_srng_config[] = {
+ /* TODO: max_rings can be populated by querying HW capabilities */
+ { /* REO_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
+ },
+ .reg_size = {
+ HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
+ HAL_REO2_RING_HP - HAL_REO1_RING_HP,
+ },
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_EXCEPTION */
+ /* Designate the REO2TCL ring as the exception ring. This ring
+ * is similar to the other REO2SW rings even though it is named
+ * REO2TCL. Any of the REO2SW rings can be used as the
+ * exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
+ },
+ .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_REINJECT */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
+ },
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
+ },
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO_STATUS_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
+ },
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_DATA */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 3,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_data_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
+ },
+ .reg_size = {
+ HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
+ HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
+ },
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_gse_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
+ },
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL_STATUS_RING_BASE_LSB,
+ HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
+ },
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_SRC */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
+ HAL_CE_DST_RING_BASE_LSB),
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
+ },
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_RING_BASE_LSB),
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ },
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_HP),
+ },
+ .reg_size = {
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
+ },
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM_IDLE_LINK */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
+ },
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* SW2WBM_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_RELEASE_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
+ },
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM2SW_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .reg_start = {
+ (HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM0_RELEASE_RING_BASE_LSB),
+ (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
+ },
+ .reg_size = {
+ (HAL_WBM1_RELEASE_RING_BASE_LSB -
+ HAL_WBM0_RELEASE_RING_BASE_LSB),
+ (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
+ },
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* RXDMA_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DESC */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA DIR BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 1,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+};
+
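+/* Allocate the shared "remote pointer" area in coherent memory, one u32
+ * slot per ring id; ring head/tail pointer updates from HW/FW land here.
+ */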
+static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
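+ /* Program the per-ring max buffer length into the CE dest ctrl reg */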
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+ val = ath11k_ahb_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
+ srng->u.dst_ring.max_buffer_length);
+ ath11k_ahb_write32(ab, addr, val);
+}
+
+static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
+ (u32)srng->msi_addr);
+
+ val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
+ srng->msi_data);
+ }
+
+ ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
+ FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
+
+ /* interrupt setup */
+ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
+ (srng->intr_timer_thres_us >> 3));
+
+ val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
+ val);
+
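+ /* Point HW at this ring's head pointer slot in the shared rdp area */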
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_ahb_write32(ab, reg_base, 0);
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+}
+
+static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
+ (u32)srng->msi_addr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
+ val);
+
+ ath11k_ahb_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET,
+ srng->msi_data);
+ }
+
+ ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
+
+ /* interrupt setup */
+ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in units
+ * of 8 usecs instead of 1 usec (as in v1).
+ */
+ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
+ srng->intr_timer_thres_us);
+
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
+ srng->u.src_ring.low_threshold);
+ }
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_ahb_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_ahb_write32(ab, reg_base, 0);
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
+}
+
+static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath11k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
+ return -EINVAL;
+ }
+
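+ /* LMAC rings are replicated per mac; step the ring id by the
+ * per-lmac stride for the requested mac_id.
+ */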
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->lmac_ring)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+int ath11k_hal_srng_get_entrysize(u32 ring_type)
+{
+ const struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &hw_srng_config[ring_type];
+
+ return (srng_config->entry_size << 2);
+}
+
+int ath11k_hal_srng_get_max_entries(u32 ring_type)
+{
+ const struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &hw_srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
+
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data)
+{
+ struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
+ byte_swap_data) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
+}
+
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
+{
+ struct hal_ce_srng_dest_desc *desc =
+ (struct hal_ce_srng_dest_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
+}
+
+u32 ath11k_hal_ce_dst_status_get_length(void *buf)
+{
+ struct hal_ce_srng_dst_status_desc *desc =
+ (struct hal_ce_srng_dst_status_desc *)buf;
+ u32 len;
+
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+
+ return len;
+}
+
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ (paddr & HAL_ADDR_LSB_REG_MASK));
+ desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}
+
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
+
+/* Returns number of available entries in src ring */
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
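+ /* One entry is deliberately kept unused so that a full ring can be
+ * told apart from an empty one.
+ */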
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
+
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: Using % is expensive, but we have to do this since the size
+ * of some SRNG rings is not a power of 2 (due to descriptor sizes).
+ * See whether a separate function can be defined for rings with a
+ * power-of-2 ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so
+ * that the overhead of % can be avoided by masking (with &).
+ */
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+ /* TODO: Reap functionality is not used by all rings. If a particular
+ * ring does not use it, we need not update reap_hp with the next_hp
+ * pointer. Make sure a separate function is in place before
+ * optimizing away the reap_hp update below.
+ */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ else
+ srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+}
+
+/* Write the cached ring head/tail pointers out to HW.
+ * ath11k_hal_srng_access_begin() should have been called before this.
+ */
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: See if we need a write memory barrier here */
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ /* For LMAC rings, ring pointer updates are done through FW and
+ * hence written to a shared memory location that is read by FW
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ else
+ *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ ath11k_ahb_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ ath11k_ahb_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+}
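+
+/* Illustrative consumer sequence for a destination ring (a sketch of the
+ * expected calling pattern, not a fixed contract; process_desc() is a
+ * placeholder):
+ *
+ *	spin_lock_bh(&srng->lock);
+ *	ath11k_hal_srng_access_begin(ab, srng);
+ *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
+ *		process_desc(desc);
+ *	ath11k_hal_srng_access_end(ab, srng);
+ *	spin_unlock_bh(&srng->lock);
+ */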
+
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath11k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
+ link_addr->info1 = FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
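+	/* reg_scatter_buf_sz above is the scatter buffer size expressed in
+	 * units of 64 bytes, as implied by the "/ 64" at the top of this
+	 * function; HW consumes it via HAL_WBM_SCATTER_BUFFER_SIZE below.
+	 */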
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
+ FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+ reg_scatter_buf_sz * nsbufs));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL));
+
+ /* Setup head and tail pointers for the idle list */
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[nsbufs - 1].paddr));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[nsbufs - 1].paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
+ (end_offset >> 2)));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
+ 0));
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
+ 2 * tot_link_desc);
+
+ /* Enable the SRNG */
+ ath11k_ahb_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
+}
+
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 lmac_idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ spin_lock_init(&srng->lock);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
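+	/* entry_size is in units of 32-bit words, so the << 2 converts the
+	 * ring length from words to bytes
+	 */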
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ /* TODO: Add comments on these swap configurations */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
+ HAL_SRNG_FLAGS_RING_PTR_SWAP;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->lmac_ring) {
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ }
+ } else {
+		/* During initialization the loop count in all the
+		 * descriptors is set to zero. HW sets it to 1 on completing
+		 * the descriptor update in the first loop and increments it
+		 * by 1 on subsequent loops (the loop count wraps around
+		 * after reaching 0xffff). The 'loop_cnt' in the SW ring
+		 * state is the expected loop count in descriptors updated
+		 * by HW (to be processed by SW).
+		 */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->lmac_ring) {
+ /* For LMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ }
+ }
+
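+	/* LMAC ring registers are owned and initialized by the FW, so skip
+	 * the host-side HW init below for them
+	 */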
+ if (srng_config->lmac_ring)
+ return ring_id;
+
+ ath11k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath11k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
+
+int ath11k_hal_srng_init(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ hal->srng_config = hw_srng_config;
+
+ ret = ath11k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ return 0;
+
+err_free_cont_rdp:
+ ath11k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+
+void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+{
+ ath11k_hal_free_cont_rdp(ab);
+ ath11k_hal_free_cont_wrp(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
new file mode 100644
index 000000000000..5b13ccdf39e4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -0,0 +1,897 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_H
+#define ATH11K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath11k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* calculate the register address from bar0 of shadow register x */
+#define SHADOW_BASE_ADDRESS 0x00003024
+#define SHADOW_NUM_REGISTERS 36
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x00a01000
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x00a02000
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x00a03000
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
+#define HAL_TCL1_RING_BASE_LSB 0x00000510
+#define HAL_TCL1_RING_BASE_MSB 0x00000514
+#define HAL_TCL1_RING_ID 0x00000518
+#define HAL_TCL1_RING_MISC 0x00000520
+#define HAL_TCL1_RING_TP_ADDR_LSB 0x0000052c
+#define HAL_TCL1_RING_TP_ADDR_MSB 0x00000530
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 0x00000540
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 0x00000544
+#define HAL_TCL1_RING_MSI1_BASE_LSB 0x00000558
+#define HAL_TCL1_RING_MSI1_BASE_MSB 0x0000055c
+#define HAL_TCL1_RING_MSI1_DATA 0x00000560
+#define HAL_TCL2_RING_BASE_LSB 0x00000568
+#define HAL_TCL_RING_BASE_LSB 0x00000618
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET \
+ (HAL_TCL1_RING_MSI1_BASE_LSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_MSI1_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET \
+ (HAL_TCL1_RING_MSI1_DATA - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_ID_OFFSET \
+ (HAL_TCL1_RING_ID - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET \
+ (HAL_TCL1_RING_TP_ADDR_LSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET \
+ (HAL_TCL1_RING_TP_ADDR_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MISC_OFFSET \
+ (HAL_TCL1_RING_MISC - HAL_TCL1_RING_BASE_LSB)
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002018
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB 0x00000720
+#define HAL_TCL_STATUS_RING_HP 0x00002030
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_RING_BASE_LSB 0x0000029c
+#define HAL_REO1_RING_BASE_MSB 0x000002a0
+#define HAL_REO1_RING_ID 0x000002a4
+#define HAL_REO1_RING_MISC 0x000002ac
+#define HAL_REO1_RING_HP_ADDR_LSB 0x000002b0
+#define HAL_REO1_RING_HP_ADDR_MSB 0x000002b4
+#define HAL_REO1_RING_PRODUCER_INT_SETUP 0x000002c0
+#define HAL_REO1_RING_MSI1_BASE_LSB 0x000002e4
+#define HAL_REO1_RING_MSI1_BASE_MSB 0x000002e8
+#define HAL_REO1_RING_MSI1_DATA 0x000002ec
+#define HAL_REO2_RING_BASE_LSB 0x000002f4
+#define HAL_REO1_AGING_THRESH_IX_0 0x00000564
+#define HAL_REO1_AGING_THRESH_IX_1 0x00000568
+#define HAL_REO1_AGING_THRESH_IX_2 0x0000056c
+#define HAL_REO1_AGING_THRESH_IX_3 0x00000570
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
+ (HAL_REO1_RING_MSI1_BASE_LSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
+ (HAL_REO1_RING_MSI1_BASE_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MSI1_DATA_OFFSET \
+ (HAL_REO1_RING_MSI1_DATA - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_BASE_MSB_OFFSET \
+ (HAL_REO1_RING_BASE_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_ID_OFFSET (HAL_REO1_RING_ID - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
+ (HAL_REO1_RING_HP_ADDR_LSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
+ (HAL_REO1_RING_HP_ADDR_MSB - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_MISC_OFFSET (HAL_REO1_RING_MISC - HAL_REO1_RING_BASE_LSB)
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP 0x00003038
+#define HAL_REO1_RING_TP 0x0000303c
+#define HAL_REO2_RING_HP 0x00003040
+
+#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+
+/* REO2TCL R0 ring configuration address */
+#define HAL_REO_TCL_RING_BASE_LSB 0x000003fc
+
+/* REO2TCL R2 ring pointer (head/tail) address */
+#define HAL_REO_TCL_RING_HP 0x00003058
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP 0x00003020
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP 0x00003028
+
+/* CE ring R0 address */
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB 0x00000504
+#define HAL_REO_STATUS_HP 0x00003070
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR 0x00000870
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
+#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
+#define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_RELEASE_RING_BASE_LSB 0x000001d8
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB 0x00000910
+#define HAL_WBM1_RELEASE_RING_BASE_LSB 0x00000968
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
+#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+
+#define HAL_RX_DESC_SIZE (sizeof(struct hal_rx_desc))
+
+/* Add any other errors here and return them in
+ * ath11k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW1 = 0,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_SW2REO,
+
+ HAL_SRNG_RING_ID_REO_CMD = 8,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 16,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 32,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 56,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
+ HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 127,
+ HAL_SRNG_RING_ID_LMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+
+ HAL_SRNG_RING_ID_LMAC1_ID_END = 143
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+#define HAL_SRNG_NUM_LMACS 3
+#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1
+#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
+ HAL_SRNG_RING_ID_LMAC1_ID_START)
+#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \
+ HAL_SRNG_NUM_LMAC_RINGS)
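+
+/* With the IDs above, HAL_SRNG_RINGS_PER_LMAC = 143 - 128 = 15,
+ * HAL_SRNG_NUM_LMAC_RINGS = 3 * 15 = 45 and HAL_SRNG_RING_ID_MAX =
+ * 127 + 45 = 172, which sizes the srng_list array in struct ath11k_hal.
+ */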
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ *	earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_DRAIN:
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+
+ /* Add more params as needed */
+};
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in micro seconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+ } src_ring;
+ } u;
+};
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 1000
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ u8 lmac_ring;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list.
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ */
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should match the HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
+
+struct ath11k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
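+
+/* Illustrative (hypothetical) use with an UPDATE_RX_QUEUE command: to mark
+ * a REO queue invalid, a caller could select the VLD field for update in
+ * upd0, leave VLD cleared in upd1 and ask for a completion status:
+ *
+ *	struct ath11k_hal_reo_cmd cmd = {};
+ *
+ *	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
+ *	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ */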
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/**
+ * HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath11k_hal {
+	/* HAL internal state for all SRNG rings */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ const struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
+ int num_shadow_reg_configured;
+};
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seqtype);
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab);
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
+u32 ath11k_hal_ce_dst_status_get_length(void *buf);
+int ath11k_hal_srng_get_entrysize(u32 ring_type);
+int ath11k_hal_srng_get_max_entries(u32 ring_type);
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
new file mode 100644
index 000000000000..5e200380cca4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -0,0 +1,2468 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_HAL_DESC_H
+#define ATH11K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(10, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 11)
+
+struct ath11k_buffer_addr {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+/* ath11k_buffer_addr
+ *
+ * info0
+ * Address (lower 32 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * addr
+ * Address (upper 8 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ *	Cookie field exclusively used by SW. HW ignores the contents,
+ *	except that it passes the programmed value on to other
+ *	descriptors together with the physical address.
+ *
+ *	The field can be used by SW, for example, to associate a buffer's
+ *	physical address with its virtual address.
+ */
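+
+/* A minimal sketch of how SW fills these two words (this mirrors
+ * ath11k_hal_set_link_desc_addr() in hal.c; 'rbm' and 'cookie' are
+ * caller-chosen values):
+ *
+ *	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ *				  paddr & HAL_ADDR_LSB_REG_MASK);
+ *	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ *				  (u64)paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ *		       FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, rbm) |
+ *		       FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+ */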
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_MACRX_CBF_READ_REQUEST = 8 /* 0x8 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 9 /* 0x9 */,
+ HAL_MACRX_EXPECT_NDP_RECEPTION = 10 /* 0xa */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 11 /* 0xb */,
+ HAL_MACRX_NDP_TIMEOUT = 12 /* 0xc */,
+ HAL_MACRX_ABORT_ACK = 13 /* 0xd */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 14 /* 0xe */,
+ HAL_MACRX_CHAIN_MASK = 15 /* 0xf */,
+ HAL_MACRX_NAP_USER = 16 /* 0x10 */,
+ HAL_MACRX_ABORT_REQUEST = 17 /* 0x11 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 18 /* 0x12 */,
+ HAL_PHYTX_ABORT_ACK = 19 /* 0x13 */,
+ HAL_PHYTX_ABORT_REQUEST = 20 /* 0x14 */,
+ HAL_PHYTX_PKT_END = 21 /* 0x15 */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 22 /* 0x16 */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 23 /* 0x17 */,
+ HAL_PHYTX_DATA_REQUEST = 24 /* 0x18 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 25 /* 0x19 */,
+ HAL_PHYTX_NAP_ACK = 26 /* 0x1a */,
+ HAL_PHYTX_NAP_DONE = 27 /* 0x1b */,
+ HAL_PHYTX_OFF_ACK = 28 /* 0x1c */,
+ HAL_PHYTX_ON_ACK = 29 /* 0x1d */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 30 /* 0x1e */,
+ HAL_PHYTX_DEBUG16 = 31 /* 0x1f */,
+ HAL_MACTX_ABORT_REQUEST = 32 /* 0x20 */,
+ HAL_MACTX_ABORT_ACK = 33 /* 0x21 */,
+ HAL_MACTX_PKT_END = 34 /* 0x22 */,
+ HAL_MACTX_PRE_PHY_DESC = 35 /* 0x23 */,
+ HAL_MACTX_BF_PARAMS_COMMON = 36 /* 0x24 */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 37 /* 0x25 */,
+ HAL_MACTX_PREFETCH_CV = 38 /* 0x26 */,
+ HAL_MACTX_USER_DESC_COMMON = 39 /* 0x27 */,
+ HAL_MACTX_USER_DESC_PER_USER = 40 /* 0x28 */,
+ HAL_EXAMPLE_USER_TLV_16 = 41 /* 0x29 */,
+ HAL_EXAMPLE_TLV_16 = 42 /* 0x2a */,
+ HAL_MACTX_PHY_OFF = 43 /* 0x2b */,
+ HAL_MACTX_PHY_ON = 44 /* 0x2c */,
+ HAL_MACTX_SYNTH_OFF = 45 /* 0x2d */,
+ HAL_MACTX_EXPECT_CBF_COMMON = 46 /* 0x2e */,
+ HAL_MACTX_EXPECT_CBF_PER_USER = 47 /* 0x2f */,
+ HAL_MACTX_PHY_DESC = 48 /* 0x30 */,
+ HAL_MACTX_L_SIG_A = 49 /* 0x31 */,
+ HAL_MACTX_L_SIG_B = 50 /* 0x32 */,
+ HAL_MACTX_HT_SIG = 51 /* 0x33 */,
+ HAL_MACTX_VHT_SIG_A = 52 /* 0x34 */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 53 /* 0x35 */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 54 /* 0x36 */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 55 /* 0x37 */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 56 /* 0x38 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 57 /* 0x39 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 58 /* 0x3a */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 59 /* 0x3b */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 60 /* 0x3c */,
+ HAL_MACTX_SERVICE = 61 /* 0x3d */,
+ HAL_MACTX_HE_SIG_A_SU = 62 /* 0x3e */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 63 /* 0x3f */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 64 /* 0x40 */,
+ HAL_MACTX_HE_SIG_B1_MU = 65 /* 0x41 */,
+ HAL_MACTX_HE_SIG_B2_MU = 66 /* 0x42 */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 67 /* 0x43 */,
+ HAL_MACTX_DELETE_CV = 68 /* 0x44 */,
+ HAL_MACTX_MU_UPLINK_COMMON = 69 /* 0x45 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 70 /* 0x46 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 71 /* 0x47 */,
+ HAL_MACTX_PHY_NAP = 72 /* 0x48 */,
+ HAL_MACTX_DEBUG = 73 /* 0x49 */,
+ HAL_PHYRX_ABORT_ACK = 74 /* 0x4a */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 75 /* 0x4b */,
+ HAL_PHYRX_RSSI_LEGACY = 76 /* 0x4c */,
+ HAL_PHYRX_RSSI_HT = 77 /* 0x4d */,
+ HAL_PHYRX_USER_INFO = 78 /* 0x4e */,
+ HAL_PHYRX_PKT_END = 79 /* 0x4f */,
+ HAL_PHYRX_DEBUG = 80 /* 0x50 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 81 /* 0x51 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 82 /* 0x52 */,
+ HAL_PHYRX_L_SIG_A = 83 /* 0x53 */,
+ HAL_PHYRX_L_SIG_B = 84 /* 0x54 */,
+ HAL_PHYRX_HT_SIG = 85 /* 0x55 */,
+ HAL_PHYRX_VHT_SIG_A = 86 /* 0x56 */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 87 /* 0x57 */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 88 /* 0x58 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 89 /* 0x59 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 90 /* 0x5a */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 91 /* 0x5b */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 92 /* 0x5c */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 93 /* 0x5d */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 94 /* 0x5e */,
+ HAL_PHYRX_HE_SIG_A_SU = 95 /* 0x5f */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 96 /* 0x60 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 97 /* 0x61 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 98 /* 0x62 */,
+ HAL_PHYRX_HE_SIG_B2_MU = 99 /* 0x63 */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 100 /* 0x64 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 101 /* 0x65 */,
+ HAL_PHYRX_COMMON_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_DATA_DONE = 103 /* 0x67 */,
+ HAL_RECEIVE_RSSI_INFO = 104 /* 0x68 */,
+ HAL_RECEIVE_USER_INFO = 105 /* 0x69 */,
+ HAL_MIMO_CONTROL_INFO = 106 /* 0x6a */,
+ HAL_RX_LOCATION_INFO = 107 /* 0x6b */,
+ HAL_COEX_TX_REQ = 108 /* 0x6c */,
+ HAL_DUMMY = 109 /* 0x6d */,
+ HAL_RX_TIMING_OFFSET_INFO = 110 /* 0x6e */,
+ HAL_EXAMPLE_TLV_32_NAME = 111 /* 0x6f */,
+ HAL_MPDU_LIMIT = 112 /* 0x70 */,
+ HAL_NA_LENGTH_END = 113 /* 0x71 */,
+ HAL_OLE_BUF_STATUS = 114 /* 0x72 */,
+ HAL_PCU_PPDU_SETUP_DONE = 115 /* 0x73 */,
+ HAL_PCU_PPDU_SETUP_END = 116 /* 0x74 */,
+ HAL_PCU_PPDU_SETUP_INIT = 117 /* 0x75 */,
+ HAL_PCU_PPDU_SETUP_START = 118 /* 0x76 */,
+ HAL_PDG_FES_SETUP = 119 /* 0x77 */,
+ HAL_PDG_RESPONSE = 120 /* 0x78 */,
+ HAL_PDG_TX_REQ = 121 /* 0x79 */,
+ HAL_SCH_WAIT_INSTR = 122 /* 0x7a */,
+ HAL_SCHEDULER_TLV = 123 /* 0x7b */,
+ HAL_TQM_FLOW_EMPTY_STATUS = 124 /* 0x7c */,
+ HAL_TQM_FLOW_NOT_EMPTY_STATUS = 125 /* 0x7d */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 126 /* 0x7e */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 127 /* 0x7f */,
+ HAL_TQM_GEN_MPDUS = 128 /* 0x80 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 129 /* 0x81 */,
+ HAL_TQM_REMOVE_MPDU = 130 /* 0x82 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 131 /* 0x83 */,
+ HAL_TQM_REMOVE_MSDU = 132 /* 0x84 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 133 /* 0x85 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 134 /* 0x86 */,
+ HAL_TQM_WRITE_CMD = 135 /* 0x87 */,
+ HAL_OFDMA_TRIGGER_DETAILS = 136 /* 0x88 */,
+ HAL_TX_DATA = 137 /* 0x89 */,
+ HAL_TX_FES_SETUP = 138 /* 0x8a */,
+ HAL_RX_PACKET = 139 /* 0x8b */,
+ HAL_EXPECTED_RESPONSE = 140 /* 0x8c */,
+ HAL_TX_MPDU_END = 141 /* 0x8d */,
+ HAL_TX_MPDU_START = 142 /* 0x8e */,
+ HAL_TX_MSDU_END = 143 /* 0x8f */,
+ HAL_TX_MSDU_START = 144 /* 0x90 */,
+ HAL_TX_SW_MODE_SETUP = 145 /* 0x91 */,
+ HAL_TXPCU_BUFFER_STATUS = 146 /* 0x92 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 147 /* 0x93 */,
+ HAL_DATA_TO_TIME_CONFIG = 148 /* 0x94 */,
+ HAL_EXAMPLE_USER_TLV_32 = 149 /* 0x95 */,
+ HAL_MPDU_INFO = 150 /* 0x96 */,
+ HAL_PDG_USER_SETUP = 151 /* 0x97 */,
+ HAL_TX_11AH_SETUP = 152 /* 0x98 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 153 /* 0x99 */,
+ HAL_TX_PEER_ENTRY = 154 /* 0x9a */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 155 /* 0x9b */,
+ HAL_EXAMPLE_STRUCT_NAME = 156 /* 0x9c */,
+ HAL_PCU_PPDU_SETUP_END_INFO = 157 /* 0x9d */,
+ HAL_PPDU_RATE_SETTING = 158 /* 0x9e */,
+ HAL_PROT_RATE_SETTING = 159 /* 0x9f */,
+ HAL_RX_MPDU_DETAILS = 160 /* 0xa0 */,
+ HAL_EXAMPLE_USER_TLV_42 = 161 /* 0xa1 */,
+ HAL_RX_MSDU_LINK = 162 /* 0xa2 */,
+ HAL_RX_REO_QUEUE = 163 /* 0xa3 */,
+ HAL_ADDR_SEARCH_ENTRY = 164 /* 0xa4 */,
+ HAL_SCHEDULER_CMD = 165 /* 0xa5 */,
+ HAL_TX_FLUSH = 166 /* 0xa6 */,
+ HAL_TQM_ENTRANCE_RING = 167 /* 0xa7 */,
+ HAL_TX_DATA_WORD = 168 /* 0xa8 */,
+ HAL_TX_MPDU_DETAILS = 169 /* 0xa9 */,
+ HAL_TX_MPDU_LINK = 170 /* 0xaa */,
+ HAL_TX_MPDU_LINK_PTR = 171 /* 0xab */,
+ HAL_TX_MPDU_QUEUE_HEAD = 172 /* 0xac */,
+ HAL_TX_MPDU_QUEUE_EXT = 173 /* 0xad */,
+ HAL_TX_MPDU_QUEUE_EXT_PTR = 174 /* 0xae */,
+ HAL_TX_MSDU_DETAILS = 175 /* 0xaf */,
+ HAL_TX_MSDU_EXTENSION = 176 /* 0xb0 */,
+ HAL_TX_MSDU_FLOW = 177 /* 0xb1 */,
+ HAL_TX_MSDU_LINK = 178 /* 0xb2 */,
+ HAL_TX_MSDU_LINK_ENTRY_PTR = 179 /* 0xb3 */,
+ HAL_RESPONSE_RATE_SETTING = 180 /* 0xb4 */,
+ HAL_TXPCU_BUFFER_BASICS = 181 /* 0xb5 */,
+ HAL_UNIFORM_DESCRIPTOR_HEADER = 182 /* 0xb6 */,
+ HAL_UNIFORM_TQM_CMD_HEADER = 183 /* 0xb7 */,
+ HAL_UNIFORM_TQM_STATUS_HEADER = 184 /* 0xb8 */,
+ HAL_USER_RATE_SETTING = 185 /* 0xb9 */,
+ HAL_WBM_BUFFER_RING = 186 /* 0xba */,
+ HAL_WBM_LINK_DESCRIPTOR_RING = 187 /* 0xbb */,
+ HAL_WBM_RELEASE_RING = 188 /* 0xbc */,
+ HAL_TX_FLUSH_REQ = 189 /* 0xbd */,
+ HAL_RX_MSDU_DETAILS = 190 /* 0xbe */,
+ HAL_TQM_WRITE_CMD_STATUS = 191 /* 0xbf */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 192 /* 0xc0 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 193 /* 0xc1 */,
+ HAL_EXAMPLE_USER_CTLV_32 = 194 /* 0xc2 */,
+ HAL_TX_FES_STATUS_START = 195 /* 0xc3 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 196 /* 0xc4 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 197 /* 0xc5 */,
+ HAL_TX_FES_STATUS_END = 198 /* 0xc6 */,
+ HAL_RX_TRIG_INFO = 199 /* 0xc7 */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 200 /* 0xc8 */,
+ HAL_RX_FRAME_BITMAP_REQ = 201 /* 0xc9 */,
+ HAL_RX_FRAME_BITMAP_ACK = 202 /* 0xca */,
+ HAL_COEX_RX_STATUS = 203 /* 0xcb */,
+ HAL_RX_START_PARAM = 204 /* 0xcc */,
+ HAL_RX_PPDU_START = 205 /* 0xcd */,
+ HAL_RX_PPDU_END = 206 /* 0xce */,
+ HAL_RX_MPDU_START = 207 /* 0xcf */,
+ HAL_RX_MPDU_END = 208 /* 0xd0 */,
+ HAL_RX_MSDU_START = 209 /* 0xd1 */,
+ HAL_RX_MSDU_END = 210 /* 0xd2 */,
+ HAL_RX_ATTENTION = 211 /* 0xd3 */,
+ HAL_RECEIVED_RESPONSE_INFO = 212 /* 0xd4 */,
+ HAL_RX_PHY_SLEEP = 213 /* 0xd5 */,
+ HAL_RX_HEADER = 214 /* 0xd6 */,
+ HAL_RX_PEER_ENTRY = 215 /* 0xd7 */,
+ HAL_RX_FLUSH = 216 /* 0xd8 */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 217 /* 0xd9 */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 218 /* 0xda */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 219 /* 0xdb */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 220 /* 0xdc */,
+ HAL_TX_CBF_INFO = 221 /* 0xdd */,
+ HAL_PCU_PPDU_SETUP_USER = 222 /* 0xde */,
+ HAL_RX_MPDU_PCU_START = 223 /* 0xdf */,
+ HAL_RX_PM_INFO = 224 /* 0xe0 */,
+ HAL_RX_USER_PPDU_END = 225 /* 0xe1 */,
+ HAL_RX_PRE_PPDU_START = 226 /* 0xe2 */,
+ HAL_RX_PREAMBLE = 227 /* 0xe3 */,
+ HAL_TX_FES_SETUP_COMPLETE = 228 /* 0xe4 */,
+ HAL_TX_LAST_MPDU_FETCHED = 229 /* 0xe5 */,
+ HAL_TXDMA_STOP_REQUEST = 230 /* 0xe6 */,
+ HAL_RXPCU_SETUP = 231 /* 0xe7 */,
+ HAL_RXPCU_USER_SETUP = 232 /* 0xe8 */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 233 /* 0xe9 */,
+ HAL_TQM_ACKED_MPDU = 234 /* 0xea */,
+ HAL_COEX_TX_RESP = 235 /* 0xeb */,
+ HAL_COEX_TX_STATUS = 236 /* 0xec */,
+ HAL_MACTX_COEX_PHY_CTRL = 237 /* 0xed */,
+ HAL_COEX_STATUS_BROADCAST = 238 /* 0xee */,
+ HAL_RESPONSE_START_STATUS = 239 /* 0xef */,
+ HAL_RESPONSE_END_STATUS = 240 /* 0xf0 */,
+ HAL_CRYPTO_STATUS = 241 /* 0xf1 */,
+ HAL_RECEIVED_TRIGGER_INFO = 242 /* 0xf2 */,
+ HAL_REO_ENTRANCE_RING = 243 /* 0xf3 */,
+ HAL_RX_MPDU_LINK = 244 /* 0xf4 */,
+ HAL_COEX_TX_STOP_CTRL = 245 /* 0xf5 */,
+ HAL_RX_PPDU_ACK_REPORT = 246 /* 0xf6 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 247 /* 0xf7 */,
+ HAL_SCH_COEX_STATUS = 248 /* 0xf8 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 249 /* 0xf9 */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 250 /* 0xfa */,
+ HAL_TX_FES_STATUS_PROT = 251 /* 0xfb */,
+ HAL_TX_FES_STATUS_START_PPDU = 252 /* 0xfc */,
+ HAL_TX_FES_STATUS_START_PROT = 253 /* 0xfd */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 254 /* 0xfe */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 255 /* 0xff */,
+ HAL_TX_MPDU_COUNT_TRANSFER_END = 256 /* 0x100 */,
+ HAL_WHO_ANCHOR_OFFSET = 257 /* 0x101 */,
+ HAL_WHO_ANCHOR_VALUE = 258 /* 0x102 */,
+ HAL_WHO_CCE_INFO = 259 /* 0x103 */,
+ HAL_WHO_COMMIT = 260 /* 0x104 */,
+ HAL_WHO_COMMIT_DONE = 261 /* 0x105 */,
+ HAL_WHO_FLUSH = 262 /* 0x106 */,
+ HAL_WHO_L2_LLC = 263 /* 0x107 */,
+ HAL_WHO_L2_PAYLOAD = 264 /* 0x108 */,
+ HAL_WHO_L3_CHECKSUM = 265 /* 0x109 */,
+ HAL_WHO_L3_INFO = 266 /* 0x10a */,
+ HAL_WHO_L4_CHECKSUM = 267 /* 0x10b */,
+ HAL_WHO_L4_INFO = 268 /* 0x10c */,
+ HAL_WHO_MSDU = 269 /* 0x10d */,
+ HAL_WHO_MSDU_MISC = 270 /* 0x10e */,
+ HAL_WHO_PACKET_DATA = 271 /* 0x10f */,
+ HAL_WHO_PACKET_HDR = 272 /* 0x110 */,
+ HAL_WHO_PPDU_END = 273 /* 0x111 */,
+ HAL_WHO_PPDU_START = 274 /* 0x112 */,
+ HAL_WHO_TSO = 275 /* 0x113 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 276 /* 0x114 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 277 /* 0x115 */,
+ HAL_WHO_WMAC_IV = 278 /* 0x116 */,
+ HAL_MPDU_INFO_END = 279 /* 0x117 */,
+ HAL_MPDU_INFO_BITMAP = 280 /* 0x118 */,
+ HAL_TX_QUEUE_EXTENSION = 281 /* 0x119 */,
+ HAL_RX_PEER_ENTRY_DETAILS = 282 /* 0x11a */,
+ HAL_RX_REO_QUEUE_REFERENCE = 283 /* 0x11b */,
+ HAL_RX_REO_QUEUE_EXT = 284 /* 0x11c */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 285 /* 0x11d */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 286 /* 0x11e */,
+ HAL_TQM_ACKED_MPDU_STATUS = 287 /* 0x11f */,
+ HAL_TQM_ADD_MSDU_STATUS = 288 /* 0x120 */,
+ HAL_RX_MPDU_LINK_PTR = 289 /* 0x121 */,
+ HAL_REO_DESTINATION_RING = 290 /* 0x122 */,
+ HAL_TQM_LIST_GEN_DONE = 291 /* 0x123 */,
+ HAL_WHO_TERMINATE = 292 /* 0x124 */,
+ HAL_TX_LAST_MPDU_END = 293 /* 0x125 */,
+ HAL_TX_CV_DATA = 294 /* 0x126 */,
+ HAL_TCL_ENTRANCE_FROM_PPE_RING = 295 /* 0x127 */,
+ HAL_PPDU_TX_END = 296 /* 0x128 */,
+ HAL_PROT_TX_END = 297 /* 0x129 */,
+ HAL_PDG_RESPONSE_RATE_SETTING = 298 /* 0x12a */,
+ HAL_MPDU_INFO_GLOBAL_END = 299 /* 0x12b */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 300 /* 0x12c */,
+ HAL_RX_PPDU_END_USER_STATS = 301 /* 0x12d */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 302 /* 0x12e */,
+ HAL_NO_ACK_REPORT = 303 /* 0x12f */,
+ HAL_ACK_REPORT = 304 /* 0x130 */,
+ HAL_UNIFORM_REO_CMD_HEADER = 305 /* 0x131 */,
+ HAL_REO_GET_QUEUE_STATS = 306 /* 0x132 */,
+ HAL_REO_FLUSH_QUEUE = 307 /* 0x133 */,
+ HAL_REO_FLUSH_CACHE = 308 /* 0x134 */,
+ HAL_REO_UNBLOCK_CACHE = 309 /* 0x135 */,
+ HAL_UNIFORM_REO_STATUS_HEADER = 310 /* 0x136 */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 311 /* 0x137 */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 312 /* 0x138 */,
+ HAL_REO_FLUSH_CACHE_STATUS = 313 /* 0x139 */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 314 /* 0x13a */,
+ HAL_TQM_FLUSH_CACHE = 315 /* 0x13b */,
+ HAL_TQM_UNBLOCK_CACHE = 316 /* 0x13c */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 317 /* 0x13d */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 318 /* 0x13e */,
+ HAL_RX_PPDU_END_STATUS_DONE = 319 /* 0x13f */,
+ HAL_RX_STATUS_BUFFER_DONE = 320 /* 0x140 */,
+ HAL_BUFFER_ADDR_INFO = 321 /* 0x141 */,
+ HAL_RX_MSDU_DESC_INFO = 322 /* 0x142 */,
+ HAL_RX_MPDU_DESC_INFO = 323 /* 0x143 */,
+ HAL_TCL_DATA_CMD = 324 /* 0x144 */,
+ HAL_TCL_GSE_CMD = 325 /* 0x145 */,
+ HAL_TCL_EXIT_BASE = 326 /* 0x146 */,
+ HAL_TCL_COMPACT_EXIT_RING = 327 /* 0x147 */,
+ HAL_TCL_REGULAR_EXIT_RING = 328 /* 0x148 */,
+ HAL_TCL_EXTENDED_EXIT_RING = 329 /* 0x149 */,
+ HAL_UPLINK_COMMON_INFO = 330 /* 0x14a */,
+ HAL_UPLINK_USER_SETUP_INFO = 331 /* 0x14b */,
+ HAL_TX_DATA_SYNC = 332 /* 0x14c */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 333 /* 0x14d */,
+ HAL_TCL_STATUS_RING = 334 /* 0x14e */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 335 /* 0x14f */,
+ HAL_TQM_SYNC_CMD = 336 /* 0x150 */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 337 /* 0x151 */,
+ HAL_TQM_SYNC_CMD_STATUS = 338 /* 0x152 */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 339 /* 0x153 */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 340 /* 0x154 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 341 /* 0x155 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 342 /* 0x156 */,
+ HAL_REO_TO_PPE_RING = 343 /* 0x157 */,
+ HAL_RX_MPDU_INFO = 344 /* 0x158 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 345 /* 0x159 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 346 /* 0x15a */,
+ HAL_EXAMPLE_USER_TLV_32_NAME = 347 /* 0x15b */,
+ HAL_RX_PPDU_START_USER_INFO = 348 /* 0x15c */,
+ HAL_RX_RXPCU_CLASSIFICATION_OVERVIEW = 349 /* 0x15d */,
+ HAL_RX_RING_MASK = 350 /* 0x15e */,
+ HAL_WHO_CLASSIFY_INFO = 351 /* 0x15f */,
+ HAL_TXPT_CLASSIFY_INFO = 352 /* 0x160 */,
+ HAL_RXPT_CLASSIFY_INFO = 353 /* 0x161 */,
+ HAL_TX_FLOW_SEARCH_ENTRY = 354 /* 0x162 */,
+ HAL_RX_FLOW_SEARCH_ENTRY = 355 /* 0x163 */,
+ HAL_RECEIVED_TRIGGER_INFO_DETAILS = 356 /* 0x164 */,
+ HAL_COEX_MAC_NAP = 357 /* 0x165 */,
+ HAL_MACRX_ABORT_REQUEST_INFO = 358 /* 0x166 */,
+ HAL_MACTX_ABORT_REQUEST_INFO = 359 /* 0x167 */,
+ HAL_PHYRX_ABORT_REQUEST_INFO = 360 /* 0x168 */,
+ HAL_PHYTX_ABORT_REQUEST_INFO = 361 /* 0x169 */,
+ HAL_RXPCU_PPDU_END_INFO = 362 /* 0x16a */,
+ HAL_WHO_MESH_CONTROL = 363 /* 0x16b */,
+ HAL_L_SIG_A_INFO = 364 /* 0x16c */,
+ HAL_L_SIG_B_INFO = 365 /* 0x16d */,
+ HAL_HT_SIG_INFO = 366 /* 0x16e */,
+ HAL_VHT_SIG_A_INFO = 367 /* 0x16f */,
+ HAL_VHT_SIG_B_SU20_INFO = 368 /* 0x170 */,
+ HAL_VHT_SIG_B_SU40_INFO = 369 /* 0x171 */,
+ HAL_VHT_SIG_B_SU80_INFO = 370 /* 0x172 */,
+ HAL_VHT_SIG_B_SU160_INFO = 371 /* 0x173 */,
+ HAL_VHT_SIG_B_MU20_INFO = 372 /* 0x174 */,
+ HAL_VHT_SIG_B_MU40_INFO = 373 /* 0x175 */,
+ HAL_VHT_SIG_B_MU80_INFO = 374 /* 0x176 */,
+ HAL_VHT_SIG_B_MU160_INFO = 375 /* 0x177 */,
+ HAL_SERVICE_INFO = 376 /* 0x178 */,
+ HAL_HE_SIG_A_SU_INFO = 377 /* 0x179 */,
+ HAL_HE_SIG_A_MU_DL_INFO = 378 /* 0x17a */,
+ HAL_HE_SIG_A_MU_UL_INFO = 379 /* 0x17b */,
+ HAL_HE_SIG_B1_MU_INFO = 380 /* 0x17c */,
+ HAL_HE_SIG_B2_MU_INFO = 381 /* 0x17d */,
+ HAL_HE_SIG_B2_OFDMA_INFO = 382 /* 0x17e */,
+ HAL_PDG_SW_MODE_BW_START = 383 /* 0x17f */,
+ HAL_PDG_SW_MODE_BW_END = 384 /* 0x180 */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 385 /* 0x181 */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 386 /* 0x182 */,
+ HAL_SCHEDULER_END = 387 /* 0x183 */,
+ HAL_PEER_TABLE_ENTRY = 388 /* 0x184 */,
+ HAL_SW_PEER_INFO = 389 /* 0x185 */,
+ HAL_RXOLE_CCE_CLASSIFY_INFO = 390 /* 0x186 */,
+ HAL_TCL_CCE_CLASSIFY_INFO = 391 /* 0x187 */,
+ HAL_RXOLE_CCE_INFO = 392 /* 0x188 */,
+ HAL_TCL_CCE_INFO = 393 /* 0x189 */,
+ HAL_TCL_CCE_SUPERRULE = 394 /* 0x18a */,
+ HAL_CCE_RULE = 395 /* 0x18b */,
+ HAL_RX_PPDU_START_DROPPED = 396 /* 0x18c */,
+ HAL_RX_PPDU_END_DROPPED = 397 /* 0x18d */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 398 /* 0x18e */,
+ HAL_RX_MPDU_START_DROPPED = 399 /* 0x18f */,
+ HAL_RX_MSDU_START_DROPPED = 400 /* 0x190 */,
+ HAL_RX_MSDU_END_DROPPED = 401 /* 0x191 */,
+ HAL_RX_MPDU_END_DROPPED = 402 /* 0x192 */,
+ HAL_RX_ATTENTION_DROPPED = 403 /* 0x193 */,
+ HAL_TXPCU_USER_SETUP = 404 /* 0x194 */,
+ HAL_RXPCU_USER_SETUP_EXT = 405 /* 0x195 */,
+ HAL_CE_SRC_DESC = 406 /* 0x196 */,
+ HAL_CE_STAT_DESC = 407 /* 0x197 */,
+ HAL_RXOLE_CCE_SUPERRULE = 408 /* 0x198 */,
+ HAL_TX_RATE_STATS_INFO = 409 /* 0x199 */,
+ HAL_CMD_PART_0_END = 410 /* 0x19a */,
+ HAL_MACTX_SYNTH_ON = 411 /* 0x19b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 412 /* 0x19c */,
+ HAL_TQM_MPDU_GLOBAL_START = 413 /* 0x19d */,
+ HAL_EXAMPLE_TLV_32 = 414 /* 0x19e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 415 /* 0x19f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 416 /* 0x1a0 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 417 /* 0x1a1 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 418 /* 0x1a2 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 419 /* 0x1a3 */,
+ HAL_CE_DST_DESC = 420 /* 0x1a4 */,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ u32 tl;
+ u8 value[];
+} __packed;
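+
+/* Illustrative sketch, not part of the original patch: decoding a TLV
+ * header with the masks above. Helper names are hypothetical; FIELD_GET
+ * comes from <linux/bitfield.h>, which users of this header pull in.
+ */
+static inline u16 hal_tlv_hdr_tag_example(const struct hal_tlv_hdr *tlv)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+}
+
+static inline u16 hal_tlv_hdr_len_example(const struct hal_tlv_hdr *tlv)
+{
+ /* Length of 'value' in bytes; the next TLV begins
+  * ALIGN(sizeof(*tlv) + len, HAL_TLV_ALIGN) bytes from here.
+  */
+ return FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+}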
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_SEQ_NUM GENMASK(19, 8)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(20)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(21)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(22)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(23)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(24)
+#define RX_MPDU_DESC_INFO0_VALID_SA BIT(25)
+#define RX_MPDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(26)
+#define RX_MPDU_DESC_INFO0_VALID_DA BIT(27)
+#define RX_MPDU_DESC_INFO0_DA_MCBC BIT(28)
+#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+
+struct rx_mpdu_desc {
+ u32 info0; /* %RX_MPDU_DESC_INFO0_ */
+ u32 meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * mpdu_sequence_number
+ * The field can have two different meanings based on the setting
+ * of field 'bar_frame'. If 'bar_frame' is set, it means the MPDU
+ * start sequence number from the BAR frame; otherwise it means
+ * the MPDU sequence number of the received frame.
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for all MSDUs in this MPDU.
+ *
+ * sa_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC source address search due to the expiration
+ * of the search timer.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for all MSDUs in this MPDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates at least one of
+ * the DA addresses is a Multicast or Broadcast address.
+ *
+ * da_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC destination address search due to the
+ * expiration of the search timer.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * the contents in the MSDU buffer contains a 'RAW' MPDU.
+ */
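+
+/* A minimal sketch (added here for illustration, not in the patch):
+ * reading fields out of rx_mpdu_desc.info0 with the masks above.
+ * Helper names are hypothetical.
+ */
+static inline u8 rx_mpdu_desc_msdu_count_example(const struct rx_mpdu_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT, desc->info0);
+}
+
+static inline bool rx_mpdu_desc_is_frag_example(const struct rx_mpdu_desc *desc)
+{
+ /* Fragments bypass REO reorder/PN checks, per the doc above. */
+ return !!(desc->info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+}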
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_REO_DEST_IND GENMASK(21, 17)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(22)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(23)
+#define RX_MSDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(24)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(25)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(26)
+#define RX_MSDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(27)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, (val)))
+
+struct rx_msdu_desc {
+ u32 info0;
+ u32 rsvd0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates last msdu in mpdu. This flag can be true only when
+ * 'msdu_continuation' is set to 0. This implies that when an msdu
+ * is spread out over multiple buffers and thus msdu_continuation
+ * is set, 'last_msdu_in_mpdu' can only be set for the very last
+ * buffer of the msdu.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU; it still
+ * represents the MSDU length after decapsulation. In case of RAW
+ * MPDUs, it indicates the length of the entire MPDU (without the
+ * FCS field).
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall be pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to
+ * the expiration of the search timer for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due
+ * to the expiration of the search timer for this MSDU.
+ */
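+
+/* Sketch (illustrative only): the REO destination for an MSDU, paired
+ * with the existing HAL_RX_MSDU_PKT_LENGTH_GET() macro above. The
+ * helper name is hypothetical.
+ */
+static inline enum hal_rx_msdu_desc_reo_dest_ind
+rx_msdu_desc_reo_dest_example(const struct rx_msdu_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, desc->info0);
+}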
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(8)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(10, 9)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(15, 11)
+#define HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM GENMASK(31, 16)
+
+#define HAL_REO_DEST_RING_INFO1_REORDER_INFO_VALID BIT(0)
+#define HAL_REO_DEST_RING_INFO1_REORDER_OPCODE GENMASK(4, 1)
+#define HAL_REO_DEST_RING_INFO1_REORDER_SLOT_IDX GENMASK(12, 5)
+
+#define HAL_REO_DEST_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_DEST_RING_INFO1_ */
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' is 'error_detected'. All error
+ * codes are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * rx_queue_num
+ * Indicates the REO MPDU reorder queue id from which this frame
+ * originated.
+ *
+ * reorder_info_valid
+ * When set, REO has been instructed to not perform the actual
+ * re-ordering of frames for this queue, but just to insert
+ * the reorder opcodes.
+ *
+ * reorder_opcode
+ * Field is valid when 'reorder_info_valid' is set. This field is
+ * also always populated for debug purposes.
+ *
+ * reorder_slot_idx
+ * Valid only when 'reorder_info_valid' is set.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
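+
+/* Illustrative sketch, not part of the original patch: classifying a
+ * REO destination ring entry with the masks above. Returns true and
+ * fills *ecode when the entry was pushed due to a detected error.
+ * The helper name is hypothetical.
+ */
+static inline bool
+hal_reo_dest_ring_is_error_example(const struct hal_reo_dest_ring *desc,
+  enum hal_reo_dest_ring_error_code *ecode)
+{
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, desc->info0) !=
+   HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED)
+  return false;
+
+ *ecode = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE, desc->info0);
+ return true;
+}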
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+
+struct hal_reo_entrance_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall be pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'rxdma_push_reason' is 'error_detected'. All
+ * error codes are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
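+
+/* Sketch (illustrative, hypothetical helper name): reassembling the
+ * 40-bit REO queue descriptor address from queue_addr_lo plus the
+ * queue_addr_hi bits carried in info0.
+ */
+static inline u64
+hal_reo_entr_ring_queue_addr_example(const struct hal_reo_entrance_ring *desc)
+{
+ u64 hi = FIELD_GET(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, desc->info0);
+
+ return (hi << 32) | desc->queue_addr_lo;
+}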
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ u32 info0;
+} __packed;
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats settings. When set, clear the stats after
+ * generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
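+
+/* Sketch, not in the patch: filling a GET_QUEUE_STATS command.
+ * 'cmd_num', 'queue_addr' and 'clear' are caller-supplied; FIELD_PREP,
+ * lower_32_bits() and upper_32_bits() are standard kernel helpers.
+ * The helper name is hypothetical.
+ */
+static inline void
+hal_reo_init_get_queue_stats_example(struct hal_reo_get_queue_stats *desc,
+  u16 cmd_num, u64 queue_addr, bool clear)
+{
+ desc->cmd.info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num) |
+   HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ desc->queue_addr_lo = lower_32_bits(queue_addr);
+ desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+   upper_32_bits(queue_addr));
+ if (clear)
+  desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+}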
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 desc_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 cache_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_EPD BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO0_SRC_BUF_SWAP BIT(8)
+#define HAL_TCL_DATA_CMD_INFO0_LNK_META_SWAP BIT(9)
+#define HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE GENMASK(13, 12)
+#define HAL_TCL_DATA_CMD_INFO0_ADDR_EN GENMASK(15, 14)
+#define HAL_TCL_DATA_CMD_INFO0_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO1_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
+#define HAL_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
+
+#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0)
+#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6)
+#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26)
+
+#define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * epd
+ * When this bit is set, the input packet is an EPD type.
+ *
+ * encap_type
+ * Indicates the encapsulation that HW will perform. Values are
+ * defined in enum %HAL_TCL_ENCAP_TYPE_.
+ *
+ * encrypt_type
+ * Field only valid for encap_type: RAW
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * src_buffer_swap
+ * Treats source memory (packet buffer) organization as big-endian.
+ * 1'b0: Source memory is little endian
+ * 1'b1: Source memory is big endian
+ *
+ * link_meta_swap
+ * Treats link descriptor and Metadata as big-endian.
+ * 1'b0: memory is little endian
+ * 1'b1: memory is big endian
+ *
+ * search_type
+ * Search type select
+ * 0 - Normal search, 1 - Index based address search,
+ * 2 - Index based flow search
+ *
+ * addrx_en
+ * addry_en
+ * Address X/Y search enable in ASE, respectively.
+ * 1'b0: Search disable
+ * 1'b1: Search Enable
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled in by the
+ * first module (SW, TCL or TQM) that sees the frame.
+ *
+ * mesh_enable
+ * For raw WiFi frames, this indicates transmission to a mesh STA,
+ * enabling the interpretation of the 'Mesh Control Present' bit
+ * (bit 8) of QoS Control.
+ * For native WiFi frames, this indicates that a 'Mesh Control'
+ * field is present between the header and the LLC.
+ *
+ * hlos_tid_overwrite
+ *
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * lmac_id
+ * TCL uses this LMAC_ID in address search, i.e., while
+ * finding matching entry for the packet in AST corresponding
+ * to given LMAC_ID
+ *
+ * If LMAC ID is all 1s (=> value 3), it indicates wildcard
+ * match for any MAC
+ *
+ * dscp_tid_table_num
+ * DSCP to TID mapping table number that needs to be used
+ * for the MSDU.
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ *
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ * value should be equal to the four LSBs of the hash value of the
+ * match data in case the search index points to an entry which
+ * may also be used in content based search. The value can be
+ * anything when the entry pointed to by the search index will not
+ * be used for content based search.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ *
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
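+
+/* Sketch, illustrative only: programming a few hal_tcl_data_cmd fields
+ * for a buffer-type, native-wifi-encap MSDU. Real setup belongs in the
+ * TX path; names ending in _example are hypothetical.
+ */
+static inline void
+hal_tcl_data_cmd_fill_example(struct hal_tcl_data_cmd *cmd, u16 len, u8 tid)
+{
+ cmd->info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE,
+   HAL_TCL_DESC_TYPE_BUFFER) |
+   FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE,
+   HAL_TCL_ENCAP_TYPE_NATIVE_WIFI);
+ cmd->info1 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, len);
+ /* Overwrite the TID instead of letting TCL derive it from DSCP/PCP. */
+ cmd->info2 = HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE |
+   FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, tid);
+}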
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+enum hal_tcl_gse_ctrl {
+ HAL_TCL_GSE_CTRL_RD_STAT,
+ HAL_TCL_GSE_CTRL_SRCH_DIS,
+ HAL_TCL_GSE_CTRL_WR_BK_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_ALL,
+ HAL_TCL_GSE_CTRL_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_CLR_STAT_SINGLE,
+};
+
+/* hal_tcl_gse_ctrl
+ *
+ * rd_stat
+ * Report or Read statistics
+ * srch_dis
+ * Search disable. Report only Hash.
+ * wr_bk_single
+ * Write Back single entry
+ * wr_bk_all
+ * Write Back entire cache entry
+ * inval_single
+ * Invalidate single cache entry
+ * inval_all
+ * Invalidate entire cache
+ * wr_bk_inval_single
+ * Write back and invalidate single entry in cache
+ * wr_bk_inval_all
+ * Write back and invalidate entire cache
+ * clr_stat_single
+ * Clear statistics for single entry
+ */
+
+#define HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI GENMASK(7, 0)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_CTRL GENMASK(11, 8)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_SEL BIT(12)
+#define HAL_TCL_GSE_CMD_INFO0_STATUS_DEST_RING_ID BIT(13)
+#define HAL_TCL_GSE_CMD_INFO0_SWAP BIT(14)
+
+#define HAL_TCL_GSE_CMD_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_TCL_GSE_CMD_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_gse_cmd {
+ u32 ctrl_buf_addr_lo;
+ u32 info0;
+ u32 meta_data[2];
+ u32 rsvd0[2];
+ u32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * To select the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * status_destination_ring_id
+ * TCL status ring to which the GSE status needs to be sent.
+ *
+ * swap
+ * Bit to enable byte swapping of contents of buffer.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+#define HAL_TCL_STATUS_RING_INFO0_GSE_CTRL GENMASK(3, 0)
+#define HAL_TCL_STATUS_RING_INFO0_GSE_SEL BIT(4)
+#define HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES GENMASK(6, 5)
+#define HAL_TCL_STATUS_RING_INFO0_MSDU_CNT GENMASK(31, 8)
+
+#define HAL_TCL_STATUS_RING_INFO1_HASH_IDX GENMASK(19, 0)
+
+#define HAL_TCL_STATUS_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_TCL_STATUS_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_status_ring {
+ u32 info0;
+ u32 msdu_byte_count;
+ u32 msdu_timestamp;
+ u32 meta_data[2];
+ u32 info1;
+ u32 rsvd0;
+ u32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * To select the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * cache_op_res
+ * Cache operation result. Values are defined in enum
+ * %HAL_TCL_CACHE_OP_RES_.
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ * MSDU count of the entry and MSDU byte count for entry 1.
+ *
+ * hash_indx
+ * Hash value of the entry in case of search failed or disabled.
+ */
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ u32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ u32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/*
+ * hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ * Enable generation of 32-bit Toeplitz-LFSR hash for
+ * data transfer. In case of gather, the field in the first
+ * source ring entry of the gather copy cycle is taken into
+ * account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ * length
+ * Length of the buffer in units of octets of the current
+ * descriptor
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
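+
+/* Sketch (not part of the patch, hypothetical helper name): initializing
+ * a CE source descriptor for a single, non-gather transfer of 'len'
+ * bytes at DMA address 'paddr'.
+ */
+static inline void
+hal_ce_src_desc_fill_example(struct hal_ce_srng_src_desc *desc,
+  dma_addr_t paddr, u16 len, u16 meta)
+{
+ desc->buffer_addr_low = lower_32_bits(paddr);
+ desc->buffer_addr_info =
+  FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI, upper_32_bits(paddr)) |
+  FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, meta);
+ desc->flags = 0; /* ring_id/loop_cnt are filled by the SRNG layer */
+}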
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(7, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ u32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ u32 toeplitz_hash0;
+ u32 toeplitz_hash1;
+ u32 meta_info; /* %HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ *
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ * Sum of all the Lengths of the source descriptor in the
+ * gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ * In case of gather, the field in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(2, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(6, 3)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(7)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(10, 9)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(14, 11)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(15)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(27, 16)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ u32 info0;
+ u32 tsf;
+} __packed;
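+
+/* Sketch, illustrative only: decoding the TX rate stats word. Only
+ * meaningful when the valid bit is set; the helper name is hypothetical.
+ */
+static inline bool
+hal_tx_rate_stats_mcs_example(const struct hal_tx_rate_stats *stats, u8 *mcs)
+{
+ if (!(stats->info0 & HAL_TX_RATE_STATS_INFO0_VALID))
+  return false;
+
+ *mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, stats->info0);
+ return true;
+}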
+
+struct hal_wbm_link_desc {
+ struct ath11k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to a TQM queue extension descriptor. WBM should
+ * treat this in the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+
+#define HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_INFO2_SW_REL_DETAILS_VALID BIT(8)
+#define HAL_WBM_RELEASE_INFO2_FIRST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_INFO2_LAST_MSDU BIT(10)
+#define HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU BIT(11)
+#define HAL_WBM_RELEASE_INFO2_FW_TX_NOTIF_FRAME BIT(12)
+#define HAL_WBM_RELEASE_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_INFO3_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_REINJ_REASON GENMASK(16, 13)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_EXP_FRAME BIT(17)
+
+struct hal_wbm_release_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ u32 info3;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on the wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * bm_action
+ * Field only valid when the field return_buffer_manager in
+ * Released_buff_or_desc_addr_info indicates:
+ * WBM_IDLE_BUF_LIST / WBM_IDLE_DESC_LIST
+ * Values are defined in enum %HAL_WBM_REL_BM_ACT_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates that type of buffer or
+ * descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * first_msdu_index
+ * Field only valid for the bm_action release_msdu_list. The index of the
+ * first MSDU in an MSDU link descriptor all belonging to the same MPDU.
+ *
+ * tqm_release_reason
+ * Field only valid when Release_source_module is set to release_source_TQM
+ * Release reasons are defined in enum %HAL_WBM_TQM_REL_REASON_.
+ *
+ * rxdma_push_reason
+ * reo_push_reason
+ * Indicates why rxdma/reo pushed the frame to this ring and values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Field only valid when 'rxdma_push_reason' set to 'error_detected'.
+ * Values are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * reo_error_code
+ * Field only valid when 'reo_push_reason' set to 'error_detected'. Values
+ * are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity OR
+ * Is set when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * tqm_status_number
+ * The value in this field is equal to tqm_cmd_number in TQM command. It is
+ * used to correlate the status with TQM commands. Only valid when
+ * release_source_module is TQM.
+ *
+ * transmit_count
+ * The number of times the frame has been transmitted, valid only when
+ * release source is TQM.
+ *
+ * ack_frame_rssi
+ * This field is only valid when the source is TQM. If this frame is
+ * removed as the result of the reception of an ACK or BA, this field
+ * indicates the RSSI of the received ACK or BA frame.
+ *
+ * sw_release_details_valid
+ * This is set when WBM got a 'release_msdu_list' command from TQM and
+ * the return buffer manager is not WBM. WBM will then de-aggregate all
+ * MSDUs and pass them one at a time on to the 'buffer owner'.
+ *
+ * first_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the first MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * last_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the last MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * msdu_part_of_amsdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU was part of an A-MSDU in MPDU
+ *
+ * fw_tx_notify_frame
+ * Field only valid when SW_release_details_valid is set.
+ *
+ * buffer_timestamp
+ * Field only valid when SW_release_details_valid is set.
+ * This is the Buffer_timestamp field; timestamp in units
+ * of 1024 us.
+ *
+ * struct hal_tx_rate_stats rate_stats
+ * Details for command execution tracking purposes.
+ *
+ * sw_peer_id
+ * tid
+ * Field only valid when Release_source_module is set to
+ * release_source_TQM
+ *
+ * 1) Release of msdu buffer due to drop_frame = 1. Flow is
+ * not fetched and hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 2) Release of msdu buffer due to Flow is not fetched and
+ * hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 3) Release of msdu link due to remove_mpdu or acked_mpdu
+ * command.
+ *
+ * buffer_or_desc_type = e_num 1 (msdu_link_descriptor)
+ * tqm_release_reason can be: e_num 1 (tqm_rr_rem_cmd_rem),
+ * e_num 2 (tqm_rr_rem_cmd_tx), e_num 3 (tqm_rr_rem_cmd_notx),
+ * e_num 4 (tqm_rr_rem_cmd_aged)
+ *
+ * This field represents the TID from the TX_MSDU_FLOW
+ * descriptor or TX_MPDU_QUEUE descriptor.
+ *
+ * ring_id
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ * It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
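+
+/* Sketch (illustrative only, hypothetical helper name): deciding whether
+ * a WBM release ring entry is a TQM TX completion for an acked frame,
+ * using the masks and enums above.
+ */
+static inline bool
+hal_wbm_release_is_tqm_acked_example(const struct hal_wbm_release_ring *desc)
+{
+ if (FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0) !=
+   HAL_WBM_REL_SRC_MODULE_TQM)
+  return false;
+
+ return FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, desc->info0) ==
+   HAL_WBM_TQM_REL_REASON_FRAME_ACKED;
+}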
+
+struct hal_wbm_buffer_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+};
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ u32 info0;
+} __packed;
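+
+/* Sketch: reading ownership and buffer type out of a descriptor header;
+ * illustrative only, hypothetical helper names.
+ */
+static inline enum hal_desc_owner
+hal_desc_hdr_owner_example(const struct hal_desc_header *hdr)
+{
+ return FIELD_GET(HAL_DESC_HDR_INFO0_OWNER, hdr->info0);
+}
+
+static inline enum hal_desc_buf_type
+hal_desc_hdr_buf_type_example(const struct hal_desc_header *hdr)
+{
+ return FIELD_GET(HAL_DESC_HDR_INFO0_BUF_TYPE, hdr->info0);
+}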
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath11k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ u32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(18, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(19)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(20)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(24, 23)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(25)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(20, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(21)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(22)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ u32 rx_queue_num;
+ u32 info0;
+ u32 info1;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 next_aging_queue[2];
+ u32 prev_aging_queue[2];
+ u32 rx_bitmap[8];
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 processed_mpdus;
+ u32 processed_msdus;
+ u32 processed_total_bytes;
+ u32 info5;
+ u32 rsvd[3];
+ struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from REO when the received
+ * frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ * A value of 255 means a 256-entry bitmap, 63 means a 64-entry
+ * bitmap, and 0 means a non-BA session with a window size of 0.
+ * The 3 values here are the main values validated, but other
+ * values should work as well.
+ *
+ * A BA window size of 0 (=> one frame entry bitmap) means that there is
+ * no additional rx_reo_queue_ext desc. following rx_reo_queue in memory.
+ * A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ * Sequence number in the next field is a valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
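+
+/* Sketch (illustrative, mirrors the ba_window_size ranges documented
+ * above): number of trailing rx_reo_queue_ext descriptors a queue needs
+ * for a given BA window size. The helper name is hypothetical.
+ */
+static inline int hal_rx_reo_queue_num_ext_example(u16 ba_window_size)
+{
+ if (!ba_window_size)
+  return 0;
+ if (ba_window_size <= 105)
+  return 1;
+ if (ba_window_size <= 210)
+  return 2;
+ return 3;
+}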
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 info0;
+ u32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ u32 info0;
+ u32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ * The value in this field is equal to value of the reo command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to execute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum %HAL_REO_EXEC_STATUS_.
+ */
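+
+/* Sketch, not part of the patch: checking whether a REO command
+ * completed successfully from its status header. Hypothetical helper.
+ */
+static inline bool
+hal_reo_status_ok_example(const struct hal_reo_status_hdr *hdr)
+{
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0) ==
+   HAL_REO_EXEC_STATUS_SUCCESS;
+}
+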
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(19, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K GENMASK(15, 12)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 rx_bitmap[8];
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 num_mpdu_frames;
+ u32 num_msdu_frames;
+ u32 total_bytes;
+ u32 info4;
+ u32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting sequence number of the session; this changes whenever
+ * the window moves (it can be filled by SW and is then maintained
+ * by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * timestamp of the last MPDU forwarded from this queue,
+ * respectively.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ * in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is reception
+ * of a BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved by more than 2K.
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ * 1 - The entire cache usage is unblocked
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 rsvd0[20];
+ u32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ * The number of link descriptors released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(23, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 rsvd0[17];
+ u32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ * The index of the threshold register whose value got reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#endif /* ATH11K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
new file mode 100644
index 000000000000..9e0f8064e427
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "ahb.h"
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+
+static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+ FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset(&desc->queue_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset(&desc->cache_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->cache_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+ cmd->addr_hi);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+ desc->info0 |=
+ FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
+ avail_slot);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset(&desc->queue_addr_lo, 0,
+ (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr)));
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+ desc->info1 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+ cmd->rx_queue_num) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+ FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+ FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+ cmd->ba_window_size - 1) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+ FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
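For illustration, a caller that wants REO to move a queue's SSN (for
example after processing a BAR) would fill struct ath11k_hal_reo_cmd
roughly as below before passing it to ath11k_hal_reo_cmd_send();
qdesc_paddr and new_ssn are placeholders, not names from this patch:

	struct ath11k_hal_reo_cmd cmd = {};

	cmd.addr_lo = lower_32_bits(qdesc_paddr);  /* REO queue desc PA */
	cmd.addr_hi = upper_32_bits(qdesc_paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	/* update only the SSN and SVLD fields of the queue descriptor */
	cmd.upd0 = HAL_REO_CMD_UPD0_SSN | HAL_REO_CMD_UPD0_SVLD;
	cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, new_ssn);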
+
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath11k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -ENOTSUPP;
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
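A hedged usage sketch for the send path: requesting (and clearing)
statistics for one queue descriptor. srng is assumed to be the REO command
SRNG and qdesc_paddr a placeholder DMA address; on success the return value
is the command number to match against the status ring, otherwise a
negative errno (-ENOBUFS when the ring is full):

	struct ath11k_hal_reo_cmd cmd = {};
	int cmd_num;

	cmd.addr_lo = lower_32_bits(qdesc_paddr);
	cmd.addr_hi = upper_32_bits(qdesc_paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS | HAL_REO_CMD_FLG_STATS_CLEAR;

	cmd_num = ath11k_hal_reo_cmd_send(ab, srng,
					  HAL_REO_CMD_GET_QUEUE_STATS, &cmd);
	if (cmd_num < 0)
		return cmd_num;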
+
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
+ binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
+}
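The two info words carry the split DMA address, the SW cookie and the
return-buffer-manager ID, so a set followed by a get round-trips all three.
A small sketch, with paddr and cookie as placeholders:

	struct ath11k_buffer_addr binfo = {};
	dma_addr_t paddr_out;
	u32 cookie_out;
	u8 rbm_out;

	ath11k_hal_rx_buf_addr_info_set(&binfo, paddr, cookie,
					HAL_RX_BUF_RBM_SW3_BM);
	ath11k_hal_rx_buf_addr_info_get(&binfo, &paddr_out, &cookie_out,
					&rbm_out);
	/* paddr_out == paddr, cookie_out == cookie, rbm_out == SW3_BM */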
+
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+
+ *paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
+ *cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
+}
+
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
+ struct hal_rx_msdu_details *msdu;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu->buf_addr_info.info1);
+
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
+ if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu->buf_addr_info.info0)) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu->buf_addr_info.info1);
+ msdu_cookies++;
+ }
+}
+
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
+ desc->info0);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath11k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+ ath11k_warn(ab, "expected buffer type link_desc");
+ return -EINVAL;
+ }
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);
+
+ return 0;
+}
+
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+
+ type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ wbm_desc->info0);
+ /* We expect only WBM_REL buffer type */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ wbm_desc->info0);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+ if (FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ wbm_desc->buf_addr_info.info1) != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ wbm_desc->buf_addr_info.info1);
+ rel_info->err_rel_src = rel_src;
+ if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
+ wbm_desc->info0);
+ } else {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
+ wbm_desc->info0);
+ }
+
+ rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
+ wbm_desc->info2);
+ rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
+ wbm_desc->info2);
+ return 0;
+}
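A sketch of the intended call pattern when draining the WBM error release
ring; handle_reo_error() is a hypothetical consumer and desc is assumed to
point at one ring entry:

	struct hal_rx_wbm_rel_info rel_info;

	if (ath11k_hal_wbm_desc_parse_err(ab, desc, &rel_info))
		return;	/* not a SW3 MSDU release from RXDMA/REO */

	if (rel_info.err_rel_src == HAL_WBM_REL_SRC_MODULE_REO)
		/* rel_info.err_code then holds a REO error code */
		handle_reo_error(rel_info.cookie, rel_info.err_code);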
+
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct ath11k_buffer_addr *buff_addr = desc;
+
+ *paddr = ((u64)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
+
+ *desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
+}
+
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *dst_desc = desc;
+ struct hal_wbm_release_ring *src_desc = link_desc;
+
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ HAL_WBM_REL_SRC_MODULE_SW) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
+}
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "Queue stats status:\n");
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "ssn %ld cur_idx %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
+ desc->info0),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
+ desc->info0));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
+ desc->info1),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
+ desc->info1));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
+ desc->info2));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
+ desc->info3),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
+ desc->info3));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
+ desc->info4));
+ ath11k_dbg(ab, ATH11k_DBG_HAL, "looping count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
+ desc->info5));
+}
+
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *hdr;
+
+ hdr = (struct hal_reo_status_hdr *)tlv->value;
+ *status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0);
+
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
+}
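Status ring consumers are expected to peel the uniform header with this
helper first and then dispatch on the TLV tag to the per-command parsers in
this file. A rough sketch; complete_pending_cmd() is hypothetical
bookkeeping, and reo_desc is assumed to come from the status ring walk:

	u8 exec_status;
	int cmd_num;

	cmd_num = ath11k_hal_reo_process_status(reo_desc, &exec_status);
	if (exec_status == HAL_REO_EXEC_STATUS_SUCCESS)
		complete_pending_cmd(cmd_num);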
+
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+ status->u.flush_queue.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
+ desc->info0);
+}
+
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.flush_cache.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.flush_cache.err_code =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
+ desc->info0);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
+ desc->info0);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
+ desc->info0);
+}
+
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.unblock_cache.err_detected =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.unblock_cache.unblock_type =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE,
+ desc->info0);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.timeout_list.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.timeout_list.list_empty =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY,
+ desc->info0);
+
+ status->u.timeout_list.release_desc_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
+ desc->info1);
+ status->u.timeout_list.fwd_buf_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
+ desc->info1);
+}
+
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
+ desc->info0);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
+ desc->info1);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
+ desc->info2);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
+ desc->info3);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
+ desc->info4);
+}
+
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->info0);
+}
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
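Worked examples of the sizing rule above, assuming the structure layouts
from hal_desc.h: a QoS TID (here 0) with BA window 64 gets one extension
descriptor, window 256 gets three, and the non-QoS TID with window 1 gets
none:

	size_t sz_ba64 = ath11k_hal_reo_qdesc_size(64, 0);
					/* qdesc + 1 ext desc */
	size_t sz_ba256 = ath11k_hal_reo_qdesc_size(256, 0);
					/* qdesc + 3 ext descs */
	size_t sz_nonqos = ath11k_hal_reo_qdesc_size(1,
					HAL_DESC_REO_NON_QOS_TID);
					/* qdesc only */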
+
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq)
+{
+ struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+ qdesc->info0 =
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));
+
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+ ba_window_size - 1);
+
+ /* TODO: Set Ignore ampdu flags based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+ qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+ start_seq);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+ /* TODO: HW queue descriptors are currently allocated for max BA
+ * window size for all QOS TIDs so that same descriptor can be used
+ * later when ADDBA request is received. This should be changed to
+ * allocate HW queue descriptors based on BA window size being
+ * negotiated (0 for non BA cases), and reallocate when BA window
+ * size changes and also send WMI message to FW to change the REO
+ * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+ */
+ memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
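Putting the two helpers together, per-TID queue allocation would look
roughly like the sketch below; it assumes ab->dev is the underlying struct
device, and ba_win_sz, tid and start_seq come from the caller (error
handling trimmed):

	u32 sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	dma_addr_t paddr;
	void *vaddr;

	vaddr = dma_alloc_coherent(ab->dev, sz, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, start_seq);
	/* paddr is what gets advertised to the HW/FW for this peer/TID */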
+
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(HAL_REO_CMD);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 =
+ FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
+ entry += entry_size;
+ }
+}
+
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+}
+
+static enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 tlv_tag, u8 *tlv_data)
+{
+ u32 info0, info1;
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ ppdu_info->ppdu_id =
+ FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info0 = __le32_to_cpu(eu_stats->info0);
+ info1 = __le32_to_cpu(eu_stats->info1);
+
+ ppdu_info->tid =
+ ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
+ __le32_to_cpu(eu_stats->info6))) - 1;
+ ppdu_info->tcp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->udp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->other_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->tcp_ack_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->preamble_type =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
+ ppdu_info->num_mpdu_fcs_ok =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
+ info1);
+ ppdu_info->num_mpdu_fcs_err =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
+ info0);
+ break;
+ }
+ case HAL_PHYRX_HT_SIG: {
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+
+ info0 = __le32_to_cpu(ht_sig->info0);
+ info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
+ ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
+ ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
+ info1);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
+ ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;
+
+ switch (ppdu_info->mcs) {
+ case 0 ... 7:
+ ppdu_info->nss = 1;
+ break;
+ case 8 ... 15:
+ ppdu_info->nss = 2;
+ break;
+ case 16 ... 23:
+ ppdu_info->nss = 3;
+ break;
+ case 24 ... 31:
+ ppdu_info->nss = 4;
+ break;
+ }
+
+ if (ppdu_info->nss > 1)
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_B: {
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
+ __le32_to_cpu(lsigb->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_A: {
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
+ __le32_to_cpu(lsiga->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_VHT_SIG_A: {
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts;
+ u32 group_id;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
+ info0);
+ ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
+ info1);
+ gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
+ info1);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
+ nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
+ ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
+ info0);
+ ppdu_info->beamformed = info1 &
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
+ group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
+ info0);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type =
+ HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_SU: {
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+ u32 nsts, cp_ltf, dcm;
+
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
+ info0);
+ ppdu_info->bw =
+ FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
+ info0);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
+ ppdu_info->beamformed = info1 &
+ HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
+ dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
+ cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
+ info0);
+ nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+
+ switch (cp_ltf) {
+ case 0:
+ case 1:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case 2:
+ ppdu_info->gi = HAL_RX_GI_1_6_US;
+ break;
+ case 3:
+ if (dcm && ppdu_info->is_stbc)
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ else
+ ppdu_info->gi = HAL_RX_GI_3_2_US;
+ break;
+ }
+
+ ppdu_info->nss = nsts + 1;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_MU_DL: {
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+
+ u32 cp_ltf;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->bw =
+ FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
+ info0);
+ cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
+ info0);
+
+ switch (cp_ltf) {
+ case 0:
+ case 1:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case 2:
+ ppdu_info->gi = HAL_RX_GI_1_6_US;
+ break;
+ case 3:
+ ppdu_info->gi = HAL_RX_GI_3_2_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B1_MU: {
+ /* TODO: Check if resource unit(RU) allocation stats
+ * are required
+ */
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_MU: {
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
+ info0);
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_OFDMA: {
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->beamformed =
+ info0 &
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
+ info0);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ }
+ case HAL_PHYRX_RSSI_LEGACY: {
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+
+ /* TODO: Please note that the combined RSSI will not be accurate
+ * in the MU case. RSSI in MU needs to be retrieved from
+ * PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB,
+ __le32_to_cpu(rssi->info0));
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+ u16 peer_id;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->info0));
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+ ppdu_info->rx_duration =
+ FIELD_GET(HAL_RX_PPDU_END_DURATION,
+ __le32_to_cpu(ppdu_rx_duration->info0));
+ break;
+ }
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb)
+{
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u8 *ptr = skb->data;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr += sizeof(*tlv);
+
+ /* The actual length of PPDU_END is the combined length of many PHY
+ * TLVs that follow. Skip the TLV header and
+ * rx_rxpcu_classification_overview that follows the header to get to
+ * next TLV.
+ */
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
+ tlv_tag, ptr);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
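A monitor-path consumer keeps feeding status buffers through this parser
and treats ppdu_info as complete only once PPDU_DONE is returned;
deliver_ppdu_stats() below is a hypothetical consumer:

	struct hal_rx_mon_ppdu_info ppdu_info = {};
	enum hal_rx_mon_status status;

	status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
	if (status == HAL_RX_MON_STATUS_PPDU_DONE)
		deliver_ppdu_stats(&ppdu_info);
	/* BUF_DONE: fetch the next status buffer and continue parsing */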
+
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie, void **pp_buf_addr,
+ u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring =
+ (struct hal_reo_entrance_ring *)rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ *sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
new file mode 100644
index 000000000000..bb022c781c48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_RX_H
+#define ATH11K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[0];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
+#define HAL_TLV_STATUS_PPDU_DONE 1
+#define HAL_TLV_STATUS_BUF_DONE 2
+#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 is_stbc;
+ u8 gi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u8 tid;
+ u8 reception_type;
+ u64 rx_duration;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le32 rsvd2[11];
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[35];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+struct hal_rx_mpdu_info {
+ __le32 rsvd0;
+ __le32 info0;
+ __le32 rsvd1[21];
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status);
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager);
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info,
+ u32 *msdu_cnt);
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb);
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
new file mode 100644
index 000000000000..e4aa7e8a1284
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "ahb.h"
+#include "hal.h"
+#include "hal_tx.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *
+ * DSCP TID
+ * 000000 0
+ * 001000 1
+ * 010000 2
+ * 011000 3
+ * 100000 4
+ * 101000 5
+ * 110000 6
+ * 111000 7
+ */
+static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+};
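+
+/* With the default table above, a lookup is a direct index: e.g. DSCP 46
+ * (Expedited Forwarding) falls in the 40-47 block, so dscp_tid_map[46]
+ * yields TID 5.
+ */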
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
+
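+ /* The DMA address is split across the two buf_addr_info words:
+ * info0 carries the low 32 bits, info1 the bits above
+ * HAL_ADDR_MSB_REG_SHIFT (presumably 32).
+ */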
+ tcl_cmd->buf_addr_info.info0 =
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
+ tcl_cmd->buf_addr_info.info1 =
+ FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
+ tcl_cmd->buf_addr_info.info1 |=
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ (ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
+
+ tcl_cmd->info0 =
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
+ ti->encrypt_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
+ ti->search_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
+ ti->addr_search_flags) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
+ ti->meta_data_flags);
+
+ tcl_cmd->info1 = ti->flags0 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
+
+ tcl_cmd->info2 = ti->flags1 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
+
+ tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
+ ti->dscp_tid_tbl_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
+ ti->bss_ast_hash);
+ tcl_cmd->info4 = 0;
+}
+
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
+ int i;
+ u32 value;
+ int cnt = 0;
+
+ ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+ /* Each DSCP-TID mapping is configured in three bits, so each
+ * iteration packs eight mappings into three bytes.
+ */
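+ /* With 64 entries at three bits each, the packed table is
+ * 64 * 3 / 8 = 24 bytes, presumably what HAL_DSCP_TID_TBL_SIZE
+ * evaluates to; the second loop below then flushes it to hardware
+ * as six 32-bit register writes.
+ */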
+ for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
+ value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
+ dscp_tid_map[i]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
+ dscp_tid_map[i + 1]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
+ dscp_tid_map[i + 2]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
+ dscp_tid_map[i + 3]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
+ dscp_tid_map[i + 4]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
+ dscp_tid_map[i + 5]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
+ dscp_tid_map[i + 6]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
+ dscp_tid_map[i + 7]);
+ memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
+ cnt += 3;
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath11k_ahb_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_ahb_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ int i, entry_size;
+ u8 *desc;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(HAL_TCL_DATA);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ desc = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)desc;
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
+ FIELD_PREP(HAL_TLV_HDR_LEN,
+ sizeof(struct hal_tcl_data_cmd));
+ desc += entry_size;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
new file mode 100644
index 000000000000..ce48a61bfb66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_TX_H
+#define ATH11K_HAL_TX_H
+
+#include "hal_desc.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 dscp_tid_tbl_idx;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ u8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti);
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd);
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
new file mode 100644
index 000000000000..8f54f58b83e6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "ahb.h"
+#include "debug.h"
+
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH11K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc,
+ struct sk_buff *skb)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+}
+
+static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_hdr *hdr;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
+ FIELD_PREP(HTC_HDR_PAYLOADLEN,
+ (skb->len - sizeof(*hdr))) |
+ FIELD_PREP(HTC_HDR_FLAGS,
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
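+
+/* As a concrete example: for eid 1, a 100-byte payload and the
+ * NEED_CREDIT_UPDATE flag, htc_info packs to
+ * (100 << 16) | (0x01 << 8) | 1 == 0x00640101.
+ */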
+
+int ath11k_htc_send(struct ath11k_htc *htc,
+ enum ath11k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath11k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath11k_htc_hdr));
+
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath11k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+ return ret;
+}
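+
+/* Credit accounting above is in units of target_credit_size: e.g. a
+ * 1000-byte message against a 512-byte credit size costs
+ * DIV_ROUND_UP(1000, 512) = 2 credits, refunded if the send fails.
+ */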
+
+static void
+ath11k_htc_process_credit_report(struct ath11k_htc *htc,
+ const struct ath11k_htc_credit_report *report,
+ int len,
+ enum ath11k_htc_ep_id eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath11k_warn(ab, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH11K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath11k_htc_ep_id src_eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ int status = 0;
+ struct ath11k_htc_record *record;
+ size_t len;
+
+ while (length > 0) {
+ record = (struct ath11k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath11k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH11K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath11k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath11k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath11k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
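+
+/* For instance, a trailer granting one credit to endpoint 2 would be the
+ * eight bytes { ATH11K_HTC_RECORD_CREDITS, 4, 0, 0, 2, 1, 0, 0 }: a
+ * record header (id, len == sizeof(struct ath11k_htc_credit_report),
+ * padding) followed by the report itself (eid, credits, padding).
+ */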
+
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_hdr *hdr;
+ struct ath11k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
+
+ if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
+ ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
+ ATH11K_HTC_FLAG_TRAILER_PRESENT;
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
+ min_len = sizeof(struct ath11k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath11k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath11k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH11K_HTC_EP_0) {
+ struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
+
+ switch (FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)) {
+ case ATH11K_HTC_MSG_READY_ID:
+ case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+ ath11k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH11K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ default:
+ ath11k_warn(ab, "ignoring unsolicited htc ep0 event\n");
+ break;
+ }
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CE's */
+ ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath11k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath11k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH11K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH11K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH11K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH11K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH11K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH11K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_ep *ep;
+ int i;
+
+ for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
+ u16 service_id)
+{
+ u8 i, allocation = 0;
+
+ for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (htc->service_alloc_table[i].service_id == service_id) {
+ allocation =
+ htc->service_alloc_table[i].credit_allocation;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_svc_tx_credits *serv_entry;
+ u32 svc_id[] = {
+ ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
+
+ return 0;
+}
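+
+/* E.g. with wmi_ep_count == 3 and 48 total transmit credits, each WMI
+ * service entry above receives 48 / 3 = 16 credits; any remainder of
+ * the integer division is left unallocated.
+ */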
+
+int ath11k_htc_wait_target(struct ath11k_htc *htc)
+{
+ int i, status = 0;
+ struct ath11k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath11k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ab, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath11k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
+ credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
+ ready->id_credit_count);
+ credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
+
+ if (message_id != ATH11K_HTC_MSG_READY_ID) {
+ ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath11k_warn(ab, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ ath11k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_conn_svc *req_msg;
+ struct ath11k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
+ struct ath11k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH11K_HTC_EP_0;
+ max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath11k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb) {
+ ath11k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath11k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
+ req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
+ conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
+ service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
+ resp_msg->msg_svc_id);
+
+ if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+ ath11k_err(ab, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%lx, assigned ep: 0x%lx\n",
+ htc_service_name(service_id),
+ FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
+ FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
+
+ conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
+ resp_msg->flags_len);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
+ HTC_SVC_RESP_MSG_ENDPOINTID,
+ resp_msg->flags_len);
+
+ max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+setup:
+
+ if (assigned_eid >= ATH11K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath11k_ahb_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+int ath11k_htc_start(struct ath11k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_setup_complete_extended *msg;
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_init(struct ath11k_base *ab)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath11k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = 3;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
new file mode 100644
index 000000000000..f0a3387567ca
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HTC_H
+#define ATH11K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath11k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath11k_htc_tx_flags {
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH11K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath11k_htc_rx_flags {
+ ATH11K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH11K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath11k_htc_hdr {
+ u32 htc_info;
+ u32 ctrl_info;
+} __packed __aligned(4);
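+
+/* Sketch of the resulting 8-byte wire header, per the masks above and
+ * their use in htc.c:
+ *
+ * htc_info:  [7:0] endpoint id   [15:8] flags   [31:16] payload length
+ * ctrl_info: [7:0] trailer len   [15:8] seq no  [31:16] reserved
+ */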
+
+enum ath11k_htc_msg_id {
+ ATH11K_HTC_MSG_READY_ID = 1,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath11k_htc_version {
+ ATH11K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH11K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+#define ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH11K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath11k_htc_conn_flags {
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+ ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+};
+
+enum ath11k_htc_conn_svc_status {
+ ATH11K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH11K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH11K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath11k_htc_ready {
+ u32 id_credit_count;
+ u32 size_ep;
+} __packed;
+
+struct ath11k_htc_ready_extended {
+ struct ath11k_htc_ready base;
+ u32 ver_bundle;
+} __packed;
+
+struct ath11k_htc_conn_svc {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed;
+
+struct ath11k_htc_conn_svc_resp {
+ u32 msg_svc_id;
+ u32 flags_len;
+ u32 svc_meta_pad;
+} __packed;
+
+struct ath11k_htc_setup_complete_extended {
+ u32 msg_id;
+ u32 flags;
+ u32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath11k_htc_msg {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed __aligned(4);
+
+enum ath11k_htc_record_id {
+ ATH11K_HTC_RECORD_NULL = 0,
+ ATH11K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath11k_htc_record_hdr {
+ u8 id; /* @enum ath11k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_credit_report {
+ u8 eid; /* @enum ath11k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_record {
+ struct ath11k_htc_record_hdr hdr;
+ union {
+ struct ath11k_htc_credit_report credit_report[0];
+ u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/* Note: the trailer offset is dynamic, depending on the payload
+ * length; this struct is only a layout sketch.
+ */
+struct ath11k_htc_frame {
+ struct ath11k_htc_hdr hdr;
+ union {
+ struct ath11k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath11k_htc_record trailer[0];
+} __packed __aligned(4);
+
+enum ath11k_htc_svc_gid {
+ ATH11K_HTC_SVC_GRP_RSVD = 0,
+ ATH11K_HTC_SVC_GRP_WMI = 1,
+ ATH11K_HTC_SVC_GRP_NMI = 2,
+ ATH11K_HTC_SVC_GRP_HTT = 3,
+ ATH11K_HTC_SVC_GRP_CFG = 4,
+ ATH11K_HTC_SVC_GRP_IPA = 5,
+ ATH11K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH11K_HTC_SVC_GRP_TEST = 254,
+ ATH11K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
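+
+/* For example, SVC(ATH11K_HTC_SVC_GRP_WMI, 0) evaluates to
+ * (1 << 8) | 0 == 0x0100, i.e. ATH11K_HTC_SVC_ID_WMI_CONTROL below.
+ */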
+
+enum ath11k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH11K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH11K_HTC_SVC_ID_UNUSED = ATH11K_HTC_SVC_ID_RESERVED,
+
+ ATH11K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH11K_HTC_SVC_GRP_RSVD, 1),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_WMI, 0),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH11K_HTC_SVC_GRP_WMI, 1),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH11K_HTC_SVC_GRP_WMI, 2),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH11K_HTC_SVC_GRP_WMI, 3),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH11K_HTC_SVC_GRP_WMI, 4),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH11K_HTC_SVC_GRP_WMI, 5),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH11K_HTC_SVC_GRP_WMI, 6),
+
+ ATH11K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_NMI, 0),
+ ATH11K_HTC_SVC_ID_NMI_DATA = SVC(ATH11K_HTC_SVC_GRP_NMI, 1),
+
+ ATH11K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH11K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH11K_HTC_SVC_GRP_TEST, 0),
+ ATH11K_HTC_SVC_ID_IPA_TX = SVC(ATH11K_HTC_SVC_GRP_IPA, 0),
+ ATH11K_HTC_SVC_ID_PKT_LOG = SVC(ATH11K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath11k_htc_ep_id {
+ ATH11K_HTC_EP_UNUSED = -1,
+ ATH11K_HTC_EP_0 = 0,
+ ATH11K_HTC_EP_1 = 1,
+ ATH11K_HTC_EP_2,
+ ATH11K_HTC_EP_3,
+ ATH11K_HTC_EP_4,
+ ATH11K_HTC_EP_5,
+ ATH11K_HTC_EP_6,
+ ATH11K_HTC_EP_7,
+ ATH11K_HTC_EP_8,
+ ATH11K_HTC_EP_COUNT,
+};
+
+struct ath11k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath11k_base *ar);
+};
+
+struct ath11k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath11k_base *);
+};
+
+/* service connection information */
+struct ath11k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath11k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath11k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH11K_HTC_MAX_LEN 4096
+#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath11k_htc_hdr))
+#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath11k_htc_ep {
+ struct ath11k_htc *htc;
+ enum ath11k_htc_ep_id eid;
+ enum ath11k_htc_svc_id service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath11k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath11k_htc {
+ struct ath11k_base *ab;
+ struct ath11k_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath11k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath11k_htc_svc_tx_credits
+ service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath11k_htc_init(struct ath11k_base *ar);
+int ath11k_htc_wait_target(struct ath11k_htc *htc);
+int ath11k_htc_start(struct ath11k_htc *htc);
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp);
+int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size);
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ar,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
new file mode 100644
index 000000000000..dd39333ec0ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HW_H
+#define ATH11K_HW_H
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS (16 + 1)
+
+#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS 512
+
+#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
+ 4 * TARGET_NUM_VDEVS + 8)
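+
+/* Worked example: TARGET_NUM_PEERS_PDEV = 512 + 17 = 529, so
+ * TARGET_NUM_PEERS(DBS) = 1058 and
+ * TARGET_NUM_TIDS(DBS) = 2 * 1058 + 4 * 17 + 8 = 2192.
+ */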
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+
+#define ATH11K_HW_MAX_QUEUES 4
+
+#define ATH11K_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
+
+#define ATH11K_FW_DIR "ath11k"
+
+/* IPQ8074 definitions */
+#define IPQ8074_FW_DIR "IPQ8074"
+#define IPQ8074_MAX_BOARD_DATA_SZ (256 * 1024)
+#define IPQ8074_MAX_CAL_DATA_SZ IPQ8074_MAX_BOARD_DATA_SZ
+
+#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
+#define ATH11K_BOARD_API2_FILE "board-2.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "bdwlan.bin"
+#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+
+enum ath11k_hw_rate_cck {
+ ATH11K_HW_RATE_CCK_LP_11M = 0,
+ ATH11K_HW_RATE_CCK_LP_5_5M,
+ ATH11K_HW_RATE_CCK_LP_2M,
+ ATH11K_HW_RATE_CCK_LP_1M,
+ ATH11K_HW_RATE_CCK_SP_11M,
+ ATH11K_HW_RATE_CCK_SP_5_5M,
+ ATH11K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath11k_hw_rate_ofdm {
+ ATH11K_HW_RATE_OFDM_48M = 0,
+ ATH11K_HW_RATE_OFDM_24M,
+ ATH11K_HW_RATE_OFDM_12M,
+ ATH11K_HW_RATE_OFDM_6M,
+ ATH11K_HW_RATE_OFDM_54M,
+ ATH11K_HW_RATE_OFDM_36M,
+ ATH11K_HW_RATE_OFDM_18M,
+ ATH11K_HW_RATE_OFDM_9M,
+};
+
+struct ath11k_hw_params {
+ const char *name;
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_size;
+ } fw;
+};
+
+struct ath11k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath11k_bd_ie_board_type {
+ ATH11K_BD_IE_BOARD_NAME = 0,
+ ATH11K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath11k_bd_ie_type {
+ /* contains sub IEs of enum ath11k_bd_ie_board_type */
+ ATH11K_BD_IE_BOARD = 0,
+ ATH11K_BD_IE_BOARD_EXT = 1,
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
new file mode 100644
index 000000000000..6640662f5ede
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -0,0 +1,5907 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "testmode.h"
+#include "peer.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath11k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath11k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+};
+
+static struct ieee80211_rate ath11k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M },
+};
+
+static const int
+ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+};
+
+const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
+#define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath11k_g_rates ath11k_legacy_rates
+#define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates))
+#define ath11k_a_rates (ath11k_legacy_rates + 4)
+#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
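+
+/* The first four entries of ath11k_legacy_rates are the CCK rates, hence
+ * the +/- 4 above: the 5 GHz (a_rates) view skips them, while the 2 GHz
+ * (g_rates) view exposes the whole table.
+ */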
+
+#define ATH11K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */
+
+static const u32 ath11k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case ATH11K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH11K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH11K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH11K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+ /* Default to the OFDM rates */
+ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath11k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath11k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath11k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+static int get_num_chains(u32 mask)
+{
+ int num_chains = 0;
+
+ while (mask) {
+ if (mask & BIT(0))
+ num_chains++;
+ mask >>= 1;
+ }
+
+ return num_chains;
+}
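+
+/* This open-coded popcount is equivalent to hweight32() from
+ * <linux/bitops.h>; e.g. a 2x2 chain mask of 0x3 yields 2 chains.
+ */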
+
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath11k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
+{
+/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath11k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+static u8 ath11k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
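+
+/* E.g. a bitrate of 55 (5.5 Mbit/s in 100 kbit/s units) becomes
+ * DIV_ROUND_UP(55, 5) | BIT(7) == 11 | 0x80 == 0x8b: the rate in
+ * 500 kbit/s units with the high bit flagging CCK.
+ */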
+
+static void ath11k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif_iter *arvif_iter = data;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath11k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif)
+ return NULL;
+
+ return arvif_iter.arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+ return pdev->ar;
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->vdev_stop_status.stop_in_progress &&
+ ar->vdev_stop_status.vdev_id == vdev_id) {
+ ar->vdev_stop_status.stop_in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_pdev_caps_update(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME Set min_tx_power to ab->target_caps.hw_min_tx_power.
+ * But since the received value in svcrdy is same as hw_max_tx_power,
+ * we can set ar->min_tx_power to 0 currently until
+ * this is fixed in firmware
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath11k_mac_txpower_recalc(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ /* txpwr is set as 2 units per dBm in FW */
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for sw retries (when legacy stations
+ * are in BSS) or by default only for second rate series.
+ * TODO: Check if we need to enable CTS 2 Self in any case
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath11k_mac_set_kickout(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH11K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
+{
+ int ret = 0;
+
+ ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = 0;
+
+ /* mac80211 requires this op to be present, hence the empty
+ * function; it can be extended when required.
+ */
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* TODO: Handle configuration changes as appropriate */
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!bcn) {
+ ath11k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+
+ kfree_skb(bcn);
+
+ if (ret)
+ ath11k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void ath11k_control_beaconing(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is basing this on the RSN/WPA IEs the correct approach? */
+ if (rsnie || wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: rsn/wpa ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
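+ /* Walk the 32-bit legacy rate bitmap; each set bit corresponds to
+ * an entry in sband->bitrates.
+ */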
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath11k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
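+ /* Maximum A-MPDU length is 2^(13 + ampdu_factor) - 1 octets;
+ * IEEE80211_HT_MAX_AMPDU_FACTOR is 13.
+ */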
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
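+ /* Intersect the peer's HT RX MCS map with our configured HT MCS
+ * mask; each mask byte covers 8 MCS indices of one spatial stream,
+ * so the highest matching byte determines max_nss.
+ */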
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such a situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
+static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
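+ /* Each NSS occupies 2 bits in the VHT MCS map; translate the 2-bit
+ * support code into a bitmask of usable MCS indices.
+ */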
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u16
+ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+ /* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by
+ * default, but they are not supported by the 11ac standard, so
+ * explicitly disable VHT MCS rates 10 and 11 in 11ac mode.
+ */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ /* TODO: rxnss_override */
+}
+
+static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u16 v;
+
+ if (!he_cap->has_he)
+ return;
+
+ arg->he_flag = true;
+
+ memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
+ sizeof(arg->peer_he_cap_macinfo));
+ memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
+ sizeof(arg->peer_he_cap_phyinfo));
+ memcpy(&arg->peer_he_ops, &vif->bss_conf.he_operation,
+ sizeof(arg->peer_he_ops));
+
+ /* the topmost byte is used to indicate BSS color info */
+ arg->peer_he_ops &= 0xffffff;
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
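+ /* The PPE Thresholds field begins with 3 bits of NSS and a 4-bit
+ * RU index bitmask; from bit 7 onwards each set RU carries a
+ * 6-bit PPET16/PPET8 pair per NSS, repacked below into
+ * ppet16_ppet8_ru3_ru0[nss].
+ */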
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
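+ /* The 160 MHz case deliberately falls through into the default so
+ * the 80 MHz MCS set is always populated and peer_he_mcs_count
+ * covers every bandwidth the station supports.
+ */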
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ /* fall through */
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_qos(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath11k_peer_assoc_qos_ap(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ap_ps_params params;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ params.vdev_id = arvif->vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
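+ /* Map mac80211's per-AC U-APSD flags onto the WMI AC numbering
+ * (VO->AC3, VI->AC2, BK->AC1, BE->AC0), enabling both delivery and
+ * trigger for each flagged AC.
+ */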
+ uapsd = 0;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ params.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ params.value = uapsd;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ params.value = max_sp;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ /* TODO revisit during testing */
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ params.param, arvif->vdev_id, ret);
+ return ret;
+}
+
+static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
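+ /* CCK rates occupy the low bits of the 2 GHz rate bitmap, so a
+ * non-zero result means the station advertises at least one OFDM
+ * rate.
+ */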
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* not sure if this is a valid case? */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->he_cap.has_he) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath11k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ /* Check HE first */
+ if (sta->he_cap.has_he) {
+ phymode = ath11k_mac_get_phymode_he(ar, sta);
+ } else if (sta->vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath11k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath11k_peer_assoc_prepare(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg,
+ bool reassoc)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
+ ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_smps(sta, arg);
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath11k_smps_map))
+ return -EINVAL;
+
+ return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath11k_smps_map[smps]);
+}
+
+static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct peer_assoc_params peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ rcu_read_unlock();
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
+ &ap_sta->ht_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = bss_conf->aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
+ /* Authorize BSS Peer */
+ ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+
+ ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ /* TODO: cancel connection_loss_work */
+}
+
+static int ath11k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
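+ /* Look up the bitrate in the legacy rate table and combine its hw
+ * value with the preamble via ATH11K_HW_RATE_CODE (the middle
+ * argument, presumably an NSS index, is 0 for legacy rates).
+ */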
+ for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) {
+ if (ath11k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath11k_legacy_rates[i].hw_value;
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret = 0;
+ u8 rateidx;
+ u32 rate;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = info->ssid_len;
+ if (info->ssid_len)
+ memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath11k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc)
+ ath11k_bss_assoc(hw, vif, info);
+ else
+ ath11k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ath11k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath11k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath11k_legacy_rates[rateidx].bitrate;
+ hw_value = ath11k_legacy_rates[rateidx].hw_value;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath11k_mac_vif_chan(arvif->vif, &def))
+ ath11k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ if (info->twt_requester || info->twt_responder)
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
+ else
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &info->he_obss_pd);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH11K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
+ /* fall through */
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath11k_scan_stop(struct ath11k *ar)
+{
+ struct scan_cancel_param arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH11K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab,
+ "failed to receive scan abort comple: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to transport pipe
+ * being overflowed with data and/or it can recover on its own before
+ * next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH11K_SCAN_IDLE)
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath11k_scan_abort(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and requested
+ * an abort while scan completion was being processed.
+ */
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ ar->scan.state = ATH11K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_start_scan(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return an error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but there is no need to wedge the driver due to
+ * that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct scan_req_params arg;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ memset(&arg, 0, sizeof(arg));
+ ath11k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH11K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+ /* kzalloc() can fail; bail out before the memcpy() below */
+ if (!arg.extraie.ptr) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ arg.extraie.len = req->ie_len;
+ memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg.num_ssids = req->n_ssids;
+ for (i = 0; i < arg.num_ssids; i++) {
+ arg.ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid,
+ req->ssids[i].ssid_len);
+ }
+ } else {
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.num_chan = req->n_channels;
+ for (i = 0; i < arg.num_chan; i++)
+ arg.chan_list[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath11k_start_scan(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* Add a 200ms margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg.max_scan_time +
+ ATH11K_MAC_SCAN_TIMEOUT_MSECS));
+
+exit:
+ if (req->ie_len)
+ kfree(arg.extraie.ptr);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath11k_install_key(struct ath11k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath11k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (cmd == DISABLE_KEY) {
+ /* TODO: Check if FW expects value other than NONE for del */
+ /* arg.key_cipher = WMI_CIPHER_NONE; */
+ arg.key_len = 0;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ default:
+ ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+install:
+ ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
+static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath11k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* the peer should not disappear midway (unless the FW goes awry) since
+ * we already hold conf_mutex; we just make sure it's there now.
+ */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath11k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (!peer)
+ /* impossible unless FW goes crazy */
+ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
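+ /* Pick the last NSS whose VHT MCS mask has exactly one bit set;
+ * that single MCS becomes the per-peer fixed rate.
+ */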
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath11k_station_assoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ int ret = 0;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_vht_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+
+ /* If a single VHT rate is configured (via set_bitrate_mask()),
+ * peer_assoc will disable VHT; it is then re-enabled through a
+ * peer-specific fixed rate param.
+ * Note that all other rates and NSS will be disabled for this peer.
+ */
+ if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_station_disassoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath11k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err, num_vht_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct peer_assoc_params peer_arg;
+
+ arsta = container_of(wk, struct ath11k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
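+ /* Clamp NSS to at least 1 and at most what the configured HT/VHT
+ * MCS masks allow.
+ */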
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ /* ath11k_peer_assoc_prepare() will reject VHT rates in
+ * bitrate_mask if they are not available in range format and
+ * will mark the VHT tx_rateset as unsupported, so setting
+ * multiple VHT MCS (e.g. MCS 4,5,6) per peer is not supported
+ * here. A single rate in the VHT mask can, however, be set as
+ * a per-peer fixed rate. Note that even if HT rates are
+ * configured in the bitrate mask, the device will not switch
+ * to them while a per-peer fixed rate is set.
+ * TODO: Check RATEMASK_CMDID to support auto rate selection
+ * across HT/VHT and multiple VHT MCS.
+ */
+ if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
+ ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT or no fixed VHT rate is
+ * provided in the new bitrate mask, we set the other
+ * rates using the peer_assoc command.
+ */
+ ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+ else
+ ath11k_info(ar->ab,
+ "Station %pM moved to assoc state\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ else
+ ath11k_info(ar->ab,
+ "Station %pM moved to disassociated state\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 value = 0;
+ int ret = 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
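+ /* Default to static SM power save; upgraded to dynamic below when
+ * the hardware advertises WMI_HT_CAP_DYNAMIC_SMPS.
+ */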
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath11k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ /* TODO: SUBFEE not validated in HK, disable here until validated? */
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ /* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ *vht_cap |= sound_dim;
+ }
+
+ /* Use the STS advertised by FW only if SU Beamformee is supported */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+}
+
+static struct ieee80211_sta_vht_cap
+ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ /* TODO: Enable back VHT160 mode once association issues are fixed */
+ /* Disabling VHT160 and VHT80+80 modes */
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
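+ /* Build per-stream MCS maps: streams present in the chainmask
+ * advertise MCS 0-9, all others are marked not supported.
+ */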
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
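+ /* TX STBC requires at least two tx chains */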
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant)
+{
+ /* TODO: Check the request chainmask against the supported
+ * chainmask table which is advertised in the extended service ready event
+ */
+
+ return 0;
+}
+
+static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
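+ /* Each RU takes 6 bits holding two 3-bit PPET subfields; swap
+ * the subfields, then write the bits out MSB first into the
+ * PPE Thresholds field.
+ */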
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static void
+ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_SRP_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
+static int ath11k_mac_copy_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath11k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
+ he_cap_elem->phy_cap_info[4] &=
+ ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[4] &=
+ ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[4] |= (ar->num_tx_chains - 1) << 2;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath11k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath11k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath11k_mac_setup_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
+ band->n_iftype_data = count;
+ }
+}
+
+static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath11k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if (ar->state != ATH11K_STATE_ON &&
+ ar->state != ATH11K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = get_num_chains(tx_ant);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = get_num_chains(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k *ar = ctx;
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *msdu = skb;
+ struct ieee80211_tx_info *info;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+ struct sk_buff *msdu = skb;
+ struct ath11k *ar = skb_cb->ar;
+ struct ath11k_base *ab = ar->ab;
+
+ if (skb_cb->vif == vif) {
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ if (buf_id < 0)
+ return -ENOSPC;
+
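+ /* Protected robust management frames get a CCMP MIC appended during
+ * hardware encryption, so extend the frame length to cover it before
+ * DMA mapping.
+ */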
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH11K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ieee80211_free_txskb(ar->hw, skb);
+}
+
+static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
+ struct ieee80211_tx_info *info;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ info = IEEE80211_SKB_CB(skb);
+ arvif = ath11k_vif_to_arvif(info->control.vif);
+
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to transmit management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
+ }
+}
+
+static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe response packets when the pending management tx
+ * count reaches a certain threshold, so that other mgmt packets
+ * such as auth and assoc are prioritized and sent on time to
+ * establish connections successfully.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) {
+ ath11k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len(q) == ATH11K_TX_MGMT_NUM_PENDING_MAX) {
+ ath11k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool is_prb_rsp;
+ int ret;
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ ret = ath11k_dp_tx(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath11k_mac_drain_tx(struct ath11k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath11k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
+{
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+
+ if (enable)
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+
+ return ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+}
+
+static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_OFF:
+ ar->state = ATH11K_STATE_ON;
+ break;
+ case ATH11K_STATE_RESTARTING:
+ ar->state = ATH11K_STATE_RESTARTED;
+ break;
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_ON:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ goto err;
+ }
+
+ __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath11k_reg_update_chan_list(ar);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath11k_mac_config_mon_status_default(ar, true);
+ if (ret) {
+ ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_config_mon_status_default(ar, false);
+ if (ret)
+ ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static void
+ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
+ struct vdev_create_params *params)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_pdev *pdev = ar->pdev;
+
+ params->if_id = arvif->vdev_id;
+ params->type = arvif->vdev_type;
+ params->subtype = arvif->vdev_subtype;
+ params->pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+}
+
+static u32
+ath11k_mac_prepare_he_mode(struct ath11k_pdev *pdev, u32 viftype)
+{
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ struct ath11k_band_cap *cap_band = NULL;
+ u32 *hecap_phy_ptr = NULL;
+ u32 hemode = 0;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ else
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+
+ hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
+
+ hemode = FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE) |
+ FIELD_PREP(HE_MODE_SU_TX_BFER, HECAP_PHY_SUBFMR_GET(hecap_phy_ptr)) |
+ FIELD_PREP(HE_MODE_UL_MUMIMO, HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr));
+
+ /* TODO WDS and other modes */
+ if (viftype == NL80211_IFTYPE_AP) {
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFER,
+ HECAP_PHY_MUBFMR_GET(hecap_phy_ptr)) |
+ FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+ } else {
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
+ }
+
+ return hemode;
+}
+
+static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ u32 param_id, param_value;
+ struct ath11k_base *ab = ar->ab;
+ int ret = 0;
+
+ param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ param_value = ath11k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
+ arvif->vdev_id, ret, param_value);
+ return ret;
+ }
+ param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ param_value =
+ FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
+ FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct vdev_create_params vdev_param = {0};
+ struct peer_create_params peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+
+ /* Should we initialize any worker to handle connection loss indication
+ * from firmware in sta mode?
+ */
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
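+ /* Allocate the lowest free vdev id from the free vdev bitmap */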
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ /* fall through */
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
+ vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1);
+
+ ath11k_mac_setup_vdev_create_params(arvif, &vdev_param);
+
+ ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
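+ /* Derive NSS from the configured tx chainmask, defaulting to 1 */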
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_set_kickout(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath11k_dp_vdev_tx_attach(ar, arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ar->num_peers--;
+ ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, arvif->vdev_id);
+ }
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->num_created_vdevs--;
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = ar->ab;
+ int ret;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ ar->num_created_vdevs--;
+
+ ath11k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_vif_txmgmt_idr_remove, vif);
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
+ ath11k_mac_vif_unref, vif);
+ spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ }
+
+ /* Recalculate txpower for the remaining vdevs */
+ ath11k_mac_txpower_recalc(ar);
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ /* TODO: recalc traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath11k *ar = hw->priv;
+ bool reset_flag = false;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ /* For monitor mode */
+ reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
+ if (!ret) {
+ if (!reset_flag)
+ set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ else
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ } else {
+ ath11k_warn(ar->ab,
+ "fail to set monitor filter: %d\n", ret);
+ }
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath11k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath11k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath11k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of multiple channel contexts, populate rx_channel from
+ * Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: If one more channel context is left, populate
+ * rx_channel with the channel of that remaining channel context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
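+ /* vdev_setup_done is completed from the WMI vdev start/stop event
+ * path; bail out with a timeout instead of blocking forever if
+ * firmware never responds.
+ */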
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static int
+ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ int he_support = arvif->vif->bss_conf.he_support;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode =
+ ath11k_phymodes[chandef->chan->band][chandef->width];
+
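+ /* WMI expects power values in 0.5 dBm units, hence the doubling */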
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.passive = arg.channel.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ /* TODO: Notify if secondary 80Mhz also needs radar detection */
+ if (he_support) {
+ ret = ath11k_set_he_mu_sounding_mode(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he mode vdev %i\n",
+ arg.vdev_id);
+ return ret;
+ }
+ }
+ }
+
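+ /* Transmissions must not be initiated on no-IR channels, so treat
+ * them as passive.
+ */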
+ arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath11k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+
+ /* Set the CAC_RUNNING flag when the channel has a DFS CAC time
+ * (dfs_cac_ms is non-zero only for radar channels) and its state is
+ * NL80211_DFS_USABLE, which means CAC must complete before the channel
+ * can be used. The flag is used to drop rx packets during CAC.
+ */
+ /* TODO Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+ chandef->chan->dfs_cac_ms &&
+ chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+ set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "CAC Started in chan_freq %d for vdev %d\n",
+ arg.channel.freq, arg.vdev_id);
+ }
+
+ ret = ath11k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->vdev_stop_status.stop_in_progress = true;
+ ar->vdev_stop_status.vdev_id = arvif->vdev_id;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ spin_lock_bh(&ar->data_lock);
+ ar->vdev_stop_status.stop_in_progress = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+}
+
+static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+}
+
+struct ath11k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath11k_mac_update_vif_chan(struct ath11k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath11k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+}
+
+static void
+ath11k_mac_update_active_vif_chan(struct ath11k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ ath11k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx assign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto err;
+ }
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret)
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath11k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores the RTS/fragmentation threshold per device, but the
+ * ath11k driver applies it per interface (vdev) in firmware
+ */
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath11k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI vdev param for the fragmentation threshold,
+ * no known firmware actually implements it. Moreover it is not possible
+ * to leave frame fragmentation to mac80211 because the firmware clears
+ * the "more fragments" bit in the frame control field, making it
+ * impossible for remote devices to reassemble the frames.
+ *
+ * Hence implement a dummy callback just to say that fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+ long time_left;
+
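+ /* Dropping frames is not supported; we can only wait for the pending
+ * tx queue to drain.
+ */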
+ if (drop)
+ return;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0)
+ ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+}
+
+static int
+ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight16(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
+static bool
+ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
+static bool
+ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy rates here; basic rates are always
+ * present in the bitrate mask.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
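+ /* The NSS mask must enable spatial streams contiguously from the
+ * first one, i.e. it must have the form 0b1, 0b11, 0b111, ...
+ */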
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
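+ /* The legacy rate table lists the CCK rates first; on 5 GHz bit 0 of
+ * the mask maps to the first OFDM rate, so skip the CCK entries.
+ */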
+ if (band == NL80211_BAND_5GHZ)
+ rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath11k_legacy_rates[rate_idx].bitrate;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
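+ /* Only the standard VHT MCS ranges can be expressed to the firmware:
+ * 0-7 (0x00ff), 0-8 (0x01ff) and 0-9 (0x03ff). Zero means the spatial
+ * stream is unused; anything else is not representable.
+ */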
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath11k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath11k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static int
+ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath11k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+
+ if (ath11k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone; it
+ * requires passing at least one of the basic rates in use along with
+ * it. Fixed rate settings across different preambles (legacy, HT, VHT)
+ * are not supported by the firmware, so the FIXED_RATE vdev param is
+ * not suitable for setting a single HT/VHT rate.
+ * A single basic rate passed from userspace, however, can be set
+ * through the FIXED_RATE param.
+ */
+ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
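+ /* Clear any per-peer fixed rate so that the vdev-level FIXED_RATE
+ * param set below takes effect.
+ */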
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min_t(u32, ar->num_tx_chains,
+ max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)));
+
+ /* If multiple rates across different preambles are given, we can
+ * reconfigure this info with all peers using the PEER_ASSOC command,
+ * with the exceptions below.
+ * - Single VHT rate: the peer_assoc command accommodates only MCS
+ * range values, i.e. 0-7, 0-8 or 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, the firmware
+ * doesn't allow switching from VHT to legacy. Hence, instead of
+ * setting legacy and VHT rates using the RATEMASK_CMD vdev command,
+ * we set this VHT rate as the peer fixed rate param, which overrides
+ * the FIXED_RATE param and the firmware rate control algorithm.
+ * If a single VHT rate is passed along with HT rates, we select the
+ * VHT rate as the fixed rate for VHT peers.
+ * - Multiple VHT rates: when multiple VHT rates are given, this can
+ * be set using RATEMASK_CMD, which uses the firmware rate control
+ * algorithm.
+ * TODO: Setting multiple VHT MCS values and replacing peer_assoc with
+ * RATEMASK_CMDID could cover all use cases of setting rates across
+ * multiple preambles and rates within the same type, but it requires
+ * more validation of the command at this point.
+ */
+
+ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath11k_warn(ar->ab,
+ "Setting more than one MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_RESTARTED) {
+ ath11k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH11K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath11k_mac_update_bss_chan_survey(struct ath11k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH11K_SCAN_IDLE) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath11k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH11K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* TODO: Use real NF instead of default one. */
+ sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+}
+
+static const struct ieee80211_ops ath11k_ops = {
+ .tx = ath11k_mac_op_tx,
+ .start = ath11k_mac_op_start,
+ .stop = ath11k_mac_op_stop,
+ .reconfig_complete = ath11k_mac_op_reconfig_complete,
+ .add_interface = ath11k_mac_op_add_interface,
+ .remove_interface = ath11k_mac_op_remove_interface,
+ .config = ath11k_mac_op_config,
+ .bss_info_changed = ath11k_mac_op_bss_info_changed,
+ .configure_filter = ath11k_mac_op_configure_filter,
+ .hw_scan = ath11k_mac_op_hw_scan,
+ .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
+ .set_key = ath11k_mac_op_set_key,
+ .sta_state = ath11k_mac_op_sta_state,
+ .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
+ .sta_rc_update = ath11k_mac_op_sta_rc_update,
+ .conf_tx = ath11k_mac_op_conf_tx,
+ .set_antenna = ath11k_mac_op_set_antenna,
+ .get_antenna = ath11k_mac_op_get_antenna,
+ .ampdu_action = ath11k_mac_op_ampdu_action,
+ .add_chanctx = ath11k_mac_op_add_chanctx,
+ .remove_chanctx = ath11k_mac_op_remove_chanctx,
+ .change_chanctx = ath11k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath11k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath11k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask,
+ .get_survey = ath11k_mac_op_get_survey,
+ .flush = ath11k_mac_op_flush,
+ .sta_statistics = ath11k_mac_op_sta_statistics,
+ CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+#ifdef CONFIG_ATH11K_DEBUGFS
+ .sta_add_debugfs = ath11k_sta_add_debugfs,
+#endif
+};
+
+static const struct ieee80211_iface_limit ath11k_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
+static const struct ieee80211_iface_combination ath11k_if_comb[] = {
+ {
+ .limits = ath11k_if_limits,
+ .n_limits = ARRAY_SIZE(ath11k_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 100,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ },
+};
+
+static void ath11k_mac_update_ch_list(struct ath11k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
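+ /* Disable every channel that falls outside the frequency range
+ * reported in the pdev's regulatory capabilities.
+ */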
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap;
+ void *channels;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
+ ARRAY_SIZE(ath11k_5ghz_channels)) !=
+ ATH11K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath11k_2ghz_channels,
+ sizeof(ath11k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_g_rates_size;
+ band->bitrates = ath11k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ ath11k_mac_update_ch_list(ar, band,
+ reg_cap->low_2ghz_chan,
+ reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ channels = kmemdup(ath11k_5ghz_channels,
+ sizeof(ath11k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ ath11k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+
+ return 0;
+}
+
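+/* Each array index corresponds to an octet of the Extended Capabilities
+ * element; TWT requester/responder support is advertised on top of the
+ * common capabilities for the STA and AP interface types respectively.
+ */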
+static const u8 ath11k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath11k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath11k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath11k_if_types_ext_capa,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath11k_mac_unregister(struct ath11k *ar)
+{
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath11k_mac_unregister(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath11k_mac_unregister(ar);
+ }
+}
+
+static int __ath11k_mac_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath11k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath11k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err_free;
+
+ ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath11k_mac_setup_he_cap(ar, cap);
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ if (ht_cap & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check if the HT capability advertised by the firmware differs
+ * between the bands of a dual-band capable radio. It will be tricky to
+ * handle if the HT capabilities are not the same for each band.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+
+ ar->hw->vif_data_size = sizeof(struct ath11k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath11k_sta);
+
+ ar->hw->wiphy->iface_combinations = ath11k_if_comb;
+ ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath11k_iftypes_ext_capa);
+
+ ath11k_reg_init(ar);
+
+ /* advertise HW checksum offload capabilities */
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free;
+ }
+
+ /* Apply the regd received during initialization */
+ ret = ath11k_regd_update(ar, true);
+ if (ret) {
+ ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = ath11k_debug_register(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath11k_mac_register(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
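+ /* No per-pdev address provided by the firmware: derive one per
+ * radio by offsetting byte 4 of the base MAC address.
+ */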
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ ar->mac_addr[4] += i;
+ }
+
+ ret = __ath11k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+ }
+
+ /* Initialize the channel counter frequency (in hertz) */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
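+ /* One bit per vdev across all radios; every vdev starts out free */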
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ return 0;
+
+err_cleanup:
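+ /* Unregister only the radios that were registered successfully */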
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath11k_mac_unregister(ar);
+ }
+
+ return ret;
+}
+
+int ath11k_mac_allocate(struct ath11k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+ if (!hw) {
+ ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath11k_core_get_hw_mac_id(ab, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME: wmi[0] is already initialized during attach,
+ * should we do this again?
+ */
+ ath11k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath11k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath11k_mac_destroy(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
new file mode 100644
index 000000000000..f286531cdd78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_MAC_H
+#define ATH11K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+
+struct ath11k;
+struct ath11k_base;
+
+struct ath11k_generic_iter {
+ struct ath11k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH11K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware
+ * implementation won't start; we already have the same functionality
+ * in hostapd. Unit is seconds.
+ */
+#define ATH11K_KEEPALIVE_MIN_IDLE 3747
+#define ATH11K_KEEPALIVE_MAX_IDLE 3895
+#define ATH11K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+#define ATH11K_CHAN_WIDTH_NUM 8
+
+extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
+
+void ath11k_mac_destroy(struct ath11k_base *ab);
+void ath11k_mac_unregister(struct ath11k_base *ab);
+int ath11k_mac_register(struct ath11k_base *ab);
+int ath11k_mac_allocate(struct ath11k_base *ab);
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath11k_mac_scan_finish(struct ath11k *ar);
+void ath11k_mac_scan_finish(struct ath11k *ar);
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
+struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab,
+ u32 vdev_id);
+
+void ath11k_mac_drain_tx(struct ath11k *ar);
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
new file mode 100644
index 000000000000..4bf1dfa498b6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (peer_id == peer->peer_id)
+ return peer;
+
+ return NULL;
+}
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
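+ /* Create the host-side peer entry on the first map event for this
+ * (vdev, MAC address) pair; later events for the same peer only log.
+ */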
+ peer = ath11k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
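+ /* Re-evaluate the peer list under base_lock on every wakeup and bail
+ * out early if a firmware crash flush is in progress.
+ */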
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath11k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param)
+{
+ struct ath11k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
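+ /* The firmware has a fixed pool of peer entries per pdev; refuse to
+ * create more peers once it is exhausted.
+ */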
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ param->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
+ param->peer_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ param->peer_addr, param->vdev_id);
+ ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
+ param->vdev_id);
+ return -ENOENT;
+ }
+
+ peer->sta = sta;
+ arvif->ast_hash = peer->ast_hash;
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
new file mode 100644
index 000000000000..9a40d1f6ccd9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_PEER_H
+#define ATH11K_PEER_H
+
+struct ath11k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+
+ /* protected by ab->base_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param);
+
+#endif /* ATH11K_PEER_H */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
new file mode 100644
index 000000000000..2377895a58ec
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -0,0 +1,2433 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include <linux/of.h>
+#include <linux/firmware.h>
+
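+/* QMI message encoding tables: each optional field is described by a
+ * QMI_OPT_FLAG entry for its *_valid member followed by an entry for the
+ * field payload itself, both carrying the same tlv_type.
+ */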
+static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+	if (msg->mem_seg_len == 0 ||
+	    msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
+		ath11k_warn(ab, "invalid memory segment length: %u\n",
+			    msg->mem_seg_len);
+		return;
+	}
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+		.elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
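+/* All QMI exchanges below follow the same pattern: initialize a
+ * transaction against the response ei array, send the encoded request
+ * and block until the response arrives or the timeout expires.
+ */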
+static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "failed to send host capability request, err = %d\n", ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
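+/* Register for the firmware indications (fw ready, request mem,
+ * fw mem ready, cal done, fw init done) that drive the QMI event
+ * state machine below.
+ */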
+static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		ret = -ENOMEM;
+		goto resp_out;
+	}
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "failed to send indication register request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to register fw indication %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
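+/* Answer the firmware's REQUEST_MEM indication with the address,
+ * size and type of every host-provided memory segment.
+ */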
+static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0, i;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+	for (i = 0; i < req->mem_seg_len; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ }
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
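+/* Serve the firmware memory request from fixed regions: the BDF
+ * region maps to the hard-coded ATH11K_QMI_BDF_ADDRESS window, while
+ * CALDB is acknowledged with a zero address since cold boot
+ * calibration is not supported yet.
+ */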
+static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i, idx;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case BDF_MEM_REGION_TYPE:
+ ab->qmi.target_mem[idx].paddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].vaddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) {
+				ath11k_warn(ab, "qmi mem size is too low to load caldata\n");
+ return -EINVAL;
+ }
+ /* TODO ath11k does not support cold boot calibration */
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].vaddr = 0;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath11k_warn(ab, "qmi ignore invalid mem req type %d\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+}
+
+static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed target cap request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = 0xFF;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strlcpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ ath11k_info(ab, "qmi target: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+	ath11k_info(ab, "qmi fw_version: 0x%x fw_build_timestamp: %s fw_build_id: %s\n",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ ab->qmi.target.fw_build_id);
+
+out:
+ return ret;
+}
+
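+/* Stage either the board data file or the calibration data in the
+ * remapped BDF window; caldata is placed at a fixed offset within
+ * that window.
+ */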
+static int
+ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
+ void __iomem *bdf_addr)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath11k_board_data bd;
+ u32 fw_size;
+ int ret = 0;
+
+ memset(&bd, 0, sizeof(bd));
+
+ switch (type) {
+ case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load BDF\n");
+ goto out;
+ }
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
+ memcpy_toio(bdf_addr, bd.data, fw_size);
+ ath11k_core_free_bdf(ab, &bd);
+ break;
+ case ATH11K_QMI_FILE_TYPE_CALDATA:
+ snprintf(filename, sizeof(filename),
+ "%s/%s", ab->hw_params.fw.dir, ATH11K_QMI_DEFAULT_CAL_FILE_NAME);
+ ret = request_firmware(&fw_entry, filename, dev);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load CAL: %s\n", filename);
+ goto out;
+ }
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size,
+ fw_entry->size);
+
+ memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
+ fw_entry->data, fw_size);
+		ath11k_info(ab, "qmi downloading CAL data: %s, size: %zu\n",
+ filename, fw_entry->size);
+
+ release_firmware(fw_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ req->total_size = fw_size;
+
+out:
+ return ret;
+}
+
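+/* Walk the known file types, copy each one into the BDF memory
+ * window and announce it to firmware with a BDF_DOWNLOAD request.
+ */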
+static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ void __iomem *bdf_addr = NULL;
+ int type, ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ bdf_addr = ioremap(ATH11K_QMI_BDF_ADDRESS, ATH11K_QMI_BDF_MAX_SIZE);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for BDF\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->seg_id_valid = 1;
+ req->seg_id = type;
+ req->data_valid = 0;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ req->bdf_type = 0;
+ req->bdf_type_valid = 0;
+ req->end_valid = 1;
+ req->end = 1;
+
+ ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out_qmi_bdf;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out_qmi_bdf;
+ }
+ }
+ ath11k_info(ab, "qmi BDF downloaded\n");
+
+out_qmi_bdf:
+ iounmap(bdf_addr);
+out:
+ kfree(req);
+ return ret;
+}
+
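+/* Report the M3 firmware location to the target. Address and size
+ * are left at zero here as no M3 image is loaded by this driver.
+ */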
+static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.addr = 0;
+ req.size = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed M3 information request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+			    mode, ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+			ath11k_warn(ab, "WLFW service disconnected\n");
+ return 0;
+ }
+ ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
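+/* Translate the host CE pipe configuration and the service-to-pipe
+ * map into their QMI representation and push them to firmware before
+ * the wlan mode is set.
+ */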
+static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn = {};
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strlcpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is number of CE configs */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+	for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is number of Service/CE configs */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+ req->shadow_reg_valid = 0;
+ req->shadow_reg_v2_valid = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan mode off\n");
+ return;
+ }
+}
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
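+/* Queue an event for the QMI event worker. GFP_ATOMIC is required
+ * since this can be called from the QMI message callbacks.
+ */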
+static int
+ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
+ enum ath11k_qmi_event_type type,
+ void *data)
+{
+ struct ath11k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static void ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+		ath11k_warn(ab, "qmi failed to send FW indication register request: %d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+		ath11k_warn(ab, "qmi failed to send host cap request: %d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_load_bdf(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret);
+ return;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
+ return;
+ }
+}
+
+static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware request memory request\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01)
+ ath11k_warn(ab, "Invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to alloc target memory:%d\n", ret);
+ return;
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware memory ready indication\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware ready\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
+}
+
+static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+}
+
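+/* Firmware-initiated indications are decoded with the matching ei
+ * array and dispatched to the callbacks above.
+ */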
+static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+		.decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+		.decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(qmi_wlanfw_fw_ready_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_fw_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+ .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei),
+ .fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
+ },
+};
+
+static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
+ return ret;
+ }
+
+	ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wifi fw service connected\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return 0;
+}
+
+static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi wifi fw del server\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath11k_qmi_ops = {
+ .new_server = ath11k_qmi_ops_new_server,
+ .del_server = ath11k_qmi_ops_del_server,
+};
+
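+/* Drain the event list. The lock is dropped while an event is
+ * handled, so callbacks may post further events in the meantime.
+ */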
+static void ath11k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
+ event_work);
+ struct ath11k_qmi_driver_event *event;
+ struct ath11k_base *ab = qmi->ab;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath11k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))
+ return;
+
+ switch (event->type) {
+ case ATH11K_QMI_EVENT_SERVER_ARRIVE:
+ ath11k_qmi_event_server_arrive(qmi);
+ break;
+ case ATH11K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_REQUEST_MEM:
+ ath11k_qmi_event_mem_request(qmi);
+ break;
+ case ATH11K_QMI_EVENT_FW_MEM_READY:
+ ath11k_qmi_event_load_bdf(qmi);
+ break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ ath11k_core_qmi_firmware_ready(ab);
+ ab->qmi.cal_done = 1;
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
+ break;
+ default:
+			ath11k_warn(ab, "invalid event type: %d\n", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath11k_qmi_init_service(struct ath11k_base *ab)
+{
+ int ret;
+
+	memset(&ab->qmi.target, 0, sizeof(ab->qmi.target));
+	memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
+ &ath11k_qmi_ops, ath11k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to initialize qmi handle\n");
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!ab->qmi.event_wq) {
+ ath11k_err(ab, "failed to allocate workqueue\n");
+		return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
+ ATH11K_QMI_WLFW_SERVICE_VERS_V01,
+ ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+	if (ret < 0) {
+		ath11k_warn(ab, "failed to add qmi lookup\n");
+		destroy_workqueue(ab->qmi.event_wq);
+		return ret;
+	}
+
+ return ret;
+}
+
+void ath11k_qmi_deinit_service(struct ath11k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+}
+
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
new file mode 100644
index 000000000000..3f7db642d869
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_QMI_H
+#define ATH11K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH11K_HOST_VERSION_STRING "WIN"
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
+#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH11K_QMI_BDF_ADDRESS 0x4B0C0000
+#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
+#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
+#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH11K_QMI_RESP_LEN_MAX 8192
+#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
+#define ATH11K_QMI_CALDB_SIZE 0x480000
+#define ATH11K_QMI_DEFAULT_CAL_FILE_NAME "caldata.bin"
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
+#define QMI_WLFW_FW_READY_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH11K_FIRMWARE_MODE_OFF 4
+#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0
+
+struct ath11k_base;
+
+enum ath11k_qmi_file_type {
+ ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
+ ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_MAX_FILE_TYPE,
+};
+
+enum ath11k_qmi_event_type {
+ ATH11K_QMI_EVENT_SERVER_ARRIVE,
+ ATH11K_QMI_EVENT_SERVER_EXIT,
+ ATH11K_QMI_EVENT_REQUEST_MEM,
+ ATH11K_QMI_EVENT_FW_MEM_READY,
+ ATH11K_QMI_EVENT_FW_READY,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_START,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE,
+ ATH11K_QMI_EVENT_REGISTER_DRIVER,
+ ATH11K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH11K_QMI_EVENT_RECOVERY,
+ ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH11K_QMI_EVENT_POWER_UP,
+ ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_MAX,
+};
+
+struct ath11k_qmi_driver_event {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+ void *data;
+};
+
+struct ath11k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u8 *shadow_reg_v2;
+ int shadow_reg_v2_len;
+};
+
+struct ath11k_qmi_event_msg {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ dma_addr_t paddr;
+ u32 vaddr;
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+};
+
+struct ath11k_qmi {
+ struct ath11k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath11k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ u8 cal_done;
+ struct target_info target;
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_IPQ8074_FW_MEM_MODE 0xFF
+#define HOST_DDR_REGION_TYPE 0x1
+#define BDF_MEM_REGION_TYPE 0x2
+#define CALDB_MEM_REGION_TYPE 0x4
+
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1124
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 548
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with MCL and FW team whether data can be a pointer
+ * and can be the last element in the structure.
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 36
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode);
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
+void ath11k_qmi_event_work(struct work_struct *work);
+void ath11k_qmi_msg_recv_work(struct work_struct *work);
+void ath11k_qmi_deinit_service(struct ath11k_base *ab);
+int ath11k_qmi_init_service(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
new file mode 100644
index 000000000000..453aa9c06969
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath11k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH11K_2GHZ_CH01_11,
+ ATH11K_5GHZ_5150_5350,
+ ATH11K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wmi_init_country_params init_country_param;
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+	/* Currently supporting only General User Hints. Cell based user
+	 * hints are to be handled later.
+	 * Hints from other sources like Core and Beacons are not expected
+	 * for self-managed wiphys.
+	 */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath11k_regdom_changes(ar, request->alpha2)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+ /* Set the country code to the firmware and wait for
+ * the WMI_REG_CHAN_LIST_CC EVENT for updating the
+ * reg info
+ */
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+}
+
+int ath11k_reg_update_chan_list(struct ath11k *ar)
+{
+ struct ieee80211_supported_band **bands;
+ struct scan_chan_list_params *params;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct channel_param *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int params_len;
+ int i, ret;
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ params_len = sizeof(struct scan_chan_list_params) +
+ num_channels * sizeof(struct channel_param);
+ params = kzalloc(params_len, GFP_KERNEL);
+
+ if (!params)
+ return -ENOMEM;
+
+ params->pdev_id = ar->pdev->pdev_id;
+ params->nallchans = num_channels;
+
+ ch = params->ch_param;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, params->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+			/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ kfree(params);
+
+ return ret;
+}
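As an aside, the open-coded size computation above (header struct plus a trailing array of channel_param entries) is exactly the pattern the struct_size() helper from <linux/overflow.h> encapsulates, with multiplication-overflow checking thrown in. A sketch, assuming ch_param is the trailing array member of struct scan_chan_list_params:

	/* Equivalent allocation with overflow-checked sizing; assumes
	 * ch_param is the trailing array member of the params struct.
	 */
	params = kzalloc(struct_size(params, ch_param, num_channels),
			 GFP_KERNEL);
	if (!params)
		return -ENOMEM;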
+
+static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath11k_regd_update(struct ath11k *ar, bool init)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath11k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock(&ab->base_lock);
+
+ if (init) {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath11k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
+ }
+ } else {
+ regd = ab->new_regd[pdev_id];
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock(&ab->base_lock);
+ goto err;
+ }
+
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath11k_copy_regd(regd, regd_copy);
+
+ spin_unlock(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rtnl_lock();
+ ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy);
+ rtnl_unlock();
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ if (ar->state == ATH11K_STATE_ON) {
+ ret = ath11k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH11K_DFS_REG_FCC:
+ case ATH11K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH11K_DFS_REG_ETSI:
+ case ATH11K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH11K_DFS_REG_MKK:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+static bool
+ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
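The test above treats rules as half-open frequency intervals and reports any non-empty overlap. A throwaway example with hypothetical values (kHz, as used by the regulatory core) makes the second branch concrete:

/* Rule A spans 5470-5725 MHz, rule B the 5590-5650 MHz weather radar
 * band; B starts inside A, so the intervals overlap.
 */
struct ieee80211_reg_rule a = {
	.freq_range = { .start_freq_khz = 5470000, .end_freq_khz = 5725000 },
};
struct ieee80211_reg_rule b = {
	.freq_range = { .start_freq_khz = 5590000, .end_freq_khz = 5650000 },
};

/* ath11k_reg_can_intersect(&a, &b) == true:
 * start_freq2 (5590000) > start_freq1 (5470000) and
 * start_freq2 < end_freq1 (5725000).
 */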
+
+static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+	/* To be safe, let's use the max cac timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
+
+static struct ieee80211_regdomain *
+ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+	/* We set the new country and dfs region directly and only trim
+	 * the freq, power, antenna gain by intersecting with the
+	 * default regdomain. Also the max of the dfs cac timeouts is
+	 * selected.
+	 */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ ath11k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
+
+static const char *
+ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
+static u16
+ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw < 40)
+ bw = 20;
+
+ return bw;
+}
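A few worked inputs (hypothetical, in MHz) show how the ladder snaps an arbitrary span down to a standard channel width:

	u16 bw1 = ath11k_reg_adjust_bw(5250, 5330, 160); /* span 80 -> 80 */
	u16 bw2 = ath11k_reg_adjust_bw(5590, 5650, 160); /* span 60 -> 40 */
	u16 bw3 = ath11k_reg_adjust_bw(2402, 2422, 40);  /* span 20 -> 20 */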
+
+static void
+ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->flags = reg_flags;
+}
+
+static void
+ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct cur_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+
+ ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
+ end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
+ else
+ end_freq = reg_rule->end_freq;
+
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ max_bw);
+
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (end_freq == reg_rule->end_freq) {
+ regd->n_reg_rules--;
+ *rule_idx = i;
+ return;
+ }
+
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ *rule_idx = i;
+}
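Putting the three ath11k_reg_update_rule() calls together, a hypothetical ETSI DFS rule covering 5470-5725 MHz with max_bw 160 would be split into three regd entries:

/* Hypothetical walk-through, not taken from the patch:
 *   1. 5470-5590 MHz @ 80 MHz, default CAC (dfs_cac_ms left at 0)
 *   2. 5590-5650 MHz @ 40 MHz, dfs_cac_ms = 600000, the 10 minute
 *      ETSI weather radar CAC
 *   3. 5650-5725 MHz @ 40 MHz, default CAC
 * Had the rule ended at or below 5650 MHz, only the first two entries
 * would be emitted and regd->n_reg_rules would be trimmed by one.
 */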
+
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct cur_reg_rule *reg_rule;
+ u8 i = 0, j = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
+
+ if (!num_rules)
+ goto ret;
+
+ /* Add max additional rules to accommodate weather radar band */
+ if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ tmp_regd->n_reg_rules = num_rules;
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+ /* Update reg_rules[] below. Firmware is expected to
+	 * send these rules in order (2G rules first and then 5G)
+ */
+ for (; i < tmp_regd->n_reg_rules; i++) {
+ if (reg_info->num_2g_reg_rules &&
+ (i < reg_info->num_2g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2g_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2g);
+ flags = 0;
+ } else if (reg_info->num_5g_reg_rules &&
+ (j < reg_info->num_5g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5g_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5g);
+
+ /* FW doesn't pass NL80211_RRF_AUTO_BW flag for
+ * BW Auto correction, we can enable this by default
+ * for all 5G rules here. The regulatory core performs
+ * BW correction if required and applies flags as
+ * per other BW rule flags we pass from here
+ */
+ flags = NL80211_RRF_AUTO_BW;
+ } else {
+ break;
+ }
+
+ flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
+
+ ath11k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+		    reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
+ ath11k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath11k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
+
+void ath11k_regd_update_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ regd_update_work);
+ int ret;
+
+ ret = ath11k_regd_update(ar, false);
+ if (ret) {
+ /* Firmware has already moved to the new regd. We need
+ * to maintain channel consistency across FW, Host driver
+ * and userspace. Hence as a fallback mechanism we can set
+ * the prev or default country code to the firmware.
+ */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath11k_reg_init(struct ath11k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+}
+
+void ath11k_reg_free(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < MAX_RADIOS; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
new file mode 100644
index 000000000000..39b7fc943541
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_REG_H
+#define ATH11K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath11k_base;
+struct ath11k;
+
+/* DFS regdomains supported by Firmware */
+enum ath11k_dfs_region {
+ ATH11K_DFS_REG_UNSET,
+ ATH11K_DFS_REG_FCC,
+ ATH11K_DFS_REG_ETSI,
+ ATH11K_DFS_REG_MKK,
+ ATH11K_DFS_REG_CN,
+ ATH11K_DFS_REG_KR,
+ ATH11K_DFS_REG_UNDEF,
+};
+
+/* ATH11K Regulatory APIs */
+void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_free(struct ath11k_base *ab);
+void ath11k_regd_update_work(struct work_struct *work);
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect);
+int ath11k_regd_update(struct ath11k *ar, bool init);
+int ath11k_reg_update_chan_list(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
new file mode 100644
index 000000000000..a5aff801f17f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -0,0 +1,1212 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_RX_DESC_H
+#define ATH11K_RX_DESC_H
+
+enum rx_desc_rxpcu_filter {
+ RX_DESC_RXPCU_FILTER_PASS,
+ RX_DESC_RXPCU_FILTER_MONITOR_CLIENT,
+ RX_DESC_RXPCU_FILTER_MONITOR_OTHER,
+};
+
+/* rxpcu_filter_pass
+ * This MPDU passed the normal frame filter programming of rxpcu.
+ *
+ * rxpcu_filter_monitor_client
+ * This MPDU did not pass the regular frame filter and would
+ * have been dropped, were it not for the frame fitting into the
+ * 'monitor_client' category.
+ *
+ * rxpcu_filter_monitor_other
+ * This MPDU did not pass the regular frame filter and also did
+ * not pass the rxpcu_monitor_client filter. It would have been
+ * dropped except that it did pass the 'monitor_other' category.
+ */
+
+#define RX_DESC_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_DESC_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+enum rx_desc_sw_frame_grp_id {
+ RX_DESC_SW_FRAME_GRP_ID_NDP_FRAME,
+ RX_DESC_SW_FRAME_GRP_ID_MCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_UCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_NULL_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0111,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1111,
+ RX_DESC_SW_FRAME_GRP_ID_UNSUPPORTED,
+ RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
+};
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_ATTENTION_INFO1_FIRST_MPDU BIT(0)
+#define RX_ATTENTION_INFO1_RSVD_1A BIT(1)
+#define RX_ATTENTION_INFO1_MCAST_BCAST BIT(2)
+#define RX_ATTENTION_INFO1_AST_IDX_NOT_FOUND BIT(3)
+#define RX_ATTENTION_INFO1_AST_IDX_TIMEDOUT BIT(4)
+#define RX_ATTENTION_INFO1_POWER_MGMT BIT(5)
+#define RX_ATTENTION_INFO1_NON_QOS BIT(6)
+#define RX_ATTENTION_INFO1_NULL_DATA BIT(7)
+#define RX_ATTENTION_INFO1_MGMT_TYPE BIT(8)
+#define RX_ATTENTION_INFO1_CTRL_TYPE BIT(9)
+#define RX_ATTENTION_INFO1_MORE_DATA BIT(10)
+#define RX_ATTENTION_INFO1_EOSP BIT(11)
+#define RX_ATTENTION_INFO1_A_MSDU_ERROR BIT(12)
+#define RX_ATTENTION_INFO1_FRAGMENT BIT(13)
+#define RX_ATTENTION_INFO1_ORDER BIT(14)
+#define RX_ATTENTION_INFO1_CCE_MATCH BIT(15)
+#define RX_ATTENTION_INFO1_OVERFLOW_ERR BIT(16)
+#define RX_ATTENTION_INFO1_MSDU_LEN_ERR BIT(17)
+#define RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_ATTENTION_INFO1_IP_CKSUM_FAIL BIT(19)
+#define RX_ATTENTION_INFO1_SA_IDX_INVALID BIT(20)
+#define RX_ATTENTION_INFO1_DA_IDX_INVALID BIT(21)
+#define RX_ATTENTION_INFO1_RSVD_1B BIT(22)
+#define RX_ATTENTION_INFO1_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_ATTENTION_INFO1_ENCRYPT_REQUIRED BIT(24)
+#define RX_ATTENTION_INFO1_DIRECTED BIT(25)
+#define RX_ATTENTION_INFO1_BUFFER_FRAGMENT BIT(26)
+#define RX_ATTENTION_INFO1_MPDU_LEN_ERR BIT(27)
+#define RX_ATTENTION_INFO1_TKIP_MIC_ERR BIT(28)
+#define RX_ATTENTION_INFO1_DECRYPT_ERR BIT(29)
+#define RX_ATTENTION_INFO1_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_ATTENTION_INFO1_FCS_ERR BIT(31)
+
+#define RX_ATTENTION_INFO2_FLOW_IDX_TIMEOUT BIT(0)
+#define RX_ATTENTION_INFO2_FLOW_IDX_INVALID BIT(1)
+#define RX_ATTENTION_INFO2_WIFI_PARSER_ERR BIT(2)
+#define RX_ATTENTION_INFO2_AMSDU_PARSER_ERR BIT(3)
+#define RX_ATTENTION_INFO2_SA_IDX_TIMEOUT BIT(4)
+#define RX_ATTENTION_INFO2_DA_IDX_TIMEOUT BIT(5)
+#define RX_ATTENTION_INFO2_MSDU_LIMIT_ERR BIT(6)
+#define RX_ATTENTION_INFO2_DA_IS_VALID BIT(7)
+#define RX_ATTENTION_INFO2_DA_IS_MCBC BIT(8)
+#define RX_ATTENTION_INFO2_SA_IS_VALID BIT(9)
+#define RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_ATTENTION_INFO2_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_ATTENTION_INFO2_MSDU_DONE BIT(31)
+
+struct rx_attention {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+/* rx_attention
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * first_mpdu
+ *		Indicates the first MPDU of the PPDU. If both first_mpdu
+ *		and last_mpdu are set in the MSDU then this is not an
+ *		A-MPDU frame but a standalone MPDU. An interior MPDU in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ *		entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ *		Set if packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if number of MSDUs in A-MSDU is above a threshold or if the
+ * size of the MSDU is invalid. This receive buffer will contain
+ * all of the remainder of MSDUs in this MPDU w/o decapsulation.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ *		receive FIFO so that the status can still be written. The
+ *		remaining packets of this MPDU in the PPDU will be filtered
+ *		and no Ack response will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ *		'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ *		Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ *		Indicates that the MPDU Michael integrity check failed.
+ *
+ * decrypt_err
+ *		Indicates that the MPDU decrypt integrity check failed.
+ *
+ * fcs_err
+ *		Indicates that the MPDU FCS check failed.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ *		A-MSDU could not be properly de-aggregated.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus
+ * all the rest of the MSDUs will not be scattered and will not
+ *		be decapsulated but will be DMA'ed in RAW format as a single
+ * MSDU buffer.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *		Field only valid if da_is_valid is set. Indicates the DA address
+ * was a Multicast or Broadcast address.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values are
+ * defined in enum %RX_DESC_DECRYPT_STATUS_CODE*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
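The GENMASK/BIT definitions above are meant to be consumed with the helpers from <linux/bitfield.h>; a minimal sketch of decoding a couple of fields from a descriptor the hardware has written. The accessor names are illustrative, not part of this patch:

#include <linux/bitfield.h>

static bool ath11k_rx_attn_msdu_done(const struct rx_attention *attn)
{
	/* Descriptor words are little-endian in host memory */
	return !!(le32_to_cpu(attn->info2) & RX_ATTENTION_INFO2_MSDU_DONE);
}

static u32 ath11k_rx_attn_decrypt_status(const struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			 le32_to_cpu(attn->info2));
}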
+
+#define RX_MPDU_START_INFO0_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO0_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO0_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO1_MPDU_CTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO1_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO1_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO1_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO1_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO1_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO1_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO1_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO1_TO_DS BIT(17)
+#define RX_MPDU_START_INFO1_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO1_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA BIT(8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
+#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+
+#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO3_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO3_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO4_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO4_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO4_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO4_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+
+#define RX_MPDU_START_RAW_MPDU BIT(0)
+
+struct rx_mpdu_start {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+ __le32 peer_meta_data;
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 raw;
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ * Note: for ndp frame, if it was expected because the preceding
+ * NDPA was filter_pass, the setting rxpcu_filter_pass will be
+ * used. This setting will also be used for every ndp frame in
+ * case Promiscuous mode is enabled.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * Indicates that the received frame was an NDP frame.
+ *
+ * phy_err
+ * Indicates that PHY error was received before MAC received data.
+ *
+ * phy_err_during_mpdu_header
+ * PHY error was received before MAC received the complete MPDU
+ * header which was needed for proper decoding.
+ *
+ * protocol_version_err
+ * RXPCU detected a version error in the frame control field.
+ *
+ * ast_based_lookup_valid
+ * AST based lookup for this frame has found a valid result.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated in
+ * RXPCU. A value of 0xFFFF indicates an invalid AST index.
+ *
+ * sw_peer_id
+ * This field indicates a unique peer identifier. It is set equal
+ * to field 'sw_peer_id' from the AST entry.
+ *
+ * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid,
+ * mpdu_ht_control_valid, frame_encryption_info_valid
+ *		Indicates that each of these fields has a valid entry.
+ *
+ * mac_addr_adx_valid
+ * Corresponding mac_addr_adx_{lo/hi} has valid entries.
+ *
+ * from_ds, to_ds
+ *		Valid only when mpdu_frame_control_valid is set. Indicate
+ *		whether the frame is received from the DS and sent to the
+ *		DS, respectively.
+ *
+ * encrypted
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Retry bit from frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ * The sequence number from the 802.11 header.
+ *
+ * epd_en
+ * If set, use EPD instead of LPD.
+ *
+ * all_frames_shall_be_encrypted
+ * If set, all frames (data only?) shall be encrypted. If not,
+ * RX CRYPTO shall set an error flag.
+ *
+ * encrypt_type
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * mesh_sta
+ * Indicates a Mesh (11s) STA.
+ *
+ * bssid_hit
+ * BSSID of the incoming frame matched one of the 8 BSSID
+ * register values.
+ *
+ * bssid_number
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * pn
+ * The PN number.
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had a FCS failure.
+ *
+ * key_id
+ * The key ID octet from the IV.
+ *
+ * new_peer_entry
+ * Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ *		doesn't follow, so the RX DECRYPTION module either uses the
+ *		old peer entry or does not decrypt.
+ *
+ * decrypt_needed
+ * When RXPCU sets bit 'ast_index_not_found or ast_index_timeout',
+ * RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ *		reason only needs to evaluate this bit and none of the other
+ *		ones.
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ *		a PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ *		Received frame is a BAR frame.
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu..directed
+ *		See definition in the RX attention descriptor.
+ *
+ */
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_START_INFO1_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_START_INFO1_RSVD_1A BIT(14)
+#define RX_MSDU_START_INFO1_IPSEC_ESP BIT(15)
+#define RX_MSDU_START_INFO1_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_START_INFO1_IPSEC_AH BIT(23)
+#define RX_MSDU_START_INFO1_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_START_INFO2_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_START_INFO2_DECAP_TYPE GENMASK(9, 8)
+#define RX_MSDU_START_INFO2_IPV4 BIT(10)
+#define RX_MSDU_START_INFO2_IPV6 BIT(11)
+#define RX_MSDU_START_INFO2_TCP BIT(12)
+#define RX_MSDU_START_INFO2_UDP BIT(13)
+#define RX_MSDU_START_INFO2_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO2_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_START_INFO2_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_START_INFO2_SELECTED_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_START_INFO2_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_START_INFO2_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_START_INFO2_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_START_INFO2_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_START_INFO2_LDPC BIT(23)
+#define RX_MSDU_START_INFO2_IP4_IP6_NXT_HDR GENMASK(31, 24)
+#define RX_MSDU_START_INFO2_DECAP_FORMAT GENMASK(9, 8)
+
+#define RX_MSDU_START_INFO3_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_START_INFO3_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_START_INFO3_STBC BIT(12)
+#define RX_MSDU_START_INFO3_SGI GENMASK(14, 13)
+#define RX_MSDU_START_INFO3_RATE_MCS GENMASK(18, 15)
+#define RX_MSDU_START_INFO3_RECV_BW GENMASK(20, 19)
+#define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24)
+
+struct rx_msdu_start {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+} __packed;
+
+/* rx_msdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ * Depending upon mode bit, this field either indicates the
+ *		L4 offset in bytes from the start of RX_HEADER (only valid
+ * if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ * the offset in bytes to the start of TCP or UDP header from
+ *		the start of the IP header after decapsulation (only valid if
+ * tcp_proto or udp_proto is set). The value 0 indicates that
+ * the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ *		Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ *		MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ *		Set if either ipv4_proto or ipv6_proto is set and the IP
+ *		protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ *		Set if either ipv4_proto or ipv6_proto is set and the IP
+ *		protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a fragmented
+ * IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ *		IPv4/IPv6 header, including IPv4 options and
+ * recognizable extension headers parsed fully within first 256
+ * bytes of the packet
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *
+ * ip4_protocol_ip6_next_header
+ *		For IPv4, this is the 8 bit protocol field. For IPv6 this
+ * is the 8 bit next_header field.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by RxOLE register - If register bit set to 0,
+ * Toeplitz hash is computed over 2-tuple IPv4 or IPv6 src/dest
+ * addresses; otherwise, toeplitz hash is computed over 4-tuple
+ * IPv4 or IPv6 src/dest addresses and src/dest ports.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive Bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is and defined in enum
+ * %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ * Field only valid when
+ * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ */
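In the same style, the rate-related fields of info3 decode with FIELD_GET; a hedged sketch with hypothetical accessor names, not taken from the patch:

static enum rx_msdu_start_pkt_type
ath11k_rx_msdu_start_pkt_type(const struct rx_msdu_start *msdu)
{
	return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
			 le32_to_cpu(msdu->info3));
}

static u8 ath11k_rx_msdu_start_nss(const struct rx_msdu_start *msdu)
{
	u8 ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
				 le32_to_cpu(msdu->info3));

	/* One bit per spatial stream used for this STA, LSB = SS 0 */
	return hweight8(ss_bitmap);
}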
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_KEY_ID GENMASK(7, 0)
+#define RX_MSDU_END_INFO1_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO1_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO1_CCND_CCE_DIS BIT(15)
+#define RX_MSDU_END_INFO1_EXT_WAPI_PN GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
+#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
+#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
+#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
+#define RX_MSDU_END_INFO2_FLOW_IDX_TIMEOUT BIT(19)
+#define RX_MSDU_END_INFO2_FLOW_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO2_WIFI_PARSER_ERR BIT(21)
+#define RX_MSDU_END_INFO2_AMSDU_PARSET_ERR BIT(22)
+#define RX_MSDU_END_INFO2_SA_IS_VALID BIT(23)
+#define RX_MSDU_END_INFO2_DA_IS_VALID BIT(24)
+#define RX_MSDU_END_INFO2_DA_IS_MCBC BIT(25)
+#define RX_MSDU_END_INFO2_L3_HDR_PADDING GENMASK(27, 26)
+
+#define RX_MSDU_END_INFO3_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO3_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO4_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO4_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO4_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO4_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO4_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO5_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6)
+
+struct rx_msdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 tcp_udp_cksum;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info4;
+ __le32 rule_indication[2];
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE global disable
+ *
+ * ext_wapi_pn*
+ * Extension PN (packet number) which is only used by WAPI.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * first_msdu
+ * Indicates the first MSDU of A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ *		frame: normal MPDU. An interior MSDU in an A-MSDU shall have
+ *		both first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ *		A-MSDU could not be properly de-aggregated.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *		Field only valid if da_is_valid is set. Indicates the DA
+ *		address was a Multicast or Broadcast address.
+ *
+ * l3_header_padding
+ * Number of bytes padded to make sure that the L3 header will
+ *		always start on a Dword boundary.
+ *
+ * ipv6_options_crc
+ * 32 bit CRC computed out of IP v6 extension headers.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ *		The 16-bit type value indicating the type of the L3 layer
+ * extracted from LLC/SNAP, set to zero if SNAP is not
+ * available.
+ *
+ * rule_indication
+ *		Bitmap indicating which of the rules have matched.
+ *
+ * sa_idx
+ * The offset in the address table which matches MAC source address
+ *
+ * da_idx
+ * The offset in the address table which matches MAC destination
+ * address.
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ *		The id of the reo exit ring where the msdu frame shall be
+ *		pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ */
+
+enum rx_mpdu_end_rxdma_dest_ring {
+ RX_MPDU_END_RXDMA_DEST_RING_RELEASE,
+ RX_MPDU_END_RXDMA_DEST_RING_FW,
+ RX_MPDU_END_RXDMA_DEST_RING_SW,
+ RX_MPDU_END_RXDMA_DEST_RING_REO,
+};
+
+#define RX_MPDU_END_INFO1_UNSUP_KTYPE_SHORT_FRAME BIT(11)
+#define RX_MPDU_END_INFO1_RX_IN_TX_DECRYPT_BYT BIT(12)
+#define RX_MPDU_END_INFO1_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO1_MPDU_LEN_ERR BIT(14)
+#define RX_MPDU_END_INFO1_TKIP_MIC_ERR BIT(15)
+#define RX_MPDU_END_INFO1_DECRYPT_ERR BIT(16)
+#define RX_MPDU_END_INFO1_UNENCRYPTED_FRAME_ERR BIT(17)
+#define RX_MPDU_END_INFO1_PN_FIELDS_VALID BIT(18)
+#define RX_MPDU_END_INFO1_FCS_ERR BIT(19)
+#define RX_MPDU_END_INFO1_MSDU_LEN_ERR BIT(20)
+#define RX_MPDU_END_INFO1_RXDMA0_DEST_RING GENMASK(22, 21)
+#define RX_MPDU_END_INFO1_RXDMA1_DEST_RING GENMASK(24, 23)
+#define RX_MPDU_END_INFO1_DECRYPT_STATUS_CODE GENMASK(27, 25)
+#define RX_MPDU_END_INFO1_RX_BITMAP_NOT_UPD BIT(28)
+
+struct rx_mpdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+} __packed;
+
+/* rx_mpdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * unsup_ktype_short_frame
+ * This bit will be '1' when a WEP, TKIP or WAPI key type is
+ * received for an 11ah short frame. After setting this bit, crypto
+ * will pass the received packet to RxOLE without decrypting it.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is
+ * busy with TX packet processing.
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ * Set by RXPCU if the expected MPDU length does not correspond
+ * with the actually received number of bytes in the MPDU.
+ *
+ * tkip_mic_err
+ * Set by Rx crypto when crypto detected a TKIP MIC error for
+ * this MPDU.
+ *
+ * decrypt_err
+ * Set by RX CRYPTO when CRYPTO detected a decrypt error for this
+ * MPDU or CRYPTO received an encrypted frame, but did not get a
+ * valid corresponding key id in the peer entry.
+ *
+ * unencrypted_frame_err
+ * Set by RX CRYPTO when CRYPTO detected an unencrypted frame while
+ * the peer entry field 'All_frames_shall_be_encrypted' is set.
+ *
+ * pn_fields_contain_valid_info
+ * Set by RX CRYPTO to indicate that there is a valid PN field
+ * present in this MPDU.
+ *
+ * fcs_err
+ * Set by RXPCU when there is an FCS error detected for this MPDU.
+ *
+ * msdu_length_err
+ * Set by RXOLE when there is an msdu length error detected
+ * in at least 1 of the MSDUs embedded within the MPDU.
+ *
+ * rxdma0_destination_ring
+ * rxdma1_destination_ring
+ * The ring to which RXDMA0/1 shall push the frame, assuming
+ * no MPDU level errors are detected. In case of MPDU level
+ * errors, RXDMA0/1 might change the RXDMA0/1 destination. Values
+ * are defined in enum %RX_MPDU_END_RXDMA_DEST_RING_*.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ */
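Editor's sketch (not part of the patch): a consumer would decode these fields
from the little-endian info1 word with the BIT()/GENMASK() masks above;
FIELD_GET() comes from <linux/bitfield.h>. The helper names below are
hypothetical, purely for illustration:

	static inline bool ath11k_dbg_mpdu_has_fcs_err(const struct rx_mpdu_end *mpdu_end)
	{
		return !!(le32_to_cpu(mpdu_end->info1) & RX_MPDU_END_INFO1_FCS_ERR);
	}

	static inline u32 ath11k_dbg_mpdu_rxdma0_ring(const struct rx_mpdu_end *mpdu_end)
	{
		/* yields one of enum rx_mpdu_end_rxdma_dest_ring */
		return FIELD_GET(RX_MPDU_END_INFO1_RXDMA0_DEST_RING,
				 le32_to_cpu(mpdu_end->info1));
	}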
+
+/* Padding bytes to avoid TLV's spanning across 128 byte boundary */
+#define HAL_RX_DESC_PADDING0_BYTES 4
+#define HAL_RX_DESC_PADDING1_BYTES 16
+
+#define HAL_RX_DESC_HDR_STATUS_LEN 120
+
+struct hal_rx_desc {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+} __packed;
+
+#endif /* ATH11K_RX_DESC_H */
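For orientation, a minimal sketch (assumption: the rx buffer begins with the
HW-written descriptor): the MSDU payload follows the fixed-size descriptor
through the zero-length msdu_payload array, so no extra offset arithmetic is
needed. The helper name is hypothetical:

	static inline u8 *ath11k_rx_desc_payload(struct hal_rx_desc *desc)
	{
		return desc->msdu_payload; /* == (u8 *)desc + sizeof(*desc) */
	}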
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
new file mode 100644
index 000000000000..d2dc9db01491
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "testmode_i.h"
+
+static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
+ [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH11K_TM_DATA_MAX_LEN },
+ [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+/* Returns true if the callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ bool consumed;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ consumed = true;
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + skb->len,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ar->ab,
+ "failed to allocate skb for testmode wmi event\n");
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to to put testmode wmi event cmd attribute: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to to put testmode wmi even cmd_id: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to copy skb to testmode wmi event: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH11K_TESTMODE_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct sk_buff *skb;
+ u32 cmd_id, buf_len;
+ int ret;
+ void *buf;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ cmd_id, buf, buf_len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath11k *ar = hw->priv;
+ struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH11K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
+ case ATH11K_TM_CMD_GET_VERSION:
+ return ath11k_tm_cmd_get_version(ar, tb);
+ case ATH11K_TM_CMD_WMI:
+ return ath11k_tm_cmd_wmi(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/testmode.h b/drivers/net/wireless/ath/ath11k/testmode.h
new file mode 100644
index 000000000000..aaa122ed9069
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/ath11k/testmode_i.h
new file mode 100644
index 000000000000..4bae2a9eeea4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode_i.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+/* "API" level of the ath11k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH11K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH11K_TESTMODE_VERSION_MINOR 0
+
+#define ATH11K_TM_DATA_MAX_LEN 5000
+
+enum ath11k_tm_attr {
+ __ATH11K_TM_ATTR_INVALID = 0,
+ ATH11K_TM_ATTR_CMD = 1,
+ ATH11K_TM_ATTR_DATA = 2,
+ ATH11K_TM_ATTR_WMI_CMDID = 3,
+ ATH11K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH11K_TM_ATTR_VERSION_MINOR = 5,
+ ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
+
+ /* keep last */
+ __ATH11K_TM_ATTR_AFTER_LAST,
+ ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath11k testmode interface commands specified in
+ * ATH11K_TM_ATTR_CMD
+ */
+enum ath11k_tm_cmd {
+	/* Returns the supported ath11k testmode interface version in
+	 * ATH11K_TM_ATTR_VERSION_MAJOR and ATH11K_TM_ATTR_VERSION_MINOR.
+	 * Always guaranteed to work. User space uses this to verify
+	 * it's using the correct version of the testmode interface.
+ */
+ ATH11K_TM_CMD_GET_VERSION = 0,
+
+ /* The command used to transmit a WMI command to the firmware and
+ * the event to receive WMI events from the firmware. Without
+ * struct wmi_cmd_hdr header, only the WMI payload. Command id is
+ * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in
+ * ATH11K_TM_ATTR_DATA.
+ */
+ ATH11K_TM_CMD_WMI = 1,
+};
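To illustrate the wire format, here is a hypothetical userspace sketch
(libnl-3/genl, not part of this patch) issuing ATH11K_TM_CMD_GET_VERSION: the
ath11k attributes above travel inside nl80211's NL80211_ATTR_TESTDATA nest of
an NL80211_CMD_TESTMODE message. nl80211_id and wiphy_idx are assumed to be
resolved beforehand, and error handling is omitted:

	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_idx);
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_GET_VERSION);
	nla_nest_end(msg, td);
	/* send on a genl socket; the reply carries ATH11K_TM_ATTR_VERSION_MAJOR
	 * and ATH11K_TM_ATTR_VERSION_MINOR inside NL80211_ATTR_TESTDATA
	 */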
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
new file mode 100644
index 000000000000..f0cc49ba0387
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
new file mode 100644
index 000000000000..8700a622be7b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH11K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH11K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath11k
+
+TRACE_EVENT(ath11k_htt_pktlog,
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_ppdu_stats,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_rxdesc,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
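Usage follows the standard kernel tracepoint pattern; as a sketch (assuming
ar, data and len are in scope in the driver), the caller simply invokes the
generated stub, and the memcpy into the trace ring buffer only happens while
the event is enabled:

	trace_ath11k_htt_pktlog(ar, data, len);

The event can then be switched on from userspace via tracefs, e.g. with
echo 1 > /sys/kernel/tracing/events/ath11k/ath11k_htt_pktlog/enable.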
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
new file mode 100644
index 000000000000..a9b301ceb24b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -0,0 +1,5810 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct wmi_tlv_svc_rdy_ext_parse {
+ struct ath11k_service_ext_param param;
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct wmi_hw_mode_capabilities pref_hw_mode_caps;
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
+ struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+};
+
+struct wmi_tlv_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
+ = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
+ [WMI_TAG_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT]
+ = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT]
+ = {.min_len = sizeof(struct wmi_ready_event) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT]
+ = {.min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_STATS_EVENT]
+ = { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+};
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath11k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
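For reference, each PRIMAP() entry just pairs a hw mode with its _PRI priority
constant via token pasting; the DBS entry, for example, expands to:

	[WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI,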
+
+static int
+ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
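A minimal sketch of the intended calling pattern for the allocating parser
(the tag choice, the ab/skb variables and the error codes here are
illustrative, not lifted from a specific event handler):

	const void **tb;
	const struct wmi_scan_event *ev;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb))
		return PTR_ERR(tb);

	ev = tb[WMI_TAG_SCAN_EVENT];	/* NULL if that TLV was absent */
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}
	/* ... consume ev, then ... */
	kfree(tb);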
+
+static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = cmd;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+
+ if (ret == -EAGAIN)
+ ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+
+ return ret;
+}
+
+static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ const void *ptr,
+ struct ath11k_service_ext_param *param)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+
+ if (!ev)
+ return -EINVAL;
+
+ /* Move this to host based bitmap */
+ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
+ param->default_fw_config_bits = ev->default_fw_config_bits;
+ param->he_cap_info = ev->he_cap_info;
+ param->mpdu_density = ev->mpdu_density;
+ param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
+ memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
+
+ return 0;
+}
+
+static int
+ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
+ struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
+ struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
+ struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath11k_pdev *pdev)
+{
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_band_cap *cap_band;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
+ return -EINVAL;
+
+ for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
+ if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
+ break;
+
+ phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
+ while (phy_map) {
+ phy_map >>= 1;
+ phy_idx++;
+ }
+ }
+
+ if (hw_idx == hw_caps->num_hw_modes)
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_idx >= hal_reg_caps->num_phy)
+ return -EINVAL;
+
+ mac_phy_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = mac_phy_caps->pdev_id;
+ pdev_cap->supported_bands = mac_phy_caps->supported_bands;
+ pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+
+ /* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
+ * from band to band for a single radio, it remains to be seen how
+ * this should be handled.
+ */
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
+ } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
+ pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
+ pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ } else {
+ return -EINVAL;
+ }
+
+ /* The tx/rx chainmask reported by fw depends on the actual hw chains used.
+ * For example, for 4x4 capable macphys, the first 4 chains can be used for
+ * the first mac and the remaining 4 chains for the second mac, or
+ * vice-versa. In this case, tx/rx chainmask 0xf will be advertised for the
+ * first mac and 0xf0 for the second mac, or vice-versa. Compute the shift
+ * value for the tx/rx chainmask which will be used to advertise supported
+ * ht/vht rates to mac80211.
+ */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
+
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ return 0;
+}
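A quick worked check of the chainmask shift described in the comment above
(illustrative values only): a mac wired to the upper four chains reports mask
0xf0, and find_first_bit() yields the shift of 4 applied when advertising
ht/vht rates.

	unsigned long mask = 0xf0;
	int shift = find_first_bit(&mask, 32);	/* == 4 */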
+
+static int
+ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_hal_reg_capabilities *reg_caps,
+ struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
+ u8 phy_idx,
+ struct ath11k_hal_reg_capabilities_ext *param)
+{
+ struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
+
+ if (!reg_caps || !wmi_ext_reg_cap)
+ return -EINVAL;
+
+ if (phy_idx >= reg_caps->num_phy)
+ return -EINVAL;
+
+ ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
+
+ param->phy_id = ext_reg_cap->phy_id;
+ param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
+ param->eeprom_reg_domain_ext =
+ ext_reg_cap->eeprom_reg_domain_ext;
+ param->regcap1 = ext_reg_cap->regcap1;
+ param->regcap2 = ext_reg_cap->regcap2;
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
+ param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
+ param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
+ param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
+
+ return 0;
+}
+
+static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
+ const void *evt_buf,
+ struct ath11k_targ_cap *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+ ath11k_err(ab, "%s: failed by NULL param\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = ev->phy_capability;
+ cap->max_frag_entry = ev->max_frag_entry;
+ cap->num_rf_chains = ev->num_rf_chains;
+ cap->ht_cap_info = ev->ht_cap_info;
+ cap->vht_cap_info = ev->vht_cap_info;
+ cap->vht_supp_mcs = ev->vht_supp_mcs;
+ cap->hw_min_tx_power = ev->hw_min_tx_power;
+ cap->hw_max_tx_power = ev->hw_max_tx_power;
+ cap->sys_cap_info = ev->sys_cap_info;
+ cap->min_pkt_size_enable = ev->min_pkt_size_enable;
+ cap->max_bcn_ie_size = ev->max_bcn_ie_size;
+ cap->max_num_scan_channels = ev->max_num_scan_channels;
+ cap->max_supported_macs = ev->max_supported_macs;
+ cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
+ cap->txrx_chainmask = ev->txrx_chainmask;
+ cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
+ cap->num_msdu_desc = ev->num_msdu_desc;
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi services in
+ * the wmi service ready event are advertised in b0-b3 (the 4 LSBs) of
+ * each 4-byte word.
+ */
+static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
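Concretely, with 4 service bits per 32-bit word as described above, word 0
carries services 0..3 in its b0..b3 and word 1 carries services 4..7; for
instance, service 5 maps to bit (5 % 4) == 1 of word (5 / 4) == 1, so the
loop effectively performs:

	if (wmi_svc_bm[1] & BIT(1))
		set_bit(5, wmi->wmi_ab->svc_map);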
+
+static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_ready_parse *svc_ready = data;
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath11k_base *ab = wmi_sc->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
+ frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->desc_id = buf_id;
+ cmd->chanfreq = 0;
+ cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->frame_len = frame->len;
+ cmd->buf_len = buf_len;
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ath11k_ce_byte_swap(frame_tlv->value, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_vdev_txrx_streams *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+ /* This can be optimized by sending the tx/rx chain configuration
+ * only for the supported bands instead of always sending it for
+ * both bands.
+ */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->if_id;
+ cmd->vdev_type = param->type;
+ cmd->vdev_subtype = param->subtype;
+ cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
+ cmd->pdev_id = param->pdev_id;
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_5GHZ].rx;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ param->if_id, param->type, param->subtype,
+ macaddr, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = arg->channel.freq;
+ chan->band_center_freq1 = arg->channel.band_center_freq1;
+ if (arg->channel.mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq2 = arg->channel.band_center_freq2;
+ else
+ chan->band_center_freq2 = 0;
+
+ chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+ if (arg->channel.passive)
+ chan->info |= WMI_CHAN_INFO_PASSIVE;
+ if (arg->channel.allow_ibss)
+ chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+ if (arg->channel.allow_ht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (arg->channel.allow_vht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ if (arg->channel.allow_he)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+ if (arg->channel.ht40plus)
+ chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+ if (arg->channel.chan_radar)
+ chan->info |= WMI_CHAN_INFO_DFS;
+ if (arg->channel.freq2_radar)
+ chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+ chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ arg->channel.max_power) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ arg->channel.max_reg_power);
+
+ chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ arg->channel.max_antenna_gain) |
+ FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ arg->channel.max_power);
+}
+
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_START_REQUEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->beacon_interval = arg->bcn_intval;
+ cmd->bcn_tx_rate = arg->bcn_tx_rate;
+ cmd->dtim_period = arg->dtim_period;
+ cmd->num_noa_descriptors = arg->num_noa_descriptors;
+ cmd->preferred_rx_streams = arg->pref_rx_streams;
+ cmd->preferred_tx_streams = arg->pref_tx_streams;
+ cmd->cac_duration_ms = arg->cac_duration_ms;
+ cmd->regdomain = arg->regdomain;
+ cmd->he_ops = arg->he_ops;
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = arg->ssid_len;
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+ }
+
+ cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath11k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*chan) - TLV_HDR_SIZE);
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ if (restart)
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->channel.freq, arg->channel.mode);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->vdev_assoc_id = aid;
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
+ cmd->peer_type = param->peer_type;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer create vdev_id %d peer_addr %pM\n",
+ param->vdev_id, param->peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->reg_domain = param->current_rd_in_use;
+ cmd->reg_domain_2g = param->current_rd_2g;
+ cmd->reg_domain_5g = param->current_rd_5g;
+ cmd->conformance_test_limit_2g = param->ctl_2g;
+ cmd->conformance_test_limit_5g = param->ctl_5g;
+ cmd->dfs_domain = param->dfs_domain;
+ cmd->pdev_id = param->pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ param->current_rd_in_use, param->current_rd_2g,
+ param->current_rd_5g, param->dfs_domain, param->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_val;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = param->peer_tid_bitmap;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ param->vdev_id, peer_addr, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = vdev_id;
+ cmd->tid = tid;
+ cmd->queue_ptr_lo = lower_32_bits(paddr);
+ cmd->queue_ptr_hi = upper_32_bits(paddr);
+ cmd->queue_no = tid;
+ cmd->ba_window_size_valid = ba_window_size_valid;
+ cmd->ba_window_size = ba_window_size;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ return ret;
+}
+
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
+ cmd->vdev_id = param->vdev_id;
+ cmd->tid_mask = param->peer_tid_bitmap;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
+ param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->sta_ps_mode = enable;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev set psmode %d vdev id %d\n",
+ enable, vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->suspend_opt = suspend_opt;
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev suspend pdev_id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev resume pdev id %d\n", pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+/* TODO FW Support for the cmd is not available yet.
+ * Can be tested once the command and corresponding
+ * event is implemented in FW
+ */
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->req_type = type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = param->param;
+ cmd->value = param->value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
+ param->vdev_id, peer_addr, param->param, param->value);
+
+ return ret;
+}
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param = param;
+ cmd->value = param_value;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI set sta ps vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->type = type;
+ cmd->delay_time_ms = delay_time_ms;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->stats_id = param->stats_id;
+ cmd->vdev_id = param->vdev_id;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI request stats 0x%x vdev id %d pdev id %d\n",
+ param->stats_id, param->vdev_id, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->bcn_ctrl_op = bcn_ctrl_op;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
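+/* Hand a beacon template over to the firmware for offloaded beaconing.
+ * The message layout is the fixed wmi_bcn_tmpl_cmd, a wmi_bcn_prb_info
+ * struct (caps/erp left at zero), and a byte-array TLV carrying the
+ * beacon frame padded up to a 4-byte boundary.
+ */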
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->tim_ie_offset = offs->tim_offset;
+ cmd->csa_switch_count_offset = offs->csa_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1];
+ cmd->buf_len = bcn->len;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+ int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = arg->key_idx;
+ cmd->key_flags = arg->key_flags;
+ cmd->key_cipher = arg->key_cipher;
+ cmd->key_len = arg->key_len;
+ cmd->key_txmic_len = arg->key_txmic_len;
+ cmd->key_rxmic_len = arg->key_rxmic_len;
+
+ if (arg->key_rsc_counter)
+ memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
+ sizeof(struct wmi_key_seq_counter));
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
+ memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ return ret;
+}
+
+static inline void
+ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct peer_assoc_params *param)
+{
+ cmd->peer_flags = 0;
+
+ if (param->is_wme_set) {
+ if (param->qos_flag)
+ cmd->peer_flags |= WMI_PEER_QOS;
+ if (param->apsd_flag)
+ cmd->peer_flags |= WMI_PEER_APSD;
+ if (param->ht_flag)
+ cmd->peer_flags |= WMI_PEER_HT;
+ if (param->bw_40)
+ cmd->peer_flags |= WMI_PEER_40MHZ;
+ if (param->bw_80)
+ cmd->peer_flags |= WMI_PEER_80MHZ;
+ if (param->bw_160)
+ cmd->peer_flags |= WMI_PEER_160MHZ;
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well.
+ */
+ if (param->stbc_flag)
+ cmd->peer_flags |= WMI_PEER_STBC;
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well.
+ */
+ if (param->ldpc_flag)
+ cmd->peer_flags |= WMI_PEER_LDPC;
+
+ if (param->static_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+ if (param->dynamic_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
+ if (param->spatial_mux_flag)
+ cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ if (param->vht_flag)
+ cmd->peer_flags |= WMI_PEER_VHT;
+ if (param->he_flag)
+ cmd->peer_flags |= WMI_PEER_HE;
+ if (param->twt_requester)
+ cmd->peer_flags |= WMI_PEER_TWT_REQ;
+ if (param->twt_responder)
+ cmd->peer_flags |= WMI_PEER_TWT_RESP;
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (param->auth_flag)
+ cmd->peer_flags |= WMI_PEER_AUTH;
+ if (param->need_ptk_4_way)
+ cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ else
+ cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY;
+ if (param->need_gtk_2_way)
+ cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ /* Safe mode bypasses the 4-way handshake */
+ if (param->safe_mode_enabled)
+ cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY);
+
+ if (param->is_pmf_enabled)
+ cmd->peer_flags |= WMI_PEER_PMF;
+
+ /* Disable A-MSDU for station transmit, if user configures it.
+ * Disable A-MSDU for AP transmit to 11n stations, if user
+ * configures it. To be enabled once firmware support lands:
+ * if (param->amsdu_disable) ...
+ */
+
+ /* Target asserts if a node is marked HT while all its MCS rates
+ * are set to 0. Mark the node as non-HT if all the MCS rates are
+ * disabled through iwpriv.
+ */
+ if (param->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= ~WMI_PEER_HT;
+}
+
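+/* Build and send the peer assoc message. After the fixed command struct
+ * come byte-array TLVs for the legacy and HT rate sets (each padded to a
+ * 4-byte multiple), a VHT rate-set struct, and an array-struct TLV with
+ * one HE rate set per peer_he_mcs_count entry.
+ */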
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct wmi_vht_rate_set *mcs;
+ struct wmi_he_rate_set *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
+ sizeof(u32));
+
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * param->peer_he_mcs_count);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+
+ cmd->peer_new_assoc = param->peer_new_assoc;
+ cmd->peer_associd = param->peer_associd;
+
+ ath11k_wmi_copy_peer_flags(cmd, param);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
+
+ cmd->peer_rate_caps = param->peer_rate_caps;
+ cmd->peer_caps = param->peer_caps;
+ cmd->peer_listen_intval = param->peer_listen_intval;
+ cmd->peer_ht_caps = param->peer_ht_caps;
+ cmd->peer_max_mpdu = param->peer_max_mpdu;
+ cmd->peer_mpdu_density = param->peer_mpdu_density;
+ cmd->peer_vht_caps = param->peer_vht_caps;
+ cmd->peer_phymode = param->peer_phymode;
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
+ cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
+ cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+ cmd->peer_he_ops = param->peer_he_ops;
+ memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
+ sizeof(param->peer_he_cap_phyinfo));
+ memcpy(&cmd->peer_ppet, &param->peer_ppet,
+ sizeof(param->peer_ppet));
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
+ memcpy(ptr, param->peer_legacy_rates.rates,
+ param->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
+ memcpy(ptr, param->peer_ht_rates.rates,
+ param->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
+
+ cmd->peer_nss = param->peer_nss;
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
+
+ if (param->vht_capable) {
+ mcs->rx_max_rate = param->rx_max_rate;
+ mcs->rx_mcs_set = param->rx_mcs_set;
+ mcs->tx_max_rate = param->tx_max_rate;
+ mcs->tx_mcs_set = param->tx_mcs_set;
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = param->peer_he_mcs_count;
+
+ ptr += sizeof(*mcs);
+
+ len = param->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < param->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_HE_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*he_mcs) - TLV_HDR_SIZE);
+
+ he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ ptr += sizeof(*he_mcs);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, param->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ return ret;
+}
+
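+/* Seed a scan request with commonly used defaults: dwell and rest times,
+ * the standard set of scan events and a single BSSID entry. Callers are
+ * expected to override individual fields before the scan is started.
+ */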
+void ath11k_wmi_start_scan_init(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->num_bssid = 1;
+}
+
+static inline void
+ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct scan_req_params *param)
+{
+ /* Scan events subscription */
+ if (param->scan_ev_started)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
+ if (param->scan_ev_completed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
+ if (param->scan_ev_bss_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
+ if (param->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
+ if (param->scan_ev_dequeued)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
+ if (param->scan_ev_preempted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
+ if (param->scan_ev_start_failed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
+ if (param->scan_ev_restarted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
+ if (param->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
+ if (param->scan_ev_suspended)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
+ if (param->scan_ev_resumed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
+
+ /* Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (param->scan_f_passive)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ if (param->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+ if (param->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
+ if (param->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
+ if (param->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+ if (param->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+ if (param->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+ if (param->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ if (param->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ if (param->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (param->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
+ if (param->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
+ if (param->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
+ if (param->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
+ if (param->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
+ if (param->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ if (param->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
+
+ /* Adaptive scan dwell mode uses 3 bits (bits 21-23) of scan_ctrl_flags */
+ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
+ param->adaptive_dwell_time_mode);
+}
+
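+/* The start scan message carries four variable-length TLVs after the
+ * fixed command: a u32 channel list, fixed-struct arrays for SSIDs and
+ * BSSIDs, and a byte array with any extra IEs rounded up to a 4-byte
+ * multiple. Empty arrays still get their TLV headers.
+ */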
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct wmi_ssid *ssid = NULL;
+ struct wmi_mac_addr *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u8 extraie_len_with_pad = 0;
+
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_chan)
+ len += params->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_ssids)
+ len += params->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_bssid)
+ len += sizeof(*bssid) * params->num_bssid;
+
+ len += TLV_HDR_SIZE;
+ if (params->extraie.len)
+ extraie_len_with_pad =
+ roundup(params->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->scan_id = params->scan_id;
+ cmd->scan_req_id = params->scan_req_id;
+ cmd->vdev_id = params->vdev_id;
+ cmd->scan_priority = params->scan_priority;
+ cmd->notify_scan_events = params->notify_scan_events;
+
+ ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
+
+ cmd->dwell_time_active = params->dwell_time_active;
+ cmd->dwell_time_active_2g = params->dwell_time_active_2g;
+ cmd->dwell_time_passive = params->dwell_time_passive;
+ cmd->min_rest_time = params->min_rest_time;
+ cmd->max_rest_time = params->max_rest_time;
+ cmd->repeat_probe_time = params->repeat_probe_time;
+ cmd->probe_spacing_time = params->probe_spacing_time;
+ cmd->idle_time = params->idle_time;
+ cmd->max_scan_time = params->max_scan_time;
+ cmd->probe_delay = params->probe_delay;
+ cmd->burst_duration = params->burst_duration;
+ cmd->num_chan = params->num_chan;
+ cmd->num_bssid = params->num_bssid;
+ cmd->num_ssids = params->num_ssids;
+ cmd->ie_len = params->extraie.len;
+ cmd->n_probes = params->n_probes;
+
+ ptr += sizeof(*cmd);
+
+ len = params->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = (u32 *)ptr;
+
+ for (i = 0; i < params->num_chan; ++i)
+ tmp_ptr[i] = params->chan_list[i];
+
+ ptr += len;
+
+ len = params->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (params->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < params->num_ssids; ++i) {
+ ssid->ssid_len = params->ssid[i].length;
+ memcpy(ssid->ssid, params->ssid[i].ssid,
+ params->ssid[i].length);
+ ssid++;
+ }
+ }
+
+ ptr += (params->num_ssids * sizeof(*ssid));
+ len = params->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (params->num_bssid) {
+ for (i = 0; i < params->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ params->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += params->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (params->extraie.len)
+ memcpy(ptr, params->extraie.ptr,
+ params->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->requestor = param->requester;
+ cmd->scan_id = param->scan_id;
+ cmd->pdev_id = param->pdev_id;
+ /* Stop scans according to the requested cancel type */
+ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = WMI_SCAN_STOP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = WMI_SCN_STOP_VAP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = WMI_SCAN_STOP_ONE;
+ } else {
+ ath11k_warn(ar->ab, "invalid scan cancel param %d",
+ param->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
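+/* Push the host's channel list to firmware. Each channel becomes a
+ * wmi_channel TLV; the passive/HT/VHT/HE flags and the regulatory
+ * power limits are packed into the info and reg_info words with
+ * FIELD_PREP.
+ */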
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan_info;
+ struct channel_param *tchan_info;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *reg1, *reg2;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ sizeof(*chan_info) * chan_list->nallchans;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI no.of chan = %d len = %d\n", chan_list->nallchans, len);
+ cmd->pdev_id = chan_list->pdev_id;
+ cmd->num_scan_chans = chan_list->nallchans;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * chan_list->nallchans;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ ptr += TLV_HDR_SIZE;
+
+ tchan_info = &chan_list->ch_param[0];
+
+ for (i = 0; i < chan_list->nallchans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = tchan_info->mhz;
+ chan_info->band_center_freq1 = tchan_info->cfreq1;
+ chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+ if (tchan_info->is_chan_passive)
+ chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+ if (tchan_info->allow_he)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+ else if (tchan_info->allow_vht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ else if (tchan_info->allow_ht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (tchan_info->half_rate)
+ chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+ if (tchan_info->quarter_rate)
+ chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+
+ chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+ tchan_info->phy_mode);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+ tchan_info->minpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ tchan_info->maxpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ tchan_info->maxregpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+ tchan_info->reg_class_id);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ tchan_info->antennamax);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI chan scan list chan[%d] = %u\n",
+ i, chan_info->mhz);
+
+ ptr += sizeof(*chan_info);
+
+ tchan_info++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
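+/* Program per-access-category WMM parameters for a vdev. All four ACs
+ * (BE, BK, VI, VO) are always carried in a single command.
+ */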
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->wmm_param_type = 0;
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*wmm_param) - TLV_HDR_SIZE);
+
+ wmm_param->aifs = wmi_wmm_arg->aifs;
+ wmm_param->cwmin = wmi_wmm_arg->cwmin;
+ wmm_param->cwmax = wmi_wmm_arg->cwmax;
+ wmm_param->txoplimit = wmi_wmm_arg->txop;
+ wmm_param->acm = wmi_wmm_arg->acm;
+ wmm_param->no_ack = wmi_wmm_arg->no_ack;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ ac, wmm_param->aifs, wmm_param->cwmin,
+ wmm_param->cwmax, wmm_param->txoplimit,
+ wmm_param->acm, wmm_param->no_ack);
+ }
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
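+/* Enable or disable packet log filtering for one peer. The command is
+ * followed by an array-struct TLV holding a single per-MAC filter info
+ * entry, matching the hardcoded num_mac of 1.
+ */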
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_pktlog_filter_cmd *cmd;
+ struct wmi_pdev_pktlog_filter_info *info;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->num_mac = 1;
+ cmd->enable = enable;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
+
+ ptr += TLV_HDR_SIZE;
+ info = ptr;
+
+ ether_addr_copy(info->peer_macaddr.addr, addr);
+ info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*info) - TLV_HDR_SIZE);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_FILTER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_INIT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ switch (init_cc_params.flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
+ memcpy((u8 *)&cmd->cc_info.alpha2,
+ init_cc_params.cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
+ cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
+ cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+
+out:
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->evlist = pktlog_filter;
+ cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_disable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
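+/* Enable target wake time (TWT) on a pdev, using the ATH11K_TWT_DEF_*
+ * driver defaults for all congestion thresholds and slot parameters.
+ * MBSSID support is left disabled, see the TODO below.
+ */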
+int
+ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ cmd->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ cmd->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ cmd->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ cmd->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ cmd->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ cmd->mbss_support = 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = he_obss_pd->enable;
+ cmd->obss_min = he_obss_pd->min_offset;
+ cmd->obss_max = he_obss_pd->max_offset;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+static void
+ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
+ struct wmi_host_pdev_band_to_mac *band_to_mac)
+{
+ u8 i;
+ struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ band_to_mac[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
+ struct target_resource_config *tg_cfg)
+{
+ wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
+ wmi_cfg->num_peers = tg_cfg->num_peers;
+ wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
+ wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
+ wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
+ wmi_cfg->num_tids = tg_cfg->num_tids;
+ wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
+ wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
+ wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
+ wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
+ wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
+ wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
+ wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
+ wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
+ wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
+ wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
+ wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
+ wmi_cfg->roam_offload_max_ap_profiles =
+ tg_cfg->roam_offload_max_ap_profiles;
+ wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
+ wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
+ wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
+ wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
+ wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
+ wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
+ wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
+ wmi_cfg->vow_config = tg_cfg->vow_config;
+ wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
+ wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
+ wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
+ wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
+ wmi_cfg->num_tdls_conn_table_entries =
+ tg_cfg->num_tdls_conn_table_entries;
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ tg_cfg->beacon_tx_offload_max_vdev;
+ wmi_cfg->num_multicast_filter_entries =
+ tg_cfg->num_multicast_filter_entries;
+ wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
+ wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
+ wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ tg_cfg->max_tdls_concurrent_sleep_sta;
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ tg_cfg->max_tdls_concurrent_buffer_sta;
+ wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
+ wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
+ wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
+ wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
+ wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
+ wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
+ wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
+ wmi_cfg->flag1 = tg_cfg->atf_config;
+ wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
+ wmi_cfg->sched_params = tg_cfg->sched_params;
+ wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
+ wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
+}
+
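+/* Assemble and send WMI_INIT: the resource config TLV, an array of host
+ * memory chunk descriptors (the array TLV is emitted even when empty),
+ * and, unless the preferred hw mode is WMI_HOST_HW_MODE_MAX, a hw mode
+ * TLV followed by per-pdev band-to-mac frequency ranges.
+ */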
+static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
+ struct wmi_init_cmd_param *param)
+{
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct wmi_resource_config *cfg;
+ struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
+ struct wmi_pdev_band_to_mac *band_to_mac;
+ struct wlan_host_mem_chunk *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ int ret;
+ size_t len;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (param->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
+
+ cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct wlan_host_mem_chunk);
+
+ for (idx = 0; idx < param->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
+ host_mem_chunks[idx].size = param->mem_chunks[idx].len;
+ host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
+ param->mem_chunks[idx].req_id,
+ (u64)param->mem_chunks[idx].paddr,
+ param->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = param->num_mem_chunks;
+ len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
+
+ /* Host memory chunks array TLV; len is zero when there are no chunks */
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
+ hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*hw_mode) - TLV_HDR_SIZE);
+
+ hw_mode->hw_mode_index = param->hw_mode_id;
+ hw_mode->num_band_to_mac = param->num_band_to_mac;
+
+ ptr += sizeof(*hw_mode);
+
+ len = param->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < param->num_band_to_mac; idx++) {
+ band_to_mac = (void *)ptr;
+
+ band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BAND_TO_MAC) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+ band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
+ band_to_mac->start_freq =
+ param->band_to_mac[idx].start_freq;
+ band_to_mac->end_freq =
+ param->band_to_mac[idx].end_freq;
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_cmd_init(struct ath11k_base *ab)
+{
+ struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
+ struct wmi_init_cmd_param init_param;
+ struct target_resource_config config;
+
+ memset(&init_param, 0, sizeof(init_param));
+ memset(&config, 0, sizeof(config));
+
+ config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config.num_peers = TARGET_NUM_PEERS(DBS);
+ config.num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config.num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config.num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config.num_peers = TARGET_NUM_PEERS(SINGLE);
+ config.num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config.num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config.num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config.ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config.num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config.dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.vow_config = TARGET_VOW_CONFIG;
+ config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config.num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config.rx_batchmode = TARGET_RX_BATCHMODE;
+ config.peer_map_unmap_v2_support = 1;
+ config.twt_ap_pdev_count = 2;
+ config.twt_ap_sta_count = 1000;
+
+ memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
+
+ init_param.res_cfg = &wmi_sc->wlan_resource_config;
+ init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
+ init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
+ init_param.mem_chunks = wmi_sc->mem_chunks;
+
+ if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
+ init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ init_param.num_band_to_mac = ab->num_radios;
+
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+
+ return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = hw_mode_cap->phy_id_map;
+ while (phy_map) {
+ svc_rdy_ext->tot_phy_id++;
+ phy_map = phy_map >> 1;
+ }
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ while (i < svc_rdy_ext->n_hw_mode_caps) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = hw_mode_caps->hw_mode_id;
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ i++;
+ }
+
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath11k_hal_reg_capabilities_ext reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
+ ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+
+ memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
+ &reg_cap, sizeof(reg_cap));
+ }
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
+ u32 phy_id_map;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
+ svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
+
+ soc->num_radios = 0;
+ phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext->hw_caps,
+ svc_rdy_ext->hw_mode_caps,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->mac_phy_caps,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[soc->num_radios]);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+ return 0;
+}
+
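+/* Tag handler for the service ready ext event. The event contains three
+ * untagged WMI_TAG_ARRAY_STRUCT blobs in a fixed order, so the
+ * hw_mode_done/mac_phy_done/ext_hal_reg_done flags track which one is
+ * currently being parsed.
+ */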
+static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->param);
+ if (ret) {
+ ath11k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
+ svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ complete(&ab->wmi_ab.service_ready);
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+}
+
+static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+
+ vdev_rsp->vdev_id = ev->vdev_id;
+ vdev_rsp->requestor_id = ev->requestor_id;
+ vdev_rsp->resp_type = ev->resp_type;
+ vdev_rsp->status = ev->status;
+ vdev_rsp->chain_mask = ev->chain_mask;
+ vdev_rsp->smps_mode = ev->smps_mode;
+ vdev_rsp->mac_id = ev->mac_id;
+ vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+ vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+
+ kfree(tb);
+ return 0;
+}
+
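+/* Convert an array of firmware regulatory rules into host cur_reg_rule
+ * entries, unpacking the bit-packed freq/bw/power words with FIELD_GET.
+ * GFP_ATOMIC is used, presumably because this runs from the WMI event
+ * path; the caller owns (and must free) the returned buffer.
+ */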
+static struct cur_reg_rule
+*create_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_rule_struct *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ FIELD_GET(REG_RULE_START_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].end_freq =
+ FIELD_GET(REG_RULE_END_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].max_bw =
+ FIELD_GET(REG_RULE_MAX_BW,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].reg_power =
+ FIELD_GET(REG_RULE_REG_PWR,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].ant_gain =
+ FIELD_GET(REG_RULE_ANT_GAIN,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].flags =
+ FIELD_GET(REG_RULE_FLAGS,
+ wmi_reg_rule[count].flag_info);
+ }
+
+ return reg_rule_ptr;
+}
+
+static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
+ struct wmi_regulatory_rule_struct *wmi_reg_rule;
+ u32 num_2g_reg_rules, num_5g_reg_rules;
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
+ if (!chan_list_event_hdr) {
+ ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
+ reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;
+
+ if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
+ ath11k_warn(ab, "No regulatory rules available in the event info\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
+ REG_ALPHA2_LEN);
+ reg_info->dfs_region = chan_list_event_hdr->dfs_region;
+ reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+ reg_info->num_phy = chan_list_event_hdr->num_phy;
+ reg_info->phy_id = chan_list_event_hdr->phy_id;
+ reg_info->ctry_code = chan_list_event_hdr->country_id;
+ reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
+ if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
+ reg_info->status_code = REG_SET_CC_STATUS_PASS;
+ else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
+ reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
+ else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
+ reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
+ reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
+ reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
+ else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
+ reg_info->status_code = REG_SET_CC_STATUS_FAIL;
+
+ reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
+ reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
+ reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
+ reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;
+
+ num_2g_reg_rules = reg_info->num_2g_reg_rules;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
+ __func__, reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2g, reg_info->max_bw_2g,
+ reg_info->min_bw_5g, reg_info->max_bw_5g);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
+ num_2g_reg_rules, num_5g_reg_rules);
+
+ wmi_reg_rule =
+ (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
+ + sizeof(*chan_list_event_hdr)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2g_reg_rules) {
+ reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_2g_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (num_5g_reg_rules) {
+ wmi_reg_rule += num_2g_reg_rules;
+ reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_5g_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
+ u32 len, u32 *vdev_id,
+ u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+ *tx_status = ev->tx_status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct mgmt_rx_event_params *hdr)
+{
+ const void **tb;
+ const struct wmi_mgmt_rx_hdr *ev;
+ const u8 *frame;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_RX_HDR];
+ frame = tb[WMI_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ ath11k_warn(ab, "failed to fetch mgmt rx hdr");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = ev->pdev_id;
+ hdr->channel = ev->channel;
+ hdr->snr = ev->snr;
+ hdr->rate = ev->rate;
+ hdr->phy_mode = ev->phy_mode;
+ hdr->buf_len = ev->buf_len;
+ hdr->status = ev->status;
+ hdr->flags = ev->flags;
+ hdr->rssi = ev->rssi;
+ hdr->tsf_delta = ev->tsf_delta;
+ memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* Shift the sk_buff so its data starts at `frame` and covers exactly
+ * buf_len bytes: reset the length, extend up to the frame offset,
+ * pull that offset off the front, then extend by the payload length.
+ */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
+ ath11k_ce_byte_swap(skb->data, hdr->buf_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
+ u32 status)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+
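+ /* desc_id maps the completion back to the msdu queued in the
+ * txmgmt idr; look it up and remove it under the idr lock.
+ */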
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, desc_id);
+
+ if (!msdu) {
+ ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ WARN_ON_ONCE(atomic_read(&ar->num_pending_mgmt_tx) == 0);
+ atomic_dec(&ar->num_pending_mgmt_tx);
+
+ return 0;
+}
+
+static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_event_scan_started(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_RUNNING;
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ break;
+ }
+}
+
+static const char *
+ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
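+ /* Map a channel center frequency to a flat index across all advertised
+ * bands, matching the layout of ar->survey. If the frequency is not
+ * found, the returned index equals the total channel count, so callers
+ * must bounds-check it.
+ */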
+static int freq_to_idx(struct ath11k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = ev->key_idx;
+ arg->key_flags = ev->key_flags;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = ev->vdev_id;
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = src->chan_nf;
+ dst->tx_frame_count = src->tx_frame_count;
+ dst->rx_frame_count = src->rx_frame_count;
+ dst->rx_clear_count = src->rx_clear_count;
+ dst->cycle_count = src->cycle_count;
+ dst->phy_err_count = src->phy_err_count;
+ dst->chan_tx_power = src->chan_tx_pwr;
+}
+
+static void
+ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = src->comp_queued;
+ dst->comp_delivered = src->comp_delivered;
+ dst->msdu_enqued = src->msdu_enqued;
+ dst->mpdu_enqued = src->mpdu_enqued;
+ dst->wmm_drop = src->wmm_drop;
+ dst->local_enqued = src->local_enqued;
+ dst->local_freed = src->local_freed;
+ dst->hw_queued = src->hw_queued;
+ dst->hw_reaped = src->hw_reaped;
+ dst->underrun = src->underrun;
+ dst->tx_abort = src->tx_abort;
+ dst->mpdus_requed = src->mpdus_requed;
+ dst->tx_ko = src->tx_ko;
+ dst->data_rc = src->data_rc;
+ dst->self_triggers = src->self_triggers;
+ dst->sw_retry_failure = src->sw_retry_failure;
+ dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
+ dst->pdev_cont_xretry = src->pdev_cont_xretry;
+ dst->pdev_tx_timeout = src->pdev_tx_timeout;
+ dst->pdev_resets = src->pdev_resets;
+ dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
+ dst->phy_underrun = src->phy_underrun;
+ dst->txop_ovf = src->txop_ovf;
+}
+
+static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
+ dst->status_rcvd = src->status_rcvd;
+ dst->r0_frags = src->r0_frags;
+ dst->r1_frags = src->r1_frags;
+ dst->r2_frags = src->r2_frags;
+ dst->r3_frags = src->r3_frags;
+ dst->htt_msdus = src->htt_msdus;
+ dst->htt_mpdus = src->htt_mpdus;
+ dst->loc_msdus = src->loc_msdus;
+ dst->loc_mpdus = src->loc_mpdus;
+ dst->oversize_amsdu = src->oversize_amsdu;
+ dst->phy_errs = src->phy_errs;
+ dst->phy_err_drop = src->phy_err_drop;
+ dst->mpdu_errs = src->mpdu_errs;
+}
+
+static void
+ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
+ struct ath11k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = src->vdev_id;
+ dst->beacon_snr = src->beacon_snr;
+ dst->data_snr = src->data_snr;
+ dst->num_rx_frames = src->num_rx_frames;
+ dst->num_rts_fail = src->num_rts_fail;
+ dst->num_rts_success = src->num_rts_success;
+ dst->num_rx_err = src->num_rx_err;
+ dst->num_rx_discard = src->num_rx_discard;
+ dst->num_tx_not_acked = src->num_tx_not_acked;
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] = src->num_tx_frames[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] = src->tx_rate_history[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
+}
+
+static void
+ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
+ struct ath11k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = src->vdev_id;
+ dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
+ dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
+}
+
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_stats_event *ev;
+ const void *data;
+ int i, ret;
+ u32 len = skb->len;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_STATS_EVENT];
+ data = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ ath11k_warn(ab, "failed to fetch update stats ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
+ ev->pdev_id,
+ ev->num_pdev_stats, ev->num_vdev_stats,
+ ev->num_bcn_stats);
+
+ stats->pdev_id = ev->pdev_id;
+ stats->stats_id = 0;
+
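+ /* The stats payload is consumed sequentially: pdev stats first, then
+ * vdev stats, then beacon stats, advancing data/len by each fixed
+ * struct size and bailing out if the remaining length is too short.
+ */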
+ for (i = 0; i < ev->num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath11k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < ev->num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+ struct ath11k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < ev->num_bcn_stats; i++) {
+ const struct wmi_bcn_stats *src;
+ struct ath11k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_bcn_stats(src, dst);
+ list_add_tail(&dst->list, &stats->bcn);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static void
+ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requed", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
+ u8 *vif_macaddr;
+ int i;
+
+ /* VDEV stats include all the active VDEVs of other PDEVs as well;
+ * ignore those not part of the requested PDEV.
+ */
+ if (!arvif)
+ return;
+
+ vif_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_bcn *bcn,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
+ u8 *vdev_macaddr;
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
+ bcn->vdev_id);
+ return;
+ }
+
+ vdev_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vdev_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ const struct ath11k_fw_stats_pdev *pdev;
+ const struct ath11k_fw_stats_vdev *vdev;
+ const struct ath11k_fw_stats_bcn *bcn;
+ size_t num_bcn;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats_id == WMI_REQUEST_PDEV_STAT) {
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath11k_warn(ar->ab, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_VDEV_STAT) {
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list)
+ ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_BCN_STAT) {
+ num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath11k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list)
+ ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
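+ /* Make sure the buffer is always NUL terminated, even if it filled up. */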
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
+{
+ /* Try to send pending beacons first; they take priority. */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ return alpha[0] == '0' && alpha[1] == '0';
+}
+
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct cur_regulatory_info *reg_info = NULL;
+ struct ieee80211_regdomain *regd = NULL;
+ bool intersect = false;
+ int ret = 0, pdev_idx;
+ struct ath11k *ar;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info) {
+ ret = -ENOMEM;
+ goto fallback;
+ }
+
+ ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
+ if (ret) {
+ ath11k_warn(ab, "failed to extract regulatory info from received event\n");
+ goto fallback;
+ }
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * fw retains the current regd. Log the failure and
+ * return from here.
+ */
+ ath11k_warn(ab, "failed to set the requested country regulatory setting\n");
+ goto mem_free;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ if (pdev_idx >= ab->num_radios)
+ goto fallback;
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto mem_free;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
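+ /* base_lock serializes updates to the per-pdev regd pointers. */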
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once mac is registered, ar is valid and all CC events from
+ * fw are currently considered to be received due to user
+ * requests. Free the previously built regd before assigning
+ * the newly generated one to ar; kfree itself handles a NULL
+ * pointer.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar are not expected. But we
+ * can still clear any previously stored default_regd if we
+ * are receiving this event for the same radio by mistake.
+ * kfree itself handles a NULL pointer.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock(&ab->base_lock);
+
+ goto mem_free;
+
+fallback:
+ /* Fall back to the older regd (by sending the previous country
+ * setting again) if fw has succeeded and we failed to process here.
+ * The regdomain should be uniform across driver and fw. Since the
+ * FW has processed the command and sent a success status, we expect
+ * this function to succeed as well. If it doesn't, CTRY needs to be
+ * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but should still be handled */
+ WARN_ON(1);
+mem_free:
+ if (reg_info) {
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+ kfree(reg_info);
+ }
+ return ret;
+}
+
+static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event *fixed_param;
+ struct wmi_mac_addr *addr_list;
+ struct ath11k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ fixed_param = (struct wmi_ready_event *)ptr;
+ ab->wlan_init_status = fixed_param->status;
+ rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
+ ab->wmi_ready = true;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct wmi_mac_addr *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
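+ /* Per-pdev MAC addresses apply only when there are multiple
+ * radios and fw advertised enough extra addresses for them.
+ */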
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+
+ if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath11k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ /* TODO: Do we need to validate whether ath11k_peer_find() returns NULL,
+ * and why is this needed when there is an HTT event for peer delete?
+ */
+}
+
+static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath11k *ar;
+ u32 status;
+
+ if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath11k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ status = vdev_start_resp.status;
+
+ if (WARN_ON_ONCE(status)) {
+ ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath11k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ u32 vdev_id, tx_status;
+
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
+ &vdev_id, &tx_status) != 0) {
+ ath11k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+}
+
+static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
+static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct mgmt_rx_event_params rx_ev = {0};
+ struct ath11k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
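+ /* Firmware reports SNR; approximate the absolute signal level by
+ * adding the assumed default noise floor.
+ */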
+ status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
+ * Don't clear that. Also, FW delivers broadcast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ /* TODO: Pending handle beacon implementation
+ *if (ieee80211_is_beacon(hdr->frame_control))
+ * ath11k_mac_handle_beacon(ar, skb);
+ */
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+exit:
+ rcu_read_unlock();
+}
+
+static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_ABORTING &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_scan_event scan_ev = {0};
+
+ if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath11k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, e.g. during interface teardown,
+ * the interface will not be found in active interfaces.
+ * Rather, in such scenarios, iterate over the active pdevs to
+ * search for the 'ar' whose scan is ABORTING and whose aborting
+ * scan's vdev id matches this event info.
+ */
+ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
+ ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
+ else
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
+ scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
+ scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
+ ath11k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (scan_ev.event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath11k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath11k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath11k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath11k_warn(ab, "received scan start failure event\n");
+ ath11k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath11k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath11k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
+ arg.mac_addr);
+
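+ /* Report a fixed count of lost frames so mac80211 runs its
+ * low-ack handling for this station.
+ */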
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath11k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
+ ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (roam_ev.reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ /* TODO: Pending beacon miss and connection_loss_work
+ * implementation
+ * ath11k_mac_handle_beacon_miss(ar, vdev_id);
+ */
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {0};
+ struct ath11k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If FW provides the MAC clock frequency in MHz, override the
+ * initialized HW channel counters frequency value.
+ */
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = ch_info_ev.noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
+ survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath11k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
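+ /* The cycle counters arrive as two 32-bit halves; stitch each pair
+ * back into a 64-bit value.
+ */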
+ busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
+ bss_ch_info_ev.rx_clear_count_low;
+
+ total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
+ bss_ch_info_ev.cycle_count_low;
+
+ tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
+ bss_ch_info_ev.tx_cycle_count_low;
+
+ rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_cycle_count_low;
+
+ rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_bss_cycle_count_low;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, bss_ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = bss_ch_info_ev.noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath11k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath11k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_service_available_event *ev;
+ int ret;
+ int i, j;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch svc available ev");
+ kfree(tb);
+ return;
+ }
+
+ /* TODO: Use wmi_service_segment_offset information to get the service
+ * especially when more services are advertised in multiple service
+ * available events.
+ */
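+ /* Extended service bits start at WMI_MAX_SERVICE; each 32-bit
+ * segment word carries WMI_AVAIL_SERVICE_BITS_IN_SIZE32 service
+ * bits, scanned here one word at a time.
+ */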
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
+ ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+
+ kfree(tb);
+}
+
+static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath11k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+}
+
+static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ ath11k_debug_fw_stats_process(ab, skb);
+}
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from the FW when the scanned
+ * frequency is not part of the BDF CTL (Conformance Test Limits) table entries.
+ */
+static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev ctl failsafe check ev status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1, the FW will max out the transmit
+ * power to 10 dBm; otherwise the CTL power entry in the BDF is picked up.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath11k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
+ const struct wmi_pdev_csa_switch_ev *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath11k_vif *arvif;
+
+ /* Finish CSA once the switch count reaches zero */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < ev->num_vdevs; i++) {
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
+ if (arvif->is_up && arvif->vif->csa_active)
+ ieee80211_csa_finish(arvif->vif);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_csa_switch_ev *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_radar_ev *ev;
+ struct ath11k *ar;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+
+exit:
+ kfree(tb);
+}
+
+static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath11k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath11k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath11k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath11k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath11k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath11k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath11k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath11k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath11k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath11k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath11k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath11k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath11k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath11k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath11k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath11k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath11k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath11k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath11k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ /* Add unsupported events here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_TWT_ENABLE_EVENTID:
+ case WMI_TWT_DISABLE_EVENTID:
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ /* TODO: Add remaining events */
+ default:
+ ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
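The FIELD_GET() in the dispatcher above keeps only the low 24 bits of the header word, since WMI_CMD_HDR_CMD_ID is GENMASK(23, 0) in wmi.h; a standalone equivalent without the kernel bitfield helpers:

#include <stdint.h>
#include <stdio.h>

/* mirrors WMI_CMD_HDR_CMD_ID = GENMASK(23, 0) */
#define CMD_HDR_CMD_ID_MASK 0x00ffffffu

int main(void)
{
	uint32_t hdr = 0xab003001;	/* illustrative header word */
	uint32_t id  = hdr & CMD_HDR_CMD_ID_MASK;

	printf("event id 0x%x\n", id);	/* 0x3001, i.e. WMI_SCAN_EVENTID */
	return 0;
}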
+
+static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
+ u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+
+ return 0;
+}
+
+static int
+ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
+ arg_len = sizeof(u32) * ut_cmd.num_args;
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < ut_cmd.num_args; i++)
+ ut_cmd_args[i] = test_args[i];
+
+ /* log the request before sending; on send failure the skb (and the
+ * cmd header inside it) is freed, so it must not be touched afterwards
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI unit test: module %d vdev %d n_args %d token %d\n",
+ cmd->module_id, cmd->vdev_id, cmd->num_args,
+ cmd->diag_token);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_simulate_radar(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+ /* Currently we can pass segment id (b0 - b1), chirp (b2) and
+ * freq offset (b3 - b10) to the unit test. For simulation
+ * purposes setting this to 0 is valid; see the packing sketch
+ * after this function.
+ */
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = arvif->vdev_id;
+ wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
+ wmi_ut.num_args = DFS_MAX_TEST_ARGS;
+ wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
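The bit layout named in the comment inside ath11k_wmi_simulate_radar() could be packed like this; pack_radar_param() is a hypothetical helper for illustration, not an ath11k function:

#include <stdint.h>

/* hypothetical helper packing the DFS_TEST_RADAR_PARAM layout:
 * segment id in b0 - b1, chirp in b2, freq offset in b3 - b10
 */
static inline uint32_t pack_radar_param(uint32_t segment_id, uint32_t chirp,
					uint32_t freq_offset)
{
	uint32_t param = 0;

	param |= (segment_id & 0x3);		/* b0 - b1 */
	param |= (chirp & 0x1) << 2;		/* b2 */
	param |= (freq_offset & 0xff) << 3;	/* b3 - b10 */

	return param;
}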
+
+int ath11k_wmi_connect(struct ath11k_base *ab)
+{
+ u32 i;
+ u8 wmi_ep_count;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > MAX_RADIOS)
+ return -EINVAL;
+
+ for (i = 0; i < wmi_ep_count; i++)
+ ath11k_connect_pdev_htc_service(ab, i);
+
+ return 0;
+}
+
+static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi_handle;
+
+ if (pdev_id >= MAX_RADIOS)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath11k_wmi_attach(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath11k_wmi_detach(struct ath11k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath11k_wmi_pdev_detach(ab, i);
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
new file mode 100644
index 000000000000..1fde15c762ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -0,0 +1,4764 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_WMI_H
+#define ATH11K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+struct ath11k_base;
+struct ath11k;
+struct ath11k_fw_stats;
+
+#define PSOC_HOST_MAX_NUM_SS (8)
+
+/* defines to set packet extension (PET) values, which can be 0, 8 or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ u32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ u32 header;
+ u8 value[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
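Per the masks above, every TLV starts with a 32-bit header carrying the length in bits 0-15 and the tag in bits 16-31; a self-contained sketch of packing and unpacking it without FIELD_PREP()/FIELD_GET():

#include <stdint.h>
#include <stdio.h>

/* mirrors WMI_TLV_LEN = GENMASK(15, 0) and WMI_TLV_TAG = GENMASK(31, 16) */
static uint32_t tlv_hdr(uint32_t tag, uint32_t len)
{
	return ((tag & 0xffff) << 16) | (len & 0xffff);
}

int main(void)
{
	uint32_t hdr = tlv_hdr(16, 12);	/* tag 16 is WMI_TAG_ARRAY_UINT32 */

	printf("tag %u len %u\n", hdr >> 16, hdr & 0xffff);	/* tag 16 len 12 */
	return 0;
}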
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH11K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+/*
+ * HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and the 3rd on the other band.
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G and
+ * 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to select the preferred HW mode
+ * from the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
+
+enum {
+ WMI_HOST_WLAN_2G_CAP = 0x1,
+ WMI_HOST_WLAN_5G_CAP = 0x2,
+ WMI_HOST_WLAN_2G_5G_CAP = 0x3,
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+#define WMI_CMD_UNSUPPORTED 0
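A quick standalone check of the grouping scheme: each group macro carves out a 4096-id block starting at (grp_id << 12) | 0x1, and enum members simply increment from there, as the values in the enums below confirm:

#include <stdio.h>

#define TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)	/* same as WMI_TLV_CMD() */

int main(void)
{
	printf("0x%x\n", TLV_CMD(0x3));	/* WMI_START_SCAN_CMDID = 0x3001 */
	printf("0x%x\n", TLV_CMD(0x3) + 1);	/* WMI_STOP_SCAN_CMDID = 0x3002 */
	printf("0x%x\n", TLV_CMD(0x4));	/* WMI_PDEV_SET_REGDOMAIN_CMDID = 0x4001 */
	return 0;
}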
+
+enum wmi_tlv_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_TX_OFDMA_CPLEN,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+
+};
+
+/** Enum list of TLV Tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ /* TODO add all the missing cmds */
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+
+ WMI_MAX_EXT_SERVICE
+};
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_2G 0
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_5G 1
+#define WMI_NUM_SUPPORTED_BAND_MAX 2
+
+#define WMI_PEER_MIMO_PS_STATE 0x1
+#define WMI_PEER_AMPDU 0x2
+#define WMI_PEER_AUTHORIZE 0x3
+#define WMI_PEER_CHWIDTH 0x4
+#define WMI_PEER_NSS 0x5
+#define WMI_PEER_USE_4ADDR 0x6
+#define WMI_PEER_MEMBERSHIP 0x7
+#define WMI_PEER_USERPOS 0x8
+#define WMI_PEER_CRIT_PROTO_HINT_ENABLED 0x9
+#define WMI_PEER_TX_FAIL_CNT_THR 0xA
+#define WMI_PEER_SET_HW_RETRY_CTS2S 0xB
+#define WMI_PEER_IBSS_ATIM_WINDOW_LENGTH 0xC
+#define WMI_PEER_PHYMODE 0xD
+#define WMI_PEER_USE_FIXED_PWR 0xE
+#define WMI_PEER_PARAM_FIXED_RATE 0xF
+#define WMI_PEER_SET_MU_WHITELIST 0x10
+#define WMI_PEER_SET_MAX_TX_RATE 0x11
+#define WMI_PEER_SET_MIN_TX_RATE 0x12
+#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+struct wmi_host_pdev_band_to_mac {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath11k_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+struct ath11k_service_ext_param {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath11k_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath11k_hw_mode_caps {
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH11K_11B_SUPPORT BIT(0)
+#define ATH11K_11G_SUPPORT BIT(1)
+#define ATH11K_11A_SUPPORT BIT(2)
+#define ATH11K_11N_SUPPORT BIT(3)
+#define ATH11K_11AC_SUPPORT BIT(4)
+#define ATH11K_11AX_SUPPORT BIT(5)
+
+struct ath11k_hal_reg_capabilities_ext {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct wlan_host_mem_chunk {
+ u32 tlv_header;
+ u32 req_id;
+ u32 ptr;
+ u32 size;
+} __packed;
+
+struct wmi_host_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct wmi_init_cmd_param {
+ u32 tlv_header;
+ struct target_resource_config *res_cfg;
+ u8 num_mem_chunks;
+ struct wmi_host_mem_chunk *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct wmi_pdev_band_to_mac {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+} __packed;
+
+struct wmi_pdev_set_hw_mode_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 hw_mode_index;
+ u32 num_band_to_mac;
+} __packed;
+
+struct wmi_ppe_threshold {
+ u32 numss_m1; /* NSS - 1 */
+ union {
+ u32 ru_count;
+ u32 ru_mask;
+ } __packed;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct wmi_abi_version {
+ u32 abi_version_0;
+ u32 abi_version_1;
+ u32 abi_version_ns_0;
+ u32 abi_version_ns_1;
+ u32 abi_version_ns_2;
+ u32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ u32 tlv_header;
+ struct wmi_abi_version host_abi_vers;
+ u32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_resource_config {
+ u32 tlv_header;
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 flag1;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 alloc_frag_desc_for_data_pkt;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 max_num_dbs_scan_duty_cycle;
+ u32 max_num_group_keys;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+} __packed;
+
+struct wmi_service_ready_event {
+ u32 fw_build_vers;
+ struct wmi_abi_version fw_abi_vers;
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 num_mem_reqs;
+ u32 max_num_scan_channels;
+ u32 hw_bd_id;
+ u32 hw_bd_info[HW_BD_INFO_SIZE];
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+} __packed;
+
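+/* Illustrative sketch, not part of the original patch: decoding the
+ * txrx_chainmask layout documented in struct wmi_service_ready_event,
+ * assuming <linux/bitfield.h> is available. These mask and helper
+ * names are hypothetical and do not exist in the patch.
+ */
+#define WMI_TXRX_CHAINMASK_2G_TX GENMASK(7, 0)
+#define WMI_TXRX_CHAINMASK_2G_RX GENMASK(15, 8)
+#define WMI_TXRX_CHAINMASK_5G_TX GENMASK(23, 16)
+#define WMI_TXRX_CHAINMASK_5G_RX GENMASK(31, 24)
+
+static inline u8 wmi_txrx_chainmask_5g_tx(u32 txrx_chainmask)
+{
+ /* FIELD_GET shifts the masked bits down to bit 0 */
+ return FIELD_GET(WMI_TXRX_CHAINMASK_5G_TX, txrx_chainmask);
+}
+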
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
+
+struct wmi_service_ready_ext_event {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct wmi_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 fw_build_vers_ext;
+ u32 max_nlo_ssids;
+ u32 max_bssid_indicator;
+ u32 he_cap_info_ext;
+} __packed;
+
+struct wmi_soc_mac_phy_hw_mode_caps {
+ u32 num_hw_modes;
+ u32 num_chainmask_tables;
+} __packed;
+
+struct wmi_hw_mode_capabilities {
+ u32 tlv_header;
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+
+struct wmi_mac_phy_capabilities {
+ u32 hw_mode_id;
+ u32 pdev_id;
+ u32 phy_id;
+ u32 supported_flags;
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 max_bw_supported_2g;
+ u32 ht_cap_info_2g;
+ u32 vht_cap_info_2g;
+ u32 vht_supp_mcs_2g;
+ u32 he_cap_info_2g;
+ u32 he_supp_mcs_2g;
+ u32 tx_chain_mask_2g;
+ u32 rx_chain_mask_2g;
+ u32 max_bw_supported_5g;
+ u32 ht_cap_info_5g;
+ u32 vht_cap_info_5g;
+ u32 vht_supp_mcs_5g;
+ u32 he_cap_info_5g;
+ u32 he_supp_mcs_5g;
+ u32 tx_chain_mask_5g;
+ u32 rx_chain_mask_5g;
+ u32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ u32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct wmi_ppe_threshold he_ppet2g;
+ struct wmi_ppe_threshold he_ppet5g;
+ u32 chainmask_table_id;
+ u32 lmac_id;
+ u32 he_cap_info_2g_ext;
+ u32 he_cap_info_5g_ext;
+ u32 he_cap_info_internal;
+} __packed;
+
+struct wmi_hal_reg_capabilities_ext {
+ u32 tlv_header;
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+} __packed;
+
+struct wmi_soc_hal_reg_capabilities {
+ u32 num_phy;
+} __packed;
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
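+/* Illustrative sketch, not part of the original patch: packing a
+ * 6-byte MAC address into the two-word form above, assuming the
+ * little-endian byte order implied by the addr[] overlay. The helper
+ * name is hypothetical.
+ */
+static inline void wmi_mac_addr_pack(struct wmi_mac_addr *dst,
+ const u8 *mac)
+{
+ dst->word0 = (u32)mac[0] | ((u32)mac[1] << 8) |
+ ((u32)mac[2] << 16) | ((u32)mac[3] << 24);
+ /* the upper halfword of word1 is unused */
+ dst->word1 = (u32)mac[4] | ((u32)mac[5] << 8);
+}
+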
+struct wmi_ready_event {
+ struct wmi_abi_version fw_abi_vers;
+ struct wmi_mac_addr mac_addr;
+ u32 status;
+ u32 num_dscp_table;
+ u32 num_extra_mac_addr;
+ u32 num_total_peers;
+ u32 num_extra_peers;
+} __packed;
+
+struct wmi_service_available_event {
+ u32 wmi_service_segment_offset;
+ u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
+
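+/* Illustrative sketch, not part of the original patch: testing whether
+ * an extended service id is advertised by a service-available segment,
+ * which covers 128 service bits starting at the segment offset. The
+ * helper name is hypothetical.
+ */
+static inline bool
+ath11k_wmi_ext_service_enabled(const struct wmi_service_available_event *ev,
+ u32 service_id)
+{
+ u32 rel = service_id - ev->wmi_service_segment_offset;
+
+ if (rel >= WMI_SERVICE_SEGMENT_BM_SIZE32 * 32)
+ return false;
+
+ /* one u32 per 32 service bits within the segment */
+ return ev->wmi_service_segment_bitmap[rel / 32] & BIT(rel % 32);
+}
+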
+struct ath11k_pdev_wmi {
+ struct ath11k_wmi_base *wmi_ab;
+ enum ath11k_htc_ep_id eid;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 rx_decap_mode;
+};
+
+struct vdev_create_params {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+};
+
+struct wmi_vdev_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+ u32 num_cfg_txrx_streams;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_vdev_txrx_streams {
+ u32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+ struct wmi_mac_addr trans_bssid;
+ u32 profile_idx;
+ u32 profile_num;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+
+struct wmi_ssid {
+ u32 ssid_len;
+ u32 ssid[8];
+} __packed;
+
+#define ATH11K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 requestor_id;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u32 flags;
+ struct wmi_ssid ssid;
+ u32 bcn_tx_rate;
+ u32 bcn_txpower;
+ u32 num_noa_descriptors;
+ u32 disable_hw_ack;
+ u32 preferred_tx_streams;
+ u32 preferred_rx_streams;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+#define WMI_MAC_MAX_SSID_LENGTH 32
+struct mac_ssid {
+ u8 length;
+ u8 mac_ssid[WMI_MAC_MAX_SSID_LENGTH];
+} __packed;
+
+struct wmi_p2p_noa_descriptor {
+ u32 type_count;
+ u32 duration;
+ u32 interval;
+ u32 start_time;
+};
+
+struct channel_param {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+} __packed;
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+static inline const char *ath11k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+};
+
+struct peer_create_params {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct peer_delete_params {
+ u8 vdev_id;
+};
+
+struct peer_flush_params {
+ u32 peer_tid_bitmap;
+ u8 vdev_id;
+};
+
+struct pdev_set_regdomain_params {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct rx_reorder_queue_remove_params {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
+
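+/* Illustrative sketch, not part of the original patch: pulling a
+ * regulatory rule's frequency range (MHz) out of a packed word with
+ * the masks above, assuming <linux/bitfield.h>. The freq_info
+ * parameter and helper name are hypothetical.
+ */
+static inline void wmi_reg_rule_freq_range(u32 freq_info,
+ u16 *start_mhz, u16 *end_mhz)
+{
+ *start_mhz = FIELD_GET(REG_RULE_START_FREQ, freq_info);
+ *end_mhz = FIELD_GET(REG_RULE_END_FREQ, freq_info);
+}
+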
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HECAP_PHYDWORD_0 0
+#define HECAP_PHYDWORD_1 1
+#define HECAP_PHYDWORD_2 2
+
+#define HECAP_PHY_SU_BFER BIT(31)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(22)
+#define HECAP_PHY_UL_MUOFDMA BIT(23)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFER, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFEE, hecap_phy[HECAP_PHYDWORD_1])
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_MU_BFER, hecap_phy[HECAP_PHYDWORD_1])
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUMIMO, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUOFDMA, hecap_phy[HECAP_PHYDWORD_0])
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
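+/* Illustrative sketch, not part of the original patch: deriving
+ * HE_MODE_* beamforming flags from a HE PHY capability dword array
+ * via the HECAP_PHY_*_GET accessors above. The mapping shown is an
+ * assumption, not the driver's actual policy.
+ */
+static inline u32 wmi_he_mode_from_phycap(const u32 *hecap_phy)
+{
+ u32 he_mode = 0;
+
+ if (HECAP_PHY_SUBFMR_GET(hecap_phy))
+ he_mode |= HE_MODE_SU_TX_BFER;
+ if (HECAP_PHY_SUBFME_GET(hecap_phy))
+ he_mode |= HE_MODE_SU_TX_BFEE;
+ if (HECAP_PHY_MUBFMR_GET(hecap_phy))
+ he_mode |= HE_MODE_MU_TX_BFER;
+ if (HECAP_PHY_ULMUMIMO_GET(hecap_phy))
+ he_mode |= HE_MODE_UL_MUMIMO;
+
+ return he_mode;
+}
+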
+struct pdev_params {
+ u32 param_id;
+ u32 param_value;
+};
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_type;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 queue_ptr_lo;
+ u32 queue_ptr_hi;
+ u32 queue_no;
+ u32 ba_window_size_valid;
+ u32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid_mask;
+} __packed;
+
+struct gpio_config_params {
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+enum wmi_gpio_type {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN
+};
+
+enum wmi_gpio_intr_type {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_gpio_config_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+struct gpio_output_params {
+ u32 gpio_num;
+ u32 set;
+};
+
+struct wmi_gpio_output_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 set;
+};
+
+struct set_fwtest_params {
+ u32 arg;
+ u32 value;
+};
+
+struct wmi_fwtest_set_param_cmd_param {
+ u32 tlv_header;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct wmi_pdev_set_param_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ u32 tlv_header;
+ /* ref wmi_bss_chan_info_req_type */
+ u32 req_type;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 reg_domain;
+ u32 reg_domain_2g;
+ u32 reg_domain_5g;
+ u32 conformance_test_limit_2g;
+ u32 conformance_test_limit_5g;
+ u32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SCAN_MAX_NUM_SSID 10
+#define WLAN_SCAN_MAX_NUM_BSSID 10
+#define WLAN_SCAN_MAX_NUM_CHANNELS 40
+
+#define WLAN_SSID_MAX_LEN 32
+
+struct element_info {
+ u32 len;
+ u8 *ptr;
+};
+
+struct wlan_ssid {
+ u8 length;
+ u8 ssid[WLAN_SSID_MAX_LEN];
+};
+
+#define WMI_IE_BITMAP_SIZE 8
+
+#define WMI_SCAN_MAX_NUM_SSID 0x0A
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be rejected by some firmware revisions,
+ * which then report scan completion with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ u32 tlv_header;
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 ie_len;
+ u32 n_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ u32 num_vendor_oui;
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active_2g;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
+#define WMI_SCAN_DWELL_MODE_SHIFT 21
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
+
+#define WMI_SCAN_SET_DWELL_MODE(flag, mode) \
+ ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
+ WMI_SCAN_DWELL_MODE_MASK))
+
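+/* Illustrative usage, not part of the original patch: stamping an
+ * adaptive dwell mode into a scan command's control flags with the
+ * macro above. The helper name is hypothetical.
+ */
+static inline u32 wmi_scan_flags_with_dwell_mode(u32 scan_ctrl_flags,
+ u32 mode)
+{
+ WMI_SCAN_SET_DWELL_MODE(scan_ctrl_flags, mode);
+ return scan_ctrl_flags;
+}
+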
+struct scan_req_params {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ union {
+ struct {
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ };
+ u32 scan_events;
+ };
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ union {
+ struct {
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ };
+ u32 scan_flags;
+ };
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+ u32 notify_scan_events;
+ struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct element_info extraie;
+ struct element_info htcap;
+ struct element_info vhtcap;
+};
+
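+/* Illustrative sketch, not part of the original patch: opting into the
+ * common scan lifecycle notifications through the anonymous bitfield
+ * overlay in struct scan_req_params. The helper name is hypothetical.
+ */
+static inline void scan_req_enable_basic_events(struct scan_req_params *p)
+{
+ p->scan_ev_started = 1;
+ p->scan_ev_completed = 1;
+ p->scan_ev_bss_chan = 1;
+ p->scan_ev_foreign_chan = 1;
+ p->scan_ev_start_failed = 1;
+}
+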
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is triggered by the host
+ */
+#define ATH11K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
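+/* Illustrative sketch, not part of the original patch: mapping a
+ * cancel request type onto the stop-scan flag values above; the
+ * pairing shown is an assumption.
+ */
+static inline u32 wmi_scan_stop_flag(enum scan_cancel_req_type req_type)
+{
+ switch (req_type) {
+ case WLAN_SCAN_CANCEL_SINGLE:
+ return WMI_SCAN_STOP_ONE;
+ case WLAN_SCAN_CANCEL_VDEV_ALL:
+ return WMI_SCN_STOP_VAP_ALL;
+ case WLAN_SCAN_CANCEL_PDEV_ALL:
+ default:
+ return WMI_SCAN_STOP_ALL;
+ }
+}
+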
+struct scan_cancel_param {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_bcn_send_from_host_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 data_len;
+ union {
+ u32 frag_ptr;
+ u32 frag_ptr_lo;
+ };
+ u32 frame_ctrl;
+ u32 dtim_flag;
+ u32 bcn_antenna;
+ u32 frag_ptr_hi;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct wmi_channel {
+ u32 tlv_header;
+ u32 mhz;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 info;
+ u32 reg_info_1;
+ u32 reg_info_2;
+} __packed;
+
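+/* Illustrative sketch, not part of the original patch: packing the
+ * regulatory power fields of struct wmi_channel with FIELD_PREP,
+ * assuming <linux/bitfield.h>. Parameter and helper names are
+ * hypothetical.
+ */
+static inline void wmi_channel_set_reg_info(struct wmi_channel *chan,
+ u8 min_pwr, u8 max_pwr,
+ u8 max_reg_pwr, u8 ant_max)
+{
+ chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR, min_pwr) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, max_pwr) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, max_reg_pwr);
+ chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, ant_max);
+}
+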
+struct wmi_mgmt_params {
+ void *tx_frame;
+ u16 frm_len;
+ u8 vdev_id;
+ u16 chanfreq;
+ void *pdata;
+ u16 desc_id;
+ u8 *macaddr;
+ void *qdf_ctx;
+};
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
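+/* Illustrative sketch, not part of the original patch: composing an
+ * SMPS parameter word from the masks above. The (id << 29) | value
+ * encoding is an assumption drawn from the mask and shift names.
+ */
+static inline u32 wmi_smps_param_encode(u32 param_id, u32 value)
+{
+ return (value & WMI_SMPS_MASK_LOWER_16BITS) |
+ ((param_id & WMI_SMPS_MASK_UPPER_3BITS) <<
+ WMI_SMPS_PARAM_VALUE_SHIFT);
+}
+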
+#define ATH11K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH11K_WMI_FW_HANG_DELAY 0
+
+/* type: 0 = unused, 1 = assert, 2 = do not respond to the detect
+ * command; delay_time_ms: delay before the simulated hang, in ms
+ */
+
+struct wmi_force_fw_hang_cmd {
+ u32 tlv_header;
+ u32 type;
+ u32 delay_time_ms;
+};
+
+struct wmi_vdev_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = BIT(0),
+ WMI_REQUEST_AP_STAT = BIT(1),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCNFLT_STAT = BIT(4),
+ WMI_REQUEST_VDEV_RATE_STAT = BIT(5),
+ WMI_REQUEST_INST_STAT = BIT(6),
+ WMI_REQUEST_MIB_STAT = BIT(7),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_CONGESTION_STAT = BIT(9),
+ WMI_REQUEST_PEER_EXTD_STAT = BIT(10),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_BCN_STAT_RESET = BIT(12),
+ WMI_REQUEST_PEER_EXTD2_STAT = BIT(13),
+};
+
+struct wmi_request_stats_cmd {
+ u32 tlv_header;
+ enum wmi_stats_id stats_id;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+struct wmi_bcn_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 tim_ie_offset;
+ u32 buf_len;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u32 csa_event_bitmap;
+ u32 mbssid_ie_offset;
+ u32 esp_ie_offset;
+} __packed;
+
+struct wmi_key_seq_counter {
+ u32 key_seq_counter_l;
+ u32 key_seq_counter_h;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u32 is_group_key_id_valid;
+ u32 group_key_id;
+
+ /* Followed by key_data containing key followed by
+ * tx mic and then rx mic
+ */
+} __packed;
+
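+/* Illustrative sketch, not part of the original patch: sizing the
+ * command buffer when key_data (key, then tx mic, then rx mic) trails
+ * the fixed TLV, rounded up to the 4-byte TLV alignment. Assumes
+ * roundup() from the kernel headers; the helper name is hypothetical.
+ */
+static inline size_t wmi_install_key_cmd_size(u32 key_len,
+ u32 key_txmic_len,
+ u32 key_rxmic_len)
+{
+ return sizeof(struct wmi_vdev_install_key_cmd) +
+ roundup(key_len + key_txmic_len + key_rxmic_len, sizeof(u32));
+}
+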
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct peer_assoc_params {
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ struct ath11k_ppe_threshold peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ u32 num_peer_legacy_rates;
+ u32 num_peer_ht_rates;
+ u32 peer_bw_rxnss_override;
+ struct wmi_ppe_threshold peer_ppet;
+ u32 peer_he_cap_info;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs;
+ u32 peer_he_cap_info_ext;
+ u32 peer_he_cap_info_internal;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ u32 tlv_header;
+ u32 requestor;
+ u32 scan_id;
+ u32 req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct scan_chan_list_params {
+ u32 pdev_id;
+ u16 nallchans;
+ struct channel_param ch_param[1];
+};
+
+struct wmi_scan_chan_list_cmd {
+ u32 tlv_header;
+ u32 num_scan_chans;
+ u32 flags;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+
+struct wmi_mgmt_send_params {
+ u32 tlv_header;
+ u32 tx_params_dword0;
+ u32 tx_params_dword1;
+};
+
+struct wmi_mgmt_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 desc_id;
+ u32 chanfreq;
+ u32 paddr_lo;
+ u32 paddr_hi;
+ u32 frame_len;
+ u32 buf_len;
+ u32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
+
+struct wmi_sta_powersave_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+};
+
+struct wmi_sta_smps_force_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 forced_mode;
+};
+
+struct wmi_sta_smps_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct wmi_bcn_prb_info {
+ u32 tlv_header;
+ u32 caps;
+ u32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct green_ap_ps_params {
+ u32 value;
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+};
+
+struct ap_ps_params {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct vdev_set_params {
+ u32 if_id;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct stats_request_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct wmi_init_country_params {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 init_cc_type;
+ union {
+ u32 country_code;
+ u32 regdom_id;
+ u32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_info {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 filter_type;
+ u32 num_mac;
+} __packed;
+
+enum ath11k_wmi_pktlog_enable {
+ ATH11K_WMI_PKTLOG_ENABLE_AUTO = 0,
+ ATH11K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 evlist; /* WMI_PKTLOG_EVENT */
+ u32 enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
+
+struct wmi_unit_test_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 module_id;
+ u32 num_args;
+ u32 diag_token;
+	/* Followed by test args */
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_HE 0x00000400
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_TWT_REQ 0x00400000
+#define WMI_PEER_TWT_RESP 0x00800000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+/* TODO: Placeholder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Needs to be cleaned up.
+ */
+#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
+#define WMI_PEER_160MHZ 0x40000000
+#define WMI_PEER_SAFEMODE_EN 0x80000000
+
+struct beacon_tmpl_params {
+ u8 vdev_id;
+ u32 tim_ie_offset;
+ u32 tmpl_len;
+ u32 tmpl_len_aligned;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u8 *frm;
+};
+
+struct wmi_rate_set {
+ u32 num_rates;
+ u32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+};
+
+struct wmi_vht_rate_set {
+ u32 tlv_header;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u32 tx_max_mcs_nss;
+} __packed;
+
+struct wmi_he_rate_set {
+ u32 tlv_header;
+ u32 rx_mcs_set;
+ u32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ u32 vdev_id;
+ u32 requestor_id;
+ enum wmi_start_event_param resp_type;
+ u32 status;
+ u32 chain_mask;
+ u32 smps_mode;
+ union {
+ u32 mac_id;
+ u32 pdev_id;
+ };
+ u32 cfgd_tx_streams;
+ u32 cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+enum cc_setting_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+};
+
+/* Regulatory Rule Flags Passed by FW */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
+
+enum {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+};
+
+struct cur_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+};
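+
+/* Illustrative example (values are hypothetical, not from firmware):
+ * a typical 5 GHz rule could look like
+ * { .start_freq = 5170, .end_freq = 5250, .max_bw = 80,
+ *   .reg_power = 23, .ant_gain = 0, .flags = 0 },
+ * i.e. channels 36-48 at up to 80 MHz and 23 dBm.
+ */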
+
+struct cur_regulatory_info {
+ enum cc_setting_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+ struct cur_reg_rule *reg_rules_2g_ptr;
+ struct cur_reg_rule *reg_rules_5g_ptr;
+};
+
+struct wmi_reg_chan_list_cc_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+} __packed;
+
+struct wmi_regulatory_rule_struct {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+};
+
+struct wmi_peer_delete_resp_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ u32 pdev_id;
+ u32 freq; /* Units in MHz */
+	u32 noise_floor;	/* Units in dBm */
+ /* rx clear - how often the channel was unused */
+ u32 rx_clear_count_low;
+ u32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ u32 cycle_count_low;
+ u32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ u32 tx_cycle_count_low;
+ u32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ u32 rx_cycle_count_low;
+ u32 rx_cycle_count_high;
+	/* rx cycle count for my BSS, in 64-bit format */
+ u32 rx_bss_cycle_count_low;
+ u32 rx_bss_cycle_count_high;
+} __packed;
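+
+/* The *_low/*_high pairs above are halves of 64-bit counters; a
+ * consumer would reassemble them as, for example:
+ *
+ *	u64 cycles = ((u64)ev->cycle_count_high << 32) |
+ *		     ev->cycle_count_low;
+ *
+ * (sketch only; ev is a received struct wmi_pdev_bss_chan_info_event)
+ */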
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+/*
+ * PDEV statistics
+ */
+struct wmi_pdev_stats_base {
+ s32 chan_nf;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+
+ /* Num Local frames queued */
+ s32 local_enqued;
+
+ /* Num Local frames done */
+ s32 local_freed;
+
+ /* Num queued to HW */
+ s32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+
+ /* Num underruns */
+ s32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+ s32 mpdus_requed;
+
+ /* excessive retries */
+ u32 tx_ko;
+
+ /* data hw rate code */
+ u32 data_rc;
+
+ /* Scheduler self triggers */
+ u32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ u32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+
+	/* PHY/BB underrun */
+ u32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+	/* Counts any change in ring routing mid-PPDU */
+ s32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ s32 phy_errs;
+
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats {
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct wmi_bcn_stats {
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+} __packed;
+
+struct wmi_stats_event {
+ u32 stats_id;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ u32 num_mib_stats;
+ u32 pdev_id;
+ u32 num_bcn_stats;
+ u32 num_peer_extd_stats;
+ u32 num_peer_extd2_stats;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ u32 pdev_id;
+ u32 ctl_failsafe_status;
+} __packed;
+
+struct wmi_pdev_csa_switch_ev {
+ u32 pdev_id;
+ u32 current_switch_count;
+ u32 num_vdevs;
+} __packed;
+
+struct wmi_pdev_radar_ev {
+ u32 pdev_id;
+ u32 detection_mode;
+ u32 chan_freq;
+ u32 chan_width;
+ u32 detector_id;
+ u32 segment_id;
+ u32 timestamp;
+ u32 is_chirp;
+ s32 freq_offset;
+ s32 sidx;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct mgmt_rx_event_params {
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct wmi_mgmt_rx_hdr {
+ u32 channel;
+ u32 snr;
+ u32 rate;
+ u32 phy_mode;
+ u32 buf_len;
+ u32 status;
+ u32 rssi_ctl[ATH_MAX_ANTENNA];
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u32 rx_tsf_l32;
+ u32 rx_tsf_u32;
+ u32 pdev_id;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_rssi_ctl_ext {
+ u32 tlv_header;
+ u32 rssi_ctl_ext[MAX_ANTENNA_EIGHT - ATH_MAX_ANTENNA];
+};
+
+struct wmi_mgmt_tx_compl_event {
+ u32 desc_id;
+ u32 status;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_scan_event {
+ u32 event_type; /* %WMI_SCAN_EVENT_ */
+ u32 reason; /* %WMI_SCAN_REASON_ */
+ u32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ u32 scan_req_id;
+ u32 scan_id;
+ u32 vdev_id;
+	/* TSF timestamp at which the scan event (%WMI_SCAN_EVENT_) completed.
+	 * For an AP, this is the TSF of the AP vdev.
+	 * For a connected STA, this is the TSF of the AP.
+	 * For an unconnected STA, this is the free-running HW timer.
+	 */
+ u32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ u32 vdev_id;
+ u32 reason;
+ u32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 chan_tx_pwr_range;
+ u32 chan_tx_pwr_tp;
+ u32 rx_frame_count;
+ u32 my_bss_rx_cycle_count;
+ u32 rx_11b_mode_data_duration;
+ u32 tx_frame_cnt;
+ u32 mac_clk_mhz;
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_targ_cap {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? \
+ (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
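+
+/* Worked example (illustrative): for AC 1,
+ * WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_DELI) evaluates to
+ * (1 << 2) and WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_TRIG) to
+ * (1 << 3), matching WMI_STA_PS_UAPSD_AC1_* in the enum below.
+ */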
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+	/* Values greater than one indicate the number of TX attempts allowed
+	 * per beacon interval before the STA wakes up
+	 */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll frames as necessary to retrieve buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+	/* Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set uapsd configuration for a given peer.
+ *
+	 * This includes the delivery and trigger enabled state for each AC.
+	 * The host MLME needs to set this based on the AP capability and the
+	 * station's request, as set in the association request received from
+	 * the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+	 * The service period from the WME IE in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify frame types that are considered SIFS
+	 * RESP trigger frames
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+	/** Specifies the trigger state of the TID.
+	 * Valid only for the UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_ANY 0x8
+#define WMI_CIPHER_AES_GCM 0x9
+#define WMI_CIPHER_AES_GMAC 0xa
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH11K_RC_VERSION_OFFSET 28
+#define ATH11K_RC_PREAMBLE_OFFSET 8
+#define ATH11K_RC_NSS_OFFSET 5
+
+#define ATH11K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH11K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH11K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH11K_RC_PREAMBLE_OFFSET) | \
+ (rate))
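+
+/* Worked example (illustrative): ATH11K_HW_RATE_CODE(3, 1,
+ * WMI_RATE_PREAMBLE_VHT) expands to
+ * (1 << 28) | (1 << 5) | (3 << 8) | 3 = 0x10000323,
+ * i.e. version 1, NSS field 1, VHT preamble, MCS 3.
+ */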
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS enabled.
+ * @WMI_USE_CTS2SELF: CTS-to-self protection enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither rate series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only the second rate series will use
+ *				      RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate series will use
+ *				  RTS-CTS, but if there is a sw retry, both
+ *				  rate series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+struct ath11k_hal_reg_cap {
+ u32 eeprom_rd;
+ u32 eeprom_rd_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+struct ath11k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+enum ath11k_hw_txrx_mode {
+ ATH11K_HW_TXRX_RAW = 0,
+ ATH11K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH11K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+ u32 tlv_header;
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txoplimit;
+ u32 acm;
+ u32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ u32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH11K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH11K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH11K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+} __packed;
+
+struct wmi_twt_disable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ s32 obss_min;
+ s32 obss_max;
+ u32 vdev_id;
+} __packed;
+
+struct target_resource_config {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 atf_config;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+};
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+struct ath11k_wmi_base {
+ struct ath11k_base *ab;
+ struct ath11k_pdev_wmi wmi[MAX_RADIOS];
+ enum ath11k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+ struct target_resource_config wlan_resource_config;
+
+ struct ath11k_targ_cap *targ_cap;
+};
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn);
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable);
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
+int ath11k_wmi_cmd_init(struct ath11k_base *ab);
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);
+int ath11k_wmi_connect(struct ath11k_base *ab);
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id);
+int ath11k_wmi_attach(struct ath11k_base *ab);
+void ath11k_wmi_detach(struct ath11k_base *ab);
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param);
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id,
+ const u8 *addr, dma_addr_t paddr,
+ u8 tid, u8 ba_window_size_valid,
+ u32 ba_window_size);
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param);
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms);
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id);
+void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg);
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params);
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param);
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param);
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param);
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param);
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param);
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param);
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list);
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id);
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_param);
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param);
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param);
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats);
+size_t ath11k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath11k_wmi_fw_stats_num_peers_extd(struct list_head *head);
+size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
+int ath11k_wmi_simulate_radar(struct ath11k *ar);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
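+
+/* Typical command flow (a sketch; exact TLV contents are internal to
+ * the implementation): allocate a buffer with ath11k_wmi_alloc_skb(),
+ * fill in the command TLVs, then submit it with
+ * ath11k_wmi_cmd_send(wmi, skb, cmd_id), which returns 0 on success
+ * or a negative errno.
+ */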
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index c0794f5988b3..2c9cec8b53d9 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -106,7 +106,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
- mem = ioremap_nocache(res->start, resource_size(res));
+ mem = ioremap(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 63019c3de034..cdefb8e2daf1 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -92,7 +92,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -ENXIO;
}
- mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
index 547cd46da260..d0f1e8bcc846 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -406,7 +406,7 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
sram.com_att_6db =
ar9003_aic_find_index(1, fixed_com_att_db);
- sram.valid = 1;
+ sram.valid = true;
sram.rot_dir_att_db =
min(max(rot_dir_path_att_db,
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 956fa7828d0c..56d1a7764b9f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -83,7 +83,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
val = swahb32(val);
}
- __raw_writel(val, mem + reg);
+ iowrite32(val, mem + reg);
usleep_range(100, 120);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index fb649d85b8fc..dd0c32379375 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1216,7 +1216,7 @@ err_fw:
static int send_eject_command(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 20f4f8ea9f89..bee9110b91f3 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 set, this is not a supported regulatory domain
- * but since we have more than one user with it we need
- * a solution for them. We default to 0x64, which is the
- * default Atheros world regulatory domain.
+ * 0x8000 or 0x0 set, this is not a supported regulatory
+ * domain but since we have more than one user with it we
+ * need a solution for them. We default to 0x64, which is
+ * the default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
- if (reg->current_rd != COUNTRY_ERD_FLAG)
+ if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index c30fdd0cbf1e..e49c306e0eef 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1169,7 +1169,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 523550f94a3f..77269ac7f352 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -1620,7 +1620,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.beacon_length6 = msg_body.beacon_length + 6;
if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
- wcn36xx_err("Beacon is to big: beacon size=%d\n",
+ wcn36xx_err("Beacon is too big: beacon size=%d\n",
msg_body.beacon_length);
ret = -ENOMEM;
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 7d6f14420855..0851d2bede89 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2579,6 +2579,38 @@ wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
return rc;
}
+static int wil_cfg80211_set_multicast_to_unicast(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (wil->multicast_to_unicast == enabled)
+ return 0;
+
+ wil_info(wil, "set multicast to unicast, enabled=%d\n", enabled);
+ wil->multicast_to_unicast = enabled;
+
+ return 0;
+}
+
+static int wil_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil->cqm_rssi_thold = rssi_thold;
+
+ rc = wmi_set_cqm_rssi_config(wil, rssi_thold, rssi_hyst);
+ if (rc)
+ /* reset stored value upon failure */
+ wil->cqm_rssi_thold = 0;
+
+ return rc;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2610,11 +2642,13 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+ .set_cqm_rssi_config = wil_cfg80211_set_cqm_rssi_config,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
.sched_scan_start = wil_cfg80211_sched_scan_start,
.sched_scan_stop = wil_cfg80211_sched_scan_stop,
.update_ft_ies = wil_cfg80211_update_ft_ies,
+ .set_multicast_to_unicast = wil_cfg80211_set_multicast_to_unicast,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 912c4eaf017b..fef10886ca4a 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -11,26 +11,6 @@
#include "wil6210.h"
-static int wil_ethtoolops_begin(struct net_device *ndev)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- mutex_lock(&wil->mutex);
-
- wil_dbg_misc(wil, "ethtoolops_begin\n");
-
- return 0;
-}
-
-static void wil_ethtoolops_complete(struct net_device *ndev)
-{
- struct wil6210_priv *wil = ndev_to_wil(ndev);
-
- wil_dbg_misc(wil, "ethtoolops_complete\n");
-
- mutex_unlock(&wil->mutex);
-}
-
static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp)
{
@@ -39,11 +19,12 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
u32 rx_itr_en, rx_itr_val = 0;
int ret;
+ mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
ret = wil_pm_runtime_get(wil);
if (ret < 0)
- return ret;
+ goto out;
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -57,7 +38,11 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
- return 0;
+ ret = 0;
+
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
}
static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
@@ -67,12 +52,14 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
struct wireless_dev *wdev = ndev->ieee80211_ptr;
int ret;
+ mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* only @rx_coalesce_usecs and @tx_coalesce_usecs supported,
@@ -88,24 +75,26 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
ret = wil_pm_runtime_get(wil);
if (ret < 0)
- return ret;
+ goto out;
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
+ ret = 0;
- return 0;
+out:
+ mutex_unlock(&wil->mutex);
+ return ret;
out_bad:
wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n");
print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4,
cp, sizeof(*cp), false);
+ mutex_unlock(&wil->mutex);
return -EINVAL;
}
static const struct ethtool_ops wil_ethtool_ops = {
- .begin = wil_ethtoolops_begin,
- .complete = wil_ethtoolops_complete,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 06091d8a9e23..3ba5b2550a8c 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -762,7 +762,7 @@ int wil_priv_init(struct wil6210_priv *wil)
*/
wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
- wil->amsdu_en = 1;
+ wil->amsdu_en = true;
return 0;
@@ -1654,6 +1654,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
+ down_write(&wil->mem_lock);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
if (test_bit(wil_status_suspending, wil->status))
@@ -1702,6 +1704,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->secured_boot) {
wil_err(wil, "secured boot is not supported\n");
+ up_write(&wil->mem_lock);
return -ENOTSUPP;
}
@@ -1737,6 +1740,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
+ up_write(&wil->mem_lock);
+
if (load_fw) {
wil_unmask_irq(wil);
@@ -1786,6 +1791,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
out:
+ up_write(&wil->mem_lock);
clear_bit(wil_status_resetting, wil->status);
return rc;
}
@@ -1811,9 +1817,7 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
- down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
- up_write(&wil->mem_lock);
if (rc)
return rc;
@@ -1905,9 +1909,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- down_write(&wil->mem_lock);
rc = wil_reset(wil, false);
- up_write(&wil->mem_lock);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8ebc6d59aa74..bc8c15fb609d 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -10,6 +10,7 @@
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>
@@ -1139,7 +1140,7 @@ static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
- txdata->dot1x_open = 0;
+ txdata->dot1x_open = false;
txdata->enabled = 0;
txdata->idle = 0;
txdata->last_idle = 0;
@@ -1529,6 +1530,35 @@ static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
return v;
}
+/* apply multicast to unicast only for ARP and IP packets
+ * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
+ */
+static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ const struct ethhdr *eth = (void *)skb->data;
+ const struct vlan_ethhdr *ethvlan = (void *)skb->data;
+ __be16 ethertype;
+
+ if (!wil->multicast_to_unicast)
+ return false;
+
+ /* multicast to unicast conversion only for some payload */
+ ethertype = eth->h_proto;
+ if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
+ ethertype = ethvlan->h_vlan_encapsulated_proto;
+ switch (ethertype) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
@@ -2336,7 +2366,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* in STA mode (ESS), all to same VRING (to AP) */
ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
- if (vif->pbss)
+ if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
/* in pbss, no bcast VRING - duplicate skb in
* all stations VRINGs
*/
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 778b63be6a9a..7bfe867c7509 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -869,6 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
u8 data_offset;
struct wil_rx_status_extended *s;
u16 sring_idx = sring - wil->srings;
+ int invalid_buff_id_retry;
BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
@@ -882,9 +883,9 @@ again:
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ invalid_buff_id_retry = 0;
while (!buff_id) {
struct wil_rx_status_extended *s;
- int invalid_buff_id_retry = 0;
wil_dbg_txrx(wil,
"buff_id is not updated yet by HW, (swhead 0x%x)\n",
@@ -902,6 +903,11 @@ again:
if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
buff_id, sring->swhead);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
sring->invalid_buff_id_cnt++;
@@ -962,6 +968,11 @@ again:
if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg, wil->use_compressed_rx_status ?
+ sizeof(struct wil_rx_status_compressed) :
+ sizeof(struct wil_rx_status_extended), false);
+
stats->rx_large_frame++;
rxdata->skipping = true;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index c744c65225da..c736f7413a35 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -46,7 +46,7 @@
#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
-#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
+#define WIL_RX_EDMA_MID_VALID_BIT BIT(20)
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
@@ -244,8 +244,8 @@ struct wil_ring_tx_status {
* calculated, Bit1- L4Err - TCP/UDP Checksum Error
* bit 7 : Reserved:1
* bit 8..19 : Flow ID:12 - MSDU flow ID
- * bit 20..21 : MID:2 - The MAC ID
- * bit 22 : MID_V:1 - The MAC ID field is valid
+ * bit 20 : MID_V:1 - The MAC ID field is valid
+ * bit 21..22 : MID:2 - The MAC ID
* bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
* bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
* bit 25 : BC:1 - The received MPDU is broadcast
@@ -479,7 +479,7 @@ static inline int wil_rx_status_get_mid(void *msg)
return 0; /* use the default MID */
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
- 20, 21);
+ 21, 22);
}
static inline int wil_rx_status_get_error(void *msg)
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 97626bfd4dac..5dc881d3c057 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1059,6 +1059,8 @@ struct wil6210_priv {
u32 max_agg_wsize;
u32 max_ampdu_size;
+ u8 multicast_to_unicast;
+ s32 cqm_rssi_thold;
};
#define wil_to_wiphy(i) (i->wiphy)
@@ -1148,7 +1150,7 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
*/
static inline bool wil_cid_valid(struct wil6210_priv *wil, int cid)
{
- return (cid >= 0 && cid < wil->max_assoc_sta);
+ return (cid >= 0 && cid < wil->max_assoc_sta && cid < WIL6210_MAX_CID);
}
void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
@@ -1440,4 +1442,6 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
void update_supported_bands(struct wil6210_priv *wil);
void wil_clear_fw_log_addr(struct wil6210_priv *wil);
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1332eb8c831f..89c12cb2aaab 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -46,7 +46,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
- int i, rc;
+ int i;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
@@ -62,9 +62,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
- rc = wil_mem_access_lock(wil);
- if (rc)
- return rc;
+ down_write(&wil->mem_lock);
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil,
+ "suspend/resume in progress. cannot copy crash dump\n");
+ up_write(&wil->mem_lock);
+ return -EBUSY;
+ }
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -84,7 +90,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
- wil_mem_access_unlock(wil);
+
+ up_write(&wil->mem_lock);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 7a0d934eb271..23e1ed6a9d6d 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -196,8 +196,8 @@ const struct fw_map talyn_mb_fw_mapping[] = {
{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
/* DMA OFU 296b */
{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
- /* ucode debug 4k */
- {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
+ /* ucode debug 256b */
+ {0x8c3000, 0x8c3100, 0x8c3000, "ucode_debug", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
@@ -476,6 +476,8 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_RBUFCAP_CFG_CMD";
case WMI_TEMP_SENSE_ALL_CMDID:
return "WMI_TEMP_SENSE_ALL_CMDID";
+ case WMI_SET_LINK_MONITOR_CMDID:
+ return "WMI_SET_LINK_MONITOR_CMD";
default:
return "Untracked CMD";
}
@@ -624,6 +626,10 @@ static const char *eventid2name(u16 eventid)
return "WMI_RBUFCAP_CFG_EVENT";
case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
+ case WMI_SET_LINK_MONITOR_EVENTID:
+ return "WMI_SET_LINK_MONITOR_EVENT";
+ case WMI_LINK_MONITOR_EVENTID:
+ return "WMI_LINK_MONITOR_EVENT";
default:
return "Untracked EVENT";
}
@@ -1507,14 +1513,14 @@ static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
if (vif->fw_stats_ready) {
/* clean old statistics */
vif->fw_stats_tsf = 0;
- vif->fw_stats_ready = 0;
+ vif->fw_stats_ready = false;
}
wil_link_stats_store_basic(vif, payload + hdr_size);
if (!has_next) {
vif->fw_stats_tsf = tsf;
- vif->fw_stats_ready = 1;
+ vif->fw_stats_ready = true;
}
break;
@@ -1529,14 +1535,14 @@ static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
if (wil->fw_stats_global.ready) {
/* clean old statistics */
wil->fw_stats_global.tsf = 0;
- wil->fw_stats_global.ready = 0;
+ wil->fw_stats_global.ready = false;
}
wil_link_stats_store_global(vif, payload + hdr_size);
if (!has_next) {
wil->fw_stats_global.tsf = tsf;
- wil->fw_stats_global.ready = 1;
+ wil->fw_stats_global.ready = true;
}
break;
@@ -1836,6 +1842,32 @@ fail:
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
+static void
+wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_link_monitor_event *evt = d;
+ enum nl80211_cqm_rssi_threshold_event event_type;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "link monitor event too short %d\n", len);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "link monitor event, type %d rssi %d (stored %d)\n",
+ evt->type, evt->rssi_level, wil->cqm_rssi_thold);
+
+ if (evt->type != WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT)
+ /* ignore */
+ return;
+
+ event_type = (evt->rssi_level > wil->cqm_rssi_thold ?
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW);
+ cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
+}
+
/**
 * Some events are intentionally ignored and need not be interpreted as
* "unhandled events"
@@ -1869,6 +1901,7 @@ static const struct {
{WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
{WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status},
{WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status},
+ {WMI_LINK_MONITOR_EVENTID, wmi_evt_link_monitor},
};
/*
@@ -3981,3 +4014,46 @@ int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
return 0;
}
+
+int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct net_device *ndev = wil->main_ndev;
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ int rc;
+ struct {
+ struct wmi_set_link_monitor_cmd cmd;
+ s8 rssi_thold;
+ } __packed cmd = {
+ .cmd = {
+ .rssi_hyst = rssi_hyst,
+ .rssi_thresholds_list_size = 1,
+ },
+ .rssi_thold = rssi_thold,
+ };
+ struct {
+ struct wmi_cmd_hdr hdr;
+ struct wmi_set_link_monitor_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
+ return -EINVAL;
+
+ rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
+ return rc;
+ }
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, status %d\n",
+ reply.evt.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6bd4ccee28ab..e3558136e0c4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -192,6 +192,7 @@ enum wmi_command_id {
WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
+ WMI_SET_LINK_MONITOR_CMDID = 0x845,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@@ -1973,6 +1974,7 @@ enum wmi_event_id {
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_FT_AUTH_STATUS_EVENTID = 0x100C,
WMI_FT_REASSOC_STATUS_EVENTID = 0x100D,
+ WMI_LINK_MONITOR_EVENTID = 0x100E,
WMI_RADAR_GENERAL_CONFIG_EVENTID = 0x1100,
WMI_RADAR_CONFIG_SELECT_EVENTID = 0x1101,
WMI_RADAR_PARAMS_CONFIG_EVENTID = 0x1102,
@@ -2024,6 +2026,7 @@ enum wmi_event_id {
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_SET_LINK_MONITOR_EVENTID = 0x1845,
WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856,
WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857,
WMI_LED_CFG_DONE_EVENTID = 0x1858,
@@ -3312,6 +3315,36 @@ struct wmi_link_maintain_cfg_read_cmd {
__le32 cid;
} __packed;
+/* WMI_SET_LINK_MONITOR_CMDID */
+struct wmi_set_link_monitor_cmd {
+ u8 rssi_hyst;
+ u8 reserved[12];
+ u8 rssi_thresholds_list_size;
+ s8 rssi_thresholds_list[0];
+} __packed;
+
+/* wmi_link_monitor_event_type */
+enum wmi_link_monitor_event_type {
+ WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT = 0x00,
+ WMI_LINK_MONITOR_NOTIF_TX_ERR_EVT = 0x01,
+ WMI_LINK_MONITOR_NOTIF_THERMAL_EVT = 0x02,
+};
+
+/* WMI_SET_LINK_MONITOR_EVENTID */
+struct wmi_set_link_monitor_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_LINK_MONITOR_EVENTID */
+struct wmi_link_monitor_event {
+ /* link_monitor_event_type */
+ u8 type;
+ s8 rssi_level;
+ u8 reserved[2];
+} __packed;
+
/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
struct wmi_link_maintain_cfg_write_done_event {
/* requested connection ID */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index db2c3b8d491e..3b2680772f03 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2236,7 +2236,7 @@ static int at76_alloc_urbs(struct at76_priv *priv,
at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
at76_dbg(DBG_URB, "%s: NumEndpoints %d ", __func__,
- interface->altsetting[0].desc.bNumEndpoints);
+ interface->cur_altsetting->desc.bNumEndpoints);
ep_in = NULL;
ep_out = NULL;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 4325e91736eb..8b6b657c4b85 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1275,8 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
}
/* Interrupt handler bottom-half */
-static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
+static void b43legacy_interrupt_tasklet(unsigned long data)
{
+ struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
@@ -3741,7 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
tasklet_init(&wldev->isr_tasklet,
- (void (*)(unsigned long))b43legacy_interrupt_tasklet,
+ b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
wldev->__using_pio = true;
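/*
 * Editorial sketch of the conversion applied here and in the ipw2x00 and
 * iwlegacy hunks further down: tasklet_init() expects a
 * void (*)(unsigned long) callback, so rather than casting the function
 * pointer, the handler takes the opaque word and casts the data back to
 * the device struct (foo_* names are hypothetical):
 *
 *	static void foo_tasklet(unsigned long data)
 *	{
 *		struct foo_dev *dev = (struct foo_dev *)data;
 *		...
 *	}
 *
 *	tasklet_init(&dev->isr_tasklet, foo_tasklet, (unsigned long)dev);
 */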
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 96fd8e2bf773..b684a5b6d904 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -43,6 +43,7 @@
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
+#define SDIO_4359_FUNC2_BLOCKSIZE 256
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
@@ -119,7 +120,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
brcmf_err("enable_irq_wake failed %d\n", ret);
return ret;
}
- sdiodev->irq_wake = true;
+ disable_irq_wake(pdata->oob_irq_nr);
sdio_claim_host(sdiodev->func1);
@@ -178,10 +179,6 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
sdiodev->oob_irq_requested = false;
- if (sdiodev->irq_wake) {
- disable_irq_wake(pdata->oob_irq_nr);
- sdiodev->irq_wake = false;
- }
free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
sdiodev->irq_en = false;
sdiodev->oob_irq_requested = false;
@@ -903,6 +900,7 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
+ unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;
sdio_claim_host(sdiodev->func1);
@@ -912,7 +910,9 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
sdio_release_host(sdiodev->func1);
goto out;
}
- ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
+ if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359)
+ f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE;
+ ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
@@ -969,8 +969,10 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_89359),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1167,6 +1169,10 @@ static int brcmf_ops_sdio_resume(struct device *dev)
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
+ if (sdiodev->wowl_enabled &&
+ sdiodev->settings->bus.sdio.oob_irq_supported)
+ disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+
brcmf_sdiod_freezer_off(sdiodev);
}
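/*
 * Editorial aside: enable_irq_wake() and disable_irq_wake() must stay
 * balanced. A sketch of the pairing, assuming the matching
 * enable_irq_wake() sits in the suspend path (not shown in this hunk):
 *
 *	suspend:  if (sdiodev->wowl_enabled && oob_irq_supported)
 *	                  enable_irq_wake(oob_irq_nr);
 *	resume:   if (sdiodev->wowl_enabled && oob_irq_supported)
 *	                  disable_irq_wake(oob_irq_nr);
 */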
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 5598bbd09b62..a2328d3eee03 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
+#include <uapi/linux/if_arp.h>
#include <brcmu_utils.h>
#include <defs.h>
@@ -619,6 +620,82 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
}
+/**
+ * brcmf_mon_add_vif() - create monitor mode virtual interface
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ */
+static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
+ const char *name)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *ndev;
+ struct brcmf_if *ifp;
+ int err;
+
+ if (cfg->pub->mon_if) {
+ err = -EEXIST;
+ goto err_out;
+ }
+
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_MONITOR);
+ if (IS_ERR(vif)) {
+ err = PTR_ERR(vif);
+ goto err_out;
+ }
+
+ ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN, ether_setup);
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_free_vif;
+ }
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ ndev->ieee80211_ptr = &vif->wdev;
+ ndev->needs_free_netdev = true;
+ ndev->priv_destructor = brcmf_cfg80211_free_netdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
+ ifp = netdev_priv(ndev);
+ ifp->vif = vif;
+ ifp->ndev = ndev;
+ ifp->drvr = cfg->pub;
+
+ vif->ifp = ifp;
+ vif->wdev.netdev = ndev;
+
+ err = brcmf_net_mon_attach(ifp);
+ if (err) {
+ brcmf_err("Failed to attach %s device\n", ndev->name);
+ free_netdev(ndev);
+ goto err_free_vif;
+ }
+
+ cfg->pub->mon_if = ifp;
+
+ return &vif->wdev;
+
+err_free_vif:
+ brcmf_free_vif(vif);
+err_out:
+ return ERR_PTR(err);
+}
+
+static int brcmf_mon_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = wdev->netdev;
+
+ ndev->netdev_ops->ndo_stop(ndev);
+
+ brcmf_net_detach(ndev, true);
+
+ cfg->pub->mon_if = NULL;
+
+ return 0;
+}
+
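/*
 * Editorial aside: with NL80211_IFTYPE_MONITOR advertised (see the
 * brcmf_setup_ifmodes() changes below), brcmf_mon_add_vif() is typically
 * reached from userspace with something like:
 *
 *	iw phy phy0 interface add mon0 type monitor
 *	ip link set mon0 up
 *
 * and bringing the device up lands in brcmf_net_mon_open() through the
 * monitor netdev ops added in core.c.
 */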
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
@@ -641,9 +718,10 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_MONITOR:
+ return brcmf_mon_add_vif(wiphy, name);
case NL80211_IFTYPE_AP:
wdev = brcmf_ap_add_vif(wiphy, name, params);
break;
@@ -826,9 +904,10 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return -EOPNOTSUPP;
+ case NL80211_IFTYPE_MONITOR:
+ return brcmf_mon_del_vif(wiphy, wdev);
case NL80211_IFTYPE_AP:
return brcmf_cfg80211_del_ap_iface(wiphy, wdev);
case NL80211_IFTYPE_P2P_CLIENT:
@@ -5363,6 +5442,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
bool mbss;
+ struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
@@ -5375,7 +5455,8 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
- if (type == NL80211_IFTYPE_AP) {
+ if (type == NL80211_IFTYPE_AP &&
+ brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
mbss = false;
list_for_each_entry(vif_walk, &cfg->vif_list, list) {
if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) {
@@ -6012,19 +6093,17 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
(void *)roamtrigger, sizeof(roamtrigger));
- if (err) {
+ if (err)
bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err);
- goto roam_setup_done;
- }
roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
(void *)roam_delta, sizeof(roam_delta));
- if (err) {
+ if (err)
bphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d)\n", err);
- goto roam_setup_done;
- }
+
+ return 0;
roam_setup_done:
return err;
@@ -6522,6 +6601,9 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* #STA <= 1, #AP <= 1, channels = 1, 2 total
* #AP <= 4, matching BI, channels = 1, 4 total
*
+ * no p2p and rsdb:
+ * #STA <= 2, #AP <= 2, channels = 2, 4 total
+ *
* p2p, no mchan, and mbss:
*
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
@@ -6533,6 +6615,10 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
* #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
* #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
* #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, rsdb, and no mbss:
+ * #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
+ * channels = 2, 4 total
*/
static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
{
@@ -6540,13 +6626,16 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
struct ieee80211_iface_limit *c0_limits = NULL;
struct ieee80211_iface_limit *p2p_limits = NULL;
struct ieee80211_iface_limit *mbss_limits = NULL;
- bool mbss, p2p;
- int i, c, n_combos;
+ bool mon_flag, mbss, p2p, rsdb, mchan;
+ int i, c, n_combos, n_limits;
+ mon_flag = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FLAG);
mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+ rsdb = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB);
+ mchan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN);
- n_combos = 1 + !!p2p + !!mbss;
+ n_combos = 1 + !!(p2p && !rsdb) + !!mbss;
combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
if (!combo)
goto err;
@@ -6554,37 +6643,53 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ if (mon_flag)
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+ if (p2p)
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
c = 0;
i = 0;
- c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+ n_limits = 1 + mon_flag + (p2p ? 2 : 0) + (rsdb || !p2p);
+ c0_limits = kcalloc(n_limits, sizeof(*c0_limits), GFP_KERNEL);
if (!c0_limits)
goto err;
- c0_limits[i].max = 1;
+
+ combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan));
+ c0_limits[i].max = 1 + rsdb;
c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+ if (mon_flag) {
+ c0_limits[i].max = 1;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
+ }
if (p2p) {
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
- combo[c].num_different_channels = 2;
- else
- combo[c].num_different_channels = 1;
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
- c0_limits[i].max = 1;
+ c0_limits[i].max = 1 + rsdb;
c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
+ }
+ if (p2p && rsdb) {
+ c0_limits[i].max = 2;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 5;
+ } else if (p2p) {
+ combo[c].max_interfaces = i;
+ } else if (rsdb) {
+ c0_limits[i].max = 2;
+ c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = 3;
} else {
- combo[c].num_different_channels = 1;
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ combo[c].max_interfaces = i;
}
- combo[c].max_interfaces = i;
combo[c].n_limits = i;
combo[c].limits = c0_limits;
- if (p2p) {
+ if (p2p && !rsdb) {
c++;
i = 0;
p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
@@ -6607,14 +6712,20 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
if (mbss) {
c++;
i = 0;
- mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+ n_limits = 1 + mon_flag;
+ mbss_limits = kcalloc(n_limits, sizeof(*mbss_limits),
+ GFP_KERNEL);
if (!mbss_limits)
goto err;
mbss_limits[i].max = 4;
mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+ if (mon_flag) {
+ mbss_limits[i].max = 1;
+ mbss_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
+ }
combo[c].beacon_int_infra_match = true;
combo[c].num_different_channels = 1;
- combo[c].max_interfaces = 4;
+ combo[c].max_interfaces = 4 + mon_flag;
combo[c].n_limits = i;
combo[c].limits = mbss_limits;
}
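/*
 * Editorial trace (derived from the code above, not part of the patch):
 * for the "p2p, rsdb, and no mbss" case the first combination works out
 * to
 *
 *	c0_limits[0] = { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) };
 *	c0_limits[1] = { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) };
 *	c0_limits[2] = { .max = 2, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
 *				     BIT(NL80211_IFTYPE_P2P_GO) };
 *	c0_limits[3] = { .max = 2, .types = BIT(NL80211_IFTYPE_AP) };
 *	combo[0].num_different_channels = 2;
 *	combo[0].max_interfaces = 5;
 *
 * matching the "#STA <= 2, ... channels = 2" block documented earlier.
 */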
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index a795d781b4c5..282d0bc14e8e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -433,11 +433,25 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
{
struct brcmf_chip_priv *ci;
int count;
+ struct brcmf_core *d11core2 = NULL;
+ struct brcmf_core_priv *d11priv2 = NULL;
ci = core->chip;
+ /* special-case the reset when the chip has two D11 cores */
+ if (core->pub.id == BCMA_CORE_80211) {
+ d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
+ if (d11core2) {
+ brcmf_dbg(INFO, "found two d11 cores, reset both\n");
+ d11priv2 = container_of(d11core2,
+ struct brcmf_core_priv, pub);
+ }
+ }
+
/* must disable first to work for arbitrary current core state */
brcmf_chip_ai_coredisable(core, prereset, reset);
+ if (d11priv2)
+ brcmf_chip_ai_coredisable(d11priv2, prereset, reset);
count = 0;
while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
@@ -449,9 +463,30 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
usleep_range(40, 60);
}
+ if (d11priv2) {
+ count = 0;
+ while (ci->ops->read32(ci->ctx,
+ d11priv2->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx,
+ d11priv2->wrapbase + BCMA_RESET_CTL,
+ 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+ }
+
ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
postreset | BCMA_IOCTL_CLK);
ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ if (d11priv2) {
+ ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
+ }
}
char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
@@ -677,7 +712,6 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
case BRCM_CC_4358_CHIP_ID:
- case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
case BRCM_CC_4371_CHIP_ID:
return 0x180000;
@@ -687,6 +721,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4366_CHIP_ID:
case BRCM_CC_43664_CHIP_ID:
return 0x200000;
+ case BRCM_CC_4359_CHIP_ID:
+ return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
case CY_CC_4373_CHIP_ID:
return 0x160000;
default:
@@ -1109,6 +1145,21 @@ void brcmf_chip_detach(struct brcmf_chip *pub)
kfree(chip);
}
+struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list) {
+ if (core->pub.id == BCMA_CORE_80211) {
+ if (unit-- == 0)
+ return &core->pub;
+ }
+ }
+ return NULL;
+}
+
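/*
 * Editorial aside: the list walk decrements 'unit' on every
 * BCMA_CORE_80211 hit, so unit 0 selects the first D11 core and unit 1
 * the second; brcmf_chip_ai_resetcore() above uses exactly that to probe
 * for a dual-D11 chip:
 *
 *	d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
 */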
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
{
struct brcmf_chip_priv *chip;
@@ -1357,6 +1408,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
+ case BRCM_CC_4359_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
index 7b00f6a59e89..8fa38658e727 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
@@ -74,6 +74,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
const struct brcmf_buscore_ops *ops);
void brcmf_chip_detach(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit);
struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub);
bool brcmf_chip_iscoreup(struct brcmf_core *core);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 85cf96461dde..23627c953a5e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -661,6 +661,8 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
goto fail;
}
+ netif_carrier_off(ndev);
+
ndev->priv_destructor = brcmf_cfg80211_free_netdev;
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
return 0;
@@ -671,7 +673,7 @@ fail:
return -EBADE;
}
-static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
+void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
{
if (ndev->reg_state == NETREG_REGISTERED) {
if (rtnl_locked)
@@ -684,6 +686,72 @@ static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
}
}
+static int brcmf_net_mon_open(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+ u32 monitor;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_MONITOR, &monitor);
+ if (err) {
+ bphy_err(drvr, "BRCMF_C_GET_MONITOR error (%d)\n", err);
+ return err;
+ } else if (monitor) {
+ bphy_err(drvr, "Monitor mode is already enabled\n");
+ return -EEXIST;
+ }
+
+ monitor = 3;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
+ if (err)
+ bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
+
+ return err;
+}
+
+static int brcmf_net_mon_stop(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+ u32 monitor;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ monitor = 0;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
+ if (err)
+ bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
+
+ return err;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_mon = {
+ .ndo_open = brcmf_net_mon_open,
+ .ndo_stop = brcmf_net_mon_stop,
+};
+
+int brcmf_net_mon_attach(struct brcmf_if *ifp)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct net_device *ndev;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ndev = ifp->ndev;
+ ndev->netdev_ops = &brcmf_netdev_ops_mon;
+
+ err = register_netdevice(ndev);
+ if (err)
+ bphy_err(drvr, "Failed to register %s device\n", ndev->name);
+
+ return err;
+}
+
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
{
struct net_device *ndev;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 6699637d3bf8..33b2ab3b54b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -210,6 +210,8 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
+int brcmf_net_mon_attach(struct brcmf_if *ifp);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 1c9c74cc958e..5da0dda0d899 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -38,6 +38,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MCHAN, "mchan" },
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
+ { BRCMF_FEAT_MONITOR_FLAG, "rtap" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
{ BRCMF_FEAT_SAE, "sae" },
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 280a1f6412d4..cda3fc1bab7f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -23,6 +23,7 @@
* GSCAN: enhanced scan offload feature.
* FWSUP: Firmware supplicant.
* MONITOR: firmware can pass monitor packets to host.
+ * MONITOR_FLAG: firmware flags monitor packets.
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
@@ -44,6 +45,7 @@
BRCMF_FEAT_DEF(GSCAN) \
BRCMF_FEAT_DEF(FWSUP) \
BRCMF_FEAT_DEF(MONITOR) \
+ BRCMF_FEAT_DEF(MONITOR_FLAG) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 0ff6f5212a94..ae4cf4372908 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -49,6 +49,8 @@
#define BRCMF_C_GET_PM 85
#define BRCMF_C_SET_PM 86
#define BRCMF_C_GET_REVINFO 98
+#define BRCMF_C_GET_MONITOR 107
+#define BRCMF_C_SET_MONITOR 108
#define BRCMF_C_GET_CURR_RATESET 114
#define BRCMF_C_GET_AP 117
#define BRCMF_C_SET_AP 118
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 2bd892df83cc..5e1a11c07551 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -908,7 +908,7 @@ static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
wlh += wlh[1] + 2;
if (entry->send_tim_signal) {
- entry->send_tim_signal = 0;
+ entry->send_tim_signal = false;
wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
wlh[2] = entry->mac_handle;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index e3dd8623be4e..8bb4f1fa790e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -365,7 +365,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
struct brcmf_msgbuf_pktid *pktid;
struct sk_buff *skb;
- if (idx < 0 || idx >= pktids->array_size) {
+ if (idx >= pktids->array_size) {
brcmf_err("Invalid packet id %d (max %d)\n", idx,
pktids->array_size);
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 7ba9f6a68645..1f5deea5a288 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2092,7 +2092,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
/* firmware requires unique mac address for p2pdev interface */
if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
bphy_err(drvr, "discovery vif must be different from primary interface\n");
- return ERR_PTR(-EINVAL);
+ err = -EINVAL;
+ goto fail;
}
brcmf_p2p_generate_bss_mac(p2p, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f64ce5074a55..5105f62767fb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -78,7 +78,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
-#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
+#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
@@ -1643,8 +1643,8 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
return -EINVAL;
}
- devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
- devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
+ devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
+ devinfo->tcm = ioremap(bar1_addr, bar1_size);
if (!devinfo->regs || !devinfo->tcm) {
brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 264ad63232f8..f9047db6a11d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -42,6 +42,8 @@
#define DEFAULT_F2_WATERMARK 0x8
#define CY_4373_F2_WATERMARK 0x40
#define CY_43012_F2_WATERMARK 0x60
+#define CY_4359_F2_WATERMARK 0x40
+#define CY_4359_F1_MESBUSYCTRL (CY_4359_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
#ifdef DEBUG
@@ -614,6 +616,7 @@ BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
+BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
@@ -636,6 +639,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
+ BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012)
};
@@ -1935,6 +1939,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
+ continue;
}
bus->sdcnt.rx_readahead_cnt++;
if (rd->len != roundup(rd_new.len, 16)) {
@@ -4205,6 +4210,19 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
&err);
break;
+ case SDIO_DEVICE_ID_BROADCOM_4359:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
+ CY_4359_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_4359_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_4359_F1_MESBUSYCTRL, &err);
+ break;
default:
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
DEFAULT_F2_WATERMARK, &err);
@@ -4225,6 +4243,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
if (err == 0) {
+ /* Assign bus interface callbacks */
+ sdiod->bus_if->dev = sdiod->dev;
+ sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
+ sdiod->bus_if->chip = bus->ci->chip;
+ sdiod->bus_if->chiprev = bus->ci->chiprev;
+
/* Allow full data communication using DPC from now on. */
brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
@@ -4241,12 +4265,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
sdio_release_host(sdiod->func1);
- /* Assign bus interface call back */
- sdiod->bus_if->dev = sdiod->dev;
- sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
- sdiod->bus_if->chip = bus->ci->chip;
- sdiod->bus_if->chiprev = bus->ci->chiprev;
-
err = brcmf_alloc(sdiod->dev, sdiod->settings);
if (err) {
brcmf_err("brcmf_alloc failed\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 0bd47c119dae..163fd664780a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -178,7 +178,6 @@ struct brcmf_sdio_dev {
bool sd_irq_requested;
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
- bool irq_wake; /* irq wake enable flags */
bool sg_support;
uint max_request_size;
ushort max_segment_count;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 06f3c01f10b3..575ed19e9195 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -430,6 +430,7 @@ fail:
usb_free_urb(req->urb);
list_del(q->next);
}
+ kfree(reqs);
return NULL;
}
@@ -1348,7 +1349,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail;
}
- desc = &intf->altsetting[0].desc;
+ desc = &intf->cur_altsetting->desc;
if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
(desc->bInterfaceSubClass != 2) ||
(desc->bInterfaceProtocol != 0xff)) {
@@ -1361,7 +1362,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
num_of_eps = desc->bNumEndpoints;
for (ep = 0; ep < num_of_eps; ep++) {
- endpoint = &intf->altsetting[0].endpoint[ep].desc;
+ endpoint = &intf->cur_altsetting->endpoint[ep].desc;
endpoint_num = usb_endpoint_num(endpoint);
if (!usb_endpoint_xfer_bulk(endpoint))
continue;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 3f09d89ba922..7f2c15c799d2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -5408,7 +5408,7 @@ int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
{
u16 chspec = ch20mhz_chspec(channel);
- if (channel < 0 || channel > MAXCHANNEL)
+ if (channel > MAXCHANNEL)
return -EINVAL;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec))
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index f43c06569ea1..8363f91df7ea 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -4420,73 +4420,65 @@ static int proc_BSSList_open( struct inode *inode, struct file *file );
static int proc_config_open( struct inode *inode, struct file *file );
static int proc_wepkey_open( struct inode *inode, struct file *file );
-static const struct file_operations proc_statsdelta_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .open = proc_statsdelta_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_statsdelta_ops = {
+ .proc_read = proc_read,
+ .proc_open = proc_statsdelta_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_stats_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .open = proc_stats_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_stats_ops = {
+ .proc_read = proc_read,
+ .proc_open = proc_stats_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_status_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .open = proc_status_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_status_ops = {
+ .proc_read = proc_read,
+ .proc_open = proc_status_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_SSID_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_SSID_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_SSID_ops = {
+ .proc_read = proc_read,
+ .proc_write = proc_write,
+ .proc_open = proc_SSID_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_BSSList_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_BSSList_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_BSSList_ops = {
+ .proc_read = proc_read,
+ .proc_write = proc_write,
+ .proc_open = proc_BSSList_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_APList_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_APList_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_APList_ops = {
+ .proc_read = proc_read,
+ .proc_write = proc_write,
+ .proc_open = proc_APList_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_config_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_config_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_config_ops = {
+ .proc_read = proc_read,
+ .proc_write = proc_write,
+ .proc_open = proc_config_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
-static const struct file_operations proc_wepkey_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_wepkey_open,
- .release = proc_close,
- .llseek = default_llseek,
+static const struct proc_ops proc_wepkey_ops = {
+ .proc_read = proc_read,
+ .proc_write = proc_write,
+ .proc_open = proc_wepkey_open,
+ .proc_release = proc_close,
+ .proc_lseek = default_llseek,
};
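/*
 * Editorial aside on the pattern above: struct proc_ops replaces struct
 * file_operations for procfs entries, with the handlers mapping
 * one-to-one (.open -> .proc_open, .read -> .proc_read,
 * .write -> .proc_write, .release -> .proc_release,
 * .llseek -> .proc_lseek); proc_ops carries no .owner field, which is
 * why THIS_MODULE drops out of the initializers.
 */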
static struct proc_dir_entry *airo_entry;
@@ -7790,16 +7782,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
case AIROGVLIST: ridcode = RID_APLIST; break;
case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
case AIROGSTAT: ridcode = RID_STATUS; break;
case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
case AIROGSTATSC32: ridcode = RID_STATS; break;
@@ -7813,7 +7797,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
return -EINVAL;
}
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index c4c83ab60cbc..536cd729c086 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3206,8 +3206,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
}
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
+static void ipw2100_irq_tasklet(unsigned long data)
{
+ struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
struct net_device *dev = priv->net_dev;
unsigned long flags;
u32 inta, tmp;
@@ -5833,7 +5834,7 @@ static int ipw2100_close(struct net_device *dev)
/*
* TODO: Fix this function... it's just wrong
*/
-static void ipw2100_tx_timeout(struct net_device *dev)
+static void ipw2100_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6006,7 +6007,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
+static void ipw2100_irq_tasklet(unsigned long data);
static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
@@ -6136,7 +6137,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw2100_irq_tasklet, (unsigned long)priv);
/* NOTE: We do not start the deferred work for status checks yet */
@@ -6167,7 +6168,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
printk(KERN_WARNING DRV_NAME
- "Error calling ioremap_nocache.\n");
+ "Error calling ioremap.\n");
err = -EIO;
goto fail;
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 31e43fc1d12b..5ef6f87a48ac 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1945,8 +1945,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(struct ipw_priv *priv)
+static void ipw_irq_tasklet(unsigned long data)
{
+ struct ipw_priv *priv = (struct ipw_priv *)data;
u32 inta, inta_mask, handled = 0;
unsigned long flags;
int rc = 0;
@@ -10677,7 +10678,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
return ret;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
index 436b819aeb36..43bab92a4148 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
@@ -240,13 +240,12 @@ static ssize_t debug_level_proc_write(struct file *file,
return strnlen(buf, len);
}
-static const struct file_operations debug_level_proc_fops = {
- .owner = THIS_MODULE,
- .open = debug_level_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = debug_level_proc_write,
+static const struct proc_ops debug_level_proc_ops = {
+ .proc_open = debug_level_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = debug_level_proc_write,
};
#endif /* CONFIG_LIBIPW_DEBUG */
@@ -263,7 +262,7 @@ static int __init libipw_init(void)
return -EIO;
}
e = proc_create("debug_level", 0644, libipw_proc,
- &debug_level_proc_fops);
+ &debug_level_proc_ops);
if (!e) {
remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
libipw_proc = NULL;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 1168055da182..206b43b9dff8 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1376,8 +1376,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
}
static void
-il3945_irq_tasklet(struct il_priv *il)
+il3945_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -3401,7 +3402,7 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il3945_irq_tasklet,
+ il3945_irq_tasklet,
(unsigned long)il);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 3664f56f8cbd..da6d4202611c 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <linux/units.h>
#include <net/mac80211.h>
@@ -4343,8 +4344,9 @@ il4965_synchronize_irq(struct il_priv *il)
}
static void
-il4965_irq_tasklet(struct il_priv *il)
+il4965_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -6237,7 +6239,7 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il4965_irq_tasklet,
+ il4965_irq_tasklet,
(unsigned long)il);
}
@@ -6467,7 +6469,7 @@ il4965_set_hw_params(struct il_priv *il)
il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
il->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+ celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY);
il->hw_params.sens = &il4965_sensitivity;
il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 32699b6a68c2..34d0579132ce 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/units.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
@@ -1104,7 +1105,7 @@ il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
/* get current temperature (Celsius) */
current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
- current_temp = KELVIN_TO_CELSIUS(current_temp);
+ current_temp = kelvin_to_celsius(current_temp);
/* select thermal txpower adjustment params, based on channel group
* (same frequency group used for mimo txatten adjustment) */
@@ -1610,8 +1611,8 @@ il4965_hw_get_temperature(struct il_priv *il)
temperature =
(temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
- D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
- KELVIN_TO_CELSIUS(temperature));
+ D_TEMP("Calibrated temperature: %dK, %ldC\n", temperature,
+ kelvin_to_celsius(temperature));
return temperature;
}
@@ -1670,12 +1671,12 @@ il4965_temperature_calib(struct il_priv *il)
if (il->temperature != temp) {
if (il->temperature)
- D_TEMP("Temperature changed " "from %dC to %dC\n",
- KELVIN_TO_CELSIUS(il->temperature),
- KELVIN_TO_CELSIUS(temp));
+ D_TEMP("Temperature changed " "from %ldC to %ldC\n",
+ kelvin_to_celsius(il->temperature),
+ kelvin_to_celsius(temp));
else
- D_TEMP("Temperature " "initialized to %dC\n",
- KELVIN_TO_CELSIUS(temp));
+ D_TEMP("Temperature " "initialized to %ldC\n",
+ kelvin_to_celsius(temp));
}
il->temperature = temp;
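/*
 * Editorial aside: kelvin_to_celsius()/celsius_to_kelvin() from
 * <linux/units.h> are long-returning helpers, unlike the removed
 * driver-local macros, which is why the D_TEMP format specifiers above
 * move from %d to %ld.
 */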
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index d966b29b45ee..348c17ce72f5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -699,7 +699,7 @@ il_eeprom_init(struct il_priv *il)
u32 gp = _il_rd(il, CSR_EEPROM_GP);
int sz;
int ret;
- u16 addr;
+ int addr;
/* allocate eeprom */
sz = il->cfg->eeprom_size;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index e7fb8e6bb9e7..bc9cd7e5ccb8 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -779,9 +779,6 @@ struct il_sensitivity_ranges {
u16 nrg_th_cca;
};
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
/**
* struct il_hw_params
* @bcast_id: f/w broadcast station ID
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index b92255b91b72..8a4579bb10d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -77,8 +77,7 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -104,8 +103,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 2b1ae0cecc83..7140a5f3ea8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -103,8 +103,7 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -131,8 +130,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -153,8 +151,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -181,8 +178,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 56dc335a788c..a22a830019c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -92,6 +92,7 @@
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_22000_SOSNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -199,7 +200,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
- .trans.csr = &iwl_csr_v1, \
.gp2_reg_addr = 0xa02c68, \
.mon_dram_regs = { \
.write_ptr = { \
@@ -217,7 +217,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_AX210, \
.trans.base_params = &iwl_ax210_base_params, \
- .trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 512, \
@@ -236,24 +235,16 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-const struct iwl_cfg iwl22000_2ac_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22500,
-};
-
-const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22500,
- .cdb = true,
-};
-
-const struct iwl_cfg iwl22000_2ac_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AC 22000",
- .fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22500,
-};
+/*
+ * If the device doesn't support HE, no need to have that many buffers.
+ * 22000 devices can split multiple frames into a single RB, so fewer are
+ * needed; AX210 cannot (but use smaller RBs by default) - these sizes
+ * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with
+ * additional overhead to account for processing time.
+ */
+#define IWL_NUM_RBDS_NON_HE 512
+#define IWL_NUM_RBDS_22000_HE 2048
+#define IWL_NUM_RBDS_AX210_HE 4096
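/*
 * Editorial check of the sizing above: 8 MSDUs x 256 A-MSDUs = 2048
 * frames per A-MPDU, hence IWL_NUM_RBDS_22000_HE = 2048 for parts that
 * can pack several frames into one RB; AX210, which cannot, doubles the
 * headroom to 4096, and non-HE configurations stay at 512.
 */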
const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
@@ -266,6 +257,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.tx_with_siso_diversity = true,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
@@ -278,6 +270,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
@@ -290,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
@@ -302,6 +296,7 @@ const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
@@ -314,6 +309,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
@@ -326,6 +322,7 @@ const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
@@ -338,6 +335,7 @@ const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
@@ -350,6 +348,7 @@ const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
@@ -363,6 +362,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650x_2ax_cfg = {
@@ -376,6 +376,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650w_2ax_cfg = {
@@ -389,6 +390,7 @@ const struct iwl_cfg killer1650w_2ax_cfg = {
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.trans.bisr_workaround = 1,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
/*
@@ -400,48 +402,56 @@ const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
@@ -454,6 +464,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -468,6 +479,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
@@ -482,6 +494,7 @@ const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -496,6 +509,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -510,6 +524,7 @@ const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -524,6 +539,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
@@ -538,18 +554,21 @@ const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.integrated = true,
.soc_latency = 5000,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
@@ -562,6 +581,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
@@ -574,6 +594,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
@@ -586,6 +607,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
@@ -598,6 +620,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
@@ -610,6 +633,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_jf = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
@@ -622,6 +646,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
@@ -634,6 +659,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
@@ -646,18 +672,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_22000_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
@@ -665,6 +694,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
@@ -672,12 +702,23 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
.name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
+ .fw_name_pre = IWL_22000_SOSNJ_A_GF4_A_FW_PRE,
+ .uhb_supported = true,
IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
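The pattern in the hunks above is uniform: every multi-queue config now carries an explicit .num_rbds, tiered by the highest PHY mode the device can receive. A minimal sketch of the tiering; the numeric values are assumptions for illustration, and the authoritative IWL_NUM_RBDS_* defines sit near the top of 22000.c, outside this diff:

	/* illustrative values only -- see the defines at the top of 22000.c */
	#define IWL_NUM_RBDS_NON_HE	512	/* 11ac and below */
	#define IWL_NUM_RBDS_22000_HE	2048	/* 22000 family running HE (11ax) */
	#define IWL_NUM_RBDS_AX210_HE	4096	/* AX210 family running HE */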
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index aab4495c6085..3591336dc644 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -75,8 +75,7 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -125,7 +124,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
- .trans.csr = &iwl_csr_v1,
};
#define IWL_DEVICE_5150 \
@@ -141,8 +139,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index 39ea81903dbe..b4a8a6804c39 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -124,8 +124,7 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -179,8 +178,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -216,8 +214,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -272,8 +269,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -306,8 +302,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -333,8 +328,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .trans.csr = &iwl_csr_v1
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
@@ -361,7 +355,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.eeprom_params = &iwl6000_eeprom_params,
.ht_params = &iwl6000_ht_params,
.led_mode = IWL_LED_BLINK,
- .trans.csr = &iwl_csr_v1,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index deb520aeb3f8..b72993e07fc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -154,8 +154,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .dccm_offset = IWL7000_DCCM_OFFSET, \
- .trans.csr = &iwl_csr_v1
+ .dccm_offset = IWL7000_DCCM_OFFSET
#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index b3cc477140c0..280d84fa5cb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -151,8 +151,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.apmg_not_supported = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000, \
- .trans.csr = &iwl_csr_v1
+ .min_umac_error_event_table = 0x800000
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e9155b9b5ee4..379ea788e424 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -138,13 +138,13 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
+ .num_rbds = 512, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.trans.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.min_umac_error_event_table = 0x800000, \
- .trans.csr = &iwl_csr_v1, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
@@ -171,6 +171,12 @@ static const struct iwl_tt_params iwl9000_tt_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+};
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
@@ -184,8 +190,10 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
IWL_DEVICE_9000,
};
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
+
const struct iwl_cfg iwl9260_2ac_160_cfg = {
- .name = "Intel(R) Wireless-AC 9260 160MHz",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index be5ef4c3e9d0..8d8380026180 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -237,11 +237,6 @@ struct iwl_sensitivity_ranges {
u16 nrg_th_cca;
};
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
/******************************************************************************
*
* Functions implemented in core module which are forward declared here
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index dc3f197f94d9..d42bc46fe566 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -10,6 +10,8 @@
*
*****************************************************************************/
+#include <linux/units.h>
+
/*
* DVM device-specific data & functions
*/
@@ -345,7 +347,7 @@ static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
- s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
+ s32 threshold = (s32)celsius_to_kelvin(CT_KILL_THRESHOLD_LEGACY) -
iwl_temp_calib_to_offset(priv);
priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
@@ -381,7 +383,7 @@ static void iwl5150_temperature(struct iwl_priv *priv)
vt = le32_to_cpu(priv->statistics.common.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
/* now vt hold the temperature in Kelvin */
- priv->temperature = KELVIN_TO_CELSIUS(vt);
+ priv->temperature = kelvin_to_celsius(vt);
iwl_tt_handler(priv);
}
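For context, the <linux/units.h> helpers are drop-in replacements for the removed driver-local macros; a minimal sketch of the equivalence (the example temperatures are arbitrary):

	#include <linux/units.h>

	/* kelvin_to_celsius()/celsius_to_kelvin() round absolute zero
	 * (-273150 mC) to whole degrees, so they behave exactly like the
	 * old open-coded +/-273 macros: */
	long k = celsius_to_kelvin(114);	/* 114 + 273 = 387 */
	long c = kelvin_to_celsius(387);	/* 387 - 273 = 114 */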
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 4f2789bb3b5b..598ee7315558 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1255,7 +1255,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
hw = iwl_alloc_all();
if (!hw) {
- pr_err("%s: Cannot allocate network device\n", cfg->name);
+ pr_err("%s: Cannot allocate network device\n", trans->name);
goto out;
}
@@ -1390,7 +1390,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* 2. Read REV register
***********************/
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->trans->hw_rev);
+ priv->trans->name, priv->trans->hw_rev);
if (iwl_trans_start_hw(priv->trans))
goto out_free_hw;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index cd73fc5cfcbb..fd454836adbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
- memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
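Dropping the memset() is only safe if the transport hands out pre-zeroed commands; the allocator side of this series presumably looks like the sketch below (kmem_cache_zalloc() and trans->dev_cmd_pool exist in the driver, but this exact body is an assumption, not quoted from the patch):

	static inline
	struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
	{
		/* a zeroing allocation makes the caller's memset() redundant */
		return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
	}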
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 40fe2d667622..48d375a86d86 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
- int i, n_profiles, tbl_rev;
- int ret = 0;
+ int i, n_profiles, tbl_rev, pos;
+ int ret = 0;
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
+ /* the tables start at element 3 */
+ pos = 3;
+ for (i = 0; i < n_profiles; i++) {
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
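The hoist matters because the ACPI package lays the per-profile tables out back-to-back starting at element 3; resetting pos inside the loop made every iteration re-read the first profile's table. Schematically (table_size and the array/field names here are illustrative, not the function's real identifiers):

	/* intended walk: pos accumulates across profiles */
	pos = 3;
	for (i = 0; i < n_profiles; i++)
		for (j = 0; j < table_size; j++)
			sar_profiles[i + 1].table[j] =
				wifi_pkg->package.elements[pos++].integer.value;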
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 7a0fe5adefa5..a0d6802c2715 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -240,7 +240,7 @@ enum iwl_tof_responder_cfg_flags {
};
/**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * struct iwl_tof_responder_config_cmd_v6 - ToF AP mode (for debug)
* @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
* @responder_cfg_flags: &iwl_tof_responder_cfg_flags
* @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
@@ -258,7 +258,7 @@ enum iwl_tof_responder_cfg_flags {
* @bssid: Current AP BSSID
* @reserved2: reserved
*/
-struct iwl_tof_responder_config_cmd {
+struct iwl_tof_responder_config_cmd_v6 {
__le32 cmd_valid_fields;
__le32 responder_cfg_flags;
u8 bandwidth;
@@ -274,6 +274,42 @@ struct iwl_tof_responder_config_cmd {
__le16 reserved2;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */
+
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
@@ -403,7 +439,7 @@ enum iwl_initiator_ap_flags {
};
/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v3 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @bandwidth: AP bandwidth. One of iwl_tof_bandwidth.
@@ -420,7 +456,7 @@ enum iwl_initiator_ap_flags {
* @reserved: For alignment and future use
* @tsf_delta: not in use
*/
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v3 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 bandwidth;
@@ -435,6 +471,72 @@ struct iwl_tof_range_req_ap_entry {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
/**
+ * enum iwl_location_frame_format - location frame formats
+ * @IWL_LOCATION_FRAME_FORMAT_LEGACY: legacy
+ * @IWL_LOCATION_FRAME_FORMAT_HT: HT
+ * @IWL_LOCATION_FRAME_FORMAT_VHT: VHT
+ * @IWL_LOCATION_FRAME_FORMAT_HE: HE
+ */
+enum iwl_location_frame_format {
+ IWL_LOCATION_FRAME_FORMAT_LEGACY,
+ IWL_LOCATION_FRAME_FORMAT_HT,
+ IWL_LOCATION_FRAME_FORMAT_VHT,
+ IWL_LOCATION_FRAME_FORMAT_HE,
+};
+
+/**
+ * enum iwl_location_bw - location bandwidth selection
+ * @IWL_LOCATION_BW_20MHZ: 20MHz
+ * @IWL_LOCATION_BW_40MHZ: 40MHz
+ * @IWL_LOCATION_BW_80MHZ: 80MHz
+ */
+enum iwl_location_bw {
+ IWL_LOCATION_BW_20MHZ,
+ IWL_LOCATION_BW_40MHZ,
+ IWL_LOCATION_BW_80MHZ,
+};
+
+#define HLTK_11AZ_LEN 32
+#define TK_11AZ_LEN 32
+
+#define LOCATION_BW_POS 4
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ */
+struct iwl_tof_range_req_ap_entry {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ __le16 reserved;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */
+
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -536,6 +638,38 @@ struct iwl_tof_range_req_cmd_v5 {
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
/**
+ * struct iwl_tof_range_req_cmd_v7 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v3.
+ */
+struct iwl_tof_range_req_cmd_v7 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+
+/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
@@ -565,7 +699,7 @@ struct iwl_tof_range_req_cmd {
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/*
* enum iwl_tof_range_request_status - status of the sent request
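A short sketch of how the packed format_bw field would be filled, using only the enums and LOCATION_BW_POS introduced above (the assignment target "entry" is illustrative):

	/* HE frames at 80MHz: frame format in bits 0-3, bandwidth in bits 4-7 */
	u8 format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
	format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
	entry->format_bw = format_bw;	/* struct iwl_tof_range_req_ap_entry */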
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 408798f351c6..1b2b5fa56e19 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -922,21 +922,6 @@ struct iwl_scan_probe_params_v4 {
#define SCAN_MAX_NUM_CHANS_V3 67
/**
- * struct iwl_scan_channel_params_v3
- * @flags: channel flags &enum iwl_scan_channel_flags
- * @count: num of channels in scan request
- * @reserved: for future use and alignment
- * @channel_config: array of explicit channel configurations
- * for 2.4Ghz and 5.2Ghz bands
- */
-struct iwl_scan_channel_params_v3 {
- u8 flags;
- u8 count;
- __le16 reserved;
- struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
-} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
-
-/**
* struct iwl_scan_channel_params_v4
* @flags: channel flags &enum iwl_scan_channel_flags
* @count: num of channels in scan request
@@ -1011,20 +996,6 @@ struct iwl_scan_periodic_parms_v1 {
} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
/**
- * struct iwl_scan_req_params_v11
- * @general_params: &struct iwl_scan_general_params_v10
- * @channel_params: &struct iwl_scan_channel_params_v3
- * @periodic_params: &struct iwl_scan_periodic_parms_v1
- * @probe_params: &struct iwl_scan_probe_params_v3
- */
-struct iwl_scan_req_params_v11 {
- struct iwl_scan_general_params_v10 general_params;
- struct iwl_scan_channel_params_v3 channel_params;
- struct iwl_scan_periodic_parms_v1 periodic_params;
- struct iwl_scan_probe_params_v3 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
-
-/**
* struct iwl_scan_req_params_v12
* @general_params: &struct iwl_scan_general_params_v10
* @channel_params: &struct iwl_scan_channel_params_v4
@@ -1053,18 +1024,6 @@ struct iwl_scan_req_params_v13 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
- * struct iwl_scan_req_umac_v11
- * @uid: scan id, &enum iwl_umac_scan_uid_offsets
- * @ooc_priority: out of channel priority - &enum iwl_scan_priority
- * @scan_params: scan parameters
- */
-struct iwl_scan_req_umac_v11 {
- __le32 uid;
- __le32 ooc_priority;
- struct iwl_scan_req_params_v11 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
-
-/**
* struct iwl_scan_req_umac_v12
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f89a9e16a8c0..f1d1fe96fecc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -813,6 +813,7 @@ enum iwl_mac_beacon_flags {
IWL_MAC_BEACON_ANT_A = BIT(9),
IWL_MAC_BEACON_ANT_B = BIT(10),
IWL_MAC_BEACON_ANT_C = BIT(11),
+ IWL_MAC_BEACON_FILS = BIT(12),
};
/**
@@ -820,6 +821,7 @@ enum iwl_mac_beacon_flags {
* @byte_cnt: byte count of the beacon frame.
* @flags: least significant byte for rate code. The most significant byte
* is &enum iwl_mac_beacon_flags.
+ * @short_ssid: Short SSID
* @reserved: reserved
* @template_id: currently equal to the mac context id of the corresponding mac.
* @tim_idx: the offset of the tim IE in the beacon
@@ -831,14 +833,15 @@ enum iwl_mac_beacon_flags {
struct iwl_mac_beacon_cmd {
__le16 byte_cnt;
__le16 flags;
- __le64 reserved;
+ __le32 short_ssid;
+ __le32 reserved;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[0];
-} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index ed90dd104366..91df1ee25dd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -929,7 +929,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ strncpy(dump_info->dev_human_readable, fwrt->trans->name,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
@@ -1230,13 +1230,15 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
iter->lmac = 0;
}
- if (!iter->internal_txf)
+ if (!iter->internal_txf) {
for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
iter->fifo_size =
cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
return true;
}
+ iter->fifo--;
+ }
iter->internal_txf = 1;
@@ -2351,9 +2353,6 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
u32 occur, delay;
unsigned long idx;
- if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
- return -EBUSY;
-
if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
tp_id);
@@ -2669,12 +2668,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
{
int ret = 0;
- /* if the FW crashed or not debug monitor cfg was given, there is
- * no point in changing the recording state
- */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
- (!fwrt->trans->dbg.dest_tlv &&
- fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index ca3b1a461dea..89f74116569d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -320,31 +320,6 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
-static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
- char *buf, size_t count)
-{
- u32 new_domain;
- int ret;
-
- if (!iwl_trans_fw_running(fwrt->trans))
- return -EIO;
-
- ret = kstrtou32(buf, 0, &new_domain);
- if (ret)
- return ret;
-
- if (new_domain != fwrt->trans->dbg.domains_bitmap) {
- ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
- if (ret)
- return ret;
-
- iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
- NULL);
- }
-
- return count;
-}
-
static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
size_t size, char *buf)
{
@@ -352,7 +327,7 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
fwrt->trans->dbg.domains_bitmap);
}
-FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
+FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
@@ -360,5 +335,5 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
- FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
+ FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 994880a83652..90ca5f929cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -251,7 +251,7 @@ struct iwl_fw_dbg {
struct iwl_fw {
u32 ucode_ver;
- char fw_version[ETHTOOL_FWVERS_LEN];
+ char fw_version[64];
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index c24575ff0e54..f8c6ed823bc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -69,7 +69,7 @@
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
-#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
+#define IWL_FW_DBG_DOMAIN IWL_TRANS_FW_DBG_DOMAIN(fwrt->trans)
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
@@ -130,14 +130,6 @@ struct iwl_txf_iter_data {
};
/**
- * enum iwl_fw_runtime_status - fw runtime status flags
- * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
- */
-enum iwl_fw_runtime_status {
- STATUS_GEN_ACTIVE_TRIGS,
-};
-
-/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
* @cfg: NIC configuration
@@ -150,7 +142,6 @@ enum iwl_fw_runtime_status {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
- * @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -171,8 +162,6 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
- unsigned long status;
-
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 317eac066082..be6a2bf9ce74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -285,52 +285,6 @@ struct iwl_pwr_tx_backoff {
};
/**
- * struct iwl_csr_params
- *
- * @flag_sw_reset: reset the device
- * @flag_mac_clock_ready:
- * Indicates MAC (ucode processor, etc.) is powered up and can run.
- * Internal resources are accessible.
- * NOTE: This does not indicate that the processor is actually running.
- * NOTE: This does not indicate that device has completed
- * init or post-power-down restore of internal SRAM memory.
- * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
- * SRAM is restored and uCode is in normal operation mode.
- * This note is relevant only for pre 5xxx devices.
- * NOTE: After device reset, this bit remains "0" until host sets
- * INIT_DONE
- * @flag_init_done: Host sets this to put device into fully operational
- * D0 power mode. Host resets this after SW_RESET to put device into
- * low power mode.
- * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup,
- * to allow host access to device-internal resources. Host must wait for
- * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device
- * registers.
- * @flag_val_mac_access_en: mac access is enabled
- * @flag_master_dis: disable master
- * @flag_stop_master: stop master
- * @addr_sw_reset: address for resetting the device
- * @mac_addr0_otp: first part of MAC address from OTP
- * @mac_addr1_otp: second part of MAC address from OTP
- * @mac_addr0_strap: first part of MAC address from strap
- * @mac_addr1_strap: second part of MAC address from strap
- */
-struct iwl_csr_params {
- u8 flag_sw_reset;
- u8 flag_mac_clock_ready;
- u8 flag_init_done;
- u8 flag_mac_access_req;
- u8 flag_val_mac_access_en;
- u8 flag_master_dis;
- u8 flag_stop_master;
- u8 addr_sw_reset;
- u32 mac_addr0_otp;
- u32 mac_addr1_otp;
- u32 mac_addr0_strap;
- u32 mac_addr1_strap;
-};
-
-/**
* struct iwl_cfg_trans - information needed to start the trans
*
* These values cannot be changed when multiple configs are used for a
@@ -348,7 +302,6 @@ struct iwl_csr_params {
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
- const struct iwl_csr_params *csr;
enum iwl_device_family device_family;
u32 umac_prph_offset;
u32 rf_id:1,
@@ -431,6 +384,8 @@ struct iwl_fw_mon_regs {
* @uhb_supported: ultra high band channels supported
* @min_256_ba_txq_size: minimum number of slots required in a TX queue which
* supports 256 BA aggregation
+ * @num_rbds: number of receive buffer descriptors to use
+ * (only used for multi-queue capable devices)
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -485,6 +440,7 @@ struct iwl_cfg {
u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
+ u16 num_rbds;
u32 min_umac_error_event_table;
u32 extra_phy_cfg_flags;
u32 d3_debug_data_base_addr;
@@ -496,12 +452,22 @@ struct iwl_cfg {
const struct iwl_fw_mon_regs mon_smem_regs;
};
-extern const struct iwl_csr_params iwl_csr_v1;
-extern const struct iwl_csr_params iwl_csr_v2;
+#define IWL_CFG_ANY (~0)
+
+struct iwl_dev_info {
+ u16 device;
+ u16 subdevice;
+ const struct iwl_cfg *cfg;
+ const char *name;
+};
/*
* This list declares the config structures for all devices.
*/
+extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
+extern const char iwl9260_160_name[];
+extern const char iwl9560_160_name[];
+
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -595,9 +561,6 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
@@ -636,6 +599,7 @@ extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
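The new struct iwl_dev_info together with the exported name strings suggests a PCI match table keyed by (device, subdevice), with IWL_CFG_ANY as the wildcard; a hypothetical entry (the IDs are illustrative) might look like:

	/* hypothetical table entry -- device/subdevice IDs are illustrative */
	static const struct iwl_dev_info iwl_dev_info_table[] = {
		{ .device = 0x2526, .subdevice = IWL_CFG_ANY,
		  .cfg = &iwl9260_2ac_160_cfg, .name = iwl9260_160_name },
	};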
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index 5ed07e37e3ee..eeaa8cbdddce 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,12 +64,12 @@
* the init done for driver command that configures several system modes
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
- * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * @IWL_CTXT_INFO_RB_CB_SIZE: mask of the RBD Cyclic Buffer Size
* exponent, the actual size is 2**value, valid sizes are 8-2048.
* The value is four bits long. Maximum valid exponent is 12
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
* default is short format - not supported by the driver)
- * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
+ * @IWL_CTXT_INFO_RB_SIZE: RB size mask
* (values are IWL_CTXT_INFO_RB_SIZE_*K)
* @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
* @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
@@ -83,12 +83,12 @@
* @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
*/
enum iwl_context_info_flags {
- IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
- IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
- IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
- IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
- IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
- IWL_CTXT_INFO_RB_SIZE_POS = 9,
+ IWL_CTXT_INFO_AUTO_FUNC_INIT = 0x0001,
+ IWL_CTXT_INFO_EARLY_DEBUG = 0x0002,
+ IWL_CTXT_INFO_ENABLE_CDMP = 0x0004,
+ IWL_CTXT_INFO_RB_CB_SIZE = 0x00f0,
+ IWL_CTXT_INFO_TFD_FORMAT_LONG = 0x0100,
+ IWL_CTXT_INFO_RB_SIZE = 0x1e00,
IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 92d9898ab7c2..cb9e8e189a1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -256,6 +256,7 @@
/* RESET */
#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
@@ -278,11 +279,35 @@
* 4: GOING_TO_SLEEP
* Indicates MAC is entering a power-saving sleep power-down.
* Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
*/
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
-#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
#define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
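With the per-device csr_params gone, the wake/access handshake can be written directly against these named bits; a sketch modeled on the driver's NIC-access path (details illustrative, not quoted from this patch):

	/* request MAC wakeup, then wait for the clock (and !GOING_TO_SLEEP) */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			   CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP,
			   15000);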
@@ -379,7 +404,7 @@ enum {
/* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002)
/*
* UCODE-DRIVER GP (general purpose) mailbox register 1
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index f266647dc08c..4208e720f6e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -290,10 +290,19 @@ void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
+ u32 domain = le32_to_cpu(hdr->domain);
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
int ret;
+ if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+ !(domain & trans->dbg.domains_bitmap)) {
+ IWL_DEBUG_FW(trans,
+ "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
+ domain, trans->dbg.domains_bitmap);
+ return;
+ }
+
if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
@@ -480,7 +489,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
if (!frag || frag->size || !pages)
return -EIO;
- while (pages) {
+ /*
+ * We try to allocate as many pages as we can, starting with
+ * the requested amount and going down until we can allocate
+ * something. Because of DIV_ROUND_UP(), pages will never go
+ * down to 0 and stop the loop, so stop when pages reaches 1,
+ * which is too small anyway.
+ */
+ while (pages > 1) {
block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
&physical,
GFP_KERNEL | __GFP_NOWARN);
@@ -660,7 +676,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
list_for_each_entry(node, hcmd_list, list) {
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
- u32 domain = le32_to_cpu(hcmd->hdr.domain);
u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
struct iwl_host_cmd cmd = {
.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
@@ -668,10 +683,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
.data = { hcmd_data->data, },
};
- if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
- !(domain & fwrt->trans->dbg.domains_bitmap))
- continue;
-
iwl_trans_send_cmd(fwrt->trans, &cmd);
}
}
@@ -891,55 +902,17 @@ static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
struct iwl_dbg_tlv_time_point_data *tp)
{
- struct iwl_dbg_tlv_node *node, *tmp;
+ struct iwl_dbg_tlv_node *node;
struct list_head *trig_list = &tp->trig_list;
struct list_head *active_trig_list = &tp->active_trig_list;
- list_for_each_entry_safe(node, tmp, active_trig_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
list_for_each_entry(node, trig_list, list) {
struct iwl_ucode_tlv *tlv = &node->tlv;
- struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
- u32 domain = le32_to_cpu(trig->hdr.domain);
-
- if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
- !(domain & fwrt->trans->dbg.domains_bitmap))
- continue;
iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
}
}
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
-{
- int i;
-
- if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
- return -EBUSY;
-
- iwl_fw_flush_dumps(fwrt);
-
- fwrt->trans->dbg.domains_bitmap = new_domain;
-
- IWL_DEBUG_FW(fwrt,
- "WRT: Generating active triggers list, domain 0x%x\n",
- fwrt->trans->dbg.domains_bitmap);
-
- for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
- struct iwl_dbg_tlv_time_point_data *tp =
- &fwrt->trans->dbg.time_point[i];
-
- iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
- }
-
- clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
-
- return 0;
-}
-
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
union iwl_dbg_tlv_tp_data *tp_data,
@@ -1013,7 +986,16 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
- iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+ struct iwl_dbg_tlv_time_point_data *tp =
+ &fwrt->trans->dbg.time_point[i];
+
+ iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+ }
*ini_dest = IWL_FW_INI_LOCATION_INVALID;
for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index f18946872569..1360676b3b21 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -105,7 +105,6 @@ void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4096ccf58b07..2d1cb4647c3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -493,6 +493,16 @@ static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
}
}
+static const char *iwl_reduced_fw_name(struct iwl_drv *drv)
+{
+ const char *name = drv->firmware_name;
+
+ if (strncmp(name, "iwlwifi-", 8) == 0)
+ name += 8;
+
+ return name;
+}
+
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
@@ -551,12 +561,12 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u.%u%s",
+ "%u.%u.%u.%u%s %s",
IWL_UCODE_MAJOR(drv->fw.ucode_ver),
IWL_UCODE_MINOR(drv->fw.ucode_ver),
IWL_UCODE_API(drv->fw.ucode_ver),
IWL_UCODE_SERIAL(drv->fw.ucode_ver),
- buildstr);
+ buildstr, iwl_reduced_fw_name(drv));
/* Verify size of file vs. image size info in file's header */
@@ -636,12 +646,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u.%u%s",
+ "%u.%u.%u.%u%s %s",
IWL_UCODE_MAJOR(drv->fw.ucode_ver),
IWL_UCODE_MINOR(drv->fw.ucode_ver),
IWL_UCODE_API(drv->fw.ucode_ver),
IWL_UCODE_SERIAL(drv->fw.ucode_ver),
- buildstr);
+ buildstr, iwl_reduced_fw_name(drv));
data = ucode->data;
@@ -895,11 +905,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (major >= 35)
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%08x.%u", major, minor, local_comp);
+ "%u.%08x.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
else
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%u.%u", major, minor, local_comp);
+ "%u.%u.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1647,6 +1659,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
#endif
+ drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
+
ret = iwl_request_firmware(drv, true);
if (ret) {
IWL_ERR(trans, "Couldn't request the fw\n");
@@ -1817,9 +1831,6 @@ MODULE_PARM_DESC(antenna_coupling,
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 8836f85afe85..bf673ce5f183 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -611,10 +611,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-#define MQ_RX_TABLE_SIZE 512
-#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1)
-#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1)
-#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
+#define RX_POOL_SIZE(rbds) ((rbds) - 1 + \
IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
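A worked instance of the parameterized macro, assuming the usual values of the other constants (IWL_MAX_RX_HW_QUEUES = 16, RX_CLAIM_REQ_ALLOC = 8, RX_POST_REQ_ALLOC = 2; assumed, defined elsewhere in the driver):

	/* RX_POOL_SIZE(512)  = (512 - 1)  + 16 * (8 - 2) = 607  buffers */
	/* RX_POOL_SIZE(4096) = (4096 - 1) + 16 * (8 - 2) = 4191 buffers */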
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 1b7414bf7bef..2139f0b8f2bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -70,36 +70,6 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
-const struct iwl_csr_params iwl_csr_v1 = {
- .flag_mac_clock_ready = 0,
- .flag_val_mac_access_en = 0,
- .flag_init_done = 2,
- .flag_mac_access_req = 3,
- .flag_sw_reset = 7,
- .flag_master_dis = 8,
- .flag_stop_master = 9,
- .addr_sw_reset = CSR_BASE + 0x020,
- .mac_addr0_otp = 0x380,
- .mac_addr1_otp = 0x384,
- .mac_addr0_strap = 0x388,
- .mac_addr1_strap = 0x38C
-};
-
-const struct iwl_csr_params iwl_csr_v2 = {
- .flag_init_done = 6,
- .flag_mac_clock_ready = 20,
- .flag_val_mac_access_en = 20,
- .flag_mac_access_req = 21,
- .flag_master_dis = 28,
- .flag_stop_master = 29,
- .flag_sw_reset = 31,
- .addr_sw_reset = CSR_BASE + 0x024,
- .mac_addr0_otp = 0x30,
- .mac_addr1_otp = 0x34,
- .mac_addr0_strap = 0x38,
- .mac_addr1_strap = 0x3C
-};
-
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
@@ -506,8 +476,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(cfg_trans->csr->flag_init_done));
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -518,8 +487,8 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* and accesses to uCode SRAM.
*/
err = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(cfg_trans->csr->flag_mac_clock_ready),
- BIT(cfg_trans->csr->flag_mac_clock_ready),
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (err < 0)
IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index ebea3f308b5d..82e5cac23d8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
* @nvm_file: specifies a external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
- bool lar_disable;
bool fw_monitor;
bool disable_11ac;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 1e240a2a8329..bab0999f002c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DC_HIGH = BIT(12),
};
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 2.4Ghz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 5Ghz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+ REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
+ REG_CAPA_BF_CCD_HIGH_BAND = BIT(1),
+ REG_CAPA_160MHZ_ALLOWED = BIT(2),
+ REG_CAPA_80MHZ_ALLOWED = BIT(3),
+ REG_CAPA_MCS_8_ALLOWED = BIT(4),
+ REG_CAPA_MCS_9_ALLOWED = BIT(5),
+ REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
+ REG_CAPA_DC_HIGH_ENABLED = BIT(9),
+};
+
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -801,12 +829,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data)
{
- __le32 mac_addr0 =
- cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr0_strap));
- __le32 mac_addr1 =
- cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr1_strap));
+ __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
+ __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
/*
@@ -816,10 +840,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
if (is_valid_ether_addr(data->hw_addr))
return;
- mac_addr0 = cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr0_otp));
- mac_addr1 = cpu_to_le32(iwl_read32(trans,
- trans->trans_cfg->csr->mac_addr1_otp));
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
@@ -939,10 +961,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+ u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
bool lar_enabled;
@@ -1022,7 +1045,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
- if (lar_fw_supported && lar_enabled)
+ if (lar_enabled &&
+ fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1062,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
+ u16 cap_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1101,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
(flags & NL80211_RRF_NO_IR))
flags |= NL80211_RRF_GO_CONCURRENT;
+ /*
+	 * cap_flags is per regulatory domain, so apply it to every channel
+ */
+ if (ch_idx >= NUM_2GHZ_CHANNELS) {
+ if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_160MHZ;
+ }
+
return flags;
}
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info)
+ u16 geo_info, u16 cap)
{
int ch_idx;
u16 ch_flags;
@@ -1140,7 +1179,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ ch_flags, cap,
+ cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1445,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
};
int ret;
- bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
@@ -1485,7 +1522,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
- if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+ if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+ fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
nvm->lar_enabled = true;
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
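
A worked example of the new capability word's effect once it reaches iwl_nvm_get_regdom_bw_flags(): with cap == 0 (no *_ALLOWED bits set), every wide 5 GHz configuration is ruled out. This is only a sketch of the bit logic shown above, assuming the standard NL80211_RRF_* flags:

	u16 cap = 0;	/* regulatory domain allows nothing extra */
	u32 flags = 0;

	if (cap & REG_CAPA_40MHZ_FORBIDDEN)	/* clear: HT40 stays legal */
		flags |= NL80211_RRF_NO_HT40;
	if (!(cap & REG_CAPA_80MHZ_ALLOWED))	/* set: 80 MHz forbidden */
		flags |= NL80211_RRF_NO_80MHZ;
	if (!(cap & REG_CAPA_160MHZ_ALLOWED))	/* set: 160 MHz forbidden */
		flags |= NL80211_RRF_NO_160MHZ;
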
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index b7e1ddf8f177..fb0b385d10fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
*/
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+ u8 tx_chains, u8 rx_chains);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info);
+ u16 geo_info, u16 cap);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 14c8ba23f3b9..1136d9784f9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -411,13 +411,6 @@ enum {
HW_STEP_LOCATION_BITS = 24,
};
-#define AUX_MISC_MASTER1_EN 0xA20818
-enum aux_misc_master1_en {
- AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
-};
-
-#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
-#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
@@ -430,6 +423,9 @@ enum aux_misc_master1_en {
#define UMAG_SB_CPU_1_STATUS 0xA038C0
#define UMAG_SB_CPU_2_STATUS 0xA038C4
#define UMAG_GEN_HW_STATUS 0xA038C8
+#define UREG_UMAC_CURRENT_PC 0xa05c18
+#define UREG_LMAC1_CURRENT_PC 0xa05c1c
+#define UREG_LMAC2_CURRENT_PC 0xa05c20
/* For UMAG_GEN_HW_STATUS reg check */
enum {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 28bdc9a9617e..f91197e4ae40 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -66,7 +66,9 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops)
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd),
- sizeof(void *),
- SLAB_HWCACHE_ALIGN,
- NULL);
+ cmd_pool_size, cmd_pool_align,
+ SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;
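
With the pool geometry passed in by the caller, each transport can size the command cache for its own TX command layout instead of the fixed sizeof(struct iwl_device_cmd). A minimal sketch of a caller, with illustrative size and alignment values and a hypothetical &trans_ops (the real PCIe caller is not part of this hunk):

	struct iwl_trans *trans;

	/* illustrative: room for the TX command header plus payload */
	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
				&trans_ops,
				sizeof(struct iwl_device_tx_cmd) + 320,
				sizeof(void *));
	if (!trans)
		return NULL;
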
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 8cadad7364ac..7b3b1f4c99b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -112,6 +112,8 @@
* 6) Eventually, the free function will be called.
*/
+#define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
+
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40
@@ -193,6 +195,18 @@ struct iwl_device_cmd {
};
} __packed;
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+ struct iwl_cmd_header hdr;
+ u8 payload[];
+} __packed;
+
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
/*
@@ -358,6 +372,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
}
}
+static inline int
+iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
+{
+ switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return 2 * 1024;
+ case IWL_AMSDU_4K:
+ return 4 * 1024;
+ case IWL_AMSDU_8K:
+ return 8 * 1024;
+ case IWL_AMSDU_12K:
+ return 12 * 1024;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
struct iwl_hcmd_names {
u8 cmd_id;
const char *const cmd_name;
@@ -544,7 +576,7 @@ struct iwl_trans_ops {
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue);
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
@@ -839,6 +871,8 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
+ const char *name;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -948,22 +982,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
return trans->ops->dump_data(trans, dump_mask);
}
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+ return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_cmd *dev_cmd)
+ struct iwl_device_tx_cmd *dev_cmd)
{
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue)
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -1271,7 +1305,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops);
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
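
Note the kmem_cache_zalloc() switch in iwl_trans_alloc_tx_cmd() above: since the pool now holds struct iwl_device_tx_cmd objects whose payload layout varies by device, zeroing at allocation time replaces the open-coded memset that the tx.c hunk later in this patch removes. The resulting allocation pattern:

	struct iwl_device_tx_cmd *dev_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(trans);
	if (unlikely(!dev_cmd))
		return NULL;

	/* memory is already zeroed, only the header needs filling */
	dev_cmd->hdr.cmd = TX_CMD;
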
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 60aff2ecec12..58df25e2fb32 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -154,5 +154,6 @@
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
+#define IWL_MVM_USE_NSSN_SYNC 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 43ebb2149b63..22a32eb10f01 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -989,6 +989,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) {
ret = 1;
@@ -1083,6 +1085,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
ieee80211_restart_hw(mvm->hw);
}
}
+
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
}
out_noreset:
mutex_unlock(&mvm->mutex);
@@ -1893,27 +1897,55 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
-static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
{
- u32 base = mvm->trans->dbg.lmac_error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
- u32 error_id;
+ __le32 err_id;
} err_info;
- iwl_trans_read_mem_bytes(mvm->trans, base,
+ if (!base)
+ return false;
+
+ iwl_trans_read_mem_bytes(trans, base,
&err_info, sizeof(err_info));
+ if (err_info.valid && err_id)
+ *err_id = le32_to_cpu(err_info.err_id);
- if (err_info.valid &&
- err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ return !!err_info.valid;
+}
+
+static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 err_id;
+
+ /* check for lmac1 error */
+ if (iwl_mvm_rt_status(mvm->trans,
+ mvm->trans->dbg.lmac_error_event_table[0],
+ &err_id)) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ return true;
}
- return err_info.valid;
+
+ /* check if we have lmac2 set and check for error */
+ if (iwl_mvm_rt_status(mvm->trans,
+ mvm->trans->dbg.lmac_error_event_table[1], NULL))
+ return true;
+
+ /* check for umac error */
+ if (iwl_mvm_rt_status(mvm->trans,
+ mvm->trans->dbg.umac_error_event_table, NULL))
+ return true;
+
+ return false;
}
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
@@ -1929,6 +1961,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index aa659162a7c2..190cf15b825c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -752,7 +752,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- mvm->fwrt.trans->cfg->name);
+ mvm->fwrt.trans->name);
pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
mvm->fwrt.dev->bus->name);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 9f4b117db9d7..6e1ea921c299 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -8,6 +8,7 @@
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -208,10 +210,11 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
-static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- u8 *channel, u8 *bandwidth,
- u8 *ctrl_ch_position)
+static int
+iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *bandwidth,
+ u8 *ctrl_ch_position)
{
u32 freq = peer->chandef.chan->center_freq;
@@ -243,15 +246,54 @@ static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
}
static int
+iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ *channel = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
+
+ return 0;
+}
+
+static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry_v2 *target)
{
int ret;
- ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
- &target->bandwidth,
- &target->ctrl_ch_position);
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
if (ret)
return ret;
@@ -278,18 +320,11 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
-static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
{
- int ret;
-
- ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
- &target->bandwidth,
- &target->ctrl_ch_position);
- if (ret)
- return ret;
-
memcpy(target->bssid, peer->addr, ETH_ALEN);
target->burst_period =
cpu_to_le16(peer->ftm.burst_period);
@@ -314,60 +349,166 @@ static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
FTM_PUT_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
FTM_PUT_FLAG(ALGO_FFT);
+}
+
+static int
+iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v3 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ /*
+	 * Versions 3 and 4 have some common fields, so
+	 * iwl_mvm_ftm_put_target_common() can be used for both versions.
+ */
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
return 0;
}
-int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_pmsr_request *req)
+static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
+{
+ u32 status;
+ int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
+
+ if (!err && status) {
+ IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
+ status);
+ err = iwl_ftm_range_request_status_to_err(status);
+ }
+
+ return err;
+}
+
+static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
- struct iwl_tof_range_req_cmd cmd;
- bool new_api = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
- u8 num_of_ap;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v5,
+ .len[0] = sizeof(cmd_v5),
};
- u32 status = 0;
- int err, i;
+ u8 i;
+ int err;
- lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
- if (mvm->ftm_initiator.req)
- return -EBUSY;
+ for (i = 0; i < cmd_v5.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- if (new_api) {
- iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
- hcmd.data[0] = &cmd;
- hcmd.len[0] = sizeof(cmd);
- num_of_ap = cmd.num_of_ap;
- } else {
- iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
- hcmd.data[0] = &cmd_v5;
- hcmd.len[0] = sizeof(cmd_v5);
- num_of_ap = cmd_v5.num_of_ap;
+ err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
+ if (err)
+ return err;
}
- for (i = 0; i < num_of_ap; i++) {
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v7 cmd_v7;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v7,
+ .len[0] = sizeof(cmd_v7),
+ };
+ u8 i;
+ int err;
+
+ /*
+	 * Versions 7 and 8 have the same structure except for the responders
+	 * list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
+ */
+ iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req);
+
+ for (i = 0; i < cmd_v7.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- if (new_api)
- err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
- else
- err = iwl_mvm_ftm_put_target_v2(mvm, peer,
- &cmd_v5.ap[i]);
+ err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
+ if (err)
+ return err;
+ }
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
if (err)
return err;
}
- err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
- if (!err && status) {
- IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
- status);
- err = iwl_ftm_range_request_status_to_err(status);
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ int err;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->ftm_initiator.req)
+ return -EBUSY;
+
+ if (new_api) {
+ u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD);
+
+ if (cmd_ver == 8)
+ err = iwl_mvm_ftm_start_v8(mvm, vif, req);
+ else
+ err = iwl_mvm_ftm_start_v7(mvm, vif, req);
+
+ } else {
+ err = iwl_mvm_ftm_start_v5(mvm, vif, req);
}
if (!err) {
@@ -389,6 +530,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
if (req != mvm->ftm_initiator.req)
return;
+ iwl_mvm_ftm_reset(mvm);
+
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
LOCATION_GROUP, 0),
0, sizeof(cmd), &cmd))
@@ -502,7 +645,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
lockdep_assert_held(&mvm->mutex);
if (!mvm->ftm_initiator.req) {
- IWL_ERR(mvm, "Got FTM response but have no request?\n");
return;
}
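
The (void *) casts in iwl_mvm_ftm_put_target_v3() and iwl_mvm_ftm_start_v7() depend on the older layouts being strict prefixes of the newer ones. A compile-time guard that would make the assumption explicit (a sketch, not part of this patch):

	/* v7 is filled through iwl_mvm_ftm_cmd(), which writes the v8
	 * layout, so everything before the per-AP array must line up */
	BUILD_BUG_ON(offsetof(struct iwl_tof_range_req_cmd_v7, ap) !=
		     offsetof(struct iwl_tof_range_req_cmd, ap));
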
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 1513b8b4062f..834564198409 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,12 +62,72 @@
#include "mvm.h"
#include "constants.h"
+static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
+ u8 *bw, u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *bw = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *bw = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *bw = IWL_TOF_BW_40;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *bw = IWL_TOF_BW_80;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
+ u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
static int
iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ /*
+	 * The command structure is the same for versions 6 and 7 (only the
+	 * field interpretation is different), so the same struct can be used
+	 * for both cases.
+ */
struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
@@ -76,27 +136,22 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
+ u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_CONFIG_CMD);
+ int err;
lockdep_assert_held(&mvm->mutex);
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- cmd.bandwidth = IWL_TOF_BW_20_LEGACY;
- break;
- case NL80211_CHAN_WIDTH_20:
- cmd.bandwidth = IWL_TOF_BW_20_HT;
- break;
- case NL80211_CHAN_WIDTH_40:
- cmd.bandwidth = IWL_TOF_BW_40;
- cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
- break;
- case NL80211_CHAN_WIDTH_80:
- cmd.bandwidth = IWL_TOF_BW_80;
- cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
+ if (cmd_ver == 7)
+ err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+ else
+ err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+
+ if (err) {
+ IWL_ERR(mvm, "Failed to set responder bandwidth\n");
+ return err;
}
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
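
In the v2 helper a single byte carries both fields: the frame format in the low bits and the bandwidth code shifted up by LOCATION_BW_POS. A worked example of the packing for an 80 MHz VHT responder (the macro values come from the location API header, not this patch):

	u8 format_bw;

	format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;		/* low bits */
	format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;	/* upper bits */
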
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index dd685f7eb410..54c094e88474 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -353,22 +353,35 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (ret == -ETIMEDOUT)
- iwl_fw_dbg_error_collect(&mvm->fwrt,
- FW_DBG_TRIGGER_ALIVE_TIMEOUT);
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_22000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS));
- else if (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000)
+ IWL_ERR(mvm, "UMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_UMAC_CURRENT_PC));
+ IWL_ERR(mvm, "LMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC1_CURRENT_PC));
+ if (iwl_mvm_is_cdb_supported(mvm))
+ IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC2_CURRENT_PC));
+ } else if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
+ }
+
+ if (ret == -ETIMEDOUT)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+
iwl_fw_set_current_image(&mvm->fwrt, old_type);
return ret;
}
@@ -841,9 +854,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return 0;
}
+ if (!mvm->fwrt.ppag_table.enabled) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG not enabled, command not sent.\n");
+ return 0;
+ }
+
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
- IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 32dc9d6f0fb6..02df603b6400 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -256,7 +254,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
- __le16_to_cpu(resp->geo_info));
+ __le16_to_cpu(resp->geo_info),
+ __le16_to_cpu(resp->cap));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
@@ -754,6 +753,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ if (likely(sta)) {
+ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+ return;
+ } else {
+ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+ return;
+ }
+
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -797,14 +810,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
}
- if (sta) {
- if (iwl_mvm_tx_skb(mvm, skb, sta))
- goto drop;
- return;
- }
-
- if (iwl_mvm_tx_skb_non_sta(mvm, skb))
- goto drop;
+ iwl_mvm_tx_skb(mvm, skb, sta);
return;
drop:
ieee80211_free_txskb(hw, skb);
@@ -854,10 +860,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
break;
}
- if (!txq->sta)
- iwl_mvm_tx_skb_non_sta(mvm, skb);
- else
- iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
} while (atomic_dec_return(&mvmtxq->tx_request));
rcu_read_unlock();
@@ -2032,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
- if (IS_ERR(sta)) {
+ if (IS_ERR_OR_NULL(sta)) {
rcu_read_unlock();
WARN(1, "Can't find STA to configure HE\n");
return;
@@ -3288,7 +3291,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, 900,
- min_duration);
+ min_duration, false);
else
iwl_mvm_protect_session(mvm, vif, duration,
min_duration, 500, false);
@@ -4771,6 +4774,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
return ret;
}
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_HT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ u32 gi_ltf = u32_get_bits(rate_n_flags,
+ RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ } else {
+ switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+ case IWL_RATE_1M_PLCP:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_PLCP:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_PLCP:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_PLCP:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_PLCP:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_PLCP:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_PLCP:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_PLCP:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_PLCP:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_PLCP:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_PLCP:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_PLCP:
+ rinfo->legacy = 540;
+ break;
+ }
+ }
+}
+
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4785,6 +4907,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 3ec8de00f3aa..d6ecc2d2bf21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1160,6 +1160,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
+ * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1170,6 +1171,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
+ IWL_MVM_STATUS_IN_D3,
};
/* Keep track of completed init configuration */
@@ -1298,9 +1300,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
- if (iwlwifi_mod_params.lar_disable)
- return false;
-
/*
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
@@ -1508,8 +1507,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 945c1ea5cda8..70b29bf16bb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -178,7 +178,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
} else {
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
- ret, mvm->cfg->name);
+ ret, mvm->trans->name);
ret = -ENODATA;
}
goto exit;
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
- bool lar_enabled;
int regulatory_type;
/* Checking for required sections */
- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
- lar_enabled = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
- return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
regulatory, mac_override, phy_sku,
- mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
- lar_enabled);
+ mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 1b07a8e8f069..dfe02440d474 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -830,7 +830,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
- mvm->cfg->name, mvm->trans->hw_rev);
+ mvm->trans->name, mvm->trans->hw_rev);
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 25d7faea1c62..c146303ec73b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -198,7 +198,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
if (!mvmvif->queue_params[ac].uapsd)
continue;
- if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN)
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd->flags |=
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
@@ -233,15 +233,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
cmd->snooze_window =
- (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags &
- cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
+ cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
cmd->rx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout_uapsd =
@@ -354,8 +354,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_power_cmd *cmd,
- bool host_awake)
+ struct iwl_mac_power_cmd *cmd)
{
int dtimper = vif->bss_conf.dtim_period ?: 1;
int skip;
@@ -370,7 +369,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
if (dtimper >= 10)
return;
- if (host_awake) {
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
skip = 2;
@@ -390,8 +389,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_power_cmd *cmd,
- bool host_awake)
+ struct iwl_mac_power_cmd *cmd)
{
int dtimper, bi;
int keep_alive;
@@ -437,9 +435,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
}
- iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
+ iwl_mvm_power_config_skip_dtim(mvm, vif, cmd);
- if (!host_awake) {
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
cmd->rx_data_timeout =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout =
@@ -512,8 +510,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{
struct iwl_mac_power_cmd cmd = {};
- iwl_mvm_power_build_cmd(mvm, vif, &cmd,
- mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
@@ -536,7 +533,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
mvm->disable_power_off_d3 : mvm->disable_power_off)
cmd.flags &=
cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -943,7 +940,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
if (!mvmvif->bf_data.bf_enabled)
return 0;
- if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ef99c49247b7..c15f7dbc9516 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
- struct iwl_mvm_rss_sync_notif notif = {
- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
- .metadata.sync = 0,
- .nssn_sync.baid = baid,
- .nssn_sync.nssn = nssn,
- };
-
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+ if (IWL_MVM_USE_NSSN_SYNC) {
+ struct iwl_mvm_rss_sync_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+ .metadata.sync = 0,
+ .nssn_sync.baid = baid,
+ .nssn_sync.nssn = nssn,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+ sizeof(notif));
+ }
}
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index a046ac9fa852..3b263c81bcae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
cmd_size = sizeof(struct iwl_scan_config_v2);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
- cmd_size += num_channels;
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
if (!cfg)
@@ -1907,20 +1907,6 @@ iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
}
static void
-iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
- struct iwl_mvm_scan_params *params,
- struct ieee80211_vif *vif,
- struct iwl_scan_channel_params_v3 *cp)
-{
- cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
- cp->count = params->n_channels;
-
- iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
- params->n_channels, 0,
- cp->channel_config);
-}
-
-static void
iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
@@ -1937,37 +1923,6 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
vif->type);
}
-static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_scan_params *params, int type,
- int uid)
-{
- struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
- struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
- int ret;
- u16 gen_flags;
-
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
- cmd->uid = cpu_to_le32(uid);
-
- gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
- iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
- &scan_p->general_params,
- gen_flags);
-
- ret = iwl_mvm_fill_scan_sched_params(params,
- scan_p->periodic_params.schedule,
- &scan_p->periodic_params.delay);
- if (ret)
- return ret;
-
- iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
- iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
- &scan_p->channel_params);
-
- return 0;
-}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
@@ -2152,7 +2107,6 @@ static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
- IWL_SCAN_UMAC_HANDLER(11),
};
static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
@@ -2511,7 +2465,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
- IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 7b35f416404c..64ef3f3ba23b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3320,6 +3320,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
igtk_cmd.sta_id = cpu_to_le32(sta_id);
if (remove_key) {
+ /* This is a valid situation for IGTK */
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return 0;
+
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
} else {
struct ieee80211_key_seq seq;
@@ -3574,9 +3578,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
- if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 1851719e9f4b..d781777b6b96 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
- mutex_lock(&mvm->mutex);
/* Protect the session to hear the TDLS setup response on the channel */
- iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+ mutex_lock(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, duration,
+ duration, true);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ duration, 100, true);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 51b138673ddb..c0b420fe5e48 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
+static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_session_prot_notif *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
+ pkt->hdr.group_id != MAC_CONF_GROUP))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ if (!resp->status)
+		IWL_ERR(mvm,
+			"SESSION_PROTECTION_NOTIF received but not executed\n");
+
+ return true;
+}
+
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration)
+ u32 duration, u32 min_duration,
+ bool wait_for_notif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-
+ const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
+ MAC_CONF_GROUP, 0) };
+ struct iwl_notification_wait wait_notif;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
@@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
- int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
le32_to_cpu(cmd.duration_tu));
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
- 0, sizeof(cmd), &cmd);
- if (ret) {
+ if (!wait_for_notif) {
+ if (iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd)) {
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+
+ return;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
+ notif, ARRAY_SIZE(notif),
+ iwl_mvm_session_prot_notif, NULL);
+
+ if (iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
- "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
- spin_lock_bh(&mvm->time_event_lock);
- iwl_mvm_te_clear_data(mvm, te_data);
- spin_unlock_bh(&mvm->time_event_lock);
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
+ TU_TO_JIFFIES(100))) {
+		IWL_ERR(mvm,
+			"Timed out waiting for session protection to start\n");
}
}
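
The wait_for_notif path follows the driver's usual notification-wait lifecycle: register the waiter before sending, drop it if the send fails, otherwise block with a timeout. A condensed skeleton of that pattern, with a hypothetical handler and send step standing in for the real ones:

	struct iwl_notification_wait wait;
	const u16 cmds[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
					MAC_CONF_GROUP, 0) };

	iwl_init_notification_wait(&mvm->notif_wait, &wait, cmds,
				   ARRAY_SIZE(cmds), handler, NULL);
	if (send_command())	/* hypothetical send; non-zero on failure */
		iwl_remove_notification(&mvm->notif_wait, &wait);
	else if (iwl_wait_notification(&mvm->notif_wait, &wait,
				       TU_TO_JIFFIES(100)))
		;	/* timed out; the waiter is cleaned up internally */
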
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index df6832b79666..3186d7e40567 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
* @mvm: the mvm component
* @vif: the virtual interface for which the protection issued
* @duration: the duration of the protection
+ * @wait_for_notif: if true, will block until the start of the protection
*/
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration);
+ u32 duration, u32 min_duration,
+ bool wait_for_notif);
/**
* iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index b5a16f00bada..418e59b7c671 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -8,7 +8,7 @@
* Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,7 +234,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
};
struct iwl_ext_dts_measurement_cmd extcmd = {
- .control_mode = cpu_to_le32(DTS_AUTOMATIC),
+ .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
};
u32 cmdid;
@@ -734,7 +734,8 @@ static struct thermal_zone_device_ops tzone_ops = {
static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
{
int i;
- char name[] = "iwlwifi";
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
if (!iwl_mvm_is_tt_in_fw(mvm)) {
mvm->tz_device.tzone = NULL;
@@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
mvm->tz_device.tzone = thermal_zone_device_register(name,
IWL_MAX_DTS_TRIPS,
IWL_WRITABLE_TRIPS_MSK,
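
The generated name is bounded by construction: with the counter masked to 0xFF, the longest result is "iwlwifi_255" (11 characters plus the terminating NUL), which fits both name[16] and THERMAL_NAME_LENGTH, so the plain sprintf() cannot overflow. As a quick illustrative check:

	char name[16];

	BUILD_BUG_ON(sizeof("iwlwifi_255") > sizeof(name));
	sprintf(name, "iwlwifi_%u", 255u);	/* worst case fits */
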
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dc5c02fbc65a..a8d0d17f79fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(!dev_cmd))
return NULL;
- /* Make sure we zero enough of dev_cmd */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
- memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
- struct iwl_device_cmd *cmd)
+ struct iwl_device_tx_cmd *cmd)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info info;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
@@ -847,10 +842,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
else if (next)
consume_skb(skb);
- while (next) {
- tmp = next;
- next = tmp->next;
-
+ skb_list_walk_safe(next, tmp, next) {
memcpy(tmp->cb, cb, sizeof(tmp->cb));
/*
* Compute the length of all the data added for the A-MSDU.
@@ -880,9 +872,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
skb_shinfo(tmp)->gso_size = 0;
}
- tmp->prev = NULL;
- tmp->next = NULL;
-
+ skb_mark_not_on_list(tmp);
__skb_queue_tail(mpdus_skb, tmp);
i++;
}
@@ -1078,7 +1068,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
@@ -1154,7 +1144,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
- return 0;
+ return -1;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1206,8 +1196,8 @@ drop:
return -1;
}
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index a4e09a5b1816..01f248ba8fec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+ cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d38cefbb779e..acd01d86f101 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -57,6 +57,42 @@
#include "internal.h"
#include "iwl-prph.h"
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys,
+ int depth)
+{
+ void *result;
+
+ if (WARN(depth > 2,
+ "failed to allocate DMA memory not crossing 2^32 boundary"))
+ return NULL;
+
+ result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+ if (!result)
+ return NULL;
+
+ if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ void *old = result;
+ dma_addr_t oldphys = *phys;
+
+ result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+ phys,
+ depth + 1);
+ dma_free_coherent(trans->dev, size, old, oldphys);
+ }
+
+ return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys)
+{
+ return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
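How the retry above plays out, with illustrative addresses (a sketch, not part of the patch):

	/*
	 * Suppose dma_alloc_coherent() returns *phys = 0xfffff000 for
	 * size = 0x2000:
	 *   upper_32_bits(0xfffff000)          == 0x0
	 *   upper_32_bits(0xfffff000 + 0x2000) == 0x1  -> crosses 2^32
	 * The crossing buffer is kept while the replacement is
	 * allocated - so the allocator cannot hand back the same
	 * range - and freed afterwards. After two failed retries the
	 * WARN() fires and the function gives up with NULL.
	 */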
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
+ dma_addr_t phys;
int ret;
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+ sizeof(*ctxt_info),
+ &phys);
if (!ctxt_info)
return -ENOMEM;
+ trans_pcie->ctxt_info_dma_addr = phys;
+
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -193,11 +232,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
- control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
- (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
- IWL_CTXT_INFO_RB_CB_SIZE_POS) |
- (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
+ WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
+ control_flags |=
+ u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+ IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
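u32_encode_bits() (include/linux/bitfield.h) shifts a value into the field described by a mask, so the IWL_CTXT_INFO_* macros become masks instead of the removed *_POS shift counts. A sketch of the equivalence (the GENMASK value here is an assumption for illustration):

	/* if IWL_CTXT_INFO_RB_CB_SIZE were, say, GENMASK(7, 4), then */
	control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE);
	/* sets the same bits as the old open-coded form: */
	control_flags |= (cb_size << 4) & GENMASK(7, 4);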
/* initialize RX default queue */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 040cec17d3ad..97f227f3cbc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -565,14 +565,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
+
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -598,21 +592,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -910,7 +895,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
@@ -987,28 +971,74 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name }
+
+static const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLMVM)
+ IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+
+ IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+#endif /* CONFIG_IWLMVM */
+};
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg_trans_params *trans =
+ (struct iwl_cfg_trans_params *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
+ struct iwl_trans_pcie *trans_pcie;
unsigned long flags;
- int ret;
+ int i, ret;
+ /*
+ * This is needed for backwards compatibility with the old
+ * tables, so we don't need to change all the config structs
+ * at the same time. The cfg is used to compare with the old
+ * full cfg structs.
+ */
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans);
+ /* make sure trans is the first element in iwl_cfg */
+ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+
+ iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
- /* the trans_cfg should never change, so set it now */
- iwl_trans->trans_cfg = &cfg->trans;
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- if (WARN_ONCE(!iwl_trans->trans_cfg->csr,
- "CSR addresses aren't configured\n")) {
- ret = -EINVAL;
- goto out_free_trans;
+ /* the trans_cfg should never change, so set it now */
+ iwl_trans->trans_cfg = trans;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
+ const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+ if ((dev_info->device == IWL_CFG_ANY ||
+ dev_info->device == pdev->device) &&
+ (dev_info->subdevice == IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device)) {
+ iwl_trans->cfg = dev_info->cfg;
+ iwl_trans->name = dev_info->name;
+ goto found;
+ }
}
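The lookup is first-match-wins, with IWL_CFG_ANY as a wildcard on either key; devices that miss the table keep cfg == NULL and fall through to the legacy-cfg fallback below. A sketch of the predicate the loop applies (field names taken from the IWL_DEV_INFO macro above):

	static bool iwl_dev_info_matches(const struct iwl_dev_info *di,
					 const struct pci_dev *pdev)
	{
		return (di->device == IWL_CFG_ANY ||
			di->device == pdev->device) &&
		       (di->subdevice == IWL_CFG_ANY ||
			di->subdevice == pdev->subsystem_device);
	}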
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1027,22 +1057,22 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
- cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
- cfg = &iwlax210_2ax_cfg_ty_gf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- cfg = &iwlax211_2ax_cfg_so_gf_a0;
+ iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- cfg = &iwlax411_2ax_cfg_so_gf4_a0;
+ iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
@@ -1050,13 +1080,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
(CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- cfg = &iwl_ax101_cfg_qu_hr;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- cfg = &iwl22000_2ax_cfg_jf;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
@@ -1073,19 +1107,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
- /*
- * b step fw is the same for physical card and fpga
- */
- cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_A_STEP) {
- cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
- } else {
- /*
- * a step no FPGA
- */
- cfg = &iwl22000_2ac_cfg_hr;
- }
+ CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
+ SILICON_A_STEP)
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
@@ -1096,38 +1122,61 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
if (cfg == &iwl_ax101_cfg_qu_hr)
- cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
else if (cfg == &iwl_ax201_cfg_qu_hr)
- cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
+ iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
- else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
- else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
- else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
- else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+ if (cfg == &iwl_ax101_cfg_qu_hr)
+ cfg = &iwl_ax101_cfg_quz_hr;
+ else if (cfg == &iwl_ax201_cfg_qu_hr)
+ cfg = &iwl_ax201_cfg_quz_hr;
+ else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+ cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+ cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+ cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+ cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
}
#endif
- /* now set the real cfg we decided to use */
- iwl_trans->cfg = cfg;
+ /*
+ * If we didn't set the cfg yet, assume the trans is actually
+ * a full cfg from the old tables.
+ */
+ if (!iwl_trans->cfg)
+ iwl_trans->cfg = cfg;
+
+found:
+ /* if we don't have a name yet, copy name from the old cfg */
+ if (!iwl_trans->name)
+ iwl_trans->name = iwl_trans->cfg->name;
+
+ if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
+ ret = -EINVAL;
+ goto out_free_trans;
+ }
+ trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ } else {
+ trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
+ }
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 &&
iwl_trans_grab_nic_access(iwl_trans, &flags)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a091690f6c79..72f144c3a46e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,6 +106,8 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @offset: offset within the page (in bytes) at which this buffer
+ * starts (when multiple RBs share one page)
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 offset;
};
/**
@@ -305,7 +308,7 @@ struct iwl_cmd_meta {
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
+ void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
@@ -491,6 +494,7 @@ struct cont_rec {
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
@@ -510,11 +514,16 @@ struct cont_rec {
* @in_rescan: true if we have triggered a device rescan
* @base_rb_stts: base virtual address of receive buffer status for all queues
* @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: DMA mask to validate the actual address against,
+ * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: page whose remaining space is still being handed out to RBs
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
@@ -573,6 +582,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
u16 tfd_size;
+ u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
@@ -580,6 +590,13 @@ struct iwl_trans_pcie {
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
/* protect hw register */
spinlock_t reg_lock;
@@ -672,6 +689,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
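A worked example for the helper (illustrative addresses):

	/*
	 * phys = 0xfffffff8, len = 12: the buffer ends at 0x100000004,
	 * upper_32_bits() differs (0 vs. 1) -> true.
	 * phys = 0xffffffe0, len = 12: ends at 0xffffffec, still below
	 * 2^32 -> false.
	 * Taking 'phys' as u64 keeps the addition from wrapping when
	 * dma_addr_t is only 32 bits wide.
	 */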
+
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
int queue_size);
@@ -688,7 +715,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1109,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb);
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
#endif
/* common functions that are used by gen3 transport */
@@ -1106,7 +1134,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 452da44a21e0..427fcea5cb2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -240,7 +240,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
rxq->need_update = true;
return;
}
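This is the first of many identical substitutions in this merge: the per-family CSR indirection (trans->trans_cfg->csr->flag_*) is dropped in favor of the plain CSR_* constants, presumably because no remaining device needs a different CSR layout. Note the shape change too:

	/*
	 * The old csr struct stored bit *positions*, hence the BIT()
	 * wrapping; the CSR_* constants are ready-made masks, so the
	 * BIT() disappears along with the indirection.
	 */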
@@ -298,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
/*
@@ -318,8 +319,8 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
list);
list_del(&rxb->list);
rxb->invalid = false;
- /* 12 first bits are expected to be empty */
- WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
@@ -412,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- gfp_t priority)
+ u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
struct page *page;
gfp_t gfp_mask = priority;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
@@ -436,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
"Failed to alloc_pages\n");
return NULL;
}
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
return page;
}
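The block above lets one page back several receive buffers. Worked numbers (a sketch, assuming 4 KiB pages and 2 KiB RBs):

	/*
	 * rbsize = 2048, allocsize = 4096: 2 * rbsize <= allocsize, so
	 * after carving out the first RB the page is cached in
	 * trans_pcie->alloc_page with alloc_page_used = 2048 and an
	 * extra reference. The next caller gets offset 2048 and, since
	 * used reaches allocsize, inherits that cached reference
	 * (alloc_page is cleared without another get_page()). Ordinary
	 * page refcounting then frees the page once the last RB using
	 * it is returned.
	 */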
@@ -456,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct page *page;
while (1) {
+ unsigned int offset;
+
spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
spin_unlock(&rxq->lock);
@@ -463,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
spin_unlock(&rxq->lock);
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, priority);
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
@@ -482,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
BUG_ON(rxb->page);
rxb->page = page;
+ rxb->offset = offset;
/* Get physical address of the RB */
rxb->page_dma =
- dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
@@ -510,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < RX_POOL_SIZE; i++) {
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page)
continue;
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
__free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_page_order);
trans_pcie->rx_pool[i].page = NULL;
@@ -568,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
if (!page)
continue;
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
@@ -738,7 +773,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
spin_lock_init(&rxq->lock);
if (trans->trans_cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
+ rxq->queue_size = trans->cfg->num_rbds;
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -807,8 +842,18 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -ENOMEM;
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
spin_lock_init(&rba->lock);
@@ -845,6 +890,8 @@ err:
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
return ret;
@@ -1081,12 +1128,11 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
/* move the pool to the default queue and allocator ownerships */
queue_size = trans->trans_cfg->mq_rx_supported ?
- MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
- BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
- ARRAY_SIZE(trans_pcie->rx_pool));
+
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1177,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1235,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
if (WARN_ON(!rxb))
@@ -1249,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool reclaim;
int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
- ._offset = offset,
+ ._offset = rxb->offset + offset,
._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
@@ -1355,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* rx_free list for reuse later. */
if (rxb->page != NULL) {
rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
@@ -1390,13 +1441,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ vid = le16_to_cpu(rxq->cd[i].rbid);
else
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
- if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
rxb = trans_pcie->global_table[vid - 1];
@@ -1529,13 +1579,13 @@ out:
napi = &rxq->napi;
if (napi->poll) {
+ napi_gro_flush(napi, false);
+
if (napi->rx_count) {
netif_receive_skb_list(&napi->rx_list);
INIT_LIST_HEAD(&napi->rx_list);
napi->rx_count = 0;
}
-
- napi_gro_flush(napi, false);
}
iwl_pcie_rxq_restock(trans, rxq);
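Why the flush moved above the drain (a hedged reading of the hunk): with GRO batching, napi_gro_flush() may emit held packets onto napi->rx_list rather than delivering them directly, so flushing last could strand them until the next poll.

	/*
	 * New order: flush GRO first, so anything it pushes onto
	 * napi->rx_list is picked up by netif_receive_skb_list()
	 * in the same pass.
	 */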
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0252716c0b24..19a2c72081ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -57,24 +57,6 @@
#include "internal.h"
#include "fw/dbg.h"
-static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
-{
- iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
- HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
- udelay(20);
- iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
- HPM_HIPM_GEN_CFG_CR_PG_EN |
- HPM_HIPM_GEN_CFG_CR_SLP_EN);
- udelay(20);
- iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
- HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
-
- iwl_trans_sw_reset(trans);
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- return 0;
-}
-
/*
* Start up NIC's basic functionality after it has been reset
* (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
- trans->cfg->integrated) {
- ret = iwl_pcie_gen2_force_power_gating(trans);
- if (ret)
- return ret;
- }
-
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
return ret;
@@ -157,8 +132,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@@ -200,7 +174,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index af9bc6b64542..38d8fe21690a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -79,6 +79,7 @@
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
+#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -183,8 +184,7 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_sw_reset));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
@@ -301,18 +301,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
u16 cap;
/*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
*/
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- else
- iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -487,8 +482,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -510,12 +504,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_stop_master));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(trans, trans->trans_cfg->csr->addr_sw_reset,
- BIT(trans->trans_cfg->csr->flag_master_dis),
- BIT(trans->trans_cfg->csr->flag_master_dis), 100);
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -564,8 +557,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -1270,7 +1262,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1494,9 +1486,8 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_init_done));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (reset) {
/*
@@ -1561,7 +1552,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
@@ -1583,7 +1574,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1783,6 +1774,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
return 0;
}
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+ int ret;
+
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ if (ret < 0)
+ return ret;
+
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ udelay(20);
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_PG_EN |
+ HPM_HIPM_GEN_CFG_CR_SLP_EN);
+ udelay(20);
+ iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+ iwl_trans_pcie_sw_reset(trans);
+
+ return 0;
+}
+
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1802,6 +1816,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->cfg->integrated) {
+ err = iwl_pcie_gen2_force_power_gating(trans);
+ if (err)
+ return err;
+ }
+
err = iwl_pcie_apm_init(trans);
if (err)
return err;
@@ -1915,6 +1936,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ trans_pcie->rx_buf_bytes =
+ iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -2024,7 +2050,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -2049,8 +2075,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
@@ -2132,7 +2158,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2933,7 +2959,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
int allocated_rb_nums)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
@@ -2959,9 +2985,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */
- rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
- max_len,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page,
+ rxb->offset, max_len,
+ DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data);
}
@@ -3430,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size;
+ int ret, addr_size, txcmd_size, txcmd_align;
+ const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+ if (!cfg_trans->gen2) {
+ ops = &trans_ops_pcie;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
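The sizing above encodes an invariant rather than a magic number: gen2 command buffers are allocated txcmd_align-aligned and required to be strictly shorter than txcmd_align, so each command sits inside one aligned slot.

	/*
	 * If start % txcmd_align == 0 and txcmd_size < txcmd_align,
	 * then start + txcmd_size - 1 stays below the next multiple of
	 * txcmd_align. PAGE_SIZE (and 2^32) are multiples of 64 and
	 * 128, so a command can never straddle a page or 2^32
	 * boundary - the hardware erratum the rest of this series
	 * works around.
	 */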
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- if (cfg_trans->gen2)
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie_gen2);
- else
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie);
-
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3452,6 +3493,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 8ca0250de99e..86fc00167817 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
+ /*
+ * Only WARN here so we know about the issue, but we mess up our
+ * unmap path because not every place currently checks for errors
+ * returned from this function - it can only return an error if
+ * there's no more space, and so when we know there is enough we
+ * don't always check ...
+ */
+ WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
return idx;
}
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page if there */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
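Each workaround page stores a chaining pointer in its last pointer-sized slot; iwl_pcie_free_tso_page() (further down in this diff) walks the chain when the skb completes. Layout sketch:

	/*
	 *   skb->cb page ptr -> page N:   [ data ... | ptr -> page N-1 ]
	 *                       page N-1: [ data ... | ptr -> NULL     ]
	 *
	 * next = *(void **)(page_address(page) + PAGE_SIZE - sizeof(void *));
	 */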
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on or crosses a 2^32
+ * address boundary, then the next TB may be accessed
+ * with the wrong address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
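In short, the helper uses the mapping as-is when it does not cross 2^32, and otherwise bounces the payload through a per-skb workaround page; 'meta' only tells the unwind path how to undo the original mapping. A decision sketch:

	/*
	 *   dma_mapping_error(phys)  -> -ENOMEM
	 *   no 2^32 crossing         -> set TB, set meta->tbs bit, trace
	 *   crossing                 -> copy into workaround page, remap,
	 *                               set TB, unmap the original
	 *                               (dma_unmap_page() if meta, else
	 *                               dma_unmap_single()), trace
	 */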
+
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
/*
* Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
dev_kfree_skb(csum_skb);
goto out_err;
}
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
/* put the payload */
while (data_left) {
+ int ret;
+
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
dev_kfree_skb(csum_skb);
goto out_err;
}
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
- int tb_idx;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
- if (!skb_frag_size(frag))
+ if (!fragsz)
continue;
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -ENOMEM;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
- tb_phys, skb_frag_size(frag));
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
}
return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
- tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
skb_walk_frags(skb, frag) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, frag->data,
skb_headlen(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
- tb_phys, skb_headlen(frag));
if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
goto out_err;
}
@@ -538,7 +670,7 @@ out_err:
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
@@ -587,6 +719,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
int idx;
void *tfd;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -603,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1101,9 +1237,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq;
int i;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txq[txq_id];
+
if (WARN_ON(!txq))
return;
@@ -1258,6 +1400,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index f21f16ab2a97..2f69a6469fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->write_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[read_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true;
return;
}
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb)
{
struct page **page_ptr;
+ struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
- if (*page_ptr) {
- __free_page(*page_ptr);
- *page_ptr = NULL;
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
}
}
@@ -646,7 +652,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/*
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_cmd *dev_cmd_ptr;
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1255,16 +1261,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_val_mac_access_en),
- (BIT(trans->trans_cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->trans_cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
if (!p->page)
goto alloc;
- /* enough room on this page */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
- return p;
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
/* We don't have enough room on this page, get a new one. */
__free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
if (!p->page)
return NULL;
p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
return p;
}
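
get_page_hdr() now owns both sides of the bookkeeping: it reserves the trailing pointer slot when checking for room, records the page in skb->cb, and takes the reference that used to be the caller's job. The assumed page layout, per the comment above:

	/*
	 * |<-------------------- PAGE_SIZE -------------------->|
	 * | packed TSO headers ...    | free |  chain pointer   |
	 *                                      ^ last sizeof(void *)
	 *                                        bytes, kept out of
	 *                                        any DMA mapping
	 */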
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
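
As in tx-gen2.c earlier, the data path now carries struct iwl_device_tx_cmd rather than the general struct iwl_device_cmd, so host commands and TX commands can no longer be confused at compile time. The overflow-queue trick is unchanged: the command pointer is parked inside the skb's control buffer, schematically:

	/* stash the TX command in skb->cb at the transport's offset */
	struct iwl_device_tx_cmd **dev_cmd_ptr;

	dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs);
	*dev_cmd_ptr = dev_cmd;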
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 0094b1d2b577..3ec46f48cfde 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -2508,7 +2508,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap,
sta->supported_rates[0] = 2;
if (sta->tx_supp_rates & WLAN_RATE_2M)
sta->supported_rates[1] = 4;
- if (sta->tx_supp_rates & WLAN_RATE_5M5)
+ if (sta->tx_supp_rates & WLAN_RATE_5M5)
sta->supported_rates[2] = 11;
if (sta->tx_supp_rates & WLAN_RATE_11M)
sta->supported_rates[3] = 22;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index e323e9a5999f..58212c532c90 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -126,7 +126,7 @@ static void prism2_check_sta_fw_version(local_info_t *local);
#ifdef PRISM2_DOWNLOAD_SUPPORT
/* hostap_download.c */
-static const struct file_operations prism2_download_aux_dump_proc_fops;
+static const struct proc_ops prism2_download_aux_dump_proc_ops;
static u8 * prism2_read_pda(struct net_device *dev);
static int prism2_download(local_info_t *local,
struct prism2_download_param *param);
@@ -3094,7 +3094,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
local->func->reset_port = prism2_reset_port;
local->func->schedule_reset = prism2_schedule_reset;
#ifdef PRISM2_DOWNLOAD_SUPPORT
- local->func->read_aux_fops = &prism2_download_aux_dump_proc_fops;
+ local->func->read_aux_proc_ops = &prism2_download_aux_dump_proc_ops;
local->func->download = prism2_download;
#endif /* PRISM2_DOWNLOAD_SUPPORT */
local->func->tx = prism2_tx_80211;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 05466281afb6..de97b3304115 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -761,7 +761,7 @@ static void hostap_set_multicast_list(struct net_device *dev)
}
-static void prism2_tx_timeout(struct net_device *dev)
+static void prism2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct hostap_interface *iface;
local_info_t *local;
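
Here and in the orinoco, prism54, mwifiex and qtnfmac hunks below, the drivers adapt to the v5.6 change that gave ndo_tx_timeout a queue index. The netdev_ops prototype is now:

	/* struct net_device_ops member, as of v5.6 */
	void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);

None of these drivers are multi-queue, so the new argument is simply ignored.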
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 6151d8db5924..a2ee4693eaed 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -211,9 +211,9 @@ static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
return count;
}
-static const struct file_operations prism2_pda_proc_fops = {
- .read = prism2_pda_proc_read,
- .llseek = generic_file_llseek,
+static const struct proc_ops prism2_pda_proc_ops = {
+ .proc_read = prism2_pda_proc_read,
+ .proc_lseek = generic_file_llseek,
};
@@ -223,8 +223,8 @@ static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf,
return 0;
}
-static const struct file_operations prism2_aux_dump_proc_fops = {
- .read = prism2_aux_dump_proc_no_read,
+static const struct proc_ops prism2_aux_dump_proc_ops = {
+ .proc_read = prism2_aux_dump_proc_no_read,
};
@@ -379,9 +379,9 @@ void hostap_init_proc(local_info_t *local)
proc_create_seq_data("wds", 0, local->proc,
&prism2_wds_proc_seqops, local);
proc_create_data("pda", 0, local->proc,
- &prism2_pda_proc_fops, local);
+ &prism2_pda_proc_ops, local);
proc_create_data("aux_dump", 0, local->proc,
- local->func->read_aux_fops ?: &prism2_aux_dump_proc_fops,
+ local->func->read_aux_proc_ops ?: &prism2_aux_dump_proc_ops,
local);
proc_create_seq_data("bss_list", 0, local->proc,
&prism2_bss_list_proc_seqops, local);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index a8c4c1a8b29d..487883fbb58c 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -599,7 +599,7 @@ struct prism2_helper_functions {
struct prism2_download_param *param);
int (*tx)(struct sk_buff *skb, struct net_device *dev);
int (*set_tim)(struct net_device *dev, int aid, int set);
- const struct file_operations *read_aux_fops;
+ const struct proc_ops *read_aux_proc_ops;
int need_tx_headroom; /* number of bytes of headroom needed before
* IEEE 802.11 header */
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 28dac36d7c4c..00264a14e52c 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -647,7 +647,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
netif_wake_queue(dev);
}
-void orinoco_tx_timeout(struct net_device *dev)
+void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &dev->stats;
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 430862a6a24b..cdd026af100b 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -207,7 +207,7 @@ int orinoco_open(struct net_device *dev);
int orinoco_stop(struct net_device *dev);
void orinoco_set_multicast_list(struct net_device *dev);
int orinoco_change_mtu(struct net_device *dev, int new_mtu);
-void orinoco_tx_timeout(struct net_device *dev);
+void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue);
/********************************************************************/
/* Locking and synchronization functions */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 40a8b941ad5c..e753f43e0162 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1361,7 +1361,8 @@ static int ezusb_init(struct hermes *hw)
int retval;
BUG_ON(in_interrupt());
- BUG_ON(!upriv);
+ if (!upriv)
+ return -EINVAL;
upriv->reply_count = 0;
/* Write the MAGIC number on the simulated registers to keep
@@ -1608,9 +1609,9 @@ static int ezusb_probe(struct usb_interface *interface,
/* set up the endpoint information */
/* check out the endpoints */
- iface_desc = &interface->altsetting[0].desc;
+ iface_desc = &interface->cur_altsetting->desc;
for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
- ep = &interface->altsetting[0].endpoint[i].desc;
+ ep = &interface->cur_altsetting->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(ep)) {
/* we found a bulk in endpoint */
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c
index 2b8fb07d07e7..8d680250a281 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c
@@ -473,7 +473,7 @@ islpci_do_reset_and_wake(struct work_struct *work)
}
void
-islpci_eth_tx_timeout(struct net_device *ndev)
+islpci_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
islpci_private *priv = netdev_priv(ndev);
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.h b/drivers/net/wireless/intersil/prism54/islpci_eth.h
index 61f4b43c6054..e433ccdc526b 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.h
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.h
@@ -53,7 +53,7 @@ struct avs_80211_1_header {
void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
netdev_tx_t islpci_eth_transmit(struct sk_buff *, struct net_device *);
int islpci_eth_receive(islpci_private *);
-void islpci_eth_tx_timeout(struct net_device *);
+void islpci_eth_tx_timeout(struct net_device *, unsigned int txqueue);
void islpci_do_reset_and_wake(struct work_struct *);
#endif /* _ISL_GEN_H */
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 57edfada0665..4e3de684928b 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
int hw, ap, ap_max = ie[1];
u8 hw_rate;
+ if (ap_max > MAX_RATES) {
+ lbs_deb_assoc("invalid rates\n");
+ return tlv;
+ }
/* Advance past IE header */
ie += 2;
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
+ int hw, i;
+ u8 rates_max;
+ u8 *rates;
/* TODO: set preamble based on scan result */
ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,14 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
- int hw, i;
- u8 rates_max = rates_eid[1];
- u8 *rates = cmd.bss.rates;
+ rates_max = rates_eid[1];
+ if (rates_max > MAX_RATES) {
+ lbs_deb_join("invalid rates");
+ rcu_read_unlock();
+ ret = -EINVAL;
+ goto out;
+ }
+ rates = cmd.bss.rates;
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
u8 hw_rate = lbs_rates[hw].bitrate / 5;
for (i = 0; i < rates_max; i++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index d14e55e3c9da..7d94695e7961 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1020,7 +1020,7 @@ static void mwifiex_set_multicast_list(struct net_device *dev)
* CFG802.11 network device handler for transmission timeout.
*/
static void
-mwifiex_tx_timeout(struct net_device *dev)
+mwifiex_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 547ff3c578ee..fa5634af40f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1295,19 +1295,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
return pos;
}
-/* This function return interface number with the same bss_type.
- */
-static inline u8
-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
-{
- u8 i, num = 0;
-
- for (i = 0; i < adapter->priv_num; i++)
- if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
- num++;
- return num;
-}
-
/*
* This function returns the correct private structure pointer based
* upon the BSS type and BSS number.
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 98f942b797f7..a7968a84aaf8 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
vs_param_set->header.len =
cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
& 0x00FF) + 2);
+ if (le16_to_cpu(vs_param_set->header.len) >
+ MWIFIEX_MAX_VSIE_LEN) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Invalid param length!\n");
+ break;
+ }
+
memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
le16_to_cpu(vs_param_set->header.len));
*buffer += le16_to_cpu(vs_param_set->header.len) +
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 74e50566db1f..fbfa0b15d0c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -229,6 +229,15 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
"11D: skip setting domain info in FW\n");
return 0;
}
+
+ if (country_ie_len >
+ (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+ rcu_read_unlock();
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: country_ie_len overflow!, deauth AP\n");
+ return -EINVAL;
+ }
+
memcpy(priv->adapter->country_code, &country_ie[2], 2);
domain_info->country_code[0] = country_ie[2];
@@ -272,8 +281,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
priv->scan_block = false;
if (bss) {
- if (adapter->region_code == 0x00)
- mwifiex_process_country_ie(priv, bss);
+ if (adapter->region_code == 0x00 &&
+ mwifiex_process_country_ie(priv, bss))
+ return -EINVAL;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 09313047beed..f8f282ce39bd 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -894,7 +894,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *peer, *pos, *end;
u8 i, action, basic;
u16 cap = 0;
- int ie_len = 0;
+ int ies_len = 0;
if (len < (sizeof(struct ethhdr) + 3))
return;
@@ -916,7 +916,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
pos = buf + sizeof(struct ethhdr) + 4;
/* payload 1+ category 1 + action 1 + dialog 1 */
cap = get_unaligned_le16(pos);
- ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
pos += 2;
break;
@@ -926,7 +926,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
/* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
pos = buf + sizeof(struct ethhdr) + 6;
cap = get_unaligned_le16(pos);
- ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
pos += 2;
break;
@@ -934,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
return;
pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
- ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
break;
default:
mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
@@ -947,65 +947,104 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
- for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
- if (pos + 2 + pos[1] > end)
+ for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
+ u8 ie_len = pos[1];
+
+ if (pos + 2 + ie_len > end)
break;
switch (*pos) {
case WLAN_EID_SUPP_RATES:
- sta_ptr->tdls_cap.rates_len = pos[1];
- for (i = 0; i < pos[1]; i++)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
+ return;
+ sta_ptr->tdls_cap.rates_len = ie_len;
+ for (i = 0; i < ie_len; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
+ return;
basic = sta_ptr->tdls_cap.rates_len;
- for (i = 0; i < pos[1]; i++)
+ if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
+ return;
+ for (i = 0; i < ie_len; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
- sta_ptr->tdls_cap.rates_len += pos[1];
+ sta_ptr->tdls_cap.rates_len += ie_len;
break;
case WLAN_EID_HT_CAPABILITY:
- memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ if (ie_len != sizeof(struct ieee80211_ht_cap))
+ return;
+			/* copy the ie's value into ht_capb */
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
sizeof(struct ieee80211_ht_cap));
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
- memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ if (ie_len != sizeof(struct ieee80211_ht_operation))
+ return;
+			/* copy the ie's value into ht_oper */
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
+ if (ie_len != sizeof(pos[2]))
+ return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
+ if (ie_len < sizeof(struct ieee_types_header))
+ return;
+ if (ie_len > 8)
+ return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
- min_t(u8, pos[1], 8));
+ min_t(u8, ie_len, 8));
break;
case WLAN_EID_RSN:
+ if (ie_len < sizeof(struct ieee_types_header))
+ return;
+ if (ie_len > IEEE_MAX_IE_SIZE -
+ sizeof(struct ieee_types_header))
+ return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
- min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
+ min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
+ if (ie_len != sizeof(pos[2]))
+ return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
- if (priv->adapter->is_hw_11ac_capable)
- memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ if (priv->adapter->is_hw_11ac_capable) {
+ if (ie_len !=
+ sizeof(struct ieee80211_vht_operation))
+ return;
+				/* copy the ie's value into vhtoper */
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
sizeof(struct ieee80211_vht_operation));
+ }
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
- memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ if (ie_len != sizeof(struct ieee80211_vht_cap))
+ return;
+				/* copy the ie's value into vhtcap */
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
sizeof(struct ieee80211_vht_cap));
sta_ptr->is_11ac_enabled = 1;
}
break;
case WLAN_EID_AID:
- if (priv->adapter->is_hw_11ac_capable)
+ if (priv->adapter->is_hw_11ac_capable) {
+ if (ie_len != sizeof(u16))
+ return;
sta_ptr->tdls_cap.aid =
get_unaligned_le16((pos + 2));
+ }
+ break;
default:
break;
}
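
The TDLS parser keeps its two-byte-header walk but now checks every element's length against the destination field before copying, and copies the value from pos + 2 rather than the whole element from pos. The hardened loop shape, with expected_len and dst standing in for the per-IE specifics above:

	for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
		u8 ie_len = pos[1];

		if (pos + 2 + ie_len > end)	/* truncated element */
			break;
		if (ie_len != expected_len)	/* reject wrong-sized payloads */
			return;
		memcpy(dst, pos + 2, ie_len);	/* value only, not the header */
	}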
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 41f0231376c0..132f9e8ed68c 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
"WMM Parameter Set Count: %d\n",
wmm_param_ie->qos_info_bitmap & mask);
+ if (wmm_param_ie->vend_hdr.len + 2 >
+ sizeof(struct ieee_types_wmm_parameter))
+ break;
+
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
wmm_ie, wmm_param_ie,
wmm_param_ie->vend_hdr.len + 2);
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 53b5a4b2dcc5..59c187898132 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -281,8 +281,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
struct mt76_rx_tid *tid = NULL;
- rcu_swap_protected(wcid->aggr[tidno], tid,
- lockdep_is_held(&dev->mutex));
+ tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
+ lockdep_is_held(&dev->mutex));
if (tid) {
mt76_rx_aggr_shutdown(dev, tid);
kfree_rcu(tid, rcu_head);
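
rcu_swap_protected() was dropped upstream in favour of rcu_replace_pointer(), which returns the old pointer instead of swapping through an lvalue; the lockdep condition argument is the same. Equivalent usage:

	/* publishes NULL, hands back whatever was installed before */
	tid = rcu_replace_pointer(wcid->aggr[tidno], NULL,
				  lockdep_is_held(&dev->mutex));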
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
index 55116f395f9a..a4a785467748 100644
--- a/drivers/net/wireless/mediatek/mt76/airtime.c
+++ b/drivers/net/wireless/mediatek/mt76/airtime.c
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
return 0;
sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx > sband->n_bitrates)
+ if (!sband || status->rate_idx >= sband->n_bitrates)
return 0;
rate = &sband->bitrates[status->rate_idx];
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index b9f2a401041a..96018fd65779 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
- mt76_led_cleanup(dev);
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(dev);
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index eccad4987ac8..17e277bf39e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
- u8 val, *eeprom = dev->mt76.eeprom.data;
+ u8 *eeprom = dev->mt76.eeprom.data;
u8 tx_mask, rx_mask, max_nss;
+ u32 val;
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index a03e2d01fba7..d1405528b504 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -342,8 +342,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
version, fae);
- mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
mt76_eeprom_override(&dev->mt76);
+ mt76x02_mac_setaddr(dev, dev->mt76.macaddr);
+
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 59d089e092f9..8849faa5bc10 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -1056,7 +1056,8 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req->alpha2[0], req->alpha2[1]);
- ret = qtnf_cmd_reg_notify(mac, req, qtnf_mac_slave_radar_get(wiphy));
+ ret = qtnf_cmd_reg_notify(mac, req, qtnf_slave_radar_get(),
+ qtnf_dfs_offload_get());
if (ret) {
pr_err("MAC%u: failed to update region to %c%c: %d\n",
mac->macid, req->alpha2[0], req->alpha2[1], ret);
@@ -1078,7 +1079,8 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
{
struct wiphy *wiphy;
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ if (qtnf_dfs_offload_get() &&
+ (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
@@ -1163,7 +1165,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_NETNS_OK;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ if (qtnf_dfs_offload_get() &&
+ (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 548f6ff6d0f2..d0d7ec8794c4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -257,6 +257,14 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
cmd->pbss = s->pbss;
cmd->ht_required = s->ht_required;
cmd->vht_required = s->vht_required;
+ cmd->twt_responder = s->twt_responder;
+ if (s->he_obss_pd.enable) {
+ cmd->sr_params.sr_control |= QLINK_SR_SRG_INFORMATION_PRESENT;
+ cmd->sr_params.srg_obss_pd_min_offset =
+ s->he_obss_pd.min_offset;
+ cmd->sr_params.srg_obss_pd_max_offset =
+ s->he_obss_pd.max_offset;
+ }
aen = &cmd->aen;
aen->auth_type = s->auth_type;
@@ -510,6 +518,8 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags |= RATE_INFO_FLAGS_MCS;
else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_HE_MCS)
+ rate_dst->flags |= RATE_INFO_FLAGS_HE_MCS;
if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
@@ -2454,7 +2464,7 @@ out:
}
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
- bool slave_radar)
+ bool slave_radar, bool dfs_offload)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct qtnf_bus *bus = mac->bus;
@@ -2517,6 +2527,7 @@ int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
}
cmd->slave_radar = slave_radar;
+ cmd->dfs_offload = dfs_offload;
cmd->num_channels = 0;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 761755bf9ede..ab273257b078 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -58,7 +58,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
- bool slave_radar);
+ bool slave_radar, bool dfs_offload);
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qtnf_chan_stats *stats);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 5fb598389487..4320180f8c07 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -21,8 +21,22 @@ static bool slave_radar = true;
module_param(slave_radar, bool, 0644);
MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
+static bool dfs_offload;
+module_param(dfs_offload, bool, 0644);
+MODULE_PARM_DESC(dfs_offload, "set 1 to enable DFS offload to firmware");
+
static struct dentry *qtnf_debugfs_dir;
+bool qtnf_slave_radar_get(void)
+{
+ return slave_radar;
+}
+
+bool qtnf_dfs_offload_get(void)
+{
+ return dfs_offload;
+}
+
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
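
The two module parameters stay static to core.c and are published through trivial accessors instead of being touched directly from cfg80211.c; callers then read them at regulatory-notify time:

	ret = qtnf_cmd_reg_notify(mac, req, qtnf_slave_radar_get(),
				  qtnf_dfs_offload_get());

This also lets qtnf_mac_slave_radar_get(), which took an unused wiphy argument, be deleted below.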
@@ -156,7 +170,7 @@ static void qtnf_netdev_get_stats64(struct net_device *ndev,
/* Netdev handler for transmission timeout.
*/
-static void qtnf_netdev_tx_timeout(struct net_device *ndev)
+static void qtnf_netdev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
struct qtnf_wmac *mac;
@@ -211,9 +225,6 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
const struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
const struct qtnf_bus *bus = vif->mac->bus;
- if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE))
- return -EOPNOTSUPP;
-
ppid->id_len = sizeof(bus->hw_id);
memcpy(&ppid->id, bus->hw_id, ppid->id_len);
@@ -456,11 +467,6 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
-bool qtnf_mac_slave_radar_get(struct wiphy *wiphy)
-{
- return slave_radar;
-}
-
static const struct ethtool_ops qtnf_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
};
@@ -656,12 +662,24 @@ bool qtnf_netdev_is_qtn(const struct net_device *ndev)
return ndev->netdev_ops == &qtnf_netdev_ops;
}
+static int qtnf_check_br_ports(struct net_device *dev, void *data)
+{
+ struct net_device *ndev = data;
+
+ if (dev != ndev && netdev_port_same_parent_id(dev, ndev))
+ return -ENOTSUPP;
+
+ return 0;
+}
+
static int qtnf_core_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
const struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
struct qtnf_vif *vif;
+ struct qtnf_bus *bus;
int br_domain;
int ret = 0;
@@ -672,25 +690,34 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
vif = qtnf_netdev_get_priv(ndev);
+ bus = vif->mac->bus;
switch (event) {
case NETDEV_CHANGEUPPER:
info = ptr;
+ brdev = info->upper_dev;
- if (!netif_is_bridge_master(info->upper_dev))
+ if (!netif_is_bridge_master(brdev))
break;
pr_debug("[VIF%u.%u] change bridge: %s %s\n",
- vif->mac->macid, vif->vifid,
- netdev_name(info->upper_dev),
+ vif->mac->macid, vif->vifid, netdev_name(brdev),
info->linking ? "add" : "del");
- if (info->linking)
- br_domain = info->upper_dev->ifindex;
- else
- br_domain = ndev->ifindex;
+ if (IS_ENABLED(CONFIG_NET_SWITCHDEV) &&
+ (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)) {
+ if (info->linking)
+ br_domain = brdev->ifindex;
+ else
+ br_domain = ndev->ifindex;
+
+ ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
+ } else {
+ ret = netdev_walk_all_lower_dev(brdev,
+ qtnf_check_br_ports,
+ ndev);
+ }
- ret = qtnf_cmd_netdev_changeupper(vif, br_domain);
break;
default:
break;
@@ -763,13 +790,11 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
- bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
- ret = register_netdevice_notifier(&bus->netdev_nb);
- if (ret) {
- pr_err("failed to register netdev notifier: %d\n", ret);
- goto error;
- }
+ bus->netdev_nb.notifier_call = qtnf_core_netdevice_event;
+ ret = register_netdevice_notifier(&bus->netdev_nb);
+ if (ret) {
+ pr_err("failed to register netdev notifier: %d\n", ret);
+ goto error;
}
bus->fw_state = QTNF_FW_STATE_RUNNING;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 116ec16aa15b..d715e1cd0006 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -133,7 +133,8 @@ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
-bool qtnf_mac_slave_radar_get(struct wiphy *wiphy);
+bool qtnf_slave_radar_get(void);
+bool qtnf_dfs_offload_get(void);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 75527f1bb306..b2edb03819d1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 15
+#define QLINK_PROTO_VER 16
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -196,6 +196,45 @@ struct qlink_sta_info_state {
__le32 value;
} __packed;
+/**
+ * enum qlink_sr_ctrl_flags - control flags for spatial reuse parameter set
+ *
+ * @QLINK_SR_PSR_DISALLOWED: indicates whether or not PSR-based spatial reuse
+ * transmissions are allowed for STAs associated with the AP
+ * @QLINK_SR_NON_SRG_OBSS_PD_SR_DISALLOWED: indicates whether or not
+ * Non-SRG OBSS PD spatial reuse transmissions are allowed for STAs associated
+ * with the AP
+ * @QLINK_SR_NON_SRG_OFFSET_PRESENT: indicates whether or not Non-SRG OBSS PD Max offset
+ * field is valid in the element
+ * @QLINK_SR_SRG_INFORMATION_PRESENT: indicates whether or not SRG OBSS PD
+ * Min/Max offset fields are valid in the element
+ */
+enum qlink_sr_ctrl_flags {
+ QLINK_SR_PSR_DISALLOWED = BIT(0),
+ QLINK_SR_NON_SRG_OBSS_PD_SR_DISALLOWED = BIT(1),
+ QLINK_SR_NON_SRG_OFFSET_PRESENT = BIT(2),
+ QLINK_SR_SRG_INFORMATION_PRESENT = BIT(3),
+};
+
+/**
+ * struct qlink_sr_params - spatial reuse parameters
+ *
+ * @sr_control: spatial reuse control field; flags contained in this field are
+ * defined in @qlink_sr_ctrl_flags
+ * @non_srg_obss_pd_max: added to -82 dBm to generate the value of the
+ * Non-SRG OBSS PD Max parameter
+ * @srg_obss_pd_min_offset: added to -82 dBm to generate the value of the
+ * SRG OBSS PD Min parameter
+ * @srg_obss_pd_max_offset: added to -82 dBm to generate the value of the
+ * SRG OBSS PD Max parameter
+ */
+struct qlink_sr_params {
+ u8 sr_control;
+ u8 non_srg_obss_pd_max;
+ u8 srg_obss_pd_min_offset;
+ u8 srg_obss_pd_max_offset;
+} __packed;
+
/* QLINK Command messages related definitions
*/
@@ -596,8 +635,9 @@ enum qlink_user_reg_hint_type {
* of &enum qlink_user_reg_hint_type.
* @num_channels: number of &struct qlink_tlv_channel in a variable portion of a
* payload.
- * @slave_radar: whether slave device should enable radar detection.
* @dfs_region: one of &enum qlink_dfs_regions.
+ * @slave_radar: whether slave device should enable radar detection.
+ * @dfs_offload: enable or disable DFS offload to firmware.
* @info: variable portion of regulatory notifier callback.
*/
struct qlink_cmd_reg_notify {
@@ -608,7 +648,7 @@ struct qlink_cmd_reg_notify {
u8 num_channels;
u8 dfs_region;
u8 slave_radar;
- u8 rsvd[1];
+ u8 dfs_offload;
u8 info[0];
} __packed;
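
dfs_offload takes over the u8 rsvd[1] slot, so struct qlink_cmd_reg_notify keeps its size and field offsets; the bump to QLINK_PROTO_VER 16 covers the semantic change. The tail of the structure after this patch:

	u8 num_channels;
	u8 dfs_region;
	u8 slave_radar;
	u8 dfs_offload;		/* was u8 rsvd[1]: same offset, same size */
	u8 info[0];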
@@ -650,6 +690,8 @@ enum qlink_hidden_ssid {
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
* @aen: encryption info
+ * @sr_params: spatial reuse parameters
+ * @twt_responder: enable Target Wake Time
* @info: variable configurations
*/
struct qlink_cmd_start_ap {
@@ -665,6 +707,9 @@ struct qlink_cmd_start_ap {
u8 ht_required;
u8 vht_required;
struct qlink_auth_encr aen;
+ struct qlink_sr_params sr_params;
+ u8 twt_responder;
+ u8 rsvd[3];
u8 info[0];
} __packed;
@@ -948,6 +993,7 @@ enum qlink_sta_info_rate_flags {
QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
+ QLINK_STA_INFO_RATE_FLAG_HE_MCS = BIT(4),
};
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a36c3fea7495..6beac1f74e7c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1652,20 +1652,17 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, offset, reg);
}
- offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+ if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+ return;
- if (crypto->cmd == SET_KEY) {
- rt2800_register_multiread(rt2x00dev, offset,
- &iveiv_entry, sizeof(iveiv_entry));
- if ((crypto->cipher == CIPHER_TKIP) ||
- (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
- (crypto->cipher == CIPHER_AES))
- iveiv_entry.iv[3] |= 0x20;
- iveiv_entry.iv[3] |= key->keyidx << 6;
- } else {
- memset(&iveiv_entry, 0, sizeof(iveiv_entry));
- }
+ offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+ memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ if ((crypto->cipher == CIPHER_TKIP) ||
+ (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+ (crypto->cipher == CIPHER_AES))
+ iveiv_entry.iv[3] |= 0x20;
+ iveiv_entry.iv[3] |= key->keyidx << 6;
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index a23c26574002..3868c07672bd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -311,6 +311,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800pci_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7b931bb96a9e..bbfe1425c0ee 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -156,6 +156,7 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800soc_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 0dfb55c69b73..4cc64fe481a7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -654,6 +654,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
.tx_frames_pending = rt2x00mac_tx_frames_pending,
+ .reconfig_complete = rt2x00mac_reconfig_complete,
};
static const struct rt2800_ops rt2800usb_rt2800_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index a90a518b40d3..ea8a34ecae14 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1439,6 +1439,8 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
+void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index c3eab767bc21..7f9e43a4f805 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1255,16 +1255,6 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
{
int retval = 0;
- if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
- /*
- * This is special case for ieee80211_restart_hw(), otherwise
- * mac80211 never call start() two times in row without stop();
- */
- set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
- rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
- rt2x00lib_stop(rt2x00dev);
- }
-
/*
* If this is the first interface which is added,
* we should load the firmware now.
@@ -1292,7 +1282,6 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
out:
- clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
return retval;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index beb20c5faf5f..32efbc8e9f92 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -165,6 +165,15 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
+ if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
+ /*
+	 * This is a special case for ieee80211_restart_hw(); otherwise
+	 * mac80211 never calls start() twice in a row without stop().
+ */
+ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+ rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
+ rt2x00lib_stop(rt2x00dev);
+ }
return rt2x00lib_start(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_start);
@@ -180,6 +189,17 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
+void
+rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_reconfig_complete);
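
Together with the rt2x00dev.c hunk above, the restart handling moves out of rt2x00lib_start(): rt2x00mac_start() detects the restart case and does the pre-reset/stop itself, and DEVICE_STATE_RESET now stays set until mac80211 reports that reconfiguration is done. From mac80211's side the sequence is roughly:

	/* ieee80211_restart_hw() results in:
	 *   drv_start()             -> rt2x00mac_start(): set RESET, stop, restart
	 *   ... interfaces, keys and queues are reprogrammed ...
	 *   drv_reconfig_complete() -> rt2x00mac_reconfig_complete(): clear RESET
	 */

which is why the flag-clearing disappears from rt2x00lib_start() above.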
+
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index bc2dfef0de22..92e9e023c349 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
- rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
+ rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue->qid);
rt2x00queue_stop_queue(queue);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cf372684b681..c1d542bfa530 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2717,10 +2717,9 @@ static ssize_t ray_cs_essid_proc_write(struct file *file,
return count;
}
-static const struct file_operations ray_cs_essid_proc_fops = {
- .owner = THIS_MODULE,
- .write = ray_cs_essid_proc_write,
- .llseek = noop_llseek,
+static const struct proc_ops ray_cs_essid_proc_ops = {
+ .proc_write = ray_cs_essid_proc_write,
+ .proc_lseek = noop_llseek,
};
static ssize_t int_proc_write(struct file *file, const char __user *buffer,
@@ -2751,10 +2750,9 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
return count;
}
-static const struct file_operations int_proc_fops = {
- .owner = THIS_MODULE,
- .write = int_proc_write,
- .llseek = noop_llseek,
+static const struct proc_ops int_proc_ops = {
+ .proc_write = int_proc_write,
+ .proc_lseek = noop_llseek,
};
#endif
@@ -2790,10 +2788,10 @@ static int __init init_ray_cs(void)
proc_mkdir("driver/ray_cs", NULL);
proc_create_single("driver/ray_cs/ray_cs", 0, NULL, ray_cs_proc_show);
- proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_fops);
- proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_fops,
+ proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_ops);
+ proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_ops,
&net_type);
- proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_fops,
+ proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_ops,
&translate);
#endif
if (translate != 0)
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index aa2bb2ae9809..54a1a4ea107b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -6384,7 +6384,7 @@ static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
u8 dir, xtype, num;
int ret = 0;
- host_interface = &interface->altsetting[0];
+ host_interface = interface->cur_altsetting;
interface_desc = &host_interface->desc;
endpoints = interface_desc->bNumEndpoints;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index e4a7e074ae3f..fa92e29fffda 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -61,9 +61,9 @@ enum ap_peer {
CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
+ le32p_replace_bits((__le32 *)(__pdesc + 8), __val, BIT(19))
#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+ le32p_replace_bits((__le32 *)(__pdesc + 24), __val, GENMASK(11, 0))
int rtl_init_core(struct ieee80211_hw *hw);
void rtl_deinit_core(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 3c96c320236c..658ff425c256 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -862,7 +862,7 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
/* Resume RF Rx LPF corner
* After initialized, we can use coex_dm->btRf0x1eBackup
*/
- if (btcoexist->initilized) {
+ if (btcoexist->initialized) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 191dafd03189..a4940a3842de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1461,7 +1461,7 @@ void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
ex_btc8192e2ant_init_coex_dm(btcoexist);
}
- btcoexist->initilized = true;
+ btcoexist->initialized = true;
}
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 8c0a7fdbf200..a96a995dd850 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -679,7 +679,7 @@ struct btc_coexist {
bool auto_report_2ant;
bool dbg_mode_1ant;
bool dbg_mode_2ant;
- bool initilized;
+ bool initialized;
bool stop_coex_dm;
bool manual_control;
struct btc_statistics statistics;
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index f88d26535978..25335bd2873b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1061,13 +1061,15 @@ done:
return ret;
}
-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_irq_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1193,10 +1195,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
- (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ _rtl_pci_irq_tasklet,
(unsigned long)hw);
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ _rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
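
Calling a tasklet through a function pointer cast to a different signature is undefined behaviour and trips CFI checking, so the two callbacks now take the canonical unsigned long argument and recover the ieee80211_hw pointer themselves:

	static void _rtl_pci_irq_tasklet(unsigned long data)
	{
		struct ieee80211_hw *hw = (struct ieee80211_hw *)data;

		_rtl_pci_tx_chk_waitq(hw);
	}

	tasklet_init(&rtlpriv->works.irq_tasklet, _rtl_pci_irq_tasklet,
		     (unsigned long)hw);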
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index e5e1ec5a41dc..bc0ac96ee615 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -737,7 +737,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
find_p2p_ie = true;
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
+ noa_len = le16_to_cpu(*((__le16 *)&ie[1]));
if (ie + 3 + ie[1] > end)
return;
@@ -766,16 +766,16 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
index = 5;
for (i = 0; i < noa_num; i++) {
p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie+index);
+ *(u8 *)(ie + index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie+index);
+				le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie+index);
+				le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie+index);
+				le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
}
@@ -832,7 +832,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE((__le16 *)&ie[1]);
+ noa_len = le16_to_cpu(*(__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -861,16 +861,16 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
index = 5;
for (i = 0; i < noa_num; i++) {
p2pinfo->noa_count_type[i] =
- READEF1BYTE(ie+index);
+ *(u8 *)(ie + index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE((__le32 *)ie+index);
+				le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE((__le32 *)ie+index);
+				le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE((__le32 *)ie+index);
+				le32_to_cpu(*(__le32 *)(ie + index));
index += 4;
}
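
With the READEF*BYTE wrappers gone, the NoA attribute fields are read as little-endian values at byte offsets into the IE; the cast has to bind to ie + index, not to ie alone, since index (5, then advancing by 1 and 4) counts bytes. Nothing guarantees 4-byte alignment here, so get_unaligned_le32() would be the defensive variant — a possible alternative, not what this patch uses:

	#include <asm/unaligned.h>

	p2pinfo->noa_duration[i] = get_unaligned_le32(ie + index);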
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index e2e0bfbc24fe..fc7b9ad7e5d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -372,20 +372,20 @@ void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 rlbm, power_state = 0;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ set_h2ccmd_pwrmode_parm_mode(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM=2.*/
- SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
+ set_h2ccmd_pwrmode_parm_rlbm(u1_h2c_set_pwrmode, rlbm);
+ set_h2ccmd_pwrmode_parm_smart_ps(u1_h2c_set_pwrmode,
(rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1);
- SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
+ set_h2ccmd_pwrmode_parm_awake_interval(u1_h2c_set_pwrmode,
ppsc->reg_max_lps_awakeintvl);
- SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ set_h2ccmd_pwrmode_parm_all_queue_uapsd(u1_h2c_set_pwrmode, 0);
if (mode == FW_PS_ACTIVE_MODE)
power_state |= FW_PWR_STATE_ACTIVE;
else
power_state |= FW_PWR_STATE_RF_OFF;
- SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+ set_h2ccmd_pwrmode_parm_pwr_state(u1_h2c_set_pwrmode, power_state);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
index 39ddb7afea9d..79f095e47d71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
@@ -169,82 +169,55 @@ enum rtl8188e_h2c_cmd {
SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
-#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
-#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+static inline void set_h2ccmd_pwrmode_parm_mode(u8 *__ph2ccmd, u8 __val)
+{
+ *(u8 *)(__ph2ccmd) = __val;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_rlbm(u8 *__cmd, u8 __value)
+{
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(3, 0));
+}
+
+static inline void set_h2ccmd_pwrmode_parm_smart_ps(u8 *__cmd, u8 __value)
+{
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(7, 4));
+}
+
+static inline void set_h2ccmd_pwrmode_parm_awake_interval(u8 *__cmd, u8 __value)
+{
+ *(u8 *)(__cmd + 2) = __value;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_all_queue_uapsd(u8 *__cmd,
+ u8 __value)
+{
+ *(u8 *)(__cmd + 3) = __value;
+}
+
+static inline void set_h2ccmd_pwrmode_parm_pwr_state(u8 *__cmd, u8 __value)
+{
+ *(u8 *)(__cmd + 4) = __value;
+}
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+	*(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+	*(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+	*(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+	*(u8 *)(__ph2ccmd + 2) = __val
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+	*(u8 *)__cmd = __value
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value;
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value;
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-
-/* Keep Alive Control*/
-#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
-#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-
-/*REMOTE_WAKE_CTRL */
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
-#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
-#else
-#define SET_88E_H2_REM_WAKE_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#endif
-
-/* GTK_OFFLOAD */
-#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-
-/* AOAC_RSVDPAGE_LOC */
-#define SET_88E_H2CCMD_AOAC_RSVD_LOC_REM_WAKE_CTRL_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
-#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value;
int rtl88e_download_fw(struct ieee80211_hw *hw,
bool buse_wake_on_wlan_fw);
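
The fw.h hunk above swaps the byte-granular SET_BITS_TO_LE_1BYTE() macros for static inline functions: whole-byte fields become plain stores, and the two sub-byte fields that share byte 1 (RLBM in bits 3:0, smart-PS in bits 7:4) go through u8p_replace_bits() with a GENMASK() field mask, which gives argument type checking the old token-pasting macros could not. A minimal userspace sketch of those semantics follows; GENMASK() and u8p_replace_bits() are re-implemented here only for illustration, since the kernel versions live in <linux/bits.h> and <linux/bitfield.h>.

/* Userspace model of the field helpers used in the hunk above. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

/* number of trailing zero bits in a non-zero mask */
static unsigned int mask_shift(unsigned int mask)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

/* overwrite only the masked field of *p, as u8p_replace_bits() does */
static void u8p_replace_bits(uint8_t *p, uint8_t val, unsigned int mask)
{
	*p = (*p & ~mask) | ((val << mask_shift(mask)) & mask);
}

/* mirrors set_h2ccmd_pwrmode_parm_rlbm()/_smart_ps() from the hunk */
int main(void)
{
	uint8_t cmd[5] = { 0 };

	u8p_replace_bits(&cmd[1], 0, GENMASK(3, 0));  /* RLBM, bits 3:0 */
	u8p_replace_bits(&cmd[1], 1, GENMASK(7, 4));  /* smart-PS, bits 7:4 */
	cmd[2] = 2;                     /* awake interval is a whole byte */

	printf("cmd[1]=0x%02x cmd[2]=0x%02x\n", cmd[1], cmd[2]);
	return 0;
}

Running it prints cmd[1]=0x10 cmd[2]=0x02: the smart-PS nibble lands in the high half of byte 1 without disturbing the RLBM nibble, which is exactly what the SET_BITS_TO_LE_1BYTE(..., 4, 4, ...) call used to do.
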
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index f92e95f5494f..70716631de85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -2486,13 +2486,12 @@ void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
- BIT_OFFSET_LEN_MASK_32(0, 1);
+ u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT(0);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
@@ -2502,11 +2501,11 @@ void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
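
The hw.c conversion above leans on a simple identity: a one-bit mask at offset n, BIT_OFFSET_LEN_MASK_32(n, 1), is exactly BIT(n). A throwaway userspace check of that identity, with both macros reproduced from the rtlwifi and <linux/bits.h> definitions purely for the comparison:

/* Check of the identity relied on above: for single-bit fields,
 * BIT_OFFSET_LEN_MASK_32(off, 1) and BIT(off) build the same mask.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(n)                           (1u << (n))
#define BIT_LEN_MASK_32(len)             (0xffffffffu >> (32 - (len)))
#define BIT_OFFSET_LEN_MASK_32(off, len) (BIT_LEN_MASK_32(len) << (off))

int main(void)
{
	unsigned int off;

	for (off = 0; off < 32; off++)
		assert(BIT_OFFSET_LEN_MASK_32(off, 1) == BIT(off));
	printf("single-bit masks match for every offset\n");
	return 0;
}
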
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 5ca900f97d66..d13983ec09ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -264,7 +264,7 @@ static bool _rtl88e_check_condition(struct ieee80211_hw *hw,
u32 _board = rtlefuse->board_type; /*need efuse define*/
u32 _interface = rtlhal->interface;
u32 _platform = 0x08;/*SupportPlatform */
- u32 cond = condition;
+ u32 cond;
if (condition == 0xCDCDCDCD)
return true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index a0eda51e833c..4865639ac9ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -9,7 +9,6 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -59,7 +58,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -173,7 +172,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return err;
}
-void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -189,7 +188,7 @@ void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl88e_get_btc_status(void)
+static bool rtl88e_get_btc_status(void)
{
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
deleted file mode 100644
index 1407151503f8..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2013 Realtek Corporation.*/
-
-#ifndef __RTL92CE_SW_H__
-#define __RTL92CE_SW_H__
-
-int rtl88e_init_sw_vars(struct ieee80211_hw *hw);
-void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl88e_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 4bef237f488d..06fc9b5cdd8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -8,11 +8,11 @@
#include "../base.h"
#include "../core.h"
-#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
-#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
-#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
-#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
-#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_RSSI_STATE_NORMAL_POWER BIT(0)
+#define BT_RSSI_STATE_AMDPU_OFF BIT(1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT(2)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT(3)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT(4)
#define BT_MASK 0x00ffffff
#define RTLPRIV (struct rtl_priv *)
@@ -1515,7 +1515,7 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
polling == 0xffffffff && bt_state == 0xff)
return false;
- bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
+ bt_state &= BIT(0);
if (bt_state != rtlpriv->btcoexist.bt_cur_state) {
rtlpriv->btcoexist.bt_cur_state = bt_state;
@@ -1524,8 +1524,7 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
bt_state = bt_state |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
- BIT_OFFSET_LEN_MASK_32(2, 1);
+ 0 : BIT(1)) | BIT(2);
rtl_write_byte(rtlpriv, 0x4fd, bt_state);
}
return true;
@@ -1555,9 +1554,9 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
rtlpriv->btcoexist.bt_service = cur_service_type;
bt_state = bt_state |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service != BT_IDLE) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
/* Add interrupt migration when bt is not in
* idle state (no traffic). */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
index 888d9fc94bc2..706fc753dfe6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
@@ -46,19 +46,19 @@
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
int rtl92c_download_fw(struct ieee80211_hw *hw);
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index a52dd64d528d..6402a9e09be7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -2299,13 +2299,12 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
if (rtlpriv->btcoexist.bt_ant_isolation)
rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
- u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
- BIT_OFFSET_LEN_MASK_32(0, 1);
+ u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT(0);
u1_tmp = u1_tmp |
((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
- 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
+ 0 : BIT(1)) |
((rtlpriv->btcoexist.bt_service == BT_SCO) ?
- 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
+ 0 : BIT(2));
rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
@@ -2315,11 +2314,11 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
/* Config to 1T1R. */
if (rtlphy->rf_type == RF_1T1R) {
u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
- u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
+ u1_tmp &= ~(BIT(1));
rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 900788e4018c..ed68c850f9a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -14,7 +14,6 @@
#include "../rtl8192c/phy_common.h"
#include "hw.h"
#include "rf.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
@@ -65,7 +64,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
{
int err;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -161,7 +160,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
deleted file mode 100644
index f2d121a60159..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL92CE_SW_H__
-#define __RTL92CE_SW_H__
-
-int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
-void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl92c_init_var_map(struct ieee80211_hw *hw);
-bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index fc9a3aae047f..8fc3cb824066 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -23,45 +23,6 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw,
- long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
-
- return retsig;
-}
-
static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *pdesc,
@@ -194,7 +155,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
/* Translate DBM to percentage. */
- rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
/* Get Rx snr value in DB */
rtlpriv->stats.rx_snr_db[i] =
@@ -209,7 +170,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
* hardware (for rate adaptive)
*/
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -241,11 +202,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (is_cck_rate)
pstats->signalstrength =
- (u8)(_rtl92ce_signal_scale_mapping(hw, pwdb_all));
+ (u8)(rtl_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8)(_rtl92ce_signal_scale_mapping
- (hw, total_rssi /= rf_rx_num));
+ (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num));
}
static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
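
The trx.c hunk above, and the matching rtl8192cu/mac.c hunk below, drop per-chip copies of the RX power-percentage and signal-scale helpers in favour of the shared rtl_query_rxpwrpercentage() and rtl_signal_scale_mapping(); the deleted bodies are duplicated essentially verbatim across rtl8192ce, rtl8192cu, and rtl8192de. A standalone sketch of the percentage mapping, with the function body copied from the deleted code and a few sample inputs:

/* Standalone copy of the shared percentage helper, body taken from the
 * per-chip versions deleted above.
 */
#include <stdio.h>

typedef signed char s8;
typedef unsigned char u8;

static u8 rtl_query_rxpwrpercentage(s8 antpower)
{
	if ((antpower <= -100) || (antpower >= 20))
		return 0;
	else if (antpower >= 0)
		return 100;
	else
		return 100 + antpower;
}

int main(void)
{
	const s8 samples[] = { -110, -100, -70, -30, -1, 0, 19, 20 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4d dBm -> %3u%%\n", samples[i],
		       rtl_query_rxpwrpercentage(samples[i]));
	return 0;
}

Readings at or above +20 dBm map to 0 (they are treated as bogus), 0..19 dBm saturates at 100%, and the range in between is linear.
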
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index cec19b32c7e2..b4b67341dc83 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -567,44 +567,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
/*==============================================================*/
-static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
-static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
- long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
- return retsig;
-}
-
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92c *p_desc,
@@ -678,7 +640,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
break;
}
}
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
@@ -707,7 +669,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
rf_rx_num++;
rx_pwr[i] =
((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
- rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
@@ -716,7 +678,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
- pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -739,11 +701,10 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
}
if (is_cck_rate)
pstats->signalstrength =
- (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+ (u8)(rtl_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8) (_rtl92c_signal_scale_mapping
- (hw, total_rssi /= rf_rx_num));
+ (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num));
}
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index ab3e4aebad39..b53daf1b29f7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -12,7 +12,6 @@
#include "mac.h"
#include "dm.h"
#include "rf.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "hw.h"
@@ -252,45 +251,45 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
};
-#define USB_VENDER_ID_REALTEK 0x0bda
+#define USB_VENDOR_ID_REALTEK 0x0bda
/* 2010-10-19 DID_USB_V3.4 */
static const struct usb_device_id rtl8192c_usb_ids[] = {
/*=== Realtek demoboard ===*/
/* Default ID */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
/* RTL8188CTV */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle, (b/g mode only) */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8188cu Slim Solo */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
/* 8188cu Slim Combo */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
/* 8188RU High-power USB Dongle */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard (b/g mode only) */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* RTL8188CUS-VL */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
/* 8192cu 2*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
/*=== Customer ID ===*/
/****** 8188CU ********/
@@ -329,7 +328,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
/****** 8188 RU ********/
/* Netcore */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
/****** 8188CUS Slim Solo********/
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
deleted file mode 100644
index 662440c4cdc9..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL92CU_SW_H__
-#define __RTL92CU_SW_H__
-
-#define EFUSE_MAX_SECTION 16
-
-void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *powerlevel);
-void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
-bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
- u8 configtype);
-bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
- u8 configtype);
-void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
-void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
-bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
-u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr, u32 bitmask);
-void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index bd8ea6d66dff..7f0a17c1a9ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -16,73 +16,26 @@
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
-/* Define a macro that takes an le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
/* Firmware Header (8-byte alignment required) */
/* --- LONG WORD 0 ---- */
#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 0, 16)
-#define GET_FIRMWARE_HDR_CATEGORY(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 16, 8)
-#define GET_FIRMWARE_HDR_FUNCTION(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr, 24, 8)
+ le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 0, 16)
+ le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(15, 0))
#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 16, 8)
-#define GET_FIRMWARE_HDR_RSVD1(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 4, 24, 8)
-
-/* --- LONG WORD 1 ---- */
-#define GET_FIRMWARE_HDR_MONTH(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 0, 8)
-#define GET_FIRMWARE_HDR_DATE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 8, 8)
-#define GET_FIRMWARE_HDR_HOUR(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 16, 8)
-#define GET_FIRMWARE_HDR_MINUTE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 8, 24, 8)
-#define GET_FIRMWARE_HDR_ROMCODE_SIZE(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 12, 0, 16)
-#define GET_FIRMWARE_HDR_RSVD2(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 12, 16, 16)
-
-/* --- LONG WORD 2 ---- */
-#define GET_FIRMWARE_HDR_SVN_IDX(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 16, 0, 32)
-#define GET_FIRMWARE_HDR_RSVD3(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 20, 0, 32)
-
-/* --- LONG WORD 3 ---- */
-#define GET_FIRMWARE_HDR_RSVD4(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 24, 0, 32)
-#define GET_FIRMWARE_HDR_RSVD5(__fwhdr) \
- SHIFT_AND_MASK_LE(__fwhdr + 28, 0, 32)
+ le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(23, 16))
#define pagenum_128(_len) \
(u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
-#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val;
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val;
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val;
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val;
int rtl92d_download_fw(struct ieee80211_hw *hw);
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
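
In the rtl8192de firmware-header hunk above, the driver-local SHIFT_AND_MASK_LE() is replaced by le32_get_bits(), which takes the __le32 word and a GENMASK() field and performs the endian conversion, shift, and mask in one sparse-checkable step; accessors with no remaining users are deleted outright instead of being converted. A userspace model of the extraction follows (le32_get_bits() and GENMASK() are re-implemented minimally here, a little-endian host is assumed, and the header word is an invented example value):

/* Userspace model of the le32_get_bits() extraction used above. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

static uint32_t le32_to_cpu(uint32_t v)
{
	return v;	/* little-endian host assumed in this sketch */
}

static uint32_t le32_get_bits(uint32_t le_val, uint32_t mask)
{
	uint32_t v = le32_to_cpu(le_val);

	while (!(mask & 1)) {	/* align the field's low bit to bit 0 */
		mask >>= 1;
		v >>= 1;
	}
	return v & mask;
}

int main(void)
{
	/* invented example: header word with version=0x0021, sub_ver=0x05 */
	uint32_t word = 0x00050021;

	printf("version=0x%04x sub_ver=0x%02x\n",
	       (unsigned)le32_get_bits(word, GENMASK(15, 0)),
	       (unsigned)le32_get_bits(word, GENMASK(23, 16)));
	return 0;
}
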
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 92c9fb45f800..ab5b05ef168e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -23,16 +23,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92d_query_rxpwrpercentage(s8 antpower)
-{
- if ((antpower <= -100) || (antpower >= 20))
- return 0;
- else if (antpower >= 0)
- return 100;
- else
- return 100 + antpower;
-}
-
static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
u8 signal_strength_index)
{
@@ -43,33 +33,6 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
return signal_power;
}
-static long _rtl92de_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
-{
- long retsig;
-
- if (currsig >= 61 && currsig <= 100)
- retsig = 90 + ((currsig - 60) / 4);
- else if (currsig >= 41 && currsig <= 60)
- retsig = 78 + ((currsig - 40) / 2);
- else if (currsig >= 31 && currsig <= 40)
- retsig = 66 + (currsig - 30);
- else if (currsig >= 21 && currsig <= 30)
- retsig = 54 + (currsig - 20);
- else if (currsig >= 5 && currsig <= 20)
- retsig = 42 + (((currsig - 5) * 2) / 3);
- else if (currsig == 4)
- retsig = 36;
- else if (currsig == 3)
- retsig = 27;
- else if (currsig == 2)
- retsig = 18;
- else if (currsig == 1)
- retsig = 9;
- else
- retsig = currsig;
- return retsig;
-}
-
static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
struct rx_desc_92d *pdesc,
@@ -141,7 +104,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
break;
}
}
- pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
/* CCK gain is smaller than OFDM/MCS gain, */
/* so we add gain diff by experiences, the val is 6 */
pwdb_all += 6;
@@ -183,7 +146,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
rf_rx_num++;
rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
- 110;
- rssi = _rtl92d_query_rxpwrpercentage(rx_pwr[i]);
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
@@ -191,7 +154,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
- pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
@@ -212,10 +175,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
}
}
if (is_cck_rate)
- pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
pwdb_all));
else if (rf_rx_num != 0)
- pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
total_rssi /= rf_rx_num));
}
@@ -438,49 +401,50 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
- u8 *p_desc, struct sk_buff *skb)
+ u8 *pdesc8, struct sk_buff *skb)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
struct rx_fwinfo_92d *p_drvinfo;
- struct rx_desc_92d *pdesc = (struct rx_desc_92d *)p_desc;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ u32 phystatus = get_rx_desc_physt(pdesc);
- stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ stats->length = (u16)get_rx_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
- stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_desc_crc32(pdesc);
stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
- stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
- stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ stats->decrypted = !get_rx_desc_swdec(pdesc);
+ stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- if (GET_RX_DESC_CRC32(pdesc))
+ if (get_rx_desc_crc32(pdesc))
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (!GET_RX_DESC_SWDEC(pdesc))
+ if (!get_rx_desc_swdec(pdesc))
rx_status->flag |= RX_FLAG_DECRYPTED;
- if (GET_RX_DESC_BW(pdesc))
+ if (get_rx_desc_bw(pdesc))
rx_status->bw = RATE_INFO_BW_40;
- if (GET_RX_DESC_RXHT(pdesc))
+ if (get_rx_desc_rxht(pdesc))
rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
false, stats->rate);
- rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ rx_status->mactime = get_rx_desc_tsfl(pdesc);
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
stats->rx_bufshift);
_rtl92de_translate_rx_signal_stuff(hw,
- skb, stats, pdesc,
+ skb, stats,
+ (struct rx_desc_92d *)pdesc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
@@ -489,21 +453,23 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
}
static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
+ u8 *virtualaddress8)
{
+ __le32 *virtualaddress = (__le32 *)virtualaddress8;
+
memset(virtualaddress, 0, 8);
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
- SET_EARLYMODE_LEN0(virtualaddress, ptcb_desc->empkt_len[0]);
- SET_EARLYMODE_LEN1(virtualaddress, ptcb_desc->empkt_len[1]);
- SET_EARLYMODE_LEN2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
- SET_EARLYMODE_LEN3(virtualaddress, ptcb_desc->empkt_len[3]);
- SET_EARLYMODE_LEN4(virtualaddress, ptcb_desc->empkt_len[4]);
+ set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
+ set_earlymode_len0(virtualaddress, ptcb_desc->empkt_len[0]);
+ set_earlymode_len1(virtualaddress, ptcb_desc->empkt_len[1]);
+ set_earlymode_len2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
+ set_earlymode_len2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
+ set_earlymode_len3(virtualaddress, ptcb_desc->empkt_len[3]);
+ set_earlymode_len4(virtualaddress, ptcb_desc->empkt_len[4]);
}
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -514,7 +480,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -549,15 +515,15 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92d));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ set_tx_desc_pkt_offset(pdesc, 1);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
@@ -567,60 +533,61 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
(u8 *)(skb->data));
}
} else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
}
/* 5G has no CCK rates */
if (rtlhal->current_bandtype == BAND_ON_5G)
if (ptcb_desc->hw_rate < DESC_RATE6M)
ptcb_desc->hw_rate = DESC_RATE6M;
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
ptcb_desc->hw_rate == DESC_RATEMCS7)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_enable(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
- !ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable
+ set_tx_desc_seq(pdesc, seq_number);
+ set_tx_desc_rts_enable(pdesc,
+ ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_hw_rts_enable(pdesc, ((ptcb_desc->rts_enable
|| ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
/* 5G has no CCK rates */
if (rtlhal->current_bandtype == BAND_ON_5G)
if (ptcb_desc->rts_rate < DESC_RATE6M)
ptcb_desc->rts_rate = DESC_RATE6M;
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc, ((ptcb_desc->rts_rate <=
DESC_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
if (ptcb_desc->packet_bw) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf;
@@ -630,66 +597,66 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_PKT_ID(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ set_tx_desc_pkt_id(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ?
1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
/* Set TxRate and RTSRate in TxDesc */
/* This prevent Tx initial rate of new-coming packets */
/* from being overwritten by retried packet rate.*/
if (!ptcb_desc->use_driver_rate) {
- SET_TX_DESC_RTS_RATE(pdesc, 0x08);
- /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ set_tx_desc_rts_rate(pdesc, 0x08);
+ /* set_tx_desc_tx_rate(pdesc, 0x0b); */
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->ratr_index);
}
if (ieee80211_is_data_qos(fc))
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -701,61 +668,64 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
skb->data, skb->len, PCI_DMA_TODEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
/* 5G has no CCK rates
* Caution: The macros below are multi-line expansions.
* The braces are needed no matter what checkpatch says
*/
if (rtlhal->current_bandtype == BAND_ON_5G) {
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE6M);
} else {
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
}
- SET_TX_DESC_SEQ(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_seq(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
+ set_tx_desc_offset(pdesc, 0x20);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
}
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
@@ -766,16 +736,16 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
@@ -786,17 +756,18 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc, bool istx, u8 desc_name)
+ u8 *p_desc8, bool istx, u8 desc_name)
{
+ __le32 *p_desc = (__le32 *)p_desc8;
u32 ret = 0;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(p_desc);
+ ret = get_tx_desc_own(p_desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ ret = get_tx_desc_tx_buffer_address(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
@@ -806,13 +777,13 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(p_desc);
+ ret = get_rx_desc_own(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(p_desc);
- break;
+ ret = get_rx_desc_pkt_len(p_desc);
+ break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(p_desc);
+ ret = get_rx_desc_buff_addr(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
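
The trx.c rewrite above follows one pattern throughout: each function casts the raw u8 descriptor pointer to __le32 * exactly once at entry (pdesc8 -> pdesc), and every field accessor then takes the typed pointer, so sparse can verify the endian handling. A userspace sketch of that shape, assuming a little-endian host; the two setters mirror names from the diff, the bit positions come from the trx.h macros in the next hunk (packet size in dword 0 bits 15:0, OWN in dword 0 bit 31), and le32p_replace_bits() is a minimal stand-in for the kernel helper:

/* Userspace sketch of the typed-descriptor pattern; le32p_replace_bits()
 * is a minimal stand-in for the kernel helper, little-endian host assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

typedef uint32_t __le32;	/* in-kernel this is a sparse-checked type */

static void le32p_replace_bits(__le32 *p, uint32_t val, uint32_t mask)
{
	uint32_t m = mask;
	unsigned int shift = 0;

	while (!(m & 1)) {	/* locate the field's low bit */
		m >>= 1;
		shift++;
	}
	*p = (*p & ~mask) | ((val << shift) & mask);
}

/* dword 0, bits 15:0: packet size (per the trx.h layout below) */
static void set_tx_desc_pkt_size(__le32 *pdesc, uint16_t size)
{
	le32p_replace_bits(pdesc, size, GENMASK(15, 0));
}

/* dword 0, bit 31: OWN bit handed to the hardware */
static void set_tx_desc_own(__le32 *pdesc, uint32_t own)
{
	le32p_replace_bits(pdesc, own, GENMASK(31, 31));
}

int main(void)
{
	uint32_t backing[8] = { 0 };		/* 32-byte descriptor */
	uint8_t *pdesc8 = (uint8_t *)backing;	/* the driver's raw u8 view */
	__le32 *pdesc = (__le32 *)pdesc8;	/* the one cast, as in the diff */

	set_tx_desc_pkt_size(pdesc, 0x40);
	set_tx_desc_own(pdesc, 1);
	printf("dword0=0x%08x\n", (unsigned)pdesc[0]);
	return 0;
}

Keeping the single cast at the function boundary is what lets the typed get/set helpers replace the old untyped SET_BITS_OFFSET_LE() macros without any change in the bytes written.
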
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 635989e15282..d01578875cd5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -14,514 +14,359 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
-#define SET_TX_DESC_BK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 26, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
-#define GET_TX_DESC_PKT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
-#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
-#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
-#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
-#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
-#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
-#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_RX_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
-#define GET_RX_DESC_HWRSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
-#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
-#define GET_RX_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
-#define GET_RX_DESC_RSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
-#define GET_RX_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
-#define GET_RX_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
-#define GET_RX_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
- memset((void *)__pdesc, 0, \
- min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
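/*
 * A minimal usage sketch (illustrative only, not part of this patch),
 * assuming the helpers above: the conversion is mechanical, with
 * SET_BITS_OFFSET_LE(__pdesc + B, off, len, v) turning into
 * le32p_replace_bits(__pdesc + B/4, v, GENMASK(off + len - 1, off)),
 * e.g. byte offset 12, bit 16, width 12 maps to dword 3, GENMASK(27, 16).
 * The function name and single-segment setup below are hypothetical.
 */
static inline void example_fill_tx_desc(__le32 *txdesc, u32 dma_addr,
					u16 pktlen)
{
	set_tx_desc_pkt_size(txdesc, pktlen);
	set_tx_desc_first_seg(txdesc, 1);	/* packet fits one segment */
	set_tx_desc_last_seg(txdesc, 1);
	set_tx_desc_tx_buffer_size(txdesc, pktlen);
	set_tx_desc_tx_buffer_address(txdesc, dma_addr);
	set_tx_desc_own(txdesc, 1);	/* hand the descriptor to the NIC last */
}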
+static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(13, 0));
+}
+
+static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(14));
+}
+
+static inline u32 get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(15));
+}
+
+static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(19, 16));
+}
+
+static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(25, 24));
+}
+
+static inline u32 get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(26));
+}
+
+static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(27));
+}
+
+static inline u32 get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
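/*
 * A usage sketch (illustrative only, not part of this patch): returning
 * a consumed RX descriptor to the hardware with the helpers above.  The
 * function name and the end-of-ring flag are hypothetical; the driver's
 * real refill logic lives in the shared rtlwifi PCI code.
 */
static inline void example_rearm_rx_desc(__le32 *rxdesc, u32 dma_addr,
					 u16 bufsize, bool last_in_ring)
{
	set_rx_desc_buff_addr(rxdesc, dma_addr);
	set_rx_desc_pkt_len(rxdesc, bufsize);
	set_rx_desc_eor(rxdesc, last_in_ring);	/* ring wrap marker */
	set_rx_desc_own(rxdesc, 1);	/* hardware may now write this buffer */
}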
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size)
+{
+ memset((void *)__pdesc, 0,
+ min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
+}
/* For 92D early mode */
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 0, 3, __value)
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 4, 12, __value)
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 16, 12, __value)
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr, 28, 4, __value)
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 0, 8, __value)
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 8, 12, __value)
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
- SET_BITS_OFFSET_LE(__paddr+4, 20, 12, __value)
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
+}
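/*
 * A usage sketch (illustrative only, not part of this patch): packing a
 * 92D early-mode header for two aggregated frames.  A frame with a LEN2
 * value would need both set_earlymode_len2_1() and set_earlymode_len2_2(),
 * because that 12-bit field straddles the dword boundary (bits 31:28 of
 * dword 0 plus bits 7:0 of dword 1).  Names and sizes are assumptions.
 */
static inline void example_build_earlymode_hdr(__le32 *hdr, u32 len0,
					       u32 len1)
{
	memset(hdr, 0, 8);		/* two little-endian dwords */
	set_earlymode_pktnum(hdr, 2);	/* frames aggregated behind this one */
	set_earlymode_len0(hdr, len0);
	set_earlymode_len1(hdr, len1);
}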
struct rx_fwinfo_92d {
u8 gain_trsw[4];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 648f9108ed4b..551aa86825ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -12,124 +12,6 @@
#include "fw.h"
#include "trx.h"
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
- 0x1b00006c, /* 27. -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c, /* 37, -12.5dB */
- 0x0e400039, /* 38, -13.0dB */
- 0x0d800036, /* 39, -13.5dB */
- 0x0cc00033, /* 40, -14.0dB */
- 0x0c000030, /* 41, -14.5dB */
- 0x0b40002d, /* 42, -15.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
-};
-
static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index cbfecea232a3..27ac4f8b9a9b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -111,44 +111,40 @@ enum rtl8192e_h2c_cmd {
(u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __val)
+ u8p_replace_bits(__cmd + 1, __val, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __val)
+ u8p_replace_bits(__cmd + 1, __val, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __val)
+ *(u8 *)(__cmd + 2) = __val
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __val)
+ *(u8 *)(__cmd + 3) = __val
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __val)
+ *(u8 *)(__cmd + 4) = __val
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __val)
-#define GET_92E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+ *(u8 *)(__cmd + 5) = __val
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __val)
+ u8p_replace_bits(__cmd, __val, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __val)
+ u8p_replace_bits(__cmd, __val, BIT(1))
#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __val)
+ *(u8 *)(__cmd + 1) = __val
#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __val) \
- SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __val)
+ *(u8 *)(__cmd + 2) = __val
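/*
 * A usage sketch (illustrative only, not part of this patch): full-byte
 * (offset, 8) fields collapse to plain byte stores, while sub-byte
 * fields keep their mask via u8p_replace_bits().  The function name and
 * the field values below are assumptions.
 */
static inline void example_build_pwrmode_cmd(u8 *cmd, u8 mode)
{
	SET_H2CCMD_PWRMODE_PARM_MODE(cmd, mode);	/* whole byte 0 */
	SET_H2CCMD_PWRMODE_PARM_RLBM(cmd, 0);		/* bits 3:0 of byte 1 */
	SET_H2CCMD_PWRMODE_PARM_SMART_PS(cmd, 1);	/* bits 7:4 of byte 1 */
	SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(cmd, 2);	/* whole byte 2 */
}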
int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw);
void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index b6ee7dae5943..b337d599b6f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -9,7 +9,6 @@
#include "phy.h"
#include "dm.h"
#include "hw.h"
-#include "sw.h"
#include "fw.h"
#include "trx.h"
#include "led.h"
@@ -65,7 +64,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -164,7 +163,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -175,7 +174,7 @@ void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl92ee_get_btc_status(void)
+static bool rtl92ee_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
deleted file mode 100644
index 36e29a2da0fd..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2014 Realtek Corporation.*/
-
-#ifndef __RTL92E_SW_H__
-#define __RTL92E_SW_H__
-
-int rtl92ee_init_sw_vars(struct ieee80211_hw *hw);
-void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw);
-bool rtl92ee_get_btc_status(void);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 1c7ee569f4bf..7a54497b7df2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -11,7 +11,6 @@
#include "dm.h"
#include "fw.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
deleted file mode 100644
index a31efba0e6b5..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __REALTEK_PCI92SE_SW_H__
-#define __REALTEK_PCI92SE_SW_H__
-
-#define EFUSE_MAX_SECTION 16
-
-int rtl92se_init_sw(struct ieee80211_hw *hw);
-void rtl92se_deinit_sw(struct ieee80211_hw *hw);
-void rtl92se_init_var_map(struct ieee80211_hw *hw);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index d8260c7afe09..c61a92df9d73 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -13,118 +13,6 @@
#include "fw.h"
#include "hal_btc.h"
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
- 0x7f8001fe,
- 0x788001e2,
- 0x71c001c7,
- 0x6b8001ae,
- 0x65400195,
- 0x5fc0017f,
- 0x5a400169,
- 0x55400155,
- 0x50800142,
- 0x4c000130,
- 0x47c0011f,
- 0x43c0010f,
- 0x40000100,
- 0x3c8000f2,
- 0x390000e4,
- 0x35c000d7,
- 0x32c000cb,
- 0x300000c0,
- 0x2d4000b5,
- 0x2ac000ab,
- 0x288000a2,
- 0x26000098,
- 0x24000090,
- 0x22000088,
- 0x20000080,
- 0x1e400079,
- 0x1c800072,
- 0x1b00006c,
- 0x19800066,
- 0x18000060,
- 0x16c0005b,
- 0x15800056,
- 0x14400051,
- 0x1300004c,
- 0x12000048,
- 0x11000044,
- 0x10000040,
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
-};
-
static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
index 5c843736de8d..3f9ed9b4428e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
@@ -18,19 +18,19 @@
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 5702ac6deebf..ea86d5bf33d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -11,7 +11,6 @@
#include "fw.h"
#include "../rtl8723com/fw_common.h"
#include "hw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -67,7 +66,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -166,7 +165,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -177,7 +176,7 @@ void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8723e_get_btc_status(void)
+static bool rtl8723e_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
deleted file mode 100644
index 200ce39a0dd7..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2012 Realtek Corporation.*/
-
-#ifndef __RTL8723E_SW_H__
-#define __RTL8723E_SW_H__
-
-int rtl8723e_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8723e_init_var_map(struct ieee80211_hw *hw);
-bool rtl8723e_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index 97ea77464139..7c5e5e916274 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -83,37 +83,35 @@ enum rtl8723b_h2c_cmd {
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
+ u8p_replace_bits(__ph2ccmd + 1, __val, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
+ u8p_replace_bits(__ph2ccmd + 1, __val, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 5, 0, 8, __val)
-#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
- LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
+ *(u8 *)(__ph2ccmd + 5) = __val
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+ u8p_replace_bits(__ph2ccmd, __val, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+ u8p_replace_bits(__ph2ccmd, __val, BIT(1))
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)(__ph2ccmd) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 3c8528f0ecb3..36209ac5b208 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -13,7 +13,6 @@
#include "hw.h"
#include "fw.h"
#include "../rtl8723com/fw_common.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -64,7 +63,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
-int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -170,7 +169,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -181,7 +180,7 @@ void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8723be_get_btc_status(void)
+static bool rtl8723be_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
deleted file mode 100644
index 6ecacf9fbfd7..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2014 Realtek Corporation.*/
-
-#ifndef __RTL8723BE_SW_H__
-#define __RTL8723BE_SW_H__
-
-int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8723be_init_var_map(struct ieee80211_hw *hw);
-bool rtl8723be_get_btc_status(void);
-
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index b54230433a6b..f57e8794f0ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -93,124 +93,6 @@ static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = {
0x3FE /* 36, +6.0dB */
};
-static const u32 ofdmswing_table[] = {
- 0x0b40002d, /* 0, -15.0dB */
- 0x0c000030, /* 1, -14.5dB */
- 0x0cc00033, /* 2, -14.0dB */
- 0x0d800036, /* 3, -13.5dB */
- 0x0e400039, /* 4, -13.0dB */
- 0x0f00003c, /* 5, -12.5dB */
- 0x10000040, /* 6, -12.0dB */
- 0x11000044, /* 7, -11.5dB */
- 0x12000048, /* 8, -11.0dB */
- 0x1300004c, /* 9, -10.5dB */
- 0x14400051, /* 10, -10.0dB */
- 0x15800056, /* 11, -9.5dB */
- 0x16c0005b, /* 12, -9.0dB */
- 0x18000060, /* 13, -8.5dB */
- 0x19800066, /* 14, -8.0dB */
- 0x1b00006c, /* 15, -7.5dB */
- 0x1c800072, /* 16, -7.0dB */
- 0x1e400079, /* 17, -6.5dB */
- 0x20000080, /* 18, -6.0dB */
- 0x22000088, /* 19, -5.5dB */
- 0x24000090, /* 20, -5.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x288000a2, /* 22, -4.0dB */
- 0x2ac000ab, /* 23, -3.5dB */
- 0x2d4000b5, /* 24, -3.0dB */
- 0x300000c0, /* 25, -2.5dB */
- 0x32c000cb, /* 26, -2.0dB */
- 0x35c000d7, /* 27, -1.5dB */
- 0x390000e4, /* 28, -1.0dB */
- 0x3c8000f2, /* 29, -0.5dB */
- 0x40000100, /* 30, +0dB */
- 0x43c0010f, /* 31, +0.5dB */
- 0x47c0011f, /* 32, +1.0dB */
- 0x4c000130, /* 33, +1.5dB */
- 0x50800142, /* 34, +2.0dB */
- 0x55400155, /* 35, +2.5dB */
- 0x5a400169, /* 36, +3.0dB */
- 0x5fc0017f, /* 37, +3.5dB */
- 0x65400195, /* 38, +4.0dB */
- 0x6b8001ae, /* 39, +4.5dB */
- 0x71c001c7, /* 40, +5.0dB */
- 0x788001e2, /* 41, +5.5dB */
- 0x7f8001fe /* 42, +6.0dB */
-};
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
-};
-
static const u32 edca_setting_dl[PEER_MAX] = {
0xa44f, /* 0 UNKNOWN */
0x5ea44f, /* 1 REALTEK_90 */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index e11e496b7277..c269942b3f4a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -151,123 +151,115 @@ enum rtl8821a_h2c_cmd {
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
#define SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(2))
#define SET_8812_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(3))
#define SET_8812_H2CCMD_WOWLAN_ALL_PKT_DROP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 4, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(4))
#define SET_8812_H2CCMD_WOWLAN_GPIO_ACTIVE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 5, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(5))
#define SET_8812_H2CCMD_WOWLAN_REKEY_WAKE_UP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(6))
#define SET_8812_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 7, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(7))
#define SET_8812_H2CCMD_WOWLAN_GPIONUM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_8812_H2CCMD_WOWLAN_GPIO_DURATION(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+ *(u8 *)__ph2ccmd = __val
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(3, 0))
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
+ u8p_replace_bits(__cmd + 1, __value, GENMASK(7, 4))
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 4) = __value
#define SET_H2CCMD_PWRMODE_PARM_BYTE5(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd) + 5, 0, 8, __value)
-#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
- LE_BITS_TO_1BYTE(__cmd, 0, 8)
+ *(u8 *)(__cmd + 5) = __value
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 1) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 2) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 3) = __val
#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
- SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
+ *(u8 *)(__ph2ccmd + 4) = __val
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
-#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __value)
-#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
/* Keep Alive Control*/
#define SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
/*REMOTE_WAKE_CTRL */
#define SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(2))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(3))
#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_REALWOWV2_EN(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(6))
/* GTK_OFFLOAD */
#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
/* AOAC_RSVDPAGE_LOC */
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
+ *(u8 *)__cmd = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
+ *(u8 *)(__cmd + 1) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
+ *(u8 *)(__cmd + 3) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
+ *(u8 *)(__cmd + 4) = __value
#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE((__cmd)+5, 0, 8, __value)
+ *(u8 *)(__cmd + 5) = __value
/* Disconnect_Decision_Control */
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_ENABLE(__cmd, __value) \
- SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(0))
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_USER_SETTING(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
+ u8p_replace_bits(__cmd, __value, BIT(1))
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_CHECK_PERIOD(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) /* unit: beacon period */
+ *(u8 *)(__cmd + 1) = __value
#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_TRYPKT_NUM(__cmd, __value)\
- SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
+ *(u8 *)(__cmd + 2) = __value
int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw);
#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1)
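The hunk above applies one mechanical conversion rule: the driver-private
SET_BITS_TO_LE_1BYTE(ptr, bitoffset, bitlen, val) becomes the generic
u8p_replace_bits(ptr, val, mask) from <linux/bitfield.h> for sub-byte
fields, and a plain byte store when the old call covered the whole byte
(bitoffset 0, bitlen 8). A minimal sketch of the two forms, not part of
the patch (example_set_fields is a hypothetical name):

#include <linux/bitfield.h>

static void example_set_fields(u8 *cmd, u8 opmode, u8 loc)
{
	/* 1-bit field at bit 0 of byte 0; was SET_BITS_TO_LE_1BYTE(cmd, 0, 1, opmode) */
	u8p_replace_bits(cmd, opmode, BIT(0));

	/* whole byte at offset 1; was SET_BITS_TO_LE_1BYTE(cmd + 1, 0, 8, loc) */
	*(u8 *)(cmd + 1) = loc;
}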
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 3def6a2b3450..d8df816753cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -10,7 +10,6 @@
#include "dm.h"
#include "hw.h"
#include "fw.h"
-#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
@@ -65,7 +64,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
}
/*InitializeVariables8812E*/
-int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+static int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
{
int err = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -211,7 +210,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 0;
}
-void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
+static void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -228,7 +227,7 @@ void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw)
}
/* get bt coexist status */
-bool rtl8821ae_get_btc_status(void)
+static bool rtl8821ae_get_btc_status(void)
{
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
deleted file mode 100644
index 9d7610f84b20..000000000000
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2009-2010 Realtek Corporation.*/
-
-#ifndef __RTL8821AE_SW_H__
-#define __RTL8821AE_SW_H__
-
-int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw);
-void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw);
-void rtl8821ae_init_var_map(struct ieee80211_hw *hw);
-bool rtl8821ae_get_btc_status(void);
-
-#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 3bdda1c98339..1cff9f07c9e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2898,121 +2898,6 @@ enum bt_radio_shared {
* 2. Before write integer to IO.
* 3. After read integer from IO.
****************************************/
-/* Convert little data endian to host ordering */
-#define EF1BYTE(_val) \
- ((u8)(_val))
-#define EF2BYTE(_val) \
- (le16_to_cpu(_val))
-#define EF4BYTE(_val) \
- (le32_to_cpu(_val))
-
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-
-/* Create a bit mask
- * Examples:
- * BIT_LEN_MASK_32(0) => 0x00000000
- * BIT_LEN_MASK_32(1) => 0x00000001
- * BIT_LEN_MASK_32(2) => 0x00000003
- * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
- */
-#define BIT_LEN_MASK_32(__bitlen) \
- (0xFFFFFFFF >> (32 - (__bitlen)))
-#define BIT_LEN_MASK_16(__bitlen) \
- (0xFFFF >> (16 - (__bitlen)))
-#define BIT_LEN_MASK_8(__bitlen) \
- (0xFF >> (8 - (__bitlen)))
-
-/* Create an offset bit mask
- * Examples:
- * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
- * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
- */
-#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
-#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
- (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
-
-/*Description:
- * Return 4-byte value in host byte ordering from
- * 4-byte pointer in little-endian system.
- */
-#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((__le32 *)(__pstart))))
-#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((__le16 *)(__pstart))))
-#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
- (EF1BYTE(*((u8 *)(__pstart))))
-
-/*Description:
- * Translate subfield (continuous bits in little-endian) of 4-byte
- * value to host byte ordering.
- */
-#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_32(__bitlen) \
- )
-#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_16(__bitlen) \
- )
-#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
- BIT_LEN_MASK_8(__bitlen) \
- )
-
-/* Description:
- * Mask subfield (continuous bits in little-endian) of 4-byte value
- * and return the result in 4-byte value in host byte ordering.
- */
-#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
- )
-#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
- ( \
- LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
- (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
- )
-
-/* Description:
- * Set subfield of little-endian 4-byte value to specified value.
- */
-#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((__le32 *)(__pstart)) = \
- cpu_to_le32( \
- LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
- )
-#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((__le16 *)(__pstart)) = \
- cpu_to_le16( \
- LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
- )
-#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u8 *)(__pstart)) = EF1BYTE \
- ( \
- LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
- ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
- )
#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
(__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
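The deletion above drops wifi.h's private little-endian bit-field helpers;
callers now use the kernel-wide accessors from <linux/bitfield.h> instead.
An illustrative mapping for a 4-bit field at bit offset 8 (a sketch, not
part of the patch):

	/* was LE_BITS_TO_4BYTE(p, 8, 4) */
	u32 v = le32_get_bits(*(__le32 *)(p), GENMASK(11, 8));

	/* was SET_BITS_TO_LE_4BYTE(p, 8, 4, val) */
	le32p_replace_bits((__le32 *)(p), val, GENMASK(11, 8));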
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 15e12155a04c..cac148d13cf1 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -15,6 +15,7 @@ rtw88-y += main.o \
ps.o \
sec.o \
bf.o \
+ wow.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index cd28f675e9cb..a0f36f29b4a6 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -18,6 +18,7 @@ enum rtw_debug_mask {
RTW_DBG_DEBUGFS = 0x00000200,
RTW_DBG_PS = 0x00000400,
RTW_DBG_BF = 0x00000800,
+ RTW_DBG_WOW = 0x00001000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index b8c581161f61..243441453ead 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -10,6 +10,7 @@
#include "sec.h"
#include "debug.h"
#include "util.h"
+#include "wow.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -482,6 +483,96 @@ void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ struct rtw_fw_wow_keep_alive_para mode = {
+ .adopt = true,
+ .pkt_type = KEEP_ALIVE_NULL_PKT,
+ .period = 5,
+ };
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
+ SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
+ SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
+ SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
+ SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ struct rtw_fw_wow_disconnect_para mode = {
+ .adopt = true,
+ .period = 30,
+ .retry_count = 5,
+ };
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);
+
+ if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
+ SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
+ SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
+ SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
+ SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);
+
+ SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
+ SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
+ if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
+ SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
+ if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
+ SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
+ if (rtw_wow->pattern_cnt)
+ SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
+ u8 pairwise_key_enc,
+ u8 group_key_enc)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);
+
+ SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
+ SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);
+
+ SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);
+
+ if (rtw_wow_no_link(rtwdev))
+ SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
enum rtw_rsvd_packet_type type)
{
@@ -496,6 +587,26 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
return location;
}
+void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 loc_nlo;
+
+ loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);
+
+ SET_NLO_FUN_EN(h2c_pkt, enable);
+ if (enable) {
+ if (rtw_fw_lps_deep_mode)
+ SET_NLO_PS_32K(h2c_pkt, enable);
+ SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
+ SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
+ }
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -510,10 +621,45 @@ void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
+ LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u8 location = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type != RSVD_PROBE_REQ)
+ continue;
+ if ((!ssid && !rsvd_pkt->ssid) ||
+ rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ location = rsvd_pkt->page;
+ }
+
+ return location;
+}
+
+u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u16 size = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type != RSVD_PROBE_REQ)
+ continue;
+ if ((!ssid && !rsvd_pkt->ssid) ||
+ rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ size = rsvd_pkt->skb->len;
+ }
+
+ return size;
+}
+
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -559,6 +705,95 @@ rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return skb_new;
}
+static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
+ struct rtw_nlo_info_hdr *nlo_hdr;
+ struct cfg80211_ssid *ssid;
+ struct sk_buff *skb;
+ u8 *pos, loc;
+ u32 size;
+ int i;
+
+ if (!pno_req->inited || !pno_req->match_set_cnt)
+ return NULL;
+
+ size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
+ IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+
+ nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));
+
+ nlo_hdr->nlo_count = pno_req->match_set_cnt;
+ nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;
+
+ /* pattern check for firmware */
+ memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);
+
+ for (i = 0; i < pno_req->match_set_cnt; i++)
+ nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;
+
+ for (i = 0; i < pno_req->match_set_cnt; i++) {
+ ssid = &pno_req->match_sets[i].ssid;
+ loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
+ if (!loc) {
+ rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
+ kfree_skb(skb);
+ return NULL;
+ }
+ nlo_hdr->location[i] = loc;
+ }
+
+ for (i = 0; i < pno_req->match_set_cnt; i++) {
+ pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
+ memcpy(pos, pno_req->match_sets[i].ssid.ssid,
+ pno_req->match_sets[i].ssid.ssid_len);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
+ struct ieee80211_channel *channels = pno_req->channels;
+ struct sk_buff *skb;
+ int count = pno_req->channel_cnt;
+ u8 *pos;
+ int i = 0;
+
+ skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
+
+ for (i = 0; i < count; i++) {
+ pos = skb_put_zero(skb, 4);
+
+ CHSW_INFO_SET_CH(pos, channels[i].hw_value);
+
+ if (channels[i].flags & IEEE80211_CHAN_RADAR)
+ CHSW_INFO_SET_ACTION_ID(pos, 0);
+ else
+ CHSW_INFO_SET_ACTION_ID(pos, 1);
+ CHSW_INFO_SET_TIMEOUT(pos, 1);
+ CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
+ CHSW_INFO_SET_BW(pos, 0);
+ }
+
+ return skb;
+}
+
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -591,6 +826,7 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_lps_pg_info_hdr *pg_info_hdr;
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
struct sk_buff *skb;
u32 size;
@@ -605,19 +841,22 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
pg_info_hdr->sec_cam_count =
rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
+ pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;
conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
+ conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;
return skb;
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum rtw_rsvd_packet_type type)
+ struct rtw_rsvd_page *rsvd_pkt)
{
struct sk_buff *skb_new;
+ struct cfg80211_ssid *ssid;
- switch (type) {
+ switch (rsvd_pkt->type) {
case RSVD_BEACON:
skb_new = rtw_beacon_get(hw, vif);
break;
@@ -639,6 +878,21 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
case RSVD_LPS_PG_INFO:
skb_new = rtw_lps_pg_info_get(hw, vif);
break;
+ case RSVD_PROBE_REQ:
+ ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
+ if (ssid)
+ skb_new = ieee80211_probereq_get(hw, vif->addr,
+ ssid->ssid,
+ ssid->ssid_len, 0);
+ else
+ skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
+ break;
+ case RSVD_NLO_INFO:
+ skb_new = rtw_nlo_info_get(hw);
+ break;
+ case RSVD_CH_INFO:
+ skb_new = rtw_cs_channel_info_get(hw);
+ break;
default:
return NULL;
}
@@ -680,25 +934,53 @@ static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
memcpy(buf, skb->data, skb->len);
}
+static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
+ enum rtw_rsvd_packet_type type,
+ bool txdesc)
+{
+ struct rtw_rsvd_page *rsvd_pkt = NULL;
+
+ rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+
+ if (!rsvd_pkt)
+ return NULL;
+
+ rsvd_pkt->type = type;
+ rsvd_pkt->add_txdesc = txdesc;
+
+ return rsvd_pkt;
+}
+
+static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_rsvd_page *rsvd_pkt)
+{
+ lockdep_assert_held(&rtwdev->mutex);
+ list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+}
+
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc)
{
struct rtw_rsvd_page *rsvd_pkt;
- lockdep_assert_held(&rtwdev->mutex);
+ rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
+ if (!rsvd_pkt)
+ return;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- if (rsvd_pkt->type == type)
- return;
- }
+ rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+}
- rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
if (!rsvd_pkt)
return;
- rsvd_pkt->type = type;
- rsvd_pkt->add_txdesc = txdesc;
- list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+ rsvd_pkt->ssid = ssid;
+ rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
}
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
@@ -795,7 +1077,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
page_margin = page_size - tx_desc_sz;
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
+ iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt);
if (!iter) {
rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
@@ -857,13 +1139,16 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
kfree_skb(rsvd_pkt->skb);
+ rsvd_pkt->skb = NULL;
}
return buf;
release_skb:
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
kfree_skb(rsvd_pkt->skb);
+ rsvd_pkt->skb = NULL;
+ }
return NULL;
}
@@ -973,3 +1258,81 @@ out:
rtw_write8(rtwdev, REG_RCR + 2, rcr);
return 0;
}
+
+static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
+ u8 location)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
+ UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
+
+ /* include txdesc size */
+ UPDATE_PKT_SET_SIZE(h2c_pkt, size);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid)
+{
+ u8 loc;
+ u32 size;
+
+ loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
+ if (!loc) {
+ rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
+ return;
+ }
+
+ size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
+ if (!size) {
+ rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
+ return;
+ }
+
+ __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
+}
+
+void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
+ u8 loc_ch_info;
+ const struct rtw_ch_switch_option cs_option = {
+ .dest_ch_en = 1,
+ .dest_ch = 1,
+ .periodic_option = 2,
+ .normal_period = 5,
+ .normal_period_sel = 0,
+ .normal_cycle = 10,
+ .slow_period = 1,
+ .slow_period_sel = 1,
+ };
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+
+ CH_SWITCH_SET_START(h2c_pkt, enable);
+ CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
+ CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
+ CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
+ CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
+ CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
+ CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
+ CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
+ CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);
+
+ CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
+ CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);
+
+ loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
+ CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 73d1b9ca8efc..ccd27bd45775 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -12,6 +12,8 @@
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
+#define FW_NLO_INFO_CHECK_SIZE 4
+
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
#define RSVD_PAGE_START_ADDR 0x780
@@ -45,6 +47,9 @@ enum rtw_rsvd_packet_type {
RSVD_QOS_NULL,
RSVD_LPS_PG_DPK,
RSVD_LPS_PG_INFO,
+ RSVD_PROBE_REQ,
+ RSVD_NLO_INFO,
+ RSVD_CH_INFO,
};
enum rtw_fw_rf_type {
@@ -98,6 +103,57 @@ struct rtw_rsvd_page {
enum rtw_rsvd_packet_type type;
u8 page;
bool add_txdesc;
+ struct cfg80211_ssid *ssid;
+};
+
+enum rtw_keep_alive_pkt_type {
+ KEEP_ALIVE_NULL_PKT = 0,
+ KEEP_ALIVE_ARP_RSP = 1,
+};
+
+struct rtw_nlo_info_hdr {
+ u8 nlo_count;
+ u8 hidden_ap_count;
+ u8 rsvd1[2];
+ u8 pattern_check[FW_NLO_INFO_CHECK_SIZE];
+ u8 rsvd2[8];
+ u8 ssid_len[16];
+ u8 chiper[16];
+ u8 rsvd3[16];
+ u8 location[8];
+} __packed;
+
+enum rtw_packet_type {
+ RTW_PACKET_PROBE_REQ = 0x00,
+
+ RTW_PACKET_UNDEFINE = 0x7FFFFFFF,
+};
+
+struct rtw_fw_wow_keep_alive_para {
+ bool adopt;
+ u8 pkt_type;
+ u8 period; /* unit: sec */
+};
+
+struct rtw_fw_wow_disconnect_para {
+ bool adopt;
+ u8 period; /* unit: sec */
+ u8 retry_count;
+};
+
+struct rtw_ch_switch_option {
+ u8 periodic_option;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 dest_ch_en;
+ u8 absolute_time_en;
+ u8 dest_ch;
+ u8 normal_period;
+ u8 normal_period_sel;
+ u8 normal_cycle;
+ u8 slow_period;
+ u8 slow_period_sel;
+ u8 nlo_en;
};
struct rtw_fw_hdr {
@@ -146,6 +202,12 @@ struct rtw_fw_hdr {
#define H2C_PKT_PHYDM_INFO 0x11
#define H2C_PKT_IQK 0x0E
+#define H2C_PKT_CH_SWITCH 0x02
+#define H2C_PKT_UPDATE_PKT 0x0C
+
+#define H2C_PKT_CH_SWITCH_LEN 0x20
+#define H2C_PKT_UPDATE_PKT_LEN 0x4
+
#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0))
#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \
@@ -182,6 +244,57 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define CHSW_INFO_SET_CH(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(7, 0))
+#define CHSW_INFO_SET_PRI_CH_IDX(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(11, 8))
+#define CHSW_INFO_SET_BW(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(15, 12))
+#define CHSW_INFO_SET_TIMEOUT(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(23, 16))
+#define CHSW_INFO_SET_ACTION_ID(pkt, value) \
+ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(30, 24))
+
+#define UPDATE_PKT_SET_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 0))
+#define UPDATE_PKT_SET_PKT_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define UPDATE_PKT_SET_LOCATION(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 24))
+
+#define CH_SWITCH_SET_START(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
+#define CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+#define CH_SWITCH_SET_ABSOLUTE_TIME(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2))
+#define CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(4, 3))
+#define CH_SWITCH_SET_INFO_LOC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
+#define CH_SWITCH_SET_CH_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define CH_SWITCH_SET_PRI_CH_IDX(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
+#define CH_SWITCH_SET_DEST_CH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0))
+#define CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(13, 8))
+#define CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(15, 14))
+#define CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(21, 16))
+#define CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 22))
+#define CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(31, 24))
+#define CH_SWITCH_SET_TSF_HIGH(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 0))
+#define CH_SWITCH_SET_TSF_LOW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(31, 0))
+#define CH_SWITCH_SET_INFO_SIZE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x06, value, GENMASK(15, 0))
+
/* Command H2C */
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
@@ -198,6 +311,13 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
+#define H2C_CMD_KEEP_ALIVE 0x03
+#define H2C_CMD_DISCONNECT_DECISION 0x04
+#define H2C_CMD_WOWLAN 0x80
+#define H2C_CMD_REMOTE_WAKE_CTRL 0x81
+#define H2C_CMD_AOAC_GLOBAL_INFO 0x82
+#define H2C_CMD_NLO_INFO 0x8C
+
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
@@ -224,6 +344,8 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
#define LPS_PG_SEC_CAM_EN(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define LPS_PG_PATTERN_CAM_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
@@ -301,6 +423,56 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+
+#define SET_WOWLAN_FUNC_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_WOWLAN_UNICAST_PKT_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(11))
+#define SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(14))
+#define SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(15))
+
+#define SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(12))
+
+#define SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_NLO_FUN_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_NLO_PS_32K(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(9))
+#define SET_NLO_IGNORE_SECURITY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(10))
+#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
@@ -332,6 +504,8 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc);
+void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
@@ -340,4 +514,16 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
+void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
+ u8 pairwise_key_enc,
+ u8 group_key_enc);
+
+void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
+ struct cfg80211_ssid *ssid);
+void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
#endif
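As a reading aid for the SET_* accessors added above (inferred from the
macro definitions themselves, not new patch content): each H2C command
buffer is treated as an array of little-endian 32-bit words, and GENMASK
selects a field within one word. For example:

	/* SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, 5) expands to */
	le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, 5, GENMASK(23, 16));

which stores 5 into byte 2 of dword 0 while leaving the command ID in
GENMASK(7, 0) and the enable/adopt/type bits at BIT(8)..BIT(10) untouched.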
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 3d91aea942c3..85a81a578fd5 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -15,6 +15,7 @@ struct rtw_hci_ops {
void (*stop)(struct rtw_dev *rtwdev);
void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
+ void (*interface_cfg)(struct rtw_dev *rtwdev);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -59,6 +60,11 @@ static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
rtwdev->hci.ops->link_ps(rtwdev, enter);
}
+static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->interface_cfg(rtwdev);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 507970387b2a..cadf0abbe16b 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -16,10 +16,12 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 value8;
txsc20 = primary_ch_idx;
- if (txsc20 == 1 || txsc20 == 3)
- txsc40 = 9;
- else
- txsc40 = 10;
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ if (txsc20 == 1 || txsc20 == 3)
+ txsc40 = 9;
+ else
+ txsc40 = 10;
+ }
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
@@ -1034,5 +1036,7 @@ int rtw_mac_init(struct rtw_dev *rtwdev)
if (ret)
return ret;
+ rtw_hci_interface_cfg(rtwdev);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 34a1c3b53cd4..6fc33e11d08c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -12,6 +12,7 @@
#include "reg.h"
#include "bf.h"
#include "debug.h"
+#include "wow.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -152,12 +153,10 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
u8 bcn_ctrl = 0;
rtwvif->port = port;
- rtwvif->vif = vif;
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
- rtwvif->in_lps = false;
memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
rtw_txq_init(rtwdev, vif->txq);
@@ -735,6 +734,44 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_PM
+static int rtw_ops_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_wow_suspend(rtwdev, wowlan);
+ if (ret)
+ rtw_err(rtwdev, "failed to suspend for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret ? 1 : 0;
+}
+
+static int rtw_ops_resume(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_wow_resume(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to resume for wow %d\n", ret);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret ? 1 : 0;
+}
+
+static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ device_set_wakeup_enable(rtwdev->dev, enabled);
+}
+#endif
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -757,5 +794,10 @@ const struct ieee80211_ops rtw_ops = {
.sta_statistics = rtw_ops_sta_statistics,
.flush = rtw_ops_flush,
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
+#ifdef CONFIG_PM
+ .suspend = rtw_ops_suspend,
+ .resume = rtw_ops_resume,
+ .set_wakeup = rtw_ops_set_wakeup,
+#endif
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index ae61415e1665..2845d2838f7b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -706,8 +706,8 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
is_support_sgi = true;
} else if (sta->ht_cap.ht_supported) {
- ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
- (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
+ ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12);
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
@@ -717,6 +717,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
is_support_sgi = true;
}
+ if (efuse->hw_cap.nss == 1)
+ ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
+
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
if (sta->vht_cap.vht_supported) {
@@ -750,11 +753,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
wireless_set = 0;
}
- if (efuse->hw_cap.nss == 1) {
- ra_mask &= RA_MASK_VHT_RATES_1SS;
- ra_mask &= RA_MASK_HT_RATES_1SS;
- }
-
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW_CHANNEL_WIDTH_80;
@@ -793,6 +791,26 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rtw_fw_send_ra_info(rtwdev, si);
}
+static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fw_state *fw;
+
+ fw = &rtwdev->fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+
+ if (chip->wow_fw_name) {
+ fw = &rtwdev->wow_fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtw_power_on(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -813,11 +831,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
goto err;
}
- wait_for_completion(&fw->completion);
- if (!fw->firmware) {
- ret = -EINVAL;
- rtw_err(rtwdev, "failed to load firmware\n");
- goto err;
+ ret = rtw_wait_firmware_completion(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to wait firmware completion\n");
+ goto err_off;
}
ret = rtw_download_firmware(rtwdev, fw);
@@ -881,7 +898,7 @@ int rtw_core_start(struct rtw_dev *rtwdev)
static void rtw_power_off(struct rtw_dev *rtwdev)
{
- rtwdev->hci.ops->stop(rtwdev);
+ rtw_hci_stop(rtwdev);
rtw_mac_power_off(rtwdev);
}
@@ -1020,8 +1037,8 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
- struct rtw_dev *rtwdev = context;
- struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_fw_state *fw = context;
+ struct rtw_dev *rtwdev = fw->rtwdev;
const struct rtw_fw_hdr *fw_hdr;
if (!firmware || !firmware->data) {
@@ -1043,17 +1060,35 @@ static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
fw->version, fw->sub_version, fw->sub_index, fw->h2c_version);
}
-static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
+static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type)
{
- struct rtw_fw_state *fw = &rtwdev->fw;
+ const char *fw_name;
+ struct rtw_fw_state *fw;
int ret;
+ switch (type) {
+ case RTW_WOWLAN_FW:
+ fw = &rtwdev->wow_fw;
+ fw_name = rtwdev->chip->wow_fw_name;
+ break;
+
+ case RTW_NORMAL_FW:
+ fw = &rtwdev->fw;
+ fw_name = rtwdev->chip->fw_name;
+ break;
+
+ default:
+ rtw_warn(rtwdev, "unsupported firmware type\n");
+ return -ENOENT;
+ }
+
+ fw->rtwdev = rtwdev;
init_completion(&fw->completion);
ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
- GFP_KERNEL, rtwdev, rtw_load_firmware_cb);
+ GFP_KERNEL, fw, rtw_load_firmware_cb);
if (ret) {
- rtw_err(rtwdev, "async firmware request failed\n");
+ rtw_err(rtwdev, "failed to async firmware request\n");
return ret;
}
@@ -1341,7 +1376,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
- spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
spin_lock_init(&rtwdev->txq_lock);
@@ -1372,12 +1406,19 @@ int rtw_core_init(struct rtw_dev *rtwdev)
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
BIT_AB | BIT_AM | BIT_APM;
- ret = rtw_load_firmware(rtwdev, rtwdev->chip->fw_name);
+ ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
return ret;
}
+ if (chip->wow_fw_name) {
+ ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_warn(rtwdev, "no wow firmware loaded\n");
+ return ret;
+ }
+ }
return 0;
}
EXPORT_SYMBOL(rtw_core_init);
@@ -1385,12 +1426,16 @@ EXPORT_SYMBOL(rtw_core_init);
void rtw_core_deinit(struct rtw_dev *rtwdev)
{
struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_fw_state *wow_fw = &rtwdev->wow_fw;
struct rtw_rsvd_page *rsvd_pkt, *tmp;
unsigned long flags;
if (fw->firmware)
release_firmware(fw->firmware);
+ if (wow_fw->firmware)
+ release_firmware(wow_fw->firmware);
+
tasklet_kill(&rtwdev->tx_tasklet);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
@@ -1445,6 +1490,10 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
+ hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids;
+#endif
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index d012eefcd0da..f334d201bfb5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -19,6 +19,10 @@
#define RTW_MAX_SEC_CAM_NUM 32
#define MAX_PG_CAM_BACKUP_NUM 8
+#define RTW_MAX_PATTERN_NUM 12
+#define RTW_MAX_PATTERN_MASK_SIZE 16
+#define RTW_MAX_PATTERN_SIZE 128
+
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
#define RFREG_MASK 0xfffff
@@ -193,6 +197,11 @@ enum rtw_rx_queue_type {
RTK_MAX_RX_QUEUE_NUM
};
+enum rtw_fw_type {
+ RTW_NORMAL_FW = 0x0,
+ RTW_WOWLAN_FW = 0x1,
+};
+
enum rtw_rate_index {
RTW_RATEID_BGN_40M_2SS = 0,
RTW_RATEID_BGN_40M_1SS = 1,
@@ -337,6 +346,7 @@ enum rtw_flags {
RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
+ RTW_FLAG_WOWLAN,
NUM_OF_RTW_FLAGS,
};
@@ -367,6 +377,15 @@ enum rtw_snr {
RTW_SNR_NUM
};
+enum rtw_wow_flags {
+ RTW_WOW_FLAG_EN_MAGIC_PKT,
+ RTW_WOW_FLAG_EN_REKEY_PKT,
+ RTW_WOW_FLAG_EN_DISCONNECT,
+
+ /* keep it last */
+ RTW_WOW_FLAG_MAX,
+};
+
/* the power index is represented by differences, which cck-1s & ht40-1s are
* the base values, so for 1s's differences, there are only ht20 & ofdm
*/
@@ -608,6 +627,7 @@ struct rtw_lps_conf {
u8 smart_ps;
u8 port_id;
bool sec_cam_backup;
+ bool pattern_cam_backup;
};
enum rtw_hw_key_type {
@@ -716,7 +736,6 @@ struct rtw_bf_info {
};
struct rtw_vif {
- struct ieee80211_vif *vif;
enum rtw_net_type net_type;
u16 aid;
u8 mac_addr[ETH_ALEN];
@@ -727,7 +746,6 @@ struct rtw_vif {
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
- bool in_lps;
struct rtw_bfee bfee;
};
@@ -902,6 +920,33 @@ struct rtw_intf_phy_para {
u16 platform;
};
+struct rtw_wow_pattern {
+ u16 crc;
+ u8 type;
+ u8 valid;
+ u8 mask[RTW_MAX_PATTERN_MASK_SIZE];
+};
+
+struct rtw_pno_request {
+ bool inited;
+ u32 match_set_cnt;
+ struct cfg80211_match_set *match_sets;
+ u8 channel_cnt;
+ struct ieee80211_channel *channels;
+ struct cfg80211_sched_scan_plan scan_plan;
+};
+
+struct rtw_wow_param {
+ struct ieee80211_vif *wow_vif;
+ DECLARE_BITMAP(flags, RTW_WOW_FLAG_MAX);
+ u8 txpause;
+ u8 pattern_cnt;
+ struct rtw_wow_pattern patterns[RTW_MAX_PATTERN_NUM];
+
+ bool ips_enabled;
+ struct rtw_pno_request pno_req;
+};
+
struct rtw_intf_phy_para_table {
struct rtw_intf_phy_para *usb2_para;
struct rtw_intf_phy_para *usb3_para;
@@ -1030,6 +1075,10 @@ struct rtw_chip_info {
u8 bfer_su_max_num;
u8 bfer_mu_max_num;
+ const char *wow_fw_name;
+ const struct wiphy_wowlan_support *wowlan_stub;
+ const u8 max_sched_scan_ssids;
+
/* coex paras */
u32 coex_para_ver;
u8 bt_desired_ver;
@@ -1456,6 +1505,7 @@ struct rtw_fifo_conf {
struct rtw_fw_state {
const struct firmware *firmware;
+ struct rtw_dev *rtwdev;
struct completion completion;
u16 version;
u8 sub_version;
@@ -1534,9 +1584,6 @@ struct rtw_dev {
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
- /* lock for dm to use */
- spinlock_t dm_lock;
-
/* read/write rf register */
spinlock_t rf_lock;
@@ -1580,6 +1627,9 @@ struct rtw_dev {
u8 mp_mode;
+ struct rtw_fw_state wow_fw;
+ struct rtw_wow_param wow;
+
/* hci related data, must be last */
u8 priv[0] __aligned(sizeof(void *));
};
@@ -1605,6 +1655,18 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
+static inline bool rtw_ssid_equal(struct cfg80211_ssid *a,
+ struct cfg80211_ssid *b)
+{
+ if (!a || !b || a->ssid_len != b->ssid_len)
+ return false;
+
+ if (memcmp(a->ssid, b->ssid, a->ssid_len))
+ return false;
+
+ return true;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index a58e8276a41a..1fbc14c149ec 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
+#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
@@ -486,13 +487,6 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
rtwpci->irq_enabled = false;
}
-static int rtw_pci_setup(struct rtw_dev *rtwdev)
-{
- rtw_pci_reset_trx_ring(rtwdev);
-
- return 0;
-}
-
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
/* reset dma and rx tag */
@@ -501,11 +495,22 @@ static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
rtwpci->rx_tag = 0;
}
+static int rtw_pci_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ rtw_pci_reset_trx_ring(rtwdev);
+ rtw_pci_dma_reset(rtwdev, rtwpci);
+
+ return 0;
+}
+
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
struct rtw_pci_tx_ring *tx_ring;
u8 queue;
+ rtw_pci_reset_trx_ring(rtwdev);
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
tx_ring = &rtwpci->tx_rings[queue];
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
@@ -517,8 +522,6 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
unsigned long flags;
- rtw_pci_dma_reset(rtwdev, rtwpci);
-
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw_pci_enable_interrupt(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
@@ -832,6 +835,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
while (count--) {
skb = skb_dequeue(&ring->queue);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
+ count, hw_queue, bd_idx, ring->r.rp, cur_rp);
+ break;
+ }
tx_data = rtw_pci_get_tx_data(skb);
pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
PCI_DMA_TODEVICE);
@@ -1222,6 +1230,21 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
rtwpci->link_ctrl = link_ctrl;
}
+static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ switch (chip->id) {
+ case RTW_CHIP_TYPE_8822C:
+ if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
+ rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
+ BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1264,6 +1287,23 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_pci_link_cfg(rtwdev);
}
+#ifdef CONFIG_PM
+static int rtw_pci_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int rtw_pci_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+#define RTW_PM_OPS (&rtw_pm_ops)
+#else
+#define RTW_PM_OPS NULL
+#endif
+
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
int ret;
@@ -1330,6 +1370,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
.stop = rtw_pci_stop,
.deep_ps = rtw_pci_deep_ps,
.link_ps = rtw_pci_link_ps,
+ .interface_cfg = rtw_pci_interface_cfg,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
@@ -1489,6 +1530,7 @@ static struct pci_driver rtw_pci_driver = {
.id_table = rtw_pci_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
+ .driver.pm = RTW_PM_OPS,
};
module_pci_driver(rtw_pci_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 49bf29a92152..1580cfc57361 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -210,7 +210,7 @@ struct rtw_pci {
void __iomem *mmap;
};
-static u32 max_num_of_tx_queue(u8 queue)
+static inline u32 max_num_of_tx_queue(u8 queue)
{
u32 max_num;
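Making the header-defined helper static inline avoids emitting an unused
static copy of max_num_of_tx_queue() in every translation unit that
includes pci.h, which would otherwise trigger -Wunused-function warnings.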
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index a3e1e9578b65..eea9d888fbf1 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1434,7 +1434,7 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
rtw_load_table(rtwdev, chip->rfk_init_tbl);
- dpk_info->is_dpk_pwr_on = 1;
+ dpk_info->is_dpk_pwr_on = true;
}
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 913e6f47130f..7a189a9926fe 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -91,11 +91,11 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter)
return;
/* check confirm power mode has left power save state */
- for (polling_cnt = 0; polling_cnt < 3; polling_cnt++) {
+ for (polling_cnt = 0; polling_cnt < 50; polling_cnt++) {
polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr);
if ((polling ^ confirm) & BIT_RPWM_TOGGLE)
return;
- mdelay(20);
+ udelay(100);
}
/* in case of fw/hw missed the request, retry */
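Polling-budget arithmetic for the change above: the old loop busy-waited up
to 3 * 20 ms = 60 ms in mdelay(), while the new one polls up to
50 * 100 us = 5 ms in udelay(), so the CPWM toggle is detected sooner and a
missed request falls through to the retry path much faster.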
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 7e817bc997eb..9d94534c9674 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -9,6 +9,8 @@
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
#define BIT_FEN_BB_RSTB BIT(0)
+#define BIT_R_DIS_PRST BIT(6)
+#define BIT_WLOCK_1C_B6 BIT(5)
#define REG_SYS_PW_CTRL 0x0004
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
@@ -160,8 +162,12 @@
#define REG_CR 0x0100
#define REG_TRXFF_BNDY 0x0114
#define REG_RXFF_BNDY 0x011C
+#define REG_FE1IMR 0x0120
+#define BIT_FS_RXDONE BIT(16)
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_C2HEVT 0x01A0
+#define REG_MCUTST_II 0x01C4
+#define REG_WOWLAN_WAKE_REASON 0x01C7
#define REG_HMETFR 0x01CC
#define REG_HMEBOX0 0x01D0
#define REG_HMEBOX1 0x01D4
@@ -192,9 +198,18 @@
#define REG_H2C_TAIL 0x0248
#define REG_H2C_READ_ADDR 0x024C
#define REG_H2C_INFO 0x0254
+#define REG_RXPKT_NUM 0x0284
+#define BIT_RXDMA_REQ BIT(19)
+#define BIT_RW_RELEASE BIT(18)
+#define BIT_RXDMA_IDLE BIT(17)
+#define REG_RXPKTNUM 0x02B0
#define REG_INT_MIG 0x0304
+#define REG_HCI_MIX_CFG 0x03FC
+#define BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK BIT(26)
+#define REG_BCNQ_INFO 0x0418
+#define BIT_MGQ_CPU_EMPTY BIT(24)
#define REG_FWHW_TXQ_CTRL 0x0420
#define BIT_EN_BCNQ_DL BIT(22)
#define BIT_EN_WR_FREE_TAIL BIT(20)
@@ -323,6 +338,20 @@
#define BIT_RFMOD_80M BIT(8)
#define BIT_RFMOD_40M BIT(7)
#define REG_WMAC_TRXPTCL_CTL_H 0x066C
+#define REG_WKFMCAM_CMD 0x0698
+#define BIT_WKFCAM_POLLING_V1 BIT(31)
+#define BIT_WKFCAM_CLR_V1 BIT(30)
+#define BIT_WKFCAM_WE BIT(16)
+#define BIT_SHIFT_WKFCAM_ADDR_V2 8
+#define BIT_MASK_WKFCAM_ADDR_V2 0xff
+#define BIT_WKFCAM_ADDR_V2(x) \
+ (((x) & BIT_MASK_WKFCAM_ADDR_V2) << BIT_SHIFT_WKFCAM_ADDR_V2)
+#define REG_WKFMCAM_RWD 0x069C
+#define BIT_WKFMCAM_VALID BIT(31)
+#define BIT_WKFMCAM_BC BIT(26)
+#define BIT_WKFMCAM_MC BIT(25)
+#define BIT_WKFMCAM_UC BIT(24)
+
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 174029836833..3865097696d4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3487,12 +3487,12 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
- RTW_PWR_CMD_WRITE, BIT(7), 0},
- {0x0005,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x1018,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
- RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -4060,6 +4060,18 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_NET_DETECT,
+ .n_patterns = RTW_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+ .max_nd_match_sets = 4,
+};
+#endif
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -4106,6 +4118,11 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
+#ifdef CONFIG_PM
+ .wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
+ .wowlan_stub = &rtw_wowlan_stub_8822c,
+ .max_sched_scan_ssids = 4,
+#endif
.coex_para_ver = 0x19062706,
.bt_desired_ver = 0x6,
.scbd_support = true,
@@ -4135,3 +4152,4 @@ struct rtw_chip_info rtw8822c_hw_spec = {
EXPORT_SYMBOL(rtw8822c_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
+MODULE_FIRMWARE("rtw88/rtw8822c_wow_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
index 7bd2843b0bce..41c10e7144df 100644
--- a/drivers/net/wireless/realtek/rtw88/util.h
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -15,6 +15,8 @@ struct rtw_dev;
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
#define rtw_iterate_stas_atomic(rtwdev, iterator, data) \
ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
+#define rtw_iterate_keys(rtwdev, vif, iterator, data) \
+ ieee80211_iter_keys(rtwdev->hw, vif, iterator, data)
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
new file mode 100644
index 000000000000..4820dca958dd
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "wow.h"
+#include "reg.h"
+#include "debug.h"
+#include "mac.h"
+#include "ps.h"
+
+static void rtw_wow_show_wakeup_reason(struct rtw_dev *rtwdev)
+{
+ u8 reason;
+
+ reason = rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON);
+
+ if (reason == RTW_WOW_RSN_RX_DEAUTH)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx deauth\n");
+ else if (reason == RTW_WOW_RSN_DISCONNECT)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: AP is off\n");
+ else if (reason == RTW_WOW_RSN_RX_MAGIC_PKT)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx magic packet\n");
+ else if (reason == RTW_WOW_RSN_RX_GTK_REKEY)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx gtk rekey\n");
+ else if (reason == RTW_WOW_RSN_RX_PTK_REKEY)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx ptk rekey\n");
+ else if (reason == RTW_WOW_RSN_RX_PATTERN_MATCH)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx pattern match packet\n");
+ else if (reason == RTW_WOW_RSN_RX_NLO)
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "Rx NLO\n");
+ else
+ rtw_warn(rtwdev, "Unknown wakeup reason %x\n", reason);
+}
+
+static void rtw_wow_pattern_write_cam(struct rtw_dev *rtwdev, u8 addr,
+ u32 wdata)
+{
+ rtw_write32(rtwdev, REG_WKFMCAM_RWD, wdata);
+ rtw_write32(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1 |
+ BIT_WKFCAM_WE | BIT_WKFCAM_ADDR_V2(addr));
+
+ if (!check_hw_ready(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1, 0))
+ rtw_err(rtwdev, "failed to write pattern cam\n");
+}
+
+static void rtw_wow_pattern_write_cam_ent(struct rtw_dev *rtwdev, u8 id,
+ struct rtw_wow_pattern *rtw_pattern)
+{
+ int i;
+ u8 addr;
+ u32 wdata;
+
+ for (i = 0; i < RTW_MAX_PATTERN_MASK_SIZE / 4; i++) {
+ addr = (id << 3) + i;
+ wdata = rtw_pattern->mask[i * 4];
+ wdata |= rtw_pattern->mask[i * 4 + 1] << 8;
+ wdata |= rtw_pattern->mask[i * 4 + 2] << 16;
+ wdata |= rtw_pattern->mask[i * 4 + 3] << 24;
+ rtw_wow_pattern_write_cam(rtwdev, addr, wdata);
+ }
+
+ wdata = rtw_pattern->crc;
+ addr = (id << 3) + RTW_MAX_PATTERN_MASK_SIZE / 4;
+
+ switch (rtw_pattern->type) {
+ case RTW_PATTERN_BROADCAST:
+ wdata |= BIT_WKFMCAM_BC | BIT_WKFMCAM_VALID;
+ break;
+ case RTW_PATTERN_MULTICAST:
+ wdata |= BIT_WKFMCAM_MC | BIT_WKFMCAM_VALID;
+ break;
+ case RTW_PATTERN_UNICAST:
+ wdata |= BIT_WKFMCAM_UC | BIT_WKFMCAM_VALID;
+ break;
+ default:
+ break;
+ }
+ rtw_wow_pattern_write_cam(rtwdev, addr, wdata);
+}
+
+/* RTK internal CRC16 for Pattern Cam */
+static u16 __rtw_cal_crc16(u8 data, u16 crc)
+{
+ u8 shift_in, data_bit;
+ u8 crc_bit4, crc_bit11, crc_bit15;
+ u16 crc_result;
+ int index;
+
+ for (index = 0; index < 8; index++) {
+ crc_bit15 = ((crc & BIT(15)) ? 1 : 0);
+ data_bit = (data & (BIT(0) << index) ? 1 : 0);
+ shift_in = crc_bit15 ^ data_bit;
+
+ crc_result = crc << 1;
+
+ if (shift_in == 0)
+ crc_result &= (~BIT(0));
+ else
+ crc_result |= BIT(0);
+
+ crc_bit11 = ((crc & BIT(11)) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit11 == 0)
+ crc_result &= (~BIT(12));
+ else
+ crc_result |= BIT(12);
+
+ crc_bit4 = ((crc & BIT(4)) ? 1 : 0) ^ shift_in;
+
+ if (crc_bit4 == 0)
+ crc_result &= (~BIT(5));
+ else
+ crc_result |= BIT(5);
+
+ crc = crc_result;
+ }
+ return crc;
+}
+
+static u16 rtw_calc_crc(u8 *pdata, int length)
+{
+ u16 crc = 0xffff;
+ int i;
+
+ for (i = 0; i < length; i++)
+ crc = __rtw_cal_crc16(pdata[i], crc);
+
+ /* get 1's complement */
+ return ~crc;
+}
+
+static void rtw_wow_pattern_generate(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ const struct cfg80211_pkt_pattern *pkt_pattern,
+ struct rtw_wow_pattern *rtw_pattern)
+{
+ const u8 *mask;
+ const u8 *pattern;
+ u8 mask_hw[RTW_MAX_PATTERN_MASK_SIZE] = {0};
+ u8 content[RTW_MAX_PATTERN_SIZE] = {0};
+ u8 mac_addr[ETH_ALEN] = {0};
+ u8 mask_len;
+ u16 count;
+ int len;
+ int i;
+
+ pattern = pkt_pattern->pattern;
+ len = pkt_pattern->pattern_len;
+ mask = pkt_pattern->mask;
+
+ ether_addr_copy(mac_addr, rtwvif->mac_addr);
+ memset(rtw_pattern, 0, sizeof(*rtw_pattern));
+
+ mask_len = DIV_ROUND_UP(len, 8);
+
+ if (is_broadcast_ether_addr(pattern))
+ rtw_pattern->type = RTW_PATTERN_BROADCAST;
+ else if (is_multicast_ether_addr(pattern))
+ rtw_pattern->type = RTW_PATTERN_MULTICAST;
+ else if (ether_addr_equal(pattern, mac_addr))
+ rtw_pattern->type = RTW_PATTERN_UNICAST;
+ else
+ rtw_pattern->type = RTW_PATTERN_INVALID;
+
+ /* translate mask from os to mask for hw
+ * pattern from OS uses 'ethernet frame', like this:
+ * | 6 | 6 | 2 | 20 | Variable | 4 |
+ * |--------+--------+------+-----------+------------+-----|
+ * | 802.3 Mac Header | IP Header | TCP Packet | FCS |
+ * | DA | SA | Type |
+ *
+ * BUT, packets caught by our HW are '802.11 frames', beginning from LLC
+ * | 24 or 30 | 6 | 2 | 20 | Variable | 4 |
+ * |-------------------+--------+------+-----------+------------+-----|
+ * | 802.11 MAC Header | LLC | IP Header | TCP Packet | FCS |
+ * | Others | Type |
+ *
+ * Therefore, we need to translate mask_from_OS to mask_to_hw.
+ * We should left-shift the mask by 6 bits, then set the new bit[0~5] = 0,
+ * because the new mask[0~5] would cover 'SA', but our HW packet begins
+ * at the LLC header, so bit[0~5] correspond to the first 6 bytes of LLC
+ * and simply don't match.
+ */
+
+ /* Shift 6 bits */
+ for (i = 0; i < mask_len - 1; i++) {
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+ mask_hw[i] |= u8_get_bits(mask[i + 1], GENMASK(5, 0)) << 2;
+ }
+ mask_hw[i] = u8_get_bits(mask[i], GENMASK(7, 6));
+
+ /* Set bits 0-5 to zero */
+ mask_hw[0] &= (~GENMASK(5, 0));
+
+ memcpy(rtw_pattern->mask, mask_hw, RTW_MAX_PATTERN_MASK_SIZE);
+
+ /* To get the wake-up pattern from the mask, we do not count
+ * the first 12 bytes (DA[6] and SA[6]) of the pattern, to
+ * match the HW design.
+ */
+ count = 0;
+ for (i = 12; i < len; i++) {
+ if ((mask[i / 8] >> (i % 8)) & 0x01) {
+ content[count] = pattern[i];
+ count++;
+ }
+ }
+
+ rtw_pattern->crc = rtw_calc_crc(content, count);
+}
+
+static void rtw_wow_pattern_clear_cam(struct rtw_dev *rtwdev)
+{
+ bool ret;
+
+ rtw_write32(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1 |
+ BIT_WKFCAM_CLR_V1);
+
+ ret = check_hw_ready(rtwdev, REG_WKFMCAM_CMD, BIT_WKFCAM_POLLING_V1, 0);
+ if (!ret)
+ rtw_err(rtwdev, "failed to clean pattern cam\n");
+}
+
+static void rtw_wow_pattern_write(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_wow_pattern *rtw_pattern = rtw_wow->patterns;
+ int i = 0;
+
+ for (i = 0; i < rtw_wow->pattern_cnt; i++)
+ rtw_wow_pattern_write_cam_ent(rtwdev, i, rtw_pattern + i);
+}
+
+static void rtw_wow_pattern_clear(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_wow_pattern_clear_cam(rtwdev);
+
+ rtw_wow->pattern_cnt = 0;
+ memset(rtw_wow->patterns, 0, sizeof(rtw_wow->patterns));
+}
+
+static void rtw_wow_bb_stop(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ /* wait 100ms for firmware to finish TX */
+ msleep(100);
+
+ if (!rtw_read32_mask(rtwdev, REG_BCNQ_INFO, BIT_MGQ_CPU_EMPTY))
+ rtw_warn(rtwdev, "Wrong status of MGQ_CPU empty!\n");
+
+ rtw_wow->txpause = rtw_read8(rtwdev, REG_TXPAUSE);
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xff);
+ rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+}
+
+static void rtw_wow_bb_start(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+ rtw_write8(rtwdev, REG_TXPAUSE, rtw_wow->txpause);
+}
+
+static void rtw_wow_rx_dma_stop(struct rtw_dev *rtwdev)
+{
+ /* wait 100ms for HW to finish rx dma */
+ msleep(100);
+
+ rtw_write32_set(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
+
+ if (!check_hw_ready(rtwdev, REG_RXPKT_NUM, BIT_RXDMA_IDLE, 1))
+ rtw_err(rtwdev, "failed to stop rx dma\n");
+}
+
+static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
+{
+ rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
+}
+
+static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+{
+ /* wait 100ms for wow firmware to finish work */
+ msleep(100);
+
+ if (wow_enable) {
+ if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+ goto wow_fail;
+ } else {
+ if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
+ rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
+ goto wow_fail;
+ }
+
+ return 0;
+
+wow_fail:
+ rtw_err(rtwdev, "failed to check wow status %s\n",
+ wow_enable ? "enabled" : "disabled");
+ return -EBUSY;
+}
+
+static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw_fw_key_type_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = hw->priv;
+ u8 hw_key_type;
+
+ if (vif != rtwdev->wow.wow_vif)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ hw_key_type = RTW_CAM_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ hw_key_type = RTW_CAM_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ hw_key_type = RTW_CAM_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hw_key_type = RTW_CAM_AES;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ default:
+ rtw_err(rtwdev, "Unsupported key type for wowlan mode\n");
+ hw_key_type = 0;
+ break;
+ }
+
+ if (sta)
+ iter_data->pairwise_key_type = hw_key_type;
+ else
+ iter_data->group_key_type = hw_key_type;
+}
+
+static void rtw_wow_fw_security_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_key_type_iter_data data = {};
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+
+ data.rtwdev = rtwdev;
+ rtw_iterate_keys(rtwdev, wow_vif,
+ rtw_wow_fw_security_type_iter, &data);
+ rtw_fw_set_aoac_global_info_cmd(rtwdev, data.pairwise_key_type,
+ data.group_key_type);
+}
+
+static int rtw_wow_fw_start(struct rtw_dev *rtwdev)
+{
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_send_rsvd_page_h2c(rtwdev);
+ rtw_wow_pattern_write(rtwdev);
+ rtw_wow_fw_security_type(rtwdev);
+ rtw_fw_set_disconnect_decision_cmd(rtwdev, true);
+ rtw_fw_set_keep_alive_cmd(rtwdev, true);
+ } else if (rtw_wow_no_link(rtwdev)) {
+ rtw_fw_set_nlo_info(rtwdev, true);
+ rtw_fw_update_pkt_probe_req(rtwdev, NULL);
+ rtw_fw_channel_switch(rtwdev, true);
+ }
+
+ rtw_fw_set_wowlan_ctrl_cmd(rtwdev, true);
+ rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, true);
+
+ return rtw_wow_check_fw_status(rtwdev, true);
+}
+
+static int rtw_wow_fw_stop(struct rtw_dev *rtwdev)
+{
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_fw_set_disconnect_decision_cmd(rtwdev, false);
+ rtw_fw_set_keep_alive_cmd(rtwdev, false);
+ rtw_wow_pattern_clear(rtwdev);
+ } else if (rtw_wow_no_link(rtwdev)) {
+ rtw_fw_channel_switch(rtwdev, false);
+ rtw_fw_set_nlo_info(rtwdev, false);
+ }
+
+ rtw_fw_set_wowlan_ctrl_cmd(rtwdev, false);
+ rtw_fw_set_remote_wake_ctrl_cmd(rtwdev, false);
+
+ return rtw_wow_check_fw_status(rtwdev, false);
+}
+
+static void rtw_wow_avoid_reset_mac(struct rtw_dev *rtwdev)
+{
+ /* When resuming from wowlan mode, some hosts issue a signal
+ * (PCIe: PERST#, USB: SE0RST) to the device, which resets the
+ * MAC core. If that happens, the connection to the AP is lost.
+ * Setting the REG_RSV_CTRL register avoids this.
+ */
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ case RTW_HCI_TYPE_USB:
+ rtw_write8(rtwdev, REG_RSV_CTRL, BIT_WLOCK_1C_B6);
+ rtw_write8(rtwdev, REG_RSV_CTRL,
+ BIT_WLOCK_1C_B6 | BIT_R_DIS_PRST);
+ break;
+ default:
+ rtw_warn(rtwdev, "Unsupported hci type to disable reset MAC\n");
+ break;
+ }
+}
+
+static void rtw_wow_fw_media_status_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct rtw_fw_media_status_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+
+ rtw_fw_media_status_report(rtwdev, si->mac_id, iter_data->connect);
+}
+
+static void rtw_wow_fw_media_status(struct rtw_dev *rtwdev, bool connect)
+{
+ struct rtw_fw_media_status_iter_data data;
+
+ data.rtwdev = rtwdev;
+ data.connect = connect;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_wow_fw_media_status_iter, &data);
+}
+
+static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
+ struct cfg80211_ssid *ssid;
+ int i;
+
+ for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
+ ssid = &rtw_pno_req->match_sets[i].ssid;
+ rtw_add_rsvd_page_probe_req(rtwdev, ssid);
+ }
+ rtw_add_rsvd_page_probe_req(rtwdev, NULL);
+ rtw_add_rsvd_page(rtwdev, RSVD_NLO_INFO, false);
+ rtw_add_rsvd_page(rtwdev, RSVD_CH_INFO, true);
+}
+
+static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev)
+{
+ rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
+}
+
+static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev)
+{
+ rtw_reset_rsvd_page(rtwdev);
+
+ if (rtw_wow_mgd_linked(rtwdev)) {
+ rtw_wow_config_linked_rsvd_page(rtwdev);
+ } else if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags) &&
+ rtw_wow_no_link(rtwdev)) {
+ rtw_wow_config_pno_rsvd_page(rtwdev);
+ }
+}
+
+static int rtw_wow_dl_fw_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+
+ rtw_wow_config_rsvd_page(rtwdev);
+
+ return rtw_fw_download_rsvd_page(rtwdev, wow_vif);
+}
+
+static int rtw_wow_swap_fw(struct rtw_dev *rtwdev, enum rtw_fw_type type)
+{
+ struct rtw_fw_state *fw;
+ int ret;
+
+ switch (type) {
+ case RTW_WOWLAN_FW:
+ fw = &rtwdev->wow_fw;
+ break;
+
+ case RTW_NORMAL_FW:
+ fw = &rtwdev->fw;
+ break;
+
+ default:
+ rtw_warn(rtwdev, "unsupported firmware type to swap\n");
+ return -ENOENT;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret)
+ goto out;
+
+ rtw_fw_send_general_info(rtwdev);
+ rtw_fw_send_phydm_info(rtwdev);
+ rtw_wow_fw_media_status(rtwdev, true);
+
+out:
+ return ret;
+}
+
+static void rtw_wow_check_pno(struct rtw_dev *rtwdev,
+ struct cfg80211_sched_scan_request *nd_config)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *pno_req = &rtw_wow->pno_req;
+ struct ieee80211_channel *channel;
+ int i, size;
+
+ if (!nd_config->n_match_sets || !nd_config->n_channels)
+ goto err;
+
+ pno_req->match_set_cnt = nd_config->n_match_sets;
+ size = sizeof(*pno_req->match_sets) * pno_req->match_set_cnt;
+ pno_req->match_sets = kmemdup(nd_config->match_sets, size, GFP_KERNEL);
+ if (!pno_req->match_sets)
+ goto err;
+
+ pno_req->channel_cnt = nd_config->n_channels;
+ size = sizeof(*nd_config->channels[0]) * nd_config->n_channels;
+ pno_req->channels = kmalloc(size, GFP_KERNEL);
+ if (!pno_req->channels)
+ goto channel_err;
+
+ for (i = 0; i < pno_req->channel_cnt; i++) {
+ channel = pno_req->channels + i;
+ memcpy(channel, nd_config->channels[i], sizeof(*channel));
+ }
+
+ pno_req->scan_plan = *nd_config->scan_plans;
+ pno_req->inited = true;
+
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: net-detect is enabled\n");
+
+ return;
+
+channel_err:
+ kfree(pno_req->match_sets);
+
+err:
+ rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: net-detect is disabled\n");
+}
+
+static int rtw_wow_leave_linked_ps(struct rtw_dev *rtwdev)
+{
+ if (!test_bit(RTW_FLAG_WOWLAN, rtwdev->flags))
+ cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+
+ rtw_leave_lps(rtwdev);
+
+ return 0;
+}
+
+static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ int ret = 0;
+
+ if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) {
+ if (rtw_fw_lps_deep_mode)
+ rtw_leave_lps_deep(rtwdev);
+ } else {
+ if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
+ rtw_wow->ips_enabled = true;
+ ret = rtw_leave_ips(rtwdev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw_wow_leave_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_mgd_linked(rtwdev))
+ ret = rtw_wow_leave_linked_ps(rtwdev);
+ else if (rtw_wow_no_link(rtwdev))
+ ret = rtw_wow_leave_no_link_ps(rtwdev);
+
+ return ret;
+}
+
+static int rtw_wow_restore_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_no_link(rtwdev) && rtwdev->wow.ips_enabled)
+ ret = rtw_enter_ips(rtwdev);
+
+ return ret;
+}
+
+static int rtw_wow_enter_linked_ps(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ rtw_enter_lps(rtwdev, rtwvif->port);
+
+ return 0;
+}
+
+static int rtw_wow_enter_no_link_ps(struct rtw_dev *rtwdev)
+{
+ /* firmware enters deep ps by itself if supported */
+ set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+
+ return 0;
+}
+
+static int rtw_wow_enter_ps(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ if (rtw_wow_mgd_linked(rtwdev))
+ ret = rtw_wow_enter_linked_ps(rtwdev);
+ else if (rtw_wow_no_link(rtwdev) && rtw_fw_lps_deep_mode)
+ ret = rtw_wow_enter_no_link_ps(rtwdev);
+
+ return ret;
+}
+
+static void rtw_wow_stop_trx(struct rtw_dev *rtwdev)
+{
+ rtw_wow_bb_stop(rtwdev);
+ rtw_wow_rx_dma_stop(rtwdev);
+}
+
+static int rtw_wow_start(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_wow_fw_start(rtwdev);
+ if (ret)
+ goto out;
+
+ rtw_hci_stop(rtwdev);
+ rtw_wow_bb_start(rtwdev);
+ rtw_wow_avoid_reset_mac(rtwdev);
+
+out:
+ return ret;
+}
+
+static int rtw_wow_enable(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ rtw_wow_stop_trx(rtwdev);
+
+ ret = rtw_wow_swap_fw(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_err(rtwdev, "failed to swap wow fw\n");
+ goto error;
+ }
+
+ set_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw_wow_dl_fw_rsvd_page(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download wowlan rsvd page\n");
+ goto error;
+ }
+
+ ret = rtw_wow_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start wow\n");
+ goto error;
+ }
+
+ return ret;
+
+error:
+ clear_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+ return ret;
+}
+
+static int rtw_wow_stop(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ /* some HCI-related registers are reset after resume,
+ * so they need to be set up again.
+ */
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ return ret;
+ }
+
+ ret = rtw_hci_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start hci\n");
+ return ret;
+ }
+
+ ret = rtw_wow_fw_stop(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to stop wowlan fw\n");
+
+ rtw_wow_bb_stop(rtwdev);
+
+ return ret;
+}
+
+static void rtw_wow_resume_trx(struct rtw_dev *rtwdev)
+{
+ rtw_wow_rx_dma_start(rtwdev);
+ rtw_wow_bb_start(rtwdev);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+}
+
+static int rtw_wow_disable(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ clear_bit(RTW_FLAG_WOWLAN, rtwdev->flags);
+
+ ret = rtw_wow_stop(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to stop wow\n");
+ goto out;
+ }
+
+ ret = rtw_wow_swap_fw(rtwdev, RTW_NORMAL_FW);
+ if (ret) {
+ rtw_err(rtwdev, "failed to swap normal fw\n");
+ goto out;
+ }
+
+ ret = rtw_wow_dl_fw_rsvd_page(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to download normal rsvd page\n");
+
+out:
+ rtw_wow_resume_trx(rtwdev);
+ return ret;
+}
+
+static void rtw_wow_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+
+ /* The current wowlan implementation supports only one station vif,
+ * so stop the iteration once a suitable vif is found.
+ */
+ if (rtw_wow->wow_vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ switch (rtwvif->net_type) {
+ case RTW_NET_MGD_LINKED:
+ rtw_wow->wow_vif = vif;
+ break;
+ case RTW_NET_NO_LINK:
+ if (rtw_wow->pno_req.inited)
+ rtwdev->wow.wow_vif = vif;
+ break;
+ default:
+ break;
+ }
+}
+
+static int rtw_wow_set_wakeups(struct rtw_dev *rtwdev,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_wow_pattern *rtw_patterns = rtw_wow->patterns;
+ struct rtw_vif *rtwvif;
+ int i;
+
+ if (wowlan->disconnect)
+ set_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags);
+ if (wowlan->magic_pkt)
+ set_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags);
+ if (wowlan->gtk_rekey_failure)
+ set_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags);
+
+ if (wowlan->nd_config)
+ rtw_wow_check_pno(rtwdev, wowlan->nd_config);
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_wow_vif_iter, rtwdev);
+ if (!rtw_wow->wow_vif)
+ return -EPERM;
+
+ rtwvif = (struct rtw_vif *)rtw_wow->wow_vif->drv_priv;
+ if (wowlan->n_patterns && wowlan->patterns) {
+ rtw_wow->pattern_cnt = wowlan->n_patterns;
+ for (i = 0; i < wowlan->n_patterns; i++)
+ rtw_wow_pattern_generate(rtwdev, rtwvif,
+ wowlan->patterns + i,
+ rtw_patterns + i);
+ }
+
+ return 0;
+}
+
+static void rtw_wow_clear_wakeups(struct rtw_dev *rtwdev)
+{
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *pno_req = &rtw_wow->pno_req;
+
+ if (pno_req->inited) {
+ kfree(pno_req->channels);
+ kfree(pno_req->match_sets);
+ }
+
+ memset(rtw_wow, 0, sizeof(rtwdev->wow));
+}
+
+int rtw_wow_suspend(struct rtw_dev *rtwdev, struct cfg80211_wowlan *wowlan)
+{
+ int ret = 0;
+
+ ret = rtw_wow_set_wakeups(rtwdev, wowlan);
+ if (ret) {
+ rtw_err(rtwdev, "failed to set wakeup event\n");
+ goto out;
+ }
+
+ ret = rtw_wow_leave_ps(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ps from normal mode\n");
+ goto out;
+ }
+
+ ret = rtw_wow_enable(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to enable wow\n");
+ rtw_wow_restore_ps(rtwdev);
+ goto out;
+ }
+
+ ret = rtw_wow_enter_ps(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to enter ps for wow\n");
+
+out:
+ return ret;
+}
+
+int rtw_wow_resume(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ /* wowlan mode must have been enabled by a prior suspend */
+ if (!test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) {
+ rtw_err(rtwdev, "wow is not enabled\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = rtw_wow_leave_ps(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ps from wowlan mode\n");
+ goto out;
+ }
+
+ rtw_wow_show_wakeup_reason(rtwdev);
+
+ ret = rtw_wow_disable(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to disable wow\n");
+ goto out;
+ }
+
+ ret = rtw_wow_restore_ps(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "failed to restore ps to normal mode\n");
+
+out:
+ rtw_wow_clear_wakeups(rtwdev);
+ return ret;
+}
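
The mask translation in rtw_wow_pattern_generate above is easier to verify in isolation. Below is a minimal user-space sketch of the same 6-bit shift, assuming the bit ordering described in the comment; mask_os_to_hw and the sample values are illustrative, not driver symbols.

#include <stdint.h>
#include <stdio.h>

/* Shift the per-byte match mask down by 6 bit positions so that bits
 * covering the Ethernet view line up with the LLC view the hardware
 * sees, then clear the low 6 bits (SA coverage that never matches).
 */
static void mask_os_to_hw(const uint8_t *mask, uint8_t *mask_hw, int mask_len)
{
	int i;

	for (i = 0; i < mask_len - 1; i++)
		mask_hw[i] = (mask[i] >> 6) | ((mask[i + 1] & 0x3f) << 2);
	mask_hw[i] = mask[i] >> 6;
	mask_hw[0] &= 0xc0;	/* drop bits 0-5 */
}

int main(void)
{
	uint8_t mask[2] = { 0x00, 0xf0 }, hw[2];

	mask_os_to_hw(mask, hw, 2);
	printf("%02x %02x\n", hw[0], hw[1]);	/* c0 03: OS bits 12-15 -> hw bits 6-9 */
	return 0;
}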
diff --git a/drivers/net/wireless/realtek/rtw88/wow.h b/drivers/net/wireless/realtek/rtw88/wow.h
new file mode 100644
index 000000000000..289368a2cba4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/wow.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_WOW_H__
+#define __RTW_WOW_H__
+
+#define PNO_CHECK_BYTE 4
+
+enum rtw_wow_pattern_type {
+ RTW_PATTERN_BROADCAST = 0,
+ RTW_PATTERN_MULTICAST,
+ RTW_PATTERN_UNICAST,
+ RTW_PATTERN_VALID,
+ RTW_PATTERN_INVALID,
+};
+
+enum rtw_wake_reason {
+ RTW_WOW_RSN_RX_PTK_REKEY = 0x1,
+ RTW_WOW_RSN_RX_GTK_REKEY = 0x2,
+ RTW_WOW_RSN_RX_DEAUTH = 0x8,
+ RTW_WOW_RSN_DISCONNECT = 0x10,
+ RTW_WOW_RSN_RX_MAGIC_PKT = 0x21,
+ RTW_WOW_RSN_RX_PATTERN_MATCH = 0x23,
+ RTW_WOW_RSN_RX_NLO = 0x55,
+};
+
+struct rtw_fw_media_status_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 connect;
+};
+
+struct rtw_fw_key_type_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 group_key_type;
+ u8 pairwise_key_type;
+};
+
+static inline bool rtw_wow_mgd_linked(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ return (rtwvif->net_type == RTW_NET_MGD_LINKED);
+}
+
+static inline bool rtw_wow_no_link(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
+
+ return (rtwvif->net_type == RTW_NET_NO_LINK);
+}
+
+int rtw_wow_suspend(struct rtw_dev *rtwdev, struct cfg80211_wowlan *wowlan);
+int rtw_wow_resume(struct rtw_dev *rtwdev);
+
+#endif
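
The wake-pattern CRC computed by rtw_calc_crc in wow.c above is a bit-serial CRC16 with feedback taps at bits 15, 11 and 4, seeded with 0xffff and finished with a one's complement. A condensed user-space equivalent follows, handy for checking pattern content off-target; rtk_crc16_byte and the sample bytes are illustrative names, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* One byte of the Realtek pattern-CAM CRC16: shift left, feed
 * crc[15] ^ data_bit back into bit 0, and XOR the feedback into
 * bits 12 and 5 (mirroring __rtw_cal_crc16 above).
 */
static uint16_t rtk_crc16_byte(uint8_t data, uint16_t crc)
{
	int bit;

	for (bit = 0; bit < 8; bit++) {
		uint16_t fb = ((crc >> 15) & 1) ^ ((data >> bit) & 1);

		crc <<= 1;
		crc |= fb;
		if (fb)
			crc ^= (1u << 12) | (1u << 5);
	}
	return crc;
}

int main(void)
{
	const uint8_t content[] = { 0xde, 0xad, 0xbe, 0xef };
	uint16_t crc = 0xffff;
	size_t i;

	for (i = 0; i < sizeof(content); i++)
		crc = rtk_crc16_byte(content[i], crc);

	printf("pattern crc: 0x%04x\n", (uint16_t)~crc);
	return 0;
}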
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index f84250bdb8cf..6f8d5f9a9f7e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -622,6 +622,7 @@ static int bl_cmd(struct rsi_hw *adapter, u8 cmd, u8 exp_resp, char *str)
bl_start_cmd_timer(adapter, timeout);
status = bl_write_cmd(adapter, cmd, exp_resp, &regout_val);
if (status < 0) {
+ bl_stop_cmd_timer(adapter);
rsi_dbg(ERR_ZONE,
"%s: Command %s (%0x) writing failed..\n",
__func__, str, cmd);
@@ -737,10 +738,9 @@ static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
}
status = bl_cmd(adapter, cmd_req, cmd_resp, str);
- if (status) {
- bl_stop_cmd_timer(adapter);
+ if (status)
return status;
- }
+
return 0;
}
@@ -828,10 +828,9 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
status = bl_cmd(adapter, EOF_REACHED, FW_LOADING_SUCCESSFUL,
"EOF_REACHED");
- if (status) {
- bl_stop_cmd_timer(adapter);
+ if (status)
return status;
- }
+
rsi_dbg(INFO_ZONE, "FW loading is done and FW is running..\n");
return 0;
}
@@ -849,6 +848,7 @@ static int rsi_hal_prepare_fwload(struct rsi_hw *adapter)
&regout_val,
RSI_COMMON_REG_SIZE);
if (status < 0) {
+ bl_stop_cmd_timer(adapter);
rsi_dbg(ERR_ZONE,
"%s: REGOUT read failed\n", __func__);
return status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 53f41fc2cadf..a62d41c0ccbc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/types.h>
#include <net/rsi_91x.h>
#include "rsi_usb.h"
#include "rsi_hal.h"
@@ -29,7 +30,7 @@ MODULE_PARM_DESC(dev_oper_mode,
"9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
"6[AP + BT classic], 14[AP + BT classic + BT LE]");
-static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num);
+static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags);
/**
* rsi_usb_card_write() - This function writes to the USB Card.
@@ -117,7 +118,7 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
__le16 buffer_size;
int ii, bin_found = 0, bout_found = 0;
- iface_desc = &(interface->altsetting[0]);
+ iface_desc = interface->cur_altsetting;
for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
endpoint = &(iface_desc->endpoint[ii].desc);
@@ -148,9 +149,17 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
break;
}
- if (!(dev->bulkin_endpoint_addr[0]) &&
- dev->bulkout_endpoint_addr[0])
+ if (!(dev->bulkin_endpoint_addr[0] && dev->bulkout_endpoint_addr[0])) {
+ dev_err(&interface->dev, "missing wlan bulk endpoints\n");
return -EINVAL;
+ }
+
+ if (adapter->priv->coex_mode > 1) {
+ if (!dev->bulkin_endpoint_addr[1]) {
+ dev_err(&interface->dev, "missing bt bulk-in endpoint\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -285,20 +294,29 @@ static void rsi_rx_done_handler(struct urb *urb)
status = 0;
out:
- if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num))
+ if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC))
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__);
if (status)
dev_kfree_skb(rx_cb->rx_skb);
}
+static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1];
+ struct urb *urb = rx_cb->rx_urb;
+
+ usb_kill_urb(urb);
+}
+
/**
* rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
* @adapter: Pointer to the adapter structure.
*
* Return: 0 on success, a negative error code on failure.
*/
-static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
+static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags)
{
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1];
@@ -328,9 +346,11 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
rsi_rx_done_handler,
rx_cb);
- status = usb_submit_urb(urb, GFP_KERNEL);
- if (status)
+ status = usb_submit_urb(urb, mem_flags);
+ if (status) {
rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
+ dev_kfree_skb(skb);
+ }
return status;
}
@@ -816,17 +836,20 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(INIT_ZONE, "%s: Device Init Done\n", __func__);
}
- status = rsi_rx_urb_submit(adapter, WLAN_EP);
+ status = rsi_rx_urb_submit(adapter, WLAN_EP, GFP_KERNEL);
if (status)
goto err1;
if (adapter->priv->coex_mode > 1) {
- status = rsi_rx_urb_submit(adapter, BT_EP);
+ status = rsi_rx_urb_submit(adapter, BT_EP, GFP_KERNEL);
if (status)
- goto err1;
+ goto err_kill_wlan_urb;
}
return 0;
+
+err_kill_wlan_urb:
+ rsi_rx_urb_kill(adapter, WLAN_EP);
err1:
rsi_deinit_usb_interface(adapter);
err:
@@ -857,6 +880,10 @@ static void rsi_disconnect(struct usb_interface *pfunction)
adapter->priv->bt_adapter = NULL;
}
+ if (adapter->priv->coex_mode > 1)
+ rsi_rx_urb_kill(adapter, BT_EP);
+ rsi_rx_urb_kill(adapter, WLAN_EP);
+
rsi_reset_card(adapter);
rsi_deinit_usb_interface(adapter);
rsi_91x_deinit(adapter);
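
The probe() change above is a standard unwind-ordering fix: once the WLAN rx URB is live, a later failure must kill it before the generic teardown runs. A toy C sketch of the same goto-unwind shape; submit()/kill_urb() are stand-ins, not driver calls.

#include <stdio.h>

/* Each successfully acquired resource gets its own unwind label, so a
 * failure at step N releases exactly steps N-1..1 in reverse order.
 */
static int submit(const char *ep) { printf("submit %s\n", ep); return 0; }
static void kill_urb(const char *ep) { printf("kill %s\n", ep); }

static int probe(int coex_mode)
{
	int status;

	status = submit("WLAN_EP");
	if (status)
		goto err;

	if (coex_mode > 1) {
		status = submit("BT_EP");
		if (status)
			goto err_kill_wlan_urb;
	}
	return 0;

err_kill_wlan_urb:
	kill_urb("WLAN_EP");
err:
	printf("deinit usb interface\n");
	return status;
}

int main(void)
{
	return probe(2);
}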
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index 2dfcdb145944..400dd585916b 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -715,7 +715,7 @@ void cw1200_tx(struct ieee80211_hw *dev,
};
struct ieee80211_sta *sta;
struct wsm_tx *wsm;
- bool tid_update = 0;
+ bool tid_update = false;
u8 flags = 0;
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 2a48fc6ad56c..6ef8fc9ae627 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1429,7 +1429,7 @@ out:
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+ u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1444,8 +1444,10 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
lid_type = WEP_DEFAULT_LID_TYPE;
else
lid_type = BROADCAST_LID_TYPE;
- } else {
+ } else if (is_pairwise) {
lid_type = UNICAST_LID_TYPE;
+ } else {
+ lid_type = BROADCAST_LID_TYPE;
}
wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 084375ba4abf..bfad7b5a1ac6 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -65,7 +65,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16);
+ u16 tx_seq_16, bool is_pairwise);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e994995d79ab..ed049c9f7e29 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3273,7 +3273,7 @@ out:
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 id, u8 key_type, u8 key_size,
const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+ u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -3311,6 +3311,7 @@ static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ap_key->hlid = hlid;
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
+ ap_key->is_pairwise = is_pairwise;
wlvif->ap.recorded_keys[i] = ap_key;
return 0;
@@ -3346,7 +3347,7 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
- key->tx_seq_16);
+ key->tx_seq_16, key->is_pairwise);
if (ret < 0)
goto out;
@@ -3369,7 +3370,8 @@ out:
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
- u16 tx_seq_16, struct ieee80211_sta *sta)
+ u16 tx_seq_16, struct ieee80211_sta *sta,
+ bool is_pairwise)
{
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
@@ -3396,12 +3398,12 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
- tx_seq_16);
+ tx_seq_16, is_pairwise);
} else {
ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
- tx_seq_16);
+ tx_seq_16, is_pairwise);
}
if (ret < 0)
@@ -3501,6 +3503,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
u16 tx_seq_16 = 0;
u8 key_type;
u8 hlid;
+ bool is_pairwise;
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
@@ -3550,12 +3553,14 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+
switch (cmd) {
case SET_KEY:
ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
- tx_seq_32, tx_seq_16, sta);
+ tx_seq_32, tx_seq_16, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not add or replace key");
return ret;
@@ -3581,7 +3586,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
- 0, 0, sta);
+ 0, 0, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not remove key");
return ret;
@@ -6223,6 +6228,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
@@ -6267,7 +6273,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_IBSS_RSN;
wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
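
The is_pairwise plumbing above exists so AP-mode group keys stop defaulting to the unicast LID. A minimal sketch of the corrected selection, assuming the WEP branch keeps its existing default-key handling; the names here are illustrative stand-ins for the driver's constants.

#include <stdio.h>

enum lid_type { UNICAST_LID, BROADCAST_LID, WEP_DEFAULT_LID };

/* Before the patch every non-WEP key was treated as unicast; a group
 * (GTK) key in AP/IBSS mode must map to the broadcast LID instead.
 */
static enum lid_type ap_key_lid(int is_wep, int is_default_key, int is_pairwise)
{
	if (is_wep)
		return is_default_key ? WEP_DEFAULT_LID : BROADCAST_LID;
	return is_pairwise ? UNICAST_LID : BROADCAST_LID;
}

int main(void)
{
	printf("group CCMP -> %d (BROADCAST_LID=%d)\n",
	       ap_key_lid(0, 0, 0), BROADCAST_LID);
	return 0;
}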
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 6fab60b0e367..eefae3f867b9 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -212,6 +212,7 @@ struct wl1271_ap_key {
u8 hlid;
u32 tx_seq_32;
u16 tx_seq_16;
+ bool is_pairwise;
};
enum wl12xx_flags {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 007bf6803293..686161db8706 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1285,7 +1285,7 @@ out:
return rc;
}
-static void wl3501_tx_timeout(struct net_device *dev)
+static void wl3501_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct net_device_stats *stats = &dev->stats;
int rc;
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 0db7362bedb4..41641fc2be74 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -830,7 +830,7 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void zd1201_tx_timeout(struct net_device *dev)
+static void zd1201_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct zd1201 *zd = netdev_priv(dev);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 7b5c2fe5bd4d..8ff0374126e4 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1263,7 +1263,7 @@ static void print_id(struct usb_device *udev)
static int eject_installer(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct usb_host_interface *iface_desc = &intf->altsetting[0];
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 10d580c3dea3..6b7532f7c936 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -51,7 +51,8 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
found = false;
oldest = NULL;
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+ lockdep_is_held(&vif->hash.cache.lock)) {
/* Make sure we don't add duplicate entries */
if (entry->len == len &&
memcmp(entry->tag, tag, len) == 0)
@@ -102,7 +103,8 @@ static void xenvif_flush_hash(struct xenvif *vif)
spin_lock_irqsave(&vif->hash.cache.lock, flags);
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+ lockdep_is_held(&vif->hash.cache.lock)) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 68dd7bb07ca6..0c8a02a1ead7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -585,6 +585,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
struct net_device *dev = vif->dev;
void *addr;
struct xen_netif_ctrl_sring *shared;
+ RING_IDX rsp_prod, req_prod;
int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
@@ -593,7 +594,14 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
goto err;
shared = (struct xen_netif_ctrl_sring *)addr;
- BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(shared->rsp_prod);
+ req_prod = READ_ONCE(shared->req_prod);
+
+ BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
+ goto err_unmap;
err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
if (err < 0)
@@ -628,18 +636,6 @@ err:
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
- if (queue->tx_irq) {
- unbind_from_irqhandler(queue->tx_irq, queue);
- if (queue->tx_irq == queue->rx_irq)
- queue->rx_irq = 0;
- queue->tx_irq = 0;
- }
-
- if (queue->rx_irq) {
- unbind_from_irqhandler(queue->rx_irq, queue);
- queue->rx_irq = 0;
- }
-
if (queue->task) {
kthread_stop(queue->task);
queue->task = NULL;
@@ -655,6 +651,18 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
queue->napi.poll = NULL;
}
+ if (queue->tx_irq) {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq == queue->rx_irq)
+ queue->rx_irq = 0;
+ queue->tx_irq = 0;
+ }
+
+ if (queue->rx_irq) {
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ queue->rx_irq = 0;
+ }
+
xenvif_unmap_frontend_data_rings(queue);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0020b2e8c279..315dfc6ea297 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1453,7 +1453,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
-
+ RING_IDX rsp_prod, req_prod;
int err = -ENOMEM;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
@@ -1462,7 +1462,14 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
goto err;
txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(txs->rsp_prod);
+ req_prod = READ_ONCE(txs->req_prod);
+
+ BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
+ goto err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&rx_ring_ref, 1, &addr);
@@ -1470,7 +1477,14 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ rsp_prod = READ_ONCE(rxs->rsp_prod);
+ req_prod = READ_ONCE(rxs->req_prod);
+
+ BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
+
+ err = -EIO;
+ if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
+ goto err;
return 0;
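
Both the ctrl-ring and data-ring attach paths above now validate the frontend-supplied producer indices before trusting the shared ring. The check reduces to a wrap-safe unsigned subtraction; a self-contained sketch follows, with ring_indices_sane as an illustrative name.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* With free-running 32-bit indices, a sane ring always satisfies
 * req_prod - rsp_prod <= ring_size, even across wrap-around, so the
 * unsigned difference alone exposes a corrupted or hostile frontend.
 */
static bool ring_indices_sane(uint32_t req_prod, uint32_t rsp_prod,
			      uint32_t ring_size)
{
	return req_prod - rsp_prod <= ring_size;
}

int main(void)
{
	printf("%d\n", ring_indices_sane(5, 3, 256));           /* 1 */
	printf("%d\n", ring_indices_sane(2, 0xffffffffu, 256)); /* 1: wrapped */
	printf("%d\n", ring_indices_sane(0, 1000, 256));        /* 0: bogus */
	return 0;
}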
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f533b7372d59..286054b60d47 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -195,185 +195,6 @@ static void xenvif_debugfs_delif(struct xenvif *vif)
}
#endif /* CONFIG_DEBUG_FS */
-static int netback_remove(struct xenbus_device *dev)
-{
- struct backend_info *be = dev_get_drvdata(&dev->dev);
-
- set_backend_state(be, XenbusStateClosed);
-
- unregister_hotplug_status_watch(be);
- if (be->vif) {
- kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
- xen_unregister_watchers(be->vif);
- xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_free(be->vif);
- be->vif = NULL;
- }
- kfree(be->hotplug_script);
- kfree(be);
- dev_set_drvdata(&dev->dev, NULL);
- return 0;
-}
-
-
-/**
- * Entry point to this code when a new device is created. Allocate the basic
- * structures and switch to InitWait.
- */
-static int netback_probe(struct xenbus_device *dev,
- const struct xenbus_device_id *id)
-{
- const char *message;
- struct xenbus_transaction xbt;
- int err;
- int sg;
- const char *script;
- struct backend_info *be = kzalloc(sizeof(struct backend_info),
- GFP_KERNEL);
- if (!be) {
- xenbus_dev_fatal(dev, -ENOMEM,
- "allocating backend structure");
- return -ENOMEM;
- }
-
- be->dev = dev;
- dev_set_drvdata(&dev->dev, be);
-
- be->state = XenbusStateInitialising;
- err = xenbus_switch_state(dev, XenbusStateInitialising);
- if (err)
- goto fail;
-
- sg = 1;
-
- do {
- err = xenbus_transaction_start(&xbt);
- if (err) {
- xenbus_dev_fatal(dev, err, "starting transaction");
- goto fail;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
- if (err) {
- message = "writing feature-sg";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
- "%d", sg);
- if (err) {
- message = "writing feature-gso-tcpv4";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
- "%d", sg);
- if (err) {
- message = "writing feature-gso-tcpv6";
- goto abort_transaction;
- }
-
- /* We support partial checksum setup for IPv6 packets */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-ipv6-csum-offload",
- "%d", 1);
- if (err) {
- message = "writing feature-ipv6-csum-offload";
- goto abort_transaction;
- }
-
- /* We support rx-copy path. */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-rx-copy", "%d", 1);
- if (err) {
- message = "writing feature-rx-copy";
- goto abort_transaction;
- }
-
- /*
- * We don't support rx-flip path (except old guests who don't
- * grok this feature flag).
- */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-rx-flip", "%d", 0);
- if (err) {
- message = "writing feature-rx-flip";
- goto abort_transaction;
- }
-
- /* We support dynamic multicast-control. */
- err = xenbus_printf(xbt, dev->nodename,
- "feature-multicast-control", "%d", 1);
- if (err) {
- message = "writing feature-multicast-control";
- goto abort_transaction;
- }
-
- err = xenbus_printf(xbt, dev->nodename,
- "feature-dynamic-multicast-control",
- "%d", 1);
- if (err) {
- message = "writing feature-dynamic-multicast-control";
- goto abort_transaction;
- }
-
- err = xenbus_transaction_end(xbt, 0);
- } while (err == -EAGAIN);
-
- if (err) {
- xenbus_dev_fatal(dev, err, "completing transaction");
- goto fail;
- }
-
- /*
- * Split event channels support, this is optional so it is not
- * put inside the above loop.
- */
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "feature-split-event-channels",
- "%u", separate_tx_rx_irq);
- if (err)
- pr_debug("Error writing feature-split-event-channels\n");
-
- /* Multi-queue support: This is an optional feature. */
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "multi-queue-max-queues", "%u", xenvif_max_queues);
- if (err)
- pr_debug("Error writing multi-queue-max-queues\n");
-
- err = xenbus_printf(XBT_NIL, dev->nodename,
- "feature-ctrl-ring",
- "%u", true);
- if (err)
- pr_debug("Error writing feature-ctrl-ring\n");
-
- script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
- if (IS_ERR(script)) {
- err = PTR_ERR(script);
- xenbus_dev_fatal(dev, err, "reading script");
- goto fail;
- }
-
- be->hotplug_script = script;
-
-
- /* This kicks hotplug scripts, so do it immediately. */
- err = backend_create_xenvif(be);
- if (err)
- goto fail;
-
- return 0;
-
-abort_transaction:
- xenbus_transaction_end(xbt, 1);
- xenbus_dev_fatal(dev, err, "%s", message);
-fail:
- pr_debug("failed\n");
- netback_remove(dev);
- return err;
-}
-
-
/*
* Handle the creation of the hotplug script environment. We add the script
* and vif variables to the environment, for the benefit of the vif-* hotplug
@@ -827,6 +648,7 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -1128,6 +950,174 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return 0;
}
+static int netback_remove(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ unregister_hotplug_status_watch(be);
+ if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ backend_disconnect(be);
+ xenvif_free(be->vif);
+ be->vif = NULL;
+ }
+ kfree(be->hotplug_script);
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and switch to InitWait.
+ */
+static int netback_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+ int sg;
+ const char *script;
+ struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);
+
+ if (!be) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "allocating backend structure");
+ return -ENOMEM;
+ }
+
+ be->dev = dev;
+ dev_set_drvdata(&dev->dev, be);
+
+ sg = 1;
+
+ do {
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv6";
+ goto abort_transaction;
+ }
+
+ /* We support partial checksum setup for IPv6 packets */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-ipv6-csum-offload",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-ipv6-csum-offload";
+ goto abort_transaction;
+ }
+
+ /* We support rx-copy path. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-copy", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-copy";
+ goto abort_transaction;
+ }
+
+ /* We don't support rx-flip path (except old guests who
+ * don't grok this feature flag).
+ */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-flip", "%d", 0);
+ if (err) {
+ message = "writing feature-rx-flip";
+ goto abort_transaction;
+ }
+
+ /* We support dynamic multicast-control. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-multicast-control", "%d", 1);
+ if (err) {
+ message = "writing feature-multicast-control";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-dynamic-multicast-control",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-dynamic-multicast-control";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ } while (err == -EAGAIN);
+
+ if (err) {
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto fail;
+ }
+
+ /* Split event channels support, this is optional so it is not
+ * put inside the above loop.
+ */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-split-event-channels",
+ "%u", separate_tx_rx_irq);
+ if (err)
+ pr_debug("Error writing feature-split-event-channels\n");
+
+ /* Multi-queue support: This is an optional feature. */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "multi-queue-max-queues", "%u", xenvif_max_queues);
+ if (err)
+ pr_debug("Error writing multi-queue-max-queues\n");
+
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-ctrl-ring",
+ "%u", true);
+ if (err)
+ pr_debug("Error writing feature-ctrl-ring\n");
+
+ backend_switch_state(be, XenbusStateInitWait);
+
+ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+ if (IS_ERR(script)) {
+ err = PTR_ERR(script);
+ xenbus_dev_fatal(dev, err, "reading script");
+ goto fail;
+ }
+
+ be->hotplug_script = script;
+
+ /* This kicks hotplug scripts, so do it immediately. */
+ err = backend_create_xenvif(be);
+ if (err)
+ goto fail;
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+fail:
+ pr_debug("failed\n");
+ netback_remove(dev);
+ return err;
+}
+
static const struct xenbus_device_id netback_ids[] = {
{ "vif" },
{ "" }
@@ -1139,6 +1129,7 @@ static struct xenbus_driver netback_driver = {
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
+ .allow_rebind = true,
};
int xenvif_xenbus_init(void)
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 4d1909aecd6c..9f60e4dc5a90 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios);
if (r)
- return r;
+ dev_dbg(dev, "Unable to add GPIO mapping table\n");
phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(phy->gpiod_en)) {
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 7507176cca0a..0207e66cee21 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -274,7 +274,6 @@ MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table);
static struct i2c_driver pn533_i2c_driver = {
.driver = {
.name = PN533_I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pn533_i2c_match),
},
.probe = pn533_i2c_probe,
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 4590fbf82dc2..f5bb7ace2ff5 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
cmd, sizeof(cmd), false);
rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
- &transferred, 0);
+ &transferred, 5000);
kfree(buffer);
if (rc || (transferred != sizeof(cmd))) {
nfc_err(&phy->udev->dev,
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index cda996f6954e..2b83156efe3f 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -693,7 +693,7 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
target->nfcid1_len != 10)
return -EOPNOTSUPP;
- return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
PN544_RF_READER_CMD_ACTIVATE_NEXT,
target->nfcid1, target->nfcid1_len, NULL);
} else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK |
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 604dba4f18af..8e4d355dc3ae 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -565,7 +565,7 @@ static void port100_tx_update_payload_len(void *_frame, int len)
{
struct port100_frame *frame = _frame;
- frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+ le16_add_cpu(&frame->datalen, len);
}
static bool port100_rx_frame_is_valid(void *_frame)
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index be110d9cef02..de613c623a2c 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -507,7 +507,10 @@ int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
struct s3fwrn5_info *info = nci_get_drvdata(ndev);
struct s3fwrn5_fw_info *fw_info = &info->fw_info;
- BUG_ON(fw_info->rsp);
+ if (WARN_ON(fw_info->rsp)) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
fw_info->rsp = skb;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ad8e4df1282b..4eae441f86c9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -337,13 +337,7 @@ static void pmem_release_disk(void *__pmem)
put_disk(pmem->disk);
}
-static void pmem_pagemap_page_free(struct page *page)
-{
- wake_up_var(&page->_refcount);
-}
-
static const struct dev_pagemap_ops fsdax_pagemap_ops = {
- .page_free = pmem_pagemap_page_free,
.kill = pmem_pagemap_kill,
.cleanup = pmem_pagemap_cleanup,
};
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index c6439638a419..b9358db83e96 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 667f18f465be..ada59df642d2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
* nvme_reset_wq - hosts nvme reset works
* nvme_delete_wq - hosts nvme delete works
*
- * nvme_wq will host works such are scan, aen handling, fw activation,
- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive, periodic reconnects etc. nvme_reset_wq
* runs reset works which also flush works hosted on nvme_wq for
* serialization purposes. nvme_delete_wq host controller deletion
* works which flush reset works for serialization.
@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status)
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
case NVME_SC_LBA_RANGE:
+ case NVME_SC_CMD_INTERRUPTED:
+ case NVME_SC_NS_NOT_READY:
return BLK_STS_TARGET;
case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
@@ -974,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1004,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");
ctrl->comp_seen = false;
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -1021,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
@@ -3865,7 +3867,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
sizeof(*log), 0))
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index a5af21f5d370..2e6477ed420f 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -5,14 +5,11 @@
*/
#include <linux/hwmon.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#include "nvme.h"
-/* These macros should be moved to linux/temperature.h */
-#define MILLICELSIUS_TO_KELVIN(t) DIV_ROUND_CLOSEST((t) + 273150, 1000)
-#define KELVIN_TO_MILLICELSIUS(t) ((t) * 1000L - 273150)
-
struct nvme_hwmon_data {
struct nvme_ctrl *ctrl;
struct nvme_smart_log log;
@@ -35,7 +32,7 @@ static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
return -EIO;
if (ret < 0)
return ret;
- *temp = KELVIN_TO_MILLICELSIUS(status & NVME_TEMP_THRESH_MASK);
+ *temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK);
return 0;
}
@@ -46,7 +43,7 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
int ret;
- temp = MILLICELSIUS_TO_KELVIN(temp);
+ temp = millicelsius_to_kelvin(temp);
threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
if (under)
@@ -88,7 +85,7 @@ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_min:
return nvme_get_temp_thresh(data->ctrl, channel, true, val);
case hwmon_temp_crit:
- *val = KELVIN_TO_MILLICELSIUS(data->ctrl->cctemp);
+ *val = kelvin_to_millicelsius(data->ctrl->cctemp);
return 0;
default:
break;
@@ -105,7 +102,7 @@ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
temp = get_unaligned_le16(log->temperature);
else
temp = le16_to_cpu(log->temp_sensor[channel - 1]);
- *val = KELVIN_TO_MILLICELSIUS(temp);
+ *val = kelvin_to_millicelsius(temp);
break;
case hwmon_temp_alarm:
*val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
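
The hwmon hunks swap the driver-local macros for the kelvin_to_millicelsius()/millicelsius_to_kelvin() helpers from <linux/units.h>; both encode the same arithmetic. A small standalone C check of that arithmetic (userspace, for reference only):

    #include <assert.h>

    /* round-to-nearest division for non-negative values, as the kernel's
     * DIV_ROUND_CLOSEST() does */
    static long div_round_closest(long n, long d) { return (n + d / 2) / d; }

    static long millicelsius_to_kelvin(long t) { return div_round_closest(t + 273150, 1000); }
    static long kelvin_to_millicelsius(long t) { return t * 1000L - 273150; }

    int main(void)
    {
            assert(millicelsius_to_kelvin(25000) == 298);  /* 25.000 C -> 298 K */
            assert(kelvin_to_millicelsius(298) == 24850);  /* 298 K -> 24.850 C */
            return 0;
    }

The round trip loses sub-kelvin precision because NVMe reports temperatures in whole kelvins; that is a property of the protocol, not of the helpers.
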
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 365a2ddbeaa7..9c80f9f08149 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -167,7 +167,6 @@ struct nvme_queue {
/* only used for poll queues: */
spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
- struct blk_mq_tags **tags;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
@@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
WARN_ON(hctx_idx != 0);
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
- WARN_ON(nvmeq->tags);
hctx->driver_data = nvmeq;
- nvmeq->tags = &dev->admin_tagset.tags[0];
return 0;
}
-static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
- struct nvme_queue *nvmeq = hctx->driver_data;
-
- nvmeq->tags = NULL;
-}
-
static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
- if (!nvmeq->tags)
- nvmeq->tags = &dev->tagset.tags[hctx_idx];
-
WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
hctx->driver_data = nvmeq;
return 0;
@@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}
+static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
+{
+ if (!nvmeq->qid)
+ return nvmeq->dev->admin_tagset.tags[0];
+ return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
+}
+
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
@@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
return;
}
- req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
nvme_end_request(req, cqe->status, cqe->result);
}
@@ -1407,6 +1401,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
nvme_poll_irqdisable(nvmeq, -1);
}
+/*
+ * Called only on a device that has been disabled and after all other threads
+ * that can check this device's completion queues have synced. This is the
+ * last chance for the driver to see a natural completion before
+ * nvme_cancel_request() terminates all incomplete requests.
+ */
+static void nvme_reap_pending_cqes(struct nvme_dev *dev)
+{
+ u16 start, end;
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+ nvme_process_cq(&dev->queues[i], &start, &end, -1);
+ nvme_complete_cqes(&dev->queues[i], start, end);
+ }
+}
+
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
int entry_size)
{
@@ -1572,7 +1583,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_pci_complete_rq,
.init_hctx = nvme_admin_init_hctx,
- .exit_hctx = nvme_admin_exit_hctx,
.init_request = nvme_init_request,
.timeout = nvme_timeout,
};
@@ -2242,11 +2252,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
if (timeout == 0)
return false;
- /* handle any remaining CQEs */
- if (opcode == nvme_admin_delete_cq &&
- !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
- nvme_poll_irqdisable(nvmeq, -1);
-
sent--;
if (nr_queues)
goto retry;
@@ -2435,6 +2440,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_suspend_io_queues(dev);
nvme_suspend_queue(&dev->queues[0]);
nvme_pci_disable(dev);
+ nvme_reap_pending_cqes(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
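
These pci.c hunks delete the cached nvmeq->tags pointer, whose lifetime had to be managed in init_hctx/exit_hctx, and instead derive the tagset from the queue id on every completion. Restated with stub types (a sketch, not the kernel structures):

    struct tags;                        /* stand-in for struct blk_mq_tags */

    struct dev {
            struct tags *admin_tags;    /* admin_tagset.tags[0] */
            struct tags **io_tags;      /* tagset.tags[] */
    };

    struct queue {
            struct dev *dev;
            unsigned int qid;           /* 0 is the admin queue */
    };

    static struct tags *queue_tagset(struct queue *q)
    {
            if (!q->qid)
                    return q->dev->admin_tags;
            return q->dev->io_tags[q->qid - 1];
    }

The lookup is cheap, and nothing needs to be nulled when a hardware context is torn down.
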
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2a47c6c5007e..3e85c5cacefd 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
- queue_work(nvme_wq, &ctrl->err_work);
+ queue_work(nvme_reset_wq, &ctrl->err_work);
}
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 6d43b23a0fc8..49d4373b84eb 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return;
- queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
+ queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
@@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w)
} else if (unlikely(result < 0)) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", result);
- if (result != -EPIPE)
+
+ /*
+ * Fail the request unless the peer closed the connection, in which
+ * case the error recovery flow will complete all inflight requests.
+ */
+ if ((result != -EPIPE) && (result != -ECONNRESET))
nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
return;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 56c21b501185..72a7e41f3018 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_HOST_ID:
+ return sizeof(req->sq->ctrl->hostid);
+ default:
+ return 0;
+ }
+}
+
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
return le64_to_cpu(cmd->get_log_page.lpo);
@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
return;
switch (cdw10 & 0xff) {
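
nvmet_feat_data_len() lets Get Features declare a per-feature transfer length: most features return their value in the completion's result dword and move no data, while Host Identifier transfers a payload. A standalone sketch of the dispatch (the 16-byte length mirrors sizeof(hostid) above; the FID value is from the NVMe spec):

    #include <stdint.h>
    #include <stddef.h>

    #define NVME_FEAT_HOST_ID 0x81      /* Host Identifier feature id */

    static size_t feat_data_len(uint32_t cdw10)
    {
            switch (cdw10 & 0xff) {     /* low byte of cdw10 selects the FID */
            case NVME_FEAT_HOST_ID:
                    return 16;          /* hostid payload */
            default:
                    return 0;           /* result fits in the completion */
            }
    }
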
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 28438b833c1b..576de773b4db 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
-{
- struct nvmet_req *req;
-
- while (1) {
- mutex_lock(&ctrl->lock);
- if (!ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
-
- req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
- }
-}
-
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
{
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, async_event_work);
struct nvmet_async_event *aen;
struct nvmet_req *req;
@@ -159,18 +140,41 @@ static void nvmet_async_event_work(struct work_struct *work)
struct nvmet_async_event, entry);
if (!aen || !ctrl->nr_async_event_cmds) {
mutex_unlock(&ctrl->lock);
- return;
+ break;
}
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- nvmet_set_result(req, nvmet_async_event_result(aen));
+ if (status == 0)
+ nvmet_set_result(req, nvmet_async_event_result(aen));
list_del(&aen->entry);
kfree(aen);
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, 0);
+ nvmet_req_complete(req, status);
+ }
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_req *req;
+
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, async_event_work);
+
+ nvmet_async_events_process(ctrl, 0);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -555,7 +559,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
} else {
struct nvmet_ns *old;
- list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+ list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
+ lockdep_is_held(&subsys->lock)) {
BUG_ON(ns->nsid == old->nsid);
if (ns->nsid < old->nsid)
break;
@@ -752,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
+ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ struct nvmet_ctrl *ctrl = sq->ctrl;
+
/*
* If this is the admin queue, complete all AERs so that our
* queue doesn't have outstanding requests on it.
*/
- if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
- nvmet_async_events_free(sq->ctrl);
+ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
+ nvmet_async_events_process(ctrl, status);
+ nvmet_async_events_free(ctrl);
+ }
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
- if (sq->ctrl) {
- nvmet_ctrl_put(sq->ctrl);
+ if (ctrl) {
+ nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
}
@@ -938,6 +948,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
}
EXPORT_SYMBOL_GPL(nvmet_check_data_len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+ if (unlikely(data_len > req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ return false;
+ }
+
+ return true;
+}
+
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
struct pci_dev *p2p_dev = NULL;
@@ -1172,7 +1193,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
ctrl->p2p_client = get_device(req->p2p_client);
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
+ lockdep_is_held(&ctrl->subsys->lock))
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
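
nvmet_check_data_len_lte(), added above, relaxes the exact-length check for commands such as DSM, where a host may legally transfer less than the maximum the command format allows; only transfers the command cannot satisfy are rejected. The two policies, side by side (plain C sketch):

    #include <stdbool.h>
    #include <stddef.h>

    /* exact match: commands with a fixed payload */
    static bool check_data_len(size_t transfer_len, size_t data_len)
    {
            return transfer_len == data_len;
    }

    /* upper bound: the data the command needs must fit in the transfer */
    static bool check_data_len_lte(size_t transfer_len, size_t data_len)
    {
            return data_len <= transfer_len;
    }
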
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f7297473d9eb..feef15c38ec9 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,6 +109,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 sqsize = le16_to_cpu(c->sqsize);
struct nvmet_ctrl *old;
+ u16 ret;
old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
if (old) {
@@ -119,7 +120,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
if (!sqsize) {
pr_warn("queue size zero!\n");
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
- return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ goto err;
}
/* note: convert queue size from 0's-based value to 1's-based value */
@@ -132,16 +134,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
}
if (ctrl->ops->install_queue) {
- u16 ret = ctrl->ops->install_queue(req->sq);
-
+ ret = ctrl->ops->install_queue(req->sq);
if (ret) {
pr_err("failed to install queue %d cntlid %d ret %x\n",
- qid, ret, ctrl->cntlid);
- return ret;
+ qid, ctrl->cntlid, ret);
+ goto err;
}
}
return 0;
+
+err:
+ req->sq->ctrl = NULL;
+ return ret;
}
static void nvmet_execute_admin_connect(struct nvmet_req *req)
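
The nvmet_install_queue() rework funnels every failure through one err label that clears req->sq->ctrl, undoing the earlier cmpxchg() so a failed connect does not leave the queue half-bound to a controller. The unwind shape in miniature (illustrative names):

    #include <errno.h>

    struct ctrl;
    struct queue { struct ctrl *ctrl; };

    static int install_queue(struct queue *sq, struct ctrl *ctrl,
                             unsigned int sqsize)
    {
            int ret;

            sq->ctrl = ctrl;            /* claim the queue */
            if (!sqsize) {
                    ret = -EINVAL;
                    goto err;           /* every failure funnels here */
            }
            /* ... further setup that may also 'goto err' ... */
            return 0;
    err:
            sq->ctrl = NULL;            /* undo the claim on any failure */
            return ret;
    }
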
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index b6fca0e421ef..ea0e596be15d 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
switch (le32_to_cpu(req->cmd->dsm.attributes)) {
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce06605..cd5670b83118 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w)
static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 46df45e837c9..eda28b22a2c8 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 73567e922491..35efab1ba8d9 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -109,6 +109,14 @@ config QCOM_QFPROM
This driver can also be built as a module. If so, the module
will be called nvmem_qfprom.
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+ This driver supports the Shared Direct Access Memory Module on
+	  Qualcomm Technologies, Inc. PMICs. It provides clients with an
+	  interface to read from and write to the SDAM module's shared memory.
+
config ROCKCHIP_EFUSE
tristate "Rockchip eFuse Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 9e667823edb3..6b466cd1427b 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
nvmem_mtk-efuse-y := mtk-efuse.o
obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9f1ee9c766ec..ef326f243f36 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"
@@ -54,8 +55,14 @@ static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
- if (nvmem->reg_write)
- return nvmem->reg_write(nvmem->priv, offset, val, bytes);
+ int ret;
+
+ if (nvmem->reg_write) {
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
+ ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
+ return ret;
+ }
return -EINVAL;
}
@@ -83,7 +90,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
list_del(&cell->node);
mutex_unlock(&nvmem_mutex);
of_node_put(cell->np);
- kfree(cell->name);
+ kfree_const(cell->name);
kfree(cell);
}
@@ -110,7 +117,9 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
cell->nvmem = nvmem;
cell->offset = info->offset;
cell->bytes = info->bytes;
- cell->name = info->name;
+ cell->name = kstrdup_const(info->name, GFP_KERNEL);
+ if (!cell->name)
+ return -ENOMEM;
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
@@ -300,7 +309,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
cell->name, nvmem->stride);
/* Cells already added will be freed later. */
- kfree(cell->name);
+ kfree_const(cell->name);
kfree(cell);
return -EINVAL;
}
@@ -338,6 +347,14 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
kfree(nvmem);
return ERR_PTR(rval);
}
+ if (config->wp_gpio)
+ nvmem->wp_gpio = config->wp_gpio;
+ else
+ nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(nvmem->wp_gpio))
+ return ERR_CAST(nvmem->wp_gpio);
+
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
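
With the wp_gpio change, every provider write is bracketed by the write-protect line: dropped low for the duration of reg_write() and raised again afterwards, and since gpiod_set_value_cansleep() ignores a NULL descriptor, providers without a "wp" pin need no special case. The wrapper pattern, abstracted (a sketch with a stub gpio type, not the kernel API):

    #include <stddef.h>

    struct gpio;                              /* stand-in for struct gpio_desc */
    void gpio_set(struct gpio *g, int value); /* treats a NULL descriptor as a no-op */

    typedef int (*reg_write_t)(void *priv, unsigned int off, void *val, size_t n);

    static int guarded_write(struct gpio *wp, reg_write_t write, void *priv,
                             unsigned int off, void *val, size_t n)
    {
            int ret;

            gpio_set(wp, 0);                  /* deassert write protect */
            ret = write(priv, off, val, n);
            gpio_set(wp, 1);                  /* re-protect even on failure */
            return ret;
    }
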
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 03f1ab23ad51..399e1eb8b4c1 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -15,8 +15,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define IMX_SIP_OTP 0xC200000A
-#define IMX_SIP_OTP_WRITE 0x2
+#define IMX_SIP_OTP_WRITE 0xc200000B
enum ocotp_devtype {
IMX8QXP,
@@ -139,8 +138,8 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
void *p;
int i, ret;
- index = offset >> 2;
- num_bytes = round_up((offset % 4) + bytes, 4);
+ index = offset;
+ num_bytes = round_up(bytes, 4);
count = num_bytes >> 2;
if (count > (priv->data->nregs - index))
@@ -169,7 +168,7 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
buf++;
}
- memcpy(val, (u8 *)p + offset % 4, bytes);
+ memcpy(val, (u8 *)p, bytes);
mutex_unlock(&scu_ocotp_mutex);
@@ -189,10 +188,10 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
int ret;
/* allow only writing one complete OTP word at a time */
- if ((bytes != 4) || (offset % 4))
+ if (bytes != 4)
return -EINVAL;
- index = offset >> 2;
+ index = offset;
if (in_hole(context, index))
return -EINVAL;
@@ -212,8 +211,7 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
mutex_lock(&scu_ocotp_mutex);
- arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
- 0, 0, 0, 0, &res);
+ arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res);
mutex_unlock(&scu_ocotp_mutex);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index fc40555ca4cd..4ba9cc8f76df 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -44,6 +44,14 @@
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+#define IMX_OCOTP_BM_CTRL_DEFAULT \
+ { \
+ .bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \
+ .bm_busy = IMX_OCOTP_BM_CTRL_BUSY, \
+ .bm_error = IMX_OCOTP_BM_CTRL_ERROR, \
+ .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
+ }
+
#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
#define TIMING_STROBE_READ_NS 37 /* Min time before read */
#define TIMING_RELAX_NS 17
@@ -62,18 +70,31 @@ struct ocotp_priv {
struct nvmem_config *config;
};
+struct ocotp_ctrl_reg {
+ u32 bm_addr;
+ u32 bm_busy;
+ u32 bm_error;
+ u32 bm_rel_shadows;
+};
+
struct ocotp_params {
unsigned int nregs;
unsigned int bank_address_words;
void (*set_timing)(struct ocotp_priv *priv);
+ struct ocotp_ctrl_reg ctrl;
};
-static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
+static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
{
int count;
u32 c, mask;
+ u32 bm_ctrl_busy, bm_ctrl_error;
+ void __iomem *base = priv->base;
- mask = IMX_OCOTP_BM_CTRL_BUSY | IMX_OCOTP_BM_CTRL_ERROR | flags;
+ bm_ctrl_busy = priv->params->ctrl.bm_busy;
+ bm_ctrl_error = priv->params->ctrl.bm_error;
+
+ mask = bm_ctrl_busy | bm_ctrl_error | flags;
for (count = 10000; count >= 0; count--) {
c = readl(base + IMX_OCOTP_ADDR_CTRL);
@@ -97,7 +118,7 @@ static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
 * - A read is performed from a fuse word which has been read
* locked.
*/
- if (c & IMX_OCOTP_BM_CTRL_ERROR)
+ if (c & bm_ctrl_error)
return -EPERM;
return -ETIMEDOUT;
}
@@ -105,15 +126,18 @@ static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
return 0;
}
-static void imx_ocotp_clr_err_if_set(void __iomem *base)
+static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv)
{
- u32 c;
+ u32 c, bm_ctrl_error;
+ void __iomem *base = priv->base;
+
+ bm_ctrl_error = priv->params->ctrl.bm_error;
c = readl(base + IMX_OCOTP_ADDR_CTRL);
- if (!(c & IMX_OCOTP_BM_CTRL_ERROR))
+ if (!(c & bm_ctrl_error))
return;
- writel(IMX_OCOTP_BM_CTRL_ERROR, base + IMX_OCOTP_ADDR_CTRL_CLR);
+ writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR);
}
static int imx_ocotp_read(void *context, unsigned int offset,
@@ -140,7 +164,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
return ret;
}
- ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
dev_err(priv->dev, "timeout during read setup\n");
goto read_end;
@@ -157,7 +181,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
* issued
*/
if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
- imx_ocotp_clr_err_if_set(priv->base);
+ imx_ocotp_clr_err_if_set(priv);
}
ret = 0;
@@ -274,7 +298,7 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* write or reload must be completed before a write access can be
* requested.
*/
- ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
dev_err(priv->dev, "timeout during timing setup\n");
goto write_end;
@@ -306,8 +330,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
}
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
- ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
- ctrl |= waddr & IMX_OCOTP_BM_CTRL_ADDR;
+ ctrl &= ~priv->params->ctrl.bm_addr;
+ ctrl |= waddr & priv->params->ctrl.bm_addr;
ctrl |= IMX_OCOTP_WR_UNLOCK;
writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL);
@@ -374,11 +398,11 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* be set. It must be cleared by software before any new write access
* can be issued.
*/
- ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ ret = imx_ocotp_wait_for_busy(priv, 0);
if (ret < 0) {
if (ret == -EPERM) {
dev_err(priv->dev, "failed write to locked region");
- imx_ocotp_clr_err_if_set(priv->base);
+ imx_ocotp_clr_err_if_set(priv);
} else {
dev_err(priv->dev, "timeout during data write\n");
}
@@ -394,10 +418,10 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
udelay(2);
/* reload all shadow registers */
- writel(IMX_OCOTP_BM_CTRL_REL_SHADOWS,
+ writel(priv->params->ctrl.bm_rel_shadows,
priv->base + IMX_OCOTP_ADDR_CTRL_SET);
- ret = imx_ocotp_wait_for_busy(priv->base,
- IMX_OCOTP_BM_CTRL_REL_SHADOWS);
+ ret = imx_ocotp_wait_for_busy(priv,
+ priv->params->ctrl.bm_rel_shadows);
if (ret < 0) {
dev_err(priv->dev, "timeout during shadow register reload\n");
goto write_end;
@@ -424,65 +448,76 @@ static const struct ocotp_params imx6q_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sl_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sll_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6sx_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ul_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ull_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx7ulp_params = {
.nregs = 256,
.bank_address_words = 0,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mq_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mm_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx8mn_params = {
.nregs = 256,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -521,17 +556,17 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- clk_prepare_enable(priv->clk);
- imx_ocotp_clr_err_if_set(priv->base);
- clk_disable_unprepare(priv->clk);
-
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
priv->config = &imx_ocotp_nvmem_config;
- nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv);
+ clk_disable_unprepare(priv->clk);
+
+ nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
return PTR_ERR_OR_ZERO(nvmem);
}
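
IMX_OCOTP_BM_CTRL_DEFAULT keeps the many per-SoC parameter tables above terse: a macro expanding to a designated initializer supplies the common CTRL bitmasks, and a SoC with a different register layout would override only what differs. The idiom in isolation (runnable userspace sketch):

    #include <stdio.h>

    struct ctrl_reg { unsigned int bm_busy, bm_error; };

    #define CTRL_DEFAULT { .bm_busy = 0x100, .bm_error = 0x200 }

    struct params { unsigned int nregs; struct ctrl_reg ctrl; };

    static const struct params imx6q  = { .nregs = 128, .ctrl = CTRL_DEFAULT };
    static const struct params imx8mq = { .nregs = 256, .ctrl = CTRL_DEFAULT };

    int main(void)
    {
            printf("%#x %#x\n", imx6q.ctrl.bm_busy, imx8mq.ctrl.bm_error);
            return 0;
    }
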
diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h
index eb8ed7121fa3..be0d66d75c8a 100644
--- a/drivers/nvmem/nvmem.h
+++ b/drivers/nvmem/nvmem.h
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
+#include <linux/gpio/consumer.h>
struct nvmem_device {
struct module *owner;
@@ -26,6 +27,7 @@ struct nvmem_device {
struct list_head cells;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
+ struct gpio_desc *wp_gpio;
void *priv;
};
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
new file mode 100644
index 000000000000..8682cda448d6
--- /dev/null
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
+
+#define SDAM_MEM_START 0x40
+#define REGISTER_MAP_ID 0x40
+#define REGISTER_MAP_VERSION 0x41
+#define SDAM_SIZE 0x44
+#define SDAM_PBS_TRIG_SET 0xE5
+#define SDAM_PBS_TRIG_CLR 0xE6
+
+struct sdam_chip {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct nvmem_config sdam_config;
+ unsigned int base;
+ unsigned int size;
+};
+
+/* read only register offsets */
+static const u8 sdam_ro_map[] = {
+ REGISTER_MAP_ID,
+ REGISTER_MAP_VERSION,
+ SDAM_SIZE
+};
+
+static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset,
+ size_t len)
+{
+ unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1;
+
+ if (!len)
+ return false;
+
+ if (offset >= SDAM_MEM_START && offset <= sdam_mem_end
+ && (offset + len - 1) <= sdam_mem_end)
+ return true;
+ else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR)
+ && (len == 1))
+ return true;
+
+ return false;
+}
+
+static bool sdam_is_ro(unsigned int offset, size_t len)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++)
+ if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i])
+ return true;
+
+ return false;
+}
+
+static int sdam_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ struct device *dev = &sdam->pdev->dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+ dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct sdam_chip *sdam = priv;
+ struct device *dev = &sdam->pdev->dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+ dev_err(dev, "Invalid SDAM offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ if (sdam_is_ro(offset, bytes)) {
+ dev_err(dev, "Invalid write offset %#x len=%zd\n",
+ offset, bytes);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes);
+ if (rc < 0)
+ dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n",
+ offset, bytes, rc);
+
+ return rc;
+}
+
+static int sdam_probe(struct platform_device *pdev)
+{
+ struct sdam_chip *sdam;
+ struct nvmem_device *nvmem;
+ unsigned int val;
+ int rc;
+
+ sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL);
+ if (!sdam)
+ return -ENOMEM;
+
+ sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!sdam->regmap) {
+ dev_err(&pdev->dev, "Failed to get regmap handle\n");
+ return -ENXIO;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc);
+ return -EINVAL;
+ }
+ sdam->size = val * 32;
+
+ sdam->sdam_config.dev = &pdev->dev;
+ sdam->sdam_config.name = "spmi_sdam";
+ sdam->sdam_config.id = pdev->id;
+	sdam->sdam_config.owner = THIS_MODULE;
+ sdam->sdam_config.stride = 1;
+ sdam->sdam_config.word_size = 1;
+ sdam->sdam_config.reg_read = sdam_read;
+ sdam->sdam_config.reg_write = sdam_write;
+ sdam->sdam_config.priv = sdam;
+
+ nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev,
+ "Failed to register SDAM nvmem device rc=%ld\n",
+ PTR_ERR(nvmem));
+ return -ENXIO;
+ }
+ dev_dbg(&pdev->dev,
+ "SDAM base=%#x size=%u registered successfully\n",
+ sdam->base, sdam->size);
+
+ return 0;
+}
+
+static const struct of_device_id sdam_match_table[] = {
+ { .compatible = "qcom,spmi-sdam" },
+ {},
+};
+
+static struct platform_driver sdam_driver = {
+ .driver = {
+ .name = "qcom,spmi-sdam",
+ .of_match_table = sdam_match_table,
+ },
+ .probe = sdam_probe,
+};
+
+static int __init sdam_init(void)
+{
+ return platform_driver_register(&sdam_driver);
+}
+subsys_initcall(sdam_init);
+
+static void __exit sdam_exit(void)
+{
+	platform_driver_unregister(&sdam_driver);
+}
+module_exit(sdam_exit);
+
+MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
+MODULE_LICENSE("GPL v2");
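
Once registered, the SDAM shows up as an ordinary nvmem provider and is consumed through the generic nvmem API. A hedged sketch of a reader (the consumer device and the offset are hypothetical; nvmem_device_get()/nvmem_device_read()/nvmem_device_put() are the stock consumer interface):

    #include <linux/nvmem-consumer.h>

    static int read_sdam_bytes(struct device *dev)
    {
            struct nvmem_device *nvmem;
            u8 buf[4];
            int ret;

            nvmem = nvmem_device_get(dev, "spmi_sdam"); /* name set at registration */
            if (IS_ERR(nvmem))
                    return PTR_ERR(nvmem);

            /* the provider validates offsets against SDAM_MEM_START..end */
            ret = nvmem_device_read(nvmem, 0x40, sizeof(buf), buf);
            nvmem_device_put(nvmem);
            return ret < 0 ? ret : 0;
    }
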
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 37c2ccbefecd..d91618641be6 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -103,4 +103,8 @@ config OF_OVERLAY
config OF_NUMA
bool
+config OF_DMA_DEFAULT_COHERENT
+ # arches should select this if DMA is coherent by default for OF devices
+ bool
+
endif # OF
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 99c1b8058559..e8a39c3ec4d4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -995,12 +995,16 @@ out:
* @np: device node
*
* It returns true if "dma-coherent" property was found
- * for this device in DT.
+ * for this device in the DT, or if DMA is coherent by
+ * default for OF devices on the current platform.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node = of_node_get(np);
+ if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+ return true;
+
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index db7fbc0c0893..ae03b1218b06 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -135,115 +135,38 @@ int __weak of_node_to_nid(struct device_node *np)
}
#endif
-/*
- * Assumptions behind phandle_cache implementation:
- * - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- * - the phandle lookup overhead reduction provided by the cache
- * will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS 7
+#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
-/*
- * Caller must hold devtree_lock.
- */
-static void __of_free_phandle_cache(void)
+static u32 of_phandle_cache_hash(phandle handle)
{
- u32 cache_entries = phandle_cache_mask + 1;
- u32 k;
-
- if (!phandle_cache)
- return;
-
- for (k = 0; k < cache_entries; k++)
- of_node_put(phandle_cache[k]);
-
- kfree(phandle_cache);
- phandle_cache = NULL;
-}
-
-int of_free_phandle_cache(void)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&devtree_lock, flags);
-
- __of_free_phandle_cache();
-
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
- return 0;
+ return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
/*
* Caller must hold devtree_lock.
*/
-void __of_free_phandle_cache_entry(phandle handle)
+void __of_phandle_cache_inv_entry(phandle handle)
{
- phandle masked_handle;
+ u32 handle_hash;
struct device_node *np;
if (!handle)
return;
- masked_handle = handle & phandle_cache_mask;
+ handle_hash = of_phandle_cache_hash(handle);
- if (phandle_cache) {
- np = phandle_cache[masked_handle];
- if (np && handle == np->phandle) {
- of_node_put(np);
- phandle_cache[masked_handle] = NULL;
- }
- }
-}
-
-void of_populate_phandle_cache(void)
-{
- unsigned long flags;
- u32 cache_entries;
- struct device_node *np;
- u32 phandles = 0;
-
- raw_spin_lock_irqsave(&devtree_lock, flags);
-
- __of_free_phandle_cache();
-
- for_each_of_allnodes(np)
- if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
- phandles++;
-
- if (!phandles)
- goto out;
-
- cache_entries = roundup_pow_of_two(phandles);
- phandle_cache_mask = cache_entries - 1;
-
- phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
- GFP_ATOMIC);
- if (!phandle_cache)
- goto out;
-
- for_each_of_allnodes(np)
- if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
- of_node_get(np);
- phandle_cache[np->phandle & phandle_cache_mask] = np;
- }
-
-out:
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ np = phandle_cache[handle_hash];
+ if (np && handle == np->phandle)
+ phandle_cache[handle_hash] = NULL;
}
void __init of_core_init(void)
{
struct device_node *np;
- of_populate_phandle_cache();
/* Create the kset, and register existing nodes */
mutex_lock(&of_mutex);
@@ -253,8 +176,11 @@ void __init of_core_init(void)
pr_err("failed to register existing nodes\n");
return;
}
- for_each_of_allnodes(np)
+ for_each_of_allnodes(np) {
__of_attach_node_sysfs(np);
+ if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+ phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+ }
mutex_unlock(&of_mutex);
/* Symlink in /proc as required by userspace ABI */
@@ -490,6 +416,42 @@ int of_cpu_node_to_id(struct device_node *cpu_node)
EXPORT_SYMBOL(of_cpu_node_to_id);
/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
+{
+ struct of_phandle_args args;
+ int err;
+
+ err = of_parse_phandle_with_args(cpu_node, "power-domains",
+ "#power-domain-cells", 0, &args);
+ if (!err) {
+ struct device_node *state_node =
+ of_parse_phandle(args.np, "domain-idle-states", index);
+
+ of_node_put(args.np);
+ if (state_node)
+ return state_node;
+ }
+
+ return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
@@ -1235,36 +1197,24 @@ struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np = NULL;
unsigned long flags;
- phandle masked_handle;
+ u32 handle_hash;
if (!handle)
return NULL;
+ handle_hash = of_phandle_cache_hash(handle);
+
raw_spin_lock_irqsave(&devtree_lock, flags);
- masked_handle = handle & phandle_cache_mask;
-
- if (phandle_cache) {
- if (phandle_cache[masked_handle] &&
- handle == phandle_cache[masked_handle]->phandle)
- np = phandle_cache[masked_handle];
- if (np && of_node_check_flag(np, OF_DETACHED)) {
- WARN_ON(1); /* did not uncache np on node removal */
- of_node_put(np);
- phandle_cache[masked_handle] = NULL;
- np = NULL;
- }
- }
+ if (phandle_cache[handle_hash] &&
+ handle == phandle_cache[handle_hash]->phandle)
+ np = phandle_cache[handle_hash];
if (!np) {
for_each_of_allnodes(np)
if (np->phandle == handle &&
!of_node_check_flag(np, OF_DETACHED)) {
- if (phandle_cache) {
- /* will put when removed from cache */
- of_node_get(np);
- phandle_cache[masked_handle] = np;
- }
+ phandle_cache[handle_hash] = np;
break;
}
}
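
The phandle cache rework above replaces a dynamically sized table holding node references with a fixed 128-slot array indexed by hash_32(): no allocation, no refcounting, no teardown, and a stale slot is harmless because a hit is verified against the node's phandle before being trusted. A runnable userspace sketch of the same structure (the multiplier is the kernel's GOLDEN_RATIO_32):

    #include <stdint.h>
    #include <stddef.h>

    #define CACHE_BITS 7
    #define CACHE_SZ   (1u << CACHE_BITS)

    struct node { uint32_t phandle; };

    static struct node *cache[CACHE_SZ];

    static uint32_t hash32(uint32_t v)
    {
            return (v * 0x61C88647u) >> (32 - CACHE_BITS); /* hash_32(v, 7) */
    }

    static struct node *lookup(uint32_t handle, struct node *all, size_t n)
    {
            uint32_t h = hash32(handle);
            size_t i;

            if (cache[h] && cache[h]->phandle == handle)
                    return cache[h];            /* verified hit */
            for (i = 0; i < n; i++)             /* miss: scan, then populate */
                    if (all[i].phandle == handle) {
                            cache[h] = &all[i];
                            return &all[i];
                    }
            return NULL;
    }

Because slots only index nodes rather than pin them, node removal just invalidates the slot, which is what let the overlay code below drop the populate/free choreography.
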
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e9127db7b067..27203bfd0b22 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -161,7 +161,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
- if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+ if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_dbg(dev, "device is%sbehind an iommu\n",
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 49b16f76d78e..08fd823edac9 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -276,7 +276,7 @@ void __of_detach_node(struct device_node *np)
of_node_set_flag(np, OF_DETACHED);
/* race with of_find_node_by_phandle() prevented by devtree_lock */
- __of_free_phandle_cache_entry(np->phandle);
+ __of_phandle_cache_inv_entry(np->phandle);
}
/**
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index c6b87ce2b0cc..8270bbf505fb 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -42,14 +42,37 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
return -EINVAL;
}
+static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
+{
+ struct of_phandle_args arg;
+ int err;
+
+ err = of_parse_phandle_with_fixed_args(node, "timestamper", 1, 0, &arg);
+
+ if (err == -ENOENT)
+ return NULL;
+ else if (err)
+ return ERR_PTR(err);
+
+ if (arg.args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ return register_mii_timestamper(arg.np, arg.args[0]);
+}
+
static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
+ struct mii_timestamper *mii_ts;
struct phy_device *phy;
bool is_c45;
int rc;
u32 phy_id;
+ mii_ts = of_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
is_c45 = of_device_is_compatible(child,
"ethernet-phy-ieee802.3-c45");
@@ -57,11 +80,16 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
else
phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ if (mii_ts)
+ unregister_mii_timestamper(mii_ts);
return PTR_ERR(phy);
+ }
rc = of_irq_get(child, 0);
if (rc == -EPROBE_DEFER) {
+ if (mii_ts)
+ unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
return rc;
}
@@ -90,11 +118,20 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* register it */
rc = phy_device_register(phy);
if (rc) {
+ if (mii_ts)
+ unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
of_node_put(child);
return rc;
}
+ /* phy->mii_ts may already be defined by the PHY driver. A
+ * mii_timestamper probed via the device tree will still have
+ * precedence.
+ */
+ if (mii_ts)
+ phy->mii_ts = mii_ts;
+
dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
child, addr);
return 0;
@@ -162,7 +199,7 @@ static const struct of_device_id whitelist_phys[] = {
* A device which is not a phy is expected to have a compatible string
* indicating what sort of device it is.
*/
-static bool of_mdiobus_child_is_phy(struct device_node *child)
+bool of_mdiobus_child_is_phy(struct device_node *child)
{
u32 phy_id;
@@ -187,6 +224,7 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
return false;
}
+EXPORT_SYMBOL(of_mdiobus_child_is_phy);
/**
* of_mdiobus_register - Register mii_bus and create PHYs from the device tree
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 66294d29942a..207863c151a5 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -84,15 +84,11 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
int of_resolve_phandles(struct device_node *tree);
#endif
-#if defined(CONFIG_OF_DYNAMIC)
-void __of_free_phandle_cache_entry(phandle handle);
-#endif
+void __of_phandle_cache_inv_entry(phandle handle);
#if defined(CONFIG_OF_OVERLAY)
void of_overlay_mutex_lock(void);
void of_overlay_mutex_unlock(void);
-int of_free_phandle_cache(void);
-void of_populate_phandle_cache(void);
#else
static inline void of_overlay_mutex_lock(void) {};
static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 9617b7df7c4d..c9219fddf44b 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -974,8 +974,6 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
goto err_free_overlay_changeset;
}
- of_populate_phandle_cache();
-
ret = __of_changeset_apply_notify(&ovcs->cset);
if (ret)
pr_err("overlay apply changeset entry notify error %d\n", ret);
@@ -1218,17 +1216,8 @@ int of_overlay_remove(int *ovcs_id)
list_del(&ovcs->ovcs_list);
- /*
- * Disable phandle cache. Avoids race condition that would arise
- * from removing cache entry when the associated node is deleted.
- */
- of_free_phandle_cache();
-
ret_apply = 0;
ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
-
- of_populate_phandle_cache();
-
if (ret) {
if (ret_apply)
devicetree_state_flags |= DTSF_REVERT_FAIL;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index be7a7d332332..ba43e6a3dc0a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
- kref_init(&opp_table->list_kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
@@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_unlock(&opp_table_lock);
}
-void _opp_remove_all_static(struct opp_table *opp_table)
-{
- struct dev_pm_opp *opp, *tmp;
-
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
- dev_pm_opp_put(opp);
- }
-
- opp_table->parsed_static_opps = false;
-}
-
-static void _opp_table_list_kref_release(struct kref *kref)
-{
- struct opp_table *opp_table = container_of(kref, struct opp_table,
- list_kref);
-
- _opp_remove_all_static(opp_table);
- mutex_unlock(&opp_table_lock);
-}
-
-void _put_opp_list_kref(struct opp_table *opp_table)
-{
- kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
- &opp_table_lock);
-}
-
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+ struct dev_pm_opp *opp, *tmp;
+
+ mutex_lock(&opp_table->lock);
+
+ if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+ goto unlock;
+
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (!opp->dynamic)
+ dev_pm_opp_put_unlocked(opp);
+ }
+
+unlock:
+ mutex_unlock(&opp_table->lock);
+}
+
/**
* dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
* @dev: device for which we do this operation
@@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
return;
}
- _put_opp_list_kref(opp_table);
+ _opp_remove_all_static(opp_table);
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 1cbb58240b80..9cd8f0adacae 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */
+ mutex_lock(&opp_table->lock);
if (opp_table->parsed_static_opps) {
- kref_get(&opp_table->list_kref);
+ opp_table->parsed_static_opps++;
+ mutex_unlock(&opp_table->lock);
return 0;
}
- /*
- * Re-initialize list_kref every time we add static OPPs to the OPP
- * table as the reference count may be 0 after the last tie static OPPs
- * were removed.
- */
- kref_init(&opp_table->list_kref);
+ opp_table->parsed_static_opps = 1;
+ mutex_unlock(&opp_table->lock);
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
@@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
of_node_put(np);
- return ret;
+ goto remove_static_opp;
} else if (opp) {
count++;
}
}
/* There should be one or more OPPs defined */
- if (WARN_ON(!count))
- return -ENOENT;
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto remove_static_opp;
+ }
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
@@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
- return -ENOENT;
+ ret = -ENOENT;
+ goto remove_static_opp;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
- opp_table->parsed_static_opps = true;
-
return 0;
+
+remove_static_opp:
+ _opp_remove_all_static(opp_table);
+
+ return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
@@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
+ _opp_remove_all_static(opp_table);
return ret;
}
nr -= 2;
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 01a500e2c40a..d14e27102730 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -127,11 +127,10 @@ enum opp_table_access {
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
* @kref: for reference count of the table.
- * @list_kref: for reference count of the OPP list.
* @lock: mutex protecting the opp_list and dev_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
- * @parsed_static_opps: True if OPPs are initialized from DT.
+ * @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -167,7 +166,6 @@ struct opp_table {
struct list_head dev_list;
struct list_head opp_list;
struct kref kref;
- struct kref list_kref;
struct mutex lock;
struct device_node *np;
@@ -176,7 +174,7 @@ struct opp_table {
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool parsed_static_opps;
+ unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
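
parsed_static_opps changes from a bool into a count of users of the static OPPs, guarded by opp_table->lock, replacing the separate list_kref; the last remover frees the entries. The counted init/teardown pattern in miniature (userspace sketch with a pthread mutex standing in for the kernel mutex):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int parsed_static_opps;

    static void add_static_opps(void)
    {
            pthread_mutex_lock(&lock);
            if (parsed_static_opps++)
                    goto out;           /* already parsed; just count the user */
            /* ... parse and add the static OPPs ... */
    out:
            pthread_mutex_unlock(&lock);
    }

    static void remove_static_opps(void)
    {
            pthread_mutex_lock(&lock);
            if (!parsed_static_opps || --parsed_static_opps)
                    goto out;           /* never parsed, or not the last user */
            /* ... free the static OPPs ... */
    out:
            pthread_mutex_unlock(&lock);
    }
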
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 1c69c404df11..e3357e91decb 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -90,7 +90,7 @@ static int _store_optimized_voltages(struct device *dev,
goto out_map;
}
- base = ioremap_nocache(res->start, resource_size(res));
+ base = ioremap(res->start, resource_size(res));
if (!base) {
dev_err(dev, "Unable to map Efuse registers\n");
ret = -ENOMEM;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index eda2633a393d..9210a95cb4e6 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -32,7 +32,7 @@
#define OP_BUFFER_FLAGS 0
-static struct ring_buffer *op_ring_buffer;
+static struct trace_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index ad290f79983b..a5507f75b524 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1534,7 +1534,7 @@ static int __init ccio_probe(struct parisc_device *dev)
*ioc_p = ioc;
ioc->hw_path = dev->hw_path;
- ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
+ ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
if (!ioc->ioc_regs) {
kfree(ioc);
return -ENOMEM;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 2f1cac89ddf5..889d7ce282eb 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -974,7 +974,7 @@ static int __init dino_probe(struct parisc_device *dev)
}
dino_dev->hba.dev = dev;
- dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+ dino_dev->hba.base_addr = ioremap(hpa, 4096);
dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 37a2c5db761d..9d00a24277aa 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -354,10 +354,10 @@ static int __init eisa_probe(struct parisc_device *dev)
eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
}
}
- eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
+ eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
if (!eisa_eeprom_addr) {
result = -ENOMEM;
- printk(KERN_ERR "EISA: ioremap_nocache failed!\n");
+ printk(KERN_ERR "EISA: ioremap failed!\n");
goto error_free_irq;
}
result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 32f506f00c89..8a3b0c3a1e92 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -927,7 +927,7 @@ void *iosapic_register(unsigned long hpa)
return NULL;
}
- isi->addr = ioremap_nocache(hpa, 4096);
+ isi->addr = ioremap(hpa, 4096);
isi->isi_hpa = hpa;
isi->isi_version = iosapic_rd_version(isi);
isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a99e385c68bd..732b516c7bf8 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1134,7 +1134,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
** Postable I/O port space is per PCI host adapter.
** base of 64MB PIOP region
*/
- lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
+ lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);
sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
(int)lba_dev->hba.bus_num.start);
@@ -1476,7 +1476,7 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *addr = ioremap(dev->hpa.start, 4096);
int max;
/* Read HW Rev First */
@@ -1575,7 +1575,7 @@ lba_driver_probe(struct parisc_device *dev)
} else {
if (!astro_iop_base) {
/* Sprockets PDC uses NPIOP region */
- astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
+ astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
pci_port = &lba_astro_port_ops;
}
@@ -1693,7 +1693,7 @@ void __init lba_init(void)
*/
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
- void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);
+ void __iomem * base_addr = ioremap(lba->hpa.start, 4096);
imask <<= 2; /* adjust for hints - 2 more bits */
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 73e37bb877a4..36c6613f7a36 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -230,13 +230,12 @@ parse_error:
return -EINVAL;
}
-static const struct file_operations led_proc_fops = {
- .owner = THIS_MODULE,
- .open = led_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = led_proc_write,
+static const struct proc_ops led_proc_ops = {
+ .proc_open = led_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = led_proc_write,
};
static int __init led_create_procfs(void)
@@ -252,14 +251,14 @@ static int __init led_create_procfs(void)
if (!lcd_no_led_support)
{
ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
- &led_proc_fops, (void *)LED_NOLCD); /* LED */
+ &led_proc_ops, (void *)LED_NOLCD); /* LED */
if (!ent) return -1;
}
if (led_type == LED_HASLCD)
{
ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root,
- &led_proc_fops, (void *)LED_HASLCD); /* LCD */
+ &led_proc_ops, (void *)LED_HASLCD); /* LCD */
if (!ent) return -1;
}
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index de8e4e347249..7e112829d250 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1513,7 +1513,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
- return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+ return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
@@ -1883,7 +1883,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
u32 func_class;
int i;
char *version;
- void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
+ void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *root;
#endif
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 982b46f0a54d..3ef0bb281e7c 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -69,6 +69,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
dev->ats_enabled = 1;
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_ats);
/**
* pci_disable_ats - disable the ATS capability
@@ -87,6 +88,7 @@ void pci_disable_ats(struct pci_dev *dev)
dev->ats_enabled = 0;
}
+EXPORT_SYMBOL_GPL(pci_disable_ats);
void pci_restore_ats_state(struct pci_dev *dev)
{
@@ -424,11 +426,12 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
- int pasid = pdev->pasid_cap;
+ int pasid;
if (pdev->is_virtfn)
pdev = pci_physfn(pdev);
+ pasid = pdev->pasid_cap;
if (!pasid)
return -EINVAL;
@@ -451,11 +454,12 @@ int pci_pasid_features(struct pci_dev *pdev)
int pci_max_pasids(struct pci_dev *pdev)
{
u16 supported;
- int pasid = pdev->pasid_cap;
+ int pasid;
if (pdev->is_virtfn)
pdev = pci_physfn(pdev);
+ pasid = pdev->pasid_cap;
if (!pasid)
return -EINVAL;
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index c77069c8ee5d..20bf00f587bd 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -239,7 +239,6 @@ config PCIE_TANGO_SMP8759
config VMD
depends on PCI_MSI && X86_64 && SRCU
- select X86_DEV_DMA_OPS
tristate "Intel Volume Management Device Driver"
---help---
Adds support for the Intel Volume Management Device (VMD). VMD is a
@@ -253,6 +252,15 @@ config VMD
To compile this driver as a module, choose M here: the
module will be called vmd.
+config PCIE_BRCMSTB
+ tristate "Broadcom Brcmstb PCIe host controller"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here to enable PCIe host controller support for
+ Broadcom STB based SoCs, like the Raspberry Pi 4.
+
config PCI_HYPERV_INTERFACE
tristate "Hyper-V PCI Interface"
depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 3d4f597f15ce..01b2502a5323 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
+obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 625a031b2193..0830dfcfa43a 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -209,6 +209,17 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_INTEL_GW
+ bool "Intel Gateway PCIe host controller support"
+ depends on OF && (X86 || COMPILE_TEST)
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say 'Y' here to enable PCIe Host controller support on Intel
+ Gateway SoCs.
+ The PCIe controller uses the DesignWare core plus Intel-specific
+ hardware wrappers.
+
config PCIE_KIRIN
depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 69faff371f11..8a637cfcf6e9 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index b20651cea09f..9bf7fa99b103 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -719,7 +719,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ base = devm_ioremap(dev, res->start, resource_size(res));
if (!base)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 14a6ba4067fb..c5043d951e80 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * PCIe host controller driver for Samsung Exynos SoCs
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index af677254a072..c8c702c494a2 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -422,7 +422,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
lower_32_bits(start) | OB_ENABLEN);
ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
upper_32_bits(start));
- start += OB_WIN_SIZE;
+ start += OB_WIN_SIZE * SZ_1M;
}
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
@@ -510,7 +510,7 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
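The stop_link fix above is subtle: the old code cleared LTSSM_EN_VAL in val and then OR'd the same bit back in on the write, so link training was never actually disabled. An illustrative before/after:

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	/* old: ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); re-set the bit */
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);	/* new: the bit stays cleared */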
static int ks_pcie_start_link(struct dw_pcie *pci)
@@ -1354,7 +1354,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "num-viewport", &num_viewport);
if (ret < 0) {
dev_err(dev, "unable to read *num-viewport* property\n");
- return ret;
+ goto err_get_sync;
}
/*
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9e2482bd7b6d..28d5a1095200 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -51,9 +51,6 @@ static const struct of_device_id artpec6_pcie_of_match[];
#define ACK_N_FTS_MASK GENMASK(15, 8)
#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
-#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0)
-#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK)
-
/* ARTPEC-6 specific registers */
#define PCIECFG 0x18
#define PCIECFG_DBG_OEN BIT(24)
@@ -313,10 +310,7 @@ static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
* Set the Number of Fast Training Sequences that the core
* advertises as its N_FTS during Gen2 or Gen3 link training.
*/
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~FAST_TRAINING_SEQ_MASK;
- val |= FAST_TRAINING_SEQ(180);
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ dw_pcie_link_set_n_fts(pci, 180);
}
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 3dd2e2697294..cfeccd7e9fff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -434,7 +434,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
- msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+ msix_tbl = ioremap(ep->phys_base + tbl_addr,
PCI_MSIX_ENTRY_SIZE);
if (!msix_tbl)
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 820488dfeaed..681548c88282 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/types.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
@@ -474,6 +475,61 @@ int dw_pcie_link_up(struct dw_pcie *pci)
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+ val |= PORT_MLTI_UPCFG_SUPPORT;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
+
+void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+{
+ u32 reg, val;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ reg &= ~PCI_EXP_LNKCTL2_TLS;
+
+ switch (pcie_link_speed[link_gen]) {
+ case PCIE_SPEED_2_5GT:
+ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ break;
+ case PCIE_SPEED_5_0GT:
+ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ break;
+ case PCIE_SPEED_8_0GT:
+ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+ break;
+ case PCIE_SPEED_16_0GT:
+ reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
+ break;
+ default:
+ /* Use hardware capability */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
+ reg &= ~PCI_EXP_LNKCTL2_HASD;
+ reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
+ break;
+ }
+
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);
+
+void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= n_fts & PORT_LOGIC_N_FTS_MASK;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
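These three exports give DWC-based drivers a common way to program upconfigure support, target link speed, and N_FTS instead of open-coding dbi read-modify-writes (the ARTPEC-6 hunk above is converted to the N_FTS helper). A hedged sketch of the expected call order during root-complex setup, mirroring what pcie-intel-gw.c does below:

	/* Sketch, assuming a probed struct dw_pcie *pci with the dbi space mapped. */
	dw_pcie_setup_rc(&pci->pp);
	dw_pcie_upconfig_setup(pci);		/* advertise lane upconfigure support */
	dw_pcie_link_set_max_speed(pci, 3);	/* cap the link at Gen3 */
	dw_pcie_link_set_n_fts(pci, 180);	/* fast training sequences for Gen2+ */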
+
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
u32 val;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 5accdd6bc388..a22ea5982817 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -30,7 +30,12 @@
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_AFR 0x70C
+#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+
#define PCIE_PORT_LINK_CONTROL 0x710
+#define PORT_LINK_DLL_LINK_EN BIT(5)
#define PORT_LINK_MODE_MASK GENMASK(21, 16)
#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
@@ -46,6 +51,7 @@
#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0)
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
@@ -60,6 +66,9 @@
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
+#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
+#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND BIT(31)
#define PCIE_ATU_REGION_OUTBOUND 0
@@ -273,6 +282,9 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
+void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen);
+void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
new file mode 100644
index 000000000000..fc2a12212dec
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Intel Gateway SoCs
+ *
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PORT_AFR_N_FTS_GEN12_DFT (SZ_128 - 1)
+#define PORT_AFR_N_FTS_GEN3 180
+#define PORT_AFR_N_FTS_GEN4 196
+
+/* PCIe Application logic Registers */
+#define PCIE_APP_CCR 0x10
+#define PCIE_APP_CCR_LTSSM_ENABLE BIT(0)
+
+#define PCIE_APP_MSG_CR 0x30
+#define PCIE_APP_MSG_XMT_PM_TURNOFF BIT(0)
+
+#define PCIE_APP_PMC 0x44
+#define PCIE_APP_PMC_IN_L2 BIT(20)
+
+#define PCIE_APP_IRNEN 0xF4
+#define PCIE_APP_IRNCR 0xF8
+#define PCIE_APP_IRN_AER_REPORT BIT(0)
+#define PCIE_APP_IRN_PME BIT(2)
+#define PCIE_APP_IRN_RX_VDM_MSG BIT(4)
+#define PCIE_APP_IRN_PM_TO_ACK BIT(9)
+#define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11)
+#define PCIE_APP_IRN_BW_MGT BIT(12)
+#define PCIE_APP_IRN_MSG_LTR BIT(18)
+#define PCIE_APP_IRN_SYS_ERR_RC BIT(29)
+#define PCIE_APP_INTX_OFST 12
+
+#define PCIE_APP_IRN_INT \
+ (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
+ PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+ PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+ PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+ (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTA) | \
+ (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTB) | \
+ (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTC) | \
+ (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTD))
+
+#define BUS_IATU_OFFSET SZ_256M
+#define RESET_INTERVAL_MS 100
+
+struct intel_pcie_soc {
+ unsigned int pcie_ver;
+ unsigned int pcie_atu_offset;
+ u32 num_viewport;
+};
+
+struct intel_pcie_port {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct gpio_desc *reset_gpio;
+ u32 rst_intrvl;
+ u32 max_speed;
+ u32 link_gen;
+ u32 max_width;
+ u32 n_fts;
+ struct clk *core_clk;
+ struct reset_control *core_rst;
+ struct phy *phy;
+ u8 pcie_cap_ofst;
+};
+
+static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
+{
+ u32 old;
+
+ old = readl(base + ofs);
+ val = (old & ~mask) | (val & mask);
+
+ if (val != old)
+ writel(val, base + ofs);
+}
+
+static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
+{
+ return readl(lpp->app_base + ofs);
+}
+
+static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+{
+ writel(val, lpp->app_base + ofs);
+}
+
+static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(lpp->app_base, ofs, mask, val);
+}
+
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
+{
+ return dw_pcie_readl_dbi(&lpp->pci, ofs);
+}
+
+static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+{
+ dw_pcie_writel_dbi(&lpp->pci, ofs, val);
+}
+
+static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
+}
+
+static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
+{
+ pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ PCIE_APP_CCR_LTSSM_ENABLE);
+}
+
+static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
+{
+ pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+}
+
+static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
+{
+ u32 val;
+ u8 offset = lpp->pcie_cap_ofst;
+
+ val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP);
+ lpp->max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
+ lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val);
+
+ val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
+
+ val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
+ pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
+}
+
+static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp)
+{
+ u32 val, mask;
+
+ switch (pcie_link_speed[lpp->max_speed]) {
+ case PCIE_SPEED_8_0GT:
+ lpp->n_fts = PORT_AFR_N_FTS_GEN3;
+ break;
+ case PCIE_SPEED_16_0GT:
+ lpp->n_fts = PORT_AFR_N_FTS_GEN4;
+ break;
+ default:
+ lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT;
+ break;
+ }
+
+ mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK;
+ val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) |
+ FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts);
+ pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val);
+
+ /* Port Link Control Register */
+ pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN,
+ PORT_LINK_DLL_LINK_EN);
+}
+
+static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
+{
+ intel_pcie_ltssm_disable(lpp);
+ intel_pcie_link_setup(lpp);
+ dw_pcie_setup_rc(&lpp->pci.pp);
+ dw_pcie_upconfig_setup(&lpp->pci);
+ intel_pcie_port_logic_setup(lpp);
+ dw_pcie_link_set_max_speed(&lpp->pci, lpp->link_gen);
+ dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);
+}
+
+static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
+{
+ struct device *dev = lpp->pci.dev;
+ int ret;
+
+ lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lpp->reset_gpio)) {
+ ret = PTR_ERR(lpp->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Make initial reset last for 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
+{
+ reset_control_assert(lpp->core_rst);
+}
+
+static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
+{
+ /*
+ * A one-microsecond delay to make sure the reset pulse is
+ * wide enough so that the core reset is clean.
+ */
+ udelay(1);
+ reset_control_deassert(lpp->core_rst);
+
+ /*
+ * On some SoCs the core reset also resets the PHY, so a longer
+ * delay is needed to make sure the reset process is done.
+ */
+ usleep_range(1000, 2000);
+}
+
+static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
+{
+ gpiod_set_value_cansleep(lpp->reset_gpio, 1);
+}
+
+static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
+{
+ msleep(lpp->rst_intrvl);
+ gpiod_set_value_cansleep(lpp->reset_gpio, 0);
+}
+
+static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
+{
+ intel_pcie_device_rst_deassert(lpp);
+ intel_pcie_ltssm_enable(lpp);
+
+ return dw_pcie_wait_for_link(&lpp->pci);
+}
+
+static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
+{
+ pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+}
+
+static int intel_pcie_get_resources(struct platform_device *pdev)
+{
+ struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &lpp->pci;
+ struct device *dev = pci->dev;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ lpp->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpp->core_clk)) {
+ ret = PTR_ERR(lpp->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clks: %d\n", ret);
+ return ret;
+ }
+
+ lpp->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(lpp->core_rst)) {
+ ret = PTR_ERR(lpp->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get resets: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_match_string(dev, "device_type", "pci");
+ if (ret) {
+ dev_err(dev, "Failed to find pci device type: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "reset-assert-ms",
+ &lpp->rst_intrvl);
+ if (ret)
+ lpp->rst_intrvl = RESET_INTERVAL_MS;
+
+ ret = of_pci_get_max_link_speed(dev->of_node);
+ lpp->link_gen = ret < 0 ? 0 : ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ lpp->app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(lpp->app_base))
+ return PTR_ERR(lpp->app_base);
+
+ lpp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(lpp->phy)) {
+ ret = PTR_ERR(lpp->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
+{
+ phy_exit(lpp->phy);
+}
+
+static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
+{
+ u32 value;
+ int ret;
+
+ if (pcie_link_speed[lpp->max_speed] < PCIE_SPEED_8_0GT)
+ return 0;
+
+ /* Send PME_TURN_OFF message */
+ pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ PCIE_APP_MSG_XMT_PM_TURNOFF);
+
+ /* Read PMC status and wait for falling into L2 link state */
+ ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
+ value & PCIE_APP_PMC_IN_L2, 20,
+ jiffies_to_usecs(5 * HZ));
+ if (ret)
+ dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");
+
+ return ret;
+}
+
+static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
+{
+ if (dw_pcie_link_up(&lpp->pci))
+ intel_pcie_wait_l2(lpp);
+
+ /* Put endpoint device in reset state */
+ intel_pcie_device_rst_assert(lpp);
+ pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+}
+
+static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
+{
+ struct device *dev = lpp->pci.dev;
+ int ret;
+
+ intel_pcie_core_rst_assert(lpp);
+ intel_pcie_device_rst_assert(lpp);
+
+ ret = phy_init(lpp->phy);
+ if (ret)
+ return ret;
+
+ intel_pcie_core_rst_deassert(lpp);
+
+ ret = clk_prepare_enable(lpp->core_clk);
+ if (ret) {
+ dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
+ goto clk_err;
+ }
+
+ if (!lpp->pcie_cap_ofst) {
+ ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
+ if (!ret) {
+ ret = -ENXIO;
+ dev_err(dev, "Invalid PCIe capability offset\n");
+ goto app_init_err;
+ }
+
+ lpp->pcie_cap_ofst = ret;
+ }
+
+ intel_pcie_rc_setup(lpp);
+ ret = intel_pcie_app_logic_setup(lpp);
+ if (ret)
+ goto app_init_err;
+
+ /* Enable integrated interrupts */
+ pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ PCIE_APP_IRN_INT);
+
+ return 0;
+
+app_init_err:
+ clk_disable_unprepare(lpp->core_clk);
+clk_err:
+ intel_pcie_core_rst_assert(lpp);
+ intel_pcie_deinit_phy(lpp);
+
+ return ret;
+}
+
+static void __intel_pcie_remove(struct intel_pcie_port *lpp)
+{
+ intel_pcie_core_irq_disable(lpp);
+ intel_pcie_turn_off(lpp);
+ clk_disable_unprepare(lpp->core_clk);
+ intel_pcie_core_rst_assert(lpp);
+ intel_pcie_deinit_phy(lpp);
+}
+
+static int intel_pcie_remove(struct platform_device *pdev)
+{
+ struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
+ struct pcie_port *pp = &lpp->pci.pp;
+
+ dw_pcie_host_deinit(pp);
+ __intel_pcie_remove(lpp);
+
+ return 0;
+}
+
+static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
+{
+ struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+ int ret;
+
+ intel_pcie_core_irq_disable(lpp);
+ ret = intel_pcie_wait_l2(lpp);
+ if (ret)
+ return ret;
+
+ intel_pcie_deinit_phy(lpp);
+ clk_disable_unprepare(lpp->core_clk);
+ return ret;
+}
+
+static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
+{
+ struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+
+ return intel_pcie_host_setup(lpp);
+}
+
+static int intel_pcie_rc_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);
+
+ return intel_pcie_host_setup(lpp);
+}
+
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int intel_pcie_msi_init(struct pcie_port *pp)
+{
+ return 0;
+}
+
+static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ return cpu_addr + BUS_IATU_OFFSET;
+}
+
+static const struct dw_pcie_ops intel_pcie_ops = {
+ .cpu_addr_fixup = intel_pcie_cpu_addr,
+};
+
+static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
+ .host_init = intel_pcie_rc_init,
+ .msi_host_init = intel_pcie_msi_init,
+};
+
+static const struct intel_pcie_soc pcie_data = {
+ .pcie_ver = 0x520A,
+ .pcie_atu_offset = 0xC0000,
+ .num_viewport = 3,
+};
+
+static int intel_pcie_probe(struct platform_device *pdev)
+{
+ const struct intel_pcie_soc *data;
+ struct device *dev = &pdev->dev;
+ struct intel_pcie_port *lpp;
+ struct pcie_port *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
+ if (!lpp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lpp);
+ pci = &lpp->pci;
+ pci->dev = dev;
+ pp = &pci->pp;
+
+ ret = intel_pcie_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ ret = intel_pcie_ep_rst_init(lpp);
+ if (ret)
+ return ret;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ pci->ops = &intel_pcie_ops;
+ pci->version = data->pcie_ver;
+ pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
+ pp->ops = &intel_pcie_dw_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Cannot initialize host\n");
+ return ret;
+ }
+
+ /*
+ * The Intel PCIe core doesn't configure an IO region, so set the
+ * number of viewports such that no IO region access is performed.
+ */
+ pci->num_viewport = data->num_viewport;
+
+ return 0;
+}
+
+static const struct dev_pm_ops intel_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
+};
+
+static const struct of_device_id of_intel_pcie_match[] = {
+ { .compatible = "intel,lgm-pcie", .data = &pcie_data },
+ {}
+};
+
+static struct platform_driver intel_pcie_driver = {
+ .probe = intel_pcie_probe,
+ .remove = intel_pcie_remove,
+ .driver = {
+ .name = "intel-gw-pcie",
+ .of_match_table = of_intel_pcie_match,
+ .pm = &intel_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(intel_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7e581748ee9f..5ea527a6bd9f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -54,6 +54,7 @@
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+#define PCIE20_PARF_DEVICE_TYPE 0x1000
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -80,6 +81,8 @@
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
#define SLV_ADDR_SPACE_SZ 0x10000000
+#define DEVICE_TYPE_RC 0x4
+
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
struct clk *iface_clk;
@@ -139,12 +142,20 @@ struct qcom_pcie_resources_2_3_3 {
struct reset_control *rst[7];
};
+struct qcom_pcie_resources_2_7_0 {
+ struct clk_bulk_data clks[6];
+ struct regulator_bulk_data supplies[2];
+ struct reset_control *pci_reset;
+ struct clk *pipe_clk;
+};
+
union qcom_pcie_resources {
struct qcom_pcie_resources_1_0_0 v1_0_0;
struct qcom_pcie_resources_2_1_0 v2_1_0;
struct qcom_pcie_resources_2_3_2 v2_3_2;
struct qcom_pcie_resources_2_3_3 v2_3_3;
struct qcom_pcie_resources_2_4_0 v2_4_0;
+ struct qcom_pcie_resources_2_7_0 v2_7_0;
};
struct qcom_pcie;
@@ -1068,6 +1079,134 @@ err_clk_iface:
return ret;
}
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
+
+ res->clks[0].id = "aux";
+ res->clks[1].id = "cfg";
+ res->clks[2].id = "bus_master";
+ res->clks[3].id = "bus_slave";
+ res->clks[4].id = "slave_q2a";
+ res->clks[5].id = "tbu";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
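The 2.7.0 resource set uses the bulk clock API so the six named clocks are acquired and toggled as one unit. A minimal sketch of that pattern (clock names are illustrative):

	/* Sketch of the clk_bulk pattern used above. */
	struct clk_bulk_data clks[] = { { .id = "aux" }, { .id = "cfg" } };
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;
	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;
	/* ... device is clocked ... */
	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);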
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ goto err_disable_regulators;
+
+ ret = reset_control_assert(res->pci_reset);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ goto err_disable_clocks;
+ }
+
+ usleep_range(1000, 1500);
+
+ ret = reset_control_deassert(res->pci_reset);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert pci reset\n");
+ goto err_disable_clocks;
+ }
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ goto err_disable_clocks;
+ }
+
+ /* configure PCIe to RC mode */
+ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+err_disable_clocks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_disable_regulators:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ return clk_prepare_enable(res->pipe_clk);
+}
+
+static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_disable_unprepare(res->pipe_clk);
+}
+
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
@@ -1167,6 +1306,16 @@ static const struct qcom_pcie_ops ops_2_3_3 = {
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
+/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .post_deinit = qcom_pcie_post_deinit_2_7_0,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
};
@@ -1282,6 +1431,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
+ { .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 8fd7badd59c2..a5401a0b1e58 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -9,11 +9,11 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -171,12 +171,6 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
}
-static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
-{
- writel(0, priv->base + PCL_RCV_INT);
- writel(0, priv->base + PCL_RCV_INTX);
-}
-
static void uniphier_pcie_irq_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
@@ -397,14 +391,6 @@ out_clk_disable:
return ret;
}
-static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
-{
- uniphier_pcie_irq_disable(priv);
- phy_exit(priv->phy);
- reset_control_assert(priv->rst);
- clk_disable_unprepare(priv->clk);
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = uniphier_pcie_establish_link,
.stop_link = uniphier_pcie_stop_link,
@@ -456,31 +442,16 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
return uniphier_add_pcie_port(priv, pdev);
}
-static int uniphier_pcie_remove(struct platform_device *pdev)
-{
- struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
-
- uniphier_pcie_host_disable(priv);
-
- return 0;
-}
-
static const struct of_device_id uniphier_pcie_match[] = {
{ .compatible = "socionext,uniphier-pcie", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
static struct platform_driver uniphier_pcie_driver = {
.probe = uniphier_pcie_probe,
- .remove = uniphier_pcie_remove,
.driver = {
.name = "uniphier-pcie",
.of_match_table = uniphier_pcie_match,
},
};
builtin_platform_driver(uniphier_pcie_driver);
-
-MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
-MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 673a1725ef38..0e03cef72840 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1406,7 +1406,7 @@ static struct phy *devm_of_phy_optional_get_index(struct device *dev,
phy = devm_of_phy_get(dev, np, name);
kfree(name);
- if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+ if (PTR_ERR(phy) == -ENODEV)
phy = NULL;
return phy;
@@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.num_ports = 2,
.ports = tegra20_pcie_ports,
.msi_base_shift = 0,
- .afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.num_ports = 3,
.ports = tegra30_pcie_ports,
.msi_base_shift = 8,
+ .afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2798,7 +2798,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
- if (err) {
+ if (err < 0) {
dev_err(dev, "fail to enable pcie controller: %d\n", err);
goto teardown_msi;
}
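pm_runtime_get_sync() returns 1 (not 0) when the device was already powered up, so treating any non-zero return as failure made probe bail on a perfectly good controller; only negative values are errors. The corrected check:

	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {			/* 0 and 1 both mean success */
		dev_err(dev, "fail to enable pcie controller: %d\n", err);
		goto teardown_msi;
	}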
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
new file mode 100644
index 000000000000..d20aabc26273
--- /dev/null
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2009 - 2019 Broadcom */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "../pci.h"
+
+/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
+#define BRCM_PCIE_CAP_REGS 0x00ac
+
+/* Broadcom STB PCIe Register Offsets */
+#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
+#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
+#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+
+#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
+#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
+
+#define PCIE_RC_DL_MDIO_ADDR 0x1100
+#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
+#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+
+#define PCIE_MISC_MISC_CTRL 0x4008
+#define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000
+#define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000
+#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000
+#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128 0x0
+#define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK 0xf8000000
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
+#define PCIE_MEM_WIN0_LO(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 4)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010
+#define PCIE_MEM_WIN0_HI(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 4)
+
+#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
+#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
+
+#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
+#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
+#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+
+#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
+#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
+
+#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
+#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
+
+#define PCIE_MISC_MSI_DATA_CONFIG 0x404c
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL 0xffe06540
+
+#define PCIE_MISC_PCIE_CTRL 0x4064
+#define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK 0x1
+
+#define PCIE_MISC_PCIE_STATUS 0x4068
+#define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK 0x80
+#define PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK 0x20
+#define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK 0x10
+#define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK 0x40
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0
+#define PCIE_MEM_WIN0_BASE_LIMIT(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI 0x4080
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK 0xff
+#define PCIE_MEM_WIN0_BASE_HI(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
+
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI 0x4084
+#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK 0xff
+#define PCIE_MEM_WIN0_LIMIT_HI(win) \
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
+
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
+
+#define PCIE_MSI_INTR2_STATUS 0x4500
+#define PCIE_MSI_INTR2_CLR 0x4508
+#define PCIE_MSI_INTR2_MASK_SET 0x4510
+#define PCIE_MSI_INTR2_MASK_CLR 0x4514
+
+#define PCIE_EXT_CFG_DATA 0x8000
+
+#define PCIE_EXT_CFG_INDEX 0x9000
+#define PCIE_EXT_BUSNUM_SHIFT 20
+#define PCIE_EXT_SLOT_SHIFT 15
+#define PCIE_EXT_FUNC_SHIFT 12
+
+#define PCIE_RGR1_SW_INIT_1 0x9210
+#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
+#define PCIE_RGR1_SW_INIT_1_INIT_MASK 0x2
+
+/* PCIe parameters */
+#define BRCM_NUM_PCIE_OUT_WINS 0x4
+#define BRCM_INT_PCI_MSI_NR 32
+
+/* MSI target addresses */
+#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
+#define BRCM_MSI_TARGET_ADDR_GT_4GB 0xffffffffcULL
+
+/* MDIO registers */
+#define MDIO_PORT0 0x0
+#define MDIO_DATA_MASK 0x7fffffff
+#define MDIO_PORT_MASK 0xf0000
+#define MDIO_REGAD_MASK 0xffff
+#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_READ 0x1
+#define MDIO_CMD_WRITE 0x0
+#define MDIO_DATA_DONE_MASK 0x80000000
+#define MDIO_RD_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
+#define MDIO_WT_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
+#define SSC_REGS_ADDR 0x1100
+#define SET_ADDR_OFFSET 0x1f
+#define SSC_CNTL_OFFSET 0x2
+#define SSC_CNTL_OVRD_EN_MASK 0x8000
+#define SSC_CNTL_OVRD_VAL_MASK 0x4000
+#define SSC_STATUS_OFFSET 0x1
+#define SSC_STATUS_SSC_MASK 0x400
+#define SSC_STATUS_PLL_LOCK_MASK 0x800
+
+struct brcm_msi {
+ struct device *dev;
+ void __iomem *base;
+ struct device_node *np;
+ struct irq_domain *msi_domain;
+ struct irq_domain *inner_domain;
+ struct mutex lock; /* guards the alloc/free operations */
+ u64 target_addr;
+ int irq;
+ /* used indicates which MSI interrupts have been alloc'd */
+ unsigned long used;
+};
+
+/* Internal PCIe Host Controller Information. */
+struct brcm_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct pci_bus *root_bus;
+ struct device_node *np;
+ bool ssc;
+ int gen;
+ u64 msi_target_addr;
+ struct brcm_msi *msi;
+};
+
+/*
+ * This is to convert the size of the inbound "BAR" region to the
+ * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
+ */
+static int brcm_pcie_encode_ibar_size(u64 size)
+{
+ int log2_in = ilog2(size);
+
+ if (log2_in >= 12 && log2_in <= 15)
+ /* Covers 4KB to 32KB (inclusive) */
+ return (log2_in - 12) + 0x1c;
+ else if (log2_in >= 16 && log2_in <= 35)
+ /* Covers 64KB to 32GB (inclusive) */
+ return log2_in - 15;
+ /* Something is awry so disable */
+ return 0;
+}
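Worked examples of the encoding, derived from the two branches above:

	/*
	 * brcm_pcie_encode_ibar_size() examples:
	 *   4KB  -> log2 = 12 -> (12 - 12) + 0x1c = 0x1c
	 *   32KB -> log2 = 15 -> (15 - 12) + 0x1c = 0x1f
	 *   64KB -> log2 = 16 ->  16 - 15         = 0x01
	 *   1GB  -> log2 = 30 ->  30 - 15         = 0x0f
	 *   Sizes outside 4KB..32GB encode as 0, disabling the window.
	 */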
+
+static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
+{
+ u32 pkt = 0;
+
+ pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
+ pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
+ pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
+
+ return pkt;
+}
+
+/* negative return value indicates error */
+static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
+{
+ int tries;
+ u32 data;
+
+ writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
+ base + PCIE_RC_DL_MDIO_ADDR);
+ readl(base + PCIE_RC_DL_MDIO_ADDR);
+
+ data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
+ for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
+ udelay(10);
+ data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
+ }
+
+ *val = FIELD_GET(MDIO_DATA_MASK, data);
+ return MDIO_RD_DONE(data) ? 0 : -EIO;
+}
+
+/* negative return value indicates error */
+static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
+ u8 regad, u16 wrdata)
+{
+ int tries;
+ u32 data;
+
+ writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
+ base + PCIE_RC_DL_MDIO_ADDR);
+ readl(base + PCIE_RC_DL_MDIO_ADDR);
+ writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
+
+ data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
+ for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
+ udelay(10);
+ data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
+ }
+
+ return MDIO_WT_DONE(data) ? 0 : -EIO;
+}
+
+/*
+ * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
+ * return value indicates error.
+ */
+static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
+{
+ int pll, ssc;
+ int ret;
+ u32 tmp;
+
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
+ SSC_REGS_ADDR);
+ if (ret < 0)
+ return ret;
+
+ ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
+ SSC_CNTL_OFFSET, &tmp);
+ if (ret < 0)
+ return ret;
+
+ u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
+ u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
+ SSC_CNTL_OFFSET, tmp);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+ ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
+ SSC_STATUS_OFFSET, &tmp);
+ if (ret < 0)
+ return ret;
+
+ ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
+ pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
+
+ return ssc && pll ? 0 : -EIO;
+}
+
+/* Limits operation to a specific generation (1, 2, or 3) */
+static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
+{
+ u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+ u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+
+ lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
+ writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+
+ lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
+}
+
+static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
+ unsigned int win, u64 cpu_addr,
+ u64 pcie_addr, u64 size)
+{
+ u32 cpu_addr_mb_high, limit_addr_mb_high;
+ phys_addr_t cpu_addr_mb, limit_addr_mb;
+ int high_addr_shift;
+ u32 tmp;
+
+ /* Set the base of the pcie_addr window */
+ writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
+ writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));
+
+ /* Write the addr base & limit lower bits (in MBs) */
+ cpu_addr_mb = cpu_addr / SZ_1M;
+ limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;
+
+ tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+ u32p_replace_bits(&tmp, cpu_addr_mb,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
+ u32p_replace_bits(&tmp, limit_addr_mb,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
+ writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+
+ /* Write the cpu & limit addr upper bits */
+ high_addr_shift =
+ HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
+
+ cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
+ tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
+ u32p_replace_bits(&tmp, cpu_addr_mb_high,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
+ writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
+
+ limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
+ tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
+ u32p_replace_bits(&tmp, limit_addr_mb_high,
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
+ writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
+}
+
+static struct irq_chip brcm_msi_irq_chip = {
+ .name = "BRCM STB PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info brcm_msi_domain_info = {
+ /* Multi MSI is supported by the controller, but not by this driver */
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .chip = &brcm_msi_irq_chip,
+};
+
+static void brcm_pcie_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long status, virq;
+ struct brcm_msi *msi;
+ struct device *dev;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+ msi = irq_desc_get_handler_data(desc);
+ dev = msi->dev;
+
+ status = readl(msi->base + PCIE_MSI_INTR2_STATUS);
+ for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) {
+ virq = irq_find_mapping(msi->inner_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_dbg(dev, "unexpected MSI\n");
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+
+ msg->address_lo = lower_32_bits(msi->target_addr);
+ msg->address_hi = upper_32_bits(msi->target_addr);
+ msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq;
+}
+
+static int brcm_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static void brcm_msi_ack_irq(struct irq_data *data)
+{
+ struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+
+ writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR);
+}
+
+
+ .name = "BRCM STB MSI",
+ .irq_compose_msi_msg = brcm_msi_compose_msi_msg,
+ .irq_set_affinity = brcm_msi_set_affinity,
+ .irq_ack = brcm_msi_ack_irq,
+};
+
+static int brcm_msi_alloc(struct brcm_msi *msi)
+{
+ int hwirq;
+
+ mutex_lock(&msi->lock);
+ hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0);
+ mutex_unlock(&msi->lock);
+
+ return hwirq;
+}
+
+static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
+{
+ mutex_lock(&msi->lock);
+ bitmap_release_region(&msi->used, hwirq, 0);
+ mutex_unlock(&msi->lock);
+}
+
+static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct brcm_msi *msi = domain->host_data;
+ int hwirq;
+
+ hwirq = brcm_msi_alloc(msi);
+
+ if (hwirq < 0)
+ return hwirq;
+
+ irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
+ &brcm_msi_bottom_irq_chip, domain->host_data,
+ handle_edge_irq, NULL, NULL);
+ return 0;
+}
+
+static void brcm_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
+
+ brcm_msi_free(msi, d->hwirq);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = brcm_irq_domain_alloc,
+ .free = brcm_irq_domain_free,
+};
+
+static int brcm_allocate_domains(struct brcm_msi *msi)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+ struct device *dev = msi->dev;
+
+ msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &brcm_msi_domain_info,
+ msi->inner_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void brcm_free_domains(struct brcm_msi *msi)
+{
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->inner_domain);
+}
+
+static void brcm_msi_remove(struct brcm_pcie *pcie)
+{
+ struct brcm_msi *msi = pcie->msi;
+
+ if (!msi)
+ return;
+ irq_set_chained_handler(msi->irq, NULL);
+ irq_set_handler_data(msi->irq, NULL);
+ brcm_free_domains(msi);
+}
+
+static void brcm_msi_set_regs(struct brcm_msi *msi)
+{
+ writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR);
+
+ /*
+ * Bit 0 of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed as an MSI
+ * enable, which we set to 1.
+ */
+ writel(lower_32_bits(msi->target_addr) | 0x1,
+ msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
+ writel(upper_32_bits(msi->target_addr),
+ msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
+
+ writel(PCIE_MISC_MSI_DATA_CONFIG_VAL,
+ msi->base + PCIE_MISC_MSI_DATA_CONFIG);
+}
+
+static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
+{
+ struct brcm_msi *msi;
+ int irq, ret;
+ struct device *dev = pcie->dev;
+
+ irq = irq_of_parse_and_map(dev->of_node, 1);
+ if (irq <= 0) {
+ dev_err(dev, "cannot map MSI interrupt\n");
+ return -ENODEV;
+ }
+
+ msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ mutex_init(&msi->lock);
+ msi->dev = dev;
+ msi->base = pcie->base;
+ msi->np = pcie->np;
+ msi->target_addr = pcie->msi_target_addr;
+ msi->irq = irq;
+
+ ret = brcm_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
+
+ brcm_msi_set_regs(msi);
+ pcie->msi = msi;
+
+ return 0;
+}
+
+/* The controller is capable of serving in both RC and EP roles */
+static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
+{
+ void __iomem *base = pcie->base;
+ u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
+
+ return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
+}
+
+static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
+{
+ u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
+ u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
+ u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
+
+ return dla && plu;
+}
+
+/* Configuration space read/write support */
+static inline int brcm_pcie_cfg_index(int busnr, int devfn, int reg)
+{
+ return ((PCI_SLOT(devfn) & 0x1f) << PCIE_EXT_SLOT_SHIFT)
+ | ((PCI_FUNC(devfn) & 0x07) << PCIE_EXT_FUNC_SHIFT)
+ | (busnr << PCIE_EXT_BUSNUM_SHIFT)
+ | (reg & ~3);
+}
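The index register packs bus, device, and function into an ECAM-like layout defined by the shift macros. A worked example:

	/*
	 * brcm_pcie_cfg_index(1, PCI_DEVFN(0, 0), 0x10):
	 *   (0 << 15) | (0 << 12) | (1 << 20) | (0x10 & ~3) = 0x00100010
	 * i.e. bus 1, slot 0, function 0, dword-aligned register 0x10.
	 */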
+
+static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ void __iomem *base = pcie->base;
+ int idx;
+
+ /* Accesses to the RC go right to the RC registers if slot==0 */
+ if (pci_is_root_bus(bus))
+ return PCI_SLOT(devfn) ? NULL : base + where;
+
+ /* For devices, write to the config space index register */
+ idx = brcm_pcie_cfg_index(bus->number, devfn, 0);
+ writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
+ return base + PCIE_EXT_CFG_DATA + where;
+}
+
+static struct pci_ops brcm_pcie_ops = {
+ .map_bus = brcm_pcie_map_conf,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
+ u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+}
+
+static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
+ u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+}
+
+static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
+ u64 *rc_bar2_size,
+ u64 *rc_bar2_offset)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+
+ entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ /*
+ * The controller expects the inbound window offset to be calculated as
+ * the difference between PCIe's address space and CPU's. The offset
+ * provided by the firmware is calculated the opposite way, so we
+ * negate it.
+ */
+ *rc_bar2_offset = -entry->offset;
+ *rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start);
+
+ /*
+ * We validate the inbound memory view even though we should trust
+ * whatever the device-tree provides. This is because of an HW issue on
+ * early Raspberry Pi 4 revisions (bcm2711). It turns out its
+ * firmware has to dynamically edit dma-ranges due to a bug in the
+ * PCIe controller integration, which prohibits any access above the
+ * lower 3GB of memory. Given this, we decided to keep the dma-ranges
+ * in check, avoiding hard to debug device-tree related issues in the
+ * future:
+ *
+ * The PCIe host controller by design must set the inbound viewport to
+ * be a contiguous arrangement of all of the system's memory. In
+ * addition, its size must be a power of two. To further complicate
+ * matters, the viewport must start on a pcie-address that is aligned
+ * on a multiple of its size. If a portion of the viewport does not
+ * represent system memory -- e.g. 3GB of memory requires a 4GB
+ * viewport -- we can map the outbound memory in or after 3GB and even
+ * though the viewport will overlap the outbound memory the controller
+ * will know to send outbound memory downstream and everything else
+ * upstream.
+ *
+ * For example:
+ *
+ * - The best-case scenario, memory up to 3GB, is to place the inbound
+ * region in the first 4GB of pcie-space, as some legacy devices can
+ * only address 32bits. We would also like to put the MSI under 4GB
+ * as well, since some devices require a 32bit MSI target address.
+ *
+ * - If the system memory is 4GB or larger we cannot start the inbound
+ * region at location 0 (since we have to allow some space for
+ * outbound memory @ 3GB). So instead it will start at the 1x
+ * multiple of its size
+ */
+ if (!*rc_bar2_size || *rc_bar2_offset % *rc_bar2_size ||
+ (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
+ dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
+ *rc_bar2_size, *rc_bar2_offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
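A worked example of the size calculation, assuming a dma-ranges entry that maps 3GB of system RAM at PCI address 0:

	/*
	 * res spans 0x0..0xBFFFFFFF, so end - start = 0xBFFFFFFF,
	 * fls64(0xBFFFFFFF) = 32 and rc_bar2_size = 1ULL << 32 = 4GB:
	 * the smallest power-of-two viewport covering all of memory.
	 */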
+
+static int brcm_pcie_setup(struct brcm_pcie *pcie)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 rc_bar2_offset, rc_bar2_size;
+ void __iomem *base = pcie->base;
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ unsigned int scb_size_val;
+ bool ssc_good = false;
+ struct resource *res;
+ int num_out_wins = 0;
+ u16 nlw, cls, lnksta;
+ int i, ret;
+ u32 tmp;
+
+ /* Reset the bridge */
+ brcm_pcie_bridge_sw_init_set(pcie, 1);
+
+ usleep_range(100, 200);
+
+ /* Take the bridge out of reset */
+ brcm_pcie_bridge_sw_init_set(pcie, 0);
+
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ /* Wait for SerDes to be stable */
+ usleep_range(100, 200);
+
+ /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
+ u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128,
+ PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
+ writel(tmp, base + PCIE_MISC_MISC_CTRL);
+
+ ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
+ &rc_bar2_offset);
+ if (ret)
+ return ret;
+
+ tmp = lower_32_bits(rc_bar2_offset);
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
+ PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
+ writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
+ writel(upper_32_bits(rc_bar2_offset),
+ base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+
+ scb_size_val = rc_bar2_size ?
+ ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */
+ tmp = readl(base + PCIE_MISC_MISC_CTRL);
+ u32p_replace_bits(&tmp, scb_size_val,
+ PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK);
+ writel(tmp, base + PCIE_MISC_MISC_CTRL);
+
+ /*
+ * We ideally want the MSI target address to be located in the 32bit
+ * addressable memory area. Some devices might depend on it. This is
+ * possible either when the inbound window is located above the lower
+ * 4GB or when the inbound area is smaller than 4GB (taking into
+ * account the rounding-up we're forced to perform).
+ */
+ if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
+ else
+ pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+
+ /* disable the PCIe->GISB memory window (RC_BAR1) */
+ tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
+ tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
+ writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
+
+ /* disable the PCIe->SCB memory window (RC_BAR3) */
+ tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
+ tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
+ writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
+
+ /* Mask all interrupts since we are not handling any yet */
+ writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET);
+
+ /* clear any interrupts we find on boot */
+ writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR);
+
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
+ /* Unassert the fundamental reset */
+ brcm_pcie_perst_set(pcie, 0);
+
+ /*
+ * Give the RC/EP time to wake up before trying to configure the RC.
+ * Intermittently check the link-up status, for up to a total of 100ms.
+ */
+ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+ msleep(5);
+
+ if (!brcm_pcie_link_up(pcie)) {
+ dev_err(dev, "link down\n");
+ return -ENODEV;
+ }
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(dev, "PCIe misconfigured; is in EP mode\n");
+ return -EINVAL;
+ }
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ res = entry->res;
+
+ if (resource_type(res) != IORESOURCE_MEM)
+ continue;
+
+ if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
+ dev_err(pcie->dev, "too many outbound wins\n");
+ return -EINVAL;
+ }
+
+ brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
+ res->start - entry->offset,
+ resource_size(res));
+ num_out_wins++;
+ }
+
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ */
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
+ if (pcie->ssc) {
+ ret = brcm_pcie_set_ssc(pcie);
+ if (ret == 0)
+ ssc_good = true;
+ else
+ dev_err(dev, "failed attempt to enter ssc mode\n");
+ }
+
+ lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
+ cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
+ nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+ dev_info(dev, "link up, %s x%u %s\n",
+ PCIE_SPEED2STR(cls + PCI_SPEED_133MHz_PCIX_533),
+ nlw, ssc_good ? "(SSC)" : "(!SSC)");
+
+ /* PCIe->SCB endian mode for BAR */
+ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+ writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+ /*
+ * The refclk from the RC should be gated with the CLKREQ# input when
+ * ASPM L0s/L1 is enabled, so set the CLKREQ_DEBUG_ENABLE field to 1.
+ */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ return 0;
+}
+
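Nearly every register update in brcm_pcie_setup() follows the same read-modify-write shape: readl(), one or more u32p_replace_bits() calls to splice a field value in under its mask, then writel(). A minimal userspace sketch of the splice itself; replace_bits32() is a stand-in for the kernel helper, and it assumes GCC/Clang for __builtin_ctz():

    #include <stdint.h>
    #include <stdio.h>

    /* Clear the field selected by mask, then insert val at the field's
     * position; mask must be non-zero and contiguous. */
    static void replace_bits32(uint32_t *reg, uint32_t val, uint32_t mask)
    {
            unsigned int shift = __builtin_ctz(mask);  /* lowest set bit */

            *reg = (*reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t tmp = 0xdeadbeef;

            replace_bits32(&tmp, 1, 0x00001000);    /* 1-bit enable field */
            replace_bits32(&tmp, 0x5, 0x000000f0);  /* 4-bit field := 5 */
            printf("0x%08x\n", tmp);
            return 0;
    }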
+/* L23 is a low-power PCIe link state */
+static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
+{
+ void __iomem *base = pcie->base;
+ int l23, i;
+ u32 tmp;
+
+ /* Assert request for L23 */
+ tmp = readl(base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
+ writel(tmp, base + PCIE_MISC_PCIE_CTRL);
+
+ /* Wait up to 36 msec for L23 */
+ tmp = readl(base + PCIE_MISC_PCIE_STATUS);
+ l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
+ for (i = 0; i < 15 && !l23; i++) {
+ usleep_range(2000, 2400);
+ tmp = readl(base + PCIE_MISC_PCIE_STATUS);
+ l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
+ tmp);
+ }
+
+ if (!l23)
+ dev_err(pcie->dev, "failed to enter low-power link state\n");
+}
+
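The 36 ms wait in brcm_pcie_enter_l23() is an open-coded bounded poll. A sketch of the same wait expressed with the readl_poll_timeout() helper from <linux/iopoll.h> (illustration only, not the driver's code):

    #include <linux/bitfield.h>
    #include <linux/iopoll.h>

    static int brcm_pcie_wait_l23(struct brcm_pcie *pcie)
    {
            u32 tmp;

            /* Poll PCIE_MISC_PCIE_STATUS every ~2 ms, 36 ms total. */
            return readl_poll_timeout(pcie->base + PCIE_MISC_PCIE_STATUS, tmp,
                                      FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp),
                                      2000, 36000);
    }

readl_poll_timeout() returns 0 once the condition holds and -ETIMEDOUT otherwise.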
+static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+{
+ void __iomem *base = pcie->base;
+ u32 tmp;
+
+ if (brcm_pcie_link_up(pcie))
+ brcm_pcie_enter_l23(pcie);
+ /* Assert fundamental reset */
+ brcm_pcie_perst_set(pcie, 1);
+
+ /* Deassert request for L23 in case it was asserted */
+ tmp = readl(base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
+ writel(tmp, base + PCIE_MISC_PCIE_CTRL);
+
+ /* Turn off SerDes */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ /* Shutdown PCIe bridge */
+ brcm_pcie_bridge_sw_init_set(pcie, 1);
+}
+
+static void __brcm_pcie_remove(struct brcm_pcie *pcie)
+{
+ brcm_msi_remove(pcie);
+ brcm_pcie_turn_off(pcie);
+ clk_disable_unprepare(pcie->clk);
+ clk_put(pcie->clk);
+}
+
+static int brcm_pcie_remove(struct platform_device *pdev)
+{
+ struct brcm_pcie *pcie = platform_get_drvdata(pdev);
+
+ pci_stop_root_bus(pcie->root_bus);
+ pci_remove_root_bus(pcie->root_bus);
+ __brcm_pcie_remove(pcie);
+
+ return 0;
+}
+
+static int brcm_pcie_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct pci_host_bridge *bridge;
+ struct brcm_pcie *pcie;
+ struct pci_bus *child;
+ struct resource *res;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->dev = &pdev->dev;
+ pcie->np = np;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcie->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ ret = of_pci_get_max_link_speed(np);
+ pcie->gen = (ret < 0) ? 0 : ret;
+
+ pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
+
+ ret = pci_parse_request_of_pci_ranges(pcie->dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ return ret;
+ }
+
+ ret = brcm_pcie_setup(pcie);
+ if (ret)
+ goto fail;
+
+ msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+ if (pci_msi_enabled() && msi_np == pcie->np) {
+ ret = brcm_pcie_enable_msi(pcie);
+ if (ret) {
+ dev_err(pcie->dev, "probe of internal MSI failed\n");
+ goto fail;
+ }
+ }
+
+ bridge->dev.parent = &pdev->dev;
+ bridge->busnr = 0;
+ bridge->ops = &brcm_pcie_ops;
+ bridge->sysdata = pcie;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Scanning root bridge failed\n");
+ goto fail;
+ }
+
+ pci_assign_unassigned_bus_resources(bridge->bus);
+ list_for_each_entry(child, &bridge->bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bridge->bus);
+ platform_set_drvdata(pdev, pcie);
+ pcie->root_bus = bridge->bus;
+
+ return 0;
+fail:
+ __brcm_pcie_remove(pcie);
+ return ret;
+}
+
+static const struct of_device_id brcm_pcie_match[] = {
+ { .compatible = "brcm,bcm2711-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcm_pcie_match);
+
+static struct platform_driver brcm_pcie_driver = {
+ .probe = brcm_pcie_probe,
+ .remove = brcm_pcie_remove,
+ .driver = {
+ .name = "brcm-pcie",
+ .of_match_table = brcm_pcie_match,
+ },
+};
+module_platform_driver(brcm_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
+MODULE_AUTHOR("Broadcom");
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 0a468c73bae3..8c7f875acf7f 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -1588,6 +1588,30 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
quirk_paxc_disable_msi_parsing);
+static void quirk_paxc_bridge(struct pci_dev *pdev)
+{
+ /*
+ * The PCI config space is shared with the PAXC root port and the first
+ * Ethernet device. So, we need to work around this by telling the PCI
+ * code that the bridge is not an Ethernet device.
+ */
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+
+ /*
+ * MPSS is not being set properly (as it is currently 0). This is
+ * because that area of the PCI config space is hard-coded to zero and
+ * is not modifiable by firmware. Set this to 2 (i.e., a 512-byte MPS)
+ * so that the MPS can be set to the real max value.
+ */
+ pdev->pcie_mpss = 2;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
+
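For context on the pcie_mpss value: it uses the PCIe Device Capabilities encoding, where the supported payload size is 128 << pcie_mpss bytes, so 2 advertises 512 bytes. A one-line check:

    #include <stdio.h>

    int main(void)
    {
            int pcie_mpss = 2;

            /* PCIe MPS encoding: payload bytes = 128 << encoded value */
            printf("%d bytes\n", 128 << pcie_mpss);  /* prints "512 bytes" */
            return 0;
    }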
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 212842263f55..dac91d60701d 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -98,9 +98,6 @@ struct vmd_dev {
struct irq_domain *irq_domain;
struct pci_bus *bus;
u8 busn_start;
-
- struct dma_map_ops dma_ops;
- struct dma_domain dma_domain;
};
static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -295,151 +292,6 @@ static struct msi_domain_info vmd_msi_domain_info = {
.chip = &vmd_msi_controller,
};
-/*
- * VMD replaces the requester ID with its own. DMA mappings for devices in a
- * VMD domain need to be mapped for the VMD, not the device requiring
- * the mapping.
- */
-static struct device *to_vmd_dev(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
-
- return &vmd->dev->dev;
-}
-
-static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
- gfp_t flag, unsigned long attrs)
-{
- return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
-}
-
-static void vmd_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t addr, unsigned long attrs)
-{
- return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
-}
-
-static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t addr, size_t size,
- unsigned long attrs)
-{
- return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
- attrs);
-}
-
-static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t addr, size_t size,
- unsigned long attrs)
-{
- return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
- attrs);
-}
-
-static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
- attrs);
-}
-
-static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
-}
-
-static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs)
-{
- return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs)
-{
- dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_dma_supported(struct device *dev, u64 mask)
-{
- return dma_supported(to_vmd_dev(dev), mask);
-}
-
-static u64 vmd_get_required_mask(struct device *dev)
-{
- return dma_get_required_mask(to_vmd_dev(dev));
-}
-
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
-{
- struct dma_domain *domain = &vmd->dma_domain;
-
- if (get_dma_ops(&vmd->dev->dev))
- del_dma_domain(domain);
-}
-
-#define ASSIGN_VMD_DMA_OPS(source, dest, fn) \
- do { \
- if (source->fn) \
- dest->fn = vmd_##fn; \
- } while (0)
-
-static void vmd_setup_dma_ops(struct vmd_dev *vmd)
-{
- const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
- struct dma_map_ops *dest = &vmd->dma_ops;
- struct dma_domain *domain = &vmd->dma_domain;
-
- domain->domain_nr = vmd->sysdata.domain;
- domain->dma_ops = dest;
-
- if (!source)
- return;
- ASSIGN_VMD_DMA_OPS(source, dest, alloc);
- ASSIGN_VMD_DMA_OPS(source, dest, free);
- ASSIGN_VMD_DMA_OPS(source, dest, mmap);
- ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
- ASSIGN_VMD_DMA_OPS(source, dest, map_page);
- ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
- ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
- ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
- ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
- ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
- ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
- ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
- ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
- ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
- add_dma_domain(domain);
-}
-#undef ASSIGN_VMD_DMA_OPS
-
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
unsigned int devfn, int reg, int len)
{
@@ -679,7 +531,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
.parent = res,
};
- sd->vmd_domain = true;
+ sd->vmd_dev = vmd->dev;
sd->domain = vmd_find_free_domain();
if (sd->domain < 0)
return sd->domain;
@@ -709,7 +561,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
}
vmd_attach_resources(vmd);
- vmd_setup_dma_ops(vmd);
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
pci_scan_child_bus(vmd->bus);
@@ -824,7 +675,6 @@ static void vmd_remove(struct pci_dev *dev)
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
- vmd_teardown_dma_ops(vmd);
vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
}
@@ -868,6 +718,10 @@ static const struct pci_device_id vmd_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d7b2b47bc33e..04565162a449 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -18,6 +18,9 @@
#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation"
#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver"
+#define SLOT_WARN(sl, x...) \
+ ((sl)->pdev ? pci_warn((sl)->pdev, x) : dev_warn(&(sl)->bus->dev, x))
+
struct pnv_php_event {
bool added;
struct pnv_php_slot *php_slot;
@@ -151,17 +154,11 @@ static void pnv_php_rmv_pdns(struct device_node *dn)
static void pnv_php_detach_device_nodes(struct device_node *parent)
{
struct device_node *dn;
- int refcount;
for_each_child_of_node(parent, dn) {
pnv_php_detach_device_nodes(dn);
of_node_put(dn);
- refcount = kref_read(&dn->kobj.kref);
- if (refcount != 1)
- pr_warn("Invalid refcount %d on <%pOF>\n",
- refcount, dn);
-
of_detach_node(dn);
}
}
@@ -271,7 +268,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
if (ret) {
- pci_warn(php_slot->pdev, "Error %d getting FDT blob\n", ret);
+ SLOT_WARN(php_slot, "Error %d getting FDT blob\n", ret);
goto free_fdt1;
}
@@ -285,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
if (!dt) {
ret = -EINVAL;
- pci_warn(php_slot->pdev, "Cannot unflatten FDT\n");
+ SLOT_WARN(php_slot, "Cannot unflatten FDT\n");
goto free_fdt;
}
@@ -295,15 +292,15 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
if (ret) {
pnv_php_reverse_nodes(php_slot->dn);
- pci_warn(php_slot->pdev, "Error %d populating changeset\n",
- ret);
+ SLOT_WARN(php_slot, "Error %d populating changeset\n",
+ ret);
goto free_dt;
}
php_slot->dn->child = NULL;
ret = of_changeset_apply(&php_slot->ocs);
if (ret) {
- pci_warn(php_slot->pdev, "Error %d applying changeset\n", ret);
+ SLOT_WARN(php_slot, "Error %d applying changeset\n", ret);
goto destroy_changeset;
}
@@ -342,18 +339,19 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
if (ret > 0) {
if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
- be64_to_cpu(msg.params[2]) != state ||
- be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
- pci_warn(php_slot->pdev, "Wrong msg (%lld, %lld, %lld)\n",
- be64_to_cpu(msg.params[1]),
- be64_to_cpu(msg.params[2]),
- be64_to_cpu(msg.params[3]));
+ be64_to_cpu(msg.params[2]) != state) {
+ SLOT_WARN(php_slot, "Wrong msg (%lld, %lld, %lld)\n",
+ be64_to_cpu(msg.params[1]),
+ be64_to_cpu(msg.params[2]),
+ be64_to_cpu(msg.params[3]));
return -ENOMSG;
}
+ if (be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
+ ret = -ENODEV;
+ goto error;
+ }
} else if (ret < 0) {
- pci_warn(php_slot->pdev, "Error %d powering %s\n",
- ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
- return ret;
+ goto error;
}
if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE)
@@ -362,6 +360,11 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
ret = pnv_php_add_devtree(php_slot);
return ret;
+
+error:
+ SLOT_WARN(php_slot, "Error %d powering %s\n",
+ ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
+ return ret;
}
EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
@@ -378,8 +381,8 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
*/
ret = pnv_pci_get_power_state(php_slot->id, &power_state);
if (ret) {
- pci_warn(php_slot->pdev, "Error %d getting power status\n",
- ret);
+ SLOT_WARN(php_slot, "Error %d getting power status\n",
+ ret);
} else {
*state = power_state;
}
@@ -402,7 +405,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
*state = presence;
ret = 0;
} else {
- pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
+ SLOT_WARN(php_slot, "Error %d getting presence\n", ret);
}
return ret;
@@ -566,7 +569,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
int ret;
- if (php_slot->state != PNV_PHP_STATE_POPULATED)
+ /*
+ * Allow disabling a slot that is still in the registered state, to
+ * cover cases where the slot couldn't be enabled and so never
+ * reached the populated state.
+ */
+ if (php_slot->state != PNV_PHP_STATE_POPULATED &&
+ php_slot->state != PNV_PHP_STATE_REGISTERED)
return 0;
/* Remove all devices behind the slot */
@@ -675,7 +684,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
ret = pci_hp_register(&php_slot->slot, php_slot->bus,
php_slot->slot_no, php_slot->name);
if (ret) {
- pci_warn(php_slot->pdev, "Error %d registering slot\n", ret);
+ SLOT_WARN(php_slot, "Error %d registering slot\n", ret);
return ret;
}
@@ -728,7 +737,7 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
/* Enable MSIx */
ret = pci_enable_msix_exact(pdev, &entry, 1);
if (ret) {
- pci_warn(pdev, "Error %d enabling MSIx\n", ret);
+ SLOT_WARN(php_slot, "Error %d enabling MSIx\n", ret);
return ret;
}
@@ -778,8 +787,9 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
(sts & PCI_EXP_SLTSTA_PDC)) {
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret) {
- pci_warn(pdev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
- php_slot->name, ret, sts);
+ SLOT_WARN(php_slot,
+ "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
+ php_slot->name, ret, sts);
return IRQ_HANDLED;
}
@@ -809,8 +819,9 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
*/
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event) {
- pci_warn(pdev, "PCI slot [%s] missed hotplug event 0x%04x\n",
- php_slot->name, sts);
+ SLOT_WARN(php_slot,
+ "PCI slot [%s] missed hotplug event 0x%04x\n",
+ php_slot->name, sts);
return IRQ_HANDLED;
}
@@ -834,7 +845,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
/* Allocate workqueue */
php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
if (!php_slot->wq) {
- pci_warn(pdev, "Cannot alloc workqueue\n");
+ SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
pnv_php_disable_irq(php_slot, true);
return;
}
@@ -858,7 +869,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
php_slot->name, php_slot);
if (ret) {
pnv_php_disable_irq(php_slot, true);
- pci_warn(pdev, "Error %d enabling IRQ %d\n", ret, irq);
+ SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
return;
}
@@ -894,7 +905,7 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
ret = pci_enable_device(pdev);
if (ret) {
- pci_warn(pdev, "Error %d enabling device\n", ret);
+ SLOT_WARN(php_slot, "Error %d enabling device\n", ret);
return;
}
@@ -1009,6 +1020,8 @@ static int __init pnv_php_init(void)
for_each_compatible_node(dn, NULL, "ibm,ioda3-phb")
pnv_php_register(dn);
+ for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb")
+ pnv_php_register_one(dn); /* slot directly under the PHB */
return 0;
}
@@ -1021,6 +1034,9 @@ static void __exit pnv_php_exit(void)
for_each_compatible_node(dn, NULL, "ibm,ioda3-phb")
pnv_php_unregister(dn);
+
+ for_each_compatible_node(dn, NULL, "ibm,ioda2-npu2-opencapi-phb")
+ pnv_php_unregister_one(dn); /* slot directly under the PHB */
}
module_init(pnv_php_init);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1e88fd427757..4d1f392b05f9 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -186,10 +186,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
- goto failed2;
+ goto failed1;
rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
if (rc)
- goto failed3;
+ goto failed2;
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
@@ -197,11 +197,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
return 0;
-failed3:
- sysfs_remove_link(&dev->dev.kobj, buf);
failed2:
- pci_stop_and_remove_bus_device(virtfn);
+ sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
+ pci_stop_and_remove_bus_device(virtfn);
pci_dev_put(dev);
failed0:
virtfn_remove_bus(dev->bus, bus);
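The relabeling above restores the mirrored-unwind idiom: each failure label undoes only the steps that had already succeeded, in reverse order, so a sysfs_create_link() failure no longer skips or double-runs cleanup. A minimal sketch of the idiom; step_a/step_b and undo_a are hypothetical:

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static void undo_a(void) { puts("undo a"); }
    static int step_b(void) { return -1; }      /* pretend this fails */

    static int do_both(void)
    {
            int rc;

            rc = step_a();
            if (rc)
                    goto fail_a;
            rc = step_b();
            if (rc)
                    goto fail_b;
            return 0;

    fail_b:
            undo_a();       /* only a's side effects exist here */
    fail_a:
            return rc;
    }

    int main(void)
    {
            return do_both() ? 1 : 0;
    }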
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index c7709e49f0e4..6b43a5455c7a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -688,7 +688,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
- return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+ return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0608aae72ccc..9a8a38384121 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -289,6 +289,9 @@ static const struct pci_p2pdma_whitelist_entry {
/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
{PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE},
{PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
+ /* Intel SkyLake-E */
+ {PCI_VENDOR_ID_INTEL, 0x2030, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2020, 0},
{}
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e87196cc1a7f..d828ca835a98 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -131,6 +131,7 @@ bool pci_ats_disabled(void)
{
return pcie_ats_disabled;
}
+EXPORT_SYMBOL_GPL(pci_ats_disabled);
/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
@@ -184,7 +185,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
return NULL;
}
- return ioremap_nocache(res->start, resource_size(res));
+ return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
@@ -1372,8 +1373,11 @@ int pci_save_state(struct pci_dev *dev)
{
int i;
/* XXX: 100% dword access ok here? */
- for (i = 0; i < 16; i++)
+ for (i = 0; i < 16; i++) {
pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
+ pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
+ i * 4, dev->saved_config_space[i]);
+ }
dev->state_saved = true;
i = pci_save_pcie_state(dev);
@@ -5998,7 +6002,8 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
/**
* pci_add_dma_alias - Add a DMA devfn alias for a device
* @dev: the PCI device for which alias is added
- * @devfn: alias slot and function
+ * @devfn_from: alias slot and function
+ * @nr_devfns: number of subsequent devfns to alias
*
* This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
* which is used to program permissible bus-devfn source addresses for DMA
@@ -6014,18 +6019,29 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
* cannot be left as a userspace activity). DMA aliases should therefore
* be configured via quirks, such as the PCI fixup header quirk.
*/
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
{
+ int devfn_to;
+
+ nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
+ devfn_to = devfn_from + nr_devfns - 1;
+
if (!dev->dma_alias_mask)
- dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
+ dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
if (!dev->dma_alias_mask) {
pci_warn(dev, "Unable to allocate DMA alias mask\n");
return;
}
- set_bit(devfn, dev->dma_alias_mask);
- pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
- PCI_SLOT(devfn), PCI_FUNC(devfn));
+ bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
+
+ if (nr_devfns == 1)
+ pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
+ PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
+ else if (nr_devfns > 1)
+ pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
+ PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
+ PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
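The clamp at the top of pci_add_dma_alias() guarantees the alias range never runs past one bus's devfn space. A standalone illustration of the arithmetic:

    #include <stdio.h>

    #define MAX_NR_DEVFNS 256

    int main(void)
    {
            unsigned int devfn_from = 0xf8, nr_devfns = 16;

            /* Clamp so the range never runs past devfn 0xff. */
            if (nr_devfns > MAX_NR_DEVFNS - devfn_from)
                    nr_devfns = MAX_NR_DEVFNS - devfn_from;
            printf("aliasing %u devfns: %#x..%#x\n", nr_devfns,
                   devfn_from, devfn_from + nr_devfns - 1);  /* 0xf8..0xff */
            return 0;
    }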
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
@@ -6033,7 +6049,9 @@ bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
return (dev1->dma_alias_mask &&
test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
(dev2->dma_alias_mask &&
- test_bit(dev1->devfn, dev2->dma_alias_mask));
+ test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
+ pci_real_dma_dev(dev1) == dev2 ||
+ pci_real_dma_dev(dev2) == dev1;
}
bool pci_device_is_present(struct pci_dev *pdev)
@@ -6057,6 +6075,21 @@ void pci_ignore_hotplug(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
+/**
+ * pci_real_dma_dev - Get PCI DMA device for PCI device
+ * @dev: the PCI device that may have a PCI DMA alias
+ *
+ * Permits the platform to provide architecture-specific functionality to
+ * devices needing to alias DMA to another PCI device on another PCI bus. If
+ * the PCI device is on the same bus, it is recommended to use
+ * pci_add_dma_alias(). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
+{
+ return dev;
+}
+
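An architecture that needs the hook would provide a strong definition overriding the weak default above. A sketch under the assumption of a hypothetical platform lookup; my_platform_dma_parent() is made up for illustration:

    struct pci_dev *pci_real_dma_dev(struct pci_dev *dev)
    {
            /* my_platform_dma_parent() is hypothetical: it would return
             * the device that actually emits DMA on this platform, or
             * NULL if there is no alias. */
            struct pci_dev *dma_dev = my_platform_dma_parent(dev);

            return dma_dev ? dma_dev : dev;
    }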
resource_size_t __weak pcibios_default_alignment(void)
{
return 0;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a0a53bd05a0b..6394e7746fb5 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,9 @@
#include <linux/pci.h>
+/* Number of possible devfns: 0.0 to 1f.7 inclusive */
+#define MAX_NR_DEVFNS 256
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 1ca86f2e0166..4a818b07a1af 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1445,6 +1445,7 @@ static int aer_probe(struct pcie_device *dev)
return -ENOMEM;
rpc->rpd = port;
+ INIT_KFIFO(rpc->aer_fifo);
set_service_data(dev, rpc);
status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index b0e6048a9208..01dfc8bb7ca0 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -10,6 +10,8 @@
* Zhang Yanmin (yanmin.zhang@intel.com)
*/
+#define dev_fmt(fmt) "AER: " fmt
+
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -61,10 +63,12 @@ static int report_error_detected(struct pci_dev *dev,
* error callbacks of "any" device in the subtree, and will
* exit in the disconnected error state.
*/
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
vote = PCI_ERS_RESULT_NO_AER_DRIVER;
- else
+ pci_info(dev, "can't recover (no error_detected callback)\n");
+ } else {
vote = PCI_ERS_RESULT_NONE;
+ }
} else {
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, state);
@@ -233,12 +237,12 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
pci_aer_clear_device_status(dev);
pci_cleanup_aer_uncorrect_error_status(dev);
- pci_info(dev, "AER: Device recovery successful\n");
+ pci_info(dev, "device recovery successful\n");
return;
failed:
pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
- pci_info(dev, "AER: Device recovery failed\n");
+ pci_info(dev, "device recovery failed\n");
}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 6ef74bf5013f..bd2b691fa7a3 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -306,19 +306,20 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file)
}
#endif /* HAVE_PCI_MMAP */
-static const struct file_operations proc_bus_pci_operations = {
- .owner = THIS_MODULE,
- .llseek = proc_bus_pci_lseek,
- .read = proc_bus_pci_read,
- .write = proc_bus_pci_write,
- .unlocked_ioctl = proc_bus_pci_ioctl,
- .compat_ioctl = proc_bus_pci_ioctl,
+static const struct proc_ops proc_bus_pci_ops = {
+ .proc_lseek = proc_bus_pci_lseek,
+ .proc_read = proc_bus_pci_read,
+ .proc_write = proc_bus_pci_write,
+ .proc_ioctl = proc_bus_pci_ioctl,
+#ifdef CONFIG_COMPAT
+ .proc_compat_ioctl = proc_bus_pci_ioctl,
+#endif
#ifdef HAVE_PCI_MMAP
- .open = proc_bus_pci_open,
- .release = proc_bus_pci_release,
- .mmap = proc_bus_pci_mmap,
+ .proc_open = proc_bus_pci_open,
+ .proc_release = proc_bus_pci_release,
+ .proc_mmap = proc_bus_pci_mmap,
#ifdef HAVE_ARCH_PCI_GET_UNMAPPED_AREA
- .get_unmapped_area = get_pci_unmapped_area,
+ .proc_get_unmapped_area = get_pci_unmapped_area,
#endif /* HAVE_ARCH_PCI_GET_UNMAPPED_AREA */
#endif /* HAVE_PCI_MMAP */
};
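struct proc_ops (new in v5.6) replaces file_operations for procfs entries: the .owner field is gone and each handler gains a proc_ prefix, while registration via proc_create()/proc_create_data() is unchanged. A self-contained sketch of a minimal read-only entry built on the seq_file helpers:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "hello\n");
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_show, NULL);
    }

    static const struct proc_ops demo_proc_ops = {
            .proc_open      = demo_open,
            .proc_read      = seq_read,
            .proc_lseek     = seq_lseek,
            .proc_release   = single_release,
    };

    /* registration: proc_create("demo", 0444, NULL, &demo_proc_ops); */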
@@ -424,7 +425,7 @@ int pci_proc_attach_device(struct pci_dev *dev)
sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
- &proc_bus_pci_operations, dev);
+ &proc_bus_pci_ops, dev);
if (!e)
return -ENOMEM;
proc_set_size(e, dev->cfg_size);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4937a088d7d8..29f473ebf20f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1571,7 +1571,7 @@ static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
pci_read_config_dword(dev, 0xF0, &rcba);
/* use bits 31:14, 16 kB aligned */
- asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
+ asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
if (asus_rcba_base == NULL)
return;
}
@@ -1871,19 +1871,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
+static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
+{
+ if (dev->d3_delay >= delay)
+ return;
+
+ dev->d3_delay = delay;
+ pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
+ dev->d3_delay);
+}
+
static void quirk_radeon_pm(struct pci_dev *dev)
{
if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- dev->subsystem_device == 0x00e2) {
- if (dev->d3_delay < 20) {
- dev->d3_delay = 20;
- pci_info(dev, "extending delay after power-on from D3 to %d msec\n",
- dev->d3_delay);
- }
- }
+ dev->subsystem_device == 0x00e2)
+ quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+/*
+ * Ryzen 5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=205587
+ *
+ * The kernel attempts to transition these devices to D3cold, but that seems
+ * to be ineffective on the platforms in question; the PCI device appears to
+ * remain on in D3hot state. The D3hot-to-D0 transition then requires an
+ * extended delay in order to succeed.
+ */
+static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
+{
+ quirk_d3hot_delay(dev, 20);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+
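Extending the D3hot-delay workaround to another device would just be one more registration against the shared helper; a sketch with a made-up device ID (0x1234 is not a real part):

    static void quirk_example_xhci_d3hot(struct pci_dev *dev)
    {
            quirk_d3hot_delay(dev, 20);     /* require >= 20 ms after D3hot */
    }
    DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1234, quirk_example_xhci_d3hot);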
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
@@ -2381,32 +2402,6 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
-#ifdef CONFIG_PCIE_IPROC_PLATFORM
-static void quirk_paxc_bridge(struct pci_dev *pdev)
-{
- /*
- * The PCI config space is shared with the PAXC root port and the first
- * Ethernet device. So, we need to workaround this by telling the PCI
- * code that the bridge is not an Ethernet device.
- */
- if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
-
- /*
- * MPSS is not being set properly (as it is currently 0). This is
- * because that area of the PCI config space is hard coded to zero, and
- * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
- * so that the MPS can be set to the real max value.
- */
- pdev->pcie_mpss = 2;
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
-#endif
-
/*
* Originally in EDAC sources for i82875P: Intel tells BIOS developers to
* hide device 6 which configures the overflow device access containing the
@@ -3932,7 +3927,7 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 0)
- pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
}
/*
@@ -3946,7 +3941,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
- pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}
/*
@@ -4031,7 +4026,7 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
id = pci_match_id(fixed_dma_alias_tbl, dev);
if (id)
- pci_add_dma_alias(dev, id->driver_data);
+ pci_add_dma_alias(dev, id->driver_data, 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
@@ -4072,9 +4067,9 @@ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
*/
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
- pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
- pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
- pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
@@ -4098,13 +4093,8 @@ static void quirk_pex_vca_alias(struct pci_dev *pdev)
const unsigned int num_pci_slots = 0x20;
unsigned int slot;
- for (slot = 0; slot < num_pci_slots; slot++) {
- pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
- pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
- pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
- pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
- pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
- }
+ for (slot = 0; slot < num_pci_slots; slot++)
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
@@ -4784,7 +4774,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
if (!(rcba & INTEL_LPC_RCBA_ENABLE))
return -EINVAL;
- rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
PAGE_ALIGN(INTEL_UPDCR_REG));
if (!rcba_mem)
return -ENOMEM;
@@ -5074,18 +5064,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
/*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
*/
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- pci_info(pdev, "disabling ATS (broken on this device)\n");
+ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+ return;
+
+ pci_info(pdev, "disabling ATS\n");
pdev->ats_cap = 0;
}
/* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5332,7 +5329,7 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
pci_dbg(pdev,
"Aliasing Partition %d Proxy ID %02x.%d\n",
pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
- pci_add_dma_alias(pdev, devfn);
+ pci_add_dma_alias(pdev, devfn, 1);
}
}
@@ -5373,6 +5370,39 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
+SWITCHTEC_QUIRK(0x4000); /* PFX 100XG4 */
+SWITCHTEC_QUIRK(0x4084); /* PFX 84XG4 */
+SWITCHTEC_QUIRK(0x4068); /* PFX 68XG4 */
+SWITCHTEC_QUIRK(0x4052); /* PFX 52XG4 */
+SWITCHTEC_QUIRK(0x4036); /* PFX 36XG4 */
+SWITCHTEC_QUIRK(0x4028); /* PFX 28XG4 */
+SWITCHTEC_QUIRK(0x4100); /* PSX 100XG4 */
+SWITCHTEC_QUIRK(0x4184); /* PSX 84XG4 */
+SWITCHTEC_QUIRK(0x4168); /* PSX 68XG4 */
+SWITCHTEC_QUIRK(0x4152); /* PSX 52XG4 */
+SWITCHTEC_QUIRK(0x4136); /* PSX 36XG4 */
+SWITCHTEC_QUIRK(0x4128); /* PSX 28XG4 */
+SWITCHTEC_QUIRK(0x4200); /* PAX 100XG4 */
+SWITCHTEC_QUIRK(0x4284); /* PAX 84XG4 */
+SWITCHTEC_QUIRK(0x4268); /* PAX 68XG4 */
+SWITCHTEC_QUIRK(0x4252); /* PAX 52XG4 */
+SWITCHTEC_QUIRK(0x4236); /* PAX 36XG4 */
+SWITCHTEC_QUIRK(0x4228); /* PAX 28XG4 */
+
+/*
+ * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
+ * These IDs are used to forward responses to the originator on the other
+ * side of the NTB. Alias all possible IDs to the NTB to permit access when
+ * the IOMMU is turned on.
+ */
+static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
+{
+ pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
+ /* PLX NTB may use all 256 devfns */
+ pci_add_dma_alias(pdev, 0, 256);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
/*
* On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index bade14002fd8..2061672954ee 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -32,6 +32,12 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
struct pci_bus *bus;
int ret;
+ /*
+ * The device may have an explicit alias requester ID for DMA where the
+ * requester is on another PCI bus.
+ */
+ pdev = pci_real_dma_dev(pdev);
+
ret = fn(pdev, pci_dev_id(pdev), data);
if (ret)
return ret;
@@ -41,9 +47,9 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
* DMA, iterate over that too.
*/
if (unlikely(pdev->dma_alias_mask)) {
- u8 devfn;
+ unsigned int devfn;
- for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
+ for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) {
ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
data);
if (ret)
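The type change from u8 to unsigned int matters here: the iteration bound is now MAX_NR_DEVFNS (256), a value a u8 can never reach, so an 8-bit counter would wrap from 255 back to 0 and never terminate. A minimal userspace demonstration of why the wider type is needed:

    #include <stdio.h>

    int main(void)
    {
            unsigned int count = 0;

            /* With "unsigned char devfn", devfn < 256 is always true
             * (255 + 1 wraps to 0); unsigned int makes the full
             * 256-entry walk well defined. */
            for (unsigned int devfn = 0; devfn < 256; devfn++)
                    count++;
            printf("visited %u devfns\n", count);   /* prints 256 */
            return 0;
    }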
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f279826204eb..f2461bf9243d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1803,12 +1803,18 @@ again:
/* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
+ int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
- if (fail_res->dev->subordinate)
- res->flags = 0;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
}
free_list(&fail_head);
@@ -1832,56 +1838,72 @@ void __init pci_assign_unassigned_resources(void)
}
}
-static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
+static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
struct list_head *add_list,
- resource_size_t available)
+ resource_size_t new_size)
{
- struct pci_dev_resource *dev_res;
+ resource_size_t add_size, size = resource_size(res);
if (res->parent)
return;
- if (resource_size(res) >= available)
+ if (!new_size)
return;
- dev_res = res_to_dev_res(add_list, res);
- if (!dev_res)
- return;
-
- /* Is there room to extend the window? */
- if (available - resource_size(res) <= dev_res->add_size)
- return;
+ if (new_size > size) {
+ add_size = new_size - size;
+ pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
+ &add_size);
+ } else if (new_size < size) {
+ add_size = size - new_size;
+ pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
+ &add_size);
+ }
- dev_res->add_size = available - resource_size(res);
- pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
- &dev_res->add_size);
+ res->end = res->start + new_size - 1;
+ remove_from_list(add_list, res);
}
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
struct list_head *add_list,
- resource_size_t available_io,
- resource_size_t available_mmio,
- resource_size_t available_mmio_pref)
+ struct resource io,
+ struct resource mmio,
+ struct resource mmio_pref)
{
- resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
unsigned int normal_bridges = 0, hotplug_bridges = 0;
struct resource *io_res, *mmio_res, *mmio_pref_res;
struct pci_dev *dev, *bridge = bus->self;
+ resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
/*
- * Update additional resource list (add_list) to fill all the
- * extra resource space available for this port except the space
- * calculated in __pci_bus_size_bridges() which covers all the
- * devices currently connected to the port and below.
+ * The alignment of this bridge's windows has not been accounted for
+ * yet, so handle it now, before extending the bridge windows.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ if (!io_res->parent && align)
+ io.start = min(ALIGN(io.start, align), io.end + 1);
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ if (!mmio_res->parent && align)
+ mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ if (!mmio_pref_res->parent && align)
+ mmio_pref.start = min(ALIGN(mmio_pref.start, align),
+ mmio_pref.end + 1);
+
+ /*
+ * Now that we have adjusted for alignment, update the bridge window
+ * resources to fill as much remaining resource space as possible.
*/
- extend_bridge_window(bridge, io_res, add_list, available_io);
- extend_bridge_window(bridge, mmio_res, add_list, available_mmio);
- extend_bridge_window(bridge, mmio_pref_res, add_list,
- available_mmio_pref);
+ adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
+ adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
+ adjust_bridge_window(bridge, mmio_pref_res, add_list,
+ resource_size(&mmio_pref));
/*
* Calculate how many hotplug bridges and normal bridges there
@@ -1902,11 +1924,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
*/
if (hotplug_bridges + normal_bridges == 1) {
dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
- if (dev->subordinate) {
+ if (dev->subordinate)
pci_bus_distribute_available_resources(dev->subordinate,
- add_list, available_io, available_mmio,
- available_mmio_pref);
- }
+ add_list, io, mmio, mmio_pref);
return;
}
@@ -1919,12 +1939,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* extra space reduced by the minimal required space for the
* non-hotplug bridges.
*/
- remaining_io = available_io;
- remaining_mmio = available_mmio;
- remaining_mmio_pref = available_mmio_pref;
-
for_each_pci_bridge(dev, bus) {
- const struct resource *res;
+ resource_size_t used_size;
+ struct resource *res;
if (dev->is_hotplug_bridge)
continue;
@@ -1934,24 +1951,39 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* bridge and devices below it occupy.
*/
res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
- if (!res->parent && available_io > resource_size(res))
- remaining_io -= resource_size(res);
+ align = pci_resource_alignment(dev, res);
+ align = align ? ALIGN(io.start, align) - io.start : 0;
+ used_size = align + resource_size(res);
+ if (!res->parent)
+ io.start = min(io.start + used_size, io.end + 1);
res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
- if (!res->parent && available_mmio > resource_size(res))
- remaining_mmio -= resource_size(res);
+ align = pci_resource_alignment(dev, res);
+ align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
+ used_size = align + resource_size(res);
+ if (!res->parent)
+ mmio.start = min(mmio.start + used_size, mmio.end + 1);
res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
- if (!res->parent && available_mmio_pref > resource_size(res))
- remaining_mmio_pref -= resource_size(res);
+ align = pci_resource_alignment(dev, res);
+ align = align ? ALIGN(mmio_pref.start, align) -
+ mmio_pref.start : 0;
+ used_size = align + resource_size(res);
+ if (!res->parent)
+ mmio_pref.start = min(mmio_pref.start + used_size,
+ mmio_pref.end + 1);
}
+ io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
+ mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
+ mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
+ hotplug_bridges);
+
/*
* Go over devices on this bus and distribute the remaining
* resource space between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
- resource_size_t align, io, mmio, mmio_pref;
struct pci_bus *b;
b = dev->subordinate;
@@ -1963,42 +1995,31 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* hotplug-capable downstream ports taking alignment into
* account.
*/
- align = pci_resource_alignment(bridge, io_res);
- io = div64_ul(available_io, hotplug_bridges);
- io = min(ALIGN(io, align), remaining_io);
- remaining_io -= io;
-
- align = pci_resource_alignment(bridge, mmio_res);
- mmio = div64_ul(available_mmio, hotplug_bridges);
- mmio = min(ALIGN(mmio, align), remaining_mmio);
- remaining_mmio -= mmio;
-
- align = pci_resource_alignment(bridge, mmio_pref_res);
- mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
- mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
- remaining_mmio_pref -= mmio_pref;
+ io.end = io.start + io_per_hp - 1;
+ mmio.end = mmio.start + mmio_per_hp - 1;
+ mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
pci_bus_distribute_available_resources(b, add_list, io, mmio,
mmio_pref);
+
+ io.start += io_per_hp;
+ mmio.start += mmio_per_hp;
+ mmio_pref.start += mmio_pref_per_hp;
}
}
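The reworked distribution operates on real resource ranges: align the window start, advance it past the space consumed by non-hotplug bridges, then split what remains evenly among the hotplug bridges. A standalone sketch of that carving, with plain integers standing in for struct resource and arbitrary example values:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t start = 0x1000, end = 0xffff;  /* window to distribute */
            uint64_t fixed_use = 0x2000;            /* non-hotplug bridges */
            unsigned int hotplug_bridges = 3;

            start = ALIGN_UP(start, 0x1000);        /* bridge alignment */
            start += fixed_use;                     /* space already spoken for */
            printf("per-hotplug share: %#llx\n",
                   (unsigned long long)((end + 1 - start) / hotplug_bridges));
            return 0;
    }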
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
struct list_head *add_list)
{
- resource_size_t available_io, available_mmio, available_mmio_pref;
- const struct resource *res;
+ struct resource available_io, available_mmio, available_mmio_pref;
if (!bridge->is_hotplug_bridge)
return;
/* Take the initial extra resources from the hotplug port */
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
- available_io = resource_size(res);
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
- available_mmio = resource_size(res);
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
- available_mmio_pref = resource_size(res);
+ available_io = bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ available_mmio = bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ available_mmio_pref = bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pci_bus_distribute_available_resources(bridge->subordinate,
add_list, available_io,
@@ -2055,12 +2076,18 @@ again:
/* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
+ int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
- if (fail_res->dev->subordinate)
- res->flags = 0;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
}
free_list(&fail_head);
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 88091bbfe77f..a823b4b8ef8a 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -317,8 +317,15 @@ static ssize_t field ## _show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct switchtec_dev *stdev = to_stdev(dev); \
- return io_string_show(buf, &stdev->mmio_sys_info->field, \
- sizeof(stdev->mmio_sys_info->field)); \
+ struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
+ if (stdev->gen == SWITCHTEC_GEN3) \
+ return io_string_show(buf, &si->gen3.field, \
+ sizeof(si->gen3.field)); \
+ else if (stdev->gen == SWITCHTEC_GEN4) \
+ return io_string_show(buf, &si->gen4.field, \
+ sizeof(si->gen4.field)); \
+ else \
+ return -ENOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)
@@ -326,13 +333,31 @@ static DEVICE_ATTR_RO(field)
DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
-DEVICE_ATTR_SYS_INFO_STR(component_vendor);
+
+static ssize_t component_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
+
+ /* component_vendor field not supported after gen3 */
+ if (stdev->gen != SWITCHTEC_GEN3)
+ return sprintf(buf, "none\n");
+
+ return io_string_show(buf, &si->gen3.component_vendor,
+ sizeof(si->gen3.component_vendor));
+}
+static DEVICE_ATTR_RO(component_vendor);
static ssize_t component_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
- int id = ioread16(&stdev->mmio_sys_info->component_id);
+ int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
+
+ /* component_id field not supported after gen3 */
+ if (stdev->gen != SWITCHTEC_GEN3)
+ return sprintf(buf, "none\n");
return sprintf(buf, "PM%04X\n", id);
}
@@ -342,7 +367,11 @@ static ssize_t component_revision_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct switchtec_dev *stdev = to_stdev(dev);
- int rev = ioread8(&stdev->mmio_sys_info->component_revision);
+ int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
+
+ /* component_revision field not supported after gen3 */
+ if (stdev->gen != SWITCHTEC_GEN3)
+ return sprintf(buf, "255\n");
return sprintf(buf, "%d\n", rev);
}
@@ -450,6 +479,12 @@ static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
rc = -EFAULT;
goto out;
}
+ if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
+ (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
+ !capable(CAP_SYS_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
data += sizeof(stuser->cmd);
rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
@@ -568,8 +603,15 @@ static int ioctl_flash_info(struct switchtec_dev *stdev,
struct switchtec_ioctl_flash_info info = {0};
struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
- info.flash_length = ioread32(&fi->flash_length);
- info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
+ if (stdev->gen == SWITCHTEC_GEN3) {
+ info.flash_length = ioread32(&fi->gen3.flash_length);
+ info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
+ } else if (stdev->gen == SWITCHTEC_GEN4) {
+ info.flash_length = ioread32(&fi->gen4.flash_length);
+ info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
+ } else {
+ return -ENOTSUPP;
+ }
if (copy_to_user(uinfo, &info, sizeof(info)))
return -EFAULT;
@@ -584,75 +626,200 @@ static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
info->length = ioread32(&pi->length);
}
-static int ioctl_flash_part_info(struct switchtec_dev *stdev,
- struct switchtec_ioctl_flash_part_info __user *uinfo)
+static int flash_part_info_gen3(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_part_info *info)
{
- struct switchtec_ioctl_flash_part_info info = {0};
- struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
- struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
+ struct flash_info_regs_gen3 __iomem *fi =
+ &stdev->mmio_flash_info->gen3;
+ struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
u32 active_addr = -1;
- if (copy_from_user(&info, uinfo, sizeof(info)))
- return -EFAULT;
-
- switch (info.flash_partition) {
+ switch (info->flash_partition) {
case SWITCHTEC_IOCTL_PART_CFG0:
active_addr = ioread32(&fi->active_cfg);
- set_fw_info_part(&info, &fi->cfg0);
- if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
- info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ set_fw_info_part(info, &fi->cfg0);
+ if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_CFG1:
active_addr = ioread32(&fi->active_cfg);
- set_fw_info_part(&info, &fi->cfg1);
- if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
- info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ set_fw_info_part(info, &fi->cfg1);
+ if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG0:
active_addr = ioread32(&fi->active_img);
- set_fw_info_part(&info, &fi->img0);
- if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
- info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ set_fw_info_part(info, &fi->img0);
+ if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG1:
active_addr = ioread32(&fi->active_img);
- set_fw_info_part(&info, &fi->img1);
- if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
- info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ set_fw_info_part(info, &fi->img1);
+ if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_NVLOG:
+ set_fw_info_part(info, &fi->nvlog);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR0:
+ set_fw_info_part(info, &fi->vendor[0]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR1:
+ set_fw_info_part(info, &fi->vendor[1]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR2:
+ set_fw_info_part(info, &fi->vendor[2]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR3:
+ set_fw_info_part(info, &fi->vendor[3]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR4:
+ set_fw_info_part(info, &fi->vendor[4]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR5:
+ set_fw_info_part(info, &fi->vendor[5]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR6:
+ set_fw_info_part(info, &fi->vendor[6]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR7:
+ set_fw_info_part(info, &fi->vendor[7]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info->address == active_addr)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+
+ return 0;
+}
+
+static int flash_part_info_gen4(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_part_info *info)
+{
+ struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
+ struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
+ struct active_partition_info_gen4 __iomem *af = &fi->active_flag;
+
+ switch (info->flash_partition) {
+ case SWITCHTEC_IOCTL_PART_MAP_0:
+ set_fw_info_part(info, &fi->map0);
+ break;
+ case SWITCHTEC_IOCTL_PART_MAP_1:
+ set_fw_info_part(info, &fi->map1);
+ break;
+ case SWITCHTEC_IOCTL_PART_KEY_0:
+ set_fw_info_part(info, &fi->key0);
+ if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_KEY_1:
+ set_fw_info_part(info, &fi->key1);
+ if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_BL2_0:
+ set_fw_info_part(info, &fi->bl2_0);
+ if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_BL2_1:
+ set_fw_info_part(info, &fi->bl2_1);
+ if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_CFG0:
+ set_fw_info_part(info, &fi->cfg0);
+ if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_CFG1:
+ set_fw_info_part(info, &fi->cfg1);
+ if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_IMG0:
+ set_fw_info_part(info, &fi->img0);
+ if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
+ break;
+ case SWITCHTEC_IOCTL_PART_IMG1:
+ set_fw_info_part(info, &fi->img1);
+ if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
+ info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
+ info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_NVLOG:
- set_fw_info_part(&info, &fi->nvlog);
+ set_fw_info_part(info, &fi->nvlog);
break;
case SWITCHTEC_IOCTL_PART_VENDOR0:
- set_fw_info_part(&info, &fi->vendor[0]);
+ set_fw_info_part(info, &fi->vendor[0]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR1:
- set_fw_info_part(&info, &fi->vendor[1]);
+ set_fw_info_part(info, &fi->vendor[1]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR2:
- set_fw_info_part(&info, &fi->vendor[2]);
+ set_fw_info_part(info, &fi->vendor[2]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR3:
- set_fw_info_part(&info, &fi->vendor[3]);
+ set_fw_info_part(info, &fi->vendor[3]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR4:
- set_fw_info_part(&info, &fi->vendor[4]);
+ set_fw_info_part(info, &fi->vendor[4]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR5:
- set_fw_info_part(&info, &fi->vendor[5]);
+ set_fw_info_part(info, &fi->vendor[5]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR6:
- set_fw_info_part(&info, &fi->vendor[6]);
+ set_fw_info_part(info, &fi->vendor[6]);
break;
case SWITCHTEC_IOCTL_PART_VENDOR7:
- set_fw_info_part(&info, &fi->vendor[7]);
+ set_fw_info_part(info, &fi->vendor[7]);
break;
default:
return -EINVAL;
}
- if (info.address == active_addr)
- info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
+ return 0;
+}
+
+static int ioctl_flash_part_info(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_part_info __user *uinfo)
+{
+ int ret;
+ struct switchtec_ioctl_flash_part_info info = {0};
+
+ if (copy_from_user(&info, uinfo, sizeof(info)))
+ return -EFAULT;
+
+ if (stdev->gen == SWITCHTEC_GEN3) {
+ ret = flash_part_info_gen3(stdev, &info);
+ if (ret)
+ return ret;
+ } else if (stdev->gen == SWITCHTEC_GEN4) {
+ ret = flash_part_info_gen4(stdev, &info);
+ if (ret)
+ return ret;
+ } else {
+ return -ENOTSUPP;
+ }
if (copy_to_user(uinfo, &info, sizeof(info)))
return -EFAULT;
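
For context, a minimal userspace sketch of exercising the reworked ioctl. The request number and struct layout are assumed to come from the UAPI header <linux/switchtec_ioctl.h>, and /dev/switchtec0 is a hypothetical device node; the driver dispatches to the gen3 or gen4 helper based on the generation recorded at probe time.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/switchtec_ioctl.h>

	int main(void)
	{
		struct switchtec_ioctl_flash_part_info info = {
			.flash_partition = SWITCHTEC_IOCTL_PART_IMG0,
		};
		int fd = open("/dev/switchtec0", O_RDWR); /* hypothetical node */

		if (fd < 0)
			return 1;
		if (ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &info) == 0)
			printf("IMG0 addr %#x len %#x active %#x\n",
			       info.address, info.length, info.active);
		close(fd);
		return 0;
	}
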
@@ -683,11 +850,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
s->part[i] = reg;
}
- for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
- reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
- if (reg != PCI_VENDOR_ID_MICROSEMI)
- break;
-
+ for (i = 0; i < stdev->pff_csr_count; i++) {
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
s->pff[i] = reg;
}
@@ -751,10 +914,13 @@ static const struct event_reg {
EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
+ intercomm_notify_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
@@ -1181,10 +1347,6 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
- if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
- eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
- return 0;
-
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg);
@@ -1231,8 +1393,13 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
check_link_state_events(stdev);
- for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
+ for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
+ if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
+ eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
+ continue;
+
event_count += mask_all_events(stdev, eid);
+ }
if (event_count) {
atomic_inc(&stdev->event_cnt);
@@ -1276,7 +1443,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
if (nvecs < 0)
return nvecs;
- event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
+ event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
if (event_irq < 0 || event_irq >= nvecs)
return -EFAULT;
@@ -1324,16 +1491,16 @@ static void init_pff(struct switchtec_dev *stdev)
stdev->pff_csr_count = i;
reg = ioread32(&pcfg->usp_pff_inst_id);
- if (reg < SWITCHTEC_MAX_PFF_CSR)
+ if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
reg = ioread32(&pcfg->vep_pff_inst_id);
- if (reg < SWITCHTEC_MAX_PFF_CSR)
+ if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
- if (reg < SWITCHTEC_MAX_PFF_CSR)
+ if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
}
}
@@ -1344,12 +1511,13 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
int rc;
void __iomem *map;
unsigned long res_start, res_len;
+ u32 __iomem *part_id;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
return rc;
@@ -1378,7 +1546,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
- stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
+
+ if (stdev->gen == SWITCHTEC_GEN3)
+ part_id = &stdev->mmio_sys_info->gen3.partition_id;
+ else if (stdev->gen == SWITCHTEC_GEN4)
+ part_id = &stdev->mmio_sys_info->gen4.partition_id;
+ else
+ return -ENOTSUPP;
+
+ stdev->partition = ioread8(part_id);
stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
@@ -1420,6 +1596,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
if (IS_ERR(stdev))
return PTR_ERR(stdev);
+ stdev->gen = id->driver_data;
+
rc = switchtec_init_pci(stdev, pdev);
if (rc)
goto err_put;
@@ -1467,7 +1645,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
put_device(&stdev->dev);
}
-#define SWITCHTEC_PCI_DEVICE(device_id) \
+#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
{ \
.vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
@@ -1475,6 +1653,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.subdevice = PCI_ANY_ID, \
.class = (PCI_CLASS_MEMORY_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
}, \
{ \
.vendor = PCI_VENDOR_ID_MICROSEMI, \
@@ -1483,39 +1662,58 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.subdevice = PCI_ANY_ID, \
.class = (PCI_CLASS_BRIDGE_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
}
static const struct pci_device_id switchtec_pci_tbl[] = {
- SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3
- SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3
- SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3
- SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
- SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
- SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
- SWITCHTEC_PCI_DEVICE(0x8541), //PSX 24xG3
- SWITCHTEC_PCI_DEVICE(0x8542), //PSX 32xG3
- SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
- SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
- SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
- SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
- SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3
- SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3
- SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3
- SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3
- SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3
- SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3
- SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3
- SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3
- SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3
- SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3
- SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3
- SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3
- SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3
- SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3
- SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3
- SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3
- SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3
- SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3
+ SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), //PFX 24xG3
+ SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), //PFX 32xG3
+ SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), //PFX 48xG3
+ SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), //PFX 64xG3
+ SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), //PFX 80xG3
+ SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), //PFX 96xG3
+ SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), //PSX 24xG3
+ SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), //PSX 32xG3
+ SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), //PSX 48xG3
+ SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), //PSX 64xG3
+ SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), //PSX 80xG3
+ SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), //PSX 96xG3
+ SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), //PAX 24XG3
+ SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), //PAX 32XG3
+ SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), //PAX 48XG3
+ SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), //PAX 64XG3
+ SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), //PAX 80XG3
+ SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), //PAX 96XG3
+ SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), //PFXL 24XG3
+ SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), //PFXL 32XG3
+ SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), //PFXL 48XG3
+ SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), //PFXL 64XG3
+ SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), //PFXL 80XG3
+ SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), //PFXL 96XG3
+ SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), //PFXI 24XG3
+ SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), //PFXI 32XG3
+ SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), //PFXI 48XG3
+ SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), //PFXI 64XG3
+ SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), //PFXI 80XG3
+ SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), //PFXI 96XG3
+ SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), //PFX 100XG4
+ SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), //PFX 84XG4
+ SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), //PFX 68XG4
+ SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), //PFX 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), //PFX 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), //PFX 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), //PSX 100XG4
+ SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), //PSX 84XG4
+ SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), //PSX 68XG4
+ SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), //PSX 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), //PSX 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), //PSX 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), //PAX 100XG4
+ SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), //PAX 84XG4
+ SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), //PAX 68XG4
+ SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
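
For illustration, each SWITCHTEC_PCI_DEVICE() entry expands to two match records, one per device class, both carrying the generation in driver_data so that switchtec_pci_probe() can recover it via id->driver_data. Roughly, for the first Gen4 ID (the subvendor field is elided in the hunk above and omitted here as well):

	/* SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4) expands roughly to: */
	{
		.vendor      = PCI_VENDOR_ID_MICROSEMI,
		.device      = 0x4000,
		.subdevice   = PCI_ANY_ID,
		.class       = (PCI_CLASS_MEMORY_OTHER << 8),
		.class_mask  = 0xFFFFFFFF,
		.driver_data = SWITCHTEC_GEN4,
	},
	{
		.vendor      = PCI_VENDOR_ID_MICROSEMI,
		.device      = 0x4000,
		.subdevice   = PCI_ANY_ID,
		.class       = (PCI_CLASS_BRIDGE_OTHER << 8),
		.class_mask  = 0xFFFFFFFF,
		.driver_data = SWITCHTEC_GEN4,
	},
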
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index aad8a46605be..85887d885b5f 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
+/*
* Driver for Intel I82092AA PCI-PCMCIA bridge.
*
* (C) 2001 Red Hat, Inc.
@@ -18,7 +18,7 @@
#include <pcmcia/ss.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "i82092aa.h"
#include "i82365.h"
@@ -33,16 +33,16 @@ static const struct pci_device_id i82092aa_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
static struct pci_driver i82092aa_pci_driver = {
- .name = "i82092aa",
- .id_table = i82092aa_pci_ids,
- .probe = i82092aa_pci_probe,
- .remove = i82092aa_pci_remove,
+ .name = "i82092aa",
+ .id_table = i82092aa_pci_ids,
+ .probe = i82092aa_pci_probe,
+ .remove = i82092aa_pci_remove,
};
/* the pccard structure and its functions */
static struct pccard_operations i82092aa_operations = {
- .init = i82092aa_init,
+ .init = i82092aa_init,
.get_status = i82092aa_get_status,
.set_socket = i82092aa_set_socket,
.set_io_map = i82092aa_set_io_map,
@@ -53,57 +53,63 @@ static struct pccard_operations i82092aa_operations = {
struct socket_info {
int number;
- int card_state; /* 0 = no socket,
- 1 = empty socket,
- 2 = card but not initialized,
- 3 = operational card */
- unsigned int io_base; /* base io address of the socket */
-
+ int card_state;
+ /* 0 = no socket,
+ * 1 = empty socket,
+ * 2 = card but not initialized,
+ * 3 = operational card
+ */
+ unsigned int io_base; /* base io address of the socket */
+
struct pcmcia_socket socket;
struct pci_dev *dev; /* The PCI device for the socket */
};
#define MAX_SOCKETS 4
static struct socket_info sockets[MAX_SOCKETS];
-static int socket_count; /* shortcut */
+static int socket_count; /* shortcut */
-static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int i82092aa_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
unsigned char configbyte;
int i, ret;
-
- enter("i82092aa_pci_probe");
-
- if ((ret = pci_enable_device(dev)))
+
+ ret = pci_enable_device(dev);
+ if (ret)
return ret;
-
- pci_read_config_byte(dev, 0x40, &configbyte); /* PCI Configuration Control */
- switch(configbyte&6) {
- case 0:
- socket_count = 2;
- break;
- case 2:
- socket_count = 1;
- break;
- case 4:
- case 6:
- socket_count = 4;
- break;
-
- default:
- printk(KERN_ERR "i82092aa: Oops, you did something we didn't think of.\n");
- ret = -EIO;
- goto err_out_disable;
+
+ /* PCI Configuration Control */
+ pci_read_config_byte(dev, 0x40, &configbyte);
+
+	switch (configbyte & 6) {
+ case 0:
+ socket_count = 2;
+ break;
+ case 2:
+ socket_count = 1;
+ break;
+ case 4:
+ case 6:
+ socket_count = 4;
+ break;
+
+ default:
+ dev_err(&dev->dev,
+ "Oops, you did something we didn't think of.\n");
+ ret = -EIO;
+ goto err_out_disable;
}
- printk(KERN_INFO "i82092aa: configured as a %d socket device.\n", socket_count);
+ dev_info(&dev->dev, "configured as a %d socket device.\n",
+ socket_count);
if (!request_region(pci_resource_start(dev, 0), 2, "i82092aa")) {
ret = -EBUSY;
goto err_out_disable;
}
-
- for (i = 0;i<socket_count;i++) {
+
+ for (i = 0; i < socket_count; i++) {
sockets[i].card_state = 1; /* 1 = present but empty */
sockets[i].io_base = pci_resource_start(dev, 0);
sockets[i].socket.features |= SS_CAP_PCCARD;
@@ -114,65 +120,65 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
sockets[i].socket.owner = THIS_MODULE;
sockets[i].number = i;
-
+
if (card_present(i)) {
sockets[i].card_state = 3;
- dev_dbg(&dev->dev, "i82092aa: slot %i is occupied\n", i);
+ dev_dbg(&dev->dev, "slot %i is occupied\n", i);
} else {
- dev_dbg(&dev->dev, "i82092aa: slot %i is vacant\n", i);
+ dev_dbg(&dev->dev, "slot %i is vacant\n", i);
}
}
-
- /* Now, specifiy that all interrupts are to be done as PCI interrupts */
- configbyte = 0xFF; /* bitmask, one bit per event, 1 = PCI interrupt, 0 = ISA interrupt */
- pci_write_config_byte(dev, 0x50, configbyte); /* PCI Interrupt Routing Register */
+
+	/* Now, specify that all interrupts are to be done as PCI interrupts
+ * bitmask, one bit per event, 1 = PCI interrupt, 0 = ISA interrupt
+ */
+ configbyte = 0xFF;
+
+ /* PCI Interrupt Routing Register */
+ pci_write_config_byte(dev, 0x50, configbyte);
/* Register the interrupt handler */
dev_dbg(&dev->dev, "Requesting interrupt %i\n", dev->irq);
- if ((ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED, "i82092aa", i82092aa_interrupt))) {
- printk(KERN_ERR "i82092aa: Failed to register IRQ %d, aborting\n", dev->irq);
+ ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED,
+ "i82092aa", i82092aa_interrupt);
+ if (ret) {
+ dev_err(&dev->dev, "Failed to register IRQ %d, aborting\n",
+ dev->irq);
goto err_out_free_res;
}
- for (i = 0; i<socket_count; i++) {
+ for (i = 0; i < socket_count; i++) {
sockets[i].socket.dev.parent = &dev->dev;
sockets[i].socket.ops = &i82092aa_operations;
sockets[i].socket.resource_ops = &pccard_nonstatic_ops;
ret = pcmcia_register_socket(&sockets[i].socket);
- if (ret) {
+ if (ret)
goto err_out_free_sockets;
- }
}
- leave("i82092aa_pci_probe");
return 0;
err_out_free_sockets:
if (i) {
- for (i--;i>=0;i--) {
+ for (i--; i >= 0; i--)
pcmcia_unregister_socket(&sockets[i].socket);
- }
}
free_irq(dev->irq, i82092aa_interrupt);
err_out_free_res:
release_region(pci_resource_start(dev, 0), 2);
err_out_disable:
pci_disable_device(dev);
- return ret;
+ return ret;
}
static void i82092aa_pci_remove(struct pci_dev *dev)
{
int i;
- enter("i82092aa_pci_remove");
-
free_irq(dev->irq, i82092aa_interrupt);
for (i = 0; i < socket_count; i++)
pcmcia_unregister_socket(&sockets[i].socket);
-
- leave("i82092aa_pci_remove");
}
static DEFINE_SPINLOCK(port_lock);
@@ -184,44 +190,27 @@ static unsigned char indirect_read(int socket, unsigned short reg)
unsigned short int port;
unsigned char val;
unsigned long flags;
- spin_lock_irqsave(&port_lock,flags);
+
+ spin_lock_irqsave(&port_lock, flags);
reg += socket * 0x40;
port = sockets[socket].io_base;
- outb(reg,port);
+ outb(reg, port);
val = inb(port+1);
- spin_unlock_irqrestore(&port_lock,flags);
+ spin_unlock_irqrestore(&port_lock, flags);
return val;
}
-#if 0
-static unsigned short indirect_read16(int socket, unsigned short reg)
-{
- unsigned short int port;
- unsigned short tmp;
- unsigned long flags;
- spin_lock_irqsave(&port_lock,flags);
- reg = reg + socket * 0x40;
- port = sockets[socket].io_base;
- outb(reg,port);
- tmp = inb(port+1);
- reg++;
- outb(reg,port);
- tmp = tmp | (inb(port+1)<<8);
- spin_unlock_irqrestore(&port_lock,flags);
- return tmp;
-}
-#endif
-
static void indirect_write(int socket, unsigned short reg, unsigned char value)
{
unsigned short int port;
unsigned long flags;
- spin_lock_irqsave(&port_lock,flags);
+
+ spin_lock_irqsave(&port_lock, flags);
reg = reg + socket * 0x40;
- port = sockets[socket].io_base;
- outb(reg,port);
- outb(value,port+1);
- spin_unlock_irqrestore(&port_lock,flags);
+ port = sockets[socket].io_base;
+ outb(reg, port);
+ outb(value, port+1);
+ spin_unlock_irqrestore(&port_lock, flags);
}
static void indirect_setbit(int socket, unsigned short reg, unsigned char mask)
@@ -229,53 +218,58 @@ static void indirect_setbit(int socket, unsigned short reg, unsigned char mask)
unsigned short int port;
unsigned char val;
unsigned long flags;
- spin_lock_irqsave(&port_lock,flags);
+
+ spin_lock_irqsave(&port_lock, flags);
reg = reg + socket * 0x40;
- port = sockets[socket].io_base;
- outb(reg,port);
+ port = sockets[socket].io_base;
+ outb(reg, port);
val = inb(port+1);
val |= mask;
- outb(reg,port);
- outb(val,port+1);
- spin_unlock_irqrestore(&port_lock,flags);
+ outb(reg, port);
+ outb(val, port+1);
+ spin_unlock_irqrestore(&port_lock, flags);
}
-static void indirect_resetbit(int socket, unsigned short reg, unsigned char mask)
+static void indirect_resetbit(int socket,
+ unsigned short reg, unsigned char mask)
{
unsigned short int port;
unsigned char val;
unsigned long flags;
- spin_lock_irqsave(&port_lock,flags);
+
+ spin_lock_irqsave(&port_lock, flags);
reg = reg + socket * 0x40;
- port = sockets[socket].io_base;
- outb(reg,port);
+ port = sockets[socket].io_base;
+ outb(reg, port);
val = inb(port+1);
val &= ~mask;
- outb(reg,port);
- outb(val,port+1);
- spin_unlock_irqrestore(&port_lock,flags);
+ outb(reg, port);
+ outb(val, port+1);
+ spin_unlock_irqrestore(&port_lock, flags);
}
-static void indirect_write16(int socket, unsigned short reg, unsigned short value)
+static void indirect_write16(int socket,
+ unsigned short reg, unsigned short value)
{
unsigned short int port;
unsigned char val;
unsigned long flags;
- spin_lock_irqsave(&port_lock,flags);
+
+ spin_lock_irqsave(&port_lock, flags);
reg = reg + socket * 0x40;
- port = sockets[socket].io_base;
-
- outb(reg,port);
+ port = sockets[socket].io_base;
+
+ outb(reg, port);
val = value & 255;
- outb(val,port+1);
-
+ outb(val, port+1);
+
reg++;
-
- outb(reg,port);
+
+ outb(reg, port);
val = value>>8;
- outb(val,port+1);
- spin_unlock_irqrestore(&port_lock,flags);
+ outb(val, port+1);
+ spin_unlock_irqrestore(&port_lock, flags);
}
/* simple helper functions */
@@ -284,12 +278,12 @@ static int cycle_time = 120;
static int to_cycles(int ns)
{
- if (cycle_time!=0)
+ if (cycle_time != 0)
return ns/cycle_time;
else
return 0;
}
-
+
/* Interrupt handler functionality */
@@ -299,58 +293,61 @@ static irqreturn_t i82092aa_interrupt(int irq, void *dev)
int loopcount = 0;
int handled = 0;
- unsigned int events, active=0;
-
-/* enter("i82092aa_interrupt");*/
-
+ unsigned int events, active = 0;
+
while (1) {
loopcount++;
- if (loopcount>20) {
- printk(KERN_ERR "i82092aa: infinite eventloop in interrupt \n");
+ if (loopcount > 20) {
+			pr_err("i82092aa: infinite event loop in interrupt\n");
break;
}
-
+
active = 0;
-
- for (i=0;i<socket_count;i++) {
+
+ for (i = 0; i < socket_count; i++) {
int csc;
- if (sockets[i].card_state==0) /* Inactive socket, should not happen */
+
+ /* Inactive socket, should not happen */
+ if (sockets[i].card_state == 0)
+ continue;
+
+ /* card status change register */
+ csc = indirect_read(i, I365_CSC);
+
+ if (csc == 0) /* no events on this socket */
continue;
-
- csc = indirect_read(i,I365_CSC); /* card status change register */
-
- if (csc==0) /* no events on this socket */
- continue;
handled = 1;
events = 0;
-
+
if (csc & I365_CSC_DETECT) {
events |= SS_DETECT;
- printk("Card detected in socket %i!\n",i);
- }
-
- if (indirect_read(i,I365_INTCTL) & I365_PC_IOCARD) {
+ dev_info(&sockets[i].dev->dev,
+ "Card detected in socket %i!\n", i);
+ }
+
+ if (indirect_read(i, I365_INTCTL) & I365_PC_IOCARD) {
/* For IO/CARDS, bit 0 means "read the card" */
- events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0;
+ if (csc & I365_CSC_STSCHG)
+ events |= SS_STSCHG;
} else {
/* Check for battery/ready events */
- events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0;
- events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0;
- events |= (csc & I365_CSC_READY) ? SS_READY : 0;
+ if (csc & I365_CSC_BVD1)
+ events |= SS_BATDEAD;
+ if (csc & I365_CSC_BVD2)
+ events |= SS_BATWARN;
+ if (csc & I365_CSC_READY)
+ events |= SS_READY;
}
-
- if (events) {
+
+ if (events)
pcmcia_parse_events(&sockets[i].socket, events);
- }
active |= events;
}
-
- if (active==0) /* no more events to handle */
- break;
-
+
+ if (active == 0) /* no more events to handle */
+ break;
}
return IRQ_RETVAL(handled);
-/* leave("i82092aa_interrupt");*/
}
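
IRQ_RETVAL(handled) maps the accumulated flag to IRQ_HANDLED or IRQ_NONE, which is what lets the core detect spurious interrupts on this shared line. A minimal sketch of the same pattern, with a hypothetical status check:

	#include <linux/interrupt.h>

	static irqreturn_t isr_sketch(int irq, void *dev)
	{
		int handled = 0;

		if (device_raised_event(dev))	/* hypothetical status check */
			handled = 1;

		/* IRQ_HANDLED if handled is nonzero, IRQ_NONE otherwise */
		return IRQ_RETVAL(handled);
	}
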
@@ -358,80 +355,67 @@ static irqreturn_t i82092aa_interrupt(int irq, void *dev)
/* socket functions */
static int card_present(int socketno)
-{
+{
unsigned int val;
- enter("card_present");
-
- if ((socketno<0) || (socketno >= MAX_SOCKETS))
+
+ if ((socketno < 0) || (socketno >= MAX_SOCKETS))
return 0;
if (sockets[socketno].io_base == 0)
return 0;
-
+
val = indirect_read(socketno, 1); /* Interface status register */
- if ((val&12)==12) {
- leave("card_present 1");
+	if ((val & 12) == 12)
return 1;
- }
-
- leave("card_present 0");
+
return 0;
}
static void set_bridge_state(int sock)
{
- enter("set_bridge_state");
- indirect_write(sock, I365_GBLCTL,0x00);
- indirect_write(sock, I365_GENCTL,0x00);
-
- indirect_setbit(sock, I365_INTCTL,0x08);
- leave("set_bridge_state");
-}
-
-
+ indirect_write(sock, I365_GBLCTL, 0x00);
+ indirect_write(sock, I365_GENCTL, 0x00);
+ indirect_setbit(sock, I365_INTCTL, 0x08);
+}
-
static int i82092aa_init(struct pcmcia_socket *sock)
{
int i;
struct resource res = { .start = 0, .end = 0x0fff };
- pccard_io_map io = { 0, 0, 0, 0, 1 };
+ pccard_io_map io = { 0, 0, 0, 0, 1 };
pccard_mem_map mem = { .res = &res, };
-
- enter("i82092aa_init");
-
- for (i = 0; i < 2; i++) {
- io.map = i;
- i82092aa_set_io_map(sock, &io);
+
+ for (i = 0; i < 2; i++) {
+ io.map = i;
+ i82092aa_set_io_map(sock, &io);
}
- for (i = 0; i < 5; i++) {
- mem.map = i;
- i82092aa_set_mem_map(sock, &mem);
+ for (i = 0; i < 5; i++) {
+ mem.map = i;
+ i82092aa_set_mem_map(sock, &mem);
}
-
- leave("i82092aa_init");
+
return 0;
}
-
+
static int i82092aa_get_status(struct pcmcia_socket *socket, u_int *value)
{
- unsigned int sock = container_of(socket, struct socket_info, socket)->number;
+ unsigned int sock = container_of(socket,
+ struct socket_info, socket)->number;
unsigned int status;
-
- enter("i82092aa_get_status");
-
- status = indirect_read(sock,I365_STATUS); /* Interface Status Register */
+
+ /* Interface Status Register */
+ status = indirect_read(sock, I365_STATUS);
+
*value = 0;
-
- if ((status & I365_CS_DETECT) == I365_CS_DETECT) {
+
+ if ((status & I365_CS_DETECT) == I365_CS_DETECT)
*value |= SS_DETECT;
- }
-
+
/* IO cards have a different meaning of bits 0,1 */
/* Also notice the inverse-logic on the bits */
- if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
+ if (indirect_read(sock, I365_INTCTL) & I365_PC_IOCARD) {
/* IO card */
if (!(status & I365_CS_STSCHG))
*value |= SS_STSCHG;
@@ -441,246 +425,238 @@ static int i82092aa_get_status(struct pcmcia_socket *socket, u_int *value)
if (!(status & I365_CS_BVD2))
*value |= SS_BATWARN;
}
-
+
if (status & I365_CS_WRPROT)
(*value) |= SS_WRPROT; /* card is write protected */
-
+
if (status & I365_CS_READY)
(*value) |= SS_READY; /* card is not busy */
-
+
if (status & I365_CS_POWERON)
(*value) |= SS_POWERON; /* power is applied to the card */
- leave("i82092aa_get_status");
return 0;
}
-static int i82092aa_set_socket(struct pcmcia_socket *socket, socket_state_t *state)
+static int i82092aa_set_socket(struct pcmcia_socket *socket,
+ socket_state_t *state)
{
- unsigned int sock = container_of(socket, struct socket_info, socket)->number;
+ struct socket_info *sock_info = container_of(socket, struct socket_info,
+ socket);
+ unsigned int sock = sock_info->number;
unsigned char reg;
-
- enter("i82092aa_set_socket");
-
+
/* First, set the global controller options */
-
+
set_bridge_state(sock);
-
+
/* Values for the IGENC register */
-
+
reg = 0;
- if (!(state->flags & SS_RESET)) /* The reset bit has "inverse" logic */
- reg = reg | I365_PC_RESET;
- if (state->flags & SS_IOCARD)
+
+ /* The reset bit has "inverse" logic */
+ if (!(state->flags & SS_RESET))
+ reg = reg | I365_PC_RESET;
+ if (state->flags & SS_IOCARD)
reg = reg | I365_PC_IOCARD;
-
- indirect_write(sock,I365_INTCTL,reg); /* IGENC, Interrupt and General Control Register */
-
+
+ /* IGENC, Interrupt and General Control Register */
+ indirect_write(sock, I365_INTCTL, reg);
+
/* Power registers */
-
+
reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */
-
+
if (state->flags & SS_PWR_AUTO) {
- printk("Auto power\n");
+ dev_info(&sock_info->dev->dev, "Auto power\n");
reg |= I365_PWR_AUTO; /* automatic power mngmnt */
}
if (state->flags & SS_OUTPUT_ENA) {
- printk("Power Enabled \n");
+ dev_info(&sock_info->dev->dev, "Power Enabled\n");
reg |= I365_PWR_OUT; /* enable power */
}
-
+
switch (state->Vcc) {
- case 0:
- break;
- case 50:
- printk("setting voltage to Vcc to 5V on socket %i\n",sock);
- reg |= I365_VCC_5V;
- break;
- default:
- printk("i82092aa: i82092aa_set_socket called with invalid VCC power value: %i ", state->Vcc);
- leave("i82092aa_set_socket");
- return -EINVAL;
+ case 0:
+ break;
+ case 50:
+ dev_info(&sock_info->dev->dev,
+			 "setting Vcc to 5V on socket %i\n",
+ sock);
+ reg |= I365_VCC_5V;
+ break;
+ default:
+ dev_err(&sock_info->dev->dev,
+ "%s called with invalid VCC power value: %i",
+ __func__, state->Vcc);
+ return -EINVAL;
}
-
-
+
switch (state->Vpp) {
- case 0:
- printk("not setting Vpp on socket %i\n",sock);
- break;
- case 50:
- printk("setting Vpp to 5.0 for socket %i\n",sock);
- reg |= I365_VPP1_5V | I365_VPP2_5V;
- break;
- case 120:
- printk("setting Vpp to 12.0\n");
- reg |= I365_VPP1_12V | I365_VPP2_12V;
- break;
- default:
- printk("i82092aa: i82092aa_set_socket called with invalid VPP power value: %i ", state->Vcc);
- leave("i82092aa_set_socket");
- return -EINVAL;
+ case 0:
+ dev_info(&sock_info->dev->dev,
+ "not setting Vpp on socket %i\n", sock);
+ break;
+ case 50:
+ dev_info(&sock_info->dev->dev,
+ "setting Vpp to 5.0 for socket %i\n", sock);
+ reg |= I365_VPP1_5V | I365_VPP2_5V;
+ break;
+ case 120:
+ dev_info(&sock_info->dev->dev, "setting Vpp to 12.0\n");
+ reg |= I365_VPP1_12V | I365_VPP2_12V;
+ break;
+ default:
+ dev_err(&sock_info->dev->dev,
+ "%s called with invalid VPP power value: %i",
+			__func__, state->Vpp);
+ return -EINVAL;
}
-
- if (reg != indirect_read(sock,I365_POWER)) /* only write if changed */
- indirect_write(sock,I365_POWER,reg);
-
+
+ if (reg != indirect_read(sock, I365_POWER)) /* only write if changed */
+ indirect_write(sock, I365_POWER, reg);
+
/* Enable specific interrupt events */
-
+
reg = 0x00;
- if (state->csc_mask & SS_DETECT) {
+ if (state->csc_mask & SS_DETECT)
reg |= I365_CSC_DETECT;
- }
if (state->flags & SS_IOCARD) {
if (state->csc_mask & SS_STSCHG)
reg |= I365_CSC_STSCHG;
} else {
- if (state->csc_mask & SS_BATDEAD)
+ if (state->csc_mask & SS_BATDEAD)
reg |= I365_CSC_BVD1;
- if (state->csc_mask & SS_BATWARN)
+ if (state->csc_mask & SS_BATWARN)
reg |= I365_CSC_BVD2;
- if (state->csc_mask & SS_READY)
- reg |= I365_CSC_READY;
-
+ if (state->csc_mask & SS_READY)
+ reg |= I365_CSC_READY;
+
}
-
- /* now write the value and clear the (probably bogus) pending stuff by doing a dummy read*/
-
- indirect_write(sock,I365_CSCINT,reg);
- (void)indirect_read(sock,I365_CSC);
- leave("i82092aa_set_socket");
+ /* now write the value and clear the (probably bogus) pending stuff
+ * by doing a dummy read
+ */
+
+ indirect_write(sock, I365_CSCINT, reg);
+ (void)indirect_read(sock, I365_CSC);
+
return 0;
}
-static int i82092aa_set_io_map(struct pcmcia_socket *socket, struct pccard_io_map *io)
+static int i82092aa_set_io_map(struct pcmcia_socket *socket,
+ struct pccard_io_map *io)
{
- unsigned int sock = container_of(socket, struct socket_info, socket)->number;
+ struct socket_info *sock_info = container_of(socket, struct socket_info,
+ socket);
+ unsigned int sock = sock_info->number;
unsigned char map, ioctl;
-
- enter("i82092aa_set_io_map");
-
+
map = io->map;
-
- /* Check error conditions */
- if (map > 1) {
- leave("i82092aa_set_io_map with invalid map");
+
+ /* Check error conditions */
+ if (map > 1)
return -EINVAL;
- }
- if ((io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)){
- leave("i82092aa_set_io_map with invalid io");
+
+ if ((io->start > 0xffff) || (io->stop > 0xffff)
+ || (io->stop < io->start))
return -EINVAL;
- }
- /* Turn off the window before changing anything */
+ /* Turn off the window before changing anything */
if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_IO(map))
indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_IO(map));
-/* printk("set_io_map: Setting range to %x - %x \n",io->start,io->stop); */
-
/* write the new values */
- indirect_write16(sock,I365_IO(map)+I365_W_START,io->start);
- indirect_write16(sock,I365_IO(map)+I365_W_STOP,io->stop);
-
- ioctl = indirect_read(sock,I365_IOCTL) & ~I365_IOCTL_MASK(map);
-
+ indirect_write16(sock, I365_IO(map)+I365_W_START, io->start);
+ indirect_write16(sock, I365_IO(map)+I365_W_STOP, io->stop);
+
+ ioctl = indirect_read(sock, I365_IOCTL) & ~I365_IOCTL_MASK(map);
+
if (io->flags & (MAP_16BIT|MAP_AUTOSZ))
ioctl |= I365_IOCTL_16BIT(map);
-
- indirect_write(sock,I365_IOCTL,ioctl);
-
+
+ indirect_write(sock, I365_IOCTL, ioctl);
+
/* Turn the window back on if needed */
if (io->flags & MAP_ACTIVE)
- indirect_setbit(sock,I365_ADDRWIN,I365_ENA_IO(map));
-
- leave("i82092aa_set_io_map");
+ indirect_setbit(sock, I365_ADDRWIN, I365_ENA_IO(map));
+
return 0;
}
-static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_map *mem)
+static int i82092aa_set_mem_map(struct pcmcia_socket *socket,
+ struct pccard_mem_map *mem)
{
- struct socket_info *sock_info = container_of(socket, struct socket_info, socket);
+ struct socket_info *sock_info = container_of(socket, struct socket_info,
+ socket);
unsigned int sock = sock_info->number;
struct pci_bus_region region;
unsigned short base, i;
unsigned char map;
-
- enter("i82092aa_set_mem_map");
pcibios_resource_to_bus(sock_info->dev->bus, &region, mem->res);
-
+
map = mem->map;
- if (map > 4) {
- leave("i82092aa_set_mem_map: invalid map");
+ if (map > 4)
return -EINVAL;
- }
-
-
- if ( (mem->card_start > 0x3ffffff) || (region.start > region.end) ||
- (mem->speed > 1000) ) {
- leave("i82092aa_set_mem_map: invalid address / speed");
- printk("invalid mem map for socket %i: %llx to %llx with a "
- "start of %x\n",
+
+ if ((mem->card_start > 0x3ffffff) || (region.start > region.end) ||
+ (mem->speed > 1000)) {
+ dev_err(&sock_info->dev->dev,
+ "invalid mem map for socket %i: %llx to %llx with a start of %x\n",
sock,
(unsigned long long)region.start,
(unsigned long long)region.end,
mem->card_start);
return -EINVAL;
}
-
+
/* Turn off the window before changing anything */
if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_MEM(map))
- indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
-
-
-/* printk("set_mem_map: Setting map %i range to %x - %x on socket %i, speed is %i, active = %i \n",map, region.start,region.end,sock,mem->speed,mem->flags & MAP_ACTIVE); */
+ indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
/* write the start address */
base = I365_MEM(map);
i = (region.start >> 12) & 0x0fff;
- if (mem->flags & MAP_16BIT)
+ if (mem->flags & MAP_16BIT)
i |= I365_MEM_16BIT;
if (mem->flags & MAP_0WS)
- i |= I365_MEM_0WS;
- indirect_write16(sock,base+I365_W_START,i);
-
+ i |= I365_MEM_0WS;
+ indirect_write16(sock, base+I365_W_START, i);
+
/* write the stop address */
-
- i= (region.end >> 12) & 0x0fff;
+
+ i = (region.end >> 12) & 0x0fff;
switch (to_cycles(mem->speed)) {
- case 0:
- break;
- case 1:
- i |= I365_MEM_WS0;
- break;
- case 2:
- i |= I365_MEM_WS1;
- break;
- default:
- i |= I365_MEM_WS1 | I365_MEM_WS0;
- break;
+ case 0:
+ break;
+ case 1:
+ i |= I365_MEM_WS0;
+ break;
+ case 2:
+ i |= I365_MEM_WS1;
+ break;
+ default:
+ i |= I365_MEM_WS1 | I365_MEM_WS0;
+ break;
}
-
- indirect_write16(sock,base+I365_W_STOP,i);
-
+
+ indirect_write16(sock, base+I365_W_STOP, i);
+
/* card start */
-
+
i = ((mem->card_start - region.start) >> 12) & 0x3fff;
if (mem->flags & MAP_WRPROT)
i |= I365_MEM_WRPROT;
- if (mem->flags & MAP_ATTRIB) {
-/* printk("requesting attribute memory for socket %i\n",sock);*/
+ if (mem->flags & MAP_ATTRIB)
i |= I365_MEM_REG;
- } else {
-/* printk("requesting normal memory for socket %i\n",sock);*/
- }
- indirect_write16(sock,base+I365_W_OFF,i);
-
+ indirect_write16(sock, base+I365_W_OFF, i);
+
/* Enable the window if necessary */
if (mem->flags & MAP_ACTIVE)
indirect_setbit(sock, I365_ADDRWIN, I365_ENA_MEM(map));
-
- leave("i82092aa_set_mem_map");
+
return 0;
}
@@ -691,11 +667,9 @@ static int i82092aa_module_init(void)
static void i82092aa_module_exit(void)
{
- enter("i82092aa_module_exit");
pci_unregister_driver(&i82092aa_pci_driver);
- if (sockets[0].io_base>0)
- release_region(sockets[0].io_base, 2);
- leave("i82092aa_module_exit");
+ if (sockets[0].io_base > 0)
+ release_region(sockets[0].io_base, 2);
}
module_init(i82092aa_module_init);
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index 4586c43c78e2..0f851acab7e5 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -4,17 +4,6 @@
#include <linux/interrupt.h>
-/* Debuging defines */
-#ifdef NOTRACE
-#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
-#define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
-#else
-#define enter(x) do {} while (0)
-#define leave(x) do {} while (0)
-#endif
-
-
-
/* prototypes */
static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *id);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 773128f411f1..f01a57e5a5f3 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -771,7 +771,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu->reloc_base = smmu_pmu->reg_base;
}
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0)
smmu_pmu->irq = irq;
@@ -814,7 +814,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
if (err) {
dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
err, &res_0->start);
- goto out_cpuhp_err;
+ return err;
}
err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
@@ -833,8 +833,6 @@ static int smmu_pmu_probe(struct platform_device *pdev)
out_unregister:
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-out_cpuhp_err:
- put_cpu();
return err;
}
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 55083c67b2bb..95dca2cb5265 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
- goto ddr_perf_err;
+ goto cpuhp_state_err;
}
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
- cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ goto cpuhp_instance_err;
+ }
/* Request irq */
irq = of_irq_get(np, 0);
@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
return 0;
ddr_perf_err:
- if (pmu->cpuhp_state)
- cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
-
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
ida_simple_remove(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
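
The relabelled error path follows the usual kernel unwind convention: each failure jumps to a label that undoes only what has already succeeded, in reverse order of setup. A generic sketch of the shape, with hypothetical setup_*/undo_* helpers standing in for the cpuhp and IRQ calls:

	extern int setup_a(void), setup_b(void), setup_c(void);
	extern void undo_a(void), undo_b(void);

	static int probe_sketch(void)
	{
		int ret;

		ret = setup_a();	/* e.g. cpuhp_setup_state_multi() */
		if (ret < 0)
			return ret;

		ret = setup_b();	/* e.g. cpuhp_state_add_instance_nocalls() */
		if (ret)
			goto err_undo_a;

		ret = setup_c();	/* e.g. request_irq() */
		if (ret)
			goto err_undo_b;

		return 0;

	err_undo_b:
		undo_b();
	err_undo_a:
		undo_a();
		return ret;
	}
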
@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
irq_set_affinity_hint(pmu->irq, NULL);
perf_pmu_unregister(&pmu->pmu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 96183e31b96a..584de8f807cc 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
+
/*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ * SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ * SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ * SCCL is Aff2[7:0], CCL is Aff1[7:0]
*/
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
u64 mpidr = read_cpuid_mpidr();
-
- if (mpidr & MPIDR_MT_BITMASK) {
- if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
- if (sccl_id)
- *sccl_id = aff2 >> 3;
- if (ccl_id)
- *ccl_id = aff2 & 0x7;
- } else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- }
+ int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ bool mt = mpidr & MPIDR_MT_BITMASK;
+ int sccl, ccl;
+
+ if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+ sccl = aff2 >> 3;
+ ccl = aff2 & 0x7;
+ } else if (mt) {
+ sccl = aff3;
+ ccl = aff2;
} else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ sccl = aff2;
+ ccl = aff1;
}
+
+ if (scclp)
+ *scclp = sccl;
+ if (cclp)
+ *cclp = ccl;
}
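
As a worked example of the encoding described in the comment above, for an MT TSV110 core with a hypothetical Aff2 value:

	int aff2 = 0x2b;	/* hypothetical MPIDR_EL1[Aff2] = 0b0101011 */
	int sccl = aff2 >> 3;	/* = 5, Aff2[7:3] */
	int ccl  = aff2 & 0x7;	/* = 3, Aff2[2:0] */
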
/*
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0263db2ac874..b3ed94b98d9b 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -69,5 +69,6 @@ source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
+source "drivers/phy/intel/Kconfig"
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c96a1afc95bd..310c149a9df5 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,6 +18,7 @@ obj-y += broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
+ intel/ \
lantiq/ \
marvell/ \
motorola/ \
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 3dab79e9d52b..e760d89d3976 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -48,7 +48,8 @@ config PHY_SUN9I_USB
config PHY_SUN50I_USB3
tristate "Allwinner H6 SoC USB3 PHY driver"
- depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM && OF
depends on RESET_CONTROLLER
select GENERIC_PHY
help
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index d3d983c128ea..b29f11c19155 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -50,7 +50,7 @@ config PHY_BCM_NS_USB3
config PHY_NS2_PCIE
tristate "Broadcom Northstar2 PCIe PHY driver"
- depends on OF && MDIO_BUS_MUX_BCM_IPROC
+ depends on (OF && MDIO_BUS_MUX_BCM_IPROC) || (COMPILE_TEST && MDIO_BUS)
select GENERIC_PHY
default ARCH_BCM_IPROC
help
@@ -83,7 +83,7 @@ config PHY_BRCM_SATA
config PHY_BRCM_USB
tristate "Broadcom STB USB PHY driver"
- depends on ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index f453c7d3ffff..c78de546135c 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PHY_NS2_USB_DRD) += phy-bcm-ns2-usbdrd.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
-phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
+phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o phy-brcm-usb-init-synopsys.o
obj-$(CONFIG_PHY_BCM_SR_PCIE) += phy-bcm-sr-pcie.o
obj-$(CONFIG_PHY_BCM_SR_USB) += phy-bcm-sr-usb.o
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 50ac75bbb0c9..4710cfcc3037 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -33,6 +33,7 @@
#define SATA_PHY_CTRL_REG_28NM_SPACE_SIZE 0x8
enum brcm_sata_phy_version {
+ BRCM_SATA_PHY_STB_16NM,
BRCM_SATA_PHY_STB_28NM,
BRCM_SATA_PHY_STB_40NM,
BRCM_SATA_PHY_IPROC_NS2,
@@ -104,10 +105,13 @@ enum sata_phy_regs {
PLL1_ACTRL5 = 0x85,
PLL1_ACTRL6 = 0x86,
PLL1_ACTRL7 = 0x87,
+ PLL1_ACTRL8 = 0x88,
TX_REG_BANK = 0x070,
TX_ACTRL0 = 0x80,
TX_ACTRL0_TXPOL_FLIP = BIT(6),
+ TX_ACTRL5 = 0x85,
+ TX_ACTRL5_SSC_EN = BIT(11),
AEQRX_REG_BANK_0 = 0xd0,
AEQ_CONTROL1 = 0x81,
@@ -116,6 +120,7 @@ enum sata_phy_regs {
AEQ_FRC_EQ = 0x83,
AEQ_FRC_EQ_FORCE = BIT(0),
AEQ_FRC_EQ_FORCE_VAL = BIT(1),
+ AEQ_RFZ_FRC_VAL = BIT(8),
AEQRX_REG_BANK_1 = 0xe0,
AEQRX_SLCAL0_CTRL0 = 0x82,
AEQRX_SLCAL1_CTRL0 = 0x86,
@@ -152,7 +157,28 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_CDR_CONTROL1 = 0x81,
+ RXPMD_RX_PPM_VAL_MASK = 0x1ff,
+ RXPMD_RXPMD_EN_FRC = BIT(12),
+ RXPMD_RXPMD_EN_FRC_VAL = BIT(13),
+ RXPMD_RX_CDR_CDR_PROP_BW = 0x82,
+ RXPMD_G_CDR_PROP_BW_MASK = 0x7,
+ RXPMD_G1_CDR_PROP_BW_SHIFT = 0,
+ RXPMD_G2_CDR_PROP_BW_SHIFT = 3,
+ RXPMD_G3_CDR_PROB_BW_SHIFT = 6,
+ RXPMD_RX_CDR_CDR_ACQ_INTEG_BW = 0x83,
+ RXPMD_G_CDR_ACQ_INT_BW_MASK = 0x7,
+ RXPMD_G1_CDR_ACQ_INT_BW_SHIFT = 0,
+ RXPMD_G2_CDR_ACQ_INT_BW_SHIFT = 3,
+ RXPMD_G3_CDR_ACQ_INT_BW_SHIFT = 6,
+ RXPMD_RX_CDR_CDR_LOCK_INTEG_BW = 0x84,
+ RXPMD_G_CDR_LOCK_INT_BW_MASK = 0x7,
+ RXPMD_G1_CDR_LOCK_INT_BW_SHIFT = 0,
+ RXPMD_G2_CDR_LOCK_INT_BW_SHIFT = 3,
+ RXPMD_G3_CDR_LOCK_INT_BW_SHIFT = 6,
RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
+ RXPMD_MON_CORRECT_EN = BIT(8),
+ RXPMD_MON_MARGIN_VAL_MASK = 0xff,
};
enum sata_phy_ctrl_regs {
@@ -166,6 +192,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
u32 size = 0;
switch (priv->version) {
+ case BRCM_SATA_PHY_STB_16NM:
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_IPROC_NS2:
case BRCM_SATA_PHY_DSL_28NM:
@@ -287,6 +314,94 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
return brcm_stb_sata_rxaeq_init(port);
}
+static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp, value;
+
+ /* Reduce CP tail current to 1/16th of its default value */
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
+
+ /* Turn off CP tail current boost */
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
+
+ /* Set a specific AEQ equalizer value */
+ tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE;
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
+ ~(tmp | AEQ_RFZ_FRC_VAL |
+ AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT),
+ tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT);
+
+ /* Set RX PPM val center frequency */
+ if (port->ssc_en)
+ value = 0x52;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
+ ~RXPMD_RX_PPM_VAL_MASK, value);
+
+	/* Set proportional loop bandwidth for Gen1/2/3 */
+ tmp = RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ RXPMD_G_CDR_PROP_BW_MASK << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ if (port->ssc_en)
+ value = 2 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ 2 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ 2 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ else
+ value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_PROP_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_PROB_BW_SHIFT;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
+ value);
+
+ /* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */
+ tmp = RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+ RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+ RXPMD_G_CDR_ACQ_INT_BW_MASK << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+ if (port->ssc_en)
+ value = 1 << RXPMD_G1_CDR_ACQ_INT_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_ACQ_INT_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
+ ~tmp, value);
+
+ /* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */
+ tmp = RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+ RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+ RXPMD_G_CDR_LOCK_INT_BW_MASK << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+ if (port->ssc_en)
+ value = 1 << RXPMD_G1_CDR_LOCK_INT_BW_SHIFT |
+ 1 << RXPMD_G2_CDR_LOCK_INT_BW_SHIFT |
+ 1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
+ ~tmp, value);
+
+ /* Set no guard band and clamp CDR */
+ tmp = RXPMD_MON_CORRECT_EN | RXPMD_MON_MARGIN_VAL_MASK;
+ if (port->ssc_en)
+ value = 0x51;
+ else
+ value = 0;
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, RXPMD_MON_CORRECT_EN | value);
+
+ /* Turn on/off SSC */
+ brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
+ port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
+
+ return 0;
+}
+
+static int brcm_stb_sata_16nm_init(struct brcm_sata_port *port)
+{
+ return brcm_stb_sata_16nm_ssc_init(port);
+}
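
brcm_sata_phy_wr() itself is outside this hunk; from the call sites it reads as a bank-selected read-modify-write where callers pass the complement of the field mask. A sketch of that assumed semantics only, not the driver's actual implementation:

	/* Assumed behaviour, inferred from the call sites above */
	static void rmw_sketch(u32 *reg, u32 msk, u32 value)
	{
		/* "& msk" clears the target field (callers pass ~field_mask),
		 * "| value" installs the new contents.
		 */
		*reg = (*reg & msk) | value;
	}
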
+
/* NS2 SATA PLL1 defaults were characterized by H/W group */
#define NS2_PLL1_ACTRL2_MAGIC 0x1df8
#define NS2_PLL1_ACTRL3_MAGIC 0x2b00
@@ -544,6 +659,9 @@ static int brcm_sata_phy_init(struct phy *phy)
struct brcm_sata_port *port = phy_get_drvdata(phy);
switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_16NM:
+ rc = brcm_stb_sata_16nm_init(port);
+ break;
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_STB_40NM:
rc = brcm_stb_sata_init(port);
@@ -601,6 +719,8 @@ static const struct phy_ops phy_ops = {
};
static const struct of_device_id brcm_sata_phy_of_match[] = {
+ { .compatible = "brcm,bcm7216-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_STB_16NM },
{ .compatible = "brcm,bcm7445-sata-phy",
.data = (void *)BRCM_SATA_PHY_STB_28NM },
{ .compatible = "brcm,bcm7425-sata-phy",
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
new file mode 100644
index 000000000000..456dc4a100c2
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Broadcom */
+
+/*
+ * This module contains USB PHY initialization for power up and S3 resume
+ * for newer Synopsys based USB hardware first used on the bcm7216.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/soc/brcmstb/brcmstb.h>
+#include "phy-brcm-usb-init.h"
+
+#define PHY_LOCK_TIMEOUT_MS 200
+
+/* Register definitions for syscon piarbctl registers */
+#define PIARBCTL_CAM 0x00
+#define PIARBCTL_SPLITTER 0x04
+#define PIARBCTL_MISC 0x08
+#define PIARBCTL_MISC_SECURE_MASK 0x80000000
+#define PIARBCTL_MISC_USB_SELECT_MASK 0x40000000
+#define PIARBCTL_MISC_USB_4G_SDRAM_MASK 0x20000000
+#define PIARBCTL_MISC_USB_PRIORITY_MASK 0x000f0000
+#define PIARBCTL_MISC_USB_MEM_PAGE_MASK 0x0000f000
+#define PIARBCTL_MISC_CAM1_MEM_PAGE_MASK 0x00000f00
+#define PIARBCTL_MISC_CAM0_MEM_PAGE_MASK 0x000000f0
+#define PIARBCTL_MISC_SATA_PRIORITY_MASK 0x0000000f
+
+#define PIARBCTL_MISC_USB_ONLY_MASK \
+ (PIARBCTL_MISC_USB_SELECT_MASK | \
+ PIARBCTL_MISC_USB_4G_SDRAM_MASK | \
+ PIARBCTL_MISC_USB_PRIORITY_MASK | \
+ PIARBCTL_MISC_USB_MEM_PAGE_MASK)
+
+/* Register definitions for the USB CTRL block */
+#define USB_CTRL_SETUP 0x00
+#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000
+#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000
+#define USB_CTRL_SETUP_tca_drv_sel_MASK 0x01000000
+#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000
+#define USB_CTRL_SETUP_SOFT_SHUTDOWN_MASK 0x00000200
+#define USB_CTRL_SETUP_IPP_MASK 0x00000020
+#define USB_CTRL_SETUP_IOC_MASK 0x00000010
+#define USB_CTRL_USB_PM 0x04
+#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000
+#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000
+#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000
+#define USB_CTRL_USB_PM_STATUS 0x08
+#define USB_CTRL_USB_DEVICE_CTL1 0x10
+#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003
+#define USB_CTRL_TEST_PORT_CTL 0x30
+#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_MASK 0x000000ff
+#define USB_CTRL_TEST_PORT_CTL_TPOUT_SEL_PME_GEN_MASK 0x0000002e
+#define USB_CTRL_TP_DIAG1 0x34
+#define USB_CTLR_TP_DIAG1_wake_MASK 0x00000002
+#define USB_CTRL_CTLR_CSHCR 0x50
+#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK 0x00040000
+
+/* Register definitions for the USB_PHY block in 7211b0 */
+#define USB_PHY_PLL_CTL 0x00
+#define USB_PHY_PLL_CTL_PLL_RESETB_MASK 0x40000000
+#define USB_PHY_PLL_LDO_CTL 0x08
+#define USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK 0x00000004
+#define USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK 0x00000002
+#define USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK 0x00000001
+#define USB_PHY_UTMI_CTL_1 0x04
+#define USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800
+#define USB_PHY_UTMI_CTL_1_PHY_MODE_MASK 0x0000000c
+#define USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT 2
+#define USB_PHY_IDDQ 0x1c
+#define USB_PHY_IDDQ_phy_iddq_MASK 0x00000001
+#define USB_PHY_STATUS 0x20
+#define USB_PHY_STATUS_pll_lock_MASK 0x00000001
+
+/* Register definitions for the MDIO registers in the DWC2 block of
+ * the 7211b0.
+ * NOTE: The PHY's MDIO registers are only accessible through the
+ * legacy DesignWare USB controller even though it's not being used.
+ */
+#define USB_GMDIOCSR 0
+#define USB_GMDIOGEN 4
+
+/* Register definitions for the BDC EC block in 7211b0 */
+#define BDC_EC_AXIRDA 0x0c
+#define BDC_EC_AXIRDA_RTS_MASK 0xf0000000
+#define BDC_EC_AXIRDA_RTS_SHIFT 28
+
+
+static void usb_mdio_write_7211b0(struct brcm_usb_init_params *params,
+ uint8_t addr, uint16_t data)
+{
+ void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+ addr &= 0x1f; /* 5-bit address */
+ brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x59020000 | (addr << 18) | data,
+ usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+}
+
+static uint16_t __maybe_unused usb_mdio_read_7211b0(
+ struct brcm_usb_init_params *params, uint8_t addr)
+{
+ void __iomem *usb_mdio = params->regs[BRCM_REGS_USB_MDIO];
+
+ addr &= 0x1f; /* 5-bit address */
+ brcm_usb_writel(0xffffffff, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x69020000 | (addr << 18), usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ brcm_usb_writel(0x00000000, usb_mdio + USB_GMDIOGEN);
+ while (brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & (1<<31))
+ ;
+ return brcm_usb_readl(usb_mdio + USB_GMDIOCSR) & 0xffff;
+}
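
The busy-waits on bit 31 of USB_GMDIOCSR above have no bound; a hedged sketch of an equivalent wait with a timeout, using readl_poll_timeout() from <linux/iopoll.h> and assuming brcm_usb_readl() is a plain readl() wrapper:

	#include <linux/iopoll.h>

	/* Poll every 10 us, give up after 10 ms; returns 0 or -ETIMEDOUT */
	static int usb_mdio_wait_idle_sketch(void __iomem *usb_mdio)
	{
		u32 csr;

		return readl_poll_timeout(usb_mdio + USB_GMDIOCSR, csr,
					  !(csr & BIT(31)), 10, 10000);
	}
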
+
+static void usb2_eye_fix_7211b0(struct brcm_usb_init_params *params)
+{
+ /* select bank */
+ usb_mdio_write_7211b0(params, 0x1f, 0x80a0);
+
+ /* Set the eye */
+ usb_mdio_write_7211b0(params, 0x0a, 0xc6a0);
+}
+
+static void xhci_soft_reset(struct brcm_usb_init_params *params,
+ int on_off)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ /* Assert reset */
+ if (on_off)
+ USB_CTRL_UNSET(ctrl, USB_PM, XHC_SOFT_RESETB);
+ /* De-assert reset */
+ else
+ USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB);
+}
+
+static void usb_init_ipp(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+ u32 orig_reg;
+
+ pr_debug("%s\n", __func__);
+
+ orig_reg = reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
+ if (params->ipp != 2)
+ /* override ipp strap pin (if it exists) */
+ reg &= ~(USB_CTRL_MASK(SETUP, STRAP_IPP_SEL));
+
+ /* Override the default OC and PP polarity */
+ reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
+ if (params->ioc)
+ reg |= USB_CTRL_MASK(SETUP, IOC);
+ if (params->ipp == 1)
+ reg |= USB_CTRL_MASK(SETUP, IPP);
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ /*
+ * If we're changing IPP, make sure power is off long enough
+ * to turn off any connected devices.
+ */
+ if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
+ msleep(50);
+}
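
usb_init_ipp() only inserts the 50 ms power-off delay when the IPP polarity bit actually changed, by XOR-ing the old and new register values and masking: a bit survives (reg ^ orig_reg) only if it differs between the two. The idiom in isolation, as a sketch with a made-up helper name:

	/* True only if a bit covered by @mask differs between @old and @new. */
	static bool field_changed(u32 old, u32 new, u32 mask)
	{
		return !!((old ^ new) & mask);
	}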
+
+static void syscon_piarbctl_init(struct regmap *rmap)
+{
+ /* Switch from legacy USB OTG controller to new STB USB controller */
+ regmap_update_bits(rmap, PIARBCTL_MISC, PIARBCTL_MISC_USB_ONLY_MASK,
+ PIARBCTL_MISC_USB_SELECT_MASK |
+ PIARBCTL_MISC_USB_4G_SDRAM_MASK);
+}
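
syscon_piarbctl_init() uses regmap_update_bits(), which does a locked read-modify-write: the register is read, bits under the mask are replaced by the corresponding bits of the value (value bits outside the mask are dropped), and the register is rewritten only if it changed. Open-coded and ignoring errors for brevity, the call above amounts to this sketch (the PIARBCTL_* masks are defined elsewhere in the patch):

	static void piarbctl_select_usb(struct regmap *rmap)
	{
		unsigned int val;

		regmap_read(rmap, PIARBCTL_MISC, &val);
		val &= ~PIARBCTL_MISC_USB_ONLY_MASK;	/* clear masked bits */
		val |= (PIARBCTL_MISC_USB_SELECT_MASK |	/* keep only bits under the mask */
			PIARBCTL_MISC_USB_4G_SDRAM_MASK) &
		       PIARBCTL_MISC_USB_ONLY_MASK;
		regmap_write(rmap, PIARBCTL_MISC, val);
	}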
+
+static void usb_init_common(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ pr_debug("%s\n", __func__);
+
+ USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+
+ if (USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ reg |= params->mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+ switch (params->mode) {
+ case USB_CTLR_MODE_HOST:
+ USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ break;
+ default:
+ USB_CTRL_UNSET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ USB_CTRL_SET(ctrl, USB_PM, BDC_SOFT_RESETB);
+ break;
+ }
+}
+
+static void usb_wake_enable_7211b0(struct brcm_usb_init_params *params,
+ bool enable)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ if (enable)
+ USB_CTRL_SET(ctrl, CTLR_CSHCR, ctl_pme_en);
+ else
+ USB_CTRL_UNSET(ctrl, CTLR_CSHCR, ctl_pme_en);
+}
+
+static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+ void __iomem *bdc_ec = params->regs[BRCM_REGS_BDC_EC];
+ int timeout_ms = PHY_LOCK_TIMEOUT_MS;
+ u32 reg;
+
+ if (params->syscon_piarbctl)
+ syscon_piarbctl_init(params->syscon_piarbctl);
+
+ USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
+
+ usb_wake_enable_7211b0(params, false);
+ if (!params->wake_enabled) {
+ /* undo possible suspend settings */
+ brcm_usb_writel(0, usb_phy + USB_PHY_IDDQ);
+ reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+ reg |= USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+
+ /* temporarily enable FSM so PHY comes up properly */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg |= USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+ }
+
+ /* Init the PHY */
+ reg = USB_PHY_PLL_LDO_CTL_AFE_CORERDY_MASK |
+ USB_PHY_PLL_LDO_CTL_AFE_LDO_PWRDWNB_MASK |
+ USB_PHY_PLL_LDO_CTL_AFE_BG_PWRDWNB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_LDO_CTL);
+
+ /* wait for lock */
+ while (timeout_ms-- > 0) {
+ reg = brcm_usb_readl(usb_phy + USB_PHY_STATUS);
+ if (reg & USB_PHY_STATUS_pll_lock_MASK)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Set the PHY_MODE */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg &= ~USB_PHY_UTMI_CTL_1_PHY_MODE_MASK;
+ reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+ /* Fix the incorrect default */
+ reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
+ reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
+ brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
+
+ usb_init_common(params);
+
+ /*
+ * The BDC controller will get occasional failures with
+ * the default "Read Transaction Size" of 6 (1024 bytes).
+ * Set it to 4 (256 bytes).
+ */
+ if ((params->mode != USB_CTLR_MODE_HOST) && bdc_ec) {
+ reg = brcm_usb_readl(bdc_ec + BDC_EC_AXIRDA);
+ reg &= ~BDC_EC_AXIRDA_RTS_MASK;
+ reg |= (0x4 << BDC_EC_AXIRDA_RTS_SHIFT);
+ brcm_usb_writel(reg, bdc_ec + BDC_EC_AXIRDA);
+ }
+
+ /*
+ * Disable FSM, otherwise the PHY will auto suspend when no
+ * device is connected and will be reset on resume.
+ */
+ reg = brcm_usb_readl(usb_phy + USB_PHY_UTMI_CTL_1);
+ reg &= ~USB_PHY_UTMI_CTL_1_POWER_UP_FSM_EN_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+
+ usb2_eye_fix_7211b0(params);
+}
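
The lock loop in usb_init_common_7211b0() gives up silently after PHY_LOCK_TIMEOUT_MS and simply proceeds. A variant of the same wait that reports the timeout, as a sketch; the register and mask come from the patch, PHY_LOCK_TIMEOUT_MS is defined elsewhere in the file, and the pr_warn() is an illustrative addition, not driver behavior:

	static void usb_phy_wait_pll_lock(void __iomem *usb_phy)
	{
		int timeout_ms = PHY_LOCK_TIMEOUT_MS;

		while (timeout_ms-- > 0) {
			if (brcm_usb_readl(usb_phy + USB_PHY_STATUS) &
			    USB_PHY_STATUS_pll_lock_MASK)
				return;
			usleep_range(1000, 2000);
		}
		pr_warn("brcm-usb: PLL failed to lock\n");
	}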
+
+static void usb_init_xhci(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ xhci_soft_reset(params, 0);
+}
+
+static void usb_uninit_common(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+
+ pr_debug("%s\n", __func__);
+
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+}
+
+static void usb_uninit_common_7211b0(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ void __iomem *usb_phy = params->regs[BRCM_REGS_USB_PHY];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ if (params->wake_enabled) {
+ USB_CTRL_SET(ctrl, TEST_PORT_CTL, TPOUT_SEL_PME_GEN);
+ usb_wake_enable_7211b0(params, true);
+ } else {
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+ brcm_usb_writel(0, usb_phy + USB_PHY_PLL_LDO_CTL);
+ reg = brcm_usb_readl(usb_phy + USB_PHY_PLL_CTL);
+ reg &= ~USB_PHY_PLL_CTL_PLL_RESETB_MASK;
+ brcm_usb_writel(reg, usb_phy + USB_PHY_PLL_CTL);
+ brcm_usb_writel(USB_PHY_IDDQ_phy_iddq_MASK,
+ usb_phy + USB_PHY_IDDQ);
+ }
+}
+
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ if (!params->wake_enabled)
+ xhci_soft_reset(params, 1);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg = 0;
+
+ pr_debug("%s\n", __func__);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ return reg;
+}
+
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK(USB_DEVICE_CTL1, PORT_MODE);
+ reg |= mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+}
+
+static const struct brcm_usb_init_ops bcm7216_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+static const struct brcm_usb_init_ops bcm7211b0_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common_7211b0,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common_7211b0,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ params->family_name = "7216";
+ params->ops = &bcm7216_ops;
+}
+
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+{
+ pr_debug("%s\n", __func__);
+
+ params->family_name = "7211";
+ params->ops = &bcm7211b0_ops;
+ params->suspend_with_clocks = true;
+}
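
Each SoC family now fills in a brcm_usb_init_ops table and exposes one brcm_usb_dvr_init_*() entry point; all subsequent phases go through the ops pointers (the inline wrappers added to phy-brcm-usb-init.h later in this patch). A usage sketch, assuming params has already been populated with register mappings and a mode:

	static void example_bringup_7216(struct brcm_usb_init_params *params)
	{
		brcm_usb_dvr_init_7216(params);	/* selects bcm7216_ops */

		/* Null-checked dispatch, as the header wrappers do it. */
		if (params->ops->init_ipp)
			params->ops->init_ipp(params);
		if (params->ops->init_common)
			params->ops->init_common(params);
		if (params->ops->init_xhci)
			params->ops->init_xhci(params);
	}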
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 91b5b09589d6..9391ab42a12b 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -42,6 +42,7 @@
#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
#define USB_CTRL_EBRIDGE 0x0c
#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
+#define USB_CTRL_EBRIDGE_EBR_SCB_SIZE_MASK 0x00000f80 /* option */
#define USB_CTRL_OBRIDGE 0x10
#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000
#define USB_CTRL_MDIO 0x14
@@ -57,6 +58,8 @@
#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 /* option */
#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK 0x30000000 /* option */
#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK 0x00300000 /* option */
+#define USB_CTRL_USB_PM_RMTWKUP_EN_MASK 0x00000001
+#define USB_CTRL_USB_PM_STATUS 0x38
#define USB_CTRL_USB30_CTL1 0x60
#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK 0x00000010
#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK 0x00010000
@@ -126,10 +129,6 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
-#define USB_CTRL_MASK(reg, field) \
- USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
(params->usb_reg_bits_map[USB_CTRL_##reg##_##field##_SELECTOR])
@@ -140,13 +139,6 @@ enum {
usb_ctrl_unset_family(params, USB_CTRL_##reg, \
USB_CTRL_##reg##_##field##_SELECTOR)
-#define USB_CTRL_SET(base, reg, field) \
- usb_ctrl_set(USB_CTRL_REG(base, reg), \
- USB_CTRL_##reg##_##field##_MASK)
-#define USB_CTRL_UNSET(base, reg, field) \
- usb_ctrl_unset(USB_CTRL_REG(base, reg), \
- USB_CTRL_##reg##_##field##_MASK)
-
#define MDIO_USB2 0
#define MDIO_USB3 BIT(31)
@@ -176,6 +168,7 @@ static const struct id_to_type id_to_type_table[] = {
{ 0x33900000, BRCM_FAMILY_3390A0 },
{ 0x72500010, BRCM_FAMILY_7250B0 },
{ 0x72600000, BRCM_FAMILY_7260A0 },
+ { 0x72550000, BRCM_FAMILY_7260A0 },
{ 0x72680000, BRCM_FAMILY_7271A0 },
{ 0x72710000, BRCM_FAMILY_7271A0 },
{ 0x73640000, BRCM_FAMILY_7364A0 },
@@ -401,26 +394,14 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
},
};
-static inline u32 brcmusb_readl(void __iomem *addr)
-{
- return readl(addr);
-}
-
-static inline void brcmusb_writel(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
static inline
void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void __iomem *reg;
mask = params->usb_reg_bits_map[field];
- reg = params->ctrl_regs + reg_offset;
- brcmusb_writel(brcmusb_readl(reg) & ~mask, reg);
+ brcm_usb_ctrl_unset(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
};
static inline
@@ -428,45 +409,27 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void __iomem *reg;
mask = params->usb_reg_bits_map[field];
- reg = params->ctrl_regs + reg_offset;
- brcmusb_writel(brcmusb_readl(reg) | mask, reg);
+ brcm_usb_ctrl_set(params->regs[BRCM_REGS_CTRL] + reg_offset, mask);
};
-static inline void usb_ctrl_set(void __iomem *reg, u32 field)
-{
- u32 value;
-
- value = brcmusb_readl(reg);
- brcmusb_writel(value | field, reg);
-}
-
-static inline void usb_ctrl_unset(void __iomem *reg, u32 field)
-{
- u32 value;
-
- value = brcmusb_readl(reg);
- brcmusb_writel(value & ~field, reg);
-}
-
static u32 brcmusb_usb_mdio_read(void __iomem *ctrl_base, u32 reg, int mode)
{
u32 data;
data = (reg << 16) | mode;
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data |= (1 << 24);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data &= ~(1 << 24);
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- return brcmusb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
+ return brcm_usb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
}
static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
@@ -475,14 +438,14 @@ static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
u32 data;
data = (reg << 16) | val | mode;
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data |= (1 << 25);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
data &= ~(1 << 25);
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
- brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ brcm_usb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
/* wait for the 60MHz parallel to serial shifter */
usleep_range(10, 20);
}
@@ -581,7 +544,7 @@ static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
{
u32 ofs;
int ii;
- void __iomem *ctrl_base = params->ctrl_regs;
+ void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
/*
* On newer B53 based SoC's, the reference clock for the
@@ -662,7 +625,7 @@ static void brcmusb_usb3_ssc_enable(void __iomem *ctrl_base)
static void brcmusb_usb3_phy_workarounds(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl_base = params->ctrl_regs;
+ void __iomem *ctrl_base = params->regs[BRCM_REGS_CTRL];
brcmusb_usb3_pll_fix(ctrl_base);
brcmusb_usb3_pll_54mhz(params);
@@ -704,21 +667,21 @@ static void brcmusb_memc_fix(struct brcm_usb_init_params *params)
static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
{
- void __iomem *xhci_ec_base = params->xhci_ec_regs;
+ void __iomem *xhci_ec_base = params->regs[BRCM_REGS_XHCI_EC];
u32 val;
if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
- brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
- val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+ brcm_usb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
+ val = brcm_usb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
/* set cfg_pick_ss_lock */
val |= (1 << 27);
- brcmusb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+ brcm_usb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
/* Reset USB 3.0 PHY for workaround to take effect */
- USB_CTRL_UNSET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
- USB_CTRL_SET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_UNSET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_CTL1, PHY3_RESETB);
}
static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
@@ -747,7 +710,7 @@ static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
* - default chip/rev.
* NOTE: The minor rev is always ignored.
*/
-static enum brcm_family_type brcmusb_get_family_type(
+static enum brcm_family_type get_family_type(
struct brcm_usb_init_params *params)
{
int last_type = -1;
@@ -775,9 +738,9 @@ static enum brcm_family_type brcmusb_get_family_type(
return last_type;
}
-void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
+static void usb_init_ipp(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
u32 reg;
u32 orig_reg;
@@ -791,7 +754,7 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IPP);
}
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
orig_reg = reg;
if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_CC_DRD_MODE_ENABLE_SEL))
/* Never use the strap, it's going away. */
@@ -799,8 +762,8 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
SETUP,
STRAP_CC_DRD_MODE_ENABLE_SEL));
if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_IPP_SEL))
+ /* override ipp strap pin (if it exists) */
if (params->ipp != 2)
- /* override ipp strap pin (if it exits) */
reg &= ~(USB_CTRL_MASK_FAMILY(params, SETUP,
STRAP_IPP_SEL));
@@ -808,50 +771,38 @@ void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
if (params->ioc)
reg |= USB_CTRL_MASK(SETUP, IOC);
- if (params->ipp == 1 && ((reg & USB_CTRL_MASK(SETUP, IPP)) == 0))
+ if (params->ipp == 1)
reg |= USB_CTRL_MASK(SETUP, IPP);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
/*
* If we're changing IPP, make sure power is off long enough
* to turn off any connected devices.
*/
- if (reg != orig_reg)
+ if ((reg ^ orig_reg) & USB_CTRL_MASK(SETUP, IPP))
msleep(50);
}
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params)
+static void usb_wake_enable(struct brcm_usb_init_params *params,
+ bool enable)
{
- void __iomem *ctrl = params->ctrl_regs;
- u32 reg = 0;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
- if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
- PORT_MODE);
- }
- return reg;
+ if (enable)
+ USB_CTRL_SET(ctrl, USB_PM, RMTWKUP_EN);
+ else
+ USB_CTRL_UNSET(ctrl, USB_PM, RMTWKUP_EN);
}
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
- int mode)
+static void usb_init_common(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
u32 reg;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
- if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
- PORT_MODE);
- reg |= mode;
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
- }
-}
-
-void brcm_usb_init_common(struct brcm_usb_init_params *params)
-{
- u32 reg;
- void __iomem *ctrl = params->ctrl_regs;
+ /* Clear any pending wake conditions */
+ usb_wake_enable(params, false);
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_PM_STATUS));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_PM_STATUS));
/* Take USB out of power down */
if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN)) {
@@ -877,7 +828,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
/* Block auto PLL suspend by USB2 PHY (Sasi) */
USB_CTRL_SET(ctrl, PLL_CTL, PLL_SUSPEND_EN);
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
if (params->selected_family == BRCM_FAMILY_7364A0)
/* Suppress overcurrent indication from USB30 ports for A0 */
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, OC3_DISABLE);
@@ -893,16 +844,16 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN);
if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN))
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
brcmusb_memc_fix(params);
if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
PORT_MODE);
reg |= params->mode;
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
}
if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) {
switch (params->mode) {
@@ -924,10 +875,10 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params)
}
}
-void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+static void usb_init_eohci(struct brcm_usb_init_params *params)
{
u32 reg;
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
USB_CTRL_SET_FAMILY(params, USB_PM, USB20_HC_RESETB);
@@ -940,19 +891,30 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ);
/* Setup the endian bits */
- reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, SETUP));
reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
- brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
if (params->selected_family == BRCM_FAMILY_7271A0)
/* Enable LS keep alive fix for certain keyboards */
USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE);
+
+ if (params->family_id == 0x72550000) {
+ /*
+ * Make the burst size 512 bytes to fix a hardware bug
+ * on the 7255a0. See HW7255-24.
+ */
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, EBRIDGE));
+ reg &= ~USB_CTRL_MASK(EBRIDGE, EBR_SCB_SIZE);
+ reg |= 0x800;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, EBRIDGE));
+ }
}
-void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+static void usb_init_xhci(struct brcm_usb_init_params *params)
{
- void __iomem *ctrl = params->ctrl_regs;
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
/* 1 millisecond - for USB clocks to settle down */
@@ -978,34 +940,80 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
brcmusb_usb3_otp_fix(params);
}
-void brcm_usb_uninit_common(struct brcm_usb_init_params *params)
+static void usb_uninit_common(struct brcm_usb_init_params *params)
{
if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN))
USB_CTRL_SET_FAMILY(params, USB_PM, USB_PWRDN);
if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN))
USB_CTRL_SET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+ if (params->wake_enabled)
+ usb_wake_enable(params, true);
}
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+static void usb_uninit_eohci(struct brcm_usb_init_params *params)
{
- if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
- USB_CTRL_UNSET_FAMILY(params, USB_PM, USB20_HC_RESETB);
}
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+static void usb_uninit_xhci(struct brcm_usb_init_params *params)
{
brcmusb_xhci_soft_reset(params, 1);
- USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+ USB_CTRL_SET(params->regs[BRCM_REGS_CTRL], USB30_PCTL,
+ PHY3_IDDQ_OVERRIDE);
+}
+
+static int usb_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg = 0;
+
+ pr_debug("%s\n", __func__);
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ }
+ return reg;
}
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+static void usb_set_dual_select(struct brcm_usb_init_params *params, int mode)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ pr_debug("%s\n", __func__);
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= mode;
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+}
+
+static const struct brcm_usb_init_ops bcm7445_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common,
+ .init_eohci = usb_init_eohci,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common,
+ .uninit_eohci = usb_uninit_eohci,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params)
{
int fam;
- fam = brcmusb_get_family_type(params);
+ pr_debug("%s\n", __func__);
+
+ fam = get_family_type(params);
params->selected_family = fam;
params->usb_reg_bits_map =
&usb_reg_bits_map_table[fam][0];
params->family_name = family_names[fam];
+ params->ops = &bcm7445_ops;
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index f4f4f6d5d258..899b9eb43fad 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -6,16 +6,50 @@
#ifndef _USB_BRCM_COMMON_INIT_H
#define _USB_BRCM_COMMON_INIT_H
+#include <linux/regmap.h>
+
#define USB_CTLR_MODE_HOST 0
#define USB_CTLR_MODE_DEVICE 1
#define USB_CTLR_MODE_DRD 2
#define USB_CTLR_MODE_TYPEC_PD 3
+enum brcmusb_reg_sel {
+ BRCM_REGS_CTRL = 0,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ BRCM_REGS_USB_PHY,
+ BRCM_REGS_USB_MDIO,
+ BRCM_REGS_BDC_EC,
+ BRCM_REGS_MAX
+};
+
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_MASK(reg, field) \
+ USB_CTRL_##reg##_##field##_MASK
+#define USB_CTRL_SET(base, reg, field) \
+ brcm_usb_ctrl_set(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+#define USB_CTRL_UNSET(base, reg, field) \
+ brcm_usb_ctrl_unset(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+
struct brcm_usb_init_params;
+struct brcm_usb_init_ops {
+ void (*init_ipp)(struct brcm_usb_init_params *params);
+ void (*init_common)(struct brcm_usb_init_params *params);
+ void (*init_eohci)(struct brcm_usb_init_params *params);
+ void (*init_xhci)(struct brcm_usb_init_params *params);
+ void (*uninit_common)(struct brcm_usb_init_params *params);
+ void (*uninit_eohci)(struct brcm_usb_init_params *params);
+ void (*uninit_xhci)(struct brcm_usb_init_params *params);
+ int (*get_dual_select)(struct brcm_usb_init_params *params);
+ void (*set_dual_select)(struct brcm_usb_init_params *params, int mode);
+};
+
struct brcm_usb_init_params {
- void __iomem *ctrl_regs;
- void __iomem *xhci_ec_regs;
+ void __iomem *regs[BRCM_REGS_MAX];
int ioc;
int ipp;
int mode;
@@ -24,19 +58,105 @@ struct brcm_usb_init_params {
int selected_family;
const char *family_name;
const u32 *usb_reg_bits_map;
+ const struct brcm_usb_init_ops *ops;
+ struct regmap *syscon_piarbctl;
+ bool wake_enabled;
+ bool suspend_with_clocks;
+};
+
+void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params);
+void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params);
+
+static inline u32 brcm_usb_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcm_usb_writel(u32 val, void __iomem *addr)
+{
+ /* See brcm_usb_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+static inline void brcm_usb_ctrl_unset(void __iomem *reg, u32 mask)
+{
+ brcm_usb_writel(brcm_usb_readl(reg) & ~(mask), reg);
};
-void brcm_usb_set_family_map(struct brcm_usb_init_params *params);
-int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params);
-void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
- int mode);
-
-void brcm_usb_init_ipp(struct brcm_usb_init_params *ini);
-void brcm_usb_init_common(struct brcm_usb_init_params *ini);
-void brcm_usb_init_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_init_xhci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_common(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini);
-void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini);
+static inline void brcm_usb_ctrl_set(void __iomem *reg, u32 mask)
+{
+ brcm_usb_writel(brcm_usb_readl(reg) | (mask), reg);
+};
+
+static inline void brcm_usb_init_ipp(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_ipp)
+ ini->ops->init_ipp(ini);
+}
+
+static inline void brcm_usb_init_common(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_common)
+ ini->ops->init_common(ini);
+}
+
+static inline void brcm_usb_init_eohci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_eohci)
+ ini->ops->init_eohci(ini);
+}
+
+static inline void brcm_usb_init_xhci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->init_xhci)
+ ini->ops->init_xhci(ini);
+}
+
+static inline void brcm_usb_uninit_common(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_common)
+ ini->ops->uninit_common(ini);
+}
+
+static inline void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_eohci)
+ ini->ops->uninit_eohci(ini);
+}
+
+static inline void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->uninit_xhci)
+ ini->ops->uninit_xhci(ini);
+}
+
+static inline int brcm_usb_get_dual_select(struct brcm_usb_init_params *ini)
+{
+ if (ini->ops->get_dual_select)
+ return ini->ops->get_dual_select(ini);
+ return 0;
+}
+
+static inline void brcm_usb_set_dual_select(struct brcm_usb_init_params *ini,
+ int mode)
+{
+ if (ini->ops->set_dual_select)
+ ini->ops->set_dual_select(ini, mode);
+}
#endif /* _USB_BRCM_COMMON_INIT_H */
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index f5c1f2983a1d..491bbd46c5b3 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
+#include <linux/mfd/syscon.h>
#include "phy-brcm-usb-init.h"
@@ -32,6 +33,12 @@ struct value_to_name_map {
const char *name;
};
+struct match_chip_info {
+ void (*init_func)(struct brcm_usb_init_params *params);
+ u8 required_regs[BRCM_REGS_MAX + 1];
+ u8 optional_reg;
+};
+
static struct value_to_name_map brcm_dr_mode_to_name[] = {
{ USB_CTLR_MODE_HOST, "host" },
{ USB_CTLR_MODE_DEVICE, "peripheral" },
@@ -57,11 +64,26 @@ struct brcm_usb_phy_data {
bool has_xhci;
struct clk *usb_20_clk;
struct clk *usb_30_clk;
+ struct clk *suspend_clk;
struct mutex mutex; /* serialize phy init */
int init_count;
+ int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
};
+static const char *node_reg_names[BRCM_REGS_MAX] = {
+ "ctrl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
+};
+
+static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+
+ pm_wakeup_event(dev, 0);
+
+ return IRQ_HANDLED;
+}
+
static int brcm_usb_phy_init(struct phy *gphy)
{
struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
@@ -74,8 +96,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
*/
mutex_lock(&priv->mutex);
if (priv->init_count++ == 0) {
- clk_enable(priv->usb_20_clk);
- clk_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->usb_20_clk);
+ clk_prepare_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->suspend_clk);
brcm_usb_init_common(&priv->ini);
}
mutex_unlock(&priv->mutex);
@@ -106,8 +129,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
mutex_lock(&priv->mutex);
if (--priv->init_count == 0) {
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
+ clk_disable_unprepare(priv->suspend_clk);
}
mutex_unlock(&priv->mutex);
phy->inited = false;
@@ -194,7 +218,7 @@ static ssize_t dual_select_store(struct device *dev,
res = name_to_value(&brcm_dual_mode_to_name[0],
ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value);
if (!res) {
- brcm_usb_init_set_dual_select(&priv->ini, value);
+ brcm_usb_set_dual_select(&priv->ini, value);
res = len;
}
mutex_unlock(&sysfs_lock);
@@ -209,7 +233,7 @@ static ssize_t dual_select_show(struct device *dev,
int value;
mutex_lock(&sysfs_lock);
- value = brcm_usb_init_get_dual_select(&priv->ini);
+ value = brcm_usb_get_dual_select(&priv->ini);
mutex_unlock(&sysfs_lock);
return sprintf(buf, "%s\n",
value_to_name(&brcm_dual_mode_to_name[0],
@@ -228,15 +252,106 @@ static const struct attribute_group brcm_usb_phy_group = {
.attrs = brcm_usb_phy_attrs,
};
-static int brcm_usb_phy_dvr_init(struct device *dev,
+static struct match_chip_info chip_info_7216 = {
+ .init_func = &brcm_usb_dvr_init_7216,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ -1,
+ },
+};
+
+static struct match_chip_info chip_info_7211b0 = {
+ .init_func = &brcm_usb_dvr_init_7211b0,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ BRCM_REGS_USB_PHY,
+ BRCM_REGS_USB_MDIO,
+ -1,
+ },
+ .optional_reg = BRCM_REGS_BDC_EC,
+};
+
+static struct match_chip_info chip_info_7445 = {
+ .init_func = &brcm_usb_dvr_init_7445,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ -1,
+ },
+};
+
+static const struct of_device_id brcm_usb_dt_ids[] = {
+ {
+ .compatible = "brcm,bcm7216-usb-phy",
+ .data = &chip_info_7216,
+ },
+ {
+ .compatible = "brcm,bcm7211-usb-phy",
+ .data = &chip_info_7211b0,
+ },
+ {
+ .compatible = "brcm,brcmstb-usb-phy",
+ .data = &chip_info_7445,
+ },
+ { /* sentinel */ }
+};
+
+static int brcm_usb_get_regs(struct platform_device *pdev,
+ enum brcmusb_reg_sel regs,
+ struct brcm_usb_init_params *ini,
+ bool optional)
+{
+ struct resource *res;
+
+ /* Older DT nodes have ctrl and optional xhci_ec by index only */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ node_reg_names[regs]);
+ if (res == NULL) {
+ if (regs == BRCM_REGS_CTRL) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ } else if (regs == BRCM_REGS_XHCI_EC) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /* XHCI_EC registers are optional */
+ if (res == NULL)
+ return 0;
+ }
+ if (res == NULL) {
+ if (optional) {
+ dev_dbg(&pdev->dev,
+ "Optional reg %s not found\n",
+ node_reg_names[regs]);
+ return 0;
+ }
+ dev_err(&pdev->dev, "can't get %s base addr\n",
+ node_reg_names[regs]);
+ return 1;
+ }
+ }
+ ini->regs[regs] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ini->regs[regs])) {
+ dev_err(&pdev->dev, "can't map %s register space\n",
+ node_reg_names[regs]);
+ return 1;
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
struct brcm_usb_phy_data *priv,
struct device_node *dn)
{
- struct phy *gphy;
+ struct device *dev = &pdev->dev;
+ struct phy *gphy = NULL;
int err;
priv->usb_20_clk = of_clk_get_by_name(dn, "sw_usb");
if (IS_ERR(priv->usb_20_clk)) {
+ if (PTR_ERR(priv->usb_20_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev, "Clock not found in Device Tree\n");
priv->usb_20_clk = NULL;
}
@@ -267,6 +382,8 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
priv->usb_30_clk = of_clk_get_by_name(dn, "sw_usb3");
if (IS_ERR(priv->usb_30_clk)) {
+ if (PTR_ERR(priv->usb_30_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev,
"USB3.0 clock not found in Device Tree\n");
priv->usb_30_clk = NULL;
@@ -275,18 +392,46 @@ static int brcm_usb_phy_dvr_init(struct device *dev,
if (err)
return err;
}
+
+ priv->suspend_clk = clk_get(dev, "usb0_freerun");
+ if (IS_ERR(priv->suspend_clk)) {
+ if (PTR_ERR(priv->suspend_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "Suspend Clock not found in Device Tree\n");
+ priv->suspend_clk = NULL;
+ }
+
+ priv->wake_irq = platform_get_irq_byname(pdev, "wake");
+ if (priv->wake_irq < 0)
+ priv->wake_irq = platform_get_irq_byname(pdev, "wakeup");
+ if (priv->wake_irq >= 0) {
+ err = devm_request_irq(dev, priv->wake_irq,
+ brcm_usb_phy_wake_isr, 0,
+ dev_name(dev), dev);
+ if (err < 0)
+ return err;
+ device_set_wakeup_capable(dev, 1);
+ } else {
+ dev_info(dev,
+ "Wake interrupt missing, system wake not supported\n");
+ }
+
return 0;
}
static int brcm_usb_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct brcm_usb_phy_data *priv;
struct phy_provider *phy_provider;
struct device_node *dn = pdev->dev.of_node;
int err;
const char *mode;
+ const struct of_device_id *match;
+ void (*dvr_init)(struct brcm_usb_init_params *params);
+ const struct match_chip_info *info;
+ struct regmap *rmap;
+ int x;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -295,30 +440,14 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
priv->ini.family_id = brcmstb_get_family_id();
priv->ini.product_id = brcmstb_get_product_id();
- brcm_usb_set_family_map(&priv->ini);
+
+ match = of_match_node(brcm_usb_dt_ids, dev->of_node);
+ info = match->data;
+ dvr_init = info->init_func;
+ (*dvr_init)(&priv->ini);
+
dev_dbg(dev, "Best mapping table is for %s\n",
priv->ini.family_name);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "can't get USB_CTRL base address\n");
- return -EINVAL;
- }
- priv->ini.ctrl_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->ini.ctrl_regs)) {
- dev_err(dev, "can't map CTRL register space\n");
- return -EINVAL;
- }
-
- /* The XHCI EC registers are optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- priv->ini.xhci_ec_regs =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->ini.xhci_ec_regs)) {
- dev_err(dev, "can't map XHCI EC register space\n");
- return -EINVAL;
- }
- }
of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp);
of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc);
@@ -335,7 +464,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (of_property_read_bool(dn, "brcm,has-eohci"))
priv->has_eohci = true;
- err = brcm_usb_phy_dvr_init(dev, priv, dn);
+ for (x = 0; x < BRCM_REGS_MAX; x++) {
+ if (info->required_regs[x] >= BRCM_REGS_MAX)
+ break;
+
+ err = brcm_usb_get_regs(pdev, info->required_regs[x],
+ &priv->ini, false);
+ if (err)
+ return -EINVAL;
+ }
+ if (info->optional_reg) {
+ err = brcm_usb_get_regs(pdev, info->optional_reg,
+ &priv->ini, true);
+ if (err)
+ return -EINVAL;
+ }
+
+ err = brcm_usb_phy_dvr_init(pdev, priv, dn);
if (err)
return err;
@@ -354,14 +499,23 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (err)
dev_warn(dev, "Error creating sysfs attributes\n");
+ /* Get piarbctl syscon if it exists */
+ rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "syscon-piarbctl");
+ if (IS_ERR(rmap))
+ rmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "brcm,syscon-piarbctl");
+ if (!IS_ERR(rmap))
+ priv->ini.syscon_piarbctl = rmap;
+
/* start with everything off */
if (priv->has_xhci)
brcm_usb_uninit_xhci(&priv->ini);
if (priv->has_eohci)
brcm_usb_uninit_eohci(&priv->ini);
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
@@ -381,8 +535,28 @@ static int brcm_usb_phy_suspend(struct device *dev)
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ priv->ini.wake_enabled = device_may_wakeup(dev);
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+
+ /*
+ * Handle the clocks unless they are needed for wake. This must
+ * work with both the older clock layout (XHCI on the 3.0 clock,
+ * EOHCI on the 2.0 clock) and the newer one (XHCI on both the
+ * 2.0 and 3.0 clocks).
+ */
+ if (!priv->ini.suspend_with_clocks) {
+ if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited ||
+ !priv->has_eohci)
+ clk_disable_unprepare(priv->usb_20_clk);
+ }
+ if (priv->wake_irq >= 0)
+ enable_irq_wake(priv->wake_irq);
}
return 0;
}
@@ -391,8 +565,8 @@ static int brcm_usb_phy_resume(struct device *dev)
{
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
- clk_enable(priv->usb_20_clk);
- clk_enable(priv->usb_30_clk);
+ clk_prepare_enable(priv->usb_20_clk);
+ clk_prepare_enable(priv->usb_30_clk);
brcm_usb_init_ipp(&priv->ini);
/*
@@ -400,18 +574,22 @@ static int brcm_usb_phy_resume(struct device *dev)
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ if (priv->wake_irq >= 0)
+ disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
if (priv->phys[BRCM_USB_PHY_2_0].inited) {
brcm_usb_init_eohci(&priv->ini);
} else if (priv->has_eohci) {
brcm_usb_uninit_eohci(&priv->ini);
- clk_disable(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
}
if (priv->phys[BRCM_USB_PHY_3_0].inited) {
brcm_usb_init_xhci(&priv->ini);
} else if (priv->has_xhci) {
brcm_usb_uninit_xhci(&priv->ini);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
+ if (!priv->has_eohci)
+ clk_disable_unprepare(priv->usb_20_clk);
}
} else {
if (priv->has_xhci)
@@ -419,10 +597,10 @@ static int brcm_usb_phy_resume(struct device *dev)
if (priv->has_eohci)
brcm_usb_uninit_eohci(&priv->ini);
brcm_usb_uninit_common(&priv->ini);
- clk_disable(priv->usb_20_clk);
- clk_disable(priv->usb_30_clk);
+ clk_disable_unprepare(priv->usb_20_clk);
+ clk_disable_unprepare(priv->usb_30_clk);
}
-
+ priv->ini.wake_enabled = false;
return 0;
}
#endif /* CONFIG_PM_SLEEP */
@@ -431,11 +609,6 @@ static const struct dev_pm_ops brcm_usb_phy_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(brcm_usb_phy_suspend, brcm_usb_phy_resume)
};
-static const struct of_device_id brcm_usb_dt_ids[] = {
- { .compatible = "brcm,brcmstb-usb-phy" },
- { /* sentinel */ }
-};
-
MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
static struct platform_driver brcm_usb_driver = {
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index de10402f2931..a5c08e5bd2bf 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -22,48 +22,134 @@
#include <dt-bindings/phy/phy.h>
/* PHY register offsets */
-#define SIERRA_PHY_PLL_CFG (0xc00e << 2)
-#define SIERRA_DET_STANDEC_A (0x4000 << 2)
-#define SIERRA_DET_STANDEC_B (0x4001 << 2)
-#define SIERRA_DET_STANDEC_C (0x4002 << 2)
-#define SIERRA_DET_STANDEC_D (0x4003 << 2)
-#define SIERRA_DET_STANDEC_E (0x4004 << 2)
-#define SIERRA_PSM_LANECAL (0x4008 << 2)
-#define SIERRA_PSM_DIAG (0x4015 << 2)
-#define SIERRA_PSC_TX_A0 (0x4028 << 2)
-#define SIERRA_PSC_TX_A1 (0x4029 << 2)
-#define SIERRA_PSC_TX_A2 (0x402A << 2)
-#define SIERRA_PSC_TX_A3 (0x402B << 2)
-#define SIERRA_PSC_RX_A0 (0x4030 << 2)
-#define SIERRA_PSC_RX_A1 (0x4031 << 2)
-#define SIERRA_PSC_RX_A2 (0x4032 << 2)
-#define SIERRA_PSC_RX_A3 (0x4033 << 2)
-#define SIERRA_PLLCTRL_SUBRATE (0x403A << 2)
-#define SIERRA_PLLCTRL_GEN_D (0x403E << 2)
-#define SIERRA_DRVCTRL_ATTEN (0x406A << 2)
-#define SIERRA_CLKPATHCTRL_TMR (0x4081 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE1 (0x4087 << 2)
-#define SIERRA_RX_CREQ_FLTR_A_MODE0 (0x4088 << 2)
-#define SIERRA_CREQ_CCLKDET_MODE01 (0x408E << 2)
-#define SIERRA_RX_CTLE_MAINTENANCE (0x4091 << 2)
-#define SIERRA_CREQ_FSMCLK_SEL (0x4092 << 2)
-#define SIERRA_CTLELUT_CTRL (0x4098 << 2)
-#define SIERRA_DFE_ECMP_RATESEL (0x40C0 << 2)
-#define SIERRA_DFE_SMP_RATESEL (0x40C1 << 2)
-#define SIERRA_DEQ_VGATUNE_CTRL (0x40E1 << 2)
-#define SIERRA_TMRVAL_MODE3 (0x416E << 2)
-#define SIERRA_TMRVAL_MODE2 (0x416F << 2)
-#define SIERRA_TMRVAL_MODE1 (0x4170 << 2)
-#define SIERRA_TMRVAL_MODE0 (0x4171 << 2)
-#define SIERRA_PICNT_MODE1 (0x4174 << 2)
-#define SIERRA_CPI_OUTBUF_RATESEL (0x417C << 2)
-#define SIERRA_LFPSFILT_NS (0x418A << 2)
-#define SIERRA_LFPSFILT_RD (0x418B << 2)
-#define SIERRA_LFPSFILT_MP (0x418C << 2)
-#define SIERRA_SDFILT_H2L_A (0x4191 << 2)
-
-#define SIERRA_MACRO_ID 0x00007364
-#define SIERRA_MAX_LANES 4
+#define SIERRA_COMMON_CDB_OFFSET 0x0
+#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
+#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
+#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+
+#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x4000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
+
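SIERRA_LANE_CDB_OFFSET() derives each lane's register block address from two per-SoC shifts supplied by cdns_sierra_data. A worked expansion as a sketch, using hypothetical shift values (the real ones come from the match data):

	/*
	 * With block_offset_shift = 0 and reg_offset_shift = 1 (made-up
	 * values), lane 2 resolves to:
	 *   (0x4000 << 0) + ((2 << 9) << 1) = 0x4000 + 0x800 = 0x4800
	 * i.e. lane blocks start at 0x4000 and are spaced
	 * (0x200 << reg_offset_shift) bytes apart.
	 */
	static_assert(SIERRA_LANE_CDB_OFFSET(2, 0, 1) == 0x4800);
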
+#define SIERRA_DET_STANDEC_A_PREG 0x000
+#define SIERRA_DET_STANDEC_B_PREG 0x001
+#define SIERRA_DET_STANDEC_C_PREG 0x002
+#define SIERRA_DET_STANDEC_D_PREG 0x003
+#define SIERRA_DET_STANDEC_E_PREG 0x004
+#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
+#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_TX_A0_PREG 0x028
+#define SIERRA_PSC_TX_A1_PREG 0x029
+#define SIERRA_PSC_TX_A2_PREG 0x02A
+#define SIERRA_PSC_TX_A3_PREG 0x02B
+#define SIERRA_PSC_RX_A0_PREG 0x030
+#define SIERRA_PSC_RX_A1_PREG 0x031
+#define SIERRA_PSC_RX_A2_PREG 0x032
+#define SIERRA_PSC_RX_A3_PREG 0x033
+#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
+#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
+#define SIERRA_PLLCTRL_STATUS_PREG 0x044
+#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
+#define SIERRA_DFE_BIASTRIM_PREG 0x04C
+#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
+#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
+#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
+#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
+#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
+#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
+#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
+#define SIERRA_CREQ_SPARE_PREG 0x096
+#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
+#define SIERRA_CTLELUT_CTRL_PREG 0x098
+#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
+#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
+#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
+#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8
+#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9
+#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD
+#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE
+#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0
+#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8
+#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0
+#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1
+#define SIERRA_DEQ_GLUT0 0x0E8
+#define SIERRA_DEQ_GLUT1 0x0E9
+#define SIERRA_DEQ_GLUT2 0x0EA
+#define SIERRA_DEQ_GLUT3 0x0EB
+#define SIERRA_DEQ_GLUT4 0x0EC
+#define SIERRA_DEQ_GLUT5 0x0ED
+#define SIERRA_DEQ_GLUT6 0x0EE
+#define SIERRA_DEQ_GLUT7 0x0EF
+#define SIERRA_DEQ_GLUT8 0x0F0
+#define SIERRA_DEQ_GLUT9 0x0F1
+#define SIERRA_DEQ_GLUT10 0x0F2
+#define SIERRA_DEQ_GLUT11 0x0F3
+#define SIERRA_DEQ_GLUT12 0x0F4
+#define SIERRA_DEQ_GLUT13 0x0F5
+#define SIERRA_DEQ_GLUT14 0x0F6
+#define SIERRA_DEQ_GLUT15 0x0F7
+#define SIERRA_DEQ_GLUT16 0x0F8
+#define SIERRA_DEQ_ALUT0 0x108
+#define SIERRA_DEQ_ALUT1 0x109
+#define SIERRA_DEQ_ALUT2 0x10A
+#define SIERRA_DEQ_ALUT3 0x10B
+#define SIERRA_DEQ_ALUT4 0x10C
+#define SIERRA_DEQ_ALUT5 0x10D
+#define SIERRA_DEQ_ALUT6 0x10E
+#define SIERRA_DEQ_ALUT7 0x10F
+#define SIERRA_DEQ_ALUT8 0x110
+#define SIERRA_DEQ_ALUT9 0x111
+#define SIERRA_DEQ_ALUT10 0x112
+#define SIERRA_DEQ_ALUT11 0x113
+#define SIERRA_DEQ_ALUT12 0x114
+#define SIERRA_DEQ_ALUT13 0x115
+#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
+#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_PICTRL_PREG 0x161
+#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
+#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
+#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
+#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
+#define SIERRA_LFPSFILT_NS_PREG 0x18A
+#define SIERRA_LFPSFILT_RD_PREG 0x18B
+#define SIERRA_LFPSFILT_MP_PREG 0x18C
+#define SIERRA_SIGDET_SUPPORT_PREG 0x190
+#define SIERRA_SDFILT_H2L_A_PREG 0x191
+#define SIERRA_SDFILT_L2H_PREG 0x193
+#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
+#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
+#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
+#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
+
+#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PLL_CFG 0xe
+
+#define SIERRA_MACRO_ID 0x00007364
+#define SIERRA_MAX_LANES 16
+#define PLL_LOCK_TIME 100000
+
+static const struct reg_field macro_id_type =
+ REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
+static const struct reg_field phy_pll_cfg_1 =
+ REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pllctrl_lock =
+ REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
struct cdns_sierra_inst {
struct phy *phy;
@@ -80,53 +166,172 @@ struct cdns_reg_pairs {
struct cdns_sierra_data {
u32 id_value;
- u32 pcie_regs;
- u32 usb_regs;
- struct cdns_reg_pairs *pcie_vals;
- struct cdns_reg_pairs *usb_vals;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ u32 pcie_cmn_regs;
+ u32 pcie_ln_regs;
+ u32 usb_cmn_regs;
+ u32 usb_ln_regs;
+ struct cdns_reg_pairs *pcie_cmn_vals;
+ struct cdns_reg_pairs *pcie_ln_vals;
+ struct cdns_reg_pairs *usb_cmn_vals;
+ struct cdns_reg_pairs *usb_ln_vals;
};
-struct cdns_sierra_phy {
+struct cdns_regmap_cdb_context {
struct device *dev;
void __iomem *base;
+ u8 reg_offset_shift;
+};
+
+struct cdns_sierra_phy {
+ struct device *dev;
+ struct regmap *regmap;
struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
+ struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_common_cdb;
+ struct regmap_field *macro_id_type;
+ struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
struct clk *clk;
+ struct clk *cmn_refclk_dig_div;
+ struct clk *cmn_refclk1_dig_div;
int nsubnodes;
+ u32 num_lanes;
bool autoconf;
};
-static void cdns_sierra_phy_init(struct phy *gphy)
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ writew(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ *val = readw(ctx->base + offset);
+ return 0;
+}
+
+#define SIERRA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static struct regmap_config cdns_sierra_lane_cdb_config[] = {
+ SIERRA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static struct regmap_config cdns_sierra_common_cdb_config = {
+ .name = "sierra_common_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static struct regmap_config cdns_sierra_phy_config_ctrl_config = {
+ .name = "sierra_phy_config_ctrl",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ struct regmap *regmap;
int i, j;
- struct cdns_reg_pairs *vals;
- u32 num_regs;
+ struct cdns_reg_pairs *cmn_vals, *ln_vals;
+ u32 num_cmn_regs, num_ln_regs;
+
+ /* Initialise the PHY registers, unless auto configured */
+ if (phy->autoconf)
+ return 0;
+ clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
+ clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
if (ins->phy_type == PHY_TYPE_PCIE) {
- num_regs = phy->init_data->pcie_regs;
- vals = phy->init_data->pcie_vals;
+ num_cmn_regs = phy->init_data->pcie_cmn_regs;
+ num_ln_regs = phy->init_data->pcie_ln_regs;
+ cmn_vals = phy->init_data->pcie_cmn_vals;
+ ln_vals = phy->init_data->pcie_ln_vals;
} else if (ins->phy_type == PHY_TYPE_USB3) {
- num_regs = phy->init_data->usb_regs;
- vals = phy->init_data->usb_vals;
+ num_cmn_regs = phy->init_data->usb_cmn_regs;
+ num_ln_regs = phy->init_data->usb_ln_regs;
+ cmn_vals = phy->init_data->usb_cmn_vals;
+ ln_vals = phy->init_data->usb_ln_vals;
} else {
- return;
+ return -EINVAL;
}
- for (i = 0; i < ins->num_lanes; i++)
- for (j = 0; j < num_regs ; j++)
- writel(vals[j].val, phy->base +
- vals[j].off + (i + ins->mlane) * 0x800);
+
+ regmap = phy->regmap_common_cdb;
+ for (j = 0; j < num_cmn_regs ; j++)
+ regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+
+ for (i = 0; i < ins->num_lanes; i++) {
+ for (j = 0; j < num_ln_regs ; j++) {
+ regmap = phy->regmap_lane_cdb[i + ins->mlane];
+ regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ }
+ }
+
+ return 0;
}
static int cdns_sierra_phy_on(struct phy *gphy)
{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+ struct device *dev = sp->dev;
+ u32 val;
+ int ret;
/* Take the PHY lane group out of reset */
- return reset_control_deassert(ins->lnk_rst);
+ ret = reset_control_deassert(ins->lnk_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY lane out of reset\n");
+ return ret;
+ }
+
+ ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
+ val, val, 1000, PLL_LOCK_TIME);
+ if (ret < 0)
+ dev_err(dev, "PLL lock of lane failed\n");
+
+ return ret;
}
static int cdns_sierra_phy_off(struct phy *gphy)
@@ -136,9 +341,20 @@ static int cdns_sierra_phy_off(struct phy *gphy)
return reset_control_assert(ins->lnk_rst);
}
+static int cdns_sierra_phy_reset(struct phy *gphy)
+{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
+
+ reset_control_assert(sp->phy_rst);
+ reset_control_deassert(sp->phy_rst);
+ return 0;
+};
+
static const struct phy_ops ops = {
+ .init = cdns_sierra_phy_init,
.power_on = cdns_sierra_phy_on,
.power_off = cdns_sierra_phy_off,
+ .reset = cdns_sierra_phy_reset,
.owner = THIS_MODULE,
};
@@ -159,41 +375,152 @@ static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
static const struct of_device_id cdns_sierra_id_table[];
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+ u32 block_offset, u8 reg_offset_shift,
+ const struct regmap_config *config)
+{
+ struct cdns_regmap_cdb_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->base = base + block_offset;
+ ctx->reg_offset_shift = reg_offset_shift;
+
+ return devm_regmap_init(dev, NULL, ctx, config);
+}
+
+static int cdns_regfield_init(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+ int i;
+
+ regmap = sp->regmap_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
+ if (IS_ERR(field)) {
+ dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->macro_id_type = field;
+
+ regmap = sp->regmap_phy_config_ctrl;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->phy_pll_cfg_1 = field;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
+ if (IS_ERR(field)) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->pllctrl_lock[i] = field;
+ }
+
+ return 0;
+}
+
+static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
+ void __iomem *base, u8 block_offset_shift,
+ u8 reg_offset_shift)
+{
+ struct device *dev = sp->dev;
+ struct regmap *regmap;
+ u32 block_offset;
+ int i;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_lane_cdb[i] = regmap;
+ }
+
+ regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
+ reg_offset_shift,
+ &cdns_sierra_common_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_common_cdb = regmap;
+
+ block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_config_ctrl_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY config and control regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_config_ctrl = regmap;
+
+ return 0;
+}
+
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
+ struct cdns_sierra_data *data;
+ unsigned int id_value;
struct resource *res;
int i, ret, node = 0;
+ void __iomem *base;
+ struct clk *clk;
struct device_node *dn = dev->of_node, *child;
if (of_get_child_count(dn) == 0)
return -ENODEV;
+ /* Get init data for this PHY */
+ match = of_match_device(cdns_sierra_id_table, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_sierra_data *)match->data;
+
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return -ENOMEM;
dev_set_drvdata(dev, sp);
sp->dev = dev;
+ sp->init_data = data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sp->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(sp->base)) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
dev_err(dev, "missing \"reg\"\n");
- return PTR_ERR(sp->base);
+ return PTR_ERR(base);
}
- /* Get init data for this PHY */
- match = of_match_device(cdns_sierra_id_table, dev);
- if (!match)
- return -EINVAL;
- sp->init_data = (struct cdns_sierra_data *)match->data;
+ ret = cdns_regmap_init_blocks(sp, base, data->block_offset_shift,
+ data->reg_offset_shift);
+ if (ret)
+ return ret;
+
+ ret = cdns_regfield_init(sp);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, sp);
- sp->clk = devm_clk_get(dev, "phy_clk");
+ sp->clk = devm_clk_get_optional(dev, "phy_clk");
if (IS_ERR(sp->clk)) {
dev_err(dev, "failed to get clock phy_clk\n");
return PTR_ERR(sp->clk);
@@ -205,12 +532,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
return PTR_ERR(sp->phy_rst);
}
- sp->apb_rst = devm_reset_control_get(dev, "sierra_apb");
+ sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
if (IS_ERR(sp->apb_rst)) {
dev_err(dev, "failed to get apb reset\n");
return PTR_ERR(sp->apb_rst);
}
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->cmn_refclk_dig_div = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->cmn_refclk1_dig_div = clk;
+
ret = clk_prepare_enable(sp->clk);
if (ret)
return ret;
@@ -219,7 +562,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
reset_control_deassert(sp->apb_rst);
/* Check that PHY is present */
- if (sp->init_data->id_value != readl(sp->base)) {
+ regmap_field_read(sp->macro_id_type, &id_value);
+ if (sp->init_data->id_value != id_value) {
ret = -EINVAL;
goto clk_disable;
}
@@ -230,7 +574,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
struct phy *gphy;
sp->phys[node].lnk_rst =
- of_reset_control_get_exclusive_by_index(child, 0);
+ of_reset_control_array_get_exclusive(child);
if (IS_ERR(sp->phys[node].lnk_rst)) {
dev_err(dev, "failed to get reset %s\n",
@@ -248,6 +592,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
}
}
+ sp->num_lanes += sp->phys[node].num_lanes;
+
gphy = devm_phy_create(dev, child, &ops);
if (IS_ERR(gphy)) {
@@ -257,17 +603,18 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
- /* Initialise the PHY registers, unless auto configured */
- if (!sp->autoconf)
- cdns_sierra_phy_init(gphy);
-
node++;
}
sp->nsubnodes = node;
+ if (sp->num_lanes > SIERRA_MAX_LANES) {
+ dev_err(dev, "Invalid lane configuration\n");
+ goto put_child2;
+ }
+
/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1)
- writel(2, sp->base + SIERRA_PHY_PLL_CFG);
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -288,7 +635,7 @@ clk_disable:
static int cdns_sierra_phy_remove(struct platform_device *pdev)
{
- struct cdns_sierra_phy *phy = dev_get_drvdata(pdev->dev.parent);
+ struct cdns_sierra_phy *phy = platform_get_drvdata(pdev);
int i;
reset_control_assert(phy->phy_rst);
@@ -306,68 +653,158 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
return 0;
}
-static struct cdns_reg_pairs cdns_usb_regs[] = {
- /*
- * Write USB configuration parameters to the PHY.
- * These values are specific to this specific hardware
- * configuration.
- */
- {0xFE0A, SIERRA_DET_STANDEC_A},
- {0x000F, SIERRA_DET_STANDEC_B},
- {0x55A5, SIERRA_DET_STANDEC_C},
- {0x69AD, SIERRA_DET_STANDEC_D},
- {0x0241, SIERRA_DET_STANDEC_E},
- {0x0110, SIERRA_PSM_LANECAL},
- {0xCF00, SIERRA_PSM_DIAG},
- {0x001F, SIERRA_PSC_TX_A0},
- {0x0007, SIERRA_PSC_TX_A1},
- {0x0003, SIERRA_PSC_TX_A2},
- {0x0003, SIERRA_PSC_TX_A3},
- {0x0FFF, SIERRA_PSC_RX_A0},
- {0x0003, SIERRA_PSC_RX_A1},
- {0x0003, SIERRA_PSC_RX_A2},
- {0x0001, SIERRA_PSC_RX_A3},
- {0x0001, SIERRA_PLLCTRL_SUBRATE},
- {0x0406, SIERRA_PLLCTRL_GEN_D},
- {0x0000, SIERRA_DRVCTRL_ATTEN},
- {0x823E, SIERRA_CLKPATHCTRL_TMR},
- {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1},
- {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0},
- {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01},
- {0x023C, SIERRA_RX_CTLE_MAINTENANCE},
- {0x3232, SIERRA_CREQ_FSMCLK_SEL},
- {0x8452, SIERRA_CTLELUT_CTRL},
- {0x4121, SIERRA_DFE_ECMP_RATESEL},
- {0x4121, SIERRA_DFE_SMP_RATESEL},
- {0x9999, SIERRA_DEQ_VGATUNE_CTRL},
- {0x0330, SIERRA_TMRVAL_MODE0},
- {0x01FF, SIERRA_PICNT_MODE1},
- {0x0009, SIERRA_CPI_OUTBUF_RATESEL},
- {0x000F, SIERRA_LFPSFILT_NS},
- {0x0009, SIERRA_LFPSFILT_RD},
- {0x0001, SIERRA_LFPSFILT_MP},
- {0x8013, SIERRA_SDFILT_H2L_A},
- {0x0400, SIERRA_TMRVAL_MODE1},
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
};
-static struct cdns_reg_pairs cdns_pcie_regs[] = {
- /*
- * Write PCIe configuration parameters to the PHY.
- * These values are specific to this specific hardware
- * configuration.
- */
- {0x891f, SIERRA_DET_STANDEC_D},
- {0x0053, SIERRA_DET_STANDEC_E},
- {0x0400, SIERRA_TMRVAL_MODE2},
- {0x0200, SIERRA_TMRVAL_MODE3},
+/* refclk100MHz_32b_PCIe_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+};
+
+/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* refclk100MHz_20b_USB_ln_ext_ssc */
+static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
+ {0xFE0A, SIERRA_DET_STANDEC_A_PREG},
+ {0x000F, SIERRA_DET_STANDEC_B_PREG},
+ {0x00A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x69ad, SIERRA_DET_STANDEC_D_PREG},
+ {0x0241, SIERRA_DET_STANDEC_E_PREG},
+ {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0014, SIERRA_PSM_A0IN_TMR_PREG},
+ {0xCF00, SIERRA_PSM_DIAG_PREG},
+ {0x001F, SIERRA_PSC_TX_A0_PREG},
+ {0x0007, SIERRA_PSC_TX_A1_PREG},
+ {0x0003, SIERRA_PSC_TX_A2_PREG},
+ {0x0003, SIERRA_PSC_TX_A3_PREG},
+ {0x0FFF, SIERRA_PSC_RX_A0_PREG},
+ {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A2_PREG},
+ {0x0001, SIERRA_PSC_RX_A3_PREG},
+ {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
+ {0x2512, SIERRA_DFE_BIASTRIM_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8000, SIERRA_CREQ_SPARE_PREG},
+ {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x8453, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0014, SIERRA_DEQ_GLUT0},
+ {0x0014, SIERRA_DEQ_GLUT1},
+ {0x0014, SIERRA_DEQ_GLUT2},
+ {0x0014, SIERRA_DEQ_GLUT3},
+ {0x0014, SIERRA_DEQ_GLUT4},
+ {0x0014, SIERRA_DEQ_GLUT5},
+ {0x0014, SIERRA_DEQ_GLUT6},
+ {0x0014, SIERRA_DEQ_GLUT7},
+ {0x0014, SIERRA_DEQ_GLUT8},
+ {0x0014, SIERRA_DEQ_GLUT9},
+ {0x0014, SIERRA_DEQ_GLUT10},
+ {0x0014, SIERRA_DEQ_GLUT11},
+ {0x0014, SIERRA_DEQ_GLUT12},
+ {0x0014, SIERRA_DEQ_GLUT13},
+ {0x0014, SIERRA_DEQ_GLUT14},
+ {0x0014, SIERRA_DEQ_GLUT15},
+ {0x0014, SIERRA_DEQ_GLUT16},
+ {0x0BAE, SIERRA_DEQ_ALUT0},
+ {0x0AEB, SIERRA_DEQ_ALUT1},
+ {0x0A28, SIERRA_DEQ_ALUT2},
+ {0x0965, SIERRA_DEQ_ALUT3},
+ {0x08A2, SIERRA_DEQ_ALUT4},
+ {0x07DF, SIERRA_DEQ_ALUT5},
+ {0x071C, SIERRA_DEQ_ALUT6},
+ {0x0659, SIERRA_DEQ_ALUT7},
+ {0x0596, SIERRA_DEQ_ALUT8},
+ {0x0514, SIERRA_DEQ_ALUT9},
+ {0x0492, SIERRA_DEQ_ALUT10},
+ {0x0410, SIERRA_DEQ_ALUT11},
+ {0x038E, SIERRA_DEQ_ALUT12},
+ {0x030C, SIERRA_DEQ_ALUT13},
+ {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
+ {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
+ {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
+ {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
+ {0x000F, SIERRA_LFPSFILT_NS_PREG},
+ {0x0009, SIERRA_LFPSFILT_RD_PREG},
+ {0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x8013, SIERRA_SDFILT_H2L_A_PREG},
+ {0x8009, SIERRA_SDFILT_L2H_PREG},
+ {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
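+
+/*
+ * Positional initializer: macro ID, block- and register-offset shifts,
+ * then the table sizes and pointers (PCIe/USB, common and lane), in the
+ * field order of struct cdns_sierra_data, which is not shown in this
+ * hunk.
+ */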
static const struct cdns_sierra_data cdns_map_sierra = {
SIERRA_MACRO_ID,
- ARRAY_SIZE(cdns_pcie_regs),
- ARRAY_SIZE(cdns_usb_regs),
- cdns_pcie_regs,
- cdns_usb_regs
+ 0x2,
+ 0x2,
+ ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+ cdns_pcie_cmn_regs_ext_ssc,
+ cdns_pcie_ln_regs_ext_ssc,
+ cdns_usb_cmn_regs_ext_ssc,
+ cdns_usb_ln_regs_ext_ssc,
+};
+
+static const struct cdns_sierra_data cdns_ti_map_sierra = {
+ SIERRA_MACRO_ID,
+ 0x0,
+ 0x1,
+ ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+ ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+ cdns_pcie_cmn_regs_ext_ssc,
+ cdns_pcie_ln_regs_ext_ssc,
+ cdns_usb_cmn_regs_ext_ssc,
+ cdns_usb_ln_regs_ext_ssc,
};
static const struct of_device_id cdns_sierra_id_table[] = {
@@ -375,6 +812,10 @@ static const struct of_device_id cdns_sierra_id_table[] = {
.compatible = "cdns,sierra-phy-t0",
.data = &cdns_map_sierra,
},
+ {
+ .compatible = "ti,sierra-phy-t0",
+ .data = &cdns_ti_map_sierra,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cdns_sierra_id_table);
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
index 534e393a09b3..1c73053bcc98 100644
--- a/drivers/phy/hisilicon/Kconfig
+++ b/drivers/phy/hisilicon/Kconfig
@@ -33,14 +33,14 @@ config PHY_HISTB_COMBPHY
If unsure, say N.
config PHY_HISI_INNO_USB2
- tristate "HiSilicon INNO USB2 PHY support"
- depends on (ARCH_HISI && ARM64) || COMPILE_TEST
- select GENERIC_PHY
- select MFD_SYSCON
- help
- Support for INNO USB2 PHY on HiSilicon SoCs. This Phy supports
- USB 1.5Mb/s, USB 12Mb/s, USB 480Mb/s speeds. It supports one
- USB host port to accept one USB device.
+ tristate "HiSilicon INNO USB2 PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+	  Support for the INNO USB2 PHY on HiSilicon SoCs. This PHY
+	  supports the USB 1.5Mb/s, 12Mb/s and 480Mb/s speeds. It
+	  provides one USB host port that accepts one USB device.
config PHY_HIX5HD2_SATA
tristate "HIX5HD2 SATA PHY Driver"
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
new file mode 100644
index 000000000000..4ea6a8897cd7
--- /dev/null
+++ b/drivers/phy/intel/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Intel Lightning Mountain (LGM) platform
+#
+config PHY_INTEL_EMMC
+ tristate "Intel EMMC PHY driver"
+ select GENERIC_PHY
+ help
+	  Enable this to support the Intel eMMC PHY.
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
new file mode 100644
index 000000000000..6b876a75599d
--- /dev/null
+++ b/drivers/phy/intel/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
diff --git a/drivers/phy/intel/phy-intel-emmc.c b/drivers/phy/intel/phy-intel-emmc.c
new file mode 100644
index 000000000000..703aeb122541
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-emmc.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel eMMC PHY driver
+ * Copyright (C) 2019 Intel, Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* eMMC phy register definitions */
+#define EMMC_PHYCTRL0_REG 0xa8
+#define DR_TY_MASK GENMASK(30, 28)
+#define DR_TY_SHIFT(x) (((x) << 28) & DR_TY_MASK)
+#define OTAPDLYENA BIT(14)
+#define OTAPDLYSEL_MASK GENMASK(13, 10)
+#define OTAPDLYSEL_SHIFT(x) (((x) << 10) & OTAPDLYSEL_MASK)
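+/*
+ * e.g. OTAPDLYSEL_SHIFT(4) encodes the value 4 into bits 13:10, and
+ * DR_TY_SHIFT(6) encodes 6 into bits 30:28.
+ */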
+
+#define EMMC_PHYCTRL1_REG 0xac
+#define PDB_MASK BIT(0)
+#define PDB_SHIFT(x) (((x) << 0) & PDB_MASK)
+#define ENDLL_MASK BIT(7)
+#define ENDLL_SHIFT(x) (((x) << 7) & ENDLL_MASK)
+
+#define EMMC_PHYCTRL2_REG 0xb0
+#define FRQSEL_25M 0
+#define FRQSEL_50M 1
+#define FRQSEL_100M 2
+#define FRQSEL_150M 3
+#define FRQSEL_MASK GENMASK(24, 22)
+#define FRQSEL_SHIFT(x) (((x) << 22) & FRQSEL_MASK)
+
+#define EMMC_PHYSTAT_REG 0xbc
+#define CALDONE_MASK BIT(9)
+#define DLLRDY_MASK BIT(8)
+#define IS_CALDONE(x) ((x) & CALDONE_MASK)
+#define IS_DLLRDY(x) ((x) & DLLRDY_MASK)
+
+struct intel_emmc_phy {
+ struct regmap *syscfg;
+ struct clk *emmcclk;
+};
+
+static int intel_emmc_phy_power(struct phy *phy, bool on_off)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ unsigned int caldone;
+ unsigned int dllrdy;
+ unsigned int freqsel;
+ unsigned long rate;
+ int ret, quot;
+
+ /*
+ * Keep phyctrl_pdb and phyctrl_endll low to allow
+ * initialization of CALIO state M/C DFFs
+ */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(0));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+	/* Power-off already finished above */
+ if (!on_off)
+ return 0;
+
+ rate = clk_get_rate(priv->emmcclk);
+ quot = DIV_ROUND_CLOSEST(rate, 50000000);
+ if (quot > FRQSEL_150M)
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+ freqsel = clamp_t(int, quot, FRQSEL_25M, FRQSEL_150M);
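+	/*
+	 * e.g. a 100MHz clock gives quot = 2 (FRQSEL_100M), while a
+	 * 200MHz clock gives quot = 4, which triggers the warning above
+	 * and is clamped to FRQSEL_150M.
+	 */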
+
+	/*
+	 * According to the user manual, a calpad calibration cycle takes
+	 * more than 2us, though no exact value is recommended, so allow
+	 * a little margin here.
+	 */
+ udelay(5);
+
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+	/*
+	 * According to the user manual, the driver should wait 5us for
+	 * calpad busy trimming. However, this value is documented to be
+	 * PVT (process, voltage and temperature) dependent, and failure
+	 * cases have been observed, so be more tolerant when waiting for
+	 * calpad busy trimming.
+	 */
+ ret = regmap_read_poll_timeout(priv->syscfg, EMMC_PHYSTAT_REG,
+ caldone, IS_CALDONE(caldone),
+ 0, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Set the frequency of the DLL operation */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL2_REG, FRQSEL_MASK,
+ FRQSEL_SHIFT(freqsel));
+ if (ret) {
+ dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
+ return ret;
+ }
+
+ /* Turn on the DLL */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, ENDLL_MASK,
+ ENDLL_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * After enabling analog DLL circuits docs say that we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+	 * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+ * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+ * Hopefully we won't be running at 100 kHz, but we should still make
+ * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+	 * extreme cases we've seen it take more than 10ms (!). We'll be
+ * generous and give it 50ms.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg,
+ EMMC_PHYSTAT_REG,
+ dllrdy, IS_DLLRDY(dllrdy),
+ 0, 50 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&phy->dev, "dllrdy failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_init(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+	/*
+	 * We purposely get the clock here rather than in probe to avoid a
+	 * circular dependency. The expected order is:
+	 * - PHY driver probes
+	 * - SDHCI driver starts probing
+	 * - SDHCI driver registers its clock
+	 * - SDHCI driver gets the PHY
+	 * - SDHCI driver inits the PHY
+	 *
+	 * The clock is optional, so any lookup error is simply returned
+	 * to the caller.
+	 */
+ priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ if (IS_ERR(priv->emmcclk)) {
+		dev_err(&phy->dev, "failed to get emmcclk\n");
+ return PTR_ERR(priv->emmcclk);
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_exit(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ clk_put(priv->emmcclk);
+
+ return 0;
+}
+
+static int intel_emmc_phy_power_on(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Drive impedance: 50 Ohm */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, DR_TY_MASK,
+ DR_TY_SHIFT(6));
+ if (ret) {
+		dev_err(&phy->dev, "ERROR: set drive-impedance-50ohm: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay: disable */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, OTAPDLYENA,
+ 0);
+ if (ret) {
+		dev_err(&phy->dev, "ERROR: set output tap delay: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG,
+ OTAPDLYSEL_MASK, OTAPDLYSEL_SHIFT(4));
+ if (ret) {
+		dev_err(&phy->dev, "ERROR: output tap delay select: %d\n", ret);
+ return ret;
+ }
+
+ /* Power up eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, true);
+}
+
+static int intel_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, false);
+}
+
+static const struct phy_ops ops = {
+ .init = intel_emmc_phy_init,
+ .exit = intel_emmc_phy_exit,
+ .power_on = intel_emmc_phy_power_on,
+ .power_off = intel_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int intel_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct intel_emmc_phy *priv;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Get eMMC phy (accessed via chiptop) regmap */
+ priv->syscfg = syscon_regmap_lookup_by_phandle(np, "intel,syscon");
+ if (IS_ERR(priv->syscfg)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(priv->syscfg);
+ }
+
+ generic_phy = devm_phy_create(dev, np, &ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id intel_emmc_phy_dt_ids[] = {
+ { .compatible = "intel,lgm-emmc-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, intel_emmc_phy_dt_ids);
+
+static struct platform_driver intel_emmc_driver = {
+ .probe = intel_emmc_phy_probe,
+ .driver = {
+ .name = "intel-emmc-phy",
+ .of_match_table = intel_emmc_phy_dt_ids,
+ },
+};
+
+module_platform_driver(intel_emmc_driver);
+
+MODULE_AUTHOR("Peter Harliman Liem <peter.harliman.liem@intel.com>");
+MODULE_DESCRIPTION("Intel eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 6e457967653e..2ff9a48d833e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -386,7 +386,7 @@ static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
default:
dev_err(dev, "invalid PHY mode %u\n", mode);
return ERR_PTR(-EINVAL);
- };
+ }
return priv->phy;
}
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 005e02dd4a91..8f6273c837ec 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -10,14 +10,16 @@ config ARMADA375_USBCLUSTER_PHY
config PHY_BERLIN_SATA
tristate "Marvell Berlin SATA PHY driver"
- depends on ARCH_BERLIN && HAS_IOMEM && OF
+ depends on ARCH_BERLIN || COMPILE_TEST
+ depends on OF && HAS_IOMEM
select GENERIC_PHY
help
Enable this to support the SATA PHY on Marvell Berlin SoCs.
config PHY_BERLIN_USB
tristate "Marvell Berlin USB PHY Driver"
- depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
+ depends on ARCH_BERLIN || COMPILE_TEST
+ depends on OF && HAS_IOMEM && RESET_CONTROLLER
select GENERIC_PHY
help
Enable this to support the USB PHY on Marvell Berlin SoCs.
@@ -95,7 +97,7 @@ config PHY_PXA_28NM_USB2
config PHY_PXA_USB
tristate "Marvell PXA USB PHY Driver"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support Marvell PXA USB PHY driver for Marvell
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index e3b87c94aaf6..e41367f36ee1 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -221,7 +221,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
- ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI),
GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
@@ -235,14 +235,14 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 4 */
ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
- ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GKR, -1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
@@ -342,7 +342,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE);
switch (lane->submode) {
- case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_10GBASER:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
break;
@@ -417,7 +417,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
/* refclk selection */
val = readl(priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id));
val &= ~MVEBU_COMPHY_MISC_CTRL0_REFCLK_SEL;
- if (lane->submode == PHY_INTERFACE_MODE_10GKR)
+ if (lane->submode == PHY_INTERFACE_MODE_10GBASER)
val |= MVEBU_COMPHY_MISC_CTRL0_ICP_FORCE;
writel(val, priv->base + MVEBU_COMPHY_MISC_CTRL0(lane->id));
@@ -564,7 +564,7 @@ static int mvebu_comphy_set_mode_rxaui(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
-static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
+static int mvebu_comphy_set_mode_10gbaser(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -735,8 +735,8 @@ static int mvebu_comphy_power_on_legacy(struct phy *phy)
case PHY_INTERFACE_MODE_RXAUI:
ret = mvebu_comphy_set_mode_rxaui(phy);
break;
- case PHY_INTERFACE_MODE_10GKR:
- ret = mvebu_comphy_set_mode_10gkr(phy);
+ case PHY_INTERFACE_MODE_10GBASER:
+ ret = mvebu_comphy_set_mode_10gbaser(phy);
break;
default:
return -ENOTSUPP;
@@ -782,8 +782,8 @@ static int mvebu_comphy_power_on(struct phy *phy)
lane->id);
fw_speed = COMPHY_FW_SPEED_3125;
break;
- case PHY_INTERFACE_MODE_10GKR:
- dev_dbg(priv->dev, "set lane %d to 10G-KR mode\n",
+ case PHY_INTERFACE_MODE_10GBASER:
+ dev_dbg(priv->dev, "set lane %d to 10GBASE-R mode\n",
lane->id);
fw_speed = COMPHY_FW_SPEED_103125;
break;
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 376f5d189da0..dee757c957f2 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -3,12 +3,13 @@
# Phy drivers for Mediatek devices
#
config PHY_MTK_TPHY
- tristate "MediaTek T-PHY Driver"
- depends on ARCH_MEDIATEK && OF
- select GENERIC_PHY
- help
- Say 'Y' here to add support for MediaTek T-PHY driver,
- it supports multiple usb2.0, usb3.0 ports, PCIe and
+ tristate "MediaTek T-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+	  Say 'Y' here to add support for the MediaTek T-PHY driver.
+	  It supports multiple USB 2.0 and USB 3.0 ports, PCIe and
	  SATA, and supports two T-PHY versions with different bank
	  layouts: the T-PHY with banks shared between multiple ports
	  is the first version; otherwise it is the second version,
@@ -16,7 +17,8 @@ config PHY_MTK_TPHY
config PHY_MTK_UFS
tristate "MediaTek UFS M-PHY driver"
- depends on ARCH_MEDIATEK && OF
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
select GENERIC_PHY
help
Support for UFS M-PHY on MediaTek chipsets.
@@ -25,10 +27,11 @@ config PHY_MTK_UFS
specified M-PHYs.
config PHY_MTK_XSPHY
- tristate "MediaTek XS-PHY Driver"
- depends on ARCH_MEDIATEK && OF
- select GENERIC_PHY
- help
+ tristate "MediaTek XS-PHY Driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
Enable this to support the SuperSpeedPlus XS-PHY transceiver for
USB3.1 GEN2 controllers on MediaTek chips. The driver supports
multiple USB2.0, USB3.1 GEN2 ports.
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index ead06c6c2601..12e71a315a2c 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -115,7 +115,7 @@ struct cpcap_usb_ints_state {
enum cpcap_gpio_mode {
CPCAP_DM_DP,
CPCAP_MDM_RX_TX,
- CPCAP_UNKNOWN,
+ CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */
CPCAP_OTG_DM_DP,
};
@@ -134,6 +134,8 @@ struct cpcap_phy_ddata {
struct iio_channel *id;
struct regulator *vusb;
atomic_t active;
+ unsigned int vbus_provider:1;
+ unsigned int docked:1;
};
static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
@@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
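+
+/*
+ * musb_mailbox() may fail, e.g. if the MUSB glue is not ready yet; log
+ * that at debug level and carry on instead of aborting the detection
+ * state machine.
+ */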
+static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
+ enum musb_vbus_id_status status)
+{
+ int error;
+
+ error = musb_mailbox(status);
+ if (!error)
+ return;
+
+ dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
+ __func__, error);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_phy_ddata *ddata;
@@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
return;
- if (s.id_ground) {
- dev_dbg(ddata->dev, "id ground, USB host mode\n");
+ vbus = cpcap_usb_vbus_valid(ddata);
+
+ /* We need to kick the VBUS as USB A-host */
+ if (s.id_ground && ddata->vbus_provider) {
+ dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n");
+
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+ CPCAP_BIT_VBUSSTBY_EN |
+ CPCAP_BIT_VBUSEN_SPI,
+ CPCAP_BIT_VBUSEN_SPI);
+ if (error)
+ goto out_err;
+
+ return;
+ }
+
+ if (vbus && s.id_ground && ddata->docked) {
+ dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n");
+
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ return;
+ }
+
+ /* No VBUS needed with docks */
+ if (vbus && s.id_ground && !ddata->vbus_provider) {
+ dev_dbg(ddata->dev, "connected to a dock\n");
+
+ ddata->docked = true;
+
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ /*
+ * Force check state again after musb has reoriented,
+ * otherwise devices won't enumerate after loading PHY
+ * driver.
+ */
+ schedule_delayed_work(&ddata->detect_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
+
+ if (s.id_ground && !ddata->docked) {
+ dev_dbg(ddata->dev, "id ground, USB host mode\n");
+
+ ddata->vbus_provider = true;
+
+ error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN |
CPCAP_BIT_VBUSEN_SPI,
@@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work)
vbus = cpcap_usb_vbus_valid(ddata);
+ /* Otherwise assume we're connected to a USB host */
if (vbus) {
- /* Are we connected to a docking station with vbus? */
- if (s.id_ground) {
- dev_dbg(ddata->dev, "connected to a dock\n");
-
- /* No VBUS needed with docks */
- error = cpcap_usb_set_usb_mode(ddata);
- if (error)
- goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
- if (error)
- goto out_err;
-
- return;
- }
-
- /* Otherwise assume we're connected to a USB host */
dev_dbg(ddata->dev, "connected to USB host\n");
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_VALID);
- if (error)
- goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
return;
}
+ ddata->vbus_provider = false;
+ ddata->docked = false;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
+
/* Default to debug UART mode */
error = cpcap_usb_set_uart_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- goto out_err;
-
dev_dbg(ddata->dev, "set UART mode\n");
return;
@@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
{
int error;
- error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+ /* Disable lines to prevent glitches from waking up mdm6600 */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
goto out_err;
@@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
if (error)
goto out_err;
+ /* Enable UART mode */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+ if (error)
+ goto out_err;
+
return 0;
out_err:
@@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
{
int error;
- error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+ /* Disable lines to prevent glitches from waking up mdm6600 */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
return error;
@@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
if (error)
goto out_err;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
- CPCAP_BIT_USBXCVREN,
- CPCAP_BIT_USBXCVREN);
- if (error)
- goto out_err;
-
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_PU_SPI |
CPCAP_BIT_DMPD_SPI |
@@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
if (error)
goto out_err;
+ /* Enable USB mode */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+ if (error)
+ goto out_err;
+
return 0;
out_err:
@@ -649,9 +703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
if (error)
dev_err(ddata->dev, "could not set UART mode\n");
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- dev_err(ddata->dev, "could not set mailbox\n");
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index ee184d5607bd..f20524f0c21d 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work)
struct phy_mdm6600 *ddata;
struct device *dev;
DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
- int error, i, val = 0;
+ int error;
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
@@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work)
if (error)
return;
- for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
- val |= test_bit(i, values) << i;
- dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
- __func__, i, test_bit(i, values), val);
- }
- ddata->status = values[0];
+ ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
dev_info(dev, "modem status: %i %s\n",
ddata->status,
- phy_mdm6600_status_name[ddata->status & 7]);
+ phy_mdm6600_status_name[ddata->status]);
complete(&ddata->ack);
}
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b04f4fe85ac2..cd5a6c95dbdc 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -29,7 +29,7 @@ static void devm_phy_release(struct device *dev, void *res)
{
struct phy *phy = *(struct phy **)res;
- phy_put(phy);
+ phy_put(dev, phy);
}
static void devm_phy_provider_release(struct device *dev, void *res)
@@ -566,12 +566,12 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id)
EXPORT_SYMBOL_GPL(of_phy_get);
/**
- * phy_put() - release the PHY
- * @phy: the phy returned by phy_get()
+ * of_phy_put() - release the PHY
+ * @phy: the phy returned by of_phy_get()
*
- * Releases a refcount the caller received from phy_get().
+ * Releases a refcount the caller received from of_phy_get().
*/
-void phy_put(struct phy *phy)
+void of_phy_put(struct phy *phy)
{
if (!phy || IS_ERR(phy))
return;
@@ -584,6 +584,20 @@ void phy_put(struct phy *phy)
module_put(phy->ops->owner);
put_device(&phy->dev);
}
+EXPORT_SYMBOL_GPL(of_phy_put);
+
+/**
+ * phy_put() - release the PHY
+ * @dev: device that wants to release this phy
+ * @phy: the phy returned by phy_get()
+ *
+ * Releases a refcount the caller received from phy_get().
+ */
+void phy_put(struct device *dev, struct phy *phy)
+{
+ device_link_remove(dev, &phy->dev);
+ of_phy_put(phy);
+}
EXPORT_SYMBOL_GPL(phy_put);
/**
@@ -651,6 +665,7 @@ struct phy *phy_get(struct device *dev, const char *string)
{
int index = 0;
struct phy *phy;
+ struct device_link *link;
if (string == NULL) {
dev_WARN(dev, "missing string\n");
@@ -672,6 +687,13 @@ struct phy *phy_get(struct device *dev, const char *string)
get_device(&phy->dev);
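+	/*
+	 * Tie the consumer to the PHY provider with a stateless device
+	 * link for PM/unbind ordering; phy_put() removes the link again.
+	 */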
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
+ }
+
return phy;
}
EXPORT_SYMBOL_GPL(phy_get);
@@ -690,7 +712,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
{
struct phy *phy = phy_get(dev, string);
- if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
+ if (PTR_ERR(phy) == -ENODEV)
phy = NULL;
return phy;
@@ -744,7 +766,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
struct phy *phy = devm_phy_get(dev, string);
- if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
+ if (PTR_ERR(phy) == -ENODEV)
phy = NULL;
return phy;
@@ -765,6 +787,7 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id)
{
struct phy **ptr, *phy;
+ struct device_link *link;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -776,6 +799,14 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
devres_add(dev, ptr);
} else {
devres_free(ptr);
+ return phy;
+ }
+
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
}
return phy;
@@ -798,6 +829,7 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index)
{
struct phy **ptr, *phy;
+ struct device_link *link;
ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
@@ -819,6 +851,13 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
*ptr = phy;
devres_add(dev, ptr);
+ link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(phy->dev.parent));
+ return ERR_PTR(-EINVAL);
+ }
+
return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 42bc5150dd92..febe0aef68d4 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -80,7 +80,7 @@ static int read_poll_timeout(void __iomem *addr, u32 mask)
if (readl_relaxed(addr) & mask)
return 0;
- usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
+ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
} while (!time_after(jiffies, timeout));
return (readl_relaxed(addr) & mask) ? 0 : -ETIMEDOUT;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 091e20303a14..7db2a94f7a99 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -66,7 +66,7 @@
/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
@@ -166,8 +166,9 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
};
static const unsigned int sm8150_ufsphy_regs_layout[] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x180,
+ [QPHY_START_CTRL] = QPHY_V4_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V4_SW_RESET,
};
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
@@ -885,7 +886,6 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
@@ -1390,7 +1390,6 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.is_dual_lane_phy = true,
- .no_pcs_sw_reset = true,
};
static void qcom_qmp_phy_configure(void __iomem *base,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index ab6ff9b45a32..90f793c2293d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*/
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index dbd2de4d28b1..0824b9dd5683 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -39,6 +39,7 @@ config PHY_ROCKCHIP_INNO_DSIDPHY
tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
help
Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
Innosilicon IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index fc729ecd3fe9..a7c6c940a3a8 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
#include <linux/pm_runtime.h>
#include <linux/mfd/syscon.h>
@@ -167,31 +168,6 @@
#define DSI_PHY_STATUS 0xb0
#define PHY_LOCK BIT(0)
-struct mipi_dphy_timing {
- unsigned int clkmiss;
- unsigned int clkpost;
- unsigned int clkpre;
- unsigned int clkprepare;
- unsigned int clksettle;
- unsigned int clktermen;
- unsigned int clktrail;
- unsigned int clkzero;
- unsigned int dtermen;
- unsigned int eot;
- unsigned int hsexit;
- unsigned int hsprepare;
- unsigned int hszero;
- unsigned int hssettle;
- unsigned int hsskip;
- unsigned int hstrail;
- unsigned int init;
- unsigned int lpx;
- unsigned int taget;
- unsigned int tago;
- unsigned int tasure;
- unsigned int wakeup;
-};
-
struct inno_dsidphy {
struct device *dev;
struct clk *ref_clk;
@@ -201,7 +177,9 @@ struct inno_dsidphy {
void __iomem *host_base;
struct reset_control *rst;
enum phy_mode mode;
+ struct phy_configure_opts_mipi_dphy dphy_cfg;
+ struct clk *pll_clk;
struct {
struct clk_hw hw;
u8 prediv;
@@ -238,37 +216,79 @@ static void phy_update_bits(struct inno_dsidphy *inno,
writel(tmp, inno->phy_base + reg);
}
-static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
- unsigned long period)
+static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
+ unsigned long rate)
{
- /* Global Operation Timing Parameters */
- timing->clkmiss = 0;
- timing->clkpost = 70000 + 52 * period;
- timing->clkpre = 8 * period;
- timing->clkprepare = 65000;
- timing->clksettle = 95000;
- timing->clktermen = 0;
- timing->clktrail = 80000;
- timing->clkzero = 260000;
- timing->dtermen = 0;
- timing->eot = 0;
- timing->hsexit = 120000;
- timing->hsprepare = 65000 + 4 * period;
- timing->hszero = 145000 + 6 * period;
- timing->hssettle = 85000 + 6 * period;
- timing->hsskip = 40000;
- timing->hstrail = max(8 * period, 60000 + 4 * period);
- timing->init = 100000000;
- timing->lpx = 60000;
- timing->taget = 5 * timing->lpx;
- timing->tago = 4 * timing->lpx;
- timing->tasure = 2 * timing->lpx;
- timing->wakeup = 1000000000;
+ unsigned long prate = clk_get_rate(inno->ref_clk);
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+	 * where PLL_Output_Frequency equals DDR-Clock-Frequency * 2.
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+	/* 5MHz < Fref / prediv < 40MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
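+	/*
+	 * With a 24MHz reference, for example, fref is 12MHz, so prediv
+	 * may range from 1 to 2.
+	 */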
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The possible settings of feedback divider are
+		 * The feedback divider may be set to 12, 13, 14, or any
+		 * value from 16 to 511; 15 is skipped below.
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ inno->pll.prediv = best_prediv;
+ inno->pll.fbdiv = best_fbdiv;
+ inno->pll.rate = best_freq;
+ }
+
+ return best_freq;
}
static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{
- struct mipi_dphy_timing gotp;
+ struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
const struct {
unsigned long rate;
u8 hs_prepare;
@@ -288,12 +308,14 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{ 800000000, 0x21, 0x1f, 0x09, 0x29},
{1000000000, 0x09, 0x20, 0x09, 0x27},
};
- u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 t_txbyteclkhs, t_txclkesc;
u32 txbyteclkhs, txclkesc, esc_clk_div;
u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
unsigned int i;
+ inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
+
/* Select MIPI mode */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
@@ -328,32 +350,27 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
txclkesc = txbyteclkhs / esc_clk_div;
t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
- ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
-
- memset(&gotp, 0, sizeof(gotp));
- mipi_dphy_timing_get_default(&gotp, ui);
-
/*
* The value of counter for HS Ths-exit
* Ths-exit = Tpin_txbyteclkhs * value
*/
- hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ hs_exit = DIV_ROUND_UP(cfg->hs_exit, t_txbyteclkhs);
/*
* The value of counter for HS Tclk-post
* Tclk-post = Tpin_txbyteclkhs * value
*/
- clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ clk_post = DIV_ROUND_UP(cfg->clk_post, t_txbyteclkhs);
/*
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
/*
* The value of counter for HS Tlpx Time
* Tlpx = Tpin_txbyteclkhs * (2 + value)
*/
- lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
if (lpx >= 2)
lpx -= 2;
@@ -362,19 +379,19 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
* Tta-go for turnaround
* Tta-go = Ttxclkesc * value
*/
- ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ ta_go = DIV_ROUND_UP(cfg->ta_go, t_txclkesc);
/*
* The value of counter for HS Tta-sure
* Tta-sure for turnaround
* Tta-sure = Ttxclkesc * value
*/
- ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ ta_sure = DIV_ROUND_UP(cfg->ta_sure, t_txclkesc);
/*
* The value of counter for HS Tta-wait
* Tta-wait for turnaround
* Tta-wait = Ttxclkesc * value
*/
- ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+ ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
for (i = 0; i < ARRAY_SIZE(timings); i++)
if (inno->pll.rate <= timings[i].rate)
@@ -479,6 +496,7 @@ static int inno_dsidphy_power_on(struct phy *phy)
struct inno_dsidphy *inno = phy_get_drvdata(phy);
clk_prepare_enable(inno->pclk_phy);
+ clk_prepare_enable(inno->ref_clk);
pm_runtime_get_sync(inno->dev);
/* Bandgap power on */
@@ -524,6 +542,7 @@ static int inno_dsidphy_power_off(struct phy *phy)
LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->ref_clk);
clk_disable_unprepare(inno->pclk_phy);
return 0;
@@ -546,168 +565,32 @@ static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
return 0;
}
-static const struct phy_ops inno_dsidphy_ops = {
- .set_mode = inno_dsidphy_set_mode,
- .power_on = inno_dsidphy_power_on,
- .power_off = inno_dsidphy_power_off,
- .owner = THIS_MODULE,
-};
-
-static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
- unsigned long prate,
- unsigned long rate,
- u8 *prediv, u16 *fbdiv)
-{
- unsigned long best_freq = 0;
- unsigned long fref, fout;
- u8 min_prediv, max_prediv;
- u8 _prediv, best_prediv = 1;
- u16 _fbdiv, best_fbdiv = 1;
- u32 min_delta = UINT_MAX;
-
- /*
- * The PLL output frequency can be calculated using a simple formula:
- * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
- * PLL_Output_Frequency: it is equal to DDR-Clock-Frequency * 2
- */
- fref = prate / 2;
- if (rate > 1000000000UL)
- fout = 1000000000UL;
- else
- fout = rate;
-
- /* 5Mhz < Fref / prediv < 40MHz */
- min_prediv = DIV_ROUND_UP(fref, 40000000);
- max_prediv = fref / 5000000;
-
- for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
- u64 tmp;
- u32 delta;
-
- tmp = (u64)fout * _prediv;
- do_div(tmp, fref);
- _fbdiv = tmp;
-
- /*
- * The possible settings of feedback divider are
- * 12, 13, 14, 16, ~ 511
- */
- if (_fbdiv == 15)
- continue;
-
- if (_fbdiv < 12 || _fbdiv > 511)
- continue;
-
- tmp = (u64)_fbdiv * fref;
- do_div(tmp, _prediv);
-
- delta = abs(fout - tmp);
- if (!delta) {
- best_prediv = _prediv;
- best_fbdiv = _fbdiv;
- best_freq = tmp;
- break;
- } else if (delta < min_delta) {
- best_prediv = _prediv;
- best_fbdiv = _fbdiv;
- best_freq = tmp;
- min_delta = delta;
- }
- }
-
- if (best_freq) {
- *prediv = best_prediv;
- *fbdiv = best_fbdiv;
- }
-
- return best_freq;
-}
-
-static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int inno_dsidphy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
{
- struct inno_dsidphy *inno = hw_to_inno(hw);
- unsigned long fout;
- u16 fbdiv = 1;
- u8 prediv = 1;
-
- fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
- &prediv, &fbdiv);
-
- return fout;
-}
-
-static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct inno_dsidphy *inno = hw_to_inno(hw);
- unsigned long fout;
- u16 fbdiv = 1;
- u8 prediv = 1;
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+ int ret;
- fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
- &prediv, &fbdiv);
+ if (inno->mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
- dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
- parent_rate, fout, prediv, fbdiv);
+ ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+ if (ret)
+ return ret;
- inno->pll.prediv = prediv;
- inno->pll.fbdiv = fbdiv;
- inno->pll.rate = fout;
+ memcpy(&inno->dphy_cfg, &opts->mipi_dphy, sizeof(inno->dphy_cfg));
return 0;
}
-static unsigned long
-inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
- struct inno_dsidphy *inno = hw_to_inno(hw);
-
- /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
- return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
-}
-
-static const struct clk_ops inno_dsidphy_pll_clk_ops = {
- .round_rate = inno_dsidphy_pll_clk_round_rate,
- .set_rate = inno_dsidphy_pll_clk_set_rate,
- .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+static const struct phy_ops inno_dsidphy_ops = {
+ .configure = inno_dsidphy_configure,
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
};
-static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
-{
- struct device *dev = inno->dev;
- struct clk *clk;
- const char *parent_name;
- struct clk_init_data init;
- int ret;
-
- parent_name = __clk_get_name(inno->ref_clk);
-
- init.name = "mipi_dphy_pll";
- ret = of_property_read_string(dev->of_node, "clock-output-names",
- &init.name);
- if (ret < 0)
- dev_dbg(dev, "phy should set clock-output-names property\n");
-
- init.ops = &inno_dsidphy_pll_clk_ops;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- init.flags = 0;
-
- inno->pll.hw.init = &init;
- clk = devm_clk_register(dev, &inno->pll.hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to register PLL: %d\n", ret);
- return ret;
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
- &inno->pll.hw);
-}
-
static int inno_dsidphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -764,10 +647,6 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
return ret;
}
- ret = inno_dsidphy_pll_register(inno);
- if (ret)
- return ret;
-
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index 2b97fb1185a0..9ca20c947283 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
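+	/* Truncate to whole kHz; rates a few Hz off can then still match the table */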
+ rate = (rate / 1000) * 1000;
+
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate && !cfg->fracdiv)
break;
@@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ rate = (rate / 1000) * 1000;
+
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate)
break;
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 290a6c70f570..9e483d1fdaf2 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -32,7 +32,7 @@ config PHY_EXYNOS_PCIE
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
depends on HAS_IOMEM
- depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2
+ depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2 || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
default ARCH_EXYNOS
@@ -60,7 +60,7 @@ config PHY_EXYNOS5250_USB2
config PHY_S5PV210_USB2
bool "Support for S5PV210"
depends on PHY_SAMSUNG_USB2
- depends on ARCH_S5PV210
+ depends on ARCH_S5PV210 || COMPILE_TEST
help
Enable USB PHY support for S5PV210. This option requires that Samsung
USB 2.0 PHY driver is enabled and means that support for this
@@ -69,7 +69,7 @@ config PHY_S5PV210_USB2
config PHY_EXYNOS5_USBDRD
tristate "Exynos5 SoC series USB DRD PHY driver"
- depends on ARCH_EXYNOS && OF
+ depends on (ARCH_EXYNOS && OF) || COMPILE_TEST
depends on HAS_IOMEM
depends on USB_DWC3_EXYNOS
select GENERIC_PHY
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 174888609779..6dbe9d0b9ff3 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -4,7 +4,7 @@
#
config PHY_DA8XX_USB
tristate "TI DA8xx USB PHY Driver"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
help
@@ -14,7 +14,7 @@ config PHY_DA8XX_USB
config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
@@ -33,6 +33,22 @@ config PHY_AM654_SERDES
This option enables support for TI AM654 SerDes PHY used for
PCIe.
+config PHY_J721E_WIZ
+ tristate "TI J721E WIZ (SERDES Wrapper) support"
+ depends on OF && ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM && OF_ADDRESS
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ select MUX_MMIO
+ help
+	  This option enables support for the WIZ module present in TI's
+	  J721E SoC. WIZ is a SERDES wrapper used to configure some of the
+	  input signals to the SERDES (Sierra/Torrent). This driver
+	  configures three clock selects (pll0, pll1, dig) and resets for
+	  each of the lanes.
+
config OMAP_CONTROL_PHY
tristate "OMAP CONTROL PHY Driver"
depends on ARCH_OMAP2PLUS || COMPILE_TEST
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
index bff901eb0ecc..dcba2571c9bd 100644
--- a/drivers/phy/ti/Makefile
+++ b/drivers/phy/ti/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_AM654_SERDES) += phy-am654-serdes.o
obj-$(CONFIG_PHY_TI_GMII_SEL) += phy-gmii-sel.o
+obj-$(CONFIG_PHY_J721E_WIZ) += phy-j721e-wiz.o
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
new file mode 100644
index 000000000000..7b51045df783
--- /dev/null
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wrapper driver for SERDES used in J721E
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define WIZ_SERDES_CTRL 0x404
+#define WIZ_SERDES_TOP_CTRL 0x408
+#define WIZ_SERDES_RST 0x40c
+#define WIZ_SERDES_TYPEC 0x410
+#define WIZ_LANECTL(n) (0x480 + (0x40 * (n)))
+
+#define WIZ_MAX_LANES 4
+#define WIZ_MUX_NUM_CLOCKS 3
+#define WIZ_DIV_NUM_CLOCKS_16G 2
+#define WIZ_DIV_NUM_CLOCKS_10G 1
+
+#define WIZ_SERDES_TYPEC_LN10_SWAP BIT(30)
+
+enum wiz_lane_standard_mode {
+ LANE_MODE_GEN1,
+ LANE_MODE_GEN2,
+ LANE_MODE_GEN3,
+ LANE_MODE_GEN4,
+};
+
+enum wiz_refclk_mux_sel {
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+ REFCLK_DIG,
+};
+
+enum wiz_refclk_div_sel {
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+};
+
+static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
+static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field pll1_refclk_mux_sel =
+ REG_FIELD(WIZ_SERDES_RST, 29, 29);
+static const struct reg_field pll0_refclk_mux_sel =
+ REG_FIELD(WIZ_SERDES_RST, 28, 28);
+static const struct reg_field refclk_dig_sel_16g =
+ REG_FIELD(WIZ_SERDES_RST, 24, 25);
+static const struct reg_field refclk_dig_sel_10g =
+ REG_FIELD(WIZ_SERDES_RST, 24, 24);
+static const struct reg_field pma_cmn_refclk_int_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29);
+static const struct reg_field pma_cmn_refclk_mode =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31);
+static const struct reg_field pma_cmn_refclk_dig_div =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
+static const struct reg_field pma_cmn_refclk1_dig_div =
+ REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+
+static const struct reg_field p_enable[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 30, 31),
+ REG_FIELD(WIZ_LANECTL(1), 30, 31),
+ REG_FIELD(WIZ_LANECTL(2), 30, 31),
+ REG_FIELD(WIZ_LANECTL(3), 30, 31),
+};
+
+static const struct reg_field p_align[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 29, 29),
+ REG_FIELD(WIZ_LANECTL(1), 29, 29),
+ REG_FIELD(WIZ_LANECTL(2), 29, 29),
+ REG_FIELD(WIZ_LANECTL(3), 29, 29),
+};
+
+static const struct reg_field p_raw_auto_start[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 28, 28),
+ REG_FIELD(WIZ_LANECTL(1), 28, 28),
+ REG_FIELD(WIZ_LANECTL(2), 28, 28),
+ REG_FIELD(WIZ_LANECTL(3), 28, 28),
+};
+
+static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 24, 25),
+ REG_FIELD(WIZ_LANECTL(1), 24, 25),
+ REG_FIELD(WIZ_LANECTL(2), 24, 25),
+ REG_FIELD(WIZ_LANECTL(3), 24, 25),
+};
+
+static const struct reg_field typec_ln10_swap =
+ REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
+
+struct wiz_clk_mux {
+ struct clk_hw hw;
+ struct regmap_field *field;
+ u32 *table;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_clk_mux(_hw) container_of(_hw, struct wiz_clk_mux, hw)
+
+struct wiz_clk_divider {
+ struct clk_hw hw;
+ struct regmap_field *field;
+ struct clk_div_table *table;
+ struct clk_init_data clk_data;
+};
+
+#define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
+
+struct wiz_clk_mux_sel {
+ struct regmap_field *field;
+ u32 table[4];
+ const char *node_name;
+};
+
+struct wiz_clk_div_sel {
+ struct regmap_field *field;
+ struct clk_div_table *table;
+ const char *node_name;
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+ {
+ /*
+ * Mux value to be configured for each of the input clocks
+ * in the order populated in device tree
+ */
+ .table = { 1, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .table = { 1, 3, 0, 2 },
+ .node_name = "refclk-dig",
+ },
+};
+
+static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+ {
+ /*
+ * Mux value to be configured for each of the input clocks
+ * in the order populated in device tree
+ */
+ .table = { 1, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .table = { 1, 0 },
+ .node_name = "refclk-dig",
+ },
+};
+
+static struct clk_div_table clk_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 4, },
+ { .val = 3, .div = 8, },
+};
+
+static struct wiz_clk_div_sel clk_div_sel[] = {
+ {
+ .table = clk_div_table,
+ .node_name = "cmn-refclk-dig-div",
+ },
+ {
+ .table = clk_div_table,
+ .node_name = "cmn-refclk1-dig-div",
+ },
+};
+
+enum wiz_type {
+ J721E_WIZ_16G,
+ J721E_WIZ_10G,
+};
+
+#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */
+#define WIZ_TYPEC_DIR_DEBOUNCE_MAX 1000
+
+struct wiz {
+ struct regmap *regmap;
+ enum wiz_type type;
+ struct wiz_clk_mux_sel *clk_mux_sel;
+ struct wiz_clk_div_sel *clk_div_sel;
+ unsigned int clk_div_sel_num;
+ struct regmap_field *por_en;
+ struct regmap_field *phy_reset_n;
+ struct regmap_field *p_enable[WIZ_MAX_LANES];
+ struct regmap_field *p_align[WIZ_MAX_LANES];
+ struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES];
+ struct regmap_field *p_standard_mode[WIZ_MAX_LANES];
+ struct regmap_field *pma_cmn_refclk_int_mode;
+ struct regmap_field *pma_cmn_refclk_mode;
+ struct regmap_field *pma_cmn_refclk_dig_div;
+ struct regmap_field *pma_cmn_refclk1_dig_div;
+ struct regmap_field *typec_ln10_swap;
+
+ struct device *dev;
+ u32 num_lanes;
+ struct platform_device *serdes_pdev;
+ struct reset_controller_dev wiz_phy_reset_dev;
+ struct gpio_desc *gpio_typec_dir;
+	u32 typec_dir_delay;
+};
+
+static int wiz_reset(struct wiz *wiz)
+{
+ int ret;
+
+ ret = regmap_field_write(wiz->por_en, 0x1);
+ if (ret)
+ return ret;
+
+ mdelay(1);
+
+ ret = regmap_field_write(wiz->por_en, 0x0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int wiz_mode_select(struct wiz *wiz)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int ret;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = regmap_field_write(wiz->p_standard_mode[i],
+ LANE_MODE_GEN4);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_init_raw_interface(struct wiz *wiz, bool enable)
+{
+ u32 num_lanes = wiz->num_lanes;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = regmap_field_write(wiz->p_align[i], enable);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(wiz->p_raw_auto_start[i], enable);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_init(struct wiz *wiz)
+{
+ struct device *dev = wiz->dev;
+ int ret;
+
+ ret = wiz_reset(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ reset failed\n");
+ return ret;
+ }
+
+ ret = wiz_mode_select(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ mode select failed\n");
+ return ret;
+ }
+
+ ret = wiz_init_raw_interface(wiz, true);
+ if (ret) {
+ dev_err(dev, "WIZ interface initialization failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wiz_regfield_init(struct wiz *wiz)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel;
+ struct wiz_clk_div_sel *clk_div_sel;
+ struct regmap *regmap = wiz->regmap;
+ int num_lanes = wiz->num_lanes;
+ struct device *dev = wiz->dev;
+ int i;
+
+ wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
+ if (IS_ERR(wiz->por_en)) {
+ dev_err(dev, "POR_EN reg field init failed\n");
+ return PTR_ERR(wiz->por_en);
+ }
+
+ wiz->phy_reset_n = devm_regmap_field_alloc(dev, regmap,
+ phy_reset_n);
+ if (IS_ERR(wiz->phy_reset_n)) {
+ dev_err(dev, "PHY_RESET_N reg field init failed\n");
+ return PTR_ERR(wiz->phy_reset_n);
+ }
+
+ wiz->pma_cmn_refclk_int_mode =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_int_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk_int_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK_INT_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk_int_mode);
+ }
+
+ wiz->pma_cmn_refclk_mode =
+ devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk_mode);
+ }
+
+ clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
+ clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pma_cmn_refclk_dig_div);
+ if (IS_ERR(clk_div_sel->field)) {
+ dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
+ return PTR_ERR(clk_div_sel->field);
+ }
+
+ if (wiz->type == J721E_WIZ_16G) {
+ clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
+ clk_div_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ pma_cmn_refclk1_dig_div);
+ if (IS_ERR(clk_div_sel->field)) {
+ dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
+ return PTR_ERR(clk_div_sel->field);
+ }
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
+ clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pll0_refclk_mux_sel);
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
+ clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
+ pll1_refclk_mux_sel);
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
+ if (wiz->type == J721E_WIZ_10G)
+ clk_mux_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ refclk_dig_sel_10g);
+ else
+ clk_mux_sel->field =
+ devm_regmap_field_alloc(dev, regmap,
+ refclk_dig_sel_16g);
+
+ if (IS_ERR(clk_mux_sel->field)) {
+ dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
+ return PTR_ERR(clk_mux_sel->field);
+ }
+
+ for (i = 0; i < num_lanes; i++) {
+ wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
+ p_enable[i]);
+ if (IS_ERR(wiz->p_enable[i])) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(wiz->p_enable[i]);
+ }
+
+ wiz->p_align[i] = devm_regmap_field_alloc(dev, regmap,
+ p_align[i]);
+ if (IS_ERR(wiz->p_align[i])) {
+ dev_err(dev, "P%d_ALIGN reg field init failed\n", i);
+ return PTR_ERR(wiz->p_align[i]);
+ }
+
+ wiz->p_raw_auto_start[i] =
+ devm_regmap_field_alloc(dev, regmap, p_raw_auto_start[i]);
+ if (IS_ERR(wiz->p_raw_auto_start[i])) {
+ dev_err(dev, "P%d_RAW_AUTO_START reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_raw_auto_start[i]);
+ }
+
+ wiz->p_standard_mode[i] =
+ devm_regmap_field_alloc(dev, regmap, p_standard_mode[i]);
+ if (IS_ERR(wiz->p_standard_mode[i])) {
+ dev_err(dev, "P%d_STANDARD_MODE reg field init fail\n",
+ i);
+ return PTR_ERR(wiz->p_standard_mode[i]);
+ }
+ }
+
+ wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
+ typec_ln10_swap);
+ if (IS_ERR(wiz->typec_ln10_swap)) {
+ dev_err(dev, "LN10_SWAP reg field init failed\n");
+ return PTR_ERR(wiz->typec_ln10_swap);
+ }
+
+ return 0;
+}
+
+static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+ struct regmap_field *field = mux->field;
+ unsigned int val;
+
+ regmap_field_read(field, &val);
+ return clk_mux_val_to_index(hw, mux->table, 0, val);
+}
+
+static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct wiz_clk_mux *mux = to_wiz_clk_mux(hw);
+ struct regmap_field *field = mux->field;
+ int val;
+
+ val = mux->table[index];
+ return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_mux_ops = {
+ .set_parent = wiz_clk_mux_set_parent,
+ .get_parent = wiz_clk_mux_get_parent,
+};
+
+static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field, u32 *table)
+{
+ struct device *dev = wiz->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct wiz_clk_mux *mux;
+ char clk_name[100];
+ struct clk *clk;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+		dev_err(dev, "SERDES clock must have at least two parents\n");
+ return -EINVAL;
+ }
+
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
+ GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, num_parents);
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ node->name);
+
+ init = &mux->clk_data;
+
+ init->ops = &wiz_clk_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->field = field;
+ mux->table = table;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+ return ret;
+}
+
+static unsigned long wiz_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+ struct regmap_field *field = div->field;
+	unsigned int val;
+
+ regmap_field_read(field, &val);
+
+ return divider_recalc_rate(hw, parent_rate, val, div->table, 0x0, 2);
+}
+
+static long wiz_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+
+ return divider_round_rate(hw, rate, prate, div->table, 2, 0x0);
+}
+
+static int wiz_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct wiz_clk_divider *div = to_wiz_clk_div(hw);
+ struct regmap_field *field = div->field;
+ int val;
+
+ val = divider_get_val(rate, parent_rate, div->table, 2, 0x0);
+ if (val < 0)
+ return val;
+
+ return regmap_field_write(field, val);
+}
+
+static const struct clk_ops wiz_clk_div_ops = {
+ .recalc_rate = wiz_clk_div_recalc_rate,
+ .round_rate = wiz_clk_div_round_rate,
+ .set_rate = wiz_clk_div_set_rate,
+};
+
+static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
+ struct regmap_field *field,
+ struct clk_div_table *table)
+{
+ struct device *dev = wiz->dev;
+ struct wiz_clk_divider *div;
+ struct clk_init_data *init;
+ const char **parent_names;
+ char clk_name[100];
+ struct clk *clk;
+ int ret;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ node->name);
+
+ parent_names = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, 1);
+
+ init = &div->clk_data;
+
+ init->ops = &wiz_clk_div_ops;
+ init->flags = 0;
+ init->parent_names = parent_names;
+ init->num_parents = 1;
+ init->name = clk_name;
+
+ div->field = field;
+ div->table = table;
+ div->hw.init = init;
+
+ clk = devm_clk_register(dev, &div->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", clk_name);
+
+ return ret;
+}
+
+static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device_node *clk_node;
+ int i;
+
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+ clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
+ of_clk_del_provider(clk_node);
+ of_node_put(clk_node);
+ }
+}
+
+static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+{
+ struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+ struct device *dev = wiz->dev;
+ struct device_node *clk_node;
+ const char *node_name;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+ int i;
+
+ clk = devm_clk_get(dev, "core_ref_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "core_ref_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
+
+ clk = devm_clk_get(dev, "ext_ref_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "ext_ref_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
+
+ for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
+ node_name = clk_mux_sel[i].node_name;
+ clk_node = of_get_child_by_name(node, node_name);
+ if (!clk_node) {
+ dev_err(dev, "Unable to get %s node\n", node_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
+ clk_mux_sel[i].table);
+ if (ret) {
+ dev_err(dev, "Failed to register %s clock\n",
+ node_name);
+ of_node_put(clk_node);
+ goto err;
+ }
+
+ of_node_put(clk_node);
+ }
+
+ for (i = 0; i < wiz->clk_div_sel_num; i++) {
+ node_name = clk_div_sel[i].node_name;
+ clk_node = of_get_child_by_name(node, node_name);
+ if (!clk_node) {
+ dev_err(dev, "Unable to get %s node\n", node_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+ clk_div_sel[i].table);
+ if (ret) {
+ dev_err(dev, "Failed to register %s clock\n",
+ node_name);
+ of_node_put(clk_node);
+ goto err;
+ }
+
+ of_node_put(clk_node);
+ }
+
+ return 0;
+err:
+ wiz_clock_cleanup(wiz, node);
+
+ return ret;
+}
+
+static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct device *dev = rcdev->dev;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (id == 0) {
+ ret = regmap_field_write(wiz->phy_reset_n, false);
+ return ret;
+ }
+
+ ret = regmap_field_write(wiz->p_enable[id - 1], false);
+ return ret;
+}
+
+static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct device *dev = rcdev->dev;
+ struct wiz *wiz = dev_get_drvdata(dev);
+ int ret;
+
+ /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
+ if (id == 0 && wiz->gpio_typec_dir) {
+ if (wiz->typec_dir_delay)
+ msleep_interruptible(wiz->typec_dir_delay);
+
+ if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ else
+ regmap_field_write(wiz->typec_ln10_swap, 0);
+ }
+
+ if (id == 0) {
+ ret = regmap_field_write(wiz->phy_reset_n, true);
+ return ret;
+ }
+
+ ret = regmap_field_write(wiz->p_enable[id - 1], true);
+ return ret;
+}
+
+static const struct reset_control_ops wiz_phy_reset_ops = {
+ .assert = wiz_phy_reset_assert,
+ .deassert = wiz_phy_reset_deassert,
+};
+
+static struct regmap_config wiz_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static const struct of_device_id wiz_id_table[] = {
+ {
+ .compatible = "ti,j721e-wiz-16g", .data = (void *)J721E_WIZ_16G
+ },
+ {
+ .compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wiz_id_table);
+
+static int wiz_probe(struct platform_device *pdev)
+{
+ struct reset_controller_dev *phy_reset_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *serdes_pdev;
+ struct device_node *child_node;
+ struct regmap *regmap;
+ struct resource res;
+ void __iomem *base;
+ struct wiz *wiz;
+ u32 num_lanes;
+ int ret;
+
+ wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
+ if (!wiz)
+ return -ENOMEM;
+
+ wiz->type = (enum wiz_type)of_device_get_match_data(dev);
+
+ child_node = of_get_child_by_name(node, "serdes");
+ if (!child_node) {
+ dev_err(dev, "Failed to get SERDES child DT node\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(child_node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get memory resource\n");
+ goto err_addr_to_resource;
+ }
+
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+	if (!base) {
+		ret = -ENOMEM;
+		goto err_addr_to_resource;
+	}
+
+ regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ ret = PTR_ERR(regmap);
+ goto err_addr_to_resource;
+ }
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret) {
+ dev_err(dev, "Failed to read num-lanes property\n");
+ goto err_addr_to_resource;
+ }
+
+	if (num_lanes > WIZ_MAX_LANES) {
+		dev_err(dev, "Cannot support %d lanes\n", num_lanes);
+		ret = -EINVAL;
+		goto err_addr_to_resource;
+	}
+
+ wiz->gpio_typec_dir = devm_gpiod_get_optional(dev, "typec-dir",
+ GPIOD_IN);
+ if (IS_ERR(wiz->gpio_typec_dir)) {
+ ret = PTR_ERR(wiz->gpio_typec_dir);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request typec-dir gpio: %d\n",
+ ret);
+ goto err_addr_to_resource;
+ }
+
+ if (wiz->gpio_typec_dir) {
+ ret = of_property_read_u32(node, "typec-dir-debounce-ms",
+ &wiz->typec_dir_delay);
+ if (ret && ret != -EINVAL) {
+			dev_err(dev, "Invalid typec-dir-debounce-ms property\n");
+ goto err_addr_to_resource;
+ }
+
+ /* use min. debounce from Type-C spec if not provided in DT */
+ if (ret == -EINVAL)
+ wiz->typec_dir_delay = WIZ_TYPEC_DIR_DEBOUNCE_MIN;
+
+ if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
+ wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+			dev_err(dev, "Invalid typec-dir-debounce-ms property\n");
+ goto err_addr_to_resource;
+ }
+ }
+
+ wiz->dev = dev;
+ wiz->regmap = regmap;
+ wiz->num_lanes = num_lanes;
+ if (wiz->type == J721E_WIZ_10G)
+ wiz->clk_mux_sel = clk_mux_sel_10g;
+ else
+ wiz->clk_mux_sel = clk_mux_sel_16g;
+
+ wiz->clk_div_sel = clk_div_sel;
+
+ if (wiz->type == J721E_WIZ_10G)
+ wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
+ else
+ wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
+
+ platform_set_drvdata(pdev, wiz);
+
+ ret = wiz_regfield_init(wiz);
+ if (ret) {
+ dev_err(dev, "Failed to initialize regfields\n");
+ goto err_addr_to_resource;
+ }
+
+ phy_reset_dev = &wiz->wiz_phy_reset_dev;
+ phy_reset_dev->dev = dev;
+	phy_reset_dev->ops = &wiz_phy_reset_ops;
+	phy_reset_dev->owner = THIS_MODULE;
+ phy_reset_dev->of_node = node;
+	/* Reset for each of the lanes and one for the entire SERDES */
+ phy_reset_dev->nr_resets = num_lanes + 1;
+
+ ret = devm_reset_controller_register(dev, phy_reset_dev);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to register reset controller\n");
+ goto err_addr_to_resource;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ ret = wiz_clock_init(wiz, node);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to initialize clocks\n");
+ goto err_get_sync;
+ }
+
+ serdes_pdev = of_platform_device_create(child_node, NULL, dev);
+	if (!serdes_pdev) {
+		dev_WARN(dev, "Unable to create SERDES platform device\n");
+		ret = -ENODEV;
+		goto err_pdev_create;
+	}
+ wiz->serdes_pdev = serdes_pdev;
+
+ ret = wiz_init(wiz);
+ if (ret) {
+ dev_err(dev, "WIZ initialization failed\n");
+ goto err_wiz_init;
+ }
+
+ of_node_put(child_node);
+ return 0;
+
+err_wiz_init:
+ of_platform_device_destroy(&serdes_pdev->dev, NULL);
+
+err_pdev_create:
+ wiz_clock_cleanup(wiz, node);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+err_addr_to_resource:
+ of_node_put(child_node);
+
+ return ret;
+}
+
+static int wiz_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *serdes_pdev;
+ struct wiz *wiz;
+
+ wiz = dev_get_drvdata(dev);
+ serdes_pdev = wiz->serdes_pdev;
+
+ of_platform_device_destroy(&serdes_pdev->dev, NULL);
+ wiz_clock_cleanup(wiz, node);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static struct platform_driver wiz_driver = {
+ .probe = wiz_probe,
+ .remove = wiz_remove,
+ .driver = {
+ .name = "wiz",
+ .of_match_table = wiz_id_table,
+ },
+};
+module_platform_driver(wiz_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("TI J721E WIZ driver");
+MODULE_LICENSE("GPL v2");
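
The reset controller registered in wiz_probe() exposes num_lanes + 1 controls: id 0 toggles PHY_RESET_N for the entire SERDES, while id n drives P(n-1)_ENABLE for lane n - 1. A hedged sketch of how a consumer might take and release the whole-SERDES control through the generic reset API — the function and naming here are assumptions for illustration, not code from this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical consumer; deasserting control 0 ends up in
 * wiz_phy_reset_deassert() above. */
static int example_release_serdes(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_deassert(rst);
}
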
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index edd6859afba8..a87946589eb7 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -850,6 +850,12 @@ static int ti_pipe3_probe(struct platform_device *pdev)
static int ti_pipe3_remove(struct platform_device *pdev)
{
+ struct ti_pipe3 *phy = platform_get_drvdata(pdev);
+
+ if (phy->mode == PIPE3_MODE_SATA) {
+ clk_disable_unprepare(phy->refclk);
+ phy->sata_refclk_enabled = false;
+ }
pm_runtime_disable(&pdev->dev);
return 0;
@@ -900,18 +906,8 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
{
if (!IS_ERR(phy->wkupclk))
clk_disable_unprepare(phy->wkupclk);
- if (!IS_ERR(phy->refclk)) {
+ if (!IS_ERR(phy->refclk))
clk_disable_unprepare(phy->refclk);
- /*
- * SATA refclk needs an additional disable as we left it
- * on in probe to avoid Errata i783
- */
- if (phy->sata_refclk_enabled) {
- clk_disable_unprepare(phy->refclk);
- phy->sata_refclk_enabled = false;
- }
- }
-
if (!IS_ERR(phy->div_clk))
clk_disable_unprepare(phy->div_clk);
}
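
The two hunks above make the SATA refclk bookkeeping symmetric: the extra enable taken in probe to avoid erratum i783 is now dropped exactly once, in remove, rather than inside the shared disable path, which runs on every power-off and so could drop that reference more than once. A minimal sketch of the pairing rule, with hypothetical names:

#include <linux/clk.h>

/* Every clk_prepare_enable() needs exactly one matching
 * clk_disable_unprepare(); the probe-time extra reference is
 * released once, outside the common disable path. */
struct foo_phy {
	struct clk *refclk;
	bool refclk_boosted;	/* extra reference taken at probe */
};

static void foo_phy_teardown(struct foo_phy *phy)
{
	if (phy->refclk_boosted) {
		clk_disable_unprepare(phy->refclk);
		phy->refclk_boosted = false;
	}
}
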
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 3bfbf2ff6e2b..df0ef69dd474 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -422,6 +422,7 @@ config PINCTRL_TB10X
config PINCTRL_EQUILIBRIUM
tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ depends on OF && HAS_IOMEM
select PINMUX
select PINCONF
select GPIOLIB
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 8b8121e35edb..771d6fd50b45 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1583,7 +1583,6 @@ static const struct owl_pinmux_func s700_functions[] = {
[S700_MUX_USB30] = FUNCTION(usb30),
[S700_MUX_CLKO_25M] = FUNCTION(clko_25m),
[S700_MUX_MIPI_CSI] = FUNCTION(mipi_csi),
- [S700_MUX_DSI] = FUNCTION(dsi),
[S700_MUX_NAND] = FUNCTION(nand),
[S700_MUX_SPDIF] = FUNCTION(spdif),
[S700_MUX_SIRQ0] = FUNCTION(sirq0),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 95ea593fa29d..bfed0e274643 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2439,88 +2439,88 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
static const struct aspeed_pin_config aspeed_g4_configs[] = {
/* GPIO banks ranges [A, B], [D, J], [M, R] */
- { PIN_CONFIG_BIAS_PULL_DOWN, { D6, D5 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { D6, D5 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { J21, E18 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { J21, E18 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A18, E15 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { A18, E15 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D15, B14 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { D15, B14 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D18, C17 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { D18, C17 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A14, U18 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { A14, U18 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A8, E7 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { A8, E7 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { C22, E20 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_DISABLE, { C22, E20 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { J5, T1 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_DISABLE, { J5, T1 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { U1, U5 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_DISABLE, { U1, U5 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V3, V5 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_DISABLE, { V3, V5 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { W4, AB2 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_DISABLE, { W4, AB2 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V6, V7 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_DISABLE, { V6, V7 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y6, AB7 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_DISABLE, { Y6, AB7 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V20, A5 }, SCU8C, 31 },
- { PIN_CONFIG_BIAS_DISABLE, { V20, A5 }, SCU8C, 31 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D6, D5, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D6, D5, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J21, E18, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J21, E18, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, E15, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A18, E15, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D15, B14, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D15, B14, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D18, C17, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D18, C17, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A14, U18, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A14, U18, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A8, E7, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A8, E7, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C22, E20, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C22, E20, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J5, T1, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J5, T1, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U1, U5, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, U1, U5, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V3, V5, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V3, V5, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, W4, AB2, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, W4, AB2, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V6, V7, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V6, V7, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y6, AB7, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y6, AB7, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, A5, SCU8C, 31),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V20, A5, SCU8C, 31),
/* GPIOs T[0-5] (RGMII1 Tx pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { A12, A13 }, SCU90, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A12, A13 }, SCU90, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { A12, A13 }, SCU90, 12 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, A12, A13, SCU90, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A12, A13, SCU90, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A12, A13, SCU90, 12),
/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { D9, D10 }, SCU90, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { D9, D10 }, SCU90, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { D9, D10 }, SCU90, 14 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, D9, D10, SCU90, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, D9, D10, SCU90, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, D9, D10, SCU90, 14),
/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { E11, E10 }, SCU90, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { E11, E10 }, SCU90, 13 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E11, E10, SCU90, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E11, E10, SCU90, 13),
/* GPIOs V[2-7] (RGMII2 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C9, C8 }, SCU90, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { C9, C8 }, SCU90, 15 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C9, C8, SCU90, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C9, C8, SCU90, 15),
/* ADC pull-downs (SCUA8[19:4]) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { L5, L5 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_DISABLE, { L5, L5 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L4, L4 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_DISABLE, { L4, L4 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L3, L3 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_DISABLE, { L3, L3 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L2, L2 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_DISABLE, { L2, L2 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L1, L1 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_DISABLE, { L1, L1 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M5, M5 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_DISABLE, { M5, M5 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M4, M4 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_DISABLE, { M4, M4 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M3, M3 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_DISABLE, { M3, M3 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M2, M2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { M2, M2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { M1, M1 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { M1, M1 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N5, N5 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { N5, N5 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N4, N4 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { N4, N4 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N3, N3 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { N3, N3 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N2, N2 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { N2, N2 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N1, N1 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { N1, N1 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { P5, P5 }, SCUA8, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { P5, P5 }, SCUA8, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L5, L5, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L5, L5, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L4, L4, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L4, L4, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3, L3, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L3, L3, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L2, L2, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L2, L2, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L1, L1, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L1, L1, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M5, M5, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M5, M5, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M4, M4, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M4, M4, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M3, M3, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M3, M3, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M2, M2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M2, M2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, M1, M1, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, M1, M1, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N5, N5, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N5, N5, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N4, N4, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N4, N4, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N3, N3, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N3, N3, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N2, N2, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N2, N2, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N1, N1, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N1, N1, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, P5, P5, SCUA8, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, P5, P5, SCUA8, 19),
/*
* Debounce settings for GPIOs D and E passthrough mode are in
@@ -2531,14 +2531,14 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = {
* controller. Due to this tangle between GPIO and pinctrl we don't yet
* fully support pass-through debounce.
*/
- { PIN_CONFIG_INPUT_DEBOUNCE, { A18, D16 }, SCUA8, 20 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B17, A17 }, SCUA8, 21 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { C16, B16 }, SCUA8, 22 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { A16, E15 }, SCUA8, 23 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { D15, C15 }, SCUA8, 24 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B15, A15 }, SCUA8, 25 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { E14, D14 }, SCUA8, 26 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A18, D16, SCUA8, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B17, A17, SCUA8, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C16, B16, SCUA8, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A16, E15, SCUA8, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D15, C15, SCUA8, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B15, A15, SCUA8, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E14, D14, SCUA8, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, C14, B14, SCUA8, 27),
};
static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
@@ -2594,6 +2594,14 @@ static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g4_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 0, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
static const struct aspeed_pinmux_ops aspeed_g4_ops = {
.set = aspeed_g4_sig_expr_set,
};
@@ -2610,6 +2618,8 @@ static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
},
.configs = aspeed_g4_configs,
.nconfigs = ARRAY_SIZE(aspeed_g4_configs),
+ .confmaps = aspeed_g4_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g4_pin_config_map),
};
static const struct pinmux_ops aspeed_g4_pinmux_ops = {
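
The aspeed_g4_pin_config_map table added above (mirrored for the g5 below) pairs a pinconf parameter and argument with the register value to program under a bit mask, with -1 in the argument column acting as a wildcard. A hedged sketch of the lookup such a table enables — the type and function here are assumptions for illustration, not the driver's actual definitions:

/* (param, arg) -> (val, mask); arg == -1 matches any argument. */
struct pin_config_map {
	unsigned int param;
	int arg;
	unsigned int val;
	unsigned long mask;
};

static const struct pin_config_map *
pin_config_map_find(const struct pin_config_map *maps, unsigned int nmaps,
		    unsigned int param, int arg)
{
	unsigned int i;

	for (i = 0; i < nmaps; i++)
		if (maps[i].param == param &&
		    (maps[i].arg == -1 || maps[i].arg == arg))
			return &maps[i];

	return NULL;
}
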
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index d8a804b9f958..0cab4c2576e2 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2476,124 +2476,124 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
static struct aspeed_pin_config aspeed_g5_configs[] = {
/* GPIOA, GPIOQ */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B14, B13 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { B14, B13 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { A11, N20 }, SCU8C, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { A11, N20 }, SCU8C, 16 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B14, B13, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B14, B13, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A11, N20, SCU8C, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A11, N20, SCU8C, 16),
/* GPIOB, GPIOR */
- { PIN_CONFIG_BIAS_PULL_DOWN, { K19, H20 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { K19, H20 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { AA19, E10 }, SCU8C, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { AA19, E10 }, SCU8C, 17 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, K19, H20, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, K19, H20, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, AA19, E10, SCU8C, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, AA19, E10, SCU8C, 17),
/* GPIOC, GPIOS*/
- { PIN_CONFIG_BIAS_PULL_DOWN, { C12, B11 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { C12, B11 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V20, AA20 }, SCU8C, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { V20, AA20 }, SCU8C, 18 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C12, B11, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C12, B11, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V20, AA20, SCU8C, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V20, AA20, SCU8C, 18),
/* GPIOD, GPIOY */
- { PIN_CONFIG_BIAS_PULL_DOWN, { F19, C21 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { F19, C21 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { R22, P20 }, SCU8C, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { R22, P20 }, SCU8C, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F19, C21, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F19, C21, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R22, P20, SCU8C, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, R22, P20, SCU8C, 19),
/* GPIOE, GPIOZ */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B20, B19 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { B20, B19 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y20, W21 }, SCU8C, 20 },
- { PIN_CONFIG_BIAS_DISABLE, { Y20, W21 }, SCU8C, 20 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B20, B19, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B20, B19, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y20, W21, SCU8C, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y20, W21, SCU8C, 20),
/* GPIOF, GPIOAA */
- { PIN_CONFIG_BIAS_PULL_DOWN, { J19, H18 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { J19, H18 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y21, P19 }, SCU8C, 21 },
- { PIN_CONFIG_BIAS_DISABLE, { Y21, P19 }, SCU8C, 21 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, J19, H18, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, J19, H18, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y21, P19, SCU8C, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y21, P19, SCU8C, 21),
- /* GPIOG, GPIOAB */
- { PIN_CONFIG_BIAS_PULL_DOWN, { A19, E14 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { A19, E14 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { N19, R20 }, SCU8C, 22 },
- { PIN_CONFIG_BIAS_DISABLE, { N19, R20 }, SCU8C, 22 },
+ /* GPIOG, GPIOAB */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A19, E14, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A19, E14, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, N19, R20, SCU8C, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, N19, R20, SCU8C, 22),
/* GPIOH, GPIOAC */
- { PIN_CONFIG_BIAS_PULL_DOWN, { A18, D18 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { A18, D18 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G21, G22 }, SCU8C, 23 },
- { PIN_CONFIG_BIAS_DISABLE, { G21, G22 }, SCU8C, 23 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, A18, D18, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, A18, D18, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G21, G22, SCU8C, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G21, G22, SCU8C, 23),
/* GPIOs [I, P] */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C18, A15 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_DISABLE, { C18, A15 }, SCU8C, 24 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { R2, T3 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_DISABLE, { R2, T3 }, SCU8C, 25 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { L3, R1 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_DISABLE, { L3, R1 }, SCU8C, 26 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { T2, W1 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_DISABLE, { T2, W1 }, SCU8C, 27 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { Y1, T5 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_DISABLE, { Y1, T5 }, SCU8C, 28 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V2, T4 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_DISABLE, { V2, T4 }, SCU8C, 29 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { U5, W4 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_DISABLE, { U5, W4 }, SCU8C, 30 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { V4, V6 }, SCU8C, 31 },
- { PIN_CONFIG_BIAS_DISABLE, { V4, V6 }, SCU8C, 31 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C18, A15, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C18, A15, SCU8C, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, R2, T3, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, R2, T3, SCU8C, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, L3, R1, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, L3, R1, SCU8C, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, T2, W1, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, T2, W1, SCU8C, 27),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1, T5, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y1, T5, SCU8C, 28),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V2, T4, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V2, T4, SCU8C, 29),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, U5, W4, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, U5, W4, SCU8C, 30),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, V4, V6, SCU8C, 31),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, V4, V6, SCU8C, 31),
/* GPIOs T[0-5] (RGMII1 Tx pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { B5, B5 }, SCU90, 8 },
- { PIN_CONFIG_DRIVE_STRENGTH, { E9, A5 }, SCU90, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { B5, D7 }, SCU90, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { B5, D7 }, SCU90, 12 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B5, B5, SCU90, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, E9, A5, SCU90, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B5, D7, SCU90, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B5, D7, SCU90, 12),
/* GPIOs T[6-7], U[0-3] (RGMII2 TX pins) */
- { PIN_CONFIG_DRIVE_STRENGTH, { B2, B2 }, SCU90, 10 },
- { PIN_CONFIG_DRIVE_STRENGTH, { B1, B3 }, SCU90, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { B2, D4 }, SCU90, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { B2, D4 }, SCU90, 14 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B2, B2, SCU90, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_DRIVE_STRENGTH, B1, B3, SCU90, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B2, D4, SCU90, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B2, D4, SCU90, 14),
/* GPIOs U[4-7], V[0-1] (RGMII1 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { B4, C4 }, SCU90, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { B4, C4 }, SCU90, 13 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B4, C4, SCU90, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B4, C4, SCU90, 13),
/* GPIOs V[2-7] (RGMII2 Rx pins) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { C2, E6 }, SCU90, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { C2, E6 }, SCU90, 15 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C2, E6, SCU90, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C2, E6, SCU90, 15),
/* ADC pull-downs (SCUA8[19:4]) */
- { PIN_CONFIG_BIAS_PULL_DOWN, { F4, F4 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_DISABLE, { F4, F4 }, SCUA8, 4 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F5, F5 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_DISABLE, { F5, F5 }, SCUA8, 5 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E2, E2 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_DISABLE, { E2, E2 }, SCUA8, 6 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E1, E1 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_DISABLE, { E1, E1 }, SCUA8, 7 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F3, F3 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_DISABLE, { F3, F3 }, SCUA8, 8 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { E3, E3 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_DISABLE, { E3, E3 }, SCUA8, 9 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G5, G5 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_DISABLE, { G5, G5 }, SCUA8, 10 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G4, G4 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_DISABLE, { G4, G4 }, SCUA8, 11 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F2, F2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_DISABLE, { F2, F2 }, SCUA8, 12 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G3, G3 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_DISABLE, { G3, G3 }, SCUA8, 13 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G2, G2 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_DISABLE, { G2, G2 }, SCUA8, 14 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { F1, F1 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_DISABLE, { F1, F1 }, SCUA8, 15 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H5, H5 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_DISABLE, { H5, H5 }, SCUA8, 16 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { G1, G1 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_DISABLE, { G1, G1 }, SCUA8, 17 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H3, H3 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_DISABLE, { H3, H3 }, SCUA8, 18 },
- { PIN_CONFIG_BIAS_PULL_DOWN, { H4, H4 }, SCUA8, 19 },
- { PIN_CONFIG_BIAS_DISABLE, { H4, H4 }, SCUA8, 19 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F4, F4, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F4, F4, SCUA8, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F5, F5, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F5, F5, SCUA8, 5),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E2, E2, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E2, E2, SCUA8, 6),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E1, E1, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E1, E1, SCUA8, 7),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F3, F3, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F3, F3, SCUA8, 8),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E3, E3, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E3, E3, SCUA8, 9),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G5, G5, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G5, G5, SCUA8, 10),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G4, G4, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G4, G4, SCUA8, 11),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F2, F2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F2, F2, SCUA8, 12),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G3, G3, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G3, G3, SCUA8, 13),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G2, G2, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G2, G2, SCUA8, 14),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, F1, F1, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, F1, F1, SCUA8, 15),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H5, H5, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H5, H5, SCUA8, 16),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, G1, G1, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, G1, G1, SCUA8, 17),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H3, H3, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H3, H3, SCUA8, 18),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, H4, H4, SCUA8, 19),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, H4, H4, SCUA8, 19),
/*
* Debounce settings for GPIOs D and E passthrough mode are in
@@ -2604,14 +2604,14 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
* controller. Due to this tangle between GPIO and pinctrl we don't yet
* fully support pass-through debounce.
*/
- { PIN_CONFIG_INPUT_DEBOUNCE, { F19, E21 }, SCUA8, 20 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { F20, D20 }, SCUA8, 21 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { D21, E20 }, SCUA8, 22 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { G18, C21 }, SCUA8, 23 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { B20, C20 }, SCUA8, 24 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { F18, F17 }, SCUA8, 25 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { E18, D19 }, SCUA8, 26 },
- { PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 },
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F19, E21, SCUA8, 20),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F20, D20, SCUA8, 21),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, D21, E20, SCUA8, 22),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, G18, C21, SCUA8, 23),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, B20, C20, SCUA8, 24),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, F18, F17, SCUA8, 25),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, E18, D19, SCUA8, 26),
+ ASPEED_SB_PINCONF(PIN_CONFIG_INPUT_DEBOUNCE, A20, B19, SCUA8, 27),
};
static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
@@ -2780,6 +2780,14 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g5_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 0, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 1, BIT_MASK(0)},
+};
+
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
.eval = aspeed_g5_sig_expr_eval,
.set = aspeed_g5_sig_expr_set,
@@ -2797,6 +2805,8 @@ static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
},
.configs = aspeed_g5_configs,
.nconfigs = ARRAY_SIZE(aspeed_g5_configs),
+ .confmaps = aspeed_g5_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g5_pin_config_map),
};
static const struct pinmux_ops aspeed_g5_pinmux_ops = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index c6800d220920..fa32c3e9c9d1 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -26,7 +26,10 @@
#define SCU430 0x430 /* Multi-function Pin Control #8 */
#define SCU434 0x434 /* Multi-function Pin Control #9 */
#define SCU438 0x438 /* Multi-function Pin Control #10 */
+#define SCU440 0x440 /* USB Multi-function Pin Control #12 */
#define SCU450 0x450 /* Multi-function Pin Control #14 */
+#define SCU454 0x454 /* Multi-function Pin Control #15 */
+#define SCU458 0x458 /* Multi-function Pin Control #16 */
#define SCU4B0 0x4B0 /* Multi-function Pin Control #17 */
#define SCU4B4 0x4B4 /* Multi-function Pin Control #18 */
#define SCU4B8 0x4B8 /* Multi-function Pin Control #19 */
@@ -35,9 +38,17 @@
#define SCU4D8 0x4D8 /* Multi-function Pin Control #23 */
#define SCU500 0x500 /* Hardware Strap 1 */
#define SCU510 0x510 /* Hardware Strap 2 */
+#define SCU610 0x610 /* Disable GPIO Internal Pull-Down #0 */
+#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
+#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
+#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
+#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
+#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU694 0x694 /* Multi-function Pin Control #25 */
+#define SCUC20 0xC20 /* PCIE configuration Setting Control */
-#define ASPEED_G6_NR_PINS 248
+#define ASPEED_G6_NR_PINS 256
#define M24 0
SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0));
@@ -1088,60 +1099,52 @@ SSSF_PIN_DECL(AF15, GPIOV7, LPCSMI, SIG_DESC_SET(SCU434, 15));
#define AB7 176
SIG_EXPR_LIST_DECL_SESG(AB7, LAD0, LPC, SIG_DESC_SET(SCU434, 16),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16));
PIN_DECL_2(AB7, GPIOW0, LAD0, ESPID0);
#define AB8 177
SIG_EXPR_LIST_DECL_SESG(AB8, LAD1, LPC, SIG_DESC_SET(SCU434, 17),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17));
PIN_DECL_2(AB8, GPIOW1, LAD1, ESPID1);
#define AC8 178
SIG_EXPR_LIST_DECL_SESG(AC8, LAD2, LPC, SIG_DESC_SET(SCU434, 18),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18));
PIN_DECL_2(AC8, GPIOW2, LAD2, ESPID2);
#define AC7 179
SIG_EXPR_LIST_DECL_SESG(AC7, LAD3, LPC, SIG_DESC_SET(SCU434, 19),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19));
PIN_DECL_2(AC7, GPIOW3, LAD3, ESPID3);
#define AE7 180
SIG_EXPR_LIST_DECL_SESG(AE7, LCLK, LPC, SIG_DESC_SET(SCU434, 20),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20));
PIN_DECL_2(AE7, GPIOW4, LCLK, ESPICK);
#define AF7 181
SIG_EXPR_LIST_DECL_SESG(AF7, LFRAME, LPC, SIG_DESC_SET(SCU434, 21),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21));
PIN_DECL_2(AF7, GPIOW5, LFRAME, ESPICS);
#define AD7 182
SIG_EXPR_LIST_DECL_SESG(AD7, LSIRQ, LSIRQ, SIG_DESC_SET(SCU434, 22),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22));
PIN_DECL_2(AD7, GPIOW6, LSIRQ, ESPIALT);
FUNC_GROUP_DECL(LSIRQ, AD7);
FUNC_GROUP_DECL(ESPIALT, AD7);
#define AD8 183
SIG_EXPR_LIST_DECL_SESG(AD8, LPCRST, LPC, SIG_DESC_SET(SCU434, 23),
- SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23),
SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23));
PIN_DECL_2(AD8, GPIOW7, LPCRST, ESPIRST);
FUNC_GROUP_DECL(LPC, AB7, AB8, AC8, AC7, AE7, AF7, AD8);
@@ -1542,6 +1545,78 @@ GROUP_DECL(I3C4, AE25, AF24);
FUNC_DECL_2(I3C4, HVI3C4, I3C4);
FUNC_GROUP_DECL(FSI2, AE25, AF24);
+#define AF23 248
+SIG_EXPR_LIST_DECL_SESG(AF23, I3C1SCL, I3C1, SIG_DESC_SET(SCU438, 16));
+PIN_DECL_(AF23, SIG_EXPR_LIST_PTR(AF23, I3C1SCL));
+
+#define AE24 249
+SIG_EXPR_LIST_DECL_SESG(AE24, I3C1SDA, I3C1, SIG_DESC_SET(SCU438, 17));
+PIN_DECL_(AE24, SIG_EXPR_LIST_PTR(AE24, I3C1SDA));
+
+FUNC_GROUP_DECL(I3C1, AF23, AE24);
+
+#define AF22 250
+SIG_EXPR_LIST_DECL_SESG(AF22, I3C2SCL, I3C2, SIG_DESC_SET(SCU438, 18));
+PIN_DECL_(AF22, SIG_EXPR_LIST_PTR(AF22, I3C2SCL));
+
+#define AE22 251
+SIG_EXPR_LIST_DECL_SESG(AE22, I3C2SDA, I3C2, SIG_DESC_SET(SCU438, 19));
+PIN_DECL_(AE22, SIG_EXPR_LIST_PTR(AE22, I3C2SDA));
+
+FUNC_GROUP_DECL(I3C2, AF22, AE22);
+
+#define USB2ADP_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 0, 0 }
+#define USB2AD_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 1, 0 }
+#define USB2AH_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 2, 0 }
+#define USB2AHP_DESC { ASPEED_IP_SCU, SCU440, GENMASK(25, 24), 3, 0 }
+#define USB11BHID_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 0, 0 }
+#define USB2BD_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 1, 0 }
+#define USB2BH_DESC { ASPEED_IP_SCU, SCU440, GENMASK(29, 28), 2, 0 }
+
+#define A4 252
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADPDP, USBA, USB2ADP, USB2ADP_DESC,
+ SIG_DESC_SET(SCUC20, 16));
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADDP, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHDP, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(A4, SIG_EXPR_LIST_PTR(A4, USB2ADPDP), SIG_EXPR_LIST_PTR(A4, USB2ADDP),
+ SIG_EXPR_LIST_PTR(A4, USB2AHDP));
+
+#define B4 253
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADPDN, USBA, USB2ADP, USB2ADP_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADDN, USBA, USB2AD, USB2AD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHDN, USBA, USB2AH, USB2AH_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHPDN, USBA, USB2AHP, USB2AHP_DESC);
+PIN_DECL_(B4, SIG_EXPR_LIST_PTR(B4, USB2ADPDN), SIG_EXPR_LIST_PTR(B4, USB2ADDN),
+ SIG_EXPR_LIST_PTR(B4, USB2AHDN));
+
+GROUP_DECL(USBA, A4, B4);
+
+FUNC_DECL_1(USB2ADP, USBA);
+FUNC_DECL_1(USB2AD, USBA);
+FUNC_DECL_1(USB2AH, USBA);
+FUNC_DECL_1(USB2AHP, USBA);
+
+#define A6 254
+SIG_EXPR_LIST_DECL_SEMG(A6, USB11BDP, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BDDP, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(A6, USB2BHDP, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(A6, SIG_EXPR_LIST_PTR(A6, USB11BDP), SIG_EXPR_LIST_PTR(A6, USB2BDDP),
+ SIG_EXPR_LIST_PTR(A6, USB2BHDP));
+
+#define B6 255
+SIG_EXPR_LIST_DECL_SEMG(B6, USB11BDN, USBB, USB11BHID, USB11BHID_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BDDN, USBB, USB2BD, USB2BD_DESC);
+SIG_EXPR_LIST_DECL_SEMG(B6, USB2BHDN, USBB, USB2BH, USB2BH_DESC);
+PIN_DECL_(B6, SIG_EXPR_LIST_PTR(B6, USB11BDN), SIG_EXPR_LIST_PTR(B6, USB2BDDN),
+ SIG_EXPR_LIST_PTR(B6, USB2BHDN));
+
+GROUP_DECL(USBB, A6, B6);
+
+FUNC_DECL_1(USB11BHID, USBB);
+FUNC_DECL_1(USB2BD, USBB);
+FUNC_DECL_1(USB2BH, USBB);
+
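
The *_DESC initialisers above follow struct aspeed_sig_desc from
pinmux-aspeed.h: { ip, reg, mask, enable, disable }. All four port-A
descriptors target the same two-bit field SCU440[25:24] with enable values
0 through 3, so selecting one USB mode necessarily deselects the other
three. A sketch of what one row encodes, assuming that struct layout:

    /* USB2AD_DESC, spelled out (values from the #define above): */
    static const struct aspeed_sig_desc usb2ad_desc = {
            .ip      = ASPEED_IP_SCU,     /* register lives in the SCU regmap */
            .reg     = SCU440,            /* USB port A mode control */
            .mask    = GENMASK(25, 24),   /* two-bit field shared by all A modes */
            .enable  = 1,                 /* field value selecting USB2AD */
            .disable = 0,                 /* value written when muxing away */
    };
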
/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
@@ -1562,6 +1637,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(A24),
ASPEED_PINCTRL_PIN(A25),
ASPEED_PINCTRL_PIN(A3),
+ ASPEED_PINCTRL_PIN(A4),
+ ASPEED_PINCTRL_PIN(A6),
ASPEED_PINCTRL_PIN(AA11),
ASPEED_PINCTRL_PIN(AA12),
ASPEED_PINCTRL_PIN(AA16),
@@ -1633,6 +1710,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AE16),
ASPEED_PINCTRL_PIN(AE18),
ASPEED_PINCTRL_PIN(AE19),
+ ASPEED_PINCTRL_PIN(AE22),
+ ASPEED_PINCTRL_PIN(AE24),
ASPEED_PINCTRL_PIN(AE25),
ASPEED_PINCTRL_PIN(AE26),
ASPEED_PINCTRL_PIN(AE7),
@@ -1642,6 +1721,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AF12),
ASPEED_PINCTRL_PIN(AF14),
ASPEED_PINCTRL_PIN(AF15),
+ ASPEED_PINCTRL_PIN(AF22),
+ ASPEED_PINCTRL_PIN(AF23),
ASPEED_PINCTRL_PIN(AF24),
ASPEED_PINCTRL_PIN(AF25),
ASPEED_PINCTRL_PIN(AF7),
@@ -1662,6 +1743,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(B25),
ASPEED_PINCTRL_PIN(B26),
ASPEED_PINCTRL_PIN(B3),
+ ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B6),
ASPEED_PINCTRL_PIN(C1),
ASPEED_PINCTRL_PIN(C11),
ASPEED_PINCTRL_PIN(C12),
@@ -1855,6 +1938,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(I2C7),
ASPEED_PINCTRL_GROUP(I2C8),
ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(I3C1),
+ ASPEED_PINCTRL_GROUP(I3C2),
ASPEED_PINCTRL_GROUP(I3C3),
ASPEED_PINCTRL_GROUP(I3C4),
ASPEED_PINCTRL_GROUP(I3C5),
@@ -2020,6 +2105,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(UART7),
ASPEED_PINCTRL_GROUP(UART8),
ASPEED_PINCTRL_GROUP(UART9),
+ ASPEED_PINCTRL_GROUP(USBA),
+ ASPEED_PINCTRL_GROUP(USBB),
ASPEED_PINCTRL_GROUP(VB),
ASPEED_PINCTRL_GROUP(VGAHS),
ASPEED_PINCTRL_GROUP(VGAVS),
@@ -2087,6 +2174,8 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(I2C7),
ASPEED_PINCTRL_FUNC(I2C8),
ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(I3C1),
+ ASPEED_PINCTRL_FUNC(I3C2),
ASPEED_PINCTRL_FUNC(I3C3),
ASPEED_PINCTRL_FUNC(I3C4),
ASPEED_PINCTRL_FUNC(I3C5),
@@ -2229,6 +2318,13 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(UART7),
ASPEED_PINCTRL_FUNC(UART8),
ASPEED_PINCTRL_FUNC(UART9),
+ ASPEED_PINCTRL_FUNC(USB11BHID),
+ ASPEED_PINCTRL_FUNC(USB2AD),
+ ASPEED_PINCTRL_FUNC(USB2ADP),
+ ASPEED_PINCTRL_FUNC(USB2AH),
+ ASPEED_PINCTRL_FUNC(USB2AHP),
+ ASPEED_PINCTRL_FUNC(USB2BD),
+ ASPEED_PINCTRL_FUNC(USB2BH),
ASPEED_PINCTRL_FUNC(VB),
ASPEED_PINCTRL_FUNC(VGAHS),
ASPEED_PINCTRL_FUNC(VGAVS),
@@ -2238,6 +2334,260 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(WDTRST4),
};
+static struct aspeed_pin_config aspeed_g6_configs[] = {
+ /* GPIOB7 */
+ ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15),
+ /* GPIOB6 */
+ ASPEED_PULL_DOWN_PINCONF(H25, SCU610, 14),
+ /* GPIOB5 */
+ ASPEED_PULL_DOWN_PINCONF(G26, SCU610, 13),
+ /* GPIOB4 */
+ ASPEED_PULL_DOWN_PINCONF(J23, SCU610, 12),
+ /* GPIOB3 */
+ ASPEED_PULL_DOWN_PINCONF(J25, SCU610, 11),
+ /* GPIOB2 */
+ ASPEED_PULL_DOWN_PINCONF(H26, SCU610, 10),
+ /* GPIOB1 */
+ ASPEED_PULL_DOWN_PINCONF(K23, SCU610, 9),
+ /* GPIOB0 */
+ ASPEED_PULL_DOWN_PINCONF(J26, SCU610, 8),
+
+ /* GPIOH3 */
+ ASPEED_PULL_DOWN_PINCONF(A17, SCU614, 27),
+ /* GPIOH2 */
+ ASPEED_PULL_DOWN_PINCONF(C18, SCU614, 26),
+ /* GPIOH1 */
+ ASPEED_PULL_DOWN_PINCONF(B18, SCU614, 25),
+ /* GPIOH0 */
+ ASPEED_PULL_DOWN_PINCONF(A18, SCU614, 24),
+
+ /* GPIOL7 */
+ ASPEED_PULL_DOWN_PINCONF(C14, SCU618, 31),
+ /* GPIOL6 */
+ ASPEED_PULL_DOWN_PINCONF(B14, SCU618, 30),
+ /* GPIOL5 */
+ ASPEED_PULL_DOWN_PINCONF(F15, SCU618, 29),
+ /* GPIOL4 */
+ ASPEED_PULL_DOWN_PINCONF(C15, SCU618, 28),
+
+ /* GPIOJ7 */
+ ASPEED_PULL_UP_PINCONF(D19, SCU618, 15),
+ /* GPIOJ6 */
+ ASPEED_PULL_UP_PINCONF(C20, SCU618, 14),
+ /* GPIOJ5 */
+ ASPEED_PULL_UP_PINCONF(A19, SCU618, 13),
+ /* GPIOJ4 */
+ ASPEED_PULL_UP_PINCONF(C19, SCU618, 12),
+ /* GPIOJ3 */
+ ASPEED_PULL_UP_PINCONF(D20, SCU618, 11),
+ /* GPIOJ2 */
+ ASPEED_PULL_UP_PINCONF(E19, SCU618, 10),
+ /* GPIOJ1 */
+ ASPEED_PULL_UP_PINCONF(A20, SCU618, 9),
+ /* GPIOJ0 */
+ ASPEED_PULL_UP_PINCONF(B20, SCU618, 8),
+
+ /* GPIOI7 */
+ ASPEED_PULL_DOWN_PINCONF(A15, SCU618, 7),
+ /* GPIOI6 */
+ ASPEED_PULL_DOWN_PINCONF(B16, SCU618, 6),
+ /* GPIOI5 */
+ ASPEED_PULL_DOWN_PINCONF(E16, SCU618, 5),
+ /* GPIOI4 */
+ ASPEED_PULL_DOWN_PINCONF(C16, SCU618, 4),
+ /* GPIOI3 */
+ ASPEED_PULL_DOWN_PINCONF(D16, SCU618, 3),
+ /* GPIOI2 */
+ ASPEED_PULL_DOWN_PINCONF(E17, SCU618, 2),
+ /* GPIOI1 */
+ ASPEED_PULL_DOWN_PINCONF(A16, SCU618, 1),
+ /* GPIOI0 */
+ ASPEED_PULL_DOWN_PINCONF(D17, SCU618, 0),
+
+ /* GPIOP7 */
+ ASPEED_PULL_DOWN_PINCONF(Y23, SCU61C, 31),
+ /* GPIOP6 */
+ ASPEED_PULL_DOWN_PINCONF(AB24, SCU61C, 30),
+ /* GPIOP5 */
+ ASPEED_PULL_DOWN_PINCONF(AB23, SCU61C, 29),
+ /* GPIOP4 */
+ ASPEED_PULL_DOWN_PINCONF(W23, SCU61C, 28),
+ /* GPIOP3 */
+ ASPEED_PULL_DOWN_PINCONF(AA24, SCU61C, 27),
+ /* GPIOP2 */
+ ASPEED_PULL_DOWN_PINCONF(AA23, SCU61C, 26),
+ /* GPIOP1 */
+ ASPEED_PULL_DOWN_PINCONF(W24, SCU61C, 25),
+ /* GPIOP0 */
+ ASPEED_PULL_DOWN_PINCONF(AB22, SCU61C, 24),
+
+ /* GPIOO7 */
+ ASPEED_PULL_DOWN_PINCONF(AC23, SCU61C, 23),
+ /* GPIOO6 */
+ ASPEED_PULL_DOWN_PINCONF(AC24, SCU61C, 22),
+ /* GPIOO5 */
+ ASPEED_PULL_DOWN_PINCONF(AC22, SCU61C, 21),
+ /* GPIOO4 */
+ ASPEED_PULL_DOWN_PINCONF(AD25, SCU61C, 20),
+ /* GPIOO3 */
+ ASPEED_PULL_DOWN_PINCONF(AD24, SCU61C, 19),
+ /* GPIOO2 */
+ ASPEED_PULL_DOWN_PINCONF(AD23, SCU61C, 18),
+ /* GPIOO1 */
+ ASPEED_PULL_DOWN_PINCONF(AD22, SCU61C, 17),
+ /* GPIOO0 */
+ ASPEED_PULL_DOWN_PINCONF(AD26, SCU61C, 16),
+
+ /* GPION7 */
+ ASPEED_PULL_DOWN_PINCONF(M26, SCU61C, 15),
+ /* GPION6 */
+ ASPEED_PULL_DOWN_PINCONF(N26, SCU61C, 14),
+ /* GPION5 */
+ ASPEED_PULL_DOWN_PINCONF(M23, SCU61C, 13),
+ /* GPION4 */
+ ASPEED_PULL_DOWN_PINCONF(P26, SCU61C, 12),
+ /* GPION3 */
+ ASPEED_PULL_DOWN_PINCONF(N24, SCU61C, 11),
+ /* GPION2 */
+ ASPEED_PULL_DOWN_PINCONF(N25, SCU61C, 10),
+ /* GPION1 */
+ ASPEED_PULL_DOWN_PINCONF(N23, SCU61C, 9),
+ /* GPION0 */
+ ASPEED_PULL_DOWN_PINCONF(P25, SCU61C, 8),
+
+ /* GPIOM7 */
+ ASPEED_PULL_DOWN_PINCONF(D13, SCU61C, 7),
+ /* GPIOM6 */
+ ASPEED_PULL_DOWN_PINCONF(C13, SCU61C, 6),
+ /* GPIOM5 */
+ ASPEED_PULL_DOWN_PINCONF(C12, SCU61C, 5),
+ /* GPIOM4 */
+ ASPEED_PULL_DOWN_PINCONF(B12, SCU61C, 4),
+ /* GPIOM3 */
+ ASPEED_PULL_DOWN_PINCONF(E14, SCU61C, 3),
+ /* GPIOM2 */
+ ASPEED_PULL_DOWN_PINCONF(A12, SCU61C, 2),
+ /* GPIOM1 */
+ ASPEED_PULL_DOWN_PINCONF(B13, SCU61C, 1),
+ /* GPIOM0 */
+ ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
+
+ /* GPIOS7 */
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ /* GPIOS6 */
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ /* GPIOS5 */
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ /* GPIOS4 */
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ /* GPIOS3 */
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ /* GPIOS2 */
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ /* GPIOS1 */
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ /* GPIOS0 */
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+
+ /* GPIOR7 */
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ /* GPIOR6 */
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ /* GPIOR5 */
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ /* GPIOR4 */
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ /* GPIOR3 */
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ /* GPIOR2 */
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ /* GPIOR1 */
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ /* GPIOR0 */
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+
+ /* GPIOX7 */
+ ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
+ /* GPIOX6 */
+ ASPEED_PULL_DOWN_PINCONF(AF9, SCU634, 30),
+ /* GPIOX5 */
+ ASPEED_PULL_DOWN_PINCONF(AD9, SCU634, 29),
+ /* GPIOX4 */
+ ASPEED_PULL_DOWN_PINCONF(AB9, SCU634, 28),
+ /* GPIOX3 */
+ ASPEED_PULL_DOWN_PINCONF(AF8, SCU634, 27),
+ /* GPIOX2 */
+ ASPEED_PULL_DOWN_PINCONF(AC9, SCU634, 26),
+ /* GPIOX1 */
+ ASPEED_PULL_DOWN_PINCONF(AA9, SCU634, 25),
+ /* GPIOX0 */
+ ASPEED_PULL_DOWN_PINCONF(AE8, SCU634, 24),
+
+ /* GPIOV7 */
+ ASPEED_PULL_DOWN_PINCONF(AF15, SCU634, 15),
+ /* GPIOV6 */
+ ASPEED_PULL_DOWN_PINCONF(AD15, SCU634, 14),
+ /* GPIOV5 */
+ ASPEED_PULL_DOWN_PINCONF(AE14, SCU634, 13),
+ /* GPIOV4 */
+ ASPEED_PULL_DOWN_PINCONF(AE15, SCU634, 12),
+ /* GPIOV3 */
+ ASPEED_PULL_DOWN_PINCONF(AC15, SCU634, 11),
+ /* GPIOV2 */
+ ASPEED_PULL_DOWN_PINCONF(AD14, SCU634, 10),
+ /* GPIOV1 */
+ ASPEED_PULL_DOWN_PINCONF(AF14, SCU634, 9),
+ /* GPIOV0 */
+ ASPEED_PULL_DOWN_PINCONF(AB15, SCU634, 8),
+
+ /* GPIOZ7 */
+ ASPEED_PULL_DOWN_PINCONF(AF10, SCU638, 15),
+ /* GPIOZ6 */
+ ASPEED_PULL_DOWN_PINCONF(AD11, SCU638, 14),
+ /* GPIOZ5 */
+ ASPEED_PULL_DOWN_PINCONF(AA11, SCU638, 13),
+ /* GPIOZ4 */
+ ASPEED_PULL_DOWN_PINCONF(AC11, SCU638, 12),
+ /* GPIOZ3 */
+ ASPEED_PULL_DOWN_PINCONF(AB11, SCU638, 11),
+
+ /* GPIOZ1 */
+ ASPEED_PULL_DOWN_PINCONF(AD10, SCU638, 9),
+ /* GPIOZ0 */
+ ASPEED_PULL_DOWN_PINCONF(AC10, SCU638, 8),
+
+ /* GPIOY6 */
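
The per-instance vg->lock is gone: a single driver-global raw spinlock now
serializes register access for all Bay Trail pin communities. Every
read-modify-write in the converted functions below follows the same
pattern; a condensed sketch:

    unsigned long flags;
    u32 value;

    raw_spin_lock_irqsave(&byt_lock, flags);   /* also taken on irqchip paths */
    value = readl(reg);
    /* ...modify value... */
    writel(value, reg);
    raw_spin_unlock_irqrestore(&byt_lock, flags);
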
+ ASPEED_PULL_DOWN_PINCONF(AC12, SCU638, 6),
+ /* GPIOY5 */
+ ASPEED_PULL_DOWN_PINCONF(AF12, SCU638, 5),
+ /* GPIOY4 */
+ ASPEED_PULL_DOWN_PINCONF(AE12, SCU638, 4),
+ /* GPIOY3 */
+ ASPEED_PULL_DOWN_PINCONF(AA12, SCU638, 3),
+ /* GPIOY2 */
+ ASPEED_PULL_DOWN_PINCONF(AE11, SCU638, 2),
+ /* GPIOY1 */
+ ASPEED_PULL_DOWN_PINCONF(AD12, SCU638, 1),
+ /* GPIOY0 */
+ ASPEED_PULL_DOWN_PINCONF(AF11, SCU638, 0),
+
+ /* LAD3 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AC7, AC7 }, SCU454, GENMASK(31, 30)},
+ /* LAD2 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AC8, AC8 }, SCU454, GENMASK(29, 28)},
+ /* LAD1 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, GENMASK(27, 26)},
+ /* LAD0 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)},
+
+ /* MAC3 */
+ { PIN_CONFIG_POWER_SOURCE, { H24, E26 }, SCU458, BIT_MASK(4)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { H24, E26 }, SCU458, GENMASK(1, 0)},
+ /* MAC4 */
+ { PIN_CONFIG_POWER_SOURCE, { F24, B24 }, SCU458, BIT_MASK(5)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)},
+};
+
/**
* Configure a pin's signal by applying an expression's descriptor state for
* all descriptors in the expression.
@@ -2305,6 +2655,20 @@ static int aspeed_g6_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+static const struct aspeed_pin_config_map aspeed_g6_pin_config_map[] = {
+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)},
+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 4, 0, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 8, 1, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 12, 2, GENMASK(1, 0)},
+ { PIN_CONFIG_DRIVE_STRENGTH, 16, 3, GENMASK(1, 0)},
+ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)},
+ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)},
+};
+
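
These map rows drive a two-step translation in pinctrl-aspeed.c:
find_pinconf_config() locates the register field for a pin, and
find_pinconf_map() converts the pinconf argument into a field value. A
worked sketch for "drive-strength = <8>" on pin AC7 (LAD3), using only
values from the two tables above:

    /* pconf (aspeed_g6_configs):        reg = SCU454, mask = GENMASK(31, 30) */
    /* pmap  (aspeed_g6_pin_config_map): arg 8 maps to field value 1          */
    u32 val = 1 << __ffs(GENMASK(31, 30));          /* 0x40000000 */

    regmap_update_bits(pdata->scu, SCU454, GENMASK(31, 30), val);
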
static const struct aspeed_pinmux_ops aspeed_g6_ops = {
.set = aspeed_g6_sig_expr_set,
};
@@ -2319,6 +2683,10 @@ static struct aspeed_pinctrl_data aspeed_g6_pinctrl_data = {
.functions = aspeed_g6_functions,
.nfunctions = ARRAY_SIZE(aspeed_g6_functions),
},
+ .configs = aspeed_g6_configs,
+ .nconfigs = ARRAY_SIZE(aspeed_g6_configs),
+ .confmaps = aspeed_g6_pin_config_map,
+ .nconfmaps = ARRAY_SIZE(aspeed_g6_pin_config_map),
};
static const struct pinmux_ops aspeed_g6_pinmux_ops = {
@@ -2339,12 +2707,21 @@ static const struct pinctrl_ops aspeed_g6_pinctrl_ops = {
.dt_free_map = pinctrl_utils_free_map,
};
+static const struct pinconf_ops aspeed_g6_conf_ops = {
+ .is_generic = true,
+ .pin_config_get = aspeed_pin_config_get,
+ .pin_config_set = aspeed_pin_config_set,
+ .pin_config_group_get = aspeed_pin_config_group_get,
+ .pin_config_group_set = aspeed_pin_config_group_set,
+};
+
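
With .is_generic set, the core's generic pinconf plumbing understands the
standard device-tree properties, so a board can simply say
"bias-pull-down" in a pin group node. Assuming the generic parser's
default argument of 1 for a valueless bias property, that reaches
aspeed_pin_config_set() as a packed (param, argument) pair:

    unsigned long config =
            pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_DOWN, 1);
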
static struct pinctrl_desc aspeed_g6_pinctrl_desc = {
.name = "aspeed-g6-pinctrl",
.pins = aspeed_g6_pins,
.npins = ARRAY_SIZE(aspeed_g6_pins),
.pctlops = &aspeed_g6_pinctrl_ops,
.pmxops = &aspeed_g6_pinmux_ops,
+ .confops = &aspeed_g6_conf_ops,
};
static int aspeed_g6_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 54933665b5f8..b625a657171e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -411,49 +411,21 @@ static inline const struct aspeed_pin_config *find_pinconf_config(
return NULL;
}
-/*
- * Aspeed pin configuration description.
- *
- * @param: pinconf configuration parameter
- * @arg: The supported argument for @param, or -1 if any value is supported
- * @val: The register value to write to configure @arg for @param
- *
- * The map is to be used in conjunction with the configuration array supplied
- * by the driver implementation.
- */
-struct aspeed_pin_config_map {
- enum pin_config_param param;
- s32 arg;
- u32 val;
-};
-
enum aspeed_pin_config_map_type { MAP_TYPE_ARG, MAP_TYPE_VAL };
-/* Aspeed consistently both:
- *
- * 1. Defines "disable bits" for internal pull-downs
- * 2. Uses 8mA or 16mA drive strengths
- */
-static const struct aspeed_pin_config_map pin_config_map[] = {
- { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1 },
- { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0 },
- { PIN_CONFIG_BIAS_DISABLE, -1, 1 },
- { PIN_CONFIG_DRIVE_STRENGTH, 8, 0 },
- { PIN_CONFIG_DRIVE_STRENGTH, 16, 1 },
-};
-
static const struct aspeed_pin_config_map *find_pinconf_map(
+ const struct aspeed_pinctrl_data *pdata,
enum pin_config_param param,
enum aspeed_pin_config_map_type type,
s64 value)
{
int i;
- for (i = 0; i < ARRAY_SIZE(pin_config_map); i++) {
+ for (i = 0; i < pdata->nconfmaps; i++) {
const struct aspeed_pin_config_map *elem;
bool match;
- elem = &pin_config_map[i];
+ elem = &pdata->confmaps[i];
switch (type) {
case MAP_TYPE_ARG:
@@ -491,8 +463,8 @@ int aspeed_pin_config_get(struct pinctrl_dev *pctldev, unsigned int offset,
if (rc < 0)
return rc;
- pmap = find_pinconf_map(param, MAP_TYPE_VAL,
- (val & BIT(pconf->bit)) >> pconf->bit);
+ pmap = find_pinconf_map(pdata, param, MAP_TYPE_VAL,
+ (val & pconf->mask) >> __ffs(pconf->mask));
if (!pmap)
return -EINVAL;
@@ -535,22 +507,22 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
if (!pconf)
return -ENOTSUPP;
- pmap = find_pinconf_map(param, MAP_TYPE_ARG, arg);
+ pmap = find_pinconf_map(pdata, param, MAP_TYPE_ARG, arg);
if (WARN_ON(!pmap))
return -EINVAL;
- val = pmap->val << pconf->bit;
+ val = pmap->val << __ffs(pconf->mask);
rc = regmap_update_bits(pdata->scu, pconf->reg,
- BIT(pconf->bit), val);
+ pconf->mask, val);
if (rc < 0)
return rc;
- pr_debug("%s: Set SCU%02X[%d]=%d for param %d(=%d) on pin %d\n",
- __func__, pconf->reg, pconf->bit, pmap->val,
- param, arg, offset);
+ pr_debug("%s: Set SCU%02X[%lu]=%d for param %d(=%d) on pin %d\n",
+ __func__, pconf->reg, __ffs(pconf->mask),
+ pmap->val, param, arg, offset);
}
return 0;
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index a5d83986f32e..4dcde3bc29c8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -24,8 +24,7 @@ struct aspeed_pin_config {
enum pin_config_param param;
unsigned int pins[2];
unsigned int reg;
- u8 bit;
- u8 value;
+ u32 mask;
};
#define ASPEED_PINCTRL_PIN(name_) \
@@ -35,6 +34,38 @@ struct aspeed_pin_config {
.drv_data = (void *) &(PIN_SYM(name_)) \
}
+#define ASPEED_SB_PINCONF(param_, pin0_, pin1_, reg_, bit_) { \
+ .param = param_, \
+ .pins = {pin0_, pin1_}, \
+ .reg = reg_, \
+ .mask = BIT_MASK(bit_) \
+}
+
+#define ASPEED_PULL_DOWN_PINCONF(pin_, reg_, bit_) \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, pin_, pin_, reg_, bit_), \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
+
+#define ASPEED_PULL_UP_PINCONF(pin_, reg_, bit_) \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_UP, pin_, pin_, reg_, bit_), \
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, pin_, pin_, reg_, bit_)
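
Each bias helper emits two table rows, one for the explicit bias parameter
and one for PIN_CONFIG_BIAS_DISABLE, since both are expressed through the
same SCU bit. For example, ASPEED_PULL_DOWN_PINCONF(J24, SCU610, 15)
expands to:

    { .param = PIN_CONFIG_BIAS_PULL_DOWN, .pins = { J24, J24 },
      .reg = SCU610, .mask = BIT_MASK(15) },
    { .param = PIN_CONFIG_BIAS_DISABLE, .pins = { J24, J24 },
      .reg = SCU610, .mask = BIT_MASK(15) },
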
+/*
+ * Aspeed pin configuration description.
+ *
+ * @param: pinconf configuration parameter
+ * @arg: The supported argument for @param, or -1 if any value is supported
+ * @val: The register value to write to configure @arg for @param
+ * @mask: The bitfield mask for @val
+ *
+ * The map is to be used in conjunction with the configuration array supplied
+ * by the driver implementation.
+ */
+struct aspeed_pin_config_map {
+ enum pin_config_param param;
+ s32 arg;
+ u32 val;
+ u32 mask;
+};
+
struct aspeed_pinctrl_data {
struct regmap *scu;
@@ -45,6 +76,9 @@ struct aspeed_pinctrl_data {
const unsigned int nconfigs;
struct aspeed_pinmux_data pinmux;
+
+ const struct aspeed_pin_config_map *confmaps;
+ const unsigned int nconfmaps;
};
/* Aspeed pinctrl helpers */
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 140c5ce9fbc1..f86739e800c3 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -737,6 +737,7 @@ struct aspeed_pin_desc {
#define FUNC_DECL_(func, ...) \
static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
+#define FUNC_DECL_1(func, group) FUNC_DECL_(func, #group)
#define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
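
FUNC_DECL_1 completes the set for single-group functions such as the USB
modes added above; like its siblings it just stringifies the group name,
so FUNC_DECL_1(USB2AD, USBA) expands to:

    static const char *FUNC_SYM(USB2AD)[] = { "USBA" };
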
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 831a9318c384..25166217c3e0 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -286,6 +286,12 @@ static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
iproc_set_bit(chip, IPROC_GPIO_INT_DE_OFFSET, gpio, dual_edge);
iproc_set_bit(chip, IPROC_GPIO_INT_EDGE_OFFSET, gpio,
rising_or_high);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev,
@@ -843,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
"gpio-ranges");
/* optional GPIO interrupt support */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
struct irq_chip *irqc;
struct gpio_irq_chip *girq;
@@ -868,7 +874,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
+ girq->handler = handle_bad_irq;
}
ret = gpiochip_add_data(gc, chip);
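
The two iproc changes are halves of one gpiolib idiom: the chip starts out
with handle_bad_irq, so an interrupt firing before any trigger type is
configured gets flagged instead of silently swallowed, and
iproc_gpio_irq_set_type() then installs the flow handler matching the
programmed trigger (edge interrupts need the edge flow so a pulse arriving
while the handler runs is not lost). A condensed sketch of the pattern,
with a hypothetical set_type callback:

    /* at probe time: girq->handler = handle_bad_irq; */
    static int sketch_irq_set_type(struct irq_data *d, unsigned int type)
    {
            /* ...program the hardware trigger here... */
            if (type & IRQ_TYPE_EDGE_BOTH)
                    irq_set_handler_locked(d, handle_edge_irq);
            else
                    irq_set_handler_locked(d, handle_level_irq);
            return 0;
    }
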
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 32f268f173d1..57044ab376d3 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1049,7 +1049,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -EINVAL;
- pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
dev_err(&pdev->dev, "unable to map I/O space\n");
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 3756fc9d5826..f1d60a708815 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -578,7 +578,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -EINVAL;
- pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
dev_err(&pdev->dev, "unable to map I/O space\n");
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index f1806fd781a0..530426a74f75 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -2,6 +2,7 @@
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
depends on MFD_LOCHNAGAR
+ select GPIOLIB
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index e2f72dcce4c9..7b6409ef553c 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -560,7 +560,6 @@ static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev,
seq_puts(s, " SCHMITT");
}
-
static const struct pinctrl_ops madera_pin_group_ops = {
.get_groups_count = madera_get_groups_count,
.get_group_name = madera_get_group_name,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 2bbd8ee93507..446d84fe0e31 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1376,8 +1376,15 @@ void devm_pinctrl_put(struct pinctrl *p)
}
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
- bool dup)
+/**
+ * pinctrl_register_mappings() - register a set of pin controller mappings
+ * @maps: the pincontrol mappings table to register. Note the pinctrl-core
+ * keeps a reference to the passed in maps, so they should _not_ be
+ * marked with __initdata.
+ * @num_maps: the number of maps in the mapping table
+ */
+int pinctrl_register_mappings(const struct pinctrl_map *maps,
+ unsigned num_maps)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1430,17 +1437,8 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
if (!maps_node)
return -ENOMEM;
+ maps_node->maps = maps;
maps_node->num_maps = num_maps;
- if (dup) {
- maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps,
- GFP_KERNEL);
- if (!maps_node->maps) {
- kfree(maps_node);
- return -ENOMEM;
- }
- } else {
- maps_node->maps = maps;
- }
mutex_lock(&pinctrl_maps_mutex);
list_add_tail(&maps_node->node, &pinctrl_maps);
@@ -1448,22 +1446,14 @@ int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
return 0;
}
+EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
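
With the kmemdup() gone, every caller now hands over a long-lived table. A
minimal registration sketch (device, controller and group names are
illustrative):

    /* Must stay allocated (not __initdata): the core keeps this pointer. */
    static const struct pinctrl_map board_map[] = {
            PIN_MAP_MUX_GROUP_DEFAULT("foo-i2c.0", "pinctrl-foo",
                                      "i2c0_grp", "i2c0"),
    };

    static int __init board_pinmux_init(void)
    {
            return pinctrl_register_mappings(board_map,
                                             ARRAY_SIZE(board_map));
    }

with a matching pinctrl_unregister_mappings(board_map) on teardown.
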
/**
- * pinctrl_register_mappings() - register a set of pin controller mappings
- * @maps: the pincontrol mappings table to register. This should probably be
- * marked with __initdata so it can be discarded after boot. This
- * function will perform a shallow copy for the mapping entries.
- * @num_maps: the number of maps in the mapping table
+ * pinctrl_unregister_mappings() - unregister a set of pin controller mappings
+ * @maps: the pincontrol mappings table passed to pinctrl_register_mappings()
+ * when registering the mappings.
*/
-int pinctrl_register_mappings(const struct pinctrl_map *maps,
- unsigned num_maps)
-{
- return pinctrl_register_map(maps, num_maps, true);
-}
-EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
-
-void pinctrl_unregister_map(const struct pinctrl_map *map)
+void pinctrl_unregister_mappings(const struct pinctrl_map *map)
{
struct pinctrl_maps *maps_node;
@@ -1478,6 +1468,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map)
}
mutex_unlock(&pinctrl_maps_mutex);
}
+EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings);
/**
* pinctrl_force_sleep() - turn a given controller device into sleep state
@@ -1535,15 +1526,8 @@ int pinctrl_init_done(struct device *dev)
return ret;
}
-#ifdef CONFIG_PM
-
-/**
- * pinctrl_pm_select_state() - select pinctrl state for PM
- * @dev: device to select default state for
- * @state: state to set
- */
-static int pinctrl_pm_select_state(struct device *dev,
- struct pinctrl_state *state)
+static int pinctrl_select_bound_state(struct device *dev,
+ struct pinctrl_state *state)
{
struct dev_pin_info *pins = dev->pins;
int ret;
@@ -1558,15 +1542,27 @@ static int pinctrl_pm_select_state(struct device *dev,
}
/**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_select_default_state() - select default pinctrl state
* @dev: device to select default state for
*/
-int pinctrl_pm_select_default_state(struct device *dev)
+int pinctrl_select_default_state(struct device *dev)
{
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->default_state);
+ return pinctrl_select_bound_state(dev, dev->pins->default_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_select_default_state);
+
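
Hoisting the helper out of the CONFIG_PM block lets a driver put its pins
back into the bound "default" state from any context, not just the PM
callbacks; the PM variant below becomes a thin wrapper. A usage sketch
from a hypothetical driver:

    ret = pinctrl_select_default_state(&pdev->dev);
    if (ret)
            dev_warn(&pdev->dev, "could not re-apply default pin state\n");
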
+#ifdef CONFIG_PM
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+ return pinctrl_select_default_state(dev);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
@@ -1579,7 +1575,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev)
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
+ return pinctrl_select_bound_state(dev, dev->pins->sleep_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
@@ -1592,7 +1588,7 @@ int pinctrl_pm_select_idle_state(struct device *dev)
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->idle_state);
+ return pinctrl_select_bound_state(dev, dev->pins->idle_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
#endif
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7f34167a0405..840103c40c14 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -236,10 +236,6 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
unsigned int pin);
-int pinctrl_register_map(const struct pinctrl_map *maps, unsigned num_maps,
- bool dup);
-void pinctrl_unregister_map(const struct pinctrl_map *map);
-
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 674920daac26..9357f7c46cf3 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -51,7 +51,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p)
struct pinctrl_dt_map *dt_map, *n1;
list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
- pinctrl_unregister_map(dt_map->map);
+ pinctrl_unregister_mappings(dt_map->map);
list_del(&dt_map->node);
dt_free_map(dt_map->pctldev, dt_map->map,
dt_map->num_maps);
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
dt_map->num_maps = num_maps;
list_add_tail(&dt_map->node, &p->dt_maps);
- return pinctrl_register_map(map, num_maps, false);
+ return pinctrl_register_mappings(map, num_maps);
err_free_map:
dt_free_map(pctldev, map, num_maps);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 3ea9ce3e0cd9..de775a85a51e 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -137,6 +137,13 @@ config PINCTRL_IMX8MN
help
Say Y here to enable the imx8mn pinctrl driver
+config PINCTRL_IMX8MP
+ bool "IMX8MP pinctrl driver"
+ depends on ARCH_MXC && ARM64
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx8mp pinctrl driver
+
config PINCTRL_IMX8MQ
bool "IMX8MQ pinctrl driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 78e9140c13e3..0ebd3af21e4d 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o
obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o
obj-$(CONFIG_PINCTRL_IMX8MM) += pinctrl-imx8mm.o
obj-$(CONFIG_PINCTRL_IMX8MN) += pinctrl-imx8mn.o
+obj-$(CONFIG_PINCTRL_IMX8MP) += pinctrl-imx8mp.o
obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 7e29e3fecdb2..c00d0022d311 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -611,7 +611,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
if (!res)
return -ENOENT;
- ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ ipctl->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!ipctl->base)
return -ENOMEM;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
new file mode 100644
index 000000000000..e3f644c2ec13
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imx8mp_pads {
+ MX8MP_IOMUXC_RESERVE0 = 0,
+ MX8MP_IOMUXC_RESERVE1 = 1,
+ MX8MP_IOMUXC_RESERVE2 = 2,
+ MX8MP_IOMUXC_RESERVE3 = 3,
+ MX8MP_IOMUXC_RESERVE4 = 4,
+ MX8MP_IOMUXC_GPIO1_IO00 = 5,
+ MX8MP_IOMUXC_GPIO1_IO01 = 6,
+ MX8MP_IOMUXC_GPIO1_IO02 = 7,
+ MX8MP_IOMUXC_GPIO1_IO03 = 8,
+ MX8MP_IOMUXC_GPIO1_IO04 = 9,
+ MX8MP_IOMUXC_GPIO1_IO05 = 10,
+ MX8MP_IOMUXC_GPIO1_IO06 = 11,
+ MX8MP_IOMUXC_GPIO1_IO07 = 12,
+ MX8MP_IOMUXC_GPIO1_IO08 = 13,
+ MX8MP_IOMUXC_GPIO1_IO09 = 14,
+ MX8MP_IOMUXC_GPIO1_IO10 = 15,
+ MX8MP_IOMUXC_GPIO1_IO11 = 16,
+ MX8MP_IOMUXC_GPIO1_IO12 = 17,
+ MX8MP_IOMUXC_GPIO1_IO13 = 18,
+ MX8MP_IOMUXC_GPIO1_IO14 = 19,
+ MX8MP_IOMUXC_GPIO1_IO15 = 20,
+ MX8MP_IOMUXC_ENET_MDC = 21,
+ MX8MP_IOMUXC_ENET_MDIO = 22,
+ MX8MP_IOMUXC_ENET_TD3 = 23,
+ MX8MP_IOMUXC_ENET_TD2 = 24,
+ MX8MP_IOMUXC_ENET_TD1 = 25,
+ MX8MP_IOMUXC_ENET_TD0 = 26,
+ MX8MP_IOMUXC_ENET_TX_CTL = 27,
+ MX8MP_IOMUXC_ENET_TXC = 28,
+ MX8MP_IOMUXC_ENET_RX_CTL = 29,
+ MX8MP_IOMUXC_ENET_RXC = 30,
+ MX8MP_IOMUXC_ENET_RD0 = 31,
+ MX8MP_IOMUXC_ENET_RD1 = 32,
+ MX8MP_IOMUXC_ENET_RD2 = 33,
+ MX8MP_IOMUXC_ENET_RD3 = 34,
+ MX8MP_IOMUXC_SD1_CLK = 35,
+ MX8MP_IOMUXC_SD1_CMD = 36,
+ MX8MP_IOMUXC_SD1_DATA0 = 37,
+ MX8MP_IOMUXC_SD1_DATA1 = 38,
+ MX8MP_IOMUXC_SD1_DATA2 = 39,
+ MX8MP_IOMUXC_SD1_DATA3 = 40,
+ MX8MP_IOMUXC_SD1_DATA4 = 41,
+ MX8MP_IOMUXC_SD1_DATA5 = 42,
+ MX8MP_IOMUXC_SD1_DATA6 = 43,
+ MX8MP_IOMUXC_SD1_DATA7 = 44,
+ MX8MP_IOMUXC_SD1_RESET_B = 45,
+ MX8MP_IOMUXC_SD1_STROBE = 46,
+ MX8MP_IOMUXC_SD2_CD_B = 47,
+ MX8MP_IOMUXC_SD2_CLK = 48,
+ MX8MP_IOMUXC_SD2_CMD = 49,
+ MX8MP_IOMUXC_SD2_DATA0 = 50,
+ MX8MP_IOMUXC_SD2_DATA1 = 51,
+ MX8MP_IOMUXC_SD2_DATA2 = 52,
+ MX8MP_IOMUXC_SD2_DATA3 = 53,
+ MX8MP_IOMUXC_SD2_RESET_B = 54,
+ MX8MP_IOMUXC_SD2_WP = 55,
+ MX8MP_IOMUXC_NAND_ALE = 56,
+ MX8MP_IOMUXC_NAND_CE0_B = 57,
+ MX8MP_IOMUXC_NAND_CE1_B = 58,
+ MX8MP_IOMUXC_NAND_CE2_B = 59,
+ MX8MP_IOMUXC_NAND_CE3_B = 60,
+ MX8MP_IOMUXC_NAND_CLE = 61,
+ MX8MP_IOMUXC_NAND_DATA00 = 62,
+ MX8MP_IOMUXC_NAND_DATA01 = 63,
+ MX8MP_IOMUXC_NAND_DATA02 = 64,
+ MX8MP_IOMUXC_NAND_DATA03 = 65,
+ MX8MP_IOMUXC_NAND_DATA04 = 66,
+ MX8MP_IOMUXC_NAND_DATA05 = 67,
+ MX8MP_IOMUXC_NAND_DATA06 = 68,
+ MX8MP_IOMUXC_NAND_DATA07 = 69,
+ MX8MP_IOMUXC_NAND_DQS = 70,
+ MX8MP_IOMUXC_NAND_RE_B = 71,
+ MX8MP_IOMUXC_NAND_READY_B = 72,
+ MX8MP_IOMUXC_NAND_WE_B = 73,
+ MX8MP_IOMUXC_NAND_WP_B = 74,
+ MX8MP_IOMUXC_SAI5_RXFS = 75,
+ MX8MP_IOMUXC_SAI5_RXC = 76,
+ MX8MP_IOMUXC_SAI5_RXD0 = 77,
+ MX8MP_IOMUXC_SAI5_RXD1 = 78,
+ MX8MP_IOMUXC_SAI5_RXD2 = 79,
+ MX8MP_IOMUXC_SAI5_RXD3 = 80,
+ MX8MP_IOMUXC_SAI5_MCLK = 81,
+ MX8MP_IOMUXC_SAI1_RXFS = 82,
+ MX8MP_IOMUXC_SAI1_RXC = 83,
+ MX8MP_IOMUXC_SAI1_RXD0 = 84,
+ MX8MP_IOMUXC_SAI1_RXD1 = 85,
+ MX8MP_IOMUXC_SAI1_RXD2 = 86,
+ MX8MP_IOMUXC_SAI1_RXD3 = 87,
+ MX8MP_IOMUXC_SAI1_RXD4 = 88,
+ MX8MP_IOMUXC_SAI1_RXD5 = 89,
+ MX8MP_IOMUXC_SAI1_RXD6 = 90,
+ MX8MP_IOMUXC_SAI1_RXD7 = 91,
+ MX8MP_IOMUXC_SAI1_TXFS = 92,
+ MX8MP_IOMUXC_SAI1_TXC = 93,
+ MX8MP_IOMUXC_SAI1_TXD0 = 94,
+ MX8MP_IOMUXC_SAI1_TXD1 = 95,
+ MX8MP_IOMUXC_SAI1_TXD2 = 96,
+ MX8MP_IOMUXC_SAI1_TXD3 = 97,
+ MX8MP_IOMUXC_SAI1_TXD4 = 98,
+ MX8MP_IOMUXC_SAI1_TXD5 = 99,
+ MX8MP_IOMUXC_SAI1_TXD6 = 100,
+ MX8MP_IOMUXC_SAI1_TXD7 = 101,
+ MX8MP_IOMUXC_SAI1_MCLK = 102,
+ MX8MP_IOMUXC_SAI2_RXFS = 103,
+ MX8MP_IOMUXC_SAI2_RXC = 104,
+ MX8MP_IOMUXC_SAI2_RXD0 = 105,
+ MX8MP_IOMUXC_SAI2_TXFS = 106,
+ MX8MP_IOMUXC_SAI2_TXC = 107,
+ MX8MP_IOMUXC_SAI2_TXD0 = 108,
+ MX8MP_IOMUXC_SAI2_MCLK = 109,
+ MX8MP_IOMUXC_SAI3_RXFS = 110,
+ MX8MP_IOMUXC_SAI3_RXC = 111,
+ MX8MP_IOMUXC_SAI3_RXD = 112,
+ MX8MP_IOMUXC_SAI3_TXFS = 113,
+ MX8MP_IOMUXC_SAI3_TXC = 114,
+ MX8MP_IOMUXC_SAI3_TXD = 115,
+ MX8MP_IOMUXC_SAI3_MCLK = 116,
+ MX8MP_IOMUXC_SPDIF_TX = 117,
+ MX8MP_IOMUXC_SPDIF_RX = 118,
+ MX8MP_IOMUXC_SPDIF_EXT_CLK = 119,
+ MX8MP_IOMUXC_ECSPI1_SCLK = 120,
+ MX8MP_IOMUXC_ECSPI1_MOSI = 121,
+ MX8MP_IOMUXC_ECSPI1_MISO = 122,
+ MX8MP_IOMUXC_ECSPI1_SS0 = 123,
+ MX8MP_IOMUXC_ECSPI2_SCLK = 124,
+ MX8MP_IOMUXC_ECSPI2_MOSI = 125,
+ MX8MP_IOMUXC_ECSPI2_MISO = 126,
+ MX8MP_IOMUXC_ECSPI2_SS0 = 127,
+ MX8MP_IOMUXC_I2C1_SCL = 128,
+ MX8MP_IOMUXC_I2C1_SDA = 129,
+ MX8MP_IOMUXC_I2C2_SCL = 130,
+ MX8MP_IOMUXC_I2C2_SDA = 131,
+ MX8MP_IOMUXC_I2C3_SCL = 132,
+ MX8MP_IOMUXC_I2C3_SDA = 133,
+ MX8MP_IOMUXC_I2C4_SCL = 134,
+ MX8MP_IOMUXC_I2C4_SDA = 135,
+ MX8MP_IOMUXC_UART1_RXD = 136,
+ MX8MP_IOMUXC_UART1_TXD = 137,
+ MX8MP_IOMUXC_UART2_RXD = 138,
+ MX8MP_IOMUXC_UART2_TXD = 139,
+ MX8MP_IOMUXC_UART3_RXD = 140,
+ MX8MP_IOMUXC_UART3_TXD = 141,
+ MX8MP_IOMUXC_UART4_RXD = 142,
+ MX8MP_IOMUXC_UART4_TXD = 143,
+ MX8MP_IOMUXC_HDMI_DDC_SCL = 144,
+ MX8MP_IOMUXC_HDMI_DDC_SDA = 145,
+ MX8MP_IOMUXC_HDMI_CEC = 146,
+ MX8MP_IOMUXC_HDMI_HPD = 147,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx8mp_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_RESERVE4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE0_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE1_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE2_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CE3_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_UART4_TXD),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SCL),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_DDC_SDA),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_CEC),
+ IMX_PINCTRL_PIN(MX8MP_IOMUXC_HDMI_HPD),
+};
+
+static const struct imx_pinctrl_soc_info imx8mp_pinctrl_info = {
+ .pins = imx8mp_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8mp_pinctrl_pads),
+ .gpr_compatible = "fsl,imx8mp-iomuxc-gpr",
+};
+
+static const struct of_device_id imx8mp_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8mp-iomuxc", .data = &imx8mp_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imx8mp_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx8mp_pinctrl_info);
+}
+
+static struct platform_driver imx8mp_pinctrl_driver = {
+ .driver = {
+ .name = "imx8mp-pinctrl",
+ .of_match_table = of_match_ptr(imx8mp_pinctrl_of_match),
+ },
+ .probe = imx8mp_pinctrl_probe,
+};
+
+static int __init imx8mp_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8mp_pinctrl_driver);
+}
+arch_initcall(imx8mp_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 6091947a8f51..ee440ec4c94c 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -31,6 +31,19 @@ config PINCTRL_CHERRYVIEW
Cherryview/Braswell pinctrl driver provides an interface that
allows configuring of SoC pins and using them as GPIOs.
+config PINCTRL_LYNXPOINT
+ tristate "Intel Lynxpoint pinctrl and GPIO driver"
+ depends on ACPI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ help
+ Lynxpoint is the PCH of Intel Haswell. This pinctrl driver
+ provides an interface that allows configuring of PCH pins and
+ using them as GPIOs.
+
config PINCTRL_MERRIFIELD
tristate "Intel Merrifield pinctrl driver"
depends on X86_INTEL_MID
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 7e620b471ef6..f60f99cfa7aa 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
+obj-$(CONFIG_PINCTRL_LYNXPOINT) += pinctrl-lynxpoint.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 9ffb22211d2b..b409642f168d 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -93,7 +93,7 @@
#define BYT_DEFAULT_GPIO_MUX 0
#define BYT_ALTER_GPIO_MUX 1
-struct byt_gpio_pin_context {
+struct intel_pad_context {
u32 conf0;
u32 val;
};
@@ -105,17 +105,6 @@ struct byt_gpio_pin_context {
.pad_map = (map),\
}
-struct byt_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- struct pinctrl_dev *pctl_dev;
- struct pinctrl_desc pctl_desc;
- raw_spinlock_t lock;
- const struct intel_pinctrl_soc_data *soc_data;
- struct intel_community *communities_copy;
- struct byt_gpio_pin_context *saved_context;
-};
-
/* SCORE pins, aka GPIOC_<pin_no> or GPIO_S0_SC[<pin_no>] */
static const struct pinctrl_pin_desc byt_score_pins[] = {
PINCTRL_PIN(0, "SATA_GP0"),
@@ -494,34 +483,34 @@ static const struct intel_pinctrl_soc_data byt_sus_soc_data = {
};
static const struct pinctrl_pin_desc byt_ncore_pins[] = {
- PINCTRL_PIN(0, "GPIO_NCORE0"),
- PINCTRL_PIN(1, "GPIO_NCORE1"),
- PINCTRL_PIN(2, "GPIO_NCORE2"),
- PINCTRL_PIN(3, "GPIO_NCORE3"),
- PINCTRL_PIN(4, "GPIO_NCORE4"),
- PINCTRL_PIN(5, "GPIO_NCORE5"),
- PINCTRL_PIN(6, "GPIO_NCORE6"),
- PINCTRL_PIN(7, "GPIO_NCORE7"),
- PINCTRL_PIN(8, "GPIO_NCORE8"),
- PINCTRL_PIN(9, "GPIO_NCORE9"),
- PINCTRL_PIN(10, "GPIO_NCORE10"),
- PINCTRL_PIN(11, "GPIO_NCORE11"),
- PINCTRL_PIN(12, "GPIO_NCORE12"),
- PINCTRL_PIN(13, "GPIO_NCORE13"),
- PINCTRL_PIN(14, "GPIO_NCORE14"),
- PINCTRL_PIN(15, "GPIO_NCORE15"),
- PINCTRL_PIN(16, "GPIO_NCORE16"),
- PINCTRL_PIN(17, "GPIO_NCORE17"),
- PINCTRL_PIN(18, "GPIO_NCORE18"),
- PINCTRL_PIN(19, "GPIO_NCORE19"),
- PINCTRL_PIN(20, "GPIO_NCORE20"),
- PINCTRL_PIN(21, "GPIO_NCORE21"),
- PINCTRL_PIN(22, "GPIO_NCORE22"),
- PINCTRL_PIN(23, "GPIO_NCORE23"),
- PINCTRL_PIN(24, "GPIO_NCORE24"),
- PINCTRL_PIN(25, "GPIO_NCORE25"),
- PINCTRL_PIN(26, "GPIO_NCORE26"),
- PINCTRL_PIN(27, "GPIO_NCORE27"),
+ PINCTRL_PIN(0, "HV_DDI0_HPD"),
+ PINCTRL_PIN(1, "HV_DDI0_DDC_SDA"),
+ PINCTRL_PIN(2, "HV_DDI0_DDC_SCL"),
+ PINCTRL_PIN(3, "PANEL0_VDDEN"),
+ PINCTRL_PIN(4, "PANEL0_BKLTEN"),
+ PINCTRL_PIN(5, "PANEL0_BKLTCTL"),
+ PINCTRL_PIN(6, "HV_DDI1_HPD"),
+ PINCTRL_PIN(7, "HV_DDI1_DDC_SDA"),
+ PINCTRL_PIN(8, "HV_DDI1_DDC_SCL"),
+ PINCTRL_PIN(9, "PANEL1_VDDEN"),
+ PINCTRL_PIN(10, "PANEL1_BKLTEN"),
+ PINCTRL_PIN(11, "PANEL1_BKLTCTL"),
+ PINCTRL_PIN(12, "GP_INTD_DSI_TE1"),
+ PINCTRL_PIN(13, "HV_DDI2_DDC_SDA"),
+ PINCTRL_PIN(14, "HV_DDI2_DDC_SCL"),
+ PINCTRL_PIN(15, "GP_CAMERASB00"),
+ PINCTRL_PIN(16, "GP_CAMERASB01"),
+ PINCTRL_PIN(17, "GP_CAMERASB02"),
+ PINCTRL_PIN(18, "GP_CAMERASB03"),
+ PINCTRL_PIN(19, "GP_CAMERASB04"),
+ PINCTRL_PIN(20, "GP_CAMERASB05"),
+ PINCTRL_PIN(21, "GP_CAMERASB06"),
+ PINCTRL_PIN(22, "GP_CAMERASB07"),
+ PINCTRL_PIN(23, "GP_CAMERASB08"),
+ PINCTRL_PIN(24, "GP_CAMERASB09"),
+ PINCTRL_PIN(25, "GP_CAMERASB10"),
+ PINCTRL_PIN(26, "GP_CAMERASB11"),
+ PINCTRL_PIN(27, "GP_INTD_DSI_TE2"),
};
static const unsigned int byt_ncore_pins_map[BYT_NGPIO_NCORE] = {
@@ -549,14 +538,16 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = {
NULL
};
-static struct intel_community *byt_get_community(struct byt_gpio *vg,
+static DEFINE_RAW_SPINLOCK(byt_lock);
+
+static struct intel_community *byt_get_community(struct intel_pinctrl *vg,
unsigned int pin)
{
struct intel_community *comm;
int i;
- for (i = 0; i < vg->soc_data->ncommunities; i++) {
- comm = vg->communities_copy + i;
+ for (i = 0; i < vg->ncommunities; i++) {
+ comm = vg->communities + i;
if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
return comm;
}
@@ -564,7 +555,7 @@ static struct intel_community *byt_get_community(struct byt_gpio *vg,
return NULL;
}
-static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset,
int reg)
{
struct intel_community *comm = byt_get_community(vg, offset);
@@ -591,17 +582,17 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
static int byt_get_groups_count(struct pinctrl_dev *pctldev)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->ngroups;
+ return vg->soc->ngroups;
}
static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->groups[selector].name;
+ return vg->soc->groups[selector].name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -609,10 +600,10 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
const unsigned int **pins,
unsigned int *num_pins)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc_data->groups[selector].pins;
- *num_pins = vg->soc_data->groups[selector].npins;
+ *pins = vg->soc->groups[selector].pins;
+ *num_pins = vg->soc->groups[selector].npins;
return 0;
}
@@ -625,17 +616,17 @@ static const struct pinctrl_ops byt_pinctrl_ops = {
static int byt_get_functions_count(struct pinctrl_dev *pctldev)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->nfunctions;
+ return vg->soc->nfunctions;
}
static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
unsigned int selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc_data->functions[selector].name;
+ return vg->soc->functions[selector].name;
}
static int byt_get_function_groups(struct pinctrl_dev *pctldev,
@@ -643,22 +634,22 @@ static int byt_get_function_groups(struct pinctrl_dev *pctldev,
const char * const **groups,
unsigned int *num_groups)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *groups = vg->soc_data->functions[selector].groups;
- *num_groups = vg->soc_data->functions[selector].ngroups;
+ *groups = vg->soc->functions[selector].groups;
+ *num_groups = vg->soc->functions[selector].ngroups;
return 0;
}
-static void byt_set_group_simple_mux(struct byt_gpio *vg,
+static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
unsigned int func)
{
unsigned long flags;
int i;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -666,7 +657,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
group.name, i);
continue;
@@ -678,17 +669,17 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
-static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
const unsigned int *func)
{
unsigned long flags;
int i;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -696,7 +687,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
group.name, i);
continue;
@@ -708,15 +699,15 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
unsigned int group_selector)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
- const struct intel_function func = vg->soc_data->functions[func_selector];
- const struct intel_pingroup group = vg->soc_data->groups[group_selector];
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_function func = vg->soc->functions[func_selector];
+ const struct intel_pingroup group = vg->soc->groups[group_selector];
if (group.modes)
byt_set_group_mixed_mux(vg, group, group.modes);
@@ -728,44 +719,50 @@ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
return 0;
}
-static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned int offset)
+static u32 byt_get_gpio_mux(struct intel_pinctrl *vg, unsigned int offset)
{
/* SCORE pin 92-93 */
- if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
+ if (!strcmp(vg->soc->uid, BYT_SCORE_ACPI_UID) &&
offset >= 92 && offset <= 93)
return BYT_ALTER_GPIO_MUX;
/* SUS pin 11-21 */
- if (!strcmp(vg->soc_data->uid, BYT_SUS_ACPI_UID) &&
+ if (!strcmp(vg->soc->uid, BYT_SUS_ACPI_UID) &&
offset >= 11 && offset <= 21)
return BYT_ALTER_GPIO_MUX;
return BYT_DEFAULT_GPIO_MUX;
}
-static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int offset)
{
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
+ /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
+ if (value & BYT_DIRECT_IRQ_EN)
+ /* nothing to do */ ;
+ else
+ value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
u32 value, gpio_mux;
unsigned long flags;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
/*
* In most cases, func pin mux 000 means GPIO function.
@@ -783,13 +780,12 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
value |= gpio_mux;
writel(value, reg);
- dev_warn(&vg->pdev->dev, FW_BUG
- "pin %u forcibly re-configured as GPIO\n", offset);
+ dev_warn(vg->dev, FW_BUG "pin %u forcibly re-configured as GPIO\n", offset);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
- pm_runtime_get(&vg->pdev->dev);
+ pm_runtime_get(vg->dev);
return 0;
}
@@ -798,10 +794,10 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
byt_gpio_clear_triggering(vg, offset);
- pm_runtime_put(&vg->pdev->dev);
+ pm_runtime_put(vg->dev);
}
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
@@ -809,30 +805,30 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned int offset,
bool input)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(val_reg);
value &= ~BYT_DIR_MASK;
if (input)
value |= BYT_OUTPUT_EN;
- else
+ else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
/*
* Before making any direction modifications, do a check if gpio
* is set for direct IRQ. On baytrail, setting GPIO to output
- * does not make sense, so let's at least warn the caller before
+ * does not make sense, so let's at least inform the caller before
* they shoot themselves in the foot.
*/
- WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
- "Potential Error: Setting GPIO with direct_irq_en to output");
+ dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+
writel(value, val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -892,7 +888,7 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
unsigned long *config)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
enum pin_config_param param = pinconf_to_config_param(*config);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -901,11 +897,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
u32 conf, pull, val, debounce;
u16 arg = 0;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
pull = conf & BYT_PULL_ASSIGN_MASK;
val = readl(val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -932,9 +928,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
if (!(conf & BYT_DEBOUNCE_EN))
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
debounce = readl(db_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
case BYT_DEBOUNCE_PULSE_375US:
@@ -977,7 +973,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
unsigned long *configs,
unsigned int num_configs)
{
- struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
unsigned int param, arg;
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -986,7 +982,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
u32 conf, val, debounce;
int i, ret = 0;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
val = readl(val_reg);
@@ -1011,7 +1007,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (val & BYT_INPUT_EN) {
val &= ~BYT_INPUT_EN;
writel(val, val_reg);
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"pin %u forcibly set to input mode\n",
offset);
}
@@ -1033,7 +1029,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (val & BYT_INPUT_EN) {
val &= ~BYT_INPUT_EN;
writel(val, val_reg);
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"pin %u forcibly set to input mode\n",
offset);
}
@@ -1094,7 +1090,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (!ret)
writel(conf, conf_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return ret;
}
@@ -1114,21 +1110,21 @@ static const struct pinctrl_desc byt_pinctrl_desc = {
static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return !!(val & BYT_LEVEL);
}
static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 old_val;
@@ -1136,18 +1132,18 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
if (!reg)
return;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
old_val = readl(reg);
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 value;
@@ -1155,14 +1151,14 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (!reg)
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
if (!(value & BYT_OUTPUT_EN))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (!(value & BYT_INPUT_EN))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
return -EINVAL;
}
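
For reference, the named constants these get_direction hunks (here and in the matching Cherryview, intel-core and Lynxpoint hunks below) switch to come from <linux/gpio/driver.h>; a minimal sketch of their definitions, whose values deliberately preserve the old raw returns:

```c
/* <linux/gpio/driver.h>, v5.5+ */
#define GPIO_LINE_DIRECTION_IN	1	/* was the bare "return 1" */
#define GPIO_LINE_DIRECTION_OUT	0	/* was the bare "return 0" */
```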
@@ -1187,11 +1183,11 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
int i;
u32 conf0, val;
- for (i = 0; i < vg->soc_data->npins; i++) {
+ for (i = 0; i < vg->soc->npins; i++) {
const struct intel_community *comm;
const char *pull_str = NULL;
const char *pull = NULL;
@@ -1200,14 +1196,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
const char *label;
unsigned int pin;
- raw_spin_lock_irqsave(&vg->lock, flags);
- pin = vg->soc_data->pins[i].number;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+ pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i conf0 reg\n",
pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
conf0 = readl(reg);
@@ -1216,11 +1212,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i val reg\n", pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
comm = byt_get_community(vg, pin);
if (!comm) {
@@ -1296,7 +1292,7 @@ static const struct gpio_chip byt_gpio_chip = {
static void byt_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
unsigned int offset = irqd_to_hwirq(d);
void __iomem *reg;
@@ -1304,15 +1300,15 @@ static void byt_irq_ack(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
writel(BIT(offset % 32), reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
}
static void byt_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
}
@@ -1320,7 +1316,7 @@ static void byt_irq_mask(struct irq_data *d)
static void byt_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct intel_pinctrl *vg = gpiochip_get_data(gc);
unsigned int offset = irqd_to_hwirq(d);
unsigned long flags;
void __iomem *reg;
@@ -1330,7 +1326,7 @@ static void byt_irq_unmask(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
switch (irqd_get_trigger_type(d)) {
@@ -1353,12 +1349,12 @@ static void byt_irq_unmask(struct irq_data *d)
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_irq_type(struct irq_data *d, unsigned int type)
{
- struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
u32 offset = irqd_to_hwirq(d);
u32 value;
unsigned long flags;
@@ -1367,7 +1363,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
if (!reg || offset >= vg->chip.ngpio)
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1389,25 +1385,15 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
-static struct irq_chip byt_irqchip = {
- .name = "BYT-GPIO",
- .irq_ack = byt_irq_ack,
- .irq_mask = byt_irq_mask,
- .irq_unmask = byt_irq_unmask,
- .irq_set_type = byt_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static void byt_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct byt_gpio *vg = gpiochip_get_data(
- irq_desc_get_handler_data(desc));
+ struct intel_pinctrl *vg = gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin;
void __iomem *reg;
@@ -1419,15 +1405,15 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve interrupt status register\n",
base);
continue;
}
- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
pending = readl(reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
generic_handle_irq(virq);
@@ -1440,22 +1426,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
{
- /*
- * FIXME: currently the valid_mask is filled in as part of
- * initializing the irq_chip below in byt_gpio_irq_init_hw().
- * when converting this driver to the new way of passing the
- * gpio_irq_chip along when adding the gpio_chip, move the
- * mask initialization into this callback instead. Right now
- * this callback is here to make sure the mask gets allocated.
- */
-}
-
-static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
-{
- struct gpio_chip *gc = &vg->chip;
- struct device *dev = &vg->pdev->dev;
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg;
- u32 base, value;
+ u32 value;
int i;
/*
@@ -1463,12 +1436,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
* do not use direct IRQ mode. This will prevent spurious
* interrupts from misconfigured pins.
*/
- for (i = 0; i < vg->soc_data->npins; i++) {
- unsigned int pin = vg->soc_data->pins[i].number;
+ for (i = 0; i < vg->soc->npins; i++) {
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
@@ -1476,20 +1449,27 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
value = readl(reg);
if (value & BYT_DIRECT_IRQ_EN) {
- clear_bit(i, gc->irq.valid_mask);
- dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+ clear_bit(i, valid_mask);
+ dev_dbg(vg->dev, "excluding GPIO %d from IRQ domain\n", i);
} else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
- dev_dbg(dev, "disabling GPIO %d\n", i);
+ dev_dbg(vg->dev, "disabling GPIO %d\n", i);
}
}
+}
+
+static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *reg;
+ u32 base, value;
/* clear interrupt status trigger registers */
- for (base = 0; base < vg->soc_data->npins; base += 32) {
+ for (base = 0; base < vg->soc->npins; base += 32) {
reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve irq status reg\n",
base);
continue;
@@ -1500,14 +1480,30 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
might be misconfigured in bios */
value = readl(reg);
if (value)
- dev_err(&vg->pdev->dev,
+ dev_err(vg->dev,
"GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
base / 32, value);
}
+
+ return 0;
}
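
With the old FIXME resolved, valid-mask setup and interrupt-hardware init are now two distinct gpio_irq_chip hooks that gpiolib invokes on the driver's behalf during chip registration; a rough sketch of the contract (hook names as used in this series, exact call ordering inside gpiolib elided):

```c
/*
 * girq->init_valid_mask(gc, valid_mask, ngpios)
 *	- clear bits for pins that must stay out of the IRQ domain
 *	  (here: direct-IRQ pins claimed by the firmware)
 *
 * girq->init_hw(gc)
 *	- quiesce/ack the interrupt hardware before any IRQ can be
 *	  mapped; returns 0 on success or a negative errno
 */
```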
-static int byt_gpio_probe(struct byt_gpio *vg)
+static int byt_gpio_add_pin_ranges(struct gpio_chip *chip)
{
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ struct device *dev = vg->dev;
+ int ret;
+
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc->npins);
+ if (ret)
+ dev_err(dev, "failed to add GPIO pin range\n");
+
+ return ret;
+}
+
+static int byt_gpio_probe(struct intel_pinctrl *vg)
+{
+ struct platform_device *pdev = to_platform_device(vg->dev);
struct gpio_chip *gc;
struct resource *irq_rc;
int ret;
@@ -1515,70 +1511,76 @@ static int byt_gpio_probe(struct byt_gpio *vg)
/* Set up gpio chip */
vg->chip = byt_gpio_chip;
gc = &vg->chip;
- gc->label = dev_name(&vg->pdev->dev);
+ gc->label = dev_name(vg->dev);
gc->base = -1;
gc->can_sleep = false;
- gc->parent = &vg->pdev->dev;
- gc->ngpio = vg->soc_data->npins;
- gc->irq.init_valid_mask = byt_init_irq_valid_mask;
+ gc->add_pin_ranges = byt_gpio_add_pin_ranges;
+ gc->parent = vg->dev;
+ gc->ngpio = vg->soc->npins;
#ifdef CONFIG_PM_SLEEP
- vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
- sizeof(*vg->saved_context), GFP_KERNEL);
- if (!vg->saved_context)
+ vg->context.pads = devm_kcalloc(vg->dev, gc->ngpio, sizeof(*vg->context.pads),
+ GFP_KERNEL);
+ if (!vg->context.pads)
return -ENOMEM;
#endif
- ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
- if (ret) {
- dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
- return ret;
- }
-
- ret = gpiochip_add_pin_range(&vg->chip, dev_name(&vg->pdev->dev),
- 0, 0, vg->soc_data->npins);
- if (ret) {
- dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n");
- return ret;
- }
/* set up interrupts */
- irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_rc && irq_rc->start) {
- byt_gpio_irq_init_hw(vg);
- ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0,
- handle_bad_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&vg->pdev->dev, "failed to add irqchip\n");
- return ret;
- }
+ struct gpio_irq_chip *girq;
+
+ vg->irqchip.name = "BYT-GPIO",
+ vg->irqchip.irq_ack = byt_irq_ack,
+ vg->irqchip.irq_mask = byt_irq_mask,
+ vg->irqchip.irq_unmask = byt_irq_unmask,
+ vg->irqchip.irq_set_type = byt_irq_type,
+ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE,
+
+ girq = &gc->irq;
+ girq->chip = &vg->irqchip;
+ girq->init_hw = byt_gpio_irq_init_hw;
+ girq->init_valid_mask = byt_init_irq_valid_mask;
+ girq->parent_handler = byt_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(vg->dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ }
- gpiochip_set_chained_irqchip(gc, &byt_irqchip,
- (unsigned)irq_rc->start,
- byt_gpio_irq_handler);
+ ret = devm_gpiochip_add_data(vg->dev, gc, vg);
+ if (ret) {
+ dev_err(vg->dev, "failed adding byt-gpio chip\n");
+ return ret;
}
return ret;
}
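
The same conversion recipe applies to any driver still on gpiochip_irqchip_add(); a minimal sketch under the v5.5+ gpiolib, where demo_probe_irq() and demo_irq_handler are hypothetical stand-ins:

```c
static int demo_probe_irq(struct device *dev, struct gpio_chip *gc,
			  struct irq_chip *ic, unsigned int parent_irq,
			  void *priv)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = ic;
	girq->parent_handler = demo_irq_handler;	/* chained flow handler, not shown */
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* one call now replaces gpiochip_irqchip_add() +
	 * gpiochip_set_chained_irqchip() */
	return devm_gpiochip_add_data(dev, gc, priv);
}
```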
-static int byt_set_soc_data(struct byt_gpio *vg,
- const struct intel_pinctrl_soc_data *soc_data)
+static int byt_set_soc_data(struct intel_pinctrl *vg,
+ const struct intel_pinctrl_soc_data *soc)
{
+ struct platform_device *pdev = to_platform_device(vg->dev);
int i;
- vg->soc_data = soc_data;
- vg->communities_copy = devm_kcalloc(&vg->pdev->dev,
- soc_data->ncommunities,
- sizeof(*vg->communities_copy),
- GFP_KERNEL);
- if (!vg->communities_copy)
+ vg->soc = soc;
+
+ vg->ncommunities = vg->soc->ncommunities;
+ vg->communities = devm_kcalloc(vg->dev, vg->ncommunities,
+ sizeof(*vg->communities), GFP_KERNEL);
+ if (!vg->communities)
return -ENOMEM;
- for (i = 0; i < soc_data->ncommunities; i++) {
- struct intel_community *comm = vg->communities_copy + i;
+ for (i = 0; i < vg->soc->ncommunities; i++) {
+ struct intel_community *comm = vg->communities + i;
- *comm = vg->soc_data->communities[i];
+ *comm = vg->soc->communities[i];
- comm->pad_regs = devm_platform_ioremap_resource(vg->pdev, 0);
+ comm->pad_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(comm->pad_regs))
return PTR_ERR(comm->pad_regs);
}
@@ -1596,15 +1598,16 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data *soc_data = NULL;
const struct intel_pinctrl_soc_data **soc_table;
+ struct device *dev = &pdev->dev;
struct acpi_device *acpi_dev;
- struct byt_gpio *vg;
+ struct intel_pinctrl *vg;
int i, ret;
- acpi_dev = ACPI_COMPANION(&pdev->dev);
+ acpi_dev = ACPI_COMPANION(dev);
if (!acpi_dev)
return -ENODEV;
- soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(&pdev->dev);
+ soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
for (i = 0; soc_table[i]; i++) {
if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
@@ -1616,36 +1619,34 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
if (!soc_data)
return -ENODEV;
- vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
+ vg = devm_kzalloc(dev, sizeof(*vg), GFP_KERNEL);
if (!vg)
return -ENOMEM;
- vg->pdev = pdev;
+ vg->dev = dev;
ret = byt_set_soc_data(vg, soc_data);
if (ret) {
- dev_err(&pdev->dev, "failed to set soc data\n");
+ dev_err(dev, "failed to set soc data\n");
return ret;
}
- vg->pctl_desc = byt_pinctrl_desc;
- vg->pctl_desc.name = dev_name(&pdev->dev);
- vg->pctl_desc.pins = vg->soc_data->pins;
- vg->pctl_desc.npins = vg->soc_data->npins;
+ vg->pctldesc = byt_pinctrl_desc;
+ vg->pctldesc.name = dev_name(dev);
+ vg->pctldesc.pins = vg->soc->pins;
+ vg->pctldesc.npins = vg->soc->npins;
- vg->pctl_dev = devm_pinctrl_register(&pdev->dev, &vg->pctl_desc, vg);
- if (IS_ERR(vg->pctl_dev)) {
- dev_err(&pdev->dev, "failed to register pinctrl driver\n");
- return PTR_ERR(vg->pctl_dev);
+ vg->pctldev = devm_pinctrl_register(dev, &vg->pctldesc, vg);
+ if (IS_ERR(vg->pctldev)) {
+ dev_err(dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(vg->pctldev);
}
- raw_spin_lock_init(&vg->lock);
-
ret = byt_gpio_probe(vg);
if (ret)
return ret;
platform_set_drvdata(pdev, vg);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
return 0;
}
@@ -1653,54 +1654,61 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int byt_gpio_suspend(struct device *dev)
{
- struct byt_gpio *vg = dev_get_drvdata(dev);
+ struct intel_pinctrl *vg = dev_get_drvdata(dev);
+ unsigned long flags;
int i;
- for (i = 0; i < vg->soc_data->npins; i++) {
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
u32 value;
- unsigned int pin = vg->soc_data->pins[i].number;
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
}
value = readl(reg) & BYT_CONF0_RESTORE_MASK;
- vg->saved_context[i].conf0 = value;
+ vg->context.pads[i].conf0 = value;
reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg) & BYT_VAL_RESTORE_MASK;
- vg->saved_context[i].val = value;
+ vg->context.pads[i].val = value;
}
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
static int byt_gpio_resume(struct device *dev)
{
- struct byt_gpio *vg = dev_get_drvdata(dev);
+ struct intel_pinctrl *vg = dev_get_drvdata(dev);
+ unsigned long flags;
int i;
- for (i = 0; i < vg->soc_data->npins; i++) {
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
u32 value;
- unsigned int pin = vg->soc_data->pins[i].number;
+ unsigned int pin = vg->soc->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
- dev_warn(&vg->pdev->dev,
+ dev_warn(vg->dev,
"Pin %i: could not retrieve conf0 register\n",
i);
continue;
}
value = readl(reg);
if ((value & BYT_CONF0_RESTORE_MASK) !=
- vg->saved_context[i].conf0) {
+ vg->context.pads[i].conf0) {
value &= ~BYT_CONF0_RESTORE_MASK;
- value |= vg->saved_context[i].conf0;
+ value |= vg->context.pads[i].conf0;
writel(value, reg);
dev_info(dev, "restored pin %d conf0 %#08x", i, value);
}
@@ -1708,11 +1716,11 @@ static int byt_gpio_resume(struct device *dev)
reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg);
if ((value & BYT_VAL_RESTORE_MASK) !=
- vg->saved_context[i].val) {
+ vg->context.pads[i].val) {
u32 v;
v = value & ~BYT_VAL_RESTORE_MASK;
- v |= vg->saved_context[i].val;
+ v |= vg->context.pads[i].val;
if (v != value) {
writel(v, reg);
dev_dbg(dev, "restored pin %d val %#08x\n",
@@ -1721,6 +1729,7 @@ static int byt_gpio_resume(struct device *dev)
}
}
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
#endif
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 582fa8a75559..4c74fdde576d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -149,6 +149,7 @@ struct chv_pin_context {
* @chip: GPIO chip in this pin controller
* @irqchip: IRQ chip in this pin controller
* @regs: MMIO registers
+ * @irq: Our parent irq
* @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
* offset (in GPIO number space)
* @community: Community this pinctrl instance represents
@@ -165,6 +166,7 @@ struct chv_pinctrl {
struct gpio_chip chip;
struct irq_chip irqchip;
void __iomem *regs;
+ unsigned int irq;
unsigned int intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
@@ -1287,7 +1289,10 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
- return direction != CHV_PADCTRL0_GPIOCFG_GPO;
+ if (direction == CHV_PADCTRL0_GPIOCFG_GPO)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1555,39 +1560,9 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
}
}
-static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
{
- const struct chv_gpio_pinrange *range;
- struct gpio_chip *chip = &pctrl->chip;
- bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
- const struct chv_community *community = pctrl->community;
- int ret, i, irq_base;
-
- *chip = chv_gpio_chip;
-
- chip->ngpio = community->pins[community->npins - 1].number + 1;
- chip->label = dev_name(pctrl->dev);
- chip->parent = pctrl->dev;
- chip->base = -1;
- if (need_valid_mask)
- chip->irq.init_valid_mask = chv_init_irq_valid_mask;
-
- ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "Failed to register gpiochip\n");
- return ret;
- }
-
- for (i = 0; i < community->ngpio_ranges; i++) {
- range = &community->gpio_ranges[i];
- ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
- range->base, range->base,
- range->npins);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- return ret;
- }
- }
+ struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
/*
* The same set of machines in chv_no_valid_mask[] have incorrectly
@@ -1596,7 +1571,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
*
* See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
*/
- if (!need_valid_mask) {
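+	/*
+	 * init_valid_mask is only set when the DMI quirk does not apply
+	 * (see chv_gpio_probe() below), so this check is equivalent to
+	 * the old !need_valid_mask test.
+	 */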
+ if (!pctrl->chip.irq.init_valid_mask) {
/*
* Mask all interrupts the community is able to generate
* but leave the ones that can only generate GPEs unmasked.
@@ -1608,15 +1583,47 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
- if (!need_valid_mask) {
- irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
- community->npins, NUMA_NO_NODE);
- if (irq_base < 0) {
- dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
- return irq_base;
+ return 0;
+}
+
+static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct chv_community *community = pctrl->community;
+ const struct chv_gpio_pinrange *range;
+ int ret, i;
+
+ for (i = 0; i < community->ngpio_ranges; i++) {
+ range = &community->gpio_ranges[i];
+ ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
+ range->base, range->base,
+ range->npins);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ return ret;
}
}
+ return 0;
+}
+
+static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+{
+ const struct chv_gpio_pinrange *range;
+ struct gpio_chip *chip = &pctrl->chip;
+ bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
+ const struct chv_community *community = pctrl->community;
+ int ret, i, irq_base;
+
+ *chip = chv_gpio_chip;
+
+ chip->ngpio = community->pins[community->npins - 1].number + 1;
+ chip->label = dev_name(pctrl->dev);
+ chip->add_pin_ranges = chv_gpio_add_pin_ranges;
+ chip->parent = pctrl->dev;
+ chip->base = -1;
+
+ pctrl->irq = irq;
pctrl->irqchip.name = "chv-gpio";
pctrl->irqchip.irq_startup = chv_gpio_irq_startup;
pctrl->irqchip.irq_ack = chv_gpio_irq_ack;
@@ -1625,10 +1632,27 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
- ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0,
- handle_bad_irq, IRQ_TYPE_NONE);
+ chip->irq.chip = &pctrl->irqchip;
+ chip->irq.init_hw = chv_gpio_irq_init_hw;
+ chip->irq.parent_handler = chv_gpio_irq_handler;
+ chip->irq.num_parents = 1;
+ chip->irq.parents = &pctrl->irq;
+ chip->irq.default_type = IRQ_TYPE_NONE;
+ chip->irq.handler = handle_bad_irq;
+ if (need_valid_mask) {
+ chip->irq.init_valid_mask = chv_init_irq_valid_mask;
+ } else {
+ irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+ community->npins, NUMA_NO_NODE);
+ if (irq_base < 0) {
+ dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ return irq_base;
+ }
+ }
+
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
- dev_err(pctrl->dev, "failed to add IRQ chip\n");
+ dev_err(pctrl->dev, "Failed to register gpiochip\n");
return ret;
}
@@ -1642,8 +1666,6 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
}
}
- gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq,
- chv_gpio_irq_handler);
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 4860bc9a4e48..74fdfd2b9ff5 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -8,8 +8,8 @@
*/
#include <linux/acpi.h>
-#include <linux/interrupt.h>
#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -85,39 +85,6 @@ struct intel_community_context {
u32 *hostown;
};
-struct intel_pinctrl_context {
- struct intel_pad_context *pads;
- struct intel_community_context *communities;
-};
-
-/**
- * struct intel_pinctrl - Intel pinctrl private structure
- * @dev: Pointer to the device structure
- * @lock: Lock to serialize register access
- * @pctldesc: Pin controller description
- * @pctldev: Pointer to the pin controller device
- * @chip: GPIO chip in this pin controller
- * @irqchip: IRQ chip in this pin controller
- * @soc: SoC/PCH specific pin configuration data
- * @communities: All communities in this pin controller
- * @ncommunities: Number of communities in this pin controller
- * @context: Configuration saved over system sleep
- * @irq: pinctrl/GPIO chip irq number
- */
-struct intel_pinctrl {
- struct device *dev;
- raw_spinlock_t lock;
- struct pinctrl_desc pctldesc;
- struct pinctrl_dev *pctldev;
- struct gpio_chip chip;
- struct irq_chip irqchip;
- const struct intel_pinctrl_soc_data *soc;
- struct intel_community *communities;
- size_t ncommunities;
- struct intel_pinctrl_context context;
- int irq;
-};
-
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
#define padgroup_offset(g, p) ((p) - (g)->base)
@@ -944,7 +911,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (padcfg0 & PADCFG0_PMODE_MASK)
return -EINVAL;
- return !!(padcfg0 & PADCFG0_GPIOTXDIS);
+ if (padcfg0 & PADCFG0_GPIOTXDIS)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1160,8 +1130,8 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return ret;
}
-static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
+static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
+ const struct intel_community *community)
{
int ret = 0, i;
@@ -1181,6 +1151,24 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
+static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ int ret, i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ struct intel_community *community = &pctrl->communities[i];
+
+ ret = intel_gpio_add_community_ranges(pctrl, community);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
@@ -1205,7 +1193,8 @@ static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
- int ret, i;
+ int ret;
+ struct gpio_irq_chip *girq;
pctrl->chip = intel_gpio_chip;
@@ -1214,6 +1203,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
+ pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges;
pctrl->irq = irq;
/* Setup IRQ chip */
@@ -1225,26 +1215,9 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
- ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to register gpiochip\n");
- return ret;
- }
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- struct intel_community *community = &pctrl->communities[i];
-
- ret = intel_gpio_add_pin_ranges(pctrl, community);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- return ret;
- }
- }
-
/*
- * We need to request the interrupt here (instead of providing chip
- * to the irq directly) because on some platforms several GPIO
- * controllers share the same interrupt line.
+ * On some platforms several GPIO controllers share the same interrupt
+ * line.
*/
ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq,
IRQF_SHARED | IRQF_NO_THREAD,
@@ -1254,14 +1227,20 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return ret;
}
- ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
- handle_bad_irq, IRQ_TYPE_NONE);
+ girq = &pctrl->chip.irq;
+ girq->chip = &pctrl->irqchip;
+ /* This will let us handle the IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
- dev_err(pctrl->dev, "failed to add irqchip\n");
+ dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
return 0;
}
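
Since num_parents stays 0 here and the line is requested directly with IRQF_SHARED, the handler has to observe the shared-IRQ contract; a sketch of the shape intel_gpio_irq() takes (simplified, demo_* names hypothetical):

```c
static irqreturn_t demo_shared_irq(int irq, void *data)
{
	struct intel_pinctrl *pctrl = data;
	int handled = 0;

	/* scan each community's pending register, dispatch, count hits */

	/* IRQ_NONE lets the core try the other devices sharing the line */
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
```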
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 34b38a321760..c6f066f6d3fb 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,7 +10,10 @@
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
#include <linux/pm.h>
+#include <linux/spinlock_types.h>
struct pinctrl_pin_desc;
struct platform_device;
@@ -174,6 +177,47 @@ struct intel_pinctrl_soc_data {
size_t ncommunities;
};
+struct intel_pad_context;
+struct intel_community_context;
+
+/**
+ * struct intel_pinctrl_context - context to be saved during suspend-resume
+ * @pads: Opaque context per pad (driver dependent)
+ * @communities: Opaque context per community (driver dependent)
+ */
+struct intel_pinctrl_context {
+ struct intel_pad_context *pads;
+ struct intel_community_context *communities;
+};
+
+/**
+ * struct intel_pinctrl - Intel pinctrl private structure
+ * @dev: Pointer to the device structure
+ * @lock: Lock to serialize register access
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
+ * @soc: SoC/PCH specific pin configuration data
+ * @communities: All communities in this pin controller
+ * @ncommunities: Number of communities in this pin controller
+ * @context: Configuration saved over system sleep
+ * @irq: pinctrl/GPIO chip irq number
+ */
+struct intel_pinctrl {
+ struct device *dev;
+ raw_spinlock_t lock;
+ struct pinctrl_desc pctldesc;
+ struct pinctrl_dev *pctldev;
+ struct gpio_chip chip;
+ struct irq_chip irqchip;
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_community *communities;
+ size_t ncommunities;
+ struct intel_pinctrl_context context;
+ int irq;
+};
+
int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
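
Exporting struct intel_pinctrl from the header is what lets the Baytrail (and the new Lynxpoint) driver reuse it; the suspend context stays opaque because each driver supplies its own intel_pad_context layout. A sketch of the Baytrail one, inferred from the vg->context.pads[i].conf0/.val accesses in the hunks above:

```c
/* Baytrail's private definition behind the opaque forward declaration */
struct intel_pad_context {
	u32 conf0;
	u32 val;
};
```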
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
new file mode 100644
index 000000000000..e928742c7181
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Lynxpoint PCH pinctrl/GPIO driver
+ *
+ * Copyright (c) 2012, 2019, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-intel.h"
+
+#define COMMUNITY(p, n) \
+ { \
+ .pin_base = (p), \
+ .npins = (n), \
+ }
+
+static const struct pinctrl_pin_desc lptlp_pins[] = {
+ PINCTRL_PIN(0, "GP0_UART1_RXD"),
+ PINCTRL_PIN(1, "GP1_UART1_TXD"),
+ PINCTRL_PIN(2, "GP2_UART1_RTSB"),
+ PINCTRL_PIN(3, "GP3_UART1_CTSB"),
+ PINCTRL_PIN(4, "GP4_I2C0_SDA"),
+ PINCTRL_PIN(5, "GP5_I2C0_SCL"),
+ PINCTRL_PIN(6, "GP6_I2C1_SDA"),
+ PINCTRL_PIN(7, "GP7_I2C1_SCL"),
+ PINCTRL_PIN(8, "GP8"),
+ PINCTRL_PIN(9, "GP9"),
+ PINCTRL_PIN(10, "GP10"),
+ PINCTRL_PIN(11, "GP11_SMBALERTB"),
+ PINCTRL_PIN(12, "GP12_LANPHYPC"),
+ PINCTRL_PIN(13, "GP13"),
+ PINCTRL_PIN(14, "GP14"),
+ PINCTRL_PIN(15, "GP15"),
+ PINCTRL_PIN(16, "GP16_MGPIO9"),
+ PINCTRL_PIN(17, "GP17_MGPIO10"),
+ PINCTRL_PIN(18, "GP18_SRC0CLKRQB"),
+ PINCTRL_PIN(19, "GP19_SRC1CLKRQB"),
+ PINCTRL_PIN(20, "GP20_SRC2CLKRQB"),
+ PINCTRL_PIN(21, "GP21_SRC3CLKRQB"),
+ PINCTRL_PIN(22, "GP22_SRC4CLKRQB_TRST2"),
+ PINCTRL_PIN(23, "GP23_SRC5CLKRQB_TDI2"),
+ PINCTRL_PIN(24, "GP24_MGPIO0"),
+ PINCTRL_PIN(25, "GP25_USBWAKEOUTB"),
+ PINCTRL_PIN(26, "GP26_MGPIO5"),
+ PINCTRL_PIN(27, "GP27_MGPIO6"),
+ PINCTRL_PIN(28, "GP28_MGPIO7"),
+ PINCTRL_PIN(29, "GP29_SLP_WLANB_MGPIO3"),
+ PINCTRL_PIN(30, "GP30_SUSWARNB_SUSPWRDNACK_MGPIO1"),
+ PINCTRL_PIN(31, "GP31_ACPRESENT_MGPIO2"),
+ PINCTRL_PIN(32, "GP32_CLKRUNB"),
+ PINCTRL_PIN(33, "GP33_DEVSLP0"),
+ PINCTRL_PIN(34, "GP34_SATA0XPCIE6L3B_SATA0GP"),
+ PINCTRL_PIN(35, "GP35_SATA1XPCIE6L2B_SATA1GP"),
+ PINCTRL_PIN(36, "GP36_SATA2XPCIE6L1B_SATA2GP"),
+ PINCTRL_PIN(37, "GP37_SATA3XPCIE6L0B_SATA3GP"),
+ PINCTRL_PIN(38, "GP38_DEVSLP1"),
+ PINCTRL_PIN(39, "GP39_DEVSLP2"),
+ PINCTRL_PIN(40, "GP40_OC0B"),
+ PINCTRL_PIN(41, "GP41_OC1B"),
+ PINCTRL_PIN(42, "GP42_OC2B"),
+ PINCTRL_PIN(43, "GP43_OC3B"),
+ PINCTRL_PIN(44, "GP44"),
+ PINCTRL_PIN(45, "GP45_TMS2"),
+ PINCTRL_PIN(46, "GP46_TDO2"),
+ PINCTRL_PIN(47, "GP47"),
+ PINCTRL_PIN(48, "GP48"),
+ PINCTRL_PIN(49, "GP49"),
+ PINCTRL_PIN(50, "GP50"),
+ PINCTRL_PIN(51, "GP51_GSXDOUT"),
+ PINCTRL_PIN(52, "GP52_GSXSLOAD"),
+ PINCTRL_PIN(53, "GP53_GSXDIN"),
+ PINCTRL_PIN(54, "GP54_GSXSRESETB"),
+ PINCTRL_PIN(55, "GP55_GSXCLK"),
+ PINCTRL_PIN(56, "GP56"),
+ PINCTRL_PIN(57, "GP57"),
+ PINCTRL_PIN(58, "GP58"),
+ PINCTRL_PIN(59, "GP59"),
+ PINCTRL_PIN(60, "GP60_SML0ALERTB_MGPIO4"),
+ PINCTRL_PIN(61, "GP61_SUS_STATB"),
+ PINCTRL_PIN(62, "GP62_SUSCLK"),
+ PINCTRL_PIN(63, "GP63_SLP_S5B"),
+ PINCTRL_PIN(64, "GP64_SDIO_CLK"),
+ PINCTRL_PIN(65, "GP65_SDIO_CMD"),
+ PINCTRL_PIN(66, "GP66_SDIO_D0"),
+ PINCTRL_PIN(67, "GP67_SDIO_D1"),
+ PINCTRL_PIN(68, "GP68_SDIO_D2"),
+ PINCTRL_PIN(69, "GP69_SDIO_D3"),
+ PINCTRL_PIN(70, "GP70_SDIO_POWER_EN"),
+ PINCTRL_PIN(71, "GP71_MPHYPC"),
+ PINCTRL_PIN(72, "GP72_BATLOWB"),
+ PINCTRL_PIN(73, "GP73_SML1ALERTB_PCHHOTB_MGPIO8"),
+ PINCTRL_PIN(74, "GP74_SML1DATA_MGPIO12"),
+ PINCTRL_PIN(75, "GP75_SML1CLK_MGPIO11"),
+ PINCTRL_PIN(76, "GP76_BMBUSYB"),
+ PINCTRL_PIN(77, "GP77_PIRQAB"),
+ PINCTRL_PIN(78, "GP78_PIRQBB"),
+ PINCTRL_PIN(79, "GP79_PIRQCB"),
+ PINCTRL_PIN(80, "GP80_PIRQDB"),
+ PINCTRL_PIN(81, "GP81_SPKR"),
+ PINCTRL_PIN(82, "GP82_RCINB"),
+ PINCTRL_PIN(83, "GP83_GSPI0_CSB"),
+ PINCTRL_PIN(84, "GP84_GSPI0_CLK"),
+ PINCTRL_PIN(85, "GP85_GSPI0_MISO"),
+ PINCTRL_PIN(86, "GP86_GSPI0_MOSI"),
+ PINCTRL_PIN(87, "GP87_GSPI1_CSB"),
+ PINCTRL_PIN(88, "GP88_GSPI1_CLK"),
+ PINCTRL_PIN(89, "GP89_GSPI1_MISO"),
+ PINCTRL_PIN(90, "GP90_GSPI1_MOSI"),
+ PINCTRL_PIN(91, "GP91_UART0_RXD"),
+ PINCTRL_PIN(92, "GP92_UART0_TXD"),
+ PINCTRL_PIN(93, "GP93_UART0_RTSB"),
+ PINCTRL_PIN(94, "GP94_UART0_CTSB"),
+};
+
+static const struct intel_community lptlp_communities[] = {
+ COMMUNITY(0, 95),
+};
+
+static const struct intel_pinctrl_soc_data lptlp_soc_data = {
+ .pins = lptlp_pins,
+ .npins = ARRAY_SIZE(lptlp_pins),
+ .communities = lptlp_communities,
+ .ncommunities = ARRAY_SIZE(lptlp_communities),
+};
+
+/* LynxPoint chipset has support for 95 GPIO pins */
+
+#define LP_NUM_GPIO 95
+
+/* Bitmapped register offsets */
+#define LP_ACPI_OWNED 0x00 /* Bitmap, set by bios, 0: pin reserved for ACPI */
+#define LP_IRQ2IOXAPIC 0x10 /* Bitmap, set by bios, 1: pin routed to IOxAPIC */
+#define LP_GC 0x7C /* set APIC IRQ to IRQ14 or IRQ15 for all pins */
+#define LP_INT_STAT 0x80
+#define LP_INT_ENABLE 0x90
+
+/* Each pin has two 32 bit config registers, starting at 0x100 */
+#define LP_CONFIG1 0x100
+#define LP_CONFIG2 0x104
+
+/* LP_CONFIG1 reg bits */
+#define OUT_LVL_BIT BIT(31)
+#define IN_LVL_BIT BIT(30)
+#define TRIG_SEL_BIT BIT(4) /* 0: Edge, 1: Level */
+#define INT_INV_BIT BIT(3) /* Invert interrupt triggering */
+#define DIR_BIT BIT(2) /* 0: Output, 1: Input */
+#define USE_SEL_MASK GENMASK(1, 0) /* 0: Native, 1: GPIO, ... */
+#define USE_SEL_NATIVE (0 << 0)
+#define USE_SEL_GPIO (1 << 0)
+
+/* LP_CONFIG2 reg bits */
+#define GPINDIS_BIT BIT(2) /* disable input sensing */
+#define GPIWP_MASK GENMASK(1, 0) /* weak pull options */
+#define GPIWP_NONE 0 /* none */
+#define GPIWP_DOWN 1 /* weak pull down */
+#define GPIWP_UP 2 /* weak pull up */
+
+/*
+ * Lynxpoint GPIOs are controlled through both bitmapped registers and
+ * per-GPIO specific registers. The bitmapped registers are in chunks of
+ * 3 x 32-bit registers to cover all 95 GPIOs.
+ *
+ * The per-GPIO registers consist of two 32-bit registers per GPIO
+ * (LP_CONFIG1 and LP_CONFIG2); with 95 GPIOs there are 190 config
+ * registers in total.
+ *
+ * A simplified view of the register layout looks like this:
+ *
+ * LP_ACPI_OWNED[31:0] gpio ownerships for gpios 0-31 (bitmapped registers)
+ * LP_ACPI_OWNED[63:32] gpio ownerships for gpios 32-63
+ * LP_ACPI_OWNED[94:64] gpio ownerships for gpios 64-94
+ * ...
+ * LP_INT_ENABLE[31:0] ...
+ * LP_INT_ENABLE[63:32] ...
+ * LP_INT_ENABLE[94:64] ...
+ * LP0_CONFIG1 (gpio 0) config1 reg for gpio 0 (per gpio registers)
+ * LP0_CONFIG2 (gpio 0) config2 reg for gpio 0
+ * LP1_CONFIG1 (gpio 1) config1 reg for gpio 1
+ * LP1_CONFIG2 (gpio 1) config2 reg for gpio 1
+ * LP2_CONFIG1 (gpio 2) ...
+ * LP2_CONFIG2 (gpio 2) ...
+ * ...
+ * LP94_CONFIG1 (gpio 94) ...
+ * LP94_CONFIG2 (gpio 94) ...
+ *
+ * IOxAPIC redirection map applies only for gpio 8-10, 13-14, 45-55.
+ */
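+
+/*
+ * Worked example of the layout above, for (hypothetical) GPIO 40:
+ *   LP_CONFIG1:  regs + 0x100 + 40 * 8       = regs + 0x240
+ *   LP_CONFIG2:  regs + 0x104 + 40 * 8       = regs + 0x244
+ *   LP_INT_STAT: regs + 0x80 + (40 / 32) * 4 = regs + 0x84, bit 40 % 32 = 8
+ */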
+
+static struct intel_community *lp_get_community(struct intel_pinctrl *lg,
+ unsigned int pin)
+{
+ struct intel_community *comm;
+ int i;
+
+ for (i = 0; i < lg->ncommunities; i++) {
+ comm = &lg->communities[i];
+ if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
+ return comm;
+ }
+
+ return NULL;
+}
+
+static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset,
+ int reg)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ struct intel_community *comm;
+ int reg_offset;
+
+ comm = lp_get_community(lg, offset);
+ if (!comm)
+ return NULL;
+
+ offset -= comm->pin_base;
+
+ if (reg == LP_CONFIG1 || reg == LP_CONFIG2)
+ /* per gpio specific config registers */
+ reg_offset = offset * 8;
+ else
+ /* bitmapped registers */
+ reg_offset = (offset / 32) * 4;
+
+ return comm->regs + reg_offset + reg;
+}
+
+static bool lp_gpio_acpi_use(struct intel_pinctrl *lg, unsigned int pin)
+{
+ void __iomem *acpi_use;
+
+ acpi_use = lp_gpio_reg(&lg->chip, pin, LP_ACPI_OWNED);
+ if (!acpi_use)
+ return true;
+
+ return !(ioread32(acpi_use) & BIT(pin % 32));
+}
+
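+/*
+ * LP_IRQ2IOXAPIC packs only the pins that can be routed to the IOxAPIC
+ * (decoded by lp_gpio_ioxapic_use() below):
+ *   bits  0..2  <-> pins  8..10
+ *   bits  3..4  <-> pins 13..14
+ *   bits  5..15 <-> pins 45..55
+ */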
+static bool lp_gpio_ioxapic_use(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *ioxapic_use = lp_gpio_reg(chip, offset, LP_IRQ2IOXAPIC);
+ u32 value;
+
+ value = ioread32(ioxapic_use);
+
+ if (offset >= 8 && offset <= 10)
+ return !!(value & BIT(offset - 8 + 0));
+ if (offset >= 13 && offset <= 14)
+ return !!(value & BIT(offset - 13 + 3));
+ if (offset >= 45 && offset <= 55)
+ return !!(value & BIT(offset - 45 + 5));
+
+ return false;
+}
+
+static int lp_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->ngroups;
+}
+
+static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->groups[selector].name;
+}
+
+static int lp_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = lg->soc->groups[selector].pins;
+ *num_pins = lg->soc->groups[selector].npins;
+
+ return 0;
+}
+
+static void lp_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ u32 value, mode;
+
+ value = ioread32(reg);
+
+ mode = value & USE_SEL_MASK;
+ if (mode == USE_SEL_GPIO)
+ seq_puts(s, "GPIO ");
+ else
+ seq_printf(s, "mode %d ", mode);
+
+ seq_printf(s, "0x%08x 0x%08x", value, ioread32(conf2));
+
+ if (lp_gpio_acpi_use(lg, pin))
+ seq_puts(s, " [ACPI]");
+}
+
+static const struct pinctrl_ops lptlp_pinctrl_ops = {
+ .get_groups_count = lp_get_groups_count,
+ .get_group_name = lp_get_group_name,
+ .get_group_pins = lp_get_group_pins,
+ .pin_dbg_show = lp_pin_dbg_show,
+};
+
+static int lp_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->nfunctions;
+}
+
+static const char *lp_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ return lg->soc->functions[selector].name;
+}
+
+static int lp_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = lg->soc->functions[selector].groups;
+ *num_groups = lg->soc->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pingroup *grp = &lg->soc->groups[group];
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /* Now enable the mux setting for each pin in the group */
+ for (i = 0; i < grp->npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ u32 value;
+
+ value = ioread32(reg);
+
+ value &= ~USE_SEL_MASK;
+ if (grp->modes)
+ value |= grp->modes[i];
+ else
+ value |= grp->mode;
+
+ iowrite32(value, reg);
+ }
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ unsigned long flags;
+ u32 value;
+
+ pm_runtime_get(lg->dev);
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /*
+ * Reconfigure pin to GPIO mode if needed and issue a warning,
+ * since we expect firmware to configure it properly.
+ */
+ value = ioread32(reg);
+ if ((value & USE_SEL_MASK) != USE_SEL_GPIO) {
+ iowrite32((value & ~USE_SEL_MASK) | USE_SEL_GPIO, reg);
+ dev_warn(lg->dev, FW_BUG "pin %u forcibly reconfigured as GPIO\n", pin);
+ }
+
+ /* Enable input sensing */
+ iowrite32(ioread32(conf2) & ~GPINDIS_BIT, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ /* Disable input sensing */
+ iowrite32(ioread32(conf2) | GPINDIS_BIT, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ pm_runtime_put(lg->dev);
+}
+
+static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ value = ioread32(reg);
+ value &= ~DIR_BIT;
+ if (input) {
+ value |= DIR_BIT;
+ } else {
+ /*
+ * Before making any direction modifications, check whether the GPIO
+ * is routed to the IOxAPIC as a direct IRQ. On Lynxpoint, setting
+ * such a GPIO to output does not make sense, so at least warn the
+ * caller before they shoot themselves in the foot.
+ */
+ WARN(lp_gpio_ioxapic_use(&lg->chip, pin),
+ "Potential Error: Setting GPIO to output with IOxAPIC redirection");
+ }
+ iowrite32(value, reg);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops lptlp_pinmux_ops = {
+ .get_functions_count = lp_get_functions_count,
+ .get_function_name = lp_get_function_name,
+ .get_function_groups = lp_get_function_groups,
+ .set_mux = lp_pinmux_set_mux,
+ .gpio_request_enable = lp_gpio_request_enable,
+ .gpio_disable_free = lp_gpio_disable_free,
+ .gpio_set_direction = lp_gpio_set_direction,
+};
+
+static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned long flags;
+ u32 value, pull;
+ u16 arg = 0;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ value = ioread32(conf2);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ pull = value & GPIWP_MASK;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull != GPIWP_DOWN)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull != GPIWP_UP)
+ return -EINVAL;
+
+ arg = 1;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
+ enum pin_config_param param;
+ unsigned long flags;
+ int i, ret = 0;
+ u32 value;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ value = ioread32(conf2);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value &= ~GPIWP_MASK;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value &= ~GPIWP_MASK;
+ value |= GPIWP_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value &= ~GPIWP_MASK;
+ value |= GPIWP_UP;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ iowrite32(value, conf2);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return ret;
+}
+
+static const struct pinconf_ops lptlp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = lp_pin_config_get,
+ .pin_config_set = lp_pin_config_set,
+};
+
+static const struct pinctrl_desc lptlp_pinctrl_desc = {
+ .pctlops = &lptlp_pinctrl_ops,
+ .pmxops = &lptlp_pinmux_ops,
+ .confops = &lptlp_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int lp_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+ return !!(ioread32(reg) & IN_LVL_BIT);
+}
+
+static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+
+ if (value)
+ iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
+ else
+ iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ lp_gpio_set(chip, offset, value);
+
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int lp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
+
+ if (ioread32(reg) & DIR_BIT)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static void lp_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ void __iomem *reg, *ena;
+ unsigned long pending;
+ u32 base, pin;
+
+ /* check from GPIO controller which pin triggered the interrupt */
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+
+ /* Only interrupts that are enabled */
+ pending = ioread32(reg) & ioread32(ena);
+
+ for_each_set_bit(pin, &pending, 32) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
+ generic_handle_irq(irq);
+ }
+ }
+ chip->irq_eoi(data);
+}
+
+static void lp_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
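+/*
+ * Interrupt delivery is gated through LP_INT_ENABLE in the enable/disable
+ * hooks below, so mask/unmask are intentionally left empty (the hardware
+ * appears to have no separate mask register).
+ */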
+static void lp_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lp_irq_mask(struct irq_data *d)
+{
+}
+
+static void lp_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static void lp_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+}
+
+static int lp_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *lg = gpiochip_get_data(gc);
+ u32 hwirq = irqd_to_hwirq(d);
+ void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1);
+ unsigned long flags;
+ u32 value;
+
+ if (hwirq >= lg->chip.ngpio)
+ return -EINVAL;
+
+ /* Fail if BIOS reserved pin for ACPI use */
+ if (lp_gpio_acpi_use(lg, hwirq)) {
+ dev_err(lg->dev, "pin %u can't be used as IRQ\n", hwirq);
+ return -EBUSY;
+ }
+
+ raw_spin_lock_irqsave(&lg->lock, flags);
+ value = ioread32(reg);
+
+ /* set both TRIG_SEL and INV bits to 0 for rising edge */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value &= ~(TRIG_SEL_BIT | INT_INV_BIT);
+
+ /* TRIG_SEL bit 0, INV bit 1 for falling edge */
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = (value | INT_INV_BIT) & ~TRIG_SEL_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 0 for level low */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = (value | TRIG_SEL_BIT) & ~INT_INV_BIT;
+
+ /* TRIG_SEL bit 1, INV bit 1 for level high */
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ value |= TRIG_SEL_BIT | INT_INV_BIT;
+
+ iowrite32(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+ raw_spin_unlock_irqrestore(&lg->lock, flags);
+
+ return 0;
+}
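+
+/*
+ * Summary of the trigger encoding programmed by lp_irq_set_type():
+ *   TRIG_SEL=0 INT_INV=0  rising edge
+ *   TRIG_SEL=0 INT_INV=1  falling edge
+ *   TRIG_SEL=1 INT_INV=0  level low
+ *   TRIG_SEL=1 INT_INV=1  level high
+ */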
+
+static struct irq_chip lp_irqchip = {
+ .name = "LP-GPIO",
+ .irq_ack = lp_irq_ack,
+ .irq_mask = lp_irq_mask,
+ .irq_unmask = lp_irq_unmask,
+ .irq_enable = lp_irq_enable,
+ .irq_disable = lp_irq_disable,
+ .irq_set_type = lp_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ void __iomem *reg;
+ unsigned int base;
+
+ for (base = 0; base < lg->chip.ngpio; base += 32) {
+ /* disable gpio pin interrupts */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
+ iowrite32(0, reg);
+ /* Clear interrupt status register */
+ reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+ iowrite32(0xffffffff, reg);
+ }
+
+ return 0;
+}
+
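+/* Pins map 1:1 to GPIO lines, so a single range covering all SoC pins suffices. */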
+static int lp_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct intel_pinctrl *lg = gpiochip_get_data(chip);
+ struct device *dev = lg->dev;
+ int ret;
+
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, lg->soc->npins);
+ if (ret)
+ dev_err(dev, "failed to add GPIO pin range\n");
+
+ return ret;
+}
+
+static int lp_gpio_probe(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *soc;
+ struct intel_pinctrl *lg;
+ struct gpio_chip *gc;
+ struct resource *io_rc, *irq_rc;
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ unsigned int i;
+ int ret;
+
+ soc = (const struct intel_pinctrl_soc_data *)device_get_match_data(dev);
+ if (!soc)
+ return -ENODEV;
+
+ lg = devm_kzalloc(dev, sizeof(*lg), GFP_KERNEL);
+ if (!lg)
+ return -ENOMEM;
+
+ lg->dev = dev;
+ lg->soc = soc;
+
+ lg->ncommunities = lg->soc->ncommunities;
+ lg->communities = devm_kcalloc(dev, lg->ncommunities,
+ sizeof(*lg->communities), GFP_KERNEL);
+ if (!lg->communities)
+ return -ENOMEM;
+
+ lg->pctldesc = lptlp_pinctrl_desc;
+ lg->pctldesc.name = dev_name(dev);
+ lg->pctldesc.pins = lg->soc->pins;
+ lg->pctldesc.npins = lg->soc->npins;
+
+ lg->pctldev = devm_pinctrl_register(dev, &lg->pctldesc, lg);
+ if (IS_ERR(lg->pctldev)) {
+ dev_err(dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(lg->pctldev);
+ }
+
+ platform_set_drvdata(pdev, lg);
+
+ io_rc = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!io_rc) {
+ dev_err(dev, "missing IO resources\n");
+ return -EINVAL;
+ }
+
+ regs = devm_ioport_map(dev, io_rc->start, resource_size(io_rc));
+ if (!regs) {
+		dev_err(dev, "failed mapping IO region %pR\n", io_rc);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < lg->soc->ncommunities; i++) {
+ struct intel_community *comm = &lg->communities[i];
+
+ *comm = lg->soc->communities[i];
+
+ comm->regs = regs;
+ comm->pad_regs = regs + 0x100;
+ }
+
+ raw_spin_lock_init(&lg->lock);
+
+ gc = &lg->chip;
+ gc->label = dev_name(dev);
+ gc->owner = THIS_MODULE;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->direction_input = lp_gpio_direction_input;
+ gc->direction_output = lp_gpio_direction_output;
+ gc->get = lp_gpio_get;
+ gc->set = lp_gpio_set;
+ gc->get_direction = lp_gpio_get_direction;
+ gc->base = -1;
+ gc->ngpio = LP_NUM_GPIO;
+ gc->can_sleep = false;
+ gc->add_pin_ranges = lp_gpio_add_pin_ranges;
+ gc->parent = dev;
+
+ /* set up interrupts */
+ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_rc && irq_rc->start) {
+ struct gpio_irq_chip *girq;
+
+ girq = &gc->irq;
+ girq->chip = &lp_irqchip;
+ girq->init_hw = lp_gpio_irq_init_hw;
+ girq->parent_handler = lp_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ }
+
+ ret = devm_gpiochip_add_data(dev, gc, lg);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
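+/* devm unwinds the gpiochip and pinctrl registrations; only runtime PM needs explicit teardown. */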
+static int lp_gpio_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
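+/* No device state needs saving or restoring across runtime PM transitions. */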
+static int lp_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lp_gpio_resume(struct device *dev)
+{
+ struct intel_pinctrl *lg = dev_get_drvdata(dev);
+ void __iomem *reg;
+ int i;
+
+	/* On some hardware suspend clears input sensing; re-enable it here */
+ for (i = 0; i < lg->chip.ngpio; i++) {
+ if (gpiochip_is_requested(&lg->chip, i) != NULL) {
+ reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
+ iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg);
+ }
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops lp_gpio_pm_ops = {
+ .runtime_suspend = lp_gpio_runtime_suspend,
+ .runtime_resume = lp_gpio_runtime_resume,
+ .resume = lp_gpio_resume,
+};
+
+static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
+ { "INT33C7", (kernel_ulong_t)&lptlp_soc_data },
+ { "INT3437", (kernel_ulong_t)&lptlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
+
+static struct platform_driver lp_gpio_driver = {
+ .probe = lp_gpio_probe,
+ .remove = lp_gpio_remove,
+ .driver = {
+ .name = "lp_gpio",
+ .pm = &lp_gpio_pm_ops,
+ .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+ },
+};
+
+static int __init lp_gpio_init(void)
+{
+ return platform_driver_register(&lp_gpio_driver);
+}
+
+static void __exit lp_gpio_exit(void)
+{
+ platform_driver_unregister(&lp_gpio_driver);
+}
+
+subsys_initcall(lp_gpio_init);
+module_exit(lp_gpio_exit);
+
+MODULE_AUTHOR("Mathias Nyman (Intel)");
+MODULE_AUTHOR("Andy Shevchenko (Intel)");
+MODULE_DESCRIPTION("Intel Lynxpoint pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lp_gpio");
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 44d7f50bbc82..330c8f077b73 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -49,6 +49,7 @@
.padown_offset = SPT_PAD_OWN, \
.padcfglock_offset = SPT_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
+ .is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
@@ -588,6 +589,7 @@ static const struct intel_pinctrl_soc_data spth_soc_data = {
static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
{ "INT344B", (kernel_ulong_t)&sptlp_soc_data },
+ { "INT3451", (kernel_ulong_t)&spth_soc_data },
{ "INT345D", (kernel_ulong_t)&spth_soc_data },
{ }
};
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 58572b15b3ce..08a86f6fdea6 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -2,7 +2,7 @@
/*
* Intel Tiger Lake PCH pinctrl/GPIO driver
*
- * Copyright (C) 2019, Intel Corporation
+ * Copyright (C) 2019 - 2020, Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
@@ -21,15 +21,19 @@
#define TGL_GPI_IS 0x100
#define TGL_GPI_IE 0x120
-#define TGL_GPP(r, s, e) \
+#define TGL_NO_GPIO -1
+
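+/* Pad groups without a GPIO mapping (HVCMOS, JTAG, SPI) pass TGL_NO_GPIO as (g). */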
+#define TGL_GPP(r, s, e, g) \
{ \
.reg_num = (r), \
.base = (s), \
.size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
}
-#define TGL_COMMUNITY(s, e, g) \
+#define TGL_COMMUNITY(b, s, e, g) \
{ \
+ .barno = (b), \
.padown_offset = TGL_PAD_OWN, \
.padcfglock_offset = TGL_PADCFGLOCK, \
.hostown_offset = TGL_HOSTSW_OWN, \
@@ -42,7 +46,7 @@
}
/* Tiger Lake-LP */
-static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+static const struct pinctrl_pin_desc tgllp_pins[] = {
/* GPP_B */
PINCTRL_PIN(0, "CORE_VID_0"),
PINCTRL_PIN(1, "CORE_VID_1"),
@@ -113,324 +117,273 @@ static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
PINCTRL_PIN(64, "GPPC_A_22"),
PINCTRL_PIN(65, "I2S1_SCLK"),
PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
-};
-
-static const struct intel_padgroup tgllp_community0_gpps[] = {
- TGL_GPP(0, 0, 25), /* GPP_B */
- TGL_GPP(1, 26, 41), /* GPP_T */
- TGL_GPP(2, 42, 66), /* GPP_A */
-};
-
-static const struct intel_community tgllp_community0[] = {
- TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
- .uid = "0",
- .pins = tgllp_community0_pins,
- .npins = ARRAY_SIZE(tgllp_community0_pins),
- .communities = tgllp_community0,
- .ncommunities = ARRAY_SIZE(tgllp_community0),
-};
-
-static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
/* GPP_S */
- PINCTRL_PIN(0, "SNDW0_CLK"),
- PINCTRL_PIN(1, "SNDW0_DATA"),
- PINCTRL_PIN(2, "SNDW1_CLK"),
- PINCTRL_PIN(3, "SNDW1_DATA"),
- PINCTRL_PIN(4, "SNDW2_CLK"),
- PINCTRL_PIN(5, "SNDW2_DATA"),
- PINCTRL_PIN(6, "SNDW3_CLK"),
- PINCTRL_PIN(7, "SNDW3_DATA"),
+ PINCTRL_PIN(67, "SNDW0_CLK"),
+ PINCTRL_PIN(68, "SNDW0_DATA"),
+ PINCTRL_PIN(69, "SNDW1_CLK"),
+ PINCTRL_PIN(70, "SNDW1_DATA"),
+ PINCTRL_PIN(71, "SNDW2_CLK"),
+ PINCTRL_PIN(72, "SNDW2_DATA"),
+ PINCTRL_PIN(73, "SNDW3_CLK"),
+ PINCTRL_PIN(74, "SNDW3_DATA"),
/* GPP_H */
- PINCTRL_PIN(8, "GPPC_H_0"),
- PINCTRL_PIN(9, "GPPC_H_1"),
- PINCTRL_PIN(10, "GPPC_H_2"),
- PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
- PINCTRL_PIN(12, "I2C2_SDA"),
- PINCTRL_PIN(13, "I2C2_SCL"),
- PINCTRL_PIN(14, "I2C3_SDA"),
- PINCTRL_PIN(15, "I2C3_SCL"),
- PINCTRL_PIN(16, "I2C4_SDA"),
- PINCTRL_PIN(17, "I2C4_SCL"),
- PINCTRL_PIN(18, "SRCCLKREQB_4"),
- PINCTRL_PIN(19, "SRCCLKREQB_5"),
- PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
- PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
- PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
- PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
- PINCTRL_PIN(24, "DDPB_CTRLCLK"),
- PINCTRL_PIN(25, "DDPB_CTRLDATA"),
- PINCTRL_PIN(26, "CPU_C10_GATEB"),
- PINCTRL_PIN(27, "TIME_SYNC_0"),
- PINCTRL_PIN(28, "IMGCLKOUT_1"),
- PINCTRL_PIN(29, "IMGCLKOUT_2"),
- PINCTRL_PIN(30, "IMGCLKOUT_3"),
- PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ PINCTRL_PIN(75, "GPPC_H_0"),
+ PINCTRL_PIN(76, "GPPC_H_1"),
+ PINCTRL_PIN(77, "GPPC_H_2"),
+ PINCTRL_PIN(78, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(79, "I2C2_SDA"),
+ PINCTRL_PIN(80, "I2C2_SCL"),
+ PINCTRL_PIN(81, "I2C3_SDA"),
+ PINCTRL_PIN(82, "I2C3_SCL"),
+ PINCTRL_PIN(83, "I2C4_SDA"),
+ PINCTRL_PIN(84, "I2C4_SCL"),
+ PINCTRL_PIN(85, "SRCCLKREQB_4"),
+ PINCTRL_PIN(86, "SRCCLKREQB_5"),
+ PINCTRL_PIN(87, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(88, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(89, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(90, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(91, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(92, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(93, "CPU_C10_GATEB"),
+ PINCTRL_PIN(94, "TIME_SYNC_0"),
+ PINCTRL_PIN(95, "IMGCLKOUT_1"),
+ PINCTRL_PIN(96, "IMGCLKOUT_2"),
+ PINCTRL_PIN(97, "IMGCLKOUT_3"),
+ PINCTRL_PIN(98, "IMGCLKOUT_4"),
/* GPP_D */
- PINCTRL_PIN(32, "ISH_GP_0"),
- PINCTRL_PIN(33, "ISH_GP_1"),
- PINCTRL_PIN(34, "ISH_GP_2"),
- PINCTRL_PIN(35, "ISH_GP_3"),
- PINCTRL_PIN(36, "IMGCLKOUT_0"),
- PINCTRL_PIN(37, "SRCCLKREQB_0"),
- PINCTRL_PIN(38, "SRCCLKREQB_1"),
- PINCTRL_PIN(39, "SRCCLKREQB_2"),
- PINCTRL_PIN(40, "SRCCLKREQB_3"),
- PINCTRL_PIN(41, "ISH_SPI_CSB"),
- PINCTRL_PIN(42, "ISH_SPI_CLK"),
- PINCTRL_PIN(43, "ISH_SPI_MISO"),
- PINCTRL_PIN(44, "ISH_SPI_MOSI"),
- PINCTRL_PIN(45, "ISH_UART0_RXD"),
- PINCTRL_PIN(46, "ISH_UART0_TXD"),
- PINCTRL_PIN(47, "ISH_UART0_RTSB"),
- PINCTRL_PIN(48, "ISH_UART0_CTSB"),
- PINCTRL_PIN(49, "ISH_GP_4"),
- PINCTRL_PIN(50, "ISH_GP_5"),
- PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
- PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(99, "ISH_GP_0"),
+ PINCTRL_PIN(100, "ISH_GP_1"),
+ PINCTRL_PIN(101, "ISH_GP_2"),
+ PINCTRL_PIN(102, "ISH_GP_3"),
+ PINCTRL_PIN(103, "IMGCLKOUT_0"),
+ PINCTRL_PIN(104, "SRCCLKREQB_0"),
+ PINCTRL_PIN(105, "SRCCLKREQB_1"),
+ PINCTRL_PIN(106, "SRCCLKREQB_2"),
+ PINCTRL_PIN(107, "SRCCLKREQB_3"),
+ PINCTRL_PIN(108, "ISH_SPI_CSB"),
+ PINCTRL_PIN(109, "ISH_SPI_CLK"),
+ PINCTRL_PIN(110, "ISH_SPI_MISO"),
+ PINCTRL_PIN(111, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(112, "ISH_UART0_RXD"),
+ PINCTRL_PIN(113, "ISH_UART0_TXD"),
+ PINCTRL_PIN(114, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(115, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(116, "ISH_GP_4"),
+ PINCTRL_PIN(117, "ISH_GP_5"),
+ PINCTRL_PIN(118, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(119, "GSPI2_CLK_LOOPBK"),
/* GPP_U */
- PINCTRL_PIN(53, "UART3_RXD"),
- PINCTRL_PIN(54, "UART3_TXD"),
- PINCTRL_PIN(55, "UART3_RTSB"),
- PINCTRL_PIN(56, "UART3_CTSB"),
- PINCTRL_PIN(57, "GSPI3_CS0B"),
- PINCTRL_PIN(58, "GSPI3_CLK"),
- PINCTRL_PIN(59, "GSPI3_MISO"),
- PINCTRL_PIN(60, "GSPI3_MOSI"),
- PINCTRL_PIN(61, "GSPI4_CS0B"),
- PINCTRL_PIN(62, "GSPI4_CLK"),
- PINCTRL_PIN(63, "GSPI4_MISO"),
- PINCTRL_PIN(64, "GSPI4_MOSI"),
- PINCTRL_PIN(65, "GSPI5_CS0B"),
- PINCTRL_PIN(66, "GSPI5_CLK"),
- PINCTRL_PIN(67, "GSPI5_MISO"),
- PINCTRL_PIN(68, "GSPI5_MOSI"),
- PINCTRL_PIN(69, "GSPI6_CS0B"),
- PINCTRL_PIN(70, "GSPI6_CLK"),
- PINCTRL_PIN(71, "GSPI6_MISO"),
- PINCTRL_PIN(72, "GSPI6_MOSI"),
- PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
- PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
- PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
- PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ PINCTRL_PIN(120, "UART3_RXD"),
+ PINCTRL_PIN(121, "UART3_TXD"),
+ PINCTRL_PIN(122, "UART3_RTSB"),
+ PINCTRL_PIN(123, "UART3_CTSB"),
+ PINCTRL_PIN(124, "GSPI3_CS0B"),
+ PINCTRL_PIN(125, "GSPI3_CLK"),
+ PINCTRL_PIN(126, "GSPI3_MISO"),
+ PINCTRL_PIN(127, "GSPI3_MOSI"),
+ PINCTRL_PIN(128, "GSPI4_CS0B"),
+ PINCTRL_PIN(129, "GSPI4_CLK"),
+ PINCTRL_PIN(130, "GSPI4_MISO"),
+ PINCTRL_PIN(131, "GSPI4_MOSI"),
+ PINCTRL_PIN(132, "GSPI5_CS0B"),
+ PINCTRL_PIN(133, "GSPI5_CLK"),
+ PINCTRL_PIN(134, "GSPI5_MISO"),
+ PINCTRL_PIN(135, "GSPI5_MOSI"),
+ PINCTRL_PIN(136, "GSPI6_CS0B"),
+ PINCTRL_PIN(137, "GSPI6_CLK"),
+ PINCTRL_PIN(138, "GSPI6_MISO"),
+ PINCTRL_PIN(139, "GSPI6_MOSI"),
+ PINCTRL_PIN(140, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(141, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(142, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(143, "GSPI6_CLK_LOOPBK"),
/* vGPIO */
- PINCTRL_PIN(77, "CNV_BTEN"),
- PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
- PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
- PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
- PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
- PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
- PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
- PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
- PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
- PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
- PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
- PINCTRL_PIN(88, "vUART0_TXD"),
- PINCTRL_PIN(89, "vUART0_RXD"),
- PINCTRL_PIN(90, "vUART0_CTS_B"),
- PINCTRL_PIN(91, "vUART0_RTS_B"),
- PINCTRL_PIN(92, "vISH_UART0_TXD"),
- PINCTRL_PIN(93, "vISH_UART0_RXD"),
- PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
- PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
- PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
- PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
- PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
- PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
- PINCTRL_PIN(100, "vI2S2_SCLK"),
- PINCTRL_PIN(101, "vI2S2_SFRM"),
- PINCTRL_PIN(102, "vI2S2_TXD"),
- PINCTRL_PIN(103, "vI2S2_RXD"),
-};
-
-static const struct intel_padgroup tgllp_community1_gpps[] = {
- TGL_GPP(0, 0, 7), /* GPP_S */
- TGL_GPP(1, 8, 31), /* GPP_H */
- TGL_GPP(2, 32, 52), /* GPP_D */
- TGL_GPP(3, 53, 76), /* GPP_U */
- TGL_GPP(4, 77, 103), /* vGPIO */
-};
-
-static const struct intel_community tgllp_community1[] = {
- TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
-};
-
-static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
- .uid = "1",
- .pins = tgllp_community1_pins,
- .npins = ARRAY_SIZE(tgllp_community1_pins),
- .communities = tgllp_community1,
- .ncommunities = ARRAY_SIZE(tgllp_community1),
-};
-
-static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ PINCTRL_PIN(144, "CNV_BTEN"),
+ PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(146, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(147, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(148, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(151, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(152, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(155, "vUART0_TXD"),
+ PINCTRL_PIN(156, "vUART0_RXD"),
+ PINCTRL_PIN(157, "vUART0_CTS_B"),
+ PINCTRL_PIN(158, "vUART0_RTS_B"),
+ PINCTRL_PIN(159, "vISH_UART0_TXD"),
+ PINCTRL_PIN(160, "vISH_UART0_RXD"),
+ PINCTRL_PIN(161, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(162, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(163, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(164, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(165, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(166, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(167, "vI2S2_SCLK"),
+ PINCTRL_PIN(168, "vI2S2_SFRM"),
+ PINCTRL_PIN(169, "vI2S2_TXD"),
+ PINCTRL_PIN(170, "vI2S2_RXD"),
/* GPP_C */
- PINCTRL_PIN(0, "SMBCLK"),
- PINCTRL_PIN(1, "SMBDATA"),
- PINCTRL_PIN(2, "SMBALERTB"),
- PINCTRL_PIN(3, "SML0CLK"),
- PINCTRL_PIN(4, "SML0DATA"),
- PINCTRL_PIN(5, "SML0ALERTB"),
- PINCTRL_PIN(6, "SML1CLK"),
- PINCTRL_PIN(7, "SML1DATA"),
- PINCTRL_PIN(8, "UART0_RXD"),
- PINCTRL_PIN(9, "UART0_TXD"),
- PINCTRL_PIN(10, "UART0_RTSB"),
- PINCTRL_PIN(11, "UART0_CTSB"),
- PINCTRL_PIN(12, "UART1_RXD"),
- PINCTRL_PIN(13, "UART1_TXD"),
- PINCTRL_PIN(14, "UART1_RTSB"),
- PINCTRL_PIN(15, "UART1_CTSB"),
- PINCTRL_PIN(16, "I2C0_SDA"),
- PINCTRL_PIN(17, "I2C0_SCL"),
- PINCTRL_PIN(18, "I2C1_SDA"),
- PINCTRL_PIN(19, "I2C1_SCL"),
- PINCTRL_PIN(20, "UART2_RXD"),
- PINCTRL_PIN(21, "UART2_TXD"),
- PINCTRL_PIN(22, "UART2_RTSB"),
- PINCTRL_PIN(23, "UART2_CTSB"),
+ PINCTRL_PIN(171, "SMBCLK"),
+ PINCTRL_PIN(172, "SMBDATA"),
+ PINCTRL_PIN(173, "SMBALERTB"),
+ PINCTRL_PIN(174, "SML0CLK"),
+ PINCTRL_PIN(175, "SML0DATA"),
+ PINCTRL_PIN(176, "SML0ALERTB"),
+ PINCTRL_PIN(177, "SML1CLK"),
+ PINCTRL_PIN(178, "SML1DATA"),
+ PINCTRL_PIN(179, "UART0_RXD"),
+ PINCTRL_PIN(180, "UART0_TXD"),
+ PINCTRL_PIN(181, "UART0_RTSB"),
+ PINCTRL_PIN(182, "UART0_CTSB"),
+ PINCTRL_PIN(183, "UART1_RXD"),
+ PINCTRL_PIN(184, "UART1_TXD"),
+ PINCTRL_PIN(185, "UART1_RTSB"),
+ PINCTRL_PIN(186, "UART1_CTSB"),
+ PINCTRL_PIN(187, "I2C0_SDA"),
+ PINCTRL_PIN(188, "I2C0_SCL"),
+ PINCTRL_PIN(189, "I2C1_SDA"),
+ PINCTRL_PIN(190, "I2C1_SCL"),
+ PINCTRL_PIN(191, "UART2_RXD"),
+ PINCTRL_PIN(192, "UART2_TXD"),
+ PINCTRL_PIN(193, "UART2_RTSB"),
+ PINCTRL_PIN(194, "UART2_CTSB"),
/* GPP_F */
- PINCTRL_PIN(24, "CNV_BRI_DT"),
- PINCTRL_PIN(25, "CNV_BRI_RSP"),
- PINCTRL_PIN(26, "CNV_RGI_DT"),
- PINCTRL_PIN(27, "CNV_RGI_RSP"),
- PINCTRL_PIN(28, "CNV_RF_RESET_B"),
- PINCTRL_PIN(29, "GPPC_F_5"),
- PINCTRL_PIN(30, "CNV_PA_BLANKING"),
- PINCTRL_PIN(31, "GPPC_F_7"),
- PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
- PINCTRL_PIN(33, "BOOTMPC"),
- PINCTRL_PIN(34, "GPPC_F_10"),
- PINCTRL_PIN(35, "GPPC_F_11"),
- PINCTRL_PIN(36, "GSXDOUT"),
- PINCTRL_PIN(37, "GSXSLOAD"),
- PINCTRL_PIN(38, "GSXDIN"),
- PINCTRL_PIN(39, "GSXSRESETB"),
- PINCTRL_PIN(40, "GSXCLK"),
- PINCTRL_PIN(41, "GMII_MDC"),
- PINCTRL_PIN(42, "GMII_MDIO"),
- PINCTRL_PIN(43, "SRCCLKREQB_6"),
- PINCTRL_PIN(44, "EXT_PWR_GATEB"),
- PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
- PINCTRL_PIN(46, "VNN_CTRL"),
- PINCTRL_PIN(47, "V1P05_CTRL"),
- PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ PINCTRL_PIN(195, "CNV_BRI_DT"),
+ PINCTRL_PIN(196, "CNV_BRI_RSP"),
+ PINCTRL_PIN(197, "CNV_RGI_DT"),
+ PINCTRL_PIN(198, "CNV_RGI_RSP"),
+ PINCTRL_PIN(199, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(200, "GPPC_F_5"),
+ PINCTRL_PIN(201, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(202, "GPPC_F_7"),
+ PINCTRL_PIN(203, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(204, "BOOTMPC"),
+ PINCTRL_PIN(205, "GPPC_F_10"),
+ PINCTRL_PIN(206, "GPPC_F_11"),
+ PINCTRL_PIN(207, "GSXDOUT"),
+ PINCTRL_PIN(208, "GSXSLOAD"),
+ PINCTRL_PIN(209, "GSXDIN"),
+ PINCTRL_PIN(210, "GSXSRESETB"),
+ PINCTRL_PIN(211, "GSXCLK"),
+ PINCTRL_PIN(212, "GMII_MDC"),
+ PINCTRL_PIN(213, "GMII_MDIO"),
+ PINCTRL_PIN(214, "SRCCLKREQB_6"),
+ PINCTRL_PIN(215, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(216, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(217, "VNN_CTRL"),
+ PINCTRL_PIN(218, "V1P05_CTRL"),
+ PINCTRL_PIN(219, "GPPF_CLK_LOOPBACK"),
/* HVCMOS */
- PINCTRL_PIN(49, "L_BKLTEN"),
- PINCTRL_PIN(50, "L_BKLTCTL"),
- PINCTRL_PIN(51, "L_VDDEN"),
- PINCTRL_PIN(52, "SYS_PWROK"),
- PINCTRL_PIN(53, "SYS_RESETB"),
- PINCTRL_PIN(54, "MLK_RSTB"),
+ PINCTRL_PIN(220, "L_BKLTEN"),
+ PINCTRL_PIN(221, "L_BKLTCTL"),
+ PINCTRL_PIN(222, "L_VDDEN"),
+ PINCTRL_PIN(223, "SYS_PWROK"),
+ PINCTRL_PIN(224, "SYS_RESETB"),
+ PINCTRL_PIN(225, "MLK_RSTB"),
/* GPP_E */
- PINCTRL_PIN(55, "SATAXPCIE_0"),
- PINCTRL_PIN(56, "SPI1_IO_2"),
- PINCTRL_PIN(57, "SPI1_IO_3"),
- PINCTRL_PIN(58, "CPU_GP_0"),
- PINCTRL_PIN(59, "SATA_DEVSLP_0"),
- PINCTRL_PIN(60, "SATA_DEVSLP_1"),
- PINCTRL_PIN(61, "GPPC_E_6"),
- PINCTRL_PIN(62, "CPU_GP_1"),
- PINCTRL_PIN(63, "SPI1_CS1B"),
- PINCTRL_PIN(64, "USB2_OCB_0"),
- PINCTRL_PIN(65, "SPI1_CSB"),
- PINCTRL_PIN(66, "SPI1_CLK"),
- PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
- PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
- PINCTRL_PIN(69, "DDSP_HPD_A"),
- PINCTRL_PIN(70, "ISH_GP_6"),
- PINCTRL_PIN(71, "ISH_GP_7"),
- PINCTRL_PIN(72, "GPPC_E_17"),
- PINCTRL_PIN(73, "DDP1_CTRLCLK"),
- PINCTRL_PIN(74, "DDP1_CTRLDATA"),
- PINCTRL_PIN(75, "DDP2_CTRLCLK"),
- PINCTRL_PIN(76, "DDP2_CTRLDATA"),
- PINCTRL_PIN(77, "DDPA_CTRLCLK"),
- PINCTRL_PIN(78, "DDPA_CTRLDATA"),
- PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(226, "SATAXPCIE_0"),
+ PINCTRL_PIN(227, "SPI1_IO_2"),
+ PINCTRL_PIN(228, "SPI1_IO_3"),
+ PINCTRL_PIN(229, "CPU_GP_0"),
+ PINCTRL_PIN(230, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(231, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(232, "GPPC_E_6"),
+ PINCTRL_PIN(233, "CPU_GP_1"),
+ PINCTRL_PIN(234, "SPI1_CS1B"),
+ PINCTRL_PIN(235, "USB2_OCB_0"),
+ PINCTRL_PIN(236, "SPI1_CSB"),
+ PINCTRL_PIN(237, "SPI1_CLK"),
+ PINCTRL_PIN(238, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(239, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(240, "DDSP_HPD_A"),
+ PINCTRL_PIN(241, "ISH_GP_6"),
+ PINCTRL_PIN(242, "ISH_GP_7"),
+ PINCTRL_PIN(243, "GPPC_E_17"),
+ PINCTRL_PIN(244, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(245, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(246, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(247, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(248, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(249, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(250, "SPI1_CLK_LOOPBK"),
/* JTAG */
- PINCTRL_PIN(80, "JTAG_TDO"),
- PINCTRL_PIN(81, "JTAGX"),
- PINCTRL_PIN(82, "PRDYB"),
- PINCTRL_PIN(83, "PREQB"),
- PINCTRL_PIN(84, "CPU_TRSTB"),
- PINCTRL_PIN(85, "JTAG_TDI"),
- PINCTRL_PIN(86, "JTAG_TMS"),
- PINCTRL_PIN(87, "JTAG_TCK"),
- PINCTRL_PIN(88, "DBG_PMODE"),
-};
-
-static const struct intel_padgroup tgllp_community4_gpps[] = {
- TGL_GPP(0, 0, 23), /* GPP_C */
- TGL_GPP(1, 24, 48), /* GPP_F */
- TGL_GPP(2, 49, 54), /* HVCMOS */
- TGL_GPP(3, 55, 79), /* GPP_E */
- TGL_GPP(4, 80, 88), /* JTAG */
+ PINCTRL_PIN(251, "JTAG_TDO"),
+ PINCTRL_PIN(252, "JTAGX"),
+ PINCTRL_PIN(253, "PRDYB"),
+ PINCTRL_PIN(254, "PREQB"),
+ PINCTRL_PIN(255, "CPU_TRSTB"),
+ PINCTRL_PIN(256, "JTAG_TDI"),
+ PINCTRL_PIN(257, "JTAG_TMS"),
+ PINCTRL_PIN(258, "JTAG_TCK"),
+ PINCTRL_PIN(259, "DBG_PMODE"),
+ /* GPP_R */
+ PINCTRL_PIN(260, "HDA_BCLK"),
+ PINCTRL_PIN(261, "HDA_SYNC"),
+ PINCTRL_PIN(262, "HDA_SDO"),
+ PINCTRL_PIN(263, "HDA_SDI_0"),
+ PINCTRL_PIN(264, "HDA_RSTB"),
+ PINCTRL_PIN(265, "HDA_SDI_1"),
+ PINCTRL_PIN(266, "GPP_R_6"),
+ PINCTRL_PIN(267, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(268, "SPI0_IO_2"),
+ PINCTRL_PIN(269, "SPI0_IO_3"),
+ PINCTRL_PIN(270, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(271, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(272, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(273, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(274, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(275, "SPI0_CLK"),
+ PINCTRL_PIN(276, "SPI0_CLK_LOOPBK"),
};
-static const struct intel_community tgllp_community4[] = {
- TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25, 0), /* GPP_B */
+ TGL_GPP(1, 26, 41, 32), /* GPP_T */
+ TGL_GPP(2, 42, 66, 64), /* GPP_A */
};
-static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
- .uid = "4",
- .pins = tgllp_community4_pins,
- .npins = ARRAY_SIZE(tgllp_community4_pins),
- .communities = tgllp_community4,
- .ncommunities = ARRAY_SIZE(tgllp_community4),
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 67, 74, 96), /* GPP_S */
+ TGL_GPP(1, 75, 98, 128), /* GPP_H */
+ TGL_GPP(2, 99, 119, 160), /* GPP_D */
+ TGL_GPP(3, 120, 143, 192), /* GPP_U */
+ TGL_GPP(4, 144, 170, 224), /* vGPIO */
};
-static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
- /* GPP_R */
- PINCTRL_PIN(0, "HDA_BCLK"),
- PINCTRL_PIN(1, "HDA_SYNC"),
- PINCTRL_PIN(2, "HDA_SDO"),
- PINCTRL_PIN(3, "HDA_SDI_0"),
- PINCTRL_PIN(4, "HDA_RSTB"),
- PINCTRL_PIN(5, "HDA_SDI_1"),
- PINCTRL_PIN(6, "GPP_R_6"),
- PINCTRL_PIN(7, "GPP_R_7"),
- /* SPI */
- PINCTRL_PIN(8, "SPI0_IO_2"),
- PINCTRL_PIN(9, "SPI0_IO_3"),
- PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
- PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
- PINCTRL_PIN(12, "SPI0_TPM_CSB"),
- PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
- PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
- PINCTRL_PIN(15, "SPI0_CLK"),
- PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 171, 194, 256), /* GPP_C */
+ TGL_GPP(1, 195, 219, 288), /* GPP_F */
+ TGL_GPP(2, 220, 225, TGL_NO_GPIO), /* HVCMOS */
+ TGL_GPP(3, 226, 250, 320), /* GPP_E */
+ TGL_GPP(4, 251, 259, TGL_NO_GPIO), /* JTAG */
};
static const struct intel_padgroup tgllp_community5_gpps[] = {
- TGL_GPP(0, 0, 7), /* GPP_R */
- TGL_GPP(1, 8, 16), /* SPI */
-};
-
-static const struct intel_community tgllp_community5[] = {
- TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+ TGL_GPP(0, 260, 267, 352), /* GPP_R */
+ TGL_GPP(1, 268, 276, TGL_NO_GPIO), /* SPI */
};
-static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
- .uid = "5",
- .pins = tgllp_community5_pins,
- .npins = ARRAY_SIZE(tgllp_community5_pins),
- .communities = tgllp_community5,
- .ncommunities = ARRAY_SIZE(tgllp_community5),
+static const struct intel_community tgllp_communities[] = {
+ TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+ TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+ TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+ TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
};
-static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
- &tgllp_community0_soc_data,
- &tgllp_community1_soc_data,
- &tgllp_community4_soc_data,
- &tgllp_community5_soc_data,
- NULL
+static const struct intel_pinctrl_soc_data tgllp_soc_data = {
+ .pins = tgllp_pins,
+ .npins = ARRAY_SIZE(tgllp_pins),
+ .communities = tgllp_communities,
+ .ncommunities = ARRAY_SIZE(tgllp_communities),
};
static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
- { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
@@ -438,7 +391,7 @@ MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
static struct platform_driver tgl_pinctrl_driver = {
- .probe = intel_pinctrl_probe_by_uid,
+ .probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "tigerlake-pinctrl",
.acpi_match_table = tgl_pinctrl_acpi_match,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
index ba2356a8ab89..845c408b5fdb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2712.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 MediaTek Inc.
* Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
index 8ff88bf2e849..aa79d7ecee00 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2017 Baylibre SAS.
* Author: Jerome Brunet <jbrunet@baylibre.com>
@@ -5,7 +6,6 @@
* Copyright (c) 2017 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
*
- * SPDX-License-Identifier: (GPL-2.0+ or MIT)
*/
struct meson_pmx_bank {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 3c80828a5e50..bbc919bef2bf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
return ret;
meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
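+	/* Each drive-strength field is two bits wide, hence the doubled bit offset. */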
+ bit = bit << 1;
ret = regmap_read(pc->reg_ds, reg, &val);
if (ret)
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 2d5339edd0b7..6cd4b3ec1b40 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -233,6 +233,8 @@ static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
static const unsigned int hdmi_cec_0_pins[] = { GPIOH_3 };
static const unsigned int eth_txd1_0_pins[] = { GPIOH_5 };
static const unsigned int eth_txd0_0_pins[] = { GPIOH_6 };
+static const unsigned int eth_rxd3_h_pins[] = { GPIOH_5 };
+static const unsigned int eth_rxd2_h_pins[] = { GPIOH_6 };
static const unsigned int clk_24m_out_pins[] = { GPIOH_9 };
static const unsigned int spi_ss1_pins[] = { GPIOH_0 };
@@ -535,6 +537,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(spi_miso_1, 9, 12),
GROUP(spi_mosi_1, 9, 11),
GROUP(spi_sclk_1, 9, 10),
+ GROUP(eth_rxd3_h, 6, 15),
+ GROUP(eth_rxd2_h, 6, 14),
GROUP(eth_txd3, 6, 13),
GROUP(eth_txd2, 6, 12),
GROUP(eth_tx_clk, 6, 11),
@@ -746,7 +750,8 @@ static const char * const ethernet_groups[] = {
"eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
"eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
"eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
- "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
+ "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2",
+ "eth_rxd3_h", "eth_rxd2_h"
};
static const char * const i2c_a_groups[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index aa9dcde0f069..243fba254175 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -733,13 +732,20 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
ret = 0;
break;
}
- };
+ }
if (ret) {
dev_err(dev, "no gpio-controller child node\n");
return ret;
}
- nr_irq_parent = of_irq_count(np);
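+	/*
+	 * platform_irq_count() resolves the parent interrupts and can return
+	 * -EPROBE_DEFER when the interrupt controller is not ready yet.
+	 */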
+ nr_irq_parent = platform_irq_count(pdev);
+ if (nr_irq_parent < 0) {
+ if (nr_irq_parent != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't determine irq count: %pe\n",
+ ERR_PTR(nr_irq_parent));
+ return nr_irq_parent;
+ }
+
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
@@ -776,7 +782,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
- int irq = irq_of_parse_and_map(np, i);
+ int irq = platform_get_irq(pdev, i);
if (irq < 0)
continue;
@@ -800,7 +806,7 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
ret = 0;
break;
}
- };
+ }
if (ret)
return ret;
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 22077cbe6880..a935065cdac4 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -331,7 +331,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
return 0;
}
-static struct irq_chip npcmgpio_irqchip = {
+static const struct irq_chip npcmgpio_irqchip = {
.name = "NPCM7XX-GPIO-IRQ",
.irq_ack = npcmgpio_irq_ack,
.irq_unmask = npcmgpio_irq_unmask,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index eab078244a4c..73aff6591de2 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -866,7 +866,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!gpio_dev->base)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index 986e04ac6b5b..d6c9f9dcff97 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -798,7 +798,7 @@ static int artpec6_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
enum pin_config_param param;
unsigned int arg;
unsigned int regval;
- unsigned int *reg;
+ void __iomem *reg;
int i;
/* Check for valid pin */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 24e0e2ef47a4..96f04d121ebd 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3,7 +3,7 @@
* Ingenic SoCs pinctrl driver
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/compiler.h>
@@ -42,28 +42,36 @@
#define JZ4760_GPIO_FLAG 0x50
#define JZ4760_GPIO_PEN 0x70
-#define X1000_GPIO_PZ_BASE 0x700
-#define X1000_GPIO_PZ_GID2LD 0x7f0
+#define X1830_GPIO_PEL 0x110
+#define X1830_GPIO_PEH 0x120
#define REG_SET(x) ((x) + 0x4)
#define REG_CLEAR(x) ((x) + 0x8)
+#define REG_PZ_BASE(x) ((x) * 7)
+#define REG_PZ_GID2LD(x) ((x) * 7 + 0xf0)
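+/*
+ * The shadow "port Z" registers sit seven port-strides above the GPIO
+ * base; (x) is the per-SoC register stride, e.g. 0x100 * 7 = 0x700 on
+ * the X1000.
+ */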
+
+#define GPIO_PULL_DIS 0
+#define GPIO_PULL_UP 1
+#define GPIO_PULL_DOWN 2
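+/* Pull states as encoded in the X1830 PEL/PEH pull-enable registers. */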
+
#define PINS_PER_GPIO_CHIP 32
enum jz_version {
ID_JZ4740,
ID_JZ4725B,
ID_JZ4760,
- ID_JZ4760B,
ID_JZ4770,
ID_JZ4780,
ID_X1000,
- ID_X1000E,
ID_X1500,
+ ID_X1830,
};
struct ingenic_chip_info {
unsigned int num_chips;
+ unsigned int reg_offset;
+ enum jz_version version;
const struct group_desc *groups;
unsigned int num_groups;
@@ -79,7 +87,6 @@ struct ingenic_pinctrl {
struct regmap *map;
struct pinctrl_dev *pctl;
struct pinctrl_pin_desc *pdesc;
- enum jz_version version;
const struct ingenic_chip_info *info;
};
@@ -216,6 +223,8 @@ static const struct function_desc jz4740_functions[] = {
static const struct ingenic_chip_info jz4740_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_JZ4740,
.groups = jz4740_groups,
.num_groups = ARRAY_SIZE(jz4740_groups),
.functions = jz4740_functions,
@@ -339,6 +348,8 @@ static const struct function_desc jz4725b_functions[] = {
static const struct ingenic_chip_info jz4725b_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_JZ4725B,
.groups = jz4725b_groups,
.num_groups = ARRAY_SIZE(jz4725b_groups),
.functions = jz4725b_functions,
@@ -592,16 +603,8 @@ static const struct function_desc jz4760_functions[] = {
static const struct ingenic_chip_info jz4760_chip_info = {
.num_chips = 6,
- .groups = jz4760_groups,
- .num_groups = ARRAY_SIZE(jz4760_groups),
- .functions = jz4760_functions,
- .num_functions = ARRAY_SIZE(jz4760_functions),
- .pull_ups = jz4760_pull_ups,
- .pull_downs = jz4760_pull_downs,
-};
-
-static const struct ingenic_chip_info jz4760b_chip_info = {
- .num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4760,
.groups = jz4760_groups,
.num_groups = ARRAY_SIZE(jz4760_groups),
.functions = jz4760_functions,
@@ -880,6 +883,8 @@ static const struct function_desc jz4770_functions[] = {
static const struct ingenic_chip_info jz4770_chip_info = {
.num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4770,
.groups = jz4770_groups,
.num_groups = ARRAY_SIZE(jz4770_groups),
.functions = jz4770_functions,
@@ -1013,6 +1018,8 @@ static const struct function_desc jz4780_functions[] = {
static const struct ingenic_chip_info jz4780_chip_info = {
.num_chips = 6,
+ .reg_offset = 0x100,
+ .version = ID_JZ4780,
.groups = jz4780_groups,
.num_groups = ARRAY_SIZE(jz4780_groups),
.functions = jz4780_functions,
@@ -1022,7 +1029,7 @@ static const struct ingenic_chip_info jz4780_chip_info = {
};
static const u32 x1000_pull_ups[4] = {
- 0xffffffff, 0x8dffffff, 0x7d3fffff, 0xffffffff,
+ 0xffffffff, 0xfdffffff, 0x0dffffff, 0x0000003f,
};
static const u32 x1000_pull_downs[4] = {
@@ -1033,28 +1040,45 @@ static int x1000_uart0_data_pins[] = { 0x4a, 0x4b, };
static int x1000_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
static int x1000_uart1_data_a_pins[] = { 0x04, 0x05, };
static int x1000_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1000_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1000_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1000_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1000_uart2_data_d_pins[] = { 0x65, 0x64, };
+static int x1000_sfc_pins[] = { 0x1d, 0x1c, 0x1e, 0x1f, 0x1a, 0x1b, };
+static int x1000_ssi_dt_a_22_pins[] = { 0x16, };
+static int x1000_ssi_dt_a_29_pins[] = { 0x1d, };
+static int x1000_ssi_dt_d_pins[] = { 0x62, };
+static int x1000_ssi_dr_a_23_pins[] = { 0x17, };
+static int x1000_ssi_dr_a_28_pins[] = { 0x1c, };
+static int x1000_ssi_dr_d_pins[] = { 0x63, };
+static int x1000_ssi_clk_a_24_pins[] = { 0x18, };
+static int x1000_ssi_clk_a_26_pins[] = { 0x1a, };
+static int x1000_ssi_clk_d_pins[] = { 0x60, };
+static int x1000_ssi_gpc_a_20_pins[] = { 0x14, };
+static int x1000_ssi_gpc_a_31_pins[] = { 0x1f, };
+static int x1000_ssi_ce0_a_25_pins[] = { 0x19, };
+static int x1000_ssi_ce0_a_27_pins[] = { 0x1b, };
+static int x1000_ssi_ce0_d_pins[] = { 0x61, };
+static int x1000_ssi_ce1_a_21_pins[] = { 0x15, };
+static int x1000_ssi_ce1_a_30_pins[] = { 0x1e, };
static int x1000_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
static int x1000_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
static int x1000_mmc0_8bit_pins[] = { 0x13, 0x12, 0x11, 0x10, };
static int x1000_mmc1_1bit_pins[] = { 0x40, 0x41, 0x42, };
static int x1000_mmc1_4bit_pins[] = { 0x43, 0x44, 0x45, };
-static int x1000_nemc_8bit_data_pins[] = {
+static int x1000_emc_8bit_data_pins[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
};
-static int x1000_nemc_16bit_data_pins[] = {
+static int x1000_emc_16bit_data_pins[] = {
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};
-static int x1000_nemc_addr_pins[] = {
+static int x1000_emc_addr_pins[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
};
-static int x1000_nemc_rd_we_pins[] = { 0x30, 0x31, };
-static int x1000_nemc_wait_pins[] = { 0x34, };
-static int x1000_nemc_cs1_pins[] = { 0x32, };
-static int x1000_nemc_cs2_pins[] = { 0x33, };
+static int x1000_emc_rd_we_pins[] = { 0x30, 0x31, };
+static int x1000_emc_wait_pins[] = { 0x34, };
+static int x1000_emc_cs1_pins[] = { 0x32, };
+static int x1000_emc_cs2_pins[] = { 0x33, };
static int x1000_i2c0_pins[] = { 0x38, 0x37, };
static int x1000_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1000_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1083,23 +1107,40 @@ static int x1000_uart0_data_funcs[] = { 0, 0, };
static int x1000_uart0_hwflow_funcs[] = { 0, 0, };
static int x1000_uart1_data_a_funcs[] = { 2, 2, };
static int x1000_uart1_data_d_funcs[] = { 1, 1, };
-static int x1000_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1000_uart1_hwflow_funcs[] = { 1, 1, };
static int x1000_uart2_data_a_funcs[] = { 2, 2, };
static int x1000_uart2_data_d_funcs[] = { 0, 0, };
+static int x1000_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1000_ssi_dt_a_22_funcs[] = { 2, };
+static int x1000_ssi_dt_a_29_funcs[] = { 2, };
+static int x1000_ssi_dt_d_funcs[] = { 0, };
+static int x1000_ssi_dr_a_23_funcs[] = { 2, };
+static int x1000_ssi_dr_a_28_funcs[] = { 2, };
+static int x1000_ssi_dr_d_funcs[] = { 0, };
+static int x1000_ssi_clk_a_24_funcs[] = { 2, };
+static int x1000_ssi_clk_a_26_funcs[] = { 2, };
+static int x1000_ssi_clk_d_funcs[] = { 0, };
+static int x1000_ssi_gpc_a_20_funcs[] = { 2, };
+static int x1000_ssi_gpc_a_31_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_25_funcs[] = { 2, };
+static int x1000_ssi_ce0_a_27_funcs[] = { 2, };
+static int x1000_ssi_ce0_d_funcs[] = { 0, };
+static int x1000_ssi_ce1_a_21_funcs[] = { 2, };
+static int x1000_ssi_ce1_a_30_funcs[] = { 2, };
static int x1000_mmc0_1bit_funcs[] = { 1, 1, 1, };
static int x1000_mmc0_4bit_funcs[] = { 1, 1, 1, };
static int x1000_mmc0_8bit_funcs[] = { 1, 1, 1, 1, 1, };
static int x1000_mmc1_1bit_funcs[] = { 0, 0, 0, };
static int x1000_mmc1_4bit_funcs[] = { 0, 0, 0, };
-static int x1000_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
-static int x1000_nemc_addr_funcs[] = {
+static int x1000_emc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int x1000_emc_addr_funcs[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
-static int x1000_nemc_rd_we_funcs[] = { 0, 0, };
-static int x1000_nemc_wait_funcs[] = { 0, };
-static int x1000_nemc_cs1_funcs[] = { 0, };
-static int x1000_nemc_cs2_funcs[] = { 0, };
+static int x1000_emc_rd_we_funcs[] = { 0, 0, };
+static int x1000_emc_wait_funcs[] = { 0, };
+static int x1000_emc_cs1_funcs[] = { 0, };
+static int x1000_emc_cs2_funcs[] = { 0, };
static int x1000_i2c0_funcs[] = { 0, 0, };
static int x1000_i2c1_a_funcs[] = { 2, 2, };
static int x1000_i2c1_c_funcs[] = { 0, 0, };
@@ -1121,21 +1162,38 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", x1000_uart0_hwflow),
INGENIC_PIN_GROUP("uart1-data-a", x1000_uart1_data_a),
INGENIC_PIN_GROUP("uart1-data-d", x1000_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow-d", x1000_uart1_hwflow_d),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1000_uart1_hwflow),
INGENIC_PIN_GROUP("uart2-data-a", x1000_uart2_data_a),
INGENIC_PIN_GROUP("uart2-data-d", x1000_uart2_data_d),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc),
+ INGENIC_PIN_GROUP("ssi-dt-a-22", x1000_ssi_dt_a_22),
+ INGENIC_PIN_GROUP("ssi-dt-a-29", x1000_ssi_dt_a_29),
+ INGENIC_PIN_GROUP("ssi-dt-d", x1000_ssi_dt_d),
+ INGENIC_PIN_GROUP("ssi-dr-a-23", x1000_ssi_dr_a_23),
+ INGENIC_PIN_GROUP("ssi-dr-a-28", x1000_ssi_dr_a_28),
+ INGENIC_PIN_GROUP("ssi-dr-d", x1000_ssi_dr_d),
+ INGENIC_PIN_GROUP("ssi-clk-a-24", x1000_ssi_clk_a_24),
+ INGENIC_PIN_GROUP("ssi-clk-a-26", x1000_ssi_clk_a_26),
+ INGENIC_PIN_GROUP("ssi-clk-d", x1000_ssi_clk_d),
+ INGENIC_PIN_GROUP("ssi-gpc-a-20", x1000_ssi_gpc_a_20),
+ INGENIC_PIN_GROUP("ssi-gpc-a-31", x1000_ssi_gpc_a_31),
+ INGENIC_PIN_GROUP("ssi-ce0-a-25", x1000_ssi_ce0_a_25),
+ INGENIC_PIN_GROUP("ssi-ce0-a-27", x1000_ssi_ce0_a_27),
+ INGENIC_PIN_GROUP("ssi-ce0-d", x1000_ssi_ce0_d),
+ INGENIC_PIN_GROUP("ssi-ce1-a-21", x1000_ssi_ce1_a_21),
+ INGENIC_PIN_GROUP("ssi-ce1-a-30", x1000_ssi_ce1_a_30),
INGENIC_PIN_GROUP("mmc0-1bit", x1000_mmc0_1bit),
INGENIC_PIN_GROUP("mmc0-4bit", x1000_mmc0_4bit),
INGENIC_PIN_GROUP("mmc0-8bit", x1000_mmc0_8bit),
INGENIC_PIN_GROUP("mmc1-1bit", x1000_mmc1_1bit),
INGENIC_PIN_GROUP("mmc1-4bit", x1000_mmc1_4bit),
- INGENIC_PIN_GROUP("nemc-8bit-data", x1000_nemc_8bit_data),
- INGENIC_PIN_GROUP("nemc-16bit-data", x1000_nemc_16bit_data),
- INGENIC_PIN_GROUP("nemc-addr", x1000_nemc_addr),
- INGENIC_PIN_GROUP("nemc-rd-we", x1000_nemc_rd_we),
- INGENIC_PIN_GROUP("nemc-wait", x1000_nemc_wait),
- INGENIC_PIN_GROUP("nemc-cs1", x1000_nemc_cs1),
- INGENIC_PIN_GROUP("nemc-cs2", x1000_nemc_cs2),
+ INGENIC_PIN_GROUP("emc-8bit-data", x1000_emc_8bit_data),
+ INGENIC_PIN_GROUP("emc-16bit-data", x1000_emc_16bit_data),
+ INGENIC_PIN_GROUP("emc-addr", x1000_emc_addr),
+ INGENIC_PIN_GROUP("emc-rd-we", x1000_emc_rd_we),
+ INGENIC_PIN_GROUP("emc-wait", x1000_emc_wait),
+ INGENIC_PIN_GROUP("emc-cs1", x1000_emc_cs1),
+ INGENIC_PIN_GROUP("emc-cs2", x1000_emc_cs2),
INGENIC_PIN_GROUP("i2c0-data", x1000_i2c0),
INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c),
@@ -1154,21 +1212,30 @@ static const struct group_desc x1000_groups[] = {
static const char *x1000_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1000_uart1_groups[] = {
- "uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+ "uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1000_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
+static const char *x1000_sfc_groups[] = { "sfc", };
+static const char *x1000_ssi_groups[] = {
+ "ssi-dt-a-22", "ssi-dt-a-29", "ssi-dt-d",
+ "ssi-dr-a-23", "ssi-dr-a-28", "ssi-dr-d",
+ "ssi-clk-a-24", "ssi-clk-a-26", "ssi-clk-d",
+ "ssi-gpc-a-20", "ssi-gpc-a-31",
+ "ssi-ce0-a-25", "ssi-ce0-a-27", "ssi-ce0-d",
+ "ssi-ce1-a-21", "ssi-ce1-a-30",
+};
static const char *x1000_mmc0_groups[] = {
"mmc0-1bit", "mmc0-4bit", "mmc0-8bit",
};
static const char *x1000_mmc1_groups[] = {
- "mmc1-1bit-e", "mmc1-4bit-e",
+ "mmc1-1bit", "mmc1-4bit",
};
-static const char *x1000_nemc_groups[] = {
- "nemc-8bit-data", "nemc-16bit-data",
- "nemc-addr", "nemc-rd-we", "nemc-wait",
+static const char *x1000_emc_groups[] = {
+ "emc-8bit-data", "emc-16bit-data",
+ "emc-addr", "emc-rd-we", "emc-wait",
};
-static const char *x1000_cs1_groups[] = { "nemc-cs1", };
-static const char *x1000_cs2_groups[] = { "nemc-cs2", };
+static const char *x1000_cs1_groups[] = { "emc-cs1", };
+static const char *x1000_cs2_groups[] = { "emc-cs2", };
static const char *x1000_i2c0_groups[] = { "i2c0-data", };
static const char *x1000_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1000_i2c2_groups[] = { "i2c2-data", };
@@ -1187,11 +1254,13 @@ static const struct function_desc x1000_functions[] = {
{ "uart0", x1000_uart0_groups, ARRAY_SIZE(x1000_uart0_groups), },
{ "uart1", x1000_uart1_groups, ARRAY_SIZE(x1000_uart1_groups), },
{ "uart2", x1000_uart2_groups, ARRAY_SIZE(x1000_uart2_groups), },
+ { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+ { "ssi", x1000_ssi_groups, ARRAY_SIZE(x1000_ssi_groups), },
{ "mmc0", x1000_mmc0_groups, ARRAY_SIZE(x1000_mmc0_groups), },
{ "mmc1", x1000_mmc1_groups, ARRAY_SIZE(x1000_mmc1_groups), },
- { "nemc", x1000_nemc_groups, ARRAY_SIZE(x1000_nemc_groups), },
- { "nemc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
- { "nemc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
+ { "emc", x1000_emc_groups, ARRAY_SIZE(x1000_emc_groups), },
+ { "emc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
+ { "emc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
{ "i2c0", x1000_i2c0_groups, ARRAY_SIZE(x1000_i2c0_groups), },
{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
@@ -1207,16 +1276,8 @@ static const struct function_desc x1000_functions[] = {
static const struct ingenic_chip_info x1000_chip_info = {
.num_chips = 4,
- .groups = x1000_groups,
- .num_groups = ARRAY_SIZE(x1000_groups),
- .functions = x1000_functions,
- .num_functions = ARRAY_SIZE(x1000_functions),
- .pull_ups = x1000_pull_ups,
- .pull_downs = x1000_pull_downs,
-};
-
-static const struct ingenic_chip_info x1000e_chip_info = {
- .num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1000,
.groups = x1000_groups,
.num_groups = ARRAY_SIZE(x1000_groups),
.functions = x1000_functions,
@@ -1229,11 +1290,11 @@ static int x1500_uart0_data_pins[] = { 0x4a, 0x4b, };
static int x1500_uart0_hwflow_pins[] = { 0x4c, 0x4d, };
static int x1500_uart1_data_a_pins[] = { 0x04, 0x05, };
static int x1500_uart1_data_d_pins[] = { 0x62, 0x63, };
-static int x1500_uart1_hwflow_d_pins[] = { 0x64, 0x65, };
+static int x1500_uart1_hwflow_pins[] = { 0x64, 0x65, };
static int x1500_uart2_data_a_pins[] = { 0x02, 0x03, };
static int x1500_uart2_data_d_pins[] = { 0x65, 0x64, };
-static int x1500_mmc0_1bit_pins[] = { 0x18, 0x19, 0x17, };
-static int x1500_mmc0_4bit_pins[] = { 0x16, 0x15, 0x14, };
+static int x1500_mmc_1bit_pins[] = { 0x18, 0x19, 0x17, };
+static int x1500_mmc_4bit_pins[] = { 0x16, 0x15, 0x14, };
static int x1500_i2c0_pins[] = { 0x38, 0x37, };
static int x1500_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1500_i2c1_c_pins[] = { 0x5b, 0x5a, };
@@ -1252,11 +1313,11 @@ static int x1500_uart0_data_funcs[] = { 0, 0, };
static int x1500_uart0_hwflow_funcs[] = { 0, 0, };
static int x1500_uart1_data_a_funcs[] = { 2, 2, };
static int x1500_uart1_data_d_funcs[] = { 1, 1, };
-static int x1500_uart1_hwflow_d_funcs[] = { 1, 1, };
+static int x1500_uart1_hwflow_funcs[] = { 1, 1, };
static int x1500_uart2_data_a_funcs[] = { 2, 2, };
static int x1500_uart2_data_d_funcs[] = { 0, 0, };
-static int x1500_mmc0_1bit_funcs[] = { 1, 1, 1, };
-static int x1500_mmc0_4bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_1bit_funcs[] = { 1, 1, 1, };
+static int x1500_mmc_4bit_funcs[] = { 1, 1, 1, };
static int x1500_i2c0_funcs[] = { 0, 0, };
static int x1500_i2c1_a_funcs[] = { 2, 2, };
static int x1500_i2c1_c_funcs[] = { 0, 0, };
@@ -1273,11 +1334,12 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("uart0-hwflow", x1500_uart0_hwflow),
INGENIC_PIN_GROUP("uart1-data-a", x1500_uart1_data_a),
INGENIC_PIN_GROUP("uart1-data-d", x1500_uart1_data_d),
- INGENIC_PIN_GROUP("uart1-hwflow-d", x1500_uart1_hwflow_d),
+ INGENIC_PIN_GROUP("uart1-hwflow", x1500_uart1_hwflow),
INGENIC_PIN_GROUP("uart2-data-a", x1500_uart2_data_a),
INGENIC_PIN_GROUP("uart2-data-d", x1500_uart2_data_d),
- INGENIC_PIN_GROUP("mmc0-1bit", x1500_mmc0_1bit),
- INGENIC_PIN_GROUP("mmc0-4bit", x1500_mmc0_4bit),
+ INGENIC_PIN_GROUP("sfc", x1000_sfc),
+ INGENIC_PIN_GROUP("mmc-1bit", x1500_mmc_1bit),
+ INGENIC_PIN_GROUP("mmc-4bit", x1500_mmc_4bit),
INGENIC_PIN_GROUP("i2c0-data", x1500_i2c0),
INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c),
@@ -1293,10 +1355,10 @@ static const struct group_desc x1500_groups[] = {
static const char *x1500_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *x1500_uart1_groups[] = {
- "uart1-data-a", "uart1-data-d", "uart1-hwflow-d",
+ "uart1-data-a", "uart1-data-d", "uart1-hwflow",
};
static const char *x1500_uart2_groups[] = { "uart2-data-a", "uart2-data-d", };
-static const char *x1500_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1500_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
static const char *x1500_i2c0_groups[] = { "i2c0-data", };
static const char *x1500_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1500_i2c2_groups[] = { "i2c2-data", };
@@ -1312,7 +1374,8 @@ static const struct function_desc x1500_functions[] = {
{ "uart0", x1500_uart0_groups, ARRAY_SIZE(x1500_uart0_groups), },
{ "uart1", x1500_uart1_groups, ARRAY_SIZE(x1500_uart1_groups), },
{ "uart2", x1500_uart2_groups, ARRAY_SIZE(x1500_uart2_groups), },
- { "mmc0", x1500_mmc0_groups, ARRAY_SIZE(x1500_mmc0_groups), },
+ { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
+ { "mmc", x1500_mmc_groups, ARRAY_SIZE(x1500_mmc_groups), },
{ "i2c0", x1500_i2c0_groups, ARRAY_SIZE(x1500_i2c0_groups), },
{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
@@ -1327,6 +1390,8 @@ static const struct function_desc x1500_functions[] = {
static const struct ingenic_chip_info x1500_chip_info = {
.num_chips = 4,
+ .reg_offset = 0x100,
+ .version = ID_X1500,
.groups = x1500_groups,
.num_groups = ARRAY_SIZE(x1500_groups),
.functions = x1500_functions,
@@ -1335,6 +1400,222 @@ static const struct ingenic_chip_info x1500_chip_info = {
.pull_downs = x1000_pull_downs,
};
+static const u32 x1830_pull_ups[4] = {
+ 0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static const u32 x1830_pull_downs[4] = {
+ 0x5fdfffc0, 0xffffefff, 0x1ffffbff, 0x0fcff3fc,
+};
+
+static int x1830_uart0_data_pins[] = { 0x33, 0x36, };
+static int x1830_uart0_hwflow_pins[] = { 0x34, 0x35, };
+static int x1830_uart1_data_pins[] = { 0x38, 0x37, };
+static int x1830_sfc_pins[] = { 0x17, 0x18, 0x1a, 0x19, 0x1b, 0x1c, };
+static int x1830_ssi0_dt_pins[] = { 0x4c, };
+static int x1830_ssi0_dr_pins[] = { 0x4b, };
+static int x1830_ssi0_clk_pins[] = { 0x4f, };
+static int x1830_ssi0_gpc_pins[] = { 0x4d, };
+static int x1830_ssi0_ce0_pins[] = { 0x50, };
+static int x1830_ssi0_ce1_pins[] = { 0x4e, };
+static int x1830_ssi1_dt_c_pins[] = { 0x53, };
+static int x1830_ssi1_dr_c_pins[] = { 0x54, };
+static int x1830_ssi1_clk_c_pins[] = { 0x57, };
+static int x1830_ssi1_gpc_c_pins[] = { 0x55, };
+static int x1830_ssi1_ce0_c_pins[] = { 0x58, };
+static int x1830_ssi1_ce1_c_pins[] = { 0x56, };
+static int x1830_ssi1_dt_d_pins[] = { 0x62, };
+static int x1830_ssi1_dr_d_pins[] = { 0x63, };
+static int x1830_ssi1_clk_d_pins[] = { 0x66, };
+static int x1830_ssi1_gpc_d_pins[] = { 0x64, };
+static int x1830_ssi1_ce0_d_pins[] = { 0x67, };
+static int x1830_ssi1_ce1_d_pins[] = { 0x65, };
+static int x1830_mmc0_1bit_pins[] = { 0x24, 0x25, 0x20, };
+static int x1830_mmc0_4bit_pins[] = { 0x21, 0x22, 0x23, };
+static int x1830_mmc1_1bit_pins[] = { 0x42, 0x43, 0x44, };
+static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
+static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
+static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
+static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_pwm_pwm0_b_pins[] = { 0x31, };
+static int x1830_pwm_pwm0_c_pins[] = { 0x4b, };
+static int x1830_pwm_pwm1_b_pins[] = { 0x32, };
+static int x1830_pwm_pwm1_c_pins[] = { 0x4c, };
+static int x1830_pwm_pwm2_c_8_pins[] = { 0x48, };
+static int x1830_pwm_pwm2_c_13_pins[] = { 0x4d, };
+static int x1830_pwm_pwm3_c_9_pins[] = { 0x49, };
+static int x1830_pwm_pwm3_c_14_pins[] = { 0x4e, };
+static int x1830_pwm_pwm4_c_15_pins[] = { 0x4f, };
+static int x1830_pwm_pwm4_c_25_pins[] = { 0x59, };
+static int x1830_pwm_pwm5_c_16_pins[] = { 0x50, };
+static int x1830_pwm_pwm5_c_26_pins[] = { 0x5a, };
+static int x1830_pwm_pwm6_c_17_pins[] = { 0x51, };
+static int x1830_pwm_pwm6_c_27_pins[] = { 0x5b, };
+static int x1830_pwm_pwm7_c_18_pins[] = { 0x52, };
+static int x1830_pwm_pwm7_c_28_pins[] = { 0x5c, };
+static int x1830_mac_pins[] = {
+ 0x29, 0x30, 0x2f, 0x28, 0x2e, 0x2d, 0x2a, 0x2b, 0x26, 0x27,
+};
+
+static int x1830_uart0_data_funcs[] = { 0, 0, };
+static int x1830_uart0_hwflow_funcs[] = { 0, 0, };
+static int x1830_uart1_data_funcs[] = { 0, 0, };
+static int x1830_sfc_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int x1830_ssi0_dt_funcs[] = { 0, };
+static int x1830_ssi0_dr_funcs[] = { 0, };
+static int x1830_ssi0_clk_funcs[] = { 0, };
+static int x1830_ssi0_gpc_funcs[] = { 0, };
+static int x1830_ssi0_ce0_funcs[] = { 0, };
+static int x1830_ssi0_ce1_funcs[] = { 0, };
+static int x1830_ssi1_dt_c_funcs[] = { 1, };
+static int x1830_ssi1_dr_c_funcs[] = { 1, };
+static int x1830_ssi1_clk_c_funcs[] = { 1, };
+static int x1830_ssi1_gpc_c_funcs[] = { 1, };
+static int x1830_ssi1_ce0_c_funcs[] = { 1, };
+static int x1830_ssi1_ce1_c_funcs[] = { 1, };
+static int x1830_ssi1_dt_d_funcs[] = { 2, };
+static int x1830_ssi1_dr_d_funcs[] = { 2, };
+static int x1830_ssi1_clk_d_funcs[] = { 2, };
+static int x1830_ssi1_gpc_d_funcs[] = { 2, };
+static int x1830_ssi1_ce0_d_funcs[] = { 2, };
+static int x1830_ssi1_ce1_d_funcs[] = { 2, };
+static int x1830_mmc0_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc0_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_1bit_funcs[] = { 0, 0, 0, };
+static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
+static int x1830_i2c0_funcs[] = { 1, 1, };
+static int x1830_i2c1_funcs[] = { 0, 0, };
+static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_pwm_pwm0_b_funcs[] = { 0, };
+static int x1830_pwm_pwm0_c_funcs[] = { 1, };
+static int x1830_pwm_pwm1_b_funcs[] = { 0, };
+static int x1830_pwm_pwm1_c_funcs[] = { 1, };
+static int x1830_pwm_pwm2_c_8_funcs[] = { 0, };
+static int x1830_pwm_pwm2_c_13_funcs[] = { 1, };
+static int x1830_pwm_pwm3_c_9_funcs[] = { 0, };
+static int x1830_pwm_pwm3_c_14_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_15_funcs[] = { 1, };
+static int x1830_pwm_pwm4_c_25_funcs[] = { 0, };
+static int x1830_pwm_pwm5_c_16_funcs[] = { 1, };
+static int x1830_pwm_pwm5_c_26_funcs[] = { 0, };
+static int x1830_pwm_pwm6_c_17_funcs[] = { 1, };
+static int x1830_pwm_pwm6_c_27_funcs[] = { 0, };
+static int x1830_pwm_pwm7_c_18_funcs[] = { 1, };
+static int x1830_pwm_pwm7_c_28_funcs[] = { 0, };
+static int x1830_mac_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+
+static const struct group_desc x1830_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", x1830_uart0_data),
+ INGENIC_PIN_GROUP("uart0-hwflow", x1830_uart0_hwflow),
+ INGENIC_PIN_GROUP("uart1-data", x1830_uart1_data),
+ INGENIC_PIN_GROUP("sfc", x1830_sfc),
+ INGENIC_PIN_GROUP("ssi0-dt", x1830_ssi0_dt),
+ INGENIC_PIN_GROUP("ssi0-dr", x1830_ssi0_dr),
+ INGENIC_PIN_GROUP("ssi0-clk", x1830_ssi0_clk),
+ INGENIC_PIN_GROUP("ssi0-gpc", x1830_ssi0_gpc),
+ INGENIC_PIN_GROUP("ssi0-ce0", x1830_ssi0_ce0),
+ INGENIC_PIN_GROUP("ssi0-ce1", x1830_ssi0_ce1),
+ INGENIC_PIN_GROUP("ssi1-dt-c", x1830_ssi1_dt_c),
+ INGENIC_PIN_GROUP("ssi1-dr-c", x1830_ssi1_dr_c),
+ INGENIC_PIN_GROUP("ssi1-clk-c", x1830_ssi1_clk_c),
+ INGENIC_PIN_GROUP("ssi1-gpc-c", x1830_ssi1_gpc_c),
+ INGENIC_PIN_GROUP("ssi1-ce0-c", x1830_ssi1_ce0_c),
+ INGENIC_PIN_GROUP("ssi1-ce1-c", x1830_ssi1_ce1_c),
+ INGENIC_PIN_GROUP("ssi1-dt-d", x1830_ssi1_dt_d),
+ INGENIC_PIN_GROUP("ssi1-dr-d", x1830_ssi1_dr_d),
+ INGENIC_PIN_GROUP("ssi1-clk-d", x1830_ssi1_clk_d),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", x1830_ssi1_gpc_d),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", x1830_ssi1_ce0_d),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", x1830_ssi1_ce1_d),
+ INGENIC_PIN_GROUP("mmc0-1bit", x1830_mmc0_1bit),
+ INGENIC_PIN_GROUP("mmc0-4bit", x1830_mmc0_4bit),
+ INGENIC_PIN_GROUP("mmc1-1bit", x1830_mmc1_1bit),
+ INGENIC_PIN_GROUP("mmc1-4bit", x1830_mmc1_4bit),
+ INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
+ INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
+ INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+ INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
+ INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
+ INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
+ INGENIC_PIN_GROUP("pwm1-c", x1830_pwm_pwm1_c),
+ INGENIC_PIN_GROUP("pwm2-c-8", x1830_pwm_pwm2_c_8),
+ INGENIC_PIN_GROUP("pwm2-c-13", x1830_pwm_pwm2_c_13),
+ INGENIC_PIN_GROUP("pwm3-c-9", x1830_pwm_pwm3_c_9),
+ INGENIC_PIN_GROUP("pwm3-c-14", x1830_pwm_pwm3_c_14),
+ INGENIC_PIN_GROUP("pwm4-c-15", x1830_pwm_pwm4_c_15),
+ INGENIC_PIN_GROUP("pwm4-c-25", x1830_pwm_pwm4_c_25),
+ INGENIC_PIN_GROUP("pwm5-c-16", x1830_pwm_pwm5_c_16),
+ INGENIC_PIN_GROUP("pwm5-c-26", x1830_pwm_pwm5_c_26),
+ INGENIC_PIN_GROUP("pwm6-c-17", x1830_pwm_pwm6_c_17),
+ INGENIC_PIN_GROUP("pwm6-c-27", x1830_pwm_pwm6_c_27),
+ INGENIC_PIN_GROUP("pwm7-c-18", x1830_pwm_pwm7_c_18),
+ INGENIC_PIN_GROUP("pwm7-c-28", x1830_pwm_pwm7_c_28),
+ INGENIC_PIN_GROUP("mac", x1830_mac),
+};
+
+static const char *x1830_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *x1830_uart1_groups[] = { "uart1-data", };
+static const char *x1830_sfc_groups[] = { "sfc", };
+static const char *x1830_ssi0_groups[] = {
+ "ssi0-dt", "ssi0-dr", "ssi0-clk", "ssi0-gpc", "ssi0-ce0", "ssi0-ce1",
+};
+static const char *x1830_ssi1_groups[] = {
+ "ssi1-dt-c", "ssi1-dt-d",
+ "ssi1-dr-c", "ssi1-dr-d",
+ "ssi1-clk-c", "ssi1-clk-d",
+ "ssi1-gpc-c", "ssi1-gpc-d",
+ "ssi1-ce0-c", "ssi1-ce0-d",
+ "ssi1-ce1-c", "ssi1-ce1-d",
+};
+static const char *x1830_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *x1830_i2c0_groups[] = { "i2c0-data", };
+static const char *x1830_i2c1_groups[] = { "i2c1-data", };
+static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
+static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
+static const char *x1830_pwm2_groups[] = { "pwm2-c-8", "pwm2-c-13", };
+static const char *x1830_pwm3_groups[] = { "pwm3-c-9", "pwm3-c-14", };
+static const char *x1830_pwm4_groups[] = { "pwm4-c-15", "pwm4-c-25", };
+static const char *x1830_pwm5_groups[] = { "pwm5-c-16", "pwm5-c-26", };
+static const char *x1830_pwm6_groups[] = { "pwm6-c-17", "pwm6-c-27", };
+static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
+static const char *x1830_mac_groups[] = { "mac", };
+
+static const struct function_desc x1830_functions[] = {
+ { "uart0", x1830_uart0_groups, ARRAY_SIZE(x1830_uart0_groups), },
+ { "uart1", x1830_uart1_groups, ARRAY_SIZE(x1830_uart1_groups), },
+ { "sfc", x1830_sfc_groups, ARRAY_SIZE(x1830_sfc_groups), },
+ { "ssi0", x1830_ssi0_groups, ARRAY_SIZE(x1830_ssi0_groups), },
+ { "ssi1", x1830_ssi1_groups, ARRAY_SIZE(x1830_ssi1_groups), },
+ { "mmc0", x1830_mmc0_groups, ARRAY_SIZE(x1830_mmc0_groups), },
+ { "mmc1", x1830_mmc1_groups, ARRAY_SIZE(x1830_mmc1_groups), },
+ { "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
+ { "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
+ { "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+ { "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
+ { "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
+ { "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
+ { "pwm3", x1830_pwm3_groups, ARRAY_SIZE(x1830_pwm3_groups), },
+ { "pwm4", x1830_pwm4_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm5", x1830_pwm5_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm6", x1830_pwm6_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "pwm7", x1830_pwm7_groups, ARRAY_SIZE(x1830_pwm4_groups), },
+ { "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), },
+};
+
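+/*
+ * The X1830 spaces its four GPIO chips 0x1000 apart in the register map,
+ * hence the reg_offset below (earlier SoCs use a 0x100 stride).
+ */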
+static const struct ingenic_chip_info x1830_chip_info = {
+ .num_chips = 4,
+ .reg_offset = 0x1000,
+ .version = ID_X1830,
+ .groups = x1830_groups,
+ .num_groups = ARRAY_SIZE(x1830_groups),
+ .functions = x1830_functions,
+ .num_functions = ARRAY_SIZE(x1830_functions),
+ .pull_ups = x1830_pull_ups,
+ .pull_downs = x1830_pull_downs,
+};
+
static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
{
unsigned int val;
@@ -1363,12 +1644,14 @@ static void ingenic_gpio_shadow_set_bit(struct ingenic_gpio_chip *jzgc,
else
reg = REG_CLEAR(reg);
- regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_BASE + reg, BIT(offset));
+ regmap_write(jzgc->jzpc->map, REG_PZ_BASE(
+ jzgc->jzpc->info->reg_offset) + reg, BIT(offset));
}
static void ingenic_gpio_shadow_set_bit_load(struct ingenic_gpio_chip *jzgc)
{
- regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_GID2LD,
+ regmap_write(jzgc->jzpc->map, REG_PZ_GID2LD(
+ jzgc->jzpc->info->reg_offset),
jzgc->gc.base / PINS_PER_GPIO_CHIP);
}
@@ -1383,7 +1666,7 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
u8 offset, int value)
{
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
@@ -1393,58 +1676,42 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
u8 offset, unsigned int type)
{
u8 reg1, reg2;
-
- if (jzgc->jzpc->version >= ID_JZ4760) {
- reg1 = JZ4760_GPIO_PAT1;
- reg2 = JZ4760_GPIO_PAT0;
- } else {
- reg1 = JZ4740_GPIO_TRIG;
- reg2 = JZ4740_GPIO_DIR;
- }
+ bool val1, val2;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_set_bit(jzgc, reg1, offset, true);
- }
+ val1 = val2 = true;
break;
case IRQ_TYPE_EDGE_FALLING:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, true);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_set_bit(jzgc, reg1, offset, true);
- }
+ val1 = false;
+ val2 = true;
break;
case IRQ_TYPE_LEVEL_HIGH:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, true);
- ingenic_gpio_set_bit(jzgc, reg1, offset, false);
- }
+ val1 = true;
+ val2 = false;
break;
case IRQ_TYPE_LEVEL_LOW:
default:
- if (jzgc->jzpc->version >= ID_X1000) {
- ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, false);
- ingenic_gpio_shadow_set_bit_load(jzgc);
- } else {
- ingenic_gpio_set_bit(jzgc, reg2, offset, false);
- ingenic_gpio_set_bit(jzgc, reg1, offset, false);
- }
+ val1 = val2 = false;
break;
}
+
+ if (jzgc->jzpc->info->version >= ID_JZ4760) {
+ reg1 = JZ4760_GPIO_PAT1;
+ reg2 = JZ4760_GPIO_PAT0;
+ } else {
+ reg1 = JZ4740_GPIO_TRIG;
+ reg2 = JZ4740_GPIO_DIR;
+ }
+
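+ /*
+ * Commit the trigger configuration: val1 is written to PAT0/DIR (the
+ * polarity, i.e. rising edge / high level), val2 to PAT1/TRIG (true
+ * selects edge triggering). X1000 and newer program the shadow group
+ * registers and then load them into the real bank.
+ */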
+ if (jzgc->jzpc->info->version >= ID_X1000) {
+ ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2);
+ ingenic_gpio_shadow_set_bit_load(jzgc);
+ } else {
+ ingenic_gpio_set_bit(jzgc, reg2, offset, val1);
+ ingenic_gpio_set_bit(jzgc, reg1, offset, val2);
+ }
}
static void ingenic_gpio_irq_mask(struct irq_data *irqd)
@@ -1469,7 +1736,7 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
int irq = irqd->hwirq;
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
@@ -1485,7 +1752,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_irq_mask(irqd);
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
@@ -1510,7 +1777,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
}
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
@@ -1567,7 +1834,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
- if (jzgc->jzpc->version >= ID_JZ4760)
+ if (jzgc->jzpc->info->version >= ID_JZ4760)
flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
else
flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
@@ -1611,7 +1878,7 @@ static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, offt * 0x100 +
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
}
@@ -1620,14 +1887,15 @@ static inline void ingenic_shadow_config_pin(struct ingenic_pinctrl *jzpc,
{
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
- regmap_write(jzpc->map, X1000_GPIO_PZ_BASE +
+ regmap_write(jzpc->map, REG_PZ_BASE(jzpc->info->reg_offset) +
(set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
}
static inline void ingenic_shadow_config_pin_load(struct ingenic_pinctrl *jzpc,
unsigned int pin)
{
- regmap_write(jzpc->map, X1000_GPIO_PZ_GID2LD, pin / PINS_PER_GPIO_CHIP);
+ regmap_write(jzpc->map, REG_PZ_GID2LD(jzpc->info->reg_offset),
+ pin / PINS_PER_GPIO_CHIP);
}
static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
@@ -1637,7 +1905,7 @@ static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
unsigned int val;
- regmap_read(jzpc->map, offt * 0x100 + reg, &val);
+ regmap_read(jzpc->map, offt * jzpc->info->reg_offset + reg, &val);
return val & BIT(idx);
}
@@ -1648,7 +1916,7 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
@@ -1674,13 +1942,13 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n",
'A' + offt, idx, func);
- if (jzpc->version >= ID_X1000) {
+ if (jzpc->info->version >= ID_X1000) {
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->version >= ID_JZ4760) {
+ } else if (jzpc->info->version >= ID_JZ4760) {
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
@@ -1733,12 +2001,12 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n",
'A' + offt, idx, input ? "in" : "out");
- if (jzpc->version >= ID_X1000) {
+ if (jzpc->info->version >= ID_X1000) {
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
ingenic_shadow_config_pin_load(jzpc, pin);
- } else if (jzpc->version >= ID_JZ4760) {
+ } else if (jzpc->info->version >= ID_JZ4760) {
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
@@ -1768,7 +2036,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
bool pull;
- if (jzpc->version >= ID_JZ4760)
+ if (jzpc->info->version >= ID_JZ4760)
pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
else
pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
@@ -1798,18 +2066,37 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
}
static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
- unsigned int pin, bool enabled)
+ unsigned int pin, unsigned int bias)
{
- if (jzpc->version >= ID_JZ4760)
- ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !enabled);
- else
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
+ if (jzpc->info->version >= ID_X1830) {
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int half = PINS_PER_GPIO_CHIP / 2;
+ unsigned int idxh = pin % half * 2;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
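+ /*
+ * The X1830 uses two bias-select bits per pin: the lower half of a
+ * chip's pins live in the PEL register, the upper half in PEH, so
+ * clear the old two-bit field before setting the new bias value.
+ */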
+ if (idx < half) {
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(X1830_GPIO_PEL), 3 << idxh);
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(X1830_GPIO_PEL), bias << idxh);
+ } else {
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_CLEAR(X1830_GPIO_PEH), 3 << idxh);
+ regmap_write(jzpc->map, offt * jzpc->info->reg_offset +
+ REG_SET(X1830_GPIO_PEH), bias << idxh);
+ }
+
+ } else if (jzpc->info->version >= ID_JZ4760) {
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
+ }
}
static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
unsigned int pin, bool high)
{
- if (jzpc->version >= ID_JZ4770)
+ if (jzpc->info->version >= ID_JZ4760)
ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
else
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
@@ -1843,7 +2130,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, false);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_DIS);
break;
case PIN_CONFIG_BIAS_PULL_UP:
@@ -1851,7 +2138,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
dev_dbg(jzpc->dev, "set pull-up for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, true);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_UP);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
@@ -1859,7 +2146,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
dev_dbg(jzpc->dev, "set pull-down for pin P%c%u\n",
'A' + offt, idx);
- ingenic_set_bias(jzpc, pin, true);
+ ingenic_set_bias(jzpc, pin, GPIO_PULL_DOWN);
break;
case PIN_CONFIG_OUTPUT:
@@ -1939,25 +2226,13 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
.reg_stride = 4,
};
-static const struct of_device_id ingenic_pinctrl_of_match[] = {
- { .compatible = "ingenic,jz4740-pinctrl", .data = (void *) ID_JZ4740 },
- { .compatible = "ingenic,jz4725b-pinctrl", .data = (void *)ID_JZ4725B },
- { .compatible = "ingenic,jz4760-pinctrl", .data = (void *) ID_JZ4760 },
- { .compatible = "ingenic,jz4760b-pinctrl", .data = (void *) ID_JZ4760B },
- { .compatible = "ingenic,jz4770-pinctrl", .data = (void *) ID_JZ4770 },
- { .compatible = "ingenic,jz4780-pinctrl", .data = (void *) ID_JZ4780 },
- { .compatible = "ingenic,x1000-pinctrl", .data = (void *) ID_X1000 },
- { .compatible = "ingenic,x1000e-pinctrl", .data = (void *) ID_X1000E },
- { .compatible = "ingenic,x1500-pinctrl", .data = (void *) ID_X1500 },
- {},
-};
-
static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-gpio", },
{ .compatible = "ingenic,jz4760-gpio", },
{ .compatible = "ingenic,jz4770-gpio", },
{ .compatible = "ingenic,jz4780-gpio", },
{ .compatible = "ingenic,x1000-gpio", },
+ { .compatible = "ingenic,x1830-gpio", },
{},
};
@@ -1981,7 +2256,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
return -ENOMEM;
jzgc->jzpc = jzpc;
- jzgc->reg_base = bank * 0x100;
+ jzgc->reg_base = bank * jzpc->info->reg_offset;
jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
if (!jzgc->gc.label)
@@ -2048,9 +2323,6 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
struct ingenic_pinctrl *jzpc;
struct pinctrl_desc *pctl_desc;
void __iomem *base;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- const struct of_device_id *of_id = of_match_device(
- ingenic_pinctrl_of_match, dev);
const struct ingenic_chip_info *chip_info;
struct device_node *node;
unsigned int i;
@@ -2060,8 +2332,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
if (!jzpc)
return -ENOMEM;
- base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -2073,31 +2344,7 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
jzpc->dev = dev;
-
- if (of_id)
- jzpc->version = (enum jz_version)of_id->data;
- else
- jzpc->version = (enum jz_version)id->driver_data;
-
- if (jzpc->version >= ID_X1500)
- chip_info = &x1500_chip_info;
- else if (jzpc->version >= ID_X1000E)
- chip_info = &x1000e_chip_info;
- else if (jzpc->version >= ID_X1000)
- chip_info = &x1000_chip_info;
- else if (jzpc->version >= ID_JZ4780)
- chip_info = &jz4780_chip_info;
- else if (jzpc->version >= ID_JZ4770)
- chip_info = &jz4770_chip_info;
- else if (jzpc->version >= ID_JZ4760B)
- chip_info = &jz4760b_chip_info;
- else if (jzpc->version >= ID_JZ4760)
- chip_info = &jz4760_chip_info;
- else if (jzpc->version >= ID_JZ4725B)
- chip_info = &jz4725b_chip_info;
- else
- chip_info = &jz4740_chip_info;
- jzpc->info = chip_info;
+ jzpc->info = chip_info = of_device_get_match_data(dev);
pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
if (!pctl_desc)
@@ -2166,25 +2413,25 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id ingenic_pinctrl_ids[] = {
- { "jz4740-pinctrl", ID_JZ4740 },
- { "jz4725b-pinctrl", ID_JZ4725B },
- { "jz4760-pinctrl", ID_JZ4760 },
- { "jz4760b-pinctrl", ID_JZ4760B },
- { "jz4770-pinctrl", ID_JZ4770 },
- { "jz4780-pinctrl", ID_JZ4780 },
- { "x1000-pinctrl", ID_X1000 },
- { "x1000e-pinctrl", ID_X1000E },
- { "x1500-pinctrl", ID_X1500 },
+static const struct of_device_id ingenic_pinctrl_of_match[] = {
+ { .compatible = "ingenic,jz4740-pinctrl", .data = &jz4740_chip_info },
+ { .compatible = "ingenic,jz4725b-pinctrl", .data = &jz4725b_chip_info },
+ { .compatible = "ingenic,jz4760-pinctrl", .data = &jz4760_chip_info },
+ { .compatible = "ingenic,jz4760b-pinctrl", .data = &jz4760_chip_info },
+ { .compatible = "ingenic,jz4770-pinctrl", .data = &jz4770_chip_info },
+ { .compatible = "ingenic,jz4780-pinctrl", .data = &jz4780_chip_info },
+ { .compatible = "ingenic,x1000-pinctrl", .data = &x1000_chip_info },
+ { .compatible = "ingenic,x1000e-pinctrl", .data = &x1000_chip_info },
+ { .compatible = "ingenic,x1500-pinctrl", .data = &x1500_chip_info },
+ { .compatible = "ingenic,x1830-pinctrl", .data = &x1830_chip_info },
{},
};
static struct platform_driver ingenic_pinctrl_driver = {
.driver = {
.name = "pinctrl-ingenic",
- .of_match_table = of_match_ptr(ingenic_pinctrl_of_match),
+ .of_match_table = ingenic_pinctrl_of_match,
},
- .id_table = ingenic_pinctrl_ids,
};
static int __init ingenic_pinctrl_drv_register(void)
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 215db220d795..617585be6a7d 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1229,8 +1229,8 @@ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
pinctrl_add_gpio_range(rza1_pctl->pctl, range);
- dev_info(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
- chip->label, chip->ngpio);
+ dev_dbg(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
+ chip->label, chip->ngpio);
return 0;
}
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index e914f6efd39e..9503ddf2edc7 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -85,7 +85,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin)
const struct pinmux_ops *ops = pctldev->desc->pmxops;
/* Can't inspect pin, assume it can be used */
- if (!desc)
+ if (!desc || !ops)
return true;
if (ops->strict && desc->mux_usecount)
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index 21c370dbbfba..bddf2c5dd3bf 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -10,6 +10,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
+#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5d6f9f61ce02..9a8daa256a32 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -960,7 +960,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned long flags;
/*
* While they may not wake up when the TLMM is powered off,
@@ -971,12 +970,8 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
if (d->parent_data)
irq_chip_set_wake_parent(d, on);
- raw_spin_lock_irqsave(&pctrl->lock, flags);
-
irq_set_irq_wake(pctrl->irq, on);
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index e1259ce27396..183f0b2d9f8e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -589,7 +589,7 @@ static const char * const blsp_uart5_groups[] = {
static const char * const qdss_traceclk_a_groups[] = {
"gpio46",
};
-const char * const m_voc_groups[] = {
+static const char * const m_voc_groups[] = {
"gpio123", "gpio124",
};
static const char * const blsp_i2c5_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index d6cfad7417b1..1b6465a882f2 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -456,14 +456,18 @@ enum sc7180_functions {
msm_mux_qspi_data,
msm_mux_qup00,
msm_mux_qup01,
- msm_mux_qup02,
+ msm_mux_qup02_i2c,
+ msm_mux_qup02_uart,
msm_mux_qup03,
- msm_mux_qup04,
+ msm_mux_qup04_i2c,
+ msm_mux_qup04_uart,
msm_mux_qup05,
msm_mux_qup10,
- msm_mux_qup11,
+ msm_mux_qup11_i2c,
+ msm_mux_qup11_uart,
msm_mux_qup12,
- msm_mux_qup13,
+ msm_mux_qup13_i2c,
+ msm_mux_qup13_uart,
msm_mux_qup14,
msm_mux_qup15,
msm_mux_sdc1_tb,
@@ -543,7 +547,10 @@ static const char * const sdc1_tb_groups[] = {
static const char * const sdc2_tb_groups[] = {
"gpio5",
};
-static const char * const qup11_groups[] = {
+static const char * const qup11_i2c_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const qup11_uart_groups[] = {
"gpio6", "gpio7",
};
static const char * const ddr_bist_groups[] = {
@@ -593,7 +600,10 @@ static const char * const qdss_groups[] = {
static const char * const pll_reset_groups[] = {
"gpio14",
};
-static const char * const qup02_groups[] = {
+static const char * const qup02_i2c_groups[] = {
+ "gpio15", "gpio16",
+};
+static const char * const qup02_uart_groups[] = {
"gpio15", "gpio16",
};
static const char * const cci_i2c_groups[] = {
@@ -698,7 +708,10 @@ static const char * const wlan1_adc1_groups[] = {
static const char * const atest_usb13_groups[] = {
"gpio44",
};
-static const char * const qup13_groups[] = {
+static const char * const qup13_i2c_groups[] = {
+ "gpio46", "gpio47",
+};
+static const char * const qup13_uart_groups[] = {
"gpio46", "gpio47",
};
static const char * const gcc_gp1_groups[] = {
@@ -848,7 +861,10 @@ static const char * const usb_phy_groups[] = {
static const char * const mss_lte_groups[] = {
"gpio108", "gpio109",
};
-static const char * const qup04_groups[] = {
+static const char * const qup04_i2c_groups[] = {
+ "gpio115", "gpio116",
+};
+static const char * const qup04_uart_groups[] = {
"gpio115", "gpio116",
};
@@ -929,14 +945,18 @@ static const struct msm_function sc7180_functions[] = {
FUNCTION(qspi_data),
FUNCTION(qup00),
FUNCTION(qup01),
- FUNCTION(qup02),
+ FUNCTION(qup02_i2c),
+ FUNCTION(qup02_uart),
FUNCTION(qup03),
- FUNCTION(qup04),
+ FUNCTION(qup04_i2c),
+ FUNCTION(qup04_uart),
FUNCTION(qup05),
FUNCTION(qup10),
- FUNCTION(qup11),
+ FUNCTION(qup11_i2c),
+ FUNCTION(qup11_uart),
FUNCTION(qup12),
- FUNCTION(qup13),
+ FUNCTION(qup13_i2c),
+ FUNCTION(qup13_uart),
FUNCTION(qup14),
FUNCTION(qup15),
FUNCTION(sdc1_tb),
@@ -976,8 +996,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[3] = PINGROUP(3, SOUTH, qup01, sp_cmu, dbg_out, qdss_cti, _, _, _, _, _),
[4] = PINGROUP(4, NORTH, sdc1_tb, _, qdss_cti, _, _, _, _, _, _),
[5] = PINGROUP(5, NORTH, sdc2_tb, _, _, _, _, _, _, _, _),
- [6] = PINGROUP(6, NORTH, qup11, qup11, _, _, _, _, _, _, _),
- [7] = PINGROUP(7, NORTH, qup11, qup11, ddr_bist, _, _, _, _, _, _),
+ [6] = PINGROUP(6, NORTH, qup11_i2c, qup11_uart, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, NORTH, qup11_i2c, qup11_uart, ddr_bist, _, _, _, _, _, _),
[8] = PINGROUP(8, NORTH, gp_pdm1, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _),
[9] = PINGROUP(9, NORTH, ddr_bist, _, phase_flag, qdss_cti, _, _, _, _, _),
[10] = PINGROUP(10, NORTH, mdp_vsync, ddr_bist, _, _, _, _, _, _, _),
@@ -985,8 +1005,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, qup01, _, phase_flag, wlan2_adc0, atest_usb10, ddr_pxi3, _),
[13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss, _, _, _, _, _, _),
[14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss, _, _, _, _, _, _),
- [15] = PINGROUP(15, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
- [16] = PINGROUP(16, SOUTH, cam_mclk, qup02, qup02, qdss, _, _, _, _, _),
+ [15] = PINGROUP(15, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
+ [16] = PINGROUP(16, SOUTH, cam_mclk, qup02_i2c, qup02_uart, qdss, _, _, _, _, _),
[17] = PINGROUP(17, SOUTH, cci_i2c, _, phase_flag, qdss, _, wlan1_adc0, atest_usb12, ddr_pxi1, atest_char),
[18] = PINGROUP(18, SOUTH, cci_i2c, agera_pll, _, phase_flag, qdss, vsense_trigger, ddr_pxi0, atest_char3, _),
[19] = PINGROUP(19, SOUTH, cci_i2c, _, phase_flag, qdss, atest_char2, _, _, _, _),
@@ -1016,8 +1036,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[43] = PINGROUP(43, NORTH, qup12, _, _, _, _, _, _, _, _),
[44] = PINGROUP(44, NORTH, qup12, _, phase_flag, qdss_cti, wlan1_adc1, atest_usb13, ddr_pxi1, _, _),
[45] = PINGROUP(45, NORTH, qup12, qdss_cti, _, _, _, _, _, _, _),
- [46] = PINGROUP(46, NORTH, qup13, qup13, _, _, _, _, _, _, _),
- [47] = PINGROUP(47, NORTH, qup13, qup13, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, NORTH, qup13_i2c, qup13_uart, _, _, _, _, _, _, _),
[48] = PINGROUP(48, NORTH, gcc_gp1, _, _, _, _, _, _, _, _),
[49] = PINGROUP(49, WEST, mi2s_1, btfm_slimbus, _, _, _, _, _, _, _),
[50] = PINGROUP(50, WEST, mi2s_1, btfm_slimbus, gp_pdm1, _, _, _, _, _, _),
@@ -1085,8 +1105,8 @@ static const struct msm_pingroup sc7180_groups[] = {
[112] = PINGROUP(112, NORTH, _, _, _, _, _, _, _, _, _),
[113] = PINGROUP(113, NORTH, _, _, _, _, _, _, _, _, _),
[114] = PINGROUP(114, NORTH, _, _, _, _, _, _, _, _, _),
- [115] = PINGROUP(115, WEST, qup04, qup04, _, _, _, _, _, _, _),
- [116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, WEST, qup04_i2c, qup04_uart, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
[119] = UFS_RESET(ufs_reset, 0x7f000),
@@ -1099,6 +1119,22 @@ static const struct msm_pingroup sc7180_groups[] = {
[126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
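+/* Each entry maps a TLMM GPIO number to its wake interrupt pin on the PDC */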
+static const struct msm_gpio_wakeirq_map sc7180_pdc_map[] = {
+ {0, 40}, {3, 50}, {4, 42}, {5, 70}, {6, 41}, {9, 35},
+ {10, 80}, {11, 51}, {16, 20}, {21, 55}, {22, 90}, {23, 21},
+ {24, 61}, {26, 52}, {28, 36}, {30, 100}, {31, 33}, {32, 81},
+ {33, 62}, {34, 43}, {36, 91}, {37, 53}, {38, 63}, {39, 72},
+ {41, 101}, {42, 7}, {43, 34}, {45, 73}, {47, 82}, {49, 17},
+ {52, 109}, {53, 102}, {55, 92}, {56, 56}, {57, 57}, {58, 83},
+ {59, 37}, {62, 110}, {63, 111}, {64, 74}, {65, 44}, {66, 93},
+ {67, 58}, {68, 112}, {69, 32}, {70, 54}, {72, 59}, {73, 64},
+ {74, 71}, {78, 31}, {82, 30}, {85, 103}, {86, 38}, {87, 39},
+ {88, 45}, {89, 46}, {90, 47}, {91, 48}, {92, 60}, {93, 49},
+ {94, 84}, {95, 94}, {98, 65}, {101, 66}, {104, 67}, {109, 104},
+ {110, 68}, {113, 69}, {114, 113}, {115, 108}, {116, 121},
+ {117, 114}, {118, 119},
+};
+
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.pins = sc7180_pins,
.npins = ARRAY_SIZE(sc7180_pins),
@@ -1109,6 +1145,8 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.ngpios = 120,
.tiles = sc7180_tiles,
.ntiles = ARRAY_SIZE(sc7180_tiles),
+ .wakeirq_map = sc7180_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
};
static int sc7180_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 653d1095bfea..fe0be8a6ebb7 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1060,7 +1060,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
- girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index dca86886b1f9..fba1d41d20ec 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -439,7 +439,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
.pin_config_group_set = pm8xxx_pin_config_set,
};
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
.name = "pm8xxx_gpio",
.pctlops = &pm8xxx_pinctrl_ops,
.pmxops = &pm8xxx_pinmux_ops,
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
- girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 3d8b1d74fa2f..681d8dcf37e3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -430,7 +430,7 @@ static const struct pinconf_ops pm8xxx_pinconf_ops = {
.pin_config_group_set = pm8xxx_pin_config_set,
};
-static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+static const struct pinctrl_desc pm8xxx_pinctrl_desc = {
.name = "pm8xxx_mpp",
.pctlops = &pm8xxx_pinctrl_ops,
.pmxops = &pm8xxx_pinmux_ops,
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 425fadd6c346..dfd805e76862 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,30 +4,34 @@
#
config PINCTRL_SAMSUNG
bool
+ depends on OF_GPIO
select PINMUX
select PINCONF
config PINCTRL_EXYNOS
- bool "Pinctrl driver data for Samsung EXYNOS SoCs"
- depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
+ bool "Pinctrl common driver part for Samsung Exynos SoCs"
+ depends on OF_GPIO
+ depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
config PINCTRL_EXYNOS_ARM
- bool "ARMv7-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ bool "ARMv7-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_EXYNOS_ARM64
- bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ bool "ARMv8-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX && OF
+ depends on OF_GPIO
+ depends on ARCH_S3C24XX || COMPILE_TEST
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
- depends on ARCH_S3C64XX
+ depends on OF_GPIO
+ depends on ARCH_S3C64XX || COMPILE_TEST
select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 28d66e7cb098..cf0e0dc42b84 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -26,8 +26,9 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
- select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77950 if ARCH_R8A77950 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77951 if ARCH_R8A77951 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960
select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
@@ -115,8 +116,11 @@ config PINCTRL_PFC_R8A7793
config PINCTRL_PFC_R8A7794
bool "R-Car E2 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7795
- bool "R-Car H3 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77950
+ bool "R-Car H3 ES1.x pin control support" if COMPILE_TEST
+
+config PINCTRL_PFC_R8A77951
+ bool "R-Car H3 ES2.0+ pin control support" if COMPILE_TEST
config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 3bc05666e1a6..9ebe321d24c4 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -18,8 +18,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7791) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7792) += pfc-r8a7792.o
obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77950) += pfc-r8a77950.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77951) += pfc-r8a77951.o
obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 65e52688f091..82209116955b 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include "core.h"
@@ -568,18 +569,18 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7794_pinmux_info,
},
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7795
+/* Both r8a7795 entries must be present to make sanity checks work */
+#ifdef CONFIG_PINCTRL_PFC_R8A77950
{
.compatible = "renesas,pfc-r8a7795",
- .data = &r8a7795_pinmux_info,
+ .data = &r8a77950_pinmux_info,
},
-#ifdef DEBUG
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
{
- /* For sanity checks only (nothing matches against this) */
- .compatible = "renesas,pfc-r8a77950", /* R-Car H3 ES1.0 */
- .data = &r8a7795es1_pinmux_info,
+ .compatible = "renesas,pfc-r8a7795",
+ .data = &r8a77951_pinmux_info,
},
-#endif /* DEBUG */
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
@@ -886,19 +887,49 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
static inline void sh_pfc_check_driver(struct platform_driver *pdrv) {}
#endif /* !DEBUG */
+#ifdef CONFIG_OF
+static const void *sh_pfc_quirk_match(void)
+{
+#if defined(CONFIG_PINCTRL_PFC_R8A77950) || \
+ defined(CONFIG_PINCTRL_PFC_R8A77951)
+ const struct soc_device_attribute *match;
+ static const struct soc_device_attribute quirks[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = &r8a77950_pinmux_info,
+ },
+ {
+ .soc_id = "r8a7795",
+ .data = &r8a77951_pinmux_info,
+ },
+
+ { /* sentinel */ }
+ };
+
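+ /*
+ * An R-Car H3 ES1.x SoC gets the r8a77950 pin tables, any other
+ * r8a7795 revision the r8a77951 ones; a quirk entry without data
+ * would be rejected with -ENODEV.
+ */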
+ match = soc_device_match(quirks);
+ if (match)
+ return match->data ?: ERR_PTR(-ENODEV);
+#endif /* CONFIG_PINCTRL_PFC_R8A77950 || CONFIG_PINCTRL_PFC_R8A77951 */
+
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
- struct device_node *np = pdev->dev.of_node;
-#endif
const struct sh_pfc_soc_info *info;
struct sh_pfc *pfc;
int ret;
#ifdef CONFIG_OF
- if (np)
- info = of_device_get_match_data(&pdev->dev);
- else
+ if (pdev->dev.of_node) {
+ info = sh_pfc_quirk_match();
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (!info)
+ info = of_device_get_match_data(&pdev->dev);
+ } else
#endif
info = (const void *)platform_get_device_id(pdev)->driver_data;
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 5a55b8da7919..8213e118aa40 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -386,12 +386,11 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
}
/* Register the function GPIOs chip. */
- if (pfc->info->nr_func_gpios == 0)
- return 0;
-
- chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
+ if (pfc->info->nr_func_gpios) {
+ chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ }
#endif /* CONFIG_PINCTRL_SH_FUNC_GPIO */
return 0;
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 24866a5958ae..a9875038ed9b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATAG0_A, 0, FN_REMOCON_B, 0,
/* IP0_11_8 [4] */
FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
+ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
FN_PWM4_B, 0, 0, 0,
0, 0, 0, 0,
/* IP0_7_5 [3] */
@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TS_SDAT0_A, 0, 0, 0,
0, 0, 0, 0,
/* IP1_10_8 [3] */
- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
+ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
/* IP1_7_5 [3] */
FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
index ad05da8f6516..04812e62f3a4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7795 ES1.x processor support - PFC hardware block.
+ * R8A77950 processor support - PFC hardware block.
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
@@ -5562,8 +5562,8 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
-static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
- u32 *pocctrl)
+static int r8a77950_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+ u32 *pocctrl)
{
int bit = -EINVAL;
@@ -5820,8 +5820,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
+static unsigned int r8a77950_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
@@ -5838,8 +5838,8 @@ static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
return PIN_CONFIG_BIAS_PULL_DOWN;
}
-static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void r8a77950_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
{
const struct pinmux_bias_reg *reg;
u32 enable, updown;
@@ -5861,15 +5861,15 @@ static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
sh_pfc_write(pfc, reg->puen, enable);
}
-static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
- .pin_to_pocctrl = r8a7795es1_pin_to_pocctrl,
- .get_bias = r8a7795es1_pinmux_get_bias,
- .set_bias = r8a7795es1_pinmux_set_bias,
+static const struct sh_pfc_soc_operations r8a77950_pinmux_ops = {
+ .pin_to_pocctrl = r8a77950_pin_to_pocctrl,
+ .get_bias = r8a77950_pinmux_get_bias,
+ .set_bias = r8a77950_pinmux_set_bias,
};
-const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
+const struct sh_pfc_soc_info r8a77950_pinmux_info = {
.name = "r8a77950_pfc",
- .ops = &r8a7795es1_pinmux_ops,
+ .ops = &r8a77950_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
index d3145aa135d0..256fab4b03d3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7795 ES2.0+ processor support - PFC hardware block.
+ * R8A77951 processor support - PFC hardware block.
*
* Copyright (C) 2015-2019 Renesas Electronics Corporation
*/
@@ -5915,7 +5915,8 @@ static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
{ /* sentinel */ },
};
-static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+static int r8a77951_pin_to_pocctrl(struct sh_pfc *pfc,
+ unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
@@ -6172,8 +6173,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
{ /* sentinel */ },
};
-static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
- unsigned int pin)
+static unsigned int r8a77951_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
{
const struct pinmux_bias_reg *reg;
unsigned int bit;
@@ -6190,8 +6191,8 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
return PIN_CONFIG_BIAS_PULL_DOWN;
}
-static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
- unsigned int bias)
+static void r8a77951_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
{
const struct pinmux_bias_reg *reg;
u32 enable, updown;
@@ -6213,29 +6214,15 @@ static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
sh_pfc_write(pfc, reg->puen, enable);
}
-static const struct soc_device_attribute r8a7795es1[] = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
- { /* sentinel */ }
+static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = {
+ .pin_to_pocctrl = r8a77951_pin_to_pocctrl,
+ .get_bias = r8a77951_pinmux_get_bias,
+ .set_bias = r8a77951_pinmux_set_bias,
};
-static int r8a7795_pinmux_init(struct sh_pfc *pfc)
-{
- if (soc_device_match(r8a7795es1))
- pfc->info = &r8a7795es1_pinmux_info;
-
- return 0;
-}
-
-static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = {
- .init = r8a7795_pinmux_init,
- .pin_to_pocctrl = r8a7795_pin_to_pocctrl,
- .get_bias = r8a7795_pinmux_get_bias,
- .set_bias = r8a7795_pinmux_set_bias,
-};
-
-const struct sh_pfc_soc_info r8a7795_pinmux_info = {
+const struct sh_pfc_soc_info r8a77951_pinmux_info = {
.name = "r8a77951_pfc",
- .ops = &r8a7795_pinmux_ops,
+ .ops = &r8a77951_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 8bdf33c807f6..6616f5210b9d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -5998,7 +5998,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
- { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
+ { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
{ PIN_FSCLKST, 20, 2 }, /* FSCLKST */
{ PIN_TMS, 4, 2 }, /* TMS */
} },
@@ -6254,8 +6254,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
} },
{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
- [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
- [ 1] = SH_PFC_PIN_NONE,
+ [ 0] = SH_PFC_PIN_NONE,
+ [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
[ 2] = PIN_FSCLKST, /* FSCLKST */
[ 3] = PIN_EXTALR, /* EXTALR */
[ 4] = PIN_TRST_N, /* TRST# */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 4a95867deb8a..908837ea487b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -497,17 +497,15 @@ enum {
SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
CRX0_MARK, CRX1_MARK,
CTX0_MARK, CTX1_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
IERXD_MARK, IETXD_MARK,
- CRX0_CRX1_MARK,
WDTOVF_MARK,
- CRX0X1_MARK,
-
/* DMAC */
TEND0_MARK, DACK0_MARK, DREQ0_MARK,
TEND1_MARK, DACK1_MARK, DREQ1_MARK,
@@ -995,12 +993,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(PJ3_DATA, PJ3MD_00),
PINMUX_DATA(CRX1_MARK, PJ3MD_01),
- PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(CTX1_MARK, PJ2MD_001),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+ PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
PINMUX_DATA(CS2_MARK, PJ2MD_011),
PINMUX_DATA(SCK0_MARK, PJ2MD_100),
PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
@@ -1245,6 +1243,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0),
GPIO_FN(CRX0_CRX1),
@@ -2020,18 +2019,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PKIOR0", 0xfffe3932, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
- PJ11_IN, PJ11_OUT,
- PJ10_IN, PJ10_OUT,
- PJ9_IN, PJ9_OUT,
- PJ8_IN, PJ8_OUT,
- PJ7_IN, PJ7_OUT,
- PJ6_IN, PJ6_OUT,
- PJ5_IN, PJ5_OUT,
- PJ4_IN, PJ4_OUT,
- PJ3_IN, PJ3_OUT,
- PJ2_IN, PJ2_OUT,
- PJ1_IN, PJ1_OUT,
- PJ0_IN, PJ0_OUT ))
+ PK11_IN, PK11_OUT,
+ PK10_IN, PK10_OUT,
+ PK9_IN, PK9_OUT,
+ PK8_IN, PK8_OUT,
+ PK7_IN, PK7_OUT,
+ PK6_IN, PK6_OUT,
+ PK5_IN, PK5_OUT,
+ PK4_IN, PK4_OUT,
+ PK3_IN, PK3_OUT,
+ PK2_IN, PK2_OUT,
+ PK1_IN, PK1_OUT,
+ PK0_IN, PK0_OUT ))
},
{}
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index 6cbb18ef77dc..d20974a55d93 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -737,13 +737,12 @@ enum {
CRX0_MARK, CTX0_MARK,
CRX1_MARK, CTX1_MARK,
CRX2_MARK, CTX2_MARK,
- CRX0_CRX1_MARK,
- CRX0_CRX1_CRX2_MARK,
- CTX0CTX1CTX2_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
+ CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
CRX1_PJ22_MARK, CTX1_PJ23_MARK,
CRX2_PJ20_MARK, CTX2_PJ21_MARK,
- CRX0CRX1_PJ22_MARK,
- CRX0CRX1CRX2_PJ20_MARK,
+ CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
+ CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
/* VDC */
DV_CLK_MARK,
@@ -821,6 +820,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CS3_MARK, PC8MD_001),
PINMUX_DATA(TXD7_MARK, PC8MD_010),
PINMUX_DATA(CTX1_MARK, PC8MD_011),
+ PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
PINMUX_DATA(PC7_DATA, PC7MD_000),
PINMUX_DATA(CKE_MARK, PC7MD_001),
@@ -833,11 +833,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CAS_MARK, PC6MD_001),
PINMUX_DATA(SCK7_MARK, PC6MD_010),
PINMUX_DATA(CTX0_MARK, PC6MD_011),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
PINMUX_DATA(PC5_DATA, PC5MD_000),
PINMUX_DATA(RAS_MARK, PC5MD_001),
PINMUX_DATA(CRX0_MARK, PC5MD_011),
- PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
PINMUX_DATA(PC4_DATA, PC4MD_00),
@@ -1289,30 +1290,32 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
- PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
- PINMUX_DATA(CRX1_MARK, PJ22MD_101),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+ PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
+ PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
- PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
- PINMUX_DATA(CRX2_MARK, PJ20MD_101),
- PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+ PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
+ PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
@@ -1663,12 +1666,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(WDTOVF),
/* CAN */
+ GPIO_FN(CTX2),
+ GPIO_FN(CRX2),
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
GPIO_FN(CRX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0_CRX1),
+ GPIO_FN(CTX0_CTX1_CTX2),
GPIO_FN(CRX0_CRX1_CRX2),
+ GPIO_FN(CTX2_PJ21),
+ GPIO_FN(CRX2_PJ20),
+ GPIO_FN(CTX1_PJ23),
+ GPIO_FN(CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_PJ23),
+ GPIO_FN(CRX0_CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_CTX2_PJ21),
+ GPIO_FN(CRX0_CRX1_CRX2_PJ20),
/* DMAC */
GPIO_FN(TEND0),
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 640d2a4cb838..d57e633e99c0 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -318,8 +318,8 @@ extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
extern const struct sh_pfc_soc_info r8a7792_pinmux_info;
extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77950_pinmux_info __weak;
+extern const struct sh_pfc_soc_info r8a77951_pinmux_info __weak;
extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index ec0d34c33903..b0882d120765 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics 2017
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
index a78d7b922ef4..31d62bbb7f43 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-sunxi.h"
@@ -549,7 +548,17 @@ static const struct sunxi_pinctrl_desc sun50i_h5_pinctrl_data = {
static int sun50i_h5_pinctrl_probe(struct platform_device *pdev)
{
- switch (of_irq_count(pdev->dev.of_node)) {
+ int ret;
+
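+ /*
+ * platform_irq_count() can legitimately return -EPROBE_DEFER, so
+ * only complain about other failures before bailing out.
+ */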
+ ret = platform_irq_count(pdev);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't determine irq count: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ switch (ret) {
case 2:
dev_warn(&pdev->dev,
"Your device tree's pinctrl node is broken, which has no IRQ of PG bank routed.\n");
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 692d8b3e2a20..cefbbb8d1a68 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -648,7 +648,7 @@ static int tegra_pinctrl_suspend(struct device *dev)
{
struct tegra_pmx *pmx = dev_get_drvdata(dev);
u32 *backup_regs = pmx->backup_regs;
- u32 *regs;
+ u32 __iomem *regs;
size_t bank_size;
unsigned int i, k;
@@ -666,7 +666,7 @@ static int tegra_pinctrl_resume(struct device *dev)
{
struct tegra_pmx *pmx = dev_get_drvdata(dev);
u32 *backup_regs = pmx->backup_regs;
- u32 *regs;
+ u32 __iomem *regs;
size_t bank_size;
unsigned int i, k;
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 8723bcf10c93..4f3651fcd9fe 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -63,7 +63,7 @@ struct acpi_peripheral {
struct chromeos_laptop {
/*
* Note that we can't mark this pointer as const because
- * i2c_new_probed_device() changes passed in I2C board info, so.
+ * i2c_new_scanned_device() changes the passed-in I2C board info.
*/
struct i2c_peripheral *i2c_peripherals;
unsigned int num_i2c_peripherals;
@@ -87,8 +87,8 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
* address we scan secondary addresses. In any case the client
* structure gets assigned primary address.
*/
- client = i2c_new_probed_device(adapter, info, addr_list, NULL);
- if (!client && alt_addr) {
+ client = i2c_new_scanned_device(adapter, info, addr_list, NULL);
+ if (IS_ERR(client) && alt_addr) {
struct i2c_board_info dummy_info = {
I2C_BOARD_INFO("dummy", info->addr),
};
@@ -97,9 +97,9 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
};
struct i2c_client *dummy;
- dummy = i2c_new_probed_device(adapter, &dummy_info,
- alt_addr_list, NULL);
- if (dummy) {
+ dummy = i2c_new_scanned_device(adapter, &dummy_info,
+ alt_addr_list, NULL);
+ if (!IS_ERR(dummy)) {
pr_debug("%d-%02x is probed at %02x\n",
adapter->nr, info->addr, dummy->addr);
i2c_unregister_device(dummy);
@@ -107,12 +107,14 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
}
}
- if (!client)
+ if (IS_ERR(client)) {
+ client = NULL;
pr_debug("failed to register device %d-%02x\n",
adapter->nr, info->addr);
- else
+ } else {
pr_debug("added i2c device %d-%02x\n",
adapter->nr, info->addr);
+ }
return client;
}
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 6d6ce86a1408..6fc8f2c3ac51 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -16,7 +16,8 @@
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/suspend.h>
-#include <asm/unaligned.h>
+
+#include "cros_ec.h"
#define CROS_EC_DEV_EC_INDEX 0
#define CROS_EC_DEV_PD_INDEX 1
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
new file mode 100644
index 000000000000..e69fc1ff68b4
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS Embedded Controller core interface.
+ *
+ * Copyright (C) 2020 Google LLC
+ */
+
+#ifndef __CROS_EC_H
+#define __CROS_EC_H
+
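+/*
+ * Entry points shared by the cros_ec transport drivers (I2C, LPC, ISHTP,
+ * ...): core registration, suspend/resume hooks and the IRQ/event path.
+ */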
+int cros_ec_register(struct cros_ec_device *ec_dev);
+int cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+int cros_ec_suspend(struct cros_ec_device *ec_dev);
+int cros_ec_resume(struct cros_ec_device *ec_dev);
+
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
+
+#endif /* __CROS_EC_H */
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 74ded441bb50..c65e70bc168d 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/notifier.h>
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 6ae484989d1f..ecfada00e6c5 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -7,7 +7,6 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 9bd97bc8454b..6119eccd8a18 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "cros_ec.h"
+
/**
* Request format for protocol v3
* byte 0 0xda (EC_COMMAND_PROTOCOL_3)
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e5996821d08b..93a71e93a2f1 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -14,6 +14,8 @@
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/intel-ish-client-if.h>
+#include "cros_ec.h"
+
/*
* ISH TX/RX ring buffer pool size
*
@@ -76,7 +78,7 @@ struct cros_ish_in_msg {
*
* The writers are the .reset() and .probe() functions.
*/
-DECLARE_RWSEM(init_lock);
+static DECLARE_RWSEM(init_lock);
/**
* struct response_info - Encapsulate firmware response related
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index c0f2eec35a48..b4c110c5fee0 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -8,7 +8,6 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kobject.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index dccf479c6625..1f7861944044 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -23,6 +23,7 @@
#include <linux/printk.h>
#include <linux/suspend.h>
+#include "cros_ec.h"
#include "cros_ec_lpc_mec.h"
#define DRV_NAME "cros_ec_lpcs"
@@ -396,7 +397,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
* Some boards do not have an IRQ allotted for cros_ec_lpc,
* which makes ENXIO an expected (and safe) scenario.
*/
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0)
ec_dev->irq = irq;
else if (irq != -ENXIO) {
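
platform_get_irq_optional() behaves like platform_get_irq() but stays silent when no IRQ resource exists, which matches the comment above: -ENXIO is expected on boards without an EC interrupt. A sketch of the idiom, assuming only the documented platform-device API:

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Optional-IRQ idiom: absence (-ENXIO) is fine, anything else is an error. */
static int get_optional_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq > 0)
		return irq;		/* IRQ present */
	if (irq == -ENXIO)
		return 0;		/* none wired up: fall back to polling */
	return irq;			/* real error, e.g. -EPROBE_DEFER */
}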
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index da1b1c450433..3cfa643f1d07 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -54,8 +54,6 @@ static int send_command(struct cros_ec_device *ec_dev,
int ret;
int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
- trace_cros_ec_cmd(msg);
-
if (ec_dev->proto_version > 2)
xfer_fxn = ec_dev->pkt_xfer;
else
@@ -72,7 +70,9 @@ static int send_command(struct cros_ec_device *ec_dev,
return -EIO;
}
+ trace_cros_ec_request_start(msg);
ret = (*xfer_fxn)(ec_dev, msg);
+ trace_cros_ec_request_done(msg, ret);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
struct cros_ec_command *status_msg;
@@ -95,7 +95,9 @@ static int send_command(struct cros_ec_device *ec_dev,
for (i = 0; i < EC_COMMAND_RETRIES; i++) {
usleep_range(10000, 11000);
+ trace_cros_ec_request_start(status_msg);
ret = (*xfer_fxn)(ec_dev, status_msg);
+ trace_cros_ec_request_done(status_msg, ret);
if (ret == -EAGAIN)
continue;
if (ret < 0)
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index bd068afe43b5..dbc3f5523b83 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -13,6 +13,8 @@
#include <linux/rpmsg.h>
#include <linux/slab.h>
+#include "cros_ec.h"
+
#define EC_MSG_TIMEOUT_MS 200
#define HOST_COMMAND_MARK 1
#define HOST_EVENT_MARK 2
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
index 04d8879689e9..79fefd3bb0fa 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_ec_sensorhub.h>
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index a831bd5a5b2f..46786d2d679a 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -14,6 +14,8 @@
#include <linux/spi/spi.h>
#include <uapi/linux/sched/types.h>
+#include "cros_ec.h"
+
/* The header byte, which follows the preamble */
#define EC_MSG_HEADER 0xec
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 74d36b8d4f46..07dac97ad57c 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -8,7 +8,6 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kobject.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 5af1d66d9eca..523a39bd0ff6 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -8,6 +8,11 @@
// Generate the list using the following script:
// sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h
#define EC_CMDS \
+ TRACE_SYMBOL(EC_CMD_ACPI_READ), \
+ TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_BURST_ENABLE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_BURST_DISABLE), \
+ TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
TRACE_SYMBOL(EC_CMD_HELLO), \
TRACE_SYMBOL(EC_CMD_GET_VERSION), \
@@ -22,6 +27,8 @@
TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
+ TRACE_SYMBOL(EC_CMD_GET_SKU_ID), \
+ TRACE_SYMBOL(EC_CMD_SET_SKU_ID), \
TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
TRACE_SYMBOL(EC_CMD_FLASH_READ), \
TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
@@ -29,6 +36,8 @@
TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
+ TRACE_SYMBOL(EC_CMD_FLASH_SPI_INFO), \
+ TRACE_SYMBOL(EC_CMD_FLASH_SELECT), \
TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
@@ -40,6 +49,8 @@
TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
+ TRACE_SYMBOL(EC_CMD_FORCE_LID_OPEN), \
+ TRACE_SYMBOL(EC_CMD_CONFIG_POWER_BUTTON), \
TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
@@ -50,6 +61,9 @@
TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
TRACE_SYMBOL(EC_CMD_PORT80_READ), \
+ TRACE_SYMBOL(EC_CMD_VSTORE_INFO), \
+ TRACE_SYMBOL(EC_CMD_VSTORE_READ), \
+ TRACE_SYMBOL(EC_CMD_VSTORE_WRITE), \
TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
@@ -59,10 +73,12 @@
TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
+ TRACE_SYMBOL(EC_CMD_GET_KEYBOARD_ID), \
TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
+ TRACE_SYMBOL(EC_CMD_KEYBOARD_FACTORY_TEST), \
TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
@@ -73,6 +89,7 @@
TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
+ TRACE_SYMBOL(EC_CMD_HOST_EVENT), \
TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
TRACE_SYMBOL(EC_CMD_GPIO_SET), \
@@ -92,36 +109,102 @@
TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT), \
+ TRACE_SYMBOL(EC_CMD_HIBERNATION_DELAY), \
TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
+ TRACE_SYMBOL(EC_CMD_DEVICE_EVENT), \
TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
+ TRACE_SYMBOL(EC_CMD_SB_FW_UPDATE), \
+ TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
+ TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
+ TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
+ TRACE_SYMBOL(EC_CMD_CEC_SET), \
+ TRACE_SYMBOL(EC_CMD_CEC_GET), \
TRACE_SYMBOL(EC_CMD_EC_CODEC), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
- TRACE_SYMBOL(EC_CMD_ACPI_READ), \
- TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
- TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
- TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
- TRACE_SYMBOL(EC_CMD_CEC_SET), \
- TRACE_SYMBOL(EC_CMD_CEC_GET), \
TRACE_SYMBOL(EC_CMD_REBOOT), \
TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
TRACE_SYMBOL(EC_CMD_VERSION0), \
TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
+ TRACE_SYMBOL(EC_CMD_PD_HOST_EVENT_STATUS), \
TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_FW_UPDATE), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_RW_HASH_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_DEV_INFO), \
TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
- TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO)
+ TRACE_SYMBOL(EC_CMD_USB_PD_GET_AMODE), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_SET_AMODE), \
+ TRACE_SYMBOL(EC_CMD_PD_WRITE_LOG_ENTRY), \
+ TRACE_SYMBOL(EC_CMD_PD_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO), \
+ TRACE_SYMBOL(EC_CMD_PD_CHIP_INFO), \
+ TRACE_SYMBOL(EC_CMD_RWSIG_CHECK_STATUS), \
+ TRACE_SYMBOL(EC_CMD_RWSIG_ACTION), \
+ TRACE_SYMBOL(EC_CMD_EFS_VERIFY), \
+ TRACE_SYMBOL(EC_CMD_GET_CROS_BOARD_INFO), \
+ TRACE_SYMBOL(EC_CMD_SET_CROS_BOARD_INFO), \
+ TRACE_SYMBOL(EC_CMD_GET_UPTIME_INFO), \
+ TRACE_SYMBOL(EC_CMD_ADD_ENTROPY), \
+ TRACE_SYMBOL(EC_CMD_ADC_READ), \
+ TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
+ TRACE_SYMBOL(EC_CMD_AP_RESET), \
+ TRACE_SYMBOL(EC_CMD_CR51_BASE), \
+ TRACE_SYMBOL(EC_CMD_CR51_LAST), \
+ TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
+ TRACE_SYMBOL(EC_CMD_FP_MODE), \
+ TRACE_SYMBOL(EC_CMD_FP_INFO), \
+ TRACE_SYMBOL(EC_CMD_FP_FRAME), \
+ TRACE_SYMBOL(EC_CMD_FP_TEMPLATE), \
+ TRACE_SYMBOL(EC_CMD_FP_CONTEXT), \
+ TRACE_SYMBOL(EC_CMD_FP_STATS), \
+ TRACE_SYMBOL(EC_CMD_FP_SEED), \
+ TRACE_SYMBOL(EC_CMD_FP_ENC_STATUS), \
+ TRACE_SYMBOL(EC_CMD_TP_SELF_TEST), \
+ TRACE_SYMBOL(EC_CMD_TP_FRAME_INFO), \
+ TRACE_SYMBOL(EC_CMD_TP_FRAME_SNAPSHOT), \
+ TRACE_SYMBOL(EC_CMD_TP_FRAME_GET), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
+ TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
+ TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
+ TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
+ TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
+
+/* See the enum ec_status in include/linux/platform_data/cros_ec_commands.h */
+#define EC_RESULT \
+ TRACE_SYMBOL(EC_RES_SUCCESS), \
+ TRACE_SYMBOL(EC_RES_INVALID_COMMAND), \
+ TRACE_SYMBOL(EC_RES_ERROR), \
+ TRACE_SYMBOL(EC_RES_INVALID_PARAM), \
+ TRACE_SYMBOL(EC_RES_ACCESS_DENIED), \
+ TRACE_SYMBOL(EC_RES_INVALID_RESPONSE), \
+ TRACE_SYMBOL(EC_RES_INVALID_VERSION), \
+ TRACE_SYMBOL(EC_RES_INVALID_CHECKSUM), \
+ TRACE_SYMBOL(EC_RES_IN_PROGRESS), \
+ TRACE_SYMBOL(EC_RES_UNAVAILABLE), \
+ TRACE_SYMBOL(EC_RES_TIMEOUT), \
+ TRACE_SYMBOL(EC_RES_OVERFLOW), \
+ TRACE_SYMBOL(EC_RES_INVALID_HEADER), \
+ TRACE_SYMBOL(EC_RES_REQUEST_TRUNCATED), \
+ TRACE_SYMBOL(EC_RES_RESPONSE_TOO_BIG), \
+ TRACE_SYMBOL(EC_RES_BUS_ERROR), \
+ TRACE_SYMBOL(EC_RES_BUSY), \
+ TRACE_SYMBOL(EC_RES_INVALID_HEADER_VERSION), \
+ TRACE_SYMBOL(EC_RES_INVALID_HEADER_CRC), \
+ TRACE_SYMBOL(EC_RES_INVALID_DATA_CRC), \
+ TRACE_SYMBOL(EC_RES_DUP_UNAVAILABLE)
#define CREATE_TRACE_POINTS
#include "cros_ec_trace.h"
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 0dd4df30fa89..e9fb05f89ef0 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -18,7 +18,7 @@
#include <linux/tracepoint.h>
-DECLARE_EVENT_CLASS(cros_ec_cmd_class,
+TRACE_EVENT(cros_ec_request_start,
TP_PROTO(struct cros_ec_command *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
@@ -33,10 +33,26 @@ DECLARE_EVENT_CLASS(cros_ec_cmd_class,
__print_symbolic(__entry->command, EC_CMDS))
);
-
-DEFINE_EVENT(cros_ec_cmd_class, cros_ec_cmd,
- TP_PROTO(struct cros_ec_command *cmd),
- TP_ARGS(cmd)
+TRACE_EVENT(cros_ec_request_done,
+ TP_PROTO(struct cros_ec_command *cmd, int retval),
+ TP_ARGS(cmd, retval),
+ TP_STRUCT__entry(
+ __field(uint32_t, version)
+ __field(uint32_t, command)
+ __field(uint32_t, result)
+ __field(int, retval)
+ ),
+ TP_fast_assign(
+ __entry->version = cmd->version;
+ __entry->command = cmd->command;
+ __entry->result = cmd->result;
+ __entry->retval = retval;
+ ),
+ TP_printk("version: %u, command: %s, ec result: %s, retval: %d",
+ __entry->version,
+ __print_symbolic(__entry->command, EC_CMDS),
+ __print_symbolic(__entry->result, EC_RESULT),
+ __entry->retval)
);
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index f11a1283e5c8..8edae465105c 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -6,7 +6,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 374cdd1e868a..7de3ea75ef46 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -6,7 +6,6 @@
*/
#include <linux/ktime.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 365f30e116ee..49e8530ca0ac 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config WILCO_EC
tristate "ChromeOS Wilco Embedded Controller"
- depends on ACPI && X86 && CROS_EC_LPC && LEDS_CLASS
+ depends on X86 || COMPILE_TEST
+ depends on ACPI && CROS_EC_LPC && LEDS_CLASS
help
If you say Y here, you get support for talking to the ChromeOS
Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 5210c357feef..5b42992bff38 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -94,7 +94,7 @@ static int wilco_ec_probe(struct platform_device *pdev)
ret = wilco_ec_add_sysfs(ec);
if (ret < 0) {
- dev_err(dev, "Failed to create sysfs entries: %d", ret);
+ dev_err(dev, "Failed to create sysfs entries: %d\n", ret);
goto unregister_rtc;
}
@@ -137,9 +137,9 @@ static int wilco_ec_remove(struct platform_device *pdev)
{
struct wilco_ec_device *ec = platform_get_drvdata(pdev);
+ platform_device_unregister(ec->telem_pdev);
platform_device_unregister(ec->charger_pdev);
wilco_ec_remove_sysfs(ec);
- platform_device_unregister(ec->telem_pdev);
platform_device_unregister(ec->rtc_pdev);
if (ec->debugfs_pdev)
platform_device_unregister(ec->debugfs_pdev);
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
index bb0edf51dfda..6ce9c6782065 100644
--- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -69,17 +69,10 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0) {
dev_err(ec->dev,
- "Failed sending keyboard LEDs command: %d", ret);
+ "Failed sending keyboard LEDs command: %d\n", ret);
return ret;
}
- if (response->status) {
- dev_err(ec->dev,
- "EC reported failure sending keyboard LEDs command: %d",
- response->status);
- return -EIO;
- }
-
return 0;
}
@@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
{
struct wilco_keyboard_leds_msg request;
struct wilco_keyboard_leds_msg response;
+ int ret;
memset(&request, 0, sizeof(request));
request.command = WILCO_EC_COMMAND_KBBL;
@@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
request.mode = WILCO_KBBL_MODE_FLAG_PWM;
request.percent = brightness;
- return send_kbbl_msg(ec, &request, &response);
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d\n",
+ response.status);
+ return -EIO;
+ }
+
+ return 0;
}
static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
@@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec)
if (ret < 0)
return ret;
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d\n",
+ response.status);
+ return -EIO;
+ }
+
if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
return response.percent;
@@ -167,7 +179,7 @@ int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
ret = kbbl_exist(ec, &leds_exist);
if (ret < 0) {
dev_err(ec->dev,
- "Failed checking keyboard LEDs support: %d", ret);
+ "Failed checking keyboard LEDs support: %d\n", ret);
return ret;
}
if (!leds_exist)
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index ced1f9f3dcee..0f98358ea824 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -163,13 +163,13 @@ static int wilco_ec_transfer(struct wilco_ec_device *ec,
}
if (rs->data_size != EC_MAILBOX_DATA_SIZE) {
- dev_dbg(ec->dev, "unexpected packet size (%u != %u)",
+ dev_dbg(ec->dev, "unexpected packet size (%u != %u)\n",
rs->data_size, EC_MAILBOX_DATA_SIZE);
return -EMSGSIZE;
}
if (rs->data_size < msg->response_size) {
- dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)",
+ dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)\n",
rs->data_size, msg->response_size);
return -EMSGSIZE;
}
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index 1176d543191a..e06d96fb9426 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -367,7 +367,7 @@ static int telem_device_probe(struct platform_device *pdev)
minor = ida_alloc_max(&telem_ida, TELEM_MAX_DEV-1, GFP_KERNEL);
if (minor < 0) {
error = minor;
- dev_err(&pdev->dev, "Failed to find minor number: %d", error);
+ dev_err(&pdev->dev, "Failed to find minor number: %d\n", error);
return error;
}
@@ -427,14 +427,14 @@ static int __init telem_module_init(void)
ret = class_register(&telem_class);
if (ret) {
- pr_err(DRV_NAME ": Failed registering class: %d", ret);
+ pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
return ret;
}
/* Request the kernel for device numbers, starting with minor=0 */
ret = alloc_chrdev_region(&dev_num, 0, TELEM_MAX_DEV, TELEM_DEV_NAME);
if (ret) {
- pr_err(DRV_NAME ": Failed allocating dev numbers: %d", ret);
+ pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
goto destroy_class;
}
telem_major = MAJOR(dev_num);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index cef0133aa47a..1ab207ec9c94 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -257,12 +257,12 @@ static int goldfish_pipe_error_convert(int status)
}
}
-static int pin_user_pages(unsigned long first_page,
- unsigned long last_page,
- unsigned int last_page_size,
- int is_write,
- struct page *pages[MAX_BUFFERS_PER_COMMAND],
- unsigned int *iter_last_page_size)
+static int goldfish_pin_pages(unsigned long first_page,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
{
int ret;
int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
@@ -274,7 +274,7 @@ static int pin_user_pages(unsigned long first_page,
*iter_last_page_size = last_page_size;
}
- ret = get_user_pages_fast(first_page, requested_pages,
+ ret = pin_user_pages_fast(first_page, requested_pages,
!is_write ? FOLL_WRITE : 0,
pages);
if (ret <= 0)
@@ -285,18 +285,6 @@ static int pin_user_pages(unsigned long first_page,
return ret;
}
-static void release_user_pages(struct page **pages, int pages_count,
- int is_write, s32 consumed_size)
-{
- int i;
-
- for (i = 0; i < pages_count; i++) {
- if (!is_write && consumed_size > 0)
- set_page_dirty(pages[i]);
- put_page(pages[i]);
- }
-}
-
/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
int pages_count,
@@ -354,9 +342,9 @@ static int transfer_max_buffers(struct goldfish_pipe *pipe,
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
- pages_count = pin_user_pages(first_page, last_page,
- last_page_size, is_write,
- pipe->pages, &iter_last_page_size);
+ pages_count = goldfish_pin_pages(first_page, last_page,
+ last_page_size, is_write,
+ pipe->pages, &iter_last_page_size);
if (pages_count < 0) {
mutex_unlock(&pipe->lock);
return pages_count;
@@ -372,7 +360,8 @@ static int transfer_max_buffers(struct goldfish_pipe *pipe,
*consumed_size = pipe->command_buffer->rw_params.consumed_size;
- release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
+ unpin_user_pages_dirty_lock(pipe->pages, pages_count,
+ !is_write && *consumed_size > 0);
mutex_unlock(&pipe->lock);
return 0;
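
The conversion above pairs pin_user_pages_fast() with unpin_user_pages_dirty_lock(), which folds the removed open-coded set_page_dirty()/put_page() loop into one call. A minimal sketch of the pairing, with the hardware access elided:

#include <linux/mm.h>

/* Pin 'nr' user pages, let the device touch them, then release them with
 * dirty handling in one call. write_to_pages mirrors !is_write above. */
static int pinned_io(unsigned long uaddr, struct page **pages, int nr,
		     bool write_to_pages)
{
	int got = pin_user_pages_fast(uaddr, nr,
				      write_to_pages ? FOLL_WRITE : 0, pages);

	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... device consumes or fills the pinned pages here ... */

	unpin_user_pages_dirty_lock(pages, got, write_to_pages);
	return 0;
}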
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index 61753b648506..5d21c6adf1ab 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -309,7 +309,7 @@ static struct platform_driver mlxbf_bootctl_driver = {
.probe = mlxbf_bootctl_probe,
.driver = {
.name = "mlxbf-bootctl",
- .groups = mlxbf_bootctl_groups,
+ .dev_groups = mlxbf_bootctl_groups,
.acpi_match_table = mlxbf_bootctl_acpi_ids,
}
};
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 9a5c9fd2dbc6..5739a9669b29 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
* @work: work struct for deferred process
* @timer: background timer
* @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin lock
* @is_ready: ready flag
*/
struct mlxbf_tmfifo {
@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
struct work_struct work;
struct timer_list timer;
struct mlxbf_tmfifo_vring *vring[2];
- spinlock_t spin_lock; /* spin lock */
+ spinlock_t spin_lock[2]; /* spin lock */
bool is_ready;
};
@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
/* Use spin-lock to protect the 'cons->tx_buf'. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
while (size > 0) {
addr = cons->tx_buf.buf + cons->tx_buf.tail;
@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
}
}
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
/* Rx/Tx one word in the descriptor buffer. */
@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
fifo->vring[is_rx] = NULL;
/* Notify upper layer that packet is done. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
}
mlxbf_tmfifo_desc_done:
@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
* worker handler.
*/
if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
if (!fifo)
return -ENOMEM;
- spin_lock_init(&fifo->spin_lock);
+ spin_lock_init(&fifo->spin_lock[0]);
+ spin_lock_init(&fifo->spin_lock[1]);
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
mutex_init(&fifo->lock);
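
Splitting the single spin_lock into a two-entry array keyed by direction lets Rx and Tx completions run without contending on one lock; the console Tx path keeps using index 0. A reduced sketch of the pattern:

#include <linux/spinlock.h>

struct two_way_fifo {
	spinlock_t lock[2];	/* [0] = Tx, [1] = Rx */
};

static void fifo_complete(struct two_way_fifo *f, int is_rx)
{
	unsigned long flags;

	spin_lock_irqsave(&f->lock[is_rx], flags);
	/* ... deliver the completion for this direction only ... */
	spin_unlock_irqrestore(&f->lock[is_rx], flags);
}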
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 706207d192ae..77be37a1fbcf 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
item = pdata->items;
for (i = 0; i < pdata->counter; i++, item++) {
+ if (item->capability) {
+ /*
+ * Read group capability register to get actual number
+ * of interrupt capable components and set group mask
+ * accordingly.
+ */
+ ret = regmap_read(priv->regmap, item->capability,
+ &regval);
+ if (ret)
+ goto out;
+
+ item->mask = GENMASK((regval & item->mask) - 1, 0);
+ }
+
/* Clear group presense event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
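
A worked example of the capability arithmetic above, with invented register values: if the capability register reports 4 interrupt-capable components (regval & item->mask == 4), then GENMASK(4 - 1, 0) == 0x0f, i.e. only the four low event bits stay armed.

#include <linux/bits.h>
#include <linux/types.h>

/* count comes from the capability register; 0 disables the group. */
static u32 mask_from_capability(u32 count)
{
	return count ? GENMASK(count - 1, 0) : 0;
}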
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index f4d0a86c00d0..5e77b0dc5fd6 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -18,7 +18,7 @@ if MIPS_PLATFORM_DEVICES
config CPU_HWMON
tristate "Loongson-3 CPU HWMon Driver"
- depends on CONFIG_MACH_LOONGSON64
+ depends on MACH_LOONGSON64
select HWMON
default y
help
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 27d5b40fb717..587403c44598 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -997,7 +997,6 @@ config INTEL_SCU_IPC
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
depends on INTEL_SCU_IPC
- default y
---help---
The IPC Util driver provides an interface with the SCU enabling
low level access for debug work and updating the firmware. Say
@@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM
depends on PCI && IOSF_MBI && PM
help
Power-management driver for Intel's Image Signal Processor found on
- Bay and Cherry Trail devices. This dummy driver's sole purpose is to
- turn the ISP off (put it in D3) to save power and to allow entering
- of S0ix modes.
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+ entering of S0ix modes.
To compile this driver as a module, choose M here: the module
will be called intel_atomisp2_pm.
@@ -1337,6 +1336,17 @@ config PCENGINES_APU2
To compile this driver as a module, choose M here: the module
will be called pcengines-apuv2.
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of uncore frequency limits on
+ supported server platforms.
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
source "drivers/platform/x86/intel_speed_select_if/Kconfig"
config SYSTEM76_ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 42d85a00be4e..3747b1f07cf1 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index b361c73636a4..6f12747a359a 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 821b08e01635..612ef5526226 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -33,9 +33,9 @@
#include <linux/seq_file.h>
#include <linux/platform_data/x86/asus-wmi.h>
#include <linux/platform_device.h>
-#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/units.h>
#include <acpi/battery.h>
#include <acpi/video.h>
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
#define NOTIFY_KBD_FBM 0x99
+#define NOTIFY_KBD_TTP 0xae
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
@@ -81,6 +82,10 @@ MODULE_LICENSE("GPL");
#define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02
#define ASUS_FAN_BOOST_MODES_MASK 0x03
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
+
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -198,6 +203,9 @@ struct asus_wmi {
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ bool throttle_thermal_policy_available;
+ u8 throttle_thermal_policy_mode;
+
// The RSOC controls the maximum charging percentage.
bool battery_rsoc_available;
@@ -512,13 +520,7 @@ static void kbd_led_update(struct asus_wmi *asus)
{
int ctrl_param = 0;
- /*
- * bits 0-2: level
- * bit 7: light on/off
- */
- if (asus->kbd_led_wk > 0)
- ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}
@@ -1512,9 +1514,8 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
if (err < 0)
return err;
- value = DECI_KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
-
- return sprintf(buf, "%d\n", value);
+ return sprintf(buf, "%ld\n",
+ deci_kelvin_to_millicelsius(value & 0xFFFF));
}
/* Fan1 */
@@ -1724,6 +1725,107 @@ static ssize_t fan_boost_mode_store(struct device *dev,
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(fan_boost_mode);
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->throttle_thermal_policy_available = false;
+
+ err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+ asus->throttle_thermal_policy_available = true;
+
+ return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+ int err;
+ u8 value;
+ u32 retval;
+
+ value = asus->throttle_thermal_policy_mode;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ value, &retval);
+ if (err) {
+ pr_warn("Failed to set throttle thermal policy: %d\n", err);
+ return err;
+ }
+
+ if (retval != 1) {
+ pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+ retval);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+ u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ return throttle_thermal_policy_write(asus);
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mode = asus->throttle_thermal_policy_mode;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result;
+ u8 new_mode;
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou8(buf, 10, &new_mode);
+ if (result < 0)
+ return result;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ return -EINVAL;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ throttle_thermal_policy_write(asus);
+
+ return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
+
/* Backlight ******************************************************************/
static int read_backlight_power(struct asus_wmi *asus)
@@ -2005,6 +2107,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
+ if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
+ throttle_thermal_policy_switch_next(asus);
+ return;
+ }
+
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
return;
@@ -2155,6 +2262,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
NULL
};
@@ -2178,6 +2286,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_ALS_ENABLE;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+ ok = asus->throttle_thermal_policy_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2437,6 +2547,12 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_fan_boost_mode;
+ err = throttle_thermal_policy_check_present(asus);
+ if (err)
+ goto fail_throttle_thermal_policy;
+ else
+ throttle_thermal_policy_set_default(asus);
+
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
@@ -2521,6 +2637,7 @@ fail_hwmon:
fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
+fail_throttle_thermal_policy:
fail_fan_boost_mode:
fail_platform:
kfree(asus);
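
One detail worth noting in the new store handler above: the return value of throttle_thermal_policy_write() is dropped, so a failed WMI write still reports success to userspace. A hedged variant that propagates the error (the behaviour change is an assumption for illustration, not part of the patch):

static ssize_t throttle_thermal_policy_store_checked(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf, size_t count)
{
	struct asus_wmi *asus = dev_get_drvdata(dev);
	u8 new_mode;
	int err;

	err = kstrtou8(buf, 10, &new_mode);
	if (err < 0)
		return err;

	if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
		return -EINVAL;

	asus->throttle_thermal_policy_mode = new_mode;
	err = throttle_thermal_policy_write(asus);
	if (err)
		return err;

	return count;
}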
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index be85ed966bf3..b471b86c28fe 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -16,17 +16,27 @@
#define MAX_SPEED 3
-static int temp_limits[3] = { 55000, 60000, 65000 };
+#define TEMP_LIMIT0_DEFAULT 55000
+#define TEMP_LIMIT1_DEFAULT 60000
+#define TEMP_LIMIT2_DEFAULT 65000
+
+#define HYSTERESIS_DEFAULT 3000
+
+#define SPEED_ON_AC_DEFAULT 2
+
+static int temp_limits[3] = {
+ TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
module_param_array(temp_limits, int, NULL, 0444);
MODULE_PARM_DESC(temp_limits,
"Millicelsius values above which the fan speed increases");
-static int hysteresis = 3000;
+static int hysteresis = HYSTERESIS_DEFAULT;
module_param(hysteresis, int, 0444);
MODULE_PARM_DESC(hysteresis,
"Hysteresis in millicelsius before lowering the fan speed");
-static int speed_on_ac = 2;
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
module_param(speed_on_ac, int, 0444);
MODULE_PARM_DESC(speed_on_ac,
"minimum fan speed to allow when system is powered by AC");
@@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
- if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+ if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n",
temp_limits[i]);
- return -EINVAL;
+ temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+ temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+ temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+ break;
}
}
if (hysteresis < 1000 || hysteresis > 10000) {
dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
hysteresis);
- return -EINVAL;
+ hysteresis = HYSTERESIS_DEFAULT;
}
if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
speed_on_ac);
- return -EINVAL;
+ speed_on_ac = SPEED_ON_AC_DEFAULT;
}
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
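
The probe now falls back to the built-in defaults instead of failing when a module parameter is out of range. The same validate-or-fallback step can be factored as below (helper name is an assumption):

#include <linux/device.h>

/* Reject out-of-range values but keep the driver usable with the default. */
static void param_or_default(struct device *dev, const char *name,
			     int *val, int min, int max, int def)
{
	if (*val < min || *val > max) {
		dev_err(dev, "Invalid %s %d (must be between %d and %d), using %d\n",
			name, *val, min, max, def);
		*val = def;
	}
}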
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 9579a706fc08..a881b709af25 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -300,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void)
static int __init hp_wmi_bios_2009_later(void)
{
- int state = 0;
+ u8 state[128];
int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
sizeof(state), sizeof(state));
if (!ret)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ef6d4bd77b1a..43d590250228 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -19,6 +19,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
+ {"INT1051", 0},
{"INT33D5", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
new file mode 100644
index 000000000000..2b1a0734c3f8
--- /dev/null
+++ b/drivers/platform/x86/intel-uncore-frequency.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provide an interface to set MSR 0x620 at per-die granularity. On CPU online,
+ * one control CPU per die is identified to read/write the limit, and it is
+ * replaced if it goes offline. When the last CPU in a die goes offline, the
+ * sysfs object for that die is removed.
+ * The majority of the code creates the sysfs objects and implements their
+ * read/write attribute callbacks.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_UNCORE_RATIO_LIMIT 0x620
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
+
+/**
+ * struct uncore_data - Encapsulate all uncore data
+ * @stored_uncore_data: Last user changed MSR 620 value, which will be restored
+ * on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu: Designated CPU for a die to read/write
+ * @valid: Mark the data valid/invalid
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+ struct kobject kobj;
+ u64 stored_uncore_data;
+ u32 initial_min_freq_khz;
+ u32 initial_max_freq_khz;
+ int control_cpu;
+ bool valid;
+};
+
+#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Root of all uncore sysfs kobjs */
+struct kobject uncore_root_kobj;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+
+struct uncore_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *kobj,
+ struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_uncore_ro(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_uncore_rw(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define show_uncore_data(member_name) \
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct attribute *attr, \
+ char *buf) \
+ { \
+ struct uncore_data *data = to_uncore_data(kobj); \
+ return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ data->member_name); \
+ } \
+ define_one_uncore_ro(member_name)
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+/* Common function to read MSR 0x620 and extract the min/max ratios */
+static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ u64 cap;
+ int ret;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
+/* Common function to set min/max ratios to be used by sysfs callbacks */
+static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
+ int set_max)
+{
+ int ret;
+ u64 cap;
+
+ mutex_lock(&uncore_lock);
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > 0x7F) {
+ ret = -EINVAL;
+ goto finish_write;
+ }
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ goto finish_write;
+
+ if (set_max) {
+ cap &= ~0x7F;
+ cap |= input;
+ } else {
+ cap &= ~GENMASK(14, 8);
+ cap |= (input << 8);
+ }
+
+ ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ if (ret)
+ goto finish_write;
+
+ data->stored_uncore_data = cap;
+
+finish_write:
+ mutex_unlock(&uncore_lock);
+
+ return ret;
+}
+
+static ssize_t store_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, ssize_t count,
+ int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int input;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ uncore_write_ratio(data, input, min_max);
+
+ return count;
+}
+
+static ssize_t show_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf, int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int min, max;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read_ratio(data, &min, &max);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ if (min_max)
+ return sprintf(buf, "%u\n", max);
+
+ return sprintf(buf, "%u\n", min);
+}
+
+#define store_uncore_min_max(name, min_max) \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct attribute *attr, \
+ const char *buf, ssize_t count) \
+ { \
+ \
+ return store_min_max_freq_khz(kobj, attr, buf, count, \
+ min_max); \
+ }
+
+#define show_uncore_min_max(name, min_max) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct attribute *attr, char *buf) \
+ { \
+ \
+ return show_min_max_freq_khz(kobj, attr, buf, min_max); \
+ }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+define_one_uncore_rw(min_freq_khz);
+define_one_uncore_rw(max_freq_khz);
+
+static struct attribute *uncore_attrs[] = {
+ &initial_min_freq_khz.attr,
+ &initial_max_freq_khz.attr,
+ &max_freq_khz.attr,
+ &min_freq_khz.attr,
+ NULL
+};
+
+static struct kobj_type uncore_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uncore_attrs,
+};
+
+static struct kobj_type uncore_root_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < uncore_max_entries)
+ return &uncore_instances[id];
+
+ return NULL;
+}
+
+static void uncore_add_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (!data) {
+ mutex_unlock(&uncore_lock);
+ return;
+ }
+
+ if (data->valid) {
+ /* control cpu changed */
+ data->control_cpu = cpu;
+ } else {
+ char str[64];
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+ sprintf(str, "package_%02d_die_%02d",
+ topology_physical_package_id(cpu),
+ topology_die_id(cpu));
+
+ uncore_read_ratio(data, &data->initial_min_freq_khz,
+ &data->initial_max_freq_khz);
+
+ ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
+ &uncore_root_kobj, str);
+ if (!ret) {
+ data->control_cpu = cpu;
+ data->valid = true;
+ }
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+/* Last CPU in this die is offline, so remove sysfs entries */
+static void uncore_remove_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (data) {
+ kobject_put(&data->kobj);
+ data->control_cpu = -1;
+ data->valid = false;
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+ int target;
+
+ /* Check if there is an online cpu in the die for uncore MSR */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+ uncore_add_die_entry(cpu);
+
+ return 0;
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+ int target;
+
+ /* Check if existing cpu is used for uncore MSRs */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return 0;
+
+ /* Find a new cpu to set uncore MSR */
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+ uncore_add_die_entry(target);
+ } else {
+ uncore_remove_die_entry(cpu);
+ }
+
+ return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+ void *_unused)
+{
+ int cpu;
+
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ for_each_cpu(cpu, &uncore_cpu_mask) {
+ struct uncore_data *data;
+ int ret;
+
+ data = uncore_get_instance(cpu);
+ if (!data || !data->valid || !data->stored_uncore_data)
+ continue;
+
+ ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
+ data->stored_uncore_data);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+ .notifier_call = uncore_pm_notify,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ ICPU(INTEL_FAM6_BROADWELL_G),
+ ICPU(INTEL_FAM6_BROADWELL_X),
+ ICPU(INTEL_FAM6_BROADWELL_D),
+ ICPU(INTEL_FAM6_SKYLAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_D),
+ {}
+};
+
+static int __init intel_uncore_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(intel_uncore_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ uncore_max_entries = topology_max_packages() *
+ topology_max_die_per_package();
+ uncore_instances = kcalloc(uncore_max_entries,
+ sizeof(*uncore_instances), GFP_KERNEL);
+ if (!uncore_instances)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
+ &cpu_subsys.dev_root->kobj,
+ "intel_uncore_frequency");
+ if (ret)
+ goto err_free;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/uncore-freq:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret < 0)
+ goto err_rem_kobj;
+
+ uncore_hp_state = ret;
+
+ ret = register_pm_notifier(&uncore_pm_nb);
+ if (ret)
+ goto err_rem_state;
+
+ return 0;
+
+err_rem_state:
+ cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+ kobject_put(&uncore_root_kobj);
+err_free:
+ kfree(uncore_instances);
+
+ return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+ int i;
+
+ unregister_pm_notifier(&uncore_pm_nb);
+ cpuhp_remove_state(uncore_hp_state);
+ for (i = 0; i < uncore_max_entries; ++i) {
+ if (uncore_instances[i].valid)
+ kobject_put(&uncore_instances[i].kobj);
+ }
+ kobject_put(&uncore_root_kobj);
+ kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
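
For reference, a worked decode of MSR 0x620 as the driver above reads it, using an invented value: cap = 0x0818 has 0x18 (24) in bits [6:0] and 0x08 in bits [14:8], giving 2400000 kHz max and 800000 kHz min once multiplied by UNCORE_FREQ_KHZ_MULTIPLIER (100000).

#include <linux/bits.h>
#include <linux/types.h>

static void decode_uncore_ratio(u64 cap, unsigned int *min_khz,
				unsigned int *max_khz)
{
	*max_khz = (cap & GENMASK_ULL(6, 0)) * 100000;
	*min_khz = ((cap & GENMASK_ULL(14, 8)) >> 8) * 100000;
}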
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
index b0f421fea2a5..805fc0d8515c 100644
--- a/drivers/platform/x86/intel_atomisp2_pm.c
+++ b/drivers/platform/x86/intel_atomisp2_pm.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
- * Trail devices. The sole purpose of this driver is to allow the ISP to
- * be put in D3.
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
*
* Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
*
@@ -36,8 +36,7 @@
static int isp_set_power(struct pci_dev *dev, bool enable)
{
unsigned long timeout;
- u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
- ISPSSPM0_IUNIT_POWER_OFF;
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
/* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
@@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable)
/*
* There should be no IUNIT access while power-down is
- * in progress HW sighting: 4567865
+ * in progress. HW sighting: 4567865.
* Wait up to 50 ms for the IUNIT to shut down.
* And we do the same for power on.
*/
timeout = jiffies + msecs_to_jiffies(50);
- while (1) {
+ do {
u32 tmp;
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
if (tmp == val)
- break;
+ return 0;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
- enable ? "on" : "off");
- return -EBUSY;
- }
usleep_range(1000, 2000);
- }
+ } while (time_before(jiffies, timeout));
- return 0;
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
}
static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
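
The loop rewrite above is the standard bounded-poll idiom: sample, sleep, and give up once the jiffies deadline passes, with the error path written once after the loop. A generic sketch (read_status() stands in for the ISPSSPM0 register read):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int poll_until(bool (*read_status)(void), unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (read_status())
			return 0;
		usleep_range(1000, 2000);
	} while (time_before(jiffies, timeout));

	return -EBUSY;
}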
diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c
index 2d097fc2dd46..04138215956b 100644
--- a/drivers/platform/x86/intel_cht_int33fe_typec.c
+++ b/drivers/platform/x86/intel_cht_int33fe_typec.c
@@ -36,30 +36,6 @@ enum {
INT33FE_NODE_MAX,
};
-static const struct software_node nodes[];
-
-static const struct software_node_ref_args pi3usb30532_ref = {
- &nodes[INT33FE_NODE_PI3USB30532]
-};
-
-static const struct software_node_ref_args dp_ref = {
- &nodes[INT33FE_NODE_DISPLAYPORT]
-};
-
-static struct software_node_ref_args mux_ref;
-
-static const struct software_node_reference usb_connector_refs[] = {
- { "orientation-switch", 1, &pi3usb30532_ref},
- { "mode-switch", 1, &pi3usb30532_ref},
- { "displayport", 1, &dp_ref},
- { }
-};
-
-static const struct software_node_reference fusb302_refs[] = {
- { "usb-role-switch", 1, &mux_ref},
- { }
-};
-
/*
* Grrr I severely dislike buggy BIOS-es. At least one BIOS enumerates
* the max17047 both through the INT33FE ACPI device (it is right there
@@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = {
{ }
};
+/*
+ * We are not using inline property here because those are constant,
+ * and we need to adjust this one at runtime to point to real
+ * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
static const struct property_entry fusb302_props[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
{ }
};
@@ -112,6 +98,8 @@ static const u32 snk_pdo[] = {
PDO_VAR(5000, 12000, 3000),
};
+static const struct software_node nodes[];
+
static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_STRING("data-role", "dual"),
PROPERTY_ENTRY_STRING("power-role", "dual"),
@@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("mode-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("displayport",
+ &nodes[INT33FE_NODE_DISPLAYPORT]),
{ }
};
static const struct software_node nodes[] = {
- { "fusb302", NULL, fusb302_props, fusb302_refs },
+ { "fusb302", NULL, fusb302_props },
{ "max17047", NULL, max17047_props },
{ "pi3usb30532" },
{ "displayport" },
- { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+ { "connector", &nodes[0], usb_connector_props },
{ }
};
@@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
{
software_node_unregister_nodes(nodes);
- if (mux_ref.node) {
- fwnode_handle_put(software_node_fwnode(mux_ref.node));
- mux_ref.node = NULL;
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(
+ software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
}
if (data->dp) {
@@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
{
+ const struct software_node *mux_ref_node;
int ret;
- ret = software_node_register_nodes(nodes);
- if (ret)
- return ret;
-
- /* The devices that are not created in this driver need extra steps. */
-
/*
* There is no ACPI device node for the USB role mux, so we need to wait
* until the mux driver has created software node for the mux device.
* It means we depend on the mux driver. This function will return
* -EPROBE_DEFER until the mux device is registered.
*/
- mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
- if (!mux_ref.node) {
- ret = -EPROBE_DEFER;
- goto err_remove_nodes;
- }
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update node used in "usb-role-switch" property. Note that we
+ * rely on software_node_register_nodes() to use the original
+ * instance of properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_nodes(nodes);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
/*
* The DP connector does have ACPI device node. In this case we can just
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 512ad234ad0d..35ed9711c7b9 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010 Intel Corporation
*/
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index b102f6dd5693..101d7e791a13 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>
+#include <linux/units.h>
MODULE_AUTHOR("Thomas Sujith");
MODULE_AUTHOR("Zhang Rui");
@@ -302,8 +303,10 @@ static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
int result;
result = sensor_get_auxtrip(attr->handle, idx, &value);
+ if (result)
+ return result;
- return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+ return sprintf(buf, "%lu", deci_kelvin_to_celsius(value));
}
static ssize_t aux0_show(struct device *dev,
@@ -332,8 +335,8 @@ static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
if (value < 0)
return -EINVAL;
- result = sensor_set_auxtrip(attr->handle, idx,
- CELSIUS_TO_DECI_KELVIN(value));
+ result = sensor_set_auxtrip(attr->handle, idx,
+ celsius_to_deci_kelvin(value));
return result ? result : count;
}
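
Both hunks in this file swap the old DECI_KELVIN_TO_CELSIUS()/CELSIUS_TO_DECI_KELVIN() macros for the lower-case helpers that the new <linux/units.h> include provides. A small sketch of what those helpers do; the numbers in the comments assume the 5.6 implementation and its integer math:

#include <linux/units.h>

static long example_to_deci_kelvin(long celsius)
{
	/* 25 C becomes roughly 2981 deci-kelvin (298.1 K) */
	return celsius_to_deci_kelvin(celsius);
}

static long example_to_celsius(long deci_kelvin)
{
	/* integer division, so 2981 dK rounds down to 24 C */
	return deci_kelvin_to_celsius(deci_kelvin);
}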
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 292bace83f1e..6f436836fe50 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- ddata = (struct mid_pb_ddata *)id->driver_data;
+ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+ sizeof(*ddata), GFP_KERNEL);
if (!ddata)
- return -ENODATA;
+ return -ENOMEM;
ddata->dev = &pdev->dev;
ddata->irq = irq;
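
The -ENODATA to -ENOMEM change follows from the real fix here: id->driver_data points at a shared, read-only match table, so writing ddata->dev through it would corrupt the table for every device. devm_kmemdup() gives each device its own copy. A hypothetical sketch (in a real probe the id would come from platform_get_device_id(pdev)):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_ddata {
	struct device *dev;
	int irq;
};

static int example_probe(struct platform_device *pdev,
			 const struct platform_device_id *id)
{
	struct example_ddata *ddata;

	/* duplicate the const match data so per-device fields can be set */
	ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
			     sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	ddata->dev = &pdev->dev;	/* safe: this is our private copy */
	return 0;
}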
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 571b4754477c..144faa8bad3d 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = {
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
{"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
- {},
+ {}
};
static const struct pmc_bit_map spt_mphy_map[] = {
@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = {
{"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
{"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
{"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
- {},
+ {}
};
static const struct pmc_bit_map spt_pfear_map[] = {
@@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = {
{"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
{"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
{"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
- {},
+ {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ spt_pfear_map,
+ NULL
};
static const struct pmc_bit_map spt_ltr_show_map[] = {
@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
};
static const struct pmc_reg_map spt_reg_map = {
- .pfear_sts = spt_pfear_map,
+ .pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
.ltr_show_sts = spt_ltr_show_map,
@@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
@@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+	 * Tiger Lake and Elkhart Lake.
+ */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ cnp_pfear_map,
+ NULL
+};
+static const struct pmc_bit_map icl_pfear_map[] = {
/* Ice Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
@@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{}
};
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+ /* Tiger Lake and Elkhart Lake generation onwards only */
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
+};
+
static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{"AUDIO_D3", BIT(0)},
{"OTG_D3", BIT(1)},
@@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
cnp_slps0_dbg0_map,
cnp_slps0_dbg1_map,
cnp_slps0_dbg2_map,
- NULL,
+ NULL
};
static const struct pmc_bit_map cnp_ltr_show_map[] = {
@@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
};
static const struct pmc_reg_map cnp_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
@@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = {
};
static const struct pmc_reg_map icl_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
@@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = {
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
- return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+};
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
- reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ u32 val)
{
writel(val, pmcdev->regbase + reg_offset);
}
@@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void)
#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool slps0_dbg_latch;
-static void pmc_core_display_map(struct seq_file *s, int index,
- u8 pf_reg, const struct pmc_bit_map *pf_map)
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ u8 pf_reg, const struct pmc_bit_map **pf_map)
{
seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
- index, pf_map[index].name,
- pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter;
+ int index, iter, idx, ip = 0;
iter = pmcdev->map->ppfear0_offset;
@@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
- for (index = 0; map[index].name &&
- index < pmcdev->map->ppfear_buckets * 8; index++)
- pmc_core_display_map(s, index, pf_regs[index / 8], map);
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }
return 0;
}
@@ -561,21 +623,22 @@ out_unlock:
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
u32 val, buf_size, fd;
- int err = 0;
+ int err;
buf_size = count < 64 ? count : 64;
- mutex_lock(&pmcdev->lock);
- if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
- err = -EFAULT;
- goto out_unlock;
- }
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&pmcdev->lock);
if (val > map->ltr_ignore_max) {
err = -EINVAL;
@@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
&pmc_core_dev_state);
- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_fops);
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
@@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+ INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
- { 0, },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
};
/*
* This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS forces the 24MHz crystal to shut down
* before PMC can assert SLP_S0#.
*/
static int quirk_xtal_ignore(const struct dmi_system_id *id)
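
The thread running through the intel_pmc_core.c hunks is that .pfear_sts changes type (see the intel_pmc_core.h hunk below) from one NULL-terminated table to a NULL-terminated array of tables, so the Cannon Lake base map can be shared and per-SoC entries appended, as ext_icl_pfear_map and ext_tgl_pfear_map do. A sketch of the resulting two-level walk, assuming the pmc_bit_map layout from the header:

#include <linux/printk.h>
#include <linux/types.h>

struct example_bit_map {
	const char *name;	/* a NULL name terminates each table */
	u32 bit_mask;
};

static void example_walk(const struct example_bit_map **maps)
{
	int idx, index;

	for (idx = 0; maps[idx]; idx++)		/* NULL pointer ends the list */
		for (index = 0; maps[idx][index].name; index++)
			pr_info("%s\n", maps[idx][index].name);
}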
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index fdee5772e532..f1a0792b3f91 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel Core SoC Power Management Controller Header File
*
@@ -186,6 +186,8 @@ enum ppfear_regs {
#define ICL_NUM_IP_IGN_ALLOWED 20
#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define TGL_NUM_IP_IGN_ALLOWED 22
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
@@ -213,7 +215,7 @@ struct pmc_bit_map {
* captures them to have a common implementation.
*/
struct pmc_reg_map {
- const struct pmc_bit_map *pfear_sts;
+ const struct pmc_bit_map **pfear_sts;
const struct pmc_bit_map *mphy_sts;
const struct pmc_bit_map *pll_sts;
const struct pmc_bit_map **slps0_dbg_maps;
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index 6fe829f30997..e1266f5c6359 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 5c1da2bb1435..2433bf73f1ed 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -12,23 +12,13 @@
*/
#include <linux/acpi.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/suspend.h>
#include <asm/intel_pmc_ipc.h>
@@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset)
writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
}
-static inline u8 __maybe_unused ipc_data_readb(u32 offset)
-{
- return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
static inline u32 ipc_data_readl(u32 offset)
{
return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
@@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset)
}
/**
- * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: data pointer for storing the register output
- *
- * Reads the 32-bit PMC GCR register at given offset.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- *data = readl(ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
-
-/**
* intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
* @offset: offset of GCR register from GCR address base
* @data: data pointer for storing the register output
@@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data)
EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
/**
- * intel_pmc_gcr_write() - Write PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: register update value
- *
- * Writes the PMC GCR register of given offset with given
- * value.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_write(u32 offset, u32 data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- writel(data, ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
-
-/**
* intel_pmc_gcr_update() - Update PMC GCR register bits
* @offset: offset of GCR register from GCR address base
* @mask: bit mask for update operation
@@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
*
* Return: negative value on error or 0 on success.
*/
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
{
u32 new_val;
int ret = 0;
@@ -339,7 +265,6 @@ gcr_ipc_unlock:
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
static int update_no_reboot_bit(void *priv, bool set)
{
@@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void)
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_simple_command(int cmd, int sub)
+static int intel_pmc_ipc_simple_command(int cmd, int sub)
{
int ret;
@@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
/**
* intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
@@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
- u32 outlen, u32 dptr, u32 sptr)
+static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+ u32 outlen, u32 dptr, u32 sptr)
{
u32 wbuf[4] = { 0 };
int ret;
@@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
/**
* intel_pmc_ipc_command() - IPC command with input/output data
@@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
}
return (ssize_t)count;
}
+static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
struct device_attribute *attr,
@@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
int subcmd;
int ret;
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
if (val)
subcmd = 1;
@@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
}
return (ssize_t)count;
}
-
-static DEVICE_ATTR(simplecmd, S_IWUSR,
- NULL, intel_pmc_ipc_simple_cmd_store);
-static DEVICE_ATTR(northpeak, S_IWUSR,
- NULL, intel_pmc_ipc_northpeak_store);
+static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
static struct attribute *intel_ipc_attrs[] = {
&dev_attr_northpeak.attr,
@@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = {
.attrs = intel_ipc_attrs,
};
+static const struct attribute_group *intel_ipc_groups[] = {
+ &intel_ipc_group,
+ NULL
+};
+
static struct resource punit_res_array[] = {
/* Punit BIOS */
{
@@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev)
goto err_irq;
}
- ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
- ret);
- goto err_sys;
- }
-
ipcdev.has_gcr_regs = true;
return 0;
-err_sys:
- devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
+
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
@@ -980,7 +898,6 @@ err_irq:
static int ipc_plat_remove(struct platform_device *pdev)
{
- sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
@@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = {
.driver = {
.name = "pmc-ipc-plat",
.acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+ .dev_groups = intel_ipc_groups,
},
};
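
The sysfs changes in this file all serve one goal: rather than calling sysfs_create_group()/sysfs_remove_group() by hand in probe and remove, the groups are handed to the driver core through .dev_groups, which creates them before the device is announced to userspace and removes them automatically. A minimal sketch:

#include <linux/platform_device.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &dev_attr_foo.attr entries would go here */
	NULL
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.dev_groups = example_groups,	/* no sysfs_create_group() */
	},
};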
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index cdab916fbf92..3d7da5266136 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,11 +26,7 @@
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
-#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
@@ -58,56 +54,29 @@
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
-#define PCI_DEVICE_ID_LINCROFT 0x082a
-#define PCI_DEVICE_ID_PENWELL 0x080e
-#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
-#define PCI_DEVICE_ID_TANGIER 0x11a0
-
-/* intel scu ipc driver data */
-struct intel_scu_ipc_pdata_t {
- u32 i2c_base;
- u32 i2c_len;
- u8 irq_mode;
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
-/* Penwell and Cloverview */
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 1,
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
- .i2c_base = 0xff00d000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
struct intel_scu_ipc_dev {
struct device *dev;
void __iomem *ipc_base;
- void __iomem *i2c_base;
struct completion cmd_complete;
u8 irq_mode;
};
static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+#define IPC_STATUS 0x04
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
+
/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
+ * IPC Write/Read Buffers:
+ * 16 byte buffer for sending and receiving data to and from SCU.
*/
+#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
-#define IPC_I2C_CNTRL_ADDR 0
-#define I2C_DATA_ADDR 0x04
+/* Timeout in jiffies */
+#define IPC_TIMEOUT (3 * HZ)
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
*/
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
- if (scu->irq_mode) {
- reinit_completion(&scu->cmd_complete);
- writel(cmd | IPC_IOC, scu->ipc_base);
- }
- writel(cmd, scu->ipc_base);
+ reinit_completion(&scu->cmd_complete);
+ writel(cmd | IPC_IOC, scu->ipc_base);
}
/*
@@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
*/
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
- writel(data, scu->ipc_base + 0x80 + offset);
+ writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
@@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32
*/
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
- return __raw_readl(scu->ipc_base + 0x04);
+ return __raw_readl(scu->ipc_base + IPC_STATUS);
}
/* Read ipc byte data */
@@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait till scu status is busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- u32 status = ipc_read_status(scu);
- u32 loop_count = 100000;
+ unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
- /* break if scu doesn't reset busy bit after huge retry */
- while ((status & BIT(0)) && --loop_count) {
- udelay(1); /* scu processing time is in few u secods */
- status = ipc_read_status(scu);
- }
+ do {
+ u32 status;
- if (status & BIT(0)) {
- dev_err(scu->dev, "IPC timed out");
- return -ETIMEDOUT;
- }
+ status = ipc_read_status(scu);
+ if (!(status & IPC_STATUS_BUSY))
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
- if (status & BIT(1))
- return -EIO;
+ usleep_range(50, 100);
+ } while (time_before(jiffies, end));
- return 0;
+ dev_err(scu->dev, "IPC timed out");
+ return -ETIMEDOUT;
}
/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
@@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
+ if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
dev_err(scu->dev, "IPC timed out\n");
return -ETIMEDOUT;
}
status = ipc_read_status(scu);
- if (status & BIT(1))
+ if (status & IPC_STATUS_ERR)
return -EIO;
return 0;
@@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
}
/**
- * intel_scu_ipc_ioread8 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read byte
+ * intel_scu_ipc_ioread8 - read a word via the SCU
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
*
- * Read a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_ioread8(u16 addr, u8 *data)
{
@@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data)
EXPORT_SYMBOL(intel_scu_ipc_ioread8);
/**
- * intel_scu_ipc_ioread16 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read word
- *
- * Read a register pair. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- * intel_scu_ipc_ioread32 - read a dword via the SCU
- * @addr: register on SCU
- * @data: return pointer for read dword
- *
- * Read four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- * intel_scu_ipc_iowrite8 - write a byte via the SCU
- * @addr: register on SCU
- * @data: byte to write
+ * intel_scu_ipc_iowrite8 - write a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Byte to write
*
- * Write a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_iowrite8(u16 addr, u8 data)
{
@@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data)
EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
/**
- * intel_scu_ipc_iowrite16 - write a word via the SCU
- * @addr: register on SCU
- * @data: word to write
- *
- * Write two registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_readv - read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- * intel_scu_ipc_iowrite32 - write a dword via the SCU
- * @addr: register on SCU
- * @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * Write four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- * intel_scu_ipc_readvv - read a set of registers
- * @addr: register list
- * @data: bytes to return
- * @len: length of array
- *
- * Read registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
{
@@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
EXPORT_SYMBOL(intel_scu_ipc_readv);
/**
- * intel_scu_ipc_writev - write a set of registers
- * @addr: register list
- * @data: bytes to write
- * @len: length of array
- *
- * Write registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev - write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
*
- * The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
*
+ * This function may sleep.
*/
int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
{
@@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
EXPORT_SYMBOL(intel_scu_ipc_writev);
/**
- * intel_scu_ipc_update_register - r/m/w a register
- * @addr: register address
- * @bits: bits to update
- * @mask: mask of bits to update
- *
- * Read-modify-write power control unit register. The first data argument
- * must be register value and second is mask value
- * mask is a bitmap that indicates which bits to update.
- * 0 = masked. Don't modify this bit, 1 = modify this bit.
- * returns 0 on success or an error code.
- *
- * This function may sleep. Locking between SCU accesses is handled
- * for the caller.
+ * intel_scu_ipc_update_register - r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write power control unit register. The first data argument
+ * must be the register value and the second is the mask value. The mask is
+ * a bitmap that indicates which bits to update: %0 = masked (don't modify
+ * this bit), %1 = modify this bit. Returns %0 on success or an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
*/
int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
{
@@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
EXPORT_SYMBOL(intel_scu_ipc_update_register);
/**
- * intel_scu_ipc_simple_command - send a simple command
- * @cmd: command
- * @sub: sub type
+ * intel_scu_ipc_simple_command - send a simple command
+ * @cmd: Command
+ * @sub: Sub type
*
- * Issue a simple command to the SCU. Do not use this interface if
- * you must then access data as any data values may be overwritten
- * by another SCU access by the time this function returns.
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
*
- * This function may sleep. Locking for SCU accesses is handled for
- * the caller.
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
*/
int intel_scu_ipc_simple_command(int cmd, int sub)
{
@@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
EXPORT_SYMBOL(intel_scu_ipc_simple_command);
/**
- * intel_scu_ipc_command - command with data
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in dwords
- * @out: output data
- * @outlein: output length in dwords
- *
- * Issue a command to the SCU which involves data transfers. Do the
- * data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_command - command with data
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in dwords
+ * @out: Output data
+ * @outlen: Output length in dwords
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
*/
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
u32 *out, int outlen)
@@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
}
EXPORT_SYMBOL(intel_scu_ipc_command);
-#define IPC_SPTR 0x08
-#define IPC_DPTR 0x0C
-
-/**
- * intel_scu_ipc_raw_command() - IPC command with data and pointers
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- * @in: input data of this IPC command.
- * @inlen: input data length in dwords.
- * @out: output data of this IPC command.
- * @outlen: output data length in dwords.
- * @sptr: data writing to SPTR register.
- * @dptr: data writing to DPTR register.
- *
- * Send an IPC command to SCU with input/output data and source/dest pointers.
- *
- * Return: an IPC error code or 0 on success.
- */
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
- u32 *out, int outlen, u32 dptr, u32 sptr)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- int inbuflen = DIV_ROUND_UP(inlen, 4);
- u32 inbuf[4];
- int i, err;
-
- /* Up to 16 bytes */
- if (inbuflen > 4)
- return -EINVAL;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
-
- writel(dptr, scu->ipc_base + IPC_DPTR);
- writel(sptr, scu->ipc_base + IPC_SPTR);
-
- /*
- * SRAM controller doesn't support 8-bit writes, it only
- * supports 32-bit writes, so we have to copy input data into
- * the temporary buffer, and SCU FW will use the inlen to
- * determine the actual input data length in the temporary
- * buffer.
- */
- memcpy(inbuf, in, inlen);
-
- for (i = 0; i < inbuflen; i++)
- ipc_data_writel(scu, inbuf[i], 4 * i);
-
- ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
- err = intel_scu_ipc_check_status(scu);
- if (!err) {
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(scu, 4 * i);
- }
-
- mutex_unlock(&ipclock);
- return err;
-}
-EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
-
-/* I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ 2 /* I2C Read command */
-
-/**
- * intel_scu_ipc_i2c_cntrl - I2C read/write operations
- * @addr: I2C address + command bits
- * @data: data to read/write
- *
- * Perform an an I2C read/write operation via the SCU. All locking is
- * handled for the caller. This function may sleep.
- *
- * Returns an error code or 0 on success.
- *
- * This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- u32 cmd = 0;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- cmd = (addr >> 24) & 0xFF;
- if (cmd == IPC_I2C_READ) {
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- /* Write not getting updated without delay */
- usleep_range(1000, 2000);
- *data = readl(scu->i2c_base + I2C_DATA_ADDR);
- } else if (cmd == IPC_I2C_WRITE) {
- writel(*data, scu->i2c_base + I2C_DATA_ADDR);
- usleep_range(1000, 2000);
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- } else {
- dev_err(scu->dev,
- "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
- mutex_unlock(&ipclock);
- return -EIO;
- }
- mutex_unlock(&ipclock);
- return 0;
-}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
-
/*
* Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
* When ioc bit is set to 1, caller api must wait for interrupt handler called
@@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
static irqreturn_t ioc(int irq, void *dev_id)
{
struct intel_scu_ipc_dev *scu = dev_id;
+ int status = ipc_read_status(scu);
- if (scu->irq_mode)
- complete(&scu->cmd_complete);
+ writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+ complete(&scu->cmd_complete);
return IRQ_HANDLED;
}
@@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err;
struct intel_scu_ipc_dev *scu = &ipcdev;
- struct intel_scu_ipc_pdata_t *pdata;
if (scu->dev) /* We support only one SCU */
return -EBUSY;
- pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
- if (!pdata)
- return -ENODEV;
-
- scu->irq_mode = pdata->irq_mode;
-
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scu->ipc_base = pcim_iomap_table(pdev)[0];
- scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
- if (!scu->i2c_base)
- return -ENOMEM;
-
err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
scu);
if (err)
@@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
}
-#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
-
static const struct pci_device_id pci_ids[] = {
- SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata),
+ { PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x08ea) },
+ { PCI_VDEVICE(INTEL, 0x11a0) },
{}
};
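
The busy_loop() rewrite above is the heart of this file's cleanup: a fixed 100000-iteration udelay() spin becomes a jiffies deadline polled with usleep_range(), so the wait sleeps instead of burning a CPU. A self-contained sketch of the pattern, with an illustrative register layout:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

#define EXAMPLE_TIMEOUT		(3 * HZ)	/* already in jiffies */
#define EXAMPLE_STATUS_ERR	BIT(1)
#define EXAMPLE_STATUS_BUSY	BIT(0)

static int example_busy_loop(void __iomem *status_reg)
{
	unsigned long end = jiffies + EXAMPLE_TIMEOUT;

	do {
		u32 status = readl(status_reg);

		if (!(status & EXAMPLE_STATUS_BUSY))
			return (status & EXAMPLE_STATUS_ERR) ? -EIO : 0;

		usleep_range(50, 100);	/* sleeps; not for atomic context */
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}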
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
index 3de5a3c66529..0c2aa22c7a12 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
@@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
{0x7F, 0x00, 0x0B},
{0x7F, 0x10, 0x12},
{0x7F, 0x20, 0x23},
+ {0x94, 0x03, 0x03},
+ {0x95, 0x03, 0x03},
};
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
@@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
{0xD0, 0x03, 0x08},
{0x7F, 0x02, 0x00},
{0x7F, 0x08, 0x00},
+ {0x95, 0x03, 0x03},
};
struct isst_cmd {
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index e84d3e983e0c..8e3fb55ac1ae 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file,
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
if (err) {
pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
@@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file,
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
if (err) {
pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
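
Both verbosity hunks make the same fix seen in intel_pmc_core.c earlier: kstrtou32_from_user() already returns -EFAULT, -EINVAL or -ERANGE as appropriate, so its result should be propagated rather than flattened to -EFAULT. Sketch:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_write(const char __user *userbuf, size_t count)
{
	u32 val;
	int err;

	err = kstrtou32_from_user(userbuf, count, 0, &val);
	if (err)			/* keep the specific error code */
		return err;

	/* ... act on val ... */
	return count;
}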
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index df8565bad595..c4c742bb23cf 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = {
static int telemetry_pltdrv_probe(struct platform_device *pdev)
{
- struct resource *res0 = NULL, *res1 = NULL;
const struct x86_cpu_id *id;
- int size, ret = -ENOMEM;
+ void __iomem *mem;
+ int ret;
id = x86_match_cpu(telemetry_cpu_ids);
if (!id)
@@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
telm_conf = (struct telemetry_plt_config *)id->driver_data;
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res0) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res0);
- if (!devm_request_mem_region(&pdev->dev, res0->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- telm_conf->pss_config.ssram_base_addr = res0->start;
- telm_conf->pss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res1) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res1);
- if (!devm_request_mem_region(&pdev->dev, res1->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
+ telm_conf->pss_config.regmap = mem;
- telm_conf->ioss_config.ssram_base_addr = res1->start;
- telm_conf->ioss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- telm_conf->pss_config.regmap = ioremap_nocache(
- telm_conf->pss_config.ssram_base_addr,
- telm_conf->pss_config.ssram_size);
- if (!telm_conf->pss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
-
- telm_conf->ioss_config.regmap = ioremap_nocache(
- telm_conf->ioss_config.ssram_base_addr,
- telm_conf->ioss_config.ssram_size);
- if (!telm_conf->ioss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
+ telm_conf->ioss_config.regmap = mem;
mutex_init(&telm_conf->telem_lock);
mutex_init(&telm_conf->telem_trace_lock);
@@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
return 0;
out:
- if (res0)
- release_mem_region(res0->start, resource_size(res0));
- if (res1)
- release_mem_region(res1->start, resource_size(res1));
- if (telm_conf->pss_config.regmap)
- iounmap(telm_conf->pss_config.regmap);
- if (telm_conf->ioss_config.regmap)
- iounmap(telm_conf->ioss_config.regmap);
dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
return ret;
@@ -1204,9 +1163,6 @@ out:
static int telemetry_pltdrv_remove(struct platform_device *pdev)
{
telemetry_clear_pltdata();
- iounmap(telm_conf->pss_config.regmap);
- iounmap(telm_conf->ioss_config.regmap);
-
return 0;
}
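
The probe rewrite above is a textbook use of devm_platform_ioremap_resource(), which folds platform_get_resource(), devm_request_mem_region() and ioremap() into one call, and with it the manual unwind disappears from both the error path and remove(). Sketch:

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *mem;

	mem = devm_platform_ioremap_resource(pdev, 0);	/* MEM resource 0 */
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	/* the mapping is released automatically when the device unbinds */
	return 0;
}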
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 8fe51e43f1bc..c27548fd386a 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -35,6 +35,8 @@
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
@@ -46,6 +48,8 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
@@ -68,6 +72,7 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
@@ -85,9 +90,13 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
@@ -96,6 +105,9 @@
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
@@ -112,17 +124,29 @@
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
/* Maximum number of possible physical buses equipped on system */
#define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24
/* Number of channels in group */
#define MLXPLAT_CPLD_GRP_CHNL_NUM 8
@@ -130,14 +154,16 @@
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
+#define MLXPLAT_CPLD_CH3 18
/* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 3
/* Hotplug devices adapter numbers */
#define MLXPLAT_CPLD_NR_NONE -1
#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3
#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
@@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = {
IORESOURCE_IO),
};
+/* Platform i2c next generation systems data */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+ {
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+ .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_i2c_ng_items_data,
+ },
+};
+
/* Platform next generation systems i2c data */
static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+ .items = mlxplat_mlxcpld_i2c_ng_items,
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
@@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
/* Platform mux data */
-static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
@@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
};
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH3,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+
+};
+
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
{
@@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
},
};
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
/* Platform hotplug default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
{
@@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
},
};
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.items = mlxplat_mlxcpld_default_items,
@@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+ .items = mlxplat_mlxcpld_comex_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
+
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
{
.label = "pwr1",
@@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_ext_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+ .items = mlxplat_mlxcpld_ext_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
};
+/* Platform led for Comex based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+ .data = mlxplat_mlxcpld_comex_100G_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
@@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
.mode = 0200,
},
{
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
@@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
@@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
@@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.bit = GENMASK(7, 0),
.mode = 0444,
},
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
@@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
@@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
@@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
@@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
@@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+ MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
@@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
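A hedged sketch (not from this patch) of how a bus-less regmap like the configs above is typically instantiated: devm_regmap_init() takes a NULL bus plus a driver-private context that is handed back to the reg_read/reg_write callbacks, and the reg_defaults table pre-seeds the REGCACHE_FLAT cache so non-volatile registers can be served without touching the CPLD. The probe function name is illustrative; the context's base field would hold the LPC mapping.

static int example_probe(struct platform_device *pdev)
{
	static struct mlxplat_mlxcpld_regmap_context ctx; /* .base set up elsewhere */
	struct regmap *regmap;

	regmap = devm_regmap_init(&pdev->dev, NULL, &ctx,
				  &mlxplat_mlxcpld_regmap_config_ng400);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}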
static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
};
@@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
@@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
return 1;
-};
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+ mlxplat_mux_data = mlxplat_extended_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_led = &mlxplat_comex_100G_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
@@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_comex_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
- MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) {
+ mlxplat_max_adap_num; i++) {
search_adap = i2c_get_adapter(i);
if (search_adap) {
i2c_put_adapter(search_adap);
@@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
}
/* Return with error if free id for adapter is not found. */
- if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM)
+ if (i == mlxplat_max_adap_num)
return -ENODEV;
/* Shift adapter ids, since expected parent adapter is not free. */
*nr = i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
shift = *nr - mlxplat_mux_data[i].parent;
mlxplat_mux_data[i].parent = *nr;
mlxplat_mux_data[i].base_nr += shift;
@@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void)
if (nr < 0)
goto fail_alloc;
- nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr;
+ nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
priv->pdev_i2c = platform_device_register_resndata(
@@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void)
goto fail_alloc;
}
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
priv->pdev_mux[i] = platform_device_register_resndata(
&priv->pdev_i2c->dev,
"i2c-mux-reg", i, NULL,
@@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_led);
platform_device_unregister(priv->pdev_hotplug);
- for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+ for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index 48b112b4f0b0..9b11ef1a401f 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -2,7 +2,7 @@
/*
* PC-Engines APUv2/APUv3 board platform driver
- * for gpio buttons and LEDs
+ * for GPIO buttons and LEDs
*
* Copyright (C) 2018 metux IT consult
* Author: Enrico Weigelt <info@metux.net>
@@ -23,10 +23,10 @@
/*
* NOTE: this driver only supports APUv2/3 - not APUv1, as this one
- * has completely different register layouts
+ * has completely different register layouts.
*/
-/* register mappings */
+/* Register mappings */
#define APU2_GPIO_REG_LED1 AMD_FCH_GPIO_REG_GPIO57
#define APU2_GPIO_REG_LED2 AMD_FCH_GPIO_REG_GPIO58
#define APU2_GPIO_REG_LED3 AMD_FCH_GPIO_REG_GPIO59_DEVSLP1
@@ -35,7 +35,7 @@
#define APU2_GPIO_REG_MPCIE2 AMD_FCH_GPIO_REG_GPIO59_DEVSLP0
#define APU2_GPIO_REG_MPCIE3 AMD_FCH_GPIO_REG_GPIO51
-/* order in which the gpio lines are defined in the register list */
+/* Order in which the GPIO lines are defined in the register list */
#define APU2_GPIO_LINE_LED1 0
#define APU2_GPIO_LINE_LED2 1
#define APU2_GPIO_LINE_LED3 2
@@ -44,7 +44,7 @@
#define APU2_GPIO_LINE_MPCIE2 5
#define APU2_GPIO_LINE_MPCIE3 6
-/* gpio device */
+/* GPIO device */
static int apu2_gpio_regs[] = {
[APU2_GPIO_LINE_LED1] = APU2_GPIO_REG_LED1,
@@ -72,7 +72,7 @@ static const struct amd_fch_gpio_pdata board_apu2 = {
.gpio_names = apu2_gpio_names,
};
-/* gpio leds device */
+/* GPIO LEDs device */
static const struct gpio_led apu2_leds[] = {
{ .name = "apu:green:1" },
@@ -95,12 +95,12 @@ static struct gpiod_lookup_table gpios_led_table = {
NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
NULL, 2, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_REG_SIMSWAP,
+ GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP,
NULL, 3, GPIO_ACTIVE_LOW),
}
};
-/* gpio keyboard device */
+/* GPIO keyboard device */
static struct gpio_keys_button apu2_keys_buttons[] = {
{
@@ -129,12 +129,12 @@ static struct gpiod_lookup_table gpios_key_table = {
}
};
-/* board setup */
+/* Board setup */
-/* note: matching works on string prefix, so "apu2" must come before "apu" */
+/* Note: matching works on string prefix, so "apu2" must come before "apu" */
static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
- /* APU2 w/ legacy bios < 4.0.8 */
+ /* APU2 w/ legacy BIOS < 4.0.8 */
{
.ident = "apu2",
.matches = {
@@ -143,7 +143,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
},
.driver_data = (void *)&board_apu2,
},
- /* APU2 w/ legacy bios >= 4.0.8 */
+ /* APU2 w/ legacy BIOS >= 4.0.8 */
{
.ident = "apu2",
.matches = {
@@ -152,7 +152,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
},
.driver_data = (void *)&board_apu2,
},
- /* APU2 w/ maainline bios */
+ /* APU2 w/ mainline BIOS */
{
.ident = "apu2",
.matches = {
@@ -162,7 +162,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
.driver_data = (void *)&board_apu2,
},
- /* APU3 w/ legacy bios < 4.0.8 */
+ /* APU3 w/ legacy BIOS < 4.0.8 */
{
.ident = "apu3",
.matches = {
@@ -171,7 +171,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
},
.driver_data = (void *)&board_apu2,
},
- /* APU3 w/ legacy bios >= 4.0.8 */
+ /* APU3 w/ legacy BIOS >= 4.0.8 */
{
.ident = "apu3",
.matches = {
@@ -180,7 +180,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
},
.driver_data = (void *)&board_apu2,
},
- /* APU3 w/ mainline bios */
+ /* APU3 w/ mainline BIOS */
{
.ident = "apu3",
.matches = {
@@ -189,6 +189,33 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
},
.driver_data = (void *)&board_apu2,
},
+ /* APU4 w/ legacy BIOS < 4.0.8 */
+ {
+ .ident = "apu4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU4")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU4 w/ legacy BIOS >= 4.0.8 */
+ {
+ .ident = "apu4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "apu4")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
+ /* APU4 w/ mainline BIOS */
+ {
+ .ident = "apu4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu4")
+ },
+ .driver_data = (void *)&board_apu2,
+ },
{}
};
@@ -223,7 +250,7 @@ static int __init apu_board_init(void)
id = dmi_first_match(apu_gpio_dmi_table);
if (!id) {
- pr_err("failed to detect apu board via dmi\n");
+ pr_err("failed to detect APU board via DMI\n");
return -ENODEV;
}
@@ -262,7 +289,7 @@ module_init(apu_board_init);
module_exit(apu_board_exit);
MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>");
-MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
+MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LEDs/keys driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
MODULE_ALIAS("platform:pcengines-apuv2");
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 07d1b911e72f..3e3c66dfec2e 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -429,6 +429,14 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"),
},
},
+ {
+ .ident = "CONNECT X300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"),
+ },
+ },
+
{ /*sentinel*/ }
};
@@ -481,7 +489,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
pmc->base_addr &= PMC_BASE_ADDR_MASK;
- pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+ pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN);
if (!pmc->regmap) {
dev_err(&pdev->dev, "error: ioremap failed\n");
return -ENOMEM;
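The ioremap_nocache() -> ioremap() conversions in this and the following hunks are mechanical: on x86 the two were aliases (both return an uncached mapping), and v5.6 removes the _nocache spelling. A minimal sketch of the resulting pattern, with an illustrative function name:

static int example_map_pmc(resource_size_t base_addr)
{
	void __iomem *regs = ioremap(base_addr, PMC_MMIO_REG_LEN);

	if (!regs)
		return -ENOMEM;
	/* readl(regs + off) / writel(val, regs + off) work as before */
	iounmap(regs);
	return 0;
}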
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 9b6a93ff41ff..23e40aa2176e 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1394,7 +1394,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
int ret = 0;
int i;
- samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+ samsung->f0000_segment = ioremap(0xf0000, 0xffff);
if (!samsung->f0000_segment) {
if (debug || force)
pr_err("Can't map the segment at 0xf0000\n");
@@ -1434,7 +1434,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
if (debug)
samsung_sabi_infos(samsung, loca, ifaceP);
- samsung->sabi_iface = ioremap_nocache(ifaceP, 16);
+ samsung->sabi_iface = ioremap(ifaceP, 16);
if (!samsung->sabi_iface) {
pr_err("Can't remap %x\n", ifaceP);
ret = -EINVAL;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index da794dcfdd92..8eaadbaf8ffa 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -907,13 +907,12 @@ static ssize_t dispatch_proc_write(struct file *file,
return ret;
}
-static const struct file_operations dispatch_proc_fops = {
- .owner = THIS_MODULE,
- .open = dispatch_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = dispatch_proc_write,
+static const struct proc_ops dispatch_proc_ops = {
+ .proc_open = dispatch_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = dispatch_proc_write,
};
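The file_operations -> proc_ops conversions in this and the following drivers are likewise mechanical: each method moves to its proc_* counterpart and the .owner = THIS_MODULE line is dropped, since procfs manages entry lifetime itself. A minimal read-only seq_file sketch under the v5.6 interfaces (names are illustrative):

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, PDE_DATA(inode));
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};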
static char *next_cmd(char **cmds)
@@ -9984,7 +9983,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (ibm->write)
mode |= S_IWUSR;
entry = proc_create_data(ibm->name, mode, proc_dir,
- &dispatch_proc_fops, ibm);
+ &dispatch_proc_ops, ibm);
if (!entry) {
pr_err("unable to create proc entry %s\n", ibm->name);
ret = -ENODEV;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index a1e6569427c3..808944546739 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1432,13 +1432,12 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations lcd_proc_fops = {
- .owner = THIS_MODULE,
- .open = lcd_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = lcd_proc_write,
+static const struct proc_ops lcd_proc_ops = {
+ .proc_open = lcd_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = lcd_proc_write,
};
/* Video-Out */
@@ -1539,13 +1538,12 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
return ret ? -EIO : count;
}
-static const struct file_operations video_proc_fops = {
- .owner = THIS_MODULE,
- .open = video_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = video_proc_write,
+static const struct proc_ops video_proc_ops = {
+ .proc_open = video_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = video_proc_write,
};
/* Fan status */
@@ -1617,13 +1615,12 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations fan_proc_fops = {
- .owner = THIS_MODULE,
- .open = fan_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = fan_proc_write,
+static const struct proc_ops fan_proc_ops = {
+ .proc_open = fan_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = fan_proc_write,
};
static int keys_proc_show(struct seq_file *m, void *v)
@@ -1662,13 +1659,12 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations keys_proc_fops = {
- .owner = THIS_MODULE,
- .open = keys_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = keys_proc_write,
+static const struct proc_ops keys_proc_ops = {
+ .proc_open = keys_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = keys_proc_write,
};
static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
@@ -1688,16 +1684,16 @@ static void create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
{
if (dev->backlight_dev)
proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
- &lcd_proc_fops, dev);
+ &lcd_proc_ops, dev);
if (dev->video_supported)
proc_create_data("video", S_IRUGO | S_IWUSR, toshiba_proc_dir,
- &video_proc_fops, dev);
+ &video_proc_ops, dev);
if (dev->fan_supported)
proc_create_data("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir,
- &fan_proc_fops, dev);
+ &fan_proc_ops, dev);
if (dev->hotkey_dev)
proc_create_data("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir,
- &keys_proc_fops, dev);
+ &keys_proc_ops, dev);
proc_create_single_data("version", S_IRUGO, toshiba_proc_dir,
version_proc_show, dev);
}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 72205771d03d..93177e6e5ecd 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-gp-electronic-t701.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
{ }
};
@@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v80-plus-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-onda-v820w-32g.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-onda-v891w-v1.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v891w-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-pipo-w2s.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
{ }
};
@@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = {
.properties = pipo_w2s_props,
};
+static const struct property_entry pipo_w11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w11_props,
+};
+
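A brief sketch (not from this patch) of how property arrays like pipo_w11_props end up being used: the entries are attached to the matching i2c_client, after which the touchscreen driver reads them through the generic device-property API. Function and variable names are illustrative.

static void example_read_ts_props(struct device *dev)
{
	u32 size_x = 0;
	const char *fw_name = NULL;

	device_property_read_u32(dev, "touchscreen-size-x", &size_x);
	device_property_read_string(dev, "firmware-name", &fw_name);
}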
static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-teclast_x98plus2.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c11.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = {
static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c13.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = {
static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primetab-t13b.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
@@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-surftab-wintron70-st70416-6.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
@@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Pipo W11 */
+ .driver_data = (void *)&pipo_w11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ /* Above matches are too generic, add bios-ver match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+ },
+ },
+ {
/* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
@@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "SurfTab wintron 7.0 ST70416-6"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
},
@@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
}
static int ts_dmi_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
+ unsigned long action, void *data)
{
struct device *dev = data;
struct i2c_client *client;
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 179b737280e1..c43d8ad02529 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -747,34 +747,12 @@ __skip:
}
/*
- * Compute ISA PnP checksum for first eight bytes.
- */
-static unsigned char __init isapnp_checksum(unsigned char *data)
-{
- int i, j;
- unsigned char checksum = 0x6a, bit, b;
-
- for (i = 0; i < 8; i++) {
- b = data[i];
- for (j = 0; j < 8; j++) {
- bit = 0;
- if (b & (1 << j))
- bit = 1;
- checksum =
- ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
- | (checksum >> 1);
- }
- }
- return checksum;
-}
-
-/*
* Build device list for all present ISA PnP devices.
*/
static int __init isapnp_build_device_list(void)
{
int csn;
- unsigned char header[9], checksum;
+ unsigned char header[9];
struct pnp_card *card;
u32 eisa_id;
char id[8];
@@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void)
for (csn = 1; csn <= isapnp_csn_count; csn++) {
isapnp_wake(csn);
isapnp_peek(header, 9);
- checksum = isapnp_checksum(header);
eisa_id = header[0] | header[1] << 8 |
header[2] << 16 | header[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 36820979bd1c..785a796430fa 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -49,10 +49,9 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
return nbytes;
}
-static const struct file_operations isapnp_proc_bus_file_operations = {
- .owner = THIS_MODULE,
- .llseek = isapnp_proc_bus_lseek,
- .read = isapnp_proc_bus_read,
+static const struct proc_ops isapnp_proc_bus_proc_ops = {
+ .proc_lseek = isapnp_proc_bus_lseek,
+ .proc_read = isapnp_proc_bus_read,
};
static int isapnp_proc_attach_device(struct pnp_dev *dev)
@@ -69,7 +68,7 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev)
}
sprintf(name, "%02x", dev->number);
e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
- &isapnp_proc_bus_file_operations, dev);
+ &isapnp_proc_bus_proc_ops, dev);
if (!e)
return -ENOMEM;
proc_set_size(e, 256);
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index fe1c8f5d9af0..a806830e3a40 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -210,13 +210,12 @@ out:
return ret;
}
-static const struct file_operations pnpbios_proc_fops = {
- .owner = THIS_MODULE,
- .open = pnpbios_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pnpbios_proc_write,
+static const struct proc_ops pnpbios_proc_ops = {
+ .proc_open = pnpbios_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = pnpbios_proc_write,
};
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
@@ -228,13 +227,13 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node)
if (!proc_pnp)
return -EIO;
if (!pnpbios_dont_use_current_config) {
- proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
+ proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_ops,
(void *)(long)(node->handle));
}
if (!proc_pnp_boot)
return -EIO;
- if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
+ if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_ops,
(void *)(long)(node->handle + 0x100)))
return 0;
return -EIO;
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 089b6244b716..cdb4237bfd02 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -12,6 +12,22 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on POWER_AVS && HAS_IOMEM
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates CPU OPPs tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPUfrequency scaling say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index a1b8cd453f19..9007d05853e2 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 000000000000..bd7c3e48b386
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1794 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define FUSE_REVISION_UNKNOWN (-1)
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_fuse {
+ char *ring_osc;
+ char *init_voltage;
+ char *quotient;
+ char *quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_volt_scale;
+ int max_quot_scale;
+ /* fuse quot */
+ int quot_offset;
+ int quot_scale;
+ int quot_adjust;
+ /* fuse quot_offset */
+ int quot_offset_scale;
+ int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+ int init_voltage_step;
+ int init_voltage_width;
+ struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ int min_diff_quot;
+ int *step_quot;
+
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+
+ struct cpr_fuses cpr_fuses;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *config;
+ struct reg_sequence *settings;
+ int num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+ const struct cpr_desc *cpr_desc;
+ const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_corners;
+ unsigned int ref_clk_khz;
+
+ struct generic_pm_domain pd;
+ struct device *dev;
+ struct device *attached_cpu_dev;
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct clk *cpu_clk;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ u32 gcnt;
+ unsigned long flags;
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct cpr_fuse *cpr_fuses;
+
+ struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ const struct cpr_desc *desc = drv->desc;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+ ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f == end)
+ return;
+
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+ int new_uV, enum voltage_change_dir dir)
+{
+ int ret;
+ struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+ if (ret) {
+ dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+ new_uV);
+ return ret;
+ }
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
+ return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV, ret;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ if (dir != UP && dir != DOWN)
+ return 0;
+
+ step_uV = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_uV)
+ return -EINVAL;
+
+ corner = drv->corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->up_threshold,
+ desc->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_up_limit)
+ error_steps = desc->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ new_uV = min(new_uV, corner->max_uV);
+
+ dev_dbg(drv->dev,
+ "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ } else {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->down_threshold,
+ desc->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_down_limit)
+ error_steps = desc->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ new_uV = max(new_uV, corner->min_uV);
+
+ dev_dbg(drv->dev,
+ "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ }
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret) {
+ cpr_irq_clr_nack(drv);
+ return ret;
+ }
+ drv->corner->last_uV = new_uV;
+
+ if (dir == UP) {
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else {
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = desc->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ }
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ return 0;
+}
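+/*
+ * Worked example with illustrative numbers (editor's note, not from the
+ * patch): for a regulator step of step_uV = 12500 and a measured
+ * error_steps = 3 in the UP direction, error_steps is first capped to
+ * vdd_apc_step_up_limit (say 2), so the loop requests
+ * new_uV = last_uV + 2 * 12500 and then clamps the result to
+ * corner->max_uV before calling cpr_scale_voltage().
+ */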
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ const struct cpr_desc *desc = drv->desc;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ dev_dbg(drv->dev, "CPR is disabled\n");
+ ret = IRQ_NONE;
+ } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+ dev_dbg(drv->dev, "CPR measurement is not ready\n");
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ dev_err_ratelimited(drv->dev,
+ "Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ ret = IRQ_NONE;
+ } else {
+ /*
+ * Handle the interrupt flags in order of their priority
+ */
+ if (val & CPR_INT_UP) {
+ cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+ } else {
+ dev_dbg(drv->dev,
+ "IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW ceiling, floor and vlevel */
+ val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /*
+ * Clear the target quotient value and gate count of all
+ * ring oscillators
+ */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
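+ /*
+ * Example: assuming a 19.2 MHz reference clock (ref_clk_khz =
+ * 19200) and gcnt_us = 1, gcnt = 19200 * 1 / 1000 = 19
+ * reference-clock cycles used as the ring-oscillator gate count.
+ */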
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+ desc->timer_delay_us);
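+ /*
+ * Example: with the same assumed 19.2 MHz reference and
+ * timer_delay_us = 5000, val = 19200 * 5000 / 1000 = 96000
+ * cycles, i.e. one CPR measurement every 5 ms.
+ */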
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+ __func__, state, cpr_get_cur_perf_state(drv));
+
+ /*
+ * Determine the new corner we're going to.
+ * Subtract one, since the lowest performance state is 1.
+ */
+ corner = drv->corners + state - 1;
+ end = &drv->corners[drv->num_corners - 1];
+ if (corner > end || corner < drv->corners) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Determine direction */
+ if (drv->corner > corner)
+ dir = DOWN;
+ else if (drv->corner < corner)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ if (cpr_is_allowed(drv))
+ new_uV = corner->last_uV;
+ else
+ new_uV = corner->uV;
+
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv)) {
+ cpr_irq_clr(drv);
+ if (drv->corner != corner)
+ cpr_corner_restore(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
+{
+ struct nvmem_cell *cell;
+ ssize_t len;
+ char *ret;
+ int i;
+
+ *data = 0;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) != -EPROBE_DEFER)
+ dev_err(dev, "undefined cell %s\n", cname);
+ return PTR_ERR(cell);
+ }
+
+ ret = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(ret)) {
+ dev_err(dev, "can't read cell %s\n", cname);
+ return PTR_ERR(ret);
+ }
+
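+ /* Assemble the little-endian cell bytes into a single u32 */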
+ for (i = 0; i < len; i++)
+ *data |= ret[i] << (8 * i);
+
+ kfree(ret);
+ dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
+
+ return 0;
+}
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ u32 data;
+ int ret;
+
+ for (; fuse < end; fuse++, fuses++) {
+ ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
+ &data);
+ if (ret)
+ return ret;
+ fuse->ring_osc_idx = data;
+ }
+
+ return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+ const struct fuse_corner_data *fdata,
+ const char *init_v_efuse,
+ int step_volt,
+ struct cpr_drv *drv)
+{
+ int step_size_uV, steps, uV;
+ u32 bits = 0;
+ int ret;
+
+ ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
+ if (ret)
+ return ret;
+
+ steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+ /* Not two's complement; the highest bit is a sign bit instead */
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
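+ /*
+ * Worked example (hypothetical fuse value, width = 6):
+ * bits = 0b100011 has the sign bit set and magnitude 3,
+ * so steps = -3 and the fused offset below is
+ * -3 * init_voltage_step.
+ */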
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+ * Update SoC voltages: platforms might choose a different
+ * regulator than the one used to characterize the algorithm
+ * parameters (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+ * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+ ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
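+ /*
+ * Example (hypothetical fuses): quot_diff = 700 across fuse
+ * corners whose max frequencies differ by 500 MHz gives
+ * scaling = 1000 * 700 / 500 = 1400 quotient steps per GHz,
+ * capped at fdata->max_quot_scale below.
+ */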
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ do_div(temp, f_high - f_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+ * have units of Hz. Divide by 1000000 to convert the
+ * frequency difference to MHz.
+ */
+ temp_limit = f_diff * fdata->max_volt_scale;
+ do_div(temp_limit, 1000000);
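+ /*
+ * Example (hypothetical corner): f_high = 1.4 GHz at 1288000 uV,
+ * f_low = 1.1 GHz at 1224000 uV and corner->freq = 1.2 GHz give
+ * f_diff = 200 MHz and temp = 200e6 * 64000 / 300e6 ~= 42666 uV
+ * below uV_high, subject to the temp_limit clamp.
+ */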
+
+ uV = uV_high - min(temp, temp_limit);
+ return roundup(uV, step_volt);
+}
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+ struct device_node *np;
+ unsigned int fuse_corner = 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+ if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+ pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+ __func__);
+
+ of_node_put(np);
+
+ return fuse_corner;
+}
+
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+ struct device *cpu_dev)
+{
+ u64 rate = 0;
+ struct device_node *ref_np;
+ struct device_node *desc_np;
+ struct device_node *child_np = NULL;
+ struct device_node *child_req_np = NULL;
+
+ desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!desc_np)
+ return 0;
+
+ ref_np = dev_pm_opp_get_of_node(ref);
+ if (!ref_np)
+ goto out_ref;
+
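+ /*
+ * Walk the CPU's OPP nodes until one is found whose
+ * required-opps phandle refers back to our reference OPP node.
+ */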
+ do {
+ of_node_put(child_req_np);
+ child_np = of_get_next_available_child(desc_np, child_np);
+ child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+ } while (child_np && child_req_np != ref_np);
+
+ if (child_np && child_req_np == ref_np)
+ of_property_read_u64(child_np, "opp-hz", &rate);
+
+ of_node_put(child_req_np);
+ of_node_put(child_np);
+ of_node_put(ref_np);
+out_ref:
+ of_node_put(desc_np);
+
+ return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ int i, level, scaling = 0;
+ unsigned int fnum, fc;
+ const char *quot_offset;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ struct corner_data *cdata;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling;
+ unsigned long freq_diff, freq_diff_mhz;
+ unsigned long freq;
+ int step_volt = regulator_get_linear_step(drv->vdd_apc);
+ struct dev_pm_opp *opp;
+
+ if (!step_volt)
+ return -EINVAL;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+
+ cdata = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(struct corner_data),
+ GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (level = 1; level <= drv->num_corners; level++) {
+ opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+ if (IS_ERR(opp))
+ return -EINVAL;
+ fc = cpr_get_fuse_corner(opp);
+ if (!fc) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ fnum = fc - 1;
+ freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+ if (!freq) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ cdata[level - 1].fuse_corner = fnum;
+ cdata[level - 1].freq = freq;
+
+ fuse = &drv->fuse_corners[fnum];
+ dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+ freq, dev_pm_opp_get_level(opp) - 1, fnum);
+ if (freq > fuse->max_freq)
+ fuse->max_freq = freq;
+ dev_pm_opp_put(opp);
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ *   +                           +
+ *   |                         v |
+ * q |           f c           o |           f c
+ * u |         c               l |         c
+ * o |       f                 t |       f
+ * t |     c                   a |     c
+ *   | c f                     g | c f
+ *   |                         e |
+ *   +---------------            +----------------
+ *     0 1 2 3 4 5 6               0 1 2 3 4 5 6
+ *       corner                       corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
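+ /*
+ * Worked example (hypothetical numbers): with scaling = 1400 and
+ * a corner running 200 MHz below its fuse corner's max frequency,
+ * quot_adjust = 200 * 1400 / 1000 = 280 quotient steps are
+ * subtracted from the fused target quotient for that corner.
+ */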
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ fnum = cdata[i].fuse_corner;
+ fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+ quot_offset = fuses[fnum].quotient_offset;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->freq = cdata[i].freq;
+ corner->uV = fuse->uV;
+
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ scaling = cpr_calculate_scaling(quot_offset, drv,
+ fdata, corner);
+ if (scaling < 0)
+ return scaling;
+
+ apply_scaling = true;
+ } else if (corner->freq == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - corner->freq;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ corner->uV = cpr_interpolate(corner, step_volt, fdata);
+ }
+
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+
+ return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct cpr_fuse *fuses;
+ int i;
+
+ fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+ sizeof(struct cpr_fuse),
+ GFP_KERNEL);
+ if (!fuses)
+ return ERR_PTR(-ENOMEM);
+
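+ /*
+ * The nvmem cell names are derived per fuse corner, e.g.
+ * "cpr_ring_osc1" and "cpr_init_voltage1" for the first one.
+ */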
+ for (i = 0; i < desc->num_fuse_corners; i++) {
+ char tbuf[32];
+
+ snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+ fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].ring_osc)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+ fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].init_voltage)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+ fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].quotient)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+ fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].quotient_offset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+ drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct clk *clk;
+
+ clk = clk_get(drv->dev, "ref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+ clk_put(clk);
+
+ if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+ desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+ desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+ desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+ desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+ desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ return -EINVAL;
+
+ dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+ desc->up_threshold, desc->down_threshold);
+
+ return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ const struct corner *end;
+ struct corner *iter;
+ unsigned int i = 0;
+
+ if (!drv->cpu_clk) {
+ dev_err(drv->dev, "cannot get rate from NULL clk\n");
+ return -EINVAL;
+ }
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ /*
+ * Some bootloaders set a CPU clock frequency that is not defined
+ * in the OPP table. When running at an unlisted frequency,
+ * cpufreq_online() will change to the OPP which has the lowest
+ * frequency, at or above the unlisted frequency.
+ * Since cpufreq_online() always "rounds up" in the case of an
+ * unlisted frequency, this function always "rounds down" in case
+ * of an unlisted frequency. That way, when cpufreq_online()
+ * triggers the first ever call to cpr_set_performance_state(),
+ * it will correctly determine the direction as UP.
+ */
+ for (iter = drv->corners; iter <= end; iter++) {
+ if (iter->freq > rate)
+ break;
+ i++;
+ if (iter->freq == rate) {
+ drv->corner = iter;
+ break;
+ }
+ if (iter->freq < rate)
+ drv->corner = iter;
+ }
+
+ if (!drv->corner) {
+ dev_err(drv->dev, "boot up corner not found\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+ return 0;
+}
+
+static const struct cpr_desc qcs404_cpr_desc = {
+ .num_fuse_corners = 3,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 25, 25, 25, },
+ .timer_delay_us = 5000,
+ .timer_cons_up = 0,
+ .timer_cons_down = 2,
+ .up_threshold = 1,
+ .down_threshold = 3,
+ .idle_clocks = 15,
+ .gcnt_us = 1,
+ .vdd_apc_step_up_limit = 1,
+ .vdd_apc_step_down_limit = 1,
+ .cpr_fuses = {
+ .init_voltage_step = 8000,
+ .init_voltage_width = 6,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* fuse corner 0 */
+ {
+ .ref_uV = 1224000,
+ .max_uV = 1224000,
+ .min_uV = 1048000,
+ .max_volt_scale = 0,
+ .max_quot_scale = 0,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 1 */
+ {
+ .ref_uV = 1288000,
+ .max_uV = 1288000,
+ .min_uV = 1048000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = -20,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 2 */
+ {
+ .ref_uV = 1352000,
+ .max_uV = 1384000,
+ .min_uV = 1088000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ },
+ },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xb120, 0x1041040 },
+ { 0xb124, 0x41 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ },
+ .config = (struct reg_sequence[]){
+ { 0xb138, 0xff },
+ { 0xb130, 0x5555 },
+ },
+ .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+ .cpr_desc = &qcs404_cpr_desc,
+ .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+ * cpr_scale_voltage() requires the direction (whether we are
+ * changing to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner(), which reads the CPU clock frequency
+ * set by the bootloader, so that the direction can be determined
+ * on that first call.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+ * here, when attaching to the power domain, since the maximum
+ * frequency of each fuse corner is only known once the cpufreq
+ * driver has attached to us.
+ */
+ ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+ if (ret < 0) {
+ dev_err(drv->dev, "could not get OPP count\n");
+ goto unlock;
+ }
+ drv->num_corners = ret;
+
+ if (drv->num_corners < 2) {
+ dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(*drv->corners),
+ GFP_KERNEL);
+ if (!drv->corners) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = cpr_corner_init(drv);
+ if (ret)
+ goto unlock;
+
+ cpr_set_loop_allowed(drv);
+
+ ret = cpr_init_parameters(drv);
+ if (ret)
+ goto unlock;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ goto unlock;
+
+ ret = cpr_find_initial_corner(drv);
+ if (ret)
+ goto unlock;
+
+ if (acc_desc->config)
+ regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+ acc_desc->num_regs_per_fuse);
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+ dev_info(drv->dev, "driver initialized with %u OPPs\n",
+ drv->num_corners);
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ struct cpr_drv *drv = s->private;
+ struct fuse_corner *fuse_corner;
+ struct corner *corner;
+
+ corner = drv->corner;
+ fuse_corner = corner->fuse_corner;
+
+ seq_printf(s, "corner, current_volt = %d uV\n",
+ corner->last_uV);
+
+ ro_sel = fuse_corner->ring_osc_idx;
+ gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+ seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+ ctl = cpr_read(drv, REG_RBCPR_CTL);
+ seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+ irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+ reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+ seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ seq_printf(s, " [step_dn = %u", step_dn);
+
+ seq_printf(s, ", step_up = %u", step_up);
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ seq_printf(s, ", error_steps = %u", error_steps);
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ seq_printf(s, ", error = %u", error);
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ seq_printf(s, ", busy = %u]\n", busy);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+ drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+ debugfs_create_file("debug_info", 0444, drv->debugfs,
+ drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ int irq, ret;
+ const struct cpr_acc_desc *data;
+ struct device_node *np;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+ data = of_device_get_match_data(dev);
+ if (!data || !data->cpr_desc || !data->acc_desc)
+ return -EINVAL;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+ drv->dev = dev;
+ drv->desc = data->cpr_desc;
+ drv->acc_desc = data->acc_desc;
+
+ drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+ sizeof(*drv->fuse_corners),
+ GFP_KERNEL);
+ if (!drv->fuse_corners)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ drv->tcsr = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+
+ /*
+ * Initialize the fuse corners first, since they depend
+ * only on data in the efuses.
+ * Everything related to (virtual) corners has to be
+ * initialized after attaching to the power domain,
+ * since it depends on the CPU's OPP table.
+ */
+ ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
+ if (ret)
+ return ret;
+
+ drv->cpr_fuses = cpr_get_fuses(drv);
+ if (IS_ERR(drv->cpr_fuses))
+ return PTR_ERR(drv->cpr_fuses);
+
+ ret = cpr_populate_ring_osc_idx(drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_fuse_corner_init(drv);
+ if (ret)
+ return ret;
+
+ mutex_init(&drv->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+ GFP_KERNEL);
+ if (!drv->pd.name)
+ return -EINVAL;
+
+ drv->pd.power_off = cpr_power_off;
+ drv->pd.power_on = cpr_power_on;
+ drv->pd.set_performance_state = cpr_set_performance_state;
+ drv->pd.opp_to_performance_state = cpr_get_performance_state;
+ drv->pd.attach_dev = cpr_pd_attach_dev;
+
+ ret = pm_genpd_init(&drv->pd, NULL, true);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&drv->pd);
+
+ debugfs_remove_recursive(drv->debugfs);
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 398fc954419e..eece97f97ef8 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -152,18 +152,18 @@ static void px30_iodomain_init(struct rockchip_iodomain *iod)
int ret;
u32 val;
- /* if no VCCIO0 supply we should leave things alone */
+ /* if no VCCIO6 supply we should leave things alone */
if (!iod->supplies[PX30_IO_VSEL_VCCIO6_SUPPLY_NUM].reg)
return;
/*
- * set vccio0 iodomain to also use this framework
+ * set vccio6 iodomain to also use this framework
* instead of a special gpio.
*/
val = PX30_IO_VSEL_VCCIO6_SRC | (PX30_IO_VSEL_VCCIO6_SRC << 16);
ret = regmap_write(iod->grf, PX30_IO_VSEL, val);
if (ret < 0)
- dev_warn(iod->dev, "couldn't update vccio0 ctrl\n");
+ dev_warn(iod->dev, "couldn't update vccio6 ctrl\n");
}
static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c721939767eb..513efe8e7628 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -26,7 +26,7 @@ config POWER_RESET_AT91_POWEROFF
config POWER_RESET_AT91_RESET
tristate "Atmel AT91 reset driver"
depends on ARCH_AT91
- default SOC_AT91SAM9 || SOC_SAMA5
+ default SOC_AT91SAM9 || SOC_SAM9X60 || SOC_SAMA5
help
This driver supports restart for Atmel AT91SAM9 and SAMA5
SoCs
@@ -34,7 +34,7 @@ config POWER_RESET_AT91_RESET
config POWER_RESET_AT91_SAMA5D2_SHDWC
tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
depends on ARCH_AT91
- default SOC_SAMA5
+ default SOC_SAM9X60 || SOC_SAMA5
help
This driver supports the alternate shutdown controller for some Atmel
SAMA5 SoCs. It is present for example on SAMA5D2 SoC.
@@ -141,14 +141,14 @@ config POWER_RESET_LTC2952
down via the LTC2952. Bindings are made in the device tree.
config POWER_RESET_MT6323
- bool "MediaTek MT6323 power-off driver"
- depends on MFD_MT6397
- help
- The power-off driver is responsible for externally shutdown down
- the power of a remote MediaTek SoC MT6323 is connected to through
- controlling a tiny circuit BBPU inside MT6323 RTC.
-
- Say Y if you have a board where MT6323 could be found.
+ bool "MediaTek MT6323 power-off driver"
+ depends on MFD_MT6397
+ help
+ The power-off driver is responsible for externally shutting down
+ the power of the remote MediaTek SoC the MT6323 is connected to,
+ by controlling a tiny BBPU circuit inside the MT6323 RTC.
+
+ Say Y if you have a board where MT6323 could be found.
config POWER_RESET_QNAP
bool "QNAP power-off driver"
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 1c18f465a245..2fe3a627cb53 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -66,7 +66,7 @@
#define SHDW_CFG_NOT_USED (32)
-struct shdwc_config {
+struct shdwc_reg_config {
u8 wkup_pin_input;
u8 mr_rtcwk_shift;
u8 mr_rttwk_shift;
@@ -74,8 +74,17 @@ struct shdwc_config {
u8 sr_rttwk_shift;
};
+struct pmc_reg_config {
+ u8 mckr;
+};
+
+struct reg_config {
+ struct shdwc_reg_config shdwc;
+ struct pmc_reg_config pmc;
+};
+
struct shdwc {
- const struct shdwc_config *cfg;
+ const struct reg_config *rcfg;
struct clk *sclk;
void __iomem *shdwc_base;
void __iomem *mpddrc_base;
@@ -95,6 +104,7 @@ static const unsigned long long sdwc_dbc_period[] = {
static void __init at91_wakeup_status(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
+ const struct reg_config *rcfg = shdw->rcfg;
u32 reg;
char *reason = "unknown";
@@ -106,11 +116,11 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
if (!reg)
return;
- if (SHDW_WK_PIN(reg, shdw->cfg))
+ if (SHDW_WK_PIN(reg, &rcfg->shdwc))
reason = "WKUP pin";
- else if (SHDW_RTCWK(reg, shdw->cfg))
+ else if (SHDW_RTCWK(reg, &rcfg->shdwc))
reason = "RTC";
- else if (SHDW_RTTWK(reg, shdw->cfg))
+ else if (SHDW_RTTWK(reg, &rcfg->shdwc))
reason = "RTT";
pr_info("AT91: Wake-Up source: %s\n", reason);
@@ -131,9 +141,9 @@ static void at91_poweroff(void)
" str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
/* Switch the master clock source to slow clock. */
- "1: ldr r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ "1: ldr r6, [%4, %5]\n\t"
" bic r6, r6, #" __stringify(AT91_PMC_CSS) "\n\t"
- " str r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ " str r6, [%4, %5]\n\t"
/* Wait for clock switch. */
"2: ldr r6, [%4, #" __stringify(AT91_PMC_SR) "]\n\t"
" tst r6, #" __stringify(AT91_PMC_MCKRDY) "\n\t"
@@ -148,7 +158,8 @@ static void at91_poweroff(void)
"r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
"r" (at91_shdwc->shdwc_base),
"r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW),
- "r" (at91_shdwc->pmc_base)
+ "r" (at91_shdwc->pmc_base),
+ "r" (at91_shdwc->rcfg->pmc.mckr)
: "r6");
}
@@ -215,6 +226,7 @@ static u32 at91_shdwc_get_wakeup_input(struct platform_device *pdev,
static void at91_shdwc_dt_configure(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
+ const struct reg_config *rcfg = shdw->rcfg;
struct device_node *np = pdev->dev.of_node;
u32 mode = 0, tmp, input;
@@ -227,10 +239,10 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
mode |= AT91_SHDW_WKUPDBC(at91_shdwc_debouncer_value(pdev, tmp));
if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
- mode |= SHDW_RTCWKEN(shdw->cfg);
+ mode |= SHDW_RTCWKEN(&rcfg->shdwc);
if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
- mode |= SHDW_RTTWKEN(shdw->cfg);
+ mode |= SHDW_RTTWKEN(&rcfg->shdwc);
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
@@ -239,30 +251,40 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
writel(input, shdw->shdwc_base + AT91_SHDW_WUIR);
}
-static const struct shdwc_config sama5d2_shdwc_config = {
- .wkup_pin_input = 0,
- .mr_rtcwk_shift = 17,
- .mr_rttwk_shift = SHDW_CFG_NOT_USED,
- .sr_rtcwk_shift = 5,
- .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+static const struct reg_config sama5d2_reg_config = {
+ .shdwc = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = SHDW_CFG_NOT_USED,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+ },
+ .pmc = {
+ .mckr = 0x30,
+ },
};
-static const struct shdwc_config sam9x60_shdwc_config = {
- .wkup_pin_input = 0,
- .mr_rtcwk_shift = 17,
- .mr_rttwk_shift = 16,
- .sr_rtcwk_shift = 5,
- .sr_rttwk_shift = 4,
+static const struct reg_config sam9x60_reg_config = {
+ .shdwc = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = 16,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = 4,
+ },
+ .pmc = {
+ .mckr = 0x28,
+ },
};
static const struct of_device_id at91_shdwc_of_match[] = {
{
.compatible = "atmel,sama5d2-shdwc",
- .data = &sama5d2_shdwc_config,
+ .data = &sama5d2_reg_config,
},
{
.compatible = "microchip,sam9x60-shdwc",
- .data = &sam9x60_shdwc_config,
+ .data = &sam9x60_reg_config,
}, {
/*sentinel*/
}
@@ -303,7 +325,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
- at91_shdwc->cfg = match->data;
+ at91_shdwc->rcfg = match->data;
at91_shdwc->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(at91_shdwc->sclk))
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 308ca9d9d276..5466eeea261c 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -64,9 +64,11 @@ static int gpio_restart_probe(struct platform_device *pdev)
gpio_restart->reset_gpio = devm_gpiod_get(&pdev->dev, NULL,
open_source ? GPIOD_IN : GPIOD_OUT_LOW);
- if (IS_ERR(gpio_restart->reset_gpio)) {
- dev_err(&pdev->dev, "Could not get reset GPIO\n");
- return PTR_ERR(gpio_restart->reset_gpio);
+ ret = PTR_ERR_OR_ZERO(gpio_restart->reset_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get reset GPIO\n");
+ return ret;
}
gpio_restart->restart_handler.notifier_call = gpio_restart_notify;
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 27164a1d3c7c..9a5591ab90d0 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -73,10 +73,10 @@ config WM831X_POWER
provided by Wolfson Microelectronics WM831x PMICs.
config WM8350_POWER
- tristate "WM8350 PMU support"
- depends on MFD_WM8350
- help
- Say Y here to enable support for the power management unit
+ tristate "WM8350 PMU support"
+ depends on MFD_WM8350
+ help
+ Say Y here to enable support for the power management unit
provided by the Wolfson Microelectronics WM8350 PMIC.
config TEST_POWER
@@ -209,16 +209,16 @@ config BATTERY_WM97XX
Say Y to enable support for battery measured by WM97xx aux port.
config BATTERY_SBS
- tristate "SBS Compliant gas gauge"
- depends on I2C
- help
+ tristate "SBS Compliant gas gauge"
+ depends on I2C
+ help
Say Y to include support for SBS battery driver for SBS-compliant
gas gauges.
config CHARGER_SBS
- tristate "SBS Compliant charger"
- depends on I2C
- help
+ tristate "SBS Compliant charger"
+ depends on I2C
+ help
Say Y to include support for SBS compliant battery chargers.
config MANAGER_SBS
@@ -484,11 +484,11 @@ config CHARGER_MANAGER
depends on REGULATOR
select EXTCON
help
- Say Y to enable charger-manager support, which allows multiple
- chargers attached to a battery and multiple batteries attached to a
- system. The charger-manager also can monitor charging status in
- runtime and in suspend-to-RAM by waking up the system periodically
- with help of suspend_again support.
+ Say Y to enable charger-manager support, which allows multiple
+ chargers attached to a battery and multiple batteries attached to a
+ system. The charger-manager can also monitor charging status at
+ runtime and in suspend-to-RAM by waking up the system periodically
+ with the help of suspend_again support.
config CHARGER_LT3651
tristate "Analog Devices LT3651 charger"
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 8a0f9d769690..f69550d64f09 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -789,7 +789,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
ret = -ENXIO;
break;
- };
+ }
di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
@@ -1079,7 +1079,7 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
ret = -EPERM;
break;
- };
+ }
di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
return ret;
}
@@ -2427,7 +2427,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
default:
break;
- };
+ }
}
/**
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index c3912ee9eb99..b96f90a82ecf 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2221,10 +2221,10 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
ab8500_fg_update_cap_scalers(di);
queue_work(di->fg_wq, &di->fg_work);
break;
- };
+ }
default:
break;
- };
+ }
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
switch (ext->desc->type) {
@@ -2331,7 +2331,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_MAX_TIME_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_FLAG_TIME_REG, di->bm->fg_params->pcut_flag_time);
@@ -2339,7 +2339,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_FLAG_TIME_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_RESTART_REG, di->bm->fg_params->pcut_max_restart);
@@ -2347,7 +2347,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_RESTART_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_DEBOUNCE_REG, di->bm->fg_params->pcut_debounce_time);
@@ -2355,7 +2355,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_DEBOUNCE_REG\n", __func__);
goto out;
- };
+ }
ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
AB8505_RTC_PCUT_CTL_STATUS_REG, di->bm->fg_params->pcut_enable);
@@ -2363,7 +2363,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
if (ret) {
dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_CTL_STATUS_REG\n", __func__);
goto out;
- };
+ }
}
out:
return ret;
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index e6e37d4f20e4..2fb33a07879a 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1823,7 +1823,7 @@ static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
"Enter 0. Disable AC/USB Charging\n"
"1. Enable AC charging\n"
"2. Enable USB Charging\n");
- };
+ }
return strlen(buf);
}
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 0d34a932b6d5..ac360016b08a 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -23,6 +24,9 @@
#define AXP20X_PWR_STATUS_ACIN_PRESENT BIT(7)
#define AXP20X_PWR_STATUS_ACIN_AVAIL BIT(6)
+#define AXP813_ACIN_PATH_SEL BIT(7)
+#define AXP813_ACIN_PATH_SEL_TO_BIT(x) (!!(x) << 7)
+
#define AXP813_VHOLD_MASK GENMASK(5, 3)
#define AXP813_VHOLD_UV_TO_BIT(x) ((((x) / 100000) - 40) << 3)
#define AXP813_VHOLD_REG_TO_UV(x) \
@@ -40,6 +44,9 @@ struct axp20x_ac_power {
struct power_supply *supply;
struct iio_channel *acin_v;
struct iio_channel *acin_i;
+ bool has_acin_path_sel;
+ unsigned int num_irqs;
+ unsigned int irqs[];
};
static irqreturn_t axp20x_ac_power_irq(int irq, void *devid)
@@ -86,6 +93,17 @@ static int axp20x_ac_power_get_property(struct power_supply *psy,
return ret;
val->intval = !!(reg & AXP20X_PWR_STATUS_ACIN_AVAIL);
+
+ /* ACIN_PATH_SEL disables ACIN even if ACIN_AVAIL is set. */
+ if (val->intval && power->has_acin_path_sel) {
+ ret = regmap_read(power->regmap, AXP813_ACIN_PATH_CTRL,
+ &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & AXP813_ACIN_PATH_SEL);
+ }
+
return 0;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
@@ -143,6 +161,11 @@ static int axp813_ac_power_set_property(struct power_supply *psy,
struct axp20x_ac_power *power = power_supply_get_drvdata(psy);
switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return regmap_update_bits(power->regmap, AXP813_ACIN_PATH_CTRL,
+ AXP813_ACIN_PATH_SEL,
+ AXP813_ACIN_PATH_SEL_TO_BIT(val->intval));
+
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
if (val->intval < 4000000 || val->intval > 4700000)
return -EINVAL;
@@ -169,7 +192,8 @@ static int axp813_ac_power_set_property(struct power_supply *psy,
static int axp813_ac_power_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
+ return psp == POWER_SUPPLY_PROP_ONLINE ||
+ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
}
@@ -221,34 +245,86 @@ static const struct power_supply_desc axp813_ac_power_desc = {
.set_property = axp813_ac_power_set_property,
};
+static const char * const axp20x_irq_names[] = {
+ "ACIN_PLUGIN",
+ "ACIN_REMOVAL",
+};
+
struct axp_data {
const struct power_supply_desc *power_desc;
+ const char * const *irq_names;
+ unsigned int num_irq_names;
bool acin_adc;
+ bool acin_path_sel;
};
static const struct axp_data axp20x_data = {
- .power_desc = &axp20x_ac_power_desc,
- .acin_adc = true,
+ .power_desc = &axp20x_ac_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .acin_adc = true,
+ .acin_path_sel = false,
};
static const struct axp_data axp22x_data = {
- .power_desc = &axp22x_ac_power_desc,
- .acin_adc = false,
+ .power_desc = &axp22x_ac_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .acin_adc = false,
+ .acin_path_sel = false,
};
static const struct axp_data axp813_data = {
- .power_desc = &axp813_ac_power_desc,
- .acin_adc = false,
+ .power_desc = &axp813_ac_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .acin_adc = false,
+ .acin_path_sel = true,
};
+#ifdef CONFIG_PM_SLEEP
+static int axp20x_ac_power_suspend(struct device *dev)
+{
+ struct axp20x_ac_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ /*
+ * Allow wake via ACIN_PLUGIN only.
+ *
+ * As nested threaded IRQs are not automatically disabled during
+ * suspend, we must explicitly disable the remainder of the IRQs.
+ */
+ if (device_may_wakeup(&power->supply->dev))
+ enable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ disable_irq(power->irqs[i++]);
+
+ return 0;
+}
+
+static int axp20x_ac_power_resume(struct device *dev)
+{
+ struct axp20x_ac_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ if (device_may_wakeup(&power->supply->dev))
+ disable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ enable_irq(power->irqs[i++]);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(axp20x_ac_power_pm_ops, axp20x_ac_power_suspend,
+ axp20x_ac_power_resume);
+
static int axp20x_ac_power_probe(struct platform_device *pdev)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_ac_power *power;
const struct axp_data *axp_data;
- static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
- NULL };
int i, irq, ret;
if (!of_device_is_available(pdev->dev.of_node))
@@ -259,12 +335,14 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
return -EINVAL;
}
- power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ axp_data = of_device_get_match_data(&pdev->dev);
+
+ power = devm_kzalloc(&pdev->dev,
+ struct_size(power, irqs, axp_data->num_irq_names),
+ GFP_KERNEL);
if (!power)
return -ENOMEM;
- axp_data = of_device_get_match_data(&pdev->dev);
-
if (axp_data->acin_adc) {
power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
if (IS_ERR(power->acin_v)) {
@@ -282,6 +360,8 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
}
power->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ power->has_acin_path_sel = axp_data->acin_path_sel;
+ power->num_irqs = axp_data->num_irq_names;
platform_set_drvdata(pdev, power);
@@ -295,20 +375,22 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
return PTR_ERR(power->supply);
/* Request irqs after registering, as irqs may trigger immediately */
- for (i = 0; irq_names[i]; i++) {
- irq = platform_get_irq_byname(pdev, irq_names[i]);
+ for (i = 0; i < axp_data->num_irq_names; i++) {
+ irq = platform_get_irq_byname(pdev, axp_data->irq_names[i]);
if (irq < 0) {
- dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
- irq_names[i], irq);
- continue;
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ axp_data->irq_names[i], irq);
+ return irq;
}
- irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
- ret = devm_request_any_context_irq(&pdev->dev, irq,
+ power->irqs[i] = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+ ret = devm_request_any_context_irq(&pdev->dev, power->irqs[i],
axp20x_ac_power_irq, 0,
DRVNAME, power);
- if (ret < 0)
- dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
- irq_names[i], ret);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error requesting %s IRQ: %d\n",
+ axp_data->irq_names[i], ret);
+ return ret;
+ }
}
return 0;
@@ -331,8 +413,9 @@ MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
static struct platform_driver axp20x_ac_power_driver = {
.probe = axp20x_ac_power_probe,
.driver = {
- .name = DRVNAME,
- .of_match_table = axp20x_ac_power_match,
+ .name = DRVNAME,
+ .of_match_table = axp20x_ac_power_match,
+ .pm = &axp20x_ac_power_pm_ops,
},
};
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 5f0a5722b19e..4fde24b5f35a 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -29,6 +30,9 @@
#define AXP20X_USB_STATUS_VBUS_VALID BIT(2)
+#define AXP20X_VBUS_PATH_SEL BIT(7)
+#define AXP20X_VBUS_PATH_SEL_OFFSET 7
+
#define AXP20X_VBUS_VHOLD_uV(b) (4000000 + (((b) >> 3) & 7) * 100000)
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
@@ -57,7 +61,6 @@
#define DEBOUNCE_TIME msecs_to_jiffies(50)
struct axp20x_usb_power {
- struct device_node *np;
struct regmap *regmap;
struct power_supply *supply;
enum axp20x_variants axp20x_id;
@@ -65,14 +68,32 @@ struct axp20x_usb_power {
struct iio_channel *vbus_i;
struct delayed_work vbus_detect;
unsigned int old_status;
+ unsigned int online;
+ unsigned int num_irqs;
+ unsigned int irqs[];
};
+static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
+{
+ /*
+ * Polling is only necessary while VBUS is offline. While online, a
+ * present->absent transition implies an online->offline transition
+ * and will trigger the VBUS_REMOVAL IRQ.
+ */
+ if (power->axp20x_id >= AXP221_ID && !power->online)
+ return true;
+
+ return false;
+}
+
static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
{
struct axp20x_usb_power *power = devid;
power_supply_changed(power->supply);
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+
return IRQ_HANDLED;
}
@@ -92,17 +113,11 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
power_supply_changed(power->supply);
power->old_status = val;
+ power->online = val & AXP20X_PWR_STATUS_VBUS_USED;
out:
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
-}
-
-static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
-{
- if (power->axp20x_id >= AXP221_ID)
- return true;
-
- return false;
+ if (axp20x_usb_vbus_needs_polling(power))
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
@@ -264,6 +279,16 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
return 0;
}
+static int axp813_usb_power_set_online(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val = !intval << AXP20X_VBUS_PATH_SEL_OFFSET;
+
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_PATH_SEL, val);
+}
+
static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
int intval)
{
@@ -345,6 +370,11 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (power->axp20x_id != AXP813_ID)
+ return -EINVAL;
+ return axp813_usb_power_set_online(power, val->intval);
+
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
return axp20x_usb_power_set_voltage_min(power, val->intval);
@@ -364,6 +394,18 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
+ struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
+
+ /*
+ * The VBUS path select flag works differently on AXP288 and newer:
+ * - On AXP20x and AXP22x, the flag enables VBUS (ignoring N_VBUSEN).
+ * - On AXP288 and AXP8xx, the flag disables VBUS (ignoring N_VBUSEN).
+ * We only expose the control on variants where it can be used to force
+ * the VBUS input offline.
+ */
+ if (psp == POWER_SUPPLY_PROP_ONLINE)
+ return power->axp20x_id == AXP813_ID;
+
return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
psp == POWER_SUPPLY_PROP_CURRENT_MAX;
}
@@ -406,6 +448,92 @@ static const struct power_supply_desc axp22x_usb_power_desc = {
.set_property = axp20x_usb_power_set_property,
};
+static const char * const axp20x_irq_names[] = {
+ "VBUS_PLUGIN",
+ "VBUS_REMOVAL",
+ "VBUS_VALID",
+ "VBUS_NOT_VALID",
+};
+
+static const char * const axp22x_irq_names[] = {
+ "VBUS_PLUGIN",
+ "VBUS_REMOVAL",
+};
+
+struct axp_data {
+ const struct power_supply_desc *power_desc;
+ const char * const *irq_names;
+ unsigned int num_irq_names;
+ enum axp20x_variants axp20x_id;
+};
+
+static const struct axp_data axp202_data = {
+ .power_desc = &axp20x_usb_power_desc,
+ .irq_names = axp20x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp20x_irq_names),
+ .axp20x_id = AXP202_ID,
+};
+
+static const struct axp_data axp221_data = {
+ .power_desc = &axp22x_usb_power_desc,
+ .irq_names = axp22x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp22x_irq_names),
+ .axp20x_id = AXP221_ID,
+};
+
+static const struct axp_data axp223_data = {
+ .power_desc = &axp22x_usb_power_desc,
+ .irq_names = axp22x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp22x_irq_names),
+ .axp20x_id = AXP223_ID,
+};
+
+static const struct axp_data axp813_data = {
+ .power_desc = &axp22x_usb_power_desc,
+ .irq_names = axp22x_irq_names,
+ .num_irq_names = ARRAY_SIZE(axp22x_irq_names),
+ .axp20x_id = AXP813_ID,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int axp20x_usb_power_suspend(struct device *dev)
+{
+ struct axp20x_usb_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ /*
+ * Allow wake via VBUS_PLUGIN only.
+ *
+ * As nested threaded IRQs are not automatically disabled during
+ * suspend, we must explicitly disable the remainder of the IRQs.
+ */
+ if (device_may_wakeup(&power->supply->dev))
+ enable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ disable_irq(power->irqs[i++]);
+
+ return 0;
+}
+
+static int axp20x_usb_power_resume(struct device *dev)
+{
+ struct axp20x_usb_power *power = dev_get_drvdata(dev);
+ int i = 0;
+
+ if (device_may_wakeup(&power->supply->dev))
+ disable_irq_wake(power->irqs[i++]);
+ while (i < power->num_irqs)
+ enable_irq(power->irqs[i++]);
+
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(axp20x_usb_power_pm_ops, axp20x_usb_power_suspend,
+ axp20x_usb_power_resume);
+
static int configure_iio_channels(struct platform_device *pdev,
struct axp20x_usb_power *power)
{
@@ -441,12 +569,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_usb_power *power;
- static const char * const axp20x_irq_names[] = { "VBUS_PLUGIN",
- "VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID", NULL };
- static const char * const axp22x_irq_names[] = {
- "VBUS_PLUGIN", "VBUS_REMOVAL", NULL };
- const char * const *irq_names;
- const struct power_supply_desc *usb_power_desc;
+ const struct axp_data *axp_data;
int i, irq, ret;
if (!of_device_is_available(pdev->dev.of_node))
@@ -457,16 +580,19 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return -EINVAL;
}
- power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ axp_data = of_device_get_match_data(&pdev->dev);
+
+ power = devm_kzalloc(&pdev->dev,
+ struct_size(power, irqs, axp_data->num_irq_names),
+ GFP_KERNEL);
if (!power)
return -ENOMEM;
platform_set_drvdata(pdev, power);
- power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
- &pdev->dev);
- power->np = pdev->dev.of_node;
+ power->axp20x_id = axp_data->axp20x_id;
power->regmap = axp20x->regmap;
+ power->num_irqs = axp_data->num_irq_names;
if (power->axp20x_id == AXP202_ID) {
/* Enable vbus valid checking */
@@ -483,18 +609,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (ret)
return ret;
-
- usb_power_desc = &axp20x_usb_power_desc;
- irq_names = axp20x_irq_names;
- } else if (power->axp20x_id == AXP221_ID ||
- power->axp20x_id == AXP223_ID ||
- power->axp20x_id == AXP813_ID) {
- usb_power_desc = &axp22x_usb_power_desc;
- irq_names = axp22x_irq_names;
- } else {
- dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
- axp20x->variant);
- return -EINVAL;
}
if (power->axp20x_id == AXP813_ID) {
@@ -506,25 +620,29 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
- power->supply = devm_power_supply_register(&pdev->dev, usb_power_desc,
+ power->supply = devm_power_supply_register(&pdev->dev,
+ axp_data->power_desc,
&psy_cfg);
if (IS_ERR(power->supply))
return PTR_ERR(power->supply);
/* Request irqs after registering, as irqs may trigger immediately */
- for (i = 0; irq_names[i]; i++) {
- irq = platform_get_irq_byname(pdev, irq_names[i]);
+ for (i = 0; i < axp_data->num_irq_names; i++) {
+ irq = platform_get_irq_byname(pdev, axp_data->irq_names[i]);
if (irq < 0) {
- dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
- irq_names[i], irq);
- continue;
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ axp_data->irq_names[i], irq);
+ return irq;
+ }
+ power->irqs[i] = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
+ ret = devm_request_any_context_irq(&pdev->dev, power->irqs[i],
+ axp20x_usb_power_irq, 0,
+ DRVNAME, power);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error requesting %s IRQ: %d\n",
+ axp_data->irq_names[i], ret);
+ return ret;
}
- irq = regmap_irq_get_virq(axp20x->regmap_irqc, irq);
- ret = devm_request_any_context_irq(&pdev->dev, irq,
- axp20x_usb_power_irq, 0, DRVNAME, power);
- if (ret < 0)
- dev_warn(&pdev->dev, "Error requesting %s IRQ: %d\n",
- irq_names[i], ret);
}
INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
@@ -546,16 +664,16 @@ static int axp20x_usb_power_remove(struct platform_device *pdev)
static const struct of_device_id axp20x_usb_power_match[] = {
{
.compatible = "x-powers,axp202-usb-power-supply",
- .data = (void *)AXP202_ID,
+ .data = &axp202_data,
}, {
.compatible = "x-powers,axp221-usb-power-supply",
- .data = (void *)AXP221_ID,
+ .data = &axp221_data,
}, {
.compatible = "x-powers,axp223-usb-power-supply",
- .data = (void *)AXP223_ID,
+ .data = &axp223_data,
}, {
.compatible = "x-powers,axp813-usb-power-supply",
- .data = (void *)AXP813_ID,
+ .data = &axp813_data,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
@@ -564,8 +682,9 @@ static struct platform_driver axp20x_usb_power_driver = {
.probe = axp20x_usb_power_probe,
.remove = axp20x_usb_power_remove,
.driver = {
- .name = DRVNAME,
- .of_match_table = axp20x_usb_power_match,
+ .name = DRVNAME,
+ .of_match_table = axp20x_usb_power_match,
+ .pm = &axp20x_usb_power_pm_ops,
},
};
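
The probe rework above sizes a single allocation for the driver state plus its trailing IRQ array with struct_size(). A minimal sketch of that pattern, with illustrative names rather than the driver's actual layout:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_state {
	int num_irqs;
	int irqs[];			/* flexible array member, sized at alloc time */
};

static struct demo_state *demo_alloc(int n)
{
	/* struct_size() adds sizeof(struct demo_state) and n * sizeof(int)
	 * with overflow checking, so header and array share one allocation.
	 */
	struct demo_state *s = kzalloc(struct_size(s, irqs, n), GFP_KERNEL);

	if (s)
		s->num_irqs = n;
	return s;
}

Keeping the array in the same allocation ties its lifetime to the state struct and avoids a second allocation in probe.
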
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 9d1ec8d677de..aebd1253dbc9 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -25,12 +25,20 @@
#define BQ25895_ID 7
#define BQ25896_ID 0
+enum bq25890_chip_version {
+ BQ25890,
+ BQ25892,
+ BQ25895,
+ BQ25896,
+};
+
enum bq25890_fields {
F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */
F_BHOT, F_BCOLD, F_VINDPM_OFS, /* Reg01 */
F_CONV_START, F_CONV_RATE, F_BOOSTF, F_ICO_EN,
F_HVDCP_EN, F_MAXC_EN, F_FORCE_DPM, F_AUTO_DPDM_EN, /* Reg02 */
- F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN, /* Reg03 */
+ F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN,
+ F_MIN_VBAT_SEL, /* Reg03 */
F_PUMPX_EN, F_ICHG, /* Reg04 */
F_IPRECHG, F_ITERM, /* Reg05 */
F_VREG, F_BATLOWV, F_VRECHG, /* Reg06 */
@@ -39,8 +47,9 @@ enum bq25890_fields {
F_BATCMP, F_VCLAMP, F_TREG, /* Reg08 */
F_FORCE_ICO, F_TMR2X_EN, F_BATFET_DIS, F_JEITA_VSET,
F_BATFET_DLY, F_BATFET_RST_EN, F_PUMPX_UP, F_PUMPX_DN, /* Reg09 */
- F_BOOSTV, F_BOOSTI, /* Reg0A */
- F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_VSYS_STAT, /* Reg0B */
+ F_BOOSTV, F_PFM_OTG_DIS, F_BOOSTI, /* Reg0A */
+ F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_0B_RSVD,
+ F_VSYS_STAT, /* Reg0B */
F_WD_FAULT, F_BOOST_FAULT, F_CHG_FAULT, F_BAT_FAULT,
F_NTC_FAULT, /* Reg0C */
F_FORCE_VINDPM, F_VINDPM, /* Reg0D */
@@ -91,7 +100,7 @@ struct bq25890_device {
struct regmap *rmap;
struct regmap_field *rmap_fields[F_MAX_FIELDS];
- int chip_id;
+ enum bq25890_chip_version chip_version;
struct bq25890_init_data init_data;
struct bq25890_state state;
@@ -111,8 +120,7 @@ static const struct regmap_access_table bq25890_writeable_regs = {
static const struct regmap_range bq25890_volatile_reg_ranges[] = {
regmap_reg_range(0x00, 0x00),
regmap_reg_range(0x09, 0x09),
- regmap_reg_range(0x0b, 0x0c),
- regmap_reg_range(0x0e, 0x14),
+ regmap_reg_range(0x0b, 0x14),
};
static const struct regmap_access_table bq25890_volatile_regs = {
@@ -155,7 +163,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_OTG_CFG] = REG_FIELD(0x03, 5, 5),
[F_CHG_CFG] = REG_FIELD(0x03, 4, 4),
[F_SYSVMIN] = REG_FIELD(0x03, 1, 3),
- /* MIN_VBAT_SEL on BQ25896 */
+ [F_MIN_VBAT_SEL] = REG_FIELD(0x03, 0, 0), // BQ25896 only
/* REG04 */
[F_PUMPX_EN] = REG_FIELD(0x04, 7, 7),
[F_ICHG] = REG_FIELD(0x04, 0, 6),
@@ -188,8 +196,8 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_PUMPX_DN] = REG_FIELD(0x09, 0, 0),
/* REG0A */
[F_BOOSTV] = REG_FIELD(0x0A, 4, 7),
- /* PFM_OTG_DIS 3 on BQ25896 */
[F_BOOSTI] = REG_FIELD(0x0A, 0, 2), // reserved on BQ25895
+ [F_PFM_OTG_DIS] = REG_FIELD(0x0A, 3, 3), // BQ25896 only
/* REG0B */
[F_VBUS_STAT] = REG_FIELD(0x0B, 5, 7),
[F_CHG_STAT] = REG_FIELD(0x0B, 3, 4),
@@ -275,6 +283,7 @@ static const union {
struct bq25890_lookup lt;
} bq25890_tables[] = {
/* range tables */
+ /* TODO: BQ25896 has max ICHG 3008 mA */
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
@@ -391,11 +400,13 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- if (bq->chip_id == BQ25890_ID)
+ if (bq->chip_version == BQ25890)
val->strval = "BQ25890";
- else if (bq->chip_id == BQ25895_ID)
+ else if (bq->chip_version == BQ25892)
+ val->strval = "BQ25892";
+ else if (bq->chip_version == BQ25895)
val->strval = "BQ25895";
- else if (bq->chip_id == BQ25896_ID)
+ else if (bq->chip_version == BQ25896)
val->strval = "BQ25896";
else
val->strval = "UNKNOWN";
@@ -741,6 +752,56 @@ static int bq25890_usb_notifier(struct notifier_block *nb, unsigned long val,
return NOTIFY_OK;
}
+static int bq25890_get_chip_version(struct bq25890_device *bq)
+{
+ int id, rev;
+
+ id = bq25890_field_read(bq, F_PN);
+ if (id < 0) {
+ dev_err(bq->dev, "Cannot read chip ID.\n");
+ return id;
+ }
+
+ rev = bq25890_field_read(bq, F_DEV_REV);
+ if (rev < 0) {
+ dev_err(bq->dev, "Cannot read chip revision.\n");
+ return rev;
+ }
+
+ switch (id) {
+ case BQ25890_ID:
+ bq->chip_version = BQ25890;
+ break;
+
+	/* BQ25892 and BQ25896 share the same ID 0 */
+ case BQ25896_ID:
+ switch (rev) {
+ case 2:
+ bq->chip_version = BQ25896;
+ break;
+ case 1:
+ bq->chip_version = BQ25892;
+ break;
+ default:
+ dev_err(bq->dev,
+ "Unknown device revision %d, assume BQ25892\n",
+ rev);
+ bq->chip_version = BQ25892;
+ }
+ break;
+
+ case BQ25895_ID:
+ bq->chip_version = BQ25895;
+ break;
+
+ default:
+ dev_err(bq->dev, "Unknown chip ID %d\n", id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int bq25890_irq_probe(struct bq25890_device *bq)
{
struct gpio_desc *irq;
@@ -859,16 +920,10 @@ static int bq25890_probe(struct i2c_client *client,
i2c_set_clientdata(client, bq);
- bq->chip_id = bq25890_field_read(bq, F_PN);
- if (bq->chip_id < 0) {
- dev_err(dev, "Cannot read chip ID.\n");
- return bq->chip_id;
- }
-
- if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25895_ID)
- && (bq->chip_id != BQ25896_ID)) {
- dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id);
- return -ENODEV;
+ ret = bq25890_get_chip_version(bq);
+ if (ret) {
+ dev_err(dev, "Cannot read chip ID or unknown chip.\n");
+ return ret;
}
if (!dev->platform_data) {
@@ -986,12 +1041,18 @@ static const struct dev_pm_ops bq25890_pm = {
static const struct i2c_device_id bq25890_i2c_ids[] = {
{ "bq25890", 0 },
+ { "bq25892", 0 },
+ { "bq25895", 0 },
+ { "bq25896", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq25890_i2c_ids);
static const struct of_device_id bq25890_of_match[] = {
{ .compatible = "ti,bq25890", },
+ { .compatible = "ti,bq25892", },
+ { .compatible = "ti,bq25895", },
+ { .compatible = "ti,bq25896", },
{ },
};
MODULE_DEVICE_TABLE(of, bq25890_of_match);
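
The new F_MIN_VBAT_SEL and F_PFM_OTG_DIS entries use the driver's existing regmap_field scheme: REG_FIELD(reg, lsb, msb) names a bit span once, and reads go through the field handle instead of open-coded mask/shift arithmetic. A small sketch of the mechanism, assuming an already-initialised regmap (demo names only):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* One bit at position 0 of register 0x03, as F_MIN_VBAT_SEL declares */
static const struct reg_field demo_field = REG_FIELD(0x03, 0, 0);

static int demo_read_field(struct device *dev, struct regmap *map)
{
	struct regmap_field *f;
	unsigned int val;
	int ret;

	f = devm_regmap_field_alloc(dev, map, demo_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* regmap_field_read() applies the mask and shift automatically */
	ret = regmap_field_read(f, &val);
	return ret ? ret : (int)val;
}
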
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 6cc7c3910e09..30c3d37511c9 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -5,7 +5,6 @@
* Copyright (c) 2014 - 2018 Google, Inc
*/
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -132,11 +131,8 @@ static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
ret = cros_usbpd_charger_ec_command(charger, 0,
EC_CMD_CHARGE_PORT_COUNT,
NULL, 0, &resp, sizeof(resp));
- if (ret < 0) {
- dev_err(charger->dev,
- "Unable to get the number of ports (err:0x%x)\n", ret);
+ if (ret < 0)
return ret;
- }
return resp.port_count;
}
@@ -148,11 +144,8 @@ static int cros_usbpd_charger_get_usbpd_num_ports(struct charger_data *charger)
ret = cros_usbpd_charger_ec_command(charger, 0, EC_CMD_USB_PD_PORTS,
NULL, 0, &resp, sizeof(resp));
- if (ret < 0) {
- dev_err(charger->dev,
- "Unable to get the number or ports (err:0x%x)\n", ret);
+ if (ret < 0)
return ret;
- }
return resp.num_ports;
}
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 35816d4b3012..2748715c4c75 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -100,10 +100,17 @@ static int ingenic_battery_set_scale(struct ingenic_battery *bat)
return -EINVAL;
}
- return iio_write_channel_attribute(bat->channel,
- scale_raw[best_idx],
- scale_raw[best_idx + 1],
- IIO_CHAN_INFO_SCALE);
+ /* Only set scale if there is more than one (fractional) entry */
+ if (scale_len > 2) {
+ ret = iio_write_channel_attribute(bat->channel,
+ scale_raw[best_idx],
+ scale_raw[best_idx + 1],
+ IIO_CHAN_INFO_SCALE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static enum power_supply_property ingenic_battery_properties[] = {
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 03592ceaca88..192d9db0fb00 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -149,7 +149,7 @@ static int micro_batt_get_property(struct power_supply *b,
default:
val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
break;
- };
+ }
break;
case POWER_SUPPLY_PROP_STATUS:
val->intval = get_status(b);
@@ -168,7 +168,7 @@ static int micro_batt_get_property(struct power_supply *b,
break;
default:
return -EINVAL;
- };
+ }
return 0;
}
@@ -185,7 +185,7 @@ static int micro_ac_get_property(struct power_supply *b,
break;
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index da49436176cd..30a9014b2f95 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -449,7 +449,7 @@ static int ltc294x_i2c_remove(struct i2c_client *client)
{
struct ltc294x_info *info = i2c_get_clientdata(client);
- cancel_delayed_work(&info->work);
+ cancel_delayed_work_sync(&info->work);
power_supply_unregister(info->supply);
return 0;
}
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 62499018e68b..8a1f0ee493aa 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/max17040_battery.h>
#include <linux/slab.h>
@@ -28,6 +29,9 @@
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
+#define MAX17040_ATHD_MASK 0xFFC0
+#define MAX17040_ATHD_DEFAULT_POWER_UP 4
+
struct max17040_chip {
struct i2c_client *client;
struct delayed_work work;
@@ -42,6 +46,8 @@ struct max17040_chip {
int soc;
/* State Of Charge */
int status;
+	/* Low alert threshold, from 1% up to 32% of the State of Charge */
+ u32 low_soc_alert;
};
static int max17040_get_property(struct power_supply *psy,
@@ -98,6 +104,21 @@ static void max17040_reset(struct i2c_client *client)
max17040_write_reg(client, MAX17040_CMD, 0x0054);
}
+static int max17040_set_low_soc_alert(struct i2c_client *client, u32 level)
+{
+ int ret;
+ u16 data;
+
+ level = 32 - level;
+ data = max17040_read_reg(client, MAX17040_RCOMP);
+	/* clear the ALRT bit and set the alert threshold in the 5 LSBs */
+ data &= MAX17040_ATHD_MASK;
+ data |= level;
+ ret = max17040_write_reg(client, MAX17040_RCOMP, data);
+
+ return ret;
+}
+
static void max17040_get_vcell(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
@@ -160,21 +181,81 @@ static void max17040_get_status(struct i2c_client *client)
chip->status = POWER_SUPPLY_STATUS_FULL;
}
+static int max17040_get_of_data(struct max17040_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+
+ chip->low_soc_alert = MAX17040_ATHD_DEFAULT_POWER_UP;
+ device_property_read_u32(dev,
+ "maxim,alert-low-soc-level",
+ &chip->low_soc_alert);
+
+ if (chip->low_soc_alert <= 0 || chip->low_soc_alert >= 33)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void max17040_check_changes(struct i2c_client *client)
+{
+ max17040_get_vcell(client);
+ max17040_get_soc(client);
+ max17040_get_online(client);
+ max17040_get_status(client);
+}
+
static void max17040_work(struct work_struct *work)
{
struct max17040_chip *chip;
+ int last_soc, last_status;
chip = container_of(work, struct max17040_chip, work.work);
- max17040_get_vcell(chip->client);
- max17040_get_soc(chip->client);
- max17040_get_online(chip->client);
- max17040_get_status(chip->client);
+ /* store SOC and status to check changes */
+ last_soc = chip->soc;
+ last_status = chip->status;
+ max17040_check_changes(chip->client);
+
+ /* check changes and send uevent */
+ if (last_soc != chip->soc || last_status != chip->status)
+ power_supply_changed(chip->battery);
queue_delayed_work(system_power_efficient_wq, &chip->work,
MAX17040_DELAY);
}
+static irqreturn_t max17040_thread_handler(int id, void *dev)
+{
+ struct max17040_chip *chip = dev;
+ struct i2c_client *client = chip->client;
+
+ dev_warn(&client->dev, "IRQ: Alert battery low level");
+ /* read registers */
+ max17040_check_changes(chip->client);
+
+ /* send uevent */
+ power_supply_changed(chip->battery);
+
+ /* reset alert bit */
+ max17040_set_low_soc_alert(client, chip->low_soc_alert);
+
+ return IRQ_HANDLED;
+}
+
+static int max17040_enable_alert_irq(struct max17040_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ unsigned int flags;
+ int ret;
+
+ flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ max17040_thread_handler, flags,
+ chip->battery->desc->name, chip);
+
+ return ret;
+}
+
static enum power_supply_property max17040_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
@@ -196,6 +277,7 @@ static int max17040_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
struct power_supply_config psy_cfg = {};
struct max17040_chip *chip;
+ int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
@@ -206,6 +288,12 @@ static int max17040_probe(struct i2c_client *client,
chip->client = client;
chip->pdata = client->dev.platform_data;
+ ret = max17040_get_of_data(chip);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed: low SOC alert OF data out of bounds\n");
+ return ret;
+ }
i2c_set_clientdata(client, chip);
psy_cfg.drv_data = chip;
@@ -220,6 +308,24 @@ static int max17040_probe(struct i2c_client *client,
max17040_reset(client);
max17040_get_version(client);
+ /* check interrupt */
+ if (client->irq && of_device_is_compatible(client->dev.of_node,
+ "maxim,max77836-battery")) {
+ ret = max17040_set_low_soc_alert(client, chip->low_soc_alert);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to set low SOC alert: err %d\n", ret);
+ return ret;
+ }
+
+ ret = max17040_enable_alert_irq(chip);
+ if (ret) {
+ client->irq = 0;
+ dev_warn(&client->dev,
+ "Failed to get IRQ err %d\n", ret);
+ }
+ }
+
INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
queue_delayed_work(system_power_efficient_wq, &chip->work,
MAX17040_DELAY);
@@ -244,6 +350,10 @@ static int max17040_suspend(struct device *dev)
struct max17040_chip *chip = i2c_get_clientdata(client);
cancel_delayed_work(&chip->work);
+
+ if (client->irq && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
return 0;
}
@@ -254,6 +364,10 @@ static int max17040_resume(struct device *dev)
queue_delayed_work(system_power_efficient_wq, &chip->work,
MAX17040_DELAY);
+
+ if (client->irq && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
return 0;
}
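
max17040_set_low_soc_alert() above encodes the alert threshold as (32 - level) in the five low bits of RCOMP, so a 4% alert is stored as the raw value 28. A sketch of just the encoding step (the mask mirrors MAX17040_ATHD_MASK; register I/O elided):

#include <linux/types.h>

#define DEMO_ATHD_MASK	0xFFC0	/* mirrors MAX17040_ATHD_MASK above */

/* Fold a 1..32 percent alert level into an existing RCOMP register value */
static u16 demo_encode_athd(u16 rcomp, u32 level_percent)
{
	u16 raw = 32 - level_percent;	/* e.g. a 4% alert becomes raw 28 */

	return (rcomp & DEMO_ATHD_MASK) | raw;
}
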
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 0dfad2cf13fe..69ec4295d55d 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -282,6 +282,8 @@ static int max17042_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
ret = regmap_read(map, MAX17042_V_empty, &data);
+ else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
+ ret = regmap_read(map, MAX17055_V_empty, &data);
else
ret = regmap_read(map, MAX17047_V_empty, &data);
if (ret < 0)
@@ -627,7 +629,8 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
config->filter_cfg);
regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg);
if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047 ||
- chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)
+ chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050 ||
+ chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
regmap_write(map, MAX17047_FullSOCThr,
config->full_soc_thresh);
}
@@ -758,6 +761,8 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
max17042_override_por(map, MAX17042_V_empty, config->vempty);
+	else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
+ max17042_override_por(map, MAX17055_V_empty, config->vempty);
else
max17042_override_por(map, MAX17047_V_empty, config->vempty);
max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
@@ -765,7 +770,10 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
max17042_override_por(map, MAX17042_FCTC, config->fctc);
max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
- if (chip->chip_type) {
+ if (chip->chip_type &&
+ ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
+ (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+ (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050))) {
max17042_override_por(map, MAX17042_EmptyTempCo,
config->empty_tempco);
max17042_override_por(map, MAX17042_K_empty0,
@@ -929,7 +937,8 @@ max17042_get_default_pdata(struct max17042_chip *chip)
if (!pdata)
return pdata;
- if (chip->chip_type != MAXIM_DEVICE_TYPE_MAX17042) {
+ if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+ (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)) {
pdata->init_data = max17047_default_pdata_init_regs;
pdata->num_init_data =
ARRAY_SIZE(max17047_default_pdata_init_regs);
@@ -1167,6 +1176,7 @@ static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
{ .compatible = "maxim,max17047" },
{ .compatible = "maxim,max17050" },
+ { .compatible = "maxim,max17055" },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1176,6 +1186,7 @@ static const struct i2c_device_id max17042_id[] = {
{ "max17042", MAXIM_DEVICE_TYPE_MAX17042 },
{ "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
{ "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
+ { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
index 5f9477c5cf5a..d913428bedc0 100644
--- a/drivers/power/supply/max77650-charger.c
+++ b/drivers/power/supply/max77650-charger.c
@@ -354,9 +354,16 @@ static int max77650_charger_remove(struct platform_device *pdev)
return max77650_charger_disable(chg);
}
+static const struct of_device_id max77650_charger_of_match[] = {
+ { .compatible = "maxim,max77650-charger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_charger_of_match);
+
static struct platform_driver max77650_charger_driver = {
.driver = {
.name = "max77650-charger",
+ .of_match_table = max77650_charger_of_match,
},
.probe = max77650_charger_probe,
.remove = max77650_charger_remove,
diff --git a/drivers/power/supply/pda_power.c b/drivers/power/supply/pda_power.c
index 3ae5707d39fa..03a37fd6be27 100644
--- a/drivers/power/supply/pda_power.c
+++ b/drivers/power/supply/pda_power.c
@@ -429,6 +429,10 @@ wrongid:
static int pda_power_remove(struct platform_device *pdev)
{
+#if IS_ENABLED(CONFIG_USB_PHY)
+ if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier)
+ usb_unregister_notifier(transceiver, &otg_nb);
+#endif
if (pdata->is_usb_online && usb_irq)
free_irq(usb_irq->start, pda_psy_usb);
if (pdata->is_ac_online && ac_irq)
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 5c36c430ce8b..1a9a9fae73d3 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -565,9 +565,11 @@ EXPORT_SYMBOL_GPL(devm_power_supply_get_by_phandle);
int power_supply_get_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info)
{
+ struct power_supply_resistance_temp_table *resist_table;
struct device_node *battery_np;
const char *value;
int err, len, index;
+ const __be32 *list;
info->energy_full_design_uwh = -EINVAL;
info->charge_full_design_uah = -EINVAL;
@@ -578,6 +580,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
info->constant_charge_current_max_ua = -EINVAL;
info->constant_charge_voltage_max_uv = -EINVAL;
info->factory_internal_resistance_uohm = -EINVAL;
+ info->resist_table = NULL;
for (index = 0; index < POWER_SUPPLY_OCV_TEMP_MAX; index++) {
info->ocv_table[index] = NULL;
@@ -644,7 +647,6 @@ int power_supply_get_battery_info(struct power_supply *psy,
for (index = 0; index < len; index++) {
struct power_supply_battery_ocv_table *table;
char *propname;
- const __be32 *list;
int i, tab_len, size;
propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
@@ -677,6 +679,26 @@ int power_supply_get_battery_info(struct power_supply *psy,
}
}
+ list = of_get_property(battery_np, "resistance-temp-table", &len);
+ if (!list || !len)
+ goto out_put_node;
+
+ info->resist_table_size = len / (2 * sizeof(__be32));
+ resist_table = info->resist_table = devm_kcalloc(&psy->dev,
+ info->resist_table_size,
+ sizeof(*resist_table),
+ GFP_KERNEL);
+ if (!info->resist_table) {
+ power_supply_put_battery_info(psy, info);
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (index = 0; index < info->resist_table_size; index++) {
+ resist_table[index].temp = be32_to_cpu(*list++);
+ resist_table[index].resistance = be32_to_cpu(*list++);
+ }
+
out_put_node:
of_node_put(battery_np);
return err;
@@ -692,10 +714,53 @@ void power_supply_put_battery_info(struct power_supply *psy,
if (info->ocv_table[i])
devm_kfree(&psy->dev, info->ocv_table[i]);
}
+
+ if (info->resist_table)
+ devm_kfree(&psy->dev, info->resist_table);
}
EXPORT_SYMBOL_GPL(power_supply_put_battery_info);
/**
+ * power_supply_temp2resist_simple() - find the battery internal resistance
+ * percent
+ * @table: Pointer to battery resistance temperature table
+ * @table_len: The table length
+ * @temp: Current temperature
+ *
+ * This helper function is used to look up the battery internal resistance
+ * percent for the current temperature from the resistance temperature table,
+ * which must be ordered by descending temperature. The actual battery internal
+ * resistance = the ideal battery internal resistance * percent / 100.
+ *
+ * Return: the battery internal resistance percent
+ */
+int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+ int table_len, int temp)
+{
+ int i, resist;
+
+ for (i = 0; i < table_len; i++)
+ if (temp > table[i].temp)
+ break;
+
+ if (i > 0 && i < table_len) {
+ int tmp;
+
+ tmp = (table[i - 1].resistance - table[i].resistance) *
+ (temp - table[i].temp);
+ tmp /= table[i - 1].temp - table[i].temp;
+ resist = tmp + table[i].resistance;
+ } else if (i == 0) {
+ resist = table[0].resistance;
+ } else {
+ resist = table[table_len - 1].resistance;
+ }
+
+ return resist;
+}
+EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
+
+/**
* power_supply_ocv2cap_simple() - find the battery capacity
* @table: Pointer to battery OCV lookup table
* @table_len: OCV table length
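
power_supply_temp2resist_simple() interpolates linearly between the two table rows bracketing the current temperature and clamps outside the table. With rows {20, 100} and {10, 90}, for example, 15 degrees yields 95 percent. A caller sketch with a made-up table (values illustrative only):

#include <linux/kernel.h>
#include <linux/power_supply.h>

static struct power_supply_resistance_temp_table demo_table[] = {
	{ .temp = 20, .resistance = 100 },
	{ .temp = 10, .resistance = 90 },
	{ .temp = 0,  .resistance = 80 },
};

static int demo_resist_percent(int temp)
{
	/* 15 degrees interpolates to 95; above 20 clamps to 100 and
	 * below 0 clamps to 80.
	 */
	return power_supply_temp2resist_simple(demo_table,
					       ARRAY_SIZE(demo_table), temp);
}
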
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index f8d74e9f7931..6acd242eed48 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -5,6 +5,7 @@
* Copyright (c) 2010, NVIDIA Corporation.
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -46,10 +47,10 @@ enum {
/* Battery Mode defines */
#define BATTERY_MODE_OFFSET 0x03
-#define BATTERY_MODE_MASK 0x8000
-enum sbs_battery_mode {
- BATTERY_MODE_AMPS = 0,
- BATTERY_MODE_WATTS = 0x8000
+#define BATTERY_MODE_CAPACITY_MASK BIT(15)
+enum sbs_capacity_mode {
+ CAPACITY_MODE_AMPS = 0,
+ CAPACITY_MODE_WATTS = BATTERY_MODE_CAPACITY_MASK
};
/* manufacturer access defines */
@@ -518,8 +519,8 @@ static void sbs_unit_adjustment(struct i2c_client *client,
}
}
-static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
- enum sbs_battery_mode mode)
+static enum sbs_capacity_mode sbs_set_capacity_mode(struct i2c_client *client,
+ enum sbs_capacity_mode mode)
{
int ret, original_val;
@@ -527,13 +528,13 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
if (original_val < 0)
return original_val;
- if ((original_val & BATTERY_MODE_MASK) == mode)
+ if ((original_val & BATTERY_MODE_CAPACITY_MASK) == mode)
return mode;
- if (mode == BATTERY_MODE_AMPS)
- ret = original_val & ~BATTERY_MODE_MASK;
+ if (mode == CAPACITY_MODE_AMPS)
+ ret = original_val & ~BATTERY_MODE_CAPACITY_MASK;
else
- ret = original_val | BATTERY_MODE_MASK;
+ ret = original_val | BATTERY_MODE_CAPACITY_MASK;
ret = sbs_write_word_data(client, BATTERY_MODE_OFFSET, ret);
if (ret < 0)
@@ -541,7 +542,7 @@ static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
usleep_range(1000, 2000);
- return original_val & BATTERY_MODE_MASK;
+ return original_val & BATTERY_MODE_CAPACITY_MASK;
}
static int sbs_get_battery_capacity(struct i2c_client *client,
@@ -549,13 +550,13 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
union power_supply_propval *val)
{
s32 ret;
- enum sbs_battery_mode mode = BATTERY_MODE_WATTS;
+ enum sbs_capacity_mode mode = CAPACITY_MODE_WATTS;
if (power_supply_is_amp_property(psp))
- mode = BATTERY_MODE_AMPS;
+ mode = CAPACITY_MODE_AMPS;
- mode = sbs_set_battery_mode(client, mode);
- if (mode < 0)
+ mode = sbs_set_capacity_mode(client, mode);
+ if ((int)mode < 0)
return mode;
ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
@@ -564,7 +565,7 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
val->intval = ret;
- ret = sbs_set_battery_mode(client, mode);
+ ret = sbs_set_capacity_mode(client, mode);
if (ret < 0)
return ret;
@@ -1001,6 +1002,6 @@ module_i2c_driver(sbs_battery_driver);
MODULE_DESCRIPTION("SBS battery monitor driver");
MODULE_LICENSE("GPL");
-module_param(force_load, bool, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load,
"Attempt to load the driver even if no battery is connected");
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index bc8f5bda5762..469c83fdaa8e 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -62,6 +62,8 @@
#define SC27XX_FGU_CUR_BASIC_ADC 8192
#define SC27XX_FGU_SAMPLE_HZ 2
+/* micro Ohms */
+#define SC27XX_FGU_IDEAL_RESISTANCE 20000
/*
* struct sc27xx_fgu_data: describe the FGU device
@@ -81,9 +83,12 @@
* @max_volt: the maximum constant input voltage in millivolt
* @min_volt: the minimum drained battery voltage in microvolt
* @table_len: the capacity table length
+ * @resist_table_len: the resistance table length
* @cur_1000ma_adc: ADC value corresponding to 1000 mA
* @vol_1000mv_adc: ADC value corresponding to 1000 mV
+ * @calib_resist: the real resistance of coulomb counter chip in uOhm
* @cap_table: capacity table with corresponding ocv
+ * @resist_table: resistance percent table with corresponding temperature
*/
struct sc27xx_fgu_data {
struct regmap *regmap;
@@ -103,15 +108,19 @@ struct sc27xx_fgu_data {
int max_volt;
int min_volt;
int table_len;
+ int resist_table_len;
int cur_1000ma_adc;
int vol_1000mv_adc;
+ int calib_resist;
struct power_supply_battery_ocv_table *cap_table;
+ struct power_supply_resistance_temp_table *resist_table;
};
static int sc27xx_fgu_cap_to_clbcnt(struct sc27xx_fgu_data *data, int capacity);
static void sc27xx_fgu_capacity_calibration(struct sc27xx_fgu_data *data,
int cap, bool int_mode);
static void sc27xx_fgu_adjust_cap(struct sc27xx_fgu_data *data, int cap);
+static int sc27xx_fgu_get_temp(struct sc27xx_fgu_data *data, int *temp);
static const char * const sc27xx_charger_supply_name[] = {
"sc2731_charger",
@@ -434,7 +443,7 @@ static int sc27xx_fgu_get_current(struct sc27xx_fgu_data *data, int *val)
static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val)
{
- int vol, cur, ret;
+ int vol, cur, ret, temp, resistance;
ret = sc27xx_fgu_get_vbat_vol(data, &vol);
if (ret)
@@ -444,8 +453,19 @@ static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val)
if (ret)
return ret;
+ resistance = data->internal_resist;
+ if (data->resist_table_len > 0) {
+ ret = sc27xx_fgu_get_temp(data, &temp);
+ if (ret)
+ return ret;
+
+ resistance = power_supply_temp2resist_simple(data->resist_table,
+ data->resist_table_len, temp);
+ resistance = data->internal_resist * resistance / 100;
+ }
+
/* Return the battery OCV in micro volts. */
- *val = vol * 1000 - cur * data->internal_resist;
+ *val = vol * 1000 - cur * resistance;
return 0;
}
@@ -884,7 +904,9 @@ static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data)
*/
cal_4200mv = (calib_data & 0x1ff) + 6963 - 4096 - 256;
data->vol_1000mv_adc = DIV_ROUND_CLOSEST(cal_4200mv * 10, 42);
- data->cur_1000ma_adc = data->vol_1000mv_adc * 4;
+ data->cur_1000ma_adc =
+ DIV_ROUND_CLOSEST(data->vol_1000mv_adc * 4 * data->calib_resist,
+ SC27XX_FGU_IDEAL_RESISTANCE);
kfree(buf);
return 0;
@@ -929,6 +951,18 @@ static int sc27xx_fgu_hw_init(struct sc27xx_fgu_data *data)
if (!data->alarm_cap)
data->alarm_cap += 1;
+ data->resist_table_len = info.resist_table_size;
+ if (data->resist_table_len > 0) {
+ data->resist_table = devm_kmemdup(data->dev, info.resist_table,
+ data->resist_table_len *
+ sizeof(struct power_supply_resistance_temp_table),
+ GFP_KERNEL);
+ if (!data->resist_table) {
+ power_supply_put_battery_info(data->battery, &info);
+ return -ENOMEM;
+ }
+ }
+
power_supply_put_battery_info(data->battery, &info);
ret = sc27xx_fgu_calibration(data);
@@ -1051,6 +1085,15 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
return ret;
}
+ ret = device_property_read_u32(&pdev->dev,
+ "sprd,calib-resistance-micro-ohms",
+ &data->calib_resist);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get fgu calibration resistance\n");
+ return ret;
+ }
+
data->channel = devm_iio_channel_get(dev, "bat-temp");
if (IS_ERR(data->channel)) {
dev_err(dev, "failed to get IIO channel\n");
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 1b80ae479e7d..cdb9a23d825f 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -100,7 +100,9 @@ struct ucs1002_info {
struct i2c_client *client;
struct regmap *regmap;
struct regulator_desc *regulator_descriptor;
+ struct regulator_dev *rdev;
bool present;
+ bool output_disable;
};
static enum power_supply_property ucs1002_props[] = {
@@ -233,6 +235,11 @@ static int ucs1002_get_max_current(struct ucs1002_info *info,
unsigned int reg;
int ret;
+ if (info->output_disable) {
+ val->intval = 0;
+ return 0;
+ }
+
ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
if (ret)
return ret;
@@ -247,6 +254,12 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
unsigned int reg;
int ret, idx;
+ if (val == 0) {
+ info->output_disable = true;
+ regulator_disable_regmap(info->rdev);
+ return 0;
+ }
+
for (idx = 0; idx < ARRAY_SIZE(ucs1002_current_limit_uA); idx++) {
if (val == ucs1002_current_limit_uA[idx])
break;
@@ -270,6 +283,12 @@ static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
if (reg != idx)
return -EINVAL;
+ info->output_disable = false;
+
+ if (info->rdev && info->rdev->use_count &&
+ !regulator_is_enabled_regmap(info->rdev))
+ regulator_enable_regmap(info->rdev);
+
return 0;
}
@@ -470,9 +489,24 @@ static irqreturn_t ucs1002_alert_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static int ucs1002_regulator_enable(struct regulator_dev *rdev)
+{
+ struct ucs1002_info *info = rdev_get_drvdata(rdev);
+
+ /*
+ * If the output is disabled due to 0 maximum current, just pretend the
+	 * enable did work. The regulator will be enabled as soon as we get
+	 * a non-zero maximum current budget.
+ */
+ if (info->output_disable)
+ return 0;
+
+ return regulator_enable_regmap(rdev);
+}
+
static const struct regulator_ops ucs1002_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
+ .enable = ucs1002_regulator_enable,
.disable = regulator_disable_regmap,
};
@@ -499,7 +533,6 @@ static int ucs1002_probe(struct i2c_client *client,
};
struct regulator_config regulator_config = {};
int irq_a_det, irq_alert, ret;
- struct regulator_dev *rdev;
struct ucs1002_info *info;
unsigned int regval;
@@ -589,10 +622,11 @@ static int ucs1002_probe(struct i2c_client *client,
regulator_config.dev = dev;
regulator_config.of_node = dev->of_node;
regulator_config.regmap = info->regmap;
+ regulator_config.driver_data = info;
- rdev = devm_regulator_register(dev, info->regulator_descriptor,
+ info->rdev = devm_regulator_register(dev, info->regulator_descriptor,
&regulator_config);
- ret = PTR_ERR_OR_ZERO(rdev);
+ ret = PTR_ERR_OR_ZERO(info->rdev);
if (ret) {
dev_err(dev, "Failed to register VBUS regulator: %d\n", ret);
return ret;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index a67701ed93e8..73257cf107d9 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
+ INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
@@ -1295,6 +1297,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
+ if (!rapl_defaults)
+ return ERR_PTR(-ENODEV);
+
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
if (!rp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index b45d2b86d8ca..475c60dccaa4 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -56,20 +56,6 @@ config PTP_1588_CLOCK_QORIQ
To compile this driver as a module, choose M here: the module
will be called ptp-qoriq.
-config PTP_1588_CLOCK_IXP46X
- tristate "Intel IXP46x as PTP clock"
- depends on IXP4XX_ETH
- depends on PTP_1588_CLOCK
- default y
- help
- This driver adds support for using the IXP46X as a PTP
- clock. This clock is only useful if your PTP programs are
- getting hardware time stamps on the PTP Ethernet packets
- using the SO_TIMESTAMPING API.
-
- To compile this driver as a module, choose M here: the module
- will be called ptp_ixp46x.
-
comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
@@ -89,6 +75,16 @@ config DP83640_PHY
In order for this to work, your MAC driver must also
implement the skb_tx_timestamp() function.
+config PTP_1588_CLOCK_INES
+ tristate "ZHAW InES PTP time stamping IP core"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on PHYLIB
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for using the ZHAW InES 1588 IP
+ core. This clock is only useful if the MII bus of your MAC
+ is wired up to the core.
+
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
@@ -121,7 +117,7 @@ config PTP_1588_CLOCK_KVM
config PTP_1588_CLOCK_IDTCM
tristate "IDT CLOCKMATRIX as PTP clock"
- depends on PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK && I2C
default n
help
This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 69a06f86a450..8c830336f178 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -6,10 +6,10 @@
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
-obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
+obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
-obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
\ No newline at end of file
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index 9263bc33b8f4..69eedda9f731 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -77,6 +77,8 @@
#define JTAG_DEVICE_ID 0x001c
#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
#define STATUS 0xc03c
#define USER_GPIO0_TO_7_STATUS 0x008a
#define USER_GPIO8_TO_15_STATUS 0x008b
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index e60eab7f8a61..ac1f2bf9e888 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -166,10 +166,11 @@ static struct posix_clock_operations ptp_clock_ops = {
.read = ptp_read,
};
-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
{
- struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+ ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
@@ -213,7 +214,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
ptp->clock.ops = ptp_clock_ops;
- ptp->clock.release = delete_ptp_clock;
ptp->info = info;
ptp->devid = MKDEV(major, index);
ptp->index = index;
@@ -236,15 +236,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (err)
goto no_pin_groups;
- /* Create a new device in our class. */
- ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
- ptp, ptp->pin_attr_groups,
- "ptp%d", ptp->index);
- if (IS_ERR(ptp->dev)) {
- err = PTR_ERR(ptp->dev);
- goto no_device;
- }
-
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
@@ -260,8 +251,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
}
- /* Create a posix clock. */
- err = posix_clock_register(&ptp->clock, ptp->devid);
+ /* Initialize a new device of our class in our clock structure. */
+ device_initialize(&ptp->dev);
+ ptp->dev.devt = ptp->devid;
+ ptp->dev.class = ptp_class;
+ ptp->dev.parent = parent;
+ ptp->dev.groups = ptp->pin_attr_groups;
+ ptp->dev.release = ptp_clock_release;
+ dev_set_drvdata(&ptp->dev, ptp);
+ dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+ /* Create a posix clock and link it to the device. */
+ err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
pr_err("failed to create posix clock\n");
goto no_clock;
@@ -273,8 +274,6 @@ no_clock:
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
no_pps:
- device_destroy(ptp_class, ptp->devid);
-no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
if (ptp->kworker)
@@ -304,10 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- device_destroy(ptp_class, ptp->devid);
- ptp_cleanup_pin_groups(ptp);
-
posix_clock_unregister(&ptp->clock);
+
return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
@@ -371,6 +368,12 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
}
EXPORT_SYMBOL(ptp_schedule_worker);
+void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{
+ kthread_cancel_delayed_work_sync(&ptp->aux_work);
+}
+EXPORT_SYMBOL(ptp_cancel_worker_sync);
+
/* module operations */
static void __exit ptp_exit(void)
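
The ptp_clock_register() rework above embeds the struct device in the clock and frees everything from its release callback, so the object lives until the last reference is dropped instead of being destroyed when unregister returns. A minimal sketch of that refcounted-device pattern (demo names, not the PTP code itself):

#include <linux/device.h>
#include <linux/slab.h>

struct demo_clock {
	struct device dev;	/* embedded; owns the object's lifetime */
	int index;
};

static void demo_release(struct device *dev)
{
	/* Called only once the last reference is put */
	kfree(container_of(dev, struct demo_clock, dev));
}

static struct demo_clock *demo_create(struct class *cls, int index)
{
	struct demo_clock *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	device_initialize(&c->dev);
	c->dev.class = cls;
	c->dev.release = demo_release;
	c->index = index;
	dev_set_name(&c->dev, "demo%d", index);
	/* device_add() would follow; put_device(&c->dev) tears it all down */
	return c;
}
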
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index a5110b7b4ece..032e112c3dd9 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -405,6 +405,7 @@ static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
if (idtcm->calculate_overhead_flag) {
+			/* Assumption: I2C @ 400 kHz */
total_overhead_ns = ktime_to_ns(ktime_get_raw()
- idtcm->start_time)
+ idtcm->tod_write_overhead_ns
@@ -596,44 +597,7 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
{
- return idtcm_read(idtcm,
- GENERAL_STATUS,
- HW_REV_ID,
- hw_rev_id,
- sizeof(u8));
-}
-
-static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
-{
- return idtcm_read(idtcm,
- GENERAL_STATUS,
- BOND_ID,
- bond_id,
- sizeof(u8));
-}
-
-static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
-{
- int err;
- u8 buf[2] = {0};
-
- err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
-
- *hw_csr_id = (buf[1] << 8) | buf[0];
-
- return err;
-}
-
-static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
-{
- int err;
- u8 buf[2] = {0};
-
- err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
-
- *hw_irq_id = (buf[1] << 8) | buf[0];
-
- return err;
+ return idtcm_read(idtcm, HW_REVISION, REV_ID, hw_rev_id, sizeof(u8));
}
static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
@@ -674,20 +638,11 @@ static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
sizeof(u8));
}
-static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm,
+ u8 *config_select)
{
- int err;
- u8 buf[4] = {0};
-
- err = idtcm_read(idtcm,
- GENERAL_STATUS,
- PIPELINE_ID,
- &buf[0],
- sizeof(buf));
-
- *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
-
- return err;
+ return idtcm_read(idtcm, GENERAL_STATUS, OTP_SCSR_CONFIG_SELECT,
+ config_select, sizeof(u8));
}
static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
@@ -1078,31 +1033,25 @@ static void idtcm_display_version_info(struct idtcm *idtcm)
u8 major;
u8 minor;
u8 hotfix;
- u32 pipeline;
u16 product_id;
- u16 csr_id;
- u16 irq_id;
u8 hw_rev_id;
- u8 bond_id;
+ u8 config_select;
+ char *fmt = "%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d\n";
idtcm_read_major_release(idtcm, &major);
idtcm_read_minor_release(idtcm, &minor);
idtcm_read_hotfix_release(idtcm, &hotfix);
- idtcm_read_pipeline(idtcm, &pipeline);
idtcm_read_product_id(idtcm, &product_id);
idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
- idtcm_read_bond_id(idtcm, &bond_id);
- idtcm_read_hw_csr_id(idtcm, &csr_id);
- idtcm_read_hw_irq_id(idtcm, &irq_id);
-
- dev_info(&idtcm->client->dev, "Version: %d.%d.%d, Pipeline %u\t"
- "0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
- major, minor, hotfix, pipeline,
- product_id, hw_rev_id, bond_id, csr_id, irq_id);
+
+ idtcm_read_otp_scsr_config_select(idtcm, &config_select);
+
+ dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
+ product_id, hw_rev_id, config_select);
}
-static struct ptp_clock_info idtcm_caps = {
+static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 1,
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
new file mode 100644
index 000000000000..dfda54cbd866
--- /dev/null
+++ b/drivers/ptp/ptp_ines.c
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 MOSER-BAER AG
+//
+
+#define pr_fmt(fmt) "InES_PTP: " fmt
+
+#include <linux/ethtool.h>
+#include <linux/export.h>
+#include <linux/if_vlan.h>
+#include <linux/mii_timestamper.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/stddef.h>
+
+MODULE_DESCRIPTION("Driver for the ZHAW InES PTP time stamping IP core");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* GLOBAL register */
+#define MCAST_MAC_SELECT_SHIFT 2
+#define MCAST_MAC_SELECT_MASK 0x3
+#define IO_RESET BIT(1)
+#define PTP_RESET BIT(0)
+
+/* VERSION register */
+#define IF_MAJOR_VER_SHIFT 12
+#define IF_MAJOR_VER_MASK 0xf
+#define IF_MINOR_VER_SHIFT 8
+#define IF_MINOR_VER_MASK 0xf
+#define FPGA_MAJOR_VER_SHIFT 4
+#define FPGA_MAJOR_VER_MASK 0xf
+#define FPGA_MINOR_VER_SHIFT 0
+#define FPGA_MINOR_VER_MASK 0xf
+
+/* INT_STAT register */
+#define RX_INTR_STATUS_3 BIT(5)
+#define RX_INTR_STATUS_2 BIT(4)
+#define RX_INTR_STATUS_1 BIT(3)
+#define TX_INTR_STATUS_3 BIT(2)
+#define TX_INTR_STATUS_2 BIT(1)
+#define TX_INTR_STATUS_1 BIT(0)
+
+/* INT_MSK register */
+#define RX_INTR_MASK_3 BIT(5)
+#define RX_INTR_MASK_2 BIT(4)
+#define RX_INTR_MASK_1 BIT(3)
+#define TX_INTR_MASK_3 BIT(2)
+#define TX_INTR_MASK_2 BIT(1)
+#define TX_INTR_MASK_1 BIT(0)
+
+/* BUF_STAT register */
+#define RX_FIFO_NE_3 BIT(5)
+#define RX_FIFO_NE_2 BIT(4)
+#define RX_FIFO_NE_1 BIT(3)
+#define TX_FIFO_NE_3 BIT(2)
+#define TX_FIFO_NE_2 BIT(1)
+#define TX_FIFO_NE_1 BIT(0)
+
+/* PORT_CONF register */
+#define CM_ONE_STEP BIT(6)
+#define PHY_SPEED_SHIFT 4
+#define PHY_SPEED_MASK 0x3
+#define P2P_DELAY_WR_POS_SHIFT 2
+#define P2P_DELAY_WR_POS_MASK 0x3
+#define PTP_MODE_SHIFT 0
+#define PTP_MODE_MASK 0x3
+
+/* TS_STAT_TX register */
+#define TS_ENABLE BIT(15)
+#define DATA_READ_POS_SHIFT 8
+#define DATA_READ_POS_MASK 0x1f
+#define DISCARDED_EVENTS_SHIFT 4
+#define DISCARDED_EVENTS_MASK 0xf
+
+#define INES_N_PORTS 3
+#define INES_REGISTER_SIZE 0x80
+#define INES_PORT_OFFSET 0x20
+#define INES_PORT_SIZE 0x20
+#define INES_FIFO_DEPTH 90
+#define INES_MAX_EVENTS 100
+
+#define BC_PTP_V1 0
+#define BC_PTP_V2 1
+#define TC_E2E_PTP_V2 2
+#define TC_P2P_PTP_V2 3
+
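+/* Byte offsets of the sourcePortIdentity clock ID and port number within
+ * the PTPv2 header; the matching sequence ID offset (OFF_PTP_SEQUENCE_ID)
+ * comes from <linux/ptp_classify.h>.
+ */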
+#define OFF_PTP_CLOCK_ID 20
+#define OFF_PTP_PORT_NUM 28
+
+#define PHY_SPEED_10 0
+#define PHY_SPEED_100 1
+#define PHY_SPEED_1000 2
+
+#define PORT_CONF \
+ ((PHY_SPEED_1000 << PHY_SPEED_SHIFT) | (BC_PTP_V2 << PTP_MODE_SHIFT))
+
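+/* Register accessors addressed by struct field name: the ines_global_regs
+ * and ines_port_registers layouts below mirror the hardware register map,
+ * so &s->regs->r resolves to the MMIO address of register 'r'.
+ */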
+#define ines_read32(s, r) __raw_readl((void __iomem *)&s->regs->r)
+#define ines_write32(s, v, r) __raw_writel(v, (void __iomem *)&s->regs->r)
+
+#define MESSAGE_TYPE_SYNC 1
+#define MESSAGE_TYPE_P_DELAY_REQ 2
+#define MESSAGE_TYPE_P_DELAY_RESP 3
+#define MESSAGE_TYPE_DELAY_REQ 4
+
+#define SYNC 0x0
+#define DELAY_REQ 0x1
+#define PDELAY_REQ 0x2
+#define PDELAY_RESP 0x3
+
+static LIST_HEAD(ines_clocks);
+static DEFINE_MUTEX(ines_clocks_lock);
+
+struct ines_global_regs {
+ u32 id;
+ u32 test;
+ u32 global;
+ u32 version;
+ u32 test2;
+ u32 int_stat;
+ u32 int_msk;
+ u32 buf_stat;
+};
+
+struct ines_port_registers {
+ u32 port_conf;
+ u32 p_delay;
+ u32 ts_stat_tx;
+ u32 ts_stat_rx;
+ u32 ts_tx;
+ u32 ts_rx;
+};
+
+struct ines_timestamp {
+ struct list_head list;
+ unsigned long tmo;
+ u16 tag;
+ u64 sec;
+ u64 nsec;
+ u64 clkid;
+ u16 portnum;
+ u16 seqid;
+};
+
+struct ines_port {
+ struct ines_port_registers *regs;
+ struct mii_timestamper mii_ts;
+ struct ines_clock *clock;
+ bool rxts_enabled;
+ bool txts_enabled;
+ unsigned int index;
+ struct delayed_work ts_work;
+ /* lock protects event list and tx_skb */
+ spinlock_t lock;
+ struct sk_buff *tx_skb;
+ struct list_head events;
+ struct list_head pool;
+ struct ines_timestamp pool_data[INES_MAX_EVENTS];
+};
+
+struct ines_clock {
+ struct ines_port port[INES_N_PORTS];
+ struct ines_global_regs __iomem *regs;
+ void __iomem *base;
+ struct device_node *node;
+ struct device *dev;
+ struct list_head list;
+};
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev);
+static int ines_rxfifo_read(struct ines_port *port);
+static u64 ines_rxts64(struct ines_port *port, unsigned int words);
+static bool ines_timestamp_expired(struct ines_timestamp *ts);
+static u64 ines_txts64(struct ines_port *port, unsigned int words);
+static void ines_txtstamp_work(struct work_struct *work);
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type);
+static u8 tag_to_msgtype(u8 tag);
+
+static void ines_clock_cleanup(struct ines_clock *clock)
+{
+ struct ines_port *port;
+ int i;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ cancel_delayed_work_sync(&port->ts_work);
+ }
+}
+
+static int ines_clock_init(struct ines_clock *clock, struct device *device,
+ void __iomem *addr)
+{
+ struct device_node *node = device->of_node;
+ unsigned long port_addr;
+ struct ines_port *port;
+ int i, j;
+
+ INIT_LIST_HEAD(&clock->list);
+ clock->node = node;
+ clock->dev = device;
+ clock->base = addr;
+ clock->regs = clock->base;
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ port_addr = (unsigned long) clock->base +
+ INES_PORT_OFFSET + i * INES_PORT_SIZE;
+ port->regs = (struct ines_port_registers *) port_addr;
+ port->clock = clock;
+ port->index = i;
+ INIT_DELAYED_WORK(&port->ts_work, ines_txtstamp_work);
+ spin_lock_init(&port->lock);
+ INIT_LIST_HEAD(&port->events);
+ INIT_LIST_HEAD(&port->pool);
+ for (j = 0; j < INES_MAX_EVENTS; j++)
+ list_add(&port->pool_data[j].list, &port->pool);
+ }
+
+ ines_write32(clock, 0xBEEF, test);
+ ines_write32(clock, 0xBEEF, test2);
+
+ dev_dbg(device, "ID 0x%x\n", ines_read32(clock, id));
+ dev_dbg(device, "TEST 0x%x\n", ines_read32(clock, test));
+ dev_dbg(device, "VERSION 0x%x\n", ines_read32(clock, version));
+ dev_dbg(device, "TEST2 0x%x\n", ines_read32(clock, test2));
+
+ for (i = 0; i < INES_N_PORTS; i++) {
+ port = &clock->port[i];
+ ines_write32(port, PORT_CONF, port_conf);
+ }
+
+ return 0;
+}
+
+static struct ines_port *ines_find_port(struct device_node *node, u32 index)
+{
+ struct ines_port *port = NULL;
+ struct ines_clock *clock;
+ struct list_head *this;
+
+ mutex_lock(&ines_clocks_lock);
+ list_for_each(this, &ines_clocks) {
+ clock = list_entry(this, struct ines_clock, list);
+ if (clock->node == node) {
+ port = &clock->port[index];
+ break;
+ }
+ }
+ mutex_unlock(&ines_clocks_lock);
+ return port;
+}
+
+static u64 ines_find_rxts(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ struct list_head *this, *next;
+ struct ines_timestamp *ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ if (type == PTP_CLASS_NONE)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ines_rxfifo_read(port);
+ list_for_each_safe(this, next, &port->events) {
+ ts = list_entry(this, struct ines_timestamp, list);
+ if (ines_timestamp_expired(ts)) {
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ continue;
+ }
+ if (ines_match(skb, type, ts, port->clock->dev)) {
+ ns = ts->sec * 1000000000ULL + ts->nsec;
+ list_del_init(&ts->list);
+ list_add(&ts->list, &port->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ns;
+}
+
+static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb), i;
+ u32 data_rd_pos, buf_stat, mask, ts_stat_tx;
+ struct ines_timestamp ts;
+ unsigned long flags;
+ u64 ns = 0;
+
+ mask = TX_FIFO_NE_1 << port->index;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask)) {
+ dev_dbg(port->clock->dev,
+ "Tx timestamp FIFO unexpectedly empty\n");
+ break;
+ }
+ ts_stat_tx = ines_read32(port, ts_stat_tx);
+ data_rd_pos = (ts_stat_tx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev,
+ "unexpected Tx read pos %u\n", data_rd_pos);
+ break;
+ }
+
+ ts.tag = ines_read32(port, ts_tx);
+ ts.sec = ines_txts64(port, 3);
+ ts.nsec = ines_txts64(port, 2);
+ ts.clkid = ines_txts64(port, 4);
+ ts.portnum = ines_read32(port, ts_tx);
+ ts.seqid = ines_read32(port, ts_tx);
+
+ if (ines_match(skb, class, &ts, port->clock->dev)) {
+ ns = ts.sec * 1000000000ULL + ts.nsec;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ns;
+}
+
+static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
+ struct hwtstamp_config cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ ts_stat_tx = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ ts_stat_tx = TS_ENABLE;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ ts_stat_tx = TS_ENABLE;
+ cm_one_step = CM_ONE_STEP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ts_stat_rx = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ts_stat_rx = TS_ENABLE;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~CM_ONE_STEP;
+ port_conf |= cm_one_step;
+
+ ines_write32(port, port_conf, port_conf);
+ ines_write32(port, ts_stat_rx, ts_stat_rx);
+ ines_write32(port, ts_stat_tx, ts_stat_tx);
+
+	port->rxts_enabled = ts_stat_rx == TS_ENABLE;
+	port->txts_enabled = ts_stat_tx == TS_ENABLE;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void ines_link_state(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ u32 port_conf, speed_conf;
+ unsigned long flags;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed_conf = PHY_SPEED_10 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ speed_conf = PHY_SPEED_100 << PHY_SPEED_SHIFT;
+ break;
+ case SPEED_1000:
+ speed_conf = PHY_SPEED_1000 << PHY_SPEED_SHIFT;
+ break;
+ default:
+ dev_err(port->clock->dev, "bad speed: %d\n", phydev->speed);
+ return;
+ }
+ spin_lock_irqsave(&port->lock, flags);
+
+ port_conf = ines_read32(port, port_conf);
+ port_conf &= ~(0x3 << PHY_SPEED_SHIFT);
+ port_conf |= speed_conf;
+
+ ines_write32(port, port_conf, port_conf);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
+ struct ines_timestamp *ts, struct device *dev)
+{
+ u8 *msgtype, *data = skb_mac_header(skb);
+ unsigned int offset = 0;
+ __be16 *portn, *seqid;
+ __be64 *clkid;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ return false;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return false;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return false;
+
+ msgtype = data + offset;
+ clkid = (__be64 *)(data + offset + OFF_PTP_CLOCK_ID);
+ portn = (__be16 *)(data + offset + OFF_PTP_PORT_NUM);
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ if (tag_to_msgtype(ts->tag & 0x7) != (*msgtype & 0xf)) {
+ dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
+ tag_to_msgtype(ts->tag & 0x7), *msgtype & 0xf);
+ return false;
+ }
+ if (cpu_to_be64(ts->clkid) != *clkid) {
+ dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
+ cpu_to_be64(ts->clkid), *clkid);
+ return false;
+ }
+ if (ts->portnum != ntohs(*portn)) {
+ dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
+ ts->portnum, ntohs(*portn));
+ return false;
+ }
+ if (ts->seqid != ntohs(*seqid)) {
+ dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
+ ts->seqid, ntohs(*seqid));
+ return false;
+ }
+
+ return true;
+}
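As a worked instance of the offset arithmetic in ines_match(), assuming an untagged PTPv2-over-UDP/IPv4 event frame with a 20-byte IP header, and assuming the conventional PTPv2 common-header layout behind OFF_PTP_CLOCK_ID, OFF_PTP_PORT_NUM and OFF_PTP_SEQUENCE_ID:

/*
 *   offset = ETH_HLEN (14) + IPV4_HLEN (20) + UDP_HLEN (8) = 42
 *
 * so the PTP common header starts at byte 42 of the frame and the fields
 * compared against the unit's timestamp land at:
 */
#define PTP_HDR_OFF	42			/* start of PTP header     */
#define CLKID_OFF	(PTP_HDR_OFF + 20)	/* clockIdentity, 8 bytes  */
#define PORTN_OFF	(PTP_HDR_OFF + 28)	/* portNumber, 2 bytes     */
#define SEQID_OFF	(PTP_HDR_OFF + 30)	/* sequenceId, 2 bytes     */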
+
+static bool ines_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (!port->rxts_enabled)
+ return false;
+
+ ns = ines_find_rxts(port, skb, type);
+ if (!ns)
+ return false;
+
+ ssh = skb_hwtstamps(skb);
+ ssh->hwtstamp = ns_to_ktime(ns);
+ netif_rx(skb);
+
+ return true;
+}
+
+static int ines_rxfifo_read(struct ines_port *port)
+{
+ u32 data_rd_pos, buf_stat, mask, ts_stat_rx;
+ struct ines_timestamp *ts;
+ unsigned int i;
+
+ mask = RX_FIFO_NE_1 << port->index;
+
+ for (i = 0; i < INES_FIFO_DEPTH; i++) {
+ if (list_empty(&port->pool)) {
+ dev_err(port->clock->dev, "event pool is empty\n");
+ return -1;
+ }
+ buf_stat = ines_read32(port->clock, buf_stat);
+ if (!(buf_stat & mask))
+ break;
+
+ ts_stat_rx = ines_read32(port, ts_stat_rx);
+ data_rd_pos = (ts_stat_rx >> DATA_READ_POS_SHIFT) &
+ DATA_READ_POS_MASK;
+ if (data_rd_pos) {
+ dev_err(port->clock->dev, "unexpected Rx read pos %u\n",
+ data_rd_pos);
+ break;
+ }
+
+ ts = list_first_entry(&port->pool, struct ines_timestamp, list);
+ ts->tmo = jiffies + HZ;
+ ts->tag = ines_read32(port, ts_rx);
+ ts->sec = ines_rxts64(port, 3);
+ ts->nsec = ines_rxts64(port, 2);
+ ts->clkid = ines_rxts64(port, 4);
+ ts->portnum = ines_read32(port, ts_rx);
+ ts->seqid = ines_read32(port, ts_rx);
+
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &port->events);
+ }
+
+ return 0;
+}
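A side note on the pool-to-events migration at the bottom of the loop: the list_del_init()/list_add_tail() pair is equivalent to the single list.h helper made for exactly this, as in the one-line sketch below.

	list_move_tail(&ts->list, &port->events);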
+
+static u64 ines_rxts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_rx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_rx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
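ines_rxts64() assembles consecutive 16-bit register words into one wide value, most significant word first. A stand-alone sketch of the same assembly, with the register reads replaced by an array for illustration:

#include <stdint.h>

uint64_t assemble_words(const uint16_t *w, unsigned int words)
{
	uint64_t result = w[0];		/* first word is most significant */
	unsigned int i;

	for (i = 1; i < words; i++) {
		result <<= 16;
		result |= w[i];
	}
	/* words == 3 yields (w0 << 32) | (w1 << 16) | w2 */
	return result;
}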
+
+static bool ines_timestamp_expired(struct ines_timestamp *ts)
+{
+ return time_after(jiffies, ts->tmo);
+}
+
+static int ines_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_P2P);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
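These are the capability bits user space sees through ethtool's timestamping query; a minimal sketch (socket setup assumed):

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

int query_ts_caps(int fd, const char *ifname)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	/* on success, info.so_timestamping, info.tx_types and
	 * info.rx_filters hold the bits filled in above; phc_index is -1 */
	return ioctl(fd, SIOCETHTOOL, &ifr);
}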
+
+static u64 ines_txts64(struct ines_port *port, unsigned int words)
+{
+ unsigned int i;
+ u64 result;
+ u16 word;
+
+ word = ines_read32(port, ts_tx);
+ result = word;
+ words--;
+ for (i = 0; i < words; i++) {
+ word = ines_read32(port, ts_tx);
+ result <<= 16;
+ result |= word;
+ }
+ return result;
+}
+
+static bool ines_txts_onestep(struct ines_port *port, struct sk_buff *skb, int type)
+{
+ unsigned long flags;
+ u32 port_conf;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port_conf = ines_read32(port, port_conf);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (port_conf & CM_ONE_STEP)
+ return is_sync_pdelay_resp(skb, type);
+
+ return false;
+}
+
+static void ines_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
+ struct sk_buff *old_skb = NULL;
+ unsigned long flags;
+
+ if (!port->txts_enabled || ines_txts_onestep(port, skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ old_skb = port->tx_skb;
+
+ port->tx_skb = skb;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ kfree_skb(old_skb);
+
+ schedule_delayed_work(&port->ts_work, 1);
+}
+
+static void ines_txtstamp_work(struct work_struct *work)
+{
+ struct ines_port *port =
+ container_of(work, struct ines_port, ts_work.work);
+ struct skb_shared_hwtstamps ssh;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&port->lock, flags);
+ skb = port->tx_skb;
+ port->tx_skb = NULL;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ns = ines_find_txts(port, skb);
+ if (!ns) {
+ kfree_skb(skb);
+ return;
+ }
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_complete_tx_timestamp(skb, &ssh);
+}
+
+static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
+{
+ u8 *data = skb->data, *msgtype;
+ unsigned int offset = 0;
+
+ if (type & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return false;
+ }
+
+ if (type & PTP_CLASS_V1)
+ offset += OFF_PTP_CONTROL;
+
+ if (skb->len < offset + 1)
+ return false;
+
+ msgtype = data + offset;
+
+ switch ((*msgtype & 0xf)) {
+ case SYNC:
+ case PDELAY_RESP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 tag_to_msgtype(u8 tag)
+{
+ switch (tag) {
+ case MESSAGE_TYPE_SYNC:
+ return SYNC;
+ case MESSAGE_TYPE_P_DELAY_REQ:
+ return PDELAY_REQ;
+ case MESSAGE_TYPE_P_DELAY_RESP:
+ return PDELAY_RESP;
+ case MESSAGE_TYPE_DELAY_REQ:
+ return DELAY_REQ;
+ }
+ return 0xf;
+}
+
+static struct mii_timestamper *ines_ptp_probe_channel(struct device *device,
+ unsigned int index)
+{
+ struct device_node *node = device->of_node;
+ struct ines_port *port;
+
+ if (index > INES_N_PORTS - 1) {
+ dev_err(device, "bad port index %u\n", index);
+ return ERR_PTR(-EINVAL);
+ }
+ port = ines_find_port(node, index);
+ if (!port) {
+ dev_err(device, "missing port index %u\n", index);
+ return ERR_PTR(-ENODEV);
+ }
+ port->mii_ts.rxtstamp = ines_rxtstamp;
+ port->mii_ts.txtstamp = ines_txtstamp;
+ port->mii_ts.hwtstamp = ines_hwtstamp;
+ port->mii_ts.link_state = ines_link_state;
+ port->mii_ts.ts_info = ines_ts_info;
+
+ return &port->mii_ts;
+}
+
+static void ines_ptp_release_channel(struct device *device,
+ struct mii_timestamper *mii_ts)
+{
+}
+
+static struct mii_timestamping_ctrl ines_ctrl = {
+ .probe_channel = ines_ptp_probe_channel,
+ .release_channel = ines_ptp_release_channel,
+};
+
+static int ines_ptp_ctrl_probe(struct platform_device *pld)
+{
+ struct ines_clock *clock;
+ struct resource *res;
+ void __iomem *addr;
+ int err = 0;
+
+ res = platform_get_resource(pld, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pld->dev, "missing memory resource\n");
+ return -EINVAL;
+ }
+ addr = devm_ioremap_resource(&pld->dev, res);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto out;
+ }
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto out;
+ }
+ if (ines_clock_init(clock, &pld->dev, addr)) {
+ kfree(clock);
+ err = -ENOMEM;
+ goto out;
+ }
+ err = register_mii_tstamp_controller(&pld->dev, &ines_ctrl);
+ if (err) {
+ kfree(clock);
+ goto out;
+ }
+ mutex_lock(&ines_clocks_lock);
+ list_add_tail(&clock->list, &ines_clocks);
+ mutex_unlock(&ines_clocks_lock);
+
+ dev_set_drvdata(&pld->dev, clock);
+out:
+ return err;
+}
+
+static int ines_ptp_ctrl_remove(struct platform_device *pld)
+{
+ struct ines_clock *clock = dev_get_drvdata(&pld->dev);
+
+ unregister_mii_tstamp_controller(&pld->dev);
+ mutex_lock(&ines_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&ines_clocks_lock);
+ ines_clock_cleanup(clock);
+ kfree(clock);
+ return 0;
+}
+
+static const struct of_device_id ines_ptp_ctrl_of_match[] = {
+ { .compatible = "ines,ptp-ctrl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ines_ptp_ctrl_of_match);
+
+static struct platform_driver ines_ptp_ctrl_driver = {
+ .probe = ines_ptp_ctrl_probe,
+ .remove = ines_ptp_ctrl_remove,
+ .driver = {
+ .name = "ines_ptp_ctrl",
+ .of_match_table = of_match_ptr(ines_ptp_ctrl_of_match),
+ },
+};
+module_platform_driver(ines_ptp_ctrl_driver);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 9171d42468fd..6b97155148f1 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
struct ptp_clock {
struct posix_clock clock;
- struct device *dev;
+ struct device dev;
struct ptp_clock_info *info;
dev_t devid;
int index; /* index into clocks.map */
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index a577218d1ab7..b27c46ebfc8f 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -74,14 +74,13 @@ static void set_fipers(struct ptp_qoriq *ptp_qoriq)
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
}
-static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
- bool update_event)
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
{
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
void __iomem *reg_etts_l;
void __iomem *reg_etts_h;
- u32 valid, stat, lo, hi;
+ u32 valid, lo, hi;
switch (index) {
case 0:
@@ -101,6 +100,10 @@ static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
event.type = PTP_CLOCK_EXTTS;
event.index = index;
+ if (ptp_qoriq->extts_fifo_support)
+ if (!(ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid))
+ return 0;
+
do {
lo = ptp_qoriq->read(reg_etts_l);
hi = ptp_qoriq->read(reg_etts_h);
@@ -111,11 +114,13 @@ static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
ptp_clock_event(ptp_qoriq->clock, &event);
}
- stat = ptp_qoriq->read(&regs->ctrl_regs->tmr_stat);
- } while (ptp_qoriq->extts_fifo_support && (stat & valid));
+ if (!ptp_qoriq->extts_fifo_support)
+ break;
+ } while (ptp_qoriq->read(&regs->ctrl_regs->tmr_stat) & valid);
return 0;
}
+EXPORT_SYMBOL_GPL(extts_clean_up);
/*
* Interrupt service routine
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index bd21655c37a6..30190beeb6e9 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -100,7 +100,7 @@ config PWM_BCM_KONA
config PWM_BCM2835
tristate "BCM2835 PWM support"
- depends on ARCH_BCM2835
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB
help
PWM framework driver for BCM2835 controller (Raspberry Pi)
@@ -328,7 +328,8 @@ config PWM_MXS
config PWM_OMAP_DMTIMER
tristate "OMAP Dual-Mode Timer PWM support"
- depends on OF && ARCH_OMAP && OMAP_DM_TIMER
+ depends on OF
+ depends on OMAP_DM_TIMER || COMPILE_TEST
help
Generic PWM framework driver for OMAP Dual-Mode Timer PWM output
@@ -490,7 +491,7 @@ config PWM_TEGRA
To compile this driver as a module, choose M here: the module
will be called pwm-tegra.
-config PWM_TIECAP
+config PWM_TIECAP
tristate "ECAP PWM support"
depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
help
@@ -499,7 +500,7 @@ config PWM_TIECAP
To compile this driver as a module, choose M here: the module
will be called pwm-tiecap.
-config PWM_TIEHRPWM
+config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
help
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f877e77d9184..5a7f6598c05f 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -20,6 +20,9 @@
#include <dt-bindings/pwm/pwm.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/pwm.h>
+
#define MAX_PWMS 1024
static DEFINE_MUTEX(pwm_lookup_lock);
@@ -114,6 +117,11 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
}
}
+ if (pwm->chip->ops->get_state) {
+ pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state);
+ trace_pwm_get(pwm, &pwm->state);
+ }
+
set_bit(PWMF_REQUESTED, &pwm->flags);
pwm->label = label;
@@ -283,9 +291,6 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
pwm->hwpwm = i;
pwm->state.polarity = polarity;
- if (chip->ops->get_state)
- chip->ops->get_state(chip, pwm, &pwm->state);
-
radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
}
@@ -472,6 +477,8 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
if (err)
return err;
+ trace_pwm_apply(pwm, state);
+
pwm->state = *state;
} else {
/*
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 9ba733467e26..6161e7e3e9ac 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -4,6 +4,19 @@
*
* Copyright (C) 2013 Atmel Corporation
* Bo Shen <voice.shen@atmel.com>
+ *
+ * Links to reference manuals for the supported PWM chips can be found in
+ * Documentation/arm/microchip.rst.
+ *
+ * Limitations:
+ * - Periods start with the inactive level.
+ * - Hardware has to be stopped in general to update settings.
+ *
+ * Software bugs/possible improvements:
+ * - When atmel_pwm_apply() is called with state->enabled=false a change in
+ * state->polarity isn't honored.
+ * - Instead of sleeping to wait for a completed period, the interrupt
+ * functionality could be used.
*/
#include <linux/clk.h>
@@ -47,6 +60,8 @@
#define PWMV2_CPRD 0x0C
#define PWMV2_CPRDUPD 0x10
+#define PWM_MAX_PRES 10
+
struct atmel_pwm_registers {
u8 period;
u8 period_upd;
@@ -55,8 +70,7 @@ struct atmel_pwm_registers {
};
struct atmel_pwm_config {
- u32 max_period;
- u32 max_pres;
+ u32 period_bits;
};
struct atmel_pwm_data {
@@ -97,7 +111,7 @@ static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip,
{
unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
- return readl_relaxed(chip->base + base + offset);
+ return atmel_pwm_readl(chip, base + offset);
}
static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
@@ -106,7 +120,7 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
{
unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE;
- writel_relaxed(val, chip->base + base + offset);
+ atmel_pwm_writel(chip, base + offset, val);
}
static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
@@ -115,17 +129,27 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
unsigned long long cycles = state->period;
+ int shift;
/* Calculate the period cycles and prescale value */
cycles *= clk_get_rate(atmel_pwm->clk);
do_div(cycles, NSEC_PER_SEC);
- for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1)
- (*pres)++;
+ /*
+ * The register for the period length is cfg.period_bits bits wide.
+ * For every bit the value of cycles is wider than that, divide the
+ * input clock frequency by two (via pres) and shift cprd accordingly.
+ */
+ shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;
- if (*pres > atmel_pwm->data->cfg.max_pres) {
+ if (shift > PWM_MAX_PRES) {
dev_err(chip->dev, "pres exceeds the maximum value\n");
return -EINVAL;
+ } else if (shift > 0) {
+ *pres = shift;
+ cycles >>= *pres;
+ } else {
+ *pres = 0;
}
*cprd = cycles;
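A worked instance of the fls()-based computation above, with assumed numbers (a 133 MHz input clock, a requested period of 1 ms, period_bits = 16):

/*
 *   cycles = 133000000 * 1000000 / 1000000000 = 133000
 *   fls(133000) = 18	(2^17 <= 133000 < 2^18)
 *   shift = 18 - 16 = 2	-> pres = 2
 *   cprd = 133000 >> 2 = 33250	(fits in 16 bits)
 */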
@@ -271,8 +295,48 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
+static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ u32 sr, cmr;
+
+ sr = atmel_pwm_readl(atmel_pwm, PWM_SR);
+ cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+
+ if (sr & (1 << pwm->hwpwm)) {
+ unsigned long rate = clk_get_rate(atmel_pwm->clk);
+ u32 cdty, cprd, pres;
+ u64 tmp;
+
+ pres = cmr & PWM_CMR_CPRE_MSK;
+
+ cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+ atmel_pwm->data->regs.period);
+ tmp = (u64)cprd * NSEC_PER_SEC;
+ tmp <<= pres;
+ state->period = DIV64_U64_ROUND_UP(tmp, rate);
+
+ cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+ atmel_pwm->data->regs.duty);
+ tmp = (u64)cdty * NSEC_PER_SEC;
+ tmp <<= pres;
+ state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
+
+ state->enabled = true;
+ } else {
+ state->enabled = false;
+ }
+
+ if (cmr & PWM_CMR_CPOL)
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ state->polarity = PWM_POLARITY_NORMAL;
+}
+
static const struct pwm_ops atmel_pwm_ops = {
.apply = atmel_pwm_apply,
+ .get_state = atmel_pwm_get_state,
.owner = THIS_MODULE,
};
@@ -285,8 +349,7 @@ static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
},
.cfg = {
/* 16 bits to keep period and duty. */
- .max_period = 0xffff,
- .max_pres = 10,
+ .period_bits = 16,
},
};
@@ -299,8 +362,7 @@ static const struct atmel_pwm_data atmel_sama5_pwm_data = {
},
.cfg = {
/* 16 bits to keep period and duty. */
- .max_period = 0xffff,
- .max_pres = 10,
+ .period_bits = 16,
},
};
@@ -313,8 +375,7 @@ static const struct atmel_pwm_data mchp_sam9x60_pwm_data = {
},
.cfg = {
/* 32 bits to keep period and duty. */
- .max_period = 0xffffffff,
- .max_pres = 10,
+ .period_bits = 32,
},
};
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 89497448d217..09c08dee099e 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -25,11 +25,39 @@ struct cros_ec_pwm_device {
struct pwm_chip chip;
};
+/**
+ * struct cros_ec_pwm - per-PWM driver data
+ * @duty_cycle: cached duty cycle
+ */
+struct cros_ec_pwm {
+ u16 duty_cycle;
+};
+
static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
{
return container_of(c, struct cros_ec_pwm_device, chip);
}
+static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct cros_ec_pwm *channel;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ pwm_set_chip_data(pwm, channel);
+
+ return 0;
+}
+
+static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+
+ kfree(channel);
+}
+
static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
{
struct {
@@ -96,7 +124,9 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
- int duty_cycle;
+ struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+ u16 duty_cycle;
+ int ret;
/* The EC won't let us change the period */
if (state->period != EC_PWM_MAX_DUTY)
@@ -108,13 +138,20 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
duty_cycle = state->enabled ? state->duty_cycle : 0;
- return cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+ ret = cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+ if (ret < 0)
+ return ret;
+
+ channel->duty_cycle = state->duty_cycle;
+
+ return 0;
}
static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+ struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
int ret;
ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
@@ -126,8 +163,19 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->enabled = (ret > 0);
state->period = EC_PWM_MAX_DUTY;
- /* Note that "disabled" and "duty cycle == 0" are treated the same */
- state->duty_cycle = ret;
+ /*
+ * Note that "disabled" and "duty cycle == 0" are treated the same. If
+ * the cached duty cycle is not zero, use the cached duty cycle. This
+ * ensures that the configured duty cycle is kept across a disable and
+ * enable operation and avoids potentially confusing consumers.
+ *
+ * For the case of the initial hardware readout, channel->duty_cycle
+ * will be 0 and the actual duty cycle read from the EC is used.
+ */
+ if (ret == 0 && channel->duty_cycle > 0)
+ state->duty_cycle = channel->duty_cycle;
+ else
+ state->duty_cycle = ret;
}
static struct pwm_device *
@@ -149,6 +197,8 @@ cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
}
static const struct pwm_ops cros_ec_pwm_ops = {
+ .request = cros_ec_pwm_request,
+ .free = cros_ec_pwm_free,
.get_state = cros_ec_pwm_get_state,
.apply = cros_ec_pwm_apply,
.owner = THIS_MODULE,
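A hypothetical consumer sketch for this chip: the EC fixes the period, so a caller must pass exactly EC_PWM_MAX_DUTY and scale its duty cycle into that range.

#include <linux/pwm.h>

int set_half_duty(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_init_state(pwm, &state);
	state.period = EC_PWM_MAX_DUTY;	/* any other period is -EINVAL */
	state.duty_cycle = EC_PWM_MAX_DUTY / 2;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}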
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index ae11d8577f18..35a7ac42269c 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -85,6 +85,13 @@ struct pwm_imx27_chip {
struct clk *clk_per;
void __iomem *mmio_base;
struct pwm_chip chip;
+
+ /*
+ * The driver cannot read the current duty cycle from the hardware if
+ * the hardware is disabled. Cache the last programmed duty cycle
+ * value to return in that case.
+ */
+ unsigned int duty_cycle;
};
#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
@@ -155,14 +162,17 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
tmp = NSEC_PER_SEC * (u64)(period + 2);
state->period = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
- /* PWMSAR can be read only if PWM is enabled */
- if (state->enabled) {
+ /*
+ * PWMSAR can be read only if PWM is enabled. If the PWM is disabled,
+ * use the cached value.
+ */
+ if (state->enabled)
val = readl(imx->mmio_base + MX3_PWMSAR);
- tmp = NSEC_PER_SEC * (u64)(val);
- state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
- } else {
- state->duty_cycle = 0;
- }
+ else
+ val = imx->duty_cycle;
+
+ tmp = NSEC_PER_SEC * (u64)(val);
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
if (!state->enabled)
pwm_imx27_clk_disable_unprepare(chip);
@@ -220,63 +230,68 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_get_state(pwm, &cstate);
- if (state->enabled) {
- c = clk_get_rate(imx->clk_per);
- c *= state->period;
-
- do_div(c, 1000000000);
- period_cycles = c;
-
- prescale = period_cycles / 0x10000 + 1;
-
- period_cycles /= prescale;
- c = (unsigned long long)period_cycles * state->duty_cycle;
- do_div(c, state->period);
- duty_cycles = c;
-
- /*
- * according to imx pwm RM, the real period value should be
- * PERIOD value in PWMPR plus 2.
- */
- if (period_cycles > 2)
- period_cycles -= 2;
- else
- period_cycles = 0;
-
- /*
- * Wait for a free FIFO slot if the PWM is already enabled, and
- * flush the FIFO if the PWM was disabled and is about to be
- * enabled.
- */
- if (cstate.enabled) {
- pwm_imx27_wait_fifo_slot(chip, pwm);
- } else {
- ret = pwm_imx27_clk_prepare_enable(chip);
- if (ret)
- return ret;
-
- pwm_imx27_sw_reset(chip);
- }
-
- writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
- writel(period_cycles, imx->mmio_base + MX3_PWMPR);
-
- cr = MX3_PWMCR_PRESCALER_SET(prescale) |
- MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
- FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
- MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
-
- if (state->polarity == PWM_POLARITY_INVERSED)
- cr |= FIELD_PREP(MX3_PWMCR_POUTC,
- MX3_PWMCR_POUTC_INVERTED);
-
- writel(cr, imx->mmio_base + MX3_PWMCR);
- } else if (cstate.enabled) {
- writel(0, imx->mmio_base + MX3_PWMCR);
+ c = clk_get_rate(imx->clk_per);
+ c *= state->period;
- pwm_imx27_clk_disable_unprepare(chip);
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ prescale = period_cycles / 0x10000 + 1;
+
+ period_cycles /= prescale;
+ c = (unsigned long long)period_cycles * state->duty_cycle;
+ do_div(c, state->period);
+ duty_cycles = c;
+
+ /*
+ * according to imx pwm RM, the real period value should be PERIOD
+ * value in PWMPR plus 2.
+ */
+ if (period_cycles > 2)
+ period_cycles -= 2;
+ else
+ period_cycles = 0;
+
+ /*
+ * Wait for a free FIFO slot if the PWM is already enabled, and flush
+ * the FIFO if the PWM was disabled and is about to be enabled.
+ */
+ if (cstate.enabled) {
+ pwm_imx27_wait_fifo_slot(chip, pwm);
+ } else {
+ ret = pwm_imx27_clk_prepare_enable(chip);
+ if (ret)
+ return ret;
+
+ pwm_imx27_sw_reset(chip);
}
+ writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
+ writel(period_cycles, imx->mmio_base + MX3_PWMPR);
+
+ /*
+ * Store the duty cycle for future reference in cases where the
+ * MX3_PWMSAR register can't be read (i.e. when the PWM is disabled).
+ */
+ imx->duty_cycle = duty_cycles;
+
+ cr = MX3_PWMCR_PRESCALER_SET(prescale) |
+ MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
+ FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
+ MX3_PWMCR_DBGEN;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ cr |= FIELD_PREP(MX3_PWMCR_POUTC,
+ MX3_PWMCR_POUTC_INVERTED);
+
+ if (state->enabled)
+ cr |= MX3_PWMCR_EN;
+
+ writel(cr, imx->mmio_base + MX3_PWMCR);
+
+ if (!state->enabled && cstate.enabled)
+ pwm_imx27_clk_disable_unprepare(chip);
+
return 0;
}
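For the arithmetic above, a worked example with assumed values (a 66 MHz peripheral clock, a 1 ms period at 50% duty):

/*
 *   c             = 66000000 * 1000000 / 1000000000 = 66000
 *   prescale      = 66000 / 0x10000 + 1             = 2
 *   period_cycles = 66000 / 2                       = 33000
 *   duty_cycles   = 33000 * 500000 / 1000000        = 16500
 *   PWMPR         = 33000 - 2                       = 32998
 *
 * per the RM, the real period is the PWMPR value plus 2.
 */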
@@ -304,9 +319,13 @@ static int pwm_imx27_probe(struct platform_device *pdev)
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg)) {
- dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
- PTR_ERR(imx->clk_ipg));
- return PTR_ERR(imx->clk_ipg);
+ int ret = PTR_ERR(imx->clk_ipg);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "getting ipg clock failed with %d\n",
+ ret);
+ return ret;
}
imx->clk_per = devm_clk_get(&pdev->dev, "per");
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index b14376b47ac8..f2e57fcf8f8b 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -25,12 +25,16 @@
#define PERIOD_PERIOD(p) ((p) & 0xffff)
#define PERIOD_PERIOD_MAX 0x10000
#define PERIOD_ACTIVE_HIGH (3 << 16)
+#define PERIOD_ACTIVE_LOW (2 << 16)
+#define PERIOD_INACTIVE_HIGH (3 << 18)
#define PERIOD_INACTIVE_LOW (2 << 18)
+#define PERIOD_POLARITY_NORMAL (PERIOD_ACTIVE_HIGH | PERIOD_INACTIVE_LOW)
+#define PERIOD_POLARITY_INVERSE (PERIOD_ACTIVE_LOW | PERIOD_INACTIVE_HIGH)
#define PERIOD_CDIV(div) (((div) & 0x7) << 20)
#define PERIOD_CDIV_MAX 8
-static const unsigned int cdiv[PERIOD_CDIV_MAX] = {
- 1, 2, 4, 8, 16, 64, 256, 1024
+static const u8 cdiv_shift[PERIOD_CDIV_MAX] = {
+ 0, 1, 2, 3, 4, 6, 8, 10
};
struct mxs_pwm_chip {
@@ -41,19 +45,34 @@ struct mxs_pwm_chip {
#define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip)
-static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
int ret, div = 0;
unsigned int period_cycles, duty_cycles;
unsigned long rate;
unsigned long long c;
+ unsigned int pol_bits;
+
+ /*
+ * If the PWM channel is disabled, make sure to turn on the
+ * clock before calling clk_get_rate() and writing to the
+ * registers. Otherwise, just keep it enabled.
+ */
+ if (!pwm_is_enabled(pwm)) {
+ ret = clk_prepare_enable(mxs->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (!state->enabled && pwm_is_enabled(pwm))
+ writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
rate = clk_get_rate(mxs->clk);
while (1) {
- c = rate / cdiv[div];
- c = c * period_ns;
+ c = rate >> cdiv_shift[div];
+ c = c * state->period;
do_div(c, 1000000000);
if (c < PERIOD_PERIOD_MAX)
break;
@@ -63,62 +82,40 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
period_cycles = c;
- c *= duty_ns;
- do_div(c, period_ns);
+ c *= state->duty_cycle;
+ do_div(c, state->period);
duty_cycles = c;
/*
- * If the PWM channel is disabled, make sure to turn on the clock
- * before writing the register. Otherwise, keep it enabled.
+ * The data sheet says the registers must be written to in
+ * this order (ACTIVEn, then PERIODn). Also, the new settings
+ * only take effect at the beginning of a new period, avoiding
+ * glitches.
*/
- if (!pwm_is_enabled(pwm)) {
- ret = clk_prepare_enable(mxs->clk);
- if (ret)
- return ret;
- }
+ pol_bits = state->polarity == PWM_POLARITY_NORMAL ?
+ PERIOD_POLARITY_NORMAL : PERIOD_POLARITY_INVERSE;
writel(duty_cycles << 16,
- mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
- writel(PERIOD_PERIOD(period_cycles) | PERIOD_ACTIVE_HIGH |
- PERIOD_INACTIVE_LOW | PERIOD_CDIV(div),
- mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
-
- /*
- * If the PWM is not enabled, turn the clock off again to save power.
- */
- if (!pwm_is_enabled(pwm))
+ mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
+ writel(PERIOD_PERIOD(period_cycles) | pol_bits | PERIOD_CDIV(div),
+ mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
+
+ if (state->enabled) {
+ if (!pwm_is_enabled(pwm)) {
+ /*
+ * The clock was enabled above. Just enable
+ * the channel in the control register.
+ */
+ writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
+ }
+ } else {
clk_disable_unprepare(mxs->clk);
-
- return 0;
-}
-
-static int mxs_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
- int ret;
-
- ret = clk_prepare_enable(mxs->clk);
- if (ret)
- return ret;
-
- writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
-
+ }
return 0;
}
-static void mxs_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
-
- writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
-
- clk_disable_unprepare(mxs->clk);
-}
-
static const struct pwm_ops mxs_pwm_ops = {
- .config = mxs_pwm_config,
- .enable = mxs_pwm_enable,
- .disable = mxs_pwm_disable,
+ .apply = mxs_pwm_apply,
.owner = THIS_MODULE,
};
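The shift table works because the old divider table held only powers of two, so rate / cdiv[div] equals rate >> cdiv_shift[div] exactly. A small self-checking sketch of the equivalence:

#include <assert.h>

static const unsigned int cdiv_old[8] = { 1, 2, 4, 8, 16, 64, 256, 1024 };
static const unsigned char cdiv_shift_new[8] = { 0, 1, 2, 3, 4, 6, 8, 10 };

int main(void)
{
	unsigned long rate = 24000000UL;	/* assumed clock rate */
	int div;

	for (div = 0; div < 8; div++)
		assert(rate / cdiv_old[div] == rate >> cdiv_shift_new[div]);
	return 0;
}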
@@ -142,6 +139,8 @@ static int mxs_pwm_probe(struct platform_device *pdev)
mxs->chip.dev = &pdev->dev;
mxs->chip.ops = &mxs_pwm_ops;
+ mxs->chip.of_xlate = of_pwm_xlate_with_flags;
+ mxs->chip.of_pwm_n_cells = 3;
mxs->chip.base = -1;
ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 00772fc53490..88a3c5690fea 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -256,7 +256,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
if (!timer_pdev) {
dev_err(&pdev->dev, "Unable to find Timer pdev\n");
ret = -ENODEV;
- goto put;
+ goto err_find_timer_pdev;
}
timer_pdata = dev_get_platdata(&timer_pdev->dev);
@@ -264,7 +264,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev,
"dmtimer pdata structure NULL, deferring probe\n");
ret = -EPROBE_DEFER;
- goto put;
+ goto err_platdata;
}
pdata = timer_pdata->timer_ops;
@@ -283,30 +283,25 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
!pdata->write_counter) {
dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
ret = -EINVAL;
- goto put;
+ goto err_platdata;
}
if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
ret = -ENODEV;
- goto put;
+ goto err_timer_property;
}
dm_timer = pdata->request_by_node(timer);
if (!dm_timer) {
ret = -EPROBE_DEFER;
- goto put;
+ goto err_request_timer;
}
-put:
- of_node_put(timer);
- if (ret < 0)
- return ret;
-
omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
if (!omap) {
- pdata->free(dm_timer);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_omap;
}
omap->pdata = pdata;
@@ -339,27 +334,56 @@ put:
ret = pwmchip_add(&omap->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM\n");
- omap->pdata->free(omap->dm_timer);
- return ret;
+ goto err_pwmchip_add;
}
+ of_node_put(timer);
+
platform_set_drvdata(pdev, omap);
return 0;
+
+err_pwmchip_add:
+
+ /*
+ * *omap is allocated using devm_kzalloc,
+ * so no free is necessary here
+ */
+err_alloc_omap:
+
+ pdata->free(dm_timer);
+err_request_timer:
+
+err_timer_property:
+err_platdata:
+
+ put_device(&timer_pdev->dev);
+err_find_timer_pdev:
+
+ of_node_put(timer);
+
+ return ret;
}
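The error ladder introduced here follows the usual kernel shape: each label names the first cleanup still owed, and resources are released in the reverse order of acquisition. A condensed, stand-alone sketch of that shape (all names hypothetical):

#include <errno.h>
#include <stddef.h>

struct res { int token; };

static struct res *acquire_a(void) { static struct res a; return &a; }
static struct res *acquire_b(void) { return NULL; /* simulated failure */ }
static void release_a(struct res *r) { (void)r; }

int probe_shape(void)
{
	struct res *a, *b;
	int ret;

	a = acquire_a();
	if (!a)
		return -ENODEV;

	b = acquire_b();
	if (!b) {
		ret = -ENOMEM;
		goto err_release_a;	/* undo only what succeeded */
	}

	return 0;

err_release_a:
	release_a(a);
	return ret;
}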
static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&omap->chip);
+ if (ret)
+ return ret;
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
omap->pdata->free(omap->dm_timer);
+ put_device(&omap->dm_timer_pdev->dev);
+
mutex_destroy(&omap->mutex);
- return pwmchip_remove(&omap->chip);
+ return 0;
}
static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 168684b02ebc..b07bdca3d510 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -159,13 +159,9 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
{
struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm;
pca9685_pwm_gpio_set(gpio, offset, 0);
pm_runtime_put(pca->chip.dev);
- mutex_lock(&pca->lock);
- pwm = &pca->chip.pwms[offset];
- mutex_unlock(&pca->lock);
}
static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 852eb2347954..2685577b6dd4 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -3,6 +3,9 @@
* R-Car PWM Timer driver
*
* Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Limitations:
+ * - The hardware cannot generate a 0% duty cycle.
*/
#include <linux/clk.h>
@@ -161,11 +164,9 @@ static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
- struct pwm_state cur_state;
int div, ret;
/* This HW/driver only supports normal polarity */
- pwm_get_state(pwm, &cur_state);
if (state->polarity != PWM_POLARITY_NORMAL)
return -ENOTSUPP;
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 7ff48c14fae8..d3be944f2ae9 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -377,9 +377,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
else
regmap_update_bits(priv->regmap, TIM_CCMR2, mask, ccmr);
- regmap_update_bits(priv->regmap, TIM_BDTR,
- TIM_BDTR_MOE | TIM_BDTR_AOE,
- TIM_BDTR_MOE | TIM_BDTR_AOE);
+ regmap_update_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE, TIM_BDTR_MOE);
return 0;
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 581d23287333..3e3efa6c768f 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -3,6 +3,10 @@
* Driver for Allwinner sun4i Pulse Width Modulation Controller
*
* Copyright (C) 2014 Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * Limitations:
+ * - When outputting the source clock directly, the PWM logic will be bypassed
+ * and the currently running period is not guaranteed to be completed
*/
#include <linux/bitops.h>
@@ -16,6 +20,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
@@ -72,12 +77,15 @@ static const u32 prescaler_table[] = {
struct sun4i_pwm_data {
bool has_prescaler_bypass;
+ bool has_direct_mod_clk_output;
unsigned int npwm;
};
struct sun4i_pwm_chip {
struct pwm_chip chip;
+ struct clk *bus_clk;
struct clk *clk;
+ struct reset_control *rst;
void __iomem *base;
spinlock_t ctrl_lock;
const struct sun4i_pwm_data *data;
@@ -115,6 +123,20 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ /*
+ * The PWM chapter in the H6 manual has a diagram which explains that
+ * if the bypass bit is set, no other setting has any meaning. Moreover,
+ * experiments proved that the enable bit is also ignored in this case.
+ */
+ if ((val & BIT_CH(PWM_BYPASS, pwm->hwpwm)) &&
+ sun4i_pwm->data->has_direct_mod_clk_output) {
+ state->period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, clk_rate);
+ state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
+ state->polarity = PWM_POLARITY_NORMAL;
+ state->enabled = true;
+ return;
+ }
+
if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
sun4i_pwm->data->has_prescaler_bypass)
prescaler = 1;
@@ -146,13 +168,24 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
const struct pwm_state *state,
- u32 *dty, u32 *prd, unsigned int *prsclr)
+ u32 *dty, u32 *prd, unsigned int *prsclr,
+ bool *bypass)
{
u64 clk_rate, div = 0;
- unsigned int pval, prescaler = 0;
+ unsigned int prescaler = 0;
clk_rate = clk_get_rate(sun4i_pwm->clk);
+ *bypass = sun4i_pwm->data->has_direct_mod_clk_output &&
+ state->enabled &&
+ (state->period * clk_rate >= NSEC_PER_SEC) &&
+ (state->period * clk_rate < 2 * NSEC_PER_SEC) &&
+ (state->duty_cycle * clk_rate * 2 >= NSEC_PER_SEC);
+
+ /* Skip calculation of other parameters if we bypass them */
+ if (*bypass)
+ return 0;
+
if (sun4i_pwm->data->has_prescaler_bypass) {
/* First, test without any prescaler when available */
prescaler = PWM_PRESCAL_MASK;
@@ -170,9 +203,11 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
if (prescaler == 0) {
/* Go up from the first divider */
for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) {
- if (!prescaler_table[prescaler])
+ unsigned int pval = prescaler_table[prescaler];
+
+ if (!pval)
continue;
- pval = prescaler_table[prescaler];
+
div = clk_rate;
do_div(div, pval);
div = div * state->period;
@@ -199,10 +234,11 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
struct pwm_state cstate;
- u32 ctrl;
+ u32 ctrl, duty = 0, period = 0, val;
int ret;
- unsigned int delay_us;
+ unsigned int delay_us, prescaler = 0;
unsigned long now;
+ bool bypass;
pwm_get_state(pwm, &cstate);
@@ -214,46 +250,52 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
}
+ ret = sun4i_pwm_calculate(sun4i_pwm, state, &duty, &period, &prescaler,
+ &bypass);
+ if (ret) {
+ dev_err(chip->dev, "period exceeds the maximum value\n");
+ if (!cstate.enabled)
+ clk_disable_unprepare(sun4i_pwm->clk);
+ return ret;
+ }
+
spin_lock(&sun4i_pwm->ctrl_lock);
ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- if ((cstate.period != state->period) ||
- (cstate.duty_cycle != state->duty_cycle)) {
- u32 period, duty, val;
- unsigned int prescaler;
-
- ret = sun4i_pwm_calculate(sun4i_pwm, state,
- &duty, &period, &prescaler);
- if (ret) {
- dev_err(chip->dev, "period exceeds the maximum value\n");
+ if (sun4i_pwm->data->has_direct_mod_clk_output) {
+ if (bypass) {
+ ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm);
+ /* We can skip the other parameters */
+ sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
spin_unlock(&sun4i_pwm->ctrl_lock);
- if (!cstate.enabled)
- clk_disable_unprepare(sun4i_pwm->clk);
- return ret;
+ return 0;
}
- if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
- /* Prescaler changed, the clock has to be gated */
- ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+ ctrl &= ~BIT_CH(PWM_BYPASS, pwm->hwpwm);
+ }
- ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
- ctrl |= BIT_CH(prescaler, pwm->hwpwm);
- }
+ if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
+ /* Prescaler changed, the clock has to be gated */
+ ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+ sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
- val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
- sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
- usecs_to_jiffies(cstate.period / 1000 + 1);
- sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+ ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
+ ctrl |= BIT_CH(prescaler, pwm->hwpwm);
}
+ val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
+ sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+ sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
+ usecs_to_jiffies(cstate.period / 1000 + 1);
+ sun4i_pwm->needs_delay[pwm->hwpwm] = true;
+
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
else
ctrl |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
+
if (state->enabled) {
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
} else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
@@ -319,6 +361,12 @@ static const struct sun4i_pwm_data sun4i_pwm_single_bypass = {
.npwm = 1,
};
+static const struct sun4i_pwm_data sun50i_h6_pwm_data = {
+ .has_prescaler_bypass = true,
+ .has_direct_mod_clk_output = true,
+ .npwm = 2,
+};
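A worked reading of the bypass predicate added to sun4i_pwm_calculate(), with an assumed 24 MHz source clock (one cycle is ~41.67 ns):

/*
 *   period * 24000000 >= 1000000000   ->  period >= 42 ns
 *   period * 24000000 <  2000000000   ->  period <= 83 ns
 *   duty * 24000000 * 2 >= 1000000000 ->  duty   >= 21 ns
 *
 * i.e. bypass engages only for "one source-clock cycle at >= 50% duty",
 * which the directly forwarded mod clock reproduces exactly.
 */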
+
static const struct of_device_id sun4i_pwm_dt_ids[] = {
{
.compatible = "allwinner,sun4i-a10-pwm",
@@ -336,6 +384,9 @@ static const struct of_device_id sun4i_pwm_dt_ids[] = {
.compatible = "allwinner,sun8i-h3-pwm",
.data = &sun4i_pwm_single_bypass,
}, {
+ .compatible = "allwinner,sun50i-h6-pwm",
+ .data = &sun50i_h6_pwm_data,
+ }, {
/* sentinel */
},
};
@@ -360,9 +411,69 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
- pwm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm->clk))
+ /*
+ * All hardware variants need a source clock that is divided and
+ * then feeds the counter that defines the output wave form. In the
+ * device tree this clock is either unnamed or called "mod".
+ * Some variants (e.g. H6) need another clock to access the
+ * hardware registers; this is called "bus".
+ * So we request "mod" first (and ignore the corner case that a
+ * parent provides a "mod" clock while the right one would be the
+ * unnamed one of the PWM device) and if this is not found we fall
+ * back to the first clock of the PWM.
+ */
+ pwm->clk = devm_clk_get_optional(&pdev->dev, "mod");
+ if (IS_ERR(pwm->clk)) {
+ if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "get mod clock failed %pe\n",
+ pwm->clk);
return PTR_ERR(pwm->clk);
+ }
+
+ if (!pwm->clk) {
+ pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm->clk)) {
+ if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "get unnamed clock failed %pe\n",
+ pwm->clk);
+ return PTR_ERR(pwm->clk);
+ }
+ }
+
+ pwm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
+ if (IS_ERR(pwm->bus_clk)) {
+ if (PTR_ERR(pwm->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "get bus clock failed %pe\n",
+ pwm->bus_clk);
+ return PTR_ERR(pwm->bus_clk);
+ }
+
+ pwm->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(pwm->rst)) {
+ if (PTR_ERR(pwm->rst) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "get reset failed %pe\n",
+ pwm->rst);
+ return PTR_ERR(pwm->rst);
+ }
+
+ /* Deassert reset */
+ ret = reset_control_deassert(pwm->rst);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot deassert reset control: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ /*
+ * We're keeping the bus clock on for the sake of simplicity.
+ * Actually it only needs to be on for hardware register accesses.
+ */
+ ret = clk_prepare_enable(pwm->bus_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot prepare and enable bus_clk %pe\n",
+ ERR_PTR(ret));
+ goto err_bus;
+ }
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
@@ -376,19 +487,34 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
- return ret;
+ goto err_pwm_add;
}
platform_set_drvdata(pdev, pwm);
return 0;
+
+err_pwm_add:
+ clk_disable_unprepare(pwm->bus_clk);
+err_bus:
+ reset_control_assert(pwm->rst);
+
+ return ret;
}
static int sun4i_pwm_remove(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&pwm->chip);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(pwm->bus_clk);
+ reset_control_assert(pwm->rst);
- return pwmchip_remove(&pwm->chip);
+ return 0;
}
static struct platform_driver sun4i_pwm_driver = {
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 74eb5af7295f..074a2ef55943 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -194,9 +194,22 @@ config REGULATOR_BD70528
This driver can also be built as a module. If so, the module
will be called bd70528-regulator.
+config REGULATOR_BD71828
+ tristate "ROHM BD71828 Power Regulator"
+ depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
+ help
+ This driver supports voltage regulators on the ROHM BD71828 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd71828-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
+ select REGULATOR_ROHM
help
This driver supports voltage regulators on ROHM BD71837 PMIC.
This will enable support for the software controllable buck
@@ -600,6 +613,27 @@ config REGULATOR_MCP16502
through the regulator interface. In addition it enables
suspend-to-ram/standby transition.
+config REGULATOR_MP8859
+ tristate "MPS MP8859 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the MP8859 voltage regulator. This driver
+ supports basic operations (get/set voltage) through the regulator
+ interface.
+ Say M here if you want to include support for the regulator as a
+ module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+ tristate "Monolithic MPQ7920 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the MPQ7920 PMIC. This will enable support
+ for the 4 software-controllable buck and 5 LDO regulators.
+ This driver supports the control of the device's different power
+ rails through the regulator interface.
+
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
@@ -790,6 +824,9 @@ config REGULATOR_RN5T618
Say y here to support the regulators found on Ricoh RN5T567,
RN5T618 or RC5T619 PMIC.
+config REGULATOR_ROHM
+ tristate
+
config REGULATOR_RT5033
tristate "Richtek RT5033 Regulators"
depends on MFD_RT5033
@@ -1077,6 +1114,13 @@ config REGULATOR_VEXPRESS
This driver provides support for voltage regulators available
on the ARM Ltd's Versatile Express platform.
+config REGULATOR_VQMMC_IPQ4019
+ tristate "IPQ4019 VQMMC SD LDO regulator support"
+ depends on ARCH_QCOM
+ help
+ This driver provides support for the VQMMC LDO I/O
+ voltage regulator of the IPQ4019 SD/eMMC controller.
+
config REGULATOR_WM831X
tristate "Wolfson Microelectronics WM831x PMIC regulators"
depends on MFD_WM831X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2210ba56f9bd..c0d6b96ebd78 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
+obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
@@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
+obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
+obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
@@ -99,6 +102,7 @@ obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o
obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o
+obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
@@ -132,6 +136,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
+obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 989506bd90b1..16f0c8570036 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
int i;
for (i = 0; i < rate_count; i++) {
- if (ramp <= slew_rates[i])
- cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
- else
+ if (ramp > slew_rates[i])
break;
+
+ if (id == AXP20X_DCDC2)
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
+ else
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
}
if (cfg == 0xff) {
@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
- AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index ec764022621f..5bf8a2dc5fe7 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = bd70528_set_ramp_delay,
};
static const struct regulator_ops bd70528_led_ops = {
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
new file mode 100644
index 000000000000..b2fa17be4988
--- /dev/null
+++ b/drivers/regulator/bd71828-regulator.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019 ROHM Semiconductors
+// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver
+//
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct reg_init {
+ unsigned int reg;
+ unsigned int mask;
+ unsigned int val;
+};
+struct bd71828_regulator_data {
+ struct regulator_desc desc;
+ const struct rohm_dvs_config dvs;
+ const struct reg_init *reg_inits;
+ int reg_init_amnt;
+};
+
+static const struct reg_init buck1_inits[] = {
+ /*
+ * DVS Buck voltages can be changed by register values or via GPIO.
+ * Use register accesses by default.
+ */
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK1_CTRL,
+ .val = BD71828_DVS_BUCK1_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck2_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK2_CTRL,
+ .val = BD71828_DVS_BUCK2_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck6_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK6_CTRL,
+ .val = BD71828_DVS_BUCK6_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck7_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK7_CTRL,
+ .val = BD71828_DVS_BUCK7_CTRL_I2C,
+ },
+};
+
+static const struct regulator_linear_range bd71828_buck1267_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250),
+ REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck3_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck4_volts[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck5_volts[] = {
+ REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_ldo_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0),
+};
+
+static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int val;
+
+ switch (ramp_delay) {
+ case 1 ... 2500:
+ val = 0;
+ break;
+ case 2501 ... 5000:
+ val = 1;
+ break;
+ case 5001 ... 10000:
+ val = 2;
+ break;
+ case 10001 ... 20000:
+ val = 3;
+ break;
+ default:
+ val = 3;
+ dev_err(&rdev->dev,
+ "ramp_delay: %d not supported, setting 20mV/uS",
+ ramp_delay);
+ }
+
+ /*
+ * On BD71828 the ramp delay level control reg is at offset +2 to
+ * enable reg
+ */
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+ BD71828_MASK_RAMP_DELAY,
+ val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
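The val << (ffs(mask) - 1) idiom above places a field value at the mask's least significant set bit. A stand-alone illustration, with an assumed two-bit mask at bits 7:6 (the real BD71828_MASK_RAMP_DELAY value lives in the MFD header):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define EXAMPLE_RAMP_MASK 0xc0	/* assumed field position */

int main(void)
{
	unsigned int val = 3;	/* slowest setting, 20mV/uS */
	unsigned int field = val << (ffs(EXAMPLE_RAMP_MASK) - 1);

	printf("0x%02x\n", field);	/* prints 0xc0 */
	return 0;
}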
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd71828_regulator_data *data;
+
+ data = container_of(desc, struct bd71828_regulator_data, desc);
+
+ return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
+static int ldo6_parse_dt(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ uint32_t uv = 0;
+ unsigned int en;
+ struct regmap *regmap = cfg->regmap;
+ static const char * const props[] = { "rohm,dvs-run-voltage",
+ "rohm,dvs-idle-voltage",
+ "rohm,dvs-suspend-voltage",
+ "rohm,dvs-lpsr-voltage" };
+ unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+ BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ ret = of_property_read_u32(np, props[i], &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ continue;
+ }
+ if (uv)
+ en = 0xffffffff;
+ else
+ en = 0;
+
+ ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct regulator_ops bd71828_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK1,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK1_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK1_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+			/*
+			 * The LPSR voltage is the same as the SUSPEND
+			 * voltage. Allow setting it so that the regulator
+			 * can be enabled in the LPSR state.
+			 */
+ .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck1_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck1_inits),
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK2,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK2_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK2_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck2_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck2_inits),
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK3,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts),
+ .n_voltages = BD71828_BUCK3_VOLTS,
+ .enable_reg = BD71828_REG_BUCK3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK3_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK3_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * BUCK3 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK3_VOLT,
+ .idle_reg = BD71828_REG_BUCK3_VOLT,
+ .suspend_reg = BD71828_REG_BUCK3_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ .run_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_mask = BD71828_MASK_BUCK3_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK3_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK4,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck4_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts),
+ .n_voltages = BD71828_BUCK4_VOLTS,
+ .enable_reg = BD71828_REG_BUCK4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK4_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK4_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * BUCK4 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK4_VOLT,
+ .idle_reg = BD71828_REG_BUCK4_VOLT,
+ .suspend_reg = BD71828_REG_BUCK4_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ .run_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_mask = BD71828_MASK_BUCK4_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK4_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK5,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck5_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts),
+ .n_voltages = BD71828_BUCK5_VOLTS,
+ .enable_reg = BD71828_REG_BUCK5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK5_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK5_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * BUCK5 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK5_VOLT,
+ .idle_reg = BD71828_REG_BUCK5_VOLT,
+ .suspend_reg = BD71828_REG_BUCK5_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ .run_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_mask = BD71828_MASK_BUCK5_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK5_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK6,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK6_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK6_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck6_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck6_inits),
+ },
+ {
+ .desc = {
+ .name = "buck7",
+ .of_match = of_match_ptr("BUCK7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK7,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK7_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK7_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck7_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck7_inits),
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO1,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO1_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * LDO1 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO1_VOLT,
+ .idle_reg = BD71828_REG_LDO1_VOLT,
+ .suspend_reg = BD71828_REG_LDO1_VOLT,
+ .lpsr_reg = BD71828_REG_LDO1_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO2,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO2_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * LDO2 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO2_VOLT,
+ .idle_reg = BD71828_REG_LDO2_VOLT,
+ .suspend_reg = BD71828_REG_LDO2_VOLT,
+ .lpsr_reg = BD71828_REG_LDO2_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO3,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO3_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * LDO3 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO3_VOLT,
+ .idle_reg = BD71828_REG_LDO3_VOLT,
+ .suspend_reg = BD71828_REG_LDO3_VOLT,
+ .lpsr_reg = BD71828_REG_LDO3_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO4,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO4_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * LDO4 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO4_VOLT,
+ .idle_reg = BD71828_REG_LDO4_VOLT,
+ .suspend_reg = BD71828_REG_LDO4_VOLT,
+ .lpsr_reg = BD71828_REG_LDO4_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO5,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO5_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ .owner = THIS_MODULE,
+ },
+		/*
+		 * LDO5 is special. Its vsel setting can be taken from either
+		 * of two registers, selected by a GPIO.
+		 *
+		 * This driver supports only the configuration where
+		 * BD71828_REG_LDO5_VOLT_L is used.
+		 */
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO5_VOLT,
+ .idle_reg = BD71828_REG_LDO5_VOLT,
+ .suspend_reg = BD71828_REG_LDO5_VOLT,
+ .lpsr_reg = BD71828_REG_LDO5_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO6,
+ .ops = &bd71828_ldo6_ops,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD71828_LDO_6_VOLTAGE,
+ .n_voltages = 1,
+ .enable_reg = BD71828_REG_LDO6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .owner = THIS_MODULE,
+ /*
+ * LDO6 only supports enable/disable for all states.
+ * Voltage for LDO6 is fixed.
+ */
+ .of_parse_cb = ldo6_parse_dt,
+ },
+ }, {
+ .desc = {
+ /* SNVS LDO in data-sheet */
+ .name = "ldo7",
+ .of_match = of_match_ptr("LDO7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO_SNVS,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO7_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+			/*
+			 * LDO7 supports only a single voltage for all
+			 * states. The regulator can, however, be enabled or
+			 * disabled individually for each state => allow
+			 * setting all states so that the power rail can be
+			 * enabled in the desired states.
+			 */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO7_VOLT,
+ .idle_reg = BD71828_REG_LDO7_VOLT,
+ .suspend_reg = BD71828_REG_LDO7_VOLT,
+ .lpsr_reg = BD71828_REG_LDO7_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+};
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd71828;
+ int i, j, ret;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ };
+
+ bd71828 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd71828) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ config.regmap = bd71828->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) {
+ struct regulator_dev *rdev;
+ const struct bd71828_regulator_data *rd;
+
+ rd = &bd71828_rdata[i];
+ rdev = devm_regulator_register(&pdev->dev,
+ &rd->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ rd->desc.name);
+ return PTR_ERR(rdev);
+ }
+ for (j = 0; j < rd->reg_init_amnt; j++) {
+ ret = regmap_update_bits(bd71828->regmap,
+ rd->reg_inits[j].reg,
+ rd->reg_inits[j].mask,
+ rd->reg_inits[j].val);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator %s init failed\n",
+ rd->desc.name);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver bd71828_regulator = {
+ .driver = {
+ .name = "bd71828-pmic"
+ },
+ .probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-pmic");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 13a43eee2e46..cf3872837abc 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -318,6 +318,7 @@ struct reg_init {
};
struct bd718xx_regulator_data {
struct regulator_desc desc;
+ const struct rohm_dvs_config dvs;
const struct reg_init init;
const struct reg_init *additional_inits;
int additional_init_amnt;
@@ -349,133 +350,15 @@ static const struct reg_init bd71837_ldo6_inits[] = {
},
};
-#define NUM_DVS_BUCKS 4
-
-struct of_dvs_setting {
- const char *prop;
- unsigned int reg;
-};
-
-static int set_dvs_levels(const struct of_dvs_setting *dvs,
- struct device_node *np,
- const struct regulator_desc *desc,
- struct regmap *regmap)
-{
- int ret, i;
- unsigned int uv;
-
- ret = of_property_read_u32(np, dvs->prop, &uv);
- if (ret) {
- if (ret != -EINVAL)
- return ret;
- return 0;
- }
-
- for (i = 0; i < desc->n_voltages; i++) {
- ret = regulator_desc_list_voltage_linear_range(desc, i);
- if (ret < 0)
- continue;
- if (ret == uv) {
- i <<= ffs(desc->vsel_mask) - 1;
- ret = regmap_update_bits(regmap, dvs->reg,
- DVS_BUCK_RUN_MASK, i);
- break;
- }
- }
- return ret;
-}
-
-static int buck4_set_hw_dvs_levels(struct device_node *np,
+static int buck_set_hw_dvs_levels(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *cfg)
{
- int ret, i;
- const struct of_dvs_setting dvs[] = {
- {
- .prop = "rohm,dvs-run-voltage",
- .reg = BD71837_REG_BUCK4_VOLT_RUN,
- },
- };
+ struct bd718xx_regulator_data *data;
- for (i = 0; i < ARRAY_SIZE(dvs); i++) {
- ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
- if (ret)
- break;
- }
- return ret;
-}
-static int buck3_set_hw_dvs_levels(struct device_node *np,
- const struct regulator_desc *desc,
- struct regulator_config *cfg)
-{
- int ret, i;
- const struct of_dvs_setting dvs[] = {
- {
- .prop = "rohm,dvs-run-voltage",
- .reg = BD71837_REG_BUCK3_VOLT_RUN,
- },
- };
+ data = container_of(desc, struct bd718xx_regulator_data, desc);
- for (i = 0; i < ARRAY_SIZE(dvs); i++) {
- ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
- if (ret)
- break;
- }
- return ret;
-}
-
-static int buck2_set_hw_dvs_levels(struct device_node *np,
- const struct regulator_desc *desc,
- struct regulator_config *cfg)
-{
- int ret, i;
- const struct of_dvs_setting dvs[] = {
- {
- .prop = "rohm,dvs-run-voltage",
- .reg = BD718XX_REG_BUCK2_VOLT_RUN,
- },
- {
- .prop = "rohm,dvs-idle-voltage",
- .reg = BD718XX_REG_BUCK2_VOLT_IDLE,
- },
- };
-
-
-
- for (i = 0; i < ARRAY_SIZE(dvs); i++) {
- ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
- if (ret)
- break;
- }
- return ret;
-}
-
-static int buck1_set_hw_dvs_levels(struct device_node *np,
- const struct regulator_desc *desc,
- struct regulator_config *cfg)
-{
- int ret, i;
- const struct of_dvs_setting dvs[] = {
- {
- .prop = "rohm,dvs-run-voltage",
- .reg = BD718XX_REG_BUCK1_VOLT_RUN,
- },
- {
- .prop = "rohm,dvs-idle-voltage",
- .reg = BD718XX_REG_BUCK1_VOLT_IDLE,
- },
- {
- .prop = "rohm,dvs-suspend-voltage",
- .reg = BD718XX_REG_BUCK1_VOLT_SUSP,
- },
- };
-
- for (i = 0; i < ARRAY_SIZE(dvs); i++) {
- ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
- if (ret)
- break;
- }
- return ret;
+ return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
}
static const struct bd718xx_regulator_data bd71847_regulators[] = {
@@ -496,7 +379,17 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
- .of_parse_cb = buck1_set_hw_dvs_levels,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND,
+ .run_reg = BD718XX_REG_BUCK1_VOLT_RUN,
+ .run_mask = DVS_BUCK_RUN_MASK,
+ .idle_reg = BD718XX_REG_BUCK1_VOLT_IDLE,
+ .idle_mask = DVS_BUCK_RUN_MASK,
+ .suspend_reg = BD718XX_REG_BUCK1_VOLT_SUSP,
+ .suspend_mask = DVS_BUCK_RUN_MASK,
},
.init = {
.reg = BD718XX_REG_BUCK1_CTRL,
@@ -520,7 +413,14 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
- .of_parse_cb = buck2_set_hw_dvs_levels,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE,
+ .run_reg = BD718XX_REG_BUCK2_VOLT_RUN,
+ .run_mask = DVS_BUCK_RUN_MASK,
+ .idle_reg = BD718XX_REG_BUCK2_VOLT_IDLE,
+ .idle_mask = DVS_BUCK_RUN_MASK,
},
.init = {
.reg = BD718XX_REG_BUCK2_CTRL,
@@ -792,7 +692,17 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
- .of_parse_cb = buck1_set_hw_dvs_levels,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND,
+ .run_reg = BD718XX_REG_BUCK1_VOLT_RUN,
+ .run_mask = DVS_BUCK_RUN_MASK,
+ .idle_reg = BD718XX_REG_BUCK1_VOLT_IDLE,
+ .idle_mask = DVS_BUCK_RUN_MASK,
+ .suspend_reg = BD718XX_REG_BUCK1_VOLT_SUSP,
+ .suspend_mask = DVS_BUCK_RUN_MASK,
},
.init = {
.reg = BD718XX_REG_BUCK1_CTRL,
@@ -816,7 +726,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
- .of_parse_cb = buck2_set_hw_dvs_levels,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE,
+ .run_reg = BD718XX_REG_BUCK2_VOLT_RUN,
+ .run_mask = DVS_BUCK_RUN_MASK,
+ .idle_reg = BD718XX_REG_BUCK2_VOLT_IDLE,
+ .idle_mask = DVS_BUCK_RUN_MASK,
},
.init = {
.reg = BD718XX_REG_BUCK2_CTRL,
@@ -840,7 +757,12 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
- .of_parse_cb = buck3_set_hw_dvs_levels,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN,
+ .run_reg = BD71837_REG_BUCK3_VOLT_RUN,
+ .run_mask = DVS_BUCK_RUN_MASK,
},
.init = {
.reg = BD71837_REG_BUCK3_CTRL,
@@ -864,7 +786,12 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
- .of_parse_cb = buck4_set_hw_dvs_levels,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN,
+ .run_reg = BD71837_REG_BUCK4_VOLT_RUN,
+ .run_mask = DVS_BUCK_RUN_MASK,
},
.init = {
.reg = BD71837_REG_BUCK4_CTRL,
@@ -1142,28 +1069,15 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
},
};
-struct bd718xx_pmic_inits {
- const struct bd718xx_regulator_data *r_datas;
- unsigned int r_amount;
-};
-
static int bd718xx_probe(struct platform_device *pdev)
{
struct bd718xx *mfd;
struct regulator_config config = { 0 };
- struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = {
- [ROHM_CHIP_TYPE_BD71837] = {
- .r_datas = bd71837_regulators,
- .r_amount = ARRAY_SIZE(bd71837_regulators),
- },
- [ROHM_CHIP_TYPE_BD71847] = {
- .r_datas = bd71847_regulators,
- .r_amount = ARRAY_SIZE(bd71847_regulators),
- },
- };
-
int i, j, err;
bool use_snvs;
+ const struct bd718xx_regulator_data *reg_data;
+ unsigned int num_reg_data;
+ enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
@@ -1172,8 +1086,16 @@ static int bd718xx_probe(struct platform_device *pdev)
goto err;
}
- if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT ||
- !pmic_regulators[mfd->chip.chip_type].r_datas) {
+ switch (chip) {
+ case ROHM_CHIP_TYPE_BD71837:
+ reg_data = bd71837_regulators;
+ num_reg_data = ARRAY_SIZE(bd71837_regulators);
+ break;
+ case ROHM_CHIP_TYPE_BD71847:
+ reg_data = bd71847_regulators;
+ num_reg_data = ARRAY_SIZE(bd71847_regulators);
+ break;
+ default:
dev_err(&pdev->dev, "Unsupported chip type\n");
err = -EINVAL;
goto err;
@@ -1215,13 +1137,13 @@ static int bd718xx_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) {
+ for (i = 0; i < num_reg_data; i++) {
const struct regulator_desc *desc;
struct regulator_dev *rdev;
const struct bd718xx_regulator_data *r;
- r = &pmic_regulators[mfd->chip.chip_type].r_datas[i];
+ r = &reg_data[i];
desc = &r->desc;
config.dev = pdev->dev.parent;
@@ -1281,11 +1203,19 @@ err:
return err;
}
+static const struct platform_device_id bd718x7_pmic_id[] = {
+ { "bd71837-pmic", ROHM_CHIP_TYPE_BD71837 },
+ { "bd71847-pmic", ROHM_CHIP_TYPE_BD71847 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, bd718x7_pmic_id);
+
static struct platform_driver bd718xx_regulator = {
.driver = {
.name = "bd718xx-pmic",
},
.probe = bd718xx_probe,
+ .id_table = bd718x7_pmic_id,
};
module_platform_driver(bd718xx_regulator);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 679ad3d2ed23..d015d99cb59d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1198,6 +1198,10 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
+ /* no need to loop voltages if range is continuous */
+ if (rdev->desc->continuous_voltage_range)
+ return 0;
+
/* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
for (i = 0; i < count; i++) {
int value;
@@ -1938,8 +1942,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
regulator = create_regulator(rdev, dev, id);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
- put_device(&rdev->dev);
module_put(rdev->owner);
+ put_device(&rdev->dev);
return regulator;
}
@@ -2063,13 +2067,13 @@ static void _regulator_put(struct regulator *regulator)
rdev->open_count--;
rdev->exclusive = 0;
- put_device(&rdev->dev);
regulator_unlock(rdev);
kfree_const(regulator->supply_name);
kfree(regulator);
module_put(rdev->owner);
+ put_device(&rdev->dev);
}
/**
@@ -3466,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
out:
return ret;
}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
static int regulator_limit_voltage_step(struct regulator_dev *rdev,
int *current_uV, int *min_uV)
@@ -4030,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
return ret;
return ret - rdev->constraints->uV_offset;
}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
/**
* regulator_get_voltage - get regulator output voltage
@@ -5002,6 +5008,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
struct regulator_dev *rdev;
bool dangling_cfg_gpiod = false;
bool dangling_of_gpiod = false;
+ bool reg_device_fail = false;
struct device *dev;
int ret, i;
@@ -5187,7 +5194,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
ret = device_register(&rdev->dev);
if (ret != 0) {
- put_device(&rdev->dev);
+ reg_device_fail = true;
goto unset_supplies;
}
@@ -5218,7 +5225,10 @@ wash:
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
- kfree(rdev);
+ if (reg_device_fail)
+ put_device(&rdev->dev);
+ else
+ kfree(rdev);
kfree(config);
rinse:
if (dangling_cfg_gpiod)
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f9448ed50e05..0cdeb6186529 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, da9210_dt_ids);
-static int da9210_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9210_i2c_probe(struct i2c_client *i2c)
{
struct da9210 *chip;
struct device *dev = &i2c->dev;
@@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = {
.name = "da9210",
.of_match_table = of_match_ptr(da9210_dt_ids),
},
- .probe = da9210_i2c_probe,
+ .probe_new = da9210_i2c_probe,
.id_table = da9210_i2c_id,
};
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 523dc1b95826..2ea4362ffa5c 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip)
/*
* I2C driver interface functions
*/
-static int da9211_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9211_i2c_probe(struct i2c_client *i2c)
{
struct da9211 *chip;
int error, ret;
@@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = {
.name = "da9211",
.of_match_table = of_match_ptr(da9211_dt_ids),
},
- .probe = da9211_i2c_probe,
+ .probe_new = da9211_i2c_probe,
.id_table = da9211_i2c_id,
};
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index ca3dc3f3bb29..bb16c465426e 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -13,6 +13,8 @@
#include <linux/regulator/driver.h>
#include <linux/module.h>
+#include "internal.h"
+
/**
* regulator_is_enabled_regmap - standard is_enabled() for regmap users
*
@@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
consumers[i].supply = supply_names[i];
}
EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
+
+/**
+ * regulator_is_equal - test whether two regulators are the same
+ *
+ * @reg1: first regulator to operate on
+ * @reg2: second regulator to operate on
+ */
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return reg1->rdev == reg2->rdev;
+}
+EXPORT_SYMBOL_GPL(regulator_is_equal);
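
A hedged usage sketch for the new helper (the supply names here are
hypothetical): a consumer holding two regulator handles can check whether
they resolve to the same physical regulator before acting on both:

	struct regulator *vcc = devm_regulator_get(dev, "vcc");
	struct regulator *vio = devm_regulator_get(dev, "vio");

	if (!IS_ERR(vcc) && !IS_ERR(vio) && regulator_is_equal(vcc, vio))
		dev_dbg(dev, "vcc and vio share one physical regulator\n");
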
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 978f5e903cae..cfb765986d0d 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-static int isl9305_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int isl9305_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct isl9305_pdata *pdata = i2c->dev.platform_data;
@@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = {
.name = "isl9305",
.of_match_table = of_match_ptr(isl9305_dt_ids),
},
- .probe = isl9305_i2c_probe,
+ .probe_new = isl9305_i2c_probe,
.id_table = isl9305_i2c_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index bc96e65ef7c0..8be252f81b09 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971,
return 0;
}
-static int lp3971_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int lp3971_i2c_probe(struct i2c_client *i2c)
{
struct lp3971 *lp3971;
struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
@@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = {
.driver = {
.name = "LP3971",
},
- .probe = lp3971_i2c_probe,
+ .probe_new = lp3971_i2c_probe,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index d934540eb8c4..e12e52c69e52 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ltc3676_regulator_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc3676_regulator_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator_init_data *init_data = dev_get_platdata(dev);
@@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ltc3676_of_match),
},
- .probe = ltc3676_regulator_probe,
+ .probe_new = ltc3676_regulator_probe,
.id_table = ltc3676_i2c_id,
};
module_i2c_driver(ltc3676_driver);
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index e57fc9197d62..ac89a412f665 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -386,9 +386,16 @@ static int max77650_regulator_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id max77650_regulator_of_match[] = {
+ { .compatible = "maxim,max77650-regulator" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_regulator_of_match);
+
static struct platform_driver max77650_regulator_driver = {
.driver = {
.name = "max77650-regulator",
+ .of_match_table = max77650_regulator_of_match,
},
.probe = max77650_regulator_probe,
};
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
new file mode 100644
index 000000000000..1d26b506ee5b
--- /dev/null
+++ b/drivers/regulator/mp8859.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 five technologies GmbH
+// Author: Markus Reichl <m.reichl@fivetechno.de>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+
+
+#define VOL_MIN_IDX 0x00
+#define VOL_MAX_IDX 0x7ff
+
+/* Register definitions */
+#define MP8859_VOUT_L_REG 0 /* 3 low bits */
+#define MP8859_VOUT_H_REG 1 /* 8 high bits */
+#define MP8859_VOUT_GO_REG 2
+#define MP8859_IOUT_LIM_REG 3
+#define MP8859_CTL1_REG 4
+#define MP8859_CTL2_REG 5
+#define MP8859_RESERVED1_REG 6
+#define MP8859_RESERVED2_REG 7
+#define MP8859_RESERVED3_REG 8
+#define MP8859_STATUS_REG 9
+#define MP8859_INTERRUPT_REG 0x0A
+#define MP8859_MASK_REG 0x0B
+#define MP8859_ID1_REG 0x0C
+#define MP8859_MFR_ID_REG 0x27
+#define MP8859_DEV_ID_REG 0x28
+#define MP8859_IC_REV_REG 0x29
+
+#define MP8859_MAX_REG 0x29
+
+#define MP8859_GO_BIT 0x01
+
+
+static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7);
+
+ if (ret)
+ return ret;
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3);
+
+ if (ret)
+ return ret;
+ ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG,
+ MP8859_GO_BIT, 1);
+ return ret;
+}
+
+static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
+{
+ unsigned int val_tmp;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val = val_tmp << 3;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val |= val_tmp & 0x07;
+ return val;
+}
+
+static const struct regulator_linear_range mp8859_dcdc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
+};
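
Taken together with the callbacks above, the linear range implies an 11-bit
selector split across two registers: the low 3 bits go to MP8859_VOUT_L_REG
and the high 8 bits to MP8859_VOUT_H_REG, at 10 mV per step (0x800
selectors, 0 V to 20.47 V). A worked example:

	unsigned int sel = 0x123;	/* selector 291 */
	unsigned int lo = sel & 0x7;	/* 3, written to MP8859_VOUT_L_REG */
	unsigned int hi = sel >> 3;	/* 36, written to MP8859_VOUT_H_REG */
	unsigned int uV = sel * 10000;	/* 2910000 uV, i.e. 2.91 V */
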
+
+static const struct regmap_config mp8859_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MP8859_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regulator_ops mp8859_ops = {
+ .set_voltage_sel = mp8859_set_voltage_sel,
+ .get_voltage_sel = mp8859_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+};
+
+static const struct regulator_desc mp8859_regulators[] = {
+ {
+ .id = 0,
+ .type = REGULATOR_VOLTAGE,
+ .name = "mp8859_dcdc",
+ .of_match = of_match_ptr("mp8859_dcdc"),
+ .n_voltages = VOL_MAX_IDX + 1,
+ .linear_ranges = mp8859_dcdc_ranges,
+ .n_linear_ranges = 1,
+ .ops = &mp8859_ops,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int mp8859_i2c_probe(struct i2c_client *i2c)
+{
+ int ret;
+ struct regulator_config config = {.dev = &i2c->dev};
+ struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
+ struct regulator_dev *rdev;
+
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+ rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
+ &config);
+
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "failed to register %s: %d\n",
+ mp8859_regulators[0].name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct of_device_id mp8859_dt_id[] = {
+ {.compatible = "mps,mp8859"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mp8859_dt_id);
+
+static const struct i2c_device_id mp8859_i2c_id[] = {
+ { "mp8859", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id);
+
+static struct i2c_driver mp8859_regulator_driver = {
+ .driver = {
+ .name = "mp8859",
+ .of_match_table = of_match_ptr(mp8859_dt_id),
+ },
+ .probe_new = mp8859_i2c_probe,
+ .id_table = mp8859_i2c_id,
+};
+
+module_i2c_driver(mp8859_regulator_driver);
+
+MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver");
+MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c
new file mode 100644
index 000000000000..54c862edf571
--- /dev/null
+++ b/drivers/regulator/mpq7920.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// mpq7920.c - regulator driver for mps mpq7920
+//
+// Copyright 2019 Monolithic Power Systems, Inc
+//
+// Author: Saravanan Sekar <sravanhome@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include "mpq7920.h"
+
+#define MPQ7920_BUCK_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+#define MPQ7920_LDO_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
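
As a quick check of the macros above, with the values from mpq7920.h below
(all in uV): (3587500 - 400000) / 12500 + 1 = 256 buck selectors, and
(3587500 - 650000) / 12500 + 1 = 236 LDO selectors.
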
+
+#define MPQ7920BUCK(_name, _id, _ilim) \
+ [MPQ7920_BUCK ## _id] = { \
+ .id = MPQ7920_BUCK ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .of_parse_cb = mpq7920_parse_cb, \
+ .ops = &mpq7920_buck_ops, \
+ .min_uV = MPQ7920_BUCK_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \
+ .curr_table = _ilim, \
+ .n_current_limits = ARRAY_SIZE(_ilim), \
+ .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .csel_mask = MPQ7920_MASK_BUCK_ILIM, \
+ .enable_reg = MPQ7920_REG_REGULATOR_EN, \
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_BUCK ## _id), \
+ .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .soft_start_mask = MPQ7920_MASK_SOFTSTART, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \
+ [MPQ7920_LDO ## _id] = { \
+ .id = MPQ7920_LDO ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .ops = _ops, \
+ .min_uV = MPQ7920_LDO_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_LDO_VOLT_RANGE, \
+ .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .curr_table = _ilim, \
+ .n_current_limits = _ilim_sz, \
+ .csel_reg = _creg, \
+ .csel_mask = _cmask, \
+ .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_LDO ##_id + 1), \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+enum mpq7920_regulators {
+ MPQ7920_BUCK1,
+ MPQ7920_BUCK2,
+ MPQ7920_BUCK3,
+ MPQ7920_BUCK4,
+ MPQ7920_LDO1, /* LDORTC */
+ MPQ7920_LDO2,
+ MPQ7920_LDO3,
+ MPQ7920_LDO4,
+ MPQ7920_LDO5,
+ MPQ7920_MAX_REGULATORS,
+};
+
+struct mpq7920_regulator_info {
+ struct regmap *regmap;
+ struct regulator_desc *rdesc;
+};
+
+static const struct regmap_config mpq7920_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x25,
+};
+
+/*
+ * Current limits array (in uA)
+ * ILIM1 & ILIM3
+ */
+static const unsigned int mpq7920_I_limits1[] = {
+ 4600000, 6600000, 7600000, 9300000
+};
+
+/* ILIM2 & ILIM4 */
+static const unsigned int mpq7920_I_limits2[] = {
+ 2700000, 3900000, 5100000, 6100000
+};
+
+/* LDO4 & LDO5 */
+static const unsigned int mpq7920_I_limits3[] = {
+ 300000, 700000
+};
+
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *rdesc,
+ struct regulator_config *config);
+
+/* RTCLDO not controllable, always ON */
+static const struct regulator_ops mpq7920_ldortc_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_wo_current_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+};
+
+static const struct regulator_ops mpq7920_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_soft_start = regulator_set_soft_start_regmap,
+ .set_ramp_delay = mpq7920_set_ramp_delay,
+};
+
+static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = {
+ MPQ7920BUCK("buck1", 1, mpq7920_I_limits1),
+ MPQ7920BUCK("buck2", 2, mpq7920_I_limits2),
+ MPQ7920BUCK("buck3", 3, mpq7920_I_limits1),
+ MPQ7920BUCK("buck4", 4, mpq7920_I_limits2),
+ MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+ MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+};
+
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00-01: Reserved
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_val;
+
+ if (ramp_delay > 8000 || ramp_delay < 0)
+ return -EINVAL;
+
+ if (ramp_delay <= 4000)
+ ramp_val = 3;
+ else
+ ramp_val = 2;
+
+ return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6);
+}
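
The ramp_delay argument is in uV/us, so requests up to 4000 (4 mV/us)
select register value 3 and requests in 4001..8000 select value 2
(8 mV/us), matching the encoding in the comment above; the shift by 6
places the value in bits 7:6 of MPQ7920_REG_CTL0, which is what the 0xc0
slew-rate mask covers.
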
+
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ uint8_t val;
+ int ret;
+ struct mpq7920_regulator_info *info = config->driver_data;
+ struct regulator_desc *rdesc = &info->rdesc[desc->id];
+
+ if (of_property_read_bool(np, "mps,buck-ovp-disable")) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_B + (rdesc->id * 4),
+ MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-phase-delay", &val);
+ if (!ret) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_C + (rdesc->id * 4),
+				   MPQ7920_MASK_BUCK_PHASE_DELAY,
+ (val & 3) << 4);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-softstart", &val);
+ if (!ret)
+ rdesc->soft_start_val_on = (val & 3) << 2;
+
+ return 0;
+}
+
+static void mpq7920_parse_dt(struct device *dev,
+ struct mpq7920_regulator_info *info)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ uint8_t freq;
+
+ np = of_get_child_by_name(np, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return;
+ }
+
+ ret = of_property_read_u8(np, "mps,switch-freq", &freq);
+ if (!ret) {
+ regmap_update_bits(info->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_SWITCH_FREQ,
+ (freq & 3) << 4);
+ }
+
+ of_node_put(np);
+}
+
+static int mpq7920_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mpq7920_regulator_info *info;
+ struct regulator_config config = { NULL, };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->rdesc = mpq7920_regulators_desc;
+ regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, info);
+ info->regmap = regmap;
+ if (client->dev.of_node)
+ mpq7920_parse_dt(&client->dev, info);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ config.driver_data = info;
+
+ for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &mpq7920_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mpq7920_of_match[] = {
+ { .compatible = "mps,mpq7920"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpq7920_of_match);
+
+static const struct i2c_device_id mpq7920_id[] = {
+ { "mpq7920", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq7920_id);
+
+static struct i2c_driver mpq7920_regulator_driver = {
+ .driver = {
+ .name = "mpq7920",
+ .of_match_table = of_match_ptr(mpq7920_of_match),
+ },
+ .probe_new = mpq7920_i2c_probe,
+ .id_table = mpq7920_id,
+};
+module_i2c_driver(mpq7920_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h
new file mode 100644
index 000000000000..489924655a96
--- /dev/null
+++ b/drivers/regulator/mpq7920.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * mpq7920.h - Regulator definitions for mpq7920
+ *
+ * Copyright 2019 Monolithic Power Systems, Inc
+ *
+ */
+
+#ifndef __MPQ7920_H__
+#define __MPQ7920_H__
+
+#define MPQ7920_REG_CTL0 0x00
+#define MPQ7920_REG_CTL1 0x01
+#define MPQ7920_REG_CTL2 0x02
+#define MPQ7920_BUCK1_REG_A 0x03
+#define MPQ7920_BUCK1_REG_B 0x04
+#define MPQ7920_BUCK1_REG_C 0x05
+#define MPQ7920_BUCK1_REG_D 0x06
+#define MPQ7920_BUCK2_REG_A 0x07
+#define MPQ7920_BUCK2_REG_B 0x08
+#define MPQ7920_BUCK2_REG_C 0x09
+#define MPQ7920_BUCK2_REG_D 0x0a
+#define MPQ7920_BUCK3_REG_A 0x0b
+#define MPQ7920_BUCK3_REG_B 0x0c
+#define MPQ7920_BUCK3_REG_C 0x0d
+#define MPQ7920_BUCK3_REG_D 0x0e
+#define MPQ7920_BUCK4_REG_A 0x0f
+#define MPQ7920_BUCK4_REG_B 0x10
+#define MPQ7920_BUCK4_REG_C 0x11
+#define MPQ7920_BUCK4_REG_D 0x12
+#define MPQ7920_LDO1_REG_A 0x13
+#define MPQ7920_LDO1_REG_B 0x0
+#define MPQ7920_LDO2_REG_A 0x14
+#define MPQ7920_LDO2_REG_B 0x15
+#define MPQ7920_LDO2_REG_C 0x16
+#define MPQ7920_LDO3_REG_A 0x17
+#define MPQ7920_LDO3_REG_B 0x18
+#define MPQ7920_LDO3_REG_C 0x19
+#define MPQ7920_LDO4_REG_A 0x1a
+#define MPQ7920_LDO4_REG_B 0x1b
+#define MPQ7920_LDO4_REG_C 0x1c
+#define MPQ7920_LDO5_REG_A 0x1d
+#define MPQ7920_LDO5_REG_B 0x1e
+#define MPQ7920_LDO5_REG_C 0x1f
+#define MPQ7920_REG_MODE 0x20
+#define MPQ7920_REG_REGULATOR_EN 0x22
+
+#define MPQ7920_MASK_VREF 0x7f
+#define MPQ7920_MASK_BUCK_ILIM 0xc0
+#define MPQ7920_MASK_LDO_ILIM BIT(6)
+#define MPQ7920_MASK_DISCHARGE BIT(5)
+#define MPQ7920_MASK_MODE 0xc0
+#define MPQ7920_MASK_SOFTSTART 0x0c
+#define MPQ7920_MASK_SWITCH_FREQ 0x30
+#define MPQ7920_MASK_BUCK_PHASE_DELAY	0x30
+#define MPQ7920_MASK_DVS_SLEWRATE 0xc0
+#define MPQ7920_MASK_OVP 0x40
+#define MPQ7920_OVP_DISABLE ~(0x40)
+#define MPQ7920_DISCHARGE_ON BIT(5)
+
+#define MPQ7920_REGULATOR_EN_OFFSET 7
+
+/* values in uV */
+#define MPQ7920_BUCK_VOLT_MIN 400000
+#define MPQ7920_LDO_VOLT_MIN 650000
+#define MPQ7920_VOLT_MAX 3587500
+#define MPQ7920_VOLT_STEP 12500
+
+#endif /* __MPQ7920_H__ */
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index af95449d3590..69e6af3cd505 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = {
/*
* I2C driver interface functions
*/
-static int mt6311_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int mt6311_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = {
.name = "mt6311",
.of_match_table = of_match_ptr(mt6311_dt_ids),
},
- .probe = mt6311_i2c_probe,
+ .probe_new = mt6311_i2c_probe,
.id_table = mt6311_i2c_id,
};
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 3d3415839ba2..787ced918372 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -279,8 +279,7 @@ error_i2c:
/*
* I2C driver interface functions
*/
-static int pv88060_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88060_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88060 *chip;
@@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = {
.name = "pv88060",
.of_match_table = of_match_ptr(pv88060_dt_ids),
},
- .probe = pv88060_i2c_probe,
+ .probe_new = pv88060_i2c_probe,
.id_table = pv88060_i2c_id,
};
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index b1d0d97ae935..784729ec2182 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -272,8 +272,7 @@ error_i2c:
/*
* I2C driver interface functions
*/
-static int pv88090_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88090_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88090 *chip;
@@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = {
.name = "pv88090",
.of_match_table = of_match_ptr(pv88090_dt_ids),
},
- .probe = pv88090_i2c_probe,
+ .probe_new = pv88090_i2c_probe,
.id_table = pv88090_i2c_id,
};
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 5b4003226484..31f79fda3238 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
}
if (!pdata->dvs_gpio[i]) {
- dev_warn(dev, "there is no dvs%d gpio\n", i);
+ dev_info(dev, "there is no dvs%d gpio\n", i);
continue;
}
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index 4a91be0ad5ae..5c12d57be040 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -148,6 +148,7 @@ static struct platform_driver rn5t618_regulator_driver = {
module_platform_driver(rn5t618_regulator_driver);
+MODULE_ALIAS("platform:rn5t618-regulator");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
new file mode 100644
index 000000000000..399002383b28
--- /dev/null
+++ b/drivers/regulator/rohm-regulator.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 ROHM Semiconductors
+
+#include <linux/errno.h>
+#include <linux/mfd/rohm-generic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+static int set_dvs_level(const struct regulator_desc *desc,
+ struct device_node *np, struct regmap *regmap,
+ char *prop, unsigned int reg, unsigned int mask,
+ unsigned int omask, unsigned int oreg)
+{
+ int ret, i;
+ uint32_t uv;
+
+ ret = of_property_read_u32(np, prop, &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ return 0;
+ }
+
+ if (uv == 0) {
+ if (omask)
+ return regmap_update_bits(regmap, oreg, omask, 0);
+ }
+ for (i = 0; i < desc->n_voltages; i++) {
+ ret = regulator_desc_list_voltage_linear_range(desc, i);
+ if (ret < 0)
+ continue;
+ if (ret == uv) {
+ i <<= ffs(desc->vsel_mask) - 1;
+ ret = regmap_update_bits(regmap, reg, mask, i);
+ if (omask && !ret)
+ ret = regmap_update_bits(regmap, oreg, omask,
+ omask);
+ break;
+ }
+ }
+ return ret;
+}
+
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap)
+{
+ int i, ret = 0;
+ char *prop;
+ unsigned int reg, mask, omask, oreg = desc->enable_reg;
+
+ for (i = 0; i < ROHM_DVS_LEVEL_MAX && !ret; i++) {
+ if (dvs->level_map & (1 << i)) {
+ switch (i + 1) {
+ case ROHM_DVS_LEVEL_RUN:
+ prop = "rohm,dvs-run-voltage";
+ reg = dvs->run_reg;
+ mask = dvs->run_mask;
+ omask = dvs->run_on_mask;
+ break;
+ case ROHM_DVS_LEVEL_IDLE:
+ prop = "rohm,dvs-idle-voltage";
+ reg = dvs->idle_reg;
+ mask = dvs->idle_mask;
+ omask = dvs->idle_on_mask;
+ break;
+ case ROHM_DVS_LEVEL_SUSPEND:
+ prop = "rohm,dvs-suspend-voltage";
+ reg = dvs->suspend_reg;
+ mask = dvs->suspend_mask;
+ omask = dvs->suspend_on_mask;
+ break;
+ case ROHM_DVS_LEVEL_LPSR:
+ prop = "rohm,dvs-lpsr-voltage";
+ reg = dvs->lpsr_reg;
+ mask = dvs->lpsr_mask;
+ omask = dvs->lpsr_on_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = set_dvs_level(desc, np, regmap, prop, reg, mask,
+ omask, oreg);
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rohm_regulator_set_dvs_levels);
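
A minimal sketch of how a PMIC driver is expected to wire this helper up,
mirroring the bd71828 and bd718x7 changes earlier in this diff; the wrapper
struct and function names are hypothetical:

struct my_regulator_data {
	struct regulator_desc desc;
	struct rohm_dvs_config dvs;
};

static int my_set_hw_dvs_levels(struct device_node *np,
				const struct regulator_desc *desc,
				struct regulator_config *cfg)
{
	struct my_regulator_data *data =
		container_of(desc, struct my_regulator_data, desc);

	/* program the RUN/IDLE/SUSPEND/LPSR voltages described in DT */
	return rohm_regulator_set_dvs_levels(&data->dvs, np, desc,
					     cfg->regmap);
}
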
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("Generic helpers for ROHM PMIC regulator drivers");
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 51f7e8b74d8c..115f59530852 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -390,5 +390,5 @@ module_platform_driver(s2mpa01_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 4f2dc5ebffdc..23d288278957 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index bdc07739e9a2..4abd3ed31f60 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -588,7 +588,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
if (of_property_read_u32(reg_np, "op_mode",
&rmode->mode)) {
dev_warn(iodev->dev,
- "no op_mode property property at %pOF\n",
+ "no op_mode property at %pOF\n",
reg_np);
rmode->mode = S5M8767_OPMODE_NORMAL_MODE;
@@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index bf1a3508ebc4..44e4cecbf6de 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip)
dev_dbg(chip->dev, "Fault log: FLT_POR\n");
}
-static int slg51000_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int slg51000_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct slg51000 *chip;
@@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = {
.driver = {
.name = "slg51000-regulator",
},
- .probe = slg51000_i2c_probe,
+ .probe_new = slg51000_i2c_probe,
.id_table = slg51000_i2c_id,
};
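Several drivers in this merge move from .probe to .probe_new, whose callback drops the i2c_device_id argument these drivers never used. A minimal sketch of the converted shape, with hypothetical "example" names:

static const struct i2c_device_id example_i2c_id[] = {
	{ "example", 0 },
	{ }
};

/* probe_new style: no i2c_device_id parameter */
static int example_i2c_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;

	dev_dbg(dev, "probing at address 0x%02x\n", client->addr);
	return 0;
}

static struct i2c_driver example_i2c_driver = {
	.driver = { .name = "example" },
	.probe_new = example_i2c_probe,
	.id_table = example_i2c_id,
};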
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 42e03b2c10a0..2222e739e62b 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = {
/*
* I2C driver interface functions
*/
-static int sy8106a_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sy8106a_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_dev *rdev;
@@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = {
.name = "sy8106a",
.of_match_table = of_match_ptr(sy8106a_i2c_of_match),
},
- .probe = sy8106a_i2c_probe,
+ .probe_new = sy8106a_i2c_probe,
.id_table = sy8106a_i2c_id,
};
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
index 92adb4f3ee19..62d243f3b904 100644
--- a/drivers/regulator/sy8824x.c
+++ b/drivers/regulator/sy8824x.c
@@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = {
.val_bits = 8,
};
-static int sy8824_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sy8824_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
@@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = {
.name = "sy8824-regulator",
.of_match_table = of_match_ptr(sy8824_dt_ids),
},
- .probe = sy8824_i2c_probe,
+ .probe_new = sy8824_i2c_probe,
.id_table = sy8824_id,
};
module_i2c_driver(sy8824_regulator_driver);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 89b9314d64c9..af9abcd9c166 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -748,7 +748,7 @@ static int ti_abb_probe(struct platform_device *pdev)
* We may have shared interrupt register offsets which are
* write-1-to-clear between domains ensuring exclusivity.
*/
- abb->int_base = devm_ioremap_nocache(dev, res->start,
+ abb->int_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!abb->int_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
@@ -768,7 +768,7 @@ static int ti_abb_probe(struct platform_device *pdev)
* We may have shared efuse register offsets which are read-only
* between domains
*/
- abb->efuse_base = devm_ioremap_nocache(dev, res->start,
+ abb->efuse_base = devm_ioremap(dev, res->start,
resource_size(res));
if (!abb->efuse_base) {
dev_err(dev, "Unable to map '%s'\n", pname);
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index 7b0e38f8d627..0edc83089ba2 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = {
.wr_table = &tps65132_no_reg_table,
};
-static int tps65132_probe(struct i2c_client *client,
- const struct i2c_device_id *client_id)
+static int tps65132_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps65132_regulator *tps;
@@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = {
.driver = {
.name = "tps65132",
},
- .probe = tps65132_probe,
+ .probe_new = tps65132_probe,
.id_table = tps65132_id,
};
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index 9a9ee8188109..cbadb1c99679 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -11,10 +11,13 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/sort.h>
+#include "internal.h"
+
struct vctrl_voltage_range {
int min_uV;
int max_uV;
@@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
static int vctrl_get_voltage(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
- int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
return vctrl_calc_output_voltage(vctrl, ctrl_uV);
}
@@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
struct regulator *ctrl_reg = vctrl->ctrl_reg;
- int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
+ int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
int ret;
if (req_min_uV >= uV || !vctrl->ovp_threshold)
/* voltage rising or no OVP */
- return regulator_set_voltage(
- ctrl_reg,
+ return regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
- vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
+ vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
+ PM_SUSPEND_ON);
while (uV > req_min_uV) {
int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
@@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ next_ctrl_uV,
next_ctrl_uV,
- next_ctrl_uV);
+ PM_SUSPEND_ON);
if (ret)
goto err;
@@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
err:
/* Try to go back to original voltage */
- regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
+ regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+ PM_SUSPEND_ON);
return ret;
}
@@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
/* voltage rising or no OVP */
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[selector].ctrl,
vctrl->vtable[selector].ctrl,
- vctrl->vtable[selector].ctrl);
+ PM_SUSPEND_ON);
if (!ret)
vctrl->sel = selector;
@@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
else
next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl->vtable[next_sel].ctrl,
- vctrl->vtable[next_sel].ctrl);
+ vctrl->vtable[next_sel].ctrl,
+ PM_SUSPEND_ON);
if (ret) {
dev_err(&rdev->dev,
"failed to set control voltage to %duV\n",
@@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
err:
if (vctrl->sel != orig_sel) {
/* Try to go back to original voltage */
- if (!regulator_set_voltage(ctrl_reg,
+ if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[orig_sel].ctrl,
vctrl->vtable[orig_sel].ctrl,
- vctrl->vtable[orig_sel].ctrl))
+ PM_SUSPEND_ON))
vctrl->sel = orig_sel;
else
dev_warn(&rdev->dev,
@@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
- ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
if (ctrl_uV < 0) {
dev_err(&pdev->dev, "failed to get control voltage\n");
return ctrl_uV;
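vctrl now drives its control supply through regulator_get_voltage_rdev()/regulator_set_voltage_rdev(), the rdev-level entry points used by the regulator coupling code in the core (hence the new "internal.h" include); the added PM_SUSPEND_ON argument selects the normal, non-suspend state. A sketch of the OVP-limited step-down logic those calls sit inside, with made-up numbers:

	int uV = 1200000;		/* present output voltage */
	int req_min_uV = 900000;	/* requested target */
	int ovp_threshold = 10;		/* percent, from the device tree */

	while (uV > req_min_uV) {
		/* never drop more than ovp_threshold% in one step */
		int max_drop_uV = (uV * ovp_threshold) / 100;
		int next_uV = uV - max_drop_uV;

		if (next_uV < req_min_uV)
			next_uV = req_min_uV;
		/* regulator_set_voltage_rdev(...) would be issued here */
		uV = next_uV;
	}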
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
new file mode 100644
index 000000000000..6d5ae25d08d1
--- /dev/null
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com>
+// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr>
+//
+// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int ipq4019_vmmc_voltages[] = {
+ 1500000, 1800000, 2500000, 3000000,
+};
+
+static const struct regulator_ops ipq4019_regulator_voltage_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc vmmc_regulator = {
+ .name = "vmmcq",
+ .ops = &ipq4019_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .volt_table = ipq4019_vmmc_voltages,
+ .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages),
+ .vsel_reg = 0,
+ .vsel_mask = 0x3,
+};
+
+static const struct regmap_config ipq4019_vmmcq_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int ipq4019_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+ struct regulator_dev *rdev;
+ struct resource *res;
+ struct regmap *rmap;
+ void __iomem *base;
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &vmmc_regulator);
+ if (!init_data)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config);
+ if (IS_ERR(rmap))
+ return PTR_ERR(rmap);
+
+ cfg.dev = dev;
+ cfg.init_data = init_data;
+ cfg.of_node = dev->of_node;
+ cfg.regmap = rmap;
+
+ rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator: %ld\n",
+ PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static const struct of_device_id regulator_ipq4019_of_match[] = {
+ { .compatible = "qcom,vqmmc-ipq4019-regulator", },
+ {},
+};
+
+static struct platform_driver ipq4019_regulator_driver = {
+ .probe = ipq4019_regulator_probe,
+ .driver = {
+ .name = "vqmmc-ipq4019-regulator",
+ .of_match_table = of_match_ptr(regulator_ipq4019_of_match),
+ },
+};
+module_platform_driver(ipq4019_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>");
+MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 94afdde4bc9f..de3862c15fcc 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -23,6 +23,16 @@ config IMX_REMOTEPROC
It's safe to say N here.
+config MTK_SCP
+ tristate "Mediatek SCP support"
+ depends on ARCH_MEDIATEK
+ select RPMSG_MTK_SCP
+ help
+ Say y here to support Mediatek's System Companion Processor (SCP) via
+ the remote processor framework.
+
+ It's safe to say N here.
+
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
depends on ARCH_OMAP4 || SOC_OMAP5
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 00f09e658cb3..e30a1b15fbac 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -10,6 +10,7 @@ remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
+obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
new file mode 100644
index 000000000000..deb20096146a
--- /dev/null
+++ b/drivers/remoteproc/mtk_common.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __RPROC_MTK_COMMON_H
+#define __RPROC_MTK_COMMON_H
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#define MT8183_SW_RSTN 0x0
+#define MT8183_SW_RSTN_BIT BIT(0)
+#define MT8183_SCP_TO_HOST 0x1C
+#define MT8183_SCP_IPC_INT_BIT BIT(0)
+#define MT8183_SCP_WDT_INT_BIT BIT(8)
+#define MT8183_HOST_TO_SCP 0x28
+#define MT8183_HOST_IPC_INT_BIT BIT(0)
+#define MT8183_WDT_CFG 0x84
+#define MT8183_SCP_CLK_SW_SEL 0x4000
+#define MT8183_SCP_CLK_DIV_SEL 0x4024
+#define MT8183_SCP_SRAM_PDN 0x402C
+#define MT8183_SCP_L1_SRAM_PD 0x4080
+#define MT8183_SCP_TCM_TAIL_SRAM_PD 0x4094
+
+#define MT8183_SCP_CACHE_SEL(x) (0x14000 + (x) * 0x3000)
+#define MT8183_SCP_CACHE_CON MT8183_SCP_CACHE_SEL(0)
+#define MT8183_SCP_DCACHE_CON MT8183_SCP_CACHE_SEL(1)
+#define MT8183_SCP_CACHESIZE_8KB BIT(8)
+#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
+
+#define SCP_FW_VER_LEN 32
+#define SCP_SHARE_BUFFER_SIZE 288
+
+struct scp_run {
+ u32 signaled;
+ s8 fw_ver[SCP_FW_VER_LEN];
+ u32 dec_capability;
+ u32 enc_capability;
+ wait_queue_head_t wq;
+};
+
+struct scp_ipi_desc {
+ /* For protecting handler. */
+ struct mutex lock;
+ scp_ipi_handler_t handler;
+ void *priv;
+};
+
+struct mtk_scp {
+ struct device *dev;
+ struct rproc *rproc;
+ struct clk *clk;
+ void __iomem *reg_base;
+ void __iomem *sram_base;
+ size_t sram_size;
+
+ struct mtk_share_obj __iomem *recv_buf;
+ struct mtk_share_obj __iomem *send_buf;
+ struct scp_run run;
+ /* To prevent multiple ipi_send run concurrently. */
+ struct mutex send_lock;
+ struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
+ bool ipi_id_ack[SCP_IPI_MAX];
+ wait_queue_head_t ack_wq;
+
+ void __iomem *cpu_addr;
+ phys_addr_t phys_addr;
+ size_t dram_size;
+
+ struct rproc_subdev *rpmsg_subdev;
+};
+
+/**
+ * struct mtk_share_obj - SRAM buffer shared between AP and SCP
+ *
+ * @id: IPI id
+ * @len: share buffer length
+ * @share_buf: share buffer data
+ */
+struct mtk_share_obj {
+ u32 id;
+ u32 len;
+ u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+};
+
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
+void scp_ipi_lock(struct mtk_scp *scp, u32 id);
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id);
+
+#endif
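The send/recv objects declared here are carved out of the top of SCP SRAM by scp_ipi_init() in mtk_scp.c below, directly beneath SCP_FW_END (0x7C000). Assuming no structure padding, sizeof(struct mtk_share_obj) = 4 + 4 + 288 = 296 bytes, so the layout works out as:

	/* offsets from the start of SRAM */
	send_offset = SCP_FW_END - 296;		/* 0x7BED8, send_buf */
	recv_offset = send_offset - 296;	/* 0x7BDB0, recv_buf */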
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
new file mode 100644
index 000000000000..7ccdf64ff3ea
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+
+#include "mtk_common.h"
+#include "remoteproc_internal.h"
+
+#define MAX_CODE_SIZE 0x500000
+#define SCP_FW_END 0x7C000
+
+/**
+ * scp_get() - get a reference to SCP.
+ *
+ * @pdev: the platform device of the module requesting the SCP platform
+ * device in order to use the SCP API.
+ *
+ * Return: NULL on failure, otherwise a reference to the SCP.
+ **/
+struct mtk_scp *scp_get(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *scp_node;
+ struct platform_device *scp_pdev;
+
+ scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
+ if (!scp_node) {
+ dev_err(dev, "can't get SCP node\n");
+ return NULL;
+ }
+
+ scp_pdev = of_find_device_by_node(scp_node);
+ of_node_put(scp_node);
+
+ if (WARN_ON(!scp_pdev)) {
+ dev_err(dev, "SCP pdev failed\n");
+ return NULL;
+ }
+
+ return platform_get_drvdata(scp_pdev);
+}
+EXPORT_SYMBOL_GPL(scp_get);
+
+/**
+ * scp_put() - "free" the SCP
+ *
+ * @scp: mtk_scp structure from scp_get().
+ **/
+void scp_put(struct mtk_scp *scp)
+{
+ put_device(scp->dev);
+}
+EXPORT_SYMBOL_GPL(scp_put);
+
+static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
+{
+ dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
+ rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+}
+
+static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)priv;
+ struct scp_run *run = (struct scp_run *)data;
+
+ scp->run.signaled = run->signaled;
+ strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
+ scp->run.dec_capability = run->dec_capability;
+ scp->run.enc_capability = run->enc_capability;
+ wake_up_interruptible(&scp->run.wq);
+}
+
+static void scp_ipi_handler(struct mtk_scp *scp)
+{
+ struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
+ struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
+ u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
+ scp_ipi_handler_t handler;
+ u32 id = readl(&rcv_obj->id);
+ u32 len = readl(&rcv_obj->len);
+
+ if (len > SCP_SHARE_BUFFER_SIZE) {
+ dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
+ SCP_SHARE_BUFFER_SIZE);
+ return;
+ }
+ if (id >= SCP_IPI_MAX) {
+ dev_err(scp->dev, "No such ipi id = %d\n", id);
+ return;
+ }
+
+ scp_ipi_lock(scp, id);
+ handler = ipi_desc[id].handler;
+ if (!handler) {
+ dev_err(scp->dev, "No such ipi id = %d\n", id);
+ scp_ipi_unlock(scp, id);
+ return;
+ }
+
+ memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
+ handler(tmp_data, len, ipi_desc[id].priv);
+ scp_ipi_unlock(scp, id);
+
+ scp->ipi_id_ack[id] = true;
+ wake_up(&scp->ack_wq);
+}
+
+static int scp_ipi_init(struct mtk_scp *scp)
+{
+ size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
+ size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);
+
+ /* Disable SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+ /* shared buffer initialization */
+ scp->recv_buf =
+ (struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
+ scp->send_buf =
+ (struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
+ memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
+ memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
+
+ return 0;
+}
+
+static void scp_reset_assert(const struct mtk_scp *scp)
+{
+ u32 val;
+
+ val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val &= ~MT8183_SW_RSTN_BIT;
+ writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void scp_reset_deassert(const struct mtk_scp *scp)
+{
+ u32 val;
+
+ val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val |= MT8183_SW_RSTN_BIT;
+ writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static irqreturn_t scp_irq_handler(int irq, void *priv)
+{
+ struct mtk_scp *scp = priv;
+ u32 scp_to_host;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return IRQ_NONE;
+ }
+
+ scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+ if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
+ scp_ipi_handler(scp);
+ else
+ scp_wdt_handler(scp, scp_to_host);
+
+ /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
+ writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
+ scp->reg_base + MT8183_SCP_TO_HOST);
+ clk_disable_unprepare(scp->clk);
+
+ return IRQ_HANDLED;
+}
+
+static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ void __iomem *ptr;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ if (phdr->p_filesz)
+ scp_memcpy_aligned(ptr, elf_data + phdr->p_offset,
+ filesz);
+ }
+
+ return ret;
+}
+
+static int scp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ const struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* Hold SCP in reset while loading FW. */
+ scp_reset_assert(scp);
+
+ /* Reset clocks before loading FW */
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+ /* Initialize TCM before loading FW. */
+ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+
+ /* Turn on the power of SCP's SRAM before using it. */
+ writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
+ ret = scp_elf_load_segments(rproc, fw);
+ clk_disable_unprepare(scp->clk);
+
+ return ret;
+}
+
+static int scp_start(struct rproc *rproc)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ struct device *dev = scp->dev;
+ struct scp_run *run = &scp->run;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ run->signaled = false;
+
+ scp_reset_deassert(scp);
+
+ ret = wait_event_interruptible_timeout(
+ run->wq,
+ run->signaled,
+ msecs_to_jiffies(2000));
+
+ if (ret == 0) {
+ dev_err(dev, "wait SCP initialization timeout!\n");
+ ret = -ETIME;
+ goto stop;
+ }
+ if (ret == -ERESTARTSYS) {
+ dev_err(dev, "wait SCP interrupted by a signal!\n");
+ goto stop;
+ }
+ clk_disable_unprepare(scp->clk);
+ dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
+
+ return 0;
+
+stop:
+ scp_reset_assert(scp);
+ clk_disable_unprepare(scp->clk);
+ return ret;
+}
+
+static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ int offset;
+
+ if (da < scp->sram_size) {
+ offset = da;
+ if (offset >= 0 && (offset + len) < scp->sram_size)
+ return (void __force *)scp->sram_base + offset;
+ } else {
+ offset = da - scp->phys_addr;
+ if (offset >= 0 && (offset + len) < scp->dram_size)
+ return (void __force *)scp->cpu_addr + offset;
+ }
+
+ return NULL;
+}
+
+static int scp_stop(struct rproc *rproc)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ scp_reset_assert(scp);
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8183_WDT_CFG);
+ clk_disable_unprepare(scp->clk);
+
+ return 0;
+}
+
+static const struct rproc_ops scp_ops = {
+ .start = scp_start,
+ .stop = scp_stop,
+ .load = scp_load,
+ .da_to_va = scp_da_to_va,
+};
+
+/**
+ * scp_get_device() - get device struct of SCP
+ *
+ * @scp: mtk_scp structure
+ **/
+struct device *scp_get_device(struct mtk_scp *scp)
+{
+ return scp->dev;
+}
+EXPORT_SYMBOL_GPL(scp_get_device);
+
+/**
+ * scp_get_rproc() - get rproc struct of SCP
+ *
+ * @scp: mtk_scp structure
+ **/
+struct rproc *scp_get_rproc(struct mtk_scp *scp)
+{
+ return scp->rproc;
+}
+EXPORT_SYMBOL_GPL(scp_get_rproc);
+
+/**
+ * scp_get_vdec_hw_capa() - get video decoder hardware capability
+ *
+ * @scp: mtk_scp structure
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
+{
+ return scp->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
+
+/**
+ * scp_get_venc_hw_capa() - get video encoder hardware capability
+ *
+ * @scp: mtk_scp structure
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
+{
+ return scp->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
+
+/**
+ * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
+ *
+ * @scp: mtk_scp structure
+ * @mem_addr: SCP's view of the memory address
+ *
+ * Map the SCP's SRAM address, DMEM (Data Extended Memory) address, or
+ * working buffer address to a kernel virtual address.
+ *
+ * Return: Return ERR_PTR(-EINVAL) if mapping failed,
+ * otherwise the mapped kernel virtual address
+ **/
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
+{
+ void *ptr;
+
+ ptr = scp_da_to_va(scp->rproc, mem_addr, 0);
+ if (!ptr)
+ return ERR_PTR(-EINVAL);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
+
+static int scp_map_memory_region(struct mtk_scp *scp)
+{
+ int ret;
+
+ ret = of_reserved_mem_device_init(scp->dev);
+ if (ret) {
+ dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ /* Reserved SCP code size */
+ scp->dram_size = MAX_CODE_SIZE;
+ scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+ &scp->phys_addr, GFP_KERNEL);
+ if (!scp->cpu_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void scp_unmap_memory_region(struct mtk_scp *scp)
+{
+ dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+ scp->phys_addr);
+ of_reserved_mem_device_release(scp->dev);
+}
+
+static int scp_register_ipi(struct platform_device *pdev, u32 id,
+ ipi_handler_t handler, void *priv)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ return scp_ipi_register(scp, id, handler, priv);
+}
+
+static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ scp_ipi_unregister(scp, id);
+}
+
+static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ return scp_ipi_send(scp, id, buf, len, wait);
+}
+
+static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
+ .send_ipi = scp_send_ipi,
+ .register_ipi = scp_register_ipi,
+ .unregister_ipi = scp_unregister_ipi,
+ .ns_ipi_id = SCP_IPI_NS_SERVICE,
+};
+
+static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
+{
+ scp->rpmsg_subdev =
+ mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
+ &mtk_scp_rpmsg_info);
+ if (scp->rpmsg_subdev)
+ rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
+}
+
+static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
+{
+ if (scp->rpmsg_subdev) {
+ rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
+ mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
+ scp->rpmsg_subdev = NULL;
+ }
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_scp *scp;
+ struct rproc *rproc;
+ struct resource *res;
+ char *fw_name = "scp.img";
+ int ret, i;
+
+ rproc = rproc_alloc(dev,
+ np->name,
+ &scp_ops,
+ fw_name,
+ sizeof(*scp));
+ if (!rproc) {
+ dev_err(dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ scp = (struct mtk_scp *)rproc->priv;
+ scp->rproc = rproc;
+ scp->dev = dev;
+ platform_set_drvdata(pdev, scp);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ scp->sram_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)scp->sram_base)) {
+ dev_err(dev, "Failed to parse and map sram memory\n");
+ ret = PTR_ERR((__force void *)scp->sram_base);
+ goto free_rproc;
+ }
+ scp->sram_size = resource_size(res);
+
+ mutex_init(&scp->send_lock);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_init(&scp->ipi_desc[i].lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ scp->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)scp->reg_base)) {
+ dev_err(dev, "Failed to parse and map cfg memory\n");
+ ret = PTR_ERR((__force void *)scp->reg_base);
+ goto destroy_mutex;
+ }
+
+ ret = scp_map_memory_region(scp);
+ if (ret)
+ goto destroy_mutex;
+
+ scp->clk = devm_clk_get(dev, "main");
+ if (IS_ERR(scp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ ret = PTR_ERR(scp->clk);
+ goto release_dev_mem;
+ }
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ goto release_dev_mem;
+ }
+
+ ret = scp_ipi_init(scp);
+ clk_disable_unprepare(scp->clk);
+ if (ret) {
+ dev_err(dev, "Failed to init ipi\n");
+ goto release_dev_mem;
+ }
+
+ /* register SCP initialization IPI */
+ ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
+ if (ret) {
+ dev_err(dev, "Failed to register IPI_SCP_INIT\n");
+ goto release_dev_mem;
+ }
+
+ init_waitqueue_head(&scp->run.wq);
+ init_waitqueue_head(&scp->ack_wq);
+
+ scp_add_rpmsg_subdev(scp);
+
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
+ scp_irq_handler, IRQF_ONESHOT,
+ pdev->name, scp);
+
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto remove_subdev;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_subdev;
+
+ return 0;
+
+remove_subdev:
+ scp_remove_rpmsg_subdev(scp);
+ scp_ipi_unregister(scp, SCP_IPI_INIT);
+release_dev_mem:
+ scp_unmap_memory_region(scp);
+destroy_mutex:
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_destroy(&scp->ipi_desc[i].lock);
+ mutex_destroy(&scp->send_lock);
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int scp_remove(struct platform_device *pdev)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+ int i;
+
+ rproc_del(scp->rproc);
+ scp_remove_rpmsg_subdev(scp);
+ scp_ipi_unregister(scp, SCP_IPI_INIT);
+ scp_unmap_memory_region(scp);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_destroy(&scp->ipi_desc[i].lock);
+ mutex_destroy(&scp->send_lock);
+ rproc_free(scp->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_scp_of_match[] = {
+ { .compatible = "mediatek,mt8183-scp"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
+
+static struct platform_driver mtk_scp_driver = {
+ .probe = scp_probe,
+ .remove = scp_remove,
+ .driver = {
+ .name = "mtk-scp",
+ .of_match_table = of_match_ptr(mtk_scp_of_match),
+ },
+};
+
+module_platform_driver(mtk_scp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP control driver");
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
new file mode 100644
index 000000000000..3d3d87210ef2
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#include "mtk_common.h"
+
+/**
+ * scp_ipi_register() - register an ipi function
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ * @handler: IPI handler
+ * @priv: private data for IPI handler
+ *
+ * Register an ipi function to receive ipi interrupts from SCP.
+ *
+ * Returns 0 if the ipi is registered successfully, -error on error.
+ */
+int scp_ipi_register(struct mtk_scp *scp,
+ u32 id,
+ scp_ipi_handler_t handler,
+ void *priv)
+{
+ /* scp is NULL until the SCP device has probed; it cannot be used for logging */
+ if (!scp)
+ return -EPROBE_DEFER;
+
+ if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
+ return -EINVAL;
+
+ scp_ipi_lock(scp, id);
+ scp->ipi_desc[id].handler = handler;
+ scp->ipi_desc[id].priv = priv;
+ scp_ipi_unlock(scp, id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_register);
+
+/**
+ * scp_ipi_unregister() - unregister an ipi function
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Unregister an ipi function so it no longer receives ipi interrupts from SCP.
+ */
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
+{
+ if (!scp)
+ return;
+
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+
+ scp_ipi_lock(scp, id);
+ scp->ipi_desc[id].handler = NULL;
+ scp->ipi_desc[id].priv = NULL;
+ scp_ipi_unlock(scp, id);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unregister);
+
+/*
+ * scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
+ *
+ * @dst: Pointer to the destination buffer, should be in SCP SRAM region.
+ * @src: Pointer to the source buffer.
+ * @len: Length of the source buffer to be copied.
+ *
+ * Since AP access to SCP SRAM doesn't support byte writes, this always writes
+ * a full word at a time, and may cause some extra bytes to be written at the
+ * beginning & end of dst.
+ */
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
+{
+ void __iomem *ptr;
+ u32 val;
+ unsigned int i = 0, remain;
+
+ if (!IS_ALIGNED((unsigned long)dst, 4)) {
+ ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
+ i = 4 - (dst - ptr);
+ val = readl_relaxed(ptr);
+ memcpy((u8 *)&val + (4 - i), src, i);
+ writel_relaxed(val, ptr);
+ }
+
+ __iowrite32_copy(dst + i, src + i, (len - i) / 4);
+ remain = (len - i) % 4;
+
+ if (remain > 0) {
+ val = readl_relaxed(dst + len - remain);
+ memcpy(&val, src + len - remain, remain);
+ writel_relaxed(val, dst + len - remain);
+ }
+}
+EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
+
+/**
+ * scp_ipi_lock() - Lock before operations of an IPI ID
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_lock(struct mtk_scp *scp, u32 id)
+{
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+ mutex_lock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_lock);
+
+/**
+ * scp_ipi_unlock() - Unlock after operations of an IPI ID
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
+{
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+ mutex_unlock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unlock);
+
+/**
+ * scp_ipi_send() - send data from AP to scp.
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ * @buf: the data buffer
+ * @len: the data buffer length
+ * @wait: number of msecs to wait for ack. 0 to skip waiting.
+ *
+ * This function is thread-safe. When this function returns,
+ * SCP has received the data and started processing it.
+ * When the processing completes, the IPI handler registered
+ * by scp_ipi_register() will be called in interrupt context.
+ *
+ * Returns 0 if sending data successfully, -error on error.
+ **/
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ unsigned int wait)
+{
+ struct mtk_share_obj __iomem *send_obj = scp->send_buf;
+ unsigned long timeout;
+ int ret;
+
+ if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
+ WARN_ON(id == SCP_IPI_NS_SERVICE) ||
+ WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+ return -EINVAL;
+
+ mutex_lock(&scp->send_lock);
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clock\n");
+ goto unlock_mutex;
+ }
+
+ /* Wait until SCP receives the last command */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+ ret = -ETIMEDOUT;
+ goto clock_disable;
+ }
+ } while (readl(scp->reg_base + MT8183_HOST_TO_SCP));
+
+ scp_memcpy_aligned(send_obj->share_buf, buf, len);
+
+ writel(len, &send_obj->len);
+ writel(id, &send_obj->id);
+
+ scp->ipi_id_ack[id] = false;
+ /* send the command to SCP */
+ writel(MT8183_HOST_IPC_INT_BIT, scp->reg_base + MT8183_HOST_TO_SCP);
+
+ if (wait) {
+ /* wait for SCP's ACK */
+ timeout = msecs_to_jiffies(wait);
+ ret = wait_event_timeout(scp->ack_wq,
+ scp->ipi_id_ack[id],
+ timeout);
+ scp->ipi_id_ack[id] = false;
+ if (WARN(!ret, "scp ipi %d ack timeout!", id))
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+clock_disable:
+ clk_disable_unprepare(scp->clk);
+unlock_mutex:
+ mutex_unlock(&scp->send_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_send);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek scp IPI interface");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 471128a2e723..a1cc9cbe038f 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -68,14 +68,24 @@
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
+#define NAV_AXI_HALTREQ_BIT BIT(0)
+#define NAV_AXI_HALTACK_BIT BIT(1)
+#define NAV_AXI_IDLE_BIT BIT(2)
+#define AXI_GATING_VALID_OVERRIDE BIT(0)
-#define HALT_ACK_TIMEOUT_MS 100
+#define HALT_ACK_TIMEOUT_US 100000
+#define NAV_HALT_ACK_TIMEOUT_US 200
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
+/* QDSP6SS CBCR */
+#define Q6SS_CBCR_CLKEN BIT(0)
+#define Q6SS_CBCR_CLKOFF BIT(31)
+#define Q6SS_CBCR_TIMEOUT_US 200
+
/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
@@ -96,15 +106,16 @@
#define QDSP6v56_BHS_ON BIT(24)
#define QDSP6v56_CLAMP_WL BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
-#define HALT_CHECK_MAX_LOOPS 200
#define QDSP6SS_XO_CBCR 0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
/* QDSP6v65 parameters */
+#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP 0x3C
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
-#define SLEEP_CHECK_MAX_LOOPS 200
+#define QDSP6SS_BOOT_STATUS 0x408
+#define BOOT_STATUS_TIMEOUT_US 200
#define BOOT_FSM_TIMEOUT 10000
struct reg_info {
@@ -131,6 +142,7 @@ struct rproc_hexagon_res {
int version;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_halt_nav;
};
struct q6v5 {
@@ -141,9 +153,14 @@ struct q6v5 {
void __iomem *rmb_base;
struct regmap *halt_map;
+ struct regmap *halt_nav_map;
+ struct regmap *conn_map;
+
u32 halt_q6;
u32 halt_modem;
u32 halt_nc;
+ u32 halt_nav;
+ u32 conn_box;
struct reset_control *mss_restart;
struct reset_control *pdc_reset;
@@ -187,6 +204,7 @@ struct q6v5 {
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_halt_nav;
int mpss_perm;
int mba_perm;
const char *hexagon_mdt_image;
@@ -198,6 +216,7 @@ enum {
MSS_MSM8974,
MSS_MSM8996,
MSS_MSM8998,
+ MSS_SC7180,
MSS_SDM845,
};
@@ -396,6 +415,26 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
reset_control_assert(qproc->pdc_reset);
ret = reset_control_reset(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
+ } else if (qproc->has_halt_nav) {
+ /*
+ * When the AXI pipeline is being reset while the Q6 modem is partly
+ * operational, the AXI valid signal can glitch, leading to spurious
+ * transactions and Q6 hangs. As a workaround, the
+ * AXI_GATING_VALID_OVERRIDE bit is asserted before triggering the Q6
+ * MSS reset. Both HALTREQ and AXI_GATING_VALID_OVERRIDE are withdrawn
+ * after the MSS assert, followed by an MSS deassert, while holding
+ * the PDC reset.
+ */
+ reset_control_assert(qproc->pdc_reset);
+ regmap_update_bits(qproc->conn_map, qproc->conn_box,
+ AXI_GATING_VALID_OVERRIDE, 1);
+ regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
+ NAV_AXI_HALTREQ_BIT, 0);
+ reset_control_assert(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ regmap_update_bits(qproc->conn_map, qproc->conn_box,
+ AXI_GATING_VALID_OVERRIDE, 0);
+ ret = reset_control_deassert(qproc->mss_restart);
} else {
ret = reset_control_assert(qproc->mss_restart);
}
@@ -413,6 +452,8 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
reset_control_deassert(qproc->pdc_reset);
+ } else if (qproc->has_halt_nav) {
+ ret = reset_control_reset(qproc->mss_restart);
} else {
ret = reset_control_deassert(qproc->mss_restart);
}
@@ -474,12 +515,12 @@ static int q6v5proc_reset(struct q6v5 *qproc)
if (qproc->version == MSS_SDM845) {
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
- val |= 0x1;
+ val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
- val, !(val & BIT(31)), 1,
- SLEEP_CHECK_MAX_LOOPS);
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
return -ETIMEDOUT;
@@ -500,6 +541,54 @@ static int q6v5proc_reset(struct q6v5 *qproc)
}
goto pbl_wait;
+ } else if (qproc->version == MSS_SC7180) {
+ val = readl(qproc->reg_base + QDSP6SS_SLEEP);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_SLEEP);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Turn on the XO clock needed for PLL setup */
+ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Configure Q6 core CBCR to auto-enable after reset sequence */
+ val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
+
+ /* De-assert the Q6 stop core signal */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+
+ /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
+
+ /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
+ val, (val & BIT(0)) != 0, 1,
+ BOOT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "Boot FSM failed to complete.\n");
+ /* Reset the modem so that boot FSM is in reset state */
+ q6v5_reset_deassert(qproc);
+ return ret;
+ }
+ goto pbl_wait;
} else if (qproc->version == MSS_MSM8996 ||
qproc->version == MSS_MSM8998) {
int mem_pwr_ctl;
@@ -515,13 +604,13 @@ static int q6v5proc_reset(struct q6v5 *qproc)
/* BHS require xo cbcr to be enabled */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
- val |= 0x1;
+ val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
/* Read CLKOFF bit to go low indicating CLK is enabled */
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
- val, !(val & BIT(31)), 1,
- HALT_CHECK_MAX_LOOPS);
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev,
"xo cbcr enabling timed out (rc:%d)\n", ret);
@@ -637,7 +726,6 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
struct regmap *halt_map,
u32 offset)
{
- unsigned long timeout;
unsigned int val;
int ret;
@@ -650,14 +738,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
/* Wait for halt */
- timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
- for (;;) {
- ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
- if (ret || val || time_after(jiffies, timeout))
- break;
-
- msleep(1);
- }
+ regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
+ val, 1000, HALT_ACK_TIMEOUT_US);
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (ret || !val)
@@ -667,6 +749,32 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
+static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
+ struct regmap *halt_map,
+ u32 offset)
+{
+ unsigned int val;
+ int ret;
+
+ /* Check if we're already idle */
+ ret = regmap_read(halt_map, offset, &val);
+ if (!ret && (val & NAV_AXI_IDLE_BIT))
+ return;
+
+ /* Assert halt request */
+ regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
+ NAV_AXI_HALTREQ_BIT);
+
+ /* Wait for halt ack */
+ regmap_read_poll_timeout(halt_map, offset, val,
+ (val & NAV_AXI_HALTACK_BIT),
+ 5, NAV_HALT_ACK_TIMEOUT_US);
+
+ ret = regmap_read(halt_map, offset, &val);
+ if (ret || !(val & NAV_AXI_IDLE_BIT))
+ dev_err(qproc->dev, "port failed halt\n");
+}
+
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
@@ -829,6 +937,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ if (qproc->has_halt_nav)
+ q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
+ qproc->halt_nav);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
reclaim_mba:
@@ -876,6 +987,9 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ if (qproc->has_halt_nav)
+ q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
+ qproc->halt_nav);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
if (qproc->version == MSS_MSM8996) {
/*
@@ -1253,6 +1367,47 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
qproc->halt_modem = args.args[1];
qproc->halt_nc = args.args[2];
+ if (qproc->has_halt_nav) {
+ struct platform_device *nav_pdev;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,halt-nav-regs",
+ 1, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+ return -EINVAL;
+ }
+
+ nav_pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!nav_pdev) {
+ dev_err(&pdev->dev, "failed to get mss clock device\n");
+ return -EPROBE_DEFER;
+ }
+
+ qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
+ if (!qproc->halt_nav_map) {
+ dev_err(&pdev->dev, "failed to get map from device\n");
+ return -EINVAL;
+ }
+ qproc->halt_nav = args.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,halt-nav-regs",
+ 1, 1, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->conn_box = args.args[0];
+ }
+
return 0;
}
@@ -1327,7 +1482,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
return PTR_ERR(qproc->mss_restart);
}
- if (qproc->has_alt_reset) {
+ if (qproc->has_alt_reset || qproc->has_halt_nav) {
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
"pdc_reset");
if (IS_ERR(qproc->pdc_reset)) {
@@ -1426,6 +1581,7 @@ static int q6v5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qproc);
+ qproc->has_halt_nav = desc->has_halt_nav;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
goto free_rproc;
@@ -1549,6 +1705,41 @@ static int q6v5_remove(struct platform_device *pdev)
return 0;
}
+static const struct rproc_hexagon_res sc7180_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .reset_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "snoc_axi",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "mnoc_axi",
+ "nav",
+ "mss_nav",
+ "mss_crypto",
+ NULL
+ },
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ "mss",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_halt_nav = true,
+ .version = MSS_SC7180,
+};
+
static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
@@ -1580,6 +1771,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
},
.need_mem_protection = true,
.has_alt_reset = true,
+ .has_halt_nav = false,
.version = MSS_SDM845,
};
@@ -1594,7 +1786,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
.active_clk_names = (char*[]){
"iface",
"bus",
- "mem",
"gpll0_mss",
"mnoc_axi",
"snoc_axi",
@@ -1607,6 +1798,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_halt_nav = false,
.version = MSS_MSM8998,
};
@@ -1636,6 +1828,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_halt_nav = false,
.version = MSS_MSM8996,
};
@@ -1668,6 +1861,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_halt_nav = false,
.version = MSS_MSM8916,
};
@@ -1708,6 +1902,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_halt_nav = false,
.version = MSS_MSM8974,
};
@@ -1717,6 +1912,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
+ { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
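The readl_poll_timeout()/regmap_read_poll_timeout() conversions above replace open-coded wait loops: both helpers poll a register until the condition expression becomes true or the timeout (in microseconds) expires, returning -ETIMEDOUT on expiry. A rough open-coded equivalent of the CLKOFF wait inside q6v5proc_reset(), for comparison:

	/* sketch: roughly what readl_poll_timeout(..., val,
	 * !(val & Q6SS_CBCR_CLKOFF), 1, Q6SS_CBCR_TIMEOUT_US) does */
	unsigned long deadline = jiffies + usecs_to_jiffies(Q6SS_CBCR_TIMEOUT_US);
	u32 val;

	for (;;) {
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		if (!(val & Q6SS_CBCR_CLKOFF))
			break;			/* clock is running */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		udelay(1);
	}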
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index db4b3c4bacd7..edf9d0e1a235 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
@@ -31,6 +33,10 @@ struct adsp_data {
const char *firmware_name;
int pas_id;
bool has_aggre2_clk;
+ bool auto_boot;
+
+ char **active_pd_names;
+ char **proxy_pd_names;
const char *ssr_name;
const char *sysmon_name;
@@ -49,6 +55,12 @@ struct qcom_adsp {
struct regulator *cx_supply;
struct regulator *px_supply;
+ struct device *active_pds[1];
+ struct device *proxy_pds[3];
+
+ int active_pd_count;
+ int proxy_pd_count;
+
int pas_id;
int crash_reason_smem;
bool has_aggre2_clk;
@@ -67,6 +79,41 @@ struct qcom_adsp {
struct qcom_sysmon *sysmon;
};
+static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0)
+ goto unroll_pd_votes;
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+};
+
+static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -84,9 +131,17 @@ static int adsp_start(struct rproc *rproc)
qcom_q6v5_prepare(&adsp->q6v5);
+ ret = adsp_pds_enable(adsp, adsp->active_pds, adsp->active_pd_count);
+ if (ret < 0)
+ goto disable_irqs;
+
+ ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ if (ret < 0)
+ goto disable_active_pds;
+
ret = clk_prepare_enable(adsp->xo);
if (ret)
- return ret;
+ goto disable_proxy_pds;
ret = clk_prepare_enable(adsp->aggre2_clk);
if (ret)
@@ -124,6 +179,12 @@ disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
+disable_proxy_pds:
+ adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+disable_active_pds:
+ adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
+disable_irqs:
+ qcom_q6v5_unprepare(&adsp->q6v5);
return ret;
}
@@ -136,6 +197,7 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
+ adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
@@ -152,6 +214,7 @@ static int adsp_stop(struct rproc *rproc)
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+ adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count);
handover = qcom_q6v5_unprepare(&adsp->q6v5);
if (handover)
qcom_pas_handover(&adsp->q6v5);
@@ -217,6 +280,59 @@ static int adsp_init_regulator(struct qcom_adsp *adsp)
return PTR_ERR_OR_ZERO(adsp->px_supply);
}
+static int adsp_pds_attach(struct device *dev, struct device **devs,
+ char **pd_names)
+{
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+};
+
+static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ struct device *dev = adsp->dev;
+ int i;
+
+ /* Handle single power domain */
+ if (dev->pm_domain && pd_count) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
struct device_node *node;
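dev_pm_domain_attach_by_name() is needed in adsp_pds_attach() above because the PM core only attaches a power domain implicitly when a device has exactly one; with several named domains, each attach returns its own virtual device for the enable path to vote on. A condensed sketch of the pattern, with hypothetical domain names:

	static const char * const pd_names[] = { "cx", "mx" };
	struct device *pds[2];
	int i;

	for (i = 0; i < 2; i++) {
		pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(pds[i]))
			return pds[i] ? PTR_ERR(pds[i]) : -ENODATA;
	}
	/* later: dev_pm_genpd_set_performance_state(pds[i], INT_MAX) plus
	 * pm_runtime_get_sync(pds[i]) powers each domain up */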
@@ -273,6 +389,8 @@ static int adsp_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ rproc->auto_boot = desc->auto_boot;
+
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
@@ -292,10 +410,22 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
+ ret = adsp_pds_attach(&pdev->dev, adsp->active_pds,
+ desc->active_pd_names);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->active_pd_count = ret;
+
+ ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
+ desc->proxy_pd_names);
+ if (ret < 0)
+ goto detach_active_pds;
+ adsp->proxy_pd_count = ret;
+
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
qcom_pas_handover);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
@@ -305,15 +435,19 @@ static int adsp_probe(struct platform_device *pdev)
desc->ssctl_id);
if (IS_ERR(adsp->sysmon)) {
ret = PTR_ERR(adsp->sysmon);
- goto free_rproc;
+ goto detach_proxy_pds;
}
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
return 0;
+detach_proxy_pds:
+ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+detach_active_pds:
+ adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count);
free_rproc:
rproc_free(rproc);
@@ -340,6 +474,41 @@ static const struct adsp_data adsp_resource_init = {
.firmware_name = "adsp.mdt",
.pas_id = 1,
.has_aggre2_clk = false,
+ .auto_boot = true,
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8150_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data msm8998_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
@@ -350,16 +519,92 @@ static const struct adsp_data cdsp_resource_init = {
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.has_aggre2_clk = false,
+ .auto_boot = true,
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm8150_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
+static const struct adsp_data mpss_resource_init = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct adsp_data slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
.has_aggre2_clk = true,
+ .auto_boot = true,
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8150_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data msm8998_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = true,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "ssc_cx",
+ NULL
+ },
.ssr_name = "dsps",
.sysmon_name = "slpi",
.ssctl_id = 0x16,
@@ -369,6 +614,7 @@ static const struct adsp_data wcss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "wcnss.mdt",
.pas_id = 6,
+ .auto_boot = true,
.ssr_name = "mpss",
.sysmon_name = "wcnss",
.ssctl_id = 0x12,
@@ -378,11 +624,17 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8998-adsp-pas", .data = &msm8998_adsp_resource},
+ { .compatible = "qcom,msm8998-slpi-pas", .data = &msm8998_slpi_resource},
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+ { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
+ { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
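
For readers skimming the pas changes above, a minimal sketch (hypothetical names, not part of this patch) of the NULL-terminated power-domain list idiom that adsp_pds_attach() implements — count entries up to the NULL sentinel, attach by name, and unwind on failure:

#include <linux/err.h>
#include <linux/pm_domain.h>

/* Hypothetical list, shaped like the proxy_pd_names tables above. */
static char *demo_pd_names[] = { "cx", "mx", NULL };

static int demo_pds_attach(struct device *dev, struct device **pds)
{
	int i, ret;

	for (i = 0; demo_pd_names[i]; i++) {
		pds[i] = dev_pm_domain_attach_by_name(dev, demo_pd_names[i]);
		if (IS_ERR_OR_NULL(pds[i])) {
			ret = pds[i] ? PTR_ERR(pds[i]) : -ENODATA;
			while (--i >= 0)
				dev_pm_domain_detach(pds[i], false);
			return ret;
		}
	}

	return i;	/* number of domains attached */
}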
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c231314eab66..faf3822d8791 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -394,7 +394,7 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
break;
default:
return -EINVAL;
- };
+ }
sysmon->ssctl_version = svc->version;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 307df98347ba..097f33e4f1f3 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -477,8 +477,8 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
char name[16];
/* make sure resource isn't truncated */
- if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
- + rsc->config_len > avail) {
+ if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
+ avail) {
dev_err(dev, "vdev rsc is truncated\n");
return -EINVAL;
}
@@ -2223,7 +2223,7 @@ static int __init remoteproc_init(void)
return 0;
}
-module_init(remoteproc_init);
+subsys_initcall(remoteproc_init);
static void __exit remoteproc_exit(void)
{
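
As an aside on the struct_size() conversion in the hunk above, a sketch (hypothetical struct, assuming <linux/overflow.h>) of what the helper computes for a trailing flexible array — the same arithmetic as the open-coded expression it replaces, but saturating instead of wrapping on overflow:

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_rsc {			/* hypothetical mirror of fw_rsc_vdev */
	u32 num_of_vrings;
	struct { u32 da, align, num; } vring[];
};

static bool demo_rsc_fits(const struct demo_rsc *rsc, size_t avail)
{
	/* sizeof(*rsc) + rsc->num_of_vrings * sizeof(rsc->vring[0]), checked */
	return struct_size(rsc, vring, rsc->num_of_vrings) <= avail;
}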
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 3ad7817ce1f0..461b0e506a26 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -49,6 +49,13 @@ config RESET_BRCMSTB
This enables the reset controller driver for Broadcom STB SoCs using
a SUN_TOP_CTRL_SW_INIT style controller.
+config RESET_BRCMSTB_RESCAL
+ bool "Broadcom STB RESCAL reset controller"
+ default ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
+ BCM7216.
+
config RESET_HSDK
bool "Synopsys HSDK Reset Driver"
depends on HAS_IOMEM
@@ -64,6 +71,15 @@ config RESET_IMX7
help
This enables the reset controller driver for i.MX7 SoCs.
+config RESET_INTEL_GW
+ bool "Intel Reset Controller Driver"
+ depends on OF
+ select REGMAP_MMIO
+ help
+ This enables the reset controller driver for Intel Gateway SoCs.
+	  Say Y to control the reset signals provided by the reset controller.
+ Otherwise, say N.
+
config RESET_LANTIQ
bool "Lantiq XWAY Reset Driver" if COMPILE_TEST
default SOC_TYPE_XWAY
@@ -89,6 +105,13 @@ config RESET_MESON_AUDIO_ARB
This enables the reset driver for Audio Memory Arbiter of
Amlogic's A113 based SoCs
+config RESET_NPCM
+ bool "NPCM BMC Reset Driver" if COMPILE_TEST
+ default ARCH_NPCM
+ help
+ This enables the reset controller driver for Nuvoton NPCM
+ BMC SoCs.
+
config RESET_OXNAS
bool
@@ -99,7 +122,7 @@ config RESET_PISTACHIO
This enables the reset driver for ImgTec Pistachio SoCs.
config RESET_QCOM_AOSS
- bool "Qcom AOSS Reset Driver"
+ tristate "Qcom AOSS Reset Driver"
depends on ARCH_QCOM || COMPILE_TEST
help
This enables the AOSS (always on subsystem) reset driver
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index cf60ce526064..249ed357c997 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -8,12 +8,15 @@ obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
+obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
+obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
+obj-$(CONFIG_RESET_NPCM) += reset-npcm.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index ca1d49146f61..01c0c7aa835c 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -150,13 +150,14 @@ int devm_reset_controller_register(struct device *dev,
return -ENOMEM;
ret = reset_controller_register(rcdev);
- if (!ret) {
- *rcdevp = rcdev;
- devres_add(dev, rcdevp);
- } else {
+ if (ret) {
devres_free(rcdevp);
+ return ret;
}
+ *rcdevp = rcdev;
+ devres_add(dev, rcdevp);
+
return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
@@ -787,13 +788,14 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
return ERR_PTR(-ENOMEM);
rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
- if (!IS_ERR(rstc)) {
- *ptr = rstc;
- devres_add(dev, ptr);
- } else {
+ if (IS_ERR_OR_NULL(rstc)) {
devres_free(ptr);
+ return rstc;
}
+ *ptr = rstc;
+ devres_add(dev, ptr);
+
return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
@@ -861,8 +863,7 @@ static int of_reset_control_get_count(struct device_node *node)
* @acquired: only one reset control may be acquired for a given controller
* and ID
*
- * Returns pointer to allocated reset_control_array on success or
- * error on failure
+ * Returns pointer to allocated reset_control on success or error on failure
*/
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
@@ -915,28 +916,26 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get);
* that just have to be asserted or deasserted, without any
* requirements on the order.
*
- * Returns pointer to allocated reset_control_array on success or
- * error on failure
+ * Returns pointer to allocated reset_control on success or error on failure
*/
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
- struct reset_control **devres;
- struct reset_control *rstc;
+ struct reset_control **ptr, *rstc;
- devres = devres_alloc(devm_reset_control_release, sizeof(*devres),
- GFP_KERNEL);
- if (!devres)
+ ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
return ERR_PTR(-ENOMEM);
rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
- if (IS_ERR(rstc)) {
- devres_free(devres);
+ if (IS_ERR_OR_NULL(rstc)) {
+ devres_free(ptr);
return rstc;
}
- *devres = rstc;
- devres_add(dev, devres);
+ *ptr = rstc;
+ devres_add(dev, ptr);
return rstc;
}
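
The devres rework above settles on the usual early-return shape; a generic sketch of that pattern, with demo_get()/demo_put() as stand-in helpers:

static void demo_release(struct device *dev, void *res)
{
	demo_put(*(struct demo **)res);		/* hypothetical put helper */
}

static struct demo *devm_demo_get(struct device *dev)
{
	struct demo **ptr, *d;

	ptr = devres_alloc(demo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	d = demo_get(dev);			/* hypothetical getter */
	if (IS_ERR(d)) {
		devres_free(ptr);		/* nothing registered yet */
		return d;
	}

	*ptr = d;
	devres_add(dev, ptr);			/* demo_release() runs on unbind */

	return d;
}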
diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c
new file mode 100644
index 000000000000..b6f074d6a65f
--- /dev/null
+++ b/drivers/reset/reset-brcmstb-rescal.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020 Broadcom */
+
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#define BRCM_RESCAL_START 0x0
+#define BRCM_RESCAL_START_BIT BIT(0)
+#define BRCM_RESCAL_CTRL 0x4
+#define BRCM_RESCAL_STATUS 0x8
+#define BRCM_RESCAL_STATUS_BIT BIT(0)
+
+struct brcm_rescal_reset {
+ void __iomem *base;
+ struct device *dev;
+ struct reset_controller_dev rcdev;
+};
+
+static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct brcm_rescal_reset *data =
+ container_of(rcdev, struct brcm_rescal_reset, rcdev);
+ void __iomem *base = data->base;
+ u32 reg;
+ int ret;
+
+ reg = readl(base + BRCM_RESCAL_START);
+ writel(reg | BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START);
+ reg = readl(base + BRCM_RESCAL_START);
+ if (!(reg & BRCM_RESCAL_START_BIT)) {
+ dev_err(data->dev, "failed to start SATA/PCIe rescal\n");
+ return -EIO;
+ }
+
+ ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
+ !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+ if (ret) {
+ dev_err(data->dev, "time out on SATA/PCIe rescal\n");
+ return ret;
+ }
+
+ reg = readl(base + BRCM_RESCAL_START);
+ writel(reg & ~BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START);
+
+ dev_dbg(data->dev, "SATA/PCIe rescal success\n");
+
+ return 0;
+}
+
+static int brcm_rescal_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* This is needed if #reset-cells == 0. */
+ return 0;
+}
+
+static const struct reset_control_ops brcm_rescal_reset_ops = {
+ .reset = brcm_rescal_reset_set,
+};
+
+static int brcm_rescal_reset_probe(struct platform_device *pdev)
+{
+ struct brcm_rescal_reset *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = 1;
+ data->rcdev.ops = &brcm_rescal_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+ data->rcdev.of_xlate = brcm_rescal_reset_xlate;
+ data->dev = &pdev->dev;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static const struct of_device_id brcm_rescal_reset_of_match[] = {
+ { .compatible = "brcm,bcm7216-pcie-sata-rescal" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, brcm_rescal_reset_of_match);
+
+static struct platform_driver brcm_rescal_reset_driver = {
+ .probe = brcm_rescal_reset_probe,
+ .driver = {
+ .name = "brcm-rescal-reset",
+ .of_match_table = brcm_rescal_reset_of_match,
+ }
+};
+module_platform_driver(brcm_rescal_reset_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom SATA/PCIe rescal reset controller");
+MODULE_LICENSE("GPL v2");
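
The completion wait above leans on readl_poll_timeout(); a small sketch of its contract (address, scratch variable, exit condition, poll interval in µs, total timeout in µs):

#include <linux/iopoll.h>

static int demo_wait_rescal_done(void __iomem *status)
{
	u32 reg;

	/* Re-read every 100 us; give up after 1000 us total. */
	return readl_poll_timeout(status, reg,
				  !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
	/* 0 on success, -ETIMEDOUT if the bit never cleared in time. */
}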
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
index a608f445dad6..f213264c8567 100644
--- a/drivers/reset/reset-brcmstb.c
+++ b/drivers/reset/reset-brcmstb.c
@@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
- !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
- dev_err(kdev, "incorrect register range\n");
- return -EINVAL;
- }
-
priv->base = devm_ioremap_resource(kdev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c
new file mode 100644
index 000000000000..854238444616
--- /dev/null
+++ b/drivers/reset/reset-intel-gw.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Intel Corporation.
+ * Lei Chuanhua <Chuanhua.lei@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#define RCU_RST_STAT 0x0024
+#define RCU_RST_REQ 0x0048
+
+#define REG_OFFSET GENMASK(31, 16)
+#define BIT_OFFSET GENMASK(15, 8)
+#define STAT_BIT_OFFSET GENMASK(7, 0)
+
+#define to_reset_data(x) container_of(x, struct intel_reset_data, rcdev)
+
+struct intel_reset_soc {
+ bool legacy;
+ u32 reset_cell_count;
+};
+
+struct intel_reset_data {
+ struct reset_controller_dev rcdev;
+ struct notifier_block restart_nb;
+ const struct intel_reset_soc *soc_data;
+ struct regmap *regmap;
+ struct device *dev;
+ u32 reboot_id;
+};
+
+static const struct regmap_config intel_rcu_regmap_config = {
+ .name = "intel-reset",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+/*
+ * Reset status register offset relative to
+ * the reset control register (X) is X + 4
+ */
+static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
+ unsigned long id, u32 *rst_req,
+ u32 *req_bit, u32 *stat_bit)
+{
+ *rst_req = FIELD_GET(REG_OFFSET, id);
+ *req_bit = FIELD_GET(BIT_OFFSET, id);
+
+ if (data->soc_data->legacy)
+ *stat_bit = FIELD_GET(STAT_BIT_OFFSET, id);
+ else
+ *stat_bit = *req_bit;
+
+ if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
+ return RCU_RST_STAT;
+ else
+ return *rst_req + 0x4;
+}
+
+static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
+ bool set)
+{
+ u32 rst_req, req_bit, rst_stat, stat_bit, val;
+ int ret;
+
+ rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
+ &req_bit, &stat_bit);
+
+ val = set ? BIT(req_bit) : 0;
+ ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(data->regmap, rst_stat, val,
+ set == !!(val & BIT(stat_bit)), 20,
+ 200);
+}
+
+static int intel_assert_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct intel_reset_data *data = to_reset_data(rcdev);
+ int ret;
+
+ ret = intel_set_clr_bits(data, id, true);
+ if (ret)
+ dev_err(data->dev, "Reset assert failed %d\n", ret);
+
+ return ret;
+}
+
+static int intel_deassert_device(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct intel_reset_data *data = to_reset_data(rcdev);
+ int ret;
+
+ ret = intel_set_clr_bits(data, id, false);
+ if (ret)
+ dev_err(data->dev, "Reset deassert failed %d\n", ret);
+
+ return ret;
+}
+
+static int intel_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct intel_reset_data *data = to_reset_data(rcdev);
+ u32 rst_req, req_bit, rst_stat, stat_bit, val;
+ int ret;
+
+ rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
+ &req_bit, &stat_bit);
+ ret = regmap_read(data->regmap, rst_stat, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(stat_bit));
+}
+
+static const struct reset_control_ops intel_reset_ops = {
+ .assert = intel_assert_device,
+ .deassert = intel_deassert_device,
+ .status = intel_reset_status,
+};
+
+static int intel_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *spec)
+{
+ struct intel_reset_data *data = to_reset_data(rcdev);
+ u32 id;
+
+ if (spec->args[1] > 31)
+ return -EINVAL;
+
+ id = FIELD_PREP(REG_OFFSET, spec->args[0]);
+ id |= FIELD_PREP(BIT_OFFSET, spec->args[1]);
+
+ if (data->soc_data->legacy) {
+ if (spec->args[2] > 31)
+ return -EINVAL;
+
+ id |= FIELD_PREP(STAT_BIT_OFFSET, spec->args[2]);
+ }
+
+ return id;
+}
+
+static int intel_reset_restart_handler(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_reset_data *reset_data;
+
+ reset_data = container_of(nb, struct intel_reset_data, restart_nb);
+ intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);
+
+ return NOTIFY_DONE;
+}
+
+static int intel_reset_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct intel_reset_data *data;
+ void __iomem *base;
+ u32 rb_id[3];
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->soc_data = of_device_get_match_data(dev);
+ if (!data->soc_data)
+ return -ENODEV;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ data->regmap = devm_regmap_init_mmio(dev, base,
+ &intel_rcu_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "regmap initialization failed\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
+ data->soc_data->reset_cell_count);
+ if (ret) {
+ dev_err(dev, "Failed to get global reset offset!\n");
+ return ret;
+ }
+
+ data->dev = dev;
+ data->rcdev.of_node = np;
+ data->rcdev.owner = dev->driver->owner;
+ data->rcdev.ops = &intel_reset_ops;
+ data->rcdev.of_xlate = intel_reset_xlate;
+ data->rcdev.of_reset_n_cells = data->soc_data->reset_cell_count;
+ ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
+ if (ret)
+ return ret;
+
+ data->reboot_id = FIELD_PREP(REG_OFFSET, rb_id[0]);
+ data->reboot_id |= FIELD_PREP(BIT_OFFSET, rb_id[1]);
+
+ if (data->soc_data->legacy)
+ data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET, rb_id[2]);
+
+ data->restart_nb.notifier_call = intel_reset_restart_handler;
+ data->restart_nb.priority = 128;
+ register_restart_handler(&data->restart_nb);
+
+ return 0;
+}
+
+static const struct intel_reset_soc xrx200_data = {
+ .legacy = true,
+ .reset_cell_count = 3,
+};
+
+static const struct intel_reset_soc lgm_data = {
+ .legacy = false,
+ .reset_cell_count = 2,
+};
+
+static const struct of_device_id intel_reset_match[] = {
+ { .compatible = "intel,rcu-lgm", .data = &lgm_data },
+ { .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
+ {}
+};
+
+static struct platform_driver intel_reset_driver = {
+ .probe = intel_reset_probe,
+ .driver = {
+ .name = "intel-reset",
+ .of_match_table = intel_reset_match,
+ },
+};
+
+static int __init intel_reset_init(void)
+{
+ return platform_driver_register(&intel_reset_driver);
+}
+
+/*
+ * The RCU is a system core entity in the Always On Domain; its clocks
+ * and resources are initialized during system core initialization.
+ * Most platform- and architecture-specific devices also need it to
+ * perform reset operations as part of their own initialization, so
+ * register this driver at postcore_initcall time.
+ */
+postcore_initcall(intel_reset_init);
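
A quick illustration (values mine, not from the patch) of the id encoding intel_reset_xlate() builds above — register offset, request bit and, on legacy SoCs, status bit packed into one value with FIELD_PREP and recovered later with FIELD_GET:

#include <linux/bitfield.h>

static unsigned long demo_pack_reset_id(u32 reg, u32 bit, u32 stat)
{
	return FIELD_PREP(REG_OFFSET, reg) |
	       FIELD_PREP(BIT_OFFSET, bit) |
	       FIELD_PREP(STAT_BIT_OFFSET, stat);
}

/*
 * demo_pack_reset_id(RCU_RST_REQ, 12, 12) == 0x00480c0c;
 * FIELD_GET(BIT_OFFSET, 0x00480c0c) == 12 recovers the request bit.
 */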
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
new file mode 100644
index 000000000000..2ea4d3136e15
--- /dev/null
+++ b/drivers/reset/reset-npcm.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of_address.h>
+
+/* NPCM7xx GCR registers */
+#define NPCM_MDLR_OFFSET 0x7C
+#define NPCM_MDLR_USBD0 BIT(9)
+#define NPCM_MDLR_USBD1 BIT(8)
+#define NPCM_MDLR_USBD2_4 BIT(21)
+#define NPCM_MDLR_USBD5_9 BIT(22)
+
+#define NPCM_USB1PHYCTL_OFFSET 0x140
+#define NPCM_USB2PHYCTL_OFFSET 0x144
+#define NPCM_USBXPHYCTL_RS BIT(28)
+
+/* NPCM7xx Reset registers */
+#define NPCM_SWRSTR 0x14
+#define NPCM_SWRST BIT(2)
+
+#define NPCM_IPSRST1 0x20
+#define NPCM_IPSRST1_USBD1 BIT(5)
+#define NPCM_IPSRST1_USBD2 BIT(8)
+#define NPCM_IPSRST1_USBD3 BIT(25)
+#define NPCM_IPSRST1_USBD4 BIT(22)
+#define NPCM_IPSRST1_USBD5 BIT(23)
+#define NPCM_IPSRST1_USBD6 BIT(24)
+
+#define NPCM_IPSRST2 0x24
+#define NPCM_IPSRST2_USB_HOST BIT(26)
+
+#define NPCM_IPSRST3 0x34
+#define NPCM_IPSRST3_USBD0 BIT(4)
+#define NPCM_IPSRST3_USBD7 BIT(5)
+#define NPCM_IPSRST3_USBD8 BIT(6)
+#define NPCM_IPSRST3_USBD9 BIT(7)
+#define NPCM_IPSRST3_USBPHY1 BIT(24)
+#define NPCM_IPSRST3_USBPHY2 BIT(25)
+
+#define NPCM_RC_RESETS_PER_REG 32
+#define NPCM_MASK_RESETS GENMASK(4, 0)
+
+struct npcm_rc_data {
+ struct reset_controller_dev rcdev;
+ struct notifier_block restart_nb;
+ u32 sw_reset_number;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+#define to_rc_data(p) container_of(p, struct npcm_rc_data, rcdev)
+
+static int npcm_rc_restart(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct npcm_rc_data *rc = container_of(nb, struct npcm_rc_data,
+ restart_nb);
+
+ writel(NPCM_SWRST << rc->sw_reset_number, rc->base + NPCM_SWRSTR);
+ mdelay(1000);
+
+ pr_emerg("%s: unable to restart system\n", __func__);
+
+ return NOTIFY_DONE;
+}
+
+static int npcm_rc_setclear_reset(struct reset_controller_dev *rcdev,
+ unsigned long id, bool set)
+{
+ struct npcm_rc_data *rc = to_rc_data(rcdev);
+ unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS);
+ unsigned int ctrl_offset = id >> 8;
+ unsigned long flags;
+ u32 stat;
+
+ spin_lock_irqsave(&rc->lock, flags);
+ stat = readl(rc->base + ctrl_offset);
+ if (set)
+ writel(stat | rst_bit, rc->base + ctrl_offset);
+ else
+ writel(stat & ~rst_bit, rc->base + ctrl_offset);
+ spin_unlock_irqrestore(&rc->lock, flags);
+
+ return 0;
+}
+
+static int npcm_rc_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return npcm_rc_setclear_reset(rcdev, id, true);
+}
+
+static int npcm_rc_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return npcm_rc_setclear_reset(rcdev, id, false);
+}
+
+static int npcm_rc_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct npcm_rc_data *rc = to_rc_data(rcdev);
+ unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS);
+ unsigned int ctrl_offset = id >> 8;
+
+ return (readl(rc->base + ctrl_offset) & rst_bit);
+}
+
+static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned int offset, bit;
+
+ offset = reset_spec->args[0];
+ if (offset != NPCM_IPSRST1 && offset != NPCM_IPSRST2 &&
+ offset != NPCM_IPSRST3) {
+ dev_err(rcdev->dev, "Error reset register (0x%x)\n", offset);
+ return -EINVAL;
+ }
+ bit = reset_spec->args[1];
+ if (bit >= NPCM_RC_RESETS_PER_REG) {
+ dev_err(rcdev->dev, "Error reset number (%d)\n", bit);
+ return -EINVAL;
+ }
+
+ return (offset << 8) | bit;
+}
+
+static const struct of_device_id npcm_rc_match[] = {
+ { .compatible = "nuvoton,npcm750-reset",
+ .data = (void *)"nuvoton,npcm750-gcr" },
+ { }
+};
+
+/*
+ * The following procedure should be observed in USB PHY, USB device and
+ * USB host initialization at BMC boot
+ */
+static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
+{
+ u32 mdlr, iprst1, iprst2, iprst3;
+ struct device *dev = &pdev->dev;
+ struct regmap *gcr_regmap;
+ u32 ipsrst1_bits = 0;
+ u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
+ u32 ipsrst3_bits = 0;
+ const char *gcr_dt;
+
+ gcr_dt = (const char *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
+
+ gcr_regmap = syscon_regmap_lookup_by_compatible(gcr_dt);
+ if (IS_ERR(gcr_regmap)) {
+ dev_err(&pdev->dev, "Failed to find %s\n", gcr_dt);
+ return PTR_ERR(gcr_regmap);
+ }
+
+	/* check which USB devices are enabled */
+ regmap_read(gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
+ if (!(mdlr & NPCM_MDLR_USBD0))
+ ipsrst3_bits |= NPCM_IPSRST3_USBD0;
+ if (!(mdlr & NPCM_MDLR_USBD1))
+ ipsrst1_bits |= NPCM_IPSRST1_USBD1;
+ if (!(mdlr & NPCM_MDLR_USBD2_4))
+ ipsrst1_bits |= (NPCM_IPSRST1_USBD2 |
+ NPCM_IPSRST1_USBD3 |
+ NPCM_IPSRST1_USBD4);
+ if (!(mdlr & NPCM_MDLR_USBD0)) {
+ ipsrst1_bits |= (NPCM_IPSRST1_USBD5 |
+ NPCM_IPSRST1_USBD6);
+ ipsrst3_bits |= (NPCM_IPSRST3_USBD7 |
+ NPCM_IPSRST3_USBD8 |
+ NPCM_IPSRST3_USBD9);
+ }
+
+ /* assert reset USB PHY and USB devices */
+ iprst1 = readl(rc->base + NPCM_IPSRST1);
+ iprst2 = readl(rc->base + NPCM_IPSRST2);
+ iprst3 = readl(rc->base + NPCM_IPSRST3);
+
+ iprst1 |= ipsrst1_bits;
+ iprst2 |= ipsrst2_bits;
+ iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
+ NPCM_IPSRST3_USBPHY2);
+
+ writel(iprst1, rc->base + NPCM_IPSRST1);
+ writel(iprst2, rc->base + NPCM_IPSRST2);
+ writel(iprst3, rc->base + NPCM_IPSRST3);
+
+ /* clear USB PHY RS bit */
+ regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, 0);
+ regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, 0);
+
+ /* deassert reset USB PHY */
+ iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2);
+ writel(iprst3, rc->base + NPCM_IPSRST3);
+
+ udelay(50);
+
+ /* set USB PHY RS bit */
+ regmap_update_bits(gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+ regmap_update_bits(gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
+ NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
+
+	/* deassert reset of USB devices */
+ iprst1 &= ~ipsrst1_bits;
+ iprst2 &= ~ipsrst2_bits;
+ iprst3 &= ~ipsrst3_bits;
+
+ writel(iprst1, rc->base + NPCM_IPSRST1);
+ writel(iprst2, rc->base + NPCM_IPSRST2);
+ writel(iprst3, rc->base + NPCM_IPSRST3);
+
+ return 0;
+}
+
+static const struct reset_control_ops npcm_rc_ops = {
+ .assert = npcm_rc_assert,
+ .deassert = npcm_rc_deassert,
+ .status = npcm_rc_status,
+};
+
+static int npcm_rc_probe(struct platform_device *pdev)
+{
+ struct npcm_rc_data *rc;
+ int ret;
+
+ rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ rc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rc->base))
+ return PTR_ERR(rc->base);
+
+ spin_lock_init(&rc->lock);
+
+ rc->rcdev.owner = THIS_MODULE;
+ rc->rcdev.ops = &npcm_rc_ops;
+ rc->rcdev.of_node = pdev->dev.of_node;
+ rc->rcdev.of_reset_n_cells = 2;
+ rc->rcdev.of_xlate = npcm_reset_xlate;
+
+ platform_set_drvdata(pdev, rc);
+
+ ret = devm_reset_controller_register(&pdev->dev, &rc->rcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ return ret;
+ }
+
+ if (npcm_usb_reset(pdev, rc))
+ dev_warn(&pdev->dev, "NPCM USB reset failed, can cause issues with UDC and USB host\n");
+
+ if (!of_property_read_u32(pdev->dev.of_node, "nuvoton,sw-reset-number",
+ &rc->sw_reset_number)) {
+ if (rc->sw_reset_number && rc->sw_reset_number < 5) {
+ rc->restart_nb.priority = 192,
+ rc->restart_nb.notifier_call = npcm_rc_restart,
+ ret = register_restart_handler(&rc->restart_nb);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to register restart handler\n");
+ }
+ }
+
+ return ret;
+}
+
+static struct platform_driver npcm_rc_driver = {
+ .probe = npcm_rc_probe,
+ .driver = {
+ .name = "npcm-reset",
+ .of_match_table = npcm_rc_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(npcm_rc_driver);
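
For reference, the NPCM reset id layout agreed between npcm_reset_xlate() and npcm_rc_setclear_reset() above, sketched with example numbers:

static unsigned long demo_npcm_id(unsigned int offset, unsigned int bit)
{
	return (offset << 8) | bit;	/* as npcm_reset_xlate() returns */
}

/*
 * demo_npcm_id(NPCM_IPSRST2, 26) == 0x241a; the ops then recover
 * ctrl_offset = id >> 8 (0x24) and rst_bit = BIT(id & NPCM_MASK_RESETS)
 * (BIT(26)), matching NPCM_IPSRST2_USB_HOST.
 */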
diff --git a/drivers/reset/reset-qcom-aoss.c b/drivers/reset/reset-qcom-aoss.c
index 36db96750450..9333b923dda0 100644
--- a/drivers/reset/reset-qcom-aoss.c
+++ b/drivers/reset/reset-qcom-aoss.c
@@ -118,6 +118,7 @@ static const struct of_device_id qcom_aoss_reset_of_match[] = {
{ .compatible = "qcom,sdm845-aoss-cc", .data = &sdm845_aoss_desc },
{}
};
+MODULE_DEVICE_TABLE(of, qcom_aoss_reset_of_match);
static struct platform_driver qcom_aoss_reset_driver = {
.probe = qcom_aoss_reset_probe,
@@ -127,7 +128,7 @@ static struct platform_driver qcom_aoss_reset_driver = {
},
};
-builtin_platform_driver(qcom_aoss_reset_driver);
+module_platform_driver(qcom_aoss_reset_driver);
MODULE_DESCRIPTION("Qualcomm AOSS Reset Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
index b46df80ec6c3..8d3a858e3b19 100644
--- a/drivers/reset/reset-scmi.c
+++ b/drivers/reset/reset-scmi.c
@@ -108,7 +108,7 @@ static int scmi_reset_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_RESET },
+ { SCMI_PROTOCOL_RESET, "reset" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 74e589f5dd6a..279e535bf5d8 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -193,8 +193,8 @@ static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
UNIPHIER_RESETX((id), 0x114, 24 + (ch))
-#define UNIPHIER_PERI_RESET_SCSSI(id) \
- UNIPHIER_RESETX((id), 0x110, 17)
+#define UNIPHIER_PERI_RESET_SCSSI(id, ch) \
+ UNIPHIER_RESETX((id), 0x110, 17 + (ch))
#define UNIPHIER_PERI_RESET_MCSSI(id) \
UNIPHIER_RESETX((id), 0x114, 14)
@@ -209,7 +209,7 @@ static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_I2C(6, 2),
UNIPHIER_PERI_RESET_I2C(7, 3),
UNIPHIER_PERI_RESET_I2C(8, 4),
- UNIPHIER_PERI_RESET_SCSSI(11),
+ UNIPHIER_PERI_RESET_SCSSI(11, 0),
UNIPHIER_RESET_END,
};
@@ -225,8 +225,11 @@ static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_FI2C(8, 4),
UNIPHIER_PERI_RESET_FI2C(9, 5),
UNIPHIER_PERI_RESET_FI2C(10, 6),
- UNIPHIER_PERI_RESET_SCSSI(11),
- UNIPHIER_PERI_RESET_MCSSI(12),
+ UNIPHIER_PERI_RESET_SCSSI(11, 0),
+ UNIPHIER_PERI_RESET_SCSSI(12, 1),
+ UNIPHIER_PERI_RESET_SCSSI(13, 2),
+ UNIPHIER_PERI_RESET_SCSSI(14, 3),
+ UNIPHIER_PERI_RESET_MCSSI(15),
UNIPHIER_RESET_END,
};
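
The new channel argument in the hunk above simply offsets the reset bit; spelled out:

/*
 * UNIPHIER_PERI_RESET_SCSSI(id, ch) -> UNIPHIER_RESETX((id), 0x110, 17 + (ch))
 * e.g. UNIPHIER_PERI_RESET_SCSSI(12, 1) == UNIPHIER_RESETX(12, 0x110, 18)
 */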
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 709276540ef1..a9108ff563dc 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -15,6 +15,15 @@ config RPMSG_CHAR
in /dev. They make it possible for user-space programs to send and
receive rpmsg packets.
+config RPMSG_MTK_SCP
+ tristate "MediaTek SCP"
+ depends on MTK_SCP
+ select RPMSG
+ help
+	  Say y here to enable support for providing communication channels
+	  to remote processors on MediaTek platforms. This uses IPI and IPC
+	  to communicate with the remote processors.
+
config RPMSG_QCOM_GLINK_NATIVE
tristate
select RPMSG
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 9aa859502d27..ae92a7fb08f6 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
+obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
new file mode 100644
index 000000000000..232aa4e40133
--- /dev/null
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2019 Google LLC.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+#include <linux/workqueue.h>
+
+#include "rpmsg_internal.h"
+
+struct mtk_rpmsg_rproc_subdev {
+ struct platform_device *pdev;
+ struct mtk_rpmsg_info *info;
+ struct rpmsg_endpoint *ns_ept;
+ struct rproc_subdev subdev;
+
+ struct work_struct register_work;
+ struct list_head channels;
+ struct mutex channels_lock;
+};
+
+#define to_mtk_subdev(d) container_of(d, struct mtk_rpmsg_rproc_subdev, subdev)
+
+struct mtk_rpmsg_channel_info {
+ struct rpmsg_channel_info info;
+ bool registered;
+ struct list_head list;
+};
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ *
+ * This message is sent across to publish a new service. When we receive these
+ * messages, an appropriate rpmsg channel (i.e., device) is created. In turn, the
+ * ->probe() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+} __packed;
+
+struct mtk_rpmsg_device {
+ struct rpmsg_device rpdev;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+struct mtk_rpmsg_endpoint {
+ struct rpmsg_endpoint ept;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+};
+
+#define to_mtk_rpmsg_device(r) container_of(r, struct mtk_rpmsg_device, rpdev)
+#define to_mtk_rpmsg_endpoint(r) container_of(r, struct mtk_rpmsg_endpoint, ept)
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops;
+
+static void __mtk_ept_release(struct kref *kref)
+{
+ struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+ refcount);
+ kfree(to_mtk_rpmsg_endpoint(ept));
+}
+
+static void mtk_rpmsg_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_rpmsg_endpoint *mept = priv;
+ struct rpmsg_endpoint *ept = &mept->ept;
+ int ret;
+
+ ret = (*ept->cb)(ept->rpdev, data, len, ept->priv, ept->addr);
+ if (ret)
+ dev_warn(&ept->rpdev->dev, "rpmsg handler return error = %d",
+ ret);
+}
+
+static struct rpmsg_endpoint *
+__mtk_create_ept(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+ u32 id)
+{
+ struct mtk_rpmsg_endpoint *mept;
+ struct rpmsg_endpoint *ept;
+ struct platform_device *pdev = mtk_subdev->pdev;
+ int ret;
+
+ mept = kzalloc(sizeof(*mept), GFP_KERNEL);
+ if (!mept)
+ return NULL;
+ mept->mtk_subdev = mtk_subdev;
+
+ ept = &mept->ept;
+ kref_init(&ept->refcount);
+
+ ept->rpdev = rpdev;
+ ept->cb = cb;
+ ept->priv = priv;
+ ept->ops = &mtk_rpmsg_endpoint_ops;
+ ept->addr = id;
+
+ ret = mtk_subdev->info->register_ipi(pdev, id, mtk_rpmsg_ipi_handler,
+ mept);
+ if (ret) {
+ dev_err(&pdev->dev, "IPI register failed, id = %d", id);
+ kref_put(&ept->refcount, __mtk_ept_release);
+ return NULL;
+ }
+
+ return ept;
+}
+
+static struct rpmsg_endpoint *
+mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_device(rpdev)->mtk_subdev;
+
+ return __mtk_create_ept(mtk_subdev, rpdev, cb, priv, chinfo.src);
+}
+
+static void mtk_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+ mtk_subdev->info->unregister_ipi(mtk_subdev->pdev, ept->addr);
+ kref_put(&ept->refcount, __mtk_ept_release);
+}
+
+static int mtk_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+ return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+ len, 0);
+}
+
+static int mtk_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev =
+ to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
+
+ /*
+	 * TODO: This is currently the same as mtk_rpmsg_send() and waits
+	 * until the SCP has received the last command.
+ */
+ return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
+ len, 0);
+}
+
+static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops = {
+ .destroy_ept = mtk_rpmsg_destroy_ept,
+ .send = mtk_rpmsg_send,
+ .trysend = mtk_rpmsg_trysend,
+};
+
+static void mtk_rpmsg_release_device(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct mtk_rpmsg_device *mdev = to_mtk_rpmsg_device(rpdev);
+
+ kfree(mdev);
+}
+
+static const struct rpmsg_device_ops mtk_rpmsg_device_ops = {
+ .create_ept = mtk_rpmsg_create_ept,
+};
+
+static struct device_node *
+mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ int ret;
+
+ for_each_available_child_of_node(node, child) {
+ ret = of_property_read_string(child, "mtk,rpmsg-name", &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
+
+static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ struct rpmsg_channel_info *info)
+{
+ struct rpmsg_device *rpdev;
+ struct mtk_rpmsg_device *mdev;
+ struct platform_device *pdev = mtk_subdev->pdev;
+ int ret;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->mtk_subdev = mtk_subdev;
+
+ rpdev = &mdev->rpdev;
+ rpdev->ops = &mtk_rpmsg_device_ops;
+ rpdev->src = info->src;
+ rpdev->dst = info->dst;
+ strscpy(rpdev->id.name, info->name, RPMSG_NAME_SIZE);
+
+ rpdev->dev.of_node =
+ mtk_rpmsg_match_device_subnode(pdev->dev.of_node, info->name);
+ rpdev->dev.parent = &pdev->dev;
+ rpdev->dev.release = mtk_rpmsg_release_device;
+
+ ret = rpmsg_register_device(rpdev);
+ if (ret) {
+ kfree(mdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_register_device_work_function(struct work_struct *register_work)
+{
+ struct mtk_rpmsg_rproc_subdev *subdev = container_of(
+ register_work, struct mtk_rpmsg_rproc_subdev, register_work);
+ struct platform_device *pdev = subdev->pdev;
+ struct mtk_rpmsg_channel_info *info;
+ int ret;
+
+ mutex_lock(&subdev->channels_lock);
+ list_for_each_entry(info, &subdev->channels, list) {
+ if (info->registered)
+ continue;
+
+ ret = mtk_rpmsg_register_device(subdev, &info->info);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't create rpmsg_device\n");
+ continue;
+ }
+
+ info->registered = true;
+ }
+ mutex_unlock(&subdev->channels_lock);
+}
+
+static int mtk_rpmsg_create_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ char *name, u32 addr)
+{
+ struct mtk_rpmsg_channel_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ strscpy(info->info.name, name, RPMSG_NAME_SIZE);
+ info->info.src = addr;
+ info->info.dst = RPMSG_ADDR_ANY;
+ mutex_lock(&mtk_subdev->channels_lock);
+ list_add(&info->list, &mtk_subdev->channels);
+ mutex_unlock(&mtk_subdev->channels_lock);
+
+ schedule_work(&mtk_subdev->register_work);
+ return 0;
+}
+
+static int mtk_rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ struct rpmsg_ns_msg *msg = data;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = priv;
+ struct device *dev = &mtk_subdev->pdev->dev;
+
+ int ret;
+
+ if (len != sizeof(*msg)) {
+ dev_err(dev, "malformed ns msg (%d)\n", len);
+ return -EINVAL;
+ }
+
+ /*
+ * the name service ept does _not_ belong to a real rpmsg channel,
+ * and is handled by the rpmsg bus itself.
+ * for sanity reasons, make sure a valid rpdev has _not_ sneaked
+ * in somehow.
+ */
+ if (rpdev) {
+ dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
+ return -EINVAL;
+ }
+
+ /* don't trust the remote processor for null terminating the name */
+ msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+ dev_info(dev, "creating channel %s addr 0x%x\n", msg->name, msg->addr);
+
+ ret = mtk_rpmsg_create_device(mtk_subdev, msg->name, msg->addr);
+ if (ret) {
+ dev_err(dev, "create rpmsg device failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_rpmsg_prepare(struct rproc_subdev *subdev)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+ /* a dedicated endpoint handles the name service msgs */
+ if (mtk_subdev->info->ns_ipi_id >= 0) {
+ mtk_subdev->ns_ept =
+ __mtk_create_ept(mtk_subdev, NULL, mtk_rpmsg_ns_cb,
+ mtk_subdev,
+ mtk_subdev->info->ns_ipi_id);
+ if (!mtk_subdev->ns_ept) {
+ dev_err(&mtk_subdev->pdev->dev,
+ "failed to create name service endpoint\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void mtk_rpmsg_unprepare(struct rproc_subdev *subdev)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+ if (mtk_subdev->ns_ept) {
+ mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+ mtk_subdev->ns_ept = NULL;
+ }
+}
+
+static void mtk_rpmsg_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct mtk_rpmsg_channel_info *info, *next;
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+ struct device *dev = &mtk_subdev->pdev->dev;
+
+ /*
+	 * Destroy the name service endpoint here, to avoid a new channel being
+ * created after the rpmsg_unregister_device loop below.
+ */
+ if (mtk_subdev->ns_ept) {
+ mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
+ mtk_subdev->ns_ept = NULL;
+ }
+
+ cancel_work_sync(&mtk_subdev->register_work);
+
+ mutex_lock(&mtk_subdev->channels_lock);
+ list_for_each_entry(info, &mtk_subdev->channels, list) {
+ if (!info->registered)
+ continue;
+ if (rpmsg_unregister_device(dev, &info->info)) {
+ dev_warn(
+ dev,
+ "rpmsg_unregister_device failed for %s.%d.%d\n",
+ info->info.name, info->info.src,
+ info->info.dst);
+ }
+ }
+
+ list_for_each_entry_safe(info, next,
+ &mtk_subdev->channels, list) {
+ list_del(&info->list);
+ kfree(info);
+ }
+ mutex_unlock(&mtk_subdev->channels_lock);
+}
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+ struct mtk_rpmsg_info *info)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev;
+
+ mtk_subdev = kzalloc(sizeof(*mtk_subdev), GFP_KERNEL);
+ if (!mtk_subdev)
+ return NULL;
+
+ mtk_subdev->pdev = pdev;
+ mtk_subdev->subdev.prepare = mtk_rpmsg_prepare;
+ mtk_subdev->subdev.stop = mtk_rpmsg_stop;
+ mtk_subdev->subdev.unprepare = mtk_rpmsg_unprepare;
+ mtk_subdev->info = info;
+ INIT_LIST_HEAD(&mtk_subdev->channels);
+ INIT_WORK(&mtk_subdev->register_work,
+ mtk_register_device_work_function);
+ mutex_init(&mtk_subdev->channels_lock);
+
+ return &mtk_subdev->subdev;
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_create_rproc_subdev);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev)
+{
+ struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
+
+ kfree(mtk_subdev);
+}
+EXPORT_SYMBOL_GPL(mtk_rpmsg_destroy_rproc_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP rpmsg driver");
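
To round out the picture, a minimal (hypothetical channel name, not part of this patch) rpmsg client driver that would bind to a channel announced through the name-service callback above:

#include <linux/module.h>
#include <linux/rpmsg.h>

static int demo_cb(struct rpmsg_device *rpdev, void *data, int len,
		   void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
	return 0;
}

static int demo_probe(struct rpmsg_device *rpdev)
{
	static char msg[] = "ping";

	/* Endpoint was created from chinfo by mtk_rpmsg_create_ept(). */
	return rpmsg_send(rpdev->ept, msg, sizeof(msg));
}

static const struct rpmsg_device_id demo_id_table[] = {
	{ .name = "demo-channel" },	/* hypothetical announced name */
	{ }
};

static struct rpmsg_driver demo_rpmsg_driver = {
	.drv.name	= "demo_rpmsg",
	.id_table	= demo_id_table,
	.probe		= demo_probe,
	.callback	= demo_cb,
};
module_rpmsg_driver(demo_rpmsg_driver);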
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d77515d8382c..34c8b6c7e095 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -240,6 +240,7 @@ config RTC_DRV_AS3722
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
+ select REGMAP_I2C
help
If you say yes here you get support for various compatible RTC
chips (often with battery backup) connected with I2C. This driver
@@ -498,12 +499,13 @@ config RTC_DRV_M41T80_WDT
help
If you say Y here you will get support for the
watchdog timer in the ST M41T60 and M41T80 RTC chips series.
+
config RTC_DRV_BD70528
tristate "ROHM BD70528 PMIC RTC"
depends on MFD_ROHM_BD70528 && (BD70528_WATCHDOG || !BD70528_WATCHDOG)
help
If you say Y here you will get support for the RTC
- on ROHM BD70528 Power Management IC.
+	  block on ROHM BD70528 and BD71828 Power Management ICs.
This driver can also be built as a module. If so, the module
will be called rtc-bd70528.
@@ -621,6 +623,7 @@ config RTC_DRV_RX8010
config RTC_DRV_RX8581
tristate "Epson RX-8571/RX-8581"
+ select REGMAP_I2C
help
If you say yes here you will get support for the Epson RX-8571/
RX-8581.
@@ -648,6 +651,7 @@ config RTC_DRV_EM3027
config RTC_DRV_RV3028
tristate "Micro Crystal RV3028"
+ select REGMAP_I2C
help
If you say yes here you get support for the Micro Crystal
RV3028.
@@ -676,13 +680,14 @@ config RTC_DRV_S5M
will be called rtc-s5m.
config RTC_DRV_SD3078
- tristate "ZXW Shenzhen whwave SD3078"
- help
- If you say yes here you get support for the ZXW Shenzhen whwave
- SD3078 RTC chips.
+ tristate "ZXW Shenzhen whwave SD3078"
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the ZXW Shenzhen whwave
+ SD3078 RTC chips.
- This driver can also be built as a module. If so, the module
- will be called rtc-sd3078
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sd3078
endif # I2C
@@ -848,14 +853,14 @@ config RTC_I2C_AND_SPI
default m if I2C=m
default y if I2C=y
default y if SPI_MASTER=y
- select REGMAP_I2C if I2C
- select REGMAP_SPI if SPI_MASTER
comment "SPI and I2C RTC drivers"
config RTC_DRV_DS3232
tristate "Dallas/Maxim DS3232/DS3234"
depends on RTC_I2C_AND_SPI
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
help
If you say yes here you get support for Dallas Semiconductor
DS3232 and DS3234 real-time clock chips. If an interrupt is associated
@@ -875,6 +880,8 @@ config RTC_DRV_DS3232_HWMON
config RTC_DRV_PCF2127
tristate "NXP PCF2127"
depends on RTC_I2C_AND_SPI
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
select WATCHDOG_CORE if WATCHDOG
help
If you say yes here you get support for the NXP PCF2127/29 RTC
@@ -891,6 +898,8 @@ config RTC_DRV_PCF2127
config RTC_DRV_RV3029C2
tristate "Micro Crystal RV3029/3049"
depends on RTC_I2C_AND_SPI
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
help
If you say yes here you get support for the Micro Crystal
RV3029 and RV3049 RTC chips.
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 73830670a41f..3521d8e8dc38 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -523,12 +523,9 @@ static int abx80x_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
if (status < 0)
return status;
- tmp = !!(status & ABX8XX_STATUS_BLF);
+ tmp = status & ABX8XX_STATUS_BLF ? RTC_VL_BACKUP_LOW : 0;
- if (copy_to_user((void __user *)arg, &tmp, sizeof(int)))
- return -EFAULT;
-
- return 0;
+ return put_user(tmp, (unsigned int __user *)arg);
case RTC_VL_CLR:
status = i2c_smbus_read_byte_data(client, ABX8XX_REG_STATUS);
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 10064bdabdff..3ab81cdec00b 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -264,6 +264,9 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
return PTR_ERR(priv->iobase);
priv->clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "Failed to enable clk!\n");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 3b833e02a657..5e811e04cb21 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -14,6 +14,7 @@
*/
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
@@ -30,7 +31,51 @@
#include <linux/time.h>
#include <linux/uaccess.h>
-#include "rtc-at91rm9200.h"
+#define AT91_RTC_CR 0x00 /* Control Register */
+#define AT91_RTC_UPDTIM BIT(0) /* Update Request Time Register */
+#define AT91_RTC_UPDCAL BIT(1) /* Update Request Calendar Register */
+
+#define AT91_RTC_MR 0x04 /* Mode Register */
+
+#define AT91_RTC_TIMR 0x08 /* Time Register */
+#define AT91_RTC_SEC GENMASK(6, 0) /* Current Second */
+#define AT91_RTC_MIN GENMASK(14, 8) /* Current Minute */
+#define AT91_RTC_HOUR GENMASK(21, 16) /* Current Hour */
+#define AT91_RTC_AMPM BIT(22) /* Ante Meridiem Post Meridiem Indicator */
+
+#define AT91_RTC_CALR 0x0c /* Calendar Register */
+#define AT91_RTC_CENT GENMASK(6, 0) /* Current Century */
+#define AT91_RTC_YEAR GENMASK(15, 8) /* Current Year */
+#define AT91_RTC_MONTH GENMASK(20, 16) /* Current Month */
+#define AT91_RTC_DAY GENMASK(23, 21) /* Current Day */
+#define AT91_RTC_DATE GENMASK(29, 24) /* Current Date */
+
+#define AT91_RTC_TIMALR 0x10 /* Time Alarm Register */
+#define AT91_RTC_SECEN BIT(7) /* Second Alarm Enable */
+#define AT91_RTC_MINEN BIT(15) /* Minute Alarm Enable */
+#define AT91_RTC_HOUREN BIT(23) /* Hour Alarm Enable */
+
+#define AT91_RTC_CALALR 0x14 /* Calendar Alarm Register */
+#define AT91_RTC_MTHEN BIT(23) /* Month Alarm Enable */
+#define AT91_RTC_DATEEN BIT(31) /* Date Alarm Enable */
+
+#define AT91_RTC_SR 0x18 /* Status Register */
+#define AT91_RTC_ACKUPD BIT(0) /* Acknowledge for Update */
+#define AT91_RTC_ALARM BIT(1) /* Alarm Flag */
+#define AT91_RTC_SECEV BIT(2) /* Second Event */
+#define AT91_RTC_TIMEV BIT(3) /* Time Event */
+#define AT91_RTC_CALEV BIT(4) /* Calendar Event */
+
+#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
+#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
+#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
+#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
+
+#define AT91_RTC_VER 0x2c /* Valid Entry Register */
+#define AT91_RTC_NVTIM BIT(0) /* Non valid Time */
+#define AT91_RTC_NVCAL BIT(1) /* Non valid Calendar */
+#define AT91_RTC_NVTIMALR BIT(2) /* Non valid Time Alarm */
+#define AT91_RTC_NVCALALR BIT(3) /* Non valid Calendar Alarm */
#define at91_rtc_read(field) \
readl_relaxed(at91_rtc_regs + field)
@@ -117,20 +162,20 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
} while ((time != at91_rtc_read(timereg)) ||
(date != at91_rtc_read(calreg)));
- tm->tm_sec = bcd2bin((time & AT91_RTC_SEC) >> 0);
- tm->tm_min = bcd2bin((time & AT91_RTC_MIN) >> 8);
- tm->tm_hour = bcd2bin((time & AT91_RTC_HOUR) >> 16);
+ tm->tm_sec = bcd2bin(FIELD_GET(AT91_RTC_SEC, time));
+ tm->tm_min = bcd2bin(FIELD_GET(AT91_RTC_MIN, time));
+ tm->tm_hour = bcd2bin(FIELD_GET(AT91_RTC_HOUR, time));
/*
* The Calendar Alarm register does not have a field for
* the year - so these will return an invalid value.
*/
tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */
- tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */
+ tm->tm_year += bcd2bin(FIELD_GET(AT91_RTC_YEAR, date)); /* year */
- tm->tm_wday = bcd2bin((date & AT91_RTC_DAY) >> 21) - 1; /* day of the week [0-6], Sunday=0 */
- tm->tm_mon = bcd2bin((date & AT91_RTC_MONTH) >> 16) - 1;
- tm->tm_mday = bcd2bin((date & AT91_RTC_DATE) >> 24);
+ tm->tm_wday = bcd2bin(FIELD_GET(AT91_RTC_DAY, date)) - 1; /* day of the week [0-6], Sunday=0 */
+ tm->tm_mon = bcd2bin(FIELD_GET(AT91_RTC_MONTH, date)) - 1;
+ tm->tm_mday = bcd2bin(FIELD_GET(AT91_RTC_DATE, date));
}
/*
@@ -167,16 +212,17 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
at91_rtc_write_idr(AT91_RTC_ACKUPD);
at91_rtc_write(AT91_RTC_TIMR,
- bin2bcd(tm->tm_sec) << 0
- | bin2bcd(tm->tm_min) << 8
- | bin2bcd(tm->tm_hour) << 16);
+ FIELD_PREP(AT91_RTC_SEC, bin2bcd(tm->tm_sec))
+ | FIELD_PREP(AT91_RTC_MIN, bin2bcd(tm->tm_min))
+ | FIELD_PREP(AT91_RTC_HOUR, bin2bcd(tm->tm_hour)));
at91_rtc_write(AT91_RTC_CALR,
- bin2bcd((tm->tm_year + 1900) / 100) /* century */
- | bin2bcd(tm->tm_year % 100) << 8 /* year */
- | bin2bcd(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */
- | bin2bcd(tm->tm_wday + 1) << 21 /* day of the week [0-6], Sunday=0 */
- | bin2bcd(tm->tm_mday) << 24);
+ FIELD_PREP(AT91_RTC_CENT,
+ bin2bcd((tm->tm_year + 1900) / 100))
+ | FIELD_PREP(AT91_RTC_YEAR, bin2bcd(tm->tm_year % 100))
+ | FIELD_PREP(AT91_RTC_MONTH, bin2bcd(tm->tm_mon + 1))
+ | FIELD_PREP(AT91_RTC_DAY, bin2bcd(tm->tm_wday + 1))
+ | FIELD_PREP(AT91_RTC_DATE, bin2bcd(tm->tm_mday)));
/* Restart Time/Calendar */
cr = at91_rtc_read(AT91_RTC_CR);
@@ -211,25 +257,17 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
*/
static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_time tm;
-
- at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
-
- tm.tm_mon = alrm->time.tm_mon;
- tm.tm_mday = alrm->time.tm_mday;
- tm.tm_hour = alrm->time.tm_hour;
- tm.tm_min = alrm->time.tm_min;
- tm.tm_sec = alrm->time.tm_sec;
+ struct rtc_time tm = alrm->time;
at91_rtc_write_idr(AT91_RTC_ALARM);
at91_rtc_write(AT91_RTC_TIMALR,
- bin2bcd(tm.tm_sec) << 0
- | bin2bcd(tm.tm_min) << 8
- | bin2bcd(tm.tm_hour) << 16
+ FIELD_PREP(AT91_RTC_SEC, bin2bcd(alrm->time.tm_sec))
+ | FIELD_PREP(AT91_RTC_MIN, bin2bcd(alrm->time.tm_min))
+ | FIELD_PREP(AT91_RTC_HOUR, bin2bcd(alrm->time.tm_hour))
| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
at91_rtc_write(AT91_RTC_CALALR,
- bin2bcd(tm.tm_mon + 1) << 16 /* tm_mon starts at zero */
- | bin2bcd(tm.tm_mday) << 24
+ FIELD_PREP(AT91_RTC_MONTH, bin2bcd(alrm->time.tm_mon + 1))
+ | FIELD_PREP(AT91_RTC_DATE, bin2bcd(alrm->time.tm_mday))
| AT91_RTC_DATEEN | AT91_RTC_MTHEN);
if (alrm->enabled) {
@@ -254,20 +292,6 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-/*
- * Provide additional RTC information in /proc/driver/rtc
- */
-static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
-{
- unsigned long imr = at91_rtc_read_imr();
-
- seq_printf(seq, "update_IRQ\t: %s\n",
- (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
- seq_printf(seq, "periodic_IRQ\t: %s\n",
- (imr & AT91_RTC_SECEV) ? "yes" : "no");
-
- return 0;
-}
/*
* IRQ handler for the RTC
@@ -327,6 +351,12 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
.compatible = "atmel,at91sam9x5-rtc",
.data = &at91sam9x5_config,
}, {
+ .compatible = "atmel,sama5d4-rtc",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,sama5d2-rtc",
+ .data = &at91rm9200_config,
+ }, {
/* sentinel */
}
};
@@ -337,7 +367,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
.set_time = at91_rtc_settime,
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
- .proc = at91_rtc_proc,
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
deleted file mode 100644
index 8be5289da8e2..000000000000
--- a/drivers/rtc/rtc-at91rm9200.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-at91/include/mach/at91_rtc.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Real Time Clock (RTC) - System peripheral registers.
- * Based on AT91RM9200 datasheet revision E.
- */
-
-#ifndef AT91_RTC_H
-#define AT91_RTC_H
-
-#define AT91_RTC_CR 0x00 /* Control Register */
-#define AT91_RTC_UPDTIM (1 << 0) /* Update Request Time Register */
-#define AT91_RTC_UPDCAL (1 << 1) /* Update Request Calendar Register */
-#define AT91_RTC_TIMEVSEL (3 << 8) /* Time Event Selection */
-#define AT91_RTC_TIMEVSEL_MINUTE (0 << 8)
-#define AT91_RTC_TIMEVSEL_HOUR (1 << 8)
-#define AT91_RTC_TIMEVSEL_DAY24 (2 << 8)
-#define AT91_RTC_TIMEVSEL_DAY12 (3 << 8)
-#define AT91_RTC_CALEVSEL (3 << 16) /* Calendar Event Selection */
-#define AT91_RTC_CALEVSEL_WEEK (0 << 16)
-#define AT91_RTC_CALEVSEL_MONTH (1 << 16)
-#define AT91_RTC_CALEVSEL_YEAR (2 << 16)
-
-#define AT91_RTC_MR 0x04 /* Mode Register */
-#define AT91_RTC_HRMOD (1 << 0) /* 12/24 Hour Mode */
-
-#define AT91_RTC_TIMR 0x08 /* Time Register */
-#define AT91_RTC_SEC (0x7f << 0) /* Current Second */
-#define AT91_RTC_MIN (0x7f << 8) /* Current Minute */
-#define AT91_RTC_HOUR (0x3f << 16) /* Current Hour */
-#define AT91_RTC_AMPM (1 << 22) /* Ante Meridiem Post Meridiem Indicator */
-
-#define AT91_RTC_CALR 0x0c /* Calendar Register */
-#define AT91_RTC_CENT (0x7f << 0) /* Current Century */
-#define AT91_RTC_YEAR (0xff << 8) /* Current Year */
-#define AT91_RTC_MONTH (0x1f << 16) /* Current Month */
-#define AT91_RTC_DAY (7 << 21) /* Current Day */
-#define AT91_RTC_DATE (0x3f << 24) /* Current Date */
-
-#define AT91_RTC_TIMALR 0x10 /* Time Alarm Register */
-#define AT91_RTC_SECEN (1 << 7) /* Second Alarm Enable */
-#define AT91_RTC_MINEN (1 << 15) /* Minute Alarm Enable */
-#define AT91_RTC_HOUREN (1 << 23) /* Hour Alarm Enable */
-
-#define AT91_RTC_CALALR 0x14 /* Calendar Alarm Register */
-#define AT91_RTC_MTHEN (1 << 23) /* Month Alarm Enable */
-#define AT91_RTC_DATEEN (1 << 31) /* Date Alarm Enable */
-
-#define AT91_RTC_SR 0x18 /* Status Register */
-#define AT91_RTC_ACKUPD (1 << 0) /* Acknowledge for Update */
-#define AT91_RTC_ALARM (1 << 1) /* Alarm Flag */
-#define AT91_RTC_SECEV (1 << 2) /* Second Event */
-#define AT91_RTC_TIMEV (1 << 3) /* Time Event */
-#define AT91_RTC_CALEV (1 << 4) /* Calendar Event */
-
-#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
-#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
-#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
-#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
-
-#define AT91_RTC_VER 0x2c /* Valid Entry Register */
-#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
-#define AT91_RTC_NVCAL (1 << 1) /* Non valid Calendar */
-#define AT91_RTC_NVTIMALR (1 << 2) /* Non valid Time Alarm */
-#define AT91_RTC_NVCALALR (1 << 3) /* Non valid Calendar Alarm */
-
-#endif
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 627037aa66a8..bbbb1f07c91f 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -6,6 +6,7 @@
#include <linux/bcd.h>
#include <linux/mfd/rohm-bd70528.h>
+#include <linux/mfd/rohm-bd71828.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -15,7 +16,7 @@
/*
 * We read regs RTC_SEC => RTC_YEAR;
 * this struct is ordered according to chip registers.
- * Keep it u8 only to avoid padding issues.
+ * Keep it u8 only (or packed) to avoid padding issues.
*/
struct bd70528_rtc_day {
u8 sec;
@@ -36,6 +37,13 @@ struct bd70528_rtc_wake {
u8 ctrl;
} __packed;
+struct bd71828_rtc_alm {
+ struct bd70528_rtc_data alm0;
+ struct bd70528_rtc_data alm1;
+ u8 alm_mask;
+ u8 alm1_mask;
+} __packed;
+
struct bd70528_rtc_alm {
struct bd70528_rtc_data data;
u8 alm_mask;
@@ -43,8 +51,10 @@ struct bd70528_rtc_alm {
} __packed;
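These structs are handed to regmap_bulk_read()/regmap_bulk_write() as raw buffers covering consecutive chip registers, which is why the comment above insists on u8-only members or __packed: compiler-inserted padding would shift later members off their registers. A hypothetical illustration:

	struct bad_layout {
		u8 sec;
		u16 data;	/* the ABI may pad one byte before this */
	};			/* sizeof() == 4 unless marked __packed */

With u8-only members (or __packed), sizeof() equals the register count and the bulk transfer maps one byte per register.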
struct bd70528_rtc {
- struct rohm_regmap_dev *mfd;
+ struct rohm_regmap_dev *parent;
struct device *dev;
+ u8 reg_time_start;
+ bool has_rtc_timers;
};
static int bd70528_set_wake(struct rohm_regmap_dev *bd70528,
@@ -123,14 +133,14 @@ static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
{
int ret;
- ret = bd70528_wdt_set(r->mfd, new_state & BD70528_WDT_STATE_BIT,
+ ret = bd70528_wdt_set(r->parent, new_state & BD70528_WDT_STATE_BIT,
old_state);
if (ret) {
dev_err(r->dev,
"Failed to disable WDG for RTC setting (%d)\n", ret);
return ret;
}
- ret = bd70528_set_elapsed_tmr(r->mfd,
+ ret = bd70528_set_elapsed_tmr(r->parent,
new_state & BD70528_ELAPSED_STATE_BIT,
old_state);
if (ret) {
@@ -138,7 +148,7 @@ static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
"Failed to disable 'elapsed timer' for RTC setting\n");
return ret;
}
- ret = bd70528_set_wake(r->mfd, new_state & BD70528_WAKE_STATE_BIT,
+ ret = bd70528_set_wake(r->parent, new_state & BD70528_WAKE_STATE_BIT,
old_state);
if (ret) {
dev_err(r->dev,
@@ -152,12 +162,18 @@ static int bd70528_set_rtc_based_timers(struct bd70528_rtc *r, int new_state,
static int bd70528_re_enable_rtc_based_timers(struct bd70528_rtc *r,
int old_state)
{
+ if (!r->has_rtc_timers)
+ return 0;
+
return bd70528_set_rtc_based_timers(r, old_state, NULL);
}
static int bd70528_disable_rtc_based_timers(struct bd70528_rtc *r,
int *old_state)
{
+ if (!r->has_rtc_timers)
+ return 0;
+
return bd70528_set_rtc_based_timers(r, 0, old_state);
}
@@ -213,22 +229,52 @@ static inline void rtc2tm(struct bd70528_rtc_data *r, struct rtc_time *t)
t->tm_wday = bcd2bin(r->week & BD70528_MASK_RTC_WEEK);
}
+static int bd71828_set_alarm(struct device *dev, struct rtc_wkalrm *a)
+{
+ int ret;
+ struct bd71828_rtc_alm alm;
+ struct bd70528_rtc *r = dev_get_drvdata(dev);
+ struct rohm_regmap_dev *parent = r->parent;
+
+ ret = regmap_bulk_read(parent->regmap, BD71828_REG_RTC_ALM_START,
+ &alm, sizeof(alm));
+ if (ret) {
+ dev_err(dev, "Failed to read alarm regs\n");
+ return ret;
+ }
+
+ tm2rtc(&a->time, &alm.alm0);
+
+ if (!a->enabled)
+ alm.alm_mask &= ~BD70528_MASK_ALM_EN;
+ else
+ alm.alm_mask |= BD70528_MASK_ALM_EN;
+
+ ret = regmap_bulk_write(parent->regmap, BD71828_REG_RTC_ALM_START,
+ &alm, sizeof(alm));
+ if (ret)
+ dev_err(dev, "Failed to set alarm time\n");
+
+ return ret;
+}
+
static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
{
struct bd70528_rtc_wake wake;
struct bd70528_rtc_alm alm;
int ret;
struct bd70528_rtc *r = dev_get_drvdata(dev);
- struct rohm_regmap_dev *bd70528 = r->mfd;
+ struct rohm_regmap_dev *parent = r->parent;
- ret = regmap_bulk_read(bd70528->regmap, BD70528_REG_RTC_WAKE_START,
+ ret = regmap_bulk_read(parent->regmap, BD70528_REG_RTC_WAKE_START,
&wake, sizeof(wake));
if (ret) {
dev_err(dev, "Failed to read wake regs\n");
return ret;
}
- ret = regmap_bulk_read(bd70528->regmap, BD70528_REG_RTC_ALM_START,
+ ret = regmap_bulk_read(parent->regmap, BD70528_REG_RTC_ALM_START,
&alm, sizeof(alm));
if (ret) {
dev_err(dev, "Failed to read alarm regs\n");
@@ -246,14 +292,14 @@ static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
wake.ctrl &= ~BD70528_MASK_WAKE_EN;
}
- ret = regmap_bulk_write(bd70528->regmap,
+ ret = regmap_bulk_write(parent->regmap,
BD70528_REG_RTC_WAKE_START, &wake,
sizeof(wake));
if (ret) {
dev_err(dev, "Failed to set wake time\n");
return ret;
}
- ret = regmap_bulk_write(bd70528->regmap, BD70528_REG_RTC_ALM_START,
+ ret = regmap_bulk_write(parent->regmap, BD70528_REG_RTC_ALM_START,
&alm, sizeof(alm));
if (ret)
dev_err(dev, "Failed to set alarm time\n");
@@ -261,14 +307,38 @@ static int bd70528_set_alarm(struct device *dev, struct rtc_wkalrm *a)
return ret;
}
+static int bd71828_read_alarm(struct device *dev, struct rtc_wkalrm *a)
+{
+ int ret;
+ struct bd71828_rtc_alm alm;
+ struct bd70528_rtc *r = dev_get_drvdata(dev);
+ struct rohm_regmap_dev *parent = r->parent;
+
+ ret = regmap_bulk_read(parent->regmap, BD71828_REG_RTC_ALM_START,
+ &alm, sizeof(alm));
+ if (ret) {
+ dev_err(dev, "Failed to read alarm regs\n");
+ return ret;
+ }
+
+ rtc2tm(&alm.alm0, &a->time);
+ a->time.tm_mday = -1;
+ a->time.tm_mon = -1;
+ a->time.tm_year = -1;
+ a->enabled = !!(alm.alm_mask & BD70528_MASK_ALM_EN);
+ a->pending = 0;
+
+ return 0;
+}
+
static int bd70528_read_alarm(struct device *dev, struct rtc_wkalrm *a)
{
struct bd70528_rtc_alm alm;
int ret;
struct bd70528_rtc *r = dev_get_drvdata(dev);
- struct rohm_regmap_dev *bd70528 = r->mfd;
+ struct rohm_regmap_dev *parent = r->parent;
- ret = regmap_bulk_read(bd70528->regmap, BD70528_REG_RTC_ALM_START,
+ ret = regmap_bulk_read(parent->regmap, BD70528_REG_RTC_ALM_START,
&alm, sizeof(alm));
if (ret) {
dev_err(dev, "Failed to read alarm regs\n");
@@ -290,14 +360,14 @@ static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
int ret, tmpret, old_states;
struct bd70528_rtc_data rtc_data;
struct bd70528_rtc *r = dev_get_drvdata(dev);
- struct rohm_regmap_dev *bd70528 = r->mfd;
+ struct rohm_regmap_dev *parent = r->parent;
ret = bd70528_disable_rtc_based_timers(r, &old_states);
if (ret)
return ret;
- tmpret = regmap_bulk_read(bd70528->regmap,
- BD70528_REG_RTC_START, &rtc_data,
+ tmpret = regmap_bulk_read(parent->regmap,
+ r->reg_time_start, &rtc_data,
sizeof(rtc_data));
if (tmpret) {
dev_err(dev, "Failed to read RTC time registers\n");
@@ -305,8 +375,8 @@ static int bd70528_set_time_locked(struct device *dev, struct rtc_time *t)
}
tm2rtc(t, &rtc_data);
- tmpret = regmap_bulk_write(bd70528->regmap,
- BD70528_REG_RTC_START, &rtc_data,
+ tmpret = regmap_bulk_write(parent->regmap,
+ r->reg_time_start, &rtc_data,
sizeof(rtc_data));
if (tmpret) {
dev_err(dev, "Failed to set RTC time\n");
@@ -321,27 +391,32 @@ renable_out:
return ret;
}
+static int bd71828_set_time(struct device *dev, struct rtc_time *t)
+{
+ return bd70528_set_time_locked(dev, t);
+}
+
static int bd70528_set_time(struct device *dev, struct rtc_time *t)
{
int ret;
struct bd70528_rtc *r = dev_get_drvdata(dev);
- bd70528_wdt_lock(r->mfd);
+ bd70528_wdt_lock(r->parent);
ret = bd70528_set_time_locked(dev, t);
- bd70528_wdt_unlock(r->mfd);
+ bd70528_wdt_unlock(r->parent);
return ret;
}
static int bd70528_get_time(struct device *dev, struct rtc_time *t)
{
struct bd70528_rtc *r = dev_get_drvdata(dev);
- struct rohm_regmap_dev *bd70528 = r->mfd;
+ struct rohm_regmap_dev *parent = r->parent;
struct bd70528_rtc_data rtc_data;
int ret;
/* read the RTC date and time registers all at once */
- ret = regmap_bulk_read(bd70528->regmap,
- BD70528_REG_RTC_START, &rtc_data,
+ ret = regmap_bulk_read(parent->regmap,
+ r->reg_time_start, &rtc_data,
sizeof(rtc_data));
if (ret) {
dev_err(dev, "Failed to read RTC time (err %d)\n", ret);
@@ -362,19 +437,36 @@ static int bd70528_alm_enable(struct device *dev, unsigned int enabled)
if (enabled)
enableval = 0;
- bd70528_wdt_lock(r->mfd);
- ret = bd70528_set_wake(r->mfd, enabled, NULL);
+ bd70528_wdt_lock(r->parent);
+ ret = bd70528_set_wake(r->parent, enabled, NULL);
if (ret) {
dev_err(dev, "Failed to change wake state\n");
goto out_unlock;
}
- ret = regmap_update_bits(r->mfd->regmap, BD70528_REG_RTC_ALM_MASK,
+ ret = regmap_update_bits(r->parent->regmap, BD70528_REG_RTC_ALM_MASK,
BD70528_MASK_ALM_EN, enableval);
if (ret)
dev_err(dev, "Failed to change alarm state\n");
out_unlock:
- bd70528_wdt_unlock(r->mfd);
+ bd70528_wdt_unlock(r->parent);
+ return ret;
+}
+
+static int bd71828_alm_enable(struct device *dev, unsigned int enabled)
+{
+ int ret;
+ struct bd70528_rtc *r = dev_get_drvdata(dev);
+ unsigned int enableval = BD70528_MASK_ALM_EN;
+
+ if (!enabled)
+ enableval = 0;
+
+ ret = regmap_update_bits(r->parent->regmap, BD71828_REG_RTC_ALM0_MASK,
+ BD70528_MASK_ALM_EN, enableval);
+ if (ret)
+ dev_err(dev, "Failed to change alarm state\n");
+
return ret;
}
@@ -386,6 +478,14 @@ static const struct rtc_class_ops bd70528_rtc_ops = {
.alarm_irq_enable = bd70528_alm_enable,
};
+static const struct rtc_class_ops bd71828_rtc_ops = {
+ .read_time = bd70528_get_time,
+ .set_time = bd71828_set_time,
+ .read_alarm = bd71828_read_alarm,
+ .set_alarm = bd71828_set_alarm,
+ .alarm_irq_enable = bd71828_alm_enable,
+};
+
static irqreturn_t alm_hndlr(int irq, void *data)
{
struct rtc_device *rtc = data;
@@ -397,14 +497,19 @@ static irqreturn_t alm_hndlr(int irq, void *data)
static int bd70528_probe(struct platform_device *pdev)
{
struct bd70528_rtc *bd_rtc;
- struct rohm_regmap_dev *mfd;
+ const struct rtc_class_ops *rtc_ops;
+ struct rohm_regmap_dev *parent;
+ const char *irq_name;
int ret;
struct rtc_device *rtc;
int irq;
unsigned int hr;
+ bool enable_main_irq = false;
+ u8 hour_reg;
+ enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
- mfd = dev_get_drvdata(pdev->dev.parent);
- if (!mfd) {
+ parent = dev_get_drvdata(pdev->dev.parent);
+ if (!parent) {
dev_err(&pdev->dev, "No MFD driver data\n");
return -EINVAL;
}
@@ -412,16 +517,39 @@ static int bd70528_probe(struct platform_device *pdev)
if (!bd_rtc)
return -ENOMEM;
- bd_rtc->mfd = mfd;
+ bd_rtc->parent = parent;
bd_rtc->dev = &pdev->dev;
- irq = platform_get_irq_byname(pdev, "bd70528-rtc-alm");
- if (irq < 0)
+ switch (chip) {
+ case ROHM_CHIP_TYPE_BD70528:
+ irq_name = "bd70528-rtc-alm";
+ bd_rtc->has_rtc_timers = true;
+ bd_rtc->reg_time_start = BD70528_REG_RTC_START;
+ hour_reg = BD70528_REG_RTC_HOUR;
+ enable_main_irq = true;
+ rtc_ops = &bd70528_rtc_ops;
+ break;
+ case ROHM_CHIP_TYPE_BD71828:
+ irq_name = "bd71828-rtc-alm-0";
+ bd_rtc->reg_time_start = BD71828_REG_RTC_START;
+ hour_reg = BD71828_REG_RTC_HOUR;
+ rtc_ops = &bd71828_rtc_ops;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown chip\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq_byname(pdev, irq_name);
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq\n");
return irq;
+ }
platform_set_drvdata(pdev, bd_rtc);
- ret = regmap_read(mfd->regmap, BD70528_REG_RTC_HOUR, &hr);
+ ret = regmap_read(parent->regmap, hour_reg, &hr);
if (ret) {
dev_err(&pdev->dev, "Failed to reag RTC clock\n");
@@ -431,10 +559,10 @@ static int bd70528_probe(struct platform_device *pdev)
if (!(hr & BD70528_MASK_RTC_HOUR_24H)) {
struct rtc_time t;
- ret = bd70528_get_time(&pdev->dev, &t);
+ ret = rtc_ops->read_time(&pdev->dev, &t);
if (!ret)
- ret = bd70528_set_time(&pdev->dev, &t);
+ ret = rtc_ops->set_time(&pdev->dev, &t);
if (ret) {
dev_err(&pdev->dev,
@@ -454,7 +582,7 @@ static int bd70528_probe(struct platform_device *pdev)
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
- rtc->ops = &bd70528_rtc_ops;
+ rtc->ops = rtc_ops;
	/* Request alarm IRQ prior to registering the RTC */
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, &alm_hndlr,
@@ -468,27 +596,37 @@ static int bd70528_probe(struct platform_device *pdev)
* leave them enabled as irq-controller should disable irqs
* from sub-registers when IRQ is disabled or freed.
*/
- ret = regmap_update_bits(mfd->regmap,
+ if (enable_main_irq) {
+ ret = regmap_update_bits(parent->regmap,
BD70528_REG_INT_MAIN_MASK,
BD70528_INT_RTC_MASK, 0);
- if (ret) {
- dev_err(&pdev->dev, "Failed to enable RTC interrupts\n");
- return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable RTC interrupts\n");
+ return ret;
+ }
}
return rtc_register_device(rtc);
}
+static const struct platform_device_id bd718x7_rtc_id[] = {
+ { "bd70528-rtc", ROHM_CHIP_TYPE_BD70528 },
+ { "bd71828-rtc", ROHM_CHIP_TYPE_BD71828 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, bd718x7_rtc_id);
+
static struct platform_driver bd70528_rtc = {
.driver = {
.name = "bd70528-rtc"
},
.probe = bd70528_probe,
+ .id_table = bd718x7_rtc_id,
};
module_platform_driver(bd70528_rtc);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 RTC driver");
+MODULE_DESCRIPTION("ROHM BD70528 and BD71828 PMIC RTC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bd70528-rtc");
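Chip selection in the reworked probe rides on the id table: the driver_data of the matching platform_device_id entry (here an enum rohm_chip_type) comes back through platform_get_device_id(). A condensed sketch of the pattern, with hypothetical names:

	static const struct platform_device_id ex_rtc_id[] = {
		{ "chip-a-rtc", EX_CHIP_A },	/* name, driver_data */
		{ "chip-b-rtc", EX_CHIP_B },
		{ },
	};

	static int ex_probe(struct platform_device *pdev)
	{
		/* driver_data of the id entry that matched this device */
		enum ex_chip chip = platform_get_device_id(pdev)->driver_data;

		/* branch on chip: register base, ops, IRQ name, ... */
		return 0;
	}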
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 033303708c8b..b795fe4cbd2e 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = cmos_interrupt;
retval = request_irq(rtc_irq, rtc_cmos_int_handler,
- IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
+ 0, dev_name(&cmos_rtc.rtc->dev),
cmos_rtc.rtc);
if (retval < 0) {
dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
@@ -1197,8 +1197,6 @@ static void rtc_wake_off(struct device *dev)
/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
static void use_acpi_alarm_quirks(void)
{
- int year;
-
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
@@ -1208,8 +1206,10 @@ static void use_acpi_alarm_quirks(void)
if (!is_hpet_enabled())
return;
- if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2015)
- use_acpi_alarm = true;
+ if (dmi_get_bios_year() < 2015)
+ return;
+
+ use_acpi_alarm = true;
}
#else
static inline void use_acpi_alarm_quirks(void) { }
@@ -1305,7 +1305,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
* hardcode it on systems with a legacy PIC.
*/
if (nr_legacy_irqs())
- irq = 8;
+ irq = RTC_IRQ;
#endif
return cmos_do_probe(&pnp->dev,
pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
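dmi_get_bios_year() folds the old dmi_get_date() call-plus-check into one helper. As I read <linux/dmi.h>, it returns the BIOS release year on success and a negative errno when the DMI date is absent, so the single comparison also covers the lookup-failure case the removed year variable used to guard; a sketch of the assumed shape:

	/* assumed shape of the helper, hedged -- see <linux/dmi.h> */
	static inline int example_bios_year(void)
	{
		int year;

		if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL))
			return -ENODATA;	/* < 2015, quirk skipped */
		return year;
	}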
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index d043d30f05bc..f7343c289cab 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -5,7 +5,6 @@
// Author: Stephen Barber <smbarber@chromium.org>
#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index d21004a68ee0..ba143423875b 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -75,7 +75,6 @@ static const struct spi_device_id ds1343_id[] = {
MODULE_DEVICE_TABLE(spi, ds1343_id);
struct ds1343_priv {
- struct spi_device *spi;
struct rtc_device *rtc;
struct regmap *map;
int irq;
@@ -362,12 +361,13 @@ static int ds1343_probe(struct spi_device *spi)
if (!priv)
return -ENOMEM;
- priv->spi = spi;
-
/* RTC DS1347 works in spi mode 3 and
- * its chip select is active high
+ * its chip select is active high. Active high should be defined as
+ * "inverse polarity" as GPIO-based chip selects can be logically
+ * active high but inverted by the GPIO library.
*/
- spi->mode = SPI_MODE_3 | SPI_CS_HIGH;
+ spi->mode |= SPI_MODE_3;
+ spi->mode ^= SPI_CS_HIGH;
spi->bits_per_word = 8;
res = spi_setup(spi);
if (res)
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 443f6d05ce29..0fb79c4afb46 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -78,7 +78,6 @@
struct hym8563 {
struct i2c_client *client;
struct rtc_device *rtc;
- bool valid;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clkout_hw;
#endif
@@ -91,19 +90,19 @@ struct hym8563 {
static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
- struct hym8563 *hym8563 = i2c_get_clientdata(client);
u8 buf[7];
int ret;
- if (!hym8563->valid) {
- dev_warn(&client->dev, "no valid clock/calendar values available\n");
- return -EPERM;
- }
-
ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
if (ret < 0)
return ret;
+ if (buf[0] & HYM8563_SEC_VL) {
+ dev_warn(&client->dev,
+ "no valid clock/calendar values available\n");
+ return -EINVAL;
+ }
+
tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK);
tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK);
tm->tm_hour = bcd2bin(buf[2] & HYM8563_HOUR_MASK);
@@ -118,7 +117,6 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
- struct hym8563 *hym8563 = i2c_get_clientdata(client);
u8 buf[7];
int ret;
@@ -157,8 +155,6 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (ret < 0)
return ret;
- hym8563->valid = true;
-
return 0;
}
@@ -556,9 +552,8 @@ static int hym8563_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- hym8563->valid = !(ret & HYM8563_SEC_VL);
dev_dbg(&client->dev, "rtc information is %s\n",
- hym8563->valid ? "valid" : "invalid");
+ (ret & HYM8563_SEC_VL) ? "invalid" : "valid");
hym8563->rtc = devm_rtc_device_register(&client->dev, client->name,
&hym8563_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index d3a75d447fce..e8194f1f01a8 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -20,6 +20,16 @@
struct m48t35_rtc {
u8 pad[0x7ff8]; /* starts at 0x7ff8 */
+#ifdef CONFIG_SGI_IP27
+ u8 hour;
+ u8 min;
+ u8 sec;
+ u8 control;
+ u8 year;
+ u8 month;
+ u8 date;
+ u8 day;
+#else
u8 control;
u8 sec;
u8 min;
@@ -28,6 +38,7 @@ struct m48t35_rtc {
u8 date;
u8 month;
u8 year;
+#endif
};
#define M48T35_RTC_SET 0x80
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index df2829dd55ad..2ecd8752b088 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-
-#ifdef CONFIG_X86
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 == 0x17) ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
- RTC_FREQ_SELECT);
- save_freq_select &= ~RTC_DIV_RESET2;
- } else
- CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
- RTC_FREQ_SELECT);
-#else
- CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
-#endif
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
index 07b30a373a92..6b24ac9e1cfa 100644
--- a/drivers/rtc/rtc-moxart.c
+++ b/drivers/rtc/rtc-moxart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MOXA ART RTC driver.
*
@@ -7,10 +8,6 @@
*
* Based on code from
* Moxa Technology Co., Ltd. <www.moxa.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 5249fc99fd5f..cda238dfe69b 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
irqen = irqsta & ~RTC_IRQ_EN_AL;
mutex_lock(&rtc->lock);
if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
- irqen) < 0)
+ irqen) == 0)
mtk_rtc_write_trigger(rtc);
mutex_unlock(&rtc->lock);
@@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
mutex_unlock(&rtc->lock);
- tm->tm_sec = data[RTC_OFFSET_SEC];
- tm->tm_min = data[RTC_OFFSET_MIN];
- tm->tm_hour = data[RTC_OFFSET_HOUR];
- tm->tm_mday = data[RTC_OFFSET_DOM];
- tm->tm_mon = data[RTC_OFFSET_MTH];
- tm->tm_year = data[RTC_OFFSET_YEAR];
+ tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+ tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+ tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+ tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+ tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
tm->tm_year += RTC_MIN_YEAR_OFFSET;
tm->tm_mon--;
@@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
tm->tm_year -= RTC_MIN_YEAR_OFFSET;
tm->tm_mon++;
- data[RTC_OFFSET_SEC] = tm->tm_sec;
- data[RTC_OFFSET_MIN] = tm->tm_min;
- data[RTC_OFFSET_HOUR] = tm->tm_hour;
- data[RTC_OFFSET_DOM] = tm->tm_mday;
- data[RTC_OFFSET_MTH] = tm->tm_mon;
- data[RTC_OFFSET_YEAR] = tm->tm_year;
-
mutex_lock(&rtc->lock);
+ ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+ data, RTC_OFFSET_COUNT);
+ if (ret < 0)
+ goto exit;
+
+ data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+ (tm->tm_sec & RTC_AL_SEC_MASK));
+ data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+ (tm->tm_min & RTC_AL_MIN_MASK));
+ data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+ (tm->tm_hour & RTC_AL_HOU_MASK));
+ data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+ (tm->tm_mday & RTC_AL_DOM_MASK));
+ data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+ (tm->tm_mon & RTC_AL_MTH_MASK));
+ data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+ (tm->tm_year & RTC_AL_YEA_MASK));
+
if (alm->enabled) {
ret = regmap_bulk_write(rtc->regmap,
rtc->addr_base + RTC_AL_SEC,
@@ -286,15 +297,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &mtk_rtc_ops;
- ret = rtc_register_device(rtc->rtc_dev);
- if (ret)
- goto out_free_irq;
-
- return 0;
-
-out_free_irq:
- free_irq(rtc->irq, rtc);
- return ret;
+ return rtc_register_device(rtc->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
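The rewritten mtk_rtc_set_alarm() now reads the alarm registers back before writing, because spare bits share those registers with the alarm fields; each assignment is a read-modify-write confined to one field mask. Reduced to a helper (hypothetical name):

	/* replace only the bits covered by mask; preserve everything else */
	static inline u16 ex_update_field(u16 reg, u16 mask, u16 val)
	{
		return (reg & ~mask) | (val & mask);
	}

Each data[RTC_OFFSET_*] line above is this helper inlined with the matching RTC_AL_*_MASK.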
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 988a4dfcfaf8..d4ed20fb3194 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -616,7 +616,7 @@ static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
break;
default:
return -ENOTSUPP;
- };
+ }
*config = pinconf_to_config_packed(param, arg);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index ba5baaca47be..4e50d6768f13 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -199,11 +199,9 @@ static int pcf2127_rtc_ioctl(struct device *dev,
if (ret)
return ret;
- touser = touser & PCF2127_BIT_CTRL3_BLF ? 1 : 0;
+ touser = touser & PCF2127_BIT_CTRL3_BLF ? RTC_VL_BACKUP_LOW : 0;
- if (copy_to_user((void __user *)arg, &touser, sizeof(int)))
- return -EFAULT;
- return 0;
+ return put_user(touser, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
}
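Several drivers in this batch convert RTC_VL_READ from copy_to_user() of a raw chip flag to put_user() of the generic RTC_VL_* bits. From userspace the ioctl is consumed as sketched below, assuming the device node is /dev/rtc0 and using the RTC_VL_BACKUP_LOW flag seen above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int main(void)
	{
		unsigned int vl = 0;
		int fd = open("/dev/rtc0", O_RDONLY);

		if (fd < 0 || ioctl(fd, RTC_VL_READ, &vl) < 0)
			return 1;
		if (vl & RTC_VL_BACKUP_LOW)
			printf("backup battery low\n");
		return 0;
	}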
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 1afa6d9fa9fb..1db17ba1fc64 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -289,21 +289,9 @@ static int pcf85063_ioctl(struct device *dev, unsigned int cmd,
if (ret < 0)
return ret;
- if (status & PCF85063_REG_SC_OS)
- dev_warn(&pcf85063->rtc->dev, "Voltage low, data loss detected.\n");
+ status = status & PCF85063_REG_SC_OS ? RTC_VL_DATA_INVALID : 0;
- status &= PCF85063_REG_SC_OS;
-
- if (copy_to_user((void __user *)arg, &status, sizeof(int)))
- return -EFAULT;
-
- return 0;
-
- case RTC_VL_CLR:
- ret = regmap_update_bits(pcf85063->regmap, PCF85063_REG_SC,
- PCF85063_REG_SC_OS, 0);
-
- return ret;
+ return put_user(status, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index b24c908f5f06..47e0f411dd5c 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -282,11 +282,11 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
ret = pcf8523_voltage_low(client);
if (ret < 0)
return ret;
+ if (ret)
+ ret = RTC_VL_BACKUP_LOW;
- if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
- return -EFAULT;
+ return put_user(ret, (unsigned int __user *)arg);
- return 0;
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 3c322f3079b0..2dc30eafa639 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -22,8 +22,8 @@
#define PCF8563_REG_ST1 0x00 /* status */
#define PCF8563_REG_ST2 0x01
-#define PCF8563_BIT_AIE (1 << 1)
-#define PCF8563_BIT_AF (1 << 3)
+#define PCF8563_BIT_AIE BIT(1)
+#define PCF8563_BIT_AF BIT(3)
#define PCF8563_BITS_ST2_N (7 << 5)
#define PCF8563_REG_SC 0x02 /* datetime */
@@ -76,7 +76,6 @@ struct pcf8563 {
* 1970...2069.
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
- int voltage_low; /* incicates if a low_voltage was detected */
struct i2c_client *client;
#ifdef CONFIG_COMMON_CLK
@@ -208,7 +207,6 @@ static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
return err;
if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
- pcf8563->voltage_low = 1;
dev_err(&client->dev,
"low voltage detected, date/time is not reliable.\n");
return -EINVAL;
@@ -276,43 +274,23 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
}
-#ifdef CONFIG_RTC_INTF_DEV
static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
- struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
- struct rtc_time tm;
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
switch (cmd) {
case RTC_VL_READ:
- if (pcf8563->voltage_low)
- dev_info(dev, "low voltage detected, date/time is not reliable.\n");
-
- if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
- sizeof(int)))
- return -EFAULT;
- return 0;
- case RTC_VL_CLR:
- /*
- * Clear the VL bit in the seconds register in case
- * the time has not been set already (which would
- * have cleared it). This does not really matter
- * because of the cached voltage_low value but do it
- * anyway for consistency.
- */
- if (pcf8563_rtc_read_time(dev, &tm))
- pcf8563_rtc_set_time(dev, &tm);
-
- /* Clear the cached value. */
- pcf8563->voltage_low = 0;
+ ret = i2c_smbus_read_byte_data(client, PCF8563_REG_SC);
+ if (ret < 0)
+ return ret;
- return 0;
+ return put_user(ret & PCF8563_SC_LV ? RTC_VL_DATA_INVALID : 0,
+ (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
-#else
-#define pcf8563_rtc_ioctl NULL
-#endif
static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
{
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 6b7b3a69601a..a0ddc86c975a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -428,21 +428,8 @@ static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
if (ret < 0)
return ret;
- if (status & RV3028_STATUS_PORF)
- dev_warn(&rv3028->rtc->dev, "Voltage low, data loss detected.\n");
-
- status &= RV3028_STATUS_PORF;
-
- if (copy_to_user((void __user *)arg, &status, sizeof(int)))
- return -EFAULT;
-
- return 0;
-
- case RTC_VL_CLR:
- ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS,
- RV3028_STATUS_PORF, 0);
-
- return ret;
+ status = status & RV3028_STATUS_PORF ? RTC_VL_DATA_INVALID : 0;
+ return put_user(status, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 4cdf6588e1d9..62718231731b 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -109,10 +109,8 @@
#define RV3029_CONTROL_E2P_TOV_MASK 0x3F /* XTAL turnover temp mask */
/* user ram section */
-#define RV3029_USR1_RAM_PAGE 0x38
-#define RV3029_USR1_SECTION_LEN 0x04
-#define RV3029_USR2_RAM_PAGE 0x3C
-#define RV3029_USR2_SECTION_LEN 0x04
+#define RV3029_RAM_PAGE 0x38
+#define RV3029_RAM_SECTION_LEN 8
struct rv3029_data {
struct device *dev;
@@ -121,77 +119,13 @@ struct rv3029_data {
int irq;
};
-static int rv3029_read_regs(struct device *dev, u8 reg, u8 *buf,
- unsigned int len)
-{
- struct rv3029_data *rv3029 = dev_get_drvdata(dev);
-
- if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
- (reg + len > RV3029_USR1_RAM_PAGE + 8))
- return -EINVAL;
-
- return regmap_bulk_read(rv3029->regmap, reg, buf, len);
-}
-
-static int rv3029_write_regs(struct device *dev, u8 reg, u8 const buf[],
- unsigned int len)
-{
- struct rv3029_data *rv3029 = dev_get_drvdata(dev);
-
- if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
- (reg + len > RV3029_USR1_RAM_PAGE + 8))
- return -EINVAL;
-
- return regmap_bulk_write(rv3029->regmap, reg, buf, len);
-}
-
-static int rv3029_update_bits(struct device *dev, u8 reg, u8 mask, u8 set)
-{
- u8 buf;
- int ret;
-
- ret = rv3029_read_regs(dev, reg, &buf, 1);
- if (ret < 0)
- return ret;
- buf &= ~mask;
- buf |= set & mask;
- ret = rv3029_write_regs(dev, reg, &buf, 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int rv3029_get_sr(struct device *dev, u8 *buf)
-{
- int ret = rv3029_read_regs(dev, RV3029_STATUS, buf, 1);
-
- if (ret < 0)
- return -EIO;
- dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
- return 0;
-}
-
-static int rv3029_set_sr(struct device *dev, u8 val)
-{
- u8 buf[1];
- int sr;
-
- buf[0] = val;
- sr = rv3029_write_regs(dev, RV3029_STATUS, buf, 1);
- dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
- if (sr < 0)
- return -EIO;
- return 0;
-}
-
-static int rv3029_eeprom_busywait(struct device *dev)
+static int rv3029_eeprom_busywait(struct rv3029_data *rv3029)
{
+ unsigned int sr;
int i, ret;
- u8 sr;
for (i = 100; i > 0; i--) {
- ret = rv3029_get_sr(dev, &sr);
+ ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
if (ret < 0)
break;
if (!(sr & RV3029_STATUS_EEBUSY))
@@ -199,126 +133,128 @@ static int rv3029_eeprom_busywait(struct device *dev)
usleep_range(1000, 10000);
}
if (i <= 0) {
- dev_err(dev, "EEPROM busy wait timeout.\n");
+ dev_err(rv3029->dev, "EEPROM busy wait timeout.\n");
return -ETIMEDOUT;
}
return ret;
}
-static int rv3029_eeprom_exit(struct device *dev)
+static int rv3029_eeprom_exit(struct rv3029_data *rv3029)
{
/* Re-enable eeprom refresh */
- return rv3029_update_bits(dev, RV3029_ONOFF_CTRL,
+ return regmap_update_bits(rv3029->regmap, RV3029_ONOFF_CTRL,
RV3029_ONOFF_CTRL_EERE,
RV3029_ONOFF_CTRL_EERE);
}
-static int rv3029_eeprom_enter(struct device *dev)
+static int rv3029_eeprom_enter(struct rv3029_data *rv3029)
{
+ unsigned int sr;
int ret;
- u8 sr;
/* Check whether we are in the allowed voltage range. */
- ret = rv3029_get_sr(dev, &sr);
+ ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
if (ret < 0)
return ret;
- if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+ if (sr & RV3029_STATUS_VLOW2)
+ return -ENODEV;
+ if (sr & RV3029_STATUS_VLOW1) {
/* We clear the bits and retry once just in case
* we had a brown out in early startup.
*/
- sr &= ~RV3029_STATUS_VLOW1;
- sr &= ~RV3029_STATUS_VLOW2;
- ret = rv3029_set_sr(dev, sr);
+ ret = regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+ RV3029_STATUS_VLOW1, 0);
if (ret < 0)
return ret;
usleep_range(1000, 10000);
- ret = rv3029_get_sr(dev, &sr);
+ ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
if (ret < 0)
return ret;
- if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
- dev_err(dev,
+ if (sr & RV3029_STATUS_VLOW1) {
+ dev_err(rv3029->dev,
"Supply voltage is too low to safely access the EEPROM.\n");
return -ENODEV;
}
}
/* Disable eeprom refresh. */
- ret = rv3029_update_bits(dev, RV3029_ONOFF_CTRL, RV3029_ONOFF_CTRL_EERE,
- 0);
+ ret = regmap_update_bits(rv3029->regmap, RV3029_ONOFF_CTRL,
+ RV3029_ONOFF_CTRL_EERE, 0);
if (ret < 0)
return ret;
/* Wait for any previous eeprom accesses to finish. */
- ret = rv3029_eeprom_busywait(dev);
+ ret = rv3029_eeprom_busywait(rv3029);
if (ret < 0)
- rv3029_eeprom_exit(dev);
+ rv3029_eeprom_exit(rv3029);
return ret;
}
-static int rv3029_eeprom_read(struct device *dev, u8 reg,
+static int rv3029_eeprom_read(struct rv3029_data *rv3029, u8 reg,
u8 buf[], size_t len)
{
int ret, err;
- err = rv3029_eeprom_enter(dev);
+ err = rv3029_eeprom_enter(rv3029);
if (err < 0)
return err;
- ret = rv3029_read_regs(dev, reg, buf, len);
+ ret = regmap_bulk_read(rv3029->regmap, reg, buf, len);
- err = rv3029_eeprom_exit(dev);
+ err = rv3029_eeprom_exit(rv3029);
if (err < 0)
return err;
return ret;
}
-static int rv3029_eeprom_write(struct device *dev, u8 reg,
+static int rv3029_eeprom_write(struct rv3029_data *rv3029, u8 reg,
u8 const buf[], size_t len)
{
+ unsigned int tmp;
int ret, err;
size_t i;
- u8 tmp;
- err = rv3029_eeprom_enter(dev);
+ err = rv3029_eeprom_enter(rv3029);
if (err < 0)
return err;
for (i = 0; i < len; i++, reg++) {
- ret = rv3029_read_regs(dev, reg, &tmp, 1);
+ ret = regmap_read(rv3029->regmap, reg, &tmp);
if (ret < 0)
break;
if (tmp != buf[i]) {
- ret = rv3029_write_regs(dev, reg, &buf[i], 1);
+ tmp = buf[i];
+ ret = regmap_write(rv3029->regmap, reg, tmp);
if (ret < 0)
break;
}
- ret = rv3029_eeprom_busywait(dev);
+ ret = rv3029_eeprom_busywait(rv3029);
if (ret < 0)
break;
}
- err = rv3029_eeprom_exit(dev);
+ err = rv3029_eeprom_exit(rv3029);
if (err < 0)
return err;
return ret;
}
-static int rv3029_eeprom_update_bits(struct device *dev,
+static int rv3029_eeprom_update_bits(struct rv3029_data *rv3029,
u8 reg, u8 mask, u8 set)
{
u8 buf;
int ret;
- ret = rv3029_eeprom_read(dev, reg, &buf, 1);
+ ret = rv3029_eeprom_read(rv3029, reg, &buf, 1);
if (ret < 0)
return ret;
buf &= ~mask;
buf |= set & mask;
- ret = rv3029_eeprom_write(dev, reg, &buf, 1);
+ ret = rv3029_eeprom_write(rv3029, reg, &buf, 1);
if (ret < 0)
return ret;
@@ -330,20 +266,20 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
struct device *dev = dev_id;
struct rv3029_data *rv3029 = dev_get_drvdata(dev);
struct mutex *lock = &rv3029->rtc->ops_lock;
+ unsigned int flags, controls;
unsigned long events = 0;
- u8 flags, controls;
int ret;
mutex_lock(lock);
- ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+ ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
if (ret) {
dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
mutex_unlock(lock);
return IRQ_NONE;
}
- ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
+ ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
if (ret) {
dev_warn(dev, "Read IRQ Flags Register error %d\n", ret);
mutex_unlock(lock);
@@ -358,8 +294,8 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
if (events) {
rtc_update_irq(rv3029->rtc, 1, events);
- rv3029_write_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
- rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+ regmap_write(rv3029->regmap, RV3029_IRQ_FLAGS, flags);
+ regmap_write(rv3029->regmap, RV3029_IRQ_CTRL, controls);
}
mutex_unlock(lock);
@@ -368,22 +304,22 @@ static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
{
- u8 buf[1];
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+ unsigned int sr;
int ret;
u8 regs[RV3029_WATCH_SECTION_LEN] = { 0, };
- ret = rv3029_get_sr(dev, buf);
- if (ret < 0) {
- dev_err(dev, "%s: reading SR failed\n", __func__);
- return -EIO;
- }
+ ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
+ if (ret < 0)
+ return ret;
+
+ if (sr & (RV3029_STATUS_VLOW2 | RV3029_STATUS_PON))
+ return -EINVAL;
- ret = rv3029_read_regs(dev, RV3029_W_SEC, regs,
+ ret = regmap_bulk_read(rv3029->regmap, RV3029_W_SEC, regs,
RV3029_WATCH_SECTION_LEN);
- if (ret < 0) {
- dev_err(dev, "%s: reading RTC section failed\n", __func__);
+ if (ret < 0)
return ret;
- }
tm->tm_sec = bcd2bin(regs[RV3029_W_SEC - RV3029_W_SEC]);
tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES - RV3029_W_SEC]);
@@ -411,34 +347,24 @@ static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
struct rtc_time *const tm = &alarm->time;
+ unsigned int controls, flags;
int ret;
- u8 regs[8], controls, flags;
-
- ret = rv3029_get_sr(dev, regs);
- if (ret < 0) {
- dev_err(dev, "%s: reading SR failed\n", __func__);
- return -EIO;
- }
+ u8 regs[8];
- ret = rv3029_read_regs(dev, RV3029_A_SC, regs,
+ ret = regmap_bulk_read(rv3029->regmap, RV3029_A_SC, regs,
RV3029_ALARM_SECTION_LEN);
-
- if (ret < 0) {
- dev_err(dev, "%s: reading alarm section failed\n", __func__);
+ if (ret < 0)
return ret;
- }
- ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
- if (ret) {
- dev_err(dev, "Read IRQ Control Register error %d\n", ret);
+ ret = regmap_read(rv3029->regmap, RV3029_IRQ_CTRL, &controls);
+ if (ret)
return ret;
- }
- ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
- if (ret < 0) {
- dev_err(dev, "Read IRQ Flags Register error %d\n", ret);
+
+ ret = regmap_read(rv3029->regmap, RV3029_IRQ_FLAGS, &flags);
+ if (ret < 0)
return ret;
- }
tm->tm_sec = bcd2bin(regs[RV3029_A_SC - RV3029_A_SC] & 0x7f);
tm->tm_min = bcd2bin(regs[RV3029_A_MN - RV3029_A_SC] & 0x7f);
@@ -456,50 +382,20 @@ static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int rv3029_alarm_irq_enable(struct device *dev, unsigned int enable)
{
- int ret;
- u8 controls;
-
- ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
- if (ret < 0) {
- dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
- return ret;
- }
-
- /* enable/disable AIE irq */
- if (enable)
- controls |= RV3029_IRQ_CTRL_AIE;
- else
- controls &= ~RV3029_IRQ_CTRL_AIE;
-
- ret = rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
- if (ret < 0) {
- dev_err(dev, "can't update INT reg\n");
- return ret;
- }
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
- return 0;
+ return regmap_update_bits(rv3029->regmap, RV3029_IRQ_CTRL,
+ RV3029_IRQ_CTRL_AIE,
+ enable ? RV3029_IRQ_CTRL_AIE : 0);
}
static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
struct rtc_time *const tm = &alarm->time;
int ret;
u8 regs[8];
- /*
- * The clock has an 8 bit wide bcd-coded register (they never learn)
- * for the year. tm_year is an offset from 1900 and we are interested
- * in the 2000-2099 range, so any value less than 100 is invalid.
- */
- if (tm->tm_year < 100)
- return -EINVAL;
-
- ret = rv3029_get_sr(dev, regs);
- if (ret < 0) {
- dev_err(dev, "%s: reading SR failed\n", __func__);
- return -EIO;
- }
-
/* Activate all the alarms with AE_x bit */
regs[RV3029_A_SC - RV3029_A_SC] = bin2bcd(tm->tm_sec) | RV3029_A_AE_X;
regs[RV3029_A_MN - RV3029_A_SC] = bin2bcd(tm->tm_min) | RV3029_A_AE_X;
@@ -515,39 +411,20 @@ static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
| RV3029_A_AE_X;
/* Write the alarm */
- ret = rv3029_write_regs(dev, RV3029_A_SC, regs,
+ ret = regmap_bulk_write(rv3029->regmap, RV3029_A_SC, regs,
RV3029_ALARM_SECTION_LEN);
if (ret < 0)
return ret;
- if (alarm->enabled) {
- /* enable AIE irq */
- ret = rv3029_alarm_irq_enable(dev, 1);
- if (ret)
- return ret;
- } else {
- /* disable AIE irq */
- ret = rv3029_alarm_irq_enable(dev, 0);
- if (ret)
- return ret;
- }
-
- return 0;
+ return rv3029_alarm_irq_enable(dev, alarm->enabled);
}
static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
u8 regs[8];
int ret;
- /*
- * The clock has an 8 bit wide bcd-coded register (they never learn)
- * for the year. tm_year is an offset from 1900 and we are interested
- * in the 2000-2099 range, so any value less than 100 is invalid.
- */
- if (tm->tm_year < 100)
- return -EINVAL;
-
regs[RV3029_W_SEC - RV3029_W_SEC] = bin2bcd(tm->tm_sec);
regs[RV3029_W_MINUTES - RV3029_W_SEC] = bin2bcd(tm->tm_min);
regs[RV3029_W_HOURS - RV3029_W_SEC] = bin2bcd(tm->tm_hour);
@@ -556,24 +433,55 @@ static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
regs[RV3029_W_DAYS - RV3029_W_SEC] = bin2bcd(tm->tm_wday + 1) & 0x7;
regs[RV3029_W_YEARS - RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
- ret = rv3029_write_regs(dev, RV3029_W_SEC, regs,
+ ret = regmap_bulk_write(rv3029->regmap, RV3029_W_SEC, regs,
RV3029_WATCH_SECTION_LEN);
if (ret < 0)
return ret;
- ret = rv3029_get_sr(dev, regs);
- if (ret < 0) {
- dev_err(dev, "%s: reading SR failed\n", __func__);
- return ret;
- }
- /* clear PON bit */
- ret = rv3029_set_sr(dev, (regs[0] & ~RV3029_STATUS_PON));
- if (ret < 0) {
- dev_err(dev, "%s: reading SR failed\n", __func__);
- return ret;
+ /* clear PON and VLOW2 bits */
+ return regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+ RV3029_STATUS_PON | RV3029_STATUS_VLOW2, 0);
+}
+
+static int rv3029_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+ unsigned long vl = 0;
+ int sr, ret = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(rv3029->regmap, RV3029_STATUS, &sr);
+ if (ret < 0)
+ return ret;
+
+ if (sr & RV3029_STATUS_VLOW1)
+ vl = RTC_VL_ACCURACY_LOW;
+
+ if (sr & (RV3029_STATUS_VLOW2 | RV3029_STATUS_PON))
+ vl |= RTC_VL_DATA_INVALID;
+
+ return put_user(vl, (unsigned int __user *)arg);
+
+ case RTC_VL_CLR:
+ return regmap_update_bits(rv3029->regmap, RV3029_STATUS,
+ RV3029_STATUS_VLOW1, 0);
+
+ default:
+ return -ENOIOCTLCMD;
}
+}
- return 0;
+static int rv3029_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return regmap_bulk_write(priv, RV3029_RAM_PAGE + offset, val, bytes);
+}
+
+static int rv3029_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return regmap_bulk_read(priv, RV3029_RAM_PAGE + offset, val, bytes);
}
static const struct rv3029_trickle_tab_elem {
@@ -635,6 +543,7 @@ static const struct rv3029_trickle_tab_elem {
static void rv3029_trickle_config(struct device *dev)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
struct device_node *of_node = dev->of_node;
const struct rv3029_trickle_tab_elem *elem;
int i, err;
@@ -661,7 +570,7 @@ static void rv3029_trickle_config(struct device *dev)
"Trickle charger enabled at %d ohms resistance.\n",
elem->r);
}
- err = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
+ err = rv3029_eeprom_update_bits(rv3029, RV3029_CONTROL_E2P_EECTRL,
RV3029_TRICKLE_MASK,
trickle_set_bits);
if (err < 0)
@@ -670,12 +579,12 @@ static void rv3029_trickle_config(struct device *dev)
#ifdef CONFIG_RTC_DRV_RV3029_HWMON
-static int rv3029_read_temp(struct device *dev, int *temp_mC)
+static int rv3029_read_temp(struct rv3029_data *rv3029, int *temp_mC)
{
+ unsigned int temp;
int ret;
- u8 temp;
- ret = rv3029_read_regs(dev, RV3029_TEMP_PAGE, &temp, 1);
+ ret = regmap_read(rv3029->regmap, RV3029_TEMP_PAGE, &temp);
if (ret < 0)
return ret;
@@ -688,9 +597,10 @@ static ssize_t rv3029_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
int ret, temp_mC;
- ret = rv3029_read_temp(dev, &temp_mC);
+ ret = rv3029_read_temp(rv3029, &temp_mC);
if (ret < 0)
return ret;
@@ -702,9 +612,10 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
const char *buf,
size_t count)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+ unsigned int th_set_bits = 0;
unsigned long interval_ms;
int ret;
- u8 th_set_bits = 0;
ret = kstrtoul(buf, 10, &interval_ms);
if (ret < 0)
@@ -715,7 +626,7 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
if (interval_ms >= 16000)
th_set_bits |= RV3029_EECTRL_THP;
}
- ret = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
+ ret = rv3029_eeprom_update_bits(rv3029, RV3029_CONTROL_E2P_EECTRL,
RV3029_EECTRL_THE | RV3029_EECTRL_THP,
th_set_bits);
if (ret < 0)
@@ -728,10 +639,11 @@ static ssize_t rv3029_hwmon_show_update_interval(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
int ret, interval_ms;
u8 eectrl;
- ret = rv3029_eeprom_read(dev, RV3029_CONTROL_E2P_EECTRL,
+ ret = rv3029_eeprom_read(rv3029, RV3029_CONTROL_E2P_EECTRL,
&eectrl, 1);
if (ret < 0)
return ret;
@@ -785,14 +697,23 @@ static void rv3029_hwmon_register(struct device *dev, const char *name)
static struct rtc_class_ops rv3029_rtc_ops = {
.read_time = rv3029_read_time,
.set_time = rv3029_set_time,
+ .ioctl = rv3029_ioctl,
};
static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name)
{
struct rv3029_data *rv3029;
+ struct nvmem_config nvmem_cfg = {
+ .name = "rv3029_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = RV3029_RAM_SECTION_LEN,
+ .type = NVMEM_TYPE_BATTERY_BACKED,
+ .reg_read = rv3029_nvram_read,
+ .reg_write = rv3029_nvram_write,
+ };
int rc = 0;
- u8 buf[1];
rv3029 = devm_kzalloc(dev, sizeof(*rv3029), GFP_KERNEL);
if (!rv3029)
@@ -803,21 +724,12 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
rv3029->dev = dev;
dev_set_drvdata(dev, rv3029);
- rc = rv3029_get_sr(dev, buf);
- if (rc < 0) {
- dev_err(dev, "reading status failed\n");
- return rc;
- }
-
rv3029_trickle_config(dev);
rv3029_hwmon_register(dev, name);
- rv3029->rtc = devm_rtc_device_register(dev, name, &rv3029_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rv3029->rtc)) {
- dev_err(dev, "unable to register the class device\n");
+ rv3029->rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rv3029->rtc))
return PTR_ERR(rv3029->rtc);
- }
if (rv3029->irq > 0) {
rc = devm_request_threaded_irq(dev, rv3029->irq,
@@ -834,20 +746,48 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
}
}
+ rv3029->rtc->ops = &rv3029_rtc_ops;
+ rv3029->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rv3029->rtc->range_max = RTC_TIMESTAMP_END_2079;
+
+ rc = rtc_register_device(rv3029->rtc);
+ if (rc)
+ return rc;
+
+ nvmem_cfg.priv = rv3029->regmap;
+ rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
+
return 0;
}
+static const struct regmap_range rv3029_holes_range[] = {
+ regmap_reg_range(0x05, 0x07),
+ regmap_reg_range(0x0f, 0x0f),
+ regmap_reg_range(0x17, 0x17),
+ regmap_reg_range(0x1a, 0x1f),
+ regmap_reg_range(0x21, 0x27),
+ regmap_reg_range(0x34, 0x37),
+};
+
+static const struct regmap_access_table rv3029_regs = {
+ .no_ranges = rv3029_holes_range,
+ .n_no_ranges = ARRAY_SIZE(rv3029_holes_range),
+};
+
+static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &rv3029_regs,
+ .wr_table = &rv3029_regs,
+ .max_register = 0x3f,
+};
+
#if IS_ENABLED(CONFIG_I2C)
static int rv3029_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regmap *regmap;
- static const struct regmap_config config = {
- .reg_bits = 8,
- .val_bits = 8,
- };
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
I2C_FUNC_SMBUS_BYTE)) {
dev_err(&client->dev, "Adapter does not support SMBUS_I2C_BLOCK or SMBUS_I2C_BYTE\n");
@@ -855,11 +795,8 @@ static int rv3029_i2c_probe(struct i2c_client *client,
}
regmap = devm_regmap_init_i2c(client, &config);
- if (IS_ERR(regmap)) {
- dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
- __func__, PTR_ERR(regmap));
+ if (IS_ERR(regmap))
return PTR_ERR(regmap);
- }
return rv3029_probe(&client->dev, regmap, client->irq, client->name);
}
@@ -873,24 +810,20 @@ MODULE_DEVICE_TABLE(i2c, rv3029_id);
static const struct of_device_id rv3029_of_match[] = {
{ .compatible = "microcrystal,rv3029" },
- /* Backward compatibility only, do not use compatibles below: */
- { .compatible = "rv3029" },
- { .compatible = "rv3029c2" },
- { .compatible = "mc,rv3029c2" },
{ }
};
MODULE_DEVICE_TABLE(of, rv3029_of_match);
static struct i2c_driver rv3029_driver = {
.driver = {
- .name = "rtc-rv3029c2",
+ .name = "rv3029",
.of_match_table = of_match_ptr(rv3029_of_match),
},
.probe = rv3029_i2c_probe,
.id_table = rv3029_id,
};
-static int rv3029_register_driver(void)
+static int __init rv3029_register_driver(void)
{
return i2c_add_driver(&rv3029_driver);
}
@@ -902,7 +835,7 @@ static void rv3029_unregister_driver(void)
#else
-static int rv3029_register_driver(void)
+static int __init rv3029_register_driver(void)
{
return 0;
}
@@ -917,18 +850,11 @@ static void rv3029_unregister_driver(void)
static int rv3049_probe(struct spi_device *spi)
{
- static const struct regmap_config config = {
- .reg_bits = 8,
- .val_bits = 8,
- };
struct regmap *regmap;
regmap = devm_regmap_init_spi(spi, &config);
- if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
- __func__, PTR_ERR(regmap));
+ if (IS_ERR(regmap))
return PTR_ERR(regmap);
- }
return rv3029_probe(&spi->dev, regmap, spi->irq, "rv3049");
}
@@ -940,24 +866,24 @@ static struct spi_driver rv3049_driver = {
.probe = rv3049_probe,
};
-static int rv3049_register_driver(void)
+static int __init rv3049_register_driver(void)
{
return spi_register_driver(&rv3049_driver);
}
-static void rv3049_unregister_driver(void)
+static void __exit rv3049_unregister_driver(void)
{
spi_unregister_driver(&rv3049_driver);
}
#else
-static int rv3049_register_driver(void)
+static int __init rv3049_register_driver(void)
{
return 0;
}
-static void rv3049_unregister_driver(void)
+static void __exit rv3049_unregister_driver(void)
{
}
@@ -968,16 +894,12 @@ static int __init rv30x9_init(void)
int ret;
ret = rv3029_register_driver();
- if (ret) {
- pr_err("Failed to register rv3029 driver: %d\n", ret);
+ if (ret)
return ret;
- }
ret = rv3049_register_driver();
- if (ret) {
- pr_err("Failed to register rv3049 driver: %d\n", ret);
+ if (ret)
rv3029_unregister_driver();
- }
return ret;
}
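The nvmem registration at the end of probe exposes the chip's 8 bytes of battery-backed user RAM through the nvmem core. A userspace sketch of reading it back, assuming this is the first nvmem device registered under the "rv3029_nvram" name:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];	/* RV3029_RAM_SECTION_LEN bytes */
		FILE *f = fopen(
			"/sys/bus/nvmem/devices/rv3029_nvram0/nvmem", "rb");

		if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf))
			return 1;
		fclose(f);
		return 0;
	}

Reads and writes on that file land in RV3029_RAM_PAGE via the regmap callbacks registered above.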
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 4960f0a2b249..93c3a6b627bd 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -411,6 +411,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct i2c_client *client = to_i2c_client(dev);
struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+ unsigned int vl = 0;
int flags, ret = 0;
switch (cmd) {
@@ -419,18 +420,15 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
if (flags < 0)
return flags;
- if (flags & RV8803_FLAG_V1F)
+ if (flags & RV8803_FLAG_V1F) {
dev_warn(&client->dev, "Voltage low, temperature compensation stopped.\n");
+ vl = RTC_VL_ACCURACY_LOW;
+ }
if (flags & RV8803_FLAG_V2F)
- dev_warn(&client->dev, "Voltage low, data loss detected.\n");
-
- flags &= RV8803_FLAG_V1F | RV8803_FLAG_V2F;
+ vl |= RTC_VL_DATA_INVALID;
- if (copy_to_user((void __user *)arg, &flags, sizeof(int)))
- return -EFAULT;
-
- return 0;
+ return put_user(vl, (unsigned int __user *)arg);
case RTC_VL_CLR:
mutex_lock(&rv8803->flags_lock);
@@ -440,7 +438,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
return flags;
}
- flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
+ flags &= ~RV8803_FLAG_V1F;
ret = rv8803_write_reg(client, RV8803_FLAG, flags);
mutex_unlock(&rv8803->flags_lock);
if (ret)
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 8102469e27c0..fe010151ec8f 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -389,9 +389,8 @@ static int rx8010_alarm_irq_enable(struct device *dev,
static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
- struct i2c_client *client = to_i2c_client(dev);
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
- int ret, tmp;
+ int tmp;
int flagreg;
switch (cmd) {
@@ -400,24 +399,8 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
if (flagreg < 0)
return flagreg;
- tmp = !!(flagreg & RX8010_FLAG_VLF);
- if (copy_to_user((void __user *)arg, &tmp, sizeof(int)))
- return -EFAULT;
-
- return 0;
-
- case RTC_VL_CLR:
- flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG);
- if (flagreg < 0) {
- return flagreg;
- }
-
- flagreg &= ~RX8010_FLAG_VLF;
- ret = i2c_smbus_write_byte_data(client, RX8010_FLAG, flagreg);
- if (ret < 0)
- return ret;
-
- return 0;
+ tmp = flagreg & RX8010_FLAG_VLF ? RTC_VL_DATA_INVALID : 0;
+ return put_user(tmp, (unsigned int __user *)arg);
default:
return -ENOIOCTLCMD;
@@ -482,7 +465,7 @@ static int rx8010_probe(struct i2c_client *client,
rx8010->rtc->max_user_freq = 1;
- return err;
+ return 0;
}
static struct i2c_driver rx8010_driver = {
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b9bda10589e0..a24f85893f90 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -67,7 +67,6 @@ static const struct i2c_device_id rx8025_id[] = {
MODULE_DEVICE_TABLE(i2c, rx8025_id);
struct rx8025_data {
- struct i2c_client *client;
struct rtc_device *rtc;
u8 ctrl1;
};
@@ -103,10 +102,10 @@ static s32 rx8025_write_regs(const struct i2c_client *client,
static int rx8025_check_validity(struct device *dev)
{
- struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
int ctrl2;
- ctrl2 = rx8025_read_reg(rx8025->client, RX8025_REG_CTRL2);
+ ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
return ctrl2;
@@ -178,6 +177,7 @@ out:
static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 date[7];
int err;
@@ -186,7 +186,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
if (err)
return err;
- err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+ err = rx8025_read_regs(client, RX8025_REG_SEC, 7, date);
if (err)
return err;
@@ -211,6 +211,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 date[7];
int ret;
@@ -237,11 +238,11 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
dev_dbg(dev, "%s: write %7ph\n", __func__, date);
- ret = rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+ ret = rx8025_write_regs(client, RX8025_REG_SEC, 7, date);
if (ret < 0)
return ret;
- return rx8025_reset_validity(rx8025->client);
+ return rx8025_reset_validity(client);
}
static int rx8025_init_client(struct i2c_client *client)
@@ -251,7 +252,7 @@ static int rx8025_init_client(struct i2c_client *client)
int need_clear = 0;
int err;
- err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl);
+ err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
if (err)
goto out;
@@ -280,8 +281,8 @@ out:
/* Alarm support */
static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
- struct i2c_client *client = rx8025->client;
u8 ald[2];
int ctrl2, err;
@@ -347,18 +348,18 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) {
rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
- err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ err = rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1);
if (err)
return err;
}
- err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald);
+ err = rx8025_write_regs(client, RX8025_REG_ALDMIN, 2, ald);
if (err)
return err;
if (t->enabled) {
rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE;
- err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ err = rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1);
if (err)
return err;
@@ -369,6 +370,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct rx8025_data *rx8025 = dev_get_drvdata(dev);
u8 ctrl1;
int err;
@@ -381,7 +383,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (ctrl1 != rx8025->ctrl1) {
rx8025->ctrl1 = ctrl1;
- err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ err = rx8025_write_reg(client, RX8025_REG_CTRL1,
rx8025->ctrl1);
if (err)
return err;
@@ -516,7 +518,6 @@ static int rx8025_probe(struct i2c_client *client,
if (!rx8025)
return -ENOMEM;
- rx8025->client = client;
i2c_set_clientdata(client, rx8025);
err = rx8025_init_client(client);
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 579b3ff5c644..feb1f8e52c00 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -504,7 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc->res))
return -EBUSY;
- rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start,
+ rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start,
rtc->regsize);
if (unlikely(!rtc->regbase))
return -EINVAL;
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 781cabb2afca..d774aa18f57a 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -897,8 +897,11 @@ static int stm32_rtc_resume(struct device *dev)
}
ret = stm32_rtc_wait_sync(rtc);
- if (ret < 0)
+ if (ret < 0) {
+ if (rtc->data->has_pclk)
+ clk_disable_unprepare(rtc->pclk);
return ret;
+ }
if (device_may_wakeup(dev))
return disable_irq_wake(rtc->irq_alarm);
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 8dcd20b34dde..852f5f3b3592 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
sun50i_h6_rtc_clk_init);
+/*
+ * The R40 user manual is self-contradictory on whether the prescaler is
+ * fixed or configurable. The clock diagram shows it as fixed, but there
+ * is also a configurable divider in the RTC block.
+ */
+static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
+ .rc_osc_rate = 16000000,
+ .fixed_prescaler = 512,
+};
+static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
+{
+ sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
+ sun8i_r40_rtc_clk_init);
+
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 859d901fa6cb..e39af2d67051 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/tps6586x.h>
#include <linux/module.h>
@@ -267,6 +268,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
rtc->rtc->set_start_time = true;
+ irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);
+
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps6586x_rtc_irq,
IRQF_ONESHOT,
@@ -276,7 +279,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
rtc->irq, ret);
goto fail_rtc_register;
}
- disable_irq(rtc->irq);
ret = rtc_register_device(rtc->rtc);
if (ret)
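The tps6586x hunk above closes a probe-time race: the IRQ used to be requested live and only disabled afterwards, so the handler could fire in the window before the RTC device was registered. Setting IRQ_NOAUTOEN first makes the request itself leave the line masked. A short ordering sketch (kernel API fragment for illustration only; the last two arguments of the request call are assumed here, the real ones are elided in the hunk):

	irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);	/* must come first */
	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
					tps6586x_rtc_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), rtc);
	/* the IRQ stays masked until an explicit enable_irq(rtc->irq) */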
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 539690568298..5786866c09e9 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -94,7 +94,7 @@ static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
* RTC has updated the CURRENT_TIME with the time written into
* SET_TIME_WRITE register.
*/
- rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+ read_time = readl(xrtcdev->reg_base + RTC_CUR_TM);
} else {
/*
* Time written in SET_TIME_WRITE has not yet updated into
@@ -104,8 +104,8 @@ static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
* reading.
*/
read_time = readl(xrtcdev->reg_base + RTC_SET_TM_RD) - 1;
- rtc_time64_to_tm(read_time, tm);
}
+ rtc_time64_to_tm(read_time, tm);
return 0;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c94184d080f8..a28b9ff82378 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
- int tpm, mdc;
+ unsigned int mdc;
+ int tpm;
if (dasd_nofcx)
return 0;
@@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
mdc = ccw_device_get_mdc(device->cdev, 0);
- if (mdc < 0) {
+ if (mdc == 0) {
dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
return 0;
} else {
@@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
- int mdc;
+ unsigned int mdc;
u32 fcx_max_data;
if (private->fcx_max_data) {
mdc = ccw_device_get_mdc(device->cdev, lpm);
- if ((mdc < 0)) {
+ if (mdc == 0) {
dev_warn(&device->cdev->dev,
"Detecting the maximum data size for zHPF "
"requests failed (rc=%d) for a new path %x\n",
@@ -2073,7 +2074,7 @@ out_err2:
dasd_free_block(device->block);
device->block = NULL;
out_err1:
- kfree(private->conf_data);
+ dasd_eckd_clear_conf_data(device);
kfree(device->private);
device->private = NULL;
return rc;
@@ -2082,7 +2083,6 @@ out_err1:
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- int i;
if (!private)
return;
@@ -2092,21 +2092,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->sneq = NULL;
private->vdsneq = NULL;
private->gneq = NULL;
- private->conf_len = 0;
- for (i = 0; i < 8; i++) {
- kfree(device->path[i].conf_data);
- if ((__u8 *)device->path[i].conf_data ==
- private->conf_data) {
- private->conf_data = NULL;
- private->conf_len = 0;
- }
- device->path[i].conf_data = NULL;
- device->path[i].cssid = 0;
- device->path[i].ssid = 0;
- device->path[i].chpid = 0;
- }
- kfree(private->conf_data);
- private->conf_data = NULL;
+ dasd_eckd_clear_conf_data(device);
}
static struct dasd_ccw_req *
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index 8f75df06e893..45ddabec4017 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -2,7 +2,7 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 1770b99f607e..62a859ea67f8 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -5,7 +5,7 @@
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2002
*
* /proc interface for the dasd driver.
*
@@ -320,13 +320,12 @@ out_error:
#endif /* CONFIG_DASD_PROFILE */
}
-static const struct file_operations dasd_stats_proc_fops = {
- .owner = THIS_MODULE,
- .open = dasd_stats_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = dasd_stats_proc_write,
+static const struct proc_ops dasd_stats_proc_ops = {
+ .proc_open = dasd_stats_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = dasd_stats_proc_write,
};
/*
@@ -347,7 +346,7 @@ dasd_proc_init(void)
dasd_statistics_entry = proc_create("statistics",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
- &dasd_stats_proc_fops);
+ &dasd_stats_proc_ops);
if (!dasd_statistics_entry)
goto out_nostatistics;
return 0;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 2a3f874a21d5..da642e811f7f 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -398,12 +398,12 @@ cio_ignore_proc_open(struct inode *inode, struct file *file)
sizeof(struct ccwdev_iter));
}
-static const struct file_operations cio_ignore_proc_fops = {
- .open = cio_ignore_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
- .write = cio_ignore_write,
+static const struct proc_ops cio_ignore_proc_ops = {
+ .proc_open = cio_ignore_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = cio_ignore_write,
};
static int
@@ -412,7 +412,7 @@ cio_ignore_proc_init (void)
struct proc_dir_entry *entry;
entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
- &cio_ignore_proc_fops);
+ &cio_ignore_proc_ops);
if (!entry)
return -ENOENT;
return 0;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 831850435c23..94edbb33d0d1 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1372,18 +1372,17 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
return ret ? ret : count;
}
-static const struct file_operations cio_settle_proc_fops = {
- .open = nonseekable_open,
- .write = cio_settle_write,
- .llseek = no_llseek,
+static const struct proc_ops cio_settle_proc_ops = {
+ .proc_open = nonseekable_open,
+ .proc_write = cio_settle_write,
+ .proc_lseek = no_llseek,
};
static int __init cio_settle_init(void)
{
struct proc_dir_entry *entry;
- entry = proc_create("cio_settle", S_IWUSR, NULL,
- &cio_settle_proc_fops);
+ entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
if (!entry)
return -ENOMEM;
return 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 65841af15748..ccecf6b9504e 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
* @mask: mask of paths to use
*
 * Return the number of 64K-byte blocks that all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
*/
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 4b0798472643..ff74eb5fce50 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,11 +182,9 @@ enum qdio_queue_irq_states {
};
struct qdio_input_q {
- /* input buffer acknowledgement flag */
- int polling;
/* first ACK'ed buffer */
int ack_start;
- /* how much sbals are acknowledged with qebsm */
+ /* how many SBALs are acknowledged */
int ack_count;
/* last time of noticing incoming data */
u64 timestamp;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 35410e6eda2e..9c0370b27426 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -124,9 +124,8 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "nr_used: %d ftc: %d\n",
atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
- seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
- q->u.in.polling, q->u.in.ack_start,
- q->u.in.ack_count);
+ seq_printf(m, "ack start: %d ack count: %d\n",
+ q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
*(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_QUEUE_IRQS_DISABLED,
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index f8b897b7e78b..3475317c42e5 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -393,19 +393,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
static inline void qdio_stop_polling(struct qdio_q *q)
{
- if (!q->u.in.polling)
+ if (!q->u.in.ack_count)
return;
- q->u.in.polling = 0;
qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
- if (is_qebsm(q)) {
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
- q->u.in.ack_count = 0;
- } else
- set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = 0;
}
static inline void account_sbals(struct qdio_q *q, unsigned int count)
@@ -451,8 +447,7 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start,
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
- if (!q->u.in.polling) {
- q->u.in.polling = 1;
+ if (!q->u.in.ack_count) {
q->u.in.ack_count = count;
q->u.in.ack_start = start;
return;
@@ -471,12 +466,12 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start,
* or by the next inbound run.
*/
new = add_buf(start, count - 1);
- if (q->u.in.polling) {
+ if (q->u.in.ack_count) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
} else {
- q->u.in.polling = 1;
+ q->u.in.ack_count = 1;
set_buf_state(q, new, SLSB_P_INPUT_ACK);
}
@@ -1479,13 +1474,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
qperf_inc(q, inbound_call);
- if (!q->u.in.polling)
+ if (!q->u.in.ack_count)
goto set;
/* protect against stop polling setting an ACK for an emptied slsb */
if (count == QDIO_MAX_BUFFERS_PER_Q) {
/* overwriting everything, just delete polling status */
- q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
@@ -1495,15 +1489,14 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
diff = sub_buf(diff, q->u.in.ack_start);
q->u.in.ack_count -= diff;
if (q->u.in.ack_count <= 0) {
- q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
}
q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
+ } else {
+ /* the only ACK will be deleted */
+ q->u.in.ack_count = 0;
}
- else
- /* the only ACK will be deleted, so stop polling */
- q->u.in.polling = 0;
}
set:
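The qdio hunks above remove the boolean u.in.polling because it only duplicated information already carried by ack_count: the queue is "polling" exactly when at least one buffer is ACKed. A stand-alone illustration of the invariant (plain user-space C, the struct reduced to the two relevant fields):

#include <assert.h>

struct input_q {
	int ack_start;	/* first ACKed buffer */
	int ack_count;	/* 0 now also means "not polling" */
};

static int is_polling(const struct input_q *q)
{
	return q->ack_count != 0;	/* replaces the old ->polling flag */
}

int main(void)
{
	struct input_q q = { .ack_start = 5, .ack_count = 0 };

	assert(!is_polling(&q));
	q.ack_count = 1;	/* one buffer ACKed, as inbound_primed() does */
	assert(is_polling(&q));
	q.ack_count = 0;	/* qdio_stop_polling() / handle_inbound() path */
	assert(!is_polling(&q));
	return 0;
}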
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index dc430bd86ade..3ab8e80d7bbc 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -536,7 +536,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
int qdio_enable_async_operation(struct qdio_output_q *outq)
{
outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!outq->aobs) {
outq->use_cq = 0;
return -ENOMEM;
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index 30162a318a8a..f5d31887d413 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Tracepoints for vfio_ccw driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Tracepoints for vfio_ccw driver
*
* Copyright IBM Corp. 2018
*
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 52aa95c8af4b..22d2db690cd3 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -7,7 +7,8 @@ ap-objs := ap_bus.o ap_card.o ap_queue.o
obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a1915061932e..5256e3ce84e5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
- /* (re-)init queue's state machine */
- ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 433b7b64368d..4348fdff1c61 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -162,7 +162,7 @@ struct ap_card {
unsigned int functions; /* AP device function bitfield. */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
- atomic_t total_request_count; /* # requests ever for this AP device.*/
+ atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
@@ -179,7 +179,7 @@ struct ap_queue {
enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
- int total_request_count; /* # requests ever for this AP device.*/
+ u64 total_request_count; /* # requests ever for this AP device.*/
int request_timeout; /* Request timeout in jiffies. */
struct timer_list timeout; /* Timer for request timeouts. */
struct list_head pendingq; /* List of message sent to AP queue. */
@@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 63b4cc6cd7e5..e85bfca1ed16 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
req_cnt = 0;
spin_lock_bh(&ap_list_lock);
- req_cnt = atomic_read(&ac->total_request_count);
+ req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
for_each_ap_queue(aq, ac)
aq->total_request_count = 0;
spin_unlock_bh(&ap_list_lock);
- atomic_set(&ac->total_request_count, 0);
+ atomic64_set(&ac->total_request_count, 0);
return count;
}
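For context on the atomic_t to atomic64_t conversion: a 32-bit request counter wraps after about 4.3 billion requests, which a busy crypto adapter can accumulate. A stand-alone demonstration of the wrap the wider type avoids (plain user-space C, no kernel API involved):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cnt32 = UINT32_MAX;	/* old counter at its limit */
	uint64_t cnt64 = UINT32_MAX;	/* new counter at the same value */

	cnt32++;	/* wraps to 0 */
	cnt64++;	/* keeps counting */
	printf("u32: %u  u64: %llu\n", cnt32, (unsigned long long)cnt64);
	return 0;
}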
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index dad2be333d82..a317ab484932 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->state = AP_STATE_RESET_START;
+ aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->list);
@@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
list_add_tail(&ap_msg->list, &aq->requestq);
aq->requestq_count++;
aq->total_request_count++;
- atomic_inc(&aq->card->total_request_count);
+ atomic64_inc(&aq->card->total_request_count);
/* Send/receive as many request from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
@@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
spin_unlock_bh(&aq->lock);
}
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
+EXPORT_SYMBOL(ap_queue_init_state);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index d78d77686d7b..2f33c5fcf676 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -25,6 +25,7 @@
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
@@ -71,6 +72,17 @@ struct protaeskeytoken {
u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
} __packed;
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear AES key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[0]; /* clear key value */
+} __packed;
+
/*
* Create a protected key from a clear key value.
*/
@@ -173,6 +185,72 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
}
/*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+ u8 *keybuf, size_t *keybuflen)
+{
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+
+ /* build a list of apqns suitable for ep11 keys with cpacf support */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ goto out;
+
+	/* go through the list of apqns and try to build an ep11 key */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+ 0, clrkey, keybuf, keybuflen);
+ if (rc == 0)
+ break;
+ }
+
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+	/* go through the list of apqns and try to derive a pkey */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_key2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len, &pkey->type);
+ if (rc == 0)
+ break;
+ }
+
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
* Verify key and give back some info about the key.
*/
static int pkey_verifykey(const struct pkey_seckey *seckey,
@@ -305,26 +383,90 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
+ int rc = -EINVAL;
+ u8 *tmpbuf = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- struct protaeskeytoken *t;
switch (hdr->version) {
- case TOKVER_PROTECTED_KEY:
- if (keylen != sizeof(struct protaeskeytoken))
- return -EINVAL;
+ case TOKVER_PROTECTED_KEY: {
+ struct protaeskeytoken *t;
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
t = (struct protaeskeytoken *)key;
protkey->len = t->len;
protkey->type = t->keytype;
memcpy(protkey->protkey, t->protkey,
sizeof(protkey->protkey));
-
- return pkey_verifyprotkey(protkey);
+ rc = pkey_verifyprotkey(protkey);
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearaeskeytoken *t;
+ struct pkey_clrkey ckey;
+ union u_tmpbuf {
+ u8 skey[SECKEYBLOBSIZE];
+ u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+ };
+ size_t tmpbuflen = sizeof(union u_tmpbuf);
+
+ if (keylen < sizeof(struct clearaeskeytoken))
+ goto out;
+ t = (struct clearaeskeytoken *)key;
+ if (keylen != sizeof(*t) + t->len)
+ goto out;
+ if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+ || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+ || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+ memcpy(ckey.clrkey, t->clearkey, t->len);
+ else
+ goto out;
+ /* alloc temp key buffer space */
+ tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+ if (!tmpbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* try direct way with the PCKMO instruction */
+ rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+ if (rc == 0)
+ break;
+ /* PCKMO failed, so try the CCA secure key way */
+ rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+ ckey.clrkey, tmpbuf);
+ if (rc == 0)
+ rc = pkey_skey2pkey(tmpbuf, protkey);
+ if (rc == 0)
+ break;
+ /* if the CCA way also failed, let's try via EP11 */
+ rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+ tmpbuf, &tmpbuflen);
+ if (rc == 0)
+ rc = pkey_ep11key2pkey(tmpbuf, protkey);
+		/* by now we should really have a protected key */
+		if (rc)
+			DEBUG_ERR("%s unable to build protected key from clear",
+				  __func__);
+ break;
+ }
+ case TOKVER_EP11_AES: {
+ if (keylen < MINEP11AESKEYBLOBSIZE)
+ goto out;
+ /* check ep11 key for exportable as protected key */
+		/* check that the ep11 key is exportable as a protected key */
+ if (rc)
+ goto out;
+ rc = pkey_ep11key2pkey(key, protkey);
+ break;
+ }
default:
DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
__func__, hdr->version);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+out:
+ kfree(tmpbuf);
+ return rc;
}
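The TOKVER_CLEAR_KEY branch above implements a three-stage fallback for turning a clear AES key into a protected key. A compact stand-alone sketch of that control flow (the stubs stand in for pkey_clr2protkey(), the cca_clr2seckey()/pkey_skey2pkey() pair and the pkey_clr2ep11key()/pkey_ep11key2pkey() pair; their return values here are invented for the demonstration):

#include <stdio.h>

static int try_pckmo(void) { return -1; }	/* direct CPACF instruction */
static int try_cca(void)   { return -1; }	/* CCA secure-key detour */
static int try_ep11(void)  { return 0; }	/* EP11 detour, needs CEX7 */

static int clrkey2protkey(void)
{
	int rc;

	rc = try_pckmo();	/* fastest path, no crypto card needed */
	if (rc)
		rc = try_cca();
	if (rc)
		rc = try_ep11();
	return rc;	/* 0 only if one of the three stages succeeded */
}

int main(void)
{
	printf("rc=%d\n", clrkey2protkey());
	return 0;
}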
/*
@@ -403,6 +545,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (*keybufsize < SECKEYBLOBSIZE)
return -EINVAL;
break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -419,7 +565,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_CCA_DATA) {
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_genaeskey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_genseckey(card, dom, ksize, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
} else /* TOKVER_CCA_VLSC */
@@ -450,6 +599,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (*keybufsize < SECKEYBLOBSIZE)
return -EINVAL;
break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -466,7 +619,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_CCA_DATA) {
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_clr2seckey(card, dom, ksize,
clrkey, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -489,11 +645,11 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- if (keylen < sizeof(struct keytoken_header) ||
- hdr->type != TOKTYPE_CCA_INTERNAL)
+ if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
- if (hdr->version == TOKVER_CCA_AES) {
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -521,7 +677,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -556,6 +713,29 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+ rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_EP11;
+ if (ksize)
+ *ksize = kb->head.keybitlen;
+
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+ if (flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
} else
rc = -EINVAL;
@@ -578,30 +758,32 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
- switch (hdr->type) {
- case TOKTYPE_NON_CCA:
- return pkey_nonccatok2pkey(key, keylen, pkey);
- case TOKTYPE_CCA_INTERNAL:
- switch (hdr->version) {
- case TOKVER_CCA_AES:
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ if (hdr->version == TOKVER_CCA_AES) {
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
return -EINVAL;
- break;
- case TOKVER_CCA_VLSC:
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
return -EINVAL;
- break;
- default:
+ } else {
DEBUG_ERR("%s unknown CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
- break;
- default:
+ } else if (hdr->type == TOKTYPE_NON_CCA) {
+ if (hdr->version == TOKVER_EP11_AES) {
+ if (keylen < sizeof(struct ep11keyblob))
+ return -EINVAL;
+ if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ return pkey_nonccatok2pkey(key, keylen, pkey);
+ }
+ } else {
DEBUG_ERR("%s unknown/unsupported blob type %d\n",
__func__, hdr->type);
return -EINVAL;
@@ -611,12 +793,21 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (hdr->version == TOKVER_CCA_AES)
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES)
rc = cca_sec2protkey(card, dom, key, pkey->protkey,
&pkey->len, &pkey->type);
- else /* TOKVER_CCA_VLSC */
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC)
rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
&pkey->len, &pkey->type);
+ else { /* EP11 AES secure key blob */
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ rc = ep11_key2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len,
+ &pkey->type);
+ }
if (rc == 0)
break;
}
@@ -631,12 +822,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- if (keylen < sizeof(struct keytoken_header) ||
- hdr->type != TOKTYPE_CCA_INTERNAL ||
- flags == 0)
+ if (keylen < sizeof(struct keytoken_header) || flags == 0)
return -EINVAL;
- if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+ if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) {
+ int minhwtype = 0, api = 0;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = EP11_API_V;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
int minhwtype = ZCRYPT_CEX3C;
u64 cur_mkvp = 0, old_mkvp = 0;
@@ -647,7 +850,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
cur_mkvp = t->mkvp;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp;
- } else {
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
minhwtype = ZCRYPT_CEX6;
@@ -655,19 +858,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
cur_mkvp = t->mkvp0;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp0;
+ } else {
+ /* unknown cca internal token type */
+ return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
- }
- *nr_apqns = _nr_apqns;
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
+ *nr_apqns = _nr_apqns;
out:
kfree(_apqns);
@@ -695,14 +903,26 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
minhwtype, cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
- }
- *nr_apqns = _nr_apqns;
+ } else if (ktype == PKEY_TYPE_EP11) {
+ u8 *wkvp = NULL;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, wkvp);
+ if (rc)
+ goto out;
+
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
+ *nr_apqns = _nr_apqns;
out:
kfree(_apqns);
@@ -774,7 +994,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey,
- NULL, &ksp.protkey.type);
+ &ksp.protkey.len, &ksp.protkey.type);
DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
@@ -1357,8 +1577,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off,
size_t count)
{
- size_t keysize;
- int rc;
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = CCACIPHERTOKENSIZE;
if (off != 0 || count < CCACIPHERTOKENSIZE)
return -EINVAL;
@@ -1366,22 +1587,31 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
if (count < 2 * CCACIPHERTOKENSIZE)
return -EINVAL;
- keysize = CCACIPHERTOKENSIZE;
- rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+	/* build a list of apqns able to generate a cipher key */
+ rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX6, 0, 0, 0);
if (rc)
return rc;
- memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
- if (is_xts) {
- keysize = CCACIPHERTOKENSIZE;
- rc = cca_gencipherkey(-1, -1, keybits, 0,
- buf + CCACIPHERTOKENSIZE, &keysize);
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ break;
+ }
if (rc)
return rc;
- memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
- CCACIPHERTOKENSIZE - keysize);
- return 2 * CCACIPHERTOKENSIZE;
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ buf += CCACIPHERTOKENSIZE;
+ rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ return 2 * CCACIPHERTOKENSIZE;
}
return CCACIPHERTOKENSIZE;
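Both generate-and-try loops above use the same APQN list encoding: cca_findcard2() and ep11_findcard2() return u32 entries with the card number in the upper and the domain in the lower 16 bits. A stand-alone illustration of the unpacking (plain C, sample values arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t apqn = (7u << 16) | 23u;	/* card 7, domain 23 */
	uint16_t card = apqn >> 16;
	uint16_t dom = apqn & 0xFFFF;

	printf("card=%u dom=%u\n", (unsigned int)card, (unsigned int)dom);
	return 0;
}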
@@ -1457,10 +1687,134 @@ static struct attribute_group ccacipher_attr_group = {
.bin_attrs = ccacipher_attrs,
};
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+	/* build a list of apqns able to generate an ep11 key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ return rc;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ break;
+ }
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = MAXEP11AESKEYBLOBSIZE;
+ buf += MAXEP11AESKEYBLOBSIZE;
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ return 2 * MAXEP11AESKEYBLOBSIZE;
+ }
+
+ return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
static const struct attribute_group *pkey_attr_groups[] = {
&protkey_attr_group,
&ccadata_attr_group,
&ccacipher_attr_group,
+ &ep11_attr_group,
NULL,
};
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 9157e728a362..56a405dce8bc 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,6 +36,7 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
/*
* Module description.
@@ -605,8 +606,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
- return atomic_read(&zc->card->total_request_count) >
- atomic_read(&pref_zc->card->total_request_count);
+ return atomic64_read(&zc->card->total_request_count) >
+ atomic64_read(&pref_zc->card->total_request_count);
return weight > pref_weight;
}
@@ -849,7 +850,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* check if device is online and eligible */
if (!zq->online ||
!zq->ops->send_cprb ||
- (tdom != (unsigned short) AUTOSELECT &&
+ (tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
@@ -874,7 +875,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid;
- if (*domain == (unsigned short) AUTOSELECT)
+ if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
@@ -901,7 +902,7 @@ static bool is_desired_ep11_card(unsigned int dev_id,
struct ep11_target_dev *targets)
{
while (target_num-- > 0) {
- if (dev_id == targets->ap_id)
+ if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
return true;
targets++;
}
@@ -912,16 +913,19 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
unsigned short target_num,
struct ep11_target_dev *targets)
{
+ int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
while (target_num-- > 0) {
- if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+ if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+ (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
return true;
targets++;
}
return false;
}
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
- struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
+ struct ep11_urb *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -1026,6 +1030,12 @@ out:
return rc;
}
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+ return _zcrypt_send_ep11_cprb(&ap_perms, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
@@ -1216,11 +1226,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
+ u64 cnt;
memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
@@ -1232,8 +1243,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
|| card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[card] = zq->queue->total_request_count;
+ cnt = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
+ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
}
}
local_bh_enable();
@@ -1366,12 +1378,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
@@ -1411,9 +1423,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return 0;
}
case ZCRYPT_PERDEV_REQCNT: {
- int *reqcnt;
+ u32 *reqcnt;
- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
@@ -1470,7 +1482,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
case Z90STAT_PERDEV_REQCNT: {
/* the old ioctl supports only 64 adapters */
- int reqcnt[MAX_ZDEV_CARDIDS];
+ u32 reqcnt[MAX_ZDEV_CARDIDS];
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
@@ -1885,6 +1897,7 @@ void __exit zcrypt_api_exit(void)
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
zcrypt_ccamisc_exit();
+ zcrypt_ep11misc_exit();
zcrypt_debug_exit();
}
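The zcrypt_perdev_reqcnt() change keeps 64-bit counters internally but must still fill the 32-bit array of the existing ioctl ABI, so oversized values are saturated rather than truncated. A minimal stand-alone version of that clamp (plain user-space C):

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_reqcnt(uint64_t cnt)
{
	/* saturate instead of silently dropping the high bits */
	return cnt < UINT32_MAX ? (uint32_t)cnt : UINT32_MAX;
}

int main(void)
{
	printf("%u %u\n", clamp_reqcnt(42),
	       clamp_reqcnt(0x100000000ULL));	/* 42 4294967295 */
	return 0;
}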
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index d464618cd84f..599e68bf53f7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -140,6 +140,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
int zcrypt_device_status_ext(int card, int queue,
struct zcrypt_device_status_ext *devstatus);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index c1db64a2db21..110fe9d0cb91 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
/* do some plausibility checks on the key block */
- if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
- prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
DEBUG_ERR("%s reply with invalid or unknown key block\n",
__func__);
rc = -EIO;
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 77b6cc7b8f82..3a9876d5ab0e 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -19,6 +19,7 @@
/* For TOKTYPE_NON_CCA: */
#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY 0x02 /* Clear key token */
/* For TOKTYPE_CCA_INTERNAL: */
#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index c50f3e86cc74..7cbb384ec535 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
	aq->request_timeout = CEX2A_CLEANUP_TIME;
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 35c7c6672713..c78c0d119806 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_rapq(aq->qid);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
@@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
else
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 442e3d6162f7..9a9d02e19774 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -19,6 +19,7 @@
#include "zcrypt_error.h"
#include "zcrypt_cex4.h"
#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
@@ -71,11 +72,11 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
/*
- * CCA card addditional device attributes
+ * CCA card additional device attributes
*/
-static ssize_t serialnr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
@@ -88,23 +89,25 @@ static ssize_t serialnr_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
}
-static DEVICE_ATTR_RO(serialnr);
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
static struct attribute *cca_card_attrs[] = {
- &dev_attr_serialnr.attr,
+ &dev_attr_cca_serialnr.attr,
NULL,
};
-static const struct attribute_group cca_card_attr_group = {
+static const struct attribute_group cca_card_attr_grp = {
.attrs = cca_card_attrs,
};
-/*
- * CCA queue addditional device attributes
- */
-static ssize_t mkvps_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int n = 0;
struct cca_info ci;
@@ -138,17 +141,233 @@ static ssize_t mkvps_show(struct device *dev,
return n;
}
-static DEVICE_ATTR_RO(mkvps);
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
static struct attribute *cca_queue_attrs[] = {
- &dev_attr_mkvps.attr,
+ &dev_attr_cca_mkvps.attr,
NULL,
};
-static const struct attribute_group cca_queue_attr_group = {
+static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.API_ord_nr > 0)
+ return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+ __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.FW_version > 0)
+ return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+ __ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.serial[0])
+ return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+ __ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+ int mode_bit;
+ const char *mode_txt;
+} ep11_op_modes[] = {
+ { 0, "FIPS2009" },
+ { 1, "BSI2009" },
+ { 2, "FIPS2011" },
+ { 3, "BSI2011" },
+ { 6, "BSICC2017" },
+ { 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += snprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+ __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+ &dev_attr_ep11_api_ordinalnr.attr,
+ &dev_attr_ep11_fw_version.attr,
+ &dev_attr_ep11_serialnr.attr,
+ &dev_attr_ep11_card_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+ .attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cwk_state[] = { "invalid", "valid" };
+ static const char * const nwk_state[] = { "empty", "uncommitted",
+ "committed" };
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ if (di.cur_wk_state == '0') {
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
+ } else if (di.cur_wk_state == '1') {
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
+ bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+ n += 2 * sizeof(di.cur_wkvp);
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+ if (di.new_wk_state == '0') {
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
+ } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
+ bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+ n += 2 * sizeof(di.new_wkvp);
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+ __ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += snprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+ __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+ &dev_attr_ep11_mkvps.attr,
+ &dev_attr_ep11_queue_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+ .attrs = ep11_queue_attrs,
+};
+
/**
* Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
@@ -313,7 +532,12 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
- &cca_card_attr_group);
+ &cca_card_attr_grp);
+ if (rc)
+ zcrypt_card_unregister(zc);
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_card_attr_grp);
if (rc)
zcrypt_card_unregister(zc);
}
@@ -332,7 +556,9 @@ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
struct zcrypt_card *zc = ac->private;
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
- sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+ else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
if (zc)
zcrypt_card_unregister(zc);
}
@@ -381,6 +607,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
	aq->request_timeout = CEX4_CLEANUP_TIME;
aq->private = zq;
@@ -393,7 +620,12 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
- &cca_queue_attr_group);
+ &cca_queue_attr_grp);
+ if (rc)
+ zcrypt_queue_unregister(zq);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_queue_attr_grp);
if (rc)
zcrypt_queue_unregister(zq);
}
@@ -412,7 +644,9 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
struct zcrypt_queue *zq = aq->private;
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
- sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+ else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 000000000000..d4caf46ff9df
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+ int rc = -ENOENT;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+
+ return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+ int found = 0;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&card_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &card_list);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+ struct card_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+ const u8 *key, int keybitsize,
+ int checkcpacfexport)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (kb->head.version != TOKVER_EP11_AES) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ return -EINVAL;
+ }
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ switch (kb->head.keybitlen) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ if (dbg)
+ DBF("%s key check failed, keybitlen %d invalid\n",
+ __func__, (int) kb->head.keybitlen);
+ return -EINVAL;
+ }
+	if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) {
+		if (dbg)
+			DBF("%s key check failed, keybitsize %d\n",
+			    __func__, keybitsize);
+		return -EINVAL;
+	}
+ if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aeskeyblob);
+
+/*
+ * Helper function which calls zcrypt_send_ep11_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user calls within this
+ * function do in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb)
+{
+ int rc;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ rc = zcrypt_send_ep11_cprb(urb);
+ set_fs(old_fs);
+
+ return rc;
+}
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+ size_t len = sizeof(struct ep11_cprb) + payload_len;
+ struct ep11_cprb *cprb;
+
+	cprb = kzalloc(len, GFP_KERNEL);
+	if (!cprb)
+		return NULL;
+
+ cprb->cprb_len = sizeof(struct ep11_cprb);
+ cprb->cprb_ver_id = 0x04;
+ memcpy(cprb->func_id, "T4", 2);
+ cprb->ret_code = 0xFFFFFFFF;
+ cprb->payload_len = payload_len;
+
+ return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 bytes.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+ ptr[0] = tag;
+ if (valuelen > 255) {
+ ptr[1] = 0x82;
+ *((u16 *)(ptr + 2)) = valuelen;
+ memcpy(ptr + 4, pvalue, valuelen);
+ return 4 + valuelen;
+ }
+ if (valuelen > 127) {
+ ptr[1] = 0x81;
+ ptr[2] = (u8) valuelen;
+ memcpy(ptr + 3, pvalue, valuelen);
+ return 3 + valuelen;
+ }
+ ptr[1] = (u8) valuelen;
+ memcpy(ptr + 2, pvalue, valuelen);
+ return 2 + valuelen;
+}
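+
+/*
+ * Encoding sketch (illustrative lengths, not from a real trace):
+ *   5 byte value   -> tag 0x05 v0..v4             (short form)
+ *   200 byte value -> tag 0x81 0xc8 v0..v199      (one length byte)
+ *   300 byte value -> tag 0x82 0x01 0x2c v0..v299 (u16 length, big
+ *                     endian via the native u16 store on s390)
+ * ASN1TAGLEN(x) above yields exactly these total sizes.
+ */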
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+ u8 tag;
+ u8 lenfmt;
+ u16 len;
+ u8 func_tag;
+ u8 func_len;
+ u32 func;
+ u8 dom_tag;
+ u8 dom_len;
+ u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+ size_t pl_size, int api, int func)
+{
+ h->tag = 0x30;
+ h->lenfmt = 0x82;
+ h->len = pl_size - 4;
+ h->func_tag = 0x04;
+ h->func_len = sizeof(u32);
+ h->func = (api << 16) + func;
+ h->dom_tag = 0x04;
+ h->dom_len = sizeof(u32);
+}
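+
+/*
+ * Head layout sketch: the payload then starts with 0x30 0x82 and the
+ * u16 value pl_size - 4; the 4 bytes subtracted are the tag, lenfmt
+ * and len fields themselves. E.g. for the xcp info query below
+ * (api 1, func 38) the func field holds (1 << 16) + 38 = 0x00010026.
+ */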
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+ struct ep11_target_dev *t, int nt,
+ struct ep11_cprb *req, size_t req_len,
+ struct ep11_cprb *rep, size_t rep_len)
+{
+ u->targets = (u8 __user *) t;
+ u->targets_num = nt;
+ u->req = (u8 __user *) req;
+ u->req_len = req_len;
+ u->resp = (u8 __user *) rep;
+ u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+ int len;
+ u32 ret;
+
+ /* start tag */
+ if (*pl++ != 0x30) {
+ DEBUG_ERR("%s reply start tag mismatch\n", func);
+ return -EIO;
+ }
+
+ /* payload length format */
+	if (*pl <= 127) {
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x81) {
+ pl++;
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x82) {
+ pl++;
+ len = *((u16 *)pl);
+ pl += 2;
+ } else {
+ DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
+ return -EIO;
+ }
+
+	/* len should cover at least 3 fields with a 32 bit value each */
+ if (len < 3 * 6) {
+ DEBUG_ERR("%s reply length %d too small\n", func, len);
+ return -EIO;
+ }
+
+ /* function tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s function tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* dom tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* return value tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 2;
+ ret = *((u32 *)pl);
+ if (ret != 0) {
+ DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
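+
+/*
+ * A reply payload passing the checks above looks like this
+ * (sketch, multi byte values big endian):
+ *   0x30 <lenfmt> [<len bytes>]   sequence start
+ *   0x04 0x04 <u32 func>          function field
+ *   0x04 0x04 <u32 dom>           domain field
+ *   0x04 0x04 <u32 ret>           return value, must be 0
+ * followed by the function specific reply data.
+ */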
+
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+ size_t buflen, u8 *buf)
+{
+ struct ep11_info_req_pl {
+ struct pl_head head;
+ u8 query_type_tag;
+ u8 query_type_len;
+ u32 query_type;
+ u8 query_subtype_tag;
+ u8 query_subtype_len;
+ u32 query_subtype;
+ } __packed * req_pl;
+ struct ep11_info_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ int api = 1, rc = -ENOMEM;
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+ req_pl->query_type_tag = 0x04;
+ req_pl->query_type_len = sizeof(u32);
+ req_pl->query_type = query_type;
+ req_pl->query_subtype_tag = 0x04;
+ req_pl->query_subtype_len = sizeof(u32);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+ if (!rep)
+ goto out;
+ rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = cardnr;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > buflen) {
+ DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+ int rc;
+ struct ep11_module_query_info {
+ u32 API_ord_nr;
+ u32 firmware_id;
+ u8 FW_major_vers;
+ u8 FW_minor_vers;
+ u8 CSP_major_vers;
+ u8 CSP_minor_vers;
+ u8 fwid[32];
+ u8 xcp_config_hash[32];
+ u8 CSP_config_hash[32];
+ u8 serial[16];
+ u8 module_date_time[16];
+ u64 op_mode;
+ u32 PKCS11_flags;
+ u32 ext_flags;
+ u32 domains;
+ u32 sym_state_bytes;
+ u32 digest_state_bytes;
+ u32 pin_blob_bytes;
+ u32 SPKI_bytes;
+ u32 priv_key_blob_bytes;
+ u32 sym_blob_bytes;
+ u32 max_payload_bytes;
+ u32 CP_profile_bytes;
+ u32 max_CP_index;
+ } __packed * pmqi = NULL;
+
+ rc = card_cache_fetch(card, info);
+ if (rc || verify) {
+ pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+ if (!pmqi)
+ return -ENOMEM;
+ rc = ep11_query_info(card, AUTOSEL_DOM,
+ 0x01 /* module info query */,
+ sizeof(*pmqi), (u8 *) pmqi);
+ if (rc) {
+ if (rc == -ENODEV)
+ card_cache_scrub(card);
+ goto out;
+ }
+ memset(info, 0, sizeof(*info));
+ info->API_ord_nr = pmqi->API_ord_nr;
+ info->FW_version =
+ (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+ memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+ info->op_mode = pmqi->op_mode;
+ card_cache_update(card, info);
+ }
+
+out:
+ kfree(pmqi);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+ int rc;
+ struct ep11_domain_query_info {
+ u32 dom_index;
+ u8 cur_WK_VP[32];
+ u8 new_WK_VP[32];
+ u32 dom_flags;
+ u64 op_mode;
+ } __packed * p_dom_info;
+
+ p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+ if (!p_dom_info)
+ return -ENOMEM;
+
+ rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+ sizeof(*p_dom_info), (u8 *) p_dom_info);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->cur_wk_state = '0';
+ info->new_wk_state = '0';
+ if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+ if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+ info->cur_wk_state = '1';
+ memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+ }
+ if (p_dom_info->dom_flags & 0x04 /* new wk present */
+ || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+ info->new_wk_state =
+ p_dom_info->dom_flags & 0x08 ? '2' : '1';
+ memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+ }
+ }
+ info->op_mode = p_dom_info->op_mode;
+
+out:
+ kfree(p_dom_info);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
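+
+/*
+ * Presumed bit split of this default value (only the extractable bit
+ * is defined in this code, see EP11_BLOB_PKEY_EXTRACTABLE 0x00200000):
+ * 0x00200c00 = 0x00200000 | 0x00000c00, where the 0x00000c00 part is
+ * assumed to cover the encrypt/decrypt attribute pair.
+ */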
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct keygen_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 keybytes_tag;
+ u8 keybytes_len;
+ u32 keybytes;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_val_len_type;
+ u32 attr_val_len_value;
+ u8 pin_tag;
+ u8 pin_len;
+ } __packed * req_pl;
+ struct keygen_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ int api, rc = -ENOMEM;
+
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct keygen_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ req_pl->keybytes_tag = 0x04;
+ req_pl->keybytes_len = sizeof(u32);
+ req_pl->keybytes = keybitsize / 8;
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32);
+ req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 5 * sizeof(u32);
+ req_pl->attr_header = 0x10010000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ req_pl->pin_tag = 0x04;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.keybitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+ u16 mode, u32 mech, const u8 *iv,
+ const u8 *key, size_t keysize,
+ const u8 *inbuf, size_t inbufsize,
+ u8 *outbuf, size_t *outbufsize)
+{
+ struct crypt_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by key tag + key blob
+ * followed by plaintext tag + plaintext
+ */
+ } __packed * req_pl;
+ struct crypt_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ /* data follows */
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ size_t req_pl_size, rep_pl_size;
+ int n, api = 1, rc = -ENOMEM;
+ u8 *p;
+
+ /* the simple asn1 coding used has length limits */
+ if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+ return -EINVAL;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key and input data */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+ /* reply cprb and payload, assume out data size <= in data size + 32 */
+ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+ rep = alloc_cprb(rep_pl_size);
+ if (!rep)
+ goto out;
+ rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + rep_pl_size);
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+	if (rep_pl->data_lenfmt <= 127) {
+		n = rep_pl->data_lenfmt;
+	} else if (rep_pl->data_lenfmt == 0x81) {
+		n = *p++;
+	} else if (rep_pl->data_lenfmt == 0x82) {
+ n = *((u16 *) p);
+ p += 2;
+ } else {
+ DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
+ rc = -EIO;
+ goto out;
+ }
+ if (n > *outbufsize) {
+ DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(outbuf, p, n);
+ *outbufsize = n;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct uw_req_pl {
+ struct pl_head head;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_key_type;
+ u32 attr_key_type_value;
+ u32 attr_val_len;
+ u32 attr_val_len_value;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by kek tag + kek blob
+ * followed by empty mac tag
+ * followed by empty pin tag
+	 * followed by encrypted key tag + bytes
+ */
+ } __packed * req_pl;
+ struct uw_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 7 * sizeof(u32);
+ req_pl->attr_header = 0x10020000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+ req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+ req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* kek */
+ p += asn1tag_write(p, 0x04, kek, keksize);
+ /* empty mac key tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty pin tag */
+ *p++ = 0x04;
+ *p++ = 0;
+	/* encrypted key value tag and bytes */
+ p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct uw_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.keybitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+ const u8 *key, size_t keysize,
+ u32 mech, const u8 *iv,
+ u8 *databuf, size_t *datasize)
+{
+ struct wk_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * followed by iv data
+ * followed by key tag + key blob
+ * followed by dummy kek param
+ * followed by dummy mac param
+ */
+ } __packed * req_pl;
+ struct wk_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + 4;
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ if (!mech || mech == 0x80060001)
+ req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+ req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+ prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key blob */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ /* maybe the key argument needs the head data cleaned out */
+ kb = (struct ep11keyblob *)(p - keysize);
+ if (kb->head.version == TOKVER_EP11_AES)
+ memset(&kb->head, 0, sizeof(kb->head));
+ /* empty kek tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty mac tag */
+ *p++ = 0x04;
+ *p++ = 0;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct wk_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *datasize) {
+ DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy the data from the cprb to the data buffer */
+ memcpy(databuf, rep_pl->data, rep_pl->data_len);
+ *datasize = rep_pl->data_len;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ struct ep11keyblob *kb;
+ u8 encbuf[64], *kek = NULL;
+ size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+	if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
+		clrkeylen = keybitsize / 8;
+	} else {
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+
+ /* allocate memory for the temp kek */
+ keklen = MAXEP11AESKEYBLOBSIZE;
+ kek = kmalloc(keklen, GFP_ATOMIC);
+ if (!kek) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Step 1: generate AES 256 bit random kek key */
+ rc = ep11_genaeskey(card, domain, 256,
+			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+ kek, &keklen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s generate kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ kb = (struct ep11keyblob *) kek;
+ memset(&kb->head, 0, sizeof(kb->head));
+
+ /* Step 2: encrypt clear key value with the kek key */
+ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+ clrkey, clrkeylen, encbuf, &encbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Step 3: import the encrypted key value as a new key */
+ rc = ep11_unwrapkey(card, domain, kek, keklen,
+ encbuf, encbuflen, 0, def_iv,
+ keybitsize, 0, keybuf, keybufsize);
+ if (rc) {
+ DEBUG_ERR(
+			"%s importing key value as new key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+out:
+ kfree(kek);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
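+
+/*
+ * Usage sketch (clear key value made up, error handling trimmed):
+ *
+ *	u8 clrkey[32] = {}, blob[MAXEP11AESKEYBLOBSIZE];
+ *	size_t bloblen = sizeof(blob);
+ *
+ *	if (!ep11_clr2keyblob(card, domain, 256, 0,
+ *			      clrkey, blob, &bloblen))
+ *		pr_debug("built ep11 key blob with %zu bytes\n", bloblen);
+ */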
+
+int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc = -EIO;
+ u8 *wkbuf = NULL;
+ size_t wkbuflen = 256;
+ struct wk_info {
+ u16 version;
+ u8 res1[16];
+ u32 pkeytype;
+ u32 pkeybitsize;
+ u64 pkeysize;
+ u8 res2[8];
+ u8 pkey[0];
+ } __packed * wki;
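+	/*
+	 * The pkeysize cases below (16+32, 24+32, 32+32) read as clear
+	 * key size plus what is presumably a 32 byte wrapping key
+	 * verification pattern appended to the protected key material.
+	 */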
+
+ /* alloc temp working buffer */
+ wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+ if (!wkbuf)
+ return -ENOMEM;
+
+ /* ep11 secure key -> protected key + info */
+ rc = ep11_wrapkey(card, dom, key, keylen,
+ 0, def_iv, wkbuf, &wkbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ wki = (struct wk_info *) wkbuf;
+
+ /* check struct version and pkey type */
+ if (wki->version != 1 || wki->pkeytype != 1) {
+ DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int) wki->version, (int) wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+	/* copy the translated protected key */
+ switch (wki->pkeysize) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported pkeysize %d\n",
+ __func__, (int) wki->pkeysize);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, wki->pkey, wki->pkeysize);
+ if (protkeylen)
+ *protkeylen = (u32) wki->pkeysize;
+ rc = 0;
+
+out:
+ kfree(wkbuf);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_key2protkey);
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u32 *_apqns = NULL, _nr_apqns = 0;
+ int i, card, dom, rc = -ENOMEM;
+ struct ep11_domain_info edi;
+ struct ep11_card_info eci;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* allocate 1k space for up to 256 apqns */
+ _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!_apqns) {
+ kfree(device_status);
+ return -ENOMEM;
+ }
+
+	/* walk through all the crypto apqns */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for ep11 functions */
+ if (!(device_status[i].functions & 0x01))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* check min hardware type */
+ if (minhwtype && device_status[i].hwtype < minhwtype)
+ continue;
+ /* check min api version if given */
+ if (minapi > 0) {
+ if (ep11_get_card_info(card, &eci, 0))
+ continue;
+ if (minapi > eci.API_ord_nr)
+ continue;
+ }
+ /* check wkvp if given */
+ if (wkvp) {
+ if (ep11_get_domain_info(card, dom, &edi))
+ continue;
+ if (edi.cur_wk_state != '1')
+ continue;
+ if (memcmp(wkvp, edi.cur_wkvp, 16))
+ continue;
+ }
+		/* apqn passed all filtering criteria, add to the array */
+ if (_nr_apqns < 256)
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ }
+
+ /* nothing found ? */
+ if (!_nr_apqns) {
+ kfree(_apqns);
+ rc = -ENODEV;
+ } else {
+		/* no re-allocation, simply return the _apqns array */
+ *apqns = _apqns;
+ *nr_apqns = _nr_apqns;
+ rc = 0;
+ }
+
+ kfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+ card_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 000000000000..e3ed5ed1de86
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob */
+
+#define EP11_API_V 4 /* highest known and supported EP11 API version */
+
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+ union {
+ u8 session[32];
+ struct {
+ u8 type; /* 0x00 (TOKTYPE_NON_CCA) */
+ u8 res0; /* unused */
+ u16 len; /* total length in bytes of this blob */
+			u8 version;	/* 0x03 (TOKVER_EP11_AES) */
+ u8 res1; /* unused */
+ u16 keybitlen; /* clear key bit len, 0 for unknown */
+ } head;
+ };
+ u8 wkvp[16]; /* wrapping key verification pattern */
+ u64 attr; /* boolean key attributes */
+ u64 mode; /* mode bits */
+ u16 version; /* 0x1234, EP11_STRUCT_MAGIC */
+ u8 iv[14];
+ u8 encrypted_key_data[144];
+ u8 mac[32];
+} __packed;
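+
+/*
+ * With __packed the blob above has a fixed size of
+ * 32 (session/head union) + 16 + 8 + 8 + 2 + 14 + 144 + 32 = 256 bytes.
+ */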
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ * If keybitsize is given, the bitsize of the key is also checked.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+ const u8 *key, int keybitsize,
+ int checkcpacfexport);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+ u32 API_ord_nr; /* API ordinal number */
+ u16 FW_version; /* Firmware major and minor version */
+	char serial[16];   /* serial number string (16 ASCII chars, not 0x00 terminated!) */
+ u64 op_mode; /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+ char cur_wk_state; /* '0' invalid, '1' valid */
+ char new_wk_state; /* '0' empty, '1' uncommitted, '2' committed */
+ u8 cur_wkvp[32]; /* current wrapping key verification pattern */
+ u8 new_wkvp[32]; /* new wrapping key verification pattern */
+ u64 op_mode; /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from EP11 AES secure key blob.
+ */
+int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ *  - if wkvp != NULL only apqns where the given wkvp (EP11_WKVPLEN bytes)
+ *    matches the first EP11_WKVPLEN bytes of the wkvp of the current
+ *    wrapping key for this domain. When a wkvp is given there will always
+ *    be a re-fetch of the domain info for the potential apqn - so this
+ *    triggers a request/reply round trip to each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp);
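+
+/*
+ * Usage sketch (card and domain wildcarded, error handling trimmed):
+ *
+ *	u32 *apqns, nr_apqns, i;
+ *
+ *	if (!ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL)) {
+ *		for (i = 0; i < nr_apqns; i++)
+ *			pr_debug("apqn %04x.%04x\n",
+ *				 apqns[i] >> 16, apqns[i] & 0xFFFF);
+ *		kfree(apqns);
+ *	}
+ */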
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 871d44746f5c..9575a627a1e1 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -125,12 +125,6 @@ struct qeth_routing_info {
enum qeth_routing_types type;
};
-/* IPA stuff */
-struct qeth_ipa_info {
- __u32 supported_funcs;
- __u32 enabled_funcs;
-};
-
/* SETBRIDGEPORT stuff */
enum qeth_sbp_roles {
QETH_SBP_ROLE_NONE = 0,
@@ -169,41 +163,6 @@ struct qeth_vnicc_info {
bool rx_bcast_enabled;
};
-static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
- enum qeth_ipa_setadp_cmd func)
-{
- return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
- enum qeth_ipa_funcs func)
-{
- return (ipa->supported_funcs & func);
-}
-
-static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
- enum qeth_ipa_funcs func)
-{
- return (ipa->supported_funcs & ipa->enabled_funcs & func);
-}
-
-#define qeth_adp_supported(c, f) \
- qeth_is_adp_supported(&c->options.adp, f)
-#define qeth_is_supported(c, f) \
- qeth_is_ipa_supported(&c->options.ipa4, f)
-#define qeth_is_enabled(c, f) \
- qeth_is_ipa_enabled(&c->options.ipa4, f)
-#define qeth_is_supported6(c, f) \
- qeth_is_ipa_supported(&c->options.ipa6, f)
-#define qeth_is_enabled6(c, f) \
- qeth_is_ipa_enabled(&c->options.ipa6, f)
-#define qeth_is_ipafunc_supported(c, prot, f) \
- ((prot == QETH_PROT_IPV6) ? \
- qeth_is_supported6(c, f) : qeth_is_supported(c, f))
-#define qeth_is_ipafunc_enabled(c, prot, f) \
- ((prot == QETH_PROT_IPV6) ? \
- qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-
#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
@@ -262,7 +221,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-#define QETH_RX_PULL_LEN 256
struct qeth_hdr_layer3 {
__u8 id;
@@ -600,7 +558,6 @@ enum qeth_channel_states {
*/
enum qeth_card_states {
CARD_STATE_DOWN,
- CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
};
@@ -650,6 +607,8 @@ struct qeth_cmd_buffer {
long timeout;
unsigned char *data;
void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply);
void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned int data_length);
int rc;
@@ -660,6 +619,14 @@ static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
refcount_inc(&iob->ref_count);
}
+static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob)
+{
+ if (!IS_IPA(iob->data))
+ return NULL;
+
+ return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+}
+
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
@@ -735,11 +702,11 @@ enum qeth_discipline_id {
};
struct qeth_card_options {
+ struct qeth_ipa_caps ipa4;
+ struct qeth_ipa_caps ipa6;
struct qeth_routing_info route4;
- struct qeth_ipa_info ipa4;
- struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
- struct qeth_ipa_info ipa6;
+ struct qeth_ipa_caps adp; /* Adapter parameters */
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
@@ -769,12 +736,10 @@ struct qeth_osn_info {
struct qeth_discipline {
const struct device_type *devtype;
- int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
- int (*recover)(void *ptr);
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
- int (*set_online) (struct ccwgroup_device *);
- int (*set_offline) (struct ccwgroup_device *);
+ int (*set_online)(struct qeth_card *card);
+ void (*set_offline)(struct qeth_card *card);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -862,6 +827,13 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
return card->state == CARD_STATE_SOFTSETUP;
}
+static inline void qeth_unlock_channel(struct qeth_card *card,
+ struct qeth_channel *channel)
+{
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
@@ -957,18 +929,6 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
return dst;
}
-static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
- u8 flags)
-{
- if ((card->dev->features & NETIF_F_RXCSUM) &&
- (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- QETH_CARD_STAT_INC(card, rx_skb_csum);
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- }
-}
-
static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
{
*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
@@ -1035,14 +995,11 @@ struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_do_run_thread(struct qeth_card *, unsigned long);
-void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
-void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
int qeth_stop_channel(struct qeth_channel *channel);
+int qeth_set_offline(struct qeth_card *card, bool resetting);
void qeth_print_status_message(struct qeth_card *);
-int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1065,9 +1022,6 @@ struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
- struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
- struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
@@ -1076,9 +1030,11 @@ void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
-void qeth_tx_timeout(struct net_device *);
+void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length);
+ u16 cmd_length,
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply));
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 33a62a6692c0..9639938581f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -247,9 +247,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
QETH_CARD_TEXT(card, 2, "realcbp");
- if (card->state != CARD_STATE_DOWN)
- return -EPERM;
-
	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
qeth_clear_working_pool_list(card);
qeth_free_buffer_pool(card);
@@ -520,11 +517,10 @@ static int __qeth_issue_next_read(struct qeth_card *card)
} else {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
- atomic_set(&channel->irq_pending, 0);
+ qeth_unlock_channel(card, channel);
qeth_put_cmd(iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
- wake_up(&card->wait_q);
}
return rc;
}
@@ -655,17 +651,17 @@ static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
{
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
- if ((buffer[2] & 0xc0) == 0xc0) {
+ if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
buffer[4]);
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
- QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
- if (buffer[4] == 0xf6) {
+ QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
+ if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
+ buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
dev_err(&card->gdev->dev,
- "The qeth device is not configured "
- "for the OSI layer required by z/VM\n");
- return -EPERM;
+ "The device does not support the configured transport mode\n");
+ return -EPROTONOSUPPORT;
}
return -EIO;
}
@@ -742,15 +738,15 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
case 0:
break;
case -EIO:
- qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
/* fall through */
default:
+ qeth_clear_ipacmd_list(card);
goto out;
}
- if (IS_IPA(iob->data)) {
- cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+ cmd = __ipa_reply(iob);
+ if (cmd) {
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
@@ -759,17 +755,12 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
card->osn_info.assist_cb(card->dev, cmd);
goto out;
}
- } else {
- /* non-IPA commands should only flow during initialization */
- if (card->state != CARD_STATE_DOWN)
- goto out;
}
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
- if (!IS_IPA(tmp->data) ||
- __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
+ if (tmp->match && tmp->match(tmp, iob)) {
request = tmp;
/* take the object outside the lock */
qeth_get_cmd(request);
@@ -824,7 +815,8 @@ static int qeth_set_thread_start_bit(struct qeth_card *card,
return 0;
}
-void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_start_bit(struct qeth_card *card,
+ unsigned long thread)
{
unsigned long flags;
@@ -833,9 +825,9 @@ void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
-void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+static void qeth_clear_thread_running_bit(struct qeth_card *card,
+ unsigned long thread)
{
unsigned long flags;
@@ -844,7 +836,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up_all(&card->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
@@ -865,7 +856,7 @@ static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
-int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
int rc = 0;
@@ -873,7 +864,6 @@ int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
(rc = __qeth_do_run_thread(card, thread)) >= 0);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
@@ -881,7 +871,6 @@ void qeth_schedule_recovery(struct qeth_card *card)
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
-EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
struct irb *irb)
@@ -972,8 +961,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* while we hold the ccwdev lock, this stays valid: */
gdev = dev_get_drvdata(&cdev->dev);
card = dev_get_drvdata(&gdev->dev);
- if (!card)
- return;
QETH_CARD_TEXT(card, 5, "irq");
@@ -1003,24 +990,25 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
channel->active_cmd = NULL;
+ qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
if (iob)
qeth_cancel_cmd(iob, rc);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
return;
}
- atomic_set(&channel->irq_pending, 0);
-
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
channel->state = CH_STATE_STOPPED;
+ wake_up(&card->wait_q);
+ }
- if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
channel->state = CH_STATE_HALTED;
+ wake_up(&card->wait_q);
+ }
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
@@ -1054,7 +1042,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
- goto out;
+ return;
}
}
@@ -1062,16 +1050,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
qeth_cancel_cmd(iob, -EIO);
- goto out;
+ return;
}
if (iob->callback)
iob->callback(card, iob,
iob->length - irb->scsw.cmd.count);
}
-
-out:
- wake_up(&card->wait_q);
- return;
}
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
@@ -1198,31 +1182,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
}
}
-static void qeth_clean_channel(struct qeth_channel *channel)
-{
- struct ccw_device *cdev = channel->ccwdev;
-
- QETH_DBF_TEXT(SETUP, 2, "freech");
-
- spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->handler = NULL;
- spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
-static void qeth_setup_channel(struct qeth_channel *channel)
-{
- struct ccw_device *cdev = channel->ccwdev;
-
- QETH_DBF_TEXT(SETUP, 2, "setupch");
-
- channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
-
- spin_lock_irq(get_ccwdev_lock(cdev));
- cdev->handler = qeth_irq;
- spin_unlock_irq(get_ccwdev_lock(cdev));
-}
-
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1318,6 +1277,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
return rc;
}
+static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
struct task_struct *ts;
@@ -1329,8 +1289,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
card->write.state != CH_STATE_UP)
return;
if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
- ts = kthread_run(card->discipline->recover, (void *)card,
- "qeth_recover");
+ ts = kthread_run(qeth_do_reset, card, "qeth_recover");
if (IS_ERR(ts)) {
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card,
@@ -1395,9 +1354,6 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
if (!card->read_cmd)
goto out_read_cmd;
- qeth_setup_channel(&card->read);
- qeth_setup_channel(&card->write);
- qeth_setup_channel(&card->data);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -1467,12 +1423,38 @@ int qeth_stop_channel(struct qeth_channel *channel)
channel->active_cmd);
channel->active_cmd = NULL;
}
+ cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);
+static int qeth_start_channel(struct qeth_channel *channel)
+{
+ struct ccw_device *cdev = channel->ccwdev;
+ int rc;
+
+ channel->state = CH_STATE_DOWN;
+ atomic_set(&channel->irq_pending, 0);
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = qeth_irq;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ rc = ccw_device_set_online(cdev);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = NULL;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ return rc;
+}
+
static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
@@ -1698,6 +1680,13 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card,
iob->callback = qeth_release_buffer_cb;
}
+static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply)
+{
+ /* MPC cmds are issued strictly in sequence. */
+ return !IS_IPA(reply->data);
+}
+
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
void *data,
unsigned int data_length)
@@ -1712,6 +1701,7 @@ static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
iob->data);
iob->finalize = qeth_mpc_finalize_cmd;
+ iob->match = qeth_mpc_match_reply;
return iob;
}
@@ -1784,8 +1774,7 @@ static int qeth_send_control_data(struct qeth_card *card,
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_cmd(card, iob);
qeth_put_cmd(iob);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
+ qeth_unlock_channel(card, channel);
goto out;
}
@@ -2482,50 +2471,46 @@ static int qeth_mpc_initialize(struct qeth_card *card)
rc = qeth_cm_enable(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_cm_setup(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_ulp_enable(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "4err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_ulp_setup(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_alloc_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "5err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_qdio_establish(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
qeth_free_qdio_queues(card);
- goto out_qdio;
+ return rc;
}
rc = qeth_qdio_activate(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "7err%d", rc);
- goto out_qdio;
+ return rc;
}
rc = qeth_dm_act(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
- goto out_qdio;
+ return rc;
}
return 0;
-out_qdio:
- qeth_qdio_clear_card(card, !IS_IQD(card));
- qdio_free(CARD_DDEV(card));
- return rc;
}
void qeth_print_status_message(struct qeth_card *card)
@@ -2636,7 +2621,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
buf->rx_skb = netdev_alloc_skb(card->dev,
- QETH_RX_PULL_LEN + ETH_HLEN);
+ ETH_HLEN +
+ sizeof(struct ipv6hdr));
if (!buf->rx_skb)
return 1;
}
@@ -2677,7 +2663,7 @@ static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}
-int qeth_init_qdio_queues(struct qeth_card *card)
+static int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
int rc;
@@ -2725,7 +2711,6 @@ int qeth_init_qdio_queues(struct qeth_card *card)
}
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
@@ -2737,7 +2722,9 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
}
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length)
+ u16 cmd_length,
+ bool (*match)(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply))
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
@@ -2745,6 +2732,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
+ iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -2757,6 +2745,14 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
+ struct qeth_cmd_buffer *reply)
+{
+ struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
+
+ return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
+}
+
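+/* IPA replies echo the request's hdr.seqno, so, unlike MPC, several
+ * IPA commands may be pending at once and each reply still finds its
+ * originator. __ipa_reply() returns NULL for non-IPA data, which never
+ * matches.
+ */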
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
@@ -2772,7 +2768,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
if (!iob)
return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length);
+ qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
@@ -2871,7 +2867,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
}
- card->options.adp.supported_funcs =
+ card->options.adp.supported =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return 0;
}
@@ -2927,8 +2923,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_CARD_TEXT(card, 2, "ipaunsup");
- card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
- card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+ card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
+ card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
return -EOPNOTSUPP;
default:
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
@@ -2936,13 +2932,11 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return -EIO;
}
- if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
- card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
- card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- } else
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ card->options.ipa4 = cmd->hdr.assists;
+ else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ card->options.ipa6 = cmd->hdr.assists;
+ else
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
CARD_DEVID(card));
return 0;
@@ -3110,7 +3104,6 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
}
return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
-EXPORT_SYMBOL_GPL(qeth_hw_trap);
static int qeth_check_qdio_errors(struct qeth_card *card,
struct qdio_buffer *buf,
@@ -3413,7 +3406,7 @@ static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
struct qeth_card *card = (struct qeth_card *)card_ptr;
if (card->dev->flags & IFF_UP)
- napi_schedule(&card->napi);
+ napi_schedule_irqoff(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
@@ -3429,11 +3422,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- if (card->state != CARD_STATE_DOWN) {
- rc = -1;
- goto out;
- }
-
qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
@@ -4325,7 +4313,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
return rc;
}
-void qeth_tx_timeout(struct net_device *dev)
+void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qeth_card *card;
@@ -4706,7 +4694,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "detcapab");
if (!ddev->online) {
ddev_offline = 1;
- rc = ccw_device_set_online(ddev);
+ rc = qeth_start_channel(channel);
if (rc) {
QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out;
@@ -4881,9 +4869,6 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "freecrd");
- qeth_clean_channel(&card->read);
- qeth_clean_channel(&card->write);
- qeth_clean_channel(&card->data);
qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
unregister_service_level(&card->qeth_service_level);
@@ -4946,13 +4931,14 @@ retry:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
- rc = ccw_device_set_online(CARD_RDEV(card));
+
+ rc = qeth_start_channel(&card->read);
if (rc)
goto retriable;
- rc = ccw_device_set_online(CARD_WDEV(card));
+ rc = qeth_start_channel(&card->write);
if (rc)
goto retriable;
- rc = ccw_device_set_online(CARD_DDEV(card));
+ rc = qeth_start_channel(&card->data);
if (rc)
goto retriable;
retriable:
@@ -5013,9 +4999,9 @@ retriable:
*carrier_ok = true;
}
- card->options.ipa4.supported_funcs = 0;
- card->options.ipa6.supported_funcs = 0;
- card->options.adp.supported_funcs = 0;
+ card->options.ipa4.supported = 0;
+ card->options.ipa6.supported = 0;
+ card->options.adp.supported = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
@@ -5035,10 +5021,8 @@ retriable:
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
- if (rc < 0) {
+ if (rc)
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
- goto out;
- }
}
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
@@ -5049,6 +5033,12 @@ retriable:
if (rc)
goto out;
+ rc = qeth_init_qdio_queues(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "9err%d", rc);
+ goto out;
+ }
+
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5059,6 +5049,204 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
+static int qeth_set_online(struct qeth_card *card)
+{
+ int rc;
+
+ mutex_lock(&card->discipline_mutex);
+ mutex_lock(&card->conf_mutex);
+ QETH_CARD_TEXT(card, 2, "setonlin");
+
+ rc = card->discipline->set_online(card);
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
+}
+
+int qeth_set_offline(struct qeth_card *card, bool resetting)
+{
+ int rc, rc2, rc3;
+
+ mutex_lock(&card->discipline_mutex);
+ mutex_lock(&card->conf_mutex);
+ QETH_CARD_TEXT(card, 3, "setoffl");
+
+ if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
+ qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
+ card->info.hwtrap = 1;
+ }
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ card->discipline->set_offline(card);
+
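+	/* Quiesce all three channels; the first non-zero rc wins. */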
+ rc = qeth_stop_channel(&card->data);
+ rc2 = qeth_stop_channel(&card->write);
+ rc3 = qeth_stop_channel(&card->read);
+ if (!rc)
+ rc = (rc2) ? rc2 : rc3;
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
+
+ /* let user_space know that device is offline */
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_set_offline);
+
+static int qeth_do_reset(void *data)
+{
+ struct qeth_card *card = data;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "recover1");
+ if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
+ return 0;
+ QETH_CARD_TEXT(card, 2, "recover2");
+ dev_warn(&card->gdev->dev,
+ "A recovery process has been started for the device\n");
+
+ qeth_set_offline(card, true);
+ rc = qeth_set_online(card);
+ if (!rc) {
+ dev_info(&card->gdev->dev,
+ "Device successfully recovered!\n");
+ } else {
+ ccwgroup_set_offline(card->gdev);
+ dev_warn(&card->gdev->dev,
+ "The qeth device driver failed to recover an error on the device\n");
+ }
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_QETH_L3)
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr)
+{
+ struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
+ struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
+ struct net_device *dev = skb->dev;
+
+ if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
+ "FAKELL", skb->len);
+ return;
+ }
+
+ if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
+ u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+ ETH_P_IP;
+ unsigned char tg_addr[ETH_ALEN];
+
+ skb_reset_network_header(skb);
+ switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
+ case QETH_CAST_MULTICAST:
+ if (prot == ETH_P_IP)
+ ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+ else
+ ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+ QETH_CARD_STAT_INC(card, rx_multicast);
+ break;
+ case QETH_CAST_BROADCAST:
+ ether_addr_copy(tg_addr, dev->broadcast);
+ QETH_CARD_STAT_INC(card, rx_multicast);
+ break;
+ default:
+ if (card->options.sniffer)
+ skb->pkt_type = PACKET_OTHERHOST;
+ ether_addr_copy(tg_addr, dev->dev_addr);
+ }
+
+ if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
+ dev_hard_header(skb, dev, prot, tg_addr,
+ &l3_hdr->next_hop.rx.src_mac, skb->len);
+ else
+ dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
+ skb->len);
+ }
+
+ /* copy VLAN tag from hdr into skb */
+ if (!card->options.sniffer &&
+ (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+ QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+ u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+ l3_hdr->vlan_id :
+ l3_hdr->next_hop.rx.vlan_id;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
+ }
+}
+#endif
+
+static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr, bool uses_frags)
+{
+ struct napi_struct *napi = &card->napi;
+ bool is_cso;
+
+ switch (hdr->hdr.l2.id) {
+ case QETH_HEADER_TYPE_OSN:
+ skb_push(skb, sizeof(*hdr));
+ skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+ QETH_CARD_STAT_INC(card, rx_packets);
+
+ card->osn_info.data_cb(skb);
+ return;
+#if IS_ENABLED(CONFIG_QETH_L3)
+ case QETH_HEADER_TYPE_LAYER3:
+ qeth_l3_rebuild_skb(card, skb, hdr);
+ is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ break;
+#endif
+ case QETH_HEADER_TYPE_LAYER2:
+ is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ break;
+ default:
+ /* never happens */
+ if (uses_frags)
+ napi_free_frags(napi);
+ else
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QETH_CARD_STAT_INC(card, rx_skb_csum);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
+ QETH_CARD_STAT_INC(card, rx_packets);
+ if (skb_is_nonlinear(skb)) {
+ QETH_CARD_STAT_INC(card, rx_sg_skbs);
+ QETH_CARD_STAT_ADD(card, rx_sg_frags,
+ skb_shinfo(skb)->nr_frags);
+ }
+
+ if (uses_frags) {
+ napi_gro_frags(napi);
+ } else {
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ napi_gro_receive(napi, skb);
+ }
+}
+
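+/* Delivery summary for the above: OSN frames bypass the stack through
+ * osn_info.data_cb(); all other frames go to GRO, with a checksum the
+ * hardware already verified surfaced as CHECKSUM_UNNECESSARY when the
+ * netdev offers NETIF_F_RXCSUM.
+ */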
static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
{
struct page *page = virt_to_page(data);
@@ -5075,17 +5263,20 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
-struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
- struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element **__element, int *__offset,
- struct qeth_hdr **hdr)
+static int qeth_extract_skb(struct qeth_card *card,
+ struct qeth_qdio_buffer *qethbuffer,
+ struct qdio_buffer_element **__element,
+ int *__offset)
{
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
+ struct napi_struct *napi = &card->napi;
unsigned int linear_len = 0;
+ bool uses_frags = false;
int offset = *__offset;
bool use_rx_sg = false;
unsigned int headroom;
+ struct qeth_hdr *hdr;
struct sk_buff *skb;
int skb_len = 0;
@@ -5093,42 +5284,42 @@ next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
- return NULL;
+ return -ENODATA;
element++;
offset = 0;
}
- *hdr = element->addr + offset;
- offset += sizeof(struct qeth_hdr);
+ hdr = element->addr + offset;
+ offset += sizeof(*hdr);
skb = NULL;
- switch ((*hdr)->hdr.l2.id) {
+ switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
- skb_len = (*hdr)->hdr.l2.pkt_length;
+ skb_len = hdr->hdr.l2.pkt_length;
linear_len = ETH_HLEN;
headroom = 0;
break;
case QETH_HEADER_TYPE_LAYER3:
- skb_len = (*hdr)->hdr.l3.length;
+ skb_len = hdr->hdr.l3.length;
if (!IS_LAYER3(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
}
- if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
+ if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
linear_len = ETH_HLEN;
headroom = 0;
break;
}
- if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
+ if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
linear_len = sizeof(struct ipv6hdr);
else
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
case QETH_HEADER_TYPE_OSN:
- skb_len = (*hdr)->hdr.osn.pdu_length;
+ skb_len = hdr->hdr.osn.pdu_length;
if (!IS_OSN(card)) {
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
goto walk_packet;
@@ -5138,13 +5329,13 @@ next_packet:
headroom = sizeof(struct qeth_hdr);
break;
default:
- if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
+ if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
else
QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
/* Can't determine packet length, drop the whole buffer. */
- return NULL;
+ return -EPROTONOSUPPORT;
}
if (skb_len < linear_len) {
@@ -5157,21 +5348,43 @@ next_packet:
!atomic_read(&card->force_alloc_skb) &&
!IS_OSN(card));
- if (use_rx_sg && qethbuffer->rx_skb) {
+ if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
- skb = qethbuffer->rx_skb;
- qethbuffer->rx_skb = NULL;
- } else {
- if (!use_rx_sg)
- linear_len = skb_len;
- skb = napi_alloc_skb(&card->napi, linear_len + headroom);
+ if (qethbuffer->rx_skb &&
+ skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
+ skb = qethbuffer->rx_skb;
+ qethbuffer->rx_skb = NULL;
+ goto use_skb;
+ }
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ /* -ENOMEM, no point in falling back further. */
+ QETH_CARD_STAT_INC(card, rx_dropped_nomem);
+ goto walk_packet;
+ }
+
+ if (skb_tailroom(skb) >= linear_len + headroom) {
+ uses_frags = true;
+ goto use_skb;
+ }
+
+ netdev_info_once(card->dev,
+ "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
+ linear_len + headroom, skb_tailroom(skb));
+ /* Shouldn't happen. Don't optimize, fall back to linear skb. */
}
- if (!skb)
+ linear_len = skb_len;
+ skb = napi_alloc_skb(napi, linear_len + headroom);
+ if (!skb) {
QETH_CARD_STAT_INC(card, rx_dropped_nomem);
- else if (headroom)
- skb_reserve(skb, headroom);
+ goto walk_packet;
+ }
+use_skb:
+ if (headroom)
+ skb_reserve(skb, headroom);
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
@@ -5204,11 +5417,14 @@ walk_packet:
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
if (skb) {
- dev_kfree_skb_any(skb);
+ if (uses_frags)
+ napi_free_frags(napi);
+ else
+ dev_kfree_skb_any(skb);
QETH_CARD_STAT_INC(card,
rx_length_errors);
}
- return NULL;
+ return -EMSGSIZE;
}
element++;
offset = 0;
@@ -5221,22 +5437,40 @@ walk_packet:
*__element = element;
*__offset = offset;
- if (use_rx_sg) {
- QETH_CARD_STAT_INC(card, rx_sg_skbs);
- QETH_CARD_STAT_ADD(card, rx_sg_frags,
- skb_shinfo(skb)->nr_frags);
+
+ qeth_receive_skb(card, skb, hdr, uses_frags);
+ return 0;
+}
+
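+/* Allocation ladder in qeth_extract_skb(), in order of preference
+ * (sketch): 1. with use_rx_sg, reuse the buffer's pre-allocated CQ skb
+ * (qethbuffer->rx_skb) if it has enough tailroom, attaching the payload
+ * as page frags; 2. otherwise a napi_get_frags() skb, completed via
+ * napi_gro_frags(); 3. as a last resort a linear napi_alloc_skb() skb
+ * with the payload copied in.
+ */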
+static int qeth_extract_skbs(struct qeth_card *card, int budget,
+ struct qeth_qdio_buffer *buf, bool *done)
+{
+ int work_done = 0;
+
+ WARN_ON_ONCE(!budget);
+ *done = false;
+
+ while (budget) {
+ if (qeth_extract_skb(card, buf, &card->rx.b_element,
+ &card->rx.e_offset)) {
+ *done = true;
+ break;
+ }
+
+ work_done++;
+ budget--;
}
- return skb;
+
+ return work_done;
}
-EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
int qeth_poll(struct napi_struct *napi, int budget)
{
struct qeth_card *card = container_of(napi, struct qeth_card, napi);
int work_done = 0;
struct qeth_qdio_buffer *buffer;
- int done;
int new_budget = budget;
+ bool done;
while (1) {
if (!card->rx.b_count) {
@@ -5259,11 +5493,10 @@ int qeth_poll(struct napi_struct *napi, int budget)
if (!(card->rx.qdio_err &&
qeth_check_qdio_errors(card, buffer->buffer,
card->rx.qdio_err, "qinerr")))
- work_done +=
- card->discipline->process_rx_buffer(
- card, new_budget, &done);
+ work_done += qeth_extract_skbs(card, new_budget,
+ buffer, &done);
else
- done = 1;
+ done = true;
if (done) {
QETH_CARD_STAT_INC(card, rx_bufs);
@@ -5432,9 +5665,9 @@ int qeth_setassparms_cb(struct qeth_card *card,
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+ card->options.ipa4.enabled = cmd->hdr.assists.enabled;
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
+ card->options.ipa6.enabled = cmd->hdr.assists.enabled;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -5835,7 +6068,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
goto err;
}
}
- rc = card->discipline->set_online(gdev);
+
+ rc = qeth_set_online(card);
err:
return rc;
}
@@ -5843,7 +6077,8 @@ err:
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- return card->discipline->set_offline(gdev);
+
+ return qeth_set_offline(card, false);
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
@@ -5866,7 +6101,7 @@ static int qeth_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- card->discipline->set_offline(gdev);
+ qeth_set_offline(card, false);
return 0;
}
@@ -5875,7 +6110,7 @@ static int qeth_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- rc = card->discipline->set_online(gdev);
+ rc = qeth_set_online(card);
qeth_set_allowed_threads(card, 0xffffffff, 0);
if (rc)
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index c1ecce95094d..3865f7258449 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -53,6 +53,16 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
return (caps->enabled & mask) == mask;
}
+#define qeth_adp_supported(c, f) \
+ qeth_ipa_caps_supported(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+ qeth_ipa_caps_supported(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+ qeth_ipa_caps_supported(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+ ((prot == QETH_PROT_IPV6) ? qeth_is_supported6(c, f) : \
+ qeth_is_supported(c, f))
+
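+/* The helpers now route through struct qeth_ipa_caps, but call sites
+ * keep their old shape, e.g. (illustrative):
+ *
+ *	if (qeth_is_ipafunc_supported(card, QETH_PROT_IPV6,
+ *				      IPA_ARP_PROCESSING))
+ *		...
+ */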
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
@@ -338,14 +348,14 @@ enum qeth_card_info_port_speed {
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
- __u8 ip_addr[4];
- __u8 mask[4];
+ __be32 addr;
+ __be32 mask;
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelip6 {
- __u8 ip_addr[16];
- __u8 mask[16];
+ struct in6_addr addr;
+ struct in6_addr prefix;
__u32 flags;
} __attribute__ ((packed));
@@ -766,8 +776,7 @@ struct qeth_ipacmd_hdr {
__u8 prim_version_no;
__u8 param_count;
__u16 prot_version;
- __u32 ipa_supported;
- __u32 ipa_enabled;
+ struct qeth_ipa_caps assists;
} __attribute__ ((packed));
/* The IPA command itself */
@@ -899,6 +908,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IDX_ACT_ERR_AUTH 0x1E
#define QETH_IDX_ACT_ERR_AUTH_USER 0x20
+#define QETH_IDX_TERMINATE 0xc0
+#define QETH_IDX_TERMINATE_MASK 0xc0
+#define QETH_IDX_TERM_BAD_TRANSPORT 0x41
+#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6
+
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
*(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index e81170ab6d9a..2bd9993aa60b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -24,8 +24,6 @@ static ssize_t qeth_dev_state_show(struct device *dev,
switch (card->state) {
case CARD_STATE_DOWN:
return sprintf(buf, "DOWN\n");
- case CARD_STATE_HARDSETUP:
- return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
if (card->dev->flags & IFF_UP)
return sprintf(buf, "UP (LAN %s)\n",
@@ -207,7 +205,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "prio_queueing_vlan")) {
if (IS_LAYER3(card)) {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto out;
}
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index ddc615b431a8..adf25c9fd2b3 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -13,7 +13,6 @@ extern const struct attribute_group *qeth_l2_attr_groups[];
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role,
enum qeth_sbp_states *state);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9086bc04fa6b..692bd2623401 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -24,7 +24,6 @@
#include "qeth_core.h"
#include "qeth_l2.h"
-static int qeth_l2_set_offline(struct ccwgroup_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -284,56 +283,15 @@ static void qeth_l2_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_HARDSETUP;
- }
- if (card->state == CARD_STATE_HARDSETUP) {
- qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
+ qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-}
-
-static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
- int budget, int *done)
-{
- int work_done = 0;
- struct sk_buff *skb;
- struct qeth_hdr *hdr;
- unsigned int len;
-
- *done = 0;
- WARN_ON_ONCE(!budget);
- while (budget) {
- skb = qeth_core_get_next_skb(card,
- &card->qdio.in_q->bufs[card->rx.b_index],
- &card->rx.b_element, &card->rx.e_offset, &hdr);
- if (!skb) {
- *done = 1;
- break;
- }
-
- if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
- skb->protocol = eth_type_trans(skb, skb->dev);
- qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- } else {
- skb_push(skb, sizeof(*hdr));
- skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
- len = skb->len;
- card->osn_info.data_cb(skb);
- }
-
- work_done++;
- budget--;
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, len);
- }
- return work_done;
+ card->info.promisc_mode = 0;
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -648,7 +606,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_l2_set_offline(cgdev);
+ qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
@@ -766,17 +724,31 @@ static void qeth_l2_trace_features(struct qeth_card *card)
sizeof(card->options.vnicc.sup_chars));
}
-static int qeth_l2_set_online(struct ccwgroup_device *gdev)
+static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ if (!card->options.sbp.reflect_promisc &&
+ card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+ /* Conditional to avoid spurious error messages */
+ qeth_bridgeport_setrole(card, card->options.sbp.role);
+ /* Let the callback function refresh the stored role value. */
+ qeth_bridgeport_query_ports(card, &card->options.sbp.role,
+ NULL);
+ }
+ if (card->options.sbp.hostnotification) {
+ if (qeth_bridgeport_an_set(card, 1))
+ card->options.sbp.hostnotification = 0;
+ } else {
+ qeth_bridgeport_an_set(card, 0);
+ }
+}
+
+static int qeth_l2_set_online(struct qeth_card *card)
+{
+ struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
bool carrier_ok;
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 2, "setonlin");
-
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
@@ -786,9 +758,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->sbp_lock);
qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs)
+ if (card->options.sbp.supported_funcs) {
+ qeth_l2_setup_bridgeport_attrs(card);
dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
+ "The device represents a Bridge Capable Port\n");
+ }
mutex_unlock(&card->sbp_lock);
qeth_l2_register_dev_addr(card);
@@ -799,20 +773,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
qeth_trace_features(card);
qeth_l2_trace_features(card);
- qeth_l2_setup_bridgeport_attrs(card);
-
- card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
- rc = qeth_init_qdio_queues(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- rc = -ENODEV;
- goto out_remove;
- }
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -839,8 +804,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
@@ -849,81 +812,12 @@ out_remove:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
-
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
- int recovery_mode)
+static void qeth_l2_set_offline(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- int rc = 0, rc2 = 0, rc3 = 0;
-
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 3, "setoffl");
-
- if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- card->info.hwtrap = 1;
- }
-
- rtnl_lock();
- card->info.open_when_online = card->dev->flags & IFF_UP;
- dev_close(card->dev);
- netif_device_detach(card->dev);
- netif_carrier_off(card->dev);
- rtnl_unlock();
-
qeth_l2_stop_card(card);
- rc = qeth_stop_channel(&card->data);
- rc2 = qeth_stop_channel(&card->write);
- rc3 = qeth_stop_channel(&card->read);
- if (!rc)
- rc = (rc2) ? rc2 : rc3;
- if (rc)
- QETH_CARD_TEXT_(card, 2, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
-
- /* let user_space know that device is offline */
- kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
- return 0;
-}
-
-static int qeth_l2_set_offline(struct ccwgroup_device *cgdev)
-{
- return __qeth_l2_set_offline(cgdev, 0);
-}
-
-static int qeth_l2_recover(void *ptr)
-{
- struct qeth_card *card;
- int rc = 0;
-
- card = (struct qeth_card *) ptr;
- QETH_CARD_TEXT(card, 2, "recover1");
- if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
- return 0;
- QETH_CARD_TEXT(card, 2, "recover2");
- dev_warn(&card->gdev->dev,
- "A recovery process has been started for the device\n");
- __qeth_l2_set_offline(card->gdev, 1);
- rc = qeth_l2_set_online(card->gdev);
- if (!rc)
- dev_info(&card->gdev->dev,
- "Device successfully recovered!\n");
- else {
- ccwgroup_set_offline(card->gdev);
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- }
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
- return 0;
}
static int __init qeth_l2_init(void)
@@ -960,8 +854,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l2_discipline = {
.devtype = &qeth_l2_devtype,
- .process_rx_buffer = qeth_l2_process_inbound_buffer,
- .recover = qeth_l2_recover,
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
@@ -1000,7 +892,8 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
if (!iob)
return -ENOMEM;
- qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
+ qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
+
memcpy(__ipa_cmd(iob), data, data_len);
iob->callback = qeth_osn_assist_cb;
return qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1951,8 +1844,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
/* check if VNICC is currently enabled */
bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
- /* if everything is turned off, VNICC is not active */
- if (!card->options.vnicc.cur_chars)
+ if (!card->options.vnicc.sup_chars)
return false;
/* default values are only OK if rx_bcast was not enabled by user
* or the card is offline.
@@ -2039,8 +1931,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
/* enforce assumed default values and recover settings, if changed */
error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
timeout);
- chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
- chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+ /* Change chars, if necessary */
+ chars_tmp = card->options.vnicc.wanted_chars ^
+ card->options.vnicc.cur_chars;
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index f70c7aac2dcc..86bcae992f72 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -246,39 +246,6 @@ static struct attribute_group qeth_l2_bridgeport_attr_group = {
.attrs = qeth_l2_bridgeport_attrs,
};
-/**
- * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
- * @card: qeth_card structure pointer
- *
- * Note: this function is called with conf_mutex held by the caller
- */
-void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
-{
- int rc;
-
- if (!card)
- return;
- if (!card->options.sbp.supported_funcs)
- return;
-
- mutex_lock(&card->sbp_lock);
- if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
- /* Conditional to avoid spurious error messages */
- qeth_bridgeport_setrole(card, card->options.sbp.role);
- /* Let the callback function refresh the stored role value. */
- qeth_bridgeport_query_ports(card,
- &card->options.sbp.role, NULL);
- }
- if (card->options.sbp.hostnotification) {
- rc = qeth_bridgeport_an_set(card, 1);
- if (rc)
- card->options.sbp.hostnotification = 0;
- } else {
- qeth_bridgeport_an_set(card, 0);
- }
- mutex_unlock(&card->sbp_lock);
-}
-
/* VNIC CHARS support */
/* convert sysfs attr name to VNIC characteristic */
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 5db04fe472c0..6ccfe2121095 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -23,7 +23,6 @@ struct qeth_ipaddr {
struct hlist_node hnode;
enum qeth_ip_types type;
u8 is_multicast:1;
- u8 in_progress:1;
u8 disp_flag:2;
u8 ipato:1; /* ucast only */
@@ -35,7 +34,7 @@ struct qeth_ipaddr {
union {
struct {
__be32 addr;
- unsigned int mask;
+ __be32 mask;
} a4;
struct {
struct in6_addr addr;
@@ -102,7 +101,8 @@ struct qeth_ipato_entry {
extern const struct attribute_group *qeth_l3_attr_groups[];
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+ char *buf);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 27126330a4b0..317d56647a4a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -37,30 +37,18 @@
#include "qeth_l3.h"
-
-static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
-static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
-{
- sprintf(buf, "%pI4", addr);
-}
-
-static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
-{
- sprintf(buf, "%pI6", addr);
-}
-
-void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
- char *buf)
+int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
+ char *buf)
{
if (proto == QETH_PROT_IPV4)
- qeth_l3_ipaddr4_to_string(addr, buf);
- else if (proto == QETH_PROT_IPV6)
- qeth_l3_ipaddr6_to_string(addr, buf);
+ return sprintf(buf, "%pI4", addr);
+ else
+ return sprintf(buf, "%pI6", addr);
}
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
@@ -161,8 +149,6 @@ static int qeth_l3_delete_ip(struct qeth_card *card,
addr->ref_counter--;
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
return rc;
- if (addr->in_progress)
- return -EINPROGRESS;
if (qeth_card_hw_is_reachable(card))
rc = qeth_l3_deregister_addr_entry(card, addr);
@@ -223,29 +209,10 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return 0;
}
- /* qeth_l3_register_addr_entry can go to sleep
- * if we add a IPV4 addr. It is caused by the reason
- * that SETIP ipa cmd starts ARP staff for IPV4 addr.
- * Thus we should unlock spinlock, and make a protection
- * using in_progress variable to indicate that there is
- * an hardware operation with this IPV4 address
- */
- if (addr->proto == QETH_PROT_IPV4) {
- addr->in_progress = 1;
- mutex_unlock(&card->ip_lock);
- rc = qeth_l3_register_addr_entry(card, addr);
- mutex_lock(&card->ip_lock);
- addr->in_progress = 0;
- } else
- rc = qeth_l3_register_addr_entry(card, addr);
+ rc = qeth_l3_register_addr_entry(card, addr);
if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1) {
- qeth_l3_deregister_addr_entry(card, addr);
- hash_del(&addr->hnode);
- kfree(addr);
- }
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -313,19 +280,10 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
- if (addr->proto == QETH_PROT_IPV4) {
- addr->in_progress = 1;
- mutex_unlock(&card->ip_lock);
- rc = qeth_l3_register_addr_entry(card, addr);
- mutex_lock(&card->ip_lock);
- addr->in_progress = 0;
- } else
- rc = qeth_l3_register_addr_entry(card, addr);
+ rc = qeth_l3_register_addr_entry(card, addr);
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1)
- qeth_l3_delete_ip(card, addr);
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -379,17 +337,16 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
-static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
+static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
{
- int i, j;
- for (i = 0; i < 16; i++) {
- j = (len) - (i * 8);
- if (j >= 8)
- netmask[i] = 0xff;
- else if (j > 0)
- netmask[i] = (u8)(0xFF00 >> j);
- else
- netmask[i] = 0;
+ unsigned int i = 0;
+
+ while (len && i < 4) {
+ int mask_len = min_t(int, len, 32);
+
+ prefix->s6_addr32[i] = inet_make_mask(mask_len);
+ len -= mask_len;
+ i++;
}
}
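+/* Worked example: for pfxlen = 44 the loop yields
+ *	s6_addr32[0] = inet_make_mask(32) = htonl(0xffffffff)
+ *	s6_addr32[1] = inet_make_mask(12) = htonl(0xfff00000)
+ * i.e. ffff:ffff:fff0::, leaving words 2 and 3 untouched; equivalent
+ * to the byte-wise qeth_l3_fill_netmask() it replaces, provided the
+ * command area starts out zeroed.
+ */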
@@ -412,7 +369,6 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- __u8 netmask[16];
u32 flags;
QETH_CARD_TEXT(card, 4, "setdelip");
@@ -427,15 +383,13 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
if (addr->proto == QETH_PROT_IPV6) {
- memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
- sizeof(struct in6_addr));
- qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen);
- memcpy(cmd->data.setdelip6.mask, netmask,
- sizeof(struct in6_addr));
+ cmd->data.setdelip6.addr = addr->u.a6.addr;
+ qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
+ addr->u.a6.pfxlen);
cmd->data.setdelip6.flags = flags;
} else {
- memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
- memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+ cmd->data.setdelip4.addr = addr->u.a4.addr;
+ cmd->data.setdelip4.mask = addr->u.a4.mask;
cmd->data.setdelip4.flags = flags;
}
@@ -581,6 +535,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
+ mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
@@ -600,6 +555,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -613,6 +569,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "delipato");
+ mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
@@ -629,6 +586,8 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
+ mutex_unlock(&card->conf_mutex);
+
return rc;
}
@@ -637,6 +596,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
+ int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -644,7 +604,11 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- return qeth_l3_modify_ip(card, &addr, add);
+ mutex_lock(&card->conf_mutex);
+ rc = qeth_l3_modify_ip(card, &addr, add);
+ mutex_unlock(&card->conf_mutex);
+
+ return rc;
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
@@ -937,7 +901,7 @@ out:
return rc;
}
-static int qeth_l3_start_ipassists(struct qeth_card *card)
+static void qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
@@ -947,7 +911,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_multicast(card); /* go on*/
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
- return 0;
}
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
@@ -1198,96 +1161,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *hdr)
-{
- struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
- struct net_device *dev = skb->dev;
-
- if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
- dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
- "FAKELL", skb->len);
- return;
- }
-
- if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
- u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
- ETH_P_IP;
- unsigned char tg_addr[ETH_ALEN];
-
- skb_reset_network_header(skb);
- switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
- case QETH_CAST_MULTICAST:
- if (prot == ETH_P_IP)
- ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
- else
- ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
- QETH_CARD_STAT_INC(card, rx_multicast);
- break;
- case QETH_CAST_BROADCAST:
- ether_addr_copy(tg_addr, card->dev->broadcast);
- QETH_CARD_STAT_INC(card, rx_multicast);
- break;
- default:
- if (card->options.sniffer)
- skb->pkt_type = PACKET_OTHERHOST;
- ether_addr_copy(tg_addr, card->dev->dev_addr);
- }
-
- if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
- card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
- skb->len);
- else
- card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, "FAKELL", skb->len);
- }
-
- /* copy VLAN tag from hdr into skb */
- if (!card->options.sniffer &&
- (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
- QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
- u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
- hdr->hdr.l3.vlan_id :
- hdr->hdr.l3.next_hop.rx.vlan_id;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
- }
-
- qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
-}
-
-static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
- int budget, int *done)
-{
- int work_done = 0;
- struct sk_buff *skb;
- struct qeth_hdr *hdr;
-
- *done = 0;
- WARN_ON_ONCE(!budget);
- while (budget) {
- skb = qeth_core_get_next_skb(card,
- &card->qdio.in_q->bufs[card->rx.b_index],
- &card->rx.b_element, &card->rx.e_offset, &hdr);
- if (!skb) {
- *done = 1;
- break;
- }
-
- if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
- qeth_l3_rebuild_skb(card, skb, hdr);
-
- skb->protocol = eth_type_trans(skb, skb->dev);
- QETH_CARD_STAT_INC(card, rx_packets);
- QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
-
- napi_gro_receive(&card->napi, skb);
- work_done++;
- budget--;
- }
- return work_done;
-}
-
static void qeth_l3_stop_card(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "stopcard");
@@ -1304,16 +1177,14 @@ static void qeth_l3_stop_card(struct qeth_card *card)
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_HARDSETUP;
- }
- if (card->state == CARD_STATE_HARDSETUP) {
- qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
+ qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
+ card->info.promisc_mode = 0;
}
static void qeth_l3_set_promisc_mode(struct qeth_card *card)
@@ -2167,7 +2038,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_l3_set_offline(cgdev);
+ qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
@@ -2179,17 +2050,13 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_clear_ipato_list(card);
}
-static int qeth_l3_set_online(struct ccwgroup_device *gdev)
+static int qeth_l3_set_online(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
bool carrier_ok;
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 2, "setonlin");
-
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
@@ -2197,7 +2064,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
/* softsetup */
@@ -2207,11 +2073,8 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
if (rc)
QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
if (!card->options.sniffer) {
- rc = qeth_l3_start_ipassists(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "3err%d", rc);
- goto out_remove;
- }
+ qeth_l3_start_ipassists(card);
+
rc = qeth_l3_setrouting_v4(card);
if (rc)
QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
@@ -2220,12 +2083,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
}
- rc = qeth_init_qdio_queues(card);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- rc = -ENODEV;
- goto out_remove;
- }
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -2254,8 +2111,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
qeth_trace_features(card);
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
qeth_l3_stop_card(card);
@@ -2263,88 +2118,12 @@ out_remove:
qeth_stop_channel(&card->write);
qeth_stop_channel(&card->read);
qdio_free(CARD_DDEV(card));
-
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
- int recovery_mode)
+static void qeth_l3_set_offline(struct qeth_card *card)
{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- int rc = 0, rc2 = 0, rc3 = 0;
-
- mutex_lock(&card->discipline_mutex);
- mutex_lock(&card->conf_mutex);
- QETH_CARD_TEXT(card, 3, "setoffl");
-
- if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- card->info.hwtrap = 1;
- }
-
- rtnl_lock();
- card->info.open_when_online = card->dev->flags & IFF_UP;
- dev_close(card->dev);
- netif_device_detach(card->dev);
- netif_carrier_off(card->dev);
- rtnl_unlock();
-
qeth_l3_stop_card(card);
- if (card->options.cq == QETH_CQ_ENABLED) {
- rtnl_lock();
- call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
- rtnl_unlock();
- }
-
- rc = qeth_stop_channel(&card->data);
- rc2 = qeth_stop_channel(&card->write);
- rc3 = qeth_stop_channel(&card->read);
- if (!rc)
- rc = (rc2) ? rc2 : rc3;
- if (rc)
- QETH_CARD_TEXT_(card, 2, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
-
- /* let user_space know that device is offline */
- kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
- mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
- return 0;
-}
-
-static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
-{
- return __qeth_l3_set_offline(cgdev, 0);
-}
-
-static int qeth_l3_recover(void *ptr)
-{
- struct qeth_card *card;
- int rc = 0;
-
- card = (struct qeth_card *) ptr;
- QETH_CARD_TEXT(card, 2, "recover1");
- QETH_CARD_HEX(card, 2, &card, sizeof(void *));
- if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
- return 0;
- QETH_CARD_TEXT(card, 2, "recover2");
- dev_warn(&card->gdev->dev,
- "A recovery process has been started for the device\n");
- __qeth_l3_set_offline(card->gdev, 1);
- rc = qeth_l3_set_online(card->gdev);
- if (!rc)
- dev_info(&card->gdev->dev,
- "Device successfully recovered!\n");
- else {
- ccwgroup_set_offline(card->gdev);
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- }
- qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
- qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
- return 0;
}
/* Returns zero if the command is successfully "consumed" */
@@ -2356,8 +2135,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
- .process_rx_buffer = qeth_l3_process_inbound_buffer,
- .recover = qeth_l3_recover,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
@@ -2436,7 +2213,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
addr.u.a4.addr = ifa->ifa_address;
- addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
+ addr.u.a4.mask = ifa->ifa_mask;
return qeth_l3_handle_ip_event(card, &addr, event);
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index f9067ed6c7d3..29f2517d2a31 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ int rc = 0;
char *tmp;
- int rc;
if (!IS_IQD(card))
return -EPERM;
- if (card->state != CARD_STATE_DOWN)
- return -EPERM;
- if (card->options.sniffer)
- return -EPERM;
- if (card->options.cq == QETH_CQ_NOTAVAILABLE)
- return -EPERM;
+
+ mutex_lock(&card->conf_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (card->options.sniffer) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **)&buf, "\n");
- if (strlen(tmp) > 8)
- return -EINVAL;
+ if (strlen(tmp) > 8) {
+ rc = -EINVAL;
+ goto out;
+ }
if (card->options.hsuid[0])
/* delete old ip address */
@@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
card->options.hsuid[0] = '\0';
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
qeth_configure_cq(card, QETH_CQ_DISABLED);
- return count;
+ goto out;
}
- if (qeth_configure_cq(card, QETH_CQ_ENABLED))
- return -EPERM;
+ if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
+ rc = -EPERM;
+ goto out;
+ }
snprintf(card->options.hsuid, sizeof(card->options.hsuid),
"%-8s", tmp);
@@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
rc = qeth_l3_modify_hsuid(card, true);
+out:
+ mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -386,30 +402,35 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
- char addr_str[40];
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int str_len = 0;
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- /* add strlen for "/<mask>\n" */
- entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
+ char addr_str[40];
+ int entry_len;
+
if (ipatoe->proto != proto)
continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+
+ entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr,
+ addr_str);
+ if (entry_len < 0)
+ continue;
+
+ /* Append /%mask to the entry: */
+ entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3);
+ /* Enough room to format %entry\n into null terminated page? */
+ if (entry_len + 1 > PAGE_SIZE - str_len - 1)
break;
- qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i,
- "%s/%i\n", addr_str, ipatoe->mask_bits);
+
+ entry_len = scnprintf(buf, PAGE_SIZE - str_len,
+ "%s/%i\n", addr_str, ipatoe->mask_bits);
+ str_len += entry_len;
+ buf += entry_len;
}
mutex_unlock(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
- return i;
+ return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
}
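+/* Sizing sketch: with str_len bytes already emitted, the page has
+ * PAGE_SIZE - str_len - 1 bytes left for text (one byte reserved for
+ * the NUL). An entry needs entry_len + 1 more (the entry plus '\n'),
+ * hence the cutoff above; scnprintf() itself never overruns and
+ * returns the number of characters actually stored.
+ */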
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
@@ -455,16 +476,14 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
int mask_bits;
int rc = 0;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
- goto out;
+ return rc;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
- if (!ipatoe) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!ipatoe)
+ return -ENOMEM;
+
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
ipatoe->mask_bits = mask_bits;
@@ -472,8 +491,7 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
rc = qeth_l3_add_ipato_entry(card, ipatoe);
if (rc)
kfree(ipatoe);
-out:
- mutex_unlock(&card->conf_mutex);
+
return rc ? rc : count;
}
@@ -496,11 +514,9 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
int mask_bits;
int rc = 0;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (!rc)
rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -607,31 +623,34 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *ipaddr;
- char addr_str[40];
int str_len = 0;
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- entry_len += 2; /* \n + terminator */
mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
+ char addr_str[40];
+ int entry_len;
+
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - str_len) <= entry_len)
+
+ entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u,
+ addr_str);
+ if (entry_len < 0)
+ continue;
+
+ /* Enough room to format %addr\n into null terminated page? */
+ if (entry_len + 1 > PAGE_SIZE - str_len - 1)
break;
- qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
- addr_str);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
- addr_str);
+
+ entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
+ str_len += entry_len;
+ buf += entry_len;
}
mutex_unlock(&card->ip_lock);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return str_len;
+ return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -642,63 +661,34 @@ static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
QETH_IP_TYPE_VIPA);
}
-static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
- u8 *addr)
-{
- if (qeth_l3_string_to_ipaddr(buf, proto, addr)) {
- return -EINVAL;
- }
- return 0;
-}
-
-static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add,
+ size_t count, enum qeth_prot_versions proto)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_vipae(buf, proto, addr);
+ rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+ rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_VIPA, proto);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
qeth_l3_dev_vipa_add4_show,
qeth_l3_dev_vipa_add4_store);
-static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
-{
- u8 addr[16];
- int rc;
-
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, false, addr,
- QETH_IP_TYPE_VIPA, proto);
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4);
+	return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
@@ -715,9 +705,7 @@ static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
@@ -727,9 +715,7 @@ static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
@@ -782,54 +768,34 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
return 0;
}
-static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
+static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add,
+ size_t count, enum qeth_prot_versions proto)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
u8 addr[16] = {0, };
int rc;
- mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, true, addr,
+ rc = qeth_l3_modify_rxip_vipa(card, add, addr,
QETH_IP_TYPE_RXIP, proto);
- mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
qeth_l3_dev_rxip_add4_show,
qeth_l3_dev_rxip_add4_store);
-static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
- struct qeth_card *card, enum qeth_prot_versions proto)
-{
- u8 addr[16];
- int rc;
-
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (!rc)
- rc = qeth_l3_modify_rxip_vipa(card, false, addr,
- QETH_IP_TYPE_RXIP, proto);
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4);
+ return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4);
}
static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
@@ -846,9 +812,7 @@ static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
@@ -858,9 +822,7 @@ static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6);
+ return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6);
}
static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
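The qeth hunks above collapse four near-duplicate add/del store handlers into one helper per address type, passing the direction as a bool and fetching the card from the device inside the helper; each sysfs attribute then shrinks to a one-line wrapper whose only job is the add/del flag and the protocol, which is exactly why the del4 wrappers must pass false. The conf_mutex lock/unlock disappears here presumably because qeth_l3_modify_rxip_vipa serializes internally; that is an assumption, not something this diff shows. A hedged sketch of the resulting shape:

static ssize_t example_vipa_store(struct device *dev, const char *buf,
				  bool add, size_t count,
				  enum qeth_prot_versions proto)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	u8 addr[16] = {};
	int rc;

	rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
	if (!rc)
		rc = qeth_l3_modify_rxip_vipa(card, add, addr,
					      QETH_IP_TYPE_VIPA, proto);
	return rc ? rc : count;	/* sysfs stores return count on success */
}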
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c25e8a54e869..3170b295a5da 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -134,7 +134,7 @@ static char *blogic_cmd_failure_reason;
static void blogic_announce_drvr(struct blogic_adapter *adapter)
{
blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter);
- blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter);
+ blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>\n", adapter);
}
@@ -440,7 +440,7 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
goto done;
}
if (blogic_global_options.trace_config)
- blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all);
+ blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all);
result = 0;
goto done;
}
@@ -716,23 +716,23 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
- blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
- blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1);
- blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+ blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1);
+ blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
continue;
}
if (irq_ch == 0) {
- blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (blogic_global_options.trace_probe) {
- blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL);
- blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+ blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL);
+ blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
}
/*
Issue the Inquire PCI Host Adapter Information command to determine
@@ -818,7 +818,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
nonpr_mmcount++;
mmcount++;
} else
- blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+ blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
}
/*
If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq."
@@ -956,23 +956,23 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
#ifdef CONFIG_SCSI_FLASHPOINT
if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
- blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
- blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1);
- blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+ blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1);
+ blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
continue;
}
if (irq_ch == 0) {
- blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch);
- blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+ blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch);
+ blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
continue;
}
if (blogic_global_options.trace_probe) {
- blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
- blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+ blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL);
+ blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
}
if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
struct blogic_probeinfo *probeinfo =
@@ -987,11 +987,11 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
probeinfo->pci_device = pci_dev_get(pci_device);
fpcount++;
} else
- blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+ blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
#else
- blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device);
- blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
- blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL);
+ blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device);
+ blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
+ blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL);
#endif
}
/*
@@ -1099,9 +1099,9 @@ static bool blogic_failure(struct blogic_adapter *adapter, char *msg)
if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) {
blogic_err("While configuring BusLogic PCI Host Adapter at\n",
adapter);
- blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
+ blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
} else
- blogic_err("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr);
+ blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr);
blogic_err("%s FAILED - DETACHING\n", adapter, msg);
if (blogic_cmd_failure_reason != NULL)
blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter,
@@ -1129,13 +1129,13 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
fpinfo->present = false;
if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 &&
fpinfo->present)) {
- blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
- blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
+ blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
+ blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter);
return false;
}
if (blogic_global_options.trace_probe)
- blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr);
+ blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr);
/*
Indicate the Host Adapter Probe completed successfully.
*/
@@ -1152,7 +1152,7 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
intreg.all = blogic_rdint(adapter);
georeg.all = blogic_rdgeom(adapter);
if (blogic_global_options.trace_probe)
- blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
+ blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
if (statusreg.all == 0 || statusreg.sr.diag_active ||
statusreg.sr.cmd_param_busy || statusreg.sr.rsvd ||
statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0)
@@ -1231,7 +1231,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
udelay(100);
}
if (blogic_global_options.trace_hw_reset)
- blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
if (timeout < 0)
return false;
/*
@@ -1251,7 +1251,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
udelay(100);
}
if (blogic_global_options.trace_hw_reset)
- blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
if (timeout < 0)
return false;
/*
@@ -1267,7 +1267,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
udelay(100);
}
if (blogic_global_options.trace_hw_reset)
- blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+ blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
if (timeout < 0)
return false;
/*
@@ -1323,7 +1323,7 @@ static bool __init blogic_checkadapter(struct blogic_adapter *adapter)
Provide tracing information if requested and return.
*/
if (blogic_global_options.trace_probe)
- blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter,
+ blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter,
adapter->io_addr,
(result ? "Found" : "Not Found"));
return result;
@@ -1836,7 +1836,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
int tgt_id;
blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
- blogic_info(" Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
+ blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
blogic_info(" DMA Channel: ", adapter);
if (adapter->dma_ch > 0)
@@ -1844,7 +1844,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
else
blogic_info("None, ", adapter);
if (adapter->bios_addr > 0)
- blogic_info("BIOS Address: 0x%X, ", adapter,
+ blogic_info("BIOS Address: 0x%lX, ", adapter,
adapter->bios_addr);
else
blogic_info("BIOS Address: None, ", adapter);
@@ -1852,7 +1852,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter,
adapter->bus, adapter->dev);
if (adapter->pci_addr > 0)
- blogic_info("0x%X, ", adapter, adapter->pci_addr);
+ blogic_info("0x%lX, ", adapter, adapter->pci_addr);
else
blogic_info("Unassigned, ", adapter);
}
@@ -1932,10 +1932,10 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter,
discon_msg, tagq_msg);
if (blogic_multimaster_type(adapter)) {
- blogic_info(" Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
- blogic_info(" Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
+ blogic_info(" Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
+ blogic_info(" Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
} else
- blogic_info(" Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
+ blogic_info(" Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
blogic_info(" Tagged Queue Depth: ", adapter);
common_tagq_depth = true;
for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++)
@@ -2717,7 +2717,7 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
then there is most likely a bug in
the Host Adapter firmware.
*/
- blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
+ blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
}
}
next_inbox->comp_code = BLOGIC_INBOX_FREE;
@@ -2752,7 +2752,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (ccb->opcode == BLOGIC_BDR) {
int tgt_id = ccb->tgt_id;
- blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id);
+ blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id);
blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done);
adapter->tgt_flags[tgt_id].tagq_active = false;
adapter->cmds_since_rst[tgt_id] = 0;
@@ -2829,7 +2829,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (blogic_global_options.trace_err) {
int i;
blogic_notice("CCB #%ld Target %d: Result %X Host "
- "Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
+ "Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
blogic_notice("CDB ", adapter);
for (i = 0; i < ccb->cdblen; i++)
blogic_notice(" %02X", adapter, ccb->cdb[i]);
@@ -3203,12 +3203,12 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
*/
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
spin_unlock_irq(adapter->scsi_host->host_lock);
- blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter);
+ blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter);
blogic_delay(1);
spin_lock_irq(adapter->scsi_host->host_lock);
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
ccb)) {
- blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
+ blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
command->scsi_done(command);
@@ -3443,8 +3443,8 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
if (diskparam->cylinders != saved_cyl)
blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors);
} else if (part_end_head > 0 || part_end_sector > 0) {
- blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
- blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
+ blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
+ blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
}
}
kfree(buf);
@@ -3689,7 +3689,7 @@ static int __init blogic_parseopts(char *options)
blogic_probe_options.probe134 = true;
break;
default:
- blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr);
+ blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
return 0;
}
} else if (blogic_parse(&options, "NoProbeISA"))
@@ -3710,7 +3710,7 @@ static int __init blogic_parseopts(char *options)
for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) {
unsigned short qdepth = simple_strtoul(options, &options, 0);
if (qdepth > BLOGIC_MAX_TAG_DEPTH) {
- blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+ blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
return 0;
}
drvr_opts->qdepth[tgt_id] = qdepth;
@@ -3719,12 +3719,12 @@ static int __init blogic_parseopts(char *options)
else if (*options == ']')
break;
else {
- blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options);
+ blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options);
return 0;
}
}
if (*options != ']') {
- blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options);
+ blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options);
return 0;
} else
options++;
@@ -3732,7 +3732,7 @@ static int __init blogic_parseopts(char *options)
unsigned short qdepth = simple_strtoul(options, &options, 0);
if (qdepth == 0 ||
qdepth > BLOGIC_MAX_TAG_DEPTH) {
- blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+ blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
return 0;
}
drvr_opts->common_qdepth = qdepth;
@@ -3778,7 +3778,7 @@ static int __init blogic_parseopts(char *options)
unsigned short bus_settle_time =
simple_strtoul(options, &options, 0);
if (bus_settle_time > 5 * 60) {
- blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
+ blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
return 0;
}
drvr_opts->bus_settle_time = bus_settle_time;
@@ -3803,14 +3803,14 @@ static int __init blogic_parseopts(char *options)
if (*options == ',')
options++;
else if (*options != ';' && *options != '\0') {
- blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options);
+ blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options);
*options = '\0';
}
}
if (!(blogic_drvr_options_count == 0 ||
blogic_probeinfo_count == 0 ||
blogic_drvr_options_count == blogic_probeinfo_count)) {
- blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL);
+ blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL);
return 0;
}
/*
@@ -3864,7 +3864,7 @@ static int __init blogic_setup(char *str)
(void) get_options(str, ARRAY_SIZE(ints), ints);
if (ints[0] != 0) {
- blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL);
+ blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL);
return 0;
}
if (str == NULL || *str == '\0')
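Two mechanical cleanups run through the BusLogic hunks: adjacent string literals split mid-message are joined so messages can be grepped in one piece, and format specifiers change from %X to %lX wherever the argument (io_addr, pci_addr, bios_addr and friends) is an unsigned long. A compact sketch of the format point, with an illustrative variable:

unsigned long io_addr = 0x330;	/* these adapter addresses are unsigned long */

pr_info("I/O Address 0x%lX\n", io_addr);	/* specifier matches the type */
/* "0x%X" with an unsigned long argument is undefined behaviour and
 * trips -Wformat, which is what these conversions silence. */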
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 90cf4691b8c3..a7881f8eb05e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8466aa784ec1..8b891a05d9e7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
if (!request_mem_region(start, 0x1000, "aic79xx"))
error = ENOMEM;
if (!error) {
- *maddr = ioremap_nocache(base_page, base_offset + 512);
+ *maddr = ioremap(base_page, base_offset + 512);
if (*maddr == NULL) {
error = ENOMEM;
release_mem_region(start, 0x1000);
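This is the first of many ioremap_nocache() -> ioremap() conversions in this merge (arcmsr, be2iscsi, bnx2fc, bnx2i, csiostor, hpsa and lasi700 below follow suit). By this kernel generation ioremap() returns an uncached mapping on every architecture, so the _nocache alias is redundant and on its way out. The replacement pattern, as an illustrative helper:

static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	/* ioremap() is uncached by default; no _nocache spelling needed */
	return ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
}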
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index a9d40d3b90ef..4190a025381a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
* At some speeds, we only support
* ST transfers.
*/
- if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+ if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
break;
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 717d8d1082ce..9b293b1f0b71 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
if (!request_mem_region(start, 0x1000, "aic7xxx"))
error = ENOMEM;
if (error == 0) {
- *maddr = ioremap_nocache(start, 256);
+ *maddr = ioremap(start, 256);
if (*maddr == NULL) {
error = ENOMEM;
release_mem_region(start, 0x1000);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f5781e31f57c..d022407e5645 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -54,6 +54,9 @@ static struct scsi_host_template aic94xx_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.track_queue_depth = 1,
};
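Several libsas-based host templates in this merge (aic94xx here; hisi_sas v1/v2/v3 and isci below) gain a compat_ioctl that simply reuses sas_ioctl, presumably because its ioctl ABI carries no pointer-size dependencies and the native handler is therefore compat-safe as-is — an assumption here, not something visible in the diff. The wiring pattern:

static struct scsi_host_template example_sht = {
	.ioctl		= sas_ioctl,
#ifdef CONFIG_COMPAT
	/* same handler: the ABI is assumed identical for 32-bit callers */
	.compat_ioctl	= sas_ioctl,
#endif
};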
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index db687ef8a99e..40dc8eac0e3a 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
break;
}
case ACB_ADAPTER_TYPE_C:{
- acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+ acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
if (!acb->pmuC) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0760d0bd8a10..9b81cfbbc5c5 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
u8 __iomem *addr;
int pcicfg_reg;
- addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+ addr = ioremap(pci_resource_start(pcidev, 2),
pci_resource_len(pcidev, 2));
if (addr == NULL)
return -ENOMEM;
phba->ctrl.csr = addr;
phba->csr_va = addr;
- addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+ addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
if (addr == NULL)
goto pci_map_err;
phba->ctrl.db = addr;
@@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
else
pcicfg_reg = 0;
- addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+ addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
pci_resource_len(pcidev, pcicfg_reg));
if (addr == NULL)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index f069e09beb10..6f8335ddb1f2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1414,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
- tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ tgt->ctx_base = ioremap(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
return 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 12666313b937..e53ebc5eff85 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
- ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ ep->qp.ctx_base = ioremap(reg_base + reg_off, 4);
if (!ep->qp.ctx_base)
return -ENOMEM;
goto arm_cq;
@@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
/* 5709 device in normal node and 5706/5708 devices */
reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
- ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
+ ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off,
MB_KERNEL_CTX_SIZE);
if (!ep->qp.ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 76751d6c7f0d..ed5f4a6ae270 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -872,6 +872,10 @@ static long ch_ioctl_compat(struct file * file,
unsigned int cmd, unsigned long arg)
{
scsi_changer *ch = file->private_data;
+ int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
+ file->f_flags & O_NDELAY);
+ if (retval)
+ return retval;
switch (cmd) {
case CHIOGPARAMS:
@@ -883,7 +887,7 @@ static long ch_ioctl_compat(struct file * file,
case CHIOINITELEM:
case CHIOSVOLTAG:
/* compatible */
- return ch_ioctl(file, cmd, arg);
+ return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
case CHIOGSTATUS32:
{
struct changer_element_status32 ces32;
@@ -898,8 +902,7 @@ static long ch_ioctl_compat(struct file * file,
return ch_gstatus(ch, ces32.ces_type, data);
}
default:
- // return scsi_ioctl_compat(ch->device, cmd, (void*)arg);
- return -ENOIOCTLCMD;
+ return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
}
}
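The ch changes route the compat path through compat_ptr() before reusing the native handler. On most 64-bit architectures compat_ptr() is a plain zero-extension, but on s390 it also clears the topmost bit, so forwarding the raw unsigned long (as the old code did) is subtly wrong there. A sketch of the entry-point shape, with illustrative names:

static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	/* convert the 32-bit user pointer the architecture-correct way */
	void __user *argp = compat_ptr(arg);

	return example_native_ioctl(file, cmd, (unsigned long)argp);
}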
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 2e8a3ac575cb..8dea7d53788a 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -529,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
goto err_free_hw;
/* Get the start address of registers from BAR 0 */
- hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ hw->regstart = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!hw->regstart) {
csio_err(hw, "Could not map BAR 0, regstart = %p\n",
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 469d0bc9f5fe..00cf33573136 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev,
return -EINVAL;
/* Delete NPIV lnodes */
- csio_lnodes_exit(hw, 1);
+ csio_lnodes_exit(hw, 1);
/* Block upper IOs */
csio_lnodes_block_request(hw);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c4e4b0136f86..4bc794d2f51c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
cxgbi_hbas_remove(cdev);
cxgbi_device_portmap_cleanup(cdev);
- cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+ if (cdev->cdev2ppm)
+ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
cxgbi_free_big_mem(cdev->pmap.port_csk);
kfree(cdev);
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 80c5a235d193..7b49e2e9fcde 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -617,6 +617,13 @@ static const struct file_operations esas2r_proc_fops = {
.unlocked_ioctl = esas2r_proc_ioctl,
};
+static const struct proc_ops esas2r_proc_ops = {
+ .proc_ioctl = esas2r_proc_ioctl,
+#ifdef CONFIG_COMPAT
+ .proc_compat_ioctl = compat_ptr_ioctl,
+#endif
+};
+
static struct Scsi_Host *esas2r_proc_host;
static int esas2r_proc_major;
@@ -728,7 +735,7 @@ const char *esas2r_info(struct Scsi_Host *sh)
pde = proc_create(ATTONODE_NAME, 0,
sh->hostt->proc_dir,
- &esas2r_proc_fops);
+ &esas2r_proc_ops);
if (!pde) {
esas2r_log_dev(ESAS2R_LOG_WARN,
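esas2r moves to the proc_ops structure that v5.6 introduces for /proc entries in place of a full file_operations; compat_ptr_ioctl is the stock helper for handlers whose ioctls take only pointer arguments — it applies compat_ptr() and forwards to the native ioctl path. Minimal sketch with illustrative names:

static const struct proc_ops example_proc_ops = {
	.proc_ioctl		= example_proc_ioctl,
#ifdef CONFIG_COMPAT
	.proc_compat_ioctl	= compat_ptr_ioctl,
#endif
};
/* registered via proc_create("example", 0, parent, &example_proc_ops) */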
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bb88995a12c7..89afa31e33cb 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -243,8 +243,6 @@ static void esp_set_all_config3(struct esp *esp, u8 val)
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
- u8 family_code, version;
-
/* Now reset the ESP chip */
scsi_esp_cmd(esp, ESP_CMD_RC);
scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
@@ -257,14 +255,19 @@ static void esp_reset_esp(struct esp *esp)
*/
esp->max_period = ((35 * esp->ccycle) / 1000);
if (esp->rev == FAST) {
- version = esp_read8(ESP_UID);
- family_code = (version & 0xf8) >> 3;
- if (family_code == 0x02)
+ u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));
+
+ if (family_code == ESP_UID_F236) {
esp->rev = FAS236;
- else if (family_code == 0x0a)
+ } else if (family_code == ESP_UID_HME) {
esp->rev = FASHME; /* Version is usually '5'. */
- else
+ } else if (family_code == ESP_UID_FSC) {
+ esp->rev = FSC;
+ /* Enable Active Negation */
+ esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
+ } else {
esp->rev = FAS100A;
+ }
esp->min_period = ((4 * esp->ccycle) / 1000);
} else {
esp->min_period = ((5 * esp->ccycle) / 1000);
@@ -308,7 +311,7 @@ static void esp_reset_esp(struct esp *esp)
case FAS236:
case PCSCSI:
- /* Fast 236, AM53c974 or HME */
+ case FSC:
esp_write8(esp->config2, ESP_CFG2);
if (esp->rev == FASHME) {
u8 cfg3 = esp->target[0].esp_config3;
@@ -2373,10 +2376,11 @@ static const char *esp_chip_names[] = {
"ESP100A",
"ESP236",
"FAS236",
+ "AM53C974",
+ "53CF9x-2",
"FAS100A",
"FAST",
"FASHME",
- "AM53C974",
};
static struct scsi_transport_template *esp_transport_template;
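esp_chip_names[] is indexed by enum esp_rev, so the string table must track the enum reordering done in esp_scsi.h below; the hunk above keeps the two in sync by hand. An illustrative hardening (not in the driver) that makes the coupling explicit with designated initializers:

static const char *esp_chip_names[] = {
	[ESP100]	= "ESP100",
	[ESP100A]	= "ESP100A",
	[ESP236]	= "ESP236",
	[FAS236]	= "FAS236",
	[PCSCSI]	= "AM53C974",
	[FSC]		= "53CF9x-2",
	[FAS100A]	= "FAS100A",
	[FAST]		= "FAST",
	[FASHME]	= "FASHME",
};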
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 91b32f2a1a1b..446a3d18c022 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -78,12 +78,14 @@
#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
-/* ESP config register 4 read-write, found only on am53c974 chips */
-#define ESP_CONFIG4_RADE 0x04 /* Active negation */
-#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */
-#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */
-#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */
-#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */
+/* ESP config register 4 read-write */
+#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */
+#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
+#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */
+#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */
+#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */
+#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 (am53c974) */
+#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 (am53c974) */
#define ESP_CONFIG_GE_12NS (0)
#define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1)
@@ -209,10 +211,15 @@
#define ESP_TEST_TS 0x04 /* Tristate test mode */
/* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_FAM 0xf8 /* ESP family bitmask */
+
+#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3)
+
+/* Values for the ESP family bits */
#define ESP_UID_F100A 0x00 /* ESP FAS100A */
#define ESP_UID_F236 0x02 /* ESP FAS236 */
-#define ESP_UID_REV 0x07 /* ESP revision */
-#define ESP_UID_FAM 0xf8 /* ESP family */
+#define ESP_UID_HME 0x0a /* FAS HME */
+#define ESP_UID_FSC 0x14 /* NCR/Symbios Logic 53CF9x-2 */
/* ESP fifo flags register read-only */
/* Note that the following implies a 16 byte FIFO on the ESP. */
@@ -257,15 +264,17 @@ struct esp_cmd_priv {
};
#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
+/* NOTE: this enum is ordered based on chip features! */
enum esp_rev {
- ESP100 = 0x00, /* NCR53C90 - very broken */
- ESP100A = 0x01, /* NCR53C90A */
- ESP236 = 0x02,
- FAS236 = 0x03,
- FAS100A = 0x04,
- FAST = 0x05,
- FASHME = 0x06,
- PCSCSI = 0x07, /* AM53c974 */
+ ESP100, /* NCR53C90 - very broken */
+ ESP100A, /* NCR53C90A */
+ ESP236,
+ FAS236,
+ PCSCSI, /* AM53c974 */
+ FSC, /* NCR/Symbios Logic 53CF9x-2 */
+ FAS100A,
+ FAST,
+ FASHME,
};
struct esp_cmd_entry {
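With ESP_UID_FAM and ESP_FAMILY() in place, the probe-time decode in esp_reset_esp() compares against named family codes instead of open-coded 0x02/0x0a shifts. An equivalent sketch of the decode:

u8 family = ESP_FAMILY(esp_read8(ESP_UID));	/* (uid & 0xf8) >> 3 */

if (family == ESP_UID_F236)		/* 0x02 */
	esp->rev = FAS236;
else if (family == ESP_UID_HME)		/* 0x0a */
	esp->rev = FASHME;
else if (family == ESP_UID_FSC)		/* 0x14, new in this merge */
	esp->rev = FSC;			/* 53CF9x-2 gets active negation */
else
	esp->rev = FAS100A;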
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8ef150dfb6f7..b60795893994 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1f55b9e4e74a..1b88a3b53eee 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1;
+ u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
@@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
pr_err("Can't del addr [%pM], %d\n", addr, err);
}
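The vnic_dev hunks replace pairs of u64 locals with one zero-initialized two-element array. That guarantees the firmware command never sees uninitialized stack (hang_notify previously passed a0/a1 without setting them) and gives the byte-wise MAC copy a single contiguous object to index. The resulting shape:

u64 a[2] = {};		/* both command arguments start out zeroed */
int wait = 1000;
int err, i;

for (i = 0; i < ETH_ALEN; i++)
	((u8 *)a)[i] = addr[i];	/* 6-byte MAC into the low bytes */

err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);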
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 233c73e01246..2bdd64648ef0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -180,10 +180,10 @@ struct hisi_sas_port {
struct hisi_sas_cq {
struct hisi_hba *hisi_hba;
- const struct cpumask *pci_irq_mask;
- struct tasklet_struct tasklet;
+ const struct cpumask *irq_mask;
int rd_point;
int id;
+ int irq_no;
};
struct hisi_sas_dq {
@@ -627,7 +627,7 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
-extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba);
extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
enum hisi_sas_phy_event event);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 03588ec3c394..9a6deb21fe4d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -163,13 +163,11 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
- unsigned long flags;
-
if (hisi_hba->hw->slot_index_alloc ||
slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
hisi_sas_slot_index_clear(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
}
}
@@ -185,12 +183,11 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
{
int index;
void *bitmap = hisi_hba->slot_index_tags;
- unsigned long flags;
if (scsi_cmnd)
return scsi_cmnd->request->tag;
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
hisi_hba->last_slot_index + 1);
if (index >= hisi_hba->slot_index_count) {
@@ -198,13 +195,13 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
hisi_hba->slot_index_count,
HISI_SAS_UNRESERVED_IPTT);
if (index >= hisi_hba->slot_index_count) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return -SAS_QUEUE_FULL;
}
}
hisi_sas_slot_index_set(hisi_hba, index);
hisi_hba->last_slot_index = index;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return index;
}
@@ -220,7 +217,6 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
- unsigned long flags;
int device_id = slot->device_id;
struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
@@ -247,9 +243,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
}
}
- spin_lock_irqsave(&sas_dev->lock, flags);
+ spin_lock(&sas_dev->lock);
list_del_init(&slot->entry);
- spin_unlock_irqrestore(&sas_dev->lock, flags);
+ spin_unlock(&sas_dev->lock);
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
@@ -489,14 +485,14 @@ static int hisi_sas_task_prep(struct sas_task *task,
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags);
- spin_lock_irqsave(&sas_dev->lock, flags);
+ spin_unlock(&dq->lock);
+ spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&sas_dev->lock, flags);
+ spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
@@ -562,7 +558,6 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
{
u32 rc;
u32 pass = 0;
- unsigned long flags;
struct hisi_hba *hisi_hba;
struct device *dev;
struct domain_device *device = task->dev;
@@ -606,9 +601,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev_err(dev, "task exec: failed[%d]!\n", rc);
if (likely(pass)) {
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
- spin_unlock_irqrestore(&dq->lock, flags);
+ spin_unlock(&dq->lock);
}
return rc;
@@ -659,12 +654,11 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = NULL;
- unsigned long flags;
int last = hisi_hba->last_dev_id;
int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
int i;
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
int queue = i % hisi_hba->queue_count;
@@ -684,7 +678,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
i++;
}
hisi_hba->last_dev_id = i;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return sas_dev;
}
@@ -1233,10 +1227,10 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
slot->task = NULL;
}
@@ -1626,11 +1620,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (slot) {
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
cq = &hisi_hba->cq[slot->dlvry_queue];
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
@@ -1694,10 +1688,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
slot->task = NULL;
}
}
@@ -1965,14 +1959,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags);
- spin_lock_irqsave(&sas_dev->lock, flags);
+ spin_unlock(&dq->lock);
+ spin_lock(&sas_dev->lock);
list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock_irqrestore(&sas_dev->lock, flags);
+ spin_unlock(&sas_dev->lock);
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;
@@ -2001,9 +1995,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_unlock_irqrestore(&task->task_state_lock, flags);
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
- spin_lock_irqsave(&dq->lock, flags);
+ spin_lock(&dq->lock);
hisi_hba->hw->start_delivery(dq);
- spin_unlock_irqrestore(&dq->lock, flags);
+ spin_unlock(&dq->lock);
return 0;
@@ -2076,10 +2070,10 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
/*
- * flush tasklet to avoid free'ing task
+ * sync irq to avoid free'ing task
* before using task in IO completion
*/
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
slot->task = NULL;
}
dev_err(dev, "internal task abort: timeout and not done.\n");
@@ -2131,7 +2125,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
case HISI_SAS_INT_ABT_DEV:
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- const struct cpumask *mask = cq->pci_irq_mask;
+ const struct cpumask *mask = cq->irq_mask;
if (mask && !cpumask_intersects(cpu_online_mask, mask))
continue;
@@ -2225,17 +2219,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
-void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
+void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
int i;
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- tasklet_kill(&cq->tasklet);
+ synchronize_irq(cq->irq_no);
}
}
-EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
+EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
@@ -3936,7 +3930,7 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
- debugfs_create_file("trigger_dump", 0600,
+ debugfs_create_file("trigger_dump", 0200,
hisi_hba->debugfs_dir,
hisi_hba,
&hisi_sas_debugfs_trigger_dump_fops);
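The hisi_sas core changes all follow from moving completion-queue processing out of tasklets into threaded interrupt handlers (see the v2/v3 hunks below): waiting for in-flight completions becomes synchronize_irq() on the queue's vector instead of tasklet_kill(), and locks now taken only from thread or process context drop their irqsave variants. (The trigger_dump debugfs file also becomes 0200, fitting a write-only trigger.) A sketch of the locking point:

/* hisi_hba->lock is no longer taken from (soft)irq context, so there
 * is nothing to deadlock against on this CPU; plain spin_lock() is
 * sufficient and cheaper than spin_lock_irqsave(). */
spin_lock(&hisi_hba->lock);
hisi_sas_slot_index_clear(hisi_hba, slot_idx);
spin_unlock(&hisi_hba->lock);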
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3af53cc42bd6..fa25766502a2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1772,6 +1772,9 @@ static struct scsi_host_template sht_v1_hw = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = host_attrs_v1_hw,
.host_reset = hisi_sas_host_reset,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 61b1e2693b08..e05faf315dcd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -773,7 +773,6 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev = device->lldd_dev;
int sata_idx = sas_dev->sata_idx;
int start, end;
- unsigned long flags;
if (!sata_dev) {
/*
@@ -797,12 +796,12 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
end = 64 * (sata_idx + 2);
}
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
while (1) {
start = find_next_zero_bit(bitmap,
hisi_hba->slot_index_count, start);
if (start >= end) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return -SAS_QUEUE_FULL;
}
/*
@@ -814,7 +813,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
}
set_bit(start, bitmap);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return start;
}
@@ -843,9 +842,8 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
struct hisi_sas_device *sas_dev = NULL;
int i, sata_dev = dev_is_sata(device);
int sata_idx = -1;
- unsigned long flags;
- spin_lock_irqsave(&hisi_hba->lock, flags);
+ spin_lock(&hisi_hba->lock);
if (sata_dev)
if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
@@ -876,7 +874,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
}
out:
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock(&hisi_hba->lock);
return sas_dev;
}
@@ -3111,9 +3109,9 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static void cq_tasklet_v2_hw(unsigned long val)
+static irqreturn_t cq_thread_v2_hw(int irq_no, void *p)
{
- struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+ struct hisi_sas_cq *cq = p;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
struct hisi_sas_itct *itct;
@@ -3181,6 +3179,8 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+
+ return IRQ_HANDLED;
}
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3191,9 +3191,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
- tasklet_schedule(&cq->tasklet);
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
@@ -3360,18 +3358,18 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- struct tasklet_struct *t = &cq->tasklet;
- irq = irq_map[queue_no + 96];
- rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
- DRV_NAME " cq", cq);
+ cq->irq_no = irq_map[queue_no + 96];
+ rc = devm_request_threaded_irq(dev, cq->irq_no,
+ cq_interrupt_v2_hw,
+ cq_thread_v2_hw, IRQF_ONESHOT,
+ DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
goto err_out;
}
- tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
}
hisi_hba->cq_nvecs = hisi_hba->queue_count;
@@ -3432,7 +3430,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v2_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -3551,6 +3548,9 @@ static struct scsi_host_template sht_v2_hw = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
};
@@ -3603,11 +3603,6 @@ static int hisi_sas_v2_probe(struct platform_device *pdev)
static int hisi_sas_v2_remove(struct platform_device *pdev)
{
- struct sas_ha_struct *sha = platform_get_drvdata(pdev);
- struct hisi_hba *hisi_hba = sha->lldd_ha;
-
- hisi_sas_kill_tasklets(hisi_hba);
-
return hisi_sas_remove(pdev);
}
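The v2 conversion above shows the full tasklet-to-threaded-IRQ pattern, which v3 repeats below: the hard handler only acks the interrupt source and returns IRQ_WAKE_THREAD, the old tasklet body becomes the thread function, and IRQF_ONESHOT keeps the line masked until the thread finishes so the two cannot race. A generic sketch with illustrative names:

static irqreturn_t example_hard_irq(int irq, void *p)
{
	/* ack the hardware source here, then defer the real work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *p)
{
	/* former tasklet body: drain the completion queue */
	return IRQ_HANDLED;
}

static int example_init_irq(struct device *dev, int irq_no, void *cq)
{
	return devm_request_threaded_irq(dev, irq_no, example_hard_irq,
					 example_thread_fn, IRQF_ONESHOT,
					 "example cq", cq);
}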
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index bf5d5f138437..a2debe0c8185 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -495,6 +495,13 @@ struct hisi_sas_err_record_v3 {
#define BASE_VECTORS_V3_HW 16
#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
+#define CHNL_INT_STS_MSK 0xeeeeeeee
+#define CHNL_INT_STS_PHY_MSK 0xe
+#define CHNL_INT_STS_INT0_MSK BIT(1)
+#define CHNL_INT_STS_INT1_MSK BIT(2)
+#define CHNL_INT_STS_INT2_MSK BIT(3)
+#define CHNL_WIDTH 4
+
enum {
DSM_FUNC_ERR_HANDLE_MSI = 0,
};
@@ -1819,19 +1826,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
int phy_no = 0;
irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
- & 0xeeeeeeee;
+ & CHNL_INT_STS_MSK;
while (irq_msk) {
- if (irq_msk & (2 << (phy_no * 4)))
+ if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
handle_chl_int0_v3_hw(hisi_hba, phy_no);
- if (irq_msk & (4 << (phy_no * 4)))
+ if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
handle_chl_int1_v3_hw(hisi_hba, phy_no);
- if (irq_msk & (8 << (phy_no * 4)))
+ if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
handle_chl_int2_v3_hw(hisi_hba, phy_no);
- irq_msk &= ~(0xe << (phy_no * 4));
+ irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
phy_no++;
}
@@ -2299,9 +2306,9 @@ out:
return sts;
}
-static void cq_tasklet_v3_hw(unsigned long val)
+static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
{
- struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+ struct hisi_sas_cq *cq = p;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
struct hisi_sas_complete_v3_hdr *complete_queue;
@@ -2338,6 +2345,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+
+ return IRQ_HANDLED;
}
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
@@ -2348,9 +2357,7 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
- tasklet_schedule(&cq->tasklet);
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
@@ -2365,7 +2372,7 @@ static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
BASE_VECTORS_V3_HW);
if (!mask)
goto fallback;
- cq->pci_irq_mask = mask;
+ cq->irq_mask = mask;
for_each_cpu(cpu, mask)
hisi_hba->reply_map[cpu] = queue;
}
@@ -2389,6 +2396,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
.pre_vectors = BASE_VECTORS_V3_HW,
};
+ dev_info(dev, "Enable MSI auto-affinity\n");
+
min_msi = MIN_AFFINE_VECTORS_V3_HW;
hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
@@ -2441,15 +2450,20 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
goto free_irq_vectors;
}
- /* Init tasklets for cq only */
+ if (hisi_sas_intr_conv)
+ dev_info(dev, "Enable interrupt converge\n");
+
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- struct tasklet_struct *t = &cq->tasklet;
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
- unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
-
- rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
- cq_interrupt_v3_hw, irqflags,
+ unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
+ IRQF_ONESHOT;
+
+ cq->irq_no = pci_irq_vector(pdev, nr);
+ rc = devm_request_threaded_irq(dev, cq->irq_no,
+ cq_interrupt_v3_hw,
+ cq_thread_v3_hw,
+ irqflags,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
@@ -2457,8 +2471,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
rc = -ENOENT;
goto free_irq_vectors;
}
-
- tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
}
return 0;
@@ -2534,7 +2546,6 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
- hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -2910,7 +2921,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
- hisi_sas_kill_tasklets(hisi_hba);
+ hisi_sas_sync_irqs(hisi_hba);
}
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
@@ -3075,6 +3086,9 @@ static struct scsi_host_template sht_v3_hw = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = host_attrs_v3_hw,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
@@ -3309,7 +3323,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
- hisi_sas_kill_tasklets(hisi_hba);
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 216e557f703e..1a4ddfacb458 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6876,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap_nocache(page_base,
+ void __iomem *page_remapped = ioremap(page_base,
page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
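
This and the later lasi700, megaraid_mbox, and megaraid_sas hunks drop the _nocache spelling: ioremap() already returns an uncached MMIO mapping on these platforms, and ioremap_nocache() was removed as a redundant alias in the 5.6 cycle. Call sites change in name only; a sketch with hypothetical base/len:

	void __iomem *regs;

	regs = ioremap(base, len);	/* uncached MMIO mapping */
	if (!regs)
		return NULL;
	/* ... readl(regs + off) / writel(val, regs + off) ... */
	iounmap(regs);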
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 54b8c6f9daf4..d9e94e81da01 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1877,7 +1877,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
*/
struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
struct ibmvscsis_cmd *cmd, *nxt;
- struct iu_entry *iue;
long rc = ADAPT_SUCCESS;
bool retry = false;
@@ -1931,8 +1930,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
*/
vscsi->credit += 1;
} else {
- iue = cmd->iue;
-
crq->valid = VALID_CMD_RESP_EL;
crq->format = cmd->rsp.format;
@@ -3796,7 +3793,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
se_cmd);
struct iu_entry *iue = cmd->iue;
struct scsi_info *vscsi = cmd->adapter;
- char *sd;
uint len = 0;
int rc;
@@ -3804,7 +3800,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
1);
if (rc) {
dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
- sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
/* Logical Unit Communication Time-out asc/ascq = 0x0801 */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 41fd64c9c8e9..1d39628ac947 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1640,7 +1640,7 @@ static int initio_state_6(struct initio_host * host)
*
*/
-int initio_state_7(struct initio_host * host)
+static int initio_state_7(struct initio_host * host)
{
int cnt, i;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 079c04bc448a..ae45cbe98ae2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6727,6 +6727,9 @@ static struct scsi_host_template driver_template = {
.name = "IPR",
.info = ipr_ioa_info,
.ioctl = ipr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ipr_ioctl,
+#endif
.queuecommand = ipr_queuecommand,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 1727d0c71b12..b48aac8dfcb8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -168,6 +168,9 @@ static struct scsi_host_template isci_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = isci_host_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0bc63a7ab41c..b5dd1caae5e9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -887,6 +887,10 @@ free_host:
static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+
+ if (WARN_ON_ONCE(session->leadconn))
+ return;
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index abac2f350aee..c48a73a0f517 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->base = ioremap(base, 0x100);
hostdata->differential = 0;
if (dev->id.sversion == LASI_700_SVERSION) {
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e9e00740f7ca..c5a828a041e0 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -137,7 +137,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else {
ac = sas_to_ata_err(stat);
if (ac) {
- pr_warn("%s: SAS error %x\n", __func__, stat->stat);
+ pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
/* We saw a SAS error. Send a vague error. */
if (!link->sactive) {
qc->err_mask = ac;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d7302c2052f9..daf951b0b3f5 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -179,7 +179,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
res = i->dft->lldd_dev_found(dev);
if (res) {
- pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
+ pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 9fdb9c9fbda4..ab671cdd4cfb 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -500,7 +500,7 @@ static int sas_ex_general(struct domain_device *dev)
ex_assign_report_general(dev, rg_resp);
if (dev->ex_dev.configuring) {
- pr_debug("RG: ex %llx self-configuring...\n",
+ pr_debug("RG: ex %016llx self-configuring...\n",
SAS_ADDR(dev->sas_addr));
schedule_timeout_interruptible(5*HZ);
} else
@@ -881,7 +881,7 @@ static struct domain_device *sas_ex_discover_end_dev(
res = sas_discover_end_dev(child);
if (res) {
- pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
+ pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_list_del;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 01f1738ce6df..1f1d01901978 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -107,7 +107,7 @@ static inline void sas_smp_host_handler(struct bsg_job *job,
static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
{
- pr_warn("%s: for %s device %16llx returned %d\n",
+ pr_warn("%s: for %s device %016llx returned %d\n",
func, dev->parent ? "exp-attached" :
"direct-attached",
SAS_ADDR(dev->sas_addr), err);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 7c86fd248129..19cf418928fa 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -165,7 +165,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
}
sas_port_add_phy(port->port, phy->phy);
- pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
+ pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n",
dev_name(&phy->phy->dev), dev_name(&port->port->dev),
port->phy_mask,
SAS_ADDR(port->attached_sas_addr));
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index bec83eb8ab87..9e0975e55c27 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -330,7 +330,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int_to_scsilun(cmd->device->lun, &lun);
- pr_notice("eh: device %llx LUN %llx has the task\n",
+ pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
@@ -615,7 +615,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
- pr_notice("dev %016llx LU %llx is recovered\n",
+ pr_notice("dev %016llx LU 0x%llx is recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
@@ -666,7 +666,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
- pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n",
+ pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
@@ -851,7 +851,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
if (scsi_dev->tagged_supported) {
scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
} else {
- pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
+ pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
SAS_ADDR(dev->sas_addr), scsi_dev->lun);
scsi_change_queue_depth(scsi_dev, 1);
}
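
The libsas hunks above only touch log formatting: SAS addresses are 64-bit values, so %016llx pads them to a fixed 16-digit width where %llx produced a variable one, and raw status codes gain an explicit 0x prefix. A userspace illustration with a hypothetical address:

#include <stdio.h>

int main(void)
{
	unsigned long long sas_addr = 0x5000c50012ab34ULL;	/* hypothetical */

	printf("%llx\n", sas_addr);	/* 5000c50012ab34   (14 digits) */
	printf("%016llx\n", sas_addr);	/* 005000c50012ab34 (always 16) */
	return 0;
}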
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index 1ded7d85027e..e2d42593ce52 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -27,7 +27,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
if (iu->status != SAM_STAT_CHECK_CONDITION)
- dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
+ dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
SAS_ADDR(task->dev->sas_addr), iu->status);
}
else
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 935f98804198..04d73e2be373 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1223,6 +1223,8 @@ struct lpfc_hba {
#define LPFC_POLL_HB 1 /* slowpath heartbeat */
#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
+
+ char os_host_name[MAXHOSTNAMELEN];
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ff82b36a37a..46f56f30f77e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4123,14 +4123,13 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
/*
* The 'topology' is not a configurable parameter if :
* - persistent topology enabled
- * - G7 adapters
- * - G6 with no private loop support
+ * - G7/G6 with no private loop support
*/
- if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+ if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
(!phba->sli4_hba.pc_sli4_params.pls &&
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
- phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3114 Loop mode not supported\n");
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee353c84a097..25d3dd39bc05 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_fdmi_change_check(struct lpfc_vport *vport);
void lpfc_delayed_disc_tmo(struct timer_list *);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 99c9bb249758..58b35a1442c1 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1493,33 +1493,35 @@ int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
- char fwrev[FW_REV_STR_SIZE];
- int n;
+ char fwrev[FW_REV_STR_SIZE] = {0};
+ char tmp[MAXHOSTNAMELEN] = {0};
- lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ memset(symbol, 0, size);
- n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
- if (size < n)
- return n;
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " DV%s.",
- lpfc_release_version);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " HN:%s.",
- init_utsname()->nodename);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
/* Note :- OS name is "Linux" */
- n += scnprintf(symbol + n, size - n, " OS:%s",
- init_utsname()->sysname);
- return n;
+ scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
+ strlcat(symbol, tmp, size);
+
+buffer_done:
+ return strnlen(symbol, size);
+
}
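
The rewrite above swaps a scnprintf() chain for strlcat() accumulation because the old truncation checks were dead code: scnprintf() returns the number of characters actually written, which is always strictly less than the buffer size, so "if (size < n)" could never fire. strlcat() instead returns the total length it tried to create, so a result >= size reliably reports truncation. The pattern in isolation, with hypothetical buf/tmp/model:

	char buf[64] = "";
	char tmp[32];

	scnprintf(tmp, sizeof(tmp), "Emulex %s", model);
	if (strlcat(buf, tmp, sizeof(buf)) >= sizeof(buf))
		goto done;	/* destination full, stop appending */

	/* ... further scnprintf() + strlcat() rounds ... */
done:
	return strnlen(buf, sizeof(buf));	/* length actually stored */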
static uint32_t
@@ -1998,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
* @vport: pointer to a host virtual N_Port data structure.
*
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register thar port Attribute.
+ * Check how many mapped NPorts we are connected to.
+ * Check if our hostname changed.
+ * Called from the hbeat timeout routine to check if any FDMI parameters
+ * have changed. If so, re-register those Attributes.
*/
void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
@@ -2018,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
if (!(vport->fc_flag & FC_FABRIC))
return;
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /* Check if system hostname changed */
+ if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+ /* Since this affects multiple HBA and PORT attributes, we need to
+ * de-register and go through the whole FDMI registration cycle.
+ * DHBA -> DPRT -> RHBA -> RPA (physical port)
+ * DPRT -> RPRT (vports)
+ */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+ /* Since this code path registers all the port attributes,
+ * we can just return without further checking.
+ */
+ return;
+ }
+
if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
return;
+ /* Check if the number of mapped NPorts changed */
cnt = lpfc_find_map_node(vport);
if (cnt == vport->fdmi_num_disc)
return;
- ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- return;
-
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
@@ -2616,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- init_utsname()->nodename);
+ scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+ vport->phba->os_host_name);
len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2e6a68d9ea4f..819335b16c2e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2085,6 +2085,8 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
int copied = 0;
struct lpfc_dmabuf *dmabuf, *next;
+ memset(buffer, 0, size);
+
spin_lock_irq(&phba->hbalock);
if (phba->ras_fwlog.state != ACTIVE) {
spin_unlock_irq(&phba->hbalock);
@@ -2094,10 +2096,15 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
list_for_each_entry_safe(dmabuf, next,
&phba->ras_fwlog.fwlog_buff_list, list) {
+ /* Stop before overrunning size, leaving room for a '\0' char */
+ if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) {
+ memcpy(buffer + copied, dmabuf->virt,
+ size - copied - 1);
+ copied += size - copied - 1;
+ break;
+ }
memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
copied += LPFC_RAS_MAX_ENTRY_SIZE;
- if (size > copied)
- break;
}
return copied;
}
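
This hunk fixes an inverted bounds check: the old code copied a chunk first and then did "if (size > copied) break;", which bails out after the first chunk whenever room remains and never guards against overrun. The new code tests the remaining space before each copy and truncates the last chunk, keeping one byte free so the (now pre-zeroed) buffer stays NUL-terminated. The guard in isolation, with CHUNK standing in for LPFC_RAS_MAX_ENTRY_SIZE:

	if (copied + CHUNK >= size - 1) {
		memcpy(buffer + copied, chunk, size - copied - 1);
		copied += size - copied - 1;	/* buffer now full */
		break;
	}
	memcpy(buffer + copied, chunk, CHUNK);
	copied += CHUNK;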
@@ -5385,7 +5392,6 @@ static const struct file_operations lpfc_debugfs_ras_log = {
.read = lpfc_debugfs_read,
.release = lpfc_debugfs_ras_log_release,
};
-#endif
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5557,7 +5563,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.write = lpfc_idiag_extacc_write,
.release = lpfc_idiag_cmd_release,
};
-
+#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
* @phba: Pointer to HBA context object.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 85ada3deb47d..dcc8999c6a68 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -3315,6 +3316,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
lpfc_sli4_clear_fcf_rr_bmask(phba);
}
+ /* Prepare for LINK up registrations */
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 25cdcbc2b02f..9a064b96e570 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3925,6 +3925,9 @@ struct lpfc_mbx_wr_object {
#define LPFC_CHANGE_STATUS_FW_RESET 0x02
#define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04
#define LPFC_CHANGE_STATUS_PCI_RESET 0x05
+#define lpfc_wr_object_csf_SHIFT 8
+#define lpfc_wr_object_csf_MASK 0x00000001
+#define lpfc_wr_object_csf_WORD word5
} response;
} u;
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6298b1729098..5a605773dd0a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1362,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_rcv_seq_check_edtov(vports[i]);
- lpfc_fdmi_num_disc_check(vports[i]);
+ lpfc_fdmi_change_check(vports[i]);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "1804 Invalid asynchrous event code: "
+ "1804 Invalid asynchronous event code: "
"x%x\n", bf_get(lpfc_trailer_code,
&cq_event->cqe.mcqe_cmpl));
break;
@@ -8320,14 +8320,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
phba->hba_flag |= HBA_PERSISTENT_TOPO;
switch (phba->pcidev->device) {
case PCI_DEVICE_ID_LANCER_G7_FC:
- if (tf || (pt == LINK_FLAGS_LOOP)) {
- /* Invalid values from FW - use driver params */
- phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
- } else {
- /* Prism only supports PT2PT topology */
- phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
- }
- break;
case PCI_DEVICE_ID_LANCER_G6_FC:
if (!tf) {
phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
@@ -10449,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
case LPFC_SLI_INTF_IF_TYPE_6:
iounmap(phba->sli4_hba.drbl_regs_memmap_p);
iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index ae4359013846..a024e5a3918f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -308,7 +308,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
mb->mbxStatus);
mempool_free(login_mbox, phba->mbox_mem_pool);
mempool_free(link_mbox, phba->mbox_mem_pool);
- lpfc_sli_release_iocbq(phba, save_iocb);
+ kfree(save_iocb);
return;
}
@@ -325,7 +325,61 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
}
mempool_free(link_mbox, phba->mbox_mem_pool);
- lpfc_sli_release_iocbq(phba, save_iocb);
+ kfree(save_iocb);
+}
+
+/**
+ * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function provides the unreg rpi mailbox completion handler for a tgt.
+ * The routine frees the memory resources associated with the completed
+ * mailbox command and transmits the ELS ACC.
+ *
+ * This routine is only called if we are SLI4, acting in target
+ * mode and the remote NPort issues the PLOGI after link up.
+ **/
+static void
+lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+ LPFC_MBOXQ_t *mbox = pmb->context3;
+ struct lpfc_iocbq *piocb = NULL;
+ int rc;
+
+ if (mbox) {
+ pmb->context3 = NULL;
+ piocb = mbox->context3;
+ mbox->context3 = NULL;
+ }
+
+ /*
+ * Complete the unreg rpi mbx request, and update flags.
+ * This will also restart any deferred events.
+ */
+ lpfc_nlp_get(ndlp);
+ lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+
+ if (!piocb) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+ "4578 PLOGI ACC fail\n");
+ if (mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+ if (rc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+ "4579 PLOGI ACC fail %x\n", rc);
+ if (mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ kfree(piocb);
+out:
+ lpfc_nlp_put(ndlp);
}
static int
@@ -345,6 +399,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
+ u16 rpi;
int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
@@ -488,7 +543,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
link_mbox->vport = vport;
link_mbox->ctx_ndlp = ndlp;
- save_iocb = lpfc_sli_get_iocbq(phba);
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
if (!save_iocb)
goto out;
/* Save info from cmd IOCB used in rsp */
@@ -513,7 +568,36 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->nvmet_support && !defer_acc) {
+ link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!link_mbox)
+ goto out;
+
+ /* As unique identifiers such as iotag would be overwritten
+ * with those from the cmdiocb, allocate separate temporary
+ * storage for the copy.
+ */
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+ if (!save_iocb)
+ goto out;
+
+ /* Unreg RPI is required for SLI4. */
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+ link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+
+ if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+
+ /* Save info from cmd IOCB used in rsp */
+ memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+
+ /* Delay sending ACC till unreg RPI completes. */
+ defer_acc = 1;
+ } else if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
@@ -553,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if ((vport->port_type == LPFC_NPIV_PORT &&
vport->cfg_restrict_login)) {
+ /* no deferred ACC */
+ kfree(save_iocb);
+
/* In order to preserve RPIs, we want to cleanup
* the default RPI the firmware created to rcv
* this ELS request. The only way to do this is
@@ -571,8 +658,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (defer_acc) {
/* So the order here should be:
- * Issue CONFIG_LINK mbox
- * CONFIG_LINK cmpl
+ * SLI3 pt2pt
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * SLI4 tgt
+ * Issue UNREG RPI mbx
+ * UNREG RPI cmpl
* Issue PLOGI ACC
* PLOGI ACC cmpl
* Issue REG_LOGIN mbox
@@ -596,10 +687,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
out:
if (defer_acc)
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "4577 pt2pt discovery failure: %p %p %p\n",
+ "4577 discovery failure: %p %p %p\n",
save_iocb, link_mbox, login_mbox);
- if (save_iocb)
- lpfc_sli_release_iocbq(phba, save_iocb);
+ kfree(save_iocb);
if (link_mbox)
mempool_free(link_mbox, phba->mbox_mem_pool);
if (login_mbox)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b138d9fee675..2c7e0b22db2f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -481,7 +481,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
- if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
@@ -528,7 +528,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c82b5792da98..64002b0cb02d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4918,8 +4918,17 @@ static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
phba->hbq_in_use = 1;
- phba->hbqs[LPFC_ELS_HBQ].entry_count =
- lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
+ /*
+ * Specific case when MDS diagnostics are enabled and supported.
+ * The receive buffer count is truncated to manage the incoming
+ * traffic.
+ */
+ if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
+ else
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
phba->hbq_count = 1;
lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
/* Initially populate or replenish the HBQs */
@@ -8555,7 +8564,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
- /* wake up worker thread to post asynchronlous mailbox command */
+ /* wake up worker thread to post asynchronous mailbox command */
lpfc_worker_wake_up(phba);
}
@@ -8823,7 +8832,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
return rc;
}
- /* Now, interrupt mode asynchrous mailbox command */
+ /* Now, interrupt mode asynchronous mailbox command */
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13112,11 +13121,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
}
/**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
* @cqe: Pointer to mailbox completion queue entry.
*
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine processes a mailbox completion queue entry with asynchronous
* event.
*
* Return: true if work posted to worker thread, otherwise false.
@@ -13270,7 +13279,7 @@ out_no_mqe_complete:
* @cqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry; it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
* according to the MCQE's async bit.
*
* Return: true if work posted to worker thread, otherwise false.
@@ -19449,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
- uint32_t shdr_status, shdr_add_status, shdr_change_status;
+ uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
uint32_t mbox_tmo;
struct lpfc_dmabuf *dmabuf;
uint32_t written = 0;
@@ -19506,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
if (check_change_status) {
shdr_change_status = bf_get(lpfc_wr_object_change_status,
&wr_object->u.response);
+
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+ shdr_csf = bf_get(lpfc_wr_object_csf,
+ &wr_object->u.response);
+ if (shdr_csf)
+ shdr_change_status =
+ LPFC_CHANGE_STATUS_PCI_RESET;
+ }
+
switch (shdr_change_status) {
case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9e5ff58edaca..9563c49f36ab 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.6.0.2"
+#define LPFC_DRIVER_VERSION "12.6.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index f6ac819e6e96..8443f2f35be2 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter)
goto out_free_raid_dev;
}
- raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+ raid_dev->baseaddr = ioremap(raid_dev->baseport, 128);
if (!raid_dev->baseaddr) {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index bd8184072bed..83d8c4cb1ad5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.710.50.00-rc1"
-#define MEGASAS_RELDATE "June 28, 2019"
+#define MEGASAS_VERSION "07.713.01.00-rc1"
+#define MEGASAS_RELDATE "Dec 27, 2019"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -2233,9 +2233,9 @@ enum MR_PD_TYPE {
/* JBOD Queue depth definitions */
#define MEGASAS_SATA_QD 32
-#define MEGASAS_SAS_QD 64
+#define MEGASAS_SAS_QD 256
#define MEGASAS_DEFAULT_PD_QD 64
-#define MEGASAS_NVME_QD 32
+#define MEGASAS_NVME_QD 64
#define MR_DEFAULT_NVME_PAGE_SIZE 4096
#define MR_DEFAULT_NVME_PAGE_SHIFT 12
@@ -2640,10 +2640,11 @@ enum MEGASAS_OCR_CAUSE {
};
enum DCMD_RETURN_STATUS {
- DCMD_SUCCESS = 0,
- DCMD_TIMEOUT = 1,
- DCMD_FAILED = 2,
- DCMD_NOT_FIRED = 3,
+ DCMD_SUCCESS = 0x00,
+ DCMD_TIMEOUT = 0x01,
+ DCMD_FAILED = 0x02,
+ DCMD_BUSY = 0x03,
+ DCMD_INIT = 0xff,
};
u8
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a4bc81479284..fd4b5ac6ac5b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1099,7 +1099,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- return DCMD_NOT_FIRED;
+ return DCMD_INIT;
}
instance->instancet->issue_dcmd(instance, cmd);
@@ -1123,19 +1123,19 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
- cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ cmd->cmd_status_drv = DCMD_INIT;
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- return DCMD_NOT_FIRED;
+ return DCMD_INIT;
}
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+ cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
if (!ret) {
dev_err(&instance->pdev->dev,
"DCMD(opcode: 0x%x) is timed out, func:%s\n",
@@ -1144,10 +1144,9 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
}
} else
wait_event(instance->int_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+ cmd->cmd_status_drv != DCMD_INIT);
- return (cmd->cmd_status_drv == MFI_STAT_OK) ?
- DCMD_SUCCESS : DCMD_FAILED;
+ return cmd->cmd_status_drv;
}
/**
@@ -1190,19 +1189,19 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1;
- cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ cmd->cmd_status_drv = DCMD_INIT;
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- return DCMD_NOT_FIRED;
+ return DCMD_INIT;
}
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->abort_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+ cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
if (!ret) {
opcode = cmd_to_abort->frame->dcmd.opcode;
dev_err(&instance->pdev->dev,
@@ -1212,13 +1211,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
}
} else
wait_event(instance->abort_cmd_wait_q,
- cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+ cmd->cmd_status_drv != DCMD_INIT);
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
- return (cmd->cmd_status_drv == MFI_STAT_OK) ?
- DCMD_SUCCESS : DCMD_FAILED;
+ return cmd->cmd_status_drv;
}
/**
@@ -1887,6 +1885,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
+
+ if (!raid->flags.isEPD)
+ sdev->no_write_same = 1;
+
} else if (instance->use_seqnum_jbod_fp) {
pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
@@ -2150,6 +2152,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ dev_warn(&instance->pdev->dev,
+ "Adapter already dead, skipping kill HBA\n");
+ return;
+ }
+
/* Set critical error to block I/O & ioctls in case caller didn't */
atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
/* Wait 1 second to ensure IO or ioctls in build have posted */
@@ -2726,7 +2734,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
"reset queue\n",
reset_cmd);
- reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ reset_cmd->cmd_status_drv = DCMD_INIT;
instance->instancet->fire_cmd(instance,
reset_cmd->frame_phys_addr,
0, instance->reg_set);
@@ -3416,7 +3424,6 @@ static struct scsi_host_template megasas_template = {
.bios_param = megasas_bios_param,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
- .no_write_same = 1,
};
/**
@@ -3432,7 +3439,11 @@ static void
megasas_complete_int_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
- cmd->cmd_status_drv = cmd->frame->io.cmd_status;
+ if (cmd->cmd_status_drv == DCMD_INIT)
+ cmd->cmd_status_drv =
+ (cmd->frame->io.cmd_status == MFI_STAT_OK) ?
+ DCMD_SUCCESS : DCMD_FAILED;
+
wake_up(&instance->int_cmd_wait_q);
}
@@ -3451,7 +3462,7 @@ megasas_complete_abort(struct megasas_instance *instance,
{
if (cmd->sync_cmd) {
cmd->sync_cmd = 0;
- cmd->cmd_status_drv = 0;
+ cmd->cmd_status_drv = DCMD_SUCCESS;
wake_up(&instance->abort_cmd_wait_q);
}
}
@@ -3727,7 +3738,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
dev_notice(&instance->pdev->dev, "%p synchronous cmd"
"on the internal reset queue,"
"issue it again.\n", cmd);
- cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+ cmd->cmd_status_drv = DCMD_INIT;
instance->instancet->fire_cmd(instance,
cmd->frame_phys_addr,
0, instance->reg_set);
@@ -4392,7 +4403,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
- test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
+ &instance->reset_flags))
return IGNORE_TIMEOUT;
else
return INITIATE_OCR;
@@ -5875,7 +5887,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
base_addr = pci_resource_start(instance->pdev, instance->bar);
- instance->reg_set = ioremap_nocache(base_addr, 8192);
+ instance->reg_set = ioremap(base_addr, 8192);
if (!instance->reg_set) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
@@ -7592,7 +7604,7 @@ megasas_resume(struct pci_dev *pdev)
int rval;
struct Scsi_Host *host;
struct megasas_instance *instance;
- int irq_flags = PCI_IRQ_LEGACY;
+ u32 status_reg;
instance = pci_get_drvdata(pdev);
@@ -7620,9 +7632,35 @@ megasas_resume(struct pci_dev *pdev)
/*
* We expect the FW state to be READY
*/
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0)) {
+ dev_info(&instance->pdev->dev,
+ "Failed to transition controller to ready from %s!\n",
+ __func__);
+ if (instance->adapter_type != MFI_SERIES) {
+ status_reg =
+ instance->instancet->read_fw_status_reg(instance);
+ if (!(status_reg & MFI_RESET_ADAPTER) ||
+ ((megasas_adp_reset_wait_for_ready
+ (instance, true, 0)) == FAILED))
+ goto fail_ready_state;
+ } else {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ /* waiting for about 30 seconds before retry */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "FW restarted successfully from %s!\n",
+ __func__);
+ }
if (megasas_set_dma_mask(instance))
goto fail_set_dma_mask;
@@ -7634,16 +7672,15 @@ megasas_resume(struct pci_dev *pdev)
atomic_set(&instance->ldio_outstanding, 0);
/* Now re-enable MSI-X */
- if (instance->msix_vectors) {
- irq_flags = PCI_IRQ_MSIX;
- if (instance->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ if (instance->msix_vectors)
+ megasas_alloc_irq_vectors(instance);
+
+ if (!instance->msix_vectors) {
+ rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
+ PCI_IRQ_LEGACY);
+ if (rval < 0)
+ goto fail_reenable_msix;
}
- rval = pci_alloc_irq_vectors(instance->pdev, 1,
- instance->msix_vectors ?
- instance->msix_vectors : 1, irq_flags);
- if (rval < 0)
- goto fail_reenable_msix;
megasas_setup_reply_map(instance);
@@ -8036,6 +8073,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
dma_addr_t sense_handle;
unsigned long *sense_ptr;
u32 opcode = 0;
+ int ret = DCMD_SUCCESS;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -8176,13 +8214,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+
+ ret = megasas_issue_blocked_cmd(instance, cmd, 0);
+ switch (ret) {
+ case DCMD_INIT:
+ case DCMD_BUSY:
cmd->sync_cmd = 0;
dev_err(&instance->pdev->dev,
"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
- __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
- cmd->cmd_status_drv);
- return -EBUSY;
+ __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+ cmd->cmd_status_drv);
+ error = -EBUSY;
+ goto out;
}
cmd->sync_cmd = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index e301458bcbae..f3b36fd0a0eb 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -364,6 +364,35 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
instance->max_fw_cmds = instance->max_fw_cmds-1;
}
}
+
+static inline void
+megasas_get_msix_index(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd,
+ struct megasas_cmd_fusion *cmd,
+ u8 data_arms)
+{
+ int sdev_busy;
+
+ /* nr_hw_queue = 1 for MegaRAID */
+ struct blk_mq_hw_ctx *hctx =
+ scmd->device->request_queue->queue_hw_ctx[0];
+
+ sdev_busy = atomic_read(&hctx->nr_active);
+
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+ MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+ else if (instance->msix_load_balance)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+ instance->msix_vectors));
+ else
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
+}
+
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
* @instance: Adapter soft state
@@ -1312,7 +1341,9 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
}
if (ret == DCMD_TIMEOUT)
- megaraid_sas_kill_hba(instance);
+ dev_warn(&instance->pdev->dev,
+ "%s DCMD timed out, continue without JBOD sequence map\n",
+ __func__);
if (ret == DCMD_SUCCESS)
instance->pd_seq_map_id++;
@@ -1394,7 +1425,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
ret = megasas_issue_polled(instance, cmd);
if (ret == DCMD_TIMEOUT)
- megaraid_sas_kill_hba(instance);
+ dev_warn(&instance->pdev->dev,
+ "%s DCMD timed out, RAID map is disabled\n",
+ __func__);
megasas_return_cmd(instance, cmd);
@@ -2825,19 +2858,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
- if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
- atomic_read(&scp->device->device_busy) >
- (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
- cmd->request_desc->SCSIIO.MSIxIndex =
- mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
- MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
- cmd->request_desc->SCSIIO.MSIxIndex =
- (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
- instance->msix_vectors));
- else
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ megasas_get_msix_index(instance, scp, cmd, io_info.data_arms);
if (instance->adapter_type >= VENTURA_SERIES) {
/* FP for Optimal raid level 1.
@@ -3158,18 +3179,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
- atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
- cmd->request_desc->SCSIIO.MSIxIndex =
- mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
- MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
- cmd->request_desc->SCSIIO.MSIxIndex =
- (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
- instance->msix_vectors));
- else
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ megasas_get_msix_index(instance, scmd, cmd, 1);
if (!fp_possible) {
/* system pd firmware path */
@@ -4219,7 +4229,8 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+ bool return_ioctl)
{
int j;
struct megasas_cmd_fusion *cmd_fusion;
@@ -4283,6 +4294,16 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
break;
}
+ if (return_ioctl && cmd_mfi->sync_cmd &&
+ cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
+ dev_err(&instance->pdev->dev,
+ "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n",
+ __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
+ le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
+ cmd_mfi->cmd_status_drv = DCMD_BUSY;
+ result = COMPLETE_CMD;
+ }
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4298,6 +4319,37 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
}
/*
+ * megasas_return_polled_cmds: Return polled mode commands back to the pool
+ * before initiating an OCR.
+ * @instance: Controller's soft instance
+ */
+static void
+megasas_return_polled_cmds(struct megasas_instance *instance)
+{
+ int i;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+
+ fusion = instance->ctrl_context;
+
+ for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+ if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
+ if (megasas_dbg_lvl & OCR_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "%s %d return cmd 0x%x opcode 0x%x\n",
+ __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
+ le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
+ cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
+ megasas_return_cmd(instance, cmd_mfi);
+ }
+ }
+}
+
+/*
* megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
* @instance: per adapter struct
* @channel: the channel assigned by the OS
@@ -4847,6 +4899,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
megasas_sync_irqs((unsigned long)instance);
@@ -4951,7 +5004,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
goto kill_hba;
}
- megasas_refire_mgmt_cmd(instance);
+ megasas_refire_mgmt_cmd(instance,
+ (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1)
+ ? 1 : 0));
/* Reset load balance info */
if (fusion->load_balance_info)
@@ -4959,8 +5014,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
(sizeof(struct LD_LOAD_BALANCE_INFO) *
MAX_LOGICAL_DRIVES_EXT));
- if (!megasas_get_map_info(instance))
+ if (!megasas_get_map_info(instance)) {
megasas_sync_map_info(instance);
+ } else {
+ /*
+ * Return pending polled mode cmds before
+ * retrying OCR
+ */
+ megasas_return_polled_cmds(instance);
+ continue;
+ }
megasas_setup_jbod_map(instance);
@@ -4987,6 +5050,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
megasas_set_dynamic_target_properties(sdev, is_target_prop);
}
+ status_reg = instance->instancet->read_fw_status_reg
+ (instance);
+ abs_state = status_reg & MFI_STATE_MASK;
+ if (abs_state != MFI_STATE_OPERATIONAL) {
+ dev_info(&instance->pdev->dev,
+ "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n",
+ abs_state, instance->host->host_no);
+ goto out;
+ }
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
dev_info(&instance->pdev->dev,
@@ -5046,7 +5118,7 @@ kill_hba:
instance->skip_heartbeat_timer_del = 1;
retval = FAILED;
out:
- clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
mutex_unlock(&instance->reset_mutex);
return retval;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index c013c80fe4e6..d57ecc7f88d8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
+#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
@@ -864,9 +865,20 @@ struct MR_LD_RAID {
u8 regTypeReqOnRead;
__le16 seqNum;
- struct {
- u32 ldSyncRequired:1;
- u32 reserved:31;
+struct {
+#ifndef MFI_BIG_ENDIAN
+ u32 ldSyncRequired:1;
+ u32 regTypeReqOnReadIsValid:1;
+ u32 isEPD:1;
+ u32 enableSLDOnAllRWIOs:1;
+ u32 reserved:28;
+#else
+ u32 reserved:28;
+ u32 enableSLDOnAllRWIOs:1;
+ u32 isEPD:1;
+ u32 regTypeReqOnReadIsValid:1;
+ u32 ldSyncRequired:1;
+#endif
} flags;
u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 18b1e31b5eb8..ed3923f8db4f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -122,6 +122,9 @@
* 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_IOCSTATUS_FAILURE
* 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT
+ * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT
+ * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT
* --------------------------------------------------------------------------
*/
@@ -162,7 +165,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x36)
+#define MPI2_HEADER_VERSION_UNIT (0x39)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -181,6 +184,7 @@
#define MPI2_IOC_STATE_READY (0x10000000)
#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
#define MPI2_IOC_STATE_FAULT (0x40000000)
+#define MPI2_IOC_STATE_COREDUMP (0x50000000)
#define MPI2_IOC_STATE_MASK (0xF0000000)
#define MPI2_IOC_STATE_SHIFT (28)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 3a6871aecada..43a3bf8ff428 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -249,6 +249,8 @@
* 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1
* Added DMDReport Delay Time defines to PCIeIOUnitPage1
* 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
+ * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
*/
#ifndef MPI2_CNFG_H
@@ -891,6 +893,8 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020)
+#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010)
/*
*Generic structure to use for product-specific manufacturing pages
@@ -962,9 +966,10 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
/* IO Unit Page 1 Flags defines */
#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000)
#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
@@ -3931,7 +3936,13 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
U32 MaximumDataTransferSize; /*0x0C */
U32 Capabilities; /*0x10 */
U16 NOIOB; /* 0x14 */
- U16 Reserved2; /* 0x16 */
+ U16 ShutdownLatency; /* 0x16 */
+ U16 VendorID; /* 0x18 */
+ U16 DeviceID; /* 0x1A */
+ U16 SubsystemVendorID; /* 0x1C */
+ U16 SubsystemID; /* 0x1E */
+ U8 RevisionID; /* 0x20 */
+ U8 Reserved21[3]; /* 0x21 */
} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
index a3f677853098..33b9c3a6fd40 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -19,6 +19,10 @@
* 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
* 12-17-18 02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP
* Shorten some defines to be compatible with DOS
+ * 06-24-19 02.06.05 Whitespace adjustments to help with identifier
+ * checking tool.
+ * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP
+ * Added MPI2_FLASH_REGION_COREDUMP
*/
#ifndef MPI2_IMAGE_H
#define MPI2_IMAGE_H
@@ -213,6 +217,8 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E)
#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147)
#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250)
+/* little-endian "DUMP" */
+#define MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544)
/**** Definitions for Signature2 field ****/
#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
@@ -359,6 +365,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
#define MPI2_FLASH_REGION_CPLD (0x15)
#define MPI2_FLASH_REGION_PSOC (0x16)
+#define MPI2_FLASH_REGION_COREDUMP (0x17)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 68ea408cd5c5..e83c7c529dc9 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -175,6 +175,10 @@
* Moved FW image definitions into new mpi2_image.h
* 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
* 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE
+ * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
+ * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
+ * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
* --------------------------------------------------------------------------
*/
@@ -248,6 +252,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
/*ConfigurationFlags */
#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001)
+#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002)
/*minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
@@ -377,6 +382,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000)
#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
@@ -1458,8 +1464,8 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
/*MPI v2.6 and newer */
#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15)
#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
+#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
-#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF)
/*MPI v2.0 FWDownload TransactionContext Element */
typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 848fbec7bda6..663782bb790d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -124,7 +124,14 @@ enum mpt3sas_perf_mode {
};
static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
+ u32 ioc_state, int timeout);
+static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
* mpt3sas_base_check_cmd_timeout - Function
@@ -609,7 +616,8 @@ _base_fault_reset_work(struct work_struct *work)
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery || ioc->pci_error_recovery)
+ if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
+ ioc->pci_error_recovery)
goto rearm_timer;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
@@ -656,20 +664,64 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
- ioc->non_operational_loop = 0;
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+ u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+ ioc->manu_pg11.CoreDumpTOSec :
+ MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+ timeout /= (FAULT_POLLING_INTERVAL/1000);
+
+ if (ioc->ioc_coredump_loop == 0) {
+ mpt3sas_print_coredump_info(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ /* do not accept any IOs and disable the interrupts */
+ spin_lock_irqsave(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ _base_mask_interrupts(ioc);
+ _base_clear_outstanding_commands(ioc);
+ }
+
+ ioc_info(ioc, "%s: CoreDump loop %d.",
+ __func__, ioc->ioc_coredump_loop);
+ /* Wait until CoreDump completes or times out */
+ if (ioc->ioc_coredump_loop++ < timeout) {
+ spin_lock_irqsave(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ goto rearm_timer;
+ }
+ }
+
+ if (ioc->ioc_coredump_loop) {
+ if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
+ ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
+ __func__, ioc->ioc_coredump_loop);
+ else
+ ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
+ __func__, ioc->ioc_coredump_loop);
+ ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
+ }
+ ioc->non_operational_loop = 0;
if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc_warn(ioc, "%s: hard reset: %s\n",
__func__, rc == 0 ? "success" : "failed");
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
- if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
- mpt3sas_base_fault_info(ioc, doorbell &
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP)
+ mpt3sas_print_coredump_info(ioc, doorbell &
MPI2_DOORBELL_DATA_MASK);
if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
MPI2_IOC_STATE_OPERATIONAL)
return; /* don't rearm timer */
}
+ ioc->ioc_coredump_loop = 0;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
rearm_timer:
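The hunk above turns the CoreDump timeout into a bounded number of watchdog passes: the seconds value from Manufacturing Page 11 (or the 15 s default) is divided by the per-pass delay, and ioc_coredump_loop is counted against the result. A minimal userspace sketch of that arithmetic, assuming FAULT_POLLING_INTERVAL is the driver's 1000 ms polling tick (the macro itself is defined outside this patch):

#include <stdio.h>

#define FAULT_POLLING_INTERVAL 1000 /* ms per watchdog pass (assumed) */
#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS 15 /* from mpt3sas_base.h below */

int main(void)
{
	unsigned char coredump_to_sec = 0; /* 0: CoreDumpTOSec unset, use default */
	unsigned char timeout = coredump_to_sec ?
	    coredump_to_sec : MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

	/* seconds -> watchdog iterations, exactly as in the hunk above */
	timeout /= (FAULT_POLLING_INTERVAL / 1000);

	printf("rearm up to %u times before declaring CoreDump timed out\n",
	       timeout);
	return 0;
}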
@@ -749,6 +801,49 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
}
/**
+ * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
+{
+ ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
+}
+
+/**
+ * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
+ * completes or times out
+ * @ioc: per adapter object
+ * @caller: caller function name
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+ const char *caller)
+{
+ u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+ ioc->manu_pg11.CoreDumpTOSec :
+ MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+ int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
+ timeout);
+
+ if (ioc_state)
+ ioc_err(ioc,
+ "%s: CoreDump timed out. (ioc_state=0x%x)\n",
+ caller, ioc_state);
+ else
+ ioc_info(ioc,
+ "%s: CoreDump completed. (ioc_state=0x%x)\n",
+ caller, ioc_state);
+
+ return ioc_state;
+}
+
+/**
* mpt3sas_halt_firmware - halts the mpt controller firmware
* @ioc: per adapter object
*
@@ -768,9 +863,14 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
dump_stack();
doorbell = ioc->base_readl(&ioc->chip->Doorbell);
- if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
- mpt3sas_base_fault_info(ioc , doorbell);
- else {
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else {
writel(0xC0FFEE00, &ioc->chip->Doorbell);
ioc_err(ioc, "Firmware is halted due to command timeout\n");
}
@@ -3103,6 +3203,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
*/
if (!ioc->combined_reply_queue &&
ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ ioc_info(ioc,
+ "combined ReplyQueue is off, Enabling msix load balance\n");
ioc->msix_load_balance = true;
}
@@ -3115,9 +3217,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
r = _base_alloc_irq_vectors(ioc);
if (r < 0) {
- dfailprintk(ioc,
- ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
- r));
+ ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
goto try_ioapic;
}
@@ -3206,9 +3306,15 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
rc = _base_diag_reset(ioc);
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ rc = _base_diag_reset(ioc);
}
return rc;
@@ -3279,7 +3385,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->chip == NULL) {
- ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
+ ioc_err(ioc,
+ "unable to map adapter memory! or resource not found\n");
r = -EINVAL;
goto out_fail;
}
@@ -3318,8 +3425,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
- dfailprintk(ioc,
- ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
+ ioc_err(ioc,
+ "allocation for replyPostRegisterIndex failed!\n");
r = -ENOMEM;
goto out_fail;
}
@@ -3467,6 +3574,22 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _base_sdev_nr_inflight_request - get the number of inflight requests
+ * of a request queue
+ * @q: request_queue object
+ *
+ * Returns the number of inflight requests of a request queue.
+ */
+static inline unsigned long
+_base_sdev_nr_inflight_request(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
+
+ return atomic_read(&hctx->nr_active);
+}
+
+
+/**
* _base_get_high_iops_msix_index - get the msix index of
* high iops queues
* @ioc: per adapter object
@@ -3485,7 +3608,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
* reply queues in terms of batch count 16 when outstanding
* IOs on the target device is >=8.
*/
- if (atomic_read(&scmd->device->device_busy) >
+ if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
return base_mod64((
atomic64_add_return(1, &ioc->high_iops_outstanding) /
@@ -4264,7 +4387,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
&fwpkg_data_dma, GFP_KERNEL);
if (!fwpkg_data) {
- ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ ioc_err(ioc,
+ "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return -ENOMEM;
}
@@ -4994,12 +5118,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
}
- dinitprintk(ioc,
- ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
- ioc->max_sges_in_main_message,
- ioc->max_sges_in_chain_message,
- ioc->shost->sg_tablesize,
- ioc->chains_needed_per_io));
+ ioc_info(ioc,
+ "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
+ "sge_per_io(%d), chains_per_io(%d)\n",
+ ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message,
+ ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io);
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -5109,15 +5234,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
ioc->request_sz);
- dinitprintk(ioc,
- ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->request, ioc->hba_queue_depth,
- ioc->request_sz,
- (ioc->hba_queue_depth * ioc->request_sz) / 1024));
+ ioc_info(ioc,
+ "request pool(0x%p) - dma(0x%llx): "
+ "depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->request, (unsigned long long) ioc->request_dma,
+ ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz) / 1024);
- dinitprintk(ioc,
- ioc_info(ioc, "request pool: dma(0x%llx)\n",
- (unsigned long long)ioc->request_dma));
total_sz += sz;
dinitprintk(ioc,
@@ -5248,7 +5371,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
- _base_release_memory_pools(ioc);
goto out;
}
}
@@ -5303,13 +5425,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
}
- dinitprintk(ioc,
- ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->sense, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "sense_dma(0x%llx)\n",
- (unsigned long long)ioc->sense_dma));
+ ioc_info(ioc,
+ "sense pool(0x%p)- dma(0x%llx): depth(%d),"
+ "element_size(%d), pool_size(%d kB)\n",
+ ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz / 1024);
+
total_sz += sz;
/* reply pool, 4 byte align */
@@ -5387,12 +5508,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "config page: dma_pool_alloc failed\n");
goto out;
}
- dinitprintk(ioc,
- ioc_info(ioc, "config page(0x%p): size(%d)\n",
- ioc->config_page, ioc->config_page_sz));
- dinitprintk(ioc,
- ioc_info(ioc, "config_page_dma(0x%llx)\n",
- (unsigned long long)ioc->config_page_dma));
+
+ ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
+ ioc->config_page, (unsigned long long)ioc->config_page_dma,
+ ioc->config_page_sz);
total_sz += ioc->config_page_sz;
ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
@@ -5447,6 +5566,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
return 0;
if (count && current_state == MPI2_IOC_STATE_FAULT)
break;
+ if (count && current_state == MPI2_IOC_STATE_COREDUMP)
+ break;
usleep_range(1000, 1500);
count++;
@@ -5548,7 +5669,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
doorbell = ioc->base_readl(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc , doorbell);
+ mpt3sas_print_fault_code(ioc, doorbell);
+ return -EFAULT;
+ }
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, doorbell);
return -EFAULT;
}
} else if (int_status == 0xFFFFFFFF)
@@ -5610,6 +5736,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
u32 ioc_state;
int r = 0;
+ unsigned long flags;
if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
ioc_err(ioc, "%s: unknown reset_type\n", __func__);
@@ -5628,6 +5755,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
r = -EFAULT;
goto out;
}
+
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -5636,6 +5764,26 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
goto out;
}
out:
+ if (r != 0) {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ /*
+ * Wait for IOC state CoreDump to clear only during
+ * HBA initialization & release time.
+ */
+ if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
+ ioc->fault_reset_work_q == NULL)) {
+ spin_unlock_irqrestore(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ mpt3sas_print_coredump_info(ioc, ioc_state);
+ mpt3sas_base_wait_for_coredump_completion(ioc,
+ __func__);
+ spin_lock_irqsave(
+ &ioc->ioc_reset_in_progress_lock, flags);
+ }
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ }
ioc_info(ioc, "message unit reset: %s\n",
r == 0 ? "SUCCESS" : "FAILED");
return r;
@@ -5783,7 +5931,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
mfp = (__le32 *)reply;
pr_info("\toffset:data\n");
for (i = 0; i < reply_bytes/4; i++)
- pr_info("\t[0x%02x]:%08x\n", i*4,
+ ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
le32_to_cpu(mfp[i]));
}
return 0;
@@ -5851,10 +5999,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc->ioc_link_reset_in_progress)
ioc->ioc_link_reset_in_progress = 0;
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->base_cmds.status, mpi_request,
- sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
+ mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
+ issue_reset);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5927,10 +6074,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->base_cmds.status, mpi_request,
- sizeof(Mpi2SepRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->base_cmds.status, mpi_request,
+ sizeof(Mpi2SepRequest_t)/4, issue_reset);
goto issue_host_reset;
}
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -6029,9 +6175,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
goto issue_diag_reset;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ ioc_info(ioc,
+ "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
+ __func__, ioc_state);
+ return -EFAULT;
}
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
@@ -6210,6 +6362,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
}
+ /*
+ * Set the flag to enable CoreDump state feature in IOC firmware.
+ */
+ mpi_request.ConfigurationFlags |=
+ cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
+
/* This time stamp specifies number of milliseconds
* since epoch ~ midnight January 1, 1970.
*/
@@ -6221,9 +6379,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
int i;
mfp = (__le32 *)&mpi_request;
- pr_info("\toffset:data\n");
+ ioc_info(ioc, "\toffset:data\n");
for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
- pr_info("\t[0x%02x]:%08x\n", i*4,
+ ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
le32_to_cpu(mfp[i]));
}
@@ -6593,8 +6751,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
/* wait 100 msec */
msleep(100);
- if (count++ > 20)
+ if (count++ > 20) {
+ ioc_info(ioc,
+ "Stop writing magic sequence after 20 retries\n");
goto out;
+ }
host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
@@ -6618,8 +6779,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
- if (host_diagnostic == 0xFFFFFFFF)
+ if (host_diagnostic == 0xFFFFFFFF) {
+ ioc_info(ioc,
+ "Invalid host diagnostic register value\n");
goto out;
+ }
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
@@ -6706,16 +6870,33 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
+ ioc_info(ioc, "unexpected doorbell active!\n");
goto issue_diag_reset;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
goto issue_diag_reset;
}
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+ /*
+ * If a host reset is invoked while the watchdog thread is
+ * waiting for the IOC state to change to Fault, then the
+ * driver has to wait here for the CoreDump state to clear;
+ * otherwise a reset will be issued to the FW, and the FW will
+ * move the IOC to the reset state without copying the FW logs
+ * to the coredump region.
+ */
+ if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc,
+ __func__);
+ }
+ goto issue_diag_reset;
+ }
+
if (type == FORCE_BIG_HAMMER)
goto issue_diag_reset;
@@ -6959,8 +7140,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
ioc->reply_queue_count = 1;
if (!ioc->cpu_msix_table) {
- dfailprintk(ioc,
- ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
+ ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
r = -ENOMEM;
goto out_free_resources;
}
@@ -6969,8 +7149,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
- dfailprintk(ioc,
- ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
+ ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
r = -ENOMEM;
goto out_free_resources;
}
@@ -7196,6 +7375,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
sizeof(struct mpt3sas_facts));
ioc->non_operational_loop = 0;
+ ioc->ioc_coredump_loop = 0;
ioc->got_task_abort_from_ioctl = 0;
return 0;
@@ -7277,14 +7457,14 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_after_reset_handler - after reset handler
+ * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
* @ioc: per adapter object
*/
-static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+static void
+_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
- mpt3sas_scsih_after_reset_handler(ioc);
- mpt3sas_ctl_after_reset_handler(ioc);
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
ioc->transport_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -7318,6 +7498,17 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_clear_outstanding_commands - clear all outstanding commands
+ * @ioc: per adapter object
+ */
+static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
+{
+ mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
+ mpt3sas_ctl_clear_outstanding_ioctls(ioc);
+ _base_clear_outstanding_mpt_commands(ioc);
+}
+
+/**
* _base_reset_done_handler - reset done handler
* @ioc: per adapter object
*/
@@ -7475,7 +7666,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
MPT3_DIAG_BUFFER_IS_RELEASED))) {
is_trigger = 1;
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
+ (ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP)
is_fault = 1;
}
_base_pre_reset_handler(ioc);
@@ -7484,7 +7677,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
- _base_after_reset_handler(ioc);
+ _base_clear_outstanding_commands(ioc);
/* If this hard reset is called while port enable is active, then
* there is no reason to call make_ioc_operational
@@ -7515,9 +7708,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_reset_done_handler(ioc);
out:
- dtmprintk(ioc,
- ioc_info(ioc, "%s: %s\n",
- __func__, r == 0 ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 4ebf81ea4d2f..e7197150721f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "32.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 32
+#define MPT3SAS_DRIVER_VERSION "33.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 33
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -90,6 +90,10 @@
#define MPT2SAS_BUILD_VERSION 0
#define MPT2SAS_RELEASE_VERSION 00
+/* CoreDump: Default timeout */
+#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/
+#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF)
+
/*
* Set MPT3SAS_SG_DEPTH value based on user input.
*/
@@ -140,6 +144,7 @@
#define MAX_CHAIN_ELEMT_SZ 16
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
+#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6
#define FW_IMG_HDR_READ_TIMEOUT 15
#define IOC_OPERATIONAL_WAIT_COUNT 10
@@ -398,7 +403,10 @@ struct Mpi2ManufacturingPage11_t {
u8 HostTraceBufferFlags; /* 4Fh */
u16 HostTraceBufferMaxSizeKB; /* 50h */
u16 HostTraceBufferMinSizeKB; /* 52h */
- __le32 Reserved10[2]; /* 54h - 5Bh */
+ u8 CoreDumpTOSec; /* 54h */
+ u8 Reserved8; /* 55h */
+ u16 Reserved9; /* 56h */
+ __le32 Reserved10; /* 58h */
};
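The re-carved reserved area keeps Manufacturing Page 11 at its original size: the old __le32 Reserved10[2] (8 bytes at 54h-5Bh) becomes u8 + u8 + u16 + __le32, also 8 bytes, so CoreDumpTOSec lands at offset 54h without shifting any later field. A standalone sketch that checks the footprint with fixed-width stand-ins for the kernel types:

#include <stdint.h>

struct coredump_fields {	/* replaces __le32 Reserved10[2] */
	uint8_t  CoreDumpTOSec;	/* 54h */
	uint8_t  Reserved8;	/* 55h */
	uint16_t Reserved9;	/* 56h */
	uint32_t Reserved10;	/* 58h */
};

_Static_assert(sizeof(struct coredump_fields) == 2 * sizeof(uint32_t),
	       "replacement fields must keep the 8-byte footprint");

int main(void) { return 0; }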
/**
@@ -589,6 +597,7 @@ static inline void sas_device_put(struct _sas_device *s)
* @connector_name: ASCII value of the Connector's name
* @serial_number: pointer of serial number string allocated runtime
* @access_status: Device's Access Status
+ * @shutdown_latency: NVMe device's RTD3 Entry Latency
* @refcount: reference count for deletion
*/
struct _pcie_device {
@@ -611,6 +620,7 @@ struct _pcie_device {
u8 *serial_number;
u8 reset_timeout;
u8 access_status;
+ u16 shutdown_latency;
struct kref refcount;
};
/**
@@ -1045,6 +1055,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
* @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
* @high_iops_outstanding: used to load balance the interrupts
* within high iops reply queues
* @msix_load_balance: Enables load balancing of interrupts across
@@ -1073,6 +1084,10 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @event_context: unique id for each logged event
* @event_log: event log pointer
* @event_masks: events that are masked
+ * @max_shutdown_latency: timeout value for NVMe shutdown operation,
+ * which is equal to the maximum RTD3 Entry Latency value
+ * reported among the attached NVMe drives.
* @facts: static facts data
* @prev_fw_facts: previous fw facts data
* @pfacts: static port facts data
@@ -1231,6 +1246,7 @@ struct MPT3SAS_ADAPTER {
u32 ioc_reset_count;
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
u32 non_operational_loop;
+ u8 ioc_coredump_loop;
atomic64_t total_io_cnt;
atomic64_t high_iops_outstanding;
bool msix_load_balance;
@@ -1283,7 +1299,7 @@ struct MPT3SAS_ADAPTER {
u8 tm_custom_handling;
u8 nvme_abort_timeout;
-
+ u16 max_shutdown_latency;
/* static config pages */
struct mpt3sas_facts facts;
@@ -1531,6 +1547,17 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code);
+#define mpt3sas_print_fault_code(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+ mpt3sas_base_fault_info(ioc, fault_code); } while (0)
+
+void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+#define mpt3sas_print_coredump_info(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+ mpt3sas_base_coredump_info(ioc, fault_code); } while (0)
+
+int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+ const char *caller);
int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasIoUnitControlReply_t *mpi_reply,
Mpi2SasIoUnitControlRequest_t *mpi_request);
@@ -1552,6 +1579,11 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
u8 status, void *mpi_request, int sz);
+#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \
+do { ioc_err(ioc, "In func: %s\n", __func__); \
+ issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \
+ status, mpi_request, sz); } while (0)
+
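Both wrappers lean on the do { ... } while (0) statement-macro idiom: the body logs the call site through __func__ and, in mpt3sas_check_cmd_timeout(), assigns the timeout verdict into a variable named by the caller instead of returning it. A self-contained sketch of the pattern, with check_timeout() standing in for mpt3sas_base_check_cmd_timeout():

#include <stdio.h>

static unsigned char check_timeout(unsigned char status)
{
	return status == 0;	/* pretend a zero status means "timed out" */
}

#define CHECK_CMD_TIMEOUT(status, issue_reset)			\
do {	printf("In func: %s\n", __func__);			\
	(issue_reset) = check_timeout(status); } while (0)

int main(void)
{
	unsigned char issue_reset = 0;

	CHECK_CMD_TIMEOUT(0, issue_reset);	/* expands safely inside if/else */
	printf("issue_reset=%u\n", issue_reset);
	return 0;
}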
int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
/* scsih shared API */
@@ -1560,7 +1592,8 @@ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
+ struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -1694,7 +1727,7 @@ void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
u8 msix_index, u32 reply);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 14a1a2793dd5..62ddf53ab3ae 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -101,9 +101,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2ConfigRequest_t *mpi_request;
char *desc = NULL;
- if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
- return;
-
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
case MPI2_CONFIG_PAGETYPE_IO_UNIT:
@@ -269,7 +266,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply->MsgLength*4);
}
ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
- _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+ if (ioc->logging_level & MPT_DEBUG_CONFIG)
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
ioc->config_cmds.smid = USHRT_MAX;
complete(&ioc->config_cmds.done);
return 1;
@@ -305,6 +303,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
u8 retry_count, issue_host_reset = 0;
struct config_request mem;
u32 ioc_status = UINT_MAX;
+ u8 issue_reset = 0;
mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
@@ -378,14 +377,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
config_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->config_cmds.smid = smid;
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
- _config_display_some_debug(ioc, smid, "config_request", NULL);
+ if (ioc->logging_level & MPT_DEBUG_CONFIG)
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->config_cmds.status, mpi_request,
- sizeof(Mpi2ConfigRequest_t)/4);
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->config_cmds.status, mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4, issue_reset);
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
@@ -404,8 +407,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
/* Reply Frame Sanity Checks to workaround FW issues */
if ((mpi_request->Header.PageType & 0xF) !=
(mpi_reply->Header.PageType & 0xF)) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
ioc->name, __func__,
mpi_request->Header.PageType & 0xF,
@@ -415,8 +421,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (((mpi_request->Header.PageType & 0xF) ==
MPI2_CONFIG_PAGETYPE_EXTENDED) &&
mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
ioc->name, __func__,
mpi_request->ExtPageType,
@@ -439,8 +448,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (p) {
if ((mpi_request->Header.PageType & 0xF) !=
(p[3] & 0xF)) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
@@ -452,8 +464,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (((mpi_request->Header.PageType & 0xF) ==
MPI2_CONFIG_PAGETYPE_EXTENDED) &&
(mpi_request->ExtPageType != p[6])) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
_debug_dump_mf(mpi_request, ioc->request_sz/4);
- _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 6874cf017739..62e552838565 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -180,6 +180,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_FUNCTION_SMP_PASSTHROUGH:
desc = "smp_passthrough";
break;
+ case MPI2_FUNCTION_TOOLBOX:
+ desc = "toolbox";
+ break;
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ desc = "nvme_encapsulated";
+ break;
}
if (!desc)
@@ -478,14 +484,15 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
*/
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
ioc->ctl_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -1021,10 +1028,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->ignore_loginfos = 0;
}
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- karg.data_sge_offset);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ karg.data_sge_offset, issue_reset);
goto issue_host_reset;
}
@@ -1325,7 +1331,8 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
__func__));
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc,
+ "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
return 0;
}
@@ -1733,10 +1740,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
goto issue_host_reset;
}
@@ -2108,6 +2114,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u16 ioc_status;
u32 ioc_state;
int rc;
+ u8 reset_needed = 0;
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
@@ -2115,6 +2122,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
rc = 0;
*issue_reset = 0;
+
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (ioc->diag_buffer_status[buffer_type] &
@@ -2157,9 +2165,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- sizeof(Mpi2DiagReleaseRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
+ *issue_reset = reset_needed;
rc = -EFAULT;
goto out;
}
@@ -2417,10 +2426,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
goto issue_host_reset;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a038be8a0e90..c597d544eb39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1050,6 +1050,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency to the maximum RTD3 Entry Latency
+ * reported among all available NVMe drives.
+ * The minimum max_shutdown_latency is six seconds.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if (pcie_device->shutdown_latency) {
+ if (shutdown_latency < pcie_device->shutdown_latency)
+ shutdown_latency =
+ pcie_device->shutdown_latency;
+ }
+ }
+ ioc->max_shutdown_latency = shutdown_latency;
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
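A userspace model of the policy _scsih_set_nvme_max_shutdown_latency() implements above: take the largest RTD3 Entry Latency reported by the attached drives, never dropping below the six-second IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT floor. The sample latencies are invented for illustration:

#include <stdio.h>

#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6 /* seconds, from mpt3sas_base.h */

int main(void)
{
	unsigned short latencies[] = { 0, 4, 10, 8 };	/* hypothetical drives */
	unsigned short max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
	unsigned int i;

	for (i = 0; i < sizeof(latencies) / sizeof(latencies[0]); i++)
		if (latencies[i] > max_shutdown_latency)
			max_shutdown_latency = latencies[i];

	printf("max_shutdown_latency = %u s\n", max_shutdown_latency); /* 10 */
	return 0;
}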
+/**
* _scsih_pcie_device_remove - remove pcie_device from list.
* @ioc: per adapter object
* @pcie_device: the pcie_device object
@@ -1063,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
if (!pcie_device)
return;
@@ -1082,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
list_del_init(&pcie_device->list);
was_on_pcie_device_list = 1;
}
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (was_on_pcie_device_list) {
kfree(pcie_device->serial_number);
pcie_device_put(pcie_device);
}
+
+ /*
+ * This device's RTD3 Entry Latency matches IOC's
+ * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+ * from the available drives as current drive is getting removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
}
@@ -1101,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _pcie_device *pcie_device;
unsigned long flags;
int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
if (ioc->shost_recovery)
return;
@@ -1113,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
was_on_pcie_device_list = 1;
pcie_device_put(pcie_device);
}
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (was_on_pcie_device_list) {
_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
pcie_device_put(pcie_device);
}
+
+ /*
+ * This device's RTD3 Entry Latency matches the IOC's
+ * max_shutdown_latency. Recalculate the IOC's max_shutdown_latency
+ * from the remaining drives, as the current drive is being removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
}
/**
@@ -1554,7 +1604,12 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
- return scsi_change_queue_depth(sdev, qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ sdev_printk(KERN_INFO, sdev,
+ "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported,
+ sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
+ return sdev->queue_depth;
}
/**
@@ -2673,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u16 smid = 0;
u32 ioc_state;
int rc;
+ u8 issue_reset = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2695,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return (!rc) ? SUCCESS : FAILED;
@@ -2726,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- if (mpt3sas_base_check_cmd_timeout(ioc,
- ioc->tm_cmds.status, mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
+ if (issue_reset) {
rc = mpt3sas_base_hard_reset_handler(ioc,
FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
@@ -2875,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd)
u8 timeout = 30;
struct _pcie_device *pcie_device = NULL;
- sdev_printk(KERN_INFO, scmd->device,
- "attempting task abort! scmd(%p)\n", scmd);
+ sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
+ "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
+ scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scmd->request->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -2892,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd)
/* check for completed command */
if (st == NULL || st->cb_idx == 0xFF) {
+ sdev_printk(KERN_INFO, scmd->device, "No reference found at "
+ "driver, assuming scmd(0x%p) might have completed\n", scmd);
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -2920,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd)
if (r == SUCCESS && st->cb_idx != 0xFF)
r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (pcie_device)
pcie_device_put(pcie_device);
@@ -2949,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
sdev_printk(KERN_INFO, scmd->device,
- "attempting device reset! scmd(%p)\n", scmd);
+ "attempting device reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -2996,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (sas_device)
@@ -3027,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd)
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
- starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "attempting target reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
- starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "target been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -3074,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
if (r == SUCCESS && atomic_read(&starget->target_busy))
r = FAILED;
out:
- starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (sas_device)
@@ -3097,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
+ ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
scsi_print_command(scmd);
if (ioc->is_driver_loading || ioc->remove_host) {
@@ -3109,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
- ioc_info(ioc, "host reset: %s scmd(%p)\n",
+ ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
@@ -4475,6 +4542,7 @@ static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventDataTemperature_t *event_data)
{
+ u32 doorbell;
if (ioc->temp_sensors_count >= event_data->SensorNum) {
ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
@@ -4484,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
event_data->SensorNum);
ioc_err(ioc, "Current Temp In Celsius: %d\n",
event_data->CurrentTemperature);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ }
+ }
}
}
@@ -6933,6 +7013,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
pcie_device->nvme_mdts =
le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+ pcie_device->shutdown_latency =
+ le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+ /*
+ * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
+ * if the drive's RTD3 Entry Latency is greater than the IOC's
+ * max_shutdown_latency.
+ */
+ if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+ ioc->max_shutdown_latency =
+ pcie_device->shutdown_latency;
if (pcie_device_pg2.ControllerResetTO)
pcie_device->reset_timeout =
pcie_device_pg2.ControllerResetTO;
@@ -7669,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->scsih_cmds.status, mpi_request,
- sizeof(Mpi2RaidActionRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
rc = -EFAULT;
goto out;
}
@@ -9272,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
+ * scsi & tm cmds.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9357,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
}
_scsih_remove_unresponding_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ _scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
@@ -9660,6 +9752,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_nvme_shutdown - NVMe shutdown notification
+ * @ioc: per adapter object
+ *
+ * Send an IoUnitControl request with the shutdown operation code to alert
+ * the IOC that the host system is shutting down, so that the IOC can issue
+ * an NVMe shutdown to the NVMe drives attached to it.
+ */
+static void
+_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+
+ /* are there any NVMe devices? */
+ if (list_empty(&ioc->pcie_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
+ goto out;
+ }
+
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc,
+ "%s: failed obtaining a smid\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
+
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ /* Wait for max_shutdown_latency seconds */
+ ioc_info(ioc,
+ "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
+ ioc->max_shutdown_latency);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ ioc->max_shutdown_latency*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ ioc_err(ioc, "%s: timeout\n", __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_info(ioc, "Io Unit Control shutdown (complete):"
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+
+/**
* _scsih_ir_shutdown - IR shutdown notification
* @ioc: per adapter object
*
@@ -9851,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
_scsih_ir_shutdown(ioc);
+ _scsih_nvme_shutdown(ioc);
mpt3sas_base_detach(ioc);
}
@@ -10533,6 +10695,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* Host waits for minimum of six seconds */
+ ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
/*
* Enable MEMORY MOVE support flag.
*/
@@ -10681,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt3sas_base_stop_watchdog(ioc);
flush_scheduled_work();
scsi_block_requests(shost);
+ _scsih_nvme_shutdown(ioc);
device_state = pci_choose_state(pdev, state);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
@@ -10715,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev)
r = mpt3sas_base_map_resources(ioc);
if (r)
return r;
-
+ ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
scsi_unblock_requests(shost);
mpt3sas_base_start_watchdog(ioc);
@@ -10784,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
if (rc)
return PCI_ERS_RESULT_DISCONNECT;
+ ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc_warn(ioc, "hard reset: %s\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 5324662751bf..6ec5b7f33dfd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -719,11 +719,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
sas_device_put(sas_device);
}
- if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
- dev_printk(KERN_INFO, &rphy->dev,
- "add: handle(0x%04x), sas_addr(0x%016llx)\n",
- handle, (unsigned long long)
- mpt3sas_port->remote_identify.sas_address);
+ dev_info(&rphy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle,
+ (unsigned long long)mpt3sas_port->remote_identify.sas_address);
+
mpt3sas_port->rphy = rphy;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
@@ -813,6 +812,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
}
if (!ioc->remove_host)
sas_port_delete(mpt3sas_port->port);
+ ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
kfree(mpt3sas_port);
}
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index da719b0694dc..7af9173c4925 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -47,6 +47,9 @@ static struct scsi_host_template mvs_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = mvst_host_attrs,
.track_queue_depth = 1,
};
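This hunk, and the matching one in pm8001 below, route 32-bit userspace ioctls to the same sas_ioctl() entry point on CONFIG_COMPAT kernels; that is safe because the SAS ioctl arguments are layout-compatible between 32- and 64-bit ABIs. A kernel-style sketch of the pattern, assuming sas_ioctl() is declared in <scsi/libsas.h> as libsas LLDDs expect:

#include <scsi/libsas.h>
#include <scsi/scsi_host.h>

/* fragment of a libsas host template; unrelated methods omitted */
static struct scsi_host_template example_sht = {
	.name		= "example",
	.ioctl		= sas_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= sas_ioctl,	/* same handler serves 32-bit callers */
#endif
};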
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 539ac8ce4fcd..d4bd31a75b9d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
spin_lock_init(&cb->queue_lock);
if (mmio_size < PAGE_SIZE)
mmio_size = PAGE_SIZE;
- cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+ cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
if (cb->mmio_base == NULL) {
dev_err(&pdev->dev,
"Unable to map Controller Register Window\n");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
index 9289c19fcb2f..fb8eacfceee8 100644
--- a/drivers/scsi/myrb.h
+++ b/drivers/scsi/myrb.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
*
* Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index eb0dd566330a..5c5666491c2e 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
/* Map the Controller Register Window. */
if (mmio_size < PAGE_SIZE)
mmio_size = PAGE_SIZE;
- cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+ cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
if (cs->mmio_base == NULL) {
dev_err(&pdev->dev,
"Unable to map Controller Register Window\n");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
index e6702ee85e9f..9f6696d0ddd5 100644
--- a/drivers/scsi/myrs.h
+++ b/drivers/scsi/myrs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
*
* This driver supports the newer, SCSI-based firmware interface only.
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 93616f9fd6d7..d79ce97a04bd 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1560,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
goto next_entry;
data->MmioAddress = (unsigned long)
- ioremap_nocache(p_dev->resource[2]->start,
+ ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
data->MmioLength = resource_size(p_dev->resource[2]);
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index ff618ad80ebd..3c6076e4c6d2 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -101,6 +101,9 @@ static struct scsi_host_template pm8001_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sas_ioctl,
+#endif
.shost_attrs = pm8001_host_attrs,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 98dcdbd146d5..d1d95f1a2c6a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2377,7 +2377,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->buf_valid_size = sizeof(*resp);
} else
PM8001_IO_DBG(pm8001_ha,
- pm8001_printk("response to large\n"));
+ pm8001_printk("response too large\n"));
}
if (pm8001_dev)
pm8001_dev->running_req--;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 832af4213046..3337cd341d21 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1699,6 +1699,16 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
return err;
}
+#ifdef QLA_64BIT_PTR
+#define LOAD_CMD MBC_LOAD_RAM_A64_ROM
+#define DUMP_CMD MBC_DUMP_RAM_A64_ROM
+#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
+#else
+#define LOAD_CMD MBC_LOAD_RAM
+#define DUMP_CMD MBC_DUMP_RAM
+#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0)
+#endif
+
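With QLA_64BIT_PTR defined, firmware load and dump must use the 64-bit ROM mailbox opcodes and pass the extra address words (hence BIT_7 | BIT_6 joining CMD_ARGS). A standalone sketch of the compile-time selection; MBC_LOAD_RAM's value is not shown in this patch, so 0x02 below is a placeholder:

#include <stdio.h>

#define MBC_LOAD_RAM		0x02	/* placeholder value, see qla1280.h */
#define MBC_LOAD_RAM_A64_ROM	9	/* from the qla1280.h hunk below */

#ifdef QLA_64BIT_PTR
#define LOAD_CMD MBC_LOAD_RAM_A64_ROM
#else
#define LOAD_CMD MBC_LOAD_RAM
#endif

int main(void)
{
	/* compile with -DQLA_64BIT_PTR to select the A64 ROM opcode */
	printf("LOAD_CMD = 0x%x\n", LOAD_CMD);
	return 0;
}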
#define DUMP_IT_BACK 0 /* for debug of RISC loading */
static int
qla1280_load_firmware_dma(struct scsi_qla_host *ha)
@@ -1748,7 +1758,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
for(i = 0; i < cnt; i++)
((__le16 *)ha->request_ring)[i] = fw_data[i];
- mb[0] = MBC_LOAD_RAM;
+ mb[0] = LOAD_CMD;
mb[1] = risc_address;
mb[4] = cnt;
mb[3] = ha->request_dma & 0xffff;
@@ -1759,8 +1769,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
__func__, mb[0],
(void *)(long)ha->request_dma,
mb[6], mb[7], mb[2], mb[3]);
- err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
- BIT_1 | BIT_0, mb);
+ err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
if (err) {
printk(KERN_ERR "scsi(%li): Failed to load partial "
"segment of f\n", ha->host_no);
@@ -1768,7 +1777,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
}
#if DUMP_IT_BACK
- mb[0] = MBC_DUMP_RAM;
+ mb[0] = DUMP_CMD;
mb[1] = risc_address;
mb[4] = cnt;
mb[3] = p_tbuf & 0xffff;
@@ -1776,8 +1785,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[7] = upper_32_bits(p_tbuf) & 0xffff;
mb[6] = upper_32_bits(p_tbuf) >> 16;
- err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
- BIT_1 | BIT_0, mb);
+ err = qla1280_mailbox_command(ha, CMD_ARGS, mb);
if (err) {
printk(KERN_ERR
"Failed to dump partial segment of f/w\n");
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index a1a8aefc7cc3..e7820b5bca38 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -277,6 +277,8 @@ struct device_reg {
#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */
#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */
#define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */
+#define MBC_LOAD_RAM_A64_ROM 9 /* Load RAM 64bit ROM version */
+#define MBC_DUMP_RAM_A64_ROM 0x0a /* Dump RAM 64bit ROM version */
#define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */
#define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */
#define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */
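The qla1280 hunks let 64-bit builds (QLA_64BIT_PTR) use the A64 ROM variants of the LOAD/DUMP RAM mailbox commands, which take a 64-bit buffer address split across four mailbox registers; that is why BIT_7 and BIT_6 join CMD_ARGS. A recap of the programming sequence, reassembled from the hunks above (the mb[2] line is inferred from the mb[3] pattern and is an assumption):

/* Illustrative recap, not a verbatim excerpt of the driver. */
static int example_load_segment(struct scsi_qla_host *ha, u16 risc_address,
				dma_addr_t dma, u16 cnt, u16 *mb)
{
	mb[0] = LOAD_CMD;			/* MBC_LOAD_RAM or ..._A64_ROM */
	mb[1] = risc_address;			/* RISC-side destination       */
	mb[4] = cnt;				/* 16-bit word count           */
	mb[3] = lower_32_bits(dma) & 0xffff;	/* address bits 15:0           */
	mb[2] = lower_32_bits(dma) >> 16;	/* address bits 31:16, assumed */
	mb[7] = upper_32_bits(dma) & 0xffff;	/* bits 47:32, A64 only        */
	mb[6] = upper_32_bits(dma) >> 16;	/* bits 63:48, A64 only        */

	return qla1280_mailbox_command(ha, CMD_ARGS, mb);
}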
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index cbaf178fc979..d7169e43f5e1 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -54,7 +54,8 @@ void qla2x00_bsg_sp_free(srb_t *sp)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
- kfree(sp->fcport);
+ qla2x00_free_fcport(sp->fcport);
+
qla2x00_rel_sp(sp);
}
@@ -405,7 +406,7 @@ done_unmap_sg:
done_free_fcport:
if (bsg_request->msgcode == FC_BSG_RPT_ELS)
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
done:
return rval;
}
@@ -545,7 +546,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
return rval;
done_free_fcport:
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -796,7 +797,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
- (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+ (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
@@ -2049,7 +2050,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
return rval;
done_free_fcport:
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
done_unmap_rsp_sg:
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
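Every error path in qla_bsg.c that used to kfree() a temporary fc_port_t now goes through qla2x00_free_fcport() (newly exported via qla_gbl.h below), keeping port teardown in one helper should fc_port_t ever own more than its own allocation. A sketch of the enforced pairing, with hypothetical surrounding code:

/* Illustrative sketch; request building elided. */
static int example_send_ct(scsi_qla_host_t *vha)
{
	fc_port_t *fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

	if (!fcport)
		return -ENOMEM;

	/* build and queue the request here; on failure fall through */

	qla2x00_free_fcport(fcport);	/* never bare kfree(fcport) */
	return 0;
}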
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 30afc59c1870..88a56e8480f7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -18,7 +18,7 @@
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
- * | | | 0x212a-0x2130 |
+ * | | | 0x212a-0x2134 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
@@ -2519,12 +2519,6 @@ qla83xx_fw_dump_failed:
/* Driver Debug Functions. */
/****************************************************************************/
-static inline int
-ql_mask_match(uint level)
-{
- return (level & ql2xextended_error_logging) == level;
-}
-
/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index bb01b680ce9f..433e95502808 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
struct qla_hw_data *);
extern int qla24xx_soft_reset(struct qla_hw_data *);
+
+static inline int
+ql_mask_match(uint level)
+{
+ return (level & ql2xextended_error_logging) == level;
+}
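Relocating ql_mask_match() from qla_dbg.c into the header makes the debug-mask test visible to every compilation unit; the NVMe completion path in qla_isr.c below relies on it to keep a WARN_ONCE and log message off the hot path unless ql_dbg_io logging is enabled. Usage sketch (function name and length check hypothetical):

/* Illustrative sketch of gating diagnostics on the module mask. */
static void example_warn_len(scsi_qla_host_t *vha, u32 len)
{
	if (!ql_mask_match(ql_dbg_io))
		return;		/* mask test now usable outside qla_dbg.c */

	ql_log(ql_log_warn, vha, 0x5100,
	       "Unexpected response payload length %u.\n", len);
}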
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2edd9f7b3074..ed32e9715794 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,6 +2402,7 @@ typedef struct fc_port {
unsigned int scan_needed:1;
unsigned int n2n_flag:1;
unsigned int explicit_logout:1;
+ unsigned int prli_pend_timer:1;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
@@ -2428,6 +2429,7 @@ typedef struct fc_port {
struct work_struct free_work;
struct work_struct reg_work;
uint64_t jiffies_at_registration;
+ unsigned long prli_expired;
struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
uint16_t tgt_id;
@@ -2464,6 +2466,7 @@ typedef struct fc_port {
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
+ atomic_t shadow_disc_state;
enum discovery_state next_disc_state;
enum login_state fw_login_state;
unsigned long dm_login_expire;
@@ -2510,6 +2513,19 @@ struct event_arg {
extern const char *const port_state_str[5];
+static const char * const port_dstate_str[] = {
+ "DELETED",
+ "GNN_ID",
+ "GNL",
+ "LOGIN_PEND",
+ "LOGIN_FAILED",
+ "GPDB",
+ "UPD_FCPORT",
+ "LOGIN_COMPLETE",
+ "ADISC",
+ "DELETE_PEND"
+};
+
/*
* FC port flags.
*/
@@ -3263,7 +3279,6 @@ enum qla_work_type {
QLA_EVT_IDC_ACK,
QLA_EVT_ASYNC_LOGIN,
QLA_EVT_ASYNC_LOGOUT,
- QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
@@ -3953,7 +3968,7 @@ struct qla_hw_data {
void *sfp_data;
dma_addr_t sfp_data_dma;
- void *flt;
+ struct qla_flt_header *flt;
dma_addr_t flt_dma;
#define XGMAC_DATA_SIZE 4096
@@ -4845,6 +4860,9 @@ struct sff_8247_a0 {
(ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
NVME_ONLY_TARGET(fcport)) \
+#define PRLI_PHASE(_cls) \
+ ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 9dc09c117416..d641918cdd46 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1354,12 +1354,12 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_id[3];
uint8_t format;
union {
- struct {
+ struct _f0 {
/* format 0 loop */
uint8_t vp_idx_map[16];
uint8_t reserved_4[32];
} f0;
- struct {
+ struct _f1 {
/* format 1 fabric */
uint8_t vpstat1_subcode; /* vp_status=1 subcode */
uint8_t flags;
@@ -1381,21 +1381,22 @@ struct vp_rpt_id_entry_24xx {
uint16_t bbcr;
uint8_t reserved_5[6];
} f1;
- struct { /* format 2: N2N direct connect */
- uint8_t vpstat1_subcode;
- uint8_t flags;
- uint16_t rsv6;
- uint8_t rsv2[12];
-
- uint8_t ls_rjt_vendor;
- uint8_t ls_rjt_explanation;
- uint8_t ls_rjt_reason;
- uint8_t rsv3[5];
-
- uint8_t port_name[8];
- uint8_t node_name[8];
- uint8_t remote_nport_id[4];
- uint32_t reserved_5;
+ struct _f2 { /* format 2: N2N direct connect */
+ uint8_t vpstat1_subcode;
+ uint8_t flags;
+ uint16_t fip_flags;
+ uint8_t rsv2[12];
+
+ uint8_t ls_rjt_vendor;
+ uint8_t ls_rjt_explanation;
+ uint8_t ls_rjt_reason;
+ uint8_t rsv3[5];
+
+ uint8_t port_name[8];
+ uint8_t node_name[8];
+ uint16_t bbcr;
+ uint8_t reserved_5[2];
+ uint8_t remote_nport_id[4];
} f2;
} u;
};
@@ -1470,13 +1471,6 @@ struct qla_flt_location {
uint16_t checksum;
};
-struct qla_flt_header {
- uint16_t version;
- uint16_t length;
- uint16_t checksum;
- uint16_t unused;
-};
-
#define FLT_REG_FW 0x01
#define FLT_REG_BOOT_CODE 0x07
#define FLT_REG_VPD_0 0x14
@@ -1537,6 +1531,14 @@ struct qla_flt_region {
uint32_t end;
};
+struct qla_flt_header {
+ uint16_t version;
+ uint16_t length;
+ uint16_t checksum;
+ uint16_t unused;
+ struct qla_flt_region region[0];
+};
+
#define FLT_REGION_SIZE 16
#define FLT_MAX_REGIONS 0xFF
#define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS)
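qla_flt_header moves below qla_flt_region so the header can declare its trailing region table as a flexible array; qla_sup.c below then drops the (void *)&flt[1] pointer arithmetic for plain &flt->region[0], and qla_os.c adds BUILD_BUG_ONs pinning both layouts (8 and 16 bytes). Walking the table through the new member looks roughly like this (the le32 byte order on start/end is an assumption):

/* Illustrative sketch: iterate the FLT via the flexible-array member. */
static void example_walk_flt(struct qla_hw_data *ha)
{
	struct qla_flt_header *flt = ha->flt;
	struct qla_flt_region *region = &flt->region[0];
	uint16_t cnt = le16_to_cpu(flt->length) / sizeof(*region);

	for (; cnt; cnt--, region++)
		pr_debug("flash region %#x-%#x\n",
			 le32_to_cpu(region->start), le32_to_cpu(region->end));
}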
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5b163ad85c34..2a64729a2bc5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,14 +72,13 @@ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
extern fc_port_t *
@@ -182,8 +181,6 @@ extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
@@ -201,6 +198,7 @@ extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
extern void qla2x00_free_fcports(struct scsi_qla_host *);
+extern void qla2x00_free_fcport(fc_port_t *);
extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
extern void qla83xx_service_idc_aen(struct work_struct *);
@@ -253,8 +251,9 @@ extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(srb_t *sp);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
-extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
-extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 446a9d6ba255..aaa4a5bbf2ff 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2963,7 +2963,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
- fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -3097,9 +3096,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
@@ -4290,7 +4287,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
- fcport->disc_state = DSC_GNN_ID;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
@@ -4464,7 +4461,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aa5204163bec..9e6b56527b25 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -49,16 +49,9 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
- struct req_que *req;
- unsigned long flags;
- struct qla_hw_data *ha = sp->vha->hw;
- WARN_ON_ONCE(irqs_disabled());
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = sp->qpair->req;
- req->outstanding_cmds[sp->handle] = NULL;
+ WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
iocb->timeout(sp);
}
@@ -142,7 +135,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
sp->free(sp);
}
-static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
struct srb_iocb *abt_iocb;
@@ -242,6 +235,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
case SRB_CTRL_VP:
+ default:
rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -258,10 +252,6 @@ qla2x00_async_iocb_timeout(void *data)
sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
break;
- default:
- WARN_ON_ONCE(true);
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
- break;
}
}
@@ -326,10 +316,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!sp)
goto done;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
- fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
@@ -425,7 +415,7 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags &= ~FCF_ASYNC_ACTIVE;
/* Don't re-login in target mode */
if (!fcport->tgt_session)
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1);
qlt_logo_completion_handler(fcport, data[0]);
}
@@ -533,7 +523,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
e->u.fcport.fcport = fcport;
fcport->flags |= FCF_ASYNC_ACTIVE;
- fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
return qla2x00_post_work(vha, e);
}
@@ -685,7 +675,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
port_id_t id;
u64 wwn;
u16 data[2];
- u8 current_login_state;
+ u8 current_login_state, nvme_cls;
fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -744,10 +734,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- if (NVME_TARGET(vha->hw, fcport))
- current_login_state = e->current_login_state >> 4;
- else
- current_login_state = e->current_login_state & 0xf;
+ nvme_cls = e->current_login_state >> 4;
+ current_login_state = e->current_login_state & 0xf;
+
+ if (PRLI_PHASE(nvme_cls)) {
+ current_login_state = nvme_cls;
+ fcport->fc4_type &= ~FS_FC4TYPE_FCP;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ } else if (PRLI_PHASE(current_login_state)) {
+ fcport->fc4_type |= FS_FC4TYPE_FCP;
+ fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ }
ql_dbg(ql_dbg_disc, vha, 0x20e2,
"%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
@@ -836,7 +833,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
* with GNL. Push disc_state back to DELETED
* so GNL can go out again
*/
- fcport->disc_state = DSC_DELETED;
+ qla2x00_set_fcport_disc_state(fcport,
+ DSC_DELETED);
break;
case DSC_LS_PRLI_COMP:
if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -912,7 +910,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
break;
case ISP_CFG_N:
- fcport->disc_state = DSC_DELETED;
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
if (time_after_eq(jiffies, fcport->dm_login_expire)) {
if (fcport->n2n_link_reset_cnt < 2) {
fcport->n2n_link_reset_cnt++;
@@ -992,7 +990,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
set_bit(loop_id, vha->hw->loop_id_map);
wwn = wwn_to_u64(e->port_name);
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
+ ql_dbg(ql_dbg_disc, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
__func__, (void *)&wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
@@ -1051,6 +1049,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
vha->gnl.sent = 0;
+ if (!list_empty(&vha->gnl.fcports)) {
+ /* retrigger gnl */
+ list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
+ gnl_entry) {
+ list_del_init(&fcport->gnl_entry);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
+ break;
+ }
+ }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -1072,7 +1080,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport->flags |= FCF_ASYNC_SENT;
- fcport->disc_state = DSC_GNL;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -1121,8 +1129,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
return rval;
}
@@ -1216,12 +1224,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
- if (!vha->flags.online)
+ if (!vha->flags.online) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+ __func__, __LINE__, fcport->port_name);
return rval;
+ }
- if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
- fcport->fw_login_state == DSC_LS_PRLI_PEND)
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+ fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
+ qla_dual_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+ __func__, __LINE__, fcport->port_name);
return rval;
+ }
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -1295,12 +1310,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
return rval;
}
- fcport->disc_state = DSC_GPDB;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
+
fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
@@ -1349,6 +1364,7 @@ done_free_sp:
sp->free(sp);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
@@ -1379,7 +1395,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC session revalidate success\n",
__func__, __LINE__, ea->fcport->port_name);
- ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1433,7 +1449,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* Set discovery state back to GNL to Relogin attempt */
if (qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) {
- fcport->disc_state = DSC_GNL;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
return;
@@ -1600,6 +1616,10 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
default:
if (fcport->login_pause) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %d %8phC exit\n",
+ __func__, __LINE__,
+ fcport->port_name);
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1758,9 +1778,23 @@ qla2x00_tmf_iocb_timeout(void *data)
{
srb_t *sp = data;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ int rc, h;
+ unsigned long flags;
- tmf->u.tmf.comp_status = CS_TIMEOUT;
- complete(&tmf->u.tmf.comp);
+ rc = qla24xx_async_abort_cmd(sp, false);
+ if (rc) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ tmf->u.tmf.comp_status = CS_TIMEOUT;
+ tmf->u.tmf.data = QLA_FUNCTION_FAILED;
+ complete(&tmf->u.tmf.comp);
+ }
}
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
@@ -1976,7 +2010,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->loop_id, ea->fcport->d_id.b24);
@@ -1996,11 +2030,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->disc_state = DSC_LOGIN_FAILED;
+ qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
- qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, ea->fcport, 1);
break;
case MBS_LOOP_ID_USED:
/* data[1] = IO PARAM 1 = nport ID */
@@ -2047,6 +2081,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
set_bit(lid, vha->hw->loop_id_map);
ea->fcport->loop_id = lid;
ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
qlt_schedule_sess_for_deletion(ea->fcport);
}
break;
@@ -2054,16 +2089,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
return;
}
-void
-qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
-{
- qlt_logo_completion_handler(fcport, data[0]);
- fcport->login_gen++;
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -4925,12 +4950,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
qla2x00_get_data_rate(vha);
/* Determine what we need to do */
- if (ha->current_topology == ISP_CFG_FL &&
- (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
-
- set_bit(RSCN_UPDATE, &flags);
-
- } else if (ha->current_topology == ISP_CFG_F &&
+ if ((ha->current_topology == ISP_CFG_FL ||
+ ha->current_topology == ISP_CFG_F) &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
set_bit(RSCN_UPDATE, &flags);
@@ -5087,7 +5108,7 @@ skip_login:
rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
&entries);
if (rval != QLA_SUCCESS)
- goto cleanup_allocation;
+ goto err;
ql_dbg(ql_dbg_disc, vha, 0x2011,
"Entries in ID list (%d).\n", entries);
@@ -5117,7 +5138,7 @@ skip_login:
ql_log(ql_log_warn, vha, 0x2012,
"Memory allocation failed for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
- goto cleanup_allocation;
+ goto err;
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
@@ -5207,7 +5228,7 @@ skip_login:
ql_log(ql_log_warn, vha, 0xd031,
"Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
- goto cleanup_allocation;
+ goto err;
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
@@ -5230,7 +5251,7 @@ skip_login:
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ ql2xplogiabsentdevice);
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
@@ -5250,15 +5271,14 @@ skip_login:
qla24xx_fcport_handle_login(vha, fcport);
}
-cleanup_allocation:
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x2098,
- "Configure local loop error exit: rval=%x.\n", rval);
- }
+ return rval;
- return (rval);
+err:
+ ql_dbg(ql_dbg_disc, vha, 0x2098,
+ "Configure local loop error exit: rval=%x.\n", rval);
+ return rval;
}
static void
@@ -5385,11 +5405,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
__func__, fcport->port_name);
- fcport->disc_state = DSC_UPD_FCPORT;
+ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
fcport->deleted = 0;
- fcport->logout_on_delete = 1;
+ if (vha->hw->current_topology == ISP_CFG_NL)
+ fcport->logout_on_delete = 0;
+ else
+ fcport->logout_on_delete = 1;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
switch (vha->hw->current_topology) {
@@ -5405,7 +5428,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
return;
}
@@ -5450,7 +5473,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
}
}
- fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}
void qla_register_fcport_fn(struct work_struct *work)
@@ -5859,7 +5882,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
if (NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
- fcport->disc_state = DSC_GNL;
+ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
vha->fcport_count--;
fcport->login_succ = 0;
}
@@ -5905,7 +5928,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ ql2xplogiabsentdevice);
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
@@ -6071,7 +6094,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1);
rval = 1;
break;
@@ -6585,9 +6608,9 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
list_for_each_entry(vp, &ha->vp_list, list)
- qla2x00_mark_all_devices_lost(vp, 0);
+ qla2x00_mark_all_devices_lost(vp);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -6663,14 +6686,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
- qla2x00_mark_all_devices_lost(vp, 0);
+ qla2x00_mark_all_devices_lost(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 352aba4127f7..364b3db8b2dc 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
INIT_LIST_HEAD(&ctx->dsd_list);
}
+static inline void
+qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
+{
+ int old_val;
+ uint8_t shiftbits, mask;
+
+ /* This will have to change when the max no. of states > 16 */
+ shiftbits = 4;
+ mask = (1 << shiftbits) - 1;
+
+ fcport->disc_state = state;
+ while (1) {
+ old_val = atomic_read(&fcport->shadow_disc_state);
+ if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
+ old_val, (old_val << shiftbits) | state)) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
+ "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
+ fcport->port_name, port_dstate_str[old_val & mask],
+ port_dstate_str[state], fcport->d_id.b24);
+ return;
+ }
+ }
+}
+
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
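The new qla2x00_set_fcport_disc_state() pairs the plain disc_state write with a four-bits-per-entry history kept in shadow_disc_state, retried through atomic_cmpxchg() so a concurrent updater can never make a transition vanish, then logs the old-to-new transition via port_dstate_str[]. The generic shape of that lock-free update, reduced to its essentials:

#include <linux/atomic.h>

/* Illustrative sketch of the shift-in-a-nibble update above. */
static void push_nibble(atomic_t *shadow, int state)
{
	int old;

	do {	/* retry until no other CPU raced between read and swap */
		old = atomic_read(shadow);
	} while (atomic_cmpxchg(shadow, old, (old << 4) | state) != old);
}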
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8b050f0b4333..47bf60a9490a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
+ unsigned long flags = 0;
+ int res, h;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- complete(&lio->u.els_logo.comp);
+ /* Abort the exchange */
+ res = qla24xx_async_abort_cmd(sp, false);
+ if (res) {
+ ql_dbg(ql_dbg_io, vha, 0x3070,
+ "mbx abort_command failed.\n");
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ complete(&lio->u.els_logo.comp);
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0x3071,
+ "mbx abort_command success.\n");
+ }
}
static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
@@ -2717,23 +2736,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
- int res;
+ int res, h;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
"%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
/* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- res = ha->isp_ops->abort_command(sp);
+ res = qla24xx_async_abort_cmd(sp, false);
ql_dbg(ql_dbg_io, vha, 0x3070,
"mbx abort_command %s\n",
(res == QLA_SUCCESS) ? "successful" : "failed");
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ if (res) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
}
void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
@@ -2852,7 +2877,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
fcport->flags &= ~FCF_ASYNC_SENT;
- fcport->disc_state = DSC_LOGIN_FAILED;
+ qla2x00_set_fcport_disc_state(fcport,
+ DSC_LOGIN_FAILED);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
break;
}
@@ -2865,7 +2891,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- sp->fcport->disc_state = DSC_LOGIN_FAILED;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
break;
}
@@ -2898,11 +2924,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return -ENOMEM;
}
fcport->flags |= FCF_ASYNC_SENT;
- fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
@@ -2975,7 +3002,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
out:
- fcport->flags &= ~(FCF_ASYNC_SENT);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
sp->free(sp);
done:
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b8a6bfcf08d..e40705d38cea 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -788,7 +788,7 @@ skip_rio:
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -861,7 +861,7 @@ skip_rio:
}
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -881,7 +881,7 @@ skip_rio:
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -924,7 +924,7 @@ skip_rio:
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
if (!N2N_TOPO(ha))
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -953,7 +953,7 @@ skip_rio:
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
@@ -1022,7 +1022,6 @@ skip_rio:
"Marking port lost loopid=%04x portid=%06x.\n",
fcport->loop_id, fcport->d_id.b24);
if (qla_ini_mode_enabled(vha)) {
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
qlt_schedule_sess_for_deletion(fcport);
}
@@ -1034,14 +1033,14 @@ global_port_update:
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
if (vha->vp_idx) {
atomic_set(&vha->vp_state, VP_FAILED);
fc_vport_set_state(vha->fc_vport,
FC_VPORT_FAILED);
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
vha->flags.management_server_logged_in = 0;
@@ -1253,11 +1252,33 @@ global_port_update:
case MBA_DPORT_DIAGNOSTICS:
ql_dbg(ql_dbg_async, vha, 0x5052,
- "D-Port Diagnostics: %04x result=%s\n",
- mb[0],
- mb[1] == 0 ? "start" :
- mb[1] == 1 ? "done (pass)" :
- mb[1] == 2 ? "done (error)" : "other");
+ "D-Port Diagnostics: %04x %04x %04x %04x\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ static char *results[] = {
+ "start", "done(pass)", "done(error)", "undefined" };
+ static char *types[] = {
+ "none", "dynamic", "static", "other" };
+ uint result = mb[1] >> 0 & 0x3;
+ uint type = mb[1] >> 6 & 0x3;
+ uint sw = mb[1] >> 15 & 0x1;
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
+ results[result], types[type], sw);
+ if (result == 2) {
+ static char *reasons[] = {
+ "reserved", "unexpected reject",
+ "unexpected phase", "retry exceeded",
+ "timed out", "not supported",
+ "user stopped" };
+ uint reason = mb[2] >> 0 & 0xf;
+ uint phase = mb[2] >> 12 & 0xf;
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "D-Port Diagnostics: reason=%s phase=%u \n",
+ reason < 7 ? reasons[reason] : "other",
+ phase >> 1);
+ }
+ }
break;
case MBA_TEMPERATURE_ALERT:
@@ -1918,6 +1939,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr;
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ if (unlikely(iocb->u.nvme.rsp_pyld_len >
+ sizeof(struct nvme_fc_ersp_iu))) {
+ if (ql_mask_match(ql_dbg_io)) {
+ WARN_ONCE(1, "Unexpected response payload length %u.\n",
+ iocb->u.nvme.rsp_pyld_len);
+ ql_log(ql_log_warn, fcport->vha, 0x5100,
+ "Unexpected response payload length %u.\n",
+ iocb->u.nvme.rsp_pyld_len);
+ }
+ iocb->u.nvme.rsp_pyld_len =
+ sizeof(struct nvme_fc_ersp_iu);
+ }
iter = iocb->u.nvme.rsp_pyld_len >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
@@ -2152,12 +2185,12 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* swab32 of the "data" field in the beginning of qla2x00_status_entry()
* would make guard field appear at offset 2
*/
- a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
- a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
- a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
- e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
- e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
- e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+ a_guard = get_unaligned_le16(ap + 2);
+ a_app_tag = get_unaligned_le16(ap + 0);
+ a_ref_tag = get_unaligned_le32(ap + 4);
+ e_guard = get_unaligned_le16(ep + 2);
+ e_app_tag = get_unaligned_le16(ep + 0);
+ e_ref_tag = get_unaligned_le32(ep + 4);
ql_dbg(ql_dbg_io, vha, 0x3023,
"iocb(s) %p Returned STATUS.\n", sts24);
@@ -2745,7 +2778,6 @@ check_scsi_status:
port_state_str[FCS_ONLINE],
comp_status);
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
qlt_schedule_sess_for_deletion(fcport);
}
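Alongside the mark_device_lost rework, qla_isr.c converts the DIF-tag reads from casts such as le16_to_cpu(*(uint16_t *)(ap + 2)) to the get_unaligned_le16/le32() helpers: the protection-information bytes carry no alignment guarantee, and the helpers degrade to byte loads only where the architecture requires it. The rule, with the tuple layout from the qla_isr.c hunk:

#include <asm/unaligned.h>

/* Illustrative sketch; field offsets follow the hunk above. */
static void example_parse_dif(const u8 *ap, u16 *app, u16 *guard, u32 *ref)
{
	*app   = get_unaligned_le16(ap + 0);	/* application tag */
	*guard = get_unaligned_le16(ap + 2);	/* guard (CRC)     */
	*ref   = get_unaligned_le32(ap + 4);	/* reference tag   */
}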
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b7c1108c48e2..9e09964f5c0e 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -6152,9 +6152,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mcp->mb[7] = LSW(MSD(req_dma));
mcp->mb[8] = MSW(addr);
/* Setting RAM ID to valid */
- mcp->mb[10] |= BIT_7;
/* For MCTP RAM ID is 0x40 */
- mcp->mb[10] |= 0x40;
+ mcp->mb[10] = BIT_7 | 0x40;
mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index eabc5127174e..8ae639d089d1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -147,7 +147,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
"Marking port dead, loop_id=0x%04x : %x.\n",
fcport->loop_id, fcport->vha->vp_idx);
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
}
}
@@ -167,7 +167,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->logout_on_delete = 0;
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -327,7 +327,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
*/
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 605b59c76c90..cad1fc2a1b28 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -789,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
}
ha->cregbase =
- ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
if (!ha->cregbase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -810,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
}
ha->iobase =
- ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
if (!ha->iobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -1210,9 +1210,9 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
" Existing TGT-ID %x did not get "
" offline event from firmware.\n",
fcport->old_tgt_id);
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
return rval;
}
break;
@@ -1230,7 +1230,7 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
return QLA_MEMORY_ALLOC_FAILED;
}
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
return rval;
}
@@ -1274,7 +1274,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
if (fcport->port_type != FCT_INITIATOR)
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
}
}
@@ -1298,7 +1298,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
/* Free all new device structures not processed. */
list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
list_del(&fcport->list);
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
}
return rval;
@@ -1706,7 +1706,7 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
if (!fcport)
return;
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
return;
}
@@ -1740,7 +1740,7 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else if (evt->u.aenfx.mbx[2] == 2) {
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
}
break;
@@ -2513,7 +2513,7 @@ check_scsi_status:
atomic_read(&fcport->state));
if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1);
break;
case CS_ABORTED:
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 2b2028f2383e..185c5f34d4c1 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
return (u8 *)&ha->hablob->fw->data[offset];
}
-static __le32
-qla82xx_get_fw_size(struct qla_hw_data *ha)
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
{
struct qla82xx_uri_data_desc *uri_desc = NULL;
@@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha)
return cpu_to_le32(uri_desc->size);
}
- return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+ return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}
static u8 *
@@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
}
flashaddr = FLASH_ADDR_START;
- size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+ size = qla82xx_get_fw_size(ha) / 8;
ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
for (i = 0; i < size; i++) {
@@ -1883,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
static int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
- __le32 val;
+ uint32_t val;
uint32_t min_size;
struct qla_hw_data *ha = vha->hw;
const struct firmware *fw = ha->hablob->fw;
@@ -1896,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
min_size = QLA82XX_URI_FW_MIN_SIZE;
} else {
- val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
- if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
+ val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+ if (val != QLA82XX_BDINFO_MAGIC)
return -EINVAL;
min_size = QLA82XX_FW_MIN_SIZE;
@@ -3030,7 +3029,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
/* Set DEV_FAILED flag to disable timer */
vha->device_flags |= DFLG_DEV_FAILED;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
vha->flags.online = 0;
vha->flags.init_done = 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b84bc4a6ac8..b520a980d1dc 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1110,7 +1110,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
u8 i;
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
for (i = 0; i < 10; i++) {
if (wait_event_timeout(vha->fcport_waitQ,
@@ -1667,7 +1667,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -3854,37 +3854,21 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
}
static inline void
-qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
- int defer)
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- struct fc_rport *rport;
- scsi_qla_host_t *base_vha;
- unsigned long flags;
+ int now;
if (!fcport->rport)
return;
- rport = fcport->rport;
- if (defer) {
- base_vha = pci_get_drvdata(vha->hw->pdev);
- spin_lock_irqsave(vha->host->host_lock, flags);
- fcport->drport = rport;
- spin_unlock_irqrestore(vha->host->host_lock, flags);
- qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
- set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
- qla2xxx_wake_dpc(base_vha);
- } else {
- int now;
-
- if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
- "%s %8phN. rport %p roles %x\n",
- __func__, fcport->port_name, rport,
- rport->roles);
- fc_remote_port_delete(rport);
- }
- qlt_do_generation_tick(vha, &now);
+ if (fcport->rport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+ "%s %8phN. rport %p roles %x\n",
+ __func__, fcport->port_name, fcport->rport,
+ fcport->rport->roles);
+ fc_remote_port_delete(fcport->rport);
}
+ qlt_do_generation_tick(vha, &now);
}
/*
@@ -3897,18 +3881,18 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
* Context:
*/
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
- int do_login, int defer)
+ int do_login)
{
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
- qla2x00_schedule_rport_del(vha, fcport, defer);
+ qla2x00_schedule_rport_del(vha, fcport);
return;
}
if (atomic_read(&fcport->state) == FCS_ONLINE &&
vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
- qla2x00_schedule_rport_del(vha, fcport, defer);
+ qla2x00_schedule_rport_del(vha, fcport);
}
/*
* We may need to retry the login, so don't change the state of the
@@ -3937,7 +3921,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
* Context:
*/
void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
{
fc_port_t *fcport;
@@ -3957,13 +3941,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
- if (defer)
- qla2x00_schedule_rport_del(vha, fcport, defer);
- else if (vha->vp_idx == fcport->vha->vp_idx)
- qla2x00_schedule_rport_del(vha, fcport, defer);
- }
}
}
@@ -4965,7 +4942,6 @@ int qla2x00_post_async_##name##_work( \
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
-qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -5032,7 +5008,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
fcport->jiffies_at_registration = jiffies;
fcport->sec_since_registration = 0;
fcport->next_disc_state = DSC_DELETED;
- fcport->disc_state = DSC_UPD_FCPORT;
+ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
queue_work(system_unbound_wq, &fcport->reg_work);
@@ -5253,10 +5229,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_ASYNC_LOGOUT:
rc = qla2x00_async_logout(vha, e->u.logio.fcport);
break;
- case QLA_EVT_ASYNC_LOGOUT_DONE:
- qla2x00_async_logout_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_ASYNC_ADISC:
qla2x00_async_adisc(vha, e->u.logio.fcport,
e->u.logio.data);
@@ -6899,13 +6871,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
qpair->online = 0;
mutex_unlock(&ha->mq_lock);
- qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_mark_all_devices_lost(vha);
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
- qla2x00_mark_all_devices_lost(vp, 0);
+ qla2x00_mark_all_devices_lost(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
}
@@ -7270,6 +7242,8 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bbe90354f49b..76a38bf86cbc 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -669,8 +669,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
struct qla_hw_data *ha = vha->hw;
uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
- struct qla_flt_header *flt = (void *)ha->flt;
- struct qla_flt_region *region = (void *)&flt[1];
+ struct qla_flt_header *flt = ha->flt;
+ struct qla_flt_region *region = &flt->region[0];
uint16_t *wptr, cnt, chksum;
uint32_t start;
@@ -2652,18 +2652,15 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
struct qla_flt_region *region)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_flt_header *flt;
- struct qla_flt_region *flt_reg;
+ struct qla_flt_header *flt = ha->flt;
+ struct qla_flt_region *flt_reg = &flt->region[0];
uint16_t cnt;
int rval = QLA_FUNCTION_FAILED;
if (!ha->flt)
return QLA_FUNCTION_FAILED;
- flt = (struct qla_flt_header *)ha->flt;
- flt_reg = (struct qla_flt_region *)&flt[1];
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
-
for (; cnt; cnt--, flt_reg++) {
if (flt_reg->start == start) {
memcpy((uint8_t *)region, flt_reg,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 68c14143e50e..70081b395fb2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else {
sp->fcport->login_retry = 0;
- sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(sp->fcport,
+ DSC_LOGIN_COMPLETE);
sp->fcport->deleted = 0;
sp->fcport->logout_on_delete = 1;
}
@@ -957,7 +958,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qlt_plogi_ack_t *own =
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+ ql_dbg(ql_dbg_disc, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
@@ -966,7 +967,7 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
- qla2x00_mark_device_lost(vha, sess, 0, 0);
+ qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
qlt_port_logo_t logo;
@@ -1024,7 +1025,7 @@ void qlt_free_session_done(struct work_struct *work)
while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+ ql_dbg(ql_dbg_disc, vha, 0xf086,
"%s: waiting for sess %p logout\n",
__func__, sess);
traced = true;
@@ -1045,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}
+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1052,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work)
tgt->sess_count--;
}
- sess->disc_state = DSC_DELETED;
+ qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
sess->deleted = QLA_SESS_DELETED;
@@ -1108,7 +1113,7 @@ void qlt_free_session_done(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->free_pending = 0;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1151,13 +1156,18 @@ void qlt_unreg_sess(struct fc_port *sess)
return;
}
sess->free_pending = 1;
+ /*
+ * Use FCF_ASYNC_SENT flag to block other cmds used in sess
+ * management from being sent.
+ */
+ sess->flags |= FCF_ASYNC_SENT;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- sess->disc_state = DSC_DELETE_PEND;
+ qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
@@ -1257,7 +1267,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
- sess->disc_state = DSC_DELETE_PEND;
+ sess->prli_pend_timer = 0;
+ qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
qla24xx_chk_fcp_state(sess);
@@ -3446,13 +3457,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
cmd->trc_flags |= TRC_DIF_ERR;
- cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
- cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
- cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+ cmd->a_guard = get_unaligned_be16(ap + 0);
+ cmd->a_app_tag = get_unaligned_be16(ap + 2);
+ cmd->a_ref_tag = get_unaligned_be32(ap + 4);
- cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
- cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
- cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+ cmd->e_guard = get_unaligned_be16(ep + 0);
+ cmd->e_app_tag = get_unaligned_be16(ep + 2);
+ cmd->e_ref_tag = get_unaligned_be32(ep + 4);
ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
"%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
@@ -4579,7 +4590,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* find other sess with nport_id collision */
if (port_id.b24 == other_sess->d_id.b24) {
if (loop_id != other_sess->loop_id) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
+ ql_dbg(ql_dbg_disc, vha, 0x1000c,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4595,7 +4606,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* Another wwn used to have our s_id/loop_id
* kill the session, but don't free the loop_id
*/
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
+ ql_dbg(ql_dbg_disc, vha, 0xf01b,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4610,7 +4621,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* find other sess with nport handle collision */
if ((loop_id == other_sess->loop_id) &&
(loop_id != FC_NO_LOOP_ID)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
+ ql_dbg(ql_dbg_disc, vha, 0x1000d,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -6053,7 +6064,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
if (!IS_SW_RESV_ADDR(fcport->d_id))
vha->fcport_count++;
fcport->login_gen++;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
fcport->login_succ = 1;
newfcport = 1;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d006f0a97b8c..6539499e9e95 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -379,8 +379,7 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio)
{
int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
- return (be32_to_cpu(get_unaligned((uint32_t *)
- &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+ return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]);
}
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 03bd3b712b77..bb03c022e023 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.21-k"
+#define QLA2XXX_VERSION "10.01.00.22-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2323432a0edb..5504ab11decc 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
-		if (ha->fw_dump)
+	if (ha->fw_dump)
		vfree(ha->fw_dump);
ha->queues_len = 0;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index df14597752ec..eed31021e788 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -736,13 +736,12 @@ out:
return err;
}
-static const struct file_operations scsi_devinfo_proc_fops = {
- .owner = THIS_MODULE,
- .open = proc_scsi_devinfo_open,
- .read = seq_read,
- .write = proc_scsi_devinfo_write,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops scsi_devinfo_proc_ops = {
+ .proc_open = proc_scsi_devinfo_open,
+ .proc_read = seq_read,
+ .proc_write = proc_scsi_devinfo_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
#endif /* CONFIG_SCSI_PROC_FS */
@@ -867,7 +866,7 @@ int __init scsi_init_devinfo(void)
}
#ifdef CONFIG_SCSI_PROC_FS
- p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
+ p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_ops);
if (!p) {
error = -ENOMEM;
goto out;
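
The scsi_devinfo.c hunk above, like the scsi_proc.c and sg.c hunks later in this series, is part of the v5.6 tree-wide move from struct file_operations to the slimmer struct proc_ops for procfs entries; note that the old .owner field has no proc_ops equivalent. A minimal sketch of the new registration shape, with hypothetical names:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");			/* fill the seq_file buffer */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

/* Same callbacks as the old file_operations, renamed with a proc_ prefix. */
static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* registration: proc_create("example", 0444, NULL, &example_proc_ops); */
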
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 57bcd05605bf..8f3af87b6bb0 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -189,17 +189,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
}
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field. Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
{
char scsi_cmd[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sense_hdr;
@@ -266,14 +256,50 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
return scsi_ioctl_get_pci(sdev, arg);
case SG_SCSI_RESET:
return scsi_ioctl_reset(sdev, arg);
- default:
- if (sdev->host->hostt->ioctl)
- return sdev->host->hostt->ioctl(sdev, cmd, arg);
}
+ return -ENOIOCTLCMD;
+}
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ int ret = scsi_ioctl_common(sdev, cmd, arg);
+
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (sdev->host->hostt->ioctl)
+ return sdev->host->hostt->ioctl(sdev, cmd, arg);
+
return -EINVAL;
}
EXPORT_SYMBOL(scsi_ioctl);
+#ifdef CONFIG_COMPAT
+int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ int ret = scsi_ioctl_common(sdev, cmd, arg);
+
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (sdev->host->hostt->compat_ioctl)
+ return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_compat_ioctl);
+#endif
+
/*
* We can process a reset even when a device isn't fully operable.
*/
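
The scsi_ioctl.c rework above establishes the pattern repeated in the sd.c, sg.c and st.c hunks below: one shared handler covers everything common to native and compat callers and returns -ENOIOCTLCMD for the rest, while two thin wrappers fall back to the host template's ioctl or compat_ioctl hook. A minimal sketch of that shape, with hypothetical names throughout:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical device with per-driver fallback hooks. */
struct example_dev {
	long (*ioctl)(struct example_dev *dev, unsigned int cmd, void __user *arg);
	long (*compat_ioctl)(struct example_dev *dev, unsigned int cmd, void __user *arg);
};

static long example_ioctl_common(struct example_dev *dev, unsigned int cmd,
				 void __user *arg)
{
	switch (cmd) {
	/* ... cases that are identical for native and compat callers ... */
	}
	return -ENOIOCTLCMD;		/* hand over to the caller-specific path */
}

long example_ioctl(struct example_dev *dev, unsigned int cmd, void __user *arg)
{
	long ret = example_ioctl_common(dev, cmd, arg);

	if (ret != -ENOIOCTLCMD)
		return ret;
	return dev->ioctl ? dev->ioctl(dev, cmd, arg) : -EINVAL;
}

long example_compat_ioctl(struct example_dev *dev, unsigned int cmd,
			  void __user *arg)
{
	long ret = example_ioctl_common(dev, cmd, arg);

	if (ret != -ENOIOCTLCMD)
		return ret;
	return dev->compat_ioctl ? dev->compat_ioctl(dev, cmd, arg) : ret;
}
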
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3e7a45d0daca..610ee41fa54c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2108,6 +2108,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
memset(data, 0, sizeof(*data));
memset(&cmd[0], 0, 12);
+
+ dbd = sdev->set_dbd_for_ms ? 8 : dbd;
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
cmd[2] = modepage;
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 836185de28c4..3df877886119 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -53,7 +53,7 @@ do { \
} while (0)
#else
#define SCSI_LOG_LEVEL(SHIFT, BITS) 0
-#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0)
#endif /* CONFIG_SCSI_LOGGING */
/*
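
The scsi_logging.h one-liner above deserves a note: when CONFIG_SCSI_LOGGING is off, a macro that expands to nothing leaves a bare ';' at every call site, which gcc's -Wempty-body flags and which readers easily misparse; do { } while (0) keeps the compiled-out call a single well-formed statement. A minimal illustration, not kernel code:

#include <linux/printk.h>

#ifdef CONFIG_EXAMPLE_LOGGING
#define EXAMPLE_LOG(cmd)	do { cmd; } while (0)
#else
#define EXAMPLE_LOG(cmd)	do { } while (0)	/* not a bare empty expansion */
#endif

static void example(int err)
{
	if (err)
		EXAMPLE_LOG(pr_err("error %d\n", err));
	/* with an empty expansion, the if body above would be a lone ';' */
}
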
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 5b313226f11c..d6982d355739 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -83,12 +83,12 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file)
4 * PAGE_SIZE);
}
-static const struct file_operations proc_scsi_fops = {
- .open = proc_scsi_host_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = proc_scsi_host_write
+static const struct proc_ops proc_scsi_ops = {
+ .proc_open = proc_scsi_host_open,
+ .proc_release = single_release,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_write = proc_scsi_host_write
};
/**
@@ -146,7 +146,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
sprintf(name,"%d", shost->host_no);
p = proc_create_data(name, S_IRUGO | S_IWUSR,
- sht->proc_dir, &proc_scsi_fops, shost);
+ sht->proc_dir, &proc_scsi_ops, shost);
if (!p)
printk(KERN_ERR "%s: Failed to register host %d in"
"%s\n", __func__, shost->host_no,
@@ -436,13 +436,12 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
return seq_open(file, &scsi_seq_ops);
}
-static const struct file_operations proc_scsi_operations = {
- .owner = THIS_MODULE,
- .open = proc_scsi_open,
- .read = seq_read,
- .write = proc_scsi_write,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops scsi_scsi_proc_ops = {
+ .proc_open = proc_scsi_open,
+ .proc_read = seq_read,
+ .proc_write = proc_scsi_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
/**
@@ -456,7 +455,7 @@ int __init scsi_init_procfs(void)
if (!proc_scsi)
goto err1;
- pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
+ pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops);
if (!pde)
goto err2;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ed8d9709b9b9..dfc726fa34e3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2093,7 +2093,12 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
"could not register session's dev\n");
goto release_ida;
}
- transport_register_device(&session->dev);
+ err = transport_register_device(&session->dev);
+ if (err) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register transport's dev\n");
+ goto release_dev;
+ }
spin_lock_irqsave(&sesslock, flags);
list_add(&session->sess_list, &sesslist);
@@ -2103,6 +2108,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
return 0;
+release_dev:
+ device_del(&session->dev);
release_ida:
if (session->ida_used)
ida_simple_remove(&iscsi_sess_ida, session->target_id);
@@ -2263,7 +2270,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
"register connection's dev\n");
goto release_parent_ref;
}
- transport_register_device(&conn->dev);
+ err = transport_register_device(&conn->dev);
+ if (err) {
+ iscsi_cls_session_printk(KERN_ERR, session, "could not "
+ "register transport's dev\n");
+ goto release_conn_ref;
+ }
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
@@ -2272,6 +2284,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
return conn;
+release_conn_ref:
+ put_device(&conn->dev);
release_parent_ref:
put_device(&session->dev);
free_conn:
@@ -2947,6 +2961,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
return err;
}
+static int iscsi_session_has_conns(int sid)
+{
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_for_each_entry(conn, &connlist, conn_list) {
+ if (iscsi_conn_get_sid(conn) == sid) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+
+ return found;
+}
+
static int
iscsi_set_iface_params(struct iscsi_transport *transport,
struct iscsi_uevent *ev, uint32_t len)
@@ -3524,10 +3556,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
- if (session)
- transport->destroy_session(session);
- else
+ if (!session)
err = -EINVAL;
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else
+ transport->destroy_session(session);
break;
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
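
The two scsi_transport_iscsi.c fixes above follow the kernel's goto-unwind convention: each later failure jumps to a label that releases everything acquired before it, in reverse order, so checking the transport_register_device() result only costs one extra label. A minimal sketch with hypothetical names (the device/IDA pairing mirrors iscsi_add_session() above):

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/transport_class.h>

static DEFINE_IDA(example_ida);

struct example_session {
	struct device dev;
	int id;
};

static int example_register(struct example_session *sess)
{
	int err;

	err = device_add(&sess->dev);
	if (err)
		goto release_ida;

	err = transport_register_device(&sess->dev);
	if (err)
		goto release_dev;		/* undo device_add() only */

	return 0;

release_dev:
	device_del(&sess->dev);
release_ida:
	ida_simple_remove(&example_ida, sess->id);
	return err;
}
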
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cea625906440..8ca9299ffd36 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1465,13 +1465,12 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forward onto the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, void __user *p)
{
struct gendisk *disk = bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
- void __user *p = (void __user *)arg;
int error;
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
@@ -1507,9 +1506,6 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
break;
default:
error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
- if (error != -ENOTTY)
- break;
- error = scsi_ioctl(sdp, cmd, p);
break;
}
out:
@@ -1691,39 +1687,31 @@ static void sd_rescan(struct device *dev)
revalidate_disk(sdkp->disk);
}
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ int ret;
+
+ ret = sd_ioctl_common(bdev, mode, cmd, p);
+ if (ret != -ENOTTY)
+ return ret;
+
+ return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
+}
#ifdef CONFIG_COMPAT
-/*
- * This gets directly called from VFS. When the ioctl
- * is not recognized we go back to the other translation paths.
- */
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdev = sdkp->device;
void __user *p = compat_ptr(arg);
- int error;
-
- error = scsi_verify_blk_ioctl(bdev, cmd);
- if (error < 0)
- return error;
+ int ret;
- error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
- (mode & FMODE_NDELAY) != 0);
- if (error)
- return error;
+ ret = sd_ioctl_common(bdev, mode, cmd, p);
+ if (ret != -ENOTTY)
+ return ret;
- if (is_sed_ioctl(cmd))
- return sed_ioctl(sdkp->opal_dev, cmd, p);
-
- /*
- * Let the static ioctl translation table take care of it.
- */
- if (!sdev->host->hostt->compat_ioctl)
- return -ENOIOCTLCMD;
- return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
+ return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
}
#endif
@@ -2211,8 +2199,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
u8 type;
int ret = 0;
- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+ sdkp->protection_type = 0;
return ret;
+ }
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
@@ -2956,15 +2946,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
q->limits.zoned = BLK_ZONED_HM;
} else {
sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
+ if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
/* Host-aware */
q->limits.zoned = BLK_ZONED_HA;
- else
+ } else {
/*
- * Treat drive-managed devices as
- * regular block devices.
+ * Treat drive-managed devices and host-aware devices
+ * with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ }
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e0bd4cf17230..e4282bce5834 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -325,22 +325,21 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
}
/**
- * sd_zbc_check_zones - Check the device capacity and zone sizes
+ * sd_zbc_check_capacity - Check the device capacity
* @sdkp: Target disk
+ * @buf: command buffer
+ * @zblock: zone size in number of blocks
*
- * Check that the device capacity as reported by READ CAPACITY matches the
- * max_lba value (plus one)of the report zones command reply. Also check that
- * all zones of the device have an equal size, only allowing the last zone of
- * the disk to have a smaller size (runt zone). The zone size must also be a
- * power of two.
+ * Get the device zone size and check that the device capacity as reported
+ * by READ CAPACITY matches the max_lba value (plus one) of the report zones
+ * command reply for devices with RC_BASIS == 0.
*
- * Returns the zone size in number of blocks upon success or an error code
- * upon failure.
+ * Returns 0 upon success or an error code upon failure.
*/
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
- u32 *zblocks)
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
+ u32 *zblocks)
{
- u64 zone_blocks = 0;
+ u64 zone_blocks;
sector_t max_lba;
unsigned char *rec;
int ret;
@@ -363,17 +362,9 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
}
- /* Parse REPORT ZONES header */
+ /* Get the size of the first reported zone */
rec = buf + 64;
zone_blocks = get_unaligned_be64(&rec[8]);
- if (!zone_blocks || !is_power_of_2(zone_blocks)) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Devices with non power of 2 zone "
- "size are not supported\n");
- return -ENODEV;
- }
-
if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
@@ -405,11 +396,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
if (ret)
goto err;
- /*
- * Check zone size: only devices with a constant zone size (except
- * an eventual last runt zone) that is a power of 2 are supported.
- */
- ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
+ /* Check the device capacity reported by report zones */
+ ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
if (ret != 0)
goto err;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 160748ad9c0f..4e6af592f018 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -405,6 +405,38 @@ sg_release(struct inode *inode, struct file *filp)
return 0;
}
+static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count)
+{
+ struct sg_header __user *old_hdr = buf;
+ int reply_len;
+
+ if (count >= SZ_SG_HEADER) {
+ /* negative reply_len means v3 format, otherwise v1/v2 */
+ if (get_user(reply_len, &old_hdr->reply_len))
+ return -EFAULT;
+
+ if (reply_len >= 0)
+ return get_user(*pack_id, &old_hdr->pack_id);
+
+ if (in_compat_syscall() &&
+ count >= sizeof(struct compat_sg_io_hdr)) {
+ struct compat_sg_io_hdr __user *hp = buf;
+
+ return get_user(*pack_id, &hp->pack_id);
+ }
+
+ if (count >= sizeof(struct sg_io_hdr)) {
+ struct sg_io_hdr __user *hp = buf;
+
+ return get_user(*pack_id, &hp->pack_id);
+ }
+ }
+
+ /* no valid header was passed, so ignore the pack_id */
+ *pack_id = -1;
+ return 0;
+}
+
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
@@ -413,8 +445,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_request *srp;
int req_pack_id = -1;
sg_io_hdr_t *hp;
- struct sg_header *old_hdr = NULL;
- int retval = 0;
+ struct sg_header *old_hdr;
+ int retval;
/*
* This could cause a response to be stranded. Close the associated
@@ -429,79 +461,34 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
- if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
- old_hdr = memdup_user(buf, SZ_SG_HEADER);
- if (IS_ERR(old_hdr))
- return PTR_ERR(old_hdr);
- if (old_hdr->reply_len < 0) {
- if (count >= SZ_SG_IO_HDR) {
- /*
- * This is stupid.
- *
- * We're copying the whole sg_io_hdr_t from user
- * space just to get the 'pack_id' field. But the
- * field is at different offsets for the compat
- * case, so we'll use "get_sg_io_hdr()" to copy
- * the whole thing and convert it.
- *
- * We could do something like just calculating the
- * offset based of 'in_compat_syscall()', but the
- * 'compat_sg_io_hdr' definition is in the wrong
- * place for that.
- */
- sg_io_hdr_t *new_hdr;
- new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
- if (!new_hdr) {
- retval = -ENOMEM;
- goto free_old_hdr;
- }
- retval = get_sg_io_hdr(new_hdr, buf);
- req_pack_id = new_hdr->pack_id;
- kfree(new_hdr);
- if (retval) {
- retval = -EFAULT;
- goto free_old_hdr;
- }
- }
- } else
- req_pack_id = old_hdr->pack_id;
- }
+ if (sfp->force_packid)
+ retval = get_sg_io_pack_id(&req_pack_id, buf, count);
+ if (retval)
+ return retval;
+
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching)) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
- if (filp->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto free_old_hdr;
- }
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
(atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching)) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
- if (retval) {
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ if (retval)
/* -ERESTARTSYS as signal hit process */
- goto free_old_hdr;
- }
- }
- if (srp->header.interface_id != '\0') {
- retval = sg_new_read(sfp, buf, count, srp);
- goto free_old_hdr;
+ return retval;
}
+ if (srp->header.interface_id != '\0')
+ return sg_new_read(sfp, buf, count, srp);
hp = &srp->header;
- if (old_hdr == NULL) {
- old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
- if (! old_hdr) {
- retval = -ENOMEM;
- goto free_old_hdr;
- }
- }
- memset(old_hdr, 0, SZ_SG_HEADER);
+ old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL);
+ if (!old_hdr)
+ return -ENOMEM;
+
old_hdr->reply_len = (int) hp->timeout;
old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
old_hdr->pack_id = hp->pack_id;
@@ -575,7 +562,12 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
int err = 0, err2;
int len;
- if (count < SZ_SG_IO_HDR) {
+ if (in_compat_syscall()) {
+ if (count < sizeof(struct compat_sg_io_hdr)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ } else if (count < SZ_SG_IO_HDR) {
err = -EINVAL;
goto err_out;
}
@@ -919,19 +911,14 @@ static int put_compat_request_table(struct compat_sg_req_info __user *o,
#endif
static long
-sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
+ unsigned int cmd_in, void __user *p)
{
- void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
- Sg_device *sdp;
- Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
- return -ENXIO;
-
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1154,29 +1141,44 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
+
+ return -ENOIOCTLCMD;
+}
+
+static long
+sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ int ret;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+
+ ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
+ void __user *p = compat_ptr(arg);
Sg_device *sdp;
Sg_fd *sfp;
- struct scsi_device *sdev;
+ int ret;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- sdev = sdp->device;
- if (sdev->host->hostt->compat_ioctl) {
- int ret;
-
- ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-
+ ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
+ if (ret != -ENOIOCTLCMD)
return ret;
- }
-
- return -ENOIOCTLCMD;
+
+ return scsi_compat_ioctl(sdp->device, cmd_in, p);
}
#endif
@@ -2320,25 +2322,23 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off);
-static const struct file_operations adio_fops = {
- .owner = THIS_MODULE,
- .open = sg_proc_single_open_adio,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = sg_proc_write_adio,
- .release = single_release,
+static const struct proc_ops adio_proc_ops = {
+ .proc_open = sg_proc_single_open_adio,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_write = sg_proc_write_adio,
+ .proc_release = single_release,
};
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
const char __user *buffer, size_t count, loff_t *off);
-static const struct file_operations dressz_fops = {
- .owner = THIS_MODULE,
- .open = sg_proc_single_open_dressz,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = sg_proc_write_dressz,
- .release = single_release,
+static const struct proc_ops dressz_proc_ops = {
+ .proc_open = sg_proc_single_open_dressz,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_write = sg_proc_write_dressz,
+ .proc_release = single_release,
};
static int sg_proc_seq_show_version(struct seq_file *s, void *v);
@@ -2379,9 +2379,9 @@ sg_proc_init(void)
if (!p)
return 1;
- proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_fops);
+ proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops);
proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops);
- proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_fops);
+ proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops);
proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr);
proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops);
proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 412ac56ecd60..b7492568e02f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -7457,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
goto disable_device;
}
- ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+ ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
sizeof(struct pqi_ctrl_registers));
if (!ctrl_info->iomem_base) {
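
The smartpqi hunk above, and the sni_53c710 and sun3x_esp hunks that follow, are mechanical renames from the v5.6 removal of ioremap_nocache(): ioremap() has produced uncached mappings by default for a long time, so the _nocache spelling added nothing. The usual probe-time shape, as a sketch:

#include <linux/io.h>

static void __iomem *example_map_regs(resource_size_t start)
{
	/* ioremap() defaults to an uncached mapping; pair with iounmap(). */
	return ioremap(start, 0x100);		/* NULL on failure */
}
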
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index a85d52b5dc32..f8397978f8ab 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -71,7 +71,7 @@ static int snirm710_probe(struct platform_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- hostdata->base = ioremap_nocache(base, 0x100);
+ hostdata->base = ioremap(base, 0x100);
hostdata->differential = 0;
hostdata->clock = SNIRM710_CLOCK;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 4664fdf75c0f..0fbb8fe6e521 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
+#include <linux/compat.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cdrom.h>
@@ -598,6 +599,51 @@ out:
return ret;
}
+#ifdef CONFIG_COMPAT
+static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ unsigned long arg)
+{
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ void __user *argp = compat_ptr(arg);
+ int ret;
+
+ mutex_lock(&sr_mutex);
+
+ ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+ if (ret)
+ goto out;
+
+ scsi_autopm_get_device(sdev);
+
+ /*
+ * Send SCSI addressing ioctls directly to mid level, send other
+ * ioctls to cdrom/block level.
+ */
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ ret = scsi_compat_ioctl(sdev, cmd, argp);
+ goto put;
+ }
+
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
+ if (ret != -ENOSYS)
+ goto put;
+
+ ret = scsi_compat_ioctl(sdev, cmd, argp);
+
+put:
+ scsi_autopm_put_device(sdev);
+
+out:
+ mutex_unlock(&sr_mutex);
+ return ret;
+
+}
+#endif
+
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
@@ -641,12 +687,11 @@ static const struct block_device_operations sr_bdops =
.open = sr_block_open,
.release = sr_block_release,
.ioctl = sr_block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sr_block_compat_ioctl,
+#endif
.check_events = sr_block_check_events,
.revalidate_disk = sr_block_revalidate_disk,
- /*
- * No compat_ioctl for now because sr_block_ioctl never
- * seems to pass arbitrary ioctls down to host drivers.
- */
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
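
The new sr_block_compat_ioctl() above shows the one genuine difference between a native and a compat entry point: a pointer argument coming from a 32-bit task must go through compat_ptr() before being used as void __user *. A minimal sketch, with a hypothetical shared handler:

#include <linux/compat.h>

static long example_do_ioctl(unsigned int cmd, void __user *argp);	/* hypothetical */

static long example_compat_ioctl(unsigned int cmd, unsigned long arg)
{
	/* compat_ptr() is a plain cast on most architectures but clears
	 * the high bit on s390, so never cast 'arg' directly here. */
	void __user *argp = compat_ptr(arg);

	return example_do_ioctl(cmd, argp);
}
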
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9e3fff2de83e..393f3019ccac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3501,7 +3501,7 @@ out:
/* The ioctl command */
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
{
int i, cmd_nr, cmd_type, bt;
int retval = 0;
@@ -3509,7 +3509,6 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
struct scsi_tape *STp = file->private_data;
struct st_modedef *STm;
struct st_partstat *STps;
- void __user *p = (void __user *)arg;
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
@@ -3824,9 +3823,19 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
mutex_unlock(&STp->lock);
switch (cmd_in) {
+ case SCSI_IOCTL_STOP_UNIT:
+ /* unload */
+ retval = scsi_ioctl(STp->device, cmd_in, p);
+ if (!retval) {
+ STp->rew_at_close = 0;
+ STp->ready = ST_NO_TAPE;
+ }
+ return retval;
+
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
break;
+
default:
if ((cmd_in == SG_IO ||
cmd_in == SCSI_IOCTL_SEND_COMMAND ||
@@ -3840,42 +3849,46 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
return i;
break;
}
- retval = scsi_ioctl(STp->device, cmd_in, p);
- if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */
- STp->rew_at_close = 0;
- STp->ready = ST_NO_TAPE;
- }
- return retval;
+ return -ENOTTY;
out:
mutex_unlock(&STp->lock);
return retval;
}
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ struct scsi_tape *STp = file->private_data;
+ int ret;
+
+ ret = st_ioctl_common(file, cmd_in, p);
+ if (ret != -ENOTTY)
+ return ret;
+
+ return scsi_ioctl(STp->device, cmd_in, p);
+}
+
#ifdef CONFIG_COMPAT
static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
void __user *p = compat_ptr(arg);
struct scsi_tape *STp = file->private_data;
- struct scsi_device *sdev = STp->device;
- int ret = -ENOIOCTLCMD;
+ int ret;
/* argument conversion is handled using put_user_mtpos/put_user_mtget */
switch (cmd_in) {
- case MTIOCTOP:
- return st_ioctl(file, MTIOCTOP, (unsigned long)p);
case MTIOCPOS32:
- return st_ioctl(file, MTIOCPOS, (unsigned long)p);
+ return st_ioctl_common(file, MTIOCPOS, p);
case MTIOCGET32:
- return st_ioctl(file, MTIOCGET, (unsigned long)p);
+ return st_ioctl_common(file, MTIOCGET, p);
}
- if (sdev->host->hostt->compat_ioctl) {
+ ret = st_ioctl_common(file, cmd_in, p);
+ if (ret != -ENOTTY)
+ return ret;
- ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-
- }
- return ret;
+ return scsi_compat_ioctl(STp->device, cmd_in, p);
}
#endif
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f8faf8b3d965..fb41636519ee 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device,
*/
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
/*
+ * For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
*/
- host->nr_hw_queues = num_present_cpus();
+ if (!dev_is_ide)
+ host->nr_hw_queues = num_present_cpus();
/*
* Set the error handler work queue.
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 440a73eae647..f37df79e37e1 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!res || !res->start)
goto fail_unlink;
- esp->regs = ioremap_nocache(res->start, 0x20);
+ esp->regs = ioremap(res->start, 0x20);
if (!esp->regs)
goto fail_unmap_regs;
@@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!res || !res->start)
goto fail_unmap_regs;
- esp->dma_regs = ioremap_nocache(res->start, 0x10);
+ esp->dma_regs = ioremap(res->start, 0x10);
esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index 9dc17f1288f9..d37e2a69136a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -227,7 +227,7 @@ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
/*
* 24C16 EEPROM reading.
*
- * GPOI0 - data in/data out
+ * GPIO0 - data in/data out
* GPIO1 - clock
* Symbios NVRAM wiring now also used by Tekram.
*/
@@ -524,7 +524,7 @@ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
/*
* 93C46 EEPROM reading.
*
- * GPOI0 - data in
+ * GPIO0 - data in
* GPIO1 - data out
* GPIO2 - clock
* GPIO4 - chip select
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 6feeb0faf123..56a6a1ed5ec2 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -19,6 +19,85 @@
#define CDNS_UFS_REG_HCLKDIV 0xFC
#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
+#define CDNS_UFS_MAX_L4_ATTRS 12
+
+struct cdns_ufs_host {
+ /**
+ * cdns_ufs_dme_attr_val - for storing L4 attributes
+ */
+ u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
+};
+
+/**
+ * cdns_ufs_get_l4_attr - get L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ &host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ &host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ &host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ &host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ &host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ &host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
+ &host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ &host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_l4_attr - set L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
+ host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ host->cdns_ufs_dme_attr_val[11]);
+}
/**
* Sets HCLKDIV register value based on the core_clk
@@ -78,6 +157,22 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
}
/**
+ * Called around hibern8 enter/exit.
+ * @hba: host controller instance
+ * @cmd: UIC Command
+ * @status: notify stage (pre, post change)
+ *
+ */
+static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
+ cdns_ufs_get_l4_attr(hba);
+ if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
+ cdns_ufs_set_l4_attr(hba);
+}
+
+/**
* Called before and after Link startup is carried out.
* @hba: host controller instance
* @status: notify stage (pre, post change)
@@ -117,6 +212,14 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
static int cdns_ufs_init(struct ufs_hba *hba)
{
int status = 0;
+ struct cdns_ufs_host *host;
+ struct device *dev = hba->dev;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
if (hba->vops && hba->vops->phy_initialization)
status = hba->vops->phy_initialization(hba);
@@ -144,8 +247,10 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
+ .init = cdns_ufs_init,
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
@@ -154,6 +259,7 @@ static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct of_device_id cdns_ufs_of_match[] = {
@@ -219,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
static struct platform_driver cdns_ufs_pltfrm_driver = {
.probe = cdns_ufs_pltfrm_probe,
.remove = cdns_ufs_pltfrm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
.driver = {
.name = "cdns-ufshcd",
.pm = &cdns_ufs_dev_pm_ops,
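
cdns_ufs_init() above uses the standard vops pattern for per-controller private state: allocate it with devm_kzalloc() so teardown is automatic, then stash the pointer with ufshcd_set_variant() so every later callback (here the hibern8 notify) can fetch it with ufshcd_get_variant(). A sketch with a hypothetical variant struct:

#include <linux/device.h>
#include <linux/slab.h>
#include "ufshcd.h"

struct example_host {
	u32 saved_attrs[12];		/* mirrors cdns_ufs_dme_attr_val above */
};

static int example_init(struct ufs_hba *hba)
{
	struct example_host *host;

	/* devm allocation: freed automatically when the device detaches */
	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	ufshcd_set_variant(hba, host);	/* later: ufshcd_get_variant(hba) */
	return 0;
}
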
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 83e28edc3ac5..53eae5fe2ade 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -6,16 +6,30 @@
* Peter Wang <peter.wang@mediatek.com>
*/
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"
+#define ufs_mtk_smc(cmd, val, res) \
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
+ cmd, val, 0, 0, 0, 0, 0, &(res))
+
+#define ufs_mtk_ref_clk_notify(on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
return err;
}
+static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct arm_smccc_res res;
+ unsigned long timeout;
+ u32 value;
+
+ if (host->ref_clk_enabled == on)
+ return 0;
+
+ if (on) {
+ ufs_mtk_ref_clk_notify(on, res);
+ ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
+ } else {
+ ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
+ }
+
+ /* Wait for ack */
+ timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
+ do {
+ value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
+
+ /* Wait until ack bit equals to req bit */
+ if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
+ goto out;
+
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
+
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+
+ return -ETIMEDOUT;
+
+out:
+ host->ref_clk_enabled = on;
+ if (!on)
+ ufs_mtk_ref_clk_notify(on, res);
+
+ return 0;
+}
+
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -105,12 +162,16 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (!on)
+ if (!on) {
+ ufs_mtk_setup_ref_clk(hba, on);
ret = phy_power_off(host->mphy);
+ }
break;
case POST_CHANGE:
- if (on)
+ if (on) {
ret = phy_power_on(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, on);
+ }
break;
}
@@ -150,6 +211,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+ /* Enable clock-gating */
+ hba->caps |= UFSHCD_CAP_CLK_GATING;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -238,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
return ret;
}
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ u32 ah_ms;
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+ ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ else
+ ah_ms = 10;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = ah_ms + 5;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
/* disable device LCC */
@@ -246,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
+ /* configure auto-hibern8 timer to 10ms */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ufshcd_auto_hibern8_update(hba,
+ FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
+ }
+
+ ufs_mtk_setup_clk_gating(hba);
+
return 0;
}
@@ -269,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
return ret;
}
+static void ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_reset_ctrl(0, res);
+
+ /*
+ * The reset signal is active low. UFS devices are required to
+ * detect a positive or negative RST_n pulse that is at least
+ * 1 us wide.
+ *
+ * To be on the safe side, keep the reset low for at least 10 us.
+ */
+ usleep_range(10, 15);
+
+ ufs_mtk_device_reset_ctrl(1, res);
+
+ /* Some devices may need time to respond to rst_n */
+ usleep_range(10000, 15000);
+
+ dev_info(hba->dev, "device reset done\n");
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ return err;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 0);
+ if (err)
+ return err;
+
+ err = ufshcd_uic_hibern8_exit(hba);
+ if (!err)
+ ufshcd_set_link_active(hba);
+ else
+ return err;
+
+ err = ufshcd_make_hba_operational(hba);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 1);
+ if (err) {
+ /* Resume UniPro state for following error recovery */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 0);
+ return err;
+ }
+
+ return 0;
+}
+
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
+ int err;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- if (ufshcd_is_link_hibern8(hba))
+ if (ufshcd_is_link_hibern8(hba)) {
+ err = ufs_mtk_link_set_lpm(hba);
+ if (err)
+ return -EAGAIN;
phy_power_off(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, false);
+ }
return 0;
}
@@ -282,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int err;
- if (ufshcd_is_link_hibern8(hba))
+ if (ufshcd_is_link_hibern8(hba)) {
+ ufs_mtk_setup_ref_clk(hba, true);
phy_power_on(host->mphy);
+ err = ufs_mtk_link_set_hpm(hba);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+
+ ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+
+ ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
+ REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
+ "MPHY Ctrl ");
+
+ /* Direct debugging information to REG_UFS_PROBE */
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
+}
+
+static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
return 0;
}
@@ -301,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.setup_clocks = ufs_mtk_setup_clocks,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
+ .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
+ .dbg_register_dump = ufs_mtk_dbg_register_dump,
+ .device_reset = ufs_mtk_device_reset,
};
/**
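
ufs_mtk_setup_ref_clk() above is a textbook jiffies polling loop: compute the deadline once, poll with a bounded sleep rather than a busy spin, and compare with time_before() so a jiffies wraparound cannot break the timeout. A minimal sketch with a hypothetical register and ack bit; production loops often re-read the register once more after the deadline to close the race with the final sleep:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static int example_wait_ack(void __iomem *reg)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(3);
	u32 val;

	do {
		val = readl(reg);
		if (val & BIT(1))			/* hypothetical ack bit */
			return 0;
		usleep_range(100, 200);			/* sleep, don't spin */
	} while (time_before(jiffies, timeout));	/* wrap-safe compare */

	return -ETIMEDOUT;
}
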
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 19f8c42fe06f..fccdd979d6fb 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -6,6 +6,30 @@
#ifndef _UFS_MEDIATEK_H
#define _UFS_MEDIATEK_H
+#include <linux/bitops.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * Vendor specific UFSHCI Registers
+ */
+#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_EXTREG 0x2100
+#define REG_UFS_MPHYCTRL 0x2200
+#define REG_UFS_REJECT_MON 0x22AC
+#define REG_UFS_DEBUG_SEL 0x22C0
+#define REG_UFS_PROBE 0x22C8
+
+/*
+ * Ref-clk control
+ *
+ * Values for register REG_UFS_REFCLK_CTRL
+ */
+#define REFCLK_RELEASE 0x0
+#define REFCLK_REQUEST BIT(0)
+#define REFCLK_ACK BIT(1)
+
+#define REFCLK_REQ_TIMEOUT_MS 3
+
/*
* Vendor specific pre-defined parameters
*/
@@ -30,6 +54,13 @@
#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
/*
+ * SiP commands
+ */
+#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+
+/*
* VS_DEBUGCLOCKENABLE
*/
enum {
@@ -48,6 +79,7 @@ enum {
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ bool ref_clk_enabled;
};
#endif /* !_UFS_MEDIATEK_H */
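
The auto-hibern8 setup in the ufs-mediatek.c hunks above leans on the bitfield.h helpers: FIELD_PREP() shifts a value into the field described by a mask and FIELD_GET() extracts it again, so no shift count is ever written by hand. A small worked example with hypothetical field masks:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_TIMER_MASK	GENMASK(9, 0)	/* hypothetical layout */
#define EXAMPLE_SCALE_MASK	GENMASK(12, 10)

static u32 example_pack(u32 timer, u32 scale)
{
	return FIELD_PREP(EXAMPLE_TIMER_MASK, timer) |
	       FIELD_PREP(EXAMPLE_SCALE_MASK, scale);
}

static u32 example_unpack_timer(u32 packed)
{
	return FIELD_GET(EXAMPLE_TIMER_MASK, packed);	/* inverse of FIELD_PREP */
}
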
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index ad2abc96c0f1..dbdf8b01abed 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -118,26 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev,
ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
-static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
- unsigned long flags;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit)
- hba->ahit = ahit;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (!pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
- ufshcd_auto_hibern8_enable(hba);
- ufshcd_release(hba);
- pm_runtime_put(hba->dev);
- }
-}
-
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
@@ -733,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(lun)) \
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
index e5621e59a432..0f4e750a6748 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/scsi/ufs/ufs-sysfs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (C) 2018 Western Digital Corporation
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Western Digital Corporation
*/
#ifndef __UFS_SYSFS_H__
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3327981ef894..cfe380348bf0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -63,7 +63,6 @@
#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
-#define UFS_UPIU_MAX_GENERAL_LUN 8
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -499,9 +498,9 @@ struct ufs_query_res {
#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */
#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
/*
@@ -530,28 +529,28 @@ struct ufs_dev_info {
bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
bool is_lu_power_on_wp;
-};
-
-#define MAX_MODEL_LEN 16
-/**
- * ufs_dev_desc - ufs device details from the device descriptor
- *
- * @wmanufacturerid: card details
- * @model: card model
- */
-struct ufs_dev_desc {
+ /* Maximum number of general LU supported by the UFS device */
+ u8 max_lu_supported;
u16 wmanufacturerid;
+ /* UFS device Product Name */
u8 *model;
};
/**
* ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @dev_info: pointer of instance of struct ufs_dev_info
* @lun: LU number to check
* @return: true if the lun has a matching unit descriptor, false otherwise
*/
-static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+ u8 lun)
{
- return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+ if (!dev_info || !dev_info->max_lu_supported) {
+ pr_err("Max General LU supported by UFS isn't initialized\n");
+ return false;
+ }
+
+ return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index fe6cad9b2a0d..d0ab147f98d3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -22,16 +22,17 @@
* @quirk: device quirk
*/
struct ufs_dev_fix {
- struct ufs_dev_desc card;
+ u16 wmanufacturerid;
+ u8 *model;
unsigned int quirk;
};
-#define END_FIX { { 0 }, 0 }
+#define END_FIX { }
/* add specific device quirk */
#define UFS_FIX(_vendor, _model, _quirk) { \
- .card.wmanufacturerid = (_vendor),\
- .card.model = (_model), \
+ .wmanufacturerid = (_vendor),\
+ .model = (_model), \
.quirk = (_quirk), \
}
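
The ufshcd.c diff below removes the driver's private lrb_in_use and tm_slots_in_use bitmaps together with their hand-rolled tag allocators: device-management commands now reserve a tag by allocating a block-layer request on a dedicated queue, so tag lifetime is owned by blk-mq and busy-ness can be queried via blk_mq_tagset_busy_iter(). A minimal sketch of the reservation pattern:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/printk.h>

static int example_exec_dev_cmd(struct request_queue *q)
{
	struct request *req;
	int tag, err = 0;

	/* Allocating a request reserves a unique tag until blk_put_request(). */
	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	tag = req->tag;
	pr_debug("reserved tag %d\n", tag);	/* unique while 'req' is held */

	/* ... issue the command using 'tag' and wait for completion ... */

	blk_put_request(req);			/* tag becomes reusable */
	return err;
}
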
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b5966faf3e98..abd0e6b05f79 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -246,11 +246,10 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
@@ -266,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
return tag >= 0 && tag < hba->nutrs;
}
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
- int ret = 0;
-
if (!hba->is_irq_enabled) {
- ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
- hba);
- if (ret)
- dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
- __func__, ret);
+ enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
-
- return ret;
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
- free_irq(hba->irq, hba);
+ disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
@@ -335,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
u8 opcode = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (lrbp->cmd)
+ if (cmd)
ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
}
- if (lrbp->cmd) { /* data phase exists */
+ if (cmd) { /* data phase exists */
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str);
- opcode = (u8)(*lrbp->cmd->cmnd);
+ opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
* Currently we only fully trace read(10) and write(10)
* commands
*/
- if (lrbp->cmd->request && lrbp->cmd->request->bio)
- lba =
- lrbp->cmd->request->bio->bi_iter.bi_sector;
+ if (cmd->request && cmd->request->bio)
+ lba = cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
@@ -393,7 +384,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
- if (err_hist->reg[p] == 0)
+ if (err_hist->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
@@ -401,7 +392,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
}
if (!found)
- dev_err(hba->dev, "No record of %s errors\n", err_name);
+ dev_err(hba->dev, "No record of %s\n", err_name);
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -436,8 +427,7 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_clk_freqs(hba);
- if (hba->vops && hba->vops->dbg_register_dump)
- hba->vops->dbg_register_dump(hba);
+ ufshcd_vops_dbg_register_dump(hba);
}
static
@@ -497,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -646,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
-{
- int tag;
- bool ret = false;
-
- if (!free_slot)
- goto out;
-
- do {
- tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
- if (tag >= hba->nutmrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
- *free_slot = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
-{
- clear_bit_unlock(slot, &hba->tm_slots_in_use);
-}
-
-/**
* ufshcd_utrl_clear - Clear a bit in UTRLCLR register
* @hba: per adapter instance
* @pos: position of the bit to be cleared
@@ -1273,6 +1229,24 @@ out:
return ret;
}
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+ int *busy = priv;
+
+ WARN_ON_ONCE(reserved);
+ (*busy)++;
+ return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+ struct request_queue *q = hba->cmd_queue;
+ int busy = 0;
+
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+ return busy;
+}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
@@ -1490,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_setup_clocks(hba, true);
+ ufshcd_enable_irq(hba);
+
/* Exit from hibern8 */
if (ufshcd_can_hibern8_during_gating(hba)) {
/* Prevent gating in this path */
@@ -1619,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1636,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
+ ufshcd_disable_irq(hba);
+
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -1673,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done
|| ufshcd_eh_in_progress(hba))
return;
@@ -1881,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_add_command_trace(hba, task_tag, "send");
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
- ufshcd_add_command_trace(hba, task_tag, "send");
}
/**
@@ -2239,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
+ struct scsi_cmnd *cmd = lrbp->cmd;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
@@ -2252,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
/* Total EHS length and Data segment length will be zero */
ucd_req_ptr->header.dword_2 = 0;
- ucd_req_ptr->sc.exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->sdb.length);
+ ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+ cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+ memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -2443,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- /* acquire the tag to make sure device cmds don't use it */
- if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
- /*
- * Dev manage command in progress, requeue the command.
- * Requeuing the command helps in cases where the request *may*
- * find different tag instead of waiting for dev manage command
- * completion.
- */
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
- clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2479,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
- clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release(hba);
goto out;
}
/* Make sure descriptors are ready before ringing the doorbell */
@@ -2627,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
}
/**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
- int tag;
- bool ret = false;
- unsigned long tmp;
-
- if (!tag_out)
- goto out;
-
- do {
- tmp = ~hba->lrb_in_use;
- tag = find_last_bit(&tmp, hba->nutrs);
- if (tag >= hba->nutrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
- *tag_out = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
- clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
-/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
@@ -2676,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
@@ -2689,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though blk_get_request() may sleep while waiting for a free
* tag, the maximum wait time is bounded by the SCSI request timeout.
*/
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_unlock;
+ }
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
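Device management commands now borrow a real block-layer tag from hba->cmd_queue, which shares the SCSI host's tag set, so they can no longer race with SCSI commands for the same LRB slot, and the old lrb_in_use bitmap plus its wait queue become redundant. A minimal sketch of the allocate/use/release pattern, assuming a queue created from the host tag set as in the init path further down:

#include <linux/blk-mq.h>

/* Sketch: borrow a tag for a driver-internal command, then return it. */
static int example_send_internal_cmd(struct request_queue *q)
{
	struct request *req;
	int tag;

	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);	/* may sleep */
	if (IS_ERR(req))
		return PTR_ERR(req);

	tag = req->tag;		/* indexes hba->lrb[] in this patch */
	/* ... build and issue the device command using @tag ... */

	blk_put_request(req);	/* tag becomes available to SCSI I/O again */
	return 0;
}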
@@ -2714,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err ? "query_complete_err" : "query_complete");
out_put_tag:
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
+out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -2918,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
int ret = 0;
u32 retries;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
ret = ufshcd_query_attr(hba, opcode, idn, index,
selector, attr_val);
if (ret)
@@ -3210,17 +3145,6 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}
-static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
- u8 *buf,
- u32 size)
-{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
-}
-
-static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
-{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
-}
/**
* struct uc_string_id - unicode string
@@ -3345,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (!ufs_is_valid_unit_desc_lun(lun))
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
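The validity check now takes the per-device info so the LU limit can come from the geometry descriptor (populated by ufshcd_device_geo_params_init() later in this patch) rather than the fixed UFS_UPIU_MAX_GENERAL_LUN. A plausible shape for the updated helper — a sketch only, since the actual header change is outside this hunk:

/*
 * Sketch: valid unit-descriptor LUNs are the RPMB W-LU plus any LUN
 * below the maximum the device reported in its geometry descriptor.
 */
static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
					      u8 lun)
{
	if (!dev_info || !dev_info->max_lu_supported)
		return false;
	return lun == UFS_UPIU_RPMB_WLUN || lun < dev_info->max_lu_supported;
}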
@@ -3929,7 +3853,7 @@ out:
return ret;
}
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
@@ -3955,6 +3879,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ahit == ahit)
+ goto out_unlock;
+ hba->ahit = ahit;
+ if (!pm_runtime_suspended(hba->dev))
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
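ufshcd_auto_hibern8_update() gives platform glue drivers a locked, idempotent way to retune the auto-hibernate idle timer: it holds the host lock, returns early when the value is unchanged, and skips the register write while the device is runtime-suspended (the cached value is written out again on resume). A usage sketch; the AHIT field masks follow the register layout in ufshci.h, and the timer/scale values are invented for illustration:

#include <linux/bitfield.h>

/* Sketch: hypothetical vendor hook choosing a new idle timeout. */
static void example_set_ahit(struct ufs_hba *hba)
{
	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); /* assumed 1 ms units */

	ufshcd_auto_hibern8_update(hba, ahit);
}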
@@ -4095,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
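The six PA_PWRMODEUSERDATAn writes seed the data-link layer FC0/TC0/AFC0 and FC1/TC1/AFC1 protection, replay and request timeouts that take effect with the next power-mode change, while the three DME_Local* writes program the same spec defaults into the local DME. An equivalent table-driven form of the first group, shown purely as a compactness sketch:

static const struct {
	u32 attr;
	u32 val;
} pwr_mode_userdata[] = {
	{ PA_PWRMODEUSERDATA0, DL_FC0ProtectionTimeOutVal_Default },
	{ PA_PWRMODEUSERDATA1, DL_TC0ReplayTimeOutVal_Default },
	{ PA_PWRMODEUSERDATA2, DL_AFC0ReqTimeOutVal_Default },
	{ PA_PWRMODEUSERDATA3, DL_FC1ProtectionTimeOutVal_Default },
	{ PA_PWRMODEUSERDATA4, DL_TC1ReplayTimeOutVal_Default },
	{ PA_PWRMODEUSERDATA5, DL_AFC1ReqTimeOutVal_Default },
};
int i;

for (i = 0; i < ARRAY_SIZE(pwr_mode_userdata); i++)
	ufshcd_dme_set(hba, UIC_ARG_MIB(pwr_mode_userdata[i].attr),
		       pwr_mode_userdata[i].val);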
@@ -4188,7 +4151,7 @@ out:
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
int err = 0;
u32 reg;
@@ -4234,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
out:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
@@ -4311,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
return 0;
}
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
{
int ret;
@@ -4336,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
+
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
int tx_lanes, i, err = 0;
@@ -4372,13 +4338,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
return ufshcd_disable_tx_lcc(hba, true);
}
-static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg)
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg)
{
reg_hist->reg[reg_hist->pos] = reg;
reg_hist->tstamp[reg_hist->pos] = ktime_get();
reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
}
+EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
/**
* ufshcd_link_startup - Initialize unipro link startup
@@ -4561,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
* protected so skip reading bLUWriteProtect parameter for
* it. For other W-LUs, UNIT DESCRIPTOR is not available.
*/
- else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ else if (lun >= hba->dev_info.max_lu_supported)
ret = -ENOTSUPP;
else
ret = ufshcd_read_unit_desc_param(hba,
@@ -4608,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
sdev->use_10_for_ms = 1;
+ /* DBD field should be set to 1 in mode sense(10) */
+ sdev->set_dbd_for_ms = 1;
+
/* allow SCSI layer to restart the device in case of errors */
sdev->allow_restart = 1;
@@ -4799,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if (host_byte(result) != DID_OK)
+ if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -4856,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
cmd->result = result;
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
- clear_bit_unlock(index, &hba->lrb_in_use);
+ lrbp->compl_time_stamp = ktime_get();
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ lrbp->compl_time_stamp = ktime_get();
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
"dev_complete");
@@ -4870,17 +4841,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
-
- lrbp->compl_time_stamp = ktime_get();
}
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
ufshcd_clk_scaling_update_busy(hba);
-
- /* we might have free'd some tags above */
- wake_up(&hba->dev_cmd.tag_wq);
}
/**
@@ -5053,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
}
@@ -5077,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
ufshcd_disable_auto_bkops(hba);
}
+ hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -5123,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
err = ufshcd_enable_auto_bkops(hba);
else
err = ufshcd_disable_auto_bkops(hba);
+ hba->urgent_bkops_lvl = curr_status;
out:
return err;
}
@@ -5200,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5214,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
out:
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
pm_runtime_put_sync(hba->dev);
return;
}
@@ -5348,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work)
/*
* if host reset is required then skip clearing the pending
- * transfers forcefully because they will automatically get
- * cleared after link startup.
+ * transfers forcefully because they will get cleared during
+ * host reset and restore
*/
if (needs_reset)
goto skip_pending_xfer_clear;
@@ -5603,6 +5572,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
return retval;
}
+struct ctm_info {
+ struct ufs_hba *hba;
+ unsigned long pending;
+ unsigned int ncpl;
+};
+
+static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
+{
+ struct ctm_info *const ci = priv;
+ struct completion *c;
+
+ WARN_ON_ONCE(reserved);
+ if (test_bit(req->tag, &ci->pending))
+ return true;
+ ci->ncpl++;
+ c = req->end_io_data;
+ if (c)
+ complete(c);
+ return true;
+}
+
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
@@ -5613,16 +5603,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
- u32 tm_doorbell;
+ struct request_queue *q = hba->tmf_queue;
+ struct ctm_info ci = {
+ .hba = hba,
+ .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
+ };
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- if (hba->tm_condition) {
- wake_up(&hba->tm_wq);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -5728,7 +5716,10 @@ out:
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
struct utp_task_req_desc *treq, u8 tm_function)
{
+ struct request_queue *q = hba->tmf_queue;
struct Scsi_Host *host = hba->host;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request *req;
unsigned long flags;
int free_slot, task_tag, err;
@@ -5737,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though blk_get_request() may sleep while waiting for a free
* reserved tag, the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req->end_io_data = &wait;
+ free_slot = req->tag;
+ WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
@@ -5763,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
/* wait until the task management command is completed */
- err = wait_event_timeout(hba->tm_wq,
- test_bit(free_slot, &hba->tm_condition),
+ err = wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
+ /*
+ * Make sure that ufshcd_compl_tm() does not trigger a
+ * use-after-free.
+ */
+ req->end_io_data = NULL;
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
@@ -5785,9 +5783,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
__clear_bit(free_slot, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- clear_bit(free_slot, &hba->tm_condition);
- ufshcd_put_tm_slot(hba, free_slot);
- wake_up(&hba->tm_tag_wq);
+ blk_put_request(req);
ufshcd_release(hba);
return err;
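Taken together with ufshcd_compl_tm() above: the reserved blk-mq tag doubles as the UTP task-management slot, the request's end_io_data carries the on-stack completion to the interrupt handler, and NULLing end_io_data on timeout keeps the handler from completing a stale stack pointer. A condensed sketch of that lifecycle, error handling trimmed and names taken from the patch:

#include <linux/blk-mq.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

static int example_issue_tmf(struct request_queue *tmf_q)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	int slot, err = 0;

	req = blk_get_request(tmf_q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->end_io_data = &wait;	/* seen by ufshcd_compl_tm() */
	slot = req->tag;		/* doubles as the UTMR slot */

	/* ... write the descriptor and ring the TM doorbell for @slot ... */

	if (!wait_for_completion_io_timeout(&wait,
					    msecs_to_jiffies(TM_CMD_TIMEOUT))) {
		req->end_io_data = NULL; /* handler must not touch @wait now */
		err = -ETIMEDOUT;
	}

	blk_put_request(req);
	return err;
}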
@@ -5863,6 +5859,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
@@ -5872,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_unlock;
+ }
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -5948,8 +5952,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
+out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -6244,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
- clear_bit_unlock(tag, &hba->lrb_in_use);
- wake_up(&hba->dev_cmd.tag_wq);
-
out:
if (!err) {
err = SUCCESS;
@@ -6279,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
int err;
unsigned long flags;
- /* Reset the host controller */
+ /*
+ * Stop the host controller and complete, in software, the
+ * requests that the hardware reset has already cleared
+ */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
+ hba->silence_err_logs = true;
+ ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
@@ -6292,7 +6299,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba);
+ err = ufshcd_probe_hba(hba, false);
if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
err = -EIO;
@@ -6315,7 +6322,6 @@ out:
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
int err = 0;
- unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
do {
@@ -6325,15 +6331,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
- /*
- * After reset the door-bell might be cleared, complete
- * outstanding requests in s/w here.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
return err;
}
@@ -6488,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
if (!desc_buf)
return;
- ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
+ desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
"%s: Failed reading power descriptor.len = %d ret = %d",
@@ -6578,16 +6576,13 @@ out:
return ret;
}
-static int ufs_get_device_desc(struct ufs_hba *hba,
- struct ufs_dev_desc *dev_desc)
+static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
size_t buff_len;
u8 model_index;
u8 *desc_buf;
-
- if (!dev_desc)
- return -EINVAL;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
buff_len = max_t(size_t, hba->desc_size.dev_desc,
QUERY_DESC_MAX_SIZE + 1);
@@ -6597,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
goto out;
}
- err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
+ hba->desc_size.dev_desc);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -6608,12 +6604,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
* getting vendor (manufacturerID) and Bank Index in big endian
* format
*/
- dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
- &dev_desc->model, SD_ASCII_STD);
+ &dev_info->model, SD_ASCII_STD);
if (err < 0) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
@@ -6631,23 +6627,25 @@ out:
return err;
}
-static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
+static void ufs_put_device_desc(struct ufs_hba *hba)
{
- kfree(dev_desc->model);
- dev_desc->model = NULL;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ kfree(dev_info->model);
+ dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba,
- struct ufs_dev_desc *dev_desc)
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
for (f = ufs_fixups; f->quirk; f++) {
- if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
- f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_desc->model &&
- STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
- !strcmp(f->card.model, UFS_ANY_MODEL)))
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
}
@@ -6863,6 +6861,37 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
+static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
+{
+ int err;
+ size_t buff_len;
+ u8 *desc_buf;
+
+ buff_len = hba->desc_size.geom_desc;
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ desc_buf, buff_len);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
+ hba->dev_info.max_lu_supported = 32;
+ else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
+ hba->dev_info.max_lu_supported = 8;
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6931,15 +6960,92 @@ out:
return err;
}
+static int ufshcd_device_params_init(struct ufs_hba *hba)
+{
+ bool flag;
+ int ret;
+
+ /* Clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ /* Init UFS geometry descriptor related parameters */
+ ret = ufshcd_device_geo_params_init(hba);
+ if (ret)
+ goto out;
+
+ /* Check and apply UFS device quirks */
+ ret = ufs_get_device_desc(hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba);
+
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ /* Probe maximum power mode co-supported by both UFS host and device */
+ if (ufshcd_get_max_pwr_mode(hba))
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_add_lus - probe and add UFS logical units
+ * @hba: per-adapter instance
+ */
+static int ufshcd_add_lus(struct ufs_hba *hba)
+{
+ int ret;
+
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+ }
+
+ hba->clk_scaling.is_allowed = true;
+ }
+
+ ufs_bsg_probe(hba);
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+
+out:
+ return ret;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
+ * @async: asynchronous execution or not
*
* Execute link-startup and verify device initialization
*/
-static int ufshcd_probe_hba(struct ufs_hba *hba)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
{
- struct ufs_dev_desc card = {0};
int ret;
ktime_t start = ktime_get();
@@ -6957,27 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
+ /* Initiate UFS initialization and wait until it completes */
ret = ufshcd_complete_dev_init(hba);
if (ret)
goto out;
- /* Init check for device descriptor sizes */
- ufshcd_init_desc_sizes(hba);
-
- ret = ufs_get_device_desc(hba, &card);
- if (ret) {
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
- __func__, ret);
- goto out;
+ /*
+ * Initialize the UFS device parameters used by the driver; these
+ * parameters are associated with UFS descriptors.
+ */
+ if (async) {
+ ret = ufshcd_device_params_init(hba);
+ if (ret)
+ goto out;
}
- ufs_fixup_device_setup(hba, &card);
- ufs_put_device_desc(&card);
-
ufshcd_tune_unipro_params(hba);
/* UFS device is also active now */
@@ -6985,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_force_reset_auto_bkops(hba);
hba->wlun_dev_clr_ua = true;
- if (ufshcd_get_max_pwr_mode(hba)) {
- dev_err(hba->dev,
- "%s: Failed getting max supported power mode\n",
- __func__);
- } else {
+ /* Gear up to HS gear if supported */
+ if (hba->max_pwr_info.is_valid) {
/*
* Set the right value to bRefClkFreq before attempting to
* switch to HS gears.
@@ -7010,59 +7112,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- /*
- * If we are in error handling context or in power management callbacks
- * context, no need to scan the host
- */
- if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
- bool flag;
-
- /* clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
- hba->dev_info.f_power_on_wp_en = flag;
-
- if (!hba->is_init_prefetch)
- ufshcd_init_icc_levels(hba);
-
- /* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
- goto out;
-
- /* Initialize devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
- hba->clk_scaling.is_allowed = true;
- }
-
- ufs_bsg_probe(hba);
-
- scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
- }
-
- if (!hba->is_init_prefetch)
- hba->is_init_prefetch = true;
-
out:
- /*
- * If we failed to initialize the device or the device is not
- * present, turn off the power/clocks etc.
- */
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
- pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
- ufshcd_hba_exit(hba);
- }
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7078,43 +7128,25 @@ out:
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ int ret;
- ufshcd_probe_hba(hba);
-}
-
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
-{
- unsigned long flags;
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- int index;
- bool found = false;
-
- if (!scmd || !scmd->device || !scmd->device->host)
- return BLK_EH_DONE;
-
- host = scmd->device->host;
- hba = shost_priv(host);
- if (!hba)
- return BLK_EH_DONE;
-
- spin_lock_irqsave(host->host_lock, flags);
-
- for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[index].cmd == scmd) {
- found = true;
- break;
- }
- }
-
- spin_unlock_irqrestore(host->host_lock, flags);
+ /* Initialize hba, detect and initialize UFS device */
+ ret = ufshcd_probe_hba(hba, true);
+ if (ret)
+ goto out;
+ /* Probe and add UFS logical units */
+ ret = ufshcd_add_lus(hba);
+out:
/*
- * Bypass SCSI error handling and reset the block layer timer if this
- * SCSI command was not actually dispatched to UFS driver, otherwise
- * let SCSI layer handle the error as usual.
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
*/
- return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+ if (ret) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_exit_clk_scaling(hba);
+ ufshcd_hba_exit(hba);
+ }
}
static const struct attribute_group *ufshcd_driver_groups[] = {
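The net effect of the refactor: ufshcd_probe_hba() keeps only the link-startup and device-initialization work that must also run on recovery re-probe; the one-time descriptor, geometry and quirk reads move to ufshcd_device_params_init() (run only when @async is true); LU scanning moves to ufshcd_add_lus(); and power-off on failure now lives with the async scan, the single caller that owns first-time bring-up. A sketch of how the two callers drive the split, names from the patch:

static void example_first_time_scan(struct ufs_hba *hba)
{
	if (ufshcd_probe_hba(hba, true))	/* link + device + params */
		goto power_off;
	if (ufshcd_add_lus(hba))		/* W-LUs, devfreq, host scan */
		goto power_off;
	return;
power_off:
	pm_runtime_put_sync(hba->dev);
	ufshcd_exit_clk_scaling(hba);
	ufshcd_hba_exit(hba);
}

static int example_recovery_reprobe(struct ufs_hba *hba)
{
	/* Device parameters were cached on first probe; do not re-read. */
	return ufshcd_probe_hba(hba, false);
}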
@@ -7135,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
- .eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -7574,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
+ ufs_put_device_desc(hba);
}
}
@@ -7701,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* turning off the link would also turn off the device.
*/
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
- (!check_for_bkops || (check_for_bkops &&
- !hba->auto_bkops_enabled))) {
+ (!check_for_bkops || !hba->auto_bkops_enabled)) {
/*
* Let's make sure that link is in low power mode, we are doing
* this currently by putting the link in Hibern8. Otherway to
@@ -7908,6 +7939,11 @@ disable_clks:
ret = ufshcd_vops_suspend(hba, pm_op);
if (ret)
goto set_link_active;
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transaction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -7917,11 +7953,7 @@ disable_clks:
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- /*
- * Disable the host irq as host controller as there won't be any
- * host controller transaction expected till resume.
- */
- ufshcd_disable_irq(hba);
+
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
goto out;
@@ -7974,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
/* enable the host irq as host controller would be active soon */
- ret = ufshcd_enable_irq(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
+ ufshcd_enable_irq(hba);
ret = ufshcd_vreg_set_hpm(hba);
if (ret)
@@ -8250,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba)
{
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8328,6 +8361,18 @@ out_error:
}
EXPORT_SYMBOL(ufshcd_alloc_host);
+/*
+ * This function exists only because blk_mq_alloc_tag_set() requires a
+ * queue_rq callback; TMF requests are never dispatched through this queue.
+ */
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
+{
+ WARN_ON_ONCE(true);
+ return BLK_STS_NOTSUPP;
+}
+
+static const struct blk_mq_ops ufshcd_tmf_ops = {
+ .queue_rq = ufshcd_queue_tmf,
+};
+
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
@@ -8397,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
- /* Initailize wait queue for task management */
- init_waitqueue_head(&hba->tm_wq);
- init_waitqueue_head(&hba->tm_tag_wq);
-
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8413,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_rwsem(&hba->clk_scaling_lock);
- /* Initialize device management tag acquire wait queue */
- init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
ufshcd_init_clk_gating(hba);
ufshcd_init_clk_scaling(hba);
@@ -8449,6 +8487,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+ if (IS_ERR(hba->cmd_queue)) {
+ err = PTR_ERR(hba->cmd_queue);
+ goto out_remove_scsi_host;
+ }
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ .flags = BLK_MQ_F_NO_SCHED,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto free_cmd_queue;
+ hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+
/* Reset the attached device */
ufshcd_vops_device_reset(hba);
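The TMF queue is used purely as a tag allocator: the depth matches the controller's UTP task-management slots, BLK_MQ_F_NO_SCHED keeps an I/O scheduler out of the way, and the mandatory queue_rq hook is the stub above that should never run. Teardown (see ufshcd_remove() earlier in this patch) must mirror the setup order. A paired setup/teardown sketch under those assumptions:

#include <linux/blk-mq.h>

static int example_tmf_setup(struct ufs_hba *hba)
{
	int err;

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,	/* stub queue_rq */
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		return err;

	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		blk_mq_free_tag_set(&hba->tmf_tag_set);
		return PTR_ERR(hba->tmf_queue);
	}
	return 0;
}

static void example_tmf_teardown(struct ufs_hba *hba)
{
	blk_cleanup_queue(hba->tmf_queue);	/* queue first ... */
	blk_mq_free_tag_set(&hba->tmf_tag_set);	/* ... then its tag set */
}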
@@ -8458,7 +8517,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
- goto out_remove_scsi_host;
+ goto free_tmf_queue;
}
/*
@@ -8495,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0;
+free_tmf_queue:
+ blk_cleanup_queue(hba->tmf_queue);
+free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+free_cmd_queue:
+ blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2740f6941ec6..2ae6c7c8528c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -212,13 +212,11 @@ struct ufs_query {
* @type: device management command type - Query, NOP OUT
* @lock: lock to allow one command at a time
* @complete: internal commands completion
- * @tag_wq: wait queue until free command slot is available
*/
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
struct completion *complete;
- wait_queue_head_t tag_wq;
struct ufs_query query;
};
@@ -322,7 +320,7 @@ struct ufs_hba_variant_ops {
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *);
+ int (*apply_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -483,7 +481,7 @@ struct ufs_stats {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
@@ -495,17 +493,14 @@ struct ufs_stats {
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
- * @tm_wq: wait queue for task management
- * @tm_tag_wq: wait queue for free task management slots
- * @tm_slots_in_use: bit map of task management request slots in use
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
* @pwr_done: completion for power mode change
- * @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
- * @is_init_prefetch: flag to check if data was pre-fetched in initialization
* @init_prefetch_data: data pre-fetched during initialization
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
@@ -513,6 +508,7 @@ struct ufs_stats {
* @uic_error: UFS interconnect layer error status
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
+ * @silence_err_logs: flag to silence error logs
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
@@ -541,6 +537,7 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
+ struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
@@ -561,7 +558,6 @@ struct ufs_hba {
u32 ahit;
struct ufshcd_lrb *lrb;
- unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
@@ -643,10 +639,8 @@ struct ufs_hba {
/* Device deviations from standard UFS device spec. */
unsigned int dev_quirks;
- wait_queue_head_t tm_wq;
- wait_queue_head_t tm_tag_wq;
- unsigned long tm_condition;
- unsigned long tm_slots_in_use;
+ struct blk_mq_tag_set tmf_tag_set;
+ struct request_queue *tmf_queue;
struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex;
@@ -657,7 +651,6 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
- bool is_init_prefetch;
struct ufs_init_prefetch init_prefetch_data;
/* Work Queues */
@@ -670,6 +663,7 @@ struct ufs_hba {
u32 saved_err;
u32 saved_uic_err;
struct ufs_stats ufs_stats;
+ bool silence_err_logs;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -803,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
+int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg);
static inline void check_upiu_size(void)
{
@@ -927,6 +926,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
#define SD_ASCII_STD true
#define SD_RAW false
@@ -1086,8 +1086,10 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset)
+ if (hba->vops && hba->vops->device_reset) {
hba->vops->device_reset(hba);
+ ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
+ }
}
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index f539f873f94d..3dc4d8b76509 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -161,6 +161,17 @@
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+#define DL_FC1ProtectionTimeOutVal_Default 8191
+#define DL_TC1ReplayTimeOutVal_Default 65535
+#define DL_AFC1ReqTimeOutVal_Default 32767
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
/* PA power modes */
enum {
FAST_MODE = 1,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 70008816c91f..c3f010df641e 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
int segs = scsi_dma_map(cmd);
if (segs == -ENOMEM) {
- scmd_printk(KERN_ERR, cmd,
+ scmd_printk(KERN_DEBUG, cmd,
"vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
return -ENOMEM;
} else if (segs > 1) {
@@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
cmd->sc_data_direction);
if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
- scmd_printk(KERN_ERR, cmd,
+ scmd_printk(KERN_DEBUG, cmd,
"vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
return -ENOMEM;
}
@@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
return 0;
}
+/*
+ * The device incorrectly doesn't clear the first byte of the sense
+ * buffer in some cases. We have to do it ourselves.
+ * Otherwise we run into trouble when SWIOTLB is forced.
+ */
+static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
+{
+ if (cmd->sense_buffer)
+ cmd->sense_buffer[0] = 0;
+}
+
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx)
{
@@ -544,6 +555,8 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
cmd = ctx->cmd;
abort_cmp = ctx->abort_cmp;
pvscsi_unmap_buffers(adapter, ctx);
+ if (sdstat != SAM_STAT_CHECK_CONDITION)
+ pvscsi_patch_sense(cmd);
pvscsi_release_context(adapter, ctx);
if (abort_cmp) {
/*
@@ -712,7 +725,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
- scmd_printk(KERN_ERR, cmd,
+ scmd_printk(KERN_DEBUG, cmd,
"vmw_pvscsi: Failed to map sense buffer for DMA.\n");
ctx->sensePA = 0;
return -ENOMEM;
@@ -873,6 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
scmd_printk(KERN_ERR, cmd,
"Forced reset on cmd %p\n", cmd);
pvscsi_unmap_buffers(adapter, ctx);
+ pvscsi_patch_sense(cmd);
pvscsi_release_context(adapter, ctx);
cmd->result = (DID_RESET << 16);
cmd->scsi_done(cmd);
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 77bce208210e..7eac76cccc4c 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev)
struct gsc_irq gsc_irq;
u32 zalon_vers;
int error = -ENODEV;
- void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
+ void __iomem *zalon = ioremap(dev->hpa.start, 4096);
void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
static int unit = 0;
struct Scsi_Host *host;
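This and the ioremap changes below are mechanical: on these platforms ioremap() already returns an uncached MMIO mapping, and ioremap_nocache() was just an alias for it that was being removed tree-wide around this time. The usual map/use/unmap pattern with plain ioremap(), as a sketch with a made-up device:

#include <linux/io.h>
#include <linux/sizes.h>

/* Sketch: probe a hypothetical device at a fixed physical address. */
static int example_probe(phys_addr_t phys)
{
	void __iomem *regs = ioremap(phys, SZ_4K);	/* uncached MMIO */
	u32 ver;

	if (!regs)
		return -ENOMEM;
	ver = readl(regs);	/* e.g. an ID/version register at offset 0 */
	iounmap(regs);
	return ver ? 0 : -ENODEV;
}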
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index a23a8e5794f5..bdd82e497d5f 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -801,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
/* additional setup required for Fastlane */
if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
/* map full address space up to ESP base for DMA */
- zep->board_base = ioremap_nocache(board,
+ zep->board_base = ioremap(board,
FASTLANE_ESP_ADDR-1);
if (!zep->board_base) {
pr_err("Cannot allocate board address space\n");
@@ -816,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
esp->ops = zdd->esp_ops;
if (ioaddr > 0xffffff)
- esp->regs = ioremap_nocache(ioaddr, 0x20);
+ esp->regs = ioremap(ioaddr, 0x20);
else
/* ZorroII address space remapped nocache by early startup */
esp->regs = ZTWO_VADDR(ioaddr);
@@ -842,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
* Only Fastlane Z3 for now - add switch for correct struct
* dma_registers size if adding any more
*/
- esp->dma_regs = ioremap_nocache(dmaaddr,
+ esp->dma_regs = ioremap(dmaaddr,
sizeof(struct fastlane_dma_registers));
} else
/* ZorroII address space remapped nocache by early startup */
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 9475353f49d6..d996782a7106 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -368,7 +368,7 @@ static int clk_establish_mapping(struct clk *clk)
if (!mapping->base && mapping->phys) {
kref_init(&mapping->ref);
- mapping->base = ioremap_nocache(mapping->phys, mapping->len);
+ mapping->base = ioremap(mapping->phys, mapping->len);
if (unlikely(!mapping->base))
return -ENXIO;
} else if (mapping->base) {
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8485e812d9b2..f8e070d67fa3 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -213,7 +213,7 @@ int __init register_intc_controller(struct intc_desc *desc)
WARN_ON(resource_type(res) != IORESOURCE_MEM);
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
- d->window[k].virt = ioremap_nocache(res->start,
+ d->window[k].virt = ioremap(res->start,
resource_size(res));
if (!d->window[k].virt)
goto err2;
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 87d69e7471f9..f9f043a3d90a 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -73,7 +73,7 @@ int register_intc_userimask(unsigned long addr)
if (unlikely(uimask))
return -EBUSY;
- uimask = ioremap_nocache(addr, SZ_4K);
+ uimask = ioremap(addr, SZ_4K);
if (unlikely(!uimask))
return -ENOMEM;
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
index c674bf6fb119..f08b43b713c5 100644
--- a/drivers/siox/siox.h
+++ b/drivers/siox/siox.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
*/
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index a444badd8df5..4aad2566f52d 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -641,6 +641,8 @@ static int qcom_slim_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
slim_unregister_controller(&ctrl->ctrl);
+ clk_disable_unprepare(ctrl->rclk);
+ clk_disable_unprepare(ctrl->hclk);
destroy_workqueue(ctrl->rxwq);
return 0;
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 29fbab55c3b3..e3f5ebc0c05e 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -666,10 +666,12 @@ static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
struct device *dev = ctrl->dev;
int ret, size;
- ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx");
- if (!ctrl->dma_rx_channel) {
- dev_err(dev, "Failed to request dma channels");
- return -EINVAL;
+ ctrl->dma_rx_channel = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctrl->dma_rx_channel)) {
+ dev_err(dev, "Failed to request RX dma channel");
+ ret = PTR_ERR(ctrl->dma_rx_channel);
+ ctrl->dma_rx_channel = NULL;
+ return ret;
}
size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
@@ -703,10 +705,12 @@ static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
int ret = 0;
int size;
- ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx");
- if (!ctrl->dma_tx_channel) {
- dev_err(dev, "Failed to request dma channels");
- return -EINVAL;
+ ctrl->dma_tx_channel = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctrl->dma_tx_channel)) {
+ dev_err(dev, "Failed to request TX dma channel");
+ ret = PTR_ERR(ctrl->dma_tx_channel);
+ ctrl->dma_tx_channel = NULL;
+ return ret;
}
size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
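Unlike dma_request_slave_channel(), which collapses every failure to NULL, dma_request_chan() returns an ERR_PTR(), so probe code can tell -EPROBE_DEFER apart from a genuinely missing channel and propagate the right error upward. The idiomatic consumer pattern, sketched with an assumed "rx" channel name from the device tree:

#include <linux/device.h>
#include <linux/dmaengine.h>

static int example_request_rx(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan)) {
		int ret = PTR_ERR(chan);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "no RX DMA channel: %d\n", ret);
		return ret;	/* -EPROBE_DEFER retries the probe later */
	}
	*out = chan;
	return 0;
}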
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index b2f013bfe42e..c73035915f1d 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2011-2017, The Linux Foundation
*/
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 833e04a7835c..1778f8c62861 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -14,6 +14,7 @@ source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sifive/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 2ec355003524..8b49d782a1ab 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -20,6 +20,7 @@ obj-y += qcom/
obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
+obj-$(CONFIG_SOC_SIFIVE) += sifive/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 5823f5b67d16..3f0261d53ad9 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
struct meson_ee_pwrc *pwrc,
struct meson_ee_pwrc_domain *dom)
{
+ int ret;
+
dom->pwrc = pwrc;
dom->num_rstc = dom->desc.reset_names_count;
dom->num_clks = dom->desc.clk_names_count;
@@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
* prepare/enable counters won't be in sync.
*/
if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
- int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+ ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
if (ret)
return ret;
- pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
- } else
- pm_genpd_init(&dom->base, NULL,
- (dom->desc.get_power ?
- dom->desc.get_power(dom) : true));
+ ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_genpd_init(&dom->base, NULL,
+ (dom->desc.get_power ?
+ dom->desc.get_power(dom) : true));
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
pwrc->xlate.domains[i] = &dom->base;
}
- of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
-
- return 0;
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 096a83cf0caf..55a1f57a4d8c 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -66,8 +66,9 @@ static const struct at91_soc __initconst socs[] = {
AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
- AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH,
- "sam9x60", "sam9x60"),
+#endif
+#ifdef CONFIG_SOC_SAM9X60
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index d326915e0f40..61731e01f94b 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -63,7 +63,7 @@ static const int b15_cpubiuctrl_regs[] = {
[CPU_WRITEBACK_CTRL_REG] = -1,
};
-/* Odd cases, e.g: 7260 */
+/* Odd cases, e.g.: 7260A0 */
static const int b53_cpubiuctrl_no_wb_regs[] = {
[CPU_CREDIT_REG] = 0x0b0,
[CPU_MCP_FLOW_REG] = 0x0b4,
@@ -76,6 +76,12 @@ static const int b53_cpubiuctrl_regs[] = {
[CPU_WRITEBACK_CTRL_REG] = 0x22c,
};
+static const int a72_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x18,
+ [CPU_MCP_FLOW_REG] = 0x1c,
+ [CPU_WRITEBACK_CTRL_REG] = 0x20,
+};
+
#define NUM_CPU_BIUCTRL_REGS 3
static int __init mcp_write_pairing_set(void)
@@ -101,25 +107,29 @@ static int __init mcp_write_pairing_set(void)
return 0;
}
-static const u32 b53_mach_compat[] = {
+static const u32 a72_b53_mach_compat[] = {
+ 0x7211,
+ 0x7216,
+ 0x7255,
+ 0x7260,
0x7268,
0x7271,
0x7278,
};
-static void __init mcp_b53_set(void)
+static void __init mcp_a72_b53_set(void)
{
unsigned int i;
u32 reg;
reg = brcmstb_get_family_id();
- for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) {
- if (BRCM_ID(reg) == b53_mach_compat[i])
+ for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
+ if (BRCM_ID(reg) == a72_b53_mach_compat[i])
break;
}
- if (i == ARRAY_SIZE(b53_mach_compat))
+ if (i == ARRAY_SIZE(a72_b53_mach_compat))
return;
/* Set all 3 MCP interfaces to 8 credits */
@@ -157,6 +167,7 @@ static void __init mcp_b53_set(void)
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
struct device_node *cpu_dn;
+ u32 family_id;
int ret = 0;
cpubiuctrl_base = of_iomap(np, 0);
@@ -179,13 +190,16 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
cpubiuctrl_regs = b15_cpubiuctrl_regs;
else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
cpubiuctrl_regs = b53_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
+ cpubiuctrl_regs = a72_cpubiuctrl_regs;
else {
pr_err("unsupported CPU\n");
ret = -EINVAL;
}
of_node_put(cpu_dn);
- if (BRCM_ID(brcmstb_get_family_id()) == 0x7260)
+ family_id = brcmstb_get_family_id();
+ if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
of_node_put(np);
@@ -248,7 +262,7 @@ static int __init brcmstb_biuctrl_init(void)
return ret;
}
- mcp_b53_set();
+ mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index cfa4b2939992..357c5800b112 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -5,7 +5,8 @@
config QUICC_ENGINE
bool "QUICC Engine (QE) framework support"
- depends on FSL_SOC && PPC32
+ depends on OF && HAS_IOMEM
+ depends on PPC || ARM || ARM64 || COMPILE_TEST
select GENERIC_ALLOCATOR
select CRC32
help
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index f0c29ed8f0ff..ed75198ed254 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
- qe_gc->cpdata = in_be32(&regs->cpdata);
+ qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
- qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
- qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
- qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
- qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
- qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+ qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
}
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct qe_pio_regs __iomem *regs = mm_gc->regs;
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
- return !!(in_be32(&regs->cpdata) & pin_mask);
+ return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
}
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
else
qe_gc->cpdata &= ~pin_mask;
- out_be32(&regs->cpdata, qe_gc->cpdata);
+ qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
@@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
}
}
- out_be32(&regs->cpdata, qe_gc->cpdata);
+ qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
@@ -160,7 +160,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
{
struct qe_pin *qe_pin;
struct gpio_chip *gc;
- struct of_mm_gpio_chip *mm_gc;
struct qe_gpio_chip *qe_gc;
int err;
unsigned long flags;
@@ -186,7 +185,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
goto err0;
}
- mm_gc = to_of_mm_gpio_chip(gc);
qe_gc = gpiochip_get_data(gc);
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -255,11 +253,15 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
spin_lock_irqsave(&qe_gc->lock, flags);
if (second_reg) {
- clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
- clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+ qe_clrsetbits_be32(&regs->cpdir2, mask2,
+ sregs->cpdir2 & mask2);
+ qe_clrsetbits_be32(&regs->cppar2, mask2,
+ sregs->cppar2 & mask2);
} else {
- clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
- clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+ qe_clrsetbits_be32(&regs->cpdir1, mask2,
+ sregs->cpdir1 & mask2);
+ qe_clrsetbits_be32(&regs->cppar1, mask2,
+ sregs->cppar1 & mask2);
}
if (sregs->cpdata & mask1)
@@ -267,8 +269,8 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
else
qe_gc->cpdata &= ~mask1;
- out_be32(&regs->cpdata, qe_gc->cpdata);
- clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+ qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 417df7e19281..96c2057d8d8e 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -22,16 +22,12 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/iopoll.h>
#include <linux/crc32.h>
#include <linux/mod_devicetable.h>
#include <linux/of_platform.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <asm/rheap.h>
static void qe_snums_init(void);
static int qe_sdma_init(void);
@@ -108,11 +104,12 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
{
unsigned long flags;
u8 mcn_shift = 0, dev_shift = 0;
- u32 ret;
+ u32 val;
+ int ret;
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+ qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
} else {
if (cmd == QE_ASSIGN_PAGE) {
/* Here device is the SNUM, not sub-block */
@@ -129,20 +126,18 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
- out_be32(&qe_immr->cp.cecdr, cmd_input);
- out_be32(&qe_immr->cp.cecr,
- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
- mcn_protocol << mcn_shift));
+ qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+ &qe_immr->cp.cecr);
}
/* wait for the QE_CR_FLG to clear */
- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
- 100, 0);
- /* On timeout (e.g. failure), the expression will be false (ret == 0),
- otherwise it will be true (ret == 1). */
+ ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val,
+ (val & QE_CR_FLG) == 0, 0, 100);
+ /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
spin_unlock_irqrestore(&qe_lock, flags);
- return ret == 1;
+ return ret == 0;
}
EXPORT_SYMBOL(qe_issue_cmd);
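
The powerpc-only spin_event_timeout() is replaced by readx_poll_timeout_atomic() from <linux/iopoll.h>, which busy-polls qe_ioread32be(&qe_immr->cp.cecr) into val until the condition holds or 100 us elapse, returning 0 on success and -ETIMEDOUT on timeout -- hence the flipped `return ret == 0`. A simplified model of what the macro does here (sketch only; the real macro also re-reads the register once more after the deadline before giving up):

	static int cecr_wait_sketch(u32 __iomem *cecr)	/* hypothetical helper */
	{
		ktime_t deadline = ktime_add_us(ktime_get(), 100);
		u32 val;

		for (;;) {
			val = qe_ioread32be(cecr);
			if (!(val & QE_CR_FLG))
				return 0;		/* QE accepted the command */
			if (ktime_after(ktime_get(), deadline))
				return -ETIMEDOUT;	/* flag never cleared */
			/* delay_us == 0: the macro just re-reads immediately */
		}
	}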
@@ -164,8 +159,7 @@ static unsigned int brg_clk = 0;
unsigned int qe_get_brg_clk(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ u32 brg;
unsigned int mod;
if (brg_clk)
@@ -175,9 +169,8 @@ unsigned int qe_get_brg_clk(void)
if (!qe)
return brg_clk;
- prop = of_get_property(qe, "brg-frequency", &size);
- if (prop && size == sizeof(*prop))
- brg_clk = *prop;
+ if (!of_property_read_u32(qe, "brg-frequency", &brg))
+ brg_clk = brg;
of_node_put(qe);
@@ -197,6 +190,14 @@ EXPORT_SYMBOL(qe_get_brg_clk);
#define PVR_VER_836x 0x8083
#define PVR_VER_832x 0x8084
+static bool qe_general4_errata(void)
+{
+#ifdef CONFIG_PPC32
+ return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
+#endif
+ return false;
+}
+
/* Program the BRG to the given sampling rate and multiplier
*
* @brg: the BRG, QE_BRG1 - QE_BRG16
@@ -223,14 +224,14 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
that the BRG divisor must be even if you're not using divide-by-16
mode. */
- if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+ if (qe_general4_errata())
if (!div16 && (divisor & 1) && (divisor > 3))
divisor++;
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
QE_BRGC_ENABLE | div16;
- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+ qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
return 0;
}
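
qe_setbrg() previously called pvr_version_is() unconditionally; that helper reads the PowerPC processor version register and does not exist on other architectures. Hoisting the check into qe_general4_errata() confines it to CONFIG_PPC32, so on any other build the function the compiler sees is effectively (sketch):

	static bool qe_general4_errata(void)
	{
		return false;	/* non-PPC32: the errata never applies */
	}

and the divisor workaround above it is eliminated as dead code.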
@@ -364,22 +365,20 @@ EXPORT_SYMBOL(qe_put_snum);
static int qe_sdma_init(void)
{
struct sdma __iomem *sdma = &qe_immr->sdma;
- static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
-
- if (!sdma)
- return -ENODEV;
+ static s32 sdma_buf_offset = -ENOMEM;
/* allocate 2 internal temporary buffers (512 bytes size each) for
* the SDMA */
- if (IS_ERR_VALUE(sdma_buf_offset)) {
+ if (sdma_buf_offset < 0) {
sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
- if (IS_ERR_VALUE(sdma_buf_offset))
+ if (sdma_buf_offset < 0)
return -ENOMEM;
}
- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
- (0x1 << QE_SDMR_CEN_SHIFT)));
+ qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+ &sdma->sdebcr);
+ qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ &sdma->sdmr);
return 0;
}
@@ -417,14 +416,14 @@ static void qe_upload_microcode(const void *base,
"uploading microcode '%s'\n", ucode->id);
/* Use auto-increment */
- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+ qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+ &qe_immr->iram.iadd);
for (i = 0; i < be32_to_cpu(ucode->count); i++)
- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+ qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+ qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
}
/*
@@ -509,7 +508,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
* If the microcode calls for it, split the I-RAM.
*/
if (!firmware->split)
- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+ qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
if (firmware->soc.model)
printk(KERN_INFO
@@ -543,11 +542,13 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
u32 trap = be32_to_cpu(ucode->traps[j]);
if (trap)
- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+ qe_iowrite32be(trap,
+ &qe_immr->rsp[i].tibcr[j]);
}
/* Enable traps */
- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+ qe_iowrite32be(be32_to_cpu(ucode->eccr),
+ &qe_immr->rsp[i].eccr);
}
qe_firmware_uploaded = 1;
@@ -565,11 +566,9 @@ EXPORT_SYMBOL(qe_upload_firmware);
struct qe_firmware_info *qe_get_firmware_info(void)
{
static int initialized;
- struct property *prop;
struct device_node *qe;
struct device_node *fw = NULL;
const char *sprop;
- unsigned int i;
/*
* If we haven't checked yet, and a driver hasn't uploaded a firmware
@@ -603,20 +602,11 @@ struct qe_firmware_info *qe_get_firmware_info(void)
strlcpy(qe_firmware_info.id, sprop,
sizeof(qe_firmware_info.id));
- prop = of_find_property(fw, "extended-modes", NULL);
- if (prop && (prop->length == sizeof(u64))) {
- const u64 *iprop = prop->value;
-
- qe_firmware_info.extended_modes = *iprop;
- }
+ of_property_read_u64(fw, "extended-modes",
+ &qe_firmware_info.extended_modes);
- prop = of_find_property(fw, "virtual-traps", NULL);
- if (prop && (prop->length == 32)) {
- const u32 *iprop = prop->value;
-
- for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
- qe_firmware_info.vtraps[i] = iprop[i];
- }
+ of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
+ ARRAY_SIZE(qe_firmware_info.vtraps));
of_node_put(fw);
@@ -627,17 +617,13 @@ EXPORT_SYMBOL(qe_get_firmware_info);
unsigned int qe_get_num_of_risc(void)
{
struct device_node *qe;
- int size;
unsigned int num_of_risc = 0;
- const u32 *prop;
qe = qe_get_device_node();
if (!qe)
return num_of_risc;
- prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
- if (prop && size == sizeof(*prop))
- num_of_risc = *prop;
+ of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
of_node_put(qe);
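
The property reads above switch to of_property_read_u32(), which succeeds only when the property exists with exactly sizeof(u32) of data and converts the cell from the device tree's big-endian encoding. The removed code dereferenced the raw cell directly, which is correct only on a big-endian host -- another powerpc assumption this series removes. Open-coded portable equivalent of the new call (sketch):

	int size;
	const __be32 *prop = of_get_property(qe, "brg-frequency", &size);

	if (prop && size == sizeof(*prop))
		brg_clk = be32_to_cpu(*prop);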
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 83e85e61669f..a81a1a79f1ca 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -32,7 +32,7 @@ static phys_addr_t muram_pbase;
struct muram_block {
struct list_head head;
- unsigned long start;
+ s32 start;
int size;
};
@@ -110,34 +110,30 @@ out_muram:
* @algo: algorithm for alloc.
* @data: data for genalloc's algorithm.
*
- * This function returns an offset into the muram area.
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
*/
-static unsigned long cpm_muram_alloc_common(unsigned long size,
- genpool_algo_t algo, void *data)
+static s32 cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
{
struct muram_block *entry;
- unsigned long start;
-
- if (!muram_pool && cpm_muram_init())
- goto out2;
+ s32 start;
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
- if (!start)
- goto out2;
+ if (!start) {
+ kfree(entry);
+ return -ENOMEM;
+ }
start = start - GENPOOL_OFFSET;
memset_io(cpm_muram_addr(start), 0, size);
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- goto out1;
entry->start = start;
entry->size = size;
list_add(&entry->head, &muram_block_list);
return start;
-out1:
- gen_pool_free(muram_pool, start, size);
-out2:
- return (unsigned long)-ENOMEM;
}
/*
@@ -145,13 +141,14 @@ out2:
* @size: number of bytes to allocate
* @align: requested alignment, in bytes
*
- * This function returns an offset into the muram area.
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
* Use cpm_dpram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
- unsigned long start;
+ s32 start;
unsigned long flags;
struct genpool_data_align muram_pool_data;
@@ -168,12 +165,15 @@ EXPORT_SYMBOL(cpm_muram_alloc);
* cpm_muram_free - free a chunk of multi-user ram
* @offset: The beginning of the chunk as returned by cpm_muram_alloc().
*/
-int cpm_muram_free(unsigned long offset)
+void cpm_muram_free(s32 offset)
{
unsigned long flags;
int size;
struct muram_block *tmp;
+ if (offset < 0)
+ return;
+
size = 0;
spin_lock_irqsave(&cpm_muram_lock, flags);
list_for_each_entry(tmp, &muram_block_list, head) {
@@ -186,7 +186,6 @@ int cpm_muram_free(unsigned long offset)
}
gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
spin_unlock_irqrestore(&cpm_muram_lock, flags);
- return size;
}
EXPORT_SYMBOL(cpm_muram_free);
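
The muram allocator now deals in s32 offsets: a non-negative value is a valid offset, a negative value is an errno, and cpm_muram_free() silently ignores negative offsets instead of returning a size. Together with the -1 initializations added to the ucc_fast/ucc_slow structures later in this series, error paths can free unconditionally. Typical caller shape under the new convention (sketch; qe_muram_alloc/qe_muram_free are the QE-side aliases of the cpm_muram_* implementations):

	s32 off = qe_muram_alloc(size, align);
	if (off < 0)
		return off;		/* negative errno, e.g. -ENOMEM */
	/* ... use qe_muram_addr(off) ... */
	qe_muram_free(off);		/* also safe on a failed or -1 offset */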
@@ -194,13 +193,14 @@ EXPORT_SYMBOL(cpm_muram_free);
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
* @offset: offset of allocation start address
* @size: number of bytes to allocate
- * This function returns an offset into the muram area
+ * This function returns @offset if the area was available, a negative
+ * errno otherwise.
* Use cpm_dpram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
- unsigned long start;
+ s32 start;
unsigned long flags;
struct genpool_data_fixed muram_pool_data_fixed;
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 9bac546998d3..0dd5bdb04a14 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
@@ -24,9 +25,57 @@
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <soc/fsl/qe/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+
+#define NR_QE_IC_INTS 64
+
+/* QE IC registers offset */
+#define QEIC_CICR 0x00
+#define QEIC_CIVEC 0x04
+#define QEIC_CIPXCC 0x10
+#define QEIC_CIPYCC 0x14
+#define QEIC_CIPWCC 0x18
+#define QEIC_CIPZCC 0x1c
+#define QEIC_CIMR 0x20
+#define QEIC_CRIMR 0x24
+#define QEIC_CIPRTA 0x30
+#define QEIC_CIPRTB 0x34
+#define QEIC_CHIVEC 0x60
+
+struct qe_ic {
+ /* Control registers offset */
+ u32 __iomem *regs;
+
+ /* The remapper for this QEIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* VIRQ numbers of QE high/low irqs */
+ unsigned int virq_high;
+ unsigned int virq_low;
+};
-#include "qe_ic.h"
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+ /* Location of this source at the QIMR register */
+ u32 mask;
+
+ /* Mask register offset */
+ u32 mask_reg;
+
+ /*
+ * For grouped interrupts sources - the interrupt code as
+ * appears at the group priority register
+ */
+ u8 pri_code;
+
+ /* Group priority register offset */
+ u32 pri_reg;
+};
static DEFINE_RAW_SPINLOCK(qe_ic_lock);
@@ -171,15 +220,15 @@ static struct qe_ic_info qe_ic_info[] = {
},
};
-static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
{
- return in_be32(base + (reg >> 2));
+ return qe_ioread32be(base + (reg >> 2));
}
-static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
u32 value)
{
- out_be32(base + (reg >> 2), value);
+ qe_iowrite32be(value, base + (reg >> 2));
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
@@ -281,8 +330,8 @@ static const struct irq_domain_ops qe_ic_host_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
int irq;
@@ -292,13 +341,13 @@ unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
if (irq == 0)
- return NO_IRQ;
+ return 0;
return irq_linear_revmap(qe_ic->irqhost, irq);
}
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
int irq;
@@ -308,18 +357,60 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
if (irq == 0)
- return NO_IRQ;
+ return 0;
return irq_linear_revmap(qe_ic->irqhost, irq);
}
-void __init qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(struct irq_desc *desc),
- void (*high_handler)(struct irq_desc *desc))
+static void qe_ic_cascade_low(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_high(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
+ if (cascade_irq == 0)
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void __init qe_ic_init(struct device_node *node)
+{
+ void (*low_handler)(struct irq_desc *desc);
+ void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
struct resource res;
- u32 temp = 0, ret, high_active = 0;
+ u32 ret;
ret = of_address_to_resource(node, 0, &res);
if (ret)
@@ -343,166 +434,42 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
qe_ic->virq_low = irq_of_parse_and_map(node, 1);
- if (qe_ic->virq_low == NO_IRQ) {
+ if (!qe_ic->virq_low) {
printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
kfree(qe_ic);
return;
}
-
- /* default priority scheme is grouped. If spread mode is */
- /* required, configure cicr accordingly. */
- if (flags & QE_IC_SPREADMODE_GRP_W)
- temp |= CICR_GWCC;
- if (flags & QE_IC_SPREADMODE_GRP_X)
- temp |= CICR_GXCC;
- if (flags & QE_IC_SPREADMODE_GRP_Y)
- temp |= CICR_GYCC;
- if (flags & QE_IC_SPREADMODE_GRP_Z)
- temp |= CICR_GZCC;
- if (flags & QE_IC_SPREADMODE_GRP_RISCA)
- temp |= CICR_GRTA;
- if (flags & QE_IC_SPREADMODE_GRP_RISCB)
- temp |= CICR_GRTB;
-
- /* choose destination signal for highest priority interrupt */
- if (flags & QE_IC_HIGH_SIGNAL) {
- temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
- high_active = 1;
+ if (qe_ic->virq_high != qe_ic->virq_low) {
+ low_handler = qe_ic_cascade_low;
+ high_handler = qe_ic_cascade_high;
+ } else {
+ low_handler = qe_ic_cascade_muxed_mpic;
+ high_handler = NULL;
}
- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+ qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
irq_set_handler_data(qe_ic->virq_low, qe_ic);
irq_set_chained_handler(qe_ic->virq_low, low_handler);
- if (qe_ic->virq_high != NO_IRQ &&
- qe_ic->virq_high != qe_ic->virq_low) {
+ if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
irq_set_handler_data(qe_ic->virq_high, qe_ic);
irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
}
-void qe_ic_set_highest_priority(unsigned int virq, int high)
+static int __init qe_ic_of_init(void)
{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp = 0;
-
- temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
-
- temp &= ~CICR_HP_MASK;
- temp |= src << CICR_HP_SHIFT;
-
- temp &= ~CICR_HPIT_MASK;
- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
-
- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
-}
-
-/* Set Priority level within its group, from 1 to 8 */
-int qe_ic_set_priority(unsigned int virq, unsigned int priority)
-{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp;
+ struct device_node *np;
- if (priority > 8 || priority == 0)
- return -EINVAL;
- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
- "%s: Invalid hw irq number for QEIC\n", __func__))
- return -EINVAL;
- if (qe_ic_info[src].pri_reg == 0)
- return -EINVAL;
-
- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
-
- if (priority < 4) {
- temp &= ~(0x7 << (32 - priority * 3));
- temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
- } else {
- temp &= ~(0x7 << (24 - priority * 3));
- temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (!np) {
+ np = of_find_node_by_type(NULL, "qeic");
+ if (!np)
+ return -ENODEV;
}
-
- qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
-
+ qe_ic_init(np);
+ of_node_put(np);
return 0;
}
-
-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
-{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp, control_reg = QEIC_CICNR, shift = 0;
-
- if (priority > 2 || priority == 0)
- return -EINVAL;
- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
- "%s: Invalid hw irq number for QEIC\n", __func__))
- return -EINVAL;
-
- switch (qe_ic_info[src].pri_reg) {
- case QEIC_CIPZCC:
- shift = CICNR_ZCC1T_SHIFT;
- break;
- case QEIC_CIPWCC:
- shift = CICNR_WCC1T_SHIFT;
- break;
- case QEIC_CIPYCC:
- shift = CICNR_YCC1T_SHIFT;
- break;
- case QEIC_CIPXCC:
- shift = CICNR_XCC1T_SHIFT;
- break;
- case QEIC_CIPRTA:
- shift = CRICR_RTA1T_SHIFT;
- control_reg = QEIC_CRICR;
- break;
- case QEIC_CIPRTB:
- shift = CRICR_RTB1T_SHIFT;
- control_reg = QEIC_CRICR;
- break;
- default:
- return -EINVAL;
- }
-
- shift += (2 - priority) * 2;
- temp = qe_ic_read(qe_ic->regs, control_reg);
- temp &= ~(SIGNAL_MASK << shift);
- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
- qe_ic_write(qe_ic->regs, control_reg, temp);
-
- return 0;
-}
-
-static struct bus_type qe_ic_subsys = {
- .name = "qe_ic",
- .dev_name = "qe_ic",
-};
-
-static struct device device_qe_ic = {
- .id = 0,
- .bus = &qe_ic_subsys,
-};
-
-static int __init init_qe_ic_sysfs(void)
-{
- int rc;
-
- printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
-
- rc = subsys_system_register(&qe_ic_subsys, NULL);
- if (rc) {
- printk(KERN_ERR "Failed registering qe_ic sys class\n");
- return -ENODEV;
- }
- rc = device_register(&device_qe_ic);
- if (rc) {
- printk(KERN_ERR "Failed registering qe_ic sys device\n");
- return -ENODEV;
- }
- return 0;
-}
-
-subsys_initcall(init_qe_ic_sysfs);
+subsys_initcall(qe_ic_of_init);
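
qe_ic is now a self-contained subsys_initcall that finds its own "fsl,qe-ic" node, picks the cascade handlers itself (the muxed-MPIC variant when the high and low vectors map to the same parent IRQ), and always writes 0 to QEIC_CICR, so the spread-mode flags and caller-supplied handlers disappear from the interface; the exported priority-tuning helpers and the sysfs stub go with them. The NO_IRQ checks become plain tests against 0 because that is the generic "no interrupt" value -- both irq_of_parse_and_map() and irq_linear_revmap() already return 0 on failure (sketch):

	unsigned int virq = irq_of_parse_and_map(node, 1);

	if (!virq)		/* was: virq == NO_IRQ, a powerpc-ism */
		return;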
diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
deleted file mode 100644
index 08c695672a03..000000000000
--- a/drivers/soc/fsl/qe/qe_ic.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * drivers/soc/fsl/qe/qe_ic.h
- *
- * QUICC ENGINE Interrupt Controller Header
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Li Yang <leoli@freescale.com>
- * Based on code from Shlomi Gridish <gridish@freescale.com>
- */
-#ifndef _POWERPC_SYSDEV_QE_IC_H
-#define _POWERPC_SYSDEV_QE_IC_H
-
-#include <soc/fsl/qe/qe_ic.h>
-
-#define NR_QE_IC_INTS 64
-
-/* QE IC registers offset */
-#define QEIC_CICR 0x00
-#define QEIC_CIVEC 0x04
-#define QEIC_CRIPNR 0x08
-#define QEIC_CIPNR 0x0c
-#define QEIC_CIPXCC 0x10
-#define QEIC_CIPYCC 0x14
-#define QEIC_CIPWCC 0x18
-#define QEIC_CIPZCC 0x1c
-#define QEIC_CIMR 0x20
-#define QEIC_CRIMR 0x24
-#define QEIC_CICNR 0x28
-#define QEIC_CIPRTA 0x30
-#define QEIC_CIPRTB 0x34
-#define QEIC_CRICR 0x3c
-#define QEIC_CHIVEC 0x60
-
-/* Interrupt priority registers */
-#define CIPCC_SHIFT_PRI0 29
-#define CIPCC_SHIFT_PRI1 26
-#define CIPCC_SHIFT_PRI2 23
-#define CIPCC_SHIFT_PRI3 20
-#define CIPCC_SHIFT_PRI4 13
-#define CIPCC_SHIFT_PRI5 10
-#define CIPCC_SHIFT_PRI6 7
-#define CIPCC_SHIFT_PRI7 4
-
-/* CICR priority modes */
-#define CICR_GWCC 0x00040000
-#define CICR_GXCC 0x00020000
-#define CICR_GYCC 0x00010000
-#define CICR_GZCC 0x00080000
-#define CICR_GRTA 0x00200000
-#define CICR_GRTB 0x00400000
-#define CICR_HPIT_SHIFT 8
-#define CICR_HPIT_MASK 0x00000300
-#define CICR_HP_SHIFT 24
-#define CICR_HP_MASK 0x3f000000
-
-/* CICNR */
-#define CICNR_WCC1T_SHIFT 20
-#define CICNR_ZCC1T_SHIFT 28
-#define CICNR_YCC1T_SHIFT 12
-#define CICNR_XCC1T_SHIFT 4
-
-/* CRICR */
-#define CRICR_RTA1T_SHIFT 20
-#define CRICR_RTB1T_SHIFT 28
-
-/* Signal indicator */
-#define SIGNAL_MASK 3
-#define SIGNAL_HIGH 2
-#define SIGNAL_LOW 0
-
-struct qe_ic {
- /* Control registers offset */
- volatile u32 __iomem *regs;
-
- /* The remapper for this QEIC */
- struct irq_domain *irqhost;
-
- /* The "linux" controller struct */
- struct irq_chip hc_irq;
-
- /* VIRQ numbers of QE high/low irqs */
- unsigned int virq_high;
- unsigned int virq_low;
-};
-
-/*
- * QE interrupt controller internal structure
- */
-struct qe_ic_info {
- u32 mask; /* location of this source at the QIMR register. */
- u32 mask_reg; /* Mask register offset */
- u8 pri_code; /* for grouped interrupts sources - the interrupt
- code as appears at the group priority register */
- u32 pri_reg; /* Group priority register offset */
-};
-
-#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index 3657e296a8a2..11ea08e97db7 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -18,8 +18,6 @@
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
#undef DEBUG
@@ -30,7 +28,7 @@ int par_io_init(struct device_node *np)
{
struct resource res;
int ret;
- const u32 *num_ports;
+ u32 num_ports;
/* Map Parallel I/O ports registers */
ret = of_address_to_resource(np, 0, &res);
@@ -38,9 +36,8 @@ int par_io_init(struct device_node *np)
return ret;
par_io = ioremap(res.start, resource_size(&res));
- num_ports = of_get_property(np, "num-ports", NULL);
- if (num_ports)
- num_par_io_ports = *num_ports;
+ if (!of_property_read_u32(np, "num-ports", &num_ports))
+ num_par_io_ports = num_ports;
return 0;
}
@@ -57,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
/* Set open drain, if required */
- tmp_val = in_be32(&par_io->cpodr);
+ tmp_val = qe_ioread32be(&par_io->cpodr);
if (open_drain)
- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+ qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
else
- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+ qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
/* define direction */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- in_be32(&par_io->cpdir2) :
- in_be32(&par_io->cpdir1);
+ qe_ioread32be(&par_io->cpdir2) :
+ qe_ioread32be(&par_io->cpdir1);
/* get all bits mask for 2 bit per port */
pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -78,34 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- out_be32(&par_io->cpdir2,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
} else {
- out_be32(&par_io->cpdir1,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
}
/* define pin assignment */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- in_be32(&par_io->cppar2) :
- in_be32(&par_io->cppar1);
+ qe_ioread32be(&par_io->cppar2) :
+ qe_ioread32be(&par_io->cppar1);
new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
(pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- out_be32(&par_io->cppar2,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
} else {
- out_be32(&par_io->cppar1,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
}
}
EXPORT_SYMBOL(__par_io_config_pin);
@@ -133,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
/* calculate pin location */
pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
- tmp_val = in_be32(&par_io[port].cpdata);
+ tmp_val = qe_ioread32be(&par_io[port].cpdata);
if (val == 0) /* clear */
- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+ qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
else /* set */
- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+ qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
return 0;
}
@@ -147,23 +140,20 @@ EXPORT_SYMBOL(par_io_data_set);
int par_io_of_config(struct device_node *np)
{
struct device_node *pio;
- const phandle *ph;
int pio_map_len;
- const unsigned int *pio_map;
+ const __be32 *pio_map;
if (par_io == NULL) {
printk(KERN_ERR "par_io not initialized\n");
return -1;
}
- ph = of_get_property(np, "pio-handle", NULL);
- if (ph == NULL) {
+ pio = of_parse_phandle(np, "pio-handle", 0);
+ if (pio == NULL) {
printk(KERN_ERR "pio-handle not available\n");
return -1;
}
- pio = of_find_node_by_phandle(*ph);
-
pio_map = of_get_property(pio, "pio-map", &pio_map_len);
if (pio_map == NULL) {
printk(KERN_ERR "pio-map is not set!\n");
@@ -176,9 +166,15 @@ int par_io_of_config(struct device_node *np)
}
while (pio_map_len > 0) {
- par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
- (int) pio_map[2], (int) pio_map[3],
- (int) pio_map[4], (int) pio_map[5]);
+ u8 port = be32_to_cpu(pio_map[0]);
+ u8 pin = be32_to_cpu(pio_map[1]);
+ int dir = be32_to_cpu(pio_map[2]);
+ int open_drain = be32_to_cpu(pio_map[3]);
+ int assignment = be32_to_cpu(pio_map[4]);
+ int has_irq = be32_to_cpu(pio_map[5]);
+
+ par_io_config_pin(port, pin, dir, open_drain,
+ assignment, has_irq);
pio_map += 6;
pio_map_len -= 6;
}
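
Two endianness fixes in one function: of_parse_phandle() subsumes the open-coded lookup (sketch of the pattern it replaces, including the be32 handling the old code skipped):

	const __be32 *ph = of_get_property(np, "pio-handle", NULL);
	struct device_node *pio = ph ?
		of_find_node_by_phandle(be32_to_cpup(ph)) : NULL;

and each 6-cell pio-map entry (port, pin, dir, open_drain, assignment, has_irq) is now decoded with be32_to_cpu() instead of being dereferenced as host-endian integers.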
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index e37ebc3be661..7d7d78d3ee50 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
&siram[siram_entry_id * 32 + 0x200 + i]);
}
- setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
- SIR_LAST);
- setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
- SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
/* Set SIxMR register */
sixmr = SIMR_SAD(siram_entry_id);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 024d239ac1e1..90157acc5ba6 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -15,7 +15,6 @@
#include <linux/spinlock.h>
#include <linux/export.h>
-#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -35,8 +34,8 @@ int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
return -EINVAL;
spin_lock_irqsave(&cmxgcr_lock, flags);
- clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
- ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
spin_unlock_irqrestore(&cmxgcr_lock, flags);
return 0;
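
The qe_clrsetbits_be32()/qe_setbits_be32()/qe_clrbits_be32() calls used from here on are read-modify-write helpers over the portable accessors: clear the `clr` bits, then set the `set` bits. Assumed equivalent of the 32-bit variant (sketch; the real helpers live in the QE headers):

	static inline void qe_clrsetbits_be32(void __iomem *addr,
					      u32 clr, u32 set)
	{
		qe_iowrite32be((qe_ioread32be(addr) & ~clr) | set, addr);
	}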
@@ -80,8 +79,8 @@ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
return -EINVAL;
}
- clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
- UCC_GUEMR_SET_RESERVED3 | speed);
+ qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
return 0;
}
@@ -109,9 +108,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
if (set)
- setbits32(cmxucr, mask << shift);
+ qe_setbits_be32(cmxucr, mask << shift);
else
- clrbits32(cmxucr, mask << shift);
+ qe_clrbits_be32(cmxucr, mask << shift);
return 0;
}
@@ -207,8 +206,8 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
if (mode == COMM_DIR_RX)
shift += 4;
- clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- clock_bits << shift);
+ qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
return 0;
}
@@ -540,8 +539,8 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
&qe_mux_reg->cmxsi1cr_h;
- qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- clock_bits << shift);
+ qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
return 0;
}
@@ -650,9 +649,9 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
shift = ucc_get_tdm_sync_shift(mode, tdm_num);
- qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
- QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- source << shift);
+ qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
return 0;
}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index af4d80e38521..ad6193ea4597 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -29,41 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+ &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+ &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+ &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+ &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+ &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+ &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+ &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+ &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+ &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+ &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+ &uccf->uf_regs->urfset,
+ qe_ioread16be(&uccf->uf_regs->urfset));
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+ &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+ &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+ &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+ &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+ &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+ &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+ &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);
@@ -85,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+ qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
@@ -97,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
uf_regs = uccf->uf_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr = in_be32(&uf_regs->gumr);
+ gumr = qe_ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr |= UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 1;
@@ -106,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
gumr |= UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 1;
}
- out_be32(&uf_regs->gumr, gumr);
+ qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);
@@ -118,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
uf_regs = uccf->uf_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr = in_be32(&uf_regs->gumr);
+ gumr = qe_ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr &= ~UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 0;
@@ -127,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
gumr &= ~UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 0;
}
- out_be32(&uf_regs->gumr, gumr);
+ qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);
@@ -196,6 +197,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
__func__);
return -ENOMEM;
}
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
/* Fill fast UCC structure */
uccf->uf_info = uf_info;
@@ -259,15 +262,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
gumr |= uf_info->tenc;
gumr |= uf_info->tcrc;
gumr |= uf_info->mode;
- out_be32(&uf_regs->gumr, gumr);
+ qe_iowrite32be(gumr, &uf_regs->gumr);
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+ if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
__func__);
- uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
@@ -277,24 +279,25 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
qe_muram_alloc(uf_info->urfs +
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+ if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
__func__);
- uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
/* Set Virtual Fifo registers */
- out_be16(&uf_regs->urfs, uf_info->urfs);
- out_be16(&uf_regs->urfet, uf_info->urfet);
- out_be16(&uf_regs->urfset, uf_info->urfset);
- out_be16(&uf_regs->utfs, uf_info->utfs);
- out_be16(&uf_regs->utfet, uf_info->utfet);
- out_be16(&uf_regs->utftt, uf_info->utftt);
+ qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
/* utfb, urfb are offsets from MURAM base */
- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+ qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+ &uf_regs->utfb);
+ qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+ &uf_regs->urfb);
/* Mux clocking */
/* Grant Support */
@@ -362,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
}
/* Set interrupt mask register at UCC level. */
- out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+ qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- out_be32(&uf_regs->ucce, 0xffffffff);
+ qe_iowrite32be(0xffffffff, &uf_regs->ucce);
*uccf_ret = uccf;
return 0;
@@ -381,11 +384,8 @@ void ucc_fast_free(struct ucc_fast_private * uccf)
if (!uccf)
return;
- if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
- qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
-
- if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
- qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
if (uccf->uf_regs)
iounmap(uccf->uf_regs);
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 34f0ec3a63b5..274d34449846 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr_l = in_be32(&us_regs->gumr_l);
+ gumr_l = qe_ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l |= UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 1;
@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
gumr_l |= UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 1;
}
- out_be32(&us_regs->gumr_l, gumr_l);
+ qe_iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);
@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
us_regs = uccs->us_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr_l = in_be32(&us_regs->gumr_l);
+ gumr_l = qe_ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 0;
}
- out_be32(&us_regs->gumr_l, gumr_l);
+ qe_iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);
@@ -154,6 +154,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
__func__);
return -ENOMEM;
}
+ uccs->rx_base_offset = -1;
+ uccs->tx_base_offset = -1;
+ uccs->us_pram_offset = -1;
/* Fill slow UCC structure */
uccs->us_info = us_info;
@@ -179,7 +182,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* Get PRAM base */
uccs->us_pram_offset =
qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
- if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+ if (uccs->us_pram_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
ucc_slow_free(uccs);
return -ENOMEM;
@@ -198,7 +201,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return ret;
}
- out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+ qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
INIT_LIST_HEAD(&uccs->confQ);
@@ -206,10 +209,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->rx_base_offset =
qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
- if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+ if (uccs->rx_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
us_info->rx_bd_ring_len);
- uccs->rx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
}
@@ -217,9 +219,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->tx_base_offset =
qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
- if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+ if (uccs->tx_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
- uccs->tx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
}
@@ -228,27 +229,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
/* clear bd buffer */
- out_be32(&bd->buf, 0);
+ qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
- out_be32((u32 *) bd, 0);
+ qe_iowrite32be(0, (u32 *)bd);
bd++;
}
/* for last BD set Wrap bit */
- out_be32(&bd->buf, 0);
- out_be32((u32 *) bd, cpu_to_be32(T_W));
+ qe_iowrite32be(0, &bd->buf);
+ qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
- out_be32((u32*)bd, 0);
+ qe_iowrite32be(0, (u32 *)bd);
/* clear bd buffer */
- out_be32(&bd->buf, 0);
+ qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
- out_be32((u32*)bd, cpu_to_be32(R_W));
- out_be32(&bd->buf, 0);
+ qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
+ qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
/* gumr_h */
@@ -269,7 +270,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_H_TXSY;
if (us_info->rtsm)
gumr |= UCC_SLOW_GUMR_H_RTSM;
- out_be32(&us_regs->gumr_h, gumr);
+ qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
@@ -282,7 +283,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_L_TINV;
if (us_info->tend)
gumr |= UCC_SLOW_GUMR_L_TEND;
- out_be32(&us_regs->gumr_l, gumr);
+ qe_iowrite32be(gumr, &us_regs->gumr_l);
/* Function code registers */
@@ -292,8 +293,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->us_pram->rbmr = UCC_BMR_BO_BE;
/* rbase, tbase are offsets from MURAM base */
- out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
- out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+ qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
/* Mux clocking */
/* Grant Support */
@@ -323,14 +324,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
}
/* Set interrupt mask register at UCC level. */
- out_be16(&us_regs->uccm, us_info->uccm_mask);
+ qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- out_be16(&us_regs->ucce, 0xffff);
+ qe_iowrite16be(0xffff, &us_regs->ucce);
/* Issue QE Init command */
if (us_info->init_tx && us_info->init_rx)
@@ -352,14 +353,9 @@ void ucc_slow_free(struct ucc_slow_private * uccs)
if (!uccs)
return;
- if (uccs->rx_base_offset)
- qe_muram_free(uccs->rx_base_offset);
-
- if (uccs->tx_base_offset)
- qe_muram_free(uccs->tx_base_offset);
-
- if (uccs->us_pram)
- qe_muram_free(uccs->us_pram_offset);
+ qe_muram_free(uccs->rx_base_offset);
+ qe_muram_free(uccs->tx_base_offset);
+ qe_muram_free(uccs->us_pram_offset);
if (uccs->us_regs)
iounmap(uccs->us_regs);
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
index 32d8269fa692..890f236ea697 100644
--- a/drivers/soc/fsl/qe/usb.c
+++ b/drivers/soc/fsl/qe/usb.c
@@ -43,7 +43,7 @@ int qe_usb_clock_set(enum qe_clock clk, int rate)
spin_lock_irqsave(&cmxgcr_lock, flags);
- clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+ qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
spin_unlock_irqrestore(&cmxgcr_lock, flags);
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 8aaebf13e2e6..0281ef9a1800 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -10,7 +10,7 @@ config IMX_GPCV2_PM_DOMAINS
config IMX_SCU_SOC
bool "i.MX System Controller Unit SoC info support"
- depends on IMX_SCU
+ depends on IMX_SCU || COMPILE_TEST
select SOC_BUS
help
If you say yes here you get support for the NXP i.MX System
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index d84ed736cdb0..719e1f189ebf 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -142,10 +142,16 @@ static const struct imx8_soc_data imx8mn_soc_data = {
.soc_revision = imx8mm_soc_revision,
};
+static const struct imx8_soc_data imx8mp_soc_data = {
+ .name = "i.MX8MP",
+ .soc_revision = imx8mm_soc_revision,
+};
+
static const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
+ { .compatible = "fsl,imx8mp", .data = &imx8mp_soc_data, },
{ }
};
@@ -204,6 +210,9 @@ static int __init imx8_soc_init(void)
goto free_serial_number;
}
+ pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
+
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c
index cb0303a0fe60..dff1375851cf 100644
--- a/drivers/soc/lantiq/fpi-bus.c
+++ b/drivers/soc/lantiq/fpi-bus.c
@@ -28,14 +28,12 @@ static int ltq_fpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res_xbar;
struct regmap *rcu_regmap;
void __iomem *xbar_membase;
u32 rcu_ahb_endianness_reg_offset;
int ret;
- res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xbar_membase = devm_ioremap_resource(dev, res_xbar);
+ xbar_membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xbar_membase))
return PTR_ERR(xbar_membase);
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 3c82de5f9417..de20e6cba83b 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -9,11 +9,51 @@
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
-#define CMDQ_ARG_A_WRITE_MASK 0xffff
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
+#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
-#define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
- << 32 | CMDQ_EOC_IRQ_EN)
+
+struct cmdq_instruction {
+ union {
+ u32 value;
+ u32 mask;
+ };
+ union {
+ u16 offset;
+ u16 event;
+ };
+ u8 subsys;
+ u8 op;
+};
+
+int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx)
+{
+ struct of_phandle_args spec;
+ int err;
+
+ if (!client_reg)
+ return -ENOENT;
+
+ err = of_parse_phandle_with_fixed_args(dev->of_node,
+ "mediatek,gce-client-reg",
+ 3, idx, &spec);
+ if (err < 0) {
+ dev_err(dev,
+ "error %d can't parse gce-client-reg property (%d)",
+ err, idx);
+
+ return err;
+ }
+
+ client_reg->subsys = (u8)spec.args[0];
+ client_reg->offset = (u16)spec.args[1];
+ client_reg->size = (u16)spec.args[2];
+ of_node_put(spec.np);
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_dev_get_client_reg);
static void cmdq_client_timeout(struct timer_list *t)
{
@@ -110,10 +150,10 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
-static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
- u32 arg_a, u32 arg_b)
+static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
+ struct cmdq_instruction inst)
{
- u64 *cmd_ptr;
+ struct cmdq_instruction *cmd_ptr;
if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
/*
@@ -129,8 +169,9 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
__func__, (u32)pkt->buf_size);
return -ENOMEM;
}
+
cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
- (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
+ *cmd_ptr = inst;
pkt->cmd_buf_size += CMDQ_INST_SIZE;
return 0;
@@ -138,24 +179,34 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
- u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
- (subsys << CMDQ_SUBSYS_SHIFT);
+ struct cmdq_instruction inst;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
+ inst.op = CMDQ_CODE_WRITE;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
- u32 offset_mask = offset;
- int err = 0;
+ struct cmdq_instruction inst = { {0} };
+ u16 offset_mask = offset;
+ int err;
if (mask != 0xffffffff) {
- err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
offset_mask |= CMDQ_WRITE_ENABLE_MASK;
}
- err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
+ err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
return err;
}
@@ -163,43 +214,85 @@ EXPORT_SYMBOL(cmdq_pkt_write_mask);
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
- u32 arg_b;
+ struct cmdq_instruction inst = { {0} };
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- /*
- * WFE arg_b
- * bit 0-11: wait value
- * bit 15: 1 - wait, 0 - no wait
- * bit 16-27: update value
- * bit 31: 1 - update, 0 - no update
- */
- arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_OPTION;
+ inst.event = event;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
+ struct cmdq_instruction inst = { {0} };
+
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
- CMDQ_WFE_UPDATE);
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_POLL;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+ err = cmdq_pkt_append_command(pkt, inst);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll);
+
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ offset = offset | CMDQ_POLL_ENABLE_MASK;
+ err = cmdq_pkt_poll(pkt, subsys, offset, value);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll_mask);
+
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
+ struct cmdq_instruction inst = { {0} };
int err;
/* insert EOC and generate IRQ for each command iteration */
- err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);
+ inst.op = CMDQ_CODE_EOC;
+ inst.value = CMDQ_EOC_IRQ_EN;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
/* JUMP to end */
- err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);
+ inst.op = CMDQ_CODE_JUMP;
+ inst.value = CMDQ_JUMP_PASS;
+ err = cmdq_pkt_append_command(pkt, inst);
return err;
}
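
Two things happen in this cmdq rework. First, every GCE opcode is assembled through struct cmdq_instruction rather than by hand-shifting into a u64; on the little-endian hosts these MediaTek SoCs ship on, the member order value/mask, offset/event, subsys, op occupies the same bytes as the old ((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b encoding (layout assumed from the shift constants, sketch):

	/* a check one could add next to the struct definition */
	BUILD_BUG_ON(sizeof(struct cmdq_instruction) != sizeof(u64));
	/* bits  0..31  value / mask     (old arg_b)
	 * bits 32..47  offset / event   (old arg_a, low half)
	 * bits 48..55  subsys
	 * bits 56..63  op               (old code << CMDQ_OP_CODE_SHIFT) */

Second, the `err |= ...` accumulation is replaced by early returns: OR-ing two negative errnos produces a meaningless value, so each append is now checked on its own.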
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 79d826553ac8..d0a73e76d563 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -45,13 +45,13 @@ config QCOM_GLINK_SSR
neighboring subsystems going up or down.
config QCOM_GSBI
- tristate "QCOM General Serial Bus Interface"
- depends on ARCH_QCOM || COMPILE_TEST
- select MFD_SYSCON
- help
- Say y here to enable GSBI support. The GSBI provides control
- functions for connecting the underlying serial UART, SPI, and I2C
- devices to the output pins.
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
config QCOM_LLCC
tristate "Qualcomm Technologies, Inc. LLCC driver"
@@ -71,10 +71,10 @@ config QCOM_OCMEM
depends on ARCH_QCOM
select QCOM_SCM
help
- The On Chip Memory (OCMEM) allocator allows various clients to
- allocate memory from OCMEM based on performance, latency and power
- requirements. This is typically used by the GPU, camera/video, and
- audio components on some Snapdragon SoCs.
+ The On Chip Memory (OCMEM) allocator allows various clients to
+ allocate memory from OCMEM based on performance, latency and power
+ requirements. This is typically used by the GPU, camera/video, and
+ audio components on some Snapdragon SoCs.
config QCOM_PM
bool "Qualcomm Power Management"
@@ -198,8 +198,8 @@ config QCOM_APR
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
help
- Enable APR IPC protocol support between
- application processor and QDSP6. APR is
- used by audio driver to configure QDSP6
- ASM, ADM and AFE modules.
+ Enable APR IPC protocol support between
+ application processor and QDSP6. APR is
+ used by audio driver to configure QDSP6
+ ASM, ADM and AFE modules.
endmenu
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index f9e309f0acd3..1a03eaa38c46 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -655,8 +655,12 @@ int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
qmi->sock = qmi_sock_create(qmi, &qmi->sq);
if (IS_ERR(qmi->sock)) {
- pr_err("failed to create QMI socket\n");
- ret = PTR_ERR(qmi->sock);
+ if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) {
+ ret = -EPROBE_DEFER;
+ } else {
+ pr_err("failed to create QMI socket\n");
+ ret = PTR_ERR(qmi->sock);
+ }
goto err_destroy_wq;
}
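
qmi_sock_create() opens an AF_QIPCRTR socket; if the qrtr transport is not registered yet (for instance, the module has not loaded), socket creation fails with -EAFNOSUPPORT. Translating that one case to -EPROBE_DEFER lets the driver core re-run the caller's probe once qrtr appears instead of failing permanently -- the usual pattern for a dependency that may show up later (sketch):

	ret = PTR_ERR(qmi->sock);
	if (ret == -EAFNOSUPPORT)	/* address family not there yet */
		ret = -EPROBE_DEFER;	/* driver core will retry probe */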
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 5741ec3fa814..4d264d0672c4 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
static struct rpmhpd sdm845_mx_ao = {
.pd = { .name = "mx_ao", },
+ .active_only = true,
.peer = &sdm845_mx,
.res_name = "mx.lvl",
};
@@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
static struct rpmhpd sdm845_cx_ao = {
.pd = { .name = "cx_ao", },
+ .active_only = true,
.peer = &sdm845_cx,
.parent = &sdm845_mx_ao.pd,
.res_name = "cx.lvl",
@@ -129,8 +131,62 @@ static const struct rpmhpd_desc sdm845_desc = {
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
+/* SM8150 RPMH powerdomains */
+
+static struct rpmhpd sm8150_mmcx_ao;
+static struct rpmhpd sm8150_mmcx = {
+ .pd = { .name = "mmcx", },
+ .peer = &sm8150_mmcx_ao,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd sm8150_mmcx_ao = {
+ .pd = { .name = "mmcx_ao", },
+ .active_only = true,
+ .peer = &sm8150_mmcx,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd *sm8150_rpmhpds[] = {
+ [SM8150_MSS] = &sdm845_mss,
+ [SM8150_EBI] = &sdm845_ebi,
+ [SM8150_LMX] = &sdm845_lmx,
+ [SM8150_LCX] = &sdm845_lcx,
+ [SM8150_GFX] = &sdm845_gfx,
+ [SM8150_MX] = &sdm845_mx,
+ [SM8150_MX_AO] = &sdm845_mx_ao,
+ [SM8150_CX] = &sdm845_cx,
+ [SM8150_CX_AO] = &sdm845_cx_ao,
+ [SM8150_MMCX] = &sm8150_mmcx,
+ [SM8150_MMCX_AO] = &sm8150_mmcx_ao,
+};
+
+static const struct rpmhpd_desc sm8150_desc = {
+ .rpmhpds = sm8150_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8150_rpmhpds),
+};
+
+/* SC7180 RPMH powerdomains */
+static struct rpmhpd *sc7180_rpmhpds[] = {
+ [SC7180_CX] = &sdm845_cx,
+ [SC7180_CX_AO] = &sdm845_cx_ao,
+ [SC7180_GFX] = &sdm845_gfx,
+ [SC7180_MX] = &sdm845_mx,
+ [SC7180_MX_AO] = &sdm845_mx_ao,
+ [SC7180_LMX] = &sdm845_lmx,
+ [SC7180_LCX] = &sdm845_lcx,
+ [SC7180_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sc7180_desc = {
+ .rpmhpds = sc7180_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc7180_rpmhpds),
+};
+
static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ }
};
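The new .active_only flags mark the *_ao domains as voting only while the CPU subsystem is active; each pair (e.g. cx/cx_ao) shares one RPMh resource ("cx.lvl") through the .peer link. A hedged sketch of a consumer voting on such a domain (the DT name and level value are illustrative; the helpers are the standard genpd API from <linux/pm_domain.h>):

    static int demo_vote_cx(struct platform_device *pdev)
    {
        struct device *cx;

        /* Assumes power-domain-names = "cx" in the consumer's DT node */
        cx = dev_pm_domain_attach_by_name(&pdev->dev, "cx");
        if (IS_ERR_OR_NULL(cx))
            return cx ? PTR_ERR(cx) : -ENODEV;

        /* rpmhpd translates the abstract state into an RPMh level vote */
        return dev_pm_genpd_set_performance_state(cx, 64);
    }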
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index f93492b72c04..ba2b8b51d2d9 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -192,21 +192,25 @@ config ARCH_R8A774C0
help
This enables support for the Renesas RZ/G2E SoC.
+config ARCH_R8A77950
+ bool
+
+config ARCH_R8A77951
+ bool
+
config ARCH_R8A7795
bool "Renesas R-Car H3 SoC Platform"
+ select ARCH_R8A77950
+ select ARCH_R8A77951
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
This enables support for the Renesas R-Car H3 SoC.
config ARCH_R8A77960
- bool
+ bool "Renesas R-Car M3-W SoC Platform"
select ARCH_RCAR_GEN3
select SYSC_R8A77960
-
-config ARCH_R8A7796
- bool "Renesas R-Car M3-W SoC Platform"
- select ARCH_R8A77960
help
This enables support for the Renesas R-Car M3-W SoC.
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 14d05a070dd3..2af2e0dd83fe 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -21,7 +21,7 @@ static int rcar_rst_enable_wdt_reset(void __iomem *base)
struct rst_config {
unsigned int modemr; /* Mode Monitoring Register Offset */
- int (*configure)(void *base); /* Platform specific configuration */
+ int (*configure)(void __iomem *base); /* Platform specific config */
};
static const struct rst_config rcar_rst_gen1 __initconst = {
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 27fc59bbb520..c7a2003687c7 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# SAMSUNG SoC drivers
+# Samsung SoC drivers
#
menuconfig SOC_SAMSUNG
bool "Samsung SoC driver support" if COMPILE_TEST
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index b89c26a71c6e..2dad4961a80b 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -3,7 +3,7 @@
* Copyright (c) 2019 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * EXYNOS - CHIP ID support
+ * Exynos - CHIP ID support
* Author: Pankaj Dubey <pankaj.dubey@samsung.com>
* Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
*/
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index d34ca201b8b7..17304fa18429 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS - CPU PMU(Power Management Unit) support
+// Exynos - CPU PMU(Power Management Unit) support
#include <linux/of.h>
#include <linux/of_address.h>
@@ -110,10 +110,8 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmu_base_addr = devm_ioremap_resource(dev, res);
+ pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu_base_addr))
return PTR_ERR(pmu_base_addr);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 977e4daf5a0f..5e851f32307e 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -3,7 +3,7 @@
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Header for EXYNOS PMU Driver support
+ * Header for Exynos PMU Driver support
*/
#ifndef __EXYNOS_PMU_H
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 275d348ed9c9..30f230ed1769 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS3250 - CPU PMU (Power Management Unit) support
+// Exynos3250 - CPU PMU (Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
index a7cdbf1aac0c..cb35103565a6 100644
--- a/drivers/soc/samsung/exynos4-pmu.c
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS4 - CPU PMU(Power Management Unit) support
+// Exynos4 - CPU PMU(Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 19b38e008145..7a2d50be6b4a 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS5250 - CPU PMU (Power Management Unit) support
+// Exynos5250 - CPU PMU (Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index b236d3b47b49..6fedcd78cb45 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS5420 - CPU PMU (Power Management Unit) support
+// Exynos5420 - CPU PMU (Power Management Unit) support
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
new file mode 100644
index 000000000000..58cf8c40d08d
--- /dev/null
+++ b/drivers/soc/sifive/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_SIFIVE
+
+config SIFIVE_L2
+ bool "Sifive L2 Cache controller"
+ help
+ Support for the L2 cache controller on SiFive platforms.
+
+endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
new file mode 100644
index 000000000000..b5caff77938f
--- /dev/null
+++ b/drivers/soc/sifive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
new file mode 100644
index 000000000000..a5069394cd61
--- /dev/null
+++ b/drivers/soc/sifive/sifive_l2_cache.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <soc/sifive/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if (val < 0xFF || (val >= 0x10000 && val < 0x100FF))
+ writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations l2_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+ u32 regval, val;
+
+ regval = readl(l2_base + SIFIVE_L2_CONFIG);
+ val = regval & 0xFF;
+ pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+ val = (regval & 0xFF00) >> 8;
+ pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+ val = (regval & 0xFF0000) >> 16;
+ pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+ val = (regval & 0xFF000000) >> 24;
+ pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+ regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+ pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+ unsigned int add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+ pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DirError interrupt sig */
+ readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+ pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataError interrupt sig */
+ readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+ pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataFail interrupt sig */
+ readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init sifive_l2_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc;
+
+ np = of_find_matching_node(NULL, sifive_l2_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2_base = ioremap(res.start, resource_size(&res));
+ if (!l2_base)
+ return -ENOMEM;
+
+ for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+ if (rc) {
+ pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+ return rc;
+ }
+ }
+
+ l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+}
+device_initcall(sifive_l2_init);
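The notifier chain exported above is the intended consumer interface for these ECC events; a hedged client sketch (the handler body and names are illustrative, the event types come from <soc/sifive/sifive_l2_cache.h>):

    #include <linux/notifier.h>
    #include <soc/sifive/sifive_l2_cache.h>

    static int demo_l2_event(struct notifier_block *nb, unsigned long event,
                             void *msg)
    {
        /* msg is the source string passed by l2_int_handler() above */
        if (event == SIFIVE_L2_ERR_TYPE_UE)
            pr_err("uncorrectable L2 error: %s\n", (char *)msg);

        return NOTIFY_OK;
    }

    static struct notifier_block demo_l2_nb = {
        .notifier_call = demo_l2_event,
    };

    static int __init demo_l2_register(void)
    {
        return register_sifive_l2_error_notifier(&demo_l2_nb);
    }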
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 84bd615c4a92..3693532949b8 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -126,6 +126,7 @@ config SOC_TEGRA_FUSE
def_bool y
depends on ARCH_TEGRA
select SOC_BUS
+ select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC
config SOC_TEGRA_FLOWCTRL
bool
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
index eb96a3086d6d..5db919d96aba 100644
--- a/drivers/soc/tegra/flowctrl.c
+++ b/drivers/soc/tegra/flowctrl.c
@@ -219,7 +219,7 @@ static int __init tegra_flowctrl_init(void)
return 0;
}
- tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+ tegra_flowctrl_base = ioremap(res.start, resource_size(&res));
if (!tegra_flowctrl_base)
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 4d719d4b8d5a..802717b9f6a3 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -49,6 +49,9 @@ static struct tegra_fuse *fuse = &(struct tegra_fuse) {
};
static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+ { .compatible = "nvidia,tegra194-efuse", .data = &tegra194_fuse_soc },
+#endif
#ifdef CONFIG_ARCH_TEGRA_186_SOC
{ .compatible = "nvidia,tegra186-efuse", .data = &tegra186_fuse_soc },
#endif
@@ -408,7 +411,7 @@ static int __init tegra_init_fuse(void)
}
}
- fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ fuse->base = ioremap(regs.start, resource_size(&regs));
if (!fuse->base) {
pr_err("failed to map FUSE registers\n");
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index b8daaf5b7291..e6037f900fb7 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -36,7 +36,8 @@
defined(CONFIG_ARCH_TEGRA_124_SOC) || \
defined(CONFIG_ARCH_TEGRA_132_SOC) || \
defined(CONFIG_ARCH_TEGRA_210_SOC) || \
- defined(CONFIG_ARCH_TEGRA_186_SOC)
+ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC)
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
if (WARN_ON(!fuse->base))
@@ -320,3 +321,32 @@ const struct tegra_fuse_soc tegra186_fuse_soc = {
.num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
};
#endif
+
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct tegra_fuse_info tegra194_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra194_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra194_fuse_info,
+ .lookups = tegra194_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
+};
+#endif
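The lookup table above binds fuse words to nvmem cells that the XUSB pad controller resolves by con_id; a hedged sketch of that consumer side (function name and error handling are illustrative; the API is <linux/nvmem-consumer.h>):

    static int demo_read_calibration(struct device *dev, u32 *out)
    {
        struct nvmem_cell *cell;
        size_t len;
        void *value;

        cell = nvmem_cell_get(dev, "calibration");
        if (IS_ERR(cell))
            return PTR_ERR(cell);

        value = nvmem_cell_read(cell, &len);
        nvmem_cell_put(cell);
        if (IS_ERR(value))
            return PTR_ERR(value);

        if (len >= sizeof(*out))
            memcpy(out, value, sizeof(*out));
        kfree(value);

        return 0;
    }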
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 0f74c2c34af0..94a059e577a1 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -108,4 +108,8 @@ extern const struct tegra_fuse_soc tegra210_fuse_soc;
extern const struct tegra_fuse_soc tegra186_fuse_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_fuse_soc tegra194_fuse_soc;
+#endif
+
#endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index df76778af601..089d9340564b 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -21,18 +21,15 @@
#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
(0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
-static void __iomem *apbmisc_base;
-static void __iomem *strapping_base;
static bool long_ram_code;
+static u32 strapping;
+static u32 chipid;
u32 tegra_read_chipid(void)
{
- if (!apbmisc_base) {
- WARN(1, "Tegra Chip ID not yet available\n");
- return 0;
- }
+ WARN(!chipid, "Tegra APB MISC not yet available\n");
- return readl_relaxed(apbmisc_base + 4);
+ return chipid;
}
u8 tegra_get_chip_id(void)
@@ -42,10 +39,9 @@ u8 tegra_get_chip_id(void)
u32 tegra_read_straps(void)
{
- if (strapping_base)
- return readl_relaxed(strapping_base);
- else
- return 0;
+ WARN(!chipid, "Tegra APB MISC not yet available\n");
+
+ return strapping;
}
u32 tegra_read_ram_code(void)
@@ -63,6 +59,7 @@ u32 tegra_read_ram_code(void)
static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra20-apbmisc", },
{ .compatible = "nvidia,tegra186-misc", },
+ { .compatible = "nvidia,tegra194-misc", },
{},
};
@@ -103,6 +100,7 @@ void __init tegra_init_revision(void)
void __init tegra_init_apbmisc(void)
{
+ void __iomem *apbmisc_base, *strapping_base;
struct resource apbmisc, straps;
struct device_node *np;
@@ -123,7 +121,7 @@ void __init tegra_init_apbmisc(void)
apbmisc.flags = IORESOURCE_MEM;
/* strapping options */
- if (tegra_get_chip_id() == TEGRA124) {
+ if (of_machine_is_compatible("nvidia,tegra124")) {
straps.start = 0x7000e864;
straps.end = 0x7000e867;
} else {
@@ -159,13 +157,21 @@ void __init tegra_init_apbmisc(void)
}
}
- apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
- if (!apbmisc_base)
+ apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
+ if (!apbmisc_base) {
pr_err("failed to map APBMISC registers\n");
+ } else {
+ chipid = readl_relaxed(apbmisc_base + 4);
+ iounmap(apbmisc_base);
+ }
- strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
- if (!strapping_base)
+ strapping_base = ioremap(straps.start, resource_size(&straps));
+ if (!strapping_base) {
pr_err("failed to map strapping options registers\n");
+ } else {
+ strapping = readl_relaxed(strapping_base);
+ iounmap(strapping_base);
+ }
long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index ea0e11a09c12..1699dda6b393 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -2826,7 +2826,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
of_address_to_resource(np, index, &regs);
- wake = ioremap_nocache(regs.start, resource_size(&regs));
+ wake = ioremap(regs.start, resource_size(&regs));
if (!wake) {
dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
@@ -3097,7 +3097,7 @@ static int __init tegra_pmc_early_init(void)
}
}
- pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+ pmc->base = ioremap(regs.start, resource_size(&regs));
if (!pmc->base) {
pr_err("failed to map PMC registers\n");
of_node_put(np);
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
index ea0eede48802..367a71a3cd10 100644
--- a/drivers/soc/tegra/regulators-tegra20.c
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -162,6 +162,9 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
core_target_uV = max(rtc_uV - max_spread, core_target_uV);
}
+ if (core_uV == core_target_uV)
+ goto update_rtc;
+
err = regulator_set_voltage_rdev(core_rdev,
core_target_uV,
core_max_uV,
@@ -170,7 +173,7 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
return err;
core_uV = core_target_uV;
-
+update_rtc:
if (rtc_uV < rtc_min_uV) {
rtc_target_uV = min(rtc_uV + max_spread, rtc_min_uV);
rtc_target_uV = min(core_uV + max_spread, rtc_target_uV);
@@ -179,6 +182,9 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
rtc_target_uV = max(core_uV - max_spread, rtc_target_uV);
}
+ if (rtc_uV == rtc_target_uV)
+ continue;
+
err = regulator_set_voltage_rdev(rtc_rdev,
rtc_target_uV,
rtc_max_uV,
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
index 8e623ff18e70..7f21f31de09d 100644
--- a/drivers/soc/tegra/regulators-tegra30.c
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -209,6 +209,9 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
cpu_target_uV = max(core_uV - max_spread, cpu_target_uV);
}
+ if (cpu_uV == cpu_target_uV)
+ goto update_core;
+
err = regulator_set_voltage_rdev(cpu_rdev,
cpu_target_uV,
cpu_max_uV,
@@ -231,6 +234,9 @@ update_core:
core_target_uV = max(core_target_uV, core_uV - core_max_step);
}
+ if (core_uV == core_target_uV)
+ continue;
+
err = regulator_set_voltage_rdev(core_rdev,
core_target_uV,
core_max_uV,
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index cf545f428d03..4486e055794c 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+config TI_K3_RINGACC
+ bool "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+ and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+ If unsure, say N.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 788b5cd1e180..bec827937a5f 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644
index 000000000000..5fb2ee2ac978
--- /dev/null
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @free: Number of free elements
+ * @occ: Ring occupancy
+ * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
+ * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
+ * @ring_id: Ring Id
+ * @parent: Pointer on struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer on RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of ring in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA reset w/a enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of rings descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+};
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies, 0);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
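A hedged client-side sketch of the request path (the property name and error codes are illustrative; of_k3_ringacc_get_by_phandle() is defined later in this file and declared in <linux/soc/ti/k3-ringacc.h>):

    static int demo_get_ring(struct device *dev, struct k3_ring **out)
    {
        struct k3_ringacc *ra;

        ra = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
        if (IS_ERR(ra))
            return PTR_ERR(ra);

        *out = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
        if (!*out)
            return -EBUSY;

        return 0;
    }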
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ ring->size,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ mode,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = readl(&ring->rt->occ);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+ * Ring the doorbell 2**22 - ringOcc times.
+ * This will wrap the internal UDMAP ring state occupancy
+ * counter (which is 21-bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+ * iteration if possible to minimize the total
+ * of writes
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
+ irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ u32 ring_idx;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_idx = ring->ring_id;
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring_idx,
+ lower_32_bits(ring->ring_mem_dma),
+ upper_32_bits(ring->ring_mem_dma),
+ ring->size,
+ ring->mode,
+ ring->elm_size,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring_idx);
+
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << cfg->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+ * In case of shared ring only the first user (master user) can
+ * configure the ring. The sequence should be by the client:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+ }
+
+ ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
+
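A hedged configuration sketch to pair with the request above (the size, element size and mode are illustrative values):

    static int demo_cfg_ring(struct k3_ring *ring)
    {
        struct k3_ring_cfg cfg = {
            .size = 128,
            .elm_size = K3_RINGACC_RING_ELSIZE_8,
            .mode = K3_RINGACC_RING_MODE_RING,
            .flags = 0,
        };

        /* Allocates the ring memory and programs the ring via TI-SCI */
        return k3_ringacc_ring_cfg(ring, &cfg);
    }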
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->free)
+ ring->free = ring->size - readl(&ring->rt->occ);
+
+ return ring->free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
+ ring->occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
+ ring->windex, ring->occ, ring->rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+ ring->windex = (ring->windex + 1) % ring->size;
+ ring->free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->free, ring->windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->rindex = (ring->rindex + 1) % ring->size;
+ ring->occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->occ, ring->rindex, elem_ptr);
+ return 0;
+}
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
+ ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->free, ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
+
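A hedged producer/consumer sketch for an 8-byte element ring (push fails with -ENOMEM when the ring is full, pop with -ENODATA when it is empty; the round trip in one function is illustrative only):

    static int demo_xfer(struct k3_ring *ring, dma_addr_t desc)
    {
        u64 elem = (u64)desc;
        int ret;

        ret = k3_ringacc_ring_push(ring, &elem);
        if (ret)
            return ret;

        /* Normally done from the ring's completion interrupt */
        return k3_ringacc_ring_pop(ring, &elem);
    }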
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->dma_ring_reset_quirk =
+ of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+ base_fifo = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+ ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "proxy_target");
+ ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kcalloc(dev, ringacc->num_rings,
+ sizeof(*ringacc->rings), GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+ ringacc->proxy_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_proxies),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+ dev_set_drvdata(dev, ringacc);
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", },
+ {},
+};
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(k3_ringacc_driver);
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 1ccc9064e1eb..37f3db6c041c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -25,6 +25,8 @@
static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
+#define knav_dev_lock_held() \
+ lockdep_is_held(&knav_dev_lock)
/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX 0
@@ -52,8 +54,9 @@ static DEFINE_MUTEX(knav_dev_lock);
#define knav_queue_idx_to_inst(kdev, idx) \
(kdev->instances + (idx << kdev->inst_shift))
-#define for_each_handle_rcu(qh, inst) \
- list_for_each_entry_rcu(qh, &inst->handles, list)
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list, \
+ knav_dev_lock_held())
#define for_each_instance(idx, inst, kdev) \
for (idx = 0, inst = kdev->instances; \
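The extra lockdep_is_held() argument tells list_for_each_entry_rcu() that traversal is legitimate while holding knav_dev_lock as well as under rcu_read_lock(), silencing false RCU-lockdep splats. A generic hedged sketch of the pattern (all types and names are illustrative, not knav-specific):

    #include <linux/mutex.h>
    #include <linux/rculist.h>

    struct demo_item { int val; struct list_head list; };
    static LIST_HEAD(demo_items);
    static DEFINE_MUTEX(demo_lock);    /* held by list writers */

    static int demo_sum(void)
    {
        struct demo_item *it;
        int sum = 0;

        rcu_read_lock();
        /* Safe under either rcu_read_lock() or demo_lock */
        list_for_each_entry_rcu(it, &demo_items, list,
                                lockdep_is_held(&demo_lock))
            sum += it->val;
        rcu_read_unlock();

        return sum;
    }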
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 378369d9364a..e9ece45d7a33 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
ret = rproc_boot(m3_ipc->rproc);
if (ret)
dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
do_exit(0);
}
@@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
- m3_ipc_state = m3_ipc;
-
return 0;
err_put_rproc:
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 01e76b58dd78..223f1f9d0922 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -21,11 +21,15 @@ config ZYNQMP_POWER
bool "Enable Xilinx Zynq MPSoC Power Management driver"
depends on PM && ARCH_ZYNQMP
default y
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
help
Say yes to enable power management support for ZynqMP SoC.
This driver uses firmware driver as an interface for power
management request to firmware. It registers isr to handle
- power management callbacks from firmware.
+ power management callbacks from firmware. It also registers a mailbox
+ client to handle those callbacks when they arrive via the IPI mailbox.
+
If in doubt, say N.
config ZYNQMP_PM_DOMAINS
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a840c0272135..a3aa40996f13 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -511,7 +511,7 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENODEV;
}
- xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->vcu_slcr_ba) {
dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
@@ -524,7 +524,7 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENODEV;
}
- xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->logicore_reg_ba) {
dev_err(&pdev->dev, "logicore register mapping failed.\n");
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 1b9d14411a15..09227895d216 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Power Management
*
- * Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2014-2019 Xilinx, Inc.
*
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
@@ -16,6 +16,21 @@
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+
+/**
+ * struct zynqmp_pm_work_struct - Wrapper for struct work_struct
+ * @callback_work: Work structure
+ * @args: Callback arguments
+ */
+struct zynqmp_pm_work_struct {
+ struct work_struct callback_work;
+ u32 args[CB_ARG_CNT];
+};
+
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+static struct mbox_chan *rx_chan;
+static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -31,7 +46,6 @@ static const char *const suspend_modes[] = {
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
@@ -68,6 +82,53 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static void ipi_receive_callback(struct mbox_client *cl, void *data)
+{
+ struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
+ u32 payload[CB_PAYLOAD_SIZE];
+ int ret;
+
+ memcpy(payload, msg->data, sizeof(payload));
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq,
+ &zynqmp_pm_init_suspend_work->callback_work);
+
+ /* Send NULL message to mbox controller to ack the message */
+ ret = mbox_send_message(rx_chan, NULL);
+ if (ret)
+ pr_err("IPI ack failed. Error %d\n", ret);
+ }
+}
+
+/**
+ * zynqmp_pm_init_suspend_work_fn - Initialize suspend
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
+{
+ struct zynqmp_pm_work_struct *pm_work =
+ container_of(work, struct zynqmp_pm_work_struct, callback_work);
+
+ if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
+ orderly_poweroff(true);
+ } else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
+ pm_suspend(PM_SUSPEND_MEM);
+ } else {
+ pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
+ __func__, pm_work->args[0]);
+ }
+}
+
static ssize_t suspend_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -119,6 +180,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
u32 pm_api_version;
+ struct mbox_client *client;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
@@ -134,17 +196,46 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&pdev->dev), &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
- "with %d\n", irq, ret);
- return ret;
+ if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ zynqmp_pm_init_suspend_work =
+ devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work)
+ return -ENOMEM;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+ client->rx_callback = ipi_receive_callback;
+
+ rx_chan = mbox_request_channel_byname(client, "rx");
+ if (IS_ERR(rx_chan)) {
+ dev_err(&pdev->dev, "Failed to request rx channel\n");
+ return PTR_ERR(rx_chan);
+ }
+ } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
+ "failed with %d\n", irq, ret);
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "Required property not found in DT node\n");
+ return -ENOENT;
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
@@ -160,6 +251,9 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (rx_chan)
+ mbox_free_channel(rx_chan);
+
return 0;
}
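The new mailbox path treats the first word of the IPI payload as the callback API ID and the remaining words as arguments. A standalone sketch of that layout (CB_PAYLOAD_SIZE and the reason-code values are assumptions for the demo, not taken from the headers):

#include <stdint.h>
#include <stdio.h>

#define CB_PAYLOAD_SIZE		7	/* assumed: one ID word plus args */
#define PM_INIT_SUSPEND_CB	30
#define SUSPEND_SYSTEM_SHUTDOWN	1	/* hypothetical reason codes */
#define SUSPEND_POWER_REQUEST	2

static void handle_payload(const uint32_t *payload)
{
	/* first element is the callback API ID, the rest are arguments */
	if (payload[0] != PM_INIT_SUSPEND_CB)
		return;

	switch (payload[1]) {
	case SUSPEND_SYSTEM_SHUTDOWN:
		puts("-> orderly_poweroff()");
		break;
	case SUSPEND_POWER_REQUEST:
		puts("-> pm_suspend(PM_SUSPEND_MEM)");
		break;
	default:
		printf("Unsupported InitSuspendCb reason code %u\n",
		       payload[1]);
	}
}

int main(void)
{
	uint32_t payload[CB_PAYLOAD_SIZE] = { PM_INIT_SUSPEND_CB, 2 };

	handle_payload(payload);
	return 0;
}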
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c725d0a8b288..fa2b4ab92ed9 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -31,4 +31,13 @@ config SOUNDWIRE_INTEL
enable this config option to get the SoundWire support for that
device.
+config SOUNDWIRE_QCOM
+ tristate "Qualcomm SoundWire Master driver"
+ depends on SLIMBUS
+ depends on SND_SOC
+ help
+ Qualcomm SoundWire Master driver.
+ If you have a Qualcomm platform which has a SoundWire Master then
+ enable this config option to get the SoundWire support for that
+ device.
endif
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 563894e5ecaf..e2cdff990e9f 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -21,3 +21,7 @@ obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
soundwire-intel-init-objs := intel_init.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
+
+# Qualcomm driver
+soundwire-qcom-objs := qcom.o
+obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index be5d437058ed..6106577fb3ed 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -456,26 +456,35 @@ err:
static int sdw_assign_device_num(struct sdw_slave *slave)
{
int ret, dev_num;
+ bool new_device = false;
/* check first if device number is assigned, if so reuse that */
if (!slave->dev_num) {
- mutex_lock(&slave->bus->bus_lock);
- dev_num = sdw_get_device_num(slave);
- mutex_unlock(&slave->bus->bus_lock);
- if (dev_num < 0) {
- dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
- dev_num);
- return dev_num;
+ if (!slave->dev_num_sticky) {
+ mutex_lock(&slave->bus->bus_lock);
+ dev_num = sdw_get_device_num(slave);
+ mutex_unlock(&slave->bus->bus_lock);
+ if (dev_num < 0) {
+ dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
+ dev_num);
+ return dev_num;
+ }
+ slave->dev_num = dev_num;
+ slave->dev_num_sticky = dev_num;
+ new_device = true;
+ } else {
+ slave->dev_num = slave->dev_num_sticky;
}
- } else {
+ }
+
+ if (!new_device)
dev_info(slave->bus->dev,
- "Slave already registered dev_num:%d\n",
+ "Slave already registered, reusing dev_num:%d\n",
slave->dev_num);
- /* Clear the slave->dev_num to transfer message on device 0 */
- dev_num = slave->dev_num;
- slave->dev_num = 0;
- }
+ /* Clear the slave->dev_num to transfer message on device 0 */
+ dev_num = slave->dev_num;
+ slave->dev_num = 0;
ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
if (ret < 0) {
@@ -485,7 +494,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
}
/* After xfer of msg, restore dev_num */
- slave->dev_num = dev_num;
+ slave->dev_num = slave->dev_num_sticky;
return 0;
}
@@ -979,6 +988,24 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
struct sdw_slave *slave;
int i, ret = 0;
+ /* first check if any Slaves fell off the bus */
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ mutex_lock(&bus->bus_lock);
+ if (!test_bit(i, bus->assigned)) {
+ mutex_unlock(&bus->bus_lock);
+ continue;
+ }
+ mutex_unlock(&bus->bus_lock);
+
+ slave = sdw_get_slave(bus, i);
+ if (!slave)
+ continue;
+
+ if (status[i] == SDW_SLAVE_UNATTACHED &&
+ slave->status != SDW_SLAVE_UNATTACHED)
+ sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+ }
+
if (status[0] == SDW_SLAVE_ATTACHED) {
dev_dbg(bus->dev, "Slave attached, programming device number\n");
ret = sdw_program_device_num(bus);
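The bus.c change above introduces dev_num_sticky so a Slave that falls off the bus and re-enumerates gets its previous device number back instead of a fresh allocation. A minimal userspace model of that policy (the struct and allocator are illustrative, not the bus API):

#include <stdio.h>

struct slave {
	int dev_num;		/* 0 while the device answers on address 0 */
	int dev_num_sticky;	/* first number assigned, kept across resets */
};

static int assign_device_num(struct slave *s, int *next_free)
{
	if (!s->dev_num_sticky)
		s->dev_num_sticky = (*next_free)++;	/* brand new device */
	s->dev_num = s->dev_num_sticky;			/* reuse on re-attach */
	return s->dev_num;
}

int main(void)
{
	struct slave s = { 0, 0 };
	int next_free = 1;

	printf("first attach -> %d\n", assign_device_num(&s, &next_free));
	s.dev_num = 0;	/* device fell off the bus and re-enumerates */
	printf("re-attach    -> %d\n", assign_device_num(&s, &next_free));
	return 0;
}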
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index fed21e2b2277..9bec270d0fa4 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -74,6 +74,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_INTMASK 0x48
#define CDNS_MCP_INT_IRQ BIT(31)
+#define CDNS_MCP_INT_RESERVED1 GENMASK(30, 17)
#define CDNS_MCP_INT_WAKEUP BIT(16)
#define CDNS_MCP_INT_SLAVE_RSVD BIT(15)
#define CDNS_MCP_INT_SLAVE_ALERT BIT(14)
@@ -85,10 +86,12 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_INT_DATA_CLASH BIT(9)
#define CDNS_MCP_INT_PARITY BIT(8)
#define CDNS_MCP_INT_CMD_ERR BIT(7)
+#define CDNS_MCP_INT_RESERVED2 GENMASK(6, 4)
#define CDNS_MCP_INT_RX_NE BIT(3)
#define CDNS_MCP_INT_RX_WL BIT(2)
#define CDNS_MCP_INT_TXE BIT(1)
#define CDNS_MCP_INT_TXF BIT(0)
+#define CDNS_MCP_INT_RESERVED (CDNS_MCP_INT_RESERVED1 | CDNS_MCP_INT_RESERVED2)
#define CDNS_MCP_INTSET 0x4C
@@ -444,7 +447,8 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
time = wait_for_completion_timeout(&cdns->tx_complete,
msecs_to_jiffies(CDNS_TX_TIMEOUT));
if (!time) {
- dev_err(cdns->dev, "IO transfer timed out\n");
+ dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
+ cmd, msg->dev_num, msg->addr, msg->len);
msg->len = 0;
return SDW_CMD_TIMEOUT;
}
@@ -672,13 +676,36 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
/* first check if Slave reported multiple status */
if (set_status > 1) {
+ u32 val;
+
+ dev_warn_ratelimited(cdns->dev,
+ "Slave %d reported multiple Status: %d\n",
+ i, mask);
+
+ /* check latest status extracted from PING commands */
+ val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+ val >>= (i * 2);
+
+ switch (val & 0x3) {
+ case 0:
+ status[i] = SDW_SLAVE_UNATTACHED;
+ break;
+ case 1:
+ status[i] = SDW_SLAVE_ATTACHED;
+ break;
+ case 2:
+ status[i] = SDW_SLAVE_ALERT;
+ break;
+ case 3:
+ default:
+ status[i] = SDW_SLAVE_RESERVED;
+ break;
+ }
+
dev_warn_ratelimited(cdns->dev,
- "Slave reported multiple Status: %d\n",
- mask);
- /*
- * TODO: we need to reread the status here by
- * issuing a PING cmd
- */
+ "Slave %d status updated to %d\n",
+ i, status[i]);
+
}
}
@@ -705,6 +732,10 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);
+ /* check for reserved values read as zero */
+ if (int_status & CDNS_MCP_INT_RESERVED)
+ return IRQ_NONE;
+
if (!(int_status & CDNS_MCP_INT_IRQ))
return IRQ_NONE;
@@ -812,8 +843,9 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
EXPORT_SYMBOL(sdw_cdns_exit_reset);
/**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts
* @cdns: Cadence instance
+ * @state: True to enable interrupts, false to disable them.
*/
int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
{
@@ -849,12 +881,21 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
mask = interrupt_mask;
update_masks:
+ /* clear slave interrupt status before enabling interrupt */
+ if (state) {
+ u32 slave_state;
+
+ slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave_state);
+ slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
+ }
+
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- /* commit changes */
- return cdns_update_config(cdns);
+ return 0;
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -948,8 +989,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
ret = cdns_allocate_pdi(cdns, &stream->out,
stream->num_out, offset);
- offset += stream->num_out;
-
if (ret)
return ret;
@@ -1224,8 +1263,10 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* cdns_find_pdi() - Find a free PDI
*
* @cdns: Cadence instance
+ * @offset: Starting offset
* @num: Number of PDIs
* @pdi: PDI instances
+ * @dai_id: DAI id
*
* Find a PDI for a given PDI array. The PDI num and dai_id are
* expected to match, return NULL otherwise.
@@ -1277,6 +1318,7 @@ EXPORT_SYMBOL(sdw_cdns_config_stream);
* @stream: Stream to be allocated
* @ch: Channel count
* @dir: Data direction
+ * @dai_id: DAI id
*/
struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
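When a Slave reports multiple concurrent states, the Cadence driver now re-reads CDNS_MCP_SLAVE_STAT, where each device occupies a two-bit field, and keeps the latest PING status. A standalone sketch of the two-bit decode (the 0/1/2/3 mapping follows the switch added above; the sample register value is arbitrary):

#include <stdint.h>
#include <stdio.h>

static const char *const names[] = {
	"UNATTACHED", "ATTACHED", "ALERT", "RESERVED",
};

static unsigned int decode_slave_status(uint32_t slave_stat, unsigned int i)
{
	/* two status bits per device, device i at bits [2i+1:2i] */
	return (slave_stat >> (i * 2)) & 0x3;
}

int main(void)
{
	uint32_t stat = 0x2D;	/* 0b00101101: sample register value */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("slave %u -> %s\n", i,
		       names[decode_slave_status(stat, i)]);
	return 0;
}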
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 99dc61021211..06ef3a3ac080 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -529,17 +529,24 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}
-static int intel_config_stream(struct sdw_intel *sdw,
+static int intel_params_stream(struct sdw_intel *sdw,
struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
- struct snd_pcm_hw_params *hw_params, int link_id)
+ struct snd_pcm_hw_params *hw_params,
+ int link_id, int alh_stream_id)
{
struct sdw_intel_link_res *res = sdw->res;
+ struct sdw_intel_stream_params_data params_data;
- if (res->ops && res->ops->config_stream && res->arg)
- return res->ops->config_stream(res->arg,
- substream, dai, hw_params, link_id);
+ params_data.substream = substream;
+ params_data.dai = dai;
+ params_data.hw_params = hw_params;
+ params_data.link_id = link_id;
+ params_data.alh_stream_id = alh_stream_id;
+ if (res->ops && res->ops->params_stream && res->dev)
+ return res->ops->params_stream(res->dev,
+ &params_data);
return -EIO;
}
@@ -654,7 +661,8 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
/* Inform DSP about PDI stream number */
- ret = intel_config_stream(sdw, substream, dai, params,
+ ret = intel_params_stream(sdw, substream, dai, params,
+ sdw->instance,
pdi->intel_alh_id);
if (ret)
goto error;
@@ -872,6 +880,9 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
"intel-sdw-ip-clock",
&prop->mclk_freq);
+ /* the values reported by BIOS are the 2x clock, not the bus clock */
+ prop->mclk_freq /= 2;
+
fwnode_property_read_u32(link,
"intel-quirk-mask",
&quirk_mask);
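The intel.c rework replaces a long argument list with a single sdw_intel_stream_params_data bundle passed through the ops table. A minimal model of that pattern (struct and callback names here are illustrative stand-ins, not the real SOF API):

#include <stdio.h>

struct stream_params_data {
	int link_id;
	int alh_stream_id;
};

struct intel_ops {
	int (*params_stream)(void *dev, struct stream_params_data *d);
};

static int dsp_params_stream(void *dev, struct stream_params_data *d)
{
	printf("link %d, ALH stream %d\n", d->link_id, d->alh_stream_id);
	return 0;
}

int main(void)
{
	struct intel_ops ops = { .params_stream = dsp_params_stream };
	struct stream_params_data data = { .link_id = 0, .alh_stream_id = 7 };

	/* callers fill the struct once; new fields don't change the signature */
	if (ops.params_stream)
		return ops.params_stream(NULL, &data);
	return -5;	/* -EIO, as in the fallback above */
}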
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index d923b6262330..38b7c125fb10 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -5,23 +5,26 @@
#define __SDW_INTEL_LOCAL_H
/**
- * struct sdw_intel_link_res - Soundwire link resources
+ * struct sdw_intel_link_res - Soundwire Intel link resource structure,
+ * typically populated by the controller driver.
+ * @pdev: platform_device
+ * @mmio_base: mmio base of SoundWire registers
* @registers: Link IO registers base
* @shim: Audio shim pointer
* @alh: ALH (Audio Link Hub) pointer
* @irq: Interrupt line
* @ops: Shim callback ops
- * @arg: Shim callback ops argument
- *
- * This is set as pdata for each link instance.
+ * @dev: device implementing hw_params and free callbacks
*/
struct sdw_intel_link_res {
+ struct platform_device *pdev;
+ void __iomem *mmio_base; /* not strictly needed, useful for debug */
void __iomem *registers;
void __iomem *shim;
void __iomem *alh;
int irq;
const struct sdw_intel_ops *ops;
- void *arg;
+ struct device *dev;
};
#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 2a2b4d8df462..4b769409f6f8 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -27,19 +27,9 @@ static int link_mask;
module_param_named(sdw_link_mask, link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
-struct sdw_link_data {
- struct sdw_intel_link_res res;
- struct platform_device *pdev;
-};
-
-struct sdw_intel_ctx {
- int count;
- struct sdw_link_data *links;
-};
-
static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
{
- struct sdw_link_data *link = ctx->links;
+ struct sdw_intel_link_res *link = ctx->links;
int i;
if (!link)
@@ -62,7 +52,7 @@ static struct sdw_intel_ctx
{
struct platform_device_info pdevinfo;
struct platform_device *pdev;
- struct sdw_link_data *link;
+ struct sdw_intel_link_res *link;
struct sdw_intel_ctx *ctx;
struct acpi_device *adev;
int ret, i;
@@ -123,14 +113,13 @@ static struct sdw_intel_ctx
continue;
}
- link->res.irq = res->irq;
- link->res.registers = res->mmio_base + SDW_LINK_BASE
+ link->registers = res->mmio_base + SDW_LINK_BASE
+ (SDW_LINK_SIZE * i);
- link->res.shim = res->mmio_base + SDW_SHIM_BASE;
- link->res.alh = res->mmio_base + SDW_ALH_BASE;
+ link->shim = res->mmio_base + SDW_SHIM_BASE;
+ link->alh = res->mmio_base + SDW_ALH_BASE;
- link->res.ops = res->ops;
- link->res.arg = res->arg;
+ link->ops = res->ops;
+ link->dev = res->dev;
memset(&pdevinfo, 0, sizeof(pdevinfo));
@@ -138,8 +127,6 @@ static struct sdw_intel_ctx
pdevinfo.name = "int-sdw";
pdevinfo.id = i;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
- pdevinfo.data = &link->res;
- pdevinfo.size_data = sizeof(link->res);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
@@ -216,7 +203,6 @@ void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
return sdw_intel_add_controller(res);
}
-EXPORT_SYMBOL(sdw_intel_init);
/**
* sdw_intel_exit() - SoundWire Intel exit
@@ -224,10 +210,8 @@ EXPORT_SYMBOL(sdw_intel_init);
*
* Delete the controller instances created and cleanup
*/
-void sdw_intel_exit(void *arg)
+void sdw_intel_exit(struct sdw_intel_ctx *ctx)
{
- struct sdw_intel_ctx *ctx = arg;
-
sdw_intel_cleanup_pdev(ctx);
kfree(ctx);
}
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
new file mode 100644
index 000000000000..1c6c6a2e0def
--- /dev/null
+++ b/drivers/soundwire/qcom.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019, Linaro Limited
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/slimbus.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "bus.h"
+
+#define SWRM_COMP_HW_VERSION 0x00
+#define SWRM_COMP_CFG_ADDR 0x04
+#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
+#define SWRM_COMP_CFG_ENABLE_MSK BIT(0)
+#define SWRM_COMP_PARAMS 0x100
+#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK GENMASK(4, 0)
+#define SWRM_COMP_PARAMS_DIN_PORTS_MASK GENMASK(9, 5)
+#define SWRM_INTERRUPT_STATUS 0x200
+#define SWRM_INTERRUPT_STATUS_RMSK GENMASK(16, 0)
+#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED BIT(1)
+#define SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS BIT(2)
+#define SWRM_INTERRUPT_STATUS_CMD_ERROR BIT(7)
+#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED BIT(10)
+#define SWRM_INTERRUPT_MASK_ADDR 0x204
+#define SWRM_INTERRUPT_CLEAR 0x208
+#define SWRM_CMD_FIFO_WR_CMD 0x300
+#define SWRM_CMD_FIFO_RD_CMD 0x304
+#define SWRM_CMD_FIFO_CMD 0x308
+#define SWRM_CMD_FIFO_STATUS 0x30C
+#define SWRM_CMD_FIFO_CFG_ADDR 0x314
+#define SWRM_RD_WR_CMD_RETRIES 0x7
+#define SWRM_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_ENUMERATOR_CFG_ADDR 0x500
+#define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m) (0x101C + 0x40 * (m))
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT 3
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK GENMASK(2, 0)
+#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK GENMASK(7, 3)
+#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT 0
+#define SWRM_MCP_CFG_ADDR 0x1048
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK GENMASK(21, 17)
+#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT 0x11
+#define SWRM_DEF_CMD_NO_PINGS 0x1f
+#define SWRM_MCP_STATUS 0x104C
+#define SWRM_MCP_STATUS_BANK_NUM_MASK BIT(0)
+#define SWRM_MCP_SLV_STATUS 0x1090
+#define SWRM_MCP_SLV_STATUS_MASK GENMASK(1, 0)
+#define SWRM_DP_PORT_CTRL_BANK(n, m) (0x1124 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT 0x18
+#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT 0x10
+#define SWRM_DP_PORT_CTRL_OFFSET1_SHFT 0x08
+#define SWRM_AHB_BRIDGE_WR_DATA_0 0xc85
+#define SWRM_AHB_BRIDGE_WR_ADDR_0 0xc89
+#define SWRM_AHB_BRIDGE_RD_ADDR_0 0xc8d
+#define SWRM_AHB_BRIDGE_RD_DATA_0 0xc91
+
+#define SWRM_REG_VAL_PACK(data, dev, id, reg) \
+ ((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
+
+#define SWRM_MAX_ROW_VAL 0 /* Rows = 48 */
+#define SWRM_DEFAULT_ROWS 48
+#define SWRM_MIN_COL_VAL 0 /* Cols = 2 */
+#define SWRM_DEFAULT_COL 16
+#define SWRM_MAX_COL_VAL 7
+#define SWRM_SPECIAL_CMD_ID 0xF
+#define MAX_FREQ_NUM 1
+#define TIMEOUT_MS (2 * HZ)
+#define QCOM_SWRM_MAX_RD_LEN 0xf
+#define QCOM_SDW_MAX_PORTS 14
+#define DEFAULT_CLK_FREQ 9600000
+#define SWRM_MAX_DAIS 0xF
+
+struct qcom_swrm_port_config {
+ u8 si;
+ u8 off1;
+ u8 off2;
+};
+
+struct qcom_swrm_ctrl {
+ struct sdw_bus bus;
+ struct device *dev;
+ struct regmap *regmap;
+ struct completion *comp;
+ struct work_struct slave_work;
+ /* read/write lock */
+ spinlock_t comp_lock;
+ /* Port alloc/free lock */
+ struct mutex port_lock;
+ struct clk *hclk;
+ u8 wr_cmd_id;
+ u8 rd_cmd_id;
+ int irq;
+ unsigned int version;
+ int num_din_ports;
+ int num_dout_ports;
+ unsigned long dout_port_mask;
+ unsigned long din_port_mask;
+ struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
+ struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
+ enum sdw_slave_status status[SDW_MAX_DEVICES];
+ int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
+ int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
+};
+
+#define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus)
+
+static int qcom_swrm_ahb_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
+ u32 *val)
+{
+ struct regmap *wcd_regmap = ctrl->regmap;
+ int ret;
+
+ /* pg register + offset */
+ ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_RD_ADDR_0,
+ (u8 *)&reg, 4);
+ if (ret < 0)
+ return SDW_CMD_FAIL;
+
+ ret = regmap_bulk_read(wcd_regmap, SWRM_AHB_BRIDGE_RD_DATA_0,
+ val, 4);
+ if (ret < 0)
+ return SDW_CMD_FAIL;
+
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_ahb_reg_write(struct qcom_swrm_ctrl *ctrl,
+ int reg, int val)
+{
+ struct regmap *wcd_regmap = ctrl->regmap;
+ int ret;
+ /* pg register + offset */
+ ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_WR_DATA_0,
+ (u8 *)&val, 4);
+ if (ret)
+ return SDW_CMD_FAIL;
+
+ /* write address register */
+ ret = regmap_bulk_write(wcd_regmap, SWRM_AHB_BRIDGE_WR_ADDR_0,
+ (u8 *)&reg, 4);
+ if (ret)
+ return SDW_CMD_FAIL;
+
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
+ u8 dev_addr, u16 reg_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(comp);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = &comp;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+ val = SWRM_REG_VAL_PACK(cmd_data, dev_addr,
+ SWRM_SPECIAL_CMD_ID, reg_addr);
+ ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_WR_CMD, val);
+ if (ret)
+ goto err;
+
+ ret = wait_for_completion_timeout(ctrl->comp,
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret)
+ ret = SDW_CMD_IGNORED;
+ else
+ ret = SDW_CMD_OK;
+err:
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = NULL;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+ return ret;
+}
+
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
+ u8 dev_addr, u16 reg_addr,
+ u32 len, u8 *rval)
+{
+ int i, ret;
+ u32 val;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = &comp;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+ val = SWRM_REG_VAL_PACK(len, dev_addr, SWRM_SPECIAL_CMD_ID, reg_addr);
+ ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_RD_CMD, val);
+ if (ret)
+ goto err;
+
+ ret = wait_for_completion_timeout(ctrl->comp,
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+ ret = SDW_CMD_IGNORED;
+ goto err;
+ } else {
+ ret = SDW_CMD_OK;
+ }
+
+ for (i = 0; i < len; i++) {
+ ctrl->reg_read(ctrl, SWRM_CMD_FIFO_RD_FIFO_ADDR, &val);
+ rval[i] = val & 0xFF;
+ }
+
+err:
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ ctrl->comp = NULL;
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+
+ return ret;
+}
+
+static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
+{
+ u32 val;
+ int i;
+
+ ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
+
+ for (i = 0; i < SDW_MAX_DEVICES; i++) {
+ u32 s;
+
+ s = (val >> (i * 2));
+ s &= SWRM_MCP_SLV_STATUS_MASK;
+ ctrl->status[i] = s;
+ }
+}
+
+static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_id;
+ u32 sts, value;
+ unsigned long flags;
+
+ ctrl->reg_read(ctrl, SWRM_INTERRUPT_STATUS, &sts);
+
+ if (sts & SWRM_INTERRUPT_STATUS_CMD_ERROR) {
+ ctrl->reg_read(ctrl, SWRM_CMD_FIFO_STATUS, &value);
+ dev_err_ratelimited(ctrl->dev,
+ "CMD error, fifo status 0x%x\n",
+ value);
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
+ }
+
+ if ((sts & SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED) ||
+ (sts & SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS))
+ schedule_work(&ctrl->slave_work);
+
+ /*
+ * Clear the interrupt before complete() is called, as complete()
+ * can schedule new reads/writes which require interrupts; clearing
+ * the interrupt first avoids missing interrupts in such cases.
+ */
+ ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR, sts);
+
+ if (sts & SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED) {
+ spin_lock_irqsave(&ctrl->comp_lock, flags);
+ if (ctrl->comp)
+ complete(ctrl->comp);
+ spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
+{
+ u32 val;
+
+ /* Clear Rows and Cols */
+ val = (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
+ SWRM_MIN_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+
+ ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
+
+ /* Disable Auto enumeration */
+ ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 0);
+
+ /* Mask soundwire interrupts */
+ ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
+ SWRM_INTERRUPT_STATUS_RMSK);
+
+ /* Configure No pings */
+ ctrl->reg_read(ctrl, SWRM_MCP_CFG_ADDR, &val);
+ val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
+ val |= (SWRM_DEF_CMD_NO_PINGS <<
+ SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
+ ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
+
+ /* Configure number of retries of a read/write cmd */
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR, SWRM_RD_WR_CMD_RETRIES);
+
+ /* Set IRQ to PULSE */
+ ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
+ SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK |
+ SWRM_COMP_CFG_ENABLE_MSK);
+ return 0;
+}
+
+static enum sdw_command_response qcom_swrm_xfer_msg(struct sdw_bus *bus,
+ struct sdw_msg *msg)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ int ret, i, len;
+
+ if (msg->flags == SDW_MSG_FLAG_READ) {
+ for (i = 0; i < msg->len;) {
+ if ((msg->len - i) < QCOM_SWRM_MAX_RD_LEN)
+ len = msg->len - i;
+ else
+ len = QCOM_SWRM_MAX_RD_LEN;
+
+ ret = qcom_swrm_cmd_fifo_rd_cmd(ctrl, msg->dev_num,
+ msg->addr + i, len,
+ &msg->buf[i]);
+ if (ret)
+ return ret;
+
+ i = i + len;
+ }
+ } else if (msg->flags == SDW_MSG_FLAG_WRITE) {
+ for (i = 0; i < msg->len; i++) {
+ ret = qcom_swrm_cmd_fifo_wr_cmd(ctrl, msg->buf[i],
+ msg->dev_num,
+ msg->addr + i);
+ if (ret)
+ return SDW_CMD_IGNORED;
+ }
+ }
+
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_pre_bank_switch(struct sdw_bus *bus)
+{
+ u32 reg = SWRM_MCP_FRAME_CTRL_BANK_ADDR(bus->params.next_bank);
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 val;
+
+ ctrl->reg_read(ctrl, reg, &val);
+
+ val &= ~SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK;
+ val &= ~SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK;
+
+ val |= (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
+ SWRM_MAX_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+
+ return ctrl->reg_write(ctrl, reg, val);
+}
+
+static int qcom_swrm_port_params(struct sdw_bus *bus,
+ struct sdw_port_params *p_params,
+ unsigned int bank)
+{
+ /* TBD */
+ return 0;
+}
+
+static int qcom_swrm_transport_params(struct sdw_bus *bus,
+ struct sdw_transport_params *params,
+ enum sdw_reg_bank bank)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 value;
+
+ value = params->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
+ value |= params->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
+ value |= params->sample_interval - 1;
+
+ return ctrl->reg_write(ctrl,
+ SWRM_DP_PORT_CTRL_BANK((params->port_num), bank),
+ value);
+}
+
+static int qcom_swrm_port_enable(struct sdw_bus *bus,
+ struct sdw_enable_ch *enable_ch,
+ unsigned int bank)
+{
+ u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ u32 val;
+
+ ctrl->reg_read(ctrl, reg, &val);
+
+ if (enable_ch->enable)
+ val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+ else
+ val &= ~(0xff << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
+
+ return ctrl->reg_write(ctrl, reg, val);
+}
+
+static struct sdw_master_port_ops qcom_swrm_port_ops = {
+ .dpn_set_port_params = qcom_swrm_port_params,
+ .dpn_set_port_transport_params = qcom_swrm_transport_params,
+ .dpn_port_enable_ch = qcom_swrm_port_enable,
+};
+
+static struct sdw_master_ops qcom_swrm_ops = {
+ .xfer_msg = qcom_swrm_xfer_msg,
+ .pre_bank_switch = qcom_swrm_pre_bank_switch,
+};
+
+static int qcom_swrm_compute_params(struct sdw_bus *bus)
+{
+ struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+ struct sdw_port_runtime *p_rt;
+ struct qcom_swrm_port_config *pcfg;
+ int i = 0;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ pcfg = &ctrl->pconfig[p_rt->num - 1];
+ p_rt->transport_params.port_num = p_rt->num;
+ p_rt->transport_params.sample_interval = pcfg->si + 1;
+ p_rt->transport_params.offset1 = pcfg->off1;
+ p_rt->transport_params.offset2 = pcfg->off2;
+ }
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ pcfg = &ctrl->pconfig[i];
+ p_rt->transport_params.port_num = p_rt->num;
+ p_rt->transport_params.sample_interval =
+ pcfg->si + 1;
+ p_rt->transport_params.offset1 = pcfg->off1;
+ p_rt->transport_params.offset2 = pcfg->off2;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static u32 qcom_swrm_freq_tbl[MAX_FREQ_NUM] = {
+ DEFAULT_CLK_FREQ,
+};
+
+static void qcom_swrm_slave_wq(struct work_struct *work)
+{
+ struct qcom_swrm_ctrl *ctrl =
+ container_of(work, struct qcom_swrm_ctrl, slave_work);
+
+ qcom_swrm_get_device_status(ctrl);
+ sdw_handle_slave_status(&ctrl->bus, ctrl->status);
+}
+
+static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
+ struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt;
+ struct sdw_port_runtime *p_rt;
+ unsigned long *port_mask;
+
+ mutex_lock(&ctrl->port_lock);
+
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ if (m_rt->direction == SDW_DATA_DIR_RX)
+ port_mask = &ctrl->dout_port_mask;
+ else
+ port_mask = &ctrl->din_port_mask;
+
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node)
+ clear_bit(p_rt->num - 1, port_mask);
+ }
+
+ mutex_unlock(&ctrl->port_lock);
+}
+
+static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
+ struct sdw_stream_runtime *stream,
+ struct snd_pcm_hw_params *params,
+ int direction)
+{
+ struct sdw_port_config pconfig[QCOM_SDW_MAX_PORTS];
+ struct sdw_stream_config sconfig;
+ struct sdw_master_runtime *m_rt;
+ struct sdw_slave_runtime *s_rt;
+ struct sdw_port_runtime *p_rt;
+ unsigned long *port_mask;
+ int i, maxport, pn, nports = 0, ret = 0;
+
+ mutex_lock(&ctrl->port_lock);
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ if (m_rt->direction == SDW_DATA_DIR_RX) {
+ maxport = ctrl->num_dout_ports;
+ port_mask = &ctrl->dout_port_mask;
+ } else {
+ maxport = ctrl->num_din_ports;
+ port_mask = &ctrl->din_port_mask;
+ }
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ /* Port numbers start from 1 to 14 */
+ pn = find_first_zero_bit(port_mask, maxport);
+ if (pn > (maxport - 1)) {
+ dev_err(ctrl->dev, "All ports busy\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ set_bit(pn, port_mask);
+ pconfig[nports].num = pn + 1;
+ pconfig[nports].ch_mask = p_rt->ch_mask;
+ nports++;
+ }
+ }
+ }
+
+ if (direction == SNDRV_PCM_STREAM_CAPTURE)
+ sconfig.direction = SDW_DATA_DIR_TX;
+ else
+ sconfig.direction = SDW_DATA_DIR_RX;
+
+ /* hw parameters will be ignored as we only support PDM */
+ sconfig.ch_count = 1;
+ sconfig.frame_rate = params_rate(params);
+ sconfig.type = stream->type;
+ sconfig.bps = 1;
+ ret = sdw_stream_add_master(&ctrl->bus, &sconfig, pconfig,
+ nports, stream);
+err:
+ if (ret) {
+ for (i = 0; i < nports; i++)
+ clear_bit(pconfig[i].num - 1, port_mask);
+ }
+
+ mutex_unlock(&ctrl->port_lock);
+
+ return ret;
+}
+
+static int qcom_swrm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ struct sdw_stream_runtime *sruntime = ctrl->sruntime[dai->id];
+ int ret;
+
+ ret = qcom_swrm_stream_alloc_ports(ctrl, sruntime, params,
+ substream->stream);
+ if (ret)
+ qcom_swrm_stream_free_ports(ctrl, sruntime);
+
+ return ret;
+}
+
+static int qcom_swrm_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ struct sdw_stream_runtime *sruntime = ctrl->sruntime[dai->id];
+
+ qcom_swrm_stream_free_ports(ctrl, sruntime);
+ sdw_stream_remove_master(&ctrl->bus, sruntime);
+
+ return 0;
+}
+
+static int qcom_swrm_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+ ctrl->sruntime[dai->id] = stream;
+
+ return 0;
+}
+
+static int qcom_swrm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sruntime;
+ int ret, i;
+
+ sruntime = sdw_alloc_stream(dai->name);
+ if (!sruntime)
+ return -ENOMEM;
+
+ ctrl->sruntime[dai->id] = sruntime;
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sruntime,
+ substream->stream);
+ if (ret < 0 && ret != -ENOTSUPP) {
+ dev_err(dai->dev, "Failed to set sdw stream on %s\n",
+ rtd->codec_dais[i]->name);
+ sdw_release_stream(sruntime);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+ sdw_release_stream(ctrl->sruntime[dai->id]);
+ ctrl->sruntime[dai->id] = NULL;
+}
+
+static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
+ .hw_params = qcom_swrm_hw_params,
+ .hw_free = qcom_swrm_hw_free,
+ .startup = qcom_swrm_startup,
+ .shutdown = qcom_swrm_shutdown,
+ .set_sdw_stream = qcom_swrm_set_sdw_stream,
+};
+
+static const struct snd_soc_component_driver qcom_swrm_dai_component = {
+ .name = "soundwire",
+};
+
+static int qcom_swrm_register_dais(struct qcom_swrm_ctrl *ctrl)
+{
+ int num_dais = ctrl->num_dout_ports + ctrl->num_din_ports;
+ struct snd_soc_dai_driver *dais;
+ struct snd_soc_pcm_stream *stream;
+ struct device *dev = ctrl->dev;
+ int i;
+
+ /* PDM dais are only tested for now */
+ dais = devm_kcalloc(dev, num_dais, sizeof(*dais), GFP_KERNEL);
+ if (!dais)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dais; i++) {
+ dais[i].name = devm_kasprintf(dev, GFP_KERNEL, "SDW Pin%d", i);
+ if (!dais[i].name)
+ return -ENOMEM;
+
+ if (i < ctrl->num_dout_ports)
+ stream = &dais[i].playback;
+ else
+ stream = &dais[i].capture;
+
+ stream->channels_min = 1;
+ stream->channels_max = 1;
+ stream->rates = SNDRV_PCM_RATE_48000;
+ stream->formats = SNDRV_PCM_FMTBIT_S16_LE;
+
+ dais[i].ops = &qcom_swrm_pdm_dai_ops;
+ dais[i].id = i;
+ }
+
+ return devm_snd_soc_register_component(ctrl->dev,
+ &qcom_swrm_dai_component,
+ dais, num_dais);
+}
+
+static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
+{
+ struct device_node *np = ctrl->dev->of_node;
+ u8 off1[QCOM_SDW_MAX_PORTS];
+ u8 off2[QCOM_SDW_MAX_PORTS];
+ u8 si[QCOM_SDW_MAX_PORTS];
+ int i, ret, nports, val;
+
+ ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
+
+ ctrl->num_dout_ports = val & SWRM_COMP_PARAMS_DOUT_PORTS_MASK;
+ ctrl->num_din_ports = (val & SWRM_COMP_PARAMS_DIN_PORTS_MASK) >> 5;
+
+ ret = of_property_read_u32(np, "qcom,din-ports", &val);
+ if (ret)
+ return ret;
+
+ if (val > ctrl->num_din_ports)
+ return -EINVAL;
+
+ ctrl->num_din_ports = val;
+
+ ret = of_property_read_u32(np, "qcom,dout-ports", &val);
+ if (ret)
+ return ret;
+
+ if (val > ctrl->num_dout_ports)
+ return -EINVAL;
+
+ ctrl->num_dout_ports = val;
+
+ nports = ctrl->num_dout_ports + ctrl->num_din_ports;
+
+ ret = of_property_read_u8_array(np, "qcom,ports-offset1",
+ off1, nports);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u8_array(np, "qcom,ports-offset2",
+ off2, nports);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u8_array(np, "qcom,ports-sinterval-low",
+ si, nports);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nports; i++) {
+ ctrl->pconfig[i].si = si[i];
+ ctrl->pconfig[i].off1 = off1[i];
+ ctrl->pconfig[i].off2 = off2[i];
+ }
+
+ return 0;
+}
+
+static int qcom_swrm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdw_master_prop *prop;
+ struct sdw_bus_params *params;
+ struct qcom_swrm_ctrl *ctrl;
+ int ret;
+ u32 val;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ if (dev->parent->bus == &slimbus_bus) {
+ ctrl->reg_read = qcom_swrm_ahb_reg_read;
+ ctrl->reg_write = qcom_swrm_ahb_reg_write;
+ ctrl->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!ctrl->regmap)
+ return -EINVAL;
+ } else {
+ /* Only WCD based SoundWire controller is supported */
+ return -ENOTSUPP;
+ }
+
+ ctrl->irq = of_irq_get(dev->of_node, 0);
+ if (ctrl->irq < 0)
+ return ctrl->irq;
+
+ ctrl->hclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(ctrl->hclk))
+ return PTR_ERR(ctrl->hclk);
+
+ clk_prepare_enable(ctrl->hclk);
+
+ ctrl->dev = dev;
+ dev_set_drvdata(&pdev->dev, ctrl);
+ spin_lock_init(&ctrl->comp_lock);
+ mutex_init(&ctrl->port_lock);
+ INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
+
+ ctrl->bus.dev = dev;
+ ctrl->bus.ops = &qcom_swrm_ops;
+ ctrl->bus.port_ops = &qcom_swrm_port_ops;
+ ctrl->bus.compute_params = &qcom_swrm_compute_params;
+
+ ret = qcom_swrm_get_port_config(ctrl);
+ if (ret)
+ return ret;
+
+ params = &ctrl->bus.params;
+ params->max_dr_freq = DEFAULT_CLK_FREQ;
+ params->curr_dr_freq = DEFAULT_CLK_FREQ;
+ params->col = SWRM_DEFAULT_COL;
+ params->row = SWRM_DEFAULT_ROWS;
+ ctrl->reg_read(ctrl, SWRM_MCP_STATUS, &val);
+ params->curr_bank = val & SWRM_MCP_STATUS_BANK_NUM_MASK;
+ params->next_bank = !params->curr_bank;
+
+ prop = &ctrl->bus.prop;
+ prop->max_clk_freq = DEFAULT_CLK_FREQ;
+ prop->num_clk_gears = 0;
+ prop->num_clk_freq = MAX_FREQ_NUM;
+ prop->clk_freq = &qcom_swrm_freq_tbl[0];
+ prop->default_col = SWRM_DEFAULT_COL;
+ prop->default_row = SWRM_DEFAULT_ROWS;
+
+ ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &ctrl->version);
+
+ ret = devm_request_threaded_irq(dev, ctrl->irq, NULL,
+ qcom_swrm_irq_handler,
+ IRQF_TRIGGER_RISING,
+ "soundwire", ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to request soundwire irq\n");
+ goto err;
+ }
+
+ ret = sdw_add_bus_master(&ctrl->bus);
+ if (ret) {
+ dev_err(dev, "Failed to register Soundwire controller (%d)\n",
+ ret);
+ goto err;
+ }
+
+ qcom_swrm_init(ctrl);
+ ret = qcom_swrm_register_dais(ctrl);
+ if (ret)
+ goto err;
+
+ dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n",
+ (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
+ ctrl->version & 0xffff);
+
+ return 0;
+err:
+ clk_disable_unprepare(ctrl->hclk);
+ return ret;
+}
+
+static int qcom_swrm_remove(struct platform_device *pdev)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+
+ sdw_delete_bus_master(&ctrl->bus);
+ clk_disable_unprepare(ctrl->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_swrm_of_match[] = {
+ { .compatible = "qcom,soundwire-v1.3.0", },
+ {/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, qcom_swrm_of_match);
+
+static struct platform_driver qcom_swrm_driver = {
+ .probe = &qcom_swrm_probe,
+ .remove = &qcom_swrm_remove,
+ .driver = {
+ .name = "qcom-soundwire",
+ .of_match_table = qcom_swrm_of_match,
+ }
+};
+module_platform_driver(qcom_swrm_driver);
+
+MODULE_DESCRIPTION("Qualcomm soundwire driver");
+MODULE_LICENSE("GPL v2");
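The Qualcomm driver drives its command FIFOs with 32-bit words packed as reg | id<<16 | dev<<20 | data<<24. A standalone check of that packing (the macro body mirrors the driver's SWRM_REG_VAL_PACK; the sample values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define SWRM_REG_VAL_PACK(data, dev, id, reg) \
	((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))

int main(void)
{
	/* write 0xAB to register 0x04 of device 1 with command id 0xF */
	uint32_t val = SWRM_REG_VAL_PACK(0xABu, 1u, 0xFu, 0x04u);

	printf("FIFO word: 0x%08X\n", (unsigned int)val);	/* 0xAB1F0004 */
	return 0;
}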
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e69f94a8c3a8..178ae92b8cc1 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1554,8 +1554,6 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_prepare_stream(stream);
- if (ret < 0)
- pr_err("Prepare for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1622,8 +1620,6 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_enable_stream(stream);
- if (ret < 0)
- pr_err("Enable for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1698,8 +1694,6 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_disable_stream(stream);
- if (ret < 0)
- pr_err("Disable for stream:%s failed: %d\n", stream->name, ret);
sdw_release_bus_lock(stream);
return ret;
@@ -1756,8 +1750,6 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
ret = _sdw_deprepare_stream(stream);
- if (ret < 0)
- pr_err("De-prepare for stream:%d failed: %d\n", ret, ret);
sdw_release_bus_lock(stream);
return ret;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 870f7797b56b..d6ed0c355954 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI
This controller does not support generic SPI messages. It only
supports the high-level SPI memory interface.
+config SPI_HISI_SFC_V3XX
+ tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select CONFIG_MTD_SPI_NOR
+ help
+ This enables support for HiSilicon v3xx SPI-NOR flash controller
+ found in hi16xx chipsets.
+
config SPI_NXP_FLEXSPI
tristate "NXP Flex SPI controller"
depends on ARCH_LAYERSCAPE || HAS_IOMEM
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index bb49c9e6d0a0..9b65ec5afc5e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 56f0ca361deb..013458cabe3c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master,
master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
err = PTR_ERR(master->dma_tx);
- if (err == -EPROBE_DEFER) {
- dev_warn(dev, "no DMA channel available at the moment\n");
- goto error_clear;
- }
- dev_err(dev,
- "DMA TX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
- /*
- * No reason to check EPROBE_DEFER here since we have already requested
- * tx channel. If it fails here, it's for another reason.
- */
- master->dma_rx = dma_request_slave_channel(dev, "rx");
-
- if (!master->dma_rx) {
- dev_err(dev,
- "DMA RX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ master->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ err = PTR_ERR(master->dma_rx);
+ /*
+ * No reason to check EPROBE_DEFER here since we have already
+ * requested tx channel.
+ */
+ dev_err(dev, "No RX DMA channel, DMA is disabled\n");
goto error;
}
@@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
return 0;
error:
- if (master->dma_rx)
+ if (!IS_ERR(master->dma_rx))
dma_release_channel(master->dma_rx);
if (!IS_ERR(master->dma_tx))
dma_release_channel(master->dma_tx);
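spi-atmel now uses dma_request_chan(), which returns ERR_PTR() codes instead of NULL, so -EPROBE_DEFER can be told apart from a genuinely missing channel. A userspace sketch of the IS_ERR()/PTR_ERR() convention (the macros are re-implemented locally for the demo, and EPROBE_DEFER is kernel-internal, so it is defined here too):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517	/* kernel-internal errno, defined for the demo */
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(intptr_t)(p))
#define ERR_PTR(e)	((void *)(intptr_t)(e))

/* stand-in for dma_request_chan(): errors come back as ERR_PTR, not NULL */
static void *fake_dma_request_chan(long err)
{
	return err ? ERR_PTR(err) : (void *)0x1000;
}

int main(void)
{
	void *chan = fake_dma_request_chan(-EPROBE_DEFER);

	if (IS_ERR(chan)) {
		long err = PTR_ERR(chan);

		if (err == -EPROBE_DEFER)
			puts("defer probe, retry later");
		else
			puts("no DMA channel, DMA is disabled");
	}
	return 0;
}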
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 85bad70f59e3..23d295f36c80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
name = qspi_irq_tab[val].irq_name;
if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
/* get the l2 interrupts */
- irq = platform_get_irq_byname(pdev, name);
+ irq = platform_get_irq_byname_optional(pdev, name);
} else if (!num_ints && soc_intc) {
/* all mspi, bspi intrs muxed to one L1 intr */
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index fb61a620effc..11c235879bb7 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -68,7 +68,7 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
+#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
}
}
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
- struct bcm2835_spi *bs)
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+ struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
const __be32 *addr;
@@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
- goto err;
+ /* Fall back to interrupt mode */
+ return 0;
}
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
- ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!ctlr->dma_tx) {
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
goto err;
}
- ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!ctlr->dma_rx) {
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
goto err_release;
}
@@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
- return;
+ return 0;
err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
@@ -1005,7 +1010,14 @@ err_config:
err_release:
bcm2835_dma_release(ctlr, bs);
err:
- return;
+ /*
+ * Only report error for deferred probing, otherwise fall back to
+ * interrupt mode
+ */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
@@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
- dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+ else
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_controller_put;
}
@@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ if (err)
+ goto out_clk_disable;
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctlr);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
- goto out_clk_disable;
+ goto out_dma_release;
}
err = devm_spi_register_controller(&pdev->dev, ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
- goto out_clk_disable;
+ goto out_dma_release;
}
bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
+out_dma_release:
+ bcm2835_dma_release(ctlr, bs);
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_controller_put:
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index d84e22dd6f9f..68491a8bf7b5 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
+ bool custom_cs;
- if (!master || !bitbang->chipselect)
+ if (!master)
+ return -EINVAL;
+ /*
+ * We only need the chipselect callback if we are actually using it.
+ * If we just use GPIO descriptors, it is surplus. If the
+ * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+ * driver-specific chipselect routine.
+ */
+ custom_cs = (!master->use_gpio_descriptors ||
+ (master->flags & SPI_MASTER_GPIO_SS));
+
+ if (custom_cs && !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
@@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
- master->set_cs = spi_bitbang_set_cs;
+ /*
+ * When using GPIO descriptors, the ->set_cs() callback doesn't even
+ * get called unless SPI_MASTER_GPIO_SS is set.
+ */
+ if (custom_cs)
+ master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
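spi_bitbang_init() now only insists on a chipselect callback when chip select is not handled through GPIO descriptors, or when SPI_MASTER_GPIO_SS forces the driver routine to be called anyway. A small truth-table sketch of that condition (the flag value is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define SPI_MASTER_GPIO_SS	0x1	/* flag value is illustrative */

/* callback required when CS is not done via GPIO descriptors,
 * or when the controller insists on being called anyway */
static bool needs_custom_cs(bool use_gpio_descriptors, unsigned int flags)
{
	return !use_gpio_descriptors || (flags & SPI_MASTER_GPIO_SS);
}

int main(void)
{
	printf("%d %d %d\n",
	       needs_custom_cs(false, 0),			/* 1 */
	       needs_custom_cs(true, 0),			/* 0 */
	       needs_custom_cs(true, SPI_MASTER_GPIO_SS));	/* 1 */
	return 0;
}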
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index c36587b42e95..82a0ee09cbe1 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -168,16 +168,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
/**
* cdns_spi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
- * @enable: Select (1) or deselect (0) the chip select line
+ * @is_high: Select (0) or deselect (1) the chip select line
*/
-static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
+static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
{
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
- if (!enable) {
+ if (is_high) {
/* Deselect the slave */
ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index d12e149f1a41..fd6b9caffaf0 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -82,6 +82,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
error:
clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
spi_master_put(master);
return ret;
}
@@ -96,6 +97,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
return;
clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 2663bb12d9ce..0d86c37e0aeb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -301,7 +301,7 @@ int dw_spi_mid_init(struct dw_spi *dws)
void __iomem *clk_reg;
u32 clk_cdiv;
- clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
+ clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
if (!clk_reg)
return -ENOMEM;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index a92aa5cd4fbe..31e3f866d11a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -129,10 +129,11 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
+ /* Chip select logic is inverted from spi_set_cs() */
if (chip && chip->cs_control)
- chip->cs_control(enable);
+ chip->cs_control(!enable);
- if (enable)
+ if (!enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
@@ -171,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws)
static void dw_writer(struct dw_spi *dws)
{
- u32 max = tx_max(dws);
+ u32 max;
u16 txw = 0;
+ spin_lock(&dws->buf_lock);
+ max = tx_max(dws);
while (max--) {
/* Set the tx word if the transfer's original "tx" is not null */
if (dws->tx_end - dws->len) {
@@ -185,13 +188,16 @@ static void dw_writer(struct dw_spi *dws)
dw_write_io_reg(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
+ spin_unlock(&dws->buf_lock);
}
static void dw_reader(struct dw_spi *dws)
{
- u32 max = rx_max(dws);
+ u32 max;
u16 rxw;
+ spin_lock(&dws->buf_lock);
+ max = rx_max(dws);
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
/* Care rx only if the transfer's original "rx" is not null */
@@ -203,6 +209,7 @@ static void dw_reader(struct dw_spi *dws)
}
dws->rx += dws->n_bytes;
}
+ spin_unlock(&dws->buf_lock);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -275,18 +282,23 @@ static int dw_spi_transfer_one(struct spi_controller *master,
{
struct dw_spi *dws = spi_controller_get_devdata(master);
struct chip_data *chip = spi_get_ctldata(spi);
+ unsigned long flags;
u8 imask = 0;
u16 txlevel = 0;
u32 cr0;
int ret;
dws->dma_mapped = 0;
-
+ spin_lock_irqsave(&dws->buf_lock, flags);
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
dws->len = transfer->len;
+ spin_unlock_irqrestore(&dws->buf_lock, flags);
+
+ /* Ensure dw->rx and dw->rx_end are visible */
+ smp_mb();
spi_enable_chip(dws, 0);
@@ -460,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
struct spi_controller *master;
int ret;
- BUG_ON(dws == NULL);
+ if (!dws)
+ return -EINVAL;
master = spi_alloc_master(dev, 0);
if (!master)
@@ -470,6 +483,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+ spin_lock_init(&dws->buf_lock);
spi_controller_set_devdata(master, dws);
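The new dws->buf_lock serializes updates of the tx/rx buffer pointers against the FIFO reader/writer paths, so an interrupt never observes a half-updated pointer pair. A userspace model of the same idea, using a pthread mutex in place of the kernel spinlock (build with cc -pthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *tx, *tx_end;

/* transfer_one(): swap in a new buffer under the lock */
static void start_transfer(const char *buf, int len)
{
	pthread_mutex_lock(&buf_lock);
	tx = buf;
	tx_end = buf + len;
	pthread_mutex_unlock(&buf_lock);
}

/* dw_writer(): sample both pointers under the same lock */
static int bytes_left(void)
{
	int n;

	pthread_mutex_lock(&buf_lock);
	n = (int)(tx_end - tx);
	pthread_mutex_unlock(&buf_lock);
	return n;
}

int main(void)
{
	start_transfer("abcd", 4);
	printf("%d bytes left\n", bytes_left());
	return 0;
}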
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 38c7de1f0aa9..1bf5713e047d 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -119,6 +119,7 @@ struct dw_spi {
size_t len;
void *tx;
void *tx_end;
+ spinlock_t buf_lock;
void *rx;
void *rx_end;
int dma_mapped;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 442cff71a0d2..6ec2dcb8c57a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -185,6 +185,7 @@ struct fsl_dspi {
struct spi_transfer *cur_transfer;
struct spi_message *cur_msg;
struct chip_data *cur_chip;
+ size_t progress;
size_t len;
const void *tx;
void *rx;
@@ -395,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
if (!dma)
return -ENOMEM;
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
- if (!dma->chan_rx) {
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "rx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_rx);
return ret;
}
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
- if (!dma->chan_tx) {
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
dev_err(dev, "tx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_tx);
goto err_tx_channel;
}
@@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
- /* Write two TX FIFO entries first, and then the corresponding
- * CMD FIFO entry.
+ /* Write the CMD FIFO entry first, and then the two
+ * corresponding TX FIFO entries.
*/
u32 data = dspi_pop_tx(dspi);
- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
- /* LSB */
- tx_fifo_write(dspi, data & 0xFFFF);
- tx_fifo_write(dspi, data >> 16);
- } else {
- /* MSB */
- tx_fifo_write(dspi, data >> 16);
- tx_fifo_write(dspi, data & 0xFFFF);
- }
cmd_fifo_write(dspi);
+ tx_fifo_write(dspi, data & 0xFFFF);
+ tx_fifo_write(dspi, data >> 16);
} else {
/* Write one entry to both TX FIFO and CMD FIFO
* simultaneously.
@@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
u32 spi_tcr;
spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
- dspi->tx - dspi->bytes_per_word, !dspi->irq);
+ dspi->progress, !dspi->irq);
/* Get transfer counter (in number of SPI transfers). It was
* reset to 0 when transfer(s) were started.
@@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
/* Update total number of bytes that were transferred */
msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+ dspi->progress += spi_tcnt;
trans_mode = dspi->devtype_data->trans_mode;
if (trans_mode == DSPI_EOQ_MODE)
@@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
return 0;
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->tx, !dspi->irq);
+ dspi->progress, !dspi->irq);
if (trans_mode == DSPI_EOQ_MODE)
dspi_eoq_write(dspi);
@@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->rx = transfer->rx_buf;
dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
+ dspi->progress = 0;
/* Validated transfer specific frame size (defaults applied) */
dspi->bits_per_word = transfer->bits_per_word;
if (transfer->bits_per_word <= 8)
@@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_CTARE_DTCP(1));
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->tx, !dspi->irq);
+ dspi->progress, !dspi->irq);
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
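Why a word counter, in brief: dspi->tx points into the TX buffer and is NULL for RX-only transfers, so deriving a timestamp offset from pointer arithmetic is unreliable; counting completed SPI words yields a valid offset either way. A minimal sketch using the names from the hunks above:

/* Sketch: count completed words instead of TX-pointer arithmetic. */
spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);	/* words moved by the hardware */
dspi->progress += spi_tcnt;		/* valid even when dspi->tx is NULL */
spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
			dspi->progress, !dspi->irq);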
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 2cc0ddb4a988..d0b8cc741a24 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
if (fsl_lpspi_can_dma(controller, spi, t))
- fsl_lpspi->usedma = 1;
+ fsl_lpspi->usedma = true;
else
- fsl_lpspi->usedma = 0;
+ fsl_lpspi->usedma = false;
return fsl_lpspi_config(fsl_lpspi);
}
@@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_slave = is_slave;
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->slave_abort = fsl_lpspi_slave_abort;
+
+ ret = devm_spi_register_controller(&pdev->dev, controller);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_controller error.\n");
+ goto out_controller_put;
+ }
+
if (!fsl_lpspi->is_slave) {
for (i = 0; i < controller->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
controller->prepare_message = fsl_lpspi_prepare_message;
}
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->transfer_one = fsl_lpspi_transfer_one;
- controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
- controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
- controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- controller->dev.of_node = pdev->dev.of_node;
- controller->bus_num = pdev->id;
- controller->slave_abort = fsl_lpspi_slave_abort;
-
init_completion(&fsl_lpspi->xfer_done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
- ret = devm_spi_register_controller(&pdev->dev, controller);
- if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
- goto out_controller_put;
- }
-
return 0;
out_controller_put:
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 79b1558b74b8..e8a499cd1f13 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
op->data.nbytes > q->devtype_data->txfifo)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 114801a32371..3b81772fea0d 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -611,6 +611,7 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
master->setup = fsl_spi_setup;
master->cleanup = fsl_spi_cleanup;
master->transfer_one_message = fsl_spi_do_one_msg;
+ master->use_gpio_descriptors = true;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->max_bits_per_word = 32;
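The flag set above (also set for meson-spicc, oc-tiny and rspi further down) opts the driver into core-managed chip selects; a one-line sketch of what it replaces, under the assumption of a DT "cs-gpios" property:

/* Sketch: the SPI core claims and drives the CS descriptors itself, */
master->use_gpio_descriptors = true;
/* so driver-side gpio_request()/gpio_set_value() CS code can go away. */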
@@ -705,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- int irq = 0, type;
- int ret = -ENOMEM;
+ int irq, type;
+ int ret;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
@@ -721,37 +722,35 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
if (spisel_boot) {
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
- if (!pinfo->immr_spi_cs) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!pinfo->immr_spi_cs)
+ return -ENOMEM;
}
#endif
-
- pdata->cs_control = fsl_spi_cs_control;
+ /*
+ * Handle the case where we have one hardwired (always selected)
+ * device on the first "chipselect". Else we let the core code
+ * handle any GPIOs or native chip selects and assign the
+ * appropriate callback for dealing with the CS lines. This isn't
+ * supported on the GRLIB variant.
+ */
+ ret = gpiod_count(dev, "cs");
+ if (ret <= 0)
+ pdata->max_chipselect = 1;
+ else
+ pdata->cs_control = fsl_spi_cs_control;
}
ret = of_address_to_resource(np, 0, &mem);
if (ret)
- goto err;
+ return ret;
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- goto err;
- }
+ irq = platform_get_irq(ofdev, 0);
+ if (irq < 0)
+ return irq;
master = fsl_spi_probe(dev, &mem, irq);
- if (IS_ERR(master)) {
- ret = PTR_ERR(master);
- goto err;
- }
-
- return 0;
-err:
- irq_dispose_mapping(irq);
- return ret;
+ return PTR_ERR_OR_ZERO(master);
}
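A minimal sketch of the flattened error path, assuming fsl_spi_probe() returns either a valid pointer or an ERR_PTR(): platform_get_irq() hands back a negative errno directly and creates no mapping that would need irq_dispose_mapping(), so the err label disappears.

/* Sketch: every failure returns directly, nothing to unwind. */
irq = platform_get_irq(ofdev, 0);
if (irq < 0)
	return irq;

master = fsl_spi_probe(dev, &mem, irq);
return PTR_ERR_OR_ZERO(master);	/* 0 on success, else the encoded errno */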
static int of_fsl_spi_remove(struct platform_device *ofdev)
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
new file mode 100644
index 000000000000..4cf8fc80a7b7
--- /dev/null
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+struct hisi_sfc_v3xx_host {
+ struct device *dev;
+ void __iomem *regbase;
+ int max_cmd_dword;
+};
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+ !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+ HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+ HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+ uintptr_t addr = (uintptr_t)op->data.buf.in;
+ int max_byte_count;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ max_byte_count = host->max_cmd_dword * 4;
+
+ if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+ op->data.nbytes = 4 - (addr % 4);
+ else if (op->data.nbytes > max_byte_count)
+ op->data.nbytes = max_byte_count;
+
+ return 0;
+}
+
+/*
+ * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for
+ * the DATABUF registers - so use __io{read,write}32_copy when possible. For
+ * trailing bytes, copy them byte-by-byte from the DATABUF register, as we
+ * can't clobber outside the source/dest buffer.
+ *
+ * For efficient data read/write, we try to put any start 32b unaligned data
+ * into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+ u8 *to, unsigned int len)
+{
+ void __iomem *from;
+ int i;
+
+ from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)to, 4)) {
+ int words = len / 4;
+
+ __ioread32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val;
+
+ to += words * 4;
+ from += words * 4;
+
+ val = __raw_readl(from);
+
+ for (i = 0; i < len; i++, val >>= 8, to++)
+ *to = (u8)val;
+ }
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+ u32 val = __raw_readl(from);
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ to++, val >>= 8, j++)
+ *to = (u8)val;
+ }
+ }
+}
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+ const u8 *from, unsigned int len)
+{
+ void __iomem *to;
+ int i;
+
+ to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)from, 4)) {
+ int words = len / 4;
+
+ __iowrite32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val = 0;
+
+ to += words * 4;
+ from += words * 4;
+
+ for (i = 0; i < len; i++, from++)
+ val |= *from << i * 8;
+ __raw_writel(val, to);
+ }
+
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+ u32 val = 0;
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ from++, j++)
+ val |= *from << j * 8;
+ __raw_writel(val, to);
+ }
+ }
+}
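A condensed sketch of the tail handling both helpers share, with hypothetical names tail_len (< 4) and databuf: whole words go through __io{read,write}32_copy(), and any remainder is packed into a single 32b register access so the bus never sees a narrower transaction.

/* Sketch: pack trailing bytes into one 32b DATABUF write. */
u32 val = 0;
unsigned int i;

for (i = 0; i < tail_len; i++)
	val |= from[i] << (i * 8);	/* byte i into bits [8i+7:8i] */
__raw_writel(val, databuf);		/* single 32b access, no clobber */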
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ int ret, len = op->data.nbytes;
+ u32 config = 0;
+
+ if (op->addr.nbytes)
+ config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+ config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
+ else if (op->data.dir == SPI_MEM_DATA_IN)
+ config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+ config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+ chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+ HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+ writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+ writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+ writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+ ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+
+ return 0;
+}
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_device *spi = mem->spi;
+ u8 chip_select = spi->chip_select;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+ .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_controller *ctlr;
+ u32 version;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ host = spi_controller_get_devdata(ctlr);
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+
+ host->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->regbase)) {
+ ret = PTR_ERR(host->regbase);
+ goto err_put_master;
+ }
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+ version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+ switch (version) {
+ case 0x351:
+ host->max_cmd_dword = 64;
+ break;
+ default:
+ host->max_cmd_dword = 16;
+ break;
+ }
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_put_master;
+
+ dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+ return 0;
+
+err_put_master:
+ spi_master_put(ctlr);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+ {"HISI0341", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+ .driver = {
+ .name = "hisi-sfc-v3xx",
+ .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+ },
+ .probe = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index f4a8f470aecc..8543f5ed1099 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
master->unprepare_message = img_spfi_unprepare;
master->handle_err = img_spfi_handle_err;
- spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
- spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+ spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+ if (IS_ERR(spfi->tx_ch)) {
+ ret = PTR_ERR(spfi->tx_ch);
+ spfi->tx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
+ spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+ if (IS_ERR(spfi->rx_ch)) {
+ ret = PTR_ERR(spfi->rx_ch);
+ spfi->rx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
if (!spfi->tx_ch || !spfi->rx_ch) {
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 49f0099db0cb..f4f28a400a96 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
}
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
- spi_imx->usedma = 1;
+ spi_imx->usedma = true;
else
- spi_imx->usedma = 0;
+ spi_imx->usedma = false;
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cc49fa41fbab..bba10f030e33 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -170,7 +170,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), pdev->name))
goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ hw->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hw->base)
goto exit_busy;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index f3f10443f9e2..7f5680fe2568 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -19,7 +19,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
-#include <linux/gpio.h>
/*
* The Meson SPICC controller could support DMA based transfers, but is not
@@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
static int meson_spicc_setup(struct spi_device *spi)
{
- int ret = 0;
-
if (!spi->controller_state)
spi->controller_state = spi_master_get_devdata(spi->master);
- else if (gpio_is_valid(spi->cs_gpio))
- goto out_gpio;
- else if (spi->cs_gpio == -ENOENT)
- return 0;
-
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "failed to request cs gpio\n");
- return ret;
- }
- }
-
-out_gpio:
- ret = gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- return ret;
+ return 0;
}
static void meson_spicc_cleanup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-
spi->controller_state = NULL;
}
@@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
master->prepare_message = meson_spicc_prepare_message;
master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
master->transfer_one = meson_spicc_transfer_one;
+ master->use_gpio_descriptors = true;
/* Setup max rate according to the Meson GX datasheet */
if ((rate >> 2) > SPICC_MAX_FREQ)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 996c1c8a9c71..dce85ee07cd0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (ret)
goto out_master_free;
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(ssp->dev, "Failed to request DMA\n");
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_master_free;
}
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index cb52fd8008d0..d25ee32862e0 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -603,7 +603,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (!chip->flash_region_mapped_ptr) {
chip->flash_region_mapped_ptr =
- devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+ devm_ioremap(fiu->dev, (fiu->res_mem->start +
(fiu->info->max_map_size *
desc->mem->spi->chip_select)),
(u32)desc->info.length);
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index fe624731c74c..87cd0233c60b 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -12,6 +12,7 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/reset.h>
#include <asm/unaligned.h>
@@ -20,7 +21,7 @@
struct npcm_pspi {
struct completion xfer_done;
- struct regmap *rst_regmap;
+ struct reset_control *reset;
struct spi_master *master;
unsigned int tx_bytes;
unsigned int rx_bytes;
@@ -59,12 +60,6 @@ struct npcm_pspi {
#define NPCM_PSPI_MIN_CLK_DIVIDER 4
#define NPCM_PSPI_DEFAULT_CLK 25000000
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET BIT(22)
-#define NPCM7XX_PSPI2_RESET BIT(23)
-
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : 2;
@@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
priv->mode = spi->mode;
}
+ /*
+ * If the transfer length is even and the transfer uses 8 bits per
+ * word, carry it out as a 16 bits-per-word transfer instead.
+ */
+ if (priv->bits_per_word == 8 && !(t->len & 0x1))
+ t->bits_per_word = 16;
+
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
npcm_pspi_set_transfer_size(priv, t->bits_per_word);
priv->bits_per_word = t->bits_per_word;
@@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
static void npcm_pspi_send(struct npcm_pspi *priv)
{
int wsize;
+ u16 val;
wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
priv->tx_bytes -= wsize;
@@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv)
switch (wsize) {
case 1:
- iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ iowrite8(val, NPCM_PSPI_DATA + priv->base);
break;
case 2:
- iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ val = *priv->tx_buf++ | (val << 8);
+ iowrite16(val, NPCM_PSPI_DATA + priv->base);
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- priv->tx_buf += wsize;
}
static void npcm_pspi_recv(struct npcm_pspi *priv)
@@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv)
switch (rsize) {
case 1:
- val = ioread8(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
break;
case 2:
val = ioread16(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = (val >> 8);
+ *priv->rx_buf++ = val & 0xff;
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- *priv->rx_buf = val;
- priv->rx_buf += rsize;
}
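Byte order is the point of the rewrite above; a minimal sketch of the 16 bits-per-word path, using the buffer pointers from the hunks: the first buffer byte travels in the high half of the FIFO word and is recovered from the high half on receive.

/* Sketch: big-endian packing for the 16 bits-per-word fast path. */
u16 val;

val = *priv->tx_buf++;			/* first byte -> high half */
val = *priv->tx_buf++ | (val << 8);	/* second byte -> low half */
iowrite16(val, NPCM_PSPI_DATA + priv->base);

val = ioread16(priv->base + NPCM_PSPI_DATA);
*priv->rx_buf++ = val >> 8;		/* high half -> first byte */
*priv->rx_buf++ = val & 0xff;		/* low half -> second byte */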
static int npcm_pspi_transfer_one(struct spi_master *master,
@@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
{
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
- NPCM7XX_PSPI1_RESET << priv->id);
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+ reset_control_assert(priv->reset);
+ udelay(5);
+ reset_control_deassert(priv->reset);
}
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
@@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
if (num_cs < 0)
return num_cs;
- pdev->id = of_alias_get_id(np, "spi");
- if (pdev->id < 0)
- pdev->id = 0;
-
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
@@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
priv = spi_master_get_devdata(master);
priv->master = master;
priv->is_save_param = false;
- priv->id = pdev->id;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
goto out_disable_clk;
}
- priv->rst_regmap =
- syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
- if (IS_ERR(priv->rst_regmap)) {
- dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
- return PTR_ERR(priv->rst_regmap);
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto out_disable_clk;
}
/* reset SPI-HW block */
@@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
master->mode_bits = SPI_CPHA | SPI_CPOL;
master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
+ master->bus_num = -1;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->transfer_one = npcm_pspi_transfer_one;
master->prepare_transfer_hardware =
@@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk;
- pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+ pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
return 0;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index c36bb1bb464e..8c5084a3a617 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -439,7 +439,7 @@ static bool nxp_fspi_supports_op(struct spi_mem *mem,
op->data.nbytes > f->devtype_data->txfifo)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
/* Instead of busy looping invoke readl_poll_timeout functionality. */
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index e2331eb7b47a..9df7c5979c29 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -20,7 +20,6 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_oc_tiny.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#define DRV_NAME "spi_oc_tiny"
@@ -50,8 +49,6 @@ struct tiny_spi {
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
- int gpio_cs_count;
- int *gpio_cs;
};
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
@@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
}
-static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
-{
- struct tiny_spi *hw = tiny_spi_to_hw(spi);
-
- if (hw->gpio_cs_count > 0) {
- gpio_set_value(hw->gpio_cs[spi->chip_select],
- (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
- }
-}
-
static int tiny_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
- unsigned int i;
u32 val;
if (!np)
return 0;
- hw->gpio_cs_count = of_gpio_count(np);
- if (hw->gpio_cs_count > 0) {
- hw->gpio_cs = devm_kcalloc(&pdev->dev,
- hw->gpio_cs_count, sizeof(unsigned int),
- GFP_KERNEL);
- if (!hw->gpio_cs)
- return -ENOMEM;
- }
- for (i = 0; i < hw->gpio_cs_count; i++) {
- hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
- if (hw->gpio_cs[i] < 0)
- return -ENODEV;
- }
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
@@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
- unsigned int i;
int err = -ENODEV;
master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
@@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the master state. */
master->bus_num = pdev->id;
- master->num_chipselect = 255;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tiny_spi_setup;
+ master->use_gpio_descriptors = true;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
@@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
- hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
@@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
}
/* find platform data */
if (platp) {
- hw->gpio_cs_count = platp->gpio_cs_count;
- hw->gpio_cs = platp->gpio_cs;
- if (platp->gpio_cs_count && !platp->gpio_cs) {
- err = -EBUSY;
- goto exit;
- }
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
@@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- for (i = 0; i < hw->gpio_cs_count; i++) {
- err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
- if (err)
- goto exit_gpio;
- gpio_direction_output(hw->gpio_cs[i], 1);
- }
- hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
@@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
return 0;
-exit_gpio:
- while (i-- > 0)
- gpio_free(hw->gpio_cs[i]);
exit:
spi_master_put(master);
return err;
@@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
- unsigned int i;
spi_bitbang_stop(&hw->bitbang);
- for (i = 0; i < hw->gpio_cs_count; i++)
- gpio_free(hw->gpio_cs[i]);
spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index c7266ef295fd..1f59beb7d27e 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -646,8 +646,7 @@ static int orion_spi_probe(struct platform_device *pdev)
/* The following clock is only used by some SoCs */
spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
- if (IS_ERR(spi->axi_clk) &&
- PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ if (PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
status = -EPROBE_DEFER;
goto out_rel_clk;
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 16b6b2ad4e7c..4c7a71f0fb3e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
return limit;
}
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+ /* On MMP, disabling SSE seems to corrupt the rx fifo */
+ if (drv_data->ssp_type == MMP2_SSP)
+ return;
+
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+}
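Every open-coded read-modify-write of SSCR0 below now funnels through this helper, so the MMP2 quirk lives in exactly one place; callers simply do:

/* Sketch: uniform shutdown call in the error/teardown paths. */
pxa2xx_spi_off(drv_data);	/* no-op on MMP2_SSP, clears SSCR0_SSE otherwise */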
+
static int null_writer(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
@@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
@@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
@@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+ if (drv_data->ssp_type != MMP2_SSP)
+ pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* first set CR1 without interrupt and service enables */
@@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
@@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
pxa2xx_spi_write(drv_data, SSCR1,
@@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
return 0;
}
@@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
/* KBL-H */
{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
@@ -1443,6 +1452,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
+ /* JSL */
+ { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 250fd60e1678..3c4f83bf7084 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -137,7 +137,7 @@ enum qspi_clocks {
struct qcom_qspi {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data clks[QSPI_NUM_CLKS];
+ struct clk_bulk_data *clks;
struct qspi_xfer xfer;
/* Lock to protect xfer and IRQ accessed registers */
spinlock_t lock;
@@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
goto exit_probe_master_put;
}
+ ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+ sizeof(*ctrl->clks), GFP_KERNEL);
+ if (!ctrl->clks) {
+ ret = -ENOMEM;
+ goto exit_probe_master_put;
+ }
+
ctrl->clks[QSPI_CLK_CORE].id = "core";
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
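A minimal sketch of the devm bulk-clock idiom the hunk switches to, assuming the QSPI_CLK_* indices shown: devm_kcalloc() ties the array's lifetime to the device, devm_clk_bulk_get() fills every clk in one call, and a consumer would then enable them as a group.

/* Sketch: device-managed clk_bulk_data, enabled as a group. */
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
if (ret)
	return ret;

ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
if (ret)
	return ret;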
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7222c7689c3c..85575d45901c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -159,7 +159,7 @@
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
-#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
@@ -242,6 +242,7 @@ struct spi_ops {
u16 mode_bits;
u16 flags;
u16 fifo_size;
+ u8 num_hw_ss;
};
/*
@@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
return n;
}
-#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -620,9 +619,8 @@ no_dma_tx:
dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->ctlr->dev),
- dev_name(&rspi->ctlr->dev));
+ dev_warn_once(&rspi->ctlr->dev,
+ "DMA not available, falling back to PIO\n");
}
return ret;
}
@@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ /* Configure slave signal to assert */
+ rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+ : spi->chip_select);
+
/* CMOS output mode and MOSI signal from previous transfer */
rspi->sppcr = 0;
if (spi->mode & SPI_LOOP)
rspi->sppcr |= SPPCR_SPLP;
- set_config_register(rspi, 8);
+ rspi->ops->set_config_register(rspi, 8);
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
@@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
+ .num_hw_ss = 2,
};
static const struct spi_ops rspi_rz_ops = {
@@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
+ .num_hw_ss = 1,
};
static const struct spi_ops qspi_ops = {
@@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = {
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
+ .num_hw_ss = 1,
};
#ifdef CONFIG_OF
@@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->mode_bits = ops->mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = rspi->ops->num_hw_ss;
ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
@@ -1314,8 +1321,6 @@ error1:
static const struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
- { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
- { "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8f134735291f..1c11a00a2c36 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -14,8 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -55,7 +53,6 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
- unsigned short unused_ss;
bool native_cs_inited;
bool native_cs_high;
bool slave_aborted;
@@ -63,140 +60,140 @@ struct sh_msiof_spi_priv {
#define MAX_SS 3 /* Maximum number of native chip selects */
-#define TMDR1 0x00 /* Transmit Mode Register 1 */
-#define TMDR2 0x04 /* Transmit Mode Register 2 */
-#define TMDR3 0x08 /* Transmit Mode Register 3 */
-#define RMDR1 0x10 /* Receive Mode Register 1 */
-#define RMDR2 0x14 /* Receive Mode Register 2 */
-#define RMDR3 0x18 /* Receive Mode Register 3 */
-#define TSCR 0x20 /* Transmit Clock Select Register */
-#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
-#define CTR 0x28 /* Control Register */
-#define FCTR 0x30 /* FIFO Control Register */
-#define STR 0x40 /* Status Register */
-#define IER 0x44 /* Interrupt Enable Register */
-#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
-#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
-#define TFDR 0x50 /* Transmit FIFO Data Register */
-#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
-#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
-#define RFDR 0x60 /* Receive FIFO Data Register */
-
-/* TMDR1 and RMDR1 */
-#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
-#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
-#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
-#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT 2
-#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
-/* TMDR1 */
-#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* TMDR2 and RMDR2 */
-#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
-
-/* TSCR and RSCR */
-#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SCR_BRPS(i) (((i) - 1) << 8)
-#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0
-#define SCR_BRDV_DIV_4 1
-#define SCR_BRDV_DIV_8 2
-#define SCR_BRDV_DIV_16 3
-#define SCR_BRDV_DIV_32 4
-#define SCR_BRDV_DIV_1 7
-
-/* CTR */
-#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
-#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
-#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
-#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE BIT(9) /* Transmit Enable */
-#define CTR_RXE BIT(8) /* Receive Enable */
-#define CTR_TXRST BIT(1) /* Transmit Reset */
-#define CTR_RXRST BIT(0) /* Receive Reset */
-
-/* FCTR */
-#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT 20
-#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT 4
-#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
-
-/* STR */
-#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
-#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
-#define STR_TEOF BIT(23) /* Frame Transmission End */
-#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
-#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
-#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
-#define STR_RFFUL BIT(13) /* Receive FIFO Full */
-#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
-#define STR_REOF BIT(7) /* Frame Reception End */
-#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
-#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
-#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
-
-/* IER */
-#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
-#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
-#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
-#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
-#define IER_REOFE BIT(7) /* Frame Reception End Enable */
-#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
+#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT 2
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i) (((i) - 1) << 8)
+#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2 0
+#define SISCR_BRDV_DIV_4 1
+#define SISCR_BRDV_DIV_8 2
+#define SISCR_BRDV_DIV_16 3
+#define SISCR_BRDV_DIV_32 4
+#define SISCR_BRDV_DIV_1 7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match SICTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT 20
+#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT 4
+#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
return ioread16(p->mapbase + reg_offs);
default:
return ioread32(p->mapbase + reg_offs);
@@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
u32 value)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
iowrite16(value, p->mapbase + reg_offs);
break;
default:
@@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
u32 mask = clr | set;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data &= ~clr;
data |= set;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+ return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
(data & mask) == set, 1, 100);
}
@@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
struct sh_msiof_spi_priv *p = data;
/* just disable the interrupt and wake up */
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
complete(&p->done);
return IRQ_HANDLED;
@@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
- u32 mask = CTR_TXRST | CTR_RXRST;
+ u32 mask = SICTR_TXRST | SICTR_RXRST;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data |= mask;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+ readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
100);
}
static const u32 sh_msiof_spi_div_array[] = {
- SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
- SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
+ SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+ SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
@@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
div = DIV_ROUND_UP(parent_rate, spi_hz);
if (div <= 1024) {
- /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
if (!div_pow && div <= 32 && div > 2)
div_pow = 1;
@@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
brps = 32;
}
- scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
- sh_msiof_write(p, TSCR, scr);
+ scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, RSCR, scr);
+ sh_msiof_write(p, SIRSCR, scr);
}
static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
@@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return 0;
}
- val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
- val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
return val;
}
@@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
- tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
- tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
- tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+ tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_slave(p->ctlr)) {
- sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+ sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
- sh_msiof_write(p, TMDR1,
- tmp | MDR1_TRMD | TMDR1_PCON |
- (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ sh_msiof_write(p, SITMDR1,
+ tmp | SIMDR1_TRMD | SITMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
- sh_msiof_write(p, RMDR1, tmp);
+ sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
- tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
- tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+ tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+ tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << CTR_TEDG_SHIFT;
- tmp |= edge << CTR_REDG_SHIFT;
- tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
- sh_msiof_write(p, CTR, tmp);
+ tmp |= edge << SICTR_TEDG_SHIFT;
+ tmp |= edge << SICTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+ u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, TMDR2, dr2);
+ sh_msiof_write(p, SITMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
if (rx_buf)
- sh_msiof_write(p, RMDR2, dr2);
+ sh_msiof_write(p, SIRMDR2, dr2);
}
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR,
- sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
+ sh_msiof_write(p, SISTR,
+ sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_8[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
@@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_16[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
@@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_32[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+ sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+ sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
@@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
@@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
@@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+ buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+ put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}
static int sh_msiof_spi_setup(struct spi_device *spi)
@@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
/* Configure native chip select mode/polarity early */
- clr = MDR1_SYNCMD_MASK;
- set = MDR1_SYNCMD_SPI;
+ clr = SIMDR1_SYNCMD_MASK;
+ set = SIMDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH)
- clr |= BIT(MDR1_SYNCAC_SHIFT);
+ clr |= BIT(SIMDR1_SYNCAC_SHIFT);
else
- set |= BIT(MDR1_SYNCAC_SHIFT);
+ set |= BIT(SIMDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev);
- tmp = sh_msiof_read(p, TMDR1) & ~clr;
- sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
- tmp = sh_msiof_read(p, RMDR1) & ~clr;
- sh_msiof_write(p, RMDR1, tmp | set);
+ tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+ sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+ tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+ sh_msiof_write(p, SIRMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true;
@@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
/* Configure pins before asserting CS */
if (spi->cs_gpiod) {
- ss = p->unused_ss;
+ ss = ctlr->unused_native_cs;
cs_high = p->native_cs_high;
} else {
ss = spi->chip_select;
@@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
/* setup clock and rx/tx signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
/* start by setting frame bit */
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
return ret;
}
@@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
/* shut down frame, rx/tx and clock signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
return ret;
}
@@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
fifo_shift = 32 - bits;
/* default FIFO watermarks for PIO */
- sh_msiof_write(p, FCTR, 0);
+ sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
- sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
if (tx_buf)
@@ -731,7 +728,7 @@ stop_reset:
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx_buf);
stop_ier:
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
@@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
- ier_bits |= IER_RDREQE | IER_RDMAE;
+ ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
if (tx) {
- ier_bits |= IER_TDREQE | IER_TDMAE;
+ ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
@@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* 1 stage FIFO watermarks for DMA */
- sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+ sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
/* setup msiof transfer mode registers (32-bit words) */
sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
- sh_msiof_write(p, IER, ier_bits);
+ sh_msiof_write(p, SIIER, ier_bits);
reinit_completion(&p->done);
if (tx)
@@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (ret)
goto stop_reset;
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
} else {
/* wait for tx fifo to be emptied */
- sh_msiof_write(p, IER, IER_TEOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE);
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
@@ -856,7 +853,7 @@ stop_dma:
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->ctlr->dma_rx);
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
@@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
-static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
-{
- struct device *dev = &p->pdev->dev;
- unsigned int used_ss_mask = 0;
- unsigned int cs_gpios = 0;
- unsigned int num_cs, i;
- int ret;
-
- ret = gpiod_count(dev, "cs");
- if (ret <= 0)
- return 0;
-
- num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
- for (i = 0; i < num_cs; i++) {
- struct gpio_desc *gpiod;
-
- gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
- if (!IS_ERR(gpiod)) {
- devm_gpiod_put(dev, gpiod);
- cs_gpios++;
- continue;
- }
-
- if (PTR_ERR(gpiod) != -ENOENT)
- return PTR_ERR(gpiod);
-
- if (i >= MAX_SS) {
- dev_err(dev, "Invalid native chip select %d\n", i);
- return -EINVAL;
- }
- used_ss_mask |= BIT(i);
- }
- p->unused_ss = ffz(used_ss_mask);
- if (cs_gpios && p->unused_ss >= MAX_SS) {
- dev_err(dev, "No unused native chip select available\n");
- return -EINVAL;
- }
- return 0;
-}
-
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- dma_tx_id, res->start + TFDR);
+ dma_tx_id, res->start + SITFDR);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- dma_rx_id, res->start + RFDR);
+ dma_rx_id, res->start + SIRFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;
@@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* Setup GPIO chip selects */
- ctlr->num_chipselect = p->info->num_chipselect;
- ret = sh_msiof_get_cs_gpios(p);
- if (ret)
- goto err1;
-
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = p->info->num_chipselect;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
@@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = MAX_SS;
ret = sh_msiof_request_dma(p);
if (ret < 0)
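
The net effect of the sh_msiof hunks above is that the driver stops scanning cs-gpios itself and delegates native-chip-select bookkeeping to the SPI core: probe declares the hardware limit, and prepare_message reads back the parking slot the core computed. A consolidated sketch of that contract, reusing the names from the hunks (surrounding probe code elided):

	/* probe(): declare the limit; the core scans the cs-gpios itself */
	ctlr->use_gpio_descriptors = true;
	ctlr->max_native_cs = MAX_SS;

	/* prepare_message(): park the native CS on the slot the core found
	 * whenever a GPIO descriptor drives the actual chip select
	 */
	if (spi->cs_gpiod)
		ss = ctlr->unused_native_cs;
	else
		ss = spi->chip_select;
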
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index e1e639191557..8419e6722e17 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
- sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
- if (!sspi->rx_chan) {
+ sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(sspi->rx_chan)) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->rx_chan);
goto free_master;
}
- sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
- if (!sspi->tx_chan) {
+ sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(sspi->tx_chan)) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->tx_chan);
goto free_rx_dma;
}
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 2ee1feb41681..6678f1cbc566 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -678,7 +678,7 @@ static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
if (d->unit != SPI_DELAY_UNIT_SCK)
return -EINVAL;
- val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
/* Set default chip selection, clock phase and clock polarity */
val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 4e726929bb4f..4ef569b47aa6 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
return 0;
}
-static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
+ int ret = 0;
memset(&dma_cfg, 0, sizeof(dma_cfg));
@@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;
- qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
- if (qspi->dma_chrx) {
+ qspi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(qspi->dma_chrx)) {
+ ret = PTR_ERR(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto out;
+ } else {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
@@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}
- qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
- if (qspi->dma_chtx) {
+ qspi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(qspi->dma_chtx)) {
+ ret = PTR_ERR(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ } else {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
@@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
}
}
+out:
init_completion(&qspi->dma_completion);
+
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
@@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
- stm32_qspi_dma_setup(qspi);
+ ret = stm32_qspi_dma_setup(qspi);
+ if (ret)
+ goto err;
+
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
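
stm32-qspi treats DMA as optional, so on most request failures it clears the channel pointer and carries on in polled mode, but it must still return -EPROBE_DEFER so probe is retried once the DMA controller shows up. The rule in isolation, as a hedged sketch (example_* names are hypothetical; includes as in the previous sketch):

	static int example_optional_dma(struct device *dev, struct dma_chan **chan)
	{
		*chan = dma_request_chan(dev, "rx");
		if (IS_ERR(*chan)) {
			int ret = PTR_ERR(*chan);

			*chan = NULL;		/* fall back to non-DMA transfers */
			if (ret == -EPROBE_DEFER)
				return ret;	/* the channel may appear later */
		}
		return 0;			/* any other error: run without DMA */
	}
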
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index b222ce8d083e..e041f9c4ec47 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
}
/**
- * stm32_spi_setup - setup device chip select
- */
-static int stm32_spi_setup(struct spi_device *spi_dev)
-{
- int ret = 0;
-
- if (!gpio_is_valid(spi_dev->cs_gpio)) {
- dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
- spi_dev->cs_gpio);
- return -EINVAL;
- }
-
- dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
- spi_dev->cs_gpio,
- (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
-
- ret = gpio_direction_output(spi_dev->cs_gpio,
- !(spi_dev->mode & SPI_CS_HIGH));
-
- return ret;
-}
-
-/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
*/
static int stm32_spi_prepare_msg(struct spi_master *master,
@@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct stm32_spi *spi;
struct resource *res;
- int i, ret;
+ int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!master) {
@@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
- master->setup = stm32_spi_setup;
+ master->use_gpio_descriptors = true;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
- if (!spi->dma_tx)
+ spi->dma_tx = dma_request_chan(spi->dev, "tx");
+ if (IS_ERR(spi->dma_tx)) {
+ ret = PTR_ERR(spi->dma_tx);
+ spi->dma_tx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
- else
+ } else {
master->dma_tx = spi->dma_tx;
+ }
+
+ spi->dma_rx = dma_request_chan(spi->dev, "rx");
+ if (IS_ERR(spi->dma_rx)) {
+ ret = PTR_ERR(spi->dma_rx);
+ spi->dma_rx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma_release;
- spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
- if (!spi->dma_rx)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
- else
+ } else {
master->dma_rx = spi->dma_rx;
+ }
if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
@@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "spi master registration failed: %d\n",
ret);
- goto err_dma_release;
+ goto err_pm_disable;
}
- if (!master->cs_gpios) {
+ if (!master->cs_gpiods) {
dev_err(&pdev->dev, "no CS gpios available\n");
ret = -EINVAL;
- goto err_dma_release;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i])) {
- dev_err(&pdev->dev, "%i is not a valid gpio\n",
- master->cs_gpios[i]);
- ret = -EINVAL;
- goto err_dma_release;
- }
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "can't get CS gpio %i\n",
- master->cs_gpios[i]);
- goto err_dma_release;
- }
+ goto err_pm_disable;
}
dev_info(&pdev->dev, "driver initialized\n");
return 0;
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);
-
- pm_runtime_disable(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(spi->clk);
err_master_put:
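
The reshuffled stm32 error path is the classic goto-unwind ladder: labels are declared in reverse order of acquisition, so any failure point jumps to the first label that releases everything obtained so far, and pm_runtime_disable() now runs only once runtime PM has actually been enabled. A sketch of the resulting shape (fragment; names follow the hunk above):

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto err_pm_disable;		/* PM was enabled just before this */
	return 0;

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
err_dma_release:				/* jumped to directly on DMA deferral */
	if (spi->dma_tx)
		dma_release_channel(spi->dma_tx);
	if (spi->dma_rx)
		dma_release_channel(spi->dma_rx);
err_clk_disable:
	clk_disable_unprepare(spi->clk);
	return ret;
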
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index fc40ab146c86..83edabdb41ad 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
- tspi->is_packed = 1;
+ tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
- tspi->is_packed = 0;
+ tspi->is_packed = false;
tspi->words_per_32bit = 1;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 3cb65371ae3b..366a3e5cca6b 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -62,6 +62,7 @@ struct ti_qspi {
u32 dc;
bool mmap_enabled;
+ int current_cs;
};
#define QSPI_PID (0x0)
@@ -79,8 +80,6 @@ struct ti_qspi {
#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-#define QSPI_FCLK 192000000
-
/* Clock Control */
#define QSPI_CLK_EN (1 << 31)
#define QSPI_CLK_DIV_MAX 0xffff
@@ -315,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
{
int wlen;
unsigned int cmd;
+ u32 rx;
+ u8 rxlen, rx_wlen;
u8 *rxbuf;
rxbuf = t->rx_buf;
@@ -331,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
break;
}
wlen = t->bits_per_word >> 3; /* in bytes */
+ rx_wlen = wlen;
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
if (qspi_is_busy(qspi))
return -EBUSY;
+ switch (wlen) {
+ case 1:
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ rxlen = QSPI_WLEN_MAX_BYTES;
+ } else {
+ rxlen = min(count, 4);
+ }
+ rx_wlen = rxlen << 3;
+ cmd &= ~QSPI_WLEN_MASK;
+ cmd |= QSPI_WLEN(rx_wlen);
+ break;
+ default:
+ rxlen = wlen;
+ break;
+ }
+
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
+
switch (wlen) {
case 1:
- *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *rxp = (u32 *) rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ *rxp++ = be32_to_cpu(rx);
+ } else {
+ u8 *rxp = rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ if (rx_wlen >= 8)
+ *rxp++ = rx >> (rx_wlen - 8);
+ if (rx_wlen >= 16)
+ *rxp++ = rx >> (rx_wlen - 16);
+ if (rx_wlen >= 24)
+ *rxp++ = rx >> (rx_wlen - 24);
+ if (rx_wlen >= 32)
+ *rxp++ = rx;
+ }
break;
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
@@ -353,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
break;
}
- rxbuf += wlen;
- count -= wlen;
+ rxbuf += rxlen;
+ count -= rxlen;
}
return 0;
@@ -487,6 +535,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
MEM_CS_EN(spi->chip_select));
}
qspi->mmap_enabled = true;
+ qspi->current_cs = spi->chip_select;
}
static void ti_qspi_disable_memory_map(struct spi_device *spi)
@@ -498,6 +547,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
MEM_CS_MASK, 0);
qspi->mmap_enabled = false;
+ qspi->current_cs = -1;
}
static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
@@ -524,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
QSPI_SPI_SETUP_REG(spi->chip_select));
}
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ size_t max_len;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.val < qspi->mmap_size) {
+ /* Limit MMIO to the memory-mapped region */
+ if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+ max_len = qspi->mmap_size - op->addr.val;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ } else {
+ /*
+ * Use fallback mode (SW-generated transfers) above the
+ * memory-mapped region.
+ * Adjust size to comply with the QSPI max frame length.
+ */
+ max_len = QSPI_FRAME;
+ max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ }
+
+ return 0;
+}
+
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
@@ -543,7 +622,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
mutex_lock(&qspi->list_lock);
- if (!qspi->mmap_enabled)
+ if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select)
ti_qspi_enable_memory_map(mem->spi);
ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes);
@@ -574,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
+ .adjust_op_size = ti_qspi_adjust_op_size,
};
static int ti_qspi_start_transfer_one(struct spi_master *master,
@@ -799,6 +879,7 @@ no_dma:
}
}
qspi->mmap_enabled = false;
+ qspi->current_cs = -1;
ret = devm_spi_register_master(&pdev->dev, master);
if (!ret)
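
ti_qspi_adjust_op_size() clamps a read either to the end of the memory-mapped window or, beyond it, to what still fits in one software-generated QSPI frame alongside the opcode, address, and dummy bytes. The same arithmetic as a tiny standalone helper (hypothetical name; QSPI_FRAME is the controller's frame-length limit from the driver):

	static size_t example_clamp_read(u64 addr, size_t nbytes, u8 addr_nbytes,
					 u8 dummy_nbytes, size_t mmap_size)
	{
		if (addr < mmap_size)	/* MMIO path: stay inside the window */
			return min_t(size_t, nbytes, mmap_size - addr);

		/* SW path: one opcode byte + address + dummy bytes share the frame */
		return min_t(size_t, nbytes,
			     QSPI_FRAME - 1 - addr_nbytes - dummy_nbytes);
	}
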
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 223353fa2d8a..d7ea6af74743 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 47cde1864630..0fa50979644d 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@
struct uniphier_spi_priv {
void __iomem *base;
+ dma_addr_t base_dma_addr;
struct clk *clk;
struct spi_master *master;
struct completion xfer_done;
@@ -32,6 +34,7 @@ struct uniphier_spi_priv {
unsigned int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
+ atomic_t dma_busy;
bool is_save_param;
u8 bits_per_word;
@@ -61,11 +64,16 @@ struct uniphier_spi_priv {
#define SSI_FPS_FSTRT BIT(14)
#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
#define SSI_SR_RNE BIT(0)
#define SSI_IE 0x18
+#define SSI_IE_TCIE BIT(4)
#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_TXRE BIT(2)
+#define SSI_IE_RXRE BIT(1)
#define SSI_IE_RORIE BIT(0)
+#define SSI_IE_ALL_MASK GENMASK(4, 0)
#define SSI_IS 0x1c
#define SSI_IS_RXRS BIT(9)
@@ -87,15 +95,19 @@ struct uniphier_spi_priv {
#define SSI_RXDR 0x24
#define SSI_FIFO_DEPTH 8U
+#define SSI_FIFO_BURST_NUM 1
+
+#define SSI_DMA_RX_BUSY BIT(1)
+#define SSI_DMA_TX_BUSY BIT(0)
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}
-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
@@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
writel(val, priv->base + SSI_IE);
}
-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
@@ -290,25 +302,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
}
}
-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+ unsigned int threshold)
{
- unsigned int fifo_threshold, fill_bytes;
u32 val;
- fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
- bytes_per_word(priv->bits_per_word));
- fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
-
- fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
-
- /* set fifo threshold */
val = readl(priv->base + SSI_FC);
val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
- val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
- val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+ unsigned int fifo_threshold, fill_words;
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+ uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+ fill_words = fifo_threshold -
+ DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
- while (fill_bytes--)
+ while (fill_words--)
uniphier_spi_send(priv);
}
@@ -327,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}
+static bool uniphier_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ if ((!master->dma_tx && !master->dma_rx)
+ || (!master->dma_tx && t->tx_buf)
+ || (!master->dma_rx && t->rx_buf))
+ return false;
+
+ return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+ if (!(state & SSI_DMA_TX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+ if (!(state & SSI_DMA_RX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ int buswidth;
+
+ atomic_set(&priv->dma_busy, 0);
+
+ uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+ if (priv->bits_per_word <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (priv->bits_per_word <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ if (priv->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = priv->base_dma_addr + SSI_RXDR,
+ .src_addr_width = buswidth,
+ .src_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ master->dma_rx,
+ t->rx_sg.sgl, t->rx_sg.nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto out_err_prep;
+
+ rxdesc->callback = uniphier_spi_dma_rxcb;
+ rxdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+ atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (priv->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = priv->base_dma_addr + SSI_TXDR,
+ .dst_addr_width = buswidth,
+ .dst_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ master->dma_tx,
+ t->tx_sg.sgl, t->tx_sg.nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto out_err_prep;
+
+ txdesc->callback = uniphier_spi_dma_txcb;
+ txdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+ atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ /* signal that we need to wait for completion */
+ return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+ if (rxdesc)
+ dmaengine_terminate_sync(master->dma_rx);
+
+ return -EINVAL;
+}
+
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
@@ -339,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
uniphier_spi_fill_tx_fifo(priv);
- uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
time_left = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies(SSI_TIMEOUT_MS));
- uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
if (!time_left) {
dev_err(dev, "transfer timeout.\n");
@@ -388,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;
+ bool use_dma;
/* Terminate and return success for 0 byte length transfer */
if (!t->len)
@@ -395,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
uniphier_spi_setup_transfer(spi, t);
+ use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ if (use_dma)
+ return uniphier_spi_transfer_one_dma(master, spi, t);
+
/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
@@ -425,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}
+static void uniphier_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ u32 val;
+
+ /* stop running spi transfer */
+ writel(0, priv->base + SSI_CTL);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+ dmaengine_terminate_async(master->dma_tx);
+ atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+ }
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+ dmaengine_terminate_async(master->dma_rx);
+ atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+ }
+}
+
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
struct uniphier_spi_priv *priv = dev_id;
@@ -470,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
+ struct resource *res;
+ struct dma_slave_caps caps;
+ u32 dma_tx_burst = 0, dma_rx_burst = 0;
unsigned long clk_rate;
int irq;
int ret;
@@ -484,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;
- priv->base = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
+ priv->base_dma_addr = res->start;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -531,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev)
= uniphier_spi_prepare_transfer_hardware;
master->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
+ master->handle_err = uniphier_spi_handle_err;
+ master->can_dma = uniphier_spi_can_dma;
+
master->num_chipselect = 1;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_tx = NULL;
+ dma_tx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_tx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_tx_burst = caps.max_burst;
+ }
+
+ master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_rx = NULL;
+ dma_rx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_rx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_rx_burst = caps.max_burst;
+ }
+
+ master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
@@ -551,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+ if (priv->master->dma_tx)
+ dma_release_channel(priv->master->dma_tx);
+ if (priv->master->dma_rx)
+ dma_release_channel(priv->master->dma_rx);
+
clk_disable_unprepare(priv->clk);
return 0;
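
The uniphier completion handshake leans on atomic_fetch_andnot() returning the busy mask as it was before the clear: whichever of the TX/RX callbacks runs last observes the other bit already gone and is the only one to finalize the transfer, with no extra lock. A minimal sketch of the RX side (example_* names are hypothetical):

	#include <linux/atomic.h>
	#include <linux/bits.h>
	#include <linux/spi/spi.h>

	#define EXAMPLE_DMA_RX_BUSY	BIT(1)
	#define EXAMPLE_DMA_TX_BUSY	BIT(0)

	static void example_rx_complete(atomic_t *busy, struct spi_master *master)
	{
		/* old value of the mask, before the RX bit is cleared */
		int state = atomic_fetch_andnot(EXAMPLE_DMA_RX_BUSY, busy);

		if (!(state & EXAMPLE_DMA_TX_BUSY))	/* TX done (or never started) */
			spi_finalize_current_transfer(master);
	}
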
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5e4c4532f7f3..38b4c78df506 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1499,8 +1499,7 @@ static void spi_pump_messages(struct kthread_work *work)
* advances its @tx buffer pointer monotonically.
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
- * preparing to transmit right now.
+ * @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will disable IRQs and preemption for the duration of the
* transfer, for less jitter in time measurement. Only compatible
* with PIO drivers. If true, must follow up with
@@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work)
*/
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off)
+ size_t progress, bool irqs_off)
{
- u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
if (!xfer->ptp_sts)
return;
if (xfer->timestamped_pre)
return;
- if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+ if (progress < xfer->ptp_sts_word_pre)
return;
/* Capture the resolution of the timestamp */
- xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+ xfer->ptp_sts_word_pre = progress;
xfer->timestamped_pre = true;
@@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
* timestamped.
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
- * just transmitted.
+ * @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
*/
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off)
+ size_t progress, bool irqs_off)
{
- u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
if (!xfer->ptp_sts)
return;
if (xfer->timestamped_post)
return;
- if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+ if (progress < xfer->ptp_sts_word_post)
return;
ptp_read_system_postts(xfer->ptp_sts);
@@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
}
/* Capture the resolution of the timestamp */
- xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+ xfer->ptp_sts_word_post = progress;
xfer->timestamped_post = true;
}
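
With the reworked spi_take_timestamp_pre()/spi_take_timestamp_post() above, a PIO driver reports progress as a word count instead of a tx pointer, which also covers RX-only transfers where no tx pointer exists. A hedged sketch of the call pattern inside a hypothetical transmit loop (example_push_word() stands in for the driver's FIFO access):

	/* 'i' counts words already transferred when _pre is called */
	for (i = 0; i < num_words; i++) {
		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
		example_push_word(priv, i);
		spi_take_timestamp_post(ctlr, xfer, i + 1, irqs_off);
	}
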
@@ -1680,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
+ if (unlikely(ctlr->ptp_sts_supported)) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -2457,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2498,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
}
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffz(native_cs_mask);
+ if (num_cs_gpios && ctlr->max_native_cs &&
+ ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
}
return 0;
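
spi_get_gpio_descs() now records which chip selects remain native and parks the controller on the lowest free slot with ffz(), the first-zero-bit helper. For instance, with native CS 0 and CS 2 in use the mask is 0b101 and ffz() yields 1 (the values here are illustrative only):

	unsigned long native_cs_mask = BIT(0) | BIT(2);	/* CS 0 and 2 occupied */
	unsigned int unused = ffz(native_cs_mask);	/* first zero bit -> 1 */
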
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 97acc2ba2912..de844b412110 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
return 0;
}
+static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
struct irq_domain *domain, unsigned int virq,
@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
else
handler = handle_level_irq;
+
+ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
+ &qpnpint_irq_request_class);
irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
handler, NULL, NULL);
}
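
The spmi-pmic-arb hunk hands every IRQ mapped by the domain a driver-local lockdep class, which keeps lockdep from lumping these irq_desc locks together with unrelated ones and reporting false recursion. The idiom in isolation (example_* names are hypothetical):

	#include <linux/irq.h>

	/* one static key pair per driver, shared by all of its descriptors */
	static struct lock_class_key example_irq_lock_class;
	static struct lock_class_key example_irq_request_class;

	static void example_irq_map(unsigned int virq)
	{
		irq_set_lockdep_class(virq, &example_irq_lock_class,
				      &example_irq_request_class);
	}
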
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 06b68dd6e022..bc275968fcc6 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -63,7 +63,7 @@ int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports
for (i = 0; i < 2; i++) {
void __iomem *uart_regs;
- uart_regs = ioremap_nocache(SSB_EUART, 16);
+ uart_regs = ioremap(SSB_EUART, 16);
if (uart_regs) {
uart_regs += (i * 8);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 6a5622e0ded5..c1186415896b 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -122,7 +122,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap(addr, len);
if (!mmio)
goto out;
@@ -168,7 +168,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap(addr, len);
if (!mmio)
goto out;
@@ -382,7 +382,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
/* Ok, ready to run, register it to the system.
* The following needs to change if we want to port hostmode
* to a non-MIPS platform. */
- ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
+ ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000);
set_io_port_base(ssb_pcicore_controller.io_map_base);
/* Give the PCI controller some time to configure itself with the new
* values. Not waiting at this point causes the machine to crash. */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index eaf753b70ec5..baccd7c883cc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -42,10 +42,6 @@ source "drivers/staging/rtl8188eu/Kconfig"
source "drivers/staging/rts5208/Kconfig"
-source "drivers/staging/octeon/Kconfig"
-
-source "drivers/staging/octeon-usb/Kconfig"
-
source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
@@ -116,8 +112,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
-source "drivers/staging/isdn/Kconfig"
-
source "drivers/staging/wusbcore/Kconfig"
source "drivers/staging/uwb/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0a4396c9067b..fdd03fd6e704 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,8 +12,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
-obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
-obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
@@ -48,10 +46,9 @@ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
-obj-$(CONFIG_ISDN_CAPI) += isdn/
obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
-obj-$(CONFIG_EXFAT_FS) += exfat/
+obj-$(CONFIG_STAGING_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 74d497d39c5a..5891d0744a76 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -537,14 +537,14 @@ static int set_name(struct ashmem_area *asma, void __user *name)
len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
if (len < 0)
return len;
- if (len == ASHMEM_NAME_LEN)
- local_name[ASHMEM_NAME_LEN - 1] = '\0';
+
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
if (asma->file)
ret = -EINVAL;
else
- strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
+ ASHMEM_NAME_LEN);
mutex_unlock(&ashmem_mutex);
return ret;
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 3fffe4d6f327..f180a8e9f58a 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -4,7 +4,7 @@
#
config XIL_AXIS_FIFO
tristate "Xilinx AXI-Stream FIFO IP core driver"
- depends on OF
+ depends on OF && HAS_IOMEM
help
This adds support for the Xilinx AXI-Stream FIFO IP core driver.
The AXI Streaming FIFO allows memory-mapped access to an AXI Streaming
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 39e6c59df1e9..5801067e7c1b 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/wait.h>
-#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/init.h>
@@ -133,9 +133,9 @@ struct axis_fifo {
int has_tx_fifo; /* whether the IP has the tx fifo enabled */
wait_queue_head_t read_queue; /* wait queue for asynchronous read */
- spinlock_t read_queue_lock; /* lock for reading waitqueue */
+ struct mutex read_lock; /* lock for reading */
wait_queue_head_t write_queue; /* wait queue for asynchronous write */
- spinlock_t write_queue_lock; /* lock for writing waitqueue */
+ struct mutex write_lock; /* lock for writing */
unsigned int write_flags; /* write file flags */
unsigned int read_flags; /* read file flags */
@@ -336,7 +336,21 @@ static void reset_ip_core(struct axis_fifo *fifo)
iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}
-/* reads a single packet from the fifo as dictated by the tlast signal */
+/**
+ * axis_fifo_read() - Read a packet from the AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to read into.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to check the device's
+ * occupancy before reading the length register and then the data. All these
+ * operations must be executed atomically, in order, and without anything
+ * else interleaved between them.
+ *
+ * Returns the number of bytes read from the device or a negative error code
+ * on failure.
+ */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
size_t len, loff_t *off)
{
@@ -350,36 +364,37 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
u32 tmp_buf[READ_BUF_SIZE];
if (fifo->read_flags & O_NONBLOCK) {
- /* opened in non-blocking mode
- * return if there are no packets available
+ /*
+ * Device opened in non-blocking mode. Try to lock it and then
+ * check if any packet is available.
*/
- if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET))
+ if (!mutex_trylock(&fifo->read_lock))
return -EAGAIN;
+
+ if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
+ ret = -EAGAIN;
+ goto end_unlock;
+ }
} else {
/* opened in blocking mode
* wait for a packet available interrupt (or timeout)
* if nothing is currently available
*/
- spin_lock_irq(&fifo->read_queue_lock);
- ret = wait_event_interruptible_lock_irq_timeout
- (fifo->read_queue,
- ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
- fifo->read_queue_lock,
- (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
+ mutex_lock(&fifo->read_lock);
+ ret = wait_event_interruptible_timeout(fifo->read_queue,
+ ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
+ (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
MAX_SCHEDULE_TIMEOUT);
- spin_unlock_irq(&fifo->read_queue_lock);
- if (ret == 0) {
- /* timeout occurred */
- dev_dbg(fifo->dt_device, "read timeout");
- return -EAGAIN;
- } else if (ret == -ERESTARTSYS) {
- /* signal received */
- return -ERESTARTSYS;
- } else if (ret < 0) {
- dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
- ret);
- return ret;
+ if (ret <= 0) {
+ if (ret == 0) {
+ ret = -EAGAIN;
+ } else if (ret != -ERESTARTSYS) {
+ dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
+ ret);
+ }
+
+ goto end_unlock;
}
}
@@ -387,14 +402,16 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (!bytes_available) {
dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
reset_ip_core(fifo);
- return -EIO;
+ ret = -EIO;
+ goto end_unlock;
}
if (bytes_available > len) {
dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
bytes_available, len);
reset_ip_core(fifo);
- return -EINVAL;
+ ret = -EINVAL;
+ goto end_unlock;
}
if (bytes_available % sizeof(u32)) {
@@ -403,7 +420,8 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
*/
dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
reset_ip_core(fifo);
- return -EIO;
+ ret = -EIO;
+ goto end_unlock;
}
words_available = bytes_available / sizeof(u32);
@@ -423,16 +441,37 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
copy * sizeof(u32))) {
reset_ip_core(fifo);
- return -EFAULT;
+ ret = -EFAULT;
+ goto end_unlock;
}
copied += copy;
words_available -= copy;
}
- return bytes_available;
+ ret = bytes_available;
+
+end_unlock:
+ mutex_unlock(&fifo->read_lock);
+
+ return ret;
}
+/**
+ * axis_fifo_write() - Write a buffer to the AXIS-FIFO character device.
+ * @f: Open file.
+ * @buf: User space buffer to write to the device.
+ * @len: User space buffer length.
+ * @off: Buffer offset.
+ *
+ * As defined by the device's documentation, we need to write to the device's
+ * data buffer and then to the device's packet length register atomically. We
+ * also need to lock before checking whether the device has available space,
+ * to avoid any concurrency issues.
+ *
+ * Returns the number of bytes written to the device or a negative error code
+ * on failure.
+ */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
size_t len, loff_t *off)
{
@@ -465,12 +504,17 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
}
if (fifo->write_flags & O_NONBLOCK) {
- /* opened in non-blocking mode
- * return if there is not enough room available in the fifo
+ /*
+ * Device opened in non-blocking mode. Try to lock it and then
+ * check if there is any room to write the given buffer.
*/
+ if (!mutex_trylock(&fifo->write_lock))
+ return -EAGAIN;
+
if (words_to_write > ioread32(fifo->base_addr +
XLLF_TDFV_OFFSET)) {
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto end_unlock;
}
} else {
/* opened in blocking mode */
@@ -478,30 +522,22 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
/* wait for an interrupt (or timeout) if there isn't
* currently enough room in the fifo
*/
- spin_lock_irq(&fifo->write_queue_lock);
- ret = wait_event_interruptible_lock_irq_timeout
- (fifo->write_queue,
- ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
+ mutex_lock(&fifo->write_lock);
+ ret = wait_event_interruptible_timeout(fifo->write_queue,
+ ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
>= words_to_write,
- fifo->write_queue_lock,
- (write_timeout >= 0) ?
- msecs_to_jiffies(write_timeout) :
+ (write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
MAX_SCHEDULE_TIMEOUT);
- spin_unlock_irq(&fifo->write_queue_lock);
- if (ret == 0) {
- /* timeout occurred */
- dev_dbg(fifo->dt_device, "write timeout\n");
- return -EAGAIN;
- } else if (ret == -ERESTARTSYS) {
- /* signal received */
- return -ERESTARTSYS;
- } else if (ret < 0) {
- /* unknown error */
- dev_err(fifo->dt_device,
- "wait_event_interruptible_timeout() error in write (ret=%i)\n",
- ret);
- return ret;
+ if (ret <= 0) {
+ if (ret == 0) {
+ ret = -EAGAIN;
+ } else if (ret != -ERESTARTSYS) {
+ dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
+ ret);
+ }
+
+ goto end_unlock;
}
}
@@ -515,7 +551,8 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
copy * sizeof(u32))) {
reset_ip_core(fifo);
- return -EFAULT;
+ ret = -EFAULT;
+ goto end_unlock;
}
for (i = 0; i < copy; i++)
@@ -526,10 +563,15 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
words_to_write -= copy;
}
+ ret = copied * sizeof(u32);
+
/* write packet size to fifo */
- iowrite32(copied * sizeof(u32), fifo->base_addr + XLLF_TLR_OFFSET);
+ iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);
+
+end_unlock:
+ mutex_unlock(&fifo->write_lock);
- return (ssize_t)copied * sizeof(u32);
+ return ret;
}
static irqreturn_t axis_fifo_irq(int irq, void *dw)
@@ -789,8 +831,8 @@ static int axis_fifo_probe(struct platform_device *pdev)
init_waitqueue_head(&fifo->read_queue);
init_waitqueue_head(&fifo->write_queue);
- spin_lock_init(&fifo->read_queue_lock);
- spin_lock_init(&fifo->write_queue_lock);
+ mutex_init(&fifo->read_lock);
+ mutex_init(&fifo->write_lock);
/* ----------------------------
* init device memory space
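
After the spinlock-to-mutex conversion above, the O_NONBLOCK paths have to take the lock with mutex_trylock() so a non-blocking reader or writer never sleeps on a contended mutex, while the blocking paths lock unconditionally and may then sleep in wait_event_interruptible_timeout(). The skeleton of the read side, stripped of the FIFO details:

	if (fifo->read_flags & O_NONBLOCK) {
		if (!mutex_trylock(&fifo->read_lock))
			return -EAGAIN;		/* another reader holds the lock */
	} else {
		mutex_lock(&fifo->read_lock);	/* may sleep */
	}
	/* occupancy check and FIFO reads happen under the mutex */
	mutex_unlock(&fifo->read_lock);
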
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index dbff0f7e7cf5..ddc0dc93d08b 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -46,8 +46,8 @@
#define PCI171X_RANGE_UNI BIT(4)
#define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0)
#define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0)
#define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
#define PCI171X_STATUS_REG 0x06 /* R: status register */
#define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index f99211ec46de..04e224f8b779 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -279,7 +279,7 @@ static int das6402_ai_check_chanlist(struct comedi_device *dev,
if (aref0 == AREF_DIFF && chan > (s->n_chan / 2)) {
dev_dbg(dev->class_dev,
- "chanlist differential channel to large\n");
+ "chanlist differential channel too large\n");
return -EINVAL;
}
}
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 4bdf44d82879..dc62db1ee1dd 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
&devpriv->dio_buffer_phys_addr[i],
GFP_KERNEL);
+ if (!devpriv->dio_buffer[i]) {
+ dev_warn(dev->class_dev,
+ "failed to allocate DMA buffer\n");
+ return -ENOMEM;
+ }
}
/* allocate dma descriptors */
devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
@@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
NUM_DMA_DESCRIPTORS,
&devpriv->dma_desc_phys_addr,
GFP_KERNEL);
+ if (!devpriv->dma_desc) {
+ dev_warn(dev->class_dev,
+ "failed to allocate DMA descriptors\n");
+ return -ENOMEM;
+ }
if (devpriv->dma_desc_phys_addr & 0xf) {
dev_warn(dev->class_dev,
" dma descriptors not quad-word aligned (bug)\n");
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index 673d732dcb8f..8f398b30f5bf 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family,
}
}
- if (!rv)
- return -ENODATA;
-
/* Second, find the set of routes valid for this device. */
for (i = 0; ni_device_routes_list[i]; ++i) {
if (memcmp(ni_device_routes_list[i]->device, board_name,
@@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family,
}
}
- if (!dr)
- return -ENODATA;
-
tables->route_values = rv;
tables->valid_routes = dr;
+ if (!rv || !dr)
+ return -ENODATA;
+
return 0;
}
@@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest,
{
int src;
+ if (!tables->route_values)
+ return -EINVAL;
+
dest = B(dest); /* subtract NI names offset */
/* ensure we are not going to under/over run the route value table */
if (dest < 0 || dest >= NI_NUM_NAMES)
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index 0130019cbec2..292a19dfcaf5 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -1,41 +1,41 @@
# SPDX-License-Identifier: GPL-2.0
-config EXFAT_FS
+config STAGING_EXFAT_FS
tristate "exFAT fs support"
depends on BLOCK
select NLS
help
This adds support for the exFAT file system.
-config EXFAT_DISCARD
+config STAGING_EXFAT_DISCARD
bool "enable discard support"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default y
-config EXFAT_DELAYED_SYNC
+config STAGING_EXFAT_DELAYED_SYNC
bool "enable delayed sync"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default n
-config EXFAT_KERNEL_DEBUG
+config STAGING_EXFAT_KERNEL_DEBUG
bool "enable kernel debug features via ioctl"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default n
-config EXFAT_DEBUG_MSG
+config STAGING_EXFAT_DEBUG_MSG
bool "print debug messages"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
default n
-config EXFAT_DEFAULT_CODEPAGE
+config STAGING_EXFAT_DEFAULT_CODEPAGE
int "Default codepage for exFAT"
default 437
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
help
This option should be set to the codepage of your exFAT filesystems.
-config EXFAT_DEFAULT_IOCHARSET
+config STAGING_EXFAT_DEFAULT_IOCHARSET
string "Default iocharset for exFAT"
default "utf8"
- depends on EXFAT_FS
+ depends on STAGING_EXFAT_FS
help
Set this to the default input/output character set you'd like exFAT to use.
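The rename vacates the plain EXFAT_* Kconfig namespace for the non-staging exFAT implementation headed for fs/exfat, while keeping the staging driver selectable under the STAGING_EXFAT_* names. Existing configurations need the new symbols; a hedged sketch of a minimal .config fragment (values illustrative):

    CONFIG_STAGING_EXFAT_FS=m
    CONFIG_STAGING_EXFAT_DISCARD=y
    CONFIG_STAGING_EXFAT_DEFAULT_CODEPAGE=437
    CONFIG_STAGING_EXFAT_DEFAULT_IOCHARSET="utf8"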
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
index 6c90aec83feb..057556eeca0c 100644
--- a/drivers/staging/exfat/Makefile
+++ b/drivers/staging/exfat/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-obj-$(CONFIG_EXFAT_FS) += exfat.o
+obj-$(CONFIG_STAGING_EXFAT_FS) += exfat.o
exfat-y := exfat_core.o \
exfat_super.o \
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 51c665a924b7..4d87360fab35 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
/* For Debugging Purposes */
/* IOCTL code 'f' used by
* - file systems typically #0~0x1F
@@ -22,9 +22,9 @@
#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01
#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
-#ifdef CONFIG_EXFAT_DEBUG_MSG
+#ifdef CONFIG_STAGING_EXFAT_DEBUG_MSG
#define DEBUG 1
#else
#undef DEBUG
@@ -516,49 +516,6 @@ struct buf_cache_t {
struct buffer_head *buf_bh;
};
-struct fs_func {
- s32 (*alloc_cluster)(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
- void (*free_cluster)(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
- s32 (*count_used_clusters)(struct super_block *sb);
-
- s32 (*init_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size);
- s32 (*init_ext_entry)(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
- s32 (*find_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
- void (*delete_dir_entry)(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- s32 offset, s32 num_entries);
- void (*get_uni_name_from_ext_entry)(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
- s32 (*count_ext_entries)(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- struct dentry_t *p_entry);
- s32 (*calc_num_entries)(struct uni_name_t *p_uniname);
-
- u32 (*get_entry_type)(struct dentry_t *p_entry);
- void (*set_entry_type)(struct dentry_t *p_entry, u32 type);
- u32 (*get_entry_attr)(struct dentry_t *p_entry);
- void (*set_entry_attr)(struct dentry_t *p_entry, u32 attr);
- u8 (*get_entry_flag)(struct dentry_t *p_entry);
- void (*set_entry_flag)(struct dentry_t *p_entry, u8 flag);
- u32 (*get_entry_clu0)(struct dentry_t *p_entry);
- void (*set_entry_clu0)(struct dentry_t *p_entry, u32 clu0);
- u64 (*get_entry_size)(struct dentry_t *p_entry);
- void (*set_entry_size)(struct dentry_t *p_entry, u64 size);
- void (*get_entry_time)(struct dentry_t *p_entry,
- struct timestamp_t *tp, u8 mode);
- void (*set_entry_time)(struct dentry_t *p_entry,
- struct timestamp_t *tp, u8 mode);
-};
-
struct fs_info_t {
u32 drv; /* drive ID */
u32 vol_type; /* volume FAT type */
@@ -597,7 +554,6 @@ struct fs_info_t {
u32 dev_ejected; /* block device operation error flag */
- struct fs_func *fs_func;
struct mutex v_mutex;
/* FAT cache */
@@ -661,10 +617,10 @@ struct exfat_mount_options {
/* on error: continue, panic, remount-ro */
unsigned char errors;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
/* flag on if -o discard specified and device supports discard() */
unsigned char discard;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
};
#define EXFAT_HASH_BITS 8
@@ -700,9 +656,9 @@ struct exfat_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
long debug_flags;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
};
/*
@@ -829,5 +785,40 @@ int exfat_bdev_write(struct super_block *sb, sector_t secno,
struct buffer_head *bh, u32 num_secs, bool sync);
int exfat_bdev_sync(struct super_block *sb);
+/* cluster operation functions */
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+s32 exfat_count_used_clusters(struct super_block *sb);
+
+/* dir operation functions */
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries);
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry);
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
+
+/* dir entry getter/setter */
+u32 exfat_get_entry_type(struct dentry_t *p_entry);
+u32 exfat_get_entry_attr(struct dentry_t *p_entry);
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+u8 exfat_get_entry_flag(struct dentry_t *p_entry);
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags);
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+u64 exfat_get_entry_size(struct dentry_t *p_entry);
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+
extern const u8 uni_upcase[];
#endif /* _EXFAT_H */
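Since only one implementation was ever assigned to it, the fs_func ops table was pure indirection: every call paid a pointer chase through fs_info_t, and the call graph was invisible to both the compiler and readers. Declaring the exfat_* functions directly lets call sites collapse, as the later hunks in this patch show; condensed:

    /* before: indirect call through the per-volume ops table */
    type = p_fs->fs_func->get_entry_type(ep);

    /* after: direct call, no fs_info_t lookup required */
    type = exfat_get_entry_type(ep);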
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index 7bcd98b13109..0a3dc3568293 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -31,17 +31,17 @@ void exfat_bdev_close(struct super_block *sb)
}
int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
- u32 num_secs, bool read)
+ u32 num_secs, bool read)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
struct exfat_sb_info *sbi = EXFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
return -ENODEV;
@@ -66,19 +66,19 @@ int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head *
}
int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
- u32 num_secs, bool sync)
+ u32 num_secs, bool sync)
{
s32 count;
struct buffer_head *bh2;
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
struct exfat_sb_info *sbi = EXFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
return -ENODEV;
@@ -121,13 +121,13 @@ no_bh:
int exfat_bdev_sync(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
struct exfat_sb_info *sbi = EXFAT_SB(sb);
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
return -EIO;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
return -ENODEV;
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index 794000e7bc6f..07b460d01334 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -177,11 +177,11 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
{
int i, b;
sector_t sector;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_mount_options *opts = &sbi->options;
int ret;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -192,7 +192,7 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
if (opts->discard) {
ret = sb_issue_discard(sb, START_SECTOR(clu),
(1 << p_fs->sectors_per_clu_bits),
@@ -204,7 +204,7 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
return ret;
}
}
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
return sector_write(sb, sector, p_fs->vol_amap[i], 0);
}
@@ -249,7 +249,7 @@ static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
return CLUSTER_32(~0);
}
-static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
struct chain_t *p_chain)
{
s32 num_clusters = 0;
@@ -328,7 +328,7 @@ static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
return num_clusters;
}
-static void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
s32 do_relse)
{
s32 num_clusters = 0;
@@ -434,7 +434,7 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
return count;
}
-static s32 exfat_count_used_clusters(struct super_block *sb)
+s32 exfat_count_used_clusters(struct super_block *sb)
{
int i, map_i, map_b, count = 0;
u8 k;
@@ -499,7 +499,7 @@ s32 load_alloc_bitmap(struct super_block *sb)
if (!ep)
return -ENOENT;
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+ type = exfat_get_entry_type((struct dentry_t *)ep);
if (type == TYPE_UNUSED)
break;
@@ -745,7 +745,7 @@ s32 load_upcase_table(struct super_block *sb)
if (!ep)
return -ENOENT;
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+ type = exfat_get_entry_type((struct dentry_t *)ep);
if (type == TYPE_UNUSED)
break;
@@ -787,7 +787,7 @@ void free_upcase_table(struct super_block *sb)
* Directory Entry Management Functions
*/
-static u32 exfat_get_entry_type(struct dentry_t *p_entry)
+u32 exfat_get_entry_type(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -862,56 +862,56 @@ static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
}
}
-static u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+u32 exfat_get_entry_attr(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
return (u32)GET16_A(ep->attr);
}
-static void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
SET16_A(ep->attr, (u16)attr);
}
-static u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+u8 exfat_get_entry_flag(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return ep->flags;
}
-static void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
ep->flags = flags;
}
-static u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET32_A(ep->start_clu);
}
-static void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
SET32_A(ep->start_clu, start_clu);
}
-static u64 exfat_get_entry_size(struct dentry_t *p_entry)
+u64 exfat_get_entry_size(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET64_A(ep->valid_size);
}
-static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
@@ -919,7 +919,7 @@ static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
SET64_A(ep->size, size);
}
-static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t = 0x00, d = 0x21;
@@ -948,7 +948,7 @@ static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *t
tp->year = (d >> 9);
}
-static void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t, d;
@@ -1013,7 +1013,7 @@ static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
}
static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size)
+ s32 entry, u32 type, u32 start_clu, u64 size)
{
sector_t sector;
u8 flags;
@@ -1088,20 +1088,19 @@ static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
return 0;
}
-static void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
s32 entry, s32 order, s32 num_entries)
{
int i;
sector_t sector;
struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
for (i = order; i < num_entries; i++) {
ep = get_entry_in_dir(sb, p_dir, entry + i, &sector);
if (!ep)
return;
- p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+ exfat_set_entry_type(ep, TYPE_DELETED);
exfat_buf_modify(sb, sector);
}
}
@@ -1256,7 +1255,7 @@ static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
}
static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- sector_t *sector, s32 *offset)
+ sector_t *sector, s32 *offset)
{
s32 off, ret;
u32 clu = 0;
@@ -1366,7 +1365,7 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
goto err_out;
ep = (struct dentry_t *)(buf + off);
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
goto err_out;
@@ -1396,7 +1395,7 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
* instead of copying the whole sector, we will check every entry.
* this will provide minimum stability and consistency.
*/
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
goto err_out;
@@ -1492,7 +1491,8 @@ void release_entry_set(struct entry_set_cache_t *es)
/* search EMPTY CONTINUOUS "num_entries" entries */
static s32 search_deleted_or_unused_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 num_entries)
+ struct chain_t *p_dir,
+ s32 num_entries)
{
int i, dentry, num_empty = 0;
s32 dentries_per_clu;
@@ -1539,7 +1539,7 @@ static s32 search_deleted_or_unused_entry(struct super_block *sb,
if (!ep)
return -1;
- type = p_fs->fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED) {
num_empty++;
@@ -1613,7 +1613,7 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
clu.flags = p_dir->flags;
/* (1) allocate a cluster */
- ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
+ ret = exfat_alloc_cluster(sb, 1, &clu);
if (ret < 1)
return -EIO;
@@ -1649,8 +1649,8 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
fid->entry + 1, &sector);
if (!ep)
return -ENOENT;
- p_fs->fs_func->set_entry_size(ep, size);
- p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ exfat_set_entry_size(ep, size);
+ exfat_set_entry_flag(ep, p_dir->flags);
exfat_buf_modify(sb, sector);
update_dir_checksum(sb, &fid->dir,
@@ -1668,7 +1668,7 @@ static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_
}
static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
+ s32 order)
{
int i, len = 0;
@@ -1689,7 +1689,7 @@ static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *unina
* -1 : (root dir, ".") it is the root dir itself
* -2 : entry with the name does not exist
*/
-static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 num_entries,
struct dos_name_t *p_dosname, u32 type)
{
@@ -1735,7 +1735,7 @@ static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
if (!ep)
return -2;
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
step = 1;
if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
@@ -1832,21 +1832,20 @@ static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return -2;
}
-static s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
s32 entry, struct dentry_t *p_entry)
{
int i, count = 0;
u32 type;
struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry;
struct dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) {
ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL);
if (!ext_ep)
return -1;
- type = p_fs->fs_func->get_entry_type(ext_ep);
+ type = exfat_get_entry_type(ext_ep);
if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
count++;
else
@@ -1884,7 +1883,7 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
if (!ep)
return -ENOENT;
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if (entry_type == TYPE_UNUSED)
return count;
@@ -1940,7 +1939,7 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
if (!ep)
break;
- type = p_fs->fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED)
return true;
@@ -1984,9 +1983,8 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct dos_name_t *p_dosname)
{
s32 num_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
+ num_entries = exfat_calc_num_entries(p_uniname);
if (num_entries == 0)
return -EINVAL;
@@ -1995,14 +1993,13 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
return 0;
}
-static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
struct chain_t *p_dir, s32 entry,
u16 *uniname)
{
int i;
struct dentry_t *ep;
struct entry_set_cache_t *es;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
if (!es || es->num_entries < 3) {
@@ -2020,7 +2017,7 @@ static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
* So, the index of first file-name dentry should start from 2.
*/
for (i = 2; i < es->num_entries; i++, ep++) {
- if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND)
+ if (exfat_get_entry_type(ep) == TYPE_EXTEND)
extract_uni_name_from_name_entry((struct name_dentry_t *)
ep, uniname, i);
else
@@ -2032,7 +2029,7 @@ out:
release_entry_set(es);
}
-static s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
{
s32 len;
@@ -2100,36 +2097,6 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
return 0;
}
-/*
- * File Operation Functions
- */
-static struct fs_func exfat_fs_func = {
- .alloc_cluster = exfat_alloc_cluster,
- .free_cluster = exfat_free_cluster,
- .count_used_clusters = exfat_count_used_clusters,
-
- .init_dir_entry = exfat_init_dir_entry,
- .init_ext_entry = exfat_init_ext_entry,
- .find_dir_entry = exfat_find_dir_entry,
- .delete_dir_entry = exfat_delete_dir_entry,
- .get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry,
- .count_ext_entries = exfat_count_ext_entries,
- .calc_num_entries = exfat_calc_num_entries,
-
- .get_entry_type = exfat_get_entry_type,
- .set_entry_type = exfat_set_entry_type,
- .get_entry_attr = exfat_get_entry_attr,
- .set_entry_attr = exfat_set_entry_attr,
- .get_entry_flag = exfat_get_entry_flag,
- .set_entry_flag = exfat_set_entry_flag,
- .get_entry_clu0 = exfat_get_entry_clu0,
- .set_entry_clu0 = exfat_set_entry_clu0,
- .get_entry_size = exfat_get_entry_size,
- .set_entry_size = exfat_set_entry_size,
- .get_entry_time = exfat_get_entry_time,
- .set_entry_time = exfat_set_entry_time,
-};
-
s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
{
struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb;
@@ -2173,8 +2140,6 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
p_fs->clu_srch_ptr = 2;
p_fs->used_clusters = UINT_MAX;
- p_fs->fs_func = &exfat_fs_func;
-
return 0;
}
@@ -2187,7 +2152,6 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
&dos_name);
@@ -2204,7 +2168,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
/* (1) allocate a cluster */
- ret = fs_func->alloc_cluster(sb, 1, &clu);
+ ret = exfat_alloc_cluster(sb, 1, &clu);
if (ret < 0)
return ret;
else if (ret == 0)
@@ -2218,13 +2182,13 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* (2) update the directory entry */
/* make sub-dir entry in parent directory */
- ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
- size);
+ ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
+ size);
if (ret != 0)
return ret;
- ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
@@ -2252,7 +2216,6 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
&dos_name);
@@ -2268,13 +2231,13 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
/* fill the dos name directory entry information of the created file.
* the first cluster is not determined yet. (0)
*/
- ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
- CLUSTER_32(0), 0);
+ ret = exfat_init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
+ CLUSTER_32(0), 0);
if (ret != 0)
return ret;
- ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
@@ -2301,8 +2264,6 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
sector_t sector;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
ep = get_entry_in_dir(sb, p_dir, entry, &sector);
if (!ep)
@@ -2311,7 +2272,7 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
exfat_buf_lock(sb, sector);
/* exfat_buf_lock() before call count_ext_entries() */
- num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
+ num_entries = exfat_count_ext_entries(sb, p_dir, entry, ep);
if (num_entries < 0) {
exfat_buf_unlock(sb, sector);
return;
@@ -2321,7 +2282,7 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
exfat_buf_unlock(sb, sector);
/* (1) update the directory entry */
- fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
+ exfat_delete_dir_entry(sb, p_dir, entry, 0, num_entries);
}
s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
@@ -2332,8 +2293,6 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
struct dos_name_t dos_name;
struct dentry_t *epold, *epnew;
struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
if (!epold)
@@ -2342,8 +2301,8 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
exfat_buf_lock(sb, sector_old);
/* exfat_buf_lock() before call count_ext_entries() */
- num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
- epold);
+ num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry,
+ epold);
if (num_old_entries < 0) {
exfat_buf_unlock(sb, sector_old);
return -ENOENT;
@@ -2371,10 +2330,10 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
}
memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
- fs_func->set_entry_attr(epnew,
- fs_func->get_entry_attr(epnew) |
- ATTR_ARCHIVE);
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ exfat_set_entry_attr(epnew,
+ exfat_get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
exfat_buf_modify(sb, sector_new);
@@ -2395,33 +2354,33 @@ s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
exfat_buf_modify(sb, sector_new);
exfat_buf_unlock(sb, sector_old);
- ret = fs_func->init_ext_entry(sb, p_dir, newentry,
- num_new_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, newentry,
+ num_new_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
- fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
- num_old_entries);
+ exfat_delete_dir_entry(sb, p_dir, oldentry, 0,
+ num_old_entries);
fid->entry = newentry;
} else {
- if (fs_func->get_entry_type(epold) == TYPE_FILE) {
- fs_func->set_entry_attr(epold,
- fs_func->get_entry_attr(epold) |
- ATTR_ARCHIVE);
+ if (exfat_get_entry_type(epold) == TYPE_FILE) {
+ exfat_set_entry_attr(epold,
+ exfat_get_entry_attr(epold) |
+ ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
exfat_buf_modify(sb, sector_old);
exfat_buf_unlock(sb, sector_old);
- ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
- num_new_entries, p_uniname,
- &dos_name);
+ ret = exfat_init_ext_entry(sb, p_dir, oldentry,
+ num_new_entries, p_uniname,
+ &dos_name);
if (ret != 0)
return ret;
- fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
- num_old_entries);
+ exfat_delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
+ num_old_entries);
}
return 0;
@@ -2436,23 +2395,21 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
struct dos_name_t dos_name;
struct dentry_t *epmov, *epnew;
struct super_block *sb = inode->i_sb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
if (!epmov)
return -ENOENT;
/* check if the source and target directory is the same */
- if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
- fs_func->get_entry_clu0(epmov) == p_newdir->dir)
+ if (exfat_get_entry_type(epmov) == TYPE_DIR &&
+ exfat_get_entry_clu0(epmov) == p_newdir->dir)
return -EINVAL;
exfat_buf_lock(sb, sector_mov);
/* exfat_buf_lock() before call count_ext_entries() */
- num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
- epmov);
+ num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
+ epmov);
if (num_old_entries < 0) {
exfat_buf_unlock(sb, sector_mov);
return -ENOENT;
@@ -2479,9 +2436,9 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
}
memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
- fs_func->set_entry_attr(epnew, fs_func->get_entry_attr(epnew) |
- ATTR_ARCHIVE);
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ exfat_set_entry_attr(epnew, exfat_get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
exfat_buf_modify(sb, sector_new);
@@ -2501,12 +2458,12 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
exfat_buf_modify(sb, sector_new);
exfat_buf_unlock(sb, sector_mov);
- ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
- p_uniname, &dos_name);
+ ret = exfat_init_ext_entry(sb, p_newdir, newentry, num_new_entries,
+ p_uniname, &dos_name);
if (ret != 0)
return ret;
- fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
+ exfat_delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
fid->dir.dir = p_newdir->dir;
fid->dir.size = p_newdir->size;
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 9f91853b189b..b81d2a87b82e 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -38,8 +38,8 @@
static struct kmem_cache *exfat_inode_cachep;
-static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE;
-static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET;
+static int exfat_default_codepage = CONFIG_STAGING_EXFAT_DEFAULT_CODEPAGE;
+static char exfat_default_iocharset[] = CONFIG_STAGING_EXFAT_DEFAULT_IOCHARSET;
#define INC_IVERSION(x) (inode_inc_iversion(x))
#define GET_IVERSION(x) (inode_peek_iversion_raw(x))
@@ -365,7 +365,7 @@ static int ffsMountVol(struct super_block *sb)
if (p_bd->sector_size < sb->s_blocksize) {
printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n",
- p_bd->sector_size, sb->s_blocksize);
+ p_bd->sector_size, sb->s_blocksize);
ret = -EINVAL;
goto out;
}
@@ -492,7 +492,7 @@ static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
mutex_lock(&p_fs->v_mutex);
if (p_fs->used_clusters == UINT_MAX)
- p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
+ p_fs->used_clusters = exfat_count_used_clusters(sb);
info->FatType = p_fs->vol_type;
info->ClusterSize = p_fs->cluster_size;
@@ -565,8 +565,8 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
goto out;
/* search the file name for directories */
- dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
- &dos_name, TYPE_ALL);
+ dentry = exfat_find_dir_entry(sb, &dir, &uni_name, num_entries,
+ &dos_name, TYPE_ALL);
if (dentry < -1) {
ret = -ENOENT;
goto out;
@@ -595,18 +595,18 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
}
ep2 = ep + 1;
- fid->type = p_fs->fs_func->get_entry_type(ep);
+ fid->type = exfat_get_entry_type(ep);
fid->rwoffset = 0;
fid->hint_last_off = -1;
- fid->attr = p_fs->fs_func->get_entry_attr(ep);
+ fid->attr = exfat_get_entry_attr(ep);
- fid->size = p_fs->fs_func->get_entry_size(ep2);
+ fid->size = exfat_get_entry_size(ep2);
if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
fid->start_clu = CLUSTER_32(~0);
} else {
- fid->flags = p_fs->fs_func->get_entry_flag(ep2);
- fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
+ fid->flags = exfat_get_entry_flag(ep2);
+ fid->start_clu = exfat_get_entry_clu0(ep2);
}
release_entry_set(es);
@@ -647,7 +647,7 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
/* create a new file */
ret = create_file(inode, &dir, &uni_name, mode, fid);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -886,9 +886,9 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
new_clu.flags = fid->flags;
/* (1) allocate a chain of clusters */
- num_alloced = p_fs->fs_func->alloc_cluster(sb,
- num_alloc,
- &new_clu);
+ num_alloced = exfat_alloc_cluster(sb,
+ num_alloc,
+ &new_clu);
if (num_alloced == 0)
break;
if (num_alloced < 0) {
@@ -991,24 +991,24 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
goto err_out;
ep2 = ep + 1;
- p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
- p_fs->fs_func->set_entry_attr(ep, fid->attr);
+ exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ exfat_set_entry_attr(ep, fid->attr);
if (modified) {
- if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
- p_fs->fs_func->set_entry_flag(ep2, fid->flags);
+ if (exfat_get_entry_flag(ep2) != fid->flags)
+ exfat_set_entry_flag(ep2, fid->flags);
- if (p_fs->fs_func->get_entry_size(ep2) != fid->size)
- p_fs->fs_func->set_entry_size(ep2, fid->size);
+ if (exfat_get_entry_size(ep2) != fid->size)
+ exfat_set_entry_size(ep2, fid->size);
- if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
- p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
+ if (exfat_get_entry_clu0(ep2) != fid->start_clu)
+ exfat_set_entry_clu0(ep2, fid->start_clu);
}
update_dir_checksum_with_entry_set(sb, es);
release_entry_set(es);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1108,13 +1108,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
ep2 = ep + 1;
- p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
- p_fs->fs_func->set_entry_attr(ep, fid->attr);
+ exfat_set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ exfat_set_entry_attr(ep, fid->attr);
- p_fs->fs_func->set_entry_size(ep2, new_size);
+ exfat_set_entry_size(ep2, new_size);
if (new_size == 0) {
- p_fs->fs_func->set_entry_flag(ep2, 0x01);
- p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
+ exfat_set_entry_flag(ep2, 0x01);
+ exfat_set_entry_clu0(ep2, CLUSTER_32(0));
}
update_dir_checksum_with_entry_set(sb, es);
@@ -1127,14 +1127,14 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
/* (3) free the clusters */
- p_fs->fs_func->free_cluster(sb, &clu, 0);
+ exfat_free_cluster(sb, &clu, 0);
/* hint information */
fid->hint_last_off = -1;
if (fid->rwoffset > fid->size)
fid->rwoffset = fid->size;
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1217,7 +1217,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
goto out2;
}
- if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
ret = -EPERM;
goto out2;
}
@@ -1237,7 +1237,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (!ep)
goto out;
- entry_type = p_fs->fs_func->get_entry_type(ep);
+ entry_type = exfat_get_entry_type(ep);
if (entry_type == TYPE_DIR) {
struct chain_t new_clu;
@@ -1274,15 +1274,15 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (!ep)
goto out;
- num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir,
- new_entry, ep);
+ num_entries = exfat_count_ext_entries(sb, p_dir,
+ new_entry, ep);
if (num_entries < 0)
goto out;
- p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0,
- num_entries + 1);
+ exfat_delete_dir_entry(sb, p_dir, new_entry, 0,
+ num_entries + 1);
}
out:
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1324,7 +1324,7 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
goto out;
}
- if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ if (exfat_get_entry_attr(ep) & ATTR_READONLY) {
ret = -EPERM;
goto out;
}
@@ -1338,13 +1338,13 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
clu_to_free.flags = fid->flags;
/* (2) free the clusters */
- p_fs->fs_func->free_cluster(sb, &clu_to_free, 0);
+ exfat_free_cluster(sb, &clu_to_free, 0);
fid->size = 0;
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1398,7 +1398,7 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
goto out;
}
- type = p_fs->fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
@@ -1415,12 +1415,12 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
/* set the file attribute */
fid->attr = attr;
- p_fs->fs_func->set_entry_attr(ep, attr);
+ exfat_set_entry_attr(ep, attr);
update_dir_checksum_with_entry_set(sb, es);
release_entry_set(es);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1481,7 +1481,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = count; /* propogate error upward */
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs = count;
@@ -1502,9 +1502,9 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
ep2 = ep + 1;
/* set FILE_INFO structure using the acquired struct dentry_t */
- info->Attr = p_fs->fs_func->get_entry_attr(ep);
+ info->Attr = exfat_get_entry_attr(ep);
- p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ exfat_get_entry_time(ep, &tm, TM_CREATE);
info->CreateTimestamp.Year = tm.year;
info->CreateTimestamp.Month = tm.mon;
info->CreateTimestamp.Day = tm.day;
@@ -1513,7 +1513,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
info->CreateTimestamp.Second = tm.sec;
info->CreateTimestamp.MilliSecond = 0;
- p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ exfat_get_entry_time(ep, &tm, TM_MODIFY);
info->ModifyTimestamp.Year = tm.year;
info->ModifyTimestamp.Month = tm.mon;
info->ModifyTimestamp.Day = tm.day;
@@ -1528,13 +1528,13 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
/* XXX this is very bad for exfat because the name is already included in es.
* API should be revised.
*/
- p_fs->fs_func->get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
- uni_name.name);
+ exfat_get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
+ uni_name.name);
nls_uniname_to_cstring(sb, info->Name, &uni_name);
info->NumSubdirs = 2;
- info->Size = p_fs->fs_func->get_entry_size(ep2);
+ info->Size = exfat_get_entry_size(ep2);
release_entry_set(es);
@@ -1548,7 +1548,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = count; /* propogate error upward */
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs += count;
@@ -1602,7 +1602,7 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
}
ep2 = ep + 1;
- p_fs->fs_func->set_entry_attr(ep, info->Attr);
+ exfat_set_entry_attr(ep, info->Attr);
/* set FILE_INFO structure using the acquired struct dentry_t */
tm.sec = info->CreateTimestamp.Second;
@@ -1611,7 +1611,7 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
tm.day = info->CreateTimestamp.Day;
tm.mon = info->CreateTimestamp.Month;
tm.year = info->CreateTimestamp.Year;
- p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+ exfat_set_entry_time(ep, &tm, TM_CREATE);
tm.sec = info->ModifyTimestamp.Second;
tm.min = info->ModifyTimestamp.Minute;
@@ -1619,9 +1619,9 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
tm.day = info->ModifyTimestamp.Day;
tm.mon = info->ModifyTimestamp.Month;
tm.year = info->ModifyTimestamp.Year;
- p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+ exfat_set_entry_time(ep, &tm, TM_MODIFY);
- p_fs->fs_func->set_entry_size(ep2, info->Size);
+ exfat_set_entry_size(ep2, info->Size);
update_dir_checksum_with_entry_set(sb, es);
release_entry_set(es);
@@ -1704,7 +1704,7 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
new_clu.flags = fid->flags;
/* (1) allocate a cluster */
- num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
+ num_alloced = exfat_alloc_cluster(sb, 1, &new_clu);
if (num_alloced < 0) {
ret = -EIO;
goto out;
@@ -1744,13 +1744,11 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* (3) update directory entry */
if (modified) {
- if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
- p_fs->fs_func->set_entry_flag(ep, fid->flags);
-
- if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu)
- p_fs->fs_func->set_entry_clu0(ep,
- fid->start_clu);
+ if (exfat_get_entry_flag(ep) != fid->flags)
+ exfat_set_entry_flag(ep, fid->flags);
+ if (exfat_get_entry_clu0(ep) != fid->start_clu)
+ exfat_set_entry_clu0(ep, fid->start_clu);
}
update_dir_checksum_with_entry_set(sb, es);
@@ -1804,7 +1802,7 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
ret = create_dir(inode, &dir, &uni_name, fid);
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1831,7 +1829,6 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct fs_func *fs_func = p_fs->fs_func;
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
/* check the validity of pointer parameters */
@@ -1913,7 +1910,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
ret = -ENOENT;
goto out;
}
- type = fs_func->get_entry_type(ep);
+ type = exfat_get_entry_type(ep);
if (type == TYPE_UNUSED)
break;
@@ -1922,9 +1919,9 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
continue;
exfat_buf_lock(sb, sector);
- dir_entry->Attr = fs_func->get_entry_attr(ep);
+ dir_entry->Attr = exfat_get_entry_attr(ep);
- fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ exfat_get_entry_time(ep, &tm, TM_CREATE);
dir_entry->CreateTimestamp.Year = tm.year;
dir_entry->CreateTimestamp.Month = tm.mon;
dir_entry->CreateTimestamp.Day = tm.day;
@@ -1933,7 +1930,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
dir_entry->CreateTimestamp.Second = tm.sec;
dir_entry->CreateTimestamp.MilliSecond = 0;
- fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ exfat_get_entry_time(ep, &tm, TM_MODIFY);
dir_entry->ModifyTimestamp.Year = tm.year;
dir_entry->ModifyTimestamp.Month = tm.mon;
dir_entry->ModifyTimestamp.Day = tm.day;
@@ -1946,8 +1943,8 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
sizeof(struct date_time_t));
*uni_name.name = 0x0;
- fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
- uni_name.name);
+ exfat_get_uni_name_from_ext_entry(sb, &dir, dentry,
+ uni_name.name);
nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
exfat_buf_unlock(sb, sector);
@@ -1957,7 +1954,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
goto out;
}
- dir_entry->Size = fs_func->get_entry_size(ep);
+ dir_entry->Size = exfat_get_entry_size(ep);
/* hint information */
if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */
@@ -2047,13 +2044,13 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
remove_file(inode, &dir, dentry);
/* (2) free the clusters */
- p_fs->fs_func->free_cluster(sb, &clu_to_free, 1);
+ exfat_free_cluster(sb, &clu_to_free, 1);
fid->size = 0;
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifndef CONFIG_EXFAT_DELAYED_SYNC
+#ifndef CONFIG_STAGING_EXFAT_DELAYED_SYNC
fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -2176,14 +2173,14 @@ static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = filp->f_path.dentry->d_inode;
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
unsigned int flags;
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
switch (cmd) {
case EXFAT_IOCTL_GET_VOLUME_ID:
return exfat_ioctl_volume_id(inode);
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
case EXFAT_IOC_GET_DEBUGFLAGS: {
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -2207,7 +2204,7 @@ static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
return 0;
}
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
default:
return -ENOTTY; /* Inappropriate ioctl for device */
}
@@ -3400,7 +3397,7 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",errors=panic");
else
seq_puts(m, ",errors=remount-ro");
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
if (opts->discard)
seq_puts(m, ",discard");
#endif
@@ -3481,7 +3478,7 @@ enum {
Opt_err_ro,
Opt_utf8_hack,
Opt_err,
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
Opt_discard,
#endif /* CONFIG_STAGING_EXFAT_DISCARD */
};
@@ -3501,9 +3498,9 @@ static const match_table_t exfat_tokens = {
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_utf8_hack, "utf8"},
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
{Opt_discard, "discard"},
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
{Opt_err, NULL}
};
@@ -3524,7 +3521,7 @@ static int parse_options(char *options, int silent, int *debug,
opts->iocharset = exfat_default_iocharset;
opts->casesensitive = 0;
opts->errors = EXFAT_ERRORS_RO;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
opts->discard = 0;
#endif
*debug = 0;
@@ -3595,11 +3592,11 @@ static int parse_options(char *options, int silent, int *debug,
case Opt_debug:
*debug = 1;
break;
-#ifdef CONFIG_EXFAT_DISCARD
+#ifdef CONFIG_STAGING_EXFAT_DISCARD
case Opt_discard:
opts->discard = 1;
break;
-#endif /* CONFIG_EXFAT_DISCARD */
+#endif /* CONFIG_STAGING_EXFAT_DISCARD */
case Opt_utf8_hack:
break;
default:
@@ -3803,7 +3800,7 @@ static void __exit exfat_destroy_inodecache(void)
kmem_cache_destroy(exfat_inode_cachep);
}
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
static void exfat_debug_kill_sb(struct super_block *sb)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -3831,17 +3828,17 @@ static void exfat_debug_kill_sb(struct super_block *sb)
kill_block_super(sb);
}
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
static struct file_system_type exfat_fs_type = {
.owner = THIS_MODULE,
.name = "exfat",
.mount = exfat_fs_mount,
-#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+#ifdef CONFIG_STAGING_EXFAT_KERNEL_DEBUG
.kill_sb = exfat_debug_kill_sb,
#else
.kill_sb = kill_block_super,
-#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+#endif /* CONFIG_STAGING_EXFAT_KERNEL_DEBUG */
.fs_flags = FS_REQUIRES_DEV,
};
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index cd8be80d2076..be6b50f454b4 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -303,7 +303,7 @@ static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
}
gasket_dev->bar_data[bar_num].virt_base =
- ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
+ ioremap(gasket_dev->bar_data[bar_num].phys_base,
gasket_dev->bar_data[bar_num].length_bytes);
if (!gasket_dev->bar_data[bar_num].virt_base) {
dev_err(gasket_dev->dev,
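ioremap_nocache() had become a plain alias for ioremap(), which maps MMIO uncached on every architecture, so this is a mechanical rename with no behavior change, part of the tree-wide removal of the _nocache variant. The modern form, sketched with placeholder names:

    base = ioremap(phys_base, length);   /* uncached MMIO mapping */
    if (!base)
        return -ENOMEM;                  /* mapping failed */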
diff --git a/drivers/staging/hp/hp100.c b/drivers/staging/hp/hp100.c
index 6ec78f5c602f..e2f0b58e5dfd 100644
--- a/drivers/staging/hp/hp100.c
+++ b/drivers/staging/hp/hp100.c
@@ -339,14 +339,11 @@ static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
if (sig == NULL)
goto err;
- for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
- if (!strcmp(hp100_isa_tbl[i], sig))
- break;
-
- }
+ i = match_string(hp100_isa_tbl, ARRAY_SIZE(hp100_isa_tbl), sig);
+ if (i < 0)
+ goto err;
- if (i < ARRAY_SIZE(hp100_isa_tbl))
- return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
+ return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
err:
return -ENODEV;
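match_string() folds the open-coded scan into one call: it returns the index of the first array entry that compares equal, or -EINVAL when nothing matches, so the probe can fail with a single branch instead of tracking a loop counter. A hedged sketch of the idiom (table contents are placeholders, not the real hp100 signature strings):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    static const char * const sig_tbl[] = { "SIG-A", "SIG-B", "SIG-C" };

    static int find_signature(const char *sig)
    {
        /* Index of the match, or -EINVAL if absent. */
        int i = match_string(sig_tbl, ARRAY_SIZE(sig_tbl), sig);

        return (i < 0) ? -ENODEV : i;
    }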
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 39687139a7d3..39dfe3f7f254 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -237,6 +237,12 @@ static const char * const adis16203_status_error_msgs[] = {
[ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
};
+static const struct adis_timeout adis16203_timeouts = {
+ .reset_ms = ADIS16203_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16203_STARTUP_DELAY,
+ .self_test_ms = ADIS16203_STARTUP_DELAY
+};
+
static const struct adis_data adis16203_data = {
.read_delay = 20,
.msc_ctrl_reg = ADIS16203_MSC_CTRL,
@@ -245,7 +251,7 @@ static const struct adis_data adis16203_data = {
.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16203_STARTUP_DELAY,
+ .timeouts = &adis16203_timeouts,
.status_error_msgs = adis16203_status_error_msgs,
.status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
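The adis core replaced its single startup_delay field with a struct adis_timeout holding per-operation delays, since different parts need different waits after a hard reset, a software reset, and a self-test. A driver like this one, which historically used one delay everywhere, preserves its timing by filling all three fields with the same constant, as the hunk above does:

    /* Same legacy delay for every operation keeps behavior unchanged. */
    static const struct adis_timeout adis16203_timeouts = {
        .reset_ms     = ADIS16203_STARTUP_DELAY,
        .sw_reset_ms  = ADIS16203_STARTUP_DELAY,
        .self_test_ms = ADIS16203_STARTUP_DELAY,
    };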
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index a480409090c0..39eb8364aa95 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -359,6 +359,12 @@ static const char * const adis16240_status_error_msgs[] = {
[ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
};
+static const struct adis_timeout adis16240_timeouts = {
+ .reset_ms = ADIS16240_STARTUP_DELAY,
+ .sw_reset_ms = ADIS16240_STARTUP_DELAY,
+ .self_test_ms = ADIS16240_STARTUP_DELAY,
+};
+
static const struct adis_data adis16240_data = {
.write_delay = 35,
.read_delay = 35,
@@ -368,7 +374,7 @@ static const struct adis_data adis16240_data = {
.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
.self_test_no_autoclear = true,
- .startup_delay = ADIS16240_STARTUP_DELAY,
+ .timeouts = &adis16240_timeouts,
.status_error_msgs = adis16240_status_error_msgs,
.status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
@@ -399,6 +405,13 @@ static int adis16240_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
ret = adis_init(st, indio_dev, spi, &adis16240_data);
if (ret)
return ret;
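Besides the same timeout conversion as adis16203, this hunk pins the SPI mode in probe: the ADIS16240 requires mode 3 (CPOL=1, CPHA=1), and spi_setup() must be called after modifying spi->mode so the controller actually applies it, before adis_init() issues the first transfer. The pattern, condensed:

    spi->mode = SPI_MODE_3;   /* CPOL=1, CPHA=1, as the part requires */
    ret = spi_setup(spi);
    if (ret)
        return ret;           /* fail probe before any bus traffic */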
diff --git a/drivers/staging/isdn/Kconfig b/drivers/staging/isdn/Kconfig
deleted file mode 100644
index faaf63887094..000000000000
--- a/drivers/staging/isdn/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "ISDN CAPI drivers"
- depends on ISDN_CAPI
-
-source "drivers/staging/isdn/avm/Kconfig"
-
-source "drivers/staging/isdn/gigaset/Kconfig"
-
-source "drivers/staging/isdn/hysdn/Kconfig"
-
-endmenu
-
diff --git a/drivers/staging/isdn/Makefile b/drivers/staging/isdn/Makefile
deleted file mode 100644
index 025504bae5df..000000000000
--- a/drivers/staging/isdn/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the kernel ISDN subsystem and device drivers.
-
-# Object files in subdirectories
-
-obj-$(CONFIG_CAPI_AVM) += avm/
-obj-$(CONFIG_HYSDN) += hysdn/
-obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/staging/isdn/TODO b/drivers/staging/isdn/TODO
deleted file mode 100644
index 9210d11eb68b..000000000000
--- a/drivers/staging/isdn/TODO
+++ /dev/null
@@ -1,22 +0,0 @@
-TODO: Remove in late 2019 unless there are users
-
-
-I tried to find any indication of whether the capi drivers are
-still in use, and have not found anything from a long time ago.
-
-With public ISDN networks almost completely shut down over the past 12
-months, there is very little you can actually do with this hardware. The
-main remaining use case would be to connect ISDN voice phones to an
-in-house installation with Asterisk or LCR, but anyone trying this in
-turn seems to be using either the mISDN driver stack, or out-of-tree
-drivers from the hardware vendors.
-
-I may of course have missed something, so I would suggest moving
-these into drivers/staging/ just in case someone still uses one
-of the three remaining in-kernel drivers (avm, hysdn, gigaset).
-
-If nobody complains, we can remove them entirely in six months,
-or otherwise move the core code and any drivers that are still
-needed back into drivers/isdn.
-
- Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/staging/isdn/avm/Kconfig b/drivers/staging/isdn/avm/Kconfig
deleted file mode 100644
index 81483db067bb..000000000000
--- a/drivers/staging/isdn/avm/Kconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# ISDN AVM drivers
-#
-
-menuconfig CAPI_AVM
- bool "Active AVM cards"
- help
- Enable support for AVM active ISDN cards.
-
-if CAPI_AVM
-
-config ISDN_DRV_AVMB1_B1ISA
- tristate "AVM B1 ISA support"
- depends on ISA
- help
- Enable support for the ISA version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_B1PCI
- tristate "AVM B1 PCI support"
- depends on PCI
- help
- Enable support for the PCI version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_B1PCIV4
- bool "AVM B1 PCI V4 support"
- depends on ISDN_DRV_AVMB1_B1PCI
- help
- Enable support for the V4 version of AVM B1 PCI card.
-
-config ISDN_DRV_AVMB1_T1ISA
- tristate "AVM T1/T1-B ISA support"
- depends on ISA
- help
- Enable support for the AVM T1 T1B card.
- Note: This is a PRI card and handle 30 B-channels.
-
-config ISDN_DRV_AVMB1_B1PCMCIA
- tristate "AVM B1/M1/M2 PCMCIA support"
- depends on PCMCIA
- help
- Enable support for the PCMCIA version of the AVM B1 card.
-
-config ISDN_DRV_AVMB1_AVM_CS
- tristate "AVM B1/M1/M2 PCMCIA cs module"
- depends on ISDN_DRV_AVMB1_B1PCMCIA
- help
- Enable the PCMCIA client driver for the AVM B1/M1/M2
- PCMCIA cards.
-
-config ISDN_DRV_AVMB1_T1PCI
- tristate "AVM T1/T1-B PCI support"
- depends on PCI
- help
- Enable support for the AVM T1 T1B card.
- Note: This is a PRI card and handle 30 B-channels.
-
-config ISDN_DRV_AVMB1_C4
- tristate "AVM C4/C2 support"
- depends on PCI
- help
- Enable support for the AVM C4/C2 PCI cards.
- These cards handle 4/2 BRI ISDN lines (8/4 channels).
-
-endif # CAPI_AVM
diff --git a/drivers/staging/isdn/avm/Makefile b/drivers/staging/isdn/avm/Makefile
deleted file mode 100644
index 3830a0573fcc..000000000000
--- a/drivers/staging/isdn/avm/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the AVM ISDN device drivers
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1ISA) += b1isa.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCI) += b1pci.o b1.o b1dma.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_B1PCMCIA) += b1pcmcia.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_AVM_CS) += avm_cs.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_T1ISA) += t1isa.o b1.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_T1PCI) += t1pci.o b1.o b1dma.o
-obj-$(CONFIG_ISDN_DRV_AVMB1_C4) += c4.o b1.o
diff --git a/drivers/staging/isdn/avm/avm_cs.c b/drivers/staging/isdn/avm/avm_cs.c
deleted file mode 100644
index 62b8030ee331..000000000000
--- a/drivers/staging/isdn/avm/avm_cs.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/* $Id: avm_cs.c,v 1.4.6.3 2001/09/23 22:24:33 kai Exp $
- *
- * A PCMCIA client driver for AVM B1/M1/M2
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-
-#include <linux/skbuff.h>
-#include <linux/capi.h>
-#include <linux/b1lli.h>
-#include <linux/b1pcmcia.h>
-
-/*====================================================================*/
-
-MODULE_DESCRIPTION("CAPI4Linux: PCMCIA client driver for AVM B1/M1/M2");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/*====================================================================*/
-
-static int avmcs_config(struct pcmcia_device *link);
-static void avmcs_release(struct pcmcia_device *link);
-static void avmcs_detach(struct pcmcia_device *p_dev);
-
-static int avmcs_probe(struct pcmcia_device *p_dev)
-{
- /* General socket configuration */
- p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
- p_dev->config_index = 1;
- p_dev->config_regs = PRESENT_OPTION;
-
- return avmcs_config(p_dev);
-} /* avmcs_attach */
-
-
-static void avmcs_detach(struct pcmcia_device *link)
-{
- avmcs_release(link);
-} /* avmcs_detach */
-
-static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
-{
- p_dev->resource[0]->end = 16;
- p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-
- return pcmcia_request_io(p_dev);
-}
-
-static int avmcs_config(struct pcmcia_device *link)
-{
- int i = -1;
- char devname[128];
- int cardtype;
- int (*addcard)(unsigned int port, unsigned irq);
-
- devname[0] = 0;
- if (link->prod_id[1])
- strlcpy(devname, link->prod_id[1], sizeof(devname));
-
- /*
- * find IO port
- */
- if (pcmcia_loop_config(link, avmcs_configcheck, NULL))
- return -ENODEV;
-
- do {
- if (!link->irq) {
- /* undo */
- pcmcia_disable_device(link);
- break;
- }
-
- /*
- * configure the PCMCIA socket
- */
- i = pcmcia_enable_device(link);
- if (i != 0) {
- pcmcia_disable_device(link);
- break;
- }
-
- } while (0);
-
- if (devname[0]) {
- char *s = strrchr(devname, ' ');
- if (!s)
- s = devname;
- else s++;
- if (strcmp("M1", s) == 0) {
- cardtype = AVM_CARDTYPE_M1;
- } else if (strcmp("M2", s) == 0) {
- cardtype = AVM_CARDTYPE_M2;
- } else {
- cardtype = AVM_CARDTYPE_B1;
- }
- } else
- cardtype = AVM_CARDTYPE_B1;
-
- /* If any step failed, release any partially configured state */
- if (i != 0) {
- avmcs_release(link);
- return -ENODEV;
- }
-
-
- switch (cardtype) {
- case AVM_CARDTYPE_M1: addcard = b1pcmcia_addcard_m1; break;
- case AVM_CARDTYPE_M2: addcard = b1pcmcia_addcard_m2; break;
- default:
- case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
- }
- if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) {
- dev_err(&link->dev,
- "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
- (unsigned int) link->resource[0]->start, link->irq);
- avmcs_release(link);
- return -ENODEV;
- }
- return 0;
-
-} /* avmcs_config */
-
-
-static void avmcs_release(struct pcmcia_device *link)
-{
- b1pcmcia_delcard(link->resource[0]->start, link->irq);
- pcmcia_disable_device(link);
-} /* avmcs_release */
-
-
-static const struct pcmcia_device_id avmcs_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
- PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M1", 0x95d42008, 0x81e10430),
- PCMCIA_DEVICE_PROD_ID12("AVM", "Mobile ISDN-Controller M2", 0x95d42008, 0x18e8558a),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, avmcs_ids);
-
-static struct pcmcia_driver avmcs_driver = {
- .owner = THIS_MODULE,
- .name = "avm_cs",
- .probe = avmcs_probe,
- .remove = avmcs_detach,
- .id_table = avmcs_ids,
-};
-module_pcmcia_driver(avmcs_driver);
diff --git a/drivers/staging/isdn/avm/avmcard.h b/drivers/staging/isdn/avm/avmcard.h
deleted file mode 100644
index cdfa89c71997..000000000000
--- a/drivers/staging/isdn/avm/avmcard.h
+++ /dev/null
@@ -1,581 +0,0 @@
-/* $Id: avmcard.h,v 1.1.4.1.2.1 2001/12/21 15:00:17 kai Exp $
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _AVMCARD_H_
-#define _AVMCARD_H_
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-
-#define AVMB1_PORTLEN 0x1f
-#define AVM_MAXVERSION 8
-#define AVM_NCCI_PER_CHANNEL 4
-
-/*
- * Versions
- */
-
-#define VER_DRIVER 0
-#define VER_CARDTYPE 1
-#define VER_HWID 2
-#define VER_SERIAL 3
-#define VER_OPTION 4
-#define VER_PROTO 5
-#define VER_PROFILE 6
-#define VER_CAPI 7
-
-enum avmcardtype {
- avm_b1isa,
- avm_b1pci,
- avm_b1pcmcia,
- avm_m1,
- avm_m2,
- avm_t1isa,
- avm_t1pci,
- avm_c4,
- avm_c2
-};
-
-typedef struct avmcard_dmabuf {
- long size;
- u8 *dmabuf;
- dma_addr_t dmaaddr;
-} avmcard_dmabuf;
-
-typedef struct avmcard_dmainfo {
- u32 recvlen;
- avmcard_dmabuf recvbuf;
-
- avmcard_dmabuf sendbuf;
- struct sk_buff_head send_queue;
-
- struct pci_dev *pcidev;
-} avmcard_dmainfo;
-
-typedef struct avmctrl_info {
- char cardname[32];
-
- int versionlen;
- char versionbuf[1024];
- char *version[AVM_MAXVERSION];
-
- char infobuf[128]; /* for function procinfo */
-
- struct avmcard *card;
- struct capi_ctr capi_ctrl;
-
- struct list_head ncci_head;
-} avmctrl_info;
-
-typedef struct avmcard {
- char name[32];
-
- spinlock_t lock;
- unsigned int port;
- unsigned irq;
- unsigned long membase;
- enum avmcardtype cardtype;
- unsigned char revision;
- unsigned char class;
- int cardnr; /* for t1isa */
-
- char msgbuf[128]; /* capimsg msg part */
- char databuf[2048]; /* capimsg data part */
-
- void __iomem *mbase;
- volatile u32 csr;
- avmcard_dmainfo *dma;
-
- struct avmctrl_info *ctrlinfo;
-
- u_int nr_controllers;
- u_int nlogcontr;
- struct list_head list;
-} avmcard;
-
-extern int b1_irq_table[16];
-
-/*
- * LLI Messages to the ISDN Controller
- */
-
-#define SEND_POLL 0x72 /*
- * after load <- RECEIVE_POLL
- */
-#define SEND_INIT 0x11 /*
- * first message <- RECEIVE_INIT
- * int32 NumApplications int32
- * NumNCCIs int32 BoardNumber
- */
-#define SEND_REGISTER 0x12 /*
- * register an application int32
- * ApplID int32 NumMessages
- * int32 NumB3Connections int32
- * NumB3Blocks int32 B3Size
- *
- * NumB3Connections != 0 &&
- * NumB3Blocks >= 1 && B3Size >= 1
- */
-#define SEND_RELEASE 0x14 /*
- * deregister an application int32
- * ApplID
- */
-#define SEND_MESSAGE 0x15 /*
- * send capi-message int32 length
- * capi-data ...
- */
-#define SEND_DATA_B3_REQ 0x13 /*
- * send capi-data-message int32
- * MsgLength capi-data ... int32
- * B3Length data ....
- */
-
-#define SEND_CONFIG 0x21
-
-#define SEND_POLLACK 0x73 /* T1 Watchdog */
-
-/*
- * LLI Messages from the ISDN Controller
- */
-
-#define RECEIVE_POLL 0x32 /*
- * <- after SEND_POLL
- */
-#define RECEIVE_INIT 0x27 /*
- * <- after SEND_INIT int32 length
- * byte total length b1struct board
- * driver revision b1struct card
- * type b1struct reserved b1struct
- * serial number b1struct driver
- * capability b1struct d-channel
- * protocol b1struct CAPI-2.0
- * profile b1struct capi version
- */
-#define RECEIVE_MESSAGE 0x21 /*
- * <- after SEND_MESSAGE int32
- * ApplID int32 Length capi-data
- * ....
- */
-#define RECEIVE_DATA_B3_IND 0x22 /*
- * received data int32 ApplID
- * int32 Length capi-data ...
- * int32 B3Length data ...
- */
-#define RECEIVE_START 0x23 /*
- * Handshake
- */
-#define RECEIVE_STOP 0x24 /*
- * Handshake
- */
-#define RECEIVE_NEW_NCCI 0x25 /*
- * int32 ApplID int32 NCCI int32
- * WindowSize
- */
-#define RECEIVE_FREE_NCCI 0x26 /*
- * int32 ApplID int32 NCCI
- */
-#define RECEIVE_RELEASE 0x26 /*
- * int32 ApplID int32 0xffffffff
- */
-#define RECEIVE_TASK_READY 0x31 /*
- * int32 tasknr
- * int32 Length Taskname ...
- */
-#define RECEIVE_DEBUGMSG 0x71 /*
- * int32 Length message
- *
- */
-#define RECEIVE_POLLDWORD 0x75 /* t1pci in dword mode */
-
-#define WRITE_REGISTER 0x00
-#define READ_REGISTER 0x01
-
-/*
- * port offsets
- */
-
-#define B1_READ 0x00
-#define B1_WRITE 0x01
-#define B1_INSTAT 0x02
-#define B1_OUTSTAT 0x03
-#define B1_ANALYSE 0x04
-#define B1_REVISION 0x05
-#define B1_RESET 0x10
-
-
-#define B1_STAT0(cardtype) ((cardtype) == avm_m1 ? 0x81200000l : 0x80A00000l)
-#define B1_STAT1(cardtype) (0x80E00000l)
-
-/* ---------------------------------------------------------------- */
-
-static inline unsigned char b1outp(unsigned int base,
- unsigned short offset,
- unsigned char value)
-{
- outb(value, base + offset);
- return inb(base + B1_ANALYSE);
-}
-
-
-static inline int b1_rx_full(unsigned int base)
-{
- return inb(base + B1_INSTAT) & 0x1;
-}
-
-static inline unsigned char b1_get_byte(unsigned int base)
-{
- unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
- while (!b1_rx_full(base) && time_before(jiffies, stop));
- if (b1_rx_full(base))
- return inb(base + B1_READ);
- printk(KERN_CRIT "b1lli(0x%x): rx not full after 1 second\n", base);
- return 0;
-}
-
-static inline unsigned int b1_get_word(unsigned int base)
-{
- unsigned int val = 0;
- val |= b1_get_byte(base);
- val |= (b1_get_byte(base) << 8);
- val |= (b1_get_byte(base) << 16);
- val |= (b1_get_byte(base) << 24);
- return val;
-}
-
-static inline int b1_tx_empty(unsigned int base)
-{
- return inb(base + B1_OUTSTAT) & 0x1;
-}
-
-static inline void b1_put_byte(unsigned int base, unsigned char val)
-{
- while (!b1_tx_empty(base));
- b1outp(base, B1_WRITE, val);
-}
-
-static inline int b1_save_put_byte(unsigned int base, unsigned char val)
-{
- unsigned long stop = jiffies + 2 * HZ;
- while (!b1_tx_empty(base) && time_before(jiffies, stop));
- if (!b1_tx_empty(base)) return -1;
- b1outp(base, B1_WRITE, val);
- return 0;
-}
-
-static inline void b1_put_word(unsigned int base, unsigned int val)
-{
- b1_put_byte(base, val & 0xff);
- b1_put_byte(base, (val >> 8) & 0xff);
- b1_put_byte(base, (val >> 16) & 0xff);
- b1_put_byte(base, (val >> 24) & 0xff);
-}
-
-static inline unsigned int b1_get_slice(unsigned int base,
- unsigned char *dp)
-{
- unsigned int len, i;
-
- len = i = b1_get_word(base);
- while (i-- > 0) *dp++ = b1_get_byte(base);
- return len;
-}
-
-static inline void b1_put_slice(unsigned int base,
- unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- b1_put_word(base, i);
- while (i-- > 0)
- b1_put_byte(base, *dp++);
-}
-
-static inline void b1_wr_reg(unsigned int base,
- unsigned int reg,
- unsigned int value)
-{
- b1_put_byte(base, WRITE_REGISTER);
- b1_put_word(base, reg);
- b1_put_word(base, value);
-}
-
-static inline unsigned int b1_rd_reg(unsigned int base,
- unsigned int reg)
-{
- b1_put_byte(base, READ_REGISTER);
- b1_put_word(base, reg);
- return b1_get_word(base);
-}
-
-static inline void b1_reset(unsigned int base)
-{
- b1outp(base, B1_RESET, 0);
- mdelay(55 * 2); /* 2 TIC's */
-
- b1outp(base, B1_RESET, 1);
- mdelay(55 * 2); /* 2 TIC's */
-
- b1outp(base, B1_RESET, 0);
- mdelay(55 * 2); /* 2 TIC's */
-}
-
-static inline unsigned char b1_disable_irq(unsigned int base)
-{
- return b1outp(base, B1_INSTAT, 0x00);
-}
-
-/* ---------------------------------------------------------------- */
-
-static inline void b1_set_test_bit(unsigned int base,
- enum avmcardtype cardtype,
- int onoff)
-{
- b1_wr_reg(base, B1_STAT0(cardtype), onoff ? 0x21 : 0x20);
-}
-
-static inline int b1_get_test_bit(unsigned int base,
- enum avmcardtype cardtype)
-{
- return (b1_rd_reg(base, B1_STAT0(cardtype)) & 0x01) != 0;
-}
-
-/* ---------------------------------------------------------------- */
-
-#define T1_FASTLINK 0x00
-#define T1_SLOWLINK 0x08
-
-#define T1_READ B1_READ
-#define T1_WRITE B1_WRITE
-#define T1_INSTAT B1_INSTAT
-#define T1_OUTSTAT B1_OUTSTAT
-#define T1_IRQENABLE 0x05
-#define T1_FIFOSTAT 0x06
-#define T1_RESETLINK 0x10
-#define T1_ANALYSE 0x11
-#define T1_IRQMASTER 0x12
-#define T1_IDENT 0x17
-#define T1_RESETBOARD 0x1f
-
-#define T1F_IREADY 0x01
-#define T1F_IHALF 0x02
-#define T1F_IFULL 0x04
-#define T1F_IEMPTY 0x08
-#define T1F_IFLAGS 0xF0
-
-#define T1F_OREADY 0x10
-#define T1F_OHALF 0x20
-#define T1F_OEMPTY 0x40
-#define T1F_OFULL 0x80
-#define T1F_OFLAGS 0xF0
-
-/* there are HEMA cards with 1k and 4k FIFO out */
-#define FIFO_OUTBSIZE 256
-#define FIFO_INPBSIZE 512
-
-#define HEMA_VERSION_ID 0
-#define HEMA_PAL_ID 0
-
-static inline void t1outp(unsigned int base,
- unsigned short offset,
- unsigned char value)
-{
- outb(value, base + offset);
-}
-
-static inline unsigned char t1inp(unsigned int base,
- unsigned short offset)
-{
- return inb(base + offset);
-}
-
-static inline int t1_isfastlink(unsigned int base)
-{
- return (inb(base + T1_IDENT) & ~0x82) == 1;
-}
-
-static inline unsigned char t1_fifostatus(unsigned int base)
-{
- return inb(base + T1_FIFOSTAT);
-}
-
-static inline unsigned int t1_get_slice(unsigned int base,
- unsigned char *dp)
-{
- unsigned int len, i;
-#ifdef FASTLINK_DEBUG
- unsigned wcnt = 0, bcnt = 0;
-#endif
-
- len = i = b1_get_word(base);
- if (t1_isfastlink(base)) {
- int status;
- while (i > 0) {
- status = t1_fifostatus(base) & (T1F_IREADY | T1F_IHALF);
- if (i >= FIFO_INPBSIZE) status |= T1F_IFULL;
-
- switch (status) {
- case T1F_IREADY | T1F_IHALF | T1F_IFULL:
- insb(base + B1_READ, dp, FIFO_INPBSIZE);
- dp += FIFO_INPBSIZE;
- i -= FIFO_INPBSIZE;
-#ifdef FASTLINK_DEBUG
- wcnt += FIFO_INPBSIZE;
-#endif
- break;
- case T1F_IREADY | T1F_IHALF:
- insb(base + B1_READ, dp, i);
-#ifdef FASTLINK_DEBUG
- wcnt += i;
-#endif
- dp += i;
- i = 0;
- break;
- default:
- *dp++ = b1_get_byte(base);
- i--;
-#ifdef FASTLINK_DEBUG
- bcnt++;
-#endif
- break;
- }
- }
-#ifdef FASTLINK_DEBUG
- if (wcnt)
- printk(KERN_DEBUG "b1lli(0x%x): get_slice l=%d w=%d b=%d\n",
- base, len, wcnt, bcnt);
-#endif
- } else {
- while (i-- > 0)
- *dp++ = b1_get_byte(base);
- }
- return len;
-}
-
-static inline void t1_put_slice(unsigned int base,
- unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- b1_put_word(base, i);
- if (t1_isfastlink(base)) {
- int status;
- while (i > 0) {
- status = t1_fifostatus(base) & (T1F_OREADY | T1F_OHALF);
- if (i >= FIFO_OUTBSIZE) status |= T1F_OEMPTY;
- switch (status) {
- case T1F_OREADY | T1F_OHALF | T1F_OEMPTY:
- outsb(base + B1_WRITE, dp, FIFO_OUTBSIZE);
- dp += FIFO_OUTBSIZE;
- i -= FIFO_OUTBSIZE;
- break;
- case T1F_OREADY | T1F_OHALF:
- outsb(base + B1_WRITE, dp, i);
- dp += i;
- i = 0;
- break;
- default:
- b1_put_byte(base, *dp++);
- i--;
- break;
- }
- }
- } else {
- while (i-- > 0)
- b1_put_byte(base, *dp++);
- }
-}
-
-static inline void t1_disable_irq(unsigned int base)
-{
- t1outp(base, T1_IRQMASTER, 0x00);
-}
-
-static inline void t1_reset(unsigned int base)
-{
- /* reset T1 Controller */
- b1_reset(base);
- /* disable irq on HEMA */
- t1outp(base, B1_INSTAT, 0x00);
- t1outp(base, B1_OUTSTAT, 0x00);
- t1outp(base, T1_IRQMASTER, 0x00);
- /* reset HEMA board configuration */
- t1outp(base, T1_RESETBOARD, 0xf);
-}
-
-static inline void b1_setinterrupt(unsigned int base, unsigned irq,
- enum avmcardtype cardtype)
-{
- switch (cardtype) {
- case avm_t1isa:
- t1outp(base, B1_INSTAT, 0x00);
- t1outp(base, B1_INSTAT, 0x02);
- t1outp(base, T1_IRQMASTER, 0x08);
- break;
- case avm_b1isa:
- b1outp(base, B1_INSTAT, 0x00);
- b1outp(base, B1_RESET, b1_irq_table[irq]);
- b1outp(base, B1_INSTAT, 0x02);
- break;
- default:
- case avm_m1:
- case avm_m2:
- case avm_b1pci:
- b1outp(base, B1_INSTAT, 0x00);
- b1outp(base, B1_RESET, 0xf0);
- b1outp(base, B1_INSTAT, 0x02);
- break;
- case avm_c4:
- case avm_t1pci:
- b1outp(base, B1_RESET, 0xf0);
- break;
- }
-}
-
-/* b1.c */
-avmcard *b1_alloc_card(int nr_controllers);
-void b1_free_card(avmcard *card);
-int b1_detect(unsigned int base, enum avmcardtype cardtype);
-void b1_getrevision(avmcard *card);
-int b1_load_t4file(avmcard *card, capiloaddatapart *t4file);
-int b1_load_config(avmcard *card, capiloaddatapart *config);
-int b1_loaded(avmcard *card);
-
-int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
-void b1_reset_ctr(struct capi_ctr *ctrl);
-void b1_register_appl(struct capi_ctr *ctrl, u16 appl,
- capi_register_params *rp);
-void b1_release_appl(struct capi_ctr *ctrl, u16 appl);
-u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-void b1_parse_version(avmctrl_info *card);
-irqreturn_t b1_interrupt(int interrupt, void *devptr);
-
-int b1_proc_show(struct seq_file *m, void *v);
-
-avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
- long rsize, long ssize);
-void avmcard_dma_free(avmcard_dmainfo *);
-
-/* b1dma.c */
-int b1pciv4_detect(avmcard *card);
-int t1pci_detect(avmcard *card);
-void b1dma_reset(avmcard *card);
-irqreturn_t b1dma_interrupt(int interrupt, void *devptr);
-
-int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data);
-void b1dma_reset_ctr(struct capi_ctr *ctrl);
-void b1dma_remove_ctr(struct capi_ctr *ctrl);
-void b1dma_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp);
-void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
-u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-int b1dma_proc_show(struct seq_file *m, void *v);
-
-#endif /* _AVMCARD_H_ */
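The header's helpers pin down the B1's byte-oriented wire format: b1_put_word() emits a 32-bit value least-significant byte first through the single data port, and b1_put_slice() frames a buffer as that length word followed by the raw payload bytes. A minimal sketch of the same framing, written into a plain memory buffer rather than through port I/O; the names below (put_u32_le, put_slice) are illustrative, not driver API:

/* Sketch only: mirrors the b1_put_word()/b1_put_slice() framing
 * into a memory buffer instead of the card's I/O ports. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t put_u32_le(uint8_t *buf, uint32_t val)
{
	buf[0] = val & 0xff;		/* least-significant byte first */
	buf[1] = (val >> 8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
	return 4;
}

/* A "slice" is a length word followed by the raw payload bytes. */
static size_t put_slice(uint8_t *buf, const uint8_t *data, uint32_t len)
{
	size_t off = put_u32_le(buf, len);

	memcpy(buf + off, data, len);
	return off + len;
}

This is also the layout the DMA variant builds in memory with its _put_word()/_put_slice() helpers in b1dma.c below; only the transport differs.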
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
deleted file mode 100644
index 32ec8cf31fd0..000000000000
--- a/drivers/staging/isdn/avm/b1.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/* $Id: b1.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Common module for AVM B1 cards.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Common support for active AVM cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-int b1_irq_table[16] =
-{0,
- 0,
- 0,
- 192, /* irq 3 */
- 32, /* irq 4 */
- 160, /* irq 5 */
- 96, /* irq 6 */
- 224, /* irq 7 */
- 0,
- 64, /* irq 9 */
- 80, /* irq 10 */
- 208, /* irq 11 */
- 48, /* irq 12 */
- 0,
- 0,
- 112, /* irq 15 */
-};
-
-/* ------------------------------------------------------------- */
-
-avmcard *b1_alloc_card(int nr_controllers)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int i;
-
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return NULL;
-
- cinfo = kcalloc(nr_controllers, sizeof(*cinfo), GFP_KERNEL);
- if (!cinfo) {
- kfree(card);
- return NULL;
- }
-
- card->ctrlinfo = cinfo;
- for (i = 0; i < nr_controllers; i++) {
- INIT_LIST_HEAD(&cinfo[i].ncci_head);
- cinfo[i].card = card;
- }
- spin_lock_init(&card->lock);
- card->nr_controllers = nr_controllers;
-
- return card;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1_free_card(avmcard *card)
-{
- kfree(card->ctrlinfo);
- kfree(card);
-}
-
-/* ------------------------------------------------------------- */
-
-int b1_detect(unsigned int base, enum avmcardtype cardtype)
-{
- int onoff, i;
-
- /*
- * Status register 0000 00xx
- */
- if ((inb(base + B1_INSTAT) & 0xfc)
- || (inb(base + B1_OUTSTAT) & 0xfc))
- return 1;
- /*
- * Status register 0000 001x
- */
- b1outp(base, B1_INSTAT, 0x2); /* enable irq */
- /* b1outp(base, B1_OUTSTAT, 0x2); */
- if ((inb(base + B1_INSTAT) & 0xfe) != 0x2
- /* || (inb(base + B1_OUTSTAT) & 0xfe) != 0x2 */)
- return 2;
- /*
- * Status register 0000 000x
- */
- b1outp(base, B1_INSTAT, 0x0); /* disable irq */
- b1outp(base, B1_OUTSTAT, 0x0);
- if ((inb(base + B1_INSTAT) & 0xfe)
- || (inb(base + B1_OUTSTAT) & 0xfe))
- return 3;
-
- for (onoff = !0, i = 0; i < 10; i++) {
- b1_set_test_bit(base, cardtype, onoff);
- if (b1_get_test_bit(base, cardtype) != onoff)
- return 4;
- onoff = !onoff;
- }
-
- if (cardtype == avm_m1)
- return 0;
-
- if ((b1_rd_reg(base, B1_STAT1(cardtype)) & 0x0f) != 0x01)
- return 5;
-
- return 0;
-}
-
-void b1_getrevision(avmcard *card)
-{
- card->class = inb(card->port + B1_ANALYSE);
- card->revision = inb(card->port + B1_REVISION);
-}
-
-#define FWBUF_SIZE 256
-int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
-{
- unsigned char buf[FWBUF_SIZE];
- unsigned char *dp;
- int i, left;
- unsigned int base = card->port;
-
- dp = t4file->data;
- left = t4file->len;
- while (left > FWBUF_SIZE) {
- if (t4file->user) {
- if (copy_from_user(buf, dp, FWBUF_SIZE))
- return -EFAULT;
- } else {
- memcpy(buf, dp, FWBUF_SIZE);
- }
- for (i = 0; i < FWBUF_SIZE; i++)
- if (b1_save_put_byte(base, buf[i]) < 0) {
- printk(KERN_ERR "%s: corrupted firmware file?\n",
- card->name);
- return -EIO;
- }
- left -= FWBUF_SIZE;
- dp += FWBUF_SIZE;
- }
- if (left) {
- if (t4file->user) {
- if (copy_from_user(buf, dp, left))
- return -EFAULT;
- } else {
- memcpy(buf, dp, left);
- }
- for (i = 0; i < left; i++)
- if (b1_save_put_byte(base, buf[i]) < 0) {
- printk(KERN_ERR "%s: corrupted firmware file?\n",
- card->name);
- return -EIO;
- }
- }
- return 0;
-}
-
-int b1_load_config(avmcard *card, capiloaddatapart *config)
-{
- unsigned char buf[FWBUF_SIZE];
- unsigned char *dp;
- unsigned int base = card->port;
- int i, j, left;
-
- dp = config->data;
- left = config->len;
- if (left) {
- b1_put_byte(base, SEND_CONFIG);
- b1_put_word(base, 1);
- b1_put_byte(base, SEND_CONFIG);
- b1_put_word(base, left);
- }
- while (left > FWBUF_SIZE) {
- if (config->user) {
- if (copy_from_user(buf, dp, FWBUF_SIZE))
- return -EFAULT;
- } else {
- memcpy(buf, dp, FWBUF_SIZE);
- }
- for (i = 0; i < FWBUF_SIZE; ) {
- b1_put_byte(base, SEND_CONFIG);
- for (j = 0; j < 4; j++) {
- b1_put_byte(base, buf[i++]);
- }
- }
- left -= FWBUF_SIZE;
- dp += FWBUF_SIZE;
- }
- if (left) {
- if (config->user) {
- if (copy_from_user(buf, dp, left))
- return -EFAULT;
- } else {
- memcpy(buf, dp, left);
- }
- for (i = 0; i < left; ) {
- b1_put_byte(base, SEND_CONFIG);
- for (j = 0; j < 4; j++) {
- if (i < left)
- b1_put_byte(base, buf[i++]);
- else
- b1_put_byte(base, 0);
- }
- }
- }
- return 0;
-}
-
-int b1_loaded(avmcard *card)
-{
- unsigned int base = card->port;
- unsigned long stop;
- unsigned char ans;
- unsigned long tout = 2;
-
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_tx_empty(base))
- break;
- }
- if (!b1_tx_empty(base)) {
- printk(KERN_ERR "%s: b1_loaded: tx err, corrupted t4 file?\n",
- card->name);
- return 0;
- }
- b1_put_byte(base, SEND_POLL);
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_rx_full(base)) {
- ans = b1_get_byte(base);
- if (ans == RECEIVE_POLL)
- return 1;
-
- printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
- card->name, ans);
- return 0;
- }
- }
- printk(KERN_ERR "%s: b1_loaded: firmware not running\n", card->name);
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- int retval;
-
- b1_reset(port);
- retval = b1_load_t4file(card, &data->firmware);
-
- if (retval) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- return retval;
- }
-
- b1_disable_irq(port);
-
- if (data->configuration.len > 0 && data->configuration.data) {
- retval = b1_load_config(card, &data->configuration);
- if (retval) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load config!!\n",
- card->name);
- return retval;
- }
- }
-
- if (!b1_loaded(card)) {
- printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
- return -EIO;
- }
-
- spin_lock_irqsave(&card->lock, flags);
- b1_setinterrupt(port, card->irq, card->cardtype);
- b1_put_byte(port, SEND_INIT);
- b1_put_word(port, CAPI_MAXAPPL);
- b1_put_word(port, AVM_NCCI_PER_CHANNEL * 2);
- b1_put_word(port, ctrl->cnr - 1);
- spin_unlock_irqrestore(&card->lock, flags);
-
- return 0;
-}
-
-void b1_reset_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
-
- b1_reset(port);
- b1_reset(port);
-
- memset(cinfo->version, 0, sizeof(cinfo->version));
- spin_lock_irqsave(&card->lock, flags);
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(ctrl);
-}
-
-void b1_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- int nconn, want = rp->level3cnt;
-
- if (want > 0) nconn = want;
- else nconn = ctrl->profile.nbchannel * -want;
- if (nconn == 0) nconn = ctrl->profile.nbchannel;
-
- spin_lock_irqsave(&card->lock, flags);
- b1_put_byte(port, SEND_REGISTER);
- b1_put_word(port, appl);
- b1_put_word(port, 1024 * (nconn + 1));
- b1_put_word(port, nconn);
- b1_put_word(port, rp->datablkcnt);
- b1_put_word(port, rp->datablklen);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- capilib_release_appl(&cinfo->ncci_head, appl);
- b1_put_byte(port, SEND_RELEASE);
- b1_put_word(port, appl);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- u16 len = CAPIMSG_LEN(skb->data);
- u8 cmd = CAPIMSG_COMMAND(skb->data);
- u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
- u16 dlen, retval;
-
- spin_lock_irqsave(&card->lock, flags);
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- if (retval != CAPI_NOERROR) {
- spin_unlock_irqrestore(&card->lock, flags);
- return retval;
- }
-
- dlen = CAPIMSG_DATALEN(skb->data);
-
- b1_put_byte(port, SEND_DATA_B3_REQ);
- b1_put_slice(port, skb->data, len);
- b1_put_slice(port, skb->data + len, dlen);
- } else {
- b1_put_byte(port, SEND_MESSAGE);
- b1_put_slice(port, skb->data, len);
- }
- spin_unlock_irqrestore(&card->lock, flags);
-
- dev_kfree_skb_any(skb);
- return CAPI_NOERROR;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1_parse_version(avmctrl_info *cinfo)
-{
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- avmcard *card = cinfo->card;
- capi_profile *profp;
- u8 *dversion;
- u8 flag;
- int i, j;
-
- for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "";
- for (i = 0, j = 0;
- j < AVM_MAXVERSION && i < cinfo->versionlen;
- j++, i += cinfo->versionbuf[i] + 1)
- cinfo->version[j] = &cinfo->versionbuf[i + 1];
-
- strlcpy(ctrl->serial, cinfo->version[VER_SERIAL], sizeof(ctrl->serial));
- memcpy(&ctrl->profile, cinfo->version[VER_PROFILE], sizeof(capi_profile));
- strlcpy(ctrl->manu, "AVM GmbH", sizeof(ctrl->manu));
- dversion = cinfo->version[VER_DRIVER];
- ctrl->version.majorversion = 2;
- ctrl->version.minorversion = 0;
- ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4);
- ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf);
- ctrl->version.minormanuversion = (dversion[3] - '0') << 4;
- ctrl->version.minormanuversion |=
- (dversion[5] - '0') * 10 + ((dversion[6] - '0') & 0xf);
-
- profp = &ctrl->profile;
-
- flag = ((u8 *)(profp->manu))[1];
- switch (flag) {
- case 0: if (cinfo->version[VER_CARDTYPE])
- strcpy(cinfo->cardname, cinfo->version[VER_CARDTYPE]);
- else strcpy(cinfo->cardname, "B1");
- break;
- case 3: strcpy(cinfo->cardname, "PCMCIA B"); break;
- case 4: strcpy(cinfo->cardname, "PCMCIA M1"); break;
- case 5: strcpy(cinfo->cardname, "PCMCIA M2"); break;
- case 6: strcpy(cinfo->cardname, "B1 V3.0"); break;
- case 7: strcpy(cinfo->cardname, "B1 PCI"); break;
- default: sprintf(cinfo->cardname, "AVM?%u", (unsigned int)flag); break;
- }
- printk(KERN_NOTICE "%s: card %d \"%s\" ready.\n",
- card->name, ctrl->cnr, cinfo->cardname);
-
- flag = ((u8 *)(profp->manu))[3];
- if (flag)
- printk(KERN_NOTICE "%s: card %d Protocol:%s%s%s%s%s%s%s\n",
- card->name,
- ctrl->cnr,
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
-
- flag = ((u8 *)(profp->manu))[5];
- if (flag)
- printk(KERN_NOTICE "%s: card %d Linetype:%s%s%s%s\n",
- card->name,
- ctrl->cnr,
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
-}
-
-/* ------------------------------------------------------------- */
-
-irqreturn_t b1_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
- avmctrl_info *cinfo = &card->ctrlinfo[0];
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- unsigned char b1cmd;
- struct sk_buff *skb;
-
- unsigned ApplId;
- unsigned MsgLen;
- unsigned DataB3Len;
- unsigned NCCI;
- unsigned WindowSize;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (!b1_rx_full(card->port)) {
- spin_unlock_irqrestore(&card->lock, flags);
- return IRQ_NONE;
- }
-
- b1cmd = b1_get_byte(card->port);
-
- switch (b1cmd) {
-
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- DataB3Len = b1_get_slice(card->port, card->databuf);
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30-MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
-
- skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- skb = alloc_skb(MsgLen, GFP_ATOMIC);
-
- if (!skb) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- spin_unlock_irqrestore(&card->lock, flags);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- WindowSize = b1_get_word(card->port);
- capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- if (NCCI != 0xffffffff)
- capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_START:
- /* b1_put_byte(card->port, SEND_POLLACK); */
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_resume_output(ctrl);
- break;
-
- case RECEIVE_STOP:
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_suspend_output(ctrl);
- break;
-
- case RECEIVE_INIT:
-
- cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = b1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
- case 0xff:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: card removed?\n", card->name);
- return IRQ_NONE;
- default:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return IRQ_HANDLED;
- }
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-int b1_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u8 flag;
- char *s;
-
- seq_printf(m, "%-16s %s\n", "name", card->name);
- seq_printf(m, "%-16s 0x%x\n", "io", card->port);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
- switch (card->cardtype) {
- case avm_b1isa: s = "B1 ISA"; break;
- case avm_b1pci: s = "B1 PCI"; break;
- case avm_b1pcmcia: s = "B1 PCMCIA"; break;
- case avm_m1: s = "M1"; break;
- case avm_m2: s = "M2"; break;
- case avm_t1isa: s = "T1 ISA (HEMA)"; break;
- case avm_t1pci: s = "T1 PCI"; break;
- case avm_c4: s = "C4"; break;
- case avm_c2: s = "C2"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if (card->cardtype == avm_t1isa)
- seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
-
- s = cinfo->version[VER_DRIVER];
- if (s)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
-
- s = cinfo->version[VER_CARDTYPE];
- if (s)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
-
- s = cinfo->version[VER_SERIAL];
- if (s)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[3];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
- "protocol",
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
- }
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[5];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s\n",
- "linetype",
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
- }
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- return 0;
-}
-EXPORT_SYMBOL(b1_proc_show);
-
-/* ------------------------------------------------------------- */
-
-#ifdef CONFIG_PCI
-
-avmcard_dmainfo *
-avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize)
-{
- avmcard_dmainfo *p;
- void *buf;
-
- p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL);
- if (!p) {
- printk(KERN_WARNING "%s: no memory.\n", name);
- goto err;
- }
-
- p->pcidev = pdev; /* the free paths below rely on this */
-
- p->recvbuf.size = rsize;
- buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr);
- if (!buf) {
- printk(KERN_WARNING "%s: allocation of receive dma buffer failed.\n", name);
- goto err_kfree;
- }
- p->recvbuf.dmabuf = buf;
-
- p->sendbuf.size = ssize;
- buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr);
- if (!buf) {
- printk(KERN_WARNING "%s: allocation of send dma buffer failed.\n", name);
- goto err_free_consistent;
- }
-
- p->sendbuf.dmabuf = buf;
- skb_queue_head_init(&p->send_queue);
-
- return p;
-
-err_free_consistent:
- pci_free_consistent(p->pcidev, p->recvbuf.size,
- p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
-err_kfree:
- kfree(p);
-err:
- return NULL;
-}
-
-void avmcard_dma_free(avmcard_dmainfo *p)
-{
- pci_free_consistent(p->pcidev, p->recvbuf.size,
- p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
- pci_free_consistent(p->pcidev, p->sendbuf.size,
- p->sendbuf.dmabuf, p->sendbuf.dmaaddr);
- skb_queue_purge(&p->send_queue);
- kfree(p);
-}
-
-EXPORT_SYMBOL(avmcard_dma_alloc);
-EXPORT_SYMBOL(avmcard_dma_free);
-
-#endif
-
-EXPORT_SYMBOL(b1_irq_table);
-
-EXPORT_SYMBOL(b1_alloc_card);
-EXPORT_SYMBOL(b1_free_card);
-EXPORT_SYMBOL(b1_detect);
-EXPORT_SYMBOL(b1_getrevision);
-EXPORT_SYMBOL(b1_load_t4file);
-EXPORT_SYMBOL(b1_load_config);
-EXPORT_SYMBOL(b1_loaded);
-EXPORT_SYMBOL(b1_load_firmware);
-EXPORT_SYMBOL(b1_reset_ctr);
-EXPORT_SYMBOL(b1_register_appl);
-EXPORT_SYMBOL(b1_release_appl);
-EXPORT_SYMBOL(b1_send_message);
-
-EXPORT_SYMBOL(b1_parse_version);
-EXPORT_SYMBOL(b1_interrupt);
-
-static int __init b1_init(void)
-{
- char *p;
- char rev[32];
-
- p = strchr(revision, ':');
- if (p && p[1]) {
- strlcpy(rev, p + 2, 32);
- p = strchr(rev, '$');
- if (p && p > rev)
- *(p - 1) = 0;
- } else {
- strcpy(rev, "1.0");
- }
- printk(KERN_INFO "b1: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1_exit(void)
-{
-}
-
-module_init(b1_init);
-module_exit(b1_exit);
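b1_parse_version() above walks versionbuf as a sequence of length-prefixed records: each entry is one length byte followed by the record's bytes, version[j] is pointed at the data in place, and the loop advances by length + 1. A stand-alone sketch of that walk, assuming (as the strlcpy()/strcpy() consumers above do) that each record is NUL-terminated; parse_version_buf is a hypothetical name, not driver API:

/* Sketch only: decodes the length-prefixed record layout that
 * b1_parse_version() expects in versionbuf. */
#define MAXVERSION 8

static int parse_version_buf(char *buf, int buflen, char *out[MAXVERSION])
{
	int i, j;

	for (i = 0, j = 0; j < MAXVERSION && i < buflen; j++) {
		out[j] = &buf[i + 1];		/* data follows the length byte */
		i += (unsigned char)buf[i] + 1;	/* step over length + data */
	}
	return j;			/* number of records decoded */
}

The decoded slots correspond to the VER_* indices in avmcard.h, e.g. out[VER_SERIAL] holds the card's serial number string.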
diff --git a/drivers/staging/isdn/avm/b1dma.c b/drivers/staging/isdn/avm/b1dma.c
deleted file mode 100644
index 6a3dc9937ce5..000000000000
--- a/drivers/staging/isdn/avm/b1dma.c
+++ /dev/null
@@ -1,981 +0,0 @@
-/* $Id: b1dma.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Common module for AVM B1 cards that support dma with AMCC
- *
- * Copyright 2000 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-#undef AVM_B1DMA_DEBUG
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: DMA support for active AVM cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-static bool suppress_pollack = false;
-module_param(suppress_pollack, bool, 0);
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_dispatch_tx(avmcard *card);
-
-/* ------------------------------------------------------------- */
-
-/* S5933 */
-
-#define AMCC_RXPTR 0x24
-#define AMCC_RXLEN 0x28
-#define AMCC_TXPTR 0x2c
-#define AMCC_TXLEN 0x30
-
-#define AMCC_INTCSR 0x38
-# define EN_READ_TC_INT 0x00008000L
-# define EN_WRITE_TC_INT 0x00004000L
-# define EN_TX_TC_INT EN_READ_TC_INT
-# define EN_RX_TC_INT EN_WRITE_TC_INT
-# define AVM_FLAG 0x30000000L
-
-# define ANY_S5933_INT 0x00800000L
-# define READ_TC_INT 0x00080000L
-# define WRITE_TC_INT 0x00040000L
-# define TX_TC_INT READ_TC_INT
-# define RX_TC_INT WRITE_TC_INT
-# define MASTER_ABORT_INT 0x00100000L
-# define TARGET_ABORT_INT 0x00200000L
-# define BUS_MASTER_INT 0x00200000L
-# define ALL_INT 0x000C0000L
-
-#define AMCC_MCSR 0x3c
-# define A2P_HI_PRIORITY 0x00000100L
-# define EN_A2P_TRANSFERS 0x00000400L
-# define P2A_HI_PRIORITY 0x00001000L
-# define EN_P2A_TRANSFERS 0x00004000L
-# define RESET_A2P_FLAGS 0x04000000L
-# define RESET_P2A_FLAGS 0x02000000L
-
-/* ------------------------------------------------------------- */
-
-static inline void b1dma_writel(avmcard *card, u32 value, int off)
-{
- writel(value, card->mbase + off);
-}
-
-static inline u32 b1dma_readl(avmcard *card, int off)
-{
- return readl(card->mbase + off);
-}
-
-/* ------------------------------------------------------------- */
-
-static inline int b1dma_tx_empty(unsigned int port)
-{
- return inb(port + 0x03) & 0x1;
-}
-
-static inline int b1dma_rx_full(unsigned int port)
-{
- return inb(port + 0x02) & 0x1;
-}
-
-static int b1dma_tolink(avmcard *card, void *buf, unsigned int len)
-{
- unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
- unsigned char *s = (unsigned char *)buf;
- while (len--) {
- while (!b1dma_tx_empty(card->port)
- && time_before(jiffies, stop));
- if (!b1dma_tx_empty(card->port))
- return -1;
- t1outp(card->port, 0x01, *s++);
- }
- return 0;
-}
-
-static int b1dma_fromlink(avmcard *card, void *buf, unsigned int len)
-{
- unsigned long stop = jiffies + 1 * HZ; /* maximum wait time 1 sec */
- unsigned char *s = (unsigned char *)buf;
- while (len--) {
- while (!b1dma_rx_full(card->port)
- && time_before(jiffies, stop));
- if (!b1dma_rx_full(card->port))
- return -1;
- *s++ = t1inp(card->port, 0x00);
- }
- return 0;
-}
-
-static int WriteReg(avmcard *card, u32 reg, u8 val)
-{
- u8 cmd = 0x00;
- if (b1dma_tolink(card, &cmd, 1) == 0
- && b1dma_tolink(card, &reg, 4) == 0) {
- u32 tmp = val;
- return b1dma_tolink(card, &tmp, 4);
- }
- return -1;
-}
-
-static u8 ReadReg(avmcard *card, u32 reg)
-{
- u8 cmd = 0x01;
- if (b1dma_tolink(card, &cmd, 1) == 0
- && b1dma_tolink(card, &reg, 4) == 0) {
- u32 tmp;
- if (b1dma_fromlink(card, &tmp, 4) == 0)
- return (u8)tmp;
- }
- return 0xff;
-}
-
-/* ------------------------------------------------------------- */
-
-static inline void _put_byte(void **pp, u8 val)
-{
- u8 *s = *pp;
- *s++ = val;
- *pp = s;
-}
-
-static inline void _put_word(void **pp, u32 val)
-{
- u8 *s = *pp;
- *s++ = val & 0xff;
- *s++ = (val >> 8) & 0xff;
- *s++ = (val >> 16) & 0xff;
- *s++ = (val >> 24) & 0xff;
- *pp = s;
-}
-
-static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- _put_word(pp, i);
- while (i-- > 0)
- _put_byte(pp, *dp++);
-}
-
-static inline u8 _get_byte(void **pp)
-{
- u8 *s = *pp;
- u8 val;
- val = *s++;
- *pp = s;
- return val;
-}
-
-static inline u32 _get_word(void **pp)
-{
- u8 *s = *pp;
- u32 val;
- val = *s++;
- val |= (*s++ << 8);
- val |= (*s++ << 16);
- val |= (*s++ << 24);
- *pp = s;
- return val;
-}
-
-static inline u32 _get_slice(void **pp, unsigned char *dp)
-{
- unsigned int len, i;
-
- len = i = _get_word(pp);
- while (i-- > 0) *dp++ = _get_byte(pp);
- return len;
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_reset(avmcard *card)
-{
- card->csr = 0x0;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
- b1dma_writel(card, 0, AMCC_MCSR);
- b1dma_writel(card, 0, AMCC_RXLEN);
- b1dma_writel(card, 0, AMCC_TXLEN);
-
- t1outp(card->port, 0x10, 0x00);
- t1outp(card->port, 0x07, 0x00);
-
- b1dma_writel(card, 0, AMCC_MCSR);
- mdelay(10);
- b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
- mdelay(10);
- b1dma_writel(card, 0, AMCC_MCSR);
- if (card->cardtype == avm_t1pci)
- mdelay(42);
- else
- mdelay(10);
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1dma_detect(avmcard *card)
-{
- b1dma_writel(card, 0, AMCC_MCSR);
- mdelay(10);
- b1dma_writel(card, 0x0f000000, AMCC_MCSR); /* reset all */
- mdelay(10);
- b1dma_writel(card, 0, AMCC_MCSR);
- mdelay(42);
-
- b1dma_writel(card, 0, AMCC_RXLEN);
- b1dma_writel(card, 0, AMCC_TXLEN);
- card->csr = 0x0;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
-
- if (b1dma_readl(card, AMCC_MCSR) != 0x000000E6)
- return 1;
-
- b1dma_writel(card, 0xffffffff, AMCC_RXPTR);
- b1dma_writel(card, 0xffffffff, AMCC_TXPTR);
- if (b1dma_readl(card, AMCC_RXPTR) != 0xfffffffc
- || b1dma_readl(card, AMCC_TXPTR) != 0xfffffffc)
- return 2;
-
- b1dma_writel(card, 0x0, AMCC_RXPTR);
- b1dma_writel(card, 0x0, AMCC_TXPTR);
- if (b1dma_readl(card, AMCC_RXPTR) != 0x0
- || b1dma_readl(card, AMCC_TXPTR) != 0x0)
- return 3;
-
- t1outp(card->port, 0x10, 0x00);
- t1outp(card->port, 0x07, 0x00);
-
- t1outp(card->port, 0x02, 0x02);
- t1outp(card->port, 0x03, 0x02);
-
- if ((t1inp(card->port, 0x02) & 0xFE) != 0x02
- || t1inp(card->port, 0x3) != 0x03)
- return 4;
-
- t1outp(card->port, 0x02, 0x00);
- t1outp(card->port, 0x03, 0x00);
-
- if ((t1inp(card->port, 0x02) & 0xFE) != 0x00
- || t1inp(card->port, 0x3) != 0x01)
- return 5;
-
- return 0;
-}
-
-int t1pci_detect(avmcard *card)
-{
- int ret;
-
- if ((ret = b1dma_detect(card)) != 0)
- return ret;
-
- /* Transputer test */
-
- if (WriteReg(card, 0x80001000, 0x11) != 0
- || WriteReg(card, 0x80101000, 0x22) != 0
- || WriteReg(card, 0x80201000, 0x33) != 0
- || WriteReg(card, 0x80301000, 0x44) != 0)
- return 6;
-
- if (ReadReg(card, 0x80001000) != 0x11
- || ReadReg(card, 0x80101000) != 0x22
- || ReadReg(card, 0x80201000) != 0x33
- || ReadReg(card, 0x80301000) != 0x44)
- return 7;
-
- if (WriteReg(card, 0x80001000, 0x55) != 0
- || WriteReg(card, 0x80101000, 0x66) != 0
- || WriteReg(card, 0x80201000, 0x77) != 0
- || WriteReg(card, 0x80301000, 0x88) != 0)
- return 8;
-
- if (ReadReg(card, 0x80001000) != 0x55
- || ReadReg(card, 0x80101000) != 0x66
- || ReadReg(card, 0x80201000) != 0x77
- || ReadReg(card, 0x80301000) != 0x88)
- return 9;
-
- return 0;
-}
-
-int b1pciv4_detect(avmcard *card)
-{
- int ret, i;
-
- if ((ret = b1dma_detect(card)) != 0)
- return ret;
-
- for (i = 0; i < 5; i++) {
- if (WriteReg(card, 0x80A00000, 0x21) != 0)
- return 6;
- if ((ReadReg(card, 0x80A00000) & 0x01) != 0x01)
- return 7;
- }
- for (i = 0; i < 5; i++) {
- if (WriteReg(card, 0x80A00000, 0x20) != 0)
- return 8;
- if ((ReadReg(card, 0x80A00000) & 0x01) != 0x00)
- return 9;
- }
-
- return 0;
-}
-
-static void b1dma_queue_tx(avmcard *card, struct sk_buff *skb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- skb_queue_tail(&card->dma->send_queue, skb);
-
- if (!(card->csr & EN_TX_TC_INT)) {
- b1dma_dispatch_tx(card);
- b1dma_writel(card, card->csr, AMCC_INTCSR);
- }
-
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_dispatch_tx(avmcard *card)
-{
- avmcard_dmainfo *dma = card->dma;
- struct sk_buff *skb;
- u8 cmd, subcmd;
- u16 len;
- u32 txlen;
- void *p;
-
- skb = skb_dequeue(&dma->send_queue);
-
- len = CAPIMSG_LEN(skb->data);
-
- if (len) {
- cmd = CAPIMSG_COMMAND(skb->data);
- subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-
- p = dma->sendbuf.dmabuf;
-
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- u16 dlen = CAPIMSG_DATALEN(skb->data);
- _put_byte(&p, SEND_DATA_B3_REQ);
- _put_slice(&p, skb->data, len);
- _put_slice(&p, skb->data + len, dlen);
- } else {
- _put_byte(&p, SEND_MESSAGE);
- _put_slice(&p, skb->data, len);
- }
- txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
-#ifdef AVM_B1DMA_DEBUG
- printk(KERN_DEBUG "tx: put msg len=%d\n", txlen);
-#endif
- } else {
- txlen = skb->len - 2;
-#ifdef AVM_B1DMA_POLLDEBUG
- if (skb->data[2] == SEND_POLLACK)
- printk(KERN_INFO "%s: send ack\n", card->name);
-#endif
-#ifdef AVM_B1DMA_DEBUG
- printk(KERN_DEBUG "tx: put 0x%x len=%d\n",
- skb->data[2], txlen);
-#endif
- skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
- skb->len - 2);
- }
- txlen = (txlen + 3) & ~3;
-
- b1dma_writel(card, dma->sendbuf.dmaaddr, AMCC_TXPTR);
- b1dma_writel(card, txlen, AMCC_TXLEN);
-
- card->csr |= EN_TX_TC_INT;
-
- dev_kfree_skb_any(skb);
-}
-
-/* ------------------------------------------------------------- */
-
-static void queue_pollack(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
-
- skb = alloc_skb(3, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost poll ack\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_POLLACK);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_handle_rx(avmcard *card)
-{
- avmctrl_info *cinfo = &card->ctrlinfo[0];
- avmcard_dmainfo *dma = card->dma;
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- struct sk_buff *skb;
- void *p = dma->recvbuf.dmabuf + 4;
- u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
- u8 b1cmd = _get_byte(&p);
-
-#ifdef AVM_B1DMA_DEBUG
- printk(KERN_DEBUG "rx: 0x%x %lu\n", b1cmd, (unsigned long)dma->recvlen);
-#endif
-
- switch (b1cmd) {
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- DataB3Len = _get_slice(&p, card->databuf);
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) {
- spin_lock(&card->lock);
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock(&card->lock);
- }
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
- WindowSize = _get_word(&p);
- spin_lock(&card->lock);
- capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
- spin_unlock(&card->lock);
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
-
- if (NCCI != 0xffffffff) {
- spin_lock(&card->lock);
- capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
- spin_unlock(&card->lock);
- }
- break;
-
- case RECEIVE_START:
-#ifdef AVM_B1DMA_POLLDEBUG
- printk(KERN_INFO "%s: receive poll\n", card->name);
-#endif
- if (!suppress_pollack)
- queue_pollack(card);
- capi_ctr_resume_output(ctrl);
- break;
-
- case RECEIVE_STOP:
- capi_ctr_suspend_output(ctrl);
- break;
-
- case RECEIVE_INIT:
-
- cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
- default:
- printk(KERN_ERR "%s: b1dma_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return;
- }
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_handle_interrupt(avmcard *card)
-{
- u32 status;
- u32 newcsr;
-
- spin_lock(&card->lock);
-
- status = b1dma_readl(card, AMCC_INTCSR);
- if ((status & ANY_S5933_INT) == 0) {
- spin_unlock(&card->lock);
- return;
- }
-
- newcsr = card->csr | (status & ALL_INT);
- if (status & TX_TC_INT) newcsr &= ~EN_TX_TC_INT;
- if (status & RX_TC_INT) newcsr &= ~EN_RX_TC_INT;
- b1dma_writel(card, newcsr, AMCC_INTCSR);
-
- if ((status & RX_TC_INT) != 0) {
- struct avmcard_dmainfo *dma = card->dma;
- u32 rxlen;
- if (card->dma->recvlen == 0) {
- rxlen = b1dma_readl(card, AMCC_RXLEN);
- if (rxlen == 0) {
- dma->recvlen = *((u32 *)dma->recvbuf.dmabuf);
- rxlen = (dma->recvlen + 3) & ~3;
- b1dma_writel(card, dma->recvbuf.dmaaddr + 4, AMCC_RXPTR);
- b1dma_writel(card, rxlen, AMCC_RXLEN);
-#ifdef AVM_B1DMA_DEBUG
- } else {
- printk(KERN_ERR "%s: rx not complete (%d).\n",
- card->name, rxlen);
-#endif
- }
- } else {
- spin_unlock(&card->lock);
- b1dma_handle_rx(card);
- dma->recvlen = 0;
- spin_lock(&card->lock);
- b1dma_writel(card, dma->recvbuf.dmaaddr, AMCC_RXPTR);
- b1dma_writel(card, 4, AMCC_RXLEN);
- }
- }
-
- if ((status & TX_TC_INT) != 0) {
- if (skb_queue_empty(&card->dma->send_queue))
- card->csr &= ~EN_TX_TC_INT;
- else
- b1dma_dispatch_tx(card);
- }
- b1dma_writel(card, card->csr, AMCC_INTCSR);
-
- spin_unlock(&card->lock);
-}
-
-irqreturn_t b1dma_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
-
- b1dma_handle_interrupt(card);
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1dma_loaded(avmcard *card)
-{
- unsigned long stop;
- unsigned char ans;
- unsigned long tout = 2;
- unsigned int base = card->port;
-
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_tx_empty(base))
- break;
- }
- if (!b1_tx_empty(base)) {
- printk(KERN_ERR "%s: b1dma_loaded: tx err, corrupted t4 file?\n",
- card->name);
- return 0;
- }
- b1_put_byte(base, SEND_POLLACK);
- for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
- if (b1_rx_full(base)) {
- if ((ans = b1_get_byte(base)) == RECEIVE_POLLDWORD) {
- return 1;
- }
- printk(KERN_ERR "%s: b1dma_loaded: got 0x%x, firmware not running in dword mode\n", card->name, ans);
- return 0;
- }
- }
- printk(KERN_ERR "%s: b1dma_loaded: firmware not running\n", card->name);
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static void b1dma_send_init(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
-
- skb = alloc_skb(15, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_INIT);
- _put_word(&p, CAPI_MAXAPPL);
- _put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
- _put_word(&p, card->cardnr - 1);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- int retval;
-
- b1dma_reset(card);
-
- if ((retval = b1_load_t4file(card, &data->firmware))) {
- b1dma_reset(card);
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- return retval;
- }
-
- if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
- b1dma_reset(card);
- printk(KERN_ERR "%s: failed to load config!!\n",
- card->name);
- return retval;
- }
- }
-
- if (!b1dma_loaded(card)) {
- b1dma_reset(card);
- printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
- return -EIO;
- }
-
- card->csr = AVM_FLAG;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
- b1dma_writel(card, EN_A2P_TRANSFERS | EN_P2A_TRANSFERS | A2P_HI_PRIORITY |
- P2A_HI_PRIORITY | RESET_A2P_FLAGS | RESET_P2A_FLAGS,
- AMCC_MCSR);
- t1outp(card->port, 0x07, 0x30);
- t1outp(card->port, 0x10, 0xF0);
-
- card->dma->recvlen = 0;
- b1dma_writel(card, card->dma->recvbuf.dmaaddr, AMCC_RXPTR);
- b1dma_writel(card, 4, AMCC_RXLEN);
- card->csr |= EN_RX_TC_INT;
- b1dma_writel(card, card->csr, AMCC_INTCSR);
-
- b1dma_send_init(card);
-
- return 0;
-}
-
-void b1dma_reset_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- b1dma_reset(card);
-
- memset(cinfo->version, 0, sizeof(cinfo->version));
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(ctrl);
-}
-
-/* ------------------------------------------------------------- */
-
-void b1dma_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- struct sk_buff *skb;
- int want = rp->level3cnt;
- int nconn;
- void *p;
-
- if (want > 0) nconn = want;
- else nconn = ctrl->profile.nbchannel * -want;
- if (nconn == 0) nconn = ctrl->profile.nbchannel;
-
- skb = alloc_skb(23, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_REGISTER);
- _put_word(&p, appl);
- _put_word(&p, 1024 * (nconn + 1));
- _put_word(&p, nconn);
- _put_word(&p, rp->datablkcnt);
- _put_word(&p, rp->datablklen);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
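
The register message assembled above is a fixed little-endian layout: two pad bytes, a one-byte command, then 32-bit words written via the _put_byte()/_put_word() helpers (their definitions appear further down, in c4.c). A minimal userspace sketch of the SEND_REGISTER payload built by b1dma_register_appl(); the 0x02 opcode is a stand-in, the real value lives in avmcard.h, which is not part of this diff:

#include <stdint.h>
#include <stdio.h>

static void put_byte(uint8_t **p, uint8_t v) { *(*p)++ = v; }

static void put_word(uint8_t **p, uint32_t v)
{
	put_byte(p, v & 0xff);			/* least significant byte first */
	put_byte(p, (v >> 8) & 0xff);
	put_byte(p, (v >> 16) & 0xff);
	put_byte(p, (v >> 24) & 0xff);
}

int main(void)
{
	uint8_t buf[23], *p = buf;
	uint32_t appl = 1, nconn = 2, datablkcnt = 8, datablklen = 2048;

	put_byte(&p, 0);			/* two pad bytes, as in the driver */
	put_byte(&p, 0);
	put_byte(&p, 0x02);			/* SEND_REGISTER (value assumed) */
	put_word(&p, appl);
	put_word(&p, 1024 * (nconn + 1));	/* message buffer size */
	put_word(&p, nconn);			/* level-3 connection count */
	put_word(&p, datablkcnt);
	put_word(&p, datablklen);

	printf("payload: %td bytes\n", p - buf);	/* 23, matching alloc_skb(23) */
	return 0;
}
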
-
-/* ------------------------------------------------------------- */
-
-void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- struct sk_buff *skb;
- void *p;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- capilib_release_appl(&cinfo->ncci_head, appl);
- spin_unlock_irqrestore(&card->lock, flags);
-
- skb = alloc_skb(7, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost release appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_RELEASE);
- _put_word(&p, appl);
-
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- b1dma_queue_tx(card, skb);
-}
-
-/* ------------------------------------------------------------- */
-
-u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u16 retval = CAPI_NOERROR;
-
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock_irqrestore(&card->lock, flags);
- }
- if (retval == CAPI_NOERROR)
- b1dma_queue_tx(card, skb);
-
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1dma_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u8 flag;
- char *s;
- u32 txoff, txlen, rxoff, rxlen, csr;
- unsigned long flags;
-
- seq_printf(m, "%-16s %s\n", "name", card->name);
- seq_printf(m, "%-16s 0x%x\n", "io", card->port);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
- seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
- switch (card->cardtype) {
- case avm_b1isa: s = "B1 ISA"; break;
- case avm_b1pci: s = "B1 PCI"; break;
- case avm_b1pcmcia: s = "B1 PCMCIA"; break;
- case avm_m1: s = "M1"; break;
- case avm_m2: s = "M2"; break;
- case avm_t1isa: s = "T1 ISA (HEMA)"; break;
- case avm_t1pci: s = "T1 PCI"; break;
- case avm_c4: s = "C4"; break;
- case avm_c2: s = "C2"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[3];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
- "protocol",
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
- }
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[5];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s\n",
- "linetype",
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
- }
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- spin_lock_irqsave(&card->lock, flags);
-
-	txoff = (dma_addr_t)b1dma_readl(card, AMCC_TXPTR) - card->dma->sendbuf.dmaaddr;
-	txlen = b1dma_readl(card, AMCC_TXLEN);
-
-	rxoff = (dma_addr_t)b1dma_readl(card, AMCC_RXPTR) - card->dma->recvbuf.dmaaddr;
-	rxlen = b1dma_readl(card, AMCC_RXLEN);
-
- csr = b1dma_readl(card, AMCC_INTCSR);
-
- spin_unlock_irqrestore(&card->lock, flags);
-
- seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
- seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
- seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
- seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
- seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
- seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
-
- return 0;
-}
-EXPORT_SYMBOL(b1dma_proc_show);
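
The "protocol" line above decodes a bitmask taken from ctrl->profile.manu[3]. The same mapping pulled out into a runnable sketch; the bit-to-name table is copied verbatim from the seq_printf() call, nothing else is assumed:

#include <stdint.h>
#include <stdio.h>

static void print_protocols(uint8_t flag)
{
	static const char *names[] = {
		"DSS1", "CT1", "VN3", "NI1", "AUSTEL", "ESS", "1TR6",
	};
	int i;

	for (i = 0; i < 7; i++)		/* bit 0 = DSS1 ... bit 6 = 1TR6 */
		if (flag & (1u << i))
			printf(" %s", names[i]);
	printf("\n");
}

int main(void)
{
	print_protocols(0x01 | 0x40);	/* prints " DSS1 1TR6" */
	return 0;
}
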
-
-/* ------------------------------------------------------------- */
-
-EXPORT_SYMBOL(b1dma_reset);
-EXPORT_SYMBOL(t1pci_detect);
-EXPORT_SYMBOL(b1pciv4_detect);
-EXPORT_SYMBOL(b1dma_interrupt);
-
-EXPORT_SYMBOL(b1dma_load_firmware);
-EXPORT_SYMBOL(b1dma_reset_ctr);
-EXPORT_SYMBOL(b1dma_register_appl);
-EXPORT_SYMBOL(b1dma_release_appl);
-EXPORT_SYMBOL(b1dma_send_message);
-
-static int __init b1dma_init(void)
-{
- char *p;
- char rev[32];
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- printk(KERN_INFO "b1dma: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1dma_exit(void)
-{
-}
-
-module_init(b1dma_init);
-module_exit(b1dma_exit);
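
Every module in this directory derives its printed version the same way: take the "x.y.z" out of a CVS $Revision$ keyword string, falling back to "1.0" if the keyword was never expanded. A standalone rendition of the parsing in b1dma_init() (userspace C, strlcpy replaced by snprintf):

#include <stdio.h>
#include <string.h>

/* Extract "1.2.3" from "$Revision: 1.2.3 $", else fall back to "1.0". */
static void parse_revision(const char *revision, char *rev, size_t size)
{
	char *p;

	snprintf(rev, size, "1.0");
	p = strchr(revision, ':');
	if (p && p[1]) {
		snprintf(rev, size, "%s", p + 2);
		p = strchr(rev, '$');
		if (p && p > rev)
			*(p - 1) = 0;	/* chop the trailing " $" */
	}
}

int main(void)
{
	char rev[32];

	parse_revision("$Revision: 1.1.2.3 $", rev, sizeof(rev));
	printf("%s\n", rev);		/* 1.1.2.3 */
	return 0;
}
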
diff --git a/drivers/staging/isdn/avm/b1isa.c b/drivers/staging/isdn/avm/b1isa.c
deleted file mode 100644
index cdfea72e0ef6..000000000000
--- a/drivers/staging/isdn/avm/b1isa.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/* $Id: b1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Module for AVM B1 ISA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 ISA card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static void b1isa_remove(struct pci_dev *pdev)
-{
- avmctrl_info *cinfo = pci_get_drvdata(pdev);
- avmcard *card;
-
- if (!cinfo)
- return;
-
- card = cinfo->card;
-
- b1_reset(card->port);
- b1_reset(card->port);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- release_region(card->port, AVMB1_PORTLEN);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static char *b1isa_procinfo(struct capi_ctr *ctrl);
-
-static int b1isa_probe(struct pci_dev *pdev)
-{
- avmctrl_info *cinfo;
- avmcard *card;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1isa: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- cinfo = card->ctrlinfo;
-
- card->port = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
- card->cardtype = avm_b1isa;
- sprintf(card->name, "b1isa-%x", card->port);
-
- if (card->port != 0x150 && card->port != 0x250
- && card->port != 0x300 && card->port != 0x340) {
- printk(KERN_WARNING "b1isa: invalid port 0x%x.\n", card->port);
- retval = -EINVAL;
- goto err_free;
- }
- if (b1_irq_table[card->irq & 0xf] == 0) {
- printk(KERN_WARNING "b1isa: irq %d not valid.\n", card->irq);
- retval = -EINVAL;
- goto err_free;
- }
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "b1isa: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN - 1);
- retval = -EBUSY;
- goto err_free;
- }
- retval = request_irq(card->irq, b1_interrupt, 0, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1isa: unable to get IRQ %d.\n", card->irq);
- goto err_release_region;
- }
- b1_reset(card->port);
- if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
- printk(KERN_NOTICE "b1isa: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_free_irq;
- }
- b1_reset(card->port);
- b1_getrevision(card);
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "b1isa";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = b1_send_message;
- cinfo->capi_ctrl.load_firmware = b1_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1isa_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1isa: attach controller failed.\n");
- goto err_free_irq;
- }
-
- printk(KERN_INFO "b1isa: AVM B1 ISA at i/o %#x, irq %d, revision %d\n",
- card->port, card->irq, card->revision);
-
- pci_set_drvdata(pdev, cinfo);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static char *b1isa_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-#define MAX_CARDS 4
-static struct pci_dev isa_dev[MAX_CARDS];
-static int io[MAX_CARDS];
-static int irq[MAX_CARDS];
-
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-
-static int b1isa_add_card(struct capi_driver *driver, capicardparams *data)
-{
- int i;
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (isa_dev[i].resource[0].start)
- continue;
-
- isa_dev[i].resource[0].start = data->port;
- isa_dev[i].irq = data->irq;
-
- if (b1isa_probe(&isa_dev[i]) == 0)
- return 0;
- }
- return -ENODEV;
-}
-
-static struct capi_driver capi_driver_b1isa = {
- .name = "b1isa",
- .revision = "1.0",
- .add_card = b1isa_add_card,
-};
-
-static int __init b1isa_init(void)
-{
- char *p;
- char rev[32];
- int i;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- isa_dev[i].resource[0].start = io[i];
- isa_dev[i].irq = irq[i];
-
- if (b1isa_probe(&isa_dev[i]) != 0)
- return -ENODEV;
- }
-
- strlcpy(capi_driver_b1isa.revision, rev, 32);
- register_capi_driver(&capi_driver_b1isa);
- printk(KERN_INFO "b1isa: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1isa_exit(void)
-{
- int i;
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (isa_dev[i].resource[0].start)
- b1isa_remove(&isa_dev[i]);
- }
- unregister_capi_driver(&capi_driver_b1isa);
-}
-
-module_init(b1isa_init);
-module_exit(b1isa_exit);
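
b1isa_probe() above accepts only the four I/O bases the B1 ISA card can be jumpered to. The equivalent check in isolation (the companion b1_irq_table lookup is left out, since that table is defined elsewhere in the driver):

#include <stdio.h>

/* The four jumper-selectable I/O bases accepted by b1isa_probe(). */
static int b1isa_port_valid(unsigned int port)
{
	return port == 0x150 || port == 0x250 ||
	       port == 0x300 || port == 0x340;
}

int main(void)
{
	printf("0x300: %d\n", b1isa_port_valid(0x300));	/* 1 */
	printf("0x320: %d\n", b1isa_port_valid(0x320));	/* 0 */
	return 0;
}
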
diff --git a/drivers/staging/isdn/avm/b1pci.c b/drivers/staging/isdn/avm/b1pci.c
deleted file mode 100644
index b76b57a82c02..000000000000
--- a/drivers/staging/isdn/avm/b1pci.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM B1 PCI-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-static struct pci_device_id b1pci_pci_tbl[] = {
- { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static char *b1pci_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1pci: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- cinfo = card->ctrlinfo;
- sprintf(card->name, "b1pci-%x", p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->cardtype = avm_b1pci;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN - 1);
- retval = -EBUSY;
- goto err_free;
- }
- b1_reset(card->port);
- retval = b1_detect(card->port, card->cardtype);
- if (retval) {
- printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_release_region;
- }
- b1_reset(card->port);
- b1_getrevision(card);
-
- retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_release_region;
- }
-
- cinfo->capi_ctrl.driver_name = "b1pci";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = b1_send_message;
- cinfo->capi_ctrl.load_firmware = b1_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1pci_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
- cinfo->capi_ctrl.owner = THIS_MODULE;
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1pci: attach controller failed.\n");
- goto err_free_irq;
- }
-
- if (card->revision >= 4) {
- printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
- card->port, card->irq, card->revision);
- } else {
- printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
- card->port, card->irq, card->revision);
- }
-
- pci_set_drvdata(pdev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static void b1pci_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo = card->ctrlinfo;
- unsigned int port = card->port;
-
- b1_reset(port);
- b1_reset(port);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- release_region(card->port, AVMB1_PORTLEN);
- b1_free_card(card);
-}
-
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-/* ------------------------------------------------------------- */
-
-static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->membase : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1pci: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128);
- if (!card->dma) {
- printk(KERN_WARNING "b1pci: dma alloc.\n");
- retval = -ENOMEM;
- goto err_free;
- }
-
- cinfo = card->ctrlinfo;
- sprintf(card->name, "b1pciv4-%x", p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->membase = p->membase;
- card->cardtype = avm_b1pci;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
-		printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
-		       card->port, card->port + AVMB1_PORTLEN - 1);
- retval = -EBUSY;
- goto err_free_dma;
- }
-
- card->mbase = ioremap(card->membase, 64);
- if (!card->mbase) {
- printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
- card->membase);
- retval = -ENOMEM;
- goto err_release_region;
- }
-
- b1dma_reset(card);
-
- retval = b1pciv4_detect(card);
- if (retval) {
- printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_unmap;
- }
- b1dma_reset(card);
- b1_getrevision(card);
-
- retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
- card->irq);
- retval = -EBUSY;
- goto err_unmap;
- }
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "b1pciv4";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1dma_register_appl;
- cinfo->capi_ctrl.release_appl = b1dma_release_appl;
- cinfo->capi_ctrl.send_message = b1dma_send_message;
- cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
- cinfo->capi_ctrl.proc_show = b1dma_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1pci: attach controller failed.\n");
- goto err_free_irq;
- }
- card->cardnr = cinfo->capi_ctrl.cnr;
-
- printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
- card->port, card->irq, card->membase, card->revision);
-
- pci_set_drvdata(pdev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_unmap:
- iounmap(card->mbase);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
- avmcard_dma_free(card->dma);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static void b1pciv4_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo = card->ctrlinfo;
-
- b1dma_reset(card);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- iounmap(card->mbase);
- release_region(card->port, AVMB1_PORTLEN);
- avmcard_dma_free(card->dma);
- b1_free_card(card);
-}
-
-#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
-
-static int b1pci_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct capicardparams param;
- int retval;
-
- if (pci_enable_device(pdev) < 0) {
- printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
- return -ENODEV;
- }
- param.irq = pdev->irq;
-
- if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- pci_set_master(pdev);
-#endif
- param.membase = pci_resource_start(pdev, 0);
- param.port = pci_resource_start(pdev, 2);
-
- printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
- param.port, param.irq, param.membase);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- retval = b1pciv4_probe(&param, pdev);
-#else
- retval = b1pci_probe(&param, pdev);
-#endif
- if (retval != 0) {
- printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
- param.port, param.irq, param.membase);
- }
- } else {
- param.membase = 0;
- param.port = pci_resource_start(pdev, 1);
-
- printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
- param.port, param.irq);
- retval = b1pci_probe(&param, pdev);
- if (retval != 0) {
- printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
- param.port, param.irq);
- }
- }
- return retval;
-}
-
-static void b1pci_pci_remove(struct pci_dev *pdev)
-{
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- avmcard *card = pci_get_drvdata(pdev);
-
- if (card->dma)
- b1pciv4_remove(pdev);
- else
- b1pci_remove(pdev);
-#else
- b1pci_remove(pdev);
-#endif
-}
-
-static struct pci_driver b1pci_pci_driver = {
- .name = "b1pci",
- .id_table = b1pci_pci_tbl,
- .probe = b1pci_pci_probe,
- .remove = b1pci_pci_remove,
-};
-
-static struct capi_driver capi_driver_b1pci = {
- .name = "b1pci",
- .revision = "1.0",
-};
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
-static struct capi_driver capi_driver_b1pciv4 = {
- .name = "b1pciv4",
- .revision = "1.0",
-};
-#endif
-
-static int __init b1pci_init(void)
-{
- char *p;
- char rev[32];
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = pci_register_driver(&b1pci_pci_driver);
- if (!err) {
- strlcpy(capi_driver_b1pci.revision, rev, 32);
- register_capi_driver(&capi_driver_b1pci);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- strlcpy(capi_driver_b1pciv4.revision, rev, 32);
- register_capi_driver(&capi_driver_b1pciv4);
-#endif
- printk(KERN_INFO "b1pci: revision %s\n", rev);
- }
- return err;
-}
-
-static void __exit b1pci_exit(void)
-{
- unregister_capi_driver(&capi_driver_b1pci);
-#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- unregister_capi_driver(&capi_driver_b1pciv4);
-#endif
- pci_unregister_driver(&b1pci_pci_driver);
-}
-
-module_init(b1pci_init);
-module_exit(b1pci_exit);
diff --git a/drivers/staging/isdn/avm/b1pcmcia.c b/drivers/staging/isdn/avm/b1pcmcia.c
deleted file mode 100644
index 3aca16e62902..000000000000
--- a/drivers/staging/isdn/avm/b1pcmcia.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* $Id: b1pcmcia.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM B1/M1/M2 PCMCIA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/capi.h>
-#include <linux/b1pcmcia.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM PCMCIA cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static void b1pcmcia_remove_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
-
- b1_reset(port);
- b1_reset(port);
-
- detach_capi_ctr(ctrl);
- free_irq(card->irq, card);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static LIST_HEAD(cards);
-
-static char *b1pcmcia_procinfo(struct capi_ctr *ctrl);
-
-static int b1pcmcia_add_card(unsigned int port, unsigned irq,
- enum avmcardtype cardtype)
-{
- avmctrl_info *cinfo;
- avmcard *card;
- char *cardname;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "b1pcmcia: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
- cinfo = card->ctrlinfo;
-
- switch (cardtype) {
- case avm_m1: sprintf(card->name, "m1-%x", port); break;
- case avm_m2: sprintf(card->name, "m2-%x", port); break;
- default: sprintf(card->name, "b1pcmcia-%x", port); break;
- }
- card->port = port;
- card->irq = irq;
- card->cardtype = cardtype;
-
- retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "b1pcmcia: unable to get IRQ %d.\n",
- card->irq);
- retval = -EBUSY;
- goto err_free;
- }
- b1_reset(card->port);
- if ((retval = b1_detect(card->port, card->cardtype)) != 0) {
- printk(KERN_NOTICE "b1pcmcia: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_free_irq;
- }
- b1_reset(card->port);
- b1_getrevision(card);
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "b1pcmcia";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = b1_send_message;
- cinfo->capi_ctrl.load_firmware = b1_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
- cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "b1pcmcia: attach controller failed.\n");
- goto err_free_irq;
- }
- switch (cardtype) {
- case avm_m1: cardname = "M1"; break;
- case avm_m2: cardname = "M2"; break;
- default: cardname = "B1 PCMCIA"; break;
- }
-
- printk(KERN_INFO "b1pcmcia: AVM %s at i/o %#x, irq %d, revision %d\n",
- cardname, card->port, card->irq, card->revision);
-
- list_add(&card->list, &cards);
- return cinfo->capi_ctrl.cnr;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static char *b1pcmcia_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->revision : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq)
-{
- return b1pcmcia_add_card(port, irq, avm_b1pcmcia);
-}
-
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq)
-{
- return b1pcmcia_add_card(port, irq, avm_m1);
-}
-
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq)
-{
- return b1pcmcia_add_card(port, irq, avm_m2);
-}
-
-int b1pcmcia_delcard(unsigned int port, unsigned irq)
-{
- struct list_head *l;
- avmcard *card;
-
- list_for_each(l, &cards) {
- card = list_entry(l, avmcard, list);
- if (card->port == port && card->irq == irq) {
- b1pcmcia_remove_ctr(&card->ctrlinfo[0].capi_ctrl);
- return 0;
- }
- }
- return -ESRCH;
-}
-
-EXPORT_SYMBOL(b1pcmcia_addcard_b1);
-EXPORT_SYMBOL(b1pcmcia_addcard_m1);
-EXPORT_SYMBOL(b1pcmcia_addcard_m2);
-EXPORT_SYMBOL(b1pcmcia_delcard);
-
-static struct capi_driver capi_driver_b1pcmcia = {
- .name = "b1pcmcia",
- .revision = "1.0",
-};
-
-static int __init b1pcmcia_init(void)
-{
- char *p;
- char rev[32];
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
-		strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- strlcpy(capi_driver_b1pcmcia.revision, rev, 32);
- register_capi_driver(&capi_driver_b1pcmcia);
-	printk(KERN_INFO "b1pcmcia: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit b1pcmcia_exit(void)
-{
- unregister_capi_driver(&capi_driver_b1pcmcia);
-}
-
-module_init(b1pcmcia_init);
-module_exit(b1pcmcia_exit);
diff --git a/drivers/staging/isdn/avm/c4.c b/drivers/staging/isdn/avm/c4.c
deleted file mode 100644
index ac72cd204c4d..000000000000
--- a/drivers/staging/isdn/avm/c4.c
+++ /dev/null
@@ -1,1317 +0,0 @@
-/* $Id: c4.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM C4 & C2 card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-#undef AVM_C4_DEBUG
-#undef AVM_C4_POLLDEBUG
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.2 $";
-
-/* ------------------------------------------------------------- */
-
-static bool suppress_pollack;
-
-static const struct pci_device_id c4_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, c4_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM C2/C4 cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-module_param(suppress_pollack, bool, 0);
-
-/* ------------------------------------------------------------- */
-
-static void c4_dispatch_tx(avmcard *card);
-
-/* ------------------------------------------------------------- */
-
-#define DC21285_DRAM_A0MR 0x40000000
-#define DC21285_DRAM_A1MR 0x40004000
-#define DC21285_DRAM_A2MR 0x40008000
-#define DC21285_DRAM_A3MR 0x4000C000
-
-#define CAS_OFFSET 0x88
-
-#define DC21285_ARMCSR_BASE 0x42000000
-
-#define PCI_OUT_INT_STATUS 0x30
-#define PCI_OUT_INT_MASK 0x34
-#define MAILBOX_0 0x50
-#define MAILBOX_1 0x54
-#define MAILBOX_2 0x58
-#define MAILBOX_3 0x5C
-#define DOORBELL 0x60
-#define DOORBELL_SETUP 0x64
-
-#define CHAN_1_CONTROL 0x90
-#define CHAN_2_CONTROL 0xB0
-#define DRAM_TIMING 0x10C
-#define DRAM_ADDR_SIZE_0 0x110
-#define DRAM_ADDR_SIZE_1 0x114
-#define DRAM_ADDR_SIZE_2 0x118
-#define DRAM_ADDR_SIZE_3 0x11C
-#define SA_CONTROL 0x13C
-#define XBUS_CYCLE 0x148
-#define XBUS_STROBE 0x14C
-#define DBELL_PCI_MASK 0x150
-#define DBELL_SA_MASK 0x154
-
-#define SDRAM_SIZE 0x1000000
-
-/* ------------------------------------------------------------- */
-
-#define MBOX_PEEK_POKE MAILBOX_0
-
-#define DBELL_ADDR 0x01
-#define DBELL_DATA 0x02
-#define DBELL_RNWR 0x40
-#define DBELL_INIT 0x80
-
-/* ------------------------------------------------------------- */
-
-#define MBOX_UP_ADDR MAILBOX_0
-#define MBOX_UP_LEN MAILBOX_1
-#define MBOX_DOWN_ADDR MAILBOX_2
-#define MBOX_DOWN_LEN MAILBOX_3
-
-#define DBELL_UP_HOST 0x00000100
-#define DBELL_UP_ARM 0x00000200
-#define DBELL_DOWN_HOST 0x00000400
-#define DBELL_DOWN_ARM 0x00000800
-#define DBELL_RESET_HOST 0x40000000
-#define DBELL_RESET_ARM 0x80000000
-
-/* ------------------------------------------------------------- */
-
-#define DRAM_TIMING_DEF 0x001A01A5
-#define DRAM_AD_SZ_DEF0 0x00000045
-#define DRAM_AD_SZ_NULL 0x00000000
-
-#define SA_CTL_ALLRIGHT 0x64AA0271
-
-#define INIT_XBUS_CYCLE 0x100016DB
-#define INIT_XBUS_STROBE 0xF1F1F1F1
-
-/* ------------------------------------------------------------- */
-
-#define RESET_TIMEOUT (15 * HZ) /* 15 sec */
-#define PEEK_POKE_TIMEOUT (HZ / 10) /* 0.1 sec */
-
-/* ------------------------------------------------------------- */
-
-#define c4outmeml(addr, value) writel(value, addr)
-#define c4inmeml(addr) readl(addr)
-#define c4outmemw(addr, value) writew(value, addr)
-#define c4inmemw(addr) readw(addr)
-#define c4outmemb(addr, value) writeb(value, addr)
-#define c4inmemb(addr) readb(addr)
-
-/* ------------------------------------------------------------- */
-
-static inline int wait_for_doorbell(avmcard *card, unsigned long t)
-{
- unsigned long stop;
-
- stop = jiffies + t;
- while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
- if (!time_before(jiffies, stop))
- return -1;
- mb();
- }
- return 0;
-}
-
-static int c4_poke(avmcard *card, unsigned long off, unsigned long value)
-{
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
- c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
-
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- c4outmeml(card->mbase + MBOX_PEEK_POKE, value);
- c4outmeml(card->mbase + DOORBELL, DBELL_DATA | DBELL_ADDR);
-
- return 0;
-}
-
-static int c4_peek(avmcard *card, unsigned long off, unsigned long *valuep)
-{
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- c4outmeml(card->mbase + MBOX_PEEK_POKE, off);
- c4outmeml(card->mbase + DOORBELL, DBELL_RNWR | DBELL_ADDR);
-
- if (wait_for_doorbell(card, HZ / 10) < 0)
- return -1;
-
- *valuep = c4inmeml(card->mbase + MBOX_PEEK_POKE);
-
- return 0;
-}
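
c4_poke() and c4_peek() tunnel single-word accesses through one mailbox register, with doorbell bits marking the word in flight as address or data. A toy model of the write handshake; the wait_for_doorbell() synchronisation between steps is elided and the card side is reduced to a lockstep function, so this is a simulation, not driver code:

#include <stdint.h>
#include <stdio.h>

#define DBELL_ADDR 0x01
#define DBELL_DATA 0x02

static uint32_t sdram[16];		/* stand-in for card memory */
static uint32_t mbox, pending_addr;
static int have_addr;

/* What the ARM side conceptually does on each doorbell ring. */
static void card_doorbell(uint32_t bell)
{
	if (bell == DBELL_ADDR) {
		pending_addr = mbox;	/* first ring latches the address */
		have_addr = 1;
	} else if (bell == (DBELL_DATA | DBELL_ADDR) && have_addr) {
		sdram[pending_addr] = mbox;	/* second ring stores the data */
		have_addr = 0;
	}
}

static void poke(uint32_t off, uint32_t value)
{
	mbox = off;
	card_doorbell(DBELL_ADDR);
	mbox = value;
	card_doorbell(DBELL_DATA | DBELL_ADDR);
}

int main(void)
{
	poke(3, 0x11111111);
	printf("sdram[3] = 0x%08x\n", sdram[3]);
	return 0;
}
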
-
-/* ------------------------------------------------------------- */
-
-static int c4_load_t4file(avmcard *card, capiloaddatapart *t4file)
-{
- u32 val;
- unsigned char *dp;
- u_int left;
- u32 loadoff = 0;
-
- dp = t4file->data;
- left = t4file->len;
- while (left >= sizeof(u32)) {
- if (t4file->user) {
- if (copy_from_user(&val, dp, sizeof(val)))
- return -EFAULT;
- } else {
- memcpy(&val, dp, sizeof(val));
- }
- if (c4_poke(card, loadoff, val)) {
-			printk(KERN_ERR "%s: corrupted firmware file?\n",
- card->name);
- return -EIO;
- }
- left -= sizeof(u32);
- dp += sizeof(u32);
- loadoff += sizeof(u32);
- }
- if (left) {
- val = 0;
- if (t4file->user) {
- if (copy_from_user(&val, dp, left))
- return -EFAULT;
- } else {
- memcpy(&val, dp, left);
- }
- if (c4_poke(card, loadoff, val)) {
-			printk(KERN_ERR "%s: corrupted firmware file?\n",
- card->name);
- return -EIO;
- }
- }
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static inline void _put_byte(void **pp, u8 val)
-{
- u8 *s = *pp;
- *s++ = val;
- *pp = s;
-}
-
-static inline void _put_word(void **pp, u32 val)
-{
- u8 *s = *pp;
- *s++ = val & 0xff;
- *s++ = (val >> 8) & 0xff;
- *s++ = (val >> 16) & 0xff;
- *s++ = (val >> 24) & 0xff;
- *pp = s;
-}
-
-static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len)
-{
- unsigned i = len;
- _put_word(pp, i);
- while (i-- > 0)
- _put_byte(pp, *dp++);
-}
-
-static inline u8 _get_byte(void **pp)
-{
- u8 *s = *pp;
- u8 val;
- val = *s++;
- *pp = s;
- return val;
-}
-
-static inline u32 _get_word(void **pp)
-{
- u8 *s = *pp;
- u32 val;
- val = *s++;
- val |= (*s++ << 8);
- val |= (*s++ << 16);
- val |= (*s++ << 24);
- *pp = s;
- return val;
-}
-
-static inline u32 _get_slice(void **pp, unsigned char *dp)
-{
- unsigned int len, i;
-
- len = i = _get_word(pp);
- while (i-- > 0) *dp++ = _get_byte(pp);
- return len;
-}
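
_put_slice()/_get_slice() frame every variable-length field as a 32-bit little-endian length followed by the raw bytes; this is the on-wire shape of all the MsgLen/DataB3Len fields handled below. A self-contained round-trip check of that framing (plain C, no kernel dependencies):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void put_byte(uint8_t **p, uint8_t v) { *(*p)++ = v; }
static uint8_t get_byte(uint8_t **p) { return *(*p)++; }

static void put_word(uint8_t **p, uint32_t v)
{
	int i;

	for (i = 0; i < 4; i++)
		put_byte(p, (v >> (8 * i)) & 0xff);
}

static uint32_t get_word(uint8_t **p)
{
	uint32_t v = 0;
	int i;

	for (i = 0; i < 4; i++)
		v |= (uint32_t)get_byte(p) << (8 * i);
	return v;
}

/* slice := u32 length, then exactly that many raw bytes */
static void put_slice(uint8_t **p, const uint8_t *dp, uint32_t len)
{
	put_word(p, len);
	while (len-- > 0)
		put_byte(p, *dp++);
}

static uint32_t get_slice(uint8_t **p, uint8_t *dp)
{
	uint32_t len, i;

	len = i = get_word(p);
	while (i-- > 0)
		*dp++ = get_byte(p);
	return len;
}

int main(void)
{
	uint8_t buf[64], out[64], *p = buf;

	put_slice(&p, (const uint8_t *)"CAPI", 4);
	p = buf;
	assert(get_slice(&p, out) == 4 && memcmp(out, "CAPI", 4) == 0);
	return 0;
}
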
-
-/* ------------------------------------------------------------- */
-
-static void c4_reset(avmcard *card)
-{
- unsigned long stop;
-
- c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
-
- stop = jiffies + HZ * 10;
- while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
- if (!time_before(jiffies, stop))
- return;
- c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
- mb();
- }
-
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_detect(avmcard *card)
-{
- unsigned long stop, dummy;
-
- c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
- if (c4inmeml(card->mbase + PCI_OUT_INT_MASK) != 0x0c)
- return 1;
-
- c4outmeml(card->mbase + DOORBELL, DBELL_RESET_ARM);
-
- stop = jiffies + HZ * 10;
- while (c4inmeml(card->mbase + DOORBELL) != 0xffffffff) {
- if (!time_before(jiffies, stop))
- return 2;
- c4outmeml(card->mbase + DOORBELL, DBELL_ADDR);
- mb();
- }
-
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
- c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);
-
- c4outmeml(card->mbase + MAILBOX_0, 0x55aa55aa);
- if (c4inmeml(card->mbase + MAILBOX_0) != 0x55aa55aa) return 3;
-
- c4outmeml(card->mbase + MAILBOX_0, 0xaa55aa55);
- if (c4inmeml(card->mbase + MAILBOX_0) != 0xaa55aa55) return 4;
-
- if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_SA_MASK, 0)) return 5;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DBELL_PCI_MASK, 0)) return 6;
- if (c4_poke(card, DC21285_ARMCSR_BASE + SA_CONTROL, SA_CTL_ALLRIGHT))
- return 7;
- if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_CYCLE, INIT_XBUS_CYCLE))
- return 8;
- if (c4_poke(card, DC21285_ARMCSR_BASE + XBUS_STROBE, INIT_XBUS_STROBE))
- return 8;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, 0)) return 9;
-
- mdelay(1);
-
- if (c4_peek(card, DC21285_DRAM_A0MR, &dummy)) return 10;
- if (c4_peek(card, DC21285_DRAM_A1MR, &dummy)) return 11;
- if (c4_peek(card, DC21285_DRAM_A2MR, &dummy)) return 12;
- if (c4_peek(card, DC21285_DRAM_A3MR, &dummy)) return 13;
-
- if (c4_poke(card, DC21285_DRAM_A0MR + CAS_OFFSET, 0)) return 14;
- if (c4_poke(card, DC21285_DRAM_A1MR + CAS_OFFSET, 0)) return 15;
- if (c4_poke(card, DC21285_DRAM_A2MR + CAS_OFFSET, 0)) return 16;
- if (c4_poke(card, DC21285_DRAM_A3MR + CAS_OFFSET, 0)) return 17;
-
- mdelay(1);
-
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_TIMING, DRAM_TIMING_DEF))
- return 18;
-
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_0, DRAM_AD_SZ_DEF0))
- return 19;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_1, DRAM_AD_SZ_NULL))
- return 20;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_2, DRAM_AD_SZ_NULL))
- return 21;
- if (c4_poke(card, DC21285_ARMCSR_BASE + DRAM_ADDR_SIZE_3, DRAM_AD_SZ_NULL))
- return 22;
-
- /* Transputer test */
-
- if (c4_poke(card, 0x000000, 0x11111111)
- || c4_poke(card, 0x400000, 0x22222222)
- || c4_poke(card, 0x800000, 0x33333333)
- || c4_poke(card, 0xC00000, 0x44444444))
- return 23;
-
- if (c4_peek(card, 0x000000, &dummy) || dummy != 0x11111111
- || c4_peek(card, 0x400000, &dummy) || dummy != 0x22222222
- || c4_peek(card, 0x800000, &dummy) || dummy != 0x33333333
- || c4_peek(card, 0xC00000, &dummy) || dummy != 0x44444444)
- return 24;
-
- if (c4_poke(card, 0x000000, 0x55555555)
- || c4_poke(card, 0x400000, 0x66666666)
- || c4_poke(card, 0x800000, 0x77777777)
- || c4_poke(card, 0xC00000, 0x88888888))
- return 25;
-
- if (c4_peek(card, 0x000000, &dummy) || dummy != 0x55555555
- || c4_peek(card, 0x400000, &dummy) || dummy != 0x66666666
- || c4_peek(card, 0x800000, &dummy) || dummy != 0x77777777
- || c4_peek(card, 0xC00000, &dummy) || dummy != 0x88888888)
- return 26;
-
- return 0;
-}
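
The "Transputer test" above checks the card's SDRAM by writing distinct words at 4 MB strides (0x000000, 0x400000, ...) and reading both pattern passes back. The same idea over an ordinary buffer (userspace sketch; buffer size and stride are arbitrary here):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Two write/verify passes with distinct patterns at fixed strides,
 * mirroring the structure of the c4_detect() memory test. */
static int stride_test(uint32_t *mem, size_t stride_words)
{
	static const uint32_t pat[2][4] = {
		{ 0x11111111, 0x22222222, 0x33333333, 0x44444444 },
		{ 0x55555555, 0x66666666, 0x77777777, 0x88888888 },
	};
	int pass, i;

	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < 4; i++)
			mem[i * stride_words] = pat[pass][i];
		for (i = 0; i < 4; i++)
			if (mem[i * stride_words] != pat[pass][i])
				return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t *mem = calloc(1 << 12, sizeof(*mem));

	assert(mem);
	assert(stride_test(mem, 1 << 10) == 0);
	free(mem);
	return 0;
}
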
-
-/* ------------------------------------------------------------- */
-
-static void c4_dispatch_tx(avmcard *card)
-{
- avmcard_dmainfo *dma = card->dma;
- struct sk_buff *skb;
- u8 cmd, subcmd;
- u16 len;
- u32 txlen;
- void *p;
-
- if (card->csr & DBELL_DOWN_ARM) { /* tx busy */
- return;
- }
-
- skb = skb_dequeue(&dma->send_queue);
- if (!skb) {
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: tx underrun\n", card->name);
-#endif
- return;
- }
-
- len = CAPIMSG_LEN(skb->data);
-
- if (len) {
- cmd = CAPIMSG_COMMAND(skb->data);
- subcmd = CAPIMSG_SUBCOMMAND(skb->data);
-
- p = dma->sendbuf.dmabuf;
-
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- u16 dlen = CAPIMSG_DATALEN(skb->data);
- _put_byte(&p, SEND_DATA_B3_REQ);
- _put_slice(&p, skb->data, len);
- _put_slice(&p, skb->data + len, dlen);
- } else {
- _put_byte(&p, SEND_MESSAGE);
- _put_slice(&p, skb->data, len);
- }
- txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: tx put msg len=%d\n", card->name, txlen);
-#endif
- } else {
- txlen = skb->len - 2;
-#ifdef AVM_C4_POLLDEBUG
- if (skb->data[2] == SEND_POLLACK)
- printk(KERN_INFO "%s: ack to c4\n", card->name);
-#endif
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
- card->name, skb->data[2], txlen);
-#endif
- skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
- skb->len - 2);
- }
- txlen = (txlen + 3) & ~3;
-
- c4outmeml(card->mbase + MBOX_DOWN_ADDR, dma->sendbuf.dmaaddr);
- c4outmeml(card->mbase + MBOX_DOWN_LEN, txlen);
-
- card->csr |= DBELL_DOWN_ARM;
-
- c4outmeml(card->mbase + DOORBELL, DBELL_DOWN_ARM);
-
- dev_kfree_skb_any(skb);
-}
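
The (txlen + 3) & ~3 just above rounds the transfer length up to the next 4-byte boundary before it is written to MBOX_DOWN_LEN. The idiom in isolation:

#include <assert.h>
#include <stdint.h>

/* Round up to a multiple of 4, as c4_dispatch_tx() does; the same
 * trick works for any power-of-two alignment. */
static uint32_t align4(uint32_t len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	assert(align4(0) == 0);
	assert(align4(1) == 4);
	assert(align4(4) == 4);
	assert(align4(5) == 8);
	return 0;
}
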
-
-/* ------------------------------------------------------------- */
-
-static void queue_pollack(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
-
- skb = alloc_skb(3, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost poll ack\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_POLLACK);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
- c4_dispatch_tx(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_handle_rx(avmcard *card)
-{
- avmcard_dmainfo *dma = card->dma;
- struct capi_ctr *ctrl;
- avmctrl_info *cinfo;
- struct sk_buff *skb;
- void *p = dma->recvbuf.dmabuf;
- u32 ApplId, MsgLen, DataB3Len, NCCI, WindowSize;
- u8 b1cmd = _get_byte(&p);
- u32 cidx;
-
-#ifdef AVM_C4_DEBUG
- printk(KERN_DEBUG "%s: rx 0x%x len=%lu\n", card->name,
- b1cmd, (unsigned long)dma->recvlen);
-#endif
-
- switch (b1cmd) {
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- DataB3Len = _get_slice(&p, card->databuf);
-		cidx = CAPIMSG_CONTROLLER(card->msgbuf) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
-		cidx = CAPIMSG_CONTROLLER(card->msgbuf) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
- cinfo = &card->ctrlinfo[cidx];
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
-
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
-
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
- WindowSize = _get_word(&p);
- cidx = (NCCI & 0x7f) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
-
- capilib_new_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI, WindowSize);
-
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = _get_word(&p);
- NCCI = _get_word(&p);
-
- if (NCCI != 0xffffffff) {
- cidx = (NCCI & 0x7f) - card->cardnr;
- if (cidx >= card->nlogcontr) cidx = 0;
- capilib_free_ncci(&card->ctrlinfo[cidx].ncci_head, ApplId, NCCI);
- }
- break;
-
- case RECEIVE_START:
-#ifdef AVM_C4_POLLDEBUG
- printk(KERN_INFO "%s: poll from c4\n", card->name);
-#endif
- if (!suppress_pollack)
- queue_pollack(card);
- for (cidx = 0; cidx < card->nr_controllers; cidx++) {
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
- capi_ctr_resume_output(ctrl);
- }
- break;
-
- case RECEIVE_STOP:
- for (cidx = 0; cidx < card->nr_controllers; cidx++) {
- ctrl = &card->ctrlinfo[cidx].capi_ctrl;
- capi_ctr_suspend_output(ctrl);
- }
- break;
-
- case RECEIVE_INIT:
-
- cidx = card->nlogcontr;
- if (cidx >= card->nr_controllers) {
- printk(KERN_ERR "%s: card with %d controllers ??\n",
- card->name, cidx + 1);
- break;
- }
- card->nlogcontr++;
- cinfo = &card->ctrlinfo[cidx];
- ctrl = &cinfo->capi_ctrl;
- cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(&cinfo->capi_ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) _get_word(&p);
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = _get_slice(&p, card->msgbuf);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
- default:
- printk(KERN_ERR "%s: c4_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return;
- }
-}
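
Both the RECEIVE_TASK_READY and RECEIVE_DEBUGMSG cases strip trailing newline/carriage-return bytes from msgbuf before logging it. That trimming as a small standalone helper (sketch):

#include <stdio.h>
#include <string.h>

/* NUL-terminate and drop trailing '\n'/'\r', as the two cases above do. */
static size_t trim_crlf(char *buf, size_t len)
{
	buf[len] = 0;
	while (len > 0 && (buf[len - 1] == '\n' || buf[len - 1] == '\r'))
		buf[--len] = 0;
	return len;
}

int main(void)
{
	char msg[] = "task ready\r\n";

	trim_crlf(msg, strlen(msg));
	printf("\"%s\"\n", msg);	/* "task ready" */
	return 0;
}
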
-
-/* ------------------------------------------------------------- */
-
-static irqreturn_t c4_handle_interrupt(avmcard *card)
-{
- unsigned long flags;
- u32 status;
-
- spin_lock_irqsave(&card->lock, flags);
- status = c4inmeml(card->mbase + DOORBELL);
-
- if (status & DBELL_RESET_HOST) {
- u_int i;
- c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x0c);
- spin_unlock_irqrestore(&card->lock, flags);
- if (card->nlogcontr == 0)
- return IRQ_HANDLED;
- printk(KERN_ERR "%s: unexpected reset\n", card->name);
- for (i = 0; i < card->nr_controllers; i++) {
- avmctrl_info *cinfo = &card->ctrlinfo[i];
- memset(cinfo->version, 0, sizeof(cinfo->version));
- spin_lock_irqsave(&card->lock, flags);
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(&cinfo->capi_ctrl);
- }
- card->nlogcontr = 0;
- return IRQ_HANDLED;
- }
-
- status &= (DBELL_UP_HOST | DBELL_DOWN_HOST);
- if (!status) {
- spin_unlock_irqrestore(&card->lock, flags);
- return IRQ_HANDLED;
- }
- c4outmeml(card->mbase + DOORBELL, status);
-
- if ((status & DBELL_UP_HOST) != 0) {
- card->dma->recvlen = c4inmeml(card->mbase + MBOX_UP_LEN);
- c4outmeml(card->mbase + MBOX_UP_LEN, 0);
- c4_handle_rx(card);
- card->dma->recvlen = 0;
- c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
- c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
- }
-
- if ((status & DBELL_DOWN_HOST) != 0) {
- card->csr &= ~DBELL_DOWN_ARM;
- c4_dispatch_tx(card);
- } else if (card->csr & DBELL_DOWN_HOST) {
- if (c4inmeml(card->mbase + MBOX_DOWN_LEN) == 0) {
- card->csr &= ~DBELL_DOWN_ARM;
- c4_dispatch_tx(card);
- }
- }
- spin_unlock_irqrestore(&card->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t c4_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
-
- return c4_handle_interrupt(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_send_init(avmcard *card)
-{
- struct sk_buff *skb;
- void *p;
- unsigned long flags;
-
- skb = alloc_skb(15, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_INIT);
- _put_word(&p, CAPI_MAXAPPL);
- _put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
- _put_word(&p, card->cardnr - 1);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static int queue_sendconfigword(avmcard *card, u32 val)
-{
- struct sk_buff *skb;
- unsigned long flags;
- void *p;
-
- skb = alloc_skb(3 + 4, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, send config\n",
- card->name);
- return -ENOMEM;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_CONFIG);
- _put_word(&p, val);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- return 0;
-}
-
-static int queue_sendconfig(avmcard *card, char cval[4])
-{
- struct sk_buff *skb;
- unsigned long flags;
- void *p;
-
- skb = alloc_skb(3 + 4, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, send config\n",
- card->name);
- return -ENOMEM;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_CONFIG);
- _put_byte(&p, cval[0]);
- _put_byte(&p, cval[1]);
- _put_byte(&p, cval[2]);
- _put_byte(&p, cval[3]);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
-
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- return 0;
-}
-
-static int c4_send_config(avmcard *card, capiloaddatapart *config)
-{
- u8 val[4];
- unsigned char *dp;
- u_int left;
- int retval;
-
- if ((retval = queue_sendconfigword(card, 1)) != 0)
- return retval;
- if ((retval = queue_sendconfigword(card, config->len)) != 0)
- return retval;
-
- dp = config->data;
- left = config->len;
- while (left >= sizeof(u32)) {
- if (config->user) {
- if (copy_from_user(val, dp, sizeof(val)))
- return -EFAULT;
- } else {
- memcpy(val, dp, sizeof(val));
- }
- if ((retval = queue_sendconfig(card, val)) != 0)
- return retval;
- left -= sizeof(val);
- dp += sizeof(val);
- }
- if (left) {
- memset(val, 0, sizeof(val));
- if (config->user) {
- if (copy_from_user(&val, dp, left))
- return -EFAULT;
- } else {
- memcpy(&val, dp, left);
- }
- if ((retval = queue_sendconfig(card, val)) != 0)
- return retval;
- }
-
- return 0;
-}
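
c4_send_config() streams the configuration as 4-byte words, zero-padding the final partial word. The chunking logic on its own, with the skb queueing replaced by a caller-supplied callback (a sketch, not driver code; the copy_from_user handling is dropped):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Emit full 4-byte words, then one zero-padded word for any tail. */
static int send_words(const uint8_t *dp, size_t left,
		      int (*emit)(const uint8_t word[4]))
{
	uint8_t val[4];
	int ret;

	while (left >= sizeof(val)) {
		memcpy(val, dp, sizeof(val));
		if ((ret = emit(val)) != 0)
			return ret;
		left -= sizeof(val);
		dp += sizeof(val);
	}
	if (left) {
		memset(val, 0, sizeof(val));
		memcpy(val, dp, left);
		return emit(val);
	}
	return 0;
}

static int print_word(const uint8_t w[4])
{
	printf("%02x %02x %02x %02x\n", w[0], w[1], w[2], w[3]);
	return 0;
}

int main(void)
{
	return send_words((const uint8_t *)"ABCDEF", 6, print_word);
}
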
-
-static int c4_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- int retval;
-
- if ((retval = c4_load_t4file(card, &data->firmware))) {
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- c4_reset(card);
- return retval;
- }
-
- card->csr = 0;
- c4outmeml(card->mbase + MBOX_UP_LEN, 0);
- c4outmeml(card->mbase + MBOX_DOWN_LEN, 0);
- c4outmeml(card->mbase + DOORBELL, DBELL_INIT);
- mdelay(1);
- c4outmeml(card->mbase + DOORBELL,
- DBELL_UP_HOST | DBELL_DOWN_HOST | DBELL_RESET_HOST);
-
- c4outmeml(card->mbase + PCI_OUT_INT_MASK, 0x08);
-
- card->dma->recvlen = 0;
- c4outmeml(card->mbase + MBOX_UP_ADDR, card->dma->recvbuf.dmaaddr);
- c4outmeml(card->mbase + MBOX_UP_LEN, card->dma->recvbuf.size);
- c4outmeml(card->mbase + DOORBELL, DBELL_UP_ARM);
-
- if (data->configuration.len > 0 && data->configuration.data) {
- retval = c4_send_config(card, &data->configuration);
- if (retval) {
- printk(KERN_ERR "%s: failed to set config!!\n",
- card->name);
- c4_reset(card);
- return retval;
- }
- }
-
- c4_send_init(card);
-
- return 0;
-}
-
-static void c4_reset_ctr(struct capi_ctr *ctrl)
-{
- avmcard *card = ((avmctrl_info *)(ctrl->driverdata))->card;
- avmctrl_info *cinfo;
- u_int i;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- c4_reset(card);
-
- spin_unlock_irqrestore(&card->lock, flags);
-
- for (i = 0; i < card->nr_controllers; i++) {
- cinfo = &card->ctrlinfo[i];
- memset(cinfo->version, 0, sizeof(cinfo->version));
- capi_ctr_down(&cinfo->capi_ctrl);
- }
- card->nlogcontr = 0;
-}
-
-static void c4_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo;
- u_int i;
-
- if (!card)
- return;
-
- c4_reset(card);
-
- for (i = 0; i < card->nr_controllers; i++) {
- cinfo = &card->ctrlinfo[i];
- detach_capi_ctr(&cinfo->capi_ctrl);
- }
-
- free_irq(card->irq, card);
- iounmap(card->mbase);
- release_region(card->port, AVMB1_PORTLEN);
- avmcard_dma_free(card->dma);
- pci_set_drvdata(pdev, NULL);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_register_appl(struct capi_ctr *ctrl,
- u16 appl,
- capi_register_params *rp)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- struct sk_buff *skb;
- int want = rp->level3cnt;
- unsigned long flags;
- int nconn;
- void *p;
-
- if (ctrl->cnr == card->cardnr) {
-
- if (want > 0) nconn = want;
- else nconn = ctrl->profile.nbchannel * 4 * -want;
- if (nconn == 0) nconn = ctrl->profile.nbchannel * 4;
-
- skb = alloc_skb(23, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost register appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_REGISTER);
- _put_word(&p, appl);
- _put_word(&p, 1024 * (nconn + 1));
- _put_word(&p, nconn);
- _put_word(&p, rp->datablkcnt);
- _put_word(&p, rp->datablklen);
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
-
- skb_queue_tail(&card->dma->send_queue, skb);
-
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- }
-}
-
-/* ------------------------------------------------------------- */
-
-static void c4_release_appl(struct capi_ctr *ctrl, u16 appl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned long flags;
- struct sk_buff *skb;
- void *p;
-
- spin_lock_irqsave(&card->lock, flags);
- capilib_release_appl(&cinfo->ncci_head, appl);
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (ctrl->cnr == card->cardnr) {
- skb = alloc_skb(7, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_CRIT "%s: no memory, lost release appl.\n",
- card->name);
- return;
- }
- p = skb->data;
- _put_byte(&p, 0);
- _put_byte(&p, 0);
- _put_byte(&p, SEND_RELEASE);
- _put_word(&p, appl);
-
- skb_put(skb, (u8 *)p - (u8 *)skb->data);
- skb_queue_tail(&card->dma->send_queue, skb);
- spin_lock_irqsave(&card->lock, flags);
- c4_dispatch_tx(card);
- spin_unlock_irqrestore(&card->lock, flags);
- }
-}
-
-/* ------------------------------------------------------------- */
-
-static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u16 retval = CAPI_NOERROR;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- }
- if (retval == CAPI_NOERROR) {
- skb_queue_tail(&card->dma->send_queue, skb);
- c4_dispatch_tx(card);
- }
- spin_unlock_irqrestore(&card->lock, flags);
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static char *c4_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->membase : 0
- );
- return cinfo->infobuf;
-}
-
-static int c4_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- u8 flag;
- char *s;
-
- seq_printf(m, "%-16s %s\n", "name", card->name);
- seq_printf(m, "%-16s 0x%x\n", "io", card->port);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
- seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
- switch (card->cardtype) {
- case avm_b1isa: s = "B1 ISA"; break;
- case avm_b1pci: s = "B1 PCI"; break;
- case avm_b1pcmcia: s = "B1 PCMCIA"; break;
- case avm_m1: s = "M1"; break;
- case avm_m2: s = "M2"; break;
- case avm_t1isa: s = "T1 ISA (HEMA)"; break;
- case avm_t1pci: s = "T1 PCI"; break;
- case avm_c4: s = "C4"; break;
- case avm_c2: s = "C2"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[3];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
- "protocol",
- (flag & 0x01) ? " DSS1" : "",
- (flag & 0x02) ? " CT1" : "",
- (flag & 0x04) ? " VN3" : "",
- (flag & 0x08) ? " NI1" : "",
- (flag & 0x10) ? " AUSTEL" : "",
- (flag & 0x20) ? " ESS" : "",
- (flag & 0x40) ? " 1TR6" : ""
- );
- }
- if (card->cardtype != avm_m1) {
- flag = ((u8 *)(ctrl->profile.manu))[5];
- if (flag)
- seq_printf(m, "%-16s%s%s%s%s\n",
- "linetype",
- (flag & 0x01) ? " point to point" : "",
- (flag & 0x02) ? " point to multipoint" : "",
- (flag & 0x08) ? " leased line without D-channel" : "",
- (flag & 0x04) ? " leased line with D-channel" : ""
- );
- }
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- return 0;
-}
-
-/* ------------------------------------------------------------- */
-
-static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
- int nr_controllers)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
- int i;
-
- card = b1_alloc_card(nr_controllers);
- if (!card) {
- printk(KERN_WARNING "c4: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
- card->dma = avmcard_dma_alloc("c4", dev, 2048 + 128, 2048 + 128);
- if (!card->dma) {
- printk(KERN_WARNING "c4: no memory.\n");
- retval = -ENOMEM;
- goto err_free;
- }
-
- sprintf(card->name, "c%d-%x", nr_controllers, p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->membase = p->membase;
- card->cardtype = (nr_controllers == 4) ? avm_c4 : avm_c2;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "c4: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free_dma;
- }
-
- card->mbase = ioremap(card->membase, 128);
- if (card->mbase == NULL) {
- printk(KERN_NOTICE "c4: can't remap memory at 0x%lx\n",
- card->membase);
- retval = -EIO;
- goto err_release_region;
- }
-
- retval = c4_detect(card);
- if (retval != 0) {
- printk(KERN_NOTICE "c4: NO card at 0x%x error(%d)\n",
- card->port, retval);
- retval = -EIO;
- goto err_unmap;
- }
- c4_reset(card);
-
- retval = request_irq(card->irq, c4_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "c4: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_unmap;
- }
-
- for (i = 0; i < nr_controllers; i++) {
- cinfo = &card->ctrlinfo[i];
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "c4";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = c4_register_appl;
- cinfo->capi_ctrl.release_appl = c4_release_appl;
- cinfo->capi_ctrl.send_message = c4_send_message;
- cinfo->capi_ctrl.load_firmware = c4_load_firmware;
- cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
- cinfo->capi_ctrl.procinfo = c4_procinfo;
- cinfo->capi_ctrl.proc_show = c4_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "c4: attach controller failed (%d).\n", i);
- for (i--; i >= 0; i--) {
- cinfo = &card->ctrlinfo[i];
- detach_capi_ctr(&cinfo->capi_ctrl);
- }
- goto err_free_irq;
- }
- if (i == 0)
- card->cardnr = cinfo->capi_ctrl.cnr;
- }
-
- printk(KERN_INFO "c4: AVM C%d at i/o %#x, irq %d, mem %#lx\n",
- nr_controllers, card->port, card->irq,
- card->membase);
- pci_set_drvdata(dev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_unmap:
- iounmap(card->mbase);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
- avmcard_dma_free(card->dma);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
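
c4_add_card() uses the classic kernel error-unwind ladder: each acquired resource gets a label, and a failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A minimal userspace sketch of the pattern, with plain malloc/free standing in for the driver's region, mapping, IRQ, and DMA resources:

#include <stdlib.h>

/* Sketch of the goto-based unwind ladder from c4_add_card(): each
 * failure path jumps to the label that frees everything acquired so
 * far, in reverse acquisition order, giving one cleanup sequence. */
static int setup_two_resources(void)
{
	char *a, *b;
	int retval;

	a = malloc(16);
	if (!a) {
		retval = -1;
		goto err;
	}
	b = malloc(16);
	if (!b) {
		retval = -1;
		goto err_free_a;
	}

	/* ... use a and b; on full success the driver keeps them ... */
	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
err:
	return retval;
}

int main(void)
{
	return setup_two_resources();
}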
-
-/* ------------------------------------------------------------- */
-
-static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
- int nr = ent->driver_data;
- int retval = 0;
- struct capicardparams param;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "c4: failed to enable AVM-C%d\n", nr);
- return -ENODEV;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO "c4: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
- nr, param.port, param.irq, param.membase);
-
- retval = c4_add_card(&param, dev, nr);
- if (retval != 0) {
- printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
- nr, param.port, param.irq, param.membase);
- pci_disable_device(dev);
- return -ENODEV;
- }
- return 0;
-}
-
-static struct pci_driver c4_pci_driver = {
- .name = "c4",
- .id_table = c4_pci_tbl,
- .probe = c4_probe,
- .remove = c4_remove,
-};
-
-static struct capi_driver capi_driver_c2 = {
- .name = "c2",
- .revision = "1.0",
-};
-
-static struct capi_driver capi_driver_c4 = {
- .name = "c4",
- .revision = "1.0",
-};
-
-static int __init c4_init(void)
-{
- char *p;
- char rev[32];
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = pci_register_driver(&c4_pci_driver);
- if (!err) {
- strlcpy(capi_driver_c2.revision, rev, 32);
- register_capi_driver(&capi_driver_c2);
- strlcpy(capi_driver_c4.revision, rev, 32);
- register_capi_driver(&capi_driver_c4);
- printk(KERN_INFO "c4: revision %s\n", rev);
- }
- return err;
-}
-
-static void __exit c4_exit(void)
-{
- unregister_capi_driver(&capi_driver_c2);
- unregister_capi_driver(&capi_driver_c4);
- pci_unregister_driver(&c4_pci_driver);
-}
-
-module_init(c4_init);
-module_exit(c4_exit);
diff --git a/drivers/staging/isdn/avm/t1isa.c b/drivers/staging/isdn/avm/t1isa.c
deleted file mode 100644
index 2153619c5b31..000000000000
--- a/drivers/staging/isdn/avm/t1isa.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/* $Id: t1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $
- *
- * Module for AVM T1 HEMA-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/capi.h>
-#include <linux/netdevice.h>
-#include <linux/kernelcapi.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-/* ------------------------------------------------------------- */
-
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------- */
-
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 HEMA ISA card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static int hema_irq_table[16] =
-{0,
- 0,
- 0,
- 0x80, /* irq 3 */
- 0,
- 0x90, /* irq 5 */
- 0,
- 0xA0, /* irq 7 */
- 0,
- 0xB0, /* irq 9 */
- 0xC0, /* irq 10 */
- 0xD0, /* irq 11 */
- 0xE0, /* irq 12 */
- 0,
- 0,
- 0xF0, /* irq 15 */
-};
-
-static int t1_detectandinit(unsigned int base, unsigned irq, int cardnr)
-{
- unsigned char cregs[8];
- unsigned char reverse_cardnr;
- unsigned char dummy;
- int i;
-
- reverse_cardnr = ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1)
- | ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3);
- cregs[0] = (HEMA_VERSION_ID << 4) | (reverse_cardnr & 0xf);
- cregs[1] = 0x00; /* fast & slow link connected to CON1 */
- cregs[2] = 0x05; /* fast link 20 MBit, slow link 20 MBit */
- cregs[3] = 0;
- cregs[4] = 0x11; /* zero wait state */
- cregs[5] = hema_irq_table[irq & 0xf];
- cregs[6] = 0;
- cregs[7] = 0;
-
- /*
- * no one else should use the ISA bus at this moment,
- * but there is no way to prevent it :-(
- * save_flags(flags); cli();
- */
-
- /* board reset */
- t1outp(base, T1_RESETBOARD, 0xf);
- mdelay(100);
- dummy = t1inp(base, T1_FASTLINK + T1_OUTSTAT); /* first read */
-
- /* write config */
- dummy = (base >> 4) & 0xff;
- for (i = 1; i <= 0xf; i++) t1outp(base, i, dummy);
- t1outp(base, HEMA_PAL_ID & 0xf, dummy);
- t1outp(base, HEMA_PAL_ID >> 4, cregs[0]);
- for (i = 1; i < 7; i++) t1outp(base, 0, cregs[i]);
- t1outp(base, ((base >> 4)) & 0x3, cregs[7]);
- /* restore_flags(flags); */
-
- mdelay(100);
- t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
- t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
- mdelay(10);
- t1outp(base, T1_FASTLINK + T1_RESETLINK, 1);
- t1outp(base, T1_SLOWLINK + T1_RESETLINK, 1);
- mdelay(100);
- t1outp(base, T1_FASTLINK + T1_RESETLINK, 0);
- t1outp(base, T1_SLOWLINK + T1_RESETLINK, 0);
- mdelay(10);
- t1outp(base, T1_FASTLINK + T1_ANALYSE, 0);
- mdelay(5);
- t1outp(base, T1_SLOWLINK + T1_ANALYSE, 0);
-
- if (t1inp(base, T1_FASTLINK + T1_OUTSTAT) != 0x1) /* tx empty */
- return 1;
- if (t1inp(base, T1_FASTLINK + T1_INSTAT) != 0x0) /* rx empty */
- return 2;
- if (t1inp(base, T1_FASTLINK + T1_IRQENABLE) != 0x0)
- return 3;
- if ((t1inp(base, T1_FASTLINK + T1_FIFOSTAT) & 0xf0) != 0x70)
- return 4;
- if ((t1inp(base, T1_FASTLINK + T1_IRQMASTER) & 0x0e) != 0)
- return 5;
- if ((t1inp(base, T1_FASTLINK + T1_IDENT) & 0x7d) != 1)
- return 6;
- if (t1inp(base, T1_SLOWLINK + T1_OUTSTAT) != 0x1) /* tx empty */
- return 7;
- if ((t1inp(base, T1_SLOWLINK + T1_IRQMASTER) & 0x0e) != 0)
- return 8;
- if ((t1inp(base, T1_SLOWLINK + T1_IDENT) & 0x7d) != 0)
- return 9;
- return 0;
-}
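
Two details of the detection routine above are worth isolating: the card number is written to the HEMA configuration register with its four bits reversed, and the IRQ is encoded through a sparse table whose zero entries double as "this IRQ is not usable" markers (which is exactly how t1isa_probe() validates the irq parameter later). A standalone sketch; the table values are copied from above, and rev4 is a hypothetical helper name:

#include <stdio.h>

static const int hema_irq_table[16] = {
	0, 0, 0, 0x80, 0, 0x90, 0, 0xA0,
	0, 0xB0, 0xC0, 0xD0, 0xE0, 0, 0, 0xF0,
};

/* reverse the low four bits, as t1_detectandinit() does for cardnr */
static unsigned char rev4(unsigned char n)
{
	return ((n & 0x01) << 3) | ((n & 0x02) << 1) |
	       ((n & 0x04) >> 1) | ((n & 0x08) >> 3);
}

int main(void)
{
	unsigned irq = 5, cardnr = 1;

	if (hema_irq_table[irq & 0xf] == 0)
		printf("irq %u not usable\n", irq);
	else
		printf("irq %u -> register value 0x%x\n",
		       irq, hema_irq_table[irq & 0xf]);
	printf("cardnr %u -> reversed nibble 0x%x\n", cardnr, rev4(cardnr));
	return 0;
}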
-
-static irqreturn_t t1isa_interrupt(int interrupt, void *devptr)
-{
- avmcard *card = devptr;
- avmctrl_info *cinfo = &card->ctrlinfo[0];
- struct capi_ctr *ctrl = &cinfo->capi_ctrl;
- unsigned char b1cmd;
- struct sk_buff *skb;
-
- unsigned ApplId;
- unsigned MsgLen;
- unsigned DataB3Len;
- unsigned NCCI;
- unsigned WindowSize;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
-
- while (b1_rx_full(card->port)) {
-
- b1cmd = b1_get_byte(card->port);
-
- switch (b1cmd) {
-
- case RECEIVE_DATA_B3_IND:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- DataB3Len = t1_get_slice(card->port, card->databuf);
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (MsgLen < 30) { /* not CAPI 64Bit */
- memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
- MsgLen = 30;
- CAPIMSG_SETLEN(card->msgbuf, 30);
- }
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- skb_put_data(skb, card->databuf, DataB3Len);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_MESSAGE:
-
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: incoming packet dropped\n",
- card->name);
- } else {
- skb_put_data(skb, card->msgbuf, MsgLen);
- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3)
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_handle_message(ctrl, ApplId, skb);
- }
- break;
-
- case RECEIVE_NEW_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- WindowSize = b1_get_word(card->port);
- capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_FREE_NCCI:
-
- ApplId = b1_get_word(card->port);
- NCCI = b1_get_word(card->port);
- if (NCCI != 0xffffffff)
- capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
- spin_unlock_irqrestore(&card->lock, flags);
- break;
-
- case RECEIVE_START:
- b1_put_byte(card->port, SEND_POLLACK);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_resume_output(ctrl);
- break;
-
- case RECEIVE_STOP:
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_suspend_output(ctrl);
- break;
-
- case RECEIVE_INIT:
-
- cinfo->versionlen = t1_get_slice(card->port, cinfo->versionbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- b1_parse_version(cinfo);
- printk(KERN_INFO "%s: %s-card (%s) now active\n",
- card->name,
- cinfo->version[VER_CARDTYPE],
- cinfo->version[VER_DRIVER]);
- capi_ctr_ready(ctrl);
- break;
-
- case RECEIVE_TASK_READY:
- ApplId = (unsigned) b1_get_word(card->port);
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
- card->name, ApplId, card->msgbuf);
- break;
-
- case RECEIVE_DEBUGMSG:
- MsgLen = t1_get_slice(card->port, card->msgbuf);
- spin_unlock_irqrestore(&card->lock, flags);
- card->msgbuf[MsgLen] = 0;
- while (MsgLen > 0
- && (card->msgbuf[MsgLen - 1] == '\n'
- || card->msgbuf[MsgLen - 1] == '\r')) {
- card->msgbuf[MsgLen - 1] = 0;
- MsgLen--;
- }
- printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
- break;
-
-
- case 0xff:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: card reseted ?\n", card->name);
- return IRQ_HANDLED;
- default:
- spin_unlock_irqrestore(&card->lock, flags);
- printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
- card->name, b1cmd);
- return IRQ_NONE;
- }
- }
- return IRQ_HANDLED;
-}
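
Note the locking discipline in the handler above: for every event the data is pulled out of the card's I/O window while the spinlock is held, and the lock is released before the result is delivered upward via capi_ctr_handle_message() and friends, so the CAPI layer never runs with the driver lock held. A userspace sketch of the pattern (not kernel code; a pthread mutex stands in for the spinlock and the function names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[64] = "RECEIVE_MESSAGE payload";

static void deliver(const char *msg)
{
	/* stands in for capi_ctr_handle_message() */
	printf("delivered: %s\n", msg);
}

static void handle_event(void)
{
	char local[64];

	pthread_mutex_lock(&lock);
	strcpy(local, shared_buf);   /* snapshot under the lock */
	pthread_mutex_unlock(&lock); /* drop before calling upward */
	deliver(local);
}

int main(void)
{
	handle_event();
	return 0;
}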
-
-/* ------------------------------------------------------------- */
-
-static int t1isa_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- int retval;
-
- t1_disable_irq(port);
- b1_reset(port);
-
- if ((retval = b1_load_t4file(card, &data->firmware))) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load t4file!!\n",
- card->name);
- return retval;
- }
-
- if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
- b1_reset(port);
- printk(KERN_ERR "%s: failed to load config!!\n",
- card->name);
- return retval;
- }
- }
-
- if (!b1_loaded(card)) {
- printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
- return -EIO;
- }
-
- spin_lock_irqsave(&card->lock, flags);
- b1_setinterrupt(port, card->irq, card->cardtype);
- b1_put_byte(port, SEND_INIT);
- b1_put_word(port, CAPI_MAXAPPL);
- b1_put_word(port, AVM_NCCI_PER_CHANNEL * 30);
- b1_put_word(port, ctrl->cnr - 1);
- spin_unlock_irqrestore(&card->lock, flags);
-
- return 0;
-}
-
-static void t1isa_reset_ctr(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
-
- t1_disable_irq(port);
- b1_reset(port);
- b1_reset(port);
-
- memset(cinfo->version, 0, sizeof(cinfo->version));
- spin_lock_irqsave(&card->lock, flags);
- capilib_release(&cinfo->ncci_head);
- spin_unlock_irqrestore(&card->lock, flags);
- capi_ctr_down(ctrl);
-}
-
-static void t1isa_remove(struct pci_dev *pdev)
-{
- avmctrl_info *cinfo = pci_get_drvdata(pdev);
- avmcard *card;
-
- if (!cinfo)
- return;
-
- card = cinfo->card;
-
- t1_disable_irq(card->port);
- b1_reset(card->port);
- b1_reset(card->port);
- t1_reset(card->port);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- release_region(card->port, AVMB1_PORTLEN);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-static char *t1isa_procinfo(struct capi_ctr *ctrl);
-
-static int t1isa_probe(struct pci_dev *pdev, int cardnr)
-{
- avmctrl_info *cinfo;
- avmcard *card;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "t1isa: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- cinfo = card->ctrlinfo;
- card->port = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
- card->cardtype = avm_t1isa;
- card->cardnr = cardnr;
- sprintf(card->name, "t1isa-%x", card->port);
-
- if (!(((card->port & 0x7) == 0) && ((card->port & 0x30) != 0x30))) {
- printk(KERN_WARNING "t1isa: invalid port 0x%x.\n", card->port);
- retval = -EINVAL;
- goto err_free;
- }
- if (hema_irq_table[card->irq & 0xf] == 0) {
- printk(KERN_WARNING "t1isa: irq %d not valid.\n", card->irq);
- retval = -EINVAL;
- goto err_free;
- }
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_INFO "t1isa: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free;
- }
- retval = request_irq(card->irq, t1isa_interrupt, 0, card->name, card);
- if (retval) {
- printk(KERN_INFO "t1isa: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_release_region;
- }
-
- if ((retval = t1_detectandinit(card->port, card->irq, card->cardnr)) != 0) {
- printk(KERN_INFO "t1isa: NO card at 0x%x (%d)\n",
- card->port, retval);
- retval = -ENODEV;
- goto err_free_irq;
- }
- t1_disable_irq(card->port);
- b1_reset(card->port);
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "t1isa";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1_register_appl;
- cinfo->capi_ctrl.release_appl = b1_release_appl;
- cinfo->capi_ctrl.send_message = t1isa_send_message;
- cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
- cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
- cinfo->capi_ctrl.procinfo = t1isa_procinfo;
- cinfo->capi_ctrl.proc_show = b1_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_INFO "t1isa: attach controller failed.\n");
- goto err_free_irq;
- }
-
- printk(KERN_INFO "t1isa: AVM T1 ISA at i/o %#x, irq %d, card %d\n",
- card->port, card->irq, card->cardnr);
-
- pci_set_drvdata(pdev, cinfo);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
- avmcard *card = cinfo->card;
- unsigned int port = card->port;
- unsigned long flags;
- u16 len = CAPIMSG_LEN(skb->data);
- u8 cmd = CAPIMSG_COMMAND(skb->data);
- u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
- u16 dlen, retval;
-
- spin_lock_irqsave(&card->lock, flags);
- if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- if (retval != CAPI_NOERROR) {
- spin_unlock_irqrestore(&card->lock, flags);
- return retval;
- }
- dlen = CAPIMSG_DATALEN(skb->data);
-
- b1_put_byte(port, SEND_DATA_B3_REQ);
- t1_put_slice(port, skb->data, len);
- t1_put_slice(port, skb->data + len, dlen);
- } else {
- b1_put_byte(port, SEND_MESSAGE);
- t1_put_slice(port, skb->data, len);
- }
- spin_unlock_irqrestore(&card->lock, flags);
- dev_kfree_skb_any(skb);
- return CAPI_NOERROR;
-}
-/* ------------------------------------------------------------- */
-
-static char *t1isa_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d %d",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->cardnr : 0
- );
- return cinfo->infobuf;
-}
-
-
-/* ------------------------------------------------------------- */
-
-#define MAX_CARDS 4
-static struct pci_dev isa_dev[MAX_CARDS];
-static int io[MAX_CARDS];
-static int irq[MAX_CARDS];
-static int cardnr[MAX_CARDS];
-
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(cardnr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(cardnr, "Card number(s) (as jumpered)");
-
-static int t1isa_add_card(struct capi_driver *driver, capicardparams *data)
-{
- int i;
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (isa_dev[i].resource[0].start)
- continue;
-
- isa_dev[i].resource[0].start = data->port;
- isa_dev[i].irq = data->irq;
-
- if (t1isa_probe(&isa_dev[i], data->cardnr) == 0)
- return 0;
- }
- return -ENODEV;
-}
-
-static struct capi_driver capi_driver_t1isa = {
- .name = "t1isa",
- .revision = "1.0",
- .add_card = t1isa_add_card,
-};
-
-static int __init t1isa_init(void)
-{
- char rev[32];
- char *p;
- int i;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- isa_dev[i].resource[0].start = io[i];
- isa_dev[i].irq = irq[i];
-
- if (t1isa_probe(&isa_dev[i], cardnr[i]) != 0)
- return -ENODEV;
- }
-
- strlcpy(capi_driver_t1isa.revision, rev, 32);
- register_capi_driver(&capi_driver_t1isa);
- printk(KERN_INFO "t1isa: revision %s\n", rev);
-
- return 0;
-}
-
-static void __exit t1isa_exit(void)
-{
- int i;
-
- unregister_capi_driver(&capi_driver_t1isa);
- for (i = 0; i < MAX_CARDS; i++) {
- if (!io[i])
- break;
-
- t1isa_remove(&isa_dev[i]);
- }
-}
-
-module_init(t1isa_init);
-module_exit(t1isa_exit);
diff --git a/drivers/staging/isdn/avm/t1pci.c b/drivers/staging/isdn/avm/t1pci.c
deleted file mode 100644
index f5ed1d5004c9..000000000000
--- a/drivers/staging/isdn/avm/t1pci.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/* $Id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
- *
- * Module for AVM T1 PCI-card.
- *
- * Copyright 1999 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/capi.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-#include "avmcard.h"
-
-#undef CONFIG_T1PCI_DEBUG
-#undef CONFIG_T1PCI_POLLDEBUG
-
-/* ------------------------------------------------------------- */
-static char *revision = "$Revision: 1.1.2.2 $";
-/* ------------------------------------------------------------- */
-
-static struct pci_device_id t1pci_pci_tbl[] = {
- { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, t1pci_pci_tbl);
-MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 PCI card");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-/* ------------------------------------------------------------- */
-
-static char *t1pci_procinfo(struct capi_ctr *ctrl);
-
-static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
-{
- avmcard *card;
- avmctrl_info *cinfo;
- int retval;
-
- card = b1_alloc_card(1);
- if (!card) {
- printk(KERN_WARNING "t1pci: no memory.\n");
- retval = -ENOMEM;
- goto err;
- }
-
- card->dma = avmcard_dma_alloc("t1pci", pdev, 2048 + 128, 2048 + 128);
- if (!card->dma) {
- printk(KERN_WARNING "t1pci: no memory.\n");
- retval = -ENOMEM;
- goto err_free;
- }
-
- cinfo = card->ctrlinfo;
- sprintf(card->name, "t1pci-%x", p->port);
- card->port = p->port;
- card->irq = p->irq;
- card->membase = p->membase;
- card->cardtype = avm_t1pci;
-
- if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
- printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
- card->port, card->port + AVMB1_PORTLEN);
- retval = -EBUSY;
- goto err_free_dma;
- }
-
- card->mbase = ioremap(card->membase, 64);
- if (!card->mbase) {
- printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
- card->membase);
- retval = -EIO;
- goto err_release_region;
- }
-
- b1dma_reset(card);
-
- retval = t1pci_detect(card);
- if (retval != 0) {
- if (retval < 6)
- printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
- card->port, retval);
- else
- printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
- card->port, retval);
- retval = -EIO;
- goto err_unmap;
- }
- b1dma_reset(card);
-
- retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
- if (retval) {
- printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
- retval = -EBUSY;
- goto err_unmap;
- }
-
- cinfo->capi_ctrl.owner = THIS_MODULE;
- cinfo->capi_ctrl.driver_name = "t1pci";
- cinfo->capi_ctrl.driverdata = cinfo;
- cinfo->capi_ctrl.register_appl = b1dma_register_appl;
- cinfo->capi_ctrl.release_appl = b1dma_release_appl;
- cinfo->capi_ctrl.send_message = b1dma_send_message;
- cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
- cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
- cinfo->capi_ctrl.procinfo = t1pci_procinfo;
- cinfo->capi_ctrl.proc_show = b1dma_proc_show;
- strcpy(cinfo->capi_ctrl.name, card->name);
-
- retval = attach_capi_ctr(&cinfo->capi_ctrl);
- if (retval) {
- printk(KERN_ERR "t1pci: attach controller failed.\n");
- retval = -EBUSY;
- goto err_free_irq;
- }
- card->cardnr = cinfo->capi_ctrl.cnr;
-
- printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
- card->port, card->irq, card->membase);
-
- pci_set_drvdata(pdev, card);
- return 0;
-
-err_free_irq:
- free_irq(card->irq, card);
-err_unmap:
- iounmap(card->mbase);
-err_release_region:
- release_region(card->port, AVMB1_PORTLEN);
-err_free_dma:
- avmcard_dma_free(card->dma);
-err_free:
- b1_free_card(card);
-err:
- return retval;
-}
-
-/* ------------------------------------------------------------- */
-
-static void t1pci_remove(struct pci_dev *pdev)
-{
- avmcard *card = pci_get_drvdata(pdev);
- avmctrl_info *cinfo = card->ctrlinfo;
-
- b1dma_reset(card);
-
- detach_capi_ctr(&cinfo->capi_ctrl);
- free_irq(card->irq, card);
- iounmap(card->mbase);
- release_region(card->port, AVMB1_PORTLEN);
- avmcard_dma_free(card->dma);
- b1_free_card(card);
-}
-
-/* ------------------------------------------------------------- */
-
-static char *t1pci_procinfo(struct capi_ctr *ctrl)
-{
- avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
-
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->port : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- cinfo->card ? cinfo->card->membase : 0
- );
- return cinfo->infobuf;
-}
-
-/* ------------------------------------------------------------- */
-
-static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
- struct capicardparams param;
- int retval;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "t1pci: failed to enable AVM-T1-PCI\n");
- return -ENODEV;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO "t1pci: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
- param.port, param.irq, param.membase);
-
- retval = t1pci_add_card(&param, dev);
- if (retval != 0) {
- printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
- param.port, param.irq, param.membase);
- pci_disable_device(dev);
- return -ENODEV;
- }
- return 0;
-}
-
-static struct pci_driver t1pci_pci_driver = {
- .name = "t1pci",
- .id_table = t1pci_pci_tbl,
- .probe = t1pci_probe,
- .remove = t1pci_remove,
-};
-
-static struct capi_driver capi_driver_t1pci = {
- .name = "t1pci",
- .revision = "1.0",
-};
-
-static int __init t1pci_init(void)
-{
- char *p;
- char rev[32];
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p - 1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = pci_register_driver(&t1pci_pci_driver);
- if (!err) {
- strlcpy(capi_driver_t1pci.revision, rev, 32);
- register_capi_driver(&capi_driver_t1pci);
- printk(KERN_INFO "t1pci: revision %s\n", rev);
- }
- return err;
-}
-
-static void __exit t1pci_exit(void)
-{
- unregister_capi_driver(&capi_driver_t1pci);
- pci_unregister_driver(&t1pci_pci_driver);
-}
-
-module_init(t1pci_init);
-module_exit(t1pci_exit);
diff --git a/drivers/staging/isdn/gigaset/Kconfig b/drivers/staging/isdn/gigaset/Kconfig
deleted file mode 100644
index c593105b3600..000000000000
--- a/drivers/staging/isdn/gigaset/Kconfig
+++ /dev/null
@@ -1,62 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ISDN_DRV_GIGASET
- tristate "Siemens Gigaset support"
- depends on TTY
- select CRC_CCITT
- select BITREVERSE
- help
- This driver supports the Siemens Gigaset SX205/255 family of
- ISDN DECT bases, including the predecessors Gigaset 3070/3075
- and 4170/4175 and their T-Com versions Sinus 45isdn and Sinus
- 721X.
- If you have one of these devices, say M here and to at least
- one of the connection-specific parts that follow.
- This will build a module called "gigaset".
- Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
- as a module, you have to build this driver as a module too,
- otherwise the Gigaset device won't show up as an ISDN device.
-
-if ISDN_DRV_GIGASET
-
-config GIGASET_CAPI
- bool "Gigaset CAPI support"
- depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
- default 'y'
- help
- Build the Gigaset driver as a CAPI 2.0 driver interfacing with
- the Kernel CAPI subsystem. To use it with the old ISDN4Linux
- subsystem you'll have to enable the capidrv glue driver.
- (select ISDN_CAPI_CAPIDRV).
- Say N to build the old native ISDN4Linux variant.
- If unsure, say Y.
-
-config GIGASET_BASE
- tristate "Gigaset base station support"
- depends on USB
- help
- Say M here if you want to use the USB interface of the Gigaset
- base for connection to your system.
- This will build a module called "bas_gigaset".
-
-config GIGASET_M105
- tristate "Gigaset M105 support"
- depends on USB
- help
- Say M here if you want to connect to the Gigaset base via DECT
- using a Gigaset M105 (Sinus 45 Data 2) USB DECT device.
- This will build a module called "usb_gigaset".
-
-config GIGASET_M101
- tristate "Gigaset M101 support"
- help
- Say M here if you want to connect to the Gigaset base via DECT
- using a Gigaset M101 (Sinus 45 Data 1) RS232 DECT device.
- This will build a module called "ser_gigaset".
-
-config GIGASET_DEBUG
- bool "Gigaset debugging"
- help
- This enables debugging code in the Gigaset drivers.
- If in doubt, say yes.
-
-endif # ISDN_DRV_GIGASET
diff --git a/drivers/staging/isdn/gigaset/Makefile b/drivers/staging/isdn/gigaset/Makefile
deleted file mode 100644
index 9c010891dcd7..000000000000
--- a/drivers/staging/isdn/gigaset/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
-
-ifdef CONFIG_GIGASET_CAPI
-gigaset-y += capi.o
-else
-gigaset-y += dummyll.o
-endif
-
-usb_gigaset-y := usb-gigaset.o
-ser_gigaset-y := ser-gigaset.o
-bas_gigaset-y := bas-gigaset.o isocdata.o
-
-obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset.o
-obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o
-obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o
-obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o
diff --git a/drivers/staging/isdn/gigaset/asyncdata.c b/drivers/staging/isdn/gigaset/asyncdata.c
deleted file mode 100644
index a34b3c9d8a71..000000000000
--- a/drivers/staging/isdn/gigaset/asyncdata.c
+++ /dev/null
@@ -1,606 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Common data handling layer for ser_gigaset and usb_gigaset
- *
- * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Stefan Eilers.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/crc-ccitt.h>
-#include <linux/bitrev.h>
-#include <linux/export.h>
-
-/* check if byte must be stuffed/escaped
- * It is not clear which bytes the device requires to be escaped,
- * so take the safe route and encode every value less than 0x20,
- * the flag sequence, and the control escape character.
- */
-static inline int muststuff(unsigned char c)
-{
- if (c < PPP_TRANS) return 1;
- if (c == PPP_FLAG) return 1;
- if (c == PPP_ESCAPE) return 1;
- /* other possible candidates: */
- /* 0x91: XON with parity set */
- /* 0x93: XOFF with parity set */
- return 0;
-}
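
The predicate above decides which bytes need PPP octet stuffing. A standalone sketch that reuses it to count how many of the 256 byte values end up escaped; the PPP_* constants are the standard RFC 1662 values (0x7e, 0x7d, 0x20), defined locally here instead of coming from kernel headers:

#include <stdio.h>

#define PPP_FLAG   0x7e /* frame delimiter */
#define PPP_ESCAPE 0x7d /* control escape */
#define PPP_TRANS  0x20 /* XOR value for escaped bytes */

static int muststuff(unsigned char c)
{
	return c < PPP_TRANS || c == PPP_FLAG || c == PPP_ESCAPE;
}

int main(void)
{
	int c, n = 0;

	for (c = 0; c < 256; c++)
		if (muststuff((unsigned char)c))
			n++;
	printf("%d of 256 byte values are escaped\n", n); /* 34 */
	return 0;
}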
-
-/* == data input =========================================================== */
-
-/* process a block of received bytes in command mode
- * (mstate != MS_LOCKED && (inputstate & INS_command))
- * Append received bytes to the command response buffer and forward them
- * line by line to the response handler. Exit whenever a mode/state change
- * might have occurred.
- * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
- * removed before passing the line to the response handler.
- * Return value:
- * number of processed bytes
- */
-static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- unsigned char *src = inbuf->data + inbuf->head;
- struct cardstate *cs = inbuf->cs;
- unsigned cbytes = cs->cbytes;
- unsigned procbytes = 0;
- unsigned char c;
-
- while (procbytes < numbytes) {
- c = *src++;
- procbytes++;
-
- switch (c) {
- case '\n':
- if (cbytes == 0 && cs->respdata[0] == '\r') {
- /* collapse LF with preceding CR */
- cs->respdata[0] = 0;
- break;
- }
- /* fall through */
- case '\r':
- /* end of message line, pass to response handler */
- if (cbytes >= MAX_RESP_SIZE) {
- dev_warn(cs->dev, "response too large (%d)\n",
- cbytes);
- cbytes = MAX_RESP_SIZE;
- }
- cs->cbytes = cbytes;
- gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
- cbytes, cs->respdata);
- gigaset_handle_modem_response(cs);
- cbytes = 0;
-
- /* store EOL byte for CRLF collapsing */
- cs->respdata[0] = c;
-
- /* cs->dle may have changed */
- if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
- inbuf->inputstate &= ~INS_command;
-
- /* return for reevaluating state */
- goto exit;
-
- case DLE_FLAG:
- if (inbuf->inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inbuf->inputstate &= ~INS_DLE_char;
- } else if (cs->dle ||
- (inbuf->inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inbuf->inputstate |= INS_DLE_char;
- goto exit;
- }
- /* quoted or not in DLE mode: treat as regular data */
- /* fall through */
- default:
- /* append to line buffer if possible */
- if (cbytes < MAX_RESP_SIZE)
- cs->respdata[cbytes] = c;
- cbytes++;
- }
- }
-exit:
- cs->cbytes = cbytes;
- return procbytes;
-}
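
cmd_loop() collapses a CR LF pair into a single line terminator by remembering the last end-of-line byte in respdata[0]. A simplified userspace sketch of just that collapsing; the buffer size and function names are illustrative:

#include <stdio.h>

/* Sketch of the CR/LF collapsing in cmd_loop(): remember the last
 * end-of-line byte; an LF that directly follows a CR produces no
 * extra empty line, so "OK\r\n" is reported as a single line. */
static char last_eol;

static void feed(char c)
{
	static char line[80];
	static int len;

	if (c == '\n' && last_eol == '\r') {
		last_eol = 0; /* collapse LF with preceding CR */
		return;
	}
	if (c == '\r' || c == '\n') {
		line[len] = 0;
		printf("line: \"%s\"\n", line);
		len = 0;
		last_eol = c;
		return;
	}
	if (len < 79)
		line[len++] = c;
	last_eol = 0;
}

int main(void)
{
	const char *s = "OK\r\nRING\r";

	while (*s)
		feed(*s++);
	return 0;
}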
-
-/* process a block of received bytes in lock mode
- * All received bytes are passed unmodified to the tty i/f.
- * Return value:
- * number of processed bytes
- */
-static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- unsigned char *src = inbuf->data + inbuf->head;
-
- gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
- gigaset_if_receive(inbuf->cs, src, numbytes);
- return numbytes;
-}
-
-/* process a block of received bytes in HDLC data mode
- * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
- * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
- * When a frame is complete, check the FCS and pass valid frames to the LL.
- * If DLE is encountered, return immediately to let the caller handle it.
- * Return value:
- * number of processed bytes
- */
-static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- struct bc_state *bcs = cs->bcs;
- int inputstate = bcs->inputstate;
- __u16 fcs = bcs->rx_fcs;
- struct sk_buff *skb = bcs->rx_skb;
- unsigned char *src = inbuf->data + inbuf->head;
- unsigned procbytes = 0;
- unsigned char c;
-
- if (inputstate & INS_byte_stuff) {
- if (!numbytes)
- return 0;
- inputstate &= ~INS_byte_stuff;
- goto byte_stuff;
- }
-
- while (procbytes < numbytes) {
- c = *src++;
- procbytes++;
- if (c == DLE_FLAG) {
- if (inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inputstate &= ~INS_DLE_char;
- } else if (cs->dle || (inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inputstate |= INS_DLE_char;
- break;
- }
- }
-
- if (c == PPP_ESCAPE) {
- /* byte stuffing indicator: pull in next byte */
- if (procbytes >= numbytes) {
- /* end of buffer, save for later processing */
- inputstate |= INS_byte_stuff;
- break;
- }
-byte_stuff:
- c = *src++;
- procbytes++;
- if (c == DLE_FLAG) {
- if (inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inputstate &= ~INS_DLE_char;
- } else if (cs->dle ||
- (inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inputstate |=
- INS_DLE_char | INS_byte_stuff;
- break;
- }
- }
- c ^= PPP_TRANS;
-#ifdef CONFIG_GIGASET_DEBUG
- if (!muststuff(c))
- gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
-#endif
- } else if (c == PPP_FLAG) {
- /* end of frame: process content if any */
- if (inputstate & INS_have_data) {
- gig_dbg(DEBUG_HDLC,
- "7e----------------------------");
-
- /* check and pass received frame */
- if (!skb) {
- /* skipped frame */
- gigaset_isdn_rcv_err(bcs);
- } else if (skb->len < 2) {
- /* frame too short for FCS */
- dev_warn(cs->dev,
- "short frame (%d)\n",
- skb->len);
- gigaset_isdn_rcv_err(bcs);
- dev_kfree_skb_any(skb);
- } else if (fcs != PPP_GOODFCS) {
- /* frame check error */
- dev_err(cs->dev,
- "Checksum failed, %u bytes corrupted!\n",
- skb->len);
- gigaset_isdn_rcv_err(bcs);
- dev_kfree_skb_any(skb);
- } else {
- /* good frame */
- __skb_trim(skb, skb->len - 2);
- gigaset_skb_rcvd(bcs, skb);
- }
-
- /* prepare reception of next frame */
- inputstate &= ~INS_have_data;
- skb = gigaset_new_rx_skb(bcs);
- } else {
- /* empty frame (7E 7E) */
-#ifdef CONFIG_GIGASET_DEBUG
- ++bcs->emptycount;
-#endif
- if (!skb) {
- /* skipped (?) */
- gigaset_isdn_rcv_err(bcs);
- skb = gigaset_new_rx_skb(bcs);
- }
- }
-
- fcs = PPP_INITFCS;
- continue;
-#ifdef CONFIG_GIGASET_DEBUG
- } else if (muststuff(c)) {
- /* Should not happen. Possible after ZDLE=1<CR><LF>. */
- gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
-#endif
- }
-
- /* regular data byte, append to skb */
-#ifdef CONFIG_GIGASET_DEBUG
- if (!(inputstate & INS_have_data)) {
- gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
- bcs->emptycount);
- bcs->emptycount = 0;
- }
-#endif
- inputstate |= INS_have_data;
- if (skb) {
- if (skb->len >= bcs->rx_bufsize) {
- dev_warn(cs->dev, "received packet too long\n");
- dev_kfree_skb_any(skb);
- /* skip remainder of packet */
- bcs->rx_skb = skb = NULL;
- } else {
- __skb_put_u8(skb, c);
- fcs = crc_ccitt_byte(fcs, c);
- }
- }
- }
-
- bcs->inputstate = inputstate;
- bcs->rx_fcs = fcs;
- return procbytes;
-}
-
-/* process a block of received bytes in transparent data mode
- * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
- * Invert bytes, undoing byte stuffing and watching for DLE escapes.
- * If DLE is encountered, return immediately to let the caller handle it.
- * Return value:
- * number of processed bytes
- */
-static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- struct bc_state *bcs = cs->bcs;
- int inputstate = bcs->inputstate;
- struct sk_buff *skb = bcs->rx_skb;
- unsigned char *src = inbuf->data + inbuf->head;
- unsigned procbytes = 0;
- unsigned char c;
-
- if (!skb) {
- /* skip this block */
- gigaset_new_rx_skb(bcs);
- return numbytes;
- }
-
- while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
- c = *src++;
- procbytes++;
-
- if (c == DLE_FLAG) {
- if (inputstate & INS_DLE_char) {
- /* quoted DLE: clear quote flag */
- inputstate &= ~INS_DLE_char;
- } else if (cs->dle || (inputstate & INS_DLE_command)) {
- /* DLE escape, pass up for handling */
- inputstate |= INS_DLE_char;
- break;
- }
- }
-
- /* regular data byte: append to current skb */
- inputstate |= INS_have_data;
- __skb_put_u8(skb, bitrev8(c));
- }
-
- /* pass data up */
- if (inputstate & INS_have_data) {
- gigaset_skb_rcvd(bcs, skb);
- inputstate &= ~INS_have_data;
- gigaset_new_rx_skb(bcs);
- }
-
- bcs->inputstate = inputstate;
- return procbytes;
-}
-
-/* process DLE escapes
- * Called whenever a DLE sequence might be encountered in the input stream.
- * Either processes the entire DLE sequence or, if that isn't possible,
- * notes the fact that an initial DLE has been received in the INS_DLE_char
- * inputstate flag and resumes processing of the sequence on the next call.
- */
-static void handle_dle(struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
-
- if (cs->mstate == MS_LOCKED)
- return; /* no DLE processing in lock mode */
-
- if (!(inbuf->inputstate & INS_DLE_char)) {
- /* no DLE pending */
- if (inbuf->data[inbuf->head] == DLE_FLAG &&
- (cs->dle || inbuf->inputstate & INS_DLE_command)) {
- /* start of DLE sequence */
- inbuf->head++;
- if (inbuf->head == inbuf->tail ||
- inbuf->head == RBUFSIZE) {
- /* end of buffer, save for later processing */
- inbuf->inputstate |= INS_DLE_char;
- return;
- }
- } else {
- /* regular data byte */
- return;
- }
- }
-
- /* consume pending DLE */
- inbuf->inputstate &= ~INS_DLE_char;
-
- switch (inbuf->data[inbuf->head]) {
- case 'X': /* begin of event message */
- if (inbuf->inputstate & INS_command)
- dev_notice(cs->dev,
- "received <DLE>X in command mode\n");
- inbuf->inputstate |= INS_command | INS_DLE_command;
- inbuf->head++; /* byte consumed */
- break;
- case '.': /* end of event message */
- if (!(inbuf->inputstate & INS_DLE_command))
- dev_notice(cs->dev,
- "received <DLE>. without <DLE>X\n");
- inbuf->inputstate &= ~INS_DLE_command;
- /* return to data mode if in DLE mode */
- if (cs->dle)
- inbuf->inputstate &= ~INS_command;
- inbuf->head++; /* byte consumed */
- break;
- case DLE_FLAG: /* DLE in data stream */
- /* mark as quoted */
- inbuf->inputstate |= INS_DLE_char;
- if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
- dev_notice(cs->dev,
- "received <DLE><DLE> not in DLE mode\n");
- break; /* quoted byte left in buffer */
- default:
- dev_notice(cs->dev, "received <DLE><%02x>\n",
- inbuf->data[inbuf->head]);
- /* quoted byte left in buffer */
- }
-}
-
-/**
- * gigaset_m10x_input() - process a block of data received from the device
- * @inbuf: received data and device descriptor structure.
- *
- * Called by hardware module {ser,usb}_gigaset with a block of received
- * bytes. Separates the bytes received over the serial data channel into
- * user data and command replies (locked/unlocked) according to the
- * current state of the interface.
- */
-void gigaset_m10x_input(struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- unsigned numbytes, procbytes;
-
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
-
- while (inbuf->head != inbuf->tail) {
- /* check for DLE escape */
- handle_dle(inbuf);
-
- /* process a contiguous block of bytes */
- numbytes = (inbuf->head > inbuf->tail ?
- RBUFSIZE : inbuf->tail) - inbuf->head;
- gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
- /*
- * numbytes may be 0 if handle_dle() ate the last byte.
- * This does no harm, *_loop() will just return 0 immediately.
- */
-
- if (cs->mstate == MS_LOCKED)
- procbytes = lock_loop(numbytes, inbuf);
- else if (inbuf->inputstate & INS_command)
- procbytes = cmd_loop(numbytes, inbuf);
- else if (cs->bcs->proto2 == L2_HDLC)
- procbytes = hdlc_loop(numbytes, inbuf);
- else
- procbytes = iraw_loop(numbytes, inbuf);
- inbuf->head += procbytes;
-
- /* check for buffer wraparound */
- if (inbuf->head >= RBUFSIZE)
- inbuf->head = 0;
-
- gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
- }
-}
-EXPORT_SYMBOL_GPL(gigaset_m10x_input);
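
The head/tail arithmetic above drains the ring buffer in at most two contiguous runs per pass: up to the tail when the data is linear, up to RBUFSIZE when it wraps, with head reset to 0 after the first run. A small sketch of that computation; the buffer size here is illustrative and differs from the driver's RBUFSIZE:

#include <stdio.h>

#define RBUFSIZE 16 /* hypothetical ring size for the example */

/* contiguous readable span of a head/tail ring buffer, as computed
 * by gigaset_m10x_input() */
static unsigned contig_bytes(unsigned head, unsigned tail)
{
	return (head > tail ? RBUFSIZE : tail) - head;
}

int main(void)
{
	unsigned head = 12, tail = 5; /* wrapped: one run of 4, one of 5 */

	while (head != tail) {
		unsigned n = contig_bytes(head, tail);

		printf("run of %u bytes at offset %u\n", n, head);
		head += n;
		if (head >= RBUFSIZE)
			head = 0; /* wraparound, as in the driver */
	}
	return 0;
}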
-
-
-/* == data output ========================================================== */
-
-/*
- * Encode a data packet into an octet stuffed HDLC frame with FCS,
- * opening and closing flags, preserving headroom data.
- * parameters:
- * skb skb containing original packet (freed upon return)
- * Return value:
- * pointer to newly allocated skb containing the result frame
- * and the original link layer header, NULL on error
- */
-static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
-{
- struct sk_buff *hdlc_skb;
- __u16 fcs;
- unsigned char c;
- unsigned char *cp;
- int len;
- unsigned int stuf_cnt;
-
- stuf_cnt = 0;
- fcs = PPP_INITFCS;
- cp = skb->data;
- len = skb->len;
- while (len--) {
- if (muststuff(*cp))
- stuf_cnt++;
- fcs = crc_ccitt_byte(fcs, *cp++);
- }
- fcs ^= 0xffff; /* complement */
-
- /* size of new buffer: original size + number of stuffing bytes
- * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
- * + room for link layer header
- */
- hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
- if (!hdlc_skb) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- /* Copy link layer header into new skb */
- skb_reset_mac_header(hdlc_skb);
- skb_reserve(hdlc_skb, skb->mac_len);
- memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
- hdlc_skb->mac_len = skb->mac_len;
-
- /* Add the opening flag sequence in front of everything. */
- skb_put_u8(hdlc_skb, PPP_FLAG);
-
- /* Perform byte stuffing while copying data. */
- while (skb->len--) {
- if (muststuff(*skb->data)) {
- skb_put_u8(hdlc_skb, PPP_ESCAPE);
- skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
- } else
- skb_put_u8(hdlc_skb, *skb->data++);
- }
-
- /* Finally add FCS (byte stuffed) and flag sequence */
- c = (fcs & 0x00ff); /* least significant byte first */
- if (muststuff(c)) {
- skb_put_u8(hdlc_skb, PPP_ESCAPE);
- c ^= PPP_TRANS;
- }
- skb_put_u8(hdlc_skb, c);
-
- c = ((fcs >> 8) & 0x00ff);
- if (muststuff(c)) {
- skb_put_u8(hdlc_skb, PPP_ESCAPE);
- c ^= PPP_TRANS;
- }
- skb_put_u8(hdlc_skb, c);
-
- skb_put_u8(hdlc_skb, PPP_FLAG);
-
- dev_kfree_skb_any(skb);
- return hdlc_skb;
-}
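
HDLC_Encode() computes the PPP FCS incrementally with crc_ccitt_byte(), complements it, and appends it least-significant byte first; the receive side in hdlc_loop() then verifies a frame by checking that the running FCS over payload plus trailer equals PPP_GOODFCS. A self-contained sketch of that round trip, using a local bitwise implementation of the same reflected CRC (polynomial 0x8408, per RFC 1662) instead of the kernel's crc-ccitt library:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PPP_INITFCS 0xffff
#define PPP_GOODFCS 0xf0b8

/* bitwise CRC-CCITT as used for the PPP/X.25 FCS */
static uint16_t crc_ccitt_byte(uint16_t fcs, uint8_t c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs >> 1) ^ ((fcs & 1) ? 0x8408 : 0);
	return fcs;
}

int main(void)
{
	const uint8_t payload[] = "hello";
	uint8_t frame[sizeof(payload) + 1]; /* payload + 2-byte FCS */
	size_t i, len = sizeof(payload) - 1;
	uint16_t fcs = PPP_INITFCS;

	memcpy(frame, payload, len);
	for (i = 0; i < len; i++)
		fcs = crc_ccitt_byte(fcs, frame[i]);
	fcs ^= 0xffff;               /* complement, as in HDLC_Encode() */
	frame[len]     = fcs & 0xff; /* least significant byte first */
	frame[len + 1] = fcs >> 8;

	/* receiver check: FCS over payload + trailer == PPP_GOODFCS */
	fcs = PPP_INITFCS;
	for (i = 0; i < len + 2; i++)
		fcs = crc_ccitt_byte(fcs, frame[i]);
	printf("%s\n", fcs == PPP_GOODFCS ? "FCS ok" : "FCS bad");
	return 0;
}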
-
-/*
- * Encode a data packet into an octet stuffed raw bit inverted frame,
- * preserving headroom data.
- * parameters:
- * skb skb containing original packet (freed upon return)
- * Return value:
- * pointer to newly allocated skb containing the result frame
- * and the original link layer header, NULL on error
- */
-static struct sk_buff *iraw_encode(struct sk_buff *skb)
-{
- struct sk_buff *iraw_skb;
- unsigned char c;
- unsigned char *cp;
- int len;
-
- /* size of new buffer (worst case = every byte must be stuffed):
- * 2 * original size + room for link layer header
- */
- iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len);
- if (!iraw_skb) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- /* copy link layer header into new skb */
- skb_reset_mac_header(iraw_skb);
- skb_reserve(iraw_skb, skb->mac_len);
- memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
- iraw_skb->mac_len = skb->mac_len;
-
- /* copy and stuff data */
- cp = skb->data;
- len = skb->len;
- while (len--) {
- c = bitrev8(*cp++);
- if (c == DLE_FLAG)
- skb_put_u8(iraw_skb, c);
- skb_put_u8(iraw_skb, c);
- }
- dev_kfree_skb_any(skb);
- return iraw_skb;
-}
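
iraw_encode() bit-reverses every payload byte and sends any byte that comes out as DLE twice, so the receiver can distinguish literal data from a DLE escape. A standalone sketch with a local bitrev8 (the kernel supplies its own via linux/bitrev.h; DLE_FLAG is the ASCII DLE control character, 0x10):

#include <stdio.h>
#include <stddef.h>

#define DLE_FLAG 0x10 /* ASCII DLE control character */

/* swap-halves bit reversal, equivalent to the kernel's bitrev8() */
static unsigned char bitrev8(unsigned char b)
{
	b = (unsigned char)((b & 0xf0) >> 4 | (b & 0x0f) << 4);
	b = (unsigned char)((b & 0xcc) >> 2 | (b & 0x33) << 2);
	b = (unsigned char)((b & 0xaa) >> 1 | (b & 0x55) << 1);
	return b;
}

int main(void)
{
	const unsigned char payload[] = { 0x08, 0x41 }; /* 0x08 reverses to DLE */
	size_t i;

	for (i = 0; i < sizeof(payload); i++) {
		unsigned char c = bitrev8(payload[i]);

		if (c == DLE_FLAG)
			printf("%02x ", c); /* double the quoted DLE */
		printf("%02x ", c);
	}
	printf("\n"); /* prints: 10 10 82 */
	return 0;
}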
-
-/**
- * gigaset_m10x_send_skb() - queue an skb for sending
- * @bcs: B channel descriptor structure.
- * @skb: data to send.
- *
- * Called by LL to encode and queue an skb for sending, and start
- * transmission if necessary.
- * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the skb's link layer header preserved.
- *
- * Return value:
- * number of bytes accepted for sending (skb->len) if ok,
- * error code < 0 (eg. -ENOMEM) on error
- */
-int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
-{
- struct cardstate *cs = bcs->cs;
- unsigned len = skb->len;
- unsigned long flags;
-
- if (bcs->proto2 == L2_HDLC)
- skb = HDLC_Encode(skb);
- else
- skb = iraw_encode(skb);
- if (!skb) {
- dev_err(cs->dev,
- "unable to allocate memory for encoding!\n");
- return -ENOMEM;
- }
-
- skb_queue_tail(&bcs->squeue, skb);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->connected)
- tasklet_schedule(&cs->write_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
-
- return len; /* ok so far */
-}
-EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
diff --git a/drivers/staging/isdn/gigaset/bas-gigaset.c b/drivers/staging/isdn/gigaset/bas-gigaset.c
deleted file mode 100644
index c334525a5f63..000000000000
--- a/drivers/staging/isdn/gigaset/bas-gigaset.c
+++ /dev/null
@@ -1,2672 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB driver for Gigaset 307x base via direct USB connection.
- *
- * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>,
- * Stefan Eilers.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
-#define DRIVER_DESC "USB Driver for Gigaset 307x"
-
-
-/* Module parameters */
-
-static int startmode = SM_ISDN;
-static int cidmode = 1;
-
-module_param(startmode, int, S_IRUGO);
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
-MODULE_PARM_DESC(cidmode, "Call-ID mode");
-
-#define GIGASET_MINORS 1
-#define GIGASET_MINOR 16
-#define GIGASET_MODULENAME "bas_gigaset"
-#define GIGASET_DEVNAME "ttyGB"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-/* interrupt pipe message size according to ibid. ch. 2.2 */
-#define IP_MSGSIZE 3
-
-/* Values for the Gigaset 307x */
-#define USB_GIGA_VENDOR_ID 0x0681
-#define USB_3070_PRODUCT_ID 0x0001
-#define USB_3075_PRODUCT_ID 0x0002
-#define USB_SX303_PRODUCT_ID 0x0021
-#define USB_SX353_PRODUCT_ID 0x0022
-
-/* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table[] = {
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
- { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, gigaset_table);
-
-/*======================= local function prototypes ==========================*/
-
-/* function called if a new device belonging to this driver is connected */
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-
-/* Function will be called if the device is unplugged */
-static void gigaset_disconnect(struct usb_interface *interface);
-
-/* functions called before/after suspend */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
-static int gigaset_resume(struct usb_interface *intf);
-
-/* functions called before/after device reset */
-static int gigaset_pre_reset(struct usb_interface *intf);
-static int gigaset_post_reset(struct usb_interface *intf);
-
-static int atread_submit(struct cardstate *, int);
-static void stopurbs(struct bas_bc_state *);
-static int req_submit(struct bc_state *, int, int, int);
-static int atwrite_submit(struct cardstate *, unsigned char *, int);
-static int start_cbsend(struct cardstate *);
-
-/*============================================================================*/
-
-struct bas_cardstate {
- struct usb_device *udev; /* USB device pointer */
- struct cardstate *cs;
- struct usb_interface *interface; /* interface for this device */
- unsigned char minor; /* starting minor number */
-
- struct urb *urb_ctrl; /* control pipe default URB */
- struct usb_ctrlrequest dr_ctrl;
- struct timer_list timer_ctrl; /* control request timeout */
- int retry_ctrl;
-
- struct timer_list timer_atrdy; /* AT command ready timeout */
- struct urb *urb_cmd_out; /* for sending AT commands */
- struct usb_ctrlrequest dr_cmd_out;
- int retry_cmd_out;
-
- struct urb *urb_cmd_in; /* for receiving AT replies */
- struct usb_ctrlrequest dr_cmd_in;
- struct timer_list timer_cmd_in; /* receive request timeout */
- unsigned char *rcvbuf; /* AT reply receive buffer */
-
- struct urb *urb_int_in; /* URB for interrupt pipe */
- unsigned char *int_in_buf;
- struct work_struct int_in_wq; /* for usb_clear_halt() */
- struct timer_list timer_int_in; /* int read retry delay */
- int retry_int_in;
-
- spinlock_t lock; /* locks all following */
- int basstate; /* bitmap (BS_*) */
- int pending; /* uncompleted base request */
- wait_queue_head_t waitqueue;
- int rcvbuf_size; /* size of AT receive buffer */
- /* 0: no receive in progress */
- int retry_cmd_in; /* receive req retry count */
-};
-
-/* status of direct USB connection to 307x base (bits in basstate) */
-#define BS_ATOPEN 0x001 /* AT channel open */
-#define BS_B1OPEN 0x002 /* B channel 1 open */
-#define BS_B2OPEN 0x004 /* B channel 2 open */
-#define BS_ATREADY 0x008 /* base ready for AT command */
-#define BS_INIT 0x010 /* base has signalled INIT_OK */
-#define BS_ATTIMER 0x020 /* waiting for HD_READY_SEND_ATDATA */
-#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
-#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
-#define BS_SUSPEND 0x100 /* USB port suspended */
-#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
-
-
-static struct gigaset_driver *driver;
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver gigaset_usb_driver = {
- .name = GIGASET_MODULENAME,
- .probe = gigaset_probe,
- .disconnect = gigaset_disconnect,
- .id_table = gigaset_table,
- .suspend = gigaset_suspend,
- .resume = gigaset_resume,
- .reset_resume = gigaset_post_reset,
- .pre_reset = gigaset_pre_reset,
- .post_reset = gigaset_post_reset,
- .disable_hub_initiated_lpm = 1,
-};
-
-/* get message text for usb_submit_urb return code
- */
-static char *get_usb_rcmsg(int rc)
-{
- static char unkmsg[28];
-
- switch (rc) {
- case 0:
- return "success";
- case -ENOMEM:
- return "out of memory";
- case -ENODEV:
- return "device not present";
- case -ENOENT:
- return "endpoint not present";
- case -ENXIO:
- return "URB type not supported";
- case -EINVAL:
- return "invalid argument";
- case -EAGAIN:
- return "start frame too early or too much scheduled";
- case -EFBIG:
- return "too many isoc frames requested";
- case -EPIPE:
- return "endpoint stalled";
- case -EMSGSIZE:
- return "invalid packet size";
- case -ENOSPC:
- return "would overcommit USB bandwidth";
- case -ESHUTDOWN:
- return "device shut down";
- case -EPERM:
- return "reject flag set";
- case -EHOSTUNREACH:
- return "device suspended";
- default:
- snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", rc);
- return unkmsg;
- }
-}
-
-/* get message text for USB status code
- */
-static char *get_usb_statmsg(int status)
-{
- static char unkmsg[28];
-
- switch (status) {
- case 0:
- return "success";
- case -ENOENT:
- return "unlinked (sync)";
- case -EINPROGRESS:
- return "URB still pending";
- case -EPROTO:
- return "bitstuff error, timeout, or unknown USB error";
- case -EILSEQ:
- return "CRC mismatch, timeout, or unknown USB error";
- case -ETIME:
- return "USB response timeout";
- case -EPIPE:
- return "endpoint stalled";
- case -ECOMM:
- return "IN buffer overrun";
- case -ENOSR:
- return "OUT buffer underrun";
- case -EOVERFLOW:
- return "endpoint babble";
- case -EREMOTEIO:
- return "short packet";
- case -ENODEV:
- return "device removed";
- case -EXDEV:
- return "partial isoc transfer";
- case -EINVAL:
- return "ISO madness";
- case -ECONNRESET:
- return "unlinked (async)";
- case -ESHUTDOWN:
- return "device shut down";
- default:
- snprintf(unkmsg, sizeof(unkmsg), "unknown status %d", status);
- return unkmsg;
- }
-}
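-/* Note on the two helpers above: the static unkmsg buffer makes them
- * non-reentrant; that is tolerable here because they only format
- * diagnostic messages for logging.
- */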
-
-/* usb_pipetype_str
- * retrieve string representation of USB pipe type
- */
-static inline char *usb_pipetype_str(int pipe)
-{
- if (usb_pipeisoc(pipe))
- return "Isoc";
- if (usb_pipeint(pipe))
- return "Int";
- if (usb_pipecontrol(pipe))
- return "Ctrl";
- if (usb_pipebulk(pipe))
- return "Bulk";
- return "?";
-}
-
-/* dump_urb
- * write content of URB to syslog for debugging
- */
-static inline void dump_urb(enum debuglevel level, const char *tag,
- struct urb *urb)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- int i;
- gig_dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
- if (urb) {
- gig_dbg(level,
- " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
- "hcpriv=0x%08lx, transfer_flags=0x%x,",
- (unsigned long) urb->dev,
- usb_pipetype_str(urb->pipe),
- usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- (unsigned long) urb->hcpriv,
- urb->transfer_flags);
- gig_dbg(level,
- " transfer_buffer=0x%08lx[%d], actual_length=%d, "
- "setup_packet=0x%08lx,",
- (unsigned long) urb->transfer_buffer,
- urb->transfer_buffer_length, urb->actual_length,
- (unsigned long) urb->setup_packet);
- gig_dbg(level,
- " start_frame=%d, number_of_packets=%d, interval=%d, "
- "error_count=%d,",
- urb->start_frame, urb->number_of_packets, urb->interval,
- urb->error_count);
- gig_dbg(level,
- " context=0x%08lx, complete=0x%08lx, "
- "iso_frame_desc[]={",
- (unsigned long) urb->context,
- (unsigned long) urb->complete);
- for (i = 0; i < urb->number_of_packets; i++) {
- struct usb_iso_packet_descriptor *pifd
- = &urb->iso_frame_desc[i];
- gig_dbg(level,
- " {offset=%u, length=%u, actual_length=%u, "
- "status=%u}",
- pifd->offset, pifd->length, pifd->actual_length,
- pifd->status);
- }
- }
- gig_dbg(level, "}}");
-#endif
-}
-
-/* read/set modem control bits etc.: serial-line operations relevant only
- * to m10x devices; on the direct USB connection to the base they are
- * unsupported, so these interface stubs simply return -EINVAL
- */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- return -EINVAL;
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-/* set/clear bits in base connection state, return previous state
- */
-static inline int update_basstate(struct bas_cardstate *ucs,
- int set, int clear)
-{
- unsigned long flags;
- int state;
-
- spin_lock_irqsave(&ucs->lock, flags);
- state = ucs->basstate;
- ucs->basstate = (state & ~clear) | set;
- spin_unlock_irqrestore(&ucs->lock, flags);
- return state;
-}
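-/* Illustration of the atomic test-and-claim pattern this enables, as used
- * by atread_submit() below:
- *
- *	if (update_basstate(ucs, BS_ATRDPEND, 0) & BS_ATRDPEND)
- *		return -EBUSY;	(flag was already set: URB busy)
- *
- * Returning the previous state lets callers check and set a flag under a
- * single lock acquisition.
- */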
-
-/* error_hangup
- * hang up any existing connection because of an unrecoverable error
- * This function may be called from any context and takes care of scheduling
- * the necessary actions for execution outside of interrupt context.
- * cs->lock must not be held.
- * argument:
- * B channel control structure
- */
-static inline void error_hangup(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
-
- gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL);
- gigaset_schedule_event(cs);
-}
-
-/* error_reset
- * reset Gigaset device because of an unrecoverable error
- * This function may be called from any context, and takes care of
- * scheduling the necessary actions for execution outside of interrupt context.
- * cs->hw.bas->lock must not be held.
- * argument:
- * controller state structure
- */
-static inline void error_reset(struct cardstate *cs)
-{
- /* reset interrupt pipe to recover (ignore errors) */
- update_basstate(cs->hw.bas, BS_RESETTING, 0);
- if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT))
- /* submission failed, escalate to USB port reset */
- usb_queue_reset_device(cs->hw.bas->interface);
-}
-
-/* check_pending
- * check for completion of pending control request
- * parameter:
- * ucs hardware specific controller state structure
- */
-static void check_pending(struct bas_cardstate *ucs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ucs->lock, flags);
- switch (ucs->pending) {
- case 0:
- break;
- case HD_OPEN_ATCHANNEL:
- if (ucs->basstate & BS_ATOPEN)
- ucs->pending = 0;
- break;
- case HD_OPEN_B1CHANNEL:
- if (ucs->basstate & BS_B1OPEN)
- ucs->pending = 0;
- break;
- case HD_OPEN_B2CHANNEL:
- if (ucs->basstate & BS_B2OPEN)
- ucs->pending = 0;
- break;
- case HD_CLOSE_ATCHANNEL:
- if (!(ucs->basstate & BS_ATOPEN))
- ucs->pending = 0;
- break;
- case HD_CLOSE_B1CHANNEL:
- if (!(ucs->basstate & BS_B1OPEN))
- ucs->pending = 0;
- break;
- case HD_CLOSE_B2CHANNEL:
- if (!(ucs->basstate & BS_B2OPEN))
- ucs->pending = 0;
- break;
- case HD_DEVICE_INIT_ACK: /* no reply expected */
- ucs->pending = 0;
- break;
- case HD_RESET_INTERRUPT_PIPE:
- if (!(ucs->basstate & BS_RESETTING))
- ucs->pending = 0;
- break;
- /*
- * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
- * and should never end up here
- */
- default:
- dev_warn(&ucs->interface->dev,
- "unknown pending request 0x%02x cleared\n",
- ucs->pending);
- ucs->pending = 0;
- }
-
- if (!ucs->pending)
- del_timer(&ucs->timer_ctrl);
-
- spin_unlock_irqrestore(&ucs->lock, flags);
-}
-
-/* cmd_in_timeout
- * timeout routine for command input request
- * argument:
- * controller state structure
- */
-static void cmd_in_timeout(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in);
- struct cardstate *cs = ucs->cs;
- int rc;
-
- if (!ucs->rcvbuf_size) {
- gig_dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
- return;
- }
-
- if (ucs->retry_cmd_in++ >= BAS_RETRY) {
- dev_err(cs->dev,
- "control read: timeout, giving up after %d tries\n",
- ucs->retry_cmd_in);
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- error_reset(cs);
- return;
- }
-
- gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
- __func__, ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc < 0) {
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- if (rc != -ENODEV)
- error_reset(cs);
- }
-}
-
-/* read_ctrl_callback
- * USB completion handler for control pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block
- * urb->context = inbuf structure for controller state
- */
-static void read_ctrl_callback(struct urb *urb)
-{
- struct inbuf_t *inbuf = urb->context;
- struct cardstate *cs = inbuf->cs;
- struct bas_cardstate *ucs = cs->hw.bas;
- int status = urb->status;
- unsigned numbytes;
- int rc;
-
- update_basstate(ucs, 0, BS_ATRDPEND);
- wake_up(&ucs->waitqueue);
- del_timer(&ucs->timer_cmd_in);
-
- switch (status) {
- case 0: /* normal completion */
- numbytes = urb->actual_length;
- if (unlikely(numbytes != ucs->rcvbuf_size)) {
- dev_warn(cs->dev,
- "control read: received %d chars, expected %d\n",
- numbytes, ucs->rcvbuf_size);
- if (numbytes > ucs->rcvbuf_size)
- numbytes = ucs->rcvbuf_size;
- }
-
- /* copy received bytes to inbuf, notify event layer */
- if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- }
- break;
-
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* no further action necessary */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- break;
-
- default: /* other errors: retry */
- if (ucs->retry_cmd_in++ < BAS_RETRY) {
- gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
- get_usb_statmsg(status), ucs->retry_cmd_in);
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc >= 0)
- /* successfully resubmitted, skip freeing */
- return;
- if (rc == -ENODEV)
- /* disconnect, no further action necessary */
- break;
- }
- dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
- get_usb_statmsg(status), ucs->retry_cmd_in);
- error_reset(cs);
- }
-
- /* read finished, free buffer */
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
-}
-
-/* atread_submit
- * submit an HD_READ_ATMESSAGE command URB and optionally start a timeout
- * parameters:
- * cs controller state structure
- * timeout timeout in 1/10 sec., 0: none
- * return value:
- * 0 on success
- * -EBUSY if another request is pending
- * any URB submission error code
- */
-static int atread_submit(struct cardstate *cs, int timeout)
-{
- struct bas_cardstate *ucs = cs->hw.bas;
- int basstate;
- int ret;
-
- gig_dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)",
- ucs->rcvbuf_size);
-
- basstate = update_basstate(ucs, BS_ATRDPEND, 0);
- if (basstate & BS_ATRDPEND) {
- dev_err(cs->dev,
- "could not submit HD_READ_ATMESSAGE: URB busy\n");
- return -EBUSY;
- }
-
- if (basstate & BS_SUSPEND) {
- dev_notice(cs->dev,
- "HD_READ_ATMESSAGE not submitted, "
- "suspend in progress\n");
- update_basstate(ucs, 0, BS_ATRDPEND);
- /* treat like disconnect */
- return -ENODEV;
- }
-
- ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
- ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
- ucs->dr_cmd_in.wValue = 0;
- ucs->dr_cmd_in.wIndex = 0;
- ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
- usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
- usb_rcvctrlpipe(ucs->udev, 0),
- (unsigned char *) &ucs->dr_cmd_in,
- ucs->rcvbuf, ucs->rcvbuf_size,
- read_ctrl_callback, cs->inbuf);
-
- ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
- if (ret != 0) {
- update_basstate(ucs, 0, BS_ATRDPEND);
- dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
- get_usb_rcmsg(ret));
- return ret;
- }
-
- if (timeout > 0) {
- gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
- mod_timer(&ucs->timer_cmd_in, jiffies + timeout * HZ / 10);
- }
- return 0;
-}
-
-/* int_in_work
- * workqueue routine to clear halt on interrupt in endpoint
- */
-static void int_in_work(struct work_struct *work)
-{
- struct bas_cardstate *ucs =
- container_of(work, struct bas_cardstate, int_in_wq);
- struct urb *urb = ucs->urb_int_in;
- struct cardstate *cs = urb->context;
- int rc;
-
- /* clear halt condition */
- rc = usb_clear_halt(ucs->udev, urb->pipe);
- gig_dbg(DEBUG_USBREQ, "clear_halt: %s", get_usb_rcmsg(rc));
- if (rc == 0)
- /* success, resubmit interrupt read URB */
- rc = usb_submit_urb(urb, GFP_ATOMIC);
-
- switch (rc) {
- case 0: /* success */
- case -ENODEV: /* device gone */
- case -EINVAL: /* URB already resubmitted, or terminal badness */
- break;
- default: /* failure: try to recover by resetting the device */
- dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
- rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
- if (rc == 0) {
- rc = usb_reset_device(ucs->udev);
- usb_unlock_device(ucs->udev);
- }
- }
- ucs->retry_int_in = 0;
-}
-
-/* int_in_resubmit
- * timer routine for interrupt read delayed resubmit
- * argument:
- * controller state structure
- */
-static void int_in_resubmit(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in);
- struct cardstate *cs = ucs->cs;
- int rc;
-
- if (ucs->retry_int_in++ >= BAS_RETRY) {
- dev_err(cs->dev, "interrupt read: giving up after %d tries\n",
- ucs->retry_int_in);
- usb_queue_reset_device(ucs->interface);
- return;
- }
-
- gig_dbg(DEBUG_USBREQ, "%s: retry %d", __func__, ucs->retry_int_in);
- rc = usb_submit_urb(ucs->urb_int_in, GFP_ATOMIC);
- if (rc != 0 && rc != -ENODEV) {
- dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- usb_queue_reset_device(ucs->interface);
- }
-}
-
-/* read_int_callback
- * USB completion handler for interrupt pipe input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block
- * urb->context = controller state structure
- */
-static void read_int_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- struct bas_cardstate *ucs = cs->hw.bas;
- struct bc_state *bcs;
- int status = urb->status;
- unsigned long flags;
- int rc;
- unsigned l;
- int channel;
-
- switch (status) {
- case 0: /* success */
- ucs->retry_int_in = 0;
- break;
- case -EPIPE: /* endpoint stalled */
- schedule_work(&ucs->int_in_wq);
- /* fall through */
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* no further action necessary */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- case -EPROTO: /* protocol error or unplug */
- case -EILSEQ:
- case -ETIME:
- /* resubmit after delay */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- mod_timer(&ucs->timer_int_in, jiffies + HZ / 10);
- return;
- default: /* other errors: just resubmit */
- dev_warn(cs->dev, "interrupt read: %s\n",
- get_usb_statmsg(status));
- goto resubmit;
- }
-
- /* drop incomplete packets even if the missing bytes wouldn't matter */
- if (unlikely(urb->actual_length < IP_MSGSIZE)) {
- dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
- urb->actual_length);
- goto resubmit;
- }
-
- l = (unsigned) ucs->int_in_buf[1] +
- (((unsigned) ucs->int_in_buf[2]) << 8);
-
- gig_dbg(DEBUG_USBREQ, "<-------%d: 0x%02x (%u [0x%02x 0x%02x])",
- urb->actual_length, (int)ucs->int_in_buf[0], l,
- (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);
-
- channel = 0;
-
- switch (ucs->int_in_buf[0]) {
- case HD_DEVICE_INIT_OK:
- update_basstate(ucs, BS_INIT, 0);
- break;
-
- case HD_READY_SEND_ATDATA:
- del_timer(&ucs->timer_atrdy);
- update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
- start_cbsend(cs);
- break;
-
- case HD_OPEN_B2CHANNEL_ACK:
- ++channel;
- /* fall through */
- case HD_OPEN_B1CHANNEL_ACK:
- bcs = cs->bcs + channel;
- update_basstate(ucs, BS_B1OPEN << channel, 0);
- gigaset_bchannel_up(bcs);
- break;
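-
- /* note: BS_B1OPEN << 1 == BS_B2OPEN (0x002 << 1 == 0x004), so the
-  * ++channel fall-through lets one case body serve both channels;
-  * the close-ACK and flow-control cases below rely on the same trick */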
-
- case HD_OPEN_ATCHANNEL_ACK:
- update_basstate(ucs, BS_ATOPEN, 0);
- start_cbsend(cs);
- break;
-
- case HD_CLOSE_B2CHANNEL_ACK:
- ++channel;
- /* fall through */
- case HD_CLOSE_B1CHANNEL_ACK:
- bcs = cs->bcs + channel;
- update_basstate(ucs, 0, BS_B1OPEN << channel);
- stopurbs(bcs->hw.bas);
- gigaset_bchannel_down(bcs);
- break;
-
- case HD_CLOSE_ATCHANNEL_ACK:
- update_basstate(ucs, 0, BS_ATOPEN);
- break;
-
- case HD_B2_FLOW_CONTROL:
- ++channel;
- /* fall through */
- case HD_B1_FLOW_CONTROL:
- bcs = cs->bcs + channel;
- atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
- &bcs->hw.bas->corrbytes);
- gig_dbg(DEBUG_ISO,
- "Flow control (channel %d, sub %d): 0x%02x => %d",
- channel, bcs->hw.bas->numsub, l,
- atomic_read(&bcs->hw.bas->corrbytes));
- break;
-
- case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */
- if (!l) {
- dev_warn(cs->dev,
- "HD_RECEIVEATDATA_ACK with length 0 ignored\n");
- break;
- }
- spin_lock_irqsave(&cs->lock, flags);
- if (ucs->basstate & BS_ATRDPEND) {
- spin_unlock_irqrestore(&cs->lock, flags);
- dev_warn(cs->dev,
- "HD_RECEIVEATDATA_ACK(%d) during HD_READ_ATMESSAGE(%d) ignored\n",
- l, ucs->rcvbuf_size);
- break;
- }
- if (ucs->rcvbuf_size) {
- /* throw away previous buffer - we have no queue */
- dev_err(cs->dev,
- "receive AT data overrun, %d bytes lost\n",
- ucs->rcvbuf_size);
- kfree(ucs->rcvbuf);
- ucs->rcvbuf_size = 0;
- }
- ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
- if (ucs->rcvbuf == NULL) {
- spin_unlock_irqrestore(&cs->lock, flags);
- dev_err(cs->dev, "out of memory receiving AT data\n");
- break;
- }
- ucs->rcvbuf_size = l;
- ucs->retry_cmd_in = 0;
- rc = atread_submit(cs, BAS_TIMEOUT);
- if (rc < 0) {
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- if (rc < 0 && rc != -ENODEV)
- error_reset(cs);
- break;
-
- case HD_RESET_INTERRUPT_PIPE_ACK:
- update_basstate(ucs, 0, BS_RESETTING);
- dev_notice(cs->dev, "interrupt pipe reset\n");
- break;
-
- case HD_SUSPEND_END:
- gig_dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
- break;
-
- default:
- dev_warn(cs->dev,
- "unknown Gigaset signal 0x%02x (%u) ignored\n",
- (int) ucs->int_in_buf[0], l);
- }
-
- check_pending(ucs);
- wake_up(&ucs->waitqueue);
-
-resubmit:
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc != 0 && rc != -ENODEV)) {
- dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- error_reset(cs);
- }
-}
-
-/* read_iso_callback
- * USB completion handler for B channel isochronous input
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = bc_state structure
- */
-static void read_iso_callback(struct urb *urb)
-{
- struct bc_state *bcs;
- struct bas_bc_state *ubc;
- int status = urb->status;
- unsigned long flags;
- int i, rc;
-
- /* status codes not worth bothering the tasklet with */
- if (unlikely(status == -ENOENT ||
- status == -ECONNRESET ||
- status == -EINPROGRESS ||
- status == -ENODEV ||
- status == -ESHUTDOWN)) {
- gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- }
-
- bcs = urb->context;
- ubc = bcs->hw.bas;
-
- spin_lock_irqsave(&ubc->isoinlock, flags);
- if (likely(ubc->isoindone == NULL)) {
- /* pass URB to tasklet */
- ubc->isoindone = urb;
- ubc->isoinstatus = status;
- tasklet_hi_schedule(&ubc->rcvd_tasklet);
- } else {
- /* tasklet still busy, drop data and resubmit URB */
- gig_dbg(DEBUG_ISO, "%s: overrun", __func__);
- ubc->loststatus = status;
- for (i = 0; i < BAS_NUMFRAMES; i++) {
- ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
- if (unlikely(urb->iso_frame_desc[i].status != 0 &&
- urb->iso_frame_desc[i].status != -EINPROGRESS))
- ubc->loststatus = urb->iso_frame_desc[i].status;
- urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length = 0;
- }
- if (likely(ubc->running)) {
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = bcs->cs->hw.bas->udev;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc != 0 && rc != -ENODEV)) {
- dev_err(bcs->cs->dev,
- "could not resubmit isoc read URB: %s\n",
- get_usb_rcmsg(rc));
- dump_urb(DEBUG_ISO, "isoc read", urb);
- error_hangup(bcs);
- }
- }
- }
- spin_unlock_irqrestore(&ubc->isoinlock, flags);
-}
-
-/* write_iso_callback
- * USB completion handler for B channel isochronous output
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = isow_urbctx_t structure
- */
-static void write_iso_callback(struct urb *urb)
-{
- struct isow_urbctx_t *ucx;
- struct bas_bc_state *ubc;
- int status = urb->status;
- unsigned long flags;
-
- /* status codes not worth bothering the tasklet with */
- if (unlikely(status == -ENOENT ||
- status == -ECONNRESET ||
- status == -EINPROGRESS ||
- status == -ENODEV ||
- status == -ESHUTDOWN)) {
- gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- }
-
- /* pass URB context to tasklet */
- ucx = urb->context;
- ubc = ucx->bcs->hw.bas;
- ucx->status = status;
-
- spin_lock_irqsave(&ubc->isooutlock, flags);
- ubc->isooutovfl = ubc->isooutdone;
- ubc->isooutdone = ucx;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- tasklet_hi_schedule(&ubc->sent_tasklet);
-}
-
-/* starturbs
- * prepare and submit USB request blocks for isochronous input and output
- * argument:
- * B channel control structure
- * return value:
- * 0 on success
- * < 0 on error (no URBs submitted)
- */
-static int starturbs(struct bc_state *bcs)
-{
- struct usb_device *udev = bcs->cs->hw.bas->udev;
- struct bas_bc_state *ubc = bcs->hw.bas;
- struct urb *urb;
- int j, k;
- int rc;
-
- /* initialize L2 reception */
- if (bcs->proto2 == L2_HDLC)
- bcs->inputstate |= INS_flag_hunt;
-
- /* submit all isochronous input URBs */
- ubc->running = 1;
- for (k = 0; k < BAS_INURBS; k++) {
- urb = ubc->isoinurbs[k];
- if (!urb) {
- rc = -EFAULT;
- goto error;
- }
- usb_fill_int_urb(urb, udev,
- usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
- ubc->isoinbuf + k * BAS_INBUFSIZE,
- BAS_INBUFSIZE, read_iso_callback, bcs,
- BAS_FRAMETIME);
-
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- for (j = 0; j < BAS_NUMFRAMES; j++) {
- urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
- urb->iso_frame_desc[j].length = BAS_MAXFRAME;
- urb->iso_frame_desc[j].status = 0;
- urb->iso_frame_desc[j].actual_length = 0;
- }
-
- dump_urb(DEBUG_ISO, "Initial isoc read", urb);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc != 0)
- goto error;
- }
-
- /* initialize L2 transmission */
- gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
-
- /* set up isochronous output URBs for flag idling */
- for (k = 0; k < BAS_OUTURBS; ++k) {
- urb = ubc->isoouturbs[k].urb;
- if (!urb) {
- rc = -EFAULT;
- goto error;
- }
- usb_fill_int_urb(urb, udev,
- usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
- ubc->isooutbuf->data,
- sizeof(ubc->isooutbuf->data),
- write_iso_callback, &ubc->isoouturbs[k],
- BAS_FRAMETIME);
-
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- for (j = 0; j < BAS_NUMFRAMES; ++j) {
- urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
- urb->iso_frame_desc[j].length = BAS_NORMFRAME;
- urb->iso_frame_desc[j].status = 0;
- urb->iso_frame_desc[j].actual_length = 0;
- }
- ubc->isoouturbs[k].limit = -1;
- }
-
- /* keep one URB free, submit the others */
- for (k = 0; k < BAS_OUTURBS - 1; ++k) {
- dump_urb(DEBUG_ISO, "Initial isoc write", urb);
- rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
- if (rc != 0)
- goto error;
- }
- dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
- ubc->isooutfree = &ubc->isoouturbs[BAS_OUTURBS - 1];
- ubc->isooutdone = ubc->isooutovfl = NULL;
- return 0;
-error:
- stopurbs(ubc);
- return rc;
-}
-
-/* stopurbs
- * cancel the USB request blocks for isochronous input and output
- * errors are silently ignored
- * argument:
- * B channel control structure
- */
-static void stopurbs(struct bas_bc_state *ubc)
-{
- int k, rc;
-
- ubc->running = 0;
-
- for (k = 0; k < BAS_INURBS; ++k) {
- rc = usb_unlink_urb(ubc->isoinurbs[k]);
- gig_dbg(DEBUG_ISO,
- "%s: isoc input URB %d unlinked, result = %s",
- __func__, k, get_usb_rcmsg(rc));
- }
-
- for (k = 0; k < BAS_OUTURBS; ++k) {
- rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
- gig_dbg(DEBUG_ISO,
- "%s: isoc output URB %d unlinked, result = %s",
- __func__, k, get_usb_rcmsg(rc));
- }
-}
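-/* Note: usb_unlink_urb() is asynchronous, so completion handlers may still
- * run after stopurbs() returns; the synchronous teardown paths
- * (gigaset_freebcshw(), freeurbs()) use usb_kill_urb() instead, which
- * waits for the URB to be given back.
- */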
-
-/* Isochronous Write - Bottom Half */
-/* =============================== */
-
-/* submit_iso_write_urb
- * fill and submit the next isochronous write URB
- * parameters:
- * ucx context structure containing URB
- * return value:
- * number of frames submitted in URB
- * 0 if URB not submitted because no data available (isooutbuf busy)
- * error code < 0 on error
- */
-static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
-{
- struct urb *urb = ucx->urb;
- struct bas_bc_state *ubc = ucx->bcs->hw.bas;
- struct usb_iso_packet_descriptor *ifd;
- int corrbytes, nframe, rc;
-
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = ucx->bcs->cs->hw.bas->udev;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = ubc->isooutbuf->data;
- urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
-
- for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
- ifd = &urb->iso_frame_desc[nframe];
-
- /* compute frame length according to flow control */
- ifd->length = BAS_NORMFRAME;
- corrbytes = atomic_read(&ubc->corrbytes);
- if (corrbytes != 0) {
- gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
- __func__, corrbytes);
- if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
- corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
- else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
- corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
- ifd->length += corrbytes;
- atomic_add(-corrbytes, &ubc->corrbytes);
- }
-
- /* retrieve block of data to send */
- rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
- if (rc < 0) {
- if (rc == -EBUSY) {
- gig_dbg(DEBUG_ISO,
- "%s: buffer busy at frame %d",
- __func__, nframe);
- /* tasklet will be restarted from
- gigaset_isoc_send_skb() */
- } else {
- dev_err(ucx->bcs->cs->dev,
- "%s: buffer error %d at frame %d\n",
- __func__, rc, nframe);
- return rc;
- }
- break;
- }
- ifd->offset = rc;
- ucx->limit = ubc->isooutbuf->nextread;
- ifd->status = 0;
- ifd->actual_length = 0;
- }
- if (unlikely(nframe == 0))
- return 0; /* no data to send */
- urb->number_of_packets = nframe;
-
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc)) {
- if (rc == -ENODEV)
- /* device removed - give up silently */
- gig_dbg(DEBUG_ISO, "%s: disconnected", __func__);
- else
- dev_err(ucx->bcs->cs->dev,
- "could not submit isoc write URB: %s\n",
- get_usb_rcmsg(rc));
- return rc;
- }
- ++ubc->numsub;
- return nframe;
-}
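-/* Worked example of the flow-control clamp above, with illustrative
- * constants (assuming BAS_NORMFRAME = 8, BAS_HIGHFRAME = 10,
- * BAS_LOWFRAME = 5): corrbytes = +7 is clamped to +2, so this frame is
- * sent with length 10 and corrbytes drops to +5, spreading the remaining
- * correction over subsequent frames.
- */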
-
-/* write_iso_tasklet
- * tasklet scheduled when an isochronous output URB to the Gigaset device
- * has completed
- * parameter:
- * data B channel state structure
- */
-static void write_iso_tasklet(unsigned long data)
-{
- struct bc_state *bcs = (struct bc_state *) data;
- struct bas_bc_state *ubc = bcs->hw.bas;
- struct cardstate *cs = bcs->cs;
- struct isow_urbctx_t *done, *next, *ovfl;
- struct urb *urb;
- int status;
- struct usb_iso_packet_descriptor *ifd;
- unsigned long flags;
- int i;
- struct sk_buff *skb;
- int len;
- int rc;
-
- /* loop while completed URBs arrive in time */
- for (;;) {
- if (unlikely(!(ubc->running))) {
- gig_dbg(DEBUG_ISO, "%s: not running", __func__);
- return;
- }
-
- /* retrieve completed URBs */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- done = ubc->isooutdone;
- ubc->isooutdone = NULL;
- ovfl = ubc->isooutovfl;
- ubc->isooutovfl = NULL;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (ovfl) {
- dev_err(cs->dev, "isoc write underrun\n");
- error_hangup(bcs);
- break;
- }
- if (!done)
- break;
-
- /* submit free URB if available */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- next = ubc->isooutfree;
- ubc->isooutfree = NULL;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (next) {
- rc = submit_iso_write_urb(next);
- if (unlikely(rc <= 0 && rc != -ENODEV)) {
- /* could not submit URB, put it back */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- if (ubc->isooutfree == NULL) {
- ubc->isooutfree = next;
- next = NULL;
- }
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (next) {
- /* couldn't put it back */
- dev_err(cs->dev,
- "losing isoc write URB\n");
- error_hangup(bcs);
- }
- }
- }
-
- /* process completed URB */
- urb = done->urb;
- status = done->status;
- switch (status) {
- case -EXDEV: /* partial completion */
- gig_dbg(DEBUG_ISO, "%s: URB partially completed",
- __func__);
- /* fall through - what's the difference anyway? */
- case 0: /* normal completion */
- /* inspect individual frames
- * assumptions (for lack of documentation):
- * - actual_length bytes of first frame in error are
- * successfully sent
- * - all following frames are not sent at all
- */
- for (i = 0; i < BAS_NUMFRAMES; i++) {
- ifd = &urb->iso_frame_desc[i];
- if (ifd->status ||
- ifd->actual_length != ifd->length) {
- dev_warn(cs->dev,
- "isoc write: frame %d[%d/%d]: %s\n",
- i, ifd->actual_length,
- ifd->length,
- get_usb_statmsg(ifd->status));
- break;
- }
- }
- break;
- case -EPIPE: /* stall - probably underrun */
- dev_err(cs->dev, "isoc write: stalled\n");
- error_hangup(bcs);
- break;
- default: /* other errors */
- dev_warn(cs->dev, "isoc write: %s\n",
- get_usb_statmsg(status));
- }
-
- /* mark the write buffer area covered by this URB as free */
- if (done->limit >= 0)
- ubc->isooutbuf->read = done->limit;
-
- /* mark URB as free */
- spin_lock_irqsave(&ubc->isooutlock, flags);
- next = ubc->isooutfree;
- ubc->isooutfree = done;
- spin_unlock_irqrestore(&ubc->isooutlock, flags);
- if (next) {
- /* only one URB still active - resubmit one */
- rc = submit_iso_write_urb(next);
- if (unlikely(rc <= 0 && rc != -ENODEV)) {
- /* couldn't submit */
- error_hangup(bcs);
- }
- }
- }
-
- /* process queued SKBs */
- while ((skb = skb_dequeue(&bcs->squeue))) {
- /* copy to output buffer, doing L2 encapsulation */
- len = skb->len;
- if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
- /* insufficient buffer space, push back onto queue */
- skb_queue_head(&bcs->squeue, skb);
- gig_dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
- __func__, skb_queue_len(&bcs->squeue));
- break;
- }
- skb_pull(skb, len);
- gigaset_skb_sent(bcs, skb);
- dev_kfree_skb_any(skb);
- }
-}
-
-/* Isochronous Read - Bottom Half */
-/* ============================== */
-
-/* read_iso_tasklet
- * tasklet scheduled when an isochronous input URB from the Gigaset device
- * has completed
- * parameter:
- * data B channel state structure
- */
-static void read_iso_tasklet(unsigned long data)
-{
- struct bc_state *bcs = (struct bc_state *) data;
- struct bas_bc_state *ubc = bcs->hw.bas;
- struct cardstate *cs = bcs->cs;
- struct urb *urb;
- int status;
- struct usb_iso_packet_descriptor *ifd;
- char *rcvbuf;
- unsigned long flags;
- int totleft, numbytes, offset, frame, rc;
-
- /* loop while more completed URBs arrive in the meantime */
- for (;;) {
- /* retrieve URB */
- spin_lock_irqsave(&ubc->isoinlock, flags);
- urb = ubc->isoindone;
- if (!urb) {
- spin_unlock_irqrestore(&ubc->isoinlock, flags);
- return;
- }
- status = ubc->isoinstatus;
- ubc->isoindone = NULL;
- if (unlikely(ubc->loststatus != -EINPROGRESS)) {
- dev_warn(cs->dev,
- "isoc read overrun, URB dropped (status: %s, %d bytes)\n",
- get_usb_statmsg(ubc->loststatus),
- ubc->isoinlost);
- ubc->loststatus = -EINPROGRESS;
- }
- spin_unlock_irqrestore(&ubc->isoinlock, flags);
-
- if (unlikely(!(ubc->running))) {
- gig_dbg(DEBUG_ISO,
- "%s: channel not running, "
- "dropped URB with status: %s",
- __func__, get_usb_statmsg(status));
- return;
- }
-
- switch (status) {
- case 0: /* normal completion */
- break;
- case -EXDEV: /* inspect individual frames
- (we do that anyway) */
- gig_dbg(DEBUG_ISO, "%s: URB partially completed",
- __func__);
- break;
- case -ENOENT:
- case -ECONNRESET:
- case -EINPROGRESS:
- gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(status));
- continue; /* -> skip */
- case -EPIPE:
- dev_err(cs->dev, "isoc read: stalled\n");
- error_hangup(bcs);
- continue; /* -> skip */
- default: /* other error */
- dev_warn(cs->dev, "isoc read: %s\n",
- get_usb_statmsg(status));
- goto error;
- }
-
- rcvbuf = urb->transfer_buffer;
- totleft = urb->actual_length;
- for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
- ifd = &urb->iso_frame_desc[frame];
- numbytes = ifd->actual_length;
- switch (ifd->status) {
- case 0: /* success */
- break;
- case -EPROTO: /* protocol error or unplug */
- case -EILSEQ:
- case -ETIME:
- /* probably just disconnected, ignore */
- gig_dbg(DEBUG_ISO,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- get_usb_statmsg(ifd->status));
- break;
- default: /* other error */
- /* report, assume transferred bytes are ok */
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- get_usb_statmsg(ifd->status));
- }
- if (unlikely(numbytes > BAS_MAXFRAME))
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- "exceeds max frame size");
- if (unlikely(numbytes > totleft)) {
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- "exceeds total transfer length");
- numbytes = totleft;
- }
- offset = ifd->offset;
- if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
- dev_warn(cs->dev,
- "isoc read: frame %d[%d]: %s\n",
- frame, numbytes,
- "exceeds end of buffer");
- numbytes = BAS_INBUFSIZE - offset;
- }
- gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
- totleft -= numbytes;
- }
- if (unlikely(totleft > 0))
- dev_warn(cs->dev, "isoc read: %d data bytes missing\n",
- totleft);
-
-error:
- /* URB processed, resubmit */
- for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
- urb->iso_frame_desc[frame].status = 0;
- urb->iso_frame_desc[frame].actual_length = 0;
- }
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = bcs->cs->hw.bas->udev;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = BAS_NUMFRAMES;
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc != 0 && rc != -ENODEV)) {
- dev_err(cs->dev,
- "could not resubmit isoc read URB: %s\n",
- get_usb_rcmsg(rc));
- dump_urb(DEBUG_ISO, "resubmit isoc read", urb);
- error_hangup(bcs);
- }
- }
-}
-
-/* Channel Operations */
-/* ================== */
-
-/* req_timeout
- * timeout routine for control output request
- * argument:
- * controller state structure
- */
-static void req_timeout(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl);
- struct cardstate *cs = ucs->cs;
- int pending;
- unsigned long flags;
-
- check_pending(ucs);
-
- spin_lock_irqsave(&ucs->lock, flags);
- pending = ucs->pending;
- ucs->pending = 0;
- spin_unlock_irqrestore(&ucs->lock, flags);
-
- switch (pending) {
- case 0: /* no pending request */
- gig_dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
- break;
-
- case HD_OPEN_ATCHANNEL:
- dev_err(cs->dev, "timeout opening AT channel\n");
- error_reset(cs);
- break;
-
- case HD_OPEN_B1CHANNEL:
- dev_err(cs->dev, "timeout opening channel 1\n");
- error_hangup(&cs->bcs[0]);
- break;
-
- case HD_OPEN_B2CHANNEL:
- dev_err(cs->dev, "timeout opening channel 2\n");
- error_hangup(&cs->bcs[1]);
- break;
-
- case HD_CLOSE_ATCHANNEL:
- dev_err(cs->dev, "timeout closing AT channel\n");
- error_reset(cs);
- break;
-
- case HD_CLOSE_B1CHANNEL:
- dev_err(cs->dev, "timeout closing channel 1\n");
- error_reset(cs);
- break;
-
- case HD_CLOSE_B2CHANNEL:
- dev_err(cs->dev, "timeout closing channel 2\n");
- error_reset(cs);
- break;
-
- case HD_RESET_INTERRUPT_PIPE:
- /* error recovery escalation */
- dev_err(cs->dev,
- "reset interrupt pipe timeout, attempting USB reset\n");
- usb_queue_reset_device(ucs->interface);
- break;
-
- default:
- dev_warn(cs->dev, "request 0x%02x timed out, clearing\n",
- pending);
- }
-
- wake_up(&ucs->waitqueue);
-}
-
-/* write_ctrl_callback
- * USB completion handler for control pipe output
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = hardware specific controller state structure
- */
-static void write_ctrl_callback(struct urb *urb)
-{
- struct bas_cardstate *ucs = urb->context;
- int status = urb->status;
- int rc;
- unsigned long flags;
-
- /* check status */
- switch (status) {
- case 0: /* normal completion */
- spin_lock_irqsave(&ucs->lock, flags);
- switch (ucs->pending) {
- case HD_DEVICE_INIT_ACK: /* no reply expected */
- del_timer(&ucs->timer_ctrl);
- ucs->pending = 0;
- break;
- }
- spin_unlock_irqrestore(&ucs->lock, flags);
- return;
-
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* ignore silently */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- break;
-
- default: /* any failure */
- /* don't retry if suspend requested */
- if (++ucs->retry_ctrl > BAS_RETRY ||
- (ucs->basstate & BS_SUSPEND)) {
- dev_err(&ucs->interface->dev,
- "control request 0x%02x failed: %s\n",
- ucs->dr_ctrl.bRequest,
- get_usb_statmsg(status));
- break; /* give up */
- }
- dev_notice(&ucs->interface->dev,
- "control request 0x%02x: %s, retry %d\n",
- ucs->dr_ctrl.bRequest, get_usb_statmsg(status),
- ucs->retry_ctrl);
- /* urb->dev is clobbered by USB subsystem */
- urb->dev = ucs->udev;
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(rc)) {
- dev_err(&ucs->interface->dev,
- "could not resubmit request 0x%02x: %s\n",
- ucs->dr_ctrl.bRequest, get_usb_rcmsg(rc));
- break;
- }
- /* resubmitted */
- return;
- }
-
- /* failed, clear pending request */
- spin_lock_irqsave(&ucs->lock, flags);
- del_timer(&ucs->timer_ctrl);
- ucs->pending = 0;
- spin_unlock_irqrestore(&ucs->lock, flags);
- wake_up(&ucs->waitqueue);
-}
-
-/* req_submit
- * submit a control output request without a message buffer to the Gigaset base
- * and optionally start a timeout
- * parameters:
- * bcs B channel control structure
- * req control request code (HD_*)
- * val control request parameter value (set to 0 if unused)
- * timeout timeout in 1/10 sec. (0: no timeout)
- * return value:
- * 0 on success
- * -EBUSY if another request is pending
- * any URB submission error code
- */
-static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
-{
- struct bas_cardstate *ucs = bcs->cs->hw.bas;
- int ret;
- unsigned long flags;
-
- gig_dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
-
- spin_lock_irqsave(&ucs->lock, flags);
- if (ucs->pending) {
- spin_unlock_irqrestore(&ucs->lock, flags);
- dev_err(bcs->cs->dev,
- "submission of request 0x%02x failed: "
- "request 0x%02x still pending\n",
- req, ucs->pending);
- return -EBUSY;
- }
-
- ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
- ucs->dr_ctrl.bRequest = req;
- ucs->dr_ctrl.wValue = cpu_to_le16(val);
- ucs->dr_ctrl.wIndex = 0;
- ucs->dr_ctrl.wLength = 0;
- usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- (unsigned char *) &ucs->dr_ctrl, NULL, 0,
- write_ctrl_callback, ucs);
- ucs->retry_ctrl = 0;
- ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
- if (unlikely(ret)) {
- dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
- req, get_usb_rcmsg(ret));
- spin_unlock_irqrestore(&ucs->lock, flags);
- return ret;
- }
- ucs->pending = req;
-
- if (timeout > 0) {
- gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
- mod_timer(&ucs->timer_ctrl, jiffies + timeout * HZ / 10);
- }
-
- spin_unlock_irqrestore(&ucs->lock, flags);
- return 0;
-}
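-/* Typical invocation, as in gigaset_init_bchannel() below: ask the base to
- * open B channel 1 and arm the standard timeout:
- *
- *	ret = req_submit(bcs, HD_OPEN_B1CHANNEL, 0, BAS_TIMEOUT);
- *
- * Completion arrives out of band: read_int_callback() sees the ACK message
- * and check_pending() then clears ucs->pending.
- */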
-
-/* gigaset_init_bchannel
- * called by common.c to connect a B channel
- * initialize isochronous I/O and tell the Gigaset base to open the channel
- * argument:
- * B channel control structure
- * return value:
- * 0 on success, error code < 0 on error
- */
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- int req, ret;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (unlikely(!cs->connected)) {
- gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- spin_unlock_irqrestore(&cs->lock, flags);
- return -ENODEV;
- }
-
- if (cs->hw.bas->basstate & BS_SUSPEND) {
- dev_notice(cs->dev,
- "not starting isoc I/O, suspend in progress\n");
- spin_unlock_irqrestore(&cs->lock, flags);
- return -EHOSTUNREACH;
- }
-
- ret = starturbs(bcs);
- if (ret < 0) {
- spin_unlock_irqrestore(&cs->lock, flags);
- dev_err(cs->dev,
- "could not start isoc I/O for channel B%d: %s\n",
- bcs->channel + 1,
- ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
- if (ret != -ENODEV)
- error_hangup(bcs);
- return ret;
- }
-
- req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
- ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
- if (ret < 0) {
- dev_err(cs->dev, "could not open channel B%d\n",
- bcs->channel + 1);
- stopurbs(bcs->hw.bas);
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
- if (ret < 0 && ret != -ENODEV)
- error_hangup(bcs);
- return ret;
-}
-
-/* gigaset_close_bchannel
- * called by common.c to disconnect a B channel
- * tell the Gigaset base to close the channel
- * isochronous I/O will be stopped and the LL notified when the
- * acknowledgement for the close arrives
- * argument:
- * B channel control structure
- * return value:
- * 0 on success, error code < 0 on error
- */
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- int req, ret;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (unlikely(!cs->connected)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- return -ENODEV;
- }
-
- if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
- /* channel not running: just signal common.c */
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_bchannel_down(bcs);
- return 0;
- }
-
- /* channel running: tell device to close it */
- req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
- ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
- if (ret < 0)
- dev_err(cs->dev, "closing channel B%d failed\n",
- bcs->channel + 1);
-
- spin_unlock_irqrestore(&cs->lock, flags);
- return ret;
-}
-
-/* Device Operations */
-/* ================= */
-
-/* complete_cb
- * unqueue first command buffer from queue, waking any sleepers
- * must be called with cs->cmdlock held
- * parameter:
- * cs controller state structure
- */
-static void complete_cb(struct cardstate *cs)
-{
- struct cmdbuf_t *cb = cs->cmdbuf;
-
- /* unqueue completed buffer */
- cs->cmdbytes -= cs->curlen;
- gig_dbg(DEBUG_OUTPUT, "write_command: sent %u bytes, %u left",
- cs->curlen, cs->cmdbytes);
- if (cb->next != NULL) {
- cs->cmdbuf = cb->next;
- cs->cmdbuf->prev = NULL;
- cs->curlen = cs->cmdbuf->len;
- } else {
- cs->cmdbuf = NULL;
- cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- }
-
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
-
- kfree(cb);
-}
-
-/* write_command_callback
- * USB completion handler for AT command transmission
- * called by the USB subsystem in interrupt context
- * parameter:
- * urb USB request block of completed request
- * urb->context = controller state structure
- */
-static void write_command_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- struct bas_cardstate *ucs = cs->hw.bas;
- int status = urb->status;
- unsigned long flags;
-
- update_basstate(ucs, 0, BS_ATWRPEND);
- wake_up(&ucs->waitqueue);
-
- /* check status */
- switch (status) {
- case 0: /* normal completion */
- break;
- case -ENOENT: /* cancelled */
- case -ECONNRESET: /* cancelled (async) */
- case -EINPROGRESS: /* pending */
- case -ENODEV: /* device removed */
- case -ESHUTDOWN: /* device shut down */
- /* ignore silently */
- gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(status));
- return;
- default: /* any failure */
- if (++ucs->retry_cmd_out > BAS_RETRY) {
- dev_warn(cs->dev,
- "command write: %s, "
- "giving up after %d retries\n",
- get_usb_statmsg(status),
- ucs->retry_cmd_out);
- break;
- }
- if (ucs->basstate & BS_SUSPEND) {
- dev_warn(cs->dev,
- "command write: %s, "
- "won't retry - suspend requested\n",
- get_usb_statmsg(status));
- break;
- }
- if (cs->cmdbuf == NULL) {
- dev_warn(cs->dev,
- "command write: %s, "
- "cannot retry - cmdbuf gone\n",
- get_usb_statmsg(status));
- break;
- }
- dev_notice(cs->dev, "command write: %s, retry %d\n",
- get_usb_statmsg(status), ucs->retry_cmd_out);
- if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
- /* resubmitted - bypass regular exit block */
- return;
- /* command send failed, assume base still waiting */
- update_basstate(ucs, BS_ATREADY, 0);
- }
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- if (cs->cmdbuf != NULL)
- complete_cb(cs);
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-}
-
-/* atrdy_timeout
- * timeout routine for AT command transmission
- * argument:
- * controller state structure
- */
-static void atrdy_timeout(struct timer_list *t)
-{
- struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy);
- struct cardstate *cs = ucs->cs;
-
- dev_warn(cs->dev, "timeout waiting for HD_READY_SEND_ATDATA\n");
-
- /* fake the missing signal - what else can I do? */
- update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
- start_cbsend(cs);
-}
-
-/* atwrite_submit
- * submit an HD_WRITE_ATMESSAGE command URB
- * parameters:
- * cs controller state structure
- * buf buffer containing command to send
- * len length of command to send
- * return value:
- * 0 on success
- * -EBUSY if another request is pending
- * any URB submission error code
- */
-static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
-{
- struct bas_cardstate *ucs = cs->hw.bas;
- int rc;
-
- gig_dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);
-
- if (update_basstate(ucs, BS_ATWRPEND, 0) & BS_ATWRPEND) {
- dev_err(cs->dev,
- "could not submit HD_WRITE_ATMESSAGE: URB busy\n");
- return -EBUSY;
- }
-
- ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
- ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
- ucs->dr_cmd_out.wValue = 0;
- ucs->dr_cmd_out.wIndex = 0;
- ucs->dr_cmd_out.wLength = cpu_to_le16(len);
- usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- (unsigned char *) &ucs->dr_cmd_out, buf, len,
- write_command_callback, cs);
- rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
- if (unlikely(rc)) {
- update_basstate(ucs, 0, BS_ATWRPEND);
- dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
- get_usb_rcmsg(rc));
- return rc;
- }
-
- /* submitted successfully, start timeout if necessary */
- if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) {
- gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs",
- ATRDY_TIMEOUT);
- mod_timer(&ucs->timer_atrdy, jiffies + ATRDY_TIMEOUT * HZ / 10);
- }
- return 0;
-}
-
-/* start_cbsend
- * start transmission of AT command queue if necessary
- * parameter:
- * cs controller state structure
- * return value:
- * 0 on success
- * error code < 0 on error
- */
-static int start_cbsend(struct cardstate *cs)
-{
- struct cmdbuf_t *cb;
- struct bas_cardstate *ucs = cs->hw.bas;
- unsigned long flags;
- int rc;
- int retval = 0;
-
- /* check if suspend requested */
- if (ucs->basstate & BS_SUSPEND) {
- gig_dbg(DEBUG_OUTPUT, "suspending");
- return -EHOSTUNREACH;
- }
-
- /* check if AT channel is open */
- if (!(ucs->basstate & BS_ATOPEN)) {
- gig_dbg(DEBUG_OUTPUT, "AT channel not open");
- rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
- if (rc < 0) {
- /* flush command queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
- while (cs->cmdbuf != NULL)
- complete_cb(cs);
- spin_unlock_irqrestore(&cs->cmdlock, flags);
- }
- return rc;
- }
-
- /* try to send first command in queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
-
- while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) {
- ucs->retry_cmd_out = 0;
- rc = atwrite_submit(cs, cb->buf, cb->len);
- if (unlikely(rc)) {
- retval = rc;
- complete_cb(cs);
- }
- }
-
- spin_unlock_irqrestore(&cs->cmdlock, flags);
- return retval;
-}
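-/* The BS_ATREADY handshake above pairs with read_int_callback(): each
- * HD_READY_SEND_ATDATA message from the base sets BS_ATREADY, and
- * atwrite_submit() clears it again when a command goes out, so at most
- * one AT message is in flight per ready signal.
- */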
-
-/* gigaset_write_cmd
- * This function is called by the device independent part of the driver
- * to transmit an AT command string to the Gigaset device.
- * It encapsulates the device specific method for transmission over the
- * direct USB connection to the base.
- * The command string is added to the queue of commands to send, and
- * USB transmission is started if necessary.
- * parameters:
- * cs controller state structure
- * cb command buffer structure
- * return value:
- * number of bytes queued on success
- * error code < 0 on error
- */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
- unsigned long flags;
- int rc;
-
- gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
- DEBUG_TRANSCMD : DEBUG_LOCKCMD,
- "CMD Transmit", cb->len, cb->buf);
-
- /* translate "+++" escape sequence sent as a single separate command
- * into "close AT channel" command for error recovery
- * The next command will reopen the AT channel automatically.
- */
- if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) {
- /* If an HD_RECEIVEATDATA_ACK message remains unhandled
- * because of an error, the base never sends another one.
- * The response channel is thus effectively blocked.
- * Closing and reopening the AT channel does *not* clear
- * this condition.
- * As a stopgap measure, submit a zero-length AT read
- * before closing the AT channel. This has the undocumented
- * effect of triggering a new HD_RECEIVEATDATA_ACK message
- * from the base if necessary.
- * The subsequent AT channel close then discards any pending
- * messages.
- */
- spin_lock_irqsave(&cs->lock, flags);
- if (!(cs->hw.bas->basstate & BS_ATRDPEND)) {
- kfree(cs->hw.bas->rcvbuf);
- cs->hw.bas->rcvbuf = NULL;
- cs->hw.bas->rcvbuf_size = 0;
- cs->hw.bas->retry_cmd_in = 0;
- atread_submit(cs, 0);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
- if (!rc)
- rc = cb->len;
- kfree(cb);
- return rc;
- }
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- cb->prev = cs->lastcmdbuf;
- if (cs->lastcmdbuf)
- cs->lastcmdbuf->next = cb;
- else {
- cs->cmdbuf = cb;
- cs->curlen = cb->len;
- }
- cs->cmdbytes += cb->len;
- cs->lastcmdbuf = cb;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (unlikely(!cs->connected)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- /* flush command queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
- while (cs->cmdbuf != NULL)
- complete_cb(cs);
- spin_unlock_irqrestore(&cs->cmdlock, flags);
- return -ENODEV;
- }
- rc = start_cbsend(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return rc < 0 ? rc : cb->len;
-}
-
-/* gigaset_write_room
- * tty_driver.write_room interface routine
- * return number of characters the driver will accept to be written via
- * gigaset_write_cmd
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_write_room(struct cardstate *cs)
-{
- return IF_WRITEBUF;
-}
-
-/* gigaset_chars_in_buffer
- * tty_driver.chars_in_buffer interface routine
- * return number of characters waiting to be sent
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
- return cs->cmdbytes;
-}
-
-/* gigaset_brkchars
- * implementation of ioctl(GIGASET_BRKCHARS)
- * parameter:
- * controller state structure
- * return value:
- * -EINVAL (unimplemented function)
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
- return -EINVAL;
-}
-
-
-/* Device Initialization/Shutdown */
-/* ============================== */
-
-/* Free hardware dependent part of the B channel structure
- * parameter:
- * bcs B channel structure
- */
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
- struct bas_bc_state *ubc = bcs->hw.bas;
- int i;
-
- if (!ubc)
- return;
-
- /* kill URBs and tasklets before freeing - better safe than sorry */
- ubc->running = 0;
- gig_dbg(DEBUG_INIT, "%s: killing isoc URBs", __func__);
- for (i = 0; i < BAS_OUTURBS; ++i) {
- usb_kill_urb(ubc->isoouturbs[i].urb);
- usb_free_urb(ubc->isoouturbs[i].urb);
- }
- for (i = 0; i < BAS_INURBS; ++i) {
- usb_kill_urb(ubc->isoinurbs[i]);
- usb_free_urb(ubc->isoinurbs[i]);
- }
- tasklet_kill(&ubc->sent_tasklet);
- tasklet_kill(&ubc->rcvd_tasklet);
- kfree(ubc->isooutbuf);
- kfree(ubc);
- bcs->hw.bas = NULL;
-}
-
-/* Initialize hardware dependent part of the B channel structure
- * parameter:
- * bcs B channel structure
- * return value:
- * 0 on success, error code < 0 on failure
- */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
- int i;
- struct bas_bc_state *ubc;
-
- bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
- if (!ubc) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- ubc->running = 0;
- atomic_set(&ubc->corrbytes, 0);
- spin_lock_init(&ubc->isooutlock);
- for (i = 0; i < BAS_OUTURBS; ++i) {
- ubc->isoouturbs[i].urb = NULL;
- ubc->isoouturbs[i].bcs = bcs;
- }
- ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
- ubc->numsub = 0;
- ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
- if (!ubc->isooutbuf) {
- pr_err("out of memory\n");
- kfree(ubc);
- bcs->hw.bas = NULL;
- return -ENOMEM;
- }
- tasklet_init(&ubc->sent_tasklet,
- write_iso_tasklet, (unsigned long) bcs);
-
- spin_lock_init(&ubc->isoinlock);
- for (i = 0; i < BAS_INURBS; ++i)
- ubc->isoinurbs[i] = NULL;
- ubc->isoindone = NULL;
- ubc->loststatus = -EINPROGRESS;
- ubc->isoinlost = 0;
- ubc->seqlen = 0;
- ubc->inbyte = 0;
- ubc->inbits = 0;
- ubc->goodbytes = 0;
- ubc->alignerrs = 0;
- ubc->fcserrs = 0;
- ubc->frameerrs = 0;
- ubc->giants = 0;
- ubc->runts = 0;
- ubc->aborts = 0;
- ubc->shared0s = 0;
- ubc->stolen0s = 0;
- tasklet_init(&ubc->rcvd_tasklet,
- read_iso_tasklet, (unsigned long) bcs);
- return 0;
-}
-
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
- struct bas_bc_state *ubc = bcs->hw.bas;
-
- ubc->running = 0;
- atomic_set(&ubc->corrbytes, 0);
- ubc->numsub = 0;
- spin_lock_init(&ubc->isooutlock);
- spin_lock_init(&ubc->isoinlock);
- ubc->loststatus = -EINPROGRESS;
-}
-
-static void gigaset_freecshw(struct cardstate *cs)
-{
- /* timers, URBs and rcvbuf are disposed of in disconnect */
- kfree(cs->hw.bas->int_in_buf);
- kfree(cs->hw.bas);
- cs->hw.bas = NULL;
-}
-
-/* Initialize hardware dependent part of the cardstate structure
- * parameter:
- * cs cardstate structure
- * return value:
- * 0 on success, error code < 0 on failure
- */
-static int gigaset_initcshw(struct cardstate *cs)
-{
- struct bas_cardstate *ucs;
-
- cs->hw.bas = ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
- if (!ucs) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
- ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
- if (!ucs->int_in_buf) {
- kfree(ucs);
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&ucs->lock);
- ucs->cs = cs;
- timer_setup(&ucs->timer_ctrl, req_timeout, 0);
- timer_setup(&ucs->timer_atrdy, atrdy_timeout, 0);
- timer_setup(&ucs->timer_cmd_in, cmd_in_timeout, 0);
- timer_setup(&ucs->timer_int_in, int_in_resubmit, 0);
- init_waitqueue_head(&ucs->waitqueue);
- INIT_WORK(&ucs->int_in_wq, int_in_work);
-
- return 0;
-}
-
-/* freeurbs
- * unlink and deallocate all URBs unconditionally
- * caller must make sure that no commands are still in progress
- * parameter:
- * cs controller state structure
- */
-static void freeurbs(struct cardstate *cs)
-{
- struct bas_cardstate *ucs = cs->hw.bas;
- struct bas_bc_state *ubc;
- int i, j;
-
- gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
- for (j = 0; j < BAS_CHANNELS; ++j) {
- ubc = cs->bcs[j].hw.bas;
- for (i = 0; i < BAS_OUTURBS; ++i) {
- usb_kill_urb(ubc->isoouturbs[i].urb);
- usb_free_urb(ubc->isoouturbs[i].urb);
- ubc->isoouturbs[i].urb = NULL;
- }
- for (i = 0; i < BAS_INURBS; ++i) {
- usb_kill_urb(ubc->isoinurbs[i]);
- usb_free_urb(ubc->isoinurbs[i]);
- ubc->isoinurbs[i] = NULL;
- }
- }
- usb_kill_urb(ucs->urb_int_in);
- usb_free_urb(ucs->urb_int_in);
- ucs->urb_int_in = NULL;
- usb_kill_urb(ucs->urb_cmd_out);
- usb_free_urb(ucs->urb_cmd_out);
- ucs->urb_cmd_out = NULL;
- usb_kill_urb(ucs->urb_cmd_in);
- usb_free_urb(ucs->urb_cmd_in);
- ucs->urb_cmd_in = NULL;
- usb_kill_urb(ucs->urb_ctrl);
- usb_free_urb(ucs->urb_ctrl);
- ucs->urb_ctrl = NULL;
-}
-
-/* gigaset_probe
- * This function is called when a new USB device is connected.
- * It checks whether the new device is handled by this driver.
- */
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_host_interface *hostif;
- struct usb_device *udev = interface_to_usbdev(interface);
- struct cardstate *cs = NULL;
- struct bas_cardstate *ucs = NULL;
- struct bas_bc_state *ubc;
- struct usb_endpoint_descriptor *endpoint;
- int i, j;
- int rc;
-
- gig_dbg(DEBUG_INIT,
- "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
- __func__, le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- /* set required alternate setting */
- hostif = interface->cur_altsetting;
- if (hostif->desc.bAlternateSetting != 3) {
- gig_dbg(DEBUG_INIT,
- "%s: wrong alternate setting %d - trying to switch",
- __func__, hostif->desc.bAlternateSetting);
- if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
- < 0) {
- dev_warn(&udev->dev, "usb_set_interface failed, "
- "device %d interface %d altsetting %d\n",
- udev->devnum, hostif->desc.bInterfaceNumber,
- hostif->desc.bAlternateSetting);
- return -ENODEV;
- }
- hostif = interface->cur_altsetting;
- }
-
- /* Reject application-specific interfaces
- */
- if (hostif->desc.bInterfaceClass != 255) {
- dev_warn(&udev->dev, "%s: bInterfaceClass == %d\n",
- __func__, hostif->desc.bInterfaceClass);
- return -ENODEV;
- }
-
- if (hostif->desc.bNumEndpoints < 1)
- return -ENODEV;
-
- dev_info(&udev->dev,
- "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
- __func__, le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- /* allocate memory for our device state and initialize it */
- cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
- GIGASET_MODULENAME);
- if (!cs)
- return -ENODEV;
- ucs = cs->hw.bas;
-
- /* save off device structure ptrs for later use */
- usb_get_dev(udev);
- ucs->udev = udev;
- ucs->interface = interface;
- cs->dev = &interface->dev;
-
- /* allocate URBs:
- * - one for the interrupt pipe
- * - three for the different uses of the default control pipe
- * - three for each isochronous pipe
- */
- if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
- !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
- !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
- !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
- goto allocerr;
-
- for (j = 0; j < BAS_CHANNELS; ++j) {
- ubc = cs->bcs[j].hw.bas;
- for (i = 0; i < BAS_OUTURBS; ++i)
- if (!(ubc->isoouturbs[i].urb =
- usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
- goto allocerr;
- for (i = 0; i < BAS_INURBS; ++i)
- if (!(ubc->isoinurbs[i] =
- usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
- goto allocerr;
- }
-
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
-
- /* Fill the interrupt urb and send it to the core */
- endpoint = &hostif->endpoint[0].desc;
- usb_fill_int_urb(ucs->urb_int_in, udev,
- usb_rcvintpipe(udev,
- usb_endpoint_num(endpoint)),
- ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
- endpoint->bInterval);
- rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
- if (rc != 0) {
- dev_err(cs->dev, "could not submit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- goto error;
- }
- ucs->retry_int_in = 0;
-
- /* tell the device that the driver is ready */
- rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
- if (rc != 0)
- goto error;
-
- /* tell common part that the device is ready */
- if (startmode == SM_LOCKED)
- cs->mstate = MS_LOCKED;
-
- /* save address of controller structure */
- usb_set_intfdata(interface, cs);
-
- rc = gigaset_start(cs);
- if (rc < 0)
- goto error;
-
- return 0;
-
-allocerr:
- dev_err(cs->dev, "could not allocate URBs\n");
- rc = -ENOMEM;
-error:
- freeurbs(cs);
- usb_set_intfdata(interface, NULL);
- usb_put_dev(udev);
- gigaset_freecs(cs);
- return rc;
-}
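
The probe routine validates only the endpoint count before handing
hostif->endpoint[0] to usb_fill_int_urb(), which assumes an interrupt-IN
endpoint. A stricter probe could also verify the endpoint type; a sketch of
such a check (not present in the original driver), extending the existing
bNumEndpoints test:

	if (hostif->desc.bNumEndpoints < 1 ||
	    !usb_endpoint_is_int_in(&hostif->endpoint[0].desc))
		return -ENODEV;	/* no usable interrupt-IN endpoint */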
-
-/* gigaset_disconnect
- * This function is called when the Gigaset base is unplugged.
- */
-static void gigaset_disconnect(struct usb_interface *interface)
-{
- struct cardstate *cs;
- struct bas_cardstate *ucs;
- int j;
-
- cs = usb_get_intfdata(interface);
-
- ucs = cs->hw.bas;
-
- dev_info(cs->dev, "disconnecting Gigaset base\n");
-
- /* mark base as not ready, all channels disconnected */
- ucs->basstate = 0;
-
- /* tell LL all channels are down */
- for (j = 0; j < BAS_CHANNELS; ++j)
- gigaset_bchannel_down(cs->bcs + j);
-
- /* stop driver (common part) */
- gigaset_stop(cs);
-
-	/* stop delayed work and URBs, free resources */
- del_timer_sync(&ucs->timer_ctrl);
- del_timer_sync(&ucs->timer_atrdy);
- del_timer_sync(&ucs->timer_cmd_in);
- del_timer_sync(&ucs->timer_int_in);
- cancel_work_sync(&ucs->int_in_wq);
- freeurbs(cs);
- usb_set_intfdata(interface, NULL);
- kfree(ucs->rcvbuf);
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
- usb_put_dev(ucs->udev);
- ucs->interface = NULL;
- ucs->udev = NULL;
- cs->dev = NULL;
- gigaset_freecs(cs);
-}
-
-/* gigaset_suspend
- * This function is called before the USB connection is suspended
- * or before the USB device is reset.
- * In the latter case, message == PMSG_ON.
- */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
- struct bas_cardstate *ucs = cs->hw.bas;
- int rc;
-
- /* set suspend flag; this stops AT command/response traffic */
- if (update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND) {
- gig_dbg(DEBUG_SUSPEND, "already suspended");
- return 0;
- }
-
- /* wait a bit for blocking conditions to go away */
- rc = wait_event_timeout(ucs->waitqueue,
- !(ucs->basstate &
- (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)),
- BAS_TIMEOUT * HZ / 10);
- gig_dbg(DEBUG_SUSPEND, "wait_event_timeout() -> %d", rc);
-
- /* check for conditions preventing suspend */
- if (ucs->basstate & (BS_B1OPEN | BS_B2OPEN | BS_ATRDPEND | BS_ATWRPEND)) {
- dev_warn(cs->dev, "cannot suspend:\n");
- if (ucs->basstate & BS_B1OPEN)
- dev_warn(cs->dev, " B channel 1 open\n");
- if (ucs->basstate & BS_B2OPEN)
- dev_warn(cs->dev, " B channel 2 open\n");
- if (ucs->basstate & BS_ATRDPEND)
- dev_warn(cs->dev, " receiving AT reply\n");
- if (ucs->basstate & BS_ATWRPEND)
- dev_warn(cs->dev, " sending AT command\n");
- update_basstate(ucs, 0, BS_SUSPEND);
- return -EBUSY;
- }
-
- /* close AT channel if open */
- if (ucs->basstate & BS_ATOPEN) {
- gig_dbg(DEBUG_SUSPEND, "closing AT channel");
- rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0);
- if (rc) {
- update_basstate(ucs, 0, BS_SUSPEND);
- return rc;
- }
- wait_event_timeout(ucs->waitqueue, !ucs->pending,
- BAS_TIMEOUT * HZ / 10);
- /* in case of timeout, proceed anyway */
- }
-
- /* kill all URBs and delayed work that might still be pending */
- usb_kill_urb(ucs->urb_ctrl);
- usb_kill_urb(ucs->urb_int_in);
- del_timer_sync(&ucs->timer_ctrl);
- del_timer_sync(&ucs->timer_atrdy);
- del_timer_sync(&ucs->timer_cmd_in);
- del_timer_sync(&ucs->timer_int_in);
-
- /* don't try to cancel int_in_wq from within reset as it
- * might be the one requesting the reset
- */
- if (message.event != PM_EVENT_ON)
- cancel_work_sync(&ucs->int_in_wq);
-
- gig_dbg(DEBUG_SUSPEND, "suspend complete");
- return 0;
-}
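
update_basstate() is defined earlier in the file, outside this excerpt. Its
usage here, atomically setting and clearing flag bits while returning the
previous state word, suggests a shape along these lines; a sketch consistent
with the calls above, guarded by the ucs->lock spinlock initialized in
gigaset_initcshw():

	static int update_basstate(struct bas_cardstate *ucs, int set, int clear)
	{
		unsigned long flags;
		int state;

		spin_lock_irqsave(&ucs->lock, flags);
		state = ucs->basstate;			/* old value, returned to caller */
		ucs->basstate = (state & ~clear) | set;	/* apply both masks atomically */
		spin_unlock_irqrestore(&ucs->lock, flags);
		return state;
	}

Returning the old state is what makes the idiom
"update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND" a test-and-set.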
-
-/* gigaset_resume
- * This function is called after the USB connection has been resumed.
- */
-static int gigaset_resume(struct usb_interface *intf)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
- struct bas_cardstate *ucs = cs->hw.bas;
- int rc;
-
- /* resubmit interrupt URB for spontaneous messages from base */
- rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
- if (rc) {
- dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
- get_usb_rcmsg(rc));
- return rc;
- }
- ucs->retry_int_in = 0;
-
- /* clear suspend flag to reallow activity */
- update_basstate(ucs, 0, BS_SUSPEND);
-
- gig_dbg(DEBUG_SUSPEND, "resume complete");
- return 0;
-}
-
-/* gigaset_pre_reset
- * This function is called before the USB connection is reset.
- */
-static int gigaset_pre_reset(struct usb_interface *intf)
-{
- /* handle just like suspend */
- return gigaset_suspend(intf, PMSG_ON);
-}
-
-/* gigaset_post_reset
- * This function is called after the USB connection has been reset.
- */
-static int gigaset_post_reset(struct usb_interface *intf)
-{
- /* FIXME: send HD_DEVICE_INIT_ACK? */
-
- /* resume operations */
- return gigaset_resume(intf);
-}
-
-
-static const struct gigaset_ops gigops = {
- .write_cmd = gigaset_write_cmd,
- .write_room = gigaset_write_room,
- .chars_in_buffer = gigaset_chars_in_buffer,
- .brkchars = gigaset_brkchars,
- .init_bchannel = gigaset_init_bchannel,
- .close_bchannel = gigaset_close_bchannel,
- .initbcshw = gigaset_initbcshw,
- .freebcshw = gigaset_freebcshw,
- .reinitbcshw = gigaset_reinitbcshw,
- .initcshw = gigaset_initcshw,
- .freecshw = gigaset_freecshw,
- .set_modem_ctrl = gigaset_set_modem_ctrl,
- .baud_rate = gigaset_baud_rate,
- .set_line_ctrl = gigaset_set_line_ctrl,
- .send_skb = gigaset_isoc_send_skb,
- .handle_input = gigaset_isoc_input,
-};
-
-/* bas_gigaset_init
- * This function is called after the kernel module is loaded.
- */
-static int __init bas_gigaset_init(void)
-{
- int result;
-
- /* allocate memory for our driver state and initialize it */
- driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
- GIGASET_MODULENAME, GIGASET_DEVNAME,
- &gigops, THIS_MODULE);
- if (driver == NULL)
- goto error;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&gigaset_usb_driver);
- if (result < 0) {
- pr_err("error %d registering USB driver\n", -result);
- goto error;
- }
-
- pr_info(DRIVER_DESC "\n");
- return 0;
-
-error:
- if (driver)
- gigaset_freedriver(driver);
- driver = NULL;
- return -1;
-}
-
-/* bas_gigaset_exit
- * This function is called before the kernel module is unloaded.
- */
-static void __exit bas_gigaset_exit(void)
-{
- struct bas_cardstate *ucs;
- int i;
-
- gigaset_blockdriver(driver); /* => probe will fail
- * => no gigaset_start any more
- */
-
- /* stop all connected devices */
- for (i = 0; i < driver->minors; i++) {
- if (gigaset_shutdown(driver->cs + i) < 0)
- continue; /* no device */
- /* from now on, no isdn callback should be possible */
-
- /* close all still open channels */
- ucs = driver->cs[i].hw.bas;
- if (ucs->basstate & BS_B1OPEN) {
- gig_dbg(DEBUG_INIT, "closing B1 channel");
- usb_control_msg(ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ,
- 0, 0, NULL, 0, BAS_TIMEOUT);
- }
- if (ucs->basstate & BS_B2OPEN) {
- gig_dbg(DEBUG_INIT, "closing B2 channel");
- usb_control_msg(ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ,
- 0, 0, NULL, 0, BAS_TIMEOUT);
- }
- if (ucs->basstate & BS_ATOPEN) {
- gig_dbg(DEBUG_INIT, "closing AT channel");
- usb_control_msg(ucs->udev,
- usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ,
- 0, 0, NULL, 0, BAS_TIMEOUT);
- }
- ucs->basstate = 0;
- }
-
- /* deregister this driver with the USB subsystem */
- usb_deregister(&gigaset_usb_driver);
- /* this will call the disconnect-callback */
- /* from now on, no disconnect/probe callback should be running */
-
- gigaset_freedriver(driver);
- driver = NULL;
-}
-
-
-module_init(bas_gigaset_init);
-module_exit(bas_gigaset_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/capi.c b/drivers/staging/isdn/gigaset/capi.c
deleted file mode 100644
index 83d7dd48c61d..000000000000
--- a/drivers/staging/isdn/gigaset/capi.c
+++ /dev/null
@@ -1,2517 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Kernel CAPI interface for the Gigaset driver
- *
- * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
- *
- */
-
-#include "gigaset.h"
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
-#include <linux/isdn/capilli.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/export.h>
-
-/* missing from kernelcapi.h */
-#define CapiNcpiNotSupportedByProtocol 0x0001
-#define CapiFlagsNotSupportedByProtocol 0x0002
-#define CapiAlertAlreadySent 0x0003
-#define CapiFacilitySpecificFunctionNotSupported 0x3011
-
-/* missing from capicmd.h */
-#define CAPI_CONNECT_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 8 * 1)
-#define CAPI_CONNECT_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 3 * 1)
-#define CAPI_CONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 1)
-#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 1)
-#define CAPI_DATA_B3_REQ_LEN64 (CAPI_MSG_BASELEN + 4 + 4 + 2 + 2 + 2 + 8)
-#define CAPI_DATA_B3_CONF_LEN (CAPI_MSG_BASELEN + 4 + 2 + 2)
-#define CAPI_DISCONNECT_IND_LEN (CAPI_MSG_BASELEN + 4 + 2)
-#define CAPI_DISCONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 1)
-#define CAPI_FACILITY_CONF_BASELEN (CAPI_MSG_BASELEN + 4 + 2 + 2 + 1)
-/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
-#define CAPI_STDCONF_LEN (CAPI_MSG_BASELEN + 4 + 2)
-
-#define CAPI_FACILITY_HANDSET 0x0000
-#define CAPI_FACILITY_DTMF 0x0001
-#define CAPI_FACILITY_V42BIS 0x0002
-#define CAPI_FACILITY_SUPPSVC 0x0003
-#define CAPI_FACILITY_WAKEUP 0x0004
-#define CAPI_FACILITY_LI 0x0005
-
-#define CAPI_SUPPSVC_GETSUPPORTED 0x0000
-#define CAPI_SUPPSVC_LISTEN 0x0001
-
-/* missing from capiutil.h */
-#define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9)
-#define CAPIMSG_NCCI_PART(m) CAPIMSG_U16(m, 10)
-#define CAPIMSG_HANDLE_REQ(m) CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
-#define CAPIMSG_FLAGS(m) CAPIMSG_U16(m, 20)
-#define CAPIMSG_SETCONTROLLER(m, contr) capimsg_setu8(m, 8, contr)
-#define CAPIMSG_SETPLCI_PART(m, plci) capimsg_setu8(m, 9, plci)
-#define CAPIMSG_SETNCCI_PART(m, ncci) capimsg_setu16(m, 10, ncci)
-#define CAPIMSG_SETFLAGS(m, flags) capimsg_setu16(m, 20, flags)
-
-/* parameters with differing location in DATA_B3_CONF/_RESP: */
-#define CAPIMSG_SETHANDLE_CONF(m, handle) capimsg_setu16(m, 12, handle)
-#define CAPIMSG_SETINFO_CONF(m, info) capimsg_setu16(m, 14, info)
-
-/* Flags (DATA_B3_REQ/_IND) */
-#define CAPI_FLAGS_DELIVERY_CONFIRMATION 0x04
-#define CAPI_FLAGS_RESERVED (~0x1f)
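
For orientation, these offsets sit on top of the fixed CAPI 2.0 message
header. A summary of the layout the macros above assume (header fields per
the CAPI 2.0 standard, the rest taken from the definitions themselves):

	/*
	 *   offset  size  field
	 *        0     2  total length        (CAPIMSG_LEN)
	 *        2     2  application ID      (CAPIMSG_APPID)
	 *        4     1  command             (CAPIMSG_COMMAND)
	 *        5     1  subcommand          (CAPIMSG_SUBCOMMAND)
	 *        6     2  message number      (CAPIMSG_MSGID)
	 *        8     1  controller          (CAPIMSG_SETCONTROLLER)
	 *        9     1  PLCI part           (CAPIMSG_PLCI_PART)
	 *       10     2  NCCI part           (CAPIMSG_NCCI_PART)
	 *
	 * DATA_B3_REQ/_IND: data handle at offset 18, flags at offset 20;
	 * DATA_B3_CONF: data handle at offset 12, info at offset 14.
	 */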
-
-/* buffer sizes */
-#define MAX_BC_OCTETS 11
-#define MAX_HLC_OCTETS 3
-#define MAX_NUMBER_DIGITS 20
-#define MAX_FMT_IE_LEN 20
-
-/* values for bcs->apconnstate */
-#define APCONN_NONE 0 /* inactive/listening */
-#define APCONN_SETUP 1 /* connecting */
-#define APCONN_ACTIVE 2 /* B channel up */
-
-/* registered application data structure */
-struct gigaset_capi_appl {
- struct list_head ctrlist;
- struct gigaset_capi_appl *bcnext;
- u16 id;
- struct capi_register_params rp;
- u16 nextMessageNumber;
- u32 listenInfoMask;
- u32 listenCIPmask;
-};
-
-/* CAPI specific controller data structure */
-struct gigaset_capi_ctr {
- struct capi_ctr ctr;
- struct list_head appls;
- struct sk_buff_head sendqueue;
- atomic_t sendqlen;
- /* two _cmsg structures possibly used concurrently: */
- _cmsg hcmsg; /* for message composition triggered from hardware */
- _cmsg acmsg; /* for dissection of messages sent from application */
- u8 bc_buf[MAX_BC_OCTETS + 1];
- u8 hlc_buf[MAX_HLC_OCTETS + 1];
- u8 cgpty_buf[MAX_NUMBER_DIGITS + 3];
- u8 cdpty_buf[MAX_NUMBER_DIGITS + 2];
-};
-
-/* CIP Value table (from CAPI 2.0 standard, ch. 6.1) */
-static struct {
- u8 *bc;
- u8 *hlc;
-} cip2bchlc[] = {
- [1] = { "8090A3", NULL }, /* Speech (A-law) */
- [2] = { "8890", NULL }, /* Unrestricted digital information */
- [3] = { "8990", NULL }, /* Restricted digital information */
- [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */
- [5] = { "9190", NULL }, /* 7 kHz audio */
- [6] = { "9890", NULL }, /* Video */
- [7] = { "88C0C6E6", NULL }, /* Packet mode */
- [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */
- [9] = { "9190A5", NULL }, /* Unrestricted digital information
- * with tones/announcements */
- [16] = { "8090A3", "9181" }, /* Telephony */
- [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */
- [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */
- [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode
- * and Group 4 facsimile service
- * Classes II and III */
- [20] = { "8890", "91A8" }, /* Teletex service basic and
- * processable mode */
- [21] = { "8890", "91B1" }, /* Teletex service basic mode */
- [22] = { "8890", "91B2" }, /* International interworking for
- * Videotex */
- [23] = { "8890", "91B5" }, /* Telex */
- [24] = { "8890", "91B8" }, /* Message Handling Systems
- * in accordance with X.400 */
- [25] = { "8890", "91C1" }, /* OSI application
- * in accordance with X.200 */
- [26] = { "9190A5", "9181" }, /* 7 kHz telephony */
- [27] = { "9190A5", "916001" }, /* Video telephony, first connection */
- [28] = { "8890", "916002" }, /* Video telephony, second connection */
-};
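
The driver also walks this table in the reverse direction, mapping a BC/HLC
pair received from the device back to a CIP value (see the scans in
gigaset_isdn_icall() below). A hypothetical helper capturing that lookup:

	static int lookup_cip(const char *bc, const char *hlc)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++) {
			if (cip2bchlc[i].bc == NULL ||
			    strcmp((const char *)cip2bchlc[i].bc, bc) != 0)
				continue;	/* empty slot or BC mismatch */
			if (hlc == NULL && cip2bchlc[i].hlc == NULL)
				return i;	/* matched a BC-only entry */
			if (hlc != NULL && cip2bchlc[i].hlc != NULL &&
			    strcmp((const char *)cip2bchlc[i].hlc, hlc) == 0)
				return i;	/* BC and HLC both match */
		}
		return 0;	/* no predefined CIP value fits */
	}

For example, lookup_cip("8090A3", "9181") yields 16 (Telephony), while
lookup_cip("8090A3", NULL) yields 1 (Speech, A-law).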
-
-/*
- * helper functions
- * ================
- */
-
-/*
- * emit unsupported parameter warning
- */
-static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
- char *msgname, char *paramname)
-{
- if (param && *param)
- dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
- msgname, paramname);
-}
-
-/*
- * convert an IE from Gigaset hex string to ETSI binary representation
- * including length byte
- * return value: result length, -1 on error
- */
-static int encode_ie(char *in, u8 *out, int maxlen)
-{
- int l = 0;
- while (*in) {
- if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
- return -1;
- out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
- in += 2;
- }
- out[0] = l;
- return l;
-}
-
-/*
- * convert an IE from ETSI binary representation including length byte
- * to Gigaset hex string
- */
-static void decode_ie(u8 *in, char *out)
-{
- int i = *in;
- while (i-- > 0) {
- /* ToDo: conversion to upper case necessary? */
- *out++ = toupper(hex_asc_hi(*++in));
- *out++ = toupper(hex_asc_lo(*in));
- }
-}
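
A worked round-trip through these two converters, using the CIP 1 bearer
capability "8090A3" (speech, A-law) from the table above:

	char bc[] = "8090A3";
	u8 bin[MAX_BC_OCTETS + 1];
	char hex[2 * MAX_BC_OCTETS + 1] = { 0 };	/* decode_ie adds no NUL */

	if (encode_ie(bc, bin, MAX_BC_OCTETS) == 3) {
		/* bin now holds { 0x03, 0x80, 0x90, 0xA3 }: length byte + 3 octets */
		decode_ie(bin, hex);			/* hex now reads "8090A3" */
	}

Note the asymmetry: encode_ie() prepends the length byte and rejects odd-length
or non-hex input, while decode_ie() trusts its input's length byte and leaves
termination of the output string to the caller.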
-
-/*
- * retrieve application data structure for an application ID
- */
-static inline struct gigaset_capi_appl *
-get_appl(struct gigaset_capi_ctr *iif, u16 appl)
-{
- struct gigaset_capi_appl *ap;
-
- list_for_each_entry(ap, &iif->appls, ctrlist)
- if (ap->id == appl)
- return ap;
- return NULL;
-}
-
-/*
- * dump CAPI message to kernel messages for debugging
- */
-static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- /* dump at most 20 messages in 20 secs */
- static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
- _cdebbuf *cdb;
-
- if (!(gigaset_debuglevel & level))
- return;
- if (!___ratelimit(&msg_dump_ratelimit, tag))
- return;
-
- cdb = capi_cmsg2str(p);
- if (cdb) {
- gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
- cdebbuf_free(cdb);
- } else {
- gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
- capi_cmd2str(p->Command, p->Subcommand));
- }
-#endif
-}
-
-static inline void dump_rawmsg(enum debuglevel level, const char *tag,
- unsigned char *data)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- char *dbgline;
- int i, l;
-
- if (!(gigaset_debuglevel & level))
- return;
-
- l = CAPIMSG_LEN(data);
- if (l < 12) {
- gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
- return;
- }
- gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
- tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
- CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
- CAPIMSG_CONTROL(data));
- l -= 12;
- if (l <= 0)
- return;
- if (l > 64)
- l = 64; /* arbitrary limit */
- dbgline = kmalloc_array(3, l, GFP_ATOMIC);
- if (!dbgline)
- return;
- for (i = 0; i < l; i++) {
- dbgline[3 * i] = hex_asc_hi(data[12 + i]);
- dbgline[3 * i + 1] = hex_asc_lo(data[12 + i]);
- dbgline[3 * i + 2] = ' ';
- }
- dbgline[3 * l - 1] = '\0';
- gig_dbg(level, " %s", dbgline);
- kfree(dbgline);
- if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
- (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
- CAPIMSG_SUBCOMMAND(data) == CAPI_IND)) {
- l = CAPIMSG_DATALEN(data);
- gig_dbg(level, " DataLength=%d", l);
- if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA))
- return;
- if (l > 64)
- l = 64; /* arbitrary limit */
- dbgline = kmalloc_array(3, l, GFP_ATOMIC);
- if (!dbgline)
- return;
- data += CAPIMSG_LEN(data);
- for (i = 0; i < l; i++) {
- dbgline[3 * i] = hex_asc_hi(data[i]);
- dbgline[3 * i + 1] = hex_asc_lo(data[i]);
- dbgline[3 * i + 2] = ' ';
- }
- dbgline[3 * l - 1] = '\0';
- gig_dbg(level, " %s", dbgline);
- kfree(dbgline);
- }
-#endif
-}
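
The payload half of this dump could alternatively lean on the kernel's
generic hexdump helper, at the price of losing the gig_dbg() level gating
(print_hex_dump_debug() is gated by dynamic debug instead); a sketch:

	print_hex_dump_debug("gigaset: ", DUMP_PREFIX_OFFSET, 16, 1,
			     data + 12, min(l, 64), false);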
-
-/*
- * format CAPI IE as string
- */
-
-#ifdef CONFIG_GIGASET_DEBUG
-static const char *format_ie(const char *ie)
-{
- static char result[3 * MAX_FMT_IE_LEN];
- int len, count;
- char *pout = result;
-
- if (!ie)
- return "NULL";
-
- count = len = ie[0];
- if (count > MAX_FMT_IE_LEN)
- count = MAX_FMT_IE_LEN - 1;
- while (count--) {
- *pout++ = hex_asc_hi(*++ie);
- *pout++ = hex_asc_lo(*ie);
- *pout++ = ' ';
- }
- if (len > MAX_FMT_IE_LEN) {
- *pout++ = '.';
- *pout++ = '.';
- *pout++ = '.';
- }
-	if (pout > result)
-		pout--;		/* an empty IE writes nothing; avoid underflow */
-	*pout = 0;
- return result;
-}
-#endif
-
-/*
- * emit DATA_B3_CONF message
- */
-static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr,
- u16 appl, u16 msgid, int channel,
- u16 handle, u16 info)
-{
- struct sk_buff *cskb;
- u8 *msg;
-
- cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
- if (!cskb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- /* frequent message, avoid _cmsg overhead */
- msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN);
- CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN);
- CAPIMSG_SETAPPID(msg, appl);
- CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3);
- CAPIMSG_SETSUBCOMMAND(msg, CAPI_CONF);
- CAPIMSG_SETMSGID(msg, msgid);
- CAPIMSG_SETCONTROLLER(msg, ctr->cnr);
- CAPIMSG_SETPLCI_PART(msg, channel);
- CAPIMSG_SETNCCI_PART(msg, 1);
- CAPIMSG_SETHANDLE_CONF(msg, handle);
- CAPIMSG_SETINFO_CONF(msg, info);
-
- /* emit message */
- dump_rawmsg(DEBUG_MCMD, __func__, msg);
- capi_ctr_handle_message(ctr, appl, cskb);
-}
-
-
-/*
- * driver interface functions
- * ==========================
- */
-
-/**
- * gigaset_skb_sent() - acknowledge transmission of outgoing skb
- * @bcs: B channel descriptor structure.
- * @dskb: sent data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when the data in a
- * skb has been successfully sent, for signalling completion to the LL.
- */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap = bcs->ap;
- unsigned char *req = skb_mac_header(dskb);
- u16 flags;
-
- /* update statistics */
- ++bcs->trans_up;
-
- if (!ap) {
- gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
- return;
- }
-
- /* don't send further B3 messages if disconnected */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
- return;
- }
-
- /*
- * send DATA_B3_CONF if "delivery confirmation" bit was set in request;
- * otherwise it has already been sent by do_data_b3_req()
- */
- flags = CAPIMSG_FLAGS(req);
- if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)
- send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
- bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
- (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ?
- CapiFlagsNotSupportedByProtocol :
- CAPI_NOERROR);
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-/**
- * gigaset_skb_rcvd() - pass received skb to LL
- * @bcs: B channel descriptor structure.
- * @skb: received data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when user data has
- * been successfully received, for passing to the LL.
- * Warning: skb must not be accessed anymore!
- */
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap = bcs->ap;
- int len = skb->len;
-
- /* update statistics */
- bcs->trans_down++;
-
- if (!ap) {
- gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* don't send further B3 messages if disconnected */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /*
- * prepend DATA_B3_IND message to payload
- * Parameters: NCCI = 1, all others 0/unused
- * frequent message, avoid _cmsg overhead
- */
- skb_push(skb, CAPI_DATA_B3_REQ_LEN);
- CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
- CAPIMSG_SETAPPID(skb->data, ap->id);
- CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
- CAPIMSG_SETSUBCOMMAND(skb->data, CAPI_IND);
- CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
- CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
- CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
- CAPIMSG_SETNCCI_PART(skb->data, 1);
- /* Data parameter not used */
- CAPIMSG_SETDATALEN(skb->data, len);
- /* Data handle parameter not used */
- CAPIMSG_SETFLAGS(skb->data, 0);
- /* Data64 parameter not present */
-
- /* emit message */
- dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
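
The skb_push() above only succeeds if every receive skb was allocated with
headroom for the largest header that may be prepended. The hardware modules
are responsible for that when allocating receive buffers; a sketch of the
contract (payload_len stands in for the module's receive buffer size and is
hypothetical):

	skb = dev_alloc_skb(payload_len + CAPI_DATA_B3_REQ_LEN);
	if (skb)
		/* reserve headroom for the DATA_B3_IND header pushed later */
		skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);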
-
-/**
- * gigaset_isdn_rcv_err() - signal receive error
- * @bcs: B channel descriptor structure.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when a receive error
- * has occurred, for signalling to the LL.
- */
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
- /* if currently ignoring packets, just count down */
- if (bcs->ignore) {
- bcs->ignore--;
- return;
- }
-
- /* update statistics */
- bcs->corrupted++;
-
- /* ToDo: signal error -> LL */
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-/**
- * gigaset_isdn_icall() - signal incoming call
- * @at_state: connection state structure.
- *
- * Called by main module at tasklet level to notify the LL that an incoming
- * call has been received. @at_state contains the parameters of the call.
- *
- * Return value: call disposition (ICALL_*)
- */
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
- struct cardstate *cs = at_state->cs;
- struct bc_state *bcs = at_state->bcs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap;
- u32 actCIPmask;
- struct sk_buff *skb;
- unsigned int msgsize;
- unsigned long flags;
- int i;
-
- /*
- * ToDo: signal calls without a free B channel, too
- * (requires a u8 handle for the at_state structure that can
- * be stored in the PLCI and used in the CONNECT_RESP message
- * handler to retrieve it)
- */
- if (!bcs)
- return ICALL_IGNORE;
-
- /* prepare CONNECT_IND message, using B channel number as PLCI */
- capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
- iif->ctr.cnr | ((bcs->channel + 1) << 8));
-
- /* minimum size, all structs empty */
- msgsize = CAPI_CONNECT_IND_BASELEN;
-
- /* Bearer Capability (mandatory) */
- if (at_state->str_var[STR_ZBC]) {
- /* pass on BC from Gigaset */
- if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
- MAX_BC_OCTETS) < 0) {
- dev_warn(cs->dev, "RING ignored - bad BC %s\n",
- at_state->str_var[STR_ZBC]);
- return ICALL_IGNORE;
- }
-
- /* look up corresponding CIP value */
- iif->hcmsg.CIPValue = 0; /* default if nothing found */
- for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
- if (cip2bchlc[i].bc != NULL &&
- cip2bchlc[i].hlc == NULL &&
- !strcmp(cip2bchlc[i].bc,
- at_state->str_var[STR_ZBC])) {
- iif->hcmsg.CIPValue = i;
- break;
- }
- } else {
- /* no BC (internal call): assume CIP 1 (speech, A-law) */
- iif->hcmsg.CIPValue = 1;
- encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
- }
- iif->hcmsg.BC = iif->bc_buf;
- msgsize += iif->hcmsg.BC[0];
-
- /* High Layer Compatibility (optional) */
- if (at_state->str_var[STR_ZHLC]) {
- /* pass on HLC from Gigaset */
- if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
- MAX_HLC_OCTETS) < 0) {
- dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
- at_state->str_var[STR_ZHLC]);
- return ICALL_IGNORE;
- }
- iif->hcmsg.HLC = iif->hlc_buf;
- msgsize += iif->hcmsg.HLC[0];
-
- /* look up corresponding CIP value */
- /* keep BC based CIP value if none found */
- if (at_state->str_var[STR_ZBC])
- for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
- if (cip2bchlc[i].hlc != NULL &&
- !strcmp(cip2bchlc[i].hlc,
- at_state->str_var[STR_ZHLC]) &&
- !strcmp(cip2bchlc[i].bc,
- at_state->str_var[STR_ZBC])) {
- iif->hcmsg.CIPValue = i;
- break;
- }
- }
-
- /* Called Party Number (optional) */
- if (at_state->str_var[STR_ZCPN]) {
- i = strlen(at_state->str_var[STR_ZCPN]);
- if (i > MAX_NUMBER_DIGITS) {
-			dev_warn(cs->dev, "RING ignored - bad number %s\n",
-				 at_state->str_var[STR_ZCPN]);
- return ICALL_IGNORE;
- }
- iif->cdpty_buf[0] = i + 1;
- iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
- memcpy(iif->cdpty_buf + 2, at_state->str_var[STR_ZCPN], i);
- iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
- msgsize += iif->hcmsg.CalledPartyNumber[0];
- }
-
- /* Calling Party Number (optional) */
- if (at_state->str_var[STR_NMBR]) {
- i = strlen(at_state->str_var[STR_NMBR]);
- if (i > MAX_NUMBER_DIGITS) {
-			dev_warn(cs->dev, "RING ignored - bad number %s\n",
-				 at_state->str_var[STR_NMBR]);
- return ICALL_IGNORE;
- }
- iif->cgpty_buf[0] = i + 2;
- iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
- iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
- memcpy(iif->cgpty_buf + 3, at_state->str_var[STR_NMBR], i);
- iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
- msgsize += iif->hcmsg.CallingPartyNumber[0];
- }
-
- /* remaining parameters (not supported, always left NULL):
- * - CalledPartySubaddress
- * - CallingPartySubaddress
- * - AdditionalInfo
- * - BChannelinformation
- * - Keypadfacility
- * - Useruserdata
- * - Facilitydataarray
- */
-
- gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
- iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
- format_ie(iif->hcmsg.BC));
- gig_dbg(DEBUG_CMD, "icall: HLC %s",
- format_ie(iif->hcmsg.HLC));
- gig_dbg(DEBUG_CMD, "icall: CgPty %s",
- format_ie(iif->hcmsg.CallingPartyNumber));
- gig_dbg(DEBUG_CMD, "icall: CdPty %s",
- format_ie(iif->hcmsg.CalledPartyNumber));
-
- /* scan application list for matching listeners */
- spin_lock_irqsave(&bcs->aplock, flags);
- if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) {
- dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
- __func__, bcs->ap, bcs->apconnstate);
- bcs->ap = NULL;
- bcs->apconnstate = APCONN_NONE;
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
- list_for_each_entry(ap, &iif->appls, ctrlist)
- if (actCIPmask & ap->listenCIPmask) {
- /* build CONNECT_IND message for this application */
- iif->hcmsg.ApplId = ap->id;
- iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
-
- skb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n",
- __func__);
- break;
- }
- if (capi_cmsg2message(&iif->hcmsg,
- __skb_put(skb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n",
- __func__);
- dev_kfree_skb_any(skb);
- break;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
-
- /* add to listeners on this B channel, update state */
- spin_lock_irqsave(&bcs->aplock, flags);
- ap->bcnext = bcs->ap;
- bcs->ap = ap;
- bcs->chstate |= CHS_NOTIFY_LL;
- bcs->apconnstate = APCONN_SETUP;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- /* emit message */
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
- }
-
- /*
- * Return "accept" if any listeners.
- * Gigaset will send ALERTING.
- * There doesn't seem to be a way to avoid this.
- */
- return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
-}
-
-/*
- * send a DISCONNECT_IND message to an application
- * does not sleep, clobbers the controller's hcmsg structure
- */
-static void send_disconnect_ind(struct bc_state *bcs,
- struct gigaset_capi_appl *ap, u16 reason)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct sk_buff *skb;
-
- if (bcs->apconnstate == APCONN_NONE)
- return;
-
- capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8));
- iif->hcmsg.Reason = reason;
- skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg,
- __skb_put(skb, CAPI_DISCONNECT_IND_LEN))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * send a DISCONNECT_B3_IND message to an application
- * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
- * does not sleep, clobbers the controller's hcmsg structure
- */
-static void send_disconnect_b3_ind(struct bc_state *bcs,
- struct gigaset_capi_appl *ap)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct sk_buff *skb;
-
- /* nothing to do if no logical connection active */
- if (bcs->apconnstate < APCONN_ACTIVE)
- return;
- bcs->apconnstate = APCONN_SETUP;
-
- capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
- skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg,
- __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
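
Both _IND builders above pack the CAPI address into one 32-bit word. As a
worked example, for controller number 1 and B channel 0:

	/*
	 *   adr = cnr | ((channel + 1) << 8) | (1 << 16)
	 *       = 0x01 | 0x0100 | 0x010000 = 0x010101
	 *
	 * byte 0 = controller, byte 1 = PLCI (channel + 1),
	 * bytes 2..3 = NCCI, always 1: one logical connection per channel.
	 */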
-
-/**
- * gigaset_isdn_connD() - signal D channel connect
- * @bcs: B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the D channel
- * connection has been established.
- */
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap;
- struct sk_buff *skb;
- unsigned int msgsize;
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->aplock, flags);
- ap = bcs->ap;
- if (!ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
- return;
- }
- if (bcs->apconnstate == APCONN_NONE) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- dev_warn(cs->dev, "%s: application %u not connected\n",
- __func__, ap->id);
- return;
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- while (ap->bcnext) {
- /* this should never happen */
- dev_warn(cs->dev, "%s: dropping extra application %u\n",
- __func__, ap->bcnext->id);
- send_disconnect_ind(bcs, ap->bcnext,
- CapiCallGivenToOtherApplication);
- ap->bcnext = ap->bcnext->bcnext;
- }
-
- /* prepare CONNECT_ACTIVE_IND message
- * Note: LLC not supported by device
- */
- capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8));
-
- /* minimum size, all structs empty */
- msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;
-
- /* ToDo: set parameter: Connected number
- * (requires ev-layer state machine extension to collect
- * ZCON device reply)
- */
-
- /* build and emit CONNECT_ACTIVE_IND message */
- skb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_hupD() - signal D channel hangup
- * @bcs: B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the D channel
- * connection has been shut down.
- */
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
- struct gigaset_capi_appl *ap;
- unsigned long flags;
-
- /*
- * ToDo: pass on reason code reported by device
- * (requires ev-layer state machine extension to collect
- * ZCAU device reply)
- */
- spin_lock_irqsave(&bcs->aplock, flags);
- while (bcs->ap != NULL) {
- ap = bcs->ap;
- bcs->ap = ap->bcnext;
- spin_unlock_irqrestore(&bcs->aplock, flags);
- send_disconnect_b3_ind(bcs, ap);
- send_disconnect_ind(bcs, ap, 0);
- spin_lock_irqsave(&bcs->aplock, flags);
- }
- bcs->apconnstate = APCONN_NONE;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-}
-
-/**
- * gigaset_isdn_connB() - signal B channel connect
- * @bcs: B channel descriptor structure.
- *
- * Called by main module at tasklet level to notify the LL that the B channel
- * connection has been established.
- */
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_ctr *iif = cs->iif;
- struct gigaset_capi_appl *ap;
- struct sk_buff *skb;
- unsigned long flags;
- unsigned int msgsize;
- u8 command;
-
- spin_lock_irqsave(&bcs->aplock, flags);
- ap = bcs->ap;
- if (!ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
- return;
- }
- if (!bcs->apconnstate) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- dev_warn(cs->dev, "%s: application %u not connected\n",
- __func__, ap->id);
- return;
- }
-
- /*
- * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
- * otherwise we have to emit CONNECT_B3_IND first, and follow up with
- * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
- * Parameters in both cases always: NCCI = 1, NCPI empty
- */
- if (bcs->apconnstate >= APCONN_ACTIVE) {
- command = CAPI_CONNECT_B3_ACTIVE;
- msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
- } else {
- command = CAPI_CONNECT_B3;
- msgsize = CAPI_CONNECT_B3_IND_BASELEN;
- }
- bcs->apconnstate = APCONN_ACTIVE;
-
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- while (ap->bcnext) {
- /* this should never happen */
- dev_warn(cs->dev, "%s: dropping extra application %u\n",
- __func__, ap->bcnext->id);
- send_disconnect_ind(bcs, ap->bcnext,
- CapiCallGivenToOtherApplication);
- ap->bcnext = ap->bcnext->bcnext;
- }
-
- capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
- ap->nextMessageNumber++,
- iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
- skb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!skb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/**
- * gigaset_isdn_hupB() - signal B channel hangup
- * @bcs: B channel descriptor structure.
- *
- * Called by main module to notify the LL that the B channel connection has
- * been shut down.
- */
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
- struct gigaset_capi_appl *ap = bcs->ap;
-
- /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
-
- if (!ap) {
- gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
- return;
- }
-
- send_disconnect_b3_ind(bcs, ap);
-}
-
-/**
- * gigaset_isdn_start() - signal device availability
- * @cs: device descriptor structure.
- *
- * Called by main module to notify the LL that the device is available for
- * use.
- */
-void gigaset_isdn_start(struct cardstate *cs)
-{
- struct gigaset_capi_ctr *iif = cs->iif;
-
- /* fill profile data: manufacturer name */
- strcpy(iif->ctr.manu, "Siemens");
- /* CAPI and device version */
- iif->ctr.version.majorversion = 2; /* CAPI 2.0 */
- iif->ctr.version.minorversion = 0;
- /* ToDo: check/assert cs->gotfwver? */
- iif->ctr.version.majormanuversion = cs->fwver[0];
- iif->ctr.version.minormanuversion = cs->fwver[1];
- /* number of B channels supported */
- iif->ctr.profile.nbchannel = cs->channels;
- /* global options: internal controller, supplementary services */
- iif->ctr.profile.goptions = 0x11;
- /* B1 protocols: 64 kbit/s HDLC or transparent */
- iif->ctr.profile.support1 = 0x03;
- /* B2 protocols: transparent only */
- /* ToDo: X.75 SLP ? */
- iif->ctr.profile.support2 = 0x02;
- /* B3 protocols: transparent only */
- iif->ctr.profile.support3 = 0x01;
- /* no serial number */
- strcpy(iif->ctr.serial, "0");
- capi_ctr_ready(&iif->ctr);
-}
-
-/**
- * gigaset_isdn_stop() - signal device unavailability
- * @cs: device descriptor structure.
- *
- * Called by main module to notify the LL that the device is no longer
- * available for use.
- */
-void gigaset_isdn_stop(struct cardstate *cs)
-{
- struct gigaset_capi_ctr *iif = cs->iif;
- capi_ctr_down(&iif->ctr);
-}
-
-/*
- * kernel CAPI callback methods
- * ============================
- */
-
-/*
- * register CAPI application
- */
-static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
- capi_register_params *rp)
-{
- struct gigaset_capi_ctr *iif
- = container_of(ctr, struct gigaset_capi_ctr, ctr);
- struct cardstate *cs = ctr->driverdata;
- struct gigaset_capi_appl *ap;
-
- gig_dbg(DEBUG_CMD, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u",
- __func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
-
- list_for_each_entry(ap, &iif->appls, ctrlist)
- if (ap->id == appl) {
- dev_notice(cs->dev,
- "application %u already registered\n", appl);
- return;
- }
-
- ap = kzalloc(sizeof(*ap), GFP_KERNEL);
- if (!ap) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- ap->id = appl;
- ap->rp = *rp;
-
- list_add(&ap->ctrlist, &iif->appls);
- dev_info(cs->dev, "application %u registered\n", ap->id);
-}
-
-/*
- * remove CAPI application from channel
- * helper function to keep indentation levels down and stay in 80 columns
- */
-static inline void remove_appl_from_channel(struct bc_state *bcs,
- struct gigaset_capi_appl *ap)
-{
- struct cardstate *cs = bcs->cs;
- struct gigaset_capi_appl *bcap;
- unsigned long flags;
- int prevconnstate;
-
- spin_lock_irqsave(&bcs->aplock, flags);
- bcap = bcs->ap;
- if (bcap == NULL) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
-
- /* check first application on channel */
- if (bcap == ap) {
- bcs->ap = ap->bcnext;
- if (bcs->ap != NULL) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
-
- /* none left, clear channel state */
- prevconnstate = bcs->apconnstate;
- bcs->apconnstate = APCONN_NONE;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- if (prevconnstate == APCONN_ACTIVE) {
- dev_notice(cs->dev, "%s: hanging up channel %u\n",
- __func__, bcs->channel);
- gigaset_add_event(cs, &bcs->at_state,
- EV_HUP, NULL, 0, NULL);
- gigaset_schedule_event(cs);
- }
- return;
- }
-
- /* check remaining list */
- do {
- if (bcap->bcnext == ap) {
- bcap->bcnext = bcap->bcnext->bcnext;
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
- bcap = bcap->bcnext;
- } while (bcap != NULL);
- spin_unlock_irqrestore(&bcs->aplock, flags);
-}
-
-/*
- * release CAPI application
- */
-static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
-{
- struct gigaset_capi_ctr *iif
- = container_of(ctr, struct gigaset_capi_ctr, ctr);
- struct cardstate *cs = iif->ctr.driverdata;
- struct gigaset_capi_appl *ap, *tmp;
- unsigned ch;
-
- gig_dbg(DEBUG_CMD, "%s [%u]", __func__, appl);
-
- list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
- if (ap->id == appl) {
- /* remove from any channels */
- for (ch = 0; ch < cs->channels; ch++)
- remove_appl_from_channel(&cs->bcs[ch], ap);
-
- /* remove from registration list */
- list_del(&ap->ctrlist);
- kfree(ap);
- dev_info(cs->dev, "application %u released\n", appl);
- }
-}
-
-/*
- * =====================================================================
- * outgoing CAPI message handler
- * =====================================================================
- */
-
-/*
- * helper function: emit reply message with given Info value
- */
-static void send_conf(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb,
- u16 info)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /*
- * _CONF replies always only have NCCI and Info parameters
- * so they'll fit into the _REQ message skb
- */
- capi_cmsg_answer(&iif->acmsg);
- iif->acmsg.Info = info;
- if (capi_cmsg2message(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- __skb_trim(skb, CAPI_STDCONF_LEN);
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * process FACILITY_REQ message
- */
-static void do_facility_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct sk_buff *cskb;
- u8 *pparam;
- unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
- u16 function, info;
- static u8 confparam[10]; /* max. 9 octets + length byte */
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /*
- * Facility Request Parameter is not decoded by capi_message2cmsg()
- * encoding depends on Facility Selector
- */
- switch (cmsg->FacilitySelector) {
- case CAPI_FACILITY_DTMF: /* ToDo */
- info = CapiFacilityNotSupported;
- confparam[0] = 2; /* length */
- /* DTMF information: Unknown DTMF request */
- capimsg_setu16(confparam, 1, 2);
- break;
-
- case CAPI_FACILITY_V42BIS: /* not supported */
- info = CapiFacilityNotSupported;
- confparam[0] = 2; /* length */
- /* V.42 bis information: not available */
- capimsg_setu16(confparam, 1, 1);
- break;
-
- case CAPI_FACILITY_SUPPSVC:
- /* decode Function parameter */
- pparam = cmsg->FacilityRequestParameter;
- if (pparam == NULL || pparam[0] < 2) {
- dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
- "Facility Request Parameter");
- send_conf(iif, ap, skb, CapiIllMessageParmCoding);
- return;
- }
- function = CAPIMSG_U16(pparam, 1);
- switch (function) {
- case CAPI_SUPPSVC_GETSUPPORTED:
- info = CapiSuccess;
- /* Supplementary Service specific parameter */
- confparam[3] = 6; /* length */
- /* Supplementary services info: Success */
- capimsg_setu16(confparam, 4, CapiSuccess);
- /* Supported Services: none */
- capimsg_setu32(confparam, 6, 0);
- break;
- case CAPI_SUPPSVC_LISTEN:
- if (pparam[0] < 7 || pparam[3] < 4) {
- dev_notice(cs->dev, "%s: %s missing\n",
- "FACILITY_REQ", "Notification Mask");
- send_conf(iif, ap, skb,
- CapiIllMessageParmCoding);
- return;
- }
- if (CAPIMSG_U32(pparam, 4) != 0) {
- dev_notice(cs->dev,
- "%s: unsupported supplementary service notification mask 0x%x\n",
- "FACILITY_REQ", CAPIMSG_U32(pparam, 4));
- info = CapiFacilitySpecificFunctionNotSupported;
- confparam[3] = 2; /* length */
- capimsg_setu16(confparam, 4,
- CapiSupplementaryServiceNotSupported);
- break;
- }
- info = CapiSuccess;
- confparam[3] = 2; /* length */
- capimsg_setu16(confparam, 4, CapiSuccess);
- break;
-
- /* ToDo: add supported services */
-
- default:
- dev_notice(cs->dev,
- "%s: unsupported supplementary service function 0x%04x\n",
- "FACILITY_REQ", function);
- info = CapiFacilitySpecificFunctionNotSupported;
- /* Supplementary Service specific parameter */
- confparam[3] = 2; /* length */
- /* Supplementary services info: not supported */
- capimsg_setu16(confparam, 4,
- CapiSupplementaryServiceNotSupported);
- }
-
- /* Facility confirmation parameter */
- confparam[0] = confparam[3] + 3; /* total length */
- /* Function: copy from _REQ message */
- capimsg_setu16(confparam, 1, function);
- /* Supplementary Service specific parameter already set above */
- break;
-
- case CAPI_FACILITY_WAKEUP: /* ToDo */
- info = CapiFacilityNotSupported;
- confparam[0] = 2; /* length */
- /* Number of accepted awake request parameters: 0 */
- capimsg_setu16(confparam, 1, 0);
- break;
-
- default:
- info = CapiFacilityNotSupported;
- confparam[0] = 0; /* empty struct */
- }
-
- /* send FACILITY_CONF with given Info and confirmation parameter */
- dev_kfree_skb_any(skb);
- capi_cmsg_answer(cmsg);
- cmsg->Info = info;
- cmsg->FacilityConfirmationParameter = confparam;
- msgsize += confparam[0]; /* length */
- cskb = alloc_skb(msgsize, GFP_ATOMIC);
- if (!cskb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(cskb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
-}
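
To make the parameter packing concrete, this is the confparam buffer emitted
for a successful CAPI_SUPPSVC_GETSUPPORTED request (a worked example derived
from the assignments above; it fills 10 bytes, the buffer's declared maximum):

	/*
	 *   [0]    = 9       total struct length, excluding this byte
	 *   [1..2] = 0x0000  Function = GETSUPPORTED, copied from the request
	 *   [3]    = 6       length of the service-specific part
	 *   [4..5] = 0x0000  Supplementary Service Info = CapiSuccess
	 *   [6..9] = 0       Supported Services bit mask: none
	 */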
-
-
-/*
- * process LISTEN_REQ message
- * just store the masks in the application data structure
- */
-static void do_listen_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-
- /* store listening parameters */
- ap->listenInfoMask = iif->acmsg.InfoMask;
- ap->listenCIPmask = iif->acmsg.CIPmask;
- send_conf(iif, ap, skb, CapiSuccess);
-}
-
-/*
- * process ALERT_REQ message
- * nothing to do, Gigaset always alerts anyway
- */
-static void do_alert_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- send_conf(iif, ap, skb, CapiAlertAlreadySent);
-}
-
-/*
- * process CONNECT_REQ message
- * allocate a B channel, prepare dial commands, queue a DIAL event,
- * emit CONNECT_CONF reply
- */
-static void do_connect_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- char **commands;
- char *s;
- u8 *pp;
- unsigned long flags;
- int i, l, lbc, lhlc;
- u16 info;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* get free B channel & construct PLCI */
- bcs = gigaset_get_free_channel(cs);
- if (!bcs) {
- dev_notice(cs->dev, "%s: no B channel available\n",
- "CONNECT_REQ");
- send_conf(iif, ap, skb, CapiNoPlciAvailable);
- return;
- }
- spin_lock_irqsave(&bcs->aplock, flags);
- if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE)
- dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
- __func__, bcs->ap, bcs->apconnstate);
- ap->bcnext = NULL;
- bcs->ap = ap;
- bcs->apconnstate = APCONN_SETUP;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- bcs->rx_bufsize = ap->rp.datablklen;
- dev_kfree_skb(bcs->rx_skb);
- gigaset_new_rx_skb(bcs);
- cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
-
- /* build command table */
- commands = kcalloc(AT_NUM, sizeof(*commands), GFP_KERNEL);
- if (!commands)
- goto oom;
-
- /* encode parameter: Called party number */
- pp = cmsg->CalledPartyNumber;
- if (pp == NULL || *pp == 0) {
- dev_notice(cs->dev, "%s: %s missing\n",
- "CONNECT_REQ", "Called party number");
- info = CapiIllMessageParmCoding;
- goto error;
- }
- l = *pp++;
- /* check type of number/numbering plan byte */
- switch (*pp) {
- case 0x80: /* unknown type / unknown numbering plan */
- case 0x81: /* unknown type / ISDN/Telephony numbering plan */
- break;
- default: /* others: warn about potential misinterpretation */
- dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
- "CONNECT_REQ", "Called party number", *pp);
- }
- pp++;
- l--;
- /* translate "**" internal call prefix to CTP value */
- if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
- s = "^SCTP=0\r";
- pp += 2;
- l -= 2;
- } else {
- s = "^SCTP=1\r";
- }
- commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
- if (!commands[AT_TYPE])
- goto oom;
- commands[AT_DIAL] = kmalloc(l + 3, GFP_KERNEL);
- if (!commands[AT_DIAL])
- goto oom;
- snprintf(commands[AT_DIAL], l + 3, "D%.*s\r", l, pp);
-
- /* encode parameter: Calling party number */
- pp = cmsg->CallingPartyNumber;
- if (pp != NULL && *pp > 0) {
- l = *pp++;
-
- /* check type of number/numbering plan byte */
- /* ToDo: allow for/handle Ext=1? */
- switch (*pp) {
- case 0x00: /* unknown type / unknown numbering plan */
- case 0x01: /* unknown type / ISDN/Telephony num. plan */
- break;
- default:
- dev_notice(cs->dev,
- "%s: %s type/plan 0x%02x unsupported\n",
- "CONNECT_REQ", "Calling party number", *pp);
- }
- pp++;
- l--;
-
- /* check presentation indicator */
- if (!l) {
- dev_notice(cs->dev, "%s: %s IE truncated\n",
- "CONNECT_REQ", "Calling party number");
- info = CapiIllMessageParmCoding;
- goto error;
- }
- switch (*pp & 0xfc) { /* ignore Screening indicator */
- case 0x80: /* Presentation allowed */
- s = "^SCLIP=1\r";
- break;
- case 0xa0: /* Presentation restricted */
- s = "^SCLIP=0\r";
- break;
- default:
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_REQ",
- "Presentation/Screening indicator",
- *pp);
- s = "^SCLIP=1\r";
- }
- commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
- if (!commands[AT_CLIP])
- goto oom;
- pp++;
- l--;
-
- if (l) {
- /* number */
- commands[AT_MSN] = kmalloc(l + 8, GFP_KERNEL);
- if (!commands[AT_MSN])
- goto oom;
-			snprintf(commands[AT_MSN], l + 8, "^SMSN=%.*s\r",
-				 l, pp);	/* pp is not NUL-terminated */
- }
- }
-
- /* check parameter: CIP Value */
- if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
- (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
- dev_notice(cs->dev, "%s: unknown CIP value %d\n",
- "CONNECT_REQ", cmsg->CIPValue);
- info = CapiCipValueUnknown;
- goto error;
- }
-
- /*
- * check/encode parameters: BC & HLC
- * must be encoded together as device doesn't accept HLC separately
- * explicit parameters override values derived from CIP
- */
-
- /* determine lengths */
- if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
- lbc = 2 * cmsg->BC[0];
- else if (cip2bchlc[cmsg->CIPValue].bc) /* BC derived from CIP */
- lbc = strlen(cip2bchlc[cmsg->CIPValue].bc);
- else /* no BC */
- lbc = 0;
- if (cmsg->HLC && cmsg->HLC[0]) /* HLC specified explicitly */
- lhlc = 2 * cmsg->HLC[0];
- else if (cip2bchlc[cmsg->CIPValue].hlc) /* HLC derived from CIP */
- lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc);
- else /* no HLC */
- lhlc = 0;
-
- if (lbc) {
- /* have BC: allocate and assemble command string */
- l = lbc + 7; /* "^SBC=" + value + "\r" + null byte */
- if (lhlc)
- l += lhlc + 7; /* ";^SHLC=" + value */
- commands[AT_BC] = kmalloc(l, GFP_KERNEL);
- if (!commands[AT_BC])
- goto oom;
- strcpy(commands[AT_BC], "^SBC=");
- if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
- decode_ie(cmsg->BC, commands[AT_BC] + 5);
- else /* BC derived from CIP */
- strcpy(commands[AT_BC] + 5,
- cip2bchlc[cmsg->CIPValue].bc);
- if (lhlc) {
- strcpy(commands[AT_BC] + lbc + 5, ";^SHLC=");
- if (cmsg->HLC && cmsg->HLC[0])
- /* HLC specified explicitly */
- decode_ie(cmsg->HLC,
- commands[AT_BC] + lbc + 12);
- else /* HLC derived from CIP */
- strcpy(commands[AT_BC] + lbc + 12,
- cip2bchlc[cmsg->CIPValue].hlc);
- }
- strcpy(commands[AT_BC] + l - 2, "\r");
- } else {
- /* no BC */
- if (lhlc) {
- dev_notice(cs->dev, "%s: cannot set HLC without BC\n",
- "CONNECT_REQ");
- info = CapiIllMessageParmCoding; /* ? */
- goto error;
- }
- }
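
	/*
	 * Worked example of the length arithmetic above: for CIP 16
	 * (Telephony), BC = "8090A3" (lbc = 6) and HLC = "9181" (lhlc = 4),
	 * so l = (6 + 7) + (4 + 7) = 24 and the assembled command is
	 * "^SBC=8090A3;^SHLC=9181\r", 23 characters plus the terminating
	 * null byte, exactly filling the allocation.
	 */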
-
- /* check/encode parameter: B Protocol */
- if (cmsg->BProtocol == CAPI_DEFAULT) {
- bcs->proto2 = L2_HDLC;
- dev_warn(cs->dev,
- "B2 Protocol X.75 SLP unsupported, using Transparent\n");
- } else {
- switch (cmsg->B1protocol) {
- case 0:
- bcs->proto2 = L2_HDLC;
- break;
- case 1:
- bcs->proto2 = L2_VOICE;
- break;
- default:
- dev_warn(cs->dev,
- "B1 Protocol %u unsupported, using Transparent\n",
- cmsg->B1protocol);
- bcs->proto2 = L2_VOICE;
- }
- if (cmsg->B2protocol != 1)
- dev_warn(cs->dev,
- "B2 Protocol %u unsupported, using Transparent\n",
- cmsg->B2protocol);
- if (cmsg->B3protocol != 0)
- dev_warn(cs->dev,
- "B3 Protocol %u unsupported, using Transparent\n",
- cmsg->B3protocol);
- ignore_cstruct_param(cs, cmsg->B1configuration,
- "CONNECT_REQ", "B1 Configuration");
- ignore_cstruct_param(cs, cmsg->B2configuration,
- "CONNECT_REQ", "B2 Configuration");
- ignore_cstruct_param(cs, cmsg->B3configuration,
- "CONNECT_REQ", "B3 Configuration");
- }
- commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
- if (!commands[AT_PROTO])
- goto oom;
- snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
-
- /* ToDo: check/encode remaining parameters */
- ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
- "CONNECT_REQ", "Called pty subaddr");
- ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
- "CONNECT_REQ", "Calling pty subaddr");
- ignore_cstruct_param(cs, cmsg->LLC,
- "CONNECT_REQ", "LLC");
- if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
- ignore_cstruct_param(cs, cmsg->BChannelinformation,
- "CONNECT_REQ", "B Channel Information");
- ignore_cstruct_param(cs, cmsg->Keypadfacility,
- "CONNECT_REQ", "Keypad Facility");
- ignore_cstruct_param(cs, cmsg->Useruserdata,
- "CONNECT_REQ", "User-User Data");
- ignore_cstruct_param(cs, cmsg->Facilitydataarray,
- "CONNECT_REQ", "Facility Data Array");
- }
-
- /* encode parameter: B channel to use */
- commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
- if (!commands[AT_ISO])
- goto oom;
- snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
- (unsigned) bcs->channel + 1);
-
- /* queue & schedule EV_DIAL event */
- if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
- bcs->at_state.seq_index, NULL)) {
- info = CAPI_MSGOSRESOURCEERR;
- goto error;
- }
- gigaset_schedule_event(cs);
- send_conf(iif, ap, skb, CapiSuccess);
- return;
-
-oom:
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- info = CAPI_MSGOSRESOURCEERR;
-error:
- if (commands)
- for (i = 0; i < AT_NUM; i++)
- kfree(commands[i]);
- kfree(commands);
- gigaset_free_channel(bcs);
- send_conf(iif, ap, skb, info);
-}
-
-/*
- * process CONNECT_RESP message
- * checks protocol parameters and queues an ACCEPT or HUP event
- */
-static void do_connect_resp(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- struct gigaset_capi_appl *oap;
- unsigned long flags;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
- dev_kfree_skb_any(skb);
-
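-	/* CAPI packs controller (bits 0-7), PLCI (bits 8-15) and NCCI
-	 * (bits 16-31) into a single 32-bit address word; the driver uses
-	 * PLCI values 1..cs->channels to identify its B channels */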
- /* extract and check channel number from PLCI */
- channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
- if (!channel || channel > cs->channels) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
- return;
- }
- bcs = cs->bcs + channel - 1;
-
- switch (cmsg->Reject) {
- case 0: /* Accept */
- /* drop all competing applications, keep only this one */
- spin_lock_irqsave(&bcs->aplock, flags);
- while (bcs->ap != NULL) {
- oap = bcs->ap;
- bcs->ap = oap->bcnext;
- if (oap != ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- send_disconnect_ind(bcs, oap,
- CapiCallGivenToOtherApplication);
- spin_lock_irqsave(&bcs->aplock, flags);
- }
- }
- ap->bcnext = NULL;
- bcs->ap = ap;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- bcs->rx_bufsize = ap->rp.datablklen;
- dev_kfree_skb(bcs->rx_skb);
- gigaset_new_rx_skb(bcs);
- bcs->chstate |= CHS_NOTIFY_LL;
-
- /* check/encode B channel protocol */
- if (cmsg->BProtocol == CAPI_DEFAULT) {
- bcs->proto2 = L2_HDLC;
- dev_warn(cs->dev,
- "B2 Protocol X.75 SLP unsupported, using Transparent\n");
- } else {
- switch (cmsg->B1protocol) {
- case 0:
- bcs->proto2 = L2_HDLC;
- break;
- case 1:
- bcs->proto2 = L2_VOICE;
- break;
- default:
- dev_warn(cs->dev,
- "B1 Protocol %u unsupported, using Transparent\n",
- cmsg->B1protocol);
- bcs->proto2 = L2_VOICE;
- }
- if (cmsg->B2protocol != 1)
- dev_warn(cs->dev,
- "B2 Protocol %u unsupported, using Transparent\n",
- cmsg->B2protocol);
- if (cmsg->B3protocol != 0)
- dev_warn(cs->dev,
- "B3 Protocol %u unsupported, using Transparent\n",
- cmsg->B3protocol);
- ignore_cstruct_param(cs, cmsg->B1configuration,
- "CONNECT_RESP", "B1 Configuration");
- ignore_cstruct_param(cs, cmsg->B2configuration,
- "CONNECT_RESP", "B2 Configuration");
- ignore_cstruct_param(cs, cmsg->B3configuration,
- "CONNECT_RESP", "B3 Configuration");
- }
-
- /* ToDo: check/encode remaining parameters */
- ignore_cstruct_param(cs, cmsg->ConnectedNumber,
- "CONNECT_RESP", "Connected Number");
- ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
- "CONNECT_RESP", "Connected Subaddress");
- ignore_cstruct_param(cs, cmsg->LLC,
- "CONNECT_RESP", "LLC");
- if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
- ignore_cstruct_param(cs, cmsg->BChannelinformation,
- "CONNECT_RESP", "BChannel Information");
- ignore_cstruct_param(cs, cmsg->Keypadfacility,
- "CONNECT_RESP", "Keypad Facility");
- ignore_cstruct_param(cs, cmsg->Useruserdata,
- "CONNECT_RESP", "User-User Data");
- ignore_cstruct_param(cs, cmsg->Facilitydataarray,
- "CONNECT_RESP", "Facility Data Array");
- }
-
- /* Accept call */
- if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
- EV_ACCEPT, NULL, 0, NULL))
- return;
- gigaset_schedule_event(cs);
- return;
-
- case 1: /* Ignore */
- /* send DISCONNECT_IND to this application */
- send_disconnect_ind(bcs, ap, 0);
-
- /* remove it from the list of listening apps */
- spin_lock_irqsave(&bcs->aplock, flags);
- if (bcs->ap == ap) {
- bcs->ap = ap->bcnext;
- if (bcs->ap == NULL) {
- /* last one: stop ev-layer hupD notifications */
- bcs->apconnstate = APCONN_NONE;
- bcs->chstate &= ~CHS_NOTIFY_LL;
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
- for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
- if (oap->bcnext == ap) {
- oap->bcnext = oap->bcnext->bcnext;
- spin_unlock_irqrestore(&bcs->aplock, flags);
- return;
- }
- }
- spin_unlock_irqrestore(&bcs->aplock, flags);
- dev_err(cs->dev, "%s: application %u not found\n",
- __func__, ap->id);
- return;
-
- default: /* Reject */
- /* drop all competing applications, keep only this one */
- spin_lock_irqsave(&bcs->aplock, flags);
- while (bcs->ap != NULL) {
- oap = bcs->ap;
- bcs->ap = oap->bcnext;
- if (oap != ap) {
- spin_unlock_irqrestore(&bcs->aplock, flags);
- send_disconnect_ind(bcs, oap,
- CapiCallGivenToOtherApplication);
- spin_lock_irqsave(&bcs->aplock, flags);
- }
- }
- ap->bcnext = NULL;
- bcs->ap = ap;
- spin_unlock_irqrestore(&bcs->aplock, flags);
-
- /* reject call - will trigger DISCONNECT_IND for this app */
- dev_info(cs->dev, "%s: Reject=%x\n",
- "CONNECT_RESP", cmsg->Reject);
- if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state,
- EV_HUP, NULL, 0, NULL))
- return;
- gigaset_schedule_event(cs);
- return;
- }
-}
-
-/*
- * process CONNECT_B3_REQ message
- * build NCCI and emit CONNECT_B3_CONF reply
- */
-static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number from PLCI */
- channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
- if (!channel || channel > cs->channels) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = &cs->bcs[channel - 1];
-
- /* mark logical connection active */
- bcs->apconnstate = APCONN_ACTIVE;
-
- /* build NCCI: always 1 (one B3 connection only) */
- cmsg->adr.adrNCCI |= 1 << 16;
-
- /* NCPI parameter: not applicable for B3 Transparent */
- ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
- send_conf(iif, ap, skb,
- (cmsg->NCPI && cmsg->NCPI[0]) ?
- CapiNcpiNotSupportedByProtocol : CapiSuccess);
-}
-
-/*
- * process CONNECT_B3_RESP message
- * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
- * or queue EV_HUP and emit DISCONNECT_B3_IND.
- * The emitted message is always shorter than the received one,
- * allowing the skb to be reused.
- */
-static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- int channel;
- unsigned int msgsize;
- u8 command;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number and NCCI */
- channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
- if (!channel || channel > cs->channels ||
- ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
- dev_kfree_skb_any(skb);
- return;
- }
- bcs = &cs->bcs[channel - 1];
-
- if (cmsg->Reject) {
- /* Reject: clear B3 connect received flag */
- bcs->apconnstate = APCONN_SETUP;
-
- /* trigger hangup, causing eventual DISCONNECT_IND */
- if (!gigaset_add_event(cs, &bcs->at_state,
- EV_HUP, NULL, 0, NULL)) {
- dev_kfree_skb_any(skb);
- return;
- }
- gigaset_schedule_event(cs);
-
- /* emit DISCONNECT_B3_IND */
- command = CAPI_DISCONNECT_B3;
- msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
- } else {
- /*
- * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
- * we only send CONNECT_B3_IND if the B channel is up
- */
- command = CAPI_CONNECT_B3_ACTIVE;
- msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
- }
- capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
- ap->nextMessageNumber++, cmsg->adr.adrNCCI);
- __skb_trim(skb, msgsize);
- if (capi_cmsg2message(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, skb);
-}
-
-/*
- * process DISCONNECT_REQ message
- * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
- * emit DISCONNECT_CONF reply
- */
-static void do_disconnect_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- _cmsg *b3cmsg;
- struct sk_buff *b3skb;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number from PLCI */
- channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
- if (!channel || channel > cs->channels) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = cs->bcs + channel - 1;
-
- /* ToDo: process parameter: Additional info */
- if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
- ignore_cstruct_param(cs, cmsg->BChannelinformation,
- "DISCONNECT_REQ", "B Channel Information");
- ignore_cstruct_param(cs, cmsg->Keypadfacility,
- "DISCONNECT_REQ", "Keypad Facility");
- ignore_cstruct_param(cs, cmsg->Useruserdata,
- "DISCONNECT_REQ", "User-User Data");
- ignore_cstruct_param(cs, cmsg->Facilitydataarray,
- "DISCONNECT_REQ", "Facility Data Array");
- }
-
- /* skip if DISCONNECT_IND already sent */
- if (!bcs->apconnstate)
- return;
-
- /* check for active logical connection */
- if (bcs->apconnstate >= APCONN_ACTIVE) {
- /* clear it */
- bcs->apconnstate = APCONN_SETUP;
-
- /*
- * emit DISCONNECT_B3_IND with cause 0x3301
- * use separate cmsg structure, as the content of iif->acmsg
- * is still needed for creating the _CONF message
- */
- b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
- if (!b3cmsg) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
- capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
- ap->nextMessageNumber++,
- cmsg->adr.adrPLCI | (1 << 16));
- b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
- b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
- if (b3skb == NULL) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- kfree(b3cmsg);
- return;
- }
- if (capi_cmsg2message(b3cmsg,
- __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN))) {
- dev_err(cs->dev, "%s: message parser failure\n",
- __func__);
- kfree(b3cmsg);
- dev_kfree_skb_any(b3skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
- kfree(b3cmsg);
- capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
- }
-
- /* trigger hangup, causing eventual DISCONNECT_IND */
- if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
- gigaset_schedule_event(cs);
-
- /* emit reply */
- send_conf(iif, ap, skb, CapiSuccess);
-}
-
-/*
- * process DISCONNECT_B3_REQ message
- * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
- */
-static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- _cmsg *cmsg = &iif->acmsg;
- struct bc_state *bcs;
- int channel;
-
- /* decode message */
- if (capi_message2cmsg(cmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, cmsg);
-
- /* extract and check channel number and NCCI */
- channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
- if (!channel || channel > cs->channels ||
- ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = &cs->bcs[channel - 1];
-
- /* reject if logical connection not active */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- send_conf(iif, ap, skb,
- CapiMessageNotSupportedInCurrentState);
- return;
- }
-
- /* trigger hangup, causing eventual DISCONNECT_B3_IND */
- if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
- gigaset_schedule_event(cs);
-
- /* NCPI parameter: not applicable for B3 Transparent */
- ignore_cstruct_param(cs, cmsg->NCPI,
- "DISCONNECT_B3_REQ", "NCPI");
- send_conf(iif, ap, skb,
- (cmsg->NCPI && cmsg->NCPI[0]) ?
- CapiNcpiNotSupportedByProtocol : CapiSuccess);
-}
-
-/*
- * process DATA_B3_REQ message
- */
-static void do_data_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
- struct bc_state *bcs;
- int channel = CAPIMSG_PLCI_PART(skb->data);
- u16 ncci = CAPIMSG_NCCI_PART(skb->data);
- u16 msglen = CAPIMSG_LEN(skb->data);
- u16 datalen = CAPIMSG_DATALEN(skb->data);
- u16 flags = CAPIMSG_FLAGS(skb->data);
- u16 msgid = CAPIMSG_MSGID(skb->data);
- u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
-
- /* frequent message, avoid _cmsg overhead */
- dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
-
- /* check parameters */
- if (channel == 0 || channel > cs->channels || ncci != 1) {
- dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
- "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
- send_conf(iif, ap, skb, CapiIllContrPlciNcci);
- return;
- }
- bcs = &cs->bcs[channel - 1];
- if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
- dev_notice(cs->dev, "%s: unexpected length %d\n",
- "DATA_B3_REQ", msglen);
- if (msglen + datalen != skb->len)
- dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
- "DATA_B3_REQ", msglen, datalen, skb->len);
- if (msglen + datalen > skb->len) {
- /* message too short for announced data length */
- send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
- return;
- }
- if (flags & CAPI_FLAGS_RESERVED) {
- dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
- "DATA_B3_REQ", flags);
- send_conf(iif, ap, skb, CapiIllMessageParmCoding);
- return;
- }
-
- /* reject if logical connection not active */
- if (bcs->apconnstate < APCONN_ACTIVE) {
- send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
- return;
- }
-
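-	/* the CAPI header is stripped from the payload but kept in the
-	 * skb's MAC-header area, so gigaset_skb_sent() can later build the
-	 * delivery confirmation from it */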
- /* pull CAPI message into link layer header */
- skb_reset_mac_header(skb);
- skb->mac_len = msglen;
- skb_pull(skb, msglen);
-
- /* pass to device-specific module */
- if (cs->ops->send_skb(bcs, skb) < 0) {
- send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
- return;
- }
-
- /*
- * DATA_B3_CONF will be sent by gigaset_skb_sent() only if "delivery
- * confirmation" bit is set; otherwise we have to send it now
- */
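-	/* any other flag bit still set at this point is an unsupported
-	 * qualifier and is reported in the confirmation */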
- if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION))
- send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle,
- flags ? CapiFlagsNotSupportedByProtocol
- : CAPI_NOERROR);
-}
-
-/*
- * process RESET_B3_REQ message
- * just always reply "not supported by current protocol"
- */
-static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- send_conf(iif, ap, skb,
- CapiResetProcedureNotSupportedByCurrentProtocol);
-}
-
-/*
- * unsupported CAPI message handler
- */
-static void do_unsupported(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
-}
-
-/*
- * CAPI message handler: no-op
- */
-static void do_nothing(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- struct cardstate *cs = iif->ctr.driverdata;
-
- /* decode message */
- if (capi_message2cmsg(&iif->acmsg, skb->data)) {
- dev_err(cs->dev, "%s: message parser failure\n", __func__);
- dev_kfree_skb_any(skb);
- return;
- }
- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
- dev_kfree_skb_any(skb);
-}
-
-static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
- struct gigaset_capi_appl *ap,
- struct sk_buff *skb)
-{
- dump_rawmsg(DEBUG_MCMD, __func__, skb->data);
- dev_kfree_skb_any(skb);
-}
-
-/* table of outgoing CAPI message handlers with lookup function */
-typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
- struct gigaset_capi_appl *,
- struct sk_buff *);
-
-static struct {
- u16 cmd;
- capi_send_handler_t handler;
-} capi_send_handler_table[] = {
- /* most frequent messages first for faster lookup */
- { CAPI_DATA_B3_REQ, do_data_b3_req },
- { CAPI_DATA_B3_RESP, do_data_b3_resp },
-
- { CAPI_ALERT_REQ, do_alert_req },
- { CAPI_CONNECT_ACTIVE_RESP, do_nothing },
- { CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
- { CAPI_CONNECT_B3_REQ, do_connect_b3_req },
- { CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
- { CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
- { CAPI_CONNECT_REQ, do_connect_req },
- { CAPI_CONNECT_RESP, do_connect_resp },
- { CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
- { CAPI_DISCONNECT_B3_RESP, do_nothing },
- { CAPI_DISCONNECT_REQ, do_disconnect_req },
- { CAPI_DISCONNECT_RESP, do_nothing },
- { CAPI_FACILITY_REQ, do_facility_req },
- { CAPI_FACILITY_RESP, do_nothing },
- { CAPI_LISTEN_REQ, do_listen_req },
- { CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
- { CAPI_RESET_B3_REQ, do_reset_b3_req },
- { CAPI_RESET_B3_RESP, do_nothing },
-
- /*
- * ToDo: support overlap sending (requires ev-layer state
- * machine extension to generate additional ATD commands)
- */
- { CAPI_INFO_REQ, do_unsupported },
- { CAPI_INFO_RESP, do_nothing },
-
- /*
- * ToDo: what's the proper response for these?
- */
- { CAPI_MANUFACTURER_REQ, do_nothing },
- { CAPI_MANUFACTURER_RESP, do_nothing },
-};
-
-/* look up handler */
-static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
-{
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
- if (capi_send_handler_table[i].cmd == cmd)
- return capi_send_handler_table[i].handler;
- return NULL;
-}
-
-
-/**
- * gigaset_send_message() - accept a CAPI message from an application
- * @ctr: controller descriptor structure.
- * @skb: CAPI message.
- *
- * Return value: CAPI error code
- * Note: capidrv (and probably others, too) uses the return value only to
- * decide whether it has to free the skb; it does so only if the result
- * is != CAPI_NOERROR (0)
- */
-static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
-{
- struct gigaset_capi_ctr *iif
- = container_of(ctr, struct gigaset_capi_ctr, ctr);
- struct cardstate *cs = ctr->driverdata;
- struct gigaset_capi_appl *ap;
- capi_send_handler_t handler;
-
- /* can only handle linear sk_buffs */
- if (skb_linearize(skb) < 0) {
- dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
- return CAPI_MSGOSRESOURCEERR;
- }
-
- /* retrieve application data structure */
- ap = get_appl(iif, CAPIMSG_APPID(skb->data));
- if (!ap) {
- dev_notice(cs->dev, "%s: application %u not registered\n",
- __func__, CAPIMSG_APPID(skb->data));
- return CAPI_ILLAPPNR;
- }
-
- /* look up command */
- handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
- if (!handler) {
- /* unknown/unsupported message type */
- if (printk_ratelimit())
- dev_notice(cs->dev, "%s: unsupported message %u\n",
- __func__, CAPIMSG_CMD(skb->data));
- return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
- }
-
-	/* serialize: the first caller runs handlers directly; concurrent
-	 * callers just enqueue their messages, and the first caller drains
-	 * the queue below */
- if (atomic_add_return(1, &iif->sendqlen) > 1) {
- /* queue behind other messages */
- skb_queue_tail(&iif->sendqueue, skb);
- return CAPI_NOERROR;
- }
-
- /* process message */
- handler(iif, ap, skb);
-
- /* process other messages arrived in the meantime */
- while (atomic_sub_return(1, &iif->sendqlen) > 0) {
- skb = skb_dequeue(&iif->sendqueue);
- if (!skb) {
- /* should never happen */
- dev_err(cs->dev, "%s: send queue empty\n", __func__);
- continue;
- }
- ap = get_appl(iif, CAPIMSG_APPID(skb->data));
- if (!ap) {
- /* could that happen? */
- dev_warn(cs->dev, "%s: application %u vanished\n",
- __func__, CAPIMSG_APPID(skb->data));
- continue;
- }
- handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
- if (!handler) {
- /* should never happen */
- dev_err(cs->dev, "%s: handler %x vanished\n",
- __func__, CAPIMSG_CMD(skb->data));
- continue;
- }
- handler(iif, ap, skb);
- }
-
- return CAPI_NOERROR;
-}
-
-/**
- * gigaset_procinfo() - build single line description for controller
- * @ctr: controller descriptor structure.
- *
- * Return value: pointer to generated string (null terminated)
- */
-static char *gigaset_procinfo(struct capi_ctr *ctr)
-{
- return ctr->name; /* ToDo: more? */
-}
-
-static int gigaset_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctr = m->private;
- struct cardstate *cs = ctr->driverdata;
- char *s;
- int i;
-
- seq_printf(m, "%-16s %s\n", "name", ctr->name);
- seq_printf(m, "%-16s %s %s\n", "dev",
- dev_driver_string(cs->dev), dev_name(cs->dev));
- seq_printf(m, "%-16s %d\n", "id", cs->myid);
- if (cs->gotfwver)
- seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
- cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
- seq_printf(m, "%-16s %d\n", "channels", cs->channels);
- seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
-
- switch (cs->mode) {
- case M_UNKNOWN:
- s = "unknown";
- break;
- case M_CONFIG:
- s = "config";
- break;
- case M_UNIMODEM:
- s = "Unimodem";
- break;
- case M_CID:
- s = "CID";
- break;
- default:
- s = "??";
- }
- seq_printf(m, "%-16s %s\n", "mode", s);
-
- switch (cs->mstate) {
- case MS_UNINITIALIZED:
- s = "uninitialized";
- break;
- case MS_INIT:
- s = "init";
- break;
- case MS_LOCKED:
- s = "locked";
- break;
- case MS_SHUTDOWN:
- s = "shutdown";
- break;
- case MS_RECOVER:
- s = "recover";
- break;
- case MS_READY:
- s = "ready";
- break;
- default:
- s = "??";
- }
- seq_printf(m, "%-16s %s\n", "mstate", s);
-
- seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
- seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
- seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
- seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
-
- for (i = 0; i < cs->channels; i++) {
- seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
- cs->bcs[i].corrupted);
- seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
- cs->bcs[i].trans_down);
- seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
- cs->bcs[i].trans_up);
- seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
- cs->bcs[i].chstate);
- switch (cs->bcs[i].proto2) {
- case L2_BITSYNC:
- s = "bitsync";
- break;
- case L2_HDLC:
- s = "HDLC";
- break;
- case L2_VOICE:
- s = "voice";
- break;
- default:
- s = "??";
- }
- seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
- }
- return 0;
-}
-
-/**
- * gigaset_isdn_regdev() - register device to LL
- * @cs: device descriptor structure.
- * @isdnid: device name.
- *
- * Return value: 0 on success, error code < 0 on failure
- */
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
- struct gigaset_capi_ctr *iif;
- int rc;
-
- iif = kzalloc(sizeof(*iif), GFP_KERNEL);
- if (!iif) {
- pr_err("%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- /* prepare controller structure */
- iif->ctr.owner = THIS_MODULE;
- iif->ctr.driverdata = cs;
- strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1);
- iif->ctr.driver_name = "gigaset";
- iif->ctr.load_firmware = NULL;
- iif->ctr.reset_ctr = NULL;
- iif->ctr.register_appl = gigaset_register_appl;
- iif->ctr.release_appl = gigaset_release_appl;
- iif->ctr.send_message = gigaset_send_message;
- iif->ctr.procinfo = gigaset_procinfo;
-	iif->ctr.proc_show = gigaset_proc_show;
- INIT_LIST_HEAD(&iif->appls);
- skb_queue_head_init(&iif->sendqueue);
- atomic_set(&iif->sendqlen, 0);
-
- /* register controller with CAPI */
- rc = attach_capi_ctr(&iif->ctr);
- if (rc) {
- pr_err("attach_capi_ctr failed (%d)\n", rc);
- kfree(iif);
- return rc;
- }
-
- cs->iif = iif;
- cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
- return 0;
-}
-
-/**
- * gigaset_isdn_unregdev() - unregister device from LL
- * @cs: device descriptor structure.
- */
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
- struct gigaset_capi_ctr *iif = cs->iif;
-
- detach_capi_ctr(&iif->ctr);
- kfree(iif);
- cs->iif = NULL;
-}
-
-static struct capi_driver capi_driver_gigaset = {
- .name = "gigaset",
- .revision = "1.0",
-};
-
-/**
- * gigaset_isdn_regdrv() - register driver to LL
- */
-void gigaset_isdn_regdrv(void)
-{
- pr_info("Kernel CAPI interface\n");
- register_capi_driver(&capi_driver_gigaset);
-}
-
-/**
- * gigaset_isdn_unregdrv() - unregister driver from LL
- */
-void gigaset_isdn_unregdrv(void)
-{
- unregister_capi_driver(&capi_driver_gigaset);
-}
diff --git a/drivers/staging/isdn/gigaset/common.c b/drivers/staging/isdn/gigaset/common.c
deleted file mode 100644
index 3bb8092858ab..000000000000
--- a/drivers/staging/isdn/gigaset/common.c
+++ /dev/null
@@ -1,1153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
-#define DRIVER_DESC "Driver for Gigaset 307x"
-
-#ifdef CONFIG_GIGASET_DEBUG
-#define DRIVER_DESC_DEBUG " (debug build)"
-#else
-#define DRIVER_DESC_DEBUG ""
-#endif
-
-/* Module parameters */
-int gigaset_debuglevel;
-EXPORT_SYMBOL_GPL(gigaset_debuglevel);
-module_param_named(debug, gigaset_debuglevel, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug level");
-
-/* driver state flags */
-#define VALID_MINOR 0x01
-#define VALID_ID 0x02
-
-/**
- * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
- * @level: debugging level.
- * @msg: message prefix.
- * @len: number of bytes to dump.
- * @buf: data to dump.
- *
- * If the current debugging level includes one of the bits set in @level,
- * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
- * prefixed by the text @msg.
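- *
- * Escaping: '\', '~' and '^' are preceded by '\'; a byte with the high
- * bit set is rendered as '~' followed by its low 7 bits; a control byte
- * as '^' followed by the byte XOR 0x40 (so 0x93 is shown as "~^S").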
- */
-void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
- size_t len, const unsigned char *buf)
-{
- unsigned char outbuf[80];
- unsigned char c;
- size_t space = sizeof outbuf - 1;
- unsigned char *out = outbuf;
- size_t numin = len;
-
- while (numin--) {
- c = *buf++;
- if (c == '~' || c == '^' || c == '\\') {
- if (!space--)
- break;
- *out++ = '\\';
- }
- if (c & 0x80) {
- if (!space--)
- break;
- *out++ = '~';
- c ^= 0x80;
- }
- if (c < 0x20 || c == 0x7f) {
- if (!space--)
- break;
- *out++ = '^';
- c ^= 0x40;
- }
- if (!space--)
- break;
- *out++ = c;
- }
- *out = 0;
-
- gig_dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
-}
-EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
-
-static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
-{
- int r;
-
- r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
- cs->control_state = flags;
- if (r < 0)
- return r;
-
- if (delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delay * HZ / 1000);
- }
-
- return 0;
-}
-
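-/*
- * Put the device into configuration mode by toggling the DTR/RTS modem
- * control lines in a fixed, timed pattern.
- */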
-int gigaset_enterconfigmode(struct cardstate *cs)
-{
- int i, r;
-
- cs->control_state = TIOCM_RTS;
-
- r = setflags(cs, TIOCM_DTR, 200);
- if (r < 0)
- goto error;
- r = setflags(cs, 0, 200);
- if (r < 0)
- goto error;
- for (i = 0; i < 5; ++i) {
- r = setflags(cs, TIOCM_RTS, 100);
- if (r < 0)
- goto error;
- r = setflags(cs, 0, 100);
- if (r < 0)
- goto error;
- }
- r = setflags(cs, TIOCM_RTS | TIOCM_DTR, 800);
- if (r < 0)
- goto error;
-
- return 0;
-
-error:
- dev_err(cs->dev, "error %d on setuartbits\n", -r);
- cs->control_state = TIOCM_RTS | TIOCM_DTR;
- cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS | TIOCM_DTR);
-
- return -1;
-}
-
-static int test_timeout(struct at_state_t *at_state)
-{
- if (!at_state->timer_expires)
- return 0;
-
- if (--at_state->timer_expires) {
- gig_dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
- at_state, at_state->timer_expires);
- return 0;
- }
-
- gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
- at_state->timer_index, NULL);
- return 1;
-}
-
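-/*
- * Periodic timer callback: runs every GIG_TICK ms while the device is
- * running, decrements all active AT state timeout counters, and
- * schedules the event tasklet when one of them expires.
- */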
-static void timer_tick(struct timer_list *t)
-{
- struct cardstate *cs = from_timer(cs, t, timer);
- unsigned long flags;
- unsigned channel;
- struct at_state_t *at_state;
- int timeout = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- for (channel = 0; channel < cs->channels; ++channel)
- if (test_timeout(&cs->bcs[channel].at_state))
- timeout = 1;
-
- if (test_timeout(&cs->at_state))
- timeout = 1;
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (test_timeout(at_state))
- timeout = 1;
-
- if (cs->running) {
- mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK));
- if (timeout) {
- gig_dbg(DEBUG_EVENT, "scheduling timeout");
- tasklet_schedule(&cs->event_tasklet);
- }
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
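-/*
- * Reserve a B channel for exclusive use. Also takes a reference on the
- * owning module so the driver cannot be unloaded while the channel is
- * busy.
- */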
-int gigaset_get_channel(struct bc_state *bcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
- gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
- bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return -EBUSY;
- }
- ++bcs->use_count;
- bcs->busy = 1;
- gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return 0;
-}
-
-struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!try_module_get(cs->driver->owner)) {
- gig_dbg(DEBUG_CHANNEL,
- "could not get module for allocating channel");
- spin_unlock_irqrestore(&cs->lock, flags);
- return NULL;
- }
- for (i = 0; i < cs->channels; ++i)
- if (!cs->bcs[i].use_count) {
- ++cs->bcs[i].use_count;
- cs->bcs[i].busy = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_CHANNEL, "allocated channel %d", i);
- return cs->bcs + i;
- }
- module_put(cs->driver->owner);
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_CHANNEL, "no free channel");
- return NULL;
-}
-
-void gigaset_free_channel(struct bc_state *bcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (!bcs->busy) {
- gig_dbg(DEBUG_CHANNEL, "could not free channel %d",
- bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return;
- }
- --bcs->use_count;
- bcs->busy = 0;
- module_put(bcs->cs->driver->owner);
- gig_dbg(DEBUG_CHANNEL, "freed channel %d", bcs->channel);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
-}
-
-int gigaset_get_channels(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&cs->lock, flags);
- for (i = 0; i < cs->channels; ++i)
- if (cs->bcs[i].use_count) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_CHANNEL,
- "could not allocate all channels");
- return -EBUSY;
- }
- for (i = 0; i < cs->channels; ++i)
- ++cs->bcs[i].use_count;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- gig_dbg(DEBUG_CHANNEL, "allocated all channels");
-
- return 0;
-}
-
-void gigaset_free_channels(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- gig_dbg(DEBUG_CHANNEL, "unblocking all channels");
- spin_lock_irqsave(&cs->lock, flags);
- for (i = 0; i < cs->channels; ++i)
- --cs->bcs[i].use_count;
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-void gigaset_block_channels(struct cardstate *cs)
-{
- unsigned long flags;
- int i;
-
- gig_dbg(DEBUG_CHANNEL, "blocking all channels");
- spin_lock_irqsave(&cs->lock, flags);
- for (i = 0; i < cs->channels; ++i)
- ++cs->bcs[i].use_count;
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static void clear_events(struct cardstate *cs)
-{
- struct event_t *ev;
- unsigned head, tail;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->ev_lock, flags);
-
- head = cs->ev_head;
- tail = cs->ev_tail;
-
- while (tail != head) {
- ev = cs->events + head;
- kfree(ev->ptr);
- head = (head + 1) % MAX_EVENTS;
- }
-
- cs->ev_head = tail;
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-}
-
-/**
- * gigaset_add_event() - add event to device event queue
- * @cs: device descriptor structure.
- * @at_state: connection state structure.
- * @type: event type.
- * @ptr: pointer parameter for event.
- * @parameter: integer parameter for event.
- * @arg: pointer parameter for event.
- *
- * Allocate an event queue entry from the device's event queue, and set it up
- * with the parameters given.
- *
- * Return value: pointer to the queued event, or NULL if the queue is full
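- *
- * The event queue is a fixed-size ring buffer: it counts as full when
- * advancing the tail would make it meet the head, so one slot always
- * remains unused.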
- */
-struct event_t *gigaset_add_event(struct cardstate *cs,
- struct at_state_t *at_state, int type,
- void *ptr, int parameter, void *arg)
-{
- unsigned long flags;
- unsigned next, tail;
- struct event_t *event = NULL;
-
- gig_dbg(DEBUG_EVENT, "queueing event %d", type);
-
- spin_lock_irqsave(&cs->ev_lock, flags);
-
- tail = cs->ev_tail;
- next = (tail + 1) % MAX_EVENTS;
- if (unlikely(next == cs->ev_head))
- dev_err(cs->dev, "event queue full\n");
- else {
- event = cs->events + tail;
- event->type = type;
- event->at_state = at_state;
- event->cid = -1;
- event->ptr = ptr;
- event->arg = arg;
- event->parameter = parameter;
- cs->ev_tail = next;
- }
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-
- return event;
-}
-EXPORT_SYMBOL_GPL(gigaset_add_event);
-
-static void clear_at_state(struct at_state_t *at_state)
-{
- int i;
-
- for (i = 0; i < STR_NUM; ++i) {
- kfree(at_state->str_var[i]);
- at_state->str_var[i] = NULL;
- }
-}
-
-static void dealloc_temp_at_states(struct cardstate *cs)
-{
- struct at_state_t *cur, *next;
-
- list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
- list_del(&cur->list);
- clear_at_state(cur);
- kfree(cur);
- }
-}
-
-static void gigaset_freebcs(struct bc_state *bcs)
-{
- int i;
-
- gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
- bcs->cs->ops->freebcshw(bcs);
-
- gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
- clear_at_state(&bcs->at_state);
- gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
- dev_kfree_skb(bcs->rx_skb);
- bcs->rx_skb = NULL;
-
- for (i = 0; i < AT_NUM; ++i) {
- kfree(bcs->commands[i]);
- bcs->commands[i] = NULL;
- }
-}
-
-static struct cardstate *alloc_cs(struct gigaset_driver *drv)
-{
- unsigned long flags;
- unsigned i;
- struct cardstate *cs;
- struct cardstate *ret = NULL;
-
- spin_lock_irqsave(&drv->lock, flags);
- if (drv->blocked)
- goto exit;
- for (i = 0; i < drv->minors; ++i) {
- cs = drv->cs + i;
- if (!(cs->flags & VALID_MINOR)) {
- cs->flags = VALID_MINOR;
- ret = cs;
- break;
- }
- }
-exit:
- spin_unlock_irqrestore(&drv->lock, flags);
- return ret;
-}
-
-static void free_cs(struct cardstate *cs)
-{
- cs->flags = 0;
-}
-
-static void make_valid(struct cardstate *cs, unsigned mask)
-{
- unsigned long flags;
- struct gigaset_driver *drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- cs->flags |= mask;
- spin_unlock_irqrestore(&drv->lock, flags);
-}
-
-static void make_invalid(struct cardstate *cs, unsigned mask)
-{
- unsigned long flags;
- struct gigaset_driver *drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- cs->flags &= ~mask;
- spin_unlock_irqrestore(&drv->lock, flags);
-}
-
-/**
- * gigaset_freecs() - free all associated resources of a device
- * @cs: device descriptor structure.
- *
- * Stops all tasklets and timers, unregisters the device from all
- * subsystems it was registered to, deallocates the device structure
- * @cs and all structures referenced from it.
- * Operations on the device should be stopped before calling this.
- */
-void gigaset_freecs(struct cardstate *cs)
-{
- int i;
- unsigned long flags;
-
- if (!cs)
- return;
-
- mutex_lock(&cs->mutex);
-
-	/* stop device so that event handler and timer are not rescheduled
-	 * below */
-	spin_lock_irqsave(&cs->lock, flags);
-	cs->running = 0;
-	spin_unlock_irqrestore(&cs->lock, flags);
-
- tasklet_kill(&cs->event_tasklet);
- del_timer_sync(&cs->timer);
-
- switch (cs->cs_init) {
- default:
- /* clear B channel structures */
- for (i = 0; i < cs->channels; ++i) {
- gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
- gigaset_freebcs(cs->bcs + i);
- }
-
- /* clear device sysfs */
- gigaset_free_dev_sysfs(cs);
-
- gigaset_if_free(cs);
-
- gig_dbg(DEBUG_INIT, "clearing hw");
- cs->ops->freecshw(cs);
-
- /* fall through */
- case 2: /* error in initcshw */
- /* Deregister from LL */
- make_invalid(cs, VALID_ID);
- gigaset_isdn_unregdev(cs);
-
- /* fall through */
- case 1: /* error when registering to LL */
- gig_dbg(DEBUG_INIT, "clearing at_state");
- clear_at_state(&cs->at_state);
- dealloc_temp_at_states(cs);
- clear_events(cs);
- tty_port_destroy(&cs->port);
-
- /* fall through */
- case 0: /* error in basic setup */
- gig_dbg(DEBUG_INIT, "freeing inbuf");
- kfree(cs->inbuf);
- kfree(cs->bcs);
- }
-
- mutex_unlock(&cs->mutex);
- free_cs(cs);
-}
-EXPORT_SYMBOL_GPL(gigaset_freecs);
-
-void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
- struct cardstate *cs, int cid)
-{
- int i;
-
- INIT_LIST_HEAD(&at_state->list);
- at_state->waiting = 0;
- at_state->getstring = 0;
- at_state->pending_commands = 0;
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- at_state->timer_index = 0;
- at_state->seq_index = 0;
- at_state->ConState = 0;
- for (i = 0; i < STR_NUM; ++i)
- at_state->str_var[i] = NULL;
- at_state->int_var[VAR_ZDLE] = 0;
- at_state->int_var[VAR_ZCTP] = -1;
- at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
- at_state->cs = cs;
- at_state->bcs = bcs;
- at_state->cid = cid;
- if (!cid)
- at_state->replystruct = cs->tabnocid;
- else
- at_state->replystruct = cs->tabcid;
-}
-
-
-/* initialize the input buffer; the inbuf structure itself must already
- * be allocated by the caller */
-static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
-{
- inbuf->head = 0;
- inbuf->tail = 0;
- inbuf->cs = cs;
- inbuf->inputstate = INS_command;
-}
-
-/**
- * gigaset_fill_inbuf() - append received data to input buffer
- * @inbuf: buffer structure.
- * @src: received data.
- * @numbytes: number of bytes received.
- *
- * Return value: !=0 if some data was appended
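- *
- * Data are copied into the ring buffer in at most two contiguous
- * chunks; one byte is always kept free so that a full buffer can be
- * distinguished from an empty one.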
- */
-int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
- unsigned numbytes)
-{
- unsigned n, head, tail, bytesleft;
-
- gig_dbg(DEBUG_INTR, "received %u bytes", numbytes);
-
- if (!numbytes)
- return 0;
-
- bytesleft = numbytes;
- tail = inbuf->tail;
- head = inbuf->head;
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
-
- while (bytesleft) {
- if (head > tail)
- n = head - 1 - tail;
- else if (head == 0)
- n = (RBUFSIZE - 1) - tail;
- else
- n = RBUFSIZE - tail;
- if (!n) {
- dev_err(inbuf->cs->dev,
- "buffer overflow (%u bytes lost)\n",
- bytesleft);
- break;
- }
- if (n > bytesleft)
- n = bytesleft;
- memcpy(inbuf->data + tail, src, n);
- bytesleft -= n;
- tail = (tail + n) % RBUFSIZE;
- src += n;
- }
- gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- inbuf->tail = tail;
- return numbytes != bytesleft;
-}
-EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
-
-/* Initialize the b-channel structure */
-static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
- int channel)
-{
- int i;
-
- bcs->tx_skb = NULL;
-
- skb_queue_head_init(&bcs->squeue);
-
- bcs->corrupted = 0;
- bcs->trans_down = 0;
- bcs->trans_up = 0;
-
- gig_dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
- gigaset_at_init(&bcs->at_state, bcs, cs, -1);
-
-#ifdef CONFIG_GIGASET_DEBUG
- bcs->emptycount = 0;
-#endif
-
- bcs->rx_bufsize = 0;
- bcs->rx_skb = NULL;
- bcs->rx_fcs = PPP_INITFCS;
- bcs->inputstate = 0;
- bcs->channel = channel;
- bcs->cs = cs;
-
- bcs->chstate = 0;
- bcs->use_count = 1;
- bcs->busy = 0;
- bcs->ignore = cs->ignoreframes;
-
- for (i = 0; i < AT_NUM; ++i)
- bcs->commands[i] = NULL;
-
- spin_lock_init(&bcs->aplock);
- bcs->ap = NULL;
- bcs->apconnstate = 0;
-
-	gig_dbg(DEBUG_INIT, "setting up bcs[%d]->hw", channel);
- return cs->ops->initbcshw(bcs);
-}
-
-/**
- * gigaset_initcs() - initialize device structure
- * @drv: hardware driver the device belongs to
- * @channels: number of B channels supported by device
- * @onechannel: !=0 if B channel data and AT commands share one
- * communication channel (M10x),
- * ==0 if B channels have separate communication channels (base)
- * @ignoreframes: number of frames to ignore after setting up B channel
- * @cidmode: !=0: start in CallID mode
- * @modulename: name of driver module for LL registration
- *
- * Allocate and initialize cardstate structure for Gigaset driver
- * Calls hardware dependent gigaset_initcshw() function
- * Calls B channel initialization function gigaset_initbcs() for each B channel
- *
- * Return value:
- * pointer to cardstate structure on success, NULL on failure
- */
-struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
- int onechannel, int ignoreframes,
- int cidmode, const char *modulename)
-{
- struct cardstate *cs;
- unsigned long flags;
- int i;
-
- gig_dbg(DEBUG_INIT, "allocating cs");
- cs = alloc_cs(drv);
- if (!cs) {
- pr_err("maximum number of devices exceeded\n");
- return NULL;
- }
-
- cs->cs_init = 0;
- cs->channels = channels;
- cs->onechannel = onechannel;
- cs->ignoreframes = ignoreframes;
- INIT_LIST_HEAD(&cs->temp_at_states);
- cs->running = 0;
- timer_setup(&cs->timer, timer_tick, 0);
- spin_lock_init(&cs->ev_lock);
- cs->ev_tail = 0;
- cs->ev_head = 0;
-
- tasklet_init(&cs->event_tasklet, gigaset_handle_event,
- (unsigned long) cs);
- tty_port_init(&cs->port);
- cs->commands_pending = 0;
- cs->cur_at_seq = 0;
- cs->gotfwver = -1;
- cs->dev = NULL;
- cs->tty_dev = NULL;
- cs->cidmode = cidmode != 0;
- cs->tabnocid = gigaset_tab_nocid;
- cs->tabcid = gigaset_tab_cid;
-
- init_waitqueue_head(&cs->waitqueue);
- cs->waiting = 0;
-
- cs->mode = M_UNKNOWN;
- cs->mstate = MS_UNINITIALIZED;
-
- cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), GFP_KERNEL);
- cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
- if (!cs->bcs || !cs->inbuf) {
- pr_err("out of memory\n");
- goto error;
- }
- ++cs->cs_init;
-
- gig_dbg(DEBUG_INIT, "setting up at_state");
- spin_lock_init(&cs->lock);
- gigaset_at_init(&cs->at_state, NULL, cs, 0);
- cs->dle = 0;
- cs->cbytes = 0;
-
- gig_dbg(DEBUG_INIT, "setting up inbuf");
- gigaset_inbuf_init(cs->inbuf, cs);
-
- cs->connected = 0;
- cs->isdn_up = 0;
-
- gig_dbg(DEBUG_INIT, "setting up cmdbuf");
- cs->cmdbuf = cs->lastcmdbuf = NULL;
- spin_lock_init(&cs->cmdlock);
- cs->curlen = 0;
- cs->cmdbytes = 0;
-
- gig_dbg(DEBUG_INIT, "setting up iif");
- if (gigaset_isdn_regdev(cs, modulename) < 0) {
- pr_err("error registering ISDN device\n");
- goto error;
- }
-
- make_valid(cs, VALID_ID);
- ++cs->cs_init;
- gig_dbg(DEBUG_INIT, "setting up hw");
- if (cs->ops->initcshw(cs) < 0)
- goto error;
-
- ++cs->cs_init;
-
- /* set up character device */
- gigaset_if_init(cs);
-
- /* set up device sysfs */
- gigaset_init_dev_sysfs(cs);
-
- /* set up channel data structures */
- for (i = 0; i < channels; ++i) {
- gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
- if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
- pr_err("could not allocate channel %d data\n", i);
- goto error;
- }
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->running = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK);
- add_timer(&cs->timer);
-
- gig_dbg(DEBUG_INIT, "cs initialized");
- return cs;
-
-error:
- gig_dbg(DEBUG_INIT, "failed");
- gigaset_freecs(cs);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(gigaset_initcs);
-
-/* ReInitialize the b-channel structure on hangup */
-void gigaset_bcs_reinit(struct bc_state *bcs)
-{
- struct sk_buff *skb;
- struct cardstate *cs = bcs->cs;
- unsigned long flags;
-
- while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
- dev_kfree_skb(skb);
-
- spin_lock_irqsave(&cs->lock, flags);
- clear_at_state(&bcs->at_state);
- bcs->at_state.ConState = 0;
- bcs->at_state.timer_active = 0;
- bcs->at_state.timer_expires = 0;
- bcs->at_state.cid = -1; /* No CID defined */
- spin_unlock_irqrestore(&cs->lock, flags);
-
- bcs->inputstate = 0;
-
-#ifdef CONFIG_GIGASET_DEBUG
- bcs->emptycount = 0;
-#endif
-
- bcs->rx_fcs = PPP_INITFCS;
- bcs->chstate = 0;
-
- bcs->ignore = cs->ignoreframes;
- dev_kfree_skb(bcs->rx_skb);
- bcs->rx_skb = NULL;
-
- cs->ops->reinitbcshw(bcs);
-}
-
-static void cleanup_cs(struct cardstate *cs)
-{
- struct cmdbuf_t *cb, *tcb;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- cs->mode = M_UNKNOWN;
- cs->mstate = MS_UNINITIALIZED;
-
- clear_at_state(&cs->at_state);
- dealloc_temp_at_states(cs);
- gigaset_at_init(&cs->at_state, NULL, cs, 0);
-
- cs->inbuf->inputstate = INS_command;
- cs->inbuf->head = 0;
- cs->inbuf->tail = 0;
-
- cb = cs->cmdbuf;
- while (cb) {
- tcb = cb;
- cb = cb->next;
- kfree(tcb);
- }
- cs->cmdbuf = cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- cs->cmdbytes = 0;
- cs->gotfwver = -1;
- cs->dle = 0;
- cs->cur_at_seq = 0;
- cs->commands_pending = 0;
- cs->cbytes = 0;
-
- spin_unlock_irqrestore(&cs->lock, flags);
-
- for (i = 0; i < cs->channels; ++i) {
- gigaset_freebcs(cs->bcs + i);
- if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
- pr_err("could not allocate channel %d data\n", i);
- }
-
- if (cs->waiting) {
- cs->cmd_result = -ENODEV;
- cs->waiting = 0;
- wake_up_interruptible(&cs->waitqueue);
- }
-}
-
-
-/**
- * gigaset_start() - start device operations
- * @cs: device descriptor structure.
- *
- * Prepares the device for use by setting up communication parameters,
- * scheduling an EV_START event to initiate device initialization, and
- * waiting for completion of the initialization.
- *
- * Return value:
- * 0 on success, error code < 0 on failure
- */
-int gigaset_start(struct cardstate *cs)
-{
- unsigned long flags;
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -EBUSY;
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->connected = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- if (cs->mstate != MS_LOCKED) {
- cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
- cs->ops->baud_rate(cs, B115200);
- cs->ops->set_line_ctrl(cs, CS8);
- cs->control_state = TIOCM_DTR | TIOCM_RTS;
- }
-
- cs->waiting = 1;
-
- if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
- cs->waiting = 0;
- goto error;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- mutex_unlock(&cs->mutex);
- return 0;
-
-error:
- mutex_unlock(&cs->mutex);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(gigaset_start);
-
-/**
- * gigaset_shutdown() - shut down device operations
- * @cs: device descriptor structure.
- *
- * Deactivates the device by scheduling an EV_SHUTDOWN event and
- * waiting for completion of the shutdown.
- *
- * Return value:
- * 0 - success, -ENODEV - error (no device associated)
- */
-int gigaset_shutdown(struct cardstate *cs)
-{
- mutex_lock(&cs->mutex);
-
- if (!(cs->flags & VALID_MINOR)) {
- mutex_unlock(&cs->mutex);
- return -ENODEV;
- }
-
- cs->waiting = 1;
-
- if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL))
- goto exit;
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- cleanup_cs(cs);
-
-exit:
- mutex_unlock(&cs->mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(gigaset_shutdown);
-
-/**
- * gigaset_stop() - stop device operations
- * @cs: device descriptor structure.
- *
- * Stops operations on the device by scheduling an EV_STOP event and
- * waiting for completion of the shutdown.
- */
-void gigaset_stop(struct cardstate *cs)
-{
- mutex_lock(&cs->mutex);
-
- cs->waiting = 1;
-
- if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL))
- goto exit;
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- cleanup_cs(cs);
-
-exit:
- mutex_unlock(&cs->mutex);
-}
-EXPORT_SYMBOL_GPL(gigaset_stop);
-
-static LIST_HEAD(drivers);
-static DEFINE_SPINLOCK(driver_lock);
-
-struct cardstate *gigaset_get_cs_by_id(int id)
-{
- unsigned long flags;
- struct cardstate *ret = NULL;
- struct cardstate *cs;
- struct gigaset_driver *drv;
- unsigned i;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_for_each_entry(drv, &drivers, list) {
- spin_lock(&drv->lock);
- for (i = 0; i < drv->minors; ++i) {
- cs = drv->cs + i;
- if ((cs->flags & VALID_ID) && cs->myid == id) {
- ret = cs;
- break;
- }
- }
- spin_unlock(&drv->lock);
- if (ret)
- break;
- }
- spin_unlock_irqrestore(&driver_lock, flags);
- return ret;
-}
-
-static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
-{
- unsigned long flags;
- struct cardstate *ret = NULL;
- struct gigaset_driver *drv;
- unsigned index;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_for_each_entry(drv, &drivers, list) {
- if (minor < drv->minor || minor >= drv->minor + drv->minors)
- continue;
- index = minor - drv->minor;
- spin_lock(&drv->lock);
- if (drv->cs[index].flags & VALID_MINOR)
- ret = drv->cs + index;
- spin_unlock(&drv->lock);
- if (ret)
- break;
- }
- spin_unlock_irqrestore(&driver_lock, flags);
- return ret;
-}
-
-struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
-{
- return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
-}
-
-/**
- * gigaset_freedriver() - free all associated resources of a driver
- * @drv: driver descriptor structure.
- *
- * Unregisters the driver from the system and deallocates the driver
- * structure @drv and all structures referenced from it.
- * All devices should be shut down before calling this.
- */
-void gigaset_freedriver(struct gigaset_driver *drv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&driver_lock, flags);
- list_del(&drv->list);
- spin_unlock_irqrestore(&driver_lock, flags);
-
- gigaset_if_freedriver(drv);
-
- kfree(drv->cs);
- kfree(drv);
-}
-EXPORT_SYMBOL_GPL(gigaset_freedriver);
-
-/**
- * gigaset_initdriver() - initialize driver structure
- * @minor: First minor number
- * @minors: Number of minors this driver can handle
- * @procname: Name of the driver
- * @devname: Name of the device files (prefix without minor number)
- * @ops: Table of hardware-specific operations for the devices
- * @owner: Module owning the driver, for reference counting
- *
- * Allocate and initialize gigaset_driver structure. Initialize interface.
- *
- * Return value:
- * Pointer to the gigaset_driver structure on success, NULL on failure.
- */
-struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
- const char *procname,
- const char *devname,
- const struct gigaset_ops *ops,
- struct module *owner)
-{
- struct gigaset_driver *drv;
- unsigned long flags;
- unsigned i;
-
- drv = kmalloc(sizeof *drv, GFP_KERNEL);
- if (!drv)
- return NULL;
-
- drv->have_tty = 0;
- drv->minor = minor;
- drv->minors = minors;
- spin_lock_init(&drv->lock);
- drv->blocked = 0;
- drv->ops = ops;
- drv->owner = owner;
- INIT_LIST_HEAD(&drv->list);
-
- drv->cs = kmalloc_array(minors, sizeof(*drv->cs), GFP_KERNEL);
- if (!drv->cs)
- goto error;
-
- for (i = 0; i < minors; ++i) {
- drv->cs[i].flags = 0;
- drv->cs[i].driver = drv;
- drv->cs[i].ops = drv->ops;
- drv->cs[i].minor_index = i;
- mutex_init(&drv->cs[i].mutex);
- }
-
- gigaset_if_initdriver(drv, procname, devname);
-
- spin_lock_irqsave(&driver_lock, flags);
- list_add(&drv->list, &drivers);
- spin_unlock_irqrestore(&driver_lock, flags);
-
- return drv;
-
-error:
- kfree(drv);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(gigaset_initdriver);
-
-/**
- * gigaset_blockdriver() - block driver
- * @drv: driver descriptor structure.
- *
- * Prevents the driver from attaching new devices, in preparation for
- * deregistration.
- */
-void gigaset_blockdriver(struct gigaset_driver *drv)
-{
- drv->blocked = 1;
-}
-EXPORT_SYMBOL_GPL(gigaset_blockdriver);
-
-static int __init gigaset_init_module(void)
-{
- /* in accordance with the principle of least astonishment,
- * setting the 'debug' parameter to 1 activates a sensible
- * set of default debug levels
- */
- if (gigaset_debuglevel == 1)
- gigaset_debuglevel = DEBUG_DEFAULT;
-
- pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
- gigaset_isdn_regdrv();
- return 0;
-}
-
-static void __exit gigaset_exit_module(void)
-{
- gigaset_isdn_unregdrv();
-}
-
-module_init(gigaset_init_module);
-module_exit(gigaset_exit_module);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/gigaset/dummyll.c b/drivers/staging/isdn/gigaset/dummyll.c
deleted file mode 100644
index 4b9637e5da6e..000000000000
--- a/drivers/staging/isdn/gigaset/dummyll.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Dummy LL interface for the Gigaset driver
- *
- * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include <linux/export.h>
-#include "gigaset.h"
-
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
-
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
- return ICALL_IGNORE;
-}
-
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
-}
-
-void gigaset_isdn_start(struct cardstate *cs)
-{
-}
-
-void gigaset_isdn_stop(struct cardstate *cs)
-{
-}
-
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
- return 0;
-}
-
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
-}
-
-void gigaset_isdn_regdrv(void)
-{
- pr_info("no ISDN subsystem interface\n");
-}
-
-void gigaset_isdn_unregdrv(void)
-{
-}
diff --git a/drivers/staging/isdn/gigaset/ev-layer.c b/drivers/staging/isdn/gigaset/ev-layer.c
deleted file mode 100644
index f8bb1869c600..000000000000
--- a/drivers/staging/isdn/gigaset/ev-layer.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include <linux/export.h>
-#include "gigaset.h"
-
-/* ========================================================== */
-/* bit masks for pending commands */
-#define PC_DIAL 0x001
-#define PC_HUP 0x002
-#define PC_INIT 0x004
-#define PC_DLE0 0x008
-#define PC_DLE1 0x010
-#define PC_SHUTDOWN 0x020
-#define PC_ACCEPT 0x040
-#define PC_CID 0x080
-#define PC_NOCID 0x100
-#define PC_CIDMODE 0x200
-#define PC_UMMODE 0x400
-
-/* types of modem responses */
-#define RT_NOTHING 0
-#define RT_ZSAU 1
-#define RT_RING 2
-#define RT_NUMBER 3
-#define RT_STRING 4
-#define RT_ZCAU 6
-
-/* Possible ASCII responses */
-#define RSP_OK 0
-#define RSP_ERROR 1
-#define RSP_ZGCI 3
-#define RSP_RING 4
-#define RSP_ZVLS 5
-#define RSP_ZCAU 6
-
-/* responses with values to store in at_state */
-/* - numeric */
-#define RSP_VAR 100
-#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
-#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
-#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
-/* - string */
-#define RSP_STR (RSP_VAR + VAR_NUM)
-#define RSP_NMBR (RSP_STR + STR_NMBR)
-#define RSP_ZCPN (RSP_STR + STR_ZCPN)
-#define RSP_ZCON (RSP_STR + STR_ZCON)
-#define RSP_ZBC (RSP_STR + STR_ZBC)
-#define RSP_ZHLC (RSP_STR + STR_ZHLC)
-
-#define RSP_WRONG_CID -2 /* unknown cid in cmd */
-#define RSP_INVAL -6 /* invalid response */
-#define RSP_NODEV -9 /* device not connected */
-
-#define RSP_NONE -19
-#define RSP_STRING -20
-#define RSP_NULL -21
-#define RSP_INIT -27
-#define RSP_ANY -26
-#define RSP_LAST -28
-
-/* actions for process_response */
-#define ACT_NOTHING 0
-#define ACT_SETDLE1 1
-#define ACT_SETDLE0 2
-#define ACT_FAILINIT 3
-#define ACT_HUPMODEM 4
-#define ACT_CONFIGMODE 5
-#define ACT_INIT 6
-#define ACT_DLE0 7
-#define ACT_DLE1 8
-#define ACT_FAILDLE0 9
-#define ACT_FAILDLE1 10
-#define ACT_RING 11
-#define ACT_CID 12
-#define ACT_FAILCID 13
-#define ACT_SDOWN 14
-#define ACT_FAILSDOWN 15
-#define ACT_DEBUG 16
-#define ACT_WARN 17
-#define ACT_DIALING 18
-#define ACT_ABORTDIAL 19
-#define ACT_DISCONNECT 20
-#define ACT_CONNECT 21
-#define ACT_REMOTEREJECT 22
-#define ACT_CONNTIMEOUT 23
-#define ACT_REMOTEHUP 24
-#define ACT_ABORTHUP 25
-#define ACT_ICALL 26
-#define ACT_ACCEPTED 27
-#define ACT_ABORTACCEPT 28
-#define ACT_TIMEOUT 29
-#define ACT_GETSTRING 30
-#define ACT_SETVER 31
-#define ACT_FAILVER 32
-#define ACT_GOTVER 33
-#define ACT_TEST 34
-#define ACT_ERROR 35
-#define ACT_ABORTCID 36
-#define ACT_ZCAU 37
-#define ACT_NOTIFY_BC_DOWN 38
-#define ACT_NOTIFY_BC_UP 39
-#define ACT_DIAL 40
-#define ACT_ACCEPT 41
-#define ACT_HUP 43
-#define ACT_IF_LOCK 44
-#define ACT_START 45
-#define ACT_STOP 46
-#define ACT_FAKEDLE0 47
-#define ACT_FAKEHUP 48
-#define ACT_FAKESDOWN 49
-#define ACT_SHUTDOWN 50
-#define ACT_PROC_CIDMODE 51
-#define ACT_UMODESET 52
-#define ACT_FAILUMODE 53
-#define ACT_CMODESET 54
-#define ACT_FAILCMODE 55
-#define ACT_IF_VER 56
-#define ACT_CMD 100
-
-/* at command sequences */
-#define SEQ_NONE 0
-#define SEQ_INIT 100
-#define SEQ_DLE0 200
-#define SEQ_DLE1 250
-#define SEQ_CID 300
-#define SEQ_NOCID 350
-#define SEQ_HUP 400
-#define SEQ_DIAL 600
-#define SEQ_ACCEPT 720
-#define SEQ_SHUTDOWN 500
-#define SEQ_CIDMODE 10
-#define SEQ_UMMODE 11
-
-
-/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
- * 400: hup, 500: reset, 600: dial, 700: ring */
-struct reply_t gigaset_tab_nocid[] =
-{
-/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
- * action, command */
-
-/* initialize device, set cid mode if possible */
- {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
-
- {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
- {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
- "+GMR\r"},
-
- {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
- {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
-
- {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
- "^SDLE=0\r"},
- {RSP_OK, 108, 108, -1, 104, -1},
- {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
- {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
- {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
-
- {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
- ACT_HUPMODEM,
- ACT_TIMEOUT} },
- {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
-
- {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
- {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
- {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
- {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
-
- {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
- {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
-
- {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
-
- {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
- ACT_INIT} },
- {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
- ACT_INIT} },
- {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
- ACT_INIT} },
- {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },
-
-/* leave dle mode */
- {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
- {RSP_OK, 201, 201, -1, 202, -1},
- {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
- {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
- {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
- {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
-
-/* enter dle mode */
- {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
- {RSP_OK, 251, 251, -1, 252, -1},
- {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
- {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
- {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
-
-/* incoming call */
- {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
-
-/* get cid */
- {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
- {RSP_OK, 301, 301, -1, 302, -1},
- {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
- {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
- {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
-
-/* enter cid mode */
- {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
- {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
- {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
- {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
-
-/* leave cid mode */
- {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
- {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
- {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
- {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
-
-/* abort getting cid */
- {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
-
-/* reset */
- {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
- {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
- {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
- {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
- {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
-
- {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
- {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
- {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
- {EV_START, -1, -1, -1, -1, -1, {ACT_START} },
- {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
- {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
-
-/* misc. */
- {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
- {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
- {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
- {RSP_LAST}
-};
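process_event() (further down) walks such a table row by row until one matches the incoming event: the row's resp_code must equal the event type (or be RSP_ANY), the current ConState must lie within [min_ConState, max_ConState] (a negative max is a wildcard), and the parameter must match unless the row's parameter is negative. A hedged standalone restatement of that predicate, using a reduced struct rather than the real reply_t:

#include <stdio.h>

/* Reduced reply_t: just the four fields the matching rule reads. */
struct row {
	int resp_code, min_ConState, max_ConState, parameter;
};

#define RSP_ANY	(-26)

static int row_matches(const struct row *r, int type, int constate, int param)
{
	return (r->resp_code == RSP_ANY || r->resp_code == type)
		&& constate >= r->min_ConState
		&& (r->max_ConState < 0 || constate <= r->max_ConState)
		&& (r->parameter < 0 || r->parameter == param);
}

int main(void)
{
	/* the "+GMR" row above: {RSP_OK, 101, 103, -1, ...} */
	struct row gmr = { 0 /* RSP_OK */, 101, 103, -1 };

	printf("%d %d\n", row_matches(&gmr, 0, 102, -1),	/* 1 */
	       row_matches(&gmr, 0, 104, -1));			/* 0 */
	return 0;
}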
-
-/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
- * 400: hup, 750: accepted icall */
-struct reply_t gigaset_tab_cid[] =
-{
-/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
- * action, command */
-
-/* dial */
- {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
- {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} },
- {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} },
- {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} },
- {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} },
- {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
- {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
- {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
- {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
- {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
- {RSP_OK, 608, 608, -1, 609, -1},
- {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
- {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
-
- {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
- {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
-
-/* optional dialing responses */
- {EV_BC_OPEN, 650, 650, -1, 651, -1},
- {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
- {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
-
-/* connect */
- {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
- {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
- ACT_NOTIFY_BC_UP} },
- {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
- {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
- ACT_NOTIFY_BC_UP} },
- {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
-
-/* remote hangup */
- {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
- {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
- {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
-
-/* hangup */
- {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
- {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
- {RSP_OK, 401, 401, -1, 402, 5},
- {RSP_ZVLS, 402, 402, 0, 403, 5},
- {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
- {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
- {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
- {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
- {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
-
- {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/* ring */
- {RSP_ZBC, 700, 700, -1, -1, -1, {0} },
- {RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
- {RSP_NMBR, 700, 700, -1, -1, -1, {0} },
- {RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
- {RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
- {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
- {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
-
-/*accept icall*/
- {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
- {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} },
- {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} },
- {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
- {RSP_OK, 723, 723, -1, 724, 5, {0} },
- {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
- {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
- {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
- {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
- {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
- {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
-
- {EV_BC_OPEN, 750, 750, -1, 751, -1},
- {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
-
-/* B channel closed (general case) */
- {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
-
-	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL processing */
- {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
- {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
- {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
- {RSP_LAST}
-};
-
-
-static const struct resp_type_t {
- char *response;
- int resp_code;
- int type;
-}
-resp_type[] =
-{
- {"OK", RSP_OK, RT_NOTHING},
- {"ERROR", RSP_ERROR, RT_NOTHING},
- {"ZSAU", RSP_ZSAU, RT_ZSAU},
- {"ZCAU", RSP_ZCAU, RT_ZCAU},
- {"RING", RSP_RING, RT_RING},
- {"ZGCI", RSP_ZGCI, RT_NUMBER},
- {"ZVLS", RSP_ZVLS, RT_NUMBER},
- {"ZCTP", RSP_ZCTP, RT_NUMBER},
- {"ZDLE", RSP_ZDLE, RT_NUMBER},
- {"ZHLC", RSP_ZHLC, RT_STRING},
- {"ZBC", RSP_ZBC, RT_STRING},
- {"NMBR", RSP_NMBR, RT_STRING},
- {"ZCPN", RSP_ZCPN, RT_STRING},
- {"ZCON", RSP_ZCON, RT_STRING},
- {NULL, 0, 0}
-};
-
-static const struct zsau_resp_t {
- char *str;
- int code;
-}
-zsau_resp[] =
-{
- {"OUTGOING_CALL_PROCEEDING", ZSAU_PROCEEDING},
- {"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
- {"ACTIVE", ZSAU_ACTIVE},
- {"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
- {"NULL", ZSAU_NULL},
- {"DISCONNECT_REQ", ZSAU_DISCONNECT_REQ},
- {NULL, ZSAU_UNKNOWN}
-};
-
-/* check for and remove fixed string prefix
- * If s starts with prefix terminated by a non-alphanumeric character,
- * return pointer to the first character after that, otherwise return NULL.
- */
-static char *skip_prefix(char *s, const char *prefix)
-{
- while (*prefix)
- if (*s++ != *prefix++)
- return NULL;
- if (isalnum(*s))
- return NULL;
- return s;
-}
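The trailing isalnum() check is what keeps a keyword from matching a longer one, e.g. "ZSAU" must not match "ZSAUX". A quick userspace check of the same logic:

#include <ctype.h>
#include <stdio.h>

/* Userspace restatement of skip_prefix() above, for a quick check. */
static char *skip_prefix(char *s, const char *prefix)
{
	while (*prefix)
		if (*s++ != *prefix++)
			return NULL;
	if (isalnum((unsigned char)*s))
		return NULL;
	return s;
}

int main(void)
{
	char a[] = "ZSAU=ACTIVE", b[] = "ZSAUX=1";

	printf("'%s'\n", skip_prefix(a, "ZSAU"));	/* '=ACTIVE' */
	printf("%p\n", (void *)skip_prefix(b, "ZSAU"));	/* NULL: 'X' is alnum */
	return 0;
}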
-
-/* queue event with CID */
-static void add_cid_event(struct cardstate *cs, int cid, int type,
- void *ptr, int parameter)
-{
- unsigned long flags;
- unsigned next, tail;
- struct event_t *event;
-
- gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid);
-
- spin_lock_irqsave(&cs->ev_lock, flags);
-
- tail = cs->ev_tail;
- next = (tail + 1) % MAX_EVENTS;
- if (unlikely(next == cs->ev_head)) {
- dev_err(cs->dev, "event queue full\n");
- kfree(ptr);
- } else {
- event = cs->events + tail;
- event->type = type;
- event->cid = cid;
- event->ptr = ptr;
- event->arg = NULL;
- event->parameter = parameter;
- event->at_state = NULL;
- cs->ev_tail = next;
- }
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-}
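The queue deliberately keeps one slot unused: it reports full when advancing the tail would reach the head, so head == tail can unambiguously mean empty. A small userspace check of the index arithmetic (MAX_EVENTS as in gigaset.h):

#include <stdio.h>

#define MAX_EVENTS 64	/* event queue size, as in gigaset.h */

int main(void)
{
	unsigned head = 5, tail = 4;	/* tail one slot behind head */
	unsigned next = (tail + 1) % MAX_EVENTS;

	printf("empty: %d\n", head == tail);	/* 0 */
	printf("full:  %d\n", next == head);	/* 1: at most 63 queued */
	return 0;
}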
-
-/**
- * gigaset_handle_modem_response() - process received modem response
- * @cs: device descriptor structure.
- *
- * Called by asyncdata/isocdata if a block of data received from the
- * device must be processed as a modem command response. The data is
- * already in the cs structure.
- */
-void gigaset_handle_modem_response(struct cardstate *cs)
-{
- char *eoc, *psep, *ptr;
- const struct resp_type_t *rt;
- const struct zsau_resp_t *zr;
- int cid, parameter;
- u8 type, value;
-
- if (!cs->cbytes) {
- /* ignore additional LFs/CRs (M10x config mode or cx100) */
- gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
- return;
- }
- cs->respdata[cs->cbytes] = 0;
-
- if (cs->at_state.getstring) {
- /* state machine wants next line verbatim */
- cs->at_state.getstring = 0;
- ptr = kstrdup(cs->respdata, GFP_ATOMIC);
- gig_dbg(DEBUG_EVENT, "string==%s", ptr ? ptr : "NULL");
- add_cid_event(cs, 0, RSP_STRING, ptr, 0);
- return;
- }
-
- /* look up response type */
- for (rt = resp_type; rt->response; ++rt) {
- eoc = skip_prefix(cs->respdata, rt->response);
- if (eoc)
- break;
- }
- if (!rt->response) {
- add_cid_event(cs, 0, RSP_NONE, NULL, 0);
-		gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'",
- cs->respdata);
- return;
- }
-
- /* check for CID */
- psep = strrchr(cs->respdata, ';');
- if (psep &&
- !kstrtoint(psep + 1, 10, &cid) &&
- cid >= 1 && cid <= 65535) {
- /* valid CID: chop it off */
- *psep = 0;
- } else {
- /* no valid CID: leave unchanged */
- cid = 0;
- }
-
- gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata);
- if (cid)
- gig_dbg(DEBUG_EVENT, "CID: %d", cid);
-
- switch (rt->type) {
- case RT_NOTHING:
- /* check parameter separator */
- if (*eoc)
- goto bad_param; /* extra parameter */
-
- add_cid_event(cs, cid, rt->resp_code, NULL, 0);
- break;
-
- case RT_RING:
- /* check parameter separator */
- if (!*eoc)
- eoc = NULL; /* no parameter */
- else if (*eoc++ != ',')
- goto bad_param;
-
- add_cid_event(cs, 0, rt->resp_code, NULL, cid);
-
- /* process parameters as individual responses */
- while (eoc) {
- /* look up parameter type */
- psep = NULL;
- for (rt = resp_type; rt->response; ++rt) {
- psep = skip_prefix(eoc, rt->response);
- if (psep)
- break;
- }
-
- /* all legal parameters are of type RT_STRING */
- if (!psep || rt->type != RT_STRING) {
- dev_warn(cs->dev,
- "illegal RING parameter: '%s'\n",
- eoc);
- return;
- }
-
- /* skip parameter value separator */
- if (*psep++ != '=')
- goto bad_param;
-
- /* look up end of parameter */
- eoc = strchr(psep, ',');
- if (eoc)
- *eoc++ = 0;
-
- /* retrieve parameter value */
- ptr = kstrdup(psep, GFP_ATOMIC);
-
- /* queue event */
- add_cid_event(cs, cid, rt->resp_code, ptr, 0);
- }
- break;
-
- case RT_ZSAU:
- /* check parameter separator */
- if (!*eoc) {
- /* no parameter */
- add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE);
- break;
- }
- if (*eoc++ != '=')
- goto bad_param;
-
- /* look up parameter value */
- for (zr = zsau_resp; zr->str; ++zr)
- if (!strcmp(eoc, zr->str))
- break;
- if (!zr->str)
- goto bad_param;
-
- add_cid_event(cs, cid, rt->resp_code, NULL, zr->code);
- break;
-
- case RT_STRING:
- /* check parameter separator */
- if (*eoc++ != '=')
- goto bad_param;
-
- /* retrieve parameter value */
- ptr = kstrdup(eoc, GFP_ATOMIC);
-
- /* queue event */
- add_cid_event(cs, cid, rt->resp_code, ptr, 0);
- break;
-
- case RT_ZCAU:
- /* check parameter separators */
- if (*eoc++ != '=')
- goto bad_param;
- psep = strchr(eoc, ',');
- if (!psep)
- goto bad_param;
- *psep++ = 0;
-
- /* decode parameter values */
- if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) {
- *--psep = ',';
- goto bad_param;
- }
- parameter = (type << 8) | value;
-
- add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
- break;
-
- case RT_NUMBER:
- /* check parameter separator */
- if (*eoc++ != '=')
- goto bad_param;
-
- /* decode parameter value */
- if (kstrtoint(eoc, 10, &parameter))
- goto bad_param;
-
- /* special case ZDLE: set flag before queueing event */
- if (rt->resp_code == RSP_ZDLE)
- cs->dle = parameter;
-
- add_cid_event(cs, cid, rt->resp_code, NULL, parameter);
- break;
-
-bad_param:
- /* parameter unexpected, incomplete or malformed */
- dev_warn(cs->dev, "bad parameter in response '%s'\n",
- cs->respdata);
- add_cid_event(cs, cid, rt->resp_code, NULL, -1);
- break;
-
- default:
- dev_err(cs->dev, "%s: internal error on '%s'\n",
- __func__, cs->respdata);
- }
-}
-EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
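The CID, when present, arrives as a ";<digits>" suffix on the response line; the function locates the last ';' with strrchr(), validates the numeric tail, and only then chops it off. A hedged userspace equivalent, with strtol() standing in for the kernel's kstrtoint():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char resp[] = "ZSAU=ACTIVE;2";
	char *sep = strrchr(resp, ';'), *end;
	long cid = 0;

	if (sep) {
		cid = strtol(sep + 1, &end, 10);
		if (*end == '\0' && cid >= 1 && cid <= 65535)
			*sep = '\0';	/* valid CID: chop it off */
		else
			cid = 0;	/* no valid CID: leave unchanged */
	}
	printf("line='%s' cid=%ld\n", resp, cid);	/* 'ZSAU=ACTIVE', 2 */
	return 0;
}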
-
-/* disconnect_nobc
- * process closing of connection associated with given AT state structure
- * without B channel
- */
-static void disconnect_nobc(struct at_state_t **at_state_p,
- struct cardstate *cs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ++(*at_state_p)->seq_index;
-
- /* revert to selected idle mode */
- if (!cs->cidmode) {
- cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
- cs->commands_pending = 1;
- }
-
- /* check for and deallocate temporary AT state */
- if (!list_empty(&(*at_state_p)->list)) {
- list_del(&(*at_state_p)->list);
- kfree(*at_state_p);
- *at_state_p = NULL;
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-/* disconnect_bc
- * process closing of connection associated with given AT state structure
- * and B channel
- */
-static void disconnect_bc(struct at_state_t *at_state,
- struct cardstate *cs, struct bc_state *bcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ++at_state->seq_index;
-
- /* revert to selected idle mode */
- if (!cs->cidmode) {
- cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
- cs->commands_pending = 1;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- /* invoke hardware specific handler */
- cs->ops->close_bchannel(bcs);
-
- /* notify LL */
- if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
- bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
- gigaset_isdn_hupD(bcs);
- }
-}
-
-/* get_free_channel
- * get a free AT state structure: either one of those associated with the
- * B channels of the Gigaset device, or if none of those is available,
- * a newly allocated one with bcs=NULL
- * The structure should be freed by calling disconnect_nobc() after use.
- */
-static inline struct at_state_t *get_free_channel(struct cardstate *cs,
- int cid)
-/* cids: >0: siemens-cid
- * 0: without cid
- * -1: no cid assigned yet
- */
-{
- unsigned long flags;
- int i;
- struct at_state_t *ret;
-
- for (i = 0; i < cs->channels; ++i)
- if (gigaset_get_channel(cs->bcs + i) >= 0) {
- ret = &cs->bcs[i].at_state;
- ret->cid = cid;
- return ret;
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
- if (ret) {
- gigaset_at_init(ret, NULL, cs, cid);
- list_add(&ret->list, &cs->temp_at_states);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return ret;
-}
-
-static void init_failed(struct cardstate *cs, int mode)
-{
- int i;
- struct at_state_t *at_state;
-
- cs->at_state.pending_commands &= ~PC_INIT;
- cs->mode = mode;
- cs->mstate = MS_UNINITIALIZED;
- gigaset_free_channels(cs);
- for (i = 0; i < cs->channels; ++i) {
- at_state = &cs->bcs[i].at_state;
- if (at_state->pending_commands & PC_CID) {
- at_state->pending_commands &= ~PC_CID;
- at_state->pending_commands |= PC_NOCID;
- cs->commands_pending = 1;
- }
- }
-}
-
-static void schedule_init(struct cardstate *cs, int state)
-{
- if (cs->at_state.pending_commands & PC_INIT) {
- gig_dbg(DEBUG_EVENT, "not scheduling PC_INIT again");
- return;
- }
- cs->mstate = state;
- cs->mode = M_UNKNOWN;
- gigaset_block_channels(cs);
- cs->at_state.pending_commands |= PC_INIT;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_INIT");
- cs->commands_pending = 1;
-}
-
-/* send an AT command
- * adding the "AT" prefix, cid and DLE encapsulation as appropriate
- */
-static void send_command(struct cardstate *cs, const char *cmd,
- struct at_state_t *at_state)
-{
- int cid = at_state->cid;
- struct cmdbuf_t *cb;
- size_t buflen;
-
- buflen = strlen(cmd) + 12; /* DLE ( A T 1 2 3 4 5 <cmd> DLE ) \0 */
- cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, GFP_ATOMIC);
- if (!cb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- return;
- }
- if (cid > 0 && cid <= 65535)
- cb->len = snprintf(cb->buf, buflen,
- cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
- cid, cmd);
- else
- cb->len = snprintf(cb->buf, buflen,
- cs->dle ? "\020(AT%s\020)" : "AT%s",
- cmd);
- cb->offset = 0;
- cb->next = NULL;
- cb->wake_tasklet = NULL;
- cs->ops->write_cmd(cs, cb);
-}
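The 12 bytes of headroom cover the worst-case framing around the command: DLE, '(', "AT", up to five CID digits, DLE, ')' and the terminating NUL, exactly as the comment above spells out. A standalone sketch of the framed format (octal \020 is the DLE character):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];
	int cid = 2, dle = 1;

	/* CID present, DLE on: \020 ( A T <cid> <cmd> \020 ) */
	snprintf(buf, sizeof(buf), dle ? "\020(AT%d%s\020)" : "AT%d%s",
		 cid, "D123\r");
	printf("framed length: %zu\n", strlen(buf));	/* 12 */
	return 0;
}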
-
-static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
-{
- struct at_state_t *at_state;
- int i;
- unsigned long flags;
-
- if (cid == 0)
- return &cs->at_state;
-
- for (i = 0; i < cs->channels; ++i)
- if (cid == cs->bcs[i].at_state.cid)
- return &cs->bcs[i].at_state;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (cid == at_state->cid) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return at_state;
- }
-
- spin_unlock_irqrestore(&cs->lock, flags);
-
- return NULL;
-}
-
-static void bchannel_down(struct bc_state *bcs)
-{
- if (bcs->chstate & CHS_B_UP) {
- bcs->chstate &= ~CHS_B_UP;
- gigaset_isdn_hupB(bcs);
- }
-
- if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
- bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
- gigaset_isdn_hupD(bcs);
- }
-
- gigaset_free_channel(bcs);
-
- gigaset_bcs_reinit(bcs);
-}
-
-static void bchannel_up(struct bc_state *bcs)
-{
- if (bcs->chstate & CHS_B_UP) {
- dev_notice(bcs->cs->dev, "%s: B channel already up\n",
- __func__);
- return;
- }
-
- bcs->chstate |= CHS_B_UP;
- gigaset_isdn_connB(bcs);
-}
-
-static void start_dial(struct at_state_t *at_state, void *data,
- unsigned seq_index)
-{
- struct bc_state *bcs = at_state->bcs;
- struct cardstate *cs = at_state->cs;
- char **commands = data;
- unsigned long flags;
- int i;
-
- bcs->chstate |= CHS_NOTIFY_LL;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (at_state->seq_index != seq_index) {
- spin_unlock_irqrestore(&cs->lock, flags);
- goto error;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- for (i = 0; i < AT_NUM; ++i) {
- kfree(bcs->commands[i]);
- bcs->commands[i] = commands[i];
- }
-
- at_state->pending_commands |= PC_CID;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CID");
- cs->commands_pending = 1;
- return;
-
-error:
- for (i = 0; i < AT_NUM; ++i) {
- kfree(commands[i]);
- commands[i] = NULL;
- }
- at_state->pending_commands |= PC_NOCID;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_NOCID");
- cs->commands_pending = 1;
- return;
-}
-
-static void start_accept(struct at_state_t *at_state)
-{
- struct cardstate *cs = at_state->cs;
- struct bc_state *bcs = at_state->bcs;
- int i;
-
- for (i = 0; i < AT_NUM; ++i) {
- kfree(bcs->commands[i]);
- bcs->commands[i] = NULL;
- }
-
- bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
- bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
- if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
- dev_err(at_state->cs->dev, "out of memory\n");
- /* error reset */
- at_state->pending_commands |= PC_HUP;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
- cs->commands_pending = 1;
- return;
- }
-
- snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
- snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
-
- at_state->pending_commands |= PC_ACCEPT;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_ACCEPT");
- cs->commands_pending = 1;
-}
-
-static void do_start(struct cardstate *cs)
-{
- gigaset_free_channels(cs);
-
- if (cs->mstate != MS_LOCKED)
- schedule_init(cs, MS_INIT);
-
- cs->isdn_up = 1;
- gigaset_isdn_start(cs);
-
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
-}
-
-static void finish_shutdown(struct cardstate *cs)
-{
- if (cs->mstate != MS_LOCKED) {
- cs->mstate = MS_UNINITIALIZED;
- cs->mode = M_UNKNOWN;
- }
-
- /* Tell the LL that the device is not available .. */
- if (cs->isdn_up) {
- cs->isdn_up = 0;
- gigaset_isdn_stop(cs);
- }
-
- /* The rest is done by cleanup_cs() in process context. */
-
- cs->cmd_result = -ENODEV;
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
-}
-
-static void do_shutdown(struct cardstate *cs)
-{
- gigaset_block_channels(cs);
-
- if (cs->mstate == MS_READY) {
- cs->mstate = MS_SHUTDOWN;
- cs->at_state.pending_commands |= PC_SHUTDOWN;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
- cs->commands_pending = 1;
- } else
- finish_shutdown(cs);
-}
-
-static void do_stop(struct cardstate *cs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- cs->connected = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- do_shutdown(cs);
-}
-
-/* Entering cid mode or getting a cid failed:
- * try to initialize the device and try again.
- *
- * channel >= 0: getting cid for the channel failed
- * channel < 0: entering cid mode failed
- *
- * returns 0 on success, <0 on failure
- */
-static int reinit_and_retry(struct cardstate *cs, int channel)
-{
- int i;
-
- if (--cs->retry_count <= 0)
- return -EFAULT;
-
- for (i = 0; i < cs->channels; ++i)
- if (cs->bcs[i].at_state.cid > 0)
- return -EBUSY;
-
- if (channel < 0)
- dev_warn(cs->dev,
- "Could not enter cid mode. Reinit device and try again.\n");
- else {
- dev_warn(cs->dev,
- "Could not get a call id. Reinit device and try again.\n");
- cs->bcs[channel].at_state.pending_commands |= PC_CID;
- }
- schedule_init(cs, MS_INIT);
- return 0;
-}
-
-static int at_state_invalid(struct cardstate *cs,
- struct at_state_t *test_ptr)
-{
- unsigned long flags;
- unsigned channel;
- struct at_state_t *at_state;
- int retval = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
-
- if (test_ptr == &cs->at_state)
- goto exit;
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (at_state == test_ptr)
- goto exit;
-
- for (channel = 0; channel < cs->channels; ++channel)
- if (&cs->bcs[channel].at_state == test_ptr)
- goto exit;
-
- retval = 1;
-exit:
- spin_unlock_irqrestore(&cs->lock, flags);
- return retval;
-}
-
-static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
- struct at_state_t *at_state)
-{
- int retval;
-
- retval = gigaset_isdn_icall(at_state);
- switch (retval) {
- case ICALL_ACCEPT:
- break;
- default:
- dev_err(cs->dev, "internal error: disposition=%d\n", retval);
- /* fall through */
- case ICALL_IGNORE:
- case ICALL_REJECT:
-		/* hang up actively
-		 * The device documentation says this would reject the
-		 * call; in fact it doesn't.
-		 */
- at_state->pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
- }
-}
-
-static int do_lock(struct cardstate *cs)
-{
- int mode;
- int i;
-
- switch (cs->mstate) {
- case MS_UNINITIALIZED:
- case MS_READY:
- if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
- cs->at_state.pending_commands)
- return -EBUSY;
-
- for (i = 0; i < cs->channels; ++i)
- if (cs->bcs[i].at_state.pending_commands)
- return -EBUSY;
-
- if (gigaset_get_channels(cs) < 0)
- return -EBUSY;
-
- break;
- case MS_LOCKED:
- break;
- default:
- return -EBUSY;
- }
-
- mode = cs->mode;
- cs->mstate = MS_LOCKED;
- cs->mode = M_UNKNOWN;
-
- return mode;
-}
-
-static int do_unlock(struct cardstate *cs)
-{
- if (cs->mstate != MS_LOCKED)
- return -EINVAL;
-
- cs->mstate = MS_UNINITIALIZED;
- cs->mode = M_UNKNOWN;
- gigaset_free_channels(cs);
- if (cs->connected)
- schedule_init(cs, MS_INIT);
-
- return 0;
-}
-
-static void do_action(int action, struct cardstate *cs,
- struct bc_state *bcs,
- struct at_state_t **p_at_state, char **pp_command,
- int *p_genresp, int *p_resp_code,
- struct event_t *ev)
-{
- struct at_state_t *at_state = *p_at_state;
- struct bc_state *bcs2;
- unsigned long flags;
-
- int channel;
-
- unsigned char *s, *e;
- int i;
- unsigned long val;
-
- switch (action) {
- case ACT_NOTHING:
- break;
- case ACT_TIMEOUT:
- at_state->waiting = 1;
- break;
- case ACT_INIT:
- cs->at_state.pending_commands &= ~PC_INIT;
- cs->cur_at_seq = SEQ_NONE;
- cs->mode = M_UNIMODEM;
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->cidmode) {
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_free_channels(cs);
- cs->mstate = MS_READY;
- break;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
- cs->commands_pending = 1;
- break;
- case ACT_FAILINIT:
- dev_warn(cs->dev, "Could not initialize the device.\n");
- cs->dle = 0;
- init_failed(cs, M_UNKNOWN);
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_CONFIGMODE:
- init_failed(cs, M_CONFIG);
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_SETDLE1:
- cs->dle = 1;
- /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
- cs->inbuf[0].inputstate &=
- ~(INS_command | INS_DLE_command);
- break;
- case ACT_SETDLE0:
- cs->dle = 0;
- cs->inbuf[0].inputstate =
- (cs->inbuf[0].inputstate & ~INS_DLE_command)
- | INS_command;
- break;
- case ACT_CMODESET:
- if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
- gigaset_free_channels(cs);
- cs->mstate = MS_READY;
- }
- cs->mode = M_CID;
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_UMODESET:
- cs->mode = M_UNIMODEM;
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_FAILCMODE:
- cs->cur_at_seq = SEQ_NONE;
- if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
- init_failed(cs, M_UNKNOWN);
- break;
- }
- if (reinit_and_retry(cs, -1) < 0)
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_FAILUMODE:
- cs->cur_at_seq = SEQ_NONE;
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_HUPMODEM:
- /* send "+++" (hangup in unimodem mode) */
- if (cs->connected) {
- struct cmdbuf_t *cb;
-
- cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC);
- if (!cb) {
- dev_err(cs->dev, "%s: out of memory\n",
- __func__);
- return;
- }
- memcpy(cb->buf, "+++", 3);
- cb->len = 3;
- cb->offset = 0;
- cb->next = NULL;
- cb->wake_tasklet = NULL;
- cs->ops->write_cmd(cs, cb);
- }
- break;
- case ACT_RING:
- /* get fresh AT state structure for new CID */
- at_state = get_free_channel(cs, ev->parameter);
- if (!at_state) {
- dev_warn(cs->dev,
- "RING ignored: could not allocate channel structure\n");
- break;
- }
-
- /* initialize AT state structure
- * note that bcs may be NULL if no B channel is free
- */
- at_state->ConState = 700;
- for (i = 0; i < STR_NUM; ++i) {
- kfree(at_state->str_var[i]);
- at_state->str_var[i] = NULL;
- }
- at_state->int_var[VAR_ZCTP] = -1;
-
- spin_lock_irqsave(&cs->lock, flags);
- at_state->timer_expires = RING_TIMEOUT;
- at_state->timer_active = 1;
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case ACT_ICALL:
- handle_icall(cs, bcs, at_state);
- break;
- case ACT_FAILSDOWN:
- dev_warn(cs->dev, "Could not shut down the device.\n");
- /* fall through */
- case ACT_FAKESDOWN:
- case ACT_SDOWN:
- cs->cur_at_seq = SEQ_NONE;
- finish_shutdown(cs);
- break;
- case ACT_CONNECT:
- if (cs->onechannel) {
- at_state->pending_commands |= PC_DLE1;
- cs->commands_pending = 1;
- break;
- }
- bcs->chstate |= CHS_D_UP;
- gigaset_isdn_connD(bcs);
- cs->ops->init_bchannel(bcs);
- break;
- case ACT_DLE1:
- cs->cur_at_seq = SEQ_NONE;
- bcs = cs->bcs + cs->curchannel;
-
- bcs->chstate |= CHS_D_UP;
- gigaset_isdn_connD(bcs);
- cs->ops->init_bchannel(bcs);
- break;
- case ACT_FAKEHUP:
- at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
- /* fall through */
- case ACT_DISCONNECT:
- cs->cur_at_seq = SEQ_NONE;
- at_state->cid = -1;
- if (!bcs) {
- disconnect_nobc(p_at_state, cs);
- } else if (cs->onechannel && cs->dle) {
-			/* No need to check for other open channels:
-			 * DLE is only used for M10x with one B channel.
-			 */
- at_state->pending_commands |= PC_DLE0;
- cs->commands_pending = 1;
- } else {
- disconnect_bc(at_state, cs, bcs);
- }
- break;
- case ACT_FAKEDLE0:
- at_state->int_var[VAR_ZDLE] = 0;
- cs->dle = 0;
- /* fall through */
- case ACT_DLE0:
- cs->cur_at_seq = SEQ_NONE;
- bcs2 = cs->bcs + cs->curchannel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- break;
- case ACT_ABORTHUP:
- cs->cur_at_seq = SEQ_NONE;
- dev_warn(cs->dev, "Could not hang up.\n");
- at_state->cid = -1;
- if (!bcs)
- disconnect_nobc(p_at_state, cs);
- else if (cs->onechannel)
- at_state->pending_commands |= PC_DLE0;
- else
- disconnect_bc(at_state, cs, bcs);
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_FAILDLE0:
- cs->cur_at_seq = SEQ_NONE;
- dev_warn(cs->dev, "Error leaving DLE mode.\n");
- cs->dle = 0;
- bcs2 = cs->bcs + cs->curchannel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- schedule_init(cs, MS_RECOVER);
- break;
- case ACT_FAILDLE1:
- cs->cur_at_seq = SEQ_NONE;
- dev_warn(cs->dev,
- "Could not enter DLE mode. Trying to hang up.\n");
- channel = cs->curchannel;
- cs->bcs[channel].at_state.pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
-
- case ACT_CID: /* got cid; start dialing */
- cs->cur_at_seq = SEQ_NONE;
- channel = cs->curchannel;
- if (ev->parameter > 0 && ev->parameter <= 65535) {
- cs->bcs[channel].at_state.cid = ev->parameter;
- cs->bcs[channel].at_state.pending_commands |=
- PC_DIAL;
- cs->commands_pending = 1;
- break;
- }
- /* fall through - bad cid */
- case ACT_FAILCID:
- cs->cur_at_seq = SEQ_NONE;
- channel = cs->curchannel;
- if (reinit_and_retry(cs, channel) < 0) {
- dev_warn(cs->dev,
- "Could not get a call ID. Cannot dial.\n");
- bcs2 = cs->bcs + channel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- }
- break;
- case ACT_ABORTCID:
- cs->cur_at_seq = SEQ_NONE;
- bcs2 = cs->bcs + cs->curchannel;
- disconnect_bc(&bcs2->at_state, cs, bcs2);
- break;
-
- case ACT_DIALING:
- case ACT_ACCEPTED:
- cs->cur_at_seq = SEQ_NONE;
- break;
-
- case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */
- if (bcs)
- disconnect_bc(at_state, cs, bcs);
- else
- disconnect_nobc(p_at_state, cs);
- break;
-
- case ACT_ABORTDIAL: /* error/timeout during dial preparation */
- cs->cur_at_seq = SEQ_NONE;
- at_state->pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
-
-	case ACT_REMOTEREJECT:	/* DISCONNECT_IND after dialing */
- case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
- case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
- at_state->pending_commands |= PC_HUP;
- cs->commands_pending = 1;
- break;
- case ACT_GETSTRING: /* warning: RING, ZDLE, ...
- are not handled properly anymore */
- at_state->getstring = 1;
- break;
- case ACT_SETVER:
- if (!ev->ptr) {
- *p_genresp = 1;
- *p_resp_code = RSP_ERROR;
- break;
- }
- s = ev->ptr;
-
- if (!strcmp(s, "OK")) {
- /* OK without version string: assume old response */
- *p_genresp = 1;
- *p_resp_code = RSP_NONE;
- break;
- }
-
- for (i = 0; i < 4; ++i) {
- val = simple_strtoul(s, (char **) &e, 10);
- if (val > INT_MAX || e == s)
- break;
- if (i == 3) {
- if (*e)
- break;
- } else if (*e != '.')
- break;
- else
- s = e + 1;
- cs->fwver[i] = val;
- }
- if (i != 4) {
- *p_genresp = 1;
- *p_resp_code = RSP_ERROR;
- break;
- }
- cs->gotfwver = 0;
- break;
- case ACT_GOTVER:
- if (cs->gotfwver == 0) {
- cs->gotfwver = 1;
- gig_dbg(DEBUG_EVENT,
- "firmware version %02d.%03d.%02d.%02d",
- cs->fwver[0], cs->fwver[1],
- cs->fwver[2], cs->fwver[3]);
- break;
- }
- /* fall through */
- case ACT_FAILVER:
- cs->gotfwver = -1;
- dev_err(cs->dev, "could not read firmware version.\n");
- break;
- case ACT_ERROR:
- gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
- __func__, at_state->ConState);
- cs->cur_at_seq = SEQ_NONE;
- break;
- case ACT_DEBUG:
- gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
- __func__, ev->type, at_state->ConState);
- break;
- case ACT_WARN:
- dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
- __func__, ev->type, at_state->ConState);
- break;
- case ACT_ZCAU:
- dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
- ev->parameter, at_state->ConState);
- break;
-
- /* events from the LL */
-
- case ACT_DIAL:
- if (!ev->ptr) {
- *p_genresp = 1;
- *p_resp_code = RSP_ERROR;
- break;
- }
- start_dial(at_state, ev->ptr, ev->parameter);
- break;
- case ACT_ACCEPT:
- start_accept(at_state);
- break;
- case ACT_HUP:
- at_state->pending_commands |= PC_HUP;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
- cs->commands_pending = 1;
- break;
-
- /* hotplug events */
-
- case ACT_STOP:
- do_stop(cs);
- break;
- case ACT_START:
- do_start(cs);
- break;
-
- /* events from the interface */
-
- case ACT_IF_LOCK:
- cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
- break;
- case ACT_IF_VER:
- if (ev->parameter != 0)
- cs->cmd_result = -EINVAL;
- else if (cs->gotfwver != 1) {
- cs->cmd_result = -ENOENT;
- } else {
- memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
- cs->cmd_result = 0;
- }
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
- break;
-
- /* events from the proc file system */
-
- case ACT_PROC_CIDMODE:
- spin_lock_irqsave(&cs->lock, flags);
- if (ev->parameter != cs->cidmode) {
- cs->cidmode = ev->parameter;
- if (ev->parameter) {
- cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
- } else {
- cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
- }
- cs->commands_pending = 1;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->waiting = 0;
- wake_up(&cs->waitqueue);
- break;
-
- /* events from the hardware drivers */
-
- case ACT_NOTIFY_BC_DOWN:
- bchannel_down(bcs);
- break;
- case ACT_NOTIFY_BC_UP:
- bchannel_up(bcs);
- break;
- case ACT_SHUTDOWN:
- do_shutdown(cs);
- break;
-
-
- default:
- if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
- *pp_command = at_state->bcs->commands[action - ACT_CMD];
- if (!*pp_command) {
- *p_genresp = 1;
- *p_resp_code = RSP_NULL;
- }
- } else
- dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
- }
-}
-
-/* State machine implementing the call setup and hangup procedures */
-static void process_event(struct cardstate *cs, struct event_t *ev)
-{
- struct bc_state *bcs;
- char *p_command = NULL;
- struct reply_t *rep;
- int rcode;
- int genresp = 0;
- int resp_code = RSP_ERROR;
- struct at_state_t *at_state;
- int index;
- int curact;
- unsigned long flags;
-
- if (ev->cid >= 0) {
- at_state = at_state_from_cid(cs, ev->cid);
- if (!at_state) {
- gig_dbg(DEBUG_EVENT, "event %d for invalid cid %d",
- ev->type, ev->cid);
- gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
- NULL, 0, NULL);
- return;
- }
- } else {
- at_state = ev->at_state;
- if (at_state_invalid(cs, at_state)) {
- gig_dbg(DEBUG_EVENT, "event for invalid at_state %p",
- at_state);
- return;
- }
- }
-
- gig_dbg(DEBUG_EVENT, "connection state %d, event %d",
- at_state->ConState, ev->type);
-
- bcs = at_state->bcs;
-
-	/* Set the pointer to the reply table */
- rep = at_state->replystruct;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (ev->type == EV_TIMEOUT) {
- if (ev->parameter != at_state->timer_index
- || !at_state->timer_active) {
- ev->type = RSP_NONE; /* old timeout */
- gig_dbg(DEBUG_EVENT, "old timeout");
- } else {
- if (at_state->waiting)
- gig_dbg(DEBUG_EVENT, "stopped waiting");
- else
- gig_dbg(DEBUG_EVENT, "timeout occurred");
- }
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- /* if the response belongs to a variable in at_state->int_var[VAR_XXXX]
- or at_state->str_var[STR_XXXX], set it */
- if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
- index = ev->type - RSP_VAR;
- at_state->int_var[index] = ev->parameter;
- } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
- index = ev->type - RSP_STR;
- kfree(at_state->str_var[index]);
- at_state->str_var[index] = ev->ptr;
- ev->ptr = NULL; /* prevent process_events() from
- deallocating ptr */
- }
-
- if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
- at_state->getstring = 0;
-
-	/* Search the reply table for a row matching the modem response
-	   and the current ConState */
- for (;; rep++) {
- rcode = rep->resp_code;
- if (rcode == RSP_LAST) {
- /* found nothing...*/
- dev_warn(cs->dev, "%s: rcode=RSP_LAST: "
- "resp_code %d in ConState %d!\n",
- __func__, ev->type, at_state->ConState);
- return;
- }
- if ((rcode == RSP_ANY || rcode == ev->type)
- && ((int) at_state->ConState >= rep->min_ConState)
- && (rep->max_ConState < 0
- || (int) at_state->ConState <= rep->max_ConState)
- && (rep->parameter < 0 || rep->parameter == ev->parameter))
- break;
- }
-
- p_command = rep->command;
-
- at_state->waiting = 0;
- for (curact = 0; curact < MAXACT; ++curact) {
-		/* The row tells us what to do. */
- do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
- &genresp, &resp_code, ev);
- if (!at_state)
- /* at_state destroyed by disconnect */
- return;
- }
-
-	/* Advance to the ConState specified by the matched row */
- if (rep->new_ConState >= 0)
- at_state->ConState = rep->new_ConState;
-
- if (genresp) {
- spin_lock_irqsave(&cs->lock, flags);
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
- } else {
- /* Send command to modem if not NULL... */
- if (p_command) {
- if (cs->connected)
- send_command(cs, p_command, at_state);
- else
- gigaset_add_event(cs, at_state, RSP_NODEV,
- NULL, 0, NULL);
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!rep->timeout) {
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- } else if (rep->timeout > 0) { /* new timeout */
- at_state->timer_expires = rep->timeout * 10;
- at_state->timer_active = 1;
- ++at_state->timer_index;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- }
-}
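Note the timeout units: a table value of N is stored as N * 10 timer ticks, and with GIG_TICK at 100 ms (see gigaset.h) that comes to N seconds. A one-line check of the arithmetic:

#include <stdio.h>

#define GIG_TICK 100	/* ms per tick, as in gigaset.h */

int main(void)
{
	int timeout = 5;		/* table value, e.g. the "+GMR\r" row */
	int ticks = timeout * 10;	/* as stored by process_event() */

	printf("%d ms\n", ticks * GIG_TICK);	/* 5000 ms == 5 s */
	return 0;
}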
-
-static void schedule_sequence(struct cardstate *cs,
- struct at_state_t *at_state, int sequence)
-{
- cs->cur_at_seq = sequence;
- gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
-}
-
-static void process_command_flags(struct cardstate *cs)
-{
- struct at_state_t *at_state = NULL;
- struct bc_state *bcs;
- int i;
- int sequence;
- unsigned long flags;
-
- cs->commands_pending = 0;
-
- if (cs->cur_at_seq) {
- gig_dbg(DEBUG_EVENT, "not searching scheduled commands: busy");
- return;
- }
-
- gig_dbg(DEBUG_EVENT, "searching scheduled commands");
-
- sequence = SEQ_NONE;
-
- /* clear pending_commands and hangup channels on shutdown */
- if (cs->at_state.pending_commands & PC_SHUTDOWN) {
- cs->at_state.pending_commands &= ~PC_CIDMODE;
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- at_state = &bcs->at_state;
- at_state->pending_commands &=
- ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
- if (at_state->cid > 0)
- at_state->pending_commands |= PC_HUP;
- if (at_state->pending_commands & PC_CID) {
- at_state->pending_commands |= PC_NOCID;
- at_state->pending_commands &= ~PC_CID;
- }
- }
- }
-
- /* clear pending_commands and hangup channels on reset */
- if (cs->at_state.pending_commands & PC_INIT) {
- cs->at_state.pending_commands &= ~PC_CIDMODE;
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- at_state = &bcs->at_state;
- at_state->pending_commands &=
- ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
- if (at_state->cid > 0)
- at_state->pending_commands |= PC_HUP;
- if (cs->mstate == MS_RECOVER) {
- if (at_state->pending_commands & PC_CID) {
- at_state->pending_commands |= PC_NOCID;
- at_state->pending_commands &= ~PC_CID;
- }
- }
- }
- }
-
- /* only switch back to unimodem mode if no commands are pending and
- * no channels are up */
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->at_state.pending_commands == PC_UMMODE
- && !cs->cidmode
- && list_empty(&cs->temp_at_states)
- && cs->mode == M_CID) {
- sequence = SEQ_UMMODE;
- at_state = &cs->at_state;
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- if (bcs->at_state.pending_commands ||
- bcs->at_state.cid > 0) {
- sequence = SEQ_NONE;
- break;
- }
- }
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->at_state.pending_commands &= ~PC_UMMODE;
- if (sequence != SEQ_NONE) {
- schedule_sequence(cs, at_state, sequence);
- return;
- }
-
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- if (bcs->at_state.pending_commands & PC_HUP) {
- if (cs->dle) {
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
- return;
- }
- bcs->at_state.pending_commands &= ~PC_HUP;
- if (bcs->at_state.pending_commands & PC_CID) {
- /* not yet dialing: PC_NOCID is sufficient */
- bcs->at_state.pending_commands |= PC_NOCID;
- bcs->at_state.pending_commands &= ~PC_CID;
- } else {
- schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
- return;
- }
- }
- if (bcs->at_state.pending_commands & PC_NOCID) {
- bcs->at_state.pending_commands &= ~PC_NOCID;
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
- return;
- } else if (bcs->at_state.pending_commands & PC_DLE0) {
- bcs->at_state.pending_commands &= ~PC_DLE0;
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
- return;
- }
- }
-
- list_for_each_entry(at_state, &cs->temp_at_states, list)
- if (at_state->pending_commands & PC_HUP) {
- at_state->pending_commands &= ~PC_HUP;
- schedule_sequence(cs, at_state, SEQ_HUP);
- return;
- }
-
- if (cs->at_state.pending_commands & PC_INIT) {
- cs->at_state.pending_commands &= ~PC_INIT;
- cs->dle = 0;
- cs->inbuf->inputstate = INS_command;
- schedule_sequence(cs, &cs->at_state, SEQ_INIT);
- return;
- }
- if (cs->at_state.pending_commands & PC_SHUTDOWN) {
- cs->at_state.pending_commands &= ~PC_SHUTDOWN;
- schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
- return;
- }
- if (cs->at_state.pending_commands & PC_CIDMODE) {
- cs->at_state.pending_commands &= ~PC_CIDMODE;
- if (cs->mode == M_UNIMODEM) {
- cs->retry_count = 1;
- schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
- return;
- }
- }
-
- for (i = 0; i < cs->channels; ++i) {
- bcs = cs->bcs + i;
- if (bcs->at_state.pending_commands & PC_DLE1) {
- bcs->at_state.pending_commands &= ~PC_DLE1;
- cs->curchannel = bcs->channel;
- schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
- return;
- }
- if (bcs->at_state.pending_commands & PC_ACCEPT) {
- bcs->at_state.pending_commands &= ~PC_ACCEPT;
- schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
- return;
- }
- if (bcs->at_state.pending_commands & PC_DIAL) {
- bcs->at_state.pending_commands &= ~PC_DIAL;
- schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
- return;
- }
- if (bcs->at_state.pending_commands & PC_CID) {
- switch (cs->mode) {
- case M_UNIMODEM:
- cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
- cs->commands_pending = 1;
- return;
- case M_UNKNOWN:
- schedule_init(cs, MS_INIT);
- return;
- }
- bcs->at_state.pending_commands &= ~PC_CID;
- cs->curchannel = bcs->channel;
- cs->retry_count = 2;
- schedule_sequence(cs, &cs->at_state, SEQ_CID);
- return;
- }
- }
-}
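The function drains at most one piece of scheduled work per call, in a fixed precedence: teardown first, then device-level sequences, then connection building. A sketch of the order in which the flags above are checked (labels only, not new API):

#include <stdio.h>

/* Illustration only: the order in which process_command_flags() above
 * checks pending work; the first hit schedules one AT sequence and the
 * function returns. */
static const char *precedence[] = {
	"PC_UMMODE  (only when fully idle)",
	"PC_HUP / PC_NOCID / PC_DLE0  (per B channel)",
	"PC_HUP  (temporary AT states)",
	"PC_INIT",
	"PC_SHUTDOWN",
	"PC_CIDMODE",
	"PC_DLE1 / PC_ACCEPT / PC_DIAL / PC_CID  (per B channel)",
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(precedence) / sizeof(*precedence); i++)
		printf("%u. %s\n", i + 1, precedence[i]);
	return 0;
}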
-
-static void process_events(struct cardstate *cs)
-{
- struct event_t *ev;
- unsigned head, tail;
- int i;
- int check_flags = 0;
- int was_busy;
- unsigned long flags;
-
- spin_lock_irqsave(&cs->ev_lock, flags);
- head = cs->ev_head;
-
- for (i = 0; i < 2 * MAX_EVENTS; ++i) {
- tail = cs->ev_tail;
- if (tail == head) {
- if (!check_flags && !cs->commands_pending)
- break;
- check_flags = 0;
- spin_unlock_irqrestore(&cs->ev_lock, flags);
- process_command_flags(cs);
- spin_lock_irqsave(&cs->ev_lock, flags);
- tail = cs->ev_tail;
- if (tail == head) {
- if (!cs->commands_pending)
- break;
- continue;
- }
- }
-
- ev = cs->events + head;
- was_busy = cs->cur_at_seq != SEQ_NONE;
- spin_unlock_irqrestore(&cs->ev_lock, flags);
- process_event(cs, ev);
- spin_lock_irqsave(&cs->ev_lock, flags);
- kfree(ev->ptr);
- ev->ptr = NULL;
- if (was_busy && cs->cur_at_seq == SEQ_NONE)
- check_flags = 1;
-
- head = (head + 1) % MAX_EVENTS;
- cs->ev_head = head;
- }
-
- spin_unlock_irqrestore(&cs->ev_lock, flags);
-
- if (i == 2 * MAX_EVENTS) {
- dev_err(cs->dev,
- "infinite loop in process_events; aborting.\n");
- }
-}
-
-/* tasklet scheduled on any event received from the Gigaset device
- * parameter:
- * data ISDN controller state structure
- */
-void gigaset_handle_event(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *) data;
-
- /* handle incoming data on control/common channel */
- if (cs->inbuf->head != cs->inbuf->tail) {
- gig_dbg(DEBUG_INTR, "processing new data");
- cs->ops->handle_input(cs->inbuf);
- }
-
- process_events(cs);
-}
diff --git a/drivers/staging/isdn/gigaset/gigaset.h b/drivers/staging/isdn/gigaset/gigaset.h
deleted file mode 100644
index 0ecc2b5ea553..000000000000
--- a/drivers/staging/isdn/gigaset/gigaset.h
+++ /dev/null
@@ -1,827 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Siemens Gigaset 307x driver
- * Common header file for all connection variants
- *
- * Written by Stefan Eilers
- * and Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#ifndef GIGASET_H
-#define GIGASET_H
-
-/* define global prefix for pr_ macros in linux/kernel.h */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ppp_defs.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-
-#define GIG_VERSION {0, 5, 0, 0}
-#define GIG_COMPAT {0, 4, 0, 0}
-
-#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
-#define MAX_RESP_SIZE 511 /* Max. size of a response string */
-
-#define MAX_EVENTS 64 /* size of event queue */
-
-#define RBUFSIZE 8192
-
-#define GIG_TICK 100 /* in milliseconds */
-
-/* timeout values (unit: 1 sec) */
-#define INIT_TIMEOUT 1
-
-/* timeout values (unit: 0.1 sec) */
-#define RING_TIMEOUT 3 /* for additional parameters to RING */
-#define BAS_TIMEOUT 20 /* for response to Base USB ops */
-#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */
-
-#define BAS_RETRY 3 /* max. retries for base USB ops */
-
-#define MAXACT 3
-
-extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
-
-/* debug flags, combine by adding/bitwise OR */
-enum debuglevel {
- DEBUG_INTR = 0x00008, /* interrupt processing */
- DEBUG_CMD = 0x00020, /* sent/received LL commands */
- DEBUG_STREAM = 0x00040, /* application data stream I/O events */
- DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
- DEBUG_LLDATA = 0x00100, /* sent/received LL data */
- DEBUG_EVENT = 0x00200, /* event processing */
- DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
- DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */
- DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */
- DEBUG_MCMD = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
- DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data
- structures */
- DEBUG_SUSPEND = 0x10000, /* suspend/resume processing */
- DEBUG_OUTPUT = 0x20000, /* output to device */
- DEBUG_ISO = 0x40000, /* isochronous transfers */
- DEBUG_IF = 0x80000, /* character device operations */
- DEBUG_USBREQ = 0x100000, /* USB communication (except payload
- data) */
- DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when
- MS_LOCKED */
-
- DEBUG_ANY = 0x3fffff, /* print message if any of the others is
- activated */
-};
-
-#ifdef CONFIG_GIGASET_DEBUG
-
-#define gig_dbg(level, format, arg...) \
- do { \
- if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
- printk(KERN_DEBUG KBUILD_MODNAME ": " format "\n", \
- ## arg); \
- } while (0)
-#define DEBUG_DEFAULT (DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
-
-#else
-
-#define gig_dbg(level, format, arg...) do {} while (0)
-#define DEBUG_DEFAULT 0
-
-#endif
-
-void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
- size_t len, const unsigned char *buf);
-
-/* connection state */
-#define ZSAU_NONE 0
-#define ZSAU_PROCEEDING 1
-#define ZSAU_CALL_DELIVERED 2
-#define ZSAU_ACTIVE 3
-#define ZSAU_DISCONNECT_IND 4
-#define ZSAU_NULL 5
-#define ZSAU_DISCONNECT_REQ 6
-#define ZSAU_UNKNOWN -1
-
-/* USB control transfer requests */
-#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
-#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
-
-/* interrupt pipe messages */
-#define HD_B1_FLOW_CONTROL 0x80
-#define HD_B2_FLOW_CONTROL 0x81
-#define HD_RECEIVEATDATA_ACK (0x35) /* 3070 */
-#define HD_READY_SEND_ATDATA (0x36) /* 3070 */
-#define HD_OPEN_ATCHANNEL_ACK (0x37) /* 3070 */
-#define HD_CLOSE_ATCHANNEL_ACK (0x38) /* 3070 */
-#define HD_DEVICE_INIT_OK (0x11) /* ISurf USB + 3070 */
-#define HD_OPEN_B1CHANNEL_ACK (0x51) /* ISurf USB + 3070 */
-#define HD_OPEN_B2CHANNEL_ACK (0x52) /* ISurf USB + 3070 */
-#define HD_CLOSE_B1CHANNEL_ACK (0x53) /* ISurf USB + 3070 */
-#define HD_CLOSE_B2CHANNEL_ACK (0x54) /* ISurf USB + 3070 */
-#define HD_SUSPEND_END (0x61) /* ISurf USB */
-#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) /* ISurf USB + 3070 */
-
-/* control requests */
-#define HD_OPEN_B1CHANNEL (0x23) /* ISurf USB + 3070 */
-#define HD_CLOSE_B1CHANNEL (0x24) /* ISurf USB + 3070 */
-#define HD_OPEN_B2CHANNEL (0x25) /* ISurf USB + 3070 */
-#define HD_CLOSE_B2CHANNEL (0x26) /* ISurf USB + 3070 */
-#define HD_RESET_INTERRUPT_PIPE (0x27) /* ISurf USB + 3070 */
-#define HD_DEVICE_INIT_ACK (0x34) /* ISurf USB + 3070 */
-#define HD_WRITE_ATMESSAGE (0x12) /* 3070 */
-#define HD_READ_ATMESSAGE (0x13) /* 3070 */
-#define HD_OPEN_ATCHANNEL (0x28) /* 3070 */
-#define HD_CLOSE_ATCHANNEL (0x29) /* 3070 */
-
-/* number of B channels supported by base driver */
-#define BAS_CHANNELS 2
-
-/* USB frames for isochronous transfer */
-#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
-#define BAS_NUMFRAMES 8 /* number of frames per URB */
-#define BAS_MAXFRAME 16 /* allocated bytes per frame */
-#define BAS_NORMFRAME 8 /* send size without flow control */
-#define BAS_HIGHFRAME 10 /* " " with positive flow control */
-#define BAS_LOWFRAME 5 /* " " with negative flow control */
-#define BAS_CORRFRAMES	4	/* flow control multiplier */
-
-#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isoc in buf
- * per URB */
-#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */
-#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */
-
-#define BAS_INURBS 3
-#define BAS_OUTURBS 3
-
-/* variable commands in struct bc_state */
-#define AT_ISO 0
-#define AT_DIAL 1
-#define AT_MSN 2
-#define AT_BC 3
-#define AT_PROTO 4
-#define AT_TYPE 5
-#define AT_CLIP 6
-/* total number */
-#define AT_NUM 7
-
-/* variables in struct at_state_t */
-/* - numeric */
-#define VAR_ZSAU 0
-#define VAR_ZDLE 1
-#define VAR_ZCTP 2
-/* total number */
-#define VAR_NUM 3
-/* - string */
-#define STR_NMBR 0
-#define STR_ZCPN 1
-#define STR_ZCON 2
-#define STR_ZBC 3
-#define STR_ZHLC 4
-/* total number */
-#define STR_NUM 5
-
-/* event types */
-#define EV_TIMEOUT -105
-#define EV_IF_VER -106
-#define EV_PROC_CIDMODE -107
-#define EV_SHUTDOWN -108
-#define EV_START -110
-#define EV_STOP -111
-#define EV_IF_LOCK -112
-#define EV_ACCEPT -114
-#define EV_DIAL -115
-#define EV_HUP -116
-#define EV_BC_OPEN -117
-#define EV_BC_CLOSED -118
-
-/* input state */
-#define INS_command 0x0001 /* receiving messages (not payload data) */
-#define INS_DLE_char 0x0002 /* DLE flag received (in DLE mode) */
-#define INS_byte_stuff 0x0004
-#define INS_have_data 0x0008
-#define INS_DLE_command 0x0020 /* DLE message start (<DLE> X) received */
-#define INS_flag_hunt 0x0040
-
-/* channel state */
-#define CHS_D_UP 0x01
-#define CHS_B_UP 0x02
-#define CHS_NOTIFY_LL 0x04
-
-#define ICALL_REJECT 0
-#define ICALL_ACCEPT 1
-#define ICALL_IGNORE 2
-
-/* device state */
-#define MS_UNINITIALIZED 0
-#define MS_INIT 1
-#define MS_LOCKED 2
-#define MS_SHUTDOWN 3
-#define MS_RECOVER 4
-#define MS_READY 5
-
-/* mode */
-#define M_UNKNOWN 0
-#define M_CONFIG 1
-#define M_UNIMODEM 2
-#define M_CID 3
-
-/* start mode */
-#define SM_LOCKED 0
-#define SM_ISDN 1 /* default */
-
-/* layer 2 protocols (AT^SBPR=...) */
-#define L2_BITSYNC 0
-#define L2_HDLC 1
-#define L2_VOICE 2
-
-struct gigaset_ops;
-struct gigaset_driver;
-
-struct usb_cardstate;
-struct ser_cardstate;
-struct bas_cardstate;
-
-struct bc_state;
-struct usb_bc_state;
-struct ser_bc_state;
-struct bas_bc_state;
-
-struct reply_t {
- int resp_code; /* RSP_XXXX */
- int min_ConState; /* <0 => ignore */
- int max_ConState; /* <0 => ignore */
- int parameter; /* e.g. ZSAU_XXXX <0: ignore*/
- int new_ConState; /* <0 => ignore */
- int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX*/
- int action[MAXACT]; /* ACT_XXXX */
- char *command; /* NULL==none */
-};
-
-extern struct reply_t gigaset_tab_cid[];
-extern struct reply_t gigaset_tab_nocid[];
-
-struct inbuf_t {
- struct cardstate *cs;
- int inputstate;
- int head, tail;
- unsigned char data[RBUFSIZE];
-};
-
-/* isochronous write buffer structure
- * circular buffer with pad area for extraction of complete USB frames
- * - data[read..nextread-1] is valid data already submitted to the USB subsystem
- * - data[nextread..write-1] is valid data yet to be sent
- * - data[write] is the next byte to write to
- *   - in byte-oriented L2 protocols, it is completely free
- *   - in bit-oriented L2 protocols, it may contain a partial byte of valid data
- * - data[write+1..read-1] is free
- * - wbits is the number of valid data bits in data[write], starting at the LSB
- * - writesem is the semaphore for writing to the buffer:
- * if writesem <= 0, data[write..read-1] is currently being written to
- * - idle contains the byte value to repeat when the end of valid data is
- * reached; if nextread==write (buffer contains no data to send), either the
- * BAS_OUTBUFPAD bytes immediately before data[write] (if
- * write>=BAS_OUTBUFPAD) or those of the pad area (if write<BAS_OUTBUFPAD)
- * are also filled with that value
- */
-struct isowbuf_t {
- int read;
- int nextread;
- int write;
- atomic_t writesem;
- int wbits;
- unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
- unsigned char idle;
-};
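Given these invariants, both the amount of data still to send and the free space follow from the three indices by modular arithmetic over the padded buffer length. A hedged userspace sketch, with the sizes defined above:

#include <stdio.h>

#define BAS_OUTBUFSIZE	4096
#define BAS_OUTBUFPAD	16	/* == BAS_MAXFRAME */
#define BUFLEN		(BAS_OUTBUFSIZE + BAS_OUTBUFPAD)

int main(void)
{
	int read = 4100, nextread = 4105, write = 10;	/* wrapped around */

	/* data not yet handed to the USB subsystem: [nextread, write) */
	int pending = (write - nextread + BUFLEN) % BUFLEN;
	/* free area: [write + 1, read) */
	int room = (read - write - 1 + BUFLEN) % BUFLEN;

	printf("pending=%d room=%d\n", pending, room);	/* 17 and 4089 */
	return 0;
}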
-
-/* isochronous write URB context structure
- * data to be stored along with the URB and retrieved when it is returned
- * as completed by the USB subsystem
- * - urb: pointer to the URB itself
- * - bcs: pointer to the B Channel control structure
- * - limit: end of write buffer area covered by this URB
- * - status: URB completion status
- */
-struct isow_urbctx_t {
- struct urb *urb;
- struct bc_state *bcs;
- int limit;
- int status;
-};
-
-/* AT state structure
- * data associated with the state of an ISDN connection, whether or not
- * it is currently assigned a B channel
- */
-struct at_state_t {
- struct list_head list;
- int waiting;
- int getstring;
- unsigned timer_index;
- unsigned long timer_expires;
- int timer_active;
- unsigned int ConState; /* State of connection */
- struct reply_t *replystruct;
- int cid;
- int int_var[VAR_NUM]; /* see VAR_XXXX */
- char *str_var[STR_NUM]; /* see STR_XXXX */
- unsigned pending_commands; /* see PC_XXXX */
- unsigned seq_index;
-
- struct cardstate *cs;
- struct bc_state *bcs;
-};
-
-struct event_t {
- int type;
- void *ptr, *arg;
- int parameter;
- int cid;
- struct at_state_t *at_state;
-};
-
-/* This buffer holds all information about the used B-Channel */
-struct bc_state {
- struct sk_buff *tx_skb; /* Current transfer buffer to modem */
- struct sk_buff_head squeue; /* B-Channel send Queue */
-
- /* Variables for debugging .. */
- int corrupted; /* Counter for corrupted packages */
- int trans_down; /* Counter of packages (downstream) */
- int trans_up; /* Counter of packages (upstream) */
-
- struct at_state_t at_state;
-
- /* receive buffer */
- unsigned rx_bufsize; /* max size accepted by application */
- struct sk_buff *rx_skb;
- __u16 rx_fcs;
- int inputstate; /* see INS_XXXX */
-
- int channel;
-
- struct cardstate *cs;
-
- unsigned chstate; /* bitmap (CHS_*) */
- int ignore;
- unsigned proto2; /* layer 2 protocol (L2_*) */
- char *commands[AT_NUM]; /* see AT_XXXX */
-
-#ifdef CONFIG_GIGASET_DEBUG
- int emptycount;
-#endif
- int busy;
- int use_count;
-
- /* private data of hardware drivers */
- union {
- struct ser_bc_state *ser; /* serial hardware driver */
- struct usb_bc_state *usb; /* usb hardware driver (m105) */
- struct bas_bc_state *bas; /* usb hardware driver (base) */
- } hw;
-
- void *ap; /* associated LL application */
- int apconnstate; /* LL application connection state */
- spinlock_t aplock;
-};
-
-struct cardstate {
- struct gigaset_driver *driver;
- unsigned minor_index;
- struct device *dev;
- struct device *tty_dev;
- unsigned flags;
-
- const struct gigaset_ops *ops;
-
- /* Stuff to handle communication */
- wait_queue_head_t waitqueue;
- int waiting;
- int mode; /* see M_XXXX */
- int mstate; /* Modem state: see MS_XXXX */
- /* only changed by the event layer */
- int cmd_result;
-
- int channels;
- struct bc_state *bcs; /* Array of struct bc_state */
-
- int onechannel; /* data and commands transmitted in one
- stream (M10x) */
-
- spinlock_t lock;
- struct at_state_t at_state; /* at_state_t for cid == 0 */
- struct list_head temp_at_states;/* list of temporary "struct
- at_state_t"s without B channel */
-
- struct inbuf_t *inbuf;
-
- struct cmdbuf_t *cmdbuf, *lastcmdbuf;
- spinlock_t cmdlock;
- unsigned curlen, cmdbytes;
-
- struct tty_port port;
- struct tasklet_struct if_wake_tasklet;
- unsigned control_state;
-
- unsigned fwver[4];
- int gotfwver;
-
- unsigned running; /* !=0 if events are handled */
- unsigned connected; /* !=0 if hardware is connected */
- unsigned isdn_up; /* !=0 after gigaset_isdn_start() */
-
- unsigned cidmode;
-
- int myid; /* id for communication with LL */
- void *iif; /* LL interface structure */
- unsigned short hw_hdr_len; /* headroom needed in data skbs */
-
- struct reply_t *tabnocid;
- struct reply_t *tabcid;
- int cs_init;
- int ignoreframes; /* frames to ignore after setting up the
- B channel */
- struct mutex mutex; /* locks this structure:
- * connected is not changed,
- * hardware_up is not changed,
- * MState is not changed to or from
- * MS_LOCKED */
-
- struct timer_list timer;
- int retry_count;
- int dle; /* !=0 if DLE mode is active
- (ZDLE=1 received -- M10x only) */
- int cur_at_seq; /* sequence of AT commands being
- processed */
- int curchannel; /* channel those commands are meant
- for */
- int commands_pending; /* flag(s) in xxx.commands_pending have
- been set */
- struct tasklet_struct
- event_tasklet; /* tasklet for serializing AT commands.
- * Scheduled
- * -> for modem responses (and
- * incoming data for M10x)
- * -> on timeout
- * -> after setting bits in
- * xxx.at_state.pending_command
- * (e.g. command from LL) */
- struct tasklet_struct
- write_tasklet; /* tasklet for serial output
- * (not used in base driver) */
-
- /* event queue */
- struct event_t events[MAX_EVENTS];
- unsigned ev_tail, ev_head;
- spinlock_t ev_lock;
-
- /* current modem response */
- unsigned char respdata[MAX_RESP_SIZE + 1];
- unsigned cbytes;
-
- /* private data of hardware drivers */
- union {
- struct usb_cardstate *usb; /* USB hardware driver (m105) */
- struct ser_cardstate *ser; /* serial hardware driver */
- struct bas_cardstate *bas; /* USB hardware driver (base) */
- } hw;
-};
-
-struct gigaset_driver {
- struct list_head list;
- spinlock_t lock; /* locks minor tables and blocked */
- struct tty_driver *tty;
- unsigned have_tty;
- unsigned minor;
- unsigned minors;
- struct cardstate *cs;
- int blocked;
-
- const struct gigaset_ops *ops;
- struct module *owner;
-};
-
-struct cmdbuf_t {
- struct cmdbuf_t *next, *prev;
- int len, offset;
- struct tasklet_struct *wake_tasklet;
- unsigned char buf[0];
-};
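buf[0] here is the pre-C99 spelling of a flexible array member: the payload is allocated together with the header, as if_write() does later in this diff with kmalloc(sizeof(struct cmdbuf_t) + count). A minimal userspace sketch of the same idiom (struct msg and msg_new are illustrative names, not driver code):

```c
#include <stdlib.h>
#include <string.h>

struct msg {
	int len;
	char buf[];		/* C99 spelling of the same idiom */
};

/* allocate header and payload in one block, mirroring
 * kmalloc(sizeof(struct cmdbuf_t) + count) in if_write() */
static struct msg *msg_new(const char *data, int len)
{
	struct msg *m = malloc(sizeof(*m) + len);

	if (m) {
		m->len = len;
		memcpy(m->buf, data, len);
	}
	return m;
}
```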
-
-struct bas_bc_state {
- /* isochronous output state */
- int running;
- atomic_t corrbytes;
- spinlock_t isooutlock;
- struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
- struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl;
- struct isowbuf_t *isooutbuf;
- unsigned numsub; /* submitted URB counter
- (for diagnostic messages only) */
- struct tasklet_struct sent_tasklet;
-
- /* isochronous input state */
- spinlock_t isoinlock;
- struct urb *isoinurbs[BAS_INURBS];
- unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
- struct urb *isoindone; /* completed isoc read URB */
- int isoinstatus; /* status of completed URB */
- int loststatus; /* status of dropped URB */
- unsigned isoinlost; /* number of bytes lost */
- /* state of bit unstuffing algorithm
- (in addition to BC_state.inputstate) */
- unsigned seqlen; /* number of '1' bits not yet
- unstuffed */
- unsigned inbyte, inbits; /* collected bits for next byte */
- /* statistics */
- unsigned goodbytes; /* bytes correctly received */
- unsigned alignerrs; /* frames with incomplete byte at end */
- unsigned fcserrs; /* FCS errors */
- unsigned frameerrs; /* framing errors */
- unsigned giants; /* long frames */
- unsigned runts; /* short frames */
- unsigned aborts; /* HDLC aborts */
- unsigned shared0s; /* '0' bits shared between flags */
- unsigned stolen0s; /* '0' stuff bits also serving as
- leading flag bits */
- struct tasklet_struct rcvd_tasklet;
-};
-
-struct gigaset_ops {
- /* Called from ev-layer.c/interface.c for sending AT commands to the
- device */
- int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb);
-
- /* Called from interface.c for additional device control */
- int (*write_room)(struct cardstate *cs);
- int (*chars_in_buffer)(struct cardstate *cs);
- int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
-
- /* Called from ev-layer.c after setting up connection
- * Should call gigaset_bchannel_up(), when finished. */
- int (*init_bchannel)(struct bc_state *bcs);
-
- /* Called from ev-layer.c after hanging up
- * Should call gigaset_bchannel_down(), when finished. */
- int (*close_bchannel)(struct bc_state *bcs);
-
- /* Called by gigaset_initcs() for setting up bcs->hw.xxx */
- int (*initbcshw)(struct bc_state *bcs);
-
- /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
- void (*freebcshw)(struct bc_state *bcs);
-
- /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
- void (*reinitbcshw)(struct bc_state *bcs);
-
- /* Called by gigaset_initcs() for setting up cs->hw.xxx */
- int (*initcshw)(struct cardstate *cs);
-
- /* Called by gigaset_freecs() for freeing cs->hw.xxx */
- void (*freecshw)(struct cardstate *cs);
-
- /* Called from common.c/interface.c for additional serial port
- control */
- int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state,
- unsigned new_state);
- int (*baud_rate)(struct cardstate *cs, unsigned cflag);
- int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
-
- /* Called from LL interface to put an skb into the send-queue.
- * After sending is completed, gigaset_skb_sent() must be called
- * with the skb's link layer header preserved. */
- int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
-
- /* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
- void (*handle_input)(struct inbuf_t *inbuf);
-
-};
-
-/* = Common structures and definitions =======================================
- */
-
-/* Parser states for DLE-Event:
- * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
- * <DLE_FLAG>: 0x10
- * <EVENT>: ((a-z)* | (A-Z)* | (0-9)*)+
- */
-#define DLE_FLAG 0x10
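A minimal standalone sketch of recognizing this framing; parse_dle_event is an illustrative helper, not a driver function:

```c
#include <stdio.h>

#define DLE_FLAG 0x10

/* recognize one DLE event of the form <DLE> 'X' <event chars> <DLE> '.' */
static int parse_dle_event(const unsigned char *s, int len,
			   char *out, int outlen)
{
	int i, n = 0;

	if (len < 4 || s[0] != DLE_FLAG || s[1] != 'X')
		return -1;
	for (i = 2; i + 1 < len; i++) {
		if (s[i] == DLE_FLAG && s[i + 1] == '.') {
			out[n] = '\0';
			return n;		/* event name length */
		}
		if (n < outlen - 1)
			out[n++] = s[i];
	}
	return -1;				/* incomplete event */
}

int main(void)
{
	unsigned char msg[] = { DLE_FLAG, 'X', 'R', 'I', 'N', 'G',
				DLE_FLAG, '.' };
	char ev[16];

	if (parse_dle_event(msg, sizeof(msg), ev, sizeof(ev)) > 0)
		printf("event: %s\n", ev);	/* prints "event: RING" */
	return 0;
}
```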
-
-/* ===========================================================================
- * Functions implemented in asyncdata.c
- */
-
-/* Called from LL interface to put an skb into the send queue. */
-int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
-
-/* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
-void gigaset_m10x_input(struct inbuf_t *inbuf);
-
-/* ===========================================================================
- * Functions implemented in isocdata.c
- */
-
-/* Called from LL interface to put an skb into the send queue. */
-int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
-
-/* Called from ev-layer.c to process a block of data
- * received through the common/control channel. */
-void gigaset_isoc_input(struct inbuf_t *inbuf);
-
-/* Called from bas-gigaset.c to process a block of data
- * received through the isochronous channel */
-void gigaset_isoc_receive(unsigned char *src, unsigned count,
- struct bc_state *bcs);
-
-/* Called from bas-gigaset.c to put a block of data
- * into the isochronous output buffer */
-int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
-
-/* Called from bas-gigaset.c to initialize the isochronous output buffer */
-void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
-
-/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
-int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
-
-/* ===========================================================================
- * Functions implemented in LL interface
- */
-
-/* Called from common.c for setting up/shutting down with the ISDN subsystem */
-void gigaset_isdn_regdrv(void);
-void gigaset_isdn_unregdrv(void);
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
-void gigaset_isdn_unregdev(struct cardstate *cs);
-
-/* Called from hardware module to indicate completion of an skb */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
-void gigaset_isdn_rcv_err(struct bc_state *bcs);
-
-/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
-void gigaset_isdn_start(struct cardstate *cs);
-void gigaset_isdn_stop(struct cardstate *cs);
-int gigaset_isdn_icall(struct at_state_t *at_state);
-void gigaset_isdn_connD(struct bc_state *bcs);
-void gigaset_isdn_hupD(struct bc_state *bcs);
-void gigaset_isdn_connB(struct bc_state *bcs);
-void gigaset_isdn_hupB(struct bc_state *bcs);
-
-/* ===========================================================================
- * Functions implemented in ev-layer.c
- */
-
-/* tasklet called from common.c to process queued events */
-void gigaset_handle_event(unsigned long data);
-
-/* called from isocdata.c / asyncdata.c
- * when a complete modem response line has been received */
-void gigaset_handle_modem_response(struct cardstate *cs);
-
-/* ===========================================================================
- * Functions implemented in proc.c
- */
-
-/* initialize sysfs for device */
-void gigaset_init_dev_sysfs(struct cardstate *cs);
-void gigaset_free_dev_sysfs(struct cardstate *cs);
-
-/* ===========================================================================
- * Functions implemented in common.c/gigaset.h
- */
-
-void gigaset_bcs_reinit(struct bc_state *bcs);
-void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
- struct cardstate *cs, int cid);
-int gigaset_get_channel(struct bc_state *bcs);
-struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
-void gigaset_free_channel(struct bc_state *bcs);
-int gigaset_get_channels(struct cardstate *cs);
-void gigaset_free_channels(struct cardstate *cs);
-void gigaset_block_channels(struct cardstate *cs);
-
-/* Allocate and initialize driver structure. */
-struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
- const char *procname,
- const char *devname,
- const struct gigaset_ops *ops,
- struct module *owner);
-
-/* Deallocate driver structure. */
-void gigaset_freedriver(struct gigaset_driver *drv);
-
-struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
-struct cardstate *gigaset_get_cs_by_id(int id);
-void gigaset_blockdriver(struct gigaset_driver *drv);
-
-/* Allocate and initialize card state. Calls hardware dependent
- gigaset_init[b]cs(). */
-struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
- int onechannel, int ignoreframes,
- int cidmode, const char *modulename);
-
-/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */
-void gigaset_freecs(struct cardstate *cs);
-
-/* Tell common.c that hardware and driver are ready. */
-int gigaset_start(struct cardstate *cs);
-
-/* Tell common.c that the device is not present any more. */
-void gigaset_stop(struct cardstate *cs);
-
-/* Tell common.c that the driver is being unloaded. */
-int gigaset_shutdown(struct cardstate *cs);
-
-/* Append event to the queue.
- * Returns NULL on failure or a pointer to the event on success.
- * ptr must be kmalloc()ed (and not be freed by the caller).
- */
-struct event_t *gigaset_add_event(struct cardstate *cs,
- struct at_state_t *at_state, int type,
- void *ptr, int parameter, void *arg);
-
-/* Called on CONFIG1 command from frontend. */
-int gigaset_enterconfigmode(struct cardstate *cs);
-
-/* cs->lock must not be locked */
-static inline void gigaset_schedule_event(struct cardstate *cs)
-{
- unsigned long flags;
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->running)
- tasklet_schedule(&cs->event_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-/* Tell common.c that B channel has been closed. */
-/* cs->lock must not be locked */
-static inline void gigaset_bchannel_down(struct bc_state *bcs)
-{
- gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
- gigaset_schedule_event(bcs->cs);
-}
-
-/* Tell common.c that B channel has been opened. */
-/* cs->lock must not be locked */
-static inline void gigaset_bchannel_up(struct bc_state *bcs)
-{
- gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
- gigaset_schedule_event(bcs->cs);
-}
-
-/* set up next receive skb for data mode */
-static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- unsigned short hw_hdr_len = cs->hw_hdr_len;
-
- if (bcs->ignore) {
- bcs->rx_skb = NULL;
- } else {
- bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len);
- if (bcs->rx_skb == NULL)
- dev_warn(cs->dev, "could not allocate skb\n");
- else
- skb_reserve(bcs->rx_skb, hw_hdr_len);
- }
- return bcs->rx_skb;
-}
-
-/* append received bytes to inbuf */
-int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
- unsigned numbytes);
-
-/* ===========================================================================
- * Functions implemented in interface.c
- */
-
-/* initialize interface */
-void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
- const char *devname);
-/* release interface */
-void gigaset_if_freedriver(struct gigaset_driver *drv);
-/* add minor */
-void gigaset_if_init(struct cardstate *cs);
-/* remove minor */
-void gigaset_if_free(struct cardstate *cs);
-/* device received data */
-void gigaset_if_receive(struct cardstate *cs,
- unsigned char *buffer, size_t len);
-
-#endif
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
deleted file mode 100644
index 9ddadd07e707..000000000000
--- a/drivers/staging/isdn/gigaset/interface.c
+++ /dev/null
@@ -1,613 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * interface to user space for the gigaset driver
- *
- * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/gigaset_dev.h>
-#include <linux/tty_flip.h>
-#include <linux/module.h>
-
-/*** our ioctls ***/
-
-static int if_lock(struct cardstate *cs, int *arg)
-{
- int cmd = *arg;
-
- gig_dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
-
- if (cmd > 1)
- return -EINVAL;
-
- if (cmd < 0) {
- *arg = cs->mstate == MS_LOCKED;
- return 0;
- }
-
- if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
- cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR | TIOCM_RTS);
- cs->ops->baud_rate(cs, B115200);
- cs->ops->set_line_ctrl(cs, CS8);
- cs->control_state = TIOCM_DTR | TIOCM_RTS;
- }
-
- cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
- NULL, cmd, NULL)) {
- cs->waiting = 0;
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- if (cs->cmd_result >= 0) {
- *arg = cs->cmd_result;
- return 0;
- }
-
- return cs->cmd_result;
-}
-
-static int if_version(struct cardstate *cs, unsigned arg[4])
-{
- static const unsigned version[4] = GIG_VERSION;
- static const unsigned compat[4] = GIG_COMPAT;
- unsigned cmd = arg[0];
-
- gig_dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
-
- switch (cmd) {
- case GIGVER_DRIVER:
- memcpy(arg, version, sizeof version);
- return 0;
- case GIGVER_COMPAT:
- memcpy(arg, compat, sizeof compat);
- return 0;
- case GIGVER_FWBASE:
- cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
- NULL, 0, arg)) {
- cs->waiting = 0;
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- if (cs->cmd_result >= 0)
- return 0;
-
- return cs->cmd_result;
- default:
- return -EINVAL;
- }
-}
-
-static int if_config(struct cardstate *cs, int *arg)
-{
- gig_dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
-
- if (*arg != 1)
- return -EINVAL;
-
- if (cs->mstate != MS_LOCKED)
- return -EBUSY;
-
- if (!cs->connected) {
- pr_err("%s: not connected\n", __func__);
- return -ENODEV;
- }
-
- *arg = 0;
- return gigaset_enterconfigmode(cs);
-}
-
-/*** the terminal driver ***/
-
-static int if_open(struct tty_struct *tty, struct file *filp)
-{
- struct cardstate *cs;
-
- gig_dbg(DEBUG_IF, "%d+%d: %s()",
- tty->driver->minor_start, tty->index, __func__);
-
- cs = gigaset_get_cs_by_tty(tty);
- if (!cs || !try_module_get(cs->driver->owner))
- return -ENODEV;
-
- if (mutex_lock_interruptible(&cs->mutex)) {
- module_put(cs->driver->owner);
- return -ERESTARTSYS;
- }
- tty->driver_data = cs;
-
- ++cs->port.count;
-
- if (cs->port.count == 1) {
- tty_port_tty_set(&cs->port, tty);
- cs->port.low_latency = 1;
- }
-
- mutex_unlock(&cs->mutex);
- return 0;
-}
-
-static void if_close(struct tty_struct *tty, struct file *filp)
-{
- struct cardstate *cs = tty->driver_data;
-
- if (!cs) { /* happens if we didn't find cs in open */
- gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
- return;
- }
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
- else if (!cs->port.count)
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else if (!--cs->port.count)
- tty_port_tty_set(&cs->port, NULL);
-
- mutex_unlock(&cs->mutex);
-
- module_put(cs->driver->owner);
-}
-
-static int if_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct cardstate *cs = tty->driver_data;
- int retval = -ENODEV;
- int int_arg;
- unsigned char buf[6];
- unsigned version[4];
-
- gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- } else {
- retval = 0;
- switch (cmd) {
- case GIGASET_REDIR:
- retval = get_user(int_arg, (int __user *) arg);
- if (retval >= 0)
- retval = if_lock(cs, &int_arg);
- if (retval >= 0)
- retval = put_user(int_arg, (int __user *) arg);
- break;
- case GIGASET_CONFIG:
- retval = get_user(int_arg, (int __user *) arg);
- if (retval >= 0)
- retval = if_config(cs, &int_arg);
- if (retval >= 0)
- retval = put_user(int_arg, (int __user *) arg);
- break;
- case GIGASET_BRKCHARS:
- retval = copy_from_user(&buf,
- (const unsigned char __user *) arg, 6)
- ? -EFAULT : 0;
- if (retval >= 0) {
- gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
- 6, buf);
- retval = cs->ops->brkchars(cs, buf);
- }
- break;
- case GIGASET_VERSION:
- retval = copy_from_user(version,
- (unsigned __user *) arg, sizeof version)
- ? -EFAULT : 0;
- if (retval >= 0)
- retval = if_version(cs, version);
- if (retval >= 0)
- retval = copy_to_user((unsigned __user *) arg,
- version, sizeof version)
- ? -EFAULT : 0;
- break;
- default:
- gig_dbg(DEBUG_IF, "%s: arg not supported - 0x%04x",
- __func__, cmd);
- retval = -ENOIOCTLCMD;
- }
- }
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-#ifdef CONFIG_COMPAT
-static long if_compat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- return if_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static int if_tiocmget(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
- int retval;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- retval = cs->control_state & (TIOCM_RTS | TIOCM_DTR);
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static int if_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct cardstate *cs = tty->driver_data;
- int retval;
- unsigned mc;
-
- gig_dbg(DEBUG_IF, "%u: %s(0x%x, 0x%x)",
- cs->minor_index, __func__, set, clear);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- } else {
- mc = (cs->control_state | set) & ~clear & (TIOCM_RTS | TIOCM_DTR);
- retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
- cs->control_state = mc;
- }
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct cardstate *cs = tty->driver_data;
- struct cmdbuf_t *cb;
- int retval;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- goto done;
- }
- if (cs->mstate != MS_LOCKED) {
- dev_warn(cs->dev, "can't write to unlocked device\n");
- retval = -EBUSY;
- goto done;
- }
- if (count <= 0) {
- /* nothing to do */
- retval = 0;
- goto done;
- }
-
- cb = kmalloc(sizeof(struct cmdbuf_t) + count, GFP_KERNEL);
- if (!cb) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
- retval = -ENOMEM;
- goto done;
- }
-
- memcpy(cb->buf, buf, count);
- cb->len = count;
- cb->offset = 0;
- cb->next = NULL;
- cb->wake_tasklet = &cs->if_wake_tasklet;
- retval = cs->ops->write_cmd(cs, cb);
-done:
- mutex_unlock(&cs->mutex);
- return retval;
-}
-
-static int if_write_room(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
- int retval;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
- } else if (cs->mstate != MS_LOCKED) {
- dev_warn(cs->dev, "can't write to unlocked device\n");
- retval = -EBUSY;
- } else
- retval = cs->ops->write_room(cs);
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static int if_chars_in_buffer(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
- int retval = 0;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected");
- else if (cs->mstate != MS_LOCKED)
- dev_warn(cs->dev, "can't write to unlocked device\n");
- else
- retval = cs->ops->chars_in_buffer(cs);
-
- mutex_unlock(&cs->mutex);
-
- return retval;
-}
-
-static void if_throttle(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
- else
- gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-
- mutex_unlock(&cs->mutex);
-}
-
-static void if_unthrottle(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->driver_data;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
- else
- gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-
- mutex_unlock(&cs->mutex);
-}
-
-static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- struct cardstate *cs = tty->driver_data;
- unsigned int iflag;
- unsigned int cflag;
- unsigned int old_cflag;
- unsigned int control_state, new_state;
-
- gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
-
- mutex_lock(&cs->mutex);
-
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- goto out;
- }
-
- iflag = tty->termios.c_iflag;
- cflag = tty->termios.c_cflag;
- old_cflag = old ? old->c_cflag : cflag;
- gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
- cs->minor_index, iflag, cflag, old_cflag);
-
- /* get a local copy of the current port settings */
- control_state = cs->control_state;
-
- /*
- * Update baud rate.
- * Do not attempt to cache old rates and skip settings,
- * disconnects screw such tricks up completely.
- * Premature optimization is the root of all evil.
- */
-
- /* reassert DTR and (maybe) RTS on transition from B0 */
- if ((old_cflag & CBAUD) == B0) {
- new_state = control_state | TIOCM_DTR;
- /* don't set RTS if using hardware flow control */
- if (!(old_cflag & CRTSCTS))
- new_state |= TIOCM_RTS;
- gig_dbg(DEBUG_IF, "%u: from B0 - set DTR%s",
- cs->minor_index,
- (new_state & TIOCM_RTS) ? " only" : "/RTS");
- cs->ops->set_modem_ctrl(cs, control_state, new_state);
- control_state = new_state;
- }
-
- cs->ops->baud_rate(cs, cflag & CBAUD);
-
- if ((cflag & CBAUD) == B0) {
- /* Drop RTS and DTR */
- gig_dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
- new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
- cs->ops->set_modem_ctrl(cs, control_state, new_state);
- control_state = new_state;
- }
-
- /*
- * Update line control register (LCR)
- */
-
- cs->ops->set_line_ctrl(cs, cflag);
-
- /* save off the modified port settings */
- cs->control_state = control_state;
-
-out:
- mutex_unlock(&cs->mutex);
-}
-
-static const struct tty_operations if_ops = {
- .open = if_open,
- .close = if_close,
- .ioctl = if_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = if_compat_ioctl,
-#endif
- .write = if_write,
- .write_room = if_write_room,
- .chars_in_buffer = if_chars_in_buffer,
- .set_termios = if_set_termios,
- .throttle = if_throttle,
- .unthrottle = if_unthrottle,
- .tiocmget = if_tiocmget,
- .tiocmset = if_tiocmset,
-};
-
-
-/* wakeup tasklet for the write operation */
-static void if_wake(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *)data;
-
- tty_port_tty_wakeup(&cs->port);
-}
-
-/*** interface to common ***/
-
-void gigaset_if_init(struct cardstate *cs)
-{
- struct gigaset_driver *drv;
-
- drv = cs->driver;
- if (!drv->have_tty)
- return;
-
- tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
-
- mutex_lock(&cs->mutex);
- cs->tty_dev = tty_port_register_device(&cs->port, drv->tty,
- cs->minor_index, NULL);
-
- if (!IS_ERR(cs->tty_dev))
- dev_set_drvdata(cs->tty_dev, cs);
- else {
- pr_warn("could not register device to the tty subsystem\n");
- cs->tty_dev = NULL;
- }
- mutex_unlock(&cs->mutex);
-}
-
-void gigaset_if_free(struct cardstate *cs)
-{
- struct gigaset_driver *drv;
-
- drv = cs->driver;
- if (!drv->have_tty)
- return;
-
- tasklet_disable(&cs->if_wake_tasklet);
- tasklet_kill(&cs->if_wake_tasklet);
- cs->tty_dev = NULL;
- tty_unregister_device(drv->tty, cs->minor_index);
-}
-
-/**
- * gigaset_if_receive() - pass a received block of data to the tty device
- * @cs: device descriptor structure.
- * @buffer: received data.
- * @len: number of bytes received.
- *
- * Called by asyncdata/isocdata if a block of data received from the
- * device must be sent to userspace through the ttyG* device.
- */
-void gigaset_if_receive(struct cardstate *cs,
- unsigned char *buffer, size_t len)
-{
- tty_insert_flip_string(&cs->port, buffer, len);
- tty_flip_buffer_push(&cs->port);
-}
-EXPORT_SYMBOL_GPL(gigaset_if_receive);
-
-/* gigaset_if_initdriver
- * Initialize tty interface.
- * parameters:
- * drv Driver
- * procname Name of the driver (e.g. for /proc/tty/drivers)
- * devname Name of the device files (prefix without minor number)
- */
-void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
- const char *devname)
-{
- int ret;
- struct tty_driver *tty;
-
- drv->have_tty = 0;
-
- drv->tty = tty = alloc_tty_driver(drv->minors);
- if (tty == NULL)
- goto enomem;
-
- tty->type = TTY_DRIVER_TYPE_SERIAL;
- tty->subtype = SERIAL_TYPE_NORMAL;
- tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-
- tty->driver_name = procname;
- tty->name = devname;
- tty->minor_start = drv->minor;
-
- tty->init_termios = tty_std_termios;
- tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- tty_set_operations(tty, &if_ops);
-
- ret = tty_register_driver(tty);
- if (ret < 0) {
- pr_err("error %d registering tty driver\n", ret);
- goto error;
- }
- gig_dbg(DEBUG_IF, "tty driver initialized");
- drv->have_tty = 1;
- return;
-
-enomem:
- pr_err("out of memory\n");
-error:
- if (drv->tty)
- put_tty_driver(drv->tty);
-}
-
-void gigaset_if_freedriver(struct gigaset_driver *drv)
-{
- if (!drv->have_tty)
- return;
-
- drv->have_tty = 0;
- tty_unregister_driver(drv->tty);
- put_tty_driver(drv->tty);
-}
diff --git a/drivers/staging/isdn/gigaset/isocdata.c b/drivers/staging/isdn/gigaset/isocdata.c
deleted file mode 100644
index 3ecf6e33ed15..000000000000
--- a/drivers/staging/isdn/gigaset/isocdata.c
+++ /dev/null
@@ -1,1006 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Common data handling layer for bas_gigaset
- *
- * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
- * Hansjoerg Lipp <hjlipp@web.de>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/crc-ccitt.h>
-#include <linux/bitrev.h>
-
-/* access methods for isowbuf_t */
-/* ============================ */
-
-/* initialize buffer structure
- */
-void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
-{
- iwb->read = 0;
- iwb->nextread = 0;
- iwb->write = 0;
- atomic_set(&iwb->writesem, 1);
- iwb->wbits = 0;
- iwb->idle = idle;
- memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
-}
-
-/* compute number of bytes which can be appended to buffer
- * so that there is still room to append a maximum frame of flags
- */
-static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
-{
- int read, write, freebytes;
-
- read = iwb->read;
- write = iwb->write;
- freebytes = read - write;
- if (freebytes > 0) {
- /* no wraparound: need padding space within regular area */
- return freebytes - BAS_OUTBUFPAD;
- } else if (read < BAS_OUTBUFPAD) {
- /* wraparound: can use space up to end of regular area */
- return BAS_OUTBUFSIZE - write;
- } else {
- /* following the wraparound yields more space */
- return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
- }
-}
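Plugging concrete numbers into the three branches (standalone sketch; SIZE and PAD are illustrative stand-ins for BAS_OUTBUFSIZE and BAS_OUTBUFPAD):

```c
#include <stdio.h>

#define SIZE 1024	/* stands in for BAS_OUTBUFSIZE */
#define PAD    64	/* stands in for BAS_OUTBUFPAD  */

/* same logic as isowbuf_freebytes() above */
static int freebytes(int read, int write)
{
	int free = read - write;

	if (free > 0)
		return free - PAD;	  /* no wraparound */
	else if (read < PAD)
		return SIZE - write;	  /* wraparound, pad area unusable */
	else
		return free + SIZE - PAD; /* wraparound via end of buffer */
}

int main(void)
{
	printf("%d\n", freebytes(500, 100));	/* 400 - 64 = 336 */
	printf("%d\n", freebytes(10, 800));	/* 1024 - 800 = 224 */
	printf("%d\n", freebytes(100, 800));	/* -700 + 1024 - 64 = 260 */
	return 0;
}
```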
-
-/* start writing
- * acquire the write semaphore
- * return 0 if acquired, <0 if busy
- */
-static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
-{
- if (!atomic_dec_and_test(&iwb->writesem)) {
- atomic_inc(&iwb->writesem);
- gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
- __func__);
- return -EBUSY;
- }
- gig_dbg(DEBUG_ISO,
- "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
- __func__, iwb->data[iwb->write], iwb->wbits);
- return 0;
-}
-
-/* finish writing
- * release the write semaphore
- * returns the current write position
- */
-static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
-{
- int write = iwb->write;
- atomic_inc(&iwb->writesem);
- return write;
-}
-
-/* append bits to buffer without any checks
- * - data contains bits to append, starting at LSB
- * - nbits is number of bits to append (0..24)
- * must be called with the write semaphore held
- * If more than nbits bits are set in data, the extraneous bits are set in the
- * buffer too, but the write position is only advanced by nbits.
- */
-static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
-{
- int write = iwb->write;
- data <<= iwb->wbits;
- data |= iwb->data[write];
- nbits += iwb->wbits;
- while (nbits >= 8) {
- iwb->data[write++] = data & 0xff;
- write %= BAS_OUTBUFSIZE;
- data >>= 8;
- nbits -= 8;
- }
- iwb->wbits = nbits;
- iwb->data[write] = data & 0xff;
- iwb->write = write;
-}
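A worked trace of the same accumulation step in isolation (standalone sketch; the ring wraparound is omitted for brevity):

```c
#include <stdio.h>

int main(void)
{
	unsigned char out[4] = { 0x05, 0, 0, 0 };  /* 3 pending bits: 101 */
	int write = 0, wbits = 3;
	unsigned data = 0x377;			   /* 10 bits, LSB first */
	int nbits = 10;

	/* merge pending bits, then drain complete bytes */
	data = (data << wbits) | out[write];
	nbits += wbits;
	while (nbits >= 8) {
		out[write++] = data & 0xff;
		data >>= 8;
		nbits -= 8;
	}
	out[write] = data & 0xff;
	printf("bytes: %02x %02x, pending bits: %d\n",
	       out[0], out[1], nbits);	/* bytes: bd 1b, pending bits: 5 */
	return 0;
}
```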
-
-/* put final flag on HDLC bitstream
- * also sets the idle fill byte to the correspondingly shifted flag pattern
- * must be called with the write semaphore held
- */
-static inline void isowbuf_putflag(struct isowbuf_t *iwb)
-{
- int write;
-
- /* add two flags, thus reliably covering one byte */
- isowbuf_putbits(iwb, 0x7e7e, 8);
- /* recover the idle flag byte */
- write = iwb->write;
- iwb->idle = iwb->data[write];
- gig_dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
- /* mask extraneous bits in buffer */
- iwb->data[write] &= (1 << iwb->wbits) - 1;
-}
-
-/* retrieve a block of bytes for sending
- * The requested number of bytes is provided as a contiguous block.
- * If necessary, the frame is filled to the requested number of bytes
- * with the idle value.
- * returns offset to frame, < 0 on busy or error
- */
-int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
-{
- int read, write, limit, src, dst;
- unsigned char pbyte;
-
- read = iwb->nextread;
- write = iwb->write;
- if (likely(read == write)) {
- /* return idle frame */
- return read < BAS_OUTBUFPAD ?
- BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
- }
-
- limit = read + size;
- gig_dbg(DEBUG_STREAM, "%s: read=%d write=%d limit=%d",
- __func__, read, write, limit);
-#ifdef CONFIG_GIGASET_DEBUG
- if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
- pr_err("invalid size %d\n", size);
- return -EINVAL;
- }
-#endif
-
- if (read < write) {
- /* no wraparound in valid data */
- if (limit >= write) {
- /* append idle frame */
- if (isowbuf_startwrite(iwb) < 0)
- return -EBUSY;
- /* write position could have changed */
- write = iwb->write;
- if (limit >= write) {
- pbyte = iwb->data[write]; /* save
- partial byte */
- limit = write + BAS_OUTBUFPAD;
- gig_dbg(DEBUG_STREAM,
- "%s: filling %d->%d with %02x",
- __func__, write, limit, iwb->idle);
- if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
- memset(iwb->data + write, iwb->idle,
- BAS_OUTBUFPAD);
- else {
- /* wraparound, fill entire pad area */
- memset(iwb->data + write, iwb->idle,
- BAS_OUTBUFSIZE + BAS_OUTBUFPAD
- - write);
- limit = 0;
- }
- gig_dbg(DEBUG_STREAM,
- "%s: restoring %02x at %d",
- __func__, pbyte, limit);
- iwb->data[limit] = pbyte; /* restore
- partial byte */
- iwb->write = limit;
- }
- isowbuf_donewrite(iwb);
- }
- } else {
- /* valid data wraparound */
- if (limit >= BAS_OUTBUFSIZE) {
- /* copy wrapped part into pad area */
- src = 0;
- dst = BAS_OUTBUFSIZE;
- while (dst < limit && src < write)
- iwb->data[dst++] = iwb->data[src++];
- if (dst <= limit) {
- /* fill pad area with idle byte */
- memset(iwb->data + dst, iwb->idle,
- BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
- }
- limit = src;
- }
- }
- iwb->nextread = limit;
- return read;
-}
-
-/* dump_bytes
- * write hex bytes to syslog for debugging
- */
-static inline void dump_bytes(enum debuglevel level, const char *tag,
- unsigned char *bytes, int count)
-{
-#ifdef CONFIG_GIGASET_DEBUG
- unsigned char c;
- static char dbgline[3 * 32 + 1];
- int i = 0;
-
- if (!(gigaset_debuglevel & level))
- return;
-
- while (count-- > 0) {
- if (i > sizeof(dbgline) - 4) {
- dbgline[i] = '\0';
- gig_dbg(level, "%s:%s", tag, dbgline);
- i = 0;
- }
- c = *bytes++;
- dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
- i++;
- dbgline[i++] = hex_asc_hi(c);
- dbgline[i++] = hex_asc_lo(c);
- }
- dbgline[i] = '\0';
- gig_dbg(level, "%s:%s", tag, dbgline);
-#endif
-}
-
-/*============================================================================*/
-
-/* bytewise HDLC bitstuffing via table lookup
- * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
- * index: 256*(number of preceding '1' bits) + (next byte to stuff)
- * value: bit 9.. 0 = result bits
- * bit 12..10 = number of trailing '1' bits in result
- * bit 14..13 = number of bits added by stuffing
- */
-static const u16 stufftab[5 * 256] = {
-/* previous 1s = 0: */
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
- 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
- 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
- 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
- 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
- 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
- 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
- 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
- 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
-
-/* previous 1s = 1: */
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
- 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
- 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
- 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
- 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
- 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
- 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
- 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
- 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
-
-/* previous 1s = 2: */
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
- 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
- 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
- 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
- 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
- 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
- 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
- 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
- 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
- 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
- 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
- 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
- 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
- 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
-
-/* previous 1s = 3: */
- 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
- 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
- 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
- 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
- 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
- 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
- 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
- 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
- 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
- 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
- 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
- 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
- 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
- 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
- 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
- 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
-
-/* previous 1s = 4: */
- 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
- 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
- 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
- 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
- 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
- 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
- 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
- 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
- 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
- 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
- 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
- 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
- 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
- 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
- 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
- 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
-};
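The entries follow mechanically from that encoding. Below is a standalone sketch of a possible generator; this is an assumption about how the table was produced, but it reproduces the entries shown:

```c
#include <stdio.h>

/* Recompute one stufftab entry: walk the 8 input bits LSB first and
 * insert a '0' after every run of five consecutive '1' bits. */
static unsigned short stuff_entry(int ones, unsigned char c)
{
	unsigned result = 0;
	int outpos = 0, added = 0, bit;

	for (bit = 0; bit < 8; bit++) {
		int b = (c >> bit) & 1;

		result |= (unsigned)b << outpos++;
		ones = b ? ones + 1 : 0;
		if (ones == 5) {
			outpos++;	/* stuffed bit is already 0 */
			added++;
			ones = 0;
		}
	}
	return result | (ones << 10) | (added << 13);
}

int main(void)
{
	/* matches stufftab[0*256 + 0x1f] and stufftab[4*256 + 0xff] */
	printf("%04x %04x\n", stuff_entry(0, 0x1f), stuff_entry(4, 0xff));
	return 0;				/* prints: 201f 4b7d */
}
```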
-
-/* hdlc_bitstuff_byte
- * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
- * parameters:
- * cin input byte
- * ones number of trailing '1' bits in result before this step
- * iwb pointer to output buffer structure
- * (write semaphore must be held)
- * return value:
- * number of trailing '1' bits in result after this step
- */
-
-static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
- int ones)
-{
- u16 stuff;
- int shiftinc, newones;
-
- /* get stuffing information for input byte
- * value: bit 9.. 0 = result bits
- * bit 12..10 = number of trailing '1' bits in result
- * bit 14..13 = number of bits added by stuffing
- */
- stuff = stufftab[256 * ones + cin];
- shiftinc = (stuff >> 13) & 3;
- newones = (stuff >> 10) & 7;
- stuff &= 0x3ff;
-
- /* append stuffed byte to output stream */
- isowbuf_putbits(iwb, stuff, 8 + shiftinc);
- return newones;
-}
-
-/* hdlc_buildframe
- * Perform HDLC framing with bitstuffing on a byte buffer
- * The input buffer is regarded as a sequence of bits, starting with the least
- * significant bit of the first byte and ending with the most significant bit
- * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
- * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
- * '0' bit is inserted after them.
- * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
- * are appended to the output buffer starting at the given bit position, which
- * is assumed to already contain a leading flag.
- * The output buffer must have room for count + count/5 + 6 more bytes;
- * this is verified before writing.
- * parameters:
- * in input buffer
- * count number of bytes in input buffer
- * iwb pointer to output buffer structure
- * (write semaphore must be held)
- * return value:
- * position of end of packet in output buffer on success,
- * -EAGAIN if write semaphore busy or buffer full
- */
-
-static inline int hdlc_buildframe(struct isowbuf_t *iwb,
- unsigned char *in, int count)
-{
- int ones;
- u16 fcs;
- int end;
- unsigned char c;
-
- if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
- isowbuf_startwrite(iwb) < 0) {
- gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
- __func__, isowbuf_freebytes(iwb));
- return -EAGAIN;
- }
-
- dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
-
- /* bitstuff and checksum input data */
- fcs = PPP_INITFCS;
- ones = 0;
- while (count-- > 0) {
- c = *in++;
- ones = hdlc_bitstuff_byte(iwb, c, ones);
- fcs = crc_ccitt_byte(fcs, c);
- }
-
- /* bitstuff and append FCS
- * (complemented, least significant byte first) */
- fcs ^= 0xffff;
- ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
- ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
-
- /* put closing flag and repeat byte for flag idle */
- isowbuf_putflag(iwb);
- end = isowbuf_donewrite(iwb);
- return end;
-}
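The checksum used here is the RFC 1662 FCS-16 (crc_ccitt_byte in the kernel). A standalone sketch of the "good FCS" property that hdlc_done() below relies on, where PPP_GOODFCS is 0xf0b8:

```c
#include <stdio.h>

/* Bitwise equivalent of the kernel's crc_ccitt_byte() (reflected
 * polynomial 0x8408), as specified for the PPP FCS-16 in RFC 1662. */
static unsigned short fcs_byte(unsigned short fcs, unsigned char c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	return fcs;
}

int main(void)
{
	unsigned char data[] = { 'h', 'i' };
	unsigned short fcs = 0xffff;		/* PPP_INITFCS */
	unsigned short check = 0xffff;
	unsigned i;

	for (i = 0; i < sizeof(data); i++)
		fcs = fcs_byte(fcs, data[i]);
	fcs ^= 0xffff;				/* complement before sending */

	/* receiver: run the CRC over data plus the two FCS octets */
	for (i = 0; i < sizeof(data); i++)
		check = fcs_byte(check, data[i]);
	check = fcs_byte(check, fcs & 0xff);	/* LSB first */
	check = fcs_byte(check, fcs >> 8);
	printf("0x%04x\n", check);		/* 0xf0b8 = PPP_GOODFCS */
	return 0;
}
```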
-
-/* trans_buildframe
- * Append a block of 'transparent' data to the output buffer,
- * inverting the bytes.
- * The output buffer must have room for count more bytes;
- * this is verified before writing.
- * parameters:
- * in input buffer
- * count number of bytes in input buffer
- * iwb pointer to output buffer structure
- * (write semaphore must be held)
- * return value:
- * position of end of packet in output buffer on success,
- * -EAGAIN if write semaphore busy or buffer full
- */
-
-static inline int trans_buildframe(struct isowbuf_t *iwb,
- unsigned char *in, int count)
-{
- int write;
- unsigned char c;
-
- if (unlikely(count <= 0))
- return iwb->write;
-
- if (isowbuf_freebytes(iwb) < count ||
- isowbuf_startwrite(iwb) < 0) {
- gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
- return -EAGAIN;
- }
-
- gig_dbg(DEBUG_STREAM, "put %d bytes", count);
- dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
-
- write = iwb->write;
- do {
- c = bitrev8(*in++);
- iwb->data[write++] = c;
- write %= BAS_OUTBUFSIZE;
- } while (--count > 0);
- iwb->write = write;
- iwb->idle = c;
-
- return isowbuf_donewrite(iwb);
-}
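The "inversion" above is bitrev8() from linux/bitrev.h, i.e. reversing the bit order within each byte to match the B-channel bit ordering. A quick standalone illustration:

```c
#include <stdio.h>

/* classic swap-halves bit reversal, equivalent to the kernel's bitrev8() */
static unsigned char bitrev8(unsigned char b)
{
	b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;
	b = (b & 0xcc) >> 2 | (b & 0x33) << 2;
	b = (b & 0xaa) >> 1 | (b & 0x55) << 1;
	return b;
}

int main(void)
{
	printf("%02x -> %02x\n", 0x01, bitrev8(0x01));	/* 01 -> 80 */
	printf("%02x -> %02x\n", 0xfe, bitrev8(0xfe));	/* fe -> 7f */
	return 0;
}
```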
-
-int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
-{
- int result;
-
- switch (bcs->proto2) {
- case L2_HDLC:
- result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
- gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
- __func__, len, result);
- break;
- default: /* assume transparent */
- result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
- gig_dbg(DEBUG_ISO, "%s: %d bytes trans -> %d",
- __func__, len, result);
- }
- return result;
-}
-
-/* hdlc_putbyte
- * append byte c to current skb of B channel structure *bcs, updating fcs
- */
-static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
-{
- bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c);
- if (bcs->rx_skb == NULL)
- /* skipping */
- return;
- if (bcs->rx_skb->len >= bcs->rx_bufsize) {
- dev_warn(bcs->cs->dev, "received oversized packet discarded\n");
- bcs->hw.bas->giants++;
- dev_kfree_skb_any(bcs->rx_skb);
- bcs->rx_skb = NULL;
- return;
- }
- __skb_put_u8(bcs->rx_skb, c);
-}
-
-/* hdlc_flush
- * drop partial HDLC data packet
- */
-static inline void hdlc_flush(struct bc_state *bcs)
-{
- /* clear skb or allocate new if not skipping */
- if (bcs->rx_skb != NULL)
- skb_trim(bcs->rx_skb, 0);
- else
- gigaset_new_rx_skb(bcs);
-
- /* reset packet state */
- bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* hdlc_done
- * process completed HDLC data packet
- */
-static inline void hdlc_done(struct bc_state *bcs)
-{
- struct cardstate *cs = bcs->cs;
- struct sk_buff *procskb;
- unsigned int len;
-
- if (unlikely(bcs->ignore)) {
- bcs->ignore--;
- hdlc_flush(bcs);
- return;
- }
- procskb = bcs->rx_skb;
- if (procskb == NULL) {
- /* previous error */
- gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
- gigaset_isdn_rcv_err(bcs);
- } else if (procskb->len < 2) {
- dev_notice(cs->dev, "received short frame (%d octets)\n",
- procskb->len);
- bcs->hw.bas->runts++;
- dev_kfree_skb_any(procskb);
- gigaset_isdn_rcv_err(bcs);
- } else if (bcs->rx_fcs != PPP_GOODFCS) {
- dev_notice(cs->dev, "frame check error\n");
- bcs->hw.bas->fcserrs++;
- dev_kfree_skb_any(procskb);
- gigaset_isdn_rcv_err(bcs);
- } else {
- len = procskb->len;
- __skb_trim(procskb, len -= 2); /* subtract FCS */
- gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
- dump_bytes(DEBUG_STREAM_DUMP,
- "rcv data", procskb->data, len);
- bcs->hw.bas->goodbytes += len;
- gigaset_skb_rcvd(bcs, procskb);
- }
- gigaset_new_rx_skb(bcs);
- bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* hdlc_frag
- * drop HDLC data packet with non-integral last byte
- */
-static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
-{
- if (unlikely(bcs->ignore)) {
- bcs->ignore--;
- hdlc_flush(bcs);
- return;
- }
-
- dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
- bcs->hw.bas->alignerrs++;
- gigaset_isdn_rcv_err(bcs);
- __skb_trim(bcs->rx_skb, 0);
- bcs->rx_fcs = PPP_INITFCS;
-}
-
-/* bit counts lookup table for HDLC bit unstuffing
- * index: input byte
- * value: bit 0..3 = number of consecutive '1' bits starting from LSB
- * bit 4..6 = number of consecutive '1' bits starting from MSB
- * (replacing 8 by 7 to make it fit; the algorithm won't care)
- * bit 7 set if there are 5 or more "interior" consecutive '1' bits
- */
-static const unsigned char bitcounts[256] = {
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
- 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
- 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
- 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
- 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
- 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
-};
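
The table does not have to be maintained by hand: each entry follows mechanically from the rules in the comment above. A host-side generator that reproduces it, as a sketch (illustrative only, not part of the driver; spot-checked against a few entries, e.g. bitcounts[0x7d] == 0x81 and bitcounts[0xff] == 0x78):

#include <stdio.h>

int main(void)
{
	for (unsigned c = 0; c < 256; c++) {
		unsigned lead = 0, trail = 0, run = 0, flag = 0, entry;

		while (lead < 8 && (c & (1u << lead)))
			lead++;			/* '1' run from the LSB */
		while (trail < 8 && (c & (0x80u >> trail)))
			trail++;		/* '1' run from the MSB */
		if (c == 0xff) {
			entry = 8 | (7 << 4);	/* MSB count 8 stored as 7 */
		} else {
			/* look for an interior run of 5+ '1' bits */
			for (unsigned i = lead; i < 8 - trail; i++) {
				if (c & (1u << i))
					flag |= (++run >= 5);
				else
					run = 0;
			}
			entry = lead | (trail << 4) | (flag << 7);
		}
		printf("0x%02x,%c", entry, c % 16 == 15 ? '\n' : ' ');
	}
	return 0;
}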
-
-/* hdlc_unpack
- * perform HDLC frame processing (bit unstuffing, flag detection, FCS
- * calculation) on a sequence of received data bytes (8 bits each, LSB first)
- * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
- * notify of errors via gigaset_isdn_rcv_err
- * tally frames, errors etc. in BC structure counters
- * parameters:
- * src received data
- * count number of received bytes
- * bcs receiving B channel structure
- */
-static inline void hdlc_unpack(unsigned char *src, unsigned count,
- struct bc_state *bcs)
-{
- struct bas_bc_state *ubc = bcs->hw.bas;
- int inputstate;
- unsigned seqlen, inbyte, inbits;
-
- /* load previous state:
- * inputstate = set of flag bits:
- * - INS_flag_hunt: no complete opening flag received since connection
- * setup or last abort
- * - INS_have_data: at least one complete data byte received since last
- * flag
- * seqlen = number of consecutive '1' bits in last 7 input stream bits
- * (0..7)
- * inbyte = accumulated partial data byte (if !INS_flag_hunt)
- * inbits = number of valid bits in inbyte, starting at LSB (0..6)
- */
- inputstate = bcs->inputstate;
- seqlen = ubc->seqlen;
- inbyte = ubc->inbyte;
- inbits = ubc->inbits;
-
- /* bit unstuffing a byte at a time
- * Take your time to understand this; it's straightforward but tedious.
- * The "bitcounts" lookup table is used to speed up the counting of
- * leading and trailing '1' bits.
- */
- while (count--) {
- unsigned char c = *src++;
- unsigned char tabentry = bitcounts[c];
- unsigned lead1 = tabentry & 0x0f;
- unsigned trail1 = (tabentry >> 4) & 0x0f;
-
- seqlen += lead1;
-
- if (unlikely(inputstate & INS_flag_hunt)) {
- if (c == PPP_FLAG) {
- /* flag-in-one */
- inputstate &= ~(INS_flag_hunt | INS_have_data);
- inbyte = 0;
- inbits = 0;
- } else if (seqlen == 6 && trail1 != 7) {
- /* flag completed & not followed by abort */
- inputstate &= ~(INS_flag_hunt | INS_have_data);
- inbyte = c >> (lead1 + 1);
- inbits = 7 - lead1;
- if (trail1 >= 8) {
- /* interior stuffing:
- * omitting the MSB handles most cases,
- * correct the incorrectly handled
- * cases individually */
- inbits--;
- switch (c) {
- case 0xbe:
- inbyte = 0x3f;
- break;
- }
- }
- }
- /* else: continue flag-hunting */
- } else if (likely(seqlen < 5 && trail1 < 7)) {
- /* streamlined case: 8 data bits, no stuffing */
- inbyte |= c << inbits;
- hdlc_putbyte(inbyte & 0xff, bcs);
- inputstate |= INS_have_data;
- inbyte >>= 8;
- /* inbits unchanged */
- } else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
- trail1 + 1 == inbits &&
- !(inputstate & INS_have_data))) {
- /* streamlined case: flag idle - state unchanged */
- } else if (unlikely(seqlen > 6)) {
- /* abort sequence */
- ubc->aborts++;
- hdlc_flush(bcs);
- inputstate |= INS_flag_hunt;
- } else if (seqlen == 6) {
- /* closing flag, including (6 - lead1) '1's
- * and one '0' from inbits */
- if (inbits > 7 - lead1) {
- hdlc_frag(bcs, inbits + lead1 - 7);
- inputstate &= ~INS_have_data;
- } else {
- if (inbits < 7 - lead1)
- ubc->stolen0s++;
- if (inputstate & INS_have_data) {
- hdlc_done(bcs);
- inputstate &= ~INS_have_data;
- }
- }
-
- if (c == PPP_FLAG) {
- /* complete flag, LSB overlaps preceding flag */
- ubc->shared0s++;
- inbits = 0;
- inbyte = 0;
- } else if (trail1 != 7) {
- /* remaining bits */
- inbyte = c >> (lead1 + 1);
- inbits = 7 - lead1;
- if (trail1 >= 8) {
- /* interior stuffing:
- * omitting the MSB handles most cases,
- * correct the incorrectly handled
- * cases individually */
- inbits--;
- switch (c) {
- case 0xbe:
- inbyte = 0x3f;
- break;
- }
- }
- } else {
- /* abort sequence follows,
- * skb already empty anyway */
- ubc->aborts++;
- inputstate |= INS_flag_hunt;
- }
- } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */
-
- if (c == PPP_FLAG) {
- /* complete flag */
- if (seqlen == 5)
- ubc->stolen0s++;
- if (inbits) {
- hdlc_frag(bcs, inbits);
- inbits = 0;
- inbyte = 0;
- } else if (inputstate & INS_have_data)
- hdlc_done(bcs);
- inputstate &= ~INS_have_data;
- } else if (trail1 == 7) {
- /* abort sequence */
- ubc->aborts++;
- hdlc_flush(bcs);
- inputstate |= INS_flag_hunt;
- } else {
- /* stuffed data */
- if (trail1 < 7) { /* => seqlen == 5 */
- /* stuff bit at position lead1,
- * no interior stuffing */
- unsigned char mask = (1 << lead1) - 1;
- c = (c & mask) | ((c & ~mask) >> 1);
- inbyte |= c << inbits;
- inbits += 7;
- } else if (seqlen < 5) { /* trail1 >= 8 */
- /* interior stuffing:
- * omitting the MSB handles most cases,
- * correct the incorrectly handled
- * cases individually */
- switch (c) {
- case 0xbe:
- c = 0x7e;
- break;
- }
- inbyte |= c << inbits;
- inbits += 7;
- } else { /* seqlen == 5 && trail1 >= 8 */
-
- /* stuff bit at lead1 *and* interior
- * stuffing -- unstuff individually */
- switch (c) {
- case 0x7d:
- c = 0x3f;
- break;
- case 0xbe:
- c = 0x3f;
- break;
- case 0x3e:
- c = 0x1f;
- break;
- case 0x7c:
- c = 0x3e;
- break;
- }
- inbyte |= c << inbits;
- inbits += 6;
- }
- if (inbits >= 8) {
- inbits -= 8;
- hdlc_putbyte(inbyte & 0xff, bcs);
- inputstate |= INS_have_data;
- inbyte >>= 8;
- }
- }
- }
- seqlen = trail1 & 7;
- }
-
- /* save new state */
- bcs->inputstate = inputstate;
- ubc->seqlen = seqlen;
- ubc->inbyte = inbyte;
- ubc->inbits = inbits;
-}
-
-/* trans_receive
- * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
- * reverse the bit order of each byte
- * tally frames, errors etc. in BC structure counters
- * parameters:
- * src received data
- * count number of received bytes
- * bcs receiving B channel structure
- */
-static inline void trans_receive(unsigned char *src, unsigned count,
- struct bc_state *bcs)
-{
- struct sk_buff *skb;
- int dobytes;
- unsigned char *dst;
-
- if (unlikely(bcs->ignore)) {
- bcs->ignore--;
- return;
- }
- skb = bcs->rx_skb;
- if (skb == NULL) {
- skb = gigaset_new_rx_skb(bcs);
- if (skb == NULL)
- return;
- }
- dobytes = bcs->rx_bufsize - skb->len;
- while (count > 0) {
- dst = skb_put(skb, count < dobytes ? count : dobytes);
- while (count > 0 && dobytes > 0) {
- *dst++ = bitrev8(*src++);
- count--;
- dobytes--;
- }
- if (dobytes == 0) {
- dump_bytes(DEBUG_STREAM_DUMP,
- "rcv data", skb->data, skb->len);
- bcs->hw.bas->goodbytes += skb->len;
- gigaset_skb_rcvd(bcs, skb);
- skb = gigaset_new_rx_skb(bcs);
- if (skb == NULL)
- return;
- dobytes = bcs->rx_bufsize;
- }
- }
-}
-
-void gigaset_isoc_receive(unsigned char *src, unsigned count,
- struct bc_state *bcs)
-{
- switch (bcs->proto2) {
- case L2_HDLC:
- hdlc_unpack(src, count, bcs);
- break;
- default: /* assume transparent */
- trans_receive(src, count, bcs);
- }
-}
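
A hypothetical call site, for orientation only (the driver's real isochronous completion path is more involved): per received isochronous packet, the dispatch would look roughly like this. Field names follow the standard struct urb; storing the bc_state pointer in urb->context and the function name isoc_rx_example are assumptions of this sketch.

static void isoc_rx_example(struct urb *urb)
{
	struct bc_state *bcs = urb->context;	/* assumed setup */
	int i;

	for (i = 0; i < urb->number_of_packets; i++) {
		struct usb_iso_packet_descriptor *d = &urb->iso_frame_desc[i];

		if (d->status == 0 && d->actual_length > 0)
			gigaset_isoc_receive((unsigned char *)
					     urb->transfer_buffer + d->offset,
					     d->actual_length, bcs);
	}
}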
-
-/* == data input =========================================================== */
-
-/* process a block of received bytes in command mode (mstate != MS_LOCKED)
- * Append received bytes to the command response buffer and forward them
- * line by line to the response handler.
- * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
- * removed before passing the line to the response handler.
- */
-static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- unsigned cbytes = cs->cbytes;
- unsigned char c;
-
- while (numbytes--) {
- c = *src++;
- switch (c) {
- case '\n':
- if (cbytes == 0 && cs->respdata[0] == '\r') {
- /* collapse LF with preceding CR */
- cs->respdata[0] = 0;
- break;
- }
- /* fall through */
- case '\r':
- /* end of message line, pass to response handler */
- if (cbytes >= MAX_RESP_SIZE) {
- dev_warn(cs->dev, "response too large (%d)\n",
- cbytes);
- cbytes = MAX_RESP_SIZE;
- }
- cs->cbytes = cbytes;
- gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
- cbytes, cs->respdata);
- gigaset_handle_modem_response(cs);
- cbytes = 0;
-
- /* store EOL byte for CRLF collapsing */
- cs->respdata[0] = c;
- break;
- default:
- /* append to line buffer if possible */
- if (cbytes < MAX_RESP_SIZE)
- cs->respdata[cbytes] = c;
- cbytes++;
- }
- }
-
- /* save state */
- cs->cbytes = cbytes;
-}
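
Reduced to its essence, the loop above makes a three-way decision per byte; a minimal restatement (eol_example is an invented name, and last_eol stands in for the CR memo kept in cs->respdata[0]):

static void eol_example(unsigned char c, unsigned char *last_eol)
{
	if (c == '\n' && *last_eol == '\r') {
		*last_eol = 0;		/* LF completing CR LF: swallow it */
		return;
	}
	if (c == '\r' || c == '\n') {
		/* line complete: hand the buffered line to the handler */
		*last_eol = c;		/* remember, in case LF follows CR */
		return;
	}
	*last_eol = 0;			/* any data byte clears the memo */
}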
-
-
-/* process a block of data received through the control channel
- */
-void gigaset_isoc_input(struct inbuf_t *inbuf)
-{
- struct cardstate *cs = inbuf->cs;
- unsigned tail, head, numbytes;
- unsigned char *src;
-
- head = inbuf->head;
- while (head != (tail = inbuf->tail)) {
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
- if (head > tail)
- tail = RBUFSIZE;
- src = inbuf->data + head;
- numbytes = tail - head;
- gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
-
- if (cs->mstate == MS_LOCKED) {
- gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
- numbytes, src);
- gigaset_if_receive(inbuf->cs, src, numbytes);
- } else {
- cmd_loop(src, numbytes, inbuf);
- }
-
- head += numbytes;
- if (head == RBUFSIZE)
- head = 0;
- gig_dbg(DEBUG_INTR, "setting head to %u", head);
- inbuf->head = head;
- }
-}
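
The "if (head > tail) tail = RBUFSIZE" step above is the usual two-chunk drain of a wrapped ring; the contiguous span processed per pass is simply (a sketch, assuming the same single-reader/single-writer discipline):

static unsigned contiguous_bytes(unsigned head, unsigned tail, unsigned size)
{
	/* wrapped: read to the end of the array first; the next loop
	 * iteration picks up the remainder at the start */
	return head > tail ? size - head : tail - head;
}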
-
-
-/* == data output ========================================================== */
-
-/**
- * gigaset_isoc_send_skb() - queue an skb for sending
- * @bcs: B channel descriptor structure.
- * @skb: data to send.
- *
- * Called by LL to queue an skb for sending, and start transmission if
- * necessary.
- * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the skb's link layer header preserved.
- *
- * Return value:
- * number of bytes accepted for sending (skb->len) if ok,
- * error code < 0 (eg. -ENODEV) on error
- */
-int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
-{
- int len = skb->len;
- unsigned long flags;
-
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (!bcs->cs->connected) {
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- return -ENODEV;
- }
-
- skb_queue_tail(&bcs->squeue, skb);
- gig_dbg(DEBUG_ISO, "%s: skb queued, qlen=%d",
- __func__, skb_queue_len(&bcs->squeue));
-
- /* tasklet submits URB if necessary */
- tasklet_schedule(&bcs->hw.bas->sent_tasklet);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
-
- return len; /* ok so far */
-}
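
A hypothetical LL-side caller, to make the return convention concrete (sketch only; on failure the skb was never queued, so it remains the caller's to free):

static void ll_send_example(struct bc_state *bcs, struct sk_buff *skb)
{
	int rc = gigaset_isoc_send_skb(bcs, skb);

	if (rc < 0)
		dev_kfree_skb(skb);	/* not queued; clean up here */
	/* on success, wait for gigaset_skb_sent() before reusing the skb */
}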
diff --git a/drivers/staging/isdn/gigaset/proc.c b/drivers/staging/isdn/gigaset/proc.c
deleted file mode 100644
index 8914439a4237..000000000000
--- a/drivers/staging/isdn/gigaset/proc.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-
-static ssize_t show_cidmode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cardstate *cs = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", cs->cidmode);
-}
-
-static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cardstate *cs = dev_get_drvdata(dev);
- long int value;
- char *end;
-
- value = simple_strtol(buf, &end, 0);
- while (*end)
- if (!isspace(*end++))
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&cs->mutex))
- return -ERESTARTSYS;
-
- cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
- NULL, value, NULL)) {
- cs->waiting = 0;
- mutex_unlock(&cs->mutex);
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
-
- wait_event(cs->waitqueue, !cs->waiting);
-
- mutex_unlock(&cs->mutex);
-
- return count;
-}
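
The strtol-plus-whitespace scan above predates the kstrto* helpers; for comparison, the same 0/1 parse with the modern API would reduce to something like the following sketch (parse_cidmode is an invented name; kstrtoul already tolerates the single trailing newline a sysfs write carries):

static int parse_cidmode(const char *buf, unsigned long *value)
{
	int rc = kstrtoul(buf, 0, value);

	if (rc)
		return rc;
	return *value > 1 ? -EINVAL : 0;
}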
-
-static DEVICE_ATTR(cidmode, S_IRUGO | S_IWUSR, show_cidmode, set_cidmode);
-
-/* free sysfs for device */
-void gigaset_free_dev_sysfs(struct cardstate *cs)
-{
- if (!cs->tty_dev)
- return;
-
- gig_dbg(DEBUG_INIT, "removing sysfs entries");
- device_remove_file(cs->tty_dev, &dev_attr_cidmode);
-}
-
-/* initialize sysfs for device */
-void gigaset_init_dev_sysfs(struct cardstate *cs)
-{
- if (!cs->tty_dev)
- return;
-
- gig_dbg(DEBUG_INIT, "setting up sysfs");
- if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
- pr_err("could not create sysfs attribute\n");
-}
diff --git a/drivers/staging/isdn/gigaset/ser-gigaset.c b/drivers/staging/isdn/gigaset/ser-gigaset.c
deleted file mode 100644
index 5587e9e7fc73..000000000000
--- a/drivers/staging/isdn/gigaset/ser-gigaset.c
+++ /dev/null
@@ -1,796 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* This is the serial hardware link layer (HLL) for the Gigaset 307x isdn
- * DECT base (aka Sinus 45 isdn) using the RS232 DECT data module M101,
- * written as a line discipline.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Tilman Schmidt"
-#define DRIVER_DESC "Serial Driver for Gigaset 307x using Siemens M101"
-
-#define GIGASET_MINORS 1
-#define GIGASET_MINOR 0
-#define GIGASET_MODULENAME "ser_gigaset"
-#define GIGASET_DEVNAME "ttyGS"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_LDISC(N_GIGASET_M101);
-
-static int startmode = SM_ISDN;
-module_param(startmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "initial operation mode");
-static int cidmode = 1;
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(cidmode, "stay in CID mode when idle");
-
-static struct gigaset_driver *driver;
-
-struct ser_cardstate {
- struct platform_device dev;
- struct tty_struct *tty;
- atomic_t refcnt;
- struct completion dead_cmp;
-};
-
-static struct platform_driver device_driver = {
- .driver = {
- .name = GIGASET_MODULENAME,
- },
-};
-
-static void flush_send_queue(struct cardstate *);
-
-/* transmit data from current open skb
- * result: number of bytes sent or error code < 0
- */
-static int write_modem(struct cardstate *cs)
-{
- struct tty_struct *tty = cs->hw.ser->tty;
- struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
- struct sk_buff *skb = bcs->tx_skb;
- int sent = -EOPNOTSUPP;
-
- WARN_ON(!tty || !tty->ops || !skb);
-
- if (!skb->len) {
- dev_kfree_skb_any(skb);
- bcs->tx_skb = NULL;
- return -EINVAL;
- }
-
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- if (tty->ops->write)
- sent = tty->ops->write(tty, skb->data, skb->len);
- gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent);
- if (sent < 0) {
- /* error */
- flush_send_queue(cs);
- return sent;
- }
- skb_pull(skb, sent);
- if (!skb->len) {
- /* skb sent completely */
- gigaset_skb_sent(bcs, skb);
-
- gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
- (unsigned long) skb);
- dev_kfree_skb_any(skb);
- bcs->tx_skb = NULL;
- }
- return sent;
-}
-
-/*
- * transmit first queued command buffer
- * result: number of bytes sent or error code < 0
- */
-static int send_cb(struct cardstate *cs)
-{
- struct tty_struct *tty = cs->hw.ser->tty;
- struct cmdbuf_t *cb, *tcb;
- unsigned long flags;
- int sent = 0;
-
- WARN_ON(!tty || !tty->ops);
-
- cb = cs->cmdbuf;
- if (!cb)
- return 0; /* nothing to do */
-
- if (cb->len) {
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len);
- if (sent < 0) {
- /* error */
- gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent);
- flush_send_queue(cs);
- return sent;
- }
- cb->offset += sent;
- cb->len -= sent;
- gig_dbg(DEBUG_OUTPUT, "send_cb: sent %d, left %u, queued %u",
- sent, cb->len, cs->cmdbytes);
- }
-
- while (cb && !cb->len) {
- spin_lock_irqsave(&cs->cmdlock, flags);
- cs->cmdbytes -= cs->curlen;
- tcb = cb;
- cs->cmdbuf = cb = cb->next;
- if (cb) {
- cb->prev = NULL;
- cs->curlen = cb->len;
- } else {
- cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- }
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- if (tcb->wake_tasklet)
- tasklet_schedule(tcb->wake_tasklet);
- kfree(tcb);
- }
- return sent;
-}
-
-/*
- * send queue tasklet
- * If an skb is already being sent, push more of its data into the
- * transfer buffer by calling "write_modem".
- * Otherwise take a new skb from the send queue.
- */
-static void gigaset_modem_fill(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *) data;
- struct bc_state *bcs;
- struct sk_buff *nextskb;
- int sent = 0;
-
- if (!cs) {
- gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
- return;
- }
- bcs = cs->bcs;
- if (!bcs) {
- gig_dbg(DEBUG_OUTPUT, "%s: no B channel state", __func__);
- return;
- }
- if (!bcs->tx_skb) {
- /* no skb is being sent; send command if any */
- sent = send_cb(cs);
- gig_dbg(DEBUG_OUTPUT, "%s: send_cb -> %d", __func__, sent);
- if (sent)
- /* something sent or error */
- return;
-
- /* no command to send; get skb */
- nextskb = skb_dequeue(&bcs->squeue);
- if (!nextskb)
- /* no skb either, nothing to do */
- return;
- bcs->tx_skb = nextskb;
-
- gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
- (unsigned long) bcs->tx_skb);
- }
-
- /* send skb */
- gig_dbg(DEBUG_OUTPUT, "%s: tx_skb", __func__);
- if (write_modem(cs) < 0)
- gig_dbg(DEBUG_OUTPUT, "%s: write_modem failed", __func__);
-}
-
-/*
- * throw away all data queued for sending
- */
-static void flush_send_queue(struct cardstate *cs)
-{
- struct sk_buff *skb;
- struct cmdbuf_t *cb;
- unsigned long flags;
-
- /* command queue */
- spin_lock_irqsave(&cs->cmdlock, flags);
- while ((cb = cs->cmdbuf) != NULL) {
- cs->cmdbuf = cb->next;
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
- kfree(cb);
- }
- cs->cmdbuf = cs->lastcmdbuf = NULL;
- cs->cmdbytes = cs->curlen = 0;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- /* data queue */
- if (cs->bcs->tx_skb)
- dev_kfree_skb_any(cs->bcs->tx_skb);
- while ((skb = skb_dequeue(&cs->bcs->squeue)) != NULL)
- dev_kfree_skb_any(skb);
-}
-
-
-/* Gigaset Driver Interface */
-/* ======================== */
-
-/*
- * queue an AT command string for transmission to the Gigaset device
- * parameters:
- * cs controller state structure
- * buf buffer containing the string to send
- * len number of characters to send
- * wake_tasklet tasklet to run when transmission is complete, or NULL
- * return value:
- * number of bytes queued, or error code < 0
- */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
- unsigned long flags;
-
- gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
- DEBUG_TRANSCMD : DEBUG_LOCKCMD,
- "CMD Transmit", cb->len, cb->buf);
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- cb->prev = cs->lastcmdbuf;
- if (cs->lastcmdbuf)
- cs->lastcmdbuf->next = cb;
- else {
- cs->cmdbuf = cb;
- cs->curlen = cb->len;
- }
- cs->cmdbytes += cb->len;
- cs->lastcmdbuf = cb;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->connected)
- tasklet_schedule(&cs->write_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
- return cb->len;
-}
-
-/*
- * tty_driver.write_room interface routine
- * return number of characters the driver will accept to be written
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_write_room(struct cardstate *cs)
-{
- unsigned bytes;
-
- bytes = cs->cmdbytes;
- return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
-}
-
-/*
- * tty_driver.chars_in_buffer interface routine
- * return number of characters waiting to be sent
- * parameter:
- * controller state structure
- * return value:
- * number of characters
- */
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
- return cs->cmdbytes;
-}
-
-/*
- * implementation of ioctl(GIGASET_BRKCHARS)
- * parameter:
- * controller state structure
- * return value:
- * -EINVAL (unimplemented function)
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
- /* not implemented */
- return -EINVAL;
-}
-
-/*
- * Open B channel
- * Called by "do_action" in ev-layer.c
- */
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_up(bcs);
- return 0;
-}
-
-/*
- * Close B channel
- * Called by "do_action" in ev-layer.c
- */
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_down(bcs);
- return 0;
-}
-
-/*
- * Set up B channel structure
- * This is called by "gigaset_initcs" in common.c
- */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
- /* unused */
- bcs->hw.ser = NULL;
- return 0;
-}
-
-/*
- * Free B channel structure
- * Called by "gigaset_freebcs" in common.c
- */
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
- /* unused */
-}
-
-/*
- * Reinitialize B channel structure
- * This is called by "bcs_reinit" in common.c
- */
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
-}
-
-/*
- * Free hardware specific device data
- * This will be called by "gigaset_freecs" in common.c
- */
-static void gigaset_freecshw(struct cardstate *cs)
-{
- tasklet_kill(&cs->write_tasklet);
- if (!cs->hw.ser)
- return;
- platform_device_unregister(&cs->hw.ser->dev);
-}
-
-static void gigaset_device_release(struct device *dev)
-{
- kfree(container_of(dev, struct ser_cardstate, dev.dev));
-}
-
-/*
- * Set up hardware specific device data
- * This is called by "gigaset_initcs" in common.c
- */
-static int gigaset_initcshw(struct cardstate *cs)
-{
- int rc;
- struct ser_cardstate *scs;
-
- scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
- if (!scs) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
- cs->hw.ser = scs;
-
- cs->hw.ser->dev.name = GIGASET_MODULENAME;
- cs->hw.ser->dev.id = cs->minor_index;
- cs->hw.ser->dev.dev.release = gigaset_device_release;
- rc = platform_device_register(&cs->hw.ser->dev);
- if (rc != 0) {
- pr_err("error %d registering platform device\n", rc);
- kfree(cs->hw.ser);
- cs->hw.ser = NULL;
- return rc;
- }
-
- tasklet_init(&cs->write_tasklet,
- gigaset_modem_fill, (unsigned long) cs);
- return 0;
-}
-
-/*
- * set modem control lines
- * Parameters:
- * card state structure
- * modem control line state ([TIOCM_DTR]|[TIOCM_RTS])
- * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
- * and by "if_lock" and "if_termios" in interface.c
- */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- struct tty_struct *tty = cs->hw.ser->tty;
- unsigned int set, clear;
-
- WARN_ON(!tty || !tty->ops);
- /* tiocmset is an optional tty driver method */
- if (!tty->ops->tiocmset)
- return -EINVAL;
- set = new_state & ~old_state;
- clear = old_state & ~new_state;
- if (!set && !clear)
- return 0;
- gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear);
- return tty->ops->tiocmset(tty, set, clear);
-}
-
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- return -EINVAL;
-}
-
-static const struct gigaset_ops ops = {
- .write_cmd = gigaset_write_cmd,
- .write_room = gigaset_write_room,
- .chars_in_buffer = gigaset_chars_in_buffer,
- .brkchars = gigaset_brkchars,
- .init_bchannel = gigaset_init_bchannel,
- .close_bchannel = gigaset_close_bchannel,
- .initbcshw = gigaset_initbcshw,
- .freebcshw = gigaset_freebcshw,
- .reinitbcshw = gigaset_reinitbcshw,
- .initcshw = gigaset_initcshw,
- .freecshw = gigaset_freecshw,
- .set_modem_ctrl = gigaset_set_modem_ctrl,
- .baud_rate = gigaset_baud_rate,
- .set_line_ctrl = gigaset_set_line_ctrl,
- .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
- .handle_input = gigaset_m10x_input, /* asyncdata.c */
-};
-
-
-/* Line Discipline Interface */
-/* ========================= */
-
-/* helper functions for cardstate refcounting */
-static struct cardstate *cs_get(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->disc_data;
-
- if (!cs || !cs->hw.ser) {
- gig_dbg(DEBUG_ANY, "%s: no cardstate", __func__);
- return NULL;
- }
- atomic_inc(&cs->hw.ser->refcnt);
- return cs;
-}
-
-static void cs_put(struct cardstate *cs)
-{
- if (atomic_dec_and_test(&cs->hw.ser->refcnt))
- complete(&cs->hw.ser->dead_cmp);
-}
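
Together the pair forms a small quiescence barrier: teardown drops the initial reference and, if any cs_get() holder is still active, sleeps until the final cs_put() completes dead_cmp. The handshake used by gigaset_tty_close() below amounts to this sketch (wait_until_quiet is an invented name):

static void wait_until_quiet(struct ser_cardstate *scs)
{
	if (!atomic_dec_and_test(&scs->refcnt))
		wait_for_completion(&scs->dead_cmp);
}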
-
-/*
- * Called by the tty driver when the line discipline is pushed onto the tty.
- * Called in process context.
- */
-static int
-gigaset_tty_open(struct tty_struct *tty)
-{
- struct cardstate *cs;
- int rc;
-
- gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
-
- pr_info(DRIVER_DESC "\n");
-
- if (!driver) {
- pr_err("%s: no driver structure\n", __func__);
- return -ENODEV;
- }
-
- /* allocate memory for our device state and initialize it */
- cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
- if (!cs) {
- rc = -ENODEV;
- goto error;
- }
-
- cs->dev = &cs->hw.ser->dev.dev;
- cs->hw.ser->tty = tty;
- atomic_set(&cs->hw.ser->refcnt, 1);
- init_completion(&cs->hw.ser->dead_cmp);
- tty->disc_data = cs;
-
- /* Set the amount of data we're willing to receive per call
- * from the hardware driver to half of the input buffer size
- * to leave some reserve.
- * Note: We don't do flow control towards the hardware driver.
- * If more data is received than will fit into the input buffer,
- * it will be dropped and an error will be logged. This should
- * never happen as the device is slow and the buffer size ample.
- */
- tty->receive_room = RBUFSIZE/2;
-
- /* Initialization of the data structures and the hardware is done.
- * Now start up the system and notify the LL that we are ready to run.
- */
- if (startmode == SM_LOCKED)
- cs->mstate = MS_LOCKED;
- rc = gigaset_start(cs);
- if (rc < 0) {
- tasklet_kill(&cs->write_tasklet);
- goto error;
- }
-
- gig_dbg(DEBUG_INIT, "Startup of HLL done");
- return 0;
-
-error:
- gig_dbg(DEBUG_INIT, "Startup of HLL failed");
- tty->disc_data = NULL;
- gigaset_freecs(cs);
- return rc;
-}
-
-/*
- * Called by the tty driver when the line discipline is removed.
- * Called from process context.
- */
-static void
-gigaset_tty_close(struct tty_struct *tty)
-{
- struct cardstate *cs = tty->disc_data;
-
- gig_dbg(DEBUG_INIT, "Stopping HLL for Gigaset M101");
-
- if (!cs) {
- gig_dbg(DEBUG_INIT, "%s: no cardstate", __func__);
- return;
- }
-
- /* prevent other callers from entering ldisc methods */
- tty->disc_data = NULL;
-
- if (!cs->hw.ser)
- pr_err("%s: no hw cardstate\n", __func__);
- else {
- /* wait for running methods to finish */
- if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
- wait_for_completion(&cs->hw.ser->dead_cmp);
- }
-
- /* stop operations */
- gigaset_stop(cs);
- tasklet_kill(&cs->write_tasklet);
- flush_send_queue(cs);
- cs->dev = NULL;
- gigaset_freecs(cs);
-
- gig_dbg(DEBUG_INIT, "Shutdown of HLL done");
-}
-
-/*
- * Called by the tty driver when the tty line is hung up.
- * Wait for I/O to driver to complete and unregister ISDN device.
- * This is already done by the close routine, so just call that.
- * Called from process context.
- */
-static int gigaset_tty_hangup(struct tty_struct *tty)
-{
- gigaset_tty_close(tty);
- return 0;
-}
-
-/*
- * Ioctl on the tty.
- * Called in process context only.
- * May be re-entered by multiple ioctl calling threads.
- */
-static int
-gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct cardstate *cs = cs_get(tty);
- int rc, val;
- int __user *p = (int __user *)arg;
-
- if (!cs)
- return -ENXIO;
-
- switch (cmd) {
-
- case FIONREAD:
- /* unused, always return zero */
- val = 0;
- rc = put_user(val, p);
- break;
-
- case TCFLSH:
- /* flush our buffers and the serial port's buffer */
- switch (arg) {
- case TCIFLUSH:
- /* no own input buffer to flush */
- break;
- case TCIOFLUSH:
- case TCOFLUSH:
- flush_send_queue(cs);
- break;
- }
- /* fall through */
-
- default:
- /* pass through to underlying serial device */
- rc = n_tty_ioctl_helper(tty, file, cmd, arg);
- break;
- }
- cs_put(cs);
- return rc;
-}
-
-/*
- * Called by the tty driver when a block of data has been received.
- * Will not be re-entered while running but other ldisc functions
- * may be called in parallel.
- * Can be called from hard interrupt level as well as soft interrupt
- * level or mainline.
- * Parameters:
- * tty tty structure
- * buf buffer containing received characters
- * cflags buffer containing error flags for received characters (ignored)
- * count number of received characters
- */
-static void
-gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
- char *cflags, int count)
-{
- struct cardstate *cs = cs_get(tty);
- unsigned tail, head, n;
- struct inbuf_t *inbuf;
-
- if (!cs)
- return;
- inbuf = cs->inbuf;
- if (!inbuf) {
- dev_err(cs->dev, "%s: no inbuf\n", __func__);
- cs_put(cs);
- return;
- }
-
- tail = inbuf->tail;
- head = inbuf->head;
- gig_dbg(DEBUG_INTR, "buffer state: %u -> %u, receive %u bytes",
- head, tail, count);
-
- if (head <= tail) {
- /* possible buffer wraparound */
- n = min_t(unsigned, count, RBUFSIZE - tail);
- memcpy(inbuf->data + tail, buf, n);
- tail = (tail + n) % RBUFSIZE;
- buf += n;
- count -= n;
- }
-
- if (count > 0) {
- /* tail < head and some data left */
- n = head - tail - 1;
- if (count > n) {
- dev_err(cs->dev,
- "inbuf overflow, discarding %d bytes\n",
- count - n);
- count = n;
- }
- memcpy(inbuf->data + tail, buf, count);
- tail += count;
- }
-
- gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- inbuf->tail = tail;
-
- /* Everything was received; push the data to the handler */
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(cs);
- cs_put(cs);
-}
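
For reference, the "n = head - tail - 1" step above keeps one slot unused so that head == tail can only mean "empty"; the total writable space in the ring is therefore (a sketch):

static unsigned ring_space(unsigned head, unsigned tail, unsigned size)
{
	/* one slot is reserved to disambiguate full from empty */
	return (head + size - tail - 1) % size;
}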
-
-/*
- * Called by the tty driver when there's room for more data to send.
- */
-static void
-gigaset_tty_wakeup(struct tty_struct *tty)
-{
- struct cardstate *cs = cs_get(tty);
-
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- if (!cs)
- return;
- tasklet_schedule(&cs->write_tasklet);
- cs_put(cs);
-}
-
-static struct tty_ldisc_ops gigaset_ldisc = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = "ser_gigaset",
- .open = gigaset_tty_open,
- .close = gigaset_tty_close,
- .hangup = gigaset_tty_hangup,
- .ioctl = gigaset_tty_ioctl,
- .receive_buf = gigaset_tty_receive,
- .write_wakeup = gigaset_tty_wakeup,
-};
-
-
-/* Initialization / Shutdown */
-/* ========================= */
-
-static int __init ser_gigaset_init(void)
-{
- int rc;
-
- gig_dbg(DEBUG_INIT, "%s", __func__);
- rc = platform_driver_register(&device_driver);
- if (rc != 0) {
- pr_err("error %d registering platform driver\n", rc);
- return rc;
- }
-
- /* allocate memory for our driver state and initialize it */
- driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
- GIGASET_MODULENAME, GIGASET_DEVNAME,
- &ops, THIS_MODULE);
- if (!driver) {
- rc = -ENOMEM;
- goto error;
- }
-
- rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
- if (rc != 0) {
- pr_err("error %d registering line discipline\n", rc);
- goto error;
- }
-
- return 0;
-
-error:
- if (driver) {
- gigaset_freedriver(driver);
- driver = NULL;
- }
- platform_driver_unregister(&device_driver);
- return rc;
-}
-
-static void __exit ser_gigaset_exit(void)
-{
- int rc;
-
- gig_dbg(DEBUG_INIT, "%s", __func__);
-
- if (driver) {
- gigaset_freedriver(driver);
- driver = NULL;
- }
-
- rc = tty_unregister_ldisc(N_GIGASET_M101);
- if (rc != 0)
- pr_err("error %d unregistering line discipline\n", rc);
-
- platform_driver_unregister(&device_driver);
-}
-
-module_init(ser_gigaset_init);
-module_exit(ser_gigaset_exit);
diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c
deleted file mode 100644
index a20c0bfa68f3..000000000000
--- a/drivers/staging/isdn/gigaset/usb-gigaset.c
+++ /dev/null
@@ -1,959 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB driver for Gigaset 307x directly or using M105 Data.
- *
- * Copyright (c) 2001 by Stefan Eilers
- * and Hansjoerg Lipp <hjlipp@web.de>.
- *
- * This driver was derived from the USB skeleton driver by
- * Greg Kroah-Hartman <greg@kroah.com>
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-/* Version Information */
-#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers"
-#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
-
-/* Module parameters */
-
-static int startmode = SM_ISDN;
-static int cidmode = 1;
-
-module_param(startmode, int, S_IRUGO);
-module_param(cidmode, int, S_IRUGO);
-MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
-MODULE_PARM_DESC(cidmode, "Call-ID mode");
-
-#define GIGASET_MINORS 1
-#define GIGASET_MINOR 8
-#define GIGASET_MODULENAME "usb_gigaset"
-#define GIGASET_DEVNAME "ttyGU"
-
-/* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
-#define IF_WRITEBUF 264
-
-/* Values for the Gigaset M105 Data */
-#define USB_M105_VENDOR_ID 0x0681
-#define USB_M105_PRODUCT_ID 0x0009
-
-/* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table[] = {
- { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, gigaset_table);
-
-/*
- * Control requests (empty fields: 00)
- *
- * RT|RQ|VALUE|INDEX|LEN |DATA
- * In:
- * C1 08 01
- * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
- * C1 0F ll ll
- * Get device information/status (llll: 0x200 and 0x40 seen).
- * Real size: I only saw MIN(llll,0x64).
- * Contents: seems to be always the same...
- * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
- * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
- * rest: ?
- * Out:
- * 41 11
- * Initialize/reset device ?
- * 41 00 xx 00
- * ? (xx=00 or 01; 01 on start, 00 on close)
- * 41 07 vv mm
- * Set/clear flags vv=value, mm=mask (see RQ 08)
- * 41 12 xx
- * Used before the following configuration requests are issued
- * (with xx=0x0f). I've seen other values<0xf, though.
- * 41 01 xx xx
- * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
- * 41 03 ps bb
- * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity
- * [ 0x30: m, 0x40: s ]
- * [s: 0: 1 stop bit; 1: 1.5; 2: 2]
- * bb: bits/byte (seen 7 and 8)
- * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
- * ??
- * Initialization: 01, 40, 00, 00
- * Open device: 00 40, 00, 00
- * yy and zz seem to be equal, either 0x00 or 0x0a
- * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
- * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
- * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
- * xx is usually 0x00 but was 0x7e before starting data transfer
- * in unimodem mode. So, this might be an array of characters that
- * need special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
- *
- * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
- * flags per packet.
- */
-
-/* functions called if a device of this driver is connected/disconnected */
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void gigaset_disconnect(struct usb_interface *interface);
-
-/* functions called before/after suspend */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
-static int gigaset_resume(struct usb_interface *intf);
-static int gigaset_pre_reset(struct usb_interface *intf);
-
-static struct gigaset_driver *driver;
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver gigaset_usb_driver = {
- .name = GIGASET_MODULENAME,
- .probe = gigaset_probe,
- .disconnect = gigaset_disconnect,
- .id_table = gigaset_table,
- .suspend = gigaset_suspend,
- .resume = gigaset_resume,
- .reset_resume = gigaset_resume,
- .pre_reset = gigaset_pre_reset,
- .post_reset = gigaset_resume,
- .disable_hub_initiated_lpm = 1,
-};
-
-struct usb_cardstate {
- struct usb_device *udev; /* usb device pointer */
- struct usb_interface *interface; /* interface for this device */
- int busy; /* bulk output in progress */
-
- /* Output buffer */
- unsigned char *bulk_out_buffer;
- int bulk_out_size;
- int bulk_out_epnum;
- struct urb *bulk_out_urb;
-
- /* Input buffer */
- unsigned char *rcvbuf;
- int rcvbuf_size;
- struct urb *read_urb;
-
- char bchars[6]; /* for request 0x19 */
-};
-
-static inline unsigned tiocm_to_gigaset(unsigned state)
-{
- return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
-}
-
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
- unsigned new_state)
-{
- struct usb_device *udev = cs->hw.usb->udev;
- unsigned mask, val;
- int r;
-
- mask = tiocm_to_gigaset(old_state ^ new_state);
- val = tiocm_to_gigaset(new_state);
-
- gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
- r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
- (val & 0xff) | ((mask & 0xff) << 8), 0,
- NULL, 0, 2000 /* timeout? */);
- if (r < 0)
- return r;
- return 0;
-}
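
A worked instance of the resulting "41 07 vv mm" request documented above:

/* Raising DTR with RTS left clear: val = 0x01, mask = 0x01
 *   => wValue = (0x01 & 0xff) | ((0x01 & 0xff) << 8) = 0x0101
 * Dropping RTS with DTR left clear: val = 0x00, mask = 0x02
 *   => wValue = 0x0200
 */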
-
-/*
- * Set M105 configuration value
- * using undocumented device commands reverse engineered from USB traces
- * of the Siemens Windows driver
- */
-static int set_value(struct cardstate *cs, u8 req, u16 val)
-{
- struct usb_device *udev = cs->hw.usb->udev;
- int r, r2;
-
- gig_dbg(DEBUG_USBREQ, "request %02x (%04x)",
- (unsigned)req, (unsigned)val);
- r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41,
- 0xf /*?*/, 0, NULL, 0, 2000 /*?*/);
- /* no idea what this does */
- if (r < 0) {
- dev_err(&udev->dev, "error %d on request 0x12\n", -r);
- return r;
- }
-
- r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41,
- val, 0, NULL, 0, 2000 /*?*/);
- if (r < 0)
- dev_err(&udev->dev, "error %d on request 0x%02x\n",
- -r, (unsigned)req);
-
- r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
- 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
- if (r2 < 0)
- dev_err(&udev->dev, "error %d on request 0x19\n", -r2);
-
- return r < 0 ? r : (r2 < 0 ? r2 : 0);
-}
-
-/*
- * set the baud rate on the internal serial adapter
- * using the undocumented parameter setting command
- */
-static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
-{
- u16 val;
- u32 rate;
-
- cflag &= CBAUD;
-
- switch (cflag) {
- case B300: rate = 300; break;
- case B600: rate = 600; break;
- case B1200: rate = 1200; break;
- case B2400: rate = 2400; break;
- case B4800: rate = 4800; break;
- case B9600: rate = 9600; break;
- case B19200: rate = 19200; break;
- case B38400: rate = 38400; break;
- case B57600: rate = 57600; break;
- case B115200: rate = 115200; break;
- default:
- rate = 9600;
- dev_err(cs->dev, "unsupported baudrate request 0x%x,"
- " using default of B9600\n", cflag);
- }
-
- val = 0x383fff / rate + 1;
-
- return set_value(cs, 1, val);
-}
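
Worked instances of the divider formula, cross-checking the "41 01" request notes above (0x384000 = 3686400 = 32 * 115200):

/* val = 0x383fff / rate + 1  ==  ceil(0x384000 / rate)
 *   B115200: 3686399 / 115200 + 1 =  31 + 1 =  32 (0x0020)
 *   B57600:  3686399 /  57600 + 1 =  63 + 1 =  64 (0x0040)
 *   B9600:   3686399 /   9600 + 1 = 383 + 1 = 384 (0x0180)
 */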
-
-/*
- * set the line format on the internal serial adapter
- * using the undocumented parameter setting command
- */
-static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
-{
- u16 val = 0;
-
- /* set the parity */
- if (cflag & PARENB)
- val |= (cflag & PARODD) ? 0x10 : 0x20;
-
- /* set the number of data bits */
- switch (cflag & CSIZE) {
- case CS5:
- val |= 5 << 8; break;
- case CS6:
- val |= 6 << 8; break;
- case CS7:
- val |= 7 << 8; break;
- case CS8:
- val |= 8 << 8; break;
- default:
- dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n");
- val |= 8 << 8;
- break;
- }
-
- /* set the number of stop bits */
- if (cflag & CSTOPB) {
- if ((cflag & CSIZE) == CS5)
- val |= 1; /* 1.5 stop bits */
- else
- val |= 2; /* 2 stop bits */
- }
-
- return set_value(cs, 3, val);
-}
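
Worked instances of the line-format encoding (illustrative):

/* 8N1 (CS8, no parity, 1 stop bit): val = 8 << 8          = 0x0800
 * 7E1 (CS7, PARENB, !PARODD):       val = 0x20 | (7 << 8) = 0x0720
 * 8N2 (CS8, CSTOPB):                val = 2 | (8 << 8)    = 0x0802
 */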
-
-
-/*============================================================================*/
-static int gigaset_init_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_up(bcs);
- return 0;
-}
-
-static int gigaset_close_bchannel(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
- gigaset_bchannel_down(bcs);
- return 0;
-}
-
-static int write_modem(struct cardstate *cs);
-static int send_cb(struct cardstate *cs);
-
-
-/* Write tasklet handler: Continue sending current skb, or send command, or
- * start sending an skb from the send queue.
- */
-static void gigaset_modem_fill(unsigned long data)
-{
- struct cardstate *cs = (struct cardstate *) data;
- struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
-
- gig_dbg(DEBUG_OUTPUT, "modem_fill");
-
- if (cs->hw.usb->busy) {
- gig_dbg(DEBUG_OUTPUT, "modem_fill: busy");
- return;
- }
-
-again:
- if (!bcs->tx_skb) { /* no skb is being sent */
- if (cs->cmdbuf) { /* commands to send? */
- gig_dbg(DEBUG_OUTPUT, "modem_fill: cb");
- if (send_cb(cs) < 0) {
- gig_dbg(DEBUG_OUTPUT,
- "modem_fill: send_cb failed");
- goto again; /* no callback will be called! */
- }
- return;
- }
-
- /* skbs to send? */
- bcs->tx_skb = skb_dequeue(&bcs->squeue);
- if (!bcs->tx_skb)
- return;
-
- gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!",
- (unsigned long) bcs->tx_skb);
- }
-
- gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
- if (write_modem(cs) < 0) {
- gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed");
- goto again; /* no callback will be called! */
- }
-}
-
-/*
- * Interrupt Input URB completion routine
- */
-static void gigaset_read_int_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- struct inbuf_t *inbuf = cs->inbuf;
- int status = urb->status;
- int r;
- unsigned numbytes;
- unsigned char *src;
- unsigned long flags;
-
- if (!status) {
- numbytes = urb->actual_length;
-
- if (numbytes) {
- src = cs->hw.usb->rcvbuf;
- if (unlikely(*src))
- dev_warn(cs->dev,
- "%s: There was no leading 0, but 0x%02x!\n",
- __func__, (unsigned) *src);
- ++src; /* skip leading 0x00 */
- --numbytes;
- if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
- gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
- gigaset_schedule_event(inbuf->cs);
- }
- } else
- gig_dbg(DEBUG_INTR, "Received zero block length");
- } else {
- /* The urb might have been killed. */
- gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d",
- __func__, status);
- if (status == -ENOENT || status == -ESHUTDOWN)
- /* killed or endpoint shutdown: don't resubmit */
- return;
- }
-
- /* resubmit URB */
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->connected) {
- spin_unlock_irqrestore(&cs->lock, flags);
- pr_err("%s: disconnected\n", __func__);
- return;
- }
- r = usb_submit_urb(urb, GFP_ATOMIC);
- spin_unlock_irqrestore(&cs->lock, flags);
- if (r)
- dev_err(cs->dev, "error %d resubmitting URB\n", -r);
-}
-
-
-/* This callback routine is called when data was transmitted to the device. */
-static void gigaset_write_bulk_callback(struct urb *urb)
-{
- struct cardstate *cs = urb->context;
- int status = urb->status;
- unsigned long flags;
-
- switch (status) {
- case 0: /* normal completion */
- break;
- case -ENOENT: /* killed */
- gig_dbg(DEBUG_ANY, "%s: killed", __func__);
- cs->hw.usb->busy = 0;
- return;
- default:
- dev_err(cs->dev, "bulk transfer failed (status %d)\n",
- -status);
- /* That's all we can do. Communication problems
- are handled by timeouts or network protocols. */
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->connected) {
- pr_err("%s: disconnected\n", __func__);
- } else {
- cs->hw.usb->busy = 0;
- tasklet_schedule(&cs->write_tasklet);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static int send_cb(struct cardstate *cs)
-{
- struct cmdbuf_t *cb = cs->cmdbuf;
- unsigned long flags;
- int count;
- int status = -ENOENT;
- struct usb_cardstate *ucs = cs->hw.usb;
-
- do {
- if (!cb->len) {
- spin_lock_irqsave(&cs->cmdlock, flags);
- cs->cmdbytes -= cs->curlen;
- gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
- cs->curlen, cs->cmdbytes);
- cs->cmdbuf = cb->next;
- if (cs->cmdbuf) {
- cs->cmdbuf->prev = NULL;
- cs->curlen = cs->cmdbuf->len;
- } else {
- cs->lastcmdbuf = NULL;
- cs->curlen = 0;
- }
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- if (cb->wake_tasklet)
- tasklet_schedule(cb->wake_tasklet);
- kfree(cb);
-
- cb = cs->cmdbuf;
- }
-
- if (cb) {
- count = min(cb->len, ucs->bulk_out_size);
- gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
-
- usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
- usb_sndbulkpipe(ucs->udev,
- ucs->bulk_out_epnum),
- cb->buf + cb->offset, count,
- gigaset_write_bulk_callback, cs);
-
- cb->offset += count;
- cb->len -= count;
- ucs->busy = 1;
-
- spin_lock_irqsave(&cs->lock, flags);
- status = cs->connected ?
- usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
- -ENODEV;
- spin_unlock_irqrestore(&cs->lock, flags);
-
- if (status) {
- ucs->busy = 0;
- dev_err(cs->dev,
- "could not submit urb (error %d)\n",
- -status);
- cb->len = 0; /* skip urb => remove cb+wakeup
- in next loop cycle */
- }
- }
- } while (cb && status); /* next command on error */
-
- return status;
-}
-
-/* Send command to device. */
-static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
-{
- unsigned long flags;
- int len;
-
- gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
- DEBUG_TRANSCMD : DEBUG_LOCKCMD,
- "CMD Transmit", cb->len, cb->buf);
-
- spin_lock_irqsave(&cs->cmdlock, flags);
- cb->prev = cs->lastcmdbuf;
- if (cs->lastcmdbuf)
- cs->lastcmdbuf->next = cb;
- else {
- cs->cmdbuf = cb;
- cs->curlen = cb->len;
- }
- cs->cmdbytes += cb->len;
- cs->lastcmdbuf = cb;
- spin_unlock_irqrestore(&cs->cmdlock, flags);
-
- spin_lock_irqsave(&cs->lock, flags);
- len = cb->len;
- if (cs->connected)
- tasklet_schedule(&cs->write_tasklet);
- spin_unlock_irqrestore(&cs->lock, flags);
- return len;
-}
-
-static int gigaset_write_room(struct cardstate *cs)
-{
- unsigned bytes;
-
- bytes = cs->cmdbytes;
- return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
-}
-
-static int gigaset_chars_in_buffer(struct cardstate *cs)
-{
- return cs->cmdbytes;
-}
-
-/*
- * set the break characters on the internal serial adapter
- * using undocumented device commands reverse engineered from USB traces
- * of the Siemens Windows driver
- */
-static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
-{
- struct usb_device *udev = cs->hw.usb->udev;
-
- gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
- memcpy(cs->hw.usb->bchars, buf, 6);
- return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
- 0, 0, cs->hw.usb->bchars, 6, 2000);
-}
-
-static void gigaset_freebcshw(struct bc_state *bcs)
-{
- /* unused */
-}
-
-/* Initialize the b-channel structure */
-static int gigaset_initbcshw(struct bc_state *bcs)
-{
- /* unused */
- bcs->hw.usb = NULL;
- return 0;
-}
-
-static void gigaset_reinitbcshw(struct bc_state *bcs)
-{
- /* nothing to do for M10x */
-}
-
-static void gigaset_freecshw(struct cardstate *cs)
-{
- tasklet_kill(&cs->write_tasklet);
- kfree(cs->hw.usb);
-}
-
-static int gigaset_initcshw(struct cardstate *cs)
-{
- struct usb_cardstate *ucs;
-
- cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
- if (!ucs) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- ucs->bchars[0] = 0;
- ucs->bchars[1] = 0;
- ucs->bchars[2] = 0;
- ucs->bchars[3] = 0;
- ucs->bchars[4] = 0x11;
- ucs->bchars[5] = 0x13;
- tasklet_init(&cs->write_tasklet,
- gigaset_modem_fill, (unsigned long) cs);
-
- return 0;
-}
-
-/* Send data from current skb to the device. */
-static int write_modem(struct cardstate *cs)
-{
- int ret = 0;
- int count;
- struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
- struct usb_cardstate *ucs = cs->hw.usb;
- unsigned long flags;
-
- gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len);
-
- if (!bcs->tx_skb->len) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- return -EINVAL;
- }
-
- /* Copy data to bulk out buffer and transmit data */
- count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
- skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
- skb_pull(bcs->tx_skb, count);
- ucs->busy = 1;
- gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->connected) {
- usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
- usb_sndbulkpipe(ucs->udev,
- ucs->bulk_out_epnum),
- ucs->bulk_out_buffer, count,
- gigaset_write_bulk_callback, cs);
- ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-
- if (ret) {
- dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
- ucs->busy = 0;
- }
-
- if (!bcs->tx_skb->len) {
- /* skb sent completely */
- gigaset_skb_sent(bcs, bcs->tx_skb);
-
- gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
- (unsigned long) bcs->tx_skb);
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- }
-
- return ret;
-}
-
-static int gigaset_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- int retval;
- struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *hostif = interface->cur_altsetting;
- struct cardstate *cs = NULL;
- struct usb_cardstate *ucs = NULL;
- struct usb_endpoint_descriptor *endpoint;
- int buffer_size;
-
- gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__);
-
- /* See if the device offered us matches what we can accept */
- if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
- (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) {
- gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
- return -ENODEV;
- }
- if (hostif->desc.bInterfaceNumber != 0) {
- gig_dbg(DEBUG_ANY, "interface %d not for me - skip",
- hostif->desc.bInterfaceNumber);
- return -ENODEV;
- }
- if (hostif->desc.bAlternateSetting != 0) {
- dev_notice(&udev->dev, "unsupported altsetting %d - skip",
- hostif->desc.bAlternateSetting);
- return -ENODEV;
- }
- if (hostif->desc.bInterfaceClass != 255) {
- dev_notice(&udev->dev, "unsupported interface class %d - skip",
- hostif->desc.bInterfaceClass);
- return -ENODEV;
- }
-
- if (hostif->desc.bNumEndpoints < 2) {
- dev_err(&interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
- dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
-
- /* allocate memory for our device state and initialize it */
- cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
- if (!cs)
- return -ENODEV;
- ucs = cs->hw.usb;
-
- /* save off device structure ptrs for later use */
- usb_get_dev(udev);
- ucs->udev = udev;
- ucs->interface = interface;
- cs->dev = &interface->dev;
-
- /* save address of controller structure */
- usb_set_intfdata(interface, cs);
-
- endpoint = &hostif->endpoint[0].desc;
-
- if (!usb_endpoint_is_bulk_out(endpoint)) {
- dev_err(&interface->dev, "missing bulk-out endpoint\n");
- retval = -ENODEV;
- goto error;
- }
-
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- ucs->bulk_out_size = buffer_size;
- ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
- ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!ucs->bulk_out_buffer) {
- dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n");
- retval = -ENOMEM;
- goto error;
- }
-
- ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ucs->bulk_out_urb) {
- dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
- retval = -ENOMEM;
- goto error;
- }
-
- endpoint = &hostif->endpoint[1].desc;
-
- if (!usb_endpoint_is_int_in(endpoint)) {
- dev_err(&interface->dev, "missing int-in endpoint\n");
- retval = -ENODEV;
- goto error;
- }
-
- ucs->busy = 0;
-
- ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ucs->read_urb) {
- dev_err(cs->dev, "No free urbs available\n");
- retval = -ENOMEM;
- goto error;
- }
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- ucs->rcvbuf_size = buffer_size;
- ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
- if (!ucs->rcvbuf) {
- dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
- retval = -ENOMEM;
- goto error;
- }
- /* Fill the interrupt urb and send it to the core */
- usb_fill_int_urb(ucs->read_urb, udev,
- usb_rcvintpipe(udev, usb_endpoint_num(endpoint)),
- ucs->rcvbuf, buffer_size,
- gigaset_read_int_callback,
- cs, endpoint->bInterval);
-
- retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
- if (retval) {
- dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
- goto error;
- }
-
- /* tell common part that the device is ready */
- if (startmode == SM_LOCKED)
- cs->mstate = MS_LOCKED;
-
- retval = gigaset_start(cs);
- if (retval < 0) {
- tasklet_kill(&cs->write_tasklet);
- goto error;
- }
- return 0;
-
-error:
- usb_kill_urb(ucs->read_urb);
- kfree(ucs->bulk_out_buffer);
- usb_free_urb(ucs->bulk_out_urb);
- kfree(ucs->rcvbuf);
- usb_free_urb(ucs->read_urb);
- usb_set_intfdata(interface, NULL);
- ucs->read_urb = ucs->bulk_out_urb = NULL;
- ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
- usb_put_dev(ucs->udev);
- ucs->udev = NULL;
- ucs->interface = NULL;
- gigaset_freecs(cs);
- return retval;
-}
-
-static void gigaset_disconnect(struct usb_interface *interface)
-{
- struct cardstate *cs;
- struct usb_cardstate *ucs;
-
- cs = usb_get_intfdata(interface);
- ucs = cs->hw.usb;
-
- dev_info(cs->dev, "disconnecting Gigaset USB adapter\n");
-
- usb_kill_urb(ucs->read_urb);
-
- gigaset_stop(cs);
-
- usb_set_intfdata(interface, NULL);
- tasklet_kill(&cs->write_tasklet);
-
- usb_kill_urb(ucs->bulk_out_urb);
-
- kfree(ucs->bulk_out_buffer);
- usb_free_urb(ucs->bulk_out_urb);
- kfree(ucs->rcvbuf);
- usb_free_urb(ucs->read_urb);
- ucs->read_urb = ucs->bulk_out_urb = NULL;
- ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
-
- usb_put_dev(ucs->udev);
- ucs->interface = NULL;
- ucs->udev = NULL;
- cs->dev = NULL;
- gigaset_freecs(cs);
-}
-
-/* gigaset_suspend
- * This function is called before the USB connection is suspended or reset.
- */
-static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
-
- /* stop activity */
- cs->connected = 0; /* prevent rescheduling */
- usb_kill_urb(cs->hw.usb->read_urb);
- tasklet_kill(&cs->write_tasklet);
- usb_kill_urb(cs->hw.usb->bulk_out_urb);
-
- gig_dbg(DEBUG_SUSPEND, "suspend complete");
- return 0;
-}
-
-/* gigaset_resume
- * This function is called after the USB connection has been resumed or reset.
- */
-static int gigaset_resume(struct usb_interface *intf)
-{
- struct cardstate *cs = usb_get_intfdata(intf);
- int rc;
-
- /* resubmit interrupt URB */
- cs->connected = 1;
- rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL);
- if (rc) {
- dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc);
- return rc;
- }
-
- gig_dbg(DEBUG_SUSPEND, "resume complete");
- return 0;
-}
-
-/* gigaset_pre_reset
- * This function is called before the USB connection is reset.
- */
-static int gigaset_pre_reset(struct usb_interface *intf)
-{
- /* same as suspend */
- return gigaset_suspend(intf, PMSG_ON);
-}
-
-static const struct gigaset_ops ops = {
- .write_cmd = gigaset_write_cmd,
- .write_room = gigaset_write_room,
- .chars_in_buffer = gigaset_chars_in_buffer,
- .brkchars = gigaset_brkchars,
- .init_bchannel = gigaset_init_bchannel,
- .close_bchannel = gigaset_close_bchannel,
- .initbcshw = gigaset_initbcshw,
- .freebcshw = gigaset_freebcshw,
- .reinitbcshw = gigaset_reinitbcshw,
- .initcshw = gigaset_initcshw,
- .freecshw = gigaset_freecshw,
- .set_modem_ctrl = gigaset_set_modem_ctrl,
- .baud_rate = gigaset_baud_rate,
- .set_line_ctrl = gigaset_set_line_ctrl,
- .send_skb = gigaset_m10x_send_skb,
- .handle_input = gigaset_m10x_input,
-};
-
-/*
- * This function is called when the kernel module is loaded
- */
-static int __init usb_gigaset_init(void)
-{
- int result;
-
- /* allocate memory for our driver state and initialize it */
- driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
- GIGASET_MODULENAME, GIGASET_DEVNAME,
- &ops, THIS_MODULE);
- if (driver == NULL) {
- result = -ENOMEM;
- goto error;
- }
-
- /* register this driver with the USB subsystem */
- result = usb_register(&gigaset_usb_driver);
- if (result < 0) {
- pr_err("error %d registering USB driver\n", -result);
- goto error;
- }
-
- pr_info(DRIVER_DESC "\n");
- return 0;
-
-error:
- if (driver)
- gigaset_freedriver(driver);
- driver = NULL;
- return result;
-}
-
-/*
- * This function is called when the kernel module is unloaded
- */
-static void __exit usb_gigaset_exit(void)
-{
- int i;
-
- gigaset_blockdriver(driver); /* => probe will fail
- * => no gigaset_start any more
- */
-
- /* stop all connected devices */
- for (i = 0; i < driver->minors; i++)
- gigaset_shutdown(driver->cs + i);
-
- /* from now on, no isdn callback should be possible */
-
- /* deregister this driver with the USB subsystem */
- usb_deregister(&gigaset_usb_driver);
- /* this will call the disconnect-callback */
- /* from now on, no disconnect/probe callback should be running */
-
- gigaset_freedriver(driver);
- driver = NULL;
-}
-
-
-module_init(usb_gigaset_init);
-module_exit(usb_gigaset_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/isdn/hysdn/Kconfig b/drivers/staging/isdn/hysdn/Kconfig
deleted file mode 100644
index 4c8a9283b9dd..000000000000
--- a/drivers/staging/isdn/hysdn/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config HYSDN
- tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
- depends on m && PROC_FS && PCI
- help
-	  Say Y here if you have one of Hypercope's active PCI ISDN cards,
-	  Champ, Ergo or Metro. You will then get a module called hysdn.
- Please read the file <file:Documentation/isdn/hysdn.rst> for more
- information.
-
-config HYSDN_CAPI
- bool "HYSDN CAPI 2.0 support"
- depends on HYSDN && ISDN_CAPI
- help
-	  Say Y here if you want to use Hypercope's CAPI 2.0 interface.
diff --git a/drivers/staging/isdn/hysdn/Makefile b/drivers/staging/isdn/hysdn/Makefile
deleted file mode 100644
index e01f17f22ebb..000000000000
--- a/drivers/staging/isdn/hysdn/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Makefile for the hysdn ISDN device driver
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_HYSDN) += hysdn.o
-
-# Multipart objects.
-
-hysdn-y := hysdn_procconf.o hysdn_proclog.o boardergo.o \
- hysdn_boot.o hysdn_sched.o hysdn_net.o hysdn_init.o
-hysdn-$(CONFIG_HYSDN_CAPI) += hycapi.o
diff --git a/drivers/staging/isdn/hysdn/boardergo.c b/drivers/staging/isdn/hysdn/boardergo.c
deleted file mode 100644
index 2aa2a0e08247..000000000000
--- a/drivers/staging/isdn/hysdn/boardergo.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/* $Id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai Exp $
- *
- * Linux driver for HYSDN cards, specific routines for ergo type boards.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * As all Linux-supported cards (Champ2, Ergo and Metro2/4) use the same
- * DPRAM interface and layout with only minor differences, all related
- * handling is done here rather than in separate modules.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "hysdn_defs.h"
-#include "boardergo.h"
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-/***************************************************/
-/* The card's interrupt handler. Called by the system */
-/***************************************************/
-static irqreturn_t
-ergo_interrupt(int intno, void *dev_id)
-{
- hysdn_card *card = dev_id; /* parameter from irq */
- tErgDpram *dpr;
- unsigned long flags;
- unsigned char volatile b;
-
- if (!card)
- return IRQ_NONE; /* error -> spurious interrupt */
- if (!card->irq_enabled)
- return IRQ_NONE; /* other device interrupting or irq switched off */
-
- spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */
-
- if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) {
- spin_unlock_irqrestore(&card->hysdn_lock, flags); /* restore old state */
- return IRQ_NONE; /* no interrupt requested by E1 */
- }
- /* clear any pending ints on the board */
- dpr = card->dpram;
- b = dpr->ToPcInt; /* clear for ergo */
- b |= dpr->ToPcIntMetro; /* same for metro */
- b |= dpr->ToHyInt; /* and for champ */
-
- /* start kernel task immediately after leaving all interrupts */
- if (!card->hw_lock)
- schedule_work(&card->irq_queue);
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- return IRQ_HANDLED;
-} /* ergo_interrupt */
-
-/******************************************************************************/
-/* ergo_irq_bh will be called as part of the kernel clearing its shared work */
-/* queue sometime after a call to schedule_work has been made passing our */
-/* work_struct. This task is the only one handling data transfer from or to */
-/* the card after booting. The task may be queued from everywhere */
-/* (interrupts included). */
-/******************************************************************************/
-static void
-ergo_irq_bh(struct work_struct *ugli_api)
-{
- hysdn_card *card = container_of(ugli_api, hysdn_card, irq_queue);
- tErgDpram *dpr;
- int again;
- unsigned long flags;
-
- if (card->state != CARD_STATE_RUN)
- return; /* invalid call */
-
- dpr = card->dpram; /* point to DPRAM */
-
- spin_lock_irqsave(&card->hysdn_lock, flags);
- if (card->hw_lock) {
- spin_unlock_irqrestore(&card->hysdn_lock, flags); /* hardware currently unavailable */
- return;
- }
- card->hw_lock = 1; /* we now lock the hardware */
-
- do {
- again = 0; /* assume loop not to be repeated */
-
- if (!dpr->ToHyFlag) {
- /* we are able to send a buffer */
-
- if (hysdn_sched_tx(card, dpr->ToHyBuf, &dpr->ToHySize, &dpr->ToHyChannel,
- ERG_TO_HY_BUF_SIZE)) {
- dpr->ToHyFlag = 1; /* enable tx */
- again = 1; /* restart loop */
- }
- } /* we are able to send a buffer */
- if (dpr->ToPcFlag) {
- /* a message has arrived for us, handle it */
-
- if (hysdn_sched_rx(card, dpr->ToPcBuf, dpr->ToPcSize, dpr->ToPcChannel)) {
- dpr->ToPcFlag = 0; /* we worked the data */
- again = 1; /* restart loop */
- }
- } /* a message has arrived for us */
- if (again) {
- dpr->ToHyInt = 1;
- dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
- } else
- card->hw_lock = 0; /* free hardware again */
- } while (again); /* until nothing more to do */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-} /* ergo_irq_bh */
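-
-/*
- * Handshake sketch (hypothetical helpers, not in the original source):
- * the transfer loop above reduces to two mailbox flags in the shared
- * DPRAM, polled until neither direction has work left.
- */
-static inline int dpram_can_tx(const tErgDpram *dpr)
-{
-	return !dpr->ToHyFlag;		/* card has consumed the last tx buffer */
-}
-
-static inline int dpram_has_rx(const tErgDpram *dpr)
-{
-	return dpr->ToPcFlag != 0;	/* card has posted a message for the PC */
-}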
-
-
-/*********************************************************/
-/* stop the card (hardware reset) and disable interrupts */
-/*********************************************************/
-static void
-ergo_stopcard(hysdn_card *card)
-{
- unsigned long flags;
- unsigned char val;
-
- hysdn_net_release(card); /* first release the net device if existing */
-#ifdef CONFIG_HYSDN_CAPI
- hycapi_capi_stop(card);
-#endif /* CONFIG_HYSDN_CAPI */
- spin_lock_irqsave(&card->hysdn_lock, flags);
- val = bytein(card->iobase + PCI9050_INTR_REG); /* get actual value */
- val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1); /* mask irq */
- byteout(card->iobase + PCI9050_INTR_REG, val);
- card->irq_enabled = 0;
- byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RESET); /* reset E1 processor */
- card->state = CARD_STATE_UNUSED;
- card->err_log_state = ERRLOG_STATE_OFF; /* currently no log active */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-} /* ergo_stopcard */
-
-/**************************************************************************/
-/* enable or disable the card's error log. The event is queued if possible */
-/**************************************************************************/
-static void
-ergo_set_errlog_state(hysdn_card *card, int on)
-{
- unsigned long flags;
-
- if (card->state != CARD_STATE_RUN) {
- card->err_log_state = ERRLOG_STATE_OFF; /* must be off */
- return;
- }
- spin_lock_irqsave(&card->hysdn_lock, flags);
-
- if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
- ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- return; /* nothing to do */
- }
- if (on)
- card->err_log_state = ERRLOG_STATE_START; /* request start */
- else
- card->err_log_state = ERRLOG_STATE_STOP; /* request stop */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- schedule_work(&card->irq_queue);
-} /* ergo_set_errlog_state */
-
-/******************************************/
-/* test the card's RAM and return 0 if ok. */
-/******************************************/
-static const char TestText[36] = "This Message is filler, why read it";
-
-static int
-ergo_testram(hysdn_card *card)
-{
- tErgDpram *dpr = card->dpram;
-
- memset(dpr->TrapTable, 0, sizeof(dpr->TrapTable)); /* clear all Traps */
- dpr->ToHyInt = 1; /* E1 INTR state forced */
-
- memcpy(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText));
- if (memcmp(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText)))
- return (-1);
-
- memcpy(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText));
- if (memcmp(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText,
- sizeof(TestText)))
- return (-1);
-
- return (0);
-} /* ergo_testram */
-
-/*****************************************************************************/
-/* this function is intended to write the stage 1 boot image to the card's buffer */
-/* this is done in two steps. First the 1024 hi-words are written (offs=0), */
-/* then the 1024 lo-bytes are written. The remaining DPRAM is cleared, the */
-/* PCI-write-buffers flushed and the card is taken out of reset. */
-/* The function then waits for a reaction of the E1 processor or a timeout. */
-/* Negative return values are interpreted as errors. */
-/*****************************************************************************/
-static int
-ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf,
- unsigned long offs)
-{
- unsigned char *dst;
- tErgDpram *dpram;
- int cnt = (BOOT_IMG_SIZE >> 2); /* number of words to move and swap (byte order!) */
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write bootldr offs=0x%lx ", offs);
-
- dst = card->dpram; /* pointer to start of DPRAM */
- dst += (offs + ERG_DPRAM_FILL_SIZE); /* offset in the DPRAM */
- while (cnt--) {
- *dst++ = *(buf + 1); /* high byte */
- *dst++ = *buf; /* low byte */
- dst += 2; /* point to next longword */
- buf += 2; /* buffer only filled with words */
- }
-
- /* if low words (offs = 2) have been written, clear the rest of the DPRAM, */
- /* flush the PCI-write-buffer and take the E1 out of reset */
- if (offs) {
- memset(card->dpram, 0, ERG_DPRAM_FILL_SIZE); /* fill the DPRAM still not cleared */
- dpram = card->dpram; /* get pointer to dpram structure */
- dpram->ToHyNoDpramErrLog = 0xFF; /* write a dpram register */
- while (!dpram->ToHyNoDpramErrLog); /* reread volatile register to flush PCI */
-
- byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN); /* start E1 processor */
- /* the interrupts are still masked */
-
- msleep_interruptible(20); /* Timeout 20ms */
-
- if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write bootldr no answer");
- return (-ERR_BOOTIMG_FAIL);
- }
- } /* start_boot_img */
- return (0); /* successful */
-} /* ergo_writebootimg */
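-
-/*
- * Equivalent store sketch (assumption, not in the original): the copy
- * loop above performs a big-endian 16-bit store into every other DPRAM
- * halfword, i.e. a little-endian word {0xBB, 0xAA} (0xAABB) reaches the
- * card as 0xAA followed by 0xBB.
- */
-static inline void dpram_put_word_sketch(unsigned char *dst,
-					 const unsigned char *src)
-{
-	dst[0] = src[1];	/* high byte first */
-	dst[1] = src[0];	/* then low byte */
-	/* dst[2]/dst[3] stay untouched; only every other halfword is filled */
-}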
-
-/********************************************************************************/
-/* ergo_writebootseq writes the buffer containing len bytes to the E1 processor */
-/* using the boot spool mechanism. If everything works fine 0 is returned. In */
-/* case of errors a negative error value is returned. */
-/********************************************************************************/
-static int
-ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
-{
- tDpramBootSpooler *sp = (tDpramBootSpooler *) card->dpram;
- unsigned char *dst;
- unsigned char buflen;
- int nr_write;
- unsigned char tmp_rdptr;
- unsigned char wr_mirror;
- int i;
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write boot seq len=%d ", len);
-
- dst = sp->Data; /* point to data in spool structure */
- buflen = sp->Len; /* maximum len of spooled data */
- wr_mirror = sp->WrPtr; /* only once read */
-
- /* try until all bytes written or error */
- i = 0x1000; /* timeout value */
- while (len) {
-
- /* first determine the number of bytes that may be buffered */
- do {
- tmp_rdptr = sp->RdPtr; /* first read the pointer */
- i--; /* decrement timeout */
- } while (i && (tmp_rdptr != sp->RdPtr)); /* wait for stable pointer */
-
- if (!i) {
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: write boot seq timeout");
- return (-ERR_BOOTSEQ_FAIL); /* value not stable -> timeout */
- }
- if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0)
- nr_write += buflen; /* now we got number of free bytes - 1 in buffer */
-
- if (!nr_write)
- continue; /* no free bytes in buffer */
-
- if (nr_write > len)
- nr_write = len; /* limit if last few bytes */
- i = 0x1000; /* reset timeout value */
-
-		/* now we know how many bytes we may put in the buffer */
-		len -= nr_write;	/* we can safely adjust len before output */
- while (nr_write--) {
- *(dst + wr_mirror) = *buf++; /* output one byte */
- if (++wr_mirror >= buflen)
- wr_mirror = 0;
- sp->WrPtr = wr_mirror; /* announce the next byte to E1 */
- } /* while (nr_write) */
-
- } /* while (len) */
- return (0);
-} /* ergo_writebootseq */
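-
-/*
- * Free-space sketch (hypothetical helper, not in the original): the
- * spooler is a classic ring buffer that keeps one byte unused, so that
- * RdPtr == WrPtr unambiguously means "empty". This mirrors the
- * nr_write computation in the loop above.
- */
-static inline int spool_free_bytes_sketch(unsigned char rd, unsigned char wr,
-					  unsigned char buflen)
-{
-	int n = rd - wr - 1;	/* bytes writable before reaching RdPtr */
-
-	return (n < 0) ? n + buflen : n;
-}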
-
-/***********************************************************************************/
-/* ergo_waitpofready waits for a maximum of 10 seconds for the completion of the */
-/* boot process. If the process has been successful, 0 is returned; otherwise a */
-/* negative error code is returned. */
-/***********************************************************************************/
-static int
-ergo_waitpofready(struct HYSDN_CARD *card)
-{
- tErgDpram *dpr = card->dpram; /* pointer to DPRAM structure */
- int timecnt = 10000 / 50; /* timeout is 10 secs max. */
- unsigned long flags;
- int msg_size;
- int i;
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: waiting for pof ready");
- while (timecnt--) {
- /* wait until timeout */
-
- if (dpr->ToPcFlag) {
- /* data has arrived */
-
- if ((dpr->ToPcChannel != CHAN_SYSTEM) ||
- (dpr->ToPcSize < MIN_RDY_MSG_SIZE) ||
- (dpr->ToPcSize > MAX_RDY_MSG_SIZE) ||
- ((*(unsigned long *) dpr->ToPcBuf) != RDY_MAGIC))
- break; /* an error occurred */
-
- /* Check for additional data delivered during SysReady */
- msg_size = dpr->ToPcSize - RDY_MAGIC_SIZE;
- if (msg_size > 0)
- if (EvalSysrTokData(card, dpr->ToPcBuf + RDY_MAGIC_SIZE, msg_size))
- break;
-
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "ERGO: pof boot success");
- spin_lock_irqsave(&card->hysdn_lock, flags);
-
- card->state = CARD_STATE_RUN; /* now card is running */
- /* enable the cards interrupt */
- byteout(card->iobase + PCI9050_INTR_REG,
- bytein(card->iobase + PCI9050_INTR_REG) |
- (PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1));
- card->irq_enabled = 1; /* we are ready to receive interrupts */
-
- dpr->ToPcFlag = 0; /* reset data indicator */
- dpr->ToHyInt = 1;
- dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- if ((hynet_enable & (1 << card->myid))
- && (i = hysdn_net_create(card)))
- {
- ergo_stopcard(card);
- card->state = CARD_STATE_BOOTERR;
- return (i);
- }
-#ifdef CONFIG_HYSDN_CAPI
- if ((i = hycapi_capi_create(card))) {
- printk(KERN_WARNING "HYSDN: failed to create capi-interface.\n");
- }
-#endif /* CONFIG_HYSDN_CAPI */
- return (0); /* success */
- } /* data has arrived */
- msleep_interruptible(50); /* Timeout 50ms */
- } /* wait until timeout */
-
- if (card->debug_flags & LOG_POF_CARD)
- hysdn_addlog(card, "ERGO: pof boot ready timeout");
- return (-ERR_POF_TIMEOUT);
-} /* ergo_waitpofready */
-
-
-
-/************************************************************************************/
-/* release the card's hardware. Before releasing, disable interrupts and do a */
-/* hardware reset. Also unmap the DPRAM. */
-/* Use only during module release. */
-/************************************************************************************/
-static void
-ergo_releasehardware(hysdn_card *card)
-{
- ergo_stopcard(card); /* first stop the card if not already done */
- free_irq(card->irq, card); /* release interrupt */
- release_region(card->iobase + PCI9050_INTR_REG, 1); /* release all io ports */
- release_region(card->iobase + PCI9050_USER_IO, 1);
- iounmap(card->dpram);
- card->dpram = NULL; /* release shared mem */
-} /* ergo_releasehardware */
-
-
-/*********************************************************************************/
-/* acquire the needed hardware ports and map the DPRAM. If an error occurs a nonzero */
-/* value is returned. */
-/* Use only during module init. */
-/*********************************************************************************/
-int
-ergo_inithardware(hysdn_card *card)
-{
- if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
- return (-1);
- if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN")) {
- release_region(card->iobase + PCI9050_INTR_REG, 1);
- return (-1); /* ports already in use */
- }
- card->memend = card->membase + ERG_DPRAM_PAGE_SIZE - 1;
- if (!(card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE))) {
- release_region(card->iobase + PCI9050_INTR_REG, 1);
- release_region(card->iobase + PCI9050_USER_IO, 1);
- return (-1);
- }
-
- ergo_stopcard(card); /* disable interrupts */
- if (request_irq(card->irq, ergo_interrupt, IRQF_SHARED, "HYSDN", card)) {
- ergo_releasehardware(card); /* return the acquired hardware */
- return (-1);
- }
- /* success, now setup the function pointers */
- card->stopcard = ergo_stopcard;
- card->releasehardware = ergo_releasehardware;
- card->testram = ergo_testram;
- card->writebootimg = ergo_writebootimg;
- card->writebootseq = ergo_writebootseq;
- card->waitpofready = ergo_waitpofready;
- card->set_errlog_state = ergo_set_errlog_state;
- INIT_WORK(&card->irq_queue, ergo_irq_bh);
- spin_lock_init(&card->hysdn_lock);
-
- return (0);
-} /* ergo_inithardware */
diff --git a/drivers/staging/isdn/hysdn/boardergo.h b/drivers/staging/isdn/hysdn/boardergo.h
deleted file mode 100644
index e99bd81c4034..000000000000
--- a/drivers/staging/isdn/hysdn/boardergo.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* $Id: boardergo.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, definitions for ergo type boards (buffers..).
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-/************************************************/
-/* defines for the dual port memory of the card */
-/************************************************/
-#define ERG_DPRAM_PAGE_SIZE 0x2000	/* DPRAM occupies an 8K page */
-#define BOOT_IMG_SIZE 4096
-#define ERG_DPRAM_FILL_SIZE (ERG_DPRAM_PAGE_SIZE - BOOT_IMG_SIZE)
-
-#define ERG_TO_HY_BUF_SIZE 0x0E00	/* 3584 bytes buffer size to card */
-#define ERG_TO_PC_BUF_SIZE 0x0E00	/* 3584 bytes to PC, too */
-
-/* following DPRAM layout copied from OS2-driver boarderg.h */
-typedef struct ErgDpram_tag {
- /*0000 */ unsigned char ToHyBuf[ERG_TO_HY_BUF_SIZE];
- /*0E00 */ unsigned char ToPcBuf[ERG_TO_PC_BUF_SIZE];
-
- /*1C00 */ unsigned char bSoftUart[SIZE_RSV_SOFT_UART];
- /* size 0x1B0 */
-
- /*1DB0 *//* tErrLogEntry */ unsigned char volatile ErrLogMsg[64];
- /* size 64 bytes */
- /*1DB0 unsigned long ulErrType; */
- /*1DB4 unsigned long ulErrSubtype; */
- /*1DB8 unsigned long ucTextSize; */
- /*1DB9 unsigned long ucText[ERRLOG_TEXT_SIZE]; *//* ASCIIZ of len ucTextSize-1 */
- /*1DF0 */
-
- /*1DF0 */ unsigned short volatile ToHyChannel;
- /*1DF2 */ unsigned short volatile ToHySize;
- /*1DF4 */ unsigned char volatile ToHyFlag;
- /* !=0: msg for Hy waiting */
- /*1DF5 */ unsigned char volatile ToPcFlag;
- /* !=0: msg for PC waiting */
- /*1DF6 */ unsigned short volatile ToPcChannel;
- /*1DF8 */ unsigned short volatile ToPcSize;
- /*1DFA */ unsigned char bRes1DBA[0x1E00 - 0x1DFA];
- /* 6 bytes */
-
- /*1E00 */ unsigned char bRestOfEntryTbl[0x1F00 - 0x1E00];
- /*1F00 */ unsigned long TrapTable[62];
- /*1FF8 */ unsigned char bRes1FF8[0x1FFB - 0x1FF8];
-	/* low part of reset vector */
- /*1FFB */ unsigned char ToPcIntMetro;
- /* notes:
- * - metro has 32-bit boot ram - accessing
- * ToPcInt and ToHyInt would be the same;
- * so we moved ToPcInt to 1FFB.
- * Because on the PC side both vars are
-	 * read-only (resetting on int from E1 to PC),
- * we can read both vars on both cards
- * without destroying anything.
- * - 1FFB is the high byte of the reset vector,
- * so E1 side should NOT change this byte
- * when writing!
- */
- /*1FFC */ unsigned char volatile ToHyNoDpramErrLog;
- /* note: ToHyNoDpramErrLog is used to inform
- * boot loader, not to use DPRAM based
- * ErrLog; when DOS driver is rewritten
- * this becomes obsolete
- */
- /*1FFD */ unsigned char bRes1FFD;
- /*1FFE */ unsigned char ToPcInt;
- /* E1_intclear; on CHAMP2: E1_intset */
- /*1FFF */ unsigned char ToHyInt;
- /* E1_intset; on CHAMP2: E1_intclear */
-} tErgDpram;
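-
-/*
- * Layout check sketch (not in the original header): on targets where
- * unsigned long is 4 bytes, the offsets in the comments above add up to
- * exactly one 8K DPRAM page, which could be asserted at compile time:
- *
- *	static_assert(sizeof(tErgDpram) == ERG_DPRAM_PAGE_SIZE,
- *		      "DPRAM layout mismatch");
- *
- * Note that TrapTable[62] is declared as unsigned long, so the check
- * would only hold where longs are 32 bits wide.
- */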
-
-/**********************************************/
-/* PCI9050 controller local register offsets: */
-/* copied from boarderg.c */
-/**********************************************/
-#define PCI9050_INTR_REG 0x4C /* Interrupt register */
-#define PCI9050_USER_IO 0x51 /* User I/O register */
-
-/* bitmask for PCI9050_INTR_REG: */
-#define PCI9050_INTR_REG_EN1 0x01 /* 1= enable (def.), 0= disable */
-#define PCI9050_INTR_REG_POL1 0x02 /* 1= active high (def.), 0= active low */
-#define PCI9050_INTR_REG_STAT1 0x04 /* 1= intr. active, 0= intr. not active (def.) */
-#define PCI9050_INTR_REG_ENPCI 0x40 /* 1= PCI interrupts enable (def.) */
-
-/* bitmask for PCI9050_USER_IO: */
-#define PCI9050_USER_IO_EN3 0x02 /* 1= disable , 0= enable (def.) */
-#define PCI9050_USER_IO_DIR3 0x04 /* 1= output (def.), 0= input */
-#define PCI9050_USER_IO_DAT3 0x08 /* 1= high (def.) , 0= low */
-
-#define PCI9050_E1_RESET (PCI9050_USER_IO_DIR3) /* 0x04 */
-#define PCI9050_E1_RUN (PCI9050_USER_IO_DAT3 | PCI9050_USER_IO_DIR3) /* 0x0C */
diff --git a/drivers/staging/isdn/hysdn/hycapi.c b/drivers/staging/isdn/hysdn/hycapi.c
deleted file mode 100644
index a2c15cd7bf67..000000000000
--- a/drivers/staging/isdn/hysdn/hycapi.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/* $Id: hycapi.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, CAPI2.0-Interface.
- *
- * Author Ulrich Albrecht <u.albrecht@hypercope.de> for Hypercope GmbH
- * Copyright 2000 by Hypercope GmbH
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#define VER_DRIVER 0
-#define VER_CARDTYPE 1
-#define VER_HWID 2
-#define VER_SERIAL 3
-#define VER_OPTION 4
-#define VER_PROTO 5
-#define VER_PROFILE 6
-#define VER_CAPI 7
-
-#include "hysdn_defs.h"
-#include <linux/kernelcapi.h>
-
-static char hycapi_revision[] = "$Revision: 1.8.6.4 $";
-
-unsigned int hycapi_enable = 0xffffffff;
-module_param(hycapi_enable, uint, 0);
-
-typedef struct _hycapi_appl {
- unsigned int ctrl_mask;
- capi_register_params rp;
- struct sk_buff *listen_req[CAPI_MAXCONTR];
-} hycapi_appl;
-
-static hycapi_appl hycapi_applications[CAPI_MAXAPPL];
-
-static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-
-static inline int _hycapi_appCheck(int app_id, int ctrl_no)
-{
- if ((ctrl_no <= 0) || (ctrl_no > CAPI_MAXCONTR) || (app_id <= 0) ||
- (app_id > CAPI_MAXAPPL))
- {
-		printk(KERN_ERR "HYCAPI: Invalid request app_id %d for controller %d\n", app_id, ctrl_no);
- return -1;
- }
- return ((hycapi_applications[app_id - 1].ctrl_mask & (1 << (ctrl_no-1))) != 0);
-}
-
-/******************************
-Kernel-Capi callback reset_ctr
-******************************/
-
-static void
-hycapi_reset_ctr(struct capi_ctr *ctrl)
-{
- hycapictrl_info *cinfo = ctrl->driverdata;
-
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n");
-#endif
- capilib_release(&cinfo->ncci_head);
- capi_ctr_down(ctrl);
-}
-
-/******************************
-Kernel-Capi callback remove_ctr
-******************************/
-
-static void
-hycapi_remove_ctr(struct capi_ctr *ctrl)
-{
- int i;
- hycapictrl_info *cinfo = NULL;
- hysdn_card *card = NULL;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "HYCAPI hycapi_remove_ctr\n");
-#endif
- cinfo = (hycapictrl_info *)(ctrl->driverdata);
- if (!cinfo) {
-		printk(KERN_ERR "No hycapictrl_info set!\n");
- return;
- }
- card = cinfo->card;
- capi_ctr_suspend_output(ctrl);
- for (i = 0; i < CAPI_MAXAPPL; i++) {
- if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
- kfree_skb(hycapi_applications[i].listen_req[ctrl->cnr - 1]);
- hycapi_applications[i].listen_req[ctrl->cnr - 1] = NULL;
- }
- }
- detach_capi_ctr(ctrl);
- ctrl->driverdata = NULL;
- kfree(card->hyctrlinfo);
-
-
- card->hyctrlinfo = NULL;
-}
-
-/***********************************************************
-
-Queue a CAPI-message to the controller.
-
-***********************************************************/
-
-static void
-hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
-
- spin_lock_irq(&cinfo->lock);
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_send_message\n");
-#endif
- cinfo->skbs[cinfo->in_idx++] = skb; /* add to buffer list */
- if (cinfo->in_idx >= HYSDN_MAX_CAPI_SKB)
- cinfo->in_idx = 0; /* wrap around */
- cinfo->sk_count++; /* adjust counter */
- if (cinfo->sk_count >= HYSDN_MAX_CAPI_SKB) {
- /* inform upper layers we're full */
- printk(KERN_ERR "HYSDN Card%d: CAPI-buffer overrun!\n",
- card->myid);
- capi_ctr_suspend_output(ctrl);
- }
- cinfo->tx_skb = skb;
- spin_unlock_irq(&cinfo->lock);
- schedule_work(&card->irq_queue);
-}
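-
-/*
- * Flow-control sketch (hypothetical helper, not in the original):
- * output is suspended exactly when the skb ring fills up, and resumed
- * again in hycapi_tx_capiack() once a slot drains.
- */
-static inline int capi_ring_full_sketch(const hycapictrl_info *cinfo)
-{
-	return cinfo->sk_count >= HYSDN_MAX_CAPI_SKB;
-}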
-
-/***********************************************************
-hycapi_register_internal
-
-Send down the CAPI_REGISTER-Command to the controller.
-This functions will also be used if the adapter has been rebooted to
-re-register any applications in the private list.
-
-************************************************************/
-
-static void
-hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl,
- capi_register_params *rp)
-{
- char ExtFeatureDefaults[] = "49 /0/0/0/0,*/1,*/2,*/3,*/4,*/5,*/6,*/7,*/8,*/9,*";
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- struct sk_buff *skb;
- __u16 len;
- __u8 _command = 0xa0, _subcommand = 0x80;
- __u16 MessageNumber = 0x0000;
- __u16 MessageBufferSize = 0;
- int slen = strlen(ExtFeatureDefaults);
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_register_appl\n");
-#endif
- MessageBufferSize = rp->level3cnt * rp->datablkcnt * rp->datablklen;
-
- len = CAPI_MSG_BASELEN + 8 + slen + 1;
- if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n",
- card->myid);
- return;
- }
- skb_put_data(skb, &len, sizeof(__u16));
- skb_put_data(skb, &appl, sizeof(__u16));
- skb_put_data(skb, &_command, sizeof(__u8));
- skb_put_data(skb, &_subcommand, sizeof(__u8));
- skb_put_data(skb, &MessageNumber, sizeof(__u16));
- skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
- skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
- skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
- skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
- skb_put_data(skb, ExtFeatureDefaults, slen);
- hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1));
- hycapi_send_message(ctrl, skb);
-}
-
-/************************************************************
-hycapi_restart_internal
-
-After an adapter has been rebooted, re-register all applications and
-resend a LISTEN_REQ (if one had been issued)
-
-*************************************************************/
-
-static void hycapi_restart_internal(struct capi_ctr *ctrl)
-{
- int i;
- struct sk_buff *skb;
-#ifdef HYCAPI_PRINTFNAMES
-	printk(KERN_WARNING "HYSDN: hycapi_restart_internal\n");
-#endif
- for (i = 0; i < CAPI_MAXAPPL; i++) {
- if (_hycapi_appCheck(i + 1, ctrl->cnr) == 1) {
- hycapi_register_internal(ctrl, i + 1,
- &hycapi_applications[i].rp);
- if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) {
- skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], GFP_ATOMIC);
- hycapi_sendmsg_internal(ctrl, skb);
- }
- }
- }
-}
-
-/*************************************************************
-Register an application.
-Error-checking is done for CAPI-compliance.
-
-The application is recorded in the internal list.
-*************************************************************/
-
-static void
-hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl,
- capi_register_params *rp)
-{
- int MaxLogicalConnections = 0, MaxBDataBlocks = 0, MaxBDataLen = 0;
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- int chk = _hycapi_appCheck(appl, ctrl->cnr);
- if (chk < 0) {
- return;
- }
- if (chk == 1) {
- printk(KERN_INFO "HYSDN: apl %d already registered\n", appl);
- return;
- }
- MaxBDataBlocks = rp->datablkcnt > CAPI_MAXDATAWINDOW ? CAPI_MAXDATAWINDOW : rp->datablkcnt;
- rp->datablkcnt = MaxBDataBlocks;
- MaxBDataLen = rp->datablklen < 1024 ? 1024 : rp->datablklen;
- rp->datablklen = MaxBDataLen;
-
- MaxLogicalConnections = rp->level3cnt;
- if (MaxLogicalConnections < 0) {
- MaxLogicalConnections = card->bchans * -MaxLogicalConnections;
- }
- if (MaxLogicalConnections == 0) {
- MaxLogicalConnections = card->bchans;
- }
-
- rp->level3cnt = MaxLogicalConnections;
- memcpy(&hycapi_applications[appl - 1].rp,
- rp, sizeof(capi_register_params));
-}
-
-/*********************************************************************
-
-hycapi_release_internal
-
-Send down a CAPI_RELEASE to the controller.
-*********************************************************************/
-
-static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl)
-{
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- struct sk_buff *skb;
- __u16 len;
- __u8 _command = 0xa1, _subcommand = 0x80;
- __u16 MessageNumber = 0x0000;
-
- capilib_release_appl(&cinfo->ncci_head, appl);
-
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_release_appl\n");
-#endif
- len = CAPI_MSG_BASELEN;
- if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
-		printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_release_internal\n",
- card->myid);
- return;
- }
- skb_put_data(skb, &len, sizeof(__u16));
- skb_put_data(skb, &appl, sizeof(__u16));
- skb_put_data(skb, &_command, sizeof(__u8));
- skb_put_data(skb, &_subcommand, sizeof(__u8));
- skb_put_data(skb, &MessageNumber, sizeof(__u16));
- hycapi_send_message(ctrl, skb);
- hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1));
-}
-
-/******************************************************************
-hycapi_release_appl
-
-Release the application from the internal list and remove its
-registration at controller level
-******************************************************************/
-
-static void
-hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl)
-{
- int chk;
-
- chk = _hycapi_appCheck(appl, ctrl->cnr);
- if (chk < 0) {
- printk(KERN_ERR "HYCAPI: Releasing invalid appl %d on controller %d\n", appl, ctrl->cnr);
- return;
- }
- if (hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]) {
- kfree_skb(hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]);
- hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1] = NULL;
- }
- if (chk == 1)
- {
- hycapi_release_internal(ctrl, appl);
- }
-}
-
-
-/**************************************************************
-Kill a single controller.
-**************************************************************/
-
-int hycapi_capi_release(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
- struct capi_ctr *ctrl;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_capi_release\n");
-#endif
- if (cinfo) {
- ctrl = &cinfo->capi_ctrl;
- hycapi_remove_ctr(ctrl);
- }
- return 0;
-}
-
-/**************************************************************
-hycapi_capi_stop
-
-Stop CAPI-Output on a card. (e.g. during reboot)
-***************************************************************/
-
-int hycapi_capi_stop(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
- struct capi_ctr *ctrl;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_capi_stop\n");
-#endif
- if (cinfo) {
- ctrl = &cinfo->capi_ctrl;
-/* ctrl->suspend_output(ctrl); */
- capi_ctr_down(ctrl);
- }
- return 0;
-}
-
-/***************************************************************
-hycapi_send_message
-
-Send a message to the controller.
-
-Messages are parsed for their Command/Subcommand-type, and appropriate
-actions are performed.
-
-Note that we have to muck around with a 64-bit DATA_REQ as there are
-firmware releases that do not check the MsgLen indication!
-
-***************************************************************/
-
-static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
-{
- __u16 appl_id;
- int _len, _len2;
- __u8 msghead[64];
- hycapictrl_info *cinfo = ctrl->driverdata;
- u16 retval = CAPI_NOERROR;
-
- appl_id = CAPIMSG_APPID(skb->data);
- switch (_hycapi_appCheck(appl_id, ctrl->cnr))
- {
- case 0:
-/* printk(KERN_INFO "Need to register\n"); */
- hycapi_register_internal(ctrl,
- appl_id,
- &(hycapi_applications[appl_id - 1].rp));
- break;
- case 1:
- break;
- default:
- printk(KERN_ERR "HYCAPI: Controller mixup!\n");
- retval = CAPI_ILLAPPNR;
- goto out;
- }
- switch (CAPIMSG_CMD(skb->data)) {
- case CAPI_DISCONNECT_B3_RESP:
- capilib_free_ncci(&cinfo->ncci_head, appl_id,
- CAPIMSG_NCCI(skb->data));
- break;
- case CAPI_DATA_B3_REQ:
- _len = CAPIMSG_LEN(skb->data);
- if (_len > 22) {
- _len2 = _len - 22;
- skb_copy_from_linear_data(skb, msghead, 22);
- skb_copy_to_linear_data_offset(skb, _len2,
- msghead, 22);
- skb_pull(skb, _len2);
- CAPIMSG_SETLEN(skb->data, 22);
- retval = capilib_data_b3_req(&cinfo->ncci_head,
- CAPIMSG_APPID(skb->data),
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- }
- break;
- case CAPI_LISTEN_REQ:
- if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1])
- {
- kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]);
- hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL;
- }
- if (!(hycapi_applications[appl_id -1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC)))
- {
- printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
- }
- break;
- default:
- break;
- }
-out:
- if (retval == CAPI_NOERROR)
- hycapi_sendmsg_internal(ctrl, skb);
- else
- dev_kfree_skb_any(skb);
-
- return retval;
-}
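-
-/*
- * Rewrite sketch for the CAPI_DATA_B3_REQ case above (assumption about
- * the message layout):
- *
- *	before:	[22-byte base header][_len - 22 extension bytes][payload]
- *	after:	[22-byte base header][payload]
- *
- * The extension bytes are dropped because some firmware releases ignore
- * the MsgLen field and always expect the classic 22-byte header.
- */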
-
-static int hycapi_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
- hysdn_card *card = cinfo->card;
- char *s;
-
- seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
- seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
- seq_printf(m, "%-16s %d\n", "irq", card->irq);
-
- switch (card->brdtype) {
- case BD_PCCARD: s = "HYSDN Hycard"; break;
- case BD_ERGO: s = "HYSDN Ergo2"; break;
- case BD_METRO: s = "HYSDN Metro4"; break;
- case BD_CHAMP2: s = "HYSDN Champ2"; break;
- case BD_PLEXUS: s = "HYSDN Plexus30"; break;
- default: s = "???"; break;
- }
- seq_printf(m, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
- seq_printf(m, "%-16s %s\n", "ver_serial", s);
-
- seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
-
- return 0;
-}
-
-/**************************************************************
-hycapi_load_firmware
-
-This does NOT load any firmware, but the callback somehow is needed
-on capi-interface registration.
-
-**************************************************************/
-
-static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
-{
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_load_firmware\n");
-#endif
- return 0;
-}
-
-
-static char *hycapi_procinfo(struct capi_ctr *ctrl)
-{
- hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "%s\n", __func__);
-#endif
- if (!cinfo)
- return "";
- sprintf(cinfo->infobuf, "%s %s 0x%x %d %s",
- cinfo->cardname[0] ? cinfo->cardname : "-",
- cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
- cinfo->card ? cinfo->card->iobase : 0x0,
- cinfo->card ? cinfo->card->irq : 0,
- hycapi_revision
- );
- return cinfo->infobuf;
-}
-
-/******************************************************************
-hycapi_rx_capipkt
-
-Receive a capi-message.
-
-All DATA_B3_IND messages are converted to a 64-bit-extension compatible format.
-New NCCIs are created if necessary.
-*******************************************************************/
-
-void
-hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, unsigned short len)
-{
- struct sk_buff *skb;
- hycapictrl_info *cinfo = card->hyctrlinfo;
- struct capi_ctr *ctrl;
- __u16 ApplId;
- __u16 MsgLen, info;
- __u16 len2, CapiCmd;
- __u32 CP64[2] = {0, 0};
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_rx_capipkt\n");
-#endif
- if (!cinfo) {
- return;
- }
- ctrl = &cinfo->capi_ctrl;
- if (len < CAPI_MSG_BASELEN) {
- printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n",
- card->myid, len);
- return;
- }
- MsgLen = CAPIMSG_LEN(buf);
- ApplId = CAPIMSG_APPID(buf);
- CapiCmd = CAPIMSG_CMD(buf);
-
- if ((CapiCmd == CAPI_DATA_B3_IND) && (MsgLen < 30)) {
- len2 = len + (30 - MsgLen);
- if (!(skb = alloc_skb(len2, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
- card->myid);
- return;
- }
- skb_put_data(skb, buf, MsgLen);
- skb_put_data(skb, CP64, 2 * sizeof(__u32));
- skb_put_data(skb, buf + MsgLen, len - MsgLen);
- CAPIMSG_SETLEN(skb->data, 30);
- } else {
- if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
- printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
- card->myid);
- return;
- }
- skb_put_data(skb, buf, len);
- }
- switch (CAPIMSG_CMD(skb->data))
- {
- case CAPI_CONNECT_B3_CONF:
-/* Check info-field for error-indication: */
- info = CAPIMSG_U16(skb->data, 12);
- switch (info)
- {
- case 0:
- capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data),
- hycapi_applications[ApplId - 1].rp.datablkcnt);
-
- break;
- case 0x0001:
- printk(KERN_ERR "HYSDN Card%d: NCPI not supported by current "
- "protocol. NCPI ignored.\n", card->myid);
- break;
- case 0x2001:
- printk(KERN_ERR "HYSDN Card%d: Message not supported in"
- " current state\n", card->myid);
- break;
- case 0x2002:
- printk(KERN_ERR "HYSDN Card%d: invalid PLCI\n", card->myid);
- break;
- case 0x2004:
- printk(KERN_ERR "HYSDN Card%d: out of NCCI\n", card->myid);
- break;
- case 0x3008:
- printk(KERN_ERR "HYSDN Card%d: NCPI not supported\n",
- card->myid);
- break;
- default:
- printk(KERN_ERR "HYSDN Card%d: Info in CONNECT_B3_CONF: %d\n",
- card->myid, info);
- break;
- }
- break;
- case CAPI_CONNECT_B3_IND:
- capilib_new_ncci(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- hycapi_applications[ApplId - 1].rp.datablkcnt);
- break;
- case CAPI_DATA_B3_CONF:
- capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
- CAPIMSG_NCCI(skb->data),
- CAPIMSG_MSGID(skb->data));
- break;
- default:
- break;
- }
- capi_ctr_handle_message(ctrl, ApplId, skb);
-}
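-
-/*
- * Padding sketch for the short CAPI_DATA_B3_IND case above (assumption
- * about the message layout):
- *
- *	wire:	[MsgLen-byte header][payload]			(MsgLen < 30)
- *	host:	[MsgLen-byte header][8 zero bytes][payload]	(MsgLen := 30)
- *
- * The eight zero bytes (CP64) stand in for the 64-bit data pointer of
- * the extended header expected by the kernel CAPI layer.
- */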
-
-/******************************************************************
-hycapi_tx_capiack
-
-Internally acknowledge a msg sent. This will remove the msg from the
-internal queue.
-
-*******************************************************************/
-
-void hycapi_tx_capiack(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_tx_capiack\n");
-#endif
- if (!cinfo) {
- return;
- }
- spin_lock_irq(&cinfo->lock);
- kfree_skb(cinfo->skbs[cinfo->out_idx]); /* free skb */
- cinfo->skbs[cinfo->out_idx++] = NULL;
- if (cinfo->out_idx >= HYSDN_MAX_CAPI_SKB)
- cinfo->out_idx = 0; /* wrap around */
-
- if (cinfo->sk_count-- == HYSDN_MAX_CAPI_SKB) /* dec usage count */
- capi_ctr_resume_output(&cinfo->capi_ctrl);
- spin_unlock_irq(&cinfo->lock);
-}
-
-/***************************************************************
-hycapi_tx_capiget(hysdn_card *card)
-
-This is called when polling for messages to SEND.
-
-****************************************************************/
-
-struct sk_buff *
-hycapi_tx_capiget(hysdn_card *card)
-{
- hycapictrl_info *cinfo = card->hyctrlinfo;
- if (!cinfo) {
- return (struct sk_buff *)NULL;
- }
- if (!cinfo->sk_count)
- return (struct sk_buff *)NULL; /* nothing available */
-
- return (cinfo->skbs[cinfo->out_idx]); /* next packet to send */
-}
-
-
-/**********************************************************
-int hycapi_init()
-
-attach the capi-driver to the kernel-capi.
-
-***********************************************************/
-
-int hycapi_init(void)
-{
- int i;
- for (i = 0; i < CAPI_MAXAPPL; i++) {
- memset(&(hycapi_applications[i]), 0, sizeof(hycapi_appl));
- }
- return (0);
-}
-
-/**************************************************************
-hycapi_cleanup(void)
-
-detach the capi-driver from the kernel-capi. Actually this should
-free some more resources. Do that later.
-**************************************************************/
-
-void
-hycapi_cleanup(void)
-{
-}
-
-/********************************************************************
-hycapi_capi_create(hysdn_card *card)
-
-Attach the card with its capi-ctrl.
-*********************************************************************/
-
-static void hycapi_fill_profile(hysdn_card *card)
-{
- hycapictrl_info *cinfo = NULL;
- struct capi_ctr *ctrl = NULL;
- cinfo = card->hyctrlinfo;
- if (!cinfo) return;
- ctrl = &cinfo->capi_ctrl;
- strcpy(ctrl->manu, "Hypercope");
- ctrl->version.majorversion = 2;
- ctrl->version.minorversion = 0;
- ctrl->version.majormanuversion = 3;
- ctrl->version.minormanuversion = 2;
- ctrl->profile.ncontroller = card->myid;
- ctrl->profile.nbchannel = card->bchans;
- ctrl->profile.goptions = GLOBAL_OPTION_INTERNAL_CONTROLLER |
- GLOBAL_OPTION_B_CHANNEL_OPERATION;
- ctrl->profile.support1 = B1_PROT_64KBIT_HDLC |
- (card->faxchans ? B1_PROT_T30 : 0) |
- B1_PROT_64KBIT_TRANSPARENT;
- ctrl->profile.support2 = B2_PROT_ISO7776 |
- (card->faxchans ? B2_PROT_T30 : 0) |
- B2_PROT_TRANSPARENT;
- ctrl->profile.support3 = B3_PROT_TRANSPARENT |
- B3_PROT_T90NL |
- (card->faxchans ? B3_PROT_T30 : 0) |
- (card->faxchans ? B3_PROT_T30EXT : 0) |
- B3_PROT_ISO8208;
-}
-
-int
-hycapi_capi_create(hysdn_card *card)
-{
- hycapictrl_info *cinfo = NULL;
- struct capi_ctr *ctrl = NULL;
- int retval;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_capi_create\n");
-#endif
- if ((hycapi_enable & (1 << card->myid)) == 0) {
- return 1;
- }
- if (!card->hyctrlinfo) {
- cinfo = kzalloc(sizeof(hycapictrl_info), GFP_ATOMIC);
- if (!cinfo) {
- printk(KERN_WARNING "HYSDN: no memory for capi-ctrl.\n");
- return -ENOMEM;
- }
- card->hyctrlinfo = cinfo;
- cinfo->card = card;
- spin_lock_init(&cinfo->lock);
- INIT_LIST_HEAD(&cinfo->ncci_head);
-
- switch (card->brdtype) {
- case BD_PCCARD: strcpy(cinfo->cardname, "HYSDN Hycard"); break;
- case BD_ERGO: strcpy(cinfo->cardname, "HYSDN Ergo2"); break;
- case BD_METRO: strcpy(cinfo->cardname, "HYSDN Metro4"); break;
- case BD_CHAMP2: strcpy(cinfo->cardname, "HYSDN Champ2"); break;
- case BD_PLEXUS: strcpy(cinfo->cardname, "HYSDN Plexus30"); break;
- default: strcpy(cinfo->cardname, "HYSDN ???"); break;
- }
-
- ctrl = &cinfo->capi_ctrl;
- ctrl->driver_name = "hycapi";
- ctrl->driverdata = cinfo;
- ctrl->register_appl = hycapi_register_appl;
- ctrl->release_appl = hycapi_release_appl;
- ctrl->send_message = hycapi_send_message;
- ctrl->load_firmware = hycapi_load_firmware;
- ctrl->reset_ctr = hycapi_reset_ctr;
- ctrl->procinfo = hycapi_procinfo;
- ctrl->proc_show = hycapi_proc_show;
- strcpy(ctrl->name, cinfo->cardname);
- ctrl->owner = THIS_MODULE;
-
- retval = attach_capi_ctr(ctrl);
- if (retval) {
- printk(KERN_ERR "hycapi: attach controller failed.\n");
- return -EBUSY;
- }
- /* fill in the blanks: */
- hycapi_fill_profile(card);
- capi_ctr_ready(ctrl);
- } else {
- /* resume output on stopped ctrl */
- ctrl = &card->hyctrlinfo->capi_ctrl;
- hycapi_fill_profile(card);
- capi_ctr_ready(ctrl);
- hycapi_restart_internal(ctrl);
-/* ctrl->resume_output(ctrl); */
- }
- return 0;
-}
diff --git a/drivers/staging/isdn/hysdn/hysdn_boot.c b/drivers/staging/isdn/hysdn/hysdn_boot.c
deleted file mode 100644
index ba177c3a621b..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_boot.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/* $Id: hysdn_boot.c,v 1.4.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards
- * specific routines for booting and pof handling
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include "hysdn_defs.h"
-#include "hysdn_pof.h"
-
-/********************************/
-/* defines for pof read handler */
-/********************************/
-#define POF_READ_FILE_HEAD 0
-#define POF_READ_TAG_HEAD 1
-#define POF_READ_TAG_DATA 2
-
-/************************************************************/
-/* definition of boot specific data area. This data is only */
-/* needed during boot and so allocated dynamically. */
-/************************************************************/
-struct boot_data {
- unsigned short Cryptor; /* for use with Decrypt function */
- unsigned short Nrecs; /* records remaining in file */
- unsigned char pof_state;/* actual state of read handler */
- unsigned char is_crypted;/* card data is crypted */
-	int BufSize;		/* actual number of bytes buffered */
- int last_error; /* last occurred error */
- unsigned short pof_recid;/* actual pof recid */
- unsigned long pof_reclen;/* total length of pof record data */
- unsigned long pof_recoffset;/* actual offset inside pof record */
- union {
- unsigned char BootBuf[BOOT_BUF_SIZE];/* buffer as byte count */
- tPofRecHdr PofRecHdr; /* header for actual record/chunk */
- tPofFileHdr PofFileHdr; /* header from POF file */
- tPofTimeStamp PofTime; /* time information */
- } buf;
-};
-
-/*****************************************************/
-/* start decryption of successive POF file chunks.  */
-/* */
-/* to be called at start of POF file reading, */
-/* before starting any decryption on any POF record. */
-/*****************************************************/
-static void
-StartDecryption(struct boot_data *boot)
-{
- boot->Cryptor = CRYPT_STARTTERM;
-} /* StartDecryption */
-
-
-/***************************************************************/
-/* decrypt complete BootBuf */
-/* NOTE: decryption must be applied to all boot tags or to none - */
-/* to HI and LO boot loader and (all) seq tags, because */
-/* global Cryptor is started for whole POF. */
-/***************************************************************/
-static void
-DecryptBuf(struct boot_data *boot, int cnt)
-{
- unsigned char *bufp = boot->buf.BootBuf;
-
- while (cnt--) {
- boot->Cryptor = (boot->Cryptor >> 1) ^ ((boot->Cryptor & 1U) ? CRYPT_FEEDTERM : 0);
- *bufp++ ^= (unsigned char)boot->Cryptor;
- }
-} /* DecryptBuf */
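-
-/*
- * Keystream sketch (not in the original): one generator step of the
- * XOR stream cipher used above; because a plain XOR stream is
- * involutory, the same routine both encrypts and decrypts.
- */
-static inline unsigned short crypt_step_sketch(unsigned short cryptor)
-{
-	return (cryptor >> 1) ^ ((cryptor & 1U) ? CRYPT_FEEDTERM : 0);
-}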
-
-/********************************************************************************/
-/* pof_handle_data executes the required actions dependent on the active record */
-/* id. If successful 0 is returned, a negative value shows an error. */
-/********************************************************************************/
-static int
-pof_handle_data(hysdn_card *card, int datlen)
-{
- struct boot_data *boot = card->boot; /* pointer to boot specific data */
- long l;
- unsigned char *imgp;
- int img_len;
-
- /* handle the different record types */
- switch (boot->pof_recid) {
-
- case TAG_TIMESTMP:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF created %s", boot->buf.PofTime.DateTimeText);
- break;
-
- case TAG_CBOOTDTA:
-		DecryptBuf(boot, datlen);	/* we need to decrypt the buffer */
- /* fall through */
- case TAG_BOOTDTA:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
- (boot->pof_recid == TAG_CBOOTDTA) ? "CBOOTDATA" : "BOOTDTA",
- datlen, boot->pof_recoffset);
-
- if (boot->pof_reclen != POF_BOOT_LOADER_TOTAL_SIZE) {
- boot->last_error = EPOF_BAD_IMG_SIZE; /* invalid length */
- return (boot->last_error);
- }
- imgp = boot->buf.BootBuf; /* start of buffer */
- img_len = datlen; /* maximum length to transfer */
-
- l = POF_BOOT_LOADER_OFF_IN_PAGE -
- (boot->pof_recoffset & (POF_BOOT_LOADER_PAGE_SIZE - 1));
- if (l > 0) {
- /* buffer needs to be truncated */
- imgp += l; /* advance pointer */
- img_len -= l; /* adjust len */
- }
- /* at this point no special handling for data wrapping over buffer */
-		/* is necessary, because the boot image will always be adjusted to */
- /* match a page boundary inside the buffer. */
- /* The buffer for the boot image on the card is filled in 2 cycles */
-		/* first the 1024 hi-words are put in the buffer, then the 1024 low */
-		/* words are handled in the same way with a different offset. */
-
- if (img_len > 0) {
- /* data available for copy */
- if ((boot->last_error =
- card->writebootimg(card, imgp,
- (boot->pof_recoffset > POF_BOOT_LOADER_PAGE_SIZE) ? 2 : 0)) < 0)
- return (boot->last_error);
- }
- break; /* end of case boot image hi/lo */
-
- case TAG_CABSDATA:
-		DecryptBuf(boot, datlen);	/* we need to decrypt the buffer */
- /* fall through */
- case TAG_ABSDATA:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
- (boot->pof_recid == TAG_CABSDATA) ? "CABSDATA" : "ABSDATA",
- datlen, boot->pof_recoffset);
-
- if ((boot->last_error = card->writebootseq(card, boot->buf.BootBuf, datlen)) < 0)
- return (boot->last_error); /* error writing data */
-
- if (boot->pof_recoffset + datlen >= boot->pof_reclen)
- return (card->waitpofready(card)); /* data completely spooled, wait for ready */
-
- break; /* end of case boot seq data */
-
- default:
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF got data(id=0x%lx) len=%d offs=0x%lx", boot->pof_recid,
- datlen, boot->pof_recoffset);
-
- break; /* simply skip record */
- } /* switch boot->pof_recid */
-
- return (0);
-} /* pof_handle_data */
-
-
-/******************************************************************************/
-/* pof_write_buffer is called when the buffer has been filled with the needed */
-/* number of data bytes. The number delivered is additionally supplied for */
-/* verification. The function handles the data and returns the needed number */
-/* of bytes for the next action. If the returned value is 0 or less an error */
-/* occurred and booting must be aborted. */
-/******************************************************************************/
-int
-pof_write_buffer(hysdn_card *card, int datlen)
-{
- struct boot_data *boot = card->boot; /* pointer to boot specific data */
-
- if (!boot)
- return (-EFAULT); /* invalid call */
- if (boot->last_error < 0)
- return (boot->last_error); /* repeated error */
-
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: got %d bytes ", datlen);
-
- switch (boot->pof_state) {
- case POF_READ_FILE_HEAD:
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: checking file header");
-
- if (datlen != sizeof(tPofFileHdr)) {
- boot->last_error = -EPOF_INTERNAL;
- break;
- }
- if (boot->buf.PofFileHdr.Magic != TAGFILEMAGIC) {
- boot->last_error = -EPOF_BAD_MAGIC;
- break;
- }
- /* Setup the new state and vars */
- boot->Nrecs = (unsigned short)(boot->buf.PofFileHdr.N_PofRecs); /* limited to 65535 */
- boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
- boot->last_error = sizeof(tPofRecHdr); /* new length */
- break;
-
- case POF_READ_TAG_HEAD:
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: checking tag header");
-
- if (datlen != sizeof(tPofRecHdr)) {
- boot->last_error = -EPOF_INTERNAL;
- break;
- }
- boot->pof_recid = boot->buf.PofRecHdr.PofRecId; /* actual pof recid */
- boot->pof_reclen = boot->buf.PofRecHdr.PofRecDataLen; /* total length */
- boot->pof_recoffset = 0; /* no starting offset */
-
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "POF: got record id=0x%lx length=%ld ",
- boot->pof_recid, boot->pof_reclen);
-
- boot->pof_state = POF_READ_TAG_DATA; /* now start with tag data */
- if (boot->pof_reclen < BOOT_BUF_SIZE)
- boot->last_error = boot->pof_reclen; /* limit size */
- else
- boot->last_error = BOOT_BUF_SIZE; /* maximum */
-
- if (!boot->last_error) { /* no data inside record */
- boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
- boot->last_error = sizeof(tPofRecHdr); /* new length */
- }
- break;
-
- case POF_READ_TAG_DATA:
- if (card->debug_flags & LOG_POF_WRITE)
- hysdn_addlog(card, "POF write: getting tag data");
-
- if (datlen != boot->last_error) {
- boot->last_error = -EPOF_INTERNAL;
- break;
- }
- if ((boot->last_error = pof_handle_data(card, datlen)) < 0)
- return (boot->last_error); /* an error occurred */
- boot->pof_recoffset += datlen;
- if (boot->pof_recoffset >= boot->pof_reclen) {
- boot->pof_state = POF_READ_TAG_HEAD; /* now start with single tags */
- boot->last_error = sizeof(tPofRecHdr); /* new length */
- } else {
- if (boot->pof_reclen - boot->pof_recoffset < BOOT_BUF_SIZE)
- boot->last_error = boot->pof_reclen - boot->pof_recoffset; /* limit size */
- else
- boot->last_error = BOOT_BUF_SIZE; /* maximum */
- }
- break;
-
- default:
- boot->last_error = -EPOF_INTERNAL; /* unknown state */
- break;
- } /* switch (boot->pof_state) */
-
- return (boot->last_error);
-} /* pof_write_buffer */
-
-
-/*******************************************************************************/
-/* pof_write_open is called when an open for boot on the cardlog device occurs. */
-/* The function returns the needed number of bytes for the next operation. If */
-/* the returned number is less than or equal to 0, an error specified by this code */
-/* occurred. Additionally the pointer to the buffer data area is set on success */
-/*******************************************************************************/
-int
-pof_write_open(hysdn_card *card, unsigned char **bufp)
-{
- struct boot_data *boot; /* pointer to boot specific data */
-
- if (card->boot) {
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF open: already opened for boot");
- return (-ERR_ALREADY_BOOT); /* boot already active */
- }
- /* error no mem available */
- if (!(boot = kzalloc(sizeof(struct boot_data), GFP_KERNEL))) {
- if (card->debug_flags & LOG_MEM_ERR)
- hysdn_addlog(card, "POF open: unable to allocate mem");
- return (-EFAULT);
- }
- card->boot = boot;
- card->state = CARD_STATE_BOOTING;
-
- card->stopcard(card); /* first stop the card */
- if (card->testram(card)) {
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF open: DPRAM test failure");
- boot->last_error = -ERR_BOARD_DPRAM;
- card->state = CARD_STATE_BOOTERR; /* show boot error */
- return (boot->last_error);
- }
- boot->BufSize = 0; /* Buffer is empty */
- boot->pof_state = POF_READ_FILE_HEAD; /* read file header */
- StartDecryption(boot); /* if POF File should be encrypted */
-
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF open: success");
-
- *bufp = boot->buf.BootBuf; /* point to buffer */
- return (sizeof(tPofFileHdr));
-} /* pof_write_open */
-
-/********************************************************************************/
-/* pof_write_close is called when a close for boot on the cardlog device occurs. */
-/* The return value must be 0 if everything has happened as desired. */
-/********************************************************************************/
-int
-pof_write_close(hysdn_card *card)
-{
- struct boot_data *boot = card->boot; /* pointer to boot specific data */
-
- if (!boot)
- return (-EFAULT); /* invalid call */
-
- card->boot = NULL; /* no boot active */
- kfree(boot);
-
- if (card->state == CARD_STATE_RUN)
- card->set_errlog_state(card, 1); /* activate error log */
-
- if (card->debug_flags & LOG_POF_OPEN)
- hysdn_addlog(card, "POF close: success");
-
- return (0);
-} /* pof_write_close */
-
-/*********************************************************************************/
-/* EvalSysrTokData checks additional records delivered with the Sysready Message */
-/* when POF has been booted. A return value of 0 is used if no error occurred. */
-/*********************************************************************************/
-int
-EvalSysrTokData(hysdn_card *card, unsigned char *cp, int len)
-{
- u_char *p;
- u_char crc;
-
- if (card->debug_flags & LOG_POF_RECORD)
- hysdn_addlog(card, "SysReady Token data length %d", len);
-
- if (len < 2) {
-		hysdn_addlog(card, "SysReady Token Data too short");
- return (1);
- }
- for (p = cp, crc = 0; p < (cp + len - 2); p++)
- if ((crc & 0x80))
- crc = (((u_char) (crc << 1)) + 1) + *p;
- else
- crc = ((u_char) (crc << 1)) + *p;
- crc = ~crc;
- if (crc != *(cp + len - 1)) {
- hysdn_addlog(card, "SysReady Token Data invalid CRC");
- return (1);
- }
- len--; /* don't check CRC byte */
- while (len > 0) {
-
- if (*cp == SYSR_TOK_END)
- return (0); /* End of Token stream */
-
- if (len < (*(cp + 1) + 2)) {
- hysdn_addlog(card, "token 0x%x invalid length %d", *cp, *(cp + 1));
- return (1);
- }
- switch (*cp) {
- case SYSR_TOK_B_CHAN: /* 1 */
- if (*(cp + 1) != 1)
- return (1); /* length invalid */
- card->bchans = *(cp + 2);
- break;
-
- case SYSR_TOK_FAX_CHAN: /* 2 */
- if (*(cp + 1) != 1)
- return (1); /* length invalid */
- card->faxchans = *(cp + 2);
- break;
-
- case SYSR_TOK_MAC_ADDR: /* 3 */
- if (*(cp + 1) != 6)
- return (1); /* length invalid */
- memcpy(card->mac_addr, cp + 2, 6);
- break;
-
- default:
- hysdn_addlog(card, "unknown token 0x%02x length %d", *cp, *(cp + 1));
- break;
- }
- len -= (*(cp + 1) + 2); /* adjust len */
- cp += (*(cp + 1) + 2); /* and pointer */
- }
-
- hysdn_addlog(card, "no end token found");
- return (1);
-} /* EvalSysrTokData */
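
The checksum loop above is an 8-bit rotate-left accumulator: the (crc & 0x80) branch is exactly a left rotation by one bit before the next byte is added, and the result is complemented at the end. A standalone sketch of the same computation (hypothetical helper; note the byte at buf[len - 2] is skipped, just as above):

	#include <stdint.h>

	static uint8_t sysr_tok_crc(const uint8_t *buf, int len)
	{
		uint8_t crc = 0;
		const uint8_t *p;

		for (p = buf; p < buf + len - 2; p++)
			crc = (uint8_t)((crc << 1) | (crc >> 7)) + *p;	/* rol 1, add byte */
		return (uint8_t)~crc;	/* complement; compared against buf[len - 1] */
	}
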
diff --git a/drivers/staging/isdn/hysdn/hysdn_defs.h b/drivers/staging/isdn/hysdn/hysdn_defs.h
deleted file mode 100644
index cdac46a21692..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_defs.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/* $Id: hysdn_defs.h,v 1.5.6.3 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards
- * global definitions and exported vars and functions.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef HYSDN_DEFS_H
-#define HYSDN_DEFS_H
-
-#include <linux/hysdn_if.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-
-#include "ince1pc.h"
-
-#ifdef CONFIG_HYSDN_CAPI
-#include <linux/capi.h>
-#include <linux/isdn/capicmd.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-
-/***************************/
-/* CAPI-Profile values. */
-/***************************/
-
-#define GLOBAL_OPTION_INTERNAL_CONTROLLER 0x0001
-#define GLOBAL_OPTION_EXTERNAL_CONTROLLER 0x0002
-#define GLOBAL_OPTION_HANDSET 0x0004
-#define GLOBAL_OPTION_DTMF 0x0008
-#define GLOBAL_OPTION_SUPPL_SERVICES 0x0010
-#define GLOBAL_OPTION_CHANNEL_ALLOCATION 0x0020
-#define GLOBAL_OPTION_B_CHANNEL_OPERATION 0x0040
-
-#define B1_PROT_64KBIT_HDLC 0x0001
-#define B1_PROT_64KBIT_TRANSPARENT 0x0002
-#define B1_PROT_V110_ASYNCH 0x0004
-#define B1_PROT_V110_SYNCH 0x0008
-#define B1_PROT_T30 0x0010
-#define B1_PROT_64KBIT_INV_HDLC 0x0020
-#define B1_PROT_56KBIT_TRANSPARENT 0x0040
-
-#define B2_PROT_ISO7776 0x0001
-#define B2_PROT_TRANSPARENT 0x0002
-#define B2_PROT_SDLC 0x0004
-#define B2_PROT_LAPD 0x0008
-#define B2_PROT_T30 0x0010
-#define B2_PROT_PPP 0x0020
-#define B2_PROT_TRANSPARENT_IGNORE_B1_FRAMING_ERRORS 0x0040
-
-#define B3_PROT_TRANSPARENT 0x0001
-#define B3_PROT_T90NL 0x0002
-#define B3_PROT_ISO8208 0x0004
-#define B3_PROT_X25_DCE 0x0008
-#define B3_PROT_T30 0x0010
-#define B3_PROT_T30EXT 0x0020
-
-#define HYSDN_MAXVERSION 8
-
-/* Number of sendbuffers in CAPI-queue */
-#define HYSDN_MAX_CAPI_SKB 20
-
-#endif /* CONFIG_HYSDN_CAPI*/
-
-/************************************************/
-/* constants and bits for debugging/log outputs */
-/************************************************/
-#define LOG_MAX_LINELEN 120
-#define DEB_OUT_SYSLOG 0x80000000 /* output to syslog instead of proc fs */
-#define LOG_MEM_ERR 0x00000001 /* log memory errors like kmalloc failure */
-#define LOG_POF_OPEN 0x00000010 /* log pof open and close activities */
-#define LOG_POF_RECORD 0x00000020 /* log pof record parser */
-#define LOG_POF_WRITE 0x00000040 /* log detailed pof write operation */
-#define LOG_POF_CARD 0x00000080 /* log pof related card functions */
-#define LOG_CNF_LINE 0x00000100 /* all conf lines are put to procfs */
-#define LOG_CNF_DATA 0x00000200 /* non comment conf lines are shown with channel */
-#define LOG_CNF_MISC 0x00000400 /* additional conf line debug outputs */
-#define LOG_SCHED_ASYN 0x00001000 /* debug schedulers async tx routines */
-#define LOG_PROC_OPEN 0x00100000 /* open and close from procfs are logged */
-#define LOG_PROC_ALL 0x00200000 /* all actions from procfs are logged */
-#define LOG_NET_INIT 0x00010000 /* network init and deinit logging */
-
-#define DEF_DEB_FLAGS 0x7fff000f /* everything is logged to procfs */
-
-/**********************************/
-/* proc filesystem name constants */
-/**********************************/
-#define PROC_SUBDIR_NAME "hysdn"
-#define PROC_CONF_BASENAME "cardconf"
-#define PROC_LOG_BASENAME "cardlog"
-
-/***********************************/
-/* PCI 32 bit parms for IO and MEM */
-/***********************************/
-#define PCI_REG_PLX_MEM_BASE 0
-#define PCI_REG_PLX_IO_BASE 1
-#define PCI_REG_MEMORY_BASE 3
-
-/**************/
-/* card types */
-/**************/
-#define BD_NONE 0U
-#define BD_PERFORMANCE 1U
-#define BD_VALUE 2U
-#define BD_PCCARD 3U
-#define BD_ERGO 4U
-#define BD_METRO 5U
-#define BD_CHAMP2 6U
-#define BD_PLEXUS 7U
-
-/******************************************************/
-/* defined states for cards shown by reading cardconf */
-/******************************************************/
-#define CARD_STATE_UNUSED 0 /* never been used or booted */
-#define CARD_STATE_BOOTING 1 /* booting is in progress */
-#define CARD_STATE_BOOTERR 2 /* a previous boot was aborted */
-#define CARD_STATE_RUN 3 /* card is active */
-
-/*******************************/
-/* defines for error_log_state */
-/*******************************/
-#define ERRLOG_STATE_OFF 0 /* error log is switched off, nothing to do */
-#define ERRLOG_STATE_ON 1 /* error log is switched on, wait for data */
-#define ERRLOG_STATE_START 2 /* start error logging */
-#define ERRLOG_STATE_STOP 3 /* stop error logging */
-
-/*******************************/
-/* data structure for one card */
-/*******************************/
-typedef struct HYSDN_CARD {
-
- /* general variables for the cards */
- int myid; /* own driver card id */
- unsigned char bus; /* pci bus the card is connected to */
- unsigned char devfn; /* slot+function bit encoded */
- unsigned short subsysid;/* PCI subsystem id */
- unsigned char brdtype; /* type of card */
- unsigned int bchans; /* number of available B-channels */
- unsigned int faxchans; /* number of available fax-channels */
- unsigned char mac_addr[6];/* MAC Address read from card */
- unsigned int irq; /* interrupt number */
- unsigned int iobase; /* IO-port base address */
- unsigned long plxbase; /* PLX memory base */
- unsigned long membase; /* DPRAM memory base */
- unsigned long memend; /* DPRAM memory end */
- void *dpram; /* mapped dpram */
- int state; /* actual state of card -> CARD_STATE_** */
- struct HYSDN_CARD *next; /* pointer to next card */
-
- /* data areas for the /proc file system */
- void *proclog; /* pointer to proclog filesystem specific data */
- void *procconf; /* pointer to procconf filesystem specific data */
-
- /* debugging and logging */
- unsigned char err_log_state;/* actual error log state of the card */
- unsigned long debug_flags;/* tells what should be debugged and where */
- void (*set_errlog_state) (struct HYSDN_CARD *, int);
-
- /* interrupt handler + interrupt synchronisation */
- struct work_struct irq_queue; /* interrupt task queue */
- unsigned char volatile irq_enabled;/* interrupt enabled if != 0 */
- unsigned char volatile hw_lock;/* hardware is currently locked -> no access */
-
- /* boot process */
- void *boot; /* pointer to boot private data */
- int (*writebootimg) (struct HYSDN_CARD *, unsigned char *, unsigned long);
- int (*writebootseq) (struct HYSDN_CARD *, unsigned char *, int);
- int (*waitpofready) (struct HYSDN_CARD *);
- int (*testram) (struct HYSDN_CARD *);
-
- /* scheduler for data transfer (only async parts) */
- unsigned char async_data[256];/* async data to be sent (normally for config) */
- unsigned short volatile async_len;/* length of data to sent */
- unsigned short volatile async_channel;/* channel number for async transfer */
- int volatile async_busy; /* flag != 0 sending in progress */
- int volatile net_tx_busy; /* a network packet tx is in progress */
-
- /* network interface */
- void *netif; /* pointer to network structure */
-
- /* init and deinit stopcard for booting, too */
- void (*stopcard) (struct HYSDN_CARD *);
- void (*releasehardware) (struct HYSDN_CARD *);
-
- spinlock_t hysdn_lock;
-#ifdef CONFIG_HYSDN_CAPI
- struct hycapictrl_info {
- char cardname[32];
- spinlock_t lock;
- int versionlen;
- char versionbuf[1024];
- char *version[HYSDN_MAXVERSION];
-
- char infobuf[128]; /* for function procinfo */
-
- struct HYSDN_CARD *card;
- struct capi_ctr capi_ctrl;
- struct sk_buff *skbs[HYSDN_MAX_CAPI_SKB];
- int in_idx, out_idx; /* indexes to buffer ring */
- int sk_count; /* number of buffers currently in ring */
- struct sk_buff *tx_skb; /* buffer for tx operation */
-
- struct list_head ncci_head;
- } *hyctrlinfo;
-#endif /* CONFIG_HYSDN_CAPI */
-} hysdn_card;
-
-#ifdef CONFIG_HYSDN_CAPI
-typedef struct hycapictrl_info hycapictrl_info;
-#endif /* CONFIG_HYSDN_CAPI */
-
-
-/*****************/
-/* exported vars */
-/*****************/
-extern hysdn_card *card_root; /* pointer to first card */
-
-
-
-/*************************/
-/* im/exported functions */
-/*************************/
-
-/* hysdn_procconf.c */
-extern int hysdn_procconf_init(void); /* init proc config filesys */
-extern void hysdn_procconf_release(void); /* deinit proc config filesys */
-
-/* hysdn_proclog.c */
-extern int hysdn_proclog_init(hysdn_card *); /* init proc log entry */
-extern void hysdn_proclog_release(hysdn_card *); /* deinit proc log entry */
-extern void hysdn_addlog(hysdn_card *, char *, ...); /* output data to log */
-extern void hysdn_card_errlog(hysdn_card *, tErrLogEntry *, int); /* output card log */
-
-/* boardergo.c */
-extern int ergo_inithardware(hysdn_card *card); /* get hardware -> module init */
-
-/* hysdn_boot.c */
-extern int pof_write_close(hysdn_card *); /* close proc file after writing pof */
-extern int pof_write_open(hysdn_card *, unsigned char **); /* open proc file for writing pof */
-extern int pof_write_buffer(hysdn_card *, int); /* write boot data to card */
-extern int EvalSysrTokData(hysdn_card *, unsigned char *, int); /* Check Sysready Token Data */
-
-/* hysdn_sched.c */
-extern int hysdn_sched_tx(hysdn_card *, unsigned char *,
- unsigned short volatile *, unsigned short volatile *,
- unsigned short);
-extern int hysdn_sched_rx(hysdn_card *, unsigned char *, unsigned short,
- unsigned short);
-extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
- unsigned short); /* send one cfg line */
-
-/* hysdn_net.c */
-extern unsigned int hynet_enable;
-extern int hysdn_net_create(hysdn_card *); /* create a new net device */
-extern int hysdn_net_release(hysdn_card *); /* delete the device */
-extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
-extern void hysdn_tx_netack(hysdn_card *); /* acknowledge a packet tx */
-extern struct sk_buff *hysdn_tx_netget(hysdn_card *); /* get next network packet */
-extern void hysdn_rx_netpkt(hysdn_card *, unsigned char *,
- unsigned short); /* rxed packet from network */
-
-#ifdef CONFIG_HYSDN_CAPI
-extern unsigned int hycapi_enable;
-extern int hycapi_capi_create(hysdn_card *); /* create a new capi device */
-extern int hycapi_capi_release(hysdn_card *); /* delete the device */
-extern int hycapi_capi_stop(hysdn_card *card); /* suspend */
-extern void hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf,
- unsigned short len);
-extern void hycapi_tx_capiack(hysdn_card *card);
-extern struct sk_buff *hycapi_tx_capiget(hysdn_card *card);
-extern int hycapi_init(void);
-extern void hycapi_cleanup(void);
-#endif /* CONFIG_HYSDN_CAPI */
-
-#endif /* HYSDN_DEFS_H */
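
The detected cards form a singly linked list rooted at card_root and chained through the next member declared above. A hedged traversal sketch (the helper is illustrative only, not part of the driver):

	/* find a card by its driver id; returns NULL if no such card exists */
	static hysdn_card *hysdn_find_card(int id)
	{
		hysdn_card *card;

		for (card = card_root; card; card = card->next)
			if (card->myid == id)
				return card;
		return NULL;
	}
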
diff --git a/drivers/staging/isdn/hysdn/hysdn_init.c b/drivers/staging/isdn/hysdn/hysdn_init.c
deleted file mode 100644
index 0db2f7506250..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_init.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* $Id: hysdn_init.c,v 1.6.6.6 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, init functions.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-
-#include "hysdn_defs.h"
-
-static struct pci_device_id hysdn_pci_tbl[] = {
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_METRO, 0, 0, BD_METRO },
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2, 0, 0, BD_CHAMP2 },
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_ERGO, 0, 0, BD_ERGO },
- { PCI_VENDOR_ID_HYPERCOPE, PCI_DEVICE_ID_HYPERCOPE_PLX,
- PCI_ANY_ID, PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO, 0, 0, BD_ERGO },
-
- { } /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, hysdn_pci_tbl);
-MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
-MODULE_AUTHOR("Werner Cornelius");
-MODULE_LICENSE("GPL");
-
-static int cardmax; /* number of found cards */
-hysdn_card *card_root = NULL; /* pointer to first card */
-static hysdn_card *card_last = NULL;	/* pointer to last card */
-
-
-/****************************************************************************/
-/* The module startup and shutdown code. Only compiled when used as module. */
-/* Using the driver as a module is always advisable, because the boot */
-/* image stays smaller and the driver code is only loaded when needed. */
-/* Additionally, newer versions may be activated without rebooting. */
-/****************************************************************************/
-
-/****************************************************************************/
-/* init_module is called once when the module is loaded to do all necessary */
-/* things like autodetect... */
-/* If the return value of this function is 0 the init has been successful */
-/* and the module is added to the list in /proc/modules, otherwise an error */
-/* is assumed and the module will not be kept in memory. */
-/****************************************************************************/
-
-static int hysdn_pci_init_one(struct pci_dev *akt_pcidev,
- const struct pci_device_id *ent)
-{
- hysdn_card *card;
- int rc;
-
- rc = pci_enable_device(akt_pcidev);
- if (rc)
- return rc;
-
- if (!(card = kzalloc(sizeof(hysdn_card), GFP_KERNEL))) {
-		printk(KERN_ERR "HYSDN: unable to alloc device mem\n");
- rc = -ENOMEM;
- goto err_out;
- }
- card->myid = cardmax; /* set own id */
- card->bus = akt_pcidev->bus->number;
- card->devfn = akt_pcidev->devfn; /* slot + function */
- card->subsysid = akt_pcidev->subsystem_device;
- card->irq = akt_pcidev->irq;
- card->iobase = pci_resource_start(akt_pcidev, PCI_REG_PLX_IO_BASE);
- card->plxbase = pci_resource_start(akt_pcidev, PCI_REG_PLX_MEM_BASE);
- card->membase = pci_resource_start(akt_pcidev, PCI_REG_MEMORY_BASE);
- card->brdtype = BD_NONE; /* unknown */
- card->debug_flags = DEF_DEB_FLAGS; /* set default debug */
- card->faxchans = 0; /* default no fax channels */
- card->bchans = 2; /* and 2 b-channels */
- card->brdtype = ent->driver_data;
-
- if (ergo_inithardware(card)) {
- printk(KERN_WARNING "HYSDN: card at io 0x%04x already in use\n", card->iobase);
- rc = -EBUSY;
- goto err_out_card;
- }
-
- cardmax++;
- card->next = NULL; /*end of chain */
- if (card_last)
- card_last->next = card; /* pointer to next card */
- else
- card_root = card;
- card_last = card; /* new chain end */
-
- pci_set_drvdata(akt_pcidev, card);
- return 0;
-
-err_out_card:
- kfree(card);
-err_out:
- pci_disable_device(akt_pcidev);
- return rc;
-}
-
-static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
-{
- hysdn_card *card = pci_get_drvdata(akt_pcidev);
-
- pci_set_drvdata(akt_pcidev, NULL);
-
- if (card->stopcard)
- card->stopcard(card);
-
-#ifdef CONFIG_HYSDN_CAPI
- hycapi_capi_release(card);
-#endif
-
- if (card->releasehardware)
- card->releasehardware(card); /* free all hardware resources */
-
- if (card == card_root) {
- card_root = card_root->next;
- if (!card_root)
- card_last = NULL;
- } else {
- hysdn_card *tmp = card_root;
- while (tmp) {
- if (tmp->next == card)
- tmp->next = card->next;
- card_last = tmp;
- tmp = tmp->next;
- }
- }
-
- kfree(card);
- pci_disable_device(akt_pcidev);
-}
-
-static struct pci_driver hysdn_pci_driver = {
- .name = "hysdn",
- .id_table = hysdn_pci_tbl,
- .probe = hysdn_pci_init_one,
- .remove = hysdn_pci_remove_one,
-};
-
-static int hysdn_have_procfs;
-
-static int __init
-hysdn_init(void)
-{
- int rc;
-
- printk(KERN_NOTICE "HYSDN: module loaded\n");
-
- rc = pci_register_driver(&hysdn_pci_driver);
- if (rc)
- return rc;
-
- printk(KERN_INFO "HYSDN: %d card(s) found.\n", cardmax);
-
- if (!hysdn_procconf_init())
- hysdn_have_procfs = 1;
-
-#ifdef CONFIG_HYSDN_CAPI
- if (cardmax > 0) {
- if (hycapi_init()) {
- printk(KERN_ERR "HYCAPI: init failed\n");
-
- if (hysdn_have_procfs)
- hysdn_procconf_release();
-
- pci_unregister_driver(&hysdn_pci_driver);
- return -ESPIPE;
- }
- }
-#endif /* CONFIG_HYSDN_CAPI */
-
- return 0; /* no error */
-} /* init_module */
-
-
-/***********************************************************************/
-/* cleanup_module is called when the module is released by the kernel. */
-/* The routine is only called if init_module has been successful and */
-/* the module counter has a value of 0. Otherwise this function will */
-/* not be called. This function must release all resources still */
-/* allocated, as after the return from this function the module code will */
-/* be removed from memory. */
-/***********************************************************************/
-static void __exit
-hysdn_exit(void)
-{
- if (hysdn_have_procfs)
- hysdn_procconf_release();
-
- pci_unregister_driver(&hysdn_pci_driver);
-
-#ifdef CONFIG_HYSDN_CAPI
- hycapi_cleanup();
-#endif /* CONFIG_HYSDN_CAPI */
-
- printk(KERN_NOTICE "HYSDN: module unloaded\n");
-} /* cleanup_module */
-
-module_init(hysdn_init);
-module_exit(hysdn_exit);
diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c
deleted file mode 100644
index dcb9ef7a2651..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_net.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/* $Id: hysdn_net.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, net (ethernet type) handling routines.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This net module has been inspired by the skeleton driver from
- * Donald Becker (becker@CESDIS.gsfc.nasa.gov)
- *
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inetdevice.h>
-
-#include "hysdn_defs.h"
-
-unsigned int hynet_enable = 0xffffffff;
-module_param(hynet_enable, uint, 0);
-
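hynet_enable acts as a per-card enable bitmask: the scheduler below tests hynet_enable & (1 << card->myid), so bit n controls the card with myid == n. Loading the module with hynet_enable=0x5, for example, would enable the network interface on cards 0 and 2 only; the default of 0xffffffff enables it for every card.
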
-#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
-
-/****************************************************************************/
-/* structure containing the complete network data. It is kept in the */
-/* netdev private area (netdev_priv()) and holds a back pointer to the */
-/* device together with the transmit ring state. */
-/****************************************************************************/
-struct net_local {
- /* Tx control lock. This protects the transmit buffer ring
- * state along with the "tx full" state of the driver. This
- * means all netif_queue flow control actions are protected
- * by this lock as well.
- */
- struct net_device *dev;
- spinlock_t lock;
- struct sk_buff *skbs[MAX_SKB_BUFFERS]; /* pointers to tx-skbs */
- int in_idx, out_idx; /* indexes to buffer ring */
- int sk_count; /* number of buffers currently in ring */
-}; /* net_local */
-
-
-
-/*********************************************************************/
-/* Open/initialize the board. This is called (in the current kernel) */
-/* sometime after booting when the 'ifconfig' program is run. */
-/* This routine should set everything up anew at each open, even */
-/* registers that "should" only need to be set once at boot, so that */
-/* there is a non-reboot way to recover if something goes wrong. */
-/*********************************************************************/
-static int
-net_open(struct net_device *dev)
-{
- struct in_device *in_dev;
- hysdn_card *card = dev->ml_priv;
- int i;
-
- netif_start_queue(dev); /* start tx-queueing */
-
- /* Fill in the MAC-level header (if not already set) */
- if (!card->mac_addr[0]) {
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = 0xfc;
- if ((in_dev = dev->ip_ptr) != NULL) {
- const struct in_ifaddr *ifa;
-
- rcu_read_lock();
- ifa = rcu_dereference(in_dev->ifa_list);
- if (ifa != NULL)
- memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
- rcu_read_unlock();
- }
- } else
- memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
-
- return (0);
-} /* net_open */
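
As a concrete example of the fallback above (addresses made up): with no card-provided MAC and a configured IPv4 address of 192.168.1.2, the two leading bytes keep the 0xfc fill and the last four receive the address in network byte order, giving

	fc:fc:c0:a8:01:02
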
-
-/*******************************************/
-/* flush the currently occupied tx-buffers */
-/* must only be called when the device is closed */
-/*******************************************/
-static void
-flush_tx_buffers(struct net_local *nl)
-{
-
- while (nl->sk_count) {
- dev_kfree_skb(nl->skbs[nl->out_idx++]); /* free skb */
- if (nl->out_idx >= MAX_SKB_BUFFERS)
- nl->out_idx = 0; /* wrap around */
- nl->sk_count--;
- }
-} /* flush_tx_buffers */
-
-
-/*********************************************************************/
-/* close/deactivate the device. The device is not removed, but only */
-/* deactivated. */
-/*********************************************************************/
-static int
-net_close(struct net_device *dev)
-{
-
- netif_stop_queue(dev); /* disable queueing */
-
-	flush_tx_buffers(netdev_priv(dev));	/* net_local lives in the priv area */
-
- return (0); /* success */
-} /* net_close */
-
-/************************************/
-/* send a packet on this interface. */
-/* new style for kernel >= 2.3.33 */
-/************************************/
-static netdev_tx_t
-net_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-
- spin_lock_irq(&lp->lock);
-
- lp->skbs[lp->in_idx++] = skb; /* add to buffer list */
- if (lp->in_idx >= MAX_SKB_BUFFERS)
- lp->in_idx = 0; /* wrap around */
- lp->sk_count++; /* adjust counter */
- netif_trans_update(dev);
-
- /* If we just used up the very last entry in the
- * TX ring on this device, tell the queueing
- * layer to send no more.
- */
- if (lp->sk_count >= MAX_SKB_BUFFERS)
- netif_stop_queue(dev);
-
- /* When the TX completion hw interrupt arrives, this
- * is when the transmit statistics are updated.
- */
-
- spin_unlock_irq(&lp->lock);
-
- if (lp->sk_count <= 3) {
- schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue);
- }
- return NETDEV_TX_OK; /* success */
-} /* net_send_packet */
-
-
-
-/***********************************************************************/
-/* acknowledge a sent packet. The network layer will be informed about */
-/* the completion. */
-/***********************************************************************/
-void
-hysdn_tx_netack(hysdn_card *card)
-{
-	struct net_device *dev = card->netif;
-	struct net_local *lp;
-
-	if (!dev)
-		return;		/* non-existent device */
-
-	lp = netdev_priv(dev);
-	if (!lp->sk_count)
-		return;		/* error condition */
-
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
-
-	dev_kfree_skb(lp->skbs[lp->out_idx++]);	/* free skb */
-	if (lp->out_idx >= MAX_SKB_BUFFERS)
-		lp->out_idx = 0;	/* wrap around */
-
-	if (lp->sk_count-- == MAX_SKB_BUFFERS)	/* ring was full -> restart queue */
-		netif_start_queue(dev);
-} /* hysdn_tx_netack */
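
Together with net_send_packet above, this forms a fixed-size transmit ring: the sender advances in_idx, the acknowledge path advances out_idx, and sk_count tracks occupancy; the queue is stopped when the ring fills and restarted by the first acknowledgement afterwards. The shared wraparound rule, as a tiny sketch (helper name made up):

	/* advance a ring index with wraparound at MAX_SKB_BUFFERS */
	static int tx_ring_advance(int idx)
	{
		return (idx + 1 >= MAX_SKB_BUFFERS) ? 0 : idx + 1;
	}
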
-
-/*****************************************************/
-/* we got a packet from the network, go and queue it */
-/*****************************************************/
-void
-hysdn_rx_netpkt(hysdn_card *card, unsigned char *buf, unsigned short len)
-{
-	struct net_device *dev = card->netif;
-	struct sk_buff *skb;
-
-	if (!dev)
-		return;		/* non-existent device */
-
-	dev->stats.rx_bytes += len;
-
- skb = dev_alloc_skb(len);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- return;
- }
- /* copy the data */
- skb_put_data(skb, buf, len);
-
- /* determine the used protocol */
- skb->protocol = eth_type_trans(skb, dev);
-
- dev->stats.rx_packets++; /* adjust packet count */
-
- netif_rx(skb);
-} /* hysdn_rx_netpkt */
-
-/*****************************************************/
-/* return the pointer to a network packet to be send */
-/*****************************************************/
-struct sk_buff *
-hysdn_tx_netget(hysdn_card *card)
-{
-	struct net_device *dev = card->netif;
-	struct net_local *lp;
-
-	if (!dev)
-		return (NULL);	/* non-existent device */
-
-	lp = netdev_priv(dev);
-	if (!lp->sk_count)
-		return (NULL);	/* nothing available */
-
- return (lp->skbs[lp->out_idx]); /* next packet to send */
-} /* hysdn_tx_netget */
-
-static const struct net_device_ops hysdn_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-/*****************************************************************************/
-/* hysdn_net_create creates a new net device for the given card. If a device */
-/* already exists, it will be deleted and a new one created. The return value */
-/* 0 announces success, else a negative error code will be returned. */
-/*****************************************************************************/
-int
-hysdn_net_create(hysdn_card *card)
-{
- struct net_device *dev;
- int i;
- struct net_local *lp;
-
- if (!card) {
- printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
- return (-ENOMEM);
- }
- hysdn_net_release(card); /* release an existing net device */
-
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev) {
- printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
- return (-ENOMEM);
- }
-
- lp = netdev_priv(dev);
- lp->dev = dev;
-
- dev->netdev_ops = &hysdn_netdev_ops;
-	spin_lock_init(&lp->lock);
-
- /* initialise necessary or informing fields */
- dev->base_addr = card->iobase; /* IO address */
- dev->irq = card->irq; /* irq */
-
- if ((i = register_netdev(dev))) {
- printk(KERN_WARNING "HYSDN: unable to create network device\n");
- free_netdev(dev);
- return (i);
- }
- dev->ml_priv = card; /* remember pointer to own data structure */
- card->netif = dev; /* setup the local pointer */
-
- if (card->debug_flags & LOG_NET_INIT)
- hysdn_addlog(card, "network device created");
- return 0; /* and return success */
-} /* hysdn_net_create */
-
-/***************************************************************************/
-/* hysdn_net_release deletes the net device for the given card. The return */
-/* value 0 announces success, else a negative error code will be returned. */
-/***************************************************************************/
-int
-hysdn_net_release(hysdn_card *card)
-{
- struct net_device *dev = card->netif;
-
- if (!dev)
-		return (0);	/* non-existent */
-
- card->netif = NULL; /* clear out pointer */
- net_close(dev);
-
-	flush_tx_buffers(netdev_priv(dev));	/* empty buffers */
-
- unregister_netdev(dev); /* release the device */
- free_netdev(dev); /* release the memory allocated */
- if (card->debug_flags & LOG_NET_INIT)
- hysdn_addlog(card, "network device deleted");
-
- return (0); /* always successful */
-} /* hysdn_net_release */
-
-/*****************************************************************************/
-/* hysdn_net_getname returns a pointer to the name of the network interface. */
-/* if the interface does not exist, a "-" is returned. */
-/*****************************************************************************/
-char *
-hysdn_net_getname(hysdn_card *card)
-{
- struct net_device *dev = card->netif;
-
- if (!dev)
-		return ("-");		/* non-existent */
-
- return (dev->name);
-} /* hysdn_net_getname */
diff --git a/drivers/staging/isdn/hysdn/hysdn_pof.h b/drivers/staging/isdn/hysdn/hysdn_pof.h
deleted file mode 100644
index f63f5fa59d7e..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_pof.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: hysdn_pof.h,v 1.2.6.1 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, definitions used for handling pof-files.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/************************/
-/* POF specific defines */
-/************************/
-#define BOOT_BUF_SIZE 0x1000 /* =4096, maybe moved to other h file */
-#define CRYPT_FEEDTERM 0x8142
-#define CRYPT_STARTTERM 0x81a5
-/* max. timeout time in seconds
- * from end of booting to POF is ready
- */
-#define POF_READY_TIME_OUT_SEC 10
-
-/**********************************/
-/* defines for 1.stage boot image */
-/**********************************/
-
-/* the POF file record containing the boot loader image
- * has 2 pages of 16KB each:
- * 1. page contains the high 16-bit part of the 32-bit E1 words
- * 2. page contains the low 16-bit part of the 32-bit E1 words
- *
- * In each 16KB page we assume the start of the boot loader code
- * in the highest 2KB part (at offset 0x3800);
- * the rest (0x0000..0x37FF) is assumed to contain 0 bytes.
- */
-
-#define POF_BOOT_LOADER_PAGE_SIZE 0x4000 /* =16384U */
-#define POF_BOOT_LOADER_TOTAL_SIZE (2U * POF_BOOT_LOADER_PAGE_SIZE)
-
-#define POF_BOOT_LOADER_CODE_SIZE 0x0800 /* =2KB =2048U */
-
-/* offset in boot page, where loader code may start */
-/* =0x3800= 14336U */
-#define POF_BOOT_LOADER_OFF_IN_PAGE (POF_BOOT_LOADER_PAGE_SIZE-POF_BOOT_LOADER_CODE_SIZE)
-
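
A short sketch of the recombination implied by the layout above, assuming both 16KB pages are already buffered (helper name hypothetical; nwords would be POF_BOOT_LOADER_PAGE_SIZE / 2):

	#include <stdint.h>
	#include <stddef.h>

	/* merge the high/low 16-bit pages back into 32-bit E1 words */
	static void pof_merge_boot_pages(const uint16_t *hi_page,
					 const uint16_t *lo_page,
					 uint32_t *out, size_t nwords)
	{
		size_t i;

		for (i = 0; i < nwords; i++)
			out[i] = ((uint32_t)hi_page[i] << 16) | lo_page[i];
	}
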
-
-/*--------------------------------------POF file record structs------------*/
-typedef struct PofFileHdr_tag { /* Pof file header */
- /*00 */ unsigned long Magic __attribute__((packed));
- /*04 */ unsigned long N_PofRecs __attribute__((packed));
-/*08 */
-} tPofFileHdr;
-
-typedef struct PofRecHdr_tag { /* Pof record header */
- /*00 */ unsigned short PofRecId __attribute__((packed));
- /*02 */ unsigned long PofRecDataLen __attribute__((packed));
-/*06 */
-} tPofRecHdr;
-
-typedef struct PofTimeStamp_tag {
- /*00 */ unsigned long UnixTime __attribute__((packed));
- /*04 */ unsigned char DateTimeText[0x28];
- /* =40 */
-/*2C */
-} tPofTimeStamp;
-
-/* tPofFileHdr.Magic value: */
-#define TAGFILEMAGIC 0x464F501AUL
-/* tPofRecHdr.PofRecId values: */
-#define TAG_ABSDATA 0x1000 /* abs. data */
-#define TAG_BOOTDTA 0x1001 /* boot data */
-#define TAG_COMMENT 0x0020
-#define TAG_SYSCALL 0x0021
-#define TAG_FLOWCTRL 0x0022
-#define TAG_TIMESTMP 0x0010 /* date/time stamp of version */
-#define TAG_CABSDATA 0x1100 /* crypted abs. data */
-#define TAG_CBOOTDTA 0x1101 /* crypted boot data */
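
Taken together, the structures above describe a simple tagged container: a file header carrying a magic value and a record count, followed by records that each begin with a tPofRecHdr. A hedged sketch of an offline walk over a fully buffered image (the driver itself streams the file instead; a little-endian host is assumed):

	/* hypothetical validity walk over a complete POF image */
	static int pof_walk_records(const unsigned char *img, unsigned long size)
	{
		const tPofFileHdr *fh = (const tPofFileHdr *) img;
		unsigned long off = sizeof(tPofFileHdr);
		unsigned long n;

		if (size < off || fh->Magic != TAGFILEMAGIC)
			return -1;			/* not a POF image */
		for (n = 0; n < fh->N_PofRecs; n++) {
			const tPofRecHdr *rh;

			if (off + sizeof(tPofRecHdr) > size)
				return -1;		/* truncated record */
			rh = (const tPofRecHdr *) (img + off);
			off += sizeof(tPofRecHdr) + rh->PofRecDataLen;
		}
		return (off <= size) ? 0 : -1;
	}
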
diff --git a/drivers/staging/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c
deleted file mode 100644
index 48afd9f5316e..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_procconf.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
- *
- * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
- *
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/cred.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <net/net_namespace.h>
-
-#include "hysdn_defs.h"
-
-static DEFINE_MUTEX(hysdn_conf_mutex);
-
-#define INFO_OUT_LEN 80 /* length of info line including lf */
-
-/********************************************************/
-/* defines and data structure for conf write operations */
-/********************************************************/
-#define CONF_STATE_DETECT 0 /* waiting for detect */
-#define CONF_STATE_CONF 1 /* writing config data */
-#define CONF_STATE_POF 2 /* writing pof data */
-#define CONF_LINE_LEN 255 /* 255 chars max */
-
-struct conf_writedata {
- hysdn_card *card; /* card the device is connected to */
- int buf_size; /* actual number of bytes in the buffer */
- int needed_size; /* needed size when reading pof */
- int state; /* actual interface states from above constants */
- unsigned char conf_line[CONF_LINE_LEN]; /* buffered conf line */
- unsigned short channel; /* active channel number */
- unsigned char *pof_buffer; /* buffer when writing pof */
-};
-
-/***********************************************************************/
-/* process_line parses one config line and transfers it to the card if */
-/* necessary. */
-/* if the return value is negative an error occurred. */
-/***********************************************************************/
-static int
-process_line(struct conf_writedata *cnf)
-{
- unsigned char *cp = cnf->conf_line;
- int i;
-
- if (cnf->card->debug_flags & LOG_CNF_LINE)
- hysdn_addlog(cnf->card, "conf line: %s", cp);
-
- if (*cp == '-') { /* option */
- cp++; /* point to option char */
-
- if (*cp++ != 'c')
- return (0); /* option unknown or used */
- i = 0; /* start value for channel */
- while ((*cp <= '9') && (*cp >= '0'))
- i = i * 10 + *cp++ - '0'; /* get decimal number */
- if (i > 65535) {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "conf channel invalid %d", i);
- return (-ERR_INV_CHAN); /* invalid channel */
- }
- cnf->channel = i & 0xFFFF; /* set new channel number */
- return (0); /* success */
- } /* option */
- if (*cp == '*') { /* line to send */
- if (cnf->card->debug_flags & LOG_CNF_DATA)
- hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp);
- return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
- cnf->channel)); /* send the line without * */
- } /* line to send */
- return (0);
-} /* process_line */
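
For illustration, a configuration stream accepted by this parser could look like the following (channel numbers and payloads made up; each '*' line is sent to the card with the '*' stripped, on the channel selected by the preceding '-c' option):

	-c0
	*example config line for channel 0
	-c1
	*another line, sent on channel 1
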
-
-/***********************************/
-/* conf file operations and tables */
-/***********************************/
-
-/****************************************************/
-/* write conf file -> boot or send cfg line to card */
-/****************************************************/
-static ssize_t
-hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- struct conf_writedata *cnf;
- int i;
- unsigned char ch, *cp;
-
- if (!count)
- return (0); /* nothing to handle */
-
- if (!(cnf = file->private_data))
- return (-EFAULT); /* should never happen */
-
- if (cnf->state == CONF_STATE_DETECT) { /* auto detect cnf or pof data */
- if (copy_from_user(&ch, buf, 1)) /* get first char for detect */
- return (-EFAULT);
-
- if (ch == 0x1A) {
- /* we detected a pof file */
- if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
- return (cnf->needed_size); /* an error occurred -> exit */
- cnf->buf_size = 0; /* buffer is empty */
- cnf->state = CONF_STATE_POF; /* new state */
- } else {
- /* conf data has been detected */
- cnf->buf_size = 0; /* buffer is empty */
- cnf->state = CONF_STATE_CONF; /* requested conf data write */
- if (cnf->card->state != CARD_STATE_RUN)
- return (-ERR_NOT_BOOTED);
- cnf->conf_line[CONF_LINE_LEN - 1] = 0; /* limit string length */
- cnf->channel = 4098; /* default channel for output */
- }
- } /* state was auto detect */
- if (cnf->state == CONF_STATE_POF) { /* pof write active */
- i = cnf->needed_size - cnf->buf_size; /* bytes still missing for write */
- if (i <= 0)
- return (-EINVAL); /* size error handling pof */
-
- if (i < count)
- count = i; /* limit requested number of bytes */
- if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
- return (-EFAULT); /* error while copying */
- cnf->buf_size += count;
-
- if (cnf->needed_size == cnf->buf_size) {
- cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size); /* write data */
- if (cnf->needed_size <= 0) {
- cnf->card->state = CARD_STATE_BOOTERR; /* show boot error */
- return (cnf->needed_size); /* an error occurred */
- }
- cnf->buf_size = 0; /* buffer is empty again */
- }
- }
- /* pof write active */
- else { /* conf write active */
-
- if (cnf->card->state != CARD_STATE_RUN) {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "cnf write denied -> not booted");
- return (-ERR_NOT_BOOTED);
- }
- i = (CONF_LINE_LEN - 1) - cnf->buf_size; /* bytes available in buffer */
- if (i > 0) {
- /* copy remaining bytes into buffer */
-
- if (count > i)
- count = i; /* limit transfer */
- if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
- return (-EFAULT); /* error while copying */
-
- i = count; /* number of chars in buffer */
- cp = cnf->conf_line + cnf->buf_size;
- while (i) {
- /* search for end of line */
- if ((*cp < ' ') && (*cp != 9))
- break; /* end of line found */
- cp++;
- i--;
- } /* search for end of line */
-
- if (i) {
- /* delimiter found */
- *cp++ = 0; /* string termination */
- count -= (i - 1); /* subtract remaining bytes from count */
- while ((i) && (*cp < ' ') && (*cp != 9)) {
- i--; /* discard next char */
- count++; /* mark as read */
- cp++; /* next char */
- }
- cnf->buf_size = 0; /* buffer is empty after transfer */
- if ((i = process_line(cnf)) < 0) /* handle the line */
- count = i; /* return the error */
- }
- /* delimiter found */
- else {
- cnf->buf_size += count; /* add chars to string */
- if (cnf->buf_size >= CONF_LINE_LEN - 1) {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
- return (-ERR_CONF_LONG);
- }
- } /* not delimited */
-
- }
- /* copy remaining bytes into buffer */
- else {
- if (cnf->card->debug_flags & LOG_CNF_MISC)
- hysdn_addlog(cnf->card, "cnf line too long");
- return (-ERR_CONF_LONG);
- }
- } /* conf write active */
-
- return (count);
-} /* hysdn_conf_write */
-
-/*******************************************/
-/* read conf file -> output card info data */
-/*******************************************/
-static ssize_t
-hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- char *cp;
-
- if (!(file->f_mode & FMODE_READ))
- return -EPERM; /* no permission to read */
-
- if (!(cp = file->private_data))
- return -EFAULT; /* should never happen */
-
- return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
-} /* hysdn_conf_read */
-
-/******************/
-/* open conf file */
-/******************/
-static int
-hysdn_conf_open(struct inode *ino, struct file *filep)
-{
- hysdn_card *card;
- struct conf_writedata *cnf;
- char *cp, *tmp;
-
- /* now search the addressed card */
- mutex_lock(&hysdn_conf_mutex);
- card = PDE_DATA(ino);
- if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
- hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
- filep->f_cred->fsuid, filep->f_cred->fsgid,
- filep->f_mode);
-
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write boot file or conf line */
-
- if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
- mutex_unlock(&hysdn_conf_mutex);
- return (-EFAULT);
- }
- cnf->card = card;
- cnf->buf_size = 0; /* nothing buffered */
- cnf->state = CONF_STATE_DETECT; /* start auto detect */
- filep->private_data = cnf;
-
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- /* read access -> output card info data */
-
- if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
- mutex_unlock(&hysdn_conf_mutex);
- return (-EFAULT); /* out of memory */
- }
- filep->private_data = tmp; /* start of string */
-
- /* first output a headline */
- sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device");
- cp = tmp; /* start of string */
- while (*cp)
- cp++;
- while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
- *cp++ = ' ';
- *cp++ = '\n';
-
- /* and now the data */
- sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s",
- card->myid,
- card->bus,
- PCI_SLOT(card->devfn),
- card->brdtype,
- card->irq,
- card->iobase,
- card->membase,
- card->bchans,
- card->faxchans,
- card->state,
- hysdn_net_getname(card));
- while (*cp)
- cp++;
- while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
- *cp++ = ' ';
- *cp++ = '\n';
- *cp = 0; /* end of string */
- } else { /* simultaneous read/write access forbidden ! */
- mutex_unlock(&hysdn_conf_mutex);
- return (-EPERM); /* no permission this time */
- }
- mutex_unlock(&hysdn_conf_mutex);
- return nonseekable_open(ino, filep);
-} /* hysdn_conf_open */
-
-/***************************/
-/* close a config file. */
-/***************************/
-static int
-hysdn_conf_close(struct inode *ino, struct file *filep)
-{
- hysdn_card *card;
- struct conf_writedata *cnf;
- int retval = 0;
-
- mutex_lock(&hysdn_conf_mutex);
- card = PDE_DATA(ino);
- if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
- hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
- filep->f_cred->fsuid, filep->f_cred->fsgid,
- filep->f_mode);
-
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write boot file or conf line */
- if (filep->private_data) {
- cnf = filep->private_data;
-
- if (cnf->state == CONF_STATE_POF)
- retval = pof_write_close(cnf->card); /* close the pof write */
- kfree(filep->private_data); /* free allocated memory for buffer */
-
- } /* handle write private data */
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- /* read access -> output card info data */
-
- kfree(filep->private_data); /* release memory */
- }
- mutex_unlock(&hysdn_conf_mutex);
- return (retval);
-} /* hysdn_conf_close */
-
-/******************************************************/
-/* table for conf filesystem functions defined above. */
-/******************************************************/
-static const struct file_operations conf_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hysdn_conf_read,
- .write = hysdn_conf_write,
- .open = hysdn_conf_open,
- .release = hysdn_conf_close,
-};
-
-/*****************************/
-/* hysdn subdir in /proc/net */
-/*****************************/
-struct proc_dir_entry *hysdn_proc_entry = NULL;
-
-/*******************************************************************************/
-/* hysdn_procconf_init is called when the module is loaded and after the cards */
-/* have been detected. The needed proc dir and card config files are created. */
-/* The log init is called last. */
-/*******************************************************************************/
-int
-hysdn_procconf_init(void)
-{
- hysdn_card *card;
- unsigned char conf_name[20];
-
- hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net);
- if (!hysdn_proc_entry) {
- printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
- return (-1);
- }
- card = card_root; /* point to first card */
- while (card) {
-
- sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
- if ((card->procconf = (void *) proc_create_data(conf_name,
- S_IFREG | S_IRUGO | S_IWUSR,
- hysdn_proc_entry,
- &conf_fops,
- card)) != NULL) {
- hysdn_proclog_init(card); /* init the log file entry */
- }
- card = card->next; /* next entry */
- }
-
- printk(KERN_NOTICE "HYSDN: procfs initialised\n");
- return 0;
-} /* hysdn_procconf_init */
-
-/*************************************************************************************/
-/* hysdn_procconf_release is called when the module is unloaded and before the cards */
-/* resources are released. The module counter is assumed to be 0 ! */
-/*************************************************************************************/
-void
-hysdn_procconf_release(void)
-{
- hysdn_card *card;
- unsigned char conf_name[20];
-
- card = card_root; /* start with first card */
- while (card) {
-
- sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
- if (card->procconf)
- remove_proc_entry(conf_name, hysdn_proc_entry);
-
-		hysdn_proclog_release(card);	/* release the log file entry */
-
- card = card->next; /* point to next card */
- }
-
- remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net);
-}
diff --git a/drivers/staging/isdn/hysdn/hysdn_proclog.c b/drivers/staging/isdn/hysdn/hysdn_proclog.c
deleted file mode 100644
index 6e898b90e86e..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_proclog.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/* $Id: hysdn_proclog.c,v 1.9.6.3 2001/09/23 22:24:54 kai Exp $
- *
- * Linux driver for HYSDN cards, /proc/net filesystem log functions.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-
-#include "hysdn_defs.h"
-
-/* the proc subdir for the interface is defined in the procconf module */
-extern struct proc_dir_entry *hysdn_proc_entry;
-
-static DEFINE_MUTEX(hysdn_log_mutex);
-static void put_log_buffer(hysdn_card *card, char *cp);
-
-/*************************************************/
-/* structure keeping ascii log for device output */
-/*************************************************/
-struct log_data {
- struct log_data *next;
- unsigned long usage_cnt;/* number of files still to work */
- void *proc_ctrl; /* pointer to own control procdata structure */
- char log_start[2]; /* log string start (final len aligned by size) */
-};
-
-/**********************************************/
-/* structure holding proc entries for one card */
-/**********************************************/
-struct procdata {
- struct proc_dir_entry *log; /* log entry */
- char log_name[15]; /* log filename */
- struct log_data *log_head, *log_tail; /* head and tail for queue */
- int if_used; /* open count for interface */
- unsigned char logtmp[LOG_MAX_LINELEN];
- wait_queue_head_t rd_queue;
-};
-
-
-/**********************************************/
-/* log function for cards error log interface */
-/**********************************************/
-void
-hysdn_card_errlog(hysdn_card *card, tErrLogEntry *logp, int maxsize)
-{
- char buf[ERRLOG_TEXT_SIZE + 40];
-
- sprintf(buf, "LOG 0x%08lX 0x%08lX : %s\n", logp->ulErrType, logp->ulErrSubtype, logp->ucText);
- put_log_buffer(card, buf); /* output the string */
-} /* hysdn_card_errlog */
-
-/***************************************************/
-/* Log function using format specifiers for output */
-/***************************************************/
-void
-hysdn_addlog(hysdn_card *card, char *fmt, ...)
-{
- struct procdata *pd = card->proclog;
- char *cp;
- va_list args;
-
- if (!pd)
- return; /* log structure non existent */
-
- cp = pd->logtmp;
- cp += sprintf(cp, "HYSDN: card %d ", card->myid);
-
- va_start(args, fmt);
- cp += vsprintf(cp, fmt, args);
- va_end(args);
- *cp++ = '\n';
- *cp = 0;
-
- if (card->debug_flags & DEB_OUT_SYSLOG)
- printk(KERN_INFO "%s", pd->logtmp);
- else
- put_log_buffer(card, pd->logtmp);
-
-} /* hysdn_addlog */
-
-/********************************************/
-/* put a log buffer into the log queue. */
-/* This buffer will be kept until all files */
-/* opened for read have got the contents. */
-/* Flushes buffers no longer in use. */
-/********************************************/
-static void
-put_log_buffer(hysdn_card *card, char *cp)
-{
- struct log_data *ib;
- struct procdata *pd = card->proclog;
- unsigned long flags;
-
- if (!pd)
- return;
- if (!cp)
- return;
- if (!*cp)
- return;
- if (pd->if_used <= 0)
- return; /* no open file for read */
-
- if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
- return; /* no memory */
- strcpy(ib->log_start, cp); /* set output string */
- ib->next = NULL;
- ib->proc_ctrl = pd; /* point to own control structure */
- spin_lock_irqsave(&card->hysdn_lock, flags);
- ib->usage_cnt = pd->if_used;
- if (!pd->log_head)
- pd->log_head = ib; /* new head */
- else
- pd->log_tail->next = ib; /* follows existing messages */
- pd->log_tail = ib; /* new tail */
-
-	/* delete old entries */
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else {
- break;
- }
- } /* pd->log_head->next */
-
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-
- wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
-} /* put_log_buffer */
-
-
-/******************************/
-/* file operations and tables */
-/******************************/
-
-/****************************************/
-/* write log file -> set log level bits */
-/****************************************/
-static ssize_t
-hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- int rc;
- hysdn_card *card = file->private_data;
-
- rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
- if (rc < 0)
- return rc;
- hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
- return (count);
-} /* hysdn_log_write */
-
-/******************/
-/* read log file */
-/******************/
-static ssize_t
-hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- struct log_data *inf;
- int len;
- hysdn_card *card = PDE_DATA(file_inode(file));
-
- if (!(inf = *((struct log_data **) file->private_data))) {
- struct procdata *pd = card->proclog;
- if (file->f_flags & O_NONBLOCK)
- return (-EAGAIN);
-
- wait_event_interruptible(pd->rd_queue, (inf =
- *((struct log_data **) file->private_data)));
- }
- if (!inf)
- return (0);
-
- inf->usage_cnt--; /* new usage count */
- file->private_data = &inf->next; /* next structure */
- if ((len = strlen(inf->log_start)) <= count) {
- if (copy_to_user(buf, inf->log_start, len))
- return -EFAULT;
- *off += len;
- return (len);
- }
- return (0);
-} /* hysdn_log_read */
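
Each successful read on the log device returns exactly one log line and blocks until one is available unless O_NONBLOCK is set; the write side of the same file sets the debug mask (see hysdn_log_write above). A hedged userspace sketch that tails the log of card 0, with the path derived from PROC_SUBDIR_NAME and PROC_LOG_BASENAME:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		ssize_t n;
		int fd = open("/proc/net/hysdn/cardlog0", O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
			buf[n] = 0;
			fputs(buf, stdout);	/* one log line per read */
		}
		close(fd);
		return 0;
	}
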
-
-/******************/
-/* open log file */
-/******************/
-static int
-hysdn_log_open(struct inode *ino, struct file *filep)
-{
- hysdn_card *card = PDE_DATA(ino);
-
- mutex_lock(&hysdn_log_mutex);
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write log level only */
- filep->private_data = card; /* remember our own card */
- } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
- struct procdata *pd = card->proclog;
- unsigned long flags;
-
- /* read access -> log/debug read */
- spin_lock_irqsave(&card->hysdn_lock, flags);
- pd->if_used++;
- if (pd->log_head)
- filep->private_data = &pd->log_tail->next;
- else
- filep->private_data = &pd->log_head;
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
- } else { /* simultaneous read/write access forbidden ! */
- mutex_unlock(&hysdn_log_mutex);
- return (-EPERM); /* no permission this time */
- }
- mutex_unlock(&hysdn_log_mutex);
- return nonseekable_open(ino, filep);
-} /* hysdn_log_open */
-
-/*******************************************************************************/
-/* close a cardlog file. If the file has been opened for exclusive write, the */
-/* debug mask has already been handled by the write path and nothing is left */
-/* to do. Otherwise the file is handled as log output: the interface usage */
-/* count is decremented and all queued buffers are marked as read. If this */
-/* file was the last one to be closed, all buffers are freed. */
-/*******************************************************************************/
-static int
-hysdn_log_close(struct inode *ino, struct file *filep)
-{
- struct log_data *inf;
- struct procdata *pd;
- hysdn_card *card;
- int retval = 0;
-
- mutex_lock(&hysdn_log_mutex);
- if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
- /* write only access -> write debug level written */
- retval = 0; /* success */
- } else {
- /* read access -> log/debug read, mark one further file as closed */
-
- inf = *((struct log_data **) filep->private_data); /* get first log entry */
- if (inf)
- pd = (struct procdata *) inf->proc_ctrl; /* still entries there */
- else {
- /* no info available -> search card */
- card = PDE_DATA(file_inode(filep));
- pd = card->proclog; /* pointer to procfs log */
- }
- if (pd)
- pd->if_used--; /* decrement interface usage count by one */
-
- while (inf) {
- inf->usage_cnt--; /* decrement usage count for buffers */
- inf = inf->next;
- }
-
- if (pd)
- if (pd->if_used <= 0) /* delete buffers if last file closed */
- while (pd->log_head) {
- inf = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(inf);
- }
- } /* read access */
- mutex_unlock(&hysdn_log_mutex);
-
- return (retval);
-} /* hysdn_log_close */
-
-/*************************************************/
-/* select/poll routine to be able using select() */
-/*************************************************/
-static __poll_t
-hysdn_log_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask = 0;
- hysdn_card *card = PDE_DATA(file_inode(file));
- struct procdata *pd = card->proclog;
-
- if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
- return (mask); /* no polling for write supported */
-
- poll_wait(file, &(pd->rd_queue), wait);
-
- if (*((struct log_data **) file->private_data))
- mask |= EPOLLIN | EPOLLRDNORM;
-
- return mask;
-} /* hysdn_log_poll */
-
-/******************************************************/
-/* table for log filesystem functions defined above. */
-/******************************************************/
-static const struct file_operations log_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hysdn_log_read,
- .write = hysdn_log_write,
- .poll = hysdn_log_poll,
- .open = hysdn_log_open,
- .release = hysdn_log_close,
-};
-
-
-/***********************************************************************************/
-/* hysdn_proclog_init is called when the module is loaded, after creating the */
-/* cards' conf files. */
-/***********************************************************************************/
-int
-hysdn_proclog_init(hysdn_card *card)
-{
- struct procdata *pd;
-
- /* create a cardlog proc entry */
-
- if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
- sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
- pd->log = proc_create_data(pd->log_name,
- S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
- &log_fops, card);
-
- init_waitqueue_head(&(pd->rd_queue));
-
- card->proclog = (void *) pd; /* remember procfs structure */
- }
- return (0);
-} /* hysdn_proclog_init */
-
-/************************************************************************************/
-/* hysdn_proclog_release is called when the module is unloaded and before the cards */
-/* conf file is released */
-/* The module counter is assumed to be 0 ! */
-/************************************************************************************/
-void
-hysdn_proclog_release(hysdn_card *card)
-{
- struct procdata *pd;
-
- if ((pd = (struct procdata *) card->proclog) != NULL) {
- if (pd->log)
- remove_proc_entry(pd->log_name, hysdn_proc_entry);
- kfree(pd); /* release memory */
- card->proclog = NULL;
- }
-} /* hysdn_proclog_release */
diff --git a/drivers/staging/isdn/hysdn/hysdn_sched.c b/drivers/staging/isdn/hysdn/hysdn_sched.c
deleted file mode 100644
index 31d7c1415543..000000000000
--- a/drivers/staging/isdn/hysdn/hysdn_sched.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/* $Id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai Exp $
- *
- * Linux driver for HYSDN cards
- * scheduler routines for handling exchange card <-> pc.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include "hysdn_defs.h"
-
-/*****************************************************************************/
-/* hysdn_sched_rx is called from the card's handler to announce that new */
-/* data is available from the card. The routine has to handle the data and */
-/* return a nonzero code if the data could be processed (or even thrown */
-/* away); if no room to buffer the data is available, a zero return tells */
-/* the card to keep the data until later. */
-/*****************************************************************************/
-int
-hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len,
- unsigned short chan)
-{
-
- switch (chan) {
- case CHAN_NDIS_DATA:
- if (hynet_enable & (1 << card->myid)) {
- /* give packet to network handler */
- hysdn_rx_netpkt(card, buf, len);
- }
- break;
-
- case CHAN_ERRLOG:
- hysdn_card_errlog(card, (tErrLogEntry *) buf, len);
- if (card->err_log_state == ERRLOG_STATE_ON)
- card->err_log_state = ERRLOG_STATE_START; /* start new fetch */
- break;
-#ifdef CONFIG_HYSDN_CAPI
- case CHAN_CAPI:
-/* give packet to CAPI handler */
- if (hycapi_enable & (1 << card->myid)) {
- hycapi_rx_capipkt(card, buf, len);
- }
- break;
-#endif /* CONFIG_HYSDN_CAPI */
- default:
- printk(KERN_INFO "irq message channel %d len %d unhandled \n", chan, len);
- break;
-
- } /* switch rx channel */
-
- return (1); /* always handled */
-} /* hysdn_sched_rx */
-
-/*****************************************************************************/
-/* hysdn_sched_tx is called from the card's handler to announce that there */
-/* is room in the card's tx-buffer and that data may be sent if needed. */
-/* If the routine wants to send data it must fill buf, len and chan with */
-/* the appropriate data and return a nonzero value. A zero return means */
-/* there is no new data to send. maxlen specifies the buffer size available */
-/* for sending. */
-/*****************************************************************************/
-int
-hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
- unsigned short volatile *len, unsigned short volatile *chan,
- unsigned short maxlen)
-{
- struct sk_buff *skb;
-
- if (card->net_tx_busy) {
- card->net_tx_busy = 0; /* reset flag */
- hysdn_tx_netack(card); /* acknowledge packet send */
- } /* a network packet has completely been transferred */
- /* first of all async requests are handled */
- if (card->async_busy) {
- if (card->async_len <= maxlen) {
- memcpy(buf, card->async_data, card->async_len);
- *len = card->async_len;
- *chan = card->async_channel;
- card->async_busy = 0; /* reset request */
- return (1);
- }
- card->async_busy = 0; /* in case of length error */
- } /* async request */
- if ((card->err_log_state == ERRLOG_STATE_START) &&
- (maxlen >= ERRLOG_CMD_REQ_SIZE)) {
- strcpy(buf, ERRLOG_CMD_REQ); /* copy the command */
- *len = ERRLOG_CMD_REQ_SIZE; /* buffer length */
- *chan = CHAN_ERRLOG; /* and channel */
- card->err_log_state = ERRLOG_STATE_ON; /* new state is on */
- return (1); /* tell that data should be sent */
- } /* error log start and able to send */
- if ((card->err_log_state == ERRLOG_STATE_STOP) &&
- (maxlen >= ERRLOG_CMD_STOP_SIZE)) {
- strcpy(buf, ERRLOG_CMD_STOP); /* copy the command */
- *len = ERRLOG_CMD_STOP_SIZE; /* buffer length */
- *chan = CHAN_ERRLOG; /* and channel */
- card->err_log_state = ERRLOG_STATE_OFF; /* new state is off */
- return (1); /* tell that data should be sent */
- } /* error log stop and able to send */
- /* now handle network interface packets */
- if ((hynet_enable & (1 << card->myid)) &&
- (skb = hysdn_tx_netget(card)) != NULL)
- {
- if (skb->len <= maxlen) {
- /* copy the packet to the buffer */
- skb_copy_from_linear_data(skb, buf, skb->len);
- *len = skb->len;
- *chan = CHAN_NDIS_DATA;
- card->net_tx_busy = 1; /* we are busy sending network data */
- return (1); /* go and send the data */
- } else
- hysdn_tx_netack(card); /* acknowledge packet -> throw away */
- } /* send a network packet if available */
-#ifdef CONFIG_HYSDN_CAPI
- if (((hycapi_enable & (1 << card->myid))) &&
- ((skb = hycapi_tx_capiget(card)) != NULL))
- {
- if (skb->len <= maxlen) {
- skb_copy_from_linear_data(skb, buf, skb->len);
- *len = skb->len;
- *chan = CHAN_CAPI;
- hycapi_tx_capiack(card);
- return (1); /* go and send the data */
- }
- }
-#endif /* CONFIG_HYSDN_CAPI */
- return (0); /* nothing to send */
-} /* hysdn_sched_tx */
-
-
-/*****************************************************************************/
-/* send one config line to the card and return 0 if successful, otherwise a */
-/* negative error code. */
-/* The function works with timeouts, which may not give the greatest speed */
-/* for sending the line, but this hardly matters because only a few lines */
-/* are sent and this happens very seldom. */
-/*****************************************************************************/
-int
-hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
-{
- int cnt = 50; /* timeout intervals */
- unsigned long flags;
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);
-
- while (card->async_busy) {
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg delayed");
-
- msleep_interruptible(20); /* Timeout 20ms */
- if (!--cnt)
- return (-ERR_ASYNC_TIME); /* timed out */
- } /* wait for buffer to become free */
-
- spin_lock_irqsave(&card->hysdn_lock, flags);
- strcpy(card->async_data, line);
- card->async_len = strlen(line) + 1;
- card->async_channel = chan;
- card->async_busy = 1; /* request transfer */
-
- /* now queue the task */
- schedule_work(&card->irq_queue);
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg data queued");
-
- cnt++; /* short delay */
-
- while (card->async_busy) {
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg waiting for tx-ready");
-
- msleep_interruptible(20); /* Timeout 20ms */
- if (!--cnt)
- return (-ERR_ASYNC_TIME); /* timed out */
- } /* wait for buffer to become free again */
-
- if (card->debug_flags & LOG_SCHED_ASYN)
- hysdn_addlog(card, "async tx-cfg data send");
-
- return (0); /* line sent correctly */
-} /* hysdn_tx_cfgline */
diff --git a/drivers/staging/isdn/hysdn/ince1pc.h b/drivers/staging/isdn/hysdn/ince1pc.h
deleted file mode 100644
index cab68361de65..000000000000
--- a/drivers/staging/isdn/hysdn/ince1pc.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Linux driver for HYSDN cards
- * common definitions for both sides of the bus:
- * - conventions both spoolers must know
- * - channel numbers agreed upon
- *
- * Author M. Steinkopf
- * Copyright 1999 by M. Steinkopf
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __INCE1PC_H__
-#define __INCE1PC_H__
-
-/* basic scalar definitions have the same meaning,
- * but their declaration location depends on the environment
- */
-
-/*--------------------------------------channel numbers---------------------*/
-#define CHAN_SYSTEM 0x0001 /* system channel (spooler to spooler) */
-#define CHAN_ERRLOG 0x0005 /* error logger */
-#define CHAN_CAPI 0x0064 /* CAPI interface */
-#define CHAN_NDIS_DATA 0x1001 /* NDIS data transfer */
-
-/*--------------------------------------POF ready msg-----------------------*/
-/* NOTE: after booting, the POF sends a system ready message to the PC: */
-#define RDY_MAGIC 0x52535953UL /* 'SYSR' reversed */
-#define RDY_MAGIC_SIZE 4 /* size in bytes */
-
-#define MAX_N_TOK_BYTES 255
-
-#define MIN_RDY_MSG_SIZE RDY_MAGIC_SIZE
-#define MAX_RDY_MSG_SIZE (RDY_MAGIC_SIZE + MAX_N_TOK_BYTES)
-
-#define SYSR_TOK_END 0
-#define SYSR_TOK_B_CHAN 1 /* nr. of B-Channels; DataLen=1; def: 2 */
-#define SYSR_TOK_FAX_CHAN 2 /* nr. of FAX Channels; DataLen=1; def: 0 */
-#define SYSR_TOK_MAC_ADDR 3 /* MAC-Address; DataLen=6; def: auto */
-#define SYSR_TOK_ESC 255 /* undefined data size yet */
-/* default values, if not corrected by token: */
-#define SYSR_TOK_B_CHAN_DEF 2 /* assume 2 B-Channels */
-#define SYSR_TOK_FAX_CHAN_DEF 1 /* assume 1 FAX Channel */
-
-/* syntax of new SYSR token stream:
- * channel: CHAN_SYSTEM
- * msgsize: MIN_RDY_MSG_SIZE <= x <= MAX_RDY_MSG_SIZE
- * RDY_MAGIC_SIZE <= x <= (RDY_MAGIC_SIZE+MAX_N_TOK_BYTES)
- * msg : 0 1 2 3 {4 5 6 ..}
- * S Y S R MAX_N_TOK_BYTES bytes of TokenStream
- *
- * TokenStream := empty
- * | {NonEndTokenChunk} EndToken RotlCRC
- * NonEndTokenChunk:= NonEndTokenId DataLen [Data]
- * NonEndTokenId := 0x01 .. 0xFE 1 BYTE
- * DataLen := 0x00 .. 0xFF 1 BYTE
- * Data := DataLen bytes
- * EndToken := 0x00
- * RotlCRC := special 1 byte CRC over all NonEndTokenChunk bytes
- * s. RotlCRC algorithm
- *
- * RotlCRC algorithm:
- * ucSum= 0 1 unsigned char
- * for all NonEndTokenChunk bytes:
- * ROTL(ucSum,1) rotate left by 1
- * ucSum += Char; add current byte with wrap-around
- * RotlCRC= ~ucSum; invert all bits for result
- *
- * note:
- * - for a 16-bit FIFO, add a padding 0 byte to achieve an even number of token data bytes!
- */
-
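For clarity, here is a minimal C sketch of the RotlCRC computation specified above; the helper name rotl_crc() is hypothetical and not part of the driver:

#include <stdint.h>
#include <stddef.h>

/* CRC over all NonEndTokenChunk bytes, per the comment above. */
static uint8_t rotl_crc(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		sum = (uint8_t)((sum << 1) | (sum >> 7)); /* ROTL(sum, 1) */
		sum += buf[i];  /* add current byte, wraps around */
	}
	return (uint8_t)~sum;   /* invert all bits for the result */
}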
-/*--------------------------------------error logger------------------------*/
-/* note: the POF needs the final 0! */
-#define ERRLOG_CMD_REQ "ERRLOG ON"
-#define ERRLOG_CMD_REQ_SIZE 10 /* with final 0 byte ! */
-#define ERRLOG_CMD_STOP "ERRLOG OFF"
-#define ERRLOG_CMD_STOP_SIZE 11 /* with final 0 byte ! */
-
-#define ERRLOG_ENTRY_SIZE 64 /* sizeof(tErrLogEntry) */
- /* remaining text size = 55 */
-#define ERRLOG_TEXT_SIZE (ERRLOG_ENTRY_SIZE - 2 * 4 - 1)
-
-typedef struct ErrLogEntry_tag {
-
- /*00 */ unsigned long ulErrType;
-
- /*04 */ unsigned long ulErrSubtype;
-
- /*08 */ unsigned char ucTextSize;
-
- /*09 */ unsigned char ucText[ERRLOG_TEXT_SIZE];
- /* ASCIIZ of len ucTextSize-1 */
-
-/*40 */
-} tErrLogEntry;
-
-
-#if defined(__TURBOC__)
-#if sizeof(tErrLogEntry) != ERRLOG_ENTRY_SIZE
-#error size of tErrLogEntry != ERRLOG_ENTRY_SIZE
-#endif /* */
-#endif /* */
-
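The Turbo C guard above exists because a standard C preprocessor cannot evaluate sizeof inside an #if. The modern way to get the same compile-time size guarantee is a static assertion (in the kernel, BUILD_BUG_ON() plays the same role); a minimal sketch:

#include <assert.h> /* C11 static_assert */

static_assert(sizeof(tErrLogEntry) == ERRLOG_ENTRY_SIZE,
	      "tErrLogEntry must be exactly ERRLOG_ENTRY_SIZE bytes");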
-/*--------------------------------------DPRAM boot spooler------------------*/
-/* this is the structure used between the PC and
- * the Hyperstone to exchange boot data
- */
-#define DPRAM_SPOOLER_DATA_SIZE 0x20
-typedef struct DpramBootSpooler_tag {
-
- /*00 */ unsigned char Len;
-
- /*01 */ volatile unsigned char RdPtr;
-
- /*02 */ unsigned char WrPtr;
-
- /*03 */ unsigned char Data[DPRAM_SPOOLER_DATA_SIZE];
-
-/*23 */
-} tDpramBootSpooler;
-
-
-#define DPRAM_SPOOLER_MIN_SIZE 5 /* Len+RdPtr+WrPtr+2*Data */
-#define DPRAM_SPOOLER_DEF_SIZE 0x23 /* current default size */
-
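A hypothetical sketch of draining one byte from the spooler, assuming the usual circular-FIFO convention (RdPtr == WrPtr means empty); the actual PC/Hyperstone boot protocol may differ:

static int spooler_get_byte(tDpramBootSpooler *sp, unsigned char *out)
{
	if (sp->RdPtr == sp->WrPtr)
		return -1;                     /* no data buffered */
	*out = sp->Data[sp->RdPtr];
	sp->RdPtr = (sp->RdPtr + 1) % sp->Len; /* wrap around the ring */
	return 0;
}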
-/*--------------------------------------HYCARD/ERGO DPRAM SoftUart----------*/
-/* at DPRAM offset 0x1C00: */
-#define SIZE_RSV_SOFT_UART 0x1B0 /* 432 bytes reserved for SoftUart */
-
-
-#endif /* __INCE1PC_H__ */
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index 0a23727d0dc3..93cf28febdf6 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -338,7 +338,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);
- pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE);
+ pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE);
if (!pcard->regs_bar_base) {
dev_err(&pcard->pdev->dev,
"probe: REG_BAR could not remap memory to virtual space\n");
@@ -367,7 +367,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
- pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr,
+ pcard->dma_bar_base = ioremap(dma_bar_phys_addr,
dma_bar_phys_len);
if (!pcard->dma_bar_base) {
dev_err(&pcard->pdev->dev,
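A note on this conversion, which recurs throughout the patch: v5.6 removed ioremap_nocache() in favor of plain ioremap(), which already provides an uncached mapping suitable for MMIO, so the replacement is behavior-preserving. A minimal usage sketch:

/* Sketch: map a PCI BAR for MMIO; ioremap() returns NULL on failure. */
void __iomem *base = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
if (!base)
	return -ENOMEM;
/* ... readl()/writel() against 'base' ... */
iounmap(base);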
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index 5460bf973c9c..25bb5c97dd21 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -32,7 +32,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matt.Sickler@Daktronics.com");
-struct i2c_device {
+struct kpc_i2c {
unsigned long smba;
struct i2c_adapter adapter;
unsigned int features;
@@ -131,7 +131,7 @@ struct i2c_device {
/* Make sure the SMBus host is ready to start transmitting.
* Return 0 if it is, -EBUSY if it is not.
*/
-static int i801_check_pre(struct i2c_device *priv)
+static int i801_check_pre(struct kpc_i2c *priv)
{
int status;
@@ -156,7 +156,7 @@ static int i801_check_pre(struct i2c_device *priv)
}
/* Convert the status register to an error code, and clear it. */
-static int i801_check_post(struct i2c_device *priv, int status, int timeout)
+static int i801_check_post(struct kpc_i2c *priv, int status, int timeout)
{
int result = 0;
@@ -206,7 +206,7 @@ static int i801_check_post(struct i2c_device *priv, int status, int timeout)
return result;
}
-static int i801_transaction(struct i2c_device *priv, int xact)
+static int i801_transaction(struct kpc_i2c *priv, int xact)
{
int status;
int result;
@@ -235,7 +235,7 @@ static int i801_transaction(struct i2c_device *priv, int xact)
}
/* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(struct i2c_device *priv)
+static void i801_wait_hwpec(struct kpc_i2c *priv)
{
int timeout = 0;
int status;
@@ -251,7 +251,7 @@ static void i801_wait_hwpec(struct i2c_device *priv)
outb_p(status, SMBHSTSTS(priv));
}
-static int i801_block_transaction_by_block(struct i2c_device *priv,
+static int i801_block_transaction_by_block(struct kpc_i2c *priv,
union i2c_smbus_data *data,
char read_write, int hwpec)
{
@@ -285,7 +285,7 @@ static int i801_block_transaction_by_block(struct i2c_device *priv,
return 0;
}
-static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
+static int i801_block_transaction_byte_by_byte(struct kpc_i2c *priv,
union i2c_smbus_data *data,
char read_write, int command,
int hwpec)
@@ -367,7 +367,7 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
return 0;
}
-static int i801_set_block_buffer_mode(struct i2c_device *priv)
+static int i801_set_block_buffer_mode(struct kpc_i2c *priv)
{
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
@@ -376,7 +376,7 @@ static int i801_set_block_buffer_mode(struct i2c_device *priv)
}
/* Block transaction function */
-static int i801_block_transaction(struct i2c_device *priv,
+static int i801_block_transaction(struct kpc_i2c *priv,
union i2c_smbus_data *data, char read_write,
int command, int hwpec)
{
@@ -440,7 +440,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
int hwpec;
int block = 0;
int ret, xact = 0;
- struct i2c_device *priv = i2c_get_adapdata(adap);
+ struct kpc_i2c *priv = i2c_get_adapdata(adap);
hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
(flags & I2C_CLIENT_PEC) &&
@@ -572,9 +572,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
return 0;
}
+#define enable_flag(x) (x)
+#define disable_flag(x) 0
+#define enable_flag_if(x, cond) ((cond) ? (x) : 0)
+
static u32 i801_func(struct i2c_adapter *adapter)
{
- struct i2c_device *priv = i2c_get_adapdata(adapter);
+ struct kpc_i2c *priv = i2c_get_adapdata(adapter);
/* original settings
* u32 f = I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
@@ -588,42 +592,42 @@ static u32 i801_func(struct i2c_adapter *adapter)
// http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
u32 f =
- I2C_FUNC_I2C | /* 0x00000001(I enabled this
- * one)
- */
- !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
- !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
- ((priv->features & FEATURE_SMBUS_PEC) ?
- I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
- !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
- I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
- !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
- !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
- !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
- !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
- !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
- !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
- !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
- ((priv->features & FEATURE_I2C_BLOCK_READ) ?
- I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
-
- I2C_FUNC_SMBUS_BYTE | /* _READ_BYTE _WRITE_BYTE */
- I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA
- * _WRITE_BYTE_DATA
- */
- I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA
- * _WRITE_WORD_DATA
- */
- I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA
- * _WRITE_BLOCK_DATA
- */
- !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK
- * _WRITE_I2C_BLOCK
- */
- !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE
+ enable_flag(I2C_FUNC_I2C) | /* 0x00000001(I enabled this one) */
+ disable_flag(I2C_FUNC_10BIT_ADDR) | /* 0x00000002 */
+ disable_flag(I2C_FUNC_PROTOCOL_MANGLING) | /* 0x00000004 */
+ enable_flag_if(I2C_FUNC_SMBUS_PEC,
+ priv->features & FEATURE_SMBUS_PEC) |
+ /* 0x00000008 */
+ disable_flag(I2C_FUNC_SMBUS_BLOCK_PROC_CALL) | /* 0x00008000 */
+ enable_flag(I2C_FUNC_SMBUS_QUICK) | /* 0x00010000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_BYTE) | /* 0x00020000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE) | /* 0x00040000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_BYTE_DATA) | /* 0x00080000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_BYTE_DATA) | /* 0x00100000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_WORD_DATA) | /* 0x00200000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_WORD_DATA) | /* 0x00400000 */
+ disable_flag(I2C_FUNC_SMBUS_PROC_CALL) | /* 0x00800000 */
+ disable_flag(I2C_FUNC_SMBUS_READ_BLOCK_DATA) | /* 0x01000000 */
+ disable_flag(I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) | /* 0x02000000 */
+ enable_flag_if(I2C_FUNC_SMBUS_READ_I2C_BLOCK,
+ priv->features & FEATURE_I2C_BLOCK_READ) |
+ /* 0x04000000 */
+ enable_flag(I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) | /* 0x08000000 */
+
+ enable_flag(I2C_FUNC_SMBUS_BYTE) | /* _READ_BYTE _WRITE_BYTE */
+ enable_flag(I2C_FUNC_SMBUS_BYTE_DATA) | /* _READ_BYTE_DATA
+ * _WRITE_BYTE_DATA
+ */
+ enable_flag(I2C_FUNC_SMBUS_WORD_DATA) | /* _READ_WORD_DATA
+ * _WRITE_WORD_DATA
+ */
+ enable_flag(I2C_FUNC_SMBUS_BLOCK_DATA) | /* _READ_BLOCK_DATA
+ * _WRITE_BLOCK_DATA
+ */
+ disable_flag(I2C_FUNC_SMBUS_I2C_BLOCK) | /* _READ_I2C_BLOCK
+ * _WRITE_I2C_BLOCK
+ */
+ disable_flag(I2C_FUNC_SMBUS_EMUL); /* _QUICK _BYTE
* _BYTE_DATA _WORD_DATA
* _PROC_CALL
* _WRITE_BLOCK_DATA
@@ -632,6 +636,10 @@ static u32 i801_func(struct i2c_adapter *adapter)
return f;
}
+#undef enable_flag
+#undef disable_flag
+#undef enable_flag_if
+
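These helpers replace the old `!FLAG |` pattern below: since `!I2C_FUNC_SMBUS_READ_BYTE` evaluates to the constant 0, OR-ing it in was always a no-op that merely kept the flag name visible in the source. The macros state that intent explicitly. A small illustration with hypothetical flag values:

/* caps is 0x1, plus 0x4 when has_pec is true; 0x2 is documented but off. */
static unsigned int example_caps(int has_pec)
{
	return enable_flag(0x1) |
	       disable_flag(0x2) |
	       enable_flag_if(0x4, has_pec);
}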
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = i801_access,
.functionality = i801_func,
@@ -640,10 +648,10 @@ static const struct i2c_algorithm smbus_algorithm = {
/********************************
*** Part 2 - Driver Handlers ***
********************************/
-static int pi2c_probe(struct platform_device *pldev)
+static int kpc_i2c_probe(struct platform_device *pldev)
{
int err;
- struct i2c_device *priv;
+ struct kpc_i2c *priv;
struct resource *res;
priv = devm_kzalloc(&pldev->dev, sizeof(*priv), GFP_KERNEL);
@@ -659,7 +667,7 @@ static int pi2c_probe(struct platform_device *pldev)
if (!res)
return -ENXIO;
- priv->smba = (unsigned long)devm_ioremap_nocache(&pldev->dev,
+ priv->smba = (unsigned long)devm_ioremap(&pldev->dev,
res->start,
resource_size(res));
if (!priv->smba)
@@ -692,11 +700,11 @@ static int pi2c_probe(struct platform_device *pldev)
return 0;
}
-static int pi2c_remove(struct platform_device *pldev)
+static int kpc_i2c_remove(struct platform_device *pldev)
{
- struct i2c_device *lddev;
+ struct kpc_i2c *lddev;
- lddev = (struct i2c_device *)platform_get_drvdata(pldev);
+ lddev = (struct kpc_i2c *)platform_get_drvdata(pldev);
i2c_del_adapter(&lddev->adapter);
@@ -710,12 +718,12 @@ static int pi2c_remove(struct platform_device *pldev)
return 0;
}
-static struct platform_driver i2c_plat_driver_i = {
- .probe = pi2c_probe,
- .remove = pi2c_remove,
+static struct platform_driver kpc_i2c_driver = {
+ .probe = kpc_i2c_probe,
+ .remove = kpc_i2c_remove,
.driver = {
.name = KP_DRIVER_NAME_I2C,
},
};
-module_platform_driver(i2c_plat_driver_i);
+module_platform_driver(kpc_i2c_driver);
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 8becf972af9c..1c360daa703d 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -464,7 +464,7 @@ kp_spi_probe(struct platform_device *pldev)
goto free_master;
}
- kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start,
+ kpspi->base = devm_ioremap(&pldev->dev, r->start,
resource_size(r));
status = spi_register_master(master);
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index cb52bd9a6d2f..40525540dde6 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -49,9 +49,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
u64 dma_addr;
u64 user_ctl;
- BUG_ON(priv == NULL);
ldev = priv->ldev;
- BUG_ON(ldev == NULL);
acd = kzalloc(sizeof(*acd), GFP_KERNEL);
if (!acd) {
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
index a05ae6d40db9..ec79a8500caf 100644
--- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -122,7 +122,7 @@ int kpc_dma_probe(struct platform_device *pldev)
rv = -ENXIO;
goto err_kfree;
}
- ldev->eng_regs = ioremap_nocache(r->start, resource_size(r));
+ ldev->eng_regs = ioremap(r->start, resource_size(r));
if (!ldev->eng_regs) {
dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__);
rv = -ENXIO;
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 3cffc8be6656..211dd4a11cac 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -45,7 +45,7 @@ struct wep_key {
* function prototypes
*/
static int ks_wlan_open(struct net_device *dev);
-static void ks_wlan_tx_timeout(struct net_device *dev);
+static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ks_wlan_close(struct net_device *dev);
static void ks_wlan_set_rx_mode(struct net_device *dev);
@@ -2498,7 +2498,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
}
static
-void ks_wlan_tx_timeout(struct net_device *dev)
+void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ks_wlan_private *priv = netdev_priv(dev);
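For context, this signature change tracks the v5.6 netdev core update that passes the index of the stuck queue to ndo_tx_timeout. A sketch of the updated hook shape (names and body illustrative only):

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "tx timeout on queue %u\n", txqueue);
	/* ... reset the queue / schedule recovery ... */
}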
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 642adc4c24d2..c394abffea86 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -38,4 +38,8 @@ source "drivers/staging/media/ipu3/Kconfig"
source "drivers/staging/media/soc_camera/Kconfig"
+source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig"
+
+source "drivers/staging/media/rkisp1/Kconfig"
+
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 2f1711a8aeed..ea9fce8014bb 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0/
+obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rkisp1/
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
index 6f0cd0784786..3be41698df4c 100644
--- a/drivers/staging/media/allegro-dvt/allegro-core.c
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -2914,7 +2914,7 @@ static int allegro_probe(struct platform_device *pdev)
"regs resource missing from device tree\n");
return -EINVAL;
}
- regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (IS_ERR(regs)) {
dev_err(&pdev->dev, "failed to map registers\n");
return PTR_ERR(regs);
@@ -2932,7 +2932,7 @@ static int allegro_probe(struct platform_device *pdev)
"sram resource missing from device tree\n");
return -EINVAL;
}
- sram_regs = devm_ioremap_nocache(&pdev->dev,
+ sram_regs = devm_ioremap(&pdev->dev,
sram_res->start,
resource_size(sram_res));
if (IS_ERR(sram_regs)) {
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 5d6b0383d280..496b30c3c396 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_VIDEO_HANTRO) += hantro-vpu.o
hantro-vpu-y += \
hantro_drv.o \
hantro_v4l2.o \
+ hantro_postproc.o \
hantro_h1_jpeg_enc.o \
hantro_g1_h264_dec.o \
hantro_g1_mpeg2_dec.o \
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index deb90ae37859..b0faa43b3f79 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -60,6 +60,8 @@ struct hantro_irq {
* @num_enc_fmts: Number of encoder formats.
* @dec_fmts: Decoder formats.
* @num_dec_fmts: Number of decoder formats.
+ * @postproc_fmts: Post-processor formats.
+ * @num_postproc_fmts: Number of post-processor formats.
* @codec: Supported codecs
* @codec_ops: Codec ops.
* @init: Initialize hardware.
@@ -70,6 +72,7 @@ struct hantro_irq {
* @num_clocks: number of clocks in the array
* @reg_names: array of register range names
* @num_regs: number of register range names in the array
+ * @postproc_regs: &struct hantro_postproc_regs pointer
*/
struct hantro_variant {
unsigned int enc_offset;
@@ -78,6 +81,8 @@ struct hantro_variant {
unsigned int num_enc_fmts;
const struct hantro_fmt *dec_fmts;
unsigned int num_dec_fmts;
+ const struct hantro_fmt *postproc_fmts;
+ unsigned int num_postproc_fmts;
unsigned int codec;
const struct hantro_codec_ops *codec_ops;
int (*init)(struct hantro_dev *vpu);
@@ -88,6 +93,7 @@ struct hantro_variant {
int num_clocks;
const char * const *reg_names;
int num_regs;
+ const struct hantro_postproc_regs *postproc_regs;
};
/**
@@ -213,6 +219,7 @@ struct hantro_dev {
* context, and it's called right before
* calling v4l2_m2m_job_finish.
* @codec_ops: Set of operations related to codec mode.
+ * @postproc: Post-processing context.
* @jpeg_enc: JPEG-encoding context.
* @mpeg2_dec: MPEG-2-decoding context.
* @vp8_dec: VP8-decoding context.
@@ -237,6 +244,7 @@ struct hantro_ctx {
unsigned int bytesused);
const struct hantro_codec_ops *codec_ops;
+ struct hantro_postproc_ctx postproc;
/* Specific for particular codec modes. */
union {
@@ -274,6 +282,23 @@ struct hantro_reg {
u32 mask;
};
+struct hantro_postproc_regs {
+ struct hantro_reg pipeline_en;
+ struct hantro_reg max_burst;
+ struct hantro_reg clk_gate;
+ struct hantro_reg out_swap32;
+ struct hantro_reg out_endian;
+ struct hantro_reg out_luma_base;
+ struct hantro_reg input_width;
+ struct hantro_reg input_height;
+ struct hantro_reg output_width;
+ struct hantro_reg output_height;
+ struct hantro_reg input_fmt;
+ struct hantro_reg output_fmt;
+ struct hantro_reg orig_width;
+ struct hantro_reg display_width;
+};
+
/* Logging helpers */
/**
@@ -352,16 +377,30 @@ static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
return val;
}
-static inline void hantro_reg_write(struct hantro_dev *vpu,
- const struct hantro_reg *reg,
- u32 val)
+static inline u32 vdpu_read_mask(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
{
u32 v;
v = vdpu_read(vpu, reg->base);
v &= ~(reg->mask << reg->shift);
v |= ((val & reg->mask) << reg->shift);
- vdpu_write_relaxed(vpu, v, reg->base);
+ return v;
+}
+
+static inline void hantro_reg_write(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+}
+
+static inline void hantro_reg_write_s(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
}
bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
@@ -381,4 +420,23 @@ hantro_get_dst_buf(struct hantro_ctx *ctx)
return v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
}
+static inline bool
+hantro_needs_postproc(struct hantro_ctx *ctx, const struct hantro_fmt *fmt)
+{
+ return fmt->fourcc != V4L2_PIX_FMT_NV12;
+}
+
+static inline dma_addr_t
+hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
+{
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ return ctx->postproc.dec_q[vb->index].dma;
+ return vb2_dma_contig_plane_dma_addr(vb, 0);
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx);
+void hantro_postproc_enable(struct hantro_ctx *ctx);
+void hantro_postproc_free(struct hantro_ctx *ctx);
+int hantro_postproc_alloc(struct hantro_ctx *ctx);
+
#endif /* HANTRO_H_ */
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 26108c96b674..97c615a2f057 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -53,7 +53,7 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
if (index < 0)
return 0;
buf = vb2_get_buffer(q, index);
- return vb2_dma_contig_plane_dma_addr(buf, 0);
+ return hantro_get_dec_buf_addr(ctx, buf);
}
static int
@@ -152,16 +152,21 @@ void hantro_watchdog(struct work_struct *work)
}
}
-void hantro_prepare_run(struct hantro_ctx *ctx)
+void hantro_start_prepare_run(struct hantro_ctx *ctx)
{
struct vb2_v4l2_buffer *src_buf;
src_buf = hantro_get_src_buf(ctx);
v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
&ctx->ctrl_handler);
+
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ hantro_postproc_enable(ctx);
+ else
+ hantro_postproc_disable(ctx);
}
-void hantro_finish_run(struct hantro_ctx *ctx)
+void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
struct vb2_v4l2_buffer *src_buf;
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 3cd40a8f0daa..424c648ce9fc 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -244,7 +244,7 @@ static void set_buffers(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, src_dma, G1_REG_ADDR_STR);
/* Destination (decoded frame) buffer. */
- dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
/* Adjust dma addr to start at second line for bottom field */
if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
offset = ALIGN(ctx->src_fmt.width, MB_DIM);
@@ -288,7 +288,7 @@ void hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
set_ref(ctx);
set_buffers(ctx);
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
/* Start decoding! */
vdpu_write_relaxed(vpu,
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index f3bf67d8a289..24041849384a 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -121,7 +121,7 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
/* Destination frame buffer */
- addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ addr = hantro_get_dec_buf_addr(ctx, dst_buf);
current_addr = addr;
if (picture->picture_structure == PICT_BOTTOM_FIELD)
@@ -168,7 +168,7 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
dst_buf = hantro_get_dst_buf(ctx);
/* Apply request controls if any */
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
slice_params = hantro_get_ctrl(ctx,
V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
@@ -244,7 +244,7 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
&dst_buf->vb2_buf,
sequence, picture, slice_params);
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
reg = G1_REG_DEC_E(1);
vdpu_write(vpu, reg, G1_SWREG(1));
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h
index 5c0ea7994336..c1756e3d5391 100644
--- a/drivers/staging/media/hantro/hantro_g1_regs.h
+++ b/drivers/staging/media/hantro/hantro_g1_regs.h
@@ -9,6 +9,8 @@
#ifndef HANTRO_G1_REGS_H_
#define HANTRO_G1_REGS_H_
+#define G1_SWREG(nr) ((nr) * 4)
+
/* Decoder registers. */
#define G1_REG_INTERRUPT 0x004
#define G1_REG_INTERRUPT_DEC_PIC_INF BIT(24)
@@ -298,4 +300,55 @@
#define G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
#define G1_REG_SOFT_RESET 0x194
+/* Post-processor registers. */
+#define G1_REG_PP_INTERRUPT G1_SWREG(60)
+#define G1_REG_PP_READY_IRQ BIT(12)
+#define G1_REG_PP_IRQ BIT(8)
+#define G1_REG_PP_IRQ_DIS BIT(4)
+#define G1_REG_PP_PIPELINE_EN BIT(1)
+#define G1_REG_PP_EXTERNAL_TRIGGER BIT(0)
+#define G1_REG_PP_DEV_CONFIG G1_SWREG(61)
+#define G1_REG_PP_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_PP_AXI_WR_ID(v) (((v) << 16) & GENMASK(23, 16))
+#define G1_REG_PP_INSWAP32_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_PP_DATA_DISC_E(v) ((v) ? BIT(9) : 0)
+#define G1_REG_PP_CLK_GATE_E(v) ((v) ? BIT(8) : 0)
+#define G1_REG_PP_IN_ENDIAN(v) ((v) ? BIT(7) : 0)
+#define G1_REG_PP_OUT_ENDIAN(v) ((v) ? BIT(6) : 0)
+#define G1_REG_PP_OUTSWAP32_E(v) ((v) ? BIT(5) : 0)
+#define G1_REG_PP_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
+#define G1_REG_PP_IN_LUMA_BASE G1_SWREG(63)
+#define G1_REG_PP_IN_CB_BASE G1_SWREG(64)
+#define G1_REG_PP_IN_CR_BASE G1_SWREG(65)
+#define G1_REG_PP_OUT_LUMA_BASE G1_SWREG(66)
+#define G1_REG_PP_OUT_CHROMA_BASE G1_SWREG(67)
+#define G1_REG_PP_CONTRAST_ADJUST G1_SWREG(68)
+#define G1_REG_PP_COLOR_CONVERSION G1_SWREG(69)
+#define G1_REG_PP_COLOR_CONVERSION0 G1_SWREG(70)
+#define G1_REG_PP_COLOR_CONVERSION1 G1_SWREG(71)
+#define G1_REG_PP_INPUT_SIZE G1_SWREG(72)
+#define G1_REG_PP_INPUT_SIZE_HEIGHT(v) (((v) << 9) & GENMASK(16, 9))
+#define G1_REG_PP_INPUT_SIZE_WIDTH(v) (((v) << 0) & GENMASK(8, 0))
+#define G1_REG_PP_SCALING0 G1_SWREG(79)
+#define G1_REG_PP_PADD_R(v) (((v) << 23) & GENMASK(27, 23))
+#define G1_REG_PP_PADD_G(v) (((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_RANGEMAP_Y(v) ((v) ? BIT(31) : 0)
+#define G1_REG_PP_RANGEMAP_C(v) ((v) ? BIT(30) : 0)
+#define G1_REG_PP_YCBCR_RANGE(v) ((v) ? BIT(29) : 0)
+#define G1_REG_PP_RGB_16(v) ((v) ? BIT(28) : 0)
+#define G1_REG_PP_SCALING1 G1_SWREG(80)
+#define G1_REG_PP_PADD_B(v) (((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_MASK_R G1_SWREG(82)
+#define G1_REG_PP_MASK_G G1_SWREG(83)
+#define G1_REG_PP_MASK_B G1_SWREG(84)
+#define G1_REG_PP_CONTROL G1_SWREG(85)
+#define G1_REG_PP_CONTROL_IN_FMT(v) (((v) << 29) & GENMASK(31, 29))
+#define G1_REG_PP_CONTROL_OUT_FMT(v) (((v) << 26) & GENMASK(28, 26))
+#define G1_REG_PP_CONTROL_OUT_HEIGHT(v) (((v) << 15) & GENMASK(25, 15))
+#define G1_REG_PP_CONTROL_OUT_WIDTH(v) (((v) << 4) & GENMASK(14, 4))
+#define G1_REG_PP_MASK1_ORIG_WIDTH G1_SWREG(88)
+#define G1_REG_PP_ORIG_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PP_DISPLAY_WIDTH G1_SWREG(92)
+#define G1_REG_PP_FUSE G1_SWREG(99)
+
#endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index cad18094fee0..a5cdf150cd16 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -422,7 +422,7 @@ static void cfg_buffers(struct hantro_ctx *ctx,
}
vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(0));
- dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ dst_dma = hantro_get_dec_buf_addr(ctx, &vb2_dst->vb2_buf);
vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
}
@@ -435,7 +435,7 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
u32 mb_width, mb_height;
u32 reg;
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
hdr = hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER);
if (WARN_ON(!hdr))
@@ -496,7 +496,7 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
cfg_ref(ctx, hdr);
cfg_buffers(ctx, hdr);
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
}
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index 938b48d4d3d9..0d8afc3e5d71 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -87,7 +87,7 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -122,7 +122,7 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
vepu_write(vpu, reg, H1_REG_ENC_CTRL);
}
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 568640eab3a6..f2d3e81fb6ce 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -562,7 +562,7 @@ int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
struct hantro_h264_reflist_builder reflist_builder;
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
ctrls->scaling =
hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX);
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index fa91dd1848b7..2398d4c1f207 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -28,11 +28,13 @@ struct hantro_variant;
* @cpu: CPU pointer to the buffer.
* @dma: DMA address of the buffer.
* @size: Size of the buffer.
+ * @attrs: Attributes of the DMA mapping.
*/
struct hantro_aux_buf {
void *cpu;
dma_addr_t dma;
size_t size;
+ unsigned long attrs;
};
/**
@@ -107,6 +109,15 @@ struct hantro_vp8_dec_hw_ctx {
};
/**
+ * struct hantro_postproc_ctx
+ *
+ * @dec_q: References buffers, in decoder format.
+ */
+struct hantro_postproc_ctx {
+ struct hantro_aux_buf dec_q[VB2_MAX_FRAME];
+};
+
+/**
* struct hantro_codec_ops - codec mode specific operations
*
* @init: If needed, can be used for initialization.
@@ -141,14 +152,16 @@ extern const struct hantro_variant rk3399_vpu_variant;
extern const struct hantro_variant rk3328_vpu_variant;
extern const struct hantro_variant rk3288_vpu_variant;
+extern const struct hantro_postproc_regs hantro_g1_postproc_regs;
+
extern const u32 hantro_vp8_dec_mc_filter[8][6];
void hantro_watchdog(struct work_struct *work);
void hantro_run(struct hantro_ctx *ctx);
void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
enum vb2_buffer_state result);
-void hantro_prepare_run(struct hantro_ctx *ctx);
-void hantro_finish_run(struct hantro_ctx *ctx);
+void hantro_start_prepare_run(struct hantro_ctx *ctx);
+void hantro_end_prepare_run(struct hantro_ctx *ctx);
void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
new file mode 100644
index 000000000000..28a85d301d7f
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro G1 post-processor support
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
+
+#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
+{ \
+ hantro_reg_write((vpu), \
+ &((vpu)->variant->postproc_regs->reg_name), \
+ (val)); \
+}
+
+#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
+{ \
+ hantro_reg_write_s((vpu), \
+ &((vpu)->variant->postproc_regs->reg_name), \
+ (val)); \
+}
+
+#define VPU_PP_IN_YUYV 0x0
+#define VPU_PP_IN_NV12 0x1
+#define VPU_PP_IN_YUV420 0x2
+#define VPU_PP_IN_YUV420_TILED 0x5
+#define VPU_PP_OUT_RGB 0x0
+#define VPU_PP_OUT_YUYV 0x3
+
+const struct hantro_postproc_regs hantro_g1_postproc_regs = {
+ .pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
+ .max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
+ .clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
+ .out_swap32 = {G1_REG_PP_DEV_CONFIG, 5, 0x1},
+ .out_endian = {G1_REG_PP_DEV_CONFIG, 6, 0x1},
+ .out_luma_base = {G1_REG_PP_OUT_LUMA_BASE, 0, 0xffffffff},
+ .input_width = {G1_REG_PP_INPUT_SIZE, 0, 0x1ff},
+ .input_height = {G1_REG_PP_INPUT_SIZE, 9, 0x1ff},
+ .output_width = {G1_REG_PP_CONTROL, 4, 0x7ff},
+ .output_height = {G1_REG_PP_CONTROL, 15, 0x7ff},
+ .input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
+ .output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
+ .orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
+ .display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
+};
+
+void hantro_postproc_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+ u32 src_pp_fmt, dst_pp_fmt;
+ dma_addr_t dst_dma;
+
+ if (!vpu->variant->postproc_regs)
+ return;
+
+ /* Turn on pipeline mode. Must be done first. */
+ HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1);
+
+ src_pp_fmt = VPU_PP_IN_NV12;
+
+ switch (ctx->vpu_dst_fmt->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ dst_pp_fmt = VPU_PP_OUT_YUYV;
+ break;
+ default:
+ WARN(1, "output format %d not supported by the post-processor, this wasn't expected.",
+ ctx->vpu_dst_fmt->fourcc);
+ dst_pp_fmt = 0;
+ break;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ HANTRO_PP_REG_WRITE(vpu, clk_gate, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, out_endian, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, out_swap32, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, max_burst, 16);
+ HANTRO_PP_REG_WRITE(vpu, out_luma_base, dst_dma);
+ HANTRO_PP_REG_WRITE(vpu, input_width, MB_WIDTH(ctx->dst_fmt.width));
+ HANTRO_PP_REG_WRITE(vpu, input_height, MB_HEIGHT(ctx->dst_fmt.height));
+ HANTRO_PP_REG_WRITE(vpu, input_fmt, src_pp_fmt);
+ HANTRO_PP_REG_WRITE(vpu, output_fmt, dst_pp_fmt);
+ HANTRO_PP_REG_WRITE(vpu, output_width, ctx->dst_fmt.width);
+ HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
+ HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
+ HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
+}
+
+void hantro_postproc_free(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ for (i = 0; i < VB2_MAX_FRAME; ++i) {
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+ if (priv->cpu) {
+ dma_free_attrs(vpu->dev, priv->size, priv->cpu,
+ priv->dma, priv->attrs);
+ priv->cpu = NULL;
+ }
+ }
+}
+
+int hantro_postproc_alloc(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
+ unsigned int num_buffers = cap_queue->num_buffers;
+ unsigned int i, buf_size;
+
+ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
+
+ for (i = 0; i < num_buffers; ++i) {
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+ /*
+ * The buffers on this queue are meant as intermediate
+ * buffers for the decoder, so no mapping is needed.
+ */
+ priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+ priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
+ GFP_KERNEL, priv->attrs);
+ if (!priv->cpu)
+ return -ENOMEM;
+ priv->size = buf_size;
+ }
+ return 0;
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (!vpu->variant->postproc_regs)
+ return;
+
+ HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
+}
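One subtlety in hantro_postproc_alloc() above: with DMA_ATTR_NO_KERNEL_MAPPING, dma_alloc_attrs() may return an opaque cookie rather than a dereferenceable kernel address; it is only valid to pass back to dma_free_attrs(), which is exactly how hantro_postproc_free() uses it. A minimal sketch:

/* Sketch: device-only buffer; 'cookie' must not be dereferenced by the CPU. */
dma_addr_t dma;
void *cookie = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING);
if (!cookie)
	return -ENOMEM;
/* ... program 'dma' into the hardware ... */
dma_free_attrs(dev, size, cookie, dma, DMA_ATTR_NO_KERNEL_MAPPING);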
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 1dae76f20034..0198bcda26b7 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -47,11 +47,30 @@ hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
}
static const struct hantro_fmt *
-hantro_find_format(const struct hantro_fmt *formats, unsigned int num_fmts,
- u32 fourcc)
+hantro_get_postproc_formats(const struct hantro_ctx *ctx,
+ unsigned int *num_fmts)
{
- unsigned int i;
+ if (hantro_is_encoder_ctx(ctx)) {
+ *num_fmts = 0;
+ return NULL;
+ }
+
+ *num_fmts = ctx->dev->variant->num_postproc_fmts;
+ return ctx->dev->variant->postproc_fmts;
+}
+
+static const struct hantro_fmt *
+hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
+{
+ const struct hantro_fmt *formats;
+ unsigned int i, num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++)
if (formats[i].fourcc == fourcc)
return &formats[i];
@@ -59,11 +78,12 @@ hantro_find_format(const struct hantro_fmt *formats, unsigned int num_fmts,
}
static const struct hantro_fmt *
-hantro_get_default_fmt(const struct hantro_fmt *formats, unsigned int num_fmts,
- bool bitstream)
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream)
{
- unsigned int i;
+ const struct hantro_fmt *formats;
+ unsigned int i, num_fmts;
+ formats = hantro_get_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
if (bitstream == (formats[i].codec_mode !=
HANTRO_MODE_NONE))
@@ -89,8 +109,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
- const struct hantro_fmt *formats, *fmt;
- unsigned int num_fmts;
+ const struct hantro_fmt *fmt;
if (fsize->index != 0) {
vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
@@ -98,8 +117,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
- formats = hantro_get_formats(ctx, &num_fmts);
- fmt = hantro_find_format(formats, num_fmts, fsize->pixel_format);
+ fmt = hantro_find_format(ctx, fsize->pixel_format);
if (!fmt) {
vpu_debug(0, "unsupported bitstream format (%08x)\n",
fsize->pixel_format);
@@ -150,6 +168,24 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
}
++j;
}
+
+ /*
+ * Enumerate post-processed formats. As per the specification,
+ * we enumerate these formats after the natively decoded formats
+ * as a hint to applications about the preferred format.
+ */
+ if (!capture)
+ return -EINVAL;
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ if (j == f->index) {
+ fmt = &formats[i];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+
return -EINVAL;
}
@@ -196,8 +232,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- const struct hantro_fmt *formats, *fmt, *vpu_fmt;
- unsigned int num_fmts;
+ const struct hantro_fmt *fmt, *vpu_fmt;
bool coded;
coded = capture == hantro_is_encoder_ctx(ctx);
@@ -208,10 +243,9 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
(pix_mp->pixelformat >> 16) & 0x7f,
(pix_mp->pixelformat >> 24) & 0x7f);
- formats = hantro_get_formats(ctx, &num_fmts);
- fmt = hantro_find_format(formats, num_fmts, pix_mp->pixelformat);
+ fmt = hantro_find_format(ctx, pix_mp->pixelformat);
if (!fmt) {
- fmt = hantro_get_default_fmt(formats, num_fmts, coded);
+ fmt = hantro_get_default_fmt(ctx, coded);
f->fmt.pix_mp.pixelformat = fmt->fourcc;
}
@@ -246,7 +280,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
*
* The H264 decoder needs extra space on the output buffers
* to store motion vectors. This is needed for reference
- * frames.
+ * frames and only if the format is non-post-processed NV12.
*
* Memory layout is as follow:
*
@@ -260,7 +294,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
* | MC sync 32 bytes |
* +---------------------------+
*/
- if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
+ !hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
pix_mp->plane_fmt[0].sizeimage +=
64 * MB_WIDTH(pix_mp->width) *
MB_WIDTH(pix_mp->height) + 32;
@@ -306,12 +341,10 @@ hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
static void
hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
{
- const struct hantro_fmt *vpu_fmt, *formats;
+ const struct hantro_fmt *vpu_fmt;
struct v4l2_pix_format_mplane *fmt;
- unsigned int num_fmts;
- formats = hantro_get_formats(ctx, &num_fmts);
- vpu_fmt = hantro_get_default_fmt(formats, num_fmts, true);
+ vpu_fmt = hantro_get_default_fmt(ctx, true);
if (hantro_is_encoder_ctx(ctx)) {
ctx->vpu_dst_fmt = vpu_fmt;
@@ -332,12 +365,10 @@ hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
static void
hantro_reset_raw_fmt(struct hantro_ctx *ctx)
{
- const struct hantro_fmt *raw_vpu_fmt, *formats;
+ const struct hantro_fmt *raw_vpu_fmt;
struct v4l2_pix_format_mplane *raw_fmt, *encoded_fmt;
- unsigned int num_fmts;
- formats = hantro_get_formats(ctx, &num_fmts);
- raw_vpu_fmt = hantro_get_default_fmt(formats, num_fmts, false);
+ raw_vpu_fmt = hantro_get_default_fmt(ctx, false);
if (hantro_is_encoder_ctx(ctx)) {
ctx->vpu_src_fmt = raw_vpu_fmt;
@@ -384,8 +415,6 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- const struct hantro_fmt *formats;
- unsigned int num_fmts;
int ret;
ret = vidioc_try_fmt_out_mplane(file, priv, f);
@@ -421,9 +450,7 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
return -EBUSY;
}
- formats = hantro_get_formats(ctx, &num_fmts);
- ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
- pix_mp->pixelformat);
+ ctx->vpu_src_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
ctx->src_fmt = *pix_mp;
/*
@@ -457,9 +484,7 @@ static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
- const struct hantro_fmt *formats;
struct vb2_queue *vq;
- unsigned int num_fmts;
int ret;
/* Change not allowed if queue is busy. */
@@ -488,9 +513,7 @@ static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
if (ret)
return ret;
- formats = hantro_get_formats(ctx, &num_fmts);
- ctx->vpu_dst_fmt = hantro_find_format(formats, num_fmts,
- pix_mp->pixelformat);
+ ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
ctx->dst_fmt = *pix_mp;
/*
@@ -650,10 +673,23 @@ static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
vpu_debug(4, "Codec mode = %d\n", codec_mode);
ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
- if (ctx->codec_ops->init)
+ if (ctx->codec_ops->init) {
ret = ctx->codec_ops->init(ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) {
+ ret = hantro_postproc_alloc(ctx);
+ if (ret)
+ goto err_codec_exit;
+ }
}
+ return ret;
+err_codec_exit:
+ if (ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
return ret;
}
@@ -680,6 +716,7 @@ static void hantro_stop_streaming(struct vb2_queue *q)
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
if (hantro_vq_is_coded(q)) {
+ hantro_postproc_free(ctx);
if (ctx->codec_ops && ctx->codec_ops->exit)
ctx->codec_ops->exit(ctx);
}
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index f8db6fcaad73..2f914b37b9e5 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -56,6 +56,13 @@ static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
},
};
+static const struct hantro_fmt rk3288_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+};
+
static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
@@ -215,6 +222,9 @@ const struct hantro_variant rk3288_vpu_variant = {
.dec_offset = 0x400,
.dec_fmts = rk3288_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+ .postproc_fmts = rk3288_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rk3288_vpu_postproc_fmts),
+ .postproc_regs = &hantro_g1_postproc_regs,
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3288_vpu_codec_ops,
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 067892345b5d..4c2d43fb6fd1 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -118,7 +118,7 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -156,6 +156,6 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
| VEPU_REG_ENCODE_ENABLE;
/* Kick the watchdog and start encoding */
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
}
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index b40d2cdf832f..7e9aad671489 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -169,7 +169,7 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
slice_params = hantro_get_ctrl(ctx,
V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
@@ -250,7 +250,7 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
sequence, picture, slice_params);
/* Kick the watchdog and start decoding */
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
vdpu_write(vpu, reg, VDPU_SWREG(57));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index 76d7ed3fd69a..a4a792f00b11 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -513,7 +513,7 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
u32 mb_width, mb_height;
u32 reg;
- hantro_prepare_run(ctx);
+ hantro_start_prepare_run(ctx);
hdr = hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER);
if (WARN_ON(!hdr))
@@ -587,7 +587,7 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
cfg_ref(ctx, hdr);
cfg_buffers(ctx, hdr);
- hantro_finish_run(ctx);
+ hantro_end_prepare_run(ctx);
hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
}
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 99166afca071..383abecb3bec 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -251,8 +251,6 @@ struct csi_state {
struct mipi_csis_event events[MIPI_CSIS_NUM_EVENTS];
- struct v4l2_async_notifier subdev_notifier;
-
struct csis_hw_reset hw_reset;
struct regulator *mipi_phy_regulator;
bool sink_linked;
@@ -1104,7 +1102,6 @@ static int mipi_csis_remove(struct platform_device *pdev)
mipi_csis_debugfs_exit(state);
v4l2_async_unregister_subdev(&state->mipi_sd);
- v4l2_async_notifier_unregister(&state->subdev_notifier);
pm_runtime_disable(&pdev->dev);
mipi_csis_pm_suspend(&pdev->dev, true);
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index b44bb4a72ca7..dc356d6f03c4 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -5,15 +5,9 @@ staging directory.
as well as formats and the binary used to a request. Remove the
opportunistic buffer management. (Sakari)
-- Using ENABLED and IMMUTABLE link flags for the links where those are
- relevant. (Sakari)
-
- IPU3 driver documentation (Laurent)
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
-- uAPI documentation:
- Move acronyms to doc-rst file. (Mauro)
-
- Switch to yavta from v4l2n in driver docs.
- Elaborate the functionality of different selection rectangles in driver
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index 08eaa0bad0de..1c9c3ba4d518 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s {
__u16 reserved1;
__u32 bayer_sign;
__u8 bayer_nf;
- __u8 reserved2[3];
+ __u8 reserved2[7];
} __attribute__((aligned(32))) __packed;
/**
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index fd1ed84c400c..f36de501edc6 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -1450,7 +1450,7 @@ bool imgu_css_pipe_queue_empty(struct imgu_css *css, unsigned int pipe)
bool imgu_css_queue_empty(struct imgu_css *css)
{
unsigned int pipe;
- bool ret = 0;
+ bool ret = false;
for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
ret &= imgu_css_pipe_queue_empty(css, pipe);
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 3c7ad1eed434..569e27b824c8 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -1260,6 +1260,11 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
r = media_create_pad_link(&vdev->entity, 0, &sd->entity,
node_num, flags);
} else {
+ if (node->id == IMGU_NODE_OUT) {
+ flags |= MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+ node->enabled = true;
+ }
+
r = media_create_pad_link(&sd->entity, node_num, &vdev->entity,
0, flags);
}
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 0a1a04fd5d13..5c5dabed2f09 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -133,6 +133,8 @@ vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
struct amvdec_buffer *new_buf;
new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
+ if (!new_buf)
+ return;
new_buf->vb = vb;
mutex_lock(&sess->bufs_recycle_lock);
@@ -956,6 +958,10 @@ static const struct of_device_id vdec_dt_match[] = {
.data = &vdec_platform_gxm },
{ .compatible = "amlogic,gxl-vdec",
.data = &vdec_platform_gxl },
+ { .compatible = "amlogic,g12a-vdec",
+ .data = &vdec_platform_g12a },
+ { .compatible = "amlogic,sm1-vdec",
+ .data = &vdec_platform_sm1 },
{}
};
MODULE_DEVICE_TABLE(of, vdec_dt_match);
@@ -1003,6 +1009,16 @@ static int vdec_probe(struct platform_device *pdev)
if (IS_ERR(core->canvas))
return PTR_ERR(core->canvas);
+ of_id = of_match_node(vdec_dt_match, dev->of_node);
+ core->platform = of_id->data;
+
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1) {
+ core->vdec_hevcf_clk = devm_clk_get(dev, "vdec_hevcf");
+ if (IS_ERR(core->vdec_hevcf_clk))
+ return -EPROBE_DEFER;
+ }
+
core->dos_parser_clk = devm_clk_get(dev, "dos_parser");
if (IS_ERR(core->dos_parser_clk))
return -EPROBE_DEFER;
@@ -1045,8 +1061,6 @@ static int vdec_probe(struct platform_device *pdev)
goto err_vdev_release;
}
- of_id = of_match_node(vdec_dt_match, dev->of_node);
- core->platform = of_id->data;
core->vdev_dec = vdev;
core->dev_dec = dev;
mutex_init(&core->lock);
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index d811e7976519..0faa1ec4858e 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -74,6 +74,7 @@ struct amvdec_core {
struct clk *dos_clk;
struct clk *vdec_1_clk;
struct clk *vdec_hevc_clk;
+ struct clk *vdec_hevcf_clk;
struct reset_control *esparser_reset;
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
index 3a15c6fc0567..3fe2de0c9331 100644
--- a/drivers/staging/media/meson/vdec/vdec_1.c
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -18,6 +18,7 @@
#define AO_RTI_GEN_PWR_SLEEP0 0xe8
#define AO_RTI_GEN_PWR_ISO0 0xec
#define GEN_PWR_VDEC_1 (BIT(3) | BIT(2))
+ #define GEN_PWR_VDEC_1_SM1 (BIT(1))
#define MC_SIZE (4096 * 4)
@@ -142,12 +143,20 @@ static int vdec_1_stop(struct amvdec_session *sess)
amvdec_read_dos(core, DOS_SW_RESET0);
/* enable vdec1 isolation */
- regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
+ else
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
/* power off vdec1 memories */
amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0xffffffff);
/* power off vdec1 */
- regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1_SM1, GEN_PWR_VDEC_1_SM1);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
clk_disable_unprepare(core->vdec_1_clk);
@@ -170,8 +179,12 @@ static int vdec_1_start(struct amvdec_session *sess)
return ret;
/* Enable power for VDEC_1 */
- regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
- GEN_PWR_VDEC_1, 0);
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1_SM1, 0);
+ else
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, 0);
usleep_range(10, 20);
/* Reset VDEC1 */
@@ -183,7 +196,11 @@ static int vdec_1_start(struct amvdec_session *sess)
/* enable VDEC Memories */
amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0);
/* Remove VDEC1 Isolation */
- regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
+ if (core->platform->revision == VDEC_REVISION_SM1)
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_ISO0,
+ GEN_PWR_VDEC_1_SM1, 0);
+ else
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
/* Reset DOS top registers */
amvdec_write_dos(core, DOS_VDEC_MCRCC_STALL_CTRL, 0);
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index 824dbc7f46f5..ea39f8209ec7 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -82,6 +82,54 @@ static const struct amvdec_format vdec_formats_gxm[] = {
},
};
+static const struct amvdec_format vdec_formats_g12a[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+static const struct amvdec_format vdec_formats_sm1[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
const struct vdec_platform vdec_platform_gxbb = {
.formats = vdec_formats_gxbb,
.num_formats = ARRAY_SIZE(vdec_formats_gxbb),
@@ -99,3 +147,15 @@ const struct vdec_platform vdec_platform_gxm = {
.num_formats = ARRAY_SIZE(vdec_formats_gxm),
.revision = VDEC_REVISION_GXM,
};
+
+const struct vdec_platform vdec_platform_g12a = {
+ .formats = vdec_formats_g12a,
+ .num_formats = ARRAY_SIZE(vdec_formats_g12a),
+ .revision = VDEC_REVISION_G12A,
+};
+
+const struct vdec_platform vdec_platform_sm1 = {
+ .formats = vdec_formats_sm1,
+ .num_formats = ARRAY_SIZE(vdec_formats_sm1),
+ .revision = VDEC_REVISION_SM1,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
index f6025326db1d..731877a771f4 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.h
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -15,6 +15,8 @@ enum vdec_revision {
VDEC_REVISION_GXBB,
VDEC_REVISION_GXL,
VDEC_REVISION_GXM,
+ VDEC_REVISION_G12A,
+ VDEC_REVISION_SM1,
};
struct vdec_platform {
@@ -26,5 +28,7 @@ struct vdec_platform {
extern const struct vdec_platform vdec_platform_gxbb;
extern const struct vdec_platform vdec_platform_gxm;
extern const struct vdec_platform vdec_platform_gxl;
+extern const struct vdec_platform vdec_platform_g12a;
+extern const struct vdec_platform vdec_platform_sm1;
#endif
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml b/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml
new file mode 100644
index 000000000000..5dacece35702
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Documentation/devicetree/bindings/phy/rockchip-mipi-dphy-rx0.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip-mipi-dphy-rx0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC MIPI RX0 D-PHY Device Tree Bindings
+
+maintainers:
+ - Helen Koike <helen.koike@collabora.com>
+ - Ezequiel Garcia <ezequiel@collabora.com>
+
+description: |
+ The Rockchip SoC has a MIPI D-PHY bus with an RX0 entry which connects to
+ the ISP1 (Image Signal Processing unit v1.0) for CSI cameras.
+
+properties:
+ compatible:
+ const: rockchip,rk3399-mipi-dphy-rx0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: MIPI D-PHY ref clock
+ - description: MIPI D-PHY RX0 cfg clock
+ - description: Video in/out general register file clock
+
+ clock-names:
+ items:
+ - const: dphy-ref
+ - const: dphy-cfg
+ - const: grf
+
+ '#phy-cells':
+ const: 0
+
+ power-domains:
+ description: Video in/out power domain.
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - '#phy-cells'
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+
+ /*
+     * The MIPI D-PHY RX0 uses registers in the "general register files"
+     * (GRF), so this node should be a child of the GRF node.
+ *
+ * grf: syscon@ff770000 {
+ * compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+ * ...
+ * };
+ */
+
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/power/rk3399-power.h>
+
+ mipi_dphy_rx0: mipi-dphy-rx0 {
+ compatible = "rockchip,rk3399-mipi-dphy-rx0";
+ clocks = <&cru SCLK_MIPIDPHY_REF>,
+ <&cru SCLK_DPHY_RX0_CFG>,
+ <&cru PCLK_VIO_GRF>;
+ clock-names = "dphy-ref", "dphy-cfg", "grf";
+ power-domains = <&power RK3399_PD_VIO>;
+ #phy-cells = <0>;
+ };
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
new file mode 100644
index 000000000000..bd0147624de1
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PHY_ROCKCHIP_DPHY_RX0
+ tristate "Rockchip MIPI Synopsys DPHY RX0 driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY_MIPI_DPHY
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip MIPI Synopsys DPHY RX0
+	  associated with the Rockchip ISP module present in RK3399 SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-rockchip-dphy-rx0.
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile b/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile
new file mode 100644
index 000000000000..507e5d0593ab
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0.o
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/TODO b/drivers/staging/media/phy-rockchip-dphy-rx0/TODO
new file mode 100644
index 000000000000..ab612e5b27dc
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/TODO
@@ -0,0 +1,6 @@
+The main reason for keeping this in staging is that the only driver
+that uses it is rkisp1, which is also in staging. They should be moved
+out of staging together.
+
+Please CC patches to Linux Media <linux-media@vger.kernel.org> and
+Helen Koike <helen.koike@collabora.com>.
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
new file mode 100644
index 000000000000..7c4df6d48c43
--- /dev/null
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip MIPI Synopsys DPHY RX0 driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on:
+ *
+ * drivers/media/platform/rockchip/isp1/mipi_dphy_sy.c
+ * in https://chromium.googlesource.com/chromiumos/third_party/kernel,
+ * chromeos-4.4 branch.
+ *
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ * Jacob Chen <jacob2.chen@rock-chips.com>
+ * Shunqian Zheng <zhengsq@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define RK3399_GRF_SOC_CON9 0x6224
+#define RK3399_GRF_SOC_CON21 0x6254
+#define RK3399_GRF_SOC_CON22 0x6258
+#define RK3399_GRF_SOC_CON23 0x625c
+#define RK3399_GRF_SOC_CON24 0x6260
+#define RK3399_GRF_SOC_CON25 0x6264
+#define RK3399_GRF_SOC_STATUS1 0xe2a4
+
+#define CLOCK_LANE_HS_RX_CONTROL 0x34
+#define LANE0_HS_RX_CONTROL 0x44
+#define LANE1_HS_RX_CONTROL 0x54
+#define LANE2_HS_RX_CONTROL 0x84
+#define LANE3_HS_RX_CONTROL 0x94
+#define LANES_THS_SETTLE_CONTROL 0x75
+#define THS_SETTLE_COUNTER_THRESHOLD 0x04
+
+struct hsfreq_range {
+ u16 range_h;
+ u8 cfg_bit;
+};
+
+static const struct hsfreq_range rk3399_mipidphy_hsfreq_ranges[] = {
+ { 89, 0x00 }, { 99, 0x10 }, { 109, 0x20 }, { 129, 0x01 },
+ { 139, 0x11 }, { 149, 0x21 }, { 169, 0x02 }, { 179, 0x12 },
+ { 199, 0x22 }, { 219, 0x03 }, { 239, 0x13 }, { 249, 0x23 },
+ { 269, 0x04 }, { 299, 0x14 }, { 329, 0x05 }, { 359, 0x15 },
+ { 399, 0x25 }, { 449, 0x06 }, { 499, 0x16 }, { 549, 0x07 },
+ { 599, 0x17 }, { 649, 0x08 }, { 699, 0x18 }, { 749, 0x09 },
+ { 799, 0x19 }, { 849, 0x29 }, { 899, 0x39 }, { 949, 0x0a },
+ { 999, 0x1a }, { 1049, 0x2a }, { 1099, 0x3a }, { 1149, 0x0b },
+ { 1199, 0x1b }, { 1249, 0x2b }, { 1299, 0x3b }, { 1349, 0x0c },
+ { 1399, 0x1c }, { 1449, 0x2c }, { 1500, 0x3c }
+};
+
+static const char * const rk3399_mipidphy_clks[] = {
+ "dphy-ref",
+ "dphy-cfg",
+ "grf",
+};
+
+enum dphy_reg_id {
+ GRF_DPHY_RX0_TURNDISABLE = 0,
+ GRF_DPHY_RX0_FORCERXMODE,
+ GRF_DPHY_RX0_FORCETXSTOPMODE,
+ GRF_DPHY_RX0_ENABLE,
+ GRF_DPHY_RX0_TESTCLR,
+ GRF_DPHY_RX0_TESTCLK,
+ GRF_DPHY_RX0_TESTEN,
+ GRF_DPHY_RX0_TESTDIN,
+ GRF_DPHY_RX0_TURNREQUEST,
+ GRF_DPHY_RX0_TESTDOUT,
+ GRF_DPHY_TX0_TURNDISABLE,
+ GRF_DPHY_TX0_FORCERXMODE,
+ GRF_DPHY_TX0_FORCETXSTOPMODE,
+ GRF_DPHY_TX0_TURNREQUEST,
+ GRF_DPHY_TX1RX1_TURNDISABLE,
+ GRF_DPHY_TX1RX1_FORCERXMODE,
+ GRF_DPHY_TX1RX1_FORCETXSTOPMODE,
+ GRF_DPHY_TX1RX1_ENABLE,
+ GRF_DPHY_TX1RX1_MASTERSLAVEZ,
+ GRF_DPHY_TX1RX1_BASEDIR,
+ GRF_DPHY_TX1RX1_ENABLECLK,
+ GRF_DPHY_TX1RX1_TURNREQUEST,
+ GRF_DPHY_RX1_SRC_SEL,
+ /* rk3288 only */
+ GRF_CON_DISABLE_ISP,
+ GRF_CON_ISP_DPHY_SEL,
+ GRF_DSI_CSI_TESTBUS_SEL,
+ GRF_DVP_V18SEL,
+ /* below is for rk3399 only */
+ GRF_DPHY_RX0_CLK_INV_SEL,
+ GRF_DPHY_RX1_CLK_INV_SEL,
+};
+
+struct dphy_reg {
+ u16 offset;
+ u8 mask;
+ u8 shift;
+};
+
+#define PHY_REG(_offset, _width, _shift) \
+ { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, }
+
+static const struct dphy_reg rk3399_grf_dphy_regs[] = {
+ [GRF_DPHY_RX0_TURNREQUEST] = PHY_REG(RK3399_GRF_SOC_CON9, 4, 0),
+ [GRF_DPHY_RX0_CLK_INV_SEL] = PHY_REG(RK3399_GRF_SOC_CON9, 1, 10),
+ [GRF_DPHY_RX1_CLK_INV_SEL] = PHY_REG(RK3399_GRF_SOC_CON9, 1, 11),
+ [GRF_DPHY_RX0_ENABLE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 0),
+ [GRF_DPHY_RX0_FORCERXMODE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 4),
+ [GRF_DPHY_RX0_FORCETXSTOPMODE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 8),
+ [GRF_DPHY_RX0_TURNDISABLE] = PHY_REG(RK3399_GRF_SOC_CON21, 4, 12),
+ [GRF_DPHY_TX0_FORCERXMODE] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 0),
+ [GRF_DPHY_TX0_FORCETXSTOPMODE] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 4),
+ [GRF_DPHY_TX0_TURNDISABLE] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 8),
+ [GRF_DPHY_TX0_TURNREQUEST] = PHY_REG(RK3399_GRF_SOC_CON22, 4, 12),
+ [GRF_DPHY_TX1RX1_ENABLE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 0),
+ [GRF_DPHY_TX1RX1_FORCERXMODE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 4),
+ [GRF_DPHY_TX1RX1_FORCETXSTOPMODE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 8),
+ [GRF_DPHY_TX1RX1_TURNDISABLE] = PHY_REG(RK3399_GRF_SOC_CON23, 4, 12),
+ [GRF_DPHY_TX1RX1_TURNREQUEST] = PHY_REG(RK3399_GRF_SOC_CON24, 4, 0),
+ [GRF_DPHY_RX1_SRC_SEL] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 4),
+ [GRF_DPHY_TX1RX1_BASEDIR] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 5),
+ [GRF_DPHY_TX1RX1_ENABLECLK] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 6),
+ [GRF_DPHY_TX1RX1_MASTERSLAVEZ] = PHY_REG(RK3399_GRF_SOC_CON24, 1, 7),
+ [GRF_DPHY_RX0_TESTDIN] = PHY_REG(RK3399_GRF_SOC_CON25, 8, 0),
+ [GRF_DPHY_RX0_TESTEN] = PHY_REG(RK3399_GRF_SOC_CON25, 1, 8),
+ [GRF_DPHY_RX0_TESTCLK] = PHY_REG(RK3399_GRF_SOC_CON25, 1, 9),
+ [GRF_DPHY_RX0_TESTCLR] = PHY_REG(RK3399_GRF_SOC_CON25, 1, 10),
+ [GRF_DPHY_RX0_TESTDOUT] = PHY_REG(RK3399_GRF_SOC_STATUS1, 8, 0),
+};
+
+struct rk_dphy_drv_data {
+ const char * const *clks;
+ unsigned int num_clks;
+ const struct hsfreq_range *hsfreq_ranges;
+ unsigned int num_hsfreq_ranges;
+ const struct dphy_reg *regs;
+};
+
+struct rk_dphy {
+ struct device *dev;
+ struct regmap *grf;
+ struct clk_bulk_data *clks;
+
+ const struct rk_dphy_drv_data *drv_data;
+ struct phy_configure_opts_mipi_dphy config;
+
+ u8 hsfreq;
+};
+
+static inline void rk_dphy_write_grf(struct rk_dphy *priv,
+ unsigned int index, u8 value)
+{
+ const struct dphy_reg *reg = &priv->drv_data->regs[index];
+ /* Update high word */
+ unsigned int val = (value << reg->shift) |
+ (reg->mask << (reg->shift + 16));
+
+ if (WARN_ON(!reg->offset))
+ return;
+ regmap_write(priv->grf, reg->offset, val);
+}
+
+static void rk_dphy_write(struct rk_dphy *priv, u8 test_code, u8 test_data)
+{
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTDIN, test_code);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTEN, 1);
+ /*
+ * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
+ * is latched internally as the current test code. Test data is
+	 * is latched internally as the current test code. Test data is
+	 * programmed internally on the rising edge of TESTCLK.
+ */
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLK, 0);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTEN, 0);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTDIN, test_data);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLK, 1);
+}
+
+static void rk_dphy_enable(struct rk_dphy *priv)
+{
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_FORCERXMODE, 0);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_FORCETXSTOPMODE, 0);
+
+ /* Disable lane turn around, which is ignored in receive mode */
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TURNREQUEST, 0);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TURNDISABLE, 0xf);
+
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_ENABLE,
+ GENMASK(priv->config.lanes - 1, 0));
+
+ /* dphy start */
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLK, 1);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLR, 1);
+ usleep_range(100, 150);
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_TESTCLR, 0);
+ usleep_range(100, 150);
+
+ /* set clock lane */
+ /* HS hsfreq_range & lane 0 settle bypass */
+ rk_dphy_write(priv, CLOCK_LANE_HS_RX_CONTROL, 0);
+ /* HS RX Control of lane0 */
+ rk_dphy_write(priv, LANE0_HS_RX_CONTROL, priv->hsfreq << 1);
+ /* HS RX Control of lane1 */
+ rk_dphy_write(priv, LANE1_HS_RX_CONTROL, priv->hsfreq << 1);
+ /* HS RX Control of lane2 */
+ rk_dphy_write(priv, LANE2_HS_RX_CONTROL, priv->hsfreq << 1);
+ /* HS RX Control of lane3 */
+ rk_dphy_write(priv, LANE3_HS_RX_CONTROL, priv->hsfreq << 1);
+ /* HS RX Data Lanes Settle State Time Control */
+ rk_dphy_write(priv, LANES_THS_SETTLE_CONTROL,
+ THS_SETTLE_COUNTER_THRESHOLD);
+
+ /* Normal operation */
+ rk_dphy_write(priv, 0x0, 0);
+}
+
+static int rk_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct rk_dphy *priv = phy_get_drvdata(phy);
+ const struct rk_dphy_drv_data *drv_data = priv->drv_data;
+ struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy;
+ unsigned int hsfreq = 0;
+ unsigned int i;
+ u64 data_rate_mbps;
+ int ret;
+
+ /* pass with phy_mipi_dphy_get_default_config (with pixel rate?) */
+ ret = phy_mipi_dphy_config_validate(config);
+ if (ret)
+ return ret;
+
+ data_rate_mbps = div_u64(config->hs_clk_rate, 1000 * 1000);
+
+ dev_dbg(priv->dev, "lanes %d - data_rate_mbps %llu\n",
+ config->lanes, data_rate_mbps);
+ for (i = 0; i < drv_data->num_hsfreq_ranges; i++) {
+ if (drv_data->hsfreq_ranges[i].range_h >= data_rate_mbps) {
+ hsfreq = drv_data->hsfreq_ranges[i].cfg_bit;
+ break;
+ }
+ }
+ if (!hsfreq)
+ return -EINVAL;
+
+ priv->hsfreq = hsfreq;
+ priv->config = *config;
+ return 0;
+}
+
+static int rk_dphy_power_on(struct phy *phy)
+{
+ struct rk_dphy *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_bulk_enable(priv->drv_data->num_clks, priv->clks);
+ if (ret)
+ return ret;
+
+ rk_dphy_enable(priv);
+
+ return 0;
+}
+
+static int rk_dphy_power_off(struct phy *phy)
+{
+ struct rk_dphy *priv = phy_get_drvdata(phy);
+
+ rk_dphy_write_grf(priv, GRF_DPHY_RX0_ENABLE, 0);
+ clk_bulk_disable(priv->drv_data->num_clks, priv->clks);
+ return 0;
+}
+
+static int rk_dphy_init(struct phy *phy)
+{
+ struct rk_dphy *priv = phy_get_drvdata(phy);
+
+ return clk_bulk_prepare(priv->drv_data->num_clks, priv->clks);
+}
+
+static int rk_dphy_exit(struct phy *phy)
+{
+ struct rk_dphy *priv = phy_get_drvdata(phy);
+
+ clk_bulk_unprepare(priv->drv_data->num_clks, priv->clks);
+ return 0;
+}
+
+static const struct phy_ops rk_dphy_ops = {
+ .power_on = rk_dphy_power_on,
+ .power_off = rk_dphy_power_off,
+ .init = rk_dphy_init,
+ .exit = rk_dphy_exit,
+ .configure = rk_dphy_configure,
+ .owner = THIS_MODULE,
+};
+
+static const struct rk_dphy_drv_data rk3399_mipidphy_drv_data = {
+ .clks = rk3399_mipidphy_clks,
+ .num_clks = ARRAY_SIZE(rk3399_mipidphy_clks),
+ .hsfreq_ranges = rk3399_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk3399_mipidphy_hsfreq_ranges),
+ .regs = rk3399_grf_dphy_regs,
+};
+
+static const struct of_device_id rk_dphy_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3399-mipi-dphy-rx0",
+ .data = &rk3399_mipidphy_drv_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rk_dphy_dt_ids);
+
+static int rk_dphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct rk_dphy_drv_data *drv_data;
+ struct phy_provider *phy_provider;
+ const struct of_device_id *of_id;
+ struct rk_dphy *priv;
+ struct phy *phy;
+ unsigned int i;
+ int ret;
+
+ if (!dev->parent || !dev->parent->of_node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ priv->grf = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->grf)) {
+ dev_err(dev, "Can't find GRF syscon\n");
+ return -ENODEV;
+ }
+
+ of_id = of_match_device(rk_dphy_dt_ids, dev);
+ if (!of_id)
+ return -EINVAL;
+
+ drv_data = of_id->data;
+ priv->drv_data = drv_data;
+ priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks,
+ sizeof(*priv->clks), GFP_KERNEL);
+ if (!priv->clks)
+ return -ENOMEM;
+ for (i = 0; i < drv_data->num_clks; i++)
+ priv->clks[i].id = drv_data->clks[i];
+ ret = devm_clk_bulk_get(&pdev->dev, drv_data->num_clks, priv->clks);
+ if (ret)
+ return ret;
+
+ phy = devm_phy_create(dev, np, &rk_dphy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ return PTR_ERR(phy);
+ }
+ phy_set_drvdata(phy, priv);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver rk_dphy_driver = {
+ .probe = rk_dphy_probe,
+ .driver = {
+ .name = "rockchip-mipi-dphy-rx0",
+ .of_match_table = rk_dphy_dt_ids,
+ },
+};
+module_platform_driver(rk_dphy_driver);
+
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Rockchip MIPI Synopsys DPHY RX0 driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
new file mode 100644
index 000000000000..af246b71eac6
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
@@ -0,0 +1,192 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/rockchip-isp1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SoC Image Signal Processing unit v1
+
+maintainers:
+ - Helen Koike <helen.koike@collabora.com>
+
+description: |
+  Rockchip ISP1 is the camera interface for the Rockchip series of SoCs,
+  which contains image processing, scaling, and compression functions.
+
+properties:
+ compatible:
+ const: rockchip,rk3399-cif-isp
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+ description: phandle for the PHY port
+
+ phy-names:
+ const: dphy
+
+ clocks:
+ items:
+ - description: ISP clock
+      - description: ISP AXI clock
+      - description: ISP AXI wrapper clock
+      - description: ISP AHB clock
+ - description: ISP AHB wrapper clock
+
+ clock-names:
+ items:
+ - const: clk_isp
+ - const: aclk_isp
+ - const: aclk_isp_wrap
+ - const: hclk_isp
+ - const: hclk_isp_wrap
+
+ # See ./video-interfaces.txt for details
+ ports:
+ type: object
+ additionalProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ description: connection point for sensors at MIPI-DPHY RX0
+ additionalProperties: false
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ reg:
+ const: 0
+
+ patternProperties:
+ endpoint:
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ remote-endpoint: true
+
+ required:
+ - port@0
+
+required:
+ - compatible
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - iommus
+ - phys
+ - phy-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rk3399-power.h>
+
+ parent0: parent@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ isp0: isp0@ff910000 {
+ compatible = "rockchip,rk3399-cif-isp";
+ reg = <0x0 0xff910000 0x0 0x4000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru SCLK_ISP0>,
+ <&cru ACLK_ISP0>, <&cru ACLK_ISP0_WRAPPER>,
+ <&cru HCLK_ISP0>, <&cru HCLK_ISP0_WRAPPER>;
+ clock-names = "clk_isp",
+ "aclk_isp", "aclk_isp_wrap",
+ "hclk_isp", "hclk_isp_wrap";
+ power-domains = <&power RK3399_PD_ISP0>;
+ iommus = <&isp0_mmu>;
+ phys = <&dphy>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ mipi_in_wcam: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&wcam_out>;
+ data-lanes = <1 2>;
+ };
+
+ mipi_in_ucam: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&ucam_out>;
+ data-lanes = <1>;
+ };
+ };
+ };
+ };
+
+ i2c7: i2c@ff160000 {
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wcam: camera@36 {
+ compatible = "ovti,ov5695";
+ reg = <0x36>;
+
+ port {
+ wcam_out: endpoint {
+ remote-endpoint = <&mipi_in_wcam>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+
+ ucam: camera@3c {
+ compatible = "ovti,ov2685";
+ reg = <0x3c>;
+
+ port {
+ ucam_out: endpoint {
+ remote-endpoint = <&mipi_in_ucam>;
+ data-lanes = <1>;
+ };
+ };
+ };
+ };
+ };
diff --git a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst
new file mode 100644
index 000000000000..32034e481357
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst
@@ -0,0 +1,23 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+.. _v4l2-meta-fmt-rkisp1-params:
+
+============================
+V4L2_META_FMT_RK_ISP1_PARAMS
+============================
+
+Rockchip ISP1 Parameters Data
+
+Description
+===========
+
+This format describes input parameters for the Rockchip ISP1.
+
+It uses the C struct :c:type:`rkisp1_params_cfg`, which is defined in
+the ``linux/rkisp1-config.h`` header file.
+
+The parameters are organized into multiple modules.
+A module is only updated if the corresponding bit is set in module_*_update.
+
+.. kernel-doc:: include/uapi/linux/rkisp1-config.h
+ :functions: rkisp1_params_cfg
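
To make the update mechanism above concrete: a userspace client fills a
struct rkisp1_params_cfg, sets the matching module_*_update bits, and
queues the buffer on the parameters video node. A minimal sketch,
assuming the semantics described above; the RKISP1_CIF_ISP_MODULE_BLS
constant and the others.bls_config field are illustrative and may not
match the exact names in linux/rkisp1-config.h:

    #include <string.h>
    #include <linux/rkisp1-config.h>

    static void params_fill(struct rkisp1_params_cfg *cfg)
    {
            memset(cfg, 0, sizeof(*cfg));
            /* Enable the (hypothetical) BLS module... */
            cfg->module_en_update = RKISP1_CIF_ISP_MODULE_BLS;
            cfg->module_ens = RKISP1_CIF_ISP_MODULE_BLS;
            /* ...and mark its configuration block as updated */
            cfg->module_cfg_update = RKISP1_CIF_ISP_MODULE_BLS;
            /* cfg->others.bls_config would then carry the new settings */
    }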
diff --git a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst
new file mode 100644
index 000000000000..4ad303f96421
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+.. _v4l2-meta-fmt-rkisp1-stat:
+
+=============================
+V4L2_META_FMT_RK_ISP1_STAT_3A
+=============================
+
+Rockchip ISP1 Statistics Data
+
+Description
+===========
+
+This format describes the image color statistics generated by the Rockchip
+ISP1.
+
+It uses the C struct :c:type:`rkisp1_stat_buffer`, which is defined in
+the ``linux/rkisp1-config.h`` header file.
+
+.. kernel-doc:: include/uapi/linux/rkisp1-config.h
+ :functions: rkisp1_stat_buffer
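
To make the readout path above concrete: the statistics are obtained by
dequeueing buffers from the stats video node. A minimal sketch of
dequeueing one buffer, assuming an already-streaming
V4L2_BUF_TYPE_META_CAPTURE queue whose mmap'ed buffers were stored in a
caller-provided bufs[] array during setup (bufs is hypothetical):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/rkisp1-config.h>

    static struct rkisp1_stat_buffer *stats_dequeue(int fd, void *bufs[])
    {
            struct v4l2_buffer buf;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_META_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;
            if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
                    return NULL;
            /* buf.index identifies the mmap'ed buffer holding the stats */
            return (struct rkisp1_stat_buffer *)bufs[buf.index];
    }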
diff --git a/drivers/staging/media/rkisp1/Kconfig b/drivers/staging/media/rkisp1/Kconfig
new file mode 100644
index 000000000000..b859a493caba
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_ROCKCHIP_ISP1
+ tristate "Rockchip Image Signal Processing v1 Unit driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_FWNODE
+ select PHY_ROCKCHIP_DPHY_RX0
+ default n
+ help
+ Enable this to support the Image Signal Processing (ISP) module
+ present in RK3399 SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rockchip-isp1.
diff --git a/drivers/staging/media/rkisp1/Makefile b/drivers/staging/media/rkisp1/Makefile
new file mode 100644
index 000000000000..69ca59c7ef34
--- /dev/null
+++ b/drivers/staging/media/rkisp1/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rockchip-isp1.o
+rockchip-isp1-objs += rkisp1-capture.o \
+ rkisp1-common.o \
+ rkisp1-dev.o \
+ rkisp1-isp.o \
+ rkisp1-resizer.o \
+ rkisp1-stats.o \
+ rkisp1-params.o
diff --git a/drivers/staging/media/rkisp1/TODO b/drivers/staging/media/rkisp1/TODO
new file mode 100644
index 000000000000..03cd9a4e70f7
--- /dev/null
+++ b/drivers/staging/media/rkisp1/TODO
@@ -0,0 +1,23 @@
+* Fix serialization on subdev ops.
+* Don't use v4l2_async_notifier_parse_fwnode_endpoints_by_port().
+e.g. isp_parse_of_endpoints in drivers/media/platform/omap3isp/isp.c and
+cio2_parse_firmware in drivers/media/pci/intel/ipu3/ipu3-cio2.c.
+* Fix pad format size for statistics and parameters entities.
+* Use threaded interrupt for rkisp1_stats_isr(), remove work queue.
+* Fix checkpatch errors.
+* Make sure uapi structs have the same size and layout on 32-bit and 64-bit
+builds, and that there are no holes in the structures (pahole is a utility
+that can be used to test this).
+* Review and comment every lock
+* Handle quantization
+* Document rkisp1-common.h
+* The streaming paths (mainpath and selfpath) check whether the other path is
+streaming in several places in the code; review this, especially since
+streaming from both paths at the same time does not seem to be supported.
+
+NOTES:
+* All v4l2-compliance tests must pass.
+* Stats and params can be tested with libcamera and ChromiumOS stack.
+
+Please CC patches to Linux Media <linux-media@vger.kernel.org> and
+Helen Koike <helen.koike@collabora.com>.
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
new file mode 100644
index 000000000000..524e0dd38c1b
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - V4l capture device
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "rkisp1-common.h"
+
+/*
+ * NOTE: There are two capture video devices in rkisp1, selfpath and mainpath.
+ *
+ * Differences between selfpath and mainpath:
+ * available mp sink input: isp
+ * available sp sink input: isp, dma (TODO)
+ * available mp sink pad fmts: yuv422, raw
+ * available sp sink pad fmts: yuv422, yuv420, ...
+ * available mp source fmts: yuv, raw, jpeg (TODO)
+ * available sp source fmts: yuv, rgb
+ */
+
+#define RKISP1_SP_DEV_NAME RKISP1_DRIVER_NAME "_selfpath"
+#define RKISP1_MP_DEV_NAME RKISP1_DRIVER_NAME "_mainpath"
+
+#define RKISP1_MIN_BUFFERS_NEEDED 3
+
+enum rkisp1_plane {
+ RKISP1_PLANE_Y = 0,
+ RKISP1_PLANE_CB = 1,
+ RKISP1_PLANE_CR = 2
+};
+
+/*
+ * @fourcc: pixel format
+ * @fmt_type: helper field for pixel format
+ * @uv_swap: set if the Cb and Cr planes are swapped, for YUV formats
+ * @write_format: defines how YCbCr self picture data is written to memory
+ * @output_format: defines sp output format
+ */
+struct rkisp1_capture_fmt_cfg {
+ u32 fourcc;
+ u8 fmt_type;
+ u8 uv_swap;
+ u32 write_format;
+ u32 output_format;
+};
+
+struct rkisp1_capture_ops {
+ void (*config)(struct rkisp1_capture *cap);
+ void (*stop)(struct rkisp1_capture *cap);
+ void (*enable)(struct rkisp1_capture *cap);
+ void (*disable)(struct rkisp1_capture *cap);
+ void (*set_data_path)(struct rkisp1_capture *cap);
+ bool (*is_stopped)(struct rkisp1_capture *cap);
+};
+
+struct rkisp1_capture_config {
+ const struct rkisp1_capture_fmt_cfg *fmts;
+ int fmt_size;
+ struct {
+ u32 y_size_init;
+ u32 cb_size_init;
+ u32 cr_size_init;
+ u32 y_base_ad_init;
+ u32 cb_base_ad_init;
+ u32 cr_base_ad_init;
+ u32 y_offs_cnt_init;
+ u32 cb_offs_cnt_init;
+ u32 cr_offs_cnt_init;
+ } mi;
+};
+
+static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
+ /* yuv422 */
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .fmt_type = RKISP1_FMT_YUV,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU422M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ },
+ /* yuv420 */
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ },
+ /* yuv444 */
+ {
+ .fourcc = V4L2_PIX_FMT_YUV444M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ },
+ /* yuv400 */
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ },
+ /* raw */
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ },
+};
+
+static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
+ /* yuv422 */
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU422M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ },
+ /* yuv420 */
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 1,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ },
+ /* yuv444 */
+ {
+ .fourcc = V4L2_PIX_FMT_YUV444M,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV444,
+ },
+ /* yuv400 */
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .fmt_type = RKISP1_FMT_YUV,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV400,
+ },
+ /* rgb */
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .fmt_type = RKISP1_FMT_RGB,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB888,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .fmt_type = RKISP1_FMT_RGB,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB565,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .fmt_type = RKISP1_FMT_RGB,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB666,
+ },
+};
+
+static const struct rkisp1_capture_config rkisp1_capture_config_mp = {
+ .fmts = rkisp1_mp_fmts,
+ .fmt_size = ARRAY_SIZE(rkisp1_mp_fmts),
+ .mi = {
+ .y_size_init = RKISP1_CIF_MI_MP_Y_SIZE_INIT,
+ .cb_size_init = RKISP1_CIF_MI_MP_CB_SIZE_INIT,
+ .cr_size_init = RKISP1_CIF_MI_MP_CR_SIZE_INIT,
+ .y_base_ad_init = RKISP1_CIF_MI_MP_Y_BASE_AD_INIT,
+ .cb_base_ad_init = RKISP1_CIF_MI_MP_CB_BASE_AD_INIT,
+ .cr_base_ad_init = RKISP1_CIF_MI_MP_CR_BASE_AD_INIT,
+ .y_offs_cnt_init = RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT,
+ .cb_offs_cnt_init = RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT,
+ .cr_offs_cnt_init = RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT,
+ },
+};
+
+static const struct rkisp1_capture_config rkisp1_capture_config_sp = {
+ .fmts = rkisp1_sp_fmts,
+ .fmt_size = ARRAY_SIZE(rkisp1_sp_fmts),
+ .mi = {
+ .y_size_init = RKISP1_CIF_MI_SP_Y_SIZE_INIT,
+ .cb_size_init = RKISP1_CIF_MI_SP_CB_SIZE_INIT,
+ .cr_size_init = RKISP1_CIF_MI_SP_CR_SIZE_INIT,
+ .y_base_ad_init = RKISP1_CIF_MI_SP_Y_BASE_AD_INIT,
+ .cb_base_ad_init = RKISP1_CIF_MI_SP_CB_BASE_AD_INIT,
+ .cr_base_ad_init = RKISP1_CIF_MI_SP_CR_BASE_AD_INIT,
+ .y_offs_cnt_init = RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT,
+ .cb_offs_cnt_init = RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT,
+ .cr_offs_cnt_init = RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT,
+ },
+};
+
+static inline struct rkisp1_vdev_node *
+rkisp1_vdev_to_node(struct video_device *vdev)
+{
+ return container_of(vdev, struct rkisp1_vdev_node, vdev);
+}
+
+/* ----------------------------------------------------------------------------
+ * Stream operations for self-picture path (sp) and main-picture path (mp)
+ */
+
+static void rkisp1_mi_config_ctrl(struct rkisp1_capture *cap)
+{
+ u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+ mi_ctrl &= ~GENMASK(17, 16);
+ mi_ctrl |= RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_64;
+
+ mi_ctrl &= ~GENMASK(19, 18);
+ mi_ctrl |= RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_64;
+
+ mi_ctrl |= RKISP1_CIF_MI_CTRL_INIT_BASE_EN |
+ RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN;
+
+ rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static u32 rkisp1_pixfmt_comp_size(const struct v4l2_pix_format_mplane *pixm,
+ unsigned int component)
+{
+ /*
+	 * For packed formats, plane_fmt[0].sizeimage is the sum of all
+	 * components, so we need to calculate only the size of the Y component.
+ * See rkisp1_fill_pixfmt().
+ */
+ if (!component && pixm->num_planes == 1)
+ return pixm->plane_fmt[0].bytesperline * pixm->height;
+ return pixm->plane_fmt[component].sizeimage;
+}
+
+static void rkisp1_irq_frame_end_enable(struct rkisp1_capture *cap)
+{
+ u32 mi_imsc = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_IMSC);
+
+ mi_imsc |= RKISP1_CIF_MI_FRAME(cap);
+ rkisp1_write(cap->rkisp1, mi_imsc, RKISP1_CIF_MI_IMSC);
+}
+
+static void rkisp1_mp_config(struct rkisp1_capture *cap)
+{
+ const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+ struct rkisp1_device *rkisp1 = cap->rkisp1;
+ u32 reg;
+
+ rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
+ cap->config->mi.y_size_init);
+ rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
+ cap->config->mi.cb_size_init);
+ rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR),
+ cap->config->mi.cr_size_init);
+
+ rkisp1_irq_frame_end_enable(cap);
+ if (cap->pix.cfg->uv_swap) {
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+
+ reg = (reg & ~BIT(0)) |
+ RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
+ rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ }
+
+ rkisp1_mi_config_ctrl(cap);
+
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
+ reg &= ~RKISP1_MI_CTRL_MP_FMT_MASK;
+ reg |= cap->pix.cfg->write_format;
+ rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_CTRL);
+
+ reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
+ reg |= RKISP1_CIF_MI_MP_AUTOUPDATE_ENABLE;
+ rkisp1_write(rkisp1, reg, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_sp_config(struct rkisp1_capture *cap)
+{
+ const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+ struct rkisp1_device *rkisp1 = cap->rkisp1;
+ u32 mi_ctrl;
+
+ rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
+ cap->config->mi.y_size_init);
+ rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
+ cap->config->mi.cb_size_init);
+ rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR),
+ cap->config->mi.cr_size_init);
+
+ rkisp1_write(rkisp1, pixm->width, RKISP1_CIF_MI_SP_Y_PIC_WIDTH);
+ rkisp1_write(rkisp1, pixm->height, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT);
+ rkisp1_write(rkisp1, cap->sp_y_stride, RKISP1_CIF_MI_SP_Y_LLENGTH);
+
+ rkisp1_irq_frame_end_enable(cap);
+ if (cap->pix.cfg->uv_swap) {
+ u32 reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+
+ rkisp1_write(rkisp1, reg & ~BIT(1),
+ RKISP1_CIF_MI_XTD_FORMAT_CTRL);
+ }
+
+ rkisp1_mi_config_ctrl(cap);
+
+ mi_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
+ mi_ctrl &= ~RKISP1_MI_CTRL_SP_FMT_MASK;
+ mi_ctrl |= cap->pix.cfg->write_format |
+ RKISP1_MI_CTRL_SP_INPUT_YUV422 |
+ cap->pix.cfg->output_format |
+ RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE;
+ rkisp1_write(rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_mp_disable(struct rkisp1_capture *cap)
+{
+ u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+ mi_ctrl &= ~(RKISP1_CIF_MI_CTRL_MP_ENABLE |
+ RKISP1_CIF_MI_CTRL_RAW_ENABLE);
+ rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_sp_disable(struct rkisp1_capture *cap)
+{
+ u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+ mi_ctrl &= ~RKISP1_CIF_MI_CTRL_SP_ENABLE;
+ rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_mp_enable(struct rkisp1_capture *cap)
+{
+ const struct rkisp1_capture_fmt_cfg *isp_fmt = cap->pix.cfg;
+ u32 mi_ctrl;
+
+ rkisp1_mp_disable(cap);
+
+ mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+ if (isp_fmt->fmt_type == RKISP1_FMT_BAYER)
+ mi_ctrl |= RKISP1_CIF_MI_CTRL_RAW_ENABLE;
+ /* YUV */
+ else
+ mi_ctrl |= RKISP1_CIF_MI_CTRL_MP_ENABLE;
+
+ rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_sp_enable(struct rkisp1_capture *cap)
+{
+ u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
+
+ mi_ctrl |= RKISP1_CIF_MI_CTRL_SP_ENABLE;
+ rkisp1_write(cap->rkisp1, mi_ctrl, RKISP1_CIF_MI_CTRL);
+}
+
+static void rkisp1_mp_sp_stop(struct rkisp1_capture *cap)
+{
+ if (!cap->is_streaming)
+ return;
+ rkisp1_write(cap->rkisp1,
+ RKISP1_CIF_MI_FRAME(cap), RKISP1_CIF_MI_ICR);
+ cap->ops->disable(cap);
+}
+
+static bool rkisp1_mp_is_stopped(struct rkisp1_capture *cap)
+{
+ u32 en = RKISP1_CIF_MI_CTRL_SHD_MP_IN_ENABLED |
+ RKISP1_CIF_MI_CTRL_SHD_RAW_OUT_ENABLED;
+
+ return !(rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL_SHD) & en);
+}
+
+static bool rkisp1_sp_is_stopped(struct rkisp1_capture *cap)
+{
+ return !(rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL_SHD) &
+ RKISP1_CIF_MI_CTRL_SHD_SP_IN_ENABLED);
+}
+
+static void rkisp1_mp_set_data_path(struct rkisp1_capture *cap)
+{
+ u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
+
+ dpcl = dpcl | RKISP1_CIF_VI_DPCL_CHAN_MODE_MP |
+ RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI;
+ rkisp1_write(cap->rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+}
+
+static void rkisp1_sp_set_data_path(struct rkisp1_capture *cap)
+{
+ u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
+
+ dpcl |= RKISP1_CIF_VI_DPCL_CHAN_MODE_SP;
+ rkisp1_write(cap->rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+}
+
+static struct rkisp1_capture_ops rkisp1_capture_ops_mp = {
+ .config = rkisp1_mp_config,
+ .enable = rkisp1_mp_enable,
+ .disable = rkisp1_mp_disable,
+ .stop = rkisp1_mp_sp_stop,
+ .set_data_path = rkisp1_mp_set_data_path,
+ .is_stopped = rkisp1_mp_is_stopped,
+};
+
+static struct rkisp1_capture_ops rkisp1_capture_ops_sp = {
+ .config = rkisp1_sp_config,
+ .enable = rkisp1_sp_enable,
+ .disable = rkisp1_sp_disable,
+ .stop = rkisp1_mp_sp_stop,
+ .set_data_path = rkisp1_sp_set_data_path,
+ .is_stopped = rkisp1_sp_is_stopped,
+};
+
+/* ----------------------------------------------------------------------------
+ * Frame buffer operations
+ */
+
+static int rkisp1_dummy_buf_create(struct rkisp1_capture *cap)
+{
+ const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+ struct rkisp1_dummy_buffer *dummy_buf = &cap->buf.dummy;
+
+ dummy_buf->size = max3(rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
+
+	/* The driver never accesses vaddr, so no kernel mapping is required */
+ dummy_buf->vaddr = dma_alloc_attrs(cap->rkisp1->dev,
+ dummy_buf->size,
+ &dummy_buf->dma_addr,
+ GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!dummy_buf->vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void rkisp1_dummy_buf_destroy(struct rkisp1_capture *cap)
+{
+ dma_free_attrs(cap->rkisp1->dev,
+ cap->buf.dummy.size, cap->buf.dummy.vaddr,
+ cap->buf.dummy.dma_addr, DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
+{
+ /*
+	 * Use the dummy buffer allocated in rkisp1_dummy_buf_create() to
+	 * discard data when there is no available buffer.
+ */
+ if (cap->buf.next) {
+ u32 *buff_addr = cap->buf.next->buff_addr;
+
+ rkisp1_write(cap->rkisp1,
+ buff_addr[RKISP1_PLANE_Y],
+ cap->config->mi.y_base_ad_init);
+ rkisp1_write(cap->rkisp1,
+ buff_addr[RKISP1_PLANE_CB],
+ cap->config->mi.cb_base_ad_init);
+ rkisp1_write(cap->rkisp1,
+ buff_addr[RKISP1_PLANE_CR],
+ cap->config->mi.cr_base_ad_init);
+ } else {
+ rkisp1_write(cap->rkisp1,
+ cap->buf.dummy.dma_addr,
+ cap->config->mi.y_base_ad_init);
+ rkisp1_write(cap->rkisp1,
+ cap->buf.dummy.dma_addr,
+ cap->config->mi.cb_base_ad_init);
+ rkisp1_write(cap->rkisp1,
+ cap->buf.dummy.dma_addr,
+ cap->config->mi.cr_base_ad_init);
+ }
+
+ /* Set plane offsets */
+ rkisp1_write(cap->rkisp1, 0, cap->config->mi.y_offs_cnt_init);
+ rkisp1_write(cap->rkisp1, 0, cap->config->mi.cb_offs_cnt_init);
+ rkisp1_write(cap->rkisp1, 0, cap->config->mi.cr_offs_cnt_init);
+}
+
+/*
+ * This function is called at frame end. The next frame is already being
+ * processed, so we must set up the buffer for the frame after next,
+ * otherwise it will overflow.
+ */
+static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
+{
+ struct rkisp1_isp *isp = &cap->rkisp1->isp;
+ struct rkisp1_buffer *curr_buf = cap->buf.curr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cap->buf.lock, flags);
+
+ if (curr_buf) {
+ curr_buf->vb.sequence = atomic_read(&isp->frame_sequence);
+ curr_buf->vb.vb2_buf.timestamp = ktime_get_boottime_ns();
+ curr_buf->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ } else {
+ cap->rkisp1->debug.frame_drop[cap->id]++;
+ }
+
+ cap->buf.curr = cap->buf.next;
+ cap->buf.next = NULL;
+
+ if (!list_empty(&cap->buf.queue)) {
+ cap->buf.next = list_first_entry(&cap->buf.queue,
+ struct rkisp1_buffer,
+ queue);
+ list_del(&cap->buf.next->queue);
+ }
+ spin_unlock_irqrestore(&cap->buf.lock, flags);
+
+ rkisp1_set_next_buf(cap);
+}
+
+void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
+{
+ unsigned int i;
+ u32 status;
+
+ status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+ rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);
+
+ for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
+ struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
+
+ if (!(status & RKISP1_CIF_MI_FRAME(cap)))
+ continue;
+ if (!cap->is_stopping) {
+ rkisp1_handle_buffer(cap);
+ continue;
+ }
+ /*
+		 * Make sure the stream is actually stopped, as reported by
+		 * the shadow register, before waking up the thread that
+		 * would immediately free all frame buffers. stop() takes
+		 * effect at the next frame end, which syncs the
+		 * configuration into the shadow registers.
+ */
+ if (!cap->ops->is_stopped(cap)) {
+ cap->ops->stop(cap);
+ continue;
+ }
+ cap->is_stopping = false;
+ cap->is_streaming = false;
+ wake_up(&cap->done);
+ }
+}
+
+/* ----------------------------------------------------------------------------
+ * Vb2 operations
+ */
+
+static int rkisp1_vb2_queue_setup(struct vb2_queue *queue,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rkisp1_capture *cap = queue->drv_priv;
+ const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+ unsigned int i;
+
+ if (*num_planes) {
+ if (*num_planes != pixm->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < pixm->num_planes; i++)
+ if (sizes[i] < pixm->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ } else {
+ *num_planes = pixm->num_planes;
+ for (i = 0; i < pixm->num_planes; i++)
+ sizes[i] = pixm->plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_buffer *ispbuf =
+ container_of(vbuf, struct rkisp1_buffer, vb);
+ struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
+ const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
+ unsigned long flags;
+ unsigned int i;
+
+ memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr));
+ for (i = 0; i < pixm->num_planes; i++)
+ ispbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ /*
+ * For single-memory-plane formats, derive the CB and CR plane
+ * addresses from the Y plane address and the component sizes.
+ */
+ if (pixm->num_planes == 1) {
+ ispbuf->buff_addr[RKISP1_PLANE_CB] =
+ ispbuf->buff_addr[RKISP1_PLANE_Y] +
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y);
+ ispbuf->buff_addr[RKISP1_PLANE_CR] =
+ ispbuf->buff_addr[RKISP1_PLANE_CB] +
+ rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB);
+ }
+
+ spin_lock_irqsave(&cap->buf.lock, flags);
+
+ /*
+ * If the stream is on but no frame has been captured yet (the
+ * frame sequence is still -1) and there is no next buffer
+ * assigned, queue this buffer directly as the next buffer and
+ * update the memory interface.
+ */
+ if (cap->is_streaming && !cap->buf.next &&
+ atomic_read(&cap->rkisp1->isp.frame_sequence) == -1) {
+ cap->buf.next = ispbuf;
+ rkisp1_set_next_buf(cap);
+ } else {
+ list_add_tail(&ispbuf->queue, &cap->buf.queue);
+ }
+ spin_unlock_irqrestore(&cap->buf.lock, flags);
+}
+
+static int rkisp1_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
+ unsigned int i;
+
+ for (i = 0; i < cap->pix.fmt.num_planes; i++) {
+ unsigned long size = cap->pix.fmt.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(cap->rkisp1->dev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void rkisp1_return_all_buffers(struct rkisp1_capture *cap,
+ enum vb2_buffer_state state)
+{
+ unsigned long flags;
+ struct rkisp1_buffer *buf;
+
+ spin_lock_irqsave(&cap->buf.lock, flags);
+ if (cap->buf.curr) {
+ vb2_buffer_done(&cap->buf.curr->vb.vb2_buf, state);
+ cap->buf.curr = NULL;
+ }
+ if (cap->buf.next) {
+ vb2_buffer_done(&cap->buf.next->vb.vb2_buf, state);
+ cap->buf.next = NULL;
+ }
+ while (!list_empty(&cap->buf.queue)) {
+ buf = list_first_entry(&cap->buf.queue,
+ struct rkisp1_buffer, queue);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&cap->buf.lock, flags);
+}
+
+/*
+ * rkisp1_pipeline_sink_walk - Walk through the pipeline and call cb
+ * @from: entity at which to start the pipeline walk
+ * @until: entity at which to stop the pipeline walk
+ * @cb: callback invoked on each subdevice in the chain
+ *
+ * Walk the entity chain towards the sensor, starting at the pipeline video
+ * node, and call cb on each subdevice in the chain.
+ *
+ * If the until argument isn't NULL, stop the pipeline walk when reaching the
+ * until entity. This is used to disable a partially started pipeline due to a
+ * subdev start error.
+ */
+static int rkisp1_pipeline_sink_walk(struct media_entity *from,
+ struct media_entity *until,
+ int (*cb)(struct media_entity *from,
+ struct media_entity *curr))
+{
+ struct media_entity *entity = from;
+ struct media_pad *pad;
+ unsigned int i;
+ int ret;
+
+ while (1) {
+ pad = NULL;
+ /* Find the remote source pad connected to one of our sink pads */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *spad = &entity->pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+ pad = media_entity_remote_pad(spad);
+ if (pad && is_media_entity_v4l2_subdev(pad->entity))
+ break;
+ }
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ if (entity == until)
+ break;
+
+ ret = cb(from, entity);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rkisp1_pipeline_disable_cb(struct media_entity *from,
+ struct media_entity *curr)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(curr);
+
+ return v4l2_subdev_call(sd, video, s_stream, false);
+}
+
+static int rkisp1_pipeline_enable_cb(struct media_entity *from,
+ struct media_entity *curr)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(curr);
+
+ return v4l2_subdev_call(sd, video, s_stream, true);
+}
+
+static void rkisp1_stream_stop(struct rkisp1_capture *cap)
+{
+ int ret;
+
+ /* The stream should stop from the interrupt handler. If it doesn't, force it. */
+ cap->is_stopping = true;
+ ret = wait_event_timeout(cap->done,
+ !cap->is_streaming,
+ msecs_to_jiffies(1000));
+ if (!ret) {
+ cap->rkisp1->debug.stop_timeout[cap->id]++;
+ cap->ops->stop(cap);
+ cap->is_stopping = false;
+ cap->is_streaming = false;
+ }
+}
+
+static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
+{
+ struct rkisp1_capture *cap = queue->drv_priv;
+ struct rkisp1_vdev_node *node = &cap->vnode;
+ struct rkisp1_device *rkisp1 = cap->rkisp1;
+ int ret;
+
+ rkisp1_stream_stop(cap);
+ media_pipeline_stop(&node->vdev.entity);
+ ret = rkisp1_pipeline_sink_walk(&node->vdev.entity, NULL,
+ rkisp1_pipeline_disable_cb);
+ if (ret)
+ dev_err(rkisp1->dev,
+ "pipeline stream-off failed error:%d\n", ret);
+
+ rkisp1_return_all_buffers(cap, VB2_BUF_STATE_ERROR);
+
+ ret = v4l2_pipeline_pm_use(&node->vdev.entity, 0);
+ if (ret)
+ dev_err(rkisp1->dev, "pipeline close failed error:%d\n", ret);
+
+ ret = pm_runtime_put(rkisp1->dev);
+ if (ret)
+ dev_err(rkisp1->dev, "power down failed error:%d\n", ret);
+
+ rkisp1_dummy_buf_destroy(cap);
+}
+
+/*
+ * Most registers inside the Rockchip ISP1 have shadow registers, since
+ * they must not be changed while a frame is being processed.
+ * Usually, each sub-module updates its shadow register after
+ * processing the last pixel of a frame.
+ */
+static void rkisp1_stream_start(struct rkisp1_capture *cap)
+{
+ struct rkisp1_device *rkisp1 = cap->rkisp1;
+ struct rkisp1_capture *other = &rkisp1->capture_devs[cap->id ^ 1];
+
+ cap->ops->set_data_path(cap);
+ cap->ops->config(cap);
+
+ /* Setup a buffer for the next frame */
+ rkisp1_handle_buffer(cap);
+ cap->ops->enable(cap);
+ /*
+ * It's safe to configure the ACTIVE and SHADOW registers for the
+ * first stream. When the second stream starts, do NOT force an
+ * update, because that would also update the first one.
+ *
+ * The latter case would drop one more buffer (two in total), since
+ * there is no buffer in the shadow registers when the second
+ * frame end is received. This is also required because the second
+ * frame end may be corrupt, especially when running at 120fps.
+ */
+ if (!other->is_streaming) {
+ /* Force an immediate configuration update and program a second buffer. */
+ rkisp1_write(rkisp1,
+ RKISP1_CIF_MI_INIT_SOFT_UPD, RKISP1_CIF_MI_INIT);
+ rkisp1_handle_buffer(cap);
+ }
+ cap->is_streaming = true;
+}
+
+static int
+rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+ struct rkisp1_capture *cap = queue->drv_priv;
+ struct media_entity *entity = &cap->vnode.vdev.entity;
+ int ret;
+
+ ret = rkisp1_dummy_buf_create(cap);
+ if (ret)
+ goto err_ret_buffers;
+
+ ret = pm_runtime_get_sync(cap->rkisp1->dev);
+ if (ret < 0) {
+ dev_err(cap->rkisp1->dev, "power up failed %d\n", ret);
+ goto err_destroy_dummy;
+ }
+ ret = v4l2_pipeline_pm_use(entity, 1);
+ if (ret) {
+ dev_err(cap->rkisp1->dev, "open cif pipeline failed %d\n", ret);
+ goto err_pipe_pm_put;
+ }
+
+ rkisp1_stream_start(cap);
+
+ /* start sub-devices */
+ ret = rkisp1_pipeline_sink_walk(entity, NULL,
+ rkisp1_pipeline_enable_cb);
+ if (ret)
+ goto err_stop_stream;
+
+ ret = media_pipeline_start(entity, &cap->rkisp1->pipe);
+ if (ret) {
+ dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret);
+ goto err_pipe_disable;
+ }
+
+ return 0;
+
+err_pipe_disable:
+ rkisp1_pipeline_sink_walk(entity, NULL, rkisp1_pipeline_disable_cb);
+err_stop_stream:
+ rkisp1_stream_stop(cap);
+ v4l2_pipeline_pm_use(entity, 0);
+err_pipe_pm_put:
+ pm_runtime_put(cap->rkisp1->dev);
+err_destroy_dummy:
+ rkisp1_dummy_buf_destroy(cap);
+err_ret_buffers:
+ rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static const struct vb2_ops rkisp1_vb2_ops = {
+ .queue_setup = rkisp1_vb2_queue_setup,
+ .buf_queue = rkisp1_vb2_buf_queue,
+ .buf_prepare = rkisp1_vb2_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = rkisp1_vb2_stop_streaming,
+ .start_streaming = rkisp1_vb2_start_streaming,
+};
+
+/* ----------------------------------------------------------------------------
+ * IOCTL operations
+ */
+
+static const struct v4l2_format_info *
+rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
+ enum rkisp1_stream_id id)
+{
+ struct v4l2_plane_pix_format *plane_y = &pixm->plane_fmt[0];
+ const struct v4l2_format_info *info;
+ unsigned int i;
+ u32 stride;
+
+ info = v4l2_format_info(pixm->pixelformat);
+ pixm->num_planes = info->mem_planes;
+ stride = info->bpp[0] * pixm->width;
+ /* Self path supports custom stride but Main path doesn't */
+ if (id == RKISP1_MAINPATH || plane_y->bytesperline < stride)
+ plane_y->bytesperline = stride;
+ plane_y->sizeimage = plane_y->bytesperline * pixm->height;
+
+ /* normalize stride to pixels per line */
+ stride = DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]);
+
+ for (i = 1; i < info->comp_planes; i++) {
+ struct v4l2_plane_pix_format *plane = &pixm->plane_fmt[i];
+
+ /* bytesperline for the other components derives from the Y component */
+ plane->bytesperline = DIV_ROUND_UP(stride, info->hdiv) *
+ info->bpp[i];
+ plane->sizeimage = plane->bytesperline *
+ DIV_ROUND_UP(pixm->height, info->vdiv);
+ }
+
+ /*
+ * If pixfmt is packed, then plane_fmt[0] should contain the total size
+ * considering all components. plane_fmt[i] for i > 0 should be ignored
+ * by userspace as mem_planes == 1, but we are keeping information there
+ * for convenience.
+ */
+ if (info->mem_planes == 1)
+ for (i = 1; i < info->comp_planes; i++)
+ plane_y->sizeimage += pixm->plane_fmt[i].sizeimage;
+
+ return info;
+}
+
+static const struct rkisp1_capture_fmt_cfg *
+rkisp1_find_fmt_cfg(const struct rkisp1_capture *cap, const u32 pixelfmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cap->config->fmt_size; i++) {
+ if (cap->config->fmts[i].fourcc == pixelfmt)
+ return &cap->config->fmts[i];
+ }
+ return NULL;
+}
+
+static void rkisp1_try_fmt(const struct rkisp1_capture *cap,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct rkisp1_capture_fmt_cfg **fmt_cfg,
+ const struct v4l2_format_info **fmt_info)
+{
+ const struct rkisp1_capture_config *config = cap->config;
+ struct rkisp1_capture *other_cap =
+ &cap->rkisp1->capture_devs[cap->id ^ 1];
+ const struct rkisp1_capture_fmt_cfg *fmt;
+ const struct v4l2_format_info *info;
+ const unsigned int max_widths[] = { RKISP1_RSZ_MP_SRC_MAX_WIDTH,
+ RKISP1_RSZ_SP_SRC_MAX_WIDTH };
+ const unsigned int max_heights[] = { RKISP1_RSZ_MP_SRC_MAX_HEIGHT,
+ RKISP1_RSZ_SP_SRC_MAX_HEIGHT};
+
+ fmt = rkisp1_find_fmt_cfg(cap, pixm->pixelformat);
+ if (!fmt) {
+ fmt = config->fmts;
+ pixm->pixelformat = fmt->fourcc;
+ }
+
+ pixm->width = clamp_t(u32, pixm->width,
+ RKISP1_RSZ_SRC_MIN_WIDTH, max_widths[cap->id]);
+ pixm->height = clamp_t(u32, pixm->height,
+ RKISP1_RSZ_SRC_MIN_HEIGHT, max_heights[cap->id]);
+
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pixm->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+
+ info = rkisp1_fill_pixfmt(pixm, cap->id);
+
+ /* the quantization cannot be changed while the other capture is streaming */
+ if (other_cap->is_streaming)
+ pixm->quantization = other_cap->pix.fmt.quantization;
+ /* output full range by default; this takes effect in the params block */
+ else if (!pixm->quantization ||
+ pixm->quantization > V4L2_QUANTIZATION_LIM_RANGE)
+ pixm->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (fmt_cfg)
+ *fmt_cfg = fmt;
+ if (fmt_info)
+ *fmt_info = info;
+}
+
+static void rkisp1_set_fmt(struct rkisp1_capture *cap,
+ struct v4l2_pix_format_mplane *pixm)
+{
+ rkisp1_try_fmt(cap, pixm, &cap->pix.cfg, &cap->pix.info);
+ cap->pix.fmt = *pixm;
+
+ /* The self path supports a custom stride, expressed in pixels of the Y plane */
+ if (cap->id == RKISP1_SELFPATH)
+ cap->sp_y_stride = pixm->plane_fmt[0].bytesperline /
+ cap->pix.info->bpp[0];
+}
+
+static int rkisp1_try_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct rkisp1_capture *cap = video_drvdata(file);
+
+ rkisp1_try_fmt(cap, &f->fmt.pix_mp, NULL, NULL);
+
+ return 0;
+}
+
+static int rkisp1_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct rkisp1_capture *cap = video_drvdata(file);
+ const struct rkisp1_capture_fmt_cfg *fmt = NULL;
+
+ if (f->index >= cap->config->fmt_size)
+ return -EINVAL;
+
+ fmt = &cap->config->fmts[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int rkisp1_s_fmt_vid_cap_mplane(struct file *file,
+ void *priv, struct v4l2_format *f)
+{
+ struct rkisp1_capture *cap = video_drvdata(file);
+ struct rkisp1_vdev_node *node =
+ rkisp1_vdev_to_node(&cap->vnode.vdev);
+
+ if (vb2_is_busy(&node->buf_queue))
+ return -EBUSY;
+
+ rkisp1_set_fmt(cap, &f->fmt.pix_mp);
+
+ return 0;
+}
+
+static int rkisp1_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct rkisp1_capture *cap = video_drvdata(file);
+
+ f->fmt.pix_mp = cap->pix.fmt;
+
+ return 0;
+}
+
+static int
+rkisp1_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+ struct rkisp1_capture *cap_dev = video_drvdata(file);
+ struct rkisp1_device *rkisp1 = cap_dev->rkisp1;
+
+ strscpy(cap->driver, rkisp1->dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, rkisp1->dev->driver->name, sizeof(cap->card));
+ strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rkisp1_v4l2_ioctl_ops = {
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_try_fmt_vid_cap_mplane = rkisp1_try_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = rkisp1_s_fmt_vid_cap_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = rkisp1_g_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_cap = rkisp1_enum_fmt_vid_cap_mplane,
+ .vidioc_querycap = rkisp1_querycap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int rkisp1_capture_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct rkisp1_capture *cap = video_get_drvdata(vdev);
+ struct rkisp1_isp *isp = &cap->rkisp1->isp;
+ struct v4l2_subdev_format sd_fmt;
+ int ret;
+
+ if (cap->id == RKISP1_SELFPATH &&
+ isp->src_fmt->mbus_code != MEDIA_BUS_FMT_YUYV8_2X8) {
+ dev_err(cap->rkisp1->dev,
+ "selfpath only supports MEDIA_BUS_FMT_YUYV8_2X8\n");
+ return -EPIPE;
+ }
+
+ if (cap->pix.cfg->fmt_type != isp->src_fmt->fmt_type) {
+ dev_err(cap->rkisp1->dev,
+ "format type mismatch in link '%s:%d->%s:%d'\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+ return -EPIPE;
+ }
+
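+ /* The source subdev must produce exactly the configured capture resolution. */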
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = link->source->index;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ if (sd_fmt.format.height != cap->pix.fmt.height ||
+ sd_fmt.format.width != cap->pix.fmt.width)
+ return -EPIPE;
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * Core functions
+ */
+
+static const struct media_entity_operations rkisp1_media_ops = {
+ .link_validate = rkisp1_capture_link_validate,
+};
+
+static const struct v4l2_file_operations rkisp1_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static void rkisp1_unregister_capture(struct rkisp1_capture *cap)
+{
+ media_entity_cleanup(&cap->vnode.vdev.entity);
+ video_unregister_device(&cap->vnode.vdev);
+}
+
+void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_capture *mp = &rkisp1->capture_devs[RKISP1_MAINPATH];
+ struct rkisp1_capture *sp = &rkisp1->capture_devs[RKISP1_SELFPATH];
+
+ rkisp1_unregister_capture(mp);
+ rkisp1_unregister_capture(sp);
+}
+
+static int rkisp1_register_capture(struct rkisp1_capture *cap)
+{
+ const char * const dev_names[] = {RKISP1_MP_DEV_NAME,
+ RKISP1_SP_DEV_NAME};
+ struct v4l2_device *v4l2_dev = &cap->rkisp1->v4l2_dev;
+ struct video_device *vdev = &cap->vnode.vdev;
+ struct rkisp1_vdev_node *node;
+ struct vb2_queue *q;
+ int ret;
+
+ strscpy(vdev->name, dev_names[cap->id], sizeof(vdev->name));
+ node = rkisp1_vdev_to_node(vdev);
+ mutex_init(&node->vlock);
+
+ vdev->ioctl_ops = &rkisp1_v4l2_ioctl_ops;
+ vdev->release = video_device_release_empty;
+ vdev->fops = &rkisp1_fops;
+ vdev->minor = -1;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &node->vlock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ vdev->entity.ops = &rkisp1_media_ops;
+ video_set_drvdata(vdev, cap);
+ vdev->vfl_dir = VFL_DIR_RX;
+ node->pad.flags = MEDIA_PAD_FL_SINK;
+
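+ /*
+ * The buffers are filled by the memory interface via DMA, so use
+ * the DMA contiguous allocator, and serialize queue operations
+ * with the same lock as the ioctls.
+ */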
+ q = &node->buf_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+ q->drv_priv = cap;
+ q->ops = &rkisp1_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct rkisp1_buffer);
+ q->min_buffers_needed = RKISP1_MIN_BUFFERS_NEEDED;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &node->vlock;
+ q->dev = cap->rkisp1->dev;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ dev_err(cap->rkisp1->dev,
+ "vb2 queue init failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ vdev->queue = q;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(cap->rkisp1->dev,
+ "failed to register %s, ret=%d\n", vdev->name, ret);
+ return ret;
+ }
+ v4l2_info(v4l2_dev, "registered %s as /dev/video%d\n", vdev->name,
+ vdev->num);
+
+ ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+ if (ret) {
+ video_unregister_device(vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+rkisp1_capture_init(struct rkisp1_device *rkisp1, enum rkisp1_stream_id id)
+{
+ struct rkisp1_capture *cap = &rkisp1->capture_devs[id];
+ struct v4l2_pix_format_mplane pixm;
+
+ memset(cap, 0, sizeof(*cap));
+ cap->id = id;
+ cap->rkisp1 = rkisp1;
+
+ INIT_LIST_HEAD(&cap->buf.queue);
+ init_waitqueue_head(&cap->done);
+ spin_lock_init(&cap->buf.lock);
+ if (cap->id == RKISP1_SELFPATH) {
+ cap->ops = &rkisp1_capture_ops_sp;
+ cap->config = &rkisp1_capture_config_sp;
+ } else {
+ cap->ops = &rkisp1_capture_ops_mp;
+ cap->config = &rkisp1_capture_config_mp;
+ }
+
+ cap->is_streaming = false;
+
+ memset(&pixm, 0, sizeof(pixm));
+ pixm.pixelformat = V4L2_PIX_FMT_YUYV;
+ pixm.width = RKISP1_DEFAULT_WIDTH;
+ pixm.height = RKISP1_DEFAULT_HEIGHT;
+ rkisp1_set_fmt(cap, &pixm);
+}
+
+int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_capture *cap;
+ unsigned int i, j;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); i++) {
+ rkisp1_capture_init(rkisp1, i);
+ cap = &rkisp1->capture_devs[i];
+ cap->rkisp1 = rkisp1;
+ ret = rkisp1_register_capture(cap);
+ if (ret)
+ goto err_unreg_capture_devs;
+ }
+
+ return 0;
+
+err_unreg_capture_devs:
+ for (j = 0; j < i; j++) {
+ cap = &rkisp1->capture_devs[j];
+ rkisp1_unregister_capture(cap);
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.c b/drivers/staging/media/rkisp1/rkisp1-common.c
new file mode 100644
index 000000000000..cf889666e166
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-common.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Common definitions
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ */
+
+#include <media/v4l2-rect.h>
+
+#include "rkisp1-common.h"
+
+static const struct v4l2_rect rkisp1_sd_min_crop = {
+ .width = RKISP1_ISP_MIN_WIDTH,
+ .height = RKISP1_ISP_MIN_HEIGHT,
+ .top = 0,
+ .left = 0,
+};
+
+void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
+ const struct v4l2_rect *bounds)
+{
+ v4l2_rect_set_min_size(crop, &rkisp1_sd_min_crop);
+ v4l2_rect_map_inside(crop, bounds);
+}
+
+void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
+ const struct v4l2_mbus_framefmt *bounds)
+{
+ struct v4l2_rect crop_bounds = {
+ .left = 0,
+ .top = 0,
+ .width = bounds->width,
+ .height = bounds->height,
+ };
+
+ rkisp1_sd_adjust_crop_rect(crop, &crop_bounds);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h
new file mode 100644
index 000000000000..369a401b098a
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-common.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 Driver - Common definitions
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _RKISP1_COMMON_H
+#define _RKISP1_COMMON_H
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rkisp1-regs.h"
+#include "uapi/rkisp1-config.h"
+
+#define RKISP1_ISP_MAX_WIDTH 4032
+#define RKISP1_ISP_MAX_HEIGHT 3024
+#define RKISP1_ISP_MIN_WIDTH 32
+#define RKISP1_ISP_MIN_HEIGHT 32
+
+#define RKISP1_RSZ_MP_SRC_MAX_WIDTH 4416
+#define RKISP1_RSZ_MP_SRC_MAX_HEIGHT 3312
+#define RKISP1_RSZ_SP_SRC_MAX_WIDTH 1920
+#define RKISP1_RSZ_SP_SRC_MAX_HEIGHT 1920
+#define RKISP1_RSZ_SRC_MIN_WIDTH 32
+#define RKISP1_RSZ_SRC_MIN_HEIGHT 16
+
+#define RKISP1_DEFAULT_WIDTH 800
+#define RKISP1_DEFAULT_HEIGHT 600
+
+#define RKISP1_DRIVER_NAME "rkisp1"
+#define RKISP1_BUS_INFO "platform:" RKISP1_DRIVER_NAME
+
+#define RKISP1_MAX_BUS_CLK 8
+
+enum rkisp1_rsz_pad {
+ RKISP1_RSZ_PAD_SINK,
+ RKISP1_RSZ_PAD_SRC,
+};
+
+enum rkisp1_stream_id {
+ RKISP1_MAINPATH,
+ RKISP1_SELFPATH,
+};
+
+enum rkisp1_fmt_pix_type {
+ RKISP1_FMT_YUV,
+ RKISP1_FMT_RGB,
+ RKISP1_FMT_BAYER,
+ RKISP1_FMT_JPEG,
+};
+
+enum rkisp1_fmt_raw_pat_type {
+ RKISP1_RAW_RGGB = 0,
+ RKISP1_RAW_GRBG,
+ RKISP1_RAW_GBRG,
+ RKISP1_RAW_BGGR,
+};
+
+enum rkisp1_isp_pad {
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ RKISP1_ISP_PAD_SINK_PARAMS,
+ RKISP1_ISP_PAD_SOURCE_VIDEO,
+ RKISP1_ISP_PAD_SOURCE_STATS,
+ RKISP1_ISP_PAD_MAX
+};
+
+/*
+ * struct rkisp1_sensor_async - Sensor information
+ * @mbus: media bus configuration
+ */
+struct rkisp1_sensor_async {
+ struct v4l2_async_subdev asd;
+ struct v4l2_mbus_config mbus;
+ unsigned int lanes;
+ struct v4l2_subdev *sd;
+ struct v4l2_ctrl *pixel_rate_ctrl;
+ struct phy *dphy;
+};
+
+/*
+ * struct rkisp1_isp - ISP sub-device
+ *
+ * See "Cropping regions of ISP" in rkisp1-isp.c for details.
+ * The sink and source formats and crops are stored in @pad_cfg;
+ * the input size does not have to equal the sensor size.
+ * @sink_fmt: input format
+ * @src_fmt: output format
+ *
+ * @is_dphy_errctrl_disabled: whether dphy errctrl is disabled
+ * (to avoid an endless interrupt)
+ * @frame_sequence: used to synchronize frame_id between video devices.
+ */
+struct rkisp1_isp {
+ struct v4l2_subdev sd;
+ struct media_pad pads[RKISP1_ISP_PAD_MAX];
+ struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
+ const struct rkisp1_isp_mbus_info *sink_fmt;
+ const struct rkisp1_isp_mbus_info *src_fmt;
+ bool is_dphy_errctrl_disabled;
+ atomic_t frame_sequence;
+};
+
+struct rkisp1_vdev_node {
+ struct vb2_queue buf_queue;
+ struct mutex vlock; /* ioctl serialization mutex */
+ struct video_device vdev;
+ struct media_pad pad;
+};
+
+struct rkisp1_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+ union {
+ u32 buff_addr[VIDEO_MAX_PLANES];
+ void *vaddr[VIDEO_MAX_PLANES];
+ };
+};
+
+struct rkisp1_dummy_buffer {
+ void *vaddr;
+ dma_addr_t dma_addr;
+ u32 size;
+};
+
+struct rkisp1_device;
+
+/*
+ * struct rkisp1_capture - ISP capture video device
+ *
+ * @pix.fmt: buffer format
+ * @pix.info: pixel information
+ * @pix.cfg: pixel configuration
+ *
+ * @buf.lock: lock to protect buf_queue
+ * @buf.queue: queued buffer list
+ * @buf.dummy: dummy space to store dropped data
+ *
+ * The rkisp1 hardware uses shadow registers, so it needs two buffers
+ * at a time:
+ * @buf.curr: the buffer used for the current frame
+ * @buf.next: the buffer used for the next frame
+ */
+struct rkisp1_capture {
+ struct rkisp1_vdev_node vnode;
+ struct rkisp1_device *rkisp1;
+ enum rkisp1_stream_id id;
+ struct rkisp1_capture_ops *ops;
+ const struct rkisp1_capture_config *config;
+ bool is_streaming;
+ bool is_stopping;
+ wait_queue_head_t done;
+ unsigned int sp_y_stride;
+ struct {
+ /* protects queue, curr and next */
+ spinlock_t lock;
+ struct list_head queue;
+ struct rkisp1_dummy_buffer dummy;
+ struct rkisp1_buffer *curr;
+ struct rkisp1_buffer *next;
+ } buf;
+ struct {
+ const struct rkisp1_capture_fmt_cfg *cfg;
+ const struct v4l2_format_info *info;
+ struct v4l2_pix_format_mplane fmt;
+ } pix;
+};
+
+/*
+ * struct rkisp1_stats - ISP Statistics device
+ *
+ * @irq_lock: buffer queue lock
+ * @stat: stats buffer list
+ * @readout_wq: workqueue for statistics information read
+ */
+struct rkisp1_stats {
+ struct rkisp1_vdev_node vnode;
+ struct rkisp1_device *rkisp1;
+
+ spinlock_t irq_lock;
+ struct list_head stat;
+ struct v4l2_format vdev_fmt;
+ bool is_streaming;
+
+ struct workqueue_struct *readout_wq;
+ struct mutex wq_lock;
+};
+
+/*
+ * struct rkisp1_params - ISP input parameters device
+ *
+ * @cur_params: Current ISP parameters
+ * @is_first_params: the first params should take effect immediately
+ */
+struct rkisp1_params {
+ struct rkisp1_vdev_node vnode;
+ struct rkisp1_device *rkisp1;
+
+ spinlock_t config_lock;
+ struct list_head params;
+ struct rkisp1_params_cfg cur_params;
+ struct v4l2_format vdev_fmt;
+ bool is_streaming;
+ bool is_first_params;
+
+ enum v4l2_quantization quantization;
+ enum rkisp1_fmt_raw_pat_type raw_type;
+};
+
+struct rkisp1_resizer {
+ struct v4l2_subdev sd;
+ enum rkisp1_stream_id id;
+ struct rkisp1_device *rkisp1;
+ struct media_pad pads[RKISP1_ISP_PAD_MAX];
+ struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
+ const struct rkisp1_rsz_config *config;
+ enum rkisp1_fmt_pix_type fmt_type;
+};
+
+struct rkisp1_debug {
+ struct dentry *debugfs_dir;
+ unsigned long data_loss;
+ unsigned long pic_size_error;
+ unsigned long mipi_error;
+ unsigned long stats_error;
+ unsigned long stop_timeout[2];
+ unsigned long frame_drop[2];
+};
+
+/*
+ * struct rkisp1_device - ISP platform device
+ * @base_addr: base register address
+ * @active_sensor: sensor in-use, set when streaming on
+ * @isp: ISP sub-device
+ * @capture_devs: capture video devices
+ * @stats: ISP statistics output device
+ * @params: ISP input parameters device
+ */
+struct rkisp1_device {
+ void __iomem *base_addr;
+ int irq;
+ struct device *dev;
+ unsigned int clk_size;
+ struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct media_device media_dev;
+ struct v4l2_async_notifier notifier;
+ struct rkisp1_sensor_async *active_sensor;
+ struct rkisp1_isp isp;
+ struct rkisp1_resizer resizer_devs[2];
+ struct rkisp1_capture capture_devs[2];
+ struct rkisp1_stats stats;
+ struct rkisp1_params params;
+ struct media_pipeline pipe;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct rkisp1_debug debug;
+};
+
+/*
+ * struct rkisp1_isp_mbus_info - ISP pad format info
+ *
+ * Translate mbus_code to hardware format values
+ *
+ * @bus_width: bus width, used for the parallel interface
+ */
+struct rkisp1_isp_mbus_info {
+ u32 mbus_code;
+ enum rkisp1_fmt_pix_type fmt_type;
+ u32 mipi_dt;
+ u32 yuv_seq;
+ u8 bus_width;
+ enum rkisp1_fmt_raw_pat_type bayer_pat;
+ unsigned int direction;
+};
+
+static inline void
+rkisp1_write(struct rkisp1_device *rkisp1, u32 val, unsigned int addr)
+{
+ writel(val, rkisp1->base_addr + addr);
+}
+
+static inline u32 rkisp1_read(struct rkisp1_device *rkisp1, unsigned int addr)
+{
+ return readl(rkisp1->base_addr + addr);
+}
+
+void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
+ const struct v4l2_rect *bounds);
+
+void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
+ const struct v4l2_mbus_framefmt *bounds);
+
+int rkisp1_isp_register(struct rkisp1_device *rkisp1,
+ struct v4l2_device *v4l2_dev);
+void rkisp1_isp_unregister(struct rkisp1_device *rkisp1);
+
+const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code);
+
+void rkisp1_isp_isr(struct rkisp1_device *rkisp1);
+void rkisp1_mipi_isr(struct rkisp1_device *rkisp1);
+void rkisp1_capture_isr(struct rkisp1_device *rkisp1);
+void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
+void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis);
+
+int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1);
+void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1);
+
+int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1);
+void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1);
+
+int rkisp1_stats_register(struct rkisp1_stats *stats,
+ struct v4l2_device *v4l2_dev,
+ struct rkisp1_device *rkisp1);
+void rkisp1_stats_unregister(struct rkisp1_stats *stats);
+
+void rkisp1_params_configure(struct rkisp1_params *params,
+ enum rkisp1_fmt_raw_pat_type bayer_pat,
+ enum v4l2_quantization quantization);
+void rkisp1_params_disable(struct rkisp1_params *params);
+int rkisp1_params_register(struct rkisp1_params *params,
+ struct v4l2_device *v4l2_dev,
+ struct rkisp1_device *rkisp1);
+void rkisp1_params_unregister(struct rkisp1_params *params);
+
+void rkisp1_params_isr_handler(struct rkisp1_device *rkisp1, u32 isp_mis);
+
+#endif /* _RKISP1_COMMON_H */
diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c
new file mode 100644
index 000000000000..558126e66465
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-dev.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Base driver
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <media/v4l2-fwnode.h>
+
+#include "rkisp1-common.h"
+
+/*
+ * ISP Details
+ * -----------
+ *
+ * The ISP comprises:
+ * MIPI serial camera interface
+ * Image Signal Processing
+ * Many Image Enhancement Blocks
+ * Crop
+ * Resizer
+ * RGB display-ready image
+ * Image Rotation
+ *
+ * ISP Block Diagram
+ * -----------------
+ * rkisp1-resizer.c rkisp1-capture.c
+ * |====================| |=======================|
+ * rkisp1-isp.c Main Picture Path
+ * |==========================| |===============================================|
+ * +-----------+ +--+--+--+--+ +--------+ +--------+ +-----------+
+ * | | | | | | | | | | | | |
+ * +--------+ |\ | | | | | | | -->| Crop |->| RSZ |------------->| |
+ * | MIPI |--->| \ | | | | | | | | | | | | | |
+ * +--------+ | | | | |IE|IE|IE|IE| | +--------+ +--------+ | Memory |
+ * |MUX|--->| ISP |->|0 |1 |2 |3 |---+ | Interface |
+ * +--------+ | | | | | | | | | | +--------+ +--------+ +--------+ | |
+ * |Parallel|--->| / | | | | | | | | | | | | | | | |
+ * +--------+ |/ | | | | | | | -->| Crop |->| RSZ |->| RGB |->| |
+ * | | | | | | | | | | | | Rotate | | |
+ * +-----------+ +--+--+--+--+ +--------+ +--------+ +--------+ +-----------+
+ * ^
+ * +--------+ | |===============================================|
+ * | DMA |------------------------------------+ Self Picture Path
+ * +--------+
+ *
+ * rkisp1-stats.c rkisp1-params.c
+ * |===============| |===============|
+ * +---------------+ +---------------+
+ * | | | |
+ * | ISP | | ISP |
+ * | | | |
+ * +---------------+ +---------------+
+ *
+ *
+ * Media Topology
+ * --------------
+ * +----------+ +----------+
+ * | Sensor 2 | | Sensor X |
+ * ------------ ... ------------
+ * | 0 | | 0 |
+ * +----------+ +----------+ +-----------+
+ * \ | | params |
+ * \ | | (output) |
+ * +----------+ \ | +-----------+
+ * | Sensor 1 | v v |
+ * ------------ +------+------+ |
+ * | 0 |----->| 0 | 1 |<---------+
+ * +----------+ |------+------|
+ * | ISP |
+ * |------+------|
+ * +-------------| 2 | 3 |----------+
+ * | +------+------+ |
+ * | | |
+ * v v v
+ * +- ---------+ +-----------+ +-----------+
+ * | 0 | | 0 | | stats |
+ * ------------- ------------- | (capture) |
+ * | Resizer | | Resizer | +-----------+
+ * ------------| ------------|
+ * | 1 | | 1 |
+ * +-----------+ +-----------+
+ * | |
+ * v v
+ * +-----------+ +-----------+
+ * | selfpath | | mainpath |
+ * | (capture) | | (capture) |
+ * +-----------+ +-----------+
+ */
+
+struct rkisp1_match_data {
+ const char * const *clks;
+ unsigned int size;
+};
+
+/* ----------------------------------------------------------------------------
+ * Sensor DT bindings
+ */
+
+static int rkisp1_create_links(struct rkisp1_device *rkisp1)
+{
+ struct media_entity *source, *sink;
+ unsigned int flags, source_pad;
+ struct v4l2_subdev *sd;
+ unsigned int i;
+ int ret;
+
+ /* sensor links */
+ flags = MEDIA_LNK_FL_ENABLED;
+ list_for_each_entry(sd, &rkisp1->v4l2_dev.subdevs, list) {
+ if (sd == &rkisp1->isp.sd ||
+ sd == &rkisp1->resizer_devs[RKISP1_MAINPATH].sd ||
+ sd == &rkisp1->resizer_devs[RKISP1_SELFPATH].sd)
+ continue;
+
+ ret = media_entity_get_fwnode_pad(&sd->entity, sd->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(sd->dev, "failed to find src pad for %s\n",
+ sd->name);
+ return ret;
+ }
+ source_pad = ret;
+
+ ret = media_create_pad_link(&sd->entity, source_pad,
+ &rkisp1->isp.sd.entity,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ flags);
+ if (ret)
+ return ret;
+
+ flags = 0;
+ }
+
+ flags = MEDIA_LNK_FL_ENABLED;
+
+ /* create ISP->RSZ->CAP links */
+ for (i = 0; i < 2; i++) {
+ source = &rkisp1->isp.sd.entity;
+ sink = &rkisp1->resizer_devs[i].sd.entity;
+ ret = media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_VIDEO,
+ sink, RKISP1_RSZ_PAD_SINK, flags);
+ if (ret)
+ return ret;
+
+ source = sink;
+ sink = &rkisp1->capture_devs[i].vnode.vdev.entity;
+ ret = media_create_pad_link(source, RKISP1_RSZ_PAD_SRC,
+ sink, 0, flags);
+ if (ret)
+ return ret;
+ }
+
+ /* params links */
+ source = &rkisp1->params.vnode.vdev.entity;
+ sink = &rkisp1->isp.sd.entity;
+ ret = media_create_pad_link(source, 0, sink,
+ RKISP1_ISP_PAD_SINK_PARAMS, flags);
+ if (ret)
+ return ret;
+
+ /* 3A stats links */
+ source = &rkisp1->isp.sd.entity;
+ sink = &rkisp1->stats.vnode.vdev.entity;
+ return media_create_pad_link(source, RKISP1_ISP_PAD_SOURCE_STATS,
+ sink, 0, flags);
+}
+
+static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct rkisp1_device *rkisp1 =
+ container_of(notifier, struct rkisp1_device, notifier);
+ struct rkisp1_sensor_async *s_asd =
+ container_of(asd, struct rkisp1_sensor_async, asd);
+
+ s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler,
+ V4L2_CID_PIXEL_RATE);
+ s_asd->sd = sd;
+ s_asd->dphy = devm_phy_get(rkisp1->dev, "dphy");
+ if (IS_ERR(s_asd->dphy)) {
+ if (PTR_ERR(s_asd->dphy) != -EPROBE_DEFER)
+ dev_err(rkisp1->dev, "Couldn't get the MIPI D-PHY\n");
+ return PTR_ERR(s_asd->dphy);
+ }
+
+ phy_init(s_asd->dphy);
+
+ return 0;
+}
+
+static void rkisp1_subdev_notifier_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct rkisp1_sensor_async *s_asd =
+ container_of(asd, struct rkisp1_sensor_async, asd);
+
+ phy_exit(s_asd->dphy);
+}
+
+static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rkisp1_device *rkisp1 =
+ container_of(notifier, struct rkisp1_device, notifier);
+ int ret;
+
+ mutex_lock(&rkisp1->media_dev.graph_mutex);
+ ret = rkisp1_create_links(rkisp1);
+ if (ret)
+ goto unlock;
+ ret = v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
+ if (ret)
+ goto unlock;
+
+ dev_dbg(rkisp1->dev, "Async subdev notifier completed\n");
+
+unlock:
+ mutex_unlock(&rkisp1->media_dev.graph_mutex);
+ return ret;
+}
+
+static int rkisp1_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ struct rkisp1_sensor_async *s_asd =
+ container_of(asd, struct rkisp1_sensor_async, asd);
+
+ if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Only CSI2 bus type is currently supported\n");
+ return -EINVAL;
+ }
+
+ if (vep->base.port != 0) {
+ dev_err(dev, "The ISP has only port 0\n");
+ return -EINVAL;
+ }
+
+ s_asd->mbus.type = vep->bus_type;
+ s_asd->mbus.flags = vep->bus.mipi_csi2.flags;
+ s_asd->lanes = vep->bus.mipi_csi2.num_data_lanes;
+
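+ /* Translate the endpoint lane count into the matching mbus flag. */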
+ switch (vep->bus.mipi_csi2.num_data_lanes) {
+ case 1:
+ s_asd->mbus.flags |= V4L2_MBUS_CSI2_1_LANE;
+ break;
+ case 2:
+ s_asd->mbus.flags |= V4L2_MBUS_CSI2_2_LANE;
+ break;
+ case 3:
+ s_asd->mbus.flags |= V4L2_MBUS_CSI2_3_LANE;
+ break;
+ case 4:
+ s_asd->mbus.flags |= V4L2_MBUS_CSI2_4_LANE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops = {
+ .bound = rkisp1_subdev_notifier_bound,
+ .unbind = rkisp1_subdev_notifier_unbind,
+ .complete = rkisp1_subdev_notifier_complete,
+};
+
+static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
+{
+ struct v4l2_async_notifier *ntf = &rkisp1->notifier;
+ struct device *dev = rkisp1->dev;
+ int ret;
+
+ v4l2_async_notifier_init(ntf);
+
+ ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(dev, ntf,
+ sizeof(struct rkisp1_sensor_async),
+ 0, rkisp1_fwnode_parse);
+ if (ret)
+ return ret;
+
+ if (list_empty(&ntf->asd_list))
+ return -ENODEV;
+
+ ntf->ops = &rkisp1_subdev_notifier_ops;
+
+ return v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+}
+
+/* ----------------------------------------------------------------------------
+ * Power
+ */
+
+static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
+{
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
+{
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+ ret = clk_bulk_prepare_enable(rkisp1->clk_size, rkisp1->clks);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dev_pm_ops rkisp1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(rkisp1_runtime_suspend, rkisp1_runtime_resume, NULL)
+};
+
+/* ----------------------------------------------------------------------------
+ * Core
+ */
+
+static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
+{
+ int ret;
+
+ ret = rkisp1_isp_register(rkisp1, &rkisp1->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = rkisp1_resizer_devs_register(rkisp1);
+ if (ret)
+ goto err_unreg_isp_subdev;
+
+ ret = rkisp1_capture_devs_register(rkisp1);
+ if (ret)
+ goto err_unreg_resizer_devs;
+
+ ret = rkisp1_stats_register(&rkisp1->stats, &rkisp1->v4l2_dev, rkisp1);
+ if (ret)
+ goto err_unreg_capture_devs;
+
+ ret = rkisp1_params_register(&rkisp1->params,
+ &rkisp1->v4l2_dev, rkisp1);
+ if (ret)
+ goto err_unreg_stats;
+
+ ret = rkisp1_subdev_notifier(rkisp1);
+ if (ret) {
+ dev_err(rkisp1->dev,
+ "Failed to register subdev notifier(%d)\n", ret);
+ goto err_unreg_params;
+ }
+
+ return 0;
+err_unreg_params:
+ rkisp1_params_unregister(&rkisp1->params);
+err_unreg_stats:
+ rkisp1_stats_unregister(&rkisp1->stats);
+err_unreg_capture_devs:
+ rkisp1_capture_devs_unregister(rkisp1);
+err_unreg_resizer_devs:
+ rkisp1_resizer_devs_unregister(rkisp1);
+err_unreg_isp_subdev:
+ rkisp1_isp_unregister(rkisp1);
+ return ret;
+}
+
+static irqreturn_t rkisp1_isr(int irq, void *ctx)
+{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+
+ /*
+ * Call rkisp1_capture_isr() first to handle the frame that
+ * potentially completed using the current frame_sequence number before
+ * it is potentially incremented by rkisp1_isp_isr() in the vertical
+ * sync.
+ */
+ rkisp1_capture_isr(rkisp1);
+ rkisp1_isp_isr(rkisp1);
+ rkisp1_mipi_isr(rkisp1);
+
+ return IRQ_HANDLED;
+}
+
+static const char * const rk3399_isp_clks[] = {
+ "clk_isp",
+ "aclk_isp",
+ "hclk_isp",
+ "aclk_isp_wrap",
+ "hclk_isp_wrap",
+};
+
+static const struct rkisp1_match_data rk3399_isp_clk_data = {
+ .clks = rk3399_isp_clks,
+ .size = ARRAY_SIZE(rk3399_isp_clks),
+};
+
+static const struct of_device_id rkisp1_of_match[] = {
+ {
+ .compatible = "rockchip,rk3399-cif-isp",
+ .data = &rk3399_isp_clk_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rkisp1_of_match);
+
+static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_debug *debug = &rkisp1->debug;
+
+ debug->debugfs_dir = debugfs_create_dir(RKISP1_DRIVER_NAME, NULL);
+ if (!debug->debugfs_dir) {
+ dev_dbg(rkisp1->dev, "failed to create debugfs directory\n");
+ return;
+ }
+ debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
+ &debug->data_loss);
+ debugfs_create_ulong("pic_size_error", 0444, debug->debugfs_dir,
+ &debug->pic_size_error);
+ debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
+ &debug->mipi_error);
+ debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
+ &debug->stats_error);
+ debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir,
+ &debug->stop_timeout[RKISP1_MAINPATH]);
+ debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir,
+ &debug->stop_timeout[RKISP1_SELFPATH]);
+ debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir,
+ &debug->frame_drop[RKISP1_MAINPATH]);
+ debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
+ &debug->frame_drop[RKISP1_SELFPATH]);
+}
+
+static int rkisp1_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct rkisp1_match_data *clk_data;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct rkisp1_device *rkisp1;
+ struct v4l2_device *v4l2_dev;
+ unsigned int i;
+ int ret, irq;
+
+ match = of_match_node(rkisp1_of_match, node);
+ rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
+ if (!rkisp1)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, rkisp1);
+ rkisp1->dev = dev;
+
+ rkisp1->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rkisp1->base_addr))
+ return PTR_ERR(rkisp1->base_addr);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, rkisp1_isr, IRQF_SHARED,
+ dev_driver_string(dev), dev);
+ if (ret) {
+ dev_err(dev, "request irq failed: %d\n", ret);
+ return ret;
+ }
+
+ rkisp1->irq = irq;
+ clk_data = match->data;
+
+ for (i = 0; i < clk_data->size; i++)
+ rkisp1->clks[i].id = clk_data->clks[i];
+ ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+ if (ret)
+ return ret;
+ rkisp1->clk_size = clk_data->size;
+
+ pm_runtime_enable(&pdev->dev);
+
+ strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
+ sizeof(rkisp1->media_dev.model));
+ rkisp1->media_dev.dev = &pdev->dev;
+ strscpy(rkisp1->media_dev.bus_info,
+ "platform: " RKISP1_DRIVER_NAME,
+ sizeof(rkisp1->media_dev.bus_info));
+ media_device_init(&rkisp1->media_dev);
+
+ v4l2_dev = &rkisp1->v4l2_dev;
+ v4l2_dev->mdev = &rkisp1->media_dev;
+ strscpy(v4l2_dev->name, RKISP1_DRIVER_NAME, sizeof(v4l2_dev->name));
+
+ ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = media_device_register(&rkisp1->media_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register media device: %d\n", ret);
+ goto err_unreg_v4l2_dev;
+ }
+
+ ret = rkisp1_entities_register(rkisp1);
+ if (ret)
+ goto err_unreg_media_dev;
+
+ rkisp1_debug_init(rkisp1);
+
+ return 0;
+
+err_unreg_media_dev:
+ media_device_unregister(&rkisp1->media_dev);
+err_unreg_v4l2_dev:
+ v4l2_device_unregister(&rkisp1->v4l2_dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int rkisp1_remove(struct platform_device *pdev)
+{
+ struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev);
+
+ v4l2_async_notifier_unregister(&rkisp1->notifier);
+ v4l2_async_notifier_cleanup(&rkisp1->notifier);
+
+ rkisp1_params_unregister(&rkisp1->params);
+ rkisp1_stats_unregister(&rkisp1->stats);
+ rkisp1_capture_devs_unregister(rkisp1);
+ rkisp1_resizer_devs_unregister(rkisp1);
+ rkisp1_isp_unregister(rkisp1);
+
+ media_device_unregister(&rkisp1->media_dev);
+ v4l2_device_unregister(&rkisp1->v4l2_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ debugfs_remove_recursive(rkisp1->debug.debugfs_dir);
+ return 0;
+}
+
+static struct platform_driver rkisp1_drv = {
+ .driver = {
+ .name = RKISP1_DRIVER_NAME,
+ .of_match_table = of_match_ptr(rkisp1_of_match),
+ .pm = &rkisp1_pm_ops,
+ },
+ .probe = rkisp1_probe,
+ .remove = rkisp1_remove,
+};
+
+module_platform_driver(rkisp1_drv);
+MODULE_DESCRIPTION("Rockchip ISP1 platform driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c
new file mode 100644
index 000000000000..328c7ea60971
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-isp.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - ISP Subdevice
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-event.h>
+
+#include "rkisp1-common.h"
+
+#define RKISP1_DEF_SINK_PAD_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+#define RKISP1_DEF_SRC_PAD_FMT MEDIA_BUS_FMT_YUYV8_2X8
+
+#define RKISP1_ISP_DEV_NAME RKISP1_DRIVER_NAME "_isp"
+
+#define RKISP1_DIR_SRC BIT(0)
+#define RKISP1_DIR_SINK BIT(1)
+#define RKISP1_DIR_SINK_SRC (RKISP1_DIR_SINK | RKISP1_DIR_SRC)
+
+/*
+ * NOTE: The MIPI controller and the input MUX are also configured in this
+ * file, because the ISP subdev does not only describe the ISP submodule
+ * (input size, format, output size, format), it is also a virtual route
+ * device.
+ */
+
+/*
+ * Many variables in the code below are named after format/frame;
+ * see here for their meaning.
+ * Cropping in the sink pad defines the image region from the sensor.
+ * Cropping in the source pad defines the region for the Image Stabilizer (IS)
+ *
+ * Cropping regions of ISP
+ *
+ * +---------------------------------------------------------+
+ * | Sensor image |
+ * | +---------------------------------------------------+ |
+ * | | CIF_ISP_ACQ (for black level) | |
+ * | | sink pad format | |
+ * | | +--------------------------------------------+ | |
+ * | | | CIF_ISP_OUT | | |
+ * | | | sink pad crop | | |
+ * | | | +---------------------------------+ | | |
+ * | | | | CIF_ISP_IS | | | |
+ * | | | | source pad crop and format | | | |
+ * | | | +---------------------------------+ | | |
+ * | | +--------------------------------------------+ | |
+ * | +---------------------------------------------------+ |
+ * +---------------------------------------------------------+
+ */
+
+static const struct rkisp1_isp_mbus_info rkisp1_isp_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fmt_type = RKISP1_FMT_YUV,
+ .direction = RKISP1_DIR_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_RGGB,
+ .bus_width = 10,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_BGGR,
+ .bus_width = 10,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_GBRG,
+ .bus_width = 10,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW10,
+ .bayer_pat = RKISP1_RAW_GRBG,
+ .bus_width = 10,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_RGGB,
+ .bus_width = 12,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_BGGR,
+ .bus_width = 12,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_GBRG,
+ .bus_width = 12,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW12,
+ .bayer_pat = RKISP1_RAW_GRBG,
+ .bus_width = 12,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_RGGB,
+ .bus_width = 8,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_BGGR,
+ .bus_width = 8,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_GBRG,
+ .bus_width = 8,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt_type = RKISP1_FMT_BAYER,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_RAW8,
+ .bayer_pat = RKISP1_RAW_GRBG,
+ .bus_width = 8,
+ .direction = RKISP1_DIR_SINK_SRC,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .fmt_type = RKISP1_FMT_YUV,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCBYCR,
+ .bus_width = 16,
+ .direction = RKISP1_DIR_SINK,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .fmt_type = RKISP1_FMT_YUV,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCRYCB,
+ .bus_width = 16,
+ .direction = RKISP1_DIR_SINK,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .fmt_type = RKISP1_FMT_YUV,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CBYCRY,
+ .bus_width = 16,
+ .direction = RKISP1_DIR_SINK,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .fmt_type = RKISP1_FMT_YUV,
+ .mipi_dt = RKISP1_CIF_CSI2_DT_YUV422_8b,
+ .yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CRYCBY,
+ .bus_width = 16,
+ .direction = RKISP1_DIR_SINK,
+ },
+};
+
+/* ----------------------------------------------------------------------------
+ * Helpers
+ */
+
+const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rkisp1_isp_formats); i++) {
+ const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i];
+
+ if (fmt->mbus_code == mbus_code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct v4l2_subdev *rkisp1_get_remote_sensor(struct v4l2_subdev *sd)
+{
+ struct media_pad *local, *remote;
+ struct media_entity *sensor_me;
+
+ local = &sd->entity.pads[RKISP1_ISP_PAD_SINK_VIDEO];
+ remote = media_entity_remote_pad(local);
+ if (!remote) {
+ dev_warn(sd->dev, "No link between isp and sensor\n");
+ return NULL;
+ }
+
+ sensor_me = remote->entity;
+ return media_entity_to_v4l2_subdev(sensor_me);
+}
+
+static struct v4l2_mbus_framefmt *
+rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
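+ /*
+ * The active configuration is stored in the subdev's own pad_cfg
+ * array, so both the TRY and the ACTIVE case go through the "try"
+ * accessor, only with different configs.
+ */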
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&isp->sd, cfg, pad);
+ else
+ return v4l2_subdev_get_try_format(&isp->sd, isp->pad_cfg, pad);
+}
+
+static struct v4l2_rect *
+rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&isp->sd, cfg, pad);
+ else
+ return v4l2_subdev_get_try_crop(&isp->sd, isp->pad_cfg, pad);
+}
+
+/* ----------------------------------------------------------------------------
+ * Camera interface register configuration
+ */
+
+/*
+ * Image Stabilization.
+ * This should only be called when configuring CIF
+ * or at the frame end interrupt
+ */
+static void rkisp1_config_ism(struct rkisp1_device *rkisp1)
+{
+ struct v4l2_rect *src_crop =
+ rkisp1_isp_get_pad_crop(&rkisp1->isp, NULL,
+ RKISP1_ISP_PAD_SOURCE_VIDEO,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ u32 val;
+
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_RECENTER);
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_MAX_DX);
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_MAX_DY);
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IS_DISPLACE);
+ rkisp1_write(rkisp1, src_crop->left, RKISP1_CIF_ISP_IS_H_OFFS);
+ rkisp1_write(rkisp1, src_crop->top, RKISP1_CIF_ISP_IS_V_OFFS);
+ rkisp1_write(rkisp1, src_crop->width, RKISP1_CIF_ISP_IS_H_SIZE);
+ rkisp1_write(rkisp1, src_crop->height, RKISP1_CIF_ISP_IS_V_SIZE);
+
+ /* IS (Image Stabilization) is always on, working as an output crop */
+ rkisp1_write(rkisp1, 1, RKISP1_CIF_ISP_IS_CTRL);
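+ /* Trigger an immediate configuration update so the IS window takes effect. */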
+ val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+ val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
+ rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+}
+
+/*
+ * Configure the ISP blocks with the input format, size, etc.
+ */
+static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
+{
+ u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, signal = 0;
+ const struct rkisp1_isp_mbus_info *src_fmt, *sink_fmt;
+ struct rkisp1_sensor_async *sensor;
+ struct v4l2_mbus_framefmt *sink_frm;
+ struct v4l2_rect *sink_crop;
+
+ sensor = rkisp1->active_sensor;
+ sink_fmt = rkisp1->isp.sink_fmt;
+ src_fmt = rkisp1->isp.src_fmt;
+ sink_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, NULL,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ sink_crop = rkisp1_isp_get_pad_crop(&rkisp1->isp, NULL,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+
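+ /*
+ * acq_mult scales the acquisition width: raw Bayer input carries
+ * one sample per pixel, YUV422 input carries two (luma + chroma).
+ */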
+ if (sink_fmt->fmt_type == RKISP1_FMT_BAYER) {
+ acq_mult = 1;
+ if (src_fmt->fmt_type == RKISP1_FMT_BAYER) {
+ if (sensor->mbus.type == V4L2_MBUS_BT656)
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656;
+ else
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT;
+ } else {
+ rkisp1_write(rkisp1, RKISP1_CIF_ISP_DEMOSAIC_TH(0xc),
+ RKISP1_CIF_ISP_DEMOSAIC);
+
+ if (sensor->mbus.type == V4L2_MBUS_BT656)
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656;
+ else
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601;
+ }
+ } else if (sink_fmt->fmt_type == RKISP1_FMT_YUV) {
+ acq_mult = 2;
+ if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
+ } else {
+ if (sensor->mbus.type == V4L2_MBUS_BT656)
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656;
+ else
+ isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
+ }
+
+ irq_mask |= RKISP1_CIF_ISP_DATA_LOSS;
+ }
+
+ /* Set up input acquisition properties */
+ if (sensor->mbus.type == V4L2_MBUS_BT656 ||
+ sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+ if (sensor->mbus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ signal = RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE;
+ }
+
+ if (sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+ if (sensor->mbus.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ signal |= RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW;
+
+ if (sensor->mbus.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ signal |= RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW;
+ }
+
+ rkisp1_write(rkisp1, isp_ctrl, RKISP1_CIF_ISP_CTRL);
+ rkisp1_write(rkisp1, signal | sink_fmt->yuv_seq |
+ RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(sink_fmt->bayer_pat) |
+ RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL,
+ RKISP1_CIF_ISP_ACQ_PROP);
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_NR_FRAMES);
+
+ /* Acquisition Size */
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_H_OFFS);
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_ACQ_V_OFFS);
+ rkisp1_write(rkisp1,
+ acq_mult * sink_frm->width, RKISP1_CIF_ISP_ACQ_H_SIZE);
+ rkisp1_write(rkisp1, sink_frm->height, RKISP1_CIF_ISP_ACQ_V_SIZE);
+
+ /* ISP Out Area */
+ rkisp1_write(rkisp1, sink_crop->left, RKISP1_CIF_ISP_OUT_H_OFFS);
+ rkisp1_write(rkisp1, sink_crop->top, RKISP1_CIF_ISP_OUT_V_OFFS);
+ rkisp1_write(rkisp1, sink_crop->width, RKISP1_CIF_ISP_OUT_H_SIZE);
+ rkisp1_write(rkisp1, sink_crop->height, RKISP1_CIF_ISP_OUT_V_SIZE);
+
+ irq_mask |= RKISP1_CIF_ISP_FRAME | RKISP1_CIF_ISP_V_START |
+ RKISP1_CIF_ISP_PIC_SIZE_ERROR | RKISP1_CIF_ISP_FRAME_IN;
+ rkisp1_write(rkisp1, irq_mask, RKISP1_CIF_ISP_IMSC);
+
+ if (src_fmt->fmt_type == RKISP1_FMT_BAYER) {
+ rkisp1_params_disable(&rkisp1->params);
+ } else {
+ struct v4l2_mbus_framefmt *src_frm;
+
+ src_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, NULL,
+ RKISP1_ISP_PAD_SOURCE_VIDEO,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat,
+ src_frm->quantization);
+ }
+
+ return 0;
+}
+
+static int rkisp1_config_dvp(struct rkisp1_device *rkisp1)
+{
+ const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
+ u32 val, input_sel;
+
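+ /*
+ * Select the parallel (DVP) input data width; going by the _ZERO
+ * suffixes, 8- and 10-bit inputs are zero-padded to the 12-bit
+ * internal width.
+ */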
+ switch (sink_fmt->bus_width) {
+ case 8:
+ input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO;
+ break;
+ case 10:
+ input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO;
+ break;
+ case 12:
+ input_sel = RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B;
+ break;
+ default:
+ dev_err(rkisp1->dev, "Invalid bus width\n");
+ return -EINVAL;
+ }
+
+ val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ACQ_PROP);
+ rkisp1_write(rkisp1, val | input_sel, RKISP1_CIF_ISP_ACQ_PROP);
+
+ return 0;
+}
+
+static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
+{
+ const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt;
+ unsigned int lanes;
+ u32 mipi_ctrl;
+
+ /*
+ * rkisp1->active_sensor->mbus is set in the ISP or D-PHY
+ * notifier_bound function.
+ */
+ switch (rkisp1->active_sensor->mbus.flags & V4L2_MBUS_CSI2_LANES) {
+ case V4L2_MBUS_CSI2_4_LANE:
+ lanes = 4;
+ break;
+ case V4L2_MBUS_CSI2_3_LANE:
+ lanes = 3;
+ break;
+ case V4L2_MBUS_CSI2_2_LANE:
+ lanes = 2;
+ break;
+ case V4L2_MBUS_CSI2_1_LANE:
+ lanes = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
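+ /* NUM_LANES is programmed as the lane count minus one. */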
+ mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
+ RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
+ RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP |
+ RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA;
+
+ rkisp1_write(rkisp1, mipi_ctrl, RKISP1_CIF_MIPI_CTRL);
+
+ /* Configure Data Type and Virtual Channel */
+ rkisp1_write(rkisp1,
+ RKISP1_CIF_MIPI_DATA_SEL_DT(sink_fmt->mipi_dt) |
+ RKISP1_CIF_MIPI_DATA_SEL_VC(0),
+ RKISP1_CIF_MIPI_IMG_DATA_SEL);
+
+ /* Clear MIPI interrupts */
+ rkisp1_write(rkisp1, ~0, RKISP1_CIF_MIPI_ICR);
+ /*
+ * Disable the RKISP1_CIF_MIPI_ERR_DPHY interrupt here temporarily,
+ * since the ISP bus may be dead when the ISP is switched.
+ */
+ rkisp1_write(rkisp1,
+ RKISP1_CIF_MIPI_FRAME_END | RKISP1_CIF_MIPI_ERR_CSI |
+ RKISP1_CIF_MIPI_ERR_DPHY |
+ RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(0x03) |
+ RKISP1_CIF_MIPI_ADD_DATA_OVFLW,
+ RKISP1_CIF_MIPI_IMSC);
+
+ dev_dbg(rkisp1->dev, "\n MIPI_CTRL 0x%08x\n"
+ " MIPI_IMG_DATA_SEL 0x%08x\n"
+ " MIPI_STATUS 0x%08x\n"
+ " MIPI_IMSC 0x%08x\n",
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL),
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL),
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_STATUS),
+ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC));
+
+ return 0;
+}
+
+/* Configure MUX */
+static int rkisp1_config_path(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
+ u32 dpcl = rkisp1_read(rkisp1, RKISP1_CIF_VI_DPCL);
+ int ret = 0;
+
+ if (sensor->mbus.type == V4L2_MBUS_BT656 ||
+ sensor->mbus.type == V4L2_MBUS_PARALLEL) {
+ ret = rkisp1_config_dvp(rkisp1);
+ dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL;
+ } else if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+ ret = rkisp1_config_mipi(rkisp1);
+ dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_MIPI;
+ }
+
+ rkisp1_write(rkisp1, dpcl, RKISP1_CIF_VI_DPCL);
+
+ return ret;
+}
+
+/* Hardware configuration entry point */
+static int rkisp1_config_cif(struct rkisp1_device *rkisp1)
+{
+ u32 cif_id;
+ int ret;
+
+ cif_id = rkisp1_read(rkisp1, RKISP1_CIF_VI_ID);
+ dev_dbg(rkisp1->dev, "CIF_ID 0x%08x\n", cif_id);
+
+ ret = rkisp1_config_isp(rkisp1);
+ if (ret)
+ return ret;
+ ret = rkisp1_config_path(rkisp1);
+ if (ret)
+ return ret;
+ rkisp1_config_ism(rkisp1);
+
+ return 0;
+}
+
+static int rkisp1_isp_stop(struct rkisp1_device *rkisp1)
+{
+ u32 val;
+
+ /*
+ * Stop order: stop ISP (MI) at the MI frame end -> stop ISP (MIPI) ->
+ * stop ISP (ISP core) -> wait for the ISP core to switch off
+ */
+ /* stop and clear MI, MIPI, and ISP interrupts */
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_MIPI_IMSC);
+ rkisp1_write(rkisp1, ~0, RKISP1_CIF_MIPI_ICR);
+
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_IMSC);
+ rkisp1_write(rkisp1, ~0, RKISP1_CIF_ISP_ICR);
+
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_MI_IMSC);
+ rkisp1_write(rkisp1, ~0, RKISP1_CIF_MI_ICR);
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+ rkisp1_write(rkisp1, val & (~RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA),
+ RKISP1_CIF_MIPI_CTRL);
+ /* stop ISP */
+ val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+ val &= ~(RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE |
+ RKISP1_CIF_ISP_CTRL_ISP_ENABLE);
+ rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+
+ val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+ rkisp1_write(rkisp1, val | RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD,
+ RKISP1_CIF_ISP_CTRL);
+
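+ /*
+ * Wait for the ISP-off bit in the raw interrupt status, polling every
+ * 20us for up to 100us, before pulsing the MIPI and ISP software
+ * resets.
+ */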
+ readx_poll_timeout(readl, rkisp1->base_addr + RKISP1_CIF_ISP_RIS,
+ val, val & RKISP1_CIF_ISP_OFF, 20, 100);
+ rkisp1_write(rkisp1,
+ RKISP1_CIF_IRCL_MIPI_SW_RST | RKISP1_CIF_IRCL_ISP_SW_RST,
+ RKISP1_CIF_IRCL);
+ rkisp1_write(rkisp1, 0x0, RKISP1_CIF_IRCL);
+
+ return 0;
+}
+
+static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
+{
+ u32 val = RKISP1_CIF_ICCL_ISP_CLK | RKISP1_CIF_ICCL_CP_CLK |
+ RKISP1_CIF_ICCL_MRSZ_CLK | RKISP1_CIF_ICCL_SRSZ_CLK |
+ RKISP1_CIF_ICCL_JPEG_CLK | RKISP1_CIF_ICCL_MI_CLK |
+ RKISP1_CIF_ICCL_IE_CLK | RKISP1_CIF_ICCL_MIPI_CLK |
+ RKISP1_CIF_ICCL_DCROP_CLK;
+
+ rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
+}
+
+static int rkisp1_isp_start(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_sensor_async *sensor = rkisp1->active_sensor;
+ u32 val;
+
+ rkisp1_config_clk(rkisp1);
+
+ /* Activate MIPI */
+ if (sensor->mbus.type == V4L2_MBUS_CSI2_DPHY) {
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+ rkisp1_write(rkisp1, val | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA,
+ RKISP1_CIF_MIPI_CTRL);
+ }
+ /* Activate ISP */
+ val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
+ val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD |
+ RKISP1_CIF_ISP_CTRL_ISP_ENABLE |
+ RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
+ rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_CTRL);
+
+ /*
+ * CIF spec says to wait for sufficient time after enabling
+ * the MIPI interface and before starting the sensor output.
+ */
+ usleep_range(1000, 1200);
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * Subdev pad operations
+ */
+
+static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ unsigned int i, dir;
+ int pos = 0;
+
+ if (code->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
+ dir = RKISP1_DIR_SINK;
+ } else if (code->pad == RKISP1_ISP_PAD_SOURCE_VIDEO) {
+ dir = RKISP1_DIR_SRC;
+ } else {
+ if (code->index > 0)
+ return -EINVAL;
+ code->code = MEDIA_BUS_FMT_FIXED;
+ return 0;
+ }
+
+ if (code->index >= ARRAY_SIZE(rkisp1_isp_formats))
+ return -EINVAL;
+
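+ /*
+ * code->index counts only the formats that support this pad's
+ * direction, so walk the table and match the index against the
+ * running count.
+ */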
+ for (i = 0; i < ARRAY_SIZE(rkisp1_isp_formats); i++) {
+ const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i];
+
+ if (fmt->direction & dir)
+ pos++;
+
+ if (code->index == pos - 1) {
+ code->code = fmt->mbus_code;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+ struct v4l2_rect *sink_crop, *src_crop;
+
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+ sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
+
+ sink_crop = v4l2_subdev_get_try_crop(sd, cfg,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_crop->width = RKISP1_DEFAULT_WIDTH;
+ sink_crop->height = RKISP1_DEFAULT_HEIGHT;
+ sink_crop->left = 0;
+ sink_crop->top = 0;
+
+ src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ *src_fmt = *sink_fmt;
+ src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ src_crop = v4l2_subdev_get_try_crop(sd, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ *src_crop = *sink_crop;
+
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ RKISP1_ISP_PAD_SINK_PARAMS);
+ src_fmt = v4l2_subdev_get_try_format(sd, cfg,
+ RKISP1_ISP_PAD_SOURCE_STATS);
+ sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+ sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = MEDIA_BUS_FMT_FIXED;
+ *src_fmt = *sink_fmt;
+
+ return 0;
+}
+
+static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_mbus_framefmt *format,
+ unsigned int which)
+{
+ const struct rkisp1_isp_mbus_info *mbus_info;
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct v4l2_rect *src_crop;
+
+ src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+ src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+
+ src_fmt->code = format->code;
+ mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+ if (!mbus_info) {
+ src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
+ mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+ }
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ isp->src_fmt = mbus_info;
+ src_fmt->width = src_crop->width;
+ src_fmt->height = src_crop->height;
+ src_fmt->quantization = format->quantization;
+ /* full range by default */
+ if (!src_fmt->quantization)
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ *format = *src_fmt;
+}
+
+static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_rect *r, unsigned int which)
+{
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct v4l2_rect *sink_crop;
+ struct v4l2_rect *src_crop;
+
+ src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO,
+ which);
+ sink_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ which);
+
+ src_crop->left = ALIGN(r->left, 2);
+ src_crop->width = ALIGN(r->width, 2);
+ src_crop->top = r->top;
+ src_crop->height = r->height;
+ rkisp1_sd_adjust_crop_rect(src_crop, sink_crop);
+
+ *r = *src_crop;
+
+ /* Propagate to out format */
+ src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+ rkisp1_isp_set_src_fmt(isp, cfg, src_fmt, which);
+}
+
+static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_rect *r, unsigned int which)
+{
+ struct v4l2_rect *sink_crop, *src_crop;
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ which);
+ sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ which);
+
+ sink_crop->left = ALIGN(r->left, 2);
+ sink_crop->width = ALIGN(r->width, 2);
+ sink_crop->top = r->top;
+ sink_crop->height = r->height;
+ rkisp1_sd_adjust_crop(sink_crop, sink_fmt);
+
+ *r = *sink_crop;
+
+ /* Propagate to out crop */
+ src_crop = rkisp1_isp_get_pad_crop(isp, cfg,
+ RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+ rkisp1_isp_set_src_crop(isp, cfg, src_crop, which);
+}
+
+static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_mbus_framefmt *format,
+ unsigned int which)
+{
+ const struct rkisp1_isp_mbus_info *mbus_info;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *sink_crop;
+
+ sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ which);
+ sink_fmt->code = format->code;
+ mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ if (!mbus_info) {
+ sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
+ mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ }
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ isp->sink_fmt = mbus_info;
+
+ sink_fmt->width = clamp_t(u32, format->width,
+ RKISP1_ISP_MIN_WIDTH,
+ RKISP1_ISP_MAX_WIDTH);
+ sink_fmt->height = clamp_t(u32, format->height,
+ RKISP1_ISP_MIN_HEIGHT,
+ RKISP1_ISP_MAX_HEIGHT);
+
+ *format = *sink_fmt;
+
+ /* Propagate to in crop */
+ sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, RKISP1_ISP_PAD_SINK_VIDEO,
+ which);
+ rkisp1_isp_set_sink_crop(isp, cfg, sink_crop, which);
+}
+
+static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+ fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+ if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
+ rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which);
+ else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
+ rkisp1_isp_set_src_fmt(isp, cfg, &fmt->format, fmt->which);
+ else
+ fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+ if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
+ sel->pad != RKISP1_ISP_PAD_SINK_VIDEO)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = rkisp1_isp_get_pad_fmt(isp, cfg, sel->pad,
+ sel->which);
+ sel->r.height = fmt->height;
+ sel->r.width = fmt->width;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ } else {
+ sel->r = *rkisp1_isp_get_pad_crop(isp, cfg,
+ RKISP1_ISP_PAD_SINK_VIDEO,
+ sel->which);
+ }
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *rkisp1_isp_get_pad_crop(isp, cfg, sel->pad,
+ sel->which);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct rkisp1_device *rkisp1 =
+ container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
+ struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd);
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ dev_dbg(rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
+ rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which);
+ else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
+ rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rkisp1_subdev_link_validate(struct media_link *link)
+{
+ if (link->sink->index == RKISP1_ISP_PAD_SINK_PARAMS)
+ return 0;
+
+ return v4l2_subdev_link_validate(link);
+}
+
+static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
+ .enum_mbus_code = rkisp1_isp_enum_mbus_code,
+ .get_selection = rkisp1_isp_get_selection,
+ .set_selection = rkisp1_isp_set_selection,
+ .init_cfg = rkisp1_isp_init_config,
+ .get_fmt = rkisp1_isp_get_fmt,
+ .set_fmt = rkisp1_isp_set_fmt,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* ----------------------------------------------------------------------------
+ * Stream operations
+ */
+
+static int rkisp1_mipi_csi2_start(struct rkisp1_isp *isp,
+ struct rkisp1_sensor_async *sensor)
+{
+ union phy_configure_opts opts;
+ struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+ s64 pixel_clock;
+
+ if (!sensor->pixel_rate_ctrl) {
+ dev_warn(sensor->sd->dev, "No pixel rate control in subdev\n");
+ return -EPIPE;
+ }
+
+ pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl);
+ if (!pixel_clock) {
+ dev_err(sensor->sd->dev, "Invalid pixel rate value\n");
+ return -EINVAL;
+ }
+
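+ /*
+ * Derive default D-PHY timings from the sensor pixel rate, bus width
+ * and lane count, then configure and power on the PHY.
+ */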
+ phy_mipi_dphy_get_default_config(pixel_clock, isp->sink_fmt->bus_width,
+ sensor->lanes, cfg);
+ phy_set_mode(sensor->dphy, PHY_MODE_MIPI_DPHY);
+ phy_configure(sensor->dphy, &opts);
+ phy_power_on(sensor->dphy);
+
+ return 0;
+}
+
+static void rkisp1_mipi_csi2_stop(struct rkisp1_sensor_async *sensor)
+{
+ phy_power_off(sensor->dphy);
+}
+
+static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rkisp1_device *rkisp1 =
+ container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev);
+ struct v4l2_subdev *sensor_sd;
+ int ret = 0;
+
+ if (!enable) {
+ ret = rkisp1_isp_stop(rkisp1);
+ if (ret)
+ return ret;
+ rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
+ return 0;
+ }
+
+ sensor_sd = rkisp1_get_remote_sensor(sd);
+ if (!sensor_sd)
+ return -ENODEV;
+ rkisp1->active_sensor = container_of(sensor_sd->asd,
+ struct rkisp1_sensor_async, asd);
+
+ atomic_set(&rkisp1->isp.frame_sequence, -1);
+ ret = rkisp1_config_cif(rkisp1);
+ if (ret)
+ return ret;
+
+ if (rkisp1->active_sensor->mbus.type != V4L2_MBUS_CSI2_DPHY)
+ return -EINVAL;
+
+ ret = rkisp1_mipi_csi2_start(&rkisp1->isp, rkisp1->active_sensor);
+ if (ret)
+ return ret;
+
+ ret = rkisp1_isp_start(rkisp1);
+ if (ret)
+ rkisp1_mipi_csi2_stop(rkisp1->active_sensor);
+
+ return ret;
+}
+
+static int rkisp1_isp_subs_evt(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* V4L2_EVENT_FRAME_SYNC doesn't use the id field, so only zero is valid */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static const struct media_entity_operations rkisp1_isp_media_ops = {
+ .link_validate = rkisp1_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops rkisp1_isp_video_ops = {
+ .s_stream = rkisp1_isp_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops rkisp1_isp_core_ops = {
+ .subscribe_event = rkisp1_isp_subs_evt,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops rkisp1_isp_ops = {
+ .core = &rkisp1_isp_core_ops,
+ .video = &rkisp1_isp_video_ops,
+ .pad = &rkisp1_isp_pad_ops,
+};
+
+int rkisp1_isp_register(struct rkisp1_device *rkisp1,
+ struct v4l2_device *v4l2_dev)
+{
+ struct rkisp1_isp *isp = &rkisp1->isp;
+ struct media_pad *pads = isp->pads;
+ struct v4l2_subdev *sd = &isp->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &rkisp1_isp_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->entity.ops = &rkisp1_isp_media_ops;
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ sd->owner = THIS_MODULE;
+ strscpy(sd->name, RKISP1_ISP_DEV_NAME, sizeof(sd->name));
+
+ pads[RKISP1_ISP_PAD_SINK_VIDEO].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ pads[RKISP1_ISP_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK;
+ pads[RKISP1_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
+ pads[RKISP1_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
+
+ isp->sink_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SINK_PAD_FMT);
+ isp->src_fmt = rkisp1_isp_mbus_info_get(RKISP1_DEF_SRC_PAD_FMT);
+
+ ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret) {
+ dev_err(sd->dev, "Failed to register isp subdev\n");
+ goto err_cleanup_media_entity;
+ }
+
+ rkisp1_isp_init_config(sd, rkisp1->isp.pad_cfg);
+ return 0;
+
+err_cleanup_media_entity:
+ media_entity_cleanup(&sd->entity);
+
+ return ret;
+}
+
+void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
+{
+ struct v4l2_subdev *sd = &rkisp1->isp.sd;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+}
+
+/* ----------------------------------------------------------------------------
+ * Interrupt handlers
+ */
+
+void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
+{
+ u32 val, status;
+
+ status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
+ if (!status)
+ return;
+
+ rkisp1_write(rkisp1, status, RKISP1_CIF_MIPI_ICR);
+
+ /*
+ * Disable the DPHY errctrl interrupt: the errctrl signal stays
+ * asserted until the next change of line state, which may take
+ * long enough to keep the CPU stuck in this interrupt.
+ */
+ if (status & RKISP1_CIF_MIPI_ERR_CTRL(0x0f)) {
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
+ rkisp1_write(rkisp1, val & ~RKISP1_CIF_MIPI_ERR_CTRL(0x0f),
+ RKISP1_CIF_MIPI_IMSC);
+ rkisp1->isp.is_dphy_errctrl_disabled = true;
+ }
+
+ /*
+ * Enable the DPHY errctrl interrupt again if MIPI has received
+ * the whole frame without any errors.
+ */
+ if (status == RKISP1_CIF_MIPI_FRAME_END) {
+ if (rkisp1->isp.is_dphy_errctrl_disabled) {
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
+ val |= RKISP1_CIF_MIPI_ERR_CTRL(0x0f);
+ rkisp1_write(rkisp1, val, RKISP1_CIF_MIPI_IMSC);
+ rkisp1->isp.is_dphy_errctrl_disabled = false;
+ }
+ } else {
+ rkisp1->debug.mipi_error++;
+ }
+}
+
+static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
+{
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+
+ /*
+ * Increment the frame sequence on the vsync signal.
+ * This allows applications to detect dropped frames.
+ * Note that there is a debugfs counter for dropped
+ * frames, but using this event is more accurate.
+ */
+ event.u.frame_sync.frame_sequence =
+ atomic_inc_return(&isp->frame_sequence);
+ v4l2_event_queue(isp->sd.devnode, &event);
+}
+
+void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
+{
+ u32 status, isp_err;
+
+ status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
+ if (!status)
+ return;
+
+ rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
+
+ /* Vertical sync signal: the sensor has started generating a new frame */
+ if (status & RKISP1_CIF_ISP_V_START)
+ rkisp1_isp_queue_event_sof(&rkisp1->isp);
+
+ if (status & RKISP1_CIF_ISP_PIC_SIZE_ERROR) {
+ /* Clear pic_size_error */
+ isp_err = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ERR);
+ rkisp1_write(rkisp1, isp_err, RKISP1_CIF_ISP_ERR_CLR);
+ rkisp1->debug.pic_size_error++;
+ } else if (status & RKISP1_CIF_ISP_DATA_LOSS) {
+ /* keep track of data_loss in debugfs */
+ rkisp1->debug.data_loss++;
+ }
+
+ if (status & RKISP1_CIF_ISP_FRAME) {
+ u32 isp_ris;
+
+ /* New frame from the sensor received */
+ isp_ris = rkisp1_read(rkisp1, RKISP1_CIF_ISP_RIS);
+ if (isp_ris & (RKISP1_CIF_ISP_AWB_DONE |
+ RKISP1_CIF_ISP_AFM_FIN |
+ RKISP1_CIF_ISP_EXP_END |
+ RKISP1_CIF_ISP_HIST_MEASURE_RDY))
+ rkisp1_stats_isr(&rkisp1->stats, isp_ris);
+ }
+
+ /*
+ * Then update changed configs. Some of them involve
+ * a lot of register writes. Do those only once per frame.
+ * Do the updates in the order of the processing flow.
+ */
+ rkisp1_params_isr(rkisp1, status);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-params.c b/drivers/staging/media/rkisp1/rkisp1-params.c
new file mode 100644
index 000000000000..781f0ca85af1
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-params.c
@@ -0,0 +1,1630 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Params subdevice
+ *
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h> /* for ISP params */
+
+#include "rkisp1-common.h"
+
+#define RKISP1_PARAMS_DEV_NAME RKISP1_DRIVER_NAME "_params"
+
+#define RKISP1_ISP_PARAMS_REQ_BUFS_MIN 2
+#define RKISP1_ISP_PARAMS_REQ_BUFS_MAX 8
+
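+/*
+ * Register-stride helpers: the three DPCC method sets sit 0x14 bytes
+ * apart, the CC coefficients 4 bytes apart.
+ */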
+#define RKISP1_ISP_DPCC_LINE_THRESH(n) \
+ (RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_LINE_MAD_FAC(n) \
+ (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_PG_FAC(n) \
+ (RKISP1_CIF_ISP_DPCC_PG_FAC_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_RND_THRESH(n) \
+ (RKISP1_CIF_ISP_DPCC_RND_THRESH_1 + 0x14 * (n))
+#define RKISP1_ISP_DPCC_RG_FAC(n) \
+ (RKISP1_CIF_ISP_DPCC_RG_FAC_1 + 0x14 * (n))
+#define RKISP1_ISP_CC_COEFF(n) \
+ (RKISP1_CIF_ISP_CC_COEFF_0 + (n) * 4)
+
+static inline void
+rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
+{
+ u32 val;
+
+ val = rkisp1_read(params->rkisp1, reg);
+ rkisp1_write(params->rkisp1, val | bit_mask, reg);
+}
+
+static inline void
+rkisp1_param_clear_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
+{
+ u32 val;
+
+ val = rkisp1_read(params->rkisp1, reg);
+ rkisp1_write(params->rkisp1, val & ~bit_mask, reg);
+}
+
+/* ISP bad pixel correction (DPCC) interface function */
+static void rkisp1_dpcc_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_dpcc_config *arg)
+{
+ unsigned int i;
+ u32 mode;
+
+ /* avoid overriding the old enable value */
+ mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE);
+ mode &= RKISP1_CIF_ISP_DPCC_ENA;
+ mode |= arg->mode & ~RKISP1_CIF_ISP_DPCC_ENA;
+ rkisp1_write(params->rkisp1, mode, RKISP1_CIF_ISP_DPCC_MODE);
+ rkisp1_write(params->rkisp1, arg->output_mode,
+ RKISP1_CIF_ISP_DPCC_OUTPUT_MODE);
+ rkisp1_write(params->rkisp1, arg->set_use,
+ RKISP1_CIF_ISP_DPCC_SET_USE);
+
+ rkisp1_write(params->rkisp1, arg->methods[0].method,
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_1);
+ rkisp1_write(params->rkisp1, arg->methods[1].method,
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_2);
+ rkisp1_write(params->rkisp1, arg->methods[2].method,
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_3);
+ for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) {
+ rkisp1_write(params->rkisp1, arg->methods[i].line_thresh,
+ RKISP1_ISP_DPCC_LINE_THRESH(i));
+ rkisp1_write(params->rkisp1, arg->methods[i].line_mad_fac,
+ RKISP1_ISP_DPCC_LINE_MAD_FAC(i));
+ rkisp1_write(params->rkisp1, arg->methods[i].pg_fac,
+ RKISP1_ISP_DPCC_PG_FAC(i));
+ rkisp1_write(params->rkisp1, arg->methods[i].rnd_thresh,
+ RKISP1_ISP_DPCC_RND_THRESH(i));
+ rkisp1_write(params->rkisp1, arg->methods[i].rg_fac,
+ RKISP1_ISP_DPCC_RG_FAC(i));
+ }
+
+ rkisp1_write(params->rkisp1, arg->rnd_offs,
+ RKISP1_CIF_ISP_DPCC_RND_OFFS);
+ rkisp1_write(params->rkisp1, arg->ro_limits,
+ RKISP1_CIF_ISP_DPCC_RO_LIMITS);
+}
+
+/* ISP black level subtraction interface function */
+static void rkisp1_bls_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_bls_config *arg)
+{
+ /* avoid overriding the old enable value */
+ u32 new_control;
+
+ new_control = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_BLS_CTRL);
+ new_control &= RKISP1_CIF_ISP_BLS_ENA;
+ /* fixed subtraction values */
+ if (!arg->enable_auto) {
+ const struct rkisp1_cif_isp_bls_fixed_val *pval =
+ &arg->fixed_val;
+
+ switch (params->raw_type) {
+ case RKISP1_RAW_BGGR:
+ rkisp1_write(params->rkisp1,
+ pval->r, RKISP1_CIF_ISP_BLS_D_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gr, RKISP1_CIF_ISP_BLS_C_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gb, RKISP1_CIF_ISP_BLS_B_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->b, RKISP1_CIF_ISP_BLS_A_FIXED);
+ break;
+ case RKISP1_RAW_GBRG:
+ rkisp1_write(params->rkisp1,
+ pval->r, RKISP1_CIF_ISP_BLS_C_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gr, RKISP1_CIF_ISP_BLS_D_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gb, RKISP1_CIF_ISP_BLS_A_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->b, RKISP1_CIF_ISP_BLS_B_FIXED);
+ break;
+ case RKISP1_RAW_GRBG:
+ rkisp1_write(params->rkisp1,
+ pval->r, RKISP1_CIF_ISP_BLS_B_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gr, RKISP1_CIF_ISP_BLS_A_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gb, RKISP1_CIF_ISP_BLS_D_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->b, RKISP1_CIF_ISP_BLS_C_FIXED);
+ break;
+ case RKISP1_RAW_RGGB:
+ rkisp1_write(params->rkisp1,
+ pval->r, RKISP1_CIF_ISP_BLS_A_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gr, RKISP1_CIF_ISP_BLS_B_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->gb, RKISP1_CIF_ISP_BLS_C_FIXED);
+ rkisp1_write(params->rkisp1,
+ pval->b, RKISP1_CIF_ISP_BLS_D_FIXED);
+ break;
+ default:
+ break;
+ }
+
+ } else {
+ if (arg->en_windows & BIT(1)) {
+ rkisp1_write(params->rkisp1, arg->bls_window2.h_offs,
+ RKISP1_CIF_ISP_BLS_H2_START);
+ rkisp1_write(params->rkisp1, arg->bls_window2.h_size,
+ RKISP1_CIF_ISP_BLS_H2_STOP);
+ rkisp1_write(params->rkisp1, arg->bls_window2.v_offs,
+ RKISP1_CIF_ISP_BLS_V2_START);
+ rkisp1_write(params->rkisp1, arg->bls_window2.v_size,
+ RKISP1_CIF_ISP_BLS_V2_STOP);
+ new_control |= RKISP1_CIF_ISP_BLS_WINDOW_2;
+ }
+
+ if (arg->en_windows & BIT(0)) {
+ rkisp1_write(params->rkisp1, arg->bls_window1.h_offs,
+ RKISP1_CIF_ISP_BLS_H1_START);
+ rkisp1_write(params->rkisp1, arg->bls_window1.h_size,
+ RKISP1_CIF_ISP_BLS_H1_STOP);
+ rkisp1_write(params->rkisp1, arg->bls_window1.v_offs,
+ RKISP1_CIF_ISP_BLS_V1_START);
+ rkisp1_write(params->rkisp1, arg->bls_window1.v_size,
+ RKISP1_CIF_ISP_BLS_V1_STOP);
+ new_control |= RKISP1_CIF_ISP_BLS_WINDOW_1;
+ }
+
+ rkisp1_write(params->rkisp1, arg->bls_samples,
+ RKISP1_CIF_ISP_BLS_SAMPLES);
+
+ new_control |= RKISP1_CIF_ISP_BLS_MODE_MEASURED;
+ }
+ rkisp1_write(params->rkisp1, new_control, RKISP1_CIF_ISP_BLS_CTRL);
+}
+
+/* ISP LS correction interface function */
+static void
+rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
+{
+ unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+
+ isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+
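+ /*
+ * The LSC SRAM holds two tables; program the one that is not
+ * currently active, then select it below so the update takes
+ * effect cleanly.
+ */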
+ /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = (17 * 18) >> 1 */
+ sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
+ rkisp1_write(params->rkisp1, sram_addr,
+ RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr,
+ RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+
+ /* program data tables (table size is 9 * 17 = 153) */
+ for (i = 0;
+ i < RKISP1_CIF_ISP_LSC_SECTORS_MAX * RKISP1_CIF_ISP_LSC_SECTORS_MAX;
+ i += RKISP1_CIF_ISP_LSC_SECTORS_MAX) {
+ /*
+ * 17 sectors with 2 values in one DWORD = 9
+ * DWORDs (2nd value of last DWORD unused)
+ */
+ for (j = 0; j < RKISP1_CIF_ISP_LSC_SECTORS_MAX - 1; j += 2) {
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i + j],
+ pconfig->r_data_tbl[i + j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i + j],
+ pconfig->gr_data_tbl[i + j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i + j],
+ pconfig->gb_data_tbl[i + j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i + j],
+ pconfig->b_data_tbl[i + j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i + j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i + j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i + j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i + j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+ isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_1;
+ rkisp1_write(params->rkisp1, isp_lsc_table_sel,
+ RKISP1_CIF_ISP_LSC_TABLE_SEL);
+}
+
+static void rkisp1_lsc_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *arg)
+{
+ unsigned int i, data;
+ u32 lsc_ctrl;
+
+ /* The LSC block must be disabled while configuring; store the current status first. */
+ lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ rkisp1_lsc_correct_matrix_config(params, arg);
+
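+ /*
+ * Each XSIZE/XGRAD/YSIZE/YGRAD register packs two adjacent table
+ * entries, hence the i * 2 table indexing and i * 4 register stride.
+ */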
+ for (i = 0; i < 4; i++) {
+ /* program x size tables */
+ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2],
+ arg->x_size_tbl[i * 2 + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4);
+
+ /* program x grad tables */
+ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2],
+ arg->x_grad_tbl[i * 2 + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4);
+
+ /* program y size tables */
+ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2],
+ arg->y_size_tbl[i * 2 + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4);
+
+ /* program y grad tables */
+ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2],
+ arg->y_grad_tbl[i * 2 + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4);
+ }
+
+ /* restore the lsc ctrl status */
+ if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) {
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ } else {
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ }
+}
+
+/* ISP Filtering function */
+static void rkisp1_flt_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_flt_config *arg)
+{
+ u32 filt_mode;
+
+ rkisp1_write(params->rkisp1,
+ arg->thresh_bl0, RKISP1_CIF_ISP_FILT_THRESH_BL0);
+ rkisp1_write(params->rkisp1,
+ arg->thresh_bl1, RKISP1_CIF_ISP_FILT_THRESH_BL1);
+ rkisp1_write(params->rkisp1,
+ arg->thresh_sh0, RKISP1_CIF_ISP_FILT_THRESH_SH0);
+ rkisp1_write(params->rkisp1,
+ arg->thresh_sh1, RKISP1_CIF_ISP_FILT_THRESH_SH1);
+ rkisp1_write(params->rkisp1, arg->fac_bl0, RKISP1_CIF_ISP_FILT_FAC_BL0);
+ rkisp1_write(params->rkisp1, arg->fac_bl1, RKISP1_CIF_ISP_FILT_FAC_BL1);
+ rkisp1_write(params->rkisp1, arg->fac_mid, RKISP1_CIF_ISP_FILT_FAC_MID);
+ rkisp1_write(params->rkisp1, arg->fac_sh0, RKISP1_CIF_ISP_FILT_FAC_SH0);
+ rkisp1_write(params->rkisp1, arg->fac_sh1, RKISP1_CIF_ISP_FILT_FAC_SH1);
+ rkisp1_write(params->rkisp1,
+ arg->lum_weight, RKISP1_CIF_ISP_FILT_LUM_WEIGHT);
+
+ /* avoid overriding the old enable value */
+ filt_mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE);
+ filt_mode &= RKISP1_CIF_ISP_FLT_ENA;
+ if (arg->mode)
+ filt_mode |= RKISP1_CIF_ISP_FLT_MODE_DNR;
+ filt_mode |= RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
+ RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
+ RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1);
+ rkisp1_write(params->rkisp1, filt_mode, RKISP1_CIF_ISP_FILT_MODE);
+}
+
+/* ISP demosaic interface function */
+static int rkisp1_bdm_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_bdm_config *arg)
+{
+ u32 bdm_th;
+
+ /* avoid overriding the old enable value */
+ bdm_th = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DEMOSAIC);
+ bdm_th &= RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
+ bdm_th |= arg->demosaic_th & ~RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
+ /* set demosaic threshold */
+ rkisp1_write(params->rkisp1, bdm_th, RKISP1_CIF_ISP_DEMOSAIC);
+ return 0;
+}
+
+/* ISP sensor degamma (SDG) interface function */
+static void rkisp1_sdg_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_sdg_config *arg)
+{
+ unsigned int i;
+
+ rkisp1_write(params->rkisp1,
+ arg->xa_pnts.gamma_dx0, RKISP1_CIF_ISP_GAMMA_DX_LO);
+ rkisp1_write(params->rkisp1,
+ arg->xa_pnts.gamma_dx1, RKISP1_CIF_ISP_GAMMA_DX_HI);
+
+ for (i = 0; i < RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE; i++) {
+ rkisp1_write(params->rkisp1, arg->curve_r.gamma_y[i],
+ RKISP1_CIF_ISP_GAMMA_R_Y0 + i * 4);
+ rkisp1_write(params->rkisp1, arg->curve_g.gamma_y[i],
+ RKISP1_CIF_ISP_GAMMA_G_Y0 + i * 4);
+ rkisp1_write(params->rkisp1, arg->curve_b.gamma_y[i],
+ RKISP1_CIF_ISP_GAMMA_B_Y0 + i * 4);
+ }
+}
+
+/* ISP gamma out correction (GOC) interface function */
+static void rkisp1_goc_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
+{
+ unsigned int i;
+
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
+
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+ rkisp1_write(params->rkisp1, arg->gamma_y[i],
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
+}
+
+/* ISP Cross Talk */
+static void rkisp1_ctk_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_ctk_config *arg)
+{
+ rkisp1_write(params->rkisp1, arg->coeff0, RKISP1_CIF_ISP_CT_COEFF_0);
+ rkisp1_write(params->rkisp1, arg->coeff1, RKISP1_CIF_ISP_CT_COEFF_1);
+ rkisp1_write(params->rkisp1, arg->coeff2, RKISP1_CIF_ISP_CT_COEFF_2);
+ rkisp1_write(params->rkisp1, arg->coeff3, RKISP1_CIF_ISP_CT_COEFF_3);
+ rkisp1_write(params->rkisp1, arg->coeff4, RKISP1_CIF_ISP_CT_COEFF_4);
+ rkisp1_write(params->rkisp1, arg->coeff5, RKISP1_CIF_ISP_CT_COEFF_5);
+ rkisp1_write(params->rkisp1, arg->coeff6, RKISP1_CIF_ISP_CT_COEFF_6);
+ rkisp1_write(params->rkisp1, arg->coeff7, RKISP1_CIF_ISP_CT_COEFF_7);
+ rkisp1_write(params->rkisp1, arg->coeff8, RKISP1_CIF_ISP_CT_COEFF_8);
+ rkisp1_write(params->rkisp1, arg->ct_offset_r,
+ RKISP1_CIF_ISP_CT_OFFSET_R);
+ rkisp1_write(params->rkisp1, arg->ct_offset_g,
+ RKISP1_CIF_ISP_CT_OFFSET_G);
+ rkisp1_write(params->rkisp1, arg->ct_offset_b,
+ RKISP1_CIF_ISP_CT_OFFSET_B);
+}
+
+static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
+{
+ if (en)
+ return;
+
+ /* Write back the default values. */
+ rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_0);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_1);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_2);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_3);
+ rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_4);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_5);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_6);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_COEFF_7);
+ rkisp1_write(params->rkisp1, 0x80, RKISP1_CIF_ISP_CT_COEFF_8);
+
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_R);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_G);
+ rkisp1_write(params->rkisp1, 0, RKISP1_CIF_ISP_CT_OFFSET_B);
+}
+
+/* ISP White Balance Mode */
+static void rkisp1_awb_meas_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
+{
+ u32 reg_val = 0;
+
+ /* based on the mode, configure the AWB module */
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
+ /* Reference Cb and Cr */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF);
+ /* Yc Threshold */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
+ RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
+ RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH);
+ }
+
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ if (arg->enable_ymax_cmp)
+ reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ else
+ reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+
+ /* window offset */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS);
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS);
+ /* AWB window size */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE);
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE);
+ /* Number of frames */
+ rkisp1_write(params->rkisp1,
+ arg->frames, RKISP1_CIF_ISP_AWB_FRAMES);
+}
+
+static void
+rkisp1_awb_meas_enable(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
+{
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+
+ /* switch off */
+ reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
+
+ if (en) {
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
+ else
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
+
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+
+ /* Measurements require the AWB block to be active. */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ } else {
+ rkisp1_write(params->rkisp1,
+ reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ }
+}
+
+static void
+rkisp1_awb_gain_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
+{
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB);
+}
+
+static void rkisp1_aec_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
+{
+ unsigned int block_hsize, block_vsize;
+ u32 exp_ctrl;
+
+ /* avoid overriding the old enable value */
+ exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
+ exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
+ if (arg->autostop)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
+ if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
+ rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+
+ rkisp1_write(params->rkisp1,
+ arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET);
+ rkisp1_write(params->rkisp1,
+ arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET);
+
+ block_hsize = arg->meas_window.h_size /
+ RKISP1_CIF_ISP_EXP_COLUMN_NUM - 1;
+ block_vsize = arg->meas_window.v_size /
+ RKISP1_CIF_ISP_EXP_ROW_NUM - 1;
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET(block_hsize),
+ RKISP1_CIF_ISP_EXP_H_SIZE);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET(block_vsize),
+ RKISP1_CIF_ISP_EXP_V_SIZE);
+}
+
+static void rkisp1_cproc_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_cproc_config *arg)
+{
+ struct rkisp1_cif_isp_isp_other_cfg *cur_other_cfg =
+ &params->cur_params.others;
+ struct rkisp1_cif_isp_ie_config *cur_ie_config =
+ &cur_other_cfg->ie_config;
+ u32 effect = cur_ie_config->effect;
+ u32 quantization = params->quantization;
+
+ rkisp1_write(params->rkisp1, arg->contrast, RKISP1_CIF_C_PROC_CONTRAST);
+ rkisp1_write(params->rkisp1, arg->hue, RKISP1_CIF_C_PROC_HUE);
+ rkisp1_write(params->rkisp1, arg->sat, RKISP1_CIF_C_PROC_SATURATION);
+ rkisp1_write(params->rkisp1, arg->brightness,
+ RKISP1_CIF_C_PROC_BRIGHTNESS);
+
+ if (quantization != V4L2_QUANTIZATION_FULL_RANGE ||
+ effect != V4L2_COLORFX_NONE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_YOUT_FULL |
+ RKISP1_CIF_C_PROC_YIN_FULL |
+ RKISP1_CIF_C_PROC_COUT_FULL);
+ } else {
+ rkisp1_param_set_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_YOUT_FULL |
+ RKISP1_CIF_C_PROC_YIN_FULL |
+ RKISP1_CIF_C_PROC_COUT_FULL);
+ }
+}
+
+static void rkisp1_hst_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
+{
+ unsigned int block_hsize, block_vsize;
+ static const u32 hist_weight_regs[] = {
+ RKISP1_CIF_ISP_HIST_WEIGHT_00TO30,
+ RKISP1_CIF_ISP_HIST_WEIGHT_40TO21,
+ RKISP1_CIF_ISP_HIST_WEIGHT_31TO12,
+ RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
+ RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
+ RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
+ RKISP1_CIF_ISP_HIST_WEIGHT_44,
+ };
+ const u8 *weight;
+ unsigned int i;
+ u32 hist_prop;
+
+ /* avoid overriding the old enable value */
+ hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP);
+ hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+ hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP);
+ rkisp1_write(params->rkisp1,
+ arg->meas_window.h_offs,
+ RKISP1_CIF_ISP_HIST_H_OFFS);
+ rkisp1_write(params->rkisp1,
+ arg->meas_window.v_offs,
+ RKISP1_CIF_ISP_HIST_V_OFFS);
+
+ block_hsize = arg->meas_window.h_size /
+ RKISP1_CIF_ISP_HIST_COLUMN_NUM - 1;
+ block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM - 1;
+
+ rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE);
+ rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE);
+
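+ /*
+ * The 5x5 measurement-window weights are packed four per register in
+ * the order given by hist_weight_regs (the last register holds the
+ * single remaining weight, per the _44 name).
+ */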
+ weight = arg->hist_weight;
+ for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_WEIGHT_SET(weight[0],
+ weight[1],
+ weight[2],
+ weight[3]),
+ hist_weight_regs[i]);
+}
+
+static void
+rkisp1_hst_enable(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
+{
+ if (en) {
+ u32 hist_prop = rkisp1_read(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_PROP);
+
+ hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+ hist_prop |= arg->mode;
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ hist_prop);
+ } else {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ RKISP1_CIF_ISP_HIST_PROP_MODE_MASK);
+ }
+}
+
+static void rkisp1_afm_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
+{
+ size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
+ arg->num_afm_win);
+ u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
+ unsigned int i;
+
+ /* Switch off to configure. */
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+
+ for (i = 0; i < num_of_win; i++) {
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_LT_A + i * 8);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
+ arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
+ arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+ }
+ rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
+ rkisp1_write(params->rkisp1, arg->var_shift,
+ RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+ /* restore afm status */
+ rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+}
+
+static void rkisp1_ie_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_ie_config *arg)
+{
+ u32 eff_ctrl;
+
+ eff_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL);
+ eff_ctrl &= ~RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK;
+
+ if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL;
+
+ switch (arg->effect) {
+ case V4L2_COLORFX_SEPIA:
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ rkisp1_write(params->rkisp1, arg->eff_tint,
+ RKISP1_CIF_IMG_EFF_TINT);
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
+ break;
+ /*
+ * Color selection is similar to water color (AQUA):
+ * grayscale + selected color with threshold
+ */
+ case V4L2_COLORFX_AQUA:
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL;
+ rkisp1_write(params->rkisp1, arg->color_sel,
+ RKISP1_CIF_IMG_EFF_COLOR_SEL);
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS;
+ rkisp1_write(params->rkisp1, arg->eff_mat_1,
+ RKISP1_CIF_IMG_EFF_MAT_1);
+ rkisp1_write(params->rkisp1, arg->eff_mat_2,
+ RKISP1_CIF_IMG_EFF_MAT_2);
+ rkisp1_write(params->rkisp1, arg->eff_mat_3,
+ RKISP1_CIF_IMG_EFF_MAT_3);
+ break;
+ case V4L2_COLORFX_SKETCH:
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH;
+ rkisp1_write(params->rkisp1, arg->eff_mat_3,
+ RKISP1_CIF_IMG_EFF_MAT_3);
+ rkisp1_write(params->rkisp1, arg->eff_mat_4,
+ RKISP1_CIF_IMG_EFF_MAT_4);
+ rkisp1_write(params->rkisp1, arg->eff_mat_5,
+ RKISP1_CIF_IMG_EFF_MAT_5);
+ break;
+ case V4L2_COLORFX_BW:
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE;
+ break;
+ default:
+ break;
+ }
+
+ rkisp1_write(params->rkisp1, eff_ctrl, RKISP1_CIF_IMG_EFF_CTRL);
+}
+
+static void rkisp1_ie_enable(struct rkisp1_params *params, bool en)
+{
+ if (en) {
+ rkisp1_param_set_bits(params, RKISP1_CIF_ICCL,
+ RKISP1_CIF_ICCL_IE_CLK);
+ rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL_ENABLE,
+ RKISP1_CIF_IMG_EFF_CTRL);
+ rkisp1_param_set_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
+ RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD);
+ } else {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
+ RKISP1_CIF_IMG_EFF_CTRL_ENABLE);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ICCL,
+ RKISP1_CIF_ICCL_IE_CLK);
+ }
+}
+
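+/*
+ * ISP color space matrix (CSM) function: the two 9-entry tables below are
+ * the 3x3 RGB->YCbCr coefficient matrices for full-range and limited-range
+ * encoding (in the fixed-point layout expected by the CC_COEFF registers).
+ */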
+static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range)
+{
+ static const u16 full_range_coeff[] = {
+ 0x0026, 0x004b, 0x000f,
+ 0x01ea, 0x01d6, 0x0040,
+ 0x0040, 0x01ca, 0x01f6
+ };
+ static const u16 limited_range_coeff[] = {
+ 0x0021, 0x0040, 0x000d,
+ 0x01ed, 0x01db, 0x0038,
+ 0x0038, 0x01d1, 0x01f7,
+ };
+ unsigned int i;
+
+ if (full_range) {
+ for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++)
+ rkisp1_write(params->rkisp1, full_range_coeff[i],
+ RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
+ RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(limited_range_coeff); i++)
+ rkisp1_write(params->rkisp1, limited_range_coeff[i],
+ RKISP1_CIF_ISP_CC_COEFF_0 + i * 4);
+
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
+ RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
+ }
+}
+
+/* ISP De-noise Pre-Filter (DPF) function */
+static void rkisp1_dpf_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_dpf_config *arg)
+{
+ unsigned int isp_dpf_mode, spatial_coeff, i;
+
+ switch (arg->gain.mode) {
+ case RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS:
+ isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN |
+ RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
+ break;
+ case RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS:
+ isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP;
+ break;
+ case RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS:
+ isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN |
+ RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP |
+ RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP;
+ break;
+ case RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS:
+ isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
+ break;
+ case RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS:
+ isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP |
+ RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
+ break;
+ case RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED:
+ default:
+ isp_dpf_mode = 0;
+ break;
+ }
+
+ if (arg->nll.scale_mode == RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC)
+ isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_NLL_SEGMENTATION;
+ if (arg->rb_flt.fltsize == RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9)
+ isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_RB_FLTSIZE_9x9;
+ if (!arg->rb_flt.r_enable)
+ isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_R_FLT_DIS;
+ if (!arg->rb_flt.b_enable)
+ isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_B_FLT_DIS;
+ if (!arg->g_flt.gb_enable)
+ isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_GB_FLT_DIS;
+ if (!arg->g_flt.gr_enable)
+ isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_GR_FLT_DIS;
+
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+ isp_dpf_mode);
+ rkisp1_write(params->rkisp1, arg->gain.nf_b_gain,
+ RKISP1_CIF_ISP_DPF_NF_GAIN_B);
+ rkisp1_write(params->rkisp1, arg->gain.nf_r_gain,
+ RKISP1_CIF_ISP_DPF_NF_GAIN_R);
+ rkisp1_write(params->rkisp1, arg->gain.nf_gb_gain,
+ RKISP1_CIF_ISP_DPF_NF_GAIN_GB);
+ rkisp1_write(params->rkisp1, arg->gain.nf_gr_gain,
+ RKISP1_CIF_ISP_DPF_NF_GAIN_GR);
+
+ for (i = 0; i < RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS; i++) {
+ rkisp1_write(params->rkisp1, arg->nll.coeff[i],
+ RKISP1_CIF_ISP_DPF_NULL_COEFF_0 + i * 4);
+ }
+
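+ /*
+ * Pack the six spatial filter coefficients into two registers:
+ * four bytes in the first, two in the second.
+ */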
+ spatial_coeff = arg->g_flt.spatial_coeff[0] |
+ (arg->g_flt.spatial_coeff[1] << 8) |
+ (arg->g_flt.spatial_coeff[2] << 16) |
+ (arg->g_flt.spatial_coeff[3] << 24);
+ rkisp1_write(params->rkisp1, spatial_coeff,
+ RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4);
+
+ spatial_coeff = arg->g_flt.spatial_coeff[4] |
+ (arg->g_flt.spatial_coeff[5] << 8);
+ rkisp1_write(params->rkisp1, spatial_coeff,
+ RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6);
+
+ spatial_coeff = arg->rb_flt.spatial_coeff[0] |
+ (arg->rb_flt.spatial_coeff[1] << 8) |
+ (arg->rb_flt.spatial_coeff[2] << 16) |
+ (arg->rb_flt.spatial_coeff[3] << 24);
+ rkisp1_write(params->rkisp1, spatial_coeff,
+ RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4);
+
+ spatial_coeff = arg->rb_flt.spatial_coeff[4] |
+ (arg->rb_flt.spatial_coeff[5] << 8);
+ rkisp1_write(params->rkisp1, spatial_coeff,
+ RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6);
+}
+
+static void
+rkisp1_dpf_strength_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_dpf_strength_config *arg)
+{
+ rkisp1_write(params->rkisp1, arg->b, RKISP1_CIF_ISP_DPF_STRENGTH_B);
+ rkisp1_write(params->rkisp1, arg->g, RKISP1_CIF_ISP_DPF_STRENGTH_G);
+ rkisp1_write(params->rkisp1, arg->r, RKISP1_CIF_ISP_DPF_STRENGTH_R);
+}
+
+static void
+rkisp1_isp_isr_other_config(struct rkisp1_params *params,
+ const struct rkisp1_params_cfg *new_params)
+{
+ unsigned int module_en_update, module_cfg_update, module_ens;
+
+ module_en_update = new_params->module_en_update;
+ module_cfg_update = new_params->module_cfg_update;
+ module_ens = new_params->module_ens;
+
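+ /*
+ * For each module: module_cfg_update requests a reconfiguration,
+ * while module_en_update means the on/off state from module_ens
+ * should be applied.
+ */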
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPCC) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC)) {
+ /* update dpcc config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC))
+ rkisp1_dpcc_config(params,
+ &new_params->others.dpcc_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_DPCC) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_DPCC))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_BLS) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_BLS)) {
+ /* update bls config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_BLS))
+ rkisp1_bls_config(params,
+ &new_params->others.bls_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_BLS) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_BLS))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_SDG) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_SDG)) {
+ /* update sdg config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_SDG))
+ rkisp1_sdg_config(params,
+ &new_params->others.sdg_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_SDG) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_SDG))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_LSC) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC)) {
+ /* update lsc config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC))
+ rkisp1_lsc_config(params,
+ &new_params->others.lsc_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_LSC))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)) {
+ /* update awb gains */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN))
+ rkisp1_awb_gain_config(params,
+ &new_params->others.awb_gain_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_AWB_GAIN))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_BDM) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_BDM)) {
+ /* update bdm config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_BDM))
+ rkisp1_bdm_config(params,
+ &new_params->others.bdm_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_BDM) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_BDM))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_FLT) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_FLT)) {
+ /* update filter config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_FLT))
+ rkisp1_flt_config(params,
+ &new_params->others.flt_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_FLT) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_FLT))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_CTK) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_CTK)) {
+ /* update ctk config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_CTK))
+ rkisp1_ctk_config(params,
+ &new_params->others.ctk_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_CTK)
+ rkisp1_ctk_enable(params,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_CTK));
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_GOC) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC)) {
+ /* update goc config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC))
+ rkisp1_goc_config(params,
+ &new_params->others.goc_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_GOC) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_GOC))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_CPROC) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_CPROC)) {
+ /* update cproc config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_CPROC)) {
+ rkisp1_cproc_config(params,
+ &new_params->others.cproc_config);
+ }
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_CPROC) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_CPROC))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_IE) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_IE)) {
+ /* update ie config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_IE))
+ rkisp1_ie_config(params,
+ &new_params->others.ie_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_IE)
+ rkisp1_ie_enable(params,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_IE));
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPF) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF)) {
+ /* update dpf config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF))
+ rkisp1_dpf_config(params,
+ &new_params->others.dpf_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_DPF) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_DPF))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPF_STRENGTH) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF_STRENGTH)) {
+ /* update dpf strength config */
+ rkisp1_dpf_strength_config(params,
+ &new_params->others.dpf_strength_config);
+ }
+}
+
+static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
+ struct rkisp1_params_cfg *new_params)
+{
+ unsigned int module_en_update, module_cfg_update, module_ens;
+
+ module_en_update = new_params->module_en_update;
+ module_cfg_update = new_params->module_cfg_update;
+ module_ens = new_params->module_ens;
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_AWB) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB)) {
+ /* update awb config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB))
+ rkisp1_awb_meas_config(params,
+ &new_params->meas.awb_meas_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB)
+ rkisp1_awb_meas_enable(params,
+ &new_params->meas.awb_meas_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_AFC) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC)) {
+ /* update afc config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC))
+ rkisp1_afm_config(params,
+ &new_params->meas.afc_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_AFC) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_AFC))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+ }
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_HST) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_HST)) {
+ /* update hst config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_HST))
+ rkisp1_hst_config(params,
+ &new_params->meas.hst_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_HST)
+ rkisp1_hst_enable(params,
+ &new_params->meas.hst_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
+ }
+
+ if ((module_en_update & RKISP1_CIF_ISP_MODULE_AEC) ||
+ (module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC)) {
+ /* update aec config */
+ if ((module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC))
+ rkisp1_aec_config(params,
+ &new_params->meas.aec_config);
+
+ if (module_en_update & RKISP1_CIF_ISP_MODULE_AEC) {
+ if (!!(module_ens & RKISP1_CIF_ISP_MODULE_AEC))
+ rkisp1_param_set_bits(params,
+ RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+ else
+ rkisp1_param_clear_bits(params,
+ RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+ }
+ }
+}
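+
+/*
+ * Both rkisp1_isp_isr_other_config() and rkisp1_isp_isr_meas_config()
+ * consume the same three bitmasks from the params buffer:
+ * module_cfg_update selects the modules whose configuration is rewritten,
+ * module_en_update selects the modules whose enable state changes, and
+ * module_ens holds the new enable state for the latter. A hypothetical
+ * userspace sketch (not part of this patch) that reconfigures and enables
+ * a single module in one buffer:
+ *
+ *	struct rkisp1_params_cfg cfg = { 0 };
+ *
+ *	cfg.module_cfg_update = RKISP1_CIF_ISP_MODULE_BLS;
+ *	cfg.module_en_update = RKISP1_CIF_ISP_MODULE_BLS;
+ *	cfg.module_ens = RKISP1_CIF_ISP_MODULE_BLS;
+ *	cfg.others.bls_config = bls;
+ *
+ * where bls is an assumed, pre-filled struct rkisp1_cif_isp_bls_config.
+ */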
+
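+/*
+ * Per-frame params ISR: on the frame interrupt, take the oldest queued
+ * params buffer, apply its "other" and measurement configuration, force
+ * the shadow registers to latch via RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD and
+ * complete the buffer with the current frame sequence number.
+ */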
+void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis)
+{
+ unsigned int frame_sequence = atomic_read(&rkisp1->isp.frame_sequence);
+ struct rkisp1_params *params = &rkisp1->params;
+ struct rkisp1_params_cfg *new_params;
+ struct rkisp1_buffer *cur_buf = NULL;
+
+ spin_lock(&params->config_lock);
+ if (!params->is_streaming) {
+ spin_unlock(&params->config_lock);
+ return;
+ }
+
+	/* take the first params buffer from the queue */
+ if (!list_empty(&params->params))
+ cur_buf = list_first_entry(&params->params,
+ struct rkisp1_buffer, queue);
+ spin_unlock(&params->config_lock);
+
+ if (!cur_buf)
+ return;
+
+ new_params = (struct rkisp1_params_cfg *)(cur_buf->vaddr[0]);
+
+ if (isp_mis & RKISP1_CIF_ISP_FRAME) {
+ u32 isp_ctrl;
+
+ rkisp1_isp_isr_other_config(params, new_params);
+ rkisp1_isp_isr_meas_config(params, new_params);
+
+		/* force an immediate shadow register update */
+ isp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_CTRL);
+ isp_ctrl |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
+ rkisp1_write(params->rkisp1, isp_ctrl, RKISP1_CIF_ISP_CTRL);
+
+ spin_lock(&params->config_lock);
+ list_del(&cur_buf->queue);
+ spin_unlock(&params->config_lock);
+
+ cur_buf->vb.sequence = frame_sequence;
+ vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+}
+
+static const struct rkisp1_cif_isp_awb_meas_config rkisp1_awb_params_default_config = {
+ {
+ 0, 0, RKISP1_DEFAULT_WIDTH, RKISP1_DEFAULT_HEIGHT
+ },
+ RKISP1_CIF_ISP_AWB_MODE_YCBCR, 200, 30, 20, 20, 0, 128, 128
+};
+
+static const struct rkisp1_cif_isp_aec_config rkisp1_aec_params_default_config = {
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0,
+ {
+ RKISP1_DEFAULT_WIDTH >> 2, RKISP1_DEFAULT_HEIGHT >> 2,
+ RKISP1_DEFAULT_WIDTH >> 1, RKISP1_DEFAULT_HEIGHT >> 1
+ }
+};
+
+static const struct rkisp1_cif_isp_hst_config rkisp1_hst_params_default_config = {
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
+ 3,
+ {
+ RKISP1_DEFAULT_WIDTH >> 2, RKISP1_DEFAULT_HEIGHT >> 2,
+ RKISP1_DEFAULT_WIDTH >> 1, RKISP1_DEFAULT_HEIGHT >> 1
+ },
+ {
+ 0, /* To be filled in with 0x01 at runtime. */
+ }
+};
+
+static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config = {
+ 1,
+ {
+ {
+ 300, 225, 200, 150
+ }
+ },
+ 4,
+ 14
+};
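+
+/*
+ * The default configurations above rely on positional initializers: each
+ * bare value fills the next field of the corresponding UAPI struct in
+ * declaration order, and the inner brace pairs fill embedded aggregates
+ * such as the measurement windows.
+ */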
+
+static void rkisp1_params_config_parameter(struct rkisp1_params *params)
+{
+ struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
+
+ spin_lock(&params->config_lock);
+
+ rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config);
+ rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config,
+ true);
+
+ rkisp1_aec_config(params, &rkisp1_aec_params_default_config);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+
+ rkisp1_afm_config(params, &rkisp1_afc_params_default_config);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+
+ memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
+ rkisp1_hst_config(params, &hst);
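+	/*
+	 * Note: ORing in the complement of the mode mask below asserts
+	 * every HIST_PROP bit outside the mode field, not just the
+	 * default mode bits.
+	 */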
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK |
+ rkisp1_hst_params_default_config.mode);
+
+	/* set the quantization range for the color space matrix */
+ if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ rkisp1_csm_config(params, true);
+ else
+ rkisp1_csm_config(params, false);
+
+	/* override the defaults with the last parameters buffered by userspace */
+ rkisp1_isp_isr_other_config(params, &params->cur_params);
+ rkisp1_isp_isr_meas_config(params, &params->cur_params);
+
+ spin_unlock(&params->config_lock);
+}
+
+/* Not called when the camera is active, thus no ISR protection is needed. */
+void rkisp1_params_configure(struct rkisp1_params *params,
+ enum rkisp1_fmt_raw_pat_type bayer_pat,
+ enum v4l2_quantization quantization)
+{
+ params->quantization = quantization;
+ params->raw_type = bayer_pat;
+ rkisp1_params_config_parameter(params);
+}
+
+/* Not called when the camera is active, thus no ISR protection is needed. */
+void rkisp1_params_disable(struct rkisp1_params *params)
+{
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_ENA);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+ rkisp1_awb_meas_enable(params, NULL, false);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+ rkisp1_ctk_enable(params, false);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+ rkisp1_hst_enable(params, NULL, false);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+ rkisp1_ie_enable(params, false);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+}
+
+static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct rkisp1_params *params = video_get_drvdata(video);
+
+ if (f->index > 0 || f->type != video->queue->type)
+ return -EINVAL;
+
+ f->pixelformat = params->vdev_fmt.fmt.meta.dataformat;
+
+ return 0;
+}
+
+static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct rkisp1_params *params = video_get_drvdata(video);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (f->type != video->queue->type)
+ return -EINVAL;
+
+ memset(meta, 0, sizeof(*meta));
+ meta->dataformat = params->vdev_fmt.fmt.meta.dataformat;
+ meta->buffersize = params->vdev_fmt.fmt.meta.buffersize;
+
+ return 0;
+}
+
+static int rkisp1_params_querycap(struct file *file,
+ void *priv, struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+/* ISP params video device IOCTLs */
+static const struct v4l2_ioctl_ops rkisp1_params_ioctl = {
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out,
+ .vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+ .vidioc_querycap = rkisp1_params_querycap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rkisp1_params *params = vq->drv_priv;
+
+ *num_buffers = clamp_t(u32, *num_buffers,
+ RKISP1_ISP_PARAMS_REQ_BUFS_MIN,
+ RKISP1_ISP_PARAMS_REQ_BUFS_MAX);
+
+ *num_planes = 1;
+
+ sizes[0] = sizeof(struct rkisp1_params_cfg);
+
+ INIT_LIST_HEAD(&params->params);
+ params->is_first_params = true;
+
+ return 0;
+}
+
+static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_buffer *params_buf =
+ container_of(vbuf, struct rkisp1_buffer, vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct rkisp1_params *params = vq->drv_priv;
+ struct rkisp1_params_cfg *new_params;
+ unsigned long flags;
+ unsigned int frame_sequence =
+ atomic_read(&params->rkisp1->isp.frame_sequence);
+
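+	/*
+	 * The first params buffer queued before streaming is not added to
+	 * the list: its content is cached in cur_params so that
+	 * rkisp1_params_config_parameter() can apply it on stream start,
+	 * and the buffer is completed back to userspace immediately.
+	 */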
+ if (params->is_first_params) {
+ new_params = (struct rkisp1_params_cfg *)
+ (vb2_plane_vaddr(vb, 0));
+ vbuf->sequence = frame_sequence;
+ vb2_buffer_done(&params_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ params->is_first_params = false;
+ params->cur_params = *new_params;
+ return;
+ }
+
+ params_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
+ spin_lock_irqsave(&params->config_lock, flags);
+ list_add_tail(&params_buf->queue, &params->params);
+ spin_unlock_irqrestore(&params->config_lock, flags);
+}
+
+static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg))
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg));
+
+ return 0;
+}
+
+static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct rkisp1_params *params = vq->drv_priv;
+ struct rkisp1_buffer *buf;
+ unsigned long flags;
+ unsigned int i;
+
+	/* stop params input first */
+ spin_lock_irqsave(&params->config_lock, flags);
+ params->is_streaming = false;
+ spin_unlock_irqrestore(&params->config_lock, flags);
+
+ for (i = 0; i < RKISP1_ISP_PARAMS_REQ_BUFS_MAX; i++) {
+ spin_lock_irqsave(&params->config_lock, flags);
+ if (!list_empty(&params->params)) {
+ buf = list_first_entry(&params->params,
+ struct rkisp1_buffer, queue);
+ list_del(&buf->queue);
+ spin_unlock_irqrestore(&params->config_lock,
+ flags);
+ } else {
+ spin_unlock_irqrestore(&params->config_lock,
+ flags);
+ break;
+ }
+
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static int
+rkisp1_params_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+ struct rkisp1_params *params = queue->drv_priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&params->config_lock, flags);
+ params->is_streaming = true;
+ spin_unlock_irqrestore(&params->config_lock, flags);
+
+ return 0;
+}
+
+static const struct vb2_ops rkisp1_params_vb2_ops = {
+ .queue_setup = rkisp1_params_vb2_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_queue = rkisp1_params_vb2_buf_queue,
+ .buf_prepare = rkisp1_params_vb2_buf_prepare,
+ .start_streaming = rkisp1_params_vb2_start_streaming,
+ .stop_streaming = rkisp1_params_vb2_stop_streaming,
+};
+
+static const struct v4l2_file_operations rkisp1_params_fops = {
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vb2_fop_poll,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release
+};
+
+static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
+ struct rkisp1_params *params)
+{
+ struct rkisp1_vdev_node *node;
+
+ node = container_of(q, struct rkisp1_vdev_node, buf_queue);
+
+ q->type = V4L2_BUF_TYPE_META_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = params;
+ q->ops = &rkisp1_params_vb2_ops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->buf_struct_size = sizeof(struct rkisp1_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &node->vlock;
+
+ return vb2_queue_init(q);
+}
+
+static void rkisp1_init_params(struct rkisp1_params *params)
+{
+ params->vdev_fmt.fmt.meta.dataformat =
+ V4L2_META_FMT_RK_ISP1_PARAMS;
+ params->vdev_fmt.fmt.meta.buffersize =
+ sizeof(struct rkisp1_params_cfg);
+}
+
+int rkisp1_params_register(struct rkisp1_params *params,
+ struct v4l2_device *v4l2_dev,
+ struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_vdev_node *node = &params->vnode;
+ struct video_device *vdev = &node->vdev;
+ int ret;
+
+ params->rkisp1 = rkisp1;
+ mutex_init(&node->vlock);
+ spin_lock_init(&params->config_lock);
+
+ strscpy(vdev->name, RKISP1_PARAMS_DEV_NAME, sizeof(vdev->name));
+
+ video_set_drvdata(vdev, params);
+ vdev->ioctl_ops = &rkisp1_params_ioctl;
+ vdev->fops = &rkisp1_params_fops;
+ vdev->release = video_device_release_empty;
+ /*
+ * Provide a mutex to v4l2 core. It will be used
+ * to protect all fops and v4l2 ioctls.
+ */
+ vdev->lock = &node->vlock;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->queue = &node->buf_queue;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
+ vdev->vfl_dir = VFL_DIR_TX;
+ rkisp1_params_init_vb2_queue(vdev->queue, params);
+ rkisp1_init_params(params);
+
+ node->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+ if (ret)
+ goto err_release_queue;
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(&vdev->dev,
+ "failed to register %s, ret=%d\n", vdev->name, ret);
+ goto err_cleanup_media_entity;
+ }
+ return 0;
+err_cleanup_media_entity:
+ media_entity_cleanup(&vdev->entity);
+err_release_queue:
+ vb2_queue_release(vdev->queue);
+ return ret;
+}
+
+void rkisp1_params_unregister(struct rkisp1_params *params)
+{
+ struct rkisp1_vdev_node *node = &params->vnode;
+ struct video_device *vdev = &node->vdev;
+
+ video_unregister_device(vdev);
+ media_entity_cleanup(&vdev->entity);
+ vb2_queue_release(vdev->queue);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-regs.h b/drivers/staging/media/rkisp1/rkisp1-regs.h
new file mode 100644
index 000000000000..46018f435b6f
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-regs.h
@@ -0,0 +1,1264 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 Driver - Registers header
+ *
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _RKISP1_REGS_H
+#define _RKISP1_REGS_H
+
+/* ISP_CTRL */
+#define RKISP1_CIF_ISP_CTRL_ISP_ENABLE BIT(0)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT (0 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656 BIT(1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601 (2 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601 (3 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_DATA_MODE (4 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656 (5 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656 (6 << 1)
+#define RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE BIT(4)
+#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA BIT(6)
+#define RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA BIT(7)
+#define RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD_PERMANENT BIT(8)
+#define RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD BIT(9)
+#define RKISP1_CIF_ISP_CTRL_ISP_GEN_CFG_UPD BIT(10)
+#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA BIT(11)
+#define RKISP1_CIF_ISP_CTRL_ISP_FLASH_MODE_ENA BIT(12)
+#define RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA BIT(13)
+#define RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA BIT(14)
+
+/* ISP_ACQ_PROP */
+#define RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE BIT(0)
+#define RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW BIT(1)
+#define RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW BIT(2)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_RGGB (0 << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GRBG BIT(3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_GBRG (2 << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT_BGGR (3 << 3)
+#define RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(pat) ((pat) << 3)
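+/*
+ * The parameterized form above generalizes the four fixed pattern values,
+ * e.g. RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(3) evaluates to the BGGR value
+ * (3 << 3).
+ */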
+#define RKISP1_CIF_ISP_ACQ_PROP_YCBYCR (0 << 7)
+#define RKISP1_CIF_ISP_ACQ_PROP_YCRYCB BIT(7)
+#define RKISP1_CIF_ISP_ACQ_PROP_CBYCRY (2 << 7)
+#define RKISP1_CIF_ISP_ACQ_PROP_CRYCBY (3 << 7)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL (0 << 9)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_EVEN BIT(9)
+#define RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ODD (2 << 9)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B (0 << 12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO BIT(12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_MSB (2 << 12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO (3 << 12)
+#define RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_MSB (4 << 12)
+
+/* VI_DPCL */
+#define RKISP1_CIF_VI_DPCL_DMA_JPEG (0 << 0)
+#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI BIT(0)
+#define RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_JPEG (2 << 0)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MP BIT(2)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_SP (2 << 2)
+#define RKISP1_CIF_VI_DPCL_CHAN_MODE_MPSP (3 << 2)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_SPMUX (0 << 4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_SI BIT(4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_IE (2 << 4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_JPEG (3 << 4)
+#define RKISP1_CIF_VI_DPCL_DMA_SW_ISP (4 << 4)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL (0 << 8)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_SMIA BIT(8)
+#define RKISP1_CIF_VI_DPCL_IF_SEL_MIPI (2 << 8)
+#define RKISP1_CIF_VI_DPCL_DMA_IE_MUX_DMA BIT(10)
+#define RKISP1_CIF_VI_DPCL_DMA_SP_MUX_DMA BIT(11)
+
+/* ISP_IMSC - ISP_MIS - ISP_RIS - ISP_ICR - ISP_ISR */
+#define RKISP1_CIF_ISP_OFF BIT(0)
+#define RKISP1_CIF_ISP_FRAME BIT(1)
+#define RKISP1_CIF_ISP_DATA_LOSS BIT(2)
+#define RKISP1_CIF_ISP_PIC_SIZE_ERROR BIT(3)
+#define RKISP1_CIF_ISP_AWB_DONE BIT(4)
+#define RKISP1_CIF_ISP_FRAME_IN BIT(5)
+#define RKISP1_CIF_ISP_V_START BIT(6)
+#define RKISP1_CIF_ISP_H_START BIT(7)
+#define RKISP1_CIF_ISP_FLASH_ON BIT(8)
+#define RKISP1_CIF_ISP_FLASH_OFF BIT(9)
+#define RKISP1_CIF_ISP_SHUTTER_ON BIT(10)
+#define RKISP1_CIF_ISP_SHUTTER_OFF BIT(11)
+#define RKISP1_CIF_ISP_AFM_SUM_OF BIT(12)
+#define RKISP1_CIF_ISP_AFM_LUM_OF BIT(13)
+#define RKISP1_CIF_ISP_AFM_FIN BIT(14)
+#define RKISP1_CIF_ISP_HIST_MEASURE_RDY BIT(15)
+#define RKISP1_CIF_ISP_FLASH_CAP BIT(17)
+#define RKISP1_CIF_ISP_EXP_END BIT(18)
+#define RKISP1_CIF_ISP_VSM_END BIT(19)
+
+/* ISP_ERR */
+#define RKISP1_CIF_ISP_ERR_INFORM_SIZE BIT(0)
+#define RKISP1_CIF_ISP_ERR_IS_SIZE BIT(1)
+#define RKISP1_CIF_ISP_ERR_OUTFORM_SIZE BIT(2)
+
+/* MI_CTRL */
+#define RKISP1_CIF_MI_CTRL_MP_ENABLE BIT(0)
+#define RKISP1_CIF_MI_CTRL_SP_ENABLE (2 << 0)
+#define RKISP1_CIF_MI_CTRL_JPEG_ENABLE (4 << 0)
+#define RKISP1_CIF_MI_CTRL_RAW_ENABLE (8 << 0)
+#define RKISP1_CIF_MI_CTRL_HFLIP BIT(4)
+#define RKISP1_CIF_MI_CTRL_VFLIP BIT(5)
+#define RKISP1_CIF_MI_CTRL_ROT BIT(6)
+#define RKISP1_CIF_MI_BYTE_SWAP BIT(7)
+#define RKISP1_CIF_MI_SP_Y_FULL_YUV2RGB BIT(8)
+#define RKISP1_CIF_MI_SP_CBCR_FULL_YUV2RGB BIT(9)
+#define RKISP1_CIF_MI_SP_422NONCOSITEED BIT(10)
+#define RKISP1_CIF_MI_MP_PINGPONG_ENABEL BIT(11)
+#define RKISP1_CIF_MI_SP_PINGPONG_ENABEL BIT(12)
+#define RKISP1_CIF_MI_MP_AUTOUPDATE_ENABLE BIT(13)
+#define RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE BIT(14)
+#define RKISP1_CIF_MI_LAST_PIXEL_SIG_ENABLE BIT(15)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_16 (0 << 16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_32 BIT(16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_64 (2 << 16)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_16 (0 << 18)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_32 BIT(18)
+#define RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_64 (2 << 18)
+#define RKISP1_CIF_MI_CTRL_INIT_BASE_EN BIT(20)
+#define RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN BIT(21)
+#define RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8 (0 << 22)
+#define RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA BIT(22)
+#define RKISP1_MI_CTRL_MP_WRITE_YUVINT (2 << 22)
+#define RKISP1_MI_CTRL_MP_WRITE_RAW12 (2 << 22)
+#define RKISP1_MI_CTRL_SP_WRITE_PLA (0 << 24)
+#define RKISP1_MI_CTRL_SP_WRITE_SPLA BIT(24)
+#define RKISP1_MI_CTRL_SP_WRITE_INT (2 << 24)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV400 (0 << 26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV420 BIT(26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV422 (2 << 26)
+#define RKISP1_MI_CTRL_SP_INPUT_YUV444 (3 << 26)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV400 (0 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV420 BIT(28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV422 (2 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_YUV444 (3 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_RGB565 (4 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_RGB666 (5 << 28)
+#define RKISP1_MI_CTRL_SP_OUTPUT_RGB888 (6 << 28)
+
+#define RKISP1_MI_CTRL_MP_FMT_MASK GENMASK(23, 22)
+#define RKISP1_MI_CTRL_SP_FMT_MASK GENMASK(30, 24)
+
+/* MI_INIT */
+#define RKISP1_CIF_MI_INIT_SKIP BIT(2)
+#define RKISP1_CIF_MI_INIT_SOFT_UPD BIT(4)
+
+/* MI_CTRL_SHD */
+#define RKISP1_CIF_MI_CTRL_SHD_MP_IN_ENABLED BIT(0)
+#define RKISP1_CIF_MI_CTRL_SHD_SP_IN_ENABLED BIT(1)
+#define RKISP1_CIF_MI_CTRL_SHD_JPEG_IN_ENABLED BIT(2)
+#define RKISP1_CIF_MI_CTRL_SHD_RAW_IN_ENABLED BIT(3)
+#define RKISP1_CIF_MI_CTRL_SHD_MP_OUT_ENABLED BIT(16)
+#define RKISP1_CIF_MI_CTRL_SHD_SP_OUT_ENABLED BIT(17)
+#define RKISP1_CIF_MI_CTRL_SHD_JPEG_OUT_ENABLED BIT(18)
+#define RKISP1_CIF_MI_CTRL_SHD_RAW_OUT_ENABLED BIT(19)
+
+/* RSZ_CTRL */
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HY_ENABLE BIT(0)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HC_ENABLE BIT(1)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VY_ENABLE BIT(2)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VC_ENABLE BIT(3)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HY_UP BIT(4)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_HC_UP BIT(5)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VY_UP BIT(6)
+#define RKISP1_CIF_RSZ_CTRL_SCALE_VC_UP BIT(7)
+#define RKISP1_CIF_RSZ_CTRL_CFG_UPD BIT(8)
+#define RKISP1_CIF_RSZ_CTRL_CFG_UPD_AUTO BIT(9)
+#define RKISP1_CIF_RSZ_SCALER_FACTOR BIT(16)
+
+/* MI_IMSC - MI_MIS - MI_RIS - MI_ICR - MI_ISR */
+#define RKISP1_CIF_MI_FRAME(stream) BIT((stream)->id)
+#define RKISP1_CIF_MI_MBLK_LINE BIT(2)
+#define RKISP1_CIF_MI_FILL_MP_Y BIT(3)
+#define RKISP1_CIF_MI_WRAP_MP_Y BIT(4)
+#define RKISP1_CIF_MI_WRAP_MP_CB BIT(5)
+#define RKISP1_CIF_MI_WRAP_MP_CR BIT(6)
+#define RKISP1_CIF_MI_WRAP_SP_Y BIT(7)
+#define RKISP1_CIF_MI_WRAP_SP_CB BIT(8)
+#define RKISP1_CIF_MI_WRAP_SP_CR BIT(9)
+#define RKISP1_CIF_MI_DMA_READY BIT(11)
+
+/* MI_STATUS */
+#define RKISP1_CIF_MI_STATUS_MP_Y_FIFO_FULL BIT(0)
+#define RKISP1_CIF_MI_STATUS_SP_Y_FIFO_FULL BIT(4)
+
+/* MI_DMA_CTRL */
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_16 (0 << 0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_32 BIT(0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_LUM_64 (2 << 0)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_16 (0 << 2)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_32 BIT(2)
+#define RKISP1_CIF_MI_DMA_CTRL_BURST_LEN_CHROM_64 (2 << 2)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_PLANAR		(0 << 4)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_SPLANAR		BIT(4)
+#define RKISP1_CIF_MI_DMA_CTRL_READ_FMT_PACKED		(2 << 4)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV400		(0 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV420		BIT(6)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV422		(2 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_FMT_YUV444		(3 << 6)
+#define RKISP1_CIF_MI_DMA_CTRL_BYTE_SWAP BIT(8)
+#define RKISP1_CIF_MI_DMA_CTRL_CONTINUOUS_ENA BIT(9)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_NO (0 << 12)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_8BIT BIT(12)
+#define RKISP1_CIF_MI_DMA_CTRL_RGB_BAYER_16BIT (2 << 12)
+/* MI_DMA_START */
+#define RKISP1_CIF_MI_DMA_START_ENABLE BIT(0)
+/* MI_XTD_FORMAT_CTRL */
+#define RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP BIT(0)
+#define RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP BIT(1)
+#define RKISP1_CIF_MI_XTD_FMT_CTRL_DMA_CB_CR_SWAP BIT(2)
+
+/* CCL */
+#define RKISP1_CIF_CCL_CIF_CLK_DIS BIT(2)
+/* ICCL */
+#define RKISP1_CIF_ICCL_ISP_CLK BIT(0)
+#define RKISP1_CIF_ICCL_CP_CLK BIT(1)
+#define RKISP1_CIF_ICCL_RES_2 BIT(2)
+#define RKISP1_CIF_ICCL_MRSZ_CLK BIT(3)
+#define RKISP1_CIF_ICCL_SRSZ_CLK BIT(4)
+#define RKISP1_CIF_ICCL_JPEG_CLK BIT(5)
+#define RKISP1_CIF_ICCL_MI_CLK BIT(6)
+#define RKISP1_CIF_ICCL_RES_7 BIT(7)
+#define RKISP1_CIF_ICCL_IE_CLK BIT(8)
+#define RKISP1_CIF_ICCL_SIMP_CLK BIT(9)
+#define RKISP1_CIF_ICCL_SMIA_CLK BIT(10)
+#define RKISP1_CIF_ICCL_MIPI_CLK BIT(11)
+#define RKISP1_CIF_ICCL_DCROP_CLK BIT(12)
+/* IRCL */
+#define RKISP1_CIF_IRCL_ISP_SW_RST BIT(0)
+#define RKISP1_CIF_IRCL_CP_SW_RST BIT(1)
+#define RKISP1_CIF_IRCL_YCS_SW_RST BIT(2)
+#define RKISP1_CIF_IRCL_MRSZ_SW_RST BIT(3)
+#define RKISP1_CIF_IRCL_SRSZ_SW_RST BIT(4)
+#define RKISP1_CIF_IRCL_JPEG_SW_RST BIT(5)
+#define RKISP1_CIF_IRCL_MI_SW_RST BIT(6)
+#define RKISP1_CIF_IRCL_CIF_SW_RST BIT(7)
+#define RKISP1_CIF_IRCL_IE_SW_RST BIT(8)
+#define RKISP1_CIF_IRCL_SI_SW_RST BIT(9)
+#define RKISP1_CIF_IRCL_MIPI_SW_RST BIT(11)
+
+/* C_PROC_CTR */
+#define RKISP1_CIF_C_PROC_CTR_ENABLE BIT(0)
+#define RKISP1_CIF_C_PROC_YOUT_FULL BIT(1)
+#define RKISP1_CIF_C_PROC_YIN_FULL BIT(2)
+#define RKISP1_CIF_C_PROC_COUT_FULL BIT(3)
+#define RKISP1_CIF_C_PROC_CTRL_RESERVED 0xFFFFFFFE
+#define RKISP1_CIF_C_PROC_CONTRAST_RESERVED 0xFFFFFF00
+#define RKISP1_CIF_C_PROC_BRIGHTNESS_RESERVED 0xFFFFFF00
+#define RKISP1_CIF_C_PROC_HUE_RESERVED 0xFFFFFF00
+#define RKISP1_CIF_C_PROC_SATURATION_RESERVED 0xFFFFFF00
+#define RKISP1_CIF_C_PROC_MACC_RESERVED 0xE000E000
+#define RKISP1_CIF_C_PROC_TONE_RESERVED 0xF000
+/* DUAL_CROP_CTRL */
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_BYPASS (0 << 0)
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_YUV BIT(0)
+#define RKISP1_CIF_DUAL_CROP_MP_MODE_RAW (2 << 0)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_BYPASS (0 << 2)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_YUV BIT(2)
+#define RKISP1_CIF_DUAL_CROP_SP_MODE_RAW (2 << 2)
+#define RKISP1_CIF_DUAL_CROP_CFG_UPD_PERMANENT BIT(4)
+#define RKISP1_CIF_DUAL_CROP_CFG_UPD BIT(5)
+#define RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD BIT(6)
+
+/* IMG_EFF_CTRL */
+#define RKISP1_CIF_IMG_EFF_CTRL_ENABLE BIT(0)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE (0 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE BIT(1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA (2 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL (3 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS (4 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH (5 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN (6 << 1)
+#define RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD BIT(4)
+#define RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL BIT(5)
+
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE_SHIFT 0
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE_SHIFT 1
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA_SHIFT 2
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL_SHIFT 3
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS_SHIFT 4
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH_SHIFT 5
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN_SHIFT 6
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK 0xE
+
+/* IMG_EFF_COLOR_SEL */
+#define RKISP1_CIF_IMG_EFF_COLOR_RGB 0
+#define RKISP1_CIF_IMG_EFF_COLOR_B BIT(0)
+#define RKISP1_CIF_IMG_EFF_COLOR_G (2 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_GB (3 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_R (4 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_RB (5 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_RG (6 << 0)
+#define RKISP1_CIF_IMG_EFF_COLOR_RGB2 (7 << 0)
+
+/* MIPI_CTRL */
+#define RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA BIT(0)
+#define RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(a) (((a) & 0xF) << 8)
+#define RKISP1_CIF_MIPI_CTRL_NUM_LANES(a) (((a) & 0x3) << 12)
+#define RKISP1_CIF_MIPI_CTRL_ERR_SOT_HS_SKIP BIT(16)
+#define RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP BIT(17)
+#define RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA BIT(18)
+
+/* MIPI_DATA_SEL */
+#define RKISP1_CIF_MIPI_DATA_SEL_VC(a) (((a) & 0x3) << 6)
+#define RKISP1_CIF_MIPI_DATA_SEL_DT(a) (((a) & 0x3F) << 0)
+/* MIPI DATA_TYPE */
+#define RKISP1_CIF_CSI2_DT_YUV420_8b 0x18
+#define RKISP1_CIF_CSI2_DT_YUV420_10b 0x19
+#define RKISP1_CIF_CSI2_DT_YUV422_8b 0x1E
+#define RKISP1_CIF_CSI2_DT_YUV422_10b 0x1F
+#define RKISP1_CIF_CSI2_DT_RGB565 0x22
+#define RKISP1_CIF_CSI2_DT_RGB666 0x23
+#define RKISP1_CIF_CSI2_DT_RGB888 0x24
+#define RKISP1_CIF_CSI2_DT_RAW8 0x2A
+#define RKISP1_CIF_CSI2_DT_RAW10 0x2B
+#define RKISP1_CIF_CSI2_DT_RAW12 0x2C
+
+/* MIPI_IMSC, MIPI_RIS, MIPI_MIS, MIPI_ICR, MIPI_ISR */
+#define RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(a) (((a) & 0xF) << 0)
+#define RKISP1_CIF_MIPI_ERR_SOT(a) (((a) & 0xF) << 4)
+#define RKISP1_CIF_MIPI_ERR_SOT_SYNC(a) (((a) & 0xF) << 8)
+#define RKISP1_CIF_MIPI_ERR_EOT_SYNC(a) (((a) & 0xF) << 12)
+#define RKISP1_CIF_MIPI_ERR_CTRL(a) (((a) & 0xF) << 16)
+#define RKISP1_CIF_MIPI_ERR_PROTOCOL BIT(20)
+#define RKISP1_CIF_MIPI_ERR_ECC1 BIT(21)
+#define RKISP1_CIF_MIPI_ERR_ECC2 BIT(22)
+#define RKISP1_CIF_MIPI_ERR_CS BIT(23)
+#define RKISP1_CIF_MIPI_FRAME_END BIT(24)
+#define RKISP1_CIF_MIPI_ADD_DATA_OVFLW BIT(25)
+#define RKISP1_CIF_MIPI_ADD_DATA_WATER_MARK BIT(26)
+
+#define RKISP1_CIF_MIPI_ERR_CSI (RKISP1_CIF_MIPI_ERR_PROTOCOL | \
+ RKISP1_CIF_MIPI_ERR_ECC1 | \
+ RKISP1_CIF_MIPI_ERR_ECC2 | \
+ RKISP1_CIF_MIPI_ERR_CS)
+
+#define RKISP1_CIF_MIPI_ERR_DPHY (RKISP1_CIF_MIPI_ERR_SOT(3) | \
+ RKISP1_CIF_MIPI_ERR_SOT_SYNC(3) | \
+ RKISP1_CIF_MIPI_ERR_EOT_SYNC(3) | \
+ RKISP1_CIF_MIPI_ERR_CTRL(3))
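+/*
+ * Each 4-bit argument above selects per-lane error bits; passing 3
+ * (0b0011) to the helpers therefore aggregates the SOT, SOT sync, EOT
+ * sync and control errors of lanes 0 and 1.
+ */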
+
+/* SUPER_IMPOSE */
+#define RKISP1_CIF_SUPER_IMP_CTRL_NORMAL_MODE BIT(0)
+#define RKISP1_CIF_SUPER_IMP_CTRL_REF_IMG_MEM BIT(1)
+#define RKISP1_CIF_SUPER_IMP_CTRL_TRANSP_DIS BIT(2)
+
+/* ISP HISTOGRAM CALCULATION : ISP_HIST_PROP */
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS (0 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB BIT(0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED (2 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN (3 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE (4 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM (5 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK 0x7
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET(x) (((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET(v0, v1, v2, v3) \
+ (((v0) & 0x1F) | (((v1) & 0x1F) << 8) |\
+ (((v2) & 0x1F) << 16) | \
+ (((v3) & 0x1F) << 24))
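+/*
+ * RKISP1_CIF_ISP_HIST_WEIGHT_SET() packs four 5-bit window weights into
+ * a single register word, e.g. RKISP1_CIF_ISP_HIST_WEIGHT_SET(1, 1, 1, 1)
+ * evaluates to 0x01010101.
+ */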
+
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED 0xFFFFF000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED 0xFFFFF800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED 0xE0E0E0E0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
+#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
+
+/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
+#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
+
+/* SHUTTER CONTROL */
+#define RKISP1_CIF_ISP_SH_CTRL_SH_ENA BIT(0)
+#define RKISP1_CIF_ISP_SH_CTRL_REP_EN BIT(1)
+#define RKISP1_CIF_ISP_SH_CTRL_SRC_SH_TRIG BIT(2)
+#define RKISP1_CIF_ISP_SH_CTRL_EDGE_POS BIT(3)
+#define RKISP1_CIF_ISP_SH_CTRL_POL_LOW BIT(4)
+
+/* FLASH MODULE */
+/* ISP_FLASH_CMD */
+#define RKISP1_CIFFLASH_CMD_PRELIGHT_ON BIT(0)
+#define RKISP1_CIFFLASH_CMD_FLASH_ON BIT(1)
+#define RKISP1_CIFFLASH_CMD_PRE_FLASH_ON BIT(2)
+/* ISP_FLASH_CONFIG */
+#define RKISP1_CIFFLASH_CONFIG_PRELIGHT_END BIT(0)
+#define RKISP1_CIFFLASH_CONFIG_VSYNC_POS BIT(1)
+#define RKISP1_CIFFLASH_CONFIG_PRELIGHT_LOW BIT(2)
+#define RKISP1_CIFFLASH_CONFIG_SRC_FL_TRIG BIT(3)
+#define RKISP1_CIFFLASH_CONFIG_DELAY(a) (((a) & 0xF) << 4)
+
+/* Demosaic: ISP_DEMOSAIC */
+#define RKISP1_CIF_ISP_DEMOSAIC_BYPASS BIT(10)
+#define RKISP1_CIF_ISP_DEMOSAIC_TH(x) ((x) & 0xFF)
+
+/* AWB */
+/* ISP_AWB_PROP */
+#define RKISP1_CIF_ISP_AWB_YMAX_CMP_EN BIT(2)
+#define RKISP1_CIF_ISP_AWB_YMAX_READ(x) (((x) >> 2) & 1)
+#define RKISP1_CIF_ISP_AWB_MODE_RGB_EN ((1 << 31) | (0x2 << 0))
+#define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN ((0 << 31) | (0x2 << 0))
+#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE 0xFFFFFFFC
+#define RKISP1_CIF_ISP_AWB_MODE_READ(x) ((x) & 3)
+/* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G */
+#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x) (((x) & 0x3FF) << 16)
+#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x) (((x) >> 16) & 0x3FF)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_SET(x) ((x) & 0x3FFF)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_READ(x) ((x) & 0x3FFF)
+/* ISP_AWB_REF */
+#define RKISP1_CIF_ISP_AWB_REF_CR_SET(x) (((x) & 0xFF) << 8)
+#define RKISP1_CIF_ISP_AWB_REF_CR_READ(x) (((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_REF_CB_READ(x) ((x) & 0xFF)
+/* ISP_AWB_THRESH */
+#define RKISP1_CIF_ISP_AWB_MAX_CS_SET(x) (((x) & 0xFF) << 8)
+#define RKISP1_CIF_ISP_AWB_MAX_CS_READ(x) (((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MIN_C_READ(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_SET(x) (((x) & 0xFF) << 16)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_READ(x) (((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_SET(x) (((x) & 0xFF) << 24)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_READ(x) (((x) >> 24) & 0xFF)
+/* ISP_AWB_MEAN */
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(x) (((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(x) (((x) >> 16) & 0xFF)
+/* ISP_AWB_WHITE_CNT */
+#define RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(x) ((x) & 0x3FFFFFF)
+
+#define RKISP1_CIF_ISP_AWB_GAINS_MAX_VAL 0x000003FF
+#define RKISP1_CIF_ISP_AWB_WINDOW_OFFSET_MAX 0x00000FFF
+#define RKISP1_CIF_ISP_AWB_WINDOW_MAX_SIZE 0x00001FFF
+#define RKISP1_CIF_ISP_AWB_CBCR_MAX_REF 0x000000FF
+#define RKISP1_CIF_ISP_AWB_THRES_MAX_YC 0x000000FF
+
+/* AE */
+/* ISP_EXP_CTRL */
+#define RKISP1_CIF_ISP_EXP_ENA BIT(0)
+#define RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP BIT(1)
+/*
+ * '1': luminance calculation according to Y = (R + G + B) * 0.332 (85/256)
+ * '0': luminance calculation according to Y = 16 + 0.25R + 0.5G + 0.1094B
+ */
+#define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1 BIT(31)
+
+/* ISP_EXP_H_SIZE */
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK 0x000007FF
+/* ISP_EXP_V_SIZE: vertical size must be a multiple of 2. */
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET(x) ((x) & 0x7FE)
+
+/* ISP_EXP_H_OFFSET */
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS 2424
+/* ISP_EXP_V_OFFSET */
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS 1806
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM 5
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM 5
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM * RKISP1_CIF_ISP_EXP_COLUMN_NUM)
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE 516
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE 35
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE 390
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE 28
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
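+/*
+ * With the 5x5 measurement grid these limits work out to a total window
+ * of 176..2581 pixels horizontally and 141..1951 pixels vertically.
+ */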
+
+/* LSC: ISP_LSC_CTRL */
+#define RKISP1_CIF_ISP_LSC_CTRL_ENA BIT(0)
+#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xFC00FC00
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED 0xF000F000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED 0xF000F000
+#define RKISP1_CIF_ISP_LSC_SECTORS_MAX 17
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA(v0, v1) \
+ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
+ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \
+ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
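+/*
+ * RKISP1_CIF_ISP_LSC_TABLE_DATA() packs two adjacent 12-bit correction
+ * samples into one table word, e.g.
+ * RKISP1_CIF_ISP_LSC_TABLE_DATA(0x100, 0x200) == 0x200100.
+ */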
+
+/* LSC: ISP_LSC_TABLE_SEL */
+#define RKISP1_CIF_ISP_LSC_TABLE_0 0
+#define RKISP1_CIF_ISP_LSC_TABLE_1 1
+
+/* LSC: ISP_LSC_STATUS */
+#define RKISP1_CIF_ISP_LSC_ACTIVE_TABLE BIT(1)
+#define RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 0
+#define RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 153
+
+/* FLT */
+/* ISP_FILT_MODE */
+#define RKISP1_CIF_ISP_FLT_ENA BIT(0)
+
+/*
+ * 0: green filter static mode (active filter factor = FILT_FAC_MID)
+ * 1: dynamic noise reduction/sharpen (default)
+ */
+#define RKISP1_CIF_ISP_FLT_MODE_DNR BIT(1)
+#define RKISP1_CIF_ISP_FLT_MODE_MAX 1
+#define RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(x) (((x) & 0x3) << 4)
+#define RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(x) (((x) & 0x3) << 6)
+#define RKISP1_CIF_ISP_FLT_CHROMA_MODE_MAX 3
+#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1(x) (((x) & 0xF) << 8)
+#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1_MAX 8
+#define RKISP1_CIF_ISP_FLT_THREAD_RESERVED 0xFFFFFC00
+#define RKISP1_CIF_ISP_FLT_FAC_RESERVED 0xFFFFFFC0
+#define RKISP1_CIF_ISP_FLT_LUM_WEIGHT_RESERVED 0xFFF80000
+
+#define RKISP1_CIF_ISP_CTK_COEFF_RESERVED 0xFFFFF800
+#define RKISP1_CIF_ISP_XTALK_OFFSET_RESERVED 0xFFFFF000
+
+/* GOC */
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_EQU BIT(0)
+#define RKISP1_CIF_ISP_GOC_MODE_MAX 1
+#define RKISP1_CIF_ISP_GOC_RESERVED 0xFFFFF800
+/* ISP_CTRL bit 11 */
+#define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x) (((x) >> 11) & 1)
+
+/* DPCC */
+/* ISP_DPCC_MODE */
+#define RKISP1_CIF_ISP_DPCC_ENA BIT(0)
+#define RKISP1_CIF_ISP_DPCC_MODE_MAX 0x07
+#define RKISP1_CIF_ISP_DPCC_OUTPUTMODE_MAX 0x0F
+#define RKISP1_CIF_ISP_DPCC_SETUSE_MAX 0x0F
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RESERVED 0xFFFFE000
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RESERVED 0xFFFF0000
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RESERVED 0xFFFFC0C0
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RESERVED 0xFFFFC0C0
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RESERVED 0xFFFF0000
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RESERVED 0xFFFFC0C0
+#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_RESERVED 0xFFFFF000
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_RESERVED 0xFFFFF000
+
+/* BLS */
+/* ISP_BLS_CTRL */
+#define RKISP1_CIF_ISP_BLS_ENA BIT(0)
+#define RKISP1_CIF_ISP_BLS_MODE_MEASURED BIT(1)
+#define RKISP1_CIF_ISP_BLS_MODE_FIXED 0
+#define RKISP1_CIF_ISP_BLS_WINDOW_1 BIT(2)
+#define RKISP1_CIF_ISP_BLS_WINDOW_2 (2 << 2)
+
+/* GAMMA-IN */
+#define RKISP1_CIFISP_DEGAMMA_X_RESERVED \
+ ((1 << 31) | (1 << 27) | (1 << 23) | (1 << 19) |\
+ (1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
+#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED 0xFFFFF000
+
+/* AFM */
+#define RKISP1_CIF_ISP_AFM_ENA BIT(0)
+#define RKISP1_CIF_ISP_AFM_THRES_RESERVED 0xFFFF0000
+#define RKISP1_CIF_ISP_AFM_VAR_SHIFT_RESERVED 0xFFF8FFF8
+#define RKISP1_CIF_ISP_AFM_WINDOW_X_RESERVED 0xE000
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y_RESERVED 0xF000
+#define RKISP1_CIF_ISP_AFM_WINDOW_X_MIN 0x5
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN 0x2
+#define RKISP1_CIF_ISP_AFM_WINDOW_X(x) (((x) & 0x1FFF) << 16)
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x) ((x) & 0x1FFF)
+
+/* DPF */
+#define RKISP1_CIF_ISP_DPF_MODE_EN BIT(0)
+#define RKISP1_CIF_ISP_DPF_MODE_B_FLT_DIS BIT(1)
+#define RKISP1_CIF_ISP_DPF_MODE_GB_FLT_DIS BIT(2)
+#define RKISP1_CIF_ISP_DPF_MODE_GR_FLT_DIS BIT(3)
+#define RKISP1_CIF_ISP_DPF_MODE_R_FLT_DIS BIT(4)
+#define RKISP1_CIF_ISP_DPF_MODE_RB_FLTSIZE_9x9 BIT(5)
+#define RKISP1_CIF_ISP_DPF_MODE_NLL_SEGMENTATION BIT(6)
+#define RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP BIT(7)
+#define RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP BIT(8)
+#define RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN BIT(9)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_RESERVED 0xFFFFF000
+#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX 0x1F
+#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX 0x3FF
+
+/* =================================================================== */
+/* CIF Registers */
+/* =================================================================== */
+#define RKISP1_CIF_CTRL_BASE 0x00000000
+#define RKISP1_CIF_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
+#define RKISP1_CIF_VI_ID (RKISP1_CIF_CTRL_BASE + 0x00000008)
+#define RKISP1_CIF_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
+#define RKISP1_CIF_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
+#define RKISP1_CIF_VI_DPCL (RKISP1_CIF_CTRL_BASE + 0x00000018)
+
+#define RKISP1_CIF_IMG_EFF_BASE 0x00000200
+#define RKISP1_CIF_IMG_EFF_CTRL (RKISP1_CIF_IMG_EFF_BASE + 0x00000000)
+#define RKISP1_CIF_IMG_EFF_COLOR_SEL (RKISP1_CIF_IMG_EFF_BASE + 0x00000004)
+#define RKISP1_CIF_IMG_EFF_MAT_1 (RKISP1_CIF_IMG_EFF_BASE + 0x00000008)
+#define RKISP1_CIF_IMG_EFF_MAT_2 (RKISP1_CIF_IMG_EFF_BASE + 0x0000000C)
+#define RKISP1_CIF_IMG_EFF_MAT_3 (RKISP1_CIF_IMG_EFF_BASE + 0x00000010)
+#define RKISP1_CIF_IMG_EFF_MAT_4 (RKISP1_CIF_IMG_EFF_BASE + 0x00000014)
+#define RKISP1_CIF_IMG_EFF_MAT_5 (RKISP1_CIF_IMG_EFF_BASE + 0x00000018)
+#define RKISP1_CIF_IMG_EFF_TINT (RKISP1_CIF_IMG_EFF_BASE + 0x0000001C)
+#define RKISP1_CIF_IMG_EFF_CTRL_SHD (RKISP1_CIF_IMG_EFF_BASE + 0x00000020)
+#define RKISP1_CIF_IMG_EFF_SHARPEN (RKISP1_CIF_IMG_EFF_BASE + 0x00000024)
+
+#define RKISP1_CIF_SUPER_IMP_BASE 0x00000300
+#define RKISP1_CIF_SUPER_IMP_CTRL (RKISP1_CIF_SUPER_IMP_BASE + 0x00000000)
+#define RKISP1_CIF_SUPER_IMP_OFFSET_X (RKISP1_CIF_SUPER_IMP_BASE + 0x00000004)
+#define RKISP1_CIF_SUPER_IMP_OFFSET_Y (RKISP1_CIF_SUPER_IMP_BASE + 0x00000008)
+#define RKISP1_CIF_SUPER_IMP_COLOR_Y (RKISP1_CIF_SUPER_IMP_BASE + 0x0000000C)
+#define RKISP1_CIF_SUPER_IMP_COLOR_CB (RKISP1_CIF_SUPER_IMP_BASE + 0x00000010)
+#define RKISP1_CIF_SUPER_IMP_COLOR_CR (RKISP1_CIF_SUPER_IMP_BASE + 0x00000014)
+
+#define RKISP1_CIF_ISP_BASE 0x00000400
+#define RKISP1_CIF_ISP_CTRL (RKISP1_CIF_ISP_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_ACQ_PROP (RKISP1_CIF_ISP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_ACQ_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_ACQ_V_OFFS (RKISP1_CIF_ISP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_ACQ_H_SIZE (RKISP1_CIF_ISP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_ACQ_V_SIZE (RKISP1_CIF_ISP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_ACQ_NR_FRAMES (RKISP1_CIF_ISP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_GAMMA_DX_LO (RKISP1_CIF_ISP_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_GAMMA_DX_HI (RKISP1_CIF_ISP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_GAMMA_R_Y0 (RKISP1_CIF_ISP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_GAMMA_R_Y1 (RKISP1_CIF_ISP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_GAMMA_R_Y2 (RKISP1_CIF_ISP_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y3 (RKISP1_CIF_ISP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_GAMMA_R_Y4 (RKISP1_CIF_ISP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_GAMMA_R_Y5 (RKISP1_CIF_ISP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_GAMMA_R_Y6 (RKISP1_CIF_ISP_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y7 (RKISP1_CIF_ISP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_GAMMA_R_Y8 (RKISP1_CIF_ISP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_GAMMA_R_Y9 (RKISP1_CIF_ISP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_GAMMA_R_Y10 (RKISP1_CIF_ISP_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y11 (RKISP1_CIF_ISP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_GAMMA_R_Y12 (RKISP1_CIF_ISP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_GAMMA_R_Y13 (RKISP1_CIF_ISP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_GAMMA_R_Y14 (RKISP1_CIF_ISP_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y15 (RKISP1_CIF_ISP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_GAMMA_R_Y16 (RKISP1_CIF_ISP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_GAMMA_G_Y0 (RKISP1_CIF_ISP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_GAMMA_G_Y1 (RKISP1_CIF_ISP_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y2 (RKISP1_CIF_ISP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_GAMMA_G_Y3 (RKISP1_CIF_ISP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_GAMMA_G_Y4 (RKISP1_CIF_ISP_BASE + 0x00000078)
+#define RKISP1_CIF_ISP_GAMMA_G_Y5 (RKISP1_CIF_ISP_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y6 (RKISP1_CIF_ISP_BASE + 0x00000080)
+#define RKISP1_CIF_ISP_GAMMA_G_Y7 (RKISP1_CIF_ISP_BASE + 0x00000084)
+#define RKISP1_CIF_ISP_GAMMA_G_Y8 (RKISP1_CIF_ISP_BASE + 0x00000088)
+#define RKISP1_CIF_ISP_GAMMA_G_Y9 (RKISP1_CIF_ISP_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y10 (RKISP1_CIF_ISP_BASE + 0x00000090)
+#define RKISP1_CIF_ISP_GAMMA_G_Y11 (RKISP1_CIF_ISP_BASE + 0x00000094)
+#define RKISP1_CIF_ISP_GAMMA_G_Y12 (RKISP1_CIF_ISP_BASE + 0x00000098)
+#define RKISP1_CIF_ISP_GAMMA_G_Y13 (RKISP1_CIF_ISP_BASE + 0x0000009C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y14 (RKISP1_CIF_ISP_BASE + 0x000000A0)
+#define RKISP1_CIF_ISP_GAMMA_G_Y15 (RKISP1_CIF_ISP_BASE + 0x000000A4)
+#define RKISP1_CIF_ISP_GAMMA_G_Y16 (RKISP1_CIF_ISP_BASE + 0x000000A8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y0 (RKISP1_CIF_ISP_BASE + 0x000000AC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y1 (RKISP1_CIF_ISP_BASE + 0x000000B0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y2 (RKISP1_CIF_ISP_BASE + 0x000000B4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y3 (RKISP1_CIF_ISP_BASE + 0x000000B8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y4 (RKISP1_CIF_ISP_BASE + 0x000000BC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y5 (RKISP1_CIF_ISP_BASE + 0x000000C0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y6 (RKISP1_CIF_ISP_BASE + 0x000000C4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y7 (RKISP1_CIF_ISP_BASE + 0x000000C8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y8 (RKISP1_CIF_ISP_BASE + 0x000000CC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y9 (RKISP1_CIF_ISP_BASE + 0x000000D0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y10 (RKISP1_CIF_ISP_BASE + 0x000000D4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y11 (RKISP1_CIF_ISP_BASE + 0x000000D8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y12 (RKISP1_CIF_ISP_BASE + 0x000000DC)
+#define RKISP1_CIF_ISP_GAMMA_B_Y13 (RKISP1_CIF_ISP_BASE + 0x000000E0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y14 (RKISP1_CIF_ISP_BASE + 0x000000E4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y15 (RKISP1_CIF_ISP_BASE + 0x000000E8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y16 (RKISP1_CIF_ISP_BASE + 0x000000EC)
+#define RKISP1_CIF_ISP_AWB_PROP (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_WND_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_WND_V_OFFS (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_V_SIZE (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_AWB_FRAMES (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_AWB_REF (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_THRESH (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_GAIN_G (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_AWB_MEAN (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_CC_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x00000170)
+#define RKISP1_CIF_ISP_CC_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x00000174)
+#define RKISP1_CIF_ISP_CC_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x00000178)
+#define RKISP1_CIF_ISP_CC_COEFF_3 (RKISP1_CIF_ISP_BASE + 0x0000017C)
+#define RKISP1_CIF_ISP_CC_COEFF_4 (RKISP1_CIF_ISP_BASE + 0x00000180)
+#define RKISP1_CIF_ISP_CC_COEFF_5 (RKISP1_CIF_ISP_BASE + 0x00000184)
+#define RKISP1_CIF_ISP_CC_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x00000188)
+#define RKISP1_CIF_ISP_CC_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x0000018C)
+#define RKISP1_CIF_ISP_CC_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x00000190)
+#define RKISP1_CIF_ISP_OUT_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000194)
+#define RKISP1_CIF_ISP_OUT_V_OFFS (RKISP1_CIF_ISP_BASE + 0x00000198)
+#define RKISP1_CIF_ISP_OUT_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000019C)
+#define RKISP1_CIF_ISP_OUT_V_SIZE (RKISP1_CIF_ISP_BASE + 0x000001A0)
+#define RKISP1_CIF_ISP_DEMOSAIC (RKISP1_CIF_ISP_BASE + 0x000001A4)
+#define RKISP1_CIF_ISP_FLAGS_SHD (RKISP1_CIF_ISP_BASE + 0x000001A8)
+#define RKISP1_CIF_ISP_OUT_H_OFFS_SHD (RKISP1_CIF_ISP_BASE + 0x000001AC)
+#define RKISP1_CIF_ISP_OUT_V_OFFS_SHD (RKISP1_CIF_ISP_BASE + 0x000001B0)
+#define RKISP1_CIF_ISP_OUT_H_SIZE_SHD (RKISP1_CIF_ISP_BASE + 0x000001B4)
+#define RKISP1_CIF_ISP_OUT_V_SIZE_SHD (RKISP1_CIF_ISP_BASE + 0x000001B8)
+#define RKISP1_CIF_ISP_IMSC (RKISP1_CIF_ISP_BASE + 0x000001BC)
+#define RKISP1_CIF_ISP_RIS (RKISP1_CIF_ISP_BASE + 0x000001C0)
+#define RKISP1_CIF_ISP_MIS (RKISP1_CIF_ISP_BASE + 0x000001C4)
+#define RKISP1_CIF_ISP_ICR (RKISP1_CIF_ISP_BASE + 0x000001C8)
+#define RKISP1_CIF_ISP_ISR (RKISP1_CIF_ISP_BASE + 0x000001CC)
+#define RKISP1_CIF_ISP_CT_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x000001D0)
+#define RKISP1_CIF_ISP_CT_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x000001D4)
+#define RKISP1_CIF_ISP_CT_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x000001D8)
+#define RKISP1_CIF_ISP_CT_COEFF_3 (RKISP1_CIF_ISP_BASE + 0x000001DC)
+#define RKISP1_CIF_ISP_CT_COEFF_4 (RKISP1_CIF_ISP_BASE + 0x000001E0)
+#define RKISP1_CIF_ISP_CT_COEFF_5 (RKISP1_CIF_ISP_BASE + 0x000001E4)
+#define RKISP1_CIF_ISP_CT_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x000001E8)
+#define RKISP1_CIF_ISP_CT_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x000001EC)
+#define RKISP1_CIF_ISP_CT_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x000001F0)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE (RKISP1_CIF_ISP_BASE + 0x000001F4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0 (RKISP1_CIF_ISP_BASE + 0x000001F8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1 (RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2 (RKISP1_CIF_ISP_BASE + 0x00000200)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3 (RKISP1_CIF_ISP_BASE + 0x00000204)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4 (RKISP1_CIF_ISP_BASE + 0x00000208)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5 (RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6 (RKISP1_CIF_ISP_BASE + 0x00000210)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7 (RKISP1_CIF_ISP_BASE + 0x00000214)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8 (RKISP1_CIF_ISP_BASE + 0x00000218)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9 (RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10 (RKISP1_CIF_ISP_BASE + 0x00000220)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11 (RKISP1_CIF_ISP_BASE + 0x00000224)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12 (RKISP1_CIF_ISP_BASE + 0x00000228)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13 (RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14 (RKISP1_CIF_ISP_BASE + 0x00000230)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15 (RKISP1_CIF_ISP_BASE + 0x00000234)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16 (RKISP1_CIF_ISP_BASE + 0x00000238)
+#define RKISP1_CIF_ISP_ERR (RKISP1_CIF_ISP_BASE + 0x0000023C)
+#define RKISP1_CIF_ISP_ERR_CLR (RKISP1_CIF_ISP_BASE + 0x00000240)
+#define RKISP1_CIF_ISP_FRAME_COUNT (RKISP1_CIF_ISP_BASE + 0x00000244)
+#define RKISP1_CIF_ISP_CT_OFFSET_R (RKISP1_CIF_ISP_BASE + 0x00000248)
+#define RKISP1_CIF_ISP_CT_OFFSET_G (RKISP1_CIF_ISP_BASE + 0x0000024C)
+#define RKISP1_CIF_ISP_CT_OFFSET_B (RKISP1_CIF_ISP_BASE + 0x00000250)
+
+#define RKISP1_CIF_ISP_FLASH_BASE 0x00000660
+#define RKISP1_CIF_ISP_FLASH_CMD (RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_FLASH_CONFIG (RKISP1_CIF_ISP_FLASH_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_FLASH_PREDIV (RKISP1_CIF_ISP_FLASH_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_FLASH_DELAY (RKISP1_CIF_ISP_FLASH_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_FLASH_TIME (RKISP1_CIF_ISP_FLASH_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_FLASH_MAXP (RKISP1_CIF_ISP_FLASH_BASE + 0x00000014)
+
+#define RKISP1_CIF_ISP_SH_BASE 0x00000680
+#define RKISP1_CIF_ISP_SH_CTRL (RKISP1_CIF_ISP_SH_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_SH_PREDIV (RKISP1_CIF_ISP_SH_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_SH_DELAY (RKISP1_CIF_ISP_SH_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_SH_TIME (RKISP1_CIF_ISP_SH_BASE + 0x0000000C)
+
+#define RKISP1_CIF_C_PROC_BASE 0x00000800
+#define RKISP1_CIF_C_PROC_CTRL (RKISP1_CIF_C_PROC_BASE + 0x00000000)
+#define RKISP1_CIF_C_PROC_CONTRAST (RKISP1_CIF_C_PROC_BASE + 0x00000004)
+#define RKISP1_CIF_C_PROC_BRIGHTNESS (RKISP1_CIF_C_PROC_BASE + 0x00000008)
+#define RKISP1_CIF_C_PROC_SATURATION (RKISP1_CIF_C_PROC_BASE + 0x0000000C)
+#define RKISP1_CIF_C_PROC_HUE (RKISP1_CIF_C_PROC_BASE + 0x00000010)
+
+#define RKISP1_CIF_DUAL_CROP_BASE 0x00000880
+#define RKISP1_CIF_DUAL_CROP_CTRL (RKISP1_CIF_DUAL_CROP_BASE + 0x00000000)
+#define RKISP1_CIF_DUAL_CROP_M_H_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000004)
+#define RKISP1_CIF_DUAL_CROP_M_V_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000008)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x0000000C)
+#define RKISP1_CIF_DUAL_CROP_M_V_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x00000010)
+#define RKISP1_CIF_DUAL_CROP_S_H_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000014)
+#define RKISP1_CIF_DUAL_CROP_S_V_OFFS (RKISP1_CIF_DUAL_CROP_BASE + 0x00000018)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x0000001C)
+#define RKISP1_CIF_DUAL_CROP_S_V_SIZE (RKISP1_CIF_DUAL_CROP_BASE + 0x00000020)
+#define RKISP1_CIF_DUAL_CROP_M_H_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000024)
+#define RKISP1_CIF_DUAL_CROP_M_V_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000028)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x0000002C)
+#define RKISP1_CIF_DUAL_CROP_M_V_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000030)
+#define RKISP1_CIF_DUAL_CROP_S_H_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000034)
+#define RKISP1_CIF_DUAL_CROP_S_V_OFFS_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000038)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x0000003C)
+#define RKISP1_CIF_DUAL_CROP_S_V_SIZE_SHD (RKISP1_CIF_DUAL_CROP_BASE + 0x00000040)
+
+#define RKISP1_CIF_MRSZ_BASE 0x00000C00
+#define RKISP1_CIF_MRSZ_CTRL (RKISP1_CIF_MRSZ_BASE + 0x00000000)
+#define RKISP1_CIF_MRSZ_SCALE_HY (RKISP1_CIF_MRSZ_BASE + 0x00000004)
+#define RKISP1_CIF_MRSZ_SCALE_HCB (RKISP1_CIF_MRSZ_BASE + 0x00000008)
+#define RKISP1_CIF_MRSZ_SCALE_HCR (RKISP1_CIF_MRSZ_BASE + 0x0000000C)
+#define RKISP1_CIF_MRSZ_SCALE_VY (RKISP1_CIF_MRSZ_BASE + 0x00000010)
+#define RKISP1_CIF_MRSZ_SCALE_VC (RKISP1_CIF_MRSZ_BASE + 0x00000014)
+#define RKISP1_CIF_MRSZ_PHASE_HY (RKISP1_CIF_MRSZ_BASE + 0x00000018)
+#define RKISP1_CIF_MRSZ_PHASE_HC (RKISP1_CIF_MRSZ_BASE + 0x0000001C)
+#define RKISP1_CIF_MRSZ_PHASE_VY (RKISP1_CIF_MRSZ_BASE + 0x00000020)
+#define RKISP1_CIF_MRSZ_PHASE_VC (RKISP1_CIF_MRSZ_BASE + 0x00000024)
+#define RKISP1_CIF_MRSZ_SCALE_LUT_ADDR (RKISP1_CIF_MRSZ_BASE + 0x00000028)
+#define RKISP1_CIF_MRSZ_SCALE_LUT (RKISP1_CIF_MRSZ_BASE + 0x0000002C)
+#define RKISP1_CIF_MRSZ_CTRL_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000030)
+#define RKISP1_CIF_MRSZ_SCALE_HY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000034)
+#define RKISP1_CIF_MRSZ_SCALE_HCB_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000038)
+#define RKISP1_CIF_MRSZ_SCALE_HCR_SHD (RKISP1_CIF_MRSZ_BASE + 0x0000003C)
+#define RKISP1_CIF_MRSZ_SCALE_VY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000040)
+#define RKISP1_CIF_MRSZ_SCALE_VC_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000044)
+#define RKISP1_CIF_MRSZ_PHASE_HY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000048)
+#define RKISP1_CIF_MRSZ_PHASE_HC_SHD (RKISP1_CIF_MRSZ_BASE + 0x0000004C)
+#define RKISP1_CIF_MRSZ_PHASE_VY_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000050)
+#define RKISP1_CIF_MRSZ_PHASE_VC_SHD (RKISP1_CIF_MRSZ_BASE + 0x00000054)
+
+#define RKISP1_CIF_SRSZ_BASE 0x00001000
+#define RKISP1_CIF_SRSZ_CTRL (RKISP1_CIF_SRSZ_BASE + 0x00000000)
+#define RKISP1_CIF_SRSZ_SCALE_HY (RKISP1_CIF_SRSZ_BASE + 0x00000004)
+#define RKISP1_CIF_SRSZ_SCALE_HCB (RKISP1_CIF_SRSZ_BASE + 0x00000008)
+#define RKISP1_CIF_SRSZ_SCALE_HCR (RKISP1_CIF_SRSZ_BASE + 0x0000000C)
+#define RKISP1_CIF_SRSZ_SCALE_VY (RKISP1_CIF_SRSZ_BASE + 0x00000010)
+#define RKISP1_CIF_SRSZ_SCALE_VC (RKISP1_CIF_SRSZ_BASE + 0x00000014)
+#define RKISP1_CIF_SRSZ_PHASE_HY (RKISP1_CIF_SRSZ_BASE + 0x00000018)
+#define RKISP1_CIF_SRSZ_PHASE_HC (RKISP1_CIF_SRSZ_BASE + 0x0000001C)
+#define RKISP1_CIF_SRSZ_PHASE_VY (RKISP1_CIF_SRSZ_BASE + 0x00000020)
+#define RKISP1_CIF_SRSZ_PHASE_VC (RKISP1_CIF_SRSZ_BASE + 0x00000024)
+#define RKISP1_CIF_SRSZ_SCALE_LUT_ADDR (RKISP1_CIF_SRSZ_BASE + 0x00000028)
+#define RKISP1_CIF_SRSZ_SCALE_LUT (RKISP1_CIF_SRSZ_BASE + 0x0000002C)
+#define RKISP1_CIF_SRSZ_CTRL_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000030)
+#define RKISP1_CIF_SRSZ_SCALE_HY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000034)
+#define RKISP1_CIF_SRSZ_SCALE_HCB_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000038)
+#define RKISP1_CIF_SRSZ_SCALE_HCR_SHD (RKISP1_CIF_SRSZ_BASE + 0x0000003C)
+#define RKISP1_CIF_SRSZ_SCALE_VY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000040)
+#define RKISP1_CIF_SRSZ_SCALE_VC_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000044)
+#define RKISP1_CIF_SRSZ_PHASE_HY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000048)
+#define RKISP1_CIF_SRSZ_PHASE_HC_SHD (RKISP1_CIF_SRSZ_BASE + 0x0000004C)
+#define RKISP1_CIF_SRSZ_PHASE_VY_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000050)
+#define RKISP1_CIF_SRSZ_PHASE_VC_SHD (RKISP1_CIF_SRSZ_BASE + 0x00000054)
+
+#define RKISP1_CIF_MI_BASE 0x00001400
+#define RKISP1_CIF_MI_CTRL (RKISP1_CIF_MI_BASE + 0x00000000)
+#define RKISP1_CIF_MI_INIT (RKISP1_CIF_MI_BASE + 0x00000004)
+#define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x00000008)
+#define RKISP1_CIF_MI_MP_Y_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x0000000C)
+#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000010)
+#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000014)
+#define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_INIT (RKISP1_CIF_MI_BASE + 0x00000018)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000001C)
+#define RKISP1_CIF_MI_MP_CB_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000020)
+#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000024)
+#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000028)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000002C)
+#define RKISP1_CIF_MI_MP_CR_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000030)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000034)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000038)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x0000003C)
+#define RKISP1_CIF_MI_SP_Y_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000040)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000044)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x00000048)
+#define RKISP1_CIF_MI_SP_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x0000004C)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x00000050)
+#define RKISP1_CIF_MI_SP_CB_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000054)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000058)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x0000005C)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT (RKISP1_CIF_MI_BASE + 0x00000060)
+#define RKISP1_CIF_MI_SP_CR_SIZE_INIT (RKISP1_CIF_MI_BASE + 0x00000064)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT (RKISP1_CIF_MI_BASE + 0x00000068)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_START (RKISP1_CIF_MI_BASE + 0x0000006C)
+#define RKISP1_CIF_MI_BYTE_CNT (RKISP1_CIF_MI_BASE + 0x00000070)
+#define RKISP1_CIF_MI_CTRL_SHD (RKISP1_CIF_MI_BASE + 0x00000074)
+#define RKISP1_CIF_MI_MP_Y_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x00000078)
+#define RKISP1_CIF_MI_MP_Y_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x0000007C)
+#define RKISP1_CIF_MI_MP_Y_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x00000080)
+#define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_SHD (RKISP1_CIF_MI_BASE + 0x00000084)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x00000088)
+#define RKISP1_CIF_MI_MP_CB_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x0000008C)
+#define RKISP1_CIF_MI_MP_CB_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x00000090)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x00000094)
+#define RKISP1_CIF_MI_MP_CR_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x00000098)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x0000009C)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000A0)
+#define RKISP1_CIF_MI_SP_Y_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000A4)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000A8)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000B0)
+#define RKISP1_CIF_MI_SP_CB_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000B4)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000B8)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_SHD (RKISP1_CIF_MI_BASE + 0x000000BC)
+#define RKISP1_CIF_MI_SP_CR_SIZE_SHD (RKISP1_CIF_MI_BASE + 0x000000C0)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_SHD (RKISP1_CIF_MI_BASE + 0x000000C4)
+#define RKISP1_CIF_MI_DMA_Y_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000C8)
+#define RKISP1_CIF_MI_DMA_Y_PIC_WIDTH (RKISP1_CIF_MI_BASE + 0x000000CC)
+#define RKISP1_CIF_MI_DMA_Y_LLENGTH (RKISP1_CIF_MI_BASE + 0x000000D0)
+#define RKISP1_CIF_MI_DMA_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x000000D4)
+#define RKISP1_CIF_MI_DMA_CB_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000D8)
+#define RKISP1_CIF_MI_DMA_CR_PIC_START_AD (RKISP1_CIF_MI_BASE + 0x000000E8)
+#define RKISP1_CIF_MI_IMSC (RKISP1_CIF_MI_BASE + 0x000000F8)
+#define RKISP1_CIF_MI_RIS (RKISP1_CIF_MI_BASE + 0x000000FC)
+#define RKISP1_CIF_MI_MIS (RKISP1_CIF_MI_BASE + 0x00000100)
+#define RKISP1_CIF_MI_ICR (RKISP1_CIF_MI_BASE + 0x00000104)
+#define RKISP1_CIF_MI_ISR (RKISP1_CIF_MI_BASE + 0x00000108)
+#define RKISP1_CIF_MI_STATUS (RKISP1_CIF_MI_BASE + 0x0000010C)
+#define RKISP1_CIF_MI_STATUS_CLR (RKISP1_CIF_MI_BASE + 0x00000110)
+#define RKISP1_CIF_MI_SP_Y_PIC_WIDTH (RKISP1_CIF_MI_BASE + 0x00000114)
+#define RKISP1_CIF_MI_SP_Y_PIC_HEIGHT (RKISP1_CIF_MI_BASE + 0x00000118)
+#define RKISP1_CIF_MI_SP_Y_PIC_SIZE (RKISP1_CIF_MI_BASE + 0x0000011C)
+#define RKISP1_CIF_MI_DMA_CTRL (RKISP1_CIF_MI_BASE + 0x00000120)
+#define RKISP1_CIF_MI_DMA_START (RKISP1_CIF_MI_BASE + 0x00000124)
+#define RKISP1_CIF_MI_DMA_STATUS (RKISP1_CIF_MI_BASE + 0x00000128)
+#define RKISP1_CIF_MI_PIXEL_COUNT (RKISP1_CIF_MI_BASE + 0x0000012C)
+#define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000130)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000134)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000138)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x0000013C)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000140)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT2 (RKISP1_CIF_MI_BASE + 0x00000144)
+#define RKISP1_CIF_MI_XTD_FORMAT_CTRL (RKISP1_CIF_MI_BASE + 0x00000148)
+
+#define RKISP1_CIF_SMIA_BASE 0x00001A00
+#define RKISP1_CIF_SMIA_CTRL (RKISP1_CIF_SMIA_BASE + 0x00000000)
+#define RKISP1_CIF_SMIA_STATUS (RKISP1_CIF_SMIA_BASE + 0x00000004)
+#define RKISP1_CIF_SMIA_IMSC (RKISP1_CIF_SMIA_BASE + 0x00000008)
+#define RKISP1_CIF_SMIA_RIS (RKISP1_CIF_SMIA_BASE + 0x0000000C)
+#define RKISP1_CIF_SMIA_MIS (RKISP1_CIF_SMIA_BASE + 0x00000010)
+#define RKISP1_CIF_SMIA_ICR (RKISP1_CIF_SMIA_BASE + 0x00000014)
+#define RKISP1_CIF_SMIA_ISR (RKISP1_CIF_SMIA_BASE + 0x00000018)
+#define RKISP1_CIF_SMIA_DATA_FORMAT_SEL (RKISP1_CIF_SMIA_BASE + 0x0000001C)
+#define RKISP1_CIF_SMIA_SOF_EMB_DATA_LINES (RKISP1_CIF_SMIA_BASE + 0x00000020)
+#define RKISP1_CIF_SMIA_EMB_HSTART (RKISP1_CIF_SMIA_BASE + 0x00000024)
+#define RKISP1_CIF_SMIA_EMB_HSIZE (RKISP1_CIF_SMIA_BASE + 0x00000028)
+#define RKISP1_CIF_SMIA_EMB_VSTART (RKISP1_CIF_SMIA_BASE + 0x0000002C)
+#define RKISP1_CIF_SMIA_NUM_LINES (RKISP1_CIF_SMIA_BASE + 0x00000030)
+#define RKISP1_CIF_SMIA_EMB_DATA_FIFO (RKISP1_CIF_SMIA_BASE + 0x00000034)
+#define RKISP1_CIF_SMIA_EMB_DATA_WATERMARK (RKISP1_CIF_SMIA_BASE + 0x00000038)
+
+#define RKISP1_CIF_MIPI_BASE 0x00001C00
+#define RKISP1_CIF_MIPI_CTRL (RKISP1_CIF_MIPI_BASE + 0x00000000)
+#define RKISP1_CIF_MIPI_STATUS (RKISP1_CIF_MIPI_BASE + 0x00000004)
+#define RKISP1_CIF_MIPI_IMSC (RKISP1_CIF_MIPI_BASE + 0x00000008)
+#define RKISP1_CIF_MIPI_RIS (RKISP1_CIF_MIPI_BASE + 0x0000000C)
+#define RKISP1_CIF_MIPI_MIS (RKISP1_CIF_MIPI_BASE + 0x00000010)
+#define RKISP1_CIF_MIPI_ICR (RKISP1_CIF_MIPI_BASE + 0x00000014)
+#define RKISP1_CIF_MIPI_ISR (RKISP1_CIF_MIPI_BASE + 0x00000018)
+#define RKISP1_CIF_MIPI_CUR_DATA_ID (RKISP1_CIF_MIPI_BASE + 0x0000001C)
+#define RKISP1_CIF_MIPI_IMG_DATA_SEL (RKISP1_CIF_MIPI_BASE + 0x00000020)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_1 (RKISP1_CIF_MIPI_BASE + 0x00000024)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_2 (RKISP1_CIF_MIPI_BASE + 0x00000028)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_3 (RKISP1_CIF_MIPI_BASE + 0x0000002C)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_4 (RKISP1_CIF_MIPI_BASE + 0x00000030)
+#define RKISP1_CIF_MIPI_ADD_DATA_FIFO (RKISP1_CIF_MIPI_BASE + 0x00000034)
+#define RKISP1_CIF_MIPI_FIFO_FILL_LEVEL (RKISP1_CIF_MIPI_BASE + 0x00000038)
+#define RKISP1_CIF_MIPI_COMPRESSED_MODE (RKISP1_CIF_MIPI_BASE + 0x0000003C)
+#define RKISP1_CIF_MIPI_FRAME (RKISP1_CIF_MIPI_BASE + 0x00000040)
+#define RKISP1_CIF_MIPI_GEN_SHORT_DT (RKISP1_CIF_MIPI_BASE + 0x00000044)
+#define RKISP1_CIF_MIPI_GEN_SHORT_8_9 (RKISP1_CIF_MIPI_BASE + 0x00000048)
+#define RKISP1_CIF_MIPI_GEN_SHORT_A_B (RKISP1_CIF_MIPI_BASE + 0x0000004C)
+#define RKISP1_CIF_MIPI_GEN_SHORT_C_D (RKISP1_CIF_MIPI_BASE + 0x00000050)
+#define RKISP1_CIF_MIPI_GEN_SHORT_E_F (RKISP1_CIF_MIPI_BASE + 0x00000054)
+
+#define RKISP1_CIF_ISP_AFM_BASE 0x00002000
+#define RKISP1_CIF_ISP_AFM_CTRL (RKISP1_CIF_ISP_AFM_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_AFM_LT_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_AFM_RB_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_AFM_LT_B (RKISP1_CIF_ISP_AFM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_AFM_RB_B (RKISP1_CIF_ISP_AFM_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_AFM_LT_C (RKISP1_CIF_ISP_AFM_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_AFM_RB_C (RKISP1_CIF_ISP_AFM_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_AFM_THRES (RKISP1_CIF_ISP_AFM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_AFM_VAR_SHIFT (RKISP1_CIF_ISP_AFM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_AFM_SUM_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_AFM_SUM_B (RKISP1_CIF_ISP_AFM_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_AFM_SUM_C (RKISP1_CIF_ISP_AFM_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_AFM_LUM_A (RKISP1_CIF_ISP_AFM_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_AFM_LUM_B (RKISP1_CIF_ISP_AFM_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_AFM_LUM_C (RKISP1_CIF_ISP_AFM_BASE + 0x00000038)
+
+#define RKISP1_CIF_ISP_LSC_BASE 0x00002200
+#define RKISP1_CIF_ISP_LSC_CTRL (RKISP1_CIF_ISP_LSC_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_LSC_R_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR (RKISP1_CIF_ISP_LSC_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_LSC_R_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_LSC_XGRAD_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_LSC_XGRAD_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_LSC_XGRAD_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_LSC_XGRAD_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_LSC_YGRAD_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_LSC_YGRAD_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_LSC_YGRAD_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_LSC_YGRAD_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_LSC_XSIZE_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_LSC_XSIZE_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_LSC_XSIZE_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_LSC_XSIZE_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_LSC_YSIZE_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_LSC_YSIZE_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_LSC_YSIZE_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_LSC_YSIZE_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_LSC_TABLE_SEL (RKISP1_CIF_ISP_LSC_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_LSC_STATUS (RKISP1_CIF_ISP_LSC_BASE + 0x00000068)
+
+#define RKISP1_CIF_ISP_IS_BASE 0x00002300
+#define RKISP1_CIF_ISP_IS_CTRL (RKISP1_CIF_ISP_IS_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_IS_RECENTER (RKISP1_CIF_ISP_IS_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_IS_H_OFFS (RKISP1_CIF_ISP_IS_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_IS_V_OFFS (RKISP1_CIF_ISP_IS_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_IS_H_SIZE (RKISP1_CIF_ISP_IS_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_IS_V_SIZE (RKISP1_CIF_ISP_IS_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_IS_MAX_DX (RKISP1_CIF_ISP_IS_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_IS_MAX_DY (RKISP1_CIF_ISP_IS_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_IS_DISPLACE (RKISP1_CIF_ISP_IS_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_IS_H_OFFS_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_IS_V_OFFS_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_IS_H_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_IS_V_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000030)
+
+#define RKISP1_CIF_ISP_HIST_BASE 0x00002400
+#define RKISP1_CIF_ISP_HIST_PROP (RKISP1_CIF_ISP_HIST_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_H_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_V_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_H_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_V_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_HIST_BIN_0 (RKISP1_CIF_ISP_HIST_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_HIST_BIN_1 (RKISP1_CIF_ISP_HIST_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_HIST_BIN_2 (RKISP1_CIF_ISP_HIST_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_3 (RKISP1_CIF_ISP_HIST_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_HIST_BIN_4 (RKISP1_CIF_ISP_HIST_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_HIST_BIN_5 (RKISP1_CIF_ISP_HIST_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_HIST_BIN_6 (RKISP1_CIF_ISP_HIST_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_7 (RKISP1_CIF_ISP_HIST_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_HIST_BIN_8 (RKISP1_CIF_ISP_HIST_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_HIST_BIN_9 (RKISP1_CIF_ISP_HIST_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_HIST_BIN_10 (RKISP1_CIF_ISP_HIST_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_11 (RKISP1_CIF_ISP_HIST_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_HIST_BIN_12 (RKISP1_CIF_ISP_HIST_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_HIST_BIN_13 (RKISP1_CIF_ISP_HIST_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_HIST_BIN_14 (RKISP1_CIF_ISP_HIST_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_15 (RKISP1_CIF_ISP_HIST_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30 (RKISP1_CIF_ISP_HIST_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21 (RKISP1_CIF_ISP_HIST_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12 (RKISP1_CIF_ISP_HIST_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03 (RKISP1_CIF_ISP_HIST_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43 (RKISP1_CIF_ISP_HIST_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34 (RKISP1_CIF_ISP_HIST_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44 (RKISP1_CIF_ISP_HIST_BASE + 0x0000006C)
+
+#define RKISP1_CIF_ISP_FILT_BASE 0x00002500
+#define RKISP1_CIF_ISP_FILT_MODE (RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_FILT_THRESH_BL0 (RKISP1_CIF_ISP_FILT_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_FILT_THRESH_BL1 (RKISP1_CIF_ISP_FILT_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_FILT_THRESH_SH0 (RKISP1_CIF_ISP_FILT_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_FILT_THRESH_SH1 (RKISP1_CIF_ISP_FILT_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_FILT_LUM_WEIGHT (RKISP1_CIF_ISP_FILT_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_FILT_FAC_SH1 (RKISP1_CIF_ISP_FILT_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_FILT_FAC_SH0 (RKISP1_CIF_ISP_FILT_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_FILT_FAC_MID (RKISP1_CIF_ISP_FILT_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_FILT_FAC_BL0 (RKISP1_CIF_ISP_FILT_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_FILT_FAC_BL1 (RKISP1_CIF_ISP_FILT_BASE + 0x0000004C)
+
+#define RKISP1_CIF_ISP_CAC_BASE 0x00002580
+#define RKISP1_CIF_ISP_CAC_CTRL (RKISP1_CIF_ISP_CAC_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_CAC_COUNT_START (RKISP1_CIF_ISP_CAC_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_CAC_A (RKISP1_CIF_ISP_CAC_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_CAC_B (RKISP1_CIF_ISP_CAC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_CAC_C (RKISP1_CIF_ISP_CAC_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_X_NORM (RKISP1_CIF_ISP_CAC_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_Y_NORM (RKISP1_CIF_ISP_CAC_BASE + 0x00000018)
+
+#define RKISP1_CIF_ISP_EXP_BASE 0x00002600
+#define RKISP1_CIF_ISP_EXP_CTRL (RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_H_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_V_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_EXP_MEAN_00 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_EXP_MEAN_10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_EXP_MEAN_20 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_EXP_MEAN_30 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_EXP_MEAN_40 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_EXP_MEAN_01 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_EXP_MEAN_11 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_EXP_MEAN_21 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_EXP_MEAN_31 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_EXP_MEAN_41 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_EXP_MEAN_02 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_EXP_MEAN_12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_EXP_MEAN_22 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_EXP_MEAN_32 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_EXP_MEAN_42 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_EXP_MEAN_03 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_EXP_MEAN_13 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_EXP_MEAN_23 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_EXP_MEAN_33 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_EXP_MEAN_43 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_EXP_MEAN_04 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_EXP_MEAN_14 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_EXP_MEAN_24 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_EXP_MEAN_34 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_EXP_MEAN_44 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+
+#define RKISP1_CIF_ISP_BLS_BASE 0x00002700
+#define RKISP1_CIF_ISP_BLS_CTRL (RKISP1_CIF_ISP_BLS_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_BLS_SAMPLES (RKISP1_CIF_ISP_BLS_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_BLS_H1_START (RKISP1_CIF_ISP_BLS_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_BLS_H1_STOP (RKISP1_CIF_ISP_BLS_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_BLS_V1_START (RKISP1_CIF_ISP_BLS_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_BLS_V1_STOP (RKISP1_CIF_ISP_BLS_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_BLS_H2_START (RKISP1_CIF_ISP_BLS_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_BLS_H2_STOP (RKISP1_CIF_ISP_BLS_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_BLS_V2_START (RKISP1_CIF_ISP_BLS_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_BLS_V2_STOP (RKISP1_CIF_ISP_BLS_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_BLS_A_FIXED (RKISP1_CIF_ISP_BLS_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_BLS_B_FIXED (RKISP1_CIF_ISP_BLS_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_BLS_C_FIXED (RKISP1_CIF_ISP_BLS_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_BLS_D_FIXED (RKISP1_CIF_ISP_BLS_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_BLS_A_MEASURED (RKISP1_CIF_ISP_BLS_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_BLS_B_MEASURED (RKISP1_CIF_ISP_BLS_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_BLS_C_MEASURED (RKISP1_CIF_ISP_BLS_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_BLS_D_MEASURED (RKISP1_CIF_ISP_BLS_BASE + 0x00000044)
+
+#define RKISP1_CIF_ISP_DPF_BASE 0x00002800
+#define RKISP1_CIF_ISP_DPF_MODE (RKISP1_CIF_ISP_DPF_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_R (RKISP1_CIF_ISP_DPF_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_G (RKISP1_CIF_ISP_DPF_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_B (RKISP1_CIF_ISP_DPF_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4 (RKISP1_CIF_ISP_DPF_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6 (RKISP1_CIF_ISP_DPF_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4 (RKISP1_CIF_ISP_DPF_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6 (RKISP1_CIF_ISP_DPF_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_0 (RKISP1_CIF_ISP_DPF_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_1 (RKISP1_CIF_ISP_DPF_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_2 (RKISP1_CIF_ISP_DPF_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_3 (RKISP1_CIF_ISP_DPF_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_4 (RKISP1_CIF_ISP_DPF_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_5 (RKISP1_CIF_ISP_DPF_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_6 (RKISP1_CIF_ISP_DPF_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_7 (RKISP1_CIF_ISP_DPF_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_8 (RKISP1_CIF_ISP_DPF_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_9 (RKISP1_CIF_ISP_DPF_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_10 (RKISP1_CIF_ISP_DPF_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_11 (RKISP1_CIF_ISP_DPF_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_12 (RKISP1_CIF_ISP_DPF_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_13 (RKISP1_CIF_ISP_DPF_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_14 (RKISP1_CIF_ISP_DPF_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_15 (RKISP1_CIF_ISP_DPF_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_16 (RKISP1_CIF_ISP_DPF_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_R (RKISP1_CIF_ISP_DPF_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_GR (RKISP1_CIF_ISP_DPF_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_GB (RKISP1_CIF_ISP_DPF_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_B (RKISP1_CIF_ISP_DPF_BASE + 0x00000070)
+
+#define RKISP1_CIF_ISP_DPCC_BASE 0x00002900
+#define RKISP1_CIF_ISP_DPCC_MODE (RKISP1_CIF_ISP_DPCC_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE (RKISP1_CIF_ISP_DPCC_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_DPCC_SET_USE (RKISP1_CIF_ISP_DPCC_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_1 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_2 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_3 (RKISP1_CIF_ISP_DPCC_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS (RKISP1_CIF_ISP_DPCC_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS (RKISP1_CIF_ISP_DPCC_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_DPCC_BPT_CTRL (RKISP1_CIF_ISP_DPCC_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPCC_BPT_NUMBER (RKISP1_CIF_ISP_DPCC_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_DPCC_BPT_ADDR (RKISP1_CIF_ISP_DPCC_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_DPCC_BPT_DATA (RKISP1_CIF_ISP_DPCC_BASE + 0x00000068)
+
+#define RKISP1_CIF_ISP_WDR_BASE 0x00002A00
+#define RKISP1_CIF_ISP_WDR_CTRL (RKISP1_CIF_ISP_WDR_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_1 (RKISP1_CIF_ISP_WDR_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_2 (RKISP1_CIF_ISP_WDR_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3 (RKISP1_CIF_ISP_WDR_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_4 (RKISP1_CIF_ISP_WDR_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0 (RKISP1_CIF_ISP_WDR_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1 (RKISP1_CIF_ISP_WDR_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2 (RKISP1_CIF_ISP_WDR_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3 (RKISP1_CIF_ISP_WDR_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4 (RKISP1_CIF_ISP_WDR_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5 (RKISP1_CIF_ISP_WDR_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6 (RKISP1_CIF_ISP_WDR_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7 (RKISP1_CIF_ISP_WDR_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8 (RKISP1_CIF_ISP_WDR_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9 (RKISP1_CIF_ISP_WDR_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10 (RKISP1_CIF_ISP_WDR_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11 (RKISP1_CIF_ISP_WDR_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12 (RKISP1_CIF_ISP_WDR_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13 (RKISP1_CIF_ISP_WDR_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14 (RKISP1_CIF_ISP_WDR_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15 (RKISP1_CIF_ISP_WDR_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16 (RKISP1_CIF_ISP_WDR_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17 (RKISP1_CIF_ISP_WDR_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18 (RKISP1_CIF_ISP_WDR_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19 (RKISP1_CIF_ISP_WDR_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20 (RKISP1_CIF_ISP_WDR_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21 (RKISP1_CIF_ISP_WDR_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22 (RKISP1_CIF_ISP_WDR_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23 (RKISP1_CIF_ISP_WDR_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24 (RKISP1_CIF_ISP_WDR_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25 (RKISP1_CIF_ISP_WDR_BASE + 0x00000078)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26 (RKISP1_CIF_ISP_WDR_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27 (RKISP1_CIF_ISP_WDR_BASE + 0x00000080)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28 (RKISP1_CIF_ISP_WDR_BASE + 0x00000084)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29 (RKISP1_CIF_ISP_WDR_BASE + 0x00000088)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30 (RKISP1_CIF_ISP_WDR_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31 (RKISP1_CIF_ISP_WDR_BASE + 0x00000090)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32 (RKISP1_CIF_ISP_WDR_BASE + 0x00000094)
+#define RKISP1_CIF_ISP_WDR_OFFSET (RKISP1_CIF_ISP_WDR_BASE + 0x00000098)
+#define RKISP1_CIF_ISP_WDR_DELTAMIN (RKISP1_CIF_ISP_WDR_BASE + 0x0000009C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000A0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000A4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000A8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000AC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000B0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000B4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000B8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000BC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000C0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000C4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000C8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000CC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000DC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000EC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000FC)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000100)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000104)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000108)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000010C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
+
+#define RKISP1_CIF_ISP_VSM_BASE 0x00002F00
+#define RKISP1_CIF_ISP_VSM_MODE (RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_VSM_H_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_VSM_V_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_VSM_H_SIZE (RKISP1_CIF_ISP_VSM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_VSM_V_SIZE (RKISP1_CIF_ISP_VSM_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_VSM_H_SEGMENTS (RKISP1_CIF_ISP_VSM_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_VSM_V_SEGMENTS (RKISP1_CIF_ISP_VSM_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+
+#endif /* _RKISP1_REGS_H */
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
new file mode 100644
index 000000000000..8cdc29c1a178
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - V4l resizer device
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include "rkisp1-common.h"
+
+#define RKISP1_RSZ_SP_DEV_NAME RKISP1_DRIVER_NAME "_resizer_selfpath"
+#define RKISP1_RSZ_MP_DEV_NAME RKISP1_DRIVER_NAME "_resizer_mainpath"
+
+#define RKISP1_DEF_FMT MEDIA_BUS_FMT_YUYV8_2X8
+#define RKISP1_DEF_FMT_TYPE RKISP1_FMT_YUV
+
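+/* chroma subsampling (h/v) of the default YUYV8_2X8, i.e. YUV 4:2:2, format */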
+#define RKISP1_MBUS_FMT_HDIV 2
+#define RKISP1_MBUS_FMT_VDIV 1
+
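+/*
+ * The resizer and dual-crop registers are shadowed: values written by the
+ * driver only take effect once an update bit is set in the control
+ * register. As programmed below, SYNC forces an immediate update while
+ * ASYNC arms the hardware-generated update, so a running stream latches
+ * the new configuration at a frame boundary.
+ */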
+enum rkisp1_shadow_regs_when {
+ RKISP1_SHADOW_REGS_SYNC,
+ RKISP1_SHADOW_REGS_ASYNC,
+};
+
+struct rkisp1_rsz_config {
+ /* constraints */
+ const int max_rsz_width;
+ const int max_rsz_height;
+ const int min_rsz_width;
+ const int min_rsz_height;
+ /* registers */
+ struct {
+ u32 ctrl;
+ u32 ctrl_shd;
+ u32 scale_hy;
+ u32 scale_hcr;
+ u32 scale_hcb;
+ u32 scale_vy;
+ u32 scale_vc;
+ u32 scale_lut;
+ u32 scale_lut_addr;
+ u32 scale_hy_shd;
+ u32 scale_hcr_shd;
+ u32 scale_hcb_shd;
+ u32 scale_vy_shd;
+ u32 scale_vc_shd;
+ u32 phase_hy;
+ u32 phase_hc;
+ u32 phase_vy;
+ u32 phase_vc;
+ u32 phase_hy_shd;
+ u32 phase_hc_shd;
+ u32 phase_vy_shd;
+ u32 phase_vc_shd;
+ } rsz;
+ struct {
+ u32 ctrl;
+ u32 yuvmode_mask;
+ u32 rawmode_mask;
+ u32 h_offset;
+ u32 v_offset;
+ u32 h_size;
+ u32 v_size;
+ } dual_crop;
+};
+
+static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = {
+ /* constraints */
+ .max_rsz_width = RKISP1_RSZ_MP_SRC_MAX_WIDTH,
+ .max_rsz_height = RKISP1_RSZ_MP_SRC_MAX_HEIGHT,
+ .min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
+ .min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
+ /* registers */
+ .rsz = {
+ .ctrl = RKISP1_CIF_MRSZ_CTRL,
+ .scale_hy = RKISP1_CIF_MRSZ_SCALE_HY,
+ .scale_hcr = RKISP1_CIF_MRSZ_SCALE_HCR,
+ .scale_hcb = RKISP1_CIF_MRSZ_SCALE_HCB,
+ .scale_vy = RKISP1_CIF_MRSZ_SCALE_VY,
+ .scale_vc = RKISP1_CIF_MRSZ_SCALE_VC,
+ .scale_lut = RKISP1_CIF_MRSZ_SCALE_LUT,
+ .scale_lut_addr = RKISP1_CIF_MRSZ_SCALE_LUT_ADDR,
+ .scale_hy_shd = RKISP1_CIF_MRSZ_SCALE_HY_SHD,
+ .scale_hcr_shd = RKISP1_CIF_MRSZ_SCALE_HCR_SHD,
+ .scale_hcb_shd = RKISP1_CIF_MRSZ_SCALE_HCB_SHD,
+ .scale_vy_shd = RKISP1_CIF_MRSZ_SCALE_VY_SHD,
+ .scale_vc_shd = RKISP1_CIF_MRSZ_SCALE_VC_SHD,
+ .phase_hy = RKISP1_CIF_MRSZ_PHASE_HY,
+ .phase_hc = RKISP1_CIF_MRSZ_PHASE_HC,
+ .phase_vy = RKISP1_CIF_MRSZ_PHASE_VY,
+ .phase_vc = RKISP1_CIF_MRSZ_PHASE_VC,
+ .ctrl_shd = RKISP1_CIF_MRSZ_CTRL_SHD,
+ .phase_hy_shd = RKISP1_CIF_MRSZ_PHASE_HY_SHD,
+ .phase_hc_shd = RKISP1_CIF_MRSZ_PHASE_HC_SHD,
+ .phase_vy_shd = RKISP1_CIF_MRSZ_PHASE_VY_SHD,
+ .phase_vc_shd = RKISP1_CIF_MRSZ_PHASE_VC_SHD,
+ },
+ .dual_crop = {
+ .ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
+ .yuvmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_YUV,
+ .rawmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_RAW,
+ .h_offset = RKISP1_CIF_DUAL_CROP_M_H_OFFS,
+ .v_offset = RKISP1_CIF_DUAL_CROP_M_V_OFFS,
+ .h_size = RKISP1_CIF_DUAL_CROP_M_H_SIZE,
+ .v_size = RKISP1_CIF_DUAL_CROP_M_V_SIZE,
+ },
+};
+
+static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
+ /* constraints */
+ .max_rsz_width = RKISP1_RSZ_SP_SRC_MAX_WIDTH,
+ .max_rsz_height = RKISP1_RSZ_SP_SRC_MAX_HEIGHT,
+ .min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
+ .min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
+ /* registers */
+ .rsz = {
+ .ctrl = RKISP1_CIF_SRSZ_CTRL,
+ .scale_hy = RKISP1_CIF_SRSZ_SCALE_HY,
+ .scale_hcr = RKISP1_CIF_SRSZ_SCALE_HCR,
+ .scale_hcb = RKISP1_CIF_SRSZ_SCALE_HCB,
+ .scale_vy = RKISP1_CIF_SRSZ_SCALE_VY,
+ .scale_vc = RKISP1_CIF_SRSZ_SCALE_VC,
+ .scale_lut = RKISP1_CIF_SRSZ_SCALE_LUT,
+ .scale_lut_addr = RKISP1_CIF_SRSZ_SCALE_LUT_ADDR,
+ .scale_hy_shd = RKISP1_CIF_SRSZ_SCALE_HY_SHD,
+ .scale_hcr_shd = RKISP1_CIF_SRSZ_SCALE_HCR_SHD,
+ .scale_hcb_shd = RKISP1_CIF_SRSZ_SCALE_HCB_SHD,
+ .scale_vy_shd = RKISP1_CIF_SRSZ_SCALE_VY_SHD,
+ .scale_vc_shd = RKISP1_CIF_SRSZ_SCALE_VC_SHD,
+ .phase_hy = RKISP1_CIF_SRSZ_PHASE_HY,
+ .phase_hc = RKISP1_CIF_SRSZ_PHASE_HC,
+ .phase_vy = RKISP1_CIF_SRSZ_PHASE_VY,
+ .phase_vc = RKISP1_CIF_SRSZ_PHASE_VC,
+ .ctrl_shd = RKISP1_CIF_SRSZ_CTRL_SHD,
+ .phase_hy_shd = RKISP1_CIF_SRSZ_PHASE_HY_SHD,
+ .phase_hc_shd = RKISP1_CIF_SRSZ_PHASE_HC_SHD,
+ .phase_vy_shd = RKISP1_CIF_SRSZ_PHASE_VY_SHD,
+ .phase_vc_shd = RKISP1_CIF_SRSZ_PHASE_VC_SHD,
+ },
+ .dual_crop = {
+ .ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
+ .yuvmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_YUV,
+ .rawmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_RAW,
+ .h_offset = RKISP1_CIF_DUAL_CROP_S_H_OFFS,
+ .v_offset = RKISP1_CIF_DUAL_CROP_S_V_OFFS,
+ .h_size = RKISP1_CIF_DUAL_CROP_S_H_SIZE,
+ .v_size = RKISP1_CIF_DUAL_CROP_S_V_SIZE,
+ },
+};
+
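+/*
+ * Both the TRY and the ACTIVE case go through the v4l2 "try" accessors:
+ * the driver keeps its active pad formats and crops in rsz->pad_cfg, so
+ * ACTIVE simply reads from that persistent configuration instead of the
+ * caller-provided one.
+ */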
+static struct v4l2_mbus_framefmt *
+rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&rsz->sd, cfg, pad);
+ else
+ return v4l2_subdev_get_try_format(&rsz->sd, rsz->pad_cfg, pad);
+}
+
+static struct v4l2_rect *
+rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&rsz->sd, cfg, pad);
+ else
+ return v4l2_subdev_get_try_crop(&rsz->sd, rsz->pad_cfg, pad);
+}
+
+/* ----------------------------------------------------------------------------
+ * Dual crop hw configs
+ */
+
+static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
+ enum rkisp1_shadow_regs_when when)
+{
+ u32 dc_ctrl = rkisp1_read(rsz->rkisp1, rsz->config->dual_crop.ctrl);
+ u32 mask = ~(rsz->config->dual_crop.yuvmode_mask |
+ rsz->config->dual_crop.rawmode_mask);
+
+ dc_ctrl &= mask;
+ if (when == RKISP1_SHADOW_REGS_ASYNC)
+ dc_ctrl |= RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD;
+ else
+ dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
+ rkisp1_write(rsz->rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl);
+}
+
+/*
+ * Configure the dual-crop unit, or disable it when the sink crop covers
+ * the whole sink frame.
+ */
+static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
+{
+ struct rkisp1_device *rkisp1 = rsz->rkisp1;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *sink_crop;
+ u32 dc_ctrl;
+
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+
+ if (sink_crop->width == sink_fmt->width &&
+ sink_crop->height == sink_fmt->height &&
+ sink_crop->left == 0 && sink_crop->top == 0) {
+ rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_SYNC);
+ dev_dbg(rkisp1->dev, "capture %d crop disabled\n", rsz->id);
+ return;
+ }
+
+ dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl);
+ rkisp1_write(rkisp1, sink_crop->left, rsz->config->dual_crop.h_offset);
+ rkisp1_write(rkisp1, sink_crop->top, rsz->config->dual_crop.v_offset);
+ rkisp1_write(rkisp1, sink_crop->width, rsz->config->dual_crop.h_size);
+ rkisp1_write(rkisp1, sink_crop->height, rsz->config->dual_crop.v_size);
+ dc_ctrl |= rsz->config->dual_crop.yuvmode_mask;
+ dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
+ rkisp1_write(rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl);
+
+ dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d\n", rsz->id,
+ sink_fmt->width, sink_fmt->height,
+ sink_crop->width, sink_crop->height);
+}
+
+/* ----------------------------------------------------------------------------
+ * Resizer hw configs
+ */
+
+static void rkisp1_rsz_dump_regs(struct rkisp1_resizer *rsz)
+{
+ dev_dbg(rsz->rkisp1->dev,
+ "RSZ_CTRL 0x%08x/0x%08x\n"
+ "RSZ_SCALE_HY %d/%d\n"
+ "RSZ_SCALE_HCB %d/%d\n"
+ "RSZ_SCALE_HCR %d/%d\n"
+ "RSZ_SCALE_VY %d/%d\n"
+ "RSZ_SCALE_VC %d/%d\n"
+ "RSZ_PHASE_HY %d/%d\n"
+ "RSZ_PHASE_HC %d/%d\n"
+ "RSZ_PHASE_VY %d/%d\n"
+ "RSZ_PHASE_VC %d/%d\n",
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy_shd),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc),
+ rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc_shd));
+}
+
+static void rkisp1_rsz_update_shadow(struct rkisp1_resizer *rsz,
+ enum rkisp1_shadow_regs_when when)
+{
+ u32 ctrl_cfg = rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl);
+
+ if (when == RKISP1_SHADOW_REGS_ASYNC)
+ ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD_AUTO;
+ else
+ ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD;
+
+ rkisp1_write(rsz->rkisp1, ctrl_cfg, rsz->config->rsz.ctrl);
+}
+
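+/*
+ * The returned ratio is a fixed-point fraction of
+ * RKISP1_CIF_RSZ_SCALER_FACTOR. With the sink pad as input and the source
+ * pad as output, len_sink < len_src is upscaling; downscaling uses the
+ * inverse fraction, rounded up by one.
+ */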
+static u32 rkisp1_rsz_calc_ratio(u32 len_sink, u32 len_src)
+{
+ if (len_sink < len_src)
+ return ((len_sink - 1) * RKISP1_CIF_RSZ_SCALER_FACTOR) /
+ (len_src - 1);
+
+ return ((len_src - 1) * RKISP1_CIF_RSZ_SCALER_FACTOR) /
+ (len_sink - 1) + 1;
+}
+
+static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz,
+ enum rkisp1_shadow_regs_when when)
+{
+ rkisp1_write(rsz->rkisp1, 0, rsz->config->rsz.ctrl);
+
+ if (when == RKISP1_SHADOW_REGS_SYNC)
+ rkisp1_rsz_update_shadow(rsz, when);
+}
+
+static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
+ struct v4l2_rect *sink_y,
+ struct v4l2_rect *sink_c,
+ struct v4l2_rect *src_y,
+ struct v4l2_rect *src_c,
+ enum rkisp1_shadow_regs_when when)
+{
+ struct rkisp1_device *rkisp1 = rsz->rkisp1;
+ u32 ratio, rsz_ctrl = 0;
+ unsigned int i;
+
+ /* No phase offset */
+ rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hy);
+ rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hc);
+ rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vy);
+ rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vc);
+
+ /* Program an identity LUT (entry i holds i): plain linear interpolation */
+ for (i = 0; i < 64; i++) {
+ rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut_addr);
+ rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut);
+ }
+
+ if (sink_y->width != src_y->width) {
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_ENABLE;
+ if (sink_y->width < src_y->width)
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_UP;
+ ratio = rkisp1_rsz_calc_ratio(sink_y->width, src_y->width);
+ rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hy);
+ }
+
+ if (sink_c->width != src_c->width) {
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_ENABLE;
+ if (sink_c->width < src_c->width)
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_UP;
+ ratio = rkisp1_rsz_calc_ratio(sink_c->width, src_c->width);
+ rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcb);
+ rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcr);
+ }
+
+ if (sink_y->height != src_y->height) {
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_ENABLE;
+ if (sink_y->height < src_y->height)
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_UP;
+ ratio = rkisp1_rsz_calc_ratio(sink_y->height, src_y->height);
+ rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vy);
+ }
+
+ if (sink_c->height != src_c->height) {
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_ENABLE;
+ if (sink_c->height < src_c->height)
+ rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_UP;
+ ratio = rkisp1_rsz_calc_ratio(sink_c->height, src_c->height);
+ rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vc);
+ }
+
+ rkisp1_write(rkisp1, rsz_ctrl, rsz->config->rsz.ctrl);
+
+ rkisp1_rsz_update_shadow(rsz, when);
+}
+
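+/*
+ * Configure the resizer from the active sink crop (input) and source
+ * format (output). Luma and chroma are scaled independently: the chroma
+ * rectangles are derived from the luma ones using the subsampling factors
+ * of the sink media bus format and, for YUV output, of the capture pixel
+ * format.
+ */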
+static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
+ enum rkisp1_shadow_regs_when when)
+{
+ u8 hdiv = RKISP1_MBUS_FMT_HDIV, vdiv = RKISP1_MBUS_FMT_VDIV;
+ struct v4l2_rect sink_y, sink_c, src_y, src_c;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *sink_crop;
+
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+
+ if (rsz->fmt_type == RKISP1_FMT_BAYER) {
+ rkisp1_rsz_disable(rsz, when);
+ return;
+ }
+
+ sink_y.width = sink_crop->width;
+ sink_y.height = sink_crop->height;
+ src_y.width = src_fmt->width;
+ src_y.height = src_fmt->height;
+
+ sink_c.width = sink_y.width / RKISP1_MBUS_FMT_HDIV;
+ sink_c.height = sink_y.height / RKISP1_MBUS_FMT_VDIV;
+
+ if (rsz->fmt_type == RKISP1_FMT_YUV) {
+ struct rkisp1_capture *cap =
+ &rsz->rkisp1->capture_devs[rsz->id];
+ const struct v4l2_format_info *pixfmt_info =
+ v4l2_format_info(cap->pix.fmt.pixelformat);
+
+ hdiv = pixfmt_info->hdiv;
+ vdiv = pixfmt_info->vdiv;
+ }
+ src_c.width = src_y.width / hdiv;
+ src_c.height = src_y.height / vdiv;
+
+ if (sink_c.width == src_c.width && sink_c.height == src_c.height) {
+ rkisp1_rsz_disable(rsz, when);
+ return;
+ }
+
+ dev_dbg(rsz->rkisp1->dev, "stream %d rsz/scale: %dx%d -> %dx%d\n",
+ rsz->id, sink_crop->width, sink_crop->height,
+ src_fmt->width, src_fmt->height);
+ dev_dbg(rsz->rkisp1->dev, "chroma scaling %dx%d -> %dx%d\n",
+ sink_c.width, sink_c.height, src_c.width, src_c.height);
+
+ /* set values in the hw */
+ rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when);
+
+ rkisp1_rsz_dump_regs(rsz);
+}
+
+/* ----------------------------------------------------------------------------
+ * Subdev pad operations
+ */
+
+static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+ struct v4l2_subdev_pad_config dummy_cfg;
+ u32 pad = code->pad;
+ int ret;
+
+ /* the supported mbus codes are the same as on the isp source pad */
+ code->pad = RKISP1_ISP_PAD_SOURCE_VIDEO;
+ ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
+ &dummy_cfg, code);
+
+ /* restore pad */
+ code->pad = pad;
+ return ret;
+}
+
+static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+ struct v4l2_rect *sink_crop;
+
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SINK);
+ sink_fmt->width = RKISP1_DEFAULT_WIDTH;
+ sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = RKISP1_DEF_FMT;
+
+ sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK);
+ sink_crop->width = RKISP1_DEFAULT_WIDTH;
+ sink_crop->height = RKISP1_DEFAULT_HEIGHT;
+ sink_crop->left = 0;
+ sink_crop->top = 0;
+
+ src_fmt = v4l2_subdev_get_try_format(sd, cfg, RKISP1_RSZ_PAD_SRC);
+ *src_fmt = *sink_fmt;
+
+ /* NOTE: there is no crop on the source pad, only on the sink */
+
+ return 0;
+}
+
+static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_mbus_framefmt *format,
+ unsigned int which)
+{
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+ src_fmt->width = clamp_t(u32, format->width,
+ rsz->config->min_rsz_width,
+ rsz->config->max_rsz_width);
+ src_fmt->height = clamp_t(u32, format->height,
+ rsz->config->min_rsz_height,
+ rsz->config->max_rsz_height);
+
+ *format = *src_fmt;
+}
+
+static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_rect *r,
+ unsigned int which)
+{
+ const struct rkisp1_isp_mbus_info *mbus_info;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *sink_crop;
+
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ which);
+
+ /* Do not crop for MP Bayer raw data */
+ mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+
+ if (rsz->id == RKISP1_MAINPATH &&
+ mbus_info->fmt_type == RKISP1_FMT_BAYER) {
+ sink_crop->left = 0;
+ sink_crop->top = 0;
+ sink_crop->width = sink_fmt->width;
+ sink_crop->height = sink_fmt->height;
+ return;
+ }
+
+ sink_crop->left = ALIGN(r->left, 2);
+ sink_crop->width = ALIGN(r->width, 2);
+ sink_crop->top = r->top;
+ sink_crop->height = r->height;
+ rkisp1_sd_adjust_crop(sink_crop, sink_fmt);
+
+ *r = *sink_crop;
+}
+
+static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_mbus_framefmt *format,
+ unsigned int which)
+{
+ const struct rkisp1_isp_mbus_info *mbus_info;
+ struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+ struct v4l2_rect *sink_crop;
+
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
+ src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+ sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ which);
+ sink_fmt->code = format->code;
+ mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ if (!mbus_info) {
+ sink_fmt->code = RKISP1_DEF_FMT;
+ mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ }
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ rsz->fmt_type = mbus_info->fmt_type;
+
+ if (rsz->id == RKISP1_MAINPATH &&
+ mbus_info->fmt_type == RKISP1_FMT_BAYER) {
+ sink_crop->left = 0;
+ sink_crop->top = 0;
+ sink_crop->width = sink_fmt->width;
+ sink_crop->height = sink_fmt->height;
+ return;
+ }
+
+ /* Propagate to source pad */
+ src_fmt->code = sink_fmt->code;
+
+ sink_fmt->width = clamp_t(u32, format->width,
+ rsz->config->min_rsz_width,
+ rsz->config->max_rsz_width);
+ sink_fmt->height = clamp_t(u32, format->height,
+ rsz->config->min_rsz_height,
+ rsz->config->max_rsz_height);
+
+ *format = *sink_fmt;
+
+ /* Update sink crop */
+ rkisp1_rsz_set_sink_crop(rsz, cfg, sink_crop, which);
+}
+
+static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+
+ fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+
+ if (fmt->pad == RKISP1_RSZ_PAD_SINK)
+ rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which);
+ else
+ rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which);
+
+ return 0;
+}
+
+static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+ struct v4l2_mbus_framefmt *mf_sink;
+
+ if (sel->pad == RKISP1_RSZ_PAD_SRC)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sel->which);
+ sel->r.height = mf_sink->height;
+ sel->r.width = mf_sink->width;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
+ sel->which);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad == RKISP1_RSZ_PAD_SRC)
+ return -EINVAL;
+
+ dev_dbg(sd->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which);
+
+ return 0;
+}
+
+static const struct media_entity_operations rkisp1_rsz_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = {
+ .enum_mbus_code = rkisp1_rsz_enum_mbus_code,
+ .get_selection = rkisp1_rsz_get_selection,
+ .set_selection = rkisp1_rsz_set_selection,
+ .init_cfg = rkisp1_rsz_init_config,
+ .get_fmt = rkisp1_rsz_get_fmt,
+ .set_fmt = rkisp1_rsz_set_fmt,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* ----------------------------------------------------------------------------
+ * Stream operations
+ */
+
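+/*
+ * If the other capture path is already streaming, the new configuration
+ * must not be forced into the hardware immediately: the shadow registers
+ * are updated asynchronously so the change is latched at a frame boundary.
+ */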
+static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+ struct rkisp1_device *rkisp1 = rsz->rkisp1;
+ struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1];
+ enum rkisp1_shadow_regs_when when = RKISP1_SHADOW_REGS_SYNC;
+
+ if (!enable) {
+ rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
+ rkisp1_rsz_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
+ return 0;
+ }
+
+ if (other->is_streaming)
+ when = RKISP1_SHADOW_REGS_ASYNC;
+
+ rkisp1_rsz_config(rsz, when);
+ rkisp1_dcrop_config(rsz);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops rkisp1_rsz_video_ops = {
+ .s_stream = rkisp1_rsz_s_stream,
+};
+
+static const struct v4l2_subdev_ops rkisp1_rsz_ops = {
+ .video = &rkisp1_rsz_video_ops,
+ .pad = &rkisp1_rsz_pad_ops,
+};
+
+static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
+{
+ v4l2_device_unregister_subdev(&rsz->sd);
+ media_entity_cleanup(&rsz->sd.entity);
+}
+
+static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
+{
+ const char * const dev_names[] = {RKISP1_RSZ_MP_DEV_NAME,
+ RKISP1_RSZ_SP_DEV_NAME};
+ struct media_pad *pads = rsz->pads;
+ struct v4l2_subdev *sd = &rsz->sd;
+ int ret;
+
+ if (rsz->id == RKISP1_SELFPATH)
+ rsz->config = &rkisp1_rsz_config_sp;
+ else
+ rsz->config = &rkisp1_rsz_config_mp;
+
+ v4l2_subdev_init(sd, &rkisp1_rsz_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->entity.ops = &rkisp1_rsz_media_ops;
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ sd->owner = THIS_MODULE;
+ strscpy(sd->name, dev_names[rsz->id], sizeof(sd->name));
+
+ pads[RKISP1_RSZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ pads[RKISP1_RSZ_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
+ MEDIA_PAD_FL_MUST_CONNECT;
+
+ rsz->fmt_type = RKISP1_DEF_FMT_TYPE;
+
+ ret = media_entity_pads_init(&sd->entity, 2, pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd);
+ if (ret) {
+ dev_err(sd->dev, "Failed to register resizer subdev\n");
+ goto err_cleanup_media_entity;
+ }
+
+ rkisp1_rsz_init_config(sd, rsz->pad_cfg);
+ return 0;
+
+err_cleanup_media_entity:
+ media_entity_cleanup(&sd->entity);
+
+ return ret;
+}
+
+int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_resizer *rsz;
+ unsigned int i, j;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(rkisp1->resizer_devs); i++) {
+ rsz = &rkisp1->resizer_devs[i];
+ rsz->rkisp1 = rkisp1;
+ rsz->id = i;
+ ret = rkisp1_rsz_register(rsz);
+ if (ret)
+ goto err_unreg_resizer_devs;
+ }
+
+ return 0;
+
+err_unreg_resizer_devs:
+ for (j = 0; j < i; j++) {
+ rsz = &rkisp1->resizer_devs[j];
+ rkisp1_rsz_unregister(rsz);
+ }
+
+ return ret;
+}
+
+void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_resizer *mp = &rkisp1->resizer_devs[RKISP1_MAINPATH];
+ struct rkisp1_resizer *sp = &rkisp1->resizer_devs[RKISP1_SELFPATH];
+
+ rkisp1_rsz_unregister(mp);
+ rkisp1_rsz_unregister(sp);
+}
diff --git a/drivers/staging/media/rkisp1/rkisp1-stats.c b/drivers/staging/media/rkisp1/rkisp1-stats.c
new file mode 100644
index 000000000000..d98ea15837de
--- /dev/null
+++ b/drivers/staging/media/rkisp1/rkisp1-stats.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Rockchip ISP1 Driver - Stats subdevice
+ *
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h> /* for ISP statistics */
+
+#include "rkisp1-common.h"
+
+#define RKISP1_STATS_DEV_NAME RKISP1_DRIVER_NAME "_stats"
+
+#define RKISP1_ISP_STATS_REQ_BUFS_MIN 2
+#define RKISP1_ISP_STATS_REQ_BUFS_MAX 8
+
+enum rkisp1_isp_readout_cmd {
+ RKISP1_ISP_READOUT_MEAS,
+ RKISP1_ISP_READOUT_META,
+};
+
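+/*
+ * struct rkisp1_isp_readout_work - context for a single statistics readout
+ *
+ * The work item is allocated with GFP_ATOMIC in the ISR and freed by the
+ * readout work handler once the measurements have been copied into a vb2
+ * buffer.
+ */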
+struct rkisp1_isp_readout_work {
+ struct work_struct work;
+ struct rkisp1_stats *stats;
+
+ unsigned int frame_id;
+ unsigned int isp_ris;
+ enum rkisp1_isp_readout_cmd readout;
+ struct vb2_buffer *vb;
+};
+
+static int rkisp1_stats_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct rkisp1_stats *stats = video_get_drvdata(video);
+
+ if (f->index > 0 || f->type != video->queue->type)
+ return -EINVAL;
+
+ f->pixelformat = stats->vdev_fmt.fmt.meta.dataformat;
+ return 0;
+}
+
+static int rkisp1_stats_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct rkisp1_stats *stats = video_get_drvdata(video);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (f->type != video->queue->type)
+ return -EINVAL;
+
+ memset(meta, 0, sizeof(*meta));
+ meta->dataformat = stats->vdev_fmt.fmt.meta.dataformat;
+ meta->buffersize = stats->vdev_fmt.fmt.meta.buffersize;
+
+ return 0;
+}
+
+static int rkisp1_stats_querycap(struct file *file,
+ void *priv, struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ strscpy(cap->bus_info, "platform: " RKISP1_DRIVER_NAME,
+ sizeof(cap->bus_info));
+
+ return 0;
+}
+
+/* ISP statistics video device IOCTLs */
+static const struct v4l2_ioctl_ops rkisp1_stats_ioctl = {
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_fmt_meta_cap = rkisp1_stats_enum_fmt_meta_cap,
+ .vidioc_g_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
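+	/* The metadata format is fixed, so set/try just report it back. */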
+ .vidioc_s_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
+ .vidioc_try_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
+ .vidioc_querycap = rkisp1_stats_querycap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations rkisp1_stats_fops = {
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vb2_fop_poll,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release
+};
+
+static int rkisp1_stats_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rkisp1_stats *stats = vq->drv_priv;
+
+ *num_planes = 1;
+
+ *num_buffers = clamp_t(u32, *num_buffers, RKISP1_ISP_STATS_REQ_BUFS_MIN,
+ RKISP1_ISP_STATS_REQ_BUFS_MAX);
+
+ sizes[0] = sizeof(struct rkisp1_stat_buffer);
+
+ INIT_LIST_HEAD(&stats->stat);
+
+ return 0;
+}
+
+static void rkisp1_stats_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_buffer *stats_buf =
+ container_of(vbuf, struct rkisp1_buffer, vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct rkisp1_stats *stats_dev = vq->drv_priv;
+
+ stats_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
+
+ mutex_lock(&stats_dev->wq_lock);
+ list_add_tail(&stats_buf->queue, &stats_dev->stat);
+ mutex_unlock(&stats_dev->wq_lock);
+}
+
+static int rkisp1_stats_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_stat_buffer))
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_stat_buffer));
+
+ return 0;
+}
+
+static void rkisp1_stats_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct rkisp1_stats *stats = vq->drv_priv;
+ struct rkisp1_buffer *buf;
+ unsigned long flags;
+ unsigned int i;
+
+	/* Make sure no new work is queued from the ISR before draining the wq */
+ spin_lock_irqsave(&stats->irq_lock, flags);
+ stats->is_streaming = false;
+ spin_unlock_irqrestore(&stats->irq_lock, flags);
+
+ drain_workqueue(stats->readout_wq);
+
+ mutex_lock(&stats->wq_lock);
+ for (i = 0; i < RKISP1_ISP_STATS_REQ_BUFS_MAX; i++) {
+ if (list_empty(&stats->stat))
+ break;
+ buf = list_first_entry(&stats->stat,
+ struct rkisp1_buffer, queue);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&stats->wq_lock);
+}
+
+static int
+rkisp1_stats_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+ struct rkisp1_stats *stats = queue->drv_priv;
+
+ stats->is_streaming = true;
+
+ return 0;
+}
+
+static const struct vb2_ops rkisp1_stats_vb2_ops = {
+ .queue_setup = rkisp1_stats_vb2_queue_setup,
+ .buf_queue = rkisp1_stats_vb2_buf_queue,
+ .buf_prepare = rkisp1_stats_vb2_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = rkisp1_stats_vb2_stop_streaming,
+ .start_streaming = rkisp1_stats_vb2_start_streaming,
+};
+
+static int
+rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
+{
+ struct rkisp1_vdev_node *node;
+
+ node = container_of(q, struct rkisp1_vdev_node, buf_queue);
+
+ q->type = V4L2_BUF_TYPE_META_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = stats;
+ q->ops = &rkisp1_stats_vb2_ops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->buf_struct_size = sizeof(struct rkisp1_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &node->vlock;
+
+ return vb2_queue_init(q);
+}
+
+static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+	/* Runs in the readout work, not in the ISR itself. */
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 reg_val;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT);
+ pbuf->params.awb.awb_mean[0].cnt =
+ RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN);
+
+ pbuf->params.awb.awb_mean[0].mean_cr_or_r =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_cb_or_b =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_y_or_g =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
+}
+
+static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ unsigned int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+ pbuf->params.ae.exp_mean[i] =
+ (u8)rkisp1_read(rkisp1,
+ RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
+}
+
+static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ struct rkisp1_cif_isp_af_stat *af;
+
+	pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AFM_FIN;
+
+ af = &pbuf->params.af;
+ af->window[0].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_A);
+ af->window[0].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_A);
+ af->window[1].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_B);
+ af->window[1].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_B);
+ af->window[2].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_C);
+ af->window[2].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_C);
+}
+
+static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ unsigned int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
+ pbuf->params.hist.hist_bins[i] =
+ (u8)rkisp1_read(rkisp1,
+ RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+}
+
+static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ const struct rkisp1_isp_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
+ struct rkisp1_cif_isp_bls_meas_val *bls_val;
+
+ bls_val = &pbuf->params.ae.bls_val;
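+	/*
+	 * The hardware reports the measured values in fixed registers A-D;
+	 * map them to the R/Gr/Gb/B components according to the sensor's
+	 * Bayer pattern.
+	 */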
+ if (in_fmt->bayer_pat == RKISP1_RAW_BGGR) {
+ bls_val->meas_b =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+ bls_val->meas_gb =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+ bls_val->meas_gr =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+ bls_val->meas_r =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+ } else if (in_fmt->bayer_pat == RKISP1_RAW_GBRG) {
+ bls_val->meas_gb =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+ bls_val->meas_b =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+ bls_val->meas_r =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+ bls_val->meas_gr =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+ } else if (in_fmt->bayer_pat == RKISP1_RAW_GRBG) {
+ bls_val->meas_gr =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+ bls_val->meas_r =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+ bls_val->meas_b =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+ bls_val->meas_gb =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+ } else if (in_fmt->bayer_pat == RKISP1_RAW_RGGB) {
+ bls_val->meas_r =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
+ bls_val->meas_gr =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
+ bls_val->meas_gb =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
+ bls_val->meas_b =
+ rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
+ }
+}
+
+static void
+rkisp1_stats_send_measurement(struct rkisp1_stats *stats,
+ struct rkisp1_isp_readout_work *meas_work)
+{
+ struct rkisp1_stat_buffer *cur_stat_buf;
+ struct rkisp1_buffer *cur_buf = NULL;
+ unsigned int frame_sequence =
+ atomic_read(&stats->rkisp1->isp.frame_sequence);
+ u64 timestamp = ktime_get_ns();
+
+ if (frame_sequence != meas_work->frame_id) {
+ dev_warn(stats->rkisp1->dev,
+			 "Measurement late (%d, %d)\n",
+ frame_sequence, meas_work->frame_id);
+ frame_sequence = meas_work->frame_id;
+ }
+
+ mutex_lock(&stats->wq_lock);
+ /* get one empty buffer */
+ if (!list_empty(&stats->stat)) {
+ cur_buf = list_first_entry(&stats->stat,
+ struct rkisp1_buffer, queue);
+ list_del(&cur_buf->queue);
+ }
+ mutex_unlock(&stats->wq_lock);
+
+ if (!cur_buf)
+ return;
+
+ cur_stat_buf =
+ (struct rkisp1_stat_buffer *)(cur_buf->vaddr[0]);
+
+ if (meas_work->isp_ris & RKISP1_CIF_ISP_AWB_DONE) {
+ rkisp1_stats_get_awb_meas(stats, cur_stat_buf);
+ cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+ }
+
+ if (meas_work->isp_ris & RKISP1_CIF_ISP_AFM_FIN) {
+ rkisp1_stats_get_afc_meas(stats, cur_stat_buf);
+ cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_AFM_FIN;
+ }
+
+ if (meas_work->isp_ris & RKISP1_CIF_ISP_EXP_END) {
+ rkisp1_stats_get_aec_meas(stats, cur_stat_buf);
+ rkisp1_stats_get_bls_meas(stats, cur_stat_buf);
+ cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+ }
+
+ if (meas_work->isp_ris & RKISP1_CIF_ISP_HIST_MEASURE_RDY) {
+ rkisp1_stats_get_hst_meas(stats, cur_stat_buf);
+ cur_stat_buf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+ }
+
+ vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
+ sizeof(struct rkisp1_stat_buffer));
+ cur_buf->vb.sequence = frame_sequence;
+ cur_buf->vb.vb2_buf.timestamp = timestamp;
+ vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static void rkisp1_stats_readout_work(struct work_struct *work)
+{
+ struct rkisp1_isp_readout_work *readout_work =
+ container_of(work, struct rkisp1_isp_readout_work, work);
+ struct rkisp1_stats *stats = readout_work->stats;
+
+ if (readout_work->readout == RKISP1_ISP_READOUT_MEAS)
+ rkisp1_stats_send_measurement(stats, readout_work);
+
+ kfree(readout_work);
+}
+
+void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris)
+{
+ unsigned int frame_sequence =
+ atomic_read(&stats->rkisp1->isp.frame_sequence);
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ struct rkisp1_isp_readout_work *work;
+ unsigned int isp_mis_tmp = 0;
+ u32 val;
+
+ spin_lock(&stats->irq_lock);
+
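+	/*
+	 * Acknowledge the measurement interrupts; any of them still pending
+	 * in MIS right after the clear indicates a missed readout, which is
+	 * only counted for debugging.
+	 */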
+ val = RKISP1_CIF_ISP_AWB_DONE | RKISP1_CIF_ISP_AFM_FIN |
+ RKISP1_CIF_ISP_EXP_END | RKISP1_CIF_ISP_HIST_MEASURE_RDY;
+ rkisp1_write(rkisp1, val, RKISP1_CIF_ISP_ICR);
+
+ isp_mis_tmp = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
+ if (isp_mis_tmp &
+ (RKISP1_CIF_ISP_AWB_DONE | RKISP1_CIF_ISP_AFM_FIN |
+ RKISP1_CIF_ISP_EXP_END | RKISP1_CIF_ISP_HIST_MEASURE_RDY))
+ rkisp1->debug.stats_error++;
+
+ if (!stats->is_streaming)
+ goto unlock;
+ if (isp_ris & (RKISP1_CIF_ISP_AWB_DONE |
+ RKISP1_CIF_ISP_AFM_FIN |
+ RKISP1_CIF_ISP_EXP_END |
+ RKISP1_CIF_ISP_HIST_MEASURE_RDY)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(&work->work,
+ rkisp1_stats_readout_work);
+ work->readout = RKISP1_ISP_READOUT_MEAS;
+ work->stats = stats;
+ work->frame_id = frame_sequence;
+ work->isp_ris = isp_ris;
+ if (!queue_work(stats->readout_wq,
+ &work->work))
+ kfree(work);
+ } else {
+ dev_err(stats->rkisp1->dev,
+ "Could not allocate work\n");
+ }
+ }
+
+unlock:
+ spin_unlock(&stats->irq_lock);
+}
+
+static void rkisp1_init_stats(struct rkisp1_stats *stats)
+{
+ stats->vdev_fmt.fmt.meta.dataformat =
+ V4L2_META_FMT_RK_ISP1_STAT_3A;
+ stats->vdev_fmt.fmt.meta.buffersize =
+ sizeof(struct rkisp1_stat_buffer);
+}
+
+int rkisp1_stats_register(struct rkisp1_stats *stats,
+ struct v4l2_device *v4l2_dev,
+ struct rkisp1_device *rkisp1)
+{
+ struct rkisp1_vdev_node *node = &stats->vnode;
+ struct video_device *vdev = &node->vdev;
+ int ret;
+
+ stats->rkisp1 = rkisp1;
+ mutex_init(&stats->wq_lock);
+ mutex_init(&node->vlock);
+ INIT_LIST_HEAD(&stats->stat);
+ spin_lock_init(&stats->irq_lock);
+
+ strscpy(vdev->name, RKISP1_STATS_DEV_NAME, sizeof(vdev->name));
+
+ video_set_drvdata(vdev, stats);
+ vdev->ioctl_ops = &rkisp1_stats_ioctl;
+ vdev->fops = &rkisp1_stats_fops;
+ vdev->release = video_device_release_empty;
+ vdev->lock = &node->vlock;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->queue = &node->buf_queue;
+ vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+ rkisp1_stats_init_vb2_queue(vdev->queue, stats);
+ rkisp1_init_stats(stats);
+
+ node->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
+ if (ret)
+ goto err_release_queue;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(&vdev->dev,
+ "failed to register %s, ret=%d\n", vdev->name, ret);
+ goto err_cleanup_media_entity;
+ }
+
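+	/* max_active = 1: readouts are serialized, keeping frame order. */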
+ stats->readout_wq = alloc_workqueue("measurement_queue",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 1);
+
+ if (!stats->readout_wq) {
+ ret = -ENOMEM;
+ goto err_unreg_vdev;
+ }
+
+ return 0;
+
+err_unreg_vdev:
+ video_unregister_device(vdev);
+err_cleanup_media_entity:
+ media_entity_cleanup(&vdev->entity);
+err_release_queue:
+ vb2_queue_release(vdev->queue);
+ mutex_destroy(&node->vlock);
+ mutex_destroy(&stats->wq_lock);
+ return ret;
+}
+
+void rkisp1_stats_unregister(struct rkisp1_stats *stats)
+{
+ struct rkisp1_vdev_node *node = &stats->vnode;
+ struct video_device *vdev = &node->vdev;
+
+ destroy_workqueue(stats->readout_wq);
+ video_unregister_device(vdev);
+ media_entity_cleanup(&vdev->entity);
+ vb2_queue_release(vdev->queue);
+ mutex_destroy(&node->vlock);
+ mutex_destroy(&stats->wq_lock);
+}
diff --git a/drivers/staging/media/rkisp1/uapi/rkisp1-config.h b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
new file mode 100644
index 000000000000..ca0d031b14ac
--- /dev/null
+++ b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
@@ -0,0 +1,819 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Rockchip ISP1 userspace API
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+/*
+ * TODO: Improve documentation, mostly regarding abbreviation and hardware
+ * specificities. Reference: "REF_01 - ISP_user_manual, Rev 2.57" (not public)
+ */
+
+#ifndef _UAPI_RKISP1_CONFIG_H
+#define _UAPI_RKISP1_CONFIG_H
+
+#include <linux/types.h>
+
+/* Vendor specific - used for RK_ISP1 camera sub-system */
+#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 params */
+#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A statistics */
+
+#define RKISP1_CIF_ISP_MODULE_DPCC BIT(0)
+#define RKISP1_CIF_ISP_MODULE_BLS BIT(1)
+#define RKISP1_CIF_ISP_MODULE_SDG BIT(2)
+#define RKISP1_CIF_ISP_MODULE_HST BIT(3)
+#define RKISP1_CIF_ISP_MODULE_LSC BIT(4)
+#define RKISP1_CIF_ISP_MODULE_AWB_GAIN BIT(5)
+#define RKISP1_CIF_ISP_MODULE_FLT BIT(6)
+#define RKISP1_CIF_ISP_MODULE_BDM BIT(7)
+#define RKISP1_CIF_ISP_MODULE_CTK BIT(8)
+#define RKISP1_CIF_ISP_MODULE_GOC BIT(9)
+#define RKISP1_CIF_ISP_MODULE_CPROC BIT(10)
+#define RKISP1_CIF_ISP_MODULE_AFC BIT(11)
+#define RKISP1_CIF_ISP_MODULE_AWB BIT(12)
+#define RKISP1_CIF_ISP_MODULE_IE BIT(13)
+#define RKISP1_CIF_ISP_MODULE_AEC BIT(14)
+#define RKISP1_CIF_ISP_MODULE_WDR BIT(15)
+#define RKISP1_CIF_ISP_MODULE_DPF BIT(16)
+#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH BIT(17)
+
+#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
+#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
+
+#define RKISP1_CIF_ISP_AE_MEAN_MAX 25
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX 16
+#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3
+#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17
+
+#define RKISP1_CIF_ISP_BDM_MAX_TH 0xff
+
+/*
+ * Black level compensation
+ */
+/* maximum value for horizontal start address */
+#define RKISP1_CIF_ISP_BLS_START_H_MAX 0x00000fff
+/* maximum value for horizontal stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_H_MAX 0x00000fff
+/* maximum value for vertical start address */
+#define RKISP1_CIF_ISP_BLS_START_V_MAX 0x00000fff
+/* maximum value for vertical stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_V_MAX 0x00000fff
+/* maximum is 2^18 = 262144 */
+#define RKISP1_CIF_ISP_BLS_SAMPLES_MAX 0x00000012
+/* maximum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MAX 0x00000fff
+/* minimum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MIN 0xfffff000
+/* 13 bit range (signed) */
+#define RKISP1_CIF_ISP_BLS_FIX_MASK 0x00001fff
+
+/*
+ * Automatic white balance measurements
+ */
+#define RKISP1_CIF_ISP_AWB_MAX_GRID 1
+#define RKISP1_CIF_ISP_AWB_MAX_FRAMES 7
+
+/*
+ * Gamma out
+ */
+/* Maximum number of color samples supported */
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES 17
+
+/*
+ * Lens shade correction
+ */
+#define RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE 8
+#define RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE 8
+/*
+ * The following matches the tuning process,
+ * not the max capabilities of the chip.
+ * Last value unused.
+ */
+#define RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE 290
+
+/*
+ * Histogram calculation
+ */
+/* 25 of the 28 values are used (5x5 grid); the last 3 are unused. */
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE 28
+
+/*
+ * Defect Pixel Cluster Correction
+ */
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+
+/*
+ * Denoising pre filter
+ */
+#define RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS 17
+#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
+
+/*
+ * Measurement types
+ */
+#define RKISP1_CIF_ISP_STAT_AWB BIT(0)
+#define RKISP1_CIF_ISP_STAT_AUTOEXP BIT(1)
+#define RKISP1_CIF_ISP_STAT_AFM_FIN BIT(2)
+#define RKISP1_CIF_ISP_STAT_HIST BIT(3)
+
+enum rkisp1_cif_isp_histogram_mode {
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_R_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_G_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_B_HISTOGRAM,
+ RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM
+};
+
+enum rkisp1_cif_isp_awb_mode_type {
+ RKISP1_CIF_ISP_AWB_MODE_MANUAL,
+ RKISP1_CIF_ISP_AWB_MODE_RGB,
+ RKISP1_CIF_ISP_AWB_MODE_YCBCR
+};
+
+enum rkisp1_cif_isp_flt_mode {
+ RKISP1_CIF_ISP_FLT_STATIC_MODE,
+ RKISP1_CIF_ISP_FLT_DYNAMIC_MODE
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0: continuous measurement
+ * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1: stop measuring after a complete frame
+ */
+enum rkisp1_cif_isp_exp_ctrl_autostop {
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0 = 0,
+ RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1 = 1,
+};
+
+/**
+ * enum rkisp1_cif_isp_exp_meas_mode - Exposure measure mode
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_0: Y = 16 + 0.25R + 0.5G + 0.1094B
+ * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_1: Y = (R + G + B) x (85/256)
+ */
+enum rkisp1_cif_isp_exp_meas_mode {
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
+ RKISP1_CIF_ISP_EXP_MEASURING_MODE_1,
+};
+
+/*---------- PART1: Input Parameters ------------*/
+
+struct rkisp1_cif_isp_window {
+ __u16 h_offs;
+ __u16 v_offs;
+ __u16 h_size;
+ __u16 v_size;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values
+ *
+ * The values will be subtracted from the sensor
+ * values. Therefore a negative value means addition instead of subtraction!
+ *
+ * @r: Fixed (signed!) subtraction value for Bayer pattern R
+ * @gr: Fixed (signed!) subtraction value for Bayer pattern Gr
+ * @gb: Fixed (signed!) subtraction value for Bayer pattern Gb
+ * @b: Fixed (signed!) subtraction value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_fixed_val {
+ __s16 r;
+ __s16 gr;
+ __s16 gb;
+ __s16 b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction
+ *
+ * @enable_auto: When automatic mode is enabled, the measured values are
+ *		 subtracted; otherwise the fixed subtraction values are used.
+ * @en_windows: enabled window
+ * @bls_window1: Measurement window 1 size
+ * @bls_window2: Measurement window 2 size
+ * @bls_samples: Set amount of measured pixels for each Bayer position
+ *		 (A, B, C and D) to 2^bls_samples.
+ * @fixed_val: Fixed subtraction values
+ */
+struct rkisp1_cif_isp_bls_config {
+ __u8 enable_auto;
+ __u8 en_windows;
+ struct rkisp1_cif_isp_window bls_window1;
+ struct rkisp1_cif_isp_window bls_window2;
+ __u8 bls_samples;
+ struct rkisp1_cif_isp_bls_fixed_val fixed_val;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC
+ *
+ * Methods Configuration used by Defect Pixel Cluster Correction
+ *
+ * @method: Method enable bits
+ * @line_thresh: Line threshold
+ * @line_mad_fac: Line MAD factor
+ * @pg_fac: Peak gradient factor
+ * @rnd_thresh: Rank Neighbor Difference threshold
+ * @rg_fac: Rank gradient factor
+ */
+struct rkisp1_cif_isp_dpcc_methods_config {
+ __u32 method;
+ __u32 line_thresh;
+ __u32 line_mad_fac;
+ __u32 pg_fac;
+ __u32 rnd_thresh;
+ __u32 rg_fac;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
+ *
+ * Configuration used by Defect Pixel Cluster Correction
+ *
+ * @mode: dpcc output mode
+ * @output_mode: whether to use the hard-coded methods
+ * @set_use: stage1 methods set
+ * @methods: methods config
+ * @ro_limits: rank order limits
+ * @rnd_offs: differential rank offsets for rank neighbor difference
+ */
+struct rkisp1_cif_isp_dpcc_config {
+ __u32 mode;
+ __u32 output_mode;
+ __u32 set_use;
+ struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX];
+ __u32 ro_limits;
+ __u32 rnd_offs;
+} __packed;
+
+struct rkisp1_cif_isp_gamma_corr_curve {
+ __u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE];
+} __packed;
+
+struct rkisp1_cif_isp_gamma_curve_x_axis_pnts {
+ __u32 gamma_dx0;
+ __u32 gamma_dx1;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma
+ *
+ * @curve_r: red channel gamma curve points
+ * @curve_g: green channel gamma curve points
+ * @curve_b: blue channel gamma curve points
+ * @xa_pnts: x increments
+ */
+struct rkisp1_cif_isp_sdg_config {
+ struct rkisp1_cif_isp_gamma_corr_curve curve_r;
+ struct rkisp1_cif_isp_gamma_corr_curve curve_g;
+ struct rkisp1_cif_isp_gamma_corr_curve curve_b;
+ struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction
+ *
+ * refer to REF_01 for details
+ */
+struct rkisp1_cif_isp_lsc_config {
+ __u32 r_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+ __u32 gr_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+ __u32 gb_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+ __u32 b_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+
+ __u32 x_grad_tbl[RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE];
+ __u32 y_grad_tbl[RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE];
+
+ __u32 x_size_tbl[RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE];
+ __u32 y_size_tbl[RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE];
+ __u16 config_width;
+ __u16 config_height;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_ie_config - Configuration used by image effects
+ *
+ * @effect: image effect to apply (a V4L2_COLORFX_* value)
+ * @color_sel: color selection for the color selection effect
+ * @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1
+ * @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2
+ * @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1
+ * @eff_mat_4: 3x3 Matrix Coefficients for Sketch Effect 2
+ * @eff_mat_5: 3x3 Matrix Coefficients for Sketch Effect 3
+ * @eff_tint: Chrominance increment values of tint (used for sepia effect)
+ */
+struct rkisp1_cif_isp_ie_config {
+ __u16 effect;
+ __u16 color_sel;
+ __u16 eff_mat_1;
+ __u16 eff_mat_2;
+ __u16 eff_mat_3;
+ __u16 eff_mat_4;
+ __u16 eff_mat_5;
+ __u16 eff_tint;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing
+ *
+ * @c_out_range: Chrominance pixel clipping range at output.
+ * (0 for limit, 1 for full)
+ * @y_in_range: Luminance pixel clipping range at input.
+ * @y_out_range: Luminance pixel clipping range at output.
+ * @contrast: 00~ff, 0.0~1.992
+ * @brightness: 80~7F, -128~+127
+ * @sat: saturation, 00~FF, 0.0~1.992
+ * @hue: 80~7F, -90~+87.188
+ */
+struct rkisp1_cif_isp_cproc_config {
+ __u8 c_out_range;
+ __u8 y_in_range;
+ __u8 y_out_range;
+ __u8 contrast;
+ __u8 brightness;
+ __u8 sat;
+ __u8 hue;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_awb_meas_config - Configuration used by auto white balance
+ *
+ * @awb_wnd: white balance measurement window (in pixels)
+ * @awb_mode: the awb measurement mode (from enum rkisp1_cif_isp_awb_mode_type)
+ * @max_y: only pixel values < max_y contribute to the awb measurement, set to
+ *	0 to disable this feature
+ * @min_y: only pixel values > min_y contribute to the awb measurement
+ * @max_csum: Chrominance sum maximum value, only pixels with Cb+Cr smaller
+ *	than the threshold contribute to the awb measurement
+ * @min_c: Chrominance minimum value, only pixels with Cb/Cr each greater
+ *	than the threshold contribute to the awb measurement
+ * @frames: number of frames - 1 used for mean value calculation
+ * (ucFrames=0 means 1 Frame)
+ * @awb_ref_cr: reference Cr value for AWB regulation, target for AWB
+ * @awb_ref_cb: reference Cb value for AWB regulation, target for AWB
+ * @enable_ymax_cmp: enable comparing pixel Y values against max_y
+ */
+struct rkisp1_cif_isp_awb_meas_config {
+ /*
+ * Note: currently the h and v offsets are mapped to grid offsets
+ */
+ struct rkisp1_cif_isp_window awb_wnd;
+ __u32 awb_mode;
+ __u8 max_y;
+ __u8 min_y;
+ __u8 max_csum;
+ __u8 min_c;
+ __u8 frames;
+ __u8 awb_ref_cr;
+ __u8 awb_ref_cb;
+ __u8 enable_ymax_cmp;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain
+ *
+ * out_data_x = (AWB_GAIN_X * in_data + 128) >> 8
+ */
+struct rkisp1_cif_isp_awb_gain_config {
+ __u16 gain_red;
+ __u16 gain_green_r;
+ __u16 gain_blue;
+ __u16 gain_green_b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering
+ *
+ * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
+ * @grn_stage1: ISP_FILT_MODE register fields
+ * @chr_h_mode: ISP_FILT_MODE register fields
+ * @chr_v_mode: ISP_FILT_MODE register fields
+ *
+ * refer to REF_01 for details.
+ */
+struct rkisp1_cif_isp_flt_config {
+ __u32 mode;
+ __u8 grn_stage1;
+ __u8 chr_h_mode;
+ __u8 chr_v_mode;
+ __u32 thresh_bl0;
+ __u32 thresh_bl1;
+ __u32 thresh_sh0;
+ __u32 thresh_sh1;
+ __u32 lum_weight;
+ __u32 fac_sh1;
+ __u32 fac_sh0;
+ __u32 fac_mid;
+ __u32 fac_bl0;
+ __u32 fac_bl1;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic
+ *
+ * @demosaic_th: threshold for Bayer demosaicing texture detection
+ */
+struct rkisp1_cif_isp_bdm_config {
+ __u8 demosaic_th;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction
+ *
+ * @coeff: color correction matrix
+ * @ct_offset_b: offset for the crosstalk correction matrix
+ */
+struct rkisp1_cif_isp_ctk_config {
+ __u16 coeff0;
+ __u16 coeff1;
+ __u16 coeff2;
+ __u16 coeff3;
+ __u16 coeff4;
+ __u16 coeff5;
+ __u16 coeff6;
+ __u16 coeff7;
+ __u16 coeff8;
+ __u16 ct_offset_r;
+ __u16 ct_offset_g;
+ __u16 ct_offset_b;
+} __packed;
+
+enum rkisp1_cif_isp_goc_mode {
+ RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC,
+ RKISP1_CIF_ISP_GOC_MODE_EQUIDISTANT
+};
+
+/**
+ * struct rkisp1_cif_isp_goc_config - Configuration used by Gamma Out correction
+ *
+ * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
+ * @gamma_y: gamma out curve y-axis for all color components
+ */
+struct rkisp1_cif_isp_goc_config {
+ __u32 mode;
+ __u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_hst_config - Configuration used by Histogram
+ *
+ * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode)
+ * @histogram_predivider: process every (stepsize)th pixel; all other pixels
+ *	are skipped
+ * @meas_window: coordinates of the measure window
+ * @hist_weight: weighting factor for sub-windows
+ */
+struct rkisp1_cif_isp_hst_config {
+ __u32 mode;
+ __u8 histogram_predivider;
+ struct rkisp1_cif_isp_window meas_window;
+ __u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_aec_config - Configuration used by Auto Exposure Control
+ *
+ * @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode)
+ * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop)
+ * @meas_window: coordinates of the measure window
+ */
+struct rkisp1_cif_isp_aec_config {
+ __u32 mode;
+ __u32 autostop;
+ struct rkisp1_cif_isp_window meas_window;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_afc_config - Configuration used by Auto Focus Control
+ *
+ * @num_afm_win: number of measurement windows (up to RKISP1_CIF_ISP_AFM_MAX_WINDOWS)
+ * @afm_win: coordinates of the meas window
+ * @thres: threshold used for minimizing the influence of noise
+ * @var_shift: the number of bits for the shift operation at the end of the
+ * calculation chain.
+ */
+struct rkisp1_cif_isp_afc_config {
+ __u8 num_afm_win;
+ struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+ __u32 thres;
+ __u32 var_shift;
+} __packed;
+
+/**
+ * enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED: don't use any gains in preprocessing stage
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS: use only the noise function gains from
+ * registers DPF_NF_GAIN_R, ...
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS: use only the gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS: use the noise function gains and the
+ * gains from LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS: use only the gains from AWB module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS: use the gains from AWB and LSC module
+ * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX: upper border (only for an internal evaluation)
+ */
+enum rkisp1_cif_isp_dpf_gain_usage {
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS,
+ RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_rb_filtersize - Red and blue filter sizes
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9: red and blue filter kernel size 13x9
+ * (means 7x5 active pixel)
+ * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9: red and blue filter kernel size 9x9
+ * (means 5x5 active pixel)
+ */
+enum rkisp1_cif_isp_dpf_rb_filtersize {
+ RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9,
+ RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9,
+};
+
+/**
+ * enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode
+ * @RKISP1_CIF_ISP_NLL_SCALE_LINEAR: use a linear scaling
+ * @RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC: use a logarithmic scaling
+ */
+enum rkisp1_cif_isp_dpf_nll_scale_mode {
+ RKISP1_CIF_ISP_NLL_SCALE_LINEAR,
+ RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC,
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_nll - Noise level lookup
+ *
+ * @coeff: Noise level Lookup coefficient
+ * @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode)
+ */
+struct rkisp1_cif_isp_dpf_nll {
+ __u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS];
+ __u32 scale_mode;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config
+ *
+ * @fltsize: The filter size for the red and blue pixels
+ * (from enum rkisp1_cif_isp_dpf_rb_filtersize)
+ * @spatial_coeff: Spatial weights
+ * @r_enable: enable filter processing for red pixels
+ * @b_enable: enable filter processing for blue pixels
+ */
+struct rkisp1_cif_isp_dpf_rb_flt {
+ __u32 fltsize;
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 r_enable;
+ __u8 b_enable;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
+ *
+ * @spatial_coeff: Spatial weights
+ * @gr_enable: enable filter processing for green pixels in green/red lines
+ * @gb_enable: enable filter processing for green pixels in green/blue lines
+ */
+struct rkisp1_cif_isp_dpf_g_flt {
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 gr_enable;
+ __u8 gb_enable;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
+ *
+ * @mode: dpf gain usage (from enum rkisp1_cif_isp_dpf_gain_usage)
+ * @nf_r_gain: Noise function Gain that replaces the AWB gain for red pixels
+ * @nf_b_gain: Noise function Gain that replaces the AWB gain for blue pixels
+ * @nf_gr_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a red line
+ * @nf_gb_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a blue line
+ */
+struct rkisp1_cif_isp_dpf_gain {
+ __u32 mode;
+ __u16 nf_r_gain;
+ __u16 nf_b_gain;
+ __u16 nf_gr_gain;
+ __u16 nf_gb_gain;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
+ *
+ * @gain: noise function gain
+ * @g_flt: green filter config
+ * @rb_flt: red blue filter config
+ * @nll: noise level lookup
+ */
+struct rkisp1_cif_isp_dpf_config {
+ struct rkisp1_cif_isp_dpf_gain gain;
+ struct rkisp1_cif_isp_dpf_g_flt g_flt;
+ struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
+ struct rkisp1_cif_isp_dpf_nll nll;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
+ *
+ * @r: filter strength of the RED filter
+ * @g: filter strength of the GREEN filter
+ * @b: filter strength of the BLUE filter
+ */
+struct rkisp1_cif_isp_dpf_strength_config {
+ __u8 r;
+ __u8 g;
+ __u8 b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in the Rockchip ISP1
+ *
+ * @dpcc_config: Defect Pixel Cluster Correction config
+ * @bls_config: Black Level Subtraction config
+ * @sdg_config: sensor degamma config
+ * @lsc_config: Lens Shade config
+ * @awb_gain_config: Auto White balance gain config
+ * @flt_config: filter config
+ * @bdm_config: demosaic config
+ * @ctk_config: cross talk config
+ * @goc_config: gamma out config
+ * @dpf_config: De-noising pre-filter config
+ * @dpf_strength_config: dpf strength config
+ * @cproc_config: color process config
+ * @ie_config: image effects config
+ */
+struct rkisp1_cif_isp_isp_other_cfg {
+ struct rkisp1_cif_isp_dpcc_config dpcc_config;
+ struct rkisp1_cif_isp_bls_config bls_config;
+ struct rkisp1_cif_isp_sdg_config sdg_config;
+ struct rkisp1_cif_isp_lsc_config lsc_config;
+ struct rkisp1_cif_isp_awb_gain_config awb_gain_config;
+ struct rkisp1_cif_isp_flt_config flt_config;
+ struct rkisp1_cif_isp_bdm_config bdm_config;
+ struct rkisp1_cif_isp_ctk_config ctk_config;
+ struct rkisp1_cif_isp_goc_config goc_config;
+ struct rkisp1_cif_isp_dpf_config dpf_config;
+ struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
+ struct rkisp1_cif_isp_cproc_config cproc_config;
+ struct rkisp1_cif_isp_ie_config ie_config;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
+ *
+ * @awb_meas_config: auto white balance config
+ * @hst_config: histogram config
+ * @aec_config: auto exposure config
+ * @afc_config: auto focus config
+ */
+struct rkisp1_cif_isp_isp_meas_cfg {
+ struct rkisp1_cif_isp_awb_meas_config awb_meas_config;
+ struct rkisp1_cif_isp_hst_config hst_config;
+ struct rkisp1_cif_isp_aec_config aec_config;
+ struct rkisp1_cif_isp_afc_config afc_config;
+} __packed;
+
+/**
+ * struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
+ *
+ * @module_en_update: mask of the modules whose enable bit should be updated
+ * @module_ens: the enable value for each module; applied only to the modules
+ *	whose bit is set in module_en_update
+ * @module_cfg_update: mask of the modules whose configuration should be updated
+ * @meas: measurement config
+ * @others: other config
+ */
+struct rkisp1_params_cfg {
+ __u32 module_en_update;
+ __u32 module_ens;
+ __u32 module_cfg_update;
+
+ struct rkisp1_cif_isp_isp_meas_cfg meas;
+ struct rkisp1_cif_isp_isp_other_cfg others;
+} __packed;
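+
+/*
+ * Usage sketch (hypothetical userspace code, not part of this header): a
+ * module is reconfigured by setting its bit in the update masks and filling
+ * in the matching member, e.g. enabling black level subtraction with a fixed
+ * red offset:
+ *
+ *	struct rkisp1_params_cfg cfg = { 0 };
+ *
+ *	cfg.module_en_update = RKISP1_CIF_ISP_MODULE_BLS;
+ *	cfg.module_ens = RKISP1_CIF_ISP_MODULE_BLS;
+ *	cfg.module_cfg_update = RKISP1_CIF_ISP_MODULE_BLS;
+ *	cfg.others.bls_config.fixed_val.r = 64;
+ */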
+
+/*---------- PART2: Measurement Statistics ------------*/
+
+/**
+ * struct rkisp1_cif_isp_awb_meas - AWB measured values
+ *
+ * @cnt: White pixel count, number of "white pixels" found during last
+ * measurement
+ * @mean_y_or_g: Mean value of Y within window and frames,
+ * Green if RGB is selected.
+ * @mean_cb_or_b: Mean value of Cb within window and frames,
+ * Blue if RGB is selected.
+ * @mean_cr_or_r: Mean value of Cr within window and frames,
+ * Red if RGB is selected.
+ */
+struct rkisp1_cif_isp_awb_meas {
+ __u32 cnt;
+ __u8 mean_y_or_g;
+ __u8 mean_cb_or_b;
+ __u8 mean_cr_or_r;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
+ *
+ * @awb_mean: Mean measured data
+ */
+struct rkisp1_cif_isp_awb_stat {
+ struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_bls_meas_val - BLS measured values
+ *
+ * @meas_r: Mean measured value for Bayer pattern R
+ * @meas_gr: Mean measured value for Bayer pattern Gr
+ * @meas_gb: Mean measured value for Bayer pattern Gb
+ * @meas_b: Mean measured value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_meas_val {
+ __u16 meas_r;
+ __u16 meas_gr;
+ __u16 meas_gb;
+ __u16 meas_b;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
+ *
+ * @exp_mean: Mean luminance value of each block
+ * @bls_val: BLS measured values
+ *
+ * The image is divided into a 5x5 grid of blocks, one mean value per block.
+ */
+struct rkisp1_cif_isp_ae_stat {
+ __u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
+ struct rkisp1_cif_isp_bls_meas_val bls_val;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_af_meas_val - AF measured values
+ *
+ * @sum: sharpness, refer to REF_01 for definition
+ * @lum: luminance, refer to REF_01 for definition
+ */
+struct rkisp1_cif_isp_af_meas_val {
+ __u32 sum;
+ __u32 lum;
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_af_stat - statistics auto focus data
+ *
+ * @window: AF measured values, one entry per window
+ *
+ * The module measures the sharpness in 3 windows of selectable size via
+ * register settings (ISP_AFM_*_A/B/C).
+ */
+struct rkisp1_cif_isp_af_stat {
+ struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_hist_stat - statistics histogram data
+ *
+ * @hist_bins: measured bin counters
+ *
+ * The measurement window is divided into 25 sub-windows, configured
+ * via the ISP_HIST_XXX registers.
+ */
+struct rkisp1_cif_isp_hist_stat {
+ __u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+} __packed;
+
+/**
+ * struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
+ *
+ * @awb: statistics data for automatic white balance
+ * @ae: statistics data for auto exposure
+ * @af: statistics data for auto focus
+ * @hist: statistics histogram data
+ */
+struct rkisp1_cif_isp_stat {
+ struct rkisp1_cif_isp_awb_stat awb;
+ struct rkisp1_cif_isp_ae_stat ae;
+ struct rkisp1_cif_isp_af_stat af;
+ struct rkisp1_cif_isp_hist_stat hist;
+} __packed;
+
+/**
+ * struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
+ *
+ * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_ definitions)
+ * @frame_id: frame ID for sync
+ * @params: statistics data
+ */
+struct rkisp1_stat_buffer {
+ __u32 meas_type;
+ __u32 frame_id;
+ struct rkisp1_cif_isp_stat params;
+} __packed;
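+
+/*
+ * Readout sketch (hypothetical userspace code, not part of this header):
+ * after dequeuing a buffer from the statistics metadata node, check
+ * meas_type before reading the corresponding member of params, e.g.
+ *
+ *	struct rkisp1_stat_buffer *stat = plane_vaddr;
+ *
+ *	if (stat->meas_type & RKISP1_CIF_ISP_STAT_AWB)
+ *		process_awb(&stat->params.awb);
+ *	if (stat->meas_type & RKISP1_CIF_ISP_STAT_HIST)
+ *		process_hist(&stat->params.hist);
+ */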
+
+#endif /* _UAPI_RKISP1_CONFIG_H */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 6945dc74e1d7..ce497d0197df 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -7,6 +7,7 @@
* Copyright (C) 2018 Bootlin
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
@@ -220,6 +221,23 @@ static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
}
}
+static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
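+	/* The decoder can only flush up to 32 bits per trigger, so iterate. */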
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER,
+ VE_DEC_H265_TRIGGER_FLUSH_BITS |
+ VE_DEC_H265_TRIGGER_TYPE_N_BITS(tmp));
+ while (cedrus_read(dev, VE_DEC_H265_STATUS) & VE_DEC_H265_STATUS_VLD_BUSY)
+ udelay(1);
+
+ count += tmp;
+ }
+}
+
static void cedrus_h265_setup(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -280,10 +298,9 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Source offset and length in bits. */
- reg = slice_params->data_bit_offset;
- cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, reg);
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
- reg = slice_params->bit_size - slice_params->data_bit_offset;
+ reg = slice_params->bit_size;
cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
/* Source beginning and end addresses. */
@@ -316,6 +333,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Initialize bitstream access. */
cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
+ cedrus_h265_skip_bits(dev, slice_params->data_bit_offset);
+
/* Bitstream parameters. */
reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
@@ -332,6 +351,7 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(sps->bit_depth_luma_minus8) |
VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index 7beb03d3bb39..66b152f18d17 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -424,6 +424,7 @@
#define VE_DEC_H265_TRIGGER (VE_ENGINE_DEC_H265 + 0x34)
+#define VE_DEC_H265_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
#define VE_DEC_H265_TRIGGER_STCD_VC1 (0x02 << 4)
#define VE_DEC_H265_TRIGGER_STCD_AVS (0x01 << 4)
#define VE_DEC_H265_TRIGGER_STCD_HEVC (0x00 << 4)
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
index ba49ea50b8c0..0dc78afd09e0 100644
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ b/drivers/staging/media/tegra-vde/Kconfig
@@ -3,7 +3,7 @@ config TEGRA_VDE
tristate "NVIDIA Tegra Video Decoder Engine driver"
depends on ARCH_TEGRA || COMPILE_TEST
select DMA_SHARED_BUFFER
- select IOMMU_IOVA if (IOMMU_SUPPORT || COMPILE_TEST)
+ select IOMMU_IOVA
select SRAM
help
Say Y here to enable support for the NVIDIA Tegra video decoder
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index 3466daddf663..e18fd48981da 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -1150,8 +1150,7 @@ static int tegra_vde_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_vde_pm_suspend(struct device *dev)
+static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
{
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
@@ -1165,7 +1164,7 @@ static int tegra_vde_pm_suspend(struct device *dev)
return 0;
}
-static int tegra_vde_pm_resume(struct device *dev)
+static __maybe_unused int tegra_vde_pm_resume(struct device *dev)
{
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
@@ -1178,7 +1177,6 @@ static int tegra_vde_pm_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops tegra_vde_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
diff --git a/drivers/staging/media/tegra-vde/vde.h b/drivers/staging/media/tegra-vde/vde.h
index d369f1466bc7..5561291b0c88 100644
--- a/drivers/staging/media/tegra-vde/vde.h
+++ b/drivers/staging/media/tegra-vde/vde.h
@@ -10,11 +10,11 @@
#include <linux/completion.h>
#include <linux/dma-direction.h>
+#include <linux/iova.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/types.h>
-#include <linux/iova.h>
struct clk;
struct dma_buf;
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 85ea5a434ced..20a99ecb37c4 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_MOST) += most_core.o
most_core-y := core.o
most_core-y += configfs.o
-ccflags-y += -I $(srctree)/drivers/staging/
obj-$(CONFIG_MOST_CDEV) += cdev/
obj-$(CONFIG_MOST_NET) += net/
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
index 9f4a8b8c9c27..ef90cd71994a 100644
--- a/drivers/staging/most/cdev/Makefile
+++ b/drivers/staging/most/cdev/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_CDEV) += most_cdev.o
most_cdev-objs := cdev.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index f880147c82fd..71943d17f825 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -16,7 +16,8 @@
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
-#include "most/core.h"
+
+#include "../most.h"
#define CHRDEV_REGION_SIZE 50
@@ -25,7 +26,7 @@ static struct cdev_component {
struct ida minor_id;
unsigned int major;
struct class *class;
- struct core_component cc;
+ struct most_component cc;
} comp;
struct comp_channel {
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
index 34a9fb53985c..9a961222f458 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/staging/most/configfs.c
@@ -10,7 +10,10 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/configfs.h>
-#include <most/core.h>
+
+#include "most.h"
+
+#define MAX_STRING_SIZE 80
struct mdev_link {
struct config_item item;
@@ -22,13 +25,13 @@ struct mdev_link {
u16 subbuffer_size;
u16 packets_per_xact;
u16 dbr_size;
- char datatype[PAGE_SIZE];
- char direction[PAGE_SIZE];
- char name[PAGE_SIZE];
- char device[PAGE_SIZE];
- char channel[PAGE_SIZE];
- char comp[PAGE_SIZE];
- char comp_params[PAGE_SIZE];
+ char datatype[MAX_STRING_SIZE];
+ char direction[MAX_STRING_SIZE];
+ char name[MAX_STRING_SIZE];
+ char device[MAX_STRING_SIZE];
+ char channel[MAX_STRING_SIZE];
+ char comp[MAX_STRING_SIZE];
+ char comp_params[MAX_STRING_SIZE];
};
static struct list_head mdev_link_list;
@@ -125,6 +128,8 @@ static ssize_t mdev_link_create_link_store(struct config_item *item,
return ret;
list_add_tail(&mdev_link->list, &mdev_link_list);
mdev_link->create_link = tmp;
+ mdev_link->destroy_link = false;
+
return count;
}
@@ -140,13 +145,16 @@ static ssize_t mdev_link_destroy_link_store(struct config_item *item,
return ret;
if (!tmp)
return count;
- mdev_link->destroy_link = tmp;
+
ret = most_remove_link(mdev_link->device, mdev_link->channel,
mdev_link->comp);
if (ret)
return ret;
if (!list_empty(&mdev_link_list))
list_del(&mdev_link->list);
+
+ mdev_link->destroy_link = tmp;
+
return count;
}
@@ -197,7 +205,7 @@ static ssize_t mdev_link_device_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->device, page);
+ strlcpy(mdev_link->device, page, sizeof(mdev_link->device));
strim(mdev_link->device);
return count;
}
@@ -212,7 +220,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->channel, page);
+ strlcpy(mdev_link->channel, page, sizeof(mdev_link->channel));
strim(mdev_link->channel);
return count;
}
@@ -227,7 +235,8 @@ static ssize_t mdev_link_comp_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->comp, page);
+ strlcpy(mdev_link->comp, page, sizeof(mdev_link->comp));
+ strim(mdev_link->comp);
return count;
}
@@ -242,7 +251,8 @@ static ssize_t mdev_link_comp_params_store(struct config_item *item,
{
struct mdev_link *mdev_link = to_mdev_link(item);
- strcpy(mdev_link->comp_params, page);
+ strlcpy(mdev_link->comp_params, page, sizeof(mdev_link->comp_params));
+ strim(mdev_link->comp_params);
return count;
}
@@ -373,13 +383,20 @@ static void mdev_link_release(struct config_item *item)
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
- if (!list_empty(&mdev_link_list)) {
- ret = most_remove_link(mdev_link->device, mdev_link->channel,
- mdev_link->comp);
- if (ret && (ret != -ENODEV))
- pr_err("Removing link failed.\n");
- list_del(&mdev_link->list);
+ if (mdev_link->destroy_link)
+ goto free_item;
+
+ ret = most_remove_link(mdev_link->device, mdev_link->channel,
+ mdev_link->comp);
+ if (ret) {
+ pr_err("Removing link failed.\n");
+ goto free_item;
}
+
+ if (!list_empty(&mdev_link_list))
+ list_del(&mdev_link->list);
+
+free_item:
kfree(to_mdev_link(item));
}
@@ -630,7 +647,7 @@ static struct most_sound most_sound_subsys = {
},
};
-int most_register_configfs_subsys(struct core_component *c)
+int most_register_configfs_subsys(struct most_component *c)
{
int ret;
@@ -674,7 +691,7 @@ void most_interface_register_notify(const char *mdev)
most_cfg_complete("sound");
}
-void most_deregister_configfs_subsys(struct core_component *c)
+void most_deregister_configfs_subsys(struct most_component *c)
{
if (!strcmp(c->name, "cdev"))
configfs_unregister_subsystem(&most_cdev.subsys);
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 51a6b41d5b82..0c4ae6920d77 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -2,10 +2,9 @@
/*
* core.c - Implementation of core module of MOST Linux driver stack
*
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
@@ -21,25 +20,18 @@
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
-#include <most/core.h>
+
+#include "most.h"
#define MAX_CHANNELS 64
#define STRING_SIZE 80
static struct ida mdev_id;
static int dummy_num_buffers;
-
-static struct mostcore {
- struct device dev;
- struct device_driver drv;
- struct bus_type bus;
- struct list_head comp_list;
-} mc;
-
-#define to_driver(d) container_of(d, struct mostcore, drv)
+static struct list_head comp_list;
struct pipe {
- struct core_component *comp;
+ struct most_component *comp;
int refs;
int num_buffers;
};
@@ -151,7 +143,7 @@ static void flush_channel_fifos(struct most_channel *c)
spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
- pr_info("WARN: fifo | trash fifo not empty\n");
+ dev_warn(&c->dev, "Channel or trash fifo not empty\n");
}
/**
@@ -402,7 +394,7 @@ static ssize_t description_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct most_interface *iface = to_most_interface(dev);
+ struct most_interface *iface = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}
@@ -411,7 +403,7 @@ static ssize_t interface_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct most_interface *iface = to_most_interface(dev);
+ struct most_interface *iface = dev_get_drvdata(dev);
switch (iface->interface) {
case ITYPE_LOOPBACK:
@@ -454,11 +446,11 @@ static const struct attribute_group *interface_attr_groups[] = {
NULL,
};
-static struct core_component *match_component(char *name)
+static struct most_component *match_component(char *name)
{
- struct core_component *comp;
+ struct most_component *comp;
- list_for_each_entry(comp, &mc.comp_list, list) {
+ list_for_each_entry(comp, &comp_list, list) {
if (!strcmp(comp->name, name))
return comp;
}
@@ -476,7 +468,7 @@ static int print_links(struct device *dev, void *data)
int offs = d->offs;
char *buf = d->buf;
struct most_channel *c;
- struct most_interface *iface = to_most_interface(dev);
+ struct most_interface *iface = dev_get_drvdata(dev);
list_for_each_entry(c, &iface->p->channel_list, list) {
if (c->pipe0.comp) {
@@ -484,7 +476,7 @@ static int print_links(struct device *dev, void *data)
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe0.comp->name,
- dev_name(&iface->dev),
+ dev_name(iface->dev),
dev_name(&c->dev));
}
if (c->pipe1.comp) {
@@ -492,7 +484,7 @@ static int print_links(struct device *dev, void *data)
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe1.comp->name,
- dev_name(&iface->dev),
+ dev_name(iface->dev),
dev_name(&c->dev));
}
}
@@ -500,20 +492,33 @@ static int print_links(struct device *dev, void *data)
return 0;
}
+static int most_match(struct device *dev, struct device_driver *drv)
+{
+ if (!strcmp(dev_name(dev), "most"))
+ return 0;
+ else
+ return 1;
+}
+
+static struct bus_type mostbus = {
+ .name = "most",
+ .match = most_match,
+};
+
static ssize_t links_show(struct device_driver *drv, char *buf)
{
struct show_links_data d = { .buf = buf };
- bus_for_each_dev(&mc.bus, NULL, &d, print_links);
+ bus_for_each_dev(&mostbus, NULL, &d, print_links);
return d.offs;
}
static ssize_t components_show(struct device_driver *drv, char *buf)
{
- struct core_component *comp;
+ struct most_component *comp;
int offs = 0;
- list_for_each_entry(comp, &mc.comp_list, list) {
+ list_for_each_entry(comp, &comp_list, list) {
offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
comp->name);
}
@@ -531,10 +536,11 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
struct most_interface *iface;
struct most_channel *c, *tmp;
- dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
+ dev = bus_find_device_by_name(&mostbus, NULL, mdev);
if (!dev)
return NULL;
- iface = to_most_interface(dev);
+ put_device(dev);
+ iface = dev_get_drvdata(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (!strcmp(dev_name(&c->dev), mdev_ch))
return c;
@@ -544,12 +550,12 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
static
inline int link_channel_to_component(struct most_channel *c,
- struct core_component *comp,
+ struct most_component *comp,
char *name,
char *comp_param)
{
int ret;
- struct core_component **comp_ptr;
+ struct most_component **comp_ptr;
if (!c->pipe0.comp)
comp_ptr = &c->pipe0.comp;
@@ -623,7 +629,7 @@ int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
}
if (i == ARRAY_SIZE(ch_data_type))
- pr_info("WARN: invalid attribute settings\n");
+ dev_warn(&c->dev, "Invalid attribute settings\n");
return 0;
}
@@ -642,7 +648,7 @@ int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
} else if (!strcmp(buf, "tx")) {
c->cfg.direction = MOST_CH_TX;
} else {
- pr_info("Invalid direction\n");
+ dev_err(&c->dev, "Invalid direction\n");
return -ENODATA;
}
return 0;
@@ -660,7 +666,7 @@ int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
int most_cfg_complete(char *comp_name)
{
- struct core_component *comp;
+ struct most_component *comp;
comp = match_component(comp_name);
if (!comp)
@@ -673,7 +679,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
char *comp_param)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
- struct core_component *comp = match_component(comp_name);
+ struct most_component *comp = match_component(comp_name);
if (!c || !comp)
return -ENODEV;
@@ -684,7 +690,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
struct most_channel *c;
- struct core_component *comp;
+ struct most_component *comp;
comp = match_component(comp_name);
if (!comp)
@@ -722,13 +728,11 @@ static const struct attribute_group *mc_attr_groups[] = {
NULL,
};
-static int most_match(struct device *dev, struct device_driver *drv)
-{
- if (!strcmp(dev_name(dev), "most"))
- return 0;
- else
- return 1;
-}
+static struct device_driver mostbus_driver = {
+ .name = "most_core",
+ .bus = &mostbus,
+ .groups = mc_attr_groups,
+};
static inline void trash_mbo(struct mbo *mbo)
{
@@ -795,7 +799,7 @@ static int hdm_enqueue_thread(void *data)
mutex_unlock(&c->nq_mutex);
if (unlikely(ret)) {
- pr_err("hdm enqueue failed\n");
+ dev_err(&c->dev, "Buffer enqueue failed\n");
nq_hdm_mbo(mbo);
c->hdm_enqueue_task = NULL;
return 0;
@@ -922,7 +926,7 @@ flush_fifos:
void most_submit_mbo(struct mbo *mbo)
{
if (WARN_ONCE(!mbo || !mbo->context,
- "bad mbo or missing channel reference\n"))
+ "Bad buffer or missing channel reference\n"))
return;
nq_hdm_mbo(mbo);
@@ -941,8 +945,6 @@ static void most_write_completion(struct mbo *mbo)
struct most_channel *c;
c = mbo->context;
- if (mbo->status == MBO_E_INVAL)
- pr_info("WARN: Tx MBO status: invalid\n");
if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
trash_mbo(mbo);
else
@@ -950,7 +952,7 @@ static void most_write_completion(struct mbo *mbo)
}
int channel_has_mbo(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
struct most_channel *c = iface->p->channel[id];
unsigned long flags;
@@ -981,7 +983,7 @@ EXPORT_SYMBOL_GPL(channel_has_mbo);
* Returns a pointer to MBO on success or NULL otherwise.
*/
struct mbo *most_get_mbo(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
struct mbo *mbo;
struct most_channel *c;
@@ -1087,7 +1089,7 @@ static void most_read_completion(struct mbo *mbo)
* Returns 0 on success or error code otherwise.
*/
int most_start_channel(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
int num_buffer;
int ret;
@@ -1101,14 +1103,14 @@ int most_start_channel(struct most_interface *iface, int id,
goto out; /* already started by another component */
if (!try_module_get(iface->mod)) {
- pr_info("failed to acquire HDM lock\n");
+ dev_err(&c->dev, "Failed to acquire HDM lock\n");
mutex_unlock(&c->start_mutex);
return -ENOLCK;
}
c->cfg.extra_len = 0;
if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
- pr_info("channel configuration failed. Go check settings...\n");
+ dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
ret = -EINVAL;
goto err_put_module;
}
@@ -1157,7 +1159,7 @@ EXPORT_SYMBOL_GPL(most_start_channel);
* @comp: driver component
*/
int most_stop_channel(struct most_interface *iface, int id,
- struct core_component *comp)
+ struct most_component *comp)
{
struct most_channel *c;
@@ -1182,8 +1184,8 @@ int most_stop_channel(struct most_interface *iface, int id,
c->is_poisoned = true;
if (c->iface->poison_channel(c->iface, c->channel_id)) {
- pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
- c->iface->description);
+ dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
+ c->iface->description);
mutex_unlock(&c->start_mutex);
return -EAGAIN;
}
@@ -1192,7 +1194,7 @@ int most_stop_channel(struct most_interface *iface, int id,
#ifdef CMPL_INTERRUPTIBLE
if (wait_for_completion_interruptible(&c->cleanup)) {
- pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+ dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
mutex_unlock(&c->start_mutex);
return -EINTR;
}
@@ -1215,14 +1217,13 @@ EXPORT_SYMBOL_GPL(most_stop_channel);
* most_register_component - registers a driver component with the core
* @comp: driver component
*/
-int most_register_component(struct core_component *comp)
+int most_register_component(struct most_component *comp)
{
if (!comp) {
pr_err("Bad component\n");
return -EINVAL;
}
- list_add_tail(&comp->list, &mc.comp_list);
- pr_info("registered new core component %s\n", comp->name);
+ list_add_tail(&comp->list, &comp_list);
return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);
@@ -1231,9 +1232,9 @@ static int disconnect_channels(struct device *dev, void *data)
{
struct most_interface *iface;
struct most_channel *c, *tmp;
- struct core_component *comp = data;
+ struct most_component *comp = data;
- iface = to_most_interface(dev);
+ iface = dev_get_drvdata(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (c->pipe0.comp == comp || c->pipe1.comp == comp)
comp->disconnect_channel(c->iface, c->channel_id);
@@ -1249,28 +1250,24 @@ static int disconnect_channels(struct device *dev, void *data)
* most_deregister_component - deregisters a driver component with the core
* @comp: driver component
*/
-int most_deregister_component(struct core_component *comp)
+int most_deregister_component(struct most_component *comp)
{
if (!comp) {
pr_err("Bad component\n");
return -EINVAL;
}
- bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
+ bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
list_del(&comp->list);
- pr_info("deregistering component %s\n", comp->name);
return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_component);
-static void release_interface(struct device *dev)
-{
- pr_info("releasing interface dev %s...\n", dev_name(dev));
-}
-
static void release_channel(struct device *dev)
{
- pr_info("releasing channel dev %s...\n", dev_name(dev));
+ struct most_channel *c = to_channel(dev);
+
+ kfree(c);
}
/**
@@ -1288,13 +1285,13 @@ int most_register_interface(struct most_interface *iface)
if (!iface || !iface->enqueue || !iface->configure ||
!iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
- pr_err("Bad interface or channel overflow\n");
+ dev_err(iface->dev, "Bad interface or channel overflow\n");
return -EINVAL;
}
id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
if (id < 0) {
- pr_info("Failed to alloc mdev ID\n");
+ dev_err(iface->dev, "Failed to allocate device ID\n");
return id;
}
@@ -1307,14 +1304,13 @@ int most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
- iface->dev.init_name = iface->p->name;
- iface->dev.bus = &mc.bus;
- iface->dev.parent = &mc.dev;
- iface->dev.groups = interface_attr_groups;
- iface->dev.release = release_interface;
- if (device_register(&iface->dev)) {
- pr_err("registering iface->dev failed\n");
+ iface->dev->bus = &mostbus;
+ iface->dev->groups = interface_attr_groups;
+ dev_set_drvdata(iface->dev, iface);
+ if (device_register(iface->dev)) {
+ dev_err(iface->dev, "Failed to register interface device\n");
kfree(iface->p);
+ put_device(iface->dev);
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
@@ -1330,7 +1326,7 @@ int most_register_interface(struct most_interface *iface)
else
snprintf(c->name, STRING_SIZE, "%s", name_suffix);
c->dev.init_name = c->name;
- c->dev.parent = &iface->dev;
+ c->dev.parent = iface->dev;
c->dev.groups = channel_attr_groups;
c->dev.release = release_channel;
iface->p->channel[i] = c;
@@ -1356,26 +1352,23 @@ int most_register_interface(struct most_interface *iface)
mutex_init(&c->nq_mutex);
list_add_tail(&c->list, &iface->p->channel_list);
if (device_register(&c->dev)) {
- pr_err("registering c->dev failed\n");
+ dev_err(&c->dev, "Failed to register channel device\n");
goto err_free_most_channel;
}
}
- pr_info("registered new device mdev%d (%s)\n",
- id, iface->description);
most_interface_register_notify(iface->description);
return 0;
err_free_most_channel:
- kfree(c);
+ put_device(&c->dev);
err_free_resources:
while (i > 0) {
c = iface->p->channel[--i];
device_unregister(&c->dev);
- kfree(c);
}
kfree(iface->p);
- device_unregister(&iface->dev);
+ device_unregister(iface->dev);
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
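A recurring fix in the hunks above: once device_register() has been called on a device, its memory must be released through the device's release callback via put_device(), never kfree(), because the driver core may still hold references to it. That is also why release_channel() now does the kfree() itself. A condensed sketch of the pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct chan {
	struct device dev;
	/* channel state ... */
};

static void chan_release(struct device *dev)
{
	kfree(container_of(dev, struct chan, dev));	/* single free point */
}

static struct chan *chan_create(struct device *parent, const char *name)
{
	struct chan *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	c->dev.parent = parent;
	c->dev.release = chan_release;
	c->dev.init_name = name;
	if (device_register(&c->dev)) {
		/* never kfree() here: drop the reference and let
		 * chan_release() free the memory */
		put_device(&c->dev);
		return NULL;
	}
	return c;
}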
@@ -1393,8 +1386,6 @@ void most_deregister_interface(struct most_interface *iface)
int i;
struct most_channel *c;
- pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
- iface->description);
for (i = 0; i < iface->num_channels; i++) {
c = iface->p->channel[i];
if (c->pipe0.comp)
@@ -1407,12 +1398,11 @@ void most_deregister_interface(struct most_interface *iface)
c->pipe1.comp = NULL;
list_del(&c->list);
device_unregister(&c->dev);
- kfree(c);
}
ida_simple_remove(&mdev_id, iface->p->dev_id);
kfree(iface->p);
- device_unregister(&iface->dev);
+ device_unregister(iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);
@@ -1462,57 +1452,35 @@ void most_resume_enqueue(struct most_interface *iface, int id)
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
-static void release_most_sub(struct device *dev)
-{
- pr_info("releasing most_subsystem\n");
-}
-
static int __init most_init(void)
{
int err;
- pr_info("init()\n");
- INIT_LIST_HEAD(&mc.comp_list);
+ INIT_LIST_HEAD(&comp_list);
ida_init(&mdev_id);
- mc.bus.name = "most",
- mc.bus.match = most_match,
- mc.drv.name = "most_core",
- mc.drv.bus = &mc.bus,
- mc.drv.groups = mc_attr_groups;
-
- err = bus_register(&mc.bus);
+ err = bus_register(&mostbus);
if (err) {
- pr_info("Cannot register most bus\n");
+ pr_err("Failed to register most bus\n");
return err;
}
- err = driver_register(&mc.drv);
+ err = driver_register(&mostbus_driver);
if (err) {
- pr_info("Cannot register core driver\n");
+ pr_err("Failed to register core driver\n");
goto err_unregister_bus;
}
- mc.dev.init_name = "most_bus";
- mc.dev.release = release_most_sub;
- if (device_register(&mc.dev)) {
- err = -ENOMEM;
- goto err_unregister_driver;
- }
configfs_init();
return 0;
-err_unregister_driver:
- driver_unregister(&mc.drv);
err_unregister_bus:
- bus_unregister(&mc.bus);
+ bus_unregister(&mostbus);
return err;
}
static void __exit most_exit(void)
{
- pr_info("exit core module\n");
- device_unregister(&mc.dev);
- driver_unregister(&mc.drv);
- bus_unregister(&mc.bus);
+ driver_unregister(&mostbus_driver);
+ bus_unregister(&mostbus);
ida_destroy(&mdev_id);
}
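Taken together, the core.c changes replace the runtime-assembled mostcore singleton (bus, driver and dummy parent device built inside most_init()) with statically declared mostbus and mostbus_driver objects, and drop the fake "most_bus" device entirely. Because the interface device is now allocated and owned by the adapter driver rather than embedded in struct most_interface, the core can no longer recover the interface with container_of(); it stores the back-pointer with dev_set_drvdata() at registration time and fetches it with dev_get_drvdata() wherever to_most_interface() used to be. A reduced sketch of that pattern (names hypothetical):

#include <linux/device.h>

struct example_iface {
	struct device *dev;	/* supplied and owned by the adapter */
	/* ... */
};

static struct bus_type example_bus = {
	.name = "example",
};

static int example_register(struct example_iface *iface)
{
	iface->dev->bus = &example_bus;
	dev_set_drvdata(iface->dev, iface);	/* device -> iface mapping */
	return device_register(iface->dev);
}

static int example_visit(struct device *dev, void *data)
{
	struct example_iface *iface = dev_get_drvdata(dev);
	/* ... walk channels, print links, ... */
	return 0;
}

static int __init example_init(void)
{
	return bus_register(&example_bus);	/* static, no parent device */
}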
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
index 116f04d69244..861adacf6c72 100644
--- a/drivers/staging/most/dim2/Makefile
+++ b/drivers/staging/most/dim2/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_DIM2) += most_dim2.o
most_dim2-objs := dim2.o hal.o sysfs.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 64c979155a49..16593281fcda 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -21,7 +21,7 @@
#include <linux/sched.h>
#include <linux/kthread.h>
-#include "most/core.h"
+#include "../most.h"
#include "hal.h"
#include "errors.h"
#include "sysfs.h"
@@ -854,8 +854,9 @@ static int dim2_probe(struct platform_device *pdev)
dev->most_iface.poison_channel = poison_channel;
dev->most_iface.request_netinfo = request_netinfo;
dev->most_iface.driver_dev = &pdev->dev;
+ dev->most_iface.dev = &dev->dev;
dev->dev.init_name = "dim2_state";
- dev->dev.parent = &dev->most_iface.dev;
+ dev->dev.parent = &pdev->dev;
ret = most_register_interface(&dev->most_iface);
if (ret) {
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
index 2b3769dc19e7..71099dd0f85b 100644
--- a/drivers/staging/most/i2c/Makefile
+++ b/drivers/staging/most/i2c/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_I2C) += most_i2c.o
most_i2c-objs := i2c.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 4a4fc1005932..2980f7065846 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
-#include "most/core.h"
+#include "../most.h"
enum { CH_RX, CH_TX, NUM_CHANNELS };
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/most.h
index 49859aef98df..232e01b7f5d2 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/most.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* most.h - API for component and adapter drivers
*
@@ -47,7 +47,7 @@ enum most_channel_data_type {
MOST_CH_SYNC = 1 << 5,
};
-enum mbo_status_flags {
+enum most_status_flags {
/* MBO was processed successfully (data was sent or received) */
MBO_SUCCESS = 0,
/* The MBO contains wrong or missing information. */
@@ -184,7 +184,7 @@ struct mbo {
dma_addr_t bus_address;
u16 buffer_length;
u16 processed_length;
- enum mbo_status_flags status;
+ enum most_status_flags status;
void (*complete)(struct mbo *mbo);
};
@@ -229,7 +229,7 @@ struct mbo {
* @priv: Private field used by mostcore to store context information.
*/
struct most_interface {
- struct device dev;
+ struct device *dev;
struct device *driver_dev;
struct module *mod;
enum most_interface_type interface;
@@ -251,10 +251,8 @@ struct most_interface {
struct interface_private *p;
};
-#define to_most_interface(d) container_of(d, struct most_interface, dev)
-
/**
- * struct core_component - identifies a loadable component for the mostcore
+ * struct most_component - identifies a loadable component for the mostcore
* @list: list_head
* @name: component name
* @probe_channel: function for core to notify driver about channel connection
@@ -262,7 +260,7 @@ struct most_interface {
* @rx_completion: completion handler for received packets
* @tx_completion: completion handler for transmitted packets
*/
-struct core_component {
+struct most_component {
struct list_head list;
const char *name;
struct module *mod;
@@ -310,20 +308,20 @@ void most_stop_enqueue(struct most_interface *iface, int channel_idx);
* in wait fifo.
*/
void most_resume_enqueue(struct most_interface *iface, int channel_idx);
-int most_register_component(struct core_component *comp);
-int most_deregister_component(struct core_component *comp);
+int most_register_component(struct most_component *comp);
+int most_deregister_component(struct most_component *comp);
struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
void most_put_mbo(struct mbo *mbo);
int channel_has_mbo(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
int most_start_channel(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
int most_stop_channel(struct most_interface *iface, int channel_idx,
- struct core_component *comp);
+ struct most_component *comp);
int __init configfs_init(void);
-int most_register_configfs_subsys(struct core_component *comp);
-void most_deregister_configfs_subsys(struct core_component *comp);
+int most_register_configfs_subsys(struct most_component *comp);
+void most_deregister_configfs_subsys(struct most_component *comp);
int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
char *comp_param);
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
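The API-relevant change in the header is struct most_interface's embedded struct device dev becoming a struct device *dev pointer, with the to_most_interface() macro going away: the adapter (HDM) driver now owns the device, so it must point iface->dev at a device it manages, complete with a release callback, before calling most_register_interface(). A hedged sketch of the adapter-side wiring, modeled on the usb.c changes later in this diff (names hypothetical; the mandatory channel ops and includes of "most.h" for struct most_interface are assumed):

#include <linux/device.h>
#include <linux/slab.h>

struct my_hdm {
	struct device dev;		/* owned by this adapter */
	struct most_interface iface;
};

static void my_hdm_release(struct device *dev)
{
	kfree(container_of(dev, struct my_hdm, dev));
}

static int my_hdm_setup(struct my_hdm *hdm, struct device *parent)
{
	hdm->dev.parent = parent;
	hdm->dev.release = my_hdm_release;
	hdm->dev.init_name = "my-hdm0";	/* the core no longer names it */
	hdm->iface.dev = &hdm->dev;	/* the core registers this device */
	return most_register_interface(&hdm->iface);
}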
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
index f0ac64dee71b..1582c97eb204 100644
--- a/drivers/staging/most/net/Makefile
+++ b/drivers/staging/most/net/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_NET) += most_net.o
most_net-objs := net.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 6cab1bb8956e..5547e36e09de 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -15,7 +15,8 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/kobject.h>
-#include "most/core.h"
+
+#include "../most.h"
#define MEP_HDR_LEN 8
#define MDP_HDR_LEN 16
@@ -70,7 +71,7 @@ struct net_dev_context {
static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
-static struct core_component comp;
+static struct most_component comp;
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
{
@@ -81,6 +82,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
unsigned int payload_len = skb->len - ETH_HLEN;
unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+ if (mdp_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
if (mbo->buffer_length < mdp_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mdp_len);
@@ -128,6 +134,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
u8 *buff = mbo->virt_address;
unsigned int mep_len = skb->len + MEP_HDR_LEN;
+ if (mep_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
if (mbo->buffer_length < mep_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mep_len);
@@ -497,7 +508,7 @@ put_nd:
return ret;
}
-static struct core_component comp = {
+static struct most_component comp = {
.mod = THIS_MODULE,
.name = "net",
.probe_channel = comp_probe_channel,
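The two new length checks in net.c are integer-overflow guards: mdp_len and mep_len are computed as skb->len plus a fixed header size in an unsigned int, so a pathologically large skb->len would wrap around and slip past the following "buffer too small" comparison. Comparing the sum against one of its operands is the standard wraparound test; a condensed illustration:

#include <linux/types.h>

/* Unsigned addition wraps modulo 2^32, so a wrapped sum is smaller
 * than either operand. */
static inline bool add_would_wrap(unsigned int a, unsigned int b)
{
	return a + b < a;
}

/* usage, mirroring the hunks above:
 *	if (add_would_wrap(skb->len, MEP_HDR_LEN))
 *		return -EINVAL;
 */

Recent kernels also provide check_add_overflow() in <linux/overflow.h> for the same purpose.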
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
index a3d086c6ca70..f0cd9d8d213e 100644
--- a/drivers/staging/most/sound/Makefile
+++ b/drivers/staging/most/sound/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_SOUND) += most_sound.o
most_sound-objs := sound.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 723d0bd1cc21..44cf2334834f 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -17,12 +17,13 @@
#include <sound/pcm_params.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-#include <most/core.h>
+
+#include "../most.h"
#define DRIVER_NAME "sound"
#define STRING_SIZE 80
-static struct core_component comp;
+static struct most_component comp;
/**
* struct channel - private structure to keep channel specific data
@@ -323,45 +324,6 @@ static int pcm_close(struct snd_pcm_substream *substream)
}
/**
- * pcm_hw_params - implements hw_params callback function for PCM middle layer
- * @substream: sub-stream pointer
- * @hw_params: contains the hardware parameters set by the application
- *
- * This is called when the hardware parameters is set by the application, that
- * is, once when the buffer size, the period size, the format, etc. are defined
- * for the PCM substream. Many hardware setups should be done is this callback,
- * including the allocation of buffers.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct channel *channel = substream->private_data;
-
- if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
- (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
- pr_err("Requested number of channels not supported.\n");
- return -EINVAL;
- }
- return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
-}
-
-/**
- * pcm_hw_free - implements hw_free callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This is called to release the resources allocated via hw_params.
- * This function will be always called before the close callback is called.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
-/**
* pcm_prepare - implements prepare callback function for PCM middle layer
* @substream: substream pointer
*
@@ -462,9 +424,6 @@ static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops pcm_ops = {
.open = pcm_open,
.close = pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pcm_hw_params,
- .hw_free = pcm_hw_free,
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
@@ -661,8 +620,7 @@ skip_adpt_alloc:
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
- NULL, 0, 0);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
return 0;
@@ -779,9 +737,9 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
}
/**
- * Initialization of the struct core_component
+ * Initialization of the struct most_component
*/
-static struct core_component comp = {
+static struct most_component comp = {
.mod = THIS_MODULE,
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
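The sound.c hunks convert the driver to the managed PCM buffer API introduced around v5.5: snd_pcm_set_managed_buffer_all() lets the PCM core preallocate and size the vmalloc buffer itself, which makes the driver's hw_params/hw_free callbacks (whose only real work was snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages()) and the explicit .ioctl = snd_pcm_lib_ioctl default unnecessary. A sketch of the converted shape, with hypothetical callback names and bodies elided:

#include <sound/pcm.h>

static int my_open(struct snd_pcm_substream *ss);
static int my_close(struct snd_pcm_substream *ss);
static int my_prepare(struct snd_pcm_substream *ss);
static int my_trigger(struct snd_pcm_substream *ss, int cmd);
static snd_pcm_uframes_t my_pointer(struct snd_pcm_substream *ss);

static const struct snd_pcm_ops my_pcm_ops = {
	.open    = my_open,
	.close   = my_close,
	.prepare = my_prepare,	/* parameter checks can move here */
	.trigger = my_trigger,
	.pointer = my_pointer,
	/* no .ioctl/.hw_params/.hw_free: the PCM core now covers all three */
};

static void my_pcm_setup(struct snd_pcm *pcm, int dir)
{
	snd_pcm_set_ops(pcm, dir, &my_pcm_ops);
	/* size/max of 0/0: no preallocation, the core allocates the
	 * vmalloc buffer on demand at hw_params time */
	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
				       NULL, 0, 0);
}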
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
index 83cf2ead7122..c2b207339aec 100644
--- a/drivers/staging/most/usb/Makefile
+++ b/drivers/staging/most/usb/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_USB) += most_usb.o
most_usb-objs := usb.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index 360cb5b7a10b..0bda88c4bc89 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -23,7 +23,8 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
-#include "most/core.h"
+
+#include "../most.h"
#define USB_MTU 512
#define NO_ISOCHRONOUS_URB 0
@@ -101,6 +102,7 @@ struct clear_hold_work {
* @poll_work_obj: work for polling link status
*/
struct most_dev {
+ struct device dev;
struct usb_device *usb_device;
struct most_interface iface;
struct most_channel_capability *cap;
@@ -122,6 +124,7 @@ struct most_dev {
};
#define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_dev(d) container_of(d, struct most_dev, dev)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
static void wq_clear_halt(struct work_struct *wq_obj);
@@ -1022,6 +1025,12 @@ static void release_dci(struct device *dev)
kfree(dci);
}
+static void release_mdev(struct device *dev)
+{
+ struct most_dev *mdev = to_mdev_from_dev(dev);
+
+ kfree(mdev);
+}
/**
* hdm_probe - probe function of USB device driver
* @interface: Interface of the attached USB device
@@ -1060,6 +1069,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
mdev->iface.mod = hdm_usb_fops.owner;
+ mdev->iface.dev = &mdev->dev;
mdev->iface.driver_dev = &interface->dev;
mdev->iface.interface = ITYPE_USB;
mdev->iface.configure = hdm_configure_channel;
@@ -1078,6 +1088,9 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
usb_dev->config->desc.bConfigurationValue,
usb_iface_desc->desc.bInterfaceNumber);
+ mdev->dev.init_name = mdev->description;
+ mdev->dev.parent = &interface->dev;
+ mdev->dev.release = release_mdev;
mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
if (!mdev->conf)
goto err_free_mdev;
@@ -1151,7 +1164,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
}
mdev->dci->dev.init_name = "dci";
- mdev->dci->dev.parent = &mdev->iface.dev;
+ mdev->dci->dev.parent = get_device(mdev->iface.dev);
mdev->dci->dev.groups = dci_attr_groups;
mdev->dci->dev.release = release_dci;
if (device_register(&mdev->dci->dev)) {
@@ -1165,7 +1178,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mutex_unlock(&mdev->io_mutex);
return 0;
err_free_dci:
- kfree(mdev->dci);
+ put_device(&mdev->dci->dev);
err_free_busy_urbs:
kfree(mdev->busy_urbs);
err_free_ep_address:
@@ -1175,7 +1188,7 @@ err_free_cap:
err_free_conf:
kfree(mdev->conf);
err_free_mdev:
- kfree(mdev);
+ put_device(&mdev->dev);
err_out_of_memory:
if (ret == 0 || ret == -ENOMEM) {
ret = -ENOMEM;
@@ -1205,14 +1218,15 @@ static void hdm_disconnect(struct usb_interface *interface)
del_timer_sync(&mdev->link_stat_timer);
cancel_work_sync(&mdev->poll_work_obj);
- device_unregister(&mdev->dci->dev);
+ if (mdev->dci)
+ device_unregister(&mdev->dci->dev);
most_deregister_interface(&mdev->iface);
kfree(mdev->busy_urbs);
kfree(mdev->cap);
kfree(mdev->conf);
kfree(mdev->ep_address);
- kfree(mdev);
+ put_device(&mdev->dev);
}
static struct usb_driver hdm_usb = {
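Two lifetime fixes stand out in the usb.c hunks. First, struct most_dev gains an embedded struct device with release_mdev() as its release callback, so every error path and hdm_disconnect() now drop it with put_device() rather than kfree() (see the note after most_register_interface() above); the added "if (mdev->dci)" check also keeps disconnect from unregistering a DCI device that was never created. Second, the DCI child takes an explicit reference on its parent via get_device(mdev->iface.dev), pinning the interface device for as long as the child may dereference it; whoever tears the child down is then responsible for the matching put_device(). A minimal sketch of that pinning idiom (hypothetical names):

#include <linux/device.h>

/* Pin @parent while @child is wired to it ... */
static void child_bind(struct device *child, struct device *parent)
{
	child->parent = get_device(parent);	/* +1 ref on parent */
}

/* ... and drop the reference when the child is torn down. */
static void child_unbind(struct device *child)
{
	put_device(child->parent);
	child->parent = NULL;
}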
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
index 2d857d3cbcc8..856175fec8b6 100644
--- a/drivers/staging/most/video/Makefile
+++ b/drivers/staging/most/video/Makefile
@@ -2,4 +2,3 @@
obj-$(CONFIG_MOST_VIDEO) += most_video.o
most_video-objs := video.o
-ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 10c1ef7e3a3e..d32ae49d617b 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -21,11 +21,11 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
-#include "most/core.h"
+#include "../most.h"
#define V4L2_CMP_MAX_INPUT 1
-static struct core_component comp;
+static struct most_component comp;
struct most_video_dev {
struct most_interface *iface;
@@ -527,7 +527,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
return 0;
}
-static struct core_component comp = {
+static struct most_component comp = {
.mod = THIS_MODULE,
.name = "video",
.probe_channel = comp_probe_channel,
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index a4c08110094b..d89d68ffa7bc 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -138,7 +138,7 @@
memc: memc@5000 {
compatible = "mtk,mt7621-memc";
- reg = <0x300 0x100>;
+ reg = <0x5000 0x1000>;
};
cpc: cpc@1fbf0000 {
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index 01dbb66f7e9a..386d619e3ee9 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -123,6 +123,8 @@ static int nvec_kbd_probe(struct platform_device *pdev)
keycodes[j++] = extcode_tab_us102[i];
idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
idev->name = "nvec keyboard";
idev->phys = "nvec";
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED);
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
deleted file mode 100644
index 6a5d842ee0f2..000000000000
--- a/drivers/staging/octeon-usb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_USB
- tristate "Cavium Networks Octeon USB support"
- depends on CAVIUM_OCTEON_SOC && USB
- help
- This driver supports USB host controller on some Cavium
- Networks' products in the Octeon family.
-
- To compile this driver as a module, choose M here. The module
- will be called octeon-hcd.
-
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
deleted file mode 100644
index 9873a0130ad5..000000000000
--- a/drivers/staging/octeon-usb/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-${CONFIG_OCTEON_USB} := octeon-hcd.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
deleted file mode 100644
index 2b29acca5caa..000000000000
--- a/drivers/staging/octeon-usb/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-This driver is functional and has been tested on EdgeRouter Lite,
-D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
-
-TODO:
- - kernel coding style
- - checkpatch warnings
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
deleted file mode 100644
index 582c9187559d..000000000000
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ /dev/null
@@ -1,3737 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Cavium Networks
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb/hcd.h>
-#include <linux/prefetch.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "octeon-hcd.h"
-
-/**
- * enum cvmx_usb_speed - the possible USB device speeds
- *
- * @CVMX_USB_SPEED_HIGH: Device is operation at 480Mbps
- * @CVMX_USB_SPEED_FULL: Device is operation at 12Mbps
- * @CVMX_USB_SPEED_LOW: Device is operation at 1.5Mbps
- */
-enum cvmx_usb_speed {
- CVMX_USB_SPEED_HIGH = 0,
- CVMX_USB_SPEED_FULL = 1,
- CVMX_USB_SPEED_LOW = 2,
-};
-
-/**
- * enum cvmx_usb_transfer - the possible USB transfer types
- *
- * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
- * transfers
- * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
- * priority periodic transfers
- * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
- * transfers
- * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
- * periodic transfers
- */
-enum cvmx_usb_transfer {
- CVMX_USB_TRANSFER_CONTROL = 0,
- CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
- CVMX_USB_TRANSFER_BULK = 2,
- CVMX_USB_TRANSFER_INTERRUPT = 3,
-};
-
-/**
- * enum cvmx_usb_direction - the transfer directions
- *
- * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
- * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
- */
-enum cvmx_usb_direction {
- CVMX_USB_DIRECTION_OUT,
- CVMX_USB_DIRECTION_IN,
-};
-
-/**
- * enum cvmx_usb_status - possible callback function status codes
- *
- * @CVMX_USB_STATUS_OK: The transaction / operation finished without
- * any errors
- * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented
- * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight
- * by a user call to cvmx_usb_cancel
- * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected
- * error status
- * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response
- * from the device
- * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the
- * device even after a number of retries
- * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle
- * error even after a number of retries
- * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error
- * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error
- * even after a number of retries
- */
-enum cvmx_usb_status {
- CVMX_USB_STATUS_OK,
- CVMX_USB_STATUS_SHORT,
- CVMX_USB_STATUS_CANCEL,
- CVMX_USB_STATUS_ERROR,
- CVMX_USB_STATUS_STALL,
- CVMX_USB_STATUS_XACTERR,
- CVMX_USB_STATUS_DATATGLERR,
- CVMX_USB_STATUS_BABBLEERR,
- CVMX_USB_STATUS_FRAMEERR,
-};
-
-/**
- * struct cvmx_usb_port_status - the USB port status information
- *
- * @port_enabled: 1 = Usb port is enabled, 0 = disabled
- * @port_over_current: 1 = Over current detected, 0 = Over current not
- * detected. Octeon doesn't support over current detection.
- * @port_powered: 1 = Port power is being supplied to the device, 0 =
- * power is off. Octeon doesn't support turning port power
- * off.
- * @port_speed: Current port speed.
- * @connected: 1 = A device is connected to the port, 0 = No device is
- * connected.
- * @connect_change: 1 = Device connected state changed since the last set
- * status call.
- */
-struct cvmx_usb_port_status {
- u32 reserved : 25;
- u32 port_enabled : 1;
- u32 port_over_current : 1;
- u32 port_powered : 1;
- enum cvmx_usb_speed port_speed : 2;
- u32 connected : 1;
- u32 connect_change : 1;
-};
-
-/**
- * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
- *
- * @offset: This is the offset in bytes into the main buffer where this data
- * is stored.
- * @length: This is the length in bytes of the data.
- * @status: This is the status of this individual packet transfer.
- */
-struct cvmx_usb_iso_packet {
- int offset;
- int length;
- enum cvmx_usb_status status;
-};
-
-/**
- * enum cvmx_usb_initialize_flags - flags used by the initialization function
- *
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
- * as clock source at USB_XO and
- * USB_XI.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
- * board clock source at USB_XO.
- * USB_XI should be tied to GND.
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
- * crystal
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
- * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and used polled IO for
- * data transfer use for the USB
- */
-enum cvmx_usb_initialize_flags {
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
- /* Bits 3-4 used to encode the clock frequency */
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
-};
-
-/**
- * enum cvmx_usb_pipe_flags - internal flags for a pipe.
- *
- * @CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
- * actively using hardware.
- * @CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high speed
- * pipe is in the ping state.
- */
-enum cvmx_usb_pipe_flags {
- CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
- CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
-};
-
-/* Maximum number of times to retry failed transactions */
-#define MAX_RETRIES 3
-
-/* Maximum number of hardware channels supported by the USB block */
-#define MAX_CHANNELS 8
-
-/*
- * The low level hardware can transfer a maximum of this number of bytes in each
- * transfer. The field is 19 bits wide
- */
-#define MAX_TRANSFER_BYTES ((1 << 19) - 1)
-
-/*
- * The low level hardware can transfer a maximum of this number of packets in
- * each transfer. The field is 10 bits wide
- */
-#define MAX_TRANSFER_PACKETS ((1 << 10) - 1)
-
-/**
- * Logical transactions may take numerous low level
- * transactions, especially when splits are concerned. This
- * enum represents all of the possible stages a transaction can
- * be in. Note that split completes are always even. This is so
- * the NAK handler can backup to the previous low level
- * transaction with a simple clearing of bit 0.
- */
-enum cvmx_usb_stage {
- CVMX_USB_STAGE_NON_CONTROL,
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
- CVMX_USB_STAGE_SETUP,
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
- CVMX_USB_STAGE_DATA,
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
- CVMX_USB_STAGE_STATUS,
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
-};
-
-/**
- * struct cvmx_usb_transaction - describes each pending USB transaction
- * regardless of type. These are linked together
- * to form a list of pending requests for a pipe.
- *
- * @node: List node for transactions in the pipe.
- * @type: Type of transaction, duplicated of the pipe.
- * @flags: State flags for this transaction.
- * @buffer: User's physical buffer address to read/write.
- * @buffer_length: Size of the user's buffer in bytes.
- * @control_header: For control transactions, physical address of the 8
- * byte standard header.
- * @iso_start_frame: For ISO transactions, the starting frame number.
- * @iso_number_packets: For ISO transactions, the number of packets in the
- * request.
- * @iso_packets: For ISO transactions, the sub packets in the request.
- * @actual_bytes: Actual bytes transfer for this transaction.
- * @stage: For control transactions, the current stage.
- * @urb: URB.
- */
-struct cvmx_usb_transaction {
- struct list_head node;
- enum cvmx_usb_transfer type;
- u64 buffer;
- int buffer_length;
- u64 control_header;
- int iso_start_frame;
- int iso_number_packets;
- struct cvmx_usb_iso_packet *iso_packets;
- int xfersize;
- int pktcnt;
- int retries;
- int actual_bytes;
- enum cvmx_usb_stage stage;
- struct urb *urb;
-};
-
-/**
- * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
- * and some USB device. It contains a list of pending
- * request to the device.
- *
- * @node: List node for pipe list
- * @next: Pipe after this one in the list
- * @transactions: List of pending transactions
- * @interval: For periodic pipes, the interval between packets in
- * frames
- * @next_tx_frame: The next frame this pipe is allowed to transmit on
- * @flags: State flags for this pipe
- * @device_speed: Speed of device connected to this pipe
- * @transfer_type: Type of transaction supported by this pipe
- * @transfer_dir: IN or OUT. Ignored for Control
- * @multi_count: Max packet in a row for the device
- * @max_packet: The device's maximum packet size in bytes
- * @device_addr: USB device address at other end of pipe
- * @endpoint_num: USB endpoint number at other end of pipe
- * @hub_device_addr: Hub address this device is connected to
- * @hub_port: Hub port this device is connected to
- * @pid_toggle: This toggles between 0/1 on every packet send to track
- * the data pid needed
- * @channel: Hardware DMA channel for this pipe
- * @split_sc_frame: The low order bits of the frame number the split
- * complete should be sent on
- */
-struct cvmx_usb_pipe {
- struct list_head node;
- struct list_head transactions;
- u64 interval;
- u64 next_tx_frame;
- enum cvmx_usb_pipe_flags flags;
- enum cvmx_usb_speed device_speed;
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_direction transfer_dir;
- int multi_count;
- u16 max_packet;
- u8 device_addr;
- u8 endpoint_num;
- u8 hub_device_addr;
- u8 hub_port;
- u8 pid_toggle;
- u8 channel;
- s8 split_sc_frame;
-};
-
-struct cvmx_usb_tx_fifo {
- struct {
- int channel;
- int size;
- u64 address;
- } entry[MAX_CHANNELS + 1];
- int head;
- int tail;
-};
-
-/**
- * struct octeon_hcd - the state of the USB block
- *
- * lock: Serialization lock.
- * init_flags: Flags passed to initialize.
- * index: Which USB block this is for.
- * idle_hardware_channels: Bit set for every idle hardware channel.
- * usbcx_hprt: Stored port status so we don't need to read a CSR to
- * determine splits.
- * pipe_for_channel: Map channels to pipes.
- * pipe: Storage for pipes.
- * indent: Used by debug output to indent functions.
- * port_status: Last port status used for change notification.
- * idle_pipes: List of open pipes that have no transactions.
- * active_pipes: Active pipes indexed by transfer type.
- * frame_number: Increments every SOF interrupt for time keeping.
- * active_split: Points to the current active split, or NULL.
- */
-struct octeon_hcd {
- spinlock_t lock; /* serialization lock */
- int init_flags;
- int index;
- int idle_hardware_channels;
- union cvmx_usbcx_hprt usbcx_hprt;
- struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
- int indent;
- struct cvmx_usb_port_status port_status;
- struct list_head idle_pipes;
- struct list_head active_pipes[4];
- u64 frame_number;
- struct cvmx_usb_transaction *active_split;
- struct cvmx_usb_tx_fifo periodic;
- struct cvmx_usb_tx_fifo nonperiodic;
-};
-
-/*
- * This macro logically sets a single field in a CSR. It does the sequence
- * read, modify, and write
- */
-#define USB_SET_FIELD32(address, _union, field, value) \
- do { \
- union _union c; \
- \
- c.u32 = cvmx_usb_read_csr32(usb, address); \
- c.s.field = value; \
- cvmx_usb_write_csr32(usb, address, c.u32); \
- } while (0)
-
-/* Returns the IO address to push/pop stuff data from the FIFOs */
-#define USB_FIFO_ADDRESS(channel, usb_index) \
- (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
-
-/**
- * struct octeon_temp_buffer - a bounce buffer for USB transfers
- * @orig_buffer: the original buffer passed by the USB stack
- * @data: the newly allocated temporary buffer (excluding meta-data)
- *
- * Both the DMA engine and FIFO mode will always transfer full 32-bit words. If
- * the buffer is too short, we need to allocate a temporary one, and this struct
- * represents it.
- */
-struct octeon_temp_buffer {
- void *orig_buffer;
- u8 data[0];
-};
-
-static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
-{
- return container_of((void *)p, struct usb_hcd, hcd_priv);
-}
-
-/**
- * octeon_alloc_temp_buffer - allocate a temporary buffer for USB transfer
- * (if needed)
- * @urb: URB.
- * @mem_flags: Memory allocation flags.
- *
- * This function allocates a temporary bounce buffer whenever it's needed
- * due to HW limitations.
- */
-static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
-{
- struct octeon_temp_buffer *temp;
-
- if (urb->num_sgs || urb->sg ||
- (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) ||
- !(urb->transfer_buffer_length % sizeof(u32)))
- return 0;
-
- temp = kmalloc(ALIGN(urb->transfer_buffer_length, sizeof(u32)) +
- sizeof(*temp), mem_flags);
- if (!temp)
- return -ENOMEM;
-
- temp->orig_buffer = urb->transfer_buffer;
- if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
- urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
- return 0;
-}
-
-/**
- * octeon_free_temp_buffer - free a temporary buffer used by USB transfers.
- * @urb: URB.
- *
- * Frees a buffer allocated by octeon_alloc_temp_buffer().
- */
-static void octeon_free_temp_buffer(struct urb *urb)
-{
- struct octeon_temp_buffer *temp;
- size_t length;
-
- if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
- return;
-
- temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer,
- data);
- if (usb_urb_dir_in(urb)) {
- if (usb_pipeisoc(urb->pipe))
- length = urb->transfer_buffer_length;
- else
- length = urb->actual_length;
-
- memcpy(temp->orig_buffer, urb->transfer_buffer, length);
- }
- urb->transfer_buffer = temp->orig_buffer;
- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
- kfree(temp);
-}
-
-/**
- * octeon_map_urb_for_dma - Octeon-specific map_urb_for_dma().
- * @hcd: USB HCD structure.
- * @urb: URB.
- * @mem_flags: Memory allocation flags.
- */
-static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- int ret;
-
- ret = octeon_alloc_temp_buffer(urb, mem_flags);
- if (ret)
- return ret;
-
- ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
- if (ret)
- octeon_free_temp_buffer(urb);
-
- return ret;
-}
-
-/**
- * octeon_unmap_urb_for_dma - Octeon-specific unmap_urb_for_dma()
- * @hcd: USB HCD structure.
- * @urb: URB.
- */
-static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- usb_hcd_unmap_urb_for_dma(hcd, urb);
- octeon_free_temp_buffer(urb);
-}
-
-/**
- * Read a USB 32bit CSR. It performs the necessary address swizzle
- * for 32bit CSRs and logs the value in a readable format if
- * debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to read
- *
- * Returns: Result of the read
- */
-static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
-{
- return cvmx_read64_uint32(address ^ 4);
-}
-
-/**
- * Write a USB 32bit CSR. It performs the necessary address
- * swizzle for 32bit CSRs and logs the value in a readable format
- * if debugging is on.
- *
- * @usb: USB block this access is for
- * @address: 64bit address to write
- * @value: Value to write
- */
-static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
- u64 address, u32 value)
-{
- cvmx_write64_uint32(address ^ 4, value);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
-}
-
-/**
- * Return non zero if this pipe connects to a non HIGH speed
- * device through a high speed hub.
- *
- * @usb: USB block this access is for
- * @pipe: Pipe to check
- *
- * Returns: Non zero if we need to do split transactions
- */
-static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe)
-{
- return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
- usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
-}
-
-/**
- * Trivial utility function to return the correct PID for a pipe
- *
- * @pipe: pipe to check
- *
- * Returns: PID for pipe
- */
-static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
-{
- if (pipe->pid_toggle)
- return 2; /* Data1 */
- return 0; /* Data0 */
-}
-
-/* Loops through register until txfflsh or rxfflsh become zero.*/
-static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type)
-{
- int result;
- u64 address = CVMX_USBCX_GRSTCTL(usb->index);
- u64 done = cvmx_get_cycle() + 100 *
- (u64)octeon_get_clock_rate / 1000000;
- union cvmx_usbcx_grstctl c;
-
- while (1) {
- c.u32 = cvmx_usb_read_csr32(usb, address);
- if (fflsh_type == 0 && c.s.txfflsh == 0) {
- result = 0;
- break;
- } else if (fflsh_type == 1 && c.s.rxfflsh == 0) {
- result = 0;
- break;
- } else if (cvmx_get_cycle() > done) {
- result = -1;
- break;
- }
-
- __delay(100);
- }
- return result;
-}
-
-static void cvmx_fifo_setup(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
- union cvmx_usbcx_gnptxfsiz npsiz;
- union cvmx_usbcx_hptxfsiz psiz;
-
- usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GHWCFG3(usb->index));
-
- /*
- * Program the USBC_GRXFSIZ register to select the size of the receive
- * FIFO (25%).
- */
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
-
- /*
- * Program the USBC_GNPTXFSIZ register to select the size and the start
- * address of the non-periodic transmit FIFO for nonperiodic
- * transactions (50%).
- */
- npsiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
- npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
- npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), npsiz.u32);
-
- /*
- * Program the USBC_HPTXFSIZ register to select the size and start
- * address of the periodic transmit FIFO for periodic transactions
- * (25%).
- */
- psiz.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
- psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
- psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), psiz.u32);
-
- /* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, txfflsh, 1);
- cvmx_wait_tx_rx(usb, 0);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index),
- cvmx_usbcx_grstctl, rxfflsh, 1);
- cvmx_wait_tx_rx(usb, 1);
-}
-
-/**
- * Shutdown a USB port after a call to cvmx_usb_initialize().
- * The port should be disabled with all pipes closed when this
- * function is called.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_shutdown(struct octeon_hcd *usb)
-{
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
-
- /* Make sure all pipes are closed */
- if (!list_empty(&usb->idle_pipes) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_CONTROL]) ||
- !list_empty(&usb->active_pipes[CVMX_USB_TRANSFER_BULK]))
- return -EBUSY;
-
- /* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.enable = 1;
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hclk_rst = 1;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hrst = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- return 0;
-}
-
-/**
- * Initialize a USB port for use. This must be called before any
- * other access to the Octeon USB port is made. The port starts
- * off in the disabled state.
- *
- * @dev: Pointer to struct device for logging purposes.
- * @usb: Pointer to struct octeon_hcd.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_initialize(struct device *dev,
- struct octeon_hcd *usb)
-{
- int channel;
- int divisor;
- int retries = 0;
- union cvmx_usbcx_hcfg usbcx_hcfg;
- union cvmx_usbnx_clk_ctl usbn_clk_ctl;
- union cvmx_usbcx_gintsts usbc_gintsts;
- union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
- union cvmx_usbcx_gintmsk usbcx_gintmsk;
- union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
- union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
-
-retry:
- /*
- * Power On Reset and PHY Initialization
- *
- * 1. Wait for DCOK to assert (nothing to do)
- *
- * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
- * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
- */
- usbn_clk_ctl.u64 = cvmx_read64_uint64(CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hrst = 0;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hclk_rst = 0;
- usbn_clk_ctl.s.enable = 0;
- /*
- * 2b. Select the USB reference clock/crystal parameters by writing
- * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /*
- * The USB port uses 12/24/48MHz 2.5V board clock
- * source at USB_XO. USB_XI should be tied to GND.
- * Most Octeon evaluation boards require this setting
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
- OCTEON_IS_MODEL(OCTEON_CN56XX) ||
- OCTEON_IS_MODEL(OCTEON_CN50XX))
- /* From CN56XX,CN50XX,CN31XX,CN30XX manuals */
- usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */
- else
- /* From CN52XX manual */
- usbn_clk_ctl.s.p_rtype = 1;
-
- switch (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
- usbn_clk_ctl.s.p_c_sel = 0;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
- usbn_clk_ctl.s.p_c_sel = 1;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
- usbn_clk_ctl.s.p_c_sel = 2;
- break;
- }
- } else {
- /*
- * The USB port uses a 12MHz crystal as clock source
- * at USB_XO and USB_XI
- */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
- /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */
- else
- /* From CN56XX,CN52XX,CN50XX manuals. */
- usbn_clk_ctl.s.p_rtype = 0;
-
- usbn_clk_ctl.s.p_c_sel = 0;
- }
- /*
- * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
- * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
- * such that USB is as close as possible to 125Mhz
- */
- divisor = DIV_ROUND_UP(octeon_get_clock_rate(), 125000000);
- /* Lower than 4 doesn't seem to work properly */
- if (divisor < 4)
- divisor = 4;
- usbn_clk_ctl.s.divide = divisor;
- usbn_clk_ctl.s.divide2 = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
-
- /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
- usbn_clk_ctl.s.hclk_rst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
- __delay(64);
- /*
- * 3. Program the power-on reset field in the USBN clock-control
- * register:
- * USBN_CLK_CTL[POR] = 0
- */
- usbn_clk_ctl.s.por = 0;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 4. Wait 1 ms for PHY clock to start */
- mdelay(1);
- /*
- * 5. Program the Reset input from automatic test equipment field in the
- * USBP control and status register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
- */
- usbn_usbp_ctl_status.u64 =
- cvmx_read64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index));
- usbn_usbp_ctl_status.s.ate_reset = 1;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 6. Wait 10 cycles */
- __delay(10);
- /*
- * 7. Clear ATE_RESET field in the USBN clock-control register:
- * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
- */
- usbn_usbp_ctl_status.s.ate_reset = 0;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /*
- * 8. Program the PHY reset field in the USBN clock-control register:
- * USBN_CLK_CTL[PRST] = 1
- */
- usbn_clk_ctl.s.prst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /*
- * 9. Program the USBP control and status register to select host or
- * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
- * device
- */
- usbn_usbp_ctl_status.s.hst_mode = 0;
- cvmx_write64_uint64(CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 10. Wait 1 us */
- udelay(1);
- /*
- * 11. Program the hreset_n field in the USBN clock-control register:
- * USBN_CLK_CTL[HRST] = 1
- */
- usbn_clk_ctl.s.hrst = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- /* 12. Proceed to USB core initialization */
- usbn_clk_ctl.s.enable = 1;
- cvmx_write64_uint64(CVMX_USBNX_CLK_CTL(usb->index), usbn_clk_ctl.u64);
- udelay(1);
-
- /*
- * USB Core Initialization
- *
- * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
- * determine USB core configuration parameters.
- *
- * Nothing needed
- *
- * 2. Program the following fields in the global AHB configuration
- * register (USBC_GAHBCFG)
- * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
- * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
- * Nonperiodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[NPTXFEMPLVL]
- * Periodic TxFIFO empty level (slave mode only),
- * USBC_GAHBCFG[PTXFEMPLVL]
- * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
- */
- usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
- usbcx_gahbcfg.s.hbstlen = 0;
- usbcx_gahbcfg.s.nptxfemplvl = 1;
- usbcx_gahbcfg.s.ptxfemplvl = 1;
- usbcx_gahbcfg.s.glblintrmsk = 1;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
- usbcx_gahbcfg.u32);
-
- /*
- * 3. Program the following fields in USBC_GUSBCFG register.
- * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
- * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
- * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
- * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
- */
- usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GUSBCFG(usb->index));
- usbcx_gusbcfg.s.toutcal = 0;
- usbcx_gusbcfg.s.ddrsel = 0;
- usbcx_gusbcfg.s.usbtrdtim = 0x5;
- usbcx_gusbcfg.s.phylpwrclksel = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
- usbcx_gusbcfg.u32);
-
- /*
- * 4. The software must unmask the following bits in the USBC_GINTMSK
- * register.
- * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
- * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
- */
- usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTMSK(usb->index));
- usbcx_gintmsk.s.otgintmsk = 1;
- usbcx_gintmsk.s.modemismsk = 1;
- usbcx_gintmsk.s.hchintmsk = 1;
- usbcx_gintmsk.s.sofmsk = 0;
- /* We need RX FIFO interrupts if we don't have DMA */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usbcx_gintmsk.s.rxflvlmsk = 1;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
- usbcx_gintmsk.u32);
-
- /*
- * Disable all channel interrupts. We'll enable them per channel later.
- */
- for (channel = 0; channel < 8; channel++)
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- 0);
-
- /*
- * Host Port Initialization
- *
- * 1. Program the host-port interrupt-mask field to unmask,
- * USBC_GINTMSK[PRTINT] = 1
- */
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, disconnintmsk, 1);
-
- /*
- * 2. Program the USBC_HCFG register to select full-speed host
- * or high-speed host.
- */
- usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
- usbcx_hcfg.s.fslssupp = 0;
- usbcx_hcfg.s.fslspclksel = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
-
- cvmx_fifo_setup(usb);
-
- /*
- * If the controller is getting port events right after the reset, it
- * means the initialization failed. Try resetting the controller again
- * in that case. This has been seen to happen after cold boot on DSR-1000N.
- */
- usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTSTS(usb->index));
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
- usbc_gintsts.u32);
- dev_dbg(dev, "gintsts after reset: 0x%x\n", (int)usbc_gintsts.u32);
- if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint)
- return 0;
- if (retries++ >= 5)
- return -EAGAIN;
- dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying\n",
- (int)usbc_gintsts.u32);
- msleep(50);
- cvmx_usb_shutdown(usb);
- msleep(50);
- goto retry;
-}
-
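-/*
- * A note on the retry budget above: each failed attempt costs two 50 ms
- * sleeps, so a controller that never initializes adds roughly half a
- * second (5 retries * 100 ms) on top of the init time itself before
- * -EAGAIN is returned.
- */
-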
-/**
- * Reset a USB port. After this call succeeds, the USB port is
- * online and servicing requests.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_reset_port(struct octeon_hcd *usb)
-{
- usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
-
- /* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtrst, 1);
-
- /*
- * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
- * process to complete.
- */
- mdelay(50);
-
- /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtrst, 0);
-
- /*
- * Read the port speed field to get the enumerated speed,
- * USBC_HPRT[PRTSPD].
- */
- usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
-}
-
-/**
- * Disable a USB port. After this call the USB port will not
- * generate data transfers and will not generate events.
- * Transactions in progress will fail and call their
- * associated callbacks.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_disable(struct octeon_hcd *usb)
-{
- /*
- * Disable the port. HPRT[PRTENA] is a write-1-to-clear bit, so
- * writing 1 here disables the port rather than enabling it.
- */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
- prtena, 1);
- return 0;
-}
-
-/**
- * Get the current state of the USB port. Use this call to
- * determine if the USB port has anything connected, is enabled,
- * or has some sort of error condition. The return value of this
- * call has "changed" bits that signal whether the values of some
- * fields have changed between calls.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: Port status information
- */
-static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_hprt usbc_hprt;
- struct cvmx_usb_port_status result;
-
- memset(&result, 0, sizeof(result));
-
- usbc_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- result.port_enabled = usbc_hprt.s.prtena;
- result.port_over_current = usbc_hprt.s.prtovrcurract;
- result.port_powered = usbc_hprt.s.prtpwr;
- result.port_speed = usbc_hprt.s.prtspd;
- result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change =
- result.connected != usb->port_status.connected;
-
- return result;
-}
-
-/**
- * Open a virtual pipe between the host and a USB device. A pipe
- * must be opened before data can be transferred between a device
- * and Octeon.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @device_addr:
- * USB device address to open the pipe to
- * (0-127).
- * @endpoint_num:
- * USB endpoint number to open the pipe to
- * (0-15).
- * @device_speed:
- * The speed of the device the pipe is going
- * to. This must match the device's speed,
- * which may be different than the port speed.
- * @max_packet: The maximum packet length the device can
- * transmit/receive (low speed=0-8, full
- * speed=0-1023, high speed=0-1024). This value
- * comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <10:0>.
- * @transfer_type:
- * The type of transfer this pipe is for.
- * @transfer_dir:
- * The direction the pipe is in. This is not
- * used for control pipes.
- * @interval: For ISOCHRONOUS and INTERRUPT transfers,
- * this is how often the transfer is scheduled
- * for. All other transfers should specify
- * zero. The units are in frames (8000/sec at
- * high speed, 1000/sec for full speed).
- * @multi_count:
- * For high speed devices, this is the maximum
- * allowed number of packets per microframe.
- * Specify zero for non high speed devices. This
- * value comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <12:11>.
- * @hub_device_addr:
- * Hub device address this device is connected
- * to. Devices connected directly to Octeon
- * use zero. This is only used when the device
- * is full/low speed behind a high speed hub.
- * The address will be of the high speed hub,
- * not any full speed hubs after it.
- * @hub_port: Which port on the hub the device is
- * connected to. Use zero for devices connected
- * directly to Octeon. Like hub_device_addr,
- * this is only used for full/low speed
- * devices behind a high speed hub.
- *
- * Returns: A non-NULL value is a pipe. NULL means an error.
- */
-static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
- int device_addr,
- int endpoint_num,
- enum cvmx_usb_speed
- device_speed,
- int max_packet,
- enum cvmx_usb_transfer
- transfer_type,
- enum cvmx_usb_direction
- transfer_dir,
- int interval, int multi_count,
- int hub_device_addr,
- int hub_port)
-{
- struct cvmx_usb_pipe *pipe;
-
- pipe = kzalloc(sizeof(*pipe), GFP_ATOMIC);
- if (!pipe)
- return NULL;
- if ((device_speed == CVMX_USB_SPEED_HIGH) &&
- (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (transfer_type == CVMX_USB_TRANSFER_BULK))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
- pipe->device_addr = device_addr;
- pipe->endpoint_num = endpoint_num;
- pipe->device_speed = device_speed;
- pipe->max_packet = max_packet;
- pipe->transfer_type = transfer_type;
- pipe->transfer_dir = transfer_dir;
- INIT_LIST_HEAD(&pipe->transactions);
-
- /*
- * All pipes use interval to rate limit NAK processing. Force an
- * interval if one wasn't supplied
- */
- if (!interval)
- interval = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- pipe->interval = interval * 8;
- /* Force start splits to be scheduled on uFrame 0 */
- pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
- pipe->interval;
- } else {
- pipe->interval = interval;
- pipe->next_tx_frame = usb->frame_number + pipe->interval;
- }
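- /*
- * Worked example of the rounding above: with frame_number = 21 (a
- * uframe count) and interval = 1 frame, the split case yields
- * pipe->interval = 8 and next_tx_frame = ((21 + 7) & ~7) + 8 = 32,
- * i.e. uFrame 0 of a later frame.
- */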
- pipe->multi_count = multi_count;
- pipe->hub_device_addr = hub_device_addr;
- pipe->hub_port = hub_port;
- pipe->pid_toggle = 0;
- pipe->split_sc_frame = -1;
- list_add_tail(&pipe->node, &usb->idle_pipes);
-
- /*
- * We don't need to tell the hardware about this pipe yet since
- * it doesn't have any submitted requests
- */
-
- return pipe;
-}
-
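-/*
- * Usage sketch (hypothetical caller, not code from this driver): open
- * the default control pipe of a newly attached full-speed device that
- * hangs directly off the root port:
- *
- *	pipe = cvmx_usb_open_pipe(usb, 0, 0, CVMX_USB_SPEED_FULL, 8,
- *				  CVMX_USB_TRANSFER_CONTROL,
- *				  CVMX_USB_DIRECTION_OUT, 0, 0, 0, 0);
- *	if (!pipe)
- *		return -ENOMEM;
- */
-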
-/**
- * Poll the RX FIFOs and remove data as needed. This function is only used
- * in non-DMA mode. It is very important that this function be called quickly
- * enough to prevent FIFO overflow.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_grxstsph rx_status;
- int channel;
- int bytes;
- u64 address;
- u32 *ptr;
-
- rx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GRXSTSPH(usb->index));
- /* Only read data if IN data is there */
- if (rx_status.s.pktsts != 2)
- return;
- /* Check if no data is available */
- if (!rx_status.s.bcnt)
- return;
-
- channel = rx_status.s.chnum;
- bytes = rx_status.s.bcnt;
- if (!bytes)
- return;
-
- /* Get where the DMA engine would have written this data */
- address = cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) +
- channel * 8);
-
- ptr = cvmx_phys_to_ptr(address);
- cvmx_write64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel * 8,
- address + bytes);
-
- /* Loop writing the FIFO data for this packet into memory */
- while (bytes > 0) {
- *ptr++ = cvmx_usb_read_csr32(usb,
- USB_FIFO_ADDRESS(channel, usb->index));
- bytes -= 4;
- }
- CVMX_SYNCW;
-}
-
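-/*
- * Note that the copy loop above rounds up to whole 32-bit words: a
- * 7-byte packet (bcnt = 7) performs two FIFO reads and stores 8 bytes
- * at the DMA address, so buffers are assumed to tolerate up to 3 bytes
- * of overrun.
- */
-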
-/**
- * Fill the TX hardware fifo with data out of the software
- * fifos
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @fifo: Software fifo to use
- * @available: Amount of space in the hardware fifo
- *
- * Returns: Non zero if the hardware fifo was too small and needs
- * to be serviced again.
- */
-static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
- struct cvmx_usb_tx_fifo *fifo, int available)
-{
- /*
- * We're done either when there isn't any more space or the software FIFO
- * is empty
- */
- while (available && (fifo->head != fifo->tail)) {
- int i = fifo->tail;
- const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
- usb->index) ^ 4;
- int words = available;
-
- /* Limit the amount of data to what the SW fifo has */
- if (fifo->entry[i].size <= available) {
- words = fifo->entry[i].size;
- fifo->tail++;
- if (fifo->tail > MAX_CHANNELS)
- fifo->tail = 0;
- }
-
- /* Update the next locations and counts */
- available -= words;
- fifo->entry[i].address += words * 4;
- fifo->entry[i].size -= words;
-
- /*
- * Write the HW fifo data. The read after every three writes is due
- * to an erratum on CN3XXX chips
- */
- while (words > 3) {
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(
- CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- words -= 3;
- }
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words) {
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words)
- cvmx_write64_uint32(csr_address, *ptr++);
- }
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- }
- return fifo->head != fifo->tail;
-}
-
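-/*
- * The software fifo above is a ring indexed by head/tail over
- * MAX_CHANNELS + 1 slots: head == tail means empty, and the wrap at
- * MAX_CHANNELS leaves capacity for MAX_CHANNELS entries, i.e. one
- * queued entry per hardware channel, which appears to be the most that
- * can ever be outstanding.
- */
-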
-/**
- * Check the hardware FIFOs and fill them as needed
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- */
-static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
-{
- if (usb->periodic.head != usb->periodic.tail) {
- union cvmx_usbcx_hptxsts tx_status;
-
- tx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPTXSTS(usb->index));
- if (cvmx_usb_fill_tx_hw(usb, &usb->periodic,
- tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, ptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, ptxfempmsk, 0);
- }
-
- if (usb->nonperiodic.head != usb->nonperiodic.tail) {
- union cvmx_usbcx_gnptxsts tx_status;
-
- tx_status.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GNPTXSTS(usb->index));
- if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic,
- tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, nptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, nptxfempmsk, 0);
- }
-}
-
-/**
- * Fill the TX FIFO with an outgoing packet
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel number to get packet from
- */
-static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
-{
- union cvmx_usbcx_hccharx hcchar;
- union cvmx_usbcx_hcspltx usbc_hcsplt;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- struct cvmx_usb_tx_fifo *fifo;
-
- /* We only need to fill data on outbound channels */
- hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
- if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
- return;
-
- /* OUT Splits only have data on the start and not the complete */
- usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCSPLTX(channel, usb->index));
- if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
- return;
-
- /*
- * Find out how many bytes we need to fill and convert that into 32-bit
- * words.
- */
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
- if (!usbc_hctsiz.s.xfersize)
- return;
-
- if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
- (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
- fifo = &usb->periodic;
- else
- fifo = &usb->nonperiodic;
-
- fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address =
- cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
- channel * 8);
- fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
- fifo->head++;
- if (fifo->head > MAX_CHANNELS)
- fifo->head = 0;
-
- cvmx_usb_poll_tx_fifo(usb);
-}
-
-/**
- * Perform channel specific setup for Control transactions. All
- * the generic stuff will already have been done in cvmx_usb_start_channel().
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe for control transaction
- */
-static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
- int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct usb_hcd *hcd = octeon_to_hcd(usb);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_transaction *transaction =
- list_first_entry(&pipe->transactions, typeof(*transaction),
- node);
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length -
- transaction->actual_bytes;
- int packets_to_transfer;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
-
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- dev_err(dev, "%s: ERROR - Non control stage\n", __func__);
- break;
- case CVMX_USB_STAGE_SETUP:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = sizeof(*header);
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- CVMX_USB_DIRECTION_OUT);
- /*
- * Setup sends the control header instead of the buffer data. The
- * buffer data will be used in the next stage
- */
- cvmx_write64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
- channel * 8,
- transaction->control_header);
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = 0;
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- CVMX_USB_DIRECTION_OUT);
-
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_DATA:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (header->bRequestType & USB_DIR_IN)
- bytes_to_transfer = 0;
- else if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
- }
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- if (!(header->bRequestType & USB_DIR_IN))
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- case CVMX_USB_STAGE_STATUS:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, epdir,
- ((header->bRequestType & USB_DIR_IN) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index),
- cvmx_usbcx_hcspltx, compsplt, 1);
- break;
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the hardware.
- * Further bytes will be sent as continued transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /* Round MAX_TRANSFER_BYTES down to a multiple of the packet size */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
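- /*
- * Worked example, assuming MAX_TRANSFER_BYTES is the 19-bit
- * xfersize limit (524287): with max_packet = 512 the clamp
- * becomes (524287 / 512) * 512 = 523776 bytes, a whole number
- * of max-size packets.
- */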
-
- /*
- * Calculate the number of packets to transfer. If the length is zero
- * we still need to transfer one packet
- */
- packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
- pipe->max_packet);
- if (packets_to_transfer == 0) {
- packets_to_transfer = 1;
- } else if ((packets_to_transfer > 1) &&
- (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must be
- * restarted between every packet for IN transactions, so there
- * is no reason to do multiple packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
- * Limit the number of packets and data transferred to what the
- * hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index),
- usbc_hctsiz.u32);
-}
-
-/**
- * Start a channel to perform the pipe's head transaction
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @channel: Channel to setup
- * @pipe: Pipe to start
- */
-static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction =
- list_first_entry(&pipe->transactions, typeof(*transaction),
- node);
-
- /* Make sure all writes to the DMA region get flushed */
- CVMX_SYNCW;
-
- /* Attach the channel to the pipe */
- usb->pipe_for_channel[channel] = pipe;
- pipe->channel = channel;
- pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /* Mark this channel as in use */
- usb->idle_hardware_channels &= ~(1 << channel);
-
- /* Enable the channel interrupt bits */
- {
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hcintmskx usbc_hcintmsk;
- union cvmx_usbcx_haintmsk usbc_haintmsk;
-
- /* Clear all channel status bits */
- usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index));
-
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index),
- usbc_hcint.u32);
-
- usbc_hcintmsk.u32 = 0;
- usbc_hcintmsk.s.chhltdmsk = 1;
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /*
- * Channels need these extra interrupts when we aren't
- * in DMA mode.
- */
- usbc_hcintmsk.s.datatglerrmsk = 1;
- usbc_hcintmsk.s.frmovrunmsk = 1;
- usbc_hcintmsk.s.bblerrmsk = 1;
- usbc_hcintmsk.s.xacterrmsk = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * Splits don't generate xfercompl, so we need
- * ACK and NYET.
- */
- usbc_hcintmsk.s.nyetmsk = 1;
- usbc_hcintmsk.s.ackmsk = 1;
- }
- usbc_hcintmsk.s.nakmsk = 1;
- usbc_hcintmsk.s.stallmsk = 1;
- usbc_hcintmsk.s.xfercomplmsk = 1;
- }
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- usbc_hcintmsk.u32);
-
- /* Enable the channel interrupt to propagate */
- usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HAINTMSK(usb->index));
- usbc_haintmsk.s.haintmsk |= 1 << channel;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
- usbc_haintmsk.u32);
- }
-
- /* Setup the location the DMA engine uses. */
- {
- u64 reg;
- u64 dma_address = transaction->buffer +
- transaction->actual_bytes;
-
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- dma_address = transaction->buffer +
- transaction->iso_packets[0].offset +
- transaction->actual_bytes;
-
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)
- reg = CVMX_USBNX_DMA0_OUTB_CHN0(usb->index);
- else
- reg = CVMX_USBNX_DMA0_INB_CHN0(usb->index);
- cvmx_write64_uint64(reg + channel * 8, dma_address);
- }
-
- /* Setup both the size of the transfer and the SPLIT characteristics */
- {
- union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
- int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length -
- transaction->actual_bytes;
-
- /*
- * ISOCHRONOUS transactions store each individual transfer size
- * in the packet structure, not the global buffer_length
- */
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer =
- transaction->iso_packets[0].length -
- transaction->actual_bytes;
-
- /*
- * We need to do split transactions when we are talking to non
- * high speed devices that are behind a high speed hub
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * On the start split phase (stage is even) record the
- * frame number we will need to send the split complete.
- * We only store the lower seven bits since the window
- * check in cvmx_usb_find_ready_pipe() works modulo 128
- */
- if ((transaction->stage & 1) == 0) {
- if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame =
- (usb->frame_number + 1) & 0x7f;
- else
- pipe->split_sc_frame =
- (usb->frame_number + 2) & 0x7f;
- } else {
- pipe->split_sc_frame = -1;
- }
-
- usbc_hcsplt.s.spltena = 1;
- usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
- usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
-
- /*
- * SPLIT transactions can only ever transmit one data
- * packet so limit the transfer size to the max packet
- * size
- */
- if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
-
- /*
- * ISOCHRONOUS OUT splits are unique in that they limit
- * data transfers to 188 byte chunks representing the
- * beginning, middle, or end of the data, or all of it
- */
- if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type ==
- CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /*
- * Clear the split complete frame number as
- * there isn't going to be a split complete
- */
- pipe->split_sc_frame = -1;
- /*
- * See if we've started this transfer and sent
- * data
- */
- if (transaction->actual_bytes == 0) {
- /*
- * Nothing sent yet, this is either a
- * begin or the entire payload
- */
- if (bytes_to_transfer <= 188)
- /* Entire payload in one go */
- usbc_hcsplt.s.xactpos = 3;
- else
- /* First part of payload */
- usbc_hcsplt.s.xactpos = 2;
- } else {
- /*
- * Continuing the previous data, we must
- * either be in the middle or at the end
- */
- if (bytes_to_transfer <= 188)
- /* End of payload */
- usbc_hcsplt.s.xactpos = 1;
- else
- /* Middle of payload */
- usbc_hcsplt.s.xactpos = 0;
- }
- /*
- * Again, the transfer size is limited to 188
- * bytes
- */
- if (bytes_to_transfer > 188)
- bytes_to_transfer = 188;
- }
- }
-
- /*
- * Make sure the transfer never exceeds the byte limit of the
- * hardware. Further bytes will be sent as continued
- * transactions
- */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /*
- * Round MAX_TRANSFER_BYTES down to a multiple of the
- * packet size
- */
- bytes_to_transfer = MAX_TRANSFER_BYTES /
- pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /*
- * Calculate the number of packets to transfer. If the length is
- * zero we still need to transfer one packet
- */
- packets_to_transfer =
- DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
- if (packets_to_transfer == 0) {
- packets_to_transfer = 1;
- } else if ((packets_to_transfer > 1) &&
- (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /*
- * Limit to one packet when not using DMA. Channels must
- * be restarted between every packet for IN
- * transactions, so there is no reason to do multiple
- * packets in a row
- */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer *
- pipe->max_packet;
- } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /*
- * Limit the number of packets and data transferred to
- * what the hardware can handle
- */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer *
- pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- /* Update the DATA0/DATA1 toggle */
- usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe);
- /*
- * High speed pipes may need a hardware ping before they start
- */
- if (pipe->flags & CVMX_USB_PIPE_FLAGS_NEED_PING)
- usbc_hctsiz.s.dopng = 1;
-
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCSPLTX(channel, usb->index),
- usbc_hcsplt.u32);
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index),
- usbc_hctsiz.u32);
- }
-
- /* Setup the Host Channel Characteristics Register */
- {
- union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
-
- /*
- * Set the startframe odd/even properly. This is only used for
- * periodic transfers
- */
- usbc_hcchar.s.oddfrm = usb->frame_number & 1;
-
- /*
- * Set the number of back to back packets allowed by this
- * endpoint. Split transactions interpret "ec" as the number of
- * immediate retries on failure. These retries happen too
- * quickly, so we disable these entirely for splits
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count < 1)
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count > 3)
- usbc_hcchar.s.ec = 3;
- else
- usbc_hcchar.s.ec = pipe->multi_count;
-
- /* Set the rest of the endpoint specific settings */
- usbc_hcchar.s.devaddr = pipe->device_addr;
- usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev =
- (pipe->device_speed == CVMX_USB_SPEED_LOW);
- usbc_hcchar.s.epdir = pipe->transfer_dir;
- usbc_hcchar.s.epnum = pipe->endpoint_num;
- usbc_hcchar.s.mps = pipe->max_packet;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index),
- usbc_hcchar.u32);
- }
-
- /* Do transaction type specific fixups as needed */
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- cvmx_usb_start_channel_control(usb, channel, pipe);
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISO transactions require different PIDs depending on
- * direction and how many packets are needed
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(
- CVMX_USBCX_HCTSIZX(channel,
- usb->index),
- cvmx_usbcx_hctsizx, pid, 0);
- else /* Need MDATA */
- USB_SET_FIELD32(
- CVMX_USBCX_HCTSIZX(channel,
- usb->index),
- cvmx_usbcx_hctsizx, pid, 3);
- }
- }
- break;
- }
- {
- union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
- cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel,
- usb->index))
- };
- transaction->xfersize = usbc_hctsiz.s.xfersize;
- transaction->pktcnt = usbc_hctsiz.s.pktcnt;
- }
- /* Remember when we start a split transaction */
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx, chena, 1);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_fill_tx_fifo(usb, channel);
-}
-
-/**
- * Find a pipe that is ready to be scheduled to hardware.
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @xfer_type: Transfer type
- *
- * Returns: Pipe or NULL if none are ready
- */
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
- enum cvmx_usb_transfer xfer_type)
-{
- struct list_head *list = usb->active_pipes + xfer_type;
- u64 current_frame = usb->frame_number;
- struct cvmx_usb_pipe *pipe;
-
- list_for_each_entry(pipe, list, node) {
- struct cvmx_usb_transaction *t =
- list_first_entry(&pipe->transactions, typeof(*t),
- node);
- if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
- (pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) ||
- ((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
- 0x40)) &&
- (!usb->active_split || (usb->active_split == t))) {
- prefetch(t);
- return pipe;
- }
- }
- return NULL;
-}
-
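-/*
- * The split-window test above is modular arithmetic on the 7-bit frame
- * counter. Example: with split_sc_frame = 0x7e and the current frame's
- * low bits at 0x01, (0x01 - 0x7e) & 0x7f = 0x03 < 0x40, so the complete
- * split is still inside the allowed half-window even across the wrap.
- */
-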
-static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
- int is_sof)
-{
- struct cvmx_usb_pipe *pipe;
-
- /* Find a pipe needing service. */
- if (is_sof) {
- /*
- * Only process periodic pipes on SOF interrupts. This way we
- * are sure that the periodic data is sent at the beginning of
- * the frame.
- */
- pipe = cvmx_usb_find_ready_pipe(usb,
- CVMX_USB_TRANSFER_ISOCHRONOUS);
- if (pipe)
- return pipe;
- pipe = cvmx_usb_find_ready_pipe(usb,
- CVMX_USB_TRANSFER_INTERRUPT);
- if (pipe)
- return pipe;
- }
- pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
- if (pipe)
- return pipe;
- return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
-}
-
-/**
- * Called whenever a pipe might need to be scheduled to the
- * hardware.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @is_sof: True if this schedule was called on a SOF interrupt.
- */
-static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
-{
- int channel;
- struct cvmx_usb_pipe *pipe;
- int need_sof;
- enum cvmx_usb_transfer ttype;
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /*
- * Without DMA we need to be careful to not schedule something
- * at the end of a frame and cause an overrun.
- */
- union cvmx_usbcx_hfnum hfnum = {
- .u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HFNUM(usb->index))
- };
-
- union cvmx_usbcx_hfir hfir = {
- .u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HFIR(usb->index))
- };
-
- if (hfnum.s.frrem < hfir.s.frint / 4)
- goto done;
- }
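- /*
- * hfir.s.frint is the frame interval in PHY clocks and
- * hfnum.s.frrem counts the clocks remaining, so the test above
- * skips scheduling once less than a quarter of the current
- * (micro)frame is left.
- */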
-
- while (usb->idle_hardware_channels) {
- /* Find an idle channel */
- channel = __fls(usb->idle_hardware_channels);
- if (unlikely(channel > 7))
- break;
-
- pipe = cvmx_usb_next_pipe(usb, is_sof);
- if (!pipe)
- break;
-
- cvmx_usb_start_channel(usb, channel, pipe);
- }
-
-done:
- /*
- * Only enable SOF interrupts when we have transactions pending in the
- * future that might need to be scheduled
- */
- need_sof = 0;
- for (ttype = CVMX_USB_TRANSFER_CONTROL;
- ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
- list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
- if (pipe->next_tx_frame > usb->frame_number) {
- need_sof = 1;
- break;
- }
- }
- }
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
- cvmx_usbcx_gintmsk, sofmsk, need_sof);
-}
-
-static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
- enum cvmx_usb_status status,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction
- *transaction,
- int bytes_transferred,
- struct urb *urb)
-{
- struct usb_hcd *hcd = octeon_to_hcd(usb);
- struct device *dev = hcd->self.controller;
-
- if (likely(status == CVMX_USB_STATUS_OK))
- urb->actual_length = bytes_transferred;
- else
- urb->actual_length = 0;
-
- urb->hcpriv = NULL;
-
- /*
- * For Isochronous transactions we need to update the URB packet
- * status list from data in our private copy
- */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- int i;
- /*
- * The pointer to the private list is stored in the setup_packet
- * field.
- */
- struct cvmx_usb_iso_packet *iso_packet =
- (struct cvmx_usb_iso_packet *)urb->setup_packet;
- /* Recalculate the transfer size by adding up each packet */
- urb->actual_length = 0;
- for (i = 0; i < urb->number_of_packets; i++) {
- if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
- urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length =
- iso_packet[i].length;
- urb->actual_length +=
- urb->iso_frame_desc[i].actual_length;
- } else {
- dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%p transaction=%p size=%d\n",
- i, urb->number_of_packets,
- iso_packet[i].status, pipe,
- transaction, iso_packet[i].length);
- urb->iso_frame_desc[i].status = -EREMOTEIO;
- }
- }
- /* Free the private list now that we don't need it anymore */
- kfree(iso_packet);
- urb->setup_packet = NULL;
- }
-
- switch (status) {
- case CVMX_USB_STATUS_OK:
- urb->status = 0;
- break;
- case CVMX_USB_STATUS_CANCEL:
- if (urb->status == 0)
- urb->status = -ENOENT;
- break;
- case CVMX_USB_STATUS_STALL:
- dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EPIPE;
- break;
- case CVMX_USB_STATUS_BABBLEERR:
- dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EPIPE;
- break;
- case CVMX_USB_STATUS_SHORT:
- dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
- pipe, transaction, bytes_transferred);
- urb->status = -EREMOTEIO;
- break;
- case CVMX_USB_STATUS_ERROR:
- case CVMX_USB_STATUS_XACTERR:
- case CVMX_USB_STATUS_DATATGLERR:
- case CVMX_USB_STATUS_FRAMEERR:
- dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
- status, pipe, transaction, bytes_transferred);
- urb->status = -EPROTO;
- break;
- }
- usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
- spin_unlock(&usb->lock);
- usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
- spin_lock(&usb->lock);
-}
-
-/**
- * Signal the completion of a transaction and free it. The
- * transaction will be removed from the pipe transaction list.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe the transaction is on
- * @transaction:
- * Transaction that completed
- * @complete_code:
- * Completion code
- */
-static void cvmx_usb_complete(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_status complete_code)
-{
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
-
- /*
- * Isochronous transactions need extra processing as they might not be
- * done after a single data transfer
- */
- if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /* Update the number of bytes transferred in this ISO packet */
- transaction->iso_packets[0].length = transaction->actual_bytes;
- transaction->iso_packets[0].status = complete_code;
-
- /*
- * If there are more ISOs pending and we succeeded, schedule the
- * next one
- */
- if ((transaction->iso_number_packets > 1) &&
- (complete_code == CVMX_USB_STATUS_OK)) {
- /* No bytes transferred for this packet as of yet */
- transaction->actual_bytes = 0;
- /* One less ISO waiting to transfer */
- transaction->iso_number_packets--;
- /* Increment to the next location in our packet array */
- transaction->iso_packets++;
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- return;
- }
- }
-
- /* Remove the transaction from the pipe list */
- list_del(&transaction->node);
- if (list_empty(&pipe->transactions))
- list_move_tail(&pipe->node, &usb->idle_pipes);
- octeon_usb_urb_complete_callback(usb, complete_code, pipe,
- transaction,
- transaction->actual_bytes,
- transaction->urb);
- kfree(transaction);
-}
-
-/**
- * Submit a USB transaction to a pipe. Called for all types
- * of transactions.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Which pipe to submit to.
- * @type: Transaction type
- * @buffer: User buffer for the transaction
- * @buffer_length:
- * User buffer's length in bytes
- * @control_header:
- * For control transactions, the 8 byte standard header
- * @iso_start_frame:
- * For ISO transactions, the start frame
- * @iso_number_packets:
- * For ISO, the number of packets in the transaction.
- * @iso_packets:
- * A description of each ISO packet
- * @urb: URB for the callback
- *
- * Returns: Transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- enum cvmx_usb_transfer type,
- u64 buffer,
- int buffer_length,
- u64 control_header,
- int iso_start_frame,
- int iso_number_packets,
- struct cvmx_usb_iso_packet *iso_packets,
- struct urb *urb)
-{
- struct cvmx_usb_transaction *transaction;
-
- if (unlikely(pipe->transfer_type != type))
- return NULL;
-
- transaction = kzalloc(sizeof(*transaction), GFP_ATOMIC);
- if (unlikely(!transaction))
- return NULL;
-
- transaction->type = type;
- transaction->buffer = buffer;
- transaction->buffer_length = buffer_length;
- transaction->control_header = control_header;
- /* FIXME: This is not used, implement it. */
- transaction->iso_start_frame = iso_start_frame;
- transaction->iso_number_packets = iso_number_packets;
- transaction->iso_packets = iso_packets;
- transaction->urb = urb;
- if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
- transaction->stage = CVMX_USB_STAGE_SETUP;
- else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-
- if (!list_empty(&pipe->transactions)) {
- list_add_tail(&transaction->node, &pipe->transactions);
- } else {
- list_add_tail(&transaction->node, &pipe->transactions);
- list_move_tail(&pipe->node,
- &usb->active_pipes[pipe->transfer_type]);
-
- /*
- * We may need to schedule the pipe if this was the first
- * transaction queued on it.
- */
- cvmx_usb_schedule(usb, 0);
- }
-
- return transaction;
-}
-
-/**
- * Call to submit a USB Bulk transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_BULK,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-/**
- * Call to submit a USB Interrupt transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_INTERRUPT,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-/**
- * Call to submit a USB Control transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_control(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- int buffer_length = urb->transfer_buffer_length;
- u64 control_header = urb->setup_dma;
- struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
-
- if ((header->bRequestType & USB_DIR_IN) == 0)
- buffer_length = le16_to_cpu(header->wLength);
-
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_CONTROL,
- urb->transfer_dma, buffer_length,
- control_header,
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- urb);
-}
-
-/**
- * Call to submit a USB Isochronous transfer to a pipe.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Handle to the pipe for the transfer.
- * @urb: URB returned when the callback is called.
- *
- * Returns: A submitted transaction or NULL on failure.
- */
-static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
- struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct urb *urb)
-{
- struct cvmx_usb_iso_packet *packets;
-
- packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
- return cvmx_usb_submit_transaction(usb, pipe,
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- 0, /* control_header */
- urb->start_frame,
- urb->number_of_packets,
- packets, urb);
-}
-
-/**
- * Cancel one outstanding request in a pipe. Canceling a request
- * can fail if the transaction has already completed before cancel
- * is called. Even after a successful cancel call, it may take
- * a frame or two for the cvmx_usb_poll() function to call the
- * associated callback.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to cancel requests in.
- * @transaction: Transaction to cancel, returned by the submit function.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction)
-{
- /*
- * If the transaction is the HEAD of the queue and scheduled, we need
- * to treat it specially
- */
- if (list_first_entry(&pipe->transactions, typeof(*transaction), node) ==
- transaction && (pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
- union cvmx_usbcx_hccharx usbc_hcchar;
-
- usb->pipe_for_channel[pipe->channel] = NULL;
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- CVMX_SYNCW;
-
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
- /*
- * If the channel isn't enabled then the transaction already
- * completed.
- */
- if (usbc_hcchar.s.chena) {
- usbc_hcchar.s.chdis = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel,
- usb->index),
- usbc_hcchar.u32);
- }
- }
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
- return 0;
-}
-
-/**
- * Cancel all outstanding requests in a pipe. Logically all this
- * does is call cvmx_usb_cancel() in a loop.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to cancel requests in.
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe)
-{
- struct cvmx_usb_transaction *transaction, *next;
-
- /* Simply loop through and attempt to cancel each transaction */
- list_for_each_entry_safe(transaction, next, &pipe->transactions, node) {
- int result = cvmx_usb_cancel(usb, pipe, transaction);
-
- if (unlikely(result != 0))
- return result;
- }
- return 0;
-}
-
-/**
- * Close a pipe created with cvmx_usb_open_pipe().
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- * @pipe: Pipe to close.
- *
- * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
- * outstanding transfers.
- */
-static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe)
-{
- /* Fail if the pipe has pending transactions */
- if (!list_empty(&pipe->transactions))
- return -EBUSY;
-
- list_del(&pipe->node);
- kfree(pipe);
-
- return 0;
-}
-
-/**
- * Get the current USB protocol level frame number. The frame
- * number is always in the range of 0-0x7ff.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: USB frame number
- */
-static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_hfnum usbc_hfnum;
-
- usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
-
- return usbc_hfnum.s.frnum;
-}
-
-static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- union cvmx_usbcx_hccharx usbc_hcchar,
- int buffer_space_left,
- int bytes_in_last_packet)
-{
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- /* This should be impossible */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_ERROR);
- break;
- case CVMX_USB_STAGE_SETUP:
- pipe->pid_toggle = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage =
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
- } else {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA:
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
- /*
- * For setup OUT data that are splits,
- * the hardware doesn't appear to count
- * transferred data. Here we manually
- * update the data transferred
- */
- if (!usbc_hcchar.s.epdir) {
- if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes +=
- buffer_space_left;
- else
- transaction->actual_bytes +=
- pipe->max_packet;
- }
- } else if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- } else {
- transaction->stage = CVMX_USB_STAGE_DATA;
- }
- break;
- case CVMX_USB_STAGE_STATUS:
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage =
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
- else
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
- break;
- }
-}
-
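-/*
- * Summary of the control stage machine above (split stages elided):
- *
- *	SETUP -> DATA -> STATUS -> complete	(wLength != 0)
- *	SETUP -> STATUS -> complete		(wLength == 0)
- *
- * When the pipe needs split transactions, each stage is followed by its
- * *_SPLIT_COMPLETE counterpart before advancing.
- */
-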
-static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- union cvmx_usbcx_hcintx usbc_hcint,
- int buffer_space_left,
- int bytes_in_last_packet)
-{
- /*
- * The only time a bulk transfer isn't complete when it finishes with
- * an ACK is during a split transaction. For splits we need to continue
- * the transfer if more data is needed.
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- else if (buffer_space_left &&
- (bytes_in_last_packet == pipe->max_packet))
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- else
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- } else {
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (usbc_hcint.s.nak))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left ||
- (bytes_in_last_packet < pipe->max_packet))
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
-}
-
-static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- int buffer_space_left,
- int bytes_in_last_packet)
-{
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- } else if (buffer_space_left &&
- (bytes_in_last_packet == pipe->max_packet)) {
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- } else {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
- } else if (!buffer_space_left ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
- }
-}
-
-static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- int buffer_space_left,
- int bytes_in_last_packet,
- int bytes_this_transfer)
-{
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISOCHRONOUS OUT splits don't require a complete split stage.
- * Instead they use a sequence of begin OUT splits to transfer
- * the data 188 bytes at a time. Once the transfer is complete,
- * the pipe sleeps until the next schedule interval.
- */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- /*
- * If no space left or this wasn't a max size packet
- * then this transfer is complete. Otherwise start it
- * again to send the next 188 bytes
- */
- if (!buffer_space_left || (bytes_this_transfer < 188)) {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
- return;
- }
- if (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
- /*
- * We are in the incoming data phase. Keep getting data
- * until we run out of space or get a small packet
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_OK);
- }
- } else {
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- }
- } else {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
- }
-}
-
-/**
- * Poll a channel for status
- *
- * @usb: USB device
- * @channel: Channel to poll
- *
- * Returns: Zero on success
- */
-static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
-{
- struct usb_hcd *hcd = octeon_to_hcd(usb);
- struct device *dev = hcd->self.controller;
- union cvmx_usbcx_hcintx usbc_hcint;
- union cvmx_usbcx_hctsizx usbc_hctsiz;
- union cvmx_usbcx_hccharx usbc_hcchar;
- struct cvmx_usb_pipe *pipe;
- struct cvmx_usb_transaction *transaction;
- int bytes_this_transfer;
- int bytes_in_last_packet;
- int packets_processed;
- int buffer_space_left;
-
- /* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCINTX(channel, usb->index));
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
-
- if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
- /*
- * There seems to be a bug in CN31XX which can cause
- * interrupt IN transfers to get stuck until we do a
- * write of HCCHARX without changing things
- */
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel,
- usb->index),
- usbc_hcchar.u32);
- return 0;
- }
-
- /*
- * In non-DMA mode the channels don't halt themselves. We need
- * to manually disable channels that are left running
- */
- if (!usbc_hcint.s.chhltd) {
- if (usbc_hcchar.s.chena) {
- union cvmx_usbcx_hcintmskx hcintmsk;
- /* Disable all interrupts except CHHLTD */
- hcintmsk.u32 = 0;
- hcintmsk.s.chhltdmsk = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- hcintmsk.u32);
- usbc_hcchar.s.chdis = 1;
- cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index),
- usbc_hcchar.u32);
- return 0;
- } else if (usbc_hcint.s.xfercompl) {
- /*
- * Successful IN/OUT with transfer complete.
- * Channel halt isn't needed.
- */
- } else {
- dev_err(dev, "USB%d: Channel %d interrupt without halt\n",
- usb->index, channel);
- return 0;
- }
- }
- } else {
- /*
- * There are no interrupts that we need to process when the
- * channel is still running
- */
- if (!usbc_hcint.s.chhltd)
- return 0;
- }
-
- /* Disable the channel interrupts now that it is done */
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- usb->idle_hardware_channels |= (1 << channel);
-
- /* Make sure this channel is tied to a valid pipe */
- pipe = usb->pipe_for_channel[channel];
- prefetch(pipe);
- if (!pipe)
- return 0;
- transaction = list_first_entry(&pipe->transactions,
- typeof(*transaction),
- node);
- prefetch(transaction);
-
- /*
- * Disconnect this pipe from the HW channel. Later the schedule
- * function will figure out which pipe needs to go
- */
- usb->pipe_for_channel[channel] = NULL;
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /*
- * Read the channel config info so we can figure out how much data
- * was transferred
- */
- usbc_hcchar.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- /*
- * Calculating the number of bytes successfully transferred is dependent
- * on the transfer direction
- */
- packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
- if (usbc_hcchar.s.epdir) {
- /*
- * IN transactions are easy. For every byte received the
- * hardware decrements xfersize. All we need to do is subtract
- * the current value of xfersize from its starting value and we
- * know how many bytes were written to the buffer
- */
- bytes_this_transfer = transaction->xfersize -
- usbc_hctsiz.s.xfersize;
- } else {
- /*
- * OUT transactions don't decrement xfersize. Instead pktcnt is
- * decremented on every successful packet send. The hardware
- * does this when it receives an ACK, or NYET. If it doesn't
- * receive one of these responses pktcnt doesn't change
- */
- bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
- /*
- * The last packet may not be a full transfer if we didn't have
- * enough data
- */
- if (bytes_this_transfer > transaction->xfersize)
- bytes_this_transfer = transaction->xfersize;
- }
- /* Figure out how many bytes were in the last packet of the transfer */
- if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer -
- (packets_processed - 1) * usbc_hcchar.s.mps;
- else
- bytes_in_last_packet = bytes_this_transfer;
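-
- /*
- * Worked example (illustrative, not from the original source): for
- * an OUT transfer with mps = 64 that started with pktcnt = 10 and
- * halted with HCTSIZ.pktcnt = 7, packets_processed = 3 and
- * bytes_this_transfer = 3 * 64 = 192 (unless clamped by xfersize
- * above), so bytes_in_last_packet = 192 - 2 * 64 = 64. A short
- * final packet makes bytes_in_last_packet < mps, which the
- * completion logic below uses to detect the end of a transfer.
- */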
-
- /*
- * As a special case, setup transactions output the setup header, not
- * the user's data. For this reason we don't count setup data as bytes
- * transferred
- */
- if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
- (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
- bytes_this_transfer = 0;
-
- /*
- * Add the bytes transferred to the running total. It is important that
- * bytes_this_transfer doesn't count any data that needs to be
- * retransmitted
- */
- transaction->actual_bytes += bytes_this_transfer;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length -
- transaction->actual_bytes;
- else
- buffer_space_left = transaction->buffer_length -
- transaction->actual_bytes;
-
- /*
- * We need to remember the PID toggle state for the next transaction.
- * The hardware has already updated it for us
- */
- pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
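-
- /*
- * Encoding note (added; assumes the usual DWC OTG HCTSIZ.pid
- * encoding): 0 means DATA0, so any non-zero value (DATA1, DATA2,
- * MDATA/SETUP) is treated as "toggle set" for the next transaction.
- */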
-
- /*
- * For high speed bulk out, assume the next transaction will need to do
- * a ping before proceeding. If this isn't true the ACK processing below
- * will clear this flag
- */
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
- pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- if (WARN_ON_ONCE(bytes_this_transfer < 0)) {
- /*
- * In some rare cases the DMA engine seems to get stuck and
- * keeps subtracting the same byte count over and over again. In
- * that case we just need to fail every transaction.
- */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_ERROR);
- return 0;
- }
-
- if (usbc_hcint.s.stall) {
- /*
- * STALL as a response means this transaction cannot be
- * completed because the device can't process transactions. Tell
- * the user. Any data that was transferred will be counted in
- * the actual bytes transferred
- */
- pipe->pid_toggle = 0;
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_STALL);
- } else if (usbc_hcint.s.xacterr) {
- /*
- * XactErr as a response means the device signaled
- * something wrong with the transfer. For example, PID
- * toggle errors cause these.
- */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_XACTERR);
- } else if (usbc_hcint.s.bblerr) {
- /* Babble Error (BblErr) */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_BABBLEERR);
- } else if (usbc_hcint.s.datatglerr) {
- /* Data toggle error */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_DATATGLERR);
- } else if (usbc_hcint.s.nyet) {
- /*
- * NYET as a response is only allowed in three cases: as a
- * response to a ping, as a response to a split transaction, and
- * as a response to a bulk out. The ping case is handled by
- * hardware, so we only have splits and bulk out
- */
- if (!cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->retries = 0;
- /*
- * If there is more data to go then we need to try
- * again. Otherwise this transaction is complete
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet))
- cvmx_usb_complete(usb, pipe,
- transaction,
- CVMX_USB_STATUS_OK);
- } else {
- /*
- * Split transactions retry the split complete 4 times
- * then rewind to the start split and do the entire
- * transaction again
- */
- transaction->retries++;
- if ((transaction->retries & 0x3) == 0) {
- /*
- * Rewind to the beginning of the transaction by
- * ANDing off the split complete bit
- */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- }
- }
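-
- /*
- * Stage encoding sketch (added; assumed from the stage names): each
- * stage pairs with its SPLIT_COMPLETE variant in the low bit, e.g.
- * CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE == CVMX_USB_STAGE_SETUP | 1,
- * so "stage &= ~1" above rewinds from the complete split back to
- * the start split.
- */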
- } else if (usbc_hcint.s.ack) {
- transaction->retries = 0;
- /*
- * The ACK bit can only be checked after the other error bits.
- * This is because a multi packet transfer may succeed in a
- * number of packets and then get a different response on the
- * last packet. In this case both ACK and the last response bit
- * will be set. If none of the other response bits is set, then
- * the last packet must have been an ACK
- *
- * Since we got an ACK, we know we don't need to do a ping on
- * this pipe
- */
- pipe->flags &= ~CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- cvmx_usb_transfer_control(usb, pipe, transaction,
- usbc_hcchar,
- buffer_space_left,
- bytes_in_last_packet);
- break;
- case CVMX_USB_TRANSFER_BULK:
- cvmx_usb_transfer_bulk(usb, pipe, transaction,
- usbc_hcint, buffer_space_left,
- bytes_in_last_packet);
- break;
- case CVMX_USB_TRANSFER_INTERRUPT:
- cvmx_usb_transfer_intr(usb, pipe, transaction,
- buffer_space_left,
- bytes_in_last_packet);
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- cvmx_usb_transfer_isoc(usb, pipe, transaction,
- buffer_space_left,
- bytes_in_last_packet,
- bytes_this_transfer);
- break;
- }
- } else if (usbc_hcint.s.nak) {
- /*
- * If this was a split then clear our split in progress marker.
- */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /*
- * NAK as a response means the device couldn't accept the
- * transaction, but it should be retried in the future. Rewind
- * to the beginning of the transaction by ANDing off the split
- * complete bit. Retry in the next interval
- */
- transaction->retries = 0;
- transaction->stage &= ~1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number +
- pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) %
- pipe->interval;
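-
- /*
- * Worked example (illustrative): with interval = 8, if
- * next_tx_frame is 108 after the increment above while
- * frame_number is already 119, the pipe is rescheduled at
- * 119 + 8 - ((119 - 108) % 8) = 124, the next frame after 119
- * that keeps the pipe's original 8-frame cadence
- * (..., 108, 116, 124, ...).
- */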
- } else {
- struct cvmx_usb_port_status port;
-
- port = cvmx_usb_get_status(usb);
- if (port.port_enabled) {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- } else {
- /*
- * We get channel halted interrupts with no result bits
- * set when the cable is unplugged
- */
- cvmx_usb_complete(usb, pipe, transaction,
- CVMX_USB_STATUS_ERROR);
- }
- }
- return 0;
-}
-
-static void octeon_usb_port_callback(struct octeon_hcd *usb)
-{
- spin_unlock(&usb->lock);
- usb_hcd_poll_rh_status(octeon_to_hcd(usb));
- spin_lock(&usb->lock);
-}
-
-/**
- * Poll the USB block for status and call all needed callback
- * handlers. This function is meant to be called in the interrupt
- * handler for the USB controller. It can also be called
- * periodically in a loop for non-interrupt based operation.
- *
- * @usb: USB device state populated by cvmx_usb_initialize().
- *
- * Returns: 0 or a negative error code.
- */
-static int cvmx_usb_poll(struct octeon_hcd *usb)
-{
- union cvmx_usbcx_hfnum usbc_hfnum;
- union cvmx_usbcx_gintsts usbc_gintsts;
-
- prefetch_range(usb, sizeof(*usb));
-
- /* Update the frame counter */
- usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
- usb->frame_number += 0x4000;
- usb->frame_number &= ~0x3fffull;
- usb->frame_number |= usbc_hfnum.s.frnum;
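-
- /*
- * Worked example (illustrative): HFNUM.frnum is a 14-bit counter.
- * If the software copy was 0x13ffe (low bits 0x3ffe) and hardware
- * now reports frnum = 1, the low bits went backwards, so 0x4000 is
- * added first: 0x13ffe + 0x4000 = 0x17ffe, the low 14 bits are
- * cleared to give 0x14000, and OR-ing in frnum yields 0x14001.
- */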
-
- /* Read the pending interrupts */
- usbc_gintsts.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_GINTSTS(usb->index));
-
- /* Clear the interrupts now that we know about them */
- cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index),
- usbc_gintsts.u32);
-
- if (usbc_gintsts.s.rxflvl) {
- /*
- * RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be
- * read from the RxFIFO.
- *
- * In DMA mode this is handled by hardware
- */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_poll_rx_fifo(usb);
- }
- if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
- /* Fill the Tx FIFOs when not in DMA mode */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- cvmx_usb_poll_tx_fifo(usb);
- }
- if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
- union cvmx_usbcx_hprt usbc_hprt;
- /*
- * Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- *
- * Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application
- * must read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- *
- * Call the user's port callback
- */
- octeon_usb_port_callback(usb);
- /* Clear the port change bits */
- usbc_hprt.u32 =
- cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbc_hprt.s.prtena = 0;
- cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
- usbc_hprt.u32);
- }
- if (usbc_gintsts.s.hchint) {
- /*
- * Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is
- * pending on one of the channels of the core (in Host mode).
- * The application must read the Host All Channels Interrupt
- * (HAINT) register to determine the exact number of the channel
- * on which the interrupt occurred, and then read the
- * corresponding Host Channel-n Interrupt (HCINTn) register to
- * determine the exact cause of the interrupt. The application
- * must clear the appropriate status bit in the HCINTn register
- * to clear this bit.
- */
- union cvmx_usbcx_haint usbc_haint;
-
- usbc_haint.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HAINT(usb->index));
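-
- /*
- * Walk-through (added for clarity): if HAINT reads 0x09 (channels
- * 0 and 3 pending), __fls() picks channel 3 first, the channel is
- * serviced, and XOR-ing in 1 << 3 clears its bit, leaving 0x01
- * for the next loop iteration.
- */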
- while (usbc_haint.u32) {
- int channel;
-
- channel = __fls(usbc_haint.u32);
- cvmx_usb_poll_channel(usb, channel);
- usbc_haint.u32 ^= 1 << channel;
- }
- }
-
- cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
-
- return 0;
-}
-
-/* convert between an HCD pointer and the corresponding struct octeon_hcd */
-static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
-{
- return (struct octeon_hcd *)(hcd->hcd_priv);
-}
-
-static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- unsigned long flags;
-
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_poll(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- return IRQ_HANDLED;
-}
-
-static int octeon_usb_start(struct usb_hcd *hcd)
-{
- hcd->state = HC_STATE_RUNNING;
- return 0;
-}
-
-static void octeon_usb_stop(struct usb_hcd *hcd)
-{
- hcd->state = HC_STATE_HALT;
-}
-
-static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
-
- return cvmx_usb_get_frame_number(usb);
-}
-
-static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
- struct urb *urb,
- gfp_t mem_flags)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_transaction *transaction = NULL;
- struct cvmx_usb_pipe *pipe;
- unsigned long flags;
- struct cvmx_usb_iso_packet *iso_packet;
- struct usb_host_endpoint *ep = urb->ep;
- int rc;
-
- urb->status = 0;
- spin_lock_irqsave(&usb->lock, flags);
-
- rc = usb_hcd_link_urb_to_ep(hcd, urb);
- if (rc) {
- spin_unlock_irqrestore(&usb->lock, flags);
- return rc;
- }
-
- if (!ep->hcpriv) {
- enum cvmx_usb_transfer transfer_type;
- enum cvmx_usb_speed speed;
- int split_device = 0;
- int split_port = 0;
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
- break;
- case PIPE_INTERRUPT:
- transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
- break;
- case PIPE_CONTROL:
- transfer_type = CVMX_USB_TRANSFER_CONTROL;
- break;
- default:
- transfer_type = CVMX_USB_TRANSFER_BULK;
- break;
- }
- switch (urb->dev->speed) {
- case USB_SPEED_LOW:
- speed = CVMX_USB_SPEED_LOW;
- break;
- case USB_SPEED_FULL:
- speed = CVMX_USB_SPEED_FULL;
- break;
- default:
- speed = CVMX_USB_SPEED_HIGH;
- break;
- }
- /*
- * For slow devices on high speed ports we need to find the hub
- * that does the speed translation so we know where to send the
- * split transactions.
- */
- if (speed != CVMX_USB_SPEED_HIGH) {
- /*
- * Start at this device and work our way up the usb
- * tree.
- */
- struct usb_device *dev = urb->dev;
-
- while (dev->parent) {
- /*
- * If our parent is high speed then it will
- * receive the splits.
- */
- if (dev->parent->speed == USB_SPEED_HIGH) {
- split_device = dev->parent->devnum;
- split_port = dev->portnum;
- break;
- }
- /*
- * Move up the tree one level. If we make it all
- * the way up the tree, then the port must not
- * be in high speed mode and we don't need a
- * split.
- */
- dev = dev->parent;
- }
- }
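-
- /*
- * Decode note for the wMaxPacketSize fields below (added for
- * clarity): bits 10:0 hold the max packet size and bits 12:11 the
- * number of additional transactions per microframe for high-speed
- * high-bandwidth endpoints. For example, wMaxPacketSize = 0x1400
- * decodes to a 1024-byte packet with 2 extra transactions.
- */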
- pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe), speed,
- le16_to_cpu(ep->desc.wMaxPacketSize)
- & 0x7ff,
- transfer_type,
- usb_pipein(urb->pipe) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT,
- urb->interval,
- (le16_to_cpu(ep->desc.wMaxPacketSize)
- >> 11) & 0x3,
- split_device, split_port);
- if (!pipe) {
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&usb->lock, flags);
- dev_dbg(dev, "Failed to create pipe\n");
- return -ENOMEM;
- }
- ep->hcpriv = pipe;
- } else {
- pipe = ep->hcpriv;
- }
-
- switch (usb_pipetype(urb->pipe)) {
- case PIPE_ISOCHRONOUS:
- dev_dbg(dev, "Submit isochronous to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- /*
- * Allocate a structure to use for our private list of
- * isochronous packets.
- */
- iso_packet = kmalloc_array(urb->number_of_packets,
- sizeof(struct cvmx_usb_iso_packet),
- GFP_ATOMIC);
- if (iso_packet) {
- int i;
- /* Fill the list with the data from the URB */
- for (i = 0; i < urb->number_of_packets; i++) {
- iso_packet[i].offset =
- urb->iso_frame_desc[i].offset;
- iso_packet[i].length =
- urb->iso_frame_desc[i].length;
- iso_packet[i].status = CVMX_USB_STATUS_ERROR;
- }
- /*
- * Store a pointer to the list in the URB setup_packet
- * field. We know this currently isn't being used and
- * this saves us a bunch of logic.
- */
- urb->setup_packet = (char *)iso_packet;
- transaction = cvmx_usb_submit_isochronous(usb,
- pipe, urb);
- /*
- * If submit failed we need to free our private packet
- * list.
- */
- if (!transaction) {
- urb->setup_packet = NULL;
- kfree(iso_packet);
- }
- }
- break;
- case PIPE_INTERRUPT:
- dev_dbg(dev, "Submit interrupt to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
- break;
- case PIPE_CONTROL:
- dev_dbg(dev, "Submit control to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_control(usb, pipe, urb);
- break;
- case PIPE_BULK:
- dev_dbg(dev, "Submit bulk to %d.%d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
- break;
- }
- if (!transaction) {
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&usb->lock, flags);
- dev_dbg(dev, "Failed to submit\n");
- return -ENOMEM;
- }
- urb->hcpriv = transaction;
- spin_unlock_irqrestore(&usb->lock, flags);
- return 0;
-}
-
-static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
- struct urb *urb,
- int status)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- unsigned long flags;
- int rc;
-
- if (!urb->dev)
- return -EINVAL;
-
- spin_lock_irqsave(&usb->lock, flags);
-
- rc = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (rc)
- goto out;
-
- urb->status = status;
- cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
-
-out:
- spin_unlock_irqrestore(&usb->lock, flags);
-
- return rc;
-}
-
-static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep)
-{
- struct device *dev = hcd->self.controller;
-
- if (ep->hcpriv) {
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct cvmx_usb_pipe *pipe = ep->hcpriv;
- unsigned long flags;
-
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_cancel_all(usb, pipe);
- if (cvmx_usb_close_pipe(usb, pipe))
- dev_dbg(dev, "Closing pipe %p failed\n", pipe);
- spin_unlock_irqrestore(&usb->lock, flags);
- ep->hcpriv = NULL;
- }
-}
-
-static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct cvmx_usb_port_status port_status;
- unsigned long flags;
-
- spin_lock_irqsave(&usb->lock, flags);
- port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- buf[0] = port_status.connect_change << 1;
-
- return buf[0] != 0;
-}
-
-static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
- u16 wIndex, char *buf, u16 wLength)
-{
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- struct device *dev = hcd->self.controller;
- struct cvmx_usb_port_status usb_port_status;
- int port_status;
- struct usb_hub_descriptor *desc;
- unsigned long flags;
-
- switch (typeReq) {
- case ClearHubFeature:
- dev_dbg(dev, "ClearHubFeature\n");
- switch (wValue) {
- case C_HUB_LOCAL_POWER:
- case C_HUB_OVER_CURRENT:
- /* Nothing required here */
- break;
- default:
- return -EINVAL;
- }
- break;
- case ClearPortFeature:
- dev_dbg(dev, "ClearPortFeature\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- switch (wValue) {
- case USB_PORT_FEAT_ENABLE:
- dev_dbg(dev, " ENABLE\n");
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_disable(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(dev, " SUSPEND\n");
- /* Not supported on Octeon */
- break;
- case USB_PORT_FEAT_POWER:
- dev_dbg(dev, " POWER\n");
- /* Not supported on Octeon */
- break;
- case USB_PORT_FEAT_INDICATOR:
- dev_dbg(dev, " INDICATOR\n");
- /* Port indicator not supported */
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- dev_dbg(dev, " C_CONNECTION\n");
- /* Clears the driver's internal connect status change flag */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_C_RESET:
- dev_dbg(dev, " C_RESET\n");
- /*
- * Clears the driver's internal Port Reset Change flag.
- */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_C_ENABLE:
- dev_dbg(dev, " C_ENABLE\n");
- /*
- * Clears the driver's internal Port Enable/Disable
- * Change flag.
- */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- dev_dbg(dev, " C_SUSPEND\n");
- /*
- * Clears the driver's internal Port Suspend Change
- * flag, which is set when resume signaling on the host
- * port is complete.
- */
- break;
- case USB_PORT_FEAT_C_OVER_CURRENT:
- dev_dbg(dev, " C_OVER_CURRENT\n");
- /* Clears the driver's overcurrent Change flag */
- spin_lock_irqsave(&usb->lock, flags);
- usb->port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- break;
- default:
- dev_dbg(dev, " UNKNOWN\n");
- return -EINVAL;
- }
- break;
- case GetHubDescriptor:
- dev_dbg(dev, "GetHubDescriptor\n");
- desc = (struct usb_hub_descriptor *)buf;
- desc->bDescLength = 9;
- desc->bDescriptorType = 0x29;
- desc->bNbrPorts = 1;
- desc->wHubCharacteristics = cpu_to_le16(0x08);
- desc->bPwrOn2PwrGood = 1;
- desc->bHubContrCurrent = 0;
- desc->u.hs.DeviceRemovable[0] = 0;
- desc->u.hs.DeviceRemovable[1] = 0xff;
- break;
- case GetHubStatus:
- dev_dbg(dev, "GetHubStatus\n");
- *(__le32 *)buf = 0;
- break;
- case GetPortStatus:
- dev_dbg(dev, "GetPortStatus\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&usb->lock, flags);
- usb_port_status = cvmx_usb_get_status(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- port_status = 0;
-
- if (usb_port_status.connect_change) {
- port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
- dev_dbg(dev, " C_CONNECTION\n");
- }
-
- if (usb_port_status.port_enabled) {
- port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
- dev_dbg(dev, " C_ENABLE\n");
- }
-
- if (usb_port_status.connected) {
- port_status |= (1 << USB_PORT_FEAT_CONNECTION);
- dev_dbg(dev, " CONNECTION\n");
- }
-
- if (usb_port_status.port_enabled) {
- port_status |= (1 << USB_PORT_FEAT_ENABLE);
- dev_dbg(dev, " ENABLE\n");
- }
-
- if (usb_port_status.port_over_current) {
- port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
- dev_dbg(dev, " OVER_CURRENT\n");
- }
-
- if (usb_port_status.port_powered) {
- port_status |= (1 << USB_PORT_FEAT_POWER);
- dev_dbg(dev, " POWER\n");
- }
-
- if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
- port_status |= USB_PORT_STAT_HIGH_SPEED;
- dev_dbg(dev, " HIGHSPEED\n");
- } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
- port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
- dev_dbg(dev, " LOWSPEED\n");
- }
-
- *((__le32 *)buf) = cpu_to_le32(port_status);
- break;
- case SetHubFeature:
- dev_dbg(dev, "SetHubFeature\n");
- /* No HUB features supported */
- break;
- case SetPortFeature:
- dev_dbg(dev, "SetPortFeature\n");
- if (wIndex != 1) {
- dev_dbg(dev, " INVALID\n");
- return -EINVAL;
- }
-
- switch (wValue) {
- case USB_PORT_FEAT_SUSPEND:
- dev_dbg(dev, " SUSPEND\n");
- return -EINVAL;
- case USB_PORT_FEAT_POWER:
- dev_dbg(dev, " POWER\n");
- /*
- * Program the port power bit to drive VBUS on the USB.
- */
- spin_lock_irqsave(&usb->lock, flags);
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
- cvmx_usbcx_hprt, prtpwr, 1);
- spin_unlock_irqrestore(&usb->lock, flags);
- return 0;
- case USB_PORT_FEAT_RESET:
- dev_dbg(dev, " RESET\n");
- spin_lock_irqsave(&usb->lock, flags);
- cvmx_usb_reset_port(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- return 0;
- case USB_PORT_FEAT_INDICATOR:
- dev_dbg(dev, " INDICATOR\n");
- /* Not supported */
- break;
- default:
- dev_dbg(dev, " UNKNOWN\n");
- return -EINVAL;
- }
- break;
- default:
- dev_dbg(dev, "Unknown root hub request\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct hc_driver octeon_hc_driver = {
- .description = "Octeon USB",
- .product_desc = "Octeon Host Controller",
- .hcd_priv_size = sizeof(struct octeon_hcd),
- .irq = octeon_usb_irq,
- .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
- .start = octeon_usb_start,
- .stop = octeon_usb_stop,
- .urb_enqueue = octeon_usb_urb_enqueue,
- .urb_dequeue = octeon_usb_urb_dequeue,
- .endpoint_disable = octeon_usb_endpoint_disable,
- .get_frame_number = octeon_usb_get_frame_number,
- .hub_status_data = octeon_usb_hub_status_data,
- .hub_control = octeon_usb_hub_control,
- .map_urb_for_dma = octeon_map_urb_for_dma,
- .unmap_urb_for_dma = octeon_unmap_urb_for_dma,
-};
-
-static int octeon_usb_probe(struct platform_device *pdev)
-{
- int status;
- int initialize_flags;
- int usb_num;
- struct resource *res_mem;
- struct device_node *usbn_node;
- int irq = platform_get_irq(pdev, 0);
- struct device *dev = &pdev->dev;
- struct octeon_hcd *usb;
- struct usb_hcd *hcd;
- u32 clock_rate = 48000000;
- bool is_crystal_clock = false;
- const char *clock_type;
- int i;
-
- if (!dev->of_node) {
- dev_err(dev, "Error: empty of_node\n");
- return -ENXIO;
- }
- usbn_node = dev->of_node->parent;
-
- i = of_property_read_u32(usbn_node,
- "clock-frequency", &clock_rate);
- if (i)
- i = of_property_read_u32(usbn_node,
- "refclk-frequency", &clock_rate);
- if (i) {
- dev_err(dev, "No USBN \"clock-frequency\"\n");
- return -ENXIO;
- }
- switch (clock_rate) {
- case 12000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case 24000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case 48000000:
- initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
- clock_rate);
- return -ENXIO;
- }
-
- i = of_property_read_string(usbn_node,
- "cavium,refclk-type", &clock_type);
- if (i)
- i = of_property_read_string(usbn_node,
- "refclk-type", &clock_type);
-
- if (!i && strcmp("crystal", clock_type) == 0)
- is_crystal_clock = true;
-
- if (is_crystal_clock)
- initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- else
- initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(dev, "found no memory resource\n");
- return -ENXIO;
- }
- usb_num = (res_mem->start >> 44) & 1;
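-
- /*
- * Worked example (derived from the CVMX_USBCX* macros): the two
- * cores are 0x100000000000 (bit 44) apart, so a CSR base of
- * 0x00016F0010000000 yields usb_num 0 and 0x00017F0010000000
- * (bit 44 set) yields usb_num 1.
- */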
-
- if (irq < 0) {
- /* Defective device tree, but we know how to fix it. */
- irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
-
- irq = irq_create_mapping(NULL, hwirq);
- }
-
- /*
- * Set the DMA mask to 64 bits so we get buffers already translated for
- * DMA.
- */
- i = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (i)
- return i;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
- if (!hcd) {
- dev_dbg(dev, "Failed to allocate memory for HCD\n");
- return -1;
- }
- hcd->uses_new_polling = 1;
- usb = (struct octeon_hcd *)hcd->hcd_priv;
-
- spin_lock_init(&usb->lock);
-
- usb->init_flags = initialize_flags;
-
- /* Initialize the USB state structure */
- usb->index = usb_num;
- INIT_LIST_HEAD(&usb->idle_pipes);
- for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
- INIT_LIST_HEAD(&usb->active_pipes[i]);
-
- /* Due to an errata, CN31XX doesn't support DMA */
- if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
- usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
- /* Only use one channel in non-DMA mode */
- usb->idle_hardware_channels = 0x1;
- } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
- /* CN5XXX have an errata with channel 3 */
- usb->idle_hardware_channels = 0xf7;
- } else {
- usb->idle_hardware_channels = 0xff;
- }
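-
- /*
- * Mask illustration (added): 0xf7 = 0b11110111 leaves every
- * channel except channel 3 usable, 0x1 only channel 0, and 0xff
- * all eight; a channel is returned to this mask via
- * "idle_hardware_channels |= 1 << channel" once it halts.
- */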
-
- status = cvmx_usb_initialize(dev, usb);
- if (status) {
- dev_dbg(dev, "USB initialization failed with %d\n", status);
- usb_put_hcd(hcd);
- return -1;
- }
-
- status = usb_add_hcd(hcd, irq, 0);
- if (status) {
- dev_dbg(dev, "USB add HCD failed with %d\n", status);
- usb_put_hcd(hcd);
- return -1;
- }
- device_wakeup_enable(hcd->self.controller);
-
- dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
-
- return 0;
-}
-
-static int octeon_usb_remove(struct platform_device *pdev)
-{
- int status;
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct octeon_hcd *usb = hcd_to_octeon(hcd);
- unsigned long flags;
-
- usb_remove_hcd(hcd);
- spin_lock_irqsave(&usb->lock, flags);
- status = cvmx_usb_shutdown(usb);
- spin_unlock_irqrestore(&usb->lock, flags);
- if (status)
- dev_dbg(dev, "USB shutdown failed with %d\n", status);
-
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static const struct of_device_id octeon_usb_match[] = {
- {
- .compatible = "cavium,octeon-5750-usbc",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, octeon_usb_match);
-
-static struct platform_driver octeon_usb_driver = {
- .driver = {
- .name = "octeon-hcd",
- .of_match_table = octeon_usb_match,
- },
- .probe = octeon_usb_probe,
- .remove = octeon_usb_remove,
-};
-
-static int __init octeon_usb_driver_init(void)
-{
- if (usb_disabled())
- return 0;
-
- return platform_driver_register(&octeon_usb_driver);
-}
-module_init(octeon_usb_driver_init);
-
-static void __exit octeon_usb_driver_exit(void)
-{
- if (usb_disabled())
- return;
-
- platform_driver_unregister(&octeon_usb_driver);
-}
-module_exit(octeon_usb_driver_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
-MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
deleted file mode 100644
index 9ed619c93a4e..000000000000
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ /dev/null
@@ -1,1847 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Octeon HCD hardware register definitions.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Some parts of the code were originally released under BSD license:
- *
- * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
- * reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Networks nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its associated
- * regulations, and may be subject to export or import regulations in other
- * countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
- * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
- * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
- * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
- * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
- * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
- * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
- * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
- * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
- */
-
-#ifndef __OCTEON_HCD_H__
-#define __OCTEON_HCD_H__
-
-#include <asm/bitfield.h>
-
-#define CVMX_USBCXBASE 0x00016F0010000000ull
-#define CVMX_USBCXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
- ((bid) & 1) * 0x100000000000ull)
-#define CVMX_USBCXREG2(reg, bid, off) \
- (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
- (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
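-
-/*
- * Worked example (derived from the macros above): per-channel
- * registers are 32 bytes apart, so CVMX_USBCX_HCCHARX(2, 0)
- * resolves to CVMX_ADD_IO_SEG(0x00016F0010000500) + 2 * 32, i.e.
- * channel 2's HCCHAR at offset 0x540 from the core's base.
- */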
-
-#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid)
-#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid)
-#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid)
-#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid)
-#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid)
-#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid)
-#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid)
-#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid)
-#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid)
-#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid)
-#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid)
-#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid)
-#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid)
-#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off)
-#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid)
-#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off)
-#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off)
-#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off)
-#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off)
-#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid)
-#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid)
-#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid)
-#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
-#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
-
-#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
-#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
-
-#define CVMX_USBNXREG1(reg, bid) \
- (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
-#define CVMX_USBNXREG2(reg, bid) \
- (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
-
-#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
-#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
-#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
-#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
-
-/**
- * cvmx_usbc#_gahbcfg
- *
- * Core AHB Configuration Register (GAHBCFG)
- *
- * This register can be used to configure the core after power-on or a change in
- * mode of operation. This register mainly contains AHB system-related
- * configuration parameters. The AHB is the processor interface to the O2P USB
- * core. In general, software need not know about this interface except to
- * program the values as specified.
- *
- * The application must program this register as part of the O2P USB core
- * initialization. Do not change this register after the initial programming.
- */
-union cvmx_usbcx_gahbcfg {
- u32 u32;
- /**
- * struct cvmx_usbcx_gahbcfg_s
- * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
- * Software should set this bit to 0x1.
- * Indicates when the Periodic TxFIFO Empty Interrupt bit in the
- * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
- * bit is used only in Slave mode.
- * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
- * TxFIFO is half empty
- * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
- * TxFIFO is completely empty
- * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
- * Software should set this bit to 0x1.
- * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
- * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
- * This bit is used only in Slave mode.
- * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
- * Periodic TxFIFO is half empty
- * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
- * Periodic TxFIFO is completely empty
- * @dmaen: DMA Enable (DMAEn)
- * * 1'b0: Core operates in Slave mode
- * * 1'b1: Core operates in a DMA mode
- * @hbstlen: Burst Length/Type (HBstLen)
- * This field has no effect and should be left as 0x0.
- * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
- * Software should set this field to 0x1.
- * The application uses this bit to mask or unmask the interrupt
- * line assertion to itself. Irrespective of this bit's setting,
- * the interrupt status registers are updated by the core.
- * * 1'b0: Mask the interrupt assertion to the application.
- * * 1'b1: Unmask the interrupt assertion to the application.
- */
- struct cvmx_usbcx_gahbcfg_s {
- __BITFIELD_FIELD(u32 reserved_9_31 : 23,
- __BITFIELD_FIELD(u32 ptxfemplvl : 1,
- __BITFIELD_FIELD(u32 nptxfemplvl : 1,
- __BITFIELD_FIELD(u32 reserved_6_6 : 1,
- __BITFIELD_FIELD(u32 dmaen : 1,
- __BITFIELD_FIELD(u32 hbstlen : 4,
- __BITFIELD_FIELD(u32 glblintrmsk : 1,
- ;)))))))
- } s;
-};
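-
-/*
- * Layout note (added; assumes the MIPS <asm/bitfield.h> definition):
- * __BITFIELD_FIELD emits its first argument before the rest on
- * big-endian builds and after it on little-endian ones, so the
- * nested declarations above describe the same 32-bit register on
- * either endianness, with fields listed from bit 31 down to bit 0.
- */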
-
-/**
- * cvmx_usbc#_ghwcfg3
- *
- * User HW Config3 Register (GHWCFG3)
- *
- * This register contains the configuration options of the O2P USB core.
- */
-union cvmx_usbcx_ghwcfg3 {
- u32 u32;
- /**
- * struct cvmx_usbcx_ghwcfg3_s
- * @dfifodepth: DFIFO Depth (DfifoDepth)
- * This value is in terms of 32-bit words.
- * * Minimum value is 32
- * * Maximum value is 32768
- * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
- * Indicates whether AHB and PHY clocks are synchronous to
- * each other.
- * * 1'b0: No
- * * 1'b1: Yes
- * This bit is tied to 1.
- * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
- * * 1'b0: Asynchronous reset is used in the core
- * * 1'b1: Synchronous reset is used in the core
- * @optfeature: Optional Features Removed (OptFeature)
- * Indicates whether the User ID register, GPIO interface ports,
- * and SOF toggle and counter ports were removed for gate count
- * optimization.
- * @vendor_control_interface_support: Vendor Control Interface Support
- * * 1'b0: Vendor Control Interface is not available on the core.
- * * 1'b1: Vendor Control Interface is available.
- * @i2c_selection: I2C Selection
- * * 1'b0: I2C Interface is not available on the core.
- * * 1'b1: I2C Interface is available on the core.
- * @otgen: OTG Function Enabled (OtgEn)
- * The application uses this bit to indicate the O2P USB core's
- * OTG capabilities.
- * * 1'b0: Not OTG capable
- * * 1'b1: OTG Capable
- * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
- * * 3'b000: 4 bits
- * * 3'b001: 5 bits
- * * 3'b010: 6 bits
- * * 3'b011: 7 bits
- * * 3'b100: 8 bits
- * * 3'b101: 9 bits
- * * 3'b110: 10 bits
- * * Others: Reserved
- * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
- * * 4'b0000: 11 bits
- * * 4'b0001: 12 bits
- * - ...
- * * 4'b1000: 19 bits
- * * Others: Reserved
- */
- struct cvmx_usbcx_ghwcfg3_s {
- __BITFIELD_FIELD(u32 dfifodepth : 16,
- __BITFIELD_FIELD(u32 reserved_13_15 : 3,
- __BITFIELD_FIELD(u32 ahbphysync : 1,
- __BITFIELD_FIELD(u32 rsttype : 1,
- __BITFIELD_FIELD(u32 optfeature : 1,
- __BITFIELD_FIELD(u32 vendor_control_interface_support : 1,
- __BITFIELD_FIELD(u32 i2c_selection : 1,
- __BITFIELD_FIELD(u32 otgen : 1,
- __BITFIELD_FIELD(u32 pktsizewidth : 3,
- __BITFIELD_FIELD(u32 xfersizewidth : 4,
- ;))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gintmsk
- *
- * Core Interrupt Mask Register (GINTMSK)
- *
- * This register works with the Core Interrupt register to interrupt the
- * application. When an interrupt bit is masked, the interrupt associated with
- * that bit will not be generated. However, the Core Interrupt (GINTSTS)
- * register bit corresponding to that interrupt will still be set.
- * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
- */
-union cvmx_usbcx_gintmsk {
- u32 u32;
- /**
- * struct cvmx_usbcx_gintmsk_s
- * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
- * (WkUpIntMsk)
- * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask
- * (SessReqIntMsk)
- * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk)
- * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk)
- * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk)
- * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk)
- * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk)
- * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk)
- * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk)
- * Incomplete Isochronous OUT Transfer Mask
- * (incompISOOUTMsk)
- * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask
- * (incompISOINMsk)
- * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk)
- * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk)
- * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk)
- * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk)
- * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask
- * (ISOOutDropMsk)
- * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk)
- * @usbrstmsk: USB Reset Mask (USBRstMsk)
- * @usbsuspmsk: USB Suspend Mask (USBSuspMsk)
- * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk)
- * @i2cint: I2C Interrupt Mask (I2CINT)
- * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk)
- * I2C Carkit Interrupt Mask (I2CCKINTMsk)
- * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk)
- * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask
- * (GINNakEffMsk)
- * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk)
- * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk)
- * @sofmsk: Start of (micro)Frame Mask (SofMsk)
- * @otgintmsk: OTG Interrupt Mask (OTGIntMsk)
- * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
- */
- struct cvmx_usbcx_gintmsk_s {
- __BITFIELD_FIELD(u32 wkupintmsk : 1,
- __BITFIELD_FIELD(u32 sessreqintmsk : 1,
- __BITFIELD_FIELD(u32 disconnintmsk : 1,
- __BITFIELD_FIELD(u32 conidstschngmsk : 1,
- __BITFIELD_FIELD(u32 reserved_27_27 : 1,
- __BITFIELD_FIELD(u32 ptxfempmsk : 1,
- __BITFIELD_FIELD(u32 hchintmsk : 1,
- __BITFIELD_FIELD(u32 prtintmsk : 1,
- __BITFIELD_FIELD(u32 reserved_23_23 : 1,
- __BITFIELD_FIELD(u32 fetsuspmsk : 1,
- __BITFIELD_FIELD(u32 incomplpmsk : 1,
- __BITFIELD_FIELD(u32 incompisoinmsk : 1,
- __BITFIELD_FIELD(u32 oepintmsk : 1,
- __BITFIELD_FIELD(u32 inepintmsk : 1,
- __BITFIELD_FIELD(u32 epmismsk : 1,
- __BITFIELD_FIELD(u32 reserved_16_16 : 1,
- __BITFIELD_FIELD(u32 eopfmsk : 1,
- __BITFIELD_FIELD(u32 isooutdropmsk : 1,
- __BITFIELD_FIELD(u32 enumdonemsk : 1,
- __BITFIELD_FIELD(u32 usbrstmsk : 1,
- __BITFIELD_FIELD(u32 usbsuspmsk : 1,
- __BITFIELD_FIELD(u32 erlysuspmsk : 1,
- __BITFIELD_FIELD(u32 i2cint : 1,
- __BITFIELD_FIELD(u32 ulpickintmsk : 1,
- __BITFIELD_FIELD(u32 goutnakeffmsk : 1,
- __BITFIELD_FIELD(u32 ginnakeffmsk : 1,
- __BITFIELD_FIELD(u32 nptxfempmsk : 1,
- __BITFIELD_FIELD(u32 rxflvlmsk : 1,
- __BITFIELD_FIELD(u32 sofmsk : 1,
- __BITFIELD_FIELD(u32 otgintmsk : 1,
- __BITFIELD_FIELD(u32 modemismsk : 1,
- __BITFIELD_FIELD(u32 reserved_0_0 : 1,
- ;))))))))))))))))))))))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gintsts
- *
- * Core Interrupt Register (GINTSTS)
- *
- * This register interrupts the application for system-level events in the
- * current mode of operation (Device mode or Host mode). It is shown in
- * Interrupt. Some of the bits in this register are valid only in Host mode,
- * while others are valid in Device mode only. This register also indicates the
- * current mode of operation. In order to clear the interrupt status bits of
- * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status
- * interrupts are read only; once software reads from or writes to the FIFO
- * while servicing these interrupts, FIFO interrupt conditions are cleared
- * automatically.
- */
-union cvmx_usbcx_gintsts {
- u32 u32;
- /**
- * struct cvmx_usbcx_gintsts_s
- * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
- * In Device mode, this interrupt is asserted when a resume is
- * detected on the USB. In Host mode, this interrupt is asserted
- * when a remote wakeup is detected on the USB.
- * For more information on how to use this interrupt, see "Partial
- * Power-Down and Clock Gating Programming Model" on
- * page 353.
- * @sessreqint: Session Request/New Session Detected Interrupt
- * (SessReqInt)
- * In Host mode, this interrupt is asserted when a session request
- * is detected from the device. In Device mode, this interrupt is
- * asserted when the utmiotg_bvalid signal goes high.
- * For more information on how to use this interrupt, see "Partial
- * Power-Down and Clock Gating Programming Model" on
- * page 353.
- * @disconnint: Disconnect Detected Interrupt (DisconnInt)
- * Asserted when a device disconnect is detected.
- * @conidstschng: Connector ID Status Change (ConIDStsChng)
- * The core sets this bit when there is a change in connector ID
- * status.
- * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp)
- * Asserted when the Periodic Transmit FIFO is either half or
- * completely empty and there is space for at least one entry to be
- * written in the Periodic Request Queue. The half or completely
- * empty status is determined by the Periodic TxFIFO Empty Level
- * bit in the Core AHB Configuration register
- * (GAHBCFG.PTxFEmpLvl).
- * @hchint: Host Channels Interrupt (HChInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the channels of the core (in Host mode). The
- * application must read the Host All Channels Interrupt (HAINT)
- * register to determine the exact number of the channel on which
- * the interrupt occurred, and then read the corresponding Host
- * Channel-n Interrupt (HCINTn) register to determine the exact
- * cause of the interrupt. The application must clear the
- * appropriate status bit in the HCINTn register to clear this bit.
- * @prtint: Host Port Interrupt (PrtInt)
- * The core sets this bit to indicate a change in port status of
- * one of the O2P USB core ports in Host mode. The application must
- * read the Host Port Control and Status (HPRT) register to
- * determine the exact event that caused this interrupt. The
- * application must clear the appropriate status bit in the Host
- * Port Control and Status register to clear this bit.
- * @fetsusp: Data Fetch Suspended (FetSusp)
- * This interrupt is valid only in DMA mode. This interrupt
- * indicates that the core has stopped fetching data for IN
- * endpoints due to the unavailability of TxFIFO space or Request
- * Queue space. This interrupt is used by the application for an
- * endpoint mismatch algorithm.
- * @incomplp: Incomplete Periodic Transfer (incomplP)
- * In Host mode, the core sets this interrupt bit when there are
- * incomplete periodic transactions still pending which are
- * scheduled for the current microframe.
- * Incomplete Isochronous OUT Transfer (incompISOOUT)
- * In Device mode, the core sets this interrupt to indicate that
- * there is at least one isochronous OUT endpoint on which the
- * transfer is not completed in the current microframe. This
- * interrupt is asserted along with the End of Periodic Frame
- * Interrupt (EOPF) bit in this register.
- * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
- * The core sets this interrupt to indicate that there is at least
- * one isochronous IN endpoint on which the transfer is not
- * completed in the current microframe. This interrupt is asserted
- * along with the End of Periodic Frame Interrupt (EOPF) bit in
- * this register.
- * @oepint: OUT Endpoints Interrupt (OEPInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the OUT endpoints of the core (in Device mode). The
- * application must read the Device All Endpoints Interrupt
- * (DAINT) register to determine the exact number of the OUT
- * endpoint on which the interrupt occurred, and then read the
- * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
- * register to determine the exact cause of the interrupt. The
- * application must clear the appropriate status bit in the
- * corresponding DOEPINTn register to clear this bit.
- * @iepint: IN Endpoints Interrupt (IEPInt)
- * The core sets this bit to indicate that an interrupt is pending
- * on one of the IN endpoints of the core (in Device mode). The
- * application must read the Device All Endpoints Interrupt
- * (DAINT) register to determine the exact number of the IN
- * endpoint on which the interrupt occurred, and then read the
- * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
- * register to determine the exact cause of the interrupt. The
- * application must clear the appropriate status bit in the
- * corresponding DIEPINTn register to clear this bit.
- * @epmis: Endpoint Mismatch Interrupt (EPMis)
- * Indicates that an IN token has been received for a non-periodic
- * endpoint, but the data for another endpoint is present in the
- * top of the Non-Periodic Transmit FIFO and the IN endpoint
- * mismatch count programmed by the application has expired.
- * @eopf: End of Periodic Frame Interrupt (EOPF)
- * Indicates that the period specified in the Periodic Frame
- * Interval field of the Device Configuration register
- * (DCFG.PerFrInt) has been reached in the current microframe.
- * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
- * The core sets this bit when it fails to write an isochronous OUT
- * packet into the RxFIFO because the RxFIFO doesn't have
- * enough space to accommodate a maximum packet size packet
- * for the isochronous OUT endpoint.
- * @enumdone: Enumeration Done (EnumDone)
- * The core sets this bit to indicate that speed enumeration is
- * complete. The application must read the Device Status (DSTS)
- * register to obtain the enumerated speed.
- * @usbrst: USB Reset (USBRst)
- * The core sets this bit to indicate that a reset is detected on
- * the USB.
- * @usbsusp: USB Suspend (USBSusp)
- * The core sets this bit to indicate that a suspend was detected
- * on the USB. The core enters the Suspended state when there
- * is no activity on the phy_line_state_i signal for an extended
- * period of time.
- * @erlysusp: Early Suspend (ErlySusp)
- * The core sets this bit to indicate that an Idle state has been
- * detected on the USB for 3 ms.
- * @i2cint: I2C Interrupt (I2CINT)
- * This bit is always 0x0.
- * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
- * This bit is always 0x0.
- * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
- * Indicates that the Set Global OUT NAK bit in the Device Control
- * register (DCTL.SGOUTNak), set by the application, has taken
- * effect in the core. This bit can be cleared by writing the Clear
- * Global OUT NAK bit in the Device Control register
- * (DCTL.CGOUTNak).
- * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
- * Indicates that the Set Global Non-Periodic IN NAK bit in the
- * Device Control register (DCTL.SGNPInNak), set by the
- * application, has taken effect in the core. That is, the core has
- * sampled the Global IN NAK bit set by the application. This bit
- * can be cleared by clearing the Clear Global Non-Periodic IN
- * NAK bit in the Device Control register (DCTL.CGNPInNak).
- * This interrupt does not necessarily mean that a NAK handshake
- * is sent out on the USB. The STALL bit takes precedence over
- * the NAK bit.
- * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
- * This interrupt is asserted when the Non-Periodic TxFIFO is
- * either half or completely empty, and there is space for at least
- * one entry to be written to the Non-Periodic Transmit Request
- * Queue. The half or completely empty status is determined by
- * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
- * Configuration register (GAHBCFG.NPTxFEmpLvl).
- * @rxflvl: RxFIFO Non-Empty (RxFLvl)
- * Indicates that there is at least one packet pending to be read
- * from the RxFIFO.
- * @sof: Start of (micro)Frame (Sof)
- * In Host mode, the core sets this bit to indicate that an SOF
- * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
- * USB. The application must write a 1 to this bit to clear the
- * interrupt.
- * In Device mode, the core sets this bit to indicate that an
- * SOF token has been received on the USB. The application can read
- * the Device Status register to get the current (micro)frame
- * number. This interrupt is seen only when the core is operating
- * at either HS or FS.
- * @otgint: OTG Interrupt (OTGInt)
- * The core sets this bit to indicate an OTG protocol event. The
- * application must read the OTG Interrupt Status (GOTGINT)
- * register to determine the exact event that caused this
- * interrupt. The application must clear the appropriate status bit
- * in the GOTGINT register to clear this bit.
- * @modemis: Mode Mismatch Interrupt (ModeMis)
- * The core sets this bit when the application is trying to access:
- * * A Host mode register, when the core is operating in Device
- * mode
- * * A Device mode register, when the core is operating in Host
- * mode
- * The register access is completed on the AHB with an OKAY
- * response, but is ignored by the core internally and doesn't
- * affect the operation of the core.
- * @curmod: Current Mode of Operation (CurMod)
- * Indicates the current mode of operation.
- * * 1'b0: Device mode
- * * 1'b1: Host mode
- */
- struct cvmx_usbcx_gintsts_s {
- __BITFIELD_FIELD(u32 wkupint : 1,
- __BITFIELD_FIELD(u32 sessreqint : 1,
- __BITFIELD_FIELD(u32 disconnint : 1,
- __BITFIELD_FIELD(u32 conidstschng : 1,
- __BITFIELD_FIELD(u32 reserved_27_27 : 1,
- __BITFIELD_FIELD(u32 ptxfemp : 1,
- __BITFIELD_FIELD(u32 hchint : 1,
- __BITFIELD_FIELD(u32 prtint : 1,
- __BITFIELD_FIELD(u32 reserved_23_23 : 1,
- __BITFIELD_FIELD(u32 fetsusp : 1,
- __BITFIELD_FIELD(u32 incomplp : 1,
- __BITFIELD_FIELD(u32 incompisoin : 1,
- __BITFIELD_FIELD(u32 oepint : 1,
- __BITFIELD_FIELD(u32 iepint : 1,
- __BITFIELD_FIELD(u32 epmis : 1,
- __BITFIELD_FIELD(u32 reserved_16_16 : 1,
- __BITFIELD_FIELD(u32 eopf : 1,
- __BITFIELD_FIELD(u32 isooutdrop : 1,
- __BITFIELD_FIELD(u32 enumdone : 1,
- __BITFIELD_FIELD(u32 usbrst : 1,
- __BITFIELD_FIELD(u32 usbsusp : 1,
- __BITFIELD_FIELD(u32 erlysusp : 1,
- __BITFIELD_FIELD(u32 i2cint : 1,
- __BITFIELD_FIELD(u32 ulpickint : 1,
- __BITFIELD_FIELD(u32 goutnakeff : 1,
- __BITFIELD_FIELD(u32 ginnakeff : 1,
- __BITFIELD_FIELD(u32 nptxfemp : 1,
- __BITFIELD_FIELD(u32 rxflvl : 1,
- __BITFIELD_FIELD(u32 sof : 1,
- __BITFIELD_FIELD(u32 otgint : 1,
- __BITFIELD_FIELD(u32 modemis : 1,
- __BITFIELD_FIELD(u32 curmod : 1,
- ;))))))))))))))))))))))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_gnptxfsiz
- *
- * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
- *
- * The application can program the RAM size and the memory start address for the
- * Non-Periodic TxFIFO.
- */
-union cvmx_usbcx_gnptxfsiz {
- u32 u32;
- /**
- * struct cvmx_usbcx_gnptxfsiz_s
- * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
- * This value is in terms of 32-bit words.
- * Minimum value is 16
- * Maximum value is 32768
- * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
- * This field contains the memory start address for Non-Periodic
- * Transmit FIFO RAM.
- */
- struct cvmx_usbcx_gnptxfsiz_s {
- __BITFIELD_FIELD(u32 nptxfdep : 16,
- __BITFIELD_FIELD(u32 nptxfstaddr : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_gnptxsts
- *
- * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
- *
- * This read-only register contains the free space information for the
- * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
- */
-union cvmx_usbcx_gnptxsts {
- u32 u32;
- /**
- * struct cvmx_usbcx_gnptxsts_s
- * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
- * Entry in the Non-Periodic Tx Request Queue that is currently
- * being processed by the MAC.
- * * Bits [30:27]: Channel/endpoint number
- * * Bits [26:25]:
- * - 2'b00: IN/OUT token
- * - 2'b01: Zero-length transmit packet (device IN/host OUT)
- * - 2'b10: PING/CSPLIT token
- * - 2'b11: Channel halt command
- * * Bit [24]: Terminate (last entry for selected channel/endpoint)
- * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available
- * (NPTxQSpcAvail)
- * Indicates the amount of free space available in the Non-
- * Periodic Transmit Request Queue. This queue holds both IN
- * and OUT requests in Host mode. Device mode has only IN
- * requests.
- * * 8'h0: Non-Periodic Transmit Request Queue is full
- * * 8'h1: 1 location available
- * * 8'h2: 2 locations available
- * * n: n locations available (0..8)
- * * Others: Reserved
- * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
- * Indicates the amount of free space available in the Non-
- * Periodic TxFIFO.
- * Values are in terms of 32-bit words.
- * * 16'h0: Non-Periodic TxFIFO is full
- * * 16'h1: 1 word available
- * * 16'h2: 2 words available
- * 16'hn: n words available (where n = 0..32768)
- * * 16'h8000: 32768 words available
- * * Others: Reserved
- */
- struct cvmx_usbcx_gnptxsts_s {
- __BITFIELD_FIELD(u32 reserved_31_31 : 1,
- __BITFIELD_FIELD(u32 nptxqtop : 7,
- __BITFIELD_FIELD(u32 nptxqspcavail : 8,
- __BITFIELD_FIELD(u32 nptxfspcavail : 16,
- ;))))
- } s;
-};
-
-/**
- * cvmx_usbc#_grstctl
- *
- * Core Reset Register (GRSTCTL)
- *
- * The application uses this register to reset various hardware features inside
- * the core.
- */
-union cvmx_usbcx_grstctl {
- u32 u32;
- /**
- * struct cvmx_usbcx_grstctl_s
- * @ahbidle: AHB Master Idle (AHBIdle)
- * Indicates that the AHB Master State Machine is in the IDLE
- * condition.
- * @dmareq: DMA Request Signal (DMAReq)
- * Indicates that the DMA request is in progress. Used for debug.
- * @txfnum: TxFIFO Number (TxFNum)
- * This is the FIFO number that must be flushed using the TxFIFO
- * Flush bit. This field must not be changed until the core clears
- * the TxFIFO Flush bit.
- * * 5'h0: Non-Periodic TxFIFO flush
- * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
- * TxFIFO flush in Host mode
- * * 5'h2: Periodic TxFIFO 2 flush in Device mode
- * - ...
- * * 5'hF: Periodic TxFIFO 15 flush in Device mode
- * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
- * core
- * @txfflsh: TxFIFO Flush (TxFFlsh)
- * This bit selectively flushes a single or all transmit FIFOs, but
- * cannot do so if the core is in the midst of a transaction.
- * The application must only write this bit after checking that the
- * core is neither writing to the TxFIFO nor reading from the
- * TxFIFO.
- * The application must wait until the core clears this bit before
- * performing any operations. This bit takes 8 clocks (of phy_clk
- * or hclk, whichever is slower) to clear.
- * @rxfflsh: RxFIFO Flush (RxFFlsh)
- * The application can flush the entire RxFIFO using this bit, but
- * must first ensure that the core is not in the middle of a
- * transaction.
- * The application must only write to this bit after checking that
- * the core is neither reading from the RxFIFO nor writing to the
- * RxFIFO.
- * The application must wait until the bit is cleared before
- * performing any other operations. This bit will take 8 clocks
- * (slowest of PHY or AHB clock) to clear.
- * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh)
- * The application writes this bit to flush the IN Token Sequence
- * Learning Queue.
- * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst)
- * The application writes this bit to reset the (micro)frame number
- * counter inside the core. When the (micro)frame counter is reset,
- * the subsequent SOF sent out by the core will have a
- * (micro)frame number of 0.
- * @hsftrst: HClk Soft Reset (HSftRst)
- * The application uses this bit to flush the control logic in the
- * AHB Clock domain. Only AHB Clock Domain pipelines are reset.
- * * FIFOs are not flushed with this bit.
- * * All state machines in the AHB clock domain are reset to the
- * Idle state after terminating the transactions on the AHB,
- * following the protocol.
- * * CSR control bits used by the AHB clock domain state
- * machines are cleared.
- * * To clear this interrupt, status mask bits that control the
- * interrupt status and are generated by the AHB clock domain
- * state machine are cleared.
- * * Because interrupt status bits are not cleared, the application
- * can get the status of any core events that occurred after it set
- * this bit.
- * This is a self-clearing bit that the core clears after all
- * necessary logic is reset in the core. This may take several
- * clocks, depending on the core's current state.
- * @csftrst: Core Soft Reset (CSftRst)
- * Resets the hclk and phy_clock domains as follows:
- * * Clears the interrupts and all the CSR registers except the
- * following register bits:
- * - PCGCCTL.RstPdwnModule
- * - PCGCCTL.GateHclk
- * - PCGCCTL.PwrClmp
- * - PCGCCTL.StopPclk
- * - GUSBCFG.PhyLPwrClkSel
- * - GUSBCFG.DDRSel
- * - GUSBCFG.PHYSel
- * - GUSBCFG.FSIntf
- * - GUSBCFG.ULPI_UTMI_Sel
- * - GUSBCFG.PHYIf
- * - HCFG.FSLSPclkSel
- * - DCFG.DevSpd
- * * All module state machines (except the AHB Slave Unit) are
- * reset to the IDLE state, and all the transmit FIFOs and the
- * receive FIFO are flushed.
- * * Any transactions on the AHB Master are terminated as soon
- * as possible, after gracefully completing the last data phase of
- * an AHB transfer. Any transactions on the USB are terminated
- * immediately.
- * The application can write to this bit any time it wants to reset
- * the core. This is a self-clearing bit and the core clears this
- * bit after all the necessary logic is reset in the core, which
- * may take several clocks, depending on the current state of the
- * core. Once this bit is cleared, software should wait at least 3
- * PHY clocks before doing any access to the PHY domain
- * (synchronization delay). Software should also check that
- * bit 31 of this register is 1 (AHB Master is IDLE) before
- * starting any operation.
- * Typically software reset is used during software development
- * and also when you dynamically change the PHY selection bits
- * in the USB configuration registers listed above. When you
- * change the PHY, the corresponding clock for the PHY is
- * selected and used in the PHY domain. Once a new clock is
- * selected, the PHY domain has to be reset for proper operation.
- */
- struct cvmx_usbcx_grstctl_s {
- __BITFIELD_FIELD(u32 ahbidle : 1,
- __BITFIELD_FIELD(u32 dmareq : 1,
- __BITFIELD_FIELD(u32 reserved_11_29 : 19,
- __BITFIELD_FIELD(u32 txfnum : 5,
- __BITFIELD_FIELD(u32 txfflsh : 1,
- __BITFIELD_FIELD(u32 rxfflsh : 1,
- __BITFIELD_FIELD(u32 intknqflsh : 1,
- __BITFIELD_FIELD(u32 frmcntrrst : 1,
- __BITFIELD_FIELD(u32 hsftrst : 1,
- __BITFIELD_FIELD(u32 csftrst : 1,
- ;))))))))))
- } s;
-};
-
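The TxFFlsh/RxFFlsh rules above translate into a simple poll-write-poll sequence. A minimal sketch, assuming hypothetical usbc_read32()/usbc_write32() CSR accessors (the real driver has its own wrappers) and omitting the timeout a production version would add:

/* Flush one TxFIFO (fifo = 0..15) or all of them (fifo = 0x10). */
static void usbc_flush_tx_fifo(u64 grstctl_addr, unsigned int fifo)
{
	union cvmx_usbcx_grstctl r;

	/* Wait for the AHB master to go idle before touching the FIFO. */
	do {
		r.u32 = usbc_read32(grstctl_addr);	/* hypothetical accessor */
	} while (!r.s.ahbidle);

	/* Select the FIFO and request the flush in one write. */
	r.s.txfnum = fifo;
	r.s.txfflsh = 1;
	usbc_write32(grstctl_addr, r.u32);

	/*
	 * The core clears TxFFlsh after 8 clocks of the slower of
	 * phy_clk and hclk; wait for that before any other operation.
	 */
	do {
		r.u32 = usbc_read32(grstctl_addr);
	} while (r.s.txfflsh);
}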
-/**
- * cvmx_usbc#_grxfsiz
- *
- * Receive FIFO Size Register (GRXFSIZ)
- *
- * The application can program the RAM size that must be allocated to the
- * RxFIFO.
- */
-union cvmx_usbcx_grxfsiz {
- u32 u32;
- /**
- * struct cvmx_usbcx_grxfsiz_s
- * @rxfdep: RxFIFO Depth (RxFDep)
- * This value is in terms of 32-bit words.
- * * Minimum value is 16
- * * Maximum value is 32768
- */
- struct cvmx_usbcx_grxfsiz_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 rxfdep : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_grxstsph
- *
- * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
- *
- * A read of the Receive Status Read and Pop register returns the top data
- * entry of the RxFIFO and additionally pops it.
- * This description is only valid when the core is in Host Mode. For Device
- * Mode, use USBC_GRXSTSPD instead.
- * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
- * same offset in the O2P USB core. The offset difference shown in this
- * document is for software clarity and is actually ignored by the
- * hardware.
- */
-union cvmx_usbcx_grxstsph {
- u32 u32;
- /**
- * struct cvmx_usbcx_grxstsph_s
- * @pktsts: Packet Status (PktSts)
- * Indicates the status of the received packet
- * * 4'b0010: IN data packet received
- * * 4'b0011: IN transfer completed (triggers an interrupt)
- * * 4'b0101: Data toggle error (triggers an interrupt)
- * * 4'b0111: Channel halted (triggers an interrupt)
- * * Others: Reserved
- * @dpid: Data PID (DPID)
- * * 2'b00: DATA0
- * * 2'b10: DATA1
- * * 2'b01: DATA2
- * * 2'b11: MDATA
- * @bcnt: Byte Count (BCnt)
- * Indicates the byte count of the received IN data packet
- * @chnum: Channel Number (ChNum)
- * Indicates the channel number to which the current received
- * packet belongs.
- */
- struct cvmx_usbcx_grxstsph_s {
- __BITFIELD_FIELD(u32 reserved_21_31 : 11,
- __BITFIELD_FIELD(u32 pktsts : 4,
- __BITFIELD_FIELD(u32 dpid : 2,
- __BITFIELD_FIELD(u32 bcnt : 11,
- __BITFIELD_FIELD(u32 chnum : 4,
- ;)))))
- } s;
-};
-
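Because a read of this register pops the RxFIFO, a handler normally reads it once and dispatches on PktSts. A sketch under the same assumed usbc_read32() accessor:

/* Pop one RxFIFO status entry and decode it per the PktSts table above. */
static void usbc_pop_rx_status(u64 grxstsph_addr)
{
	union cvmx_usbcx_grxstsph rx;

	rx.u32 = usbc_read32(grxstsph_addr);	/* the read also pops the entry */

	switch (rx.s.pktsts) {
	case 0x2:	/* IN data packet received */
		pr_debug("ch%u: %u-byte IN packet, DPID %u\n",
			 rx.s.chnum, rx.s.bcnt, rx.s.dpid);
		break;
	case 0x3:	/* IN transfer completed */
	case 0x5:	/* data toggle error */
	case 0x7:	/* channel halted */
		/* These also raise channel interrupts; handle them there. */
		break;
	default:	/* reserved encodings */
		break;
	}
}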
-/**
- * cvmx_usbc#_gusbcfg
- *
- * Core USB Configuration Register (GUSBCFG)
- *
- * This register can be used to configure the core after power-on or after a
- * change to Host mode or Device mode. It contains USB and USB-PHY related
- * configuration parameters. The application must program this register before
- * starting any transactions on either the AHB or the USB. Do not make changes
- * to this register after the initial programming.
- */
-union cvmx_usbcx_gusbcfg {
- u32 u32;
- /**
- * struct cvmx_usbcx_gusbcfg_s
- * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
- * This bit is always 0x0.
- * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
- * Software should set this bit to 0x0.
- * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
- * FS and LS modes, the PHY can usually operate on a 48-MHz
- * clock to save power.
- * * 1'b0: 480-MHz Internal PLL clock
- * * 1'b1: 48-MHz External Clock
- * In 480-MHz mode, the UTMI interface operates at either 60 or
- * 30 MHz, depending upon whether 8- or 16-bit data width is
- * selected. In 48-MHz mode, the UTMI interface operates at 48
- * MHz in FS mode and at either 48 or 6 MHz in LS mode
- * (depending on the PHY vendor).
- * This bit drives the utmi_fsls_low_power core output signal, and
- * is valid only for UTMI+ PHYs.
- * @usbtrdtim: USB Turnaround Time (USBTrdTim)
- * Sets the turnaround time in PHY clocks.
- * Specifies the response time for a MAC request to the Packet
- * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
- * This must be programmed to 0x5.
- * @hnpcap: HNP-Capable (HNPCap)
- * This bit is always 0x0.
- * @srpcap: SRP-Capable (SRPCap)
- * This bit is always 0x0.
- * @ddrsel: ULPI DDR Select (DDRSel)
- * Software should set this bit to 0x0.
- * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
- * Transceiver Select (PHYSel)
- * Software should set this bit to 0x0.
- * @fsintf: Full-Speed Serial Interface Select (FSIntf)
- * Software should set this bit to 0x0.
- * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
- * This bit is always 0x0.
- * @phyif: PHY Interface (PHYIf)
- * This bit is always 0x1.
- * @toutcal: HS/FS Timeout Calibration (TOutCal)
- * The number of PHY clocks that the application programs in this
- * field is added to the high-speed/full-speed interpacket timeout
- * duration in the core to account for any additional delays
- * introduced by the PHY. This may be required, since the delay
- * introduced by the PHY in generating the linestate condition may
- * vary from one PHY to another.
- * The USB standard timeout value for high-speed operation is
- * 736 to 816 (inclusive) bit times. The USB standard timeout
- * value for full-speed operation is 16 to 18 (inclusive) bit
- * times. The application must program this field based on the
- * speed of enumeration. The number of bit times added per PHY
- * clock is:
- * High-speed operation:
- * * One 30-MHz PHY clock = 16 bit times
- * * One 60-MHz PHY clock = 8 bit times
- * Full-speed operation:
- * * One 30-MHz PHY clock = 0.4 bit times
- * * One 60-MHz PHY clock = 0.2 bit times
- * * One 48-MHz PHY clock = 0.25 bit times
- */
- struct cvmx_usbcx_gusbcfg_s {
- __BITFIELD_FIELD(u32 reserved_17_31 : 15,
- __BITFIELD_FIELD(u32 otgi2csel : 1,
- __BITFIELD_FIELD(u32 phylpwrclksel : 1,
- __BITFIELD_FIELD(u32 reserved_14_14 : 1,
- __BITFIELD_FIELD(u32 usbtrdtim : 4,
- __BITFIELD_FIELD(u32 hnpcap : 1,
- __BITFIELD_FIELD(u32 srpcap : 1,
- __BITFIELD_FIELD(u32 ddrsel : 1,
- __BITFIELD_FIELD(u32 physel : 1,
- __BITFIELD_FIELD(u32 fsintf : 1,
- __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1,
- __BITFIELD_FIELD(u32 phyif : 1,
- __BITFIELD_FIELD(u32 toutcal : 3,
- ;)))))))))))))
- } s;
-};
-
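Given the fixed values called out above (USBTrdTim = 0x5, the various select bits at 0x0), the one-time programming reduces to a read-modify-write before any AHB or USB traffic is started. A sketch with the same assumed accessors:

/* One-time GUSBCFG setup, done before starting any transactions. */
static void usbc_init_gusbcfg(u64 gusbcfg_addr)
{
	union cvmx_usbcx_gusbcfg cfg;

	cfg.u32 = usbc_read32(gusbcfg_addr);
	cfg.s.usbtrdtim = 0x5;		/* required MAC-to-PFC response time */
	cfg.s.phylpwrclksel = 0;	/* 480-MHz internal PLL clock */
	cfg.s.ddrsel = 0;
	cfg.s.physel = 0;
	cfg.s.fsintf = 0;
	usbc_write32(gusbcfg_addr, cfg.u32);
}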
-/**
- * cvmx_usbc#_haint
- *
- * Host All Channels Interrupt Register (HAINT)
- *
- * When a significant event occurs on a channel, the Host All Channels Interrupt
- * register interrupts the application using the Host Channels Interrupt bit of
- * the Core Interrupt register (GINTSTS.HChInt).
- * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
- * this register are set and cleared when the application sets and clears bits
- * in the corresponding Host Channel-n Interrupt register.
- */
-union cvmx_usbcx_haint {
- u32 u32;
- /**
- * struct cvmx_usbcx_haint_s
- * @haint: Channel Interrupts (HAINT)
- * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
- */
- struct cvmx_usbcx_haint_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 haint : 16,
- ;))
- } s;
-};
-
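A typical GINTSTS.HChInt handler reads HAINT once and fans out to per-channel handlers. A sketch, where usbc_handle_channel_irq() is a hypothetical per-channel handler:

/* Dispatch set HAINT bits; bit n corresponds to channel n. */
static void usbc_dispatch_channel_irqs(u64 haint_addr)
{
	union cvmx_usbcx_haint haint;
	unsigned int ch;

	haint.u32 = usbc_read32(haint_addr);
	for (ch = 0; ch < 16; ch++)
		if (haint.s.haint & (1u << ch))
			usbc_handle_channel_irq(ch);
}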
-/**
- * cvmx_usbc#_haintmsk
- *
- * Host All Channels Interrupt Mask Register (HAINTMSK)
- *
- * The Host All Channel Interrupt Mask register works with the Host All Channel
- * Interrupt register to interrupt the application when an event occurs on a
- * channel. There is one interrupt mask bit per channel, up to a maximum of 16
- * bits.
- * Mask interrupt: 1'b0. Unmask interrupt: 1'b1.
- */
-union cvmx_usbcx_haintmsk {
- u32 u32;
- /**
- * struct cvmx_usbcx_haintmsk_s
- * @haintmsk: Channel Interrupt Mask (HAINTMsk)
- * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
- */
- struct cvmx_usbcx_haintmsk_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 haintmsk : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcchar#
- *
- * Host Channel-n Characteristics Register (HCCHAR)
- *
- */
-union cvmx_usbcx_hccharx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hccharx_s
- * @chena: Channel Enable (ChEna)
- * This field is set by the application and cleared by the OTG
- * host.
- * * 1'b0: Channel disabled
- * * 1'b1: Channel enabled
- * @chdis: Channel Disable (ChDis)
- * The application sets this bit to stop transmitting/receiving
- * data on a channel, even before the transfer for that channel is
- * complete. The application must wait for the Channel Disabled
- * interrupt before treating the channel as disabled.
- * @oddfrm: Odd Frame (OddFrm)
- * This field is set (reset) by the application to indicate that
- * the OTG host must perform a transfer in an odd (micro)frame.
- * This field is applicable for only periodic (isochronous and
- * interrupt) transactions.
- * * 1'b0: Even (micro)frame
- * * 1'b1: Odd (micro)frame
- * @devaddr: Device Address (DevAddr)
- * This field selects the specific device serving as the data
- * source or sink.
- * @ec: Multi Count (MC) / Error Count (EC)
- * When the Split Enable bit of the Host Channel-n Split Control
- * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
- * to the host the number of transactions that should be executed
- * per microframe for this endpoint.
- * * 2'b00: Reserved. This field yields undefined results.
- * * 2'b01: 1 transaction
- * * 2'b10: 2 transactions to be issued for this endpoint per
- * microframe
- * * 2'b11: 3 transactions to be issued for this endpoint per
- * microframe
- * When HCSPLTn.SpltEna is set (1'b1), this field indicates the
- * number of immediate retries to be performed for periodic split
- * transactions on transaction errors. This field must be set to at
- * least 2'b01.
- * @eptype: Endpoint Type (EPType)
- * Indicates the transfer type selected.
- * * 2'b00: Control
- * * 2'b01: Isochronous
- * * 2'b10: Bulk
- * * 2'b11: Interrupt
- * @lspddev: Low-Speed Device (LSpdDev)
- * This field is set by the application to indicate that this
- * channel is communicating to a low-speed device.
- * @epdir: Endpoint Direction (EPDir)
- * Indicates whether the transaction is IN or OUT.
- * * 1'b0: OUT
- * * 1'b1: IN
- * @epnum: Endpoint Number (EPNum)
- * Indicates the endpoint number on the device serving as the
- * data source or sink.
- * @mps: Maximum Packet Size (MPS)
- * Indicates the maximum packet size of the associated endpoint.
- */
- struct cvmx_usbcx_hccharx_s {
- __BITFIELD_FIELD(u32 chena : 1,
- __BITFIELD_FIELD(u32 chdis : 1,
- __BITFIELD_FIELD(u32 oddfrm : 1,
- __BITFIELD_FIELD(u32 devaddr : 7,
- __BITFIELD_FIELD(u32 ec : 2,
- __BITFIELD_FIELD(u32 eptype : 2,
- __BITFIELD_FIELD(u32 lspddev : 1,
- __BITFIELD_FIELD(u32 reserved_16_16 : 1,
- __BITFIELD_FIELD(u32 epdir : 1,
- __BITFIELD_FIELD(u32 epnum : 4,
- __BITFIELD_FIELD(u32 mps : 11,
- ;)))))))))))
- } s;
-};
-
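Putting the fields together for a common case, the value for a bulk IN endpoint might be built as below (illustrative only; devaddr, ep and mps come from the enumerated device):

/* Build HCCHAR for a bulk IN endpoint and arm the channel. */
static u32 usbc_bulk_in_hcchar(unsigned int devaddr, unsigned int ep, u32 mps)
{
	union cvmx_usbcx_hccharx hc = { .u32 = 0 };

	hc.s.devaddr = devaddr;
	hc.s.epnum = ep;
	hc.s.epdir = 1;		/* 1'b1: IN */
	hc.s.eptype = 2;	/* 2'b10: bulk */
	hc.s.ec = 1;		/* one transaction per endpoint, no split */
	hc.s.mps = mps;
	hc.s.chena = 1;		/* set by the application, cleared by the host */
	return hc.u32;
}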
-/**
- * cvmx_usbc#_hcfg
- *
- * Host Configuration Register (HCFG)
- *
- * This register configures the core after power-on. Do not make changes to this
- * register after initializing the host.
- */
-union cvmx_usbcx_hcfg {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcfg_s
- * @fslssupp: FS- and LS-Only Support (FSLSSupp)
- * The application uses this bit to control the core's enumeration
- * speed. Using this bit, the application can make the core
- * enumerate as a FS host, even if the connected device supports
- * HS traffic. Do not make changes to this field after initial
- * programming.
- * * 1'b0: HS/FS/LS, based on the maximum speed supported by
- * the connected device
- * * 1'b1: FS/LS-only, even if the connected device can support HS
- * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel)
- * When the core is in FS Host mode
- * * 2'b00: PHY clock is running at 30/60 MHz
- * * 2'b01: PHY clock is running at 48 MHz
- * * Others: Reserved
- * When the core is in LS Host mode
- * * 2'b00: PHY clock is running at 30/60 MHz. When the
- * UTMI+/ULPI PHY Low Power mode is not selected, use
- * 30/60 MHz.
- * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+
- * PHY Low Power mode is selected, use 48MHz if the PHY
- * supplies a 48 MHz clock during LS mode.
- * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
- * use 6 MHz when the UTMI+ PHY Low Power mode is
- * selected and the PHY supplies a 6 MHz clock during LS
- * mode. If you select a 6 MHz clock during LS mode, you must
- * do a soft reset.
- * * 2'b11: Reserved
- */
- struct cvmx_usbcx_hcfg_s {
- __BITFIELD_FIELD(u32 reserved_3_31 : 29,
- __BITFIELD_FIELD(u32 fslssupp : 1,
- __BITFIELD_FIELD(u32 fslspclksel : 2,
- ;)))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcint#
- *
- * Host Channel-n Interrupt Register (HCINT)
- *
- * This register indicates the status of a channel with respect to USB- and
- * AHB-related events. The application must read this register when the Host
- * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is
- * set. Before the application can read this register, it must first read
- * the Host All Channels Interrupt (HAINT) register to get the exact channel
- * number for the Host Channel-n Interrupt register. The application must clear
- * the appropriate bit in this register to clear the corresponding bits in the
- * HAINT and GINTSTS registers.
- */
-union cvmx_usbcx_hcintx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcintx_s
- * @datatglerr: Data Toggle Error (DataTglErr)
- * @frmovrun: Frame Overrun (FrmOvrun)
- * @bblerr: Babble Error (BblErr)
- * @xacterr: Transaction Error (XactErr)
- * @nyet: NYET Response Received Interrupt (NYET)
- * @ack: ACK Response Received Interrupt (ACK)
- * @nak: NAK Response Received Interrupt (NAK)
- * @stall: STALL Response Received Interrupt (STALL)
- * @ahberr: This bit is always 0x0.
- * @chhltd: Channel Halted (ChHltd)
- * Indicates the transfer completed abnormally, either because of
- * a USB transaction error or in response to a disable request by
- * the application.
- * @xfercompl: Transfer Completed (XferCompl)
- * Transfer completed normally without any errors.
- */
- struct cvmx_usbcx_hcintx_s {
- __BITFIELD_FIELD(u32 reserved_11_31 : 21,
- __BITFIELD_FIELD(u32 datatglerr : 1,
- __BITFIELD_FIELD(u32 frmovrun : 1,
- __BITFIELD_FIELD(u32 bblerr : 1,
- __BITFIELD_FIELD(u32 xacterr : 1,
- __BITFIELD_FIELD(u32 nyet : 1,
- __BITFIELD_FIELD(u32 ack : 1,
- __BITFIELD_FIELD(u32 nak : 1,
- __BITFIELD_FIELD(u32 stall : 1,
- __BITFIELD_FIELD(u32 ahberr : 1,
- __BITFIELD_FIELD(u32 chhltd : 1,
- __BITFIELD_FIELD(u32 xfercompl : 1,
- ;))))))))))))
- } s;
-};
-
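Since ChHltd is the summary bit and the error bits explain why the channel stopped, completion checking can be factored as below. This is only a sketch; the real driver's channel state machine is more involved:

/* True only when the channel halted with a clean XferCompl. */
static bool usbc_channel_completed_ok(union cvmx_usbcx_hcintx hcint)
{
	if (!hcint.s.chhltd)
		return false;	/* channel has not halted yet */

	return hcint.s.xfercompl && !hcint.s.xacterr && !hcint.s.bblerr &&
	       !hcint.s.stall && !hcint.s.datatglerr;
}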
-/**
- * cvmx_usbc#_hcintmsk#
- *
- * Host Channel-n Interrupt Mask Register (HCINTMSKn)
- *
- * This register reflects the mask for each channel status described in the
- * previous section.
- * Mask interrupt: 1'b0. Unmask interrupt: 1'b1.
- */
-union cvmx_usbcx_hcintmskx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcintmskx_s
- * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
- * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
- * @bblerrmsk: Babble Error Mask (BblErrMsk)
- * @xacterrmsk: Transaction Error Mask (XactErrMsk)
- * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
- * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
- * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
- * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
- * @ahberrmsk: AHB Error Mask (AHBErrMsk)
- * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
- * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
- */
- struct cvmx_usbcx_hcintmskx_s {
- __BITFIELD_FIELD(u32 reserved_11_31 : 21,
- __BITFIELD_FIELD(u32 datatglerrmsk : 1,
- __BITFIELD_FIELD(u32 frmovrunmsk : 1,
- __BITFIELD_FIELD(u32 bblerrmsk : 1,
- __BITFIELD_FIELD(u32 xacterrmsk : 1,
- __BITFIELD_FIELD(u32 nyetmsk : 1,
- __BITFIELD_FIELD(u32 ackmsk : 1,
- __BITFIELD_FIELD(u32 nakmsk : 1,
- __BITFIELD_FIELD(u32 stallmsk : 1,
- __BITFIELD_FIELD(u32 ahberrmsk : 1,
- __BITFIELD_FIELD(u32 chhltdmsk : 1,
- __BITFIELD_FIELD(u32 xfercomplmsk : 1,
- ;))))))))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hcsplt#
- *
- * Host Channel-n Split Control Register (HCSPLT)
- *
- */
-union cvmx_usbcx_hcspltx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hcspltx_s
- * @spltena: Split Enable (SpltEna)
- * The application sets this field to indicate that this channel is
- * enabled to perform split transactions.
- * @compsplt: Do Complete Split (CompSplt)
- * The application sets this field to request the OTG host to
- * perform a complete split transaction.
- * @xactpos: Transaction Position (XactPos)
- * This field is used to determine whether to send all, first,
- * middle, or last payloads with each OUT transaction.
- * * 2'b11: All. This is the entire data payload of this
- * transaction (which is less than or equal to 188 bytes).
- * * 2'b10: Begin. This is the first data payload of this
- * transaction (which is larger than 188 bytes).
- * * 2'b00: Mid. This is the middle payload of this transaction
- * (which is larger than 188 bytes).
- * * 2'b01: End. This is the last payload of this transaction
- * (which is larger than 188 bytes).
- * @hubaddr: Hub Address (HubAddr)
- * This field holds the device address of the transaction
- * translator's hub.
- * @prtaddr: Port Address (PrtAddr)
- * This field is the port number of the recipient transaction
- * translator.
- */
- struct cvmx_usbcx_hcspltx_s {
- __BITFIELD_FIELD(u32 spltena : 1,
- __BITFIELD_FIELD(u32 reserved_17_30 : 14,
- __BITFIELD_FIELD(u32 compsplt : 1,
- __BITFIELD_FIELD(u32 xactpos : 2,
- __BITFIELD_FIELD(u32 hubaddr : 7,
- __BITFIELD_FIELD(u32 prtaddr : 7,
- ;))))))
- } s;
-};
-
-/**
- * cvmx_usbc#_hctsiz#
- *
- * Host Channel-n Transfer Size Register (HCTSIZ)
- *
- */
-union cvmx_usbcx_hctsizx {
- u32 u32;
- /**
- * struct cvmx_usbcx_hctsizx_s
- * @dopng: Do Ping (DoPng)
- * Setting this field to 1 directs the host to perform the PING
- * protocol.
- * @pid: PID (Pid)
- * The application programs this field with the type of PID to use
- * for the initial transaction. The host will maintain this field
- * for the rest of the transfer.
- * * 2'b00: DATA0
- * * 2'b01: DATA2
- * * 2'b10: DATA1
- * * 2'b11: MDATA (non-control)/SETUP (control)
- * @pktcnt: Packet Count (PktCnt)
- * This field is programmed by the application with the expected
- * number of packets to be transmitted (OUT) or received (IN).
- * The host decrements this count on every successful
- * transmission or reception of an OUT/IN packet. Once this count
- * reaches zero, the application is interrupted to indicate normal
- * completion.
- * @xfersize: Transfer Size (XferSize)
- * For an OUT, this field is the number of data bytes the host will
- * send during the transfer.
- * For an IN, this field is the buffer size that the application
- * has reserved for the transfer. The application is expected to
- * program this field as an integer multiple of the maximum packet
- * size for IN transactions (periodic and non-periodic).
- */
- struct cvmx_usbcx_hctsizx_s {
- __BITFIELD_FIELD(u32 dopng : 1,
- __BITFIELD_FIELD(u32 pid : 2,
- __BITFIELD_FIELD(u32 pktcnt : 10,
- __BITFIELD_FIELD(u32 xfersize : 19,
- ;))))
- } s;
-};
-
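The XferSize rule above (IN transfers must be a whole multiple of the maximum packet size) suggests a helper along these lines. DIV_ROUND_UP is the kernel's rounding macro; the PID encodings are the ones listed above:

/* Build HCTSIZ for a transfer of 'bytes' with max packet size 'mps'. */
static u32 usbc_make_hctsiz(u32 bytes, u32 mps, bool is_in, u32 pid)
{
	union cvmx_usbcx_hctsizx ts = { .u32 = 0 };
	u32 packets = DIV_ROUND_UP(bytes, mps);

	if (!packets)
		packets = 1;	/* a zero-length transfer still needs one packet */

	ts.s.pktcnt = packets;
	/* IN: round the buffer size up to a multiple of mps. */
	ts.s.xfersize = is_in ? packets * mps : bytes;
	ts.s.pid = pid;		/* 2'b00 DATA0, 2'b10 DATA1, 2'b11 MDATA/SETUP */
	return ts.u32;
}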
-/**
- * cvmx_usbc#_hfir
- *
- * Host Frame Interval Register (HFIR)
- *
- * This register stores the frame interval information for the current speed to
- * which the O2P USB core has enumerated.
- */
-union cvmx_usbcx_hfir {
- u32 u32;
- /**
- * struct cvmx_usbcx_hfir_s
- * @frint: Frame Interval (FrInt)
- * The value that the application programs to this field specifies
- * the interval between two consecutive SOFs (FS) or micro-
- * SOFs (HS) or Keep-Alive tokens (HS). This field contains the
- * number of PHY clocks that constitute the required frame
- * interval. The default value set in this field corresponds to FS
- * operation with a 60-MHz PHY clock. The application can
- * write a value to this register only after the Port Enable bit of
- * the Host Port Control and Status register (HPRT.PrtEnaPort)
- * has been set. If no value is programmed, the core calculates
- * the value based on the PHY clock specified in the FS/LS PHY
- * Clock Select field of the Host Configuration register
- * (HCFG.FSLSPclkSel). Do not change the value of this field
- * after the initial configuration.
- * * HS: 125 us worth of PHY clocks
- * * FS/LS: 1 ms worth of PHY clocks
- */
- struct cvmx_usbcx_hfir_s {
- __BITFIELD_FIELD(u32 reserved_16_31 : 16,
- __BITFIELD_FIELD(u32 frint : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hfnum
- *
- * Host Frame Number/Frame Time Remaining Register (HFNUM)
- *
- * This register indicates the current frame number.
- * It also indicates the time remaining (in terms of the number of PHY clocks)
- * in the current (micro)frame.
- */
-union cvmx_usbcx_hfnum {
- u32 u32;
- /**
- * struct cvmx_usbcx_hfnum_s
- * @frrem: Frame Time Remaining (FrRem)
- * Indicates the amount of time remaining in the current
- * microframe (HS) or frame (FS/LS), in terms of PHY clocks.
- * This field decrements on each PHY clock. When it reaches
- * zero, this field is reloaded with the value in the Frame
- * Interval register and a new SOF is transmitted on the USB.
- * @frnum: Frame Number (FrNum)
- * This field increments when a new SOF is transmitted on the
- * USB, and is reset to 0 when it reaches 16'h3FFF.
- */
- struct cvmx_usbcx_hfnum_s {
- __BITFIELD_FIELD(u32 frrem : 16,
- __BITFIELD_FIELD(u32 frnum : 16,
- ;))
- } s;
-};
-
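One use of FrNum is scheduling periodic transfers: HCCHAR.OddFrm should match the parity of the (micro)frame in which the transfer will actually be issued, typically the frame after the current one. A plausible helper, under the same assumed accessor:

/* Parity of the next (micro)frame, for programming HCCHAR.OddFrm. */
static bool usbc_next_frame_is_odd(u64 hfnum_addr)
{
	union cvmx_usbcx_hfnum hfnum;

	hfnum.u32 = usbc_read32(hfnum_addr);
	return !(hfnum.s.frnum & 1);	/* current even -> next is odd */
}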
-/**
- * cvmx_usbc#_hprt
- *
- * Host Port Control and Status Register (HPRT)
- *
- * This register is available in both Host and Device modes.
- * Currently, the OTG Host supports only one port.
- * A single register holds USB port-related information such as USB reset,
- * enable, suspend, resume, connect status, and test mode for each port. The
- * R_SS_WC bits in this register can trigger an interrupt to the application
- * through the Host Port Interrupt bit of the Core Interrupt register
- * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
- * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
- * the application must write a 1 to the bit to clear the interrupt.
- */
-union cvmx_usbcx_hprt {
- u32 u32;
- /**
- * struct cvmx_usbcx_hprt_s
- * @prtspd: Port Speed (PrtSpd)
- * Indicates the speed of the device attached to this port.
- * * 2'b00: High speed
- * * 2'b01: Full speed
- * * 2'b10: Low speed
- * * 2'b11: Reserved
- * @prttstctl: Port Test Control (PrtTstCtl)
- * The application writes a nonzero value to this field to put
- * the port into a Test mode, and the corresponding pattern is
- * signaled on the port.
- * * 4'b0000: Test mode disabled
- * * 4'b0001: Test_J mode
- * * 4'b0010: Test_K mode
- * * 4'b0011: Test_SE0_NAK mode
- * * 4'b0100: Test_Packet mode
- * * 4'b0101: Test_Force_Enable
- * * Others: Reserved
- * PrtSpd must be zero (i.e. the interface must be in high-speed
- * mode) to use the PrtTstCtl test modes.
- * @prtpwr: Port Power (PrtPwr)
- * The application uses this field to control power to this port,
- * and the core clears this bit on an overcurrent condition.
- * * 1'b0: Power off
- * * 1'b1: Power on
- * @prtlnsts: Port Line Status (PrtLnSts)
- * Indicates the current logic level of the USB data lines:
- * * Bit [10]: Logic level of D-
- * * Bit [11]: Logic level of D+
- * @prtrst: Port Reset (PrtRst)
- * When the application sets this bit, a reset sequence is
- * started on this port. The application must time the reset
- * period and clear this bit after the reset sequence is
- * complete.
- * * 1'b0: Port not in reset
- * * 1'b1: Port in reset
- * The application must leave this bit set for at least the
- * minimum duration listed below to start a reset on the
- * port. The application can leave it set for another 10 ms in
- * addition to the required minimum duration, before clearing
- * the bit, even though there is no maximum limit set by the
- * USB standard.
- * * High speed: 50 ms
- * * Full speed/Low speed: 10 ms
- * @prtsusp: Port Suspend (PrtSusp)
- * The application sets this bit to put this port in Suspend
- * mode. The core only stops sending SOFs when this is set.
- * To stop the PHY clock, the application must set the Port
- * Clock Stop bit, which will assert the suspend input pin of
- * the PHY.
- * The read value of this bit reflects the current suspend
- * status of the port. This bit is cleared by the core after a
- * remote wakeup signal is detected or the application sets
- * the Port Reset bit or Port Resume bit in this register or the
- * Resume/Remote Wakeup Detected Interrupt bit or
- * Disconnect Detected Interrupt bit in the Core Interrupt
- * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
- * respectively).
- * * 1'b0: Port not in Suspend mode
- * * 1'b1: Port in Suspend mode
- * @prtres: Port Resume (PrtRes)
- * The application sets this bit to drive resume signaling on
- * the port. The core continues to drive the resume signal
- * until the application clears this bit.
- * If the core detects a USB remote wakeup sequence, as
- * indicated by the Port Resume/Remote Wakeup Detected
- * Interrupt bit of the Core Interrupt register
- * (GINTSTS.WkUpInt), the core starts driving resume
- * signaling without application intervention and clears this bit
- * when it detects a disconnect condition. The read value of
- * this bit indicates whether the core is currently driving
- * resume signaling.
- * * 1'b0: No resume driven
- * * 1'b1: Resume driven
- * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng)
- * The core sets this bit when the status of the Port
- * Overcurrent Active bit (bit 4) in this register changes.
- * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct)
- * Indicates the overcurrent condition of the port.
- * * 1'b0: No overcurrent condition
- * * 1'b1: Overcurrent condition
- * @prtenchng: Port Enable/Disable Change (PrtEnChng)
- * The core sets this bit when the status of the Port Enable bit
- * [2] of this register changes.
- * @prtena: Port Enable (PrtEna)
- * A port is enabled only by the core after a reset sequence,
- * and is disabled by an overcurrent condition, a disconnect
- * condition, or by the application clearing this bit. The
- * application cannot set this bit by a register write. It can only
- * clear it to disable the port. This bit does not trigger any
- * interrupt to the application.
- * * 1'b0: Port disabled
- * * 1'b1: Port enabled
- * @prtconndet: Port Connect Detected (PrtConnDet)
- * The core sets this bit when a device connection is detected
- * to trigger an interrupt to the application using the Host Port
- * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
- * The application must write a 1 to this bit to clear the
- * interrupt.
- * @prtconnsts: Port Connect Status (PrtConnSts)
- * * 0: No device is attached to the port.
- * * 1: A device is attached to the port.
- */
- struct cvmx_usbcx_hprt_s {
- __BITFIELD_FIELD(u32 reserved_19_31 : 13,
- __BITFIELD_FIELD(u32 prtspd : 2,
- __BITFIELD_FIELD(u32 prttstctl : 4,
- __BITFIELD_FIELD(u32 prtpwr : 1,
- __BITFIELD_FIELD(u32 prtlnsts : 2,
- __BITFIELD_FIELD(u32 reserved_9_9 : 1,
- __BITFIELD_FIELD(u32 prtrst : 1,
- __BITFIELD_FIELD(u32 prtsusp : 1,
- __BITFIELD_FIELD(u32 prtres : 1,
- __BITFIELD_FIELD(u32 prtovrcurrchng : 1,
- __BITFIELD_FIELD(u32 prtovrcurract : 1,
- __BITFIELD_FIELD(u32 prtenchng : 1,
- __BITFIELD_FIELD(u32 prtena : 1,
- __BITFIELD_FIELD(u32 prtconndet : 1,
- __BITFIELD_FIELD(u32 prtconnsts : 1,
- ;)))))))))))))))
- } s;
-};
-
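Because several HPRT bits are write-1-to-clear status bits (PrtConnDet, PrtEnChng, PrtOvrCurrChng) and a write to PrtEna can only disable the port, any read-modify-write should mask those bits first, or it may clear pending status or disable the port. A reset-sequence sketch (msleep() from <linux/delay.h>; accessors as assumed above):

/* Run the port reset sequence described for PrtRst. */
static void usbc_port_reset(u64 hprt_addr)
{
	union cvmx_usbcx_hprt hprt;

	hprt.u32 = usbc_read32(hprt_addr);
	/* Mask the W1C status bits and PrtEna before writing back. */
	hprt.s.prtconndet = 0;
	hprt.s.prtenchng = 0;
	hprt.s.prtovrcurrchng = 0;
	hprt.s.prtena = 0;

	hprt.s.prtrst = 1;
	usbc_write32(hprt_addr, hprt.u32);

	msleep(50);		/* >= 50 ms covers the high-speed minimum */

	hprt.s.prtrst = 0;
	usbc_write32(hprt_addr, hprt.u32);
}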
-/**
- * cvmx_usbc#_hptxfsiz
- *
- * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
- *
- * This register holds the size and the memory start address of the Periodic
- * TxFIFO, as shown in Figures 310 and 311.
- */
-union cvmx_usbcx_hptxfsiz {
- u32 u32;
- /**
- * struct cvmx_usbcx_hptxfsiz_s
- * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
- * This value is in terms of 32-bit words.
- * * Minimum value is 16
- * * Maximum value is 32768
- * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
- */
- struct cvmx_usbcx_hptxfsiz_s {
- __BITFIELD_FIELD(u32 ptxfsize : 16,
- __BITFIELD_FIELD(u32 ptxfstaddr : 16,
- ;))
- } s;
-};
-
-/**
- * cvmx_usbc#_hptxsts
- *
- * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
- *
- * This read-only register contains the free space information for the Periodic
- * TxFIFO and the Periodic Transmit Request Queue.
- */
-union cvmx_usbcx_hptxsts {
- u32 u32;
- /**
- * struct cvmx_usbcx_hptxsts_s
- * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
- * This indicates the entry in the Periodic Tx Request Queue that
- * is currently being processed by the MAC.
- * This register is used for debugging.
- * * Bit [31]: Odd/Even (micro)frame
- * - 1'b0: send in even (micro)frame
- * - 1'b1: send in odd (micro)frame
- * * Bits [30:27]: Channel/endpoint number
- * * Bits [26:25]: Type
- * - 2'b00: IN/OUT
- * - 2'b01: Zero-length packet
- * - 2'b10: CSPLIT
- * - 2'b11: Disable channel command
- * * Bit [24]: Terminate (last entry for the selected
- * channel/endpoint)
- * @ptxqspcavail: Periodic Transmit Request Queue Space Available
- * (PTxQSpcAvail)
- * Indicates the number of free locations available to be written
- * in the Periodic Transmit Request Queue. This queue holds both
- * IN and OUT requests.
- * * 8'h0: Periodic Transmit Request Queue is full
- * * 8'h1: 1 location available
- * * 8'h2: 2 locations available
- * * n: n locations available (0..8)
- * * Others: Reserved
- * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
- * (PTxFSpcAvail)
- * Indicates the number of free locations available to be written
- * to in the Periodic TxFIFO.
- * Values are in terms of 32-bit words
- * * 16'h0: Periodic TxFIFO is full
- * * 16'h1: 1 word available
- * * 16'h2: 2 words available
- * * 16'hn: n words available (n = 0..32768)
- * * 16'h8000: 32768 words available
- * * Others: Reserved
- */
- struct cvmx_usbcx_hptxsts_s {
- __BITFIELD_FIELD(u32 ptxqtop : 8,
- __BITFIELD_FIELD(u32 ptxqspcavail : 8,
- __BITFIELD_FIELD(u32 ptxfspcavail : 16,
- ;)))
- } s;
-};
-
-/**
- * cvmx_usbn#_clk_ctl
- *
- * USBN_CLK_CTL = USBN's Clock Control
- *
- * This register is used to control the frequency of the hclk and the
- * hreset and phy_rst signals.
- */
-union cvmx_usbnx_clk_ctl {
- u64 u64;
- /**
- * struct cvmx_usbnx_clk_ctl_s
- * @divide2: The 'hclk' used by the USB subsystem is derived
- * from the eclk.
- * Also see the field DIVIDE. DIVIDE2<1> must currently
- * be zero because it is not implemented, so the maximum
- * ratio of eclk/hclk is currently 16.
- * The actual divide number for hclk is:
- * (DIVIDE2 + 1) * (DIVIDE + 1)
- * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
- * generate the hclk in the USB Subsystem is held
- * in reset. This bit must be set to '0' before
- * changing the value of DIVIDE in this register.
- * The reset to the HCLK_DIVIDER is also asserted
- * when core reset is asserted.
- * @p_x_on: Force USB-PHY on during suspend.
- * '1' USB-PHY XO block is powered-down during
- * suspend.
- * '0' USB-PHY XO block is powered-up during
- * suspend.
- * The value of this field must be set while POR is
- * active.
- * @p_rtype: PHY reference clock type
- * On CN50XX/CN52XX/CN56XX the values are:
- * '0' The USB-PHY uses a 12MHz crystal as a clock source
- * at the USB_XO and USB_XI pins.
- * '1' Reserved.
- * '2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the
- * USB_XO pin. USB_XI should be tied to ground in this
- * case.
- * '3' Reserved.
- * On CN3xxx bits 14 and 15 are p_xenbn and p_rclk and values are:
- * '0' Reserved.
- * '1' Reserved.
- * '2' The PHY PLL uses the XO block output as a reference.
- * The XO block uses an external clock supplied on the
- * XO pin. USB_XI should be tied to ground for this
- * usage.
- * '3' The XO block uses the clock from a crystal.
- * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
- * remain powered in Suspend Mode.
- * '1' The USB-PHY XO Bias, Bandgap and PLL are
- * powered down in suspend mode.
- * The value of this field must be set while POR is
- * active.
- * @p_c_sel: Phy clock speed select.
- * Selects the reference clock / crystal frequency.
- * '11': Reserved
- * '10': 48 MHz (reserved when a crystal is used)
- * '01': 24 MHz (reserved when a crystal is used)
- * '00': 12 MHz
- * The value of this field must be set while POR is
- * active.
- * NOTE: if a crystal is used as a reference clock,
- * this field must select 12 MHz ('00').
- * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
- * @sd_mode: Scaledown mode for the USBC. Controls timing events
- * in the USBC; for normal operation this must be '0'.
- * @s_bist: Starts BIST on the hclk memories on the '0'
- * to '1' transition.
- * @por: Power On Reset for the PHY.
- * Resets all the PHY's registers and state machines.
- * @enable: When '1' allows the generation of the hclk. When
- * '0' the hclk will not be generated. See the DIVIDE
- * field of this register.
- * @prst: When this field is '0' the reset associated with
- * the phy_clk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until at least 6 clocks (hclk or phy_clk,
- * whichever is slower) have passed. Under normal
- * operation, once this bit is set to '1' it should not
- * be set to '0'.
- * @hrst: When this field is '0' the reset associated with
- * the hclk functionality in the USB Subsystem is
- * held in reset. This bit should not be set to '1'
- * until 12 ms after phy_clk is stable. Under normal
- * operation, once this bit is set to '1' it should
- * not be set to '0'.
- * @divide: The frequency of 'hclk' used by the USB subsystem
- * is the eclk frequency divided by the value of
- * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
- * DIVIDE2 of this register.
- * The hclk frequency should be less than 125 MHz.
- * After writing a value to this field, software should
- * read the field back to confirm the value written.
- * The ENABLE field of this register should not be set
- * until AFTER this field is set and then read.
- */
- struct cvmx_usbnx_clk_ctl_s {
- __BITFIELD_FIELD(u64 reserved_20_63 : 44,
- __BITFIELD_FIELD(u64 divide2 : 2,
- __BITFIELD_FIELD(u64 hclk_rst : 1,
- __BITFIELD_FIELD(u64 p_x_on : 1,
- __BITFIELD_FIELD(u64 p_rtype : 2,
- __BITFIELD_FIELD(u64 p_com_on : 1,
- __BITFIELD_FIELD(u64 p_c_sel : 2,
- __BITFIELD_FIELD(u64 cdiv_byp : 1,
- __BITFIELD_FIELD(u64 sd_mode : 2,
- __BITFIELD_FIELD(u64 s_bist : 1,
- __BITFIELD_FIELD(u64 por : 1,
- __BITFIELD_FIELD(u64 enable : 1,
- __BITFIELD_FIELD(u64 prst : 1,
- __BITFIELD_FIELD(u64 hrst : 1,
- __BITFIELD_FIELD(u64 divide : 3,
- ;)))))))))))))))
- } s;
-};
-
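The DIVIDE/ENABLE ordering rules above (divider held in reset while DIVIDE changes, read-back before ENABLE) imply a sequence like the following. usbn_read64()/usbn_write64() are assumed 64-bit CSR accessors:

/* Program the eclk -> hclk divider, then enable hclk generation. */
static void usbn_set_hclk_divider(u64 clk_ctl_addr, unsigned int div)
{
	union cvmx_usbnx_clk_ctl ctl;

	/* Hold the divider in reset (HCLK_RST = 0) while DIVIDE changes. */
	ctl.u64 = usbn_read64(clk_ctl_addr);
	ctl.s.enable = 0;
	ctl.s.hclk_rst = 0;
	usbn_write64(clk_ctl_addr, ctl.u64);

	ctl.s.divide = div;
	ctl.s.divide2 = 0;
	usbn_write64(clk_ctl_addr, ctl.u64);
	ctl.u64 = usbn_read64(clk_ctl_addr);	/* read back before ENABLE */

	ctl.s.hclk_rst = 1;			/* release the divider reset */
	ctl.s.enable = 1;
	usbn_write64(clk_ctl_addr, ctl.u64);
}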
-/**
- * cvmx_usbn#_usbp_ctl_status
- *
- * USBN_USBP_CTL_STATUS = USBP Control And Status Register
- *
- * Contains general control and status information for the USBN block.
- */
-union cvmx_usbnx_usbp_ctl_status {
- u64 u64;
- /**
- * struct cvmx_usbnx_usbp_ctl_status_s
- * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
- * @txvreftune: HS DC Voltage Level Adjustment
- * @txfslstune: FS/LS Source Impedance Adjustment
- * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
- * @sqrxtune: Squelch Threshold Adjustment
- * @compdistune: Disconnect Threshold Adjustment
- * @otgtune: VBUS Valid Threshold Adjustment
- * @otgdisable: OTG Block Disable
- * @portreset: Per_Port Reset
- * @drvvbus: Drive VBUS
- * @lsbist: Low-Speed BIST Enable.
- * @fsbist: Full-Speed BIST Enable.
- * @hsbist: High-Speed BIST Enable.
- * @bist_done: PHY Bist Done.
- * Asserted at the end of the PHY BIST sequence.
- * @bist_err: PHY Bist Error.
- * Indicates an internal error was detected during
- * the BIST sequence.
- * @tdata_out: PHY Test Data Out.
- * Presents either internally generated signals or
- * test register contents, based upon the value of
- * test_data_out_sel.
- * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
- * Normally should be set to zero.
- * When customers have no intent to use USB PHY
- * interface, they should:
- * - still provide 3.3V to USB_VDD33, and
- * - tie USB_REXT to 3.3V supply, and
- * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
- * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
- * @dma_bmode: When set to 1 the L2C DMA address will be updated
- * with byte-counts between packets. When set to 0
- * the L2C DMA address is incremented to the next
- * 4-byte aligned address after adding byte-count.
- * @usbc_end: Big-endian input to the USB Core. This should be
- * set to '0' for normal operation.
- * @usbp_bist: This is cleared to '0' to run BIST on the USBP (PHY).
- * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
- * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D+ line. '1': pull-down resistance is connected
- * to D+. '0': pull-down resistance is not connected
- * to D+. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
- * This signal enables the pull-down resistance on
- * the D- line. '1': pull-down resistance is connected
- * to D-. '0': pull-down resistance is not connected
- * to D-. When an A/B device is acting as a host
- * (downstream-facing port), dp_pulldown and
- * dm_pulldown are enabled. This must not toggle
- * during normal operation.
- * @hst_mode: When '0' the USB is acting as HOST, when '1'
- * USB is acting as device. This field needs to be
- * set while the USB is in reset.
- * @tuning: Transmitter Tuning for High-Speed Operation.
- * Tunes the current supply and rise/fall output
- * times for high-speed operation.
- * [20:19] == 11: Current supply increased
- * approximately 9%
- * [20:19] == 10: Current supply increased
- * approximately 4.5%
- * [20:19] == 01: Design default.
- * [20:19] == 00: Current supply decreased
- * approximately 4.5%
- * [22:21] == 11: Rise and fall times are increased.
- * [22:21] == 10: Design default.
- * [22:21] == 01: Rise and fall times are decreased.
- * [22:21] == 00: Rise and fall times are decreased
- * further as compared to the 01 setting.
- * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
- * Enables or disables bit stuffing on data[15:8]
- * when bit-stuffing is enabled.
- * @tx_bs_en: Transmit Bit Stuffing on [7:0].
- * Enables or disables bit stuffing on data[7:0]
- * when bit-stuffing is enabled.
- * @loop_enb: PHY Loopback Test Enable.
- * '1': During data transmission the receiver is
- * enabled.
- * '0': During data transmission the receiver is
- * disabled.
- * Must be '0' for normal operation.
- * @vtest_enb: Analog Test Pin Enable.
- * '1' The PHY's analog_test pin is enabled for the
- * input and output of applicable analog test signals.
- * '0' The analog_test pin is disabled.
- * @bist_enb: Built-In Self Test Enable.
- * Used to activate BIST in the PHY.
- * @tdata_sel: Test Data Out Select.
- * '1' test_data_out[3:0] (PHY) register contents
- * are output. '0' internally generated signals are
- * output.
- * @taddr_in: Mode Address for Test Interface.
- * Specifies the register address for writing to or
- * reading from the PHY test interface register.
- * @tdata_in: Internal Testing Register Input Data and Select
- * This is a test bus. Data is present on [3:0],
- * and its corresponding select (enable) is present
- * on bits [7:4].
- * @ate_reset: Reset input from automatic test equipment.
- * This is a test signal. When the USB Core is
- * powered up (not in Suspend Mode), an automatic
- * tester can use this to disable phy_clock and
- * free_clk, then re-enable them with an aligned
- * phase.
- * '1': The phy_clk and free_clk outputs are
- * disabled. '0': The phy_clk and free_clk outputs
- * are available within a specific period after the
- * de-assertion.
- */
- struct cvmx_usbnx_usbp_ctl_status_s {
- __BITFIELD_FIELD(u64 txrisetune : 1,
- __BITFIELD_FIELD(u64 txvreftune : 4,
- __BITFIELD_FIELD(u64 txfslstune : 4,
- __BITFIELD_FIELD(u64 txhsxvtune : 2,
- __BITFIELD_FIELD(u64 sqrxtune : 3,
- __BITFIELD_FIELD(u64 compdistune : 3,
- __BITFIELD_FIELD(u64 otgtune : 3,
- __BITFIELD_FIELD(u64 otgdisable : 1,
- __BITFIELD_FIELD(u64 portreset : 1,
- __BITFIELD_FIELD(u64 drvvbus : 1,
- __BITFIELD_FIELD(u64 lsbist : 1,
- __BITFIELD_FIELD(u64 fsbist : 1,
- __BITFIELD_FIELD(u64 hsbist : 1,
- __BITFIELD_FIELD(u64 bist_done : 1,
- __BITFIELD_FIELD(u64 bist_err : 1,
- __BITFIELD_FIELD(u64 tdata_out : 4,
- __BITFIELD_FIELD(u64 siddq : 1,
- __BITFIELD_FIELD(u64 txpreemphasistune : 1,
- __BITFIELD_FIELD(u64 dma_bmode : 1,
- __BITFIELD_FIELD(u64 usbc_end : 1,
- __BITFIELD_FIELD(u64 usbp_bist : 1,
- __BITFIELD_FIELD(u64 tclk : 1,
- __BITFIELD_FIELD(u64 dp_pulld : 1,
- __BITFIELD_FIELD(u64 dm_pulld : 1,
- __BITFIELD_FIELD(u64 hst_mode : 1,
- __BITFIELD_FIELD(u64 tuning : 4,
- __BITFIELD_FIELD(u64 tx_bs_enh : 1,
- __BITFIELD_FIELD(u64 tx_bs_en : 1,
- __BITFIELD_FIELD(u64 loop_enb : 1,
- __BITFIELD_FIELD(u64 vtest_enb : 1,
- __BITFIELD_FIELD(u64 bist_enb : 1,
- __BITFIELD_FIELD(u64 tdata_sel : 1,
- __BITFIELD_FIELD(u64 taddr_in : 4,
- __BITFIELD_FIELD(u64 tdata_in : 8,
- __BITFIELD_FIELD(u64 ate_reset : 1,
- ;)))))))))))))))))))))))))))))))))))
- } s;
-};
-
-#endif /* __OCTEON_HCD_H__ */
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
deleted file mode 100644
index e7f4ddcc1361..000000000000
--- a/drivers/staging/octeon/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config OCTEON_ETHERNET
- tristate "Cavium Networks Octeon Ethernet support"
- depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
- depends on NETDEVICES
- depends on BROKEN
- select PHYLIB
- select MDIO_OCTEON
- help
- This driver supports the builtin ethernet ports on Cavium
- Networks' products in the Octeon family. This driver supports the
- CN3XXX and CN5XXX Octeon processors.
-
- To compile this driver as a module, choose M here. The module
- will be called octeon-ethernet.
-
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
deleted file mode 100644
index 3887cf5f1e84..000000000000
--- a/drivers/staging/octeon/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Copyright (C) 2005-2009 Cavium Networks
-#
-
-#
-# Makefile for Cavium OCTEON on-board ethernet driver
-#
-
-obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
-
-octeon-ethernet-y := ethernet.o
-octeon-ethernet-y += ethernet-mdio.o
-octeon-ethernet-y += ethernet-mem.o
-octeon-ethernet-y += ethernet-rgmii.o
-octeon-ethernet-y += ethernet-rx.o
-octeon-ethernet-y += ethernet-sgmii.o
-octeon-ethernet-y += ethernet-spi.o
-octeon-ethernet-y += ethernet-tx.o
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
deleted file mode 100644
index 67a0a1f6b922..000000000000
--- a/drivers/staging/octeon/TODO
+++ /dev/null
@@ -1,9 +0,0 @@
-This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
-chips at least up to CN7030.
-
-TODO:
- - general code review and clean up
- - make driver self-contained instead of being split between staging and
- arch/mips/cavium-octeon.
-
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
deleted file mode 100644
index ef9e767b0e2e..000000000000
--- a/drivers/staging/octeon/ethernet-defines.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-/*
- * A few defines are used to control the operation of this driver:
- * USE_ASYNC_IOBDMA
- * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
- * IOBDMAs to issue IO accesses without stalling. Set this to zero
- * to disable this. Note that IOBDMAs require CVMSEG.
- * REUSE_SKBUFFS_WITHOUT_FREE
- * Allows the TX path to free an skbuff into the FPA hardware pool. This
- * can significantly improve performance for forwarding and bridging, but
- * may be somewhat dangerous. Checks are made, but if any buffer is reused
- * without the proper Linux cleanup, the networking stack may have very
- * bizarre bugs.
- */
-#ifndef __ETHERNET_DEFINES_H__
-#define __ETHERNET_DEFINES_H__
-
-#ifdef CONFIG_NETFILTER
-#define REUSE_SKBUFFS_WITHOUT_FREE 0
-#else
-#define REUSE_SKBUFFS_WITHOUT_FREE 1
-#endif
-
-#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
-
-/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_OUT_QUEUE_DEPTH 1000
-
-#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32))
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32))
-
-#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS + 1)
-
-#endif /* __ETHERNET_DEFINES_H__ */
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
deleted file mode 100644
index c798672d61b2..000000000000
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/ratelimit.h>
-#include <linux/of_mdio.h>
-#include <generated/utsrelease.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-
-static void cvm_oct_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
- strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
-}
-
-static int cvm_oct_nway_reset(struct net_device *dev)
-{
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (dev->phydev)
- return phy_start_aneg(dev->phydev);
-
- return -EINVAL;
-}
-
-const struct ethtool_ops cvm_oct_ethtool_ops = {
- .get_drvinfo = cvm_oct_get_drvinfo,
- .nway_reset = cvm_oct_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-/**
- * cvm_oct_ioctl - IOCTL support for PHY control
- * @dev: Device to change
- * @rq: the request
- * @cmd: the command
- *
- * Returns Zero on success
- */
-int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (!dev->phydev)
- return -EINVAL;
-
- return phy_mii_ioctl(dev->phydev, rq, cmd);
-}
-
-void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- union cvmx_helper_link_info li)
-{
- if (li.s.link_up) {
- pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
- netdev_name(priv->netdev), li.s.speed,
- (li.s.full_duplex) ? "Full" : "Half",
- priv->port, priv->queue);
- } else {
- pr_notice_ratelimited("%s: Link down\n",
- netdev_name(priv->netdev));
- }
-}
-
-void cvm_oct_adjust_link(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
-
- link_info.u64 = 0;
- link_info.s.link_up = dev->phydev->link ? 1 : 0;
- link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
- link_info.s.speed = dev->phydev->speed;
- priv->link_info = link_info.u64;
-
- /*
- * The polling task needs to know about link status changes.
- */
- if (priv->poll)
- priv->poll(dev);
-
- if (priv->last_link != dev->phydev->link) {
- priv->last_link = dev->phydev->link;
- cvmx_helper_link_set(priv->port, link_info);
- cvm_oct_note_carrier(priv, link_info);
- }
-}
-
-int cvm_oct_common_stop(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- union cvmx_helper_link_info link_info;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int index = INDEX(priv->port);
-
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- priv->poll = NULL;
-
- if (dev->phydev)
- phy_disconnect(dev->phydev);
-
- if (priv->last_link) {
- link_info.u64 = 0;
- priv->last_link = 0;
-
- cvmx_helper_link_set(priv->port, link_info);
- cvm_oct_note_carrier(priv, link_info);
- }
- return 0;
-}
-
-/**
- * cvm_oct_phy_setup_device - setup the PHY
- *
- * @dev: Device to setup
- *
- * Returns Zero on success, negative on failure
- */
-int cvm_oct_phy_setup_device(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- struct device_node *phy_node;
- struct phy_device *phydev = NULL;
-
- if (!priv->of_node)
- goto no_phy;
-
- phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
- if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
- int rc;
-
- rc = of_phy_register_fixed_link(priv->of_node);
- if (rc)
- return rc;
-
- phy_node = of_node_get(priv->of_node);
- }
- if (!phy_node)
- goto no_phy;
-
- phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- priv->phy_mode);
- of_node_put(phy_node);
-
- if (!phydev)
- return -ENODEV;
-
- priv->last_link = 0;
- phy_start(phydev);
-
- return 0;
-no_phy:
- /* If there is no phy, assume a direct MAC connection and that
- * the link is up.
- */
- netif_carrier_on(dev);
- return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
deleted file mode 100644
index e3771d48c49b..000000000000
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/string.h>
-#include <linux/ethtool.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-extern const struct ethtool_ops cvm_oct_ethtool_ops;
-
-void octeon_mdiobus_force_mod_depencency(void);
-
-int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
deleted file mode 100644
index 532594957ebc..000000000000
--- a/drivers/staging/octeon/ethernet-mem.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-mem.h"
-#include "ethernet-defines.h"
-
-/**
- * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
- * @pool: Pool to allocate an skbuff for
- * @size: Size of the buffer needed for the pool
- * @elements: Number of buffers to allocate
- *
- * Returns the actual number of buffers allocated.
- */
-static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
-{
- int freed = elements;
-
- while (freed) {
- struct sk_buff *skb = dev_alloc_skb(size + 256);
-
- if (unlikely(!skb))
- break;
- skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
- *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
- cvmx_fpa_free(skb->data, pool, size / 128);
- freed--;
- }
- return elements - freed;
-}
-
-/**
- * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
- * @pool: Pool to allocate an skbuff for
- * @size: Size of the buffer needed for the pool
- * @elements: Number of buffers to allocate
- */
-static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
-{
- char *memory;
-
- do {
- memory = cvmx_fpa_alloc(pool);
- if (memory) {
- struct sk_buff *skb =
- *(struct sk_buff **)(memory - sizeof(void *));
- elements--;
- dev_kfree_skb(skb);
- }
- } while (memory);
-
- if (elements < 0)
- pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
- pool, elements);
- else if (elements > 0)
- pr_warn("Freeing of pool %u is missing %d skbuffs\n",
- pool, elements);
-}
-
-/**
- * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
- * @pool: Pool to populate
- * @size: Size of each buffer in the pool
- * @elements: Number of buffers to allocate
- *
- * Returns the actual number of buffers allocated.
- */
-static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
-{
- char *memory;
- char *fpa;
- int freed = elements;
-
- while (freed) {
- /*
- * FPA memory must be 128 byte aligned. Since we are
- * aligning we need to save the original pointer so we
- * can feed it to kfree when the memory is returned to
- * the kernel.
- *
- * We allocate an extra 256 bytes to allow for
- * alignment and space for the original pointer saved
- * just before the block.
- */
- memory = kmalloc(size + 256, GFP_ATOMIC);
- if (unlikely(!memory)) {
- pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
- elements * size, pool);
- break;
- }
- fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
- *((char **)fpa - 1) = memory;
- cvmx_fpa_free(fpa, pool, 0);
- freed--;
- }
- return elements - freed;
-}
-
-/**
- * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
- * @pool: FPA pool to free
- * @size: Size of each buffer in the pool
- * @elements: Number of buffers that should be in the pool
- */
-static void cvm_oct_free_hw_memory(int pool, int size, int elements)
-{
- char *memory;
- char *fpa;
-
- do {
- fpa = cvmx_fpa_alloc(pool);
- if (fpa) {
- elements--;
- fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
- memory = *((char **)fpa - 1);
- kfree(memory);
- }
- } while (fpa);
-
- if (elements < 0)
- pr_warn("Freeing of pool %u had too many buffers (%d)\n",
- pool, elements);
- else if (elements > 0)
- pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
- pool, elements);
-}
-
-int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
-{
- int freed;
-
- if (pool == CVMX_FPA_PACKET_POOL)
- freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
- else
- freed = cvm_oct_fill_hw_memory(pool, size, elements);
- return freed;
-}
-
-void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
-{
- if (pool == CVMX_FPA_PACKET_POOL)
- cvm_oct_free_hw_skbuff(pool, size, elements);
- else
- cvm_oct_free_hw_memory(pool, size, elements);
-}
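
The alignment trick in cvm_oct_fill_hw_memory() and cvm_oct_free_hw_memory() above is worth seeing in isolation: over-allocate, round the pointer up to a 128-byte boundary, and stash the original pointer one word below the aligned block so it can be recovered at free time. A minimal userspace sketch, with plain malloc()/free() standing in for the FPA calls and all names illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *aligned_block_alloc(size_t size)
{
	/* 256 extra bytes leave room for alignment plus the saved pointer. */
	char *memory = malloc(size + 256);
	char *aligned;

	if (!memory)
		return NULL;
	aligned = (char *)(((uintptr_t)memory + 256) & ~0x7fUL);
	((char **)aligned)[-1] = memory;	/* remember the real start */
	return aligned;
}

static void aligned_block_free(void *aligned)
{
	if (aligned)
		free(((char **)aligned)[-1]);	/* recover the real start */
}

int main(void)
{
	void *buf = aligned_block_alloc(2048);

	printf("aligned to 128: %d\n", ((uintptr_t)buf & 0x7f) == 0);
	aligned_block_free(buf);
	return 0;
}
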
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
deleted file mode 100644
index 692dcdb7154d..000000000000
--- a/drivers/staging/octeon/ethernet-mem.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
-void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
deleted file mode 100644
index 0c4fac31540a..000000000000
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-static DEFINE_SPINLOCK(global_register_lock);
-
-static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
-{
- union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
- union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
- union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- /* Set preamble checking. */
- gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index,
- interface));
- gmxx_rxx_frm_ctl.s.pre_chk = enable;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
- gmxx_rxx_frm_ctl.u64);
-
- /* Set FCS stripping. */
- ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
- if (enable)
- ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
- else
- ipd_sub_port_fcs.s.port_bit &=
- 0xffffffffull ^ (1ull << priv->port);
- cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
-
- /* Clear any error bits. */
- gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index,
- interface));
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
- gmxx_rxx_int_reg.u64);
-}
-
-static void cvm_oct_check_preamble_errors(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
- unsigned long flags;
-
- link_info.u64 = priv->link_info;
-
- /*
- * Take the global register lock since we are going to
- * touch registers that affect more than one port.
- */
- spin_lock_irqsave(&global_register_lock, flags);
-
- if (link_info.s.speed == 10 && priv->last_speed == 10) {
- /*
- * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
- * getting preamble errors.
- */
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
-
- gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
- (index, interface));
- if (gmxx_rxx_int_reg.s.pcterr) {
- /*
- * We are getting preamble errors at 10Mbps. Most
- * likely the PHY is giving us packets with misaligned
- * preambles. In order to get these packets we need to
- * disable preamble checking and do it in software.
- */
- cvm_oct_set_hw_preamble(priv, false);
- printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
- dev->name);
- }
- } else {
- /*
- * Since the 10Mbps preamble workaround is allowed, we need to
- * enable preamble checking and FCS stripping and clear the
- * error bits on every speed change. If errors occur during
- * 10Mbps operation, the code above will disable the hardware
- * checks again.
- */
- if (priv->last_speed != link_info.s.speed)
- cvm_oct_set_hw_preamble(priv, true);
- priv->last_speed = link_info.s.speed;
- }
- spin_unlock_irqrestore(&global_register_lock, flags);
-}
-
-static void cvm_oct_rgmii_poll(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
- bool status_change;
-
- link_info = cvmx_helper_link_get(priv->port);
- if (priv->link_info != link_info.u64 &&
- cvmx_helper_link_set(priv->port, link_info))
- link_info.u64 = priv->link_info;
- status_change = priv->link_info != link_info.u64;
- priv->link_info = link_info.u64;
-
- cvm_oct_check_preamble_errors(dev);
-
- if (likely(!status_change))
- return;
-
- /* Tell core. */
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
-}
-
-int cvm_oct_rgmii_open(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
-
- ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
- if (ret)
- return ret;
-
- if (dev->phydev) {
- /*
- * In phydev mode, we still need periodic polling for the
- * preamble error checking, and we also need to call this
- * function on every link state change.
- *
- * Only true RGMII ports need to be polled. In GMII mode, port
- * 0 is really an RGMII port.
- */
- if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
- priv->port == 0) ||
- (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
- priv->poll = cvm_oct_check_preamble_errors;
- cvm_oct_check_preamble_errors(dev);
- }
- }
-
- return 0;
-}
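
The FCS-strip update in cvm_oct_set_hw_preamble() above toggles one per-port bit in CVMX_IPD_SUB_PORT_FCS; the driver clears the bit with "mask ^ bit" over a 32-bit constant, which is equivalent to the conventional "& ~bit" for ports 0..31. A small sketch of the idiom, names hypothetical:

#include <stdint.h>

/* Set or clear the FCS-strip bit for one port in a 64-bit mask. */
static inline uint64_t fcs_port_bit(uint64_t mask, int port, int enable)
{
	if (enable)
		mask |= 1ull << port;		/* strip FCS on this port */
	else
		mask &= ~(1ull << port);	/* leave the FCS in place */
	return mask;
}
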
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
deleted file mode 100644
index 2c16230f993c..000000000000
--- a/drivers/staging/octeon/ethernet-rx.c
+++ /dev/null
@@ -1,538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/cache.h>
-#include <linux/cpumask.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/string.h>
-#include <linux/prefetch.h>
-#include <linux/ratelimit.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mem.h"
-#include "ethernet-rx.h"
-#include "ethernet-util.h"
-
-static atomic_t oct_rx_ready = ATOMIC_INIT(0);
-
-static struct oct_rx_group {
- int irq;
- int group;
- struct napi_struct napi;
-} oct_rx_group[16];
-
-/**
- * cvm_oct_do_interrupt - interrupt handler.
- * @irq: Interrupt number.
- * @napi_id: Cookie to identify the NAPI instance.
- *
- * The interrupt occurs whenever the POW has packets in our group.
- *
- */
-static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
-{
- /* Disable the IRQ and start napi_poll. */
- disable_irq_nosync(irq);
- napi_schedule(napi_id);
-
- return IRQ_HANDLED;
-}
-
-/**
- * cvm_oct_check_rcv_error - process receive errors
- * @work: Work queue entry pointing to the packet.
- *
- * Returns non-zero if the packet was dropped and freed, zero otherwise.
- */
-static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
-{
- int port;
-
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- port = work->word0.pip.cn68xx.pknd;
- else
- port = work->word1.cn38xx.ipprt;
-
- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
- /*
- * Ignore length errors on min size packets. Some
- * equipment incorrectly pads packets to 64+4FCS
- * instead of 60+4FCS. Note these packets still get
- * counted as frame errors.
- */
- } else if (work->word2.snoip.err_code == 5 ||
- work->word2.snoip.err_code == 7) {
- /*
- * We received a packet with either an alignment error
- * or a FCS error. This may be signalling that we are
- * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
- * off. If this is the case, we need to parse the
- * packet to determine if we can remove a non-spec
- * preamble and generate a correct packet.
- */
- int interface = cvmx_helper_get_interface_num(port);
- int index = cvmx_helper_get_interface_index_num(port);
- union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
-
- gmxx_rxx_frm_ctl.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
- if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
- u8 *ptr =
- cvmx_phys_to_ptr(work->packet_ptr.s.addr);
- int i = 0;
-
- while (i < work->word1.len - 1) {
- if (*ptr != 0x55)
- break;
- ptr++;
- i++;
- }
-
- if (*ptr == 0xd5) {
- /* Port received 0xd5 preamble */
- work->packet_ptr.s.addr += i + 1;
- work->word1.len -= i + 5;
- } else if ((*ptr & 0xf) == 0xd) {
- /* Port received 0xd preamble */
- work->packet_ptr.s.addr += i;
- work->word1.len -= i + 4;
- for (i = 0; i < work->word1.len; i++) {
- *ptr =
- ((*ptr & 0xf0) >> 4) |
- ((*(ptr + 1) & 0xf) << 4);
- ptr++;
- }
- } else {
- printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- port);
- cvm_oct_free_work(work);
- return 1;
- }
- }
- } else {
- printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- port, work->word2.snoip.err_code);
- cvm_oct_free_work(work);
- return 1;
- }
-
- return 0;
-}
-
-static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
-{
- int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
- int len = work->word1.len;
- int segment_size;
-
- while (segments--) {
- union cvmx_buf_ptr next_ptr;
-
- next_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
-
- /*
- * Octeon Errata PKI-100: The segment size is wrong.
- *
- * Until it is fixed, calculate the segment size based on
- * the packet pool buffer size.
- * When it is fixed, the following line should be replaced
- * with this one:
- * int segment_size = segment_ptr.s.size;
- */
- segment_size =
- CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr -
- (((segment_ptr.s.addr >> 7) -
- segment_ptr.s.back) << 7));
-
- /* Don't copy more than what is left in the packet */
- if (segment_size > len)
- segment_size = len;
-
- /* Copy the data into the packet */
- skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
- segment_size);
- len -= segment_size;
- segment_ptr = next_ptr;
- }
-}
-
-static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
-{
- const int coreid = cvmx_get_core_num();
- u64 old_group_mask;
- u64 old_scratch;
- int rx_count = 0;
- int did_work_request = 0;
- int packet_not_copied;
-
- /* Prefetch cvm_oct_device since we know we need it soon */
- prefetch(cvm_oct_device);
-
- if (USE_ASYNC_IOBDMA) {
- /* Save scratch in case userspace is using it */
- CVMX_SYNCIOBDMA;
- old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- }
-
- /* Only allow work for our group (and preserve priorities) */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
- cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- BIT(rx_group->group));
- cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
- } else {
- old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) |
- BIT(rx_group->group));
- }
-
- if (USE_ASYNC_IOBDMA) {
- cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
- did_work_request = 1;
- }
-
- while (rx_count < budget) {
- struct sk_buff *skb = NULL;
- struct sk_buff **pskb = NULL;
- int skb_in_hw;
- struct cvmx_wqe *work;
- int port;
-
- if (USE_ASYNC_IOBDMA && did_work_request)
- work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
- else
- work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
-
- prefetch(work);
- did_work_request = 0;
- if (!work) {
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
- BIT(rx_group->group));
- cvmx_write_csr(CVMX_SSO_WQ_INT,
- BIT(rx_group->group));
- } else {
- union cvmx_pow_wq_int wq_int;
-
- wq_int.u64 = 0;
- wq_int.s.iq_dis = BIT(rx_group->group);
- wq_int.s.wq_int = BIT(rx_group->group);
- cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
- }
- break;
- }
- pskb = (struct sk_buff **)
- (cvm_oct_get_buffer_ptr(work->packet_ptr) -
- sizeof(void *));
- prefetch(pskb);
-
- if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
- cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
- CVMX_POW_NO_WAIT);
- did_work_request = 1;
- }
- rx_count++;
-
- skb_in_hw = work->word2.s.bufs == 1;
- if (likely(skb_in_hw)) {
- skb = *pskb;
- prefetch(&skb->head);
- prefetch(&skb->len);
- }
-
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- port = work->word0.pip.cn68xx.pknd;
- else
- port = work->word1.cn38xx.ipprt;
-
- prefetch(cvm_oct_device[port]);
-
- /* Immediately throw away all packets with receive errors */
- if (unlikely(work->word2.snoip.rcv_error)) {
- if (cvm_oct_check_rcv_error(work))
- continue;
- }
-
- /*
- * We can only use the zero copy path if skbuffs are
- * in the FPA pool and the packet fits in a single
- * buffer.
- */
- if (likely(skb_in_hw)) {
- skb->data = skb->head + work->packet_ptr.s.addr -
- cvmx_ptr_to_phys(skb->head);
- prefetch(skb->data);
- skb->len = work->word1.len;
- skb_set_tail_pointer(skb, skb->len);
- packet_not_copied = 1;
- } else {
- /*
- * We have to copy the packet. First allocate
- * an skbuff for it.
- */
- skb = dev_alloc_skb(work->word1.len);
- if (!skb) {
- cvm_oct_free_work(work);
- continue;
- }
-
- /*
- * Check if we've received a packet that was
- * entirely stored in the work entry.
- */
- if (unlikely(work->word2.s.bufs == 0)) {
- u8 *ptr = work->packet_data;
-
- if (likely(!work->word2.s.not_IP)) {
- /*
- * The beginning of the packet
- * moves for IP packets.
- */
- if (work->word2.s.is_v6)
- ptr += 2;
- else
- ptr += 6;
- }
- skb_put_data(skb, ptr, work->word1.len);
- /* No packet buffers to free */
- } else {
- copy_segments_to_skb(work, skb);
- }
- packet_not_copied = 0;
- }
- if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
- cvm_oct_device[port])) {
- struct net_device *dev = cvm_oct_device[port];
-
- /*
- * Only accept packets for devices that are
- * currently up.
- */
- if (likely(dev->flags & IFF_UP)) {
- skb->protocol = eth_type_trans(skb, dev);
- skb->dev = dev;
-
- if (unlikely(work->word2.s.not_IP ||
- work->word2.s.IP_exc ||
- work->word2.s.L4_error ||
- !work->word2.s.tcp_or_udp))
- skb->ip_summed = CHECKSUM_NONE;
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* Increment RX stats for virtual ports */
- if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- }
- netif_receive_skb(skb);
- } else {
- /*
- * Drop any packet received for a device that
- * isn't up.
- */
- dev->stats.rx_dropped++;
- dev_kfree_skb_irq(skb);
- }
- } else {
- /*
- * Drop any packet received for a device that
- * doesn't exist.
- */
- printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- port);
- dev_kfree_skb_irq(skb);
- }
- /*
- * Check to see if the skbuff and work share the same
- * packet buffer.
- */
- if (likely(packet_not_copied)) {
- /*
- * This buffer needs to be replaced, increment
- * the number of buffers we need to free by
- * one.
- */
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- 1);
-
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
- } else {
- cvm_oct_free_work(work);
- }
- }
- /* Restore the original POW group mask */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
- cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
- } else {
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
- }
-
- if (USE_ASYNC_IOBDMA) {
- /* Restore the scratch area */
- cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
- }
- cvm_oct_rx_refill_pool(0);
-
- return rx_count;
-}
-
-/**
- * cvm_oct_napi_poll - the NAPI poll function.
- * @napi: The NAPI instance.
- * @budget: Maximum number of packets to receive.
- *
- * Returns the number of packets processed.
- */
-static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
-{
- struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
- napi);
- int rx_count;
-
- rx_count = cvm_oct_poll(rx_group, budget);
-
- if (rx_count < budget) {
- /* No more work */
- napi_complete_done(napi, rx_count);
- enable_irq(rx_group->irq);
- }
- return rx_count;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * cvm_oct_poll_controller - poll a device for receive packets
- *
- * @dev: Device to poll. Unused.
- */
-void cvm_oct_poll_controller(struct net_device *dev)
-{
- int i;
-
- if (!atomic_read(&oct_rx_ready))
- return;
-
- for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
- if (!(pow_receive_groups & BIT(i)))
- continue;
-
- cvm_oct_poll(&oct_rx_group[i], 16);
- }
-}
-#endif
-
-void cvm_oct_rx_initialize(void)
-{
- int i;
- struct net_device *dev_for_napi = NULL;
-
- for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
- if (cvm_oct_device[i]) {
- dev_for_napi = cvm_oct_device[i];
- break;
- }
- }
-
- if (!dev_for_napi)
- panic("No net_devices were allocated.");
-
- for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
- int ret;
-
- if (!(pow_receive_groups & BIT(i)))
- continue;
-
- netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
- cvm_oct_napi_poll, rx_napi_weight);
- napi_enable(&oct_rx_group[i].napi);
-
- oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
- oct_rx_group[i].group = i;
-
- /* Register an IRQ handler to receive POW interrupts */
- ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
- "Ethernet", &oct_rx_group[i].napi);
- if (ret)
- panic("Could not acquire Ethernet IRQ %d\n",
- oct_rx_group[i].irq);
-
- disable_irq_nosync(oct_rx_group[i].irq);
-
- /* Enable POW interrupt when our port has at least one packet */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
- union cvmx_sso_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
- } else {
- union cvmx_pow_wq_int_thrx int_thr;
- union cvmx_pow_wq_int_pc int_pc;
-
- int_thr.u64 = 0;
- int_thr.s.tc_en = 1;
- int_thr.s.tc_thr = 1;
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
-
- int_pc.u64 = 0;
- int_pc.s.pc_thr = 5;
- cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
- }
-
- /* Schedule NAPI now. This will indirectly enable the
- * interrupt.
- */
- napi_schedule(&oct_rx_group[i].napi);
- }
- atomic_inc(&oct_rx_ready);
-}
-
-void cvm_oct_rx_shutdown(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
- if (!(pow_receive_groups & BIT(i)))
- continue;
-
- /* Disable POW interrupt */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
- else
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
-
- /* Free the interrupt handler */
- free_irq(oct_rx_group[i].irq, cvm_oct_device);
-
- netif_napi_del(&oct_rx_group[i].napi);
- }
-}
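
The trickiest part of the file above is the non-spec preamble removal in cvm_oct_check_rcv_error(): skip the 0x55 preamble bytes, then either drop a byte-aligned 0xd5 SFD or, when the SFD is misaligned by one nibble, rebuild every byte from adjacent nibble pairs. A self-contained userspace sketch of that logic, with simplified buffer handling standing in for the in-place hardware-buffer version:

#include <stdint.h>
#include <string.h>

/* Returns the new length, or -1 if no recognizable preamble. */
static int strip_preamble(uint8_t *p, int len)
{
	int i = 0, j;

	while (i < len - 1 && p[i] == 0x55)	/* skip preamble bytes */
		i++;

	if (p[i] == 0xd5) {			/* byte-aligned SFD */
		memmove(p, p + i + 1, len - i - 1);
		return len - i - 1;
	}
	if ((p[i] & 0xf) == 0xd) {		/* SFD shifted by one nibble */
		len -= i + 4;			/* mirrors the driver's length fixup */
		for (j = 0; j < len; j++)
			p[j] = ((p[i + j] & 0xf0) >> 4) |
			       ((p[i + j + 1] & 0x0f) << 4);
		return len;
	}
	return -1;				/* unknown preamble: drop */
}
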
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
deleted file mode 100644
index ff6482fa20d6..000000000000
--- a/drivers/staging/octeon/ethernet-rx.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-void cvm_oct_poll_controller(struct net_device *dev);
-void cvm_oct_rx_initialize(void);
-void cvm_oct_rx_shutdown(void);
-
-static inline void cvm_oct_rx_refill_pool(int fill_threshold)
-{
- int number_to_free;
- int num_freed;
- /* Refill the packet buffer pool */
- number_to_free =
- cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
- if (number_to_free > fill_threshold) {
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- -number_to_free);
- num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
- CVMX_FPA_PACKET_POOL_SIZE,
- number_to_free);
- if (num_freed != number_to_free) {
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
- }
- }
-}
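
cvm_oct_rx_refill_pool() above uses a claim-and-undo pattern on the FAU counter: atomically claim the whole backlog, then credit back whatever could not actually be allocated. A sketch of the same pattern with C11 atomics standing in for the FAU fetch-and-add unit; alloc_buffers() is a hypothetical stand-in for cvm_oct_mem_fill_fpa():

#include <stdatomic.h>

static atomic_int buffers_owed;	/* plays the role of the FAU counter */

static int alloc_buffers(int n)
{
	return n;	/* stub: pretend every allocation succeeds */
}

static void refill_pool(int fill_threshold)
{
	int want = atomic_load(&buffers_owed);

	if (want <= fill_threshold)
		return;
	atomic_fetch_sub(&buffers_owed, want);	/* claim the backlog */
	/* Credit back the part we failed to allocate. */
	atomic_fetch_add(&buffers_owed, want - alloc_buffers(want));
}
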
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
deleted file mode 100644
index d7fbd9159302..000000000000
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/phy.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/ratelimit.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-#include "ethernet-mdio.h"
-
-int cvm_oct_sgmii_open(struct net_device *dev)
-{
- return cvm_oct_common_open(dev, cvm_oct_link_poll);
-}
-
-int cvm_oct_sgmii_init(struct net_device *dev)
-{
- cvm_oct_common_init(dev);
-
- /* FIXME: Need autoneg logic */
- return 0;
-}
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
deleted file mode 100644
index c582403e6a1f..000000000000
--- a/drivers/staging/octeon/ethernet-spi.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-util.h"
-
-static int number_spi_ports;
-static int need_retrain[2] = { 0, 0 };
-
-static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index)
-{
- if (spx_int_reg.s.spf)
- pr_err("SPI%d: SRX Spi4 interface down\n", index);
- if (spx_int_reg.s.calerr)
- pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index);
- if (spx_int_reg.s.syncerr)
- pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
- index);
- if (spx_int_reg.s.diperr)
- pr_err("SPI%d: SRX Spi4 DIP4 error\n", index);
- if (spx_int_reg.s.tpaovr)
- pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
- index);
- if (spx_int_reg.s.rsverr)
- pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
- index);
- if (spx_int_reg.s.drwnng)
- pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
- index);
- if (spx_int_reg.s.clserr)
- pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
- index);
- if (spx_int_reg.s.spiovr)
- pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index);
- if (spx_int_reg.s.abnorm)
- pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
- index);
- if (spx_int_reg.s.prtnxa)
- pr_err("SPI%d: SRX Port out of range\n", index);
-}
-
-static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index)
-{
- if (stx_int_reg.s.syncerr)
- pr_err("SPI%d: STX Interface encountered a fatal error\n",
- index);
- if (stx_int_reg.s.frmerr)
- pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
- index);
- if (stx_int_reg.s.unxfrm)
- pr_err("SPI%d: STX Unexpected framing sequence\n", index);
- if (stx_int_reg.s.nosync)
- pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
- index);
- if (stx_int_reg.s.diperr)
- pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
- index);
- if (stx_int_reg.s.datovr)
- pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index);
- if (stx_int_reg.s.ovrbst)
- pr_err("SPI%d: STX Transmit packet burst too big\n", index);
- if (stx_int_reg.s.calpar1)
- pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
- index, 1);
- if (stx_int_reg.s.calpar0)
- pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
- index, 0);
-}
-
-static irqreturn_t cvm_oct_spi_spx_int(int index)
-{
- union cvmx_spxx_int_reg spx_int_reg;
- union cvmx_stxx_int_reg stx_int_reg;
-
- spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index));
- cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64);
- if (!need_retrain[index]) {
- spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index));
- cvm_oct_spxx_int_pr(spx_int_reg, index);
- }
-
- stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index));
- cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64);
- if (!need_retrain[index]) {
- stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index));
- cvm_oct_stxx_int_pr(stx_int_reg, index);
- }
-
- cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0);
- cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0);
- need_retrain[index] = 1;
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
-{
- irqreturn_t return_status = IRQ_NONE;
- union cvmx_npi_rsl_int_blocks rsl_int_blocks;
-
- /* Check and see if this interrupt was caused by the GMX block */
- rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
- if (rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */
- return_status = cvm_oct_spi_spx_int(1);
-
- if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */
- return_status = cvm_oct_spi_spx_int(0);
-
- return return_status;
-}
-
-static void cvm_oct_spi_enable_error_reporting(int interface)
-{
- union cvmx_spxx_int_msk spxx_int_msk;
- union cvmx_stxx_int_msk stxx_int_msk;
-
- spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
- spxx_int_msk.s.calerr = 1;
- spxx_int_msk.s.syncerr = 1;
- spxx_int_msk.s.diperr = 1;
- spxx_int_msk.s.tpaovr = 1;
- spxx_int_msk.s.rsverr = 1;
- spxx_int_msk.s.drwnng = 1;
- spxx_int_msk.s.clserr = 1;
- spxx_int_msk.s.spiovr = 1;
- spxx_int_msk.s.abnorm = 1;
- spxx_int_msk.s.prtnxa = 1;
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
-
- stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
- stxx_int_msk.s.frmerr = 1;
- stxx_int_msk.s.unxfrm = 1;
- stxx_int_msk.s.nosync = 1;
- stxx_int_msk.s.diperr = 1;
- stxx_int_msk.s.datovr = 1;
- stxx_int_msk.s.ovrbst = 1;
- stxx_int_msk.s.calpar1 = 1;
- stxx_int_msk.s.calpar0 = 1;
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
-}
-
-static void cvm_oct_spi_poll(struct net_device *dev)
-{
- static int spi4000_port;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface;
-
- for (interface = 0; interface < 2; interface++) {
- if ((priv->port == interface * 16) && need_retrain[interface]) {
- if (cvmx_spi_restart_interface
- (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
- need_retrain[interface] = 0;
- cvm_oct_spi_enable_error_reporting(interface);
- }
- }
-
- /*
- * The SPI4000 TWSI interface is very slow. In order
- * not to bring the system to a crawl, we only poll a
- * single port every second. This means negotiation
- * speed changes take up to 10 seconds, but at least
- * we don't waste absurd amounts of time waiting for
- * TWSI.
- */
- if (priv->port == spi4000_port) {
- /*
- * This function does nothing if it is called on an
- * interface without a SPI4000.
- */
- cvmx_spi4000_check_speed(interface, priv->port);
- /*
- * A normal scan would increment. By decrementing
- * instead, we match at most one port per poll pass.
- */
- spi4000_port--;
- if (spi4000_port < 0)
- spi4000_port = 10;
- }
- }
-}
-
-int cvm_oct_spi_init(struct net_device *dev)
-{
- int r;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (number_spi_ports == 0) {
- r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
- IRQF_SHARED, "SPI", &number_spi_ports);
- if (r)
- return r;
- }
- number_spi_ports++;
-
- if ((priv->port == 0) || (priv->port == 16)) {
- cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
- priv->poll = cvm_oct_spi_poll;
- }
- cvm_oct_common_init(dev);
- return 0;
-}
-
-void cvm_oct_spi_uninit(struct net_device *dev)
-{
- int interface;
-
- cvm_oct_common_uninit(dev);
- number_spi_ports--;
- if (number_spi_ports == 0) {
- for (interface = 0; interface < 2; interface++) {
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
- }
- free_irq(OCTEON_IRQ_RML, &number_spi_ports);
- }
-}
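
The decrementing cursor in cvm_oct_spi_poll() above is subtle: every port's poll callback runs once per second, and because the shared cursor counts down while callers sweep port numbers upward, at most one port wins the slow SPI4000 TWSI poll per sweep (two only at the wraparound). A standalone toy illustrating the scheme, values hypothetical:

#include <stdio.h>

#define NUM_PORTS 11

static int cursor = NUM_PORTS - 1;	/* shared across all ports */

static void poll_port(int port)
{
	if (port != cursor)
		return;
	printf("slow TWSI poll of port %d\n", port);
	if (--cursor < 0)		/* count down, wrap around */
		cursor = NUM_PORTS - 1;
}

int main(void)
{
	int second, port;

	for (second = 0; second < 3; second++)
		for (port = 0; port < NUM_PORTS; port++)
			poll_port(port);	/* one poll per port per second */
	return 0;
}
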
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
deleted file mode 100644
index b334cf89794e..000000000000
--- a/drivers/staging/octeon/ethernet-tx.c
+++ /dev/null
@@ -1,717 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/ratelimit.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <net/dst.h>
-#ifdef CONFIG_XFRM
-#include <linux/xfrm.h>
-#include <net/xfrm.h>
-#endif /* CONFIG_XFRM */
-
-#include <linux/atomic.h>
-#include <net/sch_generic.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-tx.h"
-#include "ethernet-util.h"
-
-#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
-
-/*
- * You can define GET_SKBUFF_QOS() to override how the skbuff output
- * function determines which output queue is used. The default
- * implementation always uses the base queue for the port. If, for
- * example, you wanted to use the skb->priority field, define
- * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
- */
-#ifndef GET_SKBUFF_QOS
-#define GET_SKBUFF_QOS(skb) 0
-#endif
-
-static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
-
-/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-
-static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
-{
- int undo;
-
- undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
- MAX_SKB_TO_FREE;
- if (undo > 0)
- cvmx_fau_atomic_add32(fau, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
- -skb_to_free;
- return skb_to_free;
-}
-
-static void cvm_oct_kick_tx_poll_watchdog(void)
-{
- union cvmx_ciu_timx ciu_timx;
-
- ciu_timx.u64 = 0;
- ciu_timx.s.one_shot = 1;
- ciu_timx.s.len = cvm_oct_tx_poll_interval;
- cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
-}
-
-static void cvm_oct_free_tx_skbs(struct net_device *dev)
-{
- int skb_to_free;
- int qos, queues_per_port;
- int total_freed = 0;
- int total_remaining = 0;
- unsigned long flags;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- queues_per_port = cvmx_pko_get_num_queues(priv->port);
- /* Drain any pending packets in the free list */
- for (qos = 0; qos < queues_per_port; qos++) {
- if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
- continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
- total_freed += skb_to_free;
- if (skb_to_free > 0) {
- struct sk_buff *to_free_list = NULL;
-
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- while (skb_to_free > 0) {
- struct sk_buff *t;
-
- t = __skb_dequeue(&priv->tx_free_list[qos]);
- t->next = to_free_list;
- to_free_list = t;
- skb_to_free--;
- }
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
- flags);
- /* Do the actual freeing outside of the lock. */
- while (to_free_list) {
- struct sk_buff *t = to_free_list;
-
- to_free_list = to_free_list->next;
- dev_kfree_skb_any(t);
- }
- }
- total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
- }
- if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
- netif_wake_queue(dev);
- if (total_remaining)
- cvm_oct_kick_tx_poll_watchdog();
-}
-
-/**
- * cvm_oct_xmit - transmit a packet
- * @skb: Packet to send
- * @dev: Device info structure
- *
- * Returns NETDEV_TX_OK, always.
- */
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- union cvmx_pko_command_word0 pko_command;
- union cvmx_buf_ptr hw_buffer;
- u64 old_scratch;
- u64 old_scratch2;
- int qos;
- int i;
- enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
- struct octeon_ethernet *priv = netdev_priv(dev);
- struct sk_buff *to_free_list;
- int skb_to_free;
- int buffers_to_free;
- u32 total_to_clean;
- unsigned long flags;
-#if REUSE_SKBUFFS_WITHOUT_FREE
- unsigned char *fpa_head;
-#endif
-
- /*
- * Prefetch the private data structure. It is larger than
- * one cache line.
- */
- prefetch(priv);
-
- /*
- * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
- * completely remove "qos" in the event neither interface
- * supports multiple queues per port.
- */
- if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
- (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
- qos = GET_SKBUFF_QOS(skb);
- if (qos <= 0)
- qos = 0;
- else if (qos >= cvmx_pko_get_num_queues(priv->port))
- qos = 0;
- } else {
- qos = 0;
- }
-
- if (USE_ASYNC_IOBDMA) {
- /* Save scratch in case userspace is using it */
- CVMX_SYNCIOBDMA;
- old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-
- /*
- * Fetch and increment the number of packets to be
- * freed.
- */
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
- FAU_NUM_PACKET_BUFFERS_TO_FREE,
- 0);
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- }
-
- /*
- * We have space for 6 segment pointers. If there are more
- * than that, we must linearize.
- */
- if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
- if (unlikely(__skb_linearize(skb))) {
- queue_type = QUEUE_DROP;
- if (USE_ASYNC_IOBDMA) {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- CVMX_SYNCIOBDMA;
- skb_to_free =
- cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- } else {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- skb_to_free =
- cvmx_fau_fetch_and_add32(priv->fau +
- qos * 4,
- MAX_SKB_TO_FREE);
- }
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau +
- qos * 4);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- goto skip_xmit;
- }
- }
-
- /*
- * The CN3XXX series of parts has an errata (GMX-401) which
- * causes the GMX block to hang if a collision occurs towards
- * the end of a <68 byte packet. As a workaround for this, we
- * pad packets to be 68 bytes whenever we are in half duplex
- * mode. We don't handle the case of having a small packet but
- * no room to add the padding. The kernel should always give
- * us at least a cache line.
- */
- if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- if (interface < 2) {
- /* We only need to pad packet in half duplex mode */
- gmx_prt_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- if (gmx_prt_cfg.s.duplex == 0) {
- int add_bytes = 64 - skb->len;
-
- if ((skb_tail_pointer(skb) + add_bytes) <=
- skb_end_pointer(skb))
- __skb_put_zero(skb, add_bytes);
- }
- }
- }
-
- /* Build the PKO command */
- pko_command.u64 = 0;
-#ifdef __LITTLE_ENDIAN
- pko_command.s.le = 1;
-#endif
- pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
- pko_command.s.segs = 1;
- pko_command.s.total_bytes = skb->len;
- pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
- pko_command.s.subone0 = 1;
-
- pko_command.s.dontfree = 1;
-
- /* Build the PKO buffer pointer */
- hw_buffer.u64 = 0;
- if (skb_shinfo(skb)->nr_frags == 0) {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
- hw_buffer.s.pool = 0;
- hw_buffer.s.size = skb->len;
- } else {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
- hw_buffer.s.pool = 0;
- hw_buffer.s.size = skb_headlen(skb);
- CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *fs = skb_shinfo(skb)->frags + i;
-
- hw_buffer.s.addr =
- XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
- hw_buffer.s.size = skb_frag_size(fs);
- CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
- }
- hw_buffer.s.addr =
- XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
- hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
- pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
- pko_command.s.gather = 1;
- goto dont_put_skbuff_in_hw;
- }
-
- /*
- * See if we can put this skb in the FPA pool. Any strange
- * behavior from the Linux networking stack will most likely
- * be caused by a bug in the following code. If some field is
- * in use by the network stack and gets carried over when a
- * buffer is reused, bad things may happen. If in doubt and
- * you don't need the absolute best performance, disable the
- * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
- * shown a 25% increase in performance under some loads.
- */
-#if REUSE_SKBUFFS_WITHOUT_FREE
- fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
- if (unlikely(skb->data < fpa_head)) {
- /* TX buffer beginning can't meet FPA alignment constraints */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely
- ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
- /* TX buffer isn't large enough for the FPA */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_shared(skb))) {
- /* TX buffer sharing data with someone else */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_cloned(skb))) {
- /* TX buffer has been cloned */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_header_cloned(skb))) {
- /* TX buffer header has been cloned */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb->destructor)) {
- /* TX buffer has a destructor */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely(skb_shinfo(skb)->nr_frags)) {
- /* TX buffer has fragments */
- goto dont_put_skbuff_in_hw;
- }
- if (unlikely
- (skb->truesize !=
- sizeof(*skb) + skb_end_offset(skb))) {
- /* TX buffer truesize has been changed */
- goto dont_put_skbuff_in_hw;
- }
-
- /*
- * We can use this buffer in the FPA. We don't need the FAU
- * update anymore
- */
- pko_command.s.dontfree = 0;
-
- hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
- ((unsigned long)fpa_head >> 7);
-
- *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
-
- /*
- * The skbuff will be reused without ever being freed. We must
- * cleanup a bunch of core things.
- */
- dst_release(skb_dst(skb));
- skb_dst_set(skb, NULL);
- skb_ext_reset(skb);
- nf_reset_ct(skb);
-
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
- skb_reset_tc(skb);
-#endif /* CONFIG_NET_SCHED */
-#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
-
-dont_put_skbuff_in_hw:
-
- /* Check if we can use the hardware checksumming */
- if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->version == 4) &&
- (ip_hdr(skb)->ihl == 5) &&
- ((ip_hdr(skb)->frag_off == 0) ||
- (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
- ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
- /* Use hardware checksum calc */
- pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
- }
-
- if (USE_ASYNC_IOBDMA) {
- /* Get the number of skbuffs in use by the hardware */
- CVMX_SYNCIOBDMA;
- skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
- } else {
- /* Get the number of skbuffs in use by the hardware */
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- buffers_to_free =
- cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
- }
-
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
-
- /*
- * If we're sending faster than the receive path can free
- * them, then don't do the HW free.
- */
- if ((buffers_to_free < -100) && !pko_command.s.dontfree)
- pko_command.s.dontfree = 1;
-
- if (pko_command.s.dontfree) {
- queue_type = QUEUE_CORE;
- pko_command.s.reg0 = priv->fau + qos * 4;
- } else {
- queue_type = QUEUE_HW;
- }
- if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- FAU_TOTAL_TX_TO_CLEAN, 1);
-
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-
- /* Drop this packet if we have too many already queued to the HW */
- if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
- MAX_OUT_QUEUE_DEPTH)) {
- if (dev->tx_queue_len != 0) {
- /* Drop the lock when notifying the core. */
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
- flags);
- netif_stop_queue(dev);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock,
- flags);
- } else {
- /* If not using normal queueing. */
- queue_type = QUEUE_DROP;
- goto skip_xmit;
- }
- }
-
- cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
- CVMX_PKO_LOCK_NONE);
-
- /* Send the packet to the output queue */
- if (unlikely(cvmx_pko_send_packet_finish(priv->port,
- priv->queue + qos,
- pko_command, hw_buffer,
- CVMX_PKO_LOCK_NONE))) {
- printk_ratelimited("%s: Failed to send the packet\n",
- dev->name);
- queue_type = QUEUE_DROP;
- }
-skip_xmit:
- to_free_list = NULL;
-
- switch (queue_type) {
- case QUEUE_DROP:
- skb->next = to_free_list;
- to_free_list = skb;
- dev->stats.tx_dropped++;
- break;
- case QUEUE_HW:
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- break;
- case QUEUE_CORE:
- __skb_queue_tail(&priv->tx_free_list[qos], skb);
- break;
- default:
- BUG();
- }
-
- while (skb_to_free > 0) {
- struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
-
- t->next = to_free_list;
- to_free_list = t;
- skb_to_free--;
- }
-
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
-
- /* Do the actual freeing outside of the lock. */
- while (to_free_list) {
- struct sk_buff *t = to_free_list;
-
- to_free_list = to_free_list->next;
- dev_kfree_skb_any(t);
- }
-
- if (USE_ASYNC_IOBDMA) {
- CVMX_SYNCIOBDMA;
- total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- /* Restore the scratch area */
- cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
- cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
- } else {
- total_to_clean =
- cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
- }
-
- if (total_to_clean & 0x3ff) {
- /*
- * Schedule the cleanup tasklet every 1024 packets for
- * the pathological case of high traffic on one port
- * delaying clean up of packets on a different port
- * that is blocked waiting for the cleanup.
- */
- tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
- }
-
- cvm_oct_kick_tx_poll_watchdog();
-
- return NETDEV_TX_OK;
-}
-
-/**
- * cvm_oct_xmit_pow - transmit a packet to the POW
- * @skb: Packet to send
- * @dev: Device info structure
- *
- * Returns zero, always.
- */
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- void *packet_buffer;
- void *copy_location;
-
- /* Get a work queue entry */
- struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
-
- if (unlikely(!work)) {
- printk_ratelimited("%s: Failed to allocate a work queue entry\n",
- dev->name);
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- /* Get a packet buffer */
- packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
- if (unlikely(!packet_buffer)) {
- printk_ratelimited("%s: Failed to allocate a packet buffer\n",
- dev->name);
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- /*
- * Calculate where we need to copy the data to. We need to
- * leave 8 bytes for a next pointer (unused). We also need to
- * include any configured skip. Then we need to align the IP
- * packet's src and dest addresses into the same 64-bit word.
- * The calculation below may add a little extra, but that
- * doesn't hurt.
- */
- copy_location = packet_buffer + sizeof(u64);
- copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
-
- /*
- * We have to copy the packet since whoever processes this
- * packet will free it to a hardware pool. We can't use the
- * trick of counting outstanding packets like in
- * cvm_oct_xmit.
- */
- memcpy(copy_location, skb->data, skb->len);
-
- /*
- * Fill in some of the work queue fields. We may need to add
- * more if the software at the other end needs them.
- */
- if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
- work->word0.pip.cn38xx.hw_chksum = skb->csum;
- work->word1.len = skb->len;
- cvmx_wqe_set_port(work, priv->port);
- cvmx_wqe_set_qos(work, priv->port & 0x7);
- cvmx_wqe_set_grp(work, pow_send_group);
- work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- work->word1.tag = pow_send_group; /* FIXME */
- /* Default to zero. The explicit zero assignments below are commented out. */
- work->word2.u64 = 0;
- work->word2.s.bufs = 1;
- work->packet_ptr.u64 = 0;
- work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
- work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
- work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
- work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- work->word2.s.ip_offset = 14;
-#if 0
- work->word2.s.vlan_valid = 0; /* FIXME */
- work->word2.s.vlan_cfi = 0; /* FIXME */
- work->word2.s.vlan_id = 0; /* FIXME */
- work->word2.s.dec_ipcomp = 0; /* FIXME */
-#endif
- work->word2.s.tcp_or_udp =
- (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP);
-#if 0
- /* FIXME */
- work->word2.s.dec_ipsec = 0;
- /* We only support IPv4 right now */
- work->word2.s.is_v6 = 0;
- /* Hardware would set to zero */
- work->word2.s.software = 0;
- /* No error, packet is internal */
- work->word2.s.L4_error = 0;
-#endif
- work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
- (ip_hdr(skb)->frag_off ==
- cpu_to_be16(1 << 14)));
-#if 0
- /* Assume Linux is sending a good packet */
- work->word2.s.IP_exc = 0;
-#endif
- work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
- work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
-#if 0
- /* This is an IP packet */
- work->word2.s.not_IP = 0;
- /* No error, packet is internal */
- work->word2.s.rcv_error = 0;
- /* No error, packet is internal */
- work->word2.s.err_code = 0;
-#endif
-
- /*
- * When copying the data, include 4 bytes of the
- * ethernet header to align the same way hardware
- * does.
- */
- memcpy(work->packet_data, skb->data + 10,
- sizeof(work->packet_data));
- } else {
-#if 0
- work->word2.snoip.vlan_valid = 0; /* FIXME */
- work->word2.snoip.vlan_cfi = 0; /* FIXME */
- work->word2.snoip.vlan_id = 0; /* FIXME */
- work->word2.snoip.software = 0; /* Hardware would set to zero */
-#endif
- work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
- work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
- work->word2.snoip.is_bcast =
- (skb->pkt_type == PACKET_BROADCAST);
- work->word2.snoip.is_mcast =
- (skb->pkt_type == PACKET_MULTICAST);
- work->word2.snoip.not_IP = 1; /* IP was done up above */
-#if 0
- /* No error, packet is internal */
- work->word2.snoip.rcv_error = 0;
- /* No error, packet is internal */
- work->word2.snoip.err_code = 0;
-#endif
- memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
- }
-
- /* Submit the packet to the POW */
- cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
- cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- dev_consume_skb_any(skb);
- return 0;
-}
-
-/**
- * cvm_oct_tx_shutdown_dev - free all skbs currently queued for TX.
- * @dev: Device being shutdown
- *
- */
-void cvm_oct_tx_shutdown_dev(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- unsigned long flags;
- int qos;
-
- for (qos = 0; qos < 16; qos++) {
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- while (skb_queue_len(&priv->tx_free_list[qos]))
- dev_kfree_skb_any(__skb_dequeue
- (&priv->tx_free_list[qos]));
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
- }
-}
-
-static void cvm_oct_tx_do_cleanup(unsigned long arg)
-{
- int port;
-
- for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
- if (cvm_oct_device[port]) {
- struct net_device *dev = cvm_oct_device[port];
-
- cvm_oct_free_tx_skbs(dev);
- }
- }
-}
-
-static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
-{
- /* Disable the interrupt. */
- cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Do the work in the tasklet. */
- tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
- return IRQ_HANDLED;
-}
-
-void cvm_oct_tx_initialize(void)
-{
- int i;
-
- /* Disable the interrupt. */
- cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
- i = request_irq(OCTEON_IRQ_TIMER1,
- cvm_oct_tx_cleanup_watchdog, 0,
- "Ethernet", cvm_oct_device);
-
- if (i)
- panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
-}
-
-void cvm_oct_tx_shutdown(void)
-{
- /* Free the interrupt handler */
- free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
-}
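
The least obvious arithmetic above is cvm_oct_adjust_skb_to_free(): the PKO hardware decrements the per-queue FAU counter once per transmitted skb, so the counter's value is minus the number of freeable skbs. The driver optimistically adds MAX_SKB_TO_FREE, then returns the unused credit, netting min(-old, MAX_SKB_TO_FREE). A sketch with C11 atomics in place of the FAU, names illustrative:

#include <stdatomic.h>

#define MAX_SKB_TO_FREE 32

static int claim_skbs_to_free(atomic_int *fau)
{
	/* "old" is minus the number of skbs the hardware has finished with. */
	int old = atomic_fetch_add(fau, MAX_SKB_TO_FREE);
	int undo = old > 0 ? MAX_SKB_TO_FREE : old + MAX_SKB_TO_FREE;

	if (undo > 0)
		atomic_fetch_sub(fau, undo);	/* give back unused credit */

	return -old > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -old;
}
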
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
deleted file mode 100644
index 78936e9b33b0..000000000000
--- a/drivers/staging/octeon/ethernet-tx.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
- int do_free, int qos);
-void cvm_oct_tx_initialize(void);
-void cvm_oct_tx_shutdown(void);
-void cvm_oct_tx_shutdown_dev(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
deleted file mode 100644
index 2af83a12ca78..000000000000
--- a/drivers/staging/octeon/ethernet-util.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-/**
- * cvm_oct_get_buffer_ptr - convert packet data address to pointer
- * @packet_ptr: Packet data hardware address
- *
- * Returns Packet buffer pointer
- */
-static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
-{
- return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
- << 7);
-}
-
-/**
- * INTERFACE - convert IPD port to logical interface
- * @ipd_port: Port to check
- *
- * Returns Logical interface
- */
-static inline int INTERFACE(int ipd_port)
-{
- int interface;
-
- if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
- return 10;
- interface = cvmx_helper_get_interface_num(ipd_port);
- if (interface >= 0)
- return interface;
- panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
-}
-
-/**
- * INDEX - convert IPD/PKO port number to the port's interface index
- * @ipd_port: Port to check
- *
- * Returns Index into interface port list
- */
-static inline int INDEX(int ipd_port)
-{
- return cvmx_helper_get_interface_index_num(ipd_port);
-}
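
The shift arithmetic in cvm_oct_get_buffer_ptr() above relies on the hardware's "back" field counting 128-byte cache lines from the packet data back to the start of its FPA buffer. A quick standalone check of the address math, with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t buf_start = 0x1000;			/* 128-byte-aligned buffer */
	uint64_t data = buf_start + 3 * 128 + 42;	/* packet data within it */
	uint64_t back = 3;				/* cache lines back to the start */

	/* Same computation as cvm_oct_get_buffer_ptr(). */
	assert((((data >> 7) - back) << 7) == buf_start);
	return 0;
}
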
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
deleted file mode 100644
index f42c3816ce49..000000000000
--- a/drivers/staging/octeon/ethernet.c
+++ /dev/null
@@ -1,992 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2007 Cavium Networks
- */
-
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of_net.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-
-#include <net/dst.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-#include "ethernet-mem.h"
-#include "ethernet-rx.h"
-#include "ethernet-tx.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-
-#define OCTEON_MAX_MTU 65392
-
-static int num_packet_buffers = 1024;
-module_param(num_packet_buffers, int, 0444);
-MODULE_PARM_DESC(num_packet_buffers, "\n"
- "\tNumber of packet buffers to allocate and store in the\n"
- "\tFPA. By default, 1024 packet buffers are used.\n");
-
-static int pow_receive_group = 15;
-module_param(pow_receive_group, int, 0444);
-MODULE_PARM_DESC(pow_receive_group, "\n"
- "\tPOW group to receive packets from. All ethernet hardware\n"
- "\twill be configured to send incoming packets to this POW\n"
- "\tgroup. Also any other software can submit packets to this\n"
- "\tgroup for the kernel to process.");
-
-static int receive_group_order;
-module_param(receive_group_order, int, 0444);
-MODULE_PARM_DESC(receive_group_order, "\n"
- "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
- "\twill be configured to send incoming packets to multiple POW\n"
- "\tgroups. pow_receive_group parameter is ignored when multiple\n"
- "\tgroups are taken into use and groups are allocated starting\n"
- "\tfrom 0. By default, a single group is used.\n");
-
-int pow_send_group = -1;
-module_param(pow_send_group, int, 0644);
-MODULE_PARM_DESC(pow_send_group, "\n"
- "\tPOW group to send packets to other software on. This\n"
- "\tcontrols the creation of the virtual device pow0.\n"
- "\talways_use_pow also depends on this value.");
-
-int always_use_pow;
-module_param(always_use_pow, int, 0444);
-MODULE_PARM_DESC(always_use_pow, "\n"
- "\tWhen set, always send to the pow group. This will cause\n"
- "\tpackets sent to real ethernet devices to be sent to the\n"
- "\tPOW group instead of the hardware. Unless some other\n"
- "\tapplication changes the config, packets will still be\n"
- "\treceived from the low level hardware. Use this option\n"
- "\tto allow a CVMX app to intercept all packets from the\n"
- "\tlinux kernel. You must specify pow_send_group along with\n"
- "\tthis option.");
-
-char pow_send_list[128] = "";
-module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
-MODULE_PARM_DESC(pow_send_list, "\n"
- "\tComma separated list of ethernet devices that should use the\n"
- "\tPOW for transmit instead of the actual ethernet hardware. This\n"
- "\tis a per port version of always_use_pow. always_use_pow takes\n"
- "\tprecedence over this list. For example, setting this to\n"
- "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
- "\tusing the pow_send_group.");
-
-int rx_napi_weight = 32;
-module_param(rx_napi_weight, int, 0444);
-MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-
-/* Mask indicating which receive groups are in use. */
-int pow_receive_groups;
-
-/*
- * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
- *
- * Set to one right before cvm_oct_poll_queue is destroyed.
- */
-atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
-
-/*
- * Array of every ethernet device owned by this driver indexed by
- * the ipd input port number.
- */
-struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
-
-u64 cvm_oct_tx_poll_interval;
-
-static void cvm_oct_rx_refill_worker(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
-
-static void cvm_oct_rx_refill_worker(struct work_struct *work)
-{
- /*
- * FPA 0 may have been drained, try to refill it if we need
- * more than num_packet_buffers / 2, otherwise normal receive
- * processing will refill it. If it were drained, no packets
- * could be received so cvm_oct_napi_poll would never be
- * invoked to do the refill.
- */
- cvm_oct_rx_refill_pool(num_packet_buffers / 2);
-
- if (!atomic_read(&cvm_oct_poll_queue_stopping))
- schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
-}
-
-static void cvm_oct_periodic_worker(struct work_struct *work)
-{
- struct octeon_ethernet *priv = container_of(work,
- struct octeon_ethernet,
- port_periodic_work.work);
-
- if (priv->poll)
- priv->poll(cvm_oct_device[priv->port]);
-
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
- (cvm_oct_device[priv->port]);
-
- if (!atomic_read(&cvm_oct_poll_queue_stopping))
- schedule_delayed_work(&priv->port_periodic_work, HZ);
-}
-
-static void cvm_oct_configure_common_hw(void)
-{
- /* Setup the FPA */
- cvmx_fpa_enable();
- cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
- num_packet_buffers);
- cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
- num_packet_buffers);
- if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
- cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
-
-#ifdef __LITTLE_ENDIAN
- {
- union cvmx_ipd_ctl_status ipd_ctl_status;
-
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_status.s.pkt_lend = 1;
- ipd_ctl_status.s.wqe_lend = 1;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
- }
-#endif
-
- cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
-}
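
One number worth unpacking: the cvmx_helper_setup_red() call above derives both RED thresholds from the packet-buffer pool size. A sketch of the arithmetic, assuming a num_packet_buffers default of 1024 (the variable is defined earlier in this file; the default used here is an assumption):

	/*
	 * pass threshold = num_packet_buffers / 4 = 256 free buffers
	 * drop threshold = num_packet_buffers / 8 = 128 free buffers
	 */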
-
-/**
- * cvm_oct_free_work - Free a work queue entry
- *
- * @work_queue_entry: Work queue entry to free
- *
- * Returns zero on success, negative on failure.
- */
-int cvm_oct_free_work(void *work_queue_entry)
-{
- struct cvmx_wqe *work = work_queue_entry;
-
- int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
-
- while (segments--) {
- union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
- if (unlikely(!segment_ptr.s.i))
- cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
- segment_ptr.s.pool,
- CVMX_FPA_PACKET_POOL_SIZE / 128);
- segment_ptr = next_ptr;
- }
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
-
- return 0;
-}
-EXPORT_SYMBOL(cvm_oct_free_work);
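
Two details of the loop above deserve a note: the hardware keeps each segment's next-buffer pointer in the 8 bytes immediately before the segment's data (hence the read from segment_ptr.s.addr - 8), and a segment with the i bit set must not be returned to the FPA pool, which is why the free is conditional. A minimal userspace sketch of the same read-the-link-before-freeing pattern, under those assumptions (illustrative, not Octeon code):

	#include <stdlib.h>

	struct seg {
		struct seg *next;	/* stands in for the pointer kept before the data */
		int no_free;		/* stands in for the buf_ptr "i" bit */
		char data[64];
	};

	int main(void)
	{
		struct seg *c = calloc(1, sizeof(*c));
		struct seg *b = calloc(1, sizeof(*b));
		struct seg *a = calloc(1, sizeof(*a));
		struct seg *s, *next;

		if (!a || !b || !c)
			return 1;
		a->next = b;
		b->next = c;
		b->no_free = 1;		/* pretend hardware still owns this one */

		for (s = a; s; s = next) {
			next = s->next;	/* grab the link before freeing the node */
			if (!s->no_free)
				free(s);
		}
		return 0;
	}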
-
-/**
- * cvm_oct_common_get_stats - get the low level ethernet statistics
- * @dev: Device to get the statistics from
- *
- * Returns a pointer to the statistics.
- */
-static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
-{
- cvmx_pip_port_status_t rx_status;
- cvmx_pko_port_status_t tx_status;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (octeon_is_simulation()) {
- /* The simulator doesn't support statistics */
- memset(&rx_status, 0, sizeof(rx_status));
- memset(&tx_status, 0, sizeof(tx_status));
- } else {
- cvmx_pip_get_port_status(priv->port, 1, &rx_status);
- cvmx_pko_get_port_status(priv->port, 1, &tx_status);
- }
-
- dev->stats.rx_packets += rx_status.inb_packets;
- dev->stats.tx_packets += tx_status.packets;
- dev->stats.rx_bytes += rx_status.inb_octets;
- dev->stats.tx_bytes += tx_status.octets;
- dev->stats.multicast += rx_status.multicast_packets;
- dev->stats.rx_crc_errors += rx_status.inb_errors;
- dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
- dev->stats.rx_dropped += rx_status.dropped_packets;
- }
-
- return &dev->stats;
-}
-
-/**
- * cvm_oct_common_change_mtu - change the link MTU
- * @dev: Device to change
- * @new_mtu: The new MTU
- *
- * Returns zero on success
- */
-static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- int vlan_bytes = VLAN_HLEN;
-#else
- int vlan_bytes = 0;
-#endif
- int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
-
- dev->mtu = new_mtu;
-
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int index = INDEX(priv->port);
- /* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + mtu_overhead;
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
- OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Signal errors on packets larger than the MTU */
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
- max_packet);
- } else {
-			/*
-			 * Set the hardware to truncate packets larger
-			 * than the MTU and smaller than 64 bytes.
-			 */
- union cvmx_pip_frm_len_chkx frm_len_chk;
-
- frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
- frm_len_chk.s.maxlen = max_packet;
- cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
- frm_len_chk.u64);
- }
- /*
- * Set the hardware to truncate packets larger than
- * the MTU. The jabber register must be set to a
- * multiple of 8 bytes, so round up.
- */
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
- (max_packet + 7) & ~7u);
- }
- return 0;
-}
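
The jabber write above uses a standard bit trick: (x + 7) & ~7 rounds x up to the next multiple of 8. A tiny self-contained check (illustrative values):

	#include <assert.h>

	static unsigned int round_up_8(unsigned int x)
	{
		return (x + 7) & ~7u;
	}

	int main(void)
	{
		assert(round_up_8(1518) == 1520);	/* classic max Ethernet frame */
		assert(round_up_8(1522) == 1528);	/* with one VLAN tag */
		assert(round_up_8(1528) == 1528);	/* already a multiple of 8 */
		return 0;
	}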
-
-/**
- * cvm_oct_common_set_multicast_list - set the multicast list
- * @dev: Device to work on
- */
-static void cvm_oct_common_set_multicast_list(struct net_device *dev)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
-
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- union cvmx_gmxx_rxx_adr_ctl control;
- int index = INDEX(priv->port);
-
- control.u64 = 0;
- control.s.bcst = 1; /* Allow broadcast MAC addresses */
-
- if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
- (dev->flags & IFF_PROMISC))
- /* Force accept multicast packets */
- control.s.mcst = 2;
- else
- /* Force reject multicast packets */
- control.s.mcst = 1;
-
- if (dev->flags & IFF_PROMISC)
-			/*
-			 * In promiscuous mode, reject CAM matches;
-			 * since the CAM is shut off, everything is
-			 * accepted anyway.
-			 */
- control.s.cam_mode = 0;
- else
- /* Filter packets based on the CAM */
- control.s.cam_mode = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
- control.u64);
- if (dev->flags & IFF_PROMISC)
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 0);
- else
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 1);
-
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
-}
-
-static int cvm_oct_set_mac_filter(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int interface = INTERFACE(priv->port);
-
- if ((interface < 2) &&
- (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int i;
- u8 *ptr = dev->dev_addr;
- u64 mac = 0;
- int index = INDEX(priv->port);
-
- for (i = 0; i < 6; i++)
- mac = (mac << 8) | (u64)ptr[i];
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[0]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[1]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[2]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[3]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[4]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[5]);
- cvm_oct_common_set_multicast_list(dev);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- return 0;
-}
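
The loop above folds the six address bytes into a single u64 (most significant byte first), which is the form written to CVMX_GMXX_SMACX; the per-byte CAM writes then program the same address for RX filtering. A standalone sketch of the fold (illustrative address):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint64_t mac = 0;
		int i;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)addr[i];

		printf("0x%012llx\n", (unsigned long long)mac);	/* 0x001122334455 */
		return 0;
	}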
-
-/**
- * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
- * @dev: The device in question.
- * @addr: Socket address.
- *
- * Returns zero on success
- */
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
-{
- int r = eth_mac_addr(dev, addr);
-
- if (r)
- return r;
- return cvm_oct_set_mac_filter(dev);
-}
-
-/**
- * cvm_oct_common_init - per network device initialization
- * @dev: Device to initialize
- *
- * Returns zero on success
- */
-int cvm_oct_common_init(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- const u8 *mac = NULL;
-
- if (priv->of_node)
- mac = of_get_mac_address(priv->of_node);
-
- if (!IS_ERR_OR_NULL(mac))
- ether_addr_copy(dev->dev_addr, mac);
- else
- eth_hw_addr_random(dev);
-
-	/*
-	 * Force the interface to transmit via the POW if always_use_pow
-	 * was specified or the device is listed in pow_send_list.
-	 */
- if ((pow_send_group != -1) &&
- (always_use_pow || strstr(pow_send_list, dev->name)))
- priv->queue = -1;
-
- if (priv->queue != -1)
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-
- /* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
- dev->ethtool_ops = &cvm_oct_ethtool_ops;
-
- cvm_oct_set_mac_filter(dev);
- dev_set_mtu(dev, dev->mtu);
-
- /*
- * Zero out stats for port so we won't mistakenly show
- * counters from the bootloader.
- */
- memset(dev->netdev_ops->ndo_get_stats(dev), 0,
- sizeof(struct net_device_stats));
-
- if (dev->netdev_ops->ndo_stop)
- dev->netdev_ops->ndo_stop(dev);
-
- return 0;
-}
-
-void cvm_oct_common_uninit(struct net_device *dev)
-{
- if (dev->phydev)
- phy_disconnect(dev->phydev);
-}
-
-int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *))
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- union cvmx_helper_link_info link_info;
- int rv;
-
- rv = cvm_oct_phy_setup_device(dev);
- if (rv)
- return rv;
-
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 1;
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- gmx_cfg.s.pknd = priv->port;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- if (octeon_is_simulation())
- return 0;
-
- if (dev->phydev) {
- int r = phy_read_status(dev->phydev);
-
- if (r == 0 && dev->phydev->link == 0)
- netif_carrier_off(dev);
- cvm_oct_adjust_link(dev);
- } else {
- link_info = cvmx_helper_link_get(priv->port);
- if (!link_info.s.link_up)
- netif_carrier_off(dev);
- priv->poll = link_poll;
- link_poll(dev);
- }
-
- return 0;
-}
-
-void cvm_oct_link_poll(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_helper_link_info link_info;
-
- link_info = cvmx_helper_link_get(priv->port);
- if (link_info.u64 == priv->link_info)
- return;
-
- if (cvmx_helper_link_set(priv->port, link_info))
- link_info.u64 = priv->link_info;
- else
- priv->link_info = link_info.u64;
-
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
-}
-
-static int cvm_oct_xaui_open(struct net_device *dev)
-{
- return cvm_oct_common_open(dev, cvm_oct_link_poll);
-}
-
-static const struct net_device_ops cvm_oct_npi_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_xaui_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
- .ndo_init = cvm_oct_sgmii_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_sgmii_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_spi_netdev_ops = {
- .ndo_init = cvm_oct_spi_init,
- .ndo_uninit = cvm_oct_spi_uninit,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_uninit = cvm_oct_common_uninit,
- .ndo_open = cvm_oct_rgmii_open,
- .ndo_stop = cvm_oct_common_stop,
- .ndo_start_xmit = cvm_oct_xmit,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static const struct net_device_ops cvm_oct_pow_netdev_ops = {
- .ndo_init = cvm_oct_common_init,
- .ndo_start_xmit = cvm_oct_xmit_pow,
- .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
- .ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
- .ndo_change_mtu = cvm_oct_common_change_mtu,
- .ndo_get_stats = cvm_oct_common_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cvm_oct_poll_controller,
-#endif
-};
-
-static struct device_node *
-cvm_oct_of_get_child(const struct device_node *parent, int reg_val)
-{
- struct device_node *node = NULL;
- int size;
- const __be32 *addr;
-
- for (;;) {
- node = of_get_next_child(parent, node);
- if (!node)
- break;
- addr = of_get_property(node, "reg", &size);
- if (addr && (be32_to_cpu(*addr) == reg_val))
- break;
- }
- return node;
-}
-
-static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
-{
- struct device_node *ni, *np;
-
- ni = cvm_oct_of_get_child(pip, interface);
- if (!ni)
- return NULL;
-
- np = cvm_oct_of_get_child(ni, port);
- of_node_put(ni);
-
- return np;
-}
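
Together, these two helpers resolve a port's device-tree node by matching the first cell of each child's reg property, first at the interface level and then at the port level. A hypothetical fragment of the layout being walked (node names are illustrative, not taken from the actual bindings):

	/*
	 * cvm_oct_node_for_port(pip, 1, 0) would roughly walk:
	 *
	 *	pip {
	 *		interface@1 {			// reg = <1>
	 *			ethernet-port@0 {	// reg = <0>
	 *			};
	 *		};
	 *	};
	 */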
-
-static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
- int port)
-{
- struct device_node *np = priv->of_node;
- u32 delay_value;
- bool rx_delay;
- bool tx_delay;
-
- /* By default, both RX/TX delay is enabled in
- * __cvmx_helper_rgmii_enable().
- */
- rx_delay = true;
- tx_delay = true;
-
- if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
- rx_delay = delay_value > 0;
- }
- if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
- tx_delay = delay_value > 0;
- }
-
- if (!rx_delay && !tx_delay)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- else if (!rx_delay)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
- else if (!tx_delay)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
- else
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
-}
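
The fallthrough logic above decides which side of the RGMII link inserts the clock skew: when a MAC-side delay is disabled in the device tree, the matching internal-delay PHY mode is requested so the PHY compensates. Summarized (property names as used above):

	/*
	 *	MAC rx delay	MAC tx delay	resulting phy_mode
	 *	off		off		PHY_INTERFACE_MODE_RGMII_ID
	 *	off		on		PHY_INTERFACE_MODE_RGMII_RXID
	 *	on		off		PHY_INTERFACE_MODE_RGMII_TXID
	 *	on		on		PHY_INTERFACE_MODE_RGMII
	 */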
-
-static int cvm_oct_probe(struct platform_device *pdev)
-{
- int num_interfaces;
- int interface;
- int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
- int qos;
- struct device_node *pip;
- int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
-
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- mtu_overhead += VLAN_HLEN;
-#endif
-
- octeon_mdiobus_force_mod_depencency();
-
- pip = pdev->dev.of_node;
- if (!pip) {
- pr_err("Error: No 'pip' in /aliases\n");
- return -EINVAL;
- }
-
- cvm_oct_configure_common_hw();
-
- cvmx_helper_initialize_packet_io_global();
-
- if (receive_group_order) {
- if (receive_group_order > 4)
- receive_group_order = 4;
- pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
- } else {
- pow_receive_groups = BIT(pow_receive_group);
- }
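	/* Aside, not from the original source: with the order clamped to 4,
	 * receive_group_order enables 1 << order groups, so the mask is
	 * (1 << (1 << order)) - 1, e.g. order 2 -> 0xf (groups 0-3) and
	 * order 4 -> 0xffff (groups 0-15).
	 */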
-
- /* Change the input group for all ports before input is enabled */
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
-
- for (port = cvmx_helper_get_ipd_port(interface, 0);
- port < cvmx_helper_get_ipd_port(interface, num_ports);
- port++) {
- union cvmx_pip_prt_tagx pip_prt_tagx;
-
- pip_prt_tagx.u64 =
- cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
-
- if (receive_group_order) {
- int tag_mask;
-
- /* We support only 16 groups at the moment, so
- * always disable the two additional "hidden"
- * tag_mask bits on CN68XX.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN68XX))
- pip_prt_tagx.u64 |= 0x3ull << 44;
-
- tag_mask = ~((1 << receive_group_order) - 1);
- pip_prt_tagx.s.grptagbase = 0;
- pip_prt_tagx.s.grptagmask = tag_mask;
- pip_prt_tagx.s.grptag = 1;
- pip_prt_tagx.s.tag_mode = 0;
- pip_prt_tagx.s.inc_prt_flag = 1;
- pip_prt_tagx.s.ip6_dprt_flag = 1;
- pip_prt_tagx.s.ip4_dprt_flag = 1;
- pip_prt_tagx.s.ip6_sprt_flag = 1;
- pip_prt_tagx.s.ip4_sprt_flag = 1;
- pip_prt_tagx.s.ip6_dst_flag = 1;
- pip_prt_tagx.s.ip4_dst_flag = 1;
- pip_prt_tagx.s.ip6_src_flag = 1;
- pip_prt_tagx.s.ip4_src_flag = 1;
- pip_prt_tagx.s.grp = 0;
- } else {
- pip_prt_tagx.s.grptag = 0;
- pip_prt_tagx.s.grp = pow_receive_group;
- }
-
- cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
- pip_prt_tagx.u64);
- }
- }
-
- cvmx_helper_ipd_and_packet_input_enable();
-
- memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
-
- /*
- * Initialize the FAU used for counting packet buffers that
- * need to be freed.
- */
- cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
- /* Initialize the FAU used for counting tx SKBs that need to be freed */
- cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
-
-	if (pow_send_group != -1) {
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct octeon_ethernet));
- if (dev) {
- /* Initialize the device private structure. */
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->netdev_ops = &cvm_oct_pow_netdev_ops;
- priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
- priv->port = CVMX_PIP_NUM_INPUT_PORTS;
- priv->queue = -1;
- strscpy(dev->name, "pow%d", sizeof(dev->name));
- for (qos = 0; qos < 16; qos++)
- skb_queue_head_init(&priv->tx_free_list[qos]);
- dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
- dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
-
- if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet device for POW\n");
- free_netdev(dev);
- } else {
- cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
- pr_info("%s: POW send group %d, receive group %d\n",
- dev->name, pow_send_group,
- pow_receive_group);
- }
- } else {
- pr_err("Failed to allocate ethernet device for POW\n");
- }
- }
-
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- cvmx_helper_interface_mode_t imode =
- cvmx_helper_interface_get_mode(interface);
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- int port_index;
-
- for (port_index = 0,
- port = cvmx_helper_get_ipd_port(interface, 0);
- port < cvmx_helper_get_ipd_port(interface, num_ports);
- port_index++, port++) {
- struct octeon_ethernet *priv;
- struct net_device *dev =
- alloc_etherdev(sizeof(struct octeon_ethernet));
- if (!dev) {
- pr_err("Failed to allocate ethernet device for port %d\n",
- port);
- continue;
- }
-
- /* Initialize the device private structure. */
- SET_NETDEV_DEV(dev, &pdev->dev);
- priv = netdev_priv(dev);
- priv->netdev = dev;
- priv->of_node = cvm_oct_node_for_port(pip, interface,
- port_index);
-
- INIT_DELAYED_WORK(&priv->port_periodic_work,
- cvm_oct_periodic_worker);
- priv->imode = imode;
- priv->port = port;
- priv->queue = cvmx_pko_get_base_queue(priv->port);
- priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
- priv->phy_mode = PHY_INTERFACE_MODE_NA;
- for (qos = 0; qos < 16; qos++)
- skb_queue_head_init(&priv->tx_free_list[qos]);
- for (qos = 0; qos < cvmx_pko_get_num_queues(port);
- qos++)
- cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
- dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
- dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
-
- switch (priv->imode) {
-			/* These types have no ports connected to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strscpy(dev->name, "npi%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
- strscpy(dev->name, "xaui%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strscpy(dev->name, "loop%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
- dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
- strscpy(dev->name, "eth%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- dev->netdev_ops = &cvm_oct_spi_netdev_ops;
- strscpy(dev->name, "spi%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- priv->phy_mode = PHY_INTERFACE_MODE_GMII;
- dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strscpy(dev->name, "eth%d", sizeof(dev->name));
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strscpy(dev->name, "eth%d", sizeof(dev->name));
- cvm_set_rgmii_delay(priv, interface,
- port_index);
- break;
- }
-
- if (!dev->netdev_ops) {
- free_netdev(dev);
- } else if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
- free_netdev(dev);
- } else {
- cvm_oct_device[priv->port] = dev;
- fau -=
- cvmx_pko_get_num_queues(priv->port) *
- sizeof(u32);
- schedule_delayed_work(&priv->port_periodic_work,
- HZ);
- }
- }
- }
-
- cvm_oct_tx_initialize();
- cvm_oct_rx_initialize();
-
-	/*
-	 * 150 us: about 10 1500-byte packet times at 1 Gbps, converted
-	 * below to core-clock cycles (e.g. 120000 cycles on an 800 MHz
-	 * core).
-	 */
- cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
-
- schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
-
- return 0;
-}
-
-static int cvm_oct_remove(struct platform_device *pdev)
-{
- int port;
-
- cvmx_ipd_disable();
-
- atomic_inc_return(&cvm_oct_poll_queue_stopping);
- cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
-
- cvm_oct_rx_shutdown();
- cvm_oct_tx_shutdown();
-
- cvmx_pko_disable();
-
- /* Free the ethernet devices */
- for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
- if (cvm_oct_device[port]) {
- struct net_device *dev = cvm_oct_device[port];
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- cancel_delayed_work_sync(&priv->port_periodic_work);
-
- cvm_oct_tx_shutdown_dev(dev);
- unregister_netdev(dev);
- free_netdev(dev);
- cvm_oct_device[port] = NULL;
- }
- }
-
- cvmx_pko_shutdown();
-
- cvmx_ipd_free_ptr();
-
- /* Free the HW pools */
- cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
- num_packet_buffers);
- cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
- num_packet_buffers);
- if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
- cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
- return 0;
-}
-
-static const struct of_device_id cvm_oct_match[] = {
- {
- .compatible = "cavium,octeon-3860-pip",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, cvm_oct_match);
-
-static struct platform_driver cvm_oct_driver = {
- .probe = cvm_oct_probe,
- .remove = cvm_oct_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = cvm_oct_match,
- },
-};
-
-module_platform_driver(cvm_oct_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
deleted file mode 100644
index a6140705706f..000000000000
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is based on code from OCTEON SDK by Cavium Networks.
- *
- * Copyright (c) 2003-2010 Cavium Networks
- */
-
-/*
- * External interface for the Cavium Octeon ethernet driver.
- */
-#ifndef OCTEON_ETHERNET_H
-#define OCTEON_ETHERNET_H
-
-#include <linux/of.h>
-#include <linux/phy.h>
-
-#ifdef CONFIG_CAVIUM_OCTEON_SOC
-
-#include <asm/octeon/octeon.h>
-
-#include <asm/octeon/cvmx-asxx-defs.h>
-#include <asm/octeon/cvmx-config.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-gmxx-defs.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-helper-util.h>
-#include <asm/octeon/cvmx-ipd.h>
-#include <asm/octeon/cvmx-ipd-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-pow.h>
-#include <asm/octeon/cvmx-scratch.h>
-#include <asm/octeon/cvmx-spi.h>
-#include <asm/octeon/cvmx-spxx-defs.h>
-#include <asm/octeon/cvmx-stxx-defs.h>
-#include <asm/octeon/cvmx-wqe.h>
-
-#else
-
-#include "octeon-stubs.h"
-
-#endif
-
-/**
- * struct octeon_ethernet - the Ethernet driver's private state,
- * stored in netdev_priv(dev).
- */
-struct octeon_ethernet {
- /* PKO hardware output port */
- int port;
- /* PKO hardware queue for the port */
- int queue;
- /* Hardware fetch and add to count outstanding tx buffers */
- int fau;
- /* My netdev. */
- struct net_device *netdev;
- /*
- * Type of port. This is one of the enums in
- * cvmx_helper_interface_mode_t
- */
- int imode;
- /* PHY mode */
- phy_interface_t phy_mode;
- /* List of outstanding tx buffers per queue */
- struct sk_buff_head tx_free_list[16];
- unsigned int last_speed;
- unsigned int last_link;
- /* Last negotiated link state */
- u64 link_info;
- /* Called periodically to check link status */
- void (*poll)(struct net_device *dev);
- struct delayed_work port_periodic_work;
- struct device_node *of_node;
-};
-
-int cvm_oct_free_work(void *work_queue_entry);
-
-int cvm_oct_rgmii_open(struct net_device *dev);
-
-int cvm_oct_sgmii_init(struct net_device *dev);
-int cvm_oct_sgmii_open(struct net_device *dev);
-
-int cvm_oct_spi_init(struct net_device *dev);
-void cvm_oct_spi_uninit(struct net_device *dev);
-
-int cvm_oct_common_init(struct net_device *dev);
-void cvm_oct_common_uninit(struct net_device *dev);
-void cvm_oct_adjust_link(struct net_device *dev);
-int cvm_oct_common_stop(struct net_device *dev);
-int cvm_oct_common_open(struct net_device *dev,
- void (*link_poll)(struct net_device *));
-void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- union cvmx_helper_link_info li);
-void cvm_oct_link_poll(struct net_device *dev);
-
-extern int always_use_pow;
-extern int pow_send_group;
-extern int pow_receive_groups;
-extern char pow_send_list[];
-extern struct net_device *cvm_oct_device[];
-extern atomic_t cvm_oct_poll_queue_stopping;
-extern u64 cvm_oct_tx_poll_interval;
-
-extern int rx_napi_weight;
-
-#endif
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
deleted file mode 100644
index 79213c045504..000000000000
--- a/drivers/staging/octeon/octeon-stubs.h
+++ /dev/null
@@ -1,1433 +0,0 @@
-#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
-
-#ifndef XKPHYS_TO_PHYS
-# define XKPHYS_TO_PHYS(p) (p)
-#endif
-
-#define OCTEON_IRQ_WORKQ0 0
-#define OCTEON_IRQ_RML 0
-#define OCTEON_IRQ_TIMER1 0
-#define OCTEON_IS_MODEL(x) 0
-#define octeon_has_feature(x) 0
-#define octeon_get_clock_rate() 0
-
-#define CVMX_SYNCIOBDMA do { } while (0)
-
-#define CVMX_HELPER_INPUT_TAG_TYPE 0
-#define CVMX_HELPER_FIRST_MBUFF_SKIP 7
-#define CVMX_FAU_REG_END (2048)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE 16
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE 16
-#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a)+(b))
-#define CVMX_GMXX_PRTX_CFG(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_JABBER(a, b) ((a)+(b))
-#define CVMX_IPD_CTL_STATUS 0
-#define CVMX_PIP_FRM_LEN_CHKX(a) (a)
-#define CVMX_PIP_NUM_INPUT_PORTS 1
-#define CVMX_SCR_SCRATCH 0
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2
-#define CVMX_IPD_SUB_PORT_FCS 0
-#define CVMX_SSO_WQ_IQ_DIS 0
-#define CVMX_SSO_WQ_INT 0
-#define CVMX_POW_WQ_INT 0
-#define CVMX_SSO_WQ_INT_PC 0
-#define CVMX_NPI_RSL_INT_BLOCKS 0
-#define CVMX_POW_WQ_INT_PC 0
-
-union cvmx_pip_wqe_word2 {
- uint64_t u64;
- struct {
- uint64_t bufs:8;
- uint64_t ip_offset:8;
- uint64_t vlan_valid:1;
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- uint64_t vlan_cfi:1;
- uint64_t vlan_id:12;
- uint64_t pr:4;
- uint64_t unassigned2:8;
- uint64_t dec_ipcomp:1;
- uint64_t tcp_or_udp:1;
- uint64_t dec_ipsec:1;
- uint64_t is_v6:1;
- uint64_t software:1;
- uint64_t L4_error:1;
- uint64_t is_frag:1;
- uint64_t IP_exc:1;
- uint64_t is_bcast:1;
- uint64_t is_mcast:1;
- uint64_t not_IP:1;
- uint64_t rcv_error:1;
- uint64_t err_code:8;
- } s;
- struct {
- uint64_t bufs:8;
- uint64_t ip_offset:8;
- uint64_t vlan_valid:1;
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- uint64_t vlan_cfi:1;
- uint64_t vlan_id:12;
- uint64_t port:12;
- uint64_t dec_ipcomp:1;
- uint64_t tcp_or_udp:1;
- uint64_t dec_ipsec:1;
- uint64_t is_v6:1;
- uint64_t software:1;
- uint64_t L4_error:1;
- uint64_t is_frag:1;
- uint64_t IP_exc:1;
- uint64_t is_bcast:1;
- uint64_t is_mcast:1;
- uint64_t not_IP:1;
- uint64_t rcv_error:1;
- uint64_t err_code:8;
- } s_cn68xx;
-
- struct {
- uint64_t unused1:16;
- uint64_t vlan:16;
- uint64_t unused2:32;
- } svlan;
- struct {
- uint64_t bufs:8;
- uint64_t unused:8;
- uint64_t vlan_valid:1;
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- uint64_t vlan_cfi:1;
- uint64_t vlan_id:12;
- uint64_t pr:4;
- uint64_t unassigned2:12;
- uint64_t software:1;
- uint64_t unassigned3:1;
- uint64_t is_rarp:1;
- uint64_t is_arp:1;
- uint64_t is_bcast:1;
- uint64_t is_mcast:1;
- uint64_t not_IP:1;
- uint64_t rcv_error:1;
- uint64_t err_code:8;
- } snoip;
-
-};
-
-union cvmx_pip_wqe_word0 {
- struct {
- uint64_t next_ptr:40;
- uint8_t unused;
- __wsum hw_chksum;
- } cn38xx;
- struct {
- uint64_t pknd:6; /* 0..5 */
- uint64_t unused2:2; /* 6..7 */
- uint64_t bpid:6; /* 8..13 */
- uint64_t unused1:18; /* 14..31 */
- uint64_t l2ptr:8; /* 32..39 */
- uint64_t l3ptr:8; /* 40..47 */
- uint64_t unused0:8; /* 48..55 */
- uint64_t l4ptr:8; /* 56..63 */
- } cn68xx;
-};
-
-union cvmx_wqe_word0 {
- uint64_t u64;
- union cvmx_pip_wqe_word0 pip;
-};
-
-union cvmx_wqe_word1 {
- uint64_t u64;
- struct {
- uint64_t tag:32;
- uint64_t tag_type:2;
- uint64_t varies:14;
- uint64_t len:16;
- };
- struct {
- uint64_t tag:32;
- uint64_t tag_type:2;
- uint64_t zero_2:3;
- uint64_t grp:6;
- uint64_t zero_1:1;
- uint64_t qos:3;
- uint64_t zero_0:1;
- uint64_t len:16;
- } cn68xx;
- struct {
- uint64_t tag:32;
- uint64_t tag_type:2;
- uint64_t zero_2:1;
- uint64_t grp:4;
- uint64_t qos:3;
- uint64_t ipprt:6;
- uint64_t len:16;
- } cn38xx;
-};
-
-union cvmx_buf_ptr {
- void *ptr;
- uint64_t u64;
- struct {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t addr:40;
- } s;
-};
-
-struct cvmx_wqe {
- union cvmx_wqe_word0 word0;
- union cvmx_wqe_word1 word1;
- union cvmx_pip_wqe_word2 word2;
- union cvmx_buf_ptr packet_ptr;
- uint8_t packet_data[96];
-};
-
-union cvmx_helper_link_info {
- uint64_t u64;
- struct {
- uint64_t reserved_20_63:44;
- uint64_t link_up:1; /**< Is the physical link up? */
- uint64_t full_duplex:1; /**< 1 if the link is full duplex */
- uint64_t speed:18; /**< Speed of the link in Mbps */
- } s;
-};
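
Every stub register definition in this header follows the overlay idiom shown here: a raw u64 view aliased with named bitfields. A short standalone illustration (C bitfield placement is implementation-defined; the declaration order above matches big-endian MIPS, so the raw value printed on another host may differ):

	#include <stdint.h>
	#include <stdio.h>

	union link_info {			/* mirrors cvmx_helper_link_info */
		uint64_t u64;
		struct {
			uint64_t reserved_20_63:44;
			uint64_t link_up:1;
			uint64_t full_duplex:1;
			uint64_t speed:18;	/* Mbps */
		} s;
	};

	int main(void)
	{
		union link_info li = { .u64 = 0 };

		li.s.link_up = 1;
		li.s.full_duplex = 1;
		li.s.speed = 1000;
		printf("raw=%#llx up=%u speed=%u\n", (unsigned long long)li.u64,
		       (unsigned)li.s.link_up, (unsigned)li.s.speed);
		return 0;
	}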
-
-enum cvmx_fau_reg_32 {
- CVMX_FAU_REG_32_START = 0,
-};
-
-enum cvmx_fau_op_size {
- CVMX_FAU_OP_SIZE_8 = 0,
- CVMX_FAU_OP_SIZE_16 = 1,
- CVMX_FAU_OP_SIZE_32 = 2,
- CVMX_FAU_OP_SIZE_64 = 3
-};
-
-typedef enum {
- CVMX_SPI_MODE_UNKNOWN = 0,
- CVMX_SPI_MODE_TX_HALFPLEX = 1,
- CVMX_SPI_MODE_RX_HALFPLEX = 2,
- CVMX_SPI_MODE_DUPLEX = 3
-} cvmx_spi_mode_t;
-
-typedef enum {
- CVMX_HELPER_INTERFACE_MODE_DISABLED,
- CVMX_HELPER_INTERFACE_MODE_RGMII,
- CVMX_HELPER_INTERFACE_MODE_GMII,
- CVMX_HELPER_INTERFACE_MODE_SPI,
- CVMX_HELPER_INTERFACE_MODE_PCIE,
- CVMX_HELPER_INTERFACE_MODE_XAUI,
- CVMX_HELPER_INTERFACE_MODE_SGMII,
- CVMX_HELPER_INTERFACE_MODE_PICMG,
- CVMX_HELPER_INTERFACE_MODE_NPI,
- CVMX_HELPER_INTERFACE_MODE_LOOP,
-} cvmx_helper_interface_mode_t;
-
-typedef enum {
- CVMX_POW_WAIT = 1,
- CVMX_POW_NO_WAIT = 0,
-} cvmx_pow_wait_t;
-
-typedef enum {
- CVMX_PKO_LOCK_NONE = 0,
- CVMX_PKO_LOCK_ATOMIC_TAG = 1,
- CVMX_PKO_LOCK_CMD_QUEUE = 2,
-} cvmx_pko_lock_t;
-
-typedef enum {
- CVMX_PKO_SUCCESS,
- CVMX_PKO_INVALID_PORT,
- CVMX_PKO_INVALID_QUEUE,
- CVMX_PKO_INVALID_PRIORITY,
- CVMX_PKO_NO_MEMORY,
- CVMX_PKO_PORT_ALREADY_SETUP,
- CVMX_PKO_CMD_QUEUE_INIT_ERROR
-} cvmx_pko_status_t;
-
-enum cvmx_pow_tag_type {
- CVMX_POW_TAG_TYPE_ORDERED = 0L,
- CVMX_POW_TAG_TYPE_ATOMIC = 1L,
- CVMX_POW_TAG_TYPE_NULL = 2L,
- CVMX_POW_TAG_TYPE_NULL_NULL = 3L
-};
-
-union cvmx_ipd_ctl_status {
- uint64_t u64;
- struct cvmx_ipd_ctl_status_s {
- uint64_t reserved_18_63:46;
- uint64_t use_sop:1;
- uint64_t rst_done:1;
- uint64_t clken:1;
- uint64_t no_wptr:1;
- uint64_t pq_apkt:1;
- uint64_t pq_nabuf:1;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } s;
- struct cvmx_ipd_ctl_status_cn30xx {
- uint64_t reserved_10_63:54;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn30xx;
- struct cvmx_ipd_ctl_status_cn38xxp2 {
- uint64_t reserved_9_63:55;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn38xxp2;
- struct cvmx_ipd_ctl_status_cn50xx {
- uint64_t reserved_15_63:49;
- uint64_t no_wptr:1;
- uint64_t pq_apkt:1;
- uint64_t pq_nabuf:1;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn50xx;
- struct cvmx_ipd_ctl_status_cn58xx {
- uint64_t reserved_12_63:52;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn58xx;
- struct cvmx_ipd_ctl_status_cn63xxp1 {
- uint64_t reserved_16_63:48;
- uint64_t clken:1;
- uint64_t no_wptr:1;
- uint64_t pq_apkt:1;
- uint64_t pq_nabuf:1;
- uint64_t ipd_full:1;
- uint64_t pkt_off:1;
- uint64_t len_m8:1;
- uint64_t reset:1;
- uint64_t addpkt:1;
- uint64_t naddbuf:1;
- uint64_t pkt_lend:1;
- uint64_t wqe_lend:1;
- uint64_t pbp_en:1;
- uint64_t opc_mode:2;
- uint64_t ipd_en:1;
- } cn63xxp1;
-};
-
-union cvmx_ipd_sub_port_fcs {
- uint64_t u64;
- struct cvmx_ipd_sub_port_fcs_s {
- uint64_t port_bit:32;
- uint64_t reserved_32_35:4;
- uint64_t port_bit2:4;
- uint64_t reserved_40_63:24;
- } s;
- struct cvmx_ipd_sub_port_fcs_cn30xx {
- uint64_t port_bit:3;
- uint64_t reserved_3_63:61;
- } cn30xx;
- struct cvmx_ipd_sub_port_fcs_cn38xx {
- uint64_t port_bit:32;
- uint64_t reserved_32_63:32;
- } cn38xx;
-};
-
-union cvmx_ipd_sub_port_qos_cnt {
- uint64_t u64;
- struct cvmx_ipd_sub_port_qos_cnt_s {
- uint64_t cnt:32;
- uint64_t port_qos:9;
- uint64_t reserved_41_63:23;
- } s;
-};
-typedef struct {
- uint32_t dropped_octets;
- uint32_t dropped_packets;
- uint32_t pci_raw_packets;
- uint32_t octets;
- uint32_t packets;
- uint32_t multicast_packets;
- uint32_t broadcast_packets;
- uint32_t len_64_packets;
- uint32_t len_65_127_packets;
- uint32_t len_128_255_packets;
- uint32_t len_256_511_packets;
- uint32_t len_512_1023_packets;
- uint32_t len_1024_1518_packets;
- uint32_t len_1519_max_packets;
- uint32_t fcs_align_err_packets;
- uint32_t runt_packets;
- uint32_t runt_crc_packets;
- uint32_t oversize_packets;
- uint32_t oversize_crc_packets;
- uint32_t inb_packets;
- uint64_t inb_octets;
- uint16_t inb_errors;
-} cvmx_pip_port_status_t;
-
-typedef struct {
- uint32_t packets;
- uint64_t octets;
- uint64_t doorbell;
-} cvmx_pko_port_status_t;
-
-union cvmx_pip_frm_len_chkx {
- uint64_t u64;
- struct cvmx_pip_frm_len_chkx_s {
- uint64_t reserved_32_63:32;
- uint64_t maxlen:16;
- uint64_t minlen:16;
- } s;
-};
-
-union cvmx_gmxx_rxx_frm_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_ctl_s {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t pad_len:1;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_11:1;
- uint64_t ptp_mode:1;
- uint64_t reserved_13_63:51;
- } s;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t pad_len:1;
- uint64_t reserved_9_63:55;
- } cn30xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t reserved_8_63:56;
- } cn31xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_63:53;
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_align:1;
- uint64_t reserved_10_63:54;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t vlan_len:1;
- uint64_t pad_len:1;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_63:53;
- } cn58xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
- uint64_t pre_chk:1;
- uint64_t pre_strp:1;
- uint64_t ctl_drp:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_smac:1;
- uint64_t pre_free:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_align:1;
- uint64_t null_dis:1;
- uint64_t reserved_11_11:1;
- uint64_t ptp_mode:1;
- uint64_t reserved_13_63:51;
- } cn61xx;
-};
-
-union cvmx_gmxx_rxx_int_reg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_int_reg_s {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t hg2fld:1;
- uint64_t hg2cc:1;
- uint64_t reserved_29_63:35;
- } s;
- struct cvmx_gmxx_rxx_int_reg_cn30xx {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t reserved_19_63:45;
- } cn30xx;
- struct cvmx_gmxx_rxx_int_reg_cn50xx {
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t reserved_6_6:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t pause_drp:1;
- uint64_t reserved_20_63:44;
- } cn50xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx {
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_9:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t reserved_16_18:3;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t hg2fld:1;
- uint64_t hg2cc:1;
- uint64_t reserved_29_63:35;
- } cn52xx;
- struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_9:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t reserved_16_18:3;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t reserved_27_63:37;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn58xx {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t phy_link:1;
- uint64_t phy_spd:1;
- uint64_t phy_dupx:1;
- uint64_t pause_drp:1;
- uint64_t reserved_20_63:44;
- } cn58xx;
- struct cvmx_gmxx_rxx_int_reg_cn61xx {
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_9:1;
- uint64_t ovrerr:1;
- uint64_t pcterr:1;
- uint64_t rsverr:1;
- uint64_t falerr:1;
- uint64_t coldet:1;
- uint64_t ifgerr:1;
- uint64_t reserved_16_18:3;
- uint64_t pause_drp:1;
- uint64_t loc_fault:1;
- uint64_t rem_fault:1;
- uint64_t bad_seq:1;
- uint64_t bad_term:1;
- uint64_t unsop:1;
- uint64_t uneop:1;
- uint64_t undat:1;
- uint64_t hg2fld:1;
- uint64_t hg2cc:1;
- uint64_t reserved_29_63:35;
- } cn61xx;
-};
-
-union cvmx_gmxx_prtx_cfg {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cfg_s {
- uint64_t reserved_22_63:42;
- uint64_t pknd:6;
- uint64_t reserved_14_15:2;
- uint64_t tx_idle:1;
- uint64_t rx_idle:1;
- uint64_t reserved_9_11:3;
- uint64_t speed_msb:1;
- uint64_t reserved_4_7:4;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } s;
- struct cvmx_gmxx_prtx_cfg_cn30xx {
- uint64_t reserved_4_63:60;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } cn30xx;
- struct cvmx_gmxx_prtx_cfg_cn52xx {
- uint64_t reserved_14_63:50;
- uint64_t tx_idle:1;
- uint64_t rx_idle:1;
- uint64_t reserved_9_11:3;
- uint64_t speed_msb:1;
- uint64_t reserved_4_7:4;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } cn52xx;
-};
-
-union cvmx_gmxx_rxx_adr_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_ctl_s {
- uint64_t reserved_4_63:60;
- uint64_t cam_mode:1;
- uint64_t mcst:2;
- uint64_t bcst:1;
- } s;
-};
-
-union cvmx_pip_prt_tagx {
- uint64_t u64;
- struct cvmx_pip_prt_tagx_s {
- uint64_t reserved_54_63:10;
- uint64_t portadd_en:1;
- uint64_t inc_hwchk:1;
- uint64_t reserved_50_51:2;
- uint64_t grptagbase_msb:2;
- uint64_t reserved_46_47:2;
- uint64_t grptagmask_msb:2;
- uint64_t reserved_42_43:2;
- uint64_t grp_msb:2;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t grptag_mskip:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } s;
- struct cvmx_pip_prt_tagx_cn30xx {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t reserved_30_30:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } cn30xx;
- struct cvmx_pip_prt_tagx_cn50xx {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t grptag_mskip:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } cn50xx;
-};
-
-union cvmx_spxx_int_reg {
- uint64_t u64;
- struct cvmx_spxx_int_reg_s {
- uint64_t reserved_32_63:32;
- uint64_t spf:1;
- uint64_t reserved_12_30:19;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
-};
-
-union cvmx_spxx_int_msk {
- uint64_t u64;
- struct cvmx_spxx_int_msk_s {
- uint64_t reserved_12_63:52;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
-};
-
-union cvmx_pow_wq_int {
- uint64_t u64;
- struct cvmx_pow_wq_int_s {
- uint64_t wq_int:16;
- uint64_t iq_dis:16;
- uint64_t reserved_32_63:32;
- } s;
-};
-
-union cvmx_sso_wq_int_thrx {
- uint64_t u64;
- struct {
- uint64_t iq_thr:12;
- uint64_t reserved_12_13:2;
- uint64_t ds_thr:12;
- uint64_t reserved_26_27:2;
- uint64_t tc_thr:4;
- uint64_t tc_en:1;
- uint64_t reserved_33_63:31;
- } s;
-};
-
-union cvmx_stxx_int_reg {
- uint64_t u64;
- struct cvmx_stxx_int_reg_s {
- uint64_t reserved_9_63:55;
- uint64_t syncerr:1;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
-};
-
-union cvmx_stxx_int_msk {
- uint64_t u64;
- struct cvmx_stxx_int_msk_s {
- uint64_t reserved_8_63:56;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
-};
-
-union cvmx_pow_wq_int_pc {
- uint64_t u64;
- struct cvmx_pow_wq_int_pc_s {
- uint64_t reserved_0_7:8;
- uint64_t pc_thr:20;
- uint64_t reserved_28_31:4;
- uint64_t pc:28;
- uint64_t reserved_60_63:4;
- } s;
-};
-
-union cvmx_pow_wq_int_thrx {
- uint64_t u64;
- struct cvmx_pow_wq_int_thrx_s {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_23_23:1;
- uint64_t ds_thr:11;
- uint64_t reserved_11_11:1;
- uint64_t iq_thr:11;
- } s;
- struct cvmx_pow_wq_int_thrx_cn30xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_18_23:6;
- uint64_t ds_thr:6;
- uint64_t reserved_6_11:6;
- uint64_t iq_thr:6;
- } cn30xx;
- struct cvmx_pow_wq_int_thrx_cn31xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_20_23:4;
- uint64_t ds_thr:8;
- uint64_t reserved_8_11:4;
- uint64_t iq_thr:8;
- } cn31xx;
- struct cvmx_pow_wq_int_thrx_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_21_23:3;
- uint64_t ds_thr:9;
- uint64_t reserved_9_11:3;
- uint64_t iq_thr:9;
- } cn52xx;
- struct cvmx_pow_wq_int_thrx_cn63xx {
- uint64_t reserved_29_63:35;
- uint64_t tc_en:1;
- uint64_t tc_thr:4;
- uint64_t reserved_22_23:2;
- uint64_t ds_thr:10;
- uint64_t reserved_10_11:2;
- uint64_t iq_thr:10;
- } cn63xx;
-};
-
-union cvmx_npi_rsl_int_blocks {
- uint64_t u64;
- struct cvmx_npi_rsl_int_blocks_s {
- uint64_t reserved_32_63:32;
- uint64_t rint_31:1;
- uint64_t iob:1;
- uint64_t reserved_28_29:2;
- uint64_t rint_27:1;
- uint64_t rint_26:1;
- uint64_t rint_25:1;
- uint64_t rint_24:1;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t rint_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t rint_15:1;
- uint64_t reserved_13_14:2;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t rint_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } s;
- struct cvmx_npi_rsl_int_blocks_cn30xx {
- uint64_t reserved_32_63:32;
- uint64_t rint_31:1;
- uint64_t iob:1;
- uint64_t rint_29:1;
- uint64_t rint_28:1;
- uint64_t rint_27:1;
- uint64_t rint_26:1;
- uint64_t rint_25:1;
- uint64_t rint_24:1;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t rint_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t rint_15:1;
- uint64_t rint_14:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t rint_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn30xx;
- struct cvmx_npi_rsl_int_blocks_cn38xx {
- uint64_t reserved_32_63:32;
- uint64_t rint_31:1;
- uint64_t iob:1;
- uint64_t rint_29:1;
- uint64_t rint_28:1;
- uint64_t rint_27:1;
- uint64_t rint_26:1;
- uint64_t rint_25:1;
- uint64_t rint_24:1;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t rint_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t rint_15:1;
- uint64_t rint_14:1;
- uint64_t rint_13:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t rint_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn38xx;
- struct cvmx_npi_rsl_int_blocks_cn50xx {
- uint64_t reserved_31_63:33;
- uint64_t iob:1;
- uint64_t lmc1:1;
- uint64_t agl:1;
- uint64_t reserved_24_27:4;
- uint64_t asx1:1;
- uint64_t asx0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t spx1:1;
- uint64_t spx0:1;
- uint64_t lmc:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t dfa:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npi:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn50xx;
-};
-
-union cvmx_pko_command_word0 {
- uint64_t u64;
- struct {
- uint64_t total_bytes:16;
- uint64_t segs:6;
- uint64_t dontfree:1;
- uint64_t ignore_i:1;
- uint64_t ipoffp1:7;
- uint64_t gather:1;
- uint64_t rsp:1;
- uint64_t wqp:1;
- uint64_t n2:1;
- uint64_t le:1;
- uint64_t reg0:11;
- uint64_t subone0:1;
- uint64_t reg1:11;
- uint64_t subone1:1;
- uint64_t size0:2;
- uint64_t size1:2;
- } s;
-};
-
-union cvmx_ciu_timx {
- uint64_t u64;
- struct cvmx_ciu_timx_s {
- uint64_t reserved_37_63:27;
- uint64_t one_shot:1;
- uint64_t len:36;
- } s;
-};
-
-union cvmx_gmxx_rxx_rx_inbnd {
- uint64_t u64;
- struct cvmx_gmxx_rxx_rx_inbnd_s {
- uint64_t status:1;
- uint64_t speed:2;
- uint64_t duplex:1;
- uint64_t reserved_4_63:60;
- } s;
-};
-
-static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
- int32_t value)
-{
- return value;
-}
-
-static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
- int32_t value)
-{ }
-
-static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
- int32_t value)
-{ }
-
-static inline uint64_t cvmx_scratch_read64(uint64_t address)
-{
- return 0;
-}
-
-static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
-{ }
-
-static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
-{
- return 0;
-}
-
-static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
-{
- return (void *)(uintptr_t)(physical_address);
-}
-
-static inline uint64_t cvmx_ptr_to_phys(void *ptr)
-{
- return (unsigned long)ptr;
-}
-
-static inline int cvmx_helper_get_interface_num(int ipd_port)
-{
- return ipd_port;
-}
-
-static inline int cvmx_helper_get_interface_index_num(int ipd_port)
-{
- return ipd_port;
-}
-
-static inline void cvmx_fpa_enable(void)
-{ }
-
-static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
-{
- return 0;
-}
-
-static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
-{ }
-
-static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
- return 0;
-}
-
-static inline void *cvmx_fpa_alloc(uint64_t pool)
-{
- return NULL;
-}
-
-static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
- uint64_t num_cache_lines)
-{ }
-
-static inline int octeon_is_simulation(void)
-{
- return 1;
-}
-
-static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pip_port_status_t *status)
-{ }
-
-static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pko_port_status_t *status)
-{ }
-
-static inline cvmx_helper_interface_mode_t
-cvmx_helper_interface_get_mode(int interface)
-{
- return 0;
-}
-
-static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
-{
- union cvmx_helper_link_info ret = { .u64 = 0 };
-
- return ret;
-}
-
-static inline int cvmx_helper_link_set(int ipd_port,
- union cvmx_helper_link_info link_info)
-{
- return 0;
-}
-
-static inline int cvmx_helper_initialize_packet_io_global(void)
-{
- return 0;
-}
-
-static inline int cvmx_helper_get_number_of_interfaces(void)
-{
- return 2;
-}
-
-static inline int cvmx_helper_ports_on_interface(int interface)
-{
- return 1;
-}
-
-static inline int cvmx_helper_get_ipd_port(int interface, int port)
-{
- return 0;
-}
-
-static inline int cvmx_helper_ipd_and_packet_input_enable(void)
-{
- return 0;
-}
-
-static inline void cvmx_ipd_disable(void)
-{ }
-
-static inline void cvmx_ipd_free_ptr(void)
-{ }
-
-static inline void cvmx_pko_disable(void)
-{ }
-
-static inline void cvmx_pko_shutdown(void)
-{ }
-
-static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
-{
- return port;
-}
-
-static inline int cvmx_pko_get_base_queue(int port)
-{
- return port;
-}
-
-static inline int cvmx_pko_get_num_queues(int port)
-{
- return port;
-}
-
-static inline unsigned int cvmx_get_core_num(void)
-{
- return 0;
-}
-
-static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
- cvmx_pow_wait_t wait)
-{ }
-
-static inline void cvmx_pow_work_request_async(int scr_addr,
- cvmx_pow_wait_t wait)
-{ }
-
-static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
-{
- struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
-
- return wqe;
-}
-
-static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
-{
- return (void *)(unsigned long)wait;
-}
-
-static inline int cvmx_spi_restart_interface(int interface,
- cvmx_spi_mode_t mode, int timeout)
-{
- return 0;
-}
-
-static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- enum cvmx_fau_reg_32 reg,
- int32_t value)
-{ }
-
-static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
- int interface,
- int port)
-{
- union cvmx_gmxx_rxx_rx_inbnd r;
-
- r.u64 = 0;
- return r;
-}
-
-static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
- cvmx_pko_lock_t use_locking)
-{ }
-
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
- uint64_t queue, union cvmx_pko_command_word0 pko_command,
- union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
-{
- return 0;
-}
-
-static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
-{ }
-
-static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
-{ }
-
-static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
-{
- return 0;
-}
-
-static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
-{ }
-
-static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t qos, uint64_t grp)
-{ }
-
-#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a)+(b))
-#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a)+(b))
-#define CVMX_CIU_TIMX(a) (a)
-#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a)+(b))
-#define CVMX_GMXX_RXX_INT_REG(a, b) ((a)+(b))
-#define CVMX_GMXX_SMACX(a, b) ((a)+(b))
-#define CVMX_PIP_PRT_TAGX(a) (a)
-#define CVMX_POW_PP_GRP_MSKX(a) (a)
-#define CVMX_POW_WQ_INT_THRX(a) (a)
-#define CVMX_SPXX_INT_MSK(a) (a)
-#define CVMX_SPXX_INT_REG(a) (a)
-#define CVMX_SSO_PPX_GRP_MSK(a) (a)
-#define CVMX_SSO_WQ_INT_THRX(a) (a)
-#define CVMX_STXX_INT_MSK(a) (a)
-#define CVMX_STXX_INT_REG(a) (a)
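The block removed above is the tail of the staging driver's private stub header: every Octeon hardware accessor (FAU counters, CSR reads and writes, FPA pool allocation, POW work requests) collapses to a no-op or a constant so the ethernet driver could be compile-tested on non-Octeon machines. A minimal sketch of the pattern the header implemented; the #ifdef guard and include below are illustrative, not taken from this patch:

	#ifdef CONFIG_CAVIUM_OCTEON_SOC
	#include <asm/octeon/cvmx-fau.h>	/* real SoC accessors */
	#else
	/* Compile-test fallback: pretend the fetch-and-add happened. */
	static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
						       int32_t value)
	{
		return value;
	}
	#endif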
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 6ec7e3ce3863..4bc5d5fce9bf 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -63,7 +63,6 @@
#define UDELAY_COUNT 3
#define UDELAY_DELAY 100
-
#define TX_DESC_PER_IOCB 8
#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
@@ -1627,18 +1626,18 @@ enum {
#define MPI_COREDUMP_COOKIE 0x5555aaaa
struct mpi_coredump_global_header {
u32 cookie;
- u8 idString[16];
- u32 timeLo;
- u32 timeHi;
- u32 imageSize;
- u32 headerSize;
+ u8 id_string[16];
+ u32 time_lo;
+ u32 time_hi;
+ u32 image_size;
+ u32 header_size;
u8 info[220];
};
struct mpi_coredump_segment_header {
u32 cookie;
- u32 segNum;
- u32 segSize;
+ u32 seg_num;
+ u32 seg_size;
u32 extra;
u8 description[16];
};
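The qlge.h hunks are pure renames: the camelCase coredump members become the lowercase_with_underscores form the kernel coding style asks for, with no change in size or layout. Reassembled from the hunks above, the global header now reads:

	struct mpi_coredump_global_header {
		u32 cookie;
		u8 id_string[16];	/* was idString */
		u32 time_lo;		/* was timeLo */
		u32 time_hi;		/* was timeHi */
		u32 image_size;		/* was imageSize */
		u32 header_size;	/* was headerSize */
		u8 info[220];
	};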
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 83f34ca43aa4..8cf39615c520 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -142,10 +142,10 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
u32 *direct_ptr, temp;
u32 *indirect_ptr;
-
/* The XAUI needs to be read out per port */
status = ql_read_other_func_serdes_reg(qdev,
- XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ XG_SERDES_XAUI_HSS_PCS_START,
+ &temp);
if (status)
temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
@@ -297,7 +297,6 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
xfi_direct_valid, xfi_indirect_valid);
-
/* Get XAUI_XFI_HSS_PLL register block. */
if (qdev->func & 1) {
direct_ptr =
@@ -482,7 +481,8 @@ static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
int status;
for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
- status = ql_write_mpi_reg(qdev, RISC_124,
+ status = ql_write_mpi_reg(qdev,
+ RISC_124,
(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
if (status)
goto end;
@@ -702,8 +702,8 @@ static void ql_build_coredump_seg_header(
{
memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
- seg_hdr->segNum = seg_number;
- seg_hdr->segSize = seg_size;
+ seg_hdr->seg_num = seg_number;
+ seg_hdr->seg_size = seg_size;
strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
@@ -741,12 +741,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
memset(&(mpi_coredump->mpi_global_header), 0,
sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
+ mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
+ mpi_coredump->mpi_global_header.image_size =
sizeof(struct ql_mpi_coredump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
+ strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.id_string));
/* Get generic NIC reg dump */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
@@ -1108,7 +1108,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
goto err;
@@ -1227,17 +1227,15 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
{
int i, status;
-
memset(&(mpi_coredump->mpi_global_header), 0,
sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
+ mpi_coredump->mpi_global_header.header_size =
sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
+ mpi_coredump->mpi_global_header.image_size =
sizeof(struct ql_reg_dump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
-
+ strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.id_string));
/* segment 16 */
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
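The qlge_dbg.c hunks re-align continuation arguments under the opening parenthesis and propagate the renames to every site that fills in the headers. The strncpy() above stays correct only because "MPI Coredump" (12 bytes plus NUL) fits inside the 16-byte id_string field; a plausible follow-up cleanup, not part of this patch, would switch to the self-terminating kernel helper:

	/* strscpy() always NUL-terminates and reports truncation,
	 * which strncpy() does not. */
	strscpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.id_string));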
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 56d116d79e56..790997aff995 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -32,7 +32,6 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
-
#include "qlge.h"
struct ql_stats {
@@ -197,8 +196,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
*/
cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->tx_max_coalesced_frames) {
+ le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
cqicb = (struct cqicb *)rx_ring;
@@ -207,7 +205,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to load CQICB.\n");
@@ -219,8 +217,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
/* Update the inbound (RSS) handler queues if they changed. */
cqicb = (struct cqicb *)&qdev->rx_ring[0];
if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->rx_max_coalesced_frames) {
+ le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
rx_ring = &qdev->rx_ring[i];
cqicb = (struct cqicb *)rx_ring;
@@ -229,7 +226,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
+ CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to load CQICB.\n");
@@ -332,6 +329,7 @@ quit:
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
int index;
+
switch (stringset) {
case ETH_SS_TEST:
memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
@@ -339,8 +337,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
for (index = 0; index < QLGE_STATS_LEN; index++) {
memcpy(buf + index * ETH_GSTRING_LEN,
- ql_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
}
break;
}
@@ -412,6 +410,7 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, qlge_driver_version,
sizeof(drvinfo->version));
@@ -431,7 +430,7 @@ static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
/* WOL is only supported for mezz card. */
if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
- ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
wol->supported = WAKE_MAGIC;
wol->wolopts = qdev->wol;
}
@@ -444,9 +443,9 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
/* WOL is only supported for mezz card. */
if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
- ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
netif_info(qdev, drv, qdev->ndev,
- "WOL is only supported for mezz card\n");
+ "WOL is only supported for mezz card\n");
return -EOPNOTSUPP;
}
if (wol->wolopts & ~WAKE_MAGIC)
@@ -506,7 +505,7 @@ static void ql_stop_loopback(struct ql_adapter *qdev)
}
static void ql_create_lb_frame(struct sk_buff *skb,
- unsigned int frame_size)
+ unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
@@ -516,13 +515,13 @@ static void ql_create_lb_frame(struct sk_buff *skb,
}
void ql_check_lb_frame(struct ql_adapter *qdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
unsigned int frame_size = skb->len;
if ((*(skb->data + 3) == 0xFF) &&
- (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
atomic_dec(&qdev->lb_count);
return;
}
@@ -566,7 +565,7 @@ out:
}
static void ql_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -672,7 +671,7 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
}
static void ql_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
@@ -684,7 +683,7 @@ static void ql_get_pauseparam(struct net_device *netdev,
}
static int ql_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
int status = 0;
@@ -703,12 +702,14 @@ static int ql_set_pauseparam(struct net_device *netdev,
static u32 ql_get_msglevel(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
return qdev->msg_enable;
}
static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+
qdev->msg_enable = value;
}
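The qlge_ethtool.c changes are checkpatch-driven: multi-line conditions and argument lists are re-indented to the opening parenthesis, and a blank line now separates local declarations from the first statement, as in the accessor pair the last two hunks touch:

	static u32 ql_get_msglevel(struct net_device *ndev)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);

		return qdev->msg_enable;
	}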
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6ad4515311f7..ef8037d0b52e 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -77,14 +77,12 @@ MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
- "Option to enable MPI firmware dump. "
- "Default is OFF - Do Not allocate memory. ");
+ "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
- "Option to allow force of firmware core dump. "
- "Default is OFF - Do not allow.");
+ "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -178,8 +176,9 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
"register 0x%.08x access error, value = 0x%.08x!.\n",
reg, temp);
return -EIO;
- } else if (temp & bit)
+ } else if (temp & bit) {
return 0;
+ }
udelay(UDELAY_DELAY);
}
netif_alert(qdev, probe, qdev->ndev,
@@ -206,7 +205,6 @@ static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
return -ETIMEDOUT;
}
-
/* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
@@ -270,36 +268,34 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
{
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
if (type == MAC_ADDR_TYPE_CAM_MAC) {
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW,
+ 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -343,7 +339,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
@@ -352,7 +348,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
@@ -362,7 +358,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
break;
@@ -375,8 +371,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -384,8 +379,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
@@ -393,16 +387,15 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
/* This field should also include the queue id
- and possibly the function id. Right now we hardcode
- the route field to NIC core.
+ * and possibly the function id. Right now we hardcode
+ * the route field to NIC core.
*/
cam_output = (CAM_OUT_ROUTE_NIC |
(qdev->
@@ -423,8 +416,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* That's bit-27 we're talking about.
*/
status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
@@ -467,7 +459,8 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
if (status)
return status;
status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ MAC_ADDR_TYPE_CAM_MAC,
+ qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
netif_err(qdev, ifup, qdev->ndev,
@@ -672,17 +665,17 @@ static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
- /* This data is stored on flash as an array of
+ /* This data is stored on flash as an array of
* __le32. Since ql_read32() returns cpu endian
* we need to swap it back.
*/
@@ -721,8 +714,9 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
}
status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8000) / sizeof(u16),
- "8000");
+ sizeof(struct flash_params_8000) /
+ sizeof(u16),
+ "8000");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
@@ -734,12 +728,12 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
*/
if (qdev->flash.flash_params_8000.data_type1 == 2)
memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr1,
- qdev->ndev->addr_len);
+ qdev->flash.flash_params_8000.mac_addr1,
+ qdev->ndev->addr_len);
else
memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr,
- qdev->ndev->addr_len);
+ qdev->flash.flash_params_8000.mac_addr,
+ qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
@@ -748,8 +742,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
}
memcpy(qdev->ndev->dev_addr,
- mac_addr,
- qdev->ndev->addr_len);
+ mac_addr,
+ qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -784,8 +778,9 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
}
status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8012) / sizeof(u16),
- "8012");
+ sizeof(struct flash_params_8012) /
+ sizeof(u16),
+ "8012");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
@@ -798,8 +793,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
}
memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8012.mac_addr,
- qdev->ndev->addr_len);
+ qdev->flash.flash_params_8012.mac_addr,
+ qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -815,7 +810,7 @@ static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
int status;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
return status;
/* write the data to the data reg */
@@ -834,14 +829,14 @@ int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
/* get the data */
@@ -1436,10 +1431,9 @@ static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct sk_buff *skb;
struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
@@ -1483,10 +1477,9 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
@@ -1528,8 +1521,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset + hlen,
- length - hlen);
+ lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
skb->len += length - hlen;
skb->data_len += length - hlen;
skb->truesize += length - hlen;
@@ -1540,7 +1532,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb_checksum_none_assert(skb);
if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1576,10 +1568,9 @@ err_out:
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length, u16 vlan_id)
{
struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
struct net_device *ndev = qdev->ndev;
@@ -1648,7 +1639,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
* csum or frame errors.
*/
if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1779,8 +1770,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
"Chaining page at offset = %d, for %d bytes to skb.\n",
lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
+ lbq_desc->p.pg_chunk.offset, length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
@@ -1804,10 +1794,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
- skb_fill_page_desc(skb, 0,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
@@ -1857,9 +1846,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
"Adding page %d to skb for %d bytes.\n",
i, size);
skb_fill_page_desc(skb, i,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- size);
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset, size);
skb->len += size;
skb->data_len += size;
skb->truesize += size;
@@ -1875,9 +1863,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
@@ -1938,7 +1926,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
* csum or frame errors.
*/
if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1970,8 +1958,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
@@ -1986,34 +1974,34 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
* separate buffers.
*/
ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
/* The data fit in a single small buffer.
* Allocate a new skb, copy the data and
* return the buffer to the free pool.
*/
- ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
/* TCP packet in a page chunk that's been checksummed.
* Tack it on to our GRO skb and let it go.
*/
- ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
/* Non-TCP packet in a page chunk. Allocate an
* skb, tack it on frags, and send it up.
*/
- ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
+ vlan_id);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
 * can be processed correctly by the split frame logic.
*/
ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
+ vlan_id);
}
return (unsigned long)length;
@@ -2222,15 +2210,16 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
"Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
- * right after the RSS rings. */
+ * right after the RSS rings.
+ */
for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
trx_ring = &qdev->rx_ring[i];
/* If this TX completion ring belongs to this vector and
* it's not empty then service it.
*/
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
- (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
- trx_ring->cnsmr_idx)) {
+ (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+ trx_ring->cnsmr_idx)) {
netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
"%s: Servicing TX completion ring %d.\n",
__func__, trx_ring->cq_id);
@@ -2304,7 +2293,7 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
}
static int qlge_set_features(struct net_device *ndev,
- netdev_features_t features)
+ netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
int err;
@@ -2447,7 +2436,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* Check MPI processor activity.
*/
if ((var & STS_PI) &&
- (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+ (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
@@ -2456,7 +2445,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
"Got MPI processor interrupt.\n");
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
- qdev->workqueue, &qdev->mpi_work, 0);
+ qdev->workqueue, &qdev->mpi_work, 0);
work_done++;
}
@@ -2639,7 +2628,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
-
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
@@ -2887,7 +2875,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
}
/* Allocate queues and buffers for this completions queue based
- * on the values in the parameter structure. */
+ * on the values in the parameter structure.
+ */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
@@ -3530,19 +3519,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
return status;
status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
- RT_IDX_IP_CSUM_ERR, 1);
+ RT_IDX_IP_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for IP CSUM error packets.\n");
+ "Failed to init routing register for IP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
- RT_IDX_TU_CSUM_ERR, 1);
+ RT_IDX_TU_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for TCP/UDP CSUM error packets.\n");
+ "Failed to init routing register for TCP/UDP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
@@ -3556,7 +3543,7 @@ static int ql_route_initialize(struct ql_adapter *qdev)
*/
if (qdev->rss_ring_count > 1) {
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
- RT_IDX_RSS_MATCH, 1);
+ RT_IDX_RSS_MATCH, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for MATCH RSS packets.\n");
@@ -3654,7 +3641,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Default WOL is enable on Mezz cards */
if (qdev->pdev->subsystem_device == 0x0068 ||
- qdev->pdev->subsystem_device == 0x0180)
+ qdev->pdev->subsystem_device == 0x0180)
qdev->wol = WAKE_MAGIC;
/* Start up the rx queues. */
@@ -3731,8 +3718,9 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
/* Wait for the NIC and MGMNT FIFOs to empty. */
ql_wait_fifo_empty(qdev);
- } else
+ } else {
clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+ }
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
@@ -3880,7 +3868,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
* link is up the turn on the carrier.
*/
if ((ql_read32(qdev, STS) & qdev->port_init) &&
- (ql_read32(qdev, STS) & qdev->port_link_up))
+ (ql_read32(qdev, STS) & qdev->port_link_up))
ql_link_on(qdev);
/* Restore rx mode. */
clear_bit(QL_ALLMULTI, &qdev->flags);
@@ -4099,21 +4087,20 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
- if (ndev->mtu == 1500 && new_mtu == 9000) {
+ if (ndev->mtu == 1500 && new_mtu == 9000)
netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
- } else if (ndev->mtu == 9000 && new_mtu == 1500) {
+ else if (ndev->mtu == 9000 && new_mtu == 1500)
netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
- } else
+ else
return -EINVAL;
queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 3*HZ);
+ &qdev->mpi_port_cfg_work, 3 * HZ);
ndev->mtu = new_mtu;
- if (!netif_running(qdev->ndev)) {
+ if (!netif_running(qdev->ndev))
return 0;
- }
status = ql_change_rx_buffers(qdev);
if (status) {
@@ -4267,14 +4254,15 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
if (status)
return status;
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ MAC_ADDR_TYPE_CAM_MAC,
+ qdev->func * MAX_CQ);
if (status)
netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
-static void qlge_tx_timeout(struct net_device *ndev)
+static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
@@ -4334,7 +4322,7 @@ static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
u32 nic_func1, nic_func2;
status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
- &temp);
+ &temp);
if (status)
return status;
@@ -4455,7 +4443,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
pdev->needs_freset = 1;
pci_save_state(pdev);
qdev->reg_base =
- ioremap_nocache(pci_resource_start(pdev, 1),
+ ioremap(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
@@ -4465,7 +4453,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev->doorbell_area =
- ioremap_nocache(pci_resource_start(pdev, 3),
+ ioremap(pci_resource_start(pdev, 3),
pci_resource_len(pdev, 3));
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
@@ -4578,11 +4566,12 @@ static int qlge_probe(struct pci_dev *pdev,
{
struct net_device *ndev = NULL;
struct ql_adapter *qdev = NULL;
- static int cards_found = 0;
+ static int cards_found;
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, netif_get_num_default_rss_queues()));
+ min(MAX_CPUS,
+ netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
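Beyond the mechanical re-wrapping, qlge_main.c picks up two v5.6 API moves that arrive with the merge: ioremap_nocache() is dropped in favour of plain ioremap(), which was already its alias on essentially every architecture, and .ndo_tx_timeout gains a parameter telling the driver which TX queue stalled. As the hunk above shows, the driver accepts the new argument but does not use it yet:

	/* v5.6 netdev API: the core now reports the stalled TX queue. */
	static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
	{
		struct ql_adapter *qdev = netdev_priv(ndev);

		ql_queue_asic_error(qdev);	/* txqueue unused so far */
	}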
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 9e422bbbb6ab..bb03b2fa7233 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -134,7 +134,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
for (i = 0; i < mbcp->out_count; i++) {
status =
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
- &mbcp->mbox_out[i]);
+ &mbcp->mbox_out[i]);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
@@ -184,7 +184,7 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
for (i = 0; i < mbcp->in_count; i++) {
status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
- mbcp->mbox_in[i]);
+ mbcp->mbox_in[i]);
if (status)
goto end;
}
@@ -293,7 +293,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 0);
+ &qdev->mpi_port_cfg_work, 0);
}
ql_link_on(qdev);
@@ -544,7 +544,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (status)
goto end;
-
/* If we're generating a system error, then there's nothing
* to wait for.
*/
@@ -730,7 +729,6 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[1] = qdev->link_config;
mbcp->mbox_in[2] = qdev->max_frame_size;
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -747,7 +745,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
}
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
- u32 size)
+ u32 size)
{
int status = 0;
struct mbox_params mbc;
@@ -768,7 +766,6 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
mbcp->mbox_in[7] = LSW(MSD(req_dma));
mbcp->mbox_in[8] = MSW(addr);
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -782,14 +779,14 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
/* Issue a mailbox command to dump RISC RAM. */
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count)
+ u32 ram_addr, int word_count)
{
int status;
char *my_buf;
dma_addr_t buf_dma;
my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
- &buf_dma);
+ &buf_dma);
if (!my_buf)
return -EIO;
@@ -798,7 +795,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
memcpy(buf, my_buf, word_count * sizeof(u32));
pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
- buf_dma);
+ buf_dma);
return status;
}
@@ -850,7 +847,6 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
mbcp->mbox_in[1] = wol;
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -922,7 +918,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
*/
wait_time =
wait_for_completion_timeout(&qdev->ide_completion,
- wait_time);
+ wait_time);
if (!wait_time) {
netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
break;
@@ -965,7 +961,6 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
mbcp->mbox_in[1] = led_config;
-
status = ql_mailbox_command(qdev, mbcp);
if (status)
return status;
@@ -1130,8 +1125,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
}
if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
- qdev->max_frame_size ==
- CFG_DEFAULT_MAX_FRAME_SIZE)
+ qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
goto end;
qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
@@ -1278,7 +1272,7 @@ void ql_mpi_reset_work(struct work_struct *work)
netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
- &qdev->mpi_core_to_log, 5 * HZ);
+ &qdev->mpi_core_to_log, 5 * HZ);
}
ql_soft_reset_mpi_risc(qdev);
}
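qlge_mpi.c only re-indents continuation lines, but the ql_dump_risc_ram_area() hunk shows the driver still on the legacy PCI DMA wrappers. A plausible modernization, not part of this patch, is the generic DMA API that pci_alloc_consistent() wraps:

	/* pci_alloc_consistent(pdev, ...) is dma_alloc_coherent() on
	 * &pdev->dev with GFP_ATOMIC underneath. */
	my_buf = dma_alloc_coherent(&qdev->pdev->dev, word_count * sizeof(u32),
				    &buf_dma, GFP_ATOMIC);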
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 88e42cc1d837..93283c7deec4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -429,7 +429,7 @@ static void update_bmc_sta(struct adapter *padapter)
/* prepare for add_RATid */
supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
- network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
+ network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates);
memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
psta->bssratelen = supportRateNum;
@@ -802,7 +802,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
supportRateNum += ie_len;
}
- network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
+ network_type = rtw_check_network_type(supportRate);
rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
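The rtw_ap.c hunks shrink two calls because rtw_check_network_type(), redefined in rtw_ieee80211.c further down, only ever inspects the rate array; the ratelen and channel arguments it used to take were evidently never read. Callers reduce to:

	int rtw_check_network_type(unsigned char *rate);

	network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates);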
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index d9b0f9e6235c..c525682d0edf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -402,7 +402,6 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
{
u8 ReadState = PG_STATE_HEADER;
- int bContinual = true;
int bDataEmpty = true;
u8 efuse_data, word_cnts = 0;
u16 efuse_addr = 0;
@@ -422,7 +421,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
/* <Roger_TODO> The Efuse has been pre-programmed with 5 dummy bytes at the end of the Efuse by CP. */
/* Skip the dummy parts to prevent unexpected data being read from the Efuse. */
/* Bypass for now. 2009.02.19. */
- while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
/* Header Read ------------- */
if (ReadState & PG_STATE_HEADER) {
if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) && (efuse_data != 0xFF)) {
@@ -464,7 +463,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
ReadState = PG_STATE_HEADER;
}
} else {
- bContinual = false;
+ break;
}
} else if (ReadState & PG_STATE_DATA) {
/* Data section Read ------------- */
@@ -498,8 +497,8 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
if (!PgWriteSuccess)
return false;
- else
- efuse_addr = Efuse_GetCurrentSize(pAdapter);
+
+ efuse_addr = Efuse_GetCurrentSize(pAdapter);
} else {
efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
}
@@ -714,10 +713,9 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
if (ALL_WORDS_DISABLED(efuse_data)) {
ret = false;
break;
- } else {
- curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- curPkt.word_en = efuse_data & 0x0F;
}
+ curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ curPkt.word_en = efuse_data & 0x0F;
} else {
cur_header = efuse_data;
curPkt.offset = (cur_header >> 4) & 0x0F;
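In rtw_efuse.c the bContinual flag existed only to carry an exit condition back up to the while test; breaking out of the loop where the condition is detected lets the test shrink to the address check alone. Condensed, with the parser states elided, the read loop now has this shape:

	while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
		if (ReadState & PG_STATE_HEADER) {
			if (!efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) ||
			    efuse_data == 0xFF)
				break;	/* erased space or failed read: stop */
			/* ... parse the header, move to PG_STATE_DATA ... */
		}
		/* ... data-section handling ... */
	}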
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index cc1b5438c04c..29f615443e8f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -98,7 +98,7 @@ bool rtw_is_cckratesonly_included(u8 *rate)
return true;
}
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
+int rtw_check_network_type(unsigned char *rate)
{
/* could be pure B, pure G, or B/G */
if (rtw_is_cckratesonly_included(rate))
@@ -157,11 +157,10 @@ u8 *rtw_get_ie(u8 *pbuf, int index, uint *len, int limit)
if (*p == index) {
*len = *(p + 1);
return p;
- } else {
- tmp = *(p + 1);
- p += (tmp + 2);
- i += (tmp + 2);
}
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
if (i >= limit)
break;
}
@@ -295,10 +294,9 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, uint *wpa_ie_len, int limit)
goto check_next_ie;
*wpa_ie_len = *(pbuf + 1);
return pbuf;
- } else {
- *wpa_ie_len = 0;
- return NULL;
}
+ *wpa_ie_len = 0;
+ return NULL;
check_next_ie:
limit_new = limit - (pbuf - pie) - 2 - len;
@@ -596,9 +594,8 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
cnt += in_ie[cnt + 1] + 2;
break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
}
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return wpsie_ptr;
}
@@ -642,9 +639,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
if (len_attr)
*len_attr = attr_len;
break;
- } else {
- attr_ptr += attr_len; /* goto next */
}
+ attr_ptr += attr_len; /* goto next */
}
return target_attr_ptr;
}
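rtw_ieee80211.c applies checkpatch's "else is not generally useful after a break or return" rule throughout: once the taken branch leaves the loop or function, the alternative no longer needs an else and loses a level of indentation. The rtw_get_ie() scan above is representative:

	if (*p == index) {
		*len = *(p + 1);
		return p;	/* found it; the old else was redundant */
	}
	tmp = *(p + 1);
	p += (tmp + 2);		/* not this IE: skip id, length and payload */
	i += (tmp + 2);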
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index e984b4605e91..36841d20c3c1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -291,7 +291,7 @@ static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
remain_len = ies_len - (next_ie - ies);
ssid_ie[1] = 0;
- memcpy(ssid_ie+2, next_ie, remain_len);
+ memcpy(ssid_ie + 2, next_ie, remain_len);
len_diff -= ssid_len_ori;
break;
@@ -363,14 +363,14 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
memcpy(pframe, cur_network->ies, cur_network->ie_length);
len_diff = update_hidden_ssid(
- pframe+_BEACON_IE_OFFSET_
- , cur_network->ie_length-_BEACON_IE_OFFSET_
+ pframe + _BEACON_IE_OFFSET_
+ , cur_network->ie_length - _BEACON_IE_OFFSET_
, pmlmeinfo->hidden_ssid_mode
);
- pframe += (cur_network->ie_length+len_diff);
- pattrib->pktlen += (cur_network->ie_length+len_diff);
- wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
- pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ pframe += (cur_network->ie_length + len_diff);
+ pattrib->pktlen += (cur_network->ie_length + len_diff);
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr + TXDESC_OFFSET + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_,
+ pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_, NULL, &wps_ielen);
if (wps_ie && wps_ielen > 0)
rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0)
@@ -504,7 +504,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
#if defined(CONFIG_88EU_AP_MODE)
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
- pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+ pwps_ie = rtw_get_wps_ie(cur_network->ies + _FIXED_IE_LENGTH_, cur_network->ie_length - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
/* insert & update wps_probe_resp_ie */
if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
@@ -522,13 +522,13 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->pktlen += wps_offset;
wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
- if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
- memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
- pframe += wps_ielen+2;
- pattrib->pktlen += wps_ielen+2;
+ if ((wps_offset + wps_ielen + 2) <= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen + 2);
+ pframe += wps_ielen + 2;
+ pattrib->pktlen += wps_ielen + 2;
}
- if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
memcpy(pframe, premainder_ie, remainder_ielen);
pframe += remainder_ielen;
pattrib->pktlen += remainder_ielen;
@@ -751,7 +751,7 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
return;
/* update attribute */
@@ -943,7 +943,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &pattrib->pktlen);
} else {
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &pattrib->pktlen);
- pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen-8, pstat->bssrateset+8, &pattrib->pktlen);
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, pstat->bssratelen - 8, pstat->bssrateset + 8, &pattrib->pktlen);
}
if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
@@ -952,17 +952,17 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
/* FILL HT CAP INFO IE */
pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
- memcpy(pframe, pbuf, ie_len+2);
- pframe += (ie_len+2);
- pattrib->pktlen += (ie_len+2);
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
}
/* FILL HT ADD INFO IE */
pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_));
if (pbuf && ie_len > 0) {
- memcpy(pframe, pbuf, ie_len+2);
- pframe += (ie_len+2);
- pattrib->pktlen += (ie_len+2);
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
}
}
@@ -973,14 +973,14 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
- memcpy(pframe, pbuf, ie_len+2);
- pframe += (ie_len+2);
- pattrib->pktlen += (ie_len+2);
+ if (pbuf && !memcmp(pbuf + 2, WMM_PARA_IE, 6)) {
+ memcpy(pframe, pbuf, ie_len + 2);
+ pframe += (ie_len + 2);
+ pattrib->pktlen += (ie_len + 2);
break;
}
- if ((pbuf == NULL) || (ie_len == 0))
+ if (!pbuf || ie_len == 0)
break;
}
}
@@ -1082,8 +1082,8 @@ static void issue_assocreq(struct adapter *padapter)
/* Check if the AP's supported rates are also supported by STA. */
for (j = 0; j < sta_bssrate_len; j++) {
/* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
- if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
- == (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
+ if ((pmlmeinfo->network.SupportedRates[i] | IEEE80211_BASIC_RATE_MASK)
+ == (sta_bssrate[j] | IEEE80211_BASIC_RATE_MASK))
break;
}
@@ -1120,7 +1120,7 @@ static void issue_assocreq(struct adapter *padapter)
/* HT caps */
if (padapter->mlmepriv.htpriv.ht_option) {
p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
- if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
+ if (p && !is_ap_in_tkip(padapter)) {
memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct ieee80211_ht_cap));
/* to disable 40M Hz support while gd_bw_40MHz_en = 0 */
@@ -1263,7 +1263,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da,
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
/* da == NULL, assume it's a null-data frame from the STA to the AP */
- if (da == NULL)
+ if (!da)
da = pnetwork->MacAddress;
do {
@@ -1392,7 +1392,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
/* da == NULL, assume it's a null-data frame from the STA to the AP */
- if (da == NULL)
+ if (!da)
da = pnetwork->MacAddress;
do {
@@ -1444,7 +1444,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
__le16 le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
goto exit;
/* update attribute */
@@ -1780,7 +1780,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
- if ((p == NULL) || (len == 0)) { /* non-HT */
+ if (!p || len == 0) { /* non-HT */
if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
continue;
@@ -1833,7 +1833,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
return _SUCCESS;
psta = rtw_get_stainfo(pstapriv, addr);
- if (psta == NULL)
+ if (!psta)
return _SUCCESS;
if (initiator == 0) { /* recipient */
@@ -1865,6 +1865,7 @@ unsigned int send_beacon(struct adapter *padapter)
int issue = 0;
int poll = 0;
unsigned long start = jiffies;
+ u32 passing_time;
rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
do {
@@ -1874,7 +1875,7 @@ unsigned int send_beacon(struct adapter *padapter)
yield();
rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
poll++;
- } while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+ } while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
@@ -1883,15 +1884,14 @@ unsigned int send_beacon(struct adapter *padapter)
DBG_88E("%s fail! %u ms\n", __func__,
jiffies_to_msecs(jiffies - start));
return _FAIL;
- } else {
- u32 passing_time = jiffies_to_msecs(jiffies - start);
-
- if (passing_time > 100 || issue > 3)
- DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
- __func__, issue, poll,
- jiffies_to_msecs(jiffies - start));
- return _SUCCESS;
}
+ passing_time = jiffies_to_msecs(jiffies - start);
+
+ if (passing_time > 100 || issue > 3)
+ DBG_88E("%s success, issue:%d, poll:%d, %u ms\n",
+ __func__, issue, poll,
+ jiffies_to_msecs(jiffies - start));
+ return _SUCCESS;
}
/****************************************************************************
@@ -2082,7 +2082,7 @@ static u8 collect_bss_info(struct adapter *padapter,
/* checking rate info... */
i = 0;
p = rtw_get_ie(bssid->ies + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
- if (p != NULL) {
+ if (p) {
if (len > NDIS_802_11_LENGTH_RATES_EX) {
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
return _FAIL;
@@ -2093,7 +2093,7 @@ static u8 collect_bss_info(struct adapter *padapter,
p = rtw_get_ie(bssid->ies + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
if (p) {
- if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
+ if (len > (NDIS_802_11_LENGTH_RATES_EX - i)) {
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
return _FAIL;
}
@@ -2518,7 +2518,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
return _SUCCESS;
if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
- !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
+ !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
return _SUCCESS;
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, &ielen,
@@ -2526,7 +2526,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
/* check (wildcard) SSID */
if (p) {
- if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
+ if ((ielen != 0 && memcmp((void *)(p + 2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
(ielen == 0 && pmlmeinfo->hidden_ssid_mode))
return _SUCCESS;
@@ -2583,7 +2583,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
}
/* check the vendor of the assoc AP */
- pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe + sizeof(struct ieee80211_hdr_3addr), len - sizeof(struct ieee80211_hdr_3addr));
/* update TSF Value */
update_TSF(pmlmeext, pframe, len);
@@ -2596,7 +2596,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
if (((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (psta != NULL) {
+ if (psta) {
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
@@ -2610,7 +2610,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
}
} else if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
- if (psta != NULL) {
+ if (psta) {
/* update WMM, ERP in the beacon */
/* todo: use a timer instead of counting the number of beacons received */
if ((sta_rx_pkts(psta) & 0xf) == 0)
@@ -2729,7 +2729,7 @@ static unsigned int OnAuth(struct adapter *padapter,
if ((pstat->auth_seq + 1) != seq) {
DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq+1);
+ seq, pstat->auth_seq + 1);
status = _STATS_OUT_OF_AUTH_SEQ_;
goto auth_fail;
}
@@ -2742,7 +2742,7 @@ static unsigned int OnAuth(struct adapter *padapter,
pstat->authalg = algorithm;
} else {
DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq+1);
+ seq, pstat->auth_seq + 1);
status = _STATS_OUT_OF_AUTH_SEQ_;
goto auth_fail;
}
@@ -2761,7 +2761,7 @@ static unsigned int OnAuth(struct adapter *padapter,
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
- if ((p == NULL) || (ie_len <= 0)) {
+ if (!p || ie_len <= 0) {
DBG_88E("auth rejected because challenge failure!(1)\n");
status = _STATS_CHALLENGE_FAIL_;
goto auth_fail;
@@ -2779,7 +2779,7 @@ static unsigned int OnAuth(struct adapter *padapter,
}
} else {
DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
- seq, pstat->auth_seq+1);
+ seq, pstat->auth_seq + 1);
status = _STATS_OUT_OF_AUTH_SEQ_;
goto auth_fail;
}
@@ -2855,7 +2855,7 @@ static unsigned int OnAuthClient(struct adapter *padapter,
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &len,
pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
- if (p == NULL)
+ if (!p)
goto authclnt_fail;
memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
@@ -2864,10 +2864,9 @@ static unsigned int OnAuthClient(struct adapter *padapter,
set_link_timer(pmlmeext, REAUTH_TO);
return _SUCCESS;
- } else {
- /* open system */
- go2asoc = 1;
}
+ /* open system */
+ go2asoc = 1;
} else if (seq == 4) {
if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
go2asoc = 1;
@@ -2975,7 +2974,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
goto OnAssocReqFail;
} else {
/* check if ssid match */
- if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
+ if (memcmp((void *)(p + 2), cur->ssid.ssid, cur->ssid.ssid_length))
status = _STATS_FAILURE_;
if (ie_len != cur->ssid.ssid_length)
@@ -2987,7 +2986,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* check if the supported rate is ok */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p == NULL) {
+ if (!p) {
DBG_88E("Rx a sta assoc-req which supported rate is empty!\n");
/* use our own rate set as station used */
/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
@@ -2996,14 +2995,15 @@ static unsigned int OnAssocReq(struct adapter *padapter,
status = _STATS_FAILURE_;
goto OnAssocReqFail;
} else {
- memcpy(supportRate, p+2, ie_len);
+ memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p != NULL) {
+ if (p) {
if (supportRateNum <= sizeof(supportRate)) {
- memcpy(supportRate+supportRateNum, p+2, ie_len);
+ memcpy(supportRate + supportRateNum,
+ p + 2, ie_len);
supportRateNum += ie_len;
}
}
@@ -3031,7 +3031,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
wpa_ie = elems.rsn_ie;
wpa_ie_len = elems.rsn_ie_len;
- if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa2_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
pstat->dot8021xalg = 1;/* psk, todo:802.1x */
pstat->wpa_psk |= BIT(1);
@@ -3052,7 +3052,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
wpa_ie = elems.wpa_ie;
wpa_ie_len = elems.wpa_ie_len;
- if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa_ie(wpa_ie - 2, wpa_ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
pstat->dot8021xalg = 1;/* psk, todo:802.1x */
pstat->wpa_psk |= BIT(0);
@@ -3094,7 +3094,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* AP supports WPA/RSN, and sta is going to do WPS, but AP is not ready, */
/* so the selected registrar of AP is _FALSE */
- if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+ if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS | WLAN_STA_MAYBE_WPS))) {
if (pmlmepriv->wps_beacon_ie) {
u8 selected_registrar = 0;
@@ -3131,7 +3131,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
copy_len = min_t(int, wpa_ie_len + 2, sizeof(pstat->wpa_ie));
}
if (copy_len > 0)
- memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+ memcpy(pstat->wpa_ie, wpa_ie - 2, copy_len);
}
/* check if there is WMM IE & support WWM-PS */
pstat->flags &= ~WLAN_STA_WME;
@@ -3146,14 +3146,14 @@ static unsigned int OnAssocReq(struct adapter *padapter,
p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
for (;;) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p != NULL) {
- if (!memcmp(p+2, WMM_IE, 6)) {
+ if (p) {
+ if (!memcmp(p + 2, WMM_IE, 6)) {
pstat->flags |= WLAN_STA_WME;
pstat->qos_option = 1;
- pstat->qos_info = *(p+8);
+ pstat->qos_info = *(p + 8);
- pstat->max_sp_len = (pstat->qos_info>>5) & 0x3;
+ pstat->max_sp_len = (pstat->qos_info >> 5) & 0x3;
if ((pstat->qos_info & 0xf) != 0xf)
pstat->has_legacy_ac = true;
@@ -3162,22 +3162,22 @@ static unsigned int OnAssocReq(struct adapter *padapter,
if (pstat->qos_info & 0xf) {
if (pstat->qos_info & BIT(0))
- pstat->uapsd_vo = BIT(0)|BIT(1);
+ pstat->uapsd_vo = BIT(0) | BIT(1);
else
pstat->uapsd_vo = 0;
if (pstat->qos_info & BIT(1))
- pstat->uapsd_vi = BIT(0)|BIT(1);
+ pstat->uapsd_vi = BIT(0) | BIT(1);
else
pstat->uapsd_vi = 0;
if (pstat->qos_info & BIT(2))
- pstat->uapsd_bk = BIT(0)|BIT(1);
+ pstat->uapsd_bk = BIT(0) | BIT(1);
else
pstat->uapsd_bk = 0;
if (pstat->qos_info & BIT(3))
- pstat->uapsd_be = BIT(0)|BIT(1);
+ pstat->uapsd_be = BIT(0) | BIT(1);
else
pstat->uapsd_be = 0;
}
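The block above decodes the WMM QoS Info octet carried in the vendor-specific IE: the low nibble holds one U-APSD flag per access category, and bits 5-6 hold the Max SP Length. A minimal standalone sketch of the same decoding (struct and helper names are hypothetical, not from the driver):

#include <stdbool.h>
#include <stdint.h>

struct uapsd_flags {
	bool vo, vi, bk, be;	/* per-AC U-APSD enabled */
	uint8_t max_sp_len;	/* 0 = deliver all buffered frames */
};

static struct uapsd_flags decode_qos_info(uint8_t qos_info)
{
	struct uapsd_flags f;

	f.vo = qos_info & 0x01;			/* BIT(0): AC_VO */
	f.vi = qos_info & 0x02;			/* BIT(1): AC_VI */
	f.bk = qos_info & 0x04;			/* BIT(2): AC_BK */
	f.be = qos_info & 0x08;			/* BIT(3): AC_BE */
	f.max_sp_len = (qos_info >> 5) & 0x3;	/* same shift/mask as above */
	return f;
}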
@@ -3245,7 +3245,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
DBG_88E(" old AID %d\n", pstat->aid);
} else {
for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
- if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ if (!pstapriv->sta_aid[pstat->aid - 1])
break;
/* if (pstat->aid > NUM_STA) { */
@@ -3452,14 +3452,13 @@ static unsigned int OnDeAuth(struct adapter *padapter,
}
return _SUCCESS;
- } else
+ }
#endif
- {
- DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
+ DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- }
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
}
@@ -3506,14 +3505,13 @@ static unsigned int OnDisassoc(struct adapter *padapter,
}
return _SUCCESS;
- } else
+ }
#endif
- {
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
+ DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
- }
pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
return _SUCCESS;
}
@@ -3958,7 +3956,7 @@ static void init_channel_list(struct adapter *padapter,
((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
- if (reg == NULL) {
+ if (!reg) {
reg = &channel_list->reg_class[cla];
cla++;
reg->reg_class = o->op_class;
@@ -4515,7 +4513,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* set per sta rate after updating HT cap. */
set_sta_rate(padapter, psta);
rtw_hal_set_hwreg(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
- media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE: 1 means connect */
+ media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
rtw_hal_set_hwreg(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
@@ -4844,7 +4842,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeinfo->state = WIFI_FW_AP_STATE;
type = _HW_STATE_AP_;
} else if (psetop->mode == Ndis802_11Infrastructure) {
- pmlmeinfo->state &= ~(BIT(0)|BIT(1));/* clear state */
+ pmlmeinfo->state &= ~(BIT(0) | BIT(1));/* clear state */
pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
type = _HW_STATE_STATION_;
} else if (psetop->mode == Ndis802_11IBSS) {
@@ -5040,7 +5038,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
u8 val8;
if (is_client_associated_to_ap(padapter))
- issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
@@ -5084,7 +5082,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter,
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
/* clear out first */
- memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+ memset(out, 0, sizeof(struct rtw_ieee80211_channel) * out_num);
/* acquire channels from in */
j = 0;
@@ -5263,7 +5261,7 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
DBG_88E("r871x_set_stakey_hdl(): enc_algorithm=%d\n", pparm->algorithm);
- if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA-4))) {
+ if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA - 4))) {
DBG_88E("r871x_set_stakey_hdl():set_stakey failed, mac_id(aid)=%d\n", psta->mac_id);
return H2C_REJECTED;
}
@@ -5276,10 +5274,10 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
return H2C_SUCCESS_RSP;
- } else {
- DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
- return H2C_REJECTED;
}
+
+ DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
+ return H2C_REJECTED;
}
/* below for sta mode */
@@ -5333,14 +5331,14 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
ptxBeacon_parm = kmemdup(&pmlmeinfo->network,
sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
- if (ptxBeacon_parm == NULL) {
+ if (!ptxBeacon_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
- len_diff = update_hidden_ssid(ptxBeacon_parm->ies+_BEACON_IE_OFFSET_,
- ptxBeacon_parm->ie_length-_BEACON_IE_OFFSET_,
+ len_diff = update_hidden_ssid(ptxBeacon_parm->ies + _BEACON_IE_OFFSET_,
+ ptxBeacon_parm->ie_length - _BEACON_IE_OFFSET_,
pmlmeinfo->hidden_ssid_mode);
ptxBeacon_parm->ie_length += len_diff;
@@ -5361,7 +5359,7 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
peventbuf = (uint *)pbuf;
evt_sz = (u16)(*peventbuf & 0xffff);
- evt_code = (u8)((*peventbuf>>16) & 0xff);
+ evt_code = (u8)((*peventbuf >> 16) & 0xff);
/* checking if event code is valid */
if (evt_code >= MAX_C2HEVT) {
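The C2H event header unpacked above is a single 32-bit word: bits 0-15 carry the event size and bits 16-23 the event code, which is then range-checked against MAX_C2HEVT. The same unpacking as a standalone sketch (helper name hypothetical):

#include <stdint.h>

static void unpack_c2h_hdr(uint32_t hdr, uint16_t *evt_sz, uint8_t *evt_code)
{
	*evt_sz = hdr & 0xffff;		/* bits 0-15: payload size */
	*evt_code = (hdr >> 16) & 0xff;	/* bits 16-23: event code */
}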
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 03dc7e5fcc38..c4f58507dbfd 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -154,8 +154,8 @@ void ips_enter(struct adapter *padapter)
int ips_leave(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int result = _SUCCESS;
int keyid;
@@ -200,28 +200,24 @@ int ips_leave(struct adapter *padapter)
static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
{
- struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
- bool ret = false;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
- goto exit;
+ return false;
if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE))
- goto exit;
-
- ret = true;
+ return false;
-exit:
- return ret;
+ return true;
}
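This rewrite shows the refactoring pattern applied throughout the series: a status flag plus goto (or a trailing else after a return) becomes a chain of early returns, so the function reads top to bottom with one less indentation level. In miniature:

#include <stdbool.h>

/* before: flag + goto */
static bool idle_before(bool busy, bool linked)
{
	bool ret = false;

	if (busy)
		goto exit;
	if (linked)
		goto exit;
	ret = true;
exit:
	return ret;
}

/* after: early returns, same behaviour */
static bool idle_after(bool busy, bool linked)
{
	if (busy)
		return false;
	if (linked)
		return false;
	return true;
}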
void rtw_ps_processor(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
enum rt_rf_power_state rfpwrstate;
pwrpriv->ps_processing = true;
@@ -282,7 +278,7 @@ static void pwr_state_check_handler(struct timer_list *t)
*/
void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
{
- u8 rpwm;
+ u8 rpwm;
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
pslv = PS_STATE(pslv);
@@ -335,8 +331,8 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
static u8 PS_RDY_CHECK(struct adapter *padapter)
{
unsigned long curr_time, delta_time;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
curr_time = jiffies;
delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
@@ -437,7 +433,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
/* */
void LPS_Enter(struct adapter *padapter)
{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
if (!PS_RDY_CHECK(padapter))
return;
@@ -463,7 +459,7 @@ void LPS_Enter(struct adapter *padapter)
/* Leave the leisure power save mode. */
void LPS_Leave(struct adapter *padapter)
{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
if (pwrpriv->bLeisurePs) {
if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
@@ -483,8 +479,8 @@ void LPS_Leave(struct adapter *padapter)
/* */
void LeaveAllPowerSaveMode(struct adapter *Adapter)
{
- struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
- u8 enqueue = 0;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ u8 enqueue = 0;
if (check_fwstate(pmlmepriv, _FW_LINKED))
rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
@@ -611,7 +607,7 @@ exit:
int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
{
- int ret = 0;
+ int ret = 0;
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
if (mode < PS_MODE_NUM) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index af8a79ce8736..6df873e4c2ed 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1247,9 +1247,8 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
if (ralink_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
return HT_IOT_PEER_TENDA;
- } else {
- DBG_88E("Capture EPIGRAM_OUI\n");
}
+ DBG_88E("Capture EPIGRAM_OUI\n");
} else {
break;
}
@@ -1266,10 +1265,9 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
} else if (ralink_vendor_flag && epigram_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
return HT_IOT_PEER_TENDA;
- } else {
- DBG_88E("link to new AP\n");
- return HT_IOT_PEER_UNKNOWN;
}
+ DBG_88E("link to new AP\n");
+ return HT_IOT_PEER_UNKNOWN;
}
void update_IOT_info(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index c37591657bac..258531bc1408 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -1022,10 +1022,10 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
ClearMFrag(mem_start);
break;
- } else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
}
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
+
addr = (size_t)(pframe);
mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 4e2f6cb55a75..7489491f5aaa 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -967,10 +967,11 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
return;
- } else {
- rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
}
+
+ rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+
}
/* 3============================================================ */
diff --git a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
index 251bd8aba3b1..a55a0d8b9fb7 100644
--- a/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
+++ b/drivers/staging/rtl8188eu/hal/odm_rtl8188e.c
@@ -154,35 +154,37 @@ void rtl88eu_dm_update_rx_idle_ant(struct odm_dm_struct *dm_odm, u8 ant)
struct adapter *adapter = dm_odm->Adapter;
u32 default_ant, optional_ant;
- if (dm_fat_tbl->RxIdleAnt != ant) {
- if (ant == MAIN_ANT) {
- default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
- optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
- } else {
- default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
- optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
- MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
- }
+ if (dm_fat_tbl->RxIdleAnt == ant)
+ return;
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(5) | BIT(4) | BIT(3), default_ant);
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(8) | BIT(7) | BIT(6), optional_ant);
- phy_set_bb_reg(adapter, ODM_REG_ANTSEL_CTRL_11N,
- BIT(14) | BIT(13) | BIT(12), default_ant);
- phy_set_bb_reg(adapter, ODM_REG_RESP_TX_11N,
- BIT(6) | BIT(7), default_ant);
- } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(5) | BIT(4) | BIT(3), default_ant);
- phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
- BIT(8) | BIT(7) | BIT(6), optional_ant);
- }
+ if (ant == MAIN_ANT) {
+ default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ } else {
+ default_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ optional_ant = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ?
+ MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ }
+
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(5) | BIT(4) | BIT(3), default_ant);
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(8) | BIT(7) | BIT(6), optional_ant);
+ phy_set_bb_reg(adapter, ODM_REG_ANTSEL_CTRL_11N,
+ BIT(14) | BIT(13) | BIT(12), default_ant);
+ phy_set_bb_reg(adapter, ODM_REG_RESP_TX_11N,
+ BIT(6) | BIT(7), default_ant);
+ } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(5) | BIT(4) | BIT(3), default_ant);
+ phy_set_bb_reg(adapter, ODM_REG_RX_ANT_CTRL_11N,
+ BIT(8) | BIT(7) | BIT(6), optional_ant);
}
+
dm_fat_tbl->RxIdleAnt = ant;
}
@@ -303,6 +305,7 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
+
if (!dm_odm->bLinked) {
ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
("ODM_AntennaDiversity_88E(): No Link.\n"));
@@ -318,19 +321,20 @@ void rtl88eu_dm_antenna_diversity(struct odm_dm_struct *dm_odm)
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
return;
- } else {
- if (!dm_fat_tbl->bBecomeLinked) {
- ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("Need to Turn on HW AntDiv\n"));
- phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
- phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
- BIT(15), 1);
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- phy_set_bb_reg(adapter, ODM_REG_TX_ANT_CTRL_11N,
- BIT(21), 1);
- dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
- }
}
+
+ if (!dm_fat_tbl->bBecomeLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("Need to Turn on HW AntDiv\n"));
+ phy_set_bb_reg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
+ phy_set_bb_reg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N,
+ BIT(15), 1);
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ phy_set_bb_reg(adapter, ODM_REG_TX_ANT_CTRL_11N,
+ BIT(21), 1);
+ dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+ }
+
if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ||
(dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
rtl88eu_dm_hw_ant_div(dm_odm);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 51c40abfafaa..afaf9e55195a 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -923,27 +923,27 @@ static bool simularity_compare(struct adapter *adapt, s32 resulta[][8],
}
}
return result;
- } else {
- if (!(sim_bitmap & 0x03)) { /* path A TX OK */
- for (i = 0; i < 2; i++)
- resulta[3][i] = resulta[c1][i];
- }
- if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
- for (i = 2; i < 4; i++)
- resulta[3][i] = resulta[c1][i];
- }
+ }
- if (!(sim_bitmap & 0x30)) { /* path B TX OK */
- for (i = 4; i < 6; i++)
- resulta[3][i] = resulta[c1][i];
- }
+ if (!(sim_bitmap & 0x03)) { /* path A TX OK */
+ for (i = 0; i < 2; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+ if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
+ for (i = 2; i < 4; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
- if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
- for (i = 6; i < 8; i++)
- resulta[3][i] = resulta[c1][i];
- }
- return false;
+ if (!(sim_bitmap & 0x30)) { /* path B TX OK */
+ for (i = 4; i < 6; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+
+ if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
+ for (i = 6; i < 8; i++)
+ resulta[3][i] = resulta[c1][i];
}
+ return false;
}
static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
@@ -1053,10 +1053,9 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
bMaskDWord)&0x3FF0000)>>16;
break;
- } else {
- ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
- ("Path A Rx IQK Fail!!\n"));
}
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Path A Rx IQK Fail!!\n"));
}
if (path_a_ok == 0x00) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 545d6a6102f1..241f55b92808 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -21,7 +21,7 @@
/* Initialize GPIO setting registers */
static void dm_InitGPIOSetting(struct adapter *Adapter)
{
- u8 tmp1byte;
+ u8 tmp1byte;
tmp1byte = usb_read8(Adapter, REG_GPIO_MUXCFG);
tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
@@ -35,8 +35,8 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
struct hal_data_8188e *hal_data = Adapter->HalData;
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
- struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
/* Init Value */
memset(dm_odm, 0, sizeof(*dm_odm));
@@ -51,44 +51,46 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
dm_odm->AntDivType = hal_data->TRxAntDivType;
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ /* Tx power tracking BB swing table.
+ * The base index =
+ * 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB
+ */
dm_odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
dm_odm->BbSwingIdxOfdmCurrent = 12;
dm_odm->BbSwingFlagOfdm = false;
- pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
dm_odm->SupportAbility = pdmpriv->InitODMFlag;
}
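The BB swing comment above encodes a linear rule: index 12 is the 0 dB reference and each index step is 0.5 dB, so index n corresponds to a Tx-power offset of (12 - n)/2 dB (positive below 12, negative at 13 and up). In integer half-dB units, assuming that reading of the comment (hypothetical helper):

/* offset in 0.5 dB units; e.g. index 10 -> +2 (+1 dB), index 14 -> -2 (-1 dB) */
static int bb_swing_offset_half_db(int idx)
{
	return 12 - idx;
}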
static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
{
- struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct hal_data_8188e *hal_data = Adapter->HalData;
- struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
int i;
- pdmpriv->InitODMFlag = ODM_BB_DIG |
- ODM_BB_RA_MASK |
- ODM_BB_DYNAMIC_TXPWR |
- ODM_BB_FA_CNT |
- ODM_BB_RSSI_MONITOR |
- ODM_BB_CCK_PD |
- ODM_BB_PWR_SAVE |
- ODM_MAC_EDCA_TURBO |
- ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
+ pdmpriv->InitODMFlag = ODM_BB_DIG |
+ ODM_BB_RA_MASK |
+ ODM_BB_DYNAMIC_TXPWR |
+ ODM_BB_FA_CNT |
+ ODM_BB_RSSI_MONITOR |
+ ODM_BB_CCK_PD |
+ ODM_BB_PWR_SAVE |
+ ODM_MAC_EDCA_TURBO |
+ ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
if (hal_data->AntDivCfg)
pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
if (Adapter->registrypriv.mp_mode == 1) {
- pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
}
dm_odm->SupportAbility = pdmpriv->InitODMFlag;
@@ -106,20 +108,23 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
dm_odm->pbPowerSaving = (bool *)&pwrctrlpriv->bpower_saving;
dm_odm->AntDivType = hal_data->TRxAntDivType;
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ /* Tx power tracking BB swing table.
+ * The base index =
+ * 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB
+ */
dm_odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
dm_odm->BbSwingIdxOfdmCurrent = 12;
dm_odm->BbSwingFlagOfdm = false;
for (i = 0; i < NUM_STA; i++)
- ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+ ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i,
+ NULL);
}
void rtl8188e_InitHalDm(struct adapter *Adapter)
{
- struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
- struct odm_dm_struct *dm_odm = &(Adapter->HalData->odmpriv);
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+ struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
dm_InitGPIOSetting(Adapter);
pdmpriv->DM_Type = DM_Type_ByDriver;
@@ -162,7 +167,7 @@ skip_dm:
void rtw_hal_dm_init(struct adapter *Adapter)
{
- struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+ struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
struct odm_dm_struct *podmpriv = &Adapter->HalData->odmpriv;
memset(pdmpriv, 0, sizeof(struct dm_priv));
@@ -172,23 +177,28 @@ void rtw_hal_dm_init(struct adapter *Adapter)
/* Add new function to reset the state of antenna diversity before link. */
/* Compare RSSI for deciding antenna */
-void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
+void rtw_hal_antdiv_rssi_compared(struct adapter *Adapter,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src)
{
if (Adapter->HalData->AntDivCfg != 0) {
- /* select optimum_antenna for before linked =>For antenna diversity */
- if (dst->Rssi >= src->Rssi) {/* keep org parameter */
+ /* select optimum_antenna for before linked => For antenna
+ * diversity
+ */
+ if (dst->Rssi >= src->Rssi) {/* keep org parameter */
src->Rssi = dst->Rssi;
- src->PhyInfo.Optimum_antenna = dst->PhyInfo.Optimum_antenna;
+ src->PhyInfo.Optimum_antenna =
+ dst->PhyInfo.Optimum_antenna;
}
}
}
/* Add new function to reset the state of antenna diversity before link. */
-u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
+bool rtw_hal_antdiv_before_linked(struct adapter *Adapter)
{
struct odm_dm_struct *dm_odm = &Adapter->HalData->odmpriv;
struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
- struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
/* Condition that does not need to use antenna diversity. */
if (Adapter->HalData->AntDivCfg == 0)
@@ -197,15 +207,16 @@ u8 rtw_hal_antdiv_before_linked(struct adapter *Adapter)
if (check_fwstate(pmlmepriv, _FW_LINKED))
return false;
- if (dm_swat_tbl->SWAS_NoLink_State == 0) {
- /* switch channel */
- dm_swat_tbl->SWAS_NoLink_State = 1;
- dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ? Antenna_B : Antenna_A;
-
- rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
- return true;
- } else {
+ if (dm_swat_tbl->SWAS_NoLink_State != 0) {
dm_swat_tbl->SWAS_NoLink_State = 0;
return false;
}
+
+ /* switch channel */
+ dm_swat_tbl->SWAS_NoLink_State = 1;
+ dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ?
+ Antenna_B : Antenna_A;
+
+ rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
+ return true;
}
diff --git a/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
index da66695a1d8f..0c5b2b0948f5 100644
--- a/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_phy_cfg.h
@@ -15,11 +15,6 @@
#define MAX_TXPWR_IDX_NMODE_92S 63
#define Reset_Cnt_Limit 3
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
#define MAX_AGGR_NUM 0x07
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 516a89647003..39df30599a5d 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -209,7 +209,7 @@ void rtw_hal_set_bwmode(struct adapter *padapter,
void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
void rtw_hal_dm_watchdog(struct adapter *padapter);
-u8 rtw_hal_antdiv_before_linked(struct adapter *padapter);
+bool rtw_hal_antdiv_before_linked(struct adapter *padapter);
void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
struct wlan_bssid_ex *dst,
struct wlan_bssid_ex *src);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index d569fe5ed8e6..75f0ebe0faf5 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -765,7 +765,7 @@ bool rtw_is_cckrates_included(u8 *rate);
bool rtw_is_cckratesonly_included(u8 *rate);
-int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
+int rtw_check_network_type(unsigned char *rate);
void rtw_get_bcn_info(struct wlan_network *pnetwork);
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 8245cea2feef..9d39fe13626a 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -239,7 +239,6 @@ struct odm_rate_adapt {
#define IQK_MAC_REG_NUM 4
#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM_MAX 10
#define IQK_BB_REG_NUM 9
#define HP_THERMAL_NUM 8
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 19204335ab4c..910b982ab775 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -10,12 +10,7 @@ enum{
UP_LINK,
DOWN_LINK,
};
-/* duplicate code,will move to ODM ######### */
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-/* duplicate code,will move to ODM ######### */
+
struct dm_priv {
u8 DM_Type;
u8 DMFlag;
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index b5dfb226f32a..c3847cf16ec1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -19,9 +19,6 @@
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-#define RTL8711_RF_MAX_SENS 6
-#define RTL8711_RF_DEF_SENS 4
-
/* We now define the following channels as the max channels in each
* channel plan.
*/
@@ -30,8 +27,6 @@
#define MAX_CHANNEL_NUM_2G 14
#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
-#define NUM_REGULATORYS 1
-
/* Country codes */
#define USA 0x555320
#define EUROPE 0x1 /* temp, should be provided later */
@@ -74,17 +69,6 @@ enum _REG_PREAMBLE_MODE {
PREAMBLE_SHORT = 3,
};
-enum _RTL8712_RF_MIMO_CONFIG_ {
- RTL8712_RFCONFIG_1T = 0x10,
- RTL8712_RFCONFIG_2T = 0x20,
- RTL8712_RFCONFIG_1R = 0x01,
- RTL8712_RFCONFIG_2R = 0x02,
- RTL8712_RFCONFIG_1T1R = 0x11,
- RTL8712_RFCONFIG_1T2R = 0x12,
- RTL8712_RFCONFIG_TURBO = 0x92,
- RTL8712_RFCONFIG_2T2R = 0x22
-};
-
enum rf90_radio_path {
RF90_PATH_A = 0, /* Radio Path A */
RF90_PATH_B = 1, /* Radio Path B */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 710c33fd4965..9b6ea86d1dcf 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -222,18 +222,21 @@ static char *translate_scan(struct adapter *padapter,
/* parsing WPA/WPA2 IE */
{
- u8 buf[MAX_WPA_IE_LEN];
+ u8 *buf;
u8 wpa_ie[255], rsn_ie[255];
u16 wpa_len = 0, rsn_len = 0;
u8 *p;
+ buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);
+ if (!buf)
+ return start;
+
rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
if (wpa_len > 0) {
p = buf;
- memset(buf, 0, MAX_WPA_IE_LEN);
p += sprintf(p, "wpa_ie=");
for (i = 0; i < wpa_len; i++)
p += sprintf(p, "%02x", wpa_ie[i]);
@@ -250,7 +253,6 @@ static char *translate_scan(struct adapter *padapter,
}
if (rsn_len > 0) {
p = buf;
- memset(buf, 0, MAX_WPA_IE_LEN);
p += sprintf(p, "rsn_ie=");
for (i = 0; i < rsn_len; i++)
p += sprintf(p, "%02x", rsn_ie[i]);
@@ -264,6 +266,7 @@ static char *translate_scan(struct adapter *padapter,
iwe.u.data.length = rsn_len;
start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
}
+ kfree(buf);
}
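The translate_scan() change above swaps a MAX_WPA_IE_LEN stack array for kzalloc(..., GFP_ATOMIC), which is also why both memset() calls can go: the allocation is already zeroed, and GFP_ATOMIC is used because the caller may not be allowed to sleep. The shape of the pattern in isolation (a sketch, not the driver function):

u8 *buf = kzalloc(MAX_WPA_IE_LEN, GFP_ATOMIC);	/* zeroed, non-sleeping */

if (!buf)
	return start;	/* allocation failed: report nothing */

/* ... sprintf() the hex dump into buf, hand it to iwe_stream_add_point() ... */

kfree(buf);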
{/* parsing WPS IE */
@@ -593,9 +596,8 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
cnt += buf[cnt+1]+2;
break;
- } else {
- cnt += buf[cnt+1]+2; /* goto next */
}
+ cnt += buf[cnt+1]+2; /* goto next */
}
}
}
@@ -770,8 +772,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
return ret;
- else
- ret = true;
+ ret = true;
blInserted = false;
/* overwrite PMKID */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index a7cac0719b8b..b5d42f411dd8 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
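Supporting the TL-WN727N here is purely a table change: usbcore walks rtw_usb_id_tbl and probes the driver on a vendor/product match. The USB_DEVICE() initializer is shorthand for approximately:

/* what {USB_DEVICE(0x2357, 0x0111)} expands to, roughly */
{
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor    = 0x2357,	/* TP-Link */
	.idProduct   = 0x0111,	/* TL-WN727N v5.21 */
},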
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index dace81a7d1ba..11183b9f757a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -267,7 +267,7 @@ static short _rtl92e_check_nic_enough_desc(struct net_device *dev, int prio)
return 0;
}
-static void _rtl92e_tx_timeout(struct net_device *dev)
+static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r8192_priv *priv = rtllib_priv(dev);
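Both tx_timeout changes in this series track the v5.6 update to ndo_tx_timeout, which now passes the index of the queue that stalled; single-queue drivers like these can simply ignore it. A sketch of a conforming handler (hypothetical driver):

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* txqueue names the stalled TX queue; unused on single-queue hardware */
	netdev_warn(dev, "transmit timeout on queue %u\n", txqueue);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_tx_timeout = my_tx_timeout,
};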
@@ -1973,18 +1973,11 @@ u8 rtl92e_rx_db_to_percent(s8 antpower)
u8 rtl92e_evm_db_to_percent(s8 value)
{
- s8 ret_val;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
- ret_val = value;
-
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
+
return ret_val;
}
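This clamp() rewrite recurs three times in the series (here, in r8192U_core.c and in rtl8712_recv.c below); in each case -value is saturated to [0, 33] and scaled by 3, mapping -33dB~0dB to 0~99% with 99 promoted to 100. A quick userspace sketch checking that the old and new forms agree over the whole s8 range (clamp_int stands in for the kernel's clamp()):

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int evm_old(int value)	/* the open-coded original */
{
	int r = value;

	if (r >= 0)
		r = 0;
	if (r <= -33)
		r = -33;
	r = -r * 3;
	return r == 99 ? 100 : r;
}

static int evm_new(int value)	/* the clamp() form */
{
	int r = clamp_int(-value, 0, 33) * 3;

	return r == 99 ? 100 : r;
}

/* for (int v = -128; v <= 127; v++) assert(evm_old(v) == evm_new(v)); */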
@@ -2463,7 +2456,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
}
- ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
+ ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
if (ioaddr == (unsigned long)NULL) {
netdev_err(dev, "ioremap failed!");
goto err_rel_mem;
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index dcd51bf4aed3..0be7426b6ebc 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -1,13 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
NIC_SELECT = RTL8192U
-ccflags-y := -std=gnu89
-ccflags-y += -O2
-
ccflags-y += -DCONFIG_FORCE_HARD_FLOAT=y
ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
-ccflags-y += -I $(srctree)/$(src)/ieee80211
r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o \
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
deleted file mode 100644
index 0d4d6489f767..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-NIC_SELECT = RTL8192U
-
-ccflags-y := -O2
-ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
-
-ieee80211-rsl-objs := ieee80211_rx.o \
- ieee80211_softmac.o \
- ieee80211_tx.o \
- ieee80211_wx.o \
- ieee80211_module.o \
- ieee80211_softmac_wx.o\
- rtl819x_HTProc.o\
- rtl819x_TSProc.o\
- rtl819x_BAProc.o\
- dot11d.o
-
-ieee80211_crypt-rsl-objs := ieee80211_crypt.o
-ieee80211_crypt_tkip-rsl-objs := ieee80211_crypt_tkip.o
-ieee80211_crypt_ccmp-rsl-objs := ieee80211_crypt_ccmp.o
-ieee80211_crypt_wep-rsl-objs := ieee80211_crypt_wep.o
-
-obj-m +=ieee80211-rsl.o
-obj-m +=ieee80211_crypt-rsl.o
-obj-m +=ieee80211_crypt_wep-rsl.o
-obj-m +=ieee80211_crypt_tkip-rsl.o
-obj-m +=ieee80211_crypt_ccmp-rsl.o
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 4a6c3f674431..aa9dab8a4ae8 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -264,12 +264,12 @@ static int open_debug_level(struct inode *inode, struct file *file)
return single_open(file, show_debug_level, NULL);
}
-static const struct file_operations fops = {
- .open = open_debug_level,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = write_debug_level,
- .release = single_release,
+static const struct proc_ops debug_level_proc_ops = {
+ .proc_open = open_debug_level,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_write = write_debug_level,
+ .proc_release = single_release,
};
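This is the v5.6 migration from struct file_operations to the procfs-specific struct proc_ops: the callback bodies are untouched and only the field names gain a proc_ prefix. A minimal read-only entry under the new API looks like this (names hypothetical):

static int my_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, NULL);
}

static const struct proc_ops my_proc_ops = {
	.proc_open    = my_open,
	.proc_read    = seq_read,
	.proc_lseek   = seq_lseek,
	.proc_release = single_release,
};

/* e = proc_create("my_entry", 0444, parent, &my_proc_ops); */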
int __init ieee80211_debug_init(void)
@@ -284,7 +284,7 @@ int __init ieee80211_debug_init(void)
" proc directory\n");
return -EIO;
}
- e = proc_create("debug_level", 0644, ieee80211_proc, &fops);
+ e = proc_create("debug_level", 0644, ieee80211_proc, &debug_level_proc_ops);
if (!e) {
remove_proc_entry(DRV_NAME, init_net.proc_net);
ieee80211_proc = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 00fea127bdc3..e101f7b13c7e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -232,8 +232,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER) {
- printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
- ieee->dev->name);
+ netdev_dbg(ieee->dev, "Master mode not yet supported.\n");
return 0;
/*
hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
@@ -261,9 +260,9 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->iw_mode == IW_MODE_MASTER) {
if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
- printk(KERN_DEBUG "%s: unknown management frame "
+ netdev_dbg(skb->dev, "unknown management frame "
"(type=0x%02x, stype=0x%02x) dropped\n",
- skb->dev->name, type, stype);
+ type, stype);
return -1;
}
@@ -271,8 +270,8 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
return 0;
}
- printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
- "received in non-Host AP mode\n", skb->dev->name);
+ netdev_dbg(skb->dev, "hostap_rx_frame_mgmt: management frame "
+ "received in non-Host AP mode\n");
return -1;
#endif
}
@@ -349,9 +348,9 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->tkip_countermeasures &&
strcmp(crypt->ops->name, "TKIP") == 0) {
if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+ netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
"received packet from %pM\n",
- ieee->dev->name, hdr->addr2);
+ hdr->addr2);
}
return -1;
}
@@ -397,9 +396,9 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
- printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+ netdev_dbg(ieee->dev, "MSDU decryption/MIC verification failed"
" (SA=%pM keyidx=%d)\n",
- ieee->dev->name, hdr->addr2, keyidx);
+ hdr->addr2, keyidx);
return -1;
}
@@ -749,7 +748,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
kfree(prxbIndicateArray);
}
-static u8 parse_subframe(struct sk_buff *skb,
+static u8 parse_subframe(struct ieee80211_device *ieee,
+ struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats,
struct ieee80211_rxb *rxb, u8 *src, u8 *dst)
{
@@ -810,11 +810,11 @@ static u8 parse_subframe(struct sk_buff *skb,
nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8);
if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
- printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
- __func__, rxb->nr_subframes);
- printk("%s: A-MSDU parse error!! Subframe Length: %d\n", __func__, nSubframe_Length);
- printk("nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
- printk("The Packet SeqNum is %d\n", SeqNum);
+ netdev_dbg(ieee->dev, "A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
+ rxb->nr_subframes);
+ netdev_dbg(ieee->dev, "A-MSDU parse error!! Subframe Length: %d\n", nSubframe_Length);
+ netdev_dbg(ieee->dev, "nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
+ netdev_dbg(ieee->dev, "The Packet SeqNum is %d\n", SeqNum);
return 0;
}
@@ -904,8 +904,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
stats = &ieee->stats;
if (skb->len < 10) {
- printk(KERN_INFO "%s: SKB length < 10\n",
- dev->name);
+ netdev_info(dev, "SKB length < 10\n");
goto rx_dropped;
}
@@ -919,7 +918,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
if (HTCCheck(ieee, skb->data)) {
if (net_ratelimit())
- printk("find HTCControl\n");
+ netdev_warn(dev, "find HTCControl\n");
hdrlen += 4;
rx_stats->bContainHTC = true;
}
@@ -1113,7 +1112,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
(keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
- printk("decrypt frame error\n");
+ netdev_dbg(ieee->dev, "decrypt frame error\n");
goto rx_dropped;
}
@@ -1141,9 +1140,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
flen -= hdrlen;
if (frag_skb->tail + flen > frag_skb->end) {
- printk(KERN_WARNING "%s: host decrypted and "
- "reassembled frame did not fit skb\n",
- dev->name);
+ netdev_warn(dev, "host decrypted and "
+ "reassembled frame did not fit skb\n");
ieee80211_frag_cache_invalidate(ieee, hdr);
goto rx_dropped;
}
@@ -1178,7 +1176,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
* encrypted/authenticated */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
- printk("==>decrypt msdu error\n");
+ netdev_dbg(ieee->dev, "==>decrypt msdu error\n");
goto rx_dropped;
}
@@ -1250,7 +1248,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
goto rx_dropped;
/* to parse amsdu packets */
/* qos data packets & reserved bit is 1 */
- if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) {
+ if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
/* only to free rxb, and not submit the packets to upper layer */
for (i = 0; i < rxb->nr_subframes; i++) {
dev_kfree_skb(rxb->subframes[i]);
@@ -1863,7 +1861,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x13 &&
info_element->data[2] == 0x74)) {
- printk("========>%s(): athros AP is exist\n", __func__);
+ netdev_dbg(ieee->dev, "========> athros AP is exist\n");
network->atheros_cap_exist = true;
} else
network->atheros_cap_exist = false;
@@ -1980,8 +1978,8 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
}
break;
case MFIE_TYPE_QOS_PARAMETER:
- printk(KERN_ERR
- "QoS Error need to parse QOS_PARAMETER IE\n");
+ netdev_err(ieee->dev,
+ "QoS Error need to parse QOS_PARAMETER IE\n");
break;
case MFIE_TYPE_COUNTRY:
@@ -2357,14 +2355,14 @@ static inline void ieee80211_process_probe_response(
if (IS_COUNTRY_IE_VALID(ieee)) {
// Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
- printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
goto out;
}
} else {
// Case 2: No any country code.
// Filter over channel ch12~14
if (network->channel > 11) {
- printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
goto out;
}
}
@@ -2372,14 +2370,14 @@ static inline void ieee80211_process_probe_response(
if (IS_COUNTRY_IE_VALID(ieee)) {
// Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
- printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
goto out;
}
} else {
// Case 2: No any country code.
// Filter over channel ch12~14
if (network->channel > 14) {
- printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
+ netdev_warn(ieee->dev, "GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
goto out;
}
}
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 7e2cabd16e88..89dd1fb0b38d 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -60,7 +60,7 @@ double __extendsfdf2(float a)
#include <linux/seq_file.h>
/* FIXME: check if 2.6.7 is ok */
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
/* set here to open your trace code. */
u32 rt_global_debug_component = COMP_DOWN |
COMP_SEC |
@@ -640,7 +640,7 @@ short check_nic_enough_desc(struct net_device *dev, int queue_index)
return (used < MAX_TX_URB);
}
-static void tx_timeout(struct net_device *dev)
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3954,18 +3954,11 @@ static u8 rtl819x_query_rxpwrpercentage(s8 antpower)
static u8 rtl819x_evm_dbtopercentage(s8 value)
{
- s8 ret_val;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
- ret_val = value;
-
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
+
return ret_val;
}
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 5822bb7984b9..0118edb0b9ab 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -23,7 +23,7 @@
#include "r8192U.h"
#include "r8192U_hw.h"
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
#include "r8192U_wx.h"
#define RATE_COUNT 12
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index c04d8eca0cfb..555e52522be6 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -7,7 +7,7 @@
#include "r8192U_dm.h"
#include "r819xU_firmware_img.h"
-#include "dot11d.h"
+#include "ieee80211/dot11d.h"
#include <linux/bitops.h>
static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 00ea0beb12c9..116773943a2e 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -663,17 +663,11 @@ static u8 evm_db2percentage(s8 value)
/*
* -33dB~0dB to 0%~99%
*/
- s8 ret_val;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
- ret_val = value;
- if (ret_val >= 0)
- ret_val = 0;
- if (ret_val <= -33)
- ret_val = -33;
- ret_val = -ret_val;
- ret_val *= 3;
if (ret_val == 99)
ret_val = 100;
+
return ret_val;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 5e687f6d2c3e..c642825ca8ef 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -207,10 +207,10 @@ static RT_CHANNEL_PLAN_MAP RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x02, 0x1F}, /* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
};
-static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02}; /* use the conbination for max channel numbers */
+ /* use the combination for max channel numbers */
+static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02};
-/*
- * Search the @param ch in given @param ch_set
+/* Search the @param ch in given @param ch_set
* @ch_set: the given channel set
* @ch: the given channel number
*
@@ -229,8 +229,7 @@ int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch)
return i;
}
-/*
- * Check the @param ch is fit with setband setting of @param adapter
+/* Check the @param ch is fit with setband setting of @param adapter
* @adapter: the given adapter
* @ch: the given channel number
*
@@ -2265,7 +2264,7 @@ inline struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
/****************************************************************************
-Following are some TX fuctions for WiFi MLME
+Following are some TX functions for WiFi MLME
*****************************************************************************/
@@ -2956,7 +2955,7 @@ exit:
return ret;
}
-/* if psta == NULL, indiate we are station(client) now... */
+/* if psta == NULL, indicate we are station(client) now... */
void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
{
struct xmit_frame *pmgntframe;
@@ -3356,9 +3355,11 @@ void issue_assocreq(struct adapter *padapter)
(!memcmp(pIE->data, WPS_OUI, 4))) {
vs_ie_length = pIE->Length;
if ((!padapter->registrypriv.wifi_spec) && (!memcmp(pIE->data, WPS_OUI, 4))) {
- /* Commented by Kurt 20110629 */
- /* In some older APs, WPS handshake */
- /* would be fail if we append vender extensions informations to AP */
+ /* Commented by Kurt 20110629
+ * In some older APs, the WPS handshake
+ * would fail if we append vendor
+ * extension information to the AP
+ */
vs_ie_length = 14;
}
@@ -3406,7 +3407,7 @@ exit:
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
unsigned int power_mode, bool wait_ack)
{
@@ -3481,7 +3482,7 @@ exit:
/*
* [IMPORTANT] Don't call this function in interrupt context
*
- * When wait_ms > 0, this function shoule be called at process context
+ * When wait_ms > 0, this function should be called at process context
* da == NULL for station mode
*/
int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
@@ -3493,7 +3494,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
struct sta_info *psta;
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -3558,14 +3559,14 @@ s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
pmlmeext = &padapter->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
return _issue_nulldata(padapter, da, 0, false);
}
-/* when wait_ack is ture, this function shoule be called at process context */
+/* when wait_ack is true, this function should be called at process context */
static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
u16 tid, bool wait_ack)
{
@@ -3644,7 +3645,7 @@ exit:
return ret;
}
-/* when wait_ms >0 , this function shoule be called at process context */
+/* when wait_ms >0 , this function should be called at process context */
/* da == NULL for station mode */
int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
{
@@ -3653,7 +3654,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* da == NULL, assum it's null data for sta to ap*/
+ /* da == NULL, assume it's null data for sta to ap*/
if (!da)
da = get_my_bssid(&(pmlmeinfo->network));
@@ -4264,7 +4265,7 @@ unsigned int send_beacon(struct adapter *padapter)
/****************************************************************************
-Following are some utitity fuctions for WiFi MLME
+Following are some utility functions for WiFi MLME
*****************************************************************************/
@@ -4568,7 +4569,7 @@ u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, str
}
#endif
- /* mark bss info receving from nearby channel as SignalQuality 101 */
+ /* mark bss info receiving from nearby channel as SignalQuality 101 */
if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
bssid->PhyInfo.SignalQuality = 101;
@@ -4589,7 +4590,7 @@ void start_create_ibss(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_IBSS) {/* adhoc master */
@@ -4644,7 +4645,7 @@ void start_clnt_join(struct adapter *padapter)
/* update wireless mode */
update_wireless_mode(padapter);
- /* udpate capability */
+ /* update capability */
caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
update_capinfo(padapter, caps);
if (caps&cap_ESS) {
@@ -5379,8 +5380,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter)
/* set_opmode_cmd(padapter, infra_client_with_mlme); */
- /*
- * For safety, prevent from keeping macid sleep.
+ /* For safety, prevent from keeping macid sleep.
* If we can sure all power mode enter/leave are paired,
* this check can be removed.
* Lucas@20131113
@@ -5441,7 +5441,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* turn on dynamic functions */
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
- /* update IOT-releated issue */
+ /* update IOT-related issue */
update_IOT_info(padapter);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
@@ -5449,7 +5449,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* BCN interval */
rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
- /* udpate capability */
+ /* update capability */
update_capinfo(padapter, pmlmeinfo->capability);
/* WMM, Update EDCA param */
@@ -6385,7 +6385,9 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
Save_DM_Func_Flag(padapter);
Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
- /* config the initial gain under scaning, need to write the BB registers */
+ /* config the initial gain under scanning, need to write the BB
+ * registers
+ */
initialgain = 0x1e;
rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
index beb4002a40e1..357802db9aed 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -622,33 +622,3 @@ void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
}
-
-
-
-
-/* 3 ============================================================ */
-/* 3 IQ Calibration */
-/* 3 ============================================================ */
-
-u8 ODM_GetRightChnlPlaceforIQK(u8 chnl)
-{
- u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
- 161, 163, 165
- };
- u8 place = chnl;
-
-
- if (chnl > 14) {
- for (place = 14; place < sizeof(channel_all); place++) {
- if (channel_all[place] == chnl)
- return place-13;
- }
- }
- return 0;
-
-}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.h b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
index 3d6f68bc61d7..643fcf37c9ad 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf.h
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
@@ -44,12 +44,4 @@ void ODM_ClearTxPowerTrackingState(PDM_ODM_T pDM_Odm);
void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter);
-
-
-#define ODM_TARGET_CHNL_NUM_2G_5G 59
-
-
-u8 ODM_GetRightChnlPlaceforIQK(u8 chnl);
-
-
#endif /* #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 1ca9063a269f..85ea535dd6e9 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
+/*****************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
@@ -82,7 +82,9 @@ static void setIqkMatrix_8723B(
/* if (RFPath == ODM_RF_PATH_A) */
switch (RFPath) {
case ODM_RF_PATH_A:
- /* wirte new elements A, C, D to regC80 and regC94, element B is always 0 */
+ /* write new elements A, C, D to regC80 and regC94,
+ * element B is always 0
+ */
value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, value32);
@@ -93,7 +95,9 @@ static void setIqkMatrix_8723B(
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT24, value32);
break;
case ODM_RF_PATH_B:
- /* wirte new elements A, C, D to regC88 and regC9C, element B is always 0 */
+ /* write new elements A, C, D to regC88 and regC9C,
+ * element B is always 0
+ */
value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
@@ -166,7 +170,7 @@ void DoIQK_8723B(
/*-----------------------------------------------------------------------------
* Function: odm_TxPwrTrackSetPwr88E()
*
- * Overview: 88E change all channel tx power accordign to flag.
+ * Overview: 88E change all channel tx power according to flag.
* OFDM & CCK are all different.
*
* Input: NONE
@@ -1788,7 +1792,7 @@ void PHY_IQCalibrate_8723B(
PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
s32 result[4][8]; /* last is final result */
- u8 i, final_candidate, Indexforchannel;
+ u8 i, final_candidate;
bool bPathAOK, bPathBOK;
s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC, RegTmp = 0;
bool is12simular, is13simular, is23simular;
@@ -1993,17 +1997,14 @@ void PHY_IQCalibrate_8723B(
_PHY_PathBFillIQKMatrix8723B(padapter, bPathBOK, result, final_candidate, (RegEC4 == 0));
}
- Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel);
-
/* To Fix BSOD when final_candidate is 0xff */
/* by sherry 20120321 */
if (final_candidate < 4) {
for (i = 0; i < IQK_Matrix_REG_NUM; i++)
- pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i];
- pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = true;
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].Value[0][i] = result[final_candidate][i];
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[0].bIQKDone = true;
}
- /* RT_DISP(FINIT, INIT_IQK, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel)); */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", 0));
_PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 66127f6c8e4d..de8caa6cd418 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -750,7 +750,7 @@ static void Hal_BT_EfusePowerSwitch(
rtw_write8(padapter, 0x6B, tempval);
/* Attention!! Between 0x6A[14] and 0x6A[15] setting need 100us delay */
- /* So don't wirte 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+ /* So don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
msleep(1);
/* disable BT output isolation */
/* 0x6A[15] = 0 */
@@ -765,7 +765,7 @@ static void Hal_BT_EfusePowerSwitch(
rtw_write8(padapter, 0x6B, tempval);
/* Attention!! Between 0x6A[14] and 0x6A[15] setting need 100us delay */
- /* So don't wirte 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+ /* So don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
/* disable BT power cut */
/* 0x6A[14] = 1 */
@@ -1231,7 +1231,7 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
goto exit;
error:
- /* report max size to prevent wirte efuse */
+ /* report max size to prevent write efuse */
EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &efuse_addr, bPseudoTest);
exit:
@@ -2237,7 +2237,7 @@ void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
u8 val;
val = rtw_read8(padapter, REG_LEDCFG2);
- /* Let 8051 take control antenna settting */
+ /* Let 8051 take control antenna setting */
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
rtw_write8(padapter, REG_LEDCFG2, val);
}
@@ -3191,22 +3191,26 @@ static void rtl8723b_fill_default_txdesc(
if (bmcst)
ptxdesc->bmc = 1;
- /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
- /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
- /* mgnt frame should be controled by Hw because Fw will also send null data */
- /* which we cannot control when Fw LPS enable. */
- /* --> default enable non-Qos data sequense number. 2010.06.23. by tynli. */
- /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
- /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
- /* 2010.06.23. Added by tynli. */
+ /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS.
+ * (1) The sequence number of each non-Qos frame / broadcast /
+ * multicast / mgnt frame should be controlled by Hw because Fw
+ * will also send null data which we cannot control when Fw LPS
+ * enable.
+ * --> default enable non-Qos data sequence number. 2010.06.23.
+ * by tynli.
+ * (2) Enable HW SEQ control for beacon packet, because we use
+ * Hw beacon.
+ * (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos
+ * packets.
+ * 2010.06.23. Added by tynli.
+ */
if (!pattrib->qos_en) /* Hw set sequence number */
ptxdesc->en_hwseq = 1; /* HWSEQ_EN */
}
-/*
- *Description:
+/* Description:
*
- *Parameters:
+ * Parameters:
* pxmitframe xmitframe
* pbuf where to fill tx desc
*/
@@ -3543,7 +3547,7 @@ static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8
rtw_write8(padapter, reg_bcn_ctl, val8);
}
- /* Save orignal RRSR setting. */
+ /* Save original RRSR setting. */
pHalData->RegRRSR = rtw_read16(padapter, REG_RRSR);
} else {
/* sitesurvey done */
@@ -3561,7 +3565,7 @@ static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8
value_rcr |= rcr_clear_bit;
rtw_write32(padapter, REG_RCR, value_rcr);
- /* Restore orignal RRSR setting. */
+ /* Restore original RRSR setting. */
rtw_write16(padapter, REG_RRSR, pHalData->RegRRSR);
}
}
@@ -4329,8 +4333,7 @@ void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
}
}
-/*
- *Description:
+/* Description:
* Change default setting of specified variable.
*/
u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
@@ -4348,8 +4351,7 @@ u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, v
return bResult;
}
-/*
- *Description:
+/* Description:
* Query setting of specified variable.
*/
u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
diff --git a/drivers/staging/rts5208/Makefile b/drivers/staging/rts5208/Makefile
index 6a934c41c738..3c9e9797d3d9 100644
--- a/drivers/staging/rts5208/Makefile
+++ b/drivers/staging/rts5208/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RTS5208) := rts5208.o
-ccflags-y := -Idrivers/scsi
-
rts5208-y := rtsx.o rtsx_chip.o rtsx_transport.o rtsx_scsi.o \
rtsx_card.o general.o sd.o xd.o ms.o spi.o
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index cb95ad6fa4f9..be0053c795b7 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -831,7 +831,8 @@ static int rtsx_probe(struct pci_dev *pci,
host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
if (!host) {
dev_err(&pci->dev, "Unable to allocate the scsi host\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto scsi_host_alloc_fail;
}
dev = host_to_rtsx(host);
@@ -858,7 +859,7 @@ static int rtsx_probe(struct pci_dev *pci,
dev_info(&pci->dev, "Resource length: 0x%x\n",
(unsigned int)pci_resource_len(pci, 0));
dev->addr = pci_resource_start(pci, 0);
- dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
+ dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0));
if (!dev->remap_addr) {
dev_err(&pci->dev, "ioremap error\n");
err = -ENXIO;
@@ -971,7 +972,8 @@ ioremap_fail:
kfree(dev->chip);
chip_alloc_fail:
dev_err(&pci->dev, "%s failed\n", __func__);
-
+scsi_host_alloc_fail:
+ pci_release_regions(pci);
return err;
}
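The fix above extends the usual kernel unwind chain: a failure after pci_request_regions() must jump to a label that releases the regions rather than returning directly. A generic sketch of the idiom (function and label names are placeholders, not from this driver):

static int probe(struct pci_dev *pci)
{
	int err;

	err = pci_request_regions(pci, "drv");
	if (err)
		return err;
	if (!alloc_something()) {	/* alloc_something() is hypothetical */
		err = -ENOMEM;
		goto release_regions;	/* unwind in reverse order of acquisition */
	}
	return 0;

release_regions:
	pci_release_regions(pci);
	return err;
}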
@@ -983,6 +985,7 @@ static void rtsx_remove(struct pci_dev *pci)
quiesce_and_remove_host(dev);
release_everything(dev);
+ pci_release_regions(pci);
}
/* PCI IDs */
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index ea1d3d4efbc2..b8d60701f898 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -50,7 +50,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
}
/* now map mmio and vidmem */
- sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start,
+ sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start,
sm750_dev->vidreg_size);
if (!sm750_dev->pvReg) {
pr_err("mmio failed\n");
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 1d1440d43002..0433536930a9 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1078,7 +1078,7 @@ out_save_flags:
* Queue the work and return. Make sure we have not already been informed that
* the IO Partition is gone; if so, we will have already timed-out the xmits.
*/
-static void visornic_xmit_timeout(struct net_device *netdev)
+static void visornic_xmit_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct visornic_devdata *devdata = netdev_priv(netdev);
unsigned long flags;
diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c
index 34020ed351ab..a5ab255d7d36 100644
--- a/drivers/staging/uwb/whc-rc.c
+++ b/drivers/staging/uwb/whc-rc.c
@@ -216,11 +216,11 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc)
goto error_request_region;
}
- whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
+ whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len);
if (whcrc->rc_base == NULL) {
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
- goto error_ioremap_nocache;
+ goto error_ioremap;
}
result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
@@ -254,7 +254,7 @@ error_cmd_buffer:
free_irq(umc_dev->irq, whcrc);
error_request_irq:
iounmap(whcrc->rc_base);
-error_ioremap_nocache:
+error_ioremap:
release_mem_region(whcrc->area, whcrc->rc_len);
error_request_region:
return result;
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index afe43fa5a6d7..54d9e2f31916 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -13,5 +13,5 @@ vchiq-objs := \
obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
-ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
+ccflags-y += -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 826016c3431a..33485184a98a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -193,17 +193,6 @@ static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
-}
-
-static int snd_bcm2835_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
{
struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
@@ -316,9 +305,6 @@ snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
.open = snd_bcm2835_playback_open,
.close = snd_bcm2835_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_bcm2835_pcm_hw_params,
- .hw_free = snd_bcm2835_pcm_hw_free,
.prepare = snd_bcm2835_pcm_prepare,
.trigger = snd_bcm2835_pcm_trigger,
.pointer = snd_bcm2835_pcm_pointer,
@@ -328,9 +314,6 @@ static const struct snd_pcm_ops snd_bcm2835_playback_ops = {
static const struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = {
.open = snd_bcm2835_playback_spdif_open,
.close = snd_bcm2835_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_bcm2835_pcm_hw_params,
- .hw_free = snd_bcm2835_pcm_hw_free,
.prepare = snd_bcm2835_pcm_prepare,
.trigger = snd_bcm2835_pcm_trigger,
.pointer = snd_bcm2835_pcm_pointer,
@@ -362,7 +345,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
spdif ? &snd_bcm2835_playback_spdif_ops :
&snd_bcm2835_playback_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
chip->card->dev, 128 * 1024, 128 * 1024);
if (spdif)
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index beb6a0063bb8..1ef31a984741 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -60,6 +60,9 @@ MODULE_PARM_DESC(max_video_width, "Threshold for video mode");
module_param(max_video_height, int, 0644);
MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
+/* camera instance counter */
+static atomic_t camera_instance = ATOMIC_INIT(0);
+
/* global device data array */
static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
@@ -1870,7 +1873,6 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
/* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
mutex_init(&dev->mutex);
- dev->camera_num = camera;
dev->max_width = resolutions[camera][0];
dev->max_height = resolutions[camera][1];
@@ -1886,8 +1888,9 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
dev->capture.fmt = &formats[3]; /* JPEG */
/* v4l device registration */
- snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
- "%s", BM2835_MMAL_MODULE_NAME);
+ dev->camera_num = v4l2_device_set_name(&dev->v4l2_dev,
+ BM2835_MMAL_MODULE_NAME,
+ &camera_instance);
ret = v4l2_device_register(NULL, &dev->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "%s: could not register V4L2 device: %d\n",
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 56b1037d8e25..ff2b960d8cac 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -4,8 +4,8 @@
#ifndef VCHI_H_
#define VCHI_H_
-#include "interface/vchi/vchi_cfg.h"
-#include "interface/vchi/vchi_common.h"
+#include "vchi_cfg.h"
+#include "vchi_common.h"
/******************************************************************************
* Global defs
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 0ce3b08b3441..efdd3b1c7d85 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -3,7 +3,7 @@
#include <linux/module.h>
#include <linux/types.h>
-#include "interface/vchi/vchi.h"
+#include "../vchi/vchi.h"
#include "vchiq.h"
#include "vchiq_core.h"
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index fb2855e686a7..d6ca6e5551a7 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -758,7 +758,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
*/
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
{
- bool ret = true;
+ bool ret;
unsigned char byPwr = 0;
unsigned char byDec = 0;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 8d19ae71e7cc..f18e059ce66b 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -381,8 +381,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
- if ((priv->rf_type == RF_AL2230) ||
- (priv->rf_type == RF_AL2230S)) {
+ if (priv->rf_type == RF_AL2230 ||
+ priv->rf_type == RF_AL2230S) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
@@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
memcpy(array, addr, length);
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBREG, length, array);
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_BBREG, length, array);
if (ret)
goto end;
@@ -461,8 +461,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
if (ret)
goto end;
- if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3342A0)) {
+ if (priv->rf_type == RF_VT3226 ||
+ priv->rf_type == RF_VT3342A0) {
ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
MAC_REG_ITRTMSET, 0x23);
if (ret)
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 56cd77fd9ea0..7958fc165462 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -719,7 +719,7 @@ end:
*/
int vnt_radio_power_on(struct vnt_private *priv)
{
- int ret = true;
+ int ret = 0;
vnt_exit_deep_sleep(priv);
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 6074ceda78bf..fe6c11266123 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -52,6 +52,8 @@
#define RATE_AUTO 12
#define MAX_RATE 12
+#define VNT_B_RATES (BIT(RATE_1M) | BIT(RATE_2M) |\
+ BIT(RATE_5M) | BIT(RATE_11M))
/*
* device specific
@@ -204,6 +206,22 @@ enum {
CONTEXT_BEACON_PACKET
};
+struct vnt_rx_header {
+ u32 wbk_status;
+ u8 rx_sts;
+ u8 rx_rate;
+ u16 pay_load_len;
+} __packed;
+
+struct vnt_rx_tail {
+ __le64 tsf_time;
+ u8 sq;
+ u8 new_rsr;
+ u8 rssi;
+ u8 rsr;
+ u8 sq_3;
+} __packed;
+
/* RCB (Receive Control Block) */
struct vnt_rcb {
void *priv;
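Given the two packed structs introduced above, the USB RX frame layout the driver assumes looks like this (a sketch; sizes follow from the fields shown, padding as computed in dpc.c):

/*
 * struct vnt_rx_header   8 bytes   wbk_status, rx_sts, rx_rate, pay_load_len
 * payload                pay_load_len bytes, padded up to a 4-byte multiple
 * struct vnt_rx_tail     13 bytes  tsf_time, sq, new_rsr, rssi, rsr, sq_3
 */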
@@ -259,9 +277,9 @@ struct vnt_private {
u8 mac_hw;
/* netdev */
struct usb_device *usb;
+ struct usb_interface *intf;
u64 tsf_time;
- u8 rx_rate;
u32 rx_buf_sz;
int mc_list_count;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 3b94e80f1d5e..821aae8ca402 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -29,27 +29,21 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
- struct ieee80211_rx_status rx_status = { 0 };
- struct ieee80211_hdr *hdr;
- __le16 fc;
- u8 *rsr, *new_rsr, *rssi;
- __le64 *tsf_time;
+ struct ieee80211_rx_status *rx_status;
+ struct vnt_rx_header *head;
+ struct vnt_rx_tail *tail;
u32 frame_size;
- int ii, r;
- u8 *rx_rate, *sq, *sq_3;
- u32 wbk_status;
- u8 *skb_data;
- u16 *pay_load_len;
- u16 pay_load_with_padding;
+ int ii;
+ u16 rx_bitrate, pay_load_with_padding;
u8 rate_idx = 0;
- u8 rate[MAX_RATE] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108};
long rx_dbm;
skb = ptr_rcb->skb;
+ rx_status = IEEE80211_SKB_RXCB(skb);
/* [31:16]RcvByteCount ( not include 4-byte Status ) */
- wbk_status = *((u32 *)(skb->data));
- frame_size = wbk_status >> 16;
+ head = (struct vnt_rx_header *)skb->data;
+ frame_size = head->wbk_status >> 16;
frame_size += 4;
if (bytes_received != frame_size) {
@@ -63,106 +57,66 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
return false;
}
- skb_data = (u8 *)skb->data;
-
- rx_rate = skb_data + 5;
-
/* real Frame Size = USBframe_size -4WbkStatus - 4RxStatus */
/* -8TSF - 4RSR - 4SQ3 - ?Padding */
/* if SQ3 the range is 24~27, if no SQ3 the range is 20~23 */
- pay_load_len = (u16 *)(skb_data + 6);
-
/*Fix hardware bug => PLCP_Length error */
- if (((bytes_received - (*pay_load_len)) > 27) ||
- ((bytes_received - (*pay_load_len)) < 24) ||
- (bytes_received < (*pay_load_len))) {
+ if (((bytes_received - head->pay_load_len) > 27) ||
+ ((bytes_received - head->pay_load_len) < 24) ||
+ (bytes_received < head->pay_load_len)) {
dev_dbg(&priv->usb->dev, "Wrong PLCP Length %x\n",
- *pay_load_len);
+ head->pay_load_len);
return false;
}
sband = hw->wiphy->bands[hw->conf.chandef.chan->band];
-
- for (r = RATE_1M; r < MAX_RATE; r++) {
- if (*rx_rate == rate[r])
- break;
- }
-
- priv->rx_rate = r;
rx_bitrate = head->rx_rate * 5; /* hw rate is in 500 kbps units, bitrate in 100 kbps */
for (ii = 0; ii < sband->n_bitrates; ii++) {
- if (sband->bitrates[ii].hw_value == r) {
+ if (sband->bitrates[ii].bitrate == rx_bitrate) {
rate_idx = ii;
break;
}
}
if (ii == sband->n_bitrates) {
- dev_dbg(&priv->usb->dev, "Wrong RxRate %x\n", *rx_rate);
+ dev_dbg(&priv->usb->dev, "Wrong Rx Bit Rate %d\n", rx_bitrate);
return false;
}
- pay_load_with_padding = ((*pay_load_len / 4) +
- ((*pay_load_len % 4) ? 1 : 0)) * 4;
-
- tsf_time = (__le64 *)(skb_data + 8 + pay_load_with_padding);
-
- priv->tsf_time = le64_to_cpu(*tsf_time);
+ pay_load_with_padding = ((head->pay_load_len / 4) +
+ ((head->pay_load_len % 4) ? 1 : 0)) * 4;
- if (priv->bb_type == BB_TYPE_11G) {
- sq_3 = skb_data + 8 + pay_load_with_padding + 12;
- sq = sq_3;
- } else {
- sq = skb_data + 8 + pay_load_with_padding + 8;
- sq_3 = sq;
- }
-
- new_rsr = skb_data + 8 + pay_load_with_padding + 9;
- rssi = skb_data + 8 + pay_load_with_padding + 10;
+ tail = (struct vnt_rx_tail *)(skb->data +
+ sizeof(*head) + pay_load_with_padding);
+ priv->tsf_time = le64_to_cpu(tail->tsf_time);
- rsr = skb_data + 8 + pay_load_with_padding + 11;
- if (*rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
+ if (tail->rsr & (RSR_IVLDTYP | RSR_IVLDLEN))
return false;
- frame_size = *pay_load_len;
-
- vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+ vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm);
priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
priv->current_rssi = priv->bb_pre_ed_rssi;
- skb_pull(skb, 8);
- skb_trim(skb, frame_size);
-
- rx_status.mactime = priv->tsf_time;
- rx_status.band = hw->conf.chandef.chan->band;
- rx_status.signal = rx_dbm;
- rx_status.flag = 0;
- rx_status.freq = hw->conf.chandef.chan->center_freq;
+ skb_pull(skb, sizeof(*head));
+ skb_trim(skb, head->pay_load_len);
- if (!(*rsr & RSR_CRCOK))
- rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+ rx_status->mactime = priv->tsf_time;
+ rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->signal = rx_dbm;
+ rx_status->flag = 0;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
- hdr = (struct ieee80211_hdr *)(skb->data);
- fc = hdr->frame_control;
+ if (!(tail->rsr & RSR_CRCOK))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- rx_status.rate_idx = rate_idx;
-
- if (ieee80211_has_protected(fc)) {
- if (priv->local_id > REV_ID_VT3253_A1) {
- rx_status.flag |= RX_FLAG_DECRYPTED;
-
- /* Drop packet */
- if (!(*new_rsr & NEWRSR_DECRYPTOK)) {
- dev_kfree_skb(skb);
- return true;
- }
- }
- }
+ rx_status->rate_idx = rate_idx;
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ if (tail->new_rsr & NEWRSR_DECRYPTOK)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
ieee80211_rx_irqsafe(priv->hw, skb);
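A note on the rate matching above: the hardware reports rx_rate in 500 kbps units (2 for 1 Mbps, 22 for 11 Mbps, and so on) while mac80211's ieee80211_rate.bitrate is in 100 kbps units, so multiplying by 5 allows a direct comparison. A self-contained sketch of the lookup (the helper name is hypothetical):

/* Map a hardware rate byte (500 kbps units) onto a mac80211 rate index
 * by comparing bitrates (100 kbps units), e.g. 22 * 5 == 110 == 11 Mbps.
 */
static int vnt_rate_idx(struct ieee80211_supported_band *sband, u8 hw_rate)
{
	u16 bitrate = hw_rate * 5;
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == bitrate)
			return i;
	return -1;	/* unknown rate: caller drops the frame */
}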
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 60a00af250bf..70358d427211 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -30,7 +30,6 @@ int vnt_download_firmware(struct vnt_private *priv)
{
struct device *dev = &priv->usb->dev;
const struct firmware *fw;
- void *buffer = NULL;
u16 length;
int ii;
int ret = 0;
@@ -44,26 +43,17 @@ int vnt_download_firmware(struct vnt_private *priv)
goto end;
}
- buffer = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto free_fw;
- }
-
for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
- memcpy(buffer, fw->data + ii, length);
ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
- buffer);
+ fw->data + ii);
if (ret)
- goto free_buffer;
+ goto free_fw;
dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
}
-free_buffer:
- kfree(buffer);
free_fw:
release_firmware(fw);
end:
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index f40947955675..af215860be4c 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -99,9 +99,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
info->status.rates[0].count = tx_retry;
- if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) {
+ if (!(tsr & TSR_TMO)) {
info->status.rates[0].idx = idx;
- info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(priv->hw, context->skb);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 4ac85ecb0921..5e48b3ddb94c 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = {
int vnt_init(struct vnt_private *priv)
{
- if (!(vnt_init_registers(priv)))
+ if (vnt_init_registers(priv))
return -EAGAIN;
SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr);
@@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
priv = hw->priv;
priv->hw = hw;
priv->usb = udev;
+ priv->intf = intf;
vnt_set_options(priv);
@@ -1014,6 +1015,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(priv->hw, SUPPORTS_PS);
+ ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
priv->hw->max_signal = 100;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index f9020a4f7bbf..29caba728906 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -278,11 +278,9 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
- buf->duration_a = dur;
- buf->duration_b = dur;
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ buf->duration_a = hdr->duration_id;
+ buf->duration_b = hdr->duration_id;
} else {
buf->duration_a = vnt_get_duration_le(priv,
tx_context->pkt_type, need_ack);
@@ -371,10 +369,8 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
tx_context->pkt_type, &buf->ab);
/* Get Duration and TimeStampOff */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
- buf->duration = dur;
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ buf->duration = hdr->duration_id;
} else {
buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type,
need_ack);
@@ -815,10 +811,14 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
- if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- pkt_type = PK_TYPE_11GB;
- else
- pkt_type = PK_TYPE_11GA;
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ if (priv->basic_rates & VNT_B_RATES)
+ pkt_type = PK_TYPE_11GB;
+ else
+ pkt_type = PK_TYPE_11GA;
+ } else {
+ pkt_type = PK_TYPE_11A;
+ }
}
} else {
pkt_type = PK_TYPE_11B;
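The VNT_B_RATES check above keys the protected packet type off whether any 802.11b rate is in the BSS basic rate set; an illustration of the bitmap logic (rate index values assumed from device.h):

/*
 * basic_rates = BIT(RATE_1M) | BIT(RATE_11M) -> B rate present -> PK_TYPE_11GB
 * basic_rates with only OFDM bits set        -> pure-G BSS     -> PK_TYPE_11GA
 */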
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index d3304df6bd53..7bfccc48a366 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -34,7 +34,7 @@
#define USB_CTL_WAIT 500 /* ms */
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
- u16 index, u16 length, u8 *buffer)
+ u16 index, u16 length, const u8 *buffer)
{
int ret = 0;
u8 *usb_buffer;
@@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
kfree(usb_buffer);
- if (ret >= 0 && ret < (int)length)
+ if (ret == (int)length)
+ ret = 0;
+ else
ret = -EIO;
end_unlock:
@@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
reg_off, reg, sizeof(u8), &data);
}
+int vnt_control_out_blocks(struct vnt_private *priv,
+ u16 block, u8 reg, u16 length, u8 *data)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < length; i += block) {
+ u16 len = min_t(int, length - i, block);
+
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE,
+ i, reg, len, data + i);
+ if (ret)
+ goto end;
+ }
+end:
+ return ret;
+}
+
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
@@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
kfree(usb_buffer);
- if (ret >= 0 && ret < (int)length)
+ if (ret == (int)length)
+ ret = 0;
+ else
ret = -EIO;
end_unlock:
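The new vnt_control_out_blocks() helper above just iterates vnt_control_out() over fixed-size chunks; since each underlying control transfer now returns 0 on success, the helper can propagate the first error directly. A hedged usage sketch (the table contents are illustrative):

/* Write a 192-byte register table in 64-byte control transfers; with
 * VNT_REG_BLOCK_SIZE == 64 this issues three transfers at offsets
 * 0, 64 and 128.
 */
u8 table[192];	/* filled with register values elsewhere */
int ret;

ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
			     MESSAGE_REQUEST_BBREG, sizeof(table), table);
if (ret)
	dev_err(&priv->usb->dev, "register block write failed: %d\n", ret);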
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 95147ec7b96a..4e3341bc3221 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -18,14 +18,19 @@
#include "device.h"
+#define VNT_REG_BLOCK_SIZE 64
+
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
- u16 index, u16 length, u8 *buffer);
+ u16 index, u16 length, const u8 *buffer);
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer);
int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
+int vnt_control_out_blocks(struct vnt_private *priv,
+ u16 block, u8 reg, u16 len, u8 *data);
+
int vnt_start_interrupt_urb(struct vnt_private *priv);
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
int vnt_tx_context(struct vnt_private *priv,
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 3eb2f11a5de1..2c5250ca2801 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work)
if (vnt_init(priv)) {
/* If fail all ends TODO retry */
dev_err(&priv->usb->dev, "failed to start\n");
+ usb_set_intfdata(priv->intf, NULL);
ieee80211_free_hw(priv->hw);
return;
}
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
index e44772289af8..efcb7c6a5aa7 100644
--- a/drivers/staging/wfx/TODO
+++ b/drivers/staging/wfx/TODO
@@ -1,17 +1,60 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
- - I have to take a decision about secure link support. I can:
- - drop completely
- - keep it in an external patch (my preferred option)
- - replace call to mbedtls with kernel crypto API (necessitate a
- bunch of work)
- - pull mbedtls in kernel (non-realistic)
-
- - mac80211 interface does not (yet) have expected quality to be placed
- outside of staging:
- - Some processings are redundant with mac80211 ones
- - Many members from wfx_dev/wfx_vif can be retrieved from mac80211
- structures
- - Some functions are too complex
- - ...
+ - All structures defined in hif_api_*.h are intended to be sent to / received
+ from the hardware. All their members should be declared __le32 or __le16.
+ See:
+ https://lore.kernel.org/lkml/20191111202852.GX26530@ZenIV.linux.org.uk
+
+ - Once the previous item is done, it will be possible to audit the driver
+ with `sparse'. It will probably find tons of problems on big endian
+ architectures.
+
+ - hif_api_*.h have been imported from the firmware code. Some of the
+ structures are never used in the driver.
+
+ - The driver tries to maintain the power save status of the stations.
+ However, this work is already done by mac80211. sta_asleep_mask and
+ pspoll_mask should be dropped.
+
+ - wfx_tx_queues_get() should be reworked. It currently tries to compute the
+ QoS policy itself. However, the firmware already does that job. The firmware
+ would prefer to have a few packets in each queue and be able to choose
+ itself which queue to use.
+
+ - As suggested by Felix, rate control could be improved following this idea:
+ https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
+
+ - When the driver is about to lose the BSS, it forges its own Null Func
+ request (see wfx_cqm_bssloss_sm()). It should use the mechanism provided
+ by mac80211.
+
+ - The AP is actually set up after a call to wfx_bss_info_changed(). Yet,
+ ieee80211_ops provides a start_ap() callback.
+
+ - The current process for joining a network is incredibly complex. It should
+ be reworked.
+
+ - Monitor mode is not implemented despite being mandated by mac80211.
+
+ - The "compatible" values are not correct. They should be "vendor,chip". See:
+ https://lore.kernel.org/driverdev-devel/5226570.CMH5hVlZcI@pc-42
+
+ - The "state" field from wfx_vif should be replaced by "vif->type".
+
+ - It seems that wfx_upload_keys() is useless.
+
+ - "event_queue" from wfx_vif seems overkill. These event are rare and they
+ probably could be handled in a simpler fashion.
+
+ - Feature called "secure link" should be either developed (using kernel
+ crypto API) or dropped.
+
+ - In wfx_cmd_send(), "async" allows sending a command without waiting for
+ the reply. It may help in some situations, but it is not yet used. In
+ addition, it may cause some trouble:
+ https://lore.kernel.org/driverdev-devel/alpine.DEB.2.21.1910041317381.2992@hadrien/
+ So, fix it (by replacing the mutex with a semaphore) or drop it.
+
+ - The chip supports P2P, but the driver does not implement it.
+
+ - The chip supports a kind of Mesh, but the driver does not implement it.
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 2432ba95c2f5..983c41d1fe7c 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -271,8 +271,7 @@ static void bh_work(struct work_struct *work)
if (last_op_is_rx)
ack_sdio_data(wdev);
- if (!wdev->hif.tx_buffers_used && !work_pending(work) &&
- !atomic_read(&wdev->scan_in_progress)) {
+ if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
device_release(wdev);
release_chip = true;
}
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index ab0cda1e124f..40bc33035de2 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -107,6 +107,8 @@ static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
cpu_to_le16s(&regaddr);
+ // Register address and CONFIG content always use 16bit big endian
+ // ("BADC" order)
if (bus->need_swab)
swab16s(&regaddr);
if (bus->need_swab && addr == WFX_REG_CONFIG)
@@ -183,7 +185,7 @@ static int wfx_spi_probe(struct spi_device *func)
if (func->bits_per_word != 16 && func->bits_per_word != 8)
dev_warn(&func->dev, "unusual bits/word value: %d\n",
func->bits_per_word);
- if (func->max_speed_hz > 49000000)
+ if (func->max_speed_hz > 50000000)
dev_warn(&func->dev, "%dHz is a very high speed\n",
func->max_speed_hz);
@@ -223,8 +225,7 @@ static int wfx_spi_probe(struct spi_device *func)
return ret;
}
-/* Disconnect Function to be called by SPI stack when device is disconnected */
-static int wfx_spi_disconnect(struct spi_device *func)
+static int wfx_spi_remove(struct spi_device *func)
{
struct wfx_spi_priv *bus = spi_get_drvdata(func);
@@ -263,5 +264,5 @@ struct spi_driver wfx_spi_driver = {
},
.id_table = wfx_spi_id,
.probe = wfx_spi_probe,
- .remove = wfx_spi_disconnect,
+ .remove = wfx_spi_remove,
};
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index e7fcce8d0cc4..5d198457c6ce 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -13,42 +13,9 @@
#include "bh.h"
#include "sta.h"
-static int wfx_handle_pspoll(struct wfx_vif *wvif, struct sk_buff *skb)
-{
- struct ieee80211_sta *sta;
- struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
- int link_id = 0;
- u32 pspoll_mask = 0;
- int i;
-
- if (wvif->state != WFX_STATE_AP)
- return 1;
- if (!ether_addr_equal(wvif->vif->addr, pspoll->bssid))
- return 1;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(wvif->vif, pspoll->ta);
- if (sta)
- link_id = ((struct wfx_sta_priv *)&sta->drv_priv)->link_id;
- rcu_read_unlock();
- if (link_id)
- pspoll_mask = BIT(link_id);
- else
- return 1;
-
- wvif->pspoll_mask |= pspoll_mask;
- /* Do not report pspols if data for given link id is queued already. */
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- if (wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
- pspoll_mask)) {
- wfx_bh_request_tx(wvif->wdev);
- return 1;
- }
- }
- return 0;
-}
-
-static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, struct sk_buff *skb)
+static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
+ const struct hif_ind_rx *arg,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
@@ -98,15 +65,12 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, s
}
-void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
- struct sk_buff *skb)
+void wfx_rx_cb(struct wfx_vif *wvif,
+ const struct hif_ind_rx *arg, struct sk_buff *skb)
{
- int link_id = arg->rx_flags.peer_sta_id;
struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct wfx_link_entry *entry = NULL;
- bool early_data = false;
memset(hdr, 0, sizeof(*hdr));
@@ -116,14 +80,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
ieee80211_is_beacon(frame->frame_control)))
goto drop;
- if (link_id && link_id <= WFX_MAX_STA_IN_AP_MODE) {
- entry = &wvif->link_id_db[link_id - 1];
- entry->timestamp = jiffies;
- if (entry->status == WFX_LINK_SOFT &&
- ieee80211_is_data(frame->frame_control))
- early_data = true;
- }
-
if (arg->status == HIF_STATUS_MICFAILURE)
hdr->flag |= RX_FLAG_MMIC_ERROR;
else if (arg->status)
@@ -134,10 +90,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
goto drop;
}
- if (ieee80211_is_pspoll(frame->frame_control))
- if (wfx_handle_pspoll(wvif, skb))
- goto drop;
-
hdr->band = NL80211_BAND_2GHZ;
hdr->freq = ieee80211_channel_to_frequency(arg->channel_number,
hdr->band);
@@ -171,20 +123,6 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
!arg->status && wvif->vif &&
ether_addr_equal(ieee80211_get_SA(frame),
wvif->vif->bss_conf.bssid)) {
- const u8 *tim_ie;
- u8 *ies = mgmt->u.beacon.variable;
- size_t ies_len = skb->len - (ies - skb->data);
-
- tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
- if (tim_ie) {
- struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2];
-
- if (wvif->dtim_period != tim->dtim_period) {
- wvif->dtim_period = tim->dtim_period;
- schedule_work(&wvif->set_beacon_wakeup_period_work);
- }
- }
-
/* Disable beacon filter once we're associated... */
if (wvif->disable_beacon_filter &&
(wvif->vif->bss_conf.assoc ||
@@ -193,18 +131,7 @@ void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
schedule_work(&wvif->update_filtering_work);
}
}
-
- if (early_data) {
- spin_lock_bh(&wvif->ps_state_lock);
- /* Double-check status with lock held */
- if (entry->status == WFX_LINK_SOFT)
- skb_queue_tail(&entry->rx_queue, skb);
- else
- ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
- spin_unlock_bh(&wvif->ps_state_lock);
- } else {
- ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
- }
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
return;
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index a50ce352bc5e..61c28bfd2a37 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -13,7 +13,7 @@
struct wfx_vif;
struct sk_buff;
-void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
- struct sk_buff *skb);
+void wfx_rx_cb(struct wfx_vif *wvif,
+ const struct hif_ind_rx *arg, struct sk_buff *skb);
#endif /* WFX_DATA_RX_H */
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index df2640a79f02..20f4740734f2 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -16,8 +16,7 @@
#include "traces.h"
#include "hif_tx_mib.h"
-#define WFX_INVALID_RATE_ID (0xFF)
-#define WFX_LINK_ID_NO_ASSOC 15
+#define WFX_INVALID_RATE_ID 15
#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
static int wfx_get_hw_rate(struct wfx_dev *wdev,
@@ -169,7 +168,8 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
wfx_tx_policy_build(wvif, &wanted, rates);
spin_lock_bh(&cache->lock);
- if (WARN_ON(list_empty(&cache->free))) {
+ if (list_empty(&cache->free)) {
+ WARN(1, "unable to get a valid Tx policy");
spin_unlock_bh(&cache->lock);
return WFX_INVALID_RATE_ID;
}
@@ -184,7 +184,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
*/
entry = list_entry(cache->free.prev, struct tx_policy, link);
memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
- entry->uploaded = 0;
+ entry->uploaded = false;
entry->usage_count = 0;
idx = entry - cache->cache;
}
@@ -202,6 +202,8 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
int usage, locked;
struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ if (idx == WFX_INVALID_RATE_ID)
+ return;
spin_lock_bh(&cache->lock);
locked = list_empty(&cache->free);
usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
@@ -214,42 +216,29 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
+ struct tx_policy *policies = wvif->tx_policy_cache.cache;
+ u8 tmp_rates[12];
int i;
- struct tx_policy_cache *cache = &wvif->tx_policy_cache;
- struct hif_mib_set_tx_rate_retry_policy *arg =
- kzalloc(struct_size(arg,
- tx_rate_retry_policy,
- HIF_MIB_NUM_TX_RATE_RETRY_POLICIES),
- GFP_KERNEL);
- struct hif_mib_tx_rate_retry_policy *dst;
- spin_lock_bh(&cache->lock);
- /* Upload only modified entries. */
- for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
- struct tx_policy *src = &cache->cache[i];
-
- if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
- dst = arg->tx_rate_retry_policy +
- arg->num_tx_rate_policies;
-
- dst->policy_index = i;
- dst->short_retry_count = 255;
- dst->long_retry_count = 255;
- dst->first_rate_sel = 1;
- dst->terminate = 1;
- dst->count_init = 1;
- memcpy(&dst->rates, src->rates, sizeof(src->rates));
- src->uploaded = 1;
- arg->num_tx_rate_policies++;
+ do {
+ spin_lock_bh(&wvif->tx_policy_cache.lock);
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ if (!policies[i].uploaded &&
+ memzcmp(policies[i].rates, sizeof(policies[i].rates)))
+ break;
+ if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) {
+ policies[i].uploaded = 1;
+ memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
+ spin_unlock_bh(&wvif->tx_policy_cache.lock);
+ hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
+ } else {
+ spin_unlock_bh(&wvif->tx_policy_cache.lock);
}
- }
- spin_unlock_bh(&cache->lock);
- hif_set_tx_rate_retry_policy(wvif, arg);
- kfree(arg);
+ } while (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES);
return 0;
}
-static void wfx_tx_policy_upload_work(struct work_struct *work)
+void wfx_tx_policy_upload_work(struct work_struct *work)
{
struct wfx_vif *wvif =
container_of(work, struct wfx_vif, tx_policy_upload_work);
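The reworked wfx_tx_policy_upload() above follows the usual snapshot-then-drop-the-lock pattern: the rates are copied to a stack buffer under the spinlock, the lock is released, and only then is the potentially sleeping hif_set_tx_rate_retry_policy() call issued. A generic sketch of the idiom, with placeholder names:

spin_lock_bh(&cache->lock);
memcpy(snapshot, entry->data, sizeof(snapshot));	/* copy under lock */
spin_unlock_bh(&cache->lock);
blocking_update(snapshot);	/* may sleep, so must run unlocked */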
@@ -270,170 +259,11 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
spin_lock_init(&cache->lock);
INIT_LIST_HEAD(&cache->used);
INIT_LIST_HEAD(&cache->free);
- INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
list_add(&cache->cache[i].link, &cache->free);
}
-/* Link ID related functions */
-
-static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
-{
- int i, ret = 0;
- unsigned long max_inactivity = 0;
- unsigned long now = jiffies;
-
- spin_lock_bh(&wvif->ps_state_lock);
- for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
- if (!wvif->link_id_db[i].status) {
- ret = i + 1;
- break;
- } else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
- !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
- unsigned long inactivity =
- now - wvif->link_id_db[i].timestamp;
-
- if (inactivity < max_inactivity)
- continue;
- max_inactivity = inactivity;
- ret = i + 1;
- }
- }
-
- if (ret) {
- struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
-
- entry->status = WFX_LINK_RESERVE;
- ether_addr_copy(entry->mac, mac);
- memset(&entry->buffered, 0, WFX_MAX_TID);
- skb_queue_head_init(&entry->rx_queue);
- wfx_tx_lock(wvif->wdev);
-
- if (!schedule_work(&wvif->link_id_work))
- wfx_tx_unlock(wvif->wdev);
- } else {
- dev_info(wvif->wdev->dev, "no more link-id available\n");
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- return ret;
-}
-
-int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
-{
- int i, ret = 0;
-
- spin_lock_bh(&wvif->ps_state_lock);
- for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
- if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
- wvif->link_id_db[i].status) {
- wvif->link_id_db[i].timestamp = jiffies;
- ret = i + 1;
- break;
- }
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- return ret;
-}
-
-static int wfx_map_link(struct wfx_vif *wvif,
- struct wfx_link_entry *link_entry, int sta_id)
-{
- int ret;
-
- ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
-
- if (ret == 0)
- /* Save the MAC address currently associated with the peer
- * for future unmap request
- */
- ether_addr_copy(link_entry->old_mac, link_entry->mac);
-
- return ret;
-}
-
-int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
-{
- u8 *mac_addr = NULL;
-
- if (sta_id)
- mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
-
- return hif_map_link(wvif, mac_addr, 1, sta_id);
-}
-
-void wfx_link_id_gc_work(struct work_struct *work)
-{
- struct wfx_vif *wvif =
- container_of(work, struct wfx_vif, link_id_gc_work.work);
- unsigned long now = jiffies;
- unsigned long next_gc = -1;
- long ttl;
- u32 mask;
- int i;
-
- if (wvif->state != WFX_STATE_AP)
- return;
-
- wfx_tx_lock_flush(wvif->wdev);
- spin_lock_bh(&wvif->ps_state_lock);
- for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
- bool need_reset = false;
-
- mask = BIT(i + 1);
- if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
- (wvif->link_id_db[i].status == WFX_LINK_HARD &&
- !(wvif->link_id_map & mask))) {
- if (wvif->link_id_map & mask) {
- wvif->sta_asleep_mask &= ~mask;
- wvif->pspoll_mask &= ~mask;
- need_reset = true;
- }
- wvif->link_id_map |= mask;
- if (wvif->link_id_db[i].status != WFX_LINK_HARD)
- wvif->link_id_db[i].status = WFX_LINK_SOFT;
-
- spin_unlock_bh(&wvif->ps_state_lock);
- if (need_reset)
- wfx_unmap_link(wvif, i + 1);
- wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
- next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
- spin_lock_bh(&wvif->ps_state_lock);
- } else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
- ttl = wvif->link_id_db[i].timestamp - now +
- WFX_LINK_ID_GC_TIMEOUT;
- if (ttl <= 0) {
- need_reset = true;
- wvif->link_id_db[i].status = WFX_LINK_OFF;
- wvif->link_id_map &= ~mask;
- wvif->sta_asleep_mask &= ~mask;
- wvif->pspoll_mask &= ~mask;
- spin_unlock_bh(&wvif->ps_state_lock);
- wfx_unmap_link(wvif, i + 1);
- spin_lock_bh(&wvif->ps_state_lock);
- } else {
- next_gc = min_t(unsigned long, next_gc, ttl);
- }
- }
- if (need_reset)
- skb_queue_purge(&wvif->link_id_db[i].rx_queue);
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- if (next_gc != -1)
- schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
- wfx_tx_unlock(wvif->wdev);
-}
-
-void wfx_link_id_work(struct work_struct *work)
-{
- struct wfx_vif *wvif =
- container_of(work, struct wfx_vif, link_id_work);
-
- wfx_tx_flush(wvif->wdev);
- wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
- wfx_tx_unlock(wvif->wdev);
-}
-
/* Tx implementation */
static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
@@ -452,29 +282,21 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
struct ieee80211_sta *sta)
{
u32 mask = ~BIT(tx_priv->raw_link_id);
+ struct wfx_sta_priv *sta_priv;
+ int tid = ieee80211_get_tid(hdr);
spin_lock_bh(&wvif->ps_state_lock);
- if (ieee80211_is_auth(hdr->frame_control)) {
+ if (ieee80211_is_auth(hdr->frame_control))
wvif->sta_asleep_mask &= mask;
- wvif->pspoll_mask &= mask;
- }
-
- if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM &&
- !wvif->mcast_buffered) {
- wvif->mcast_buffered = true;
- if (wvif->sta_asleep_mask)
- schedule_work(&wvif->mcast_start_work);
- }
-
- if (tx_priv->raw_link_id) {
- wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
- if (tx_priv->tid < WFX_MAX_TID)
- wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
- }
spin_unlock_bh(&wvif->ps_state_lock);
- if (sta)
- ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+ if (sta) {
+ sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+ spin_lock_bh(&sta_priv->lock);
+ sta_priv->buffered[tid]++;
+ ieee80211_sta_set_buffered(sta, tid, true);
+ spin_unlock_bh(&sta_priv->lock);
+ }
}
static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
@@ -484,7 +306,6 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
struct wfx_sta_priv *sta_priv =
sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
const u8 *da = ieee80211_get_DA(hdr);
- int ret;
if (sta_priv && sta_priv->link_id)
return sta_priv->link_id;
@@ -492,14 +313,7 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
return 0;
if (is_multicast_ether_addr(da))
return 0;
- ret = wfx_find_link_id(wvif, da);
- if (!ret)
- ret = wfx_alloc_link_id(wvif, da);
- if (!ret) {
- dev_err(wvif->wdev->dev, "no more link-id available\n");
- return WFX_LINK_ID_NO_ASSOC;
- }
- return ret;
+ return WFX_LINK_ID_NO_ASSOC;
}
static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
@@ -523,9 +337,9 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
if (rates[i + 1].idx == rates[i].idx &&
rates[i].idx != -1) {
- rates[i].count =
- max_t(int, rates[i].count,
- rates[i + 1].count);
+ rates[i].count += rates[i + 1].count;
+ if (rates[i].count > 15)
+ rates[i].count = 15;
rates[i + 1].idx = -1;
rates[i + 1].count = 0;
@@ -537,6 +351,17 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
}
}
} while (!finished);
+ // Ensure that MCS0 or 1Mbps is present at the end of the retry list
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[i].idx == 0)
+ break;
+ if (rates[i].idx == -1) {
+ rates[i].idx = 0;
+ rates[i].count = 8; // == hw->max_rate_tries
+ rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+ break;
+ }
+ }
// All retries use long GI
for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
@@ -550,7 +375,8 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
rate_id = wfx_tx_policy_get(wvif,
tx_info->driver_rates, &tx_policy_renew);
- WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy");
+ if (rate_id == WFX_INVALID_RATE_ID)
+ dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
if (tx_policy_renew) {
/* FIXME: It's not so optimal to stop TX queues every now and
@@ -584,17 +410,6 @@ static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, str
return ret;
}
-static u8 wfx_tx_get_tid(struct ieee80211_hdr *hdr)
-{
- // FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
- if (!ieee80211_is_data(hdr->frame_control))
- return WFX_MAX_TID;
- if (ieee80211_is_data_qos(hdr->frame_control))
- return ieee80211_get_tid(hdr);
- else
- return 0;
-}
-
static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
{
int mic_space;
@@ -626,7 +441,6 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
// Fill tx_priv
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
- tx_priv->tid = wfx_tx_get_tid(hdr);
tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
tx_priv->link_id = tx_priv->raw_link_id;
if (ieee80211_has_protected(hdr->frame_control))
@@ -655,9 +469,15 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// Fill tx request
req = (struct hif_req_tx *)hif_msg->body;
- req->packet_id = queue_id << 16 |
- IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ // packet_id just need to be unique on device. 32bits are more than
+ // necessary for that task, so we tae advantage of it to add some extra
+ // data for debug.
+ req->packet_id = queue_id << 28 |
+ IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16 |
+ (atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF);
req->data_flags.fc_offset = offset;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ req->data_flags.after_dtim = 1;
req->queue_id.peer_sta_id = tx_priv->raw_link_id;
// Queue index are inverted between firmware and Linux
req->queue_id.queue_id = 3 - queue_id;
@@ -667,6 +487,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// Auxiliary operations
wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ schedule_work(&wvif->update_tim_work);
wfx_bh_request_tx(wvif->wdev);
return 0;
}
@@ -706,7 +528,7 @@ drop:
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
{
int i;
int tx_count;
@@ -735,7 +557,9 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
rate = &tx_info->status.rates[i];
if (rate->idx < 0)
break;
- if (tx_count < rate->count && arg->status && arg->ack_failures)
+ if (tx_count < rate->count &&
+ arg->status == HIF_STATUS_RETRY_EXCEEDED &&
+ arg->ack_failures)
dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n",
rate->count, tx_count);
if (tx_count <= rate->count && tx_count &&
@@ -776,14 +600,11 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
} else if (arg->status == HIF_REQUEUE) {
- /* "REQUEUE" means "implicit suspend" */
- struct hif_ind_suspend_resume_tx suspend = {
- .suspend_resume_flags.resume = 0,
- .suspend_resume_flags.bc_mc_only = 1,
- };
-
WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
- wfx_suspend_resume(wvif, &suspend);
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
+ schedule_work(&wvif->update_tim_work);
+ }
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
} else {
if (wvif->bss_loss_state &&
@@ -793,31 +614,25 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
wfx_pending_remove(wvif->wdev, skb);
}
-static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
- struct hif_req_tx *req)
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
{
- struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int tid = wfx_tx_get_tid(hdr);
- int raw_link_id = req->queue_id.peer_sta_id;
- u8 *buffered;
-
- if (raw_link_id && tid < WFX_MAX_TID) {
- buffered = wvif->link_id_db[raw_link_id - 1].buffered;
-
- spin_lock_bh(&wvif->ps_state_lock);
- WARN(!buffered[tid], "inconsistent notification");
- buffered[tid]--;
- spin_unlock_bh(&wvif->ps_state_lock);
-
- if (!buffered[tid]) {
- rcu_read_lock();
- sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
- if (sta)
- ieee80211_sta_set_buffered(sta, tid, false);
- rcu_read_unlock();
- }
+ struct ieee80211_sta *sta;
+ struct wfx_sta_priv *sta_priv;
+ int tid = ieee80211_get_tid(hdr);
+
+ rcu_read_lock(); // protect sta
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+ spin_lock_bh(&sta_priv->lock);
+ WARN(!sta_priv->buffered[tid], "inconsistent notification");
+ sta_priv->buffered[tid]--;
+ if (!sta_priv->buffered[tid])
+ ieee80211_sta_set_buffered(sta, tid, false);
+ spin_unlock_bh(&sta_priv->lock);
}
+ rcu_read_unlock();
}
void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
@@ -831,7 +646,7 @@ void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
WARN_ON(!wvif);
skb_pull(skb, offset);
- wfx_notify_buffered_tx(wvif, skb, req);
+ wfx_notify_buffered_tx(wvif, skb);
wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index 29faa5640516..04b2147101b6 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -14,34 +14,15 @@
#include "hif_api_cmd.h"
#include "hif_api_mib.h"
-// FIXME: use IEEE80211_NUM_TIDS
-#define WFX_MAX_TID 8
-
struct wfx_tx_priv;
struct wfx_dev;
struct wfx_vif;
-enum wfx_link_status {
- WFX_LINK_OFF,
- WFX_LINK_RESERVE,
- WFX_LINK_SOFT,
- WFX_LINK_HARD,
-};
-
-struct wfx_link_entry {
- unsigned long timestamp;
- enum wfx_link_status status;
- u8 mac[ETH_ALEN];
- u8 old_mac[ETH_ALEN];
- u8 buffered[WFX_MAX_TID];
- struct sk_buff_head rx_queue;
-};
-
struct tx_policy {
struct list_head link;
+ int usage_count;
u8 rates[12];
- u8 usage_count;
- u8 uploaded;
+ bool uploaded;
};
struct tx_policy_cache {
@@ -57,21 +38,16 @@ struct wfx_tx_priv {
struct ieee80211_key_conf *hw_key;
u8 link_id;
u8 raw_link_id;
- u8 tid;
} __packed;
void wfx_tx_policy_init(struct wfx_vif *wvif);
+void wfx_tx_policy_upload_work(struct work_struct *work);
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg);
void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
-int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
-void wfx_link_id_work(struct work_struct *work);
-void wfx_link_id_gc_work(struct work_struct *work);
-int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
-
static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info;
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index d17a75242365..1164aba118a1 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -145,7 +145,7 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
st->pwr_clk_freq,
st->is_ext_pwr_clk ? "yes" : "no");
seq_printf(seq,
- "N. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
+ "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
st->nb_rx_frame, st->per_total, st->throughput);
seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index dbf8bda71ff7..9d61082c1e6c 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -61,7 +61,7 @@
#define DCA_TIMEOUT 50 // milliseconds
#define WAKEUP_TIMEOUT 200 // milliseconds
-static const char * const fwio_error_strings[] = {
+static const char * const fwio_errors[] = {
[ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
[ERR_SIG_VERIF_FAILED] = "Signature verification failed",
[ERR_AES_CTRL_KEY] = "AES control key not initialized",
@@ -220,22 +220,16 @@ static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
static void print_boot_status(struct wfx_dev *wdev)
{
- u32 val32;
+ u32 reg;
- sram_reg_read(wdev, WFX_STATUS_INFO, &val32);
- if (val32 == 0x12345678) {
- dev_info(wdev->dev, "no error reported by secure boot\n");
- } else {
- sram_reg_read(wdev, WFX_ERR_INFO, &val32);
- if (val32 < ARRAY_SIZE(fwio_error_strings) &&
- fwio_error_strings[val32])
- dev_info(wdev->dev, "secure boot error: %s\n",
- fwio_error_strings[val32]);
- else
- dev_info(wdev->dev,
- "secure boot error: Unknown (0x%02x)\n",
- val32);
- }
+ sram_reg_read(wdev, WFX_STATUS_INFO, &reg);
+ if (reg == 0x12345678)
+ return;
+ sram_reg_read(wdev, WFX_ERR_INFO, &reg);
+ if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg])
+ dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]);
+ else
+ dev_info(wdev->dev, "secure boot: Error %#02x\n", reg);
}
static int load_firmware_secure(struct wfx_dev *wdev)
@@ -345,7 +339,7 @@ int wfx_init_device(struct wfx_dev *wdev)
ktime_t now, start;
u32 reg;
- reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_WORD_MODE2;
+ reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD;
if (wdev->pdata.use_rising_clk)
reg |= CFG_CLK_RISE_EDGE;
ret = config_reg_write(wdev, reg);
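
print_boot_status() now indexes a sparse, designated-initializer string table and falls back to printing the raw code. The same lookup pattern, reduced to a standalone sketch (the two error codes shown are placeholders, not the firmware's actual values):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { ERR_INVALID_SEC_TYPE = 1, ERR_SIG_VERIF_FAILED = 3 };

static const char * const fwio_errors[] = {
        [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
        [ERR_SIG_VERIF_FAILED] = "Signature verification failed",
};

static void print_boot_error(unsigned int reg)
{
        /* Gaps left by the designated initializers are NULL, hence both tests. */
        if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg])
                printf("secure boot: %s\n", fwio_errors[reg]);
        else
                printf("secure boot: Error %#02x\n", reg);
}

int main(void)
{
        print_boot_error(ERR_SIG_VERIF_FAILED);
        print_boot_error(0xfe); /* unknown codes take the hex fallback */
        return 0;
}
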
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index c15831de4ff4..5554d6eddbf3 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -137,7 +137,7 @@ struct hif_ie_tlv {
struct hif_req_update_ie {
struct hif_ie_flags ie_flags;
- u16 num_i_es;
+ u16 num_ies;
struct hif_ie_tlv ie[];
} __packed;
@@ -180,7 +180,7 @@ struct hif_req_start_scan {
struct hif_auto_scan_param auto_scan_param;
u8 num_of_probe_requests;
u8 probe_delay;
- u8 num_of_ssi_ds;
+ u8 num_of_ssids;
u8 num_of_channels;
u32 min_channel_time;
u32 max_channel_time;
@@ -188,7 +188,7 @@ struct hif_req_start_scan {
u8 ssid_and_channel_lists[];
} __packed;
-struct hif_start_scan_req_cstnbssid_body {
+struct hif_req_start_scan_alt {
u8 band;
struct hif_scan_type scan_type;
struct hif_scan_flags scan_flags;
@@ -196,7 +196,7 @@ struct hif_start_scan_req_cstnbssid_body {
struct hif_auto_scan_param auto_scan_param;
u8 num_of_probe_requests;
u8 probe_delay;
- u8 num_of_ssi_ds;
+ u8 num_of_ssids;
u8 num_of_channels;
u32 min_channel_time;
u32 max_channel_time;
@@ -253,7 +253,8 @@ struct hif_queue {
struct hif_data_flags {
u8 more:1;
u8 fc_offset:3;
- u8 reserved:4;
+ u8 after_dtim:1;
+ u8 reserved:3;
} __packed;
struct hif_tx_flags {
@@ -377,17 +378,6 @@ struct hif_cnf_edca_queue_params {
u32 status;
} __packed;
-enum hif_ap_mode {
- HIF_MODE_IBSS = 0x0,
- HIF_MODE_BSS = 0x1
-};
-
-enum hif_preamble {
- HIF_PREAMBLE_LONG = 0x0,
- HIF_PREAMBLE_SHORT = 0x1,
- HIF_PREAMBLE_SHORT_LONG12 = 0x2
-};
-
struct hif_join_flags {
u8 reserved1:2;
u8 force_no_beacon:1;
@@ -396,14 +386,16 @@ struct hif_join_flags {
} __packed;
struct hif_req_join {
- u8 mode;
+ u8 infrastructure_bss_mode:1;
+ u8 reserved1:7;
u8 band;
u16 channel_number;
u8 bssid[ETH_ALEN];
u16 atim_window;
- u8 preamble_type;
+ u8 short_preamble:1;
+ u8 reserved2:7;
u8 probe_for_join;
- u8 reserved;
+ u8 reserved3;
struct hif_join_flags join_flags;
u32 ssid_length;
u8 ssid[HIF_API_SSID_SIZE];
@@ -466,8 +458,9 @@ struct hif_req_start {
u32 reserved1;
u32 beacon_interval;
u8 dtim_period;
- u8 preamble_type;
- u8 reserved2;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 reserved3;
u8 ssid_length;
u8 ssid[HIF_API_SSID_SIZE];
u32 basic_rate_set;
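
The hif_req_join and hif_req_start changes above replace a whole preamble_type byte (and the now-deleted enum hif_preamble) with a single short_preamble bit plus explicit reserved bits, keeping the wire layout one octet wide. A sketch of the idea; bitfield ordering is implementation-defined in C, which is why the header spells out the reserved bits and packs the struct:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct preamble_byte {
        uint8_t short_preamble:1;
        uint8_t reserved:7;
} __attribute__((packed));

int main(void)
{
        struct preamble_byte p = { .short_preamble = 1 };

        /* The flag plus its reserved bits must still occupy one octet. */
        static_assert(sizeof(struct preamble_byte) == 1, "wire layout");
        printf("short_preamble=%u, sizeof=%zu\n", p.short_preamble, sizeof(p));
        return 0;
}
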
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index 94b789ceb4ff..0c67cd4c1593 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -181,19 +181,13 @@ struct hif_mib_ipv6_addr_data_frame_condition {
u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
} __packed;
-union hif_addr_type {
- u8 value;
- struct {
- u8 type_unicast:1;
- u8 type_multicast:1;
- u8 type_broadcast:1;
- u8 reserved:5;
- } bits;
-};
+#define HIF_FILTER_UNICAST 0x1
+#define HIF_FILTER_MULTICAST 0x2
+#define HIF_FILTER_BROADCAST 0x4
struct hif_mib_uc_mc_bc_data_frame_condition {
u8 condition_idx;
- union hif_addr_type param;
+ u8 allowed_frames;
u8 reserved[2];
} __packed;
@@ -212,9 +206,11 @@ struct hif_mib_config_data_filter {
} __packed;
struct hif_mib_set_data_filtering {
- u8 default_filter;
- u8 enable;
- u8 reserved[2];
+ u8 invert_matching:1;
+ u8 reserved1:7;
+ u8 enable:1;
+ u8 reserved2:7;
+ u8 reserved3[2];
} __packed;
enum hif_arp_ns_frame_treatment {
@@ -395,11 +391,6 @@ struct hif_mib_non_erp_protection {
u8 reserved2[3];
} __packed;
-enum hif_tx_mode {
- HIF_TX_MODE_MIXED = 0x0,
- HIF_TX_MODE_GREENFIELD = 0x1
-};
-
enum hif_tmplt {
HIF_TMPLT_PRBREQ = 0x0,
HIF_TMPLT_BCN = 0x1,
@@ -471,9 +462,11 @@ struct hif_mib_set_association_mode {
u8 mode:1;
u8 rateset:1;
u8 spacing:1;
- u8 reserved:4;
- u8 preamble_type;
- u8 mixed_or_greenfield_type;
+ u8 reserved1:4;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 greenfield:1;
+ u8 reserved3:7;
u8 mpdu_start_spacing;
u32 basic_rate_set;
} __packed;
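
The same simplification lands in hif_api_mib.h: the bit-per-address-type union becomes three plain defines OR-ed into allowed_frames. For instance, a condition passing unicast and broadcast while dropping multicast could be built as below (a hypothetical caller, not driver code):

#include <stdint.h>
#include <stdio.h>

#define HIF_FILTER_UNICAST   0x1
#define HIF_FILTER_MULTICAST 0x2
#define HIF_FILTER_BROADCAST 0x4

int main(void)
{
        uint8_t allowed_frames = HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST;

        printf("multicast allowed: %s\n",
               allowed_frames & HIF_FILTER_MULTICAST ? "yes" : "no");
        return 0;
}
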
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index 820de216be0c..33c22c5d629d 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -18,8 +18,8 @@
#include "secure_link.h"
#include "hif_api_cmd.h"
-static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_generic_confirm(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
// All confirm messages start with status
int status = le32_to_cpu(*((__le32 *) buf));
@@ -59,9 +59,10 @@ static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
return status;
}
-static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+static int hif_tx_confirm(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_cnf_tx *body = buf;
+ const struct hif_cnf_tx *body = buf;
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
WARN_ON(!wvif);
@@ -72,31 +73,27 @@ static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
return 0;
}
-static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_multi_tx_confirm(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_cnf_multi_transmit *body = buf;
- struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+ const struct hif_cnf_multi_transmit *body = buf;
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- int count = body->num_tx_confs;
int i;
- WARN(count <= 0, "corrupted message");
+ WARN(body->num_tx_confs <= 0, "corrupted message");
WARN_ON(!wvif);
if (!wvif)
return -EFAULT;
- for (i = 0; i < count; ++i) {
- wfx_tx_confirm_cb(wvif, buf_loc);
- buf_loc++;
- }
+ for (i = 0; i < body->num_tx_confs; i++)
+ wfx_tx_confirm_cb(wvif, &body->tx_conf_payload[i]);
return 0;
}
-static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_startup_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_startup *body = buf;
+ const struct hif_ind_startup *body = buf;
if (body->status || body->firmware_type > 4) {
dev_err(wdev->dev, "received invalid startup indication");
@@ -112,8 +109,8 @@ static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_wakeup_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
if (!wdev->pdata.gpio_wakeup
|| !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
@@ -123,25 +120,27 @@ static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_keys_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_keys_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_sl_exchange_pub_keys *body = buf;
+ const struct hif_ind_sl_exchange_pub_keys *body = buf;
+ u8 pubkey[API_NCP_PUB_KEY_SIZE];
- // Compatibility with legacy secure link
- if (body->status == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
- body->status = 0;
- if (body->status)
+ // SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS is used by legacy secure link
+ if (body->status && body->status != SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
dev_warn(wdev->dev, "secure link negociation error\n");
- wfx_sl_check_pubkey(wdev, body->ncp_pub_key, body->ncp_pub_key_mac);
+ memcpy(pubkey, body->ncp_pub_key, sizeof(pubkey));
+ memreverse(pubkey, sizeof(pubkey));
+ wfx_sl_check_pubkey(wdev, pubkey, body->ncp_pub_key_mac);
return 0;
}
-static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf, struct sk_buff *skb)
+static int hif_receive_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif,
+ const void *buf, struct sk_buff *skb)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_rx *body = buf;
+ const struct hif_ind_rx *body = buf;
if (!wvif) {
dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
@@ -154,11 +153,11 @@ static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_event_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_event *body = buf;
+ const struct hif_ind_event *body = buf;
struct wfx_hif_event *event;
int first;
@@ -183,7 +182,8 @@ static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
}
static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
@@ -194,19 +194,20 @@ static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
}
static int hif_scan_complete_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_scan_cmpl *body = buf;
WARN_ON(!wvif);
- wfx_scan_complete_cb(wvif, body);
+ wfx_scan_complete(wvif);
return 0;
}
static int hif_join_complete_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
@@ -217,21 +218,26 @@ static int hif_join_complete_indication(struct wfx_dev *wdev,
}
static int hif_suspend_resume_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif,
+ const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- struct hif_ind_suspend_resume_tx *body = buf;
+ const struct hif_ind_suspend_resume_tx *body = buf;
WARN_ON(!wvif);
- wfx_suspend_resume(wvif, body);
+ WARN(!body->suspend_resume_flags.bc_mc_only, "unsupported suspend/resume notification");
+ if (body->suspend_resume_flags.resume)
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
return 0;
}
-static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_error_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_error *body = buf;
+ const struct hif_ind_error *body = buf;
u8 *pRollback = (u8 *) body->data;
u32 *pStatus = (u32 *) body->data;
@@ -268,10 +274,10 @@ static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
return 0;
}
-static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
- void *buf)
+static int hif_generic_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
{
- struct hif_ind_generic *body = buf;
+ const struct hif_ind_generic *body = buf;
switch (body->indication_type) {
case HIF_GENERIC_INDICATION_TYPE_RAW:
@@ -299,9 +305,10 @@ static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
}
static int hif_exception_indication(struct wfx_dev *wdev,
- struct hif_msg *hif, void *buf)
+ const struct hif_msg *hif, const void *buf)
{
size_t len = hif->len - 4; // drop header
+
dev_err(wdev->dev, "firmware exception\n");
print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
wdev->chip_frozen = 1;
@@ -311,7 +318,8 @@ static int hif_exception_indication(struct wfx_dev *wdev,
static const struct {
int msg_id;
- int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
+ int (*handler)(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf);
} hif_handlers[] = {
/* Confirmations */
{ HIF_CNF_ID_TX, hif_tx_confirm },
@@ -335,7 +343,7 @@ static const struct {
void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
{
int i;
- struct hif_msg *hif = (struct hif_msg *) skb->data;
+ const struct hif_msg *hif = (const struct hif_msg *)skb->data;
int hif_id = hif->id;
if (hif_id == HIF_IND_ID_RX) {
@@ -358,7 +366,12 @@ void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
goto free;
}
}
- dev_err(wdev->dev, "unsupported HIF ID %02x\n", hif_id);
+ if (hif_id & 0x80)
+ dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n",
+ hif_id);
+ else
+ dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n",
+ hif_id);
free:
dev_kfree_skb(skb);
}
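
The constified hif_handlers[] table is a classic id-to-callback dispatch: wfx_handle_rx() walks the array and invokes the first entry whose msg_id matches, and bit 7 of the ID tells indications from confirmations in the error path. The same pattern as a self-contained sketch (IDs and handler names invented for illustration):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct msg { int id; const char *payload; };

static int handle_tx_cnf(const struct msg *m)
{
        printf("tx confirm: %s\n", m->payload);
        return 0;
}

static int handle_startup_ind(const struct msg *m)
{
        printf("startup: %s\n", m->payload);
        return 0;
}

static const struct {
        int msg_id;
        int (*handler)(const struct msg *m);
} handlers[] = {
        { 0x04, handle_tx_cnf },
        { 0x81, handle_startup_ind },
};

static int dispatch(const struct msg *m)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(handlers); i++)
                if (handlers[i].msg_id == m->id)
                        return handlers[i].handler(m);
        fprintf(stderr, "unsupported HIF %s: ID %02x\n",
                m->id & 0x80 ? "indication" : "confirmation", m->id);
        return -1;
}

int main(void)
{
        struct msg m = { 0x81, "firmware ready" };

        return dispatch(&m);
}
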
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index cb7cddcb9815..2428363371fa 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -6,7 +6,6 @@
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
-#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include "hif_tx.h"
@@ -221,41 +220,59 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
return ret;
}
-int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg)
+int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
+ int chan_start_idx, int chan_num)
{
int ret, i;
struct hif_msg *hif;
- struct hif_ssid_def *ssids;
- size_t buf_len = sizeof(struct hif_req_start_scan) +
- arg->scan_req.num_of_channels * sizeof(u8) +
- arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
- struct hif_req_start_scan *body = wfx_alloc_hif(buf_len, &hif);
- u8 *ptr = (u8 *) body + sizeof(*body);
-
- WARN(arg->scan_req.num_of_channels > HIF_API_MAX_NB_CHANNELS, "invalid params");
- WARN(arg->scan_req.num_of_ssi_ds > 2, "invalid params");
- WARN(arg->scan_req.band > 1, "invalid params");
-
- // FIXME: This API is unnecessary complex, fixing NumOfChannels and
- // adding a member SsidDef at end of struct hif_req_start_scan would
- // simplify that a lot.
- memcpy(body, &arg->scan_req, sizeof(*body));
- cpu_to_le32s(&body->min_channel_time);
- cpu_to_le32s(&body->max_channel_time);
- cpu_to_le32s(&body->tx_power_level);
- memcpy(ptr, arg->ssids,
- arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def));
- ssids = (struct hif_ssid_def *) ptr;
- for (i = 0; i < body->num_of_ssi_ds; ++i)
- cpu_to_le32s(&ssids[i].ssid_length);
- ptr += arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
- memcpy(ptr, arg->ch, arg->scan_req.num_of_channels * sizeof(u8));
- ptr += arg->scan_req.num_of_channels * sizeof(u8);
- WARN(buf_len != ptr - (u8 *) body, "allocation size mismatch");
+ size_t buf_len =
+ sizeof(struct hif_req_start_scan_alt) + chan_num * sizeof(u8);
+ struct hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif);
+ int tmo_chan_fg, tmo_chan_bg, tmo;
+
+ WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
+ WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
+
+ compiletime_assert(IEEE80211_MAX_SSID_LEN == HIF_API_SSID_SIZE,
+ "API inconsistency");
+ for (i = 0; i < req->n_ssids; i++) {
+ memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid,
+ IEEE80211_MAX_SSID_LEN);
+ body->ssid_def[i].ssid_length =
+ cpu_to_le32(req->ssids[i].ssid_len);
+ }
+ body->num_of_ssids = HIF_API_MAX_NB_SSIDS;
+ // Background scan is always a good idea
+ body->scan_type.type = 1;
+ body->scan_flags.fbg = 1;
+ body->tx_power_level =
+ cpu_to_le32(req->channels[chan_start_idx]->max_power);
+ body->num_of_channels = chan_num;
+ for (i = 0; i < chan_num; i++)
+ body->channel_list[i] =
+ req->channels[i + chan_start_idx]->hw_value;
+ if (req->no_cck)
+ body->max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+ else
+ body->max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+ if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) {
+ body->min_channel_time = cpu_to_le32(50);
+ body->max_channel_time = cpu_to_le32(150);
+ } else {
+ body->min_channel_time = cpu_to_le32(10);
+ body->max_channel_time = cpu_to_le32(50);
+ body->num_of_probe_requests = 2;
+ body->probe_delay = 100;
+ }
+ tmo_chan_bg = le32_to_cpu(body->max_channel_time) * USEC_PER_TU;
+ tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
+ tmo_chan_fg *= body->num_of_probe_requests;
+ tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg);
+
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
- return ret;
+ return ret ? ret : usecs_to_jiffies(tmo);
}
int hif_stop_scan(struct wfx_vif *wvif)
@@ -271,18 +288,29 @@ int hif_stop_scan(struct wfx_vif *wvif)
return ret;
}
-int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg)
+int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel, const u8 *ssidie)
{
int ret;
struct hif_msg *hif;
struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(struct hif_req_join));
- cpu_to_le16s(&body->channel_number);
- cpu_to_le16s(&body->atim_window);
- cpu_to_le32s(&body->ssid_length);
- cpu_to_le32s(&body->beacon_interval);
- cpu_to_le32s(&body->basic_rate_set);
+ WARN_ON(!conf->basic_rates);
+ body->infrastructure_bss_mode = !conf->ibss_joined;
+ body->short_preamble = conf->use_short_preamble;
+ if (channel && channel->flags & IEEE80211_CHAN_NO_IR)
+ body->probe_for_join = 0;
+ else
+ body->probe_for_join = 1;
+ body->channel_number = cpu_to_le16(channel->hw_value);
+ body->beacon_interval = cpu_to_le32(conf->beacon_int);
+ body->basic_rate_set =
+ cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+ memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
+ if (!conf->ibss_joined && ssidie) {
+ body->ssid_length = cpu_to_le32(ssidie[1]);
+ memcpy(body->ssid, &ssidie[2], ssidie[1]);
+ }
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
@@ -341,19 +369,28 @@ int hif_remove_key(struct wfx_dev *wdev, int idx)
return ret;
}
-int hif_set_edca_queue_params(struct wfx_vif *wvif,
- const struct hif_req_edca_queue_params *arg)
+int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+ const struct ieee80211_tx_queue_params *arg)
{
int ret;
struct hif_msg *hif;
struct hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body),
&hif);
- // NOTE: queues numerotation are not the same between WFx and Linux
- memcpy(body, arg, sizeof(*body));
- cpu_to_le16s(&body->cw_min);
- cpu_to_le16s(&body->cw_max);
- cpu_to_le16s(&body->tx_op_limit);
+ if (!body)
+ return -ENOMEM;
+
+ WARN_ON(arg->aifs > 255);
+ body->aifsn = arg->aifs;
+ body->cw_min = cpu_to_le16(arg->cw_min);
+ body->cw_max = cpu_to_le16(arg->cw_max);
+ body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP);
+ body->queue_id = 3 - queue;
+ // API 2.0 has changed the queue ID values
+ if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE)
+ body->queue_id = HIF_QUEUE_ID_BACKGROUND;
+ if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK)
+ body->queue_id = HIF_QUEUE_ID_BESTEFFORT;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS,
sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -361,43 +398,57 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif,
return ret;
}
-int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
{
int ret;
struct hif_msg *hif;
struct hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(*body));
+ if (!body)
+ return -ENOMEM;
+
+ if (ps) {
+ body->pm_mode.enter_psm = 1;
+ // Firmware does not support more than 128ms
+ body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
+ if (body->fast_psm_idle_period)
+ body->pm_mode.fast_psm = 1;
+ }
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
-int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg)
+int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel)
{
int ret;
struct hif_msg *hif;
struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(*body));
- cpu_to_le16s(&body->channel_number);
- cpu_to_le32s(&body->beacon_interval);
- cpu_to_le32s(&body->basic_rate_set);
+ body->dtim_period = conf->dtim_period;
+ body->short_preamble = conf->use_short_preamble;
+ body->channel_number = cpu_to_le16(channel->hw_value);
+ body->beacon_interval = cpu_to_le32(conf->beacon_int);
+ body->basic_rate_set =
+ cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
+ body->ssid_length = conf->ssid_len;
+ memcpy(body->ssid, conf->ssid, conf->ssid_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
-int hif_beacon_transmit(struct wfx_vif *wvif, bool enable_beaconing)
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
{
int ret;
struct hif_msg *hif;
struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
&hif);
- body->enable_beaconing = enable_beaconing ? 1 : 0;
+ body->enable_beaconing = enable ? 1 : 0;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -421,16 +472,15 @@ int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
return ret;
}
-int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
- const u8 *ies, size_t ies_len)
+int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
{
int ret;
struct hif_msg *hif;
int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
- memcpy(&body->ie_flags, target_frame, sizeof(struct hif_ie_flags));
- body->num_i_es = cpu_to_le16(1);
+ body->ie_flags.beacon = 1;
+ body->num_ies = cpu_to_le16(1);
memcpy(body->ie, ies, ies_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
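
hif_scan() now derives its completion timeout from the request itself: per channel, the larger of the background dwell (max_channel_time) and the foreground budget (512 TU plus probe_delay, multiplied by the probe count), scaled by the number of channels and finally converted to jiffies. The arithmetic as a worked sketch, assuming USEC_PER_TU is the 802.11 time unit of 1024 us:

#include <stdio.h>

#define USEC_PER_TU 1024 /* one 802.11 Time Unit in microseconds */

static long scan_timeout_us(int chan_num, long max_channel_time_tu,
                            int num_probes, long probe_delay_us)
{
        long tmo_chan_bg = max_channel_time_tu * USEC_PER_TU;
        long tmo_chan_fg = (512 * USEC_PER_TU + probe_delay_us) * num_probes;

        return chan_num * (tmo_chan_bg > tmo_chan_fg ? tmo_chan_bg
                                                     : tmo_chan_fg);
}

int main(void)
{
        /* Active scan: 50 TU dwell, 2 probe requests, 100 us probe delay. */
        printf("active, 11 channels: %ld us\n", scan_timeout_us(11, 50, 2, 100));
        /* Passive scan (NO_IR): 150 TU dwell, no probes sent. */
        printf("passive, 3 channels: %ld us\n", scan_timeout_us(3, 150, 0, 0));
        return 0;
}
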
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index f61ae7b0d41c..20977e461718 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -12,15 +12,13 @@
#include "hif_api_cmd.h"
+struct ieee80211_channel;
+struct ieee80211_bss_conf;
+struct ieee80211_tx_queue_params;
+struct cfg80211_scan_request;
struct wfx_dev;
struct wfx_vif;
-struct wfx_scan_params {
- struct hif_req_start_scan scan_req;
- struct hif_ssid_def *ssids;
- u8 *ch;
-};
-
struct wfx_hif_cmd {
struct mutex lock;
struct mutex key_renew_lock;
@@ -44,21 +42,23 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
-int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg);
+int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
+ int chan_start, int chan_num);
int hif_stop_scan(struct wfx_vif *wvif);
-int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg);
-int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel, const u8 *ssidie);
+int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
int hif_set_bss_params(struct wfx_vif *wvif,
const struct hif_req_set_bss_params *arg);
int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
int hif_remove_key(struct wfx_dev *wdev, int idx);
-int hif_set_edca_queue_params(struct wfx_vif *wvif,
- const struct hif_req_edca_queue_params *arg);
-int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg);
+int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
+ const struct ieee80211_tx_queue_params *arg);
+int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
+ const struct ieee80211_channel *channel);
int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
-int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
- const u8 *ies, size_t ies_len);
+int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
int destination);
int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index bb091e395ff5..bf3769c2a9b6 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -15,13 +15,15 @@
#include "hif_tx.h"
#include "hif_api_mib.h"
-static inline int hif_set_output_power(struct wfx_vif *wvif, int power_level)
+static inline int hif_set_output_power(struct wfx_vif *wvif, int val)
{
- __le32 val = cpu_to_le32(power_level);
+ struct hif_mib_current_tx_power_level arg = {
+ .power_level = cpu_to_le32(val * 10),
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
@@ -42,10 +44,25 @@ static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
}
static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
- struct hif_mib_rcpi_rssi_threshold *arg)
+ int rssi_thold, int rssi_hyst)
{
+ struct hif_mib_rcpi_rssi_threshold arg = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ if (!rssi_thold && !rssi_hyst) {
+ arg.upperthresh = 1;
+ arg.lowerthresh = 1;
+ } else {
+ arg.upper_threshold = rssi_thold + rssi_hyst;
+ arg.upper_threshold = (arg.upper_threshold + 110) * 2;
+ arg.lower_threshold = rssi_thold;
+ arg.lower_threshold = (arg.lower_threshold + 110) * 2;
+ }
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_RCPI_RSSI_THRESHOLD, arg, sizeof(*arg));
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
}
static inline int hif_get_counters_table(struct wfx_dev *wdev,
@@ -130,8 +147,17 @@ static inline int hif_set_operational_mode(struct wfx_dev *wdev,
}
static inline int hif_set_template_frame(struct wfx_vif *wvif,
- struct hif_mib_template_frame *arg)
+ struct sk_buff *skb,
+ u8 frame_type, int init_rate)
{
+ struct hif_mib_template_frame *arg;
+
+ skb_push(skb, 4);
+ arg = (struct hif_mib_template_frame *)skb->data;
+ skb_pull(skb, 4);
+ arg->init_rate = init_rate;
+ arg->frame_type = frame_type;
+ arg->frame_length = cpu_to_le16(skb->len);
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
arg, sizeof(*arg));
}
@@ -147,7 +173,6 @@ static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
}
if (!required)
val.unpmf_allowed = 1;
- cpu_to_le32s((u32 *) &val);
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_PROTECTED_MGMT_POLICY,
&val, sizeof(val));
@@ -166,50 +191,105 @@ static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
}
static inline int hif_set_association_mode(struct wfx_vif *wvif,
- struct hif_mib_set_association_mode *arg)
+ struct ieee80211_bss_conf *info,
+ struct ieee80211_sta_ht_cap *ht_cap)
{
+ int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
+ struct hif_mib_set_association_mode val = {
+ .preambtype_use = 1,
+ .mode = 1,
+ .rateset = 1,
+ .spacing = 1,
+ .short_preamble = info->use_short_preamble,
+ .basic_rate_set = cpu_to_le32(basic_rates)
+ };
+
+ // FIXME: it is strange not to retrieve all information from bss_info
+ if (ht_cap && ht_cap->ht_supported) {
+ val.mpdu_start_spacing = ht_cap->ampdu_density;
+ if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+ val.greenfield = !!(ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD);
+ }
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_ASSOCIATION_MODE, arg, sizeof(*arg));
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
}
static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
- struct hif_mib_set_tx_rate_retry_policy *arg)
+ int policy_index, uint8_t *rates)
{
- size_t size = struct_size(arg, tx_rate_retry_policy,
- arg->num_tx_rate_policies);
+ struct hif_mib_set_tx_rate_retry_policy *arg;
+ size_t size = struct_size(arg, tx_rate_retry_policy, 1);
+ int ret;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+ arg = kzalloc(size, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->num_tx_rate_policies = 1;
+ arg->tx_rate_retry_policy[0].policy_index = policy_index;
+ arg->tx_rate_retry_policy[0].short_retry_count = 255;
+ arg->tx_rate_retry_policy[0].long_retry_count = 255;
+ arg->tx_rate_retry_policy[0].first_rate_sel = 1;
+ arg->tx_rate_retry_policy[0].terminate = 1;
+ arg->tx_rate_retry_policy[0].count_init = 1;
+ memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
+ sizeof(arg->tx_rate_retry_policy[0].rates));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+ kfree(arg);
+ return ret;
}
static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
- struct hif_mib_mac_addr_data_frame_condition *arg)
+ int idx, const u8 *mac_addr)
{
+ struct hif_mib_mac_addr_data_frame_condition val = {
+ .condition_idx = idx,
+ .address_type = HIF_MAC_ADDR_A1,
+ };
+
+ ether_addr_copy(val.mac_address, mac_addr);
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
- arg, sizeof(*arg));
+ &val, sizeof(val));
}
static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
- struct hif_mib_uc_mc_bc_data_frame_condition *arg)
+ int idx, u8 allowed_frames)
{
+ struct hif_mib_uc_mc_bc_data_frame_condition val = {
+ .condition_idx = idx,
+ .allowed_frames = allowed_frames,
+ };
+
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
- arg, sizeof(*arg));
+ &val, sizeof(val));
}
-static inline int hif_set_config_data_filter(struct wfx_vif *wvif,
- struct hif_mib_config_data_filter *arg)
+static inline int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable,
+ int idx, int mac_filters,
+ int frames_types_filters)
{
+ struct hif_mib_config_data_filter val = {
+ .enable = enable,
+ .filter_idx = idx,
+ .mac_cond = mac_filters,
+ .uc_mc_bc_cond = frames_types_filters,
+ };
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CONFIG_DATA_FILTER, arg, sizeof(*arg));
+ HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
}
static inline int hif_set_data_filtering(struct wfx_vif *wvif,
- struct hif_mib_set_data_filtering *arg)
+ bool enable, bool invert)
{
+ struct hif_mib_set_data_filtering val = {
+ .enable = enable,
+ .invert_matching = invert,
+ };
+
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_DATA_FILTERING, arg, sizeof(*arg));
+ HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
}
static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
@@ -222,34 +302,56 @@ static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
&arg, sizeof(arg));
};
-static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif,
- struct hif_mib_arp_ip_addr_table *fp)
+static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx,
+ __be32 *addr)
{
+ struct hif_mib_arp_ip_addr_table arg = {
+ .condition_idx = idx,
+ .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
+ };
+
+ if (addr) {
+ // Caution: type of addr is __be32
+ memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
+ arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ }
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
- fp, sizeof(*fp));
+ &arg, sizeof(arg));
}
-static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev,
- bool enabled)
+static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
{
- __le32 arg = enabled ? cpu_to_le32(1) : 0;
+ struct hif_mib_gl_set_multi_msg arg = {
+ .enable_multi_tx_conf = enable,
+ };
return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
&arg, sizeof(arg));
}
-static inline int hif_set_uapsd_info(struct wfx_vif *wvif,
- struct hif_mib_set_uapsd_information *arg)
+static inline int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
{
+ struct hif_mib_set_uapsd_information arg = { };
+
+ if (val & BIT(IEEE80211_AC_VO))
+ arg.trig_voice = 1;
+ if (val & BIT(IEEE80211_AC_VI))
+ arg.trig_video = 1;
+ if (val & BIT(IEEE80211_AC_BE))
+ arg.trig_be = 1;
+ if (val & BIT(IEEE80211_AC_BK))
+ arg.trig_bckgrnd = 1;
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_SET_UAPSD_INFORMATION,
- arg, sizeof(*arg));
+ &arg, sizeof(arg));
}
static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
{
- __le32 arg = enable ? cpu_to_le32(1) : 0;
+ struct hif_mib_non_erp_protection arg = {
+ .use_cts_to_self = enable,
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
@@ -257,16 +359,18 @@ static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
static inline int hif_slot_time(struct wfx_vif *wvif, int val)
{
- __le32 arg = cpu_to_le32(val);
+ struct hif_mib_slot_time arg = {
+ .slot_time = cpu_to_le32(val),
+ };
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
&arg, sizeof(arg));
}
-static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
+static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool enable)
{
struct hif_mib_set_ht_protection arg = {
- .dual_cts_prot = val,
+ .dual_cts_prot = enable,
};
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
@@ -275,7 +379,9 @@ static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
{
- __le32 arg = cpu_to_le32(val);
+ struct hif_mib_wep_default_key_id arg = {
+ .wep_default_key_id = val,
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
@@ -284,7 +390,9 @@ static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
{
- __le32 arg = cpu_to_le32(val > 0 ? val : 0xFFFF);
+ struct hif_mib_dot11_rts_threshold arg = {
+ .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
+ };
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
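
hif_set_rcpi_rssi_threshold() above folds the dBm thresholds into the firmware's RCPI-like scale with (dBm + 110) * 2, so -110 dBm encodes to 0 and 0 dBm to 220, with the hysteresis added before encoding the upper bound. A quick check of that conversion (rcpi_encode is an illustrative name):

#include <stdio.h>

/* Encode a dBm threshold the way the MIB expects: (dBm + 110) * 2. */
static unsigned int rcpi_encode(int dbm)
{
        return (dbm + 110) * 2;
}

int main(void)
{
        int rssi_thold = -75, rssi_hyst = 4;

        printf("lower threshold: %u\n", rcpi_encode(rssi_thold));             /* 70 */
        printf("upper threshold: %u\n", rcpi_encode(rssi_thold + rssi_hyst)); /* 78 */
        return 0;
}
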
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
index b2c1a66de963..4b6ef061b40b 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/staging/wfx/hwio.h
@@ -37,16 +37,11 @@ int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
#define CFG_ERR_HOST_CRC_MISS 0x00000080 // only with SDIO
#define CFG_SPI_IGNORE_CS 0x00000080 // only with SPI
-/* Bytes ordering (only writable in SPI): */
-#define CFG_WORD_MODE_MASK 0x00000300
-/*
- * B1,B0,B3,B2 (In SPI, register address and
- * CONFIG data always use this mode)
- */
-#define CFG_WORD_MODE0 0x00000000
-#define CFG_WORD_MODE1 0x00000100 // B3,B2,B1,B0
-#define CFG_WORD_MODE2 0x00000200 // B0,B1,B2,B3 (SDIO)
-#define CFG_DIRECT_ACCESS_MODE 0x00000400 // Direct or queue access mode
+#define CFG_BYTE_ORDER_MASK 0x00000300 // only writable with SPI
+#define CFG_BYTE_ORDER_BADC 0x00000000
+#define CFG_BYTE_ORDER_DCBA 0x00000100
+#define CFG_BYTE_ORDER_ABCD 0x00000200 // SDIO always uses this value
+#define CFG_DIRECT_ACCESS_MODE 0x00000400
#define CFG_PREFETCH_AHB 0x00000800
#define CFG_DISABLE_CPU_CLK 0x00001000
#define CFG_PREFETCH_SRAM 0x00002000
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 986a2ef678b9..84adad64fc30 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -131,10 +131,11 @@ static const struct ieee80211_ops wfx_ops = {
.stop = wfx_stop,
.add_interface = wfx_add_interface,
.remove_interface = wfx_remove_interface,
- .config = wfx_config,
+ .config = wfx_config,
.tx = wfx_tx,
.conf_tx = wfx_conf_tx,
.hw_scan = wfx_hw_scan,
+ .cancel_hw_scan = wfx_cancel_hw_scan,
.sta_add = wfx_sta_add,
.sta_remove = wfx_sta_remove,
.sta_notify = wfx_sta_notify,
@@ -182,7 +183,7 @@ struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
} else {
ret = devm_gpiod_get(dev, label, GPIOD_OUT_LOW);
}
- if (IS_ERR(ret) || !ret) {
+ if (IS_ERR_OR_NULL(ret)) {
if (!ret || PTR_ERR(ret) == -ENOENT)
dev_warn(dev, "gpio %s is not defined\n", label);
else
@@ -289,7 +290,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->sta_data_size = sizeof(struct wfx_sta_priv);
hw->queues = 4;
hw->max_rates = 8;
- hw->max_rate_tries = 15;
+ hw->max_rate_tries = 8;
hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) +
sizeof(struct hif_msg)
+ sizeof(struct hif_req_tx)
@@ -297,6 +298,11 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index c7ee90888f69..0bcc61feee1d 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -31,8 +31,6 @@ void wfx_tx_flush(struct wfx_dev *wdev)
{
int ret;
- WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
-
// Do not wait for any reply if chip is frozen
if (wdev->chip_frozen)
return;
@@ -177,11 +175,9 @@ void wfx_tx_queues_deinit(struct wfx_dev *wdev)
wfx_tx_queues_clear(wdev);
}
-size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
- u32 link_id_map)
+int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
{
- size_t ret;
- int i, bit;
+ int ret, i;
if (!link_id_map)
return 0;
@@ -191,11 +187,9 @@ size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
ret = skb_queue_len(&queue->queue);
} else {
ret = 0;
- for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
- ++i, bit <<= 1) {
- if (link_id_map & bit)
+ for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
+ if (link_id_map & BIT(i))
ret += queue->link_map_cache[i];
- }
}
spin_unlock_bh(&queue->queue.lock);
return ret;
@@ -237,7 +231,6 @@ static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
break;
}
}
- WARN_ON(!skb);
if (skb) {
tx_priv = wfx_skb_tx_priv(skb);
tx_priv->xmit_timestamp = ktime_get();
@@ -362,79 +355,38 @@ bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
struct wfx_queue *queue)
{
- bool handled = false;
- struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
struct hif_req_tx *req = wfx_skb_txreq(skb);
- struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
-
- enum {
- do_probe,
- do_drop,
- do_wep,
- do_tx,
- } action = do_tx;
-
- switch (wvif->vif->type) {
- case NL80211_IFTYPE_STATION:
- if (wvif->state < WFX_STATE_PRE_STA)
- action = do_drop;
- break;
- case NL80211_IFTYPE_AP:
- if (!wvif->state) {
- action = do_drop;
- } else if (!(BIT(tx_priv->raw_link_id) &
- (BIT(0) | wvif->link_id_map))) {
- dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
- action = do_drop;
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- if (wvif->state != WFX_STATE_IBSS)
- action = do_drop;
- break;
- case NL80211_IFTYPE_MONITOR:
- default:
- action = do_drop;
- break;
- }
-
- if (action == do_tx) {
- if (ieee80211_is_nullfunc(frame->frame_control)) {
- mutex_lock(&wvif->bss_loss_lock);
- if (wvif->bss_loss_state) {
- wvif->bss_loss_confirm_id = req->packet_id;
- req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
- }
- mutex_unlock(&wvif->bss_loss_lock);
- } else if (ieee80211_has_protected(frame->frame_control) &&
- tx_priv->hw_key &&
- tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
- (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
- action = do_wep;
+ struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
+ struct ieee80211_hdr *frame =
+ (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
+
+ // FIXME: mac80211 is smart enough to handle BSS loss. Driver should not
+ // try to do anything about that.
+ if (ieee80211_is_nullfunc(frame->frame_control)) {
+ mutex_lock(&wvif->bss_loss_lock);
+ if (wvif->bss_loss_state) {
+ wvif->bss_loss_confirm_id = req->packet_id;
+ req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
}
+ mutex_unlock(&wvif->bss_loss_lock);
}
- switch (action) {
- case do_drop:
- wfx_pending_remove(wvif->wdev, skb);
- handled = true;
- break;
- case do_wep:
+ // FIXME: identify the exact scenario matched by this condition. Does it
+ // happen yet?
+ if (ieee80211_has_protected(frame->frame_control) &&
+ hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
+ (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
wfx_tx_lock(wvif->wdev);
- wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
+ WARN_ON(wvif->wep_pending_skb);
+ wvif->wep_default_key_id = hw_key->keyidx;
wvif->wep_pending_skb = skb;
if (!schedule_work(&wvif->wep_key_work))
wfx_tx_unlock(wvif->wdev);
- handled = true;
- break;
- case do_tx:
- break;
- default:
- /* Do nothing */
- break;
+ return true;
+ } else {
+ return false;
}
- return handled;
}
static int wfx_get_prio_queue(struct wfx_vif *wvif,
@@ -442,7 +394,7 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
{
static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
BIT(WFX_LINK_ID_UAPSD);
- struct hif_req_edca_queue_params *edca;
+ const struct ieee80211_tx_queue_params *edca;
unsigned int score, best = -1;
int winner = -1;
int i;
@@ -451,13 +403,13 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
int queued;
- edca = &wvif->edca.params[i];
+ edca = &wvif->edca_params[i];
queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
tx_allowed_mask);
if (!queued)
continue;
*total += queued;
- score = ((edca->aifsn + edca->cw_min) << 16) +
+ score = ((edca->aifs + edca->cw_min) << 16) +
((edca->cw_max - edca->cw_min) *
(get_random_int() & 0xFFFF));
if (score < best && (winner < 0 || i != 3)) {
@@ -479,94 +431,100 @@ static int wfx_get_prio_queue(struct wfx_vif *wvif,
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
struct wfx_queue **queue_p,
- u32 *tx_allowed_mask_p,
- bool *more)
+ u32 *tx_allowed_mask_p)
{
int idx;
u32 tx_allowed_mask;
int total = 0;
- /* Search for a queue with multicast frames buffered */
- if (wvif->mcast_tx) {
- tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
- idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
- if (idx >= 0) {
- *more = total > 1;
- goto found;
- }
- }
-
/* Search for unicast traffic */
tx_allowed_mask = ~wvif->sta_asleep_mask;
tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
- if (wvif->sta_asleep_mask) {
- tx_allowed_mask |= wvif->pspoll_mask;
+ if (wvif->sta_asleep_mask)
tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
- } else {
+ else
tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
- }
idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
if (idx < 0)
return -ENOENT;
-found:
*queue_p = &wvif->wdev->tx_queue[idx];
*tx_allowed_mask_p = tx_allowed_mask;
return 0;
}
+struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
+{
+ struct wfx_dev *wdev = wvif->wdev;
+ struct ieee80211_tx_info *tx_info;
+ struct hif_msg *hif;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
+ tx_info = IEEE80211_SKB_CB(skb);
+ hif = (struct hif_msg *)skb->data;
+ if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
+ (hif->interface == wvif->id))
+ return (struct hif_msg *)skb->data;
+ }
+ }
+ return NULL;
+}
+
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
struct sk_buff *skb;
struct hif_msg *hif = NULL;
- struct hif_req_tx *req = NULL;
struct wfx_queue *queue = NULL;
struct wfx_queue *vif_queue = NULL;
u32 tx_allowed_mask = 0;
u32 vif_tx_allowed_mask = 0;
const struct wfx_tx_priv *tx_priv = NULL;
struct wfx_vif *wvif;
- /* More is used only for broadcasts. */
- bool more = false;
- bool vif_more = false;
int not_found;
int burst;
+ int i;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ if (wvif->after_dtim_tx_allowed) {
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ skb = wfx_tx_queue_get(wvif->wdev,
+ &wdev->tx_queue[i],
+ BIT(WFX_LINK_ID_AFTER_DTIM));
+ if (skb) {
+ hif = (struct hif_msg *)skb->data;
+ // Cannot happen since only one vif can
+ // be AP at a time
+ WARN_ON(wvif->id != hif->interface);
+ return hif;
+ }
+ }
+ // No more multicast to send
+ wvif->after_dtim_tx_allowed = false;
+ schedule_work(&wvif->update_tim_work);
+ }
+ }
for (;;) {
int ret = -ENOENT;
int queue_num;
- struct ieee80211_hdr *hdr;
-
- if (atomic_read(&wdev->tx_lock))
- return NULL;
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
spin_lock_bh(&wvif->ps_state_lock);
not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
- &vif_tx_allowed_mask,
- &vif_more);
-
- if (wvif->mcast_buffered && (not_found || !vif_more) &&
- (wvif->mcast_tx ||
- !wvif->sta_asleep_mask)) {
- wvif->mcast_buffered = false;
- if (wvif->mcast_tx) {
- wvif->mcast_tx = false;
- schedule_work(&wvif->mcast_stop_work);
- }
- }
+ &vif_tx_allowed_mask);
spin_unlock_bh(&wvif->ps_state_lock);
- if (vif_more) {
- more = true;
- tx_allowed_mask = vif_tx_allowed_mask;
- queue = vif_queue;
- ret = 0;
- break;
- } else if (!not_found) {
+ if (!not_found) {
if (queue && queue != vif_queue)
dev_info(wdev->dev, "vifs disagree about queue priority\n");
tx_allowed_mask |= vif_tx_allowed_mask;
@@ -591,11 +549,9 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
if (hif_handle_tx_data(wvif, skb, queue))
continue; /* Handled by WSM */
- wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
-
/* allow bursting if txop is set */
- if (wvif->edca.params[queue_num].tx_op_limit)
- burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+ if (wvif->edca_params[queue_num].txop)
+ burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
else
burst = 1;
@@ -605,15 +561,6 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
else
wdev->tx_burst_idx = -1;
- /* more buffered multicast/broadcast frames
- * ==> set MoreData flag in IEEE 802.11 header
- * to inform PS STAs
- */
- if (more) {
- req = (struct hif_req_tx *) hif->body;
- hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- }
return hif;
}
}
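
wfx_get_prio_queue() keeps its randomized EDCA contention model, now fed from mac80211's ieee80211_tx_queue_params: every access category with pending frames gets score = ((aifs + cw_min) << 16) + (cw_max - cw_min) * rand16, and the lowest score wins, so queues with tighter contention windows are statistically favoured. A standalone sketch of that selection (the driver's extra "i != 3" tie-break is omitted):

#include <stdio.h>
#include <stdlib.h>

struct edca { unsigned int aifs, cw_min, cw_max; };

/* Pick the winning access category: lowest randomized score wins. */
static int pick_queue(const struct edca *params, const int *queued, int n)
{
        unsigned int score, best = ~0u;
        int i, winner = -1;

        for (i = 0; i < n; i++) {
                if (!queued[i])
                        continue;
                score = ((params[i].aifs + params[i].cw_min) << 16) +
                        (params[i].cw_max - params[i].cw_min) *
                        (rand() & 0xFFFF);
                if (score < best) {
                        best = score;
                        winner = i;
                }
        }
        return winner;
}

int main(void)
{
        /* VO, VI, BE, BK with typical EDCA defaults. */
        const struct edca params[4] = {
                { 2, 3, 7 }, { 2, 7, 15 }, { 3, 15, 1023 }, { 7, 15, 1023 },
        };
        const int queued[4] = { 0, 4, 10, 2 }; /* frames pending per AC */

        printf("winner: AC %d\n", pick_queue(params, queued, 4));
        return 0;
}
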
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 21566e48b2c2..90bb060d1204 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -13,9 +13,10 @@
#include "hif_api_cmd.h"
#define WFX_MAX_STA_IN_AP_MODE 14
-#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
-#define WFX_LINK_ID_UAPSD (WFX_MAX_STA_IN_AP_MODE + 2)
-#define WFX_LINK_ID_MAX (WFX_MAX_STA_IN_AP_MODE + 3)
+#define WFX_LINK_ID_NO_ASSOC 15
+#define WFX_LINK_ID_AFTER_DTIM (WFX_LINK_ID_NO_ASSOC + 1)
+#define WFX_LINK_ID_UAPSD (WFX_LINK_ID_NO_ASSOC + 2)
+#define WFX_LINK_ID_MAX (WFX_LINK_ID_NO_ASSOC + 3)
struct wfx_dev;
struct wfx_vif;
@@ -46,10 +47,11 @@ void wfx_tx_queues_clear(struct wfx_dev *wdev);
bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif);
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
struct sk_buff *skb);
-size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 35fcf9119f96..6e1e50048651 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -16,279 +16,118 @@ static void __ieee80211_scan_completed_compat(struct ieee80211_hw *hw,
bool aborted)
{
struct cfg80211_scan_info info = {
- .aborted = aborted ? 1 : 0,
+ .aborted = aborted,
};
ieee80211_scan_completed(hw, &info);
}
-static void wfx_scan_restart_delayed(struct wfx_vif *wvif)
+static int update_probe_tmpl(struct wfx_vif *wvif,
+ struct cfg80211_scan_request *req)
{
- if (wvif->delayed_unjoin) {
- wvif->delayed_unjoin = false;
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wvif->wdev);
- } else if (wvif->delayed_link_loss) {
- wvif->delayed_link_loss = 0;
- wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
- }
-}
-
-static int wfx_scan_start(struct wfx_vif *wvif, struct wfx_scan_params *scan)
-{
- int ret;
- int tmo = 500;
-
- if (wvif->state == WFX_STATE_PRE_STA)
- return -EBUSY;
-
- tmo += scan->scan_req.num_of_channels *
- ((20 * (scan->scan_req.max_channel_time)) + 10);
- atomic_set(&wvif->scan.in_progress, 1);
- atomic_set(&wvif->wdev->scan_in_progress, 1);
-
- schedule_delayed_work(&wvif->scan.timeout, msecs_to_jiffies(tmo));
- ret = hif_scan(wvif, scan);
- if (ret) {
- wfx_scan_failed_cb(wvif);
- atomic_set(&wvif->scan.in_progress, 0);
- atomic_set(&wvif->wdev->scan_in_progress, 0);
- cancel_delayed_work_sync(&wvif->scan.timeout);
- wfx_scan_restart_delayed(wvif);
- }
- return ret;
-}
-
-int wfx_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *hw_req)
-{
- struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct cfg80211_scan_request *req = &hw_req->req;
struct sk_buff *skb;
- int i, ret;
- struct hif_mib_template_frame *p;
-
- if (!wvif)
- return -EINVAL;
-
- if (wvif->state == WFX_STATE_AP)
- return -EOPNOTSUPP;
-
- if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
- req->n_ssids = 0;
- if (req->n_ssids > HIF_API_MAX_NB_SSIDS)
- return -EINVAL;
-
- skb = ieee80211_probereq_get(hw, wvif->vif->addr, NULL, 0, req->ie_len);
+ skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr,
+ NULL, 0, req->ie_len);
if (!skb)
return -ENOMEM;
- if (req->ie_len)
- memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
-
- mutex_lock(&wdev->conf_mutex);
-
- p = (struct hif_mib_template_frame *)skb_push(skb, 4);
- p->frame_type = HIF_TMPLT_PRBREQ;
- p->frame_length = cpu_to_le16(skb->len - 4);
- ret = hif_set_template_frame(wvif, p);
- skb_pull(skb, 4);
-
- if (!ret)
- /* Host want to be the probe responder. */
- ret = wfx_fwd_probe_req(wvif, true);
- if (ret) {
- mutex_unlock(&wdev->conf_mutex);
- dev_kfree_skb(skb);
- return ret;
- }
-
- wfx_tx_lock_flush(wdev);
-
- WARN(wvif->scan.req, "unexpected concurrent scan");
- wvif->scan.req = req;
- wvif->scan.n_ssids = 0;
- wvif->scan.status = 0;
- wvif->scan.begin = &req->channels[0];
- wvif->scan.curr = wvif->scan.begin;
- wvif->scan.end = &req->channels[req->n_channels];
- wvif->scan.output_power = wdev->output_power;
-
- for (i = 0; i < req->n_ssids; ++i) {
- struct hif_ssid_def *dst = &wvif->scan.ssids[wvif->scan.n_ssids];
-
- memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
- dst->ssid_length = req->ssids[i].ssid_len;
- ++wvif->scan.n_ssids;
- }
-
- mutex_unlock(&wdev->conf_mutex);
-
- if (skb)
- dev_kfree_skb(skb);
- schedule_work(&wvif->scan.work);
+ skb_put_data(skb, req->ie, req->ie_len);
+ hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0);
+ dev_kfree_skb(skb);
return 0;
}
-void wfx_scan_work(struct work_struct *work)
+static int send_scan_req(struct wfx_vif *wvif,
+ struct cfg80211_scan_request *req, int start_idx)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan.work);
- struct ieee80211_channel **it;
- struct wfx_scan_params scan = {
- .scan_req.scan_type.type = 0, /* Foreground */
- };
- struct ieee80211_channel *first;
- bool first_run = (wvif->scan.begin == wvif->scan.curr &&
- wvif->scan.begin != wvif->scan.end);
- int i;
-
- down(&wvif->scan.lock);
- mutex_lock(&wvif->wdev->conf_mutex);
-
- if (first_run) {
- if (wvif->state == WFX_STATE_STA &&
- !(wvif->powersave_mode.pm_mode.enter_psm)) {
- struct hif_req_set_pm_mode pm = wvif->powersave_mode;
-
- pm.pm_mode.enter_psm = 1;
- wfx_set_pm(wvif, &pm);
- }
- }
-
- if (!wvif->scan.req || wvif->scan.curr == wvif->scan.end) {
- if (wvif->scan.output_power != wvif->wdev->output_power)
- hif_set_output_power(wvif,
- wvif->wdev->output_power * 10);
-
- if (wvif->scan.status < 0)
- dev_warn(wvif->wdev->dev, "scan failed\n");
- else if (wvif->scan.req)
- dev_dbg(wvif->wdev->dev, "scan completed\n");
- else
- dev_dbg(wvif->wdev->dev, "scan canceled\n");
-
- wvif->scan.req = NULL;
- wfx_scan_restart_delayed(wvif);
- wfx_tx_unlock(wvif->wdev);
- mutex_unlock(&wvif->wdev->conf_mutex);
- __ieee80211_scan_completed_compat(wvif->wdev->hw,
- wvif->scan.status ? 1 : 0);
- up(&wvif->scan.lock);
- if (wvif->state == WFX_STATE_STA &&
- !(wvif->powersave_mode.pm_mode.enter_psm))
- wfx_set_pm(wvif, &wvif->powersave_mode);
- return;
- }
- first = *wvif->scan.curr;
-
- for (it = wvif->scan.curr + 1, i = 1;
- it != wvif->scan.end && i < HIF_API_MAX_NB_CHANNELS;
- ++it, ++i) {
- if ((*it)->band != first->band)
- break;
- if (((*it)->flags ^ first->flags) &
- IEEE80211_CHAN_NO_IR)
+ int i, ret, timeout;
+ struct ieee80211_channel *ch_start, *ch_cur;
+
+ for (i = start_idx; i < req->n_channels; i++) {
+ ch_start = req->channels[start_idx];
+ ch_cur = req->channels[i];
+ WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported");
+ if (ch_cur->max_power != ch_start->max_power)
break;
- if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
- (*it)->max_power != first->max_power)
+ if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR)
break;
}
- scan.scan_req.band = first->band;
-
- if (wvif->scan.req->no_cck)
- scan.scan_req.max_transmit_rate = API_RATE_INDEX_G_6MBPS;
- else
- scan.scan_req.max_transmit_rate = API_RATE_INDEX_B_1MBPS;
- scan.scan_req.num_of_probe_requests =
- (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
- scan.scan_req.num_of_ssi_ds = wvif->scan.n_ssids;
- scan.ssids = &wvif->scan.ssids[0];
- scan.scan_req.num_of_channels = it - wvif->scan.curr;
- scan.scan_req.probe_delay = 100;
- // FIXME: Check if FW can do active scan while joined.
- if (wvif->state == WFX_STATE_STA) {
- scan.scan_req.scan_type.type = 1;
- scan.scan_req.scan_flags.fbg = 1;
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->scan_abort = false;
+ reinit_completion(&wvif->scan_complete);
+ timeout = hif_scan(wvif, req, start_idx, i - start_idx);
+ if (timeout < 0)
+ return timeout;
+ ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
+ if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
+ hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
+ wfx_tx_unlock(wvif->wdev);
+ if (!ret) {
+ dev_notice(wvif->wdev->dev, "scan timeout\n");
+ hif_stop_scan(wvif);
+ return -ETIMEDOUT;
}
-
- scan.ch = kcalloc(scan.scan_req.num_of_channels,
- sizeof(u8), GFP_KERNEL);
-
- if (!scan.ch) {
- wvif->scan.status = -ENOMEM;
- goto fail;
+ if (wvif->scan_abort) {
+ dev_notice(wvif->wdev->dev, "scan abort\n");
+ return -ECONNABORTED;
}
- for (i = 0; i < scan.scan_req.num_of_channels; ++i)
- scan.ch[i] = wvif->scan.curr[i]->hw_value;
+ return i - start_idx;
+}
- if (wvif->scan.curr[0]->flags & IEEE80211_CHAN_NO_IR) {
- scan.scan_req.min_channel_time = 50;
- scan.scan_req.max_channel_time = 150;
- } else {
- scan.scan_req.min_channel_time = 10;
- scan.scan_req.max_channel_time = 50;
- }
- if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
- wvif->scan.output_power != first->max_power) {
- wvif->scan.output_power = first->max_power;
- hif_set_output_power(wvif, wvif->scan.output_power * 10);
- }
- wvif->scan.status = wfx_scan_start(wvif, &scan);
- kfree(scan.ch);
- if (wvif->scan.status)
- goto fail;
- wvif->scan.curr = it;
- mutex_unlock(&wvif->wdev->conf_mutex);
- return;
+/*
+ * It is not really necessary to run the scan request asynchronously. However,
+ * there is a bug in "iw scan" when ieee80211_scan_completed() is called before
+ * wfx_hw_scan() returns.
+ */
+void wfx_hw_scan_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
+ struct ieee80211_scan_request *hw_req = wvif->scan_req;
+ int chan_cur, ret;
-fail:
- wvif->scan.curr = wvif->scan.end;
+ mutex_lock(&wvif->scan_lock);
+ mutex_lock(&wvif->wdev->conf_mutex);
+ update_probe_tmpl(wvif, &hw_req->req);
+ wfx_fwd_probe_req(wvif, true);
+ chan_cur = 0;
+ do {
+ ret = send_scan_req(wvif, &hw_req->req, chan_cur);
+ if (ret > 0)
+ chan_cur += ret;
+ } while (ret > 0 && chan_cur < hw_req->req.n_channels);
mutex_unlock(&wvif->wdev->conf_mutex);
- up(&wvif->scan.lock);
- schedule_work(&wvif->scan.work);
+ mutex_unlock(&wvif->scan_lock);
+ __ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
}
-static void wfx_scan_complete(struct wfx_vif *wvif)
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
{
- up(&wvif->scan.lock);
- atomic_set(&wvif->wdev->scan_in_progress, 0);
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- wfx_scan_work(&wvif->scan.work);
-}
+ WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS);
-void wfx_scan_failed_cb(struct wfx_vif *wvif)
-{
- if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
- wvif->scan.status = -EIO;
- schedule_work(&wvif->scan.timeout.work);
- }
+ if (vif->type == NL80211_IFTYPE_AP)
+ return -EOPNOTSUPP;
+
+ if (wvif->state == WFX_STATE_PRE_STA)
+ return -EBUSY;
+
+ wvif->scan_req = hw_req;
+ schedule_work(&wvif->scan_work);
+ return 0;
}
-void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg)
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- if (cancel_delayed_work_sync(&wvif->scan.timeout) > 0) {
- wvif->scan.status = 1;
- schedule_work(&wvif->scan.timeout.work);
- }
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wvif->scan_abort = true;
+ hif_stop_scan(wvif);
}
-void wfx_scan_timeout(struct work_struct *work)
+void wfx_scan_complete(struct wfx_vif *wvif)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- scan.timeout.work);
-
- if (atomic_xchg(&wvif->scan.in_progress, 0)) {
- if (wvif->scan.status > 0) {
- wvif->scan.status = 0;
- } else if (!wvif->scan.status) {
- dev_warn(wvif->wdev->dev, "timeout waiting for scan complete notification\n");
- wvif->scan.status = -ETIMEDOUT;
- wvif->scan.curr = wvif->scan.end;
- hif_stop_scan(wvif);
- }
- wfx_scan_complete(wvif);
- }
+ complete(&wvif->scan_complete);
}
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
index b4ddd0771a9b..2eb786c9572c 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/staging/wfx/scan.h
@@ -8,35 +8,15 @@
#ifndef WFX_SCAN_H
#define WFX_SCAN_H
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
#include <net/mac80211.h>
-#include "hif_api_cmd.h"
-
struct wfx_dev;
struct wfx_vif;
-struct wfx_scan {
- struct semaphore lock;
- struct work_struct work;
- struct delayed_work timeout;
- struct cfg80211_scan_request *req;
- struct ieee80211_channel **begin;
- struct ieee80211_channel **curr;
- struct ieee80211_channel **end;
- struct hif_ssid_def ssids[HIF_API_MAX_NB_SSIDS];
- int output_power;
- int n_ssids;
- int status;
- atomic_t in_progress;
-};
-
+void wfx_hw_scan_work(struct work_struct *work);
int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
-void wfx_scan_work(struct work_struct *work);
-void wfx_scan_timeout(struct work_struct *work);
-void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg);
-void wfx_scan_failed_cb(struct wfx_vif *wvif);
+void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_scan_complete(struct wfx_vif *wvif);
#endif /* WFX_SCAN_H */
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
index 666b26e5308d..c3d055b2f8b1 100644
--- a/drivers/staging/wfx/secure_link.h
+++ b/drivers/staging/wfx/secure_link.h
@@ -25,14 +25,16 @@ static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
return -EIO;
}
-static inline int wfx_sl_encode(struct wfx_dev *wdev, struct hif_msg *input,
+static inline int wfx_sl_encode(struct wfx_dev *wdev,
+ const struct hif_msg *input,
struct hif_sl_msg *output)
{
return -EIO;
}
-static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev, u8 *ncp_pubkey,
- u8 *ncp_pubmac)
+static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev,
+ const u8 *ncp_pubkey,
+ const u8 *ncp_pubmac)
{
return -EIO;
}
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 29848a202ab4..03d0f224ffdb 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -17,10 +17,9 @@
#include "hif_tx.h"
#include "hif_tx_mib.h"
-#define TXOP_UNIT 32
#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
-static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
{
int i;
u32 ret = 0;
@@ -64,13 +63,8 @@ void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
int tx = 0;
mutex_lock(&wvif->bss_loss_lock);
- wvif->delayed_link_loss = 0;
cancel_work_sync(&wvif->bss_params_work);
- /* If we have a pending unjoin */
- if (wvif->delayed_unjoin)
- goto end;
-
if (init) {
schedule_delayed_work(&wvif->bss_loss_work, HZ);
wvif->bss_loss_state = 0;
@@ -94,62 +88,30 @@ void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
// FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
if (tx) {
struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_control control = { };
wvif->bss_loss_state++;
skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
if (!skb)
goto end;
+ hdr = (struct ieee80211_hdr *)skb->data;
memset(IEEE80211_SKB_CB(skb), 0,
sizeof(*IEEE80211_SKB_CB(skb)));
IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
- wfx_tx(wvif->wdev->hw, NULL, skb);
+ rcu_read_lock(); // protect control.sta
+ control.sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ wfx_tx(wvif->wdev->hw, &control, skb);
+ rcu_read_unlock();
}
end:
mutex_unlock(&wvif->bss_loss_lock);
}
-static int wfx_set_uapsd_param(struct wfx_vif *wvif,
- const struct wfx_edca_params *arg)
-{
- /* Here's the mapping AC [queue, bit]
- * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
- */
-
- if (arg->uapsd_enable[IEEE80211_AC_VO])
- wvif->uapsd_info.trig_voice = 1;
- else
- wvif->uapsd_info.trig_voice = 0;
-
- if (arg->uapsd_enable[IEEE80211_AC_VI])
- wvif->uapsd_info.trig_video = 1;
- else
- wvif->uapsd_info.trig_video = 0;
-
- if (arg->uapsd_enable[IEEE80211_AC_BE])
- wvif->uapsd_info.trig_be = 1;
- else
- wvif->uapsd_info.trig_be = 0;
-
- if (arg->uapsd_enable[IEEE80211_AC_BK])
- wvif->uapsd_info.trig_bckgrnd = 1;
- else
- wvif->uapsd_info.trig_bckgrnd = 0;
-
- /* Currently pseudo U-APSD operation is not supported, so setting
- * MinAutoTriggerInterval, MaxAutoTriggerInterval and
- * AutoTriggerStep to 0
- */
- wvif->uapsd_info.min_auto_trigger_interval = 0;
- wvif->uapsd_info.max_auto_trigger_interval = 0;
- wvif->uapsd_info.auto_trigger_step = 0;
-
- return hif_set_uapsd_info(wvif, &wvif->uapsd_info);
-}
-
int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
{
wvif->fwd_probe_req = enable;
@@ -160,63 +122,31 @@ int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
static int wfx_set_mcast_filter(struct wfx_vif *wvif,
struct wfx_grp_addr_table *fp)
{
- int i, ret;
- struct hif_mib_config_data_filter config = { };
- struct hif_mib_set_data_filtering filter_data = { };
- struct hif_mib_mac_addr_data_frame_condition filter_addr_val = { };
- struct hif_mib_uc_mc_bc_data_frame_condition filter_addr_type = { };
+ int i;
// Temporary workaround for filters
- return hif_set_data_filtering(wvif, &filter_data);
+ return hif_set_data_filtering(wvif, false, true);
- if (!fp->enable) {
- filter_data.enable = 0;
- return hif_set_data_filtering(wvif, &filter_data);
- }
+ if (!fp->enable)
+ return hif_set_data_filtering(wvif, false, true);
- // A1 Address match on list
- for (i = 0; i < fp->num_addresses; i++) {
- filter_addr_val.condition_idx = i;
- filter_addr_val.address_type = HIF_MAC_ADDR_A1;
- ether_addr_copy(filter_addr_val.mac_address,
- fp->address_list[i]);
- ret = hif_set_mac_addr_condition(wvif,
- &filter_addr_val);
- if (ret)
- return ret;
- config.mac_cond |= 1 << i;
- }
+ for (i = 0; i < fp->num_addresses; i++)
+ hif_set_mac_addr_condition(wvif, i, fp->address_list[i]);
+ hif_set_uc_mc_bc_condition(wvif, 0,
+ HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
+ hif_set_config_data_filter(wvif, true, 0, BIT(1),
+ BIT(fp->num_addresses) - 1);
+ hif_set_data_filtering(wvif, true, true);
- // Accept unicast and broadcast
- filter_addr_type.condition_idx = 0;
- filter_addr_type.param.bits.type_unicast = 1;
- filter_addr_type.param.bits.type_broadcast = 1;
- ret = hif_set_uc_mc_bc_condition(wvif, &filter_addr_type);
- if (ret)
- return ret;
-
- config.uc_mc_bc_cond = 1;
- config.filter_idx = 0; // TODO #define MULTICAST_FILTERING 0
- config.enable = 1;
- ret = hif_set_config_data_filter(wvif, &config);
- if (ret)
- return ret;
-
- // discard all data frames except match filter
- filter_data.enable = 1;
- filter_data.default_filter = 1; // discard all
- ret = hif_set_data_filtering(wvif, &filter_data);
-
- return ret;
+ return 0;
}
void wfx_update_filtering(struct wfx_vif *wvif)
{
int ret;
- bool is_sta = wvif->vif && NL80211_IFTYPE_STATION == wvif->vif->type;
- bool filter_bssid = wvif->filter_bssid;
- bool fwd_probe_req = wvif->fwd_probe_req;
- struct hif_mib_bcn_filter_enable bf_ctrl;
+ int bf_enable;
+ int bf_count;
+ int n_filter_ies;
struct hif_ie_table_entry filter_ies[] = {
{
.ie_id = WLAN_EID_VENDOR_SPECIFIC,
@@ -236,33 +166,29 @@ void wfx_update_filtering(struct wfx_vif *wvif)
.has_appeared = 1,
}
};
- int n_filter_ies;
if (wvif->state == WFX_STATE_PASSIVE)
return;
if (wvif->disable_beacon_filter) {
- bf_ctrl.enable = 0;
- bf_ctrl.bcn_count = 1;
+ bf_enable = 0;
+ bf_count = 1;
n_filter_ies = 0;
- } else if (!is_sta) {
- bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE |
- HIF_BEACON_FILTER_AUTO_ERP;
- bf_ctrl.bcn_count = 0;
+ } else if (wvif->vif->type != NL80211_IFTYPE_STATION) {
+ bf_enable = HIF_BEACON_FILTER_ENABLE | HIF_BEACON_FILTER_AUTO_ERP;
+ bf_count = 0;
n_filter_ies = 2;
} else {
- bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE;
- bf_ctrl.bcn_count = 0;
+ bf_enable = HIF_BEACON_FILTER_ENABLE;
+ bf_count = 0;
n_filter_ies = 3;
}
- ret = hif_set_rx_filter(wvif, filter_bssid, fwd_probe_req);
+ ret = hif_set_rx_filter(wvif, wvif->filter_bssid, wvif->fwd_probe_req);
if (!ret)
- ret = hif_set_beacon_filter_table(wvif, n_filter_ies,
- filter_ies);
+ ret = hif_set_beacon_filter_table(wvif, n_filter_ies, filter_ies);
if (!ret)
- ret = hif_beacon_filter_control(wvif, bf_ctrl.enable,
- bf_ctrl.bcn_count);
+ ret = hif_beacon_filter_control(wvif, bf_enable, bf_count);
if (!ret)
ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
if (ret)
@@ -316,90 +242,71 @@ void wfx_configure_filter(struct ieee80211_hw *hw,
*total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- down(&wvif->scan.lock);
+ mutex_lock(&wvif->scan_lock);
wvif->filter_bssid = (*total_flags &
(FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
wfx_fwd_probe_req(wvif, true);
wfx_update_filtering(wvif);
- up(&wvif->scan.lock);
+ mutex_unlock(&wvif->scan_lock);
}
}
-int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
+static int wfx_update_pm(struct wfx_vif *wvif)
{
- struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- int ret = 0;
- /* To prevent re-applying PM request OID again and again*/
- u16 old_uapsd_flags, new_uapsd_flags;
- struct hif_req_edca_queue_params *edca;
-
- mutex_lock(&wdev->conf_mutex);
-
- if (queue < hw->queues) {
- old_uapsd_flags = *((u16 *) &wvif->uapsd_info);
- edca = &wvif->edca.params[queue];
-
- wvif->edca.uapsd_enable[queue] = params->uapsd;
- edca->aifsn = params->aifs;
- edca->cw_min = params->cw_min;
- edca->cw_max = params->cw_max;
- edca->tx_op_limit = params->txop * TXOP_UNIT;
- edca->allowed_medium_time = 0;
- ret = hif_set_edca_queue_params(wvif, edca);
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
+ struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
+ bool ps = conf->flags & IEEE80211_CONF_PS;
+ int ps_timeout = conf->dynamic_ps_timeout;
+ struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
- if (wvif->vif->type == NL80211_IFTYPE_STATION) {
- ret = wfx_set_uapsd_param(wvif, &wvif->edca);
- new_uapsd_flags = *((u16 *) &wvif->uapsd_info);
- if (!ret && wvif->setbssparams_done &&
- wvif->state == WFX_STATE_STA &&
- old_uapsd_flags != new_uapsd_flags)
- ret = wfx_set_pm(wvif, &wvif->powersave_mode);
- }
- } else {
- ret = -EINVAL;
+ WARN_ON(conf->dynamic_ps_timeout < 0);
+ if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
+ return 0;
+ if (!ps)
+ ps_timeout = 0;
+ if (wvif->uapsd_mask)
+ ps_timeout = 0;
+
+	// The kernel disables powersave when an AP is in use. On the contrary,
+	// it is absolutely necessary to enable legacy powersave for WF200 if
+	// the channels are different.
+ if (wdev_to_wvif(wvif->wdev, 0))
+ chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
+ if (wdev_to_wvif(wvif->wdev, 1))
+ chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
+ if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
+ wvif->vif->type != NL80211_IFTYPE_AP) {
+ ps = true;
+ ps_timeout = 0;
}
-out:
- mutex_unlock(&wdev->conf_mutex);
- return ret;
+ if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
+ TU_TO_JIFFIES(512)))
+ dev_warn(wvif->wdev->dev,
+			 "timeout while waiting for set_pm_mode_complete\n");
+ return hif_set_pm(wvif, ps, ps_timeout);
}
-int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
{
- struct hif_req_set_pm_mode pm = *arg;
- u16 uapsd_flags;
- int ret;
-
- if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
- return 0;
-
- memcpy(&uapsd_flags, &wvif->uapsd_info, sizeof(uapsd_flags));
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int old_uapsd = wvif->uapsd_mask;
+ int ret = 0;
- if (uapsd_flags != 0)
- pm.pm_mode.fast_psm = 0;
+ WARN_ON(queue >= hw->queues);
- // Kernel disable PowerSave when multiple vifs are in use. In contrary,
- // it is absolutly necessary to enable PowerSave for WF200
- if (wvif_count(wvif->wdev) > 1) {
- pm.pm_mode.enter_psm = 1;
- pm.pm_mode.fast_psm = 0;
+ mutex_lock(&wdev->conf_mutex);
+ assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
+ memcpy(&wvif->edca_params[queue], params, sizeof(*params));
+ hif_set_edca_queue_params(wvif, queue, params);
+ if (wvif->vif->type == NL80211_IFTYPE_STATION &&
+ old_uapsd != wvif->uapsd_mask) {
+ hif_set_uapsd_info(wvif, wvif->uapsd_mask);
+ wfx_update_pm(wvif);
}
-
- if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
- msecs_to_jiffies(300)))
- dev_warn(wvif->wdev->dev,
- "timeout while waiting of set_pm_mode_complete\n");
- ret = hif_set_pm(wvif, &pm);
- // FIXME: why ?
- if (-ETIMEDOUT == wvif->scan.status)
- wvif->scan.status = 1;
+ mutex_unlock(&wdev->conf_mutex);
return ret;
}
@@ -413,56 +320,27 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
-/* If successful, LOCKS the TX queue! */
static int __wfx_flush(struct wfx_dev *wdev, bool drop)
{
- int ret;
-
for (;;) {
- if (drop) {
+ if (drop)
wfx_tx_queues_clear(wdev);
- } else {
- ret = wait_event_timeout(
- wdev->tx_queue_stats.wait_link_id_empty,
- wfx_tx_queues_is_empty(wdev),
- 2 * HZ);
- }
-
- if (!drop && ret <= 0) {
- ret = -ETIMEDOUT;
- break;
- }
- ret = 0;
-
- wfx_tx_lock_flush(wdev);
- if (!wfx_tx_queues_is_empty(wdev)) {
- /* Highly unlikely: WSM requeued frames. */
- wfx_tx_unlock(wdev);
- continue;
- }
- break;
+ if (wait_event_timeout(wdev->tx_queue_stats.wait_link_id_empty,
+ wfx_tx_queues_is_empty(wdev),
+ 2 * HZ) <= 0)
+ return -ETIMEDOUT;
+ wfx_tx_flush(wdev);
+ if (wfx_tx_queues_is_empty(wdev))
+ return 0;
+		dev_warn(wdev->dev, "frames queued while flushing tx queues\n");
}
- return ret;
}
void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
- struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif;
-
- if (vif) {
- wvif = (struct wfx_vif *) vif->drv_priv;
- if (wvif->vif->type == NL80211_IFTYPE_MONITOR)
- drop = true;
- if (wvif->vif->type == NL80211_IFTYPE_AP &&
- !wvif->enable_beacon)
- drop = true;
- }
-
- // FIXME: only flush requested vif
- if (!__wfx_flush(wdev, drop))
- wfx_tx_unlock(wdev);
+ // FIXME: only flush requested vif and queues
+ __wfx_flush(hw->priv, drop);
}
/* WSM callbacks */
@@ -476,7 +354,7 @@ static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
int cqm_evt;
rcpi_rssi = raw_rcpi_rssi / 2 - 110;
- if (rcpi_rssi <= wvif->cqm_rssi_thold)
+ if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold)
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
@@ -499,18 +377,9 @@ static void wfx_event_handler_work(struct work_struct *work)
switch (event->evt.event_id) {
case HIF_EVENT_IND_BSSLOST:
cancel_work_sync(&wvif->unjoin_work);
- if (!down_trylock(&wvif->scan.lock)) {
- wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
- up(&wvif->scan.lock);
- } else {
- /* Scan is in progress. Delay reporting.
- * Scan complete will trigger bss_loss_work
- */
- wvif->delayed_link_loss = 1;
- /* Also start a watchdog. */
- schedule_delayed_work(&wvif->bss_loss_work,
- 5 * HZ);
- }
+ mutex_lock(&wvif->scan_lock);
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ mutex_unlock(&wvif->scan_lock);
break;
case HIF_EVENT_IND_BSSREGAINED:
wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
@@ -554,30 +423,10 @@ static void wfx_bss_params_work(struct work_struct *work)
mutex_unlock(&wvif->wdev->conf_mutex);
}
-static void wfx_set_beacon_wakeup_period_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- set_beacon_wakeup_period_work);
-
- hif_set_beacon_wakeup_period(wvif, wvif->dtim_period,
- wvif->dtim_period);
-}
-
static void wfx_do_unjoin(struct wfx_vif *wvif)
{
mutex_lock(&wvif->wdev->conf_mutex);
- if (atomic_read(&wvif->scan.in_progress)) {
- if (wvif->delayed_unjoin)
- dev_dbg(wvif->wdev->dev,
- "delayed unjoin is already scheduled\n");
- else
- wvif->delayed_unjoin = true;
- goto done;
- }
-
- wvif->delayed_link_loss = false;
-
if (!wvif->state)
goto done;
@@ -585,15 +434,13 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
goto done;
cancel_work_sync(&wvif->update_filtering_work);
- cancel_work_sync(&wvif->set_beacon_wakeup_period_work);
wvif->state = WFX_STATE_PASSIVE;
/* Unjoin is a reset. */
wfx_tx_flush(wvif->wdev);
hif_keep_alive_period(wvif, 0);
hif_reset(wvif, false);
- hif_set_output_power(wvif, wvif->wdev->output_power * 10);
- wvif->dtim_period = 0;
+ wfx_tx_policy_init(wvif);
hif_set_macaddr(wvif, wvif->vif->addr);
wfx_free_event_queue(wvif);
cancel_work_sync(&wvif->event_handler_work);
@@ -605,8 +452,6 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
wvif->disable_beacon_filter = false;
wfx_update_filtering(wvif);
memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
- wvif->setbssparams_done = false;
- memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
done:
mutex_unlock(&wvif->wdev->conf_mutex);
@@ -643,33 +488,21 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
hif_set_mfp(wvif, mfpc, mfpr);
}
-/* MUST be called with tx_lock held! It will be unlocked for us. */
static void wfx_do_join(struct wfx_vif *wvif)
{
- const u8 *bssid;
+ int ret;
+ const u8 *ssidie;
struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
struct cfg80211_bss *bss = NULL;
- struct hif_req_join join = {
- .mode = conf->ibss_joined ? HIF_MODE_IBSS : HIF_MODE_BSS,
- .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
- .probe_for_join = 1,
- .atim_window = 0,
- .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
- conf->basic_rates),
- };
- if (wvif->channel->flags & IEEE80211_CHAN_NO_IR)
- join.probe_for_join = 0;
+ wfx_tx_lock_flush(wvif->wdev);
if (wvif->state)
wfx_do_unjoin(wvif);
- bssid = wvif->vif->bss_conf.bssid;
-
bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
- bssid, NULL, 0,
+ conf->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
-
if (!bss && !conf->ibss_joined) {
wfx_tx_unlock(wvif->wdev);
return;
@@ -677,41 +510,15 @@ static void wfx_do_join(struct wfx_vif *wvif)
mutex_lock(&wvif->wdev->conf_mutex);
- /* Under the conf lock: check scan status and
- * bail out if it is in progress.
- */
- if (atomic_read(&wvif->scan.in_progress)) {
- wfx_tx_unlock(wvif->wdev);
- goto done_put;
- }
-
- /* Sanity check basic rates */
- if (!join.basic_rate_set)
- join.basic_rate_set = 7;
-
/* Sanity check beacon interval */
if (!wvif->beacon_int)
wvif->beacon_int = 1;
- join.beacon_interval = wvif->beacon_int;
-
- // DTIM period will be set on first Beacon
- wvif->dtim_period = 0;
-
- join.channel_number = wvif->channel->hw_value;
- memcpy(join.bssid, bssid, sizeof(join.bssid));
-
- if (!conf->ibss_joined) {
- const u8 *ssidie;
-
- rcu_read_lock();
+ rcu_read_lock();
+ if (!conf->ibss_joined)
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
- if (ssidie) {
- join.ssid_length = ssidie[1];
- memcpy(join.ssid, &ssidie[2], join.ssid_length);
- }
- rcu_read_unlock();
- }
+ else
+ ssidie = NULL;
wfx_tx_flush(wvif->wdev);
@@ -722,7 +529,9 @@ static void wfx_do_join(struct wfx_vif *wvif)
/* Perform actual join */
wvif->wdev->tx_burst_idx = -1;
- if (hif_join(wvif, &join)) {
+ ret = hif_join(wvif, conf, wvif->channel, ssidie);
+ rcu_read_unlock();
+ if (ret) {
ieee80211_connection_loss(wvif->vif);
wvif->join_complete_status = -1;
/* Tx lock still held, unjoin will clear it. */
@@ -748,7 +557,6 @@ static void wfx_do_join(struct wfx_vif *wvif)
}
wfx_update_filtering(wvif);
-done_put:
mutex_unlock(&wvif->wdev->conf_mutex);
if (bss)
cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
@@ -765,30 +573,26 @@ static void wfx_unjoin_work(struct work_struct *work)
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
- struct wfx_link_entry *entry;
- struct sk_buff *skb;
-
- if (wvif->vif->type != NL80211_IFTYPE_AP)
- return 0;
+ spin_lock_init(&sta_priv->lock);
sta_priv->vif_id = wvif->id;
- sta_priv->link_id = wfx_find_link_id(wvif, sta->addr);
- if (!sta_priv->link_id) {
- dev_warn(wdev->dev, "mo more link-id available\n");
- return -ENOENT;
- }
- entry = &wvif->link_id_db[sta_priv->link_id - 1];
+	// FIXME: in station mode, the current API interprets a new link-id as
+	// a TDLS peer.
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 0;
+ sta_priv->link_id = ffz(wvif->link_id_map);
+ wvif->link_id_map |= BIT(sta_priv->link_id);
+ WARN_ON(!sta_priv->link_id);
+ WARN_ON(sta_priv->link_id >= WFX_MAX_STA_IN_AP_MODE);
+ hif_map_link(wvif, sta->addr, 0, sta_priv->link_id);
+
spin_lock_bh(&wvif->ps_state_lock);
if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
- entry->status = WFX_LINK_HARD;
- while ((skb = skb_dequeue(&entry->rx_queue)))
- ieee80211_rx_irqsafe(wdev->hw, skb);
spin_unlock_bh(&wvif->ps_state_lock);
return 0;
}
@@ -796,223 +600,116 @@ int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
- struct wfx_link_entry *entry;
+ int i;
- if (wvif->vif->type != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
+ WARN(sta_priv->buffered[i], "release station while Tx is in progress");
+ // FIXME: see note in wfx_sta_add()
+ if (vif->type == NL80211_IFTYPE_STATION)
return 0;
-
- entry = &wvif->link_id_db[sta_priv->link_id - 1];
- spin_lock_bh(&wvif->ps_state_lock);
- entry->status = WFX_LINK_RESERVE;
- entry->timestamp = jiffies;
- wfx_tx_lock(wdev);
- if (!schedule_work(&wvif->link_id_work))
- wfx_tx_unlock(wdev);
- spin_unlock_bh(&wvif->ps_state_lock);
- flush_work(&wvif->link_id_work);
+ // FIXME add a mutex?
+ hif_map_link(wvif, sta->addr, 1, sta_priv->link_id);
+ wvif->link_id_map &= ~BIT(sta_priv->link_id);
return 0;
}
-static void wfx_set_cts_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_cts_work);
- u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
- struct hif_ie_flags target_frame = {
- .beacon = 1,
- };
-
- mutex_lock(&wvif->wdev->conf_mutex);
- erp_ie[2] = wvif->erp_info;
- mutex_unlock(&wvif->wdev->conf_mutex);
-
- hif_erp_use_protection(wvif, erp_ie[2] & WLAN_ERP_USE_PROTECTION);
-
- if (wvif->vif->type != NL80211_IFTYPE_STATION)
- hif_update_ie(wvif, &target_frame, erp_ie, sizeof(erp_ie));
-}
-
static int wfx_start_ap(struct wfx_vif *wvif)
{
int ret;
- struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
- struct hif_req_start start = {
- .channel_number = wvif->channel->hw_value,
- .beacon_interval = conf->beacon_int,
- .dtim_period = conf->dtim_period,
- .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
- .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
- conf->basic_rates),
- };
-
- memset(start.ssid, 0, sizeof(start.ssid));
- if (!conf->hidden_ssid) {
- start.ssid_length = conf->ssid_len;
- memcpy(start.ssid, conf->ssid, start.ssid_length);
- }
-
- wvif->beacon_int = conf->beacon_int;
- wvif->dtim_period = conf->dtim_period;
-
- memset(&wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+ wvif->beacon_int = wvif->vif->bss_conf.beacon_int;
wvif->wdev->tx_burst_idx = -1;
- ret = hif_start(wvif, &start);
- if (!ret)
- ret = wfx_upload_keys(wvif);
- if (!ret) {
- if (wvif_count(wvif->wdev) <= 1)
- hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
- wvif->state = WFX_STATE_AP;
- wfx_update_filtering(wvif);
- }
- return ret;
+ ret = hif_start(wvif, &wvif->vif->bss_conf, wvif->channel);
+ if (ret)
+ return ret;
+ ret = wfx_upload_keys(wvif);
+ if (ret)
+ return ret;
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wvif->state = WFX_STATE_AP;
+ wfx_update_filtering(wvif);
+ return 0;
}
static int wfx_update_beaconing(struct wfx_vif *wvif)
{
- struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
-
- if (wvif->vif->type == NL80211_IFTYPE_AP) {
- /* TODO: check if changed channel, band */
- if (wvif->state != WFX_STATE_AP ||
- wvif->beacon_int != conf->beacon_int) {
- wfx_tx_lock_flush(wvif->wdev);
- if (wvif->state != WFX_STATE_PASSIVE)
- hif_reset(wvif, false);
- wvif->state = WFX_STATE_PASSIVE;
- wfx_start_ap(wvif);
- wfx_tx_unlock(wvif->wdev);
- } else {
- }
- }
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (wvif->state == WFX_STATE_AP &&
+ wvif->beacon_int == wvif->vif->bss_conf.beacon_int)
+ return 0;
+ wfx_tx_lock_flush(wvif->wdev);
+ hif_reset(wvif, false);
+ wfx_tx_policy_init(wvif);
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_start_ap(wvif);
+ wfx_tx_unlock(wvif->wdev);
return 0;
}
-static int wfx_upload_beacon(struct wfx_vif *wvif)
+static int wfx_upload_ap_templates(struct wfx_vif *wvif)
{
- int ret = 0;
- struct sk_buff *skb = NULL;
- struct ieee80211_mgmt *mgmt;
- struct hif_mib_template_frame *p;
+ struct sk_buff *skb;
if (wvif->vif->type == NL80211_IFTYPE_STATION ||
wvif->vif->type == NL80211_IFTYPE_MONITOR ||
wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
- goto done;
+ return 0;
skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
-
if (!skb)
return -ENOMEM;
-
- p = (struct hif_mib_template_frame *) skb_push(skb, 4);
- p->frame_type = HIF_TMPLT_BCN;
- p->init_rate = API_RATE_INDEX_B_1MBPS; /* 1Mbps DSSS */
- p->frame_length = cpu_to_le16(skb->len - 4);
-
- ret = hif_set_template_frame(wvif, p);
-
- skb_pull(skb, 4);
-
- if (ret)
- goto done;
- /* TODO: Distill probe resp; remove TIM and any other beacon-specific
- * IEs
- */
- mgmt = (void *)skb->data;
- mgmt->frame_control =
- cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
-
- p->frame_type = HIF_TMPLT_PRBRES;
-
- ret = hif_set_template_frame(wvif, p);
- wfx_fwd_probe_req(wvif, false);
-
-done:
+ hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN,
+ API_RATE_INDEX_B_1MBPS);
dev_kfree_skb(skb);
- return ret;
-}
-
-static int wfx_is_ht(const struct wfx_ht_info *ht_info)
-{
- return ht_info->channel_type != NL80211_CHAN_NO_HT;
-}
-static int wfx_ht_greenfield(const struct wfx_ht_info *ht_info)
-{
- return wfx_is_ht(ht_info) &&
- (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ht_info->operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-}
-
-static int wfx_ht_ampdu_density(const struct wfx_ht_info *ht_info)
-{
- if (!wfx_is_ht(ht_info))
- return 0;
- return ht_info->ht_cap.ampdu_density;
+ skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif);
+ if (!skb)
+ return -ENOMEM;
+ hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES,
+ API_RATE_INDEX_B_1MBPS);
+ dev_kfree_skb(skb);
+ return 0;
}
static void wfx_join_finalize(struct wfx_vif *wvif,
struct ieee80211_bss_conf *info)
{
struct ieee80211_sta *sta = NULL;
- struct hif_mib_set_association_mode association_mode = { };
- if (info->dtim_period)
- wvif->dtim_period = info->dtim_period;
wvif->beacon_int = info->beacon_int;
-
- rcu_read_lock();
+ rcu_read_lock(); // protect sta
if (info->bssid && !info->ibss_joined)
sta = ieee80211_find_sta(wvif->vif, info->bssid);
- if (sta) {
- wvif->ht_info.ht_cap = sta->ht_cap;
+ if (sta)
wvif->bss_params.operational_rate_set =
wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
- wvif->ht_info.operation_mode = info->ht_operation_mode;
- } else {
- memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+ else
wvif->bss_params.operational_rate_set = -1;
- }
- rcu_read_unlock();
-
- /* Non Greenfield stations present */
- if (wvif->ht_info.operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+ if (sta &&
+ info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
hif_dual_cts_protection(wvif, true);
else
hif_dual_cts_protection(wvif, false);
- association_mode.preambtype_use = 1;
- association_mode.mode = 1;
- association_mode.rateset = 1;
- association_mode.spacing = 1;
- association_mode.preamble_type = info->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG;
- association_mode.basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates));
- association_mode.mixed_or_greenfield_type = wfx_ht_greenfield(&wvif->ht_info);
- association_mode.mpdu_start_spacing = wfx_ht_ampdu_density(&wvif->ht_info);
-
wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
cancel_work_sync(&wvif->unjoin_work);
wvif->bss_params.beacon_lost_count = 20;
wvif->bss_params.aid = info->aid;
- if (wvif->dtim_period < 1)
- wvif->dtim_period = 1;
-
- hif_set_association_mode(wvif, &association_mode);
+ hif_set_association_mode(wvif, info, sta ? &sta->ht_cap : NULL);
+ rcu_read_unlock();
if (!info->ibss_joined) {
hif_keep_alive_period(wvif, 30 /* sec */);
hif_set_bss_params(wvif, &wvif->bss_params);
- wvif->setbssparams_done = true;
- wfx_set_beacon_wakeup_period_work(&wvif->set_beacon_wakeup_period_work);
- wfx_set_pm(wvif, &wvif->powersave_mode);
+ hif_set_beacon_wakeup_period(wvif, info->dtim_period,
+ info->dtim_period);
+ wfx_update_pm(wvif);
}
}
@@ -1025,48 +722,40 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
bool do_join = false;
int i;
- int nb_arp_addr;
mutex_lock(&wdev->conf_mutex);
/* TODO: BSS_CHANGED_QOS */
if (changed & BSS_CHANGED_ARP_FILTER) {
- struct hif_mib_arp_ip_addr_table filter = { };
-
- nb_arp_addr = info->arp_addr_cnt;
- if (nb_arp_addr <= 0 || nb_arp_addr > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
- nb_arp_addr = 0;
-
for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
- filter.condition_idx = i;
- if (i < nb_arp_addr) {
- // Caution: type of arp_addr_list[i] is __be32
- memcpy(filter.ipv4_address,
- &info->arp_addr_list[i],
- sizeof(filter.ipv4_address));
- filter.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
- } else {
- filter.arp_enable = HIF_ARP_NS_FILTERING_DISABLE;
- }
- hif_set_arp_ipv4_filter(wvif, &filter);
+ __be32 *arp_addr = &info->arp_addr_list[i];
+
+ if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ arp_addr = NULL;
+ if (i >= info->arp_addr_cnt)
+ arp_addr = NULL;
+ hif_set_arp_ipv4_filter(wvif, i, arp_addr);
}
}
- if (changed &
- (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP |
- BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) {
+ if (changed & BSS_CHANGED_BEACON ||
+ changed & BSS_CHANGED_AP_PROBE_RESP ||
+ changed & BSS_CHANGED_BSSID ||
+ changed & BSS_CHANGED_SSID ||
+ changed & BSS_CHANGED_IBSS) {
wvif->beacon_int = info->beacon_int;
wfx_update_beaconing(wvif);
- wfx_upload_beacon(wvif);
+ wfx_upload_ap_templates(wvif);
+ wfx_fwd_probe_req(wvif, false);
}
if (changed & BSS_CHANGED_BEACON_ENABLED &&
- wvif->state != WFX_STATE_IBSS) {
- if (wvif->enable_beacon != info->enable_beacon) {
- hif_beacon_transmit(wvif, info->enable_beacon);
- wvif->enable_beacon = info->enable_beacon;
- }
- }
+ wvif->state != WFX_STATE_IBSS)
+ hif_beacon_transmit(wvif, info->enable_beacon);
+
+ if (changed & BSS_CHANGED_BEACON_INFO)
+ hif_set_beacon_wakeup_period(wvif, info->dtim_period,
+ info->dtim_period);
/* assoc/disassoc, or maybe AID changed */
if (changed & BSS_CHANGED_ASSOC) {
@@ -1092,10 +781,11 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID)
do_join = true;
- if (changed &
- (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID |
- BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES |
- BSS_CHANGED_HT)) {
+ if (changed & BSS_CHANGED_ASSOC ||
+ changed & BSS_CHANGED_BSSID ||
+ changed & BSS_CHANGED_IBSS ||
+ changed & BSS_CHANGED_BASIC_RATES ||
+ changed & BSS_CHANGED_HT) {
if (info->assoc) {
if (wvif->state < WFX_STATE_PRE_STA) {
ieee80211_connection_loss(vif);
@@ -1116,106 +806,50 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
}
}
- /* ERP Protection */
- if (changed & (BSS_CHANGED_ASSOC |
- BSS_CHANGED_ERP_CTS_PROT |
- BSS_CHANGED_ERP_PREAMBLE)) {
- u32 prev_erp_info = wvif->erp_info;
+ if (changed & BSS_CHANGED_ASSOC ||
+ changed & BSS_CHANGED_ERP_CTS_PROT ||
+ changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
+ hif_erp_use_protection(wvif, info->use_cts_prot);
if (info->use_cts_prot)
- wvif->erp_info |= WLAN_ERP_USE_PROTECTION;
- else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
- wvif->erp_info &= ~WLAN_ERP_USE_PROTECTION;
-
+ erp_ie[2] |= WLAN_ERP_USE_PROTECTION;
if (info->use_short_preamble)
- wvif->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
- else
- wvif->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
-
- if (prev_erp_info != wvif->erp_info)
- schedule_work(&wvif->set_cts_work);
+ erp_ie[2] |= WLAN_ERP_BARKER_PREAMBLE;
+ if (wvif->vif->type != NL80211_IFTYPE_STATION)
+ hif_update_ie_beacon(wvif, erp_ie, sizeof(erp_ie));
}
- if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT))
+ if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_ERP_SLOT)
hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
- if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
- struct hif_mib_rcpi_rssi_threshold th = {
- .rolling_average_count = 8,
- .detection = 1,
- };
-
- wvif->cqm_rssi_thold = info->cqm_rssi_thold;
-
- if (!info->cqm_rssi_thold && !info->cqm_rssi_hyst) {
- th.upperthresh = 1;
- th.lowerthresh = 1;
- } else {
- /* FIXME It's not a correct way of setting threshold.
- * Upper and lower must be set equal here and adjusted
- * in callback. However current implementation is much
- * more reliable and stable.
- */
- /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
- * RSSI = RCPI / 2 - 110
- */
- th.upper_threshold = info->cqm_rssi_thold + info->cqm_rssi_hyst;
- th.upper_threshold = (th.upper_threshold + 110) * 2;
- th.lower_threshold = info->cqm_rssi_thold;
- th.lower_threshold = (th.lower_threshold + 110) * 2;
- }
- hif_set_rcpi_rssi_threshold(wvif, &th);
- }
+ if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_CQM)
+ hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold,
+ info->cqm_rssi_hyst);
+
+ if (changed & BSS_CHANGED_TXPOWER)
+ hif_set_output_power(wvif, info->txpower);
+
+ if (changed & BSS_CHANGED_PS)
+ wfx_update_pm(wvif);
- if (changed & BSS_CHANGED_TXPOWER &&
- info->txpower != wdev->output_power) {
- wdev->output_power = info->txpower;
- hif_set_output_power(wvif, wdev->output_power * 10);
- }
mutex_unlock(&wdev->conf_mutex);
- if (do_join) {
- wfx_tx_lock_flush(wdev);
- wfx_do_join(wvif); /* Will unlock it for us */
- }
+ if (do_join)
+ wfx_do_join(wvif);
}
-static void wfx_ps_notify(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd,
- int link_id)
+static void wfx_ps_notify_sta(struct wfx_vif *wvif,
+ enum sta_notify_cmd notify_cmd, int link_id)
{
- u32 bit, prev;
-
spin_lock_bh(&wvif->ps_state_lock);
- /* Zero link id means "for all link IDs" */
- if (link_id) {
- bit = BIT(link_id);
- } else if (notify_cmd != STA_NOTIFY_AWAKE) {
- dev_warn(wvif->wdev->dev, "unsupported notify command\n");
- bit = 0;
- } else {
- bit = wvif->link_id_map;
- }
- prev = wvif->sta_asleep_mask & bit;
-
- switch (notify_cmd) {
- case STA_NOTIFY_SLEEP:
- if (!prev) {
- if (wvif->mcast_buffered && !wvif->sta_asleep_mask)
- schedule_work(&wvif->mcast_start_work);
- wvif->sta_asleep_mask |= bit;
- }
- break;
- case STA_NOTIFY_AWAKE:
- if (prev) {
- wvif->sta_asleep_mask &= ~bit;
- wvif->pspoll_mask &= ~bit;
- if (link_id && !wvif->sta_asleep_mask)
- schedule_work(&wvif->mcast_stop_work);
- wfx_bh_request_tx(wvif->wdev);
- }
- break;
- }
+ if (notify_cmd == STA_NOTIFY_SLEEP)
+ wvif->sta_asleep_mask |= BIT(link_id);
+ else // notify_cmd == STA_NOTIFY_AWAKE
+ wvif->sta_asleep_mask &= ~BIT(link_id);
spin_unlock_bh(&wvif->ps_state_lock);
+ if (notify_cmd == STA_NOTIFY_AWAKE)
+ wfx_bh_request_tx(wvif->wdev);
}
void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1224,23 +858,19 @@ void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
- wfx_ps_notify(wvif, notify_cmd, sta_priv->link_id);
+ wfx_ps_notify_sta(wvif, notify_cmd, sta_priv->link_id);
}
-static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+static int wfx_update_tim(struct wfx_vif *wvif)
{
struct sk_buff *skb;
- struct hif_ie_flags target_frame = {
- .beacon = 1,
- };
u16 tim_offset, tim_length;
u8 *tim_ptr;
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
&tim_offset, &tim_length);
if (!skb) {
- if (!__wfx_flush(wvif->wdev, true))
- wfx_tx_unlock(wvif->wdev);
+ __wfx_flush(wvif->wdev, true);
return -ENOENT;
}
tim_ptr = skb->data + tim_offset;
@@ -1252,23 +882,23 @@ static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
tim_ptr[2] = 0;
/* Set/reset aid0 bit */
- if (aid0_bit_set)
+ if (wfx_tx_queues_get_after_dtim(wvif))
tim_ptr[4] |= 1;
else
tim_ptr[4] &= ~1;
}
- hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+ hif_update_ie_beacon(wvif, tim_ptr, tim_length);
dev_kfree_skb(skb);
return 0;
}
-static void wfx_set_tim_work(struct work_struct *work)
+static void wfx_update_tim_work(struct work_struct *work)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_tim_work);
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work);
- wfx_set_tim_impl(wvif, wvif->aid0_bit_set);
+ wfx_update_tim(wvif);
}
int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
@@ -1277,50 +907,16 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
- schedule_work(&wvif->set_tim_work);
+ schedule_work(&wvif->update_tim_work);
return 0;
}
-static void wfx_mcast_start_work(struct work_struct *work)
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- mcast_start_work);
- long tmo = wvif->dtim_period * TU_TO_JIFFIES(wvif->beacon_int + 20);
-
- cancel_work_sync(&wvif->mcast_stop_work);
- if (!wvif->aid0_bit_set) {
- wfx_tx_lock_flush(wvif->wdev);
- wfx_set_tim_impl(wvif, true);
- wvif->aid0_bit_set = true;
- mod_timer(&wvif->mcast_timeout, jiffies + tmo);
- wfx_tx_unlock(wvif->wdev);
- }
-}
-
-static void wfx_mcast_stop_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- mcast_stop_work);
-
- if (wvif->aid0_bit_set) {
- del_timer_sync(&wvif->mcast_timeout);
- wfx_tx_lock_flush(wvif->wdev);
- wvif->aid0_bit_set = false;
- wfx_set_tim_impl(wvif, false);
- wfx_tx_unlock(wvif->wdev);
- }
-}
-
-static void wfx_mcast_timeout(struct timer_list *t)
-{
- struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
-
- dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
- spin_lock_bh(&wvif->ps_state_lock);
- wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
- if (wvif->mcast_tx)
- wfx_bh_request_tx(wvif->wdev);
- spin_unlock_bh(&wvif->ps_state_lock);
+ WARN(!wfx_tx_queues_get_after_dtim(wvif), "incorrect sequence");
+ WARN(wvif->after_dtim_tx_allowed, "incorrect sequence");
+ wvif->after_dtim_tx_allowed = true;
+ wfx_bh_request_tx(wvif->wdev);
}
int wfx_ampdu_action(struct ieee80211_hw *hw,
@@ -1338,35 +934,6 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
return -ENOTSUPP;
}
-void wfx_suspend_resume(struct wfx_vif *wvif,
- struct hif_ind_suspend_resume_tx *arg)
-{
- if (arg->suspend_resume_flags.bc_mc_only) {
- bool cancel_tmo = false;
-
- spin_lock_bh(&wvif->ps_state_lock);
- if (!arg->suspend_resume_flags.resume)
- wvif->mcast_tx = false;
- else
- wvif->mcast_tx = wvif->aid0_bit_set &&
- wvif->mcast_buffered;
- if (wvif->mcast_tx) {
- cancel_tmo = true;
- wfx_bh_request_tx(wvif->wdev);
- }
- spin_unlock_bh(&wvif->ps_state_lock);
- if (cancel_tmo)
- del_timer_sync(&wvif->mcast_timeout);
- } else if (arg->suspend_resume_flags.resume) {
- // FIXME: should change each station status independently
- wfx_ps_notify(wvif, STA_NOTIFY_AWAKE, 0);
- wfx_bh_request_tx(wvif->wdev);
- } else {
- // FIXME: should change each station status independently
- wfx_ps_notify(wvif, STA_NOTIFY_SLEEP, 0);
- }
-}
-
int wfx_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf)
{
@@ -1392,7 +959,6 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
WARN(wvif->channel, "channel overwrite");
wvif->channel = ch;
- wvif->ht_info.channel_type = cfg80211_get_chandef_type(&conf->def);
return 0;
}
@@ -1410,97 +976,14 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
int wfx_config(struct ieee80211_hw *hw, u32 changed)
{
- int ret = 0;
- struct wfx_dev *wdev = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- struct wfx_vif *wvif;
-
- // FIXME: Interface id should not been hardcoded
- wvif = wdev_to_wvif(wdev, 0);
- if (!wvif) {
- WARN(1, "interface 0 does not exist anymore");
- return 0;
- }
-
- down(&wvif->scan.lock);
- mutex_lock(&wdev->conf_mutex);
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- wdev->output_power = conf->power_level;
- hif_set_output_power(wvif, wdev->output_power * 10);
- }
-
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- wvif = NULL;
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- memset(&wvif->powersave_mode, 0,
- sizeof(wvif->powersave_mode));
- if (conf->flags & IEEE80211_CONF_PS) {
- wvif->powersave_mode.pm_mode.enter_psm = 1;
- if (conf->dynamic_ps_timeout > 0) {
- wvif->powersave_mode.pm_mode.fast_psm = 1;
- /*
- * Firmware does not support more than
- * 128ms
- */
- wvif->powersave_mode.fast_psm_idle_period =
- min(conf->dynamic_ps_timeout *
- 2, 255);
- }
- }
- if (wvif->state == WFX_STATE_STA && wvif->bss_params.aid)
- wfx_set_pm(wvif, &wvif->powersave_mode);
- }
- wvif = wdev_to_wvif(wdev, 0);
- }
-
- mutex_unlock(&wdev->conf_mutex);
- up(&wvif->scan.lock);
- return ret;
+ return 0;
}
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- int i;
+ int i, ret = 0;
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- // FIXME: parameters are set by kernel juste after interface_add.
- // Keep struct hif_req_edca_queue_params blank?
- struct hif_req_edca_queue_params default_edca_params[] = {
- [IEEE80211_AC_VO] = {
- .queue_id = HIF_QUEUE_ID_VOICE,
- .aifsn = 2,
- .cw_min = 3,
- .cw_max = 7,
- .tx_op_limit = TXOP_UNIT * 47,
- },
- [IEEE80211_AC_VI] = {
- .queue_id = HIF_QUEUE_ID_VIDEO,
- .aifsn = 2,
- .cw_min = 7,
- .cw_max = 15,
- .tx_op_limit = TXOP_UNIT * 94,
- },
- [IEEE80211_AC_BE] = {
- .queue_id = HIF_QUEUE_ID_BESTEFFORT,
- .aifsn = 3,
- .cw_min = 15,
- .cw_max = 1023,
- .tx_op_limit = TXOP_UNIT * 0,
- },
- [IEEE80211_AC_BK] = {
- .queue_id = HIF_QUEUE_ID_BACKGROUND,
- .aifsn = 7,
- .cw_min = 15,
- .cw_max = 1023,
- .tx_op_limit = TXOP_UNIT * 0,
- },
- };
-
- BUILD_BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
- if (wfx_api_older_than(wdev, 2, 0)) {
- default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
- default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
- }
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
@@ -1533,50 +1016,37 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif->vif = vif;
wvif->wdev = wdev;
- INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
- INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
-
+ wvif->link_id_map = 1; // link-id 0 is reserved for multicast
spin_lock_init(&wvif->ps_state_lock);
- INIT_WORK(&wvif->set_tim_work, wfx_set_tim_work);
+ INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
- INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
- INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
- timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+ memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
- wvif->setbssparams_done = false;
mutex_init(&wvif->bss_loss_lock);
INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
wvif->wep_default_key_id = -1;
INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
- sema_init(&wvif->scan.lock, 1);
- INIT_WORK(&wvif->scan.work, wfx_scan_work);
- INIT_DELAYED_WORK(&wvif->scan.timeout, wfx_scan_timeout);
-
spin_lock_init(&wvif->event_queue_lock);
INIT_LIST_HEAD(&wvif->event_queue);
INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
init_completion(&wvif->set_pm_mode_complete);
complete(&wvif->set_pm_mode_complete);
- INIT_WORK(&wvif->set_beacon_wakeup_period_work,
- wfx_set_beacon_wakeup_period_work);
INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
- INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+ INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+ mutex_init(&wvif->scan_lock);
+ init_completion(&wvif->scan_complete);
+ INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
mutex_unlock(&wdev->conf_mutex);
hif_set_macaddr(wvif, vif->addr);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- memcpy(&wvif->edca.params[i], &default_edca_params[i],
- sizeof(default_edca_params[i]));
- wvif->edca.uapsd_enable[i] = false;
- hif_set_edca_queue_params(wvif, &wvif->edca.params[i]);
- }
- wfx_set_uapsd_param(wvif, &wvif->edca);
wfx_tx_policy_init(wvif);
wvif = NULL;
@@ -1587,9 +1057,9 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
// Combo force powersave mode. We can re-enable it now
- wfx_set_pm(wvif, &wvif->powersave_mode);
+ ret = wfx_update_pm(wvif);
}
- return 0;
+ return ret;
}
void wfx_remove_interface(struct ieee80211_hw *hw,
@@ -1597,15 +1067,11 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- int i;
- // If scan is in progress, stop it
- while (down_trylock(&wvif->scan.lock))
- schedule();
- up(&wvif->scan.lock);
wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
mutex_lock(&wdev->conf_mutex);
+ WARN(wvif->link_id_map != 1, "corrupted state");
switch (wvif->state) {
case WFX_STATE_PRE_STA:
case WFX_STATE_STA:
@@ -1615,19 +1081,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
wfx_tx_unlock(wdev);
break;
case WFX_STATE_AP:
- for (i = 0; wvif->link_id_map; ++i) {
- if (wvif->link_id_map & BIT(i)) {
- wfx_unmap_link(wvif, i);
- wvif->link_id_map &= ~BIT(i);
- }
- }
- memset(wvif->link_id_db, 0, sizeof(wvif->link_id_db));
wvif->sta_asleep_mask = 0;
- wvif->enable_beacon = false;
- wvif->mcast_tx = false;
- wvif->aid0_bit_set = false;
- wvif->mcast_buffered = false;
- wvif->pspoll_mask = 0;
/* reset.link_id = 0; */
hif_reset(wvif, false);
break;
@@ -1642,12 +1096,8 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
/* FIXME: In add to reset MAC address, try to reset interface */
hif_set_macaddr(wvif, NULL);
- cancel_delayed_work_sync(&wvif->scan.timeout);
-
wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
cancel_work_sync(&wvif->unjoin_work);
- cancel_delayed_work_sync(&wvif->link_id_gc_work);
- del_timer_sync(&wvif->mcast_timeout);
wfx_free_event_queue(wvif);
wdev->vif[wvif->id] = NULL;
@@ -1662,7 +1112,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
// Combo force powersave mode. We can re-enable it now
- wfx_set_pm(wvif, &wvif->powersave_mode);
+ wfx_update_pm(wvif);
}
}
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index 4ccf1b17632b..cf99a8a74a81 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -23,23 +23,11 @@ enum wfx_state {
WFX_STATE_AP,
};
-struct wfx_ht_info {
- struct ieee80211_sta_ht_cap ht_cap;
- enum nl80211_channel_type channel_type;
- u16 operation_mode;
-};
-
struct wfx_hif_event {
struct list_head link;
struct hif_ind_event evt;
};
-struct wfx_edca_params {
- /* NOTE: index is a linux queue id. */
- struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
- bool uapsd_enable[IEEE80211_NUM_ACS];
-};
-
struct wfx_grp_addr_table {
bool enable;
int num_addresses;
@@ -49,6 +37,9 @@ struct wfx_grp_addr_table {
struct wfx_sta_priv {
int link_id;
int vif_id;
+ u8 buffered[IEEE80211_NUM_TIDS];
+ // Ensure atomicity of "buffered" and calls to ieee80211_sta_set_buffered()
+ spinlock_t lock;
};
// mac80211 interface
@@ -91,13 +82,12 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf);
// WSM Callbacks
-void wfx_suspend_resume(struct wfx_vif *wvif,
- struct hif_ind_suspend_resume_tx *arg);
+void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
// Other Helpers
void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
void wfx_update_filtering(struct wfx_vif *wvif);
-int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index 3f6198ab2235..30c6a13f0e22 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -153,7 +153,7 @@ hif_mib_list_enum
#define hif_mib_list hif_mib_list_enum { -1, NULL }
DECLARE_EVENT_CLASS(hif_data,
- TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
TP_ARGS(hif, tx_fill_level, is_recv),
TP_STRUCT__entry(
__field(int, tx_fill_level)
@@ -203,12 +203,12 @@ DECLARE_EVENT_CLASS(hif_data,
)
);
DEFINE_EVENT(hif_data, hif_send,
- TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
TP_ARGS(hif, tx_fill_level, is_recv));
#define _trace_hif_send(hif, tx_fill_level)\
trace_hif_send(hif, tx_fill_level, false)
DEFINE_EVENT(hif_data, hif_recv,
- TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_PROTO(const struct hif_msg *hif, int tx_fill_level, bool is_recv),
TP_ARGS(hif, tx_fill_level, is_recv));
#define _trace_hif_recv(hif, tx_fill_level)\
trace_hif_recv(hif, tx_fill_level, true)
@@ -359,7 +359,8 @@ TRACE_EVENT(bh_stats,
trace_bh_stats(ind, req, cnf, busy, release)
TRACE_EVENT(tx_stats,
- TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+ TP_PROTO(const struct hif_cnf_tx *tx_cnf, const struct sk_buff *skb,
+ int delay),
TP_ARGS(tx_cnf, skb, delay),
TP_STRUCT__entry(
__field(int, pkt_id)
@@ -375,8 +376,9 @@ TRACE_EVENT(tx_stats,
// Keep sync with wfx_rates definition in main.c
static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
10, 11, 12, 13 };
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ const struct ieee80211_tx_info *tx_info =
+ (const struct ieee80211_tx_info *)skb->cb;
+ const struct ieee80211_tx_rate *rates = tx_info->driver_rates;
int i;
__entry->pkt_id = tx_cnf->packet_id;
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 781a8c8ba982..8b85bb1abb9c 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -26,6 +26,9 @@
#include "hif_tx.h"
#include "hif_api_general.h"
+#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
+#define USEC_PER_TU 1024
+
struct hwbus_ops;
struct wfx_dev {
@@ -51,14 +54,12 @@ struct wfx_dev {
int tx_burst_idx;
atomic_t tx_lock;
+ atomic_t packet_id;
u32 key_map;
struct hif_req_add_key keys[MAX_KEY_ENTRIES];
struct hif_rx_stats rx_stats;
struct mutex rx_stats_lock;
-
- int output_power;
- atomic_t scan_in_progress;
};
struct wfx_vif {
@@ -68,24 +69,15 @@ struct wfx_vif {
int id;
enum wfx_state state;
- int delayed_link_loss;
int bss_loss_state;
u32 bss_loss_confirm_id;
struct mutex bss_loss_lock;
struct delayed_work bss_loss_work;
u32 link_id_map;
- struct wfx_link_entry link_id_db[WFX_MAX_STA_IN_AP_MODE];
- struct delayed_work link_id_gc_work;
- struct work_struct link_id_work;
- bool aid0_bit_set;
- bool mcast_tx;
- bool mcast_buffered;
+ bool after_dtim_tx_allowed;
struct wfx_grp_addr_table mcast_filter;
- struct timer_list mcast_timeout;
- struct work_struct mcast_start_work;
- struct work_struct mcast_stop_work;
s8 wep_default_key_id;
struct sk_buff *wep_pending_skb;
@@ -95,37 +87,30 @@ struct wfx_vif {
struct work_struct tx_policy_upload_work;
u32 sta_asleep_mask;
- u32 pspoll_mask;
spinlock_t ps_state_lock;
- struct work_struct set_tim_work;
+ struct work_struct update_tim_work;
- int dtim_period;
int beacon_int;
- bool enable_beacon;
- struct work_struct set_beacon_wakeup_period_work;
-
bool filter_bssid;
bool fwd_probe_req;
bool disable_beacon_filter;
struct work_struct update_filtering_work;
- u32 erp_info;
- int cqm_rssi_thold;
- bool setbssparams_done;
- struct wfx_ht_info ht_info;
- struct wfx_edca_params edca;
- struct hif_mib_set_uapsd_information uapsd_info;
+ unsigned long uapsd_mask;
+ struct ieee80211_tx_queue_params edca_params[IEEE80211_NUM_ACS];
struct hif_req_set_bss_params bss_params;
struct work_struct bss_params_work;
- struct work_struct set_cts_work;
int join_complete_status;
- bool delayed_unjoin;
struct work_struct unjoin_work;
- struct wfx_scan scan;
+ /* prevent some operations from running in parallel with a scan */
+ struct mutex scan_lock;
+ struct work_struct scan_work;
+ struct completion scan_complete;
+ bool scan_abort;
+ struct ieee80211_scan_request *scan_req;
- struct hif_req_set_pm_mode powersave_mode;
struct completion set_pm_mode_complete;
struct list_head event_queue;
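USEC_PER_TXOP and USEC_PER_TU, added near the top of this header, encode mac80211's time units: struct ieee80211_tx_queue_params expresses txop in 32 us slices, and beacon/DTIM intervals are counted in TUs of 1024 us. A sketch of the conversions the constants exist for (the helper names are illustrative only):

static u32 wfx_txop_to_us(const struct ieee80211_tx_queue_params *params)
{
	return params->txop * USEC_PER_TXOP;	/* txop is in 32 us units */
}

static u32 wfx_tu_to_us(int tu)
{
	return tu * USEC_PER_TU;		/* 1 TU = 1024 us */
}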
diff --git a/drivers/staging/wilc1000/fw.h b/drivers/staging/wilc1000/fw.h
new file mode 100644
index 000000000000..a76e1dea4345
--- /dev/null
+++ b/drivers/staging/wilc1000/fw.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries.
+ * All rights reserved.
+ */
+
+#ifndef WILC_FW_H
+#define WILC_FW_H
+
+#include <linux/ieee80211.h>
+
+#define WILC_MAX_NUM_STA 9
+#define WILC_MAX_RATES_SUPPORTED 12
+#define WILC_MAX_NUM_PMKIDS 16
+#define WILC_MAX_NUM_SCANNED_CH 14
+
+struct wilc_assoc_resp {
+ __le16 capab_info;
+ __le16 status_code;
+ __le16 aid;
+} __packed;
+
+struct wilc_pmkid {
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[WLAN_PMKID_LEN];
+} __packed;
+
+struct wilc_pmkid_attr {
+ u8 numpmkid;
+ struct wilc_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
+} __packed;
+
+struct wilc_reg_frame {
+ u8 reg;
+ u8 reg_id;
+ __le16 frame_type;
+} __packed;
+
+struct wilc_drv_handler {
+ __le32 handler;
+ u8 mode;
+} __packed;
+
+struct wilc_wep_key {
+ u8 index;
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_sta_wpa_ptk {
+ u8 mac_addr[ETH_ALEN];
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_ap_wpa_ptk {
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_gtk_key {
+ u8 mac_addr[ETH_ALEN];
+ u8 rsc[8];
+ u8 index;
+ u8 key_len;
+ u8 key[0];
+} __packed;
+
+struct wilc_op_mode {
+ __le32 mode;
+} __packed;
+
+struct wilc_noa_opp_enable {
+ u8 ct_window;
+ u8 cnt;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct wilc_noa_opp_disable {
+ u8 cnt;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct wilc_join_bss_param {
+ char ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_terminator;
+ u8 bss_type;
+ u8 ch;
+ __le16 cap_info;
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 beacon_period;
+ u8 dtim_period;
+ u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
+ u8 wmm_cap;
+ u8 uapsd_cap;
+ u8 ht_capable;
+ u8 rsn_found;
+ u8 rsn_grp_policy;
+ u8 mode_802_11i;
+ u8 p_suites[3];
+ u8 akm_suites[3];
+ u8 rsn_cap[2];
+ u8 noa_enabled;
+ __le32 tsf_lo;
+ u8 idx;
+ u8 opp_enabled;
+ union {
+ struct wilc_noa_opp_disable opp_dis;
+ struct wilc_noa_opp_enable opp_en;
+ };
+} __packed;
+#endif
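Several of the key messages above end in a zero-length array (u8 key[0]), so their wire size is the fixed header plus key_len payload bytes. A hedged sketch of how such a message could be built (the helper is hypothetical; struct_size() comes from <linux/overflow.h>):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct wilc_wep_key *wilc_alloc_wep_key(const u8 *key, u8 key_len,
					       u8 index)
{
	struct wilc_wep_key *wep;

	/* fixed header plus key_len trailing bytes, zeroed */
	wep = kzalloc(struct_size(wep, key, key_len), GFP_KERNEL);
	if (!wep)
		return NULL;
	wep->index = index;
	wep->key_len = key_len;
	memcpy(wep->key, key, key_len);
	return wep;
}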
diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c
index 349e45d58ec9..658790bd465b 100644
--- a/drivers/staging/wilc1000/hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -10,7 +10,6 @@
#define WILC_HIF_CONNECT_TIMEOUT_MS 9500
#define WILC_FALSE_FRMWR_CHANNEL 100
-#define WILC_MAX_RATES_SUPPORTED 12
struct wilc_rcvd_mac_info {
u8 status;
@@ -27,48 +26,6 @@ struct wilc_del_all_sta {
u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
};
-struct wilc_op_mode {
- __le32 mode;
-};
-
-struct wilc_reg_frame {
- u8 reg;
- u8 reg_id;
- __le16 frame_type;
-} __packed;
-
-struct wilc_drv_handler {
- __le32 handler;
- u8 mode;
-} __packed;
-
-struct wilc_wep_key {
- u8 index;
- u8 key_len;
- u8 key[0];
-} __packed;
-
-struct wilc_sta_wpa_ptk {
- u8 mac_addr[ETH_ALEN];
- u8 key_len;
- u8 key[0];
-} __packed;
-
-struct wilc_ap_wpa_ptk {
- u8 mac_addr[ETH_ALEN];
- u8 index;
- u8 key_len;
- u8 key[0];
-} __packed;
-
-struct wilc_gtk_key {
- u8 mac_addr[ETH_ALEN];
- u8 rsc[8];
- u8 index;
- u8 key_len;
- u8 key[0];
-} __packed;
-
union wilc_message_body {
struct wilc_rcvd_net_info net_info;
struct wilc_rcvd_mac_info mac_info;
@@ -86,51 +43,6 @@ struct host_if_msg {
bool is_sync;
};
-struct wilc_noa_opp_enable {
- u8 ct_window;
- u8 cnt;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct wilc_noa_opp_disable {
- u8 cnt;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct wilc_join_bss_param {
- char ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_terminator;
- u8 bss_type;
- u8 ch;
- __le16 cap_info;
- u8 sa[ETH_ALEN];
- u8 bssid[ETH_ALEN];
- __le16 beacon_period;
- u8 dtim_period;
- u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
- u8 wmm_cap;
- u8 uapsd_cap;
- u8 ht_capable;
- u8 rsn_found;
- u8 rsn_grp_policy;
- u8 mode_802_11i;
- u8 p_suites[3];
- u8 akm_suites[3];
- u8 rsn_cap[2];
- u8 noa_enabled;
- __le32 tsf_lo;
- u8 idx;
- u8 opp_enabled;
- union {
- struct wilc_noa_opp_disable opp_dis;
- struct wilc_noa_opp_enable opp_en;
- };
-} __packed;
-
/* 'msg' should be freed by the caller for sync calls */
static struct host_if_msg*
wilc_alloc_work(struct wilc_vif *vif, void (*work_fun)(struct work_struct *),
@@ -640,7 +552,7 @@ static s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
{
u8 *ies;
u16 ies_len;
- struct assoc_resp *res = (struct assoc_resp *)buffer;
+ struct wilc_assoc_resp *res = (struct wilc_assoc_resp *)buffer;
ret_conn_info->status = le16_to_cpu(res->status_code);
if (ret_conn_info->status == WLAN_STATUS_SUCCESS) {
diff --git a/drivers/staging/wilc1000/hif.h b/drivers/staging/wilc1000/hif.h
index 22ee6fffd599..db9179171f05 100644
--- a/drivers/staging/wilc1000/hif.h
+++ b/drivers/staging/wilc1000/hif.h
@@ -17,14 +17,11 @@ enum {
WILC_CLIENT_MODE = 0x4
};
-#define WILC_MAX_NUM_STA 9
-#define WILC_MAX_NUM_SCANNED_CH 14
#define WILC_MAX_NUM_PROBED_SSID 10
#define WILC_TX_MIC_KEY_LEN 8
#define WILC_RX_MIC_KEY_LEN 8
-#define WILC_MAX_NUM_PMKIDS 16
#define WILC_ADD_STA_LENGTH 40
#define WILC_NUM_CONCURRENT_IFC 2
@@ -35,12 +32,6 @@ enum {
#define WILC_MAX_ASSOC_RESP_FRAME_SIZE 256
-struct assoc_resp {
- __le16 capab_info;
- __le16 status_code;
- __le16 aid;
-} __packed;
-
struct rf_info {
u8 link_speed;
s8 rssi;
@@ -59,16 +50,6 @@ enum host_if_state {
HOST_IF_FORCE_32BIT = 0xFFFFFFFF
};
-struct wilc_pmkid {
- u8 bssid[ETH_ALEN];
- u8 pmkid[WLAN_PMKID_LEN];
-} __packed;
-
-struct wilc_pmkid_attr {
- u8 numpmkid;
- struct wilc_pmkid pmkidlist[WILC_MAX_NUM_PMKIDS];
-} __packed;
-
struct cfg_param_attr {
u32 flag;
u16 short_retry_limit;
diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/staging/wilc1000/netdev.c
index d2c0b0f7cf63..fce5bf2d82fa 100644
--- a/drivers/staging/wilc1000/netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -96,21 +96,18 @@ void wilc_mac_indicate(struct wilc *wilc)
static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
{
- u8 *bssid, *bssid1;
struct net_device *ndev = NULL;
struct wilc_vif *vif;
-
- bssid = mac_header + 10;
- bssid1 = mac_header + 4;
+ struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
if (vif->mode == WILC_STATION_MODE)
- if (ether_addr_equal_unaligned(bssid, vif->bssid)) {
+ if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
if (vif->mode == WILC_AP_MODE)
- if (ether_addr_equal_unaligned(bssid1, vif->bssid)) {
+ if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
@@ -177,7 +174,7 @@ static int wilc_txq_task(void *vp)
}
srcu_read_unlock(&wl->srcu, srcu_idx);
}
- } while (ret == -ENOBUFS && !wl->close);
+ } while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close);
}
return 0;
}
@@ -186,7 +183,7 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
{
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- int chip_id, ret = 0;
+ int chip_id;
const struct firmware *wilc_firmware;
char *firmware;
@@ -201,14 +198,11 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
netdev_err(dev, "%s - firmware not available\n", firmware);
- ret = -1;
- goto fail;
+ return -EINVAL;
}
wilc->firmware = wilc_firmware;
-fail:
-
- return ret;
+ return 0;
}
static int wilc_start_firmware(struct net_device *dev)
@@ -218,7 +212,7 @@ static int wilc_start_firmware(struct net_device *dev)
int ret = 0;
ret = wilc_wlan_start(wilc);
- if (ret < 0)
+ if (ret)
return ret;
if (!wait_for_completion_timeout(&wilc->sync_event,
@@ -241,7 +235,7 @@ static int wilc1000_firmware_download(struct net_device *dev)
ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data,
wilc->firmware->size);
- if (ret < 0)
+ if (ret)
return ret;
release_firmware(wilc->firmware);
@@ -420,7 +414,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
return 0;
fail:
- return -1;
+ return -EINVAL;
}
static void wlan_deinitialize_threads(struct net_device *dev)
@@ -500,14 +494,12 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
wl->close = 0;
ret = wilc_wlan_init(dev);
- if (ret < 0)
- return -EIO;
+ if (ret)
+ return ret;
ret = wlan_initialize_threads(dev);
- if (ret < 0) {
- ret = -EIO;
+ if (ret)
goto fail_wilc_wlan;
- }
if (wl->gpio_irq && init_irq(dev)) {
ret = -EIO;
@@ -521,22 +513,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
goto fail_irq_init;
}
- if (wilc_wlan_get_firmware(dev)) {
- ret = -EIO;
+ ret = wilc_wlan_get_firmware(dev);
+ if (ret)
goto fail_irq_enable;
- }
ret = wilc1000_firmware_download(dev);
- if (ret < 0) {
- ret = -EIO;
+ if (ret)
goto fail_irq_enable;
- }
ret = wilc_start_firmware(dev);
- if (ret < 0) {
- ret = -EIO;
+ if (ret)
goto fail_irq_enable;
- }
if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
int size;
@@ -548,11 +535,10 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
firmware_ver[size] = '\0';
netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver);
}
- ret = wilc_init_fw_config(dev, vif);
- if (ret < 0) {
+ ret = wilc_init_fw_config(dev, vif);
+ if (ret) {
netdev_err(dev, "Failed to configure firmware\n");
- ret = -EIO;
goto fail_fw_start;
}
wl->initialized = true;
@@ -603,11 +589,11 @@ static int wilc_mac_open(struct net_device *ndev)
netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
ret = wilc_init_host_int(ndev);
- if (ret < 0)
+ if (ret)
return ret;
ret = wilc_wlan_initialize(ndev, vif);
- if (ret < 0) {
+ if (ret) {
wilc_deinit_host_int(ndev);
return ret;
}
@@ -840,7 +826,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
struct wilc_vif *vif;
- int srcu_idx;
+ int srcu_idx, ifc_cnt = 0;
if (!wilc)
return;
@@ -861,7 +847,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
- do {
+ while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
mutex_lock(&wilc->vif_mutex);
if (wilc->vif_num <= 0) {
mutex_unlock(&wilc->vif_mutex);
@@ -874,7 +860,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->vif_num--;
mutex_unlock(&wilc->vif_mutex);
synchronize_srcu(&wilc->srcu);
- } while (1);
+ ifc_cnt++;
+ }
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
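In get_if_handler() above, the raw mac_header + 4 / + 10 offsets become h->addr1 / h->addr2. The two forms are equivalent because of the fixed 802.11 header layout (frame_control and duration_id take 2 bytes each, then addr1 takes 6), which a compile-time check can make explicit; a sketch, not part of the patch:

#include <linux/build_bug.h>
#include <linux/ieee80211.h>
#include <linux/stddef.h>

static_assert(offsetof(struct ieee80211_hdr, addr1) == 4);
static_assert(offsetof(struct ieee80211_hdr, addr2) == 10);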
diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/staging/wilc1000/netdev.h
index cd8f0d72caaa..d5f7a6037fbc 100644
--- a/drivers/staging/wilc1000/netdev.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -21,7 +21,6 @@
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
-#define WILC_MAX_NUM_PMKIDS 16
#define PMKID_FOUND 1
#define NUM_STA_ASSOCIATED 8
diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c
index 319e039380b0..ca99335687c4 100644
--- a/drivers/staging/wilc1000/sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -273,7 +273,7 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10c data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x10d;
@@ -281,7 +281,7 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10d data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x10e;
@@ -289,11 +289,9 @@ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10e data...\n");
- goto fail;
+ return ret;
}
- return 1;
-fail:
return 0;
}
@@ -311,7 +309,7 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x10 data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x11;
@@ -319,11 +317,9 @@ static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x11 data...\n");
- goto fail;
+ return ret;
}
- return 1;
-fail:
return 0;
}
@@ -347,18 +343,16 @@ static int wilc_sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x110 data...\n");
- goto fail;
+ return ret;
}
cmd.address = 0x111;
cmd.data = (u8)(block_size >> 8);
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Failed cmd52, set 0x111 data...\n");
- goto fail;
+ return ret;
}
- return 1;
-fail:
return 0;
}
@@ -384,19 +378,18 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = addr;
cmd.data = data;
ret = wilc_sdio_cmd52(wilc, &cmd);
- if (ret) {
+ if (ret)
dev_err(&func->dev,
"Failed cmd 52, read reg (%08x) ...\n", addr);
- goto fail;
- }
} else {
struct sdio_cmd53 cmd;
/**
* set the AHB address
**/
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
cmd.read_write = 1;
cmd.function = 0;
@@ -407,18 +400,12 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.buffer = (u8 *)&data;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
- if (ret) {
+ if (ret)
dev_err(&func->dev,
"Failed cmd53, write reg (%08x)...\n", addr);
- goto fail;
- }
}
- return 1;
-
-fail:
-
- return 0;
+ return ret;
}
static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -470,14 +457,15 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.buffer = buf;
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], block send...\n", addr);
- goto fail;
+ return ret;
}
if (addr > 0)
addr += nblk * block_size;
@@ -493,21 +481,18 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], bytes send...\n", addr);
- goto fail;
+ return ret;
}
}
- return 1;
-
-fail:
-
return 0;
}
@@ -528,14 +513,15 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
if (ret) {
dev_err(&func->dev,
"Failed cmd 52, read reg (%08x) ...\n", addr);
- goto fail;
+ return ret;
}
*data = cmd.data;
} else {
struct sdio_cmd53 cmd;
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
cmd.read_write = 0;
cmd.function = 0;
@@ -550,16 +536,11 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
if (ret) {
dev_err(&func->dev,
"Failed cmd53, read reg (%08x)...\n", addr);
- goto fail;
+ return ret;
}
}
le32_to_cpus(data);
-
- return 1;
-
-fail:
-
return 0;
}
@@ -612,14 +593,15 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.buffer = buf;
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], block read...\n", addr);
- goto fail;
+ return ret;
}
if (addr > 0)
addr += nblk * block_size;
@@ -635,21 +617,18 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
- if (!wilc_sdio_set_func0_csa_address(wilc, addr))
- goto fail;
+ ret = wilc_sdio_set_func0_csa_address(wilc, addr);
+ if (ret)
+ return ret;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret) {
dev_err(&func->dev,
"Failed cmd53 [%x], bytes read...\n", addr);
- goto fail;
+ return ret;
}
}
- return 1;
-
-fail:
-
return 0;
}
@@ -661,7 +640,7 @@ fail:
static int wilc_sdio_deinit(struct wilc *wilc)
{
- return 1;
+ return 0;
}
static int wilc_sdio_init(struct wilc *wilc, bool resume)
@@ -686,15 +665,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, enable csa...\n");
- goto fail;
+ return ret;
}
/**
* function 0 block size
**/
- if (!wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+ ret = wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+ if (ret) {
dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n");
- goto fail;
+ return ret;
}
sdio_priv->block_size = WILC_SDIO_BLOCK_SIZE;
@@ -710,7 +690,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
if (ret) {
dev_err(&func->dev,
"Fail cmd 52, set IOE register...\n");
- goto fail;
+ return ret;
}
/**
@@ -727,7 +707,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
if (ret) {
dev_err(&func->dev,
"Fail cmd 52, get IOR register...\n");
- goto fail;
+ return ret;
}
if (cmd.data == 0x2)
break;
@@ -735,15 +715,16 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
if (loop <= 0) {
dev_err(&func->dev, "Fail func 1 is not ready...\n");
- goto fail;
+ return -EINVAL;
}
/**
* func 1 is ready, set func 1 block size
**/
- if (!wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+ ret = wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
+ if (ret) {
dev_err(&func->dev, "Fail set func 1 block size...\n");
- goto fail;
+ return ret;
}
/**
@@ -757,16 +738,17 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
dev_err(&func->dev, "Fail cmd 52, set IEN register...\n");
- goto fail;
+ return ret;
}
/**
* make sure we can read back the chip id correctly
**/
if (!resume) {
- if (!wilc_sdio_read_reg(wilc, 0x1000, &chipid)) {
+ ret = wilc_sdio_read_reg(wilc, 0x1000, &chipid);
+ if (ret) {
dev_err(&func->dev, "Fail cmd read chip id...\n");
- goto fail;
+ return ret;
}
dev_err(&func->dev, "chipid (%08x)\n", chipid);
if ((chipid & 0xfff) > 0x2a0)
@@ -777,10 +759,6 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
sdio_priv->has_thrpt_enh3);
}
- return 1;
-
-fail:
-
return 0;
}
@@ -806,7 +784,7 @@ static int wilc_sdio_read_size(struct wilc *wilc, u32 *size)
tmp |= (cmd.data << 8);
*size = tmp;
- return 1;
+ return 0;
}
static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
@@ -865,7 +843,7 @@ static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
*int_status = tmp;
- return 1;
+ return 0;
}
static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
@@ -909,10 +887,10 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf8 data (%d) ...\n",
__LINE__);
- goto fail;
+ return ret;
}
}
- return 1;
+ return 0;
}
if (sdio_priv->irq_gpio) {
/* has_thrpt_enh2 uses register 0xf8 to clear interrupts. */
@@ -926,7 +904,6 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
if (flags) {
int i;
- ret = 1;
for (i = 0; i < sdio_priv->nint; i++) {
if (flags & 1) {
struct sdio_cmd52 cmd;
@@ -942,15 +919,12 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf8 data (%d) ...\n",
__LINE__);
- goto fail;
+ return ret;
}
}
- if (!ret)
- break;
flags >>= 1;
}
- if (!ret)
- goto fail;
+
for (i = sdio_priv->nint; i < MAX_NUM_INT; i++) {
if (flags & 1)
dev_err(&func->dev,
@@ -985,11 +959,9 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
dev_err(&func->dev,
"Failed cmd52, set 0xf6 data (%d) ...\n",
__LINE__);
- goto fail;
+ return ret;
}
}
- return 1;
-fail:
return 0;
}
@@ -1001,12 +973,12 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
if (nint > MAX_NUM_INT) {
dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
- return 0;
+ return -EINVAL;
}
if (nint > MAX_NUN_INT_THRPT_ENH2) {
dev_err(&func->dev,
"Cannot support more than 5 interrupts when has_thrpt_enh2=1.\n");
- return 0;
+ return -EINVAL;
}
sdio_priv->nint = nint;
@@ -1014,15 +986,15 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
/**
* Disable power sequencer
**/
- if (!wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
+ if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
dev_err(&func->dev, "Failed read misc reg...\n");
- return 0;
+ return -EINVAL;
}
reg &= ~BIT(8);
- if (!wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
+ if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
dev_err(&func->dev, "Failed write misc reg...\n");
- return 0;
+ return -EINVAL;
}
if (sdio_priv->irq_gpio) {
@@ -1033,59 +1005,59 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
* interrupt pin mux select
**/
ret = wilc_sdio_read_reg(wilc, WILC_PIN_MUX_0, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed read reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
reg |= BIT(8);
ret = wilc_sdio_write_reg(wilc, WILC_PIN_MUX_0, reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed write reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
/**
* interrupt enable
**/
ret = wilc_sdio_read_reg(wilc, WILC_INTR_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed read reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= BIT((27 + i));
ret = wilc_sdio_write_reg(wilc, WILC_INTR_ENABLE, reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev, "Failed write reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
if (nint) {
ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev,
"Failed read reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&func->dev,
"Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
}
}
- return 1;
+ return 0;
}
/* Global sdio HIF function table */
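Everything in this file moves from the driver's old 1-on-success / 0-on-failure convention to the kernel's usual 0 / negative-errno one, which is what lets the goto fail tails collapse into direct returns. A sketch of the caller pattern that results (the wrapper function is illustrative):

static int example_sdio_setup(struct wilc *wilc)
{
	int ret;

	ret = wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
	if (ret)
		return ret;	/* the errno propagates unchanged */

	return wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE);
}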
diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c
index 55f8757325f0..3ffc7b4fddf6 100644
--- a/drivers/staging/wilc1000/spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -13,7 +13,6 @@
struct wilc_spi {
int crc_off;
int nint;
- int has_thrpt_enh;
};
static const struct wilc_hif_func wilc_hif_spi;
@@ -89,11 +88,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
#define CMD_SINGLE_READ 0xca
#define CMD_RESET 0xcf
-#define N_OK 1
-#define N_FAIL 0
-#define N_RESET -1
-#define N_RETRY -2
-
#define DATA_PKT_SZ_256 256
#define DATA_PKT_SZ_512 512
#define DATA_PKT_SZ_1K 1024
@@ -300,7 +294,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
u32 len2;
u8 rsp;
int len = 0;
- int result = N_OK;
+ int result = 0;
int retry;
u8 crc[2];
@@ -388,11 +382,11 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
break;
default:
- result = N_FAIL;
+ result = -EINVAL;
break;
}
- if (result != N_OK)
+ if (result)
return result;
if (!spi_priv->crc_off)
@@ -425,7 +419,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (len2 > ARRAY_SIZE(wb)) {
dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n",
len2, ARRAY_SIZE(wb));
- return N_FAIL;
+ return -EINVAL;
}
/* zero spi write buffers. */
for (wix = len; wix < len2; wix++)
@@ -434,7 +428,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
dev_err(&spi->dev, "Failed cmd write, bus error...\n");
- return N_FAIL;
+ return -EINVAL;
}
/*
@@ -449,7 +443,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
dev_err(&spi->dev,
"Failed cmd response, cmd (%02x), resp (%02x)\n",
cmd, rsp);
- return N_FAIL;
+ return -EINVAL;
}
/*
@@ -459,7 +453,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (rsp != 0x00) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
rsp);
- return N_FAIL;
+ return -EINVAL;
}
if (cmd == CMD_INTERNAL_READ || cmd == CMD_SINGLE_READ ||
@@ -486,7 +480,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (retry <= 0) {
dev_err(&spi->dev,
"Error, data read response (%02x)\n", rsp);
- return N_RESET;
+ return -EAGAIN;
}
}
@@ -502,7 +496,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
} else {
dev_err(&spi->dev,
"buffer overrun when reading data.\n");
- return N_FAIL;
+ return -EINVAL;
}
if (!spi_priv->crc_off) {
@@ -515,7 +509,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
} else {
dev_err(&spi->dev,
"buffer overrun when reading crc.\n");
- return N_FAIL;
+ return -EINVAL;
}
}
} else if ((cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
@@ -541,7 +535,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed block read, bus err\n");
- return N_FAIL;
+ return -EINVAL;
}
/*
@@ -550,7 +544,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
dev_err(&spi->dev,
"Failed block crc read, bus err\n");
- return N_FAIL;
+ return -EINVAL;
}
ix += nbytes;
@@ -582,14 +576,14 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &rsp, 1)) {
dev_err(&spi->dev,
"Failed resp read, bus err\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
if (((rsp >> 4) & 0xf) == 0xf)
break;
} while (retry--);
- if (result == N_FAIL)
+ if (result)
break;
/*
@@ -598,7 +592,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (wilc_spi_rx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed block read, bus err\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -608,7 +602,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (!spi_priv->crc_off && wilc_spi_rx(wilc, crc, 2)) {
dev_err(&spi->dev,
"Failed block crc read, bus err\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -624,7 +618,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
int ix, nbytes;
- int result = 1;
+ int result = 0;
u8 cmd, order, crc[2] = {0};
/*
@@ -652,7 +646,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
if (wilc_spi_tx(wilc, &cmd, 1)) {
dev_err(&spi->dev,
"Failed data block cmd write, bus error...\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -662,7 +656,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
if (wilc_spi_tx(wilc, &b[ix], nbytes)) {
dev_err(&spi->dev,
"Failed data block write, bus error...\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
@@ -672,7 +666,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
if (!spi_priv->crc_off) {
if (wilc_spi_tx(wilc, crc, 2)) {
dev_err(&spi->dev, "Failed data block crc write, bus error...\n");
- result = N_FAIL;
+ result = -EINVAL;
break;
}
}
@@ -701,7 +695,7 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
cpu_to_le32s(&dat);
result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
0);
- if (result != N_OK)
+ if (result)
dev_err(&spi->dev, "Failed internal write cmd...\n");
return result;
@@ -714,14 +708,14 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
result = spi_cmd_complete(wilc, CMD_INTERNAL_READ, adr, (u8 *)data, 4,
0);
- if (result != N_OK) {
+ if (result) {
dev_err(&spi->dev, "Failed internal read cmd...\n");
- return 0;
+ return result;
}
le32_to_cpus(data);
- return 1;
+ return result;
}
/********************************************
@@ -733,7 +727,7 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data)
static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
- int result = N_OK;
+ int result;
u8 cmd = CMD_SINGLE_WRITE;
u8 clockless = 0;
@@ -745,7 +739,7 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
}
result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless);
- if (result != N_OK)
+ if (result)
dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr);
return result;
@@ -760,29 +754,29 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
* has to be greater than 4
*/
if (size <= 4)
- return 0;
+ return -EINVAL;
result = spi_cmd_complete(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size, 0);
- if (result != N_OK) {
+ if (result) {
dev_err(&spi->dev,
"Failed cmd, write block (%08x)...\n", addr);
- return 0;
+ return result;
}
/*
* Data
*/
result = spi_data_write(wilc, buf, size);
- if (result != N_OK)
+ if (result)
dev_err(&spi->dev, "Failed block data write...\n");
- return 1;
+ return result;
}
static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
- int result = N_OK;
+ int result;
u8 cmd = CMD_SINGLE_READ;
u8 clockless = 0;
@@ -793,14 +787,14 @@ static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
}
result = spi_cmd_complete(wilc, cmd, addr, (u8 *)data, 4, clockless);
- if (result != N_OK) {
+ if (result) {
dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr);
- return 0;
+ return result;
}
le32_to_cpus(data);
- return 1;
+ return 0;
}
static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
@@ -809,15 +803,13 @@ static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
int result;
if (size <= 4)
- return 0;
+ return -EINVAL;
result = spi_cmd_complete(wilc, CMD_DMA_EXT_READ, addr, buf, size, 0);
- if (result != N_OK) {
+ if (result)
dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr);
- return 0;
- }
- return 1;
+ return result;
}
/********************************************
@@ -831,7 +823,7 @@ static int wilc_spi_deinit(struct wilc *wilc)
/*
* TODO:
*/
- return 1;
+ return 0;
}
static int wilc_spi_init(struct wilc *wilc, bool resume)
@@ -841,13 +833,14 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
u32 reg;
u32 chipid;
static int isinit;
+ int ret;
if (isinit) {
- if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
+ ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+ if (ret)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
- return 0;
- }
- return 1;
+
+ return ret;
}
/*
@@ -859,7 +852,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
* way to reset
*/
/* the SPI to its initial value. */
- if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
+ ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+ if (ret) {
/*
* Read failed. Try with CRC off. This might happen when the
* module is removed but the chip isn't reset.
@@ -867,24 +861,26 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->crc_off = 1;
dev_err(&spi->dev,
"Failed read with CRC on, retrying with CRC off\n");
- if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
+ ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg);
+ if (ret) {
/*
* Read failed with both CRC on and off,
* something went bad
*/
dev_err(&spi->dev, "Failed internal read protocol\n");
- return 0;
+ return ret;
}
}
if (spi_priv->crc_off == 0) {
reg &= ~0xc; /* disable crc checking */
reg &= ~0x70;
reg |= (0x5 << 4);
- if (!spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg)) {
+ ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg);
+ if (ret) {
dev_err(&spi->dev,
"[wilc spi %d]: Failed internal write reg\n",
__LINE__);
- return 0;
+ return ret;
}
spi_priv->crc_off = 1;
}
@@ -892,168 +888,35 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
/*
* make sure we can read back the chip id correctly
*/
- if (!wilc_spi_read_reg(wilc, 0x1000, &chipid)) {
+ ret = wilc_spi_read_reg(wilc, 0x1000, &chipid);
+ if (ret) {
dev_err(&spi->dev, "Fail cmd read chip id...\n");
- return 0;
+ return ret;
}
- spi_priv->has_thrpt_enh = 1;
-
isinit = 1;
- return 1;
+ return 0;
}
static int wilc_spi_read_size(struct wilc *wilc, u32 *size)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
int ret;
- if (spi_priv->has_thrpt_enh) {
- ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
- size);
- *size = *size & IRQ_DMA_WD_CNT_MASK;
- } else {
- u32 tmp;
- u32 byte_cnt;
-
- ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE,
- &byte_cnt);
- if (!ret) {
- dev_err(&spi->dev,
- "Failed read WILC_VMM_TO_HOST_SIZE ...\n");
- return ret;
- }
- tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
- *size = tmp;
- }
+ ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, size);
+ *size = *size & IRQ_DMA_WD_CNT_MASK;
return ret;
}
static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
- int ret;
- u32 tmp;
- u32 byte_cnt;
- bool unexpected_irq;
- int j;
- u32 unknown_mask;
- u32 irq_flags;
- int k = IRG_FLAGS_OFFSET + 5;
-
- if (spi_priv->has_thrpt_enh)
- return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
- int_status);
- ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE, &byte_cnt);
- if (!ret) {
- dev_err(&spi->dev,
- "Failed read WILC_VMM_TO_HOST_SIZE ...\n");
- return ret;
- }
- tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
-
- j = 0;
- do {
- wilc_spi_read_reg(wilc, 0x1a90, &irq_flags);
- tmp |= ((irq_flags >> 27) << IRG_FLAGS_OFFSET);
-
- if (spi_priv->nint > 5) {
- wilc_spi_read_reg(wilc, 0x1a94, &irq_flags);
- tmp |= (((irq_flags >> 0) & 0x7) << k);
- }
-
- unknown_mask = ~((1ul << spi_priv->nint) - 1);
-
- unexpected_irq = (tmp >> IRG_FLAGS_OFFSET) & unknown_mask;
- if (unexpected_irq) {
- dev_err(&spi->dev,
- "Unexpected interrupt(2):j=%d,tmp=%x,mask=%x\n",
- j, tmp, unknown_mask);
- }
-
- j++;
- } while (unexpected_irq);
-
- *int_status = tmp;
-
- return ret;
+ return spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE, int_status);
}
static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
- struct wilc_spi *spi_priv = wilc->bus_data;
- int ret;
- u32 flags;
- u32 tbl_ctl;
-
- if (spi_priv->has_thrpt_enh) {
- return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE,
- val);
- }
-
- flags = val & (BIT(MAX_NUM_INT) - 1);
- if (flags) {
- int i;
-
- ret = 1;
- for (i = 0; i < spi_priv->nint; i++) {
- /*
- * No matter what you write 1 or 0,
- * it will clear interrupt.
- */
- if (flags & 1)
- ret = wilc_spi_write_reg(wilc,
- 0x10c8 + i * 4, 1);
- if (!ret)
- break;
- flags >>= 1;
- }
- if (!ret) {
- dev_err(&spi->dev,
- "Failed wilc_spi_write_reg, set reg %x ...\n",
- 0x10c8 + i * 4);
- return ret;
- }
- for (i = spi_priv->nint; i < MAX_NUM_INT; i++) {
- if (flags & 1)
- dev_err(&spi->dev,
- "Unexpected interrupt cleared %d...\n",
- i);
- flags >>= 1;
- }
- }
-
- tbl_ctl = 0;
- /* select VMM table 0 */
- if (val & SEL_VMM_TBL0)
- tbl_ctl |= BIT(0);
- /* select VMM table 1 */
- if (val & SEL_VMM_TBL1)
- tbl_ctl |= BIT(1);
-
- ret = wilc_spi_write_reg(wilc, WILC_VMM_TBL_CTL, tbl_ctl);
- if (!ret) {
- dev_err(&spi->dev, "fail write reg vmm_tbl_ctl...\n");
- return ret;
- }
-
- if (val & EN_VMM) {
- /*
- * enable vmm transfer.
- */
- ret = wilc_spi_write_reg(wilc, WILC_VMM_CORE_CTL, 1);
- if (!ret) {
- dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n");
- return ret;
- }
- }
-
- return ret;
+ return spi_internal_write(wilc, 0xe844 - WILC_SPI_REG_BASE, val);
}
static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
@@ -1065,7 +928,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
if (nint > MAX_NUM_INT) {
dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint);
- return 0;
+ return -EINVAL;
}
spi_priv->nint = nint;
@@ -1074,58 +937,58 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
* interrupt pin mux select
*/
ret = wilc_spi_read_reg(wilc, WILC_PIN_MUX_0, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed read reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
reg |= BIT(8);
ret = wilc_spi_write_reg(wilc, WILC_PIN_MUX_0, reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_PIN_MUX_0);
- return 0;
+ return ret;
}
/*
* interrupt enable
*/
ret = wilc_spi_read_reg(wilc, WILC_INTR_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed read reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= (BIT((27 + i)));
ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR_ENABLE);
- return 0;
+ return ret;
}
if (nint) {
ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed read reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
- if (!ret) {
+ if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
- return 0;
+ return ret;
}
}
- return 1;
+ return 0;
}
/* Global spi HIF function table */
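With spi_cmd_complete() now returning errnos directly, the file-private status codes deleted at the top of this file map onto standard values; summarizing what the hunks above substitute:

/*
 *   N_OK    (1)   ->  0
 *   N_FAIL  (0)   -> -EINVAL
 *   N_RESET (-1)  -> -EAGAIN  (data-response poll exhausted)
 *   N_RETRY (-2)  ->  dropped (no uses appear in this diff)
 */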
diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/staging/wilc1000/wlan.c
index d3de76126b78..601e4d1345d2 100644
--- a/drivers/staging/wilc1000/wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -489,50 +489,44 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
struct wilc_vif *vif;
if (wilc->quit)
- goto out;
+ goto out_update_cnt;
mutex_lock(&wilc->txq_add_to_head_cs);
tqe = wilc_wlan_txq_get_first(wilc);
if (!tqe)
- goto out;
+ goto out_unlock;
dev = tqe->vif->ndev;
wilc_wlan_txq_filter_dup_tcp_ack(dev);
i = 0;
sum = 0;
- do {
- if (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
- if (tqe->type == WILC_CFG_PKT)
- vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
-
- else if (tqe->type == WILC_NET_PKT)
- vmm_sz = ETH_ETHERNET_HDR_OFFSET;
-
- else
- vmm_sz = HOST_HDR_OFFSET;
+ while (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) {
+ if (tqe->type == WILC_CFG_PKT)
+ vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET;
+ else if (tqe->type == WILC_NET_PKT)
+ vmm_sz = ETH_ETHERNET_HDR_OFFSET;
+ else
+ vmm_sz = HOST_HDR_OFFSET;
- vmm_sz += tqe->buffer_size;
+ vmm_sz += tqe->buffer_size;
- if (vmm_sz & 0x3)
- vmm_sz = (vmm_sz + 4) & ~0x3;
+ if (vmm_sz & 0x3)
+ vmm_sz = (vmm_sz + 4) & ~0x3;
- if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
- break;
+ if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
+ break;
- vmm_table[i] = vmm_sz / 4;
- if (tqe->type == WILC_CFG_PKT)
- vmm_table[i] |= BIT(10);
- cpu_to_le32s(&vmm_table[i]);
+ vmm_table[i] = vmm_sz / 4;
+ if (tqe->type == WILC_CFG_PKT)
+ vmm_table[i] |= BIT(10);
+ cpu_to_le32s(&vmm_table[i]);
- i++;
- sum += vmm_sz;
- tqe = wilc_wlan_txq_get_next(wilc, tqe);
- } else {
- break;
- }
- } while (1);
+ i++;
+ sum += vmm_sz;
+ tqe = wilc_wlan_txq_get_next(wilc, tqe);
+ }
if (i == 0)
- goto out;
+ goto out_unlock;
vmm_table[i] = 0x0;
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
@@ -540,7 +534,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
func = wilc->hif_func;
do {
ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
- if (!ret)
+ if (ret)
break;
if ((reg & 0x1) == 0)
@@ -554,7 +548,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
}
} while (!wilc->quit);
- if (!ret)
+ if (ret)
goto out_release_bus;
timeout = 200;
@@ -563,16 +557,16 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
WILC_VMM_TBL_RX_SHADOW_BASE,
(u8 *)vmm_table,
((i + 1) * 4));
- if (!ret)
+ if (ret)
break;
ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x2);
- if (!ret)
+ if (ret)
break;
do {
ret = func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg);
- if (!ret)
+ if (ret)
break;
if ((reg >> 2) & 0x1) {
entries = ((reg >> 3) & 0x3f);
@@ -585,27 +579,27 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
break;
}
- if (!ret)
+ if (ret)
break;
if (entries == 0) {
ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
- if (!ret)
+ if (ret)
break;
reg &= ~BIT(0);
ret = func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg);
- if (!ret)
- break;
- break;
}
- break;
- } while (1);
+ } while (0);
- if (!ret)
+ if (ret)
goto out_release_bus;
if (entries == 0) {
- ret = -ENOBUFS;
+ /*
+ * No VMM space is available in the firmware, so retry sending
+ * the packets from the tx queue.
+ */
+ ret = WILC_VMM_ENTRY_FULL_RETRY;
goto out_release_bus;
}
@@ -664,7 +658,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
ret = func->hif_clear_int_ext(wilc, ENABLE_TX_VMM);
- if (!ret)
+ if (ret)
goto out_release_bus;
ret = func->hif_block_tx_ext(wilc, 0, txb, offset);
@@ -672,9 +666,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
out_release_bus:
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
-out:
+out_unlock:
mutex_unlock(&wilc->txq_add_to_head_cs);
+out_update_cnt:
*txq_count = wilc->txq_entries;
return ret;
}
@@ -725,9 +720,7 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
}
}
offset += tp_len;
- if (offset >= size)
- break;
- } while (1);
+ } while (offset < size);
}
static void wilc_wlan_handle_rxq(struct wilc *wilc)
@@ -736,11 +729,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
u8 *buffer;
struct rxq_entry_t *rqe;
- do {
- if (wilc->quit) {
- complete(&wilc->cfg_event);
- break;
- }
+ while (!wilc->quit) {
rqe = wilc_wlan_rxq_remove(wilc);
if (!rqe)
break;
@@ -750,7 +739,9 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
wilc_wlan_handle_rx_buff(wilc, buffer, size);
kfree(rqe);
- } while (1);
+ }
+ if (wilc->quit)
+ complete(&wilc->cfg_event);
}
static void wilc_unknown_isr_ext(struct wilc *wilc)
@@ -785,7 +776,7 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
wilc->hif_func->hif_clear_int_ext(wilc, DATA_INT_CLR | ENABLE_RX_VMM);
ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
- if (!ret)
+ if (ret)
return;
offset += size;
@@ -846,7 +837,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
memcpy(dma_buffer, &buffer[offset], size2);
ret = wilc->hif_func->hif_block_tx(wilc, addr,
dma_buffer, size2);
- if (!ret)
+ if (ret)
break;
addr += size2;
@@ -855,17 +846,15 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
}
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- if (!ret) {
- ret = -EIO;
+ if (ret)
goto fail;
- }
} while (offset < buffer_size);
fail:
kfree(dma_buffer);
- return (ret < 0) ? ret : 0;
+ return ret;
}
int wilc_wlan_start(struct wilc *wilc)
@@ -882,49 +871,26 @@ int wilc_wlan_start(struct wilc *wilc)
}
acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg);
- if (!ret) {
+ if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return -EIO;
+ return ret;
}
reg = 0;
if (wilc->io_type == WILC_HIF_SDIO && wilc->dev_irq_num)
reg |= WILC_HAVE_SDIO_IRQ_GPIO;
-#ifdef WILC_DISABLE_PMU
-#else
- reg |= WILC_HAVE_USE_PMU;
-#endif
-
-#ifdef WILC_SLEEP_CLK_SRC_XO
- reg |= WILC_HAVE_SLEEP_CLK_SRC_XO;
-#elif defined WILC_SLEEP_CLK_SRC_RTC
- reg |= WILC_HAVE_SLEEP_CLK_SRC_RTC;
-#endif
-
-#ifdef WILC_EXT_PA_INV_TX_RX
- reg |= WILC_HAVE_EXT_PA_INV_TX_RX;
-#endif
- reg |= WILC_HAVE_USE_IRQ_AS_HOST_WAKE;
- reg |= WILC_HAVE_LEGACY_RF_SETTINGS;
-#ifdef XTAL_24
- reg |= WILC_HAVE_XTAL_24;
-#endif
-#ifdef DISABLE_WILC_UART
- reg |= WILC_HAVE_DISABLE_WILC_UART;
-#endif
-
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg);
- if (!ret) {
+ if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return -EIO;
+ return ret;
}
wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT);
ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
- if (!ret) {
+ if (ret) {
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return -EIO;
+ return ret;
}
wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
@@ -939,7 +905,7 @@ int wilc_wlan_start(struct wilc *wilc)
wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
- return (ret < 0) ? ret : 0;
+ return ret;
}
int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
@@ -950,33 +916,33 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
(reg | WILC_ABORT_REQ_BIT));
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
reg = BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
- if (!ret) {
+ if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return -EIO;
+ return ret;
}
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
@@ -992,21 +958,14 @@ void wilc_wlan_cleanup(struct net_device *dev)
struct wilc *wilc = vif->wilc;
wilc->quit = 1;
- do {
- tqe = wilc_wlan_txq_remove_from_head(dev);
- if (!tqe)
- break;
+ while ((tqe = wilc_wlan_txq_remove_from_head(dev))) {
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv, 0);
kfree(tqe);
- } while (1);
+ }
- do {
- rqe = wilc_wlan_rxq_remove(wilc);
- if (!rqe)
- break;
+ while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
- } while (1);
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
@@ -1156,10 +1115,11 @@ int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
return ret;
}
-static u32 init_chip(struct net_device *dev)
+static int init_chip(struct net_device *dev)
{
u32 chipid;
- u32 reg, ret = 0;
+ u32 reg;
+ int ret = 0;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
@@ -1169,18 +1129,18 @@ static u32 init_chip(struct net_device *dev)
if ((chipid & 0xfff) != 0xa0) {
ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
- if (!ret) {
+ if (ret) {
netdev_err(dev, "fail read reg 0x1118\n");
goto release;
}
reg |= BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
- if (!ret) {
+ if (ret) {
netdev_err(dev, "fail write reg 0x1118\n");
goto release;
}
ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
- if (!ret) {
+ if (ret) {
netdev_err(dev, "fail write reg 0xc0000\n");
goto release;
}
@@ -1230,7 +1190,7 @@ int wilc_wlan_init(struct net_device *dev)
wilc->quit = 0;
- if (!wilc->hif_func->hif_init(wilc, false)) {
+ if (wilc->hif_func->hif_init(wilc, false)) {
ret = -EIO;
goto fail;
}
@@ -1251,12 +1211,12 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}
- if (!init_chip(dev)) {
+ if (init_chip(dev)) {
ret = -EIO;
goto fail;
}
- return 1;
+ return 0;
fail:
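The entries == 0 case now returns the positive WILC_VMM_ENTRY_FULL_RETRY instead of -ENOBUFS, so a full firmware VMM table is distinguishable from a hard error. The txq thread in netdev.c (earlier in this patch) keys its retry loop on exactly that value; simplified:

do {
	ret = wilc_wlan_handle_txq(wl, &txq_count);
	/* positive WILC_VMM_ENTRY_FULL_RETRY: no VMM space yet, go again;
	 * zero or a negative errno: done, or a real failure */
} while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close);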
diff --git a/drivers/staging/wilc1000/wlan.h b/drivers/staging/wilc1000/wlan.h
index 1f6957cf2e9c..8c4634262adb 100644
--- a/drivers/staging/wilc1000/wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -197,6 +197,8 @@
#define IS_MANAGMEMENT_CALLBACK 0x080
#define IS_MGMT_STATUS_SUCCES 0x040
+#define WILC_WID_TYPE GENMASK(15, 12)
+#define WILC_VMM_ENTRY_FULL_RETRY 1
/********************************************
*
* Tx/Rx Queue Structure
diff --git a/drivers/staging/wilc1000/wlan_cfg.c b/drivers/staging/wilc1000/wlan_cfg.c
index 6f6b286788d1..fe2a7ed8e5cd 100644
--- a/drivers/staging/wilc1000/wlan_cfg.c
+++ b/drivers/staging/wilc1000/wlan_cfg.c
@@ -4,6 +4,7 @@
* All rights reserved.
*/
+#include <linux/bitfield.h>
#include "wlan_if.h"
#include "wlan.h"
#include "wlan_cfg.h"
@@ -132,75 +133,54 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
*
********************************************/
-#define GET_WID_TYPE(wid) (((wid) >> 12) & 0x7)
static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
{
u16 wid;
u32 len = 0, i = 0;
+ struct wilc_cfg *cfg = &wl->cfg;
while (size > 0) {
i = 0;
wid = get_unaligned_le16(info);
- switch (GET_WID_TYPE(wid)) {
+ switch (FIELD_GET(WILC_WID_TYPE, wid)) {
case WID_CHAR:
- do {
- if (wl->cfg.b[i].id == WID_NIL)
- break;
-
- if (wl->cfg.b[i].id == wid) {
- wl->cfg.b[i].val = info[4];
- break;
- }
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->b[i].id == wid)
+ cfg->b[i].val = info[4];
+
len = 3;
break;
case WID_SHORT:
- do {
- struct wilc_cfg_hword *hw = &wl->cfg.hw[i];
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
+ i++;
- if (hw->id == WID_NIL)
- break;
+ if (cfg->hw[i].id == wid)
+ cfg->hw[i].val = get_unaligned_le16(&info[4]);
- if (hw->id == wid) {
- hw->val = get_unaligned_le16(&info[4]);
- break;
- }
- i++;
- } while (1);
len = 4;
break;
case WID_INT:
- do {
- struct wilc_cfg_word *w = &wl->cfg.w[i];
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
+ i++;
- if (w->id == WID_NIL)
- break;
+ if (cfg->w[i].id == wid)
+ cfg->w[i].val = get_unaligned_le32(&info[4]);
- if (w->id == wid) {
- w->val = get_unaligned_le32(&info[4]);
- break;
- }
- i++;
- } while (1);
len = 6;
break;
case WID_STR:
- do {
- if (wl->cfg.s[i].id == WID_NIL)
- break;
-
- if (wl->cfg.s[i].id == wid) {
- memcpy(wl->cfg.s[i].str, &info[2],
- (info[2] + 2));
- break;
- }
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->s[i].id == wid)
+ memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+
len = 2 + info[2];
break;
@@ -223,16 +203,12 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
if (len == 1 && wid == WID_STATUS) {
int i = 0;
- do {
- if (wl->cfg.b[i].id == WID_NIL)
- break;
-
- if (wl->cfg.b[i].id == wid) {
- wl->cfg.b[i].val = info[3];
- break;
- }
+ while (wl->cfg.b[i].id != WID_NIL &&
+ wl->cfg.b[i].id != wid)
i++;
- } while (1);
+
+ if (wl->cfg.b[i].id == wid)
+ wl->cfg.b[i].val = info[3];
}
}
@@ -244,7 +220,7 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size)
{
- u8 type = (id >> 12) & 0xf;
+ u8 type = FIELD_GET(WILC_WID_TYPE, id);
int ret = 0;
switch (type) {
@@ -290,65 +266,47 @@ int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id)
int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
u32 buffer_size)
{
- u32 type = (wid >> 12) & 0xf;
+ u8 type = FIELD_GET(WILC_WID_TYPE, wid);
int i, ret = 0;
+ struct wilc_cfg *cfg = &wl->cfg;
i = 0;
if (type == CFG_BYTE_CMD) {
- do {
- if (wl->cfg.b[i].id == WID_NIL)
- break;
-
- if (wl->cfg.b[i].id == wid) {
- memcpy(buffer, &wl->cfg.b[i].val, 1);
- ret = 1;
- break;
- }
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->b[i].id == wid) {
+ memcpy(buffer, &cfg->b[i].val, 1);
+ ret = 1;
+ }
} else if (type == CFG_HWORD_CMD) {
- do {
- if (wl->cfg.hw[i].id == WID_NIL)
- break;
-
- if (wl->cfg.hw[i].id == wid) {
- memcpy(buffer, &wl->cfg.hw[i].val, 2);
- ret = 2;
- break;
- }
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
- } while (1);
+
+ if (cfg->hw[i].id == wid) {
+ memcpy(buffer, &cfg->hw[i].val, 2);
+ ret = 2;
+ }
} else if (type == CFG_WORD_CMD) {
- do {
- if (wl->cfg.w[i].id == WID_NIL)
- break;
-
- if (wl->cfg.w[i].id == wid) {
- memcpy(buffer, &wl->cfg.w[i].val, 4);
- ret = 4;
- break;
- }
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
- } while (1);
- } else if (type == CFG_STR_CMD) {
- do {
- u32 id = wl->cfg.s[i].id;
- if (id == WID_NIL)
- break;
+ if (cfg->w[i].id == wid) {
+ memcpy(buffer, &cfg->w[i].val, 4);
+ ret = 4;
+ }
+ } else if (type == CFG_STR_CMD) {
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
+ i++;
- if (id == wid) {
- u16 size = get_unaligned_le16(wl->cfg.s[i].str);
+ if (cfg->s[i].id == wid) {
+ u16 size = get_unaligned_le16(cfg->s[i].str);
- if (buffer_size >= size) {
- memcpy(buffer, &wl->cfg.s[i].str[2],
- size);
- ret = size;
- }
- break;
+ if (buffer_size >= size) {
+ memcpy(buffer, &cfg->s[i].str[2], size);
+ ret = size;
}
- i++;
- } while (1);
+ }
}
return ret;
}
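The FIELD_GET()/GENMASK() pair used throughout this file replaces the old open-coded shift-and-mask: WILC_WID_TYPE names bits 15:12 of a WID, and FIELD_GET() masks and shifts in one step. A self-contained illustration (the sample value is arbitrary):

#include <linux/bitfield.h>
#include <linux/types.h>

#define WILC_WID_TYPE	GENMASK(15, 12)	/* bits 15:12 hold the WID type */

static u8 wid_type(u16 wid)
{
	return FIELD_GET(WILC_WID_TYPE, wid);
}

/* wid_type(0x2002) == 0x2, i.e. (0x2002 & 0xf000) >> 12 */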
diff --git a/drivers/staging/wilc1000/wlan_if.h b/drivers/staging/wilc1000/wlan_if.h
index 7c7ee66c35f5..f85fd575136d 100644
--- a/drivers/staging/wilc1000/wlan_if.h
+++ b/drivers/staging/wilc1000/wlan_if.h
@@ -8,6 +8,7 @@
#define WILC_WLAN_IF_H
#include <linux/netdevice.h>
+#include "fw.h"
/********************************************
*
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a70fb84f38f1..b809c0015c0c 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -101,7 +101,7 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev);
static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd);
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
-static void p80211knetdev_tx_timeout(struct net_device *netdev);
+static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);
int wlan_watchdog = 5000;
@@ -1074,7 +1074,7 @@ static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
return drop;
}
-static void p80211knetdev_tx_timeout(struct net_device *netdev)
+static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct wlandevice *wlandev = netdev->ml_priv;
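The new unsigned int txqueue parameter follows a v5.6 core change: .ndo_tx_timeout handlers now receive the index of the queue whose watchdog fired. A driver with no per-queue state, like this one, can simply ignore it; a minimal sketch with a hypothetical handler name:

static void example_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	netdev_warn(netdev, "TX watchdog fired on queue %u\n", txqueue);
	/* queue-kick or reset logic would go here */
}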
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 7350fe5d96a3..a8860d2aee68 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -959,7 +959,7 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
}
}
- return 0;
+ return result;
}
/*----------------------------------------------------------------
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7251a87bb576..b94ed4e30770 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4149,9 +4149,6 @@ int iscsit_close_connection(
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
- if (conn->conn_transport->iscsit_wait_conn)
- conn->conn_transport->iscsit_wait_conn(conn);
-
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@@ -4237,6 +4234,9 @@ int iscsit_close_connection(
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 6949ea8bc387..51ffd5c002de 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
}
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
- bip_set_seed(bip, bio->bi_iter.bi_sector);
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
(unsigned long long)bip->bip_iter.bi_sector);
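The shift converts the 512-byte sector number into protection-interval
units before seeding the integrity payload. A worked example, assuming a
4096-byte protection interval (bi->interval_exp = 12, SECTOR_SHIFT = 9):

  /* Illustration only: sector 2048 in 512-byte units falls in
   * protection interval 2048 >> (12 - 9) = 256.
   */
  sector_t sector = 2048;
  u32 seed = sector >> (12 - SECTOR_SHIFT);   /* 256 */

Without the shift, a device whose protection interval is larger than one
sector would be seeded with a value in the wrong units.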
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1354a157e9af..6a38ff936389 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -221,7 +221,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
ep = fc_seq_exch(seq);
lport = ep->lp;
if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
BUG_ON(!lport);
/*
* Since DDP (Large Rx offload) was setup for this request,
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c
index 16b5bae63c74..d45f2c1ff341 100644
--- a/drivers/tc/tc-driver.c
+++ b/drivers/tc/tc-driver.c
@@ -56,8 +56,8 @@ EXPORT_SYMBOL(tc_unregister_driver);
* system is in its list of supported devices. Returns the matching
* tc_device_id structure or %NULL if there is no match.
*/
-const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
- struct tc_dev *tdev)
+static const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
+ struct tc_dev *tdev)
{
const struct tc_device_id *id = tdrv->id_table;
@@ -71,7 +71,6 @@ const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
}
return NULL;
}
-EXPORT_SYMBOL(tc_match_device);
/**
* tc_bus_match - Tell if a device structure has a matching
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index cf3fad2cb871..c5b17dd8f587 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -47,7 +47,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
for (slot = 0; slot < tbus->num_tcslots; slot++) {
slotaddr = tbus->slot_base + slot * slotsize;
extslotaddr = tbus->ext_slot_base + slot * extslotsize;
- module = ioremap_nocache(slotaddr, slotsize);
+ module = ioremap(slotaddr, slotsize);
BUG_ON(!module);
offset = TC_OLDCARD;
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 676ffcb64985..8da63f38e6bd 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -2,7 +2,7 @@
# Generic Trusted Execution Environment Configuration
config TEE
tristate "Trusted Execution Environment support"
- depends on HAVE_ARM_SMCCC || COMPILE_TEST
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -14,7 +14,7 @@ if TEE
menu "TEE drivers"
source "drivers/tee/optee/Kconfig"
-
+source "drivers/tee/amdtee/Kconfig"
endmenu
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 21f51fd88b07..68da044afbfa 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -4,3 +4,4 @@ tee-objs += tee_core.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
+obj-$(CONFIG_AMDTEE) += amdtee/
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
new file mode 100644
index 000000000000..4e32b6413b41
--- /dev/null
+++ b/drivers/tee/amdtee/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+# AMD-TEE Trusted Execution Environment Configuration
+config AMDTEE
+ tristate "AMD-TEE"
+ default m
+ depends on CRYPTO_DEV_SP_PSP
+ help
+ This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile
new file mode 100644
index 000000000000..ff1485266117
--- /dev/null
+++ b/drivers/tee/amdtee/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+obj-$(CONFIG_AMDTEE) += amdtee.o
+amdtee-objs += core.o
+amdtee-objs += call.o
+amdtee-objs += shm_pool.o
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
new file mode 100644
index 000000000000..ff48c3e47375
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+/*
+ * This file contains definitions for the interface between the host and the
+ * AMD-TEE Trusted OS.
+ * These definitions must match the definitions on the TEE side.
+ */
+
+#ifndef AMDTEE_IF_H
+#define AMDTEE_IF_H
+
+#include <linux/types.h>
+
+/*****************************************************************************
+ ** TEE Param
+ ******************************************************************************/
+#define TEE_MAX_PARAMS 4
+
+/**
+ * struct memref - memory reference structure
+ * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ * @offset: offset in bytes from beginning of the buffer
+ * @size: data size in bytes
+ */
+struct memref {
+ u32 buf_id;
+ u32 offset;
+ u32 size;
+};
+
+struct value {
+ u32 a;
+ u32 b;
+};
+
+/*
+ * Parameters passed to open_session or invoke_command
+ */
+union tee_op_param {
+ struct memref mref;
+ struct value val;
+};
+
+struct tee_operation {
+ u32 param_types;
+ union tee_op_param params[TEE_MAX_PARAMS];
+};
+
+/* Must be same as in GP TEE specification */
+#define TEE_OP_PARAM_TYPE_NONE 0
+#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1
+#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2
+#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3
+#define TEE_OP_PARAM_TYPE_INVALID 4
+#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5
+#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6
+#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7
+
+#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF)
+#define TEE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
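Each parameter type occupies one nibble of param_types, so the four
possible parameters pack into the low 16 bits. A small sketch of how the
two macros round-trip:

  u32 types = TEE_PARAM_TYPES(TEE_OP_PARAM_TYPE_VALUE_INPUT,
                              TEE_OP_PARAM_TYPE_MEMREF_OUTPUT,
                              TEE_OP_PARAM_TYPE_NONE,
                              TEE_OP_PARAM_TYPE_NONE);
  /* types == 0x0061 */
  u32 t = TEE_PARAM_TYPE_GET(types, 1);   /* 6: MEMREF_OUTPUT */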
+
+/*****************************************************************************
+ ** TEE Commands
+ *****************************************************************************/
+
+/*
+ * The shared memory between the rich world and the secure world may be
+ * physically non-contiguous. The structures below describe such a shared
+ * memory region via a scatter/gather (sg) list.
+ */
+
+/**
+ * struct tee_sg_desc - sg descriptor for a physically contiguous buffer
+ * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned
+ * @hi_addr: [in] bits[63:32] of the buffer's physical address
+ * @size: [in] size in bytes (must be multiple of 4KB)
+ */
+struct tee_sg_desc {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+};
+
+/**
+ * struct tee_sg_list - structure describing a scatter/gather list
+ * @count: [in] number of sg descriptors
+ * @size: [in] total size of all buffers in the list. Must be multiple of 4KB
+ * @buf: [in] list of sg buffer descriptors
+ */
+#define TEE_MAX_SG_DESC 64
+struct tee_sg_list {
+ u32 count;
+ u32 size;
+ struct tee_sg_desc buf[TEE_MAX_SG_DESC];
+};
+
+/**
+ * struct tee_cmd_map_shared_mem - command to map shared memory
+ * @buf_id: [out] return buffer ID value
+ * @sg_list: [in] list describing memory to be mapped
+ */
+struct tee_cmd_map_shared_mem {
+ u32 buf_id;
+ struct tee_sg_list sg_list;
+};
+
+/**
+ * struct tee_cmd_unmap_shared_mem - command to unmap shared memory
+ * @buf_id: [in] buffer ID of memory to be unmapped
+ */
+struct tee_cmd_unmap_shared_mem {
+ u32 buf_id;
+};
+
+/**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ */
+struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment
+ * @ta_handle: [in] handle of the loaded TA to be unloaded
+ */
+struct tee_cmd_unload_ta {
+ u32 ta_handle;
+};
+
+/**
+ * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [out] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_open_session {
+ u32 ta_handle;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+/**
+ * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint()
+ * in TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @session_info: [in] pointer to TA allocated session data
+ */
+struct tee_cmd_close_session {
+ u32 ta_handle;
+ u32 session_info;
+};
+
+/**
+ * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in
+ * TA
+ * @ta_handle: [in] handle of the loaded TA
+ * @cmd_id: [in] TA command ID
+ * @session_info: [in] pointer to TA allocated session data
+ * @op: [in/out] operation parameters
+ * @return_origin: [out] origin of return code after TEE processing
+ */
+struct tee_cmd_invoke_cmd {
+ u32 ta_handle;
+ u32 cmd_id;
+ u32 session_info;
+ struct tee_operation op;
+ u32 return_origin;
+};
+
+#endif /*AMDTEE_IF_H*/
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
new file mode 100644
index 000000000000..d7f798c3394b
--- /dev/null
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#ifndef AMDTEE_PRIVATE_H
+#define AMDTEE_PRIVATE_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include "amdtee_if.h"
+
+#define DRIVER_NAME "amdtee"
+#define DRIVER_AUTHOR "AMD-TEE Linux driver team"
+
+/* Some GlobalPlatform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_GENERIC 0xFFFF0000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+/* Maximum number of sessions which can be opened with a Trusted Application */
+#define TEE_NUM_SESSIONS 32
+
+#define TA_LOAD_PATH "/amdtee"
+#define TA_PATH_MAX 60
+
+/**
+ * struct amdtee - main service struct
+ * @teedev: client device
+ * @pool: shared memory pool
+ */
+struct amdtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+/**
+ * struct amdtee_session - Trusted Application (TA) session related information.
+ * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment
+ * @refcount: counter to keep track of sessions opened for the TA instance
+ * @session_info: an array pointing to TA allocated session data.
+ * @sess_mask: session usage bit-mask. If a particular bit is set, then the
+ * corresponding @session_info entry is in use or valid.
+ *
+ * The session structure is updated on open_session, and this information is
+ * used for subsequent operations with the Trusted Application.
+ */
+struct amdtee_session {
+ struct list_head list_node;
+ u32 ta_handle;
+ struct kref refcount;
+ u32 session_info[TEE_NUM_SESSIONS];
+ DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS);
+ spinlock_t lock; /* synchronizes access to @sess_mask */
+};
+
+/**
+ * struct amdtee_context_data - AMD-TEE driver context data
+ * @sess_list: Keeps track of sessions opened in current TEE context
+ */
+struct amdtee_context_data {
+ struct list_head sess_list;
+};
+
+struct amdtee_driver_data {
+ struct amdtee *amdtee;
+};
+
+struct shmem_desc {
+ void *kaddr;
+ u64 size;
+};
+
+/**
+ * struct amdtee_shm_data - Shared memory data
+ * @kaddr: Kernel virtual address of shared memory
+ * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM
+ */
+struct amdtee_shm_data {
+ struct list_head shm_node;
+ void *kaddr;
+ u32 buf_id;
+};
+
+struct amdtee_shm_context {
+ struct list_head shmdata_list;
+};
+
+#define LOWER_TWO_BYTE_MASK 0x0000FFFF
+
+/**
+ * set_session_id() - Sets the session identifier.
+ * @ta_handle: [in] handle of the loaded Trusted Application (TA)
+ * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1).
+ * @session: [out] Pointer to session id
+ *
+ * Lower two bytes of the session identifier represents the TA handle and the
+ * upper two bytes is session index.
+ */
+static inline void set_session_id(u32 ta_handle, u32 session_index,
+ u32 *session)
+{
+ *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle);
+}
+
+static inline u32 get_ta_handle(u32 session)
+{
+ return session & LOWER_TWO_BYTE_MASK;
+}
+
+static inline u32 get_session_index(u32 session)
+{
+ return (session >> 16) & LOWER_TWO_BYTE_MASK;
+}
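Taken together, these helpers pack a TA handle and a session index into
one 32-bit session identifier and recover both halves again. A round-trip
sketch with illustrative values:

  u32 session;

  set_session_id(0x1234, 5, &session);   /* session == 0x00051234 */
  /* get_ta_handle(session)     == 0x1234 */
  /* get_session_index(session) == 5     */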
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+int amdtee_close_session(struct tee_context *ctx, u32 session);
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+int amdtee_map_shmem(struct tee_shm *shm);
+
+void amdtee_unmap_shmem(struct tee_shm *shm);
+
+int handle_load_ta(void *data, u32 size,
+ struct tee_ioctl_open_session_arg *arg);
+
+int handle_unload_ta(u32 ta_handle);
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p);
+
+int handle_close_session(u32 ta_handle, u32 info);
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id);
+
+void handle_unmap_shmem(u32 buf_id);
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p);
+
+struct tee_shm_pool *amdtee_config_shm(void);
+
+u32 get_buffer_id(struct tee_shm *shm);
+#endif /*AMDTEE_PRIVATE_H*/
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
new file mode 100644
index 000000000000..096dd4d92d39
--- /dev/null
+++ b/drivers/tee/amdtee/call.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-tee.h>
+#include <linux/slab.h>
+#include <linux/psp-sev.h>
+#include "amdtee_if.h"
+#include "amdtee_private.h"
+
+static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ amd->param_types = 0;
+ for (i = 0; i < count; i++) {
+ /* AMD TEE does not support meta parameters */
+ if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
+ }
+
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE)
+ continue;
+
+ /* It is assumed that all values are within 2^32-1 */
+ if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
+ u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
+
+ amd->params[i].mref.buf_id = buf_id;
+ amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
+ amd->params[i].mref.size = tee[i].u.memref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ if (tee[i].u.value.c)
+ pr_warn("%s: Discarding value c", __func__);
+
+ amd->params[i].val.a = tee[i].u.value.a;
+ amd->params[i].val.b = tee[i].u.value.b;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
+ struct tee_operation *amd)
+{
+ int i, ret = 0;
+ u32 type;
+
+ if (!count)
+ return 0;
+
+ if (!tee || !amd || count > TEE_MAX_PARAMS)
+ return -EINVAL;
+
+ /* Assumes amd->param_types is valid */
+ for (i = 0; i < count; i++) {
+ type = TEE_PARAM_TYPE_GET(amd->param_types, i);
+ pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
+
+ if (type == TEE_OP_PARAM_TYPE_INVALID ||
+ type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
+ return -EINVAL;
+
+ if (type == TEE_OP_PARAM_TYPE_NONE ||
+ type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
+ type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
+ continue;
+
+ /*
+ * It is assumed that buf_id remains unchanged for
+ * both open_session and invoke_cmd calls
+ */
+ if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
+ tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
+ tee[i].u.memref.size = amd->params[i].mref.size;
+ pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].mref.buf_id,
+ i, amd->params[i].mref.offset,
+ i, amd->params[i].mref.size);
+ } else {
+ /* field 'c' not supported by AMD TEE */
+ tee[i].u.value.a = amd->params[i].val.a;
+ tee[i].u.value.b = amd->params[i].val.b;
+ tee[i].u.value.c = 0;
+ pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
+ __func__,
+ i, amd->params[i].val.a,
+ i, amd->params[i].val.b);
+ }
+ }
+ return ret;
+}
+
+int handle_unload_ta(u32 ta_handle)
+{
+ struct tee_cmd_unload_ta cmd = {0};
+ u32 status;
+ int ret;
+
+ if (!ta_handle)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("unload ta: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+int handle_close_session(u32 ta_handle, u32 info)
+{
+ struct tee_cmd_close_session cmd = {0};
+ u32 status;
+ int ret;
+
+ if (ta_handle == 0)
+ return -EINVAL;
+
+ cmd.ta_handle = ta_handle;
+ cmd.session_info = info;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret && status != 0) {
+ pr_err("close session: status = 0x%x\n", status);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void handle_unmap_shmem(u32 buf_id)
+{
+ struct tee_cmd_unmap_shared_mem cmd = {0};
+ u32 status;
+ int ret;
+
+ cmd.buf_id = buf_id;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
+ sizeof(cmd), &status);
+ if (!ret)
+ pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
+ buf_id, status);
+}
+
+int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
+ struct tee_param *p)
+{
+ struct tee_cmd_invoke_cmd cmd = {0};
+ int ret;
+
+ if (!arg || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort invoke command\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ cmd.cmd_id = arg->func;
+ cmd.session_info = sinfo;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("invoke command: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
+ arg->ret_origin, arg->ret);
+ }
+
+ return ret;
+}
+
+int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
+{
+ struct tee_cmd_map_shared_mem *cmd;
+ phys_addr_t paddr;
+ int ret, i;
+ u32 status;
+
+ if (!count || !start || !buf_id)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* Size must be page aligned */
+ for (i = 0; i < count ; i++) {
+ if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+
+ if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
+ pr_err("map shared memory: page unaligned. addr 0x%llx",
+ (u64)start[i].kaddr);
+ ret = -EINVAL;
+ goto free_cmd;
+ }
+ }
+
+ cmd->sg_list.count = count;
+
+ /* Create buffer list */
+ for (i = 0; i < count ; i++) {
+ paddr = __psp_pa(start[i].kaddr);
+ cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
+ cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
+ cmd->sg_list.buf[i].size = start[i].size;
+ cmd->sg_list.size += cmd->sg_list.buf[i].size;
+
+ pr_debug("buf[%d]:hi addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].hi_addr);
+ pr_debug("buf[%d]:low addr = 0x%x\n", i,
+ cmd->sg_list.buf[i].low_addr);
+ pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
+ pr_debug("list size = 0x%x\n", cmd->sg_list.size);
+ }
+
+ *buf_id = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
+ sizeof(*cmd), &status);
+ if (!ret && !status) {
+ *buf_id = cmd->buf_id;
+ pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
+ } else {
+ pr_err("map shared memory: status = 0x%x\n", status);
+ ret = -ENOMEM;
+ }
+
+free_cmd:
+ kfree(cmd);
+
+ return ret;
+}
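Each sg descriptor covers one physically contiguous, 4KB-aligned chunk,
with the 64-bit physical address split across two 32-bit fields as the
interface requires. A sketch of filling a single descriptor, assuming
paddr and len are supplied by the caller:

  struct tee_sg_desc desc = {
      .hi_addr  = upper_32_bits(paddr),
      .low_addr = lower_32_bits(paddr),
      .size     = len,              /* must be a multiple of 4KB */
  };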
+
+int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
+ struct tee_param *p)
+{
+ struct tee_cmd_open_session cmd = {0};
+ int ret;
+
+ if (!arg || !info || (!p && arg->num_params))
+ return -EINVAL;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->session == 0) {
+ arg->ret = TEEC_ERROR_GENERIC;
+ return -EINVAL;
+ }
+
+ ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
+ if (ret) {
+ pr_err("invalid Params. Abort open session\n");
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return ret;
+ }
+
+ cmd.ta_handle = get_ta_handle(arg->session);
+ *info = 0;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
+ if (unlikely(ret)) {
+ pr_err("open session: failed to copy output\n");
+ arg->ret = TEEC_ERROR_GENERIC;
+ return ret;
+ }
+ arg->ret_origin = cmd.return_origin;
+ *info = cmd.session_info;
+ pr_debug("open session: session info = 0x%x\n", *info);
+ }
+
+ pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
+ arg->ret_origin);
+
+ return ret;
+}
+
+int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+{
+ struct tee_cmd_load_ta cmd = {0};
+ phys_addr_t blob;
+ int ret;
+
+ if (size == 0 || !data || !arg)
+ return -EINVAL;
+
+ blob = __psp_pa(data);
+ if (blob & (PAGE_SIZE - 1)) {
+ pr_err("load TA: page unaligned. blob 0x%llx", blob);
+ return -EINVAL;
+ }
+
+ cmd.hi_addr = upper_32_bits(blob);
+ cmd.low_addr = lower_32_bits(blob);
+ cmd.size = size;
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
+ sizeof(cmd), &arg->ret);
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ set_session_id(cmd.ta_handle, 0, &arg->session);
+ }
+
+ pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
+ cmd.ta_handle, arg->ret_origin, arg->ret);
+
+ return 0;
+}
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
new file mode 100644
index 000000000000..6370bb55f512
--- /dev/null
+++ b/drivers/tee/amdtee/core.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdtee_private.h"
+#include "../tee_private.h"
+#include <linux/psp-tee.h>
+
+static struct amdtee_driver_data *drv_data;
+static DEFINE_MUTEX(session_list_mutex);
+static struct amdtee_shm_context shmctx;
+
+static void amdtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_AMDTEE,
+ .impl_caps = 0,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int amdtee_open(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+ INIT_LIST_HEAD(&shmctx.shmdata_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void release_session(struct amdtee_session *sess)
+{
+ int i;
+
+ /* Close any open session */
+ for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
+ /* Check if session entry 'i' is valid */
+ if (!test_bit(i, sess->sess_mask))
+ continue;
+
+ handle_close_session(sess->ta_handle, sess->session_info[i]);
+ }
+
+ /* Unload Trusted Application once all sessions are closed */
+ handle_unload_ta(sess->ta_handle);
+ kfree(sess);
+}
+
+static void amdtee_release(struct tee_context *ctx)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+
+ if (!ctxdata)
+ return;
+
+ while (true) {
+ struct amdtee_session *sess;
+
+ sess = list_first_entry_or_null(&ctxdata->sess_list,
+ struct amdtee_session,
+ list_node);
+
+ if (!sess)
+ break;
+
+ list_del(&sess->list_node);
+ release_session(sess);
+ }
+ kfree(ctxdata);
+
+ ctx->data = NULL;
+}
+
+/**
+ * alloc_session() - Allocate a session structure
+ * @ctxdata: TEE Context data structure
+ * @session: Session ID for which 'struct amdtee_session' structure is to be
+ * allocated.
+ *
+ * Scans the TEE context's session list to check whether the TA is already
+ * loaded into the TEE. If so, returns the 'session' structure for that TA;
+ * otherwise allocates and initializes a new 'session' structure and adds it
+ * to the context's session list.
+ *
+ * The caller must hold the session list mutex (session_list_mutex).
+ *
+ * Returns:
+ * 'struct amdtee_session *' on success and NULL on failure.
+ */
+static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ struct amdtee_session *sess;
+ u32 ta_handle = get_ta_handle(session);
+
+ /* Scan session list to check if TA is already loaded in to TEE */
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->ta_handle == ta_handle) {
+ kref_get(&sess->refcount);
+ return sess;
+ }
+
+ /* Allocate a new session and add to list */
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (sess) {
+ sess->ta_handle = ta_handle;
+ kref_init(&sess->refcount);
+ spin_lock_init(&sess->lock);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ }
+
+ return sess;
+}
+
+/* Requires mutex to be held */
+static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ u32 session)
+{
+ u32 ta_handle = get_ta_handle(session);
+ u32 index = get_session_index(session);
+ struct amdtee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (ta_handle == sess->ta_handle &&
+ test_bit(index, sess->sess_mask))
+ return sess;
+
+ return NULL;
+}
+
+u32 get_buffer_id(struct tee_shm *shm)
+{
+ u32 buf_id = 0;
+ struct amdtee_shm_data *shmdata;
+
+ list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
+ if (shmdata->kaddr == shm->kaddr) {
+ buf_id = shmdata->buf_id;
+ break;
+ }
+
+ return buf_id;
+}
+
+static DEFINE_MUTEX(drv_mutex);
+static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
+ size_t *ta_size)
+{
+ const struct firmware *fw;
+ char fw_name[TA_PATH_MAX];
+ struct {
+ u32 lo;
+ u16 mid;
+ u16 hi_ver;
+ u8 seq_n[8];
+ } *uuid = ptr;
+ int n, rc = 0;
+
+ n = snprintf(fw_name, TA_PATH_MAX,
+ "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
+ TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
+ uuid->seq_n[0], uuid->seq_n[1],
+ uuid->seq_n[2], uuid->seq_n[3],
+ uuid->seq_n[4], uuid->seq_n[5],
+ uuid->seq_n[6], uuid->seq_n[7]);
+ if (n < 0 || n >= TA_PATH_MAX) {
+ pr_err("failed to get firmware name\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&drv_mutex);
+ n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
+ if (n) {
+ pr_err("failed to load firmware %s\n", fw_name);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ *ta_size = roundup(fw->size, PAGE_SIZE);
+ *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
+ if (IS_ERR(*ta)) {
+ pr_err("%s: get_free_pages failed 0x%llx\n", __func__,
+ (u64)*ta);
+ rc = -ENOMEM;
+ goto rel_fw;
+ }
+
+ memcpy(*ta, fw->data, fw->size);
+rel_fw:
+ release_firmware(fw);
+unlock:
+ mutex_unlock(&drv_mutex);
+ return rc;
+}
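copy_ta_binary() derives the firmware file name from the client UUID, so
TA binaries are fetched through the regular firmware loader. For a
hypothetical UUID 12345678-9abc-def0 with sequence bytes 01..08, the
request resolves to:

  <firmware search path>/amdtee/12345678-9abc-def0-0102030405060708.bin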
+
+int amdtee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess = NULL;
+ u32 session_info;
+ size_t ta_size;
+ int rc, i;
+ void *ta;
+
+ if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
+ pr_err("unsupported client login method\n");
+ return -EINVAL;
+ }
+
+ rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
+ if (rc) {
+ pr_err("failed to copy TA binary\n");
+ return rc;
+ }
+
+ /* Load the TA binary into TEE environment */
+ handle_load_ta(ta, ta_size, arg);
+ if (arg->ret == TEEC_SUCCESS) {
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+ }
+
+ if (arg->ret != TEEC_SUCCESS)
+ goto out;
+
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Find an empty session index for the given TA */
+ spin_lock(&sess->lock);
+ i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+ if (i < TEE_NUM_SESSIONS)
+ set_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+
+ if (i >= TEE_NUM_SESSIONS) {
+ pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+
+ if (arg->ret == TEEC_SUCCESS) {
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
+ } else {
+ pr_err("open_session failed %d\n", arg->ret);
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+out:
+ free_pages((u64)ta, get_order(ta_size));
+ return rc;
+}
+
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
+int amdtee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ u32 i, ta_handle, session_info;
+ struct amdtee_session *sess;
+
+ pr_debug("%s: sid = 0x%x\n", __func__, session);
+
+ /*
+ * Check that the session is valid and clear the session
+ * usage bit
+ */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, session);
+ if (sess) {
+ ta_handle = get_ta_handle(session);
+ i = get_session_index(session);
+ session_info = sess->session_info[i];
+ spin_lock(&sess->lock);
+ clear_bit(i, sess->sess_mask);
+ spin_unlock(&sess->lock);
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ /* Close the session */
+ handle_close_session(ta_handle, session_info);
+
+ kref_put(&sess->refcount, destroy_session);
+
+ return 0;
+}
+
+int amdtee_map_shmem(struct tee_shm *shm)
+{
+ struct shmem_desc shmem;
+ struct amdtee_shm_data *shmnode;
+ int rc, count;
+ u32 buf_id;
+
+ if (!shm)
+ return -EINVAL;
+
+ shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
+ if (!shmnode)
+ return -ENOMEM;
+
+ count = 1;
+ shmem.kaddr = shm->kaddr;
+ shmem.size = shm->size;
+
+ /*
+ * Send a MAP command to TEE and get the corresponding
+ * buffer Id
+ */
+ rc = handle_map_shmem(count, &shmem, &buf_id);
+ if (rc) {
+ pr_err("map_shmem failed: ret = %d\n", rc);
+ kfree(shmnode);
+ return rc;
+ }
+
+ shmnode->kaddr = shm->kaddr;
+ shmnode->buf_id = buf_id;
+ list_add(&shmnode->shm_node, &shmctx.shmdata_list);
+
+ pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+
+ return 0;
+}
+
+void amdtee_unmap_shmem(struct tee_shm *shm)
+{
+ struct amdtee_shm_data *shmnode;
+ u32 buf_id;
+
+ if (!shm)
+ return;
+
+ buf_id = get_buffer_id(shm);
+ /* Unmap the shared memory from TEE */
+ handle_unmap_shmem(buf_id);
+
+ list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
+ if (buf_id == shmnode->buf_id) {
+ list_del(&shmnode->shm_node);
+ kfree(shmnode);
+ break;
+ }
+}
+
+int amdtee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct amdtee_context_data *ctxdata = ctx->data;
+ struct amdtee_session *sess;
+ u32 i, session_info;
+
+ /* Check that the session is valid */
+ mutex_lock(&session_list_mutex);
+ sess = find_session(ctxdata, arg->session);
+ if (sess) {
+ i = get_session_index(arg->session);
+ session_info = sess->session_info[i];
+ }
+ mutex_unlock(&session_list_mutex);
+
+ if (!sess)
+ return -EINVAL;
+
+ handle_invoke_cmd(arg, session_info, param);
+
+ return 0;
+}
+
+int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ return -EINVAL;
+}
+
+static const struct tee_driver_ops amdtee_ops = {
+ .get_version = amdtee_get_version,
+ .open = amdtee_open,
+ .release = amdtee_release,
+ .open_session = amdtee_open_session,
+ .close_session = amdtee_close_session,
+ .invoke_func = amdtee_invoke_func,
+ .cancel_req = amdtee_cancel_req,
+};
+
+static const struct tee_desc amdtee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &amdtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init amdtee_driver_init(void)
+{
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct amdtee *amdtee;
+ int rc;
+
+ rc = psp_check_tee_status();
+ if (rc) {
+ pr_err("amd-tee driver: tee not present\n");
+ return rc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
+ if (!amdtee) {
+ rc = -ENOMEM;
+ goto err_kfree_drv_data;
+ }
+
+ pool = amdtee_config_shm();
+ if (IS_ERR(pool)) {
+ pr_err("shared pool configuration error\n");
+ rc = PTR_ERR(pool);
+ goto err_kfree_amdtee;
+ }
+
+ teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err_free_pool;
+ }
+ amdtee->teedev = teedev;
+
+ rc = tee_device_register(amdtee->teedev);
+ if (rc)
+ goto err_device_unregister;
+
+ amdtee->pool = pool;
+
+ drv_data->amdtee = amdtee;
+
+ pr_info("amd-tee driver initialization successful\n");
+ return 0;
+
+err_device_unregister:
+ tee_device_unregister(amdtee->teedev);
+
+err_free_pool:
+ tee_shm_pool_free(pool);
+
+err_kfree_amdtee:
+ kfree(amdtee);
+
+err_kfree_drv_data:
+ kfree(drv_data);
+ drv_data = NULL;
+
+ pr_err("amd-tee driver initialization failed\n");
+ return rc;
+}
+module_init(amdtee_driver_init);
+
+static void __exit amdtee_driver_exit(void)
+{
+ struct amdtee *amdtee;
+
+ if (!drv_data || !drv_data->amdtee)
+ return;
+
+ amdtee = drv_data->amdtee;
+
+ tee_device_unregister(amdtee->teedev);
+ tee_shm_pool_free(amdtee->pool);
+}
+module_exit(amdtee_driver_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION("AMD-TEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
new file mode 100644
index 000000000000..065854e2db18
--- /dev/null
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/psp-sev.h>
+#include "amdtee_private.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned long va;
+ int rc;
+
+ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!va)
+ return -ENOMEM;
+
+ shm->kaddr = (void *)va;
+ shm->paddr = __psp_pa((void *)va);
+ shm->size = PAGE_SIZE << order;
+
+ /* Map the allocated memory in to TEE */
+ rc = amdtee_map_shmem(shm);
+ if (rc) {
+ free_pages(va, order);
+ shm->kaddr = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+{
+ /* Unmap the shared memory from TEE */
+ amdtee_unmap_shmem(shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+struct tee_shm_pool *amdtee_config_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = pool_mem_mgr_alloc();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index d1ad512e1708..3ca71e3812ed 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -3,6 +3,7 @@
config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
+ depends on MMU
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index b830e0a87fba..99698b8a3a74 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -534,13 +534,13 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
-static optee_invoke_fn *get_invoke_func(struct device_node *np)
+static optee_invoke_fn *get_invoke_func(struct device *dev)
{
const char *method;
- pr_info("probing for conduit method from DT.\n");
+ pr_info("probing for conduit method.\n");
- if (of_property_read_string(np, "method", &method)) {
+ if (device_property_read_string(dev, "method", &method)) {
pr_warn("missing \"method\" property\n");
return ERR_PTR(-ENXIO);
}
@@ -554,7 +554,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np)
return ERR_PTR(-EINVAL);
}
-static struct optee *optee_probe(struct device_node *np)
+static int optee_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ /*
+ * The two devices have to be unregistered before we can free the
+ * other resources.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+
+ tee_shm_pool_free(optee->pool);
+ if (optee->memremaped_shm)
+ memunmap(optee->memremaped_shm);
+ optee_wait_queue_exit(&optee->wait_queue);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+
+ kfree(optee);
+
+ return 0;
+}
+
+static int optee_probe(struct platform_device *pdev)
{
optee_invoke_fn *invoke_fn;
struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
@@ -564,25 +594,25 @@ static struct optee *optee_probe(struct device_node *np)
u32 sec_caps;
int rc;
- invoke_fn = get_invoke_func(np);
+ invoke_fn = get_invoke_func(&pdev->dev);
if (IS_ERR(invoke_fn))
- return (void *)invoke_fn;
+ return PTR_ERR(invoke_fn);
if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
pr_warn("api uid mismatch\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
optee_msg_get_os_revision(invoke_fn);
if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
pr_warn("api revision mismatch\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
pr_warn("capabilities mismatch\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/*
@@ -598,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np)
pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
if (IS_ERR(pool))
- return (void *)pool;
+ return PTR_ERR(pool);
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
@@ -643,7 +673,16 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
- return optee;
+ platform_set_drvdata(pdev, optee);
+
+ rc = optee_enumerate_devices();
+ if (rc) {
+ optee_remove(pdev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
err:
if (optee) {
/*
@@ -659,92 +698,28 @@ err:
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
- return ERR_PTR(rc);
-}
-
-static void optee_remove(struct optee *optee)
-{
- /*
- * Ask OP-TEE to free all cached shared memory objects to decrease
- * reference counters and also avoid wild pointers in secure world
- * into the old shared memory range.
- */
- optee_disable_shm_cache(optee);
-
- /*
- * The two devices has to be unregistered before we can free the
- * other resources.
- */
- tee_device_unregister(optee->supp_teedev);
- tee_device_unregister(optee->teedev);
-
- tee_shm_pool_free(optee->pool);
- if (optee->memremaped_shm)
- memunmap(optee->memremaped_shm);
- optee_wait_queue_exit(&optee->wait_queue);
- optee_supp_uninit(&optee->supp);
- mutex_destroy(&optee->call_queue.mutex);
-
- kfree(optee);
+ return rc;
}
-static const struct of_device_id optee_match[] = {
+static const struct of_device_id optee_dt_match[] = {
{ .compatible = "linaro,optee-tz" },
{},
};
-
-static struct optee *optee_svc;
-
-static int __init optee_driver_init(void)
-{
- struct device_node *fw_np = NULL;
- struct device_node *np = NULL;
- struct optee *optee = NULL;
- int rc = 0;
-
- /* Node is supposed to be below /firmware */
- fw_np = of_find_node_by_name(NULL, "firmware");
- if (!fw_np)
- return -ENODEV;
-
- np = of_find_matching_node(fw_np, optee_match);
- if (!np || !of_device_is_available(np)) {
- of_node_put(np);
- return -ENODEV;
- }
-
- optee = optee_probe(np);
- of_node_put(np);
-
- if (IS_ERR(optee))
- return PTR_ERR(optee);
-
- rc = optee_enumerate_devices();
- if (rc) {
- optee_remove(optee);
- return rc;
- }
-
- pr_info("initialized driver\n");
-
- optee_svc = optee;
-
- return 0;
-}
-module_init(optee_driver_init);
-
-static void __exit optee_driver_exit(void)
-{
- struct optee *optee = optee_svc;
-
- optee_svc = NULL;
- if (optee)
- optee_remove(optee);
-}
-module_exit(optee_driver_exit);
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_remove,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+module_platform_driver(optee_driver);
MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_SUPPORTED_DEVICE("");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:optee");
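The conversion to a platform driver retires the hand-rolled firmware-node
scan; binding, unbinding and module autoloading now go through the driver
core, and device_property_read_string() keeps the conduit lookup
firmware-agnostic. module_platform_driver() expands to roughly the
init/exit boilerplate this patch deletes:

  static int __init optee_driver_init(void)
  {
      return platform_driver_register(&optee_driver);
  }
  module_init(optee_driver_init);

  static void __exit optee_driver_exit(void)
  {
      platform_driver_unregister(&optee_driver);
  }
  module_exit(optee_driver_exit);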
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
index 0332a5301d61..d767eebf30bd 100644
--- a/drivers/tee/optee/shm_pool.c
+++ b/drivers/tee/optee/shm_pool.c
@@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
shm->size = PAGE_SIZE << order;
if (shm->flags & TEE_SHM_DMA_BUF) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
+
+ pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = page;
+ page++;
+ }
+
shm->flags |= TEE_SHM_REGISTER;
- rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+ rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
+ kfree(pages);
}
return rc;
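One subtlety in the hunk above: kcalloc() is passed sizeof(pages), the
size of a pointer, as the element size. Since the array elements are
themselves pointers (struct page *), this happens to equal
sizeof(*pages) and the allocation is correct, but the conventional
spelling states the intent more clearly:

  pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);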
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 79b27865c6f4..5a05db5438d6 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -151,16 +151,33 @@ config THERMAL_GOV_POWER_ALLOCATOR
config CPU_THERMAL
bool "Generic cpu cooling support"
- depends on CPU_FREQ
depends on THERMAL_OF
help
+ Enable the CPU cooling features. If the system has no active
+ cooling device available, this option allows the CPU to be used
+ as a cooling device.
+
+if CPU_THERMAL
+
+config CPU_FREQ_THERMAL
+ bool "CPU frequency cooling device"
+ depends on CPU_FREQ
+ default y
+ help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
(drivers/acpi/processor_thermal.c).
This will be useful for platforms using the generic thermal interface
and not the ACPI interface.
- If you want this support, you should say Y here.
+config CPU_IDLE_THERMAL
+ bool "CPU idle cooling device"
+ depends on IDLE_INJECT
+ help
+ This implements the CPU cooling mechanism through
+ idle injection. This will throttle the CPU by injecting
+ idle cycles.
+endif
config CLOCK_THERMAL
bool "Generic clock cooling support"
@@ -263,6 +280,20 @@ config SPEAR_THERMAL
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework.
+config SUN8I_THERMAL
+ tristate "Allwinner sun8i thermal driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on NVMEM
+ depends on OF
+ depends on RESET_CONTROLLER
+ help
+ Support for the sun8i thermal sensor driver into the Linux thermal
+ framework.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun8i-thermal.
+
config ROCKCHIP_THERMAL
tristate "Rockchip thermal driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index baeb70bf0568..9fb88e26fb10 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -19,7 +19,8 @@ thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
# cpufreq cooling
-thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+thermal_sys-$(CONFIG_CPU_FREQ_THERMAL) += cpufreq_cooling.o
+thermal_sys-$(CONFIG_CPU_IDLE_THERMAL) += cpuidle_cooling.o
# clock cooling
thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
@@ -31,6 +32,7 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
obj-y += broadcom/
obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
+obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 8a9e9bc421c6..ccb1fe18e993 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -67,7 +67,11 @@
/**
* struct amlogic_thermal_soc_calib_data
- * @A, B, m, n: calibration parameters
+ * @A: calibration parameters
+ * @B: calibration parameters
+ * @m: calibration parameters
+ * @n: calibration parameters
+ *
* This structure is required for configuration of amlogic thermal driver.
*/
struct amlogic_thermal_soc_calib_data {
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 709a22f455e9..7c447cd149e7 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -21,8 +21,6 @@
#include "thermal_core.h"
-#define TO_MCELSIUS(c) ((c) * 1000)
-
/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
#define PMU_TM_DISABLE_OFFS 0
@@ -155,6 +153,9 @@ static void armadaxp_init(struct platform_device *pdev,
regmap_write(priv->syscon, data->syscon_control1_off, reg);
+ reg &= ~PMU_TDC0_SW_RST_MASK;
+ regmap_write(priv->syscon, data->syscon_control1_off, reg);
+
/* Enable the sensor */
regmap_read(priv->syscon, data->syscon_status_off, &reg);
reg &= ~PMU_TM_DISABLE_MASK;
@@ -578,7 +579,7 @@ static const struct armada_thermal_data armadaxp_data = {
.coef_m = 10000000ULL,
.coef_div = 13825,
.syscon_status_off = 0xb0,
- .syscon_control1_off = 0xd0,
+ .syscon_control1_off = 0x2d0,
};
static const struct armada_thermal_data armada370_data = {
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index cf43e1520de9..061f1db6edc9 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config BCM2711_THERMAL
+ tristate "Broadcom AVS RO thermal sensor driver"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on THERMAL_OF && MFD_SYSCON
+ help
+ Support for thermal sensors on Broadcom BCM2711 SoCs.
+
config BCM2835_THERMAL
tristate "Thermal sensors on bcm2835 SoC"
depends on ARCH_BCM2835 || COMPILE_TEST
diff --git a/drivers/thermal/broadcom/Makefile b/drivers/thermal/broadcom/Makefile
index 490ab1f7a86d..c917b243d5f0 100644
--- a/drivers/thermal/broadcom/Makefile
+++ b/drivers/thermal/broadcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM2711_THERMAL) += bcm2711_thermal.o
obj-$(CONFIG_BCM2835_THERMAL) += bcm2835_thermal.o
obj-$(CONFIG_BRCMSTB_THERMAL) += brcmstb_thermal.o
obj-$(CONFIG_BCM_NS_THERMAL) += ns-thermal.o
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
new file mode 100644
index 000000000000..67c2a737bc9d
--- /dev/null
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Broadcom AVS RO thermal sensor driver
+ *
+ * based on brcmstb_thermal
+ *
+ * Copyright (C) 2020 Stefan Wahren
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#include "../thermal_hwmon.h"
+
+#define AVS_RO_TEMP_STATUS 0x200
+#define AVS_RO_TEMP_STATUS_VALID_MSK (BIT(16) | BIT(10))
+#define AVS_RO_TEMP_STATUS_DATA_MSK GENMASK(9, 0)
+
+struct bcm2711_thermal_priv {
+ struct regmap *regmap;
+ struct thermal_zone_device *thermal;
+};
+
+static int bcm2711_get_temp(void *data, int *temp)
+{
+ struct bcm2711_thermal_priv *priv = data;
+ int slope = thermal_zone_get_slope(priv->thermal);
+ int offset = thermal_zone_get_offset(priv->thermal);
+ u32 val;
+ int ret;
+ long t;
+
+ ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & AVS_RO_TEMP_STATUS_VALID_MSK))
+ return -EIO;
+
+ val &= AVS_RO_TEMP_STATUS_DATA_MSK;
+
+ /* Convert a HW code to a temperature reading (millidegree celsius) */
+ t = slope * val + offset;
+
+ *temp = t < 0 ? 0 : t;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = {
+ .get_temp = bcm2711_get_temp,
+};
+
+static const struct of_device_id bcm2711_thermal_id_table[] = {
+ { .compatible = "brcm,bcm2711-thermal" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2711_thermal_id_table);
+
+static int bcm2711_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ struct bcm2711_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *parent;
+ struct regmap *regmap;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* get regmap from syscon node */
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+ priv->regmap = regmap;
+
+ thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv,
+ &bcm2711_thermal_of_ops);
+ if (IS_ERR(thermal)) {
+ ret = PTR_ERR(thermal);
+ dev_err(dev, "could not register sensor: %d\n", ret);
+ return ret;
+ }
+
+ priv->thermal = thermal;
+
+ thermal->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(thermal);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver bcm2711_thermal_driver = {
+ .probe = bcm2711_thermal_probe,
+ .driver = {
+ .name = "bcm2711_thermal",
+ .of_match_table = bcm2711_thermal_id_table,
+ },
+};
+module_platform_driver(bcm2711_thermal_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Wahren");
+MODULE_DESCRIPTION("Broadcom AVS RO thermal sensor driver");
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 5825ac581f56..8df5edef1ded 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -49,7 +49,7 @@
#define AVS_TMON_TP_TEST_ENABLE 0x20
/* Default coefficients */
-#define AVS_TMON_TEMP_SLOPE -487
+#define AVS_TMON_TEMP_SLOPE 487
#define AVS_TMON_TEMP_OFFSET 410040
/* HW related temperature constants */
@@ -102,29 +102,28 @@ static struct avs_tmon_trip avs_tmon_trips[] = {
},
};
+struct brcmstb_thermal_params {
+ unsigned int offset;
+ unsigned int mult;
+ const struct thermal_zone_of_device_ops *of_ops;
+};
+
struct brcmstb_thermal_priv {
void __iomem *tmon_base;
struct device *dev;
struct thermal_zone_device *thermal;
+ /* Process specific thermal parameters used for calculations */
+ const struct brcmstb_thermal_params *temp_params;
};
-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
- int *offset)
-{
- *slope = thermal_zone_get_slope(tz);
- *offset = thermal_zone_get_offset(tz);
-}
-
/* Convert a HW code to a temperature reading (millidegree celsius) */
-static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+static inline int avs_tmon_code_to_temp(struct brcmstb_thermal_priv *priv,
u32 code)
{
- const int val = code & AVS_TMON_TEMP_MASK;
- int slope, offset;
+ int offset = priv->temp_params->offset;
+ int mult = priv->temp_params->mult;
- avs_tmon_get_coeffs(tz, &slope, &offset);
-
- return slope * val + offset;
+ return (offset - (int)((code & AVS_TMON_TEMP_MASK) * mult));
}
/*
@@ -133,23 +132,22 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
* @temp: temperature to convert
* @low: if true, round toward the low side
*/
-static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv,
int temp, bool low)
{
- int slope, offset;
+ int offset = priv->temp_params->offset;
+ int mult = priv->temp_params->mult;
if (temp < AVS_TMON_TEMP_MIN)
- return AVS_TMON_TEMP_MAX; /* Maximum code value */
-
- avs_tmon_get_coeffs(tz, &slope, &offset);
+ return AVS_TMON_TEMP_MAX; /* Maximum code value */
if (temp >= offset)
return 0; /* Minimum code value */
if (low)
- return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
+ return (u32)(DIV_ROUND_UP(offset - temp, mult));
else
- return (u32)((offset - temp) / abs(slope));
+ return (u32)((offset - temp) / mult);
}
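With the per-process parameters defined further down (offset in
millidegrees Celsius, mult in millidegrees per code step), the conversion
is a straight line: temp = offset - code * mult. A worked example using
the 28nm parameters (offset 410040, mult 487):

  /* code 700: 410040 - 700 * 487 = 69140 mC, i.e. 69.14 C,
   * and the reverse: (410040 - 69140) / 487 = 700.
   */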
static int brcmstb_get_temp(void *data, int *temp)
@@ -167,7 +165,7 @@ static int brcmstb_get_temp(void *data, int *temp)
val = (val & AVS_TMON_STATUS_data_msk) >> AVS_TMON_STATUS_data_shift;
- t = avs_tmon_code_to_temp(priv->thermal, val);
+ t = avs_tmon_code_to_temp(priv, val);
if (t < 0)
*temp = 0;
else
@@ -201,7 +199,7 @@ static int avs_tmon_get_trip_temp(struct brcmstb_thermal_priv *priv,
val &= trip->reg_msk;
val >>= trip->reg_shift;
- return avs_tmon_code_to_temp(priv->thermal, val);
+ return avs_tmon_code_to_temp(priv, val);
}
static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
@@ -214,7 +212,7 @@ static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
dev_dbg(priv->dev, "set temp %d to %d\n", type, temp);
/* round toward low temp for the low interrupt */
- val = avs_tmon_temp_to_code(priv->thermal, temp,
+ val = avs_tmon_temp_to_code(priv, temp,
type == TMON_TRIP_TYPE_LOW);
val <<= trip->reg_shift;
@@ -231,7 +229,7 @@ static int avs_tmon_get_intr_temp(struct brcmstb_thermal_priv *priv)
u32 val;
val = __raw_readl(priv->tmon_base + AVS_TMON_TEMP_INT_CODE);
- return avs_tmon_code_to_temp(priv->thermal, val);
+ return avs_tmon_code_to_temp(priv, val);
}
static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
@@ -290,19 +288,37 @@ static int brcmstb_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops brcmstb_16nm_of_ops = {
+ .get_temp = brcmstb_get_temp,
+};
+
+static const struct brcmstb_thermal_params brcmstb_16nm_params = {
+ .offset = 457829,
+ .mult = 557,
+ .of_ops = &brcmstb_16nm_of_ops,
+};
+
+static const struct thermal_zone_of_device_ops brcmstb_28nm_of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
+static const struct brcmstb_thermal_params brcmstb_28nm_params = {
+ .offset = 410040,
+ .mult = 487,
+ .of_ops = &brcmstb_28nm_of_ops,
+};
+
static const struct of_device_id brcmstb_thermal_id_table[] = {
- { .compatible = "brcm,avs-tmon" },
+ { .compatible = "brcm,avs-tmon-bcm7216", .data = &brcmstb_16nm_params },
+ { .compatible = "brcm,avs-tmon", .data = &brcmstb_28nm_params },
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
static int brcmstb_thermal_probe(struct platform_device *pdev)
{
+ const struct thermal_zone_of_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
struct resource *res;
@@ -312,6 +328,10 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->temp_params = of_device_get_match_data(&pdev->dev);
+ if (!priv->temp_params)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->tmon_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->tmon_base))
@@ -319,9 +339,10 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
platform_set_drvdata(pdev, priv);
+ of_ops = priv->temp_params->of_ops;
thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv,
- &of_ops);
+ of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
@@ -331,16 +352,15 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
priv->thermal = thermal;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "could not get IRQ\n");
- return irq;
- }
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- brcmstb_tmon_irq_thread, IRQF_ONESHOT,
- DRV_NAME, priv);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
- return ret;
+ if (irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ brcmstb_tmon_irq_thread,
+ IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
+ return ret;
+ }
}
dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
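The coefficient change above folds the conversion into one linear formula: the temperature in millidegrees is offset - code * mult, and the inverse rounds up only for the low trip so the interrupt cannot fire above the requested temperature. A minimal user-space sketch of that arithmetic, not part of the patch, using the 28nm coefficients from brcmstb_28nm_params and a local DIV_ROUND_UP helper:

#include <stdbool.h>
#include <stdio.h>

#define OFFSET 410040 /* brcmstb_28nm_params.offset */
#define MULT 487 /* brcmstb_28nm_params.mult */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* HW code to millidegrees celsius, as in avs_tmon_code_to_temp() */
static int code_to_temp(unsigned int code)
{
    return OFFSET - (int)(code * MULT);
}

/* Millidegrees to HW code; round toward low temp for the low trip */
static unsigned int temp_to_code(int temp, bool low)
{
    return low ? DIV_ROUND_UP(OFFSET - temp, MULT)
               : (OFFSET - temp) / MULT;
}

int main(void)
{
    unsigned int code = temp_to_code(85000, false); /* 85C */

    /* prints code=667 temp=85211 */
    printf("code=%u temp=%d\n", code, code_to_temp(code));
    return 0;
}

Round-tripping 85000 millidegrees lands on 85211 because each code step is worth one mult (487 millidegrees); the low/high rounding choice decides on which side of the requested trip the hardware threshold falls.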
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
index 3ad3256c48fd..7cb3ae4b44ee 100644
--- a/drivers/thermal/clock_cooling.c
+++ b/drivers/thermal/clock_cooling.c
@@ -7,7 +7,7 @@
* Copyright (C) 2013 Texas Instruments Inc.
* Contact: Eduardo Valentin <eduardo.valentin@ti.com>
*
- * Highly based on cpu_cooling.c.
+ * Highly based on cpufreq_cooling.c.
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*/
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 52569b27b426..fe83d7a210d4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/drivers/thermal/cpu_cooling.c
+ * linux/drivers/thermal/cpufreq_cooling.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
*
@@ -63,6 +63,7 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
+ * @qos_req: PM QoS constraint to apply
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -620,7 +621,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
struct thermal_cooling_device *cdev = NULL;
if (!np) {
- pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
policy->cpu);
return NULL;
}
@@ -630,7 +631,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
cdev = __cpufreq_cooling_register(np, policy, em);
if (IS_ERR(cdev)) {
- pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
+ pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
policy->cpu, PTR_ERR(cdev));
cdev = NULL;
}
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
new file mode 100644
index 000000000000..0bb843246f59
--- /dev/null
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Limited.
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#include <linux/cpu_cooling.h>
+#include <linux/cpuidle.h>
+#include <linux/err.h>
+#include <linux/idle_inject.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+/**
+ * struct cpuidle_cooling_device - data for the idle cooling device
+ * @ii_dev: the idle injection device for this cooling device
+ * @state: a normalized integer giving the state of the cooling device
+ */
+struct cpuidle_cooling_device {
+ struct idle_inject_device *ii_dev;
+ unsigned long state;
+};
+
+static DEFINE_IDA(cpuidle_ida);
+
+/**
+ * cpuidle_cooling_runtime - Running time computation
+ * @idle_duration_us: the fixed idle injection duration, in microseconds
+ * @state: the injection ratio as a percentile-based number
+ *
+ * The running duration is computed from the idle injection duration
+ * which is fixed. If we reach 100% of idle injection ratio, that
+ * means the running duration is zero. If we have a 50% ratio
+ * injection, that means we have equal duration for idle and for
+ * running duration.
+ *
+ * The formula is deduced as follows:
+ *
+ * running = idle x ((100 / ratio) - 1)
+ *
+ * For precision purpose for integer math, we use the following:
+ *
+ * running = (idle x 100) / ratio - idle
+ *
+ * For example, with a 50% injection ratio and a 10ms idle
+ * duration, we end up with 10ms of idle injection and 10ms of
+ * running duration.
+ *
+ * Return: An unsigned int for a usec-based runtime duration.
+ */
+static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us,
+ unsigned long state)
+{
+ if (!state)
+ return 0;
+
+ return ((idle_duration_us * 100) / state) - idle_duration_us;
+}
+
+/**
+ * cpuidle_cooling_get_max_state - Get the maximum state
+ * @cdev: the thermal cooling device
+ * @state: a pointer to the state variable to be filled
+ *
+ * The function always returns 100 as the injection ratio. It is
+ * percentile based for consistency across different platforms.
+ *
+ * Return: The function cannot fail; it always returns zero
+ */
+static int cpuidle_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ /*
+ * Depending on the configuration or the hardware, the running
+ * cycle and the idle cycle could be different. We want to
+ * unify that to a 0..100 interval, so the set state
+ * interface will be the same whatever the platform is.
+ *
+ * The state 100% will make the cluster 100% ... idle. A 0%
+ * injection ratio means no idle injection at all, and 50%
+ * means that for 10ms of idle injection we get 10ms of
+ * running time.
+ */
+ *state = 100;
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_get_cur_state - Get the current cooling state
+ * @cdev: the thermal cooling device
+ * @state: a pointer to the state
+ *
+ * The function just copies the state value from the private thermal
+ * cooling device structure; the mapping is 1 <-> 1.
+ *
+ * Return: The function cannot fail; it always returns zero
+ */
+static int cpuidle_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct cpuidle_cooling_device *idle_cdev = cdev->devdata;
+
+ *state = idle_cdev->state;
+
+ return 0;
+}
+
+/**
+ * cpuidle_cooling_set_cur_state - Set the current cooling state
+ * @cdev: the thermal cooling device
+ * @state: the target state
+ *
+ * The function first checks whether we are initiating the mitigation,
+ * which in turn wakes up all the idle injection tasks belonging to the
+ * idle cooling device. In any case, it updates the internal state of the
+ * cooling device.
+ *
+ * Return: The function cannot fail; it always returns zero
+ */
+static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct cpuidle_cooling_device *idle_cdev = cdev->devdata;
+ struct idle_inject_device *ii_dev = idle_cdev->ii_dev;
+ unsigned long current_state = idle_cdev->state;
+ unsigned int runtime_us, idle_duration_us;
+
+ idle_cdev->state = state;
+
+ idle_inject_get_duration(ii_dev, &runtime_us, &idle_duration_us);
+
+ runtime_us = cpuidle_cooling_runtime(idle_duration_us, state);
+
+ idle_inject_set_duration(ii_dev, runtime_us, idle_duration_us);
+
+ if (current_state == 0 && state > 0) {
+ idle_inject_start(ii_dev);
+ } else if (current_state > 0 && !state) {
+ idle_inject_stop(ii_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * cpuidle_cooling_ops - thermal cooling device ops
+ */
+static struct thermal_cooling_device_ops cpuidle_cooling_ops = {
+ .get_max_state = cpuidle_cooling_get_max_state,
+ .get_cur_state = cpuidle_cooling_get_cur_state,
+ .set_cur_state = cpuidle_cooling_set_cur_state,
+};
+
+/**
+ * cpuidle_of_cooling_register - Idle cooling device initialization function
+ * @np: a node pointer to a device tree cooling device node
+ * @drv: a cpuidle driver structure pointer
+ *
+ * This function is in charge of creating a cooling device per cpuidle
+ * driver and registering it with the thermal framework.
+ *
+ * Return: zero on success, or negative value corresponding to the
+ * error detected in the underlying subsystems.
+ */
+int cpuidle_of_cooling_register(struct device_node *np,
+ struct cpuidle_driver *drv)
+{
+ struct idle_inject_device *ii_dev;
+ struct cpuidle_cooling_device *idle_cdev;
+ struct thermal_cooling_device *cdev;
+ char dev_name[THERMAL_NAME_LENGTH];
+ int id, ret;
+
+ idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL);
+ if (!idle_cdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ id = ida_simple_get(&cpuidle_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto out_kfree;
+ }
+
+ ii_dev = idle_inject_register(drv->cpumask);
+ if (!ii_dev) {
+ ret = -EINVAL;
+ goto out_id;
+ }
+
+ idle_inject_set_duration(ii_dev, TICK_USEC, TICK_USEC);
+
+ idle_cdev->ii_dev = ii_dev;
+
+ snprintf(dev_name, sizeof(dev_name), "thermal-idle-%d", id);
+
+ cdev = thermal_of_cooling_device_register(np, dev_name, idle_cdev,
+ &cpuidle_cooling_ops);
+ if (IS_ERR(cdev)) {
+ ret = PTR_ERR(cdev);
+ goto out_unregister;
+ }
+
+ return 0;
+
+out_unregister:
+ idle_inject_unregister(ii_dev);
+out_id:
+ ida_simple_remove(&cpuidle_ida, id);
+out_kfree:
+ kfree(idle_cdev);
+out:
+ return ret;
+}
+
+/**
+ * cpuidle_cooling_register - Idle cooling device initialization function
+ * @drv: a cpuidle driver structure pointer
+ *
+ * This function is in charge of creating a cooling device per cpuidle
+ * driver and registering it with the thermal framework.
+ *
+ * Return: zero on success, or negative value corresponding to the
+ * error detected in the underlying subsystems.
+ */
+int cpuidle_cooling_register(struct cpuidle_driver *drv)
+{
+ return cpuidle_of_cooling_register(NULL, drv);
+}
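The ratio-to-runtime formula in cpuidle_cooling_runtime() above is pure integer arithmetic and can be sanity-checked outside the kernel. A minimal sketch, assuming only the formula itself and none of the idle_inject plumbing:

#include <stdio.h>

/* Mirrors cpuidle_cooling_runtime(): state is the idle ratio in
 * percent, so running = (idle * 100) / state - idle. */
static unsigned int cooling_runtime(unsigned int idle_us,
                                    unsigned long state)
{
    if (!state)
        return 0;

    return ((idle_us * 100) / state) - idle_us;
}

int main(void)
{
    /* 50% ratio: 10ms idle pairs with 10ms running */
    printf("%u\n", cooling_runtime(10000, 50)); /* 10000 */
    /* 100% ratio: the CPUs are fully idle, no running time */
    printf("%u\n", cooling_runtime(10000, 100)); /* 0 */
    /* 25% ratio: 10ms idle per 30ms of running time */
    printf("%u\n", cooling_runtime(10000, 25)); /* 30000 */
    return 0;
}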
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 372dbbaaafb8..21d4d6e6409a 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -152,8 +152,8 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
next_low, next_high);
- dev_info(&th->tz->device,
- "PRCMU set max %ld, min %ld\n", next_high, next_low);
+ dev_dbg(&th->tz->device,
+ "PRCMU set max %ld, min %ld\n", next_high, next_low);
} else if (idx == num_points - 1)
/* So we roof out 1 degree over the max point */
th->interpolated_temp = db8500_thermal_points[idx] + 1;
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index ef59256887ff..a87d4fa031c8 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -53,6 +53,7 @@ static DEFINE_IDA(devfreq_ida);
* 'utilization' (which is 'busy_time / 'total_time').
* The 'res_util' range is from 100 to (power_table[state] * 100)
* for the corresponding 'state'.
+ * @capped_state: index of the cooling state within the dynamic power budget
*/
struct devfreq_cooling_device {
int id;
@@ -587,7 +588,7 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_register);
/**
* devfreq_cooling_unregister() - Unregister devfreq cooling device.
- * @dfc: Pointer to devfreq cooling device to unregister.
+ * @cdev: Pointer to devfreq cooling device to unregister.
*/
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index afd99f668c65..aaa07180ab48 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -58,8 +58,8 @@ static long get_target_state(struct thermal_zone_device *tz,
/**
* fair_share_throttle - throttles devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* Throttling Logic: This uses three parameters to calculate the new
* throttle state of the cooling devices associated with the given zone.
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index b831fc77cf64..991a1c54296d 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -71,8 +71,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* bang_bang_control - controls devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - the trip point
+ * @tz: thermal_zone_device
+ * @trip: the trip point
*
* Regulation Logic: a two point regulation, deliver cooling state depending
* on the previous state shown in this diagram:
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 3517883b5cdb..efae0c02d898 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3400_thermal_match[] = {
+ {"INT1040", 0},
{"INT3400", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index a7bbd8584ae2..aeece1e136a5 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT1043", 0},
{"INT3403", 0},
{"", 0},
};
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 75484d6c5056..432213272f1e 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include "int340x_thermal_zone.h"
static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
@@ -34,7 +35,7 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
*temp = (unsigned long)conv_temp * 10;
} else
/* _TMP returns the temperature in tenths of degrees Kelvin */
- *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
+ *temp = deci_kelvin_to_millicelsius(tmp);
return 0;
}
@@ -116,7 +117,7 @@ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
snprintf(name, sizeof(name), "PAT%d", trip);
status = acpi_execute_simple_method(d->adev->handle, name,
- MILLICELSIUS_TO_DECI_KELVIN(temp));
+ millicelsius_to_deci_kelvin(temp));
if (ACPI_FAILURE(status))
return -EIO;
@@ -163,7 +164,7 @@ static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
if (ACPI_FAILURE(status))
return -EIO;
- *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ *temp = deci_kelvin_to_millicelsius(r);
return 0;
}
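The hunks above replace the old DECI_KELVIN_TO_MILLICELSIUS() macros with the <linux/units.h> helpers. Assuming their usual definitions around ABSOLUTE_ZERO_MILLICELSIUS, the conversion reduces to the arithmetic below; this is an illustrative user-space sketch, not the kernel implementation:

#include <stdio.h>

#define ABSOLUTE_ZERO_MILLICELSIUS (-273150)

/* Same arithmetic as the <linux/units.h> helpers used above */
static long deci_kelvin_to_millicelsius(long t)
{
    return t * 100 + ABSOLUTE_ZERO_MILLICELSIUS;
}

static long millicelsius_to_deci_kelvin(long t)
{
    return (t - ABSOLUTE_ZERO_MILLICELSIUS) / 100;
}

int main(void)
{
    /* An ACPI _TMP of 3232 deci-kelvin is 50.05C */
    printf("%ld\n", deci_kelvin_to_millicelsius(3232)); /* 50050 */
    printf("%ld\n", millicelsius_to_deci_kelvin(50050)); /* 3232 */
    return 0;
}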
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 89a015387283..b1fd34516e28 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -42,6 +42,9 @@
/* IceLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_ICL_THERMAL 0x8a03
+/* JasperLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4503
+
#define DRV_NAME "proc_thermal"
struct power_config {
@@ -724,6 +727,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_GLK_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
.driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)},
{ 0, },
};
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index 4f0bb8f502e1..56401fd4708d 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
@@ -23,6 +24,7 @@
#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */
#define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */
+#define PCH_THERMAL_DID_CML_H 0x06F9 /* CML-H PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -92,7 +94,7 @@ static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
if (ACPI_SUCCESS(status)) {
unsigned long trip_temp;
- trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ trip_temp = deci_kelvin_to_millicelsius(r);
if (trip_temp) {
ptd->psv_temp = trip_temp;
ptd->psv_trip_id = *nr_trips;
@@ -272,6 +274,7 @@ enum board_ids {
board_wpt,
board_skl,
board_cnl,
+ board_cml,
};
static const struct board_info {
@@ -294,6 +297,10 @@ static const struct board_info {
.name = "pch_cannonlake",
.ops = &pch_dev_ops_wpt,
},
+ [board_cml] = {
+ .name = "pch_cometlake",
+ .ops = &pch_dev_ops_wpt,
+ }
};
static int intel_pch_thermal_probe(struct pci_dev *pdev,
@@ -365,7 +372,7 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
thermal_zone_device_unregister(ptd->tzd);
iounmap(ptd->hw_base);
pci_set_drvdata(pdev, NULL);
- pci_release_region(pdev, 0);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -398,6 +405,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
.driver_data = board_cnl, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
.driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CML_H),
+ .driver_data = board_cml, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 88fd0fbe0cfa..82d06c7411eb 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -33,7 +33,7 @@ struct max77620_therm_info {
/**
* max77620_thermal_read_temp: Read PMIC die temperature.
* @data: Device specific data.
- * temp: Temperature in millidegrees Celsius
+ * @temp: Temperature in millidegrees Celsius
*
* The actual temperature of PMIC die is not available from PMIC.
* PMIC only tells the status if it has crossed or not the threshold level
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index acf4854cbb8b..76e30603d4d5 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -358,7 +358,7 @@ static const int mt7622_mux_values[MT7622_NUM_SENSORS] = { 0, };
static const int mt7622_vts_index[MT7622_NUM_SENSORS] = { VTS1 };
static const int mt7622_tc_offset[MT7622_NUM_CONTROLLER] = { 0x0, };
-/**
+/*
* The MT8173 thermal controller has four banks. Each bank can read up to
* four temperature sensors simultaneously. The MT8173 has a total of 5
* temperature sensors. We use each bank to measure a certain area of the
@@ -400,7 +400,7 @@ static const struct mtk_thermal_data mt8173_thermal_data = {
.sensor_mux_values = mt8173_mux_values,
};
-/**
+/*
* The MT2701 thermal controller has one bank, which can read up to
* three temperature sensors simultaneously. The MT2701 has a total of 3
* temperature sensors.
@@ -430,7 +430,7 @@ static const struct mtk_thermal_data mt2701_thermal_data = {
.sensor_mux_values = mt2701_mux_values,
};
-/**
+/*
* The MT2712 thermal controller has one bank, which can read up to
* four temperature sensors simultaneously. The MT2712 has a total of 4
* temperature sensors.
@@ -484,7 +484,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
.sensor_mux_values = mt7622_mux_values,
};
-/**
+/*
* The MT8183 thermal controller has one bank for the current SW framework.
* The MT8183 has a total of 6 temperature sensors.
* There are two thermal controller to control the six sensor.
@@ -495,7 +495,6 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
* data, and this indeed needs the temperatures of the individual banks
* for making better decisions.
*/
-
static const struct mtk_thermal_data mt8183_thermal_data = {
.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
.num_banks = MT8183_NUM_SENSORS_PER_ZONE,
@@ -519,7 +518,8 @@ static const struct mtk_thermal_data mt8183_thermal_data = {
/**
* raw_to_mcelsius - convert a raw ADC value to mcelsius
- * @mt: The thermal controller
+ * @mt: The thermal controller
+ * @sensno: sensor number
* @raw: raw ADC value
*
* This converts the raw ADC value to mcelsius using the SoC specific
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index dc5093be553e..ef0baa954ff0 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -493,7 +493,7 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
if (!dev || !dev->of_node) {
of_node_put(np);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
}
sensor_np = of_node_get(dev->of_node);
@@ -754,7 +754,7 @@ end:
return ret;
}
-/**
+/*
* It maps 'enum thermal_trip_type' found in include/linux/thermal.h
* into the device tree binding of 'trip', property type.
*/
@@ -977,7 +977,7 @@ free_tz:
return ERR_PTR(ret);
}
-static inline void of_thermal_free_zone(struct __thermal_zone *tz)
+static __init void of_thermal_free_zone(struct __thermal_zone *tz)
{
struct __thermal_bind_params *tbp;
int i, j;
@@ -999,6 +999,38 @@ static inline void of_thermal_free_zone(struct __thermal_zone *tz)
}
/**
+ * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ *
+ * Finds all zones parsed and added to the thermal framework and removes them
+ * from the system, together with their resources.
+ *
+ */
+static __init void of_thermal_destroy_zones(void)
+{
+ struct device_node *np, *child;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np) {
+ pr_debug("unable to find thermal zones\n");
+ return;
+ }
+
+ for_each_available_child_of_node(np, child) {
+ struct thermal_zone_device *zone;
+
+ zone = thermal_zone_get_zone_by_name(child->name);
+ if (IS_ERR(zone))
+ continue;
+
+ thermal_zone_device_unregister(zone);
+ kfree(zone->tzp);
+ kfree(zone->ops);
+ of_thermal_free_zone(zone->devdata);
+ }
+ of_node_put(np);
+}
+
+/**
* of_parse_thermal_zones - parse device tree thermal data
*
* Initialization function that can be called by machine initialization
@@ -1087,35 +1119,3 @@ exit_free:
return -ENOMEM;
}
-
-/**
- * of_thermal_destroy_zones - remove all zones parsed and allocated resources
- *
- * Finds all zones parsed and added to the thermal framework and remove them
- * from the system, together with their resources.
- *
- */
-void of_thermal_destroy_zones(void)
-{
- struct device_node *np, *child;
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return;
- }
-
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
-
- zone = thermal_zone_get_zone_by_name(child->name);
- if (IS_ERR(zone))
- continue;
-
- thermal_zone_device_unregister(zone);
- kfree(zone->tzp);
- kfree(zone->ops);
- of_thermal_free_zone(zone->devdata);
- }
- of_node_put(np);
-}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 015e7d201598..0e7cf5236932 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv)
irq = platform_get_irq_byname(pdev, "uplow");
if (irq < 0) {
ret = irq;
+ /* For old DTs with no IRQ defined */
+ if (irq == -ENXIO)
+ ret = 0;
goto err_put_device;
}
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 45e9fcb172cc..874bc46e6c73 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -9,9 +9,12 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
#include <linux/thermal.h>
#include "thermal_core.h"
+#include "thermal_hwmon.h"
#define SITES_MAX 16
#define TMR_DISABLE 0x0
@@ -24,129 +27,80 @@
#define TMU_VER1 0x1
#define TMU_VER2 0x2
-/*
- * QorIQ TMU Registers
- */
-struct qoriq_tmu_site_regs {
- u32 tritsr; /* Immediate Temperature Site Register */
- u32 tratsr; /* Average Temperature Site Register */
- u8 res0[0x8];
-};
+#define REGS_TMR 0x000 /* Mode Register */
+#define TMR_DISABLE 0x0
+#define TMR_ME 0x80000000
+#define TMR_ALPF 0x0c000000
+#define TMR_MSITE_ALL GENMASK(15, 0)
-struct qoriq_tmu_regs_v1 {
- u32 tmr; /* Mode Register */
- u32 tsr; /* Status Register */
- u32 tmtmir; /* Temperature measurement interval Register */
- u8 res0[0x14];
- u32 tier; /* Interrupt Enable Register */
- u32 tidr; /* Interrupt Detect Register */
- u32 tiscr; /* Interrupt Site Capture Register */
- u32 ticscr; /* Interrupt Critical Site Capture Register */
- u8 res1[0x10];
- u32 tmhtcrh; /* High Temperature Capture Register */
- u32 tmhtcrl; /* Low Temperature Capture Register */
- u8 res2[0x8];
- u32 tmhtitr; /* High Temperature Immediate Threshold */
- u32 tmhtatr; /* High Temperature Average Threshold */
- u32 tmhtactr; /* High Temperature Average Crit Threshold */
- u8 res3[0x24];
- u32 ttcfgr; /* Temperature Configuration Register */
- u32 tscfgr; /* Sensor Configuration Register */
- u8 res4[0x78];
- struct qoriq_tmu_site_regs site[SITES_MAX];
- u8 res5[0x9f8];
- u32 ipbrr0; /* IP Block Revision Register 0 */
- u32 ipbrr1; /* IP Block Revision Register 1 */
- u8 res6[0x310];
- u32 ttrcr[4]; /* Temperature Range Control Register */
-};
+#define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */
+#define TMTMIR_DEFAULT 0x0000000f
-struct qoriq_tmu_regs_v2 {
- u32 tmr; /* Mode Register */
- u32 tsr; /* Status Register */
- u32 tmsr; /* monitor site register */
- u32 tmtmir; /* Temperature measurement interval Register */
- u8 res0[0x10];
- u32 tier; /* Interrupt Enable Register */
- u32 tidr; /* Interrupt Detect Register */
- u8 res1[0x8];
- u32 tiiscr; /* interrupt immediate site capture register */
- u32 tiascr; /* interrupt average site capture register */
- u32 ticscr; /* Interrupt Critical Site Capture Register */
- u32 res2;
- u32 tmhtcr; /* monitor high temperature capture register */
- u32 tmltcr; /* monitor low temperature capture register */
- u32 tmrtrcr; /* monitor rising temperature rate capture register */
- u32 tmftrcr; /* monitor falling temperature rate capture register */
- u32 tmhtitr; /* High Temperature Immediate Threshold */
- u32 tmhtatr; /* High Temperature Average Threshold */
- u32 tmhtactr; /* High Temperature Average Crit Threshold */
- u32 res3;
- u32 tmltitr; /* monitor low temperature immediate threshold */
- u32 tmltatr; /* monitor low temperature average threshold register */
- u32 tmltactr; /* monitor low temperature average critical threshold */
- u32 res4;
- u32 tmrtrctr; /* monitor rising temperature rate critical threshold */
- u32 tmftrctr; /* monitor falling temperature rate critical threshold*/
- u8 res5[0x8];
- u32 ttcfgr; /* Temperature Configuration Register */
- u32 tscfgr; /* Sensor Configuration Register */
- u8 res6[0x78];
- struct qoriq_tmu_site_regs site[SITES_MAX];
- u8 res7[0x9f8];
- u32 ipbrr0; /* IP Block Revision Register 0 */
- u32 ipbrr1; /* IP Block Revision Register 1 */
- u8 res8[0x300];
- u32 teumr0;
- u32 teumr1;
- u32 teumr2;
- u32 res9;
- u32 ttrcr[4]; /* Temperature Range Control Register */
-};
+#define REGS_V2_TMSR 0x008 /* monitor site register */
-struct qoriq_tmu_data;
+#define REGS_V2_TMTMIR 0x00c /* Temperature measurement interval Register */
+
+#define REGS_TIER 0x020 /* Interrupt Enable Register */
+#define TIER_DISABLE 0x0
+
+#define REGS_TTCFGR 0x080 /* Temperature Configuration Register */
+#define REGS_TSCFGR 0x084 /* Sensor Configuration Register */
+
+#define REGS_TRITSR(n) (0x100 + 16 * (n)) /* Immediate Temperature
+ * Site Register
+ */
+#define TRITSR_V BIT(31)
+#define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
+ * Control Register
+ */
+#define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
+ * Register n
+ */
+#define REGS_V2_TEUMR(n) (0xf00 + 4 * (n))
/*
* Thermal zone data
*/
struct qoriq_sensor {
- struct thermal_zone_device *tzd;
- struct qoriq_tmu_data *qdata;
int id;
};
struct qoriq_tmu_data {
int ver;
- struct qoriq_tmu_regs_v1 __iomem *regs;
- struct qoriq_tmu_regs_v2 __iomem *regs_v2;
+ struct regmap *regmap;
struct clk *clk;
- bool little_endian;
- struct qoriq_sensor *sensor[SITES_MAX];
+ struct qoriq_sensor sensor[SITES_MAX];
};
-static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
+static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s)
{
- if (p->little_endian)
- iowrite32(val, addr);
- else
- iowrite32be(val, addr);
-}
-
-static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr)
-{
- if (p->little_endian)
- return ioread32(addr);
- else
- return ioread32be(addr);
+ return container_of(s, struct qoriq_tmu_data, sensor[s->id]);
}
static int tmu_get_temp(void *p, int *temp)
{
struct qoriq_sensor *qsensor = p;
- struct qoriq_tmu_data *qdata = qsensor->qdata;
+ struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor);
u32 val;
+ /*
+ * REGS_TRITSR(id) has the following layout:
+ *
+ * 31 ... 7 6 5 4 3 2 1 0
+ * V TEMP
+ *
+ * Where the V bit signifies that the measurement is ready and is
+ * within the sensor range. TEMP is an 8-bit value representing the
+ * temperature in degrees celsius.
+ */
+ if (regmap_read_poll_timeout(qdata->regmap,
+ REGS_TRITSR(qsensor->id),
+ val,
+ val & TRITSR_V,
+ USEC_PER_MSEC,
+ 10 * USEC_PER_MSEC))
+ return -ENODATA;
- val = tmu_read(qdata, &qdata->regs->site[qsensor->id].tritsr);
*temp = (val & 0xff) * 1000;
return 0;
@@ -156,84 +110,82 @@ static const struct thermal_zone_of_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
-static int qoriq_tmu_register_tmu_zone(struct platform_device *pdev)
+static int qoriq_tmu_register_tmu_zone(struct device *dev,
+ struct qoriq_tmu_data *qdata)
{
- struct qoriq_tmu_data *qdata = platform_get_drvdata(pdev);
- int id, sites = 0;
+ int id;
+
+ if (qdata->ver == TMU_VER1) {
+ regmap_write(qdata->regmap, REGS_TMR,
+ TMR_MSITE_ALL | TMR_ME | TMR_ALPF);
+ } else {
+ regmap_write(qdata->regmap, REGS_V2_TMSR, TMR_MSITE_ALL);
+ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
+ }
for (id = 0; id < SITES_MAX; id++) {
- qdata->sensor[id] = devm_kzalloc(&pdev->dev,
- sizeof(struct qoriq_sensor), GFP_KERNEL);
- if (!qdata->sensor[id])
- return -ENOMEM;
-
- qdata->sensor[id]->id = id;
- qdata->sensor[id]->qdata = qdata;
- qdata->sensor[id]->tzd = devm_thermal_zone_of_sensor_register(
- &pdev->dev, id, qdata->sensor[id], &tmu_tz_ops);
- if (IS_ERR(qdata->sensor[id]->tzd)) {
- if (PTR_ERR(qdata->sensor[id]->tzd) == -ENODEV)
+ struct thermal_zone_device *tzd;
+ struct qoriq_sensor *sensor = &qdata->sensor[id];
+ int ret;
+
+ sensor->id = id;
+
+ tzd = devm_thermal_zone_of_sensor_register(dev, id,
+ sensor,
+ &tmu_tz_ops);
+ ret = PTR_ERR_OR_ZERO(tzd);
+ if (ret) {
+ if (ret == -ENODEV)
continue;
- else
- return PTR_ERR(qdata->sensor[id]->tzd);
+
+ regmap_write(qdata->regmap, REGS_TMR, TMR_DISABLE);
+ return ret;
}
- if (qdata->ver == TMU_VER1)
- sites |= 0x1 << (15 - id);
- else
- sites |= 0x1 << id;
- }
+ if (devm_thermal_add_hwmon_sysfs(tzd))
+ dev_warn(dev,
+ "Failed to add hwmon sysfs attributes\n");
- /* Enable monitoring */
- if (sites != 0) {
- if (qdata->ver == TMU_VER1) {
- tmu_write(qdata, sites | TMR_ME | TMR_ALPF,
- &qdata->regs->tmr);
- } else {
- tmu_write(qdata, sites, &qdata->regs_v2->tmsr);
- tmu_write(qdata, TMR_ME | TMR_ALPF_V2,
- &qdata->regs_v2->tmr);
- }
}
return 0;
}
-static int qoriq_tmu_calibration(struct platform_device *pdev)
+static int qoriq_tmu_calibration(struct device *dev,
+ struct qoriq_tmu_data *data)
{
int i, val, len;
u32 range[4];
const u32 *calibration;
- struct device_node *np = pdev->dev.of_node;
- struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+ struct device_node *np = dev->of_node;
len = of_property_count_u32_elems(np, "fsl,tmu-range");
if (len < 0 || len > 4) {
- dev_err(&pdev->dev, "invalid range data.\n");
+ dev_err(dev, "invalid range data.\n");
return len;
}
val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
if (val != 0) {
- dev_err(&pdev->dev, "failed to read range data.\n");
+ dev_err(dev, "failed to read range data.\n");
return val;
}
/* Init temperature range registers */
for (i = 0; i < len; i++)
- tmu_write(data, range[i], &data->regs->ttrcr[i]);
+ regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
- dev_err(&pdev->dev, "invalid calibration data.\n");
+ dev_err(dev, "invalid calibration data.\n");
return -ENODEV;
}
for (i = 0; i < len; i += 8, calibration += 2) {
val = of_read_number(calibration, 1);
- tmu_write(data, val, &data->regs->ttcfgr);
+ regmap_write(data->regmap, REGS_TTCFGR, val);
val = of_read_number(calibration + 1, 1);
- tmu_write(data, val, &data->regs->tscfgr);
+ regmap_write(data->regmap, REGS_TSCFGR, val);
}
return 0;
@@ -242,76 +194,117 @@ static int qoriq_tmu_calibration(struct platform_device *pdev)
static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
{
/* Disable interrupt, using polling instead */
- tmu_write(data, TIER_DISABLE, &data->regs->tier);
+ regmap_write(data->regmap, REGS_TIER, TIER_DISABLE);
/* Set update_interval */
+
if (data->ver == TMU_VER1) {
- tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+ regmap_write(data->regmap, REGS_TMTMIR, TMTMIR_DEFAULT);
} else {
- tmu_write(data, TMTMIR_DEFAULT, &data->regs_v2->tmtmir);
- tmu_write(data, TEUMR0_V2, &data->regs_v2->teumr0);
+ regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT);
+ regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2);
}
/* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
}
+static const struct regmap_range qoriq_yes_ranges[] = {
+ regmap_reg_range(REGS_TMR, REGS_TSCFGR),
+ regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(3)),
+ regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)),
+ regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)),
+ /* Read only registers below */
+ regmap_reg_range(REGS_TRITSR(0), REGS_TRITSR(15)),
+};
+
+static const struct regmap_access_table qoriq_wr_table = {
+ .yes_ranges = qoriq_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges) - 1,
+};
+
+static const struct regmap_access_table qoriq_rd_table = {
+ .yes_ranges = qoriq_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges),
+};
+
static int qoriq_tmu_probe(struct platform_device *pdev)
{
int ret;
u32 ver;
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
+ struct device *dev = &pdev->dev;
+ const bool little_endian = of_property_read_bool(np, "little-endian");
+ const enum regmap_endian format_endian =
+ little_endian ? REGMAP_ENDIAN_LITTLE : REGMAP_ENDIAN_BIG;
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &qoriq_rd_table,
+ .wr_table = &qoriq_wr_table,
+ .val_format_endian = format_endian,
+ .max_register = SZ_4K,
+ };
+ void __iomem *base;
+
+ data = devm_kzalloc(dev, sizeof(struct qoriq_tmu_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- platform_set_drvdata(pdev, data);
-
- data->little_endian = of_property_read_bool(np, "little-endian");
+ base = devm_platform_ioremap_resource(pdev, 0);
+ ret = PTR_ERR_OR_ZERO(base);
+ if (ret) {
+ dev_err(dev, "Failed to get memory region\n");
+ return ret;
+ }
- data->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(data->regs)) {
- dev_err(&pdev->dev, "Failed to get memory region\n");
- return PTR_ERR(data->regs);
+ data->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ ret = PTR_ERR_OR_ZERO(data->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to init regmap (%d)\n", ret);
+ return ret;
}
- data->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ data->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret) {
- dev_err(&pdev->dev, "Failed to enable clock\n");
+ dev_err(dev, "Failed to enable clock\n");
return ret;
}
/* version register offset at: 0xbf8 on both v1 and v2 */
- ver = tmu_read(data, &data->regs->ipbrr0);
+ ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read IP block version\n");
+ return ret;
+ }
data->ver = (ver >> 8) & 0xff;
- if (data->ver == TMU_VER2)
- data->regs_v2 = (void __iomem *)data->regs;
qoriq_tmu_init_device(data); /* TMU initialization */
- ret = qoriq_tmu_calibration(pdev); /* TMU calibration */
+ ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */
if (ret < 0)
goto err;
- ret = qoriq_tmu_register_tmu_zone(pdev);
+ ret = qoriq_tmu_register_tmu_zone(dev, data);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register sensors\n");
+ dev_err(dev, "Failed to register sensors\n");
ret = -ENODEV;
goto err;
}
+ platform_set_drvdata(pdev, data);
+
return 0;
err:
clk_disable_unprepare(data->clk);
- platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -321,24 +314,21 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
/* Disable monitoring */
- tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
clk_disable_unprepare(data->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
{
- u32 tmr;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+ int ret;
- /* Disable monitoring */
- tmr = tmu_read(data, &data->regs->tmr);
- tmr &= ~TMR_ME;
- tmu_write(data, tmr, &data->regs->tmr);
+ ret = regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, 0);
+ if (ret)
+ return ret;
clk_disable_unprepare(data->clk);
@@ -347,7 +337,6 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
static int __maybe_unused qoriq_tmu_resume(struct device *dev)
{
- u32 tmr;
int ret;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
@@ -356,11 +345,7 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev)
return ret;
/* Enable monitoring */
- tmr = tmu_read(data, &data->regs->tmr);
- tmr |= TMR_ME;
- tmu_write(data, tmr, &data->regs->tmr);
-
- return 0;
+ return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
}
static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
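The rework above embeds the sensors directly in qoriq_tmu_data and drops both the per-sensor allocation and the qdata back-pointer; qoriq_sensor_to_data() recovers the parent with container_of() on an array member, where the variable index inside offsetof() is a GCC/Clang extension the kernel relies on. A stand-alone sketch of the pattern, with a simplified container_of() and no kernel headers:

#include <stddef.h>
#include <stdio.h>

#define SITES_MAX 16

struct qoriq_sensor {
    int id;
};

struct qoriq_tmu_data {
    int ver;
    struct qoriq_sensor sensor[SITES_MAX];
};

/* Simplified container_of(); the kernel version adds type checks */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static struct qoriq_tmu_data *to_data(struct qoriq_sensor *s)
{
    /* sensor[s->id] as the member: each element subtracts its own
     * offset, so every element maps back to the same parent */
    return container_of(s, struct qoriq_tmu_data, sensor[s->id]);
}

int main(void)
{
    struct qoriq_tmu_data data = { .ver = 2 };

    data.sensor[7].id = 7;
    printf("ver=%d\n", to_data(&data.sensor[7])->ver); /* ver=2 */
    return 0;
}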
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 1460cf9d9f1c..72877bdc072d 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -182,9 +182,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
tsc->coef.a2);
mcelsius = FIXPT_TO_MCELSIUS(val);
- /* Make sure we are inside specifications */
- if ((mcelsius < MCELSIUS(-40)) || (mcelsius > MCELSIUS(125)))
- return -EIO;
+ /* Guaranteed operating range is -40C to 125C. */
/* Round value to device granularity setting */
*temp = rcar_gen3_thermal_round(mcelsius);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index d0873de718da..8f1aafa2044e 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -219,7 +219,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
* to get stable temperature.
* see "Usage Notes" on datasheet
*/
- udelay(300);
+ usleep_range(300, 400);
new = rcar_thermal_read(priv, THSSR) & CTEMP;
if (new == old) {
@@ -275,12 +275,7 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
tmp = MCELSIUS((priv->ctemp * 5) - 60);
mutex_unlock(&priv->lock);
- if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
- struct device *dev = rcar_priv_to_dev(priv);
-
- dev_err(dev, "it couldn't measure temperature correctly\n");
- return -EIO;
- }
+ /* Guaranteed operating range is -45C to 125C. */
*temp = tmp;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 343c2f5c5a25..7c1a8bccdcba 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -19,7 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/pinctrl/consumer.h>
-/**
+/*
* If the temperature stays high over a period of time, the
* resulting TSHUT is given to the CRU module to let it reset the
* entire chip, or to the PMIC via GPIO.
@@ -29,7 +29,7 @@ enum tshut_mode {
TSHUT_MODE_GPIO,
};
-/**
+/*
* The polarity of the system temperature sensors' tshut signal;
* bit 8 is the tshut polarity.
* 0: low active, 1: high active
@@ -39,7 +39,7 @@ enum tshut_polarity {
TSHUT_HIGH_ACTIVE,
};
-/**
+/*
* The system has two Temperature Sensors.
* sensor0 is for CPU, and sensor1 is for GPU.
*/
@@ -48,7 +48,7 @@ enum sensor_id {
SENSOR_GPU,
};
-/**
+/*
* The conversion table has the adc value and temperature.
* ADC_DECREMENT: the adc value is decreasing (e.g. rk3288_code_table)
* ADC_INCREMENT: the adc value is increasing (e.g. rk3368_code_table)
@@ -58,6 +58,8 @@ enum adc_sort_mode {
ADC_INCREMENT,
};
+#include "thermal_hwmon.h"
+
/**
* The max sensors is two in rockchip SoCs.
* Two sensors: CPU and GPU sensor.
@@ -80,13 +82,14 @@ struct chip_tsadc_table {
/**
* struct rockchip_tsadc_chip - hold the private data of tsadc chip
- * @chn_id[SOC_MAX_SENSORS]: the sensor id of chip correspond to the channel
+ * @chn_id: array of sensor ids of chip corresponding to the channel
* @chn_num: the channel number of tsadc chip
* @tshut_temp: the hardware-controlled shutdown temperature value
* @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
* @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
* @initialize: SoC special initialize tsadc controller method
* @irq_ack: clear the interrupt
+ * @control: enable/disable method for the tsadc controller
* @get_temp: get the temperature
* @set_alarm_temp: set the high temperature interrupt
* @set_tshut_temp: set the hardware-controlled shutdown temperature
@@ -139,7 +142,7 @@ struct rockchip_thermal_sensor {
* @chip: pointer to the platform/configuration data
* @pdev: platform device of thermal
* @reset: the reset controller of tsadc
- * @sensors[SOC_MAX_SENSORS]: the thermal sensor
+ * @sensors: array of thermal sensors
* @clk: the controller clock is divided by the exteral 24MHz
* @pclk: the advanced peripherals bus clock
* @grf: the general register file will be used to do static set by software
@@ -590,6 +593,9 @@ static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
/**
* rk_tsadcv2_initialize - initialize TSADC Controller.
+ * @grf: the general register file will be used to do static set by software
+ * @regs: the base address of tsadc controller
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*
* (1) Set TSADC_V2_AUTO_PERIOD:
* Configure the interleave between every two accessing of
@@ -624,6 +630,9 @@ static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
/**
* rk_tsadcv3_initialize - initialize TSADC Controller.
+ * @grf: the general register file will be used to do static set by software
+ * @regs: the base address of tsadc controller
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*
* (1) The tsadc control power sequence.
*
@@ -723,6 +732,8 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
/**
* rk_tsadcv3_control - the tsadc controller is enabled or disabled.
+ * @regs: the base address of tsadc controller
+ * @enable: boolean flag to enable the controller
*
* NOTE: TSADC controller works at auto mode, and some SoCs need set the
* tsadc_q_sel bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output
@@ -1206,6 +1217,7 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
/**
* Reset TSADC Controller, reset all tsadc registers.
+ * @reset: the reset controller of tsadc
*/
static void rockchip_thermal_reset_controller(struct reset_control *reset)
{
@@ -1321,8 +1333,15 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->control(thermal->regs, true);
- for (i = 0; i < thermal->chip->chn_num; i++)
+ for (i = 0; i < thermal->chip->chn_num; i++) {
rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
+ thermal->sensors[i].tzd->tzp->no_hwmon = false;
+ error = thermal_add_hwmon_sysfs(thermal->sensors[i].tzd);
+ if (error)
+ dev_warn(&pdev->dev,
+ "failed to register sensor %d with hwmon: %d\n",
+ i, error);
+ }
platform_set_drvdata(pdev, thermal);
@@ -1344,6 +1363,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
for (i = 0; i < thermal->chip->chn_num; i++) {
struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
+ thermal_remove_hwmon_sysfs(sensor->tzd);
rockchip_thermal_toggle_sensor(sensor, false);
}
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index fe0d2ba51392..f4eff5a41a84 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -5,7 +5,7 @@ config EXYNOS_THERMAL
depends on HAS_IOMEM
help
If you say yes here you get support for the TMU (Thermal Management
- Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
+ Unit) driver for Samsung Exynos series of SoCs. This driver initialises
the TMU, reports temperature and handles cooling action if defined.
This driver uses the Exynos core thermal APIs and TMU configuration
data from the supported SoCs.
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fb2c55123a99..fd4a17812f33 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
+ * exynos_tmu.c - Samsung Exynos TMU (Thermal Management Unit)
*
* Copyright (C) 2014 Samsung Electronics
* Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
@@ -138,7 +138,7 @@ enum soc_type {
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
- driver
+ * driver
* @id: identifier of the one instance of the TMU controller.
* @base: base address of the single instance of the TMU controller.
* @base_second: base address of the common registers of the TMU controller.
@@ -162,8 +162,11 @@ enum soc_type {
* 0 < reference_voltage <= 31
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
+ * @tzd: pointer to thermal_zone_device structure
* @ntrip: number of supported trip points.
* @enabled: current status of TMU device
+ * @tmu_set_trip_temp: SoC specific method to set trip (rising threshold)
+ * @tmu_set_trip_hyst: SoC specific method to set hysteresis (falling threshold)
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -1183,7 +1186,7 @@ static struct platform_driver exynos_tmu_driver = {
module_platform_driver(exynos_tmu_driver);
-MODULE_DESCRIPTION("EXYNOS TMU Driver");
+MODULE_DESCRIPTION("Exynos TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index cf9ddc52f30e..ad9e3bf8fdf6 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -30,7 +30,7 @@
#define DTS_DR_OFFSET 0x1C
#define DTS_SR_OFFSET 0x20
#define DTS_ITENR_OFFSET 0x24
-#define DTS_CIFR_OFFSET 0x28
+#define DTS_ICIFR_OFFSET 0x28
/* DTS_CFGR1 register mask definitions */
#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
@@ -51,10 +51,16 @@
/* DTS_DR register mask definitions */
#define TS1_MFREQ_MASK GENMASK(15, 0)
+/* DTS_ITENR register mask definitions */
+#define ITENR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
+
+/* DTS_ICIFR register mask definitions */
+#define ICIFR_MASK (GENMASK(2, 0) | GENMASK(6, 4))
+
/* Less significant bit position definitions */
#define TS1_T0_POS 16
-#define TS1_SMP_TIME_POS 16
#define TS1_HITTHD_POS 16
+#define TS1_LITTHD_POS 0
#define HSREF_CLK_DIV_POS 24
/* DTS_CFGR1 bit definitions */
@@ -76,58 +82,59 @@
#define ONE_MHZ 1000000
#define POLL_TIMEOUT 5000
#define STARTUP_TIME 40
-#define TS1_T0_VAL0 30
-#define TS1_T0_VAL1 130
+#define TS1_T0_VAL0 30000 /* 30 celsius */
+#define TS1_T0_VAL1 130000 /* 130 celsius */
#define NO_HW_TRIG 0
-
-/* The Thermal Framework expects millidegrees */
-#define mcelsius(temp) ((temp) * 1000)
-
-/* The Sensor expects oC degrees */
-#define celsius(temp) ((temp) / 1000)
+#define SAMPLING_TIME 15
struct stm_thermal_sensor {
struct device *dev;
struct thermal_zone_device *th_dev;
enum thermal_device_mode mode;
struct clk *clk;
- int high_temp;
- int low_temp;
- int temp_critical;
- int temp_passive;
unsigned int low_temp_enabled;
- int num_trips;
+ unsigned int high_temp_enabled;
int irq;
- unsigned int irq_enabled;
void __iomem *base;
int t0, fmt0, ramp_coeff;
};
-static irqreturn_t stm_thermal_alarm_irq(int irq, void *sdata)
+static int stm_enable_irq(struct stm_thermal_sensor *sensor)
{
- struct stm_thermal_sensor *sensor = sdata;
+ u32 value;
- disable_irq_nosync(irq);
- sensor->irq_enabled = false;
+ dev_dbg(sensor->dev, "low:%d high:%d\n", sensor->low_temp_enabled,
+ sensor->high_temp_enabled);
- return IRQ_WAKE_THREAD;
+ /* Disable IT generation for low and high thresholds */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ value &= ~(LOW_THRESHOLD | HIGH_THRESHOLD);
+
+ if (sensor->low_temp_enabled)
+ value |= HIGH_THRESHOLD;
+
+ if (sensor->high_temp_enabled)
+ value |= LOW_THRESHOLD;
+
+ /* Enable interrupts */
+ writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
+
+ return 0;
}
-static irqreturn_t stm_thermal_alarm_irq_thread(int irq, void *sdata)
+static irqreturn_t stm_thermal_irq_handler(int irq, void *sdata)
{
- u32 value;
struct stm_thermal_sensor *sensor = sdata;
- /* read IT reason in SR and clear flags */
- value = readl_relaxed(sensor->base + DTS_SR_OFFSET);
+ dev_dbg(sensor->dev, "sr:%d\n",
+ readl_relaxed(sensor->base + DTS_SR_OFFSET));
- if ((value & LOW_THRESHOLD) == LOW_THRESHOLD)
- writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
- if ((value & HIGH_THRESHOLD) == HIGH_THRESHOLD)
- writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+ stm_enable_irq(sensor);
- thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+ /* Acknowledge all DTS irqs */
+ writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
return IRQ_HANDLED;
}
@@ -160,6 +167,8 @@ static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
writel_relaxed(value, sensor->base +
DTS_CFGR1_OFFSET);
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
return 0;
}
@@ -167,6 +176,8 @@ static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
{
u32 value;
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
/* Stop measuring */
value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
value &= ~TS1_START;
@@ -263,60 +274,17 @@ static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
int temp, u32 *th)
{
int freqM;
- u32 sampling_time;
-
- /* Retrieve the number of periods to sample */
- sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
- TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
/* Figure out the CLK_PTAT frequency for a given temperature */
- freqM = ((temp - sensor->t0) * sensor->ramp_coeff)
- + sensor->fmt0;
-
- dev_dbg(sensor->dev, "%s: freqM for threshold = %d Hz",
- __func__, freqM);
+ freqM = ((temp - sensor->t0) * sensor->ramp_coeff) / 1000 +
+ sensor->fmt0;
/* Figure out the threshold sample number */
- *th = clk_get_rate(sensor->clk);
+ *th = clk_get_rate(sensor->clk) * SAMPLING_TIME / freqM;
if (!*th)
return -EINVAL;
- *th = *th / freqM;
-
- *th *= sampling_time;
-
- return 0;
-}
-
-static int stm_thermal_set_threshold(struct stm_thermal_sensor *sensor)
-{
- u32 value, th;
- int ret;
-
- value = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
-
- /* Erase threshold content */
- value &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
-
- /* Retrieve the sample threshold number th for a given temperature */
- ret = stm_thermal_calculate_threshold(sensor, sensor->high_temp, &th);
- if (ret)
- return ret;
-
- value |= th & TS1_LITTHD_MASK;
-
- if (sensor->low_temp_enabled) {
- /* Retrieve the sample threshold */
- ret = stm_thermal_calculate_threshold(sensor, sensor->low_temp,
- &th);
- if (ret)
- return ret;
-
- value |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
- }
-
- /* Write value on the Low interrupt threshold */
- writel_relaxed(value, sensor->base + DTS_ITR1_OFFSET);
+ dev_dbg(sensor->dev, "freqM=%d Hz, threshold=0x%x", freqM, *th);
return 0;
}
@@ -326,77 +294,57 @@ static int stm_disable_irq(struct stm_thermal_sensor *sensor)
{
u32 value;
- /* Disable IT generation for low and high thresholds */
+ /* Disable IT generation */
value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
- writel_relaxed(value & ~(LOW_THRESHOLD | HIGH_THRESHOLD),
- sensor->base + DTS_ITENR_OFFSET);
-
- dev_dbg(sensor->dev, "%s: IT disabled on sensor side", __func__);
-
- return 0;
-}
-
-/* Enable temperature interrupt */
-static int stm_enable_irq(struct stm_thermal_sensor *sensor)
-{
- u32 value;
-
- /*
- * Code below enables High temperature threshold using a low threshold
- * sampling value
- */
-
- /* Make sure LOW_THRESHOLD IT is clear before enabling */
- writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
-
- /* Enable IT generation for low threshold */
- value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
- value |= LOW_THRESHOLD;
-
- /* Enable the low temperature threshold if needed */
- if (sensor->low_temp_enabled) {
- /* Make sure HIGH_THRESHOLD IT is clear before enabling */
- writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
-
- /* Enable IT generation for high threshold */
- value |= HIGH_THRESHOLD;
- }
-
- /* Enable thresholds */
+ value &= ~ITENR_MASK;
writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
- dev_dbg(sensor->dev, "%s: IT enabled on sensor side", __func__);
-
return 0;
}
-static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
+static int stm_thermal_set_trips(void *data, int low, int high)
{
+ struct stm_thermal_sensor *sensor = data;
+ u32 itr1, th;
int ret;
- sensor->mode = THERMAL_DEVICE_DISABLED;
+ dev_dbg(sensor->dev, "set trips %d <--> %d\n", low, high);
- ret = stm_sensor_power_off(sensor);
- if (ret)
- return ret;
+ /* Erase threshold content */
+ itr1 = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
+ itr1 &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
- ret = stm_disable_irq(sensor);
- if (ret)
- return ret;
+ /*
+ * Disable low-temp if "low" is too small. As per thermal framework
+ * API, we use -INT_MAX rather than INT_MIN.
+ */
- ret = stm_thermal_set_threshold(sensor);
- if (ret)
- return ret;
+ if (low > -INT_MAX) {
+ sensor->low_temp_enabled = 1;
+ /* add 0.5 degrees C of hysteresis due to measurement error */
+ ret = stm_thermal_calculate_threshold(sensor, low - 500, &th);
+ if (ret)
+ return ret;
- ret = stm_enable_irq(sensor);
- if (ret)
- return ret;
+ itr1 |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
+ } else {
+ sensor->low_temp_enabled = 0;
+ }
- ret = stm_sensor_power_on(sensor);
- if (ret)
- return ret;
+ /* Disable high-temp if "high" is too big. */
+ if (high < INT_MAX) {
+ sensor->high_temp_enabled = 1;
+ ret = stm_thermal_calculate_threshold(sensor, high, &th);
+ if (ret)
+ return ret;
- sensor->mode = THERMAL_DEVICE_ENABLED;
+ itr1 |= (TS1_LITTHD_MASK & (th << TS1_LITTHD_POS));
+ } else {
+ sensor->high_temp_enabled = 0;
+ }
+
+ /* Write new threshold values */
+ writel_relaxed(itr1, sensor->base + DTS_ITR1_OFFSET);
return 0;
}
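
For context on the -INT_MAX convention above: the thermal core passes open-ended bounds when a trip side is absent, and the handler maps those to disabling the corresponding hardware threshold. A small hypothetical sketch (calls as they would look from inside this driver, where stm_thermal_set_trips() is visible):

#include <limits.h>

static void example_trip_windows(struct stm_thermal_sensor *sensor)
{
	/* watch only the high side: interrupt above 85.0 degrees C */
	stm_thermal_set_trips(sensor, -INT_MAX, 85000);

	/* watch a 75.0..95.0 degrees C window; both thresholds armed */
	stm_thermal_set_trips(sensor, 75000, 95000);
}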
@@ -405,76 +353,26 @@ static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
static int stm_thermal_get_temp(void *data, int *temp)
{
struct stm_thermal_sensor *sensor = data;
- u32 sampling_time;
+ u32 periods;
int freqM, ret;
if (sensor->mode != THERMAL_DEVICE_ENABLED)
return -EAGAIN;
- /* Retrieve the number of samples */
- ret = readl_poll_timeout(sensor->base + DTS_DR_OFFSET, freqM,
- (freqM & TS1_MFREQ_MASK), STARTUP_TIME,
- POLL_TIMEOUT);
-
+ /* Retrieve the number of periods sampled */
+ ret = readl_relaxed_poll_timeout(sensor->base + DTS_DR_OFFSET, periods,
+ (periods & TS1_MFREQ_MASK),
+ STARTUP_TIME, POLL_TIMEOUT);
if (ret)
return ret;
- if (!freqM)
- return -ENODATA;
-
- /* Retrieve the number of periods sampled */
- sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
- TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
-
- /* Figure out the number of samples per period */
- freqM /= sampling_time;
-
/* Figure out the CLK_PTAT frequency */
- freqM = clk_get_rate(sensor->clk) / freqM;
+ freqM = (clk_get_rate(sensor->clk) * SAMPLING_TIME) / periods;
if (!freqM)
return -EINVAL;
- dev_dbg(sensor->dev, "%s: freqM=%d\n", __func__, freqM);
-
/* Figure out the temperature in millidegrees Celsius */
- *temp = mcelsius(sensor->t0 + ((freqM - sensor->fmt0) /
- sensor->ramp_coeff));
-
- dev_dbg(sensor->dev, "%s: temperature = %d millicelsius",
- __func__, *temp);
-
- /* Update thresholds */
- if (sensor->num_trips > 1) {
- /* Update alarm threshold value to next higher trip point */
- if (sensor->high_temp == sensor->temp_passive &&
- celsius(*temp) >= sensor->temp_passive) {
- sensor->high_temp = sensor->temp_critical;
- sensor->low_temp = sensor->temp_passive;
- sensor->low_temp_enabled = true;
- ret = stm_thermal_update_threshold(sensor);
- if (ret)
- return ret;
- }
-
- if (sensor->high_temp == sensor->temp_critical &&
- celsius(*temp) < sensor->temp_passive) {
- sensor->high_temp = sensor->temp_passive;
- sensor->low_temp_enabled = false;
- ret = stm_thermal_update_threshold(sensor);
- if (ret)
- return ret;
- }
-
- /*
- * Re-enable alarm IRQ if temperature below critical
- * temperature
- */
- if (!sensor->irq_enabled &&
- (celsius(*temp) < sensor->temp_critical)) {
- sensor->irq_enabled = true;
- enable_irq(sensor->irq);
- }
- }
+ *temp = (freqM - sensor->fmt0) * 1000 / sensor->ramp_coeff + sensor->t0;
return 0;
}
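
This is the exact inverse of the threshold calculation: the counter value decodes back to a frequency, and the frequency back to a temperature. Reusing the made-up calibration values from the earlier threshold sketch, a round-trip check (the residual ~0.15 degrees C is integer truncation):

#include <stdio.h>

int main(void)
{
	long t0 = 30000, fmt0 = 6400000, ramp_coeff = 4300;
	long pclk = 50000000, sampling_time = 15;
	long periods = 113; /* counter value as read from DTS_DR */

	/* same math as stm_thermal_get_temp() above */
	long freqM = pclk * sampling_time / periods;
	long temp = (freqM - fmt0) * 1000 / ramp_coeff + t0;

	printf("freqM=%ld Hz -> temp=%ld mC\n", freqM, temp);
	return 0; /* prints freqM=6637168 Hz -> temp=85155 mC */
}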
@@ -493,8 +391,8 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor)
}
ret = devm_request_threaded_irq(dev, sensor->irq,
- stm_thermal_alarm_irq,
- stm_thermal_alarm_irq_thread,
+ NULL,
+ stm_thermal_irq_handler,
IRQF_ONESHOT,
dev->driver->name, sensor);
if (ret) {
@@ -503,8 +401,6 @@ static int stm_register_irq(struct stm_thermal_sensor *sensor)
return ret;
}
- sensor->irq_enabled = true;
-
dev_dbg(dev, "%s: thermal IRQ registered", __func__);
return 0;
@@ -514,6 +410,8 @@ static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
{
int ret;
+ stm_disable_irq(sensor);
+
ret = stm_sensor_power_off(sensor);
if (ret)
return ret;
@@ -526,7 +424,6 @@ static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
{
int ret;
- struct device *dev = sensor->dev;
ret = clk_prepare_enable(sensor->clk);
if (ret)
@@ -540,26 +437,8 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
if (ret)
goto thermal_unprepare;
- /* Set threshold(s) for IRQ */
- ret = stm_thermal_set_threshold(sensor);
- if (ret)
- goto thermal_unprepare;
-
- ret = stm_enable_irq(sensor);
- if (ret)
- goto thermal_unprepare;
-
- ret = stm_sensor_power_on(sensor);
- if (ret) {
- dev_err(dev, "%s: failed to power on sensor\n", __func__);
- goto irq_disable;
- }
-
return 0;
-irq_disable:
- stm_disable_irq(sensor);
-
thermal_unprepare:
clk_disable_unprepare(sensor->clk);
@@ -576,8 +455,6 @@ static int stm_thermal_suspend(struct device *dev)
if (ret)
return ret;
- sensor->mode = THERMAL_DEVICE_DISABLED;
-
return 0;
}
@@ -590,7 +467,12 @@ static int stm_thermal_resume(struct device *dev)
if (ret)
return ret;
- sensor->mode = THERMAL_DEVICE_ENABLED;
+ ret = stm_sensor_power_on(sensor);
+ if (ret)
+ return ret;
+
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+ stm_enable_irq(sensor);
return 0;
}
@@ -600,6 +482,7 @@ SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_of_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
+ .set_trips = stm_thermal_set_trips,
};
static const struct of_device_id stm_thermal_of_match[] = {
@@ -612,9 +495,8 @@ static int stm_thermal_probe(struct platform_device *pdev)
{
struct stm_thermal_sensor *sensor;
struct resource *res;
- const struct thermal_trip *trip;
void __iomem *base;
- int ret, i;
+ int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "%s: device tree node not found\n",
@@ -645,10 +527,23 @@ static int stm_thermal_probe(struct platform_device *pdev)
return PTR_ERR(sensor->clk);
}
- /* Register IRQ into GIC */
- ret = stm_register_irq(sensor);
- if (ret)
+ stm_disable_irq(sensor);
+
+ /* Clear irq flags */
+ writel_relaxed(ICIFR_MASK, sensor->base + DTS_ICIFR_OFFSET);
+
+ /* Configure and enable HW sensor */
+ ret = stm_thermal_prepare(sensor);
+ if (ret) {
+ dev_err(&pdev->dev, "Error prepare sensor: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret) {
+ dev_err(&pdev->dev, "Error power on sensor: %d\n", ret);
return ret;
+ }
sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
sensor,
@@ -661,53 +556,12 @@ static int stm_thermal_probe(struct platform_device *pdev)
return ret;
}
- if (!sensor->th_dev->ops->get_crit_temp) {
- /* Critical point must be provided */
- ret = -EINVAL;
- goto err_tz;
- }
-
- ret = sensor->th_dev->ops->get_crit_temp(sensor->th_dev,
- &sensor->temp_critical);
- if (ret) {
- dev_err(&pdev->dev,
- "Not able to read critical_temp: %d\n", ret);
+ /* Register IRQ into GIC */
+ ret = stm_register_irq(sensor);
+ if (ret)
goto err_tz;
- }
-
- sensor->temp_critical = celsius(sensor->temp_critical);
-
- /* Set thresholds for IRQ */
- sensor->high_temp = sensor->temp_critical;
-
- trip = of_thermal_get_trip_points(sensor->th_dev);
- sensor->num_trips = of_thermal_get_ntrips(sensor->th_dev);
-
- /* Find out passive temperature if it exists */
- for (i = (sensor->num_trips - 1); i >= 0; i--) {
- if (trip[i].type == THERMAL_TRIP_PASSIVE) {
- sensor->temp_passive = celsius(trip[i].temperature);
- /* Update high temperature threshold */
- sensor->high_temp = sensor->temp_passive;
- }
- }
- /*
- * Ensure low_temp_enabled flag is disabled.
- * By disabling low_temp_enabled, low threshold IT will not be
- * configured neither enabled because it is not needed as high
- * threshold is set on the lowest temperature trip point after
- * probe.
- */
- sensor->low_temp_enabled = false;
-
- /* Configure and enable HW sensor */
- ret = stm_thermal_prepare(sensor);
- if (ret) {
- dev_err(&pdev->dev,
- "Not able to enable sensor: %d\n", ret);
- goto err_tz;
- }
+ stm_enable_irq(sensor);
/*
* Thermal_zone doesn't enable hwmon as default,
@@ -718,8 +572,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
if (ret)
goto err_tz;
- sensor->mode = THERMAL_DEVICE_ENABLED;
-
dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
__func__);
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 6e051cbd824f..2ae7198d3067 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -174,8 +174,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/**
* step_wise_throttle - throttles devices associated with the given zone
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* Throttling Logic: This uses the trend of the thermal zone to throttle.
* If the thermal zone is 'heating up' this throttles all the cooling
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
new file mode 100644
index 000000000000..74d73be16496
--- /dev/null
+++ b/drivers/thermal/sun8i_thermal.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thermal sensor driver for Allwinner SOC
+ * Copyright (C) 2019 Yangtao Li
+ *
+ * Based on the work of Icenowy Zheng <icenowy@aosc.io>
+ * Based on the work of Ondrej Jirman <megous@megous.com>
+ * Based on the work of Josef Gajdusek <atx@atx.name>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "thermal_hwmon.h"
+
+#define MAX_SENSOR_NUM 4
+
+#define FT_TEMP_MASK GENMASK(11, 0)
+#define TEMP_CALIB_MASK GENMASK(11, 0)
+#define CALIBRATE_DEFAULT 0x800
+
+#define SUN8I_THS_CTRL0 0x00
+#define SUN8I_THS_CTRL2 0x40
+#define SUN8I_THS_IC 0x44
+#define SUN8I_THS_IS 0x48
+#define SUN8I_THS_MFC 0x70
+#define SUN8I_THS_TEMP_CALIB 0x74
+#define SUN8I_THS_TEMP_DATA 0x80
+
+#define SUN50I_THS_CTRL0 0x00
+#define SUN50I_H6_THS_ENABLE 0x04
+#define SUN50I_H6_THS_PC 0x08
+#define SUN50I_H6_THS_DIC 0x10
+#define SUN50I_H6_THS_DIS 0x20
+#define SUN50I_H6_THS_MFC 0x30
+#define SUN50I_H6_THS_TEMP_CALIB 0xa0
+#define SUN50I_H6_THS_TEMP_DATA 0xc0
+
+#define SUN8I_THS_CTRL0_T_ACQ0(x) (GENMASK(15, 0) & (x))
+#define SUN8I_THS_CTRL2_T_ACQ1(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN8I_THS_DATA_IRQ_STS(x) BIT(x + 8)
+
+#define SUN50I_THS_CTRL0_T_ACQ(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN50I_THS_FILTER_EN BIT(2)
+#define SUN50I_THS_FILTER_TYPE(x) (GENMASK(1, 0) & (x))
+#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
+#define SUN50I_H6_THS_DATA_IRQ_STS(x) BIT(x)
+
+/* millidegree celsius */
+
+struct tsensor {
+ struct ths_device *tmdev;
+ struct thermal_zone_device *tzd;
+ int id;
+};
+
+struct ths_thermal_chip {
+ bool has_mod_clk;
+ bool has_bus_clk_reset;
+ int sensor_num;
+ int offset;
+ int scale;
+ int ft_deviation;
+ int temp_data_base;
+ int (*calibrate)(struct ths_device *tmdev,
+ u16 *caldata, int callen);
+ int (*init)(struct ths_device *tmdev);
+ int (*irq_ack)(struct ths_device *tmdev);
+ int (*calc_temp)(struct ths_device *tmdev,
+ int id, int reg);
+};
+
+struct ths_device {
+ const struct ths_thermal_chip *chip;
+ struct device *dev;
+ struct regmap *regmap;
+ struct reset_control *reset;
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct tsensor sensor[MAX_SENSOR_NUM];
+};
+
+/* Temp Unit: millidegree Celsius */
+static int sun8i_ths_calc_temp(struct ths_device *tmdev,
+ int id, int reg)
+{
+ return tmdev->chip->offset - (reg * tmdev->chip->scale / 10);
+}
+
+static int sun50i_h5_calc_temp(struct ths_device *tmdev,
+ int id, int reg)
+{
+ if (reg >= 0x500)
+ return -1191 * reg / 10 + 223000;
+ else if (!id)
+ return -1452 * reg / 10 + 259000;
+ else
+ return -1590 * reg / 10 + 276000;
+}
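
Both conversions above are simple linear fits from the raw register value to millidegrees Celsius; the per-chip offset and scale constants are declared in the chip tables at the bottom of this file. A worked example using the H3 constants from that table (offset = 217000, scale = 1211); the register value itself is illustrative:

#include <stdio.h>

int main(void)
{
	int offset = 217000, scale = 1211; /* sun8i_h3_ths values */
	int reg = 1500;                    /* illustrative raw sample */

	/* same math as sun8i_ths_calc_temp() */
	int temp = offset - (reg * scale / 10);

	printf("reg=%#x -> %d mC\n", reg, temp);
	return 0; /* prints reg=0x5dc -> 35350 mC, i.e. ~35.4 degrees C */
}

For chips with a non-zero ft_deviation (7000 on the H6, per its table entry), sun8i_ths_get_temp() adds that fixed offset on top of the result.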
+
+static int sun8i_ths_get_temp(void *data, int *temp)
+{
+ struct tsensor *s = data;
+ struct ths_device *tmdev = s->tmdev;
+ int val = 0;
+
+ regmap_read(tmdev->regmap, tmdev->chip->temp_data_base +
+ 0x4 * s->id, &val);
+
+ /* ths have no data yet */
+ if (!val)
+ return -EAGAIN;
+
+ *temp = tmdev->chip->calc_temp(tmdev, s->id, val);
+ /*
+ * According to the original SDK, some platforms (rarely) add a
+ * fixed offset value after calculating the temperature. We can't
+ * simply fold that offset into the conversion formula above,
+ * because the same formula is also used when the sensor is
+ * calibrated; doing so would make the correct calibration formula
+ * hard to know.
+ */
+ *temp += tmdev->chip->ft_deviation;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ths_ops = {
+ .get_temp = sun8i_ths_get_temp,
+};
+
+static const struct regmap_config config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+ .max_register = 0xfc,
+};
+
+static int sun8i_h3_irq_ack(struct ths_device *tmdev)
+{
+ int i, state, ret = 0;
+
+ regmap_read(tmdev->regmap, SUN8I_THS_IS, &state);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & SUN8I_THS_DATA_IRQ_STS(i)) {
+ regmap_write(tmdev->regmap, SUN8I_THS_IS,
+ SUN8I_THS_DATA_IRQ_STS(i));
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+static int sun50i_h6_irq_ack(struct ths_device *tmdev)
+{
+ int i, state, ret = 0;
+
+ regmap_read(tmdev->regmap, SUN50I_H6_THS_DIS, &state);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & SUN50I_H6_THS_DATA_IRQ_STS(i)) {
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_DIS,
+ SUN50I_H6_THS_DATA_IRQ_STS(i));
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t sun8i_irq_thread(int irq, void *data)
+{
+ struct ths_device *tmdev = data;
+ int i, state;
+
+ state = tmdev->chip->irq_ack(tmdev);
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ if (state & BIT(i))
+ thermal_zone_device_update(tmdev->sensor[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun8i_h3_ths_calibrate(struct ths_device *tmdev,
+ u16 *caldata, int callen)
+{
+ int i;
+
+ if (!caldata[0] || callen < 2 * tmdev->chip->sensor_num)
+ return -EINVAL;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ int offset = (i % 2) << 4;
+
+ regmap_update_bits(tmdev->regmap,
+ SUN8I_THS_TEMP_CALIB + (4 * (i >> 1)),
+ 0xfff << offset,
+ caldata[i] << offset);
+ }
+
+ return 0;
+}
+
+static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
+ u16 *caldata, int callen)
+{
+ struct device *dev = tmdev->dev;
+ int i, ft_temp;
+
+ if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num)
+ return -EINVAL;
+
+ /*
+ * efuse layout:
+ *
+ * 0 11 16 32
+ * +-------+-------+-------+
+ * |temp| |sensor0|sensor1|
+ * +-------+-------+-------+
+ *
+ * The calibration data on the H6 is the ambient temperature and
+ * sensor values that are filled during the factory test stage.
+ *
+ * The unit of the stored FT temperature is 0.1 degree Celsius.
+ *
+ * We need to calculate a delta between the measured and calculated
+ * register values, and this becomes the calibration offset.
+ */
+ ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ int sensor_reg = caldata[i + 1];
+ int cdata, offset;
+ int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
+
+ /*
+ * Calibration data is CALIBRATE_DEFAULT - (calculated
+ * temperature from sensor reading at factory temperature
+ * minus actual factory temperature) * 14.88 (scale from
+ * temperature to register values)
+ */
+ cdata = CALIBRATE_DEFAULT -
+ ((sensor_temp - ft_temp) * 10 / tmdev->chip->scale);
+ if (cdata & ~TEMP_CALIB_MASK) {
+ /*
+ * The calibration value is wider than 12 bits, but the
+ * calibration register is only 12 bits. In this case the
+ * THS hardware can still work without calibration, although
+ * the data won't be so accurate.
+ */
+ dev_warn(dev, "sensor%d is not calibrated.\n", i);
+ continue;
+ }
+
+ offset = (i % 2) * 16;
+ regmap_update_bits(tmdev->regmap,
+ SUN50I_H6_THS_TEMP_CALIB + (i / 2 * 4),
+ 0xfff << offset,
+ cdata << offset);
+ }
+
+ return 0;
+}
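
The delta computation above works in register counts: with the H6 constants from this file (scale = 672, CALIBRATE_DEFAULT = 0x800), 10/scale converts millidegrees back to counts, roughly the 14.88 counts per degree mentioned in the comment. A worked example with illustrative factory readings:

#include <stdio.h>

int main(void)
{
	int scale = 672;          /* sun50i_h6_ths value */
	int ft_temp = 270 * 100;  /* efuse stores 0.1 C units -> 27000 mC */
	int sensor_temp = 30000;  /* decoded from the efuse sensor register */

	/* same math as sun50i_h6_ths_calibrate() */
	int cdata = 0x800 - ((sensor_temp - ft_temp) * 10 / scale);

	printf("cdata=%#x\n", cdata);
	return 0; /* prints cdata=0x7d4, well within the 12-bit register */
}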
+
+static int sun8i_ths_calibrate(struct ths_device *tmdev)
+{
+ struct nvmem_cell *calcell;
+ struct device *dev = tmdev->dev;
+ u16 *caldata;
+ size_t callen;
+ int ret = 0;
+
+ calcell = devm_nvmem_cell_get(dev, "calibration");
+ if (IS_ERR(calcell)) {
+ if (PTR_ERR(calcell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /*
+ * Even if the external calibration data stored in sid is
+ * not accessible, the THS hardware can still work, although
+ * the data won't be so accurate.
+ *
+ * The default value of the calibration register is 0x800 for
+ * every sensor, and the calibration value is usually 0x7xx or
+ * 0x8xx, so it never strays far from the default.
+ *
+ * So we do not return an error if the calibration data is not
+ * available, except when the probe needs deferring.
+ */
+ goto out;
+ }
+
+ caldata = nvmem_cell_read(calcell, &callen);
+ if (IS_ERR(caldata)) {
+ ret = PTR_ERR(caldata);
+ goto out;
+ }
+
+ tmdev->chip->calibrate(tmdev, caldata, callen);
+
+ kfree(caldata);
+out:
+ return ret;
+}
+
+static int sun8i_ths_resource_init(struct ths_device *tmdev)
+{
+ struct device *dev = tmdev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tmdev->regmap = devm_regmap_init_mmio(dev, base, &config);
+ if (IS_ERR(tmdev->regmap))
+ return PTR_ERR(tmdev->regmap);
+
+ if (tmdev->chip->has_bus_clk_reset) {
+ tmdev->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tmdev->reset))
+ return PTR_ERR(tmdev->reset);
+
+ tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(tmdev->bus_clk))
+ return PTR_ERR(tmdev->bus_clk);
+ }
+
+ if (tmdev->chip->has_mod_clk) {
+ tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(tmdev->mod_clk))
+ return PTR_ERR(tmdev->mod_clk);
+ }
+
+ ret = reset_control_deassert(tmdev->reset);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(tmdev->bus_clk);
+ if (ret)
+ goto assert_reset;
+
+ ret = clk_set_rate(tmdev->mod_clk, 24000000);
+ if (ret)
+ goto bus_disable;
+
+ ret = clk_prepare_enable(tmdev->mod_clk);
+ if (ret)
+ goto bus_disable;
+
+ ret = sun8i_ths_calibrate(tmdev);
+ if (ret)
+ goto mod_disable;
+
+ return 0;
+
+mod_disable:
+ clk_disable_unprepare(tmdev->mod_clk);
+bus_disable:
+ clk_disable_unprepare(tmdev->bus_clk);
+assert_reset:
+ reset_control_assert(tmdev->reset);
+
+ return ret;
+}
+
+static int sun8i_h3_thermal_init(struct ths_device *tmdev)
+{
+ int val;
+
+ /* average over 4 samples */
+ regmap_write(tmdev->regmap, SUN8I_THS_MFC,
+ SUN50I_THS_FILTER_EN |
+ SUN50I_THS_FILTER_TYPE(1));
+ /*
+ * clkin = 24MHz
+ * filter_samples = 4
+ * period = 0.25s
+ *
+ * x = period * clkin / 4096 / filter_samples - 1
+ * = 365
+ */
+ val = GENMASK(7 + tmdev->chip->sensor_num, 8);
+ regmap_write(tmdev->regmap, SUN8I_THS_IC,
+ SUN50I_H6_THS_PC_TEMP_PERIOD(365) | val);
+ /*
+ * T_acq = 20us
+ * clkin = 24MHz
+ *
+ * x = T_acq * clkin - 1
+ * = 479
+ */
+ regmap_write(tmdev->regmap, SUN8I_THS_CTRL0,
+ SUN8I_THS_CTRL0_T_ACQ0(479));
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN8I_THS_CTRL2,
+ SUN8I_THS_CTRL2_T_ACQ1(479) | val);
+
+ return 0;
+}
+
+/*
+ * Without this undocumented value, the returned temperatures would
+ * be higher than real ones by about 20C.
+ */
+#define SUN50I_H6_CTRL0_UNK 0x0000002f
+
+static int sun50i_h6_thermal_init(struct ths_device *tmdev)
+{
+ int val;
+
+ /*
+ * T_acq = 20us
+ * clkin = 24MHz
+ *
+ * x = T_acq * clkin - 1
+ * = 479
+ */
+ regmap_write(tmdev->regmap, SUN50I_THS_CTRL0,
+ SUN50I_H6_CTRL0_UNK | SUN50I_THS_CTRL0_T_ACQ(479));
+ /* average over 4 samples */
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_MFC,
+ SUN50I_THS_FILTER_EN |
+ SUN50I_THS_FILTER_TYPE(1));
+ /*
+ * clkin = 24MHz
+ * filter_samples = 4
+ * period = 0.25s
+ *
+ * x = period * clkin / 4096 / filter_samples - 1
+ * = 365
+ */
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_PC,
+ SUN50I_H6_THS_PC_TEMP_PERIOD(365));
+ /* enable sensor */
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_ENABLE, val);
+ /* thermal data interrupt enable */
+ val = GENMASK(tmdev->chip->sensor_num - 1, 0);
+ regmap_write(tmdev->regmap, SUN50I_H6_THS_DIC, val);
+
+ return 0;
+}
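
Both init paths derive the same two register values from the formulas in the comments (24 MHz input clock, 4-sample filter, 0.25 s update period, 20 us acquisition time). A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	long clkin = 24000000;

	/* measurement period: 0.25 s * 24 MHz / 4096 / 4 samples - 1 */
	long period = clkin / 4 / 4096 / 4 - 1; /* 0.25 s == clkin / 4 */

	/* acquisition time: 20 us * 24 MHz - 1 */
	long t_acq = 20 * clkin / 1000000 - 1;

	printf("period reg=%ld, t_acq reg=%ld\n", period, t_acq);
	return 0; /* prints period reg=365, t_acq reg=479 */
}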
+
+static int sun8i_ths_register(struct ths_device *tmdev)
+{
+ int i;
+
+ for (i = 0; i < tmdev->chip->sensor_num; i++) {
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tmdev->sensor[i].tzd =
+ devm_thermal_zone_of_sensor_register(tmdev->dev,
+ i,
+ &tmdev->sensor[i],
+ &ths_ops);
+ if (IS_ERR(tmdev->sensor[i].tzd))
+ return PTR_ERR(tmdev->sensor[i].tzd);
+
+ if (devm_thermal_add_hwmon_sysfs(tmdev->sensor[i].tzd))
+ dev_warn(tmdev->dev,
+ "Failed to add hwmon sysfs attributes\n");
+ }
+
+ return 0;
+}
+
+static int sun8i_ths_probe(struct platform_device *pdev)
+{
+ struct ths_device *tmdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev), GFP_KERNEL);
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->chip = of_device_get_match_data(&pdev->dev);
+ if (!tmdev->chip)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, tmdev);
+
+ ret = sun8i_ths_resource_init(tmdev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = tmdev->chip->init(tmdev);
+ if (ret)
+ return ret;
+
+ ret = sun8i_ths_register(tmdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Avoid entering the interrupt handler: the thermal zones are not
+ * registered yet, so we defer the registration of the interrupt
+ * to the end of probe.
+ */
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ sun8i_irq_thread,
+ IRQF_ONESHOT, "ths", tmdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sun8i_ths_remove(struct platform_device *pdev)
+{
+ struct ths_device *tmdev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(tmdev->mod_clk);
+ clk_disable_unprepare(tmdev->bus_clk);
+ reset_control_assert(tmdev->reset);
+
+ return 0;
+}
+
+static const struct ths_thermal_chip sun8i_a83t_ths = {
+ .sensor_num = 3,
+ .scale = 705,
+ .offset = 191668,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun8i_h3_ths = {
+ .sensor_num = 1,
+ .scale = 1211,
+ .offset = 217000,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun8i_r40_ths = {
+ .sensor_num = 2,
+ .offset = 251086,
+ .scale = 1130,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_a64_ths = {
+ .sensor_num = 3,
+ .offset = 260890,
+ .scale = 1170,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_h5_ths = {
+ .sensor_num = 2,
+ .has_mod_clk = true,
+ .has_bus_clk_reset = true,
+ .temp_data_base = SUN8I_THS_TEMP_DATA,
+ .calibrate = sun8i_h3_ths_calibrate,
+ .init = sun8i_h3_thermal_init,
+ .irq_ack = sun8i_h3_irq_ack,
+ .calc_temp = sun50i_h5_calc_temp,
+};
+
+static const struct ths_thermal_chip sun50i_h6_ths = {
+ .sensor_num = 2,
+ .has_bus_clk_reset = true,
+ .ft_deviation = 7000,
+ .offset = 187744,
+ .scale = 672,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
+static const struct of_device_id of_ths_match[] = {
+ { .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
+ { .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
+ { .compatible = "allwinner,sun8i-r40-ths", .data = &sun8i_r40_ths },
+ { .compatible = "allwinner,sun50i-a64-ths", .data = &sun50i_a64_ths },
+ { .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
+ { .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_ths_match);
+
+static struct platform_driver ths_driver = {
+ .probe = sun8i_ths_probe,
+ .remove = sun8i_ths_remove,
+ .driver = {
+ .name = "sun8i-thermal",
+ .of_match_table = of_ths_match,
+ },
+};
+module_platform_driver(ths_driver);
+
+MODULE_DESCRIPTION("Thermal sensor driver for Allwinner SOC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 5acaad3a594f..66e0639da4bf 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -360,7 +360,7 @@ static struct soctherm_oc_irq_chip_data soc_irq_cdata;
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
- * @v: the value to write
+ * @value: the value to write
* @reg: the register offset
*
* Writes @v to @reg. No return value.
@@ -435,6 +435,7 @@ static int tegra_thermctl_get_temp(void *data, int *out_temp)
/**
* enforce_temp_range() - check and enforce temperature range [min, max]
+ * @dev: struct device * of the SOC_THERM instance
* @trip_temp: the trip temperature to check
*
* Checks and enforces the permitted temperature range that SOC_THERM
@@ -747,6 +748,8 @@ static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
/**
* tegra_soctherm_set_hwtrips() - set HW trip point from DT data
* @dev: struct device * of the SOC_THERM instance
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @tz: struct thermal_zone_device *
*
 * Configure the SOC_THERM HW trip points, setting "THERMTRIP" and
 * "THROTTLE" trip points, using "thermtrips", "critical" or "hot"
@@ -931,6 +934,7 @@ static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
/**
* soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
+ * @ts: pointer to a struct tegra_soctherm
* @alarm: The soctherm throttle id
* @enable: Flag indicating enable the soctherm over-current
* interrupt or disable it
@@ -1156,7 +1160,7 @@ static void soctherm_oc_irq_enable(struct irq_data *data)
/**
* soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
- * @irq_data: The interrupt request information
+ * @data: The interrupt request information
*
* Clears the interrupt request enable bit of the overcurrent
* interrupt request chip data.
@@ -1206,6 +1210,7 @@ static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
/**
* soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
* @d: Interrupt request domain
+ * @ctrlr: Controller device tree node
* @intspec: Array of u32s from DTs "interrupt" property
* @intsize: Number of values inside the intspec array
* @out_hwirq: HW IRQ value associated with this interrupt
@@ -1681,6 +1686,7 @@ err:
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
+ * @pdev: Pointer to platform_device struct
*/
static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
{
@@ -1751,6 +1757,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
/**
* throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
+ * @ts: pointer to a struct tegra_soctherm
* @level: describing the level LOW/MED/HIGH of throttling
*
* It's necessary to set up the CPU-local CCROC NV_THERM instance with
@@ -1798,6 +1805,7 @@ static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
/**
* throttlectl_cpu_level_select() - program CPU pulse skipper config
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
@@ -1841,6 +1849,7 @@ static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
/**
* throttlectl_cpu_mn() - program CPU pulse skipper configuration
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* Pulse skippers are used to throttle clock frequencies. This
@@ -1874,6 +1883,7 @@ static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
/**
* throttlectl_gpu_level_select() - selects throttling level for GPU
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of throttle event id
*
* This function programs soctherm's interface to GK20a NV_THERM to select
@@ -1918,6 +1928,7 @@ static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
/**
* soctherm_throttle_program() - programs pulse skippers' configuration
+ * @ts: pointer to a struct tegra_soctherm
* @throt: the LIGHT/HEAVY of the throttle event id.
*
* Pulse skippers are used to throttle clock frequencies.
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index ae5743c9a894..73665c3ccfe0 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -76,13 +76,17 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
struct gadc_thermal_info *gti)
{
struct device_node *np = dev->of_node;
+ enum iio_chan_type chan_type;
int ntable;
int ret;
ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
sizeof(u32));
if (ntable <= 0) {
- dev_notice(dev, "no lookup table, assuming DAC channel returns milliCelcius\n");
+ ret = iio_get_channel_type(gti->channel, &chan_type);
+ if (ret || chan_type != IIO_TEMP)
+ dev_notice(dev,
+ "no lookup table, assuming DAC channel returns milliCelcius\n");
return 0;
}
@@ -124,13 +128,6 @@ static int gadc_thermal_probe(struct platform_device *pdev)
if (!gti)
return -ENOMEM;
- ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
- if (ret < 0)
- return ret;
-
- gti->dev = &pdev->dev;
- platform_set_drvdata(pdev, gti);
-
gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
if (IS_ERR(gti->channel)) {
ret = PTR_ERR(gti->channel);
@@ -139,6 +136,13 @@ static int gadc_thermal_probe(struct platform_device *pdev)
return ret;
}
+ ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+ if (ret < 0)
+ return ret;
+
+ gti->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gti);
+
gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
&gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 207b0cda70da..a9bf00e91d64 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -92,14 +92,12 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
/* device tree support */
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
-void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
-static inline void of_thermal_destroy_zones(void) { }
static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
{
return 0;
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index dd5d8ee37928..c8d2620f2e42 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -248,3 +248,31 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
kfree(hwmon);
}
EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
+
+static void devm_thermal_hwmon_release(struct device *dev, void *res)
+{
+ thermal_remove_hwmon_sysfs(*(struct thermal_zone_device **)res);
+}
+
+int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_zone_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = thermal_add_hwmon_sysfs(tz);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = tz;
+ devres_add(&tz->device, ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_add_hwmon_sysfs);
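
With the devres variant above, the hwmon sysfs attributes are torn down automatically when the thermal zone device goes away, so a driver needs no matching remove call. A minimal usage sketch (hypothetical driver code, not from this patch; it mirrors how the sun8i driver above consumes the API):

static int example_probe(struct platform_device *pdev, void *sensor_data,
			 const struct thermal_zone_of_device_ops *ops)
{
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
						   sensor_data, ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	/* failure here is non-fatal for most drivers */
	if (devm_thermal_add_hwmon_sysfs(tzd))
		dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");

	return 0;
}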
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
index a160b9d62dd0..1a9d65f6a6a8 100644
--- a/drivers/thermal/thermal_hwmon.h
+++ b/drivers/thermal/thermal_hwmon.h
@@ -17,6 +17,7 @@
#ifdef CONFIG_THERMAL_HWMON
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
+int devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
#else
static inline int
@@ -25,6 +26,12 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
}
+static inline int
+devm_thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ return 0;
+}
+
static inline void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 962873fd9242..293cffd9c8ad 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -17,8 +17,8 @@
/**
* notify_user_space - Notifies user space about thermal events
- * @tz - thermal_zone_device
- * @trip - trip point index
+ * @tz: thermal_zone_device
+ * @trip: trip point index
*
* This function notifies the user space through UEvents.
*/
diff --git a/drivers/thermal/zx2967_thermal.c b/drivers/thermal/zx2967_thermal.c
index 7c8a82c8e1e9..8e3a2d3c2f9a 100644
--- a/drivers/thermal/zx2967_thermal.c
+++ b/drivers/thermal/zx2967_thermal.c
@@ -45,6 +45,7 @@
* @clk_topcrm: topcrm clk structure
* @clk_apb: apb clk structure
* @regs: pointer to base address of the thermal sensor
+ * @dev: struct device pointer
*/
struct zx2967_thermal_priv {
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index fd9adca898ff..1eb757e8df3b 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig THUNDERBOLT
- tristate "Thunderbolt support"
+menuconfig USB4
+ tristate "Unified support for USB4 and Thunderbolt"
depends on PCI
depends on X86 || COMPILE_TEST
select APPLE_PROPERTIES if EFI_STUB && X86
@@ -9,9 +9,10 @@ menuconfig THUNDERBOLT
select CRYPTO_HASH
select NVMEM
help
- Thunderbolt Controller driver. This driver is required if you
- want to hotplug Thunderbolt devices on Apple hardware or on PCs
- with Intel Falcon Ridge or newer.
+	  USB4 and Thunderbolt driver. USB4 is the public specification
+	  based on the Thunderbolt 3 protocol. This driver is required if
+ you want to hotplug Thunderbolt and USB4 compliant devices on
+ Apple hardware or on PCs with Intel Falcon Ridge or newer.
	  To compile this driver as a module, choose M here. The module will be
called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 001187c577bf..eae28dd45250 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
+obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index fdd77bb4628d..19db6cdc5b70 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -113,7 +113,16 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
return ret;
}
-static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+/**
+ * tb_switch_find_cap() - Find switch capability
+ * @sw: Switch to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
int offset = sw->config.first_cap_offset;
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d97813e80e5f..f77ceae5c7d7 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -708,19 +708,26 @@ void tb_ctl_stop(struct tb_ctl *ctl)
/* public interface, commands */
/**
- * tb_cfg_error() - send error packet
+ * tb_cfg_ack_plug() - Ack hot plug/unplug event
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @port: Port where the hot plug/unplug happened
+ * @unplug: Ack hot plug or unplug
*
- * Return: Returns 0 on success or an error code on failure.
+ * Call this in response to a hot plug/unplug event to ack it.
+ * Returns %0 on success or an error code on failure.
*/
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
- enum tb_cfg_error error)
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
struct cfg_error_pkg pkg = {
.header = tb_cfg_make_header(route),
.port = port,
- .error = error,
+ .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+ .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
+ : TB_CFG_ERROR_PG_HOT_PLUG,
};
- tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
+ tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+ unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 2f1a1e111110..97cb03b38953 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -123,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
return header;
}
-int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
- enum tb_cfg_error error);
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 8dd7de0cc826..921d164b3f35 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -131,12 +131,51 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
}
/**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+ struct tb_cap_plug_events cap;
+ int res;
+
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+ return -ENODEV;
+ }
+ res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+ sizeof(cap) / 4);
+ if (res)
+ return res;
+
+ if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+ tb_sw_warn(sw, "no NVM\n");
+ return -ENODEV;
+ }
+
+ if (cap.drom_offset > 0xffff) {
+ tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+ cap.drom_offset);
+ return -ENXIO;
+ }
+ *offset = cap.drom_offset;
+ return 0;
+}
+
+/**
* tb_eeprom_read_n - read count bytes from offset into val
*/
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
size_t count)
{
+ u16 drom_offset;
int i, res;
+
+ res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+ if (res)
+ return res;
+
+ offset += drom_offset;
+
res = tb_eeprom_active(sw, true);
if (res)
return res;
@@ -239,36 +278,6 @@ struct tb_drom_entry_port {
/**
- * tb_eeprom_get_drom_offset - get drom offset within eeprom
- */
-static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
-{
- struct tb_cap_plug_events cap;
- int res;
- if (!sw->cap_plug_events) {
- tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
- return -ENOSYS;
- }
- res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
- sizeof(cap) / 4);
- if (res)
- return res;
-
- if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
- tb_sw_warn(sw, "no NVM\n");
- return -ENOSYS;
- }
-
- if (cap.drom_offset > 0xffff) {
- tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
- cap.drom_offset);
- return -ENXIO;
- }
- *offset = cap.drom_offset;
- return 0;
-}
-
-/**
* tb_drom_read_uid_only - read uid directly from drom
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
@@ -277,17 +286,11 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
u8 data[9];
- u16 drom_offset;
u8 crc;
- int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
- if (res)
- return res;
-
- if (drom_offset == 0)
- return -ENODEV;
+ int res;
/* read uid */
- res = tb_eeprom_read_n(sw, drom_offset, data, 9);
+ res = tb_eeprom_read_n(sw, 0, data, 9);
if (res)
return res;
@@ -484,12 +487,42 @@ err_free:
return ret;
}
+static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
+{
+ int ret;
+
+ ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
+ if (ret)
+ return ret;
+
+ /* Size includes CRC8 + UID + CRC32 */
+ *size += 1 + 8 + 4;
+ sw->drom = kzalloc(*size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
+ if (ret) {
+ kfree(sw->drom);
+ sw->drom = NULL;
+ }
+
+ return ret;
+}
+
+static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+ size_t count)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_drom_read(sw, offset, val, count);
+ return tb_eeprom_read_n(sw, offset, val, count);
+}
+
/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
int tb_drom_read(struct tb_switch *sw)
{
- u16 drom_offset;
u16 size;
u32 crc;
struct tb_drom_header *header;
@@ -510,18 +543,26 @@ int tb_drom_read(struct tb_switch *sw)
goto parse;
/*
- * The root switch contains only a dummy drom (header only,
- * no entries). Hardcode the configuration here.
+ * USB4 hosts may support reading DROM through router
+ * operations.
*/
- tb_drom_read_uid_only(sw, &sw->uid);
+ if (tb_switch_is_usb4(sw)) {
+ usb4_switch_read_uid(sw, &sw->uid);
+ if (!usb4_copy_host_drom(sw, &size))
+ goto parse;
+ } else {
+ /*
+ * The root switch contains only a dummy drom
+ * (header only, no entries). Hardcode the
+ * configuration here.
+ */
+ tb_drom_read_uid_only(sw, &sw->uid);
+ }
+
return 0;
}
- res = tb_eeprom_get_drom_offset(sw, &drom_offset);
- if (res)
- return res;
-
- res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
+ res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
if (res)
return res;
size &= 0x3ff;
@@ -535,7 +576,7 @@ int tb_drom_read(struct tb_switch *sw)
sw->drom = kzalloc(size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
- res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
+ res = tb_drom_read_n(sw, 0, sw->drom, size);
if (res)
goto err;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 641b21b54460..1be491ecbb45 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1271,6 +1271,9 @@ static struct pci_device_id nhi_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+
{ 0,}
};
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index b7b973949f8e..5d276ee9b38e 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -74,4 +74,6 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
+#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
+
#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ca86a8e09c77..ad5479f21174 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -163,10 +163,12 @@ static int nvm_validate_and_write(struct tb_switch *sw)
image_size -= hdr_size;
}
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_write(sw, 0, buf, image_size);
return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}
-static int nvm_authenticate_host(struct tb_switch *sw)
+static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
int ret = 0;
@@ -206,7 +208,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
return ret;
}
-static int nvm_authenticate_device(struct tb_switch *sw)
+static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
int ret, retries = 10;
@@ -251,6 +253,78 @@ static int nvm_authenticate_device(struct tb_switch *sw)
return -ETIMEDOUT;
}
+static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ /*
+ * During host router NVM upgrade we should not allow root port to
+ * go into D3cold because some root ports cannot trigger PME
+ * by themselves. To be on the safe side, keep the root port in D0 during
+ * the whole upgrade process.
+ */
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_put(&root_port->dev);
+}
+
+static inline bool nvm_readable(struct tb_switch *sw)
+{
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * USB4 devices must support NVM operations, but they are
+ * optional for hosts. Therefore we query the NVM sector
+ * size here, and if it is supported, assume the NVM
+ * operations are implemented.
+ */
+ return usb4_switch_nvm_sector_size(sw) > 0;
+ }
+
+ /* Thunderbolt 2 and 3 devices support NVM through DMA port */
+ return !!sw->dma_port;
+}
+
+static inline bool nvm_upgradeable(struct tb_switch *sw)
+{
+ if (sw->no_nvm_upgrade)
+ return false;
+ return nvm_readable(sw);
+}
+
+static inline int nvm_read(struct tb_switch *sw, unsigned int address,
+ void *buf, size_t size)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_read(sw, address, buf, size);
+ return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_authenticate(struct tb_switch *sw)
+{
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_authenticate(sw);
+
+ if (!tb_route(sw)) {
+ nvm_authenticate_start_dma_port(sw);
+ ret = nvm_authenticate_host_dma_port(sw);
+ } else {
+ ret = nvm_authenticate_device_dma_port(sw);
+ }
+
+ return ret;
+}
+
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -264,7 +338,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ ret = nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
@@ -341,8 +415,20 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
u32 val;
int ret;
- if (!sw->dma_port)
+ if (!nvm_readable(sw))
+ return 0;
+
+ /*
+ * The NVM format of non-Intel hardware is not known, so for
+ * now we restrict NVM upgrade to Intel hardware. We may
+ * relax this in the future when we learn other NVM formats.
+ */
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) {
+ dev_info(&sw->dev,
+ "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
+ sw->config.vendor_id);
return 0;
+ }
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -358,8 +444,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
if (!sw->safe_mode) {
u32 nvm_size, hdr_size;
- ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
- sizeof(val));
+ ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
goto err_ida;
@@ -367,8 +452,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - hdr_size) / 2;
- ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
- sizeof(val));
+ ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
if (ret)
goto err_ida;
@@ -620,6 +704,24 @@ int tb_port_clear_counter(struct tb_port *port, int counter)
}
/**
+ * tb_port_unlock() - Unlock downstream port
+ * @port: Port to unlock
+ *
+ * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
+ * downstream router accessible for CM.
+ */
+int tb_port_unlock(struct tb_port *port)
+{
+ if (tb_switch_is_icm(port->sw))
+ return 0;
+ if (!tb_port_is_null(port))
+ return -EINVAL;
+ if (tb_switch_is_usb4(port->sw))
+ return usb4_port_unlock(port);
+ return 0;
+}
+
+/**
* tb_init_port() - initialize a port
*
* This is a helper method for tb_switch_alloc. Does not check or initialize
@@ -650,6 +752,10 @@ static int tb_init_port(struct tb_port *port)
port->cap_phy = cap;
else
tb_port_WARN(port, "non switch port without a PHY\n");
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
+ if (cap > 0)
+ port->cap_usb4 = cap;
} else if (port->port != 0) {
cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
if (cap > 0)
@@ -936,12 +1042,47 @@ bool tb_port_is_enabled(struct tb_port *port)
case TB_TYPE_DP_HDMI_OUT:
return tb_dp_port_is_enabled(port);
+ case TB_TYPE_USB3_UP:
+ case TB_TYPE_USB3_DOWN:
+ return tb_usb3_port_is_enabled(port);
+
default:
return false;
}
}
/**
+ * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
+ * @port: USB3 adapter port to check
+ */
+bool tb_usb3_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1))
+ return false;
+
+ return !!(data & ADP_USB3_CS_0_PE);
+}
+
+/**
+ * tb_usb3_port_enable() - Enable USB3 adapter port
+ * @port: USB3 adapter port to enable
+ * @enable: Enable/disable the USB3 adapter
+ */
+int tb_usb3_port_enable(struct tb_port *port, bool enable)
+{
+ u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
+ : ADP_USB3_CS_0_V;
+
+ if (!port->cap_adap)
+ return -ENXIO;
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1);
+}
+
+/**
* tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
* @port: PCIe port to check
*/
@@ -1088,20 +1229,38 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
/* switch utility functions */
-static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
+{
+ switch (sw->generation) {
+ case 1:
+ return "Thunderbolt 1";
+ case 2:
+ return "Thunderbolt 2";
+ case 3:
+ return "Thunderbolt 3";
+ case 4:
+ return "USB4";
+ default:
+ return "Unknown";
+ }
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
- tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
- sw->vendor_id, sw->device_id, sw->revision,
- sw->thunderbolt_version);
- tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
+ const struct tb_regs_switch_header *regs = &sw->config;
+
+ tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+ tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+ regs->revision, regs->thunderbolt_version);
+ tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
tb_dbg(tb, " Config:\n");
tb_dbg(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
- sw->upstream_port_number, sw->depth,
- (((u64) sw->route_hi) << 32) | sw->route_lo,
- sw->enabled, sw->plug_events_delay);
+ regs->upstream_port_number, regs->depth,
+ (((u64) regs->route_hi) << 32) | regs->route_lo,
+ regs->enabled, regs->plug_events_delay);
tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
- sw->__unknown1, sw->__unknown4);
+ regs->__unknown1, regs->__unknown4);
}
/**
@@ -1148,6 +1307,10 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (res)
return res;
+ /* Plug events are always enabled in USB4 */
+ if (tb_switch_is_usb4(sw))
+ return 0;
+
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
@@ -1359,30 +1522,6 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
-static void nvm_authenticate_start(struct tb_switch *sw)
-{
- struct pci_dev *root_port;
-
- /*
- * During host router NVM upgrade we should not allow root port to
- * go into D3cold because some root ports cannot trigger PME
- * itself. To be on the safe side keep the root port in D0 during
- * the whole upgrade process.
- */
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
- if (root_port)
- pm_runtime_get_noresume(&root_port->dev);
-}
-
-static void nvm_authenticate_complete(struct tb_switch *sw)
-{
- struct pci_dev *root_port;
-
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
- if (root_port)
- pm_runtime_put(&root_port->dev);
-}
-
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1431,17 +1570,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
goto exit_unlock;
sw->nvm->authenticating = true;
-
- if (!tb_route(sw)) {
- /*
- * Keep root port from suspending as long as the
- * NVM upgrade process is running.
- */
- nvm_authenticate_start(sw);
- ret = nvm_authenticate_host(sw);
- } else {
- ret = nvm_authenticate_device(sw);
- }
+ ret = nvm_authenticate(sw);
}
exit_unlock:
@@ -1556,11 +1685,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
- if (sw->dma_port && !sw->no_nvm_upgrade)
+ if (nvm_upgradeable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_version.attr) {
- if (sw->dma_port)
+ if (nvm_readable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_boot.attr) {
@@ -1672,6 +1801,9 @@ static int tb_switch_get_generation(struct tb_switch *sw)
return 3;
default:
+ if (tb_switch_is_usb4(sw))
+ return 4;
+
/*
* For unknown switches assume generation to be 1 to be
* on the safe side.
@@ -1682,6 +1814,19 @@ static int tb_switch_get_generation(struct tb_switch *sw)
}
}
+static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
+{
+ int max_depth;
+
+ if (tb_switch_is_usb4(sw) ||
+ (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
+ max_depth = USB4_SWITCH_MAX_DEPTH;
+ else
+ max_depth = TB_SWITCH_MAX_DEPTH;
+
+ return depth > max_depth;
+}
+
/**
* tb_switch_alloc() - allocate a switch
* @tb: Pointer to the owning domain
@@ -1703,10 +1848,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
int upstream_port;
int i, ret, depth;
- /* Make sure we do not exceed maximum topology limit */
+ /* Unlock the downstream port so we can access the switch below */
+ if (route) {
+ struct tb_switch *parent_sw = tb_to_switch(parent);
+ struct tb_port *down;
+
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
+ }
+
depth = tb_route_length(route);
- if (depth > TB_SWITCH_MAX_DEPTH)
- return ERR_PTR(-EADDRNOTAVAIL);
upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
if (upstream_port < 0)
@@ -1721,8 +1872,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (ret)
goto err_free_sw_ports;
+ sw->generation = tb_switch_get_generation(sw);
+
tb_dbg(tb, "current switch config:\n");
- tb_dump_switch(tb, &sw->config);
+ tb_dump_switch(tb, sw);
/* configure switch */
sw->config.upstream_port_number = upstream_port;
@@ -1731,6 +1884,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->config.route_lo = lower_32_bits(route);
sw->config.enabled = 0;
+ /* Make sure we do not exceed maximum topology limit */
+ if (tb_switch_exceeds_max_depth(sw, depth)) {
+ ret = -EADDRNOTAVAIL;
+ goto err_free_sw_ports;
+ }
+
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
@@ -1745,14 +1904,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
sw->ports[i].port = i;
}
- sw->generation = tb_switch_get_generation(sw);
-
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
- if (ret < 0) {
- tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
- goto err_free_sw_ports;
- }
- sw->cap_plug_events = ret;
+ if (ret > 0)
+ sw->cap_plug_events = ret;
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
if (ret > 0)
@@ -1823,7 +1977,8 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
*
* Call this function before the switch is added to the system. It will
* upload configuration to the switch and makes it available for the
- * connection manager to use.
+ * connection manager to use. Can be called again for the same switch
+ * after resume from low power states to re-initialize it.
*
* Return: %0 in case of success and negative errno in case of failure
*/
@@ -1834,21 +1989,50 @@ int tb_switch_configure(struct tb_switch *sw)
int ret;
route = tb_route(sw);
- tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
- route, tb_route_length(route), sw->config.upstream_port_number);
- if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
- tb_sw_warn(sw, "unknown switch vendor id %#x\n",
- sw->config.vendor_id);
+ tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
+ sw->config.enabled ? "restoring " : "initializing", route,
+ tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1;
- /* upload configuration */
- ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
- if (ret)
- return ret;
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * For USB4 devices, we need to program the CM version
+ * accordingly so that it knows to expose all the
+ * additional capabilities.
+ */
+ sw->config.cmuv = USB4_VERSION_1_0;
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 4);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_setup(sw);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_configure_link(sw);
+ } else {
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+ tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+ sw->config.vendor_id);
- ret = tb_lc_configure_link(sw);
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+ return -ENODEV;
+ }
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 3);
+ if (ret)
+ return ret;
+
+ ret = tb_lc_configure_link(sw);
+ }
if (ret)
return ret;
@@ -1857,18 +2041,32 @@ int tb_switch_configure(struct tb_switch *sw)
static int tb_switch_set_uuid(struct tb_switch *sw)
{
+ bool uid = false;
u32 uuid[4];
int ret;
if (sw->uuid)
return 0;
- /*
- * The newer controllers include fused UUID as part of link
- * controller specific registers
- */
- ret = tb_lc_read_uuid(sw, uuid);
- if (ret) {
+ if (tb_switch_is_usb4(sw)) {
+ ret = usb4_switch_read_uid(sw, &sw->uid);
+ if (ret)
+ return ret;
+ uid = true;
+ } else {
+ /*
+ * The newer controllers include fused UUID as part of
+ * link controller specific registers
+ */
+ ret = tb_lc_read_uuid(sw, uuid);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ uid = true;
+ }
+ }
+
+ if (uid) {
/*
* ICM generates UUID based on UID and fills the upper
* two words with ones. This is not strictly following
@@ -1935,7 +2133,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
nvm_get_auth_status(sw, &status);
if (status) {
if (!tb_route(sw))
- nvm_authenticate_complete(sw);
+ nvm_authenticate_complete_dma_port(sw);
return 0;
}
@@ -1950,7 +2148,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
/* Now we can allow root port to suspend again */
if (!tb_route(sw))
- nvm_authenticate_complete(sw);
+ nvm_authenticate_complete_dma_port(sw);
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
@@ -2004,6 +2202,8 @@ static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
if (!up->dual_link_port || !up->dual_link_port->remote)
return false;
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_lane_bonding_possible(sw);
return tb_lc_lane_bonding_possible(sw);
}
@@ -2175,6 +2375,10 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_update_link_attributes(sw);
if (ret)
return ret;
+
+ ret = tb_switch_tmu_init(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
@@ -2240,7 +2444,11 @@ void tb_switch_remove(struct tb_switch *sw)
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
- tb_lc_unconfigure_link(sw);
+
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_unconfigure_link(sw);
+ else
+ tb_lc_unconfigure_link(sw);
tb_switch_nvm_remove(sw);
@@ -2298,7 +2506,10 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
}
- err = tb_drom_read_uid_only(sw, &uid);
+ if (tb_switch_is_usb4(sw))
+ err = usb4_switch_read_uid(sw, &uid);
+ else
+ err = tb_drom_read_uid_only(sw, &uid);
if (err) {
tb_sw_warn(sw, "uid read failed\n");
return err;
@@ -2311,16 +2522,7 @@ int tb_switch_resume(struct tb_switch *sw)
}
}
- /* upload configuration */
- err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
- if (err)
- return err;
-
- err = tb_lc_configure_link(sw);
- if (err)
- return err;
-
- err = tb_plug_events_active(sw, true);
+ err = tb_switch_configure(sw);
if (err)
return err;
@@ -2336,8 +2538,14 @@ int tb_switch_resume(struct tb_switch *sw)
tb_sw_set_unplugged(port->remote->sw);
else if (port->xdomain)
port->xdomain->is_unplugged = true;
- } else if (tb_port_has_remote(port)) {
- if (tb_switch_resume(port->remote->sw)) {
+ } else if (tb_port_has_remote(port) || port->xdomain) {
+ /*
+ * Always unlock the port so the downstream
+ * switch/domain is accessible.
+ */
+ if (tb_port_unlock(port))
+ tb_port_warn(port, "failed to unlock port\n");
+ if (port->remote && tb_switch_resume(port->remote->sw)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
@@ -2361,7 +2569,10 @@ void tb_switch_suspend(struct tb_switch *sw)
tb_switch_suspend(port->remote->sw);
}
- tb_lc_set_sleep(sw);
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_set_sleep(sw);
+ else
+ tb_lc_set_sleep(sw);
}
/**
@@ -2374,6 +2585,8 @@ void tb_switch_suspend(struct tb_switch *sw)
*/
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_query_dp_resource(sw, in);
return tb_lc_dp_sink_query(sw, in);
}
@@ -2388,6 +2601,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_alloc_dp_resource(sw, in);
return tb_lc_dp_sink_alloc(sw, in);
}
@@ -2401,10 +2616,16 @@ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
- if (tb_lc_dp_sink_dealloc(sw, in)) {
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ ret = usb4_switch_dealloc_dp_resource(sw, in);
+ else
+ ret = tb_lc_dp_sink_dealloc(sw, in);
+
+ if (ret)
tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
in->port);
- }
}
struct tb_sw_lookup {
@@ -2517,6 +2738,24 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
return NULL;
}
+/**
+ * tb_switch_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
+ */
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
+ return NULL;
+}
+
void tb_switch_exit(void)
{
ida_destroy(&nvm_ida);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ea8727f769d6..107cd232f486 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -111,6 +111,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
tunnel = tb_tunnel_discover_pci(tb, port);
break;
+ case TB_TYPE_USB3_DOWN:
+ tunnel = tb_tunnel_discover_usb3(tb, port);
+ break;
+
default:
break;
}
@@ -158,6 +162,137 @@ static void tb_scan_xdomain(struct tb_port *port)
}
}
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+ int ret;
+
+ /* If it is already enabled in correct mode, don't touch it */
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_disable(sw);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_post_time(sw);
+ if (ret)
+ return ret;
+
+ return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
+ continue;
+ if (port->config.type != type)
+ continue;
+ if (!port->cap_adap)
+ continue;
+ if (tb_port_is_enabled(port))
+ continue;
+ return port;
+ }
+ return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ struct tb_port *down;
+
+ down = usb4_switch_map_usb3_down(sw, port);
+ if (down) {
+ if (WARN_ON(!tb_port_is_usb3_down(down)))
+ goto out;
+ if (WARN_ON(tb_usb3_port_is_enabled(down)))
+ goto out;
+
+ return down;
+ }
+
+out:
+ return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+ if (!up)
+ return 0;
+
+ /*
+ * Look up an available down port. Since we are chaining, it
+ * should be found right above this switch.
+ */
+ port = tb_port_at(tb_route(sw), parent);
+ down = tb_find_usb3_down(parent, port);
+ if (!down)
+ return 0;
+
+ if (tb_route(parent)) {
+ struct tb_port *parent_up;
+ /*
+ * Check first that the parent switch has its upstream USB3
+ * port enabled. Otherwise the chain is not complete and
+ * there is no point setting up a new tunnel.
+ */
+ parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+ if (!parent_up || !tb_port_is_enabled(parent_up))
+ return 0;
+ }
+
+ tunnel = tb_tunnel_alloc_usb3(tb, up, down);
+ if (!tunnel)
+ return -ENOMEM;
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(up,
+ "USB3 tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ return -EIO;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return 0;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_route(sw)) {
+ ret = tb_tunnel_usb3(sw->tb, sw);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+ ret = tb_create_usb3_tunnels(port->remote->sw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void tb_scan_port(struct tb_port *port);
/**
@@ -257,6 +392,18 @@ static void tb_scan_port(struct tb_port *port)
if (tb_switch_lane_bonding_enable(sw))
tb_sw_warn(sw, "failed to enable lane bonding\n");
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to enable TMU\n");
+
+ /*
+ * Create USB 3.x tunnels only when the switch is plugged into the
+ * domain. This is because we also scan the domain during discovery
+ * and want to discover existing USB 3.x tunnels before we create
+ * any new ones.
+ */
+ if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+ tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
tb_scan_switch(sw);
}
@@ -338,57 +485,18 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
}
}
-/**
- * tb_find_port() - return the first port of @type on @sw or NULL
- * @sw: Switch to find the port from
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_port(struct tb_switch *sw,
- enum tb_port_type type)
-{
- struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (port->config.type == type)
- return port;
- }
-
- return NULL;
-}
-
-/**
- * tb_find_unused_port() - return the first inactive port on @sw
- * @sw: Switch to find the port on
- * @type: Port type to look for
- */
-static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
- enum tb_port_type type)
-{
- struct tb_port *port;
-
- tb_switch_for_each_port(sw, port) {
- if (tb_is_upstream_port(port))
- continue;
- if (port->config.type != type)
- continue;
- if (port->cap_adap)
- continue;
- if (tb_port_is_enabled(port))
- continue;
- return port;
- }
- return NULL;
-}
-
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
+ struct tb_port *down = NULL;
+
/*
* To keep plugging devices consistently in the same PCIe
- * hierarchy, do mapping here for root switch downstream PCIe
- * ports.
+ * hierarchy, do mapping here for switch downstream PCIe ports.
*/
- if (!tb_route(sw)) {
+ if (tb_switch_is_usb4(sw)) {
+ down = usb4_switch_map_pcie_down(sw, port);
+ } else if (!tb_route(sw)) {
int phy_port = tb_phy_port_from_link(port->port);
int index;
@@ -409,12 +517,17 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
/* Validate the hard-coding */
if (WARN_ON(index > sw->config.max_port_number))
goto out;
- if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
+
+ down = &sw->ports[index];
+ }
+
+ if (down) {
+ if (WARN_ON(!tb_port_is_pcie_down(down)))
goto out;
- if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
+ if (WARN_ON(tb_pci_port_is_enabled(down)))
goto out;
- return &sw->ports[index];
+ return down;
}
out:
@@ -586,7 +699,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
struct tb_switch *parent_sw;
struct tb_tunnel *tunnel;
- up = tb_find_port(sw, TB_TYPE_PCIE_UP);
+ up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
if (!up)
return 0;
@@ -624,7 +737,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
- nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+ nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
@@ -719,6 +832,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_remove_dp_resources(port->remote->sw);
+ tb_switch_tmu_disable(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
@@ -786,8 +900,7 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
route = tb_cfg_get_route(&pkg->header);
- if (tb_cfg_error(tb->ctl, route, pkg->port,
- TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+ if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
}
@@ -866,10 +979,17 @@ static int tb_start(struct tb *tb)
return ret;
}
+ /* Enable TMU if it is off */
+ tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /*
+ * If the boot firmware did not create USB 3.x tunnels create them
+ * now for the whole topology.
+ */
+ tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
@@ -897,6 +1017,9 @@ static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to restore TMU configuration\n");
+
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ec851f20c571..2eb2bcd3cca3 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -44,6 +44,39 @@ struct tb_switch_nvm {
#define TB_SWITCH_KEY_SIZE 32
#define TB_SWITCH_MAX_DEPTH 6
+#define USB4_SWITCH_MAX_DEPTH 5
+
+/**
+ * enum tb_switch_tmu_rate - TMU refresh rate
+ * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
+ * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
+ * transmission of the Delay Request TSNOS
+ * (Time Sync Notification Ordered Set) on a Link
+ * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
+ * transmission of the Delay Request TSNOS on
+ * a Link
+ */
+enum tb_switch_tmu_rate {
+ TB_SWITCH_TMU_RATE_OFF = 0,
+ TB_SWITCH_TMU_RATE_HIFI = 16,
+ TB_SWITCH_TMU_RATE_NORMAL = 1000,
+};
+
+/**
+ * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * @cap: Offset to the TMU capability (%0 if not found)
+ * @has_ucap: Does the switch support uni-directional mode
+ * @rate: TMU refresh rate related to upstream switch. In case of root
+ * switch this holds the domain rate.
+ * @unidirectional: Is the TMU in uni-directional or bi-directional mode
+ * related to the upstream switch. Don't care for the root switch.
+ */
+struct tb_switch_tmu {
+ int cap;
+ bool has_ucap;
+ enum tb_switch_tmu_rate rate;
+ bool unidirectional;
+};
/**
* struct tb_switch - a thunderbolt switch
@@ -54,6 +87,7 @@ struct tb_switch_nvm {
* mailbox this will hold the pointer to that (%NULL
* otherwise). If set it also means the switch has
* upgradeable NVM.
+ * @tmu: The switch TMU configuration
* @tb: Pointer to the domain the switch belongs to
* @uid: Unique ID of the switch
* @uuid: UUID of the switch (or %NULL if not supported)
@@ -92,6 +126,7 @@ struct tb_switch {
struct tb_regs_switch_header config;
struct tb_port *ports;
struct tb_dma_port *dma_port;
+ struct tb_switch_tmu tmu;
struct tb *tb;
u64 uid;
uuid_t *uuid;
@@ -128,7 +163,9 @@ struct tb_switch {
* @remote: Remote port (%NULL if not connected)
* @xdomain: Remote host (%NULL if not connected)
* @cap_phy: Offset, zero if not found
+ * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
+ * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
* @bonded: true if the port is bonded (two lanes combined as one)
@@ -145,7 +182,9 @@ struct tb_port {
struct tb_port *remote;
struct tb_xdomain *xdomain;
int cap_phy;
+ int cap_tmu;
int cap_adap;
+ int cap_usb4;
u8 port;
bool disabled;
bool bonded;
@@ -393,6 +432,16 @@ static inline bool tb_port_is_dpout(const struct tb_port *port)
return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}
+static inline bool tb_port_is_usb3_down(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_DOWN;
+}
+
+static inline bool tb_port_is_usb3_up(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_UP;
+}
+
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
@@ -533,6 +582,8 @@ void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -636,6 +687,17 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
}
/**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if @sw is a USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+ return sw->config.thunderbolt_version == USB4_VERSION_1_0;
+}
+
+/**
* tb_switch_is_icm() - Is the switch handled by ICM firmware
* @sw: Switch to check
*
@@ -656,10 +718,22 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_tmu_init(struct tb_switch *sw);
+int tb_switch_tmu_post_time(struct tb_switch *sw);
+int tb_switch_tmu_disable(struct tb_switch *sw);
+int tb_switch_tmu_enable(struct tb_switch *sw);
+
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+{
+ return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+ !sw->tmu.unidirectional;
+}
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_unlock(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -668,9 +742,13 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
bool tb_port_is_enabled(struct tb_port *port);
+bool tb_usb3_port_is_enabled(struct tb_port *port);
+int tb_usb3_port_enable(struct tb_port *port, bool enable);
+
bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);
@@ -734,4 +812,27 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
+int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_configure_link(struct tb_switch *sw);
+void usb4_switch_unconfigure_link(struct tb_switch *sw);
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_sleep(struct tb_switch *sw);
+int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size);
+int usb4_switch_nvm_authenticate(struct tb_switch *sw);
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port);
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port);
+
+int usb4_port_unlock(struct tb_port *port);
#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 3705057723b6..fc208c567953 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -67,9 +67,13 @@ struct cfg_error_pkg {
u32 zero1:4;
u32 port:6;
u32 zero2:2; /* Both should be zero, still they are different fields. */
- u32 zero3:16;
+ u32 zero3:14;
+ u32 pg:2;
} __packed;
+#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
+#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
+
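With the pg field carved out of the former zero3 bits, a receiver can tell plug from unplug straight from the error packet. A minimal sketch of a consumer (hypothetical helper, not part of this patch):

/* True if the hot plug event carried by this error packet is an unplug */
static bool cfg_error_is_unplug(const struct cfg_error_pkg *pkg)
{
	return pkg->pg == TB_CFG_ERROR_PG_HOT_UNPLUG;
}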
/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
struct tb_cfg_header header;
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 7ee45b73c7f7..c29c5075525a 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -26,6 +26,7 @@
#define TB_MAX_CONFIG_RW_LENGTH 60
enum tb_switch_cap {
+ TB_SWITCH_CAP_TMU = 0x03,
TB_SWITCH_CAP_VSE = 0x05,
};
@@ -41,6 +42,7 @@ enum tb_port_cap {
TB_PORT_CAP_TIME1 = 0x03,
TB_PORT_CAP_ADAP = 0x04,
TB_PORT_CAP_VSE = 0x05,
+ TB_PORT_CAP_USB4 = 0x06,
};
enum tb_port_state {
@@ -164,10 +166,52 @@ struct tb_regs_switch_header {
* milliseconds. Writing 0x00 is interpreted
* as 255ms.
*/
- u32 __unknown4:16;
+ u32 cmuv:8;
+ u32 __unknown4:8;
u32 thunderbolt_version:8;
} __packed;
+/* USB4 version 1.0 */
+#define USB4_VERSION_1_0 0x20
+
+#define ROUTER_CS_1 0x01
+#define ROUTER_CS_4 0x04
+#define ROUTER_CS_5 0x05
+#define ROUTER_CS_5_SLP BIT(0)
+#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_PTO BIT(24)
+#define ROUTER_CS_5_UTO BIT(25)
+#define ROUTER_CS_5_HCO BIT(26)
+#define ROUTER_CS_5_CV BIT(31)
+#define ROUTER_CS_6 0x06
+#define ROUTER_CS_6_SLPR BIT(0)
+#define ROUTER_CS_6_TNS BIT(1)
+#define ROUTER_CS_6_HCI BIT(18)
+#define ROUTER_CS_6_CR BIT(25)
+#define ROUTER_CS_7 0x07
+#define ROUTER_CS_9 0x09
+#define ROUTER_CS_25 0x19
+#define ROUTER_CS_26 0x1a
+#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24)
+#define ROUTER_CS_26_STATUS_SHIFT 24
+#define ROUTER_CS_26_ONS BIT(30)
+#define ROUTER_CS_26_OV BIT(31)
+
+/* Router TMU configuration */
+#define TMU_RTR_CS_0 0x00
+#define TMU_RTR_CS_0_TD BIT(27)
+#define TMU_RTR_CS_0_UCAP BIT(30)
+#define TMU_RTR_CS_1 0x01
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16
+#define TMU_RTR_CS_2 0x02
+#define TMU_RTR_CS_3 0x03
+#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
+#define TMU_RTR_CS_22 0x16
+#define TMU_RTR_CS_24 0x18
+
enum tb_port_type {
TB_TYPE_INACTIVE = 0x000000,
TB_TYPE_PORT = 0x000001,
@@ -178,7 +222,8 @@ enum tb_port_type {
TB_TYPE_DP_HDMI_OUT = 0x0e0102,
TB_TYPE_PCIE_DOWN = 0x100101,
TB_TYPE_PCIE_UP = 0x100102,
- /* TB_TYPE_USB = 0x200000, lower order bits are not known */
+ TB_TYPE_USB3_DOWN = 0x200101,
+ TB_TYPE_USB3_UP = 0x200102,
};
/* Present on every port in TB_CF_PORT at address zero. */
@@ -216,10 +261,15 @@ struct tb_regs_port_header {
#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_4_LCK BIT(31)
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
+/* TMU adapter registers */
+#define TMU_ADP_CS_3 0x03
+#define TMU_ADP_CS_3_UDM BIT(29)
+
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
@@ -237,6 +287,12 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
+/* USB4 port registers */
+#define PORT_CS_18 0x12
+#define PORT_CS_18_BE BIT(8)
+#define PORT_CS_19 0x13
+#define PORT_CS_19_PC BIT(3)
+
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
@@ -277,6 +333,11 @@ struct tb_regs_port_header {
#define ADP_PCIE_CS_0 0x00
#define ADP_PCIE_CS_0_PE BIT(31)
+/* USB adapter registers */
+#define ADP_USB3_CS_0 0x00
+#define ADP_USB3_CS_0_V BIT(30)
+#define ADP_USB3_CS_0_PE BIT(31)
+
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
/* DWORD 0 */
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
new file mode 100644
index 000000000000..039c42a06000
--- /dev/null
+++ b/drivers/thunderbolt/tmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Time Management Unit (TMU) support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+
+#include "tb.h"
+
+static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
+{
+ bool root_switch = !tb_route(sw);
+
+ switch (sw->tmu.rate) {
+ case TB_SWITCH_TMU_RATE_OFF:
+ return "off";
+
+ case TB_SWITCH_TMU_RATE_HIFI:
+ /* Root switch does not have upstream directionality */
+ if (root_switch)
+ return "HiFi";
+ if (sw->tmu.unidirectional)
+ return "uni-directional, HiFi";
+ return "bi-directional, HiFi";
+
+ case TB_SWITCH_TMU_RATE_NORMAL:
+ if (root_switch)
+ return "normal";
+ return "uni-directional, normal";
+
+ default:
+ return "unknown";
+ }
+}
+
+static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TMU_RTR_CS_0_UCAP);
+}
+
+static int tb_switch_tmu_rate_read(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+ return val;
+}
+
+static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
+ val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+}
+
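Note that the enum tb_switch_tmu_rate values double as the raw TS packet interval written here: tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI) programs 16 into the interval field and TB_SWITCH_TMU_RATE_OFF clears it, assuming the hardware expresses the interval in the same microsecond units the kernel-doc for the enum describes.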
+static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
+ u32 value)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
+ if (ret)
+ return ret;
+
+ data &= ~mask;
+ data |= value;
+
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_tmu + offset, 1);
+}
+
+static int tb_port_tmu_set_unidirectional(struct tb_port *port,
+ bool unidirectional)
+{
+ u32 val;
+
+ if (!port->sw->tmu.has_ucap)
+ return 0;
+
+ val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
+ return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
+}
+
+static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
+{
+ return tb_port_tmu_set_unidirectional(port, false);
+}
+
+static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_3, 1);
+ if (ret)
+ return false;
+
+ return val & TMU_ADP_CS_3_UDM;
+}
+
+static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return ret;
+
+ if (set)
+ val |= TMU_RTR_CS_0_TD;
+ else
+ val &= ~TMU_RTR_CS_0_TD;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+}
+
+/**
+ * tb_switch_tmu_init() - Initialize switch TMU structures
+ * @sw: Switch to initialize
+ *
+ * This function must be called before other TMU related functions to
+ * make sure the internal structures are filled in correctly. Does not
+ * change any hardware configuration.
+ */
+int tb_switch_tmu_init(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
+ if (ret > 0)
+ sw->tmu.cap = ret;
+
+ tb_switch_for_each_port(sw, port) {
+ int cap;
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
+ if (cap > 0)
+ port->cap_tmu = cap;
+ }
+
+ ret = tb_switch_tmu_rate_read(sw);
+ if (ret < 0)
+ return ret;
+
+ sw->tmu.rate = ret;
+
+ sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
+ if (sw->tmu.has_ucap) {
+ tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+
+ if (tb_route(sw)) {
+ struct tb_port *up = tb_upstream_port(sw);
+
+ sw->tmu.unidirectional =
+ tb_port_tmu_is_unidirectional(up);
+ }
+ } else {
+ sw->tmu.unidirectional = false;
+ }
+
+ tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_post_time() - Update switch local time
+ * @sw: Switch whose time to update
+ *
+ * Updates switch local time using the time posting procedure.
+ */
+int tb_switch_tmu_post_time(struct tb_switch *sw)
+{
+ unsigned int post_local_time_offset, post_time_offset;
+ struct tb_switch *root_switch = sw->tb->root_switch;
+ u64 hi, mid, lo, local_time, post_time;
+ int i, ret, retries = 100;
+ u32 gm_local_time[3];
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Need to be able to read the grand master time */
+ if (!root_switch->tmu.cap)
+ return 0;
+
+ ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
+ root_switch->tmu.cap + TMU_RTR_CS_1,
+ ARRAY_SIZE(gm_local_time));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
+ tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+ gm_local_time[i]);
+
+ /* Convert to nanoseconds (drop fractional part) */
+ hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
+ mid = gm_local_time[1];
+ lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
+ TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
+ local_time = hi << 48 | mid << 16 | lo;
+
+ /* Tell the switch that time sync is disrupted for a while */
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
+ post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+
+ /*
+ * Write the Grandmaster time to the Post Local Time registers
+ * of the new switch.
+ */
+ ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
+ post_local_time_offset, 2);
+ if (ret)
+ goto out;
+
+ /*
+ * Have the new switch update its local time (by writing 1 to
+ * the post_time registers) and wait for that to complete (the
+ * post_time register becomes 0). This means the time has
+ * converged properly.
+ */
+ post_time = 1;
+
+ ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
+ if (ret)
+ goto out;
+
+ do {
+ usleep_range(5, 10);
+ ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
+ post_time_offset, 2);
+ if (ret)
+ goto out;
+ } while (--retries && post_time);
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
+
+out:
+ tb_switch_tmu_set_time_disruption(sw, false);
+ return ret;
+}
+
+/**
+ * tb_switch_tmu_disable() - Disable TMU of a switch
+ * @sw: Switch whose TMU to disable
+ *
+ * Turns off TMU of @sw if it is enabled. If not enabled, does nothing.
+ */
+int tb_switch_tmu_disable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Already disabled? */
+ if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+ return 0;
+
+ if (sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+
+ tb_sw_dbg(sw, "TMU: disabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a switch
+ * @sw: Switch whose TMU to enable
+ *
+ * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
+ * all tunneling should work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ /* Change mode to bi-directional */
+ if (tb_route(sw) && sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
+ } else {
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+ }
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+
+ return tb_switch_tmu_set_time_disruption(sw, false);
+}
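Putting the new file together with its callers elsewhere in this patch, the TMU call order works out roughly as follows (a sketch reconstructed from the switch.c and tb.c hunks above):

/*
 * tb_switch_add()        -> tb_switch_tmu_init(sw)     (read caps/state)
 * tb_scan_port()/resume  -> tb_enable_tmu(sw), i.e.
 *                               tb_switch_tmu_disable(sw);
 *                               tb_switch_tmu_post_time(sw);
 *                               tb_switch_tmu_enable(sw);
 * hot unplug             -> tb_switch_tmu_disable(sw)
 */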
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 0d3463c4e24a..dbe90bcf4ad4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -19,6 +19,12 @@
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
+/* USB3 adapters always use HopID 8 for both directions */
+#define TB_USB3_HOPID 8
+
+#define TB_USB3_PATH_DOWN 0
+#define TB_USB3_PATH_UP 1
+
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
@@ -31,7 +37,7 @@
#define TB_DMA_PATH_OUT 0
#define TB_DMA_PATH_IN 1
-static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
@@ -243,6 +249,12 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static bool tb_dp_is_usb4(const struct tb_switch *sw)
+{
+ /* Titan Ridge DP adapters need the same treatment as USB4 */
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
int timeout = 10;
@@ -250,8 +262,7 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
int ret;
/* Both ends need to support this */
- if (!tb_switch_is_titan_ridge(in->sw) ||
- !tb_switch_is_titan_ridge(out->sw))
+ if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
return 0;
ret = tb_port_read(out, &val, TB_CFG_PORT,
@@ -531,7 +542,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
u32 val, rate = 0, lanes = 0;
int ret;
- if (tb_switch_is_titan_ridge(sw)) {
+ if (tb_dp_is_usb4(sw)) {
int timeout = 10;
/*
@@ -843,6 +854,156 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
}
+static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+{
+ int res;
+
+ res = tb_usb3_port_enable(tunnel->src_port, activate);
+ if (res)
+ return res;
+
+ if (tb_port_is_usb3_up(tunnel->dst_port))
+ return tb_usb3_port_enable(tunnel->dst_port, activate);
+
+ return 0;
+}
+
+static void tb_usb3_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 3;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+ path->hops[0].initial_credits = 7;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
+ * @tb: Pointer to the domain structure
+ * @down: USB3 downstream adapter
+ *
+ * If the @down adapter is active, follows the tunnel to the USB3 upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ if (!tb_usb3_port_is_enabled(down))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+
+ /*
+ * Discover both paths even if they are not complete. We will
+ * clean them up by calling tb_tunnel_deactivate() below in that
+ * case.
+ */
+ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
+ &tunnel->dst_port, "USB3 Up");
+ if (!path) {
+ /* Just disable the downstream port */
+ tb_usb3_port_enable(down, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
+ "USB3 Down");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+ tb_port_warn(tunnel->dst_port,
+ "path does not end on an USB3 adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (down != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
+ tb_tunnel_warn(tunnel,
+ "tunnel is not fully activated, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
+ * @tb: Pointer to the domain structure
+ * @up: USB3 upstream adapter port
+ * @down: USB3 downstream adapter port
+ *
+ * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
+ * %TB_TYPE_USB3_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+ tunnel->dst_port = up;
+
+ path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
+ "USB3 Down");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+
+ path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
+ "USB3 Up");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+
+ return tunnel;
+}
+
/**
* tb_tunnel_free() - free a tunnel
* @tunnel: Tunnel to be freed
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index ba888da005f5..3f5ba93225e7 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -15,6 +15,7 @@ enum tb_tunnel_type {
TB_TUNNEL_PCI,
TB_TUNNEL_DP,
TB_TUNNEL_DMA,
+ TB_TUNNEL_USB3,
};
/**
@@ -57,6 +58,9 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
int receive_path);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
void tb_tunnel_free(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
@@ -82,5 +86,10 @@ static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_DMA;
}
+static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_USB3;
+}
+
#endif
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
new file mode 100644
index 000000000000..b341fc60c4ba
--- /dev/null
+++ b/drivers/thunderbolt/usb4.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 specific functionality
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "tb.h"
+
+#define USB4_DATA_DWORDS 16
+#define USB4_DATA_RETRIES 3
+
+enum usb4_switch_op {
+ USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+ USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+ USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+ USB4_SWITCH_OP_NVM_WRITE = 0x20,
+ USB4_SWITCH_OP_NVM_AUTH = 0x21,
+ USB4_SWITCH_OP_NVM_READ = 0x22,
+ USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+ USB4_SWITCH_OP_DROM_READ = 0x24,
+ USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
+#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
+#define USB4_NVM_READ_OFFSET_SHIFT 2
+#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
+#define USB4_NVM_READ_LENGTH_SHIFT 24
+
+#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
+#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
+
+#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
+#define USB4_DROM_ADDRESS_SHIFT 2
+#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
+#define USB4_DROM_SIZE_SHIFT 15
+
+#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
+
+typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t);
+
+static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
+{
+ return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
+{
+ return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address,
+ void *buf, size_t size, read_block_fn read_block)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
+ unsigned int dwaddress, dwords;
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ dwaddress = address / 4;
+ dwords = ALIGN(nbytes, 4) / 4;
+
+ ret = read_block(sw, dwaddress, data, dwords);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ memcpy(buf, data + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
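A worked example of the alignment handling above: a 6-byte read at byte address 0x101 yields offset 1, dwaddress 0x40 and dwords 2, so two dwords (8 bytes) are fetched in a single block read and the 6 requested bytes are copied out starting at data + 1.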
+static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address,
+ const void *buf, size_t size, write_block_fn write_next_block)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ memcpy(data + offset, buf, nbytes);
+
+ ret = write_next_block(sw, data, nbytes / 4);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+{
+ u32 val;
+ int ret;
+
+ val = opcode | ROUTER_CS_26_OV;
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (val & ROUTER_CS_26_ONS)
+ return -EOPNOTSUPP;
+
+ *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
+ return 0;
+}
+
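Every router operation below follows the same three-step sequence against ROUTER_CS_9/25/26. A sketch of the common pattern (hypothetical wrapper built only from the helpers above; the real callers also decode operation-specific status values):

static int usb4_switch_simple_op(struct tb_switch *sw, u16 opcode,
				 u32 metadata, void *data, size_t dwords)
{
	u8 status;
	int ret;

	/* 1. Pass input through the metadata register */
	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	/* 2. Trigger the operation and wait for OV to clear */
	ret = usb4_switch_op(sw, opcode, &status);
	if (ret)
		return ret;
	if (status)
		return -EIO;

	/* 3. Read back any returned data dwords */
	return usb4_switch_op_read_data(sw, data, dwords);
}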
+/**
+ * usb4_switch_setup() - Additional setup for USB4 device
+ * @sw: USB4 router to setup
+ *
+ * USB4 routers need additional settings in order to enable all the
+ * tunneling. This function enables USB and PCIe tunneling if they can
+ * be enabled (e.g. the parent switch also supports them). If USB
+ * tunneling is not available for some reason (for example, there is a
+ * Thunderbolt 3 switch upstream) then the internal xHCI controller is
+ * enabled instead.
+ */
+int usb4_switch_setup(struct tb_switch *sw)
+{
+ struct tb_switch *parent;
+ bool tbt3, xhci;
+ u32 val = 0;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
+ if (ret)
+ return ret;
+
+ xhci = val & ROUTER_CS_6_HCI;
+ tbt3 = !(val & ROUTER_CS_6_TNS);
+
+ tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
+ tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ parent = tb_switch_parent(sw);
+
+ if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+ val |= ROUTER_CS_5_UTO;
+ xhci = false;
+ }
+
+ /* Only enable PCIe tunneling if the parent router supports it */
+ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+ val |= ROUTER_CS_5_PTO;
+ /*
+ * xHCI can be enabled if PCIe tunneling is supported
+ * and the parent does not have any USB3 downstream
+ * adapters (so we cannot do USB 3.x tunneling).
+ */
+ if (xhci)
+ val |= ROUTER_CS_5_HCO;
+ }
+
+ /* TBT3 supported by the CM */
+ val |= ROUTER_CS_5_C3S;
+ /* Tunneling configuration is ready now */
+ val |= ROUTER_CS_5_CV;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+ ROUTER_CS_6_CR, 50);
+}
+
+/**
+ * usb4_switch_read_uid() - Read UID from USB4 router
+ * @sw: USB4 router
+ * @uid: UID is stored here
+ *
+ * Reads 64-bit UID from USB4 router config space.
+ */
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
+{
+ return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
+}
+
+static int usb4_switch_drom_read_block(struct tb_switch *sw,
+ unsigned int dwaddress, void *buf,
+ size_t dwords)
+{
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
+ metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
+ USB4_DROM_ADDRESS_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
+ * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read
+ *
+ * Uses USB4 router operations to read router DROM. For devices this
+ * should always work but for hosts it may return %-EOPNOTSUPP in which
+ * case the host router does not have DROM.
+ */
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_switch_do_read_data(sw, address, buf, size,
+ usb4_switch_drom_read_block);
+}
+
+static int usb4_set_port_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PC;
+ else
+ val &= ~PORT_CS_19_PC;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_switch_configure_link() - Set upstream USB4 link configured
+ * @sw: USB4 router
+ *
+ * Sets the upstream USB4 link to be configured for power management
+ * purposes.
+ */
+int usb4_switch_configure_link(struct tb_switch *sw)
+{
+ struct tb_port *up;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ return usb4_set_port_configured(up, true);
+}
+
+/**
+ * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
+ * @sw: USB4 router
+ *
+ * Reverse of usb4_switch_configure_link().
+ */
+void usb4_switch_unconfigure_link(struct tb_switch *sw)
+{
+ struct tb_port *up;
+
+ if (sw->is_unplugged || !tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ usb4_set_port_configured(up, false);
+}
+
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int ret;
+ u32 val;
+
+ up = tb_upstream_port(sw);
+ ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
+ if (ret)
+ return false;
+
+ return !!(val & PORT_CS_18_BE);
+}
+
+/**
+ * usb4_switch_set_sleep() - Prepare the router to enter sleep
+ * @sw: USB4 router
+ *
+ * Sets the sleep bit for the router. Returns when the
+ * router sleep ready bit has been asserted.
+ */
+int usb4_switch_set_sleep(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ /* Set sleep bit and wait for sleep ready to be asserted */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val |= ROUTER_CS_5_SLP;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+ ROUTER_CS_6_SLPR, 500);
+}
+
+/**
+ * usb4_switch_nvm_sector_size() - Return router NVM sector size
+ * @sw: USB4 router
+ *
+ * If the router supports NVM operations this function returns the NVM
+ * sector size in bytes. If NVM operations are not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_sector_size(struct tb_switch *sw)
+{
+ u32 metadata;
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return status == 0x2 ? -EOPNOTSUPP : -EIO;
+
+ ret = usb4_switch_op_read_metadata(sw, &metadata);
+ if (ret)
+ return ret;
+
+ return metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_switch_nvm_read_block(struct tb_switch *sw,
+ unsigned int dwaddress, void *buf, size_t dwords)
+{
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
+ USB4_NVM_READ_LENGTH_MASK;
+ metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
+ USB4_NVM_READ_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
+ * @sw: USB4 router
+ * @address: Starting address in bytes
+ * @buf: Read data is placed here
+ * @size: How many bytes to read
+ *
+ * Reads NVM contents of the router. If NVM is not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_switch_do_read_data(sw, address, buf, size,
+ usb4_switch_nvm_read_block);
+}
+
+static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
+ unsigned int address)
+{
+ u32 metadata, dwaddress;
+ u8 status = 0;
+ int ret;
+
+ dwaddress = address / 4;
+ metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+ USB4_NVM_SET_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+static int usb4_switch_nvm_write_next_block(struct tb_switch *sw,
+ const void *buf, size_t dwords)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_data(sw, buf, dwords);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+/**
+ * usb4_switch_nvm_write() - Write to the router NVM
+ * @sw: USB4 router
+ * @address: Start address where to write in bytes
+ * @buf: Pointer to the data to write
+ * @size: Size of @buf in bytes
+ *
+ * Writes @buf to the router NVM using USB4 router operations. If NVM
+ * write is not supported returns %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size)
+{
+ int ret;
+
+ ret = usb4_switch_nvm_set_offset(sw, address);
+ if (ret)
+ return ret;
+
+ return usb4_switch_do_write_data(sw, address, buf, size,
+ usb4_switch_nvm_write_next_block);
+}
+
+/**
+ * usb4_switch_nvm_authenticate() - Authenticate new NVM
+ * @sw: USB4 router
+ *
+ * After the new NVM has been written via usb4_switch_nvm_write(), this
+ * function triggers the NVM authentication process. If the authentication
+ * is successful the router is power cycled and the new NVM starts
+ * running. In case of failure returns negative errno.
+ */
+int usb4_switch_nvm_authenticate(struct tb_switch *sw)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case 0x0:
+ tb_sw_dbg(sw, "NVM authentication successful\n");
+ return 0;
+ case 0x1:
+ return -EINVAL;
+ case 0x2:
+ return -EAGAIN;
+ case 0x3:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
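A sketch of how the two NVM helpers are meant to be combined by a caller (hypothetical function; nvm_authenticate() in switch.c above is the real consumer):

/* Hypothetical: flash a complete image and trigger authentication */
static int usb4_flash_nvm(struct tb_switch *sw, const void *image,
			  size_t size)
{
	int ret;

	ret = usb4_switch_nvm_write(sw, 0, image, size);
	if (ret)
		return ret;

	/* On success the router power cycles into the new image */
	return usb4_switch_nvm_authenticate(sw);
}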
+/**
+ * usb4_switch_query_dp_resource() - Query availability of DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * For DP tunneling this function can be used to query availability of
+ * DP IN resource. Returns true if the resource is available for DP
+ * tunneling, false otherwise.
+ */
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return false;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+ /*
+ * If DP resource allocation is not supported, assume it is
+ * always available.
+ */
+ if (ret == -EOPNOTSUPP)
+ return true;
+ else if (ret)
+ return false;
+
+ return !status;
+}
+
+/**
+ * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Allocates DP IN resource for DP tunneling using USB4 router
+ * operations. If the resource was allocated returns %0. Otherwise
+ * returns negative errno, in particular %-EBUSY if the resource is
+ * already allocated.
+ */
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EBUSY : 0;
+}
+
+/**
+ * usb4_switch_dealloc_dp_resource() - Release allocated DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Releases the previously allocated DP IN resource.
+ */
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
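/*
 * Editorial sketch, not part of this patch: the three DP resource
 * helpers above are intended to be used together when a DP tunnel is
 * set up and torn down. example_setup_dp() is hypothetical.
 */
static int example_setup_dp(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (!usb4_switch_query_dp_resource(sw, in))
		return -EBUSY;

	ret = usb4_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... establish the tunnel; call the dealloc helper on teardown */
	return 0;
}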
+
+static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+{
+ struct tb_port *p;
+ int usb4_idx = 0;
+
+ /* Assume port is primary */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_null(p))
+ continue;
+ if (tb_is_upstream_port(p))
+ continue;
+ if (!p->link_nr) {
+ if (p == port)
+ break;
+ usb4_idx++;
+ }
+ }
+
+ return usb4_idx;
+}
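/*
 * Editorial note: usb4_port_idx() numbers the downstream lane 0 adapter
 * ports (upstream and lane 1 ports are skipped). The two map helpers
 * below then pair the Nth such USB4 port with the Nth free PCIe or
 * USB 3.x downstream adapter, which is the direct mapping their
 * kernel-doc describes.
 */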
+
+/**
+ * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and PCIe
+ * downstream adapters where the PCIe topology is extended. This
+ * function returns the corresponding downstream PCIe adapter or %NULL
+ * if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int pcie_idx = 0;
+
+ /* Find PCIe down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_pcie_down(p))
+ continue;
+
+ if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p))
+ return p;
+
+ pcie_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and USB 3.x
+ * downstream adapters where the USB 3.x topology is extended. This
+ * function returns the corresponding downstream USB 3.x adapter or
+ * %NULL if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int usb_idx = 0;
+
+ /* Find USB3 down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_usb3_down(p))
+ continue;
+
+ if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p))
+ return p;
+
+ usb_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_port_unlock() - Unlock USB4 downstream port
+ * @port: USB4 port to unlock
+ *
+ * Unlocks USB4 downstream port so that the connection manager can
+ * access the router below this port.
+ */
+int usb4_port_unlock(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_4_LCK;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 880d784398a3..053f918e00e8 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1220,7 +1220,13 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
u64 route, const uuid_t *local_uuid,
const uuid_t *remote_uuid)
{
+ struct tb_switch *parent_sw = tb_to_switch(parent);
struct tb_xdomain *xd;
+ struct tb_port *down;
+
+ /* Make sure the downstream domain is accessible */
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
xd = kzalloc(sizeof(*xd), GFP_KERNEL);
if (!xd)
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 4562c8060d09..a6aabfd6e2da 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -3256,7 +3256,7 @@ static int __init cy_detect_isa(void)
return nboard;
/* probe for CD1400... */
- cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
+ cy_isa_address = ioremap(isa_address, CyISA_Ywin);
if (cy_isa_address == NULL) {
printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
"address\n");
@@ -3690,13 +3690,13 @@ static int cy_pci_probe(struct pci_dev *pdev,
device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
card_name = "Cyclom-Y";
- addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ addr0 = ioremap(pci_resource_start(pdev, 0),
CyPCI_Yctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
goto err_reg;
}
- addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ addr2 = ioremap(pci_resource_start(pdev, 2),
CyPCI_Ywin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
@@ -3712,7 +3712,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
} else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
struct RUNTIME_9060 __iomem *ctl_addr;
- ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ ctl_addr = addr0 = ioremap(pci_resource_start(pdev, 0),
CyPCI_Zctl);
if (addr0 == NULL) {
dev_err(&pdev->dev, "can't remap ctl region\n");
@@ -3727,7 +3727,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
mailbox = readl(&ctl_addr->mail_box_0);
- addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ addr2 = ioremap(pci_resource_start(pdev, 2),
mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
if (addr2 == NULL) {
dev_err(&pdev->dev, "can't remap base region\n");
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 4c1cd49ae95b..620d8488b83e 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -898,7 +898,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
atomic_set(&priv->xmit_total, 0);
raw_spin_lock_init(&priv->lock);
- priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start,
+ priv->reg = devm_ioremap(priv->dev, dev->res.start,
resource_size(&dev->res));
if (!priv->reg) {
dev_err(priv->dev, "ioremap failed for resource %pR\n",
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 3a1a5e0ee93f..9f13f7d49dd7 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -961,7 +961,7 @@ static int moxa_pci_probe(struct pci_dev *pdev,
goto err;
}
- board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
+ board->basemem = ioremap(pci_resource_start(pdev, 2), 0x4000);
if (board->basemem == NULL) {
dev_err(&pdev->dev, "can't remap io space 2\n");
retval = -ENOMEM;
@@ -1071,7 +1071,7 @@ static int __init moxa_init(void)
brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
numports[i];
brd->busType = MOXA_BUS_TYPE_ISA;
- brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
+ brd->basemem = ioremap(baseaddr[i], 0x4000);
if (!brd->basemem) {
printk(KERN_ERR "MOXA: can't remap %lx\n",
baseaddr[i]);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 36a3eb4ad4c5..f1c90fa2978e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2704,7 +2704,7 @@ static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
}
/* called when a packet did not ack after the watchdog timeout */
-static void gsm_mux_net_tx_timeout(struct net_device *net)
+static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
/* Tell syslog we are hosed. */
dev_dbg(&net->dev, "Tx timed out.\n");
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 98361acd3053..27b506bf03ce 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -115,11 +115,9 @@
struct n_hdlc_buf {
struct list_head list_item;
int count;
- char buf[1];
+ char buf[];
};
-#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
-
struct n_hdlc_buf_list {
struct list_head list;
int count;
@@ -524,7 +522,8 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
/* no buffers in free list, attempt to allocate another rx buffer */
/* unless the maximum count has been reached */
if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
- buf = kmalloc(N_HDLC_BUF_SIZE, GFP_ATOMIC);
+ buf = kmalloc(struct_size(buf, buf, maxframe),
+ GFP_ATOMIC);
}
if (!buf) {
@@ -853,7 +852,7 @@ static struct n_hdlc *n_hdlc_alloc(void)
/* allocate free rx buffer list */
for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
- buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+ buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
if (buf)
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,buf);
else if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -862,7 +861,7 @@ static struct n_hdlc *n_hdlc_alloc(void)
/* allocate free tx buffer list */
for(i=0;i<DEFAULT_TX_BUF_COUNT;i++) {
- buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+ buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
if (buf)
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,buf);
else if (debuglevel >= DEBUG_LEVEL_INFO)
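/*
 * Editorial note on the struct_size() conversions above:
 * struct_size(buf, buf, maxframe) evaluates to
 * sizeof(struct n_hdlc_buf) + maxframe bytes for the flexible array,
 * saturating instead of wrapping on overflow, which is why it can
 * replace the open-coded N_HDLC_BUF_SIZE macro. Minimal illustration
 * with a hypothetical struct:
 */
struct example_frame {
	int count;
	char data[];	/* flexible array member, as in n_hdlc_buf */
};
/* kmalloc(struct_size(f, data, n), GFP_KERNEL) sizes header + n bytes */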
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 226adeec2aed..42345e79920c 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -115,8 +115,8 @@ int serdev_device_add(struct serdev_device *serdev)
err = device_add(&serdev->dev);
if (err < 0) {
- dev_err(&serdev->dev, "Can't add %s, status %d\n",
- dev_name(&serdev->dev), err);
+ dev_err(&serdev->dev, "Can't add %s, status %pe\n",
+ dev_name(&serdev->dev), ERR_PTR(err));
goto err_clear_serdev;
}
@@ -540,7 +540,8 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
err = serdev_device_add(serdev);
if (err) {
dev_err(&serdev->dev,
- "failure adding device. status %d\n", err);
+ "failure adding device. status %pe\n",
+ ERR_PTR(err));
serdev_device_put(serdev);
} else
found = true;
@@ -656,13 +657,20 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
err = serdev_device_add(serdev);
if (err) {
dev_err(&serdev->dev,
- "failure adding ACPI serdev device. status %d\n", err);
+ "failure adding ACPI serdev device. status %pe\n",
+ ERR_PTR(err));
serdev_device_put(serdev);
}
return AE_OK;
}
+static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
+ { "INT3511", 0 },
+ { "INT3512", 0 },
+ { },
+};
+
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -675,6 +683,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
if (acpi_device_enumerated(adev))
return AE_OK;
+ /* Skip if blacklisted; acpi_match_device_ids() returns 0 on a match */
+ if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
+ return AE_OK;
+
if (acpi_serdev_check_resources(ctrl, adev))
return AE_OK;
@@ -731,8 +743,8 @@ int serdev_controller_add(struct serdev_controller *ctrl)
ret_of = of_serdev_register_devices(ctrl);
ret_acpi = acpi_serdev_register_devices(ctrl);
if (ret_of && ret_acpi) {
- dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
- ret_of, ret_acpi);
+ dev_dbg(&ctrl->dev, "no devices registered: of:%pe acpi:%pe\n",
+ ERR_PTR(ret_of), ERR_PTR(ret_acpi));
ret = -ENODEV;
goto err_rpm_disable;
}
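/*
 * Editorial note: the %d -> %pe conversions in this file print symbolic
 * error names (e.g. "-ENODEV" rather than "-19") when
 * CONFIG_SYMBOLIC_ERRNAME is enabled; the integer errno has to be
 * wrapped in ERR_PTR() so the format handler receives an error pointer:
 *
 *	dev_err(dev, "probe failed: %pe\n", ERR_PTR(-ENOMEM));
 */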
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 32b3acf8150a..718e010fcb04 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -41,8 +41,43 @@
static const char serial21285_name[] = "Footbridge UART";
-#define tx_enabled(port) ((port)->unused[0])
-#define rx_enabled(port) ((port)->unused[1])
+/*
+ * We only need 2 bits of data, so instead of creating a whole structure for
+ * this, use bits of the private_data pointer of the uart port structure.
+ */
+#define tx_enabled_bit 0
+#define rx_enabled_bit 1
+
+static bool is_enabled(struct uart_port *port, int bit)
+{
+ unsigned long private_data = (unsigned long)port->private_data;
+
+ return test_bit(bit, &private_data);
+}
+
+static void enable(struct uart_port *port, int bit)
+{
+ unsigned long private_data = (unsigned long)port->private_data;
+
+ set_bit(bit, &private_data);
+ port->private_data = (void *)private_data;
+}
+
+static void disable(struct uart_port *port, int bit)
+{
+ unsigned long private_data = (unsigned long)port->private_data;
+
+ clear_bit(bit, &private_data);
+ port->private_data = (void *)private_data;
+}
+
+#define is_tx_enabled(port) is_enabled(port, tx_enabled_bit)
+#define tx_enable(port) enable(port, tx_enabled_bit)
+#define tx_disable(port) disable(port, tx_enabled_bit)
+
+#define is_rx_enabled(port) is_enabled(port, rx_enabled_bit)
+#define rx_enable(port) enable(port, rx_enabled_bit)
+#define rx_disable(port) disable(port, rx_enabled_bit)
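/*
 * Editorial note: set_bit()/clear_bit() act on the local copy of the
 * pointer value, so the result has to be written back into
 * port->private_data (as done above) for the flag to persist across
 * calls. This trick is safe here only because the driver never stores
 * a real pointer in private_data.
 */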
/*
* The documented expression for selecting the divisor is:
@@ -57,25 +92,25 @@ static const char serial21285_name[] = "Footbridge UART";
static void serial21285_stop_tx(struct uart_port *port)
{
- if (tx_enabled(port)) {
+ if (is_tx_enabled(port)) {
disable_irq_nosync(IRQ_CONTX);
- tx_enabled(port) = 0;
+ tx_disable(port);
}
}
static void serial21285_start_tx(struct uart_port *port)
{
- if (!tx_enabled(port)) {
+ if (!is_tx_enabled(port)) {
enable_irq(IRQ_CONTX);
- tx_enabled(port) = 1;
+ tx_enable(port);
}
}
static void serial21285_stop_rx(struct uart_port *port)
{
- if (rx_enabled(port)) {
+ if (is_rx_enabled(port)) {
disable_irq_nosync(IRQ_CONRX);
- rx_enabled(port) = 0;
+ rx_disable(port);
}
}
@@ -185,8 +220,8 @@ static int serial21285_startup(struct uart_port *port)
{
int ret;
- tx_enabled(port) = 1;
- rx_enabled(port) = 1;
+ tx_enable(port);
+ rx_enable(port);
ret = request_irq(IRQ_CONRX, serial21285_rx_chars, 0,
serial21285_name, port);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 6e67fd89445a..d657aa14c3e4 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -5,10 +5,6 @@
* Copyright (C) 2016 Jeremy Kerr <jk@ozlabs.org>, IBM Corp.
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -406,6 +402,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.unthrottle = aspeed_vuart_unthrottle;
port.port.status = UPSTAT_SYNC_FIFO;
port.port.dev = &pdev->dev;
+ port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
if (rc < 0)
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 8ce700c1a7fc..e70e3cc30050 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -16,14 +16,19 @@
#include "8250.h"
+/**
+ * struct bcm2835aux_data - driver private data of BCM2835 auxiliary UART
+ * @clk: clock producer of the port's uartclk
+ * @line: index of the port's serial8250_ports[] entry
+ */
struct bcm2835aux_data {
- struct uart_8250_port uart;
struct clk *clk;
int line;
};
static int bcm2835aux_serial_probe(struct platform_device *pdev)
{
+ struct uart_8250_port up = { };
struct bcm2835aux_data *data;
struct resource *res;
int ret;
@@ -34,23 +39,21 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
return -ENOMEM;
/* initialize data */
- spin_lock_init(&data->uart.port.lock);
- data->uart.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
- data->uart.port.dev = &pdev->dev;
- data->uart.port.regshift = 2;
- data->uart.port.type = PORT_16550;
- data->uart.port.iotype = UPIO_MEM;
- data->uart.port.fifosize = 8;
- data->uart.port.flags = UPF_SHARE_IRQ |
- UPF_FIXED_PORT |
- UPF_FIXED_TYPE |
- UPF_SKIP_TEST;
+ up.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
+ up.port.dev = &pdev->dev;
+ up.port.regshift = 2;
+ up.port.type = PORT_16550;
+ up.port.iotype = UPIO_MEM;
+ up.port.fifosize = 8;
+ up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
+ UPF_SKIP_TEST | UPF_IOREMAP;
/* get the clock - this also enables the HW */
data->clk = devm_clk_get(&pdev->dev, NULL);
ret = PTR_ERR_OR_ZERO(data->clk);
if (ret) {
- dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
return ret;
}
@@ -58,7 +61,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
- data->uart.port.irq = ret;
+ up.port.irq = ret;
/* map the main registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -66,15 +69,13 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "memory resource not found");
return -EINVAL;
}
- data->uart.port.membase = devm_ioremap_resource(&pdev->dev, res);
- ret = PTR_ERR_OR_ZERO(data->uart.port.membase);
- if (ret)
- return ret;
+ up.port.mapbase = res->start;
+ up.port.mapsize = resource_size(res);
/* Check for a fixed line number */
ret = of_alias_get_id(pdev->dev.of_node, "serial");
if (ret >= 0)
- data->uart.port.line = ret;
+ up.port.line = ret;
/* enable the clock as a last step */
ret = clk_prepare_enable(data->clk);
@@ -89,13 +90,14 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
* so we have to multiply the actual clock by 2
* to get identical baudrates.
*/
- data->uart.port.uartclk = clk_get_rate(data->clk) * 2;
+ up.port.uartclk = clk_get_rate(data->clk) * 2;
/* register the port */
- ret = serial8250_register_8250_port(&data->uart);
+ ret = serial8250_register_8250_port(&up);
if (ret < 0) {
- dev_err(&pdev->dev, "unable to register 8250 port - %d\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "unable to register 8250 port - %d\n", ret);
goto dis_clk;
}
data->line = ret;
@@ -113,7 +115,7 @@ static int bcm2835aux_serial_remove(struct platform_device *pdev)
{
struct bcm2835aux_data *data = platform_get_drvdata(pdev);
- serial8250_unregister_port(data->uart.port.line);
+ serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk);
return 0;
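/*
 * Editorial note: with UPF_IOREMAP set and only mapbase/mapsize filled
 * in, the 8250 core now performs the ioremap itself (see the
 * serial8250_request_std_resource() hunk in 8250_port.c further down),
 * which is why the driver's own devm_ioremap_resource() call above was
 * dropped.
 */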
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e682390ce0de..0894a22fd702 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -816,6 +816,7 @@ static int serial8250_probe(struct platform_device *dev)
uart.port.flags = p->flags;
uart.port.mapbase = p->mapbase;
uart.port.hub6 = p->hub6;
+ uart.port.has_sysrq = p->has_sysrq;
uart.port.private_data = p->private_data;
uart.port.type = p->type;
uart.port.serial_in = p->serial_in;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 108cd55f9c4d..91e9b070d36d 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -186,7 +186,7 @@ static int xr17v35x_startup(struct uart_port *port)
static void exar_shutdown(struct uart_port *port)
{
unsigned char lsr;
- bool tx_complete = 0;
+ bool tx_complete = false;
struct uart_8250_port *up = up_to_u8250p(port);
struct circ_buf *xmit = &port->state->xmit;
int i = 0;
@@ -194,9 +194,9 @@ static void exar_shutdown(struct uart_port *port)
do {
lsr = serial_in(up, UART_LSR);
if (lsr & (UART_LSR_TEMT | UART_LSR_THRE))
- tx_complete = 1;
+ tx_complete = true;
else
- tx_complete = 0;
+ tx_complete = false;
usleep_range(1000, 1100);
} while (!uart_circ_empty(xmit) && !tx_complete && i++ < 1000);
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index aa0e216d5ead..0d0c80905c58 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -1,8 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 0809ae2aa9b1..673cda3d011d 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -55,7 +55,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
uart.port.uartclk = (dev->id.sversion != 0xad) ?
7272727 : 1843200;
uart.port.mapbase = address;
- uart.port.membase = ioremap_nocache(address, 16);
+ uart.port.membase = ioremap(address, 16);
if (!uart.port.membase) {
dev_warn(&dev->dev, "Failed to map memory\n");
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_ioc3.c b/drivers/tty/serial/8250/8250_ioc3.c
new file mode 100644
index 000000000000..d5a39e105a76
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_ioc3.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGI IOC3 8250 UART driver
+ *
+ * Copyright (C) 2019 Thomas Bogendoerfer <tbogendoerfer@suse.de>
+ *
+ * based on code Copyright (C) 2005 Stanislaw Skowronek <skylark@unaligned.org>
+ * Copyright (C) 2014 Joshua Kinard <kumba@gentoo.org>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "8250.h"
+
+#define IOC3_UARTCLK (22000000 / 3)
+
+struct ioc3_8250_data {
+ int line;
+};
+
+static unsigned int ioc3_serial_in(struct uart_port *p, int offset)
+{
+ return readb(p->membase + (offset ^ 3));
+}
+
+static void ioc3_serial_out(struct uart_port *p, int offset, int value)
+{
+ writeb(value, p->membase + (offset ^ 3));
+}
+
+static int serial8250_ioc3_probe(struct platform_device *pdev)
+{
+ struct ioc3_8250_data *data;
+ struct uart_8250_port up;
+ struct resource *r;
+ void __iomem *membase;
+ int irq, line;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ membase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!membase)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ irq = 0; /* no interrupt -> use polling */
+
+ /* Register serial ports with 8250.c */
+ memset(&up, 0, sizeof(struct uart_8250_port));
+ up.port.iotype = UPIO_MEM;
+ up.port.uartclk = IOC3_UARTCLK;
+ up.port.type = PORT_16550A;
+ up.port.irq = irq;
+ up.port.flags = (UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ);
+ up.port.dev = &pdev->dev;
+ up.port.membase = membase;
+ up.port.mapbase = r->start;
+ up.port.serial_in = ioc3_serial_in;
+ up.port.serial_out = ioc3_serial_out;
+ line = serial8250_register_8250_port(&up);
+ if (line < 0)
+ return line;
+
+ data->line = line;
+ platform_set_drvdata(pdev, data);
+ return 0;
+}
+
+static int serial8250_ioc3_remove(struct platform_device *pdev)
+{
+ struct ioc3_8250_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+ return 0;
+}
+
+static struct platform_driver serial8250_ioc3_driver = {
+ .probe = serial8250_ioc3_probe,
+ .remove = serial8250_ioc3_remove,
+ .driver = {
+ .name = "ioc3-serial8250",
+ }
+};
+
+module_platform_driver(serial8250_ioc3_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI IOC3 8250 UART driver");
+MODULE_LICENSE("GPL");
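/*
 * Editorial note on ioc3_serial_in()/ioc3_serial_out() above: the IOC3
 * sits behind a big-endian bus on these SGI machines, so the byte-wide
 * 16550 registers occupy swapped byte lanes within each 32-bit word.
 * XOR-ing the register offset with 3 swaps lanes pairwise (0<->3,
 * 1<->2); e.g. register offset 0 (RBR/THR) is accessed at membase + 3.
 */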
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 92fbf46ce3bd..531ad67395e0 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -222,8 +222,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (IS_ENABLED(CONFIG_SERIAL_8250_FSL) &&
(of_device_is_compatible(np, "fsl,ns16550") ||
- of_device_is_compatible(np, "fsl,16550-FIFO64")))
+ of_device_is_compatible(np, "fsl,16550-FIFO64"))) {
port->handle_irq = fsl8250_handle_irq;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ }
return 0;
err_unprepare:
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 836e736ae188..6f343ca08440 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -8,10 +8,6 @@
*
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -1147,7 +1143,7 @@ static int omap8250_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- membase = devm_ioremap_nocache(&pdev->dev, regs->start,
+ membase = devm_ioremap(&pdev->dev, regs->start,
resource_size(regs));
if (!membase)
return -ENODEV;
@@ -1192,6 +1188,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
up.port.rs485_config = omap_8250_rs485_config;
+ up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 022924d5ad54..939685fed396 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -275,7 +275,7 @@ static int pci_plx9050_init(struct pci_dev *dev)
/*
* enable/disable interrupts
*/
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
writel(irq_config, p + 0x4c);
@@ -299,7 +299,7 @@ static void pci_plx9050_exit(struct pci_dev *dev)
/*
* disable interrupts
*/
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p != NULL) {
writel(0, p + 0x4c);
@@ -475,7 +475,7 @@ static int pci_siig10x_init(struct pci_dev *dev)
break;
}
- p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ p = ioremap(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 90655910b0c7..430e3467aff7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -11,10 +11,6 @@
* membase is an 'ioremapped' cookie.
*/
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
@@ -1001,6 +997,9 @@ static void autoconfig_16550a(struct uart_8250_port *up)
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
+ if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+ return;
+
/*
* Check for presence of the EFR when DLAB is set.
* Only ST16C650V1 UARTs pass this test.
@@ -2766,7 +2765,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
}
if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, size);
+ port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
release_mem_region(port->mapbase, size);
ret = -ENOMEM;
@@ -3055,6 +3054,7 @@ void serial8250_init_port(struct uart_8250_port *up)
spin_lock_init(&port->lock);
port->ops = &serial8250_pops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
up->cur_iotype = 0xFF;
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fab3d4f20667..f16824bbb573 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -60,6 +60,16 @@ config SERIAL_8250_PNP
This builds standard PNP serial support. You may be able to
disable this feature if you only need legacy serial support.
+config SERIAL_8250_16550A_VARIANTS
+ bool "Support for variants of the 16550A serial port"
+ depends on SERIAL_8250
+ help
+ The 8250 driver can probe for many variants of the venerable 16550A
+ serial port. Doing so takes additional time at boot.
+
+ On modern systems, especially those using serial only for a simple
+ console, you can say N here.
+
config SERIAL_8250_FINTEK
bool "Support for Fintek F81216A LPC to 4 UART RS485 API"
depends on SERIAL_8250
@@ -371,6 +381,17 @@ config SERIAL_8250_EM
port hardware found on the Emma Mobile line of processors.
If unsure, say N.
+config SERIAL_8250_IOC3
+ tristate "SGI IOC3 8250 UART support"
+ depends on SGI_MFD_IOC3 && SERIAL_8250
+ select SERIAL_8250_EXTENDED
+ select SERIAL_8250_SHARE_IRQ
+ help
+ Enable this if you have an SGI Origin or Octane machine. This module
+ provides basic serial support by directly driving the UART chip
+ behind the IOC3 device on those systems. The maximum baud rate with
+ this driver is 38400 bps.
+
config SERIAL_8250_RT288X
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 08c1d8117506..51a6079d3f1f 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
+obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 99f5da3bf913..52eaac21ff9f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -237,7 +237,7 @@ config SERIAL_CLPS711X_CONSOLE
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
- depends on PLAT_SAMSUNG || ARCH_EXYNOS
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
@@ -975,7 +975,7 @@ config SERIAL_QCOM_GENI
config SERIAL_QCOM_GENI_CONSOLE
bool "QCOM GENI Serial Console support"
- depends on SERIAL_QCOM_GENI=y
+ depends on SERIAL_QCOM_GENI
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2c37d11726ab..3284f34e9dfe 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -15,10 +15,6 @@
* and hooked into this driver.
*/
-#if defined(CONFIG_SERIAL_AMBA_PL010_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -728,6 +724,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.iotype = UPIO_MEM;
uap->port.irq = dev->irq[0];
uap->port.fifosize = 16;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL010_CONSOLE);
uap->port.ops = &amba_pl010_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4b28134d596a..2296bb0f9578 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -16,11 +16,6 @@
* and hooked into this driver.
*/
-
-#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -1452,8 +1447,6 @@ static void pl011_modem_status(struct uart_amba_port *uap)
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
- unsigned int dummy_read;
-
if (!uap->vendor->cts_event_workaround)
return;
@@ -1465,8 +1458,8 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
* single apb access will incur 2 pclk(133.12Mhz) delay,
* so add 2 dummy reads
*/
- dummy_read = pl011_read(uap, REG_ICR);
- dummy_read = pl011_read(uap, REG_ICR);
+ pl011_read(uap, REG_ICR);
+ pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
@@ -2579,6 +2572,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
uap->port.mapbase = mmiobase->start;
uap->port.membase = base;
uap->port.fifosize = uap->fifosize;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
@@ -2769,6 +2763,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.remove = sbsa_uart_remove,
.driver = {
.name = "sbsa-uart",
+ .pm = &pl011_dev_pm_ops,
.of_match_table = of_match_ptr(sbsa_uart_of_match),
.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 60cd133ffbbc..e8d56e899ec7 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -11,10 +11,6 @@
* Copyright (C) 2009 Kristoffer Glembo <kristoffer@gaisler.com>, Aeroflex Gaisler AB
*/
-#if defined(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -626,6 +622,7 @@ static int __init grlib_apbuart_configure(void)
port->irq = 0;
port->iotype = UPIO_MEM;
port->ops = &grlib_apbuart_ops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE);
port->flags = UPF_BOOT_AUTOCONF;
port->line = line;
port->uartclk = *freq_hz;
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index d904a3a345e7..17c3fc398fc6 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -21,10 +21,6 @@
* -check if sysreq works
*/
-#if defined(CONFIG_SERIAL_ARC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/console.h>
@@ -625,6 +621,7 @@ static int arc_serial_probe(struct platform_device *pdev)
port->flags = UPF_BOOT_AUTOCONF;
port->line = dev_id;
port->ops = &arc_serial_pops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ARC_CONSOLE);
port->fifosize = ARC_UART_TX_FIFO_SIZE;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a8dc8af83f39..c15c398c88a9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -51,10 +51,6 @@
#define ATMEL_RTS_HIGH_OFFSET 16
#define ATMEL_RTS_LOW_OFFSET 20
-#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include "serial_mctrl_gpio.h"
@@ -196,10 +192,6 @@ struct atmel_uart_port {
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
-#ifdef SUPPORT_SYSRQ
-static struct console atmel_console;
-#endif
-
#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-usart-serial" },
@@ -313,7 +305,11 @@ static int atmel_config_rs485(struct uart_port *port,
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ else
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
@@ -831,7 +827,7 @@ static void atmel_tx_chars(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (port->x_char &&
- (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
+ (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
atmel_uart_write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
@@ -839,8 +835,7 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
- while (atmel_uart_readl(port, ATMEL_US_CSR) &
- atmel_port->tx_done_mask) {
+ while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
atmel_uart_write_char(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -851,10 +846,20 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (!uart_circ_empty(xmit))
+ if (!uart_circ_empty(xmit)) {
+ /*
+ * We still have characters to transmit, so we should continue
+ * transmitting them when TX is ready, regardless of the mode or
+ * duplex setting.
+ */
+ atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
+
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
+ } else {
+ if (atmel_uart_is_half_duplex(port))
+ atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
+ }
}
static void atmel_complete_tx_dma(void *arg)
@@ -1067,7 +1072,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
chan_err:
dev_err(port->dev, "TX channel not available, switch to pio\n");
- atmel_port->use_dma_tx = 0;
+ atmel_port->use_dma_tx = false;
if (atmel_port->chan_tx)
atmel_release_tx_dma(port);
return -EINVAL;
@@ -1266,7 +1271,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
chan_err:
dev_err(port->dev, "RX channel not available, switch to pio\n");
- atmel_port->use_dma_rx = 0;
+ atmel_port->use_dma_rx = false;
if (atmel_port->chan_rx)
atmel_release_rx_dma(port);
return -EINVAL;
@@ -1693,7 +1698,7 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
- atmel_port->use_pdc_rx = 0;
+ atmel_port->use_pdc_rx = false;
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
@@ -2270,27 +2275,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_NORMAL;
}
- /* set the mode, clock divisor, parity, stop bits and data size */
- atmel_uart_writel(port, ATMEL_US_MR, mode);
-
- /*
- * when switching the mode, set the RTS line state according to the
- * new mode, otherwise keep the former state
- */
- if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
- unsigned int rts_state;
-
- if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
- /* let the hardware control the RTS line */
- rts_state = ATMEL_US_RTSDIS;
- } else {
- /* force RTS line to low level */
- rts_state = ATMEL_US_RTSEN;
- }
-
- atmel_uart_writel(port, ATMEL_US_CR, rts_state);
- }
-
/*
* Set the baud rate:
* Fractional baudrate allows to setup output frequency more
@@ -2317,6 +2301,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+
+ /* set the mode, clock divisor, parity, stop bits and data size */
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
+
+ /*
+ * when switching the mode, set the RTS line state according to the
+ * new mode, otherwise keep the former state
+ */
+ if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+ unsigned int rts_state;
+
+ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+ /* let the hardware control the RTS line */
+ rts_state = ATMEL_US_RTSDIS;
+ } else {
+ /* force RTS line to low level */
+ rts_state = ATMEL_US_RTSEN;
+ }
+
+ atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+ }
+
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
atmel_port->tx_stopped = false;
@@ -2525,8 +2531,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
* ENDTX|TXBUFE
*/
- if (port->rs485.flags & SER_RS485_ENABLED ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_pdc_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
@@ -2877,6 +2882,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
atmel_port = &atmel_ports[ret];
atmel_port->backup_imr = 0;
atmel_port->uart.line = ret;
+ atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
atmel_serial_probe_fifos(atmel_port, pdev);
atomic_set(&atmel_port->tasklet_shutdown, 0);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index b7adc6127b3d..5674da2b76f0 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -10,10 +10,6 @@
* my board.
*/
-#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/init.h>
@@ -858,6 +854,7 @@ static int bcm_uart_probe(struct platform_device *pdev)
port->fifosize = 16;
port->uartclk = clk_get_rate(clk) / 2;
port->line = pdev->id;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_BCM63XX_CONSOLE);
clk_put(clk);
ret = uart_add_one_port(&bcm_uart_driver, port);
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 061590795680..95abc6faa3d5 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -8,10 +8,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
-#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/device.h>
#include <linux/console.h>
@@ -479,6 +475,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
s->port.mapbase = res->start;
s->port.type = PORT_CLPS711X;
s->port.fifosize = 16;
+ s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CLPS711X_CONSOLE);
s->port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
s->port.uartclk = clk_get_rate(uart_clk);
s->port.ops = &uart_clps711x_ops;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index de6d02f7abe2..19d5a4cf29a6 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -40,10 +40,6 @@
#include <asm/fs_pd.h>
#include <asm/udbg.h>
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/kernel.h>
@@ -347,9 +343,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
/* ASSUMPTION: it contains nothing valid */
i = 0;
}
-#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
-#endif
goto error_return;
}
@@ -1204,7 +1198,8 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->port.uartclk = ppc_proc_freq;
pinfo->port.mapbase = (unsigned long)mem;
pinfo->port.type = PORT_CPM;
- pinfo->port.ops = &cpm_uart_pops,
+ pinfo->port.ops = &cpm_uart_pops;
+ pinfo->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_CPM_CONSOLE);
pinfo->port.iotype = UPIO_MEM;
pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
spin_lock_init(&pinfo->port.lock);
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 7b57e840e255..4552742c3859 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -29,10 +29,6 @@
#undef DEBUG_DZ
-#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
@@ -677,7 +673,7 @@ static void dz_release_port(struct uart_port *uport)
static int dz_map_port(struct uart_port *uport)
{
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
dec_kn_slot_size);
if (!uport->membase) {
printk(KERN_ERR "dz: Cannot map MMIO\n");
@@ -787,6 +783,7 @@ static void __init dz_init_ports(void)
uport->ops = &dz_ops;
uport->line = line;
uport->mapbase = base;
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_DZ_CONSOLE);
}
}
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index d6b5e5463746..2ac87128d7fd 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -1,8 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#if defined(CONFIG_SERIAL_EFM32_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -748,6 +744,7 @@ static int efm32_uart_probe(struct platform_device *pdev)
efm_port->port.type = PORT_EFMUART;
efm_port->port.iotype = UPIO_MEM32;
efm_port->port.fifosize = 2;
+ efm_port->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_EFM32_UART_CONSOLE);
efm_port->port.ops = &efm32_uart_pops;
efm_port->port.flags = UPF_BOOT_AUTOCONF;
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 205c31a61684..3e28be402aef 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -6,11 +6,6 @@
* Copyright 2017-2019 NXP
*/
-#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
- defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/console.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -279,10 +274,8 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
if (brk) {
uart_handle_break(sport);
} else {
-#ifdef SUPPORT_SYSRQ
if (uart_handle_sysrq_char(sport, (unsigned char)rx))
continue;
-#endif
tty_insert_flip_char(port, rx, flg);
}
}
@@ -863,6 +856,7 @@ static int linflex_probe(struct platform_device *pdev)
sport->irq = platform_get_irq(pdev, 0);
sport->ops = &linflex_pops;
sport->flags = UPF_BOOT_AUTOCONF;
+ sport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE);
linflex_ports[sport->line] = sport;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 4e128d19e0ad..91e2805e6441 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -5,10 +5,6 @@
* Copyright 2012-2014 Freescale Semiconductor, Inc.
*/
-#if defined(CONFIG_SERIAL_FSL_LPUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>
@@ -864,9 +860,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
if (sr & UARTSR1_OR)
flg = TTY_OVERRUN;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
tty_insert_flip_char(port, rx, flg);
@@ -946,9 +940,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
tty_insert_flip_char(port, rx, flg);
@@ -2376,7 +2368,9 @@ static int __init lpuart32_early_console_setup(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
- device->port.iotype = UPIO_MEM32BE;
+ if (device->port.iotype != UPIO_MEM32)
+ device->port.iotype = UPIO_MEM32BE;
+
device->con->write = lpuart32_early_write;
return 0;
}
@@ -2396,9 +2390,6 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
-OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
-EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
-EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
#define LPUART_CONSOLE (&lpuart_console)
#define LPUART32_CONSOLE (&lpuart32_console)
@@ -2461,6 +2452,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart32_pops;
else
sport->port.ops = &lpuart_pops;
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LPUART_CONSOLE);
sport->port.flags = UPF_BOOT_AUTOCONF;
if (lpuart_is_32(sport))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a9e20e6c63ad..0c6c63166250 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -8,10 +8,6 @@
* Copyright (C) 2004 Pengutronix
*/
-#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -700,22 +696,33 @@ static void imx_uart_start_tx(struct uart_port *port)
}
}
-static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
+static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
u32 usr1;
- spin_lock(&sport->port.lock);
-
imx_uart_writel(sport, USR1_RTSD, USR1);
usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
uart_handle_cts_change(&sport->port, !!usr1);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
- spin_unlock(&sport->port.lock);
return IRQ_HANDLED;
}
+static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ irqreturn_t ret;
+
+ spin_lock(&sport->port.lock);
+
+ ret = __imx_uart_rtsint(irq, dev_id);
+
+ spin_unlock(&sport->port.lock);
+
+ return ret;
+}
+
static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
@@ -726,14 +733,12 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
+static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
unsigned int rx, flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
- spin_lock(&sport->port.lock);
-
while (imx_uart_readl(sport, USR2) & USR2_RDR) {
u32 usr2;
@@ -779,9 +784,7 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
if (rx & URXD_OVRRUN)
flg = TTY_OVERRUN;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
@@ -792,11 +795,25 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
}
out:
- spin_unlock(&sport->port.lock);
tty_flip_buffer_push(port);
+
return IRQ_HANDLED;
}
+static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ irqreturn_t ret;
+
+ spin_lock(&sport->port.lock);
+
+ ret = __imx_uart_rxint(irq, dev_id);
+
+ spin_unlock(&sport->port.lock);
+
+ return ret;
+}
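/*
 * Editorial note: the handlers are split into bare __imx_uart_*() bodies
 * and locked wrappers so that the shared interrupt handler
 * imx_uart_int() (below) can take port.lock once and call the bare
 * variants directly, instead of each sub-handler taking the lock itself.
 */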
+
static void imx_uart_clear_rx_errors(struct imx_port *sport);
/*
@@ -855,6 +872,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;
+ spin_lock(&sport->port.lock);
+
usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
ucr1 = imx_uart_readl(sport, UCR1);
@@ -888,27 +907,25 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
usr2 &= ~USR2_ORE;
if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
- imx_uart_rxint(irq, dev_id);
+ __imx_uart_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
- imx_uart_txint(irq, dev_id);
+ imx_uart_transmit_buffer(sport);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_DTRD) {
imx_uart_writel(sport, USR1_DTRD, USR1);
- spin_lock(&sport->port.lock);
imx_uart_mctrl_check(sport);
- spin_unlock(&sport->port.lock);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_RTSD) {
- imx_uart_rtsint(irq, dev_id);
+ __imx_uart_rtsint(irq, dev_id);
ret = IRQ_HANDLED;
}
@@ -923,6 +940,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
+ spin_unlock(&sport->port.lock);
+
return ret;
}
@@ -2231,6 +2250,7 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.iotype = UPIO_MEM;
sport->port.irq = rxirq;
sport->port.fifosize = 32;
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
sport->port.flags = UPF_BOOT_AUTOCONF;
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 8c810733df3d..86fff69d7e7c 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -38,10 +38,6 @@
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
-#if defined(CONFIG_SERIAL_IP22_ZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include "ip22zilog.h"
@@ -1080,6 +1076,7 @@ static struct uart_driver ip22zilog_reg = {
static void __init ip22zilog_prepare(void)
{
+ unsigned char sysrq_on = IS_ENABLED(CONFIG_SERIAL_IP22_ZILOG_CONSOLE);
struct uart_ip22zilog_port *up;
struct zilog_layout *rp;
int channel, chip;
@@ -1115,6 +1112,7 @@ static void __init ip22zilog_prepare(void)
up[(chip * 2) + 0].port.irq = zilog_irq;
up[(chip * 2) + 0].port.uartclk = ZS_CLOCK;
up[(chip * 2) + 0].port.fifosize = 1;
+ up[(chip * 2) + 0].port.has_sysrq = sysrq_on;
up[(chip * 2) + 0].port.ops = &ip22zilog_pops;
up[(chip * 2) + 0].port.type = PORT_IP22ZILOG;
up[(chip * 2) + 0].port.flags = 0;
@@ -1126,6 +1124,7 @@ static void __init ip22zilog_prepare(void)
up[(chip * 2) + 1].port.irq = zilog_irq;
up[(chip * 2) + 1].port.uartclk = ZS_CLOCK;
up[(chip * 2) + 1].port.fifosize = 1;
+ up[(chip * 2) + 1].port.has_sysrq = sysrq_on;
up[(chip * 2) + 1].port.ops = &ip22zilog_pops;
up[(chip * 2) + 1].port.type = PORT_IP22ZILOG;
up[(chip * 2) + 1].port.line = (chip * 2) + 1;
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 4029272891f9..5022447afa23 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -118,7 +118,7 @@ static int kgdb_nmi_poll_one_knock(void)
int c = -1;
const char *magic = kgdb_nmi_magic;
size_t m = strlen(magic);
- bool printch = 0;
+ bool printch = false;
c = dbg_io_ops->read_char();
if (c == NO_POLL_CHAR)
@@ -130,7 +130,7 @@ static int kgdb_nmi_poll_one_knock(void)
n = (n + 1) % m;
if (!n)
return 1;
- printch = 1;
+ printch = true;
} else {
n = 0;
}
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index fcbea43dc334..f67226df30d4 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -549,7 +549,7 @@ lqasc_request_port(struct uart_port *port)
}
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(&pdev->dev,
+ port->membase = devm_ioremap(&pdev->dev,
port->mapbase, size);
if (port->membase == NULL)
return -ENOMEM;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index fbc5bc022a39..d2c08b760f83 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -5,15 +5,12 @@
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
*/
-#if defined(CONFIG_SERIAL_MESON_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -76,6 +73,8 @@
#define AML_UART_PORT_OFFSET 6
#define AML_UART_DEV_NAME "ttyAML"
+#define AML_UART_POLL_USEC 5
+#define AML_UART_TIMEOUT_USEC 10000
static struct uart_driver meson_uart_driver;
@@ -411,7 +410,7 @@ static int meson_uart_request_port(struct uart_port *port)
return -EBUSY;
}
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
port->mapsize);
if (!port->membase)
return -ENOMEM;
@@ -427,6 +426,64 @@ static void meson_uart_config_port(struct uart_port *port, int flags)
}
}
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context (e.g. kgdb).
+ */
+
+static int meson_uart_poll_get_char(struct uart_port *port)
+{
+ u32 c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
+ c = NO_POLL_CHAR;
+ else
+ c = readl(port->membase + AML_UART_RFIFO);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return c;
+}
+
+static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Wait until FIFO is empty or timeout */
+ ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+ reg & AML_UART_TX_EMPTY,
+ AML_UART_POLL_USEC,
+ AML_UART_TIMEOUT_USEC);
+ if (ret == -ETIMEDOUT) {
+ dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+ goto out;
+ }
+
+ /* Write the character */
+ writel(c, port->membase + AML_UART_WFIFO);
+
+ /* Wait until FIFO is empty or timeout */
+ ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+ reg & AML_UART_TX_EMPTY,
+ AML_UART_POLL_USEC,
+ AML_UART_TIMEOUT_USEC);
+ if (ret == -ETIMEDOUT)
+ dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
static const struct uart_ops meson_uart_ops = {
.set_mctrl = meson_uart_set_mctrl,
.get_mctrl = meson_uart_get_mctrl,
@@ -442,6 +499,10 @@ static const struct uart_ops meson_uart_ops = {
.request_port = meson_uart_request_port,
.release_port = meson_uart_release_port,
.verify_port = meson_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = meson_uart_poll_get_char,
+ .poll_put_char = meson_uart_poll_put_char,
+#endif
};
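/*
 * Editorial note, not in the patch text: hooking poll_get_char and
 * poll_put_char into uart_ops is what allows kgdb to drive this UART
 * through kgdboc, e.g. with boot parameters along the lines of
 *
 *	console=ttyAML0,115200 kgdboc=ttyAML0,115200
 *
 * (illustrative command line; requires CONFIG_CONSOLE_POLL).
 */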
#ifdef CONFIG_SERIAL_MESON_CONSOLE
@@ -703,6 +764,7 @@ static int meson_uart_probe(struct platform_device *pdev)
port->mapsize = resource_size(res_mem);
port->irq = res_irq->start;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
port->dev = &pdev->dev;
port->line = pdev->id;
port->type = PORT_MESON;
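The poll hooks added above follow the standard CONFIG_CONSOLE_POLL contract: poll_get_char() must return NO_POLL_CHAR when the RX FIFO is empty, and poll_put_char() must busy-wait because the caller (e.g. kgdb) cannot sleep. A minimal sketch of the same shape for a hypothetical foo_uart; the FOO_* registers, bits and timeouts are placeholders, not meson's, and the port-lock handling the meson hunk adds is omitted for brevity:

#include <linux/iopoll.h>
#include <linux/serial_core.h>

#ifdef CONFIG_CONSOLE_POLL
static int foo_uart_poll_get_char(struct uart_port *port)
{
        /* Nothing buffered: tell the poller to come back later. */
        if (readl(port->membase + FOO_STATUS) & FOO_RX_EMPTY)
                return NO_POLL_CHAR;

        return readl(port->membase + FOO_RFIFO);
}

static void foo_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
        u32 reg;

        /* Atomic-context busy wait for the TX FIFO to drain. */
        if (readl_poll_timeout_atomic(port->membase + FOO_STATUS, reg,
                                      reg & FOO_TX_EMPTY, 5, 10000))
                return; /* timed out; drop the character */

        writel(c, port->membase + FOO_WFIFO);
}
#endif /* CONFIG_CONSOLE_POLL */

The hooks are then wired into the driver's uart_ops under the same #ifdef, exactly as the meson hunk does.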
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 949ab7efc4fc..8f2cab7f66ad 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -3,10 +3,6 @@
* Copyright (C) 2018 Socionext Inc.
*/
-#if defined(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/module.h>
@@ -537,6 +533,7 @@ static int mlb_usio_probe(struct platform_device *pdev)
port->irq = mlb_usio_irq[index][RX];
port->uartclk = clk_get_rate(clk);
port->fifosize = 128;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE);
port->iotype = UPIO_MEM32;
port->flags = UPF_BOOT_AUTOCONF | UPF_SPD_VHI;
port->line = index;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3a75ee08d619..af1700445251 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -44,10 +44,6 @@
#include <asm/mpc52xx.h>
#include <asm/mpc52xx_psc.h>
-#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
@@ -1382,12 +1378,8 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
ch = psc_ops->read_char(port);
/* Handle sysrq char */
-#ifdef SUPPORT_SYSRQ
- if (uart_handle_sysrq_char(port, ch)) {
- port->sysrq = 0;
+ if (uart_handle_sysrq_char(port, ch))
continue;
- }
-#endif
/* Store it */
@@ -1770,6 +1762,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
spin_lock_init(&port->lock);
port->uartclk = uartclk;
port->fifosize = 512;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MPC52xx_CONSOLE);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF |
(uart_console(port) ? 0 : UPF_IOREMAP);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 1cbae0768b1f..60a9c53fa7cb 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -7,10 +7,6 @@
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*/
-#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
@@ -610,7 +606,7 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
UARTDM_RX_SIZE, dma->dir);
ret = dma_mapping_error(uart->dev, dma->phys);
if (ret)
- return;
+ goto sw_mode;
dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
@@ -661,6 +657,22 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
return;
unmap:
dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
+
+sw_mode:
+ /*
+ * Switch from DMA to SW/FIFO mode. After clearing Rx BAM (UARTDM_DMEN),
+ * the receiver must be reset.
+ */
+ msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+
+ msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ /* Re-enable RX interrupts */
+ msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(uart, msm_port->imr, UART_IMR);
}
static void msm_stop_rx(struct uart_port *port)
@@ -1580,6 +1592,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
int num_newlines = 0;
bool replaced = false;
void __iomem *tf;
+ int locked = 1;
if (is_uartdm)
tf = port->membase + UARTDM_TF;
@@ -1592,7 +1605,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
- spin_lock(&port->lock);
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&port->lock);
+ else
+ spin_lock(&port->lock);
+
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1628,7 +1647,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
iowrite32_rep(tf, buf, 1);
i += num_chars;
}
- spin_unlock(&port->lock);
+
+ if (locked)
+ spin_unlock(&port->lock);
}
static void msm_console_write(struct console *co, const char *s,
@@ -1801,6 +1822,7 @@ static int msm_serial_probe(struct platform_device *pdev)
if (unlikely(irq < 0))
return -ENXIO;
port->irq = irq;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MSM_CONSOLE);
platform_set_drvdata(pdev, port);
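The __msm_console_write() change above is the usual console locking idiom for sysrq-capable ports: while a sysrq character is being handled the interrupt path already holds the port lock, and during an oops the lock may be held by the CPU that crashed, so the console must never block on it. A hedged, generic sketch of the pattern (foo_console_putchar is an assumed helper):

static void foo_console_write(struct uart_port *port, const char *s,
                              unsigned int count)
{
        int locked = 1;

        if (port->sysrq)
                locked = 0;     /* IRQ path already holds the lock */
        else if (oops_in_progress)
                locked = spin_trylock(&port->lock);     /* best effort */
        else
                spin_lock(&port->lock);

        uart_console_write(port, s, count, foo_console_putchar);

        if (locked)
                spin_unlock(&port->lock);
}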
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 00ce31e8d19a..47ab280f553b 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -25,11 +25,7 @@
#include <asm/irq.h>
#include <asm/parisc-device.h>
-#if defined(CONFIG_SERIAL_MUX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#include <linux/sysrq.h>
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#define MUX_OFFSET 0x800
@@ -474,7 +470,7 @@ static int __init mux_probe(struct parisc_device *dev)
port->iobase = 0;
port->mapbase = dev->hpa.start + MUX_OFFSET +
(i * MUX_LINE_OFFSET);
- port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
+ port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET);
port->iotype = UPIO_MEM;
port->type = PORT_MUX;
port->irq = 0;
@@ -483,6 +479,7 @@ static int __init mux_probe(struct parisc_device *dev)
port->ops = &mux_pops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = port_cnt;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
/* The port->timeout needs to match what is present in
* uart_wait_until_sent in serial_core.c. Otherwise
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index e34525970682..b4f835e7de23 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -12,10 +12,6 @@
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
-#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -1693,6 +1689,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.fifosize = MXS_AUART_FIFO_SIZE;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
+ s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_MXS_AUART_CONSOLE);
mxs_init_regs(s);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 6420ae581a80..48017cec7f2f 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -16,10 +16,6 @@
* this driver as required for the omap-platform.
*/
-#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -493,10 +489,13 @@ static unsigned int check_modem_status(struct uart_omap_port *up)
static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
{
unsigned int flag;
- unsigned char ch = 0;
+ /*
+ * Read one data character out to avoid stalling the receiver, per
+ * table 23-246 of the OMAP4 TRM.
+ */
if (likely(lsr & UART_LSR_DR))
- ch = serial_in(up, UART_RX);
+ serial_in(up, UART_RX);
up->port.icount.rx++;
flag = TTY_NORMAL;
@@ -1680,6 +1679,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
+ up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_OMAP_CONSOLE);
if (pdev->dev.of_node)
ret = of_alias_get_id(pdev->dev.of_node, "serial");
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index d2d8b3494685..42c8cc93b603 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -427,7 +427,7 @@ static int owl_uart_request_port(struct uart_port *port)
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c16234bca78f..0a96217dba67 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -2,9 +2,6 @@
/*
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
-#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
@@ -587,12 +584,8 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
if (uart_handle_break(port))
continue;
}
-#ifdef SUPPORT_SYSRQ
- if (port->sysrq) {
- if (uart_handle_sysrq_char(port, rbr))
- continue;
- }
-#endif
+ if (uart_handle_sysrq_char(port, rbr))
+ continue;
buf[i++] = rbr;
}
@@ -1796,6 +1789,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
priv->port.flags = UPF_BOOT_AUTOCONF;
priv->port.fifosize = fifosize;
priv->port.line = board->line_no;
+ priv->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PCH_UART_CONSOLE);
priv->trigger = PCH_UART_HAL_TRIGGER_M;
snprintf(priv->irq_name, IRQ_NAME_SIZE,
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 0bdf1687983f..484b7e8d5381 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -618,7 +618,7 @@ static int pic32_uart_request_port(struct uart_port *port)
"pic32_uart_mem"))
return -EBUSY;
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res_mem));
if (!port->membase) {
dev_err(port->dev, "Unable to map registers\n");
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index bcb5bf70534e..ba65a3bbd72a 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -61,10 +61,6 @@
#define of_machine_is_compatible(x) (0)
#endif
-#if defined (CONFIG_SERIAL_PMACZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -1721,6 +1717,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
uap->control_reg = uap->port.membase;
uap->data_reg = uap->control_reg + 4;
uap->port_type = 0;
+ uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PMACZILOG_CONSOLE);
pmz_convert_to_zs(uap, CS8, 0, 9600);
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 223a9499104e..972d94e8d32b 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -10,10 +10,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
-#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -220,9 +216,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
flg = TTY_FRAME;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
if (uart_handle_sysrq_char(&sport->port, ch))
@@ -800,6 +794,7 @@ static int pnx8xxx_serial_probe(struct platform_device *pdev)
if (pnx8xxx_ports[i].port.mapbase != res->start)
continue;
+ pnx8xxx_ports[i].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PNX8XXX_CONSOLE);
pnx8xxx_ports[i].port.dev = &pdev->dev;
uart_add_one_port(&pnx8xxx_reg, &pnx8xxx_ports[i].port);
platform_set_drvdata(pdev, &pnx8xxx_ports[i]);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 4932b674f7ef..41319ef96fa6 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -19,10 +19,6 @@
*/
-#if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -879,6 +875,7 @@ static int serial_pxa_probe(struct platform_device *dev)
sport->port.dev = &dev->dev;
sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
sport->port.uartclk = clk_get_rate(sport->clk);
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_PXA_CONSOLE);
ret = serial_pxa_probe_dt(dev, sport);
if (ret > 0)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index ff63728a95f4..191abb18fc2a 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
-#if defined(CONFIG_SERIAL_QCOM_GENI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
@@ -14,6 +10,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
@@ -92,7 +89,11 @@
#define DEF_TX_WM 2
#define DEF_FIFO_WIDTH_BITS 32
#define UART_RX_WM 2
-#define MAX_LOOPBACK_CFG 3
+
+/* SE_UART_LOOPBACK_CFG */
+#define RX_TX_SORTED BIT(0)
+#define CTS_RTS_SORTED BIT(1)
+#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED)
#ifdef CONFIG_CONSOLE_POLL
#define CONSOLE_RX_BYTES_PW 1
@@ -103,7 +104,7 @@
struct qcom_geni_serial_port {
struct uart_port uport;
struct geni_se se;
- char name[20];
+ const char *name;
u32 tx_fifo_depth;
u32 tx_fifo_width;
u32 rx_fifo_depth;
@@ -164,30 +165,6 @@ static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = {
},
};
-static ssize_t loopback_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
-
- return snprintf(buf, sizeof(u32), "%d\n", port->loopback);
-}
-
-static ssize_t loopback_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t size)
-{
- struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
- u32 loopback;
-
- if (kstrtoint(buf, 0, &loopback) || loopback > MAX_LOOPBACK_CFG) {
- dev_err(dev, "Invalid input\n");
- return -EINVAL;
- }
- port->loopback = loopback;
- return size;
-}
-static DEVICE_ATTR_RW(loopback);
-
static struct qcom_geni_serial_port qcom_geni_console_port = {
.uport = {
.iotype = UPIO_MEM,
@@ -237,10 +214,14 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
unsigned int mctrl)
{
u32 uart_manual_rfr = 0;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
if (uart_console(uport))
return;
+ if (mctrl & TIOCM_LOOP)
+ port->loopback = RX_TX_CTS_RTS_SORTED;
+
if (!(mctrl & TIOCM_RTS))
uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
@@ -757,15 +738,6 @@ out_write_wakeup:
uart_write_wakeup(uport);
}
-static irqreturn_t qcom_geni_serial_wakeup_isr(int isr, void *dev)
-{
- struct uart_port *uport = dev;
-
- pm_wakeup_event(uport->dev, 2000);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
@@ -1302,50 +1274,57 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
- scnprintf(port->name, sizeof(port->name), "qcom_geni_serial_%s%d",
- (uart_console(uport) ? "console" : "uart"), uport->line);
+ port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
+ "qcom_geni_serial_%s%d",
+ uart_console(uport) ? "console" : "uart", uport->line);
+ if (!port->name)
+ return -ENOMEM;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
uport->irq = irq;
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_QCOM_GENI_CONSOLE);
+
+ if (!console)
+ port->wakeup_irq = platform_get_irq_optional(pdev, 1);
+
+ uport->private_data = drv;
+ platform_set_drvdata(pdev, port);
+ port->handle_rx = console ? handle_rx_console : handle_rx_uart;
+
+ ret = uart_add_one_port(drv, uport);
+ if (ret)
+ return ret;
irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
IRQF_TRIGGER_HIGH, port->name, uport);
if (ret) {
dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ uart_remove_one_port(drv, uport);
return ret;
}
- if (!console) {
- port->wakeup_irq = platform_get_irq(pdev, 1);
- if (port->wakeup_irq < 0) {
- dev_err(&pdev->dev, "Failed to get wakeup IRQ %d\n",
- port->wakeup_irq);
- } else {
- irq_set_status_flags(port->wakeup_irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(uport->dev, port->wakeup_irq,
- qcom_geni_serial_wakeup_isr,
- IRQF_TRIGGER_FALLING, "uart_wakeup", uport);
- if (ret) {
- dev_err(uport->dev, "Failed to register wakeup IRQ ret %d\n",
- ret);
- return ret;
- }
-
- device_init_wakeup(&pdev->dev, true);
- ret = dev_pm_set_wake_irq(&pdev->dev, port->wakeup_irq);
- if (unlikely(ret))
- dev_err(uport->dev, "%s:Failed to set IRQ wake:%d\n",
- __func__, ret);
+ /*
+ * Set pm_runtime status as ACTIVE so that wakeup_irq gets
+ * enabled/disabled from dev_pm_arm_wake_irq during system
+ * suspend/resume respectively.
+ */
+ pm_runtime_set_active(&pdev->dev);
+
+ if (port->wakeup_irq > 0) {
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+ port->wakeup_irq);
+ if (ret) {
+ device_init_wakeup(&pdev->dev, false);
+ uart_remove_one_port(drv, uport);
+ return ret;
}
}
- uport->private_data = drv;
- platform_set_drvdata(pdev, port);
- port->handle_rx = console ? handle_rx_console : handle_rx_uart;
- if (!console)
- device_create_file(uport->dev, &dev_attr_loopback);
- return uart_add_one_port(drv, uport);
+
+ return 0;
}
static int qcom_geni_serial_remove(struct platform_device *pdev)
@@ -1353,7 +1332,10 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_driver *drv = port->uport.private_data;
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, &port->uport);
+
return 0;
}
@@ -1362,12 +1344,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- uart_suspend_port(uport->private_data, uport);
-
- if (port->wakeup_irq > 0)
- enable_irq(port->wakeup_irq);
-
- return 0;
+ return uart_suspend_port(uport->private_data, uport);
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
@@ -1375,9 +1352,6 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- if (port->wakeup_irq > 0)
- disable_irq(port->wakeup_irq);
-
return uart_resume_port(uport->private_data, uport);
}
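With the driver-private loopback sysfs attribute removed, internal loopback on the non-console GENI ports is requested through the standard TIOCM_LOOP modem-control bit, which qcom_geni_serial_set_mctrl() now maps to RX_TX_CTS_RTS_SORTED. A userspace sketch; the device node name is an assumption and error handling is omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef TIOCM_LOOP
#define TIOCM_LOOP 0x8000       /* from include/uapi/asm-generic/termios.h */
#endif

int main(void)
{
        int flags;
        int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);        /* assumed node */

        ioctl(fd, TIOCMGET, &flags);
        flags |= TIOCM_LOOP;    /* driver selects RX_TX_CTS_RTS_SORTED */
        ioctl(fd, TIOCMSET, &flags);

        /* Anything written now should be read straight back. */
        close(fd);
        return 0;
}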
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index ff9a27d48bca..b5ef86ae2746 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -498,7 +498,7 @@ static int rda_uart_request_port(struct uart_port *port)
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
- port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ port->membase = devm_ioremap(port->dev, port->mapbase,
resource_size(res));
if (!port->membase)
return -EBUSY;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 8e618129e65c..75c2a22895f9 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -7,10 +7,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*/
-#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -214,9 +210,7 @@ sa1100_rx_chars(struct sa1100_port *sport)
else if (status & UTSR1_TO_SM(UTSR1_FRE))
flg = TTY_FRAME;
-#ifdef SUPPORT_SYSRQ
sport->port.sysrq = 0;
-#endif
}
if (uart_handle_sysrq_char(&sport->port, ch))
@@ -860,6 +854,7 @@ static int sa1100_serial_resume(struct platform_device *dev)
static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform_device *dev)
{
sport->port.dev = &dev->dev;
+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SA1100_CONSOLE);
// mctrl_gpio_init() requires that the GPIO driver supports interrupts,
// but we need to support GPIO drivers for hardware that has no such
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
deleted file mode 100644
index f93022113f59..000000000000
--- a/drivers/tty/serial/samsung.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef __SAMSUNG_H
-#define __SAMSUNG_H
-
-/*
- * Driver for Samsung SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
-*/
-
-#include <linux/dmaengine.h>
-
-struct s3c24xx_uart_info {
- char *name;
- unsigned int type;
- unsigned int fifosize;
- unsigned long rx_fifomask;
- unsigned long rx_fifoshift;
- unsigned long rx_fifofull;
- unsigned long tx_fifomask;
- unsigned long tx_fifoshift;
- unsigned long tx_fifofull;
- unsigned int def_clk_sel;
- unsigned long num_clks;
- unsigned long clksel_mask;
- unsigned long clksel_shift;
-
- /* uart port features */
-
- unsigned int has_divslot:1;
-
- /* uart controls */
- int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
-};
-
-struct s3c24xx_serial_drv_data {
- struct s3c24xx_uart_info *info;
- struct s3c2410_uartcfg *def_cfg;
- unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
-};
-
-struct s3c24xx_uart_dma {
- unsigned int rx_chan_id;
- unsigned int tx_chan_id;
-
- struct dma_slave_config rx_conf;
- struct dma_slave_config tx_conf;
-
- struct dma_chan *rx_chan;
- struct dma_chan *tx_chan;
-
- dma_addr_t rx_addr;
- dma_addr_t tx_addr;
-
- dma_cookie_t rx_cookie;
- dma_cookie_t tx_cookie;
-
- char *rx_buf;
-
- dma_addr_t tx_transfer_addr;
-
- size_t rx_size;
- size_t tx_size;
-
- struct dma_async_tx_descriptor *tx_desc;
- struct dma_async_tx_descriptor *rx_desc;
-
- int tx_bytes_requested;
- int rx_bytes_requested;
-};
-
-struct s3c24xx_uart_port {
- unsigned char rx_claimed;
- unsigned char tx_claimed;
- unsigned int pm_level;
- unsigned long baudclk_rate;
- unsigned int min_dma_size;
-
- unsigned int rx_irq;
- unsigned int tx_irq;
-
- unsigned int tx_in_progress;
- unsigned int tx_mode;
- unsigned int rx_mode;
-
- struct s3c24xx_uart_info *info;
- struct clk *clk;
- struct clk *baudclk;
- struct uart_port port;
- struct s3c24xx_serial_drv_data *drv_data;
-
- /* reference to platform data */
- struct s3c2410_uartcfg *cfg;
-
- struct s3c24xx_uart_dma *dma;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
-};
-
-/* conversion functions */
-
-#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
-
-/* register access controls */
-
-#define portaddr(port, reg) ((port)->membase + (reg))
-#define portaddrl(port, reg) \
- ((unsigned long *)(unsigned long)((port)->membase + (reg)))
-
-#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
-#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
-
-#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
-#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
-
-/* Byte-order aware bit setting/clearing functions. */
-
-static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
- unsigned int reg)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = rd_regl(port, reg);
- val |= (1 << idx);
- wr_regl(port, reg, val);
- local_irq_restore(flags);
-}
-
-static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
- unsigned int reg)
-{
- unsigned long flags;
- u32 val;
-
- local_irq_save(flags);
- val = rd_regl(port, reg);
- val &= ~(1 << idx);
- wr_regl(port, reg, val);
- local_irq_restore(flags);
-}
-
-#endif
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 83fd51607741..73f951d65b93 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -4,7 +4,7 @@
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
-*/
+ */
/* Note on 2410 error handling
*
@@ -19,11 +19,7 @@
* and change the policy on BREAK
*
* BJD, 04-Nov-2004
-*/
-
-#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
+ */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -44,33 +40,8 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
-
#include <asm/irq.h>
-#include "samsung.h"
-
-#if defined(CONFIG_SERIAL_SAMSUNG_DEBUG) && \
- !defined(MODULE)
-
-extern void printascii(const char *);
-
-__printf(1, 2)
-static void dbg(const char *fmt, ...)
-{
- va_list va;
- char buff[256];
-
- va_start(va, fmt);
- vscnprintf(buff, sizeof(buff), fmt, va);
- va_end(va);
-
- printascii(buff);
-}
-
-#else
-#define dbg(fmt, ...) do { if (0) no_printk(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
/* UART name and device definitions */
#define S3C24XX_SERIAL_NAME "ttySAC"
@@ -81,14 +52,142 @@ static void dbg(const char *fmt, ...)
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
#define S3C24XX_RX_DMA 2
-/* macros to change one thing to another */
-
-#define tx_enabled(port) ((port)->unused[0])
-#define rx_enabled(port) ((port)->unused[1])
/* flag to ignore all characters coming in */
#define RXSTAT_DUMMY_READ (0x10000000)
+struct s3c24xx_uart_info {
+ char *name;
+ unsigned int type;
+ unsigned int fifosize;
+ unsigned long rx_fifomask;
+ unsigned long rx_fifoshift;
+ unsigned long rx_fifofull;
+ unsigned long tx_fifomask;
+ unsigned long tx_fifoshift;
+ unsigned long tx_fifofull;
+ unsigned int def_clk_sel;
+ unsigned long num_clks;
+ unsigned long clksel_mask;
+ unsigned long clksel_shift;
+
+ /* uart port features */
+
+ unsigned int has_divslot:1;
+};
+
+struct s3c24xx_serial_drv_data {
+ struct s3c24xx_uart_info *info;
+ struct s3c2410_uartcfg *def_cfg;
+ unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+};
+
+struct s3c24xx_uart_dma {
+ unsigned int rx_chan_id;
+ unsigned int tx_chan_id;
+
+ struct dma_slave_config rx_conf;
+ struct dma_slave_config tx_conf;
+
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+
+ dma_addr_t rx_addr;
+ dma_addr_t tx_addr;
+
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+
+ char *rx_buf;
+
+ dma_addr_t tx_transfer_addr;
+
+ size_t rx_size;
+ size_t tx_size;
+
+ struct dma_async_tx_descriptor *tx_desc;
+ struct dma_async_tx_descriptor *rx_desc;
+
+ int tx_bytes_requested;
+ int rx_bytes_requested;
+};
+
+struct s3c24xx_uart_port {
+ unsigned char rx_claimed;
+ unsigned char tx_claimed;
+ unsigned char rx_enabled;
+ unsigned char tx_enabled;
+ unsigned int pm_level;
+ unsigned long baudclk_rate;
+ unsigned int min_dma_size;
+
+ unsigned int rx_irq;
+ unsigned int tx_irq;
+
+ unsigned int tx_in_progress;
+ unsigned int tx_mode;
+ unsigned int rx_mode;
+
+ struct s3c24xx_uart_info *info;
+ struct clk *clk;
+ struct clk *baudclk;
+ struct uart_port port;
+ struct s3c24xx_serial_drv_data *drv_data;
+
+ /* reference to platform data */
+ struct s3c2410_uartcfg *cfg;
+
+ struct s3c24xx_uart_dma *dma;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+#define s3c24xx_dev_to_port(__dev) dev_get_drvdata(__dev)
+
+/* register access controls */
+
+#define portaddr(port, reg) ((port)->membase + (reg))
+#define portaddrl(port, reg) \
+ ((unsigned long *)(unsigned long)((port)->membase + (reg)))
+
+#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
+
+/* Byte-order aware bit setting/clearing functions. */
+
+static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val |= (1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
+
+static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val &= ~(1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
+
static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
{
return container_of(port, struct s3c24xx_uart_port, port);
@@ -118,6 +217,7 @@ static int s3c24xx_serial_has_interrupt_mask(struct uart_port *port)
static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ucon, ufcon;
int count = 10000;
@@ -135,12 +235,13 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
ucon |= S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_rx_disable(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
unsigned int ucon;
@@ -150,7 +251,7 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
ucon &= ~S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -162,7 +263,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
struct dma_tx_state state;
int count;
- if (!tx_enabled(port))
+ if (!ourport->tx_enabled)
return;
if (s3c24xx_serial_has_interrupt_mask(port))
@@ -182,7 +283,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
port->icount.tx += count;
}
- tx_enabled(port) = 0;
+ ourport->tx_enabled = 0;
ourport->tx_in_progress = 0;
if (port->flags & UPF_CONS_FLOW)
@@ -340,11 +441,11 @@ static void s3c24xx_serial_start_tx(struct uart_port *port)
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct circ_buf *xmit = &port->state->xmit;
- if (!tx_enabled(port)) {
+ if (!ourport->tx_enabled) {
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
- tx_enabled(port) = 1;
+ ourport->tx_enabled = 1;
if (!ourport->dma || !ourport->dma->tx_chan)
s3c24xx_serial_start_tx_pio(ourport);
}
@@ -389,14 +490,14 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
enum dma_status dma_status;
unsigned int received;
- if (rx_enabled(port)) {
- dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
+ if (ourport->rx_enabled) {
+ dev_dbg(port->dev, "stopping rx\n");
if (s3c24xx_serial_has_interrupt_mask(port))
s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
S3C64XX_UINTM);
else
disable_irq_nosync(ourport->rx_irq);
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
}
if (dma && dma->rx_chan) {
dmaengine_pause(dma->tx_chan);
@@ -546,7 +647,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
- unsigned int utrstat, ufstat, received;
+ unsigned int utrstat, received;
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
@@ -556,7 +657,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
struct dma_tx_state state;
utrstat = rd_regl(port, S3C2410_UTRSTAT);
- ufstat = rd_regl(port, S3C2410_UFSTAT);
+ rd_regl(port, S3C2410_UFSTAT);
spin_lock_irqsave(&port->lock, flags);
@@ -618,9 +719,9 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
if (port->flags & UPF_CONS_FLOW) {
int txe = s3c24xx_serial_txempty_nofifo(port);
- if (rx_enabled(port)) {
+ if (ourport->rx_enabled) {
if (!txe) {
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
continue;
}
} else {
@@ -628,7 +729,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
return;
}
continue;
@@ -641,12 +742,13 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
port->icount.rx++;
if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
- dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
- ch, uerstat);
+ dev_dbg(port->dev,
+ "rxerr: port ch=0x%02x, rxs=0x%08x\n",
+ ch, uerstat);
/* check for break */
if (uerstat & S3C2410_UERSTAT_BREAK) {
- dbg("break!\n");
+ dev_dbg(port->dev, "break!\n");
port->icount.brk++;
if (uart_handle_break(port))
continue; /* Ignore character */
@@ -732,7 +834,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
/* if there isn't anything more to transmit, or the uart is now
* stopped, disable the uart and exit
- */
+ */
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
s3c24xx_serial_stop_tx(port);
@@ -978,7 +1080,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
if (ourport->tx_claimed) {
if (!s3c24xx_serial_has_interrupt_mask(port))
free_irq(ourport->tx_irq, ourport);
- tx_enabled(port) = 0;
+ ourport->tx_enabled = 0;
ourport->tx_claimed = 0;
ourport->tx_mode = 0;
}
@@ -987,7 +1089,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
if (!s3c24xx_serial_has_interrupt_mask(port))
free_irq(ourport->rx_irq, ourport);
ourport->rx_claimed = 0;
- rx_enabled(port) = 0;
+ ourport->rx_enabled = 0;
}
/* Clear pending interrupts and mask all interrupts */
@@ -1009,10 +1111,7 @@ static int s3c24xx_serial_startup(struct uart_port *port)
struct s3c24xx_uart_port *ourport = to_ourport(port);
int ret;
- dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
- port, (unsigned long long)port->mapbase, port->membase);
-
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0,
s3c24xx_serial_portname(port), ourport);
@@ -1024,9 +1123,9 @@ static int s3c24xx_serial_startup(struct uart_port *port)
ourport->rx_claimed = 1;
- dbg("requesting tx irq...\n");
+ dev_dbg(port->dev, "requesting tx irq...\n");
- tx_enabled(port) = 1;
+ ourport->tx_enabled = 1;
ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0,
s3c24xx_serial_portname(port), ourport);
@@ -1038,10 +1137,9 @@ static int s3c24xx_serial_startup(struct uart_port *port)
ourport->tx_claimed = 1;
- dbg("s3c24xx_serial_startup ok\n");
-
/* the port reset code should have done the correct
- * register setup for the port controls */
+ * register setup for the port controls
+ */
return ret;
@@ -1057,9 +1155,6 @@ static int s3c64xx_serial_startup(struct uart_port *port)
unsigned int ufcon;
int ret;
- dbg("s3c64xx_serial_startup: port=%p (%08llx,%p)\n",
- port, (unsigned long long)port->mapbase, port->membase);
-
wr_regl(port, S3C64XX_UINTM, 0xf);
if (ourport->dma) {
ret = s3c24xx_serial_request_dma(ourport);
@@ -1077,9 +1172,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
}
/* For compatibility with s3c24xx Soc's */
- rx_enabled(port) = 1;
+ ourport->rx_enabled = 1;
ourport->rx_claimed = 1;
- tx_enabled(port) = 0;
+ ourport->tx_enabled = 0;
ourport->tx_claimed = 1;
spin_lock_irqsave(&port->lock, flags);
@@ -1097,7 +1192,6 @@ static int s3c64xx_serial_startup(struct uart_port *port)
/* Enable Rx Interrupt */
s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
- dbg("s3c64xx_serial_startup ok\n");
return ret;
}
@@ -1145,7 +1239,7 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
* baud clocks (and the resultant actual baud rates) and then tries to
* pick the closest one and select that.
*
-*/
+ */
#define MAX_CLK_NAME_LENGTH 15
@@ -1315,29 +1409,30 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (cfg->has_fracval) {
udivslot = (div & 15);
- dbg("fracval = %04x\n", udivslot);
+ dev_dbg(port->dev, "fracval = %04x\n", udivslot);
} else {
udivslot = udivslot_table[div & 15];
- dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
+ dev_dbg(port->dev, "udivslot = %04x (div %d)\n",
+ udivslot, div & 15);
}
}
switch (termios->c_cflag & CSIZE) {
case CS5:
- dbg("config: 5bits/char\n");
+ dev_dbg(port->dev, "config: 5bits/char\n");
ulcon = S3C2410_LCON_CS5;
break;
case CS6:
- dbg("config: 6bits/char\n");
+ dev_dbg(port->dev, "config: 6bits/char\n");
ulcon = S3C2410_LCON_CS6;
break;
case CS7:
- dbg("config: 7bits/char\n");
+ dev_dbg(port->dev, "config: 7bits/char\n");
ulcon = S3C2410_LCON_CS7;
break;
case CS8:
default:
- dbg("config: 8bits/char\n");
+ dev_dbg(port->dev, "config: 8bits/char\n");
ulcon = S3C2410_LCON_CS8;
break;
}
@@ -1359,8 +1454,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- dbg("setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
- ulcon, quot, udivslot);
+ dev_dbg(port->dev,
+ "setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
+ ulcon, quot, udivslot);
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
@@ -1381,10 +1477,11 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (ourport->info->has_divslot)
wr_regl(port, S3C2443_DIVSLOT, udivslot);
- dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
- rd_regl(port, S3C2410_ULCON),
- rd_regl(port, S3C2410_UCON),
- rd_regl(port, S3C2410_UFCON));
+ dev_dbg(port->dev,
+ "uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
+ rd_regl(port, S3C2410_ULCON),
+ rd_regl(port, S3C2410_UCON),
+ rd_regl(port, S3C2410_UFCON));
/*
* Update the per-port timeout.
@@ -1442,6 +1539,7 @@ static void s3c24xx_serial_release_port(struct uart_port *port)
static int s3c24xx_serial_request_port(struct uart_port *port)
{
const char *name = s3c24xx_serial_portname(port);
+
return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
}
@@ -1583,7 +1681,7 @@ s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
/* s3c24xx_serial_resetport
*
* reset the fifos and other settings.
-*/
+ */
static void s3c24xx_serial_resetport(struct uart_port *port,
struct s3c2410_uartcfg *cfg)
@@ -1637,7 +1735,8 @@ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
if (val == CPUFREQ_PRECHANGE) {
/* we should really shut the port down whilst the
- * frequency change is in progress. */
+ * frequency change is in progress.
+ */
} else if (val == CPUFREQ_POSTCHANGE) {
struct ktermios *termios;
@@ -1743,8 +1842,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
struct resource *res;
int ret;
- dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
-
if (platdev == NULL)
return -ENODEV;
@@ -1761,7 +1858,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
- dbg("s3c24xx_serial_init_port: enabling flow control\n");
+ dev_dbg(port->dev, "enabling flow control\n");
port->flags |= UPF_CONS_FLOW;
}
@@ -1773,7 +1870,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
return -EINVAL;
}
- dbg("resource %pR)\n", res);
+ dev_dbg(port->dev, "resource %pR)\n", res);
port->membase = devm_ioremap(port->dev, res->start, resource_size(res));
if (!port->membase) {
@@ -1835,9 +1932,9 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
wr_regl(port, S3C64XX_UINTSP, 0xf);
}
- dbg("port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
- &port->mapbase, port->membase, port->irq,
- ourport->rx_irq, ourport->tx_irq, port->uartclk);
+ dev_dbg(port->dev, "port: map=%pa, mem=%p, irq=%d (%d,%d), clock=%u\n",
+ &port->mapbase, port->membase, port->irq,
+ ourport->rx_irq, ourport->tx_irq, port->uartclk);
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
@@ -1851,7 +1948,10 @@ err:
/* Device driver serial port probe */
+#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_uart_dt_match[];
+#endif
+
static int probe_index;
static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
@@ -1860,6 +1960,7 @@ static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
#ifdef CONFIG_OF
if (pdev->dev.of_node) {
const struct of_device_id *match;
+
match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node);
return (struct s3c24xx_serial_drv_data *)match->data;
}
@@ -1881,8 +1982,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
index = ret;
}
- dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
-
if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", index);
return -EINVAL;
@@ -1909,6 +2008,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->port.fifosize = ourport->drv_data->fifosize[index];
else if (ourport->info->fifosize)
ourport->port.fifosize = ourport->info->fifosize;
+ ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE);
/*
* DMA transfers must be aligned at least to cache line size,
@@ -1917,7 +2017,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->min_dma_size = max_t(int, ourport->port.fifosize,
dma_get_cache_alignment());
- dbg("%s: initialising port %p...\n", __func__, ourport);
+ dev_dbg(&pdev->dev, "%s: initialising port %p...\n", __func__, ourport);
ret = s3c24xx_serial_init_port(ourport, pdev);
if (ret < 0)
@@ -1931,7 +2031,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
}
- dbg("%s: adding port\n", __func__);
+ dev_dbg(&pdev->dev, "%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
@@ -2008,9 +2108,10 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
/* restore IRQ mask */
if (s3c24xx_serial_has_interrupt_mask(port)) {
unsigned int uintm = 0xf;
- if (tx_enabled(port))
+
+ if (ourport->tx_enabled)
uintm &= ~S3C64XX_UINTM_TXD_MSK;
- if (rx_enabled(port))
+ if (ourport->rx_enabled)
uintm &= ~S3C64XX_UINTM_RXD_MSK;
clk_prepare_enable(ourport->clk);
if (!IS_ERR(ourport->baudclk))
@@ -2143,10 +2244,6 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
ucon = rd_regl(port, S3C2410_UCON);
ubrdiv = rd_regl(port, S3C2410_UBRDIV);
- dbg("s3c24xx_serial_get_options: port=%p\n"
- "registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n",
- port, ulcon, ucon, ubrdiv);
-
if (s3c24xx_port_configured(ucon)) {
switch (ulcon & S3C2410_LCON_CSMASK) {
case S3C2410_LCON_CS5:
@@ -2190,7 +2287,7 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
rate = 1;
*baud = rate / (16 * (ubrdiv + 1));
- dbg("calculated baud %d\n", *baud);
+ dev_dbg(port->dev, "calculated baud %d\n", *baud);
}
}
@@ -2204,9 +2301,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
- co, co->index, options);
-
/* is this a valid port */
if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
@@ -2221,8 +2315,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
cons_uart = port;
- dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
-
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
@@ -2233,7 +2325,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
else
s3c24xx_serial_get_options(port, &baud, &parity, &bits);
- dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
+ dev_dbg(port->dev, "baud %d\n", baud);
return uart_set_options(port, co, baud, parity, bits, flow);
}
@@ -2523,7 +2615,8 @@ static void samsung_early_putc(struct uart_port *port, int c)
writeb(c, port->membase + S3C2410_UTXH);
}
-static void samsung_early_write(struct console *con, const char *s, unsigned n)
+static void samsung_early_write(struct console *con, const char *s,
+ unsigned int n)
{
struct earlycon_device *dev = con->data;
@@ -2572,7 +2665,7 @@ OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
s3c2440_early_console_setup);
-/* S5PV210, EXYNOS */
+/* S5PV210, Exynos */
static struct samsung_early_console_data s5pv210_early_console_data = {
.txfull_mask = S5PV210_UFSTAT_TXFULL,
};
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 329aced26bd8..bd5e7e9938ce 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -15,10 +15,6 @@
* "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
*/
-#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -668,7 +664,7 @@ static int sbd_map_port(struct uart_port *uport)
struct sbd_duart *duart = sport->duart;
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
DUART_CHANREG_SPACING);
if (!uport->membase) {
printk(err);
@@ -676,7 +672,7 @@ static int sbd_map_port(struct uart_port *uport)
}
if (!sport->memctrl)
- sport->memctrl = ioremap_nocache(duart->mapctrl,
+ sport->memctrl = ioremap(duart->mapctrl,
DUART_CHANREG_SPACING);
if (!sport->memctrl) {
printk(err);
@@ -813,6 +809,7 @@ static void __init sbd_probe_duarts(void)
uport->ops = &sbd_ops;
uport->line = line;
uport->mapbase = SBD_CHANREGS(line);
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SB1250_DUART_CONSOLE);
}
}
}
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index d2b77aae42ae..10cc16a71f26 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -7,10 +7,6 @@
* Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
*/
-#if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -1000,6 +996,7 @@ static int sccnxp_probe(struct platform_device *pdev)
s->port[i].regshift = s->pdata.reg_shift;
s->port[i].uartclk = uartclk;
s->port[i].ops = &sccnxp_ops;
+ s->port[i].has_sysrq = IS_ENABLED(CONFIG_SERIAL_SCCNXP_CONSOLE);
uart_add_one_port(&s->uart, &s->port[i]);
/* Set direction to input */
if (s->chip->flags & SCCNXP_HAVE_IO)
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index b6ace6290e23..33034b852a51 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -141,6 +141,7 @@ struct tegra_uart_port {
int configured_rate;
bool use_rx_pio;
bool use_tx_pio;
+ bool rx_dma_active;
};
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
@@ -533,11 +534,12 @@ static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
struct circ_buf *xmit = &tup->uport.state->xmit;
dma_addr_t tx_phys_addr;
- dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
- UART_XMIT_SIZE, DMA_TO_DEVICE);
-
tup->tx_bytes = count & ~(0xF);
tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
+
+ dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
+ tup->tx_bytes, DMA_TO_DEVICE);
+
tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
@@ -679,7 +681,7 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
return;
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
- TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ count, DMA_FROM_DEVICE);
copied = tty_insert_flip_string(tty,
((unsigned char *)(tup->rx_dma_buf_virt)), count);
if (copied != count) {
@@ -687,7 +689,7 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
}
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
- TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ count, DMA_TO_DEVICE);
}
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
@@ -731,6 +733,7 @@ static void tegra_uart_rx_dma_complete(void *args)
if (tup->rts_active)
set_rts(tup, false);
+ tup->rx_dma_active = false;
tegra_uart_rx_buffer_push(tup, 0);
tegra_uart_start_rx_dma(tup);
@@ -742,18 +745,27 @@ done:
spin_unlock_irqrestore(&u->lock, flags);
}
-static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
struct dma_tx_state state;
- /* Deactivate flow control to stop sender */
- if (tup->rts_active)
- set_rts(tup, false);
+ if (!tup->rx_dma_active)
+ return;
dmaengine_terminate_all(tup->rx_dma_chan);
dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+
tegra_uart_rx_buffer_push(tup, state.residue);
- tegra_uart_start_rx_dma(tup);
+ tup->rx_dma_active = false;
+}
+
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+{
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+ tegra_uart_terminate_rx_dma(tup);
if (tup->rts_active)
set_rts(tup, true);
@@ -763,6 +775,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
+ if (tup->rx_dma_active)
+ return 0;
+
tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
@@ -771,10 +786,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
return -EIO;
}
+ tup->rx_dma_active = true;
tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
tup->rx_dma_desc->callback_param = tup;
- dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
- count, DMA_TO_DEVICE);
tup->rx_bytes_requested = count;
tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
dma_async_issue_pending(tup->rx_dma_chan);
@@ -820,6 +834,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
struct uart_port *u = &tup->uport;
unsigned long iir;
unsigned long ier;
+ bool is_rx_start = false;
bool is_rx_int = false;
unsigned long flags;
@@ -832,10 +847,12 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
if (tup->rx_in_progress) {
ier = tup->ier_shadow;
ier |= (UART_IER_RLSI | UART_IER_RTOIE |
- TEGRA_UART_IER_EORD);
+ TEGRA_UART_IER_EORD | UART_IER_RDI);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
}
+ } else if (is_rx_start) {
+ tegra_uart_start_rx_dma(tup);
}
spin_unlock_irqrestore(&u->lock, flags);
return IRQ_HANDLED;
@@ -854,17 +871,23 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
case 4: /* End of data */
case 6: /* Rx timeout */
- case 2: /* Receive */
- if (!tup->use_rx_pio && !is_rx_int) {
- is_rx_int = true;
+ if (!tup->use_rx_pio) {
+ is_rx_int = tup->rx_in_progress;
/* Disable Rx interrupts */
ier = tup->ier_shadow;
- ier |= UART_IER_RDI;
- tegra_uart_write(tup, ier, UART_IER);
ier &= ~(UART_IER_RDI | UART_IER_RLSI |
UART_IER_RTOIE | TEGRA_UART_IER_EORD);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
+ break;
+ }
+ /* Fall through */
+ case 2: /* Receive */
+ if (!tup->use_rx_pio) {
+ is_rx_start = tup->rx_in_progress;
+ tup->ier_shadow &= ~UART_IER_RDI;
+ tegra_uart_write(tup, tup->ier_shadow,
+ UART_IER);
} else {
do_handle_rx_pio(tup);
}
@@ -886,7 +909,6 @@ static void tegra_uart_stop_rx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct tty_port *port = &tup->uport.state->port;
- struct dma_tx_state state;
unsigned long ier;
if (tup->rts_active)
@@ -903,13 +925,11 @@ static void tegra_uart_stop_rx(struct uart_port *u)
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
tup->rx_in_progress = 0;
- if (tup->rx_dma_chan && !tup->use_rx_pio) {
- dmaengine_terminate_all(tup->rx_dma_chan);
- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
- tegra_uart_rx_buffer_push(tup, state.residue);
- } else {
+
+ if (!tup->use_rx_pio)
+ tegra_uart_terminate_rx_dma(tup);
+ else
tegra_uart_handle_rx_pio(tup, port);
- }
}
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
@@ -1052,12 +1072,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
tup->fcr_shadow |= UART_FCR_DMA_SELECT;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
-
- ret = tegra_uart_start_rx_dma(tup);
- if (ret < 0) {
- dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
- return ret;
- }
} else {
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
}
@@ -1067,10 +1081,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
- * If using DMA mode, enable EORD instead of receive interrupt which
- * will interrupt after the UART is done with the receive instead of
- * the interrupt when the FIFO "threshold" is reached.
- *
* EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
* the DATA is sitting in the FIFO and couldn't be transferred to the
* DMA as the DMA size alignment (4 bytes) is not met. EORD will be
@@ -1081,11 +1091,14 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
* then the EORD.
*/
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+
+ /*
+ * If using DMA mode, enable EORD interrupt to notify about RX
+ * completion.
+ */
if (!tup->use_rx_pio)
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
- TEGRA_UART_IER_EORD;
- else
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+ tup->ier_shadow |= TEGRA_UART_IER_EORD;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
return 0;
@@ -1140,6 +1153,9 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_release_channel(dma_chan);
return -ENOMEM;
}
+ dma_sync_single_for_device(tup->uport.dev, dma_phys,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE,
+ DMA_TO_DEVICE);
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
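The rx_dma_active flag added above makes RX-DMA teardown idempotent: the completion callback, the RX-start interrupt path and tegra_uart_stop_rx() can race, and only the first caller may terminate the channel and push the residue. A condensed model of that guard, using a stand-in struct rather than the full tegra_uart_port, with the tty push elided:

#include <linux/dmaengine.h>

struct foo_rx_dma {
        struct dma_chan *chan;
        dma_cookie_t cookie;
        bool active;
};

static void foo_terminate_rx_dma(struct foo_rx_dma *rx)
{
        struct dma_tx_state state;

        if (!rx->active)        /* someone already tore the transfer down */
                return;

        dmaengine_terminate_all(rx->chan);
        dmaengine_tx_status(rx->chan, rx->cookie, &state);
        /* push state.residue received bytes to the tty layer here */
        rx->active = false;
}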
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index b0a6eb106edb..76e506ee335c 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2603,6 +2603,7 @@ struct tty_driver *uart_console_device(struct console *co, int *index)
*index = co->index;
return p->tty_driver;
}
+EXPORT_SYMBOL_GPL(uart_console_device);
static ssize_t uart_get_attr_uartclk(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -2834,6 +2835,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
+ tty_port_link_device(port, drv->tty_driver, uport->line);
uart_configure_port(drv, state, uport);
port->console = uart_console(uport);
@@ -3080,6 +3082,89 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
}
EXPORT_SYMBOL_GPL(uart_insert_char);
+int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
+ return 0;
+
+ if (!port->has_sysrq || !port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ handle_sysrq(ch);
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_handle_sysrq_char);
+
+int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
+ return 0;
+
+ if (!port->has_sysrq || !port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
+
+void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock_irqrestore(&port->lock, irqflags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+int uart_handle_break(struct uart_port *port)
+{
+ struct uart_state *state = port->state;
+
+ if (port->handle_break)
+ port->handle_break(port);
+
+ if (port->has_sysrq) {
+ if (port->cons && port->cons->index == port->line) {
+ if (!port->sysrq) {
+ port->sysrq = jiffies + HZ*5;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ }
+
+ if (port->flags & UPF_SAK)
+ do_SAK(state->port.tty);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_handle_break);
+
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
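These generic helpers are what let every driver in this series drop its SUPPORT_SYSRQ ifdef block: has_sysrq is set once at probe time, and the IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL) check lets the compiler discard the code entirely when serial sysrq is configured out. A hedged sketch of the intended driver-side usage (hypothetical foo_uart; the FIFO accessors are assumptions):

static irqreturn_t foo_uart_irq(int irq, void *dev_id)
{
        struct uart_port *port = dev_id;
        unsigned long flags;
        unsigned int ch;

        spin_lock_irqsave(&port->lock, flags);

        while (foo_rx_ready(port)) {
                ch = foo_read_char(port);

                /* Swallow the character if it completed a sysrq sequence. */
                if (uart_prepare_sysrq_char(port, ch))
                        continue;

                uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
        }

        tty_flip_buffer_push(&port->state->port);

        /* Drops the lock, then runs any queued sysrq outside of it. */
        uart_unlock_and_check_sysrq(port, flags);

        return IRQ_HANDLED;
}

Deferring handle_sysrq() to uart_unlock_and_check_sysrq() keeps sysrq processing off the port lock, which is what allows console output from the sysrq handler without deadlocking.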
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index d22ccb32aa9b..b4d89e31730e 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -12,10 +12,6 @@
* Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
*/
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -1095,6 +1091,7 @@ static int serial_txx9_probe(struct platform_device *dev)
port.flags = p->flags;
port.mapbase = p->mapbase;
port.dev = &dev->dev;
+ port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_TXX9_CONSOLE);
ret = serial_txx9_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
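
The serial_txx9 change above is the first of many identical conversions in this merge: the per-driver compile-time SUPPORT_SYSRQ define goes away, and each driver instead sets the new run-time flag on its uart_port at probe time. The pattern, with CONFIG_SERIAL_FOO_CONSOLE standing in for each driver's console option:

    /* before: compile-time, per translation unit */
    #if defined(CONFIG_SERIAL_FOO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
    #define SUPPORT_SYSRQ
    #endif

    /* after: run-time, per port, set once in probe */
    port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_FOO_CONSOLE);
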
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 58bf9d496ba5..c073aa7001c4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -15,10 +15,6 @@
* Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
* Removed SH7300 support (Jul 2007).
*/
-#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#undef DEBUG
#include <linux/clk.h>
@@ -2680,7 +2676,7 @@ static int sci_remap_port(struct uart_port *port)
return 0;
if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
- port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
+ port->membase = ioremap(port->mapbase, sport->reg_size);
if (unlikely(!port->membase)) {
dev_err(port->dev, "can't remap port#%d\n", port->line);
return -ENXIO;
@@ -2887,6 +2883,7 @@ static int sci_init_single(struct platform_device *dev,
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
port->line = index;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SH_SCI_CONSOLE);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res == NULL)
@@ -3015,12 +3012,9 @@ static void serial_console_write(struct console *co, const char *s,
unsigned long flags;
int locked = 1;
-#if defined(SUPPORT_SYSRQ)
if (port->sysrq)
locked = 0;
- else
-#endif
- if (oops_in_progress)
+ else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
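
The sh-sci hunk above folds the old #ifdef into plain C now that port->sysrq always exists. The resulting locking pattern is the canonical one for console write callbacks (a sketch; my_console_write(), my_console_port() and my_putchar() are illustrative):

    static void my_console_write(struct console *co, const char *s,
                                 unsigned int count)
    {
            struct uart_port *port = my_console_port(co);  /* illustrative */
            unsigned long flags;
            int locked = 1;

            if (port->sysrq)                /* called under port->lock */
                    locked = 0;
            else if (oops_in_progress)      /* never deadlock an oops */
                    locked = spin_trylock_irqsave(&port->lock, flags);
            else
                    spin_lock_irqsave(&port->lock, flags);

            uart_console_write(port, s, count, my_putchar);

            if (locked)
                    spin_unlock_irqrestore(&port->lock, flags);
    }
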
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 31df23502562..3d3c70634589 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -3,10 +3,6 @@
* Copyright (C) 2012-2015 Spreadtrum Communications Inc.
*/
-#if defined(CONFIG_SERIAL_SPRD_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -679,6 +675,9 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
if (ims & SPRD_IMSR_TIMEOUT)
serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
+ if (ims & SPRD_IMSR_BREAK_DETECT)
+ serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT);
+
if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT |
SPRD_IMSR_TIMEOUT))
sprd_rx(port);
@@ -1227,6 +1226,7 @@ static int sprd_probe(struct platform_device *pdev)
up->fifosize = SPRD_FIFO_SIZE;
up->ops = &serial_sprd_ops;
up->flags = UPF_BOOT_AUTOCONF;
+ up->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPRD_CONSOLE);
ret = sprd_clk_init(up);
if (ret)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 7971997cdead..e7048515a79c 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -5,10 +5,6 @@
* Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
*/
-#if defined(CONFIG_SERIAL_ST_ASC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/console.h>
@@ -508,7 +504,6 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
- struct device_node *np = port->dev->of_node;
struct gpio_desc *gpiod;
unsigned int baud;
u32 ctrl_val;
@@ -570,13 +565,12 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
pinctrl_select_state(ascport->pinctrl,
ascport->states[NO_HW_FLOWCTRL]);
- gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
- "rts",
- &np->fwnode,
- GPIOD_OUT_LOW,
- np->name);
- if (!IS_ERR(gpiod))
+ gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
+ if (!IS_ERR(gpiod)) {
+ gpiod_set_consumer_name(gpiod,
+ port->dev->of_node->name);
ascport->rts = gpiod;
+ }
}
}
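
The st-asc hunk above swaps the deprecated devm_fwnode_get_gpiod_from_child() lookup for the plain consumer API, setting explicitly the label that used to come from the fwnode helper. The idiom (a sketch, error handling trimmed; "rts" matches the hunk and expects an rts-gpios DT property):

    struct gpio_desc *gpiod;

    gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
    if (!IS_ERR(gpiod)) {
            gpiod_set_consumer_name(gpiod, port->dev->of_node->name);
            /* drive it later with gpiod_set_value(gpiod, 0/1) */
    }
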
@@ -730,6 +724,7 @@ static int asc_init_port(struct asc_port *ascport,
port->fifosize = ASC_FIFO_SIZE;
port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ST_ASC_CONSOLE);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->membase = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 2f72514d63ed..5e93e8d40f59 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -8,10 +8,6 @@
* Inspired by st-asc.c from STMicroelectronics (c)
*/
-#if defined(CONFIG_SERIAL_STM32_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -926,6 +922,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
port->ops = &stm32_uart_ops;
port->dev = &pdev->dev;
port->fifosize = stm32port->info->cfg.fifosize;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
ret = platform_get_irq(pdev, 0);
if (ret <= 0)
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index f8503f8fc44e..eafada8fb6fa 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -25,10 +25,6 @@
#include <asm/irq.h>
#include <asm/setup.h>
-#if defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -552,6 +548,7 @@ static int hv_probe(struct platform_device *op)
sunhv_port = port;
+ port->has_sysrq = 1;
port->line = 0;
port->ops = &sunhv_pops;
port->type = PORT_SUNHV;
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 72131b5e132e..1eb703c980e0 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -40,10 +40,6 @@
#include <asm/prom.h>
#include <asm/setup.h>
-#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -985,6 +981,7 @@ static int sunsab_init_one(struct uart_sunsab_port *up,
up->port.fifosize = SAB82532_XMIT_FIFO_SIZE;
up->port.iotype = UPIO_MEM;
+ up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSAB_CONSOLE);
writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc);
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 4db6aaa330b2..8ce9a7a256e5 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -44,10 +44,6 @@
#include <asm/prom.h>
#include <asm/setup.h>
-#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -1475,6 +1471,7 @@ static int su_probe(struct platform_device *op)
up->port.type = PORT_UNKNOWN;
up->port.uartclk = (SU_BASE_BAUD * 16);
+ up->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNSU_CONSOLE);
err = 0;
if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index bc7af8b08a72..103ab8c556e7 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -40,10 +40,6 @@
#include <asm/prom.h>
#include <asm/setup.h>
-#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/serial_core.h>
#include <linux/sunserialcore.h>
@@ -1444,6 +1440,7 @@ static int zs_probe(struct platform_device *op)
up[0].port.line = (inst * 2) + 0;
up[0].port.dev = &op->dev;
up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
+ up[0].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE);
if (keyboard_mouse)
up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
sunzilog_init_hw(&up[0]);
@@ -1461,6 +1458,7 @@ static int zs_probe(struct platform_device *op)
up[1].port.line = (inst * 2) + 1;
up[1].port.dev = &op->dev;
up[1].flags |= 0;
+ up[1].port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SUNZILOG_CONSOLE);
if (keyboard_mouse)
up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
sunzilog_init_hw(&up[1]);
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index a0555ae2b1ef..3c8c662c69e2 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -32,7 +32,11 @@
#include <soc/fsl/qe/ucc_slow.h>
#include <linux/firmware.h>
-#include <asm/reg.h>
+#include <soc/fsl/cpm.h>
+
+#ifdef CONFIG_PPC32
+#include <asm/reg.h> /* mfspr, SPRN_SVR */
+#endif
/*
* The GUMR flag for Soft UART. This would normally be defined in qe.h,
@@ -257,11 +261,11 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
struct qe_bd *bdp = qe_port->tx_bd_base;
while (1) {
- if (in_be16(&bdp->status) & BD_SC_READY)
+ if (qe_ioread16be(&bdp->status) & BD_SC_READY)
/* This BD is not done, so return "not done" */
return 0;
- if (in_be16(&bdp->status) & BD_SC_WRAP)
+ if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
/*
* This BD is done and it's the last one, so return
* "done"
@@ -307,7 +311,7 @@ static void qe_uart_stop_tx(struct uart_port *port)
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
- clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
+ qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
}
/*
@@ -332,20 +336,18 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
struct uart_port *port = &qe_port->port;
struct circ_buf *xmit = &port->state->xmit;
- bdp = qe_port->rx_cur;
-
/* Handle xon/xoff */
if (port->x_char) {
/* Pick next descriptor and fill from buffer */
bdp = qe_port->tx_cur;
- p = qe2cpu_addr(bdp->buf, qe_port);
+ p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
*p++ = port->x_char;
- out_be16(&bdp->length, 1);
- setbits16(&bdp->status, BD_SC_READY);
+ qe_iowrite16be(1, &bdp->length);
+ qe_setbits_be16(&bdp->status, BD_SC_READY);
/* Get next BD. */
- if (in_be16(&bdp->status) & BD_SC_WRAP)
+ if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->tx_bd_base;
else
bdp++;
@@ -364,10 +366,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
/* Pick next descriptor and fill from buffer */
bdp = qe_port->tx_cur;
- while (!(in_be16(&bdp->status) & BD_SC_READY) &&
+ while (!(qe_ioread16be(&bdp->status) & BD_SC_READY) &&
(xmit->tail != xmit->head)) {
count = 0;
- p = qe2cpu_addr(bdp->buf, qe_port);
+ p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
while (count < qe_port->tx_fifosize) {
*p++ = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -377,11 +379,11 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
break;
}
- out_be16(&bdp->length, count);
- setbits16(&bdp->status, BD_SC_READY);
+ qe_iowrite16be(count, &bdp->length);
+ qe_setbits_be16(&bdp->status, BD_SC_READY);
/* Get next BD. */
- if (in_be16(&bdp->status) & BD_SC_WRAP)
+ if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->tx_bd_base;
else
bdp++;
@@ -414,12 +416,12 @@ static void qe_uart_start_tx(struct uart_port *port)
container_of(port, struct uart_qe_port, port);
/* If we currently are transmitting, then just return */
- if (in_be16(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
+ if (qe_ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
return;
/* Otherwise, pump the port and start transmission */
if (qe_uart_tx_pump(qe_port))
- setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
+ qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
}
/*
@@ -430,7 +432,7 @@ static void qe_uart_stop_rx(struct uart_port *port)
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
- clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
+ qe_clrbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
}
/* Start or stop sending break signal
@@ -469,14 +471,14 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
*/
bdp = qe_port->rx_cur;
while (1) {
- status = in_be16(&bdp->status);
+ status = qe_ioread16be(&bdp->status);
/* If this one is empty, then we assume we've read them all */
if (status & BD_SC_EMPTY)
break;
/* get number of characters, and check space in RX buffer */
- i = in_be16(&bdp->length);
+ i = qe_ioread16be(&bdp->length);
/* If we don't have enough room in RX buffer for the entire BD,
* then we try later, which will be the next RX interrupt.
@@ -487,7 +489,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
}
/* get pointer */
- cp = qe2cpu_addr(bdp->buf, qe_port);
+ cp = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
/* loop through the buffer */
while (i-- > 0) {
@@ -507,9 +509,10 @@ error_return:
}
/* This BD is ready to be used again. Clear status. get next */
- clrsetbits_be16(&bdp->status, BD_SC_BR | BD_SC_FR | BD_SC_PR |
- BD_SC_OV | BD_SC_ID, BD_SC_EMPTY);
- if (in_be16(&bdp->status) & BD_SC_WRAP)
+ qe_clrsetbits_be16(&bdp->status,
+ BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID,
+ BD_SC_EMPTY);
+ if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->rx_bd_base;
else
bdp++;
@@ -551,9 +554,7 @@ handle_error:
/* Overrun does not affect the current character ! */
if (status & BD_SC_OV)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
-#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
-#endif
goto error_return;
}
@@ -568,8 +569,8 @@ static irqreturn_t qe_uart_int(int irq, void *data)
u16 events;
/* Clear the interrupts */
- events = in_be16(&uccp->ucce);
- out_be16(&uccp->ucce, events);
+ events = qe_ioread16be(&uccp->ucce);
+ qe_iowrite16be(events, &uccp->ucce);
if (events & UCC_UART_UCCE_BRKE)
uart_handle_break(&qe_port->port);
@@ -600,17 +601,17 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
bdp = qe_port->rx_bd_base;
qe_port->rx_cur = qe_port->rx_bd_base;
for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
- out_be16(&bdp->status, BD_SC_EMPTY | BD_SC_INTRPT);
- out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
- out_be16(&bdp->length, 0);
+ qe_iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+ qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ qe_iowrite16be(0, &bdp->length);
bd_virt += qe_port->rx_fifosize;
bdp++;
}
/* */
- out_be16(&bdp->status, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT);
- out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
- out_be16(&bdp->length, 0);
+ qe_iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+ qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ qe_iowrite16be(0, &bdp->length);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
@@ -621,21 +622,21 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
qe_port->tx_cur = qe_port->tx_bd_base;
bdp = qe_port->tx_bd_base;
for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
- out_be16(&bdp->status, BD_SC_INTRPT);
- out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
- out_be16(&bdp->length, 0);
+ qe_iowrite16be(BD_SC_INTRPT, &bdp->status);
+ qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ qe_iowrite16be(0, &bdp->length);
bd_virt += qe_port->tx_fifosize;
bdp++;
}
/* Loopback requires the preamble bit to be set on the first TX BD */
#ifdef LOOPBACK
- setbits16(&qe_port->tx_cur->status, BD_SC_P);
+ qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P);
#endif
- out_be16(&bdp->status, BD_SC_WRAP | BD_SC_INTRPT);
- out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
- out_be16(&bdp->length, 0);
+ qe_iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status);
+ qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+ qe_iowrite16be(0, &bdp->length);
}
/*
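
The ucc_uart hunks in this file replace the PowerPC-only in_be16()/out_be16()/setbits16() accessor family with the soc/fsl/qe qe_ioread*/qe_iowrite*/qe_*bits_be* wrappers so the driver can build on other architectures. The bit helpers are read-modify-write wrappers over big-endian MMIO; a sketch of what they amount to (the real ones live in the QE headers, these names are illustrative):

    static inline void my_setbits_be16(void __iomem *reg, u16 set)
    {
            iowrite16be(ioread16be(reg) | set, reg);
    }

    static inline void my_clrsetbits_be16(void __iomem *reg,
                                          u16 clr, u16 set)
    {
            iowrite16be((ioread16be(reg) & ~clr) | set, reg);
    }
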
@@ -657,78 +658,74 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
/* Program the UCC UART parameter RAM */
- out_8(&uccup->common.rbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
- out_8(&uccup->common.tbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
- out_be16(&uccup->common.mrblr, qe_port->rx_fifosize);
- out_be16(&uccup->maxidl, 0x10);
- out_be16(&uccup->brkcr, 1);
- out_be16(&uccup->parec, 0);
- out_be16(&uccup->frmec, 0);
- out_be16(&uccup->nosec, 0);
- out_be16(&uccup->brkec, 0);
- out_be16(&uccup->uaddr[0], 0);
- out_be16(&uccup->uaddr[1], 0);
- out_be16(&uccup->toseq, 0);
+ qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr);
+ qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr);
+ qe_iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr);
+ qe_iowrite16be(0x10, &uccup->maxidl);
+ qe_iowrite16be(1, &uccup->brkcr);
+ qe_iowrite16be(0, &uccup->parec);
+ qe_iowrite16be(0, &uccup->frmec);
+ qe_iowrite16be(0, &uccup->nosec);
+ qe_iowrite16be(0, &uccup->brkec);
+ qe_iowrite16be(0, &uccup->uaddr[0]);
+ qe_iowrite16be(0, &uccup->uaddr[1]);
+ qe_iowrite16be(0, &uccup->toseq);
for (i = 0; i < 8; i++)
- out_be16(&uccup->cchars[i], 0xC000);
- out_be16(&uccup->rccm, 0xc0ff);
+ qe_iowrite16be(0xC000, &uccup->cchars[i]);
+ qe_iowrite16be(0xc0ff, &uccup->rccm);
/* Configure the GUMR registers for UART */
if (soft_uart) {
/* Soft-UART requires a 1X multiplier for TX */
- clrsetbits_be32(&uccp->gumr_l,
- UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
- UCC_SLOW_GUMR_L_RDCR_MASK,
- UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 |
- UCC_SLOW_GUMR_L_RDCR_16);
-
- clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
- UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
+ qe_clrsetbits_be32(&uccp->gumr_l,
+ UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK,
+ UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 | UCC_SLOW_GUMR_L_RDCR_16);
+
+ qe_clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
+ UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
} else {
- clrsetbits_be32(&uccp->gumr_l,
- UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
- UCC_SLOW_GUMR_L_RDCR_MASK,
- UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 |
- UCC_SLOW_GUMR_L_RDCR_16);
-
- clrsetbits_be32(&uccp->gumr_h,
- UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX,
- UCC_SLOW_GUMR_H_RFW);
+ qe_clrsetbits_be32(&uccp->gumr_l,
+ UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK,
+ UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16);
+
+ qe_clrsetbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX,
+ UCC_SLOW_GUMR_H_RFW);
}
#ifdef LOOPBACK
- clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
- UCC_SLOW_GUMR_L_DIAG_LOOP);
- clrsetbits_be32(&uccp->gumr_h,
- UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
- UCC_SLOW_GUMR_H_CDS);
+ qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
+ UCC_SLOW_GUMR_L_DIAG_LOOP);
+ qe_clrsetbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
+ UCC_SLOW_GUMR_H_CDS);
#endif
/* Disable rx interrupts and clear all pending events. */
- out_be16(&uccp->uccm, 0);
- out_be16(&uccp->ucce, 0xffff);
- out_be16(&uccp->udsr, 0x7e7e);
+ qe_iowrite16be(0, &uccp->uccm);
+ qe_iowrite16be(0xffff, &uccp->ucce);
+ qe_iowrite16be(0x7e7e, &uccp->udsr);
/* Initialize UPSMR */
- out_be16(&uccp->upsmr, 0);
+ qe_iowrite16be(0, &uccp->upsmr);
if (soft_uart) {
- out_be16(&uccup->supsmr, 0x30);
- out_be16(&uccup->res92, 0);
- out_be32(&uccup->rx_state, 0);
- out_be32(&uccup->rx_cnt, 0);
- out_8(&uccup->rx_bitmark, 0);
- out_8(&uccup->rx_length, 10);
- out_be32(&uccup->dump_ptr, 0x4000);
- out_8(&uccup->rx_temp_dlst_qe, 0);
- out_be32(&uccup->rx_frame_rem, 0);
- out_8(&uccup->rx_frame_rem_size, 0);
+ qe_iowrite16be(0x30, &uccup->supsmr);
+ qe_iowrite16be(0, &uccup->res92);
+ qe_iowrite32be(0, &uccup->rx_state);
+ qe_iowrite32be(0, &uccup->rx_cnt);
+ qe_iowrite8(0, &uccup->rx_bitmark);
+ qe_iowrite8(10, &uccup->rx_length);
+ qe_iowrite32be(0x4000, &uccup->dump_ptr);
+ qe_iowrite8(0, &uccup->rx_temp_dlst_qe);
+ qe_iowrite32be(0, &uccup->rx_frame_rem);
+ qe_iowrite8(0, &uccup->rx_frame_rem_size);
/* Soft-UART requires TX to be 1X */
- out_8(&uccup->tx_mode,
- UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1);
- out_be16(&uccup->tx_state, 0);
- out_8(&uccup->resD4, 0);
- out_be16(&uccup->resD5, 0);
+ qe_iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1,
+ &uccup->tx_mode);
+ qe_iowrite16be(0, &uccup->tx_state);
+ qe_iowrite8(0, &uccup->resD4);
+ qe_iowrite16be(0, &uccup->resD5);
/* Set UART mode.
* Enable receive and transmit.
@@ -742,22 +739,19 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
* ...
* 6.Receiver must use 16x over sampling
*/
- clrsetbits_be32(&uccp->gumr_l,
- UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
- UCC_SLOW_GUMR_L_RDCR_MASK,
- UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 |
- UCC_SLOW_GUMR_L_RDCR_16);
+ qe_clrsetbits_be32(&uccp->gumr_l,
+ UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK | UCC_SLOW_GUMR_L_RDCR_MASK,
+ UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 | UCC_SLOW_GUMR_L_RDCR_16);
- clrsetbits_be32(&uccp->gumr_h,
- UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
- UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX |
- UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
+ qe_clrsetbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
+ UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
#ifdef LOOPBACK
- clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
- UCC_SLOW_GUMR_L_DIAG_LOOP);
- clrbits32(&uccp->gumr_h, UCC_SLOW_GUMR_H_CTSP |
- UCC_SLOW_GUMR_H_CDS);
+ qe_clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
+ UCC_SLOW_GUMR_L_DIAG_LOOP);
+ qe_clrbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_CDS);
#endif
cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
@@ -800,7 +794,7 @@ static int qe_uart_startup(struct uart_port *port)
}
/* Startup rx-int */
- setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
+ qe_setbits_be16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX);
return 0;
@@ -836,7 +830,7 @@ static void qe_uart_shutdown(struct uart_port *port)
/* Stop uarts */
ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
- clrbits16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
+ qe_clrbits_be16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
/* Shut them really down and reinit buffer descriptors */
ucc_slow_graceful_stop_tx(qe_port->us_private);
@@ -856,9 +850,9 @@ static void qe_uart_set_termios(struct uart_port *port,
struct ucc_slow __iomem *uccp = qe_port->uccp;
unsigned int baud;
unsigned long flags;
- u16 upsmr = in_be16(&uccp->upsmr);
+ u16 upsmr = qe_ioread16be(&uccp->upsmr);
struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
- u16 supsmr = in_be16(&uccup->supsmr);
+ u16 supsmr = qe_ioread16be(&uccup->supsmr);
u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
/* Character length programmed into the mode register is the
@@ -956,10 +950,10 @@ static void qe_uart_set_termios(struct uart_port *port,
/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
- out_be16(&uccp->upsmr, upsmr);
+ qe_iowrite16be(upsmr, &uccp->upsmr);
if (soft_uart) {
- out_be16(&uccup->supsmr, supsmr);
- out_8(&uccup->rx_length, char_length);
+ qe_iowrite16be(supsmr, &uccup->supsmr);
+ qe_iowrite8(char_length, &uccup->rx_length);
/* Soft-UART requires a 1X multiplier for TX */
qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
@@ -1101,6 +1095,8 @@ static const struct uart_ops qe_uart_pops = {
.verify_port = qe_uart_verify_port,
};
+
+#ifdef CONFIG_PPC32
/*
* Obtain the SOC model number and revision level
*
@@ -1188,70 +1184,86 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
release_firmware(fw);
}
-static int ucc_uart_probe(struct platform_device *ofdev)
+static int soft_uart_init(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
- const unsigned int *iprop; /* Integer OF properties */
- const char *sprop; /* String OF properties */
- struct uart_qe_port *qe_port = NULL;
- struct resource res;
+ struct qe_firmware_info *qe_fw_info;
int ret;
- /*
- * Determine if we need Soft-UART mode
- */
if (of_find_property(np, "soft-uart", NULL)) {
dev_dbg(&ofdev->dev, "using Soft-UART mode\n");
soft_uart = 1;
+ } else {
+ return 0;
}
- /*
- * If we are using Soft-UART, determine if we need to upload the
- * firmware, too.
- */
- if (soft_uart) {
- struct qe_firmware_info *qe_fw_info;
-
- qe_fw_info = qe_get_firmware_info();
-
- /* Check if the firmware has been uploaded. */
- if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
- firmware_loaded = 1;
- } else {
- char filename[32];
- unsigned int soc;
- unsigned int rev_h;
- unsigned int rev_l;
-
- soc = soc_info(&rev_h, &rev_l);
- if (!soc) {
- dev_err(&ofdev->dev, "unknown CPU model\n");
- return -ENXIO;
- }
- sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
- soc, rev_h, rev_l);
-
- dev_info(&ofdev->dev, "waiting for firmware %s\n",
- filename);
+ qe_fw_info = qe_get_firmware_info();
- /*
- * We call request_firmware_nowait instead of
- * request_firmware so that the driver can load and
- * initialize the ports without holding up the rest of
- * the kernel. If hotplug support is enabled in the
- * kernel, then we use it.
- */
- ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG, filename, &ofdev->dev,
- GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
- if (ret) {
- dev_err(&ofdev->dev,
- "could not load firmware %s\n",
- filename);
- return ret;
- }
+ /* Check if the firmware has been uploaded. */
+ if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
+ firmware_loaded = 1;
+ } else {
+ char filename[32];
+ unsigned int soc;
+ unsigned int rev_h;
+ unsigned int rev_l;
+
+ soc = soc_info(&rev_h, &rev_l);
+ if (!soc) {
+ dev_err(&ofdev->dev, "unknown CPU model\n");
+ return -ENXIO;
+ }
+ sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
+ soc, rev_h, rev_l);
+
+ dev_info(&ofdev->dev, "waiting for firmware %s\n",
+ filename);
+
+ /*
+ * We call request_firmware_nowait instead of
+ * request_firmware so that the driver can load and
+ * initialize the ports without holding up the rest of
+ * the kernel. If hotplug support is enabled in the
+ * kernel, then we use it.
+ */
+ ret = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG, filename, &ofdev->dev,
+ GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
+ if (ret) {
+ dev_err(&ofdev->dev,
+ "could not load firmware %s\n",
+ filename);
+ return ret;
}
}
+ return 0;
+}
+
+#else /* !CONFIG_PPC32 */
+
+static int soft_uart_init(struct platform_device *ofdev)
+{
+ return 0;
+}
+
+#endif
+
+
+static int ucc_uart_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ const char *sprop; /* String OF properties */
+ struct uart_qe_port *qe_port = NULL;
+ struct resource res;
+ u32 val;
+ int ret;
+
+ /*
+ * Determine if we need Soft-UART mode
+ */
+ ret = soft_uart_init(ofdev);
+ if (ret)
+ return ret;
qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL);
if (!qe_port) {
@@ -1274,23 +1286,20 @@ static int ucc_uart_probe(struct platform_device *ofdev)
/* Get the UCC number (device ID) */
/* UCCs are numbered 1-7 */
- iprop = of_get_property(np, "cell-index", NULL);
- if (!iprop) {
- iprop = of_get_property(np, "device-id", NULL);
- if (!iprop) {
- dev_err(&ofdev->dev, "UCC is unspecified in "
- "device tree\n");
+ if (of_property_read_u32(np, "cell-index", &val)) {
+ if (of_property_read_u32(np, "device-id", &val)) {
+ dev_err(&ofdev->dev, "UCC is unspecified in device tree\n");
ret = -EINVAL;
goto out_free;
}
}
- if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) {
- dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop);
+ if (val < 1 || val > UCC_MAX_NUM) {
+ dev_err(&ofdev->dev, "no support for UCC%u\n", val);
ret = -ENODEV;
goto out_free;
}
- qe_port->ucc_num = *iprop - 1;
+ qe_port->ucc_num = val - 1;
/*
* In the future, we should not require the BRG to be specified in the
@@ -1334,13 +1343,12 @@ static int ucc_uart_probe(struct platform_device *ofdev)
}
/* Get the port number, numbered 0-3 */
- iprop = of_get_property(np, "port-number", NULL);
- if (!iprop) {
+ if (of_property_read_u32(np, "port-number", &val)) {
dev_err(&ofdev->dev, "missing port-number in device tree\n");
ret = -EINVAL;
goto out_free;
}
- qe_port->port.line = *iprop;
+ qe_port->port.line = val;
if (qe_port->port.line >= UCC_MAX_UART) {
dev_err(&ofdev->dev, "port-number must be 0-%u\n",
UCC_MAX_UART - 1);
@@ -1370,31 +1378,36 @@ static int ucc_uart_probe(struct platform_device *ofdev)
}
}
- iprop = of_get_property(np, "brg-frequency", NULL);
- if (!iprop) {
+ if (of_property_read_u32(np, "brg-frequency", &val)) {
dev_err(&ofdev->dev,
"missing brg-frequency in device tree\n");
ret = -EINVAL;
goto out_np;
}
- if (*iprop)
- qe_port->port.uartclk = *iprop;
+ if (val)
+ qe_port->port.uartclk = val;
else {
+ if (!IS_ENABLED(CONFIG_PPC32)) {
+ dev_err(&ofdev->dev,
+ "invalid brg-frequency in device tree\n");
+ ret = -EINVAL;
+ goto out_np;
+ }
+
/*
* Older versions of U-Boot do not initialize the brg-frequency
* property, so in this case we assume the BRG frequency is
* half the QE bus frequency.
*/
- iprop = of_get_property(np, "bus-frequency", NULL);
- if (!iprop) {
+ if (of_property_read_u32(np, "bus-frequency", &val)) {
dev_err(&ofdev->dev,
"missing QE bus-frequency in device tree\n");
ret = -EINVAL;
goto out_np;
}
- if (*iprop)
- qe_port->port.uartclk = *iprop / 2;
+ if (val)
+ qe_port->port.uartclk = val / 2;
else {
dev_err(&ofdev->dev,
"invalid QE bus-frequency in device tree\n");
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 6d106e33f842..eeb4b6568776 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -7,10 +7,6 @@
* Based on drivers/serial/8250.c, by Russell King.
*/
-#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -869,6 +865,7 @@ static int siu_probe(struct platform_device *dev)
port = &siu_uart_ports[i];
port->ops = &siu_uart_ops;
port->dev = &dev->dev;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
retval = uart_add_one_port(&siu_uart_driver, port);
if (retval < 0) {
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 3d58e9b34553..764e992438b2 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -7,10 +7,6 @@
* Author: Robert Love <rlove@google.com>
*/
-#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-# define SUPPORT_SYSRQ
-#endif
-
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -703,6 +699,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
vt8500_port->uart.line = port;
vt8500_port->uart.dev = &pdev->dev;
vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ vt8500_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_VT8500_CONSOLE);
/* Serial core uses the magic "16" everywhere - adjust for it */
vt8500_port->uart.uartclk = 16 * clk_get_rate(vt8500_port->clk) /
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 4e55bc327a54..98db9dc168ff 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -9,10 +9,6 @@
* in the code.
*/
-#if defined(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/console.h>
@@ -158,6 +154,16 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_MODEMCR_DTR 0x00000001 /* Data Terminal Ready */
/*
+ * Modem Status register:
+ * The read/write Modem Status register reports the interface with the modem
+ * or data set, or a peripheral device emulating a modem.
+ */
+#define CDNS_UART_MODEMSR_DCD BIT(7) /* Data Carrier Detect */
+#define CDNS_UART_MODEMSR_RI BIT(6) /* Ring Indicator */
+#define CDNS_UART_MODEMSR_DSR BIT(5) /* Data Set Ready */
+#define CDNS_UART_MODEMSR_CTS BIT(4) /* Clear To Send */
+
+/*
* Channel Status Register:
* The channel status register (CSR) is provided to enable the control logic
* to monitor the status of bits in the channel interrupt status register,
@@ -684,7 +690,7 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
static void cdns_uart_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
- unsigned int cval = 0;
+ u32 cval = 0;
unsigned int baud, minbaud, maxbaud;
unsigned long flags;
unsigned int ctrl_reg, mode_reg, val;
@@ -805,6 +811,13 @@ static void cdns_uart_set_termios(struct uart_port *port,
cval |= mode_reg & 1;
writel(cval, port->membase + CDNS_UART_MR);
+ cval = readl(port->membase + CDNS_UART_MODEMCR);
+ if (termios->c_cflag & CRTSCTS)
+ cval |= CDNS_UART_MODEMCR_FCM;
+ else
+ cval &= ~CDNS_UART_MODEMCR_FCM;
+ writel(cval, port->membase + CDNS_UART_MODEMCR);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1007,12 +1020,24 @@ static void cdns_uart_config_port(struct uart_port *port, int flags)
*/
static unsigned int cdns_uart_get_mctrl(struct uart_port *port)
{
+ u32 val;
+ unsigned int mctrl = 0;
struct cdns_uart *cdns_uart_data = port->private_data;
if (cdns_uart_data->cts_override)
- return 0;
-
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ val = readl(port->membase + CDNS_UART_MODEMSR);
+ if (val & CDNS_UART_MODEMSR_CTS)
+ mctrl |= TIOCM_CTS;
+ if (val & CDNS_UART_MODEMSR_DSR)
+ mctrl |= TIOCM_DSR;
+ if (val & CDNS_UART_MODEMSR_RI)
+ mctrl |= TIOCM_RNG;
+ if (val & CDNS_UART_MODEMSR_DCD)
+ mctrl |= TIOCM_CAR;
+
+ return mctrl;
}
static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1027,12 +1052,13 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
val = readl(port->membase + CDNS_UART_MODEMCR);
mode_reg = readl(port->membase + CDNS_UART_MR);
- val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR |
- CDNS_UART_MODEMCR_FCM);
+ val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
mode_reg &= ~CDNS_UART_MR_CHMODE_MASK;
- if (mctrl & TIOCM_RTS || mctrl & TIOCM_DTR)
- val |= CDNS_UART_MODEMCR_FCM;
+ if (mctrl & TIOCM_RTS)
+ val |= CDNS_UART_MODEMCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ val |= CDNS_UART_MODEMCR_DTR;
if (mctrl & TIOCM_LOOP)
mode_reg |= CDNS_UART_MR_CHMODE_L_LOOP;
else
@@ -1634,6 +1660,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &cdns_uart_ops;
port->fifosize = CDNS_UART_FIFO_SIZE;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE);
/*
* Register the port.
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index b03d3e458ea2..4b4f604646a7 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -44,10 +44,6 @@
* complicated and prevents the use of some automatic modes of operation.
*/
-#if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
#include <linux/bug.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -992,7 +988,7 @@ static void zs_release_port(struct uart_port *uport)
static int zs_map_port(struct uart_port *uport)
{
if (!uport->membase)
- uport->membase = ioremap_nocache(uport->mapbase,
+ uport->membase = ioremap(uport->mapbase,
ZS_CHAN_IO_SIZE);
if (!uport->membase) {
printk(KERN_ERR "zs: Cannot map MMIO\n");
@@ -1106,6 +1102,7 @@ static int __init zs_probe_sccs(void)
zport->scc = &zs_sccs[chip];
zport->clk_mode = 16;
+ uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ZS_CONSOLE);
uport->irq = zs_parms.irq[chip];
uport->uartclk = ZS_CLOCK;
uport->fifosize = 1;
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 84f26e43b229..08d2976593d5 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -4054,7 +4054,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
}
info->lcr_mem_requested = true;
- info->memory_base = ioremap_nocache(info->phys_memory_base,
+ info->memory_base = ioremap(info->phys_memory_base,
0x40000);
if (!info->memory_base) {
printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
@@ -4068,7 +4068,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
goto errout;
}
- info->lcr_base = ioremap_nocache(info->phys_lcr_base,
+ info->lcr_base = ioremap(info->phys_lcr_base,
PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
@@ -7837,7 +7837,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned long flags;
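
This and the two synclink hunks that follow adapt to the v5.6 netdev change that added a queue index to the TX-timeout callback. For reference, a minimal implementation of the new prototype looks like this (my_tx_timeout and my_netdev_ops are illustrative):

    static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
            /* reset the stalled queue / restart the hardware here */
    }

    static const struct net_device_ops my_netdev_ops = {
            .ndo_tx_timeout = my_tx_timeout,
    };
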
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index e8a9047de451..b794177ccfb9 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1334,10 +1334,10 @@ static void throttle(struct tty_struct * tty)
DBGINFO(("%s throttle\n", info->device_name));
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1359,10 +1359,10 @@ static void unthrottle(struct tty_struct * tty)
else
send_xchar(tty, START_CHAR(tty));
}
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1682,7 +1682,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slgt_info *info = dev_to_port(dev);
unsigned long flags;
@@ -2098,7 +2098,7 @@ static void isr_rxdata(struct slgt_info *info)
if (desc_complete(info->rbufs[i])) {
/* all buffers full */
rx_stop(info);
- info->rx_restart = 1;
+ info->rx_restart = true;
continue;
}
info->rbufs[i].buf[count++] = (unsigned char)reg;
@@ -2560,8 +2560,8 @@ static void change_params(struct slgt_info *info)
info->read_status_mask = IRQ_RXOVER;
if (I_INPCK(info->port.tty))
info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask |= MASK_BREAK;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask |= MASK_BREAK;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
if (I_IGNBRK(info->port.tty)) {
@@ -3192,7 +3192,7 @@ static int tiocmset(struct tty_struct *tty,
info->signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
}
@@ -3203,7 +3203,7 @@ static int carrier_raised(struct tty_port *port)
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
@@ -3218,7 +3218,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3450,7 +3450,7 @@ static int claim_resources(struct slgt_info *info)
else
info->reg_addr_requested = true;
- info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
+ info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
if (!info->reg_addr) {
DBGERR(("%s can't map device registers, addr=%08X\n",
info->device_name, info->phys_reg_addr));
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index fcb91bf7a15b..33ff2dbb6650 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1453,10 +1453,10 @@ static void throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1482,10 +1482,10 @@ static void unthrottle(struct tty_struct * tty)
send_xchar(tty, START_CHAR(tty));
}
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1807,7 +1807,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
*
* dev pointer to network device structure
*/
-static void hdlcdev_tx_timeout(struct net_device *dev)
+static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
SLMP_INFO *info = dev_to_port(dev);
unsigned long flags;
@@ -2470,7 +2470,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
if (status & SerialSignal_CTS) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx start...");
- info->port.tty->hw_stopped = 0;
+ info->port.tty->hw_stopped = 0;
tx_start(info);
info->pending_bh |= BH_TRANSMIT;
return;
@@ -2479,7 +2479,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
if (!(status & SerialSignal_CTS)) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx stop...");
- info->port.tty->hw_stopped = 1;
+ info->port.tty->hw_stopped = 1;
tx_stop(info);
}
}
@@ -2806,8 +2806,8 @@ static void change_params(SLMP_INFO *info)
info->read_status_mask2 = OVRN;
if (I_INPCK(info->port.tty))
info->read_status_mask2 |= PE | FRME;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask1 |= BRKD;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask1 |= BRKD;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask2 |= PE | FRME;
if (I_IGNBRK(info->port.tty)) {
@@ -3177,7 +3177,7 @@ static int tiocmget(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
@@ -3215,7 +3215,7 @@ static int tiocmset(struct tty_struct *tty,
info->serial_signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -3227,7 +3227,7 @@ static int carrier_raised(struct tty_port *port)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
@@ -3243,7 +3243,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3559,7 +3559,7 @@ static int claim_resources(SLMP_INFO *info)
else
info->sca_statctrl_requested = true;
- info->memory_base = ioremap_nocache(info->phys_memory_base,
+ info->memory_base = ioremap(info->phys_memory_base,
SCA_MEM_SIZE);
if (!info->memory_base) {
printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
@@ -3568,7 +3568,7 @@ static int claim_resources(SLMP_INFO *info)
goto errout;
}
- info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
+ info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
@@ -3577,7 +3577,7 @@ static int claim_resources(SLMP_INFO *info)
}
info->lcr_base += info->lcr_offset;
- info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
+ info->sca_base = ioremap(info->phys_sca_base, PAGE_SIZE);
if (!info->sca_base) {
printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_sca_base );
@@ -3586,7 +3586,7 @@ static int claim_resources(SLMP_INFO *info)
}
info->sca_base += info->sca_offset;
- info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
+ info->statctrl_base = ioremap(info->phys_statctrl_base,
PAGE_SIZE);
if (!info->statctrl_base) {
printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 573b2055173c..f724962a5906 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -967,8 +967,6 @@ static struct input_handler sysrq_handler = {
.id_table = sysrq_ids,
};
-static bool sysrq_handler_registered;
-
static inline void sysrq_register_handler(void)
{
int error;
@@ -978,16 +976,11 @@ static inline void sysrq_register_handler(void)
error = input_register_handler(&sysrq_handler);
if (error)
pr_err("Failed to register input handler, error %d", error);
- else
- sysrq_handler_registered = true;
}
static inline void sysrq_unregister_handler(void)
{
- if (sysrq_handler_registered) {
- input_unregister_handler(&sysrq_handler);
- sysrq_handler_registered = false;
- }
+ input_unregister_handler(&sysrq_handler);
}
static int sysrq_reset_seq_param_set(const char *buffer,
@@ -1108,15 +1101,15 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations proc_sysrq_trigger_operations = {
- .write = write_sysrq_trigger,
- .llseek = noop_llseek,
+static const struct proc_ops sysrq_trigger_proc_ops = {
+ .proc_write = write_sysrq_trigger,
+ .proc_lseek = noop_llseek,
};
static void sysrq_init_procfs(void)
{
if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
- &proc_sysrq_trigger_operations))
+ &sysrq_trigger_proc_ops))
pr_err("Failed to register proc interface\n");
}
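
The sysrq hunk above is part of the v5.6 procfs conversion from struct file_operations to the leaner struct proc_ops; the callbacks keep their signatures but the fields gain a proc_ prefix. A sketch of the migration shape (my_write and "my-entry" are illustrative):

    static const struct proc_ops my_proc_ops = {
            .proc_write = my_write,        /* was .write  */
            .proc_lseek = noop_llseek,     /* was .llseek */
    };

    proc_create("my-entry", 0200, NULL, &my_proc_ops);
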
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index f438eaa68246..40207cab3b2a 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -17,32 +17,28 @@
* include/asm/termbits.h file.
*/
static const speed_t baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 230400, 460800,
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800,
#ifdef __sparc__
- 76800, 153600, 307200, 614400, 921600
+ 76800, 153600, 307200, 614400, 921600, 500000, 576000,
+ 1000000, 1152000, 1500000, 2000000
#else
500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
2500000, 3000000, 3500000, 4000000
#endif
};
-#ifndef __sparc__
static const tcflag_t baud_bits[] = {
- B0, B50, B75, B110, B134, B150, B200, B300, B600,
- B1200, B1800, B2400, B4800, B9600, B19200, B38400,
- B57600, B115200, B230400, B460800, B500000, B576000,
- B921600, B1000000, B1152000, B1500000, B2000000, B2500000,
- B3000000, B3500000, B4000000
-};
+ B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
+ B4800, B9600, B19200, B38400, B57600, B115200, B230400, B460800,
+#ifdef __sparc__
+ B76800, B153600, B307200, B614400, B921600, B500000, B576000,
+ B1000000, B1152000, B1500000, B2000000
#else
-static const tcflag_t baud_bits[] = {
- B0, B50, B75, B110, B134, B150, B200, B300, B600,
- B1200, B1800, B2400, B4800, B9600, B19200, B38400,
- B57600, B115200, B230400, B460800, B76800, B153600,
- B307200, B614400, B921600
-};
+ B500000, B576000, B921600, B1000000, B1152000, B1500000, B2000000,
+ B2500000, B3000000, B3500000, B4000000
#endif
+};
static int n_baud_table = ARRAY_SIZE(baud_table);
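
After the rewrite both arrays share one layout: the first twenty rates are common, and a single #ifdef __sparc__ block holds the divergent tail, keeping baud_table[i] and baud_bits[i] index-aligned on every architecture. That alignment is what lookups rely on; roughly (a sketch, loosely following what tty_termios_baud_rate() does with the CBAUD bits):

    static speed_t lookup_baud(tcflag_t cbaud)
    {
            int i;

            for (i = 0; i < n_baud_table; i++)
                    if (baud_bits[i] == cbaud)
                            return baud_table[i];
            return 0;       /* unknown encoding */
    }
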
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index d9f54c7d94f2..a1453fe10862 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1893,7 +1893,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
struct tty_struct *tty_kopen(dev_t device)
{
struct tty_struct *tty;
- struct tty_driver *driver = NULL;
+ struct tty_driver *driver;
int index = -1;
mutex_lock(&tty_mutex);
diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore
index 9b38b85f9d9a..3ecf42234d89 100644
--- a/drivers/tty/vt/.gitignore
+++ b/drivers/tty/vt/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+conmakehash
consolemap_deftbl.c
defkeymap.c
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index edbbe0ccdb83..fe30ce512819 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -12,10 +12,12 @@ obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
# Files generated that shall be removed upon make clean
clean-files := consolemap_deftbl.c defkeymap.c
+hostprogs += conmakehash
+
quiet_cmd_conmk = CONMK $@
- cmd_conmk = scripts/conmakehash $< > $@
+ cmd_conmk = $(obj)/conmakehash $< > $@
-$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE) $(obj)/conmakehash
$(call cmd,conmk)
$(obj)/defkeymap.o: $(obj)/defkeymap.c
diff --git a/drivers/tty/vt/conmakehash.c b/drivers/tty/vt/conmakehash.c
new file mode 100644
index 000000000000..cddd789fe46e
--- /dev/null
+++ b/drivers/tty/vt/conmakehash.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * conmakehash.c
+ *
+ * Create arrays for initializing the kernel folded tables (using a hash
+ * table turned out to be too limiting...) Unfortunately we can't simply
+ * preinitialize the tables at compile time since kfree() cannot accept
+ * memory not allocated by kmalloc(), and doing our own memory management
+ * just for this seems like massive overkill.
+ *
+ * Copyright (C) 1995-1997 H. Peter Anvin
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sysexits.h>
+#include <string.h>
+#include <ctype.h>
+
+#define MAX_FONTLEN 256
+
+typedef unsigned short unicode;
+
+static void usage(char *argv0)
+{
+ fprintf(stderr, "Usage: \n"
+ " %s chartable [hashsize] [hashstep] [maxhashlevel]\n", argv0);
+ exit(EX_USAGE);
+}
+
+static int getunicode(char **p0)
+{
+ char *p = *p0;
+
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p != 'U' || p[1] != '+' ||
+ !isxdigit(p[2]) || !isxdigit(p[3]) || !isxdigit(p[4]) ||
+ !isxdigit(p[5]) || isxdigit(p[6]))
+ return -1;
+ *p0 = p+6;
+ return strtol(p+2,0,16);
+}
+
+unicode unitable[MAX_FONTLEN][255];
+ /* Massive overkill, but who cares? */
+int unicount[MAX_FONTLEN];
+
+static void addpair(int fp, int un)
+{
+ int i;
+
+ if ( un <= 0xfffe )
+ {
+ /* Check it isn't a duplicate */
+
+ for ( i = 0 ; i < unicount[fp] ; i++ )
+ if ( unitable[fp][i] == un )
+ return;
+
+ /* Add to list */
+
+ if ( unicount[fp] > 254 )
+ {
+ fprintf(stderr, "ERROR: Only 255 unicodes/glyph permitted!\n");
+ exit(EX_DATAERR);
+ }
+
+ unitable[fp][unicount[fp]] = un;
+ unicount[fp]++;
+ }
+
+ /* otherwise: ignore */
+}
+
+int main(int argc, char *argv[])
+{
+ FILE *ctbl;
+ char *tblname;
+ char buffer[65536];
+ int fontlen;
+ int i, nuni, nent;
+ int fp0, fp1, un0, un1;
+ char *p, *p1;
+
+ if ( argc < 2 || argc > 5 )
+ usage(argv[0]);
+
+ if ( !strcmp(argv[1],"-") )
+ {
+ ctbl = stdin;
+ tblname = "stdin";
+ }
+ else
+ {
+ ctbl = fopen(tblname = argv[1], "r");
+ if ( !ctbl )
+ {
+ perror(tblname);
+ exit(EX_NOINPUT);
+ }
+ }
+
+ /* For now we assume the default font is always 256 characters. */
+ fontlen = 256;
+
+ /* Initialize table */
+
+ for ( i = 0 ; i < fontlen ; i++ )
+ unicount[i] = 0;
+
+ /* Now we come to the tricky part. Parse the input table. */
+
+ while ( fgets(buffer, sizeof(buffer), ctbl) != NULL )
+ {
+ if ( (p = strchr(buffer, '\n')) != NULL )
+ *p = '\0';
+ else
+ fprintf(stderr, "%s: Warning: line too long\n", tblname);
+
+ p = buffer;
+
+/*
+ * Syntax accepted:
+ * <fontpos> <unicode> <unicode> ...
+ * <range> idem
+ * <range> <unicode range>
+ *
+ * where <range> ::= <fontpos>-<fontpos>
+ * and <unicode> ::= U+<h><h><h><h>
+ * and <h> ::= <hexadecimal digit>
+ */
+
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (!*p || *p == '#')
+ continue; /* skip comment or blank line */
+
+ fp0 = strtol(p, &p1, 0);
+ if (p1 == p)
+ {
+ fprintf(stderr, "Bad input line: %s\n", buffer);
+ exit(EX_DATAERR);
+ }
+ p = p1;
+
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p == '-')
+ {
+ p++;
+ fp1 = strtol(p, &p1, 0);
+ if (p1 == p)
+ {
+ fprintf(stderr, "Bad input line: %s\n", buffer);
+ exit(EX_DATAERR);
+ }
+ p = p1;
+ }
+ else
+ fp1 = 0;
+
+ if ( fp0 < 0 || fp0 >= fontlen )
+ {
+ fprintf(stderr,
+ "%s: Glyph number (0x%x) larger than font length\n",
+ tblname, fp0);
+ exit(EX_DATAERR);
+ }
+ if ( fp1 && (fp1 < fp0 || fp1 >= fontlen) )
+ {
+ fprintf(stderr,
+ "%s: Bad end of range (0x%x)\n",
+ tblname, fp1);
+ exit(EX_DATAERR);
+ }
+
+ if (fp1)
+ {
+ /* we have a range; expect the word "idem" or a Unicode range of the
+ same length */
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (!strncmp(p, "idem", 4))
+ {
+ for (i=fp0; i<=fp1; i++)
+ addpair(i,i);
+ p += 4;
+ }
+ else
+ {
+ un0 = getunicode(&p);
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p != '-')
+ {
+ fprintf(stderr,
+"%s: Corresponding to a range of font positions, there should be a Unicode range\n",
+ tblname);
+ exit(EX_DATAERR);
+ }
+ p++;
+ un1 = getunicode(&p);
+ if (un0 < 0 || un1 < 0)
+ {
+ fprintf(stderr,
+"%s: Bad Unicode range corresponding to font position range 0x%x-0x%x\n",
+ tblname, fp0, fp1);
+ exit(EX_DATAERR);
+ }
+ if (un1 - un0 != fp1 - fp0)
+ {
+ fprintf(stderr,
+"%s: Unicode range U+%x-U+%x not of the same length as font position range 0x%x-0x%x\n",
+ tblname, un0, un1, fp0, fp1);
+ exit(EX_DATAERR);
+ }
+ for(i=fp0; i<=fp1; i++)
+ addpair(i,un0-fp0+i);
+ }
+ }
+ else
+ {
+ /* no range; expect a list of unicode values for a single font position */
+
+ while ( (un0 = getunicode(&p)) >= 0 )
+ addpair(fp0, un0);
+ }
+ while (*p == ' ' || *p == '\t')
+ p++;
+ if (*p && *p != '#')
+ fprintf(stderr, "%s: trailing junk (%s) ignored\n", tblname, p);
+ }
+
+ /* Okay, we hit EOF, now output hash table */
+
+ fclose(ctbl);
+
+
+ /* Compute total size of Unicode list */
+ nuni = 0;
+ for ( i = 0 ; i < fontlen ; i++ )
+ nuni += unicount[i];
+
+ printf("\
+/*\n\
+ * Do not edit this file; it was automatically generated by\n\
+ *\n\
+ * conmakehash %s > [this file]\n\
+ *\n\
+ */\n\
+\n\
+#include <linux/types.h>\n\
+\n\
+u8 dfont_unicount[%d] = \n\
+{\n\t", argv[1], fontlen);
+
+ for ( i = 0 ; i < fontlen ; i++ )
+ {
+ printf("%3d", unicount[i]);
+ if ( i == fontlen-1 )
+ printf("\n};\n");
+ else if ( i % 8 == 7 )
+ printf(",\n\t");
+ else
+ printf(", ");
+ }
+
+ printf("\nu16 dfont_unitable[%d] = \n{\n\t", nuni);
+
+ fp0 = 0;
+ nent = 0;
+ for ( i = 0 ; i < nuni ; i++ )
+ {
+ while ( nent >= unicount[fp0] )
+ {
+ fp0++;
+ nent = 0;
+ }
+ printf("0x%04x", unitable[fp0][nent++]);
+ if ( i == nuni-1 )
+ printf("\n};\n");
+ else if ( i % 8 == 7 )
+ printf(",\n\t");
+ else
+ printf(", ");
+ }
+
+ exit(EX_OK);
+}
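
Given the grammar in the parser comment above, a hypothetical chartable input for the new host tool could look like this (entries invented for illustration):

    # fontpos   unicode(s)
    0x00        U+0000
    0x01-0x1f   idem                    # positions map to themselves
    0x20-0x7e   U+0020-U+007e           # font range onto a unicode range
    0xff        U+00ff U+2588           # one glyph, several code points

Per the Makefile hunk earlier, the build then runs it as `conmakehash $(FONTMAPFILE) > consolemap_deftbl.c`.
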
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 34aa39d1aed9..35d21cdb60d0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3326,8 +3326,9 @@ static int __init con_init(void)
console_lock();
- if (conswitchp)
- display_desc = conswitchp->con_startup();
+ if (!conswitchp)
+ conswitchp = &dummy_con;
+ display_desc = conswitchp->con_startup();
if (!display_desc) {
fg_console = 0;
console_unlock();
@@ -3567,7 +3568,6 @@ err:
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
-/* unlocked version of unbind_con_driver() */
int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
struct module *owner = csw->owner;
@@ -4095,7 +4095,7 @@ static void con_driver_unregister_callback(struct work_struct *ignored)
* when a driver wants to take over some existing consoles
* and become default driver for newly opened ones.
*
- * do_take_over_console is basically a register followed by unbind
+ * do_take_over_console is basically a register followed by bind
*/
int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
{
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 81c88f7bbbcb..f6ab3f28c838 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -132,11 +132,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
if (irq_on) {
if (test_and_clear_bit(0, &priv->flags))
enable_irq(dev_info->irq);
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
- if (!test_and_set_bit(0, &priv->flags))
+ if (!test_and_set_bit(0, &priv->flags)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
disable_irq(dev_info->irq);
+ }
}
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
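
The uio_dmem_genirq fix above narrows the critical section so that disable_irq() runs only after the spinlock is dropped: disable_irq() synchronizes against in-flight handlers and may sleep, which is not allowed under a spinlock with interrupts disabled. The rule being applied, sketched with both paths unlocking explicitly:

    spin_lock_irqsave(&priv->lock, flags);
    if (!test_and_set_bit(0, &priv->flags)) {
            spin_unlock_irqrestore(&priv->lock, flags);
            disable_irq(irq);       /* may wait for running handlers */
    } else {
            spin_unlock_irqrestore(&priv->lock, flags);
    }
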
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 1303b165055b..fc25ce90da3b 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -156,6 +156,8 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
uioinfo->irq = ret;
if (ret == -ENXIO && pdev->dev.of_node)
uioinfo->irq = UIO_IRQ_NONE;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
else if (ret < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
return ret;
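
The uio_pdrv_genirq hunk adds the standard probe-deferral rule: -EPROBE_DEFER from platform_get_irq() must be returned as-is, without an error message, so the driver core can silently retry the probe once the IRQ provider appears. The common pattern:

    ret = platform_get_irq(pdev, 0);
    if (ret == -EPROBE_DEFER)
            return ret;             /* core re-probes later */
    if (ret < 0) {
            dev_err(&pdev->dev, "failed to get IRQ\n");
            return ret;
    }
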
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 2a1e89d12ed9..84716d216ae5 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -53,4 +53,14 @@ config USB_CDNS3_TI
e.g. J721e.
+config USB_CDNS3_IMX
+ tristate "Cadence USB3 support on NXP i.MX platforms"
+ depends on ARCH_MXC || COMPILE_TEST
+ default USB_CDNS3
+ help
+ Say 'Y' or 'M' here if you are building for NXP i.MX
+ platforms that contain the Cadence USB3 controller core.
+
+ For example, imx8qm and imx8qxp.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index 948e6b88d1a9..d47e341a6f39 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -15,3 +15,4 @@ cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
new file mode 100644
index 000000000000..aba988e71958
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * cdns3-imx.c - NXP i.MX specific Glue layer for Cadence USB Controller
+ *
+ * Copyright (C) 2019 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+
+#define USB3_CORE_CTRL1 0x00
+#define USB3_CORE_CTRL2 0x04
+#define USB3_INT_REG 0x08
+#define USB3_CORE_STATUS 0x0c
+#define XHCI_DEBUG_LINK_ST 0x10
+#define XHCI_DEBUG_BUS 0x14
+#define USB3_SSPHY_CTRL1 0x40
+#define USB3_SSPHY_CTRL2 0x44
+#define USB3_SSPHY_STATUS 0x4c
+#define USB2_PHY_CTRL1 0x50
+#define USB2_PHY_CTRL2 0x54
+#define USB2_PHY_STATUS 0x5c
+
+/* Register bits definition */
+
+/* USB3_CORE_CTRL1 */
+#define SW_RESET_MASK (0x3f << 26)
+#define PWR_SW_RESET BIT(31)
+#define APB_SW_RESET BIT(30)
+#define AXI_SW_RESET BIT(29)
+#define RW_SW_RESET BIT(28)
+#define PHY_SW_RESET BIT(27)
+#define PHYAHB_SW_RESET BIT(26)
+#define ALL_SW_RESET (PWR_SW_RESET | APB_SW_RESET | AXI_SW_RESET | \
+ RW_SW_RESET | PHY_SW_RESET | PHYAHB_SW_RESET)
+#define OC_DISABLE BIT(9)
+#define MDCTRL_CLK_SEL BIT(7)
+#define MODE_STRAP_MASK (0x7)
+#define DEV_MODE (1 << 2)
+#define HOST_MODE (1 << 1)
+#define OTG_MODE (1 << 0)
+
+/* USB3_INT_REG */
+#define CLK_125_REQ BIT(29)
+#define LPM_CLK_REQ BIT(28)
+#define DEVU3_WAEKUP_EN BIT(14)
+#define OTG_WAKEUP_EN BIT(12)
+#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
+#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
+
+/* USB3_CORE_STATUS */
+#define MDCTRL_CLK_STATUS BIT(15)
+#define DEV_POWER_ON_READY BIT(13)
+#define HOST_POWER_ON_READY BIT(12)
+
+/* USB3_SSPHY_STATUS */
+#define CLK_VALID_MASK (0x3f << 26)
+#define CLK_VALID_COMPARE_BITS (0xf << 28)
+#define PHY_REFCLK_REQ (1 << 0)
+
+struct cdns_imx {
+ struct device *dev;
+ void __iomem *noncore;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset)
+{
+ return readl(data->noncore + offset);
+}
+
+static inline void cdns_imx_writel(struct cdns_imx *data, u32 offset, u32 value)
+{
+ writel(value, data->noncore + offset);
+}
+
+static const struct clk_bulk_data imx_cdns3_core_clks[] = {
+ { .id = "usb3_lpm_clk" },
+ { .id = "usb3_bus_clk" },
+ { .id = "usb3_aclk" },
+ { .id = "usb3_ipg_clk" },
+ { .id = "usb3_core_pclk" },
+};
+
+static int cdns_imx_noncore_init(struct cdns_imx *data)
+{
+ u32 value;
+ int ret;
+ struct device *dev = data->dev;
+
+ cdns_imx_writel(data, USB3_SSPHY_STATUS, CLK_VALID_MASK);
+ udelay(1);
+ ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
+ (value & CLK_VALID_COMPARE_BITS) == CLK_VALID_COMPARE_BITS,
+ 10, 100000);
+ if (ret) {
+ dev_err(dev, "wait clkvld timeout\n");
+ return ret;
+ }
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value |= ALL_SW_RESET;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+ udelay(1);
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value = (value & ~MODE_STRAP_MASK) | OTG_MODE | OC_DISABLE;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ value |= HOST_INT1_EN | DEV_INT_EN;
+ cdns_imx_writel(data, USB3_INT_REG, value);
+
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value &= ~ALL_SW_RESET;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+ return ret;
+}
+
+static int cdns_imx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct cdns_imx *data;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+ data->dev = dev;
+ data->noncore = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->noncore)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->noncore);
+ }
+
+ data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
+ data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, data->clks);
+ if (ret)
+ return ret;
+
+ ret = cdns_imx_noncore_init(data);
+ if (ret)
+ goto err;
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create children: %d\n", ret);
+ goto err;
+ }
+
+ return ret;
+
+err:
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ return ret;
+}
+
+static int cdns_imx_remove_core(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_imx_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_imx_of_match[] = {
+ { .compatible = "fsl,imx8qm-usb3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_imx_of_match);
+
+static struct platform_driver cdns_imx_driver = {
+ .probe = cdns_imx_probe,
+ .remove = cdns_imx_remove,
+ .driver = {
+ .name = "cdns3-imx",
+ .of_match_table = cdns_imx_of_match,
+ },
+};
+module_platform_driver(cdns_imx_driver);
+
+MODULE_ALIAS("platform:cdns3-imx");
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 i.MX Glue Layer");
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/debug.h
index 2c9afbfe988b..a5c6a29e1340 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/debug.h
@@ -140,7 +140,7 @@ static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep,
trb_per_sector = TRBS_PER_SEGMENT;
if (trb_per_sector > TRBS_PER_SEGMENT) {
- sprintf(str + ret, "\t\tTo big transfer ring %d\n",
+ sprintf(str + ret, "\t\tTransfer ring %d too big\n",
trb_per_sector);
return str;
}
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 4c1e75509303..736b0c6e27fe 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -71,6 +71,23 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
struct usb_request *request,
gfp_t gfp_flags);
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+
+/**
+ * cdns3_clear_register_bit - clear bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to clear
+ */
+void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+{
+ mask = readl(ptr) & ~mask;
+ writel(mask, ptr);
+}
+
/**
* cdns3_set_register_bit - set bit in given register.
* @ptr: address of device controller register to be read and changed
@@ -150,6 +167,21 @@ void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
writel(ep, &priv_dev->regs->ep_sel);
}
+/**
+ * cdns3_get_tdl - gets current tdl for selected endpoint.
+ * @priv_dev: extended gadget object
+ *
+ * Before calling this function the appropriate endpoint must
+ * be selected by means of the cdns3_select_ep() function.
+ */
+static int cdns3_get_tdl(struct cdns3_device *priv_dev)
+{
+ if (priv_dev->dev_ver < DEV_VER_V3)
+ return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+ else
+ return readl(&priv_dev->regs->ep_tdl);
+}
+
dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
struct cdns3_trb *trb)
{
@@ -166,7 +198,22 @@ int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
case USB_ENDPOINT_XFER_CONTROL:
return TRB_CTRL_RING_SIZE;
default:
- return TRB_RING_SIZE;
+ if (priv_ep->use_streams)
+ return TRB_STREAM_RING_SIZE;
+ else
+ return TRB_RING_SIZE;
+ }
+}
+
+static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (priv_ep->trb_pool) {
+ dma_free_coherent(priv_dev->sysdev,
+ cdns3_ring_size(priv_ep),
+ priv_ep->trb_pool, priv_ep->trb_pool_dma);
+ priv_ep->trb_pool = NULL;
}
}
@@ -180,8 +227,12 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
int ring_size = cdns3_ring_size(priv_ep);
+ int num_trbs = ring_size / TRB_SIZE;
struct cdns3_trb *link_trb;
+ if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
+ cdns3_free_trb_pool(priv_ep);
+
if (!priv_ep->trb_pool) {
priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
ring_size,
@@ -189,32 +240,30 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
GFP_DMA32 | GFP_ATOMIC);
if (!priv_ep->trb_pool)
return -ENOMEM;
- } else {
+
+ priv_ep->alloc_ring_size = ring_size;
memset(priv_ep->trb_pool, 0, ring_size);
}
+ priv_ep->num_trbs = num_trbs;
+
if (!priv_ep->num)
return 0;
- priv_ep->num_trbs = ring_size / TRB_SIZE;
- /* Initialize the last TRB as Link TRB. */
+ /* Initialize the last TRB as Link TRB */
link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
- link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
-
- return 0;
-}
-static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
-{
- struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
-
- if (priv_ep->trb_pool) {
- dma_free_coherent(priv_dev->sysdev,
- cdns3_ring_size(priv_ep),
- priv_ep->trb_pool, priv_ep->trb_pool_dma);
- priv_ep->trb_pool = NULL;
+ if (priv_ep->use_streams) {
+ /*
+ * For stream-capable endpoints the driver uses a single TRB.
+ * The last TRB has a zeroed cycle bit.
+ */
+ link_trb->control = 0;
+ } else {
+ link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
+ link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
}
+ return 0;
}
/**
@@ -253,6 +302,7 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
priv_dev->onchip_used_size = 0;
priv_dev->out_mem_is_allocated = 0;
priv_dev->wait_for_setup = 0;
+ priv_dev->using_streams = 0;
}
/**
@@ -356,17 +406,43 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
{
struct usb_request *request;
int ret = 0;
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+ /*
+ * If the last pending transfer is INTERNAL
+ * or streams are enabled for this endpoint,
+ * do NOT start a new transfer until the last one completes.
+ */
+ if (!pending_empty) {
+ struct cdns3_request *priv_req;
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ priv_req = to_cdns3_request(request);
+ if ((priv_req->flags & REQUEST_INTERNAL) ||
+ (priv_ep->flags & EP_TDLCHK_EN) ||
+ priv_ep->use_streams) {
+ trace_printk("Blocking external request\n");
+ return ret;
+ }
+ }
while (!list_empty(&priv_ep->deferred_req_list)) {
request = cdns3_next_request(&priv_ep->deferred_req_list);
- ret = cdns3_ep_run_transfer(priv_ep, request);
+ if (!priv_ep->use_streams) {
+ ret = cdns3_ep_run_transfer(priv_ep, request);
+ } else {
+ priv_ep->stream_sg_idx = 0;
+ ret = cdns3_ep_run_stream_transfer(priv_ep, request);
+ }
if (ret)
return ret;
list_del(&request->list);
list_add_tail(&request->list,
&priv_ep->pending_req_list);
+ if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
+ break;
}
priv_ep->flags &= ~EP_RING_FULL;
@@ -379,7 +455,7 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
* buffer for unblocking the on-chip FIFO buffer. This flag will be cleared
* if the DMA is armed before the first DESCMISS interrupt.
*/
-#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \
+#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
(reg) |= EP_STS_EN_DESCMISEN; \
@@ -450,10 +526,17 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
if (!req)
return NULL;
+ /* unmap the gadget request before copying data */
+ usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
+ priv_ep->dir);
+
cdns3_wa2_descmiss_copy_data(priv_ep, req);
if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
req->length != req->actual) {
/* wait for next part of transfer */
+ /* re-map the gadget request buffer */
+ usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
+ usb_endpoint_dir_in(priv_ep->endpoint.desc));
return NULL;
}
@@ -570,6 +653,13 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
struct cdns3_request *priv_req;
struct usb_request *request;
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+
+ /* check for pending transfer */
+ if (!pending_empty) {
+ trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
+ return;
+ }
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
@@ -578,8 +668,10 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
- if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
+ if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
+ trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
cdns3_wa2_remove_old_request(priv_ep);
+ }
request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
GFP_ATOMIC);
@@ -621,6 +713,78 @@ err:
"Failed: No sufficient memory for DESCMIS\n");
}
+static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
+{
+ u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+ if (tdl) {
+ u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;
+
+ writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ }
+}
+
+static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
+{
+ u32 ep_sts_reg;
+
+ /* select EP0-out */
+ cdns3_select_ep(priv_dev, 0);
+
+ ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+ if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
+ u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
+ struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];
+
+ if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
+ outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
+ u8 pending_empty = list_empty(&outq_ep->pending_req_list);
+
+ if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
+ (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
+ !pending_empty) {
+ } else {
+ u32 ep_sts_en_reg;
+ u32 ep_cmd_reg;
+
+ cdns3_select_ep(priv_dev, outq_ep->num |
+ outq_ep->dir);
+ ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
+ ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);
+
+ outq_ep->flags |= EP_TDLCHK_EN;
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+ EP_CFG_TDL_CHK);
+
+ cdns3_wa2_enable_detection(priv_dev, outq_ep,
+ ep_sts_en_reg);
+ writel(ep_sts_en_reg,
+ &priv_dev->regs->ep_sts_en);
+ /* reset tdl value to zero */
+ cdns3_wa2_reset_tdl(priv_dev);
+ /*
+ * Memory barrier - Reset tdl before ringing the
+ * doorbell.
+ */
+ wmb();
+ if (EP_CMD_DRDY & ep_cmd_reg) {
+ trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");
+
+ } else {
+ trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
+ /*
+ * ring doorbell to generate DESCMIS irq
+ */
+ writel(EP_CMD_DRDY,
+ &priv_dev->regs->ep_cmd);
+ }
+ }
+ }
+ }
+}
+
/**
* cdns3_gadget_giveback - call struct usb_request's ->complete callback
* @priv_ep: The endpoint to which the request belongs
@@ -807,14 +971,120 @@ static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
cdns3_wa1_restore_cycle_bit(priv_ep);
}
+static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *trb;
+ dma_addr_t trb_dma;
+ int address;
+ u32 control;
+ u32 length;
+ u32 tdl;
+ unsigned int sg_idx = priv_ep->stream_sg_idx;
+
+ priv_req = to_cdns3_request(request);
+ address = priv_ep->endpoint.desc->bEndpointAddress;
+
+ priv_ep->flags |= EP_PENDING_REQUEST;
+
+ /* must allocate buffer aligned to 8 */
+ if (priv_req->flags & REQUEST_UNALIGNED)
+ trb_dma = priv_req->aligned_buf->dma;
+ else
+ trb_dma = request->dma;
+
+ /* For stream-capable endpoints the driver uses only a single TD. */
+ trb = priv_ep->trb_pool + priv_ep->enqueue;
+ priv_req->start_trb = priv_ep->enqueue;
+ priv_req->end_trb = priv_req->start_trb;
+ priv_req->trb = trb;
+
+ cdns3_select_ep(priv_ep->cdns3_dev, address);
+
+ control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
+ TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
+
+ if (!request->num_sgs) {
+ trb->buffer = TRB_BUFFER(trb_dma);
+ length = request->length;
+ } else {
+ trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
+ length = request->sg[sg_idx].length;
+ }
+
+ tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
+
+ trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
+ TRB_LEN(length);
+
+ /*
+ * For the DEV_VER_V2 controller version, USB_CONF2_EN_TDL_TRB is
+ * enabled in the DMULT configuration. This enables TDL calculation
+ * based on the TRB, hence TDL is set in the TRB.
+ */
+ if (priv_dev->dev_ver >= DEV_VER_V2) {
+ if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+ trb->length |= TRB_TDL_SS_SIZE(tdl);
+ }
+ priv_req->flags |= REQUEST_PENDING;
+
+ trb->control = control;
+
+ trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+
+ /*
+ * Memory barrier - Cycle Bit must be set before trb->length and
+ * trb->buffer fields.
+ */
+ wmb();
+
+ /* always first element */
+ writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
+ &priv_dev->regs->ep_traddr);
+
+ if (!(priv_ep->flags & EP_STALLED)) {
+ trace_cdns3_ring(priv_ep);
+ /* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
+ writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+
+ priv_ep->prime_flag = false;
+
+ /*
+ * For controller version DEV_VER_V2, the TDL calculation
+ * is based on the TRB.
+ */
+
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ else if (priv_dev->dev_ver > DEV_VER_V2)
+ writel(tdl, &priv_dev->regs->ep_tdl);
+
+ priv_ep->last_stream_id = priv_req->request.stream_id;
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
+ EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+
+ trace_cdns3_doorbell_epx(priv_ep->name,
+ readl(&priv_dev->regs->ep_traddr));
+ }
+
+ /* WORKAROUND for transition to L0 */
+ __cdns3_gadget_wakeup(priv_dev);
+
+ return 0;
+}
+
/**
* cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
* @priv_ep: endpoint object
*
* Returns zero on success or negative value on failure
*/
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
- struct usb_request *request)
+static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
@@ -826,6 +1096,7 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
int address;
u32 control;
int pcs;
+ u16 total_tdl = 0;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
num_trb = priv_ep->interval;
@@ -910,6 +1181,9 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (likely(priv_dev->dev_ver >= DEV_VER_V2))
td_size = DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);
+ else if (priv_ep->flags & EP_TDLCHK_EN)
+ total_tdl += DIV_ROUND_UP(length,
+ priv_ep->endpoint.maxpacket);
trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
TRB_LEN(length);
@@ -954,6 +1228,23 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
if (sg_iter == 1)
trb->control |= TRB_IOC | TRB_ISP;
+ if (priv_dev->dev_ver < DEV_VER_V2 &&
+ (priv_ep->flags & EP_TDLCHK_EN)) {
+ u16 tdl = total_tdl;
+ u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
+
+ if (tdl > EP_CMD_TDL_MAX) {
+ tdl = EP_CMD_TDL_MAX;
+ priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
+ }
+
+ if (old_tdl < tdl) {
+ tdl -= old_tdl;
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
+ &priv_dev->regs->ep_cmd);
+ }
+ }
+
/*
* Memory barrier - cycle bit must be set before the other fields in the TRB.
*/
@@ -1153,29 +1444,56 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
cdns3_move_deq_to_next_trb(priv_req);
}
- /* Re-select endpoint. It could be changed by other CPU during
- * handling usb_gadget_giveback_request.
- */
- cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+ if (!request->stream_id) {
+ /* Re-select endpoint. It could be changed by other CPU
+ * during handling usb_gadget_giveback_request.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
- if (!cdns3_request_handled(priv_ep, priv_req))
- goto prepare_next_td;
+ if (!cdns3_request_handled(priv_ep, priv_req))
+ goto prepare_next_td;
- trb = priv_ep->trb_pool + priv_ep->dequeue;
- trace_cdns3_complete_trb(priv_ep, trb);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+ trace_cdns3_complete_trb(priv_ep, trb);
- if (trb != priv_req->trb)
- dev_warn(priv_dev->dev,
- "request_trb=0x%p, queue_trb=0x%p\n",
- priv_req->trb, trb);
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
- request->actual = TRB_LEN(le32_to_cpu(trb->length));
- cdns3_move_deq_to_next_trb(priv_req);
- cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ request->actual = TRB_LEN(le32_to_cpu(trb->length));
+ cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
+ TRBS_PER_SEGMENT == 2)
+ break;
+ } else {
+ /* Re-select endpoint. It could be changed by other CPU
+ * during handling usb_gadget_giveback_request.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
- TRBS_PER_SEGMENT == 2)
+ trb = priv_ep->trb_pool;
+ trace_cdns3_complete_trb(priv_ep, trb);
+
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
+
+ request->actual += TRB_LEN(le32_to_cpu(trb->length));
+
+ if (!request->num_sgs ||
+ (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
+ priv_ep->stream_sg_idx = 0;
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ } else {
+ priv_ep->stream_sg_idx++;
+ cdns3_ep_run_stream_transfer(priv_ep, request);
+ }
break;
+ }
}
priv_ep->flags &= ~EP_PENDING_REQUEST;
@@ -1205,6 +1523,21 @@ void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
}
}
+static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
+{
+ u16 tdl = priv_ep->pending_tdl;
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (tdl > EP_CMD_TDL_MAX) {
+ tdl = EP_CMD_TDL_MAX;
+ priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
+ } else {
+ priv_ep->pending_tdl = 0;
+ }
+
+ writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
+}
+
/**
* cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
* @priv_ep: endpoint object
@@ -1215,6 +1548,9 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
u32 ep_sts_reg;
+ struct usb_request *deferred_request;
+ struct usb_request *pending_request;
+ u32 tdl = 0;
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
@@ -1223,6 +1559,36 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_sts_reg = readl(&priv_dev->regs->ep_sts);
writel(ep_sts_reg, &priv_dev->regs->ep_sts);
+ if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
+ bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);
+
+ tdl = cdns3_get_tdl(priv_dev);
+
+ /*
+ * Continue the previous transfer:
+ * There is a race between ERDY and PRIME. The device sends
+ * ERDY and, almost at the same time, the host sends PRIME.
+ * This causes the host to ignore the ERDY packet, so the
+ * driver has to send it again.
+ */
+ if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
+ EP_STS_HOSTPP(ep_sts_reg))) {
+ writel(EP_CMD_ERDY |
+ EP_CMD_ERDY_SID(priv_ep->last_stream_id),
+ &priv_dev->regs->ep_cmd);
+ ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
+ } else {
+ priv_ep->prime_flag = true;
+
+ pending_request = cdns3_next_request(&priv_ep->pending_req_list);
+ deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+ if (deferred_request && !pending_request) {
+ cdns3_start_all_request(priv_dev, priv_ep);
+ }
+ }
+ }
+
if (ep_sts_reg & EP_STS_TRBERR) {
if (priv_ep->flags & EP_STALL_PENDING &&
!(ep_sts_reg & EP_STS_DESCMIS &&
@@ -1259,7 +1625,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
}
}
- if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+ if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
+ (ep_sts_reg & EP_STS_IOT)) {
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
if (ep_sts_reg & EP_STS_ISP)
priv_ep->flags |= EP_QUIRK_END_TRANSFER;
@@ -1267,6 +1634,29 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
}
+ if (!priv_ep->use_streams) {
+ if ((ep_sts_reg & EP_STS_IOC) ||
+ (ep_sts_reg & EP_STS_ISP)) {
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ } else if ((priv_ep->flags & EP_TDLCHK_EN) &&
+ priv_ep->pending_tdl) {
+ /* handle IOT with pending tdl */
+ cdns3_reprogram_tdl(priv_ep);
+ }
+ } else if (priv_ep->dir == USB_DIR_OUT) {
+ priv_ep->ep_sts_pending |= ep_sts_reg;
+ } else if (ep_sts_reg & EP_STS_IOT) {
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ }
+ }
+
+ /*
+ * The MD_EXIT interrupt is set when a stream-capable endpoint exits
+ * the MOVE DATA state of the bulk IN/OUT stream protocol state machine.
+ */
+ if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
+ (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
+ priv_ep->ep_sts_pending = 0;
cdns3_transfer_completed(priv_dev, priv_ep);
}
@@ -1274,7 +1664,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
* WA2: this condition should only be met when
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
* priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
- * In other cases this interrupt will be disabled/
+ * In other cases this interrupt will be disabled.
*/
if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
!(priv_ep->flags & EP_STALLED))
@@ -1375,13 +1765,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
*/
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
- struct cdns3_device *priv_dev;
- struct cdns3 *cdns = data;
+ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
u32 reg;
- priv_dev = cdns->gadget_dev;
-
/* check USB device interrupt */
reg = readl(&priv_dev->regs->usb_ists);
if (reg) {
@@ -1419,14 +1806,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
*/
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
- struct cdns3_device *priv_dev;
- struct cdns3 *cdns = data;
+ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
int bit;
u32 reg;
- priv_dev = cdns->gadget_dev;
spin_lock_irqsave(&priv_dev->lock, flags);
reg = readl(&priv_dev->regs->usb_ists);
@@ -1462,6 +1847,9 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
ret = IRQ_HANDLED;
}
+ if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
+ cdns3_wa2_check_outq_status(priv_dev);
+
irqend:
writel(~0, &priv_dev->regs->ep_ien);
spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -1516,6 +1904,27 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
+void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
+ return;
+
+ if (priv_dev->dev_ver >= DEV_VER_V3) {
+ u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+ /*
+ * Stream-capable endpoints are handled by using the ep_tdl
+ * register. Other endpoints use the TDL-from-TRB feature.
+ */
+ cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
+ }
+
+ /* Enable the stream bit, TDL check and SID check */
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
+ EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
+}
+
void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
@@ -1777,6 +2186,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
{
struct cdns3_endpoint *priv_ep;
struct cdns3_device *priv_dev;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
u32 reg = EP_STS_EN_TRBERREN;
u32 bEndpointAddress;
unsigned long flags;
@@ -1786,6 +2196,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
+ comp_desc = priv_ep->endpoint.comp_desc;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
@@ -1816,6 +2227,24 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
goto exit;
}
+ bEndpointAddress = priv_ep->num | priv_ep->dir;
+ cdns3_select_ep(priv_dev, bEndpointAddress);
+
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+ /*
+ * Enable stream-support (SS mode) related interrupts
+ * in the EP_STS_EN register.
+ */
+ if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+ reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
+ EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
+ EP_STS_EN_STREAMREN;
+ priv_ep->use_streams = true;
+ cdns3_stream_ep_reconfig(priv_dev, priv_ep);
+ priv_dev->using_streams |= true;
+ }
+ }
+
ret = cdns3_allocate_trb_pool(priv_ep);
if (ret)
@@ -1962,6 +2391,7 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
ep->desc = NULL;
priv_ep->flags &= ~EP_ENABLED;
+ priv_ep->use_streams = false;
spin_unlock_irqrestore(&priv_dev->lock, flags);
@@ -2010,13 +2440,21 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
list_add_tail(&request->list, &priv_ep->deferred_req_list);
/*
+ * For a stream-capable endpoint, start the request only if the
+ * prime IRQ flag is set.
* If hardware endpoint configuration has not been set yet then
* just queue request in deferred list. Transfer will be started in
* cdns3_set_hw_configuration.
*/
- if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
- !(priv_ep->flags & EP_STALL_PENDING))
- cdns3_start_all_request(priv_dev, priv_ep);
+ if (!request->stream_id) {
+ if (priv_dev->hw_configured_flag &&
+ !(priv_ep->flags & EP_STALLED) &&
+ !(priv_ep->flags & EP_STALL_PENDING))
+ cdns3_start_all_request(priv_dev, priv_ep);
+ } else {
+ if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
+ cdns3_start_all_request(priv_dev, priv_ep);
+ }
return 0;
}
@@ -2389,7 +2827,6 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
struct cdns3_endpoint *priv_ep;
u32 bEndpointAddress;
struct usb_ep *ep;
- int ret = 0;
int val;
priv_dev->gadget_driver = NULL;
@@ -2413,7 +2850,7 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
writel(0, &priv_dev->regs->usb_ien);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
- return ret;
+ return 0;
}
static const struct usb_gadget_ops cdns3_gadget_ops = {
@@ -2539,7 +2976,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
priv_dev = cdns->gadget_dev;
- devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
+ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
@@ -2710,7 +3147,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
cdns3_device_irq_handler,
cdns3_device_thread_irq_handler,
- IRQF_SHARED, dev_name(cdns->dev), cdns);
+ IRQF_SHARED, dev_name(cdns->dev),
+ cdns->gadget_dev);
if (ret)
goto err0;
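
The two IRQ hunks above move in lock-step for a reason: for a shared interrupt, the kernel identifies a handler by the (irq, dev_id) pair, where dev_id is the last argument of devm_request_threaded_irq(), so the free must pass the exact cookie that was registered or it will not match. Reduced to the invariant (priv_dev here is cdns->gadget_dev, as in the driver):

	/* registration: dev_id is the gadget device ... */
	ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
					cdns3_device_irq_handler,
					cdns3_device_thread_irq_handler,
					IRQF_SHARED, dev_name(cdns->dev),
					cdns->gadget_dev);

	/* ... so teardown must name the same cookie. */
	devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
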
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index bc4024041ef2..f003a7801872 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -599,6 +599,7 @@ struct cdns3_usb_regs {
#define EP_CMD_TDL_MASK GENMASK(15, 9)
#define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK)
#define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9)
+#define EP_CMD_TDL_MAX (EP_CMD_TDL_MASK >> 9)
/* ERDY Stream ID value (used in SS mode). */
#define EP_CMD_ERDY_SID_MASK GENMASK(31, 16)
@@ -969,10 +970,18 @@ struct cdns3_usb_regs {
#define ISO_MAX_INTERVAL 10
+#define MAX_TRB_LENGTH BIT(16)
+
#if TRBS_PER_SEGMENT < 2
#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
#endif
+#define TRBS_PER_STREAM_SEGMENT 2
+
+#if TRBS_PER_STREAM_SEGMENT < 2
+#error "Incorrect TRBS_PER_STREAMS_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
/*
* Only for ISOC endpoints - the maximum number of TRBs is calculated as
* pow(2, bInterval-1) * number of USB requests. It is a limitation made by
@@ -1000,6 +1009,7 @@ struct cdns3_trb {
#define TRB_SIZE (sizeof(struct cdns3_trb))
#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)
+#define TRB_STREAM_RING_SIZE (TRB_SIZE * TRBS_PER_STREAM_SEGMENT)
#define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT)
#define TRB_CTRL_RING_SIZE (TRB_SIZE * 2)
@@ -1078,7 +1088,7 @@ struct cdns3_trb {
#define CDNS3_ENDPOINTS_MAX_COUNT 32
#define CDNS3_EP_ZLP_BUF_SIZE 1024
-#define CDNS3_EP_BUF_SIZE 2 /* KB */
+#define CDNS3_EP_BUF_SIZE 4 /* KB */
#define CDNS3_EP_ISO_HS_MULT 3
#define CDNS3_EP_ISO_SS_BURST 3
#define CDNS3_MAX_NUM_DESCMISS_BUF 32
@@ -1109,6 +1119,7 @@ struct cdns3_device;
* @interval: interval between packets used for ISOC endpoint.
* @free_trbs: number of free TRBs in transfer ring
* @num_trbs: number of all TRBs in transfer ring
+ * @alloc_ring_size: size of the allocated TRB ring
* @pcs: producer cycle state
* @ccs: consumer cycle state
* @enqueue: enqueue index in transfer ring
@@ -1142,6 +1153,7 @@ struct cdns3_endpoint {
#define EP_QUIRK_END_TRANSFER BIT(11)
#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
+#define EP_TDLCHK_EN BIT(15)
u32 flags;
struct cdns3_request *descmis_req;
@@ -1153,6 +1165,7 @@ struct cdns3_endpoint {
int free_trbs;
int num_trbs;
+ int alloc_ring_size;
u8 pcs;
u8 ccs;
int enqueue;
@@ -1163,6 +1176,14 @@ struct cdns3_endpoint {
struct cdns3_trb *wa1_trb;
unsigned int wa1_trb_index;
unsigned int wa1_cycle_bit:1;
+
+ /* Stream related */
+ unsigned int use_streams:1;
+ unsigned int prime_flag:1;
+ u32 ep_sts_pending;
+ u16 last_stream_id;
+ u16 pending_tdl;
+ unsigned int stream_sg_idx;
};
/**
@@ -1290,6 +1311,7 @@ struct cdns3_device {
int hw_configured_flag:1;
int wake_up_flag:1;
unsigned status_completion_no_call:1;
+ unsigned using_streams:1;
int out_mem_is_allocated;
struct work_struct pending_status_wq;
@@ -1310,8 +1332,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev);
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep);
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable);
struct usb_request *cdns3_next_request(struct list_head *list);
-int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
- struct usb_request *request);
void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm);
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep);
u8 cdns3_ep_addr_to_index(u8 ep_addr);
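
For reference, the new EP_CMD_TDL_MAX falls straight out of the mask arithmetic: GENMASK(15, 9) is 0xfe00, and 0xfe00 >> 9 is 0x7f, so the TDL field holds at most 127. A stand-alone sanity check of the same arithmetic, with GENMASK restated for 32-bit values in user space:

#include <assert.h>

#define MY_GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define EP_CMD_TDL_MASK   MY_GENMASK(15, 9)
#define EP_CMD_TDL_MAX    (EP_CMD_TDL_MASK >> 9)

int main(void)
{
	assert(EP_CMD_TDL_MASK == 0xfe00);
	assert(EP_CMD_TDL_MAX == 0x7f);	/* 127 packets */
	return 0;
}
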
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
index e92348c9b4d7..8d121e207fd8 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/trace.h
@@ -122,18 +122,24 @@ DECLARE_EVENT_CLASS(cdns3_log_epx_irq,
__string(ep_name, priv_ep->name)
__field(u32, ep_sts)
__field(u32, ep_traddr)
+ __field(u32, ep_last_sid)
+ __field(u32, use_streams)
__dynamic_array(char, str, CDNS3_MSG_MAX)
),
TP_fast_assign(
__assign_str(ep_name, priv_ep->name);
__entry->ep_sts = readl(&priv_dev->regs->ep_sts);
__entry->ep_traddr = readl(&priv_dev->regs->ep_traddr);
+ __entry->ep_last_sid = priv_ep->last_stream_id;
+ __entry->use_streams = priv_ep->use_streams;
),
- TP_printk("%s, ep_traddr: %08x",
+ TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d",
cdns3_decode_epx_irq(__get_str(str),
__get_str(ep_name),
__entry->ep_sts),
- __entry->ep_traddr)
+ __entry->ep_traddr,
+ __entry->ep_last_sid,
+ __entry->use_streams)
);
DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq,
@@ -210,6 +216,7 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__field(int, end_trb)
__field(struct cdns3_trb *, start_trb_addr)
__field(int, flags)
+ __field(unsigned int, stream_id)
),
TP_fast_assign(
__assign_str(name, req->priv_ep->name);
@@ -225,9 +232,10 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__entry->end_trb = req->end_trb;
__entry->start_trb_addr = req->trb;
__entry->flags = req->flags;
+ __entry->stream_id = req->request.stream_id;
),
TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d,"
- " trb: [start:%d, end:%d: virt addr %pa], flags:%x ",
+ " trb: [start:%d, end:%d: virt addr %pa], flags:%x SID: %u",
__get_str(name), __entry->req, __entry->buf, __entry->actual,
__entry->length,
__entry->zero ? "Z" : "z",
@@ -237,7 +245,8 @@ DECLARE_EVENT_CLASS(cdns3_log_request,
__entry->start_trb,
__entry->end_trb,
__entry->start_trb_addr,
- __entry->flags
+ __entry->flags,
+ __entry->stream_id
)
);
@@ -281,6 +290,39 @@ TRACE_EVENT(cdns3_ep0_queue,
__entry->length)
);
+DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->priv_ep->name)
+ __field(struct cdns3_request *, req)
+ __field(unsigned int, length)
+ __field(unsigned int, actual)
+ __field(unsigned int, stream_id)
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->priv_ep->name);
+ __entry->req = req;
+ __entry->length = req->request.length;
+ __entry->actual = req->request.actual;
+ __entry->stream_id = req->request.stream_id;
+ ),
+ TP_printk("%s: req: %p,request length: %u actual length: %u SID: %u",
+ __get_str(name), __entry->req, __entry->length,
+ __entry->actual, __entry->stream_id)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_stream_split_transfer_len,
+ cdns3_stream_transfer_split_next_part,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
DECLARE_EVENT_CLASS(cdns3_log_aligned_request,
TP_PROTO(struct cdns3_request *priv_req),
TP_ARGS(priv_req),
@@ -319,6 +361,34 @@ DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request,
TP_ARGS(req)
);
+DECLARE_EVENT_CLASS(cdns3_log_map_request,
+ TP_PROTO(struct cdns3_request *priv_req),
+ TP_ARGS(priv_req),
+ TP_STRUCT__entry(
+ __string(name, priv_req->priv_ep->name)
+ __field(struct usb_request *, req)
+ __field(void *, buf)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_req->priv_ep->name);
+ __entry->req = &priv_req->request;
+ __entry->buf = priv_req->request.buf;
+ __entry->dma = priv_req->request.dma;
+ ),
+ TP_printk("%s: req: %p, req buf %p, dma %p",
+ __get_str(name), __entry->req, __entry->buf, &__entry->dma
+ )
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
DECLARE_EVENT_CLASS(cdns3_log_trb,
TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
TP_ARGS(priv_ep, trb),
@@ -329,6 +399,7 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__field(u32, length)
__field(u32, control)
__field(u32, type)
+ __field(unsigned int, last_stream_id)
),
TP_fast_assign(
__assign_str(name, priv_ep->name);
@@ -337,8 +408,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__entry->length = trb->length;
__entry->control = trb->control;
__entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
+ __entry->last_stream_id = priv_ep->last_stream_id;
),
- TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)",
+ TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u",
__get_str(name), __entry->trb, __entry->buffer,
TRB_LEN(__entry->length),
(u8)TRB_BURST_LEN_GET(__entry->length),
@@ -349,7 +421,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
__entry->control & TRB_FIFO_MODE ? "FIFO, " : "",
__entry->control & TRB_CHAIN ? "CHAIN, " : "",
__entry->control & TRB_IOC ? "IOC, " : "",
- TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK"
+ TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK",
+ TRB_FIELD_TO_STREAMID(__entry->control),
+ __entry->last_stream_id
)
);
@@ -398,6 +472,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
__field(unsigned int, maxpacket)
__field(unsigned int, maxpacket_limit)
__field(unsigned int, max_streams)
+ __field(unsigned int, use_streams)
__field(unsigned int, maxburst)
__field(unsigned int, flags)
__field(unsigned int, dir)
@@ -409,16 +484,18 @@ DECLARE_EVENT_CLASS(cdns3_log_ep,
__entry->maxpacket = priv_ep->endpoint.maxpacket;
__entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit;
__entry->max_streams = priv_ep->endpoint.max_streams;
+ __entry->use_streams = priv_ep->use_streams;
__entry->maxburst = priv_ep->endpoint.maxburst;
__entry->flags = priv_ep->flags;
__entry->dir = priv_ep->dir;
__entry->enqueue = priv_ep->enqueue;
__entry->dequeue = priv_ep->dequeue;
),
- TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, "
- "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
+ TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, "
+ "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
__get_str(name), __entry->maxpacket,
__entry->maxpacket_limit, __entry->max_streams,
+ __entry->use_streams,
__entry->maxburst, __entry->enqueue,
__entry->dequeue,
__entry->flags & EP_ENABLED ? "EN | " : "",
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index ae850b3fddf2..d53db520e209 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -7,6 +7,7 @@ config USB_CHIPIDEA
select RESET_CONTROLLER
select USB_ULPI_BUS
select USB_ROLE_SWITCH
+ select USB_TEGRA_PHY if ARCH_TEGRA
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. It supports:
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6911aef500e9..d49d5e1235d0 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -302,6 +302,16 @@ static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci)
return USB_ROLE_NONE;
}
+static inline enum ci_role usb_role_to_ci_role(enum usb_role role)
+{
+ if (role == USB_ROLE_HOST)
+ return CI_ROLE_HOST;
+ else if (role == USB_ROLE_DEVICE)
+ return CI_ROLE_GADGET;
+ else
+ return CI_ROLE_END;
+}
+
/**
* hw_read_id_reg: reads from a identification register
* @ci: the controller
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 0c9911d44ee5..7455df0ede49 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -83,13 +83,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
return err;
}
- /*
- * Tegra's USB PHY driver doesn't implement optional phy_init()
- * hook, so we have to power on UDC controller before ChipIdea
- * driver initialization kicks in.
- */
- usb_phy_set_suspend(udc->phy, 0);
-
/* setup and register ChipIdea HDRC device */
udc->data.name = "tegra-udc";
udc->data.flags = soc->flags;
@@ -109,7 +102,6 @@ static int tegra_udc_probe(struct platform_device *pdev)
return 0;
fail_power_off:
- usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
return err;
}
@@ -119,7 +111,6 @@ static int tegra_udc_remove(struct platform_device *pdev)
struct tegra_udc *udc = platform_get_drvdata(pdev);
ci_hdrc_remove_device(udc->dev);
- usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index dce5db41501c..52139c2a9924 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -618,9 +618,11 @@ static int ci_usb_role_switch_set(struct device *dev, enum usb_role role)
struct ci_hdrc *ci = dev_get_drvdata(dev);
struct ci_hdrc_cable *cable = NULL;
enum usb_role current_role = ci_role_to_usb_role(ci);
+ enum ci_role ci_role = usb_role_to_ci_role(role);
unsigned long flags;
- if (current_role == role)
+ if ((ci_role != CI_ROLE_END && !ci->roles[ci_role]) ||
+ (current_role == role))
return 0;
pm_runtime_get_sync(ci->dev);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index b45ceb91c735..48e4a5ca1835 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd);
struct ehci_ci_priv {
struct regulator *reg_vbus;
+ bool enabled;
};
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
@@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
int ret = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
- if (priv->reg_vbus) {
+ if (priv->reg_vbus && enable != priv->enabled) {
if (port > 1) {
dev_warn(dev,
"Not support multi-port regulator control\n");
@@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
enable ? "enable" : "disable", ret);
return ret;
}
+ priv->enabled = enable;
}
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 70112cf0f195..2625aa01a911 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -20,7 +20,7 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
}
-static void ci_hdrc_host_driver_init(void)
+static inline void ci_hdrc_host_driver_init(void)
{
}
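
Making the empty stub static inline is the usual header idiom: a plain static function defined in a header emits a -Wunused-function warning (and dead code) in every translation unit that includes the header without calling it, whereas a static inline stub is discarded silently. The convention, sketched for a hypothetical config option:

#ifdef CONFIG_FOO
void foo_driver_init(void);		/* real version lives in foo.c */
#else
static inline void foo_driver_init(void)
{
}					/* no-op stub, no warning */
#endif
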
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5f40117e68e7..26bc05e48d8a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_INT] = 1024,
};
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
- int asnum, struct usb_host_interface *ifp, int num_ep,
- unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+ struct usb_endpoint_descriptor *e2)
+{
+ if (e1->bEndpointAddress == e2->bEndpointAddress)
+ return true;
+
+ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+ int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+ struct usb_endpoint_descriptor *epd;
+ struct usb_interface_cache *intfc;
+ struct usb_host_interface *alt;
+ int i, j, k;
+
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intfc = config->intf_cache[i];
+
+ for (j = 0; j < intfc->num_altsetting; ++j) {
+ alt = &intfc->altsetting[j];
+
+ if (alt->desc.bInterfaceNumber == inum &&
+ alt->desc.bAlternateSetting != asnum)
+ continue;
+
+ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+ epd = &alt->endpoint[k].desc;
+
+ if (endpoint_is_duplicate(epd, d))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ struct usb_host_config *config, int inum, int asnum,
+ struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
- if (ifp->endpoint[i].desc.bEndpointAddress ==
- d->bEndpointAddress) {
- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
- }
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
- /* Validate the wMaxPacketSize field */
+ /*
+ * Validate the wMaxPacketSize field.
+ * Some devices have isochronous endpoints in altsetting 0;
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
maxp = usb_endpoint_maxp(&endpoint->desc);
- if (maxp == 0) {
- dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Find the highest legal maxpacket size for this endpoint */
@@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
- num_ep, buffer, size);
+ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+ alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;
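
The helper pair above encodes one subtlety worth spelling out: control endpoints are bidirectional, so the direction bit of bEndpointAddress carries no information for them, and two control descriptors clash whenever their endpoint numbers match even if the raw address bytes differ. A small user-space sketch of that rule, mirroring endpoint_is_duplicate(); the descriptor values are made up:

#include <stdbool.h>
#include <stdio.h>

struct ep_desc { unsigned char bEndpointAddress, bmAttributes; };

#define EP_NUM(d)   ((d)->bEndpointAddress & 0x0f)
#define IS_CTRL(d)  (((d)->bmAttributes & 0x03) == 0)	/* control xfer */

static bool is_duplicate(const struct ep_desc *e1, const struct ep_desc *e2)
{
	if (e1->bEndpointAddress == e2->bEndpointAddress)
		return true;
	/* control EPs: same number is a clash regardless of direction */
	if (IS_CTRL(e1) || IS_CTRL(e2))
		return EP_NUM(e1) == EP_NUM(e2);
	return false;
}

int main(void)
{
	struct ep_desc in  = { 0x81, 0 };	/* IN  ep 1, control */
	struct ep_desc out = { 0x01, 0 };	/* OUT ep 1, control */

	printf("%d\n", is_duplicate(&in, &out));	/* prints 1 */
	return 0;
}
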
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 12bb5722b420..6833c918abce 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -574,7 +574,7 @@ __acquires(ps->lock)
/* Now carefully unlink all the marked pending URBs */
rescan:
- list_for_each_entry(as, &ps->async_pending, asynclist) {
+ list_for_each_entry_reverse(as, &ps->async_pending, asynclist) {
if (as->bulk_status == AS_UNLINK) {
as->bulk_status = 0; /* Only once */
urb = as->urb;
@@ -636,7 +636,7 @@ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
spin_lock_irqsave(&ps->lock, flags);
while (!list_empty(list)) {
- as = list_entry(list->next, struct async, asynclist);
+ as = list_last_entry(list, struct async, asynclist);
list_del_init(&as->asynclist);
urb = as->urb;
usb_get_urb(urb);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9ae2a7a93df2..f0a259937da8 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -222,7 +222,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = -EBUSY;
goto put_hcd;
}
- hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+ hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start,
hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f229ad6952c0..3405b146edc9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT) ||
(portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
@@ -2692,7 +2693,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme)
+#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme))
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
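
Parenthesizing the cast operand in USE_NEW_SCHEME is classic macro hygiene: a cast binds tighter than == or ?:, so if a caller ever passes an expression rather than a plain identifier, the unparenthesized form compares against only part of it. A user-space illustration of the failure mode; the macro shapes are copied from the hunk, the values are made up:

#include <stdio.h>

#define USE_OLD(i, scheme) ((i) / 2 == (int)scheme)	/* unhygienic */
#define USE_NEW(i, scheme) ((i) / 2 == (int)(scheme))	/* as patched */

int main(void)
{
	int first = 0;

	/* Intended question: does 4/2 equal (first ? 0 : 2)?  2 == 2. */
	printf("%d\n", USE_NEW(4, first ? 0 : 2));	/* prints 1 */
	/* Unparenthesized, == grabs just the condition and the whole
	 * macro parses as ((4/2 == (int)first) ? 0 : 2), i.e. 2.     */
	printf("%d\n", USE_OLD(4, first ? 0 : 2));	/* prints 2 */
	return 0;
}
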
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 6af6add3d4c0..876ff31261d5 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -288,14 +288,9 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
/*
* Need to schedule a work, as there are possible DELAY function calls.
- * Release lock before scheduling workq as it holds spinlock during
- * scheduling.
*/
- if (hsotg->wq_otg) {
- spin_unlock(&hsotg->lock);
+ if (hsotg->wq_otg)
queue_work(hsotg->wq_otg, &hsotg->wf_otg);
- spin_lock(&hsotg->lock);
- }
}
/**
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index b8f2790abf91..3a0dcbfbc827 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -183,6 +183,7 @@ DEFINE_SHOW_ATTRIBUTE(state);
static int fifo_show(struct seq_file *seq, void *v)
{
struct dwc2_hsotg *hsotg = seq->private;
+ int fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 val;
int idx;
@@ -196,7 +197,7 @@ static int fifo_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nPeriodic TXFIFOs:\n");
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
+ for (idx = 1; idx <= fifo_count; idx++) {
val = dwc2_readl(hsotg, DPTXFSIZN(idx));
seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 6be10e496e10..88f7d6d4ff2d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3784,15 +3784,26 @@ irq_retry:
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only unmasked ISOC EPs */
- if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
+ if (BIT(idx) & ~daintmsk)
continue;
epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
- if (epctrl & DXEPCTL_EPENA) {
+ /* ISOC EPs only */
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
epctrl |= DXEPCTL_SNAK;
epctrl |= DXEPCTL_EPDIS;
dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
+ continue;
+ }
+
+ /* non-ISOC EPs */
+ if (hs_ep->halted) {
+ if (!(epctrl & DXEPCTL_EPENA))
+ epctrl |= DXEPCTL_EPENA;
+ epctrl |= DXEPCTL_EPDIS;
+ epctrl |= DXEPCTL_STALL;
+ dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
}
}
@@ -4056,11 +4067,12 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
* a unique tx-fifo even if it is non-periodic.
*/
if (dir_in && hsotg->dedicated_fifos) {
+ unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
u32 fifo_index = 0;
u32 fifo_size = UINT_MAX;
size = hs_ep->ep.maxpacket * hs_ep->mc;
- for (i = 1; i < hsotg->num_of_eps; ++i) {
+ for (i = 1; i <= fifo_count; ++i) {
if (hsotg->fifo_map & (1 << i))
continue;
val = dwc2_readl(hsotg, DPTXFSIZN(i));
@@ -4310,19 +4322,20 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
epctl = dwc2_readl(hs, epreg);
if (value) {
- epctl |= DXEPCTL_STALL;
+ if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
+ dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
+ /* STALL bit will be set in the GOUTNAKEFF interrupt handler */
} else {
epctl &= ~DXEPCTL_STALL;
xfertype = epctl & DXEPCTL_EPTYPE_MASK;
if (xfertype == DXEPCTL_EPTYPE_BULK ||
xfertype == DXEPCTL_EPTYPE_INTERRUPT)
epctl |= DXEPCTL_SETD0PID;
+ dwc2_writel(hs, epctl, epreg);
}
- dwc2_writel(hs, epctl, epreg);
}
hs_ep->halted = value;
-
return 0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 81afe553aa66..b90f858af960 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2824,7 +2824,7 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->params.host_dma) {
+ if (hsotg->params.host_dma && chan->qh) {
if (hsotg->params.dma_desc_enable) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f561c6c9e8a9..1d85c42b9c67 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1246,6 +1246,9 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
/* do nothing */
break;
}
+
+ /* de-assert DRVVBUS for HOST and OTG mode */
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}
static void dwc3_get_properties(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1c8b349379af..77c4a9abe365 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -688,7 +688,9 @@ struct dwc3_ep {
#define DWC3_EP_STALL BIT(1)
#define DWC3_EP_WEDGE BIT(2)
#define DWC3_EP_TRANSFER_STARTED BIT(3)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_DELAY_START BIT(6)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index c1e9ea621f41..90bb022737da 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/**
- * dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
+ * dwc3-exynos.c - Samsung Exynos DWC3 Specific Glue layer
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -255,4 +255,4 @@ module_platform_driver(dwc3_exynos_driver);
MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
+MODULE_DESCRIPTION("DesignWare USB3 Exynos Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 294276f7deb9..7051611229c9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,7 @@
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
@@ -342,6 +343,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fd1b100d2927..6dee4dabc0a4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1136,8 +1136,10 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
- if (cmd == DWC3_DEPCMD_ENDTRANSFER)
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ }
break;
}
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c960a97ea02..1b8014ab0b25 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -57,7 +57,7 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
return -EINVAL;
}
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
return 0;
}
@@ -111,6 +111,9 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ /* set no action before sending new link state change */
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
/* set requested state */
reg |= DWC3_DCTL_ULSTCHNGREQ(state);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -1447,6 +1450,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ /* Start the transfer only after the END_TRANSFER is completed */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+ dep->flags |= DWC3_EP_DELAY_START;
+ return 0;
+ }
+
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1828,7 +1837,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc->pullups_connected = false;
}
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
do {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
@@ -2467,6 +2476,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
+ /*
+ * For OUT direction, the host may send less than the setup
+ * length. Return true for all OUT requests.
+ */
+ if (!req->direction)
+ return true;
+
return req->request.actual == req->request.length;
}
@@ -2618,8 +2634,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
case DWC3_DEPEVT_STREAMEVT:
@@ -2671,12 +2693,12 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
- struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+ (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/*
@@ -2686,16 +2708,13 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* much trouble synchronizing between us and gadget driver.
*
* We have discussed this with the IP Provider and it was
- * suggested to giveback all requests here, but give HW some
- * extra time to synchronize with the interconnect. We're using
- * an arbitrary 100us delay for that.
+ * suggested to giveback all requests here.
*
* Note also that a similar handling was tested by Synopsys
* (thanks a lot Paul) and nothing bad has come out of it.
- * In short, what we're doing is:
- *
- * - Issue EndTransfer WITH CMDIOC bit set
- * - Wait 100us
+ * In short, what we're doing is issuing EndTransfer with the
+ * CMDIOC bit set and delaying the transfer kick until the
+ * EndTransfer command has completed.
*
* As of IP version 3.10a of the DWC_usb3 IP, the controller
* supports a mode to work around the above limitation. The
@@ -2704,8 +2723,7 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* by writing GUCTL2[14]. This polling is already done in the
* dwc3_send_gadget_ep_cmd() function so if the mode is
* enabled, the EndTransfer command will have completed upon
- * returning from this function and we don't need to delay for
- * 100us.
+ * returning from this function.
*
* This mode is NOT available on the DWC_usb31 IP.
*/
@@ -2721,9 +2739,8 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
-
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
- udelay(100);
+ else
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
@@ -2752,12 +2769,12 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
+ dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_INITU1ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
reg &= ~DWC3_DCTL_INITU2ENA;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
dwc3_disconnect_gadget(dwc);
@@ -2809,7 +2826,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
dwc->test_mode = false;
dwc3_clear_stall_all_ep(dwc);
@@ -2913,11 +2930,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
} else {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
}
dep = dwc->eps[0];
@@ -3026,7 +3043,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
reg &= ~u1u2;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ dwc3_gadget_dctl_write_safe(dwc, reg);
break;
default:
/* do nothing */
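Note: the gadget.c changes above replace the old fixed 100us wait after EndTransfer with an event-driven handshake built on the two endpoint flags added in core.h. Condensed lifecycle, summarizing the hunks above (flag and function names from the driver; not the full logic):

	/* 1. Issuing EndTransfer with an interrupt marks the EP as pending. */
	dep->flags |= DWC3_EP_END_TRANSFER_PENDING;

	/* 2. A queue attempt while pending only records the intent to start. */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;		/* request stays on the pending list */
	}

	/* 3. The ENDTRANSFER completion event performs the deferred kick. */
	dep->flags &= ~(DWC3_EP_END_TRANSFER_PENDING | DWC3_EP_TRANSFER_STARTED);
	if ((dep->flags & DWC3_EP_DELAY_START) &&
	    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
		__dwc3_gadget_kick_transfer(dep);
	dep->flags &= ~DWC3_EP_DELAY_START;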
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 5faf4d1249e0..fbc7d8013f0b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -127,4 +127,18 @@ static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id);
}
+/**
+ * dwc3_gadget_dctl_write_safe - write to DCTL safe from link state change
+ * @dwc: pointer to our context structure
+ * @value: value to write to DCTL
+ *
+ * Use this function when doing read-modify-write to DCTL. It will not
+ * send a link state change request.
+ */
+static inline void dwc3_gadget_dctl_write_safe(struct dwc3 *dwc, u32 value)
+{
+ value &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, value);
+}
+
#endif /* __DRIVERS_USB_DWC3_GADGET_H */
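Note: every open-coded dwc3_writel(dwc->regs, DWC3_DCTL, reg) in the gadget paths above now goes through this helper, so a read-modify-write can never carry stale ULSTCHNGREQ bits back into DCTL (a nonzero link-state request field would re-trigger a link state change). Typical call pattern, as in the hunks above:

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;		/* the bits we mean to change */
	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_gadget_dctl_write_safe(dwc, reg);	/* ULSTCHNGREQ masked for us */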
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 5567ed2cddbe..fa252870c926 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc)
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
- props[prop_idx++].name = "usb3-lpm-capable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
if (dwc->usb2_lpm_disable)
- props[prop_idx++].name = "usb2-lpm-disable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
@@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc)
* This following flag tells XHCI to do just that.
*/
if (dwc->revision <= DWC3_REVISION_300A)
- props[prop_idx++].name = "quirk-broken-port-ped";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
ret = platform_device_add_properties(xhci, props);
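Note: the host.c fix above matters because a struct property_entry carries type and length metadata in addition to the name; assigning only .name leaves that metadata zeroed, so consumers may not recognise the boolean. PROPERTY_ENTRY_BOOL() initialises the whole entry. A minimal table-building sketch with the names from the hunk:

	struct property_entry props[4] = {};	/* zeroed sentinel terminates */
	unsigned int prop_idx = 0;
	int ret = 0;

	if (dwc->usb3_lpm_capable)
		props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");

	if (prop_idx)
		ret = platform_device_add_properties(xhci, props);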
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index cac991173ac0..971c6b92484a 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -971,7 +971,7 @@ static int __init xdbc_init(void)
goto free_and_quit;
}
- base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
+ base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
if (!base) {
xdbc_trace("failed to remap the io address\n");
ret = -ENOMEM;
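Note: this is the first of many mechanical ioremap_nocache() to ioremap() conversions in this series (more follow in the gadget and host drivers below). ioremap() already returns an uncached mapping suitable for MMIO on every architecture, so the _nocache alias was redundant and is being removed; the calling convention is unchanged:

	void __iomem *base;

	base = ioremap(start, len);	/* uncached MMIO mapping */
	if (!base)
		return -ENOMEM;
	/* ... readl(base + OFFSET), writel(val, base + OFFSET) ... */
	iounmap(base);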
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 02ff850278b1..c6db0a0a340c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -483,34 +483,6 @@ config USB_CONFIGFS_F_TCM
Both protocols can work on USB2.0 and USB3.0.
UAS utilizes the USB 3.0 feature called streams support.
-choice
- tristate "USB Gadget precomposed configurations"
- default USB_ETH
- optional
- help
- A Linux "Gadget Driver" talks to the USB Peripheral Controller
- driver through the abstract "gadget" API. Some other operating
- systems call these "client" drivers, of which "class drivers"
- are a subset (implementing a USB device class specification).
- A gadget driver implements one or more USB functions using
- the peripheral hardware.
-
- Gadget drivers are hardware-neutral, or "platform independent",
- except that they sometimes must understand quirks or limitations
- of the particular controllers they work with. For example, when
- a controller doesn't support alternate configurations or provide
- enough of the right types of endpoints, the gadget driver might
- not be able work with that controller, or might need to implement
- a less common variant of a device class protocol.
-
- The available choices each represent a single precomposed USB
- gadget configuration. In the device model, each option contains
- both the device instantiation as a child for a USB gadget
- controller, and the relevant drivers for each function declared
- by the device.
-
source "drivers/usb/gadget/legacy/Kconfig"
-endchoice
-
endif # USB_GADGET
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index ab9ac48a751a..32b637e3e1fa 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -293,6 +293,47 @@ err:
return ret;
}
+static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
+ char *page)
+{
+ enum usb_device_speed speed = to_gadget_info(item)->composite.max_speed;
+
+ return sprintf(page, "%s\n", usb_speed_string(speed));
+}
+
+static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+
+ mutex_lock(&gi->lock);
+
+ /* Prevent changing max_speed after the driver is bound */
+ if (gi->composite.gadget_driver.udc_name)
+ goto err;
+
+ if (strncmp(page, "super-speed-plus", 16) == 0)
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
+ else if (strncmp(page, "super-speed", 11) == 0)
+ gi->composite.max_speed = USB_SPEED_SUPER;
+ else if (strncmp(page, "high-speed", 10) == 0)
+ gi->composite.max_speed = USB_SPEED_HIGH;
+ else if (strncmp(page, "full-speed", 10) == 0)
+ gi->composite.max_speed = USB_SPEED_FULL;
+ else if (strncmp(page, "low-speed", 9) == 0)
+ gi->composite.max_speed = USB_SPEED_LOW;
+ else
+ goto err;
+
+ gi->composite.gadget_driver.max_speed = gi->composite.max_speed;
+
+ mutex_unlock(&gi->lock);
+ return len;
+err:
+ mutex_unlock(&gi->lock);
+ return -EINVAL;
+}
+
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceSubClass);
CONFIGFS_ATTR(gadget_dev_desc_, bDeviceProtocol);
@@ -302,6 +343,7 @@ CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
CONFIGFS_ATTR(gadget_dev_desc_, UDC);
+CONFIGFS_ATTR(gadget_dev_desc_, max_speed);
static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_bDeviceClass,
@@ -313,6 +355,7 @@ static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_bcdDevice,
&gadget_dev_desc_attr_bcdUSB,
&gadget_dev_desc_attr_UDC,
+ &gadget_dev_desc_attr_max_speed,
NULL,
};
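Note: the new max_speed store callback parses with strncmp() against fixed prefix lengths, so comparison order matters: "super-speed-plus" must be tested before "super-speed", otherwise the shorter prefix would always win. The ordering rule in isolation (standalone userspace C for illustration, not driver code):

	#include <stdio.h>
	#include <string.h>

	/* Longest prefix first, as in gadget_dev_desc_max_speed_store(). */
	static const char *parse_speed(const char *page)
	{
		if (strncmp(page, "super-speed-plus", 16) == 0)
			return "super-speed-plus";
		if (strncmp(page, "super-speed", 11) == 0)
			return "super-speed";
		if (strncmp(page, "high-speed", 10) == 0)
			return "high-speed";
		return "invalid";
	}

	int main(void)
	{
		/* configfs writes usually end in a newline; prefix match copes. */
		printf("%s\n", parse_speed("super-speed-plus\n"));
		printf("%s\n", parse_speed("super-speed\n"));
		return 0;
	}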
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 460d5d7c984f..7f5cf488b2b1 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -52,6 +52,7 @@ struct f_ecm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
/* FIXME is_open needs some irq-ish locking
@@ -380,7 +381,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ecm->notify_count))
return;
event = req->buf;
@@ -420,10 +421,10 @@ static void ecm_do_notify(struct f_ecm *ecm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ecm->ctrl_id);
- ecm->notify_req = NULL;
+ atomic_inc(&ecm->notify_count);
status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
if (status < 0) {
- ecm->notify_req = req;
+ atomic_dec(&ecm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -448,17 +449,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
switch (req->status) {
case 0:
/* no fault */
+ atomic_dec(&ecm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ecm->notify_count, 0);
ecm->notify_state = ECM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ecm->notify_count);
break;
}
- ecm->notify_req = req;
ecm_do_notify(ecm);
}
@@ -907,6 +910,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
+ if (atomic_read(&ecm->notify_count)) {
+ usb_ep_dequeue(ecm->notify, ecm->notify_req);
+ atomic_set(&ecm->notify_count, 0);
+ }
+
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
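Note: the f_ecm conversion above (mirrored in f_ncm below) replaces the old "notify_req == NULL means a notification is in flight" convention with an atomic counter, because the completion callback can run before usb_ep_queue() even returns, and the pointer dance could then lose a notification or requeue one twice. The guard pattern in self-contained form (a userspace stdatomic analogy, not the driver itself):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int notify_count;

	static bool try_start_notify(void)
	{
		if (atomic_load(&notify_count))	/* already in flight? */
			return false;
		atomic_fetch_add(&notify_count, 1);	/* count before queueing */
		return true;
	}

	static void notify_complete(int status)
	{
		if (status < 0)		/* -ECONNRESET/-ESHUTDOWN analogue */
			atomic_store(&notify_count, 0);
		else
			atomic_fetch_sub(&notify_count, 1);
	}

	int main(void)
	{
		if (try_start_notify())
			notify_complete(0);	/* may run before caller resumes */
		printf("in flight: %d\n", atomic_load(&notify_count));
		return 0;
	}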
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0bbccac94d6c..6171d28331e6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1062,6 +1062,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
+ req->num_sgs = 0;
}
req->length = data_len;
@@ -1105,6 +1106,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
req->num_sgs = io_data->sgt.nents;
} else {
req->buf = data;
+ req->num_sgs = 0;
}
req->length = data_len;
@@ -1486,7 +1488,7 @@ enum {
Opt_gid,
};
-static const struct fs_parameter_spec ffs_fs_param_specs[] = {
+static const struct fs_parameter_spec ffs_fs_fs_parameters[] = {
fsparam_bool ("no_disconnect", Opt_no_disconnect),
fsparam_u32 ("rmode", Opt_rmode),
fsparam_u32 ("fmode", Opt_fmode),
@@ -1496,11 +1498,6 @@ static const struct fs_parameter_spec ffs_fs_param_specs[] = {
{}
};
-static const struct fs_parameter_description ffs_fs_fs_parameters = {
- .name = "kAFS",
- .specs = ffs_fs_param_specs,
-};
-
static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct ffs_sb_fill_data *data = fc->fs_private;
@@ -1509,7 +1506,7 @@ static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
ENTER();
- opt = fs_parse(fc, &ffs_fs_fs_parameters, param, &result);
+ opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result);
if (opt < 0)
return opt;
@@ -1641,7 +1638,7 @@ static struct file_system_type ffs_fs_type = {
.owner = THIS_MODULE,
.name = "functionfs",
.init_fs_context = ffs_fs_init_fs_context,
- .parameters = &ffs_fs_fs_parameters,
+ .parameters = ffs_fs_fs_parameters,
.kill_sb = ffs_fs_kill_sb,
};
MODULE_ALIAS_FS("functionfs");
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 2d6e76e4cffa..1d900081b1f0 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -53,6 +53,7 @@ struct f_ncm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
const struct ndp_parser_opts *parser_opts;
@@ -547,7 +548,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ncm->notify_count))
return;
event = req->buf;
@@ -587,7 +588,8 @@ static void ncm_do_notify(struct f_ncm *ncm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ncm->ctrl_id);
- ncm->notify_req = NULL;
+ atomic_inc(&ncm->notify_count);
+
/*
* In double buffering if there is a space in FIFO,
* completion callback can be called right after the call,
@@ -597,7 +599,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
spin_lock(&ncm->lock);
if (status < 0) {
- ncm->notify_req = req;
+ atomic_dec(&ncm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -632,17 +634,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
case 0:
VDBG(cdev, "Notification %02x sent\n",
event->bNotificationType);
+ atomic_dec(&ncm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ncm->notify_count, 0);
ncm->notify_state = NCM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ncm->notify_count);
break;
}
- ncm->notify_req = req;
ncm_do_notify(ncm);
spin_unlock(&ncm->lock);
}
@@ -1649,6 +1653,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
+ if (atomic_read(&ncm->notify_count)) {
+ usb_ep_dequeue(ncm->notify, ncm->notify_req);
+ atomic_set(&ncm->notify_count, 0);
+ }
+
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 04c142c13075..64de9f1b874c 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -72,7 +72,7 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params,
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-static const struct file_operations rndis_proc_fops;
+static const struct proc_ops rndis_proc_ops;
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
@@ -902,7 +902,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
sprintf(name, NAME_TEMPLATE, i);
proc_entry = proc_create_data(name, 0660, NULL,
- &rndis_proc_fops, params);
+ &rndis_proc_ops, params);
if (!proc_entry) {
kfree(params);
rndis_put_nr(i);
@@ -1164,13 +1164,12 @@ static int rndis_proc_open(struct inode *inode, struct file *file)
return single_open(file, rndis_proc_show, PDE_DATA(inode));
}
-static const struct file_operations rndis_proc_fops = {
- .owner = THIS_MODULE,
- .open = rndis_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = rndis_proc_write,
+static const struct proc_ops rndis_proc_ops = {
+ .proc_open = rndis_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = rndis_proc_write,
};
#define NAME_TEMPLATE "driver/rndis-%03d"
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 7ec6a996af26..6d956f190f5a 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -239,18 +239,6 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, prm->hw_ptr);
}
-static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
-}
-
-static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
static int uac_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -326,9 +314,6 @@ static int uac_pcm_null(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops uac_pcm_ops = {
.open = uac_pcm_open,
.close = uac_pcm_null,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uac_pcm_hw_params,
- .hw_free = uac_pcm_hw_free,
.trigger = uac_pcm_trigger,
.pointer = uac_pcm_pointer,
.prepare = uac_pcm_null,
@@ -422,7 +407,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
struct usb_ep *ep;
struct uac_rtd_params *prm;
struct uac_params *params = &audio_dev->params;
- unsigned int factor, rate;
+ unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
@@ -441,13 +426,15 @@ int u_audio_start_playback(struct g_audio *audio_dev)
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
- rate = params->p_srate * uac->p_framesize;
uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+ uac->p_pktsize = min_t(unsigned int,
+ uac->p_framesize *
+ (params->p_srate / uac->p_interval),
prm->max_psize);
if (uac->p_pktsize < prm->max_psize)
- uac->p_pktsize_residue = rate % uac->p_interval;
+ uac->p_pktsize_residue = uac->p_framesize *
+ (params->p_srate % uac->p_interval);
else
uac->p_pktsize_residue = 0;
@@ -584,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
strlcpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- NULL, 0, BUFF_SIZE_MAX);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
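Note: u_audio.c above moves to ALSA's managed buffer mode. snd_pcm_set_managed_buffer_all() hands buffer preallocation and sizing to the core, after which the driver can drop its hw_params, hw_free and ioctl callbacks entirely, as the hunks do; the core supplies working defaults. The resulting minimal shape (a sketch using the driver's own names):

	static const struct snd_pcm_ops uac_pcm_ops = {
		.open		= uac_pcm_open,
		.close		= uac_pcm_null,
		/* no .ioctl/.hw_params/.hw_free: managed buffers cover them */
		.trigger	= uac_pcm_trigger,
		.pointer	= uac_pcm_pointer,
		.prepare	= uac_pcm_null,
	};

	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
				       NULL, 0, BUFF_SIZE_MAX);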
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 119a4e47681f..6e7e1a9202e6 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -14,6 +14,32 @@
# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
#
+choice
+ tristate "USB Gadget precomposed configurations"
+ default USB_ETH
+ optional
+ help
+ A Linux "Gadget Driver" talks to the USB Peripheral Controller
+ driver through the abstract "gadget" API. Some other operating
+ systems call these "client" drivers, of which "class drivers"
+ are a subset (implementing a USB device class specification).
+ A gadget driver implements one or more USB functions using
+ the peripheral hardware.
+
+ Gadget drivers are hardware-neutral, or "platform independent",
+ except that they sometimes must understand quirks or limitations
+ of the particular controllers they work with. For example, when
+ a controller doesn't support alternate configurations or provide
+ enough of the right types of endpoints, the gadget driver might
+ not be able to work with that controller, or might need to implement
+ a less common variant of a device class protocol.
+
+ The available choices each represent a single precomposed USB
+ gadget configuration. In the device model, each option contains
+ both the device instantiation as a child for a USB gadget
+ controller, and the relevant drivers for each function declared
+ by the device.
+
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -489,3 +515,5 @@ config USB_G_WEBCAM
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_webcam".
+
+endchoice
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index da1c37933ca1..8d7a556ece30 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -225,7 +225,7 @@ static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = cdc_bind,
.unbind = cdc_unbind,
};
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index b640ed3fcf70..ae6d8f7092b8 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -149,7 +149,7 @@ static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gfs_bind,
.unbind = gfs_unbind,
};
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index 50515f9e1022..ec9749845660 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -482,7 +482,7 @@ static struct usb_composite_driver multi_driver = {
.name = "g_multi",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = multi_bind,
.unbind = multi_unbind,
.needs_serial = 1,
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 8465f081e921..c61e71ba7045 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -197,7 +197,7 @@ static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gncm_bind,
.unbind = gncm_unbind,
};
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index ae70ce29d5e4..797d6ace8994 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -445,6 +445,7 @@ config USB_TEGRA_XUDC
tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PHY_TEGRA_XUSB
+ select USB_ROLE_SWITCH
help
Enables NVIDIA Tegra USB 3.0 device mode controller driver.
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57b6f66331cf..bfd1c9e80a1f 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -116,7 +116,7 @@ static int udc_pci_probe(
goto err_memreg;
}
- dev->virt_addr = ioremap_nocache(resource, len);
+ dev->virt_addr = ioremap(resource, len);
if (!dev->virt_addr) {
dev_dbg(&pdev->dev, "start address cannot be mapped\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 8a42768e3213..6e0432141c40 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1122,7 +1122,7 @@ static struct usb_endpoint_descriptor usba_ep0_desc = {
.bInterval = 1,
};
-static struct usb_gadget usba_gadget_template = {
+static const struct usb_gadget usba_gadget_template = {
.ops = &usba_udc_ops,
.max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 51fa614b4079..9b11046480fe 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1414,6 +1414,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
}
mutex_unlock(&udc_lock);
+ if (ret)
+ pr_warn("udc-core: couldn't find an available UDC or it's busy\n");
return ret;
found:
ret = udc_bind_to_driver(udc, driver);
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index c3721225b61e..4a46f661d0e4 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1782,7 +1782,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->got_region = 1;
- base = ioremap_nocache(resource, len);
+ base = ioremap(resource, len);
if (base == NULL) {
DBG(dev, "can't map memory\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 64d80c65bb96..aaf975c809bf 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2175,8 +2175,6 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- spin_lock(&dev->lock);
-
/* Inside lock so that no gadget can use this udc until probe is done */
retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
if (retval) {
@@ -2185,15 +2183,21 @@ static int gr_probe(struct platform_device *pdev)
}
dev->added = 1;
+ spin_lock(&dev->lock);
+
retval = gr_udc_init(dev);
- if (retval)
+ if (retval) {
+ spin_unlock(&dev->lock);
goto out;
-
- gr_dfs_create(dev);
+ }
/* Clear all interrupt enables that might be left on since last boot */
gr_disable_interrupts_and_pullup(dev);
+ spin_unlock(&dev->lock);
+
+ gr_dfs_create(dev);
+
retval = gr_request_irq(dev, dev->irq);
if (retval) {
dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
@@ -2222,8 +2226,6 @@ static int gr_probe(struct platform_device *pdev)
dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
out:
- spin_unlock(&dev->lock);
-
if (retval)
gr_remove(pdev);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 247de0faaeb7..a8273b589456 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2323,7 +2323,7 @@ net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
goto err;
}
- mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ mem_mapped_addr[i] = ioremap(resource, len);
if (mem_mapped_addr[i] == NULL) {
release_mem_region(resource, len);
dev_dbg(dev->dev, "can't map memory\n");
@@ -2401,7 +2401,7 @@ net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
goto err;
}
- mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ mem_mapped_addr[i] = ioremap(resource, len);
if (mem_mapped_addr[i] == NULL) {
release_mem_region(resource, len);
dev_dbg(dev->dev, "can't map memory\n");
@@ -2625,7 +2625,7 @@ net2272_plat_probe(struct platform_device *pdev)
ret = -EBUSY;
goto err;
}
- dev->base_addr = ioremap_nocache(base, len);
+ dev->base_addr = ioremap(base, len);
if (!dev->base_addr) {
dev_dbg(dev->dev, "can't map memory\n");
ret = -EFAULT;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 51efee21915f..1fd1b9186e46 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -3659,7 +3659,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* 8051 code into the chip, e.g. to turn on PCI PM.
*/
- base = ioremap_nocache(resource, len);
+ base = ioremap(resource, len);
if (base == NULL) {
ep_dbg(dev, "can't map memory\n");
retval = -EFAULT;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index f36f0730afab..bd12417996db 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2757,7 +2757,7 @@ static int omap_udc_probe(struct platform_device *pdev)
/* NOTE: "knows" the order of the resources! */
if (!request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
+ resource_size(&pdev->resource[0]),
driver_name)) {
DBG("request_mem_region failed\n");
return -EBUSY;
@@ -2934,7 +2934,7 @@ cleanup0:
}
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ resource_size(&pdev->resource[0]));
return status;
}
@@ -2950,7 +2950,7 @@ static int omap_udc_remove(struct platform_device *pdev)
wait_for_completion(&done);
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ resource_size(&pdev->resource[0]));
return 0;
}
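Note: the omap_udc hunks above are a readability fix: resource_size() encapsulates the inclusive-bounds arithmetic that was open-coded three times, which avoids the classic off-by-one when the "+ 1" is forgotten. The equivalent arithmetic in isolation:

	/* struct resource bounds are inclusive, so size = end - start + 1. */
	static inline unsigned long my_resource_size(unsigned long start,
						     unsigned long end)
	{
		return end - start + 1;	/* what resource_size(&res) computes */
	}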
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d730180db06..55bdfdf11e4c 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -186,7 +186,7 @@ config USB_EHCI_FSL
config USB_EHCI_MXC
tristate "Support for Freescale i.MX on-chip EHCI USB controller"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select USB_EHCI_ROOT_HUB_TT
---help---
Variation of ARC USB block used in some Freescale chips.
@@ -210,8 +210,8 @@ config USB_EHCI_HCD_OMAP
config USB_EHCI_HCD_ORION
tristate "Support for Marvell EBU on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU)
- default y
+ depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU || COMPILE_TEST)
+ default y if (PLAT_ORION || ARCH_MVEBU)
---help---
Enables support for the on-chip EHCI controller on Marvell's
embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
@@ -221,15 +221,15 @@ config USB_EHCI_HCD_ORION
config USB_EHCI_HCD_SPEAR
tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
+ depends on USB_EHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+ default y if PLAT_SPEAR
---help---
Enables support for the on-chip EHCI controller on
ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
select GENERIC_PHY
select USB_EHCI_HCD_PLATFORM
help
@@ -238,8 +238,8 @@ config USB_EHCI_HCD_STI
config USB_EHCI_HCD_AT91
tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
+ depends on USB_EHCI_HCD && (ARCH_AT91 || COMPILE_TEST)
+ default y if ARCH_AT91
---help---
Enables support for the on-chip EHCI controller on
Atmel chips.
@@ -263,20 +263,20 @@ config USB_EHCI_HCD_PPC_OF
config USB_EHCI_SH
bool "EHCI support for SuperH USB controller"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
---help---
Enables support for the on-chip EHCI controller on the SuperH.
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ tristate "EHCI support for Samsung S5P/Exynos SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
- depends on (ARCH_PXA || ARCH_MMP)
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
select USB_EHCI_ROOT_HUB_TT
---help---
Enables support for Marvell (including PXA and MMP series) on-chip
@@ -289,7 +289,7 @@ config USB_EHCI_MV
config USB_CNS3XXX_EHCI
bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
select USB_EHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
@@ -410,15 +410,15 @@ config USB_OHCI_HCD_OMAP1
config USB_OHCI_HCD_SPEAR
tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
+ depends on USB_OHCI_HCD && (PLAT_SPEAR || COMPILE_TEST)
+ default y if PLAT_SPEAR
---help---
Enables support for the on-chip OHCI controller on
ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
- depends on ARCH_STI && OF
+ depends on (ARCH_STI || COMPILE_TEST) && OF
select GENERIC_PHY
select USB_OHCI_HCD_PLATFORM
help
@@ -427,8 +427,8 @@ config USB_OHCI_HCD_STI
config USB_OHCI_HCD_S3C2410
tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX || COMPILE_TEST)
+ default y if (ARCH_S3C24XX || ARCH_S3C64XX)
---help---
Enables support for the on-chip OHCI controller on
S3C24xx/S3C64xx chips.
@@ -453,17 +453,17 @@ config USB_OHCI_HCD_PXA27X
config USB_OHCI_HCD_AT91
tristate "Support for Atmel on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && ARCH_AT91 && OF
- default y
+ depends on USB_OHCI_HCD && (ARCH_AT91 || COMPILE_TEST) && OF
+ default y if ARCH_AT91
---help---
Enables support for the on-chip OHCI controller on
Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
- depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
+ depends on ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
- default y
+ default y if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
help
This option is deprecated now and the driver was removed, use
USB_OHCI_HCD_PLATFORM instead.
@@ -473,10 +473,10 @@ config USB_OHCI_HCD_OMAP3
config USB_OHCI_HCD_DAVINCI
tristate "OHCI support for TI DaVinci DA8xx"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
depends on USB_OHCI_HCD
select PHY_DA8XX_USB
- default y
+ default y if ARCH_DAVINCI_DA8XX
help
Enables support for the DaVinci DA8xx integrated OHCI
controller. This driver cannot currently be a loadable
@@ -532,7 +532,7 @@ config USB_OHCI_HCD_SSB
config USB_OHCI_SH
bool "OHCI support for SuperH USB controller (DEPRECATED)"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
@@ -542,14 +542,14 @@ config USB_OHCI_SH
If you use the PCI OHCI controller, this option is not necessary.
config USB_OHCI_EXYNOS
- tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ tristate "OHCI support for Samsung S5P/Exynos SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
select USB_OHCI_HCD_PLATFORM
---help---
This option is deprecated now and the driver was removed, use
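Note: the Kconfig churn above follows a single idiom throughout: each platform dependency gains "|| COMPILE_TEST" so the driver can be build-tested on any architecture, while each bare "default y" is narrowed to "default y if <platform>" so the broadened dependency does not silently enable the driver for everyone else.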
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 01debfd03d4a..a4e9abcbdc4f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * SAMSUNG EXYNOS USB HOST EHCI Controller
+ * Samsung Exynos USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -21,7 +21,7 @@
#include "ehci.h"
-#define DRIVER_DESC "EHCI EXYNOS driver"
+#define DRIVER_DESC "EHCI Exynos driver"
#define EHCI_INSNREG00(base) (base + 0x90)
#define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 66ec1fdf9fe7..bd4f6ef534d9 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/usb/otg.h>
+#include <linux/usb/of.h>
#include <linux/platform_data/mv_usb.h>
#include <linux/io.h>
@@ -67,6 +68,8 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 status;
int retval;
if (ehci_mv == NULL) {
@@ -80,6 +83,14 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
if (retval)
dev_err(dev, "ehci_setup failed %d\n", retval);
+ if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+ status = ehci_readl(ehci, &ehci->regs->port_status[0]);
+ status |= PORT_TEST_FORCE;
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ status &= ~PORT_TEST_FORCE;
+ ehci_writel(ehci, status, &ehci->regs->port_status[0]);
+ }
+
return retval;
}
@@ -116,7 +127,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci_mv->set_vbus = pdata->set_vbus;
}
- ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+ ehci_mv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(ehci_mv->phy)) {
retval = PTR_ERR(ehci_mv->phy);
if (retval != -EPROBE_DEFER)
@@ -164,7 +175,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
}
ehci = hcd_to_ehci(hcd);
- ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+ ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
@@ -246,10 +257,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
MODULE_ALIAS("mv-ehci");
static const struct platform_device_id ehci_id_table[] = {
- {"pxa-u2oehci", PXA_U2OEHCI},
- {"pxa-sph", PXA_SPH},
- {"mmp3-hsic", MMP3_HSIC},
- {"mmp3-fsic", MMP3_FSIC},
+ {"pxa-u2oehci", 0},
+ {"pxa-sph", 0},
{},
};
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index a2b610dbedfc..2d462fbbe0a6 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -107,7 +107,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
if (!request_mem_region(res->start, res_len, "mab regs"))
return -EBUSY;
- dev->mab_regs = ioremap_nocache(res->start, res_len);
+ dev->mab_regs = ioremap(res->start, res_len);
if (dev->mab_regs == NULL) {
retval = -ENOMEM;
goto err1;
@@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
retval = -EBUSY;
goto err2;
}
- dev->usbid_regs = ioremap_nocache(res->start, res_len);
+ dev->usbid_regs = ioremap(res->start, res_len);
if (dev->usbid_regs == NULL) {
retval = -ENOMEM;
goto err3;
@@ -178,7 +178,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver,
retval = -EBUSY;
goto err1;
}
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
pr_debug("ioremap failed");
retval = -ENOMEM;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index aa2f77f1506d..8a5c9b3ebe1e 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -27,6 +27,10 @@
/*-------------------------------------------------------------------------*/
+/* PID Codes that are used here, from EHCI specification, Table 3-16. */
+#define PID_CODE_IN 1
+#define PID_CODE_SETUP 2
+
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
@@ -190,7 +194,7 @@ static int qtd_copy_status (
int status = -EINPROGRESS;
/* count IN/OUT bytes, not SETUP (even short packets) */
- if (likely (QTD_PID (token) != 2))
+ if (likely(QTD_PID(token) != PID_CODE_SETUP))
urb->actual_length += length - QTD_LENGTH (token);
/* don't modify error codes */
@@ -206,6 +210,13 @@ static int qtd_copy_status (
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
status = -EOVERFLOW;
+ /*
+ * When MMF is active and PID Code is IN, queue is halted.
+ * EHCI Specification, Table 4-13.
+ */
+ } else if ((token & QTD_STS_MMF) &&
+ (QTD_PID(token) == PID_CODE_IN)) {
+ status = -EPROTO;
/* CERR nonzero + halt --> stall */
} else if (QTD_CERR(token)) {
status = -EPIPE;
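Note: the ehci-q.c hunk above replaces the magic PID constant 2 with named codes and adds the missed-microframe rule from the EHCI specification (Table 4-13): MMF together with an IN PID means the queue halted, surfaced as -EPROTO. The decode in runnable form (token layout per EHCI Table 3-16; the macros mirror the driver's):

	#include <stdio.h>

	#define PID_CODE_IN	1
	#define PID_CODE_SETUP	2
	#define QTD_STS_MMF	(1 << 2)		/* missed micro-frame */
	#define QTD_PID(tok)	(((tok) >> 8) & 0x3)	/* PID code field */

	static int qtd_status(unsigned int token)
	{
		if ((token & QTD_STS_MMF) && QTD_PID(token) == PID_CODE_IN)
			return -71;	/* -EPROTO: MMF on IN halts the queue */
		return 0;
	}

	int main(void)
	{
		unsigned int token = QTD_STS_MMF | (PID_CODE_IN << 8);
		printf("status = %d\n", qtd_status(token));
		return 0;
	}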
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 2afde14dc425..c25c51d26f26 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -8,7 +8,6 @@
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/platform_data/ehci-sh.h>
struct ehci_sh_priv {
struct clk *iclk, *fclk;
@@ -76,7 +75,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct ehci_sh_priv *priv;
- struct ehci_sh_platdata *pdata;
struct usb_hcd *hcd;
int irq, ret;
@@ -89,8 +87,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- pdata = dev_get_platdata(&pdev->dev);
-
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
@@ -127,9 +123,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
clk_enable(priv->fclk);
clk_enable(priv->iclk);
- if (pdata && pdata->phy_init)
- pdata->phy_init();
-
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd");
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4d2cdec4cb78..d6433f206c17 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -42,12 +42,10 @@ struct tegra_ehci_soc_config {
};
struct tegra_ehci_hcd {
- struct tegra_usb_phy *phy;
struct clk *clk;
struct reset_control *rst;
int port_resuming;
bool needs_double_reset;
- enum tegra_usb_phy_port_speed port_speed;
};
static int tegra_reset_usb_controller(struct platform_device *pdev)
@@ -480,12 +478,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
u_phy->otg->host = hcd_to_bus(hcd);
- err = usb_phy_set_suspend(hcd->usb_phy, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to power on the phy\n");
- goto cleanup_phy;
- }
-
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
@@ -521,16 +513,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+ usb_remove_hcd(hcd);
otg_set_host(hcd->usb_phy->otg, NULL);
-
usb_phy_shutdown(hcd->usb_phy);
- usb_remove_hcd(hcd);
-
- reset_control_assert(tegra->rst);
- udelay(1);
-
clk_disable_unprepare(tegra->clk);
-
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 38183ac438c6..1371b0c249ec 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
}
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
- if (IS_ERR(da8xx_ohci->oc_gpio))
+ if (IS_ERR(da8xx_ohci->oc_gpio)) {
+ error = PTR_ERR(da8xx_ohci->oc_gpio);
goto err;
+ }
if (da8xx_ohci->oc_gpio) {
oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
- if (oc_irq < 0)
+ if (oc_irq < 0) {
+ error = oc_irq;
goto err;
+ }
error = devm_request_threaded_irq(dev, oc_irq, NULL,
ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index d5ce98e205c7..bd40e597f256 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -19,7 +19,7 @@
#include "ohci.h"
-#define DRIVER_DESC "OHCI EXYNOS driver"
+#define DRIVER_DESC "OHCI Exynos driver"
static const char hcd_name[] = "ohci-exynos";
static struct hc_driver __read_mostly exynos_ohci_hc_driver;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index fe09b8626329..120666a0d590 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2783,11 +2783,15 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
return;
oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
- for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
- (void) oxu_hub_control(oxu_to_hcd(oxu),
- is_on ? SetPortFeature : ClearPortFeature,
- USB_PORT_FEAT_POWER,
- port--, NULL, 0);
+ for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
+ if (is_on)
+ oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
+ USB_PORT_FEAT_POWER, port--, NULL, 0);
+ else
+ oxu_hub_control(oxu_to_hcd(oxu), ClearPortFeature,
+ USB_PORT_FEAT_POWER, port--, NULL, 0);
+ }
+
msleep(20);
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 6c7f0a876b96..beb2efa71341 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -1150,7 +1150,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (!mmio_resource_enabled(pdev, 0))
return;
- base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+ base = ioremap(pci_resource_start(pdev, 0), len);
if (base == NULL)
return;
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index b18a6baef204..bfbdb3ceed29 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -488,11 +488,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto disable_clk;
}
- /* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- goto disable_clk;
-
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2907fe4d78dd..4917c5b033fa 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -519,7 +519,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
retval = xhci_resume(xhci, hibernated);
return retval;
}
-#endif /* CONFIG_PM */
static void xhci_pci_shutdown(struct usb_hcd *hcd)
{
@@ -532,6 +531,7 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(pdev, PCI_D3hot);
}
+#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index bf9065438320..8163aefc6c6b 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -11,6 +11,7 @@
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -38,7 +39,15 @@
#define XUSB_CFG_4 0x010
#define XUSB_BASE_ADDR_SHIFT 15
#define XUSB_BASE_ADDR_MASK 0x1ffff
+#define XUSB_CFG_16 0x040
+#define XUSB_CFG_24 0x060
+#define XUSB_CFG_AXI_CFG 0x0f8
#define XUSB_CFG_ARU_C11_CSBRANGE 0x41c
+#define XUSB_CFG_ARU_CONTEXT 0x43c
+#define XUSB_CFG_ARU_CONTEXT_HS_PLS 0x478
+#define XUSB_CFG_ARU_CONTEXT_FS_PLS 0x47c
+#define XUSB_CFG_ARU_CONTEXT_HSFS_SPEED 0x480
+#define XUSB_CFG_ARU_CONTEXT_HSFS_PP 0x484
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
@@ -62,11 +71,20 @@
#define MBOX_SMI_INTR_EN BIT(3)
/* IPFS registers */
+#define IPFS_XUSB_HOST_MSI_BAR_SZ_0 0x0c0
+#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0 0x0c4
+#define IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0 0x0c8
+#define IPFS_XUSB_HOST_MSI_VEC0_0 0x100
+#define IPFS_XUSB_HOST_MSI_EN_VEC0_0 0x140
#define IPFS_XUSB_HOST_CONFIGURATION_0 0x180
#define IPFS_EN_FPCI BIT(0)
+#define IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0 0x184
#define IPFS_XUSB_HOST_INTR_MASK_0 0x188
#define IPFS_IP_INT_MASK BIT(16)
+#define IPFS_XUSB_HOST_INTR_ENABLE_0 0x198
+#define IPFS_XUSB_HOST_UFPCI_CONFIG_0 0x19c
#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0 0x1bc
+#define IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0 0x1dc
#define CSB_PAGE_SELECT_MASK 0x7fffff
#define CSB_PAGE_SELECT_SHIFT 9
@@ -101,6 +119,8 @@
#define L2IMEMOP_ACTION_SHIFT 24
#define L2IMEMOP_INVALIDATE_ALL (0x40 << L2IMEMOP_ACTION_SHIFT)
#define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT 0x101a18
+#define L2IMEMOP_RESULT_VLD BIT(31)
#define XUSB_CSB_MP_APMAP 0x10181c
#define APMAP_BOOTPATH BIT(31)
@@ -145,19 +165,32 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
-struct tega_xusb_mbox_regs {
+struct tegra_xusb_mbox_regs {
u16 cmd;
u16 data_in;
u16 data_out;
u16 owner;
};
+struct tegra_xusb_context_soc {
+ struct {
+ const unsigned int *offsets;
+ unsigned int num_offsets;
+ } ipfs;
+
+ struct {
+ const unsigned int *offsets;
+ unsigned int num_offsets;
+ } fpci;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
unsigned int num_supplies;
const struct tegra_xusb_phy_type *phy_types;
unsigned int num_types;
+ const struct tegra_xusb_context_soc *context;
struct {
struct {
@@ -166,12 +199,17 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
- struct tega_xusb_mbox_regs mbox;
+ struct tegra_xusb_mbox_regs mbox;
bool scale_ss_clock;
bool has_ipfs;
};
+struct tegra_xusb_context {
+ u32 *ipfs;
+ u32 *fpci;
+};
+
struct tegra_xusb {
struct device *dev;
void __iomem *regs;
@@ -218,6 +256,8 @@ struct tegra_xusb {
void *virt;
dma_addr_t phys;
} fw;
+
+ struct tegra_xusb_context context;
};
static struct hc_driver __read_mostly tegra_xhci_hc_driver;
@@ -623,9 +663,9 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void tegra_xusb_config(struct tegra_xusb *tegra,
- struct resource *regs)
+static void tegra_xusb_config(struct tegra_xusb *tegra)
{
+ u32 regs = tegra->hcd->rsrc_start;
u32 value;
if (tegra->soc->has_ipfs) {
@@ -639,7 +679,7 @@ static void tegra_xusb_config(struct tegra_xusb *tegra,
/* Program BAR0 space */
value = fpci_readl(tegra, XUSB_CFG_4);
value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
- value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+ value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_4);
usleep_range(100, 200);
@@ -793,17 +833,34 @@ disable_clk:
return err;
}
-static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+
+ tegra->context.ipfs = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
+ sizeof(u32), GFP_KERNEL);
+ if (!tegra->context.ipfs)
+ return -ENOMEM;
+
+ tegra->context.fpci = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets,
+ sizeof(u32), GFP_KERNEL);
+ if (!tegra->context.fpci)
+ return -ENOMEM;
+
+ return 0;
+}
+#else
+static inline int tegra_xusb_init_context(struct tegra_xusb *tegra)
+{
+ return 0;
+}
+#endif
+
+static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
{
- unsigned int code_tag_blocks, code_size_blocks, code_blocks;
struct tegra_xusb_fw_header *header;
- struct device *dev = tegra->dev;
const struct firmware *fw;
- unsigned long timeout;
- time64_t timestamp;
- struct tm time;
- u64 address;
- u32 value;
int err;
err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
@@ -828,6 +885,26 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
release_firmware(fw);
+ return 0;
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+ unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+ struct xhci_cap_regs __iomem *cap = tegra->regs;
+ struct tegra_xusb_fw_header *header;
+ struct device *dev = tegra->dev;
+ struct xhci_op_regs __iomem *op;
+ unsigned long timeout;
+ time64_t timestamp;
+ struct tm time;
+ u64 address;
+ u32 value;
+ int err;
+
+ header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+ op = tegra->regs + HC_LENGTH(readl(&cap->hc_capbase));
+
if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
csb_readl(tegra, XUSB_FALC_CPUCTL));
@@ -882,26 +959,37 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
csb_writel(tegra, 0, XUSB_FALC_DMACTL);
- msleep(50);
+ /* wait for RESULT_VLD to get set */
+#define tegra_csb_readl(offset) csb_readl(tegra, offset)
+ err = readx_poll_timeout(tegra_csb_readl,
+ XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT, value,
+ value & L2IMEMOP_RESULT_VLD, 100, 10000);
+ if (err < 0) {
+ dev_err(dev, "DMA controller not ready %#010x\n", value);
+ return err;
+ }
+#undef tegra_csb_readl
csb_writel(tegra, le32_to_cpu(header->boot_codetag),
XUSB_FALC_BOOTVEC);
- /* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
- timeout = jiffies + msecs_to_jiffies(5);
-
+ /* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
- while (time_before(jiffies, timeout)) {
- if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+ timeout = jiffies + msecs_to_jiffies(200);
+
+ do {
+ value = readl(&op->status);
+ if ((value & STS_CNR) == 0)
break;
- usleep_range(100, 200);
- }
+ usleep_range(1000, 2000);
+ } while (time_is_after_jiffies(timeout));
- if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
- dev_err(dev, "Falcon failed to start, state: %#x\n",
- csb_readl(tegra, XUSB_FALC_CPUCTL));
+ value = readl(&op->status);
+ if (value & STS_CNR) {
+ value = csb_readl(tegra, XUSB_FALC_CPUCTL);
+ dev_err(dev, "XHCI controller not read: %#010x\n", value);
return -EIO;
}
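The tegra_csb_readl() wrapper macro above exists because readx_poll_timeout() expects an accessor taking a single argument, while csb_readl() also needs the tegra pointer. A minimal sketch of that adaptation pattern, using hypothetical foo names rather than anything from this patch:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

struct foo { void __iomem *base; };

static u32 foo_readl(struct foo *foo, unsigned int offset)
{
	return readl(foo->base + offset);
}

static int foo_wait_ready(struct foo *foo)
{
	u32 value;

	/* Adapt the two-argument reader to the one-argument form that
	 * readx_poll_timeout() generates: value = op(arg). */
#define foo_readl_one(offset) foo_readl(foo, offset)
	return readx_poll_timeout(foo_readl_one, 0x04, value,
				  value & BIT(0), 100, 10000);
#undef foo_readl_one
}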
@@ -966,11 +1054,37 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
return 0;
}
-static int tegra_xusb_probe(struct platform_device *pdev)
+static int __tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *regs;
+ int err;
+
+ /* Enable firmware messages from controller. */
+ msg.cmd = MBOX_CMD_MSG_ENABLED;
+ msg.data = 0;
+
+ err = tegra_xusb_mbox_send(tegra, &msg);
+ if (err < 0)
+ dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+
+ return err;
+}
+
+static int tegra_xusb_enable_firmware_messages(struct tegra_xusb *tegra)
+{
+ int err;
+
+ mutex_lock(&tegra->lock);
+ err = __tegra_xusb_enable_firmware_messages(tegra);
+ mutex_unlock(&tegra->lock);
+
+ return err;
+}
+
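The pair above follows the usual kernel convention: the double-underscore variant assumes the caller already holds tegra->lock (the ELPG exit path below relies on this), while the plain variant takes and drops the lock itself. A generic sketch of the convention, with hypothetical foo names:

#include <linux/mutex.h>

struct foo {
	struct mutex lock;
};

/* Hypothetical low-level operation; requires foo->lock to be held. */
static int __foo_send(struct foo *foo)
{
	return 0;
}

static int foo_send(struct foo *foo)
{
	int err;

	mutex_lock(&foo->lock);
	err = __foo_send(foo);
	mutex_unlock(&foo->lock);

	return err;
}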
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
struct tegra_xusb *tegra;
+ struct resource *regs;
struct xhci_hcd *xhci;
unsigned int i, j, k;
struct phy *phy;
@@ -986,6 +1100,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
mutex_init(&tegra->lock);
tegra->dev = &pdev->dev;
+ err = tegra_xusb_init_context(tegra);
+ if (err < 0)
+ return err;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(tegra->regs))
@@ -1173,6 +1291,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
+ tegra->hcd->regs = tegra->regs;
+ tegra->hcd->rsrc_start = regs->start;
+ tegra->hcd->rsrc_len = resource_size(regs);
+
/*
* This must happen after usb_create_hcd(), because usb_create_hcd()
* will overwrite the drvdata of the device with the hcd it creates.
@@ -1185,19 +1307,6 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_hcd;
}
- pm_runtime_enable(&pdev->dev);
- if (pm_runtime_enabled(&pdev->dev))
- err = pm_runtime_get_sync(&pdev->dev);
- else
- err = tegra_xusb_runtime_resume(&pdev->dev);
-
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_phy;
- }
-
- tegra_xusb_config(tegra, regs);
-
/*
* The XUSB Falcon microcontroller can only address 40 bits, so set
* the DMA mask accordingly.
@@ -1205,19 +1314,35 @@ static int tegra_xusb_probe(struct platform_device *pdev)
err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
- goto put_rpm;
+ goto disable_phy;
+ }
+
+ err = tegra_xusb_request_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ if (!pm_runtime_enabled(&pdev->dev))
+ err = tegra_xusb_runtime_resume(&pdev->dev);
+ else
+ err = pm_runtime_get_sync(&pdev->dev);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable device: %d\n", err);
+ goto free_firmware;
}
+ tegra_xusb_config(tegra);
+
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
goto put_rpm;
}
- tegra->hcd->regs = tegra->regs;
- tegra->hcd->rsrc_start = regs->start;
- tegra->hcd->rsrc_len = resource_size(regs);
-
err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
if (err < 0) {
dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
@@ -1244,21 +1369,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_usb3;
}
- mutex_lock(&tegra->lock);
-
- /* Enable firmware messages from controller. */
- msg.cmd = MBOX_CMD_MSG_ENABLED;
- msg.data = 0;
-
- err = tegra_xusb_mbox_send(tegra, &msg);
+ err = tegra_xusb_enable_firmware_messages(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
- mutex_unlock(&tegra->lock);
goto remove_usb3;
}
- mutex_unlock(&tegra->lock);
-
err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
tegra_xusb_mbox_irq,
tegra_xusb_mbox_thread, 0,
@@ -1281,6 +1397,9 @@ put_rpm:
tegra_xusb_runtime_suspend(&pdev->dev);
put_hcd:
usb_put_hcd(tegra->hcd);
+free_firmware:
+ dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+ tegra->fw.phys);
disable_phy:
tegra_xusb_phy_disable(tegra);
pm_runtime_disable(&pdev->dev);
@@ -1328,22 +1447,176 @@ static int tegra_xusb_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
+{
+ struct device *dev = hub->hcd->self.controller;
+ bool status = true;
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < hub->num_ports; i++) {
+ value = readl(hub->ports[i]->addr);
+ if ((value & PORT_PE) == 0)
+ continue;
+
+ if ((value & PORT_PLS_MASK) != XDEV_U3) {
+ dev_info(dev, "%u-%u isn't suspended: %#010x\n",
+ hub->hcd->self.busnum, i + 1, value);
+ status = false;
+ }
+ }
+
+ return status;
+}
+
+static int tegra_xusb_check_ports(struct tegra_xusb *tegra)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (!xhci_hub_ports_suspended(&xhci->usb2_rhub) ||
+ !xhci_hub_ports_suspended(&xhci->usb3_rhub))
+ err = -EBUSY;
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return err;
+}
+
+static void tegra_xusb_save_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+ struct tegra_xusb_context *ctx = &tegra->context;
+ unsigned int i;
+
+ if (soc->ipfs.num_offsets > 0) {
+ for (i = 0; i < soc->ipfs.num_offsets; i++)
+ ctx->ipfs[i] = ipfs_readl(tegra, soc->ipfs.offsets[i]);
+ }
+
+ if (soc->fpci.num_offsets > 0) {
+ for (i = 0; i < soc->fpci.num_offsets; i++)
+ ctx->fpci[i] = fpci_readl(tegra, soc->fpci.offsets[i]);
+ }
+}
+
+static void tegra_xusb_restore_context(struct tegra_xusb *tegra)
+{
+ const struct tegra_xusb_context_soc *soc = tegra->soc->context;
+ struct tegra_xusb_context *ctx = &tegra->context;
+ unsigned int i;
+
+ if (soc->fpci.num_offsets > 0) {
+ for (i = 0; i < soc->fpci.num_offsets; i++)
+ fpci_writel(tegra, ctx->fpci[i], soc->fpci.offsets[i]);
+ }
+
+ if (soc->ipfs.num_offsets > 0) {
+ for (i = 0; i < soc->ipfs.num_offsets; i++)
+ ipfs_writel(tegra, ctx->ipfs[i], soc->ipfs.offsets[i]);
+ }
+}
+
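Save and restore are driven purely by the per-SoC offset tables declared further down, so supporting a new chip only means providing a new table. Reduced to a self-contained sketch with made-up offsets:

#include <linux/io.h>
#include <linux/kernel.h>

static const unsigned int foo_context_offsets[] = { 0x10, 0x14, 0x18 };

struct foo_context {
	u32 regs[ARRAY_SIZE(foo_context_offsets)];
};

static void foo_save_context(void __iomem *base, struct foo_context *ctx)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(foo_context_offsets); i++)
		ctx->regs[i] = readl(base + foo_context_offsets[i]);
}

static void foo_restore_context(void __iomem *base, struct foo_context *ctx)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(foo_context_offsets); i++)
		writel(ctx->regs[i], base + foo_context_offsets[i]);
}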
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ int err;
+
+ err = tegra_xusb_check_ports(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "not all ports suspended: %d\n", err);
+ return err;
+ }
+
+ err = xhci_suspend(xhci, wakeup);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
+ return err;
+ }
+
+ tegra_xusb_save_context(tegra);
+ tegra_xusb_phy_disable(tegra);
+ tegra_xusb_clk_disable(tegra);
+
+ return 0;
+}
+
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool wakeup)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ int err;
+
+ err = tegra_xusb_clk_enable(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable PHYs: %d\n", err);
+ goto disable_clk;
+ }
+
+ tegra_xusb_config(tegra);
+ tegra_xusb_restore_context(tegra);
+
+ err = tegra_xusb_load_firmware(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to load firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ err = __tegra_xusb_enable_firmware_messages(tegra);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to enable messages: %d\n", err);
+ goto disable_phy;
+ }
+
+ err = xhci_resume(xhci, true);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
+ goto disable_phy;
+ }
+
+ return 0;
+
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+disable_clk:
+ tegra_xusb_clk_disable(tegra);
+ return err;
+}
+
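Note the strict bring-up order in tegra_xusb_exit_elpg(): clocks, PHYs, register config, saved context, firmware, firmware messages, and only then xhci_resume(), with a goto ladder unwinding in reverse on failure. The ladder idiom, as a sketch over hypothetical foo helpers:

struct foo;
int foo_clk_enable(struct foo *foo);
void foo_clk_disable(struct foo *foo);
int foo_phy_enable(struct foo *foo);
void foo_phy_disable(struct foo *foo);
int foo_load_firmware(struct foo *foo);

static int foo_power_up(struct foo *foo)
{
	int err;

	err = foo_clk_enable(foo);
	if (err < 0)
		return err;

	err = foo_phy_enable(foo);
	if (err < 0)
		goto disable_clk;

	err = foo_load_firmware(foo);
	if (err < 0)
		goto disable_phy;

	return 0;

disable_phy:
	foo_phy_disable(foo);
disable_clk:
	foo_clk_disable(foo);
	return err;
}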
static int tegra_xusb_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
bool wakeup = device_may_wakeup(dev);
+ int err;
+
+ synchronize_irq(tegra->mbox_irq);
- /* TODO: Powergate controller across suspend/resume. */
- return xhci_suspend(xhci, wakeup);
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_enter_elpg(tegra, wakeup);
+ mutex_unlock(&tegra->lock);
+
+ return err;
}
static int tegra_xusb_resume(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ bool wakeup = device_may_wakeup(dev);
+ int err;
- return xhci_resume(xhci, 0);
+ mutex_lock(&tegra->lock);
+ err = tegra_xusb_exit_elpg(tegra, wakeup);
+ mutex_unlock(&tegra->lock);
+
+ return err;
}
#endif
@@ -1370,12 +1643,50 @@ static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
{ .name = "hsic", .num = 2, },
};
+static const unsigned int tegra124_xusb_context_ipfs[] = {
+ IPFS_XUSB_HOST_MSI_BAR_SZ_0,
+ IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0,
+ IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0,
+ IPFS_XUSB_HOST_MSI_VEC0_0,
+ IPFS_XUSB_HOST_MSI_EN_VEC0_0,
+ IPFS_XUSB_HOST_FPCI_ERROR_MASKS_0,
+ IPFS_XUSB_HOST_INTR_MASK_0,
+ IPFS_XUSB_HOST_INTR_ENABLE_0,
+ IPFS_XUSB_HOST_UFPCI_CONFIG_0,
+ IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0,
+ IPFS_XUSB_HOST_MCCIF_FIFOCTRL_0,
+};
+
+static const unsigned int tegra124_xusb_context_fpci[] = {
+ XUSB_CFG_ARU_CONTEXT_HS_PLS,
+ XUSB_CFG_ARU_CONTEXT_FS_PLS,
+ XUSB_CFG_ARU_CONTEXT_HSFS_SPEED,
+ XUSB_CFG_ARU_CONTEXT_HSFS_PP,
+ XUSB_CFG_ARU_CONTEXT,
+ XUSB_CFG_AXI_CFG,
+ XUSB_CFG_24,
+ XUSB_CFG_16,
+};
+
+static const struct tegra_xusb_context_soc tegra124_xusb_context = {
+ .ipfs = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_ipfs),
+ .offsets = tegra124_xusb_context_ipfs,
+ },
+ .fpci = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+ .offsets = tegra124_xusb_context_fpci,
+ },
+};
+
static const struct tegra_xusb_soc tegra124_soc = {
.firmware = "nvidia/tegra124/xusb.bin",
.supply_names = tegra124_supply_names,
.num_supplies = ARRAY_SIZE(tegra124_supply_names),
.phy_types = tegra124_phy_types,
.num_types = ARRAY_SIZE(tegra124_phy_types),
+ .context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 6, .count = 2, },
@@ -1414,6 +1725,7 @@ static const struct tegra_xusb_soc tegra210_soc = {
.num_supplies = ARRAY_SIZE(tegra210_supply_names),
.phy_types = tegra210_phy_types,
.num_types = ARRAY_SIZE(tegra210_phy_types),
+ .context = &tegra124_xusb_context,
.ports = {
.usb2 = { .offset = 4, .count = 4, },
.hsic = { .offset = 8, .count = 1, },
@@ -1432,6 +1744,7 @@ MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
static const char * const tegra186_supply_names[] = {
};
+MODULE_FIRMWARE("nvidia/tegra186/xusb.bin");
static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "usb3", .num = 3, },
@@ -1439,12 +1752,20 @@ static const struct tegra_xusb_phy_type tegra186_phy_types[] = {
{ .name = "hsic", .num = 1, },
};
+static const struct tegra_xusb_context_soc tegra186_xusb_context = {
+ .fpci = {
+ .num_offsets = ARRAY_SIZE(tegra124_xusb_context_fpci),
+ .offsets = tegra124_xusb_context_fpci,
+ },
+};
+
static const struct tegra_xusb_soc tegra186_soc = {
.firmware = "nvidia/tegra186/xusb.bin",
.supply_names = tegra186_supply_names,
.num_supplies = ARRAY_SIZE(tegra186_supply_names),
.phy_types = tegra186_phy_types,
.num_types = ARRAY_SIZE(tegra186_phy_types),
+ .context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 3, },
.usb2 = { .offset = 3, .count = 3, },
@@ -1474,6 +1795,7 @@ static const struct tegra_xusb_soc tegra194_soc = {
.num_supplies = ARRAY_SIZE(tegra194_supply_names),
.phy_types = tegra194_phy_types,
.num_types = ARRAY_SIZE(tegra194_phy_types),
+ .context = &tegra186_xusb_context,
.ports = {
.usb3 = { .offset = 0, .count = 4, },
.usb2 = { .offset = 4, .count = 4, },
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 07cc82ff327c..ccd30f835888 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -50,7 +50,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
}
/* map available memory */
- iobase = ioremap_nocache(mem_start, mem_length);
+ iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "Error ioremap failed\n");
release_mem_region(mem_start, mem_length);
@@ -101,7 +101,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
return -EBUSY;
}
- iobase = ioremap_nocache(mem_start, mem_length);
+ iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "ioremap #1\n");
release_mem_region(mem_start, mem_length);
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 72f39a9751b5..116bd789e568 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -7,11 +7,10 @@
#include <linux/clk.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb3503.h>
#include <linux/regmap.h>
@@ -47,19 +46,19 @@ struct usb3503 {
struct device *dev;
struct clk *clk;
u8 port_off_mask;
- int gpio_intn;
- int gpio_reset;
- int gpio_connect;
+ struct gpio_desc *intn;
+ struct gpio_desc *reset;
+ struct gpio_desc *connect;
bool secondary_ref_clk;
};
static int usb3503_reset(struct usb3503 *hub, int state)
{
- if (!state && gpio_is_valid(hub->gpio_connect))
- gpio_set_value_cansleep(hub->gpio_connect, 0);
+ if (!state && hub->connect)
+ gpiod_set_value_cansleep(hub->connect, 0);
- if (gpio_is_valid(hub->gpio_reset))
- gpio_set_value_cansleep(hub->gpio_reset, state);
+ if (hub->reset)
+ gpiod_set_value_cansleep(hub->reset, !state);
/* Wait T_HUBINIT == 4ms for hub logic to stabilize */
if (state)
@@ -115,8 +114,8 @@ static int usb3503_connect(struct usb3503 *hub)
}
}
- if (gpio_is_valid(hub->gpio_connect))
- gpio_set_value_cansleep(hub->gpio_connect, 1);
+ if (hub->connect)
+ gpiod_set_value_cansleep(hub->connect, 1);
hub->mode = USB3503_MODE_HUB;
dev_info(dev, "switched to HUB mode\n");
@@ -163,13 +162,11 @@ static int usb3503_probe(struct usb3503 *hub)
int err;
u32 mode = USB3503_MODE_HUB;
const u32 *property;
+ enum gpiod_flags flags;
int len;
if (pdata) {
hub->port_off_mask = pdata->port_off_mask;
- hub->gpio_intn = pdata->gpio_intn;
- hub->gpio_connect = pdata->gpio_connect;
- hub->gpio_reset = pdata->gpio_reset;
hub->mode = pdata->initial_mode;
} else if (np) {
u32 rate = 0;
@@ -230,59 +227,38 @@ static int usb3503_probe(struct usb3503 *hub)
}
}
- hub->gpio_intn = of_get_named_gpio(np, "intn-gpios", 0);
- if (hub->gpio_intn == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
- if (hub->gpio_connect == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
- if (hub->gpio_reset == -EPROBE_DEFER)
- return -EPROBE_DEFER;
of_property_read_u32(np, "initial-mode", &mode);
hub->mode = mode;
}
- if (hub->port_off_mask && !hub->regmap)
- dev_err(dev, "Ports disabled with no control interface\n");
-
- if (gpio_is_valid(hub->gpio_intn)) {
- int val = hub->secondary_ref_clk ? GPIOF_OUT_INIT_LOW :
- GPIOF_OUT_INIT_HIGH;
- err = devm_gpio_request_one(dev, hub->gpio_intn, val,
- "usb3503 intn");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as interrupt pin (%d)\n",
- hub->gpio_intn, err);
- return err;
- }
- }
-
- if (gpio_is_valid(hub->gpio_connect)) {
- err = devm_gpio_request_one(dev, hub->gpio_connect,
- GPIOF_OUT_INIT_LOW, "usb3503 connect");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as connect pin (%d)\n",
- hub->gpio_connect, err);
- return err;
- }
- }
-
- if (gpio_is_valid(hub->gpio_reset)) {
- err = devm_gpio_request_one(dev, hub->gpio_reset,
- GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ if (hub->secondary_ref_clk)
+ flags = GPIOD_OUT_LOW;
+ else
+ flags = GPIOD_OUT_HIGH;
+ hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
+ if (IS_ERR(hub->intn))
+ return PTR_ERR(hub->intn);
+ if (hub->intn)
+ gpiod_set_consumer_name(hub->intn, "usb3503 intn");
+
+ hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
+ if (IS_ERR(hub->connect))
+ return PTR_ERR(hub->connect);
+ if (hub->connect)
+ gpiod_set_consumer_name(hub->connect, "usb3503 connect");
+
+ hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hub->reset))
+ return PTR_ERR(hub->reset);
+ if (hub->reset) {
/* Datasheet defines a hardware reset to be at least 100us */
usleep_range(100, 10000);
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as reset pin (%d)\n",
- hub->gpio_reset, err);
- return err;
- }
+ gpiod_set_consumer_name(hub->reset, "usb3503 reset");
}
+ if (hub->port_off_mask && !hub->regmap)
+ dev_err(dev, "Ports disabled with no control interface\n");
+
usb3503_switch_mode(hub, hub->mode);
dev_info(dev, "%s: probed in %s mode\n", __func__,
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 52f8e2b57ad5..eb2ded1026ee 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -101,7 +101,6 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
- select USB_MUSB_AM335X_CHILD
depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on OF_IRQ
@@ -111,13 +110,16 @@ config USB_MUSB_UX500
config USB_MUSB_JZ4740
tristate "JZ4740"
- depends on NOP_USB_XCEIV
depends on MIPS || COMPILE_TEST
depends on USB_MUSB_GADGET
depends on USB=n || USB_OTG_BLACKLIST_HUB
-config USB_MUSB_AM335X_CHILD
- tristate
+config USB_MUSB_MEDIATEK
+ tristate "MediaTek platforms"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on NOP_USB_XCEIV
+ depends on GENERIC_PHY
+ select USB_ROLE_SWITCH
comment "MUSB DMA mode"
@@ -142,7 +144,7 @@ config USB_UX500_DMA
config USB_INVENTRA_DMA
bool 'Inventra'
- depends on USB_MUSB_OMAP2PLUS
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_MEDIATEK
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a88c79e650c..932247360a9f 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,9 +24,7 @@ obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
obj-$(CONFIG_USB_MUSB_JZ4740) += jz4740.o
obj-$(CONFIG_USB_MUSB_SUNXI) += sunxi.o
-
-
-obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
+obj-$(CONFIG_USB_MUSB_MEDIATEK) += mediatek.o
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index fb6bbd254ab7..704435526394 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/usb_phy_generic.h>
@@ -25,10 +25,6 @@
#include "musb_core.h"
-#ifdef CONFIG_MACH_DAVINCI_EVM
-#define GPIO_nVBUS_DRV 160
-#endif
-
#include "davinci.h"
#include "cppi_dma.h"
@@ -40,6 +36,9 @@ struct davinci_glue {
struct device *dev;
struct platform_device *musb;
struct clk *clk;
+ bool vbus_state;
+ struct gpio_desc *vbus;
+ struct work_struct vbus_work;
};
/* REVISIT (PM) we should be able to keep the PHY in low power mode most
@@ -135,43 +134,44 @@ static void davinci_musb_disable(struct musb *musb)
* when J10 is out, and TI documents it as handling OTG.
*/
-#ifdef CONFIG_MACH_DAVINCI_EVM
-
-static int vbus_state = -1;
-
/* I2C operations are always synchronous, and require a task context.
* With unloaded systems, using the shared workqueue seems to suffice
* to satisfy the 100msec A_WAIT_VRISE timeout...
*/
-static void evm_deferred_drvvbus(struct work_struct *ignored)
+static void evm_deferred_drvvbus(struct work_struct *work)
{
- gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
- vbus_state = !vbus_state;
-}
+ struct davinci_glue *glue = container_of(work, struct davinci_glue,
+ vbus_work);
-#endif /* EVM */
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
+ glue->vbus_state = !glue->vbus_state;
+}
-static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
+static void davinci_musb_source_power(struct musb *musb, int is_on,
+ int immediate)
{
-#ifdef CONFIG_MACH_DAVINCI_EVM
+ struct davinci_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+ /* This GPIO handling is entirely optional */
+ if (!glue->vbus)
+ return;
+
if (is_on)
is_on = 1;
- if (vbus_state == is_on)
+ if (glue->vbus_state == is_on)
return;
- vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
+ /* 0/1 vs "-1 == unknown/init" */
+ glue->vbus_state = !is_on;
if (machine_is_davinci_evm()) {
- static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
-
if (immediate)
- gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
+ gpiod_set_value_cansleep(glue->vbus, glue->vbus_state);
else
- schedule_work(&evm_vbus_work);
+ schedule_work(&glue->vbus_work);
}
if (immediate)
- vbus_state = is_on;
-#endif
+ glue->vbus_state = is_on;
}
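Moving vbus_state and the work item into davinci_glue removes the EVM-only globals, but the deferral itself is still required: the VBUS GPIO may sit behind an I2C expander and sleep, while source_power() can run in atomic context. That defer-to-work pattern, condensed into a sketch with hypothetical names:

#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>

struct foo_glue {
	struct gpio_desc *vbus;
	bool vbus_on;
	struct work_struct vbus_work;
};

static void foo_vbus_work(struct work_struct *work)
{
	struct foo_glue *glue = container_of(work, struct foo_glue,
					     vbus_work);

	/* Process context: sleeping GPIO accessors are safe here. */
	gpiod_set_value_cansleep(glue->vbus, glue->vbus_on);
}

static void foo_set_vbus(struct foo_glue *glue, bool on, bool can_sleep)
{
	glue->vbus_on = on;

	if (can_sleep)
		gpiod_set_value_cansleep(glue->vbus, on);
	else
		schedule_work(&glue->vbus_work);
}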
static void davinci_musb_set_vbus(struct musb *musb, int is_on)
@@ -524,6 +524,15 @@ static int davinci_probe(struct platform_device *pdev)
pdata->platform_ops = &davinci_ops;
+ glue->vbus = devm_gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(glue->vbus)) {
+ ret = PTR_ERR(glue->vbus);
+ goto err0;
+ } else {
+ glue->vbus_state = -1;
+ INIT_WORK(&glue->vbus_work, evm_deferred_drvvbus);
+ }
+
usb_phy_generic_register();
platform_set_drvdata(pdev, glue);
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5261f8dfedec..bc0109f4700b 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -17,16 +17,15 @@
#include "musb_core.h"
struct jz4740_glue {
- struct device *dev;
- struct platform_device *musb;
+ struct platform_device *pdev;
struct clk *clk;
};
static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
{
- unsigned long flags;
- irqreturn_t retval = IRQ_NONE;
- struct musb *musb = __hci;
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
spin_lock_irqsave(&musb->lock, flags);
@@ -40,7 +39,7 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
* never see them set
*/
musb->int_usb &= MUSB_INTR_SUSPEND | MUSB_INTR_RESUME |
- MUSB_INTR_RESET | MUSB_INTR_SOF;
+ MUSB_INTR_RESET | MUSB_INTR_SOF;
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
@@ -51,41 +50,40 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
}
static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
-{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
};
static const struct musb_hdrc_config jz4740_musb_config = {
/* Silicon does not implement USB OTG. */
- .multipoint = 0,
+ .multipoint = 0,
/* Max EPs scanned, driver will decide which EP can be used. */
- .num_eps = 4,
+ .num_eps = 4,
/* RAMbits needed to configure EPs from table */
- .ram_bits = 9,
- .fifo_cfg = jz4740_musb_fifo_cfg,
- .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
-};
-
-static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
- .mode = MUSB_PERIPHERAL,
- .config = &jz4740_musb_config,
+ .ram_bits = 9,
+ .fifo_cfg = jz4740_musb_fifo_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(jz4740_musb_fifo_cfg),
};
static int jz4740_musb_init(struct musb *musb)
{
struct device *dev = musb->controller->parent;
+ int err;
if (dev->of_node)
musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
else
musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(musb->xceiv)) {
- dev_err(dev, "No transceiver configured\n");
- return PTR_ERR(musb->xceiv);
+ err = PTR_ERR(musb->xceiv);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No transceiver configured: %d", err);
+ return err;
}
- /* Silicon does not implement ConfigData register.
+ /*
+ * Silicon does not implement ConfigData register.
* Set dyn_fifo to avoid reading EP config from hardware.
*/
musb->dyn_fifo = true;
@@ -105,63 +103,67 @@ static const struct musb_platform_ops jz4740_musb_ops = {
.init = jz4740_musb_init,
};
+static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
+ .mode = MUSB_PERIPHERAL,
+ .config = &jz4740_musb_config,
+ .platform_ops = &jz4740_musb_ops,
+};
+
static int jz4740_probe(struct platform_device *pdev)
{
- struct musb_hdrc_platform_data *pdata = &jz4740_musb_platform_data;
+ struct device *dev = &pdev->dev;
+ const struct musb_hdrc_platform_data *pdata = &jz4740_musb_pdata;
struct platform_device *musb;
struct jz4740_glue *glue;
- struct clk *clk;
+ struct clk *clk;
int ret;
- glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
- dev_err(&pdev->dev, "failed to allocate musb device\n");
+ dev_err(dev, "failed to allocate musb device");
return -ENOMEM;
}
- clk = devm_clk_get(&pdev->dev, "udc");
+ clk = devm_clk_get(dev, "udc");
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
+ dev_err(dev, "failed to get clock");
ret = PTR_ERR(clk);
goto err_platform_device_put;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "failed to enable clock\n");
+ dev_err(dev, "failed to enable clock");
goto err_platform_device_put;
}
- musb->dev.parent = &pdev->dev;
+ musb->dev.parent = dev;
- glue->dev = &pdev->dev;
- glue->musb = musb;
+ glue->pdev = musb;
glue->clk = clk;
- pdata->platform_ops = &jz4740_musb_ops;
-
platform_set_drvdata(pdev, glue);
ret = platform_device_add_resources(musb, pdev->resource,
pdev->num_resources);
if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
+ dev_err(dev, "failed to add resources");
goto err_clk_disable;
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
- dev_err(&pdev->dev, "failed to add platform_data\n");
+ dev_err(dev, "failed to add platform_data");
goto err_clk_disable;
}
ret = platform_device_add(musb);
if (ret) {
- dev_err(&pdev->dev, "failed to register musb device\n");
+ dev_err(dev, "failed to register musb device");
goto err_clk_disable;
}
@@ -176,9 +178,9 @@ err_platform_device_put:
static int jz4740_remove(struct platform_device *pdev)
{
- struct jz4740_glue *glue = platform_get_drvdata(pdev);
+ struct jz4740_glue *glue = platform_get_drvdata(pdev);
- platform_device_unregister(glue->musb);
+ platform_device_unregister(glue->pdev);
clk_disable_unprepare(glue->clk);
return 0;
@@ -187,7 +189,7 @@ static int jz4740_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id jz4740_musb_of_match[] = {
{ .compatible = "ingenic,jz4740-musb" },
- {},
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
#endif
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
new file mode 100644
index 000000000000..6b88c2f5d970
--- /dev/null
+++ b/drivers/usb/musb/mediatek.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author:
+ * Min Guo <min.guo@mediatek.com>
+ * Yonglong Wu <yonglong.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include <linux/usb/usb_phy_generic.h>
+#include "musb_core.h"
+#include "musb_dma.h"
+
+#define USB_L1INTS 0x00a0
+#define USB_L1INTM 0x00a4
+#define MTK_MUSB_TXFUNCADDR 0x0480
+
+/* MediaTek controller toggle enable and status reg */
+#define MUSB_RXTOG 0x80
+#define MUSB_RXTOGEN 0x82
+#define MUSB_TXTOG 0x84
+#define MUSB_TXTOGEN 0x86
+#define MTK_TOGGLE_EN GENMASK(15, 0)
+
+#define TX_INT_STATUS BIT(0)
+#define RX_INT_STATUS BIT(1)
+#define USBCOM_INT_STATUS BIT(2)
+#define DMA_INT_STATUS BIT(3)
+
+#define DMA_INTR_STATUS_MSK GENMASK(7, 0)
+#define DMA_INTR_UNMASK_SET_MSK GENMASK(31, 24)
+
+struct mtk_glue {
+ struct device *dev;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
+ struct platform_device *usb_phy;
+ struct phy *phy;
+ struct usb_phy *xceiv;
+ enum phy_mode phy_mode;
+ struct clk *main;
+ struct clk *mcu;
+ struct clk *univpll;
+ enum usb_role role;
+ struct usb_role_switch *role_sw;
+};
+
+static int mtk_musb_clks_get(struct mtk_glue *glue)
+{
+ struct device *dev = glue->dev;
+
+ glue->main = devm_clk_get(dev, "main");
+ if (IS_ERR(glue->main)) {
+ dev_err(dev, "fail to get main clock\n");
+ return PTR_ERR(glue->main);
+ }
+
+ glue->mcu = devm_clk_get(dev, "mcu");
+ if (IS_ERR(glue->mcu)) {
+ dev_err(dev, "fail to get mcu clock\n");
+ return PTR_ERR(glue->mcu);
+ }
+
+ glue->univpll = devm_clk_get(dev, "univpll");
+ if (IS_ERR(glue->univpll)) {
+ dev_err(dev, "fail to get univpll clock\n");
+ return PTR_ERR(glue->univpll);
+ }
+
+ return 0;
+}
+
+static int mtk_musb_clks_enable(struct mtk_glue *glue)
+{
+ int ret;
+
+ ret = clk_prepare_enable(glue->main);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable main clock\n");
+ goto err_main_clk;
+ }
+
+ ret = clk_prepare_enable(glue->mcu);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable mcu clock\n");
+ goto err_mcu_clk;
+ }
+
+ ret = clk_prepare_enable(glue->univpll);
+ if (ret) {
+ dev_err(glue->dev, "failed to enable univpll clock\n");
+ goto err_univpll_clk;
+ }
+
+ return 0;
+
+err_univpll_clk:
+ clk_disable_unprepare(glue->mcu);
+err_mcu_clk:
+ clk_disable_unprepare(glue->main);
+err_main_clk:
+ return ret;
+}
+
+static void mtk_musb_clks_disable(struct mtk_glue *glue)
+{
+ clk_disable_unprepare(glue->univpll);
+ clk_disable_unprepare(glue->mcu);
+ clk_disable_unprepare(glue->main);
+}
+
+static int musb_usb_role_sx_set(struct device *dev, enum usb_role role)
+{
+ struct mtk_glue *glue = dev_get_drvdata(dev);
+ struct musb *musb = glue->musb;
+ u8 devctl = readb(musb->mregs + MUSB_DEVCTL);
+ enum usb_role new_role;
+
+ if (role == glue->role)
+ return 0;
+
+ switch (role) {
+ case USB_ROLE_HOST:
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ MUSB_HST_MODE(musb);
+ break;
+ case USB_ROLE_DEVICE:
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role == USB_ROLE_NONE)
+ phy_power_on(glue->phy);
+
+ MUSB_DEV_MODE(musb);
+ break;
+ case USB_ROLE_NONE:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ if (glue->role != USB_ROLE_NONE)
+ phy_power_off(glue->phy);
+
+ break;
+ default:
+ dev_err(glue->dev, "Invalid State\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+ return 0;
+}
+
+static enum usb_role musb_usb_role_sx_get(struct device *dev)
+{
+ struct mtk_glue *glue = dev_get_drvdata(dev);
+
+ return glue->role;
+}
+
+static int mtk_otg_switch_init(struct mtk_glue *glue)
+{
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+
+ role_sx_desc.set = musb_usb_role_sx_set;
+ role_sx_desc.get = musb_usb_role_sx_get;
+ role_sx_desc.fwnode = dev_fwnode(glue->dev);
+ glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
+
+ return PTR_ERR_OR_ZERO(glue->role_sw);
+}
+
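usb_role_switch_register() is what lets an external mux or Type-C controller push USB_ROLE_* changes into this glue layer. A minimal registration sketch against the same API used above, with hypothetical callbacks:

#include <linux/device.h>
#include <linux/usb/role.h>

static int foo_role_set(struct device *dev, enum usb_role role)
{
	/* Reprogram controller/PHY for the new role here. */
	return 0;
}

static enum usb_role foo_role_get(struct device *dev)
{
	return USB_ROLE_NONE;
}

static int foo_register_role_switch(struct device *dev,
				    struct usb_role_switch **sw)
{
	struct usb_role_switch_desc desc = {
		.set = foo_role_set,
		.get = foo_role_get,
		.fwnode = dev_fwnode(dev),
	};

	*sw = usb_role_switch_register(dev, &desc);

	return PTR_ERR_OR_ZERO(*sw);
}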
+static void mtk_otg_switch_exit(struct mtk_glue *glue)
+{
+ return usb_role_switch_unregister(glue->role_sw);
+}
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->int_usb = musb_clearb(musb->mregs, MUSB_INTRUSB);
+ musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
+ musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return retval;
+}
+
+static irqreturn_t mtk_musb_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = (struct musb *)dev_id;
+ u32 l1_ints;
+
+ l1_ints = musb_readl(musb->mregs, USB_L1INTS) &
+ musb_readl(musb->mregs, USB_L1INTM);
+
+ if (l1_ints & (TX_INT_STATUS | RX_INT_STATUS | USBCOM_INT_STATUS))
+ retval = generic_interrupt(irq, musb);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ if (l1_ints & DMA_INT_STATUS)
+ retval = dma_controller_irq(irq, musb->dma_controller);
+#endif
+ return retval;
+}
+
+static u32 mtk_musb_busctl_offset(u8 epnum, u16 offset)
+{
+ return MTK_MUSB_TXFUNCADDR + offset + 8 * epnum;
+}
+
+static u8 mtk_musb_clearb(void __iomem *addr, unsigned int offset)
+{
+ u8 data;
+
+ /* W1C */
+ data = musb_readb(addr, offset);
+ musb_writeb(addr, offset, data);
+ return data;
+}
+
+static u16 mtk_musb_clearw(void __iomem *addr, unsigned int offset)
+{
+ u16 data;
+
+ /* W1C */
+ data = musb_readw(addr, offset);
+ musb_writew(addr, offset, data);
+ return data;
+}
+
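These helpers exist because the MediaTek integration acknowledges interrupt status with a write-1-to-clear cycle, while the stock Mentor core clears on read; musb_core.c below falls back to musb_readb()/musb_readw() when a glue layer supplies no clearb/clearw. The two conventions side by side, as a sketch:

/* Clear-on-read: the read access itself acknowledges the status. */
static u8 foo_clear_on_read(void __iomem *addr, u32 offset)
{
	return musb_readb(addr, offset);
}

/* Write-1-to-clear: read the status, then write the bits back. */
static u8 foo_write_one_to_clear(void __iomem *addr, u32 offset)
{
	u8 data = musb_readb(addr, offset);

	musb_writeb(addr, offset, data);
	return data;
}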
+static int mtk_musb_set_mode(struct musb *musb, u8 mode)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ enum phy_mode new_mode;
+ enum usb_role new_role;
+
+ switch (mode) {
+ case MUSB_HOST:
+ new_mode = PHY_MODE_USB_HOST;
+ new_role = USB_ROLE_HOST;
+ break;
+ case MUSB_PERIPHERAL:
+ new_mode = PHY_MODE_USB_DEVICE;
+ new_role = USB_ROLE_DEVICE;
+ break;
+ case MUSB_OTG:
+ new_mode = PHY_MODE_USB_OTG;
+ new_role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(glue->dev, "Invalid mode request\n");
+ return -EINVAL;
+ }
+
+ if (glue->phy_mode == new_mode)
+ return 0;
+
+ if (musb->port_mode != MUSB_OTG) {
+ dev_err(glue->dev, "Does not support changing modes\n");
+ return -EINVAL;
+ }
+
+ glue->role = new_role;
+ musb_usb_role_sx_set(dev, glue->role);
+ return 0;
+}
+
+static int mtk_musb_init(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+ int ret;
+
+ glue->musb = musb;
+ musb->phy = glue->phy;
+ musb->xceiv = glue->xceiv;
+ musb->is_host = false;
+ musb->isr = mtk_musb_interrupt;
+
+ /* Set TX/RX toggle enable */
+ musb_writew(musb->mregs, MUSB_TXTOGEN, MTK_TOGGLE_EN);
+ musb_writew(musb->mregs, MUSB_RXTOGEN, MTK_TOGGLE_EN);
+
+ if (musb->port_mode == MUSB_OTG) {
+ ret = mtk_otg_switch_init(glue);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_init(glue->phy);
+ if (ret)
+ goto err_phy_init;
+
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ goto err_phy_power_on;
+
+ phy_set_mode(glue->phy, glue->phy_mode);
+
+#if defined(CONFIG_USB_INVENTRA_DMA)
+ musb_writel(musb->mregs, MUSB_HSDMA_INTR,
+ DMA_INTR_STATUS_MSK | DMA_INTR_UNMASK_SET_MSK);
+#endif
+ musb_writel(musb->mregs, USB_L1INTM, TX_INT_STATUS | RX_INT_STATUS |
+ USBCOM_INT_STATUS | DMA_INT_STATUS);
+ return 0;
+
+err_phy_power_on:
+ phy_exit(glue->phy);
+err_phy_init:
+ mtk_otg_switch_exit(glue);
+ return ret;
+}
+
+static u16 mtk_musb_get_toggle(struct musb_qh *qh, int is_out)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 toggle;
+
+ toggle = musb_readw(musb->mregs, is_out ? MUSB_TXTOG : MUSB_RXTOG);
+ return toggle & (1 << epnum);
+}
+
+static u16 mtk_musb_set_toggle(struct musb_qh *qh, int is_out, struct urb *urb)
+{
+ struct musb *musb = qh->hw_ep->musb;
+ u8 epnum = qh->hw_ep->epnum;
+ u16 value, toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out) {
+ value = musb_readw(musb->mregs, MUSB_TXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_TXTOG, value);
+ } else {
+ value = musb_readw(musb->mregs, MUSB_RXTOG);
+ value |= toggle << epnum;
+ musb_writew(musb->mregs, MUSB_RXTOG, value);
+ }
+
+ return 0;
+}
+
+static int mtk_musb_exit(struct musb *musb)
+{
+ struct device *dev = musb->controller;
+ struct mtk_glue *glue = dev_get_drvdata(dev->parent);
+
+ mtk_otg_switch_exit(glue);
+ phy_power_off(glue->phy);
+ phy_exit(glue->phy);
+ mtk_musb_clks_disable(glue);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return 0;
+}
+
+static const struct musb_platform_ops mtk_musb_ops = {
+ .quirks = MUSB_DMA_INVENTRA,
+ .init = mtk_musb_init,
+ .get_toggle = mtk_musb_get_toggle,
+ .set_toggle = mtk_musb_set_toggle,
+ .exit = mtk_musb_exit,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma_init = musbhs_dma_controller_create_noirq,
+ .dma_exit = musbhs_dma_controller_destroy,
+#endif
+ .clearb = mtk_musb_clearb,
+ .clearw = mtk_musb_clearw,
+ .busctl_offset = mtk_musb_busctl_offset,
+ .set_mode = mtk_musb_set_mode,
+};
+
+#define MTK_MUSB_MAX_EP_NUM 8
+#define MTK_MUSB_RAM_BITS 11
+
+static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+ { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+ { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 1024, },
+ { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 1024, },
+ { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+ { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 64, },
+};
+
+static const struct musb_hdrc_config mtk_musb_hdrc_config = {
+ .fifo_cfg = mtk_musb_mode_cfg,
+ .fifo_cfg_size = ARRAY_SIZE(mtk_musb_mode_cfg),
+ .multipoint = true,
+ .dyn_fifo = true,
+ .num_eps = MTK_MUSB_MAX_EP_NUM,
+ .ram_bits = MTK_MUSB_RAM_BITS,
+};
+
+static const struct platform_device_info mtk_dev_info = {
+ .name = "musb-hdrc",
+ .id = PLATFORM_DEVID_AUTO,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
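mtk_dev_info is completed at probe time and passed to platform_device_register_full(); spawning the shared musb-hdrc core as a child platform device is the standard MUSB glue pattern. Trimmed to a sketch with hypothetical naming:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static struct platform_device *
foo_spawn_musb_core(struct platform_device *pdev, const void *pdata,
		    size_t size)
{
	struct platform_device_info pinfo = {
		.name = "musb-hdrc",
		.id = PLATFORM_DEVID_AUTO,
		.parent = &pdev->dev,
		.res = pdev->resource,
		.num_res = pdev->num_resources,
		.data = pdata,
		.size_data = size,
		.dma_mask = DMA_BIT_MASK(32),
	};

	return platform_device_register_full(&pinfo);
}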
+static int mtk_musb_probe(struct platform_device *pdev)
+{
+ struct musb_hdrc_platform_data *pdata;
+ struct mtk_glue *glue;
+ struct platform_device_info pinfo;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret = -ENOMEM;
+
+ glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = dev;
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create child devices at %p\n", np);
+ return ret;
+ }
+
+ ret = mtk_musb_clks_get(glue);
+ if (ret)
+ return ret;
+
+ pdata->config = &mtk_musb_hdrc_config;
+ pdata->platform_ops = &mtk_musb_ops;
+ pdata->mode = usb_get_dr_mode(dev);
+
+ if (IS_ENABLED(CONFIG_USB_MUSB_HOST))
+ pdata->mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_MUSB_GADGET))
+ pdata->mode = USB_DR_MODE_PERIPHERAL;
+
+ switch (pdata->mode) {
+ case USB_DR_MODE_HOST:
+ glue->phy_mode = PHY_MODE_USB_HOST;
+ glue->role = USB_ROLE_HOST;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ glue->phy_mode = PHY_MODE_USB_DEVICE;
+ glue->role = USB_ROLE_DEVICE;
+ break;
+ case USB_DR_MODE_OTG:
+ glue->phy_mode = PHY_MODE_USB_OTG;
+ glue->role = USB_ROLE_NONE;
+ break;
+ default:
+ dev_err(&pdev->dev, "Error 'dr_mode' property\n");
+ return -EINVAL;
+ }
+
+ glue->phy = devm_of_phy_get_by_index(dev, np, 0);
+ if (IS_ERR(glue->phy)) {
+ dev_err(dev, "fail to getting phy %ld\n",
+ PTR_ERR(glue->phy));
+ return PTR_ERR(glue->phy);
+ }
+
+ glue->usb_phy = usb_phy_generic_register();
+ if (IS_ERR(glue->usb_phy)) {
+ dev_err(dev, "fail to registering usb-phy %ld\n",
+ PTR_ERR(glue->usb_phy));
+ return PTR_ERR(glue->usb_phy);
+ }
+
+ glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ if (IS_ERR(glue->xceiv)) {
+ dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ ret = PTR_ERR(glue->xceiv);
+ goto err_unregister_usb_phy;
+ }
+
+ platform_set_drvdata(pdev, glue);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ ret = mtk_musb_clks_enable(glue);
+ if (ret)
+ goto err_enable_clk;
+
+ pinfo = mtk_dev_info;
+ pinfo.parent = dev;
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
+ dev_err(dev, "failed to register musb device: %d\n", ret);
+ goto err_device_register;
+ }
+
+ return 0;
+
+err_device_register:
+ mtk_musb_clks_disable(glue);
+err_enable_clk:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+err_unregister_usb_phy:
+ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+}
+
+static int mtk_musb_remove(struct platform_device *pdev)
+{
+ struct mtk_glue *glue = platform_get_drvdata(pdev);
+ struct platform_device *usb_phy = glue->usb_phy;
+
+ platform_device_unregister(glue->musb_pdev);
+ usb_phy_generic_unregister(usb_phy);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_musb_match[] = {
+ {.compatible = "mediatek,mtk-musb",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_musb_match);
+#endif
+
+static struct platform_driver mtk_musb_driver = {
+ .probe = mtk_musb_probe,
+ .remove = mtk_musb_remove,
+ .driver = {
+ .name = "musb-mtk",
+ .of_match_table = of_match_ptr(mtk_musb_match),
+ },
+};
+
+module_platform_driver(mtk_musb_driver);
+
+MODULE_DESCRIPTION("MediaTek MUSB Glue Layer");
+MODULE_AUTHOR("Min Guo <min.guo@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
deleted file mode 100644
index 5f04f8e3a640..000000000000
--- a/drivers/usb/musb/musb_am335x.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-static int am335x_child_probe(struct platform_device *pdev)
-{
- int ret;
-
- pm_runtime_enable(&pdev->dev);
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
- if (ret)
- goto err;
-
- return 0;
-err:
- pm_runtime_disable(&pdev->dev);
- return ret;
-}
-
-static const struct of_device_id am335x_child_of_match[] = {
- { .compatible = "ti,am33xx-usb" },
- { },
-};
-MODULE_DEVICE_TABLE(of, am335x_child_of_match);
-
-static struct platform_driver am335x_child_driver = {
- .probe = am335x_child_probe,
- .driver = {
- .name = "am335x-usb-childs",
- .of_match_table = am335x_child_of_match,
- },
-};
-
-static int __init am335x_child_init(void)
-{
- return platform_driver_register(&am335x_child_driver);
-}
-module_init(am335x_child_init);
-
-MODULE_DESCRIPTION("AM33xx child devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 15cca912c53e..f616fb489542 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -73,6 +73,7 @@
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/of.h>
@@ -246,7 +247,7 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
return 0x80 + (0x08 * epnum) + offset;
}
-static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
+static u8 musb_default_readb(void __iomem *addr, u32 offset)
{
u8 data = __raw_readb(addr + offset);
@@ -254,13 +255,13 @@ static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
return data;
}
-static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
+static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
{
trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
__raw_writeb(data, addr + offset);
}
-static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
+static u16 musb_default_readw(void __iomem *addr, u32 offset)
{
u16 data = __raw_readw(addr + offset);
@@ -268,12 +269,44 @@ static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
return data;
}
-static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
+static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
{
trace_musb_writew(__builtin_return_address(0), addr, offset, data);
__raw_writew(data, addr + offset);
}
+static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
+{
+ void __iomem *epio = qh->hw_ep->regs;
+ u16 csr;
+
+ if (is_out)
+ csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
+
+ return csr;
+}
+
+static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
+ struct urb *urb)
+{
+ u16 csr;
+ u16 toggle;
+
+ toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
+
+ if (is_out)
+ csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE)
+ : MUSB_TXCSR_CLRDATATOG;
+ else
+ csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE) : 0;
+
+ return csr;
+}
+
/*
* Load an endpoint's FIFO
*/
@@ -364,19 +397,25 @@ static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
/*
* Old style IO functions
*/
-u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
+u8 (*musb_readb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readb);
-void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
+void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);
-u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
+u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearb);
+
+u16 (*musb_readw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(musb_readw);
-void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
+void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);
-u32 musb_readl(const void __iomem *addr, unsigned offset)
+u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+EXPORT_SYMBOL_GPL(musb_clearw);
+
+u32 musb_readl(void __iomem *addr, u32 offset)
{
u32 data = __raw_readl(addr + offset);
@@ -385,7 +424,7 @@ u32 musb_readl(const void __iomem *addr, unsigned offset)
}
EXPORT_SYMBOL_GPL(musb_readl);
-void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+void musb_writel(void __iomem *addr, u32 offset, u32 data)
{
trace_musb_writel(__builtin_return_address(0), addr, offset, data);
__raw_writel(data, addr + offset);
@@ -414,6 +453,108 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}
+static u8 musb_read_devctl(struct musb *musb)
+{
+ return musb_readb(musb->mregs, MUSB_DEVCTL);
+}
+
+/**
+ * musb_set_host - set and initialize host mode
+ * @musb: musb controller driver data
+ *
+ * At least some musb revisions need to enable devctl session bit in
+ * peripheral mode to switch to host mode. Initializes things to host
+ * mode and sets A_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ *
+ * Note that the SoC glue code may need to wait for musb to settle
+ * on enable before calling this to avoid babble.
+ */
+int musb_set_host(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
+ dev_info(musb->controller,
+ "%s: already in host mode: %02x\n",
+ __func__, devctl);
+ goto init_data;
+ }
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ !(devctl & MUSB_DEVCTL_BDEVICE), 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set host: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+init_data:
+ musb->is_active = 1;
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_host);
+
+/**
+ * musb_set_peripheral - set and initialize peripheral mode
+ * @musb: musb controller driver data
+ *
+ * Clears devctl session bit and initializes things for peripheral
+ * mode and sets B_IDLE. SoC glue needs to advance state further
+ * based on phy provided VBUS state.
+ */
+int musb_set_peripheral(struct musb *musb)
+{
+ int error = 0;
+ u8 devctl;
+
+ if (!musb)
+ return -EINVAL;
+
+ devctl = musb_read_devctl(musb);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ dev_info(musb->controller,
+ "%s: already in peripheral mode: %02x\n",
+ __func__, devctl);
+
+ goto init_data;
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ error = readx_poll_timeout(musb_read_devctl, musb, devctl,
+ devctl & MUSB_DEVCTL_BDEVICE, 5000,
+ 1000000);
+ if (error) {
+ dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
+ __func__, devctl);
+
+ return error;
+ }
+
+init_data:
+ musb->is_active = 0;
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_set_peripheral);
+
/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
@@ -909,7 +1050,6 @@ static void musb_handle_intr_reset(struct musb *musb)
* @param musb instance pointer
* @param int_usb register contents
* @param devctl
- * @param power
*/
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
@@ -1015,7 +1155,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
static void musb_disable_interrupts(struct musb *musb)
{
void __iomem *mbase = musb->mregs;
- u16 temp;
/* disable interrupts */
musb_writeb(mbase, MUSB_INTRUSBE, 0);
@@ -1025,9 +1164,9 @@ static void musb_disable_interrupts(struct musb *musb)
musb_writew(mbase, MUSB_INTRRXE, 0);
/* flush pending interrupts */
- temp = musb_readb(mbase, MUSB_INTRUSB);
- temp = musb_readw(mbase, MUSB_INTRTX);
- temp = musb_readw(mbase, MUSB_INTRRX);
+ musb_clearb(mbase, MUSB_INTRUSB);
+ musb_clearw(mbase, MUSB_INTRTX);
+ musb_clearw(mbase, MUSB_INTRRX);
}
static void musb_enable_interrupts(struct musb *musb)
@@ -1840,6 +1979,9 @@ ATTRIBUTE_GROUPS(musb);
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
+ (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
@@ -1862,6 +2004,11 @@ static void musb_pm_runtime_check_session(struct musb *musb)
s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
@@ -2246,10 +2393,19 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_readb = musb->ops->readb;
if (musb->ops->writeb)
musb_writeb = musb->ops->writeb;
+ if (musb->ops->clearb)
+ musb_clearb = musb->ops->clearb;
+ else
+ musb_clearb = musb_readb;
+
if (musb->ops->readw)
musb_readw = musb->ops->readw;
if (musb->ops->writew)
musb_writew = musb->ops->writew;
+ if (musb->ops->clearw)
+ musb_clearw = musb->ops->clearw;
+ else
+ musb_clearw = musb_readw;
#ifndef CONFIG_MUSB_PIO_ONLY
if (!musb->ops->dma_init || !musb->ops->dma_exit) {
@@ -2271,6 +2427,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
else
musb->io.write_fifo = musb_default_write_fifo;
+ if (musb->ops->get_toggle)
+ musb->io.get_toggle = musb->ops->get_toggle;
+ else
+ musb->io.get_toggle = musb_default_get_toggle;
+
+ if (musb->ops->set_toggle)
+ musb->io.set_toggle = musb->ops->set_toggle;
+ else
+ musb->io.set_toggle = musb_default_set_toggle;
+
if (!musb->xceiv->io_ops) {
musb->xceiv->io_dev = musb->controller;
musb->xceiv->io_priv = musb->mregs;
@@ -2310,6 +2476,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+ musb_writeb(musb->mregs, MUSB_POWER, 0);
+
/* Init IRQ workqueue before request_irq */
INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 04203b7126d5..290a2bc46606 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -27,6 +27,7 @@
struct musb;
struct musb_hw_ep;
struct musb_ep;
+struct musb_qh;
/* Helper defines for struct musb->hwvers */
#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
@@ -119,10 +120,14 @@ struct musb_io;
* @fifo_offset: returns the fifo offset
* @readb: read 8 bits
* @writeb: write 8 bits
+ * @clearb: clear 8 bits, either clear-on-read or write-1-to-clear
* @readw: read 16 bits
* @writew: write 16 bits
+ * @clearw: clear 16 bits, either clear-on-read or write-1-to-clear
* @read_fifo: reads the fifo
* @write_fifo: writes to fifo
+ * @get_toggle: platform specific get toggle function
+ * @set_toggle: platform specific set toggle function
* @dma_init: platform specific dma init function
* @dma_exit: platform specific dma exit function
* @init: turns on clocks, sets up platform-specific registers, etc
@@ -161,12 +166,16 @@ struct musb_platform_ops {
u16 fifo_mode;
u32 (*fifo_offset)(u8 epnum);
u32 (*busctl_offset)(u8 epnum, u16 offset);
- u8 (*readb)(const void __iomem *addr, unsigned offset);
- void (*writeb)(void __iomem *addr, unsigned offset, u8 data);
- u16 (*readw)(const void __iomem *addr, unsigned offset);
- void (*writew)(void __iomem *addr, unsigned offset, u16 data);
+ u8 (*readb)(void __iomem *addr, u32 offset);
+ void (*writeb)(void __iomem *addr, u32 offset, u8 data);
+ u8 (*clearb)(void __iomem *addr, u32 offset);
+ u16 (*readw)(void __iomem *addr, u32 offset);
+ void (*writew)(void __iomem *addr, u32 offset, u16 data);
+ u16 (*clearw)(void __iomem *addr, u32 offset);
void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
struct dma_controller *
(*dma_init) (struct musb *musb, void __iomem *base);
void (*dma_exit)(struct dma_controller *c);
@@ -487,6 +496,9 @@ extern void musb_start(struct musb *musb);
extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+extern int musb_set_host(struct musb *musb);
+extern int musb_set_peripheral(struct musb *musb);
+
extern void musb_load_testpacket(struct musb *);
extern irqreturn_t musb_interrupt(struct musb *);
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 8f60271c0a9d..4b4d8dc5d3f2 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -35,6 +35,12 @@ struct musb_hw_ep;
* whether shared with the Inventra core or separate.
*/
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#ifdef CONFIG_MUSB_PIO_ONLY
@@ -191,6 +197,9 @@ extern void (*musb_dma_controller_destroy)(struct dma_controller *);
extern struct dma_controller *
musbhs_dma_controller_create(struct musb *musb, void __iomem *base);
extern void musbhs_dma_controller_destroy(struct dma_controller *c);
+extern struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base);
+extern irqreturn_t dma_controller_irq(int irq, void *private_data);
extern struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 5a44b70372d9..886c9b602f8c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -286,26 +286,6 @@ __acquires(musb->lock)
spin_lock(&musb->lock);
}
-/* For bulk/interrupt endpoints only */
-static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
- struct urb *urb)
-{
- void __iomem *epio = qh->hw_ep->regs;
- u16 csr;
-
- /*
- * FIXME: the current Mentor DMA code seems to have
- * problems getting toggle correct.
- */
-
- if (is_in)
- csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
- else
- csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
-
- usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
-}
-
/*
* Advance this hardware endpoint's queue, completing the specified URB and
* advancing to either the next URB queued to that qh, or else invalidating
@@ -320,6 +300,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *ep = qh->hw_ep;
int ready = qh->is_ready;
int status;
+ u16 toggle;
status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
@@ -327,7 +308,8 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
- musb_save_toggle(qh, is_in, urb);
+ toggle = musb->io.get_toggle(qh, !is_in);
+ usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
break;
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
@@ -772,13 +754,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
);
csr |= MUSB_TXCSR_MODE;
- if (!hw_ep->tx_double_buffered) {
- if (usb_gettoggle(urb->dev, qh->epnum, 1))
- csr |= MUSB_TXCSR_H_WR_DATATOGGLE
- | MUSB_TXCSR_H_DATATOGGLE;
- else
- csr |= MUSB_TXCSR_CLRDATATOG;
- }
+ if (!hw_ep->tx_double_buffered)
+ csr |= musb->io.set_toggle(qh, is_out, urb);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
@@ -860,17 +837,12 @@ finish:
/* IN/receive */
} else {
- u16 csr;
+ u16 csr = 0;
if (hw_ep->rx_reinit) {
musb_rx_reinit(musb, qh, epnum);
+ csr |= musb->io.set_toggle(qh, is_out, urb);
- /* init new state: toggle and NYET, maybe DMA later */
- if (usb_gettoggle(urb->dev, qh->epnum, 0))
- csr = MUSB_RXCSR_H_WR_DATATOGGLE
- | MUSB_RXCSR_H_DATATOGGLE;
- else
- csr = 0;
if (qh->type == USB_ENDPOINT_XFER_INT)
csr |= MUSB_RXCSR_DISNYET;
@@ -933,6 +905,7 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
void __iomem *epio = ep->regs;
struct musb_qh *cur_qh, *next_qh;
u16 rx_csr, tx_csr;
+ u16 toggle;
musb_ep_select(mbase, ep->epnum);
if (is_in) {
@@ -970,7 +943,8 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
}
- musb_save_toggle(cur_qh, is_in, urb);
+ toggle = musb->io.get_toggle(cur_qh, !is_in);
+ usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
if (is_in) {
/* move cur_qh to end of queue */
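The toggle handling removed from musb_save_toggle() and musb_ep_program() above is not lost: it becomes the default get_toggle/set_toggle pair behind musb->io, which a platform can override when toggle state lives somewhere other than the CSR bits (for instance in a vendor DMA engine). Below is a sketch consistent with the removed logic; the actual defaults are installed in musb_core.c, outside this hunk, and these names are assumptions:

	static u16 example_default_get_toggle(struct musb_qh *qh, int is_out)
	{
		void __iomem *epio = qh->hw_ep->regs;

		if (is_out)
			return musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

		return musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	}

	static u16 example_default_set_toggle(struct musb_qh *qh, int is_out,
					      struct urb *urb)
	{
		u16 toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);

		if (is_out)
			return toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
					 | MUSB_TXCSR_H_DATATOGGLE)
				      : MUSB_TXCSR_CLRDATATOG;

		return toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
				 | MUSB_RXCSR_H_DATATOGGLE)
			      : 0;
	}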
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 8058a58092cf..f17aabd95a50 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -22,6 +22,8 @@
* @read_fifo: platform specific function to read fifo
* @write_fifo: platform specific function to write fifo
* @busctl_offset: platform specific function to get busctl offset
+ * @get_toggle: platform specific function to get toggle
+ * @set_toggle: platform specific function to set toggle
*/
struct musb_io {
u32 (*ep_offset)(u8 epnum, u16 offset);
@@ -30,14 +32,18 @@ struct musb_io {
void (*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
void (*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
u32 (*busctl_offset)(u8 epnum, u16 offset);
+ u16 (*get_toggle)(struct musb_qh *qh, int is_out);
+ u16 (*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
};
/* Do not add new entries here, add them to struct musb_io instead */
-extern u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
-extern u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
-extern void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
-extern u32 musb_readl(const void __iomem *addr, unsigned offset);
-extern void musb_writel(void __iomem *addr, unsigned offset, u32 data);
+extern u8 (*musb_readb)(void __iomem *addr, u32 offset);
+extern void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
+extern u8 (*musb_clearb)(void __iomem *addr, u32 offset);
+extern u16 (*musb_readw)(void __iomem *addr, u32 offset);
+extern void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
+extern u16 (*musb_clearw)(void __iomem *addr, u32 offset);
+extern u32 musb_readl(void __iomem *addr, u32 offset);
+extern void musb_writel(void __iomem *addr, u32 offset, u32 data);
#endif
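When a platform does not provide clearb/clearw, the core must fall back to something; since the stock Mentor interrupt registers are clear-on-read, a plain read is the natural default. A hedged sketch of such a fallback; the name and its placement in musb_core.c are assumptions, not shown in this diff:

	static u8 example_default_clearb(void __iomem *addr, u32 offset)
	{
		/* on the stock core, reading the INTR register clears it */
		return __raw_readb(addr + offset);
	}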
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index a97d618fe8ff..b193daf69685 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -38,11 +38,12 @@ TRACE_EVENT(musb_log,
);
DECLARE_EVENT_CLASS(musb_regb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u8, data)
),
@@ -57,21 +58,24 @@ DECLARE_EVENT_CLASS(musb_regb,
);
DEFINE_EVENT(musb_regb, musb_readb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regb, musb_writeb,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u8 data),
TP_ARGS(caller, addr, offset, data)
);
DECLARE_EVENT_CLASS(musb_regw,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u16, data)
),
@@ -86,21 +90,24 @@ DECLARE_EVENT_CLASS(musb_regw,
);
DEFINE_EVENT(musb_regw, musb_readw,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regw, musb_writew,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u16 data),
TP_ARGS(caller, addr, offset, data)
);
DECLARE_EVENT_CLASS(musb_regl,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data),
TP_STRUCT__entry(
__field(void *, caller)
- __field(const void *, addr)
+ __field(const void __iomem *, addr)
__field(unsigned int, offset)
__field(u32, data)
),
@@ -115,12 +122,14 @@ DECLARE_EVENT_CLASS(musb_regl,
);
DEFINE_EVENT(musb_regl, musb_readl,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data)
);
DEFINE_EVENT(musb_regl, musb_writel,
- TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_PROTO(void *caller, const void __iomem *addr,
+ unsigned int offset, u32 data),
TP_ARGS(caller, addr, offset, data)
);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 5fc6825745f2..0aacfc8be5a1 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -10,12 +10,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "musb_core.h"
-
-#define MUSB_HSDMA_BASE 0x200
-#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL 0x4
-#define MUSB_HSDMA_ADDRESS 0x8
-#define MUSB_HSDMA_COUNT 0xc
+#include "musb_dma.h"
#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset) \
(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
@@ -268,7 +263,7 @@ static int dma_channel_abort(struct dma_channel *channel)
return 0;
}
-static irqreturn_t dma_controller_irq(int irq, void *private_data)
+irqreturn_t dma_controller_irq(int irq, void *private_data)
{
struct musb_dma_controller *controller = private_data;
struct musb *musb = controller->private_data;
@@ -289,7 +284,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
spin_lock_irqsave(&musb->lock, flags);
- int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+ int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);
if (!int_hsdma) {
musb_dbg(musb, "spurious DMA irq");
@@ -383,6 +378,7 @@ done:
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(dma_controller_irq);
void musbhs_dma_controller_destroy(struct dma_controller *c)
{
@@ -398,18 +394,10 @@ void musbhs_dma_controller_destroy(struct dma_controller *c)
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);
-struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
- void __iomem *base)
+static struct musb_dma_controller *
+dma_controller_alloc(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
- struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
- int irq = platform_get_irq_byname(pdev, "dma");
-
- if (irq <= 0) {
- dev_err(dev, "No DMA interrupt line!\n");
- return NULL;
- }
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
@@ -423,9 +411,28 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
+ return controller;
+}
+
+struct dma_controller *
+musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq_byname(pdev, "dma");
+
+ if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
if (request_irq(irq, dma_controller_irq, 0,
- dev_name(musb->controller), &controller->controller)) {
+ dev_name(musb->controller), controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
musb_dma_controller_destroy(&controller->controller);
@@ -437,3 +444,16 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);
+
+struct dma_controller *
+musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
+{
+ struct musb_dma_controller *controller;
+
+ controller = dma_controller_alloc(musb, base);
+ if (!controller)
+ return NULL;
+
+ return &controller->controller;
+}
+EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3d2fef67746..d62c78b97cad 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -38,69 +38,6 @@ struct omap2430_glue {
static struct omap2430_glue *_glue;
-static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
-{
- struct usb_otg *otg = musb->xceiv->otg;
- u8 devctl;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- /* HDRC controls CPEN, but beware current surges during device
- * connect. They can trigger transient overcurrent conditions
- * that must be ignored.
- */
-
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- if (is_on) {
- if (musb->xceiv->otg->state == OTG_STATE_A_IDLE) {
- int loops = 100;
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- /*
- * Wait for the musb to set as A device to enable the
- * VBUS
- */
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
-
- mdelay(5);
- cpu_relax();
-
- if (time_after(jiffies, timeout)
- || loops-- <= 0) {
- dev_err(musb->controller,
- "configured as A device timeout");
- break;
- }
- }
-
- otg_set_vbus(otg, 1);
- } else {
- musb->is_active = 1;
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- devctl |= MUSB_DEVCTL_SESSION;
- MUSB_HST_MODE(musb);
- }
- } else {
- musb->is_active = 0;
-
- /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
- * jumping right to B_IDLE...
- */
-
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- devctl &= ~MUSB_DEVCTL_SESSION;
-
- MUSB_DEV_MODE(musb);
- }
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
- dev_dbg(musb->controller, "VBUS %s, devctl %02x "
- /* otg %3x conf %08x prcm %08x */ "\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- musb_readb(musb->mregs, MUSB_DEVCTL));
-}
-
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
@@ -140,24 +77,52 @@ static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
return 0;
}
+/*
+ * HDRC controls CPEN, but beware current surges during device connect.
+ * They can trigger transient overcurrent conditions that must be ignored.
+ *
+ * Note that we're skipping A_WAIT_VFALL -> A_IDLE and jumping right to B_IDLE
+ * as set by musb_set_peripheral().
+ */
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
struct musb *musb = glue_to_musb(glue);
- struct musb_hdrc_platform_data *pdata =
- dev_get_platdata(musb->controller);
- struct omap_musb_board_data *data = pdata->board_data;
+ int error;
pm_runtime_get_sync(musb->controller);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
switch (glue->status) {
case MUSB_ID_GROUND:
dev_dbg(musb->controller, "ID GND\n");
-
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
- musb->xceiv->last_event = USB_EVENT_ID;
- if (musb->gadget_driver) {
- omap_control_usb_set_mode(glue->control_otghs,
- USB_MODE_HOST);
- omap2430_musb_set_vbus(musb, 1);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_IDLE:
+ error = musb_set_host(musb);
+ if (error)
+ break;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ /* Fall through */
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_HOST:
+ /*
+ * On repeated ID ground interrupts, just keep enabling
+ * VBUS; otherwise at least the cpcap VBUS supply shuts down.
+ */
+ otg_set_vbus(musb->xceiv->otg, 1);
+ break;
+ default:
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->last_event = USB_EVENT_ID;
+ if (musb->gadget_driver) {
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_HOST);
+ otg_set_vbus(musb->xceiv->otg, 1);
+ }
+ break;
}
break;
@@ -174,12 +139,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(musb->controller, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
- if (musb->gadget_driver)
- omap2430_musb_set_vbus(musb, 0);
-
- if (data->interface_type == MUSB_INTERFACE_UTMI)
- otg_set_vbus(musb->xceiv->otg, 0);
-
+ musb_set_peripheral(musb);
+ otg_set_vbus(musb->xceiv->otg, 0);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
break;
@@ -226,7 +187,6 @@ static int omap2430_musb_init(struct musb *musb)
u32 l;
int status = 0;
struct device *dev = musb->controller;
- struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
@@ -282,50 +242,17 @@ static int omap2430_musb_init(struct musb *musb)
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- if (glue->status != MUSB_UNKNOWN)
- omap_musb_set_mailbox(glue);
-
return 0;
}
static void omap2430_musb_enable(struct musb *musb)
{
- u8 devctl;
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
- struct omap_musb_board_data *data = pdata->board_data;
-
-
- switch (glue->status) {
-
- case MUSB_ID_GROUND:
- omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST);
- if (data->interface_type != MUSB_INTERFACE_UTMI)
- break;
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_err(dev, "configured as A device timeout");
- break;
- }
- }
- break;
- case MUSB_VBUS_VALID:
- omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
- break;
-
- default:
- break;
- }
+ if (glue->status == MUSB_UNKNOWN)
+ glue->status = MUSB_VBUS_OFF;
+ omap_musb_set_mailbox(glue);
}
static void omap2430_musb_disable(struct musb *musb)
@@ -361,8 +288,6 @@ static const struct musb_platform_ops omap2430_ops = {
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_vbus = omap2430_musb_set_vbus,
-
.enable = omap2430_musb_enable,
.disable = omap2430_musb_disable,
@@ -552,6 +477,9 @@ static int omap2430_runtime_resume(struct device *dev)
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
+ /* Wait for musb to get oriented. Otherwise we can get babble */
+ usleep_range(200000, 250000);
+
return 0;
}
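The glue now defers session management to the musb_set_host()/musb_set_peripheral() helpers declared in musb_core.h above instead of open-coding DEVCTL handling. Their implementation is not part of this hunk; the following is only a sketch of what musb_set_host() plausibly does, modeled on the omap2430 code removed here, with the retry bound and sleep range as assumptions:

	int musb_set_host(struct musb *musb)
	{
		int retries = 100;	/* hypothetical bound */
		u8 devctl;

		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if (!(devctl & MUSB_DEVCTL_BDEVICE))
			goto done;	/* already an A-device */

		/* start a session, then wait until the controller
		 * stops reporting itself as a B-device */
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

		while (musb_readb(musb->mregs, MUSB_DEVCTL) & MUSB_DEVCTL_BDEVICE) {
			if (!retries--) {
				dev_err(musb->controller,
					"timed out waiting for host mode\n");
				return -ETIMEDOUT;
			}
			usleep_range(5000, 10000);
		}
	done:
		MUSB_HST_MODE(musb);
		return 0;
	}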
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 832a41f9ee7d..f3f76f2ac63f 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -407,7 +407,7 @@ static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
return SUNXI_MUSB_TXFUNCADDR + offset;
}
-static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
{
struct sunxi_glue *glue;
@@ -520,7 +520,7 @@ static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
(int)(addr - sunxi_musb->mregs));
}
-static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+static u16 sunxi_musb_readw(void __iomem *addr, u32 offset)
{
if (addr == sunxi_musb->mregs) {
/* generic control or fifo control reg access */
@@ -781,6 +781,8 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pinfo.name = "musb-hdrc";
pinfo.id = PLATFORM_DEVID_AUTO;
pinfo.parent = &pdev->dev;
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
pinfo.res = pdev->resource;
pinfo.num_res = pdev->num_resources;
pinfo.data = &pdata;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 39453287b5c3..5d449089e3ad 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -142,7 +142,7 @@ static void tusb_ep_select(void __iomem *mbase, u8 epnum)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
*/
-static u8 tusb_readb(const void __iomem *addr, unsigned offset)
+static u8 tusb_readb(void __iomem *addr, u32 offset)
{
u16 tmp;
u8 val;
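As the comment above notes, the TUSB6010 bus interface cannot do 8-bit cycles, so tusb_readb() has to emulate a byte read with an aligned 16-bit access; only the signature changes in this patch. A sketch of the byte extraction such an accessor performs, reproduced here as an illustration:

	static u8 example_tusb_readb(void __iomem *addr, u32 offset)
	{
		/* read the aligned 16-bit word, then pick the requested byte */
		u16 tmp = __raw_readw(addr + (offset & ~1));

		return (offset & 1) ? (tmp >> 8) : (tmp & 0xff);
	}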
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d19bb3e89da6..d5cf5e8bb1ca 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -310,9 +310,9 @@ static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
dma_channel->max_len = SZ_16M;
ux500_channel->dma_chan =
- dma_request_slave_channel(dev, chan_names[ch_num]);
+ dma_request_chan(dev, chan_names[ch_num]);
- if (!ux500_channel->dma_chan)
+ if (IS_ERR(ux500_channel->dma_chan))
ux500_channel->dma_chan =
dma_request_channel(mask,
data ?
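The switch matters because dma_request_slave_channel() returns NULL on failure while dma_request_chan() returns an ERR_PTR(), so the check has to become IS_ERR(). A minimal sketch of the new idiom outside this driver; the example_ names are illustrative:

	#include <linux/dmaengine.h>

	static struct dma_chan *example_get_chan(struct device *dev,
						 const char *name)
	{
		struct dma_chan *chan = dma_request_chan(dev, name);

		if (IS_ERR(chan)) {
			/* note: -EPROBE_DEFER should be propagated, not hidden */
			dev_warn(dev, "no '%s' channel: %ld\n",
				 name, PTR_ERR(chan));
			return NULL;	/* caller may fall back, as ux500 does here */
		}

		return chan;
	}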
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 24b4f091acb8..ff24fca0a2d9 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -162,7 +162,7 @@ config USB_MXS_PHY
config USB_TEGRA_PHY
tristate "NVIDIA Tegra USB PHY Driver"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
select USB_COMMON
select USB_PHY
select USB_ULPI
@@ -172,7 +172,7 @@ config USB_TEGRA_PHY
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || COMPILE_TEST
select USB_ULPI_VIEWPORT
help
Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 4bb4b1d42f32..20c0f082bf9c 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -108,7 +108,8 @@ enum ab8500_usb_mode {
USB_IDLE = 0,
USB_PERIPHERAL,
USB_HOST,
- USB_DEDICATED_CHG
+ USB_DEDICATED_CHG,
+ USB_UART
};
/* Register USB_LINK_STATUS interrupt */
@@ -393,6 +394,24 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
usb_phy_set_event(&ab->phy, USB_EVENT_CHARGER);
break;
+ /*
+ * FIXME: For now we rely on the boot firmware to set up the necessary
+ * PHY/pin configuration for UART mode.
+ *
+ * AB8505 does not seem to report any status change for UART cables,
+ * possibly because it cannot detect them autonomously.
+ * We may need to measure the ID resistance manually to reliably
+ * detect UART cables after bootup.
+ */
+ case USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505:
+ case USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505:
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_UART;
+ ab8500_usb_peri_phy_en(ab);
+ }
+
+ break;
+
default:
break;
}
@@ -566,6 +585,11 @@ static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
ab->vbus_draw = 0;
}
+ if (ab->mode == USB_UART) {
+ ab8500_usb_peri_phy_dis(ab);
+ ab->mode = USB_IDLE;
+ }
+
if (is_ab8500_2p0(ab->ab8500)) {
if (ab->mode == USB_DEDICATED_CHG) {
ab8500_usb_wd_linkstatus(ab,
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index f5f0568d8533..8524475d942d 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -57,7 +57,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
- ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
+ ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen);
if (ret)
return ret;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index a53b89be5324..661a229c105d 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -21,8 +21,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "phy-generic.h"
@@ -204,8 +203,7 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
return 0;
}
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
- struct usb_phy_generic_platform_data *pdata)
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
{
enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err = 0;
@@ -221,28 +219,15 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
needs_vcc = of_property_read_bool(node, "vcc-supply");
needs_clk = of_property_read_bool(node, "clocks");
- nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_ASIS);
- err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
- if (!err) {
- nop->gpiod_vbus = devm_gpiod_get_optional(dev,
- "vbus-detect",
- GPIOD_ASIS);
- err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
- }
- } else if (pdata) {
- type = pdata->type;
- clk_rate = pdata->clk_rate;
- needs_vcc = pdata->needs_vcc;
- if (gpio_is_valid(pdata->gpio_reset)) {
- err = devm_gpio_request_one(dev, pdata->gpio_reset,
- GPIOF_ACTIVE_LOW,
- dev_name(dev));
- if (!err)
- nop->gpiod_reset =
- gpio_to_desc(pdata->gpio_reset);
- }
- nop->gpiod_vbus = pdata->gpiod_vbus;
+ }
+ nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
+ if (!err) {
+ nop->gpiod_vbus = devm_gpiod_get_optional(dev,
+ "vbus-detect",
+ GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
if (err == -EPROBE_DEFER)
@@ -308,7 +293,7 @@ static int usb_phy_generic_probe(struct platform_device *pdev)
if (!nop)
return -ENOMEM;
- err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
+ err = usb_phy_gen_create_phy(dev, nop);
if (err)
return err;
if (nop->gpiod_vbus) {
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index 97289627561d..7ee80211a688 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -22,7 +22,6 @@ struct usb_phy_generic {
int usb_gen_phy_init(struct usb_phy *phy);
void usb_gen_phy_shutdown(struct usb_phy *phy);
-int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
- struct usb_phy_generic_platform_data *pdata);
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop);
#endif
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 553e2573c74f..f13f5530746c 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -17,7 +17,6 @@
#include <linux/regulator/consumer.h>
#include <linux/usb/gadget.h>
-#include <linux/usb/gpio_vbus.h>
#include <linux/usb/otg.h>
@@ -29,6 +28,8 @@
* Needs to be loaded before the UDC driver that will use it.
*/
struct gpio_vbus_data {
+ struct gpio_desc *vbus_gpiod;
+ struct gpio_desc *pullup_gpiod;
struct usb_phy phy;
struct device *dev;
struct regulator *vbus_draw;
@@ -83,38 +84,30 @@ static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
gpio_vbus->mA = mA;
}
-static int is_vbus_powered(struct gpio_vbus_mach_info *pdata)
+static int is_vbus_powered(struct gpio_vbus_data *gpio_vbus)
{
- int vbus;
-
- vbus = gpio_get_value(pdata->gpio_vbus);
- if (pdata->gpio_vbus_inverted)
- vbus = !vbus;
-
- return vbus;
+ return gpiod_get_value(gpio_vbus->vbus_gpiod);
}
static void gpio_vbus_work(struct work_struct *work)
{
struct gpio_vbus_data *gpio_vbus =
container_of(work, struct gpio_vbus_data, work.work);
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(gpio_vbus->dev);
- int gpio, status, vbus;
+ int status, vbus;
if (!gpio_vbus->phy.otg->gadget)
return;
- vbus = is_vbus_powered(pdata);
+ vbus = is_vbus_powered(gpio_vbus);
if ((vbus ^ gpio_vbus->vbus) == 0)
return;
gpio_vbus->vbus = vbus;
/* Peripheral controllers which manage the pullup themselves won't have
- * gpio_pullup configured here. If it's configured here, we'll do what
- * isp1301_omap::b_peripheral() does and enable the pullup here... although
- * that may complicate usb_gadget_{,dis}connect() support.
+ * a pullup GPIO configured here. If it's configured here, we'll do
+ * what isp1301_omap::b_peripheral() does and enable the pullup here...
+ * although that may complicate usb_gadget_{,dis}connect() support.
*/
- gpio = pdata->gpio_pullup;
if (vbus) {
status = USB_EVENT_VBUS;
@@ -126,16 +119,16 @@ static void gpio_vbus_work(struct work_struct *work)
set_vbus_draw(gpio_vbus, 100);
/* optionally enable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 1);
atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
status, gpio_vbus->phy.otg->gadget);
usb_phy_set_event(&gpio_vbus->phy, USB_EVENT_ENUMERATED);
} else {
/* optionally disable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
@@ -154,12 +147,11 @@ static void gpio_vbus_work(struct work_struct *work)
static irqreturn_t gpio_vbus_irq(int irq, void *data)
{
struct platform_device *pdev = data;
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
struct usb_otg *otg = gpio_vbus->phy.otg;
dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
- is_vbus_powered(pdata) ? "supplied" : "inactive",
+ is_vbus_powered(gpio_vbus) ? "supplied" : "inactive",
otg->gadget ? otg->gadget->name : "none");
if (otg->gadget)
@@ -175,22 +167,18 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
struct gpio_vbus_data *gpio_vbus;
- struct gpio_vbus_mach_info *pdata;
struct platform_device *pdev;
- int gpio;
gpio_vbus = container_of(otg->usb_phy, struct gpio_vbus_data, phy);
pdev = to_platform_device(gpio_vbus->dev);
- pdata = dev_get_platdata(gpio_vbus->dev);
- gpio = pdata->gpio_pullup;
if (!gadget) {
dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
otg->gadget->name);
/* optionally disable D+ pullup */
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_value(gpio_vbus->pullup_gpiod, 0);
set_vbus_draw(gpio_vbus, 0);
@@ -242,16 +230,12 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
static int gpio_vbus_probe(struct platform_device *pdev)
{
- struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus;
struct resource *res;
- int err, gpio, irq;
+ struct device *dev = &pdev->dev;
+ int err, irq;
unsigned long irqflags;
- if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
- return -EINVAL;
- gpio = pdata->gpio_vbus;
-
gpio_vbus = devm_kzalloc(&pdev->dev, sizeof(struct gpio_vbus_data),
GFP_KERNEL);
if (!gpio_vbus)
@@ -273,37 +257,43 @@ static int gpio_vbus_probe(struct platform_device *pdev)
gpio_vbus->phy.otg->usb_phy = &gpio_vbus->phy;
gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;
- err = devm_gpio_request(&pdev->dev, gpio, "vbus_detect");
- if (err) {
- dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
- gpio, err);
+ /* Look up the VBUS sensing GPIO */
+ gpio_vbus->vbus_gpiod = devm_gpiod_get(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(gpio_vbus->vbus_gpiod)) {
+ err = PTR_ERR(gpio_vbus->vbus_gpiod);
+ dev_err(&pdev->dev, "can't request vbus gpio, err: %d\n", err);
return err;
}
- gpio_direction_input(gpio);
+ gpiod_set_consumer_name(gpio_vbus->vbus_gpiod, "vbus_detect");
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res) {
irq = res->start;
irqflags = (res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
} else {
- irq = gpio_to_irq(gpio);
+ irq = gpiod_to_irq(gpio_vbus->vbus_gpiod);
irqflags = VBUS_IRQ_FLAGS;
}
gpio_vbus->irq = irq;
- /* if data line pullup is in use, initialize it to "not pulling up" */
- gpio = pdata->gpio_pullup;
- if (gpio_is_valid(gpio)) {
- err = devm_gpio_request(&pdev->dev, gpio, "udc_pullup");
- if (err) {
- dev_err(&pdev->dev,
- "can't request pullup gpio %d, err: %d\n",
- gpio, err);
- return err;
- }
- gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
+ /*
+ * The VBUS sensing GPIO should have a pulldown, which will normally be
+ * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
+ * value the GPIO detects as active. Some systems will use comparators.
+ * Get the optional D+ or D- pullup GPIO. If the data line pullup is
+ * in use, initialize it to "not pulling up".
+ */
+ gpio_vbus->pullup_gpiod = devm_gpiod_get_optional(dev, "pullup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_vbus->pullup_gpiod)) {
+ err = PTR_ERR(gpio_vbus->pullup_gpiod);
+ dev_err(&pdev->dev, "can't request pullup gpio, err: %d\n",
+ err);
+ return err;
}
+ if (gpio_vbus->pullup_gpiod)
+ gpiod_set_consumer_name(gpio_vbus->pullup_gpiod, "udc_pullup");
err = devm_request_irq(&pdev->dev, irq, gpio_vbus_irq, irqflags,
"vbus_detect", pdev);
@@ -330,7 +320,7 @@ static int gpio_vbus_probe(struct platform_device *pdev)
return err;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ /* TODO: wakeup could be enabled here with device_init_wakeup(dev, 1) */
return 0;
}
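With struct gpio_vbus_mach_info gone, a board file that still instantiates this driver describes its GPIOs through a gpiod lookup table, which devm_gpiod_get(dev, "vbus", ...) and devm_gpiod_get_optional(dev, "pullup", ...) then resolve (DT users would use vbus-gpios/pullup-gpios properties instead). A hedged board-file sketch; the chip label and pin numbers are placeholders, not from this patch:

	#include <linux/gpio/machine.h>

	static struct gpiod_lookup_table example_gpio_vbus_gpios = {
		.dev_id = "gpio-vbus",	/* must match the platform device name */
		.table = {
			GPIO_LOOKUP("gpio-chip0", 12, "vbus", GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP("gpio-chip0", 13, "pullup", GPIO_ACTIVE_LOW),
			{ },
		},
	};

	/* in board init code: gpiod_add_lookup_table(&example_gpio_vbus_gpios); */

An inverted pullup, previously gpio_pullup_inverted, is now expressed with GPIO_ACTIVE_LOW in the lookup flags.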
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 110e6e9ad621..9c226b57153b 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -76,7 +76,7 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
- ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen, NULL);
+ ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen);
if (ret)
return ret;
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index ea7ef1dc0b42..037e8eee737d 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -9,54 +9,56 @@
* Venu Byravarasu <vbyravarasu@nvidia.com>
*/
-#include <linux/resource.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/iopoll.h>
#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/of.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/regulator/consumer.h>
+
#include <linux/usb/ehci_def.h>
+#include <linux/usb/of.h>
#include <linux/usb/tegra_usb_phy.h>
-#include <linux/regulator/consumer.h>
+#include <linux/usb/ulpi.h>
-#define ULPI_VIEWPORT 0x170
+#define ULPI_VIEWPORT 0x170
/* PORTSC PTS/PHCD bits, Tegra20 only */
-#define TEGRA_USB_PORTSC1 0x184
-#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
-#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+#define TEGRA_USB_PORTSC1 0x184
+#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD BIT(23)
/* HOSTPC1 PTS/PHCD bits, Tegra30 and above */
-#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
-#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
-#define TEGRA_USB_HOSTPC1_DEVLC_PHCD (1 << 22)
+#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
+#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
+#define TEGRA_USB_HOSTPC1_DEVLC_PHCD BIT(22)
/* Bits of PORTSC1, which will get cleared by writing 1 into them */
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-#define USB_SUSP_CTRL 0x400
-#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
-#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
-#define USB_SUSP_CLR (1 << 5)
-#define USB_PHY_CLK_VALID (1 << 7)
-#define UTMIP_RESET (1 << 11)
-#define UHSIC_RESET (1 << 11)
-#define UTMIP_PHY_ENABLE (1 << 12)
-#define ULPI_PHY_ENABLE (1 << 13)
-#define USB_SUSP_SET (1 << 14)
-#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
-
-#define USB1_LEGACY_CTRL 0x410
-#define USB1_NO_LEGACY_MODE (1 << 0)
+#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_CNNT_EN_DEV BIT(3)
+#define USB_WAKE_ON_DISCON_EN_DEV BIT(4)
+#define USB_SUSP_CLR BIT(5)
+#define USB_PHY_CLK_VALID BIT(7)
+#define UTMIP_RESET BIT(11)
+#define UHSIC_RESET BIT(11)
+#define UTMIP_PHY_ENABLE BIT(12)
+#define ULPI_PHY_ENABLE BIT(13)
+#define USB_SUSP_SET BIT(14)
+#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+
+#define USB1_LEGACY_CTRL 0x410
+#define USB1_NO_LEGACY_MODE BIT(0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
@@ -64,94 +66,94 @@
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
-#define ULPI_TIMING_CTRL_0 0x424
-#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
-#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
+#define ULPI_TIMING_CTRL_0 0x424
+#define ULPI_OUTPUT_PINMUX_BYP BIT(10)
+#define ULPI_CLKOUT_PINMUX_BYP BIT(11)
-#define ULPI_TIMING_CTRL_1 0x428
-#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
-#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
-#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
-#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
-#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
-#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
+#define ULPI_TIMING_CTRL_1 0x428
+#define ULPI_DATA_TRIMMER_LOAD BIT(0)
+#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
+#define ULPI_STPDIRNXT_TRIMMER_LOAD BIT(16)
+#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
+#define ULPI_DIR_TRIMMER_LOAD BIT(24)
+#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
-#define UTMIP_PLL_CFG1 0x804
+#define UTMIP_PLL_CFG1 0x804
#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
-#define UTMIP_XCVR_CFG0 0x808
+#define UTMIP_XCVR_CFG0 0x808
#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
#define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22)
#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
-#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
-#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
-#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
-#define UTMIP_XCVR_LSBIAS_SEL (1 << 21)
+#define UTMIP_FORCE_PD_POWERDOWN BIT(14)
+#define UTMIP_FORCE_PD2_POWERDOWN BIT(16)
+#define UTMIP_FORCE_PDZI_POWERDOWN BIT(18)
+#define UTMIP_XCVR_LSBIAS_SEL BIT(21)
#define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4)
#define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25)
-#define UTMIP_BIAS_CFG0 0x80c
-#define UTMIP_OTGPD (1 << 11)
-#define UTMIP_BIASPD (1 << 10)
-#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
-#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
-#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
+#define UTMIP_BIAS_CFG0 0x80c
+#define UTMIP_OTGPD BIT(11)
+#define UTMIP_BIASPD BIT(10)
+#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
+#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
+#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
-#define UTMIP_HSRX_CFG0 0x810
-#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
-#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
+#define UTMIP_HSRX_CFG0 0x810
+#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
+#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
-#define UTMIP_HSRX_CFG1 0x814
-#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
+#define UTMIP_HSRX_CFG1 0x814
+#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
-#define UTMIP_TX_CFG0 0x820
-#define UTMIP_FS_PREABMLE_J (1 << 19)
-#define UTMIP_HS_DISCON_DISABLE (1 << 8)
+#define UTMIP_TX_CFG0 0x820
+#define UTMIP_FS_PREABMLE_J BIT(19)
+#define UTMIP_HS_DISCON_DISABLE BIT(8)
-#define UTMIP_MISC_CFG0 0x824
-#define UTMIP_DPDM_OBSERVE (1 << 26)
-#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
-#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
-#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
+#define UTMIP_MISC_CFG0 0x824
+#define UTMIP_DPDM_OBSERVE BIT(26)
+#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
+#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
+#define UTMIP_SUSPEND_EXIT_ON_EDGE BIT(22)
-#define UTMIP_MISC_CFG1 0x828
-#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
-#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+#define UTMIP_MISC_CFG1 0x828
+#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
+#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
-#define UTMIP_DEBOUNCE_CFG0 0x82c
-#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
+#define UTMIP_DEBOUNCE_CFG0 0x82c
+#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
-#define UTMIP_BAT_CHRG_CFG0 0x830
-#define UTMIP_PD_CHRG (1 << 0)
+#define UTMIP_BAT_CHRG_CFG0 0x830
+#define UTMIP_PD_CHRG BIT(0)
-#define UTMIP_SPARE_CFG0 0x834
-#define FUSE_SETUP_SEL (1 << 3)
+#define UTMIP_SPARE_CFG0 0x834
+#define FUSE_SETUP_SEL BIT(3)
-#define UTMIP_XCVR_CFG1 0x838
-#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
-#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2)
-#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4)
-#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
+#define UTMIP_XCVR_CFG1 0x838
+#define UTMIP_FORCE_PDDISC_POWERDOWN BIT(0)
+#define UTMIP_FORCE_PDCHRP_POWERDOWN BIT(2)
+#define UTMIP_FORCE_PDDR_POWERDOWN BIT(4)
+#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
-#define UTMIP_BIAS_CFG1 0x83c
-#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+#define UTMIP_BIAS_CFG1 0x83c
+#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
/* For Tegra30 and above only; the address differs on Tegra20 */
-#define USB_USBMODE 0x1f8
-#define USB_USBMODE_MASK (3 << 0)
-#define USB_USBMODE_HOST (3 << 0)
-#define USB_USBMODE_DEVICE (2 << 0)
+#define USB_USBMODE 0x1f8
+#define USB_USBMODE_MASK (3 << 0)
+#define USB_USBMODE_HOST (3 << 0)
+#define USB_USBMODE_DEVICE (2 << 0)
static DEFINE_SPINLOCK(utmip_pad_lock);
-static int utmip_pad_count;
+static unsigned int utmip_pad_count;
struct tegra_xtal_freq {
- int freq;
+ unsigned int freq;
u8 enable_delay;
u8 stable_count;
u8 active_delay;
@@ -194,43 +196,49 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
},
};
+static inline struct tegra_usb_phy *to_tegra_usb_phy(struct usb_phy *u_phy)
+{
+ return container_of(u_phy, struct tegra_usb_phy, u_phy);
+}
+
static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
{
void __iomem *base = phy->regs;
- unsigned long val;
+ u32 val;
if (phy->soc_config->has_hostpc) {
- val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0);
val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val);
- writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
- val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ val = readl_relaxed(base + TEGRA_USB_PORTSC1);
+ val &= ~TEGRA_PORTSC1_RWC_BITS;
val &= ~TEGRA_USB_PORTSC1_PTS(~0);
val |= TEGRA_USB_PORTSC1_PTS(pts_val);
- writel(val, base + TEGRA_USB_PORTSC1);
+ writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
static void set_phcd(struct tegra_usb_phy *phy, bool enable)
{
void __iomem *base = phy->regs;
- unsigned long val;
+ u32 val;
if (phy->soc_config->has_hostpc) {
- val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val = readl_relaxed(base + TEGRA_USB_HOSTPC1_DEVLC);
if (enable)
val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD;
else
val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD;
- writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ writel_relaxed(val, base + TEGRA_USB_HOSTPC1_DEVLC);
} else {
- val = readl(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
+ val = readl_relaxed(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
if (enable)
val |= TEGRA_USB_PORTSC1_PHCD;
else
val &= ~TEGRA_USB_PORTSC1_PHCD;
- writel(val, base + TEGRA_USB_PORTSC1);
+ writel_relaxed(val, base + TEGRA_USB_PORTSC1);
}
}
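The readl()/writel() to readl_relaxed()/writel_relaxed() conversion running through this file drops the accessors' implicit memory barriers. That is safe here because these sequences only read and write registers of the same device, whose ordering the bus already guarantees, and it avoids barrier overhead on frequently executed paths. The recurring read-modify-write pattern could be captured by a helper like the following (hypothetical, not part of this patch):

	static void example_phy_rmw(void __iomem *reg, u32 clear, u32 set)
	{
		u32 val;

		val = readl_relaxed(reg);
		val &= ~clear;
		val |= set;
		writel_relaxed(val, reg);
	}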
@@ -238,23 +246,6 @@ static int utmip_pad_open(struct tegra_usb_phy *phy)
{
int ret;
- phy->pad_clk = devm_clk_get(phy->u_phy.dev, "utmi-pads");
- if (IS_ERR(phy->pad_clk)) {
- ret = PTR_ERR(phy->pad_clk);
- dev_err(phy->u_phy.dev,
- "Failed to get UTMIP pad clock: %d\n", ret);
- return ret;
- }
-
- phy->pad_rst = devm_reset_control_get_optional_shared(
- phy->u_phy.dev, "utmi-pads");
- if (IS_ERR(phy->pad_rst)) {
- ret = PTR_ERR(phy->pad_rst);
- dev_err(phy->u_phy.dev,
- "Failed to get UTMI-pads reset: %d\n", ret);
- return ret;
- }
-
ret = clk_prepare_enable(phy->pad_clk);
if (ret) {
dev_err(phy->u_phy.dev,
@@ -315,18 +306,21 @@ static int utmip_pad_close(struct tegra_usb_phy *phy)
return ret;
}
-static void utmip_pad_power_on(struct tegra_usb_phy *phy)
+static int utmip_pad_power_on(struct tegra_usb_phy *phy)
{
- unsigned long val, flags;
- void __iomem *base = phy->pad_regs;
struct tegra_utmip_config *config = phy->config;
+ void __iomem *base = phy->pad_regs;
+ u32 val;
+ int err;
- clk_prepare_enable(phy->pad_clk);
+ err = clk_prepare_enable(phy->pad_clk);
+ if (err)
+ return err;
- spin_lock_irqsave(&utmip_pad_lock, flags);
+ spin_lock(&utmip_pad_lock);
if (utmip_pad_count++ == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
if (phy->soc_config->requires_extra_tuning_parameters) {
@@ -338,53 +332,59 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level);
val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level);
}
- writel(val, base + UTMIP_BIAS_CFG0);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
- spin_unlock_irqrestore(&utmip_pad_lock, flags);
+ spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
+
+ return 0;
}
static int utmip_pad_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val, flags;
void __iomem *base = phy->pad_regs;
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(phy->pad_clk);
+ if (ret)
+ return ret;
+
+ spin_lock(&utmip_pad_lock);
if (!utmip_pad_count) {
dev_err(phy->u_phy.dev, "UTMIP pad already powered off\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto ulock;
}
- clk_prepare_enable(phy->pad_clk);
-
- spin_lock_irqsave(&utmip_pad_lock, flags);
-
if (--utmip_pad_count == 0) {
- val = readl(base + UTMIP_BIAS_CFG0);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
- writel(val, base + UTMIP_BIAS_CFG0);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
-
- spin_unlock_irqrestore(&utmip_pad_lock, flags);
+ulock:
+ spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
- return 0;
+ return ret;
}
static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
u32 tmp;
- return readl_poll_timeout(reg, tmp, (tmp & mask) == result,
- 2000, 6000);
+ return readl_relaxed_poll_timeout(reg, tmp, (tmp & mask) == result,
+ 2000, 6000);
}
static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
/*
* The USB driver may have already initiated the phy clock
@@ -395,27 +395,28 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
return;
if (phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- udelay(10);
+ usleep_range(10, 100);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
- } else
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ } else {
set_phcd(phy, true);
+ }
- if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
+ if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on disable\n");
}
static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
/*
* The USB driver may have already initiated the phy clock
@@ -427,97 +428,101 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
return;
if (phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- udelay(10);
+ usleep_range(10, 100);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
- } else
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ } else {
set_phcd(phy, false);
+ }
if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
- USB_PHY_CLK_VALID))
+ USB_PHY_CLK_VALID))
dev_err(phy->u_phy.dev,
"Timeout waiting for PHY to stabilize on enable\n");
}
static int utmi_phy_power_on(struct tegra_usb_phy *phy)
{
- unsigned long val;
- void __iomem *base = phy->regs;
struct tegra_utmip_config *config = phy->config;
+ void __iomem *base = phy->regs;
+ u32 val;
+ int err;
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
- val = readl(base + USB1_LEGACY_CTRL);
+ val = readl_relaxed(base + USB1_LEGACY_CTRL);
val |= USB1_NO_LEGACY_MODE;
- writel(val, base + USB1_LEGACY_CTRL);
+ writel_relaxed(val, base + USB1_LEGACY_CTRL);
}
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_FS_PREABMLE_J;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
- val = readl(base + UTMIP_HSRX_CFG0);
+ val = readl_relaxed(base + UTMIP_HSRX_CFG0);
val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
- writel(val, base + UTMIP_HSRX_CFG0);
+ writel_relaxed(val, base + UTMIP_HSRX_CFG0);
- val = readl(base + UTMIP_HSRX_CFG1);
+ val = readl_relaxed(base + UTMIP_HSRX_CFG1);
val &= ~UTMIP_HS_SYNC_START_DLY(~0);
val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
- writel(val, base + UTMIP_HSRX_CFG1);
+ writel_relaxed(val, base + UTMIP_HSRX_CFG1);
- val = readl(base + UTMIP_DEBOUNCE_CFG0);
+ val = readl_relaxed(base + UTMIP_DEBOUNCE_CFG0);
val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
- writel(val, base + UTMIP_DEBOUNCE_CFG0);
+ writel_relaxed(val, base + UTMIP_DEBOUNCE_CFG0);
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
- writel(val, base + UTMIP_MISC_CFG0);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
if (!phy->soc_config->utmi_pll_config_in_car_module) {
- val = readl(base + UTMIP_MISC_CFG1);
+ val = readl_relaxed(base + UTMIP_MISC_CFG1);
val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) |
UTMIP_PLLU_STABLE_COUNT(~0));
val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
- writel(val, base + UTMIP_MISC_CFG1);
+ writel_relaxed(val, base + UTMIP_MISC_CFG1);
- val = readl(base + UTMIP_PLL_CFG1);
+ val = readl_relaxed(base + UTMIP_PLL_CFG1);
val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) |
UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
- writel(val, base + UTMIP_PLL_CFG1);
+ writel_relaxed(val, base + UTMIP_PLL_CFG1);
}
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val &= ~UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
} else {
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
}
- utmip_pad_power_on(phy);
+ err = utmip_pad_power_on(phy);
+ if (err)
+ return err;
- val = readl(base + UTMIP_XCVR_CFG0);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL |
UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) |
@@ -535,57 +540,57 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew);
val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew);
}
- writel(val, base + UTMIP_XCVR_CFG0);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
- val = readl(base + UTMIP_XCVR_CFG1);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
- writel(val, base + UTMIP_XCVR_CFG1);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG1);
- val = readl(base + UTMIP_BIAS_CFG1);
+ val = readl_relaxed(base + UTMIP_BIAS_CFG1);
val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
- writel(val, base + UTMIP_BIAS_CFG1);
+ writel_relaxed(val, base + UTMIP_BIAS_CFG1);
- val = readl(base + UTMIP_SPARE_CFG0);
+ val = readl_relaxed(base + UTMIP_SPARE_CFG0);
if (config->xcvr_setup_use_fuses)
val |= FUSE_SETUP_SEL;
else
val &= ~FUSE_SETUP_SEL;
- writel(val, base + UTMIP_SPARE_CFG0);
+ writel_relaxed(val, base + UTMIP_SPARE_CFG0);
if (!phy->is_legacy_phy) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
if (phy->is_legacy_phy) {
- val = readl(base + USB1_LEGACY_CTRL);
+ val = readl_relaxed(base + USB1_LEGACY_CTRL);
val &= ~USB1_VBUS_SENSE_CTL_MASK;
val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
- writel(val, base + USB1_LEGACY_CTRL);
+ writel_relaxed(val, base + USB1_LEGACY_CTRL);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_SET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
utmi_phy_clk_enable(phy);
if (phy->soc_config->requires_usbmode_setup) {
- val = readl(base + USB_USBMODE);
+ val = readl_relaxed(base + USB_USBMODE);
val &= ~USB_USBMODE_MASK;
if (phy->mode == USB_DR_MODE_HOST)
val |= USB_USBMODE_HOST;
else
val |= USB_USBMODE_DEVICE;
- writel(val, base + USB_USBMODE);
+ writel_relaxed(val, base + USB_USBMODE);
}
if (!phy->is_legacy_phy)
@@ -596,258 +601,252 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
static int utmi_phy_power_off(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
utmi_phy_clk_disable(phy);
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UTMIP_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
- val = readl(base + UTMIP_XCVR_CFG0);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
UTMIP_FORCE_PDZI_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG0);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
- val = readl(base + UTMIP_XCVR_CFG1);
+ val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
- writel(val, base + UTMIP_XCVR_CFG1);
+ writel_relaxed(val, base + UTMIP_XCVR_CFG1);
return utmip_pad_power_off(phy);
}
static void utmi_phy_preresume(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val |= UTMIP_HS_DISCON_DISABLE;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_postresume(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_TX_CFG0);
+ val = readl_relaxed(base + UTMIP_TX_CFG0);
val &= ~UTMIP_HS_DISCON_DISABLE;
- writel(val, base + UTMIP_TX_CFG0);
+ writel_relaxed(val, base + UTMIP_TX_CFG0);
}
static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
enum tegra_usb_phy_port_speed port_speed)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
else
val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(1);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(1, 10);
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val |= UTMIP_DPDM_OBSERVE;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(10);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(10, 100);
}
static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
{
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
- val = readl(base + UTMIP_MISC_CFG0);
+ val = readl_relaxed(base + UTMIP_MISC_CFG0);
val &= ~UTMIP_DPDM_OBSERVE;
- writel(val, base + UTMIP_MISC_CFG0);
- udelay(10);
+ writel_relaxed(val, base + UTMIP_MISC_CFG0);
+ usleep_range(10, 100);
}
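
Note on the conversion applied throughout the hunks above: every readl()/writel() pair on this PHY becomes readl_relaxed()/writel_relaxed(), and the udelay() busy-waits become usleep_range() sleeps, since these paths run in process context. A minimal sketch of the recurring read-modify-write step, with tegra_usb_rmw() as an illustrative helper that is not part of this patch:

static void tegra_usb_rmw(void __iomem *reg, u32 clear, u32 set)
{
	u32 val;

	/* All accesses hit the same device, so the relaxed accessors
	 * suffice and skip the barriers implied by readl()/writel(). */
	val = readl_relaxed(reg);
	val &= ~clear;
	val |= set;
	writel_relaxed(val, reg);
}

With it, the UTMIP_BAT_CHRG_CFG0 update above would read tegra_usb_rmw(base + UTMIP_BAT_CHRG_CFG0, 0, UTMIP_PD_CHRG).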
static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
{
- int ret;
- unsigned long val;
void __iomem *base = phy->regs;
+ u32 val;
+ int err;
- ret = gpio_direction_output(phy->reset_gpio, 0);
- if (ret < 0) {
- dev_err(phy->u_phy.dev, "GPIO %d not set to 0: %d\n",
- phy->reset_gpio, ret);
- return ret;
- }
- msleep(5);
- ret = gpio_direction_output(phy->reset_gpio, 1);
- if (ret < 0) {
- dev_err(phy->u_phy.dev, "GPIO %d not set to 1: %d\n",
- phy->reset_gpio, ret);
- return ret;
- }
+ gpiod_set_value_cansleep(phy->reset_gpio, 1);
+
+ err = clk_prepare_enable(phy->clk);
+ if (err)
+ return err;
+
+ usleep_range(5000, 6000);
+
+ gpiod_set_value_cansleep(phy->reset_gpio, 0);
- clk_prepare_enable(phy->clk);
- msleep(1);
+ usleep_range(1000, 2000);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= UHSIC_RESET;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
- val = readl(base + ULPI_TIMING_CTRL_0);
+ val = readl_relaxed(base + ULPI_TIMING_CTRL_0);
val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
- writel(val, base + ULPI_TIMING_CTRL_0);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_0);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= ULPI_PHY_ENABLE;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
val = 0;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
val |= ULPI_DATA_TRIMMER_SEL(4);
val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
val |= ULPI_DIR_TRIMMER_SEL(4);
- writel(val, base + ULPI_TIMING_CTRL_1);
- udelay(10);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
+ usleep_range(10, 100);
val |= ULPI_DATA_TRIMMER_LOAD;
val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
val |= ULPI_DIR_TRIMMER_LOAD;
- writel(val, base + ULPI_TIMING_CTRL_1);
+ writel_relaxed(val, base + ULPI_TIMING_CTRL_1);
/* Fix VbusInvalid due to floating VBUS */
- ret = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
- if (ret) {
- dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
- return ret;
+ err = usb_phy_io_write(phy->ulpi, 0x40, 0x08);
+ if (err) {
+ dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+ goto disable_clk;
}
- ret = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
- if (ret) {
- dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", ret);
- return ret;
+ err = usb_phy_io_write(phy->ulpi, 0x80, 0x0B);
+ if (err) {
+ dev_err(phy->u_phy.dev, "ULPI write failed: %d\n", err);
+ goto disable_clk;
}
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val |= USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
- udelay(100);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+ usleep_range(100, 1000);
- val = readl(base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~USB_SUSP_CLR;
- writel(val, base + USB_SUSP_CTRL);
+ writel_relaxed(val, base + USB_SUSP_CTRL);
return 0;
-}
-static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
-{
- clk_disable(phy->clk);
- return gpio_direction_output(phy->reset_gpio, 0);
+disable_clk:
+ clk_disable_unprepare(phy->clk);
+
+ return err;
}
-static void tegra_usb_phy_close(struct tegra_usb_phy *phy)
+static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
- if (!IS_ERR(phy->vbus))
- regulator_disable(phy->vbus);
+ gpiod_set_value_cansleep(phy->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ clk_disable_unprepare(phy->clk);
- if (!phy->is_ulpi_phy)
- utmip_pad_close(phy);
-
- clk_disable_unprepare(phy->pll_u);
+ return 0;
}
static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
+ int err;
+
+ if (phy->powered_on)
+ return 0;
+
if (phy->is_ulpi_phy)
- return ulpi_phy_power_on(phy);
+ err = ulpi_phy_power_on(phy);
else
- return utmi_phy_power_on(phy);
+ err = utmi_phy_power_on(phy);
+ if (err)
+ return err;
+
+ phy->powered_on = true;
+
+ return 0;
}
static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
+ int err;
+
+ if (!phy->powered_on)
+ return 0;
+
if (phy->is_ulpi_phy)
- return ulpi_phy_power_off(phy);
+ err = ulpi_phy_power_off(phy);
else
- return utmi_phy_power_off(phy);
-}
+ err = utmi_phy_power_off(phy);
+ if (err)
+ return err;
-static int tegra_usb_phy_suspend(struct usb_phy *x, int suspend)
-{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
- if (suspend)
- return tegra_usb_phy_power_off(phy);
- else
- return tegra_usb_phy_power_on(phy);
+ phy->powered_on = false;
+
+ return 0;
}
-static int ulpi_open(struct tegra_usb_phy *phy)
+static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
{
- int err;
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
- phy->clk = devm_clk_get(phy->u_phy.dev, "ulpi-link");
- if (IS_ERR(phy->clk)) {
- err = PTR_ERR(phy->clk);
- dev_err(phy->u_phy.dev, "Failed to get ULPI clock: %d\n", err);
- return err;
- }
+ if (WARN_ON(!phy->freq))
+ return;
- err = devm_gpio_request(phy->u_phy.dev, phy->reset_gpio,
- "ulpi_phy_reset_b");
- if (err < 0) {
- dev_err(phy->u_phy.dev, "Request failed for GPIO %d: %d\n",
- phy->reset_gpio, err);
- return err;
- }
+ tegra_usb_phy_power_off(phy);
- err = gpio_direction_output(phy->reset_gpio, 0);
- if (err < 0) {
- dev_err(phy->u_phy.dev,
- "GPIO %d direction not set to output: %d\n",
- phy->reset_gpio, err);
- return err;
- }
+ if (!phy->is_ulpi_phy)
+ utmip_pad_close(phy);
- phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
- if (!phy->ulpi) {
- dev_err(phy->u_phy.dev, "Failed to create ULPI OTG\n");
- err = -ENOMEM;
- return err;
- }
+ regulator_disable(phy->vbus);
+ clk_disable_unprepare(phy->pll_u);
- phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT;
- return 0;
+ phy->freq = NULL;
}
-static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
+static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+ if (WARN_ON(!phy->freq))
+ return -EINVAL;
+
+ if (suspend)
+ return tegra_usb_phy_power_off(phy);
+ else
+ return tegra_usb_phy_power_on(phy);
+}
+
+static int tegra_usb_phy_init(struct usb_phy *u_phy)
+{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
unsigned long parent_rate;
- int i;
+ unsigned int i;
int err;
- phy->pll_u = devm_clk_get(phy->u_phy.dev, "pll_u");
- if (IS_ERR(phy->pll_u)) {
- err = PTR_ERR(phy->pll_u);
- dev_err(phy->u_phy.dev,
- "Failed to get pll_u clock: %d\n", err);
- return err;
- }
+ if (WARN_ON(phy->freq))
+ return 0;
err = clk_prepare_enable(phy->pll_u);
if (err)
@@ -864,64 +863,74 @@ static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
dev_err(phy->u_phy.dev, "Invalid pll_u parent rate %ld\n",
parent_rate);
err = -EINVAL;
- goto fail;
+ goto disable_clk;
}
- if (!IS_ERR(phy->vbus)) {
- err = regulator_enable(phy->vbus);
- if (err) {
- dev_err(phy->u_phy.dev,
- "Failed to enable USB VBUS regulator: %d\n",
- err);
- goto fail;
- }
+ err = regulator_enable(phy->vbus);
+ if (err) {
+ dev_err(phy->u_phy.dev,
+ "Failed to enable USB VBUS regulator: %d\n", err);
+ goto disable_clk;
}
- if (phy->is_ulpi_phy)
- err = ulpi_open(phy);
- else
+ if (!phy->is_ulpi_phy) {
err = utmip_pad_open(phy);
- if (err < 0)
- goto fail;
+ if (err)
+ goto disable_vbus;
+ }
+
+ err = tegra_usb_phy_power_on(phy);
+ if (err)
+ goto close_phy;
return 0;
-fail:
+close_phy:
+ if (!phy->is_ulpi_phy)
+ utmip_pad_close(phy);
+
+disable_vbus:
+ regulator_disable(phy->vbus);
+
+disable_clk:
clk_disable_unprepare(phy->pll_u);
+
+ phy->freq = NULL;
+
return err;
}
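
tegra_usb_phy_init() now unwinds in the reverse order of acquisition through a chain of labels. A reduced sketch of the idiom, assuming hypothetical foo_* names:

static int example_init(struct foo *foo)
{
	int err;

	err = clk_prepare_enable(foo->clk);
	if (err)
		return err;		/* nothing to unwind yet */

	err = regulator_enable(foo->vbus);
	if (err)
		goto disable_clk;

	err = foo_power_on(foo);	/* hypothetical final step */
	if (err)
		goto disable_vbus;

	return 0;

disable_vbus:
	regulator_disable(foo->vbus);
disable_clk:
	clk_disable_unprepare(foo->clk);
	return err;
}

Each failure jumps to the label that undoes exactly the steps that already succeeded.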
-void tegra_usb_phy_preresume(struct usb_phy *x)
+void tegra_usb_phy_preresume(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_preresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume);
-void tegra_usb_phy_postresume(struct usb_phy *x)
+void tegra_usb_phy_postresume(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_postresume(phy);
}
EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume);
-void tegra_ehci_phy_restore_start(struct usb_phy *x,
- enum tegra_usb_phy_port_speed port_speed)
+void tegra_ehci_phy_restore_start(struct usb_phy *u_phy,
+ enum tegra_usb_phy_port_speed port_speed)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_start(phy, port_speed);
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start);
-void tegra_ehci_phy_restore_end(struct usb_phy *x)
+void tegra_ehci_phy_restore_end(struct usb_phy *u_phy)
{
- struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
if (!phy->is_ulpi_phy)
utmi_phy_restore_end(phy);
@@ -932,21 +941,25 @@ static int read_utmi_param(struct platform_device *pdev, const char *param,
u8 *dest)
{
u32 value;
- int err = of_property_read_u32(pdev->dev.of_node, param, &value);
- *dest = (u8)value;
- if (err < 0)
+ int err;
+
+ err = of_property_read_u32(pdev->dev.of_node, param, &value);
+ if (err)
dev_err(&pdev->dev,
"Failed to read USB UTMI parameter %s: %d\n",
param, err);
+ else
+ *dest = value;
+
return err;
}
static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
struct platform_device *pdev)
{
+ struct tegra_utmip_config *config;
struct resource *res;
int err;
- struct tegra_utmip_config *config;
tegra_phy->is_ulpi_phy = false;
@@ -957,7 +970,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
}
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ resource_size(res));
if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI pad regs\n");
return -ENOMEM;
@@ -971,49 +984,49 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
config = tegra_phy->config;
err = read_utmi_param(pdev, "nvidia,hssync-start-delay",
- &config->hssync_start_delay);
- if (err < 0)
+ &config->hssync_start_delay);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,elastic-limit",
- &config->elastic_limit);
- if (err < 0)
+ &config->elastic_limit);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,idle-wait-delay",
- &config->idle_wait_delay);
- if (err < 0)
+ &config->idle_wait_delay);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,term-range-adj",
- &config->term_range_adj);
- if (err < 0)
+ &config->term_range_adj);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew",
- &config->xcvr_lsfslew);
- if (err < 0)
+ &config->xcvr_lsfslew);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew",
- &config->xcvr_lsrslew);
- if (err < 0)
+ &config->xcvr_lsrslew);
+ if (err)
return err;
if (tegra_phy->soc_config->requires_extra_tuning_parameters) {
err = read_utmi_param(pdev, "nvidia,xcvr-hsslew",
- &config->xcvr_hsslew);
- if (err < 0)
+ &config->xcvr_hsslew);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hssquelch-level",
- &config->hssquelch_level);
- if (err < 0)
+ &config->hssquelch_level);
+ if (err)
return err;
err = read_utmi_param(pdev, "nvidia,hsdiscon-level",
- &config->hsdiscon_level);
- if (err < 0)
+ &config->hsdiscon_level);
+ if (err)
return err;
}
@@ -1022,8 +1035,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
if (!config->xcvr_setup_use_fuses) {
err = read_utmi_param(pdev, "nvidia,xcvr-setup",
- &config->xcvr_setup);
- if (err < 0)
+ &config->xcvr_setup);
+ if (err)
return err;
}
@@ -1053,23 +1066,20 @@ MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
static int tegra_usb_phy_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct resource *res;
- struct tegra_usb_phy *tegra_phy = NULL;
struct device_node *np = pdev->dev.of_node;
+ struct tegra_usb_phy *tegra_phy;
enum usb_phy_interface phy_type;
+ struct reset_control *reset;
+ struct gpio_desc *gpiod;
+ struct resource *res;
+ struct usb_phy *phy;
int err;
tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
if (!tegra_phy)
return -ENOMEM;
- match = of_match_device(tegra_usb_phy_id_table, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- tegra_phy->soc_config = match->data;
+ tegra_phy->soc_config = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1078,7 +1088,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
}
tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ resource_size(res));
if (!tegra_phy->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
return -ENOMEM;
@@ -1087,25 +1097,86 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->is_legacy_phy =
of_property_read_bool(np, "nvidia,has-legacy-mode");
+ if (of_find_property(np, "dr_mode", NULL))
+ tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
+ else
+ tegra_phy->mode = USB_DR_MODE_HOST;
+
+ if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(&pdev->dev, "dr_mode is invalid\n");
+ return -EINVAL;
+ }
+
+ /* On some boards, the VBUS regulator doesn't need to be controlled */
+ tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(tegra_phy->vbus))
+ return PTR_ERR(tegra_phy->vbus);
+
+ tegra_phy->pll_u = devm_clk_get(&pdev->dev, "pll_u");
+ err = PTR_ERR_OR_ZERO(tegra_phy->pll_u);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get pll_u clock: %d\n", err);
+ return err;
+ }
+
phy_type = of_usb_get_phy_mode(np);
switch (phy_type) {
case USBPHY_INTERFACE_MODE_UTMI:
err = utmi_phy_probe(tegra_phy, pdev);
- if (err < 0)
+ if (err)
return err;
+
+ tegra_phy->pad_clk = devm_clk_get(&pdev->dev, "utmi-pads");
+ err = PTR_ERR_OR_ZERO(tegra_phy->pad_clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get UTMIP pad clock: %d\n", err);
+ return err;
+ }
+
+ reset = devm_reset_control_get_optional_shared(&pdev->dev,
+ "utmi-pads");
+ err = PTR_ERR_OR_ZERO(reset);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get UTMI-pads reset: %d\n", err);
+ return err;
+ }
+ tegra_phy->pad_rst = reset;
break;
case USBPHY_INTERFACE_MODE_ULPI:
tegra_phy->is_ulpi_phy = true;
- tegra_phy->reset_gpio =
- of_get_named_gpio(np, "nvidia,phy-reset-gpio", 0);
- if (!gpio_is_valid(tegra_phy->reset_gpio)) {
+ tegra_phy->clk = devm_clk_get(&pdev->dev, "ulpi-link");
+ err = PTR_ERR_OR_ZERO(tegra_phy->clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to get ULPI clock: %d\n", err);
+ return err;
+ }
+
+ gpiod = devm_gpiod_get_from_of_node(&pdev->dev, np,
+ "nvidia,phy-reset-gpio",
+ 0, GPIOD_OUT_HIGH,
+ "ulpi_phy_reset_b");
+ err = PTR_ERR_OR_ZERO(gpiod);
+ if (err) {
dev_err(&pdev->dev,
- "Invalid GPIO: %d\n", tegra_phy->reset_gpio);
- return tegra_phy->reset_gpio;
+ "Request failed for reset GPIO: %d\n", err);
+ return err;
+ }
+ tegra_phy->reset_gpio = gpiod;
+
+ phy = devm_otg_ulpi_create(&pdev->dev,
+ &ulpi_viewport_access_ops, 0);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to create ULPI OTG\n");
+ return -ENOMEM;
}
- tegra_phy->config = NULL;
+
+ tegra_phy->ulpi = phy;
+ tegra_phy->ulpi->io_priv = tegra_phy->regs + ULPI_VIEWPORT;
break;
default:
@@ -1114,40 +1185,16 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (of_find_property(np, "dr_mode", NULL))
- tegra_phy->mode = usb_get_dr_mode(&pdev->dev);
- else
- tegra_phy->mode = USB_DR_MODE_HOST;
-
- if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
- dev_err(&pdev->dev, "dr_mode is invalid\n");
- return -EINVAL;
- }
-
- /* On some boards, the VBUS regulator doesn't need to be controlled */
- if (of_find_property(np, "vbus-supply", NULL)) {
- tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
- if (IS_ERR(tegra_phy->vbus))
- return PTR_ERR(tegra_phy->vbus);
- } else {
- dev_notice(&pdev->dev, "no vbus regulator");
- tegra_phy->vbus = ERR_PTR(-ENODEV);
- }
-
tegra_phy->u_phy.dev = &pdev->dev;
- err = tegra_usb_phy_init(tegra_phy);
- if (err < 0)
- return err;
-
- tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend;
+ tegra_phy->u_phy.init = tegra_usb_phy_init;
+ tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
+ tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
platform_set_drvdata(pdev, tegra_phy);
err = usb_add_phy_dev(&tegra_phy->u_phy);
- if (err < 0) {
- tegra_usb_phy_close(tegra_phy);
+ if (err)
return err;
- }
return 0;
}
@@ -1157,7 +1204,6 @@ static int tegra_usb_phy_remove(struct platform_device *pdev)
struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev);
usb_remove_phy(&tegra_phy->u_phy);
- tegra_usb_phy_close(tegra_phy);
return 0;
}
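
With .init, .shutdown and .set_suspend wired into struct usb_phy, the PHY lifecycle is driven by the controller through the generic helpers instead of being tied to probe()/remove(). A sketch of the consumer side, assuming a hypothetical controller glue driver and the "nvidia,phy" phandle:

struct usb_phy *u_phy;
int err;

u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy))
	return PTR_ERR(u_phy);

err = usb_phy_init(u_phy);	/* -> tegra_usb_phy_init() */
if (err)
	return err;

usb_phy_set_suspend(u_phy, 1);	/* -> tegra_usb_phy_power_off() */
usb_phy_shutdown(u_phy);	/* -> tegra_usb_phy_shutdown() */

The phy->powered_on flag keeps the power-on/power-off pair idempotent, so the set_suspend and shutdown paths can overlap without double-disabling clocks or regulators.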
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index a43c49369a60..e683a37e3a7a 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -240,6 +240,21 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
+static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ phy->label = "ULPI";
+ phy->flags = flags;
+ phy->io_ops = ops;
+ phy->otg = otg;
+ phy->init = ulpi_init;
+
+ otg->usb_phy = phy;
+ otg->set_host = ulpi_set_host;
+ otg->set_vbus = ulpi_set_vbus;
+}
+
struct usb_phy *
otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
@@ -257,17 +272,32 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
return NULL;
}
- phy->label = "ULPI";
- phy->flags = flags;
- phy->io_ops = ops;
- phy->otg = otg;
- phy->init = ulpi_init;
-
- otg->usb_phy = phy;
- otg->set_host = ulpi_set_host;
- otg->set_vbus = ulpi_set_vbus;
+ otg_ulpi_init(phy, otg, ops, flags);
return phy;
}
EXPORT_SYMBOL_GPL(otg_ulpi_create);
+struct usb_phy *
+devm_otg_ulpi_create(struct device *dev,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return NULL;
+
+ otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
+ if (!otg) {
+ devm_kfree(dev, phy);
+ return NULL;
+ }
+
+ otg_ulpi_init(phy, otg, ops, flags);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_otg_ulpi_create);
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 0277f62739a2..ad2554630889 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,6 +34,14 @@ struct phy_devm {
struct notifier_block *nb;
};
+static const char *const usb_chger_type[] = {
+ [UNKNOWN_TYPE] = "USB_CHARGER_UNKNOWN_TYPE",
+ [SDP_TYPE] = "USB_CHARGER_SDP_TYPE",
+ [CDP_TYPE] = "USB_CHARGER_CDP_TYPE",
+ [DCP_TYPE] = "USB_CHARGER_DCP_TYPE",
+ [ACA_TYPE] = "USB_CHARGER_ACA_TYPE",
+};
+
static struct usb_phy *__usb_find_phy(struct list_head *list,
enum usb_phy_type type)
{
@@ -98,7 +106,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
{
struct usb_phy *usb_phy = container_of(work, struct usb_phy, chg_work);
char uchger_state[50] = { 0 };
- char *envp[] = { uchger_state, NULL };
+ char uchger_type[50] = { 0 };
+ char *envp[] = { uchger_state, uchger_type, NULL };
unsigned int min, max;
switch (usb_phy->chg_state) {
@@ -122,6 +131,8 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
return;
}
+ snprintf(uchger_type, ARRAY_SIZE(uchger_type),
+ "USB_CHARGER_TYPE=%s", usb_chger_type[usb_phy->chg_type]);
kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);
}
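
The charger type is exported alongside the existing state string through the uevent environment. A sketch of the resulting call, reusing the names from the hunk above and assuming SDP_TYPE was detected:

char uchger_state[50] = "USB_CHARGER_STATE=USB_CHARGER_PRESENT";
char uchger_type[50];
char *envp[] = { uchger_state, uchger_type, NULL };

snprintf(uchger_type, sizeof(uchger_type),
	 "USB_CHARGER_TYPE=%s", usb_chger_type[SDP_TYPE]);
kobject_uevent_env(&usb_phy->dev->kobj, KOBJ_CHANGE, envp);

User space then sees both USB_CHARGER_STATE and USB_CHARGER_TYPE keys in the KOBJ_CHANGE event.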
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d438b7871446..3af91b2b8f76 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -8,11 +8,10 @@
*/
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -592,7 +591,8 @@ static int usbhs_probe(struct platform_device *pdev)
struct usbhs_priv *priv;
struct resource *irq_res;
struct device *dev = &pdev->dev;
- int ret, gpio;
+ struct gpio_desc *gpiod;
+ int ret;
u32 tmp;
/* check device node */
@@ -657,10 +657,9 @@ static int usbhs_probe(struct platform_device *pdev)
priv->dparam.pio_dma_border = 64; /* 64byte */
if (!of_property_read_u32(dev_of_node(dev), "renesas,buswait", &tmp))
priv->dparam.buswait_bwait = tmp;
- gpio = of_get_named_gpio_flags(dev_of_node(dev), "renesas,enable-gpio",
- 0, NULL);
- if (gpio > 0)
- priv->dparam.enable_gpio = gpio;
+ gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
/* FIXME */
/* runtime power control ? */
@@ -708,13 +707,10 @@ static int usbhs_probe(struct platform_device *pdev)
usbhs_sys_clock_ctrl(priv, 0);
/* check GPIO determining if USB function should be enabled */
- if (priv->dparam.enable_gpio) {
- gpio_request_one(priv->dparam.enable_gpio, GPIOF_IN, NULL);
- ret = !gpio_get_value(priv->dparam.enable_gpio);
- gpio_free(priv->dparam.enable_gpio);
+ if (gpiod) {
+ ret = !gpiod_get_value(gpiod);
if (ret) {
- dev_warn(dev, "USB function not selected (GPIO %d)\n",
- priv->dparam.enable_gpio);
+ dev_warn(dev, "USB function not selected (GPIO)\n");
ret = -ENOTSUPP;
goto probe_end_mod_exit;
}
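
devm_gpiod_get_optional() with con_id "renesas,enable" looks up the "renesas,enable-gpios" property and returns NULL, not an error, when it is absent, which collapses the old request/read/free dance into three cases. A sketch, with enable_gpio_selected() as an illustrative helper:

static int enable_gpio_selected(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "renesas,enable", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* real error, e.g. -EPROBE_DEFER */

	if (!gpiod)
		return 1;	/* property absent: function always selected */

	/* gpiod_get_value() folds in any ACTIVE_LOW flag from the DT */
	return gpiod_get_value(gpiod);
}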
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 440d213e1749..52756fc2ac9c 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -6,8 +6,6 @@
* Copyright (C) 2019 Renesas Electronics Corporation
*/
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include "common.h"
#include "rcar2.h"
@@ -34,7 +32,7 @@ static int usbhs_rcar2_hardware_exit(struct platform_device *pdev)
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
if (priv->phy) {
- phy_put(priv->phy);
+ phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
}
diff --git a/drivers/usb/renesas_usbhs/rza2.c b/drivers/usb/renesas_usbhs/rza2.c
index 021749594389..3eed3334a17f 100644
--- a/drivers/usb/renesas_usbhs/rza2.c
+++ b/drivers/usb/renesas_usbhs/rza2.c
@@ -29,7 +29,7 @@ static int usbhs_rza2_hardware_exit(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- phy_put(priv->phy);
+ phy_put(&pdev->dev, priv->phy);
priv->phy = NULL;
return 0;
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 409851306e99..80d6559bbcb2 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -161,7 +161,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
- data->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ data->base = devm_ioremap(dev, res->start, resource_size(res));
if (!data->base)
return -ENOMEM;
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ed4a18b435a0..25d7e0c36d38 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -129,9 +129,6 @@ config USB_SERIAL_DIGI_ACCELEPORT
parallel port on the USB 2 appears as a third serial port on Linux.
The Digi Acceleport USB 8 is not yet supported by this driver.
- This driver works under SMP with the usb-uhci driver. It does not
- work under SMP with the uhci driver.
-
To compile this driver as a module, choose M here: the
module will be called digi_acceleport.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index df582fe855f0..d3f420f3a083 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
- struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct ch341_private *priv;
int ret;
+ priv = usb_get_serial_port_data(port);
+ if (!priv)
+ return 0;
+
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index ebd76ab07b72..821970609695 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -276,7 +276,7 @@ static void cyberjack_read_int_callback(struct urb *urb)
old_rdtodo = priv->rdtodo;
if (old_rdtodo > SHRT_MAX - size) {
- dev_dbg(dev, "To many bulk_in urbs to do.\n");
+ dev_dbg(dev, "Too many bulk_in urbs to do.\n");
spin_unlock_irqrestore(&priv->lock, flags);
goto resubmit;
}
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 633550ec3025..ffd984142171 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -104,7 +104,7 @@ struct garmin_packet {
int seq;
/* the real size of the data array, always > 0 */
int size;
- __u8 data[1];
+ __u8 data[];
};
/* structure used to keep the current state of the driver */
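
Converting the trailing data[1] member to a C99 flexible array means the allocation size must be computed explicitly; struct_size() does that with overflow checking. A sketch, with data and data_len standing in for the caller's buffer:

struct garmin_packet *pkt;

pkt = kmalloc(struct_size(pkt, data, data_len), GFP_ATOMIC);
if (!pkt)
	return -ENOMEM;
pkt->size = data_len;
memcpy(pkt->data, data, data_len);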
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 9690a5f4b9d6..5737add6a2a4 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb)
if (txCredits) {
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
spin_lock_irqsave(&edge_port->ep_lock,
flags);
edge_port->txCredits += txCredits;
@@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength)
{
- struct device *dev = &edge_serial->serial->dev->dev;
+ struct usb_serial *serial = edge_serial->serial;
+ struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
__u16 lastBufferLength;
@@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
/* spit this data back into the tty driver if this
port is open */
- if (rxLen) {
- port = edge_serial->serial->port[
- edge_serial->rxPort];
+ if (rxLen && edge_serial->rxPort < serial->num_ports) {
+ port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
__func__, rxLen,
edge_serial->rxPort);
@@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
rxLen);
edge_port->port->icount.rx += rxLen;
}
- buffer += rxLen;
}
+ buffer += rxLen;
break;
case EXPECT_HDR3: /* Expect 3rd byte of status header */
@@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 code = edge_serial->rxStatusCode;
/* switch the port pointer to the one being currently talked about */
+ if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+ return;
port = edge_serial->serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL) {
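
All of the io_edgeport hunks apply the same hardening: validate the device-supplied port number against serial->num_ports and check the port's private data before dereferencing, since an unbound port has none. The pattern, condensed:

if (edge_serial->rxPort >= serial->num_ports)
	return;

port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (!edge_port || !edge_port->open)
	return;		/* unbound or closed port: drop the data */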
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 302eb9530859..79d0586e2b33 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -45,9 +45,10 @@ static int buffer_size;
static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+static int ir_write_room(struct tty_struct *tty);
+static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
@@ -75,10 +76,13 @@ static struct usb_serial_driver ir_device = {
.description = "IR Dongle",
.id_table = ir_id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
.set_termios = ir_set_termios,
.attach = ir_startup,
- .open = ir_open,
- .prepare_write_buffer = ir_prepare_write_buffer,
+ .write = ir_write,
+ .write_room = ir_write_room,
+ .write_bulk_callback = ir_write_bulk_callback,
.process_read_urb = ir_process_read_urb,
};
@@ -251,35 +255,102 @@ static int ir_startup(struct usb_serial *serial)
return 0;
}
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count)
{
- int i;
+ struct urb *urb = NULL;
+ unsigned long flags;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
- port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
+ if (port->bulk_out_size == 0)
+ return -EINVAL;
- /* Start reading from the device */
- return usb_serial_generic_open(tty, port);
-}
+ if (count == 0)
+ return 0;
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size)
-{
- unsigned char *buf = dest;
- int count;
+ count = min(count, port->bulk_out_size - 1);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (__test_and_clear_bit(0, &port->write_urbs_free)) {
+ urb = port->write_urbs[0];
+ port->tx_bytes += count;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (!urb)
+ return 0;
/*
* The first byte of the packet we send to the device contains an
- * inbound header which indicates an additional number of BOFs and
+ * outbound header which indicates an additional number of BOFs and
* a baud rate change.
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
- *buf = ir_xbof | ir_baud;
+ *(u8 *)urb->transfer_buffer = ir_xbof | ir_baud;
+
+ memcpy(urb->transfer_buffer + 1, buf, count);
+
+ urb->transfer_buffer_length = count + 1;
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+ }
+
+ return count;
+}
+
+static void ir_write_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= urb->transfer_buffer_length - 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ switch (status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ case -EPIPE:
+ dev_err(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ default:
+ dev_err(&port->dev, "nonzero write-urb status: %d\n", status);
+ break;
+ }
+
+ usb_serial_port_softint(port);
+}
- count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
- &port->lock);
- return count + 1;
+static int ir_write_room(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ int count = 0;
+
+ if (port->bulk_out_size == 0)
+ return 0;
+
+ if (test_bit(0, &port->write_urbs_free))
+ count = port->bulk_out_size - 1;
+
+ return count;
}
static void ir_process_read_urb(struct urb *urb)
@@ -304,23 +375,15 @@ static void ir_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-static void ir_set_termios_callback(struct urb *urb)
-{
- kfree(urb->transfer_buffer);
-
- if (urb->status)
- dev_dbg(&urb->dev->dev, "%s - non-zero urb status: %d\n",
- __func__, urb->status);
-}
-
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
- struct urb *urb;
+ struct usb_device *udev = port->serial->dev;
unsigned char *transfer_buffer;
- int result;
+ int actual_length;
speed_t baud;
int ir_baud;
+ int ret;
baud = tty_get_baud_rate(tty);
@@ -332,34 +395,34 @@ static void ir_set_termios(struct tty_struct *tty,
switch (baud) {
case 2400:
- ir_baud = USB_IRDA_BR_2400;
+ ir_baud = USB_IRDA_LS_2400;
break;
case 9600:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
break;
case 19200:
- ir_baud = USB_IRDA_BR_19200;
+ ir_baud = USB_IRDA_LS_19200;
break;
case 38400:
- ir_baud = USB_IRDA_BR_38400;
+ ir_baud = USB_IRDA_LS_38400;
break;
case 57600:
- ir_baud = USB_IRDA_BR_57600;
+ ir_baud = USB_IRDA_LS_57600;
break;
case 115200:
- ir_baud = USB_IRDA_BR_115200;
+ ir_baud = USB_IRDA_LS_115200;
break;
case 576000:
- ir_baud = USB_IRDA_BR_576000;
+ ir_baud = USB_IRDA_LS_576000;
break;
case 1152000:
- ir_baud = USB_IRDA_BR_1152000;
+ ir_baud = USB_IRDA_LS_1152000;
break;
case 4000000:
- ir_baud = USB_IRDA_BR_4000000;
+ ir_baud = USB_IRDA_LS_4000000;
break;
default:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
baud = 9600;
}
@@ -375,42 +438,22 @@ static void ir_set_termios(struct tty_struct *tty,
/*
* send the baud change out on an "empty" data packet
*/
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return;
-
transfer_buffer = kmalloc(1, GFP_KERNEL);
if (!transfer_buffer)
- goto err_buf;
+ return;
*transfer_buffer = ir_xbof | ir_baud;
- usb_fill_bulk_urb(
- urb,
- port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- transfer_buffer,
- 1,
- ir_set_termios_callback,
- port);
-
- urb->transfer_flags = URB_ZERO_PACKET;
-
- result = usb_submit_urb(urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "%s - failed to submit urb: %d\n",
- __func__, result);
- goto err_subm;
+ ret = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
+ transfer_buffer, 1, &actual_length, 5000);
+ if (ret || actual_length != 1) {
+ if (actual_length != 1)
+ ret = -EIO;
+ dev_err(&port->dev, "failed to change line speed: %d\n", ret);
}
- usb_free_urb(urb);
-
- return;
-err_subm:
kfree(transfer_buffer);
-err_buf:
- usb_free_urb(urb);
}
static int __init ir_init(void)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index e66a59ef43a1..aa3dbce22cfb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index cb7aac9cd9e7..0af76800bd78 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -41,6 +41,9 @@ struct opticon_private {
bool rts;
bool cts;
int outstanding_urbs;
+ int outstanding_bytes;
+
+ struct usb_anchor anchor;
};
@@ -113,7 +116,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, 0);
+ 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
kfree(buffer);
if (retval < 0)
@@ -149,6 +152,15 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
return res;
}
+static void opticon_close(struct usb_serial_port *port)
+{
+ struct opticon_private *priv = usb_get_serial_port_data(port);
+
+ usb_kill_anchored_urbs(&priv->anchor);
+
+ usb_serial_generic_close(port);
+}
+
static void opticon_write_control_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
@@ -169,6 +181,7 @@ static void opticon_write_control_callback(struct urb *urb)
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
+ priv->outstanding_bytes -= urb->transfer_buffer_length;
spin_unlock_irqrestore(&priv->lock, flags);
usb_serial_port_softint(port);
@@ -182,8 +195,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
struct urb *urb;
unsigned char *buffer;
unsigned long flags;
- int status;
struct usb_ctrlrequest *dr;
+ int ret = -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
@@ -192,19 +205,16 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
}
priv->outstanding_urbs++;
+ priv->outstanding_bytes += count;
spin_unlock_irqrestore(&priv->lock, flags);
buffer = kmalloc(count, GFP_ATOMIC);
- if (!buffer) {
- count = -ENOMEM;
+ if (!buffer)
goto error_no_buffer;
- }
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- count = -ENOMEM;
+ if (!urb)
goto error_no_urb;
- }
memcpy(buffer, buf, count);
@@ -213,10 +223,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
/* The connected devices do not have a bulk write endpoint,
* to transmit data to the barcode device the control endpoint is used */
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!dr) {
- count = -ENOMEM;
+ if (!dr)
goto error_no_dr;
- }
dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
dr->bRequest = 0x01;
@@ -229,13 +237,13 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
(unsigned char *)dr, buffer, count,
opticon_write_control_callback, port);
+ usb_anchor_urb(urb, &priv->anchor);
+
/* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - usb_submit_urb(write endpoint) failed status = %d\n",
- __func__, status);
- count = status;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+ usb_unanchor_urb(urb);
goto error;
}
@@ -253,8 +261,10 @@ error_no_urb:
error_no_buffer:
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
+ priv->outstanding_bytes -= count;
spin_unlock_irqrestore(&priv->lock, flags);
- return count;
+
+ return ret;
}
static int opticon_write_room(struct tty_struct *tty)
@@ -279,6 +289,20 @@ static int opticon_write_room(struct tty_struct *tty)
return 2048;
}
+static int opticon_chars_in_buffer(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct opticon_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ count = priv->outstanding_bytes;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return count;
+}
+
static int opticon_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
@@ -354,6 +378,7 @@ static int opticon_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
+ init_usb_anchor(&priv->anchor);
usb_set_serial_port_data(port, priv);
@@ -381,8 +406,10 @@ static struct usb_serial_driver opticon_device = {
.port_probe = opticon_port_probe,
.port_remove = opticon_port_remove,
.open = opticon_open,
+ .close = opticon_close,
.write = opticon_write,
.write_room = opticon_write_room,
+ .chars_in_buffer = opticon_chars_in_buffer,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.get_serial = get_serial_info,
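
The new close() support rests on USB anchors: every control URB is anchored before submission so the close path can cancel all of them synchronously. The three call sites from the hunks above, condensed:

init_usb_anchor(&priv->anchor);		/* opticon_port_probe() */

usb_anchor_urb(urb, &priv->anchor);	/* opticon_write(), before submit */
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret)
	usb_unanchor_urb(urb);		/* submit failed: detach again */

usb_kill_anchored_urbs(&priv->anchor);	/* opticon_close() */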
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e9491d400a24..084cc2fff3ae 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
+#define QUECTEL_PRODUCT_RM500Q 0x0800
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -567,6 +568,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
+/* Device needs ZLP */
+#define ZLP BIT(17)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1101,6 +1105,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
+
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1172,6 +1181,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1196,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2097,6 +2110,9 @@ static int option_attach(struct usb_serial *serial)
if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
+ if (device_flags & ZLP)
+ data->use_zlp = 1;
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a62981ca7a73..f93b81a297d6 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
u8 newMSR = (u8) *ch;
unsigned long flags;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowMSR = newMSR;
@@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
unsigned long flags;
u8 newLSR = (u8) *ch;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
if (newLSR & UART_LSR_BI)
newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index edbbb13d6de6..bd23a7cb1be2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS);
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+ { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8f066bb55d7d..dc7a65b9ec98 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
return -EINVAL;
}
+ /* Prevent individual ports from being unbound. */
+ driver->driver.suppress_bind_attrs = true;
+
usb_serial_operations_init(driver);
/* Add this device to our list of devices */
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 1c120eaf4091..934e9361cf6b 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -38,6 +38,7 @@ struct usb_wwan_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
unsigned int use_send_setup:1;
+ unsigned int use_zlp:1;
int in_flight;
unsigned int open_ports;
void *private;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 7e855c87e4f7..13be21aad2f4 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
void (*callback) (struct urb *))
{
struct usb_serial *serial = port->serial;
+ struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
@@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
+ if (intfdata->use_zlp && dir == USB_DIR_OUT)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
return urb;
}
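
URB_ZERO_PACKET matters when a bulk OUT transfer is an exact multiple of the endpoint's wMaxPacketSize: without a trailing zero-length packet the device cannot tell that the transfer has ended. A worked sketch, assuming a 512-byte high-speed bulk endpoint:

/* 1024 == 2 * 512: the HCD sends 512 + 512 + 0 bytes, and the
 * short (zero-length) packet marks the end of the transfer. */
urb->transfer_buffer_length = 1024;
urb->transfer_flags |= URB_ZERO_PACKET;

This is presumably what the devices flagged ZLP in option.c above depend on.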
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 4092248a5936..0edfb89e04a8 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -188,7 +188,7 @@ static void dp_altmode_work(struct work_struct *work)
switch (dp->state) {
case DP_STATE_ENTER:
- ret = typec_altmode_enter(dp->alt);
+ ret = typec_altmode_enter(dp->alt, NULL);
if (ret)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
@@ -306,7 +306,8 @@ err_unlock:
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
- return activate ? typec_altmode_enter(alt) : typec_altmode_exit(alt);
+ return activate ? typec_altmode_enter(alt, NULL) :
+ typec_altmode_exit(alt);
}
static const struct typec_altmode_ops dp_altmode_ops = {
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 74cb3c2ecb34..2e45eb479386 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Bus for USB Type-C Alternate Modes
*
* Copyright (C) 2018 Intel Corporation
@@ -10,12 +10,23 @@
#include "bus.h"
-static inline int typec_altmode_set_mux(struct altmode *alt, u8 state)
+static inline int
+typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
{
- return alt->mux ? alt->mux->set(alt->mux, state) : 0;
+ struct typec_mux_state state;
+
+ if (!alt->mux)
+ return 0;
+
+ state.alt = &alt->adev;
+ state.mode = conf;
+ state.data = data;
+
+ return alt->mux->set(alt->mux, &state);
}
-static int typec_altmode_set_state(struct typec_altmode *adev, int state)
+static int typec_altmode_set_state(struct typec_altmode *adev,
+ unsigned long conf, void *data)
{
bool is_port = is_typec_port(adev->dev.parent);
struct altmode *port_altmode;
@@ -23,11 +34,11 @@ static int typec_altmode_set_state(struct typec_altmode *adev, int state)
port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
- ret = typec_altmode_set_mux(port_altmode, state);
+ ret = typec_altmode_set_mux(port_altmode, conf, data);
if (ret)
return ret;
- blocking_notifier_call_chain(&port_altmode->nh, state, NULL);
+ blocking_notifier_call_chain(&port_altmode->nh, conf, NULL);
return 0;
}
@@ -67,7 +78,7 @@ int typec_altmode_notify(struct typec_altmode *adev,
is_port = is_typec_port(adev->dev.parent);
partner = altmode->partner;
- ret = typec_altmode_set_mux(is_port ? altmode : partner, (u8)conf);
+ ret = typec_altmode_set_mux(is_port ? altmode : partner, conf, data);
if (ret)
return ret;
@@ -84,12 +95,14 @@ EXPORT_SYMBOL_GPL(typec_altmode_notify);
/**
* typec_altmode_enter - Enter Mode
* @adev: The alternate mode
+ * @vdo: VDO for the Enter Mode command
*
* The alternate mode drivers use this function to enter mode. The port drivers
* use this to inform the alternate mode drivers that the partner has initiated
- * Enter Mode command.
+ * Enter Mode command. If the alternate mode does not require VDO, @vdo must be
+ * NULL.
*/
-int typec_altmode_enter(struct typec_altmode *adev)
+int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)
{
struct altmode *partner = to_altmode(adev)->partner;
struct typec_altmode *pdev = &partner->adev;
@@ -101,13 +114,16 @@ int typec_altmode_enter(struct typec_altmode *adev)
if (!pdev->ops || !pdev->ops->enter)
return -EOPNOTSUPP;
+ if (is_typec_port(pdev->dev.parent) && !pdev->active)
+ return -EPERM;
+
/* Moving to USB Safe State */
- ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
/* Enter Mode */
- return pdev->ops->enter(pdev);
+ return pdev->ops->enter(pdev, vdo);
}
EXPORT_SYMBOL_GPL(typec_altmode_enter);
@@ -130,7 +146,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
return -EOPNOTSUPP;
/* Moving to USB Safe State */
- ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE);
+ ret = typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL);
if (ret)
return ret;
@@ -383,7 +399,7 @@ static int typec_remove(struct device *dev)
drv->remove(to_typec_altmode(dev));
if (adev->active) {
- WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE));
+ WARN_ON(typec_altmode_set_state(adev, TYPEC_STATE_SAFE, NULL));
typec_altmode_update_active(adev, false);
}
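
typec_altmode_enter() now takes an optional VDO for the Enter Mode command; passing NULL preserves the old behaviour, and the new -EPERM check refuses Enter Mode when the corresponding port alternate mode is not active. A sketch from an alternate-mode driver's point of view, with the VDO value made up:

u32 vdo = 0x1;	/* illustrative, mode-specific value */
int ret;

ret = typec_altmode_enter(alt, &vdo);	/* Enter Mode with one VDO */
if (ret)
	dev_err(&alt->dev, "failed to enter mode\n");

ret = typec_altmode_enter(alt, NULL);	/* no VDO, as displayport.c does */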
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 91d62276b56f..7c44e930602f 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -834,6 +834,52 @@ static const struct device_type typec_cable_dev_type = {
.release = typec_cable_release,
};
+static int cable_match(struct device *dev, void *data)
+{
+ return is_typec_cable(dev);
+}
+
+/**
+ * typec_cable_get - Get a reference to the USB Type-C cable
+ * @port: The USB Type-C Port the cable is connected to
+ *
+ * The caller must decrement the reference count with typec_cable_put() after
+ * use.
+ */
+struct typec_cable *typec_cable_get(struct typec_port *port)
+{
+ struct device *dev;
+
+ dev = device_find_child(&port->dev, NULL, cable_match);
+ if (!dev)
+ return NULL;
+
+ return to_typec_cable(dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_get);
+
+/**
+ * typec_cable_put - Decrement the reference count on a USB Type-C cable
+ * @cable: The USB Type-C cable
+ */
+void typec_cable_put(struct typec_cable *cable)
+{
+ put_device(&cable->dev);
+}
+EXPORT_SYMBOL_GPL(typec_cable_put);
+
+/**
+ * typec_cable_is_active - Check whether the USB Type-C cable is active or passive
+ * @cable: The USB Type-C Cable
+ *
+ * Return 1 if the cable is active or 0 if it's passive.
+ */
+int typec_cable_is_active(struct typec_cable *cable)
+{
+ return cable->active;
+}
+EXPORT_SYMBOL_GPL(typec_cable_is_active);
+
/**
* typec_cable_set_identity - Report result from Discover Identity command
* @cable: The cable updated identity values
@@ -1483,7 +1529,11 @@ EXPORT_SYMBOL_GPL(typec_get_orientation);
*/
int typec_set_mode(struct typec_port *port, int mode)
{
- return port->mux ? port->mux->set(port->mux, mode) : 0;
+ struct typec_mux_state state = { };
+
+ state.mode = mode;
+
+ return port->mux ? port->mux->set(port->mux, &state) : 0;
}
EXPORT_SYMBOL_GPL(typec_set_mode);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 57907f26f681..5baf0f416c73 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* USB Type-C Multiplexer/DeMultiplexer Switch support
*
* Copyright (C) 2018 Intel Corporation
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 5585b109095b..46457c133d2b 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -73,7 +73,8 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
return ret;
}
-static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
+static int
+pi3usb30532_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
@@ -82,7 +83,7 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
mutex_lock(&pi->lock);
new_conf = pi->conf;
- switch (state) {
+ switch (state->mode) {
case TYPEC_STATE_SAFE:
new_conf = (new_conf & PI3USB30532_CONF_SWAP) |
PI3USB30532_CONF_OPEN;
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 72481bbb2af3..5b986d6c801d 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -32,6 +32,7 @@ endif # TYPEC_TCPCI
config TYPEC_FUSB302
tristate "Fairchild FUSB302 Type-C chip driver"
depends on I2C
+ depends on EXTCON || !EXTCON
help
The Fairchild FUSB302 Type-C chip driver that works with
Type-C Port Controller Manager to provide USB PD and USB
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index ed8655c6af8c..b498960ff72b 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1666,7 +1666,7 @@ static const struct property_entry port_props[] = {
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
- PROPERTY_ENTRY_U32("op-sink-microwatt", 2500),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
{ }
};
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index c1f7073a56de..753645bb2527 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
if (status & TCPC_ALERT_RX_STATUS) {
struct pd_message msg;
- unsigned int cnt;
+ unsigned int cnt, payload_cnt;
u16 header;
regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+ /*
+ * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
+ * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
+ * defined in table 4-36 as one greater than the number of
+ * bytes received. And that number includes the header. So:
+ */
+ if (cnt > 3)
+ payload_cnt = cnt - (1 + sizeof(msg.header));
+ else
+ payload_cnt = 0;
tcpci_read16(tcpci, TCPC_RX_HDR, &header);
msg.header = cpu_to_le16(header);
- if (WARN_ON(cnt > sizeof(msg.payload)))
- cnt = sizeof(msg.payload);
+ if (WARN_ON(payload_cnt > sizeof(msg.payload)))
+ payload_cnt = sizeof(msg.payload);
- if (cnt > 0)
+ if (payload_cnt > 0)
regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
- &msg.payload, cnt);
+ &msg.payload, payload_cnt);
/* Read complete, clear RX status alert bit */
tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
@@ -581,6 +591,12 @@ static int tcpci_probe(struct i2c_client *client,
static int tcpci_remove(struct i2c_client *client)
{
struct tcpci_chip *chip = i2c_get_clientdata(client);
+ int err;
+
+ /* Disable chip interrupts before unregistering port */
+ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
+ if (err < 0)
+ return err;
tcpci_unregister_port(chip->tcpci);
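
Worked example for the READABLE_BYTE_COUNT arithmetic above, following the comment in the hunk:

/* A PD message with the 2-byte header and one 4-byte data object is
 * reported as cnt = 1 + sizeof(msg.header) + 4 = 7, so the payload is
 * cnt - (1 + sizeof(msg.header)) = 4 bytes; any cnt <= 3 is a
 * header-only (or empty) message and payload_cnt stays 0. */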
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 56fc356bc55c..f3087ef8265c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1475,16 +1475,16 @@ static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
return 0;
}
-static int tcpm_altmode_enter(struct typec_altmode *altmode)
+static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
u32 header;
mutex_lock(&port->lock);
- header = VDO(altmode->svid, 1, CMD_ENTER_MODE);
+ header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm(port, header, NULL, 0);
+ tcpm_queue_vdm(port, header, vdo, vdo ? 1 : 0);
mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index edc271da14f4..9b745f432c91 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -597,7 +597,7 @@ static const struct property_entry wcove_props[] = {
PROPERTY_ENTRY_STRING("try-power-role", "sink"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
- PROPERTY_ENTRY_U32("op-sink-microwatt", 15000),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 15000000),
{ }
};
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d4d5189edfb8..0f1273ae086c 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -45,7 +45,7 @@ struct ucsi_dp {
* -EOPNOTSUPP.
*/
-static int ucsi_displayport_enter(struct typec_altmode *alt)
+static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
struct ucsi *ucsi = dp->con->ucsi;
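Both this hunk and the tcpm one above track the same API change: the altmode enter operation now takes an optional VDO pointer, with NULL preserving the old no-data behaviour. A minimal sketch of the new callback shape; the body is illustrative, not this driver's logic:

    #include <stdint.h>

    typedef uint32_t u32;
    struct typec_altmode;    /* opaque here; defined by the kernel */

    /* Sketch of an altmode driver's enter() op after the API change.
     * A NULL vdo means "enter the mode with no extra data object",
     * matching the old single-argument behaviour.
     */
    static int example_altmode_enter(struct typec_altmode *alt, u32 *vdo)
    {
        (void)alt;
        if (vdo) {
            /* One extra VDO rides along with the Enter Mode command. */
        }
        return 0;
    }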
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 4459bc68aa33..d5a6aac86327 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -189,7 +189,7 @@ int ucsi_resume(struct ucsi *ucsi)
u64 command;
/* Restore UCSI notification enable mask after system resume */
- command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
return ucsi_send_command(ucsi, command, NULL, 0);
}
@@ -318,6 +318,82 @@ err:
return ret;
}
+static int
+ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
+{
+ int max_altmodes = UCSI_MAX_ALTMODES;
+ struct typec_altmode_desc desc;
+ struct ucsi_altmode alt;
+ struct ucsi_altmode orig[UCSI_MAX_ALTMODES];
+ struct ucsi_altmode updated[UCSI_MAX_ALTMODES];
+ struct ucsi *ucsi = con->ucsi;
+ bool multi_dp = false;
+ u64 command;
+ int ret;
+ int len;
+ int i;
+ int k = 0;
+
+ if (recipient == UCSI_RECIPIENT_CON)
+ max_altmodes = con->ucsi->cap.num_alt_modes;
+
+ memset(orig, 0, sizeof(orig));
+ memset(updated, 0, sizeof(updated));
+
+ /* First get all the alternate modes */
+ for (i = 0; i < max_altmodes; i++) {
+ memset(&alt, 0, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
+ /*
+ * We collect all the alt modes first and register them afterwards.
+ * Some Type-C devices return zero-length data beyond the last
+ * alternate mode, so a zero length alone is not treated as an error.
+ */
+ if (len < 0)
+ return len;
+
+ /* We got all altmodes, now break out and register them */
+ if (!len || !alt.svid)
+ break;
+
+ orig[k].mid = alt.mid;
+ orig[k].svid = alt.svid;
+ k++;
+ }
+ /*
+ * Update the original altmode table as some PPMs may report
+ * multiple DP altmodes.
+ */
+ if (recipient == UCSI_RECIPIENT_CON)
+ multi_dp = ucsi->ops->update_altmodes(ucsi, orig, updated);
+
+ /* now register altmodes */
+ for (i = 0; i < max_altmodes; i++) {
+ memset(&desc, 0, sizeof(desc));
+ if (multi_dp && recipient == UCSI_RECIPIENT_CON) {
+ desc.svid = updated[i].svid;
+ desc.vdo = updated[i].mid;
+ } else {
+ desc.svid = orig[i].svid;
+ desc.vdo = orig[i].mid;
+ }
+ desc.roles = TYPEC_PORT_DRD;
+
+ if (!desc.svid)
+ return 0;
+
+ ret = ucsi_register_altmode(con, &desc, recipient);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
{
int max_altmodes = UCSI_MAX_ALTMODES;
@@ -336,6 +412,9 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
if (recipient == UCSI_RECIPIENT_SOP && con->partner_altmode[0])
return 0;
+ if (con->ucsi->ops->update_altmodes)
+ return ucsi_register_altmodes_nvidia(con, recipient);
+
if (recipient == UCSI_RECIPIENT_CON)
max_altmodes = con->ucsi->cap.num_alt_modes;
@@ -589,6 +668,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
struct ucsi_connector *con = &ucsi->connector[num - 1];
+ if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
+ dev_dbg(ucsi->dev, "Bogus connector change event\n");
+ return;
+ }
+
if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
schedule_work(&con->work);
}
@@ -656,7 +740,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
@@ -890,8 +974,8 @@ int ucsi_init(struct ucsi *ucsi)
}
/* Enable basic notifications */
- command = UCSI_SET_NOTIFICATION_ENABLE;
- command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
@@ -923,7 +1007,8 @@ int ucsi_init(struct ucsi *ucsi)
}
/* Enable all notifications */
- command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 8569bbd3762f..e434b9c9a9eb 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -11,6 +11,7 @@
/* -------------------------------------------------------------------------- */
struct ucsi;
+struct ucsi_altmode;
/* UCSI offsets (Bytes) */
#define UCSI_VERSION 0
@@ -35,6 +36,7 @@ struct ucsi;
* @read: Read operation
* @sync_write: Blocking write operation
* @async_write: Non-blocking write operation
+ * @update_altmodes: Squashes duplicate DP altmodes
*
* Read and write routines for UCSI interface. @sync_write must wait for the
* Command Completion Event from the PPM before returning, and @async_write must
@@ -47,6 +49,8 @@ struct ucsi_operations {
const void *val, size_t val_len);
int (*async_write)(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len);
+ bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated);
};
struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
@@ -82,6 +86,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_ERROR_STATUS 0x13
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
/* CONNECTOR_RESET command bits */
#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
@@ -94,15 +99,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
-#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(21)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(23)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(27)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(28)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(30)
+#define UCSI_ENABLE_NTFY_ERROR BIT(31)
#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
/* SET_UOR command bits */
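The renumbered notification bits can be sanity-checked against UCSI_ENABLE_NTFY_ALL, which the patch leaves at 0xdbe70000: OR-ing the twelve definitions in the new layout reproduces that mask exactly. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1U << (n))

    int main(void)
    {
        uint32_t ntfy = BIT(16) | BIT(17) | BIT(18) |  /* cmd complete, ext pwr, opmode */
                        BIT(21) | BIT(22) | BIT(23) |  /* cap, pwr level, pd reset */
                        BIT(24) | BIT(25) |            /* cam, battery status */
                        BIT(27) | BIT(28) |            /* partner, pwr direction */
                        BIT(30) | BIT(31);             /* connector change, error */

        assert(ntfy == 0xdbe70000);    /* matches UCSI_ENABLE_NTFY_ALL */
        return 0;
    }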
@@ -140,6 +145,12 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
#define UCSI_ERROR_SWAP_REJECTED BIT(12)
+#define UCSI_SET_NEW_CAM_ENTER(x) (((x) >> 23) & 0x1)
+#define UCSI_SET_NEW_CAM_GET_AM(x) (((x) >> 24) & 0xff)
+#define UCSI_SET_NEW_CAM_AM_MASK (0xff << 24)
+#define UCSI_SET_NEW_CAM_SET_AM(x) (((x) & 0xff) << 24)
+#define UCSI_CMD_CONNECTOR_MASK (0x7)
+
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
u32 attributes;
@@ -269,6 +280,9 @@ struct ucsi {
/* PPM Communication lock */
struct mutex ppm_lock;
+ /* The latest "Notification Enable" bits (SET_NOTIFICATION_ENABLE) */
+ u64 ntfy;
+
/* PPM communication flags */
unsigned long flags;
#define EVENT_PENDING 0
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 3f1786170098..9fc4f338e870 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -127,7 +127,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- /* This will make sure we can use ioremap_nocache() */
+ /* This will make sure we can use ioremap() */
status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
if (ACPI_FAILURE(status))
return -ENOMEM;
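This comment tweak previews a rename that recurs through the rest of this diff: as of v5.6, ioremap() is uncached on all architectures, and ioremap_nocache() survived only as an identical alias slated for removal, roughly as sketched here:

    /* The legacy alias, roughly as it existed before its removal; every
     * ioremap_nocache() -> ioremap() change below is a mechanical rename.
     */
    #define ioremap_nocache(offset, size)    ioremap(offset, size)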
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 3370b3fc37b1..a5b8530490db 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/usb/typec_dp.h>
#include <asm/unaligned.h>
#include "ucsi.h"
@@ -173,6 +174,15 @@ struct ccg_resp {
u8 length;
};
+struct ucsi_ccg_altmode {
+ u16 svid;
+ u32 mid;
+ u8 linked_idx;
+ u8 active_idx;
+#define UCSI_MULTI_DP_INDEX (0xff)
+ bool checked;
+} __packed;
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
@@ -198,6 +208,11 @@ struct ucsi_ccg {
struct work_struct pm_work;
struct completion complete;
+
+ u64 last_cmd_sent;
+ bool has_multiple_dp;
+ struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
+ struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -319,12 +334,169 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
+static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
+{
+ u8 cam, new_cam;
+
+ cam = data[0];
+ new_cam = uc->orig[cam].linked_idx;
+ uc->updated[new_cam].active_idx = cam;
+ data[0] = new_cam;
+}
+
+static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
+ struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated)
+{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ struct ucsi_ccg_altmode *alt, *new_alt;
+ int i, j, k = 0;
+ bool found = false;
+
+ alt = uc->orig;
+ new_alt = uc->updated;
+ memset(uc->updated, 0, sizeof(uc->updated));
+
+ /*
+ * Copy original connector altmodes to new structure.
+ * We need this before second loop since second loop
+ * checks for duplicate altmodes.
+ */
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+ alt[i].svid = orig[i].svid;
+ alt[i].mid = orig[i].mid;
+ if (!alt[i].svid)
+ break;
+ }
+
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
+ if (!alt[i].svid)
+ break;
+
+ /* already checked and considered */
+ if (alt[i].checked)
+ continue;
+
+ if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
+ /* Found Non DP altmode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid;
+ new_alt[k].linked_idx = i;
+ alt[i].linked_idx = k;
+ updated[k].svid = new_alt[k].svid;
+ updated[k].mid = new_alt[k].mid;
+ k++;
+ continue;
+ }
+
+ for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
+ if (alt[i].svid != alt[j].svid ||
+ !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
+ continue;
+ } else {
+ /* Found duplicate DP mode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid | alt[j].mid;
+ new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
+ alt[i].linked_idx = k;
+ alt[j].linked_idx = k;
+ alt[j].checked = true;
+ found = true;
+ }
+ }
+ if (found) {
+ uc->has_multiple_dp = true;
+ } else {
+ /* Didn't find any duplicate DP altmode */
+ new_alt[k].svid = alt[i].svid;
+ new_alt[k].mid |= alt[i].mid;
+ new_alt[k].linked_idx = i;
+ alt[i].linked_idx = k;
+ }
+ updated[k].svid = new_alt[k].svid;
+ updated[k].mid = new_alt[k].mid;
+ k++;
+ }
+ return found;
+}
+
+static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
+ struct ucsi_connector *con,
+ u64 *cmd)
+{
+ struct ucsi_ccg_altmode *new_port, *port;
+ struct typec_altmode *alt = NULL;
+ u8 new_cam, cam, pin;
+ bool enter_new_mode;
+ int i, j, k = 0xff;
+
+ port = uc->orig;
+ new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
+ new_port = &uc->updated[new_cam];
+ cam = new_port->linked_idx;
+ enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
+
+ /*
+ * If the CAM is UCSI_MULTI_DP_INDEX, this is a DP altmode that
+ * covers multiple DP modes. Find the CAM with the best pin
+ * assignment among all the DP modes, prioritizing pin E->D->C
+ * after making sure the partner supports that pin.
+ */
+ if (cam == UCSI_MULTI_DP_INDEX) {
+ if (enter_new_mode) {
+ for (i = 0; con->partner_altmode[i]; i++) {
+ alt = con->partner_altmode[i];
+ if (alt->svid == new_port->svid)
+ break;
+ }
+ /*
+ * alt will always be non-NULL, since this is a
+ * UCSI_SET_NEW_CAM command and so there will be
+ * at least one con->partner_altmode[i] whose svid
+ * matches new_port->svid.
+ */
+ for (j = 0; port[j].svid; j++) {
+ pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
+ if (alt && port[j].svid == alt->svid &&
+ (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
+ /* prioritize pin E->D->C */
+ if (k == 0xff || (k != 0xff && pin >
+ DP_CONF_GET_PIN_ASSIGN(port[k].mid))
+ ) {
+ k = j;
+ }
+ }
+ }
+ cam = k;
+ new_port->active_idx = cam;
+ } else {
+ cam = new_port->active_idx;
+ }
+ }
+ *cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
+ *cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
+}
+
static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
void *val, size_t val_len)
{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
+ ret = ccg_read(uc, reg, val, val_len);
+ if (ret)
+ return ret;
+
+ if (offset == UCSI_MESSAGE_IN) {
+ if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_GET_CURRENT_CAM &&
+ uc->has_multiple_dp) {
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
+ }
+ uc->last_cmd_sent = 0;
+ }
+
+ return ret;
}
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
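The read hook above keys off the low byte of the last CONTROL write: UCSI_COMMAND() masks away the connector number and other fields, and the CAM fixup only runs for GET_CURRENT_CAM replies while duplicate DP modes are in play. A standalone sketch of the gating; the opcode value follows the UCSI spec but should be treated as illustrative here:

    #include <assert.h>
    #include <stdint.h>

    #define UCSI_COMMAND(cmd)     ((cmd) & 0xff)
    #define UCSI_GET_CURRENT_CAM  0x0e    /* illustrative opcode value */

    static int needs_cam_fixup(uint64_t last_cmd_sent, int has_multiple_dp)
    {
        return UCSI_COMMAND(last_cmd_sent) == UCSI_GET_CURRENT_CAM &&
               has_multiple_dp;
    }

    int main(void)
    {
        /* The connector number in bits 16+ must not leak into the compare. */
        assert(needs_cam_fixup((1ULL << 16) | UCSI_GET_CURRENT_CAM, 1));
        assert(!needs_cam_fixup(UCSI_GET_CURRENT_CAM, 0));
        return 0;
    }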
@@ -339,12 +511,26 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ struct ucsi_connector *con;
+ int con_index;
int ret;
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
set_bit(DEV_CMD_PENDING, &uc->flags);
+ if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) {
+ uc->last_cmd_sent = *(u64 *)val;
+
+ if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ uc->has_multiple_dp) {
+ con_index = (uc->last_cmd_sent >> 16) &
+ UCSI_CMD_CONNECTOR_MASK;
+ con = &uc->ucsi->connector[con_index - 1];
+ ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val);
+ }
+ }
+
ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
if (ret)
goto err_clear_bit;
@@ -363,7 +549,8 @@ err_clear_bit:
static const struct ucsi_operations ucsi_ccg_ops = {
.read = ucsi_ccg_read,
.sync_write = ucsi_ccg_sync_write,
- .async_write = ucsi_ccg_async_write
+ .async_write = ucsi_ccg_async_write,
+ .update_altmodes = ucsi_ccg_update_altmodes
};
static irqreturn_t ccg_irq_handler(int irq, void *data)
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 6532d68e8808..e4b96674c405 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
copy -= recv;
ret += recv;
+
+ if (!copy)
+ break;
}
if (ret != size)
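The usbip fix stops the receive loop as soon as the requested bytes are consumed, instead of iterating into further scatterlist entries with nothing left to read. A hedged standalone model of the loop shape; recv_chunk() stands in for the socket receive:

    #include <assert.h>
    #include <stddef.h>

    static size_t recv_chunk(size_t want)
    {
        return want;    /* pretend the full chunk always arrives */
    }

    static size_t recv_xbuff(const size_t *sg_lens, int nents, size_t copy)
    {
        size_t ret = 0;

        for (int i = 0; i < nents; i++) {
            size_t recv = recv_chunk(sg_lens[i] < copy ? sg_lens[i] : copy);

            copy -= recv;
            ret += recv;
            if (!copy)
                break;    /* the fix: don't walk the remaining entries */
        }
        return ret;
    }

    int main(void)
    {
        size_t lens[3] = { 4096, 4096, 4096 };

        assert(recv_xbuff(lens, 3, 6000) == 6000);
        return 0;
    }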
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 33f8972ba842..00fc98741c5d 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
/* recv transfer buffer */
- if (usbip_recv_xbuff(ud, urb) < 0)
- return;
+ if (usbip_recv_xbuff(ud, urb) < 0) {
+ urb->status = -EPROTO;
+ goto error;
+ }
/* recv iso_packet_descriptor */
- if (usbip_recv_iso(ud, urb) < 0)
- return;
+ if (usbip_recv_iso(ud, urb) < 0) {
+ urb->status = -EPROTO;
+ goto error;
+ }
/* restore the padding in iso packets */
usbip_pad_iso(ud, urb);
+error:
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 7570c7602ab4..8ad14e5c02bf 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -74,7 +74,7 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
return count;
}
-MDEV_TYPE_ATTR_WO(create);
+static MDEV_TYPE_ATTR_WO(create);
static void mdev_type_release(struct kobject *kobj)
{
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index f2983f0f84be..df4d96038cd4 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -97,8 +97,10 @@ static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
/* If there were any mappings at all... */
if (data->mm) {
- ret = mm_iommu_put(data->mm, data->mem);
- WARN_ON(ret);
+ if (data->mem) {
+ ret = mm_iommu_put(data->mm, data->mem);
+ WARN_ON(ret);
+ }
mmdrop(data->mm);
}
@@ -159,7 +161,7 @@ static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
data->useraddr = vma->vm_start;
data->mm = current->mm;
- atomic_inc(&data->mm->mm_count);
+ mmgrab(data->mm);
ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
vma_pages(vma), data->gpu_hpa, &data->mem);
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 0120d8324a40..a87992892a9f 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -230,7 +230,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
- iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+ iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
off = pos - 0xa0000;
rsrc = VGA_RSRC_LEGACY_MEM;
is_ioport = false;
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index 2d2babe21b2f..abdca900802d 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -24,7 +24,7 @@
#define MDIO_AN_INT 0x8002
#define MDIO_AN_INTMASK 0x8001
-static unsigned int xmdio_read(void *ioaddr, unsigned int mmd,
+static unsigned int xmdio_read(void __iomem *ioaddr, unsigned int mmd,
unsigned int reg)
{
unsigned int mmd_address, value;
@@ -35,7 +35,7 @@ static unsigned int xmdio_read(void *ioaddr, unsigned int mmd,
return value;
}
-static void xmdio_write(void *ioaddr, unsigned int mmd,
+static void xmdio_write(void __iomem *ioaddr, unsigned int mmd,
unsigned int reg, unsigned int value)
{
unsigned int mmd_address;
@@ -54,13 +54,13 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
if (!xgmac_regs->ioaddr) {
xgmac_regs->ioaddr =
- ioremap_nocache(xgmac_regs->addr, xgmac_regs->size);
+ ioremap(xgmac_regs->addr, xgmac_regs->size);
if (!xgmac_regs->ioaddr)
return -ENOMEM;
}
if (!xpcs_regs->ioaddr) {
xpcs_regs->ioaddr =
- ioremap_nocache(xpcs_regs->addr, xpcs_regs->size);
+ ioremap(xpcs_regs->addr, xpcs_regs->size);
if (!xpcs_regs->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
index 16165a62b86d..96064ef8f629 100644
--- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -82,7 +82,7 @@ static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
/* Map FlexRM ring registers if not mapped */
if (!reg->ioaddr) {
- reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
+ reg->ioaddr = ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index f67bab547501..09a9453b75c5 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -52,7 +52,7 @@ static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index e8f2bdbe0542..c0771a9567fb 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -409,7 +409,7 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
@@ -486,7 +486,7 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
if (!reg->ioaddr) {
reg->ioaddr =
- ioremap_nocache(reg->addr, reg->size);
+ ioremap(reg->addr, reg->size);
if (!reg->ioaddr)
return -ENOMEM;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 26cef65b41e7..16b3adc508db 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -79,7 +79,7 @@ static long tce_iommu_mm_set(struct tce_container *container)
}
BUG_ON(!current->mm);
container->mm = current->mm;
- atomic_inc(&container->mm->mm_count);
+ mmgrab(container->mm);
return 0;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ada8e6cdb88..a177bf2c6683 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -309,9 +309,8 @@ static int put_pfn(unsigned long pfn, int prot)
{
if (!is_invalid_reserved_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
- if (prot & IOMMU_WRITE)
- SetPageDirty(page);
- put_page(page);
+
+ unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
return 1;
}
return 0;
@@ -322,7 +321,6 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
{
struct page *page[1];
struct vm_area_struct *vma;
- struct vm_area_struct *vmas[1];
unsigned int flags = 0;
int ret;
@@ -330,33 +328,14 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
flags |= FOLL_WRITE;
down_read(&mm->mmap_sem);
- if (mm == current->mm) {
- ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
- vmas);
- } else {
- ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
- vmas, NULL);
- /*
- * The lifetime of a vaddr_get_pfn() page pin is
- * userspace-controlled. In the fs-dax case this could
- * lead to indefinite stalls in filesystem operations.
- * Disallow attempts to pin fs-dax pages via this
- * interface.
- */
- if (ret > 0 && vma_is_fsdax(vmas[0])) {
- ret = -EOPNOTSUPP;
- put_page(page[0]);
- }
- }
- up_read(&mm->mmap_sem);
-
+ ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+ page, NULL, NULL);
if (ret == 1) {
*pfn = page_to_pfn(page[0]);
- return 0;
+ ret = 0;
+ goto done;
}
- down_read(&mm->mmap_sem);
-
vaddr = untagged_addr(vaddr);
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
@@ -366,7 +345,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
if (is_invalid_reserved_pfn(*pfn))
ret = 0;
}
-
+done:
up_read(&mm->mmap_sem);
return ret;
}
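This conversion swaps the get_user_pages()/put_page() pairing for the pin_user_pages*() API, whose unpin helper also folds in the set-dirty step the old code did by hand. A toy model of the invariant, with the mm calls stubbed:

    #include <assert.h>
    #include <stdbool.h>

    /* Stubs modelling the pairing: every successful pin is undone by an
     * unpin that may also mark the page dirty, mirroring
     * pin_user_pages_remote() / unpin_user_pages_dirty_lock().
     */
    static int pinned, dirtied;

    static int pin_page(void)           { pinned++; return 1; }
    static void unpin_page(bool dirty)  { pinned--; if (dirty) dirtied++; }

    int main(void)
    {
        int got = pin_page();    /* pin_user_pages_remote(..., 1, ...) */

        assert(got == 1);
        unpin_page(true);        /* unpin_user_pages_dirty_lock(&page, 1, write) */
        assert(pinned == 0 && dirtied == 1);
        return 0;
    }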
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 94ccb9042440..8a4361e95a11 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -11,7 +11,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index d344fb03cb86..d5d5fb457e78 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_data/bd6107.h>
@@ -71,6 +71,7 @@ struct bd6107 {
struct i2c_client *client;
struct backlight_device *backlight;
struct bd6107_platform_data *pdata;
+ struct gpio_desc *reset;
};
static int bd6107_write(struct bd6107 *bd, u8 reg, u8 data)
@@ -94,9 +95,10 @@ static int bd6107_backlight_update_status(struct backlight_device *backlight)
bd6107_write(bd, BD6107_MAINCNT1, brightness);
bd6107_write(bd, BD6107_LEDCNT1, BD6107_LEDCNT1_LEDONOFF1);
} else {
- gpio_set_value(bd->pdata->reset, 0);
+ /* Assert the reset line (gpiolib will handle active low) */
+ gpiod_set_value(bd->reset, 1);
msleep(24);
- gpio_set_value(bd->pdata->reset, 1);
+ gpiod_set_value(bd->reset, 0);
}
return 0;
@@ -125,8 +127,8 @@ static int bd6107_probe(struct i2c_client *client,
struct bd6107 *bd;
int ret;
- if (pdata == NULL || !pdata->reset) {
- dev_err(&client->dev, "No reset GPIO in platform data\n");
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
return -EINVAL;
}
@@ -144,10 +146,16 @@ static int bd6107_probe(struct i2c_client *client,
bd->client = client;
bd->pdata = pdata;
- ret = devm_gpio_request_one(&client->dev, pdata->reset,
- GPIOF_DIR_OUT | GPIOF_INIT_LOW, "reset");
- if (ret < 0) {
+ /*
+ * Request the reset GPIO line with GPIOD_OUT_HIGH, meaning asserted;
+ * the machine descriptor table (or other hardware description)
+ * should flag the line as active low, so that this request actually
+ * asserts the reset.
+ */
+ bd->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(bd->reset)) {
dev_err(&client->dev, "unable to request reset GPIO\n");
+ ret = PTR_ERR(bd->reset);
return ret;
}
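With the gpiod conversion, polarity knowledge moves out of the driver: board code flags the line as active low, and the driver only ever speaks in logical "asserted"/"deasserted". A hypothetical machine-descriptor entry that would pair with the code above; the chip label, pin number, and device name are placeholders:

    #include <linux/gpio/machine.h>

    /* Hypothetical board file entry for the bd6107 reset line: the
     * ACTIVE_LOW flag makes gpiod_set_value(desc, 1) drive the pin low.
     */
    static struct gpiod_lookup_table bd6107_gpios = {
        .dev_id = "2-0076",    /* placeholder I2C device name */
        .table = {
            GPIO_LOOKUP("gpiochip0", 42, "reset", GPIO_ACTIVE_LOW),
            { }
        },
    };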
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index d46052d8ff41..3d276b30a78c 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -956,8 +956,8 @@ static int wled_configure(struct wled *wled, int version)
struct wled_config *cfg = &wled->cfg;
struct device *dev = wled->dev;
const __be32 *prop_addr;
- u32 size, val, c, string_len;
- int rc, i, j;
+ u32 size, val, c;
+ int rc, i, j, string_len;
const struct wled_u32_opts *u32_opts = NULL;
const struct wled_u32_opts wled3_opts[] = {
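The wled type change fixes a signedness bug: string_len holds the return value of an of_property counting helper, which is negative on error, and storing that in a u32 silently defeats any negative-error check. A standalone illustration; the helper is stubbed, with -22 standing in for -EINVAL:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for an of_property counting helper: negative on error. */
    static int count_elems(int present)
    {
        return present ? 3 : -22;    /* -EINVAL */
    }

    int main(void)
    {
        uint32_t bad = count_elems(0);   /* wraps to a huge positive value */
        int good = count_elems(0);

        assert(!(bad < 0));    /* unsigned: the error check never fires */
        assert(good < 0);      /* signed, as in the fix: error detected */
        return 0;
    }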
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index ac3a28c08f78..3c01b0d2414f 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -93,7 +93,6 @@ config SGI_NEWPORT_CONSOLE
config DUMMY_CONSOLE
bool
- depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y
default y
config DUMMY_CONSOLE_COLUMNS
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index aa9541bf964b..f65991a67af2 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2215,6 +2215,7 @@ config FB_HYPERV
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_DEFERRED_IO
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index bf3f2a9598b1..3a1c2e0739a1 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -633,7 +633,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ret = -EBUSY;
goto err_free_hw;
}
- hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start,
+ hw->v_regs = ioremap(carminefb_fix.mmio_start,
carminefb_fix.mmio_len);
if (!hw->v_regs) {
printk(KERN_ERR "carminefb: Can't remap %s register.\n",
@@ -664,7 +664,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_unmap_vregs;
}
- hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start,
+ hw->screen_mem = ioremap(carminefb_fix.smem_start,
carminefb_fix.smem_len);
if (!hw->screen_mem) {
printk(KERN_ERR "carmine: Can't ioremap smem area.\n");
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 3108a42db777..e4c3c8b65da4 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -31,6 +31,16 @@
* "set-vmvideo" command. For example
* set-vmvideo -vmname name -horizontalresolution:1920 \
* -verticalresolution:1200 -resolutiontype single
+ *
+ * Gen 1 VMs also support using the VM's physical memory directly for the
+ * framebuffer, which can improve framebuffer efficiency and performance.
+ * This requires allocating contiguous physical memory from the Linux
+ * kernel's CMA allocator. To enable it, pass a kernel parameter that
+ * reserves enough memory for the CMA allocator to back the framebuffer,
+ * for example:
+ * cma=130m
+ * This gives the CMA allocator 130 MB that can be handed to the
+ * framebuffer. For reference, an 8K (7680x4320) framebuffer takes
+ * about 127 MB.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -228,7 +238,6 @@ struct synthvid_msg {
} __packed;
-
/* FB driver definitions and structures */
#define HVFB_WIDTH 1152 /* default screen width */
#define HVFB_HEIGHT 864 /* default screen height */
@@ -258,12 +267,15 @@ struct hvfb_par {
/* If true, the VSC notifies the VSP on every framebuffer change */
bool synchronous_fb;
+ /* If true, need to copy from deferred IO mem to framebuffer mem */
+ bool need_docopy;
+
struct notifier_block hvfb_panic_nb;
/* Memory for deferred IO and frame buffer itself */
unsigned char *dio_vp;
unsigned char *mmio_vp;
- unsigned long mmio_pp;
+ phys_addr_t mmio_pp;
/* Dirty rectangle, protected by delayed_refresh_lock */
int x1, y1, x2, y2;
@@ -434,7 +446,7 @@ static void synthvid_deferred_io(struct fb_info *p,
maxy = max_t(int, maxy, y2);
/* Copy from dio space to mmio address */
- if (par->fb_ready)
+ if (par->fb_ready && par->need_docopy)
hvfb_docopy(par, start, PAGE_SIZE);
}
@@ -751,12 +763,12 @@ static void hvfb_update_work(struct work_struct *w)
return;
/* Copy the dirty rectangle to frame buffer memory */
- for (j = y1; j < y2; j++) {
- hvfb_docopy(par,
- j * info->fix.line_length +
- (x1 * screen_depth / 8),
- (x2 - x1) * screen_depth / 8);
- }
+ if (par->need_docopy)
+ for (j = y1; j < y2; j++)
+ hvfb_docopy(par,
+ j * info->fix.line_length +
+ (x1 * screen_depth / 8),
+ (x2 - x1) * screen_depth / 8);
/* Refresh */
if (par->fb_ready && par->update)
@@ -801,7 +813,8 @@ static int hvfb_on_panic(struct notifier_block *nb,
par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
par->synchronous_fb = true;
info = par->info;
- hvfb_docopy(par, 0, dio_fb_size);
+ if (par->need_docopy)
+ hvfb_docopy(par, 0, dio_fb_size);
synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
return NOTIFY_DONE;
@@ -940,6 +953,62 @@ static void hvfb_get_option(struct fb_info *info)
return;
}
+/*
+ * Allocate enough contiguous physical memory.
+ * Return the physical address on success, or -1 on failure.
+ */
+static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
+ unsigned int request_size)
+{
+ struct page *page = NULL;
+ dma_addr_t dma_handle;
+ void *vmem;
+ phys_addr_t paddr = 0;
+ unsigned int order = get_order(request_size);
+
+ if (request_size == 0)
+ return -1;
+
+ if (order < MAX_ORDER) {
+ /* Call alloc_pages if the size is less than 2^MAX_ORDER */
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -1;
+
+ paddr = (page_to_pfn(page) << PAGE_SHIFT);
+ } else {
+ /* Allocate from CMA */
+ hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
+
+ vmem = dma_alloc_coherent(&hdev->device,
+ round_up(request_size, PAGE_SIZE),
+ &dma_handle,
+ GFP_KERNEL | __GFP_NOWARN);
+
+ if (!vmem)
+ return -1;
+
+ paddr = virt_to_phys(vmem);
+ }
+
+ return paddr;
+}
+
+/* Release contiguous physical memory */
+static void hvfb_release_phymem(struct hv_device *hdev,
+ phys_addr_t paddr, unsigned int size)
+{
+ unsigned int order = get_order(size);
+
+ if (order < MAX_ORDER)
+ __free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
+ else
+ dma_free_coherent(&hdev->device,
+ round_up(size, PAGE_SIZE),
+ phys_to_virt(paddr),
+ paddr);
+}
+
/* Get framebuffer memory from Hyper-V video pci space */
static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
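hvfb_get_phymem() above splits on the allocation order: alloc_pages() serves at most order MAX_ORDER - 1 (4 MiB with 4 KiB pages and the common MAX_ORDER of 11), so realistic framebuffers, e.g. about 8.8 MB for 1920x1200 at 32 bpp, fall through to the CMA-backed dma_alloc_coherent() path. A standalone sketch of the split, with PAGE_SHIFT and MAX_ORDER values assumed:

    #include <assert.h>

    #define PAGE_SHIFT 12    /* 4 KiB pages, assumed */
    #define MAX_ORDER  11    /* common default, assumed */

    /* Minimal model of the kernel's get_order(): smallest order whose
     * 2^order pages cover 'size' bytes.
     */
    static int get_order(unsigned long size)
    {
        int order = 0;

        while ((1UL << (order + PAGE_SHIFT)) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long fb = 1920UL * 1200 * 4;    /* ~8.8 MB framebuffer */

        assert(get_order(fb) >= MAX_ORDER);       /* takes the CMA path */
        assert(get_order(2UL << 20) < MAX_ORDER); /* 2 MiB fits alloc_pages() */
        return 0;
    }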
@@ -949,22 +1018,61 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
resource_size_t pot_start, pot_end;
+ phys_addr_t paddr;
int ret;
- dio_fb_size =
- screen_width * screen_height * screen_depth / 8;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures)
+ return -ENOMEM;
- if (gen2vm) {
- pot_start = 0;
- pot_end = -1;
- } else {
+ if (!gen2vm) {
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
- PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
if (!pdev) {
pr_err("Unable to find PCI Hyper-V video\n");
+ kfree(info->apertures);
return -ENODEV;
}
+ info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+
+ /*
+ * For a Gen 1 VM, we can use the VM's contiguous memory
+ * directly. If that succeeds, deferred IO happens directly
+ * on the allocated framebuffer memory, avoiding an extra
+ * memory copy.
+ */
+ paddr = hvfb_get_phymem(hdev, screen_fb_size);
+ if (paddr != (phys_addr_t) -1) {
+ par->mmio_pp = paddr;
+ par->mmio_vp = par->dio_vp = __va(paddr);
+
+ info->fix.smem_start = paddr;
+ info->fix.smem_len = screen_fb_size;
+ info->screen_base = par->mmio_vp;
+ info->screen_size = screen_fb_size;
+
+ par->need_docopy = false;
+ goto getmem_done;
+ }
+ pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ } else {
+ info->apertures->ranges[0].base = screen_info.lfb_base;
+ info->apertures->ranges[0].size = screen_info.lfb_size;
+ }
+
+ /*
+ * Cannot use the contiguous physical memory.
+ * Allocate mmio space for framebuffer.
+ */
+ dio_fb_size =
+ screen_width * screen_height * screen_depth / 8;
+
+ if (gen2vm) {
+ pot_start = 0;
+ pot_end = -1;
+ } else {
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
pci_resource_len(pdev, 0) < screen_fb_size) {
pr_err("Resource not available or (0x%lx < 0x%lx)\n",
@@ -993,20 +1101,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
if (par->dio_vp == NULL)
goto err3;
- info->apertures = alloc_apertures(1);
- if (!info->apertures)
- goto err4;
-
- if (gen2vm) {
- info->apertures->ranges[0].base = screen_info.lfb_base;
- info->apertures->ranges[0].size = screen_info.lfb_size;
- remove_conflicting_framebuffers(info->apertures,
- KBUILD_MODNAME, false);
- } else {
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
- }
-
/* Physical address of FB device */
par->mmio_pp = par->mem->start;
/* Virtual address of FB device */
@@ -1017,13 +1111,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_base = par->dio_vp;
info->screen_size = dio_fb_size;
+getmem_done:
+ remove_conflicting_framebuffers(info->apertures,
+ KBUILD_MODNAME, false);
if (!gen2vm)
pci_dev_put(pdev);
+ kfree(info->apertures);
return 0;
-err4:
- vfree(par->dio_vp);
err3:
iounmap(fb_virt);
err2:
@@ -1032,18 +1128,25 @@ err2:
err1:
if (!gen2vm)
pci_dev_put(pdev);
+ kfree(info->apertures);
return -ENOMEM;
}
/* Release the framebuffer */
-static void hvfb_putmem(struct fb_info *info)
+static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
{
struct hvfb_par *par = info->par;
- vfree(par->dio_vp);
- iounmap(info->screen_base);
- vmbus_free_mmio(par->mem->start, screen_fb_size);
+ if (par->need_docopy) {
+ vfree(par->dio_vp);
+ iounmap(info->screen_base);
+ vmbus_free_mmio(par->mem->start, screen_fb_size);
+ } else {
+ hvfb_release_phymem(hdev, info->fix.smem_start,
+ screen_fb_size);
+ }
+
par->mem = NULL;
}
@@ -1062,6 +1165,7 @@ static int hvfb_probe(struct hv_device *hdev,
par = info->par;
par->info = info;
par->fb_ready = false;
+ par->need_docopy = true;
init_completion(&par->wait);
INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
@@ -1147,7 +1251,7 @@ static int hvfb_probe(struct hv_device *hdev,
error:
fb_deferred_io_cleanup(info);
- hvfb_putmem(info);
+ hvfb_putmem(hdev, info);
error2:
vmbus_close(hdev->channel);
error1:
@@ -1177,7 +1281,7 @@ static int hvfb_remove(struct hv_device *hdev)
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
- hvfb_putmem(info);
+ hvfb_putmem(hdev, info);
framebuffer_release(info);
return 0;
@@ -1194,6 +1298,7 @@ static int hvfb_suspend(struct hv_device *hdev)
fb_set_suspend(info, 1);
cancel_delayed_work_sync(&par->dwork);
+ cancel_delayed_work_sync(&info->deferred_work);
par->update_saved = par->update;
par->update = false;
@@ -1227,6 +1332,7 @@ static int hvfb_resume(struct hv_device *hdev)
par->fb_ready = true;
par->update = par->update_saved;
+ schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
/* 0 means do resume */
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index d18f7b31932c..aa7583d963ac 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1883,7 +1883,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
}
par->res_flags |= MMIO_REQ;
- par->mmio_start_virtual = ioremap_nocache(par->mmio_start_phys,
+ par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
printk("i810fb_init: cannot remap mmio region\n");
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 08a17eb2a5c7..370bf2553d43 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1017,7 +1017,7 @@ static int imxfb_probe(struct platform_device *pdev)
}
fbi->lcd_pwr = devm_regulator_get(&pdev->dev, "lcd");
- if (IS_ERR(fbi->lcd_pwr) && (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER)) {
+ if (PTR_ERR(fbi->lcd_pwr) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto failed_lcd;
}
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index c744891781a5..a9579964eaba 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -654,7 +654,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
}
dinfo->mmio_base =
- (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+ (u8 __iomem *)ioremap(dinfo->mmio_base_phys,
INTEL_REG_SIZE);
if (!dinfo->mmio_base) {
ERR_MSG("Cannot remap MMIO region.\n");
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 72dff2e42e3a..8fbde92ae8b9 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -683,7 +683,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
kyro_fix.mmio_len = pci_resource_len(pdev, 1);
currentpar->regbase = deviceInfo.pSTGReg =
- ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len);
+ ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len);
if (!currentpar->regbase)
goto out_free_fb;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 1a555f70923a..36cc718b96ae 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1710,7 +1710,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
memsize = mem;
err = -ENOMEM;
- minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384);
+ minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384);
if (!minfo->mmio.vbase.vaddr) {
printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys);
goto failVideoMR;
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 25dad4507d77..6dc287c819cb 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -938,7 +938,7 @@ static int mbxfb_probe(struct platform_device *dev)
}
mfbi->reg_phys_addr = mfbi->reg_res->start;
- mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev,
+ mfbi->reg_virt_addr = devm_ioremap(&dev->dev,
mfbi->reg_phys_addr,
res_size(mfbi->reg_req));
if (!mfbi->reg_virt_addr) {
@@ -948,7 +948,7 @@ static int mbxfb_probe(struct platform_device *dev)
}
virt_base_2700 = mfbi->reg_virt_addr;
- mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr,
+ mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
if (!mfbi->fb_virt_addr) {
dev_err(&dev->dev, "failed to ioremap frame buffer\n");
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 88c137f5c729..061a105afb86 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -493,7 +493,7 @@ static int mmphw_probe(struct platform_device *pdev)
goto failed;
}
- ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
+ ctrl->reg_base = devm_ioremap(ctrl->dev,
res->start, resource_size(res));
if (ctrl->reg_base == NULL) {
dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 8f933bad461c..fe2cadeb1b66 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1563,7 +1563,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_exit_neither;
}
default_par->v_regs =
- ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+ ioremap(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
if (!default_par->v_regs) {
printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n",
pm2fb_fix.id);
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 15a2b495235b..2f5e23c8f8ec 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -1236,7 +1236,7 @@ static unsigned long pm3fb_size_memory(struct pm3_par *par)
return 0;
}
screen_mem =
- ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
+ ioremap(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
if (!screen_mem) {
printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n");
release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
@@ -1347,7 +1347,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
goto err_exit_neither;
}
par->v_regs =
- ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
+ ioremap(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
if (!par->v_regs) {
printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n",
pm3fb_fix.id);
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index 7b33b52b2526..62c8de99af0b 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -188,7 +188,7 @@ static int pmagaafb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -199,7 +199,7 @@ static int pmagaafb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET;
- info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->screen_base = ioremap(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index a60e56d8980b..1296f9b370c2 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -180,7 +180,7 @@ static int pmagbafb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -190,7 +190,7 @@ static int pmagbafb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAG_BA_FBMEM;
- info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->screen_base = ioremap(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index f02080f21077..9dccd51ee65a 100644
--- a/drivers/video/fbdev/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
@@ -287,7 +287,7 @@ static int pmagbbfb_probe(struct device *dev)
/* MMIO mapping setup. */
info->fix.mmio_start = start;
- par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!par->mmio) {
printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
err = -ENOMEM;
@@ -298,7 +298,7 @@ static int pmagbbfb_probe(struct device *dev)
/* Frame buffer mapping setup. */
info->fix.smem_start = start + PMAGB_B_FBMEM;
- par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
+ par->smem = ioremap(info->fix.smem_start, info->fix.smem_len);
if (!par->smem) {
printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
err = -ENOMEM;
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index fcb16eabbb0d..f18d457175d9 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -770,7 +770,7 @@ static int __maybe_unused pvr2fb_common_init(void)
struct pvr2fb_par *par = currentpar;
unsigned long modememused, rev;
- fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
+ fb_info->screen_base = ioremap(pvr2_fix.smem_start,
pvr2_fix.smem_len);
if (!fb_info->screen_base) {
@@ -778,7 +778,7 @@ static int __maybe_unused pvr2fb_common_init(void)
goto out_err;
}
- par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start,
+ par->mmio_base = ioremap(pvr2_fix.mmio_start,
pvr2_fix.mmio_len);
if (!par->mmio_base) {
printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 3679698fffd1..706c69430818 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -665,7 +665,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
/*
* Map LCD controller registers.
*/
- fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ fbi->reg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (fbi->reg_base == NULL) {
ret = -ENOMEM;
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index c23defa27036..eaea8c373753 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -809,8 +809,8 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
default_par = info->par;
- default_par->regs = ioremap_nocache(pdev->resource[1].start,
- resource_size(&pdev->resource[1]));
+ default_par->regs = ioremap(pdev->resource[1].start,
+ resource_size(&pdev->resource[1]));
if (!default_par->regs) {
printk(KERN_ERR PFX "unable to map registers\n");
ret = -ENOMEM;
@@ -818,8 +818,9 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
}
info->pseudo_palette = default_par->pseudo_palette;
- info->screen_base = ioremap_nocache(pdev->resource[0].start,
- resource_size(&pdev->resource[0]));
+ info->screen_base = ioremap(pdev->resource[0].start,
+ resource_size(&pdev->resource[0]));
+
if (!info->screen_base) {
printk(KERN_ERR PFX "unable to map framebuffer\n");
ret = -ENOMEM;
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index b8ec8d9bb4c8..5978a8921232 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -463,7 +463,7 @@ static int sh7760fb_probe(struct platform_device *pdev)
goto out_fb;
}
- par->base = ioremap_nocache(res->start, resource_size(res));
+ par->base = ioremap(res->start, resource_size(res));
if (!par->base) {
dev_err(&pdev->dev, "cannot remap\n");
ret = -ENODEV;
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 25f119efebc0..4ea6f932b334 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2588,7 +2588,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
if (num_channels == 2)
priv->forced_fourcc = pdata->ch[0].fourcc;
- priv->base = ioremap_nocache(res->start, resource_size(res));
+ priv->base = ioremap(res->start, resource_size(res));
if (!priv->base) {
error = -ENOMEM;
goto err1;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 82e707a48e4e..afe6d1b7c3a0 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1363,14 +1363,14 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_fb_mem;
}
- par->mmio_vbase = ioremap_nocache(fix->mmio_start,
+ par->mmio_vbase = ioremap(fix->mmio_start,
fix->mmio_len);
if (!par->mmio_vbase) {
printk(KERN_ERR "sstfb: cannot remap register area %#lx\n",
fix->mmio_start);
goto fail_mmio_remap;
}
- info->screen_base = ioremap_nocache(fix->smem_start, 0x400000);
+ info->screen_base = ioremap(fix->smem_start, 0x400000);
if (!info->screen_base) {
printk(KERN_ERR "sstfb: cannot remap framebuffer %#lx\n",
fix->smem_start);
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 5400ab79fbbd..de953ddb6312 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1198,7 +1198,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
case S9000_ID_TOMCAT: /* Dual CRX, behaves else like a CRX */
/* FIXME: TomCat supports two heads:
* fb.iobase = REGION_BASE(fb_info,3);
- * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx);
+ * fb.screen_base = ioremap(REGION_BASE(fb_info,2),xxx);
* for now we only support the left one ! */
xres = fb->ngle_rom.x_size_visible;
yres = fb->ngle_rom.y_size_visible;
@@ -1291,7 +1291,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
strcpy(fix->id, "stifb");
info->fbops = &stifb_ops;
- info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
+ info->screen_base = ioremap(REGION_BASE(fb,1), fix->smem_len);
if (!info->screen_base) {
printk(KERN_ERR "stifb: failed to map memory\n");
goto out_err0;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 0349cf69652f..f73e26c18c09 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1417,7 +1417,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
default_par->regbase_virt =
- ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ ioremap(info->fix.mmio_start, info->fix.mmio_len);
if (!default_par->regbase_virt) {
printk(KERN_ERR "fb: Can't remap %s register area.\n",
info->fix.id);
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 7cd262858241..e9869135d833 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1438,7 +1438,7 @@ static int tgafb_register(struct device *dev)
}
/* Map the framebuffer. */
- mem_base = ioremap_nocache(bar0_start, bar0_len);
+ mem_base = ioremap(bar0_start, bar0_len);
if (!mem_base) {
printk(KERN_ERR "tgafb: Cannot map MMIO\n");
goto err1;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 715509917eaa..4d20cb557ff0 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -1556,7 +1556,7 @@ static int trident_pci_probe(struct pci_dev *dev,
return -1;
}
- default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start,
+ default_par->io_virt = ioremap(tridentfb_fix.mmio_start,
tridentfb_fix.mmio_len);
if (!default_par->io_virt) {
@@ -1579,7 +1579,7 @@ static int trident_pci_probe(struct pci_dev *dev,
goto out_unmap1;
}
- info->screen_base = ioremap_nocache(tridentfb_fix.smem_start,
+ info->screen_base = ioremap(tridentfb_fix.smem_start,
tridentfb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index ca8d7343aaa1..4d20c4603e5a 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -356,7 +356,7 @@ int __init valkyriefb_init(void)
p->total_vram = 0x100000;
p->frame_buffer_phys = frame_buffer_phys;
#ifdef CONFIG_MAC
- p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+ p->frame_buffer = ioremap(frame_buffer_phys, p->total_vram);
#else
p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
#endif
diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c
index c1e3738e6789..79d42b23d850 100644
--- a/drivers/video/fbdev/vermilion/cr_pll.c
+++ b/drivers/video/fbdev/vermilion/cr_pll.c
@@ -159,7 +159,7 @@ static int __init cr_pll_init(void)
pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR,
&mch_bar);
mch_regs_base =
- ioremap_nocache(mch_bar, CRVML_MCHMAP_SIZE);
+ ioremap(mch_bar, CRVML_MCHMAP_SIZE);
if (!mch_regs_base) {
printk(KERN_ERR
"Carillo Ranch MCH device was not enabled.\n");
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 498038a964ee..ff61605b8764 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -317,7 +317,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
": Could not claim display controller MMIO.\n");
return -EBUSY;
}
- par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
+ par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
if (par->vdc_mem == NULL) {
printk(KERN_ERR MODULE_NAME
": Could not map display controller MMIO.\n");
@@ -332,7 +332,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
err = -EBUSY;
goto out_err_1;
}
- par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
+ par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
if (par->gpu_mem == NULL) {
printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
err = -ENOMEM;
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index ffa2ca2d3f5e..703ddee9a244 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -442,7 +442,7 @@ static int via_pci_setup_mmio(struct viafb_dev *vdev)
*/
vdev->engine_start = pci_resource_start(vdev->pdev, 1);
vdev->engine_len = pci_resource_len(vdev->pdev, 1);
- vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+ vdev->engine_mmio = ioremap(vdev->engine_start,
vdev->engine_len);
if (vdev->engine_mmio == NULL)
dev_err(&vdev->pdev->dev,
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f815f98190bc..852673c40a2f 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1173,13 +1173,12 @@ static ssize_t viafb_dvp0_proc_write(struct file *file,
return count;
}
-static const struct file_operations viafb_dvp0_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_dvp0_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_dvp0_proc_write,
+static const struct proc_ops viafb_dvp0_proc_ops = {
+ .proc_open = viafb_dvp0_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_dvp0_proc_write,
};
static int viafb_dvp1_proc_show(struct seq_file *m, void *v)
@@ -1238,13 +1237,12 @@ static ssize_t viafb_dvp1_proc_write(struct file *file,
return count;
}
-static const struct file_operations viafb_dvp1_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_dvp1_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_dvp1_proc_write,
+static const struct proc_ops viafb_dvp1_proc_ops = {
+ .proc_open = viafb_dvp1_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_dvp1_proc_write,
};
static int viafb_dfph_proc_show(struct seq_file *m, void *v)
@@ -1273,13 +1271,12 @@ static ssize_t viafb_dfph_proc_write(struct file *file,
return count;
}
-static const struct file_operations viafb_dfph_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_dfph_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_dfph_proc_write,
+static const struct proc_ops viafb_dfph_proc_ops = {
+ .proc_open = viafb_dfph_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_dfph_proc_write,
};
static int viafb_dfpl_proc_show(struct seq_file *m, void *v)
@@ -1308,13 +1305,12 @@ static ssize_t viafb_dfpl_proc_write(struct file *file,
return count;
}
-static const struct file_operations viafb_dfpl_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_dfpl_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_dfpl_proc_write,
+static const struct proc_ops viafb_dfpl_proc_ops = {
+ .proc_open = viafb_dfpl_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_dfpl_proc_write,
};
static int viafb_vt1636_proc_show(struct seq_file *m, void *v)
@@ -1444,13 +1440,12 @@ static ssize_t viafb_vt1636_proc_write(struct file *file,
return count;
}
-static const struct file_operations viafb_vt1636_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_vt1636_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_vt1636_proc_write,
+static const struct proc_ops viafb_vt1636_proc_ops = {
+ .proc_open = viafb_vt1636_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_vt1636_proc_write,
};
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
@@ -1522,13 +1517,12 @@ static ssize_t viafb_iga1_odev_proc_write(struct file *file,
return res;
}
-static const struct file_operations viafb_iga1_odev_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_iga1_odev_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_iga1_odev_proc_write,
+static const struct proc_ops viafb_iga1_odev_proc_ops = {
+ .proc_open = viafb_iga1_odev_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_iga1_odev_proc_write,
};
static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
@@ -1562,13 +1556,12 @@ static ssize_t viafb_iga2_odev_proc_write(struct file *file,
return res;
}
-static const struct file_operations viafb_iga2_odev_proc_fops = {
- .owner = THIS_MODULE,
- .open = viafb_iga2_odev_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = viafb_iga2_odev_proc_write,
+static const struct proc_ops viafb_iga2_odev_proc_ops = {
+ .proc_open = viafb_iga2_odev_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = viafb_iga2_odev_proc_write,
};
#define IS_VT1636(lvds_chip) ((lvds_chip).lvds_chip_name == VT1636_LVDS)
@@ -1580,14 +1573,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
shared->proc_entry = viafb_entry;
if (viafb_entry) {
#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
- proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
- proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_fops);
- proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
- proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
+ proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_ops);
+ proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_ops);
+ proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_ops);
+ proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_ops);
if (IS_VT1636(shared->chip_info.lvds_chip_info)
|| IS_VT1636(shared->chip_info.lvds_chip_info2))
proc_create("vt1636", 0, viafb_entry,
- &viafb_vt1636_proc_fops);
+ &viafb_vt1636_proc_ops);
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
proc_create_single("supported_output_devices", 0, viafb_entry,
@@ -1595,11 +1588,11 @@ static void viafb_init_proc(struct viafb_shared *shared)
iga1_entry = proc_mkdir("iga1", viafb_entry);
shared->iga1_proc_entry = iga1_entry;
proc_create("output_devices", 0, iga1_entry,
- &viafb_iga1_odev_proc_fops);
+ &viafb_iga1_odev_proc_ops);
iga2_entry = proc_mkdir("iga2", viafb_entry);
shared->iga2_proc_entry = iga2_entry;
proc_create("output_devices", 0, iga2_entry,
- &viafb_iga2_odev_proc_fops);
+ &viafb_iga2_odev_proc_ops);
}
}
static void viafb_remove_proc(struct viafb_shared *shared)
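
The repeated viafb conversions above follow procfs gaining its own operations table in v5.6: struct proc_ops replaces struct file_operations for proc entries, the callbacks take a proc_ prefix, and the .owner field is dropped because procfs pins the owning module internally. A hedged sketch of one single-open entry, with placeholder demo_* names:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
        seq_puts(m, "hello\n");
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        return single_open(file, demo_show, NULL);
    }

    static const struct proc_ops demo_proc_ops = {
        .proc_open    = demo_open,
        .proc_read    = seq_read,
        .proc_lseek   = seq_lseek,
        .proc_release = single_release,
    };

    /* registered with: proc_create("demo", 0, NULL, &demo_proc_ops); */
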
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index fe628ec20149..ad26cbffbc6f 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -648,12 +648,12 @@ int w100fb_probe(struct platform_device *pdev)
return -EINVAL;
/* Remap the chip base address */
- remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
+ remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN);
if (remapped_base == NULL)
goto out;
/* Map the register space */
- remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
+ remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN);
if (remapped_regs == NULL)
goto out;
@@ -672,7 +672,7 @@ int w100fb_probe(struct platform_device *pdev)
printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);
/* Remap the framebuffer */
- remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
+ remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
if (remapped_fbuf == NULL)
goto out;
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index bcda657493a4..895c60b8402e 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
# How to generate logos
-hostprogs-y := pnmtologo
+hostprogs := pnmtologo
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
quiet_cmd_logo = LOGO $@
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 2307b0329aec..d823d558c0c4 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 43c391626a00..50920b6fc319 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -7,6 +7,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 93f995f6cf36..7bfe365d9372 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -158,6 +158,8 @@ static void set_page_pfns(struct virtio_balloon *vb,
{
unsigned int i;
+ BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
+
/*
* Set balloon pfns pointing at this page.
* Note that the first pfn points at start of the page.
@@ -475,7 +477,9 @@ static int init_vqs(struct virtio_balloon *vb)
names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
+ callbacks[VIRTIO_BALLOON_VQ_STATS] = NULL;
names[VIRTIO_BALLOON_VQ_STATS] = NULL;
+ callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
@@ -899,8 +903,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
if (IS_ERR(vb->vb_dev_info.inode)) {
err = PTR_ERR(vb->vb_dev_info.inode);
- kern_unmount(balloon_mnt);
- goto out_del_vqs;
+ goto out_kern_unmount;
}
vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
@@ -911,13 +914,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
*/
if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
err = -ENOSPC;
- goto out_del_vqs;
+ goto out_iput;
}
vb->balloon_wq = alloc_workqueue("balloon-wq",
WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
if (!vb->balloon_wq) {
err = -ENOMEM;
- goto out_del_vqs;
+ goto out_iput;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
@@ -951,6 +954,12 @@ static int virtballoon_probe(struct virtio_device *vdev)
out_del_balloon_wq:
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
destroy_workqueue(vb->balloon_wq);
+out_iput:
+#ifdef CONFIG_BALLOON_COMPACTION
+ iput(vb->vb_dev_info.inode);
+out_kern_unmount:
+ kern_unmount(balloon_mnt);
+#endif
out_del_vqs:
vdev->config->del_vqs(vdev);
out_free_vb:
@@ -966,6 +975,10 @@ static void remove_common(struct virtio_balloon *vb)
leak_balloon(vb, vb->num_pages);
update_balloon_size(vb);
+ /* There might be free pages that are being reported: release them. */
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ return_free_pages_to_mm(vb, ULONG_MAX);
+
/* Now we reset the device so we can clean up the queues. */
vb->vdev->config->reset(vb->vdev);
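
Two independent balloon fixes above: the BUILD_BUG_ON() turns the PFN-array sizing assumption into a compile-time check, and the probe error path gains out_iput/out_kern_unmount labels so each failure releases exactly what was acquired, in reverse order. A standalone sketch of that unwind idiom, with illustrative names:

    #include <stdio.h>

    static int acquire(const char *what, int fail)
    {
        if (fail) {
            printf("%s: failed\n", what);
            return -1;
        }
        printf("%s: acquired\n", what);
        return 0;
    }

    static void release(const char *what)
    {
        printf("%s: released\n", what);
    }

    static int demo_probe(int fail_at)
    {
        int err;

        err = acquire("vqs", fail_at == 1);
        if (err)
            goto out;
        err = acquire("mount", fail_at == 2);
        if (err)
            goto out_del_vqs;
        err = acquire("inode", fail_at == 3);
        if (err)
            goto out_kern_unmount;
        return 0;

    out_kern_unmount:
        release("mount");           /* undo step 2 */
    out_del_vqs:
        release("vqs");             /* undo step 1 */
    out:
        return err;
    }

    int main(void)
    {
        demo_probe(3);              /* prints the reverse-order teardown */
        return 0;
    }
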
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index e09edb5c5e06..97d5725fd9a2 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -531,18 +531,9 @@ static void virtio_mmio_release_dev(struct device *_d)
static int virtio_mmio_probe(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev;
- struct resource *mem;
unsigned long magic;
int rc;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -EINVAL;
-
- if (!devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem), pdev->name))
- return -EBUSY;
-
vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
return -ENOMEM;
@@ -554,9 +545,9 @@ static int virtio_mmio_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&vm_dev->virtqueues);
spin_lock_init(&vm_dev->lock);
- vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (vm_dev->base == NULL)
- return -EFAULT;
+ vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vm_dev->base))
+ return PTR_ERR(vm_dev->base);
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
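
devm_platform_ioremap_resource() collapses the platform_get_resource(), devm_request_mem_region() and devm_ioremap() triple into one call; note it reports failure as an ERR_PTR(), never NULL, hence the IS_ERR()/PTR_ERR() check above. A minimal sketch:

    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static int demo_probe(struct platform_device *pdev)
    {
        /* looks up IORESOURCE_MEM index 0, requests it, and maps it */
        void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

        if (IS_ERR(base))
            return PTR_ERR(base);
        /* ... readl(base + REG) / writel(val, base + REG) ... */
        return 0;
    }
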
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index f2862f66c2ac..222d630c41fc 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -294,7 +294,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
for (i = 0; i < nvqs; ++i)
- if (callbacks[i])
+ if (names[i] && callbacks[i])
++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
index ca752b8f495f..cb1eb7e05f87 100644
--- a/drivers/visorbus/visorchipset.c
+++ b/drivers/visorbus/visorchipset.c
@@ -1210,14 +1210,17 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
{
struct controlvm_message local_crash_bus_msg;
struct controlvm_message local_crash_dev_msg;
- struct controlvm_message msg;
+ struct controlvm_message msg = {
+ .hdr.id = CONTROLVM_CHIPSET_INIT,
+ .cmd.init_chipset = {
+ .bus_count = 23,
+ .switch_count = 0,
+ },
+ };
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
/* send init chipset msg */
- msg.hdr.id = CONTROLVM_CHIPSET_INIT;
- msg.cmd.init_chipset.bus_count = 23;
- msg.cmd.init_chipset.switch_count = 0;
chipset_init(&msg);
/* get saved message count */
if (visorchannel_read(chipset_dev->controlvm_channel,
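
The visorchipset change closes a hole where only three fields of the on-stack message were assigned and the remainder was passed along uninitialized. In C, a designated initializer zeroes every member that is not explicitly named, which the following standalone sketch demonstrates:

    #include <stdio.h>
    #include <string.h>

    struct msg {
        unsigned int id;
        unsigned int bus_count;
        unsigned int switch_count;
        unsigned char payload[32];  /* stack garbage if left unassigned */
    };

    int main(void)
    {
        struct msg m = {
            .id = 1,
            .bus_count = 23,
            /* .switch_count and .payload are implicitly zeroed */
        };
        struct msg zero = { 0 };

        /* exits 0: the unnamed members really are all-zero */
        return memcmp(m.payload, zero.payload, sizeof(m.payload)) ? 1 : 0;
    }
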
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
index 1b6e42e5e8fd..51e056bae943 100644
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -55,7 +55,7 @@ static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Map registers in BAR 0 */
- vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
+ vmic_base = ioremap(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 1edb8a5de873..ea938dc29c5e 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -554,7 +554,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap_nocache(
+ image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
@@ -1638,7 +1638,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* map registers in BAR 0 */
- ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
4096);
if (!ca91cx42_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 3208a4409e44..6a1bc284f297 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -414,8 +414,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
}
}
-static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u8 retval = 0xff;
int i;
@@ -446,8 +447,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
return retval;
}
-static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u16 retval = 0xffff;
int i;
@@ -478,8 +480,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
return retval;
}
-static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u32 retval = 0xffffffff;
int i;
@@ -609,8 +612,9 @@ out:
return retval;
}
-static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
+ u8 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
@@ -639,8 +643,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
}
-static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
+ u16 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
@@ -669,8 +674,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
}
-static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
+ u32 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
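
Tagging the fake VME accessors noinline_for_stack stops the compiler from inlining several of these helpers, each with its own sizeable locals, into a single caller frame; the macro expands to noinline and merely documents that stack usage is the reason. A kernel-style sketch:

    #include <linux/compiler.h>
    #include <linux/string.h>

    /* kept out of line so this buffer never lands in the caller's frame */
    static noinline_for_stack int demo_helper(int x)
    {
        char scratch[256];

        memset(scratch, 0, sizeof(scratch));
        scratch[0] = (char)x;
        return scratch[0];
    }
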
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 7e079d39bd76..50ae26977a02 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -770,7 +770,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
goto err_resource;
}
- image->kern_base = ioremap_nocache(
+ image->kern_base = ioremap(
image->bus_resource.start, size);
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
@@ -2317,7 +2317,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* map registers in BAR 0 */
- tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
4096);
if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 3110791a2f1c..ee716c715710 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -139,7 +139,7 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent
dev->phys_addr = pci_resource_start(pdev, 1);
- dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
+ dev->virt_addr = ioremap(dev->phys_addr, 16384);
if (!dev->virt_addr) {
dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
__func__, dev->phys_addr, 16384);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 4164045866b3..aa09f8527776 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -38,12 +38,6 @@
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT BIT(0)
-#define OMAP_HDQ_SYSCONFIG 0x14
-#define OMAP_HDQ_SYSCONFIG_SOFTRESET BIT(1)
-#define OMAP_HDQ_SYSCONFIG_AUTOIDLE BIT(0)
-#define OMAP_HDQ_SYSCONFIG_NOIDLE 0x0
-#define OMAP_HDQ_SYSSTATUS 0x18
-#define OMAP_HDQ_SYSSTATUS_RESETDONE BIT(0)
#define OMAP_HDQ_FLAG_CLEAR 0
#define OMAP_HDQ_FLAG_SET 1
@@ -62,17 +56,9 @@ struct hdq_data {
void __iomem *hdq_base;
/* lock status update */
struct mutex hdq_mutex;
- int hdq_usecount;
u8 hdq_irqstatus;
/* device lock */
spinlock_t hdq_spinlock;
- /*
- * Used to control the call to omap_hdq_get and omap_hdq_put.
- * HDQ Protocol: Write the CMD|REG_address first, followed by
- * the data write or read.
- */
- int init_trans;
- int rrw;
/* mode: 0-HDQ 1-W1 */
int mode;
@@ -99,15 +85,6 @@ static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
return new_val;
}
-static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
- u32 mask)
-{
- u32 ie;
-
- ie = readl(hdq_data->hdq_base + offset);
- writel(ie & mask, hdq_data->hdq_base + offset);
-}
-
/*
* Wait for one or more bits in flag change.
* HDQ_FLAG_SET: wait until any bit in the flag is set.
@@ -142,22 +119,24 @@ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
return ret;
}
+/* Clear saved irqstatus after using an interrupt */
+static void hdq_reset_irqstatus(struct hdq_data *hdq_data)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
+ hdq_data->hdq_irqstatus = 0;
+ spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
+}
+
/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
int ret;
u8 tmp_status;
- unsigned long irqflags;
*status = 0;
- spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- /* clear interrupt flags via a dummy read */
- hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
- /* ISR loads it with new INT_STATUS */
- hdq_data->hdq_irqstatus = 0;
- spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
-
hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
/* set the GO bit */
@@ -191,6 +170,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
}
out:
+ hdq_reset_irqstatus(hdq_data);
return ret;
}
@@ -237,47 +217,11 @@ static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
slave_found(master_dev, id);
}
-static int _omap_hdq_reset(struct hdq_data *hdq_data)
-{
- int ret;
- u8 tmp_status;
-
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_SOFTRESET);
- /*
- * Select HDQ/1W mode & enable clocks.
- * It is observed that INT flags can't be cleared via a read and GO/INIT
- * won't return to zero if interrupt is disabled. So we always enable
- * interrupt.
- */
- hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
- OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
- OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
-
- /* wait for reset to complete */
- ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
- OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
- if (ret)
- dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
- tmp_status);
- else {
- hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
- OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
- OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
- hdq_data->mode);
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_AUTOIDLE);
- }
-
- return ret;
-}
-
/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
int ret = 0;
u8 tmp_status;
- unsigned long irqflags;
ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (ret < 0) {
@@ -286,13 +230,6 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
goto rtn;
}
- spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- /* clear interrupt flags via a dummy read */
- hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
- /* ISR loads it with new INT_STATUS */
- hdq_data->hdq_irqstatus = 0;
- spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
-
/* set the INIT and GO bit */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
@@ -341,6 +278,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
" return to zero, %x", tmp_status);
out:
+ hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
@@ -357,7 +295,7 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
goto rtn;
}
- if (!hdq_data->hdq_usecount) {
+ if (pm_runtime_suspended(hdq_data->dev)) {
ret = -EINVAL;
goto out;
}
@@ -388,86 +326,13 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
/* the data is ready. Read it in! */
*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
+ hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
}
-/* Enable clocks and set the controller to HDQ/1W mode */
-static int omap_hdq_get(struct hdq_data *hdq_data)
-{
- int ret = 0;
-
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- ret = -EINTR;
- goto rtn;
- }
-
- if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
- dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
- ret = -EINVAL;
- goto out;
- } else {
- hdq_data->hdq_usecount++;
- try_module_get(THIS_MODULE);
- if (1 == hdq_data->hdq_usecount) {
-
- pm_runtime_get_sync(hdq_data->dev);
-
- /* make sure HDQ/1W is out of reset */
- if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
- OMAP_HDQ_SYSSTATUS_RESETDONE)) {
- ret = _omap_hdq_reset(hdq_data);
- if (ret)
- /* back up the count */
- hdq_data->hdq_usecount--;
- } else {
- /* select HDQ/1W mode & enable clocks */
- hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
- OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
- OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
- hdq_data->mode);
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_NOIDLE);
- hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
- }
- }
- }
-
-out:
- mutex_unlock(&hdq_data->hdq_mutex);
-rtn:
- return ret;
-}
-
-/* Disable clocks to the module */
-static int omap_hdq_put(struct hdq_data *hdq_data)
-{
- int ret = 0;
-
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0)
- return -EINTR;
-
- hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
- OMAP_HDQ_SYSCONFIG_AUTOIDLE);
- if (0 == hdq_data->hdq_usecount) {
- dev_dbg(hdq_data->dev, "attempt to decrement use count"
- " when it is zero");
- ret = -EINVAL;
- } else {
- hdq_data->hdq_usecount--;
- module_put(THIS_MODULE);
- if (0 == hdq_data->hdq_usecount)
- pm_runtime_put_sync(hdq_data->dev);
- }
- mutex_unlock(&hdq_data->hdq_mutex);
-
- return ret;
-}
-
/*
* W1 triplet callback function - used for searching ROM addresses.
* Registered only when controller is in 1-wire mode.
@@ -482,7 +347,12 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
- omap_hdq_get(_hdq);
+ err = pm_runtime_get_sync(hdq_data->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
+
+ return err;
+ }
err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
if (err < 0) {
@@ -490,7 +360,6 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
goto rtn;
}
- hdq_data->hdq_irqstatus = 0;
/* read id_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -504,7 +373,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
}
id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
- hdq_data->hdq_irqstatus = 0;
+ /* Must clear irqstatus for another RXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data);
+
/* read comp_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -547,18 +418,33 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
OMAP_HDQ_CTRL_STATUS_SINGLE);
out:
+ hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
- omap_hdq_put(_hdq);
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
+
return ret;
}
/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
- omap_hdq_get(_hdq);
- omap_hdq_break(_hdq);
- omap_hdq_put(_hdq);
+ struct hdq_data *hdq_data = _hdq;
+ int err;
+
+ err = pm_runtime_get_sync(hdq_data->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
+
+ return err;
+ }
+
+ omap_hdq_break(hdq_data);
+
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
+
return 0;
}
@@ -569,37 +455,19 @@ static u8 omap_w1_read_byte(void *_hdq)
u8 val = 0;
int ret;
- /* First write to initialize the transfer */
- if (hdq_data->init_trans == 0)
- omap_hdq_get(hdq_data);
+ ret = pm_runtime_get_sync(hdq_data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
- ret = hdq_read_byte(hdq_data, &val);
- if (ret) {
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return -EINTR;
- }
- hdq_data->init_trans = 0;
- mutex_unlock(&hdq_data->hdq_mutex);
- omap_hdq_put(hdq_data);
return -1;
}
- hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
- ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
+ ret = hdq_read_byte(hdq_data, &val);
+ if (ret)
+ ret = -1;
- /* Write followed by a read, release the module */
- if (hdq_data->init_trans) {
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return -EINTR;
- }
- hdq_data->init_trans = 0;
- mutex_unlock(&hdq_data->hdq_mutex);
- omap_hdq_put(hdq_data);
- }
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
return val;
}
@@ -611,9 +479,12 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
int ret;
u8 status;
- /* First write to initialize the transfer */
- if (hdq_data->init_trans == 0)
- omap_hdq_get(hdq_data);
+ ret = pm_runtime_get_sync(hdq_data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(hdq_data->dev);
+
+ return;
+ }
/*
* We need to reset the slave before
@@ -623,31 +494,15 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
if (byte == W1_SKIP_ROM)
omap_hdq_break(hdq_data);
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return;
- }
- hdq_data->init_trans++;
- mutex_unlock(&hdq_data->hdq_mutex);
-
ret = hdq_write_byte(hdq_data, byte, &status);
if (ret < 0) {
dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
- return;
+ goto out_err;
}
- /* Second write, data transferred. Release the module */
- if (hdq_data->init_trans > 1) {
- omap_hdq_put(hdq_data);
- ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
- if (ret < 0) {
- dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
- return;
- }
- hdq_data->init_trans = 0;
- mutex_unlock(&hdq_data->hdq_mutex);
- }
+out_err:
+ pm_runtime_mark_last_busy(hdq_data->dev);
+ pm_runtime_put_autosuspend(hdq_data->dev);
}
static struct w1_bus_master omap_w1_master = {
@@ -656,6 +511,35 @@ static struct w1_bus_master omap_w1_master = {
.reset_bus = omap_w1_reset_bus,
};
+static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
+{
+ struct hdq_data *hdq_data = dev_get_drvdata(dev);
+
+ hdq_reg_out(hdq_data, 0, hdq_data->mode);
+ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+
+ return 0;
+}
+
+static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
+{
+ struct hdq_data *hdq_data = dev_get_drvdata(dev);
+
+ /* select HDQ/1W mode & enable clocks */
+ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
+ OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
+ OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
+ hdq_data->mode);
+ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_hdq_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
+ omap_hdq_runtime_resume, NULL)
+};
+
static int omap_hdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -677,23 +561,27 @@ static int omap_hdq_probe(struct platform_device *pdev)
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
- hdq_data->hdq_usecount = 0;
- hdq_data->rrw = 0;
mutex_init(&hdq_data->hdq_mutex);
+ ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
+ if (ret < 0 || !strcmp(mode, "hdq")) {
+ hdq_data->mode = 0;
+ omap_w1_master.search = omap_w1_search_bus;
+ } else {
+ hdq_data->mode = 1;
+ omap_w1_master.triplet = omap_w1_triplet;
+ }
+
pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
goto err_w1;
}
- ret = _omap_hdq_reset(hdq_data);
- if (ret) {
- dev_dbg(&pdev->dev, "reset failed\n");
- goto err_irq;
- }
-
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
@@ -715,16 +603,8 @@ static int omap_hdq_probe(struct platform_device *pdev)
omap_hdq_break(hdq_data);
- pm_runtime_put_sync(&pdev->dev);
-
- ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
- if (ret < 0 || !strcmp(mode, "hdq")) {
- hdq_data->mode = 0;
- omap_w1_master.search = omap_w1_search_bus;
- } else {
- hdq_data->mode = 1;
- omap_w1_master.triplet = omap_w1_triplet;
- }
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
omap_w1_master.data = hdq_data;
@@ -739,6 +619,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
err_irq:
pm_runtime_put_sync(&pdev->dev);
err_w1:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
@@ -746,23 +627,19 @@ err_w1:
static int omap_hdq_remove(struct platform_device *pdev)
{
- struct hdq_data *hdq_data = platform_get_drvdata(pdev);
+ int active;
- mutex_lock(&hdq_data->hdq_mutex);
-
- if (hdq_data->hdq_usecount) {
- dev_dbg(&pdev->dev, "removed when use count is not zero\n");
- mutex_unlock(&hdq_data->hdq_mutex);
- return -EBUSY;
- }
+ active = pm_runtime_get_sync(&pdev->dev);
+ if (active < 0)
+ pm_runtime_put_noidle(&pdev->dev);
- mutex_unlock(&hdq_data->hdq_mutex);
+ w1_remove_master_device(&omap_w1_master);
- /* remove module dependency */
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (active >= 0)
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- w1_remove_master_device(&omap_w1_master);
-
return 0;
}
@@ -779,6 +656,7 @@ static struct platform_driver omap_hdq_driver = {
.driver = {
.name = "omap_hdq",
.of_match_table = omap_hdq_dt_ids,
+ .pm = &omap_hdq_pm_ops,
},
};
module_platform_driver(omap_hdq_driver);
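
The bulk of the omap_hdq rewrite replaces the driver's hand-rolled hdq_usecount/omap_hdq_get/omap_hdq_put machinery with runtime PM plus autosuspend: each bus operation brackets its hardware access with a reference, and the controller powers down on its own after the 300 ms idle delay set in probe. A minimal sketch of the bracket, assuming runtime PM is enabled on the device:

    #include <linux/pm_runtime.h>

    static int demo_io(struct device *dev)
    {
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
            /* get_sync keeps its reference even on failure */
            pm_runtime_put_noidle(dev);
            return ret;
        }
        /* ... touch the hardware here ... */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
    }
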
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1679e0dc869b..cec868f8db3f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG
config MAX77620_WATCHDOG
tristate "Maxim Max77620 Watchdog Timer"
depends on MFD_MAX77620 || COMPILE_TEST
+ select WATCHDOG_CORE
help
This is the driver for the Max77620 watchdog timer.
Say 'Y' here to enable the watchdog timer support for
@@ -1444,6 +1445,7 @@ config SMSC37B787_WDT
config TQMX86_WDT
tristate "TQ-Systems TQMX86 Watchdog Timer"
depends on X86
+ select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer in the TQMX86 IO
controller found on some of their ComExpress Modules.
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index abfe34dd760a..298d545df1a1 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -24,7 +24,10 @@
#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
#define AT91_WDT_WDV (0xfffUL << 0) /* Counter Value */
#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
+#define AT91_SAM9X60_PERIODRST BIT(4) /* Period Reset */
+#define AT91_SAM9X60_RPTHRST BIT(5) /* Minimum Restart Period */
#define AT91_WDT_WDFIEN BIT(12) /* Fault Interrupt Enable */
+#define AT91_SAM9X60_WDDIS BIT(12) /* Watchdog Disable */
#define AT91_WDT_WDRSTEN BIT(13) /* Reset Processor */
#define AT91_WDT_WDRPROC BIT(14) /* Timer Restart */
#define AT91_WDT_WDDIS BIT(15) /* Watchdog Disable */
@@ -37,4 +40,22 @@
#define AT91_WDT_WDUNF BIT(0) /* Watchdog Underflow */
#define AT91_WDT_WDERR BIT(1) /* Watchdog Error */
+/* Watchdog Timer Value Register */
+#define AT91_SAM9X60_VR 0x08
+
+/* Watchdog Window Level Register */
+#define AT91_SAM9X60_WLR 0x0c
+/* Watchdog Period Value */
+#define AT91_SAM9X60_COUNTER (0xfffUL << 0)
+#define AT91_SAM9X60_SET_COUNTER(x) ((x) & AT91_SAM9X60_COUNTER)
+
+/* Interrupt Enable Register */
+#define AT91_SAM9X60_IER 0x14
+/* Period Interrupt Enable */
+#define AT91_SAM9X60_PERINT BIT(0)
+/* Interrupt Disable Register */
+#define AT91_SAM9X60_IDR 0x18
+/* Interrupt Status Register */
+#define AT91_SAM9X60_ISR 0x1c
+
#endif
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8a043b52aa2f..7cdb25363ea0 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -246,7 +246,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start,
+ bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start,
resource_size(r));
if (!bcm63xx_wdt_device.regs) {
dev_err(&pdev->dev, "failed to remap I/O resources\n");
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 06bd4e1a5923..672b184da875 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -369,9 +369,8 @@ static int cdns_wdt_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, wdt);
- dev_info(dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
- wdt->regs, cdns_wdt_device->timeout,
- nowayout ? ", nowayout" : "");
+ dev_info(dev, "Xilinx Watchdog Timer with timeout %ds%s\n",
+ cdns_wdt_device->timeout, nowayout ? ", nowayout" : "");
return 0;
}
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index e149e66a6ea9..47eefe072b40 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mfd/da9062/registers.h>
@@ -147,12 +148,13 @@ static int da9062_wdt_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+ struct i2c_client *client = to_i2c_client(wdt->hw->dev);
int ret;
- ret = regmap_write(wdt->hw->regmap,
- DA9062AA_CONTROL_F,
- DA9062AA_SHUTDOWN_MASK);
- if (ret)
+ /* Don't use regmap because it is not safe in atomic context */
+ ret = i2c_smbus_write_byte_data(client, DA9062AA_CONTROL_F,
+ DA9062AA_SHUTDOWN_MASK);
+ if (ret < 0)
dev_alert(wdt->hw->dev, "Failed to shutdown (err = %d)\n",
ret);
@@ -212,6 +214,7 @@ static int da9062_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(&wdt->wdtdev, 128);
watchdog_set_drvdata(&wdt->wdtdev, wdt);
+ dev_set_drvdata(dev, &wdt->wdtdev);
ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
if (ret < 0)
@@ -220,10 +223,34 @@ static int da9062_wdt_probe(struct platform_device *pdev)
return da9062_wdt_ping(&wdt->wdtdev);
}
+static int __maybe_unused da9062_wdt_suspend(struct device *dev)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ if (watchdog_active(wdd))
+ return da9062_wdt_stop(wdd);
+
+ return 0;
+}
+
+static int __maybe_unused da9062_wdt_resume(struct device *dev)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ if (watchdog_active(wdd))
+ return da9062_wdt_start(wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(da9062_wdt_pm_ops,
+ da9062_wdt_suspend, da9062_wdt_resume);
+
static struct platform_driver da9062_wdt_driver = {
.probe = da9062_wdt_probe,
.driver = {
.name = "da9062-watchdog",
+ .pm = &da9062_wdt_pm_ops,
.of_match_table = da9062_compatible_id_table,
},
};
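
The da9062 restart fix matters because the restart hook can run late in shutdown with interrupts disabled, where regmap's sleeping lock must not be taken; issuing the shutdown write through the raw SMBus helper sidesteps that lock. A sketch with placeholder register values (the real offsets live in the DA9062 mfd headers):

    #include <linux/i2c.h>

    #define DEMO_SHUTDOWN_REG  0x13  /* placeholder, not the real offset */
    #define DEMO_SHUTDOWN_MASK 0x02  /* placeholder bit */

    static int demo_restart(struct i2c_client *client)
    {
        /* raw SMBus write: regmap's lock is never taken on this path */
        return i2c_smbus_write_byte_data(client, DEMO_SHUTDOWN_REG,
                                         DEMO_SHUTDOWN_MASK);
    }
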
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index fef7c61f5555..fba21de2bbad 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -114,7 +114,15 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
- wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+ /*
+ * In case users set a bigger timeout value than the hardware can
+ * support, the kernel (watchdog_dev.c) helps to feed the watchdog
+ * before wdd->max_hw_heartbeat_ms elapses.
+ */
+ if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
+ wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+ else
+ wdd->timeout = top_s;
return 0;
}
@@ -135,6 +143,7 @@ static int dw_wdt_start(struct watchdog_device *wdd)
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
dw_wdt_set_timeout(wdd, wdd->timeout);
+ dw_wdt_ping(&dw_wdt->wdd);
dw_wdt_arm_system_reset(dw_wdt);
return 0;
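
With this dw_wdt change, a request larger than the hardware counter is kept in wdd->timeout as-is; the watchdog core then re-arms the device every max_hw_heartbeat_ms while still honouring the longer software timeout. A standalone sketch of the selection logic:

    #include <stdio.h>

    static unsigned int effective_timeout(unsigned int top_s,
                                          unsigned int hw_max_ms,
                                          unsigned int hw_top_s)
    {
        /* hw_top_s is the value the counter can actually realize */
        return (top_s * 1000 <= hw_max_ms) ? hw_top_s : top_s;
    }

    int main(void)
    {
        /* hardware supports at most 10 s; user asks for 60 s */
        printf("%u\n", effective_timeout(60, 10000, 10));  /* 60 */
        /* request fits: report the hardware-rounded value */
        printf("%u\n", effective_timeout(5, 10000, 8));    /* 8 */
        return 0;
    }
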
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 0a87c6f4bab2..11b9e7c6b7f5 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
{
struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- imx7ulp_wdt_enable(wdt->base, true);
+ imx7ulp_wdt_enable(wdog, true);
imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
/* wait for wdog to fire */
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 6ad5bf3451ec..804e35940983 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -463,7 +463,7 @@ static int __init intel_scu_watchdog_init(void)
return -ENODEV;
}
- tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr,
20);
if (tmp_addr == NULL) {
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index a4b71ebc8cab..f3bf3ea50e39 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -67,6 +67,7 @@
#define IT8726_ID 0x8726 /* the data sheet wrongly suggests 0x8716 */
#define IT8728_ID 0x8728
#define IT8783_ID 0x8783
+#define IT8786_ID 0x8786
/* GPIO Configuration Registers LDN=0x07 */
#define WDTCTRL 0x71
@@ -294,6 +295,7 @@ static int __init it87_wdt_init(void)
case IT8721_ID:
case IT8728_ID:
case IT8783_ID:
+ case IT8786_ID:
max_units = 65535;
break;
case IT8705_ID:
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 9c3d0033260d..d6a6393f609d 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -9,6 +9,9 @@
* Based on sunxi_wdt.c
*/
+#include <dt-bindings/reset-controller/mt2712-resets.h>
+#include <dt-bindings/reset-controller/mt8183-resets.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -16,10 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
#include <linux/types.h>
#include <linux/watchdog.h>
-#include <linux/delay.h>
#define WDT_MAX_TIMEOUT 31
#define WDT_MIN_TIMEOUT 1
@@ -44,6 +48,9 @@
#define WDT_SWRST 0x14
#define WDT_SWRST_KEY 0x1209
+#define WDT_SWSYSRST 0x18U
+#define WDT_SWSYS_RST_KEY 0x88000000
+
#define DRV_NAME "mtk-wdt"
#define DRV_VERSION "1.0"
@@ -53,8 +60,94 @@ static unsigned int timeout;
struct mtk_wdt_dev {
struct watchdog_device wdt_dev;
void __iomem *wdt_base;
+ spinlock_t lock; /* protects WDT_SWSYSRST reg */
+ struct reset_controller_dev rcdev;
+};
+
+struct mtk_wdt_data {
+ int toprgu_sw_rst_num;
};
+static const struct mtk_wdt_data mt2712_data = {
+ .toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM,
+};
+
+static const struct mtk_wdt_data mt8183_data = {
+ .toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM,
+};
+
+static int toprgu_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ unsigned int tmp;
+ unsigned long flags;
+ struct mtk_wdt_dev *data =
+ container_of(rcdev, struct mtk_wdt_dev, rcdev);
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ tmp = readl(data->wdt_base + WDT_SWSYSRST);
+ if (assert)
+ tmp |= BIT(id);
+ else
+ tmp &= ~BIT(id);
+ tmp |= WDT_SWSYS_RST_KEY;
+ writel(tmp, data->wdt_base + WDT_SWSYSRST);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int toprgu_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return toprgu_reset_update(rcdev, id, true);
+}
+
+static int toprgu_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return toprgu_reset_update(rcdev, id, false);
+}
+
+static int toprgu_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = toprgu_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ return toprgu_reset_deassert(rcdev, id);
+}
+
+static const struct reset_control_ops toprgu_reset_ops = {
+ .assert = toprgu_reset_assert,
+ .deassert = toprgu_reset_deassert,
+ .reset = toprgu_reset,
+};
+
+static int toprgu_register_reset_controller(struct platform_device *pdev,
+ int rst_num)
+{
+ int ret;
+ struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
+
+ spin_lock_init(&mtk_wdt->lock);
+
+ mtk_wdt->rcdev.owner = THIS_MODULE;
+ mtk_wdt->rcdev.nr_resets = rst_num;
+ mtk_wdt->rcdev.ops = &toprgu_reset_ops;
+ mtk_wdt->rcdev.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &mtk_wdt->rcdev);
+ if (ret != 0)
+ dev_err(&pdev->dev,
+ "couldn't register wdt reset controller: %d\n", ret);
+ return ret;
+}
+
static int mtk_wdt_restart(struct watchdog_device *wdt_dev,
unsigned long action, void *data)
{
@@ -155,6 +248,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_wdt_dev *mtk_wdt;
+ const struct mtk_wdt_data *wdt_data;
int err;
mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL);
@@ -190,6 +284,13 @@ static int mtk_wdt_probe(struct platform_device *pdev)
dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
mtk_wdt->wdt_dev.timeout, nowayout);
+ wdt_data = of_device_get_match_data(dev);
+ if (wdt_data) {
+ err = toprgu_register_reset_controller(pdev,
+ wdt_data->toprgu_sw_rst_num);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -218,7 +319,9 @@ static int mtk_wdt_resume(struct device *dev)
#endif
static const struct of_device_id mtk_wdt_dt_ids[] = {
+ { .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
{ .compatible = "mediatek,mt6589-wdt" },
+ { .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
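
The new mt2712/mt8183 support exposes the watchdog's TOPRGU bank as a reset controller. toprgu_reset_update() is a locked read-modify-write, and every write must carry the magic key for the hardware to accept it; a standalone sketch of just that bit arithmetic, reusing the key value from the hunk above:

    #include <stdint.h>
    #include <stdio.h>

    #define RST_KEY 0x88000000u  /* WDT_SWSYS_RST_KEY from the diff */

    static uint32_t update(uint32_t reg, unsigned int id, int assert)
    {
        if (assert)
            reg |= 1u << id;
        else
            reg &= ~(1u << id);
        return reg | RST_KEY;  /* writes without the key are ignored */
    }

    int main(void)
    {
        printf("0x%08x\n", update(0, 3, 1));  /* prints 0x88000008 */
        return 0;
    }
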
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 1cccf8eb1c5d..8e6dfe76f9c9 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
/* Request the IRQ only after the watchdog is disabled */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
/*
* Not all supported platforms specify an interrupt for the
@@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
}
/* Optional 2nd interrupt for pretimeout */
- irq = platform_get_irq(pdev, 1);
+ irq = platform_get_irq_optional(pdev, 1);
if (irq > 0) {
orion_wdt_info.options |= WDIOF_PRETIMEOUT;
ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index a494543d3ae1..eb47fe5ed280 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
}
/* check if there is pretimeout support */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
ret = devm_request_irq(dev, irq, qcom_wdt_isr,
IRQF_TRIGGER_RISING,
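
Both the orion and qcom conversions switch to platform_get_irq_optional(), which behaves like platform_get_irq() but stays silent when the line simply is not wired up, appropriate for genuinely optional IRQs such as pretimeout. A hedged sketch, with illustrative names:

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_pretimeout_isr(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    static void demo_request_pretimeout(struct platform_device *pdev)
    {
        int irq = platform_get_irq_optional(pdev, 0);

        if (irq <= 0)
            return;  /* not wired up on this board; not an error */
        if (devm_request_irq(&pdev->dev, irq, demo_pretimeout_isr,
                             IRQF_TRIGGER_RISING, "demo-pretimeout", pdev))
            dev_warn(&pdev->dev, "pretimeout IRQ request failed\n");
    }
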
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 1dfede0abf18..aee3c2efd565 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-#include <linux/io.h> /* For devm_ioremap_nocache */
+#include <linux/io.h> /* For devm_ioremap */
#include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */
@@ -267,7 +267,7 @@ static int rc32434_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
+ wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!wdt_reg) {
pr_err("failed to remap I/O resources\n");
return -ENXIO;
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index 234876047431..6e524c8e26a8 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = {
module_platform_driver(rn5t618_wdt_driver);
+MODULE_ALIAS("platform:rn5t618-wdt");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 watchdog driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index d193a60430b2..e5d11d6a2600 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -2,7 +2,7 @@
/*
* Driver for Atmel SAMA5D4 Watchdog Timer
*
- * Copyright (C) 2015 Atmel Corporation
+ * Copyright (C) 2015-2019 Microchip Technology Inc. and its subsidiaries
*/
#include <linux/delay.h>
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -29,7 +30,10 @@ struct sama5d4_wdt {
struct watchdog_device wdd;
void __iomem *reg_base;
u32 mr;
+ u32 ir;
unsigned long last_ping;
+ bool need_irq;
+ bool sam9x60_support;
};
static int wdt_timeout;
@@ -78,7 +82,12 @@ static int sama5d4_wdt_start(struct watchdog_device *wdd)
{
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
- wdt->mr &= ~AT91_WDT_WDDIS;
+ if (wdt->sam9x60_support) {
+ writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IER);
+ wdt->mr &= ~AT91_SAM9X60_WDDIS;
+ } else {
+ wdt->mr &= ~AT91_WDT_WDDIS;
+ }
wdt_write(wdt, AT91_WDT_MR, wdt->mr);
return 0;
@@ -88,7 +97,12 @@ static int sama5d4_wdt_stop(struct watchdog_device *wdd)
{
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
- wdt->mr |= AT91_WDT_WDDIS;
+ if (wdt->sam9x60_support) {
+ writel_relaxed(wdt->ir, wdt->reg_base + AT91_SAM9X60_IDR);
+ wdt->mr |= AT91_SAM9X60_WDDIS;
+ } else {
+ wdt->mr |= AT91_WDT_WDDIS;
+ }
wdt_write(wdt, AT91_WDT_MR, wdt->mr);
return 0;
@@ -109,6 +123,14 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
struct sama5d4_wdt *wdt = watchdog_get_drvdata(wdd);
u32 value = WDT_SEC2TICKS(timeout);
+ if (wdt->sam9x60_support) {
+ wdt_write(wdt, AT91_SAM9X60_WLR,
+ AT91_SAM9X60_SET_COUNTER(value));
+
+ wdd->timeout = timeout;
+ return 0;
+ }
+
wdt->mr &= ~AT91_WDT_WDV;
wdt->mr |= AT91_WDT_SET_WDV(value);
@@ -143,8 +165,14 @@ static const struct watchdog_ops sama5d4_wdt_ops = {
static irqreturn_t sama5d4_wdt_irq_handler(int irq, void *dev_id)
{
struct sama5d4_wdt *wdt = platform_get_drvdata(dev_id);
+ u32 reg;
- if (wdt_read(wdt, AT91_WDT_SR)) {
+ if (wdt->sam9x60_support)
+ reg = wdt_read(wdt, AT91_SAM9X60_ISR);
+ else
+ reg = wdt_read(wdt, AT91_WDT_SR);
+
+ if (reg) {
pr_crit("Atmel Watchdog Software Reset\n");
emergency_restart();
pr_crit("Reboot didn't succeed\n");
@@ -157,13 +185,14 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
{
const char *tmp;
- wdt->mr = AT91_WDT_WDDIS;
+ if (wdt->sam9x60_support)
+ wdt->mr = AT91_SAM9X60_WDDIS;
+ else
+ wdt->mr = AT91_WDT_WDDIS;
if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
!strcmp(tmp, "software"))
- wdt->mr |= AT91_WDT_WDFIEN;
- else
- wdt->mr |= AT91_WDT_WDRSTEN;
+ wdt->need_irq = true;
if (of_property_read_bool(np, "atmel,idle-halt"))
wdt->mr |= AT91_WDT_WDIDLEHLT;
@@ -176,21 +205,46 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
{
- u32 reg;
+ u32 reg, val;
+
+ val = WDT_SEC2TICKS(WDT_DEFAULT_TIMEOUT);
/*
* When booting and resuming, the bootloader may have changed the
* watchdog configuration.
* If the watchdog is already running, we can safely update it.
* Else, we have to disable it properly.
*/
- if (wdt_enabled) {
- wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
- } else {
+ if (!wdt_enabled) {
reg = wdt_read(wdt, AT91_WDT_MR);
- if (!(reg & AT91_WDT_WDDIS))
+ if (wdt->sam9x60_support && (!(reg & AT91_SAM9X60_WDDIS)))
+ wdt_write_nosleep(wdt, AT91_WDT_MR,
+ reg | AT91_SAM9X60_WDDIS);
+ else if (!wdt->sam9x60_support &&
+ (!(reg & AT91_WDT_WDDIS)))
wdt_write_nosleep(wdt, AT91_WDT_MR,
reg | AT91_WDT_WDDIS);
}
+
+ if (wdt->sam9x60_support) {
+ if (wdt->need_irq)
+ wdt->ir = AT91_SAM9X60_PERINT;
+ else
+ wdt->mr |= AT91_SAM9X60_PERIODRST;
+
+ wdt_write(wdt, AT91_SAM9X60_IER, wdt->ir);
+ wdt_write(wdt, AT91_SAM9X60_WLR, AT91_SAM9X60_SET_COUNTER(val));
+ } else {
+ wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT));
+ wdt->mr |= AT91_WDT_SET_WDV(val);
+
+ if (wdt->need_irq)
+ wdt->mr |= AT91_WDT_WDFIEN;
+ else
+ wdt->mr |= AT91_WDT_WDRSTEN;
+ }
+
+ wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
+
return 0;
}
@@ -201,7 +255,6 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
struct sama5d4_wdt *wdt;
void __iomem *regs;
u32 irq = 0;
- u32 timeout;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -215,6 +268,8 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = MIN_WDT_TIMEOUT;
wdd->max_timeout = MAX_WDT_TIMEOUT;
wdt->last_ping = jiffies;
+ wdt->sam9x60_support = of_device_is_compatible(dev->of_node,
+ "microchip,sam9x60-wdt");
watchdog_set_drvdata(wdd, wdt);
@@ -224,15 +279,19 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
wdt->reg_base = regs;
- irq = irq_of_parse_and_map(dev->of_node, 0);
- if (!irq)
- dev_warn(dev, "failed to get IRQ from DT\n");
-
ret = of_sama5d4_wdt_init(dev->of_node, wdt);
if (ret)
return ret;
- if ((wdt->mr & AT91_WDT_WDFIEN) && irq) {
+ if (wdt->need_irq) {
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq) {
+ dev_warn(dev, "failed to get IRQ from DT\n");
+ wdt->need_irq = false;
+ }
+ }
+
+ if (wdt->need_irq) {
ret = devm_request_irq(dev, irq, sama5d4_wdt_irq_handler,
IRQF_SHARED | IRQF_IRQPOLL |
IRQF_NO_SUSPEND, pdev->name, pdev);
@@ -244,11 +303,6 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_init_timeout(wdd, wdt_timeout, dev);
- timeout = WDT_SEC2TICKS(wdd->timeout);
-
- wdt->mr |= AT91_WDT_SET_WDD(WDT_SEC2TICKS(MAX_WDT_TIMEOUT));
- wdt->mr |= AT91_WDT_SET_WDV(timeout);
-
ret = sama5d4_wdt_init(wdt);
if (ret)
return ret;
@@ -269,7 +323,12 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
}
static const struct of_device_id sama5d4_wdt_of_match[] = {
- { .compatible = "atmel,sama5d4-wdt", },
+ {
+ .compatible = "atmel,sama5d4-wdt",
+ },
+ {
+ .compatible = "microchip,sam9x60-wdt",
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index a3a329011a06..25188d6bbe15 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_init_timeout(wdd, 0, dev);
+ /*
+ * If CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set (meaning U-Boot or
+ * another bootloader left the watchdog running), we must prevent any
+ * side effects here before the user-space daemon takes over. Since
+ * there is no way to read values back from the hardware, the best
+ * option is to enforce that the watchdog runs with deterministic
+ * values.
+ */
+ if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
+ ret = stm32_iwdg_start(wdd);
+ if (ret)
+ return ret;
+
+ /* Make sure the watchdog is serviced */
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ }
+
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
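
Setting WDOG_HW_RUNNING tells the watchdog core the hardware is already ticking, so the core pings it on the driver's behalf until user space opens the device; combined with the start() call, the boot-enabled watchdog runs with known values instead of whatever the bootloader left behind. A minimal sketch of the adoption pattern:

    #include <linux/watchdog.h>

    static int demo_adopt_running_wdt(struct watchdog_device *wdd)
    {
        int ret = wdd->ops->start(wdd);  /* reprogram to known values */

        if (ret)
            return ret;
        /* core now services the device until user space opens it */
        set_bit(WDOG_HW_RUNNING, &wdd->status);
        return 0;
    }
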
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index fdf533fe0bb2..56a4a4030ca9 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -420,7 +420,7 @@ static int wdt_find(int addr)
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
case NCT6116_ID:
- ret = nct6102;
+ ret = nct6116;
cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 21e8085b848b..861daf4f37b2 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -147,6 +147,25 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
+static int watchdog_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ struct watchdog_device *wdd;
+
+ wdd = container_of(nb, struct watchdog_device, reboot_nb);
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ if (watchdog_active(wdd)) {
+ int ret;
+
+ ret = wdd->ops->stop(wdd);
+ if (ret)
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
static int watchdog_restart_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -235,6 +254,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
}
+ if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
+ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+ ret = register_reboot_notifier(&wdd->reboot_nb);
+ if (ret) {
+ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+ wdd->id, ret);
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
+ }
+
if (wdd->ops->restart) {
wdd->restart_nb.notifier_call = watchdog_restart_notifier;
@@ -289,6 +321,9 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
if (wdd->ops->restart)
unregister_restart_handler(&wdd->restart_nb);
+ if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
+ unregister_reboot_notifier(&wdd->reboot_nb);
+
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, wdd->id);
}
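
The notifier body is unchanged; moving it from watchdog_dev.c into watchdog_core.c pairs registration and unregistration in one place instead of relying on the devm variant. For reference, the general shape of such a reboot notifier, with illustrative names:

    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int demo_reboot_cb(struct notifier_block *nb, unsigned long code,
                              void *data)
    {
        if (code == SYS_DOWN || code == SYS_HALT) {
            /* quiesce the hardware here */
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block demo_reboot_nb = {
        .notifier_call = demo_reboot_cb,
    };

    /* register_reboot_notifier(&demo_reboot_nb) at setup,
     * unregister_reboot_notifier(&demo_reboot_nb) at teardown */
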
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 4b2a85438478..8b5c742f24e8 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -38,7 +38,6 @@
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
#include <linux/mutex.h> /* For mutexes */
-#include <linux/reboot.h> /* For reboot notifier */
#include <linux/slab.h> /* For memory functions */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/watchdog.h> /* For watchdog specific items */
@@ -1097,25 +1096,6 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
put_device(&wd_data->dev);
}
-static int watchdog_reboot_notifier(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- struct watchdog_device *wdd;
-
- wdd = container_of(nb, struct watchdog_device, reboot_nb);
- if (code == SYS_DOWN || code == SYS_HALT) {
- if (watchdog_active(wdd)) {
- int ret;
-
- ret = wdd->ops->stop(wdd);
- if (ret)
- return NOTIFY_BAD;
- }
- }
-
- return NOTIFY_DONE;
-}
-
/*
* watchdog_dev_register: register a watchdog device
* @wdd: watchdog device
@@ -1134,22 +1114,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
return ret;
ret = watchdog_register_pretimeout(wdd);
- if (ret) {
+ if (ret)
watchdog_cdev_unregister(wdd);
- return ret;
- }
-
- if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
- ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
- &wdd->reboot_nb);
- if (ret) {
- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
- wdd->id, ret);
- watchdog_dev_unregister(wdd);
- }
- }
return ret;
}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4fc83e3f5ad3..0258415ca0b2 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1006,19 +1006,19 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
}
mutex_unlock(&priv->lock);
- /*
- * gntdev takes the address of the PTE in find_grant_ptes() and passes
- * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
- * the notifier is to prevent the hypervisor pointer to the PTE from
- * going stale.
- *
- * Since this vma's mappings can't be touched without the mmap_sem,
- * and we are holding it now, there is no need for the notifier_range
- * locking pattern.
- */
- mmu_interval_read_begin(&map->notifier);
-
if (use_ptemod) {
+ /*
+ * gntdev takes the address of the PTE in find_grant_ptes() and
+ * passes it to the hypervisor in gntdev_map_grant_pages(). The
+ * purpose of the notifier is to prevent the hypervisor pointer
+ * to the PTE from going stale.
+ *
+ * Since this vma's mappings can't be touched without the
+ * mmap_sem, and we are holding it now, there is no need for
+ * the notifier_range locking pattern.
+ */
+ mmu_interval_read_begin(&map->notifier);
+
map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 49b381e104ef..7b36b51cdb9f 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -664,7 +664,6 @@ static int grow_gnttab_list(unsigned int more_frames)
unsigned int nr_glist_frames, new_nr_glist_frames;
unsigned int grefs_per_frame;
- BUG_ON(gnttab_interface == NULL);
grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
new_nr_grant_frames = nr_grant_frames + more_frames;
@@ -1160,7 +1159,6 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
- BUG_ON(gnttab_interface == NULL);
return gnttab_frames(nr_grant_frames, SPP);
}
@@ -1388,7 +1386,6 @@ static int gnttab_expand(unsigned int req_entries)
int rc;
unsigned int cur, extra;
- BUG_ON(gnttab_interface == NULL);
cur = nr_grant_frames;
extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
gnttab_interface->grefs_per_grant_frame);
@@ -1423,7 +1420,6 @@ int gnttab_init(void)
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
- BUG_ON(gnttab_interface == NULL);
max_nr_glist_frames = (max_nr_grant_frames *
gnttab_interface->grefs_per_grant_frame / RPP);
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index 8b9919c26095..70650b248de5 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -8,7 +8,7 @@
#include <linux/sched.h>
#include <xen/xen-ops.h>
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
/*
* Some hypercalls issued by the toolstack can take many 10s of
@@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
__this_cpu_write(xen_in_preemptible_hcall, true);
}
}
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 6d12fc368210..a8d24433c8e9 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -94,7 +94,7 @@ static void watch_target(struct xenbus_watch *watch,
"%llu", &static_max) == 1))
static_max >>= PAGE_SHIFT - 10;
else
- static_max = new_target;
+ static_max = balloon_stats.current_pages;
target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
: static_max - balloon_stats.target_pages;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 60111719b01f..b20e43e148ce 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -286,6 +286,43 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
return xen_pcibios_err_to_errno(err);
}
+int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+{
+ int err;
+ u16 val;
+ int ret = 0;
+
+ err = pci_read_config_word(dev, PCI_COMMAND, &val);
+ if (err)
+ return err;
+ if (!(val & PCI_COMMAND_INTX_DISABLE))
+ ret |= INTERRUPT_TYPE_INTX;
+
+ /*
+ * Do not trust dev->msi(x)_enabled here, as enabling could be done
+ * bypassing the pci_*msi* functions, e.g. by qemu.
+ */
+ if (dev->msi_cap) {
+ err = pci_read_config_word(dev,
+ dev->msi_cap + PCI_MSI_FLAGS,
+ &val);
+ if (err)
+ return err;
+ if (val & PCI_MSI_FLAGS_ENABLE)
+ ret |= INTERRUPT_TYPE_MSI;
+ }
+ if (dev->msix_cap) {
+ err = pci_read_config_word(dev,
+ dev->msix_cap + PCI_MSIX_FLAGS,
+ &val);
+ if (err)
+ return err;
+ if (val & PCI_MSIX_FLAGS_ENABLE)
+ ret |= INTERRUPT_TYPE_MSIX;
+ }
+ return ret;
+}
+
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
{
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 22db630717ea..28c45180a12e 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -65,6 +65,11 @@ struct config_field_entry {
void *data;
};
+#define INTERRUPT_TYPE_NONE (1<<0)
+#define INTERRUPT_TYPE_INTX (1<<1)
+#define INTERRUPT_TYPE_MSI (1<<2)
+#define INTERRUPT_TYPE_MSIX (1<<3)
+
extern bool xen_pcibk_permissive;
#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
@@ -126,4 +131,6 @@ int xen_pcibk_config_capability_init(void);
int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
+int xen_pcibk_get_interrupt_type(struct pci_dev *dev);
+
#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index e5694133ebe5..22f13abbe913 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -189,6 +189,85 @@ static const struct config_field caplist_pm[] = {
{}
};
+static struct msi_msix_field_config {
+ u16 enable_bit; /* bit for enabling MSI/MSI-X */
+ unsigned int int_type; /* interrupt type for exclusiveness check */
+} msi_field_config = {
+ .enable_bit = PCI_MSI_FLAGS_ENABLE,
+ .int_type = INTERRUPT_TYPE_MSI,
+}, msix_field_config = {
+ .enable_bit = PCI_MSIX_FLAGS_ENABLE,
+ .int_type = INTERRUPT_TYPE_MSIX,
+};
+
+static void *msi_field_init(struct pci_dev *dev, int offset)
+{
+ return &msi_field_config;
+}
+
+static void *msix_field_init(struct pci_dev *dev, int offset)
+{
+ return &msix_field_config;
+}
+
+static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+ void *data)
+{
+ int err;
+ u16 old_value;
+ const struct msi_msix_field_config *field_config = data;
+ const struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+ if (xen_pcibk_permissive || dev_data->permissive)
+ goto write;
+
+ err = pci_read_config_word(dev, offset, &old_value);
+ if (err)
+ return err;
+
+ if (new_value == old_value)
+ return 0;
+
+ if (!dev_data->allow_interrupt_control ||
+ (new_value ^ old_value) & ~field_config->enable_bit)
+ return PCIBIOS_SET_FAILED;
+
+ if (new_value & field_config->enable_bit) {
+ /* don't allow enabling together with other interrupt types */
+ int int_type = xen_pcibk_get_interrupt_type(dev);
+
+ if (int_type == INTERRUPT_TYPE_NONE ||
+ int_type == field_config->int_type)
+ goto write;
+ return PCIBIOS_SET_FAILED;
+ }
+
+write:
+ return pci_write_config_word(dev, offset, new_value);
+}
+
+static const struct config_field caplist_msix[] = {
+ {
+ .offset = PCI_MSIX_FLAGS,
+ .size = 2,
+ .init = msix_field_init,
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = msi_msix_flags_write,
+ },
+ {}
+};
+
+static const struct config_field caplist_msi[] = {
+ {
+ .offset = PCI_MSI_FLAGS,
+ .size = 2,
+ .init = msi_field_init,
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = msi_msix_flags_write,
+ },
+ {}
+};
+
static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
@@ -197,11 +276,21 @@ static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_msi = {
+ .capability = PCI_CAP_ID_MSI,
+ .fields = caplist_msi,
+};
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_msix = {
+ .capability = PCI_CAP_ID_MSIX,
+ .fields = caplist_msix,
+};
int xen_pcibk_config_capability_init(void)
{
register_capability(&xen_pcibk_config_capability_vpd);
register_capability(&xen_pcibk_config_capability_pm);
+ register_capability(&xen_pcibk_config_capability_msi);
+ register_capability(&xen_pcibk_config_capability_msix);
return 0;
}
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 10ae24b5a76e..fb4fccb4aecc 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -117,6 +117,25 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
pci_clear_mwi(dev);
}
+ if (dev_data && dev_data->allow_interrupt_control) {
+ if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+ if (value & PCI_COMMAND_INTX_DISABLE) {
+ pci_intx(dev, 0);
+ } else {
+ /* Do not allow enabling INTx together with MSI or MSI-X. */
+ switch (xen_pcibk_get_interrupt_type(dev)) {
+ case INTERRUPT_TYPE_NONE:
+ pci_intx(dev, 1);
+ break;
+ case INTERRUPT_TYPE_INTX:
+ break;
+ default:
+ return PCIBIOS_SET_FAILED;
+ }
+ }
+ }
+ }
+
cmd->val = value;
if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 097410a7cdb7..7af93d65ed51 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -304,6 +304,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
xen_pcibk_config_reset_dev(dev);
xen_pcibk_config_free_dyn_fields(dev);
+ dev_data->allow_interrupt_control = 0;
+
xen_unregister_device_domain_owner(dev);
spin_lock_irqsave(&found_psdev->lock, flags);
@@ -1431,6 +1433,65 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
}
static DRIVER_ATTR_RW(permissive);
+static ssize_t allow_interrupt_control_store(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+ if (!psdev) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ dev_data = pci_get_drvdata(psdev->dev);
+ /* the driver data for a device should never be null at this point */
+ if (!dev_data) {
+ err = -ENXIO;
+ goto release;
+ }
+ dev_data->allow_interrupt_control = 1;
+release:
+ pcistub_device_put(psdev);
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+static ssize_t allow_interrupt_control_show(struct device_driver *drv,
+ char *buf)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ size_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (count >= PAGE_SIZE)
+ break;
+ if (!psdev->dev)
+ continue;
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data || !dev_data->allow_interrupt_control)
+ continue;
+ count +=
+ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+ pci_name(psdev->dev));
+ }
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return count;
+}
+static DRIVER_ATTR_RW(allow_interrupt_control);
+
static void pcistub_exit(void)
{
driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
@@ -1441,6 +1502,8 @@ static void pcistub_exit(void)
driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_permissive);
driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_allow_interrupt_control);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_irq_handlers);
driver_remove_file(&xen_pcibk_pci_driver.driver,
&driver_attr_irq_handler_state);
@@ -1530,6 +1593,9 @@ static int __init pcistub_init(void)
if (!err)
err = driver_create_file(&xen_pcibk_pci_driver.driver,
&driver_attr_permissive);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_allow_interrupt_control);
if (!err)
err = driver_create_file(&xen_pcibk_pci_driver.driver,
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 263c059bff90..ce1077e32466 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -45,6 +45,7 @@ struct xen_pcibk_dev_data {
struct list_head config_fields;
struct pci_saved_state *pci_saved_state;
unsigned int permissive:1;
+ unsigned int allow_interrupt_control:1;
unsigned int warned_on_write:1;
unsigned int enable_intx:1;
unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index d75a2385b37c..5f5b8a7d5b80 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -116,8 +116,6 @@ int xenbus_probe_devices(struct xen_bus_type *bus);
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
-void xenbus_dev_shutdown(struct device *_dev);
-
int xenbus_dev_suspend(struct device *dev);
int xenbus_dev_resume(struct device *dev);
int xenbus_dev_cancel(struct device *dev);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index c21be6e9d38a..66975da4f3b6 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -239,7 +239,9 @@ int xenbus_dev_probe(struct device *_dev)
goto fail;
}
+ spin_lock(&dev->reclaim_lock);
err = drv->probe(dev, id);
+ spin_unlock(&dev->reclaim_lock);
if (err)
goto fail_put;
@@ -255,7 +257,6 @@ fail_put:
module_put(drv->driver.owner);
fail:
xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
- xenbus_switch_state(dev, XenbusStateClosed);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);
@@ -269,41 +270,30 @@ int xenbus_dev_remove(struct device *_dev)
free_otherend_watch(dev);
- if (drv->remove)
+ if (drv->remove) {
+ spin_lock(&dev->reclaim_lock);
drv->remove(dev);
+ spin_unlock(&dev->reclaim_lock);
+ }
module_put(drv->driver.owner);
free_otherend_details(dev);
- xenbus_switch_state(dev, XenbusStateClosed);
+ /*
+ * If the toolstack has forced the device state to closing then set
+ * the state to closed now to allow it to be cleaned up.
+ * Similarly, if the driver does not support re-bind, set the state
+ * to closed.
+ */
+ if (!drv->allow_rebind ||
+ xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
+ xenbus_switch_state(dev, XenbusStateClosed);
+
return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);
-void xenbus_dev_shutdown(struct device *_dev)
-{
- struct xenbus_device *dev = to_xenbus_device(_dev);
- unsigned long timeout = 5*HZ;
-
- DPRINTK("%s", dev->nodename);
-
- get_device(&dev->dev);
- if (dev->state != XenbusStateConnected) {
- pr_info("%s: %s: %s != Connected, skipping\n",
- __func__, dev->nodename, xenbus_strstate(dev->state));
- goto out;
- }
- xenbus_switch_state(dev, XenbusStateClosing);
- timeout = wait_for_completion_timeout(&dev->down, timeout);
- if (!timeout)
- pr_info("%s: %s timeout closing device\n",
- __func__, dev->nodename);
- out:
- put_device(&dev->dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
-
int xenbus_register_driver_common(struct xenbus_driver *drv,
struct xen_bus_type *bus,
struct module *owner, const char *mod_name)
@@ -483,6 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
goto fail;
dev_set_name(&xendev->dev, "%s", devname);
+ spin_lock_init(&xendev->reclaim_lock);
/* Register with generic device framework. */
err = device_register(&xendev->dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index b0bed4faf44c..791f6fe01e91 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -198,7 +198,6 @@ static struct xen_bus_type xenbus_backend = {
.uevent = xenbus_uevent_backend,
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
- .shutdown = xenbus_dev_shutdown,
.dev_groups = xenbus_dev_groups,
},
};
@@ -248,6 +247,41 @@ static int backend_probe_and_watch(struct notifier_block *notifier,
return NOTIFY_DONE;
}
+static int backend_reclaim_memory(struct device *dev, void *data)
+{
+ const struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ if (!dev->driver)
+ return 0;
+ drv = to_xenbus_driver(dev->driver);
+ if (drv && drv->reclaim_memory) {
+ xdev = to_xenbus_device(dev);
+ if (!spin_trylock(&xdev->reclaim_lock))
+ return 0;
+ drv->reclaim_memory(xdev);
+ spin_unlock(&xdev->reclaim_lock);
+ }
+ return 0;
+}
+
+/*
+ * Always returns 0 because the shrinker is used only to detect
+ * memory pressure.
+ */
+static unsigned long backend_shrink_memory_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL,
+ backend_reclaim_memory);
+ return 0;
+}
+
+static struct shrinker backend_memory_shrinker = {
+ .count_objects = backend_shrink_memory_count,
+ .seeks = DEFAULT_SEEKS,
+};
+
static int __init xenbus_probe_backend_init(void)
{
static struct notifier_block xenstore_notifier = {
@@ -264,6 +298,9 @@ static int __init xenbus_probe_backend_init(void)
register_xenstore_notifier(&xenstore_notifier);
+ if (register_shrinker(&backend_memory_shrinker))
+ pr_warn("shrinker registration failed\n");
+
return 0;
}
subsys_initcall(xenbus_probe_backend_init);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index a7d90a719cea..8a1650bbe18f 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -126,6 +126,28 @@ static int xenbus_frontend_dev_probe(struct device *dev)
return xenbus_dev_probe(dev);
}
+static void xenbus_frontend_dev_shutdown(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ unsigned long timeout = 5*HZ;
+
+ DPRINTK("%s", dev->nodename);
+
+ get_device(&dev->dev);
+ if (dev->state != XenbusStateConnected) {
+ pr_info("%s: %s: %s != Connected, skipping\n",
+ __func__, dev->nodename, xenbus_strstate(dev->state));
+ goto out;
+ }
+ xenbus_switch_state(dev, XenbusStateClosing);
+ timeout = wait_for_completion_timeout(&dev->down, timeout);
+ if (!timeout)
+ pr_info("%s: %s timeout closing device\n",
+ __func__, dev->nodename);
+ out:
+ put_device(&dev->dev);
+}
+
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
.resume = xenbus_frontend_dev_resume,
@@ -146,7 +168,7 @@ static struct xen_bus_type xenbus_frontend = {
.uevent = xenbus_uevent_frontend,
.probe = xenbus_frontend_dev_probe,
.remove = xenbus_dev_remove,
- .shutdown = xenbus_dev_shutdown,
+ .shutdown = xenbus_frontend_dev_shutdown,
.dev_groups = xenbus_dev_groups,
.pm = &xenbus_pm_ops,
diff --git a/drivers/zorro/Makefile b/drivers/zorro/Makefile
index b360ac4ea846..91ba82e633e7 100644
--- a/drivers/zorro/Makefile
+++ b/drivers/zorro/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_ZORRO_NAMES) += names.o
-hostprogs-y := gen-devlist
+hostprogs := gen-devlist
# Files generated that shall be removed upon make clean
clean-files := devlist.h
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 2e4ca4dc0960..1c9ae08225d8 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -56,10 +56,9 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
return nbytes;
}
-static const struct file_operations proc_bus_zorro_operations = {
- .owner = THIS_MODULE,
- .llseek = proc_bus_zorro_lseek,
- .read = proc_bus_zorro_read,
+static const struct proc_ops bus_zorro_proc_ops = {
+ .proc_lseek = proc_bus_zorro_lseek,
+ .proc_read = proc_bus_zorro_read,
};
static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
@@ -105,7 +104,7 @@ static int __init zorro_proc_attach_device(unsigned int slot)
sprintf(name, "%02x", slot);
entry = proc_create_data(name, 0, proc_bus_zorro_dir,
- &proc_bus_zorro_operations,
+ &bus_zorro_proc_ops,
&zorro_autocon[slot]);
if (!entry)
return -ENOMEM;